blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b1d9f8bcd6774f79dc1d85c50778658e5e0a95e0
|
e953c138d3808d92fcc9848824985be5bc42f034
|
/r/list.r
|
6433ce3dd8409354d4aa435dd26cb4d630e57abb
|
[] |
no_license
|
hotoku/samples
|
1cf3f7006ae8ba9bae3a52113cdce6d1e1d32c5a
|
ce0d95d87e08386d9eb83d7983bd2eaff0682793
|
refs/heads/main
| 2023-08-09T09:05:15.185012
| 2023-08-04T09:29:06
| 2023-08-04T09:29:06
| 222,609,036
| 0
| 0
| null | 2022-03-30T01:44:03
| 2019-11-19T04:35:27
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 52
|
r
|
list.r
|
# Demonstrates the two list assignment operators: both statements below
# produce the same one-element list containing 100, via different routes.
x <- list()
x[1] <- 100
# `[<-` (single bracket): the RHS is coerced to a list and spliced into slot 1.
y <- list()
y[[1]] <- 100
# `[[<-` (double bracket): the value itself is stored directly in slot 1.
|
e6190b2b72b9bb3d1c333ca5f0dc177a6fdcbc2a
|
ad5b9d6a560f8b023e1aa3391ebb072f7d40afb5
|
/plot_mq_dist.R
|
77c24769a278995f1abf8049ca9d5e8366cf9423
|
[] |
no_license
|
ebete/MC_HiC
|
1d0138652881cf59cdc8438901d51ec11addf7ac
|
09b48db36623b2e187d7b73070a0d231a1812d8e
|
refs/heads/master
| 2020-04-08T00:11:09.794448
| 2019-04-11T14:08:12
| 2019-04-11T14:08:12
| 158,840,919
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 713
|
r
|
plot_mq_dist.R
|
#!/usr/bin/env Rscript
# Plot the distribution of MAPQ values stored in a semicolon-separated dump.
suppressPackageStartupMessages({
  library(ggplot2)
  library(reshape2)
  library(scales)
})
# Load the MAPQ matrix (no header; ";"-separated).
# NOTE(review): hard-coded absolute path — consider reading it from commandArgs().
mq <- read.csv("/home/thom/mc_hic/mc_4c/mq_dist.txt", header = FALSE, sep = ";", as.is = TRUE)
# First input column carries the series names: transpose, promote row 1 to
# column names, drop it, and coerce the remaining cells to numeric.
mq <- t(mq)
colnames(mq) <- mq[1, ]
mq <- mq[-1, ]
class(mq) <- "numeric"
# Long format for ggplot: one row per (series, value) pair.
mq.melt <- melt(mq)
# Dodged histogram of MAPQ per series, binned in steps of 10.
ggplot(mq.melt, aes(x = value, fill = Var2)) +
  geom_histogram(position = "dodge", na.rm = TRUE, binwidth = 10) +
  theme_bw() +
  scale_x_continuous(limits = c(0, 70), breaks = seq(10, 100, by = 10)) +
  scale_y_continuous(labels = comma) +
  xlab("MAPQ") +
  ylab("# reads") +
  ggtitle("Distribution of MAPQ values") +
  theme(
    plot.title = element_text(face = "bold", hjust = 0.5)
  )
|
bed33e0ed7c3bd13c9a0965ad78552694743c59e
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.customer.engagement/man/connect_list_prompts.Rd
|
84ef98e0fb2c7aeb91146c6309cfbbd8452badc0
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 890
|
rd
|
connect_list_prompts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_list_prompts}
\alias{connect_list_prompts}
\title{Provides information about the prompts for the specified Amazon Connect
instance}
\usage{
connect_list_prompts(InstanceId, NextToken = NULL, MaxResults = NULL)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance.}
\item{NextToken}{The token for the next set of results. Use the value returned in the
previous response in the next request to retrieve the next set of
results.}
\item{MaxResults}{The maximum number of results to return per page. The default MaxResult
size is 100.}
}
\description{
Provides information about the prompts for the specified Amazon Connect instance.
See \url{https://www.paws-r-sdk.com/docs/connect_list_prompts/} for full documentation.
}
\keyword{internal}
|
1f28c86632ecc26180ffe4dc27a0e6389ddebde1
|
c4910920221d62ec83a9345dcc918e708351eae1
|
/tests/testthat/test_chisquare_extractor.R
|
0111f4555e00d15f79d5489c576f49a9da3f39df
|
[
"MIT"
] |
permissive
|
fsingletonthorn/EffectSizeScraping
|
7e0a7b1a9b1c9eea25b91a0d8390a67934929b67
|
bcc3abfe088ddaf16439b772d9bcb316f41c52b9
|
refs/heads/master
| 2021-06-01T22:40:05.114137
| 2020-03-25T03:04:09
| 2020-03-25T03:04:09
| 148,737,572
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,566
|
r
|
test_chisquare_extractor.R
|
# Unit tests for extractChiSquare(): pulling chi-square statistics, degrees
# of freedom, sample sizes and p-values out of free text.
context("chisquare_extractor")

# Representative ways chi-square results are reported, covering the unicode
# chi (χ), optional df / N arguments and irregular whitespace.
testChi <- c("chi square = 12.32",
             "chi2 = 123.32",
             "χ2(1234) = 1232.23, p < .05",
             "χ2 = 122.23,p = .13",
             "chi2(12345) = 123.2, p < .001",
             "χ2(1, N = 320) = 22.31, p < 0.001",
             "χ2(n = 320, df =12) = 23.31, p < 0.001",
             "χ2( 12399) = 1232.23, p < .05",
             "χ2(12399 ) = 1232.23, p < .05",
             "χ2( 12399) = 1232.23, p < .05")
testChiString <- stringr::str_flatten(testChi, collapse = " ")

# Setting up the expected chi-square values via a lookbehind on the label.
# The regex misses cases 6 and 7 (explicit "N =" / "df =" arguments), so
# those expected values are patched in by hand below.
chis <- c(stringr::str_remove_all(
  stringr::str_extract(
    testChi,
    "(?<=((chi square)|(χ2)|(<U\\+03C7>)|(chi squared)|(chisquared)|(chisquare)|(chi2?))\\s{0,3}\\(?\\s{0,3}\\d{0,10}\\s{0,3},?\\s{0,3}N?\\s{0,3}\\=?\\s{0,3}\\d{0,10}\\s{0,3}\\)?\\s{0,3}\\=\\s{0,3})\\s{0,3}-?\\s{0,3}\\d*\\.?\\d*"
  ),
  "\\s"
))
chis[6] <- 22.31  # numeric RHS is coerced to "22.31" — chis is character
chis[7] <- 23.31

# Expected degrees of freedom (first parenthesised number). Case 7 reports
# df via an explicit "df =", which the regex misses — patch it manually.
dfs <- stringr::str_remove_all(
  stringr::str_extract(
    testChi,
    "((chi square)|(χ2)|(<U\\+03C7>)|(chi squared)|(chisquared)|(chisquare)|(chi2?))\\s{0,3}\\(\\s{0,3}\\d*"
  ),
  "(chi square)|(χ2)|(<U\\+03C7>)|(chi squared)|(chisquared)|(chisquare)|(chi2?)\\s{0,3}\\(|\\(|\\s"
)
dfs[7] <- "12"

test_that("chi squared test extractor works", {
  extracted <- extractChiSquare(testChiString)
  expect_identical(extracted$reported, testChi)
  expect_identical(extracted$value,
                   as.numeric(chis))
  # df1 is never populated for chi-square tests
  expect_true(all(is.na(extracted$df1)))
  expect_equal(extracted$df2,
               as.double(dfs))
  expect_identical(extracted$p,
                   stringr::str_extract(
                     testChi,
                     "(p|P|, ns).*"
                   ))
  expect_equal(extracted$n,
               as.numeric(
                 stringr::str_extract(
                   testChi,
                   "(?<=((N|n)\\s{0,5}=\\s{0,5}))\\d+"
                 )))
})

# Fix: the following two tests previously shared one identical description,
# making failures ambiguous; each now states the misread form it covers.
test_that("Chi squares misread as chi(df)2 are properly detected", {
  extracted <- extractChiSquare("χ(2)2=2.12,p < 0.001")
  expect_equal(extracted$value, 2.12)
  expect_equal(extracted$df2, 2)
  expect_equal(extracted$p, "p < 0.001")
})

test_that("Chi squares misread with a sample size are properly detected", {
  extracted <- extractChiSquare("χ(2, n = 123)2=2.12,p < 0.001")
  expect_equal(extracted$value, 2.12)
  expect_equal(extracted$df2, 2)
  expect_equal(extracted$p, "p < 0.001")
  expect_equal(extracted$n, 123)
})
|
5bd08ce6ceee500a004e7956447328c1aa305c59
|
344513a699b3aa6c15ce2e9c3b7c7a913a4f4a1f
|
/man/rport.db.cache.save.Rd
|
f8426912fc02293c923a0e87e1f77bc57b21689c
|
[
"MIT"
] |
permissive
|
logufu/rport
|
df1d2a3f2c0dfa58c91d4ebd170c98cd5c5d2c83
|
a3ae88ae2fe60a8a44545824482433ceb6cd237e
|
refs/heads/master
| 2020-12-28T23:35:38.957165
| 2014-06-01T11:04:39
| 2014-06-01T11:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 390
|
rd
|
rport.db.cache.save.Rd
|
\name{rport.db.cache.save}
\alias{rport.db.cache.save}
\title{Upsert an entry in the .Rportcache file with a hash(query)}
\usage{
rport.db.cache.save(query, conn, dat)
}
\arguments{
\item{query}{sql query string}
\item{conn}{string connection name}
\item{dat}{data.table with the results to save.}
}
\description{
Upsert an entry in the .Rportcache file with a
hash(query)
}
|
a59009d4abdd4086b4a010bdd33d8db56dd09e81
|
9fd42fbf2ec96f73a4a64f9bc21884c9cafb4a11
|
/bigdata/N2H4.R
|
799239fdb773532b1d6ec08672fc21b3d0e10c70
|
[] |
no_license
|
ParkNuri/R-Study
|
11742a0647f87ec9c395b78d9fd3567a9c933ff3
|
8837f01fc7edf17cb4156b306535d1c0b62abc2a
|
refs/heads/master
| 2022-03-30T17:12:17.187885
| 2020-05-01T09:06:18
| 2020-05-01T09:06:18
| 260,416,620
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
r
|
N2H4.R
|
# Scrape all comments from a Naver news article using N2H4.
# Fix: install the package only when it is missing, instead of
# unconditionally reinstalling (and hitting the network) on every run.
if (!requireNamespace("N2H4", quietly = TRUE)) {
  install.packages("N2H4")
}
library(N2H4)
library(stringr)
library(dplyr)
# Target article URL.
url <- "https://news.naver.com/main/read.nhn?mode=LSD&mid=shm&sid1=100&oid=020&aid=0003276790"
# Fetch every comment, keeping only the commenter name and the comment text.
mydata <- getAllComment(url) %>%
  select(userName, contents)
mydata
# ctrl + shift + A : reindent code (RStudio shortcut)
|
0699a8b4edfbcc267ce6fa3d923794fc64061e55
|
ae576fcfb2f1da232ec4345c2f207ff5e2f1a8b7
|
/pollscrape.R
|
17a0d69dffc9b3119af6fa3c2214d3550c7bb1c5
|
[] |
no_license
|
fghjorth/danishpolls
|
2827ca124f04aa613d1076cda7f6454ccf36e589
|
148c8d683d194f1a40c8c8e3c453b0e30f76c1a4
|
refs/heads/master
| 2016-09-02T05:54:40.314602
| 2015-05-28T12:28:22
| 2015-05-28T12:28:22
| 22,598,248
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,814
|
r
|
pollscrape.R
|
# Scrape and analyse Danish opinion polls: compute red/blue bloc support,
# estimate polling-house effects, and plot a house-corrected bloc difference.
#
# Fixes in this revision:
#  * resolved an unresolved git merge conflict (<<<<<<< / ======= / >>>>>>>)
#    that made the file unparseable — kept the HEAD side (column "house",
#    non-reference houses 2:9) and adopted origin's na.rm fix for the mean;
#  * library loads hoisted to the top (ggplot2 was used before it was loaded);
#  * TRUE/FALSE spelled out instead of T/F.
library(RCurl)
require(lubridate)
require(ggplot2)
require(scales)

# Read in data
polls <- read.csv("https://raw.github.com/fghjorth/danish-polls/master/all-polls.csv")

# Clean up: keep the first 11 columns and give them readable names
polls <- polls[, 1:11]
names(polls) <- c("house", "date", "Venstre", "Socialdemokraterne", "DF", "Radikale",
                  "SF", "Enhedslisten", "LA", "Konservative", "Kristendemokraterne")
polls$house <- gsub("Rambøll", "Ramboll", polls$house)

# Add parsed date
polls$fulldate <- mdy(polls$date)

# Calculate blocs (party columns arrive as factors/characters, hence the coercion)
names(polls)
polls$redbloc <- as.numeric(as.character(polls[, 4])) + as.numeric(as.character(polls[, 6])) +
  as.numeric(as.character(polls[, 7])) + as.numeric(as.character(polls[, 8]))
polls$bluebloc <- as.numeric(as.character(polls[, 3])) + as.numeric(as.character(polls[, 5])) +
  as.numeric(as.character(polls[, 9])) + as.numeric(as.character(polls[, 10]))
polls$blocdiff <- polls$bluebloc - polls$redbloc

# House effects?
anova(lm(blocdiff ~ factor(house), data = polls))
houseeffects <- as.data.frame(summary(lm(blocdiff ~ factor(house), data = polls))$coefficients[2:9, 1:2])
names(houseeffects) <- c("est", "se")
houseeffects$house <- as.character(levels(as.factor(polls$house))[2:9])
bdmean <- mean(polls$blocdiff, na.rm = TRUE)
t95 <- 1.96
ggplot(houseeffects, aes(x = est - bdmean, y = reorder(house, est))) +
  geom_point() +
  geom_errorbarh(aes(xmin = est - t95 * se - bdmean, xmax = est + t95 * se - bdmean, height = 0)) +
  geom_vline(xintercept = 0, linetype = 2) +
  theme_bw() +
  xlab("") +
  ylab("")

# Create house effect-corrected estimate of bloc difference
polls$housecorrblocdiff <- NA
for (i in seq_len(nrow(polls))) {
  if (polls$house[i] %in% houseeffects$house) {
    correction <- houseeffects$est[houseeffects$house == polls$house[i]] - bdmean
    polls$housecorrblocdiff[i] <- polls$blocdiff[i] - correction
  }
}

# Plot
ggplot(polls, aes(x = fulldate, y = housecorrblocdiff, colour = housecorrblocdiff)) +
  geom_point(alpha = 1) +
  geom_smooth(method = "loess", span = .2, level = .90) +
  geom_smooth(method = "loess", span = .2, level = .95) +
  geom_smooth(method = "loess", span = .2, level = .99, color = "black") +
  xlab("") +
  ylab("Fordel til blå blok, pct.-point") +
  geom_hline(yintercept = 0, linetype = 2) +
  scale_colour_gradient2(low = "red", high = "blue", mid = "dark gray", guide = FALSE) +
  theme(legend.position = "none") +
  theme_bw()

#########################
# obsolete stuff
#########################
# Linear trend over the past year?
polls$dayssinceelec <- as.numeric(difftime(as.Date(polls$fulldate), as.Date("2011-09-15"), unit = "days"))
linfcast <- coef(lm(blocdiff ~ dayssinceelec, data = subset(polls, as.Date(fulldate) > "2013-10-07")))
fcast <- data.frame(date = ymd(c("2013-10-07", "2014-10-07", "2027-06-01")))
fcast$daysin <- as.numeric(difftime(as.Date(fcast$date), as.Date(fcast$date[1]), unit = "days"))
fcast$predblocdiff <- linfcast[1] + fcast$daysin * linfcast[2]
fcast
# geom_line(data=fcast,aes(x=date,y=predblocdiff),color="red",linetype="dotted") +
# geom_line(data=subset(fcast,daysin<366),aes(x=date,y=predblocdiff),color="red",linetype="solid") +
# xlim(ymd("2011-09-15"),ymd("2021-11-01")) +

#########################
# code used for scraping from the wiki
#########################
# Reconstruct the full date (year breaks marked by NA date rows)
breakpoints <- c(which(is.na(polls$date)), nrow(polls) - 1)
yearsindata <- c(2014, 2013, 2012, 2011)
polls$fulldate <- NA
for (i in 4:1) {
  polls$fulldate[0:breakpoints[i]] <- paste(polls$date[0:breakpoints[i]], ", ", yearsindata[i], sep = "")
}
polls$fulldate[nrow(polls)] <- polls$date[nrow(polls)]
polls <- polls[!is.na(polls$date), ]
polls$fulldate[nrow(polls)] <- as.character(polls$date[nrow(polls)])
polls$fulldate <- mdy(tolower(polls$fulldate), locale = "English")
# Clean up house names
polls$Institut <- gsub("\\[|1|2|3|4|5|6|\\]", "", polls$Institut)
polls$Institut <- gsub("DR", "Epinion", polls$Institut)
|
e18257b10970b4fa3b269013e70efd508564373a
|
a1a25e620a92a30f7fcff613cfc005fed63fd0e0
|
/R/faoswsEnsure-package.R
|
1b5bc6eee5bc0a7f01f3fcf5c5bfcc2d6baafb13
|
[] |
no_license
|
SWS-Methodology/faoswsEnsure
|
12614f7efa803490743184766beb26b01e59f881
|
40a6d73fa2bbbc997511f82ad2d91bc480b1fe22
|
refs/heads/master
| 2021-01-21T14:44:06.030613
| 2018-08-07T07:40:10
| 2018-08-07T07:40:10
| 59,017,938
| 0
| 0
| null | 2016-05-17T15:19:55
| 2016-05-17T11:52:34
|
R
|
UTF-8
|
R
| false
| false
| 343
|
r
|
faoswsEnsure-package.R
|
##' Package to ensure data and input quality
##'
##' Package-level roxygen documentation stub; the \code{NULL} below is the
##' conventional placeholder object for package docs.
##'
##' @name faoswsEnsure-package
##' @aliases faoswsEnsure
##' @docType package
##' @title The package hosts standard check functions for the Statistical
##'   Working System (SWS).
##'
##' @author Michael. C. J. Kao \email{michael.kao@@fao.org}
##' @keywords package
##' @import data.table
NULL
|
a791cbe37d764aa6a32e9d97bccdbf1f6b3e771d
|
15f8232b8a574ae94266927e4df5182cfc99f517
|
/man/get_parent_id.Rd
|
f166adb29e1433a257c362cb931813ebc7d108cd
|
[] |
no_license
|
cran/autoharp
|
a2c6d51ad22354a276145098200e92aecd7bc3fd
|
d2efbb0a76285ba95ed6950a61e05f1398b5e656
|
refs/heads/master
| 2023-09-06T05:29:22.580401
| 2021-11-12T21:50:02
| 2021-11-12T21:50:02
| 334,082,346
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,041
|
rd
|
get_parent_id.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/th_getter-length.R
\name{get_parent_id}
\alias{get_parent_id}
\alias{get_parent_id,TreeHarp-method}
\alias{get_parent_id,list-method}
\title{Generic for Getting Parent Node Id.}
\usage{
get_parent_id(x, node_num)
\S4method{get_parent_id}{TreeHarp}(x, node_num)
\S4method{get_parent_id}{list}(x, node_num)
}
\arguments{
\item{x}{An object of class TreeHarp or an adjacency list.}
\item{node_num}{An integer, length 1. This the node whose parent we are
after. If node_num is equal to 1, then NULL is returned because that should
be the root node.}
}
\value{
An integer, indicating the parent node.
}
\description{
The generic method definition for getting parent node id.
}
\section{Methods (by class)}{
\itemize{
\item \code{TreeHarp}: Obtain parent node id.
Extracts parent id of a node from a TreeHarp object.
\item \code{list}: Obtain parent node id.
Extracts parent id of a node from an adjacency list object.
}}
\seealso{
\code{\link{get_child_ids}}
}
|
f87db766bfadf5e863a6aa90b108d94dc72c582d
|
f836ed096d28f86e86c9704903840ff969cbf9b5
|
/src/assignment3/exercise1.R
|
a814f468c2a188eeea7ab390dd4b486fe9d039be
|
[] |
no_license
|
charx7/statisticalGenomics
|
bf7556de89e20db5a0796a6e4b8f1748d6301470
|
45466758c9e9f127b121e4e32b732ab2fd307332
|
refs/heads/master
| 2020-07-22T18:01:41.178986
| 2019-10-21T11:38:14
| 2019-10-21T11:38:14
| 207,283,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,209
|
r
|
exercise1.R
|
# Assignment 3, exercise 1.
# For each of 15 simulated data sets (5 each at m = 20, 50, 100 samples),
# reconstruct the network from absolute Pearson correlations and score it
# against the true network with AUROC; report means and standard deviations.
#
# Fixes: dropped `rm(list=ls())` (scripts must not wipe the caller's
# workspace), preallocated the result vector instead of growing it, and
# used `<-` consistently for assignment.
cat("\f")                 # clear the console
source('./assignment3.R') # pre-processing: defines data.list, make_true_Net(), compute_AUROC()

# Exercise 1
true_Net <- make_true_Net()        # get the true net value
true_Net <- true_Net + t(true_Net) # make the true network symmetric

# Compute the AUROC for each data set in data.list
AUROC.vector <- numeric(15)        # preallocated: one score per data set
for (idx in seq_len(15)) {
  curr_data <- data.list[[idx]]
  corr_coef_matrix <- cor(t(curr_data), method = 'pearson') # correlation coefficient matrix
  C <- abs(corr_coef_matrix)       # edge score = absolute correlation
  AUROC.vector[idx] <- compute_AUROC(C, true_Net)
}

# means computations
cat('The mean of the AUROC of m=20 is: ', mean(AUROC.vector[1:5]), '\n')
cat('The mean of the AUROC of m=50 is: ', mean(AUROC.vector[6:10]), '\n')
cat('The mean of the AUROC of m=100 is: ', mean(AUROC.vector[11:15]), '\n')
# std computations (sd() is sqrt(var()) for a vector)
cat('The std of the AUROC of m=20 is: ', sd(AUROC.vector[1:5]), '\n')
cat('The std of the AUROC of m=50 is: ', sd(AUROC.vector[6:10]), '\n')
cat('The std of the AUROC of m=100 is: ', sd(AUROC.vector[11:15]), '\n')
|
a2f8df8fa6597f03f6f1b2b52135fe8a8db62641
|
56cf8abe4f8a8bdf0bf903f87020d73e8d882261
|
/rankhospital.R
|
d9c098ce3433952fdd0185d580ae85a73a56d667
|
[] |
no_license
|
ebratt/ProgrammingAssignment3
|
5a658fcff2e9c0fb52af4bd53b77f344aef9ae6c
|
5eb3c5770e2fc1555fb5d05a5f94d038f80c4413
|
refs/heads/master
| 2020-05-17T03:01:34.574885
| 2014-08-23T13:09:29
| 2014-08-23T13:09:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,492
|
r
|
rankhospital.R
|
## 2014-08-22
## Eric Bratt
## Coursera rprog-006
## https://github.com/ebratt/ProgrammingAssignment3
## data from http://hospitalcompare.hhs.gov
## Assignment 3
## exercise 3
## rankhospital(state, outcome, num): reads outcome-of-care-measures.csv and
## returns the name of the hospital in `state` holding rank `num` for the
## 30-day death rate of `outcome`. `num` may be an integer rank, "best", or
## "worst"; NA is returned when a numeric rank exceeds the number of
## hospitals with a reported rate. For example,
##
##     rankhospital("MD", "heart failure", 5)
##
## returns the hospital with the 5th lowest 30-day death rate for heart
## failure in Maryland. Ties are broken alphabetically by hospital name.
##
## Fix: rows without a reported rate were previously removed by comparing
## the (already numeric) outcome column against the string 'Not Available',
## which only worked because NA comparisons are silently dropped by
## subset(); they are now removed explicitly with is.na().
rankhospital <- function(state, outcome, num = "best") {
  rank <- num
  ## read outcome data (all columns as character; outcome column coerced later)
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## require that state be a valid state (column 7 holds the abbreviation)
  states <- unique(data[, 7])
  if (is.na(match(state, states))) {
    stop("invalid state")
  }
  ## require that outcome be a valid outcome
  outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (is.na(match(outcome, outcomes))) {
    stop("invalid outcome")
  }
  ## require that rank be numeric or one of "best"/"worst"
  if (!is.numeric(rank) && is.na(match(rank, c("best", "worst")))) {
    stop("invalid rank")
  }
  ## restrict to the requested state
  data2 <- subset(data, State == state)
  ## map outcome to its 30-day mortality column number
  colMap <- list("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  outcomeCol <- colMap[[outcome]]
  ## coerce to numeric; non-numeric entries (e.g. "Not Available") become NA
  data2[, outcomeCol] <- suppressWarnings(as.numeric(data2[, outcomeCol]))
  ## keep only hospital name + outcome, with stable column names
  data3 <- data2[, c(2, outcomeCol)]
  colnames(data3) <- c("Hospital", "Outcome")
  ## drop hospitals without a reported rate, then rank by rate with
  ## alphabetical hospital name as tie-breaker
  data4 <- data3[!is.na(data3$Outcome), ]
  data5 <- data4[order(data4$Outcome, data4$Hospital), ]
  ## select the "rank"ed hospital name
  if (rank == "best") data5[[1, 1]]
  else if (rank == "worst") data5[[nrow(data5), 1]]
  else if (rank > nrow(data5)) NA
  else data5[[rank, 1]]
}
|
fadb4dc459b6e905dae84e4fe6a89b8a191f828f
|
533848bd6eee73a18b9995e23c8f7667233a6226
|
/materiales/prope/programacion_r/Script-Introduccion-R.R
|
17f5c0f3b672fb9ca67a3ca513339219aa52e253
|
[] |
no_license
|
ramegon76/maec
|
839b60bdbc54bb86cf48bdc3db41dfc0fc2a580f
|
0b9d46be0a53f9b94e83e1f43b8563d044e240c2
|
refs/heads/master
| 2021-11-27T13:57:04.541059
| 2021-08-16T02:41:53
| 2021-08-16T02:41:53
| 216,220,925
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,518
|
r
|
Script-Introduccion-R.R
|
#-----------------------------------------------------
#
# Maestria MAEC INEGI
# Propedeutico
# Introduccion a la Programación
# Nociones Basicas de R
#
#-----------------------------------------------------
#-----------------------------------------------------
# Librerías necesarias
library(MASS)
library(lattice)
#-----------------------------------------------------
# LA CONSOLA DE R
#-----------------------------------------------------
# Manera clasica de consultar la ayuda:
help(sd)
?sd
help.search("solve") # Archivos relacionados con "solve"
help() # Ayuda para caracteres especiales o palabras reservadas
#Librerias y objetos disponibles
library() # Muestra las librerias disponibles que pueden ser cargadas
search() # Muestra las librerias ya cargadas y disponibles
ls() # Objetos usados
# Se puede usar R como una simple calculadora
5 - 1 + 10 # Suma y resta
7 * 10 / 2 # multiplica y divida
pi # el número pi
sqrt(2) # Raiz cuadrada
# Crear objetos mediante asignaciones
x <- 5 # El objeto x toma el valor 5
x # imprime x
x = 6 # x ahora toma el valor 6
x
(x <- pi) # asigna el numero pi a x e imprime
# R es sensible a mayúsculas y minúsculas
# Dos objetos diferentes
b <- 2
B <- 4
# Comandos utiles
ls() #muestra los objetos usados
rm(b) #borra objetos usados
ls()
# Saber si un nombre es una función de R
c
#-----------------------------------------------------
# TIPOS DE OBJETOS EN R
#-----------------------------------------------------
#-----------------------------------------------------
# Vectores
x <- 0:19 # una secuencia de 20 números
x[5] # muestra el elemento en la 5ta. posición
x <- seq(1, 10, length = 5)
seq(1, 20, by = 2) # Secuencia de dos en dos
# Vector de ceros.
numeric(6)
integer(3)
rep(2,6) # Vector de constantes
#vectores a traves de "c"
x <- c(1.3,2.5,3.6,4.7,8.2)
y <- c(x, 0, x) # Generamos un vector a partir de otro vector.
# Se pueden agregar nombres a los vectores.
z <- c(1, 3, 45)
names(z)
names(z) <- c("primero","segundo","tercero")
z
#vector logico
x <- 1:5
cond1 <- x < 4
cond1
cond2 <- x >= 3
cond1 & cond2 # Hacemos una ’y’ lógica
logical(3) # Vector de tres elementos lógicos
# Generacion de caracteres
(x <- character(3)) #vector de 3 entradas vacias
# Vector con las 4 primeras letras del alfabeto
x2 <- c("A","B","C","D")
# Otra forma
x3 <- LETTERS[1:4]
# Atributos de un objeto
is.logical(x)
is.numeric(x)
is.integer(x)
# Uso de NA, NaN e Inf
(x <- c(NA,0/0,Inf-Inf,Inf,5)) #diferencias entre NA, NaN e Inf
# Longitud de un vector
length(y)
# Convertir vectores
h1 <- c(1.3,0.6,-3.8)
h1
h2 <- as.integer(h1) # convierte a enteros
h2
as.logical(h2) # convierte en logico
# Operaciones con vectores
a <- 5:2
b <- (1:4) * 2
a
b
a + b # Sumamos vectores
a - b # Diferencia de vectores
a * b # Producto de vectores (elemento a elemento)
a / b # Cociente de vectores (elemento a elemento)
a ^ b # Potencia de vectores (elemento a elemento)
# Producto de un escalar por un vector
2 * a
# Operación con más de dos vectores
d <- rep(2,4)
a + b + d
# Evaluacion de la funcion
# x^2 + 2y
# f(x,y) = log (-----------)
# (x + y)^2
# Definimos los vectores x e y
(x <- 10:6)
(y <- seq(1,9,2))
# Definimos f(x,y) en términos de estos vectores.
# Guardamos los resultados en z
(z <- log(x^2 + 2*y) / (x + y)^2)
#-----------------------------------------------------
# Matrices y arreglos
# definir la matriz
# | 1 4 7 |
# | 2 5 8 |
# | 3 6 9 |
# USaremos
matriz <- matrix(c(1,2,3,4,5,6,7,8,9), nrow = 3, ncol = 3)
# dimensiones de la matriz
dim(matriz)
# Elementos especficos de una matriz
matriz[2,3]
matriz[1:2,2:3]
matriz[,c(1,3)]
# Multiplicacion de matrices
matriz %*% matriz # Multiplicacion de matrices
matriz * matriz # Multiplicacion elemento a elemento
# Dos ejemplos mas
matrix(1:6)
matrix(1:6, nrow = 3)
# Arreglos
x <- array (1:24, c(3,4,2))
array(1:4, 6) # Vector de tamaño 6
array(1:6, c(2,5)) # Matriz 2x5, se llena por columnas
# Matrices (Por defecto llena por columnas)
matrix(c(4,5,9,52,67,48), nrow = 2, ncol = 3)
# Para llenarla por filas, se le agrega byrow=TRUE
matrix(c(4,5,9,52,67,48), nrow = 2, ncol = 3, byrow = TRUE)
# Colocando vectores como columna
(y <- cbind(letters[1:4], LETTERS[1:4]))
#-----------------------------------------------------
# Listas y factores
# Listas
lista <- list(Marca = "Chevrolet", Modelo = "Aveo", n.puertas = 5, Año = c(2006,2007))
lista
# Seleccionamos posiciones de la lista
lista[[1]] # Posición 1 de la lista
lista[[4]][2] # Posición 4 de la lista, subposición2
# También podemos referirnos a las posiciones por los nombres.
lista$Marca
lista$Modelo
# Factores
# Con la funcion gl()
edades <- gl(4,5,16,labels=c("Niños","Jovenes","Adulto","Anciano"))
edades
# Grafica del factor
plot(edades)
# Con la funcion factor()
sangre <- factor(rep(c("A","B","AB","O"),4,15))
sangre
# Grafica del factor
plot(sangre)
# Ordenar
escolaridad <- factor(rep(c("MedioSuperior","Primaria","Secundaria","Superior","Prescolar"),
5,15))
escolaridad
ordered(escolaridad,levels = c("Superior","MedioSuperior","Secundaria","Primaria","Prescolar"))
#-----------------------------------------------------
# Data.frame
# Creando una data.frame
dataf <- data.frame(Nombre = c("Juan","Maria","Jose","Carla"),
Edad = c(27,34,40,39),
Poblacion = c("Monterrey","Apodaca","Guadalupe", "San Pedro"),
Sexo = c("M","F","M","F"),
Edo.Civil = c("C","S","S","C"))
dataf
# attach y detach
attach(lista)
Marca
attach(dataf)
Edad
# Otro ejemplo
ojos <- factor(c("Azules","Marrones","Marrones"),
levels = c("Azules","Marrones","Verdes","Negros"))
datos <- data.frame(Color.ojos = ojos,
Peso = c(68,75,88),
Altura = c(1.65,1.79,1.85))
datos
# Convertir matriz en data.frame
datos2 <- as.data.frame(matriz)
# Nombre por defecto de las variables
names(datos2)
# Cambiando los nombres de las varaibles
names(datos2) <- c("Variable 1","Variable 2","Variable 3")
# Uso del operador $
datos$Color.ojos
#-----------------------------------------------------
# LECTURA/ESCRITURA DE DATOS
#-----------------------------------------------------
#-----------------------------------------------------
# Leer datos de un archivo
# Uso de la funcion read.table()
PreciosCasas <- read.table("datos.casas.txt")
PreciosCasas
# Otro ejemplo de uso de read.table()
MEXpob <- read.table("MEXpob.txt", header = TRUE)
MEXpob
# Grafico de los datos
plot(Pob. ~ Año, data = MEXpob, pch = 16)
# La funcion scan()
misdatos <- scan("entrada.txt", list("",0,0)) #uso de la funcion scan
misdatos
# La función read.fwf()
misdatos2 <- read.fwf("datos.txt", widths=c(1, 4, 3))
misdatos2
#-----------------------------------------------------
# Guardar datos
# La función write.table()
write.table(PreciosCasas, "./Datos/PreciosCasas2.txt")
write.table(MEXpob, "./Datos/MEXpob2.txt")
# La función write()
# guardando un vector
x <- c(1,2,3,4,5)
write(x, "./Datos/x.txt")
# guardando una matriz
x <- matrix(1:9, ncol = 3, byrow = T)
x
write(t(x), "./Datos/xx.txt", ncol = ncol(x))
#-----------------------------------------------------
# COMO PROGRAMAR EN R
#-----------------------------------------------------
# Condicional if
# Un ejemplo de if
z <- 1:10
z1 <- sample(z,1)
if (z1 > 7) {w = 1} else {w = 0}
w
# Un ejemplo con condición lógica
x <- TRUE
if(x) y <- 1 else y <- 0
y
# Dos asignaciones a ejecutar
x <- FALSE
if(!x){y <- 1 ; z <- 2}
y
z
# Dos condiciones a verificar
# Raíz n-ésima de un número real
n <- 7; x <- -32
if(n%%2 == 1 || x >=0 ){
sign(x)*abs(x)^(1/n)
} else{
NaN
}
# Ciclos
# Usando un for y creando vectores logicos
x1 <- as.logical(as.integer(runif(5, 0, 2)))
x1
y1 <- vector()
y1
for(i in 1 : length(x1)){
if(x1[i]){y1[i] <- 1}
else {y1[i] <- 0}
}
y1
# Un ejemplo de repeat y break
x2 <- 1:10; x3 <- 10:20; i <- 1
repeat{
y <- x2[i] + x3[i]
i <- i+1
if(i == 10) break
}
y
# Funciones
# Definimos la función cubo
cubo <- function(x)
{return(x^3)}
# Ejecutamos la función cubo con x=2
cubo(3)
# Definimos la funcion ff
ff <- function (x = 1:10, y = (1:10)^2, showgraph = T)
{
if (showgraph) plot (x,y)
else print (cbind (x,y))
return (invisible ())
}
# Ejecuciones de la funcion ff
ff (1:10, (1:10)^2, T)
ff (1:10, (1:10)^2)
ff (1:20, (1:20)^2)
#-----------------------------------------------------
# GRAFICOS EN R
#-----------------------------------------------------
# Demos
demo(graphics)
demo(persp)
demo(image)
#-----------------------------------------------------
# Distribucion Weibull
tt <- seq(0,2,length=200)
par(mfrow=c(2,2),mar=c(3, 3, 2, 2), oma=c(0,0,2,0))
# Primera funcion
b1 <- .5; t1 <- 1/gamma(1+1/b1)
plot(tt, exp(-(tt/t1)^b1), xlim=c(0,2), cex.axis=.7, cex.lab=.7,
mgp=c(1.5,.5,0), lwd=2, col="blue", type="l", xlab="t", ylab="",
main= expression(paste(beta == .5, ", ", theta == .5)))
# Segunda funcion
b2 <- 1.5; t2 <- 1/gamma(1+1/b2)
plot(tt, exp(-(tt/t2)^b2), xlim=c(0,2), cex.axis=.7, cex.lab=.7,
mgp=c(1.5,.5,0), lwd=2, col="blue", type="l", xlab="t", ylab="",
main= expression(paste(beta == 1.5, ", ", theta == 1.108)))
# Tercera funcion
b3 <- 2.5; t3 <- 1/gamma(1+1/b3)
plot(tt, exp(-(tt/t3)^b3), xlim=c(0,2), cex.axis=.7, cex.lab=.7,
mgp=c(1.5,.5,0), lwd=2, col="blue", type="l", xlab="t", ylab="",
main= expression(paste(beta == 2.5, ", ", theta == 1.127)))
# Cuarta funcion
b4 <- 5; t4 <- 1/gamma(1+1/b4)
plot(tt, exp(-(tt/t4)^b4), xlim=c(0,2), cex.axis=.7, cex.lab=.7,
mgp=c(1.5,.5,0), lwd=2, col="blue", type="l", xlab="t", ylab="",
main=
substitute(paste(beta == 5, ", ", theta, " = ",t4),list(t4=round(t4,3))))
mtext("Funciones de Supervivencia", outer=TRUE, cex=1.2)
# (en esta grafica, notar en la ultima, el uso de substitute)
#-----------------------------------------------------
# Uso de la funcion layout():
layout( matrix(c(1,1,2,3), ncol = 2, byrow = T), heights = c(2,1))
par(mar=c(3, 3, 2, 2))
b2 <- 1.5; t2 <- 1/gamma(1+1/b4)
# Funcion de densidad
plot(tt, dweibull(tt,shape=b2, scale=t2), xlim=c(0,2), cex.axis=.7, cex.lab=1.2,
mgp=c(1.5,.5,0), lwd=2, col="blue", type="l", xlab="Funcion de Densidad",
ylab="", ylim=c(0,.8),main= expression(paste(beta == 1.5, ", ", theta == 1.108)))
# Funcion de supervivencia
plot(tt, exp(-(tt/t2)^b2), xlim=c(0,2), cex.axis=.7, cex.lab=.7,
mgp=c(1.5,.5,0), lwd=2, col="blue", type="l", xlab="t", ylab="",
main= "Funcion de Supervivencia")
# Funcion de distribucion
plot(tt, 1-exp(-(tt/t2)^b2), xlim=c(0,2), cex.axis=.7, cex.lab=.7,
mgp=c(1.5,.5,0), lwd=2, col="blue", type="l", xlab="t", ylab="",
main= "Funcion de Distribucion")
#-----------------------------------------------------
# Caminata aleatoria
par(mfrow=c(1,1))
n <- 100
xx <- 1:n
cam <- cumsum(sample(c(-1,1), size = n, replace = T))
camina <- function(k){
plot(1:k, cam[1:k], xlim = c(1,n), ylim = c(-n/10,n/10), type = "l",
col = "blue", lwd = 2, mgp = c(2,1,0), ylab = "Caminata",
xlab = "", cex.axis = .8)
abline(h = 0, col = gray(.8))
Sys.sleep(0.1) } # Sys.sleep() controla la rapidez de la animacion
trash <- sapply(xx,camina)
#-----------------------------------------------------
# The following plots were taken from demo(graphics).
#-----------------------------------------------------
#-----------------------------------------------------
# A simple plot, illustrating the use of color in its different elements.
opar <- par(bg = "white")		# save graphics parameters
x <- rnorm(50)
plot(x, ann = FALSE, type = "n")
abline(h = 0, col = gray(.90))
lines(x, col = "green4", lty = "dotted")
points(x, bg = "limegreen", pch = 21)
title(main = "Ejemplo simple de uso de color en Plot",
xlab = "Informacion con un color desvanecido",
col.main = "blue", col.lab = gray(.7),
cex.main = 1.2, cex.lab = 1.0, font.main = 4, font.lab = 3)
#-----------------------------------------------------
# Pie chart: a sample of the rainbow() color catalogue.
#x11()
par(bg = "gray")
pie(rep(1,24), col = rainbow(24), radius = 0.9)
title(main = "Una muestra del catalogo de colores",
cex.main = 1.4, font.main = 3)
title(xlab = "(Use esto como una prueba de la linealidad del monitor)(?)",
cex.lab = 0.8, font.lab = 3)
#-----------------------------------------------------
# Pie chart (again), with named slices.
pie.sales <- c(0.12, 0.3, 0.26, 0.16, 0.04, 0.12)
names(pie.sales) <- c("Blueberry", "Cherry",
"Apple", "Boston Cream",
"Other", "Vanilla Cream")
pie(pie.sales,
col = c("purple","violetred1","green3","cornsilk","cyan","white"))
title(main = "Ventas de Pasteles en Enero", cex.main = 1.8, font.main = 1)
title(xlab = "Pasteleria Lety", cex.lab = 1.2, font.lab = 3)
#-----------------------------------------------------
# Boxplots: 10 groups of 100 normals with shifted means; notches give
# approximate confidence intervals for the medians.
par(bg="cornsilk")
n <- 10
g <- gl(n, 100, n*100)
x <- rnorm(n*100) + sqrt(as.numeric(g))
boxplot(split(x,g), col = "lavender", notch = TRUE)
title(main = "Boxplots con intervalos de confianza para las medianas",
xlab = "Grupo", font.main = 4, font.lab = 1, cex.main = .9)
# Shaded area between two curves (two Brownian-motion paths).
par(bg="white")
n <- 100
x <- c(0,cumsum(rnorm(n)))
y <- c(0,cumsum(rnorm(n)))
# Polygon vertices: x path left-to-right, then y path right-to-left.
xx <- c(0:n, n:0)
yy <- c(x, rev(y))
plot(xx, yy, type = "n", xlab = "Tiempo", ylab = "Distancia")
polygon(xx, yy, col = "gray")
title("Distancia entre dos movimientos Brownianos")
#-----------------------------------------------------
# Excel-like chart (framed plot region, monthly x-axis).
x <- c(0.00, 0.40, 0.86, 0.85, 0.69, 0.48, 0.54, 1.09, 1.11, 1.73, 2.05, 2.02)
par(bg = "lightgray")
plot(x, type = "n", axes = FALSE, ann = FALSE)
usr <- par("usr") # c(x1,x2,y1,y2): coordinates of the plotting region
rect(usr[1], usr[3], usr[2], usr[4], col = "cornsilk", border = "black")
lines(x, col = "blue")
points(x, pch = 21, bg = "lightcyan", cex = 1.25)
axis(2, col.axis = "blue", las = 1)
axis(1, at = 1:12, lab = month.abb, col.axis = "blue")
title(main = "Nivel de interes en R", font.main = 4, col.main = "red")
title(xlab = "1996", col.lab = "red")
#-----------------------------------------------------
# Histogram of 1000 simulated standard normals.
par(bg = "cornsilk")
x <- rnorm(1000)
hist(x, xlim = range(-4, 4, x), col = "lavender", main = "", ylab = "Frecuencia")
title(main = "1000 realizaciones simuladas de una variable normal", font.main = 3)
#-----------------------------------------------------
# Pairwise scatterplot matrix of the iris measurements.
pairs(iris[1:4], main = "Datos de Iris de Edgar Anderson", font.main = 4, pch = 19)
#-----------------------------------------------------
# Pairwise scatterplot matrix (different color per species).
aa <- iris
names(aa) <- c("Long.Sepalo","Ancho.Sepalo",
"Long.Petalo","Ancho.Petalo","Especie")
pairs(aa[1:4], main = "Datos de Iris de Edgar Anderson", pch = 21,
bg = c("red", "green3", "blue")[unclass(iris$Species)])
#-----------------------------------------------------
# Contour plot.
# volcano is the 87 x 61 matrix of elevations of Maunga Whau volcano in NZ.
x11()
x <- 10*1:nrow(volcano)
y <- 10*1:ncol(volcano)
lev <- pretty(range(volcano), 10)
par(bg = "lightcyan")
pin <- par("pin")
# Pad the shorter axis so one data unit has the same physical size in x and y.
xdelta <- diff(range(x))
ydelta <- diff(range(y))
xscale <- pin[1]/xdelta
yscale <- pin[2]/ydelta
scale <- min(xscale, yscale)
xadd <- 0.5*(pin[1]/scale - xdelta)
yadd <- 0.5*(pin[2]/scale - ydelta)
plot(numeric(0), numeric(0),
xlim = range(x)+c(-1,1)*xadd, ylim = range(y)+c(-1,1)*yadd,
type = "n", ann = FALSE)
usr <- par("usr")
rect(usr[1], usr[3], usr[2], usr[4], col = "green3")
contour(x, y, volcano, levels = lev, col = "yellow", lty = "solid", add = TRUE)
title("Mapa Topografico del Maunga Whau, NZ", font = 4)
title(xlab = "Direccion Norte (metros)", ylab = "Direccion Oeste (metros)",
font = 3)
mtext("Curvas de nivel cada 10 Metros", side = 3, line = 0.35, outer = FALSE,
at = mean(par("usr")[1:2]), cex = 0.7, font = 3)
#-----------------------------------------------------
# Conditional plots.
# The quakes data set is a data frame with 1000 observations on 5 variables:
# lat = Latitude of the event
# long = Longitude
# depth = Depth (km)
# mag = Magnitude on the Richter scale
# stations = Number of stations reporting the event
par(bg = "cornsilk")
coplot(lat ~ long | depth, data = quakes, pch = 21, bg = "green3")
#-----------------------------------------------------
# Algunas figuras interesantes
#-----------------------------------------------------
# Espiral de Ulam
# prim = Un programa para calcular los primeros n primos
# prim(n): return the first floor(n) prime numbers by trial division.
# For n == 1 the scalar 2 is returned; otherwise a numeric vector of
# length floor(n). Each candidate is tested only against the primes
# already found that are <= sqrt(candidate).
prim <- function(n) {
  if (n == 1) {
    return(2)
  }
  found <- 2       # primes collected so far
  candidate <- 2   # last integer examined
  repeat {
    candidate <- candidate + 1
    divisors <- found[found <= sqrt(candidate)]
    # An empty divisor set (e.g. candidate == 3) means prime by default.
    if (!any(candidate %% divisors == 0)) {
      found <- c(found, candidate)
      if (length(found) == floor(n)) {
        return(found)
      }
    }
  }
}
# Variables for the Ulam spiral: side length m+1, ring radii ii.
m <- 100
# NOTE(review): prim((m+1)^2) computes the FIRST (m+1)^2 primes, which is a
# superset of the primes <= (m+1)^2 actually needed for membership tests
# below, so results are correct but this is more work than necessary --
# confirm whether a sieve up to (m+1)^2 was intended.
pp <- prim( (m+1)^2 )
ii <- seq(3,m+1,by=2)   # odd ring sizes, from 3x3 outward
jj <- length(ii)
# Drawing: walk each square ring, connect consecutive cells, and mark
# the cells whose spiral index is prime.
par(mar=c(0,0,0,0)+1); xylim <- c(1,m+1)
plot(1, 1, xlim = xylim, ylim = xylim, type = "n", xaxt = "n", yaxt = "n",
bty = "n", xlab = "", ylab = "")
aa <- c(floor(m/2)+1,floor(m/2)+1)   # start at the center cell
for(k in 1:jj){
r <- ii[k]
# Coordinates of the r x r ring, traversed in spiral order.
co <- cbind(c(rep(r,r),(r-1):2,rep(1,r),2:(r-1)),c(r:1,rep(1,r-2),1:r,rep(r,r-2)))
co <- co + (jj-k)   # shift so all rings share the same center
n <- dim(co)[1]
# Spiral indices covered by this ring (counting down from r^2).
uu <- (r^2):((r-2)^2)
rr <- is.element(uu[-(n+1)],pp)
bb <- co[n,]
# Connect the previous ring's endpoint to this ring's start.
segments(aa[1], aa[2], bb[1], bb[2], col = "black", lwd = 1)
aa <- co[1,]
for(i in 1:(n-1)){
segments(co[i,1], co[i,2], co[i+1,1], co[i+1,2], col = "black", lwd = 1)
}
points(co[rr,1], co[rr,2], col = "blue", pch = 20)   # mark the primes
}
title("Espiral de Ulam", cex = .9, line = -.3)
#-----------------------------------------------------
# Circular labyrinth: for each radius r, draw a radial spoke at a random
# angle and then an arc of radius r spanning a random angular increment.
# NOTE(review): `m <- 120` appears unused in this section -- confirm.
M <- 40; m <- 120; n <- M; xylim <- .95*c(-M,M)
par(mar = c(0,0,0,0)+.6)
plot(0, 0, type = "n", xlim = xylim, ylim = xylim, xaxt = "n", yaxt = "n",
xlab = "", ylab = "", bty = "n")
pp <- c(0,0)                            # current pen position
tet1 <- runif(1, min = 0, max = 2*pi)   # starting angle
for( r in 1:n ){
qq <- r*c(cos(tet1),sin(tet1))
# Blue spoke from the previous arc's endpoint out to radius r.
segments(pp[1],pp[2],qq[1],qq[2], col = "blue", lwd = 2)
tet2 <- tet1 + runif(1, min = 0, max = 2*pi)
ts <- seq(tet1, tet2, length = 200)
nc <- r*cbind( cos(ts), sin(ts) )
lines( nc[,1], nc[,2], col = "red", lwd = 2 )   # red arc at radius r
tet1 <- tet2
pp <- nc[200,]                          # arc endpoint becomes next start
}
#-----------------------------------------------------
# The Koch snowflake.
# This example uses several nested functions. Note: the nested functions
# use `T` as a parameter name (a 2x3 matrix of triangle vertices), which
# shadows the T alias for TRUE inside their bodies.
KochSnowflakeExample <- function(){ # Main function
# iterate(T, i): map a triangle (columns A, B, C of T) to one of the seven
# sub-triangles of the Koch construction selected by i (0 = identity).
iterate <- function(T,i){ # First nested function
A = T[ ,1]; B=T[ ,2]; C = T[,3];
if (i == 1){
d = (A + B)/2; h = (C-d); d = d-(1/3)*h;
e = (2/3)*B + (1/3)*A; f = (1/3)*B + (2/3)*A;
}
if (i == 2){
d = B; e = (2/3)*B + (1/3)*C; f = (2/3)*B + (1/3)*A;
}
if (i == 3){
d = (B + C)/2; h = (A-d); d = d-(1/3)*h;
e = (2/3)*C + (1/3)*B; f = (1/3)*C + (2/3)*B;
}
if (i == 4){
d = C; e = (2/3)*C + (1/3)*A; f = (2/3)*C + (1/3)*B;
}
if (i == 5){
d = (A + C)/2; h = (B-d); d = d-(1/3)*h;
e = (2/3)*A + (1/3)*C; f = (1/3)*A + (2/3)*C;
}
if (i == 6){
d = A; e = (2/3)*A + (1/3)*C; f = (2/3)*A + (1/3)*B;
}
if (i == 0){
d = A; e = B; f = C;
}
Tnew = cbind(d,e,f)
return(Tnew); # Returns a smaller triangle.
}
# draw(T): fill the triangle given by the columns of T.
draw <- function(T, col=rgb(0.5,0.2,0),border=rgb(0.5,0.2,0)){
polygon(T[1,], T[2,], col = col, border = border)
}
# Iterate(T, v): apply iterate() for each index in v, then draw the result.
Iterate = function(T,v,col=rgb(0.5,0.2,0),border=rgb(0.5,0.2,0)){
for (i in v) T = iterate(T,i);
draw(T, col = col, border = border);
}
# Vertices of the initial triangle:
A = matrix(c(1,0),2,1);
B = matrix(c(cos(2*pi/3), sin(2*pi/3)),2,1);
C = matrix(c(cos(2*pi/3),-sin(2*pi/3)),2,1);
T0 = cbind(A,B,C);
plot(numeric(0), xlim = c(-1.1,1.1), ylim = c(-1.1,1.1), axes = FALSE,
frame = FALSE, ann = FALSE);
par(mar = c(0,0,0,0), bg = rgb(1,1,1));
par(usr = c(-1.1,1.1,-1.1,1.1));
# Draw the snowflake: all depth-4 index combinations.
for (i in 0:6) for (j in 0:6) for (k in 0:6) for (l in 0:6) Iterate(T0,c(i,j,k,l));
}
# Run the function (side effect: draws to the active graphics device).
KochSnowflakeExample()
#-----------------------------------------------------
# Final del script
|
2d4bdf5c44ab08ad9aef9a71d0f8ed5d88e3686b
|
7f0b89090cbbf264ec3084573795c8a28df19cd8
|
/run_analysis.R
|
00e7475386c83e0d212fecb6dbb10218d9a161d9
|
[] |
no_license
|
ssmandyam/GettingandCleaningData
|
14a49cd12cca6a2faa629b42bb8a978580f68520
|
6633b6e32e8006f06d79ad736e0854e22104e217
|
refs/heads/master
| 2021-01-01T05:26:23.673714
| 2016-04-12T20:50:25
| 2016-04-12T20:50:25
| 56,013,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,715
|
r
|
run_analysis.R
|
#run_analysis.R
#consolidates the training and test data sets into a single tidy data set
#(UCI HAR dataset; working directory must contain train/, test/, and the
# label/feature files)
#Step 1: read the training and test sets
x_traindf <- read.table("./train/X_train.txt", stringsAsFactors=FALSE)
y_traindf <- read.table("./train/y_train.txt", stringsAsFactors=FALSE)
x_testdf <- read.table("./test/X_test.txt", stringsAsFactors=FALSE)
y_testdf <- read.table("./test/y_test.txt", stringsAsFactors=FALSE)
sub_traindf <- read.table("./train/subject_train.txt", stringsAsFactors=FALSE)
sub_testdf <- read.table("./test/subject_test.txt", stringsAsFactors=FALSE)
act_label <- read.table("./activity_labels.txt", stringsAsFactors=FALSE)
feat_df <- read.table("./features.txt", stringsAsFactors=FALSE)
# Step 2: bind test and train sets and assign column names
M <- rbind(x_traindf, x_testdf)
# fix duplicate feature names by suffixing "_dupe<index>" so colnames are unique
for (i in which(duplicated(feat_df$V2))) {
feat_df$V2[i] = paste(feat_df$V2[i], "_dupe", as.character(i),sep="")
}
colnames(M) <- feat_df$V2
M$activity <- rbind(y_traindf, y_testdf)$V1
M$subject <- rbind(sub_traindf, sub_testdf)$V1
# assign descriptive activity names (activity codes index into act_label)
M$activity <- sapply(M$activity, function(x) act_label$V2[x])
#Step 3: Extracting the mean and standard deviation columns only
library(dplyr)
# NOTE(review): tbl_df() is deprecated in dplyr; as_tibble() is the modern name.
M <- tbl_df(M)
# NOTE(review): in the regex "mean()" the parentheses are an empty capture
# group, so this matches any name containing "mean" (including meanFreq) --
# confirm that is intended.
mean_std_vars <- c(grep("mean()",names(M), value=TRUE), grep("std()", names(M), value=TRUE))
MStd <- select(M, one_of("subject", "activity", mean_std_vars))
#Step 4: create a tidy data set with average of each variable for subject and activity
library(reshape2)
tmelt <- melt(MStd, id.vars=c("subject", "activity"), measure.vars=mean_std_vars)
tcast <- dcast(tmelt, subject+activity~variable, mean)
# NOTE(review): `row.name` is a partial match for row.names -- spell it out.
write.table(tcast, "tidy_data.txt", row.name=FALSE)
|
2e49e6b8420575fddb5a7d68d601ff2e1ee4cbf8
|
8a045230ab4809ea6d28cf4a8d1105e9977a4076
|
/code/ml1.r
|
3884c4c710a74d17ccb37e46405c04f4afa185a3
|
[
"BSD-4-Clause"
] |
permissive
|
jaredlander/odscwest2020
|
d69db3163305f8dd62d7739c969d3fcf53b1e9c0
|
06c3afe93939cfe6b437fb9b304fbfd57a3b55e0
|
refs/heads/main
| 2023-03-05T15:49:57.428459
| 2021-02-17T21:52:10
| 2021-02-17T21:52:10
| 306,472,524
| 5
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,311
|
r
|
ml1.r
|
# Packages ####
# resampling, splitting and validation
library(rsample)
# feature engineering or preprocessing
library(recipes)
# specifying models
library(parsnip)
# tuning
library(tune)
# tuning parameters
library(dials)
# performance measurement
library(yardstick)
# variable importance plots
library(vip)
# combining feature engineering and model specification
library(workflows)
# data manipulation
library(dplyr)
# plotting
library(ggplot2)
# library(tidymodels)
# parallelism
library(doFuture)
library(parallel)
# timing
library(tictoc)
# Data ####
# credit_data: loan records with outcome Status (good/bad).
data(credit_data, package='modeldata')
credit_data <- credit_data %>% as_tibble()
credit_data
# EDA ####
# Interactive exploration: class balance and predictor distributions by Status.
ggplot(credit_data, aes(x=Status)) + geom_bar()
ggplot(credit_data, aes(x=Status, y=Amount)) + geom_violin()
ggplot(credit_data, aes(x=Status, y=Age)) + geom_violin()
ggplot(credit_data, aes(x=Status, y=Income)) + geom_violin(draw_quantiles=0.5)
ggplot(credit_data, aes(x=Age, y=Income, color=Status)) + geom_point()
# geom_hex() needs the hexbin package installed.
ggplot(credit_data, aes(x=Age, y=Income, color=Status)) +
geom_hex() +
facet_wrap(~Status) + theme(legend.position='none')
# Split Data ####
set.seed(871)
# from rsample: 80/20 split stratified on the outcome
credit_split <- initial_split(credit_data, prop=0.8, strata='Status')
credit_split
credit_split %>% class()
train <- training(credit_split)
test <- testing(credit_split)
train
train %>% glimpse()
train %>% class()
library(skimr)
skim(train)
# Feature Engineering ####
# from recipes
# caret
# tidymodels
# food themes
# Max Kuhn
# Outcomes: response, y, label, target, dependent variable, output, known, result
# inputs: predictors, x, features, covariates, independent variable, data, attributes, descriptors
table(credit_data$Status)
# Dummy-variable illustration: model.matrix one-hot encodes a character vector.
colors <- c('blue', 'blue', 'red', 'green', 'blue', 'green', 'blue', 'blue', 'blue', 'blue',
'red', 'green', 'pink', 'yellow', 'pink', 'purple')
model.matrix(~ colors)
# Collapsing rare levels into "Misc" before encoding keeps the matrix small.
colors2 <- c('blue', 'blue', 'red', 'green', 'blue', 'green', 'blue', 'blue', 'blue', 'blue',
'red', 'green', 'Misc', 'Misc', 'Misc', 'Misc')
model.matrix(~colors2)
# Motivating examples: levels present in test but not train (and vice versa).
cat_train_1 <- c('rent', 'own', 'mortgage')
cat_test_1 <- c('rent', 'parents')
cat_train_2 <- c('rent', 'own', 'mortgage')
cat_test_2 <- c('rent', 'own')
# rec1: preprocessing recipe for the credit model.
# FIX: step_downsample() lives in the themis package in current tidymodels
# releases; namespace it explicitly, consistent with the later recipe that
# already calls themis::step_downsample().
rec1 <- recipe(Status ~ ., data=train) %>%
  # xgboost can handle class imbalance, so we'll remove it later
  themis::step_downsample(Status, under_ratio=1.2) %>%
  # not really needed for xgboost
  step_normalize(Age, Price) %>%
  # collapse infrequent levels in categorical variables
  # step_other(Home, Marital, Records, Job, other='Misc')
  # this line is a shortcut for the line above
  step_other(all_nominal(), -Status, other='Misc') %>%
  # remove columns with very little variability (near-zero variance)
  step_nzv(all_predictors()) %>%
  # xgboost doesn't need imputation, so we will remove later
  # NOTE(review): step_modeimpute()/step_knnimpute() are deprecated in favor
  # of step_impute_mode()/step_impute_knn() in recipes >= 0.1.16.
  step_modeimpute(all_nominal(), -Status) %>%
  step_knnimpute(all_numeric()) %>%
  step_dummy(all_nominal(), -Status, one_hot=TRUE)
rec1
# Model Specification ####
# from parsnip: boosted trees with the xgboost engine
xg_spec1 <- boost_tree(mode='classification') %>%
set_engine('xgboost')
xg_spec1
# Same model type, other engines (shown for illustration; not fitted here).
boost_tree(mode='classification') %>% set_engine('C5.0')
boost_tree(mode='classification') %>% set_engine('spark')
# BART: dbart
# catboost
# LightGBM
xg_spec1 <- boost_tree(mode='classification', trees=100) %>%
set_engine('xgboost')
xg_spec1
# parsnip gives us a uniform naming convention for all of the parameters
linear_reg() %>% set_engine('lm')
linear_reg(penalty=0.826) %>% set_engine('glmnet')
linear_reg() %>% set_engine('keras')
linear_reg() %>% set_engine('stan')
linear_reg() %>% set_engine('spark')
rand_forest() %>% set_engine('randomForest')
rand_forest() %>% set_engine('ranger')
# Build Workflow ####
# prep() estimates the recipe's parameters on the training data;
# bake(new_data=NULL) returns the processed training set.
rec1 %>% prep()
rec_prep <- rec1 %>% prep()
bake(rec_prep, new_data=NULL)
bake(rec_prep, new_data=test)
prepped <- rec1 %>% prep() %>% bake(new_data=NULL)
prepped
# Manual route: fit the model spec on the pre-baked data.
fit0 <- fit(xg_spec1, Status ~ ., data=prepped)
fit0
# from workflows
# combine feature engineering and model specification into one step
flow1 <- workflow() %>%
add_recipe(rec1) %>%
add_model(xg_spec1)
flow1
# Fit Our Model
fit1 <- fit(flow1, data=train)
# fit1 <- fit(flow1, data=train2)
fit1
fit1 %>% class()
# NOTE(review): extract_model() is deprecated in recent tune/workflows;
# extract_fit_engine() is the modern replacement -- confirm package versions.
fit1 %>% extract_model() %>% class()
fit1 %>% extract_model() %>% vip()
fit1 %>% extract_model() %>% xgboost::xgb.plot.multi.trees()
# readr::write_rds(fit1, 'fit1.rds')
# xgboost::xgb.save(fit1 %>% extract_model(), fname='xg1.model')
# How did we Do? ####
# accuracy, logloss, AUC
# from yardstick: bundle several metrics into one callable set
loss_fn <- metric_set(accuracy, mn_log_loss, roc_auc)
loss_fn
# train and validation sets
# cross-validation
# from rsample: carve a single validation set out of the training data
val_split <- validation_split(data=train, prop=0.8, strata='Status')
val_split
val_split$splits[[1]]
credit_split
credit_split %>% class()
val_split$splits[[1]] %>% class()
val_split %>% class()
# from tune: fit flow1 on the analysis set, evaluate on the assessment set
val1 <- fit_resamples(object=flow1, resamples=val_split, metrics=loss_fn)
val1
val1 %>% collect_metrics()
val1$.metrics
# Animated illustration of k-fold cross-validation.
library(animation)
cv.ani(k=10)
# from rsample: k-fold cross-validation splits
cv_split <- vfold_cv(data=train, v=10, strata='Status')
cv_split
cv_split %>% class()
val_split %>% class()
cv_split$splits[[1]]
vfold_cv(data=train, v=10, strata='Status', repeats=3)
# Use 5 folds repeated twice (10 model fits total) for the comparison below.
cv_split <- vfold_cv(data=train, v=5, strata='Status', repeats=2)
cv_split
val1 <- fit_resamples(object=flow1, resamples=val_split, metrics=loss_fn)
cv1 <- fit_resamples(object=flow1, resamples=cv_split, metrics=loss_fn)
cv1
cv1$.metrics[[1]]
cv1$.metrics[[2]]
cv1$.metrics[[3]]
# collect_metrics() averages each metric across the folds.
cv1 %>% collect_metrics()
# More Parameters ####
# Increase the number of boosting rounds.
xg_spec2 <- boost_tree(mode='classification', trees=300) %>%
set_engine('xgboost')
xg_spec2
# workflow() %>%
# add_model(xg_spec2) %>%
# add_recipe(rec1)
# update_model() swaps the model spec in an existing workflow.
flow2 <- flow1 %>%
update_model(xg_spec2)
flow2
val2 <- fit_resamples(flow2, resamples=val_split, metrics=loss_fn)
val2
val2 %>% collect_metrics()
# Add a faster learning rate.
xg_spec3 <- boost_tree('classification', trees=300, learn_rate=0.2) %>%
set_engine('xgboost')
xg_spec3
flow3 <- flow2 %>%
update_model(xg_spec3)
val3 <- fit_resamples(flow3, resamples=val_split, metrics=loss_fn)
val3 %>% collect_metrics()
# Add row subsampling per boosting round.
xg_spec4 <- boost_tree('classification', trees=300, learn_rate=0.2, sample_size=0.5) %>%
set_engine('xgboost')
xg_spec4
flow4 <- flow3 %>%
update_model(xg_spec4)
val4 <- fit_resamples(flow4, resamples=val_split, metrics=loss_fn)
val4 %>% collect_metrics()
# Missing Data ####
# xgboost tolerates missing values, so drop the imputation steps.
rec2 <- recipe(Status ~ ., data=train) %>%
step_nzv(all_predictors()) %>%
step_other(all_nominal(), -Status, other='Misc') %>%
themis::step_downsample(Status, under_ratio=1.2) %>%
step_dummy(all_nominal(), -Status, one_hot=TRUE)
rec2
flow5 <- flow4 %>%
update_recipe(rec2)
flow5
val5 <- fit_resamples(flow5, resamples=val_split, metrics=loss_fn)
val5 %>% collect_metrics()
val4 %>% collect_metrics()
val5
val5$.notes
# Imbalanced Data
# Drop downsampling entirely and handle imbalance inside xgboost instead.
rec3 <- recipe(Status ~ ., data=train) %>%
step_nzv(all_predictors()) %>%
step_other(all_nominal(), -Status, other='Misc') %>%
step_dummy(all_nominal(), -Status, one_hot=TRUE)
rec3
flow6 <- flow5 %>%
update_recipe(rec3)
val6 <- fit_resamples(flow6, resamples=val_split, metrics=loss_fn)
val5 %>% collect_metrics()
val6 %>% collect_metrics()
table(train$Status)
1004/2561
# Ratio of the two class counts, used for xgboost's scale_pos_weight.
scaler <- train %>% count(Status) %>% pull(n) %>% purrr::reduce(`/`)
# !! forces evaluation of 1/scaler now instead of storing the expression.
xg_spec5 <- boost_tree('classification', trees=300, learn_rate=0.2, sample_size=0.5) %>%
set_engine('xgboost', scale_pos_weight=!!(1/scaler))
xg_spec5
flow7 <- flow6 %>%
update_model(xg_spec5)
flow7
val7 <- fit_resamples(flow7, resamples=val_split, metrics=loss_fn)
val7 %>% collect_metrics()
val6 %>% collect_metrics()
val5 %>% collect_metrics()
# Tune Parameters ####
# from tune: tune() marks a parameter to be chosen by grid search
xg_spec6 <- boost_tree('classification', learn_rate=0.2, sample_size=0.5, trees=tune()) %>%
set_engine('xgboost', scale_pos_weight=!!(1/scaler))
xg_spec6
flow8 <- flow7 %>%
update_model(xg_spec6)
flow8
# does not work: a workflow with tune() placeholders cannot be fit directly
# fit8 <- fit(flow8, data=train)
# does not work with fit_resamples either -- tuning needs tune_grid()
val8 <- fit_resamples(flow8, resamples=val_split, metrics=loss_fn)
val8$.notes
# benefits and draw backs of validate vs cross-validate
# from doFuture and parallel: register a 6-worker cluster backend
registerDoFuture()
cl <- makeCluster(6)
plan(cluster, workers=cl)
# NOTE(review): the cluster is never shut down in this script; consider
# parallel::stopCluster(cl) when done.
options(tidymodels.dark=TRUE)
# from tictoc: wall-clock timing around the tuning runs
tic()
# tune over 20 candidate values of `trees` on the validation split
tune8_val <- tune_grid(
flow8,
resamples=val_split,
grid=20,
metrics=loss_fn,
control=control_grid(verbose=TRUE, allow_par=TRUE)
)
toc()
tune8_val
tune8_val$.notes
tune8_val$.metrics
tune8_val %>% collect_metrics()
tune8_val %>% show_best(metric='roc_auc')
tic()
# same tuning, but evaluated with cross-validation (slower, more stable)
tune8_cv <- tune_grid(
flow8,
resamples=cv_split,
grid=20,
metrics=loss_fn,
control=control_grid(verbose=TRUE, allow_par=TRUE)
)
toc()
tune8_cv
tune8_cv$.metrics[[1]]$trees %>% unique
tune8_cv %>% collect_metrics()
tune8_cv %>% autoplot()
tune8_cv %>% show_best(metric='roc_auc')
# Other Tuning Parameters ####
# Tune three parameters jointly: trees, sample_size, tree_depth.
xg_spec7 <- boost_tree(
'classification',
trees=tune(), learn_rate=0.2, sample_size=tune(), tree_depth=tune()
) %>%
set_engine('xgboost', scale_pos_weight=!!(1/scaler))
xg_spec7
flow9 <- flow8 %>%
update_model(xg_spec7)
flow9
# parameters() lists the tunable parameters and their default ranges.
# NOTE(review): parameters() on workflows is deprecated in newer tune;
# extract_parameter_set_dials() is the replacement -- confirm versions.
flow9 %>% parameters()
flow9 %>% parameters() %>% class()
flow9 %>% parameters() %>% pull(object)
# from dials: parameter objects and their ranges
trees()
trees(range=c(10, 300))
tree_depth()
tree_depth(range=c(2, 8))
sample_size()
# sample_size(range=c(0.3, 0.8))
# For xgboost, sample_size is a proportion, hence sample_prop().
sample_prop()
sample_prop(c(0.3, 0.8))
# Narrow the search space before tuning.
params9 <- flow9 %>%
parameters() %>%
update(
trees=trees(range=c(10, 300)),
tree_depth=tree_depth(range=c(2, 8)),
sample_size=sample_prop(range=c(0.3, 0.8))
)
params9
params9 %>% pull(object)
tic()
# Random grid of 40 candidates drawn from params9.
val9 <- tune_grid(
flow9,
resamples=val_split,
grid=40,
metrics=loss_fn,
control=control_grid(verbose=TRUE, allow_par=TRUE),
param_info=params9
)
toc()
val9
val9 %>% show_best(metric='roc_auc')
val9 %>% autoplot(metric='roc_auc')
# Space-filling (max-entropy) grid over the same parameter set.
grid10 <- grid_max_entropy(params9, size=40)
grid10
tic()
val10 <- tune_grid(
flow9,
resamples=val_split, # or cv_split
grid=grid10,
metrics=loss_fn,
control=control_grid(verbose=TRUE, allow_par=FALSE)
)
toc()
val10 %>% collect_metrics()
val10 %>% show_best(metric='roc_auc', n=10)
val10 %>% select_best(metric='roc_auc')
# The winning combination, written out literally for reference.
boost_tree('classification', trees=127, tree_depth=2, sample_size=0.509)
# Finalize Model ####
# Plug the best tuning values back into the workflow, replacing tune().
mod10 <- flow9 %>%
finalize_workflow(val10 %>% select_best(metric='roc_auc'))
flow9
mod10
# Sanity check: refit the finalized workflow on the validation split.
val10.1 <- fit_resamples(mod10, resamples=val_split, metrics=loss_fn)
val10.1 %>% collect_metrics()
val10 %>% show_best()
val10.1 %>% collect_metrics()
test
# Last Fit ####
# last_fit(): train on the full training set, evaluate once on the test set.
results10 <- last_fit(mod10, split=credit_split, metrics=loss_fn)
results10 %>% collect_metrics()
# Make Predictions ####
# fit the model on ALL the data
# predict on some new data (pretend 'test' is new)
fit10 <- fit(mod10, data=credit_data)
fit10 %>% extract_model() %>% vip()
# pretend 'test' is new
preds10 <- predict(fit10, new_data=test)
preds10
# fit is for fitting one model with set parameters
# fit_resamples is for fitting multiple models for validation with set parameters
# tune_grid is for tuning over tuning parameters
preds10_prob <- predict(fit10, new_data=test, type='prob')
preds10_prob
|
2f2d60d5b4abf5845bc39374d5c7b5db5292fb20
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkCTreeFind.Rd
|
35cfb6ab0d1a5e325ddcba67a6e856aa67bdd0eb
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 626
|
rd
|
gtkCTreeFind.Rd
|
\alias{gtkCTreeFind}
\name{gtkCTreeFind}
\title{gtkCTreeFind}
\description{
\strong{WARNING: \code{gtk_ctree_find} is deprecated and should not be used in newly-written code.}
}
\usage{gtkCTreeFind(object, node, child)}
\arguments{
\item{\verb{object}}{The \code{GtkCTree} to search in.}
\item{\verb{node}}{The node to start searching from. May be \code{NULL}.}
\item{\verb{child}}{The node to search for.}
}
\value{[logical] True if \code{child} is on some level a child (grandchild...) of the \code{node}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
7a57e21ace0a63c17d9f45d6c01381993ad9146b
|
694d8ed931a130b5b03900c250444e3eae494293
|
/metropolis_prod_geom.R
|
2aae1c43755d4f61b29beb73920a98281d56f0ae
|
[] |
no_license
|
angieshen6/anisotropy
|
de0e8d3fdb4fbf1885aefbca587cfb68992188da
|
b7ced5d90ab0ac72b52d79f683cbc112e87a0d12
|
refs/heads/master
| 2020-05-22T10:05:09.440237
| 2019-05-23T21:16:02
| 2019-05-23T21:16:02
| 186,303,618
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,814
|
r
|
metropolis_prod_geom.R
|
##############################################
#This script compares isotropy with geometric
# anisotropy with matern covariance and product
# covariance
###############################################
# product anisotropy
library(MASS)
library(mvtnorm)
library(invgamma)
library(truncnorm)
library(spBayes)
library(geoR)
library(fields)
# Command-line configuration (disabled; fixed values are used below).
# ARGS <- commandArgs(trailingOnly = TRUE)
# numSites <- as.numeric(ARGS[1])
# numSites <- as.numeric(ARGS[1])
# numTotal <- as.numeric(ARGS[2])
# sigmasq <- as.numeric(ARGS[3])
# tausq <- as.numeric(ARGS[4])
numSites <- 500     # training sites
numTotal <- 600     # total simulated sites (100 held out)
sigmasq <- 1        # partial sill
tausq <- 0.2        # nugget
set.seed(1127)
# Simulate sites uniformly on the unit square and data from the
# product-anisotropy covariance sigma^2 * exp(-phiX*dX^aX - phiY*dY^aY).
sites <- cbind(runif(numTotal, 0, 1), runif(numTotal, 0, 1))
phiX <- 3 / 0.3 # 10
phiY <- 3 / 0.6 # 5
mu <- 0
alphaX <- 0.5
alphaY <- 1.5
distX <- as.matrix(dist(sites[,1]))
distY <- as.matrix(dist(sites[,2]))
Sigma <- sigmasq * exp(-phiX * distX^alphaX - phiY * distY^alphaY)
Sigma <- Sigma + diag(tausq, nrow=numTotal, ncol=numTotal)
y <- mvrnorm(1, rep(0, numTotal), Sigma)
# Hold out numTotal - numSites sites for prediction.
ind_test <- sample(1:numTotal, size=numTotal-numSites)
ind_train <- setdiff(1:numTotal, ind_test)
sites_test <- sites[ind_test,]
sites_train <- sites[ind_train,]
y_test <- y[ind_test]
y_train <- y[ind_train]
# Coordinate-wise training distance matrices, reused throughout the MCMC.
dTrainX <- as.matrix(dist(sites_train[,1]))
dTrainY <- as.matrix(dist(sites_train[,2]))
# Initialize all chains at the true parameter values.
numSim <- 30000
mcmc.sigma <- rep(sigmasq, numSim)
mcmc.tau <- rep(tausq, numSim)
mcmc.mu <- rep(mu, numSim)
mcmc.phiX <- rep(phiX, numSim)
mcmc.phiY <- rep(phiY, numSim)
mcmc.ax <- rep(alphaX, numSim)
mcmc.ay <- rep(alphaY, numSim)
# Metropolis sampler: single-site updates for (alphaX, alphaY, phiX, phiY,
# sigma^2, tau^2, mu), each via a proposal + accept/reject step on the
# multivariate-normal likelihood of y_train.
# NOTE(review): the truncated-normal and log-normal proposals used below are
# NOT symmetric, but the acceptance ratios contain no proposal-density
# (Hastings) correction q(old|new)/q(new|old); the chain may therefore not
# target the stated posterior exactly -- confirm this is intended.
# NOTE(review): ratio is formed as exp(log-lik diff) * prior ratio and then
# logged again; working entirely on the log scale would be more stable.
set.seed(1127)
for(i in 2:numSim){
print(i)
# update alphaX: truncated-normal proposal on (0, Inf), Unif(0,2) prior
# (proposals above 2 get prior density 0 and are always rejected).
var <- 2
#ax_star <- rlnorm(1, log(mcmc.ax[i-1]), sqrt(var))
ax_star <- rtruncnorm(1, a=0, b=Inf, mean = mcmc.ax[i-1], sd = sqrt(var))
# covariances under the proposed and current alphaX
Sigma_star <- mcmc.sigma[i-1] * exp(-mcmc.phiX[i-1] * dTrainX^ax_star -
mcmc.phiY[i-1] * dTrainY^mcmc.ay[i-1])
Sigma_prev <- mcmc.sigma[i-1] * exp(-mcmc.phiX[i-1] * dTrainX^mcmc.ax[i-1] -
mcmc.phiY[i-1] * dTrainY^mcmc.ay[i-1])
diag(Sigma_star) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
diag(Sigma_prev) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
# calculate acceptance ratio
mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
prior_star <- dunif(ax_star, 0, 2)
prior_prev <- dunif(mcmc.ax[i-1], 0, 2)
ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
u <- runif(1)
if(log(u) < log(ratio)){
mcmc.ax[i] <- ax_star
} else {
mcmc.ax[i] <- mcmc.ax[i-1]
}
# update alphaY: same scheme, using the freshly updated alphaX
var <- 2
#ay_star <- rlnorm(1, log(mcmc.ay[i-1]), sqrt(var))
ay_star <- rtruncnorm(1, a=0, b=Inf, mean = mcmc.ay[i-1], sd = sqrt(var))
# covariances
Sigma_star <- mcmc.sigma[i-1] * exp(-mcmc.phiX[i-1] * dTrainX^mcmc.ax[i] -
mcmc.phiY[i-1] * dTrainY^ay_star)
Sigma_prev <- mcmc.sigma[i-1] * exp(-mcmc.phiX[i-1] * dTrainX^mcmc.ax[i] -
mcmc.phiY[i-1] * dTrainY^mcmc.ay[i-1])
diag(Sigma_star) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
diag(Sigma_prev) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
# calculate acceptance ratio
mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
prior_star <- dunif(ay_star, 0, 2)
prior_prev <- dunif(mcmc.ay[i-1], 0, 2)
ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
u <- runif(1)
if(log(u) < log(ratio)){
mcmc.ay[i] <- ay_star
} else {
mcmc.ay[i] <- mcmc.ay[i-1]
}
# update phiX: log-normal proposal, Unif(3/0.7, 3/0.2) prior on the range
var <- 2
phiX_star <- rlnorm(1, log(mcmc.phiX[i-1]), sqrt(var))
# covariances
Sigma_star <- mcmc.sigma[i-1] * exp(-phiX_star * dTrainX^mcmc.ax[i] -
mcmc.phiY[i-1] * dTrainY^mcmc.ay[i])
Sigma_prev <- mcmc.sigma[i-1] * exp(-mcmc.phiX[i-1] * dTrainX^mcmc.ax[i] -
mcmc.phiY[i-1] * dTrainY^mcmc.ay[i])
diag(Sigma_star) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
diag(Sigma_prev) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
# calculate acceptance ratio
mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
prior_star <- dunif(phiX_star, 3/0.7, 3/0.2)
prior_prev <- dunif(mcmc.phiX[i-1], 3/0.7, 3/0.2)
ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
u <- runif(1)
if(log(u) < log(ratio)){
mcmc.phiX[i] <- phiX_star
} else {
mcmc.phiX[i] <- mcmc.phiX[i-1]
}
# update phiY: log-normal proposal, Unif(2, 3/0.2) prior
var <- 2
phiY_star <- rlnorm(1, log(mcmc.phiY[i-1]), sqrt(var))
# covariances
Sigma_star <- mcmc.sigma[i-1] * exp(-mcmc.phiX[i] * dTrainX^mcmc.ax[i] -
phiY_star * dTrainY^mcmc.ay[i])
Sigma_prev <- mcmc.sigma[i-1] * exp(-mcmc.phiX[i] * dTrainX^mcmc.ax[i] -
mcmc.phiY[i-1] * dTrainY^mcmc.ay[i])
diag(Sigma_star) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
diag(Sigma_prev) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
# calculate acceptance ratio
mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
prior_star <- dunif(phiY_star, 2, 3/0.2)
prior_prev <- dunif(mcmc.phiY[i-1], 2, 3/0.2)
ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
u <- runif(1)
if(log(u) < log(ratio)){
mcmc.phiY[i] <- phiY_star
} else {
mcmc.phiY[i] <- mcmc.phiY[i-1]
}
# update sigma squared: log-normal proposal, InvGamma(1,1) prior
var <- 3
sigma_star <- rlnorm(1, log(mcmc.sigma[i-1]), sqrt(var))
# covariances
Sigma_star <- sigma_star * exp(-mcmc.phiX[i] * dTrainX^mcmc.ax[i]
- mcmc.phiY[i] * dTrainY^mcmc.ay[i])
Sigma_prev <- mcmc.sigma[i-1] * exp(-mcmc.phiX[i] * dTrainX^mcmc.ax[i] -
mcmc.phiY[i] * dTrainY^mcmc.ay[i])
diag(Sigma_star) <- sigma_star + mcmc.tau[i-1]
diag(Sigma_prev) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
prior_star <- dinvgamma(sigma_star, shape=1, scale=1)
prior_prev <- dinvgamma(mcmc.sigma[i-1], shape=1, scale=1)
ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
u <- runif(1)
if(log(u) < log(ratio)){
mcmc.sigma[i] <- sigma_star
} else {
mcmc.sigma[i] <- mcmc.sigma[i-1]
}
# update tau squared: log-normal proposal, InvGamma(1,1) prior
# (only the diagonal changes, so the off-diagonal covariance is shared)
var <- 2
tau_star <- rlnorm(1, log(mcmc.tau[i-1]), sqrt(var))
# covariance sampled
Sigma <- mcmc.sigma[i] * exp(-mcmc.phiX[i] * dTrainX^mcmc.ax[i] -
mcmc.phiY[i] * dTrainY^mcmc.ay[i])
Sigma_prev <- Sigma
Sigma_star <- Sigma
diag(Sigma_star) <- mcmc.sigma[i] + tau_star
diag(Sigma_prev) <- mcmc.sigma[i] + mcmc.tau[i-1]
# calculate acceptance ratio
mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
prior_star <- dinvgamma(tau_star, shape=1, scale=1)
prior_prev <- dinvgamma(mcmc.tau[i-1], shape=1, scale=1)
ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
u <- runif(1)
if(log(u) < log(ratio)){
mcmc.tau[i] <- tau_star
} else {
mcmc.tau[i] <- mcmc.tau[i-1]
}
# update mu: symmetric normal random-walk proposal, N(0, 100) prior
# (the covariance does not depend on mu, so it is built once)
var <- 3
mu_star <- rnorm(1, mcmc.mu[i-1], sqrt(var))
# covariance sampled
Sigma <- mcmc.sigma[i] * exp(-mcmc.phiX[i] * dTrainX^mcmc.ax[i] -
mcmc.phiY[i] * dTrainY^mcmc.ay[i])
diag(Sigma) <- mcmc.sigma[i] + mcmc.tau[i]
# calculate acceptance ratio
mvn_star <- dmvnorm(y_train, rep(mu_star, numSites), Sigma, log=TRUE)
mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma, log=TRUE)
prior_star <- dnorm(mu_star, 0, 100)
prior_prev <- dnorm(mcmc.mu[i-1], 0, 100)
ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
u <- runif(1)
if(log(u) < log(ratio)){
mcmc.mu[i] <- mu_star
} else {
mcmc.mu[i] <- mcmc.mu[i-1]
}
}
# Persist the raw chains, then burn in (first 20000 draws) and thin to
# every 20th iteration for posterior summaries and prediction.
save(mcmc.ax, mcmc.ay, mcmc.sigma, mcmc.tau, mcmc.phiX, mcmc.phiY, mcmc.mu, file="mcmc_prod_comp.Rdata")
numBurn <- 20000
numKeep <- seq(numBurn+20, numSim, by=20)
sig_samp <- mcmc.sigma[numKeep]
tau_samp <- mcmc.tau[numKeep]
phiX_samp <- mcmc.phiX[numKeep]
phiY_samp <- mcmc.phiY[numKeep]
alphaX_samp <- mcmc.ax[numKeep]
alphaY_samp <- mcmc.ay[numKeep]
mu_samp <- mcmc.mu[numKeep]
# Optional diagnostic density plots (disabled).
# par(mfrow=c(3,2))
# plot(density(alphaX_samp), xlab = "alpha_X")
# plot(density(alphaY_samp), xlab = "alpha_Y")
# plot(density(phiX_samp), xlab = "phi_X")
# plot(density(phiY_samp), xlab = "phi_Y")
# plot(density(sig_samp), xlab = "sigma")
# plot(density(tau_samp), xlab = "tau")
# Kriging with the spatial random effect marginalized out:
# draw one posterior-predictive sample of y at each row of `newSites`,
# conditioning on posterior draw `numSample` of the product-anisotropy model.
# Depends on globals from this script: the thinned *_samp chains, dTrainX,
# dTrainY, sites_train, y_train, numSites, and fields::rdist.
# Returns a numeric vector with one predictive draw per new site.
makePredictionAniso <- function(numSample, newSites) {
  sigmasq <- sig_samp[numSample]
  tausq <- tau_samp[numSample]
  mu <- mu_samp[numSample]
  phiX <- phiX_samp[numSample]
  phiY <- phiY_samp[numSample]
  alphaX <- alphaX_samp[numSample]
  alphaY <- alphaY_samp[numSample]
  # Covariance among observed sites (nugget added on the diagonal).
  SigmaObs <- sigmasq * exp(-phiX * dTrainX^alphaX - phiY * dTrainY^alphaY)
  diag(SigmaObs) <- sigmasq + tausq
  S <- solve(SigmaObs)
  # Hoisted out of the per-site loop: S %*% (y - mu) is loop-invariant.
  Sresid <- S %*% (y_train - rep(mu, numSites))
  # vapply preallocates the output instead of growing it with c() (O(n)
  # rather than O(n^2)); seq_len() is safe for zero-row inputs.
  vapply(seq_len(nrow(newSites)), function(j) {
    newSite <- newSites[j, ]
    distPOX <- rdist(newSite[1], sites_train[, 1])
    distPOY <- rdist(newSite[2], sites_train[, 2])
    SigmaPO <- sigmasq * exp(-phiX * distPOX^alphaX - phiY * distPOY^alphaY)
    # Conditional (kriging) mean and variance for the new site.
    mean_yPred <- mu + SigmaPO %*% Sresid
    var_yPred <- sigmasq + tausq - SigmaPO %*% S %*% t(SigmaPO)
    rnorm(1, mean_yPred, sqrt(var_yPred))
  }, numeric(1))
}
nMCMC <- length(sig_samp)
# Posterior predictive draws for the anisotropic model:
# rows = retained MCMC iterations, columns = test sites.
predAniso <- t(sapply(1:nMCMC, makePredictionAniso, newSites=sites_test))
# Isotropy
# Benchmark: isotropic Matern GP fitted with spBayes::spLM. Starting values
# reuse sigmasq/tausq defined earlier in the script (presumably the
# simulation truth -- confirm upstream).
fitIso <- spLM(y_train ~ 1, coords = sites_train,
cov.model = "matern",
n.samples = numSim,
starting = list("sigma.sq" = sigmasq,
"tau.sq" = tausq,
"phi" = 6,
"nu" = 1.5),
tuning = list("phi" = 0.1,
"sigma.sq" = 0.1,
"tau.sq" = 0.1,
"nu" = 0.1),
priors = list("sigma.sq.IG" = c(1, 1),
"tau.sq.IG" = c(1, 1),
"phi.Unif" = c(3/0.7, 3/0.2),
"nu.Unif"=c(0.1,2)
), verbose = FALSE)
# Predictive samples at the test sites, with the same burn-in offset and
# thinning interval as the anisotropic chain.
preds <- spPredict(fitIso, pred.covars=as.matrix(rep(1,nrow(sites_test))), pred.coords=sites_test,
start=numBurn+20, thin=20)
predIso <- t(preds$p.y.predictive.samples)
#plot(1:40, y_test, pch=19, cex=0.5, xlab="observed y", ylab="predicted y",ylim=c(min(y.hat), max(y.hat)))
#arrows(1:40, y.hat[2,], 1:40, y.hat[1,], angle=90, length=0.05)
#arrows(1:40, y.hat[2,], 1:40, y.hat[3,], angle=90, length=0.05)
# 1. empirical coverage
# Summarise each column of `mat` (posterior predictive draws for one site):
# posterior mean, central 90% interval (5th/95th percentiles), the observed
# value, and whether the interval captured it.
EC <- function(mat, yObs) {
  lower <- apply(mat, 2, quantile, probs = 0.05)
  upper <- apply(mat, 2, quantile, probs = 0.95)
  center <- colMeans(mat)
  empCov <- data.frame(Mean = center, Lower = lower, Higher = upper, True = yObs)
  empCov$capture <- empCov$Lower <= empCov$True & empCov$Higher >= empCov$True
  empCov
}
# Empirical coverage of the central 90% predictive intervals, both models.
empCovIso <- EC(predIso, y_test)
empCovAniso <- EC(predAniso, y_test)
numPred <- length(y_test)
# Fraction of test sites whose observed value fell inside the interval.
ecIso <- sum(empCovIso$capture)/numPred
ecAniso <- sum(empCovAniso$capture)/numPred
# pdf("empCovIso.pdf")
# numPred <- length(y_test)
# ggplot(empCovAniso, aes(y=True, x=1:numPred, color=capture)) +
# xlab("index") +
# geom_errorbar(aes(ymax=Higher, ymin=Lower), width=0, color='black', alpha=0.3, size=2) +
# geom_point(size=3) +
# labs(x = "Index of New Sites", y="Predicted/Observed Value") +
# ggtitle(paste("Empirical Coverage of Isotropic Model =",sum(empCovAniso$capture)/numPred))
# dev.off()
# 2. PMSE (predictive mean squared error of the posterior mean)
mseIso <- mean((empCovIso$Mean-empCovIso$True)^2)
mseAniso <- mean((empCovAniso$Mean-empCovAniso$True)^2)
# 3. CRPS
# Continuous Ranked Probability Score between a posterior sample and the
# observed value(s), approximated by a Riemann sum of the squared difference
# of the two empirical CDFs over a regular grid.
#
# post:  vector of posterior (predictive) draws
# obs:   observed value(s); a scalar yields a step-function CDF at that value
# n_pts: number of grid points used for the numerical integration
#
# (Only change from the original: `<-` instead of `=` for assignment, per
# R convention; behavior is identical.)
crps_test <- function(post, obs, n_pts = 1e6)
{
  F_post <- ecdf(post)
  F_obs <- ecdf(obs)
  d <- c(obs, post)
  s <- seq(min(d), max(d), len = n_pts)
  # integral of (F_post - F_obs)^2 over the pooled data range
  sum((F_post(s) - F_obs(s))^2) * (max(d) - min(d)) / n_pts
}
# Mean CRPS across test sites for each model (lower is better).
crpsIso <- mean(sapply(1:ncol(predIso), function(x) crps_test(predIso[,x],y_test[x])))
crpsAniso <- mean(sapply(1:ncol(predAniso), function(x) crps_test(predAniso[,x],y_test[x])))
# Output file name encodes the simulation settings
# (train size, total size, sigma^2, tau^2).
name <- paste(numSites, numTotal, sigmasq, tausq, "prod_comp.Rdata", sep = "_")
save(ecIso, mseIso, crpsIso, ecAniso, mseAniso, crpsAniso, file=name)
####### geometric anisotropy with matern covariance
## distance matrix for x and y coordinate
# Note: Dx[m,n] and Dx[n,m] both store x_m - x_n (symmetric, not signed).
# This is harmless because downstream (makeB) only uses Dx^2, Dy^2 and the
# product Dx*Dy, all of which are invariant to this sign convention.
Dx <- matrix(0, nrow=numSites, ncol=numSites)
Dy <- matrix(0, nrow=numSites, ncol=numSites)
for (m in 1:(numSites-1)) {
for (n in (m+1):numSites) {
h <- sites_train[m,] - sites_train[n,]
Dx[m,n] <- h[1]
Dx[n,m] <- h[1]
Dy[m,n] <- h[2]
Dy[n,m] <- h[2]
}
}
# Elementwise quadratic form h' B h for every site pair, using the global
# coordinate-difference matrices Dx and Dy:
#   pwr[m, n] = B11*dx^2 + (B21 + B12)*dx*dy + B22*dy^2
makeB <- function(B) {
  Dx^2 * B[1, 1] + Dx * Dy * (B[2, 1] + B[1, 2]) + Dy^2 * B[2, 2]
}
# initialize
# MCMC chains for the geometric-anisotropy Matern model; each vector is
# filled with its starting value and overwritten iteration by iteration.
# sigma = partial sill, tau = nugget, a = rotation angle, r = anisotropy
# ratio, phi = decay, kappa = Matern smoothness (as used in the sampler
# below). Starting sigma^2/tau^2 reuse the sigmasq/tausq defined earlier.
numSim <- 30000
mcmc.sigma <- rep(sigmasq, numSim)
mcmc.a <- rep(1, numSim)
mcmc.r <- rep(8, numSim)
mcmc.tau <- rep(tausq, numSim)
mcmc.mu <- rep(mu, numSim)
mcmc.phi <- rep(6, numSim)
mcmc.kappa <- rep(1.5, numSim)
# Metropolis
# Metropolis-within-Gibbs sampler for the geometric-anisotropy Matern model.
# Each iteration updates in turn: kappa, phi, sigma^2, tau^2, (angle, ratio)
# jointly, and mu. Positive parameters use log-normal random-walk proposals.
#
# NOTE(review): the log-normal and truncated-normal random walks are
# asymmetric proposals, but the acceptance ratios below omit the Hastings
# correction q(prev|star)/q(star|prev); strictly this targets a slightly
# different distribution. Flagged rather than changed here so results stay
# comparable with earlier runs -- confirm with the author.
set.seed(1127)
for(i in 2:numSim){
  # ---- update kappa (Matern smoothness) ----
  print(i)  # progress indicator
  var <- 2
  kappa_star <- rlnorm(1, log(mcmc.kappa[i-1]), sqrt(var))
  # anisotropy matrix B = A A' at the current angle/ratio
  amin <- 1
  amax <- amin*mcmc.r[i-1]
  rotationMat <- matrix(c(cos(mcmc.a[i-1]),-sin(mcmc.a[i-1]),sin(mcmc.a[i-1]),cos(mcmc.a[i-1])),nrow=2,ncol=2)
  aMat <- matrix(c(1/amax,0,0,1/amin),nrow=2,ncol=2)
  A <- rotationMat %*% aMat
  B <- A %*% t(A)
  # covariances under proposed and previous kappa
  Sigma_star <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(mcmc.sigma[i-1], 1/mcmc.phi[i-1]), kappa = kappa_star)
  Sigma_prev <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(mcmc.sigma[i-1], 1/mcmc.phi[i-1]), kappa = mcmc.kappa[i-1])
  diag(Sigma_star) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
  diag(Sigma_prev) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
  # calculate acceptance ratio
  mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
  mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
  prior_star <- dunif(kappa_star, 0.1, 2)
  prior_prev <- dunif(mcmc.kappa[i-1], 0.1, 2)
  ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
  u <- runif(1)
  if(log(u) < log(ratio)){
    mcmc.kappa[i] <- kappa_star
  } else {
    mcmc.kappa[i] <- mcmc.kappa[i-1]
  }
  # ---- update phi (spatial decay) ----
  var <- 2
  phi_star <- rlnorm(1, log(mcmc.phi[i-1]), sqrt(var))
  amin <- 1
  amax <- amin*mcmc.r[i-1]
  rotationMat <- matrix(c(cos(mcmc.a[i-1]),-sin(mcmc.a[i-1]),sin(mcmc.a[i-1]),cos(mcmc.a[i-1])),nrow=2,ncol=2)
  aMat <- matrix(c(1/amax,0,0,1/amin),nrow=2,ncol=2)
  A <- rotationMat %*% aMat
  B <- A %*% t(A)
  # covariances under proposed and previous phi (kappa already updated)
  Sigma_star <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(mcmc.sigma[i-1], 1/phi_star), kappa = mcmc.kappa[i])
  Sigma_prev <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(mcmc.sigma[i-1], 1/mcmc.phi[i-1]), kappa = mcmc.kappa[i])
  diag(Sigma_star) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
  diag(Sigma_prev) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
  # calculate acceptance ratio
  mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
  mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
  prior_star <- dunif(phi_star, 3/0.7, 3/0.2)
  prior_prev <- dunif(mcmc.phi[i-1], 3/0.7, 3/0.2)
  ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
  u <- runif(1)
  if(log(u) < log(ratio)){
    mcmc.phi[i] <- phi_star
  } else {
    mcmc.phi[i] <- mcmc.phi[i-1]
  }
  # ---- update sigma squared (partial sill) ----
  var <- 3
  sigma_star <- rlnorm(1, log(mcmc.sigma[i-1]), sqrt(var))
  amin <- 1
  amax <- amin*mcmc.r[i-1]
  rotationMat <- matrix(c(cos(mcmc.a[i-1]),-sin(mcmc.a[i-1]),sin(mcmc.a[i-1]),cos(mcmc.a[i-1])),nrow=2,ncol=2)
  aMat <- matrix(c(1/amax,0,0,1/amin),nrow=2,ncol=2)
  A <- rotationMat %*% aMat
  B <- A %*% t(A)
  # covariances under proposed and previous sigma^2
  Sigma_star <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(sigma_star, 1/mcmc.phi[i]), kappa = mcmc.kappa[i])
  Sigma_prev <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(mcmc.sigma[i-1], 1/mcmc.phi[i]), kappa = mcmc.kappa[i])
  diag(Sigma_star) <- sigma_star + mcmc.tau[i-1]
  diag(Sigma_prev) <- mcmc.sigma[i-1] + mcmc.tau[i-1]
  mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
  mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
  prior_star <- dinvgamma(sigma_star, shape=1, scale=1)
  prior_prev <- dinvgamma(mcmc.sigma[i-1], shape=1, scale=1)
  ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
  u <- runif(1)
  if(log(u) < log(ratio)){
    mcmc.sigma[i] <- sigma_star
  } else {
    mcmc.sigma[i] <- mcmc.sigma[i-1]
  }
  # ---- update tau squared (nugget) ----
  var <- 2
  tau_star <- rlnorm(1, log(mcmc.tau[i-1]), sqrt(var))
  amin <- 1
  amax <- amin*mcmc.r[i-1]
  rotationMat <- matrix(c(cos(mcmc.a[i-1]),-sin(mcmc.a[i-1]),sin(mcmc.a[i-1]),cos(mcmc.a[i-1])),nrow=2,ncol=2)
  aMat <- matrix(c(1/amax,0,0,1/amin),nrow=2,ncol=2)
  A <- rotationMat %*% aMat
  B <- A %*% t(A)
  # only the diagonal differs between the star and prev covariances
  Sigma <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(mcmc.sigma[i], 1/mcmc.phi[i]), kappa = mcmc.kappa[i])
  Sigma_prev <- Sigma
  Sigma_star <- Sigma
  diag(Sigma_star) <- mcmc.sigma[i] + tau_star
  diag(Sigma_prev) <- mcmc.sigma[i] + mcmc.tau[i-1]
  # calculate acceptance ratio
  mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
  mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
  prior_star <- dinvgamma(tau_star, shape=1, scale=1)
  prior_prev <- dinvgamma(mcmc.tau[i-1], shape=1, scale=1)
  ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
  u <- runif(1)
  if(log(u) < log(ratio)){
    mcmc.tau[i] <- tau_star
  } else {
    mcmc.tau[i] <- mcmc.tau[i-1]
  }
  # ---- update angle and ratio jointly ----
  var1 <- 10
  tt <- rnorm(1, mcmc.a[i-1], sqrt(var1))
  a_star <- tt%%(pi)  # wrap the proposed angle into [0, pi)
  var2 <- 8
  r_star <- rtruncnorm(1, a=0, b=Inf, mean = mcmc.r[i-1], sd = sqrt(var2))
  amin <- 1
  # anisotropy matrix at the proposed angle/ratio
  amax <- amin*r_star
  rotationMat <- matrix(c(cos(a_star),-sin(a_star),sin(a_star),cos(a_star)),nrow=2,ncol=2)
  aMat <- matrix(c(1/amax,0,0,1/amin),nrow=2,ncol=2)
  A <- rotationMat %*% aMat
  B_star <- A %*% t(A)
  # anisotropy matrix at the previous angle/ratio
  amax <- amin*mcmc.r[i-1]
  rotationMat <- matrix(c(cos(mcmc.a[i-1]),-sin(mcmc.a[i-1]),sin(mcmc.a[i-1]),cos(mcmc.a[i-1])),nrow=2,ncol=2)
  # BUG FIX: rebuild the scaling matrix with the *previous* ratio. The
  # original recomputed amax here but reused aMat built from r_star, so
  # B_prev mixed the previous angle with the proposed ratio.
  aMat <- matrix(c(1/amax,0,0,1/amin),nrow=2,ncol=2)
  A <- rotationMat %*% aMat
  B_prev <- A %*% t(A)
  # covariances
  Sigma_star <- cov.spatial(sqrt(makeB(B_star)), cov.model= "matern", cov.pars=c(mcmc.sigma[i], 1/mcmc.phi[i]), kappa = mcmc.kappa[i])
  Sigma_prev <- cov.spatial(sqrt(makeB(B_prev)), cov.model= "matern", cov.pars=c(mcmc.sigma[i], 1/mcmc.phi[i]), kappa = mcmc.kappa[i])
  diag(Sigma_star) <- mcmc.sigma[i] + mcmc.tau[i]
  diag(Sigma_prev) <- mcmc.sigma[i] + mcmc.tau[i]
  # calculate acceptance ratio
  mvn_star <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_star, log=TRUE)
  mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma_prev, log=TRUE)
  prior_star <- dunif(a_star, 0, pi) * dinvgamma(r_star, shape=1, scale=1)
  prior_prev <- dunif(mcmc.a[i-1], 0, pi) * dinvgamma(mcmc.r[i-1], shape=1, scale=1)
  ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
  u <- runif(1)
  if(log(u) < log(ratio)){
    mcmc.a[i] <- a_star
    mcmc.r[i] <- r_star
  } else {
    mcmc.a[i] <- mcmc.a[i-1]
    mcmc.r[i] <- mcmc.r[i-1]
  }
  # ---- update mu (process mean) ----
  var <- 3
  mu_star <- rnorm(1, mcmc.mu[i-1], sqrt(var))
  amin <- 1
  amax <- amin*mcmc.r[i]
  rotationMat <- matrix(c(cos(mcmc.a[i]),-sin(mcmc.a[i]),sin(mcmc.a[i]),cos(mcmc.a[i])),nrow=2,ncol=2)
  aMat <- matrix(c(1/amax,0,0,1/amin),nrow=2,ncol=2)
  A <- rotationMat %*% aMat
  B <- A %*% t(A)
  # covariance at the current parameter values (shared by star and prev)
  Sigma <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(mcmc.sigma[i], 1/mcmc.phi[i]), kappa = mcmc.kappa[i])
  diag(Sigma) <- mcmc.sigma[i] + mcmc.tau[i]
  # calculate acceptance ratio (normal random walk is symmetric)
  mvn_star <- dmvnorm(y_train, rep(mu_star, numSites), Sigma, log=TRUE)
  mvn_prev <- dmvnorm(y_train, rep(mcmc.mu[i-1], numSites), Sigma, log=TRUE)
  prior_star <- dnorm(mu_star, 0, 100)
  prior_prev <- dnorm(mcmc.mu[i-1], 0, 100)
  ratio <- exp(mvn_star-mvn_prev) * prior_star / prior_prev
  u <- runif(1)
  if(log(u) < log(ratio)){
    mcmc.mu[i] <- mu_star
  } else {
    mcmc.mu[i] <- mcmc.mu[i-1]
  }
}
save(mcmc.sigma, mcmc.tau, mcmc.a, mcmc.r, mcmc.phi, mcmc.kappa, mcmc.mu, file="mcmc_matern_comp.Rdata")
# Discard the first numBurn iterations and keep every 20th draw thereafter.
numBurn <- 20000
numKeep <- seq(numBurn+20, numSim, by=20)
a_samp <- mcmc.a[numKeep]
r_samp <- mcmc.r[numKeep]
sig_samp <- mcmc.sigma[numKeep]
tau_samp <- mcmc.tau[numKeep]
phi_samp <- mcmc.phi[numKeep]
mu_samp <- mcmc.mu[numKeep]
kappa_samp <- mcmc.kappa[numKeep]
# Optional diagnostics: marginal posterior densities
# (xlab values for sigma/phi were swapped in the original comments).
# par(mfrow=c(2,3))
# plot(density(a_samp, from=0), xlab = "a")
# plot(density(r_samp, from=0), xlab = "r")
# plot(density(sig_samp, from=0), xlab = "sigma")
# plot(density(phi_samp, from=0), xlab = "phi")
# plot(density(tau_samp, from=0), xlab = "tau")
# plot(density(kappa_samp, from=0), xlab = "kappa")
# Kriging with the latent field marginalized out.
# Draws one posterior predictive sample per row of `newSites` using the
# parameter values from MCMC iteration `numSample` of the geometric
# anisotropy Matern model (covariance evaluated at Mahalanobis-type
# distances sqrt(h' B h), B built from the sampled angle/ratio).
#
# NOTE(review): relies on globals a_samp, r_samp, sig_samp, tau_samp,
# phi_samp, kappa_samp, mu_samp, sites_train, y_train, numSites, makeB(),
# and cov.spatial() (geoR package) defined earlier.
makePredictionAniso <- function(numSample, newSites){
  sigmasq <- sig_samp[numSample]
  tausq <- tau_samp[numSample]
  mu <- mu_samp[numSample]
  a <- a_samp[numSample]
  r <- r_samp[numSample]
  phi <- phi_samp[numSample]
  kappa <- kappa_samp[numSample]
  # Anisotropy matrix B = A A' from rotation angle a and range ratio r.
  amin <- 1
  amax <- amin*r
  rotationMat <- matrix(c(cos(a),-sin(a),sin(a),cos(a)),nrow=2,ncol=2)
  aMat <- matrix(c(1/amax,0,0,1/amin),nrow=2,ncol=2)
  A <- rotationMat %*% aMat
  B <- A %*% t(A)
  # Covariance among observed sites; invert once and reuse per new site.
  SigmaObs <- cov.spatial(sqrt(makeB(B)), cov.model= "matern", cov.pars=c(sigmasq, 1/phi), kappa = kappa)
  diag(SigmaObs) <- sigmasq + tausq
  S <- solve(SigmaObs)
  # Hoisted loop invariant: centered training responses.
  resid_train <- y_train - rep(mu, numSites)
  # Preallocate instead of growing the result with c() inside the loop.
  predictions <- numeric(nrow(newSites))
  for(j in seq_len(nrow(newSites))){
    newSite <- newSites[j,]
    # Coordinate differences between the new site and each observed site.
    distPO <- matrix(0, nrow=numSites, ncol = 2)
    for(k in seq_len(numSites)){
      distPO[k,] <- newSite - sites_train[k,]
    }
    # Cross-covariance at anisotropic distances sqrt(h' B h).
    SigmaPO <- matrix(0, nrow=1, ncol=numSites)
    for(k in seq_len(numSites)){
      SigmaPO[,k] <- cov.spatial(sqrt(distPO[k,] %*% B %*% distPO[k,]), cov.model= "matern", cov.pars=c(sigmasq, 1/phi), kappa = kappa)
    }
    mean_yPred <- mu + SigmaPO %*% S %*% resid_train
    var_yPred <- sigmasq + tausq - SigmaPO %*% S %*% t(SigmaPO)
    predictions[j] <- rnorm(1, mean_yPred, sqrt(var_yPred))
  }
  return(predictions)
}
nMCMC <- length(a_samp)
# Posterior predictive draws: rows = retained iterations, columns = test sites.
predAniso <- t(sapply(1:nMCMC, makePredictionAniso, newSites=sites_test))
#plot(1:40, y_test, pch=19, cex=0.5, xlab="observed y", ylab="predicted y",ylim=c(min(y.hat), max(y.hat)))
#arrows(1:40, y.hat[2,], 1:40, y.hat[1,], angle=90, length=0.05)
#arrows(1:40, y.hat[2,], 1:40, y.hat[3,], angle=90, length=0.05)
# 1. empirical coverage
# For each column of `mat` (posterior predictive draws at one site), report
# the posterior mean, the 5%/95% bounds, the observed value, and a flag for
# whether the 90% interval captured it.
EC <- function(mat, yObs) {
  stats <- apply(mat, 2, function(draws) {
    c(mean(draws), quantile(draws, probs = c(0.05, 0.95)))
  })
  empCov <- data.frame(t(stats), yObs)
  colnames(empCov) <- c("Mean", "Lower", "Higher", "True")
  empCov$capture <- empCov$Lower <= empCov$True & empCov$Higher >= empCov$True
  empCov
}
# Coverage/PMSE for the Matern model only; the isotropic comparison lines
# are commented out in this second evaluation pass.
#empCovIso <- EC(predIso, y_test)
empCovAniso <- EC(predAniso, y_test)
numPred <- length(y_test)
#ecIso <- sum(empCovIso$capture)/numPred
ecAniso <- sum(empCovAniso$capture)/numPred
# pdf("empCovIso.pdf")
# numPred <- length(y_test)
# ggplot(empCovAniso, aes(y=True, x=1:numPred, color=capture)) +
# xlab("index") +
# geom_errorbar(aes(ymax=Higher, ymin=Lower), width=0, color='black', alpha=0.3, size=2) +
# geom_point(size=3) +
# labs(x = "Index of New Sites", y="Predicted/Observed Value") +
# ggtitle(paste("Empirical Coverage of Isotropic Model =",sum(empCovAniso$capture)/numPred))
# dev.off()
# 2. PMSE (predictive mean squared error)
#mseIso <- mean((empCovIso$Mean-empCovIso$True)^2)
mseAniso <- mean((empCovAniso$Mean-empCovAniso$True)^2)
# 3. CRPS
# Continuous Ranked Probability Score between a posterior sample and the
# observed value(s): Riemann-sum approximation of the integral of
# (F_post - F_obs)^2 over the pooled data range on a grid of n_pts points.
# (Duplicate of the definition earlier in the script; only change from the
# original is `<-` instead of `=` for assignment.)
crps_test <- function(post, obs, n_pts = 1e6)
{
  F_post <- ecdf(post)
  F_obs <- ecdf(obs)
  d <- c(obs, post)
  s <- seq(min(d), max(d), len = n_pts)
  sum((F_post(s) - F_obs(s))^2) * (max(d) - min(d)) / n_pts
}
#crpsIso <- mean(sapply(1:ncol(predIso), function(x) crps_test(predIso[,x],y_test[x])))
# Mean CRPS across test sites for the geometric-anisotropy Matern model.
crpsAniso <- mean(sapply(1:ncol(predAniso), function(x) crps_test(predAniso[,x],y_test[x])))
# Output file name encodes the simulation settings
# (train size, total size, sigma^2, tau^2).
name <- paste(numSites, numTotal, sigmasq, tausq, "matern_comp.Rdata", sep = "_")
save(ecAniso, mseAniso, crpsAniso, file=name)
#load("500_600_1_0.2_matern_comp.Rdata")
|
a6e4c2a63fe1027b805c7dd69ef197d651b7bad6
|
5ae7cdb3e0f6cdd172b4b9c2f4605859b389ce0a
|
/scripts/data-carpentry/generate-tidy-data.R
|
80697d9c2bd824865ca1e0d4832c1d19df51ad32
|
[] |
no_license
|
mikoontz/ppp-establishment
|
2d2573d5669f2740c6b6374d8a034684e1ac11a3
|
eefc8a27cd669293ff4fd3737464bf60d2f62143
|
refs/heads/master
| 2018-10-31T18:13:22.108599
| 2018-03-26T15:43:10
| 2018-03-26T15:43:10
| 74,297,185
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,338
|
r
|
generate-tidy-data.R
|
# Title: generate tidy data
#
# Author: Michael Koontz
# Email: mikoontz@gmail.com
#
# Date Created: 20150330
# Last Updated: 20150331
# This function takes the entered Tribolium flour beetle data from the "Eco-evolutionary consequences of multiple introductions" main experiment (which is in long form), and puts it in a 2 dimensional form to more easily represent the time series. Each population is defined by a unique ID, which is included in each of the produced dataframes here. This makes for easy merging (using ID as a key) with the 'attributes.csv' file (which includes block number, treatment types, whether a gap in the introduction occurred, whether the populations experienced the reduced incubator moisture during generation 2, etc.)
# Note this code also corrects 12 population trajectories that had their population drop to 1 individual, but rebounded due to egg contamination (eggs on a lab surface getting into the microcosm). We coerced those populations to be extinct after they dropped in size to 1 individual.
# Affected populations: 13, 45, 87, 98, 303, 362, 500, 523, 640, 758, 777, 825
# Requires the tidyr package for reshaping the data.
# Input is the long-form entered data in a dataframe object type.
# Returns a list of dataframes representing the different values that are unique to each ID/Generation combination.
# Further manipulations on these data frames are possible using other functions. For instance, cbind(Ntp1$ID, Ntp1[,2:11]/Nt[,2:11] would be equivalent to a dataframe of lambda values for each time step.
# Load tidyr library
library(tidyr)
# Reshape the long-form Tribolium census data (one row per ID x Generation)
# into wide, one-row-per-population data frames keyed by ID, and optionally
# coerce "loner" populations (those that dropped to 1 individual after
# introductions ended but persisted due to egg contamination) to extinction.
#
# beetles:          long-form entered data with columns ID, Generation,
#                   Census, N0, Addition, Environment, Person, Setup.Order
# deal_with_loners: if TRUE, apply the loner-extinction correction
#
# Returns a list of wide data frames (Nt, Ntp1, migrants, environment,
# person, setup.order) plus loners_df recording the inspected/corrected
# populations. Requires tidyr::spread.
tidy.beetles <- function(beetles, deal_with_loners=TRUE)
{
  #----------
  # N[t+1] dataframe
  #----------
  # The final column has NA for ID 481:945 because only blocks 1 and 2 were
  # set up at the end of generation 9 to yield N[t+1] data for generation 10.
  b <- subset(beetles, select=c(ID, Generation, Census))
  Ntp1 <- spread(b, Generation, Census)
  names(Ntp1) <- c("ID", paste0("N", 0:9, "plus1"))
  #----------
  # N[t] dataframe
  #----------
  b <- subset(beetles, select=c(ID, Generation, N0))
  Nt <- spread(b, Generation, N0)
  names(Nt) <- c("ID", paste0("N", 0:9))
  #----------
  # Migration dataframe
  #----------
  b <- subset(beetles, select=c(ID, Generation, Addition))
  migrants <- spread(b, Generation, Addition)
  # Add the initial introduction onto the dataframe.
  migrants$'0' <- Nt$N0
  # Reorder columns and drop the final-generation addition, since those
  # populations weren't set up for another generation.
  total.columns <- ncol(migrants)
  migrants <- migrants[, c(1, total.columns, 2:(total.columns-2))]
  names(migrants) <- c("ID", paste0("migrants", 0:9))
  #----------
  # Environment % standard media mixture dataframe
  #----------
  b <- subset(beetles, select=c(ID, Generation, Environment))
  environment <- spread(b, Generation, Environment)
  names(environment) <- c("ID", paste0("env", 0:9))
  #----------
  # Census taker dataframe
  #----------
  b <- subset(beetles, select=c(ID, Generation, Person))
  person <- spread(b, Generation, Person)
  names(person) <- c("ID", paste0("person", 0:9))
  #----------
  # Setup order dataframe
  #----------
  b <- subset(beetles, select=c(ID, Generation, Setup.Order))
  setup.order <- spread(b, Generation, Setup.Order)
  names(setup.order) <- c("ID", paste0("setup.order", 0:9))
  # (Leftover interactive head()/tail() inspection calls removed: inside a
  # function their results were evaluated and silently discarded.)
  #---------
  # Make populations of size 1 go extinct
  #---------
  loners_df <- data.frame()
  if (deal_with_loners)
  {
    # Two conditions must hold: the population had 1 individual, and no more
    # introductions were coming. Then the next generation is forced to 0 and
    # all later generations to NA.
    columns <- ncol(migrants)
    # Per row: scanning backwards, index of the first non-zero migrant count.
    idx <- apply(migrants[, (columns:2)], MARGIN=1, FUN=function(x) match(FALSE, x==0))
    # Last generation whose Ntp1 census was influenced by migrants.
    last.migrants <- columns - idx
    loner.col <- rep(NA, nrow(Ntp1))
    # First generation AFTER migration ended where the population had exactly
    # 1 individual; add back the skipped columns (+1 accounts for "ID").
    for (i in seq_len(nrow(Ntp1)))
    {
      loner.col[i] <- match(1, Ntp1[i, (last.migrants[i] + 1):columns]) + last.migrants[i]
    }
    # Populations that hit size 1 after introductions finished (includes ones
    # that actually went extinct and ones that should have).
    loner.idx <- which(!is.na(loner.col))
    # Column of Ntp1 that should be 0 for each loner...
    extinct.col <- loner.col[loner.idx] + 1
    # ...and the first column that should be NA (all later ones too).
    NA.col <- extinct.col + 1
    counter <- 0
    running_sum <- 0
    culprits <- numeric(length(loner.idx))
    # BUG FIX: seq_along() instead of 1:length(loner.idx); the original
    # iterated over c(1, 0) and failed when there were no loner populations.
    for (j in seq_along(loner.idx))
    {
      # Only proceed if the loner wasn't in the final generation.
      if (extinct.col[j] <= columns)
      {
        # Only proceed if the population didn't go extinct when it should have.
        if (as.numeric(Ntp1[loner.idx[j], extinct.col[j]] > 0))
        {
          # Track how many culprit populations were corrected and the total
          # census mass that was zeroed out.
          counter <- counter + 1
          running_sum <- running_sum + Ntp1[loner.idx[j], extinct.col[j]]
          culprits[j] <- 1
          # Force the appropriate Ntp1 time point to extinction.
          Ntp1[loner.idx[j], extinct.col[j]] <- 0
          # Propagate: later Nt columns become 0, later Ntp1 columns NA.
          if (NA.col[j] <= columns)
          {
            Nt[loner.idx[j], (extinct.col[j]+1):columns] <- 0
            Ntp1[loner.idx[j], NA.col[j]:columns] <- NA
          }
        }
      }
    }
    loners_df <- data.frame(loner.idx, culprits, extinct.col)
  }
  return(list(Nt=Nt, Ntp1=Ntp1, migrants=migrants, environment=environment, person=person, setup.order=setup.order, loners_df=loners_df))
}
|
2cf6e3a9d7ba1147bf3d34d3cbf881b9d407d00c
|
7e4603d96817a188ff0f4e45a94054668d1ea49b
|
/R/summarise.glm.R
|
49826ff8545c24cd1990a166e6502e0ba29fc7f6
|
[] |
no_license
|
c97sr/SRileyIDD
|
5ff45e19804b7ec0b86d959ee48e38765ce75a99
|
ddd9d0f18fa5af6633da47fb25b780c5cbc4d017
|
refs/heads/master
| 2020-05-18T10:49:00.615332
| 2015-07-01T22:02:00
| 2015-07-01T22:02:00
| 22,818,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,937
|
r
|
summarise.glm.R
|
# Summarise a list of fitted glm objects into a parameter table.
#
# For each model, extracts coefficient point estimates and Wald 95% CIs
# (confint.default), transformed by `outfunc` for display (default exp,
# i.e. odds/rate ratios for logit/log-link models -- confirm the link
# matches), along with AIC and percent deviance explained ("DEX").
# Optionally writes a CSV-formatted summary (models as columns, or one
# model per row when transpose=TRUE) and returns a data frame of the
# UNtransformed estimates, bounds, AIC and DEX.
summarise.glm <- function(
lstModels,
outfunc=exp,
writetab=TRUE,
file="modsum.csv",
sigdigits=3,
transpose=FALSE) {
# Figure out the number of models
nomods <- length(lstModels)
# Make a vector of the union of all coefficient names across models
allCoeffs <- c()
for (i in 1:nomods) {
# Select current model results
mod <- lstModels[[i]]
# Get a list of variables
vars <- names(mod$coefficients)
novars <- length(vars)
# Go through each variable and add it if it's not already in the list
for (j in 1:novars) {
# Get the variable name
curname <- vars[j]
# Test for the presence of the variable in the master list
var_present <- (curname %in% allCoeffs)
# If not in the list add it
if (!(var_present)) allCoeffs <- c(allCoeffs,curname)
# Close the for loop for j
}
# Close the for loop for i
}
# Define the data structures used to extract the information from the models
# (rows = models, columns = coefficients; NA where a model lacks a term)
noCoeffs <- length(allCoeffs)
matPointEst <- matrix(NA,nrow=nomods,ncol=noCoeffs,dimnames=list(1:nomods,allCoeffs))
matLB <- matrix(NA,nrow=nomods,ncol=noCoeffs,dimnames=list(1:nomods,allCoeffs))
matUB <- matrix(NA,nrow=nomods,ncol=noCoeffs,dimnames=list(1:nomods,allCoeffs))
vecAIC <- vector(mode="numeric",length=nomods)
vecDEX <- vector(mode="numeric",length=nomods)
# Loop back through the models and the coefficients to populate the structures
for (i in 1:nomods) {
# Select current model results
mod <- lstModels[[i]]
cis <- confint.default(mod)
# Get a list of variables
vars <- names(mod$coefficients)
novars <- length(vars)
# Record the AIC and percent deviance explained
vecAIC[i] <- mod$aic
vecDEX[i] <- (1-mod$deviance/mod$null.deviance)*100
# Go through each variable and record its estimate and CI
for (j in 1:novars) {
# Get the variable name
curname <- vars[j]
# Extract the point estimate and confidence intervals for the parameters
matPointEst[i,curname] <- mod$coefficients[curname]
matLB[i,curname] <- cis[curname,1]
matUB[i,curname] <- cis[curname,2]
# Close the for loop for j
}
# Close the for loop for i
}
# If selected, write a nicely formatted csv table for the parameters and models
if (writetab) {
if (transpose) {
# Transposed layout: one line per model, coefficients as column pairs.
# Declare the output string
strTable <- ""
# Put in the first header row
strTable <- paste(strTable,"Parameter",sep="")
for (i in 1:noCoeffs) strTable <- paste(strTable,",",allCoeffs[i],",",allCoeffs[i],sep="")
strTable <- paste(strTable,",AIC,DEX\n",sep="")
# Put in the second header row
strTable <- paste(strTable,"Model",sep="")
for (i in 1:noCoeffs) strTable <- paste(strTable,",PE,CI",sep="")
strTable <- paste(strTable,",AIC,DEX\n",sep="")
# Output individual model lines, starting with coefficient loop
for (i in 1:nomods) {
# Pull the name of the current coefficient
# curname <- allCoeffs[i]
# Put in the model number as the row label
strTable <- paste(strTable,i,sep="")
# Cycle through the coefficients for this model
for (j in 1:noCoeffs) {
# Identify the current coefficient
curname <- allCoeffs[j]
# Transformed (via outfunc) estimate and bounds, rounded for display
curPE <- signif(outfunc(matPointEst[i,curname]),digits=sigdigits)
curLB <- signif(outfunc(matLB[i,curname]),digits=sigdigits)
curUB <- signif(outfunc(matUB[i,curname]),digits=sigdigits)
# Paste in the parameter values and the confidence intervals
if (is.na(curPE)) {
# Put in the entry for NA results (term absent from this model)
strTable <- paste(strTable,",","-",",","-",sep="")
} else {
# Put in the entry for non NA results
strTable <- paste(strTable,",",curPE,",","(",curLB,"--",curUB,")",sep="")
}
# End j loop for coefficients
}
# Add the AIC and DEX at the end of the line, with a return
mod <- lstModels[[i]]
curAIC <- round(mod$aic,digits=1)
curDEX <- round((1-mod$deviance/mod$null.deviance)*100,digits=1)
strTable <- paste(strTable,",",curAIC,",",curDEX,"\n",sep="")
# End the i for loop for models
}
# End the if clause for transpose
} else {
# Default layout: one line per coefficient, models as column pairs.
# Declare the output string
strTable <- ""
# Put in the first header row
strTable <- paste(strTable,",Model 1",sep="")
if (nomods>1) for (i in 2:nomods) strTable <- paste(strTable,",,Model ",i,sep="")
strTable <- paste(strTable,"\n",sep="")
# Put in the second header row
# NOTE(review): this header row is skipped entirely when nomods == 1,
# leaving a blank line; the condition looks like a copy-paste from the
# banner row above -- confirm whether it should be unconditional.
if (nomods>1) for (i in 1:nomods) {
strTable <- paste(strTable,",Estimate,(95% CI)",sep="")
}
strTable <- paste(strTable,"\n",sep="")
# Output individual coefficient lines, starting with coefficient loop
for (i in 1:noCoeffs) {
# Pull the name of the current coefficient
curname <- allCoeffs[i]
# Put in the name of the coefficient
strTable <- paste(strTable,curname,sep="")
# Cycle through the tables looking at the different models
for (j in 1:nomods) {
# Transformed (via outfunc) estimate and bounds for each
# parameter / model combination
curPE <- signif(outfunc(matPointEst[j,curname]),digits=sigdigits)
curLB <- signif(outfunc(matLB[j,curname]),digits=sigdigits)
curUB <- signif(outfunc(matUB[j,curname]),digits=sigdigits)
# Paste in the parameter values and the confidence intervals
if (is.na(curPE)) {
# Put in the entry for NA results (term absent from this model)
strTable <- paste(strTable,",","-",",","-",sep="")
} else {
# Put in the entry for non NA results
strTable <- paste(strTable,",",curPE,",","(",curLB,"--",curUB,")",sep="")
}
# End model for loop
}
# Return at the end of the line
strTable <- paste(strTable,"\n",sep="")
# End for for coeffs
}
# Write the row name for the AICs
strTable <- paste(strTable,"AIC",sep="")
# Start the for loop for the AICs
for (i in 1:nomods) {
# Get the current model
mod <- lstModels[[i]]
# Format the AIC for the current model
curAIC <- round(mod$aic,digits=1)
# Write the value and the space
strTable <- paste(strTable,",",curAIC,",",sep="")
}
# Return at the end of the AIC line
strTable <- paste(strTable,"\n",sep="")
# Write the row name for the DEXs
strTable <- paste(strTable,"DEX",sep="")
# Start the for loop for the DEX
for (i in 1:nomods) {
# Get the current model
mod <- lstModels[[i]]
# Format the DEX for the current model
curDEX <- round((1-mod$deviance/mod$null.deviance)*100,digits=1)
# Write the value and the space
strTable <- paste(strTable,",",curDEX,",",sep="")
}
# Return at the end of the DEX line
strTable <- paste(strTable,"\n",sep="")
# End else statement for transpose
}
# Write the string to the selected file
cat(strTable,file=file)
}
# Return the untransformed estimates, bounds, AIC and DEX
data.frame(pe=matPointEst,lb=matLB,ub=matUB,aic=vecAIC,dex=vecDEX)
}
|
db0f442424733eae7fc7c7a96337e910ce37946c
|
9cc7423f4a94698df5173188b63c313a7df99b0e
|
/man/crawford.test.freq.Rd
|
de422645aed3f72fc0386e062d9d4875b9eb7e7c
|
[
"MIT"
] |
permissive
|
HugoNjb/psycho.R
|
71a16406654b11007f0d2f84b8d36587c5c8caec
|
601eef008ec463040c68bf72ac1ed8d4a8f7751f
|
refs/heads/master
| 2020-03-27T01:24:23.389884
| 2018-07-19T13:08:53
| 2018-07-19T13:08:53
| 145,707,311
| 1
| 0
| null | 2018-08-22T12:39:27
| 2018-08-22T12:39:27
| null |
UTF-8
|
R
| false
| true
| 1,142
|
rd
|
crawford.test.freq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crawford.test.R
\name{crawford.test.freq}
\alias{crawford.test.freq}
\title{Crawford-Howell (1998) frequentist t-test for single-case analysis.}
\usage{
crawford.test.freq(patient, controls)
}
\arguments{
\item{patient}{Single value (patient's score).}
\item{controls}{Vector of values (control's scores).}
}
\value{
Returns a data frame containing the t-value, degrees of freedom, and p-value. If significant, the patient is different from the control group.
}
\description{
Neuropsychologists often need to compare a single case to a small control group. However, the standard two-sample t-test does not work because the case is only one observation. Crawford and Garthwaite (2012) demonstrate that the Crawford-Howell (1998) t-test is a better approach (in terms of controlling Type I error rate) than other commonly-used alternatives.
}
\examples{
library(psycho)
crawford.test.freq(patient = 10, controls = c(0, -2, 5, 2, 1, 3, -4, -2))
crawford.test.freq(patient = 7, controls = c(0, -2, 5, 2, 1, 3, -4, -2))
}
\author{
Dan Mirman, Dominique Makowski
}
|
2ba48e62ab904cc878659f543ff9e4e47a7245e9
|
0031c07492e0878b9add94356c112ece169ff2dc
|
/assignment5/question3.R
|
9982d8e8128f0fca3960aeceeb0e5220c8a5d700
|
[] |
no_license
|
jonkeane/datasci_course_materials
|
6c13965827220650dafee76536740ac1781851c9
|
1dde97ef1a947a8d868aaab2d7b063bb22e9a965
|
refs/heads/master
| 2020-12-27T12:02:51.673899
| 2015-11-28T18:12:05
| 2015-11-28T18:12:05
| 47,002,383
| 0
| 0
| null | 2015-11-27T22:58:53
| 2015-11-27T22:58:53
| null |
UTF-8
|
R
| false
| false
| 66
|
r
|
question3.R
|
# Load the quiz setup script (defines the `flow` data frame used below).
source("quiz.R")
# question 3
# Third quartile of fsc_small; summary() returns a named vector, so we
# index it by the "3rd Qu." label. (fsc_small is presumably forward
# scatter from the flow-cytometry data -- see quiz.R.)
summary(flow$fsc_small)["3rd Qu."]
|
f5ce16dbb6170862ce28acb7404bae18f2d9befe
|
2c1561e4467b6664a2791df07827cbf91f48b97a
|
/Part_05_Linear_Model_Examplepedit.R
|
34686dff846d1dfe5029044e2f87ead6da323e44
|
[] |
no_license
|
anhnguyendepocen/R_Book
|
38ea878d4843df063adf719215bb65a83179d29c
|
e3935d740f6094b10f0a75b896810eb365144ac3
|
refs/heads/master
| 2020-03-18T02:46:29.153118
| 2017-08-20T20:39:27
| 2017-08-20T20:39:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34,635
|
r
|
Part_05_Linear_Model_Examplepedit.R
|
#
# SCS 2011: Statistical Analysis and Programming with R
# September-October 2011
#
# Linear Models in R
# -- much of the code is borrowed from Fox,
# Introduction to the R Statistical Computing Environment
# ICPSR Summer Program, 2010-2011
# Note: the bare string literals below act as section banners; they are
# evaluated and discarded when the script is sourced.
"
== Install packages (if needed) ==
"
install.packages(c('car','Hmisc','rgl'))
# spida.beta and p3d.beta are course packages distributed as zip files
# (not on CRAN): download, then install from the local files (repos = NULL).
download.file("http://www.math.yorku.ca/people/georges/Files/R/spida.beta.zip", "spida.beta.zip")
download.file("http://www.math.yorku.ca/people/georges/Files/R/p3d.beta.zip", "p3d.beta.zip")
install.packages("spida.beta.zip", repos = NULL)
install.packages("p3d.beta.zip", repos = NULL)
"
== Load packages ==
"
library(car)
library(spida.beta)
library(p3d.beta)
"
== Fitting a linear regression model with numerical reponse ==
* R approach to data analysis:
** use many small tools, not one big package: more flexible and adaptive, requires more knowledge
** integrate graphical and numerical exploration of data
* numerical and categorical predictors
* interaction
* asking questions: linear hypotheses
"
"
=== Looking at your data ===
"
# The data:
data(Prestige) # optional: bring data from 'car' package to workspace
# this is a way of 'reading' a data set in a package
# later we will talk about reading other data sets
xqplot( Prestige ) # quick look (from spida.beta)
?Prestige # 'help' on Prestige
names(Prestige)
head(Prestige) # first 6 lines
tail(Prestige, 2) # last 2 lines
some(Prestige) # 10 randomly sampled observations
# Selecting subsets and indexing
Prestige[ c(1,4), ] # Selecting rows
Prestige[ c(1,4), c('type','women')] # ... rows and columns
Prestige[ Prestige$women > 50, ] # selects on rows
Prestige[ Prestige$type == 'prof', c('type','income') ] # selects on rows and columns
Prestige[ order(Prestige$women), ] # ordering
order(Prestige$women) # why is works
# Tables:
with( Prestige, table( type ) ) # using 'with' to refer to variable in a date frame
# note that 'table' drops NAs
with( Prestige, table( type , cut( women, 3)) ) # creating intervals from a numeric variable
tab(Prestige, ~ type + cut(women,3)) # 'tab' in spida.beta refers to variables with a formula
# similarly to fitting functions: lm, etc.
tab(Prestige, ~ type + cut(women,3), pct = 2)
tab(Prestige, ~ type + cut(women,3), pct = 2, useNA = 'no')
# more plots
scatterplotMatrix( Prestige ) # fancy from 'car'
pairs( Prestige ) # old
splom( Prestige ) # newer
Init3d()
Plot3d( income ~ education + women | type, Prestige)
Axes3d()
Id3d()
# why a linear (in x and y) model won't work
fit.lin <- lm( income ~ education + women, Prestige)
str(fit.lin)
fit.lin
Fit3d( fit.lin )
Pop3d(2)
colors()
Fit3d( fit.lin , col = 'hotpink', alpha = .6)
pal(colors())
pal(grep("pink",colors(), value = T)) # global regular expression print (from old unix)
"
=== Regression on numerical variables and interpretation ===
"
# Regression with an additive numerical model (no interactions, all vars continuous )
# Note: you can transform a variable on the fly
income.mod <- lm( log(income) ~ education + women,
data = Prestige)
#
summary(income.mod) # Interpretation of coefficients: Proportion change in y per unit change in x keeping other x constant
# multiply coef by 100 to get percentage change in y -- or use 100*log(y) as dependent variable
Plot3d( log(income) ~ education + women | type, Prestige) # note: some curvature and negative outliers
Fit3d( income.mod )
Axes3d()
Id3d()
confint(income.mod)
wald(income.mod)
wald(income.mod, -1) # removing first variable
"
=== Diagnostics ===
"
plot( income.mod ) # residuals vs fitted, normal quantiles for resid,
# scale location, residual vs leverage
avPlots( income.mod ) # added variable plots
"
=== Anova ===
"
anova( income.mod ) # sequential tests -- type I SS
# Q: does each term 'add' to previous one in list
Anova( income.mod ) # type II SS: does each term add to others
# excluding terms with that interact with the term
Anova( income.mod, type = "III") # does each term have an effect when added last
"
=== Getting information from a regression ===
"
"
<math>\hat{\beta}</math>:
"
coef( income.mod )
"
<math>\hat{\Var}(\hat{\beta})</math>:
"
vcov( income.mod )
"
=== Factors in regression ===
"
#
Prestige$incomes <- sqrt( Prestige$income )
prestige.add <- lm( prestige ~ incomes + type, Prestige)
summary(prestige.add)
# regular graphics
"
==== Basic plotting ====
"
plot( prestige ~ incomes, Prestige, col = type)
plot( prestige ~ incomes, Prestige, col = type, pch = 16)
plot( prestige ~ incomes, Prestige, col = type, cex = 1.5, lwd = 2)
plot( prestige ~ incomes, Prestige, col = c('red','blue','magenta')[type], cex = 1.5, lwd = 2)
plot( prestige ~ incomes, Prestige, col = type, cex = 1.5, lwd = 2, axes = FALSE)
axis(1, at = seq(20,160,by=20), labels = seq(20,160,by=20)^2)
axis(2, at = seq(20,80,by=20))
box()
abline( h = seq(20,80,20), lty = 2, col ='gray')
abline( v = seq(20,160,20), lty = 2, col ='gray')
"
==== Note on factors ====
* R's way of representing categorical data for analysis
* reading a data frame automatically turns a character variable into a factor
* example: type
"
Prestige$type
str( Prestige$type )
unclass( Prestige$type ) # raw internal representation
# internally it's integers
# but it prints as character
as.character( Prestige$type )
unclass( as.character( Prestige$type )) # this is really a character var.
# in some ways factors are numeric, in others character
f.ex <- factor( c('One','Two','Three','Four','One','Two'))
f.ex # note that levels are in lexicographical (alpha) order by default
unclass(f.ex)
tab( f.ex)
letters
letters[f.ex] # when indexing, f.ex acts as a number and uses its **codes**
f.ex == "Three" # in logical operations, as a character
f.ex[1:2] # when subsetting, it remembers its original **levels**
f.ex[1:2, drop = T] # unless you ask
# reordering a factor
f.ex.ro <- factor( f.ex, levels = c('One','Two','Three','Four'))
f.ex.ro
letters[f.ex.ro]
outer( f.ex, f.ex.ro, "==") # applies function '==' to all pairs
z <- outer(f.ex, f.ex.ro, "==") #
dimnames(z) <- list(f.ex, f.ex.ro)
z # shows that == is applied to levels, not codes
"
==== Quick programs in R ====
* It's easy to turn a good idea into a function
"
# turn a good idea into a function:
out # make sure it is not already used
out <- function( x, y, FUN, ... ){
  # Apply FUN to every pair (x[i], y[j]) -- a labelled wrapper around
  # outer() so the resulting table is self-describing.
  #
  # x, y : vectors whose elements become the row/column labels
  #        (factors are labelled by their levels; the `dimnames<-`
  #        assignment coerces each component with as.character)
  # FUN  : a vectorized two-argument function, e.g. `==`, `+`, `|`
  # ...  : extra arguments forwarded to FUN through outer()
  #        (generalization; omitting them reproduces the old behavior)
  #
  # Returns: a length(x) x length(y) matrix with x and y as dimnames.
  ret <- outer( x, y, FUN, ... )
  dimnames( ret ) <- list( x, y )
  ret # value returned by a function is the last evaluated expression
}
out
out( f.ex, f.ex.ro, `==`) # uses levels, not codes
out( f.ex, f.ex.ro, `<`) # < not meaningful for factors
out( as.character(f.ex), as.character(f.ex.ro), `<`) # BUT it IS meaningful for characters!!
# Useful for lots of stuff
out( c(TRUE,FALSE,NA),c(TRUE,FALSE,NA), "|") # 3-valued logic in R
out( c(TRUE,FALSE,NA),c(TRUE,FALSE,NA), "&") # 3-valued logic in R
out( c(-Inf, -1, 0, 1, Inf, NA, NaN, 1i),c(-Inf, -1, 0, 1, Inf, NA, NaN,1i), "+" ) # extended arithmetic
out( c(-Inf, -1, 0, 1, Inf, NA, NaN, 1i),c(-Inf, -1, 0, 1, Inf, NA, NaN,1i), "*" ) # extended arithmetic
"
==== Factors in regression ====
<math> Y = X \beta + \epsilon </math>
* A factor with k levels generates k-1 columns in the X matrix
"
model.matrix( prestige ~ incomes + type, Prestige) # creates the X matrix
z <- model.matrix( prestige ~ incomes + type, Prestige)
some(z)
z$incomes # ERROR because a matrix is not a data frame
z <- as.data.frame(z) # turns matrix into a data frame [Example of coercion]
some( z )
z$incomes # a data frame contains variables
"
==== Merging data frames ====
"
# merging two data frames:
z$id <- rownames(z) # create an id variable for indexing in z
Prestige$id <- rownames(Prestige) # corresponding id for Prestige
zm <- merge( z, Prestige[,c('id','type')], by = 'id') # merges on common var 'id'
# note that the name of the variable here must be quoted!!!
# Recall: Functions may require a variable to be referenced:
# by name in quotes
# by name without quotes
# using a formula
# sometimes more than one will work, often only one
# Sorry!!! but good to know so it's easier to get out of dead ends
some( zm )
# Note dummy (indicator) variable for typeprof and typewc
"
==== Prediction data frames ====
"
# prediction data frame
# values for which we want to predict prestige
pred <- expand.grid( type = levels(Prestige$type), incomes = seq(15,185,10))
some( pred ) # all combination, good to use 'levels' to make sure in correct order
pred$y <- predict( prestige.add, newdata = pred)
some( pred )
"
==== For loop ====
"
for ( nn in levels(pred$type)) {
lines( y ~ incomes, pred, subset = type == nn, col = type)
}
"
==== lapply (better) ====
* <tt>lapply( list, FUN)</tt> applies function FUN to each element of list or vector
"
lapply( levels(pred$type), function(x){
lines( y ~ incomes, pred, subset = type == x, col = type, lwd =2)
})
"
=== Linking numbers with pictures and answers with questions ===
* Most statistical output answers questions you don't want to ask and doesn't answer the questions you should ask
* Linking the numbers with the graphs is a ideal test of understanding
* Interpreting coefficients for factor indicators: comparisons with the reference level -- the level that doesn't appear
"
summary( prestige.add )
################################ Show where the numbers appear on the graph
"
==== A colorful[sic] digression ====
"
# Easier in 3d
# BUT: need at most two numerical predictors and one factor -- here only one numerical so add any other
Plot3d( prestige ~ incomes + education | type, Prestige) # note categorical predictor after "|"
# changing colors:
colors()
pals() # palettes of all colors
pal(c('blue','skyblue','yellow4'))
pal(grepv('blue',colors()))
blues <- grepv( 'blue', colors())
length(blues)
Blues <- list(blues[1:33], blues[-(1:33)])
Blues
lapply( Blues, pal) # example lapply
# choose some colors:
Plot3d( prestige ~ incomes + education | type, Prestige, # note categorical predictor after "|"
col = c('gray90','red','blue'))
prestige.add <- lm( prestige ~ incomes + type, Prestige)
summary(prestige.add)
Fit3d(prestige.add)
Id3d()
# Exercise: explore other models
"
=== Interaction with numeric variables ===
* additive model vs model with interaction
"
data(Ginzberg)
head(Ginzberg)
Plot3d( depression ~ fatalism + simplicity, Ginzberg, xlim= c(0,3), zlim=c(0,3))
fit.int <- lm( depression ~ fatalism * simplicity, Ginzberg)
summary(fit.int)
# Additive model:
Fit3d( lm(depression ~ fatalism + simplicity, Ginzberg)) # additive
some( model.matrix(depression ~ fatalism + simplicity, Ginzberg) ) # X matrix
# Interaction model:
Fit3d( fit.int <- lm(depression ~ fatalism * simplicity, Ginzberg), col = 'red') # interaction
some( model.matrix(depression ~ fatalism * simplicity, Ginzberg) ) # X matrix
summary(fit.int) # What do the coefficients mean?
Axes3d()
"
==== Exploring a curved regression function ====
* Getting real answers to real questions
"
# Forming a linear hypothesis matrix:
L <- rbind( "Eff. of fatalism | simp = 0" = c( 0, 1, 0, 0), #take derivative wrt fatalism, ":" means product
"Eff. of fatalism | simp = 1" = c( 0, 1, 0, 1),
"Eff. of fatalism | simp = 3" = c( 0, 1, 0, 3),
"Eff. of simplicity | fatal = 0" = c( 0, 0, 1, 0),
"Eff. of simplicity | fatal = 3" = c( 0, 0, 1, 3)
)
L
wald( fit.int, L ) # compare with :
summary( fit.int )
"
* Note how, when there is an interaction term (SIG. OR NOT):
*: main effects only estimate a '''conditional''' effect (sometimes called a ''specific'' or ''special'' effect)
*: '''NOT''' a general effect of the variable
*: when there is interaction, the conditional effect varies and should be explored and described
* Note that graphs often reveal important structure not at all visible through numerical output -- google(TM) Anscombe examples
"
"
==== 'Additive' curvature ====
"
# Additive model with curvature
Fit3d( lm(depression ~ fatalism + simplicity + I(simplicity^2), Ginzberg), col = 'green') # interaction
some( model.matrix(depression ~ fatalism + simplicity + I(simplicity^2), Ginzberg) ) # X matrix
# Notes:
# distinguish among:
# 1. IV (independent variables) -- NOT Statitistical Independent but 'functionally' independent
# 2. regressor = column of X matrix
# 3. term = expression that generates columns of X matrix
"
=== Interaction with factors and numeric variables ===
* additive model vs model with interaction
"
######################################################################################### RE{ #
Prestige$incomes <- sqrt( Prestige$income )
prestige.add <- lm( prestige ~ incomes + type, Prestige)
summary(prestige.add)
# regular graphics
"
==== Basic plotting ====
"
plot( prestige ~ incomes, Prestige, col = type)
plot( prestige ~ incomes, Prestige, col = type, pch = 16)
plot( prestige ~ incomes, Prestige, col = type, cex = 1.5, lwd = 2)
plot( prestige ~ incomes, Prestige, col = c('red','blue','magenta')[type], cex = 1.5, lwd = 2)
plot( prestige ~ incomes, Prestige, col = type, cex = 1.5, lwd = 2, axes = FALSE)
axis(1, at = seq(20,160,by=20), labels = seq(20,160,by=20)^2)
axis(2, at = seq(20,80,by=20))
box()
abline( h = seq(20,80,20), lty = 2, col ='gray')
abline( v = seq(20,160,20), lty = 2, col ='gray')
"
==== Note on factors ====
* R's way of representing categorical data for analysis
* reading a data frame automatically turns a character variable into a factor
* example: type
"
Prestige$type
str( Prestige$type )
unclass( Prestige$type ) # raw internal representation
# internally it's integers
# but it prints as character
as.character( Prestige$type )
unclass( as.character( Prestige$type )) # this is really a character var.
# in some ways factors are numeric, in others character
f.ex <- factor( c('One','Two','Three','Four','One','Two'))
f.ex # note that levels are in lexicographical (alpha) order by default
unclass(f.ex)
tab( f.ex)
letters
letters[f.ex] # when indexing, f.ex acts as a number and uses its **codes**
f.ex == "Three" # in logical operations, as a character
f.ex[1:2] # when subsetting, it remembers its original **levels**
f.ex[1:2, drop = T] # unless you ask
# reordering a factor
f.ex.ro <- factor( f.ex, levels = c('One','Two','Three','Four'))
f.ex.ro
letters[f.ex.ro]
outer( f.ex, f.ex.ro, "==") # applies function '==' to all pairs
z <- outer(f.ex, f.ex.ro, "==") #
dimnames(z) <- list(f.ex, f.ex.ro)
z # shows that == is applied to levels, not codes
"
==== Quick programs in R ====
* It's easy to turn a good idea into a function
"
# turn a good idea into a function:
out # make sure it is not already used
out <- function( x, y, FUN ){
  # Tabulate FUN over all (x, y) pairs, labelling rows by x and
  # columns by y so the printed matrix can be read without the call.
  # x, y : vectors supplying the values and the labels
  # FUN  : vectorized function of two arguments (e.g. `==`, `<`, `&`)
  # Returns a length(x) x length(y) matrix with character dimnames.
  result <- outer( x, y, FUN )
  rownames( result ) <- as.character( x )
  colnames( result ) <- as.character( y )
  result
}
out
out( f.ex, f.ex.ro, `==`) # uses levels, not codes
out( f.ex, f.ex.ro, `<`) # < not meaningful for factors
out( as.character(f.ex), as.character(f.ex.ro), `<`) # BUT it IS meaningful for characters!!
# Useful for lots of stuff
out( c(TRUE,FALSE,NA),c(TRUE,FALSE,NA), "|") # 3-valued logic in R
out( c(TRUE,FALSE,NA),c(TRUE,FALSE,NA), "&") # 3-valued logic in R
out( c(-Inf, -1, 0, 1, Inf, NA, NaN, 1i),c(-Inf, -1, 0, 1, Inf, NA, NaN,1i), "+" ) # extended arithmetic
out( c(-Inf, -1, 0, 1, Inf, NA, NaN, 1i),c(-Inf, -1, 0, 1, Inf, NA, NaN,1i), "*" ) # extended arithmetic
"
==== Factors in regression ====
<math> Y = X \beta + \epsilon </math>
* A factor with k levels generates k-1 columns in the X matrix
"
model.matrix( prestige ~ incomes + type, Prestige) # creates the X matrix
z <- model.matrix( prestige ~ incomes + type, Prestige)
some(z)
z$incomes # ERROR because a matrix is not a data frame
z <- as.data.frame(z) # turns matrix into a data frame [Example of coercion]
some( z )
z$incomes # a data frame contains variables
"
==== Merging data frames ====
"
# merging two data frames:
z$id <- rownames(z) # create an id variable for indexing in z
Prestige$id <- rownames(Prestige) # corresponding id for Prestige
zm <- merge( z, Prestige[,c('id','type')], by = 'id') # merges on common var 'id'
# note that the name of the variable here must be quoted!!!
# Recall: Functions may require a variable to be referenced:
# by name in quotes
# by name without quotes
# using a formula
# sometimes more than one will work, often only one
# Sorry!!! but good to know so it's easier to get out of dead ends
some( zm )
# Note dummy (indicator) variable for typeprof and typewc
"
==== Prediction data frames ====
"
# prediction data frame
# values for which we want to predict prestige
pred <- expand.grid( type = levels(Prestige$type), incomes = seq(15,185,10))
some( pred ) # all combination, good to use 'levels' to make sure in correct order
pred$y <- predict( prestige.add, newdata = pred)
some( pred )
"
==== For loop ====
"
for ( nn in levels(pred$type)) {
lines( y ~ incomes, pred, subset = type == nn, col = type)
}
"
==== lapply (better) ====
* <tt>lapply( list, FUN)</tt> applies function FUN to each element of list or vector
"
lapply( levels(pred$type), function(x){
lines( y ~ incomes, pred, subset = type == x, col = type, lwd =2)
})
"
=== Linking numbers with pictures and answers with questions ===
* Most statistical output answers questions you don't want to ask and doesn't answer the questions you should ask
* Linking the numbers with the graphs is a ideal test of understanding
* Interpreting coefficients for factor indicators: comparisons with the reference level -- the level that doesn't appear
"
summary( prestige.add )
################################ Show where the numbers appear on the graph
"
==== A colorful[sic] digression ====
"
# Easier in 3d
# BUT: need at most two numerical predictors and one factor -- here only one numerical so add any other
Plot3d( prestige ~ incomes + education | type, Prestige) # note categorical predictor after "|"
# changing colors:
colors()
pals() # palettes of all colors
pal(c('blue','skyblue','yellow4'))
pal(grepv('blue',colors()))
blues <- grepv( 'blue', colors())
length(blues)
Blues <- list(blues[1:33], blues[-(1:33)])
Blues
lapply( Blues, pal) # example lapply
# choose some colors:
Plot3d( prestige ~ incomes + education | type, Prestige, # note categorical predictor after "|"
col = c('gray90','red','blue'))
prestige.add <- lm( prestige ~ incomes + type, Prestige)
summary(prestige.add)
Fit3d(prestige.add)
Id3d()
# Exercise: explore other models}
#
Prestige$incomes <- sqrt( Prestige$income )
prestige.add <- lm( prestige ~ incomes + type, Prestige)
summary(prestige.add)
# regular graphics
"
==== Basic plotting ====
"
plot( prestige ~ incomes, Prestige, col = type)
plot( prestige ~ incomes, Prestige, col = type, pch = 16)
plot( prestige ~ incomes, Prestige, col = type, cex = 1.5, lwd = 2)
plot( prestige ~ incomes, Prestige, col = c('red','blue','magenta')[type], cex = 1.5, lwd = 2)
plot( prestige ~ incomes, Prestige, col = type, cex = 1.5, lwd = 2, axes = FALSE)
axis(1, at = seq(20,160,by=20), labels = seq(20,160,by=20)^2)
axis(2, at = seq(20,80,by=20))
box()
abline( h = seq(20,80,20), lty = 2, col ='gray')
abline( v = seq(20,160,20), lty = 2, col ='gray')
"
==== Note on factors ====
* R's way of representing categorical data for analysis
* reading a data frame automatically turns a character variable into a factor
* example: type
"
Prestige$type
str( Prestige$type )
unclass( Prestige$type ) # raw internal representation
# internally it's integers
# but it prints as character
as.character( Prestige$type )
unclass( as.character( Prestige$type )) # this is really a character var.
# in some ways factors are numeric, in others character
f.ex <- factor( c('One','Two','Three','Four','One','Two'))
f.ex # note that levels are in lexicographical (alpha) order by default
unclass(f.ex)
tab( f.ex)
letters
letters[f.ex] # when indexing, f.ex acts as a number and uses its **codes**
f.ex == "Three" # in logical operations, as a character
f.ex[1:2] # when subsetting, it remembers its original **levels**
f.ex[1:2, drop = T] # unless you ask
# reordering a factor
f.ex.ro <- factor( f.ex, levels = c('One','Two','Three','Four'))
f.ex.ro
letters[f.ex.ro]
outer( f.ex, f.ex.ro, "==") # applies function '==' to all pairs
z <- outer(f.ex, f.ex.ro, "==") #
dimnames(z) <- list(f.ex, f.ex.ro)
z # shows that == is applied to levels, not codes
"
==== Quick programs in R ====
* It's easy to turn a good idea into a function
"
# turn a good idea into a function:
out # make sure it is not already used
out <- function( x, y, FUN ){
# Wrap outer() so the result matrix carries the input values as
# row/column labels, making pairwise truth/arithmetic tables readable.
# x, y : vectors whose elements label rows/columns (the `dimnames<-`
#        assignment coerces them to character; factors show levels)
# FUN  : vectorized two-argument function applied to every (x[i], y[j])
# Returns a length(x) x length(y) matrix with x/y as dimnames.
ret <- outer( x, y, FUN)
dimnames( ret ) <- list( x, y)
ret # value returned by a function is 'exiting' line
}
out
out( f.ex, f.ex.ro, `==`) # uses levels, not codes
out( f.ex, f.ex.ro, `<`) # < not meaningful for factors
out( as.character(f.ex), as.character(f.ex.ro), `<`) # BUT it IS meaningful for characters!!
# Useful for lots of stuff
out( c(TRUE,FALSE,NA),c(TRUE,FALSE,NA), "|") # 3-valued logic in R
out( c(TRUE,FALSE,NA),c(TRUE,FALSE,NA), "&") # 3-valued logic in R
out( c(-Inf, -1, 0, 1, Inf, NA, NaN, 1i),c(-Inf, -1, 0, 1, Inf, NA, NaN,1i), "+" ) # extended arithmetic
out( c(-Inf, -1, 0, 1, Inf, NA, NaN, 1i),c(-Inf, -1, 0, 1, Inf, NA, NaN,1i), "*" ) # extended arithmetic
"
==== Factors in regression ====
<math> Y = X \beta + \epsilon </math>
* A factor with k levels generates k-1 columns in the X matrix
"
model.matrix( prestige ~ incomes + type, Prestige) # creates the X matrix
z <- model.matrix( prestige ~ incomes + type, Prestige)
some(z)
z$incomes # ERROR because a matrix is not a data frame
z <- as.data.frame(z) # turns matrix into a data frame [Example of coercion]
some( z )
z$incomes # a data frame contains variables
"
==== Merging data frames ====
"
# merging two data frames:
z$id <- rownames(z) # create an id variable for indexing in z
Prestige$id <- rownames(Prestige) # corresponding id for Prestige
zm <- merge( z, Prestige[,c('id','type')], by = 'id') # merges on common var 'id'
# note that the name of the variable here must be quoted!!!
# Recall: Functions may required a variable to be referenced:
# by name in quotes
# by name without quotes
# using a formula
# sometimes more than one will work, often only one
# Sorry!!! but good to know so it's easier to get out of dead ends
some( zm )
# Note dummy (indicator) variable for typeprof and typewc
"
==== Prediction data frames ====
"
# prediction data frame
# values for which we want to predict prestige
pred <- expand.grid( type = levels(Prestige$type), incomes = seq(15,185,10))
some( pred ) # all combination, good to use 'levels' to make sure in correct order
pred$y <- predict( prestige.add, newdata = pred)
some( pred )
"
==== For loop ====
"
for ( nn in levels(pred$type)) {
lines( y ~ incomes, pred, subset = type == nn, col = type)
}
"
==== lapply (better) ====
* <tt>lapply( list, FUN)</tt> applies function FUN to each element of list or vector
"
lapply( levels(pred$type), function(x){
lines( y ~ incomes, pred, subset = type == x, col = type, lwd =2)
})
"
=== Linking numbers with pictures and answers with questions ===
* Most statistical output answers questions you don't want to ask and doesn't answer the questions you should ask
* Linking the numbers with the graphs is a ideal test of understanding
* Interpreting coefficients for factor indicators: comparisons with the reference level -- the level that doesn't appear
"
summary( prestige.add )
################################ Show where the numbers appear on the graph
"
==== A colorful[sic] digression ====
"
# Easier in 3d
# BUT: need at most two numerical predictors and one factor -- here only one numerical so add any other
Plot3d( prestige ~ incomes + education | type, Prestige) # note categorical predictor after "|"
# changing colors:
colors()
pals() # palettes of all colors
pal(c('blue','skyblue','yellow4'))
pal(grepv('blue',colors()))
blues <- grepv( 'blue', colors())
length(blues)
Blues <- list(blues[1:33], blues[-(1:33)])
Blues
lapply( Blues, pal) # example lapply
# choose some colors:
Plot3d( prestige ~ incomes + education | type, Prestige, # note categorical predictor after "|"
col = c('gray90','red','blue'))
prestige.add <- lm( prestige ~ incomes + type, Prestige)
summary(prestige.add)
Fit3d(prestige.add)
Id3d()
# Exercise: explore other models
######################################################################################### END OF REPEAT
# The summary graphics
#################################################################################
# NOTE(review): the line above was bare (un-commented) text, which made this
# file unparseable by source(); it is now a comment.
# NOTE(review): the dangling assignment below used to consume the first
# xyplot() call (R parsed `pred$prestige <- xyplot(...)`), which was almost
# certainly unintended scratch -- commented out so both plots render.
# pred$prestige <-
xyplot( prestige ~ incomes , Prestige, groups = type)
xyplot( prestige ~ incomes , Prestige, groups = type)
# NOTE(review): `prestige.int` is never defined in this script -- these look
# like leftovers from an interaction-model exercise; confirm the intended
# model fit before running the next four lines.
wald( prestige.int, ":")
wald( prestige.int, "en:")
Plot3d( income ~ sqrt(income)*log(income)|type, Prestige)
Fit3d( prestige.int )
plot(income.mod)
# dummy regression
Prestige$type # a factor
class(Prestige$type)
str(Prestige$type) # structure
sapply(Prestige, class) # sapply applies the 'class' function to each variable in Prestige
Prestige.2 <- na.omit(Prestige) # filter out missing data
nrow(Prestige)
nrow(Prestige.2)
levels(Prestige.2$type)
Prestige.2$type <- with(Prestige.2, factor(type, levels=c("bc", "wc", "prof"))) # reorder levels
Prestige.2$type
# generating contrasts from factors
getOption("contrasts")
contrasts(Prestige.2$type)
model.matrix(~ type, data=Prestige.2)
contrasts(Prestige.2$type) <- contr.treatment(levels(Prestige.2$type), base=2) # changing baseline category
contrasts(Prestige.2$type)
contrasts(Prestige.2$type) <- "contr.helmert" # Helmert contrasts
contrasts(Prestige.2$type)
contrasts(Prestige.2$type) <- "contr.sum" # "deviation" contrasts
contrasts(Prestige.2$type)
contrasts(Prestige.2$type) <- NULL # back to default
Prestige.2$type.ord <- ordered(Prestige.2$type, levels=c("bc", "wc", "prof")) # ordered factor
Prestige.2$type.ord
round(contrasts(Prestige.2$type.ord), 3) # orthogonal polynomial contrasts
prestige.mod.1 <- lm(prestige ~ log2(income) + education + type, data=Prestige.2)
summary(prestige.mod.1)
anova(prestige.mod.1) # sequential ("type-I") tests
prestige.mod.0 <- lm(prestige ~ income + education, data=Prestige.2) # note: NA's filtered!
summary(prestige.mod.0)
prestige.mod.0 <- update(prestige.mod.1, . ~ . - type) # equivalent [in a formula '-' means remove]
anova(prestige.mod.0, prestige.mod.1) # incremental F-test
Anova(prestige.mod.1) # "type-II" tests
prestige.mod.3 <- update(prestige.mod.1,
. ~ . + log2(income):type + education:type) # adding interactions
summary(prestige.mod.3)
Anova(prestige.mod.3)
# Equivalent specifications of the model with both interactions.
# NOTE(review): fixed `log2(income*type)` -> `log2(income)*type`: the
# original multiplied a numeric by a factor *inside* the transform
# ("*" is not meaningful for factors, yielding NA), so the fit failed;
# the surrounding comment ("equivalent specifications") shows the
# crossing was meant to be on the model-formula level.
lm(prestige ~ log2(income)*type + education*type, data=Prestige.2) # equivalent specifications
lm(prestige ~ (log2(income) + education)*type, data=Prestige.2)
# effect displays
library(effects)
plot(allEffects(prestige.mod.3), ask=FALSE)
# Anova Models
some(Moore)
Moore$fcategory <- factor(Moore$fcategory, levels=c("low", "medium", "high"))
Moore$partner.status <- relevel(Moore$partner.status, ref="low")
xtabs(~ fcategory + partner.status, data=Moore)
with(Moore, tapply(conformity,
list(Authoritarianism=fcategory, "Partner's Status"=partner.status),
mean))
with(Moore, tapply(conformity,
list(Authoritarianism=fcategory, "Partner's Status"=partner.status),
sd))
# graph of means:
with(Moore, {
interaction.plot(fcategory, partner.status, conformity, type="b",
pch=c(1, 16), cex=2, ylim=range(conformity))
points(jitter(as.numeric(fcategory), factor=0.5), conformity,
pch=ifelse(partner.status == "low", "L", "H"))
identify(fcategory, conformity)
})
# ANOVA tables
contr <- options(contrasts=c("contr.sum", "contr.poly")) # contr.sum = deviation contrasts
moore.mod <- lm(conformity ~ fcategory*partner.status, data=Moore)
summary(moore.mod)
Anova(moore.mod) # type II sums of squares
Anova(moore.mod, type="III") # type III sums of squares
options(contr) # restore defaults
# more on lm
args(lm)
some(Davis)
lm(weight ~ repwt, data=Davis, subset=sex == "F") # observation selection (women only)
lm(weight ~ repwt, data=Davis, subset=1:100)
lm(prestige ~ income + education, data=Duncan, subset=-c(6, 16))
lm(conformity ~ partner.status*fcategory, # specifying contrasts
contrasts=list(partner.status=contr.sum, fcategory=contr.poly),
data=Moore)
lm(100*conformity/40 ~ partner.status*fcategory, data=Moore) # data argument; note computation of y
lm(prestige~I(income + education), data=Duncan) # "protecting" expresssion on RHS of the model
# Generalized linear models
# binary logit model
some(Mroz)
mroz.mod <- glm(lfp ~ k5 + k618 + age + wc + hc + lwg + inc,
data=Mroz, family=binomial)
summary(mroz.mod)
round(exp(cbind(Estimate=coef(mroz.mod), confint(mroz.mod))), 2) # odds ratios
mroz.mod.2 <- update(mroz.mod, . ~ . - k5 - k618)
anova(mroz.mod.2, mroz.mod, test="Chisq") # likelihood-ratio test
Anova(mroz.mod) # analysis-of-deviance table
plot(allEffects(mroz.mod), ask=FALSE)
# Poisson regression
some(Ornstein)
nrow(Ornstein)
(tab <- xtabs(~interlocks, data=Ornstein))
x <- as.numeric(names(tab)) # the names are the distinct values of interlocks
plot(x, tab, type="h", xlab="Number of Interlocks", ylab="Frequency")
points(x, tab, pch=16)
mod.ornstein <- glm(interlocks ~ log2(assets) + nation + sector,
family=poisson, data=Ornstein)
summary(mod.ornstein)
Anova(mod.ornstein)
# quasi-Poisson model, allowing for overdispersion
mod.ornstein.q <- update(mod.ornstein, family=quasipoisson)
summary(mod.ornstein.q)
plot(allEffects(mod.ornstein.q, default.levels=50), ask=FALSE)
# repeated-measures ANOVA and MANOVA
some(OBrienKaiser)
?OBrienKaiser
contrasts(OBrienKaiser$treatment)
contrasts(OBrienKaiser$gender)
# defining the within-subjects design
phase <- factor(rep(c("pretest", "posttest", "followup"), c(5, 5, 5)),
levels=c("pretest", "posttest", "followup"))
hour <- ordered(rep(1:5, 3))
idata <- data.frame(phase, hour)
idata
# fitting the multivariate linear model
mod.ok <- lm(cbind(pre.1, pre.2, pre.3, pre.4, pre.5,
post.1, post.2, post.3, post.4, post.5,
fup.1, fup.2, fup.3, fup.4, fup.5) ~ treatment*gender,
data=OBrienKaiser)
mod.ok
# multivariate and univariate tests
(av.ok <- Anova(mod.ok, idata=idata, idesign=~phase*hour))
summary(av.ok)
# graphing the means
# reshape the data from "wide" to "long"
OBrien.long <- reshape(OBrienKaiser,
varying=c("pre.1", "pre.2", "pre.3", "pre.4", "pre.5",
"post.1", "post.2", "post.3", "post.4", "post.5",
"fup.1", "fup.2", "fup.3", "fup.4", "fup.5"),
v.names="score",
timevar="phase.hour", direction="long")
OBrien.long$phase <- ordered(c("pre", "post", "fup")[1 + ((OBrien.long$phase.hour - 1) %/% 5)],
levels=c("pre", "post", "fup"))
OBrien.long$hour <- ordered(1 + ((OBrien.long$phase.hour - 1) %% 5))
dim(OBrien.long)
head(OBrien.long, 25) # first 25 rows
# compute means
Means <- as.data.frame(ftable(with(OBrien.long,
tapply(score, list(treatment=treatment, gender=gender, phase=phase, hour=hour), mean))))
names(Means)[5] <- "score"
dim(Means)
head(Means, 25)
# graph of means
library(lattice)
xyplot(score ~ hour | phase + treatment, groups=gender, type="b",
strip=function(...) strip.default(strip.names=c(TRUE, TRUE), ...),
ylab="Mean Score", data=Means, auto.key=list(title="Gender", cex.title=1))
###############
# TODO: factor and numerical
# TODO: estimate using stuff in Lab 3
###################
|
1b970f163c6ae5e7eac6f512aa56d45c6e5bb607
|
25bb3d517b8b5b847b184aac8b0e12320cb265b0
|
/somatic_germline_overview/1_somatic_germline_overlap_Figure1D.R
|
67d108503e8073e792b70e513447562c3cbb78c0
|
[] |
no_license
|
tao-qing/DDRImmune
|
922128fac03f9be34f0ea30cdf4db5d73a36e4a8
|
b6294541ce5402b8bf77eb7cb057019b7e4d0316
|
refs/heads/master
| 2023-07-15T05:53:12.508020
| 2021-08-27T01:29:08
| 2021-08-27T01:29:08
| 346,764,696
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,106
|
r
|
1_somatic_germline_overlap_Figure1D.R
|
##### somatic_germline_overlap.R #####
# Find overlap of genes/variants for somatic/germline variants
setwd("/Users/qingtao/Box Sync/GermlineSomatic/analysis/somatic_germline_overlap/")
#epig<-data.frame(readxl::read_xlsx("/Users/qingtao/Box Sync/GermlineSomatic/Huang_lab_data/TCGA_PanCanAtlas_2018/DDR_Knijnenburg_CellReport2018/TCGA_DDR_Data_Resources.xlsx",sheet = "DDR epigenetic silencing",col_names =F))
#epig<-epig[,-1]
#epig=t(epig)
#colnames(epig)=epig[1,]
#epig=epig[-1,]
overlapsplrmhyper<-read.table("/Users/qingtao/Box Sync/GermlineSomatic/Huang_lab_data/TCGA_PanCanAtlas_2018/clinical/all_overlaped_samples_removehypermutator_n9738.txt")[,1]
#all core DDR genes
ddr=read.csv("../../Huang_lab_data/TCGA_PanCanAtlas_2018/DDR_Knijnenburg_CellReport2018/DDR_Pathways.csv",h=T)
ddrgene=unique(as.character(unlist(ddr)))[-1]
#ddrgene=c("MLH1","MSH2","MSH3","MSH6","PMS1","PMS2")
#ddrepig=as.data.frame(epig[,c("TCGA Sample (tumor type abbr. below)","Gene Symbol",ddrgene)])
#colnames(ddrepig)[c(1,2)]=c("bcr_patient_barcode","cancer")
#ddrepig$bcr_patient_barcode=as.character(ddrepig$bcr_patient_barcode)
#ddrepig$cancer=as.character(ddrepig$cancer)
#apply(ddrepig[,c(2:82)],2,function(x)sum(as.numeric(as.matrix(x[-which(is.na(x))]))))
#DDR genes
#ddr_genes=gene_lists$Nanostring_categories$DNA_repair#read.table("/Users/qingtao/Box Sync/GermlineSomatic/Huang_lab_data/TCGA_PanCanAtlas_2018/DDR_Knijnenburg_CellReport2018/TCGA_DDR_Data_Resources/Genes.tsv",h=F)[,1]
### dependencies ###
source("../global_aes_out.R")
source("../dependency_files_tq.R")
source("../load_somatic_column.R")
rmhypermutator=TRUE
pathVarP=pathVarP[which(pathVarP$bcr_patient_barcode%in%overlapsplrmhyper),]
somatic_likelyfunctional_driver=somatic_likelyfunctional_driver[which(somatic_likelyfunctional_driver$bcr_patient_barcode%in%overlapsplrmhyper),]
#germline and somatic overlap
# Venn diagrams of (1) mutated genes (germline vs somatic vs core DDR set)
# and (2) exact variants, keyed as chrom|start|end|ref|alt strings.
somatic_gene = unique(somatic_likelyfunctional_driver$Hugo_Symbol)
germline_gene = unique(pathVarP$HUGO_Symbol)
# Variant keys are built row by row; apply() coerces each row to character,
# and as.numeric() normalizes the position fields before pasting.
somatic_mut = unique(apply(somatic_likelyfunctional_driver,1,function(x)paste(c(x["Chromosome"],as.numeric(x["Start_Position"]),as.numeric(x["End_Position"]),x["Reference_Allele"],x["Tumor_Seq_Allele2"]),collapse = "|")))
germline_mut =unique(apply(pathVarP,1,function(x)paste(c(x["Chromosome"],as.numeric(x["Start"]),as.numeric(x["Stop"]),x["Reference"],x["Alternate"]),collapse = "|")))
library(VennDiagram)
library(grDevices)
par(mar=c(5,5,5,5))
# Three-set Venn: germline genes / somatic genes / DDR genes.
# filename=NULL returns a grid object instead of writing a TIFF, so it can be
# drawn into the PDF device below.
tmp=venn.diagram(
x = list(germline_gene,somatic_gene,ddrgene),
category.names = c("" , "",""),
filename =NULL, #'./out/Figure1A_germline_somatic_overlapgene.tiff',
output = TRUE ,
imagetype="tiff" ,
height = 600,
width = 600,
resolution = 300,
compression = "lzw",
lwd = 2,
lty = 'blank',
fill = c('purple', 'green',"orange"),
cex = 1,
fontface = "bold",
fontfamily = "sans",
cat.cex = 0.6,
cat.fontface = "bold",
cat.default.pos = "outer"#,
#cat.pos = c(-27,135),
#cat.dist = c(0.055, 0.085),
#rotation = 1
)
#pdf(file="./out/Figure1A_germline_somatic_overlapgene_rmhypermutator.pdf",height=2,width=2)
pdf(file="./out/Figure1A_germline_somatic_overlapgene.pdf",height=2,width=2)
grid.draw(tmp)
dev.off()
#mut
# Two-set Venn: somatic vs germline variant keys.
library(VennDiagram)
library(grDevices)
par(mar=c(5,5,5,5))
tmp=venn.diagram(
x = list(somatic_mut,germline_mut),
category.names = c("" , ""),
filename =NULL, # './out/Figure1A_germline_somatic_overlapMutations.tiff',
output = TRUE ,
imagetype="tiff" ,
height = 600,
width = 600,
resolution = 300,
compression = "lzw",
lwd = 2,
lty = 'blank',
fill = c('orange', 'lightblue'),
cex = 1,
fontface = "bold",
fontfamily = "sans",
cat.cex = 0.6,
cat.fontface = "bold",
cat.default.pos = "outer"#,
#cat.pos = c(-27,135),
#cat.dist = c(0.055, 0.085),
#rotation = 1
)
#pdf(file="./out/Figure1A_germline_somatic_overlapMutations_rmhypermutator.pdf",height=2,width=2)
pdf(file="./out/Figure1A_germline_somatic_overlapMutations.pdf",height=2,width=2)
grid.draw(tmp)
dev.off()
# counts of somatic functional mutation by gene
# Per-gene counts of pathogenic germline variants vs predicted-functional
# somatic mutations, scatter-plotted with DDR-pathway coloring (Figure 1D).
somatic_gene_count = data.frame(table(somatic_likelyfunctional_driver$Hugo_Symbol))
germline_gene_count = data.frame(table(pathVarP$HUGO_Symbol))
colnames(somatic_gene_count) = c("Gene","PredictedFunctionalSomaticMutationCount")
colnames(germline_gene_count) = c("Gene","PathogenicGermlineVariantCount")
# Outer merge keeps genes present in only one of the two sets; absent counts
# become 0.
gene_count = merge(somatic_gene_count,germline_gene_count,by="Gene",all=TRUE)
gene_count[is.na(gene_count)] = 0
# Genes to label on the plot (count thresholds chosen for figure readability).
highlight_g = as.character(gene_count$Gene[gene_count$PredictedFunctionalSomaticMutationCount > 400 | gene_count$PathogenicGermlineVariantCount > 10 | (gene_count$PredictedFunctionalSomaticMutationCount > 140 & gene_count$PathogenicGermlineVariantCount > 3)])
# Drop the hand-picked exclusions. Use logical negation instead of
# x[-which(...)]: the latter silently returns an EMPTY vector when nothing
# matches, because x[-integer(0)] selects zero elements.
highlight_g = highlight_g[!highlight_g %in% c("EXT2","POT1","PRDM9","RECQL","COL7A1","GJB2")]
#core DDR pathway
ddr=as.data.frame(read.csv("../../Huang_lab_data/TCGA_PanCanAtlas_2018/DDR_Knijnenburg_CellReport2018/DDR_Pathways.csv",h=T))
# Assign each gene a pathway label; later assignments overwrite earlier ones,
# so the order below determines precedence for genes in several pathways.
gene_count$GeneClass="Other genes"
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Base.Excision.Repair..BER.))[-1]] = "Base Excision Repair"
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Nucleotide.Excision.Repair..NER..including.TC.NER.and.GC.NER..))[-1]] = "Nucleotide Excision Repair"
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Mismatch.Repair..MMR.))[-1]] = "Mismatch Repair"
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Fanconi.Anemia..FA.))[-1]] = "Fanconi Anemia"
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Homologous.Recomination..HR.))[-1]] = "Homologous Recomination"
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Non.homologous.End.Joining..NHEJ.))[-1]] = "Nonhomologous End Joining"
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Direct.Repair..DR.))[-1]] = "Direct Repair"
# NOTE(review): TLS genes are deliberately folded into "Other genes" here,
# yet "Translesion Synthesis" still appears in the factor levels and the
# color map below -- confirm that is intended.
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Translesion.Synthesis..TLS.))[-1]] = "Other genes"
gene_count$GeneClass[gene_count$Gene %in% unique(as.character(ddr$Damage.Sensor.etc.))[-1]] = "Damage Sensor"
gene_count$GeneClass=factor(gene_count$GeneClass,levels=c("Homologous Recomination","Mismatch Repair","Nucleotide Excision Repair","Damage Sensor","Fanconi Anemia","Direct Repair","Translesion Synthesis","Other genes"))
colors = c("#ED2891","#C1A72F", "#FAD2D9","#F6B667","#97D1A9", "#B2509E", "#3953A4", "#007EB5")#,"#B2509E","#97D1A9","#ED1C24"
names(colors) =c("Homologous Recomination","Mismatch Repair","Nucleotide Excision Repair","Damage Sensor","Fanconi Anemia","Direct Repair","Translesion Synthesis","Other genes")
p = ggplot(gene_count,aes(x=PredictedFunctionalSomaticMutationCount, y =PathogenicGermlineVariantCount, color = GeneClass))
p = p + geom_point(stroke=0,alpha = 0.2) + theme_bw()
# Only highlighted genes get labels; all others map to NA (no label).
p = p + geom_text_repel(aes(label=ifelse(as.character(Gene) %in% highlight_g,as.character(Gene), NA)),cex=6,min.segment.length = 0)
p = p + theme(legend.position = c(0.74, 0.60),legend.text=element_text(size=16),legend.title=element_text(size=20),axis.title = element_text(size=20), axis.text.x = element_text(colour="black", size=20,vjust=0.5), axis.text.y = element_text(colour="black", size=20))#element_text(colour="black", size=14))
p = p +scale_x_log10() + scale_y_log10()
p = p + expand_limits(x = 0,y=0) + ylim(0,100)+ xlim(0,800)
p = p + xlab("Somatic Variant Count") + ylab("Germline Variant Count")
p = p + scale_color_manual("DDR Pathways",values =colors)
p
fn = "./out/somatic_vs_germline_var_counts_DDR_genes_allsample.pdf"
ggsave(file=fn, width=12, h =4, useDingbats=FALSE)
#######mutation frequency COADREAD-MIS-H, MSI-L####################
# Figure 1E: per-cancer percentage of samples carrying a DDR mutation
# (germline pathogenic, somatic predicted-functional, or either).
# NOTE(review): the earlier MSI-stratification code for COADREAD was disabled
# upstream; `typemsi` is now simply a copy of `type`.
clin$typemsi=clin$type
# Denominator: samples per cancer type among the overlap cohort.
allspl=table(clin$typemsi[clin$bcr_patient_barcode%in%overlapsplrmhyper])
freqMat=as.data.frame(cbind(Cancer=names(allspl),SampleSize=allspl))
freqMat$Cancer=as.character(freqMat$Cancer)
freqMat$SampleSize=as.numeric(as.matrix(freqMat$SampleSize))
#gfrequency
# Germline carriers: one row per patient. !duplicated() is a no-op when there
# are no duplicates, unlike x[-which(duplicated(x)),] which would drop every
# row in that case.
tmp0=pathVarP[which(pathVarP$HUGO_Symbol%in%ddrgene),]
tmp0=tmp0[!duplicated(tmp0$bcr_patient_barcode),]
gfreq=table(tmp0$cancer)
#sfrequency
somatic_likelyfunctional_driver=somatic_likelyfunctional_driver[somatic_likelyfunctional_driver$bcr_patient_barcode%in%overlapsplrmhyper,]
# match() returns the first hit per barcode (assumes barcodes are unique in
# clin -- verify); this replaces an O(n^2) sapply(which()) lookup.
somatic_likelyfunctional_driver$cancer=clin$typemsi[match(somatic_likelyfunctional_driver$bcr_patient_barcode,clin$bcr_patient_barcode)]
tmp1=somatic_likelyfunctional_driver[which(somatic_likelyfunctional_driver$Hugo_Symbol%in%ddrgene),]
# BUGFIX: the original tmp1[-which(duplicated(...)),] would return ZERO rows
# whenever no barcode was duplicated; !duplicated() keeps all unique carriers.
tmp1=tmp1[!duplicated(tmp1$bcr_patient_barcode),]
sfreq=table(tmp1$cancer)
#g+s frequency
# NOTE(review): samples carrying both a germline and a somatic DDR mutation
# are counted twice in `mfreq` (the dedup line was commented out in the
# original) -- confirm that double-counting is intended.
mtmp0=tmp0[,c("bcr_patient_barcode","cancer")]
mtmp1=tmp1[,c("Tumor_Sample_Barcode","cancer")]
colnames(mtmp0)=colnames(mtmp1)=c("Tumor_Sample_Barcode","cancer")
mtmp=rbind(mtmp0,mtmp1)
mfreq=table(mtmp$cancer)
# Convert carrier counts to percentages of each cancer's sample size.
freqMat$Germline=as.numeric((gfreq[freqMat$Cancer]/freqMat$SampleSize)*100)
freqMat$Somatic=as.numeric((sfreq[freqMat$Cancer]/freqMat$SampleSize)*100)
freqMat$Merge=as.numeric((mfreq[freqMat$Cancer]/freqMat$SampleSize)*100)
library(reshape2)
plotMat=melt(freqMat,id=c("Cancer","SampleSize"))
plotMat$Label=paste0(plotMat$Cancer," (",plotMat$SampleSize,")")
# Order the cancers on the x axis by germline frequency, descending.
tmp3=plotMat[plotMat$variable=="Germline",]
or=tmp3$Label[order(tmp3$value,decreasing=TRUE)]
plotMat$Label=factor(plotMat$Label,levels=or)
colnames(plotMat)=gsub("variable","Type",colnames(plotMat))
p= ggplot(plotMat,aes(y=as.numeric(as.matrix(value)),x=Label,fill=Type)) + geom_bar(stat="identity",position='dodge')
#p= p+ geom_text(aes(label=Counts), hjust=1, size=3)
p = p + theme_bw()
p = p + theme(legend.position = c(0.85, 0.85),axis.text.x = element_text(colour="black", size=14, angle=90, vjust = 0.5,hjust = 0.95), axis.text.y = element_text(colour="black", size=14,hjust = 0.95),axis.ticks = element_blank(),plot.title = element_text(hjust = 0,size=16,face="bold"),axis.title=element_text(size=14,face="bold"),panel.border = element_blank(),axis.line= element_line(color='white'),panel.grid.major = element_blank(), panel.grid.minor = element_blank())
p = p + labs(title="Samples affected by DDR mutations",y="Percentage",x="Cancer")
p
fn = "out/Figure1E_somaticgermline_frequency_of_DDR_affected_samples_MSI.pdf"
ggsave(file=fn, width=12, h =4, useDingbats=FALSE)
####################Correlated with Overall Response Rate###################
# Correlate per-cancer DDR mutation frequencies (from freqMat above) with
# published immunotherapy overall response rates (ORR) and plot the somatic
# frequency vs ORR relationship.
ORR<-data.frame(readxl::read_xlsx("./Clone_ORR.xlsx",sheet = "Sheet2"))
#ORR$types=gsub("COAD_MSI","COADREAD-MSI",gsub("COAD_MSS","COADREAD-MSS",ORR$types))
#ORR$types=gsub("COAD_MSI","COADREAD",gsub("COAD_MSS","COADREAD",ORR$types))
# Harmonize cancer-type labels with freqMat$Cancer.
ORR$types=gsub("COAD","COADREAD",ORR$types)
ORR$ORR=ORR$ORR*100
# NOTE(review): pmatch() does PARTIAL matching and returns NA on ambiguous
# prefixes; match() would be safer if the labels are exact -- verify.
ORR$gFreq=freqMat$Germline[pmatch(ORR$types,freqMat$Cancer)]
ORR$sFreq=freqMat$Somatic[pmatch(ORR$types,freqMat$Cancer)]
ORR$mFreq=freqMat$Merge[pmatch(ORR$types,freqMat$Cancer)]
# Quick interactive checks of the somatic-frequency correlation.
plot(ORR$sFreq,ORR$ORR)
round(cor.test(ORR$sFreq,ORR$ORR)$p.value,digits = 2)
cc=round(cor(ORR$sFreq,ORR$ORR),digits = 2)
pvalue=round(cor.test(ORR$sFreq,ORR$ORR)$p.value,digits = 2)
# Scatter of somatic DDR mutation frequency vs ORR with a linear fit.
p = ggplot(ORR,aes(y=ORR, x =sFreq))
p = p + geom_point(stroke=0,alpha = 0.2)+geom_smooth(method = lm)+geom_text(aes(label=types),cex=3) + theme_bw()#+xlim(0,20)+ylim(0,40)
p = p + geom_abline(intercept = 0, slope=1, alpha=0.2)
#p = p + geom_text_repel(aes(label=types))
p = p + theme(legend.position = c(0.74, 0.78),axis.title = element_text(size=16), axis.text.x = element_text(colour="black", size=14,vjust=0.5), axis.text.y = element_text(colour="black", size=14))
#p = p + scale_x_log10() + scale_y_log10()
#p = p + expand_limits(x = 0,y=0) + ylim(0,1100)
p=p+ggtitle(paste0())
p = p + xlab("Percentage of sample with DDR somatic mutation (%)") + ylab("Overall response rate (%)")
# `th` is presumably a shared theme object from the sourced aesthetics
# script (global_aes_out.R) -- verify.
p=p+th
p
fn = "out/somatic_ddr_mutation_vs_overallresponserate.pdf"
ggsave(file=fn, width=5, h =4, useDingbats=FALSE)
# Exploratory correlations for somatic, germline and merged frequencies.
plot(ORR$sFreq,ORR$ORR)
cor(ORR$sFreq,ORR$ORR)
cor.test(ORR$sFreq,ORR$ORR)
summary(lm(ORR~sFreq,data=ORR))
plot(ORR$gFreq,ORR$ORR)
cor(ORR$gFreq,ORR$ORR)
cor.test(ORR$gFreq,ORR$ORR)
plot(ORR$mFreq,ORR$ORR)
cor(ORR$mFreq,ORR$ORR)
cor.test(ORR$mFreq,ORR$ORR)
|
aba277234ef057233ef1ba6dfa299ea75f8ceae8
|
abea0b5d000d7c01d390eeb615427bc0322aa30f
|
/src/merge/R_finish.R
|
fbd3495e17ce7d3d957d4858b374cf40eafd7279
|
[] |
no_license
|
janmandel/firewx-evaluation
|
5e176d8762f34b4e88a9446f1d898b3698abc5e5
|
51ca3c4a1c63d8c6ba00e910a87f4c87c2c0ac53
|
refs/heads/master
| 2020-05-05T01:10:49.662013
| 2017-08-24T17:40:06
| 2017-08-24T17:40:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,432
|
r
|
R_finish.R
|
############# FINISH SOLAR/FM/MERGING RAWS DATA
### STEP #1 / Finish solar radiation
## Function to correct max solar to station solar
## Fraction of clear-sky ("max") solar radiation reaching the surface for a
## given cloud-cover class:
##   <10% cloud -> 0.93, [10,50)% -> 0.80, [50,90)% -> 0.63, >=90% -> 0.25
## Vectorized over both arguments; NA cloud cover yields NA.
cloudFun = function(cloud_percent,maxsolar) {
  # findInterval() bins cloud_percent into [..,10), [10,50), [50,90), [90,..),
  # replacing the original nested ifelse() chain whose character "error"
  # branch was unreachable for any non-NA numeric input.
  frac <- c(0.93, 0.8, 0.63, 0.25)
  frac[findInterval(cloud_percent, c(10, 50, 90)) + 1] * maxsolar }
## Add solarMax to dataframe
# NOTE(review): solarMax_wm2 must already exist in the workspace (computed
# upstream of this chunk) and be row-aligned with `data` -- verify.
data = cbind(data,solarMax_wm2)
## Run solar correction function
# cloudFun() is itself vectorized, so a direct call
# cloudFun(data$cloud_cover_percent, data$solarMax_wm2) would be equivalent
# to this mapply().
solar_wm2 = mapply(cloudFun,data$cloud_cover_percent,data$solarMax_wm2)
data = cbind(data,solar_wm2)
## Fix the output order to match other data
data = data[c("station_id","station_type","data_type","lon","lat","datetime",
"air_temp_c","rh","wind_speed20ft_mps","wind_speedMid_mps","wind_direction_deg",
"cloud_cover_percent","precip_mm","solar_wm2","FM40","asp_deg","elev_m","slope_deg",
"CBD_kgm3","CBH_m","CC_percent","CH_m")]
## Save final output
setwd("/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/NDFD_Forecast_mod")
write.csv(data,file="raws2015pred_final.csv")
### STEP #2 / Finish fuel moisture / create files
# Write one Nelson dead-fuel-moisture input file (plus an empty output file)
# per station. Input columns: year month day hour min sec msec airT airRH
# solar precip stickT stickHum stickMoisture; the first row seeds the model's
# starting stick values.
setwd("/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/nelson_fms/forecast")
## Create FMS files for each station using for loop
stn = unique(data$station_id) #get unique station info
# seq_along()/seq_len() replace 1:length(): the original looped j = 1, 0 for
# a station with zero valid rows and crashed assigning into row 1.
for (i in seq_along(stn)) {
  temp = subset(data,station_id==stn[i])
  # Remove any row with a NA in it (missing temperature or RH)
  temp = temp[complete.cases(temp$air_temp_c),]
  temp = temp[complete.cases(temp$rh),]
  # Sort data by datetime
  temp = temp[with(temp,order(datetime)),]
  # Nothing to write for a station with no usable rows.
  if (nrow(temp) == 0) next
  # Break up datetime ("YYYY-MM-DD HH:MM:SS") into components; one strsplit
  # per row instead of the original five.
  temp$year = 2015
  for (j in seq_len(nrow(temp))) {
    parts = unlist(strsplit(temp$datetime[j],"[-: ]"))
    temp$month[j] = parts[2]
    temp$day[j] = parts[3]
    temp$hour[j] = parts[4]
    temp$min[j] = parts[5]
    temp$sec[j] = parts[6] }
  temp$milsec = 0
  # Make necessary unit conversions (RH % -> fraction, precip mm -> cm)
  temp$airhumidity = temp$rh/100
  temp$precip_cm = temp$precip_mm * 0.1
  # Stick temperature/humidity/moisture are unknown -> empty fields.
  # (Vectorized; replaces a per-row loop with the same end state.)
  temp$stickT = ""
  temp$stickHum = ""
  temp$moisture = ""
  # Create output file (cbind coerces everything to character)
  out = data.frame(cbind(temp$year,temp$month,temp$day,temp$hour,temp$min,temp$sec,
  temp$milsec,temp$air_temp_c,temp$airhumidity,temp$solar_wm2,temp$precip_cm,
  temp$stickT,temp$stickHum,temp$moisture))
  # Make sure order is good (X2/X3/X4 = month/day/hour; assumes zero-padded
  # components so character ordering matches chronological order -- verify)
  out = out[with(out,order(X2,X3,X4)),]
  # Seed the Nelson model's starting stick temperature (C), humidity
  # (fraction) and moisture (fraction) on the first record.
  out$X12 = as.character(out$X12)
  out$X13 = as.character(out$X13)
  out$X14 = as.character(out$X14)
  out$X12[1] = "20"
  out$X13[1] = "0.006"
  out$X14[1] = "0.05"
  # Save input file plus an empty placeholder output file for compute_dfm.
  filename = paste(stn[i],"_raws_input",".txt",sep="")
  filename.out = paste(stn[i],"_raws_out",".txt",sep="")
  X = data.frame()
  write.table(out,file=filename,sep=" ",col.names=FALSE,row.names=FALSE,quote=FALSE)
  write.table(X,file=filename.out,sep=" ",col.names=FALSE,row.names=FALSE) }
### STEP #3 / Finish fuel moisture / run files
# Run the compiled Nelson dead-fuel-moisture program (compute_dfm) on every
# station input file created in step 2, then gather all outputs into one
# data frame and save it.
### Move copy of output files to /home/wpage/Documents/firewx-evaluation/build
move.files = "cd /media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/nelson_fms/forecast && cp * /home/wpage/Documents/firewx-evaluation/src/nelson_model/build"
system(move.files)
setwd("/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/nelson_fms/forecast")
## Read-in prepared txt files (see R_nelson_files script)
files = list.files()
input.loc = grep("input",files)
## Run the Nelson dfm program / Start for loop for each input file
# seq_along() (not 1:length()) so the loop body never runs when no input
# files exist.
for (i in seq_along(input.loc)) {
# Get input file name
input = files[input.loc[i]]
# Make output file name (just to make sure)
out.name = unlist(strsplit(input,"[_]"))
out = paste(out.name[1],"_",out.name[2],"_","out.txt",sep="")
# Build call: cd into the build dir that holds the compute_dfm binary
changedir = "cd /home/wpage/Documents/firewx-evaluation/src/nelson_model/build && "
dfm = paste(changedir,"./compute_dfm ","--input_file ",input,
" --output_file ",out,sep="")
# Run the program
run = system(dfm) }
## Extract output from Nelson / Save output
# NOTE(review): `dir` shadows base::dir within this script.
dir = "/home/wpage/Documents/firewx-evaluation/src/nelson_model/build/"
out.loc = grep("out",files)
dead.fms = data.frame()
for (i in seq_along(out.loc)) {
output = files[out.loc[i]]
out.name = unlist(strsplit(output,"[_]"))[1]
fms = read.csv(paste(dir,output,sep=""),header=TRUE)
# Rebuild a datetime string and tag every row with its station id.
fms$datetime = paste(fms$month,"/",fms$day,"/",fms$year," ",fms$hour,":","00",sep="")
fms$station_id = out.name
drops = c("year","month","day","hour")
fms = fms[,!(names(fms) %in% drops)]
# NOTE(review): rbind in a loop is O(n^2); kept as-is because prepending
# determines the row order recorded in the CSV below.
dead.fms = rbind(fms,dead.fms) }
## Save the output for later
write.csv(dead.fms,file="/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/nelson_fms/deadfms_raws_pred.csv")
### STEP #4 / Merge the raws data
# Attach the Nelson dead-fuel-moisture output to the predicted weather data,
# then keep only station/datetime records present in BOTH the predicted and
# observed datasets (via an SQLite union plus a duplicate filter).
## Read-in dead FM
raws.pred.dfm = read.csv("/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/nelson_fms/deadfms_raws_pred.csv",stringsAsFactors=FALSE)
raws.pred.dfm$datetime = as.character(raws.pred.dfm$datetime)
raws.pred.dfm$datetime = strptime(raws.pred.dfm$datetime,"%m/%d/%Y %H:%M",tz="UTC")
raws.pred.dfm = raws.pred.dfm[with(raws.pred.dfm,order(station_id,datetime)),]
## Add original RAWS pred data
data = read.csv("/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/NDFD_Forecast_mod/raws2015pred_final.csv",stringsAsFactors=FALSE)
data$datetime = as.character(data$datetime)
data$datetime = strptime(data$datetime, "%Y-%m-%d %H:%M:%S",tz="UTC")
## Break-up datasets into three station chunks so each merge fits in memory.
# NOTE(review): the 460/920 chunk boundaries are hard-coded for this dataset.
stn = unique(data$station_id)
stn1 = stn[1:460]
stn2 = stn[461:920]
# BUGFIX: the upper bound must be the number of unique stations. The original
# used length(raws.pred.dfm$station_id) (the total ROW count, one row per
# station-hour), which padded stn3 with thousands of NAs.
stn3 = stn[921:length(stn)]
dfm1 = raws.pred.dfm[raws.pred.dfm$station_id %in% stn1,]
data1 = data[data$station_id %in% stn1,]
dfm2 = raws.pred.dfm[raws.pred.dfm$station_id %in% stn2,]
data2 = data[data$station_id %in% stn2,]
dfm3 = raws.pred.dfm[raws.pred.dfm$station_id %in% stn3,]
data3 = data[data$station_id %in% stn3,]
## Merge QC data with dfm (inner join on station and time)
raws2_1 = merge(data1,dfm1,by=c("station_id","datetime"))
raws2_2 = merge(data2,dfm2,by=c("station_id","datetime"))
raws2_3 = merge(data3,dfm3,by=c("station_id","datetime"))
## Clean up memory
rm(data)
rm(raws.pred.dfm)
rm(data1)
rm(data2)
rm(data3)
rm(dfm1)
rm(dfm2)
rm(dfm3)
gc()
## Combine all data frames
raws2 = rbind(raws2_1,raws2_2,raws2_3)
## Clean up memory
rm(raws2_1)
rm(raws2_2)
rm(raws2_3)
gc()
## Add live fuel moisture info
# NOTE(review): constant live fuel moistures; the values look like fractions
# of dry weight despite the *_percent column names -- confirm units.
raws2$LiveHerb_frac_percent = 1.20
raws2$LiveWood_frac_percent = 0.60
## Organize data
raws2 = raws2[c("station_id","station_type","data_type","lon","lat","datetime",
"air_temp_c","rh","wind_speed20ft_mps","wind_speedMid_mps","wind_direction_deg",
"cloud_cover_percent","precip_mm","solar_wm2","FM40","asp_deg","elev_m","slope_deg",
"CBD_kgm3","CBH_m","CC_percent","CH_m","X1hrfm","X10hrfm","X100hrfm",
"LiveHerb_frac_percent","LiveWood_frac_percent")]
## Save the output for later
write.csv(raws2,file="/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/raws_pred_temp.csv")
## Read-in formatted raws data; drop the row-name column write.csv added.
final1 = read.csv("/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/raws_pred_temp.csv")
final1 = final1[,-c(1)]
final3 = read.csv("/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS/raws_obs_temp.csv")
## Merge two datasets using SQLite
# NOTE(review): rerunning this script fails if raws_final.sqlite already
# contains these tables; delete the file (or pass overwrite=TRUE) first.
library(RSQLite)
db = dbConnect(SQLite(),dbname="raws_final.sqlite")
dbWriteTable(db,"raws_pred",final1,row.names=FALSE)
dbWriteTable(db,"raws_obs",final3,row.names=FALSE)
merge.raws = dbGetQuery(db,"SELECT * FROM raws_pred UNION SELECT * FROM raws_obs")
## Order data
merge.raws = merge.raws[with(merge.raws,order(station_id,datetime,data_type)),]
## Keep rows that have both observed and forecast info (for same time and station)
# Columns 1 and 6 are station_id and datetime; the forward and fromLast
# duplicated() passes together select EVERY member of each duplicate group.
temp1 = merge.raws[duplicated(merge.raws[c(1,6)]),]
temp2 = merge.raws[duplicated(merge.raws[c(1,6)],fromLast=TRUE),]
dbWriteTable(db,"temp1",temp1,row.names=FALSE)
dbWriteTable(db,"temp2",temp2,row.names=FALSE)
raws_final = dbGetQuery(db,"SELECT * FROM temp1 UNION SELECT * FROM temp2")
dbDisconnect(db)
## Save combined raws data frame
raws_final = raws_final[with(raws_final,order(station_id,datetime,data_type)),]
setwd("/media/wpage/Elements/Page/NDFD_Project/Weather/RAWS")
write.csv(raws_final,file="raws_final.csv",row.names=FALSE)
|
4ccc7bf4b4b1e763d64436917e6989bc1c96a8dc
|
228055717bb12cd31410cf0723be428b050602bb
|
/R/lines.regression.circular.R
|
c7414b610487282825da18e5da8c56120654cbf9
|
[] |
no_license
|
cran/NPCirc
|
a2e0f329b9e48969940e29f5dd4fb387de8e401d
|
3717249094d79c668c2b9fe53b39bab5a23521b2
|
refs/heads/master
| 2022-11-19T16:49:47.532395
| 2022-11-10T12:00:11
| 2022-11-10T12:00:11
| 17,681,103
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,313
|
r
|
lines.regression.circular.R
|
## Add a fitted circular regression curve to an existing plot.
## `x` is a regression.circular object with fitted values (x$x, x$y) and the
## original data (x$datax, x$datay). Dispatch depends on which variables are
## of class circular:
##   circular predictor, linear response  -> radial "circle" plot or flat line
##   circular predictor AND response      -> 3D torus (rgl) or flat line
##   linear predictor, circular response  -> 3D cylinder (rgl) or flat line
## Side effects only (draws into the current device); no return value.
lines.regression.circular<-function(x, plot.type=c("circle", "line"), points.plot=FALSE, rp.type="p", type="l",
line.col=1, points.col="grey", points.pch=1, units=NULL, zero=NULL, clockwise=NULL, radial.lim=NULL, plot.info=NULL, ...){
xcircularp <- attr(x$x, "circularp")
ycircularp <- attr(x$y, "circularp")
if (is.null(xcircularp) && is.null(ycircularp))
stop("the component 'x' and/or the component 'y' of the object must be of class circular")
plot.type <- match.arg(plot.type)
# Case 1: circular predictor, linear response. Convert the circular pieces
# to plain radians and, for a flat line plot, back to the requested units.
if (is.circular(x$datax) && !is.circular(x$datay)){
if (is.null(units)) units <- xcircularp$units
template <- xcircularp$template
x$x <- conversion.circular(x$x, units = "radians", modulo = "2pi")
x$datax <- conversion.circular(x$datax, units = "radians", modulo = "2pi")
# Strip the circular class/attributes so plain arithmetic applies below.
attr(x$x, "class") <- attr(x$x, "circularp") <- NULL
attr(x$datax, "class") <- attr(x$datax, "circularp") <- NULL
# NOTE(review): scalar conditions combined with `&` (works, but `&&` is the
# conventional scalar operator).
if (plot.type=="line" & units == "degrees") {
x$x <- x$x/pi * 180
x$datax <- x$datax/pi * 180
}
if (plot.type=="line" & units == "hours") {
x$x <- x$x/pi * 12
x$datax <- x$datax/pi * 12
}
# Case 2: both variables circular -> "circle" becomes a 3D torus plot.
} else if (is.circular(x$datax) && is.circular(x$datay)){
if (plot.type=="circle") plot.type <- "torus"
template <- xcircularp$template
if (is.null(units)) units <- xcircularp$units
x$x <- conversion.circular(x$x, units = "radians", modulo = "2pi")
x$datax <- conversion.circular(x$datax, units = "radians", modulo = "2pi")
attr(x$x, "class") <- attr(x$x, "circularp") <- NULL
attr(x$datax, "class") <- attr(x$datax, "circularp") <- NULL
# NOTE(review): `template` from x is immediately overwritten by y's here.
template <- ycircularp$template
x$y <- conversion.circular(x$y, units = "radians", modulo = "2pi")
x$datay <- conversion.circular(x$datay, units = "radians", modulo = "2pi")
attr(x$y, "class") <- attr(x$y, "circularp") <- NULL
attr(x$datay, "class") <- attr(x$datay, "circularp") <- NULL
# Re-center angles from [0, 2*pi) to (-pi, pi].
x$datax[x$datax>pi]<-x$datax[x$datax>pi]-2*pi
x$datay[x$datay>pi]<-x$datay[x$datay>pi]-2*pi
x$x[x$x>pi]<-x$x[x$x>pi]-2*pi
x$y[x$y>pi]<-x$y[x$y>pi]-2*pi
if (plot.type=="line" & units == "degrees") {
x$x <- x$x/pi * 180
x$datax <- x$datax/pi * 180
x$y <- x$y/pi * 180
x$datay <- x$datay/pi * 180
}
if (plot.type=="line" & units == "hours") {
x$x <- x$x/pi * 12
x$datax <- x$datax/pi * 12
x$y <- x$y/pi * 12
x$datay <- x$datay/pi * 12
}
# Case 3: linear predictor, circular response -> "circle" becomes a cylinder.
} else if (!is.circular(x$datax) && is.circular(x$datay)){
if (plot.type=="circle") plot.type <- "cylinder"
template <- ycircularp$template
if (is.null(units)) units <- ycircularp$units
x$y <- conversion.circular(x$y, units = "radians", modulo = "2pi")
x$datay <- conversion.circular(x$datay, units = "radians", modulo = "2pi")
attr(x$y, "class") <- attr(x$y, "circularp") <- NULL
attr(x$datay, "class") <- attr(x$datay, "circularp") <- NULL
if (plot.type=="line" & units == "degrees") {
x$y <- x$y/pi * 180
x$datay <- x$datay/pi * 180
}
if (plot.type=="line" & units == "hours") {
x$y <- x$y/pi * 12
x$datay <- x$datay/pi * 12
}
}
# Flat line plot: sort by the predictor so the curve is drawn left-to-right.
if (plot.type == "line") {
xorder <- order(x$x)
x$x <- x$x[xorder]
x$y <- x$y[xorder]
lines.default(x$x, x$y, type = type, col=line.col, ...)
if (points.plot) points(x$datax, x$datay, col=points.col, pch=points.pch, ...)
} else {
# Torus: embed (x, y) angles on a torus of major radius 1, minor radius
# 0.25; requires an open rgl device (lines3d/points3d).
if (plot.type=="torus"){
xx <-cos(x$x)*(1+0.25*cos(x$y))
yy <- sin(x$x)*(1+0.25*cos(x$y))
zz <- 0.25*sin(x$y)
lines3d(xx, yy, zz, col=line.col, ...)
if (points.plot) {
xx <- cos(x$datax)*(1+0.25*cos(x$datay))
yy <- sin(x$datax)*(1+0.25*cos(x$datay))
zz <- 0.25*sin(x$datay)
points3d(xx, yy, zz, col=points.col)
}
# Cylinder: linear axis along x, circular response wrapped around a
# cylinder whose radius is 1/8 of the predictor's range.
} else if (plot.type=="cylinder"){
R<- diff(range(x$datax))/8
xx <- x$x
yy <- R*cos(x$y)
zz <- R*sin(x$y)
lines3d(xx, yy, zz, col=line.col, ...)
if (points.plot) {
xx <- x$datax
yy <- R*cos(x$datay)
zz <- R*sin(x$datay)
points3d(xx, yy, zz, col=points.col)
}
# Radial ("circle") plot via plotrix::radial.plot. Orientation defaults
# (zero angle, rotation direction) come from the circular template unless
# supplied explicitly or via plot.info.
}else{
if (is.null(plot.info)) {
if (is.null(radial.lim)) radial.lim <- range(c(x$datay,x$y))
if (is.null(zero)) {
if (template == "geographics" | template == "clock24") zero <- pi/2
else zero <- xcircularp$zero
}
if (is.null(clockwise)) {
if (template == "geographics" | template == "clock24") clockwise <- TRUE
else clockwise <- ifelse(xcircularp$rotation=="counter", FALSE, TRUE)
}
} else {
zero <- plot.info$zero
clockwise <- plot.info$clockwise
radial.lim <- plot.info$radial.lim
}
radial.plot(x$y, x$x, rp.type=rp.type, line.col=line.col, start=zero, clockwise=clockwise, radial.lim=radial.lim, add=TRUE, ...)
if (points.plot) {
# rp.type="s" draws the raw data as symbols on the same radial frame.
radial.plot(x$datay, x$datax, rp.type="s", start=zero, clockwise=clockwise, radial.lim=radial.lim,
point.col=points.col, point.symbols=points.pch, add=TRUE, ...)
}
}
}
}
|
56cd9470d6608dddda9def5764246359315b92dd
|
9e03756d86ee78175357c87c936a7716dcf5b6f7
|
/man/gsdim.Rd
|
7706e74238b96be130413e33ae053cc9cdb822e3
|
[] |
no_license
|
anishsingh20/imager
|
8659e760682697bc1f44a226eb63b4d6d9d9754e
|
c9c6f17364411aae3add9160f93775d8a2d077c1
|
refs/heads/master
| 2021-01-16T19:07:01.381026
| 2017-06-21T13:47:18
| 2017-06-21T13:47:18
| 100,137,455
| 1
| 0
| null | 2017-08-12T20:57:27
| 2017-08-12T20:57:27
| null |
UTF-8
|
R
| false
| true
| 408
|
rd
|
gsdim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cimg_class.R
\name{gsdim}
\alias{gsdim}
\title{Grayscale dimensions of image}
\usage{
gsdim(im)
}
\arguments{
\item{im}{an image}
}
\value{
returns c(dim(im)[1:3],1)
}
\description{
Shortcut: returns the dimensions the image would have if it had only one colour channel.
}
\examples{
imnoise(dim=gsdim(boats))
}
\author{
Simon Barthelme
}
|
3874fbf9bac74e857f3e7392c98dea06fd366c34
|
afeb43c3b8828758886a5f32c828249e44daf811
|
/R/run.R
|
35833f3fbc2831a84370a3a9cc1e7de42d1f36cf
|
[] |
no_license
|
parksw3/observation
|
ff74fede75c31e9c677fd317b10a304aaeff9102
|
f6011df2b769b72c50fec11bc3c41302f682027f
|
refs/heads/master
| 2020-05-19T14:21:13.152800
| 2020-02-13T19:50:52
| 2020-02-13T19:50:52
| 185,058,936
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 436
|
r
|
run.R
|
## Simulate an SIR epidemic with deSolve-style ode() and return the observed
## (reported) time series.
##
## param:   named vector with R0, gamma, N and the reporting fraction rho.
## yini:    initial state (S, I, R) handed to the ODE solver.
## tmax:    end time of the simulation.
## tlength: solver output step.
##
## Returns a data.frame with one row per output step (the initial time point
## is dropped): reported incidence, prevalence and mortality, each scaled by
## the reporting fraction rho.
run_sir <- function(param=c(R0=2, gamma=1, N=1e5, rho=0.5),
                    yini=c(S=1e5-10, I=10, R=0),
                    tmax=20,
                    tlength=0.1) {
  # Transmission rate implied by R0 and the recovery rate gamma.
  param[["beta"]] <- param[["R0"]] * param[["gamma"]]
  times <- seq(0, tmax, by=tlength)
  sol <- ode(yini, times, sir, param)
  rho <- param[["rho"]]
  data.frame(
    time=tail(times, -1),
    # New reported cases per step = drop in susceptibles, scaled by rho.
    incidence=-diff(sol[,"S"]) * rho,
    prevalence=tail(sol[,"I"], -1) * rho,
    mortality=diff(sol[,"R"]) * rho
  )
}
|
49975224593cd811bfa4442d1575ade8826b15c7
|
3105237755f3ef7ba5ead8b87ee88afe00dbfb6a
|
/man/vcov_outcome.CBPSContinuous.Rd
|
3569d6fdef6f7aed16d38c0dbd9fb1e4fa3afdd5
|
[] |
no_license
|
kosukeimai/CBPS
|
4237517a2c9a09f230ec60e8f31a6d5930a6cb05
|
c6695181b44f494ea335548b4060271c727ebd52
|
refs/heads/master
| 2022-01-21T15:51:57.714571
| 2022-01-18T16:49:48
| 2022-01-18T16:49:48
| 72,245,166
| 24
| 10
| null | 2022-01-18T03:31:59
| 2016-10-28T21:50:18
|
R
|
UTF-8
|
R
| false
| true
| 936
|
rd
|
vcov_outcome.CBPSContinuous.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytic_vcov.R
\name{vcov_outcome.CBPSContinuous}
\alias{vcov_outcome.CBPSContinuous}
\title{vcov_outcome}
\usage{
\method{vcov_outcome}{CBPSContinuous}(object, Y, Z, delta, tol = 10^(-5), lambda = 0.01)
}
\arguments{
\item{object}{A fitted CBPS object.}
\item{Y}{The outcome.}
\item{Z}{The covariates (including the treatment and an intercept term) that predict the outcome.}
\item{delta}{The coefficients from regressing Y on Z, weighting by the cbpsfit$weights.}
\item{tol}{Tolerance for choosing whether to improve conditioning of the "M"
matrix prior to conversion. Equal to 1/(condition number), i.e. the
smallest eigenvalue divided by the largest.}
\item{lambda}{The amount to be added to the diagonal of M if the condition of the matrix is worse than tol.}
}
\value{
Variance-Covariance Matrix for Outcome Model
}
\description{
vcov_outcome
}
|
5d7d33f18dfe6624e8755acbd5638a519ae23e82
|
6365ca059c0ba5ab9ef70d6ec143c28d85e550c0
|
/Homework/HW03_C++_code_from_R/bios735/R/RcppExports.R
|
0be171235c4b7c0b677431293abcea44b7e6d96a
|
[] |
no_license
|
leoleosuperdope/UNC_BIOS735
|
140102a38135880ea513bbe9daebf7585188fc51
|
2bc28899698b492361f50e7087aefaf42452a526
|
refs/heads/main
| 2023-05-09T11:07:49.775659
| 2021-05-30T02:23:21
| 2021-05-30T02:23:21
| 372,105,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Auto-generated Rcpp wrapper (see header above); forwards to the compiled
# C++ routine _bios735_one_or_exp via .Call. Do not edit by hand.
one_or_exp <- function(x) {
.Call('_bios735_one_or_exp', PACKAGE = 'bios735', x)
}
# Auto-generated Rcpp wrapper; forwards niter and lambda to the compiled C++
# routine _bios735_randomWalk2Rcpp. Do not edit by hand.
randomWalk2Rcpp <- function(niter, lambda) {
.Call('_bios735_randomWalk2Rcpp', PACKAGE = 'bios735', niter, lambda)
}
# Auto-generated Rcpp wrapper; solves A x = b in compiled C++ (Armadillo).
# Do not edit by hand.
armadillo_solve <- function(A, b) {
.Call('_bios735_armadillo_solve', PACKAGE = 'bios735', A, b)
}
# Auto-generated Rcpp wrapper; column-wise ridge regression of Y on X with
# penalty lambda, computed in compiled C++. Do not edit by hand.
col_ridge_2 <- function(Y, X, lambda) {
.Call('_bios735_col_ridge_2', PACKAGE = 'bios735', Y, X, lambda)
}
|
389610d39ff70c1dc0b51ac4bea2a5ea5509eb26
|
3282d51ed8f89ead3d9f16af1e843501f5fbe8cb
|
/man/gmdh.combi.Rd
|
895fde671dd214b4fdfd26f4eef13008fcfaa2c7
|
[] |
no_license
|
cran/GMDHreg
|
7d69b110f57df5e1220c007a88b6d3f0c695013b
|
0104cbc52becf0515e3ea6007b77c66b625325ab
|
refs/heads/master
| 2021-07-09T12:34:51.724176
| 2021-07-05T11:30:02
| 2021-07-05T11:30:02
| 174,552,055
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,161
|
rd
|
gmdh.combi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combi.R
\name{gmdh.combi}
\alias{gmdh.combi}
\title{GMDH Combinatorial}
\usage{
gmdh.combi(
X,
y,
G = 2,
criteria = c("PRESS", "test", "ICOMP"),
x.test = NULL,
y.test = NULL
)
}
\arguments{
\item{X}{matrix with N>1 columns and M rows, containing independent variables in the model. \cr
Be careful: N>4 with G=2 could be computationally very expensive and time consuming. \cr
The data must not contain NAs}
\item{y}{vector or matrix containing dependent variable in the model. \cr
The data must not contain NAs}
\item{G}{polynomial degree. \cr
0: linear regression without quadratic and interaction terms. \cr
1: linear regression with interaction terms. \cr
2: original Ivakhnenko quadratic polynomial.}
\item{criteria}{GMDH external criteria. Values: \cr
\itemize{
\item PRESS: Predicted Residual Error Sum of Squares. It takes into account all information in the data sample and is computed without recalculating the system for each test point.\cr
\item test: use x.test and y.test to estimate the RMSE (Root Mean Square Error). \cr
\item ICOMP: Index of Informational Complexity. Like PRESS, it is computed without recalculating of system.
}}
\item{x.test}{matrix with a sample randomly drawn from the initial data. This sample should not be included in X. \cr
It is used when criteria = test. \cr}
\item{y.test}{vector or matrix with y values correspond with x.test values.}
}
\value{
An object of class 'combi'. This is a list with two elements: results and G. \cr
Results is a list with two elements: \cr
\itemize{
\item coef: coefficients of the final selected GMDH Combinatorial model.
\item CV: external criteria value for selected model.
}
G the grade of polynomial used in GMDH Combinatorial model.
}
\description{
Build a regression model performing GMDH Combinatorial. \cr
This is the basic GMDH algorithm. For more information, please read the package's vignette.
}
\examples{
set.seed(123)
x <- matrix(data = c(rnorm(1050)), ncol = 3, nrow = 350)
colnames(x) <- c("a", "b", "c")
y <- matrix(data = c(10 + x[, "a"] + x[, "b"]^2 + x[, "c"]^3), ncol = 1)
colnames(y) <- "y"
x.test <- x[1:10, ]
y.test <- y[1:10]
x <- x[-c(1:10), ]
y <- y[-c(1:10)]
mod <- gmdh.combi(X = x, y = y, criteria = "PRESS")
pred <- predict(mod, x.test)
summary(sqrt((pred - y.test)^2))
}
\references{
Bozdogan, H. and Haughton, D.M.A. (1998): "Information complexity criteria for regression models", Computational Statistics & Data Analysis, 28, pp. 51-76 <doi: 10.1016/S0167-9473(98)00025-5> \cr
Hild, Ch. R. and Bozdogan, H. (1995): "The use of information-based model selection criteria in the GMDH algorithm", Systems Analysis Modelling Simulation, 20(1-2), pp. 29-50 \cr
Ivakhnenko, A.G. (1968): "The Group Method of Data Handling - A Rival of the Method of Stochastic Approximation", Soviet Automatic Control, 13(3), pp. 43-55 \cr
Müller, J.-A., Ivachnenko, A.G. and Lemke, F. (1998): "GMDH Algorithms for Complex Systems Modelling", Mathematical and Computer Modelling of Dynamical Systems, 4(4), pp. 275-316 <doi: 10.1080/13873959808837083>
}
|
61a0312a985f501e5fa9d377edb488b79a5ef976
|
c84951af9d248e3e2cf0f8f265a5649c31a08320
|
/man/coef_xtune.Rd
|
894db9f9e12bd00ef079d13c5b9b8de18d648b0a
|
[
"MIT"
] |
permissive
|
JingxuanH/xtune
|
f250f64a947ccb4eb3ea3a31c2ea032a16109479
|
33d88886b6f66decb262fd90eb8e688e14d073bc
|
refs/heads/main
| 2023-06-28T18:54:59.826328
| 2023-06-18T23:02:32
| 2023-06-18T23:02:32
| 653,879,167
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 786
|
rd
|
coef_xtune.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coef.xtune.R
\name{coef_xtune}
\alias{coef_xtune}
\title{Extract model coefficients from fitted \code{xtune} object}
\usage{
coef_xtune(object, ...)
}
\arguments{
\item{object}{Fitted 'xtune' model object.}
\item{...}{Not used}
}
\value{
Coefficients extracted from the fitted model.
}
\description{
\code{coef_xtune} extracts model coefficients from objects returned by \code{xtune} object.
}
\details{
\code{coef} and \code{predict} methods are provided as a convenience to extract coefficients and make prediction. \code{coef.xtune} simply extracts the estimated coefficients returned by \code{xtune}.
}
\examples{
# See examples in \code{predict_xtune}.
}
\seealso{
\code{xtune}, \code{predict_xtune}
}
|
4c4622d1e88cddec65d42f9dff03e0ee84a6d15d
|
05a249bd9d45f691df5599816b0929770fb47bf7
|
/scripts/methods/04-sim_data-dyngen.R
|
98445e924be34241cdc8ea8d0c76962524d3257e
|
[] |
no_license
|
jligm-hash/simulation-comparison
|
457dbdfae7c09e7e4aef74af3639858b4f6566fc
|
0724439875168fb497bf9ada0742a6082a77b5ac
|
refs/heads/master
| 2023-04-16T08:04:59.746920
| 2021-04-30T08:21:22
| 2021-04-30T08:21:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 230
|
r
|
04-sim_data-dyngen.R
|
suppressPackageStartupMessages({
library(dyngen)
library(SingleCellExperiment)
})
fun <- function(x) {
  # Wrapper around dyngen's generate_dataset() that silences its console
  # chatter by redirecting output to a throwaway temp file, then returns
  # just the generated dataset (format = "sce" per the call below).
  #
  # Bug fix: the sink is now removed via on.exit(), so the console is
  # restored even if generate_dataset() throws. The original called sink()
  # only after a successful run, leaving the redirection open on error and
  # swallowing all subsequent output in the session.
  sink(tempfile())
  on.exit(sink(), add = TRUE)
  y <- generate_dataset(x,
                        format = "sce",
                        make_plots = FALSE)
  y$dataset
}
|
de3f377da73a65d0bd3b4cf20bdff007e5ca8bfc
|
bebda466cc6b3c0772b8b64c0e706b2478c1ebaa
|
/plot3.R
|
956a07a65b6fa825e684d0bfe6255fde0ba6ae22
|
[] |
no_license
|
xiaoq007/ExData_Plotting1
|
438e6ae60e970d0ab3323bc9c3d121962c8f29df
|
42d55aa251666b420f5ec477debe849c20f3f22b
|
refs/heads/master
| 2021-01-17T23:59:54.184567
| 2016-04-30T02:01:37
| 2016-04-30T02:01:37
| 57,347,616
| 0
| 0
| null | 2016-04-29T02:12:44
| 2016-04-29T02:12:43
| null |
UTF-8
|
R
| false
| false
| 1,024
|
r
|
plot3.R
|
setwd("ExData_Plotting1")

## Download and extract the raw data --------------------------------------
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, "household_power_consumption.zip", method = "curl")
## Bug fix: the archive must be extracted before the .txt can be read;
## the original script read the file without ever unzipping it.
unzip("household_power_consumption.zip")

## Load and subset the two target days ------------------------------------
## na.strings = "?" converts the file's missing-value marker into real NAs,
## so the as.numeric() calls below do not silently coerce "?" to NA with
## warnings.
hpc <- read.csv("household_power_consumption.txt", sep = ";",
                stringsAsFactors = FALSE, na.strings = "?")
data <- subset(hpc, (Date == "1/2/2007") | (Date == "2/2/2007"))
rm(hpc)  # drop the full data set; only the two-day subset is needed below
data$dateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")

## Sub-metering plot -------------------------------------------------------
png("plot3.png")
with(data, plot(dateTime, as.numeric(Sub_metering_1), type = "l", col = "black",
                ylab = "Energy sub metering", xlab = ""))
with(data, points(dateTime, as.numeric(Sub_metering_2), type = "l", col = "red"))
with(data, points(dateTime, as.numeric(Sub_metering_3), type = "l", col = "blue"))
leg.txt <- c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
leg.col <- c("black", "red", "blue")
legend("topright", legend = leg.txt, col = leg.col, lty = 1)
dev.off()
|
3c8f318fe8583a1c5660c0555cd564271417fb63
|
2e940271c21be18f391ebaeab2079e03728eecb9
|
/man/prob_distribution_2.Rd
|
c66e974b425195750feee61fa56032b3739a866e
|
[] |
no_license
|
pspc-data-science/branchsim
|
3fe1a98b4f219dd6258f82868f36718c2e344ef5
|
d49ab68e071e91ac8f46b074ff23cb728a147c64
|
refs/heads/master
| 2023-02-13T18:56:06.240842
| 2021-01-12T17:48:36
| 2021-01-12T17:48:36
| 273,470,739
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 852
|
rd
|
prob_distribution_2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CMJ-functions.R
\name{prob_distribution_2}
\alias{prob_distribution_2}
\title{The probability distribution of births from a single mother in the branching process.}
\usage{
prob_distribution_2(tbar, kappa, lambda, p, n_samp = 300000L, min_count = 4)
}
\arguments{
\item{lambda}{The arrival rate of infectious interactions. Default lambda = .11}
\item{p}{The parameter of the logarithmic distribution for the number of infected during an event.
Default p=0.5}
\item{a}{The shape parameter of the gamma life time distribution. Default a =10}
\item{b}{The rate parameter of the gamma life time distribution. Default b = 1}
}
\value{
A tibble of counts with probability.
}
\description{
The probability distribution of births from a single mother in the branching process.
}
|
aa9743f97796c943f8eb47aea7fcf4231b09a459
|
5f0cfcec5194f11137db76056ef2b3836ab80ff8
|
/R/abcmodels.intrinsic.R
|
5c303163e66db0df3d82639e38416ce04767aac6
|
[] |
no_license
|
JakeJing/treevo
|
54d341655f1e6ddac5ab73df38c890be557e7d17
|
3429ba37e8dc7c79cf441361d07c000f07423b6e
|
refs/heads/master
| 2021-01-12T01:20:10.296046
| 2016-10-03T01:09:15
| 2016-10-03T01:09:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,808
|
r
|
abcmodels.intrinsic.R
|
#intrinsic models
#note that these work for univariate, but need to be generalized for multivariate
#otherstates has one row per taxon, one column per state
#states is a vector for each taxon, with length=nchar
#' Intrinsic Character Evolution Models
#'
#' A model of no intrinsic character change: every taxon's displacement for
#' this time step is exactly zero.
#'
#' @param params unused; kept for interface compatibility with the other
#'   intrinsic models
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of zeros, one per taxon.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords nullIntrinsic intrinsic
nullIntrinsic <- function(params, states, timefrompresent) {
  # states - states (rather than rep(0, ...)) so that names and NA entries
  # in `states` propagate exactly as with the original 0 * states.
  states - states
}
#' Intrinsic Character Evolution Models
#'
#' Brownian-motion intrinsic evolution: each taxon receives an independent
#' Gaussian displacement with mean zero and standard deviation \code{params}.
#'
#' @param params the per-step standard deviation (sd) of the Brownian step
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of random displacements, one per taxon.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords brownianIntrinsic intrinsic
brownianIntrinsic <- function(params, states, timefrompresent) {
  # mean = 0 because the returned value is ADDED to the existing states
  rnorm(length(states), mean = 0, sd = params)
}
#' Intrinsic Character Evolution Models
#'
#' Brownian motion between a minimum and a maximum bound: a displacement that
#' would push a state outside [min, max] is truncated so the new state lands
#' exactly on the violated boundary.
#'
#' @param params numeric vector: params[1] = sd, params[2] = minimum,
#'   params[3] = maximum (params[2] could be 0 or -Inf, for example)
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of character displacements, one per taxon.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords boundaryIntrinsic intrinsic
boundaryIntrinsic <- function(params, states, timefrompresent) {
  newdisplacement <- rnorm(n = length(states), mean = 0, sd = params[1])
  # Bug fix: the original loop was `for (i in length(newdisplacement))`,
  # which iterates over the single value length(newdisplacement) and so only
  # clamped the LAST taxon. Clamp every taxon's proposed new state into
  # [params[2], params[3]] and convert back to a displacement.
  pmin(pmax(states + newdisplacement, params[2]), params[3]) - states
}
#' Intrinsic Character Evolution Models
#'
#' Brownian motion restricted above a minimum bound: a displacement that would
#' push a state below the minimum is truncated so the new state lands exactly
#' on the minimum.
#'
#' @param params numeric vector: params[1] = sd, params[2] = minimum boundary
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of character displacements, one per taxon.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords boundaryMinIntrinsic intrinsic
boundaryMinIntrinsic <- function(params, states, timefrompresent) {
  newdisplacement <- rnorm(n = length(states), mean = 0, sd = params[1])
  # Bug fix: the original `for (i in length(newdisplacement))` iterated only
  # over the LAST index, so only the last taxon was held above the minimum.
  # Enforce the floor for every taxon.
  pmax(states + newdisplacement, params[2]) - states
}
#' Intrinsic Character Evolution Models
#'
#' Discrete-time Ornstein-Uhlenbeck evolution: after one time step each taxon
#' receives a Gaussian displacement whose mean pulls it toward a shared
#' optimum. The same sd, optimum and attraction apply to all characters.
#'
#' @param params numeric vector: params[1] = sd (sigma), params[2] = attractor
#'   (character mean), params[3] = attraction (alpha)
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of random displacements, one per taxon.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords autoregressiveIntrinsic intrinsic
autoregressiveIntrinsic <- function(params, states, timefrompresent) {
  sigma <- params[1]    # sd of the step noise
  optimum <- params[2]  # attractor (character mean)
  alpha <- params[3]    # attraction strength; should lie between zero and one
  # The mean is already expressed as a displacement -- a pull of strength
  # alpha toward the optimum -- so nothing is subtracted afterwards.
  rnorm(length(states), mean = alpha * (optimum - states), sd = sigma)
}
#' Intrinsic Character Evolution Models
#'
#' Discrete-time Ornstein-Uhlenbeck evolution with a hard minimum bound:
#' after the OU displacement is drawn, any new state that falls below the
#' minimum is truncated to the minimum.
#'
#' @param params numeric vector: params[1] = sd (sigma), params[2] = attractor
#'   (character mean), params[3] = attraction (alpha), params[4] = minimum bound
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of character displacements, one per taxon.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords MinBoundaryAutoregressiveIntrinsic intrinsic
MinBoundaryAutoregressiveIntrinsic <- function(params, states, timefrompresent) {
  sd <- params[1]
  attractor <- params[2]
  attraction <- params[3]  # in this model, this should be between zero and one
  minBound <- params[4]
  # Mean is expressed as a displacement (pull toward the attractor).
  newdisplacement <- rnorm(n = length(states),
                           mean = (attractor - states) * attraction, sd = sd)
  # Bug fix: the original `for (i in length(newdisplacement))` only enforced
  # the minimum on the LAST taxon; enforce it for every taxon.
  pmax(states + newdisplacement, minBound) - states
}
#' Intrinsic Character Evolution Models
#'
#' This function describes a model of intrinsic character evolution. New
#' character values are generated after one time step via a discrete-time OU
#' process with differing means, sigma, and attraction over time
#'
#' In the TimeSlices models, time threshold units are in time before present
#' (i.e., 65 could be 65 MYA). The last time threshold should be 0.
#'
#' @param params describes input parameters for the model.
#' \code{autoregressiveIntrinsicTimeSlices} params = sd-1 (sigma-1),
#' attractor-1 (character mean-1), attraction-1 (alpha-1), time threshold-1,
#' sd-2 (sigma-2), attractor-2 (character mean-2), attraction-2 (alpha-2), time
#' threshold-2
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree
#' @return A matrix of values representing character displacement from a single
#' time step in the tree.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords autoregressiveIntrinsicTimeSlices intrinsic
autoregressiveIntrinsicTimeSlices<-function(params,states, timefrompresent) { #a discrete time OU, differing mean, sigma, and attraction with time
#params=[sd1, attractor1, attraction1, timethreshold1, sd2, attractor2, attraction2, timethreshold2, ...]
#time is time before present (i.e., 65 could be 65 MYA). The last time threshold should be 0, one before that is the end of the previous epoch, etc.
#Four parameters per regime, so the regime count is length(params)/4.
numRegimes<-length(params)/4
#Regime boundaries: every 4th element of params is a time threshold; Inf is
#prepended so the first regime's window extends back through all of deep time.
timeSliceVector=c(Inf,params[which(c(1:length(params))%%4==0)])
#print(timeSliceVector)
#Defaults from the first regime (used if the loop below matches nothing).
sd<-params[1]
attractor<-params[2]
attraction<-params[3] #in this model, this should be between zero and one
#print(paste("timefrompresent = ",timefrompresent))
#Select the regime whose half-open window [threshold_{r+1}, threshold_r)
#contains timefrompresent and adopt that regime's sd/attractor/attraction.
for (regime in 1:numRegimes) {
#print(paste ("tryiing regime = ",regime))
if (timefrompresent<timeSliceVector[regime]) {
#print("timefrompresent>timeSliceVector[regime] == TRUE")
if (timefrompresent>=timeSliceVector[regime+1]) {
#print("timefrompresent<=timeSliceVector[regime+1] == TRUE")
#print(paste("choose regime ",regime, " so 4*(regime-1)=",4*(regime-1)))
sd<-params[1+4*(regime-1)]
attractor<-params[2+4*(regime-1)]
attraction<-params[3+4*(regime-1)]
#print(paste("sd = ",sd," attractor = ",attractor, " attraction = ", attraction))
}
}
}
#print(paste("sd = ",sd," attractor = ",attractor, " attraction = ", attraction))
#The mean is already a displacement: a pull of strength `attraction` toward
#the attractor, so nothing needs to be subtracted from the draw.
newdisplacement<-rnorm(n=length(states),mean=(attractor-states)*attraction,sd=sd)
return(newdisplacement)
}
#' Intrinsic Character Evolution Models
#'
#' This function describes a model of intrinsic character evolution. New
#' character values are generated after one time step via a discrete-time OU
#' process with differing sigma and attraction over time
#'
#' In the TimeSlices models, time threshold units are in time before present
#' (i.e., 65 could be 65 MYA). The last time threshold should be 0.
#'
#' @param params describes input parameters for the model.
#' \code{autoregressiveIntrinsicTimeSlicesConstantMean} params = sd-1
#' (sigma-1), attraction-1 (alpha-1), time threshold-1, sd-2 (sigma-2),
#' attraction-2 (alpha-2), time threshold-2, attractor (character mean)
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree
#' @return A matrix of values representing character displacement from a single
#' time step in the tree.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords autoregressiveIntrinsicTimeSlicesConstantMean intrinsic
autoregressiveIntrinsicTimeSlicesConstantMean<-function(params,states, timefrompresent) { #a discrete time OU, constant mean, differing sigma, and differing attraction with time
#params=[sd1 (sigma1), attraction1 (alpha 1), timethreshold1, sd2 (sigma2), attraction2 (alpha 2), timethreshold2, ..., attractor (mean)]
#time is time before present (i.e., 65 could be 65 MYA). The last time threshold should be 0, one before that is the end of the previous epoch, etc.
#Three parameters per slice, plus the shared attractor stored as the LAST
#element of params.
numTimeSlices<-(length(params)-1)/3
sd<-params[1]
attractor<-params[length(params)]
attraction<-params[2] #in this model, this should be between zero and one
#Walk the slices from deep time toward the present; adopt the sd and
#attraction of the slice with the smallest threshold still >= timefrompresent.
previousThresholdTime<-Inf
for (slice in 0:(numTimeSlices-1)) {
thresholdTime<-params[3+3*slice]
if (thresholdTime >= timefrompresent) {
if (thresholdTime<previousThresholdTime) {
sd<-params[1+3*slice]
attraction<-params[2+3*slice]
}
}
previousThresholdTime<-thresholdTime
}
#NOTE: unlike the other OU functions in this file, the mean here is
#parameterized as the NEW state (attraction*states + attractor), so states
#is subtracted at the end to convert the draw into a displacement.
newdisplacement<-rnorm(n=length(states),mean=attraction*states + attractor,sd=sd)-states
return(newdisplacement)
}
#' Intrinsic Character Evolution Models
#'
#' This function describes a model of intrinsic character evolution. New
#' character values are generated after one time step via a discrete-time OU
#' process with differing means and attraction over time.
#'
#' In the TimeSlices models, time threshold units are in time before present
#' (i.e., 65 could be 65 MYA). The last time threshold should be 0.
#'
#' @param params describes input parameters for the model.
#' \code{autoregressiveIntrinsicTimeSlicesConstantSigma} params = sd (sigma),
#' attractor-1 (character mean-1), attraction-1 (alpha-1), time threshold-1,
#' attractor-2 (character mean-2), attraction-2 (alpha-2), time threshold-2
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree
#' @return A matrix of values representing character displacement from a single
#' time step in the tree.
#' @author Brian O'Meara and Barb Banbury
#' @references O'Meara and Banbury, unpublished
#' @keywords autoregressiveIntrinsicTimeSlicesConstantSigma intrinsic
autoregressiveIntrinsicTimeSlicesConstantSigma<-function(params,states, timefrompresent) { #a discrete time OU, differing mean, constant sigma, and attraction with time
#params=[sd, attractor1, attraction1, timethreshold1, attractor2, attraction2, timethreshold2, ...]
#time is time before present (i.e., 65 could be 65 MYA). The last time threshold should be 0, one before that is the end of the previous epoch, etc.
#One shared sd (params[1]) plus three parameters per regime.
numRegimes<-(length(params)-1)/3
#print(numRegimes)
#Regime boundaries: each regime's time threshold sits at params[4 + 3*(r-1)];
#Inf is prepended so the first regime covers all of deep time.
timeSliceVector<-c(Inf)
for (regime in 1:numRegimes) {
timeSliceVector<-append(timeSliceVector,params[4+3*(regime-1)])
}
#timeSliceVector=c(Inf,params[which(c(1:length(params))%%4==0)])
#print(timeSliceVector)
#Defaults from the first regime (used if the loop below matches nothing).
sd<-params[1]
attractor<-params[2]
attraction<-params[3] #in this model, this should be between zero and one
#print(paste("timefrompresent = ",timefrompresent))
#Select the regime whose half-open window [threshold_{r+1}, threshold_r)
#contains timefrompresent; only attractor/attraction vary, sd stays fixed.
for (regime in 1:numRegimes) {
#print(paste ("trying regime = ",regime))
if (timefrompresent<timeSliceVector[regime]) {
#print("timefrompresent>timeSliceVector[regime] == TRUE")
if (timefrompresent>=timeSliceVector[regime+1]) {
#print("timefrompresent>=timeSliceVector[regime+1] == TRUE")
#print(paste("chose regime ",regime))
#sd<-params[1+4*(regime-1)]
attractor<-params[2+3*(regime-1)]
attraction<-params[3+3*(regime-1)]
#print(paste("sd = ",sd," attractor = ",attractor, " attraction = ", attraction))
}
}
}
#print(paste("sd = ",sd," attractor = ",attractor, " attraction = ", attraction))
#Mean is already a displacement (pull toward the regime's attractor).
newdisplacement<-rnorm(n=length(states),mean=(attractor-states)*attraction,sd=sd)
return(newdisplacement)
}
#' Intrinsic model: Brownian motion between time-varying hard boundaries,
#' with a single fixed step sd.
#'
#' params = [sd, min1, max1, timethreshold1, min2, max2, timethreshold2, ...]
#' Thresholds are in time before present (i.e., 65 could be 65 MYA); the last
#' (present) threshold should be 0, the one before it ends the previous epoch.
#'
#' @param params numeric vector as described above
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree
#' @return A vector of character displacements, one per taxon.
varyingBoundariesFixedSigmaIntrinsic <- function(params, states, timefrompresent) {
  numRegimes <- (length(params) - 1) / 3
  # Regime boundaries; Inf prepended so regime 1 covers all of deep time.
  timeSliceVector <- c(Inf)
  for (regime in 1:numRegimes) {
    timeSliceVector <- append(timeSliceVector, params[4 + 3 * (regime - 1)])
  }
  sd <- params[1]
  minBound <- params[2]
  maxBound <- params[3]
  # Pick the regime whose window [threshold_{r+1}, threshold_r) contains
  # timefrompresent and adopt its bounds.
  for (regime in 1:numRegimes) {
    if (timefrompresent < timeSliceVector[regime] &&
        timefrompresent >= timeSliceVector[regime + 1]) {
      minBound <- params[2 + 3 * (regime - 1)]
      maxBound <- params[3 + 3 * (regime - 1)]
    }
  }
  newdisplacement <- rnorm(n = length(states), mean = 0, sd = sd)
  # Bug fix: the original loop `for (i in length(newdisplacement))` only
  # clamped the LAST taxon; clamp every taxon's new state into [min, max].
  pmin(pmax(states + newdisplacement, minBound), maxBound) - states
}
#' Intrinsic model: Brownian motion between time-varying hard boundaries,
#' with a per-regime step sd.
#'
#' params = [sd1, min1, max1, timethreshold1, sd2, min2, max2, timethreshold2, ...]
#' Thresholds are in time before present; the last (present) threshold should
#' be 0, the one before it ends the previous epoch.
#'
#' @param params numeric vector as described above (FOUR entries per regime)
#' @param states vector of states for each taxon
#' @param timefrompresent which time slice in the tree
#' @return A vector of character displacements, one per taxon.
varyingBoundariesVaryingSigmaIntrinsic <- function(params, states, timefrompresent) {
  # Bug fix: the original computed numRegimes as length(params)/3 even though
  # each regime contributes FOUR parameters; with three or more regimes that
  # indexed past the end of params, appending NA thresholds and making the
  # comparison below fail with "missing value where TRUE/FALSE needed".
  numRegimes <- length(params) / 4
  timeSliceVector <- c(Inf)
  for (regime in 1:numRegimes) {
    timeSliceVector <- append(timeSliceVector, params[4 + 4 * (regime - 1)])
  }
  sd <- params[1]
  minBound <- params[2]
  maxBound <- params[3]
  # Pick the regime whose window [threshold_{r+1}, threshold_r) contains
  # timefrompresent and adopt its sd and bounds.
  for (regime in 1:numRegimes) {
    if (timefrompresent < timeSliceVector[regime] &&
        timefrompresent >= timeSliceVector[regime + 1]) {
      sd <- params[1 + 4 * (regime - 1)]
      minBound <- params[2 + 4 * (regime - 1)]
      maxBound <- params[3 + 4 * (regime - 1)]
    }
  }
  newdisplacement <- rnorm(n = length(states), mean = 0, sd = sd)
  # Bug fix: the original loop `for (i in length(newdisplacement))` only
  # clamped the LAST taxon; clamp every taxon's new state into [min, max].
  pmin(pmax(states + newdisplacement, minBound), maxBound) - states
}
#this model assumes a pull (perhaps weak) to a certain genome size, but with
# occasional doublings
#' @param params numeric vector: [sd, attractor, attraction, doubling.prob]
#' @param states vector of states (genome sizes) for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of character displacements, one per taxon.
genomeDuplicationAttraction <- function(params, states, timefrompresent) {
  sd <- params[1]
  attractor <- params[2]
  attraction <- params[3]  # in this model, this should be between zero and one
  doubling.prob <- params[4]
  # OU-style pull toward the attractor; the mean is already a displacement
  # (current states subtracted), so nothing is subtracted afterwards.
  newdisplacement <- rnorm(n = length(states),
                           mean = (attractor - states) * attraction, sd = sd)
  # Bug fix: the original `for (i in length(newdisplacement))` only enforced
  # the zero lower bound on the LAST taxon; enforce it for every taxon
  # (genome size cannot go negative).
  newdisplacement <- pmax(states + newdisplacement, 0) - states
  if (runif(1, 0, 1) < doubling.prob) {  # whole-genome doubling event
    # doubling: new state = 2 * states, so the displacement equals states
    newdisplacement <- states
  }
  newdisplacement
}
#This is the same as the above model, but where the states are in log units
# The only difference is how doubling occurs
#' @param params numeric vector: [sd, attractor, attraction, doubling.prob]
#' @param states vector of log-scale states for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of character displacements (log units), one per taxon.
genomeDuplicationAttractionLogScale <- function(params, states, timefrompresent) {
  step.sd <- params[1]
  optimum <- params[2]
  pull <- params[3]         # should lie between zero and one
  doubling.prob <- params[4]
  # OU-style pull; the mean is already a displacement toward the optimum.
  displacement <- rnorm(length(states), mean = (optimum - states) * pull, sd = step.sd)
  if (runif(1, 0, 1) < doubling.prob) {
    # Doubling on the log scale: new state = log(2 * exp(states)).
    displacement <- log(2 * exp(states)) - states
  }
  displacement
}
#Genome duplication, but with no attraction. However, each duplication may shortly result in less than a full doubling. Basically, the increased size is based on a beta distribution. If you want pure doubling only,
#shape param 1 = Inf and param 2 = 1
#' @param params numeric vector: [sd, shape1, duplication.prob]
#' @param states vector of log-scale states for each taxon
#' @param timefrompresent which time slice in the tree (unused)
#' @return A vector of character displacements (log units), one per taxon.
genomeDuplicationPartialDoublingLogScale <- function(params, states, timefrompresent) {
  step.sd <- params[1]
  # The larger this shape, the closer each duplication is to an exact
  # doubling; see plot(density(1 + rbeta(10000, shape1, 1))).
  shape1 <- params[2]
  duplication.prob <- params[3]
  displacement <- rnorm(length(states), mean = 0, sd = step.sd)
  if (runif(1, 0, 1) < duplication.prob) {
    # Partial doubling: multiply size by (1 + Beta(shape1, 1)), on log scale.
    displacement <- log((1 + rbeta(1, shape1, 1)) * exp(states)) - states
  }
  displacement
}
##Get Genome duplication priors
#' Priors for the genome-duplication intrinsic model
#' (genomeDuplicationPartialDoublingLogScale): the Brownian rate (sd), the
#' beta shape controlling how close each duplication is to an exact doubling,
#' and the duplication probability.
#'
#' Bug fix: the original body ended on an assignment, so the function
#' invisibly returned a bare scalar instead of the three priors its comment
#' promised. It now returns all three as a named vector.
GetGenomeDuplicationPriors <- function(numSteps, phy, data) {
  timeStep <- 1 / numSteps  # out of doRun_rej code
  sd <- GetBMRatePrior(phy, data, timeStep)  # TreEvo helper, defined elsewhere
  beta.shape1 <- 1  # NOTE(review): original comment questions this default -- confirm
  duplication.prob <- 2  # exponential rate; original comment asks "which rate?" -- confirm
  c(sd = sd, beta.shape1 = beta.shape1, duplication.prob = duplication.prob)
}
|
7de3e3e2b1f27b63e28e9a2ea609dbbdcec96225
|
7e0d67d6676662e15f26a8b9aafa4ace40fa3e28
|
/TimeSeries_TD1_FirstAnalysis.R
|
9d04b5afb7441a160b9a2390a03fbd640c9d8e6b
|
[] |
no_license
|
ettabib/TimeSeries
|
09754d83465c4e13903f1b30c779a652212a74e0
|
b18ab034a293f03176336e8353b8a106b1a694ce
|
refs/heads/master
| 2020-06-03T13:59:14.912686
| 2015-01-09T21:04:34
| 2015-01-09T21:04:34
| 24,608,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,099
|
r
|
TimeSeries_TD1_FirstAnalysis.R
|
# Time Series
# TD1: First Analysis

## Bug fix: kurtosis() and jarque.bera.test() were called without loading
## any package, which fails in a fresh R session. kurtosis() is provided by
## 'moments' (e1071 offers an alternative definition) and
## jarque.bera.test() by 'tseries'.
library(moments)
library(tseries)

data()
EuStockMarkets
data.class(EuStockMarkets)
summary(EuStockMarkets)
plot(EuStockMarkets)
cac.ts <- EuStockMarkets[, "CAC"]
plot(cac.ts)

# Differencing: absolute changes and log-returns (in percent)
dcac40 <- diff(cac.ts)
rcac40 <- diff(log(cac.ts)) * 100
par(mfrow = c(2, 1))
plot(dcac40)
plot(rcac40)

# 1) Underlying unconditional distribution function
# Numeric summaries
summary(rcac40)
kurtosis(rcac40)

# Graphical summaries
# Histograms at increasing resolution
par(mfrow = c(2, 2))
hist(rcac40, breaks = 5)
hist(rcac40)
hist(rcac40, breaks = 25)
hist(rcac40, breaks = 50)

# Kernel density against a fitted normal
par(mfrow = c(1, 1))
plot(density(rcac40))
x <- seq(-5, 5, 0.1)
lines(x, dnorm(x, mean(rcac40), sd(rcac40)), lty = 2, col = "red")

# QQ plot
qqnorm(rcac40)
abline(0, 1, col = "red")

# Gaussian tests
jarque.bera.test(rcac40)
ks.test(rcac40, pnorm)

# 2) Serial dependence
acf(rcac40)
pacf(rcac40)
Box.test(rcac40, lag = 1, type = "Box")
Box.test(rcac40, lag = 10, type = "Box")
Box.test(rcac40, lag = 1, type = "Ljung")

# Dependence in higher moments?
rcac40.2 <- rcac40 * rcac40
acf(rcac40.2)
pacf(rcac40.2)
Box.test(rcac40.2, lag = 1, type = "Box")
|
94c13f8cf785762758b379f183564cb310b30250
|
1cdd0be21213738bc8429f136f2188346709436b
|
/kappa_score.R
|
e2fbd1f31b632f2cf9616451772fdf6021937c98
|
[] |
no_license
|
vitkl/imex_vs_uniprot
|
abdebdb80cc60cd3797ae9b29c00610d7bcc8a4c
|
6fb090c9ae15d05cf6d31bd1829f86f43d160ef1
|
refs/heads/master
| 2020-06-15T09:26:41.622769
| 2017-11-23T11:14:49
| 2017-11-23T11:14:49
| 75,309,520
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,641
|
r
|
kappa_score.R
|
# GO term similarity - kappa score
#####################################
##' @author Vitalii Kleshchevnikov
# function to calculate kappa score between two categories describing a set of elements(GO terms, KEGG pathways, genes)
# defined as described in this article:
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2375021/figure/F2/
# relies on data.table for subsetting and vcd package for calculating kappa score given 2x2 incidence matrix
# the function is faster than irr::kappa2(ratings = value, weight = "unweighted")
# the function is ~25 times slower than dist()
# function takes a data.table with 2 columns corresponding to two categories to which each element in rows can belong
# The two columns are treated as 0/1 membership indicators (rows = elements);
# the result is the unweighted Cohen's kappa as computed by vcd::Kappa.
kappa_score = function(value){
# Rename columns so the data.table expressions below can refer to x and y.
# NOTE(review): `colnames<-` on a data.table may warn/copy depending on the
# data.table version; setnames() renames by reference -- verify.
colnames(value) = c("x","y")
# Count the rows for each observed (x, y) membership combination.
table_ = value[,.N, by = .(x,y)]
# Assemble the 2x2 agreement matrix. The length(...) test guards against
# combinations absent from table_ (zero rows), filling those cells with 0.
table_2 = matrix(,2,2)
table_2[1,1] = ifelse(length(table_[x == 1 & y == 1, N]), table_[x == 1 & y == 1, N],0)
table_2[1,2] = ifelse(length(table_[x == 1 & y == 0, N]), table_[x == 1 & y == 0, N],0)
table_2[2,1] = ifelse(length(table_[x == 0 & y == 1, N]), table_[x == 0 & y == 1, N],0)
table_2[2,2] = ifelse(length(table_[x == 0 & y == 0, N]), table_[x == 0 & y == 0, N],0)
# Fleiss-Cohen weights are requested, but only the Unweighted component of
# the result is extracted and returned.
kappa_score = vcd::Kappa(table_2,weights = "Fleiss-Cohen")$Unweighted
return(kappa_score)
}
#####################################
#filename = "/Users/vitalii/Downloads/goa_human.gaf"
#mapping_table = fread(filename, skip = 34)[,.(UNIPROT = V2, GO = V5, evidence = V7, ontology = V9)]
#mapping_table = mapping_table[ontology == "P", .(UNIPROT, GO)]
#mapping_table = unique(mapping_table)
#####################################
##' @author Vitalii Kleshchevnikov
# the categ_dist function to calculate categorical distance (Cohen's Kappa score) between multiple terms
# the function is intended to measure distances between GO terms based on proteins they annotate
# more generally, the function can be used to measure categorical distances between any terms(categories) annotating objects
# objects should be provided as a first column of a data.table, terms should be provided as a second column
categ_dist = function(mapping_table, terms_to_compare = unlist(unique(mapping_table[,2,with = F])), ignore_limit = F){
  # Pairwise categorical similarity (Cohen's kappa via kappa_score) between
  # annotation terms, based on the sets of objects they annotate.
  # mapping_table: 2-column data.table; column 1 = object id (e.g. UNIPROT
  #                accession), column 2 = annotating term (e.g. GO id)
  # terms_to_compare: subset of terms to score (default: all terms in column 2)
  # ignore_limit: set TRUE to allow more than 1000 terms (cost is quadratic
  #               in the number of terms)
  # Returns: list(similarity_matrix = symmetric kappa matrix, diagonal set to 1,
  #               kappa_score_table = one row per unordered term pair,
  #               kappa_score_table_redundant = both orientations of each pair)
  if(ncol(mapping_table) > 2) stop("table has more than 2 columns, object id column and term column")
  if(ignore_limit == F) if(length(terms_to_compare) > 1000) stop("more than 1000 terms to compare, set ignore_limit = T if you are sure to proceed")
  if(!is.data.table(mapping_table)) stop("provided mapping / annotation table may not be in the right format (wrong class: not data.table)")
  mapping_table = copy(unique(mapping_table))  # copy: avoid by-reference edits to the caller's table
  print(mapping_table)
  colnames(mapping_table) = c("UNIPROT", "GO")
  # object x term 0/1 incidence table, restricted to terms_to_compare
  z2 = dcast(mapping_table[,.(UNIPROT, GO, value = 1)], UNIPROT ~ GO, fill = 0, drop = F)[,UNIPROT := NULL][,terms_to_compare, with=F]
  # all unordered term pairs, as a 2 x n.pairs matrix
  combinations = t(caTools::combs(colnames(z2),2))
  # kappa (estimate, ASE) for each pair of incidence columns
  dist = t(sapply(as.data.table(combinations), function(x) kappa_score(z2[,c(x[1],x[2]),with = F])))
  dist = cbind(as.data.table(dist), as.data.table(t(combinations)))
  colnames(dist) = c("kappa_score", "kappa_error", "GO1", "GO2")
  # add the mirrored pairs so the dcast below yields a full square matrix
  dist_temp = unique(rbind(dist,dist[,.(kappa_score,kappa_error, GO1 = GO2, GO2 = GO1)]))
  dist2 = as.matrix(dcast(dist_temp[,.(GO1,GO2, kappa_score)], GO1 ~ GO2))
  rownames_dist2 = dist2[,"GO1"]
  dist2 = as.matrix(dcast(dist_temp[,.(GO1,GO2, kappa_score)], GO1 ~ GO2)[,GO1 := NULL])
  rownames(dist2) = rownames_dist2
  dist2 = dist2[sort(rownames(dist2)), sort(colnames(dist2))]  # consistent row/column ordering
  diag(dist2) = 1  # self-similarity
  return(list(similarity_matrix = dist2, kappa_score_table = dist, kappa_score_table_redundant = dist_temp))
}
|
31f6c213d0d08136036cfe19d898831bf4774945
|
49f06ff67c5a8723f0158985850f5758500e941d
|
/man/logsum_matrix.Rd
|
62fb5e5f08d14f9a64daa4e52a5f4da550eb0e53
|
[
"MIT"
] |
permissive
|
annahutch/corrcoverage
|
518e7356cab27c6e1860bfc9dea14f72abaaab9c
|
3b610d1450c7936db6ab2664de37543165ac6c0e
|
refs/heads/master
| 2021-12-15T03:52:12.104240
| 2021-11-26T13:16:39
| 2021-11-26T13:16:39
| 169,068,733
| 8
| 3
|
NOASSERTION
| 2019-08-28T14:27:44
| 2019-02-04T11:29:57
|
R
|
UTF-8
|
R
| false
| true
| 352
|
rd
|
logsum_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/func_utils.R
\name{logsum_matrix}
\alias{logsum_matrix}
\title{logsum rows of a matrix}
\usage{
logsum_matrix(x)
}
\arguments{
\item{x}{numeric matrix}
}
\value{
rowwise sums
}
\description{
matrix-ified version of logsum to avoid needing apply()
}
\author{
Chris Wallace
}
|
082fd52653d97786f28aca6e04b38ac970cbdd73
|
2b1899fd505ee7382a88e218d652852b493fce78
|
/man/clustA.Rd
|
0d45551f6e1e41d4e381248fa5f70d79b2a7fe74
|
[] |
no_license
|
anwaarms/package-clustA
|
726d2bcc99dd787c90701782cf6fcf251d472813
|
cdd600b6ab810991fe1f25312592d44d8065b48f
|
refs/heads/master
| 2020-04-10T19:06:37.423467
| 2018-12-10T19:07:36
| 2018-12-10T19:07:36
| 161,223,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 802
|
rd
|
clustA.Rd
|
\name{clustA}
\alias{clustA}
\title{Joining Clustering methods}
\usage{
clustA(data,kmeanclust,fit)}
\description{
this function prints the plot of a kmeans clustering as well as the Ward Hierarchical Clustering along with the optimal number of clusters proposed by kmeans
}
\arguments{
\item{data}{The dataset to which you will apply the clustering process, no missing values allowed. }
\item{kmeanclust}{An object obtained after using kmeans and defining a prior number of clusters}
\item{fit}{It's an hclust object that generates a Cluster Dendrogram. }
}
\examples{
require(cluster)
require(factoextra)
data=USArrests
kmeanclust= kmeans(data, 4)
d <- dist(data, method = "euclidean")
fit <- hclust(d, method="ward.D")
clustA(data,kmeanclust,fit)
}
\author{
Anwaar Msehli
}
|
90143c66dfd3bf91f87f13ba5282a63bc97bfc37
|
a02959f6a5e0df6666722eb7c78d74b047b588ae
|
/man/ds_plot_histogram.Rd
|
9265c2215c80b33aee437c877b6abe852c241900
|
[] |
no_license
|
cran/descriptr
|
ea2e24f865f84589ad03c25ffb8d35eb201cb922
|
793dbc743532c9e243801dcf9286c20cb32208b8
|
refs/heads/master
| 2021-01-01T05:20:10.888615
| 2020-12-09T16:10:02
| 2020-12-09T16:10:02
| 77,553,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 742
|
rd
|
ds_plot_histogram.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds-plots.R
\name{ds_plot_histogram}
\alias{ds_plot_histogram}
\title{Generate histograms}
\usage{
ds_plot_histogram(data, ..., bins = 5, fill = "blue", print_plot = TRUE)
}
\arguments{
\item{data}{A \code{data.frame} or \code{tibble}.}
\item{...}{Column(s) in \code{data}.}
\item{bins}{Number of bins in the histogram.}
\item{fill}{Color of the histogram.}
\item{print_plot}{logical; if \code{TRUE}, prints the plot else returns a plot object.}
}
\description{
Creates histograms if the data has continuous variables.
}
\examples{
ds_plot_histogram(mtcarz)
ds_plot_histogram(mtcarz, mpg)
ds_plot_histogram(mtcarz, mpg, disp, hp)
}
|
ebf018ec92e0b310f155fab1e519f8c44ec5aede
|
76b2e98418bfdc467292653e7aada9fea8f81c89
|
/R/postcrawling.R
|
b2119bb0b35cbac7e0729e503c84da656719d9d6
|
[] |
no_license
|
swoos91/TIL
|
8d7b4b32d819fe266835f5c3e99cf167cc8a89b6
|
292ff24a33cc8ac3ba513ffb6a1a7575b37804f3
|
refs/heads/master
| 2022-12-10T06:15:09.046562
| 2020-09-17T22:21:10
| 2020-09-17T22:21:10
| 226,819,185
| 0
| 0
| null | 2022-12-08T03:17:51
| 2019-12-09T08:14:53
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 212
|
r
|
postcrawling.R
|
# Submit an HTTP POST form request (httr) to the crawling demo endpoint,
# then parse the HTML response and extract the <h1> heading text (rvest).
unico<-POST('http://unico2013.dothome.co.kr/crawling/post.php',
            encode='form', body=list(name='R',
                                     age='27'))
# parse the response body and select all <h1> nodes
a<-html_nodes(read_html(unico), 'h1')
# plain text content of the selected nodes
b<-html_text(a)
|
a448f5fa375023f030c98e792dd3881a501fe21a
|
82836f3fd15546df37fbaaa1946dd0f2342f83c7
|
/myFun.NP.R
|
c02494067c72a1f1773a21720b56d4c5d7078a96
|
[] |
no_license
|
fugapku/NPscan
|
358c184a334a8d9500257693cf400e19ef59efa9
|
ab705560ebda89ca69435991402fec0a15d35932
|
refs/heads/master
| 2020-09-28T05:55:34.466999
| 2016-09-12T04:55:43
| 2016-09-12T04:55:43
| 67,960,279
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365,344
|
r
|
myFun.NP.R
|
# myFun.NP.R
read.antiSMASH <- function(file='fungalAntismashClusters.txt', root=NULL){
  # Read an antiSMASH cluster summary table (tab-separated, no header).
  # Columns: Description, Genbank ID, Cluster num, antismash cluster type,
  #          cluster length (bp), Start, End
  # file: path to the table (relative to `root` when `root` is given)
  # root: optional directory to read from. Previously setwd(NULL) crashed when
  #       root was left at its default, and the working directory was changed
  #       permanently as a side effect -- now it is restored on exit.
  if (!is.null(root)) {
    old.wd <- setwd(root)
    on.exit(setwd(old.wd), add = TRUE)
  }
  # comment.char spelled out (was partially matched as `comment=`)
  a = read.table(file, header = FALSE, sep = '\t', comment.char = '', quote = "")
  return(a)
}
get.runs <- function(seq){
  # Run-length encode `seq`: returns the length of each maximal run of equal
  # values, named by the run's value (names are coerced to character).
  # 20140429: YF Li
  # Uses base rle(), which also handles length-0/1 input correctly; the
  # previous manual shift-and-compare mis-indexed for length-1 vectors
  # (seq[2:1] recycling).
  r = rle(as.vector(seq))
  k = r$lengths
  names(k) = r$values
  return(k)
}
label.runs <- function(seq = NULL, runs = NULL){
  # Yong Fuga Li, 20140528
  # Per-position label vector: the LAST position of every "success" run
  # (a run named '2', i.e. seq value 1 after the +1 shift) carries that run's
  # length; every other position is 0.
  # Accepts either the raw 0/1 sequence (`seq`) or a precomputed named
  # run-length vector (`runs`, as produced by get.runs).
  if (is.null(runs)) {
    runs <- get.runs(seq + 1)
    run.ends <- cumsum(runs)        # last position of each run
    total.len <- length(seq)
  } else {
    run.ends <- cumsum(runs)
    total.len <- run.ends[length(run.ends)]  # sequence length implied by runs
  }
  out <- zeros(n = total.len)
  is.success.run <- names(run.ends) == '2'
  out[run.ends[is.success.run]] <- runs[is.success.run]
  return(out)
}
label.successes <- function(seq, window.size){
  # Rolling success count: position i receives the sum of `seq` over the
  # trailing window of `window.size` positions ending at i (TTR::runSum);
  # incomplete leading windows -- and sequences shorter than the window --
  # are reported as 0.
  require('TTR')
  n <- length(seq)
  if (window.size > n)
    return(zeros(n = n))
  counts <- runSum(seq + 0, n = window.size)
  counts[is.na(counts)] <- 0
  return(counts)
}
get.local.max.index <- function(a, tie.resolve=c('first', 'last', 'all')){
  # Indices of the local maxima of numeric vector `a` (plateaus count as one
  # maximum). tie.resolve: return the first index of each plateau, the last,
  # or all of its indices.
  # 20140527-28
  # Yong Fuga Li
  tie.resolve = match.arg(tie.resolve)
  if (!length(a))
    return(integer(0))  # guard: empty input previously errored in the 'all' branch
  b = diff(c(-Inf, a, -Inf))  # pad with -Inf so boundary maxima are detected
  s = sign(b)
  idx = which(s != 0)                   # positions where the padded values change
  idx.max = which(diff(s[idx]) == -2)   # a rise (+1) immediately followed by a fall (-1)
  if (tie.resolve == 'first'){
    out = idx[idx.max]
  } else if (tie.resolve == 'last'){
    out = idx[idx.max + 1] - 1
  } else {
    # 'all': expand every plateau to its full index range
    out = integer(0)
    for (j in seq_along(idx.max)){      # seq_along: safe when idx.max is empty
      out = c(out, idx[idx.max[j]]:(idx[idx.max[j] + 1] - 1))
    }
  }
  return(out)
}
label.successes.local.max <- function(seq, window.size, tie.resolve=c('first', 'last', 'all'), default = 0){
  # Rolling success counts over a window of `window.size`, with every
  # position that is NOT a local maximum of the rolling count reset to
  # `default`. tie.resolve controls plateau handling and is forwarded to
  # get.local.max.index.
  # Fix: the choice was previously spelled 'All'; match.arg inside
  # get.local.max.index (choices 'first'/'last'/'all') could never match it,
  # so that option always errored. Default behavior ('first') is unchanged.
  # 20140527-28
  require('TTR')
  tie.resolve = match.arg(tie.resolve)
  if (window.size > length(seq))
    return(zeros(n = length(seq)))
  a = runSum(seq + 0, n = window.size)
  a[is.na(a)] = 0
  to.keep = get.local.max.index(a, tie.resolve)
  a[setdiff(seq_along(a), to.keep)] = default
  return(a)
}
count.runs <- function(runs, max.k = max(runs), types=NULL){
  # Tabulate, for each element type, how many runs of each length 1..max.k
  # occur. `runs` is a named run-length vector as produced by get.runs
  # (names = the run's value).
  # 20140429: YF Li
  if (is.null(types))
    types = unique(names(runs))
  C = matrix(0, nrow=length(types), ncol=max.k, dimnames=list(types, 1:max.k))
  for (t in types){
    uc = table(runs[names(runs) == t])  # base table() replaces project helper unique.count
    uc = uc[intersect(names(uc), colnames(C))]  # drop run lengths beyond max.k
    # index rows by NAME: `types` may be numeric (e.g. c(2, 1)), and the former
    # positional indexing C[t, ] wrote to the wrong row in that case
    C[as.character(t), names(uc)] = as.double(uc)
  }
  return(C)
}
count.successes <- function(seq, window.size=20, weights=NULL, types=NULL){
  # Tabulate, for each element type, how many sliding windows of
  # `window.size` consecutive positions contain exactly k occurrences of that
  # type (k = 0..window.size).
  # seq: sequence of type labels
  # weights: reserved, not implemented (warns only if supplied)
  # types: type labels to tabulate; default (NULL) uses every distinct value
  #        in seq. (The old default `types=types` was self-referential and
  #        errored whenever the argument was omitted.)
  # Returns a length(types) x (window.size+1) count matrix.
  # YF Li, 20140429
  if (!is.null(weights))
    warning('weights not implemented yet')
  if (is.null(types) || !length(types))
    types = unique(seq)
  C = matrix(0, nrow=length(types), ncol=window.size+1, dimnames=list(types, 0:window.size))
  for (t in types){
    if (window.size > length(seq)){
      n.success = c()
    } else {
      # rolling window sums via cumulative sums (base R; replaces TTR::runSum)
      x = (seq == t) + 0
      cs = c(0, cumsum(x))
      n.success = cs[(window.size + 1):(length(seq) + 1)] - cs[1:(length(seq) - window.size + 1)]
    }
    if (length(n.success)) {
      uc = table(n.success)  # base table() replaces project helper unique.count
      C[as.character(t), names(uc)] = as.double(uc)
    }
  }
  return(C)
}
count.successes.local <- function(seq, window.size=20, types=NULL){
  # Tabulate, for each element type, the distribution of LOCAL-MAXIMUM
  # sliding-window success counts: counterpart of count.successes that only
  # keeps window counts at their local peaks (all non-peak windows are
  # recorded as 0 by label.successes.local.max).
  # seq: sequence of type labels; window.size: sliding window width
  # types: type labels to tabulate; default (NULL) uses every distinct value
  #        in seq (previously `types` had no default and was force-evaluated)
  # Removed: a stray "weights" warning copied from count.successes -- this
  # function has no weights argument.
  # YF Li, 20140429
  if (is.null(types) || !length(types))
    types = unique(seq)
  C = matrix(0, nrow=length(types), ncol=window.size+1, dimnames=list(types, 0:window.size))
  for (t in types){
    if (window.size > length(seq)){
      n.success = c()
    } else {
      # rolling counts with non-peaks zeroed out
      n.success = label.successes.local.max((seq == t) + 0, window.size)
    }
    if (length(n.success)) {
      uc = table(n.success)  # base table() replaces project helper unique.count
      C[as.character(t), names(uc)] = as.double(uc)
    }
  }
  return(C)
}
successes.expect <- function(N, n, probs){
  # Expected count, over all N-n+1 sliding windows of width n in an i.i.d.
  # sequence of length N, of windows containing exactly k successes
  # (k = 0..n) for each element type. `probs` is normalized to sum to 1.
  # Returns a length(probs) x (n+1) matrix of Binomial(n, p) expectations.
  # YF Li, 20140429
  if (length(probs) < 2)
    stop('need probability profile, i.e. for more than one elements')
  probs = probs/sum(probs)
  if (any(probs < 0))
    stop('Need positive probabilities')
  if (is.null(names(probs)))
    names(probs) = 1:length(probs)
  n.windows = N - n + 1
  expected = t(vapply(probs,
                      function(p) n.windows * dbinom(0:n, size = n, prob = p),
                      numeric(n + 1)))
  dimnames(expected) = list(names(probs), 0:n)
  return(expected)
}
run.expect <- function(L, probs, max.k = L){
  # Expected number of runs of exactly length k (k = 1..max.k) for each
  # element type in an i.i.d. sequence of length L with probability profile
  # `probs` (normalized to sum to 1).
  # An interior run needs a non-matching flank on both sides
  # ((L-k-1)*(1-p)^2), a boundary run needs only one (2*(1-p)), and a run
  # covering the whole sequence (L == k) needs none.
  # 20140429: YF Li
  if (length(probs) < 2)
    stop('need probability profile, i.e. for more than one elements')
  probs = probs/sum(probs)
  if (any(probs < 0))
    stop('Need positive probabilities')
  if (is.null(names(probs)))
    names(probs) = 1:length(probs)
  ks = 1:max.k
  expected = matrix(0, nrow = length(probs), ncol = length(ks),
                    dimnames = list(names(probs), ks))
  for (i in seq_along(probs)){
    p = probs[i]
    # vectorized over run lengths k (the original looped over k instead)
    expected[i, ] = ((L - ks >= 1) * ((L - ks - 1) * (1 - p)^2 + 2 * (1 - p)) + (L == ks)) * p^ks
  }
  return(expected)
}
plot.fdr <- function(observed, expected, quantile.cutoff = 0.5, reverse=T, do.plot=T, log.scale=F,tag = '', ...){
  # plot FDR curve based on observed distribution and theoretical distribution,
  # and return the estimated number of true positives
  # observed/expected: count vectors over the same score bins (names = score values)
  # reverse = T ==> higher score more likely to be true
  # reverse = F ==> lower score more likely to be true
  # Yong Fuga Li
  # 20140428
  # 20140503: quantile.cutoff, quantile of uptail instances in the expected distribution to used for the estimation of FDR
  # 20140527: do.plot
  # log.scale: legacy option for the lattice barchart (commented out below);
  #            the active ggplot path does not use it
  # ...: extra graphical parameters forwarded to plot()
  # Returns: max over score cutoffs of cumsum(observed - expected), floored at 0.
  quantile.cutoff = min(max(0, quantile.cutoff), 1)
  if (is.null(names(observed)))
    names(observed) = 1:length(observed)
  if (is.null(names(expected)))
    names(expected) = 1:length(expected)
  if (reverse){
    # scan from the high-score end so the cumulative sums start at the most
    # confident scores
    observed = rev(observed)
    expected = rev(expected)
  }
  epsilon = 1E-15  # avoids division by zero below
  n.pos = cumsum(observed - expected)  # running estimate of # true positives
  fdr = cumsum(expected)/(cumsum(observed)+epsilon)  # expected (null) hits / observed hits
  quant = cumsum(expected)/sum(expected)  # uptail quantile of the expected distribution
  i.max = min(which(quantile.cutoff<=quant))  # first bin reaching the quantile cutoff
  idx = (fdr<=1 & fdr>=0 & observed >= 1); # 20150528: add observed >= 1
  # mask out all bins beyond the quantile cutoff
  # NOTE(review): seq2 is a project helper -- presumably a seq() variant that is
  # empty when from > to (unlike base `:`); confirm its definition
  idx[seq2(from=i.max+1, to=length(expected), by=1)] = F
  if (any(idx) & do.plot){
    plot(fdr[idx], n.pos[idx], xlab='False Discovery Rate', ylab='# True gene cluster',...)
  }
  if (do.plot){
    # dat = rbind(data.frame(score = as.factor(as.numeric(names(observed))), counts=observed, observed='observed'),
    #             data.frame(score = as.factor(as.numeric(names(expected))), counts=expected, observed='expected'))
    # print(barchart(counts~score, data= dat, xlab=tag, groups=observed,
    #                equispaced.log=T, scales=list(y = list(log = log.scale)), auto.key=T))
    # overlay observed vs expected score profiles (requires ggplot2 attached)
    dat = rbind(data.frame(score = as.numeric(names(observed)), counts=observed, observed='observed'),
                data.frame(score = as.numeric(names(expected)), counts=expected, observed='expected'))
    g = ggplot(data=dat) +
      geom_line(aes(x=score ,y=counts,color=observed))
    print(g)
  }
  return(max(max(c(-Inf,n.pos[idx])), 0))
}
distribution.diff <- function(sample=labels.succ.local.all, null.samples=labels.succ.local.all.simus, nbins = NULL, quantile.cutoff = 0.5, reverse=T, do.plot=T, log.scale=F, tag = ''){
  # estimate the total number of true instances in sample with multiple null.samples as reference
  # Yong Fuga Li, 20141220, modified from plot.fdr
  # note: the sample and null.samples can be truncated distributions (e.g. filtered to be positive only), so equal sizes of the data are not assumed,
  # but the full samples are assumed to be of the same sizes for all samples
  # input: sample - a vector of scores from the real data
  #        null.samples - a list of score vectors, one per null (simulated) sample
  #        nbins - number of histogram bins (default: sqrt of the sample size)
  # NOTE(review): the defaults for sample/null.samples reference global
  # variables (labels.succ.local.all*) -- presumably set by the calling
  # script; confirm they exist before relying on the defaults.
  # output: 1) total trues in samples; 2) null distribution of total trues and p-values associated with it.
  #         3) a plot of the sample distribution against null; 4) a plot of the null distribution of total trues
  #         plus score->p-value, score->FDR and score->#true interpolation functions
  quantile.cutoff = min(max(0, quantile.cutoff), 1)
  size.sample = length(sample)
  size.total.null = sum(sapply(null.samples, FUN = length)); size.total = size.total.null + size.sample
  n.sample = length(null.samples)
  if (is.null(nbins))
    nbins = round(sqrt(size.sample))
  # shared binning over the combined range of real and null scores
  R = range(c(unlist(null.samples), sample))
  # R = range(sample)
  breaks = seq(from = R[1], to = R[2], by = (R[2]-R[1])/nbins)
  rep.value = round((breaks[2:(nbins+1)] + breaks[1:nbins])/2,4)  # bin midpoints
  # widen the outermost breaks slightly so boundary values fall inside cut()
  breaks[1] = breaks[1] - (R[2]-R[1])/nbins * 0.01; breaks[nbins+1] = breaks[nbins+1] + (R[2]-R[1])/nbins * 0.01;
  # histogram of x on the shared bins, as counts indexed by bin midpoint
  # NOTE(review): relies on project helpers unique.count / sort.by /
  # mat.fill.row; mat.fill.row presumably pads missing bins (with 0?) --
  # confirm against their definitions
  get.count <- function(x, breaks){
    observed = unique.count(rep.value[cut(x, breaks = breaks)])$counts.unique
    observed = sort.by(observed, as.numeric(names(observed)))
    observed = mat.fill.row(observed, rep.value)
    return(observed)
  }
  observed = get.count(sample, breaks)
  expected.all = lapply(null.samples, FUN = function(x){get.count(x, breaks = breaks)})
  expected.merged = get.count(unlist(null.samples), breaks)
  # observed vs average-null comparison gives the #true estimate
  n.pos = plot.fdr(observed, expected.merged/n.sample, reverse=T, main='FDR curve', tag=tag)
  # ggplot(data=rbind(data.frame(x=as.numeric(names(observed)), y=observed, data='real genome'),
  #                   data.frame(x=as.numeric(names(expected.merged)), y=expected.merged, data='null'))) +
  #   geom_line(aes(x=x,y=y,color=data))
  # leave-one-out null distribution of the #true estimate
  n.pos.null = vector(mode = 'numeric', length = n.sample)
  if (n.sample>1){
    for (i in 1:n.sample){
      n.pos.null[i] = plot.fdr(expected.all[[i]], (expected.merged-expected.all[[i]])/(n.sample-1+1E-10), reverse=T, do.plot = F)
    }
  }
  # histogram of the null #true estimates, with the observed estimate marked
  d = hist(n.pos.null, plot=F); plot(runMean(d$breaks,2)[2:length(d$breaks)], d$counts, type = 'l',
                                     xlim = c(min(c(d$breaks), n.pos), max(c(n.pos.null, n.pos))), xlab = paste('#true clusters (bin average):', n.pos), ylab='freq'); abline(v=n.pos, lty = 2)
  ################## value -> p-value and value -> fdr
  # helper: convert counts to an uptail cumulative fraction
  # NOTE(review): defined but not used below -- the interpolators are built directly
  score2p.value <- function(x){
    x = x/sum(x); x = rev(cumsum(rev(x)))
    return(x)
  }
  # empirical uptail p-value as a function of score (linear interpolation over
  # the pooled null scores, anchored beyond both ends of the score range)
  x = c(breaks[length(breaks)]+10, sort(unlist(null.samples), decreasing = T), breaks[1], breaks[1]-10)
  pvalue = c(0, (0:size.total.null)/size.total.null,1)
  score2pvalue = approxfun(x, pvalue, method='linear')
  # FDR at each sample score: expected null hits above the score / observed hits
  x = c(breaks[length(breaks)]+10, sort(sample, decreasing = T), breaks[1]-10)
  fdr = score2pvalue(x) * size.total.null/n.sample/c(0, 1:size.sample, size.sample); fdr[1] = 0
  fdr[fdr>1] = 1; fdr = cummax(fdr);  # cap at 1 and enforce monotonicity
  # estimated #true positives above each score, forced non-negative and monotone
  nTruths = c(0, 1:size.sample, size.sample) - score2pvalue(x) * size.total.null/n.sample
  nTruths[nTruths<0] = 0; nTruths = cummax(nTruths)
  score2fdr = approxfun(x, fdr, method='linear')
  score2ntrue = approxfun(x, nTruths, method='linear')
  # plot(sample,score2pvalue(sample))
  # plot(sample,score2fdr(sample))
  plot(score2fdr(sample),score2ntrue(sample), xlab='q-value', ylab='#true clusters (monotuned)')
  return(list(n.pos = n.pos, p.value = mean(n.pos.null>=n.pos), score2pvalue=score2pvalue, score2fdr=score2fdr, score2ntrue=score2ntrue))
}
NPGC.clustering <- enzyme.clustering <- function(gff.file, iprscan.tab.file = NULL, chromosome.specific=F,
gene.definition = c('gene', 'transcript', 'mRNA'), proteinID = 'ID',
annotation.by = c('OR', 'desc', 'domain'),
tag = 'A_nidulans_FGSC_A4', window.size = 20, log.scale = F,
simu.rep = 5, enzyme.definition = c('ase', 'EC6', 'MC29', 'MC29e'),
prediction.file='Top.Clusters', min.contig.len=4,
compare.against =c('simulation','theoretical'),
p.value.cutoff = 0.005,
outformat=c('csv', 'tab')){
# statistical analysis of the enzyme runs in a genome
# chromosome.specific: estimate chromosome specific enzyme probability estimation
# simu.rep: simulated gene sequences
# compare.against: using theoretical model or simulation to estimation null distribution, 20140527
# Yong Fuga Li, 20140428-29
# 20141124-25: allow the use of domain annotation instead
# enzyme.definition = match.arg(enzyme.definition)
compare.against = match.arg(compare.against)
gene.definition = match.arg(gene.definition) # 20141125
outformat = match.arg(outformat)
annotation.by = match.arg(annotation.by) # 20141125
require('rtracklayer')
require('genomeIntervals')
require(lattice)
# anno = import(gff.file, format='gff')
gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
# anno = read.gff3(gff.file, format=gff.format)
anno = import.gff(gff.file) # 20160502
# anno.chr = anno[anno$type=='chromosome',]
# chrs = anno.chr@seqnames
chrs = as.character(unique(anno@seqnames))
## keep genes only
# idx.gene = (anno$type=='gene')
idx.gene = (anno$type==gene.definition) # 20141125
anno = anno[idx.gene, ]
anno = sort.intervals(anno)
colnames(anno@elementMetadata) = toupper(colnames(anno@elementMetadata)) # 20141125
if (!is.null(anno$NOTE)){ # 20141125
desc.fname = 'NOTE'
}else if (!is.null(anno$DESCRIPTION)){
desc.fname = 'DESCRIPTION'
}else{
warning('No description or Note field for the annotation of genes')
desc.fname = 'NOTE'
anno$NOTE = ''
}
# read ipr anno: 20141125
ipr.anno = iprscan.flat(iprscan.tab.file, na.strings = c('-', 'NA', 'NULL'))
ipr.anno = mat.fill.row(t(t(ipr.anno)), row.names = anno@elementMetadata[,toupper(proteinID)], default = '')[,1]
names(ipr.anno) = anno$ID
if (annotation.by %in% 'desc'){
annotation.text = as.character(as.vector(anno@elementMetadata[[toupper(desc.fname)]]))
}else if(annotation.by %in% 'domain'){
annotation.text = as.character(as.vector(ipr.anno));
}else if(annotation.by %in% c('OR')){
annotation.text = paste(as.character(as.vector(anno@elementMetadata[[toupper(desc.fname)]])), as.character(as.vector(ipr.anno)))
}
# is.enzyme.ase = regexpr(pattern='ase[ $]', text = annotation.text, perl=T)>0
is.enzyme.ase = regexpr(pattern='(?: |^)[^ ]+ase(?: |$)', text = annotation.text, perl=T)>0 # 20140519
is.enzyme.EC6 = regexpr(pattern='(oxidoreductase|transferase|hydrolase|lyase|isomerase|ligase)', text = annotation.text, perl=T, ignore.case=T) > 0
is.enzyme.MC29 = regexpr(pattern='(oxidoreductase|hydrolase|dehydrogenase|synthase|reductase|transferase|methyltransferase|oxidase|synthetase|monooxygenase|isomerase|dehydratase|decarboxylase|deaminase|O\\-methyltransferase|transaminase|hydratase|acetyltransferase|N\\-acetyltransferase|dioxygenase|aminotransferase|O\\-acyltransferase|esterase|N\\-methyltransferase|acyltransferase|aldolase|thiolesterase|O\\-acetyltransferase|cyclase)', text = annotation.text, perl=T, ignore.case=T) > 0
is.enzyme.MC29e = regexpr(pattern='(oxidoreductase|hydrolase|dehydrogenase|synthase|reductase|transferase|methyltransferase|oxidase|synthetase|monooxygenase|isomerase|dehydratase|decarboxylase|deaminase|O\\-methyltransferase|transaminase|hydratase|acetyltransferase|N\\-acetyltransferase|dioxygenase|aminotransferase|O\\-acyltransferase|esterase|N\\-methyltransferase|acyltransferase|aldolase|O\\-acetyltransferase|cyclase|catalase|hydroxylase|P450|transporter|transcription factor)', text = annotation.text, perl=T, ignore.case=T) > 0
cat('# enzymes by ase:', sum(is.enzyme.ase))
cat('# enzymes by EC 6 class:', sum(is.enzyme.EC6))
cat('Some none EC6 enzymes', as.vector(annotation.text[is.enzyme.ase & !is.enzyme.EC6])[1:10])
if (sum(is.enzyme.ase)==0 && sum(is.enzyme.EC6)==0){
warning('No enzyme annotated in the gff file\n')
return(NULL)
}
if (enzyme.definition =='ase'){
is.enzyme = is.enzyme.ase;
}else if (enzyme.definition =='EC6'){
is.enzyme = is.enzyme.EC6;
}else if (enzyme.definition =='MC29'){
is.enzyme = is.enzyme.MC29
}else if (enzyme.definition == 'MC29e'){
is.enzyme = is.enzyme.MC29e
}else{ # 20141125
is.enzyme = regexpr(pattern=paste('(', enzyme.definition, ')',sep=''), text = annotation.text, perl=T, ignore.case=T) > 0
cat('# enzymes:', sum(is.enzyme.EC6))
}
# p.enzyme = sum(is.enzyme)/length(anno)
C.run = list()
C.run.all = c();
C.run.exp = list()
C.run.exp.all = c()
C.run.simu = list()
C.run.simu.all = c()
C.success = list()
C.success.all = c();
C.success.exp = list()
C.success.exp.all = c()
C.success.simu = list()
C.success.simu.all = c()
C.success.local = list()
C.success.local.all = c();
C.success.local.simu = list()
C.success.local.simu.all = c()
types = unique(is.enzyme+1)
L.gene = list()
p.enzyme = list()
for (i in 1:length(chrs)){
chr = as.character(chrs[i])
# cat('processing', chr,'\n')
# if (chr == '1099437636266_N_fischeri_NRRL_181'){
# 1
# 1
# }
is.in.chr = as.vector(anno@seqnames==chr)
L.gene[[chr]] = sum(is.in.chr)# number of genes in this chromosome
seq = is.enzyme[is.in.chr]
if (L.gene[[chr]] < min.contig.len)
next
if (chromosome.specific){
p.enzyme[[chr]] = sum(seq)/L.gene[[chr]];
}else{
p.enzyme[[chr]] = sum(is.enzyme)/length(anno)
}
runs = get.runs(seq+1);
labels.runs = label.runs(runs=runs)
labels.succ = label.successes(seq,window.size)
labels.succ.local = label.successes.local.max(seq,window.size)
C.run[[chr]] = count.runs(runs, types=types)
C.run.all = sum.union(C.run.all, C.run[[chr]])
C.success[[chr]] = count.successes(seq+1,window.size=window.size, types=types)
C.success.all = sum.union(C.success.all, C.success[[chr]])
C.success.local[[chr]] = count.successes.local(seq+1,window.size=window.size, types=types)
C.success.local.all = sum.union(C.success.local.all, C.success.local[[chr]])
}
max.k = ncol(C.run.all)
# anno[anno@seqnames==chr,]
for (i in 1:length(chrs)){ # recalculate C.run.exp using the global max.k
chr = as.character(chrs[i])
if (L.gene[[chr]] < min.contig.len)
next
C.run.exp[[chr]] = run.expect(L.gene[[chr]], c(1-p.enzyme[[chr]], p.enzyme[[chr]]), max.k=max.k)
C.run.exp.all = sum.union(C.run.exp.all, C.run.exp[[chr]])
C.success.exp[[chr]] = successes.expect(L.gene[[chr]], n=window.size, probs=c(1-p.enzyme[[chr]], p.enzyme[[chr]]))
C.success.exp.all = sum.union(C.success.exp.all, C.success.exp[[chr]])
C.run.simu[[chr]] = c() # one chr in all simulations
C.success.simu[[chr]] = c() # one chr in all simulations
C.success.local.simu[[chr]] = c() # one chr in all simulations
}
npos.success.simus <- npos.success.local.simus <- npos.run.simus <- vector('numeric',simu.rep) # number of positive cluster estimated in each of the simulated samples
C.run.simu.all1s = list(); # record all simulations
C.success.simu.all1s = list(); # record all simulations
C.success.local.simu.all1s = list(); # record all simulations
for (r in 1:simu.rep){
cat('\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b')
cat('iteration', r, '\n')
# all chr in one simulation: initialization
C.run.simu.all1 = c();
C.success.simu.all1 = c();
C.success.local.simu.all1 = c();
for (i in 1:length(chrs)){ # recalculate C.run.exp using the global max.k
chr = as.character(chrs[i])
if (L.gene[[chr]] < min.contig.len)
next
seq.simu = rbinom(L.gene[[chr]], size=1, prob=p.enzyme[[chr]])
# sumamry of one chr in one simulation
C.run.simu1 = count.runs(get.runs(seq.simu+1), types=types,max.k=max.k)
C.success.simu1 = count.successes(seq.simu+1, window.size=window.size, types=types)
C.success.local.simu1 = count.successes.local(seq.simu+1, window.size=window.size, types=types)
# one chr in all simulations
C.run.simu[[chr]] = sum.union(C.run.simu[[chr]], C.run.simu1/simu.rep)
C.success.simu[[chr]] = sum.union(C.success.simu[[chr]], C.success.simu1/simu.rep)
C.success.local.simu[[chr]] = sum.union(C.success.local.simu[[chr]], C.success.local.simu1/simu.rep)
# all chr in one simulations
C.run.simu.all1 = sum.union(C.run.simu.all1, C.run.simu1)
C.success.simu.all1 = sum.union(C.success.simu.all1, C.success.simu1)
C.success.local.simu.all1 = sum.union(C.success.local.simu.all1, C.success.local.simu1)
# all chr in all simulations
C.run.simu.all = sum.union(C.run.simu.all, C.run.simu1/simu.rep)
C.success.simu.all = sum.union(C.success.simu.all, C.success.simu1/simu.rep)
C.success.local.simu.all = sum.union(C.success.local.simu.all, C.success.local.simu1/simu.rep)
}
C.run.simu.all1s[[r]] = C.run.simu.all1; # record all simulations
C.success.simu.all1s[[r]] = C.success.simu.all1; # record all simulations
C.success.local.simu.all1s[[r]] = C.success.local.simu.all1; # record all simulations
}
# obtain #pos estimation for simulated data
for (r in 1:simu.rep){
C.run.simu.all1 = C.run.simu.all1s[[r]];
C.success.simu.all1 = C.success.simu.all1s[[r]];
C.success.local.simu.all1 = C.success.local.simu.all1s[[r]];
# number of estimated pos in each simulated sample
if (compare.against=='simulation'){
npos.run.simus[r] = plot.fdr(C.run.simu.all1[1,], C.run.simu.all[1,], reverse=T, do.plot=F);
npos.success.simus[r] = plot.fdr(C.success.simu.all1[1,], C.success.simu.all[1,], reverse=T, do.plot=F);
npos.success.local.simus[r] = plot.fdr(C.success.local.simu.all1[1,], C.success.local.simu.all[1,], reverse=T, do.plot=F);
}else if (compare.against=='theoretical'){
npos.run.simus[r] = plot.fdr(C.run.simu.all1[1,], C.run.exp.all[1,], reverse=T, do.plot=F);
npos.success.simus[r] = plot.fdr(C.success.simu.all1[1,], C.success.exp.all[1,], reverse=T, do.plot=F);
npos.success.local.simus[r] = plot.fdr(C.success.local.simu.all1[1,], C.success.local.exp.all[1,], reverse=T, do.plot=F);
}else{
stop('compare.against unknown')
}
}
# round(C.run.exp.all,3) # simulated and theoretical are very close when simu.rep is large, e.g. 2000
# C.run.simu.all
pdf(paste('theoretical.distribution.', tag, '.pdf', sep=''),5,4)
mapping = list(enzyme = '2', 'non-enzyme' = '1')
for (n in names(mapping)){
m = mapping[[n]]
t = plot.fdr(C.run.simu.all[m,], C.run.exp.all[m,],reverse=T, main='FDR curve (k-runs): simulated', tag=paste('#', n, 'runs: simulated', simu.rep));
if (n == 'enzyme')
npos.run.simu = t
t = plot.fdr(C.success.simu.all[m,], C.success.exp.all[m,],reverse=T, main='FDR curve (sliding window): simulated', tag=paste('#', n, 'in', window.size, 'genes: simulated', simu.rep));
if (n == 'enzyme')
npos.success.simu = t
}
dev.off()
## simulated gene sequences
pdf(paste('run.stats.', tag, '.pdf', sep=''),5,4)
for (n in names(mapping)){
m = mapping[[n]]
if (compare.against=='theoretical'){
t = plot.fdr(C.run.all[m,], C.run.exp.all[m,],reverse=T, main='FDR curve (k-runs) against theoretical: all chr', tag=paste('#', n, 'runs: All'))
}else if(compare.against=='simulation'){
t = plot.fdr(C.run.all[m,], C.run.simu.all[m,],reverse=T, main='FDR curve (k-runs) against simulation: all chr', tag=paste('#', n, 'runs: All'))
}else{
stop('compare.against unknown')
}
if (n == 'enzyme')
npos.run = t
# by chromosome plot
for (i in 1:length(chrs)){
chr = as.character(chrs[i])
if (L.gene[[chr]] < min.contig.len)
next
plot.fdr(C.run[[chr]][m,], C.run.exp[[chr]][m,],reverse=T, main=paste('FDR curve (k-runs) against theoretical:', chr), tag=paste('#', n, 'runs:', chr))
}
# dat = rbind(data.frame(run.length = as.factor(as.numeric(colnames(C.run.all))), counts=C.run.all[m,], observed='observed'),
# data.frame(run.length = as.factor(as.numeric(colnames(C.run.exp.all))), counts=C.run.exp.all[m,], observed='expected'))
# print(barchart(counts~run.length, data= dat, xlab=paste(n, 'run length'), groups=observed,
# equispaced.log=T, scales=list(y = list(log = log.scale)), auto.key=T))
# plot(C.exp.all[1,], C.all[1,], ylab='observed', xlab='expected', main='non-enzymes')
# abline(0,1, lty='dashed', lwd=2)
}
dev.off()
### binomial model
pdf(paste('window.stats.', tag, '.pdf', sep=''), 5,4)
for (n in names(mapping)){
m = mapping[[n]]
if (compare.against=='theoretical'){
t = plot.fdr(C.success.all[m,], C.success.exp.all[m,],reverse=T, main='FDR curve (sliding window) against theoretical: all chr', tag=paste('#', n, 'in', window.size, 'genes: All'))
}else if (compare.against=='simulation'){
t = plot.fdr(C.success.all[m,], C.success.simu.all[m,],reverse=T, main='FDR curve (sliding window) against simulation: all chr', tag=paste('#', n, 'in', window.size, 'genes: All'))
}else{
stop('compare.against unknown')
}
if (n == 'enzyme')
npos.window = t
# by chromosome plot
for (i in 1:length(chrs)){
chr = as.character(chrs[i])
if (L.gene[[chr]] < min.contig.len)
next
plot.fdr(C.success[[chr]][m,], C.success.exp[[chr]][m,],reverse=T, main=paste('FDR curve (sliding window):', chr), tag=paste('#', n, 'in', window.size, 'genes:', chr))
}
}
dev.off()
### binomial model
pdf(paste('local.window.stats.', tag, '.pdf', sep=''), 5,4)
for (n in names(mapping)){
m = mapping[[n]]
if (compare.against=='theoretical'){
t = plot.fdr(C.success.local.all[m,], C.success.local.exp.all[m,],reverse=T, main='FDR curve (sliding window) against theoretical: all chr', tag=paste('#', n, 'in', window.size, 'genes: All'))
}else if (compare.against=='simulation'){
t = plot.fdr(C.success.local.all[m,], C.success.local.simu.all[m,],reverse=T, main='FDR curve (sliding window) against simulation: all chr', tag=paste('#', n, 'in', window.size, 'genes: All'))
}else{
stop('compare.against unknown')
}
if (n == 'enzyme')
npos.window.local = t
}
dev.off()
#### random distribution of npos
pdf(paste('npos.null.distribution.', tag, '.pdf', sep=''), 5,4)
p.runCluster = mean(npos.run.simus>npos.run);
hist(npos.run.simus, xlab='#Cluster in simulation', main=paste('# clusters by run length:', round(npos.run,2) , 'p-value:', p.runCluster));abline(v=npos.run, lty=5, col='black')
p.windowCluster = mean(npos.success.simus>npos.window);
hist(npos.success.simus, xlab='#Cluster in simulation', main=paste('# clusters by successes:', round(npos.window,2), 'p-value:', p.windowCluster));abline(v=npos.window, lty=5, col='black')
p.window.localCluster = mean(npos.success.local.simus>npos.window.local);
hist(npos.success.local.simus, xlab='#Cluster in simulation', main=paste('# clusters by successes:', round(npos.window.local,2), 'p-value:', p.window.localCluster));abline(v=npos.window.local, lty=5, col='black')
dev.off()
################ output top predictions
anno.df = as.data.frame(anno,stringsAsFactors=F)
for (i in 1:length(anno.df)){
if (class(anno.df[[i]])!='integer')
anno.df[[i]] = unlist2(anno.df[[i]])
}
c2p <- function(x){
x = x/sum(x); x = rev(cumsum(rev(x)))
return(x)
}
# padding missing counts of zeros
C.run.simu.all = cbind('0' = length(anno) - rowSums(C.run.simu.all), C.run.simu.all)
C.success.simu.all[,'0'] = length(anno) - rowSums(C.success.simu.all) + C.success.simu.all[,'0']
p.run = c2p(C.run.simu.all[2,])
p.succ = c2p(C.success.simu.all[2,])
p.succ.local = c2p(C.success.local.simu.all[2,])
anno.df[, 'run_len'] = 0
anno.df[, paste('succ_', window.size, sep='')] = 0
anno.df[, paste('succ_local', window.size, sep='')] = 0
anno.df[, 'p.value(run_len)'] = 1
anno.df[, paste('p.value(succ_', window.size, ')',sep='')] = 1
anno.df[, paste('p.value(succ_local', window.size, ')',sep='')] = 1
for (i in 1:length(chrs)){
chr = as.character(chrs[i])
is.in.chr = as.vector(anno@seqnames==chr)
L.gene[[chr]] = sum(is.in.chr)# number of genes in this chromosome
if (L.gene[[chr]] < min.contig.len)
next
seq = is.enzyme[is.in.chr]
if (chromosome.specific){
p.enzyme[[chr]] = sum(seq)/L.gene[[chr]];
}else{
p.enzyme[[chr]] = sum(is.enzyme)/length(anno)
}
labels.runs = label.runs(seq=seq)
labels.succ = label.successes(seq,window.size)
labels.succ.local = label.successes.local.max(seq,window.size)
# mark the peaks
cat(chr)
anno.df[is.in.chr, 'run_len'] = labels.runs
anno.df[is.in.chr, paste('succ_', window.size, sep='')] = labels.succ
anno.df[is.in.chr, paste('succ_local', window.size, sep='')] = labels.succ.local
anno.df[is.in.chr, 'p.value(run_len)'] = p.run[as.character(labels.runs)]
anno.df[is.in.chr, paste('p.value(succ_', window.size, ')',sep='')] = p.succ[as.character(labels.succ)]
anno.df[is.in.chr, paste('p.value(succ_local', window.size, ')',sep='')] = p.succ.local[as.character(labels.succ.local)]
}
# mark the whole clusters
run.count = 0;
anno.df[, 'run_clusters'] = ''
anno.df[, 'succ_clusters'] = ''
for (i in which(anno.df[, 'run_len']>0)){
# cat(i)
run.count = run.count + 1;
l = anno.df[i, 'run_len'];
anno.df[(i-l+1):i, 'run_len'] = rowMax(cbind(anno.df[(i-l+1):i, 'run_len'], anno.df[i, 'run_len']))
anno.df[(i-l+1):i, 'p.value(run_len)'] = rowMin(cbind(anno.df[(i-l+1):i, 'p.value(run_len)'], anno.df[i, 'p.value(run_len)']))
anno.df[(i-l+1):i, 'run_clusters'] = paste(anno.df[(i-l+1):i, 'run_clusters'], paste('R', run.count,sep=''))
}
sl = paste('succ_local', window.size, sep='')
slp = paste('p.value(succ_local', window.size, ')',sep='')
l = window.size;
succ.loc.count = 0;
for (i in which(anno.df[, sl]>0)){
succ.loc.count = succ.loc.count+1;
anno.df[(i-l+1):i, sl] = rowMax(cbind(anno.df[(i-l+1):i, sl], anno.df[i, sl]))
anno.df[(i-l+1):i, slp] = rowMin(cbind(anno.df[(i-l+1):i, slp], anno.df[i, slp]))
anno.df[(i-l+1):i, 'succ_clusters'] = paste(anno.df[(i-l+1):i, 'succ_clusters'], paste('S', succ.loc.count,sep=''))
}
# select top window and run clusters
to.output.windows = anno.df[,paste('p.value(succ_local', window.size, ')',sep='')] < p.value.cutoff;
to.output.runs = anno.df[,'p.value(run_len)'] < p.value.cutoff;
# how many top clusters are included?
s.names = anno.df[to.output.windows, 'succ_clusters']
s.names = strsplit(paste(s.names,collapse=' '), '\\s+',perl=T)[[1]];
uc = unique.count(s.names)
n.clusters.localwindows = sum(uc$counts.unique==window.size)
r.names = anno.df[to.output.runs, 'run_clusters']
n.clusters.runs = length(unique(r.names))
out.names = c(intersect(c('seqnames', 'start', 'end', 'ID', 'Note', 'orf_classification', 'Gene'),colnames(anno.df)),
colnames(anno.df)[ncol(anno.df)-8+c(7,1,4,8,3,6)])
if (outformat=='csv'){
write.table(anno.df[,out.names], file=paste('cluster.anno.full.', tag, '.csv',sep=''),sep=',', row.names=F)
write.table(anno.df[to.output.windows,out.names], file=paste('cluster.anno.', tag, '.p', p.value.cutoff, '.NWindowClusters',n.clusters.localwindows, '.csv',sep=''),sep=',', row.names=F)
write.table(anno.df[to.output.runs,out.names], file=paste('cluster.anno.', tag, '.p', p.value.cutoff, '.NRunClusters',n.clusters.runs, '.csv',sep=''),sep=',', row.names=F)
}else if (outformat=='tab'){
write.table(anno.df[,out.names], file=paste('cluster.anno.full.', tag, '.tab',sep=''),sep='\t', row.names=F, quote = F)
write.table(anno.df[to.output.windows,out.names], file=paste('cluster.anno.', tag, '.p', p.value.cutoff, '.NWindowClusters',n.clusters.localwindows, '.tab',sep=''),sep='\t', row.names=F, quote = F)
write.table(anno.df[to.output.runs,out.names], file=paste('cluster.anno.', tag, '.p', p.value.cutoff, '.NRunClusters',n.clusters.runs, '.tab',sep=''),sep='\t', row.names=F, quote = F)
}
# write clean per cluster output, 20140611
# Write a "clean" one-row-per-cluster summary table for predicted NP gene
# clusters (NPGC), and return validation counts against known SM/PKS genes.
# Nested helper: the defaults (i.new.NPG, window.size, tag, p.value.cutoff,
# n.clusters.localwindows) resolve lexically in the enclosing analysis
# function's environment.
# NOTE(review): `method` is never passed through match.arg(); calling with
# the default two-element vector would make `method=='WindowLocal'` a
# length-2 condition. Callers must supply a single value - confirm.
# Args:
#   anno.df:   per-gene annotation data frame with cluster labels attached
#              ('succ_clusters' / 'run_clusters' columns).
#   i.new.NPG: logical index of genes passing the p-value cutoff.
#   method:    'WindowLocal' (sliding-window clusters) or 'Run' (run clusters).
#   file.out:  destination path for the tab-separated cluster table.
# Returns: named integer vector - #known SM genes, #known PKSs, #matched SM
#   genes, #matched PKS genes, #matched SM clusters (clusters containing a
#   known SM gene).
write.NPGC <- function(anno.df, i.new.NPG = to.output.windows, window.size=window.size, method=c('WindowLocal', 'Run'),
                       file.out=paste('cluster.anno.clean', tag, '.p', p.value.cutoff, '.NWindowClusters',n.clusters.localwindows, '.tab',sep='')){
  # 20140613
  # Known secondary-metabolite / PKS genes, identified by annotation text.
  is.SM = regexpr(pattern='secondary metab', text = as.character(as.vector(anno.df$Note)), perl=T, ignore.case=T)>0
  is.PKS = regexpr(pattern='polyketide synthase', text = as.character(as.vector(anno.df$Note)), perl=T, ignore.case=T)>0
  if (method=='WindowLocal'){
    # Keep only window clusters whose entire window (window.size genes)
    # passed the significance filter.
    all.SID = anno.df$succ_clusters[i.new.NPG]
    all.SID = strsplit(paste(all.SID,collapse=' '), '\\s+',perl=T)[[1]];
    uc = unique.count(all.SID)
    cluster.names = names(uc$counts.unique[uc$counts.unique==window.size])
  }else if (method=='Run'){
    r.names = anno.df[i.new.NPG, 'run_clusters']
    cluster.names = unique(r.names)
  }
  # One output row per predicted cluster.
  clean.table = matrix('',nrow=length(cluster.names),ncol=8,
                       dimnames=list(cluster.names, c('cluster ID', 'chr', 'coordinate', 'gene range', 'min distance to SM genes', 'closest SM gene(s)', 'p-value', 'cluster gene annotations')));
  n.correct.cluster = 0;
  for (nc in cluster.names){
    # Genes belonging to cluster `nc` (label match followed by space/end).
    if (method=='WindowLocal'){
      i.match = regexpr(paste(nc,'(\\s|$)',sep=''), anno.df$succ_clusters)>0
    }else{
      i.match = regexpr(paste(nc,'(\\s|$)',sep=''), anno.df$run_clusters)>0
    }# mapped$cluster.ID[i.match] = nc
    ## distance (in gene-index units) to the closest known SM gene on the same chromosome
    chr = unique(anno.df$seqnames[i.match])
    loc.SM = t(which(is.SM & anno.df$seqnames==chr))
    loc.cluster = t(t(which(i.match)))
    # All pairwise (cluster gene) - (SM gene) index differences.
    dist.to.SM = repmat(loc.cluster,1,length(loc.SM)) - repmat(loc.SM, length(loc.cluster),1)
    min.dist.to.SM = min(abs(dist.to.SM))
    #if (min.dist.to.SM)
    if (!min.dist.to.SM) # 20140720; distance 0 means a known SM gene lies inside the cluster
      n.correct.cluster = n.correct.cluster + 1
    closest.SM = which(abs(dist.to.SM)==min.dist.to.SM,arr.ind=T)
    if (!is.null(closest.SM) && length(closest.SM)>0){
      # Ties are joined with '...'; note min.dist.to.SM becomes a string here.
      min.dist.to.SM = paste(dist.to.SM[closest.SM], collapse='...')
      closest.SM = loc.SM[closest.SM[,2]]
    }
    # cluster coordinates (bp span over all member genes)
    min.l = min(c(anno.df$start[i.match], anno.df$end[i.match]))
    max.l = max(c(anno.df$start[i.match], anno.df$end[i.match]))
    # cluster gene ranges (first/last gene ID)
    first.gene = anno.df$ID[min(which(i.match))]
    last.gene = anno.df$ID[max(which(i.match))]
    # cluster all gene annotations;
    cluster.anno = paste(anno.df$ID[i.match], anno.df$Note[i.match], sep='|', collapse='\t')
    matchedSM.anno = paste(anno.df$ID[closest.SM], anno.df$Note[closest.SM], sep='|', collapse='...')
    if (method=='WindowLocal'){
      clean.table[nc, ] = c(nc,chr, paste(min.l, '-', max.l),
                            paste(first.gene, '-', last.gene), min.dist.to.SM,
                            matchedSM.anno, min(anno.df[i.match,paste('p.value(succ_local', window.size, ')',sep='')]), cluster.anno)
    }else{
      clean.table[nc, ] = c(nc,chr, paste(min.l, '-', max.l),
                            paste(first.gene, '-', last.gene), min.dist.to.SM,
                            matchedSM.anno, min(anno.df[i.match,'p.value(run_len)']), cluster.anno)
    }
  }
  # Column-description header comments, then the table itself.
  write(x='#Some of the predicted clusters are overlapping. They may indicate a larger cluster if the clusters significantly overlap (according to the coordiates in column 3).', file=file.out, append=F)
  write(x='#Column 5 gives the distance of the cluster to the closest known secondary metabolite genes', file=file.out, append=T)
  write(x='#Column 5, 0 means known SM genes are within the predicted cluster', file=file.out, append=T)
  write(x='#Column 6 gives the gene names and annotations of the closest SM gene(s)', file=file.out, append=T)
  write(x='#Column 5 and column 6, when there are multiple closest SM genes, they are separated by ...', file=file.out, append=T)
  write(x='#Column 8+ gives the gene names and annotations of the genes in the predicted cluster', file=file.out, append=T)
  write(x=paste('#Estimated No. true NP gene clusters:',npos.window.local), file=file.out, append=T)
  write.table(clean.table, file=file.out,sep='\t', row.names=F, quote = F, append=T)
  # n.SM.cluster = sum((diff(which(is.SM))>1) | (diff.str(anno.df$seqnames[is.SM])))+1
  # number of known SM gene clusters cannot be determined accurately
  # Validation counts: gene-level and cluster-level overlap with known SM/PKS.
  out = c(sum(is.SM),sum(is.PKS), sum(i.new.NPG & is.SM),
          sum(i.new.NPG & is.PKS), n.correct.cluster);
  names(out) = c('#known SM genes', '#known PKSs',
                 paste('#matched SM genes:', method, sep=''),
                 paste('#matched PKS genes:', method, sep=''),
                 paste('#matched SM clusters:', method, sep=''))
  return(out)
}
a = write.NPGC(anno.df, i.new.NPG = to.output.windows, window.size=window.size, method='WindowLocal',
file.out=paste('cluster.annoCompact.', tag, '.p', p.value.cutoff, '.NWindowClusters',n.clusters.localwindows, '.tab',sep=''))
b = write.NPGC(anno.df, i.new.NPG = to.output.runs, window.size=window.size,method='Run',
file.out=paste('cluster.annoCompact.', tag, '.p', p.value.cutoff, '.NRunClusters',n.clusters.runs, '.tab',sep=''))
n.unknowns = sum(regexpr(pattern='Protein of unknown function', text = annotation.text, perl=T)>0) # 20140529
n.genes = length(anno)
return(list(stats = c('#Pos Run Clusters'=npos.run, 'p Pos Run Clusters'=p.runCluster,
'#Pos WindowLocal Clusters'=npos.window.local, 'p Pos WindowLocal Clusters'=p.window.localCluster,
"#Top Run Clusters"=n.clusters.runs, "#Top WindowLocal Clusters"=n.clusters.localwindows,
a, b[3:5],
'#Protein of unknown function'=n.unknowns,'#genes'=n.genes, 'enzyme prob'=sum(is.enzyme)/length(anno)),
npos.run.simu=npos.run.simu, npos.success.simu=npos.success.simu,
npos.run.simus=npos.run.simus, npos.success.simus=npos.success.simus, n.chr = length(chrs)))
}
# Detect spatial clustering of gene-expression behavior along the genome.
# Genes from `gff.file` are ordered by genomic position, matched to rows of
# the expression matrix, decomposed into ICA modules, and each module's
# loadings are tested for spatial autocorrelation along the gene order.
# Autocorrelated modules are then checked for enrichment of known
# secondary-metabolite (SM) / PKS genes.
# Args:
#   gff.file: GFF annotation; only 'gene' features are used.
#   geMat:    genes x samples expression matrix, rownames = gene IDs.
#   iters:    number of ICA restarts passed to ica.do().
# Returns: the ICA decomposition list, augmented with autocorrelation
#   statistics ($autocorr$R/$R2/$Z), the sorted annotation, and geMat.
# Side effects: writes several PDFs to hard-coded absolute paths and loads
#   the non-CRAN package "fcanalysis" from a hard-coded library path.
# NOTE(review): output paths and the library path are machine-specific -
#   parameterize before reuse on another machine.
# 20140729, YF Li
express.clustering <- function(gff.file="/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_features.gff", geMat, iters = 5){
  require(gplots)
  ## read gff and keep position-sorted gene features only
  gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
  # anno = read.gff3(gff.file, format=gff.format)
  anno = import.gff(gff.file) # 20160502
  idx.gene = (anno$type=='gene')
  anno = anno[idx.gene, ]
  anno = sort.intervals(anno)
  n = length(anno)
  ## keep only genes present in the expression matrix, in genomic order
  idx = !is.na(match(anno$ID, rownames(geMat)))
  IDs = anno$ID[idx]
  geMat = geMat[IDs,]
  ## get gene modules via ICA
  require("fcanalysis",lib="~/Dropbox/Galaxy/R/lib")
  geMat.n =preprocessICA(geMat,F)
  s = ncol(geMat.n)-1
  ica.spatial = ica.do(geMat.n, iters = iters, nComponents = s)
  ## spatial autocorrelation of each module's loadings along the gene order
  autocorr.all = zeros(n = s)
  names(autocorr.all) = 1:s
  autocorr.all.20 <- autocorr.all.R2 <- autocorr.all.Z <- autocorr.all
  pdf('/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/Autocorr.ICAmodules.pdf',8,6)
  par(mfrow=c(2,5))
  for (i in 1:s){
    lag.max = 60
    a = acf(ica.spatial$S[,i],lag.max = lag.max, main=paste('M', i, ' V%:', round(ica.spatial$power[i],4), sep=''))
    # Three summaries of lags 1..60: mean, RMS, and a summed Fisher-z score.
    autocorr.all[i] = mean(a$acf[2:(lag.max+1)])
    autocorr.all.R2[i] = sqrt(mean(a$acf[2:(lag.max+1)]^2))
    autocorr.all.Z[i] = sum(1/2*log((1+a$acf[2:(lag.max+1)])/(1-a$acf[2:(lag.max+1)]))*sqrt(nrow(ica.spatial$S)-3))/sqrt(lag.max)
    lag.max = 20;
    a = acf(ica.spatial$S[,i],lag.max = lag.max, main=paste('M', i, ' V%:', round(ica.spatial$power[i],4), sep=''), plot=F)
    autocorr.all.20[i] = mean(a$acf)
  }
  colnames(ica.spatial$A) = sub('nidulans', '', colnames(ica.spatial$A))
  colnames(ica.spatial$A) = sub('.CEL', '', colnames(ica.spatial$A))
  dev.off()
  pdf('/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/Autocorr.MeanExpression.pdf',4,4)
  par(mfrow=c(1,1))
  a = acf(rowMeans(geMat),lag.max = 100, main=paste('average gene expression'))
  dev.off()
  pdf('/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/clustering.ICA.pdf',6,8)
  heatmap.quick.geMat(ica.spatial$A, id.type = 'symbol', color = bluered(256),sd.cutoff = 0, margins=c(9,2))
  dev.off()
  # Min-max normalize the mean and lag-20 summaries (R2/Z stay raw).
  autocorr.all = (autocorr.all - min(autocorr.all))/(max(autocorr.all)-min(autocorr.all))
  autocorr.all.20 = (autocorr.all.20 - min(autocorr.all.20))/(max(autocorr.all.20)-min(autocorr.all.20))
  ## examine known SM / PKS genes against module membership
  is.SM = regexpr(pattern='secondary metab', text = as.character(as.vector(anno$Note[idx])), perl=T, ignore.case=T)>0
  is.PKS = regexpr(pattern='polyketide synthase', text = as.character(as.vector(anno$Note[idx])), perl=T, ignore.case=T)>0
  asso.FET = TFAI.FET(ica.spatial$S, mod.full = cbind(SM=is.SM, PKS=is.PKS))
  asso.lm = TFAI.lm(ica.spatial$S, mod.full = cbind(SM=is.SM, PKS=is.PKS), lm.joint = F, normalize.TF = 'none')
  # NOTE(review): asso.mu.lm is computed but not used further - kept for parity.
  asso.mu.lm = TFAI.lm(rowMeans(geMat), mod.full = cbind(SM=is.SM, PKS=is.PKS), lm.joint = F, normalize.TF = 'none')
  pdf('/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/Autocorr.vs.knownSM.Enrichment.pdf',4,4)
  plot(autocorr.all, -log10(asso.FET$p.value[1,]), xlab='normalized autocorr', ylab='-log10(p-value) FET', main='all SM genes')
  plot(autocorr.all, -log10(asso.FET$p.value[2,]), xlab='normalized autocorr', ylab='-log10(p-value) FET', main='PKS enzymes')
  plot(autocorr.all, -log10(asso.lm$p.value[1,]), xlab='normalized autocorr', ylab='-log10(p-value)', main='all SM genes')
  plot(autocorr.all, -log10(asso.lm$p.value[2,]), xlab='normalized autocorr', ylab='-log10(p-value)', main='PKS enzymes')
  plot(autocorr.all, asso.lm$statistic[1,], xlab='normalized autocorr', ylab='T statistics', main='all SM genes')
  plot(autocorr.all, asso.lm$statistic[2,], xlab='normalized autocorr', ylab='T statistics', main='PKS enzymes')
  plot(autocorr.all, abs(asso.lm$statistic[1,]), xlab='normalized autocorr', ylab='T statistics', main='all SM genes')
  plot(autocorr.all, abs(asso.lm$statistic[2,]), xlab='normalized autocorr', ylab='T statistics', main='PKS enzymes')
  dev.off()
  # FIX(review): these correlations were previously bare expressions inside a
  # function body, so their values were computed and silently discarded;
  # print() makes them visible as evidently intended.
  print(cor(autocorr.all, -asso.FET$p.value[1,], method = 'spearman'))
  print(cor(autocorr.all, -asso.lm$p.value[1,], method = 'spearman'))
  print(cor(autocorr.all, abs(asso.lm$statistic[1,]), method = 'spearman'))
  print(cor(autocorr.all.R2, -asso.FET$p.value[1,], method = 'spearman'))
  print(cor(autocorr.all.R2, -asso.lm$p.value[1,], method = 'spearman'))
  print(cor(autocorr.all.R2, abs(asso.lm$statistic[1,]), method = 'spearman'))
  print(cor(autocorr.all.Z, -asso.FET$p.value[1,], method = 'spearman'))
  print(cor(autocorr.all.Z, -asso.lm$p.value[1,], method = 'spearman'))
  print(cor(autocorr.all.Z, abs(asso.lm$statistic[1,]), method = 'spearman'))
  print(cor(autocorr.all.20, -asso.FET$p.value[1,], method = 'spearman'))
  print(cor(autocorr.all.20, -asso.lm$p.value[1,], method = 'spearman'))
  print(cor(autocorr.all.20, abs(asso.lm$statistic[1,]), method = 'spearman'))
  # FIX(review): the list-element labels below previously all read 'module16'
  # (copy-paste) while indexing modules 33, 34, 13 and 32; the venn diagrams
  # were mislabeled. Labels now match the indexed module.
  venn(list(module16 = which(ica.spatial$S[,16]>3), Known.NPG=which(is.SM)))
  venn(list(module33 = which(ica.spatial$S[,33]>3), Known.NPG=which(is.SM)))
  venn(list(module34 = which(ica.spatial$S[,34]>3), Known.NPG=which(is.SM)))
  venn(list(module13 = which(ica.spatial$S[,13]>3), Known.NPG=which(is.SM)))
  venn(list(module32 = which(ica.spatial$S[,32]>3), Known.NPG=which(is.SM)))
  pdf('/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/ExpressionLevel.NPvOther.pdf',4,4)
  hist.by(rowMeans(geMat[IDs,]), as.factor(c('No', 'Yes')[is.SM+1]), by.name = 'NPG', xlab='expression')
  hist.by(rowMeans(geMat[IDs,1:36]), as.factor(c('No', 'Yes')[is.SM+1]), by.name = 'NPG', main='liquid medium', xlab='expression')
  hist.by(rowMeans(geMat[IDs,37:44]), as.factor(c('No', 'Yes')[is.SM+1]), by.name = 'NPG', main='solid medium', xlab='expression')
  hist.by(rowMeans(geMat.n[,37:44])- rowMeans(geMat.n[,1:36]), as.factor(c('No', 'Yes')[is.SM+1]), by.name = 'NPG', main='solid/liquid difference', xlab='change')
  dev.off()
  # Attach autocorrelation statistics and inputs to the returned object.
  ica.spatial$autocorr$R = autocorr.all;
  ica.spatial$autocorr$R2 = autocorr.all.R2;
  ica.spatial$autocorr$Z = autocorr.all.Z;
  ica.spatial$spatial.cluster.index = autocorr.all;
  ica.spatial$spatial.cluster.method = 'mean'
  ica.spatial$anno = anno;
  ica.spatial$autocorr.lag = 60;
  ica.spatial$geMat = geMat
  return(ica.spatial)
}
# Score how strongly the genes spanning `gene.range` behave as an expression
# cluster, by weighting sliding-window sums of ICA module loadings with each
# module's spatial autocorrelation.
# Args:
#   ica.spatial:         prepped ICA result (needs $S, $S2, $autocorr).
#   gene.range:          two gene IDs bounding the window of interest.
#   score.type:          which autocorrelation summary to use as weights.
#   median.substraction: center the weights at their median first?
#   do.plot:             forwarded to fdr.symmatric().
# Returns: named vector with the window's unsigned/signed scores and FDRs
#   (names are suffixed with the first gene ID of the window).
# YF Li, 20140731
score.spatial.cluster <- function(ica.spatial, gene.range=c('AN8131', 'AN8137'),
                                  score.type = c('R', 'R2', 'Z'), median.substraction=T, do.plot=T){
  score.type = match.arg(score.type)
  module.weight = ica.spatial$autocorr[[score.type]]
  if (median.substraction){
    module.weight = module.weight - median(module.weight)
  }
  # Window boundaries as row indices into the loading matrix.
  idx = sort(match(gene.range, rownames(ica.spatial$S)))
  gene.range = rownames(ica.spatial$S)[idx]
  win.len = idx[2] - idx[1] + 1
  # Sliding-window sum over one column; each result keeps the name of its
  # window's first gene.
  window.sum = function(col.vals){
    sums = as.vector(runsum(Rle(col.vals), win.len))
    names(sums) = names(col.vals)[1:(length(col.vals) - win.len + 1)]
    return(sums)
  }
  # Unsigned: sum of squared loadings; signed: square of the summed loadings
  # (rewards sign-consistent windows). Both are weighted by module autocorr.
  rs.unsigned = (apply(ica.spatial$S2, MARGIN = 2, window.sum) %*% module.weight)[,1]
  rs.signed = (apply(ica.spatial$S, MARGIN = 2, window.sum)^2 %*% module.weight)[,1]
  # FDRs from the symmetric-null fit on the log2 scores (signed first, to
  # preserve the original plotting order when do.plot=TRUE).
  fdr.signed = fdr.symmatric(log2(rs.signed), iterative = F, plot = do.plot)
  fdr.unsigned = fdr.symmatric(log2(rs.unsigned), iterative = F, plot = do.plot)
  names(fdr.signed) <- names(fdr.unsigned) <- names(rs.unsigned)
  return(c(s.unsigned = rs.unsigned[gene.range[1]],
           s.signed = rs.signed[gene.range[1]],
           fdr.unsigned = fdr.unsigned[gene.range[1]],
           fdr.signed = fdr.signed[gene.range[1]]))
}
# Precompute sliding-window cluster scores for every window length 1..K and
# cache them on the returned ica.spatial object, so that
# score.spatial.cluster.2d() can later look up any (start, length) window.
# For each window length k it stores, per window-start gene:
#   rs.unsigned      - autocorr-weighted window sum of squared ICA loadings
#   rs.signed        - autocorr-weighted square of the window-summed loadings
#   rs.LowExpression - negated running sum of mean expression (high = lowly
#                      expressed window)
# plus matching FDR estimates from fdr.symmatric().
# Args:
#   ica.spatial:         ICA result with $S, $autocorr, $geMat.
#   K:                   maximum window length to precompute.
#   center.method:       forwarded to fdr.symmatric().
#   score.type:          which autocorrelation summary weights the modules.
#   median.substraction: robust-standardize and threshold the weights?
#   do.plot:             forwarded to fdr.symmatric().
# Returns: ica.spatial augmented with $S2, the six K-column score/FDR
#   matrices (one column per window length), $mu, and the settings used.
# NOTE(review): the result matrices are grown with cbind() inside the loop
# (quadratic copying); kept as-is since preallocation would also change the
# dimnames that cbind() of named vectors produces.
ica.spatial.prep <- function(ica.spatial, K= 50, center.method='median',
                             score.type = c('R', 'R2', 'Z'), median.substraction=F, do.plot=T){
  require(modeest)
  score.type = match.arg(score.type)
  # Per-module spatial autocorrelation, used to weight each ICA module.
  spatial.cluster.score = ica.spatial$autocorr[[score.type]]
  if (median.substraction){
    # Robust-standardize the weights and zero out modules below 1 MAD.
    spatial.cluster.score = spatial.cluster.score - median(spatial.cluster.score)
    spatial.cluster.score = spatial.cluster.score/mad(spatial.cluster.score)
    spatial.cluster.score[spatial.cluster.score<1] = 0
  }
  ica.spatial$S2 = ica.spatial$S^2;
  ica.spatial$rs.unsigned <- ica.spatial$fdr.unsigned <- ica.spatial$rs.signed <-
    ica.spatial$fdr.signed <- ica.spatial$rs.LowExpression <- ica.spatial$fdr.LowExpression <- c()
  mu = rowMeans(ica.spatial$geMat) # average expression per gene across samples
  for (k in 1:K){
    # Unsigned score: window sum of squared loadings, weighted by autocorr.
    rs.unsigned = (apply(ica.spatial$S2, MARGIN = 2, function(x){return(runsum.2(x,k,addzeros=T))}) %*% spatial.cluster.score)[,1]
    # Log-transform only when scores are (almost) all positive.
    if (mean(rs.unsigned<0)<0.01)
      rs.unsigned = log2(rs.unsigned)
    fdr.unsigned = fdr.symmatric(rs.unsigned,iterative = F, plot = do.plot, center.method=center.method)
    # Signed score: square of the window-summed loadings (rewards windows
    # whose loadings share a sign).
    rs.signed = (apply(ica.spatial$S, MARGIN = 2, function(x){return(runsum.2(x,k,addzeros=T))})^2 %*% spatial.cluster.score)[,1]
    if (mean(rs.signed<0)<0.01)
      rs.signed = log2(rs.signed)
    fdr.signed = fdr.symmatric(rs.signed,iterative = F, plot = do.plot, center.method=center.method)
    rs.LowExpression = -runsum.2(mu,k=k,addzeros=T)# low expression score: negated window sum of mean expression
    fdr.LowExpression = fdr.symmatric(rs.LowExpression,iterative = F, plot = do.plot, center.method=center.method)
    names(fdr.LowExpression) <- names(fdr.signed) <- names(fdr.unsigned) <- names(rs.unsigned)
    # Append column k to each cache (one column per window length).
    ica.spatial$rs.unsigned <- cbind(ica.spatial$rs.unsigned, rs.unsigned)
    ica.spatial$fdr.unsigned <- cbind(ica.spatial$fdr.unsigned, fdr.unsigned);
    ica.spatial$rs.signed <- cbind(ica.spatial$rs.signed, rs.signed)
    ica.spatial$fdr.signed <- cbind(ica.spatial$fdr.signed, fdr.signed);
    ica.spatial$rs.LowExpression <- cbind(ica.spatial$rs.LowExpression, rs.LowExpression)
    ica.spatial$fdr.LowExpression <- cbind(ica.spatial$fdr.LowExpression, fdr.LowExpression)
  }
  # All six matrices share the window-start gene names as row names.
  rownames(ica.spatial$rs.unsigned) <- rownames(ica.spatial$fdr.unsigned) <-
    rownames(ica.spatial$rs.signed) <- rownames(ica.spatial$fdr.signed) <-
    rownames(ica.spatial$rs.LowExpression) <- rownames(ica.spatial$fdr.LowExpression) <- names(rs.unsigned)
  # Record inputs/settings so downstream lookups can report them.
  ica.spatial$mu = mu
  ica.spatial$center.method=center.method;
  ica.spatial$score.type = score.type
  ica.spatial$median.substraction=median.substraction
  return(ica.spatial)
}
# Look up precomputed cluster scores for every sub-window [i, j] inside
# `gene.range` and assemble them as gene-by-gene matrices for plotting.
# Requires ica.spatial to have been processed by ica.spatial.prep() (uses
# the cached rs.*/fdr.* matrices, $geMat, $mu, $anno).
# Args:
#   ica.spatial:   prepped ICA result.
#   gene.range:    two gene IDs bounding the region of interest.
#   cor.method:    method for the pairwise gene-expression correlations.
#   CS.n.neighbor: neighborhood half-width for the Andersen clustering
#                  score CS.
# Returns: list of K x K matrices (rows = window start gene, cols = window
#   end gene; NA where no window exists), pairwise correlations, CS, and
#   per-gene expression summaries, plus the settings used.
# YF Li, 20140730
score.spatial.cluster.2d <- function(ica.spatial, gene.range=c('AN8131', 'AN8137'),
                                     cor.method = 'spearman', CS.n.neighbor = 3){
  require(modeest)
  # Region boundaries as row indices of the loading matrix; gene.range is
  # reordered to (left, right).
  is = match(gene.range, rownames(ica.spatial$S))
  is = sort(is)
  gene.range = rownames(ica.spatial$S)[is]
  all.genes = rownames(ica.spatial$S)[is[1]:is[2]]
  # s1 = sum(colSums(ica.spatial$S[is[1]:is[2],]^2)*ica.spatial$autocorr)
  # s2 = sum(colSums(ica.spatial$S[is[1]:is[2],])^2*ica.spatial$autocorr)
  K = is[2]-is[1]+1
  # Score matrices indexed [window-start gene, window-end gene].
  s.unsigned.2d <- s.signed.2d <- fdr.unsigned.2d <- fdr.signed.2d <-
    s.LowExpression.2d <- fdr.LowExpression.2d <- matrix(NA, nrow = K, ncol = K, dimnames = list(all.genes, all.genes))
  # Fill each (start, end) cell from the cached column for window length k.
  for (k in 1:K){
    for (i in is[1]:(is[2]-k+1)){
      s.unsigned.2d[all.genes[i-is[1]+1], all.genes[i+k-is[1]]] = ica.spatial$rs.unsigned[all.genes[i-is[1]+1],k];
      fdr.unsigned.2d[all.genes[i-is[1]+1], all.genes[i+k-is[1]]] = ica.spatial$fdr.unsigned[all.genes[i-is[1]+1],k];
      s.signed.2d[all.genes[i-is[1]+1], all.genes[i+k-is[1]]] = ica.spatial$rs.signed[all.genes[i-is[1]+1],k];
      fdr.signed.2d[all.genes[i-is[1]+1], all.genes[i+k-is[1]]] = ica.spatial$fdr.signed[all.genes[i-is[1]+1],k];
      s.LowExpression.2d[all.genes[i-is[1]+1], all.genes[i+k-is[1]]] = ica.spatial$rs.LowExpression[all.genes[i-is[1]+1],k];
      fdr.LowExpression.2d[all.genes[i-is[1]+1], all.genes[i+k-is[1]]] = ica.spatial$fdr.LowExpression[all.genes[i-is[1]+1],k];
    }
  }
  # Pairwise correlations over the region extended by CS.n.neighbor genes on
  # each side (so boundary genes get full neighborhoods for CS).
  R.ext = cor(t(ica.spatial$geMat[max((is[1]-CS.n.neighbor),1):min(is[2]+CS.n.neighbor,nrow(ica.spatial$geMat)),]), method = cor.method)
  R = R.ext[all.genes, all.genes]
  # CS: per gene, the sum of squared POSITIVE correlations with genes at most
  # CS.n.neighbor positions away (self excluded) - zero everything else.
  R.ext[R.ext<0] = 0;
  ai = arrayInd(1:length(R.ext),.dim = dim(R.ext));
  ai = ai[abs(ai[,1]-ai[,2])>CS.n.neighbor | ai[,1]==ai[,2],]
  R.ext[ai] = 0;
  CS = rowSums(R.ext^2)[all.genes]
  # Annotation-order gene IDs for the same range (may differ from geMat order
  # if some annotated genes were absent from the expression matrix).
  is.anno = match(gene.range, ica.spatial$anno$ID)
  is.anno = sort(is.anno)
  all.genes.anno = ica.spatial$anno$ID[is.anno[1]:is.anno[2]]
  return(list(s.unsigned = s.unsigned.2d,
              s.signed = s.signed.2d,
              fdr.unsigned = fdr.unsigned.2d,
              fdr.signed = fdr.signed.2d,
              s.lowExpress = s.LowExpression.2d,
              fdr.lowExpress = fdr.LowExpression.2d,
              cor = R,
              CS = CS,
              mu = ica.spatial$mu[all.genes],
              sd = rowSds(ica.spatial$geMat[all.genes,]),
              err = rowSds(ica.spatial$geMat[all.genes,])/sqrt(ncol(ica.spatial$geMat)),
              geMat = ica.spatial$geMat[all.genes,],
              center.method=ica.spatial$center.method,
              score.type = ica.spatial$score.type,
              median.substraction=ica.spatial$median.substraction,
              cor.method = cor.method, CS.n.neighbor = CS.n.neighbor,
              all.gene.geMat = all.genes,
              all.gene.anno = all.genes.anno))
}
# Visualize the 2D expression-clustering scores from score.spatial.cluster.2d():
# a multi-panel lattice page with the expression heatmaps, per-gene mean
# expression, the Andersen CS barchart, and (unless no.fdr) three -log10(FDR)
# level plots for the unsigned / signed / low-expression window scores.
# Args:
#   s2d:                output of score.spatial.cluster.2d().
#   col:                diverging color ramp for the level plots.
#   tag:                title annotation for the CS barchart panel.
#   heatmap.clustering: currently unused in this body - TODO confirm intent.
#   no.fdr:             if TRUE, draw only the expression panels.
plot.spatial.cluster.2d <- function(s2d, col = bluered(256), tag='',
                                    heatmap.clustering=T, no.fdr=F){
  require(lattice);
  min.logp = 2
  n.color = 32
  # --- unsigned-score FDR panel ---
  p = s2d$fdr.unsigned;
  # Avoid -log10(0): replace exact zeros with half the smallest nonzero FDR.
  p[p==0] = min(p[p!=0 & !is.na(p)])/2
  # get proper color scale so that p-value 0.1 is assigned the middle color
  max.logp = max(max(-log10(p),na.rm = T),min.logp-log10(5))+log10(5);
  l1 = round((length(col)+1)/2); l2 = length(col); l0 = (l1*max.logp-l2)/(max.logp-1)
  col0 = col[l0:l2]
  x4 = levelplot(t(-log10(p)),col.regions = col0, xlab='to gene', ylab='from gene', main='-log10(fdr unsiged cluster score)',
                 scales=list(x=list(rot=90),alternating=1),at=seq(0, max.logp, length.out=n.color))
  # --- signed-score FDR panel (same color-scale construction) ---
  p = s2d$fdr.signed;
  p[p==0] = min(p[p!=0 & !is.na(p)])/2
  max.logp = max(max(-log10(p),na.rm = T),min.logp-log10(5))+log10(5);
  l1 = round((length(col)+1)/2); l2 = length(col); l0 = (l1*max.logp-l2)/(max.logp-1)
  col0 = col[l0:l2]
  x5 = levelplot(t(-log10(p)),col.regions = col0, xlab='to gene', ylab='from gene', main='-log10(fdr siged cluster score)',
                 scales=list(x=list(rot=90),alternating=1),at=seq(0, max.logp, length.out=n.color))
  # --- low-expression-score FDR panel ---
  p = s2d$fdr.lowExpress;
  p[p==0] = min(p[p!=0 & !is.na(p)])/2
  max.logp = max(max(-log10(p),na.rm = T),min.logp-log10(5))+log10(5);
  l1 = round((length(col)+1)/2); l2 = length(col); l0 = (l1*max.logp-l2)/(max.logp-1)
  col0 = col[l0:l2]
  x6 = levelplot(t(-log10(p)),col.regions = col0, xlab='to gene', ylab='from gene', main='-log10(fdr low express score)',
                 scales=list(x=list(rot=90),alternating=1),at=seq(0, max.logp, length.out=n.color))
  # Pairwise correlation panel (upper triangle only; built but not printed below).
  R = s2d$cor; R[lower.tri(R)] <- NA; diag(R) <- NA
  x3 = levelplot(t(R),col.regions = col, xlab='gene A', ylab='gene B', main='pairwise correlation',
                 scales=list(x=list(rot=90),alternating=1))
  # Mean expression (+/- sd) and the Andersen CS barchart.
  x2 = error.bar(s2d$mu, err = s2d$sd, ylab='average expression', main='');
  x2.2 = barchart(s2d$CS~factor(names(s2d$CS), levels = names(s2d$CS)), ylab=paste('Andersen CS', s2d$cor.method), scales = list(x = list(draw = FALSE)), main= tag);
  # Per-gene expression profiles across samples, colored by gene order.
  f = mat2xyz(s2d$geMat, sym=F)
  ng = nrow(s2d$geMat)
  x1 = xyplot(z~y, group=x, data=f,type='l', scales=list(x=list(rot=90),alternating=1), par.settings = list(superpose.line = list(lwd = 3)), col=greenred(ng), at = seq(1, ng, length = ng), xlab='sample',ylab='expression',
              panel = function(...) {
                panel.text(1, max(s2d$geMat,na.rm =T), "color maps to gene order", pos=4)
                panel.xyplot(...)
              })
  x1.2 = heatmap.lattice(s2d$geMat, top = F, col.regions = col)
  # Column-centered expression heatmap with a symmetric color range.
  x = scale(t(s2d$geMat),center = T,scale = F); m = max(abs(x))
  x1.3 = levelplot(x, scales = list(x = list(rot = 90),alternating=1),xlab='', ylab='',
                   colorkey = F, at = seq(-m, m, length = 32),
                   aspect = 'fill', col.regions = col)
  # Compose the page; split=c(col,row,ncol,nrow) places each panel.
  if (no.fdr){
    print(x1.3, split=c(1,1,3,1), newpage=T)
    print(x1.2, split=c(3,1,3,1), newpage=F)
    print(x2, split=c(2,2,3,2), newpage=F)
    print(x2.2, split=c(2,1,3,2), newpage=F)
    # print(x3, split=c(3,1,3,2), newpage=F)
  }else{
    print(x1.3, split=c(1,1,3,2), newpage=T)
    print(x1.2, split=c(3,1,3,2), newpage=F)
    print(x2, split=c(2,2,3,4), newpage=F)
    print(x2.2, split=c(2,1,3,4), newpage=F)
    # print(x3, split=c(3,1,3,2), newpage=F)
    print(x4, split=c(1,2,3,2), newpage=F)
    print(x5, split=c(2,2,3,2), newpage=F)
    print(x6, split=c(3,2,3,2), newpage=F)
  }
  k = ncol(s2d$fdr.unsigned)
  # trellis.focus("panel",column = 1,row=1)
  # panel.text(cex=1, x=(1:k), y=(1:k), labels=rownames(s2d$fdr.unsigned), xpd=TRUE, srt=0, pos=1)
  # trellis.unfocus()
  # print(lattice::levelplot(t(s2d$s.signed),col.regions = bluered(32), xlab='from', ylab='to', main='siged cluster score',
  # scales=list(x=list(rot=90))))
  # print(lattice::levelplot(t(s2d$s.unsigned),col.regions = bluered(32), xlab='from', ylab='to', main='unsiged cluster score',
  # scales=list(x=list(rot=90))))
}
# Parse a GFF file (gff1/gff2/gff3) into a GenomicData object.
# Vendored/modified copy of rtracklayer's GFF import to handle a quotation
# mark bug; original version:
# https://github.com/genome-vendor/r-bioc-rtracklayer/blob/master/R/gff.R
# NOTE(review): elementLengths() was deprecated and renamed elementNROWS()
# in newer S4Vectors releases - may need updating for current Bioconductor.
# Args:
#   con:          file path or connection to the GFF file.
#   format:       'gff1'/'gff2'/'gff3'; default inferred from file extension.
#   genome:       genome identifier forwarded to GenomicData().
#   asRangedData: forwarded to GenomicData().
#   colnames:     optional subset of attribute/extra columns to keep.
#   which:        accepted for API compatibility - unused in this body.
#   feature.type: optional filter on the GFF 'type' column.
# 20140502
# Yong Fuga Li
read.gff3 <- function(con, format=sub('^.*\\.([^\\.]*$)', '\\1', con),
                      genome = NA, asRangedData = F, colnames = NULL,
                      which = NULL, feature.type = NULL){
  require('rtracklayer')
  lines <- readLines(con, warn = FALSE) # unfortunately, not a table
  lines <- lines[nzchar(lines)]
  ## strip comments
  notComments <- which(substr(lines, start=1L, stop=1L) != "#")
  lines <- lines[notComments]
  ### TODO: handle ontologies (store in RangedData)
  ## strip FASTA sequence (everything from the first '>' header onward)
  fastaHeaders <- which(substr(lines, start=1L, stop=1L) == ">")
  if (length(fastaHeaders))
    lines <- head(lines, fastaHeaders[1] - 1)
  ## construct table: 9 standard GFF columns, attributes optional
  fields <- c("seqname", "source", "type", "start", "end", "score",
              "strand", "phase", "attributes")
  linesSplit <- strsplit(lines, "\t", fixed=TRUE)
  fieldCounts <- elementLengths(linesSplit)
  if (any(fieldCounts > length(fields)) ||
      any(fieldCounts < (length(fields) - 1)))
    stop("GFF files must have ", length(fields),
         " tab-separated columns")
  # Rows with 9 fields carry an attributes column; 8-field rows get "".
  haveAttr <- fieldCounts == length(fields)
  data <- unlist(linesSplit[haveAttr], use.names=FALSE)
  if (is.null(data))
    data <- character(0)
  haveAttrMat <- matrix(data, ncol=length(fields), byrow=TRUE)
  data <- unlist(linesSplit[!haveAttr], use.names=FALSE)
  if (is.null(data))
    data <- character(0)
  noAttrMat <- matrix(data, ncol=length(fields)-1L, byrow=TRUE)
  noAttrMat <- cbind(noAttrMat, rep.int("", nrow(noAttrMat)))
  table <- rbind(noAttrMat, haveAttrMat)
  colnames(table) <- fields
  if (!is.null(feature.type))
    table <- table[table[,"type"] %in% feature.type,,drop=FALSE]
  ## handle missings: '.' is the GFF missing-value marker
  table[table == "."] <- NA_character_
  attrCol <- table[,"attributes"]
  if (format=='gff3') {
    # GFF3 values are percent-encoded; decode everything but the raw
    # attributes column (decoded per-tag below).
    table <- table[,setdiff(colnames(table), "attributes"),drop=FALSE]
    table[table[,"strand"] == "?","strand"] <- NA_character_
    is_not_NA <- !is.na(table)
    table[is_not_NA] <- urlDecode(table[is_not_NA])
  }
  table[is.na(table[,"strand"]),"strand"] = '*'
  # Extra (non-range) columns kept as metadata, coerced to proper types.
  extraCols <- c("source", "type", "score", "strand", "phase")
  if (!is.null(colnames))
    extraCols <- intersect(extraCols, colnames)
  xd <- as(table[,extraCols,drop=FALSE], "DataFrame")
  if (!is.null(xd$phase))
    xd$phase <- as.integer(as.character(xd$phase))
  if (!is.null(xd$strand))
    xd$strand <- strand(xd$strand)
  if (!is.null(xd$score))
    suppressWarnings(xd$score <- as.numeric(as.character(xd$score)))
  ## parse the attributes column (unless only extraCols were requested)
  if (is.null(colnames) || length(setdiff(colnames, extraCols))) {
    if (format=='gff1') {
      # GFF1: the whole 9th column is a single 'group' factor.
      if (is.null(colnames) || "group" %in% colnames)
        attrList <- list(group = factor(attrCol,
                                        levels=unique(attrCol)))
      else attrList <- list()
    } else {
      # GFF2/GFF3: attributes are ';'-separated tag/value pairs.
      attrSplit <- strsplit(attrCol, ";", fixed=TRUE)
      attrs <- unlist(attrSplit, use.names=FALSE)
      lines <- rep.int(seq_len(length(attrSplit)),
                       elementLengths(attrSplit))
      attrs <- sub(" *$", "", sub("^ *", "", attrs))
      if (format=='gff3') {
        # GFF3 uses 'tag=value'.
        equals.pos <- regexpr("=", attrs, fixed=TRUE)
        if (any(equals.pos == -1L))
          stop("Some attributes do not conform to 'tag=value' format")
        tags <- substring(attrs, 1L, equals.pos - 1L)
        vals <- substring(attrs, equals.pos + 1L, nchar(attrs))
      } else { # split on first space (FIXME: not sensitive to quotes)
        tags <- sub(" .*", "", attrs) # strip surrounding quotes
        vals <- sub("^\"([^\"]*)\"$", "\\1",
                    sub("^[^ ]* ", "", attrs))
      }
      if (!is.null(colnames)) {
        keep <- tags %in% colnames
        lines <- lines[keep]
        vals <- vals[keep]
        tags <- urlDecode(tags[keep])
      }
      # Gather values per tag, keeping track of which GFF line each came from.
      tags <- factor(tags, levels=unique(tags))
      lineByTag <- split(lines, tags)
      valByTag <- split(vals, tags)
      ## FIXME: Parent, Alias, Note, DBxref,
      ## Ontology_term are allowed to have multiple
      ## values. We should probably always return them as a
      ## CharacterList.
      multiTags <- c("Parent", "Alias", "Note", "DBxref",
                     "Ontology_term")
      # Build one metadata column per tag; multi-valued GFF3 tags become
      # CharacterList/NumericList, others plain vectors with NA fill.
      attrList <- sapply(names(lineByTag), function(tagName) {
        vals <- valByTag[[tagName]]
        if (format=='gff3' &&
            (any(grepl(",", vals, fixed=TRUE)) ||
             tagName %in% multiTags)) {
          vals <- CharacterList(strsplit(vals, ",", fixed=TRUE))
          vals <- relist(urlDecode(unlist(vals)), vals)
          coerced <- suppressWarnings(as(vals, "NumericList"))
          if (!any(any(is.na(coerced))))
            vals <- coerced
          vec <- as(rep.int(list(character()), nrow(table)),
                    class(vals))
        } else {
          coerced <- suppressWarnings(as.numeric(vals))
          if (!any(is.na(coerced)))
            vals <- coerced
          if (format=='gff3')
            vals <- urlDecode(vals)
          vec <- rep.int(NA, nrow(table))
        }
        vec[lineByTag[[tagName]]] <- vals
        vec
      }, simplify = FALSE)
    }
    xd <- DataFrame(xd, attrList)
  }
  # Assemble the final ranges + metadata object.
  end <- as.integer(table[,"end"])
  GenomicData(IRanges(as.integer(table[,"start"]), end),
              xd, chrom = table[,"seqname"], genome = genome,
              seqinfo = attr(con, "seqinfo"),
              asRangedData = asRangedData)
}
# Percent-decode URL-encoded text (vectorized over `str`) via
# RCurl::curlUnescape(); used by read.gff3() to decode GFF3 fields.
# FIX(review): the original used require('RCurl'), which merely returns
# FALSE when the package is missing, leading to a confusing
# "could not find function" error later. requireNamespace() plus a
# namespaced call fails fast with a clear message and avoids attaching
# RCurl to the search path.
urlDecode <- function(str)
{
  if (!requireNamespace('RCurl', quietly = TRUE))
    stop("Package 'RCurl' is required by urlDecode()", call. = FALSE)
  RCurl::curlUnescape(str)
}
#' Compute promoter statistics for A. nidulans: gene-gene distances,
#' gene-orientation patterns, and k-mer motif enrichment in promoters of
#' secondary-metabolism (NP) genes versus shifted "random" promoters.
#' Side effects: writes several PDF plots, two promoter FASTA files and a
#' motif-association table into the working directory.
#'
#' @param gff.file GFF annotation file for the genome.
#' @param DNA.fasta.file Genome chromosome sequences (FASTA).
#' @param window.promoter c(upstream, downstream) window around the gene
#'   start used as the promoter region.
#' @param k k-mer (motif) length.
#' @param n.top.motifs Number of top enriched motifs used for annotation.
#' @param tag Suffix used in all output file names.
#' Yong Fuga Li, 20140606
promoter.statistics <- function(gff.file="A_nidulans_FGSC_A4_current_features.gff",
DNA.fasta.file="A_nidulans_FGSC_A4_current_chromosomes.fasta",
window.promoter = c(-4000, 1000), k=8, n.top.motifs=10,
tag='window4k1k.8mer'){
# k: k-mer size
require(Biostrings)
require(markovchain)
require(IRanges)
require(ggplot2)
require('TTR') # 20140527
# read gff; keep only 'gene' features, sorted by chromosome/position
# NOTE(review): gff.format is computed but unused since the switch to
# import.gff (20160502) — presumably kept for reference.
gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
# anno = read.gff3(gff.file, format=gff.format)
anno = import.gff(gff.file) # 20160502
idx.gene = (anno$type=='gene')
anno = anno[idx.gene, ]
anno = sort.intervals(anno)
n = length(anno)
# read fasta
fa = import(DNA.fasta.file,format='fasta')
# are gene orientations independent? n-gram stats over the strand sequence
s4 = substr.stats(anno@strand, anno@seqnames)
# are NP (secondary metabolism) gene orientations different?
is.SM = regexpr(pattern='secondary metab', text = as.character(as.vector(anno$Note)), perl=T)>0 # 20140519
sum(is.SM)
s4.SM = substr.stats(anno@strand, anno@seqnames, is.SM)
# bar plots of orientation 1- to 4-gram frequencies, all genes vs NP genes
pdf(paste('geneOrientation.pdf',sep=''),width=5,height=3.5)
for (i in 1:4){
da = rbind(data.frame(x=names(s4[[i]]), y=s4[[i]]/sum(s4[[i]])*100,genes='all'),
data.frame(x=names(s4.SM[[i]]), y=s4.SM[[i]]/sum(s4.SM[[i]])*100,genes='NP'))
q = ggplot(data=da, aes(x = x, y=y, by=genes, fill=genes))+geom_bar(stat='identity',position='dodge')+
xlab('orientation')+ylab('%')+theme(axis.text.x = element_text(angle=90, vjust=1))
print(q)
}
dev.off()
# intergene region lengths: for {+/+, -/-} vs {+/-, -/+} intergenic regions
inter.dist = get.intergene.dist(anno, cutoff=10000)
inter.dist.SM = get.intergene.dist(anno[is.SM], cutoff=10000)
gene.len = end(anno@ranges) - start(anno@ranges)
pdf(paste('intergeneDist.pdf', sep=''))
print(hist.by(inter.dist$dist, as.factor(inter.dist$type), by.name='gene orientations',hist=T,xlab='distance',main='all'))
print(hist.by(inter.dist$dist, as.factor(inter.dist$type.brief), by.name='gene orientations', hist=F,xlab='distance',main='all'))
print(hist.by(inter.dist$dist, as.factor(inter.dist$type.brief), by.name='gene orientations', hist=T,xlab='distance',main='all'))
print(hist.by(inter.dist.SM$dist, as.factor(inter.dist.SM$type), by.name='gene orientations',hist=T,xlab='distance',main='NPGC'))
print(hist.by(inter.dist.SM$dist, as.factor(inter.dist.SM$type.brief), by.name='gene orientations', hist=F,xlab='distance',main='NPGC'))
print(hist.by(inter.dist.SM$dist, as.factor(inter.dist.SM$type.brief), by.name='gene orientations', hist=T,xlab='distance',main='NPGC'))
hist.by(gene.len,by=is.SM,by.name='NP gene',main='gene length')
dev.off()
# motif finding around known NPGCs; strip FASTA descriptions so chromosome
# names match anno@seqnames
names(fa) = sub(pattern='^([^ ]*) .*$', replacement='\\1',names(fa))
fa.promoter = get.promoter.seq(fa, anno[is.SM],k=window.promoter);
SM.mstats = motif.stats(fa.promoter, l=k)
# "random" background promoters: NP gene indices circularly shifted by
# n.shift genes (two different shifts give two independent backgrounds)
n.shift = 100; is.SM.rand = (which(is.SM)+n.shift-1)%%n+1 # random promoters
fa.promoter.rand = get.promoter.seq(fa, anno[is.SM.rand],k=window.promoter);
n.shift = 191; is.SM.rand = (which(is.SM)+n.shift-1)%%n+1 # random promoters
fa.promoter.rand2 = get.promoter.seq(fa, anno[is.SM.rand],k=window.promoter);
SM.mstats.rand = motif.stats(fa.promoter.rand, l=k)
SM.mstats.rand2 = motif.stats(fa.promoter.rand2, l=k)
write.fasta(fa.promoter, paste('A.nidulans.NPG.promoter.',tag,'.fa',sep=''))
write.fasta(fa.promoter.rand, paste('A.nidulans.rand.promoter.',tag,'.fa',sep=''))
# enrichment of NP-promoter k-mers over each random background
out = motif.comp(SM.mstats, SM.mstats.rand)
out2 = motif.comp(SM.mstats, SM.mstats.rand2)
# msets = motif.find(fa) # learn a motif sets
# annotate promoters with the top enriched motifs ('fitered' spelling is the
# actual list-element name returned by motif.comp)
m.anno = motif.annotate(fa.promoter, msets=out$fitered[1:n.top.motifs,]) # annotate sequences by motif sets
m.anno.rand = motif.annotate(fa.promoter.rand, msets=out$fitered[1:n.top.motifs,]) # annotate sequences by motif sets
# heatmaps: motif presence/absence and mean motif location per promoter
pdf(paste('motifClust',tag,'.pdf',sep=''),width=4,16)
heatmap.quick.geMat(t((m.anno$count[,colSums(m.anno$count)>0]>0)+0),centering=F,id.type='symbol', distfun=dist,sd.cutoff=-1, lhei=c(1, 14), margins=c(9,5))
heatmap.quick.geMat(t(m.anno$loc.average[,colSums(m.anno$count)>0]),centering=F,id.type='symbol', distfun=dist,sd.cutoff=-1, lhei=c(1, 14), margins=c(9,5))
dev.off()
# motif positional distribution relative to the CDS start; the dashed line
# is the expected uniform-background level
pdf(paste('motifLocation',tag,'.pdf',sep=''))
nbins = floor(sqrt(sum(m.anno$loc.average>0)))
hist(m.anno$loc.average[m.anno$loc.average>0]+window.promoter[1],xlab='distance to CDS',ylab='#Motifs',breaks=nbins,main='NPGC')
abline(h=(diff(window.promoter)-k+2)/nbins/2^k*n.top.motifs, col='grey', lty='dashed')
hist(m.anno.rand$loc.average[m.anno.rand$loc.average>0]+window.promoter[1],xlab='distance to CDS',ylab='#Motifs',breaks=nbins,main='rand')
abline(h=(diff(window.promoter)-k+2)/nbins/2^k*n.top.motifs, col='grey', lty='dashed')
dev.off()
# pairwise co-occurrence of the top k-mers across promoters (Fisher tests)
motif.associations = asso.FET(t(m.anno$count>0)+0) # testing associations among the k-mers
write.table(motif.associations,file=paste('motif.association.', tag, '.xls',sep=''), col.names=NA, sep='\t')
}
#' Distances and strand-orientation classes of adjacent gene pairs.
#'
#' For each pair of consecutive features in `anno` (assumed sorted; see
#' sort.intervals), compute start(next) - end(previous) and the two-character
#' orientation code ('++', '+-', '-+', '--'). Pairs with |distance| >= cutoff
#' are dropped (these usually span chromosome boundaries).
#'
#' Fixes: removed the unused local `ii`; guarded the degenerate case of
#' fewer than two features, which previously produced invalid negative
#' indexing via 1:(n-1).
#'
#' @param anno Sorted annotation object with @ranges and @strand slots.
#' @param cutoff Keep only pairs with absolute distance below this value.
#' @return list(dist, type, type.brief) where type.brief collapses
#'   same-strand ('++,--') and opposite-strand ('+-,-+') classes.
get.intergene.dist <- function(anno, cutoff){
  # 20140607
  n = length(anno)
  if (n < 2)
    return(list(dist = integer(0), type = character(0), type.brief = character(0)))
  strands = as.vector(anno@strand)
  intergene.dist = start(anno@ranges)[2:n] - end(anno@ranges)[1:(n-1)]
  intergene.type = paste(strands[1:(n-1)], strands[2:n], sep='')
  # drop pairs that are implausibly far apart (e.g. across chromosomes)
  to.keep = (abs(intergene.dist) < cutoff)
  intergene.dist = intergene.dist[to.keep]
  intergene.type = intergene.type[to.keep]
  # collapse the four orientation codes into same-strand vs divergent/convergent
  intergene.type2 = intergene.type
  intergene.type2[intergene.type2 %in% c('++', '--')] = '++,--'
  intergene.type2[intergene.type2 %in% c('+-', '-+')] = '+-,-+'
  return(list(dist=intergene.dist, type=intergene.type, type.brief=intergene.type2))
}
#' Annotate promoter sequences with a fixed motif set.
#'
#' For every motif (rownames of `msets`) and every sequence in `fa.promoter`,
#' record the number of (case-insensitive, non-overlapping) matches and the
#' mean match start position (0 when the motif is absent).
#'
#' @param fa.promoter Named list of sequence strings; names are split on '|'
#'   and the first token becomes the column name.
#' @param msets Matrix whose rownames are the motif strings (default pulls
#'   from a global `out`, kept for backward compatibility).
#' @return list(count = motif x sequence match counts,
#'   loc.average = motif x sequence mean match positions).
motif.annotate <- function(fa.promoter, msets=out$fitered[1:10,]){
  motif.ids = rownames(msets)
  seq.ids = vapply(strsplit(names(fa.promoter), split='\\|'),
                   function(parts) parts[1], character(1))
  hit.count = matrix(0, nrow=nrow(msets), ncol=length(fa.promoter),
                     dimnames=list(motifs=motif.ids, seqs=seq.ids))
  hit.meanloc = hit.count
  for (m in seq_len(nrow(msets))) {
    # gregexpr returns -1 per sequence when there is no match
    hits = gregexpr(motif.ids[m], fa.promoter, ignore.case=TRUE)
    hit.meanloc[m, ] = vapply(hits, function(pos) {
      pos = pos[pos > 0]
      if (length(pos) > 0) mean(pos) else 0
    }, numeric(1))
    hit.count[m, ] = vapply(hits, function(pos) sum(pos > 0), numeric(1))
  }
  return(list(count=hit.count,
              loc.average=hit.meanloc))
}
#' Extract promoter sequences around gene starts, strand-aware.
#'
#' For each 'gene' feature, take the window [start+k[1], start+k[2]] on '+'
#' genes, or the reverse complement of [end-k[2], end-k[1]] on '-' genes,
#' clipped to the chromosome boundaries.
#'
#' Fix: `1:length(anno)` iterated over c(1, 0) and errored when `anno` was
#' empty; replaced with seq_len so an empty annotation returns an empty list.
#'
#' @param fa Named list/XStringSet of chromosome sequences; names must match
#'   anno@seqnames.
#' @param anno Annotation object ('gene' rows are used; others skipped).
#' @param k c(upstream, downstream) offsets relative to the gene start.
#' @return Named list of promoter strings, keyed by "ID|Note".
#' Yong Fuga Li 20140606
get.promoter.seq <- function(fa, anno, k = c(-2000,500)){
  require(Biostrings)
  fa.promoter = list()
  strands = as.vector(anno@strand)
  for (i in seq_len(length(anno))){
    if (anno$type[i]!='gene') # only use gene features
      next
    chr = as.character(anno@seqnames[i])
    chr.len = length(fa[[chr]])
    if (strands[i]=='+'){
      anchor = start(anno[i])
      pseq = substr(fa[[chr]], max(1, anchor+k[1]), min(anchor+k[2], chr.len))
    }else{
      # '-' strand: mirror the window around the gene end, then revcomp
      anchor = end(anno[i])
      pseq = reverseComplement(substr(fa[[chr]], max(1, anchor-k[2]), min(anchor-k[1], chr.len)))
    }
    fa.promoter[[paste(anno$ID[i], '|', anno$Note[i], sep='')]] = as.character(pseq)
  }
  return(fa.promoter)
}
#' N-gram statistics over a symbol sequence (used on gene strand vectors).
#'
#' Counts unique 1- to 4-grams of `s`, plus the distribution of the number
#' of symbol transitions within sliding windows of size 2..10.
#'
#' NOTE(review): the `chr` argument is accepted but never used — presumably
#' intended for per-chromosome stratification; kept for interface
#' compatibility with existing callers.
#'
#' @param s Symbol sequence (e.g. a strand Rle/vector of '+'/'-').
#' @param chr Unused.
#' @param filter Optional logical/index filter applied to `s` first.
#' @return Unnamed list: 1-gram, 2-gram, 3-gram, 4-gram counts, then a list
#'   of transition-count distributions for window sizes '2'..'10'.
substr.stats <- function(s, chr, filter=NULL){
  if (!is.null(filter))
    s = s[filter]
  sym = as.vector(s)
  n = length(s)
  pair   = paste(sym[1:(n-1)], sym[2:n], sep='')
  triple = paste(sym[1:(n-2)], sym[2:(n-1)], sym[3:(n)], sep='')
  quad   = paste(sym[1:(n-3)], sym[2:(n-2)], sym[3:(n-1)], sym[4:(n)], sep='')
  counts1 = unique.count(sym)$counts.unique
  counts2 = unique.count(pair)$counts.unique
  counts3 = unique.count(triple)$counts.unique
  counts4 = unique.count(quad)$counts.unique
  d = list()
  uc.d = list()
  # 0/1 indicator of a strand change between consecutive symbols
  d[['2']] = (sym[1:(n-1)] != sym[2:n])+0
  uc.d[['2']] = unique.count(d[['2']])$counts.unique
  for (w in 3:10){ # number of transitions in window size w
    d[[paste(w)]] = as.vector(runsum(Rle(d[['2']]), w-1))
    uc.d[[paste(w)]] = unique.count(d[[paste(w)]])$counts.unique
  }
  return(list(counts1, counts2, counts3, counts4, uc.d))
}
#' Count k-mers across promoter sequences and test each against a
#' nucleotide-frequency background (binomial model).
#'
#' BUG FIX: the background-probability loop previously did
#' `p.motifs = prod(...)`, overwriting the whole vector with a scalar, so
#' every motif was tested with the background probability of the LAST motif.
#' It now assigns per element: `p.motifs[i] = prod(...)`.
#'
#' @param fa.promoter Named list of promoter sequence strings.
#' @param l Motif (k-mer) length.
#' @return Matrix ordered by enrichment p-value with columns
#'   motif.count, p.value (enrichment), p.value.deplete (depletion).
#' Yong Fuga Li, 20140606
motif.stats <- function(fa.promoter, l = 8){
  mstats = c()
  nt.freq = c() # nucleotide frequencies
  for (i in seq_along(fa.promoter)){
    mstats = sum.union(mstats, unique.count.substr(fa.promoter[[i]], l))
    nt.freq = sum.union(nt.freq, unique.count.substr(fa.promoter[[i]], 1))
  }
  # add-one smoothing so unseen nucleotides get non-zero probability
  nt.freq = (nt.freq+1)/sum(nt.freq+1)
  p.motifs = mstats
  motifs = names(mstats)
  for (i in seq_along(p.motifs)){ # motif probability under the 0-order model
    p.motifs[i] = prod(nt.freq[strsplit(motifs[i], '')[[1]]])
  }
  n = sum(mstats)
  # one-sided binomial tails: enrichment (p) and depletion (p.neg)
  p = 1-pbinom(mstats-1, size=n, prob=p.motifs)
  p.neg = pbinom(mstats, size=n, prob=p.motifs)
  out = cbind(motif.count=mstats, p.value=p, p.value.deplete=p.neg)
  out = out[order(out[,2], decreasing=FALSE),]
  cat(nt.freq)
  return(out)
}
#' Compare motif counts between NP promoters and a random background.
#'
#' Joins the two count columns (file-local cbind.union), then for each motif
#' computes binomial tail p-values for enrichment in SM (p) and depletion
#' in SM (p.deplete), with q-values from the file-local qvalue.2.
#'
#' NOTE(review): the returned element name 'fitered' (sic) is load-bearing —
#' callers (e.g. promoter.statistics) access out$fitered — so the spelling
#' must not be "fixed" without updating all call sites.
#'
#' @param SM.mstats,SM.mstats.rand Matrices from motif.stats; only the first
#'   column (motif.count) is used.
#' @return list(all = full table sorted by enrichment p-value,
#'   fitered = rows with q.deplete<0.1 or p<0.001, sorted likewise).
motif.comp <- function(SM.mstats, SM.mstats.rand){
# compare motif stats results
mstats = cbind.union(SM.mstats[,1],SM.mstats.rand[,1])
colnames(mstats) = c('SM', 'rand')
tot = colSums(mstats)
# enrichment in SM == the rand count being in the lower binomial tail
p = pbinom(mstats[,2],rowSums(mstats),prob=tot[2]/sum(tot),lower.tail=T)
q = qvalue.2(p)
# depletion in SM == the SM count being in the lower binomial tail
p.deplete = pbinom(mstats[,1],rowSums(mstats),prob=tot[1]/sum(tot),lower.tail=T)
q.deplete = qvalue.2(p.deplete)
out = cbind(mstats, fold =mstats[,1]/mstats[,2], p.value=p, p.deplete=p.deplete, q.value=q, q.deplete=q.deplete)
out.cut = out[q.deplete<0.1 | p<0.001,]
# column 4 is p.value: sort most-enriched motifs first
out.cut = out.cut[order(out.cut[,4],decreasing=F),]
out = out[order(out[,4], decreasing=F),]
return(list(all = out, fitered=out.cut))
}
#' Count occurrences of every length-l substring (k-mer) of `string`.
#'
#' Builds the k-mer set by starting from the first character of each window
#' and appending one character column per iteration, then tallies with the
#' file-local unique.count helper.
#'
#' @param string A single character string.
#' @param l Substring length.
#' @return Named count vector (unique.count(...)$counts.unique).
unique.count.substr <- function(string, l){
  chars = strsplit(string, split='')[[1]]
  n = length(chars)
  kmers = chars[1:(n-l+1)] # first character of every length-l window
  for (j in seq2(from=2, to=min(l, n), by=1)){
    kmers = paste(kmers, chars[j:(n-l+j)], sep='')
  }
  return(unique.count(kmers)$counts.unique)
}
#' Sort genome intervals by chromosome and position (optionally strand).
#'
#' Applies successive stable orderings, so the LAST key applied dominates:
#' first by ranges, then by chromosome name (ranges break ties), and, when
#' do.strand is TRUE, finally by strand.
#'
#' @param anno Annotation object with @ranges, @seqnames, @strand slots.
#' @param do.strand Also order by strand as the outermost key?
#' @return The reordered annotation object.
#' Yong Fuga Li, 20140606
sort.intervals <- function(anno, do.strand=F){
  # by location
  ord = order(anno@ranges)
  anno = anno[ord, ]
  # by chromosome name (stable: keeps the positional order within each chr)
  ord = order(as.character(anno@seqnames))
  anno = anno[ord, ]
  # optionally by strand
  if (do.strand){
    ord = order(anno@strand)
    anno = anno[ord, ]
  }
  return(anno)
}
#' Sort a GFF file's features and write them to a new GFF3 file.
#'
#' Used in analysis.KU2015.RORA.R to handle unsorted features from GenBank
#' (20150916). The default output name inserts '_sorted' before the
#' extension. NOTE(review): `format` is currently unused (import.gff
#' auto-detects); kept for interface compatibility.
#'
#' @param gff.file Input GFF path.
#' @param format Nominal input format (unused).
#' @param out.file Output path; defaults to <name>_sorted.<ext>.
#' @param do.strand Passed through to sort.intervals.
#' @return The output file path.
sort.gff <- function(gff.file, format = 'gff3', out.file = sub('.([^\\.]+)$', '_sorted.\\1',gff.file), do.strand=F){
  features = import.gff(gff.file) # 20160502
  features = sort.intervals(features, do.strand = do.strand)
  export(features, out.file, format = 'gff3', append=F)
  return(out.file)
}
#' Read per-species annotations and ortholog groups, attach a per-gene
#' conservation score, and plot its distribution per species.
#'
#' Fixes:
#'  - `ortho` was referenced (ncol(ortho)) BEFORE it was read; the read is
#'    now first.
#'  - `is.SM` in the scoring loop was computed from a stale `anno` left over
#'    from the previous loop (always the last species); it now uses the
#'    current species' annotation.
#'  - pdf('conservation.pdf') was never closed; dev.off() added.
#'  - the inner column loop reused `i`, shadowing the outer species index.
#'
#' @param desc.file Tab file mapping species to their GFF files
#'   (columns `species`, `gff`).
#' @param ortholog.file Tab file of ortholog groups, one column per species.
#' @param root Data directory (informational; not applied here).
#' 20140604
read.orthologs <- function(desc.file = 'desc.txt',
                           ortholog.file = 'All_Species_Orthologs_by_Jaccard_clustering.txt',
                           root = '/Users/yongli/Universe/data/NPgenome/Aspergillus'){
  require('rtracklayer')
  require('genomeIntervals')
  require(lattice)
  # species -> gff file mappings
  desc = read.table(desc.file, header=T, sep='\t', as.is=T)
  # ortholog groups (must be read before n.species is derived from it)
  ortho = read.table(ortholog.file, header=T, sep='\t', as.is=T)
  n.species = ncol(ortho)-1
  # fraction of other species an ortholog group is present in:
  # in all species ==> 1, in one species ==> 0
  conservativity = (rowSums(ortho[,2:ncol(ortho)]!='')-1)/(n.species-1)
  # read all genome annotations (gene features only, sorted)
  anno.all = list()
  for (i in seq_along(desc$gff)){
    species = desc$species[i]
    gff.file = desc$gff[i]
    anno = import.gff(gff.file) # 20160502
    anno = anno[anno$type=='gene', ]
    anno = sort.intervals(anno) # 20140606
    rownames(anno) = anno$ID
    anno.all[[species]] = anno
  }
  # attach single-gene conservation levels to each species' annotation
  pdf('conservation.pdf')
  for (i in seq_along(desc$gff)){
    species = desc$species[i]
    gs = ortho[, species]
    ID2i = seq_len(length(anno.all[[species]]))
    names(ID2i) = anno.all[[species]]$ID
    rownames(anno.all[[species]]) = anno.all[[species]]$ID
    anno.df = as.data.frame(anno.all[[species]])
    # use the CURRENT species' Note column (was a stale `anno` before)
    is.SM = regexpr(pattern='secondary metab',
                    text = as.character(as.vector(anno.all[[species]]$Note)), perl=T)>0 # 20140519
    # flatten list columns so they can be assigned/row-indexed below
    for (cc in seq_along(anno.df)){
      if (class(anno.df[[cc]])!='integer')
        anno.df[[cc]] = unlist2(anno.df[[cc]])
    }
    for (g in seq_along(gs)){
      if (gs[[g]]=='')
        next
      idx = ID2i[strsplit(gs[[g]], split='\\|')[[1]]]
      idx = idx[!is.na(idx)]
      anno.df[idx,'conservativity'] = conservativity[g]
    }
    # genes absent from every ortholog group count as non-conserved
    anno.df[is.na(anno.df[,'conservativity']),'conservativity'] = 0
    hist(anno.df$conservativity, main=species)
    hist.by(anno.df$conservativity, is.SM, by.name='NP gene', hist=T,
            binwidth=0.05, xlab='conservativity')
    anno.all[[species]] = anno.df
  }
  dev.off() # close conservation.pdf (was leaked before)
  # TODO: neighbor-context conservation
  # TODO: output enzyme clusters with the conservation information
  # TODO: analyze conservation information of known NPGCs
  # TODO: ortholog gene-adjacency graph (nodes = ortholog groups,
  #       edges = adjacency in each species); discovery of in-dels & HGT
}
# Placeholder/stub: intended to build the ortholog gene-adjacency graph
# (nodes = ortholog groups, edges = gene adjacency within each species;
# see the TODO notes in read.orthologs). Not yet implemented; returns NULL.
ortholog.graph <- function(){
}
#' Convert a CSV file to a tab-separated file alongside it (.csv -> .tab).
#'
#' Values are written unquoted, without row names; the header row is kept.
#'
#' @param csv.file Path to the input CSV file.
csv2tab <- function(csv.file){
  tab.file = sub('\\.csv$', '.tab', csv.file)
  contents = read.csv(csv.file, header=T)
  write.table(contents, tab.file, quote=F, sep='\t', row.names=F)
}
summerize.cluster <- function(s2d, gene.range = NULL, extra.nt=2500, all.proteins = NULL, anno = NULL, gff.file = NULL, #"/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_features.gff",
bam.file=NULL,unmapped.bam.file=NULL,
swiss.db = 'swissprot', swiss.fasta.file = paste('/Users/yongli/Universe/data/blastdb/',swiss.db, '.fasta', sep=''), genome.db=NULL,
DNA.fasta.file='/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_chromosomes.fasta',
prot.fasta.file = 'A_nidulans_FGSC_A4_current_orf_trans_all.fasta',
iprscan.tab.file='A_nidulans_FGSC_A4_iprscan.out.txt',
prot.seq = read.fasta(prot.fasta.file, type='AA'),
ipr.anno = iprscan.flat(iprscan.tab.file), multialn.method = 'mafft',
intergenic.evidence = T, # 20160805
tag=deparse(substitute(s2d)), no.top.hits = 5, RORA.iteration=2, RORA.topOnly=T, plotLogo=F, species=NULL, use.RNAseq.exonpart=T,
minintronlen = 15, maxintronLen = 5000, # to be consistent with tophat2 parameters used for A fumigatus: RNAseq_mapping.sh
do.blast=T, do.tblastx=F, extract.bam=!is.null(bam.file),
gene.definition = 'gene',
geneID2cdsID = geneID2cdsID,# 20141003
version = 3 # 20160818, version 3 add start, stop codon, and intergenic region evidences, it assigns different priorities to evidences of different confidence levels
# blastp.xml.file = '',# aln.cerevisiae.file='', aln.albicans.file='', aln.NCrassa.file='', aln.fischeri.file='', aln.self.file='', aln.sp.file='',
){# root = '/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/Annotation'){
# 20140801
# 20141003: add automated blast search
# 20141114: add anno, gene all.genes, add DNA blast searches, and RNA-seq bam reads extraction
# all.proteins: protein IDs with gene names as names
# setwd(root)
# 20141215: modify RORA to RORA.iteration
# dir.create(gene.range[3])
# setwd(gene.range[3])
# 20160805: add intergenic.evidence
if (RORA.iteration>0)
system('VBoxHeadless -s BioLinux7&')
# system('VBoxManage controlvm BioLinux7 poweroff')
if (!is.null(all.proteins)){
genes = intersect2(sub("transcript:","",all.proteins), rownames(prot.seq))
}else{
genes = intersect(s2d$all.gene.anno, rownames(prot.seq)) # 20141212
}
if (!is.null(s2d)){
CS = round(s2d$CS[genes],2); express = round(s2d$mu[genes],2)
}else{
CS <- express <- rep('|', times = length(genes)); names(CS) <- names(express) <- genes;
}
# gene2protein = learn.gff.ID.mapping(genes, )
if ('anno' %in% colnames(prot.seq)){
desc = prot.seq[genes, 'anno'];
}else{
desc = '';
}
desc = sub('^.*amino acids\\) (.+)$', '\\1', desc)
# extract gff sub set
locs = geneRanges2ntRanges(anno, gene.range, extra.nt)
gff.sub.file = paste(tag, '.gff', sep='')
gff.subset(gff.file, locs, out.file=gff.sub.file, format = 'gff3', shift = F)
# get blast result
fasta.file = paste(tag, '.fasta', sep='');
prot.seq = prot.seq[genes, ]; prot.seq[, 'name'] = names(genes); rownames(prot.seq) = names(genes)
write.fasta(prot.seq, fasta.file)
if (is.null(ipr.anno) || length(ipr.anno)==0 || is.na(ipr.anno)){
ipr.anno = vector('character', length = length(genes))
names(ipr.anno) = genes
}
out = cbind('protein seq' = prot.seq[names(genes), 'seq'], name=prot.seq[names(genes), 'name'], length = sapply(prot.seq[names(genes), 'seq'],nchar), Existing.Anno = desc, domains = ipr.anno[genes], CS = CS, express = express)
if (do.blast){
if (RORA.iteration >0){
no.top.hits1 = 100L
}else{
no.top.hits1 = no.top.hits
}
no.top.hits2 = 100000L
Sys.setenv(BLASTDB='/Users/yongli/Universe/data/blastdb/')
blastp.asn.file = paste(tag, '_swissprot.asn', sep='');
blastp.xml.file = paste(tag, '_swissprot.xml', sep='');
blastp.hitList = paste(tag, '_swissprot.list', sep='')
blastp.hitTab = paste(tag, '_swissprot.tab', sep='')
blastp.hitFasta = paste(tag,'_blastp.fasta', sep='')
if (!file.exists(blastp.xml.file) | RORA.iteration>0){
cat('Blast seaerch', tag)
system(paste('blastp -query', fasta.file, '-num_threads 6 -db ', swiss.db, '-outfmt 11 -out', blastp.asn.file, '-evalue 1 -max_target_seqs ', no.top.hits1))
system(paste('blast_formatter -archive', blastp.asn.file, '-outfmt 5 -out', blastp.xml.file, '-max_target_seqs ', no.top.hits))
}
system(paste('blast_formatter -archive', blastp.asn.file, '-outfmt \'6 sseqid\' -out', blastp.hitList, '-max_target_seqs ', no.top.hits1))
# system(paste('formatting.pl -idlist ', blastp.hitList, ' -input ', swiss.fasta.file, ' -o ', blastp.hitFasta, sep=''))
# system(paste('blast_formatter -archive', blastp.asn.file, '-outfmt \'6 qseqid qframe qstart qend evalue qseq sseq sseqid sstart send\' -out', blastp.hitTab, '-max_target_seqs ', no.top.hits2))
# DNA-blast search ## get DNA sequence and perform DNA blast
DNA.seq = getDNA.subseq(DNA.fasta.file, locs = locs)
DNA.sub.fasta.file = paste(tag, 'DNA_subseq.fasta', sep='')
blastx.asn.file = paste(tag, 'DNA_subseq.swissprot.asn', sep='')
blastx.xml.file = paste(tag, 'DNA_subseq.swissprot.xml', sep='')
blastx.hitList = paste(tag, 'DNA_subseq.swissprot.list', sep='')
blastx.hitTab = paste(tag, 'DNA_subseq.swissprot.tab', sep='')
blastx.hitFasta = paste(tag,'_blastx.fasta', sep='')
blast.hitList = paste(tag, 'match.list', sep='')
blast.hitFasta = paste(tag,'match.fasta', sep='')
blast.AspG.asn.file = paste(tag, 'DNA_subseq.AspGenomes.asn', sep='')
blast.AspG.xml.file = paste(tag, 'DNA_subseq.AspGenomes.xml', sep='')
export(DNA.seq, con = DNA.sub.fasta.file, format = 'fasta')
cat('Genome Blast seaerch', tag, ' ',swiss.db,'\n')
Sys.setenv(BLASTDB='/Users/yongli/Universe/data/blastdb/')
if (!file.exists(blastx.xml.file) & !is.null(swiss.db) & RORA.iteration>0){
#if (!file.exists(blastx.asn.file))
system(paste('blastx -query', DNA.sub.fasta.file, '-db', swiss.db, '-num_threads 6 -outfmt 11 -out', blastx.asn.file, '-evalue 1 -max_target_seqs ', no.top.hits2))
#if (!file.exists(blastx.xml.file))
system(paste('blast_formatter -archive', blastx.asn.file, '-outfmt 5 -out', blastx.xml.file, '-max_target_seqs ', no.top.hits2))
# swissSeq = read.fasta(fasta.files = swiss.fasta.file, type = 'AA')
# swissHits = unique(read.table(blastx.hitList, header=F, as.is=T)[,1]);
# system(paste('cdbfasta ',swiss.fasta.file))
system(paste('blast_formatter -archive', blastx.asn.file, '-outfmt \'6 sseqid\' -out', blastx.hitList, '-max_target_seqs ', no.top.hits2))
# system(paste('formatting.pl -idlist ', blastx.hitList, ' -input ', swiss.fasta.file, ' -o ', blastx.hitFasta, sep=''))
# system(paste('blast_formatter -archive', blastx.asn.file, '-outfmt \'6 qseqid qframe qstart qend evalue qseq sseq sseqid sstart send\' -out', blastx.hitTab, '-max_target_seqs ', no.top.hits2))
# system(paste('rm ', blastx.asn.file)) # 20141125
system(paste('cat ', blastx.hitList, ' ', blastp.hitList, ' > ', blast.hitList, sep=''))
system(paste('formatting.pl -idlist ', blast.hitList, ' -input ', swiss.fasta.file, ' -o ', blast.hitFasta, sep=''))
}
if (do.tblastx){
cat('Genome Blast seaerch', tag, ' genome.db\n')
if (!file.exists(blast.AspG.xml.file) & !is.null(genome.db)){
#if (!file.exists(blast.AspG.asn.file))
system(paste('tblastx -query', DNA.sub.fasta.file, '-db', genome.db, '-num_threads 6 -outfmt 11 -out', blast.AspG.asn.file, '-evalue 1 -max_target_seqs ', no.top.hits2))
#if (!file.exists(blast.AspG.xml.file))
system(paste('blast_formatter -archive', blast.AspG.asn.file, '-outfmt 5 -out', tblastx.hitList, '-max_target_seqs ', no.top.hits2))
# system(paste('rm ', blast.AspG.asn.file)) # 20141125
}
}
}
######### match predicted proteins with existing protein models and renames predicted genes
score.file = ''
for (iteration in seq2(1,RORA.iteration,1)){
# extract bam
if (!is.null(bam.file) & extract.bam){
bam.out.file = bam.extract.shift(bam.file, locs, tag, shift=F)
}
# protein evidences: http://bioinf.uni-greifswald.de/bioinf/wiki/pmwiki.php?n=Augustus.IncorporateProteins
# using protein profiles --proteinprofile=filename: http://bioinf.uni-greifswald.de/augustus/binaries/tutorial/ppx.html
# RNA-tophat evidence: http://bioinf.uni-greifswald.de/bioinf/wiki/pmwiki.php?n=IncorporatingRNAseq.Tophat
# EST hits: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1810548/
# ESTs or assembled RNAseq transcripts: http://bioinf.uni-greifswald.de/bioinf/wiki/pmwiki.php?n=Augustus.IncorporateESTs
# Conservation:
## old approach AGRIPPA, http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1810548/
if (iteration == 1){
######### proMap scoring orignal proteins
pMap = blast2profile.PP(blast.asn.file = blastp.asn.file,
query.gff.file = gff.sub.file,
query.faa.file = fasta.file,
DNA.fasta.file = DNA.fasta.file,
geneID2cdsID=geneID2cdsID,
multialn.method = multialn.method, plot.width = 50, plotLogo =plotLogo,
db = swiss.fasta.file)
nSeq.file = paste('pMap_nSeq_', tag, '_', '', '.faa', sep='')
nSeq.naive.file = paste('pMap_nSeqNaive_', tag, '_', '', '.faa', sep='')
cSeq.long.file = paste('pMap_cSeqLong_', tag, '_', '', '.faa', sep='')
proMap.hint.file = paste(tag, '_proMap2hints.gff', sep='')
proMap.hint.all.file = paste(tag, '_proMap2hints_all.gff', sep='')
score.file = write.proMap(pMap, nSeq.file = nSeq.file, nSeq.naive.file = nSeq.naive.file, cSeq.long.file = cSeq.long.file, tag = tag, append=F, iteration = '')
proMap2hints(pMap, gff.file = gff.sub.file, out.file = proMap.hint.all.file, geneID2cdsID=geneID2cdsID, append=F, version = version)
proMap.Mosaichint.file = paste(tag, 'exonerate.nSeq.hints.gff', sep='')
system(paste('exonerate --model protein2genome --showtargetgff T -q ', nSeq.file, ' -t ', DNA.sub.fasta.file, ' > exonerate.nSeq.out', sep=''))
system(paste('exonerate2hints.pl --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=exonerate.nSeq.out --source=P --out=exonerate.hints', sep=''))
gff.unshift('exonerate.hints', proMap.Mosaichint.file)
system(paste('cat ', proMap.Mosaichint.file, ' >> ', proMap.hint.all.file, sep=''))
# proMap.hintNaive.file = paste(tag, 'exonerate.nSeqNaive.hints.gff', sep='')
# system(paste('exonerate --model protein2genome --showtargetgff T -q ', nSeq.naive.file, ' -t ', DNA.sub.fasta.file, ' > exonerate.nSeqNaive.out', sep=''))
# system(paste('exonerate2hints.pl --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=exonerate.nSeqNaive.out --source=M --out=exonerate.hints', sep=''))
# gff.unshift('exonerate.hints', proMap.hintNaive.file)
#
# proMap.hintcSeqLong.file = paste(tag, 'exonerate.cSeqLong.hints.gff', sep='')
# system(paste('exonerate --model protein2genome --showtargetgff T -q ', cSeq.long.file, ' -t ', DNA.sub.fasta.file, ' > exonerate.cSeqLong.out', sep=''))
# system(paste('exonerate2hints.pl --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=exonerate.cSeqLong.out --source=M --out=exonerate.hints', sep=''))
# gff.unshift('exonerate.hints', proMap.hintcSeqLong.file)
# system(paste('/usr/local/bin/python ~/Universe/code/python/gff2other.py -g', gff.sub.file, '-f', DNA.sub.fasta.file, '-k genbank -s _A_',sep=' '))
chrseq.file = extra.chr(DNA.fasta.file, locs[,1]) # extract chromosome sequence
#out.folder = sub('/Users/yongli/Universe/', 'Universe/', getwd())
out.folder = sub('/Users/yongli/', 'yongli/', getwd())
############ repeatmasker evidences
repeatmasker.hint.file = paste(tag, 'rpeatmasker.gff', sep='')
system(paste('repeatmasker ', DNA.sub.fasta.file, sep=''))
system(paste('cat ', DNA.sub.fasta.file, '.out | tail -n +3 | perl -ne \'chomp; next if (/^\\s*$/); s/^\\s+//; @t = split(/\\s+/);print $t[4]."\\t"."repmask\\tnonexonpart\\t".$t[5]."\\t".$t[6]."\\t0\\t.\\t.\\tsrc=RM\\n";\' | sort -n -k 1,1 > ', repeatmasker.hint.file, sep=''))
gff.unshift(repeatmasker.hint.file, gff.out.file = repeatmasker.hint.file)
############ denovo predictions
auguNovoAll.file = paste(tag, '_augoNovoAll.gff', sep='')
auguNovoTop.file = paste(tag, '_augoNovoTop.gff', sep='')
system(paste('sshpass -p abcd ssh fuga@192.168.56.110 \'cd ', out.folder, '; augustus --stopCodonExcludedFromCDS=false --sample=300 --predictionStart=', locs[,2], ' --predictionEnd=', locs[,3], ' --singlestrand=false --species=', species, ' --extrinsicCfgFile=~/',out.folder,'/extrinsic.cfg --alternatives-from-evidence=true --alternatives-from-sampling=true --minexonintronprob=0.08 --minmeanexonintronprob=0.3 --maxtracks=100 --gff3=on --genemodel=complete ', chrseq.file, ' > ', auguNovoAll.file, '\'', sep=''))
system(paste('sshpass -p abcd ssh fuga@192.168.56.110 \'cd ', out.folder, '; augustus --stopCodonExcludedFromCDS=false --sample=300 --predictionStart=', locs[,2], ' --predictionEnd=', locs[,3], ' --singlestrand=false --species=', species, ' --extrinsicCfgFile=~/', out.folder, '/extrinsic.cfg --alternatives-from-evidence=false --alternatives-from-sampling=false --minexonintronprob=0.08 --minmeanexonintronprob=0.3 --maxtracks=100 --gff3=on --genemodel=complete ', chrseq.file, ' > ', auguNovoTop.file, '\'', sep=''))
gff.match(gff.file = auguNovoAll.file, gff.reference = gff.sub.file, tag = '', match.by = gene.definition) #, geneID2cdsID=geneID2cdsID); # change gene names
gff.match(gff.file = auguNovoTop.file, gff.reference = gff.sub.file, tag = '', match.by = gene.definition) #, geneID2cdsID=geneID2cdsID); # change gene names
######### proMap scoring of de novo proteins
if (RORA.topOnly){
auguNovo.file = auguNovoTop.file
}else{
auguNovo.file = auguNovoAll.file
}
system(paste('getAnnoFasta.pl --seqfile=', chrseq.file, ' ', auguNovo.file, sep=''))
cds.seq.file = sub('.gff', '.codingseq', auguNovo.file);
fasta.file = sub('.gff', '.aa', auguNovo.file);
translate.fasta(CDS.file=cds.seq.file, pep.file=fasta.file); #
blastp.asn.file = sub('.gff', '.asn', auguNovo.file);
cat('Blast seaerch of Augustus de novo proteins')
system(paste('blastp -query', fasta.file, '-num_threads 6 -db ', swiss.db,' -outfmt 11 -out', blastp.asn.file, '-evalue 1 -max_target_seqs ', no.top.hits1))
system(paste('blast_formatter -archive', blastp.asn.file, '-outfmt \'6 sseqid\' -out', blastp.hitList, '-max_target_seqs ', no.top.hits1))
system(paste('cat ', blastp.hitList, ' >> ', blast.hitList, sep='')) # add but not replacing
pMap = blast2profile.PP(blast.asn.file = blastp.asn.file,
query.gff.file = auguNovo.file,
query.faa.file = fasta.file,
DNA.fasta.file = DNA.fasta.file,
geneID2cdsID=function(x){paste(x, '.cds', sep='')},
# geneID2cdsID=geneID2cdsID,
multialn.method = multialn.method, plot.width = 50, plotLogo =plotLogo, iteration = paste('', sep=''),
db = swiss.fasta.file)
nSeq.file = paste('pMap_nSeq_', tag, '_', 'deNovo', '.faa', sep='')
nSeq.naive.file = paste('pMap_nSeqNaive_', tag, '_', 'deNovo', '.faa', sep='')
cSeq.long.file = paste('pMap_cSeqLong_', tag, '_', 'deNovo', '.faa', sep='')
score.file = write.proMap(pMap, nSeq.file = nSeq.file, nSeq.naive.file = nSeq.naive.file, cSeq.long.file = cSeq.long.file, tag = tag, append=T, iteration = 'deNovo')
proMap2hints(pMap, gff.file = auguNovo.file, out.file = proMap.hint.file, geneID2cdsID=function(x){paste(x, '.cds', sep='')}, version = version)
system(paste('cat ', proMap.hint.file, ' >> ', proMap.hint.all.file, sep=''))
proMap.Mosaichint.file = paste(tag, 'exonerate.nSeq.hints','deNovo', '.gff', sep='')
system(paste('exonerate --model protein2genome --showtargetgff T -q ', nSeq.file, ' -t ', DNA.sub.fasta.file, ' > exonerate.nSeq.out', sep=''))
system(paste('exonerate2hints.pl --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=exonerate.nSeq.out --source=M --out=exonerate.hints', sep=''))
gff.unshift('exonerate.hints', proMap.Mosaichint.file)
system(paste('cat ', proMap.Mosaichint.file, ' >> ', proMap.hint.all.file, sep=''))
}
# cdbfasta protein.fa
# cdbfasta genome.fa
# cat cAfu3g01400_Afu3g01480tblastn.out | allBlastMatches_ncbi-blast.pl > tblastn.matches
# cat tblastn.matches | perl -e 'while(<>){split; if ($q eq $_[0]){$t .= "\t$_[1]"} else {print "$q$t\n"; $t="\t$_[1]";$q=$_[0];}} print "$q$t\n";' > tblastn.matchlists
##################### protein hints by exonerate, using all hits proteins
system(paste('formatting.pl -idlist ', blast.hitList, ' -input ', swiss.fasta.file, ' -o ', blast.hitFasta, sep='')) # prepare all blastx and blastp hits for next rounds of exonerate, 20141216
exonerate.hint.file = paste(tag, 'exonerate.hints.gff', sep='')
system(paste('exonerate --model protein2genome --showtargetgff T -q ', blast.hitFasta, ' -t ', DNA.sub.fasta.file, ' > exonerate.out', sep=''))
system(paste('exonerate2hints.pl --minintronlen=', minintronlen, ' --maxintronlen=', maxintronLen, ' --in=exonerate.out --source=P --out=exonerate.hints', sep=''))
gff.unshift('exonerate.hints', exonerate.hint.file)
##################### iteration 1
all.hints.file = paste(tag, 'all.hints', iteration, sep='')
if (!is.null(bam.file) & extract.bam){
if (iteration > 1){
system(paste('cat ', proMap.hint.all.file, ' ', exonerate.hint.file, ' ', auguHintsAll.file, ' ', auguNovoAll.file, ' all.hints | grep -e \'\tintron\t\' > newIntrons.gff', sep=''))
system(paste('cat newIntrons.gff | perl -ne \'@array = split(/\\t/, $_);print "$array[0]:$array[3]-$array[4]\\n";\'| sort -u > introns.lst', sep=''))
system(paste('/Users/yongli/Universe/ubuntu_bin/augustus-3.0.3/scripts/intron2exex.pl --flank=100 --introns=introns.lst --seq=', chrseq.file, ' --exex=exex.fa --map=map.psl', sep=''))
system(paste('bowtie2-build exex.fa ', tag, '_exex1', sep = ''))
# remapping using unmapped reads
unmapped.fastq.file = sub('bam', 'fastq', unmapped.bam.file)
if (!file.exists(unmapped.fastq.file)){
system(paste('samtools bam2fq -O ', unmapped.bam.file, ' > ', unmapped.fastq.file, sep=''))
}
system(paste('bowtie2 --no-unal -p 6 -x ', tag, '_exex1 -U', unmapped.fastq.file, ' -S bowtieNewIntrons.sam', sep='')) # mapping to the junctions, keep only mapped reads
# system('samtools view -S -F 4 bowtieNewIntrons1.sam > bowtieNewIntrons.F.sam') # filter to keep mapped reads
system('samMap.pl bowtieNewIntrons.sam map.psl 100 > bowtie.global.sam')
system('cat header.txt bowtie.global.sam > bowtie.global.h.sam')
system('samtools view -bS -o bowtie.global.h.bam bowtie.global.h.sam')
# join bam files
system(paste('samtools merge -f both.bam bowtie.global.h.bam ', bam.out.file, sep=''))
system(paste('samtools sort -n both.bam tmp', iteration, sep=''))
# system('bam2hints --intronsonly --in=both.ssf.bam --out=hints.2.gff')
system(paste('filterBam --uniq --in tmp', iteration, '.bam --out tmp', iteration, '_f.bam', sep=''))
system(paste('samtools sort tmp', iteration, '_f.bam tmp', iteration, '_sf', sep=''))
}else{
system(paste('filterBam --uniq --in sorted_', bam.out.file, ' --out tmp', iteration, '_f.bam', sep=''))
system(paste('samtools view -H tmp', iteration, '_f.bam > header.txt', sep=''))
system(paste('samtools sort tmp', iteration, '_f.bam tmp', iteration, '_sf', sep=''))
}
hintIntron.file = paste(tag, '_hints_intron',iteration, '.gff', sep='')
RNAseq.hint.file = paste(tag, '_RNAseqhints',iteration, '.gff', sep='')
# exon parts hints from RNA-seq
if (use.RNAseq.exonpart){
system(paste('bam2hints --trunkSS --remove_redundant --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=tmp_sf.bam --out=', RNAseq.hint.file, sep=''))
#DNA.size.file = paste(DNA.fasta.file, 'chrSize.tab', sep='')
#system(paste('faSize -detailed -tab ', DNA.fasta.file, ' > ', DNA.size.file, sep=''))
#system(paste('bam2bigWig bam tmp2_sf ', DNA.size.file, sep=''))
## system('bam2wig bam tmp_sf')
#system('cat tmp_sf.wig | wig2hints.pl --width=10 --margin=10 --minthresh=2 --minscore=4 --prune=0.1 --src=W --type=ep --UCSC=unstranded.track --radius=4.5 --pri=4 --strand="." > hints.ep.gff')
#system(paste('cat hints.ep.gff ', RNAseq.hint.file,' > hints.tmp', sep=''))
#system(paste('mv hints.tmp ', RNAseq.hint.file, sep=''))
}else{
system(paste('bam2hints --intronsonly --trunkSS --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=tmp_sf.bam --out=', RNAseq.hint.file, sep=''))
}
# system(paste('cat ', exonerate.hint.file, hintIntron.file, ' > all.hintsIntron', sep=' '))
system(paste('cat ', repeatmasker.hint.file, ' ', proMap.hint.all.file, ' ', exonerate.hint.file, ' ', RNAseq.hint.file, ' > ', all.hints.file, sep=''))
}else{
system(paste('cat ', repeatmasker.hint.file, ' ', proMap.hint.all.file, ' ', exonerate.hint.file, ' > ', all.hints.file, sep=''))
}
#################### prediction based on all hints combined
auguHintsAll.file = paste(tag, '_augoHintsAll',iteration, '.gff', sep='')
auguHintsTop.file = paste(tag, '_augoHintsTop', iteration, '.gff', sep='')
# auguHintsIntron.file = paste(tag, '_augoHintsIntron',iteration, '.gff', sep='')
system(paste('sshpass -p abcd ssh fuga@192.168.56.110 \'cd ', out.folder, '; augustus --stopCodonExcludedFromCDS=false --sample=300 --predictionStart=', locs[,2], ' --predictionEnd=', locs[,3], ' --singlestrand=false --species=', species, ' --extrinsicCfgFile=~/', out.folder, '/extrinsic.cfg --alternatives-from-evidence=true --alternatives-from-sampling=true --minexonintronprob=0.08 --minmeanexonintronprob=0.3 --maxtracks=100 --hintsfile=',all.hints.file, ' --allow_hinted_splicesites=atac --introns=on --gff3=on --genemodel=complete ', chrseq.file, ' > ', auguHintsAll.file, '\'', sep=''))
# system(paste('sshpass -p abcd ssh fuga@192.168.56.110 \'cd ', out.folder, '; augustus --stopCodonExcludedFromCDS=false--sample=300 --predictionStart=', locs[,2], ' --predictionEnd=', locs[,3], ' --singlestrand=false --species=', species, ' --extrinsicCfgFile=extrinsic.cfg --alternatives-from-evidence=true --alternatives-from-sampling=true --minexonintronprob=0.05 --minmeanexonintronprob=0.3 --maxtracks=100 --hintsfile=all.hintsIntron', ' --allow_hinted_splicesites=atac --introns=on --gff3=on --genemodel=complete ', chrseq.file, ' > ', auguHintsIntron.file, '\'', sep=''))
system(paste('sshpass -p abcd ssh fuga@192.168.56.110 \'cd ', out.folder, '; augustus --stopCodonExcludedFromCDS=false --sample=300 --predictionStart=', locs[,2], ' --predictionEnd=', locs[,3], ' --singlestrand=false --species=', species, ' --extrinsicCfgFile=~/', out.folder, '/extrinsic.cfg --alternatives-from-evidence=false --alternatives-from-sampling=false --minexonintronprob=0.08 --minmeanexonintronprob=0.3 --maxtracks=100 --hintsfile=',all.hints.file, ' --allow_hinted_splicesites=atac --introns=on --gff3=on --genemodel=complete ', chrseq.file, ' > ', auguHintsTop.file, '\'', sep=''))
gff.match(gff.file = auguHintsAll.file, gff.reference = gff.sub.file, tag = '', match.by = gene.definition) #, geneID2cdsID=geneID2cdsID); # change gene names
gff.match(gff.file = auguHintsTop.file, gff.reference = gff.sub.file, tag = '', match.by = gene.definition) #, geneID2cdsID=geneID2cdsID); # change gene names
#################### proMap scoring and generate new hints
if (RORA.topOnly){
auguHints.file = auguHintsTop.file
}else{
auguHints.file = auguHintsAll.file
}
system(paste('getAnnoFasta.pl --seqfile=', DNA.fasta.file, ' ', auguHints.file, sep=''))
cds.seq.file = sub('.gff', '.codingseq', auguHints.file);
fasta.file = sub('.gff', '.aa', auguHints.file);
translate.fasta(CDS.file=cds.seq.file, pep.file=fasta.file); #
blastp.asn.file = sub('.gff', '.asn', auguHints.file);
cat('Blast seaerch of iteration ',iteration, ' predictions', tag)
system(paste('blastp -query', fasta.file, '-num_threads 6 -db ', swiss.db, ' -outfmt 11 -out', blastp.asn.file, '-evalue 1 -max_target_seqs ', no.top.hits1))
system(paste('blast_formatter -archive', blastp.asn.file, '-outfmt \'6 sseqid\' -out', blastp.hitList, '-max_target_seqs ', no.top.hits1))
system(paste('cat ', blastp.hitList, ' >> ', blast.hitList, sep='')) # add but not replacing
pMap = blast2profile.PP(blast.asn.file = blastp.asn.file,
query.gff.file = auguHints.file,
query.faa.file = fasta.file,
DNA.fasta.file = DNA.fasta.file,
geneID2cdsID=function(x){paste(x, '.cds', sep='')},
# geneID2cdsID=geneID2cdsID,
multialn.method = multialn.method, plot.width = 50, plotLogo =plotLogo, iteration = paste('iter', iteration, sep=''),
db = swiss.fasta.file)
nSeq.file = paste('pMap_nSeq_', tag, '_', iteration, '.faa', sep='')
nSeq.naive.file = paste('pMap_nSeqNaive_', tag, '_', iteration, '.faa', sep='')
cSeq.long.file = paste('pMap_cSeqLong_', tag, '_', iteration, '.faa', sep='')
score.file = write.proMap(pMap, nSeq.file = nSeq.file, nSeq.naive.file = nSeq.naive.file, cSeq.long.file = cSeq.long.file, tag = tag, append=T, iteration = iteration)
proMap2hints(pMap, gff.file = auguHints.file, out.file = proMap.hint.file,
log.file = paste('log', tag, '.txt', sep=''), geneID2cdsID=function(x){paste(x, '.cds', sep='')}, version = version)
system(paste('cat ', proMap.hint.file, ' >> ', proMap.hint.all.file, sep=''))
proMap.Mosaichint.file = paste(tag, 'exonerate.nSeq.hints',iteration, '.gff', sep='')
proMap.hintNaive.file = paste(tag, 'exonerate.nSeqNaive.hints',iteration, '.gff', sep='')
proMap.hintcSeqLong.file = paste(tag, 'exonerate.cSeqLong.hints',iteration, '.gff', sep='')
system(paste('exonerate --model protein2genome --showtargetgff T -q ', nSeq.file, ' -t ', DNA.sub.fasta.file, ' > exonerate.nSeq.out', sep=''))
system(paste('exonerate2hints.pl --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=exonerate.nSeq.out --source=M --out=exonerate.hints', sep=''))
gff.unshift('exonerate.hints', proMap.Mosaichint.file)
system(paste('cat ', proMap.Mosaichint.file, ' >> ', proMap.hint.all.file, sep=''))
# system(paste('exonerate --model protein2genome --showtargetgff T -q ', nSeq.naive.file, ' -t ', DNA.sub.fasta.file, ' > exonerate.nSeqNaive.out', sep=''))
# system(paste('exonerate2hints.pl --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=exonerate.nSeqNaive.out --source=M --out=exonerate.hints', sep=''))
# gff.unshift('exonerate.hints', proMap.hintNaive.file)
#
# system(paste('exonerate --model protein2genome --showtargetgff T -q ', cSeq.long.file, ' -t ', DNA.sub.fasta.file, ' > exonerate.cSeqLong.out', sep=''))
# system(paste('exonerate2hints.pl --minintronlen=',minintronlen, ' --maxintronlen=', maxintronLen, ' --in=exonerate.cSeqLong.out --source=M --out=exonerate.hints', sep=''))
# gff.unshift('exonerate.hints', proMap.hintcSeqLong.file)
}
if (score.file != '' && file.exists(score.file))
select.CDS(score.file) # chose the top 2 gene models
# blastp.xml.file = sub('.gff', '.xml', auguHints2top.file);
# system(paste('blast_formatter -archive', blastp.asn.file, '-outfmt 5 -out', blastp.xml.file, '-max_target_seqs ', no.top.hits))
# system(paste('sshpass -p abcd ssh fuga@192.168.56.110 \'poweroff\''))
## read alignment results
if (blastp.xml.file != '' & file.exists(blastp.xml.file)){
blast.out = blast.xml.parse(blast.xml = blastp.xml.file, no.top.hits = no.top.hits)
top.species = sub('^.+\\[([^\\[\\]]+)\\].?$','\\1', blast.out$query$Top_nonself_Hit_def, perl=T)
names(top.species) = rownames(blast.out$query)
out = cbind(out, Top_nonself_Hit_species = top.species[names(genes)], blast.out$query[names(genes), c(8,16,19)])
add.names = c('cluster', 'RNA-seq reads', 'No.introns', 'intron anno by RNA-seq', 'inron anno by orthologs',
'conclusion', 'new.protein.seq');
}else{
add.names = c('Top_nonself_Hit_species','Top_nonself_Hit_accession', 'Top_nonself_Hit_identity.percent', 'top.3.hits',
'cluster', 'RNA-seq reads', 'No.introns', 'intron anno by RNA-seq', 'inron anno by orthologs',
'conclusion', 'new.protein.seq');
}
out = cbind(out, matrix('|', nrow = nrow(out),
ncol=length(add.names), dimnames = list(names(genes), add.names)))
out = as.matrix(out)
out[is.na(out)] = '|';
invisible(out)
}
select.CDS <- function(score.file = 'pMapcJL1All.xls'){
# select canidate CDSs based on score and rank and high light them in sorted pMap output file
# Yong Fuga Li, 20150108
#
# Input: tab-delimited score table; column 1 (X) holds CDS IDs of the form
# <geneID>[.t<CDSrank>][.<suffix>]; column 'iteration' holds the RORA
# iteration ('deNovo' is recoded to 0, '' -- the original annotation -- to -1).
# Output files: 'pretty_<score.file>.xlsx' with candidate models filled green,
# max-scored models in italic and the selected model in bold red, plus
# 'readme_select_CDS.txt' documenting the color code.
# Depends on project helpers: regexpr.match, sort.by, which.first.by,
# unlist.dupenames, which.max.tie.by, xlsx.color.
require('xlsx')
s = read.table(score.file, header = T, sep = '\t')
# score = as.numeric(s$pHMM.score); score[is.na(score)] = 0
# CDS rank = the number after '.t' in the ID; no match means rank 1
CDS.rank = regexpr.match('^[^\\.]+(?:\\.t([^\\.]+))?(?:\\.[^\\.]+)?$', s$X, perl=T)[,1]
CDS.rank[CDS.rank==''] = '1';
CDS.rank = as.numeric(CDS.rank)
iter = as.character(s$iteration); iter[iter=='deNovo'] = 0; iter[iter==''] = -1; iter = as.numeric(iter)
# sort rows by (rank, iteration), then group by gene ID
s = sort.by(s, cbind(CDS.rank, iter))
gene.ID = regexpr.match('^([^\\.]+)(?:\\..+)?$', s$X, perl=T)[,1]
s = sort.by(s, gene.ID)
if (1){ # 20160613 - sort by gene locations
gene.ID = regexpr.match('^([^\\.]+)(?:\\..+)?$', s$X, perl=T)[,1]
gID.first = which.first.by(gene.ID)
gfrom = as.numeric(as.character(s$from))[gID.first]; # 20160613 - sort by gene locations
names(gfrom) = gene.ID[gID.first];
gfrom = gfrom[gene.ID];
s = sort.by(s, gfrom)
}
# recompute score/gene/rank/iteration vectors to match the final row order of s
score = as.numeric(s$pHMM.score); score[is.na(score)] = 0
gene.ID = regexpr.match('^([^\\.]+)(?:\\..+)?$', s$X, perl=T)[,1]
CDS.rank = regexpr.match('^[^\\.]+(?:\\.t([^\\.]+))?(?:\\.[^\\.]+)?$', s$X, perl=T)[,1]
CDS.rank[CDS.rank==''] = '1'
CDS.rank = as.numeric(CDS.rank)
iter = as.character(s$iteration); iter[iter=='deNovo'] = 0; iter[iter==''] = -1; iter = as.numeric(iter)
# suggest the two best candidates 1) 2nd iter top -- iff score + 3 > top score, otherwise chose the one with top score;
# deta.score: tolerance (in pHMM score units) below the per-gene maximum
# within which a model still counts as a candidate
deta.score = 3;
i.candidate = unlist.dupenames(by(1:nrow(s), INDICES = gene.ID, FUN = function(x){i = which((iter[x] %in% c(max(iter[x]), -1)) & CDS.rank[x]==1);
score1 = score[x];
s.max = max(score1)
i.max = which(score1==s.max);
i = unique(c(i[score1[i] + deta.score >= s.max], i.max))
return(i.select=x[i])})) # candidates: with max score, or close to max && is original/last iteration of CDS prediction
# i.select: exactly one row per gene -- prefer the original model, then the
# last-iteration model, provided it scores within deta.score of the maximum
i.select = unlist.dupenames(by(1:nrow(s), INDICES = gene.ID, FUN = function(x){max.iter = max(iter[x]);
i = which((iter[x] %in% c(max.iter, -1)) & CDS.rank[x]==1);
score1 = score[x];
s.max = max(score1)
i.max = which(score1==s.max);
i = unique(c(i[score1[i] + deta.score >= s.max], i.max));
i.max.iter = intersect(i, which(iter[x]==max.iter)) # final augustus prediction
i.original = intersect(i, which(iter[x]==-1)) # original gene
if (length(i.original)>0)
i = i.original
else if (length(i.max.iter)>0)
i = i.max.iter
else
i = i[which.min((CDS.rank[x])[i[(iter[x])[i]==max((iter[x])[i])]])] # the one with highest augustus probability in the last iteration in the candidates
return(i.select=(x[i])[1])})) # candidates: with max score, or close to max && is original/last iteration of CDS prediction
i.max = which.max.tie.by(score, by = gene.ID)
# score improvement relative to the original (iteration == -1) model of the same gene
# NOTE(review): 'unique(score[i],3)' passes 3 positionally as unique()'s
# 'incomparables' argument; round(unique(score[i]), 3) was probably the
# intent -- confirm before changing.
score.improvement.select = sapply(1:length(i.select), function(x){i = (iter == -1 & gene.ID == names(i.select[x])); if (!any(i)) d = 'NA' else d = round(score[i.select[x]] - unique(score[i],3))})
score.improvement.max = sapply(1:length(i.max), function(x){i = (iter == -1 & gene.ID == names(i.max[x])); if (!any(i)) d = 'NA' else d = round(score[i.max[x]] - unique(score[i],3))})
score.improvement.candidate = sapply(1:length(i.candidate), function(x){i = (iter == -1 & gene.ID == names(i.candidate[x])); if (!any(i)) d = 'NA' else d = round(score[i.candidate[x]] - unique(score[i],3))})
### write.xlsx file with proper highlighting
# high light 1) the promising and 2) the max scored CDS
# strip any directory component so outputs are written to the working directory
score.file = sub(pattern = '^.+\\/([^\\/]+)$', replacement = '\\1', score.file)
out.file = paste('pretty_', sub('\\.[^\\.]*$','.xlsx', score.file), sep='')
names(s)[1] = 'ID'
s$candidates = ''; s$max.scored = ''; s$selected = '';
s$score.improvement = ''; # 20150123
# NOTE(review): 's$max.scored[i.candidate]' flags candidates, not the
# max-scored rows -- 'i.max' was likely intended; verify before fixing.
s$candidates[i.candidate] = 'Yes'; s$max.scored[i.candidate] = 'Yes'; s$selected[i.select] = 'Yes'; s$score.improvement[i.select] = score.improvement.select; s$score.improvement[i.max] = score.improvement.max; s$score.improvement[i.candidate] = score.improvement.candidate
s$pHMM.Evalue[s$pHMM.Evalue == Inf] = 1
write.xlsx2(s, out.file, row.names = F, showNA = F)
# base style applied to every cell
xlsx.color(xlsx.file = out.file, include.header=T, FUN.select = function(x){y = matrix(T, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x)); y},
font=list(color = NULL, heightInPoints=12, name='Calibri', isItalic=F, isBold=F, isStrikeout=F, underline=NULL),
out.file = out.file, na.strings='|') # change global style
CDS.groups = cbind(unlist(as.list(by(1:length(gene.ID), gene.ID, FUN = min))), unlist(as.list(by(1:length(gene.ID), gene.ID, FUN = max)))); # group CDS by gene IDs
xlsx.color(xlsx.file = out.file, row.groups = CDS.groups, out.file=out.file) # frame to indicate the genes
# green fill: candidate models
xlsx.color(xlsx.file = out.file, FUN.select = function(x){y = matrix(F, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x));
y[i.candidate, ] = T
return(y)}, fill.color = 'green', out.file = out.file, na.strings='|')
# italic: max-scored models
xlsx.color(xlsx.file = out.file, FUN.select = function(x){y = matrix(F, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x));
y[i.max, ] = T
return(y)},
font=list(color = NULL, isItalic=T, isBold=F, isStrikeout=F, underline=NULL), out.file = out.file, na.strings='|')
# bold red: the one selected model per gene
xlsx.color(xlsx.file = out.file, FUN.select = function(x){y = matrix(F, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x));
y[i.select, ] = T
return(y)},
font=list(color = 'red', isItalic=F, isBold=T, isStrikeout=F, underline=NULL), out.file = out.file, na.strings='|')
write(paste('green: candidates - either with max phmm score or with scores no less than [max score] - ', deta.score, '
italic: max scored
bold red: selected - among the candidates,
if the original CDS model is no less than [max score] - 3, the original CDS is selected,
otherwise if the best model in the last iteration of augustus is the is no less than [max score] - 3, that is selected,
otherwise the highest iteration and highest augustus probability model among the candidates is selected', sep=''), file = 'readme_select_CDS.txt')
}
table.merge <- function(files = m[i,3], extra.columns = m[i,2:3], idx.keep = 1:17, file.format = 'xlsx', out.file = 'KU0011.merge.xls'){
# Merge several tables (sheet 1 of .xlsx files, or tab-delimited text) into
# one table, appending one row of per-file annotation columns to each, then
# write the merged result tab-delimited with the first kept column as row ID.
# files: paths of the input tables
# extra.columns: one annotation row per input file (same order as files)
# idx.keep: column indices kept from each input (NULL = keep all columns)
# Yong Fuga Li, 20151011
require(xlsx)
require(gdata)
merged = c()
for (k in 1:length(files)){
# load the k-th table in the requested format
tab = if (file.format == 'xlsx') read.xlsx2(files[k], 1) else read.table(files[k], sep='\t', header = T)
if (is.null(idx.keep))
idx.keep = 1:ncol(tab)
annot = repmat(extra.columns[k,,drop=F]); # project helper: replicate the annotation row
colnames(annot) = colnames(extra.columns)
merged = rbind(merged, cbind(tab[,idx.keep], annot))
}
merged.mat = as.matrix(merged)
rownames(merged.mat) = merged.mat[,1] # first kept column becomes the row name
write.table(merged.mat[,2:ncol(merged.mat)], col.names=NA, sep = '\t', file = out.file)
}
select.CDS.multiModel <- function(re.cluster.model = 'pretty_pMapc(\\w{2}\\d{4})_(.*).xlsx$', # provide cluster ID and model species
all.files = list.files(pattern = re.cluster.model)) {
# For each cluster, merge its per-model-species "pretty" pMap score tables
# into one file and run select.CDS on the merged table to pick candidate CDSs.
# re.cluster.model: regex with two capture groups (cluster ID, model species)
# all.files: the files to process (default: everything matching the regex)
# Yong Fuga Li, 20151011
# 20160804, add all.files
require('xlsx')
## extract cluster ID and model species from each file name
meta = regexpr.match(txt = all.files, pat = re.cluster.model)
meta = cbind(meta, all.files)
meta = meta[meta[,1]!='',,drop=F] # drop files that did not match the pattern
rownames(meta) = meta[,1]
colnames(meta) = c('cluster', 'model', 'file')
## merge the tables of each cluster, then select candidate CDSs
cluster.rows = by(1:nrow(meta), meta[,1], identity)
for (cl in names(cluster.rows)){
rows = cluster.rows[[cl]]
merged.file = paste('pMapc', cl, '.merge.xlx', sep='')
table.merge(files = meta[rows,3], extra.columns = meta[rows,2:3, drop=F], file.format = 'xlsx', out.file = merged.file)
select.CDS(score.file = merged.file)
}
}
blast.xml.parse <- function(blast.xml = 'AN8127-Alignment.xml', no.top.hits = 3,
query.self.min.identity = 0.95, query.self.len.diff = 10,
query.species = 'Aspergillus nidulans FGSC A4'){
# ref: https://stat.ethz.ch/pipermail/bioc-sig-sequencing/2010-September/001580.html
# http://rstudio-pubs-static.s3.amazonaws.com/12097_1352791b169f423f910d93222a4c2d85.html
# ref: blastSequences
# query.self.min.identity, query.self.len.diff, and query.species are used to define which hits are itential to query, and hence excluded from the output
# YF Li, 20140803
#
# Parse a BLAST XML (outfmt 5) report into per-query, per-hit and per-HSP
# tables, aggregate HSP statistics up to hit level, and attach to each query
# its best non-self hit plus a string summary of the top no.top.hits
# non-self hits.
# Returns list(query, hit, hsp, query.hit, query.hsp, hit.hsp,
# iQuery4hit, iQuery4hsp, iHit4hsp) where the iX4Y vectors map
# each hit/HSP row back to its parent query/hit row.
require(XML)
result <- xmlTreeParse(blast.xml, useInternalNodes=TRUE)
aa = xmlToList(result)
# NOTE(review): 'aa' above is never used afterwards -- candidate for removal.
# read: iteration level
tags.query = c('Iteration_iter-num', 'Iteration_query-ID', 'Iteration_query-def', 'Iteration_query-len')
query = list();
for (t in tags.query)
query[[t]] = unlist(xpathApply(result, paste("//",t,sep=''), xmlValue))
query = data.frame(query,stringsAsFactors = F)
tags.query = make.names(tags.query)
query[[1]] = as.numeric(query[[1]]); query[[4]] = as.numeric(query[[4]]);
rownames(query) = query$Iteration_query.def
# hit level
tags.hit = c('Hit_num', 'Hit_id', 'Hit_def', 'Hit_accession', 'Hit_len')
hit = list();
for (t in tags.hit)
hit[[t]] = unlist(xpathApply(result, paste("//",t,sep=''), xmlValue))
hit = as.data.frame(hit,stringsAsFactors = F)
tags.hit = make.names(tags.hit)
hit[[1]] = as.numeric(hit[[1]]); hit[[5]] = as.numeric(hit[[5]]);
# hsp level
tags.hsp = c('Hsp_num', 'Hsp_bit-score','Hsp_score','Hsp_evalue','Hsp_query-from','Hsp_query-to',
'Hsp_hit-from','Hsp_hit-to','Hsp_query-frame','Hsp_hit-frame','Hsp_identity','Hsp_positive',
'Hsp_gaps','Hsp_align-len')
hsp = list();
for (t in tags.hsp)
hsp[[t]] = unlist(xpathApply(result, paste("//",t,sep=''), xmlValue))
hsp = as.data.frame(hsp,stringsAsFactors = F)
tags.hsp = make.names(tags.hsp)
hsp = data.frame(lapply(hsp, FUN = as.numeric))
# expand query: replicate each query row once per hit, so query.hit aligns row-wise with hit
No.hits = xpathApply(result, "//Iteration_hits", function(x)sum(names(xmlApply(x,xmlName))=='Hit')) # get the number of hits from each qurey
# No.hits = xpathApply(result, "//Iteration_hits", xmlSize) # xmlSize has a bug that returns 1 for empty node
No.hits = sapply(No.hits, unlist)
iQuery4hit = rep(rownames(query), No.hits)
# i = hit[['Hit_num']]; i = i[which(c(i[2:length(i)],1) == 1)] # this will messup with a query has 0 hits
# iQuery4hit = rep((1:length(i)), i)
query.hit = as.data.frame(apply(query, MARGIN = 2, function(x) rep(x, times = No.hits)),stringsAsFactors = F)
query.hit[[1]] = as.numeric(query.hit[[1]]); query.hit[[4]] = as.numeric(query.hit[[4]]);
# expand query and hit: replicate rows once per HSP (HSP counts recovered from the Hsp_num resets)
No.hsps = xpathApply(result, "//Hit_hsps", function(x)xmlSize(x)) # get the number of hits from each qurey
No.hsps = sapply(No.hsps, unlist)
i = as.numeric(hsp[['Hsp_num']]); i = i[which(c(i[2:length(i)],1) == 1)]
iQuery4hsp = rep(iQuery4hit, i); iHit4hsp = rep((1:length(i)), i) # all(iQuery4hit[iHit4hsp] == iQuery4hsp) == TRUE
query.hsp = as.data.frame(apply(query.hit, MARGIN = 2, function(x) rep(x, times = i)),stringsAsFactors = F)
hit.hsp = as.data.frame(apply(hit, MARGIN = 2, function(x) rep(x, times = i)),stringsAsFactors = F)
query.hsp[[1]] = as.numeric(query.hsp[[1]]); query.hsp[[4]] = as.numeric(query.hsp[[4]]);
hit.hsp[[1]] = as.numeric(hit.hsp[[1]]); hit.hsp[[5]] = as.numeric(hit.hsp[[5]]);
# summerize hsp to hits: sum bit-score/score/identity/positive/gaps/align-len over each hit's HSPs
hit.extra = apply(hsp[,c(2,3,11:14)],MARGIN = 2,FUN = function(x)by(x, INDICES = list(hit=iHit4hsp), FUN = sum))
colnames(hit.extra) = sub('Hsp', 'Hit', colnames(hit.extra))
hit = cbind(hit, hit.extra)
hit$Hit_identity.percent = hit$Hit_identity/hit$Hit_align.len
hit$Hit_positive.percent = hit$Hit_positive/hit$Hit_align.len
hit$Hit_gaps.percent = hit$Hit_gaps/hit$Hit_align.len
# sumerize hits to query: a hit is "self" if it has near-identical length or
# matches query.species, AND exceeds the identity threshold; self hits are
# excluded when choosing the top non-self hit(s)
i.self = (abs(hit$Hit_len - query.hit$Iteration_query.len)< query.self.len.diff | regexpr(query.species, hit$Hit_def)>0)&
(hit$Hit_identity.percent > query.self.min.identity)
i.top.hit = by(hit[!i.self,4:13], INDICES = factor(iQuery4hit[!i.self], levels =rownames(query)),
FUN=function(x){i = which.max(x$Hit_bit.score);
return(as.numeric(rownames(x[i, ])))},simplify=T)
i.top.hit = sapply(i.top.hit, unlist) # note that i.top.hits is the rownames, hence no need to be shifted by i.self
hit$Hit_identity.percent = paste(round(hit$Hit_identity.percent*100,1), '%', sep='')
hit$Hit_positive.percent = paste(round(hit$Hit_positive.percent*100,1), '%', sep='')
hit$Hit_gaps.percent = paste(round(hit$Hit_gaps.percent*100,1), '%', sep='')
query.extra = hit[i.top.hit,]
rownames(query.extra) = names(i.top.hit)
# build the 'identity% | accession | description' summary of the top N non-self hits
top.N.hits = by(hit[!i.self,], INDICES = factor(iQuery4hit[!i.self], levels =rownames(query)),
FUN=function(x){i = which.max.n(x$Hit_bit.score, no.top.hits); x = x[i,];
top.N.hits = paste(x$Hit_identity.percent, ' | ', x$Hit_accession,' | ', x$Hit_def, sep='', collapse = ' // ')
return(top.N.hits)},simplify=T)
top.N.hits = sapply(top.N.hits, unlist)
query.extra[[paste('top', no.top.hits, 'hits', sep='.')]] = top.N.hits
colnames(query.extra) = sub('Hit', 'Top_nonself_Hit', colnames(query.extra))
query = cbind(query, query.extra[rownames(query),])
blast.out = list(query=query, hit = hit, hsp=hsp, query.hit = query.hit, query.hsp=query.hsp, hit.hsp=hit.hsp,
iQuery4hit=iQuery4hit, iQuery4hsp=iQuery4hsp, iHit4hsp=iHit4hsp)
return(blast.out)
}
blast.filter <- function(bl, Evalue = 0.1){
# Subset a parsed blast result (the list returned by blast.xml.parse) to the
# HSPs whose E-value is below the cutoff; hits and queries that lose all of
# their HSPs are dropped, and the index-mapping vectors are re-subset so the
# tables stay mutually consistent.
# 20140916, YF Li
keep.hsp = bl$hsp$Hsp_evalue < Evalue
keep.hit = unique(bl$iHit4hsp[keep.hsp])
keep.query = unique(bl$iQuery4hsp[keep.hsp])
out = bl
out$query = bl$query[keep.query, ]
out$hit = bl$hit[keep.hit, ]
out$hsp = bl$hsp[keep.hsp, ]
out$query.hit = bl$query.hit[keep.hit, ]
out$query.hsp = bl$query.hsp[keep.hsp, ]
out$hit.hsp = bl$hit.hsp[keep.hsp, ]
out$iQuery4hit = bl$iQuery4hit[keep.hit]
out$iQuery4hsp = bl$iQuery4hsp[keep.hsp]
out$iHit4hsp = bl$iHit4hsp[keep.hsp]
out
}
blast2coverage <- function(blast.xml, Evalue=0.1, type = c('count', 'bit/length')){
# 20141125
# Yong Fuga Li
# Parse a BLAST XML report and filter its HSPs by E-value.
# NOTE(review): appears unfinished -- 'type' is never used, and no coverage
# profile is computed; the filtered blast result is the value of the final
# assignment and is therefore returned invisibly. Confirm intended output
# before relying on this function.
bl = blast.xml.parse(blast.xml = blast.xml)
bl.f = blast.filter(bl, Evalue = Evalue)
}
blast2profile <- function(blast.xml='cUUp0_S281DNA_subseq.swissprot.xml', no.top.hits = 10E10, Evalue=0.1, type = c('count', 'bit/length'),
query.self.min.identity = 0.95, query.self.len.diff = 10,
query.species = 'Aspergillus nidulans FGSC A4'){
# 20141125, Yong Fuga Li
# Parse a BLAST XML (outfmt 5) report and summarize alignments at the
# HSP -> hit -> query levels; attach to each query its best non-self hit and
# a string summary of the top no.top.hits non-self hits.
#
# Arguments:
# blast.xml - BLAST XML report file
# no.top.hits - number of top non-self hits summarized per query
# Evalue, type - currently unused (NOTE(review): the profile output described
# in the original comment below is not implemented; the function
# returns the same structure as blast.xml.parse)
# query.self.min.identity, query.self.len.diff, query.species - criteria for
# recognizing hits identical to the query (near-identical length or
# same species, plus identity above the threshold); such self hits
# are excluded from the top-hit summaries.
# Fix: these three were referenced in the body without being declared
# (they are arguments of blast.xml.parse only), which made this
# function fail with "object not found"; they are now arguments
# with the same defaults as in blast.xml.parse.
# original intended output (not yet implemented):
# profile = list(query.DNA, query.AA = matrix(6, n.AA), matches=matrix(nrow=21, ncol=n.aa), insertions = list(locations, inserts.aligned))
require(XML)
result <- xmlTreeParse(blast.xml, useInternalNodes=TRUE)
# read: iteration (query) level
tags.query = c('Iteration_iter-num', 'Iteration_query-ID', 'Iteration_query-def', 'Iteration_query-len')
query = list();
for (t in tags.query)
query[[t]] = unlist(xpathApply(result, paste("//",t,sep=''), xmlValue))
query = data.frame(query,stringsAsFactors = F)
tags.query = make.names(tags.query)
query[[1]] = as.numeric(query[[1]]); query[[4]] = as.numeric(query[[4]]);
rownames(query) = query$Iteration_query.def
# hit level
tags.hit = c('Hit_num', 'Hit_id', 'Hit_def', 'Hit_accession', 'Hit_len')
hit = list();
for (t in tags.hit)
hit[[t]] = unlist(xpathApply(result, paste("//",t,sep=''), xmlValue))
hit = as.data.frame(hit,stringsAsFactors = F)
tags.hit = make.names(tags.hit)
hit[[1]] = as.numeric(hit[[1]]); hit[[5]] = as.numeric(hit[[5]]);
# hsp level
tags.hsp = c('Hsp_num', 'Hsp_bit-score','Hsp_score','Hsp_evalue','Hsp_query-from','Hsp_query-to',
'Hsp_hit-from','Hsp_hit-to','Hsp_query-frame','Hsp_hit-frame','Hsp_identity','Hsp_positive',
'Hsp_gaps','Hsp_align-len')
hsp = list();
for (t in tags.hsp)
hsp[[t]] = unlist(xpathApply(result, paste("//",t,sep=''), xmlValue))
hsp = as.data.frame(hsp,stringsAsFactors = F)
tags.hsp = make.names(tags.hsp)
hsp = data.frame(lapply(hsp, FUN = as.numeric))
# expand query: one row per hit, aligned with the hit table
No.hits = xpathApply(result, "//Iteration_hits", function(x)sum(names(xmlApply(x,xmlName))=='Hit')) # get the number of hits from each qurey
# No.hits = xpathApply(result, "//Iteration_hits", xmlSize) # xmlSize has a bug that returns 1 for empty node
No.hits = sapply(No.hits, unlist)
iQuery4hit = rep(rownames(query), No.hits)
query.hit = as.data.frame(apply(query, MARGIN = 2, function(x) rep(x, times = No.hits)),stringsAsFactors = F)
query.hit[[1]] = as.numeric(query.hit[[1]]); query.hit[[4]] = as.numeric(query.hit[[4]]);
# expand query and hit: one row per HSP (HSP counts recovered from the Hsp_num resets)
No.hsps = xpathApply(result, "//Hit_hsps", function(x)xmlSize(x)) # get the number of hits from each qurey
No.hsps = sapply(No.hsps, unlist)
i = as.numeric(hsp[['Hsp_num']]); i = i[which(c(i[2:length(i)],1) == 1)]
iQuery4hsp = rep(iQuery4hit, i); iHit4hsp = rep((1:length(i)), i) # all(iQuery4hit[iHit4hsp] == iQuery4hsp) == TRUE
query.hsp = as.data.frame(apply(query.hit, MARGIN = 2, function(x) rep(x, times = i)),stringsAsFactors = F)
hit.hsp = as.data.frame(apply(hit, MARGIN = 2, function(x) rep(x, times = i)),stringsAsFactors = F)
query.hsp[[1]] = as.numeric(query.hsp[[1]]); query.hsp[[4]] = as.numeric(query.hsp[[4]]);
hit.hsp[[1]] = as.numeric(hit.hsp[[1]]); hit.hsp[[5]] = as.numeric(hit.hsp[[5]]);
# summerize hsp to hits: sum the numeric HSP statistics over each hit
hit.extra = apply(hsp[,c(2,3,11:14)],MARGIN = 2,FUN = function(x)by(x, INDICES = list(hit=iHit4hsp), FUN = sum))
colnames(hit.extra) = sub('Hsp', 'Hit', colnames(hit.extra))
hit = cbind(hit, hit.extra)
hit$Hit_identity.percent = hit$Hit_identity/hit$Hit_align.len
hit$Hit_positive.percent = hit$Hit_positive/hit$Hit_align.len
hit$Hit_gaps.percent = hit$Hit_gaps/hit$Hit_align.len
# sumerize hits to query: identify and exclude self hits, then pick the best non-self hit
i.self = (abs(hit$Hit_len - query.hit$Iteration_query.len)< query.self.len.diff | regexpr(query.species, hit$Hit_def)>0)&
(hit$Hit_identity.percent > query.self.min.identity)
i.top.hit = by(hit[!i.self,4:13], INDICES = factor(iQuery4hit[!i.self], levels =rownames(query)),
FUN=function(x){i = which.max(x$Hit_bit.score);
return(as.numeric(rownames(x[i, ])))},simplify=T)
i.top.hit = sapply(i.top.hit, unlist) # note that i.top.hits is the rownames, hence no need to be shifted by i.self
hit$Hit_identity.percent = paste(round(hit$Hit_identity.percent*100,1), '%', sep='')
hit$Hit_positive.percent = paste(round(hit$Hit_positive.percent*100,1), '%', sep='')
hit$Hit_gaps.percent = paste(round(hit$Hit_gaps.percent*100,1), '%', sep='')
query.extra = hit[i.top.hit,]
rownames(query.extra) = names(i.top.hit)
# 'identity% | accession | description' summary of the top N non-self hits per query
top.N.hits = by(hit[!i.self,], INDICES = factor(iQuery4hit[!i.self], levels =rownames(query)),
FUN=function(x){i = which.max.n(x$Hit_bit.score, no.top.hits); x = x[i,];
top.N.hits = paste(x$Hit_identity.percent, ' | ', x$Hit_accession,' | ', x$Hit_def, sep='', collapse = ' // ')
return(top.N.hits)},simplify=T)
top.N.hits = sapply(top.N.hits, unlist)
query.extra[[paste('top', no.top.hits, 'hits', sep='.')]] = top.N.hits
colnames(query.extra) = sub('Hit', 'Top_nonself_Hit', colnames(query.extra))
query = cbind(query, query.extra[rownames(query),])
blast.out = list(query=query, hit = hit, hsp=hsp, query.hit = query.hit, query.hsp=query.hsp, hit.hsp=hit.hsp,
iQuery4hit=iQuery4hit, iQuery4hsp=iQuery4hsp, iHit4hsp=iHit4hsp)
return(blast.out)
}
cluster.deepAnno <- function(gene.ranges = NULL, gff.file=NULL,
geMat=NULL, ica.spatial=NULL, prot.fasta.file=NULL, iprscan.tab.file = NULL, iprscan.table.file = NULL,
bam.file = NULL, unmapped.bam.file=NULL, EST.db = NULL, swiss.db = c('swissprot', 'fungiRefSwiss70'),
swiss.fasta.file = paste('/Users/yongli/Universe/data/blastdb/', swiss.db, '.fasta', sep=''),
DNA.fasta.file=NULL, genome.db = NULL,
in.ID.type = NULL,pat.prot.ID='',
prot.seq = read.fasta(prot.fasta.file, pattern = pat.prot.ID, type='AA'),
ipr.anno = iprscan.flat(iprscan.table.file),
gene.definition = c('gene', 'transcript', 'mRNA', 'CDS'),
out.file = NULL,append=F, proteinID = 'ID',
geneID2cdsID = NULL,
# geneID2cdsID = function(x){paste(x, '-P', sep='')},
extra.genes = 0, RORA.iteration=2, RORA.topOnly =T, multialn.method = 'mafft', # mafft is better based on hmmsearch of predicted genes against pHMM models build from blast hits
plotLogo=T, species=NULL, do.blast=T, do.tblastx=F, center.method = 'median', score.type = 'R', median.substraction = F, cor.method = 'pearson',
n.cluster.per.file = 70, start.from=1,end.to=NULL,
extra.nt = 2500, remove.intermediate.files = T,
s2d=NULL, # precomputed s2d
version = 3 # 20160818, version 3 add start, stop codon, and intergenic region evidences, it assigns different priorities to evidences of different confidence levels
){
# deep annotation of multiple predicted clusters
# YF Li 20140723-0803
# 20141003: add extra.genes
# 20141010: add genome file, modify to work without expression data
# 20141112: modify to work without expression data - add bam.file and EST.db
require(xlsx)
require('XLConnect')
require('Biostrings')
require(gplots)
require(rtracklayer)
root.dir = getwd()
system('cp /Users/yongli/Universe/write/Project_Current/9.O.NPbioinformatics/extrinsic.cfg ./')
swiss.db = match.arg(swiss.db); # 20160611
if (is.null(iprscan.table.file))
iprscan.table.file = iprscan.tab.file
gene.definition = match.arg(gene.definition)
tag = sub('^(.*)\\.xls.*','\\1', out.file)
if (!is.data.frame(gene.ranges) & !is.matrix(gene.ranges)){
gene.ranges = matrix(gene.ranges, 1, ncol = length(gene.ranges))
}
if (is.null(end.to)){
end.to=nrow(gene.ranges)
}
if (ncol(gene.ranges)==2){
gene.ranges = cbind(gene.ranges, paste(gene.ranges[,1], gene.ranges[,2], sep = '_'))
}else if (ncol(gene.ranges)!=3){
stop('gene.ranges need 3 or 2 columns')
}
anno = import.gff(gff.file) # 20160502
if (is.null(geneID2cdsID)){
m = learn.gff.ID.mapping(unlist.multi(anno@elementMetadata@listData$ID),
parent = unlist.multi(anno@elementMetadata@listData[[which(tolower(colnames(anno@elementMetadata))=='parent')]]),
node.type = as.character(anno@elementMetadata@listData$type))
geneID2cdsID = m[[paste(gene.definition, '2CDS', sep='')]]
}
if(!is.null(geMat)&!is.null(gff.file)){
ica.spatial = express.clustering(gff.file, geMat)
anno = ica.spatial$anno;
is.expressed = !is.na(match(ica.spatial$anno$ID, rownames(ica.spatial$S)))
names(is.expressed) = ica.spatial$anno$ID
}else if(!is.null(ica.spatial)){
anno = ica.spatial$anno;
is.expressed = !is.na(match(ica.spatial$anno$ID, rownames(ica.spatial$S)))
names(is.expressed) = ica.spatial$anno$ID
}else if(!is.null(gff.file)){
# gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
# anno = tryCatch(read.gff3(gff.file, format='gff3'), error = function(e){read.gff3(gff.file, format='gff')}, finally = NULL)
#anno = read.gff3(gff.file, format=gff.format)
idx.gene = (anno$type==gene.definition)
anno = anno[idx.gene, ]
anno = sort.intervals(anno)
# anno$ID = sub('transcript:', '', anno$ID)
is.expressed = vector('logical', length = length(anno)) | T;
names(is.expressed) = anno$ID;
}else{
stop('Provide ica.spatial or gff.file')
}
if (!is.null(in.ID.type)){ # 20151001
gene.ranges[,1:2] = anno$ID[sort(match(gene.ranges[,1:2], sub('\\.\\d$', '', anno@elementMetadata[,in.ID.type])))]
}
gene.ranges.original = gene.ranges
gene.index.ranges = matrix(0, nrow=nrow(gene.ranges), ncol = 2)
K = 0; # window size
for (i in start.from:end.to){
# tweek gene.ranges to find nearest expressed gene
i.1 <- i.10 <- match(gene.ranges[i,1], names(is.expressed));
while(i.1 > 1 && (as.character(anno@seqnames[i.1-1])==as.character(anno@seqnames[i.10])) && (!is.expressed[i.1] | i.10 - i.1 < extra.genes))
i.1 = i.1 - 1;
gene.ranges[i,1] = names(is.expressed)[i.1]
i.2 <- i.20 <- match(gene.ranges[i,2], names(is.expressed));
if (as.character(anno@seqnames[i.10])!=as.character(anno@seqnames[i.20])){
cat(gene.ranges[i,1], gene.ranges[i,2])
stop('clusters spans to chromosomes')
}
while(i.2 < length(is.expressed) && (as.character(anno@seqnames[i.2+1])==as.character(anno@seqnames[i.20])) && (!is.expressed[i.2] | i.2 - i.20 < extra.genes))
i.2 = i.2 + 1;
gene.ranges[i,2] = names(is.expressed)[i.2]
# gene.index.ranges = rbind(gene.index.ranges, c(i.1, i.2))
gene.index.ranges[i,] = c(i.1, i.2) # 20141125
K = max(K, i.2-i.1+1)
}
if (is.null(s2d) & !is.null(ica.spatial)){
s2d = list()
ica.spatial = ica.spatial.prep(ica.spatial, K= K, center.method=center.method,
score.type = score.type, median.substraction=median.substraction, do.plot = F) # precompute scores
for (i in start.from:end.to){
s2d[[gene.ranges[i,1]]]= score.spatial.cluster.2d(ica.spatial, gene.range=gene.ranges[i,1:2], cor.method = cor.method)
}
}
# pdf(paste('spatial.cluster.', 'cm_', center.method,'.st_',score.type,'.ms_',median.substraction, '.cor_', cor.method, '.pdf', sep=''),20,12)
if (!is.null(s2d)){
for (i in start.from:end.to){
# cID = paste(gene.ranges[i,1],gene.ranges[i,3], sep='_')
# cID = paste(gene.ranges.original[i,3],gene.ranges.original[i,1], sep='_')
cID = paste('c', gene.ranges.original[i,3], sep='')
fig.file = paste(cID, '.png', sep='')
png(fig.file, 20,12,units = 'in', res=60);
plot.spatial.cluster.2d(s2d[[gene.ranges[i,1]]], tag=paste(gene.ranges.original[i,3],gene.ranges.original[i,1],gene.ranges.original[i,2], sep='_'))
# plot.spatial.cluster.2d(AN8127, tag=paste(gene.ranges[i,3],gene.ranges[i,1], sep=': '), no.fdr = T)
dev.off()
# plot.spatial.cluster.2d(s2d[[i]], tag=cID)
}
# dev.off()
}
for (i in start.from:end.to){
# cID = paste(gene.ranges[i,1],gene.ranges[i,3], sep='_')
file.index = floor((i-1)/n.cluster.per.file)
#cID = paste('c', gene.ranges.original[i,3],'_',gene.ranges.original[i,1], sep='')
cID = paste('c', gene.ranges.original[i,3], sep='')
all.genes = names(is.expressed)[gene.index.ranges[i,1]:gene.index.ranges[i,2]]
all.proteins = anno@elementMetadata[gene.index.ranges[i,1]:gene.index.ranges[i,2],proteinID]; names(all.proteins) = all.genes;
tab = summerize.cluster(s2d[[gene.ranges[i,1]]], gene.range =gene.ranges[i,], extra.nt=extra.nt, all.proteins = all.proteins,
swiss.db = swiss.db, swiss.fasta.file = swiss.fasta.file,
genome.db=genome.db, anno=anno, gff.file=gff.file, prot.seq = prot.seq, bam.file=bam.file,
unmapped.bam.file=unmapped.bam.file, RORA.iteration=RORA.iteration, RORA.topOnly = RORA.topOnly, multialn.method = multialn.method, species=species,plotLogo=plotLogo,
DNA.fasta.file=DNA.fasta.file, ipr.anno = ipr.anno, tag = cID, # paste(gene.ranges[i,1], gene.ranges[i,2], sep='_'),
do.blast=do.blast, do.tblastx=do.tblastx, geneID2cdsID=geneID2cdsID, gene.definition=gene.definition, version = version); # blastp.xml.file = NULL,
setwd(root.dir)
CDSs = get.CDS(gene.IDs = rownames(tab),
gff.file = gff.file, DNA.fasta.file = DNA.fasta.file,
geneID2cdsID=geneID2cdsID) # 20141014: retrieve CDS sequences
for (j in 1:nrow(CDSs)){
pep = as.character(translate(DNAString(as.character(CDSs[j,1])), if.fuzzy.codon = 'X'));
if (gsub('\\*', '', pep) != gsub('\\*', '', tab[j,'protein seq'])){
warning(paste('CDS translation dose not match protein sequences, likely due to ambiguous nucleotide\n', pep ,'\n', tab[j,'protein seq'], '\n'))
}
}
CDSs = cbind(as.matrix(CDSs),nchar(as.character(CDSs[,1])),
'coding NT%' = round(nchar(as.character(CDSs[,1]))/CDSs[,3]*100,1),
'average exon size' = round(nchar(as.character(CDSs[,1]))/CDSs[,2],1),
'average intron size' = round((CDSs[,3]-nchar(as.character(CDSs[,1])))/(CDSs[,2]-1+1E-10),1))
colnames(CDSs)[1:5] = c('CDS', 'NO.exon', 'CDS_span(nt)', 'CDS_length(nt)', 'coding percentage')
tab = cbind(CDSs, tab) # 20141014: add CDS sequences
tab = mat.fill.row(tab, all.genes, default = '|') # 20141014: add non-protein coding genes back
extra.info = cbind(as.matrix(anno@ranges[gene.index.ranges[i,1]:gene.index.ranges[i,2]])[,1:2],
as.character(anno@strand[gene.index.ranges[i,1]:gene.index.ranges[i,2]]));
colnames(extra.info) = c('start', 'width', 'strand')
cluster.boundary = c('', 'Boundary')[1+!is.na(match(rownames(tab), gene.ranges.original))]
tab = cbind(cluster.boundary = cluster.boundary, extra.info, tab)
out.file.1 = sub('\\.([^\\.]*$)', c('.xlsx', paste('\\.',file.index, '\\.\\1', sep=''))[(file.index>0)+1], out.file);
write.xlsx(tab, out.file.1, sheetName = substr(cID, 1,31),col.names = T, row.names=T,showNA = F,
append = (((i-1)%%n.cluster.per.file != 0)|append))
wb <- loadWorkbook(out.file.1, create = TRUE)
fig.file = paste(cID, '.png', sep='')
cID = substr(cID, 1,31)
if (!is.null(s2d)){ # 20141112{
createName(wb, name = cID, formula = paste(cID, "!$A$", nrow(tab)+6, ':', "$S$", nrow(tab)+63, sep=''))
addImage(wb, filename = fig.file, name = cID, originalSize = F)
}
setColumnWidth(wb,sheet=cID,column=5,width=256*30)
# setColumnWidth(wb,sheet=cID,column=10,width=256*30)
# setColumnWidth(wb,sheet=cID,column=11,width=256*30)
setColumnWidth(wb,sheet=cID,column=10+5,width=256*30)
setColumnWidth(wb,sheet=cID,column=10+6,width=256*30)
setColumnWidth(wb,sheet=cID,column=10+9,width=256*20)
setColumnWidth(wb,sheet=cID,column=10+12,width=256*25)
saveWorkbook(wb)
}
if (remove.intermediate.files){
system(paste('rm ', cID, '*', sep = ''))
fs = setdiff(dir(pattern = paste('.*', cID, sep='')),dir(pattern = paste('pMap', cID, sep='')))
for (f in fs){
system(paste('rm ', f))
}
system('rm tmp*')
system('rm exonerate*')
system('rm hits*')
system('rm bowtie*')
for (f in c('both.bam', 'exex.fa', 'introns.lst','map.psl',
'newIntrons.gff', 'header.txt',
'chr.fasta', 'extrinsic.cfg')){
system(paste('rm ', f))
}
}
return(s2d)
}
augustus.species.ID <- function(augu.file = '/Users/yongli/Universe/ubuntu_bin/augustus-3.0.3/README.TXT'){
  # Parse the AUGUSTUS README and return a named character vector that maps
  # species display names (names) to AUGUSTUS model identifiers (values).
  readme = read.table(augu.file, sep = '\t', quote = '', header = F)
  # keep only the "identifier | species name" table rows; identifiers wrapped
  # in () are older model versions and fail this pattern, so they are ignored
  table.rows = readme[grepl('^[^\\)]*\\| .*$', readme[, 1]), 1]
  id.map = regexpr.match('^([^\\s\\(\\)]*) *\\| (.*)$', table.rows)
  rownames(id.map) = id.map[, 2]
  # drop the "species" header row and keep only the identifier column
  id.map = id.map[id.map[, 2] != 'species', 1]
  return(id.map)
}
RORA.pipeline <- function(root = '/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/KU2015',
                          cluster.info.file = '150925_UbiA_like_terpene_clusters_JGI_boundaries.xlsx',
                          from.id.type = 'protein_id', from.gene.definition = 'CDS',
                          use.multiple.model.species = F, RORA.iteration = 2, # swiss.db = c('swissprot', 'fungiRefSwiss70'),
                          skip.existing =F, plotLogo=F, extra.genes = 1,
                          i.start = 1, i.all = NULL,
                          simplify.model.species = T, tag = '',
                          GenBank.Only = F,
                          version = 2,# 20160818, version 3, add start, stop codon, and intergenic region evidences, it assigns different priorities to evidences of different confidence levels
                          ...){
  # i.start - starting clustering or i.all - index of clusters to included
  # when i.all is provided, it is used instead of i.start
  # 20151006-20151009
  # 20160611, add tag, RORA.iteration, swiss.db, ...
  # YF Li
  #
  # Overview: for each gene-cluster row in cluster.info.file, choose AUGUSTUS
  # model species and run cluster.deepAnno() on the corresponding genome.
  # Four genome sources are handled in separate branches below:
  #   (1) an in-house genome described in the In.House.Genome column,
  #   (2) GenBank-only genomes (only executed when GenBank.Only = TRUE),
  #   (3) JGI-only genomes,
  #   (4) both JGI and GenBank (protein IDs are mapped NCBI -> JGI via BLAST).
  # Side effects: creates per-cluster working folders under `root`, downloads
  # genome files, writes xlsx reports, figures, and a log file.
  require(xlsx)
  cluster.info = read.xlsx2(cluster.info.file, 1, stringsAsFactors = F)
  # drop fully empty rows from the spreadsheet
  cluster.info = cluster.info[rowSums(cluster.info!='')>0,]
  if (!is.null(i.all))
    cluster.info = cluster.info[i.all,,drop=F]
  else
    cluster.info = cluster.info[i.start:nrow(cluster.info),,drop=F]
  modelSpecies = select.ModelSpecies(unique(cluster.info$species), simplify = simplify.model.species)
  write.table(modelSpecies[[2]], file = paste('model_species_selection',i.start,'.tsv', sep=''), sep='\t')
  # strip trailing ".N" protein-version suffixes from the boundary protein IDs
  cluster.info$First.Protein = sub('\\.\\d$', '', cluster.info$First.Protein)
  cluster.info$Last.Protein = sub('\\.\\d$', '', cluster.info$Last.Protein)
  clusters = cbind(gsub(' ', '', cluster.info$First.Protein), gsub(' ', '', cluster.info$Last.Protein), paste(cluster.info$ClusterID, tag, sep=''))
  auguSpeciesID = augustus.species.ID()
  log.file = paste('log', i.start, '.txt', sep='')
  cat('\n\n# log info for the gene prediction tasks\n', file = log.file, append = T)
  cat(date(), file = log.file, append = T)
  # Branch (1): clusters whose genome is provided in-house. The column holds a
  # "key=value; key=value; ..." string that is parsed into the named vector c.
  for (i in which(cluster.info$In.House.Genome!='')){
    dat = cluster.info$In.House.Genome[i]
    dat = gsub("'","", dat)
    a = strsplit(dat, '; ')[[1]]
    b = read.table(text = a, header = F, sep = '=', as.is = T, strip.white = T)
    c = b[,2]
    names(c) = b[,1]
    model.species.all = auguSpeciesID[names(auguSpeciesID) %in% modelSpecies[[2]][rownames(modelSpecies[[2]])==cluster.info$species[i],1]]
    if (!use.multiple.model.species){
      model.species.all = model.species.all[1]
    }
    for (model.species in model.species.all){
      setwd(root)
      folder.name = paste(cluster.info$ClusterID[i], model.species, sep='_')
      if (skip.existing & file.exists(folder.name))
        next
      dir.create(folder.name)
      setwd(folder.name)
      cluster.deepAnno(gene.ranges = clusters[i,],
                       species = model.species, RORA.iteration = RORA.iteration,
                       gff.file = c['gff.file'], DNA.fasta.file = c['DNA.file'], iprscan.tab.file=c['iprscan.tab.file'],
                       gene.definition = c['gene.definition'] , proteinID = c['proteinID'], prot.fasta.file = c['pep.fasta.file'],
                       extra.genes = extra.genes,
                       plotLogo=plotLogo, multialn.method = 'muscle', RORA.topOnly=F,
                       # geneID2cdsID = identity, # 20160528 -- learn geneID2cdsID instead
                       out.file = paste(clusters[i,3], '.xlsx', sep=''),...)
      xlsx.color.NPGC(paste(clusters[i,3], '.xlsx', sep=''))
      # system(paste('rm -r ', NCBI.genome.tag, '*', sep=''))
    }
  }
  # Branch (2): GenBank-only genomes. Note the immediate break below -- this
  # whole branch is skipped unless GenBank.Only = TRUE.
  for (i in which(cluster.info$JGI.Genome=='' & cluster.info$GenBank.Genome != '')){
    if (!GenBank.Only)
      break
    model.species.all = auguSpeciesID[names(auguSpeciesID) %in% modelSpecies[[2]][rownames(modelSpecies[[2]])==cluster.info$species[i],1]]
    if (!use.multiple.model.species){
      model.species.all = model.species.all[1]
    }
    for (model.species in model.species.all){
      setwd(root)
      folder.name = paste(cluster.info$ClusterID[i], model.species, sep='_')
      if (skip.existing & file.exists(folder.name))
        next
      dir.create(folder.name)
      setwd(folder.name)
      NCBI.genome.tag = sub('_genomic', '', cluster.info$GenBank.Genome[i])
      prot.fasta.file = paste(NCBI.genome.tag, '_protein.faa', sep='')
      gff.file = paste(NCBI.genome.tag, '_genomic.gff', sep='')
      DNA.file = paste(NCBI.genome.tag, '_genomic.fna', sep='')
      iprscan.tab.file = NULL
      download.file(paste('ftp://ftp.ncbi.nlm.nih.gov/genomes/all/', NCBI.genome.tag, '/', prot.fasta.file, '.gz', sep=''),destfile = paste(prot.fasta.file, '.gz', sep=''))
      download.file(paste('ftp://ftp.ncbi.nlm.nih.gov/genomes/all/', NCBI.genome.tag, '/', gff.file, '.gz', sep=''),destfile = paste(gff.file, '.gz', sep=''))
      download.file(paste('ftp://ftp.ncbi.nlm.nih.gov/genomes/all/', NCBI.genome.tag, '/', DNA.file, '.gz', sep=''),destfile = paste(DNA.file, '.gz', sep=''))
      system(paste('gzip -d *.gz -f', sep=''))
      # files.to.remove is computed here but never used in this branch
      files.to.remove = list.files()
      files.to.keep = list.files(pattern = '(\\.xlsx|\\.xls)')
      files.to.remove = setdiff(files.to.remove, c(files.to.keep, 'readme_deepAnno.txt', 'readme_select_CDS.txt'))
      setwd(root)
      cat(paste('\n#########\n RORA for', folder.name, '\n', sep=''), file = log.file, append=T)
      cat(paste('protein file', prot.fasta.file), file = log.file, append=T)
      cat(paste('DNA file', DNA.file), file = log.file, append=T)
      cat(paste('gff file', gff.file), file = log.file, append=T)
      setwd(folder.name)
      if (!length(prot.fasta.file) | !length(gff.file) |!length(DNA.file))
        warning('GenBank file Missing')
      in.info = c(feature = 'CDS', id.type = 'protein_id')
      locs = gff.id.change(gff.file, in.info = in.info, in.ids = clusters[i,1:2],
                           extra.nt = 2500, out.type = 'nt') # change IDs
      # NOTE(review): DNA.fasta.file is not defined anywhere in this function;
      # this was very likely meant to be DNA.file (defined above) -- as written
      # this line would fail at run time. TODO confirm and fix.
      DNA.seq = getDNA.subseq(DNA.fasta.file, locs = locs)
      DNA.sub.file = paste(clusters[i,3], tag, '_Genome_subseq.fa', sep='')
      export(DNA.seq, con = DNA.sub.file, format = 'fasta')
      # NOTE(review): the ssh command below references out.folder, species,
      # chrseq.file and auguNovoAll.file, none of which are defined in this
      # scope -- it appears to be pasted from another script. It also embeds a
      # hard-coded ssh password ('abcd') via sshpass, a security concern.
      system(paste('sshpass -p abcd ssh fuga@192.168.56.110 \'cd ', out.folder, '; augustus --stopCodonExcludedFromCDS=false --sample=300 --predictionStart=', locs[,2], ' --predictionEnd=', locs[,3], ' --singlestrand=false --species=', species, ' --extrinsicCfgFile=~/',out.folder,'/extrinsic.cfg --alternatives-from-evidence=true --alternatives-from-sampling=true --minexonintronprob=0.08 --minmeanexonintronprob=0.3 --maxtracks=100 --gff3=on --genemodel=complete ', chrseq.file, ' > ', auguNovoAll.file, '\'', sep=''))
      # sshpass -p abcd ssh fuga@192.168.56.110 'cd NPbioinformatics/TangLab; augustus --sample=300 --singlestrand=false --species=aspergillus_nidulans --alternatives-from-evidence=false --alternatives-from-sampling=false --minexonintronprob=0.08 --minmeanexonintronprob=0.3 --maxtracks=-1 --protein=on --introns=on --start=on --stop=on --cds=on --gff3=on --genemodel=partial Pt_K85_scafSeq.fasta > Pt_K85_scafSeq.augoNovo_mANidulans.gff'
      cluster.deepAnno(gene.ranges = clusters[i,],
                       species = model.species, RORA.iteration = RORA.iteration,
                       gff.file = gff.file, DNA.fasta.file = DNA.sub.file, iprscan.tab.file=iprscan.tab.file,
                       gene.definition = 'gene', proteinID = 'protein_id', prot.fasta.file = prot.fasta.file, extra.genes = extra.genes,
                       plotLogo=plotLogo, multialn.method = 'muscle', RORA.topOnly=F,
                       # geneID2cdsID = identity,
                       out.file = paste(clusters[i,3], '.xlsx', sep=''),...)
      xlsx.color.NPGC(paste(clusters[i,3], '.xlsx', sep=''))
      system(paste('rm -r ', NCBI.genome.tag, '*', sep=''))
      # select.CDS(score.file = paste('pMap', clusters[i,3], '.xlsx', sep=''))
  }
  }
  # Branch (3): JGI-only genomes; downloads assembly/annotation from JGI and
  # locates the protein fasta, gff3, repeat-masked DNA and InterProScan files
  # by filename pattern (with looser fallback patterns when needed).
  for (i in which(cluster.info$JGI.Genome!='' & cluster.info$GenBank.Genome == '')){
    model.species.all = auguSpeciesID[names(auguSpeciesID) %in% modelSpecies[[2]][rownames(modelSpecies[[2]])==cluster.info$species[i],1]]
    if (!use.multiple.model.species){
      model.species.all = model.species.all[1]
    }
    for (model.species in model.species.all){
      setwd(root)
      folder.name = paste(cluster.info$ClusterID[i], model.species, sep='_')
      if (skip.existing & file.exists(folder.name))
        next
      dir.create(folder.name)
      setwd(folder.name)
      download.file('http://genome.jgi.doe.gov/fungi/fungi.info.html', 'JGI_list.html')
      system(paste('downloadJGIassembly.pl -html JGI_list.html -species ', cluster.info$JGI.Genome[i], sep=''))
      system(paste('gzip -d *.gz -f', sep=''))
      files.to.remove = list.files()
      files.to.keep = list.files(pattern = '(\\.xlsx|\\.xls)')
      files.to.remove = setdiff(files.to.remove, c(files.to.keep, 'readme_deepAnno.txt', 'readme_select_CDS.txt'))
      iprscan.tab.file = list.files(pattern = paste(cluster.info$JGI.Genome[i], '.*_IPR.tab', sep=''), ignore.case = T)
      if (!length(iprscan.tab.file))
        iprscan.tab.file = list.files(pattern = '.*.domaininfo.*.tab')
      prot.fasta.file = list.files(pattern = paste(cluster.info$JGI.Genome[i], '.*.aa.fasta', sep=''), ignore.case = T)
      if (!length(prot.fasta.file))
        prot.fasta.file = list.files(pattern = '.*.proteins.fasta')
      gff.file = list.files(pattern = paste(cluster.info$JGI.Genome[i], '.*proteins.*FilteredModels1.gff3', sep=''), ignore.case = T)
      if (!length(gff.file))
        gff.file = list.files(pattern = '.*.gff3')
      DNA.file = list.files(pattern = paste(cluster.info$JGI.Genome[i], '.*_Repeatmasked.fasta', sep=''), ignore.case = T)
      if (!length(DNA.file))
        DNA.file = list.files(pattern = '.*masked.*', ignore.case = T)
      setwd(root)
      cat(paste('\n#########\n RORA for', folder.name, '\n', sep=''), file = log.file, append=T)
      cat(paste('\niprscan file', iprscan.tab.file), file = log.file, append=T)
      cat(paste('\nprotein file', prot.fasta.file), file = log.file, append=T)
      cat(paste('\nDNA file', DNA.file), file = log.file, append=T)
      cat(paste('\ngff file', gff.file), file = log.file, append=T)
      setwd(folder.name)
      iprscan.tab.file = iprscan.tab.file[1]
      prot.fasta.file = prot.fasta.file[1]
      gff.file = gff.file[1]
      DNA.file = DNA.file[1]
      if (!length(iprscan.tab.file) | !length(prot.fasta.file) | !length(gff.file) |!length(DNA.file))
        warning('JGI Missing file')
      # cat('Mapping input IDs from GenBank to JGI\n')
      # hits = best.blast.hits(from.file = from.fasta.file,
      #                  from.gff.file = from.gff.file,
      #                  to.file = prot.fasta.file,
      #                  from.IDs = clusters[i,], id.type = from.id.type)
      cluster.deepAnno(gene.ranges = clusters[i,], # c(hits$sseqid[1], hits$sseqid[length(hits$sseqid)], folder.name), # c('gene11134', 'gene11143', 'KU0001'),
                       species = model.species, RORA.iteration = RORA.iteration,
                       gff.file = gff.file, DNA.fasta.file = DNA.file, iprscan.tab.file=iprscan.tab.file,
                       in.ID.type = 'proteinId',
                       gene.definition = 'gene', proteinID = 'proteinId', prot.fasta.file = prot.fasta.file, extra.genes = extra.genes,
                       plotLogo=plotLogo, multialn.method = 'muscle', RORA.topOnly=F,
                       # geneID2cdsID = function(x){sub('gene_', 'CDS_', x)},
                       out.file = paste(clusters[i,3], '.xlsx', sep=''),...)
      xlsx.color.NPGC(paste(clusters[i,3], '.xlsx', sep=''))
      for (f in files.to.remove){
        system(paste('rm -r ', f, sep=''))
      }
      system(paste('rm -r ', cluster.info$JGI.Genome[i], '*', sep=''))
      # select.CDS(score.file = paste('pMap', clusters[i,3], '.xlsx', sep=''))
    }
  }
  for (i in which(cluster.info$JGI.Genome!='' & cluster.info$GenBank.Genome != '')){ # need ID mapping from NCBI to JGI
    # Branch (4): genome available in both JGI and GenBank. Downloads both,
    # maps the cluster's boundary protein IDs from GenBank to JGI with
    # best.blast.hits(), logs any change in gene count/order, then annotates.
    model.species.all = auguSpeciesID[names(auguSpeciesID) %in% modelSpecies[[2]][rownames(modelSpecies[[2]])==cluster.info$species[i],1]]
    if (!use.multiple.model.species){
      model.species.all = model.species.all[1]
    }
    for (model.species in model.species.all){
      setwd(root)
      folder.name = paste(cluster.info$ClusterID[i], model.species, sep='_')
      if (skip.existing & file.exists(folder.name))
        next
      dir.create(folder.name)
      setwd(folder.name)
      download.file('http://genome.jgi.doe.gov/fungi/fungi.info.html', 'JGI_list.html')
      system(paste('downloadJGIassembly.pl -html JGI_list.html -species ', cluster.info$JGI.Genome[i], sep=''))
      system(paste('gzip -d *.gz -f', sep=''))
      NCBI.genome.tag = sub('_genomic', '', cluster.info$GenBank.Genome[i])
      from.fasta.file = paste(NCBI.genome.tag, '_protein.faa', sep='')
      from.gff.file = paste(NCBI.genome.tag, '_genomic.gff', sep='')
      DNA.file = paste(NCBI.genome.tag, '_genomic.fna', sep='')
      iprscan.tab.file = NULL
      download.file(paste('ftp://ftp.ncbi.nlm.nih.gov/genomes/all/', NCBI.genome.tag, '/', from.fasta.file, '.gz', sep=''),destfile = paste(from.fasta.file, '.gz', sep=''))
      download.file(paste('ftp://ftp.ncbi.nlm.nih.gov/genomes/all/', NCBI.genome.tag, '/', from.gff.file, '.gz', sep=''),destfile = paste(from.gff.file, '.gz', sep=''))
      # download.file(paste('ftp://ftp.ncbi.nlm.nih.gov/genomes/all/', NCBI.genome.tag, '/', DNA.file, '.gz', sep=''),destfile = paste(DNA.file, '.gz', sep=''))
      system(paste('gzip -d *.gz -f', sep=''))
      files.to.remove = list.files()
      files.to.keep = list.files(pattern = '(\\.xlsx|\\.xls)')
      files.to.remove = setdiff(files.to.remove, c(files.to.keep, 'readme_deepAnno.txt', 'readme_select_CDS.txt'))
      iprscan.tab.file = list.files(pattern = paste(cluster.info$JGI.Genome[i], '.*_IPR.tab', sep=''), ignore.case = T)
      if (!length(iprscan.tab.file))
        iprscan.tab.file = list.files(pattern = '.*.domaininfo.*.tab')
      prot.fasta.file = list.files(pattern = paste(cluster.info$JGI.Genome[i], '.*.aa.fasta', sep=''), ignore.case = T)
      if (!length(prot.fasta.file))
        prot.fasta.file = list.files(pattern = '.*.proteins.fasta')
      gff.file = list.files(pattern = paste(cluster.info$JGI.Genome[i], '.*proteins.*FilteredModels1.gff3', sep=''), ignore.case = T)
      if (!length(gff.file))
        gff.file = list.files(pattern = '.*.gff3')
      DNA.file = list.files(pattern = paste(cluster.info$JGI.Genome[i], '.*_Repeatmasked.fasta', sep=''), ignore.case = T)
      if (!length(DNA.file))
        DNA.file = list.files(pattern = '.*masked.*', ignore.case = T)
      setwd(root)
      cat(paste('\n#########\n RORA for', folder.name, '\n', sep=''), file = log.file, append=T)
      cat(paste('\niprscan file', iprscan.tab.file), file = log.file, append=T)
      cat(paste('\nprotein file', prot.fasta.file), file = log.file, append=T)
      cat(paste('\nDNA file', DNA.file), file = log.file, append=T)
      cat(paste('\ngff file', gff.file), file = log.file, append=T)
      setwd(folder.name)
      iprscan.tab.file = iprscan.tab.file[1]
      prot.fasta.file = prot.fasta.file[1]
      gff.file = gff.file[1]
      DNA.file = DNA.file[1]
      if (!length(iprscan.tab.file) | !length(prot.fasta.file) | !length(gff.file) |!length(DNA.file))
        warning('JGI Missing file')
      cat('Mapping input IDs from GenBank to JGI\n')
      hits = best.blast.hits(from.file = from.fasta.file,
                             from.gff.file = from.gff.file,
                             to.file = prot.fasta.file,
                             from.IDs = clusters[i,], id.type = from.id.type)
      pat.prot.ID = '^.*\\|.*\\|(.+)\\|(.*)$'; # extract protein names from fasta preambles
      hits$sseqid = sub(pat.prot.ID, '\\1',hits$sseqid)
      sseqid.ordered = geneRanges2allGenes(gff.file, hits$sseqid, id.type = 'proteinId', gene.definition = 'gene')
      setwd(root)
      cat(paste(paste(hits$qseqid, collapse = ','), 'mapped to', paste(hits$sseqid, collapse = ',')), file = log.file, append=T)
      if (length(unique(sseqid.ordered))!=length(unique(hits$sseqid))){
        cat('\nGene number changed after mapping: ', file = log.file, append=T)
        cat(paste('\n!!!',paste(hits$sseqid, collapse = ','), 'mapped to', paste(sseqid.ordered, collapse = ',')), file = log.file, append=T)
      }else if (sseqid.ordered[1]!=hits$sseqid[1] | sseqid.ordered[length(sseqid.ordered)] != hits$sseqid[length(hits$sseqid)]){
        cat('\nGene order changed after mapping: ', file = log.file, append=T)
        cat(paste('\n!!!',paste(hits$sseqid, collapse = ','), 'spans to', paste(sseqid.ordered, collapse = ',')), file = log.file, append=T)
      }
      setwd(folder.name)
      cluster.deepAnno(gene.ranges = c(sseqid.ordered[1], sseqid.ordered[length(sseqid.ordered)], folder.name), # c(hits$sseqid[1], hits$sseqid[length(hits$sseqid)], folder.name), # c('gene11134', 'gene11143', 'KU0001'),
                       species = model.species, RORA.iteration = RORA.iteration,
                       gff.file = gff.file, DNA.fasta.file = DNA.file, iprscan.tab.file=iprscan.tab.file,
                       in.ID.type = 'proteinId',pat.prot.ID= pat.prot.ID,
                       gene.definition = 'gene', proteinID = 'proteinId', prot.fasta.file = prot.fasta.file, extra.genes = extra.genes,
                       plotLogo=plotLogo, multialn.method = 'muscle', RORA.topOnly=F,
                       # geneID2cdsID = function(x){sub('gene_', 'CDS_', x)},
                       out.file = paste(clusters[i,3], '.xlsx', sep=''),...)
      xlsx.color.NPGC(paste(clusters[i,3], '.xlsx', sep=''))
      for (f in files.to.remove){
        system(paste('rm -r ', f, sep=''))
      }
      system(paste('rm -r ', cluster.info$JGI.Genome[i], '*', sep=''))
      # select.CDS(score.file = paste('pMap', clusters[i,3], '.xlsx', sep=''))
    }
  }
  setwd(root)
  system('cp */colored_*.xlsx ./')
  # system('cp */pretty_pMap*.xlsx ./')
  # system('cp */*blastp.hits ./')
  if (1){# only process the folders generated in the current execution of the program
    for (i in cluster.info$ClusterID){
      files = dir(recursive = T, pattern = paste('pretty_pMapc', i, '.xlsx', sep=''))
      files = files[regexpr(paste(i,'_.*\\/', sep=''), files)>0]
      select.CDS.multiModel(all.files = files,
                            re.cluster.model = '^(.*\\.[^_\\/]*)_([^\\/]*)\\/')
    }
  }else{# process all folders under the current directory
    select.CDS.multiModel(all.files = dir(recursive = T, pattern = paste('pretty_pMapc.*.xlsx', sep='')),
                          re.cluster.model = '^(.*\\.[^_\\/]*)_([^\\/]*)\\/')
  }
  # NOTE(review): '*.xlx' below looks like a typo for '*.xlsx' -- as written
  # this rm almost certainly matches nothing. TODO confirm intent.
  system('rm ./pMap*.xlx')
}
deepAnno.clusters <- clusters.deepAnno <- function(cluster.file = '/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current/cluster.annoCompact.A_nidulans_FGSC_A4_current.MC29e.simu2000.refSimu.chrNonspecific.w20.p0.005.NWindowClusters98.tab',
                    gff.file="/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_features.gff",
                    DNA.fasta.file='/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_chromosomes.fasta',
                    prot.fasta.file = "A_nidulans_FGSC_A4_current_orf_trans_all.fasta",
                    iprscan.tab.file = 'A_nidulans_FGSC_A4_iprscan.out.txt',
                    geMat = NULL,
                    gene.definition = c('gene', 'transcript', 'mRNA'), proteinID = 'ID',
                    geneID2cdsID=function(x){paste(x, '-P', sep='')},
                    ica.spatial=NULL,n.cluster.per.file=70, start.from=1, end.to=NULL,
                    out.file = 'nidulans.deepAnno.all.xlsx',
                    RORA.iteration = 2, species = 'aspergillus_nidulans', plotLogo=F,multialn.method = 'muscle',RORA.topOnly=T,
                    max.dist.merge = -13, # distance cut off for mergeing clusters
                    # negative <=> overlaps, 0<=>next to each other
                    extra.genes = 5 # add to each side of the cluster
                    ){ # root = '/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/Annotation'){
  # 20141003-1004, YFLi
  # cluster.file = '/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current/cluster.annoCompact.A_nidulans_FGSC_A4_current.MC29e.simu2000.refSimu.chrNonspecific.w20.p0.005.NWindowClusters98.tab'
  # gff.file="/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_features.gff"
  # prot.fasta.file = "/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/Annotation/A_nidulans_FGSC_A4_current_orf_trans_all.fasta"
  # iprscan.tab.file = '/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/Annotation/A_nidulans_FGSC_A4_iprscan.out.txt'
  # prot.seq = read.fasta(prot.fasta.file, type='AA')
  # ipr.anno = iprscan.flat(iprscan.tab.file)
  #
  # Reads sliding-window cluster predictions from cluster.file, merges
  # clusters whose index distance is <= max.dist.merge, builds human-readable
  # cluster names tagged KN/UU (known/unknown) with the best p-value, and
  # forwards the merged ranges to cluster.deepAnno().
  gene.definition = match.arg(gene.definition);
  # Gene annotation (anno) comes from one of three sources, in priority order:
  # expression clustering of geMat, a precomputed ica.spatial, or the gff file.
  if(!is.null(geMat)&!is.null(gff.file)){
    ica.spatial = express.clustering(gff.file, geMat)
    anno = ica.spatial$anno;
  }else if(!is.null(ica.spatial)){
    anno = ica.spatial$anno;
  }else if(!is.null(gff.file)){
    # gff.format is derived from the extension; NOTE(review): it is unused now
    # that import.gff() is called instead of the commented-out read.gff3().
    gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
    # anno = read.gff3(gff.file, format=gff.format)
    anno = import.gff(gff.file) # 20160502
    idx.gene = (anno$type==gene.definition)
    anno = anno[idx.gene, ]
    anno = sort.intervals(anno)
  }else{
    stop('Provide ica.spatial or gff.file')
  }
  # get gene annotation and gene orders
  # anno = read.gff3(gff.file, format='gff3')
  # NOTE(review): filtering/sorting is repeated here; for the gff branch above
  # this is redundant (harmless), for the ica.spatial branches it is required.
  idx.gene = (anno$type==gene.definition)
  anno = anno[idx.gene, ]
  anno = sort.intervals(anno)
  n = length(anno)
  # anno$ID = sub('transcript:', '', anno$ID)
  # get clusters and merge by distances
  cf = read.csv(cluster.file, header = F, sep='\t', as.is = T, comment.char = '#')
  cf = cf[2:nrow(cf),]
  n.zeros = sapply(strsplit(cf$V5, '\\.\\.\\.'), length)
  cf$V5[n.zeros>1] = paste(0,'*', n.zeros[n.zeros>1], sep='') # simply the matched SM gene notation
  # gene.ranges: first/last gene ID per cluster; locs: their indices in anno
  gene.ranges = t(sapply(strsplit(cf$V4, ' - '),FUN = 'identity'))
  locs = matrix(match(gene.ranges, anno$ID),nrow = nrow(gene.ranges), ncol=2)
  gene.ranges = cbind(gene.ranges, cf$V1, cf$V5, sprintf('%.1e', as.numeric(cf$V7)))
  gene.ranges = sort.by(gene.ranges, by = locs[,1])
  locs = sort.by(locs, by = locs[,1])
  nc = nrow(locs)
  # consecutive clusters closer than max.dist.merge (in gene-index units,
  # negative = overlapping) are assigned the same cluster.ID and merged below
  cluster.ID = cumsum(c(1, locs[2:nc,1]- locs[1:(nc-1),2] - 1 > max.dist.merge))
  # cbind(gene.ranges, cluster.ID)
  # get gene ranges and create cluster names
  # Merged name format: (KN|UU) + 'p' + best p-value + '_' + window IDs;
  # KN = at least one known SM gene matched, UU = unknown.
  s = cbind(by(gene.ranges[,1],INDICES = cluster.ID, FUN = function(x){as.character(x)[1]}),
            by(gene.ranges[,2],INDICES = cluster.ID, FUN = function(x){as.character(x)[length(x)]}),
            paste(c('UU', 'KN')[((regexpr('\\*', by(gene.ranges[,4],INDICES = cluster.ID, FUN = paste,collapse = '_'))>0)|by(gene.ranges[,4]=='0', INDICES=cluster.ID, FUN=any)) + 1],'p',
                  by(gene.ranges[,5],INDICES = cluster.ID, FUN = function(x){sprintf('%.0e',min(as.numeric(as.character(x))))}), '_',
                  by(gene.ranges[,3],INDICES = cluster.ID, FUN = function(x){paste(as.character(x)[unique(c(1,length(x)))],collapse = '_')}),
                  sep=''))
  # sort clusters by p-value within KN/UU category, then tidy up the names
  s = sort.by(s, by = as.numeric(sub(pattern = '.*p([^ _]*)_S.*', replacement = '\\1', s[,3])))
  s = sort.by(s, by = sub(pattern = '(.*)p[^ _]*_S.*', replacement = '\\1', s[,3]), decreasing=T)
  s[,3] = sub('-0', '_', s[,3])
  s[,3] = sub('-', '_', s[,3])
  s[,3] = sub('p0e\\+00', 'p0', s[,3])
  if (is.null(end.to))
    end.to = nrow(s)
  # s2d = cluster.deepAnno(ica.spatial = ica.spatial, gene.ranges = s[30:31,], prot.seq=prot.seq, ipr.anno = ipr.anno, out.file = out.file, extra.genes=extra.genes, append=F)
  # NOTE(review): the argument below is spelled iprscan.table.file while the
  # callers elsewhere use iprscan.tab.file -- confirm cluster.deepAnno's formal
  # name; if it differs this value may be silently swallowed by '...'.
  s2d = cluster.deepAnno(ica.spatial = ica.spatial, proteinID = proteinID, gff.file = gff.file, gene.ranges = s, DNA.fasta.file = DNA.fasta.file, prot.fasta.file = prot.fasta.file, iprscan.table.file = iprscan.tab.file, out.file = out.file, extra.genes=extra.genes,
                         start.from=start.from, end.to=end.to, n.cluster.per.file=n.cluster.per.file, append=F, geneID2cdsID = geneID2cdsID, gene.definition=gene.definition,
                         RORA.iteration = RORA.iteration, species = species, plotLogo=plotLogo, multialn.method = multialn.method, RORA.topOnly=RORA.topOnly)
  invisible(s2d)
}
get.NCBI.blast <- function(query, db = 'nr', no.hit = 100, filter='L', program='blastp'){
  # Submit a BLAST search to NCBI's Blast.cgi URL API and download the XML
  # report once it is ready.
  #   query   - query sequence/accession, substituted directly into the URL
  #   db      - target database (e.g. 'nr')
  #   no.hit  - maximum hits requested (HITLIST_SIZE)
  #   filter  - low-complexity filter flag ('L' = on)
  #   program - BLAST flavor (e.g. 'blastp')
  # Returns the path of a temp file containing the downloaded XML.
  # NOTE(review): the query is not URL-encoded and the endpoint is plain
  # http:// -- NCBI has since moved to https; confirm this still works.
  require('annotate')
  baseUrl <- "http://www.ncbi.nlm.nih.gov/blast/Blast.cgi"
  # CMD=Put submits the search; the response page embeds the request ID (RID)
  url0 = paste(baseUrl, '?QUERY=',query,'&DATABASE=',db,'&HITLIST_SIZE=',no.hit, '&FILTER=', filter, '&PROGRAM=', program, '&CMD=Put', sep='')
  post <- htmlTreeParse(url0, useInternalNodes = TRUE)
  # RID and RTOE (estimated time to completion) live inside an HTML comment
  x <- post[["string(//comment()[contains(., \"QBlastInfoBegin\")])"]]
  rid <- sub(".*RID = ([[:alnum:]]+).*", "\\1", x)
  # rtoe is computed but not used below
  rtoe <- as.integer(sub(".*RTOE = ([[:digit:]]+).*", "\\1", x)) * 10
  url1 <- sprintf("%s?RID=%s&FORMAT_TYPE=XML&CMD=Get", baseUrl,
                  rid)
  message("Waiting for NCBI to process the request")
  # .tryParseResult is an internal of the annotate package; presumably it
  # polls url1 until the result is ready. Its return value is discarded --
  # the call only serves to block until completion. TODO confirm.
  result <- .tryParseResult(url1)
  results <- tempfile()
  download.file(url1, destfile = results)
  return(results)
}
landmark.dist.by <- function(idx.query, idx.landmarks, groups, ...){
  # Compute, separately within each group (e.g. per chromosome), the distance
  # from every query position to its nearest landmark, then return the
  # distances ordered by the queries' positions in the full sequence.
  #   idx.query     - length-n logical indicator of query positions
  #   idx.landmarks - length-n logical indicator of landmark positions
  #   groups        - length-n grouping vector; distances are only measured
  #                   within a group
  # Returns a numeric vector named by the queries' global indices, sorted by
  # those indices.
  # Yong Fuga Li, 20150222
  per.group = as.list(by(seq_along(groups), INDICES = groups,
                         FUN = function(x){d = landmark.dist(idx.query = idx.query[x], idx.landmarks = idx.landmarks[x]);
                                           names(d)=intersect(which(idx.query),x);
                                           d}, simplify=T))
  # Concatenate per-group results. unname() strips the list's group names so
  # unlist() keeps the elements' own (global-index) names without prefixing
  # them; this replaces the original grow-by-c() loop (an O(n^2) pattern).
  out = unlist(unname(per.group))
  out = sort.by(out, as.numeric(names(out)))
  return(out)
}
dist.landmark <- landmark.dist <- landmark.distances <- function(idx.query, idx.landmarks, query = NULL, landmarks = NULL, sequences = NULL, method = 'min'){
  # idx.query: length n logical/indicator vector for the query instances on a sequence of length n
  # idx.landmarks: length n logical/indicator vector for the landmarks instances on a sequence of length n
  # query: queries - a subset from sequences
  # landmarks: landmarks - a subset from sequences
  # sequences: a character vectors for the elements in a sequences
  # compute the distances between a set of query instances and a set of predefined landmarks on a linear sequence
  # Yong Fuga Li, 20140820, 20141214
  # idx.query = mod$metabolism2nd; idx.landmarks = mod$TF
  #
  # Returns one distance per query: the absolute index distance to the
  # nearest landmark (Inf for every query when there are no landmarks).
  # NOTE(review): query, landmarks, sequences and method are currently unused.
  # Logical input is converted to integer positions; numeric input is taken
  # as positions directly. loc.q is a 1 x q row matrix, loc.l an l x 1 column
  # matrix, so the tiled difference d below is an l x q matrix with
  # d[i, j] = loc.q[j] - loc.l[i].
  if (is.logical(idx.query)){
    loc.q = t(which(idx.query))
    colnames(loc.q) = names(idx.query)[loc.q]
  }else{
    loc.q = t(idx.query)
  }
  if (is.logical(idx.landmarks)){
    loc.l = t(t(which(idx.landmarks)))
    # NOTE(review): names(idx.query) is used here rather than
    # names(idx.landmarks). Both index the same length-n sequence, so the
    # positional names agree when both vectors are named; these rownames are
    # not part of the return value in any case. TODO confirm intent.
    rownames(loc.l) = names(idx.query)[loc.l]
  }else{
    loc.l = t(t(idx.landmarks))
  }
  if (length(loc.l)==0)
    return(rep(Inf, length(loc.q)))
  d = repmat(loc.q, length(loc.l),1) - repmat(loc.l,1,length(loc.q))
  # per-query (column) minimum of the absolute distances
  d.min = colMin(abs(d))
  return(d.min)
}
dist.landmark.which <- landmark.dist.which <- landmark.distances.which <- function(idx.query, idx.landmarks, query = NULL,
                                                                                   landmarks = NULL, sequences = NULL,
                                                                                   method = c('both', 'left', 'right'),
                                                                                   include.equal = T){
  # idx.query: length n logical/indicator vector for the query instances on a sequence of length n
  # idx.landmarks: length n logical/indicator vector for the landmarks instances on a sequence of length n
  # query: queries - a subset from sequences
  # landmarks: landmarks - a subset from sequences
  # sequences: a character vectors for the elements in a sequences
  # compute the distances between a set of query instances and a set of predefined landmarks on a linear sequence
  # Yong Fuga Li, 20141214
  # 20141219: add method
  # idx.query = mod$metabolism2nd; idx.landmarks = mod$TF
  #
  # For each query, return WHICH landmark is nearest (rather than the
  # distance, cf. landmark.dist):
  #   method = 'both'  - nearest landmark on either side
  #   method = 'left'  - nearest landmark at (include.equal) or before the query
  #   method = 'right' - nearest landmark at (include.equal) or after the query
  # Returns landmark indices (positions in the original sequence when
  # idx.landmarks is logical); NA for every query when there are no landmarks.
  # NOTE(review): query, landmarks and sequences are currently unused.
  method = match.arg(method);
  # logical input -> integer positions; loc.q is a row matrix, loc.l a column
  # matrix, so d[i, j] = loc.q[j] - loc.l[i] below (landmarks x queries)
  if (is.logical(idx.query)){
    loc.q = t(which(idx.query))
  }else{
    loc.q = t(idx.query)
  }
  if (is.logical(idx.landmarks)){
    loc.l = t(t(which(idx.landmarks)))
  }else{
    loc.l = t(t(idx.landmarks))
  }
  if (length(loc.l)==0)
    return(rep(NA, length(loc.q)))
  d = repmat(loc.q, length(loc.l),1) - repmat(loc.l,1,length(loc.q))
  # d.min = colMin(abs(d))
  # Landmarks on the disallowed side are masked with +/-Inf so max.col never
  # selects them. NOTE(review): max.col defaults to ties.method = "random",
  # so equidistant landmarks are broken NONdeterministically -- if stable
  # output is required, pass ties.method = "first". TODO confirm.
  if (method=='both'){
    idx = max.col(-t(abs(d)))
  }else if (method == 'left'){
    if (include.equal){
      d[d<0] = Inf;
    }else{
      d[d<=0] =Inf;
    }
    idx = max.col(-t(d))
  }else if (method =='right'){
    if (include.equal){
      d[d>0] = -Inf;
    }else{
      d[d>=0] = -Inf;
    }
    idx = max.col(t(d))
  }
  if (is.logical(idx.landmarks)){ # change to the original index if input is indicator vector, 20141218
    idx = loc.l[idx]
  }
  return(idx)
}
blast2profile.DD.P <- blast2profile.tblastx <- function(blast.xml = 'AN8428_blastx_nr.xml', query.file = 'AN8428.fasta', db = 'nr', by=c('query','db', 'align'),
                                                        root = '/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/AutoAnno'){
  ## obtain the consensus sequence based on blastx
  ## Yong Fuga Li
  ## 20140916
  ## NOTE(review): unimplemented stub. Its only effect is changing the
  ## working directory (with no on.exit() restore), and it returns the
  ## setwd() value (the previous directory) invisibly.
  setwd(root)
}
# Point Wise2 (genewise) at its configuration directory; the genewise system
# calls in blast2profile.blastx() require WISECONFIGDIR to be set.
Sys.setenv(WISECONFIGDIR='/Users/yongli/Universe/bin/wise2.4.1/wisecfg/')
veriGene <- blast2profile.DP.P <- blast2profile.blastx <- function(blast.xml = 'AN8428_blastx_nr.xml',
                                                                   query.file = 'AN8428.fasta',
                                                                   db = 'nr', Evalue = 0.1,
                                                                   by=c('query','hits', 'align'),
                                                                   max.seq = 100,
                                                                   tag = sub('.xml', '', blast.xml),
                                                                   root = '/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/Nidulans.SlidingWindow/AutoAnno'){
  ## obtain the consensus sequence based on blastx
  ## Yong Fuga Li
  ## 20140916
  ##
  ## Pipeline: parse blastx XML -> filter hits by Evalue -> fetch up to
  ## max.seq hit sequences from GenBank -> multiple-align (mafft linsi) ->
  ## build an HMM (hmmer) -> genewise gene prediction on the query DNA ->
  ## blastx cross-checks against the genewise and GenomeScan predictions ->
  ## sequence logo plots of the alignment profile.
  ## Side effects: changes the working directory to `root` (no restore) and
  ## writes many intermediate files named <tag>_hits<n>_E<e>*.
  ## NOTE(review): the default for `tag` uses sub('.xml', '') with an
  ## unescaped '.', which matches any character before 'xml'.
  require(annotate)
  require(Biostrings)
  require(lattice)
  # redundant with the default argument above; only reached when the caller
  # explicitly passes tag = NULL
  if (is.null(tag)){
    tag = sub('.xml', '', blast.xml)
  }
  setwd(root)
  querySeq = read.fasta(fasta.file = query.file, type = 'DNA')
  # parse blast results
  bl = blast.xml.parse(blast.xml = blast.xml)
  bl.f = blast.filter(bl, Evalue = Evalue)
  # retrieve hit sequences from GenBank
  gi = sub('gi\\|([^\\|]*)\\|.*', '\\1', bl.f$hit$Hit_def, perl = T)
  n.seq = min(length(gi), max.seq)
  hitSeq = getSEQS(gi[1:n.seq])
  # multiple alignment & HMM bulding
  # E.g. Evalue 0.1 -> '0_1' so the value is filename-safe
  Evalue.string = sub('\\.', '_', paste(Evalue))
  fa.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '.fasta', sep='')
  aln.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_aln.fasta', sep='')
  hmm.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '.hmm', sep='')
  hmm2.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '.hmm2', sep='')
  genewise.gff.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_wise.gff', sep='')
  genewise.fasta.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_wise.fasta', sep='')
  blastx2.asn.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_wise.asn', sep='')
  blastx2.xml.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_wise.xml', sep='')
  genomescan.fasta.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_GenomeScan_Vertibrate.fasta', sep='')
  blastx3.asn.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_GenomeScan_Vertibrate.asn', sep='')
  blastx3.xml.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_GenomeScan_Vertibrate.xml', sep='')
  logo.file = paste(tag, '_hits', n.seq, '_E', Evalue.string, '_aln_Logo.pdf', sep='')
  write.fasta(hitSeq[,'seq'], out.file = fa.file)
  # align the hit sequences with mafft L-INS-i (muscle alternative commented out)
  # system(paste('muscle -in', fa.file, '-out', aln.file))
  system(paste('linsi ', fa.file, '>', aln.file))
  # build an HMM and convert to the HMMER2 format genewise expects
  system(paste('hmmbuild', hmm.file, aln.file), ignore.stdout=T, ignore.stderr = T,intern = F)
  system(paste('hmmconvert -2', hmm.file, '>', hmm2.file))
  ### genewise
  system(paste('genewise', hmm2.file, query.file, '-hmmer -both -pep -divide \'\' > ', genewise.fasta.file), ignore.stderr = T,intern = F)
  system(paste('genewise', hmm2.file, query.file, '-hmmer -both -gff -divide \'\' > ', genewise.gff.file), ignore.stderr = T,intern = F)
  # system('fasttree hits_aln.fasta > hits.tree')
  # only cross-check if genewise produced non-trivial output (> 5 bytes)
  if (file.info(genewise.fasta.file)$size > 5){
    system(paste('blastx -subject', genewise.fasta.file, '-query', query.file, '-outfmt 11 -out', blastx2.asn.file, '-evalue 1'))
    system(paste('blast_formatter -archive', blastx2.asn.file, '-outfmt 5 -out', blastx2.xml.file))
  }
  # run genome scan
  # NOTE(review): genomescan.fasta.file is never generated by this function --
  # it must already exist in `root` (presumably produced by an external
  # GenomeScan run); otherwise this blastx call fails. TODO confirm.
  system(paste('blastx -subject', genomescan.fasta.file, '-query', query.file, '-outfmt 11 -out', blastx3.asn.file, '-evalue 1'))
  system(paste('blast_formatter -archive', blastx3.asn.file, '-outfmt 5 -out', blastx3.xml.file))
  # profile model
  # consensus matrices of the full and gap-masked alignments, rendered as logos
  aln = readAAMultipleAlignment(aln.file)
  aln.m = maskGaps(aln)
  prof = consensusMatrix(aln)
  prof.m = consensusMatrix(aln.m)
  pdf(logo.file, 10,6)
  print(seqLogo2(prof))
  print(seqLogo2(prof.m))
  dev.off()
  #
  # consensusString(aln.m)
  # consensusViews(aln.m)
}
## Stub: consensus profile from blastn (DNA query vs DNA db) results.
## Body not implemented; calling it does nothing and returns NULL.
blast2profile.DD.D <- blast2profile.blastn <- function(blast.xml = 'AN8428_blastx_nr.xml', query.file = 'AN8428.fasta', db = 'nr', by=c('query','db', 'align')){
  ## obtain the consensus sequence based on blastx
  ## Yong Fuga Li
  ## 20140916
}
## Stub: planned pHMM-based variant of proMap (design notes only, 20141215).
## Intended flow is sketched in the comments below; nothing is implemented.
proMap.hmm <- function(){ # use pHMM to align, score, and generate hints
  # 20141215
  # cluster all hits by genomic locations
  # MAS of hits in each cluster
  # build pHMM models: hmmbuild -O modifiedMSA.txt cS818AN8444.hmm cS818_augoNovoAN8444.t3.aln
  # align query gene models to the pHMM of the same genomic location: hmmsearch --tblout AN8444.hmmtblout.txt cS818AN8444.hmm AN8444.fa > AN8444.hmm.aln
  # score whole protein, and protein part to generate hints
}
## proMap / blastp2profile / blast2profile.PP
## Score every predicted protein in `query.faa.file` against its blastp hits:
##   - extract per-query hit lists from the blast archive `blast.asn.file`,
##   - pull the hit sequences from `db` fasta, multiple-align hits + query,
##   - score the query against the alignment (score.CDS) and against an
##     HMMER profile of the hits (get.hmmer.global.score),
##   - optionally render a sequence-logo PDF per query.
## Returns a named list (one element per query gene) of score.CDS results,
## each augmented with $hmm.global.score, $hmm.Evalue and $CDS.
## Side effects: writes many intermediate files (hits*.txt/fasta, *.aln,
## *.hmm, *_aln.pdf) into the current working directory.
## NOTE(review): `hints.gff`, `nq` and `by` are currently unused.
## Yong Fuga Li, 20140916, 201412-20141213; v3 20160618 (start/stop/intergenic
## region evidences).
proMap <- blastp2profile <- blast2profile.PP <- function(blast.asn.file = 'cAfu3g01400_Afu3g01480_swissprot.asn',
                                                         query.gff.file = 'cAfu3g01400_Afu3g01480.gff',
                                                         query.faa.file = 'cAfu3g01400_Afu3g01480.fasta',
                                                         DNA.fasta.file = 'cAfu3g01400_Afu3g01480DNA_subseq.fasta',
                                                         geneID2cdsID=function(x){paste(x, '-P', sep='')},
                                                         remove.identical.hits =T,
                                                         tag = sub('.asn', '', blast.asn.file),
                                                         hints.gff = paste(tag, 'proMap.hints.gff'),
                                                         multialn.method = 'muscle', # FIX: default was `multialn.method = multialn.method` (self-referential promise, errored whenever the argument was omitted)
                                                         plotLogo = T, plotLogo.noInDel = F,
                                                         plot.width = 50, iteration = '',
                                                         db = '/Users/yongli/Universe/data/blastdb/swissprot.fasta',
                                                         by=c('query','db', 'align'),
                                                         aln.maxiters = 4 # previously 2, now 4, 20160818, ref: http://www.drive5.com/muscle/manual/compromise.html
                                                         ){
  require(annotate)
  require(Biostrings)
  require(lattice)
  require(ggplot2);
  no.top.hits2 = 100000L  # effectively "all hits" when reformatting the archive
  require(gridExtra)
  if (is.null(tag))
    tag = sub('.asn', '', blast.asn.file) ;
  # get CDS and peptides for every query gene
  qSeq = read.fasta(fasta.files = query.faa.file, type = 'AA')
  qCDSSeq = get.CDS(gene.IDs = rownames(qSeq),
                    gff.file = query.gff.file, DNA.fasta.file = DNA.fasta.file,
                    geneID2cdsID=geneID2cdsID)
  # sanity check: each CDS must translate to the supplied protein sequence
  for (j in rownames(qCDSSeq)){
    pep = as.character(translate(DNAString(as.character(qCDSSeq[j,1])),if.fuzzy.codon = 'X'));
    if (gsub('\\*', '', pep) != gsub('\\*', '', qSeq[j,'seq'])){
      cat(pep, '\n')
      cat(qSeq[j, 'seq'], '\n')
      warning('CDS translation dose not match protein sequences')
    }
  }
  # per-gene exon sizes (nt), parsed from the comma-separated gff field
  exon.sizes = sapply(qCDSSeq$exon.sizes, FUN = function(x)as.numeric(unlist(strsplit(as.character(x), ','))))
  names(exon.sizes) = rownames(qCDSSeq)
  #### parse blast results into a flat hit table
  system(paste('blast_formatter -archive', blast.asn.file, '-outfmt \'6 qseqid sseqid evalue pident gaps qstart qend\' -out hits.txt -max_target_seqs ', no.top.hits2)) # 20160505
  hits = read.table('hits.txt', header = F, sep = '\t', as.is=T)
  if (remove.identical.hits){ # drop hits identical to the query (100% identity, no gaps)
    hits = hits[!(hits[[4]]==100 & hits[[5]]==0),]
  }
  # extract all hit proteins from the db fasta in a single pass
  write(as.character(hits[[2]]), 'hitsList.txt');
  system(paste('formatting.pl -idlist hitsList.txt -input ', db, ' -o hits.fasta', sep=''));
  hits = by(hits[[2]], INDICES = hits[[1]], identity)  # hit IDs grouped by query ID
  nq = nrow(qSeq)
  scores.all = list();
  for (qID in rownames(qCDSSeq)){ # score each query protein in turn
    if (!(qID %in% names(hits))){ # proteins without any blast hits # 20141215
      scores = list(hmm.global.score=NA,
                    hmm.Evalue = NA,
                    global.score = NA,
                    local.score = NA,
                    match.score = NA,
                    mean.coverage = 0,
                    total.coverage = 0,
                    nSeq = '',
                    cSeq = '',
                    cSeq.long = '',
                    nSeq.naive = '',
                    all = NA,
                    new.intron=NA,
                    intron=NA,
                    new.exon = NA,
                    nHits = 0,
                    exon=c(), mutation=c())
      scores$CDS = qCDSSeq[qID, ];
      scores.all[[qID]] = scores;
      next
    }
    # fasta with this query's hits plus the query itself (query ID carries the
    # exon sizes so score.CDS can locate intron junctions in the alignment)
    write(as.character(hits[[qID]]), 'hits1q.txt');
    system(paste('formatting.pl -idlist hits1q.txt -input hits.fasta -o hits1q.fasta', sep=''));
    qSeq1 = qSeq[qID,'seq']; qIDd = paste(qID, as.character(qCDSSeq[qID,'exon.sizes']), sep='_'); names(qSeq1) = qIDd;
    write.fasta(qSeq1, out.file = 'hits1q.fasta', append=T)
    if (!file.exists(paste(tag, iteration, qID, '.aln', sep=''))){
      cat('Aligning ', qID, '\n')
      if (multialn.method=='muscle'){
        system(paste(multialn.method, ' -maxiters ', aln.maxiters, ' -quiet -in hits1q.fasta > ', tag, iteration, qID, '.aln', sep=''))
      }else if (multialn.method=='mafft'){
        # system(paste('linsi --thread 6 hits1q.fasta > ', tag, iteration, qID, '.aln', sep=''))
        system(paste('mafft --auto --thread 6 hits1q.fasta > ', tag, iteration, qID, '.aln', sep=''))
      }else{
        stop('Unsupported alignment method')
      }
    }else{
      cat('Using existing file ', paste(tag, iteration, qID, '.aln', sep=''),'\n')
    }
    # profile model from the alignment (query row excluded)
    aln = readAAMultipleAlignment(paste(tag, iteration, qID, '.aln', sep=''), 'fasta')
    idx = names(aln@unmasked) != qIDd
    aln.m = tryCatch(maskGaps(aln), error = function(e){aln}, finally = NULL) # 20160806: maskGaps can fail on degenerate alignments
    prof = consensusMatrix(as.matrix(aln)[idx,,drop=F])
    prof.m = consensusMatrix(as.matrix(aln.m)[idx,,drop=F])
    # intron locations mapped onto alignment columns
    idx.alned = strsplit(as.character(aln@unmasked[[qIDd]]), '')[[1]] != '-'
    aa.loc.alned = cumsum(idx.alned)
    intron.loc = cumsum(exon.sizes[[qID]])/3; intron.loc=intron.loc[seq2(1,(length(intron.loc)-1),1)]
    intron.loc.alned = approx(aa.loc.alned, 1:length(aa.loc.alned), xout = intron.loc, method = 'linear')$y
    nrows = ceiling(length(aa.loc.alned)/plot.width);  # logo rows needed for the PDF height
    ## scoring and proposing new sequence
    scores = score.CDS(aln, qIDd = qIDd, intron.locs = intron.loc)
    s = get.hmmer.global.score(aln, qIDd, paste(tag, iteration, sep='')); scores$hmm.global.score = s[2]; scores$hmm.Evalue = s[1];
    scores$CDS = qCDSSeq[qID, ];
    scores.all[[qID]] = scores;
    ### visualization: sequence logo with intron junctions and score tracks
    pdf.out.file = paste(tag, iteration, qID, 'aln.pdf', sep='_');
    if (plotLogo & !file.exists(pdf.out.file)){
      pdf(pdf.out.file, 14, 8/4*nrows)
      seqLogo2(prof, intron.locs = intron.loc.alned, qSeq=as.character(aln@unmasked[[qIDd]]), scores = scores$all[,c('IC', 'global.score', 'coverage')], width = plot.width)
      if (plotLogo.noInDel){
        grid.newpage()
        seqLogo2(prof[,idx.alned], intron.locs = intron.loc, qSeq = qSeq1, scores = scores$all[idx.alned,c('IC', 'global.score', 'coverage')], width = plot.width)
      }
      dev.off()
    }
  }
  return(scores.all)
}
get.hmmer.global.score <- function(aln, qIDd, tag){
  # compute gene - hmm alignment global score
  # Yong Fuga Li, 20141219
  # Build an HMMER profile from the non-query rows of `aln` (gap-only columns
  # dropped) and score the query row against it with hmmsearch.
  #   aln  : multiple alignment (coercible with as.matrix; rows named),
  #          containing the query row `qIDd` plus its blast hits
  #   qIDd : row name of the query sequence inside the alignment
  #   tag  : file-name prefix for intermediate fasta/hmm/output files
  # Returns c(Evalue=..., score=...); c(Inf, 0) when hmmsearch reports no hit
  # (empty tblout file -> read.table error caught below).
  hits.file = paste(tag, qIDd, '_hits.faa', sep = '');
  q.file = paste(tag, qIDd, '.faa', sep = '');
  hmm.file = paste(tag, qIDd, '_hits.hmm', sep = '');
  hmmtblout.file = paste(tag, qIDd, '_hmmsearch.tab', sep = '');
  hmmout.file = paste(tag, qIDd, '_hmmsearch.out', sep = '');
  idx = qIDd!=rownames(aln);  # mask selecting the hit rows only
  aln.mat = as.matrix(aln)
  # hits alignment with all-gap columns removed; query with gaps stripped
  aln.seq = paste.row(aln.mat[idx, colSums(aln.mat[idx,,drop=F]!='-')!=0,drop=F], collapse=''); # get hits alignment
  qSeq = paste(aln.mat[qIDd, aln.mat[qIDd,]!= '-'], collapse =''); # get query seq
  names(qSeq) = qIDd
  write.fasta(aln.seq, hits.file)
  write.fasta(qSeq, q.file)
  # build pHMM models: hmmbuild -O modifiedMSA.txt cS818AN8444.hmm cS818_augoNovoAN8444.t3.aln
  system(paste('hmmbuild ', hmm.file, hits.file))
  # align query gene models to the pHMM of the same genomic location: hmmsearch --tblout AN8444.hmmtblout.txt cS818AN8444.hmm AN8444.fa > AN8444.hmm.aln
  system(paste('hmmsearch --tblout ', hmmtblout.file, hmm.file, q.file, '> ', hmmout.file))
  # tblout columns 5/6 are the full-sequence E-value and bit score; fall back
  # to (Inf, 0) when hmmsearch produced no rows
  temp = tryCatch(read.table(hmmtblout.file, header = F), error = function(e){rbind(c(0,0,0,0,Inf,0))}, finally = NULL)
  if (nrow(temp)>1)
    stop('more than one row in hmmsearch out')
  return(c('Evalue'=temp[1,5],'score'=temp[1,6]))
}
## score.CDS: score one predicted protein (row `qIDd` of alignment `aln`)
## against its aligned blast hits, and propose gene-structure corrections.
## Produces per-column scores (information content, coverage, BLOSUM62-based
## global/local/match scores), per-intron junction support, candidate NEW
## introns (query stretches no hit covers) and candidate NEW exons (query
## gaps that most hits fill), plus consensus / corrected protein sequences.
## NOTE(review): defaults `qIDd = qIDd` and `intron.locs = intron.loc` are
## self-/outer-referential -- both arguments must always be supplied.
## NOTE(review): `junction.window`, `query.weighted.consensus` and
## `new.exon.old.intron.overhaning.exon.maxscore` are currently unused.
score.CDS <- function(aln, qIDd = qIDd, intron.locs = intron.loc, junction.window = 3,
                      query.weighted.consensus = F, # used query weighted consensus sequence when proposing new exons
                      new.intron.min.size=5,
                      new.intron.min.junction.coverage=0.3,
                      new.intron.min.junction.score=0,
                      new.exon.min.size = 5,
                      new.exon.old.intron.max.junction.coverage=0.8,
                      new.exon.old.intron.max.junction.score=1,
                      new.exon.old.intron.max.distance = 50,
                      new.exon.min.coverage.stringent = 0.8,
                      new.exon.old.intron.overhaning.exon.maxscore =0,
                      new.exon.min.coverage=0.3){
  # Yong Fuga Li
  # 20141212-14
  # scoring a predicted protein against a set of known proteins (conservation score, aln score, mutation score, ins score, del score)
  # names(aln@unmasked)
  ## version 3, 20160618, add start, stop, intergenic region evidences
  idx.alned = strsplit(as.character(aln@unmasked[[qIDd]]), '')[[1]] != '-' # query aligned locations
  aa.loc.alned = cumsum(idx.alned)  # alignment column -> query aa position
  intron.locs = intron.locs #, max(aa.loc.alned)
  # intron positions (in query aa coordinates) mapped onto alignment columns
  intron.loc.alned = approx(aa.loc.alned, 1:length(aa.loc.alned), xout = intron.locs, method = 'linear')$y
  idx = qIDd!=rownames(aln)  # hit rows (query excluded from the profile)
  prof = consensusMatrix(as.matrix(aln)[idx,,drop=F])
  if (!('-' %in% rownames(prof))){# fix a bug for alignment without any gaps, 20141223
    prof=rbind('-'=0, prof);
  }
  # per-column information content and hit coverage (non-gap hit count)
  scores = data.frame(IC = pwm2ic(prof,pseudocount = 1)$IC,
                      coverage = colSums(prof[!(rownames(prof) %in% c('-', '#')),])) # scores along alignment
  scores.intron = data.frame() # scores for the introns
  scores.new.intron = data.frame() # scores for proposed new introns
  scores.exon = data.frame() # scores for the exons
  scores.start = data.frame() # scores for the start codon (NOTE(review): not yet filled in)
  scores.stop = data.frame() # scores for the stop codon (NOTE(review): not yet filled in)
  scores.irpart = data.frame() # scores for the intergenic region part (NOTE(review): not yet filled in)
  # calculate alignment score between the query sequences and the rest of tha alignment
  aln = as.matrix(aln)
  aln.local = aln; # local alignment: terminal gaps replaced by '*' so they score 0
  for (j in 1:nrow(aln.local)){
    tt = cumsum(aln.local[j,] != '-')
    aln.local[j,tt == 0 | tt == max(tt)] = '*'
  }
  if (exists('BLOSUM62', mode = "matrix"))
    remove('BLOSUM62');
  data(BLOSUM62)
  # extend BLOSUM62 with gap ('-': -4, gap-gap 0) and terminal-gap ('*': 0) rows/cols
  BLOSUM62 = cbind(rbind(BLOSUM62, '-' = -4), '-' = -4); BLOSUM62['-', '-'] = 0
  BLOSUM62['*',] = 0; BLOSUM62[,'*'] = 0; # N or C term indels
  BLOSUM62.noIndel = BLOSUM62;
  BLOSUM62.noIndel['-',] = 0; BLOSUM62.noIndel[,'-'] = 0; # N or C term indels
  # flatten score matrices to named vectors keyed by the aa pair string
  score.mat = mat2xyz(BLOSUM62, sym=F); aapairs = paste(score.mat[,1], score.mat[,2], sep='')
  score.mat = score.mat[,3]; names(score.mat) = aapairs # scoring matrix to vector
  score.mat.noIndel = mat2xyz(BLOSUM62.noIndel, sym = F); aapairs = paste(score.mat.noIndel[,1], score.mat.noIndel[,2], sep='')
  score.mat.noIndel = score.mat.noIndel[,3]; names(score.mat.noIndel) = aapairs # scoring matrix to vector
  # sum pairwise query-vs-hit scores per alignment column
  scores$global.score = 0; scores$local.score = 0; scores$match.score = 0
  for (i in which(idx)){
    scores$global.score = scores$global.score + score.mat[paste(aln[i,], aln[qIDd,], sep='')]
    scores$local.score = scores$local.score + score.mat[paste(aln.local[i,], aln.local[qIDd,], sep='')]
    scores$match.score = scores$match.score + score.mat.noIndel[paste(aln[i,], aln[qIDd,], sep='')]
  }
  nHits = nrow(aln)-1
  # normalize: global score per hit (scaled to max |score|), IC to [0,1],
  # coverage to a fraction of hits
  scores$global.score.raw = scores$global.score
  scores$global.score = (scores$global.score/(nrow(aln)-1))/max(abs(score.mat))
  scores$IC = scores$IC/max(scores$IC)
  scores$coverage = (scores$coverage)/nHits
  # support for each annotated intron junction
  scores.intron = data.frame(locs = intron.locs, locs.alned = intron.loc.alned,
                             # coverage = windowMeans(scores$coverage[idx.alned], locs=intron.locs, window.size=2), # alignment evidences at the splicing sites
                             coverage = site.coverage(aln, qIDd, intron.loc.alned, p.indel = 0.5, normalize = T),
                             # match.score = windowMeans(scores$match.score[idx.alned], locs=intron.locs, window.size=2)) # alignment evidences at the splicing sites
                             match.score = windowMeans(scores$match.score, locs=intron.loc.alned, window.size=2)) # 20141218 alignment evidences at the splicing sites
  ##### wrong exon, new intron: old exon coverage = 0, new intron coverage > 30%, match.score > 0
  coverage = round(scores$coverage[idx.alned]);
  # runs of query positions covered by zero hits -> candidate new introns
  scores.new.intron = data.frame(start = which(coverage == 0 & diff(c(Inf, coverage) !=0)),
                                 end = which(coverage == 0 & diff(c(coverage, Inf) !=0)));
  idx.exon.new = coverage!=0
  intron.locs.new = cumsum(idx.exon.new)[scores.new.intron$start]
  scores.new.intron = cbind(scores.new.intron,
                            coverage = windowMeans((scores$coverage[idx.alned])[idx.exon.new], locs=intron.locs.new, window.size=2), # alignment evidences at the splicing sites
                            match.score = windowMeans((scores$match.score[idx.alned])[idx.exon.new], locs=intron.locs.new, window.size=2)) # alignment evidences at the splicing sites
  # keep candidates that are long enough and whose flanks are well supported
  scores.new.intron = scores.new.intron[scores.new.intron$end-scores.new.intron$start+1 >= new.intron.min.size &
                                          scores.new.intron$coverage >= new.intron.min.junction.coverage &
                                          scores.new.intron$match.score >= new.intron.min.junction.score,]
  # scores$splice.after = # is a splicing site between this aa and the 3' aa
  # scores$mutation = # is the aa location likely containing a mutation
  ####### wrong intron, new exon: gap in query sequence, new intron coverage < 30%, match.score < 0
  cSeq.with.Ins = rownames(prof)[max.col(t(prof), ties.method = 'first')]  # consensus incl. insertion columns
  prof['-',prof['-',]!=nHits] = 0; # consensus target sequence
  cSeq = rownames(prof)[max.col(t(prof), ties.method = 'first')]
  nSeq.long <- nSeq <- aln[qIDd,]
  # query gap columns that enough hits fill -> candidate new exons
  is.potential.new.exon = scores$coverage > new.exon.min.coverage & nSeq == '-';
  scores.new.exon = data.frame(start = which(is.potential.new.exon & diff(c(Inf, is.potential.new.exon*1)) !=0),
                               end = which(is.potential.new.exon & diff(c(is.potential.new.exon*1, Inf)) !=0));
  scores.new.exon$dist.to.old.junctions = landmark.dist((scores.new.exon$start+scores.new.exon$end)/2, intron.loc.alned, ncol)
  scores.new.exon$mean.coverage = sapply(seq2(1,nrow(scores.new.exon),1), function(x){mean(scores$coverage[scores.new.exon$start[x]:scores.new.exon$end[x]])})
  # intron.locs.new = cumsum(!is.potential.new.exon)[scores.new.exon$start]
  idx.matched.introns = landmark.dist.which((scores.new.exon$start+scores.new.exon$end)/2, intron.loc.alned) # index of the nearest introns
  if (length(intron.loc.alned)){
    scores.new.exon = cbind(scores.new.exon,
                            nearest.intron.locs = scores.intron[idx.matched.introns,'locs'],
                            nearest.intron.junction.coverage = scores.intron[idx.matched.introns,'coverage'],
                            nearest.intron.junction.score = scores.intron[idx.matched.introns,'match.score'])
  }else if (nrow(scores.new.exon)){ # intron less cases
    scores.new.exon = cbind(scores.new.exon,
                            nearest.intron.locs = Inf,
                            nearest.intron.junction.coverage = 1,
                            nearest.intron.junction.score = Inf)
  }else{
    scores.new.exon = cbind(scores.new.exon,
                            nearest.intron.locs = vector('numeric',0),
                            nearest.intron.junction.coverage = vector('numeric',0),
                            nearest.intron.junction.score = vector('numeric',0))
  }
  # accept a new exon when the displaced intron is weak (low junction
  # coverage/score) or the gap is strongly covered, and it is near an old
  # junction and long enough
  scores.new.exon = scores.new.exon[((scores.new.exon$nearest.intron.junction.coverage <= new.exon.old.intron.max.junction.coverage &
                                        scores.new.exon$nearest.intron.junction.score <= new.exon.old.intron.max.junction.score) |
                                       scores.new.exon$mean.coverage >= new.exon.min.coverage.stringent &
                                       nHits >=3) & scores.new.exon$dist.to.old.junctions <= new.exon.old.intron.max.distance &
                                      scores.new.exon$end-scores.new.exon$start + 1 >=new.exon.min.size,]
  ###### propose new protein sequence: delete new-intron stretches, fill
  ###### new-exon stretches from the consensus
  idx.new.intron = unlist(sapply(seq2(1, nrow(scores.new.intron),1), function(x){scores.new.intron$start[x]:scores.new.intron$end[x]}))
  idx.new.exon = unlist(sapply(seq2(1, nrow(scores.new.exon),1), function(x){scores.new.exon$start[x]:scores.new.exon$end[x]}))
  #modify the new sequence based on the new introns and new exons
  cat(nrow(scores.new.intron), ' new introns for', qIDd, '\n')
  cat(nrow(scores.new.exon), ' new exons for', qIDd, '\n')
  nSeq[which(idx.alned)[idx.new.intron]] = '-'
  nSeq[idx.new.exon] = cSeq[idx.new.exon]
  rbind(cSeq, nSeq)  # NOTE(review): result unused -- debugging leftover?
  # scores$seq = aln[qIDd,]
  # naive proposal: fill every query gap from the insertion-aware consensus
  to.change = nSeq.long == '-' & cSeq.with.Ins !='-'
  nSeq.long[to.change] = cSeq.with.Ins[to.change]
  # scores.intron$coverage = scores.intron$coverage*nHits
  return(list(global.score = sum(scores$global.score.raw),
              local.score = sum(scores$local.score),
              match.score = sum(scores$match.score),
              total.coverage = sum(scores$coverage[aln[qIDd,]!='-']),
              mean.coverage = mean(scores$coverage[aln[qIDd,]!='-']),
              nSeq = paste(nSeq[nSeq!='-'], collapse =''),
              cSeq = paste(cSeq.with.Ins[cSeq.with.Ins!='-'], collapse=''),
              cSeq.long = paste(cSeq[cSeq!='-'], collapse =''), nSeq.naive = paste(nSeq.long[nSeq.long!='-'], collapse =''),
              all = scores,
              new.intron=scores.new.intron,
              intron=scores.intron,
              new.exon = scores.new.exon,
              nHits = nHits,
              exon=c(), mutation=c()))
  # qSeq = aln@unmasked[[qIDd]]
}
site.coverage <- function(aln, qIDd, intron.loc.alned, p.indel = 0.5, normalize=T){
  # 20141219, Yong Fuga Li
  # Estimate, for each intron junction (alignment-column positions in
  # `intron.loc.alned`), how strongly the hit sequences support it.  For each
  # hit, support is (1-(1-p.indel)^d.L)*(1-(1-p.indel)^d.R) where d.L/d.R are
  # the numbers of matched (both query and hit non-gap) columns between the
  # junction and the nearest query/hit indel disagreement on each side --
  # i.e. support saturates with the length of clean flanking alignment.
  # Returns one value per junction, summed over hits (divided by the number
  # of hits when normalize=T); c() when there are no junctions.
  if (is.null(intron.loc.alned) || !length(intron.loc.alned))
    return(c())
  delta = 1E-10
  idx = qIDd!=rownames(aln)  # hit rows
  qSeq = (aln[qIDd,] != '-'); r = range(which(qSeq)); # 20141231, padding 2 on both sides to indicate the end of sequences
  # query residue mask padded with sentinel 2 at both sequence ends
  qSeq = c(2, (aln[qIDd,] != '-') * 1,2); qSeq[c(r[1]-1, r[2]+1)] = 2; # 20141231, padding 2 on both sides to indicate the end of sequences
  sAln = cbind(0, (aln[idx,,drop=F] != '-')*1, 0) # 20141231, padding 1 on both sides to indicate the end of sequences
  inDels = t(qSeq != t(sAln))*1;  # columns where query and hit disagree on gap state
  #DelDels = t((1-qSeq) * t(1-sAln))
  matches = t(qSeq * t(sAln))     # columns where both query and hit have a residue
  coverage = 0
  for (i in 1:nrow(sAln)){
    # x.R = landmark.dist.which(intron.loc.alned, inDels[i,]>0, method = 'right', include.equal = F)-1
    # nearest indel disagreement to the right of each junction (+1 offsets
    # compensate for the padding column prepended above)
    x.R = landmark.dist.which(intron.loc.alned+1, inDels[i,]>0, method = 'right', include.equal = F)-1 # 20141231, adjucting for the 1 padded to the ends - padding 1 on both sides to indicate the end of sequences
    # d.R = x.R -intron.loc.alned;
    # d.R[d.R<0] = 0
    # d.R = d.R - windowSums(DelDels[i,], locs.s = intron.loc.alned, locs.e = x.R+1)
    # d.R = windowSums(matches[i,], locs.s = intron.loc.alned, locs.e = x.R+1)
    # x.L = landmark.dist.which(intron.loc.alned, inDels[i,2:ncol(inDels)]>0, method = 'left', include.equal = F)+1
    d.R = windowSums(matches[i,], locs.s = intron.loc.alned + 1, locs.e = x.R+1) # 20141231: matched columns junction -> right indel
    x.L = landmark.dist.which(intron.loc.alned+1, inDels[i,]>0, method = 'left', include.equal = F)+1 # 20141231, nearest indel disagreement to the left
    # d.L = intron.loc.alned - x.L;
    # d.L[d.L<0] = 0
    # d.L = d.L - windowSums(DelDels[i,], locs.s = x.L-1, locs.e = intron.loc.alned)
    d.L = windowSums(matches[i,], locs.s = x.L-1, locs.e = intron.loc.alned+1)  # matched columns left indel -> junction
    # rbind(intron.loc.alned, landmark.dist.which(intron.loc.alned+delta, inDels[i,]>0, method = 'left'))
    coverage = coverage + (1-(1-p.indel)^d.R)*(1-(1-p.indel)^d.L)
  }
  if (normalize)
    coverage = coverage/(nrow(aln)-1)
  return(coverage)
}
write.proMap <- function(pMap, score.file = paste('pMap',tag, '.xls', sep=''), nSeq.file = paste('pMap_nSeq',tag, '.faa', sep=''),
                         nSeq.naive.file = paste('pMap_nSeqNaive',tag, '.faa', sep=''),
                         cSeq.long.file = paste('pMap_cSeqLong',tag, '.faa', sep=''), tag='', iteration = 0, append = T){
  # Persist proMap() output: one score row per gene into a tab-separated
  # table, plus three fasta files holding the proposed (nSeq), naively
  # proposed (nSeq.naive) and long-consensus (cSeq.long) protein sequences
  # (empty sequences are skipped).  Returns the score file name.
  # Yong Fuga Li, 20141214
  ids <- names(pMap)
  # one score row per gene: the gene's CDS record plus its summary scores
  scores <- do.call(rbind, lapply(ids, function(p) {
    e <- pMap[[p]]
    cbind(e$CDS, pHMM.score = e$hmm.global.score, pHMM.Evalue = e$hmm.Evalue,
          match = e$match.score, total.coverage = e$total.coverage, mean.coverage = e$mean.coverage,
          local = e$local.score, global = e$global.score, iteration = iteration)
  }))
  # one row of proposed sequences per gene
  seqs <- do.call(rbind, lapply(ids, function(p) {
    c(nSeq = pMap[[p]]$nSeq, cSeq.long = pMap[[p]]$cSeq.long, nSeq.naive = pMap[[p]]$nSeq.naive)
  }))
  # protein length in aa, from the CDS nucleotide length
  scores$prot_len <- nchar(as.character(scores$seq)) / 3
  rownames(scores) <- rownames(seqs) <- ids
  # suffix row names with the iteration tag when one is set
  has.iter <- scores[, 'iteration'] != ''
  rownames(scores)[has.iter] <- paste(ids, scores[, 'iteration'], sep = '.')[has.iter]
  if (append) {
    write.table(scores, append = append, file = score.file, quote = F, sep = '\t', row.names = T, col.names = F)
  } else {
    write.table(scores, append = append, file = score.file, quote = F, sep = '\t', row.names = T, col.names = NA)
  }
  write.fasta(seqs[nchar(seqs[, 'nSeq']) > 0, 'nSeq'], append = F, out.file = nSeq.file)
  write.fasta(seqs[nchar(seqs[, 'nSeq.naive']) > 0, 'nSeq.naive'], append = F, out.file = nSeq.naive.file)
  write.fasta(seqs[nchar(seqs[, 'cSeq.long']) > 0, 'cSeq.long'], append = F, out.file = cSeq.long.file)
  return(score.file)
}
## Stub: consensus profile from tblastn (protein query vs translated DNA db)
## results.  Body not implemented; calling it does nothing and returns NULL.
blast2profile.PD.P <- blast2profile.tblastn <- function(blast.xml = 'AN8428_blastx_nr.xml', query.file = 'AN8428.fasta', db = 'nr', by=c('query','db', 'align')){
  ## obtain the consensus sequence based on blastx
  ## Yong Fuga Li
  ## 20140916
}
## cluster.success.rate: model the probability that a whole gene cluster of
## size `n` is annotated correctly under three strategies, given a per-gene
## success rate `alpha.g`:
##   regular  -- all n genes must be right: alpha.g^n
##   RORA     -- "rescue or abandon": genes flagged as errors (beta.neg of
##               false genes, beta.pos of true genes) are either corrected
##               (gamma.* attempt rate, delta success rate) or abandoned
##   Abandon  -- RORA with no rescue attempts (gamma.* forced to 0)
## Plots cluster success% and abandoned% side by side and returns a one-row
## data.frame of all the rates.
## 20140917, success rate in the rescue or abandon approach
cluster.success.rate <- function(n = 6, alpha.g=0.78, # gene level success rate
                                 par = list(beta.neg = 0.7, # false genes called error
                                            beta.pos=0.1, # true genes called error
                                            gamma.neg=0.25, # make correction among called error from false genes
                                            gamma.pos=0.25, # make correction among called error from true genes
                                            delta=0.8)){ # success rate of correction (only for called errors from false genes)
  # 20140917, success rate in the rescue or abandon approach
  require(gridExtra)
  alpha.c = alpha.g^n # cluster level success rate
  # per-gene outcome probabilities under RORA: abandoned / success / failure
  abandon.g = (1-alpha.g) * par$beta.neg * (1-par$gamma.neg) + alpha.g * par$beta.pos * (1-par$gamma.pos)
  success.g = alpha.g * (1-par$beta.pos) + (1-alpha.g) * par$beta.neg * par$gamma.neg * par$delta
  fail.g = (1-alpha.g) * (1 - par$beta.neg) + (1-alpha.g) * par$beta.neg * par$gamma.neg * (1-par$delta) +
    alpha.g * par$beta.pos * par$gamma.pos
  p = c(abandon = abandon.g, success=success.g, fail=fail.g)
  abandon.c = 1-(1-abandon.g)^n # % of clusters abandoned
  alpha.g.wRorA = p[2]/(p[2]+p[3])  # per-gene success conditional on not abandoned
  alpha.c.wRorA = alpha.g.wRorA ^ n
  out = data.frame(success.rate.regular = alpha.c, success.rate.RORA = alpha.c.wRorA, success.rate.Abandon = alpha.c.wRorA,
                   percent.abandon.RORA = abandon.c, percent.abandon.Abandon = abandon.c,
                   success.rate.gene = alpha.g, success.rate.gene.RORA = alpha.g.wRorA, success.rate.gene.Abandon = alpha.g.wRorA,
                   row.names = n)
  ### only abandon, without rescues: same algebra with gamma.* = 0, then the
  ### *.Abandon columns above are overwritten with these values
  par$gamma.neg <- par$gamma.pos <- 0
  abandon.g = (1-alpha.g) * par$beta.neg * (1-par$gamma.neg) + alpha.g * par$beta.pos * (1-par$gamma.neg)
  success.g = alpha.g * (1-par$beta.pos) + (1-alpha.g) * par$beta.neg * par$gamma.neg * par$delta
  fail.g = (1-alpha.g) * (1 - par$beta.neg) + (1-alpha.g) * par$beta.neg * par$gamma.neg * (1-par$delta) +
    alpha.g * par$beta.pos * par$gamma.pos
  p = c(abandon = abandon.g, success=success.g, fail=fail.g)
  abandon.c = 1-(1-abandon.g)^n # % of clusters abandoned
  alpha.g.wRorA = p[2]/(p[2]+p[3])
  alpha.c.wRorA = alpha.g.wRorA ^ n
  out$success.rate.Abandon = alpha.c.wRorA;
  out$percent.abandon.Abandon = abandon.c;
  out$success.rate.gene.Abandon = alpha.g.wRorA;
  # bar charts: cluster success% (left) and abandoned% (right) per strategy
  out1 = out[1:3]; names(out1) = c('regular', 'RORA', 'RORA\nno rescue');
  xyz = mat2xyz(as.matrix(out1), sym=F)
  q1 = theme.ggplot(ggplot(xyz, aes(x=y, y = z*100, fill = x)) + geom_bar(stat = 'identity', position = 'dodge') + xlab('') + ylab('Cluster success%') +
                      labs(fill='Cluster Size')) + theme(axis.text.x=element_text(angle=0))
  out2 = out[4:5]; names(out2) = c('RORA', 'RORA\nno rescue');
  xyz = mat2xyz(as.matrix(out2), sym=F)
  q2 = theme.ggplot(ggplot(xyz, aes(x=y, y = z*100, fill = x)) + geom_bar(stat = 'identity', position = 'dodge') + xlab('') + ylab('Cluster Abandoned%') +
                      labs(fill='Cluster Size'), legend.position = 'none') + theme(axis.text.x=element_text(angle=0))
  grid.arrange(q1, q2, ncol=2, widths = c(3,2))
  return(out)
}
## xlsx.color.NPGC: apply the deepAnnotation coloring scheme to a cluster
## spreadsheet, layer by layer (each xlsx.color pass reads the previous
## output, so the calls must run in this order), and write a matching
## readme_deepAnno.txt legend.  FUN.select.* predicates pick the cells;
## border.color/fill.color set the mark.  Returns the last xlsx.color value.
xlsx.color.NPGC <- color.NPGC.xlsx <- function(xlsx.file = 'nidulans.deepAnno.all.xlsx', out.file=paste('colored_', xlsx.file, sep='')){
  # Yong Fuga Li, 20141004
  write('1. Typically there are 5 extra genes included on each side of a cluster. Below explains the coloring scheme used in the deepAnnotation tables.\n', file = 'readme_deepAnno.txt', append = F)
  write('2. Black box - cluster boundary: expression > 9 or CS < 0.5\n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = xlsx.file, FUN.select = FUN.select.boundary, border.color = 'black', out.file = out.file, na.strings='|')
  write('3. Green fill - promising expression feature (CS column): expression clustering coefficient > 3\n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.promising, fill.color = 'green', out.file = out.file, na.strings='|')
  write('4. Green box - promising: oxidoreductase (oxidoreductase|P450|oxidase|dehydrogenase|oxygenase|reductase), \n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.oxidoreductase, border.color = 'green', out.file = out.file, na.strings='|')
  write('5. Green fill - promising protein function (domains/annotation/top.5.hits columns): (anabolism) transferase, synthase, synthetase, ligase, \n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.catabolism, fill.color = 'green', out.file = out.file, na.strings='|')
  write('6. Blue fill - special: llm, laeA, molybdenum containing, \n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.special, fill.color = 'light blue', out.file = out.file, na.strings='|')
  write('7. Purple box - interesting: or length > 800 aa, homology with swissprot proteins high (>75%) or low (<25%), polyketide or alkaloid or terpenoid or terpene or nonribosomal peptide mentioned in domain annotations, swissprot hits, or existing genome annotations, \n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.interesting, border.color = 'purple', out.file = out.file, na.strings='|')
  write('8. Purple fill - potential known clusters: polyketide or alkaloid or terpenoid or terpene or nonribosomal peptide mentioned in domain annotations, swissprot hits, or existing genome annotations, \n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.maybeKU, fill.color = 'purple', out.file = out.file, na.strings='|')
  write('9. Red box - warning (possible gene structure error): average intron size > 100, or average exon size < 100, \n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.warning, border.color = 'red', out.file = out.file, na.strings='|')
  write('10. Red fill - boring: human annotated/known SM cluster genes in current genome annotation, \n', file = 'readme_deepAnno.txt', append = T)
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.boring, fill.color = 'red', out.file = out.file, na.strings='|')
  # write.xlsx(append = T, file = out.file, 'Green - promising: expression clustering coefficient > 3\nGreen box - promising: P450\nBlue - special: llm, laeA, molybdenum containing\nPurple box - interesting: or length > 1000 aa, homology with swissprot proteins high (>50%) or low (<25%), polyketide or alkaloid or terpenoid or terpene or nonribosomal peptide mentioned in domain annotations, swissprot hits, or existing AspGD annotations\nRed - boring: annotated SM genes in current AspGD annotation')
}
## enzyme.clustering.simple: find runs of up to `max.window` consecutive
## genes containing at least `min.enzymes` "enzyme" genes, where a gene is an
## enzyme when its gff Note or its interproscan annotation matches
## `enzyme.definition` (case-insensitive Perl regex).
## Returns a data.frame with one row per qualifying window-start gene and the
## running enzyme counts for every window size 2..max.window; NULL-like empty
## result when no window qualifies.
## FIXES (the original body errored at runtime): undefined `ipr.anno.1` ->
## `ipr.anno`; `==` against the length-3 `gene.definition` (silent recycling)
## -> `%in%`; hard-coded window sizes 2:3 -> 2:max.window; the unfinished
## `k.enzyme`/`is.cluster` code replaced by the min.enzymes threshold.
## Yong Fuga Li, 20141007
enzyme.clustering.simple <- function(gff.file = gff.file, iprscan.tab.file = iprscan.tab.file, gene.definition = c('gene', 'transcript', 'mRNA'),
                                     enzyme.definition = 'P450', max.window = 6, min.enzymes = 2){
  require(gplots)
  ## read gff and keep gene-level records, sorted by genomic position
  gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
  # anno = read.gff3(gff.file, format=gff.format)
  anno = import.gff(gff.file) # 20160502
  idx.gene = anno$type %in% gene.definition # FIX: '==' recycled gene.definition over anno$type
  anno = anno[idx.gene, ]
  anno = sort.intervals(anno)
  n = length(anno)
  ## flatten interproscan annotations: one string per gene, '' when absent
  ipr.anno = iprscan.flat(iprscan.tab.file)
  ipr.anno = mat.fill.row(t(t(ipr.anno)), row.names = anno$ID, default = '')[,1]
  ## enzyme flag: match in gff Note OR interproscan annotation
  is.enzyme = regexpr(pattern=enzyme.definition, text = as.character(as.vector(anno$Note)), perl=T, ignore.case=T) > 0 |
    regexpr(pattern=enzyme.definition, text = as.character(as.vector(ipr.anno)), perl=T, ignore.case=T) > 0 # FIX: was undefined `ipr.anno.1`
  is.enzyme[is.na(is.enzyme)] = FALSE # genes with no annotation at all count as non-enzymes
  ## running enzyme counts for each window size considered
  window.size = 2:max.window # FIX: was hard-coded 2:3
  counts = sapply(window.size, FUN = function(x){return(runsum.2(is.enzyme+0, k = x, addzeros = T))})
  rownames(counts) = anno$ID; colnames(counts) = window.size
  ## window-start genes where some window size reaches the enzyme threshold
  hit.idx = sort(unique(which(counts >= min.enzymes, arr.ind = T)[,1]))
  return(data.frame(gene = rownames(counts)[hit.idx], counts[hit.idx, , drop = F],
                    check.names = F, stringsAsFactors = F))
}
NPGC.query <- NPGC.scan <- function(gff.file=NULL, iprscan.tab.file = NULL, query=list(func = list('P450', 'O-methyltransferase'),
freq = list(2:10, 1:10)),
window.size=15, out.file = 'Tvirens.xls', window.extend=window.size,
gene.definition = c('gene', 'transcript', 'mRNA'), proteinID = 'ID',
max.dist.merge = window.size){
# find a window of size 15 or less that meet the gene function query criteria
# YF Li
# 20141028, 20141111
require('xlsx')
gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
# anno = read.gff3(gff.file, format=gff.format)
anno = import.gff(gff.file) # 20160502
idx.gene = (anno$type==gene.definition)
colnames(anno@elementMetadata) = toupper(colnames(anno@elementMetadata))
m = match(anno$PARENT[idx.gene], anno$ID)
anno$DESCRIPTION[idx.gene] = anno$DESCRIPTION[m] # transfer annotation from parents to childs
anno$PARENT = as.character(anno$PARENT); anno$PARENT[is.na(anno$PARENT)] = anno$ID[is.na(anno$PARENT)]
anno = anno[idx.gene, ]
anno = sort.intervals(anno)
n = length(anno)
# anno$ID = sub('transcript:', '', anno$ID)
ipr.anno = iprscan.flat(iprscan.tab.file, na.strings = c('-', 'NA', 'NULL'))
ipr.anno = mat.fill.row(t(t(ipr.anno)), row.names = anno@elementMetadata[,toupper(proteinID)], default = '')[,1]
names(ipr.anno) = anno$ID
to.keep = ones(n)
enzyme.count.all = c()
is.enzyme.all = c()
if (!is.null(anno$NOTE)){
desc.fname = 'NOTE'
}else if (!is.null(anno$DESCRIPTION)){
desc.fname = 'DESCRIPTION'
}else{
warning('No description or Note field for the annotation of genes')
desc.fname = 'NOTE'
anno$NOTE = ''
}
for (i in 1:length(query$func)){
enzyme.definition = query$func[[i]]
enzyme.count = query$freq[[i]]
is.enzyme = (regexpr(pattern=enzyme.definition, text = as.character(as.vector(anno@elementMetadata[[desc.fname]])), perl=T, ignore.case=T) > 0);
is.enzyme[is.na(is.enzyme)] = 0;
is.enzyme = is.enzyme | (regexpr(pattern=enzyme.definition, text = as.character(as.vector(ipr.anno)), perl=T, ignore.case=T) > 0)
names(is.enzyme) = anno$ID
counts = runsum.by(is.enzyme+0, k = window.size, by = as.character(anno@seqnames))
to.keep = to.keep & (counts %in% enzyme.count)
enzyme.count.all = cbind(enzyme.count.all, counts)
is.enzyme.all = cbind(is.enzyme.all, is.enzyme)
cat(enzyme.definition, sum(is.enzyme), '\n')
}
cat('# of total gene', length(anno))
colnames(enzyme.count.all) <- colnames(is.enzyme.all) <- query$func
# sum(to.keep)
#core.regions = intersect(extend.index(which(to.keep), window.size), which(rowSums(is.enzyme.all)>0))
core.regions = extend.index(which(to.keep), window.size, sides='down', do.unique = F);
m = match(core.regions, which(rowSums(is.enzyme.all)>0)) # 20141111
core.regions = core.regions[!is.na(m)]
# merge clusters
gene.ranges = unique(cbind(by(core.regions, names(core.regions), FUN = min), by(core.regions, names(core.regions), FUN = max)), MARGIN = 1, drop=F)
gene.ranges = sort.by(gene.ranges, by = gene.ranges[,1])
nc = nrow(gene.ranges)
if (is.null(nc) | !nc){
cat('\nNumber of clusters: 0')
return(NULL)
}
cluster.ID = cumsum(c(1, gene.ranges[seq2(2,nc,1),1]- gene.ranges[seq2(1,nc-1,1),2] - 1 > max.dist.merge))
gene.ranges = data.frame(from=sapply(by(gene.ranges[,1],INDICES = cluster.ID, FUN = function(x){x[1]}), FUN = 'identity'),
to = sapply(by(gene.ranges[,2],INDICES = cluster.ID, FUN = function(x){x[length(x)]}), FUN = 'identity'),
name= sapply(by(rownames(gene.ranges),INDICES = cluster.ID, FUN = function(x){paste(c(x[1], x[length(x)]), collapse = '_')}), FUN = 'identity'))
geneID2clusterID = lapply(1:nrow(gene.ranges), function(x){cbind(as.character(anno$ID)[gene.ranges[x,1]:gene.ranges[x,2]], rep(as.character(gene.ranges[x,3]), gene.ranges[x,2]-gene.ranges[x,1]+1))})
if (is.list(geneID2clusterID)){
geneID2clusterID = do.call(rbind, geneID2clusterID)
}
gene.ranges = cbind(from=as.character(anno$ID)[gene.ranges[,1]], to=as.character(anno$ID)[gene.ranges[,2]], name=as.character(gene.ranges[,3]))
cat('\nNumber of clusters: ', nrow(gene.ranges))
#### output
to.keep.extend = extend.index(core.regions, window.extend, sides='both', do.unique=T)
to.keep.extend = to.keep.extend[to.keep.extend<=length(anno) & to.keep.extend>=1]
anno$PARENT[1] ==c()
is.enzyme.all[] = c('', 'Yes')[is.enzyme.all+1]
out = cbind(chr = as.character(anno@seqnames)[], gene=anno$ID, 'protein ID' = anno@elementMetadata[,toupper(proteinID)], Existing.Anno = anno@elementMetadata[,toupper(desc.fname)],
is.enzyme.all, domains = ipr.anno)[to.keep.extend,]
rownames(geneID2clusterID) = geneID2clusterID[,1];
out = cbind(out, clusterID = mat.fill.row(geneID2clusterID, rownames(out), '')[,2])
write.xlsx(out, out.file)
return(gene.ranges)
}
# NPGC.mutliscan: intended multi-genome natural-product gene-cluster (NPGC) scan.
# Planned pipeline (per the author's comments below): run the cluster query per
# genome, cross-BLAST the hits, assign BBH orthologs, and report clusters with
# ortholog info.
#
# NOTE(review): this function appears UNFINISHED. The signature takes plural
# arguments (gff.files, iprscan.tab.files, prot.fasta.files) but the body reads
# singular names gff.file / iprscan.tab.file, which are not defined in this
# scope — as written it only works if those happen to exist in the calling
# environment. The body is otherwise a copy of the single-genome scan.
#
# Args (as declared):
#   gff.files / iprscan.tab.files / prot.fasta.files: per-genome annotation inputs (unused as-is).
#   query: parallel lists — func = regex patterns matched against gene descriptions
#          and InterProScan domains; freq = acceptable per-window match counts.
#   window.size: number of consecutive genes per sliding window.
#   out.file: xlsx report path.  window.extend: extra flanking genes in the report.
#   gene.definition: GFF feature type treated as a gene.
#   proteinID: metadata column holding the protein ID (uppercased before lookup).
#   max.dist.merge: max gene gap when merging adjacent candidate windows.
# Returns: matrix of merged clusters (from-gene, to-gene, name), or NULL if none.
NPGC.mutliscan <- function(gff.files=NULL, iprscan.tab.files = NULL, prot.fasta.files = NULL,
query=list(func = list('P450', 'O-methyltransferase'),
freq = list(2:10, 1:10)),
window.size=15, out.file = 'Tvirens.xls', window.extend=window.size,
gene.definition = c('gene', 'transcript', 'mRNA'), proteinID = 'ID',
max.dist.merge = window.size){
# deepAnno.clusters
# find a window of size 15 or less that meet the gene function query criteria
# YF Li
# Step 1: NPGC.query for multiple genomes using the same query
# Step 2: Blast search of the identified query against the other genome
# Step 3: BBH ortholog assignemnet
# Step 4: Output all clusters with the ortholog information
require('xlsx')
# NOTE(review): gff.file (singular) is not a parameter of this function — TODO confirm
# whether this was meant to loop over gff.files.
gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
# anno = read.gff3(gff.file, format=gff.format)
anno = import.gff(gff.file) # 20160502
# Keep only gene-like features; metadata column names are uppercased so later
# lookups (PARENT, DESCRIPTION, NOTE) are case-insensitive w.r.t. the GFF source.
idx.gene = (anno$type==gene.definition)
colnames(anno@elementMetadata) = toupper(colnames(anno@elementMetadata))
m = match(anno$PARENT[idx.gene], anno$ID)
anno$DESCRIPTION[idx.gene] = anno$DESCRIPTION[m] # transfer annotation from parents to childs
anno$PARENT = as.character(anno$PARENT); anno$PARENT[is.na(anno$PARENT)] = anno$ID[is.na(anno$PARENT)]
anno = anno[idx.gene, ]
anno = sort.intervals(anno)
n = length(anno)
#anno$ID = sub('transcript:', '', anno$ID)
# Flatten InterProScan output and align it to genes via the protein-ID column.
# NOTE(review): iprscan.tab.file (singular) is also undefined in this scope.
ipr.anno = iprscan.flat(iprscan.tab.file, na.strings = c('-', 'NA', 'NULL'))
ipr.anno = mat.fill.row(t(t(ipr.anno)), row.names = anno@elementMetadata[,toupper(proteinID)], default = '')[,1]
names(ipr.anno) = anno$ID
to.keep = ones(n)
enzyme.count.all = c()
is.enzyme.all = c()
# Choose the free-text annotation field to search (NOTE preferred over DESCRIPTION).
if (!is.null(anno$NOTE)){
desc.fname = 'NOTE'
}else if (!is.null(anno$DESCRIPTION)){
desc.fname = 'DESCRIPTION'
}else{
warning('No description or Note field for the annotation of genes')
desc.fname = 'NOTE'
anno$NOTE = ''
}
# For each query term: mark genes matching in description OR domains, then count
# matches per sliding window of window.size genes (per chromosome) and require
# the count to fall in the allowed frequency range for every term.
for (i in 1:length(query$func)){
enzyme.definition = query$func[[i]]
enzyme.count = query$freq[[i]]
is.enzyme = (regexpr(pattern=enzyme.definition, text = as.character(as.vector(anno@elementMetadata[[desc.fname]])), perl=T, ignore.case=T) > 0);
is.enzyme[is.na(is.enzyme)] = 0;
is.enzyme = is.enzyme | (regexpr(pattern=enzyme.definition, text = as.character(as.vector(ipr.anno)), perl=T, ignore.case=T) > 0)
names(is.enzyme) = anno$ID
counts = runsum.by(is.enzyme+0, k = window.size, by = as.character(anno@seqnames))
to.keep = to.keep & (counts %in% enzyme.count)
enzyme.count.all = cbind(enzyme.count.all, counts)
is.enzyme.all = cbind(is.enzyme.all, is.enzyme)
cat(enzyme.definition, sum(is.enzyme), '\n')
}
cat('# of total gene', length(anno))
colnames(enzyme.count.all) <- colnames(is.enzyme.all) <- query$func
# sum(to.keep)
#core.regions = intersect(extend.index(which(to.keep), window.size), which(rowSums(is.enzyme.all)>0))
# Expand each qualifying window start downstream over the window, then keep only
# genes that themselves matched at least one query term (20141111 refinement).
core.regions = extend.index(which(to.keep), window.size, sides='down', do.unique = F);
m = match(core.regions, which(rowSums(is.enzyme.all)>0)) # 20141111
core.regions = core.regions[!is.na(m)]
# merge clusters
gene.ranges = unique(cbind(by(core.regions, names(core.regions), FUN = min), by(core.regions, names(core.regions), FUN = max)), MARGIN = 1, drop=F)
gene.ranges = sort.by(gene.ranges, by = gene.ranges[,1])
nc = nrow(gene.ranges)
if (is.null(nc) | !nc){
cat('\nNumber of clusters: 0')
return(NULL)
}
# Merge windows whose gene-index gap is <= max.dist.merge into one cluster ID.
cluster.ID = cumsum(c(1, gene.ranges[seq2(2,nc,1),1]- gene.ranges[seq2(1,nc-1,1),2] - 1 > max.dist.merge))
gene.ranges = data.frame(from=sapply(by(gene.ranges[,1],INDICES = cluster.ID, FUN = function(x){x[1]}), FUN = 'identity'),
to = sapply(by(gene.ranges[,2],INDICES = cluster.ID, FUN = function(x){x[length(x)]}), FUN = 'identity'),
name= sapply(by(rownames(gene.ranges),INDICES = cluster.ID, FUN = function(x){paste(c(x[1], x[length(x)]), collapse = '_')}), FUN = 'identity'))
# Map every gene index inside each cluster to the cluster name (2-column matrix).
geneID2clusterID = lapply(1:nrow(gene.ranges), function(x){cbind(as.character(anno$ID)[gene.ranges[x,1]:gene.ranges[x,2]], rep(as.character(gene.ranges[x,3]), gene.ranges[x,2]-gene.ranges[x,1]+1))})
if (is.list(geneID2clusterID)){
geneID2clusterID = do.call(rbind, geneID2clusterID)
}
gene.ranges = cbind(from=as.character(anno$ID)[gene.ranges[,1]], to=as.character(anno$ID)[gene.ranges[,2]], name=as.character(gene.ranges[,3]))
cat('\nNumber of clusters: ', nrow(gene.ranges))
#### output
to.keep.extend = extend.index(core.regions, window.extend, sides='both', do.unique=T)
to.keep.extend = to.keep.extend[to.keep.extend<=length(anno) & to.keep.extend>=1]
# NOTE(review): the next line compares to c() and discards the result — a no-op;
# looks like leftover debugging code.
anno$PARENT[1] ==c()
is.enzyme.all[] = c('', 'Yes')[is.enzyme.all+1]
# Assemble the per-gene report (chromosome, IDs, annotations, per-query hits,
# domains) restricted to the extended cluster neighborhoods, then tag cluster IDs.
out = cbind(chr = as.character(anno@seqnames)[], gene=anno$ID, 'protein ID' = anno@elementMetadata[,toupper(proteinID)], Existing.Anno = anno@elementMetadata[,toupper(desc.fname)],
is.enzyme.all, domains = ipr.anno)[to.keep.extend,]
rownames(geneID2clusterID) = geneID2clusterID[,1];
out = cbind(out, clusterID = mat.fill.row(geneID2clusterID, rownames(out), '')[,2])
write.xlsx(out, out.file)
return(gene.ranges)
}
# deepAnno.landmarks: deep annotation around landmark genes.
#
# Given landmark genes and cluster sizes, locates each landmark in the (sorted)
# gene annotation, merges landmarks whose gene-index gap is <= max.dist.merge,
# builds named gene ranges, and delegates the per-cluster deep annotation to
# cluster.deepAnno().
#
# Args:
#   landmark.sizes: data.frame/matrix; col 1 = landmark gene ID, col 2 = number
#     of genes in the cluster; optional col 3 = a label used in cluster names.
#   gff.file / ica.spatial / geMat: annotation sources — geMat+gff.file builds an
#     expression clustering, otherwise a precomputed ica.spatial or a plain GFF.
#   prot.fasta.file, iprscan.tab.file: protein sequences and InterProScan table.
#   max.dist.merge: max gene gap when merging adjacent landmark clusters.
#   extra.genes, n.cluster.per.file, proteinID, out.file, geneID2cdsID:
#     passed through to cluster.deepAnno().
# Returns: (invisibly) the result of cluster.deepAnno().
#
# Fixes vs. previous revision:
#   - default was `landmark.sizes=null`: lowercase `null` is not an R object, so
#     relying on the default raised "object 'null' not found"; now NULL.
#   - scalar `if` condition now uses short-circuit `&&` instead of vectorized `&`.
deepAnno.landmarks <- function(landmark.sizes=NULL, gff.file=NULL,
                               DNA.fasta.file='/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_chromosomes.fasta',
                               prot.fasta.file=NULL, iprscan.tab.file=NULL, ica.spatial=NULL, max.dist.merge = 0,
                               gene.definition = c('gene', 'transcript', 'mRNA'),
                               extra.genes = 20, n.cluster.per.file=40, geMat = NULL, proteinID = c('ID', 'proteinId'),
                               out.file = 'nidulans.deepAnno.llms.xlsx', geneID2cdsID = function(x){sub('gene_(.*)$', 'CDS_\\1', x)}){
  # deep annotation around landmark genes with n.genes on each side
  # YF Li, 20141007
  prot.seq = read.fasta(prot.fasta.file, type='AA')
  ipr.anno = iprscan.flat(iprscan.tab.file)
  # Resolve the gene annotation from whichever input combination was supplied.
  if (!is.null(geMat) && !is.null(gff.file)){
    ica.spatial = express.clustering(gff.file, geMat)
    anno = ica.spatial$anno;
  }else if (!is.null(ica.spatial)){
    anno = ica.spatial$anno;
  }else if (!is.null(gff.file)){
    gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
    # anno = read.gff3(gff.file, format=gff.format)
    anno = import.gff(gff.file) # 20160502
    idx.gene = (anno$type==gene.definition)
    anno = anno[idx.gene, ]
    anno = sort.intervals(anno)
  }else{
    stop('Provide ica.spatial or gff.file')
  }
  # anno$ID = sub('transcript:', '', anno$ID)
  # Locate landmarks in gene order; each cluster spans [landmark, landmark+size-1].
  locs = match(as.character(landmark.sizes[,1]), anno$ID);
  locs = cbind(locs, locs+landmark.sizes[,2]-1)
  gene.ranges = cbind(as.character(landmark.sizes[,1]), anno$ID[locs[,2]])
  # Sort everything by genomic (gene-index) position before merging.
  gene.ranges = sort.by(gene.ranges, by = locs[,1])
  landmark.sizes = sort.by(landmark.sizes, by = locs[,1])
  locs = sort.by(locs, by = locs[,1])
  nc = nrow(locs)
  # Adjacent clusters with a gap <= max.dist.merge share a cluster.ID.
  cluster.ID = cumsum(c(1, locs[2:nc,1]- locs[1:(nc-1),2] - 1 > max.dist.merge))
  # cbind(gene.ranges, cluster.ID)
  # Build (from-gene, to-gene, name) per merged cluster; if a 3rd landmark column
  # exists it is concatenated into the cluster name.
  if (ncol(landmark.sizes)==3){
    s = cbind(by(as.character(gene.ranges[,1]),INDICES = cluster.ID, FUN = function(x){as.character(x)[1]}),
              by(as.character(gene.ranges[,2]),INDICES = cluster.ID, FUN = function(x){as.character(x)[length(x)]}),
              by(landmark.sizes[,3], INDICES = cluster.ID, FUN = function(x){paste(as.character(x), collapse = '_')}))
    s = cbind(s[,1:2], paste(s[,3], s[,1], s[,2], sep='_'))
  }else{
    s = cbind(by(as.character(gene.ranges[,1]),INDICES = cluster.ID, FUN = function(x){as.character(x)[1]}),
              by(as.character(gene.ranges[,2]),INDICES = cluster.ID, FUN = function(x){as.character(x)[length(x)]}))
    s = cbind(s, paste(s[,1], s[,2], sep='_'))
  }
  # Delegate the actual deep annotation of each merged cluster.
  s2d = cluster.deepAnno(ica.spatial = ica.spatial, proteinID = proteinID, gff.file = gff.file, gene.ranges = s, prot.seq=prot.seq, ipr.anno = ipr.anno, out.file = out.file, extra.genes=extra.genes,
                         DNA.fasta.file = DNA.fasta.file, gene.definition = gene.definition,
                         n.cluster.per.file=n.cluster.per.file, append=F, geneID2cdsID = geneID2cdsID)
  invisible(s2d)
}
# domain.clustering.unsupervised: frequent-itemset mining over protein-domain
# annotations in local genomic windows.
#
# Reads gene features from a GFF, flattens InterProScan domain annotations into
# per-gene item sets, pools them over sliding windows of window.size genes, also
# builds two permutation controls (domain-level and gene-level shuffles), and
# runs apriori() for maximally frequent itemsets at gene and window level.
#
# Args:
#   iprscan.tab.file: InterProScan tabular output.
#   gff.file: genome annotation GFF.
#   window.size: number of consecutive genes pooled per window.
# Returns: the value of the last summary() call (exploratory; the rule objects
#   themselves are not returned).
#
# Fixes vs. previous revision:
#   - require("arules")/require("arulesViz") were only called AFTER the first
#     as(..., 'transactions') coercions, which need the arules class definitions;
#     they are now loaded up front with the other packages.
#   - removed leftover `data("Adult")` (example dataset, never used here).
#   - the defaults `iprscan.tab.file = iprscan.tab.file` were self-referential
#     and raised "promise already under evaluation" when omitted; the arguments
#     are now simply required.
domain.clustering.unsupervised <- function(iprscan.tab.file, gff.file,
                                           window.size = 20){ # enzyme.definition = NULL,
  # find the frequent item set rule of domain annotation combinations among local regions in the genome
  # YF Li
  # 20141007
  require(gplots)
  require("arules")     # needed below by as(x, 'transactions') and apriori()
  require("arulesViz")
  ## read gff
  gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
  # anno = read.gff3(gff.file, format=gff.format)
  anno = import.gff(gff.file) # 20160502
  idx.gene = (anno$type=='gene')
  anno = anno[idx.gene, ]
  anno = sort.intervals(anno)
  n = length(anno)
  # read domain annotations; genes without domains get empty item sets
  ipr.anno = iprscan.flat(iprscan.tab.file, out.type = 'itemset')
  ipr.anno.gene = mat.fill.row(t(t(ipr.anno)), row.names = anno$ID, default = '')
  names(ipr.anno.gene) = anno$ID;
  ipr.anno.gene = sapply(ipr.anno.gene, function(x){if (length(x)==1 && x=='') x=c(); return(x)})
  # combine annotation sets to cluster (sliding-window) level
  ipr.anno.cluster = sapply(1 : (length(ipr.anno.gene)-window.size+1), FUN = function(x){unique(unlist(ipr.anno.gene[x:(x+window.size-1)]))})
  names(ipr.anno.cluster) = paste(anno$ID[1:(length(ipr.anno.gene)-window.size+1)], anno$ID[window.size:length(ipr.anno.gene)], sep='-')
  # create permutation controls: shuffle domains across genes, and shuffle genes
  ipr.anno.gene.rand = relist2(sample(unlist(ipr.anno.gene), replace = F), ipr.anno.gene) # domain level perm
  ipr.anno.gene.perm = sample(ipr.anno.gene) # gene level perm
  ipr.anno.cluster.rand = sapply(1 : (length(ipr.anno.gene)-window.size+1), FUN = function(x){unique(unlist(ipr.anno.gene.perm[x:(x+window.size-1)]))})
  # coerce the item-set lists to arules transactions
  ipr.anno.gene.tr = as(ipr.anno.gene, 'transactions')
  ipr.anno.cluster.tr = as(ipr.anno.cluster, 'transactions')
  ipr.anno.gene.rand.tr = as(ipr.anno.gene.rand, 'transactions')
  ipr.anno.cluster.rand.tr = as(ipr.anno.cluster.rand, 'transactions')
  # mine maximally frequent domain combinations at gene and window level
  rules <- apriori(ipr.anno.gene.tr,
                   parameter = list(supp = 1/500, conf = 0.7,
                                    target = "maximally frequent itemsets"))
  summary(rules)
  cluster.rules <- apriori(ipr.anno.cluster.tr,
                           parameter = list(supp = 1/500, conf = 0.7,
                                            target = "maximally frequent itemsets"))
  summary(cluster.rules)
  # unique coding of domain combinations in each gene
  # domain clustering rule finding
}
# proMap2hints: convert proMap proteogenomic mapping results into an AUGUSTUS
# "hints" GFF file of confident introns.
#
# For each gene in pMap it rewrites the gene's CDS records as the introns
# BETWEEN consecutive CDS features, attaches coverage / multiplicity / score
# evidence from pMap, keeps only confidently supported introns, and exports the
# result as GFF3 hints (source 'M', default priority 4).
#
# Args:
#   pMap: named list keyed by gene ID; each element has $nHits and an $intron
#     table with $coverage and $match.score (structure inferred from usage here —
#     TODO confirm against proMap's output format).
#   gff.file: annotation containing CDS features for the genes in pMap.
#   out.file: hints GFF to write.  log.file: text log for anomalies.
#   geneID2cdsID: maps a gene ID to its CDS feature ID in the GFF.
#   append: append to out.file instead of overwriting.
#   version: when 3, demote priority for weaker evidence (20160818 change).
proMap2hints <- function(pMap, gff.file='Afu3g01340_Afu3g01340.gff', out.file = 'proMap_hints.gff',
log.file = 'log.txt',
geneID2cdsID = function(x){paste(x, '-P', sep='')}, append = F, version = 3){
# Yong Fuga Li, 20141216
# create proMap hints gff file based on proMap results and gff file of the genes
# anno = tryCatch(read.gff3(gff.file, format='gff3'), error = function(e){read.gff3(gff.file, format='gff')}, finally = NULL)
anno = import.gff(gff.file) # 20160502
idx.CDS = (anno$type=='CDS')
anno = anno[idx.CDS, ]
anno = sort.intervals(anno);
i = order(as.character(anno$ID)); anno = anno[i,]; # there are overlapping genes, so sort by gene names to avoid wrong orders, 20121216
# Restrict to CDS records belonging to genes present in pMap.
anno = anno[anno$ID %in% geneID2cdsID(names(pMap))]
out = anno
out$source = 'proMap2hints'
out$phase = '.';
# Evidence columns filled in per gene below.
out@elementMetadata$Score = 0;
out@elementMetadata$coverage = 0;
out@elementMetadata$mult = 0;
# create introns hints as manual hints
to.keep = vector('logical', length = length(anno)) | T
for (g in names(pMap)){
# i spans this gene's CDS rows; k CDS rows yield k-1 introns, so the last row
# is always dropped from the output.
i = range(which(anno$ID == geneID2cdsID(g)))
to.keep[i[2]] = F
i = seq2(i[1], i[2]-1, 1)
# No peptide hits for this gene: drop all of its intron candidates.
if (!pMap[[g]]$nHits){
to.keep[i] = F
next
}
if (!length(i))
next
# NOTE(review): leftover debugging hook for gene AN9494 — a bare `1`, no effect.
if (g == 'AN9494'){
1
}
# Intron width = gap between end of CDS[j] and start of CDS[j+1].
width = anno@ranges@start[i+1] - (anno@ranges@start[i]+anno@ranges@width[i])
if (any(width<=0)){
write(paste('gene', g, 'contains introns with negative size.'), file = log.file, append = T)
width[width<=0] = 1;
}
# Replace the CDS ranges with the computed intron ranges.
out@ranges[i] = IRanges(anno@ranges@start[i]+anno@ranges@width[i], width = width)
# An intron is "confident" if (supporting reads >= 5 AND coverage > 0.3) OR
# match score > 3; others are discarded.
i.confident.intron = (pMap[[g]]$intron$coverage * pMap[[g]]$nHits >=5 & pMap[[g]]$intron$coverage > 0.3) | pMap[[g]]$intron$match.score > 3
out@elementMetadata$mult[i] = round(pMap[[g]]$intron$coverage * pMap[[g]]$nHits,1)
out@elementMetadata$coverage[i] = round(pMap[[g]]$intron$coverage,3)
out@elementMetadata$Score[i] = round(pMap[[g]]$intron$match.score,1)
to.keep[i[!i.confident.intron]] = F
out$ID[i] = g
}
out = out[to.keep]
out$type = 'intron'
# Reduce metadata to the hint attributes AUGUSTUS expects (grp/src/pri).
elementMetadata(out) = data.frame(out@elementMetadata[,c('source', 'type', 'score', 'phase', 'score', 'Score', 'mult', 'coverage')], grp = out@elementMetadata[,'ID'], src = 'M',pri=4)
if (version == 3){
out$pri = out$pri - ((out$coverage < 0.3) + (out$mult < 5) + (out$Score < 3)) # 20160818 -- assigned different priorities to evidences of different reliability
}
### convert exon hints to manual hints
export(out, out.file, format = 'gff3', append=append)
}
# get.CDS: extract spliced CDS sequences for a set of genes from a genome FASTA
# and a GFF annotation.
#
# For each gene, concatenates its CDS exon sequences (reverse-complemented for
# minus-strand genes), and records exon count, exon sizes (5'->3'), genomic span
# and chromosome.
#
# Args:
#   gene.IDs: character vector of gene IDs; NULL/empty means all genes in the
#     GFF (v2, 20150406), in which case geneID2cdsID is ignored.
#   gff.file: annotation with CDS features.
#   DNA.fasta.file: genome sequence FASTA.
#   geneID2cdsID: maps a gene ID to its CDS feature ID (default: append '-P').
# Returns: data.frame (one row per gene, rownames = gene.IDs) with columns
#   seq, Exons, CDSspan.nt., exon.sizes, from, to, chr.
#
# Fixes vs. previous revision:
#   - scalar `if` condition now uses short-circuit `||` instead of vectorized `|`.
#   - removed two unused whole-genome imports (`fa`, `DNAseq`) that read the
#     FASTA into memory twice for nothing; sequences are fetched per-exon via
#     getDNA.subseq(DNA.fasta.file, ...).
get.CDS <- function(gene.IDs = c('AN5093', 'AN5092'), gff.file, DNA.fasta.file, geneID2cdsID=function(x){paste(x, '-P', sep='')}){
  # get the CDS sequences for a list of genes based on genome sequences and gff format file
  # v2. 20150406, retrieve all sequences when gene.IDs = NULL or empty
  require('Biostrings')
  require('rtracklayer')
  require('GenomicFeatures')
  # anno = tryCatch(read.gff3(gff.file, format='gff3'), error = function(e){read.gff3(gff.file, format='gff')}, finally = NULL)
  anno = import.gff(gff.file)
  idx.CDS = (anno$type=='CDS')
  anno = anno[idx.CDS, ]
  anno = sort.intervals(anno)
  CDSs = data.frame('seq'=c(), 'Exons'=c(), 'CDSspan(nt)'=c(), 'from'=c(), 'to'=c())
  if (is.null(gene.IDs) || !length(gene.IDs)){ # 20150406: empty request = all genes
    gene.IDs = anno$ID;
    gene.IDs = unique(gene.IDs[!is.na(gene.IDs)])
    geneID2cdsID = identity;
  }
  # i.strand = which(regexpr('strand',names(anno@elementMetadata@listData))>0)
  idx.parent = tolower(colnames(anno@elementMetadata)) == 'parent'
  for (g in gene.IDs){
    # i = anno$ID == geneID2cdsID(g)
    # Match CDS rows by their own ID or by their Parent attribute (20150819).
    i = (anno$ID == geneID2cdsID(g)) | (as.character(anno@elementMetadata[[which(idx.parent)]]) == g)
    ranges = data.frame(chr = anno@seqnames[i], anno@ranges[i], ID = g, strands = anno@strand[i])
    # Fetch each exon's genomic sequence and splice them together.
    gs = getDNA.subseq(DNA.fasta.file, locs = ranges[,1:3])
    seq = paste(gs, collapse='')
    # strand = anno@elementMetadata@listData[[i.strand]][i]
    strand = anno@strand[i]
    ustrand = unique(strand)
    if (length(ustrand)>1){
      stop(paste('One both strands', paste(strand, collapse = '')))
    }
    if (ustrand=='-'){
      seq = as.character(reverseComplement(DNAString(seq)))
      # ranges = ranges[rev(1:nrow(ranges)),] # 20141215
    }
    # Report exon sizes in transcript (5'->3') order.
    exon.sizes = ranges[,3]-ranges[,2]+1;
    if (ustrand=='-'){
      exon.sizes = rev(exon.sizes)
    }
    CDSs = rbind(CDSs, data.frame ('seq' = seq, 'Exons'=nrow(ranges),
                                   'CDSspan(nt)' = 1 - ranges[1,2] + ranges[nrow(ranges),3],
                                   'exon.sizes' = paste(exon.sizes, collapse = ','),
                                   'from' = ranges[1,2], 'to' = ranges[nrow(ranges),3],
                                   'chr' = as.character(anno@seqnames[i])[1])) # 20141210, add exon.sizes
  }
  rownames(CDSs) = gene.IDs
  return(CDSs)
}
# get.CDS.errorCorrection: build a (possibly manually corrected) CDS for one
# gene and translate it in all three reading frames.
#
# The exon set comes from the gene's annotated CDS features plus optional extra
# exons (new.exon); sequence-level corrections are applied by literal
# substitution before reverse-complementing minus-strand genes and translating.
#
# Args:
#   g: gene ID; CDS features are matched as paste(g, '-P').
#   DNA.fasta.file / gff.file: genome sequence and annotation.
#   new.exon: data.frame of extra exons (chr, start, end); when the gene has no
#     annotated CDS it must ALSO carry a 'strand' column — TODO confirm, the
#     default new.exon has no such column and the else-branch would fail.
#   error.correction: data.frame of (from, to) substring replacements; each
#     pattern must occur, otherwise stop().
# Returns: list(CDS, exon.seqs, protein = 3-frame translations, exons).
get.CDS.errorCorrection <- function(g = 'AN2596', DNA.fasta.file='/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_chromosomes.fasta',
gff.file="/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_features.gff",
new.exon = data.frame(chr=c(), start=c(), end=c()),
error.correction = data.frame(from=c(), to=c())){
# Yong Fuga Li, 20141014
# NOTE(review): `fa` and `DNAseq` below are loaded but never used in this body;
# sequences are fetched via getDNA.subseq(DNA.fasta.file, ...) instead.
fa=import(DNA.fasta.file, 'fasta', type='DNA')
names(fa) <- sub('^([^ ]+) .+$','\\1', names(fa))
gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
# anno = read.gff3(gff.file, format=gff.format)
anno = import.gff(gff.file) # 20160502
idx.CDS = (anno$type=='CDS')
anno = anno[idx.CDS, ]
anno = sort.intervals(anno)
DNAseq = import(DNA.fasta.file, 'fasta', type='DNA')
CDSs = data.frame('seq'=c(), 'Exons'=c(), 'CDSspan(nt)'=c())
i = anno$ID == paste(g, '-P', sep='')
# i.strand = which(regexpr('strand',names(anno@elementMetadata@listData))>0)
# Use annotated CDS exons (plus any new.exon rows) when the gene is annotated;
# otherwise fall back to new.exon alone.
if (sum(i)>0){
# strand = anno@elementMetadata@listData[[i.strand]][i] # 20160502
strand = anno@strand[i]
ranges = data.frame(chr = anno@seqnames[i], anno@ranges[i], ID = g, strands = anno@strand[i])
exons = rbind(ranges[,1:3], new.exon);
}else{
strand = new.exon[,'strand']
exons = rbind(new.exon);
}
# exons = ranges[,1:3]
gs = getDNA.subseq(DNA.fasta.file, locs = exons[,1:3])
seq = paste(gs, collapse='')
# Apply literal (from -> to) corrections; each must change the sequence.
for (j in seq2(1,nrow(error.correction),1)){
seq1 = sub(error.correction[j,1], error.correction[j,2], seq)
if (seq1 == seq)
stop(paste('Could not replace from', error.correction[j,1], error.correction[j,2]))
seq = seq1
}
if (unique(strand)=='-')
seq = as.character(reverseComplement(DNAString(seq)))
# Translate in all three frames (ambiguous codons -> 'X').
prot0 = as.character(translate(DNAString(seq),if.fuzzy.codon = 'X'))
prot1 = as.character(translate(DNAString(seq, start=2),if.fuzzy.codon = 'X'))
prot2 = as.character(translate(DNAString(seq, start=3),if.fuzzy.codon = 'X'))
return(list(CDS=seq, exon.seqs = gs, protein=list(frame1=prot0, frame2=prot1, frame3=prot2), exons = exons))
}
# plot.genes: visualize gene structures and functional classes for a genomic
# region between two genes, using Gviz tracks (axis + gene boxes + transcripts).
#
# Args:
#   gff.file: genome annotation.
#   start.gene / end.gene: gene IDs bounding the region (must share a chromosome).
#   tag: label used in the default PDF name.  out.file: PDF path, or NULL to
#     draw on the current device.
#   cluster.anno: per-gene annotation matrix with columns ID, function,
#     synthesize ('T' marks biosynthetic genes), class; genes without a row
#     default to class 'other'.
#   width/height: PDF size.  gamma: vertical split between the gene panel and
#     the transcript panel.  rotation: feature-label rotation.  extra.nt:
#     flanking bases added on each side.
#   class2colors: named list mapping functional classes to track colors,
#     forwarded to Gviz::plotTracks.
#
# Fixes vs. previous revision:
#   - removed the statement `class2color[setdiff(names(class2color))]`: it
#     referenced an undefined object (the parameter is `class2colors`) and
#     called setdiff() with a single argument, so the function errored at
#     runtime on every call before drawing anything.
#
# NOTE(review): makeTranscriptDbFromGFF is the legacy GenomicFeatures name
# (superseded by makeTxDbFromGFF) — confirm against the installed version.
# NOTE(review): options(ucscChromosomeNames=FALSE) is set globally and not
# restored on exit.
plot.genes <- function(gff.file="/Users/yongli/Universe/data/NPgenome/Aspergillus/A_nidulans_FGSC_A4_current_features.gff",
                       start.gene = 'AN2596', end.gene = 'AN2612', tag = 'S700',
                       out.file = paste(tag, 'NPGC_Struct.pdf', sep=''),
                       cluster.anno = matrix(0,0,0), width=12, height=4, gamma = 0.4, rotation=40, extra.nt = 3000,
                       class2colors = list(oxidoreductase='red', P450='pink', monooxygenase='red',
                                           hydrolase='orange', aldolase='orange',
                                           unknown='grey', transporter='blue', other='grey', DUF='black',
                                           acyltransferases='green', methyltransferase='green', transferase='green')){
  # visualize gene structure and functions (labels) in a genome region
  # cluster.anno: a list of gene annotation in the cluster
  # cluster.anno = data.frame(ID, function, synthesize, class)
  # 'Gviz', 'genoPlotR', 'GenomeGraphs'
  # 'http://genometools.org'
  # YFL, 20141017
  require('Gviz')
  require('grid')
  require('GenomicFeatures')
  gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
  # anno = read.gff3(gff.file, gff.format)
  anno = import.gff(gff.file) # 20160502
  anno = sort.intervals(anno)
  # Strip feature-ID suffixes (e.g. '-P') to recover gene IDs.
  IDs = sub('^([^\\-]*)\\-?.*$', '\\1', anno$ID)
  gene.range.i = c(min(which(IDs==start.gene)), max(which(IDs==end.gene)))
  # anno$feature = anno$type
  chr = as.character(anno@seqnames[gene.range.i[1]])
  if (chr != as.character(anno@seqnames[gene.range.i[2]]))
    stop('Two genes in different chromosome')
  # anno.sub = cbind(as.data.frame(anno[gene.range.i[1]:gene.range.i[2]]), group=IDs[gene.range.i[1]:gene.range.i[2]])
  anno.sub = as.data.frame(anno[gene.range.i[1]:gene.range.i[2]])
  # anno.sub0 = anno[gene.range.i[1]:gene.range.i[2]]; anno.sub0$group=IDs[gene.range.i[1]:gene.range.i[2]]
  anno.gene = anno.sub[anno.sub$type=='gene',]; rownames(anno.gene) = anno.gene$ID
  st.nt = min(anno.gene$start)-extra.nt; en.nt = max(anno.gene$end)+extra.nt
  # Align cluster.anno rows to the genes in view; unannotated genes -> 'other'.
  rownames(cluster.anno) = cluster.anno[,'ID']; cluster.anno = mat.fill.row(cluster.anno, row.names = as.character(anno.gene$ID), default = 'other')
  # tracks: axis highlights the biosynthetic ('synthesize' == 'T') genes
  synthesize.genes = rownames(cluster.anno)[cluster.anno[,'synthesize']=='T']
  axisTrack <- GenomeAxisTrack(range=reduce(IRanges(start=anno.gene[synthesize.genes, 'start'], end = anno.gene[synthesize.genes, 'end'], names = rep('synthesize', length(synthesize.genes)))))
  options(ucscChromosomeNames=FALSE)
  # Two gene tracks: one colored by functional class, one labeled by function.
  gene.track.color = AnnotationTrack(start = anno.gene$start, width = anno.gene$width, chromosome = chr,
                                     strand = as.character(anno.gene$strand),
                                     feature = cluster.anno[as.character(anno.gene$ID),'class'], genome = "Asp. Nidulans", name = 'Genes')
  gene.track = AnnotationTrack(start = anno.gene$start, width = anno.gene$width, chromosome = chr,
                               strand = as.character(anno.gene$strand),
                               feature = cluster.anno[as.character(anno.gene$ID),'function'], genome = "Asp. Nidulans", name = 'Genes')
  txDB = GenomicFeatures::makeTranscriptDbFromGFF(gff.file)
  txTr <- GeneRegionTrack(txDB, chromosome = as.character(anno.gene$seqnames[1]), group = IDs,
                          start = st.nt, end = en.nt, name='Transcripts')
  # plotTracks(axisTrack, from = st.nt, to = en.nt, synthesize='green')
  # feature(txTr)
  if (!is.null(out.file)){
    pdf(out.file, width = width, height = height)
  }
  plotTracks(c(axisTrack, gene.track, txTr), featureAnnotation = 'feature', just.feature = 'below',
             fontcolor.feature='#555555', fontsize.feature=10,
             rotation.item=rotation, transcriptAnnotation = 'group', shape='arrow', just.group = 'left')
  if (1){
    # Second page: manual viewport layout — colored gene panel (with class
    # colors forwarded via class2colors) above the transcript panel.
    a = 0.15
    grid.newpage()
    pushViewport(viewport(height=(1-a)*gamma+a, y=(1-a)*(1-gamma), just="bottom"))
    grid.rect(gp=gpar(col="white"))
    do.call(plotTracks, c(list(trackList= c(axisTrack, gene.track.color), featureAnnotation = 'feature', fontcolor.feature='#555555', fontsize=10,
                               rotation.item=rotation, add=TRUE, from = st.nt, to = en.nt), class2colors))
    plotTracks(c(axisTrack, gene.track), featureAnnotation = 'feature', fontcolor.feature='#555555', fontsize=10,
               rotation.item=rotation, add=TRUE, from = st.nt, to = en.nt)
    popViewport(1)
    pushViewport(viewport(height=(1-a)*(1-gamma), y=0, just="bottom"))
    grid.rect(gp=gpar(col="white"))
    plotTracks(txTr, add=TRUE, transcriptAnnotation = 'group', shape='box', just.group = 'left', from = st.nt, to = en.nt)
    popViewport(1)
  }
  if (!is.null(out.file)){
    dev.off()
  }
  # st = 4463337 - 1000; en = 4543536 + 1000; out.image = 'cluster.png'
  # system(paste('gt gff3 -tity', gff.file, 'tidy.gff'))
  # system(paste('gt sketch -seqid', chr,'-start', st, '-end', en, out.image, 'tidy.gff'))
}
codon.optimizer <- function(CDS.list.file='Batch1_CDS.txt', format = c('tab', 'fasta'), CDS=NULL,
N.codon.types.to.change = 6, out.file = NULL, restriction.sites = c(BsaI='GGTCTC', BsaI.rc='GAGACC',
AarI='CACCTGC', AarI.rc = 'GCAGGTG',
polyA8='AAAAAAAA', polyC8 = 'CCCCCCCC',
polyG5='GGGGG', polyT8 = 'TTTTTTTT'),
repeats = c(rep.CCAGAG='CCAGAGC'),# provide unites of the repeats
tag = '',
genetic.table=1, host.species='4932', #left.extra='CCCGGG', right.extra='CCCGGG',
left.extra='GATCAGCGGCCGC', right.extra='CCCGGGAACAC'){ # 20141217, use Not1 and XmaI sites
# V1:
# 20141023, YF Li
# V2:
# 20141210: add format
# 20141211: fix a bug the one replacement creates another sites,
# and allow using all alternative codons from most frequent to rarest for all codons in a site to be removed
# V3:
# 20141230: add repeats removal function by random sampling of codons for the same aa
# 20151014: change the default of N.codon.types.to.change to 4, in yeast, their are 3 codons (CGG, CGA, CGC) with freq < 3/1000,
# and another one (CTC) with freq < 6/100. These 4 also shows the highest codon usage ratios between A. nidulans and S. cerevisiae.
# V4: to do -- perform codon harmonization
extra.nt = max(c(1,nchar(restriction.sites))) - 1 # extra nt to include on a site to be changes, so that we can check to make sure no new restriction sites are created
require(RCurl)
require(xlsx)
require('Biostrings')
for (i in seq2(1, length(repeats),1)){ # get two unites of the repeats
repeats[i] = paste(repeats[i], repeats[i], sep='')
}
if (sub('^.*\\.([^\\.]+)$', '\\1', CDS.list.file) %in% c('fna', 'fasta'))
format = 'fasta'
print(paste('Input format', format, sep=''))
format = match.arg(format);
if (format=='fasta'){
fa = read.fasta(fasta.files = CDS.list.file, type = 'DNA')
CDS.list.file.bck = CDS.list.file;
CDS.list.file = paste(CDS.list.file, '.txt', sep='')
write.table(fa[,'seq', drop=F], file = CDS.list.file, row.names = T, col.names=F, quote = F, sep='\t')
}
if (is.null(CDS)){
CDS = read.table(CDS.list.file,header = F, sep='\t',as.is = T)
}
if (!is.null(CDS.list.file) & is.null(out.file))
out.file = paste('optimized', tag, '_N',N.codon.types.to.change, '_gt', genetic.table, '_h',host.species, '_', sub('\\..+', '.xls', CDS.list.file), sep='')
codon.usage.table.url = paste('http://www.kazusa.or.jp/codon/cgi-bin/showcodon.cgi?species=', host.species, '&aa=', genetic.table, '&style=GCG', sep='');
a <- getURL(codon.usage.table.url)
gtable <- read.table(text=regexpr.match('PRE\\>[\\s\\S]*\\.\\.\\n \\n([\\s\\S]*)\\n\\n\\<\\/PRE',a, perl=T)[[1]], header = F, as.is=T,strip.white = T)
colnames(gtable) = c('AA', 'Codon', 'Number', 'per1000', 'Fraction')
rownames(gtable) = gtable[,'Codon']
ii = which.max.by(gtable[,'Number'], gtable[,'AA'])
gtable.max = gtable[ii,]; rownames(gtable.max) = gtable.max[,'AA']
gtable$freqNorm = gtable$Number/gtable.max[gtable$AA, 'Number'] # normalized by the most frequent aa
# get rare codons
rare.codon = sort.by(gtable, by = gtable$freqNorm)[seq2(1, N.codon.types.to.change,1),]
gtable$toCodon = gtable$Codon;
gtable[as.character(rare.codon$Codon), 'toCodon'] = gtable.max[as.character(rare.codon$AA), 'Codon']
rare.codon = sort.by(gtable, by = gtable$freqNorm)[seq2(1, N.codon.types.to.change,1),]
cat('rare codons to be changed')
print(rare.codon)
# best alternative codons for restriction site optimization
gtable$bestAlternativeCodon = gtable.max[ gtable$AA, 'Codon']
ii = which.max.n.by(gtable[,'Number'], gtable[,'AA'], n = 2)
gtable.max$bestAlternativeCodon = gtable$Codon[ii[gtable.max$AA]]
gtable.max$bestAlternativeCodon[gtable.max$bestAlternativeCodon %in% rare.codon$Codon] = NA; # if the second best codon is rare than do there is no second best
gtable.max$bestAlternativeCodon[is.na(gtable.max$bestAlternativeCodon)] = gtable.max$Codon[is.na(gtable.max$bestAlternativeCodon)]
gtable[as.character(gtable.max$Codon), 'bestAlternativeCodon'] = gtable.max$bestAlternativeCodon
gtable[,c('per1000Alt', 'freqNormAlt')] = gtable[as.character(gtable$bestAlternativeCodon),c('per1000','freqNorm')]
# optimize codons
cat('\noptimizing codons & Removing restriction sites\n')
colnames(CDS) = c('name', 'CDS')
n.site.corrected = 0;
n.protein.corrected = 0;
for (i in 1:nrow(CDS)){
cat('\n')
cat(CDS$name[i],'\t')
l = nchar(CDS[i,2])
CDS[i, 'newCDS'] = paste(gtable[substring(CDS[i,2], first = seq(1, l, 3), last = seq(3, l, 3)), 'toCodon'], collapse = '')
has.restriction.site = F
notes = ''
for (rr in seq2(1, length(restriction.sites),1)){
r = restriction.sites[[rr]]; r.name = names(restriction.sites)[rr];
m <- m00 <- as.matrix(matchPattern(r, DNAString(CDS[i, 'newCDS']), fixed=F)@ranges);
r = as.character(r)
m[,2] = m[,1]+ m[,2]-1; m0 = m;
m[,1] = floor((m[,1]-1)/3)*3+1 # extend to cover whole codons
m[,2] = ceiling((m[,2])/3)*3 # extend to cover whole codons
m0 = m0 - m[,1] + 1; # match in the local coordiate
if (length(m00>0)){
has.restriction.site = T
cat(r.name, r, 'site:', nrow(m00), '\t')
notes = paste(notes, r.name, r, 'site:', nrow(m00), ' ', sep=' ')
}
# if (i == 77){
# cat('here we are')
# }
for (j in seq2(1,nrow(m),1)){
local.seq = substring(CDS[i,'newCDS'], m[j,1], m[j,2]);
local.seq.left = substring(CDS[i,'newCDS'], m[j,1]-extra.nt, m[j,1]-1);
local.seq.right = substring(CDS[i,'newCDS'], m[j,2]+1, m[j,2]+extra.nt);
ll = nchar(local.seq)
Cs = substring(local.seq, first = seq(1, ll, 3), last = seq(3, ll, 3)) # codons
# to.table = gtable[Cs,]; to.table$index = 1:nrow(to.table)
# to.table = sort.by(to.table, by = to.table$per1000Alt, decreasing=T);
# to.table = sort.by(to.table, by = to.table$freqNormAlt, decreasing=T); # sort the codons by
# to.try = to.table$index[to.table$bestAlternativeCodon != to.table$Codon]; # index of the codons to change in the orders of codon prefrence
to.table = c()
for (tt in 1:length(Cs)){
indx = gtable$AA == gtable[Cs[tt], 'AA'] & gtable$Codon != Cs[tt]
if (any(indx))
to.table = rbind(to.table, cbind(gtable[indx,c('AA', 'Codon', 'freqNorm', 'per1000')], index = tt))
}
to.table = sort.by(to.table, by = to.table$freqNorm, decreasing=T); # sort by the codons freq
succeed = F;
for (t in 1:nrow(to.table)){ # to.try){
CsNew = Cs;
#CsNew[t] = to.table[CsNew[t], 'bestAlternativeCodon']
CsNew[to.table$index[t]] = to.table[t, 'Codon']
local.seq.new = paste(CsNew, collapse = '')
matched.any = F
for (r1 in restriction.sites){ # 20141211
if (length(matchPattern(r1, DNAString(paste(local.seq.left, local.seq.new, local.seq.right, sep='')), fixed=F))){
matched.any=T
break
}
}
if (!matched.any){
succeed = T
n.site.corrected = n.site.corrected + 1;
break;
}
}
if (!succeed){cat(CDS$name[i],'\t')
warning(paste('\nFailed to remove site ', r, ' location ', m00[j,1], ' in sequence ', CDS$name[i], sep=''))
}else{
CDS[i, 'newCDS'] = paste(substring(CDS[i, 'newCDS'], 1, m[j,1]-1), local.seq.new, substring(CDS[i, 'newCDS'], m[j,2]+1, l), sep='')
}
}
}
# handling repeats
for (rr in seq2(1, length(repeats),1)){
r = repeats[[rr]]; r.name = names(repeats)[rr];
m <- m00 <- as.matrix(matchPattern(r, DNAString(CDS[i, 'newCDS']), fixed=F)@ranges);
r = as.character(r)
m[,2] = m[,1]+ m[,2]-1; m0 = m;
m[,1] = floor((m[,1]-1)/3)*3+1 # extend to cover whole codons
m[,2] = ceiling((m[,2])/3)*3 # extend to cover whole codons
m0 = m0 - m[,1] + 1; # match in the local coordiate
if (length(m00>0)){
has.restriction.site = T
cat(r.name, r, 'site:', nrow(m00), '\t')
notes = paste(notes, r.name, r, 'site:', nrow(m00), ' ', sep=' ')
}
# if (i == 77){
# cat('here we are')
# }
for (j in seq2(1,nrow(m),1)){
local.seq = substring(CDS[i,'newCDS'], m[j,1], m[j,2]);
local.seq.left = substring(CDS[i,'newCDS'], m[j,1]-extra.nt, m[j,1]-1);
local.seq.right = substring(CDS[i,'newCDS'], m[j,2]+1, m[j,2]+extra.nt);
ll = nchar(local.seq)
Cs = substring(local.seq, first = seq(1, ll, 3), last = seq(3, ll, 3)) # codons
# to.table = gtable[Cs,]; to.table$index = 1:nrow(to.table)
# to.table = sort.by(to.table, by = to.table$per1000Alt, decreasing=T);
# to.table = sort.by(to.table, by = to.table$freqNormAlt, decreasing=T); # sort the codons by
# to.try = to.table$index[to.table$bestAlternativeCodon != to.table$Codon]; # index of the codons to change in the orders of codon prefrence
to.table = c()
CsNew = Cs;
for (reps in 1:10){# try 10 times to get one meet restriction site criteria
for (tt in 1:length(Cs)){
indx = gtable$AA == gtable[Cs[tt], 'AA']
if (!any(indx))
next
candidates = gtable[indx,'Codon'];
to.use = sample(1:sum(indx),1, T, prob=gtable[indx, 'freqNorm']/sum(gtable[indx, 'freqNorm']))
CsNew[tt] = candidates[to.use]
}
local.seq.new = paste(CsNew, collapse = '')
matched.any = F
for (r1 in restriction.sites){ # 20141211
if (length(matchPattern(r1, DNAString(paste(local.seq.left, local.seq.new, local.seq.right, sep='')), fixed=F))){
matched.any=T
break
}
}
if (!matched.any){
succeed = T
n.site.corrected = n.site.corrected + 1;
break;
}
}
if (!succeed){cat(CDS$name[i],'\t')
warning(paste('\nFailed to remove site ', r, ' location ', m00[j,1], ' in sequence ', CDS$name[i], sep=''))
}else{
CDS[i, 'newCDS'] = paste(substring(CDS[i, 'newCDS'], 1, m[j,1]-1), local.seq.new, substring(CDS[i, 'newCDS'], m[j,2]+1, l), sep='')
}
}
}
n.protein.corrected = n.protein.corrected + has.restriction.site
oldSeq = strsplit(CDS$CDS[i], '')[[1]]; newSeq = strsplit(CDS$newCDS[i], '')[[1]]
CDS[i, 'Nchanged'] = sum(oldSeq != newSeq)
CDS[i, 'Nchanged%'] = round(CDS[i, 'Nchanged']/l*100,1)
CDS[i, 'CG%_old'] = round(sum(oldSeq %in% c('C','G'))/l*100,1)
CDS[i, 'CG%_new'] = round(sum(newSeq %in% c('C','G'))/l*100,1)
CDS[i, 'CAI_old'] = exp(mean(log(gtable[substring(CDS[i,'CDS'], first = seq(1, l, 3), last = seq(3, l, 3)), 'freqNorm'])))
CDS[i, 'CAI_new'] = exp(mean(log(gtable[substring(CDS[i,'newCDS'], first = seq(1, l, 3), last = seq(3, l, 3)), 'freqNorm'])))
CDS[i, 'sites removed'] = notes
}
## confirm protein seq
cat('\nconfirm protein sequences\n')
for (i in 1:nrow(CDS)){
if (translate(DNAString(CDS[i, 'newCDS']),if.fuzzy.codon = 'X') != translate(DNAString(CDS[i, 'CDS']),if.fuzzy.codon = 'X'))
stop('Protein sequence changed')
}
cat('n.protein.corrected', n.protein.corrected, '\n')
cat('n.site.corrected', n.site.corrected, '\n')
CDS$CDSwExtra = paste(left.extra, CDS$newCDS, right.extra, sep='')
CDS$length = nchar(CDS$CDSwExtra)
write.table(x = cbind(CDS$name, CDS$CDSwExtra), paste('optimized_',CDS.list.file,sep=''), sep='\t', row.names = F, col.names = F, append = F, quote = F)
# write.table(x = cbind(CDS$name, CDS$newCDS), paste('new_',CDS.list.file,sep=''), sep='\t', row.names = F, col.names = F, append = F, quote = F)
# seqlist = cbind(seq = CDS$newCDS, ID = CDS$name);
seqlist = cbind(seq = CDS$CDSwExtra, ID = CDS$name); # 20160616
rownames(seqlist) = CDS$name;
write.fasta(seqlist, paste('optimized_', CDS.list.file.bck, sep=''))
if (!is.null(out.file))
write.xlsx(CDS,file = out.file, row.names = F)
invisible(CDS)
}
# Stub: gene structure prediction is not implemented yet.
predict.genes <- function(genome) {
  # Intentionally empty; evaluates to NULL.
}
# Stub: functional annotation (e.g. via InterProScan / BLASTP) is not
# implemented yet.
# Yong Fuga Li, 20141027
annotate.functions <- function(genome, genes, do = c('iprscan', 'blastp')) {
  # Intentionally empty; evaluates to NULL.
}
FUN.select.KU = function(x){ # 20141126
  # Flag cells whose text marks a gene as a known (KU) secondary-metabolite
  # backbone gene: polyketide synthase (only if protein length > 800 aa),
  # alkaloid, terpenoid/terpene, or NRPS (named explicitly, or implied by the
  # Adenylation/ACP + Condensation + Phosphopantetheine domain combination).
  #
  # x: data.frame of gene annotations; the columns 'Existing.Anno' and
  #    'domains' are scanned when present ('domains' also reads x$length).
  # Returns a logical matrix with the same dimnames as x; TRUE marks the
  # cells that triggered the KU call.
  #
  # Fix: dropped the dead computation of the identity percentage ('t'),
  # which was parsed here but never used (it could only emit spurious
  # coercion warnings).
  y = matrix(FALSE, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x));
  if ('Existing.Anno' %in% colnames(y))
    y[, 'Existing.Anno'] = regexpr(pattern='polyketide|alkaloid|terpenoid|terpene|nonribosomal peptide', x$Existing.Anno, ignore.case = TRUE)>0
  if ('domains' %in% colnames(y))
    y[, 'domains'] = (regexpr(pattern='polyketide', x$domains, ignore.case = TRUE)>0 & as.numeric(as.character(x$length)) > 800) | # polyketide
    regexpr(pattern='alkaloid', x$domains, ignore.case = TRUE)>0 | # alkaloid
    regexpr(pattern='terpenoid|terpene', x$domains, ignore.case = TRUE)>0 | # terpenoid
    regexpr(pattern='nonribosomal peptide', x$domains, ignore.case = TRUE)>0 | (regexpr(pattern='Adenylation|ACP', x$domains, ignore.case = TRUE)>0 & regexpr(pattern='Condensation', x$domains, ignore.case = TRUE)>0 & regexpr(pattern='Phosphopantetheine', x$domains, ignore.case = TRUE)>0) # required domains for NRPS or PKS
  return(y)
}
is.KU <- function(anno.txt = '', domain.txt = ''){
  # 20150415, Yong Fuga Li
  # TRUE where the annotation or domain text suggests a known (KU)
  # secondary-metabolite gene: PKS, NRPS, terpenoid/terpene, alkaloid,
  # or an explicit "secondary metabo..." mention. Vectorized over both
  # arguments (elementwise OR with the usual recycling).
  hit <- function(p, txt) grepl(p, txt, ignore.case = TRUE)
  from.anno <- hit('polyketide|alkaloid|terpenoid|terpene|nonribosomal peptide|secondary metabo', anno.txt)
  # NRPS implied by co-occurrence of the three core domain types
  nrps.core <- hit('Adenylation|ACP', domain.txt) &
    hit('Condensation', domain.txt) &
    hit('Phosphopantetheine', domain.txt)
  from.domain <- hit('polyketide', domain.txt) |
    hit('alkaloid', domain.txt) |
    hit('terpenoid|terpene', domain.txt) |
    hit('nonribosomal peptide', domain.txt) |
    nrps.core
  from.anno | from.domain
}
FUN.select.maybeKU = function(x){ # 20141126
  # Looser companion of FUN.select.KU: flag cells whose text *might*
  # indicate a secondary-metabolite backbone gene. Differences:
  #   - 'domains' also matches acyl carrier proteins and prenyl diphosphate
  #     (geranyl/farnesyl) terms, with no protein-length requirement;
  #   - the 'top.5.hits' column is scanned as well.
  #
  # x: data.frame of gene annotations; the columns 'Existing.Anno',
  #    'domains' and 'top.5.hits' are scanned when present.
  # Returns a logical matrix with the same dimnames as x.
  #
  # Fix: dropped the dead computation of the identity percentage ('t'),
  # which was parsed here but never used.
  y = matrix(FALSE, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x));
  if ('Existing.Anno' %in% colnames(y))
    y[, 'Existing.Anno'] = regexpr(pattern='polyketide|alkaloid|terpenoid|terpene|nonribosomal peptide', x$Existing.Anno, ignore.case = TRUE)>0
  if ('domains' %in% colnames(y))
    y[, 'domains'] = regexpr(pattern='(polyketide|acyl carrier protein)', x$domains, ignore.case = TRUE)>0 | # polyketide
    regexpr(pattern='alkaloid', x$domains, ignore.case = TRUE)>0 | # alkaloid
    regexpr(pattern='(terpenoid|terpene|geranyl diphosphate|farnesyl diphosphate)', x$domains, ignore.case = TRUE)>0 | # terpenoid
    regexpr(pattern='nonribosomal peptide', x$domains, ignore.case = TRUE)>0 | (regexpr(pattern='Adenylation|ACP', x$domains, ignore.case = TRUE)>0 & regexpr(pattern='Condensation', x$domains, ignore.case = TRUE)>0 & regexpr(pattern='Phosphopantetheine', x$domains, ignore.case = TRUE)>0) # required domains for NRPS or PKS
  if ('top.5.hits' %in% colnames(y))
    y[, 'top.5.hits'] = regexpr(pattern='polyketide|alkaloid|terpenoid|terpene|nonribosomal peptide', x$top.5.hits, ignore.case = TRUE)>0
  return(y)
}
FUN.select.promising = function(x){
  # Mark genes with a cluster score (CS) above 3 as promising.
  # Returns a logical matrix shaped like x; only the 'CS' column can be TRUE.
  sel <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  if ('CS' %in% colnames(sel)) {
    cs <- as.numeric(as.character(x[, 'CS']))
    sel[, 'CS'] <- cs > 3
  }
  sel
}
FUN.select.boundary = function(x){
  # Flag potential cluster-boundary genes: strongly expressed genes
  # (express > 9) or genes with a very low cluster score (CS < 0.5).
  # Returns a logical matrix shaped like x.
  flagged <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  if ('express' %in% colnames(flagged)) {
    flagged[, 'express'] <- as.numeric(as.character(x$express)) > 9
  }
  if ('CS' %in% colnames(flagged)) {
    flagged[, 'CS'] <- as.numeric(as.character(x[, 'CS'])) < 0.5
  }
  flagged
}
FUN.select.special = function(x){
  # Highlight genes of special interest: LLM-like proteins, the laeA
  # regulator, and molybdenum-related ("molyb...") proteins, matched in the
  # gene name, existing annotation, or domain text.
  hit <- function(p, txt) grepl(p, txt, ignore.case = TRUE)
  special <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  if ('name' %in% colnames(special))
    special[, 'name'] <- hit('llm|laeA', x$name)
  if ('Existing.Anno' %in% colnames(special))
    special[, 'Existing.Anno'] <- hit('llm|laeA|molyb', x$Existing.Anno)
  if ('domains' %in% colnames(special))
    special[, 'domains'] <- hit('laeA|molyb', x$domains)
  special
}
FUN.select.interestingv1 = function(x){
  # v1 "interesting" gene filter. A cell is flagged when:
  #   length   - protein longer than 1000 aa
  #   identity - top non-self hit identity above 50% or below 25%
  #   annotation / domains / top hits - secondary-metabolite keywords
  # Returns a logical matrix with the dimnames of x.
  sm.pat <- 'polyketide|alkaloid|terpenoid|terpene|nonribosomal peptide'
  hit <- function(txt) grepl(sm.pat, txt, ignore.case = TRUE)
  flagged <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  ident <- as.numeric(sub('(.*)\\%', '\\1', as.character(x$Top_nonself_Hit_identity.percent)))
  ident[is.na(ident)] <- 0  # unparseable/missing identities count as 0%
  cols <- colnames(flagged)
  if ('length' %in% cols)
    flagged[, 'length'] <- as.numeric(as.character(x$length)) > 1000
  if ('Top_nonself_Hit_identity.percent' %in% cols)
    flagged[, 'Top_nonself_Hit_identity.percent'] <- ident > 50 | ident < 25
  if ('Existing.Anno' %in% cols)
    flagged[, 'Existing.Anno'] <- hit(x$Existing.Anno)
  if ('domains' %in% cols)
    flagged[, 'domains'] <- hit(x$domains)
  if ('top.5.hits' %in% cols)
    flagged[, 'top.5.hits'] <- hit(x$top.5.hits)
  flagged
}
FUN.select.interesting = function(x){
  # Current "interesting" gene filter: proteins longer than 800 aa, or top
  # non-self hits with identity above 75% or strictly inside (0, 25)%.
  # Returns a logical matrix with the dimnames of x.
  flagged <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  ident <- as.numeric(sub('(.*)\\%', '\\1', as.character(x$Top_nonself_Hit_identity.percent)))
  ident[is.na(ident)] <- 0  # missing identity treated as 0% (never flagged)
  if ('length' %in% colnames(flagged))
    flagged[, 'length'] <- as.numeric(as.character(x$length)) > 800
  if ('Top_nonself_Hit_identity.percent' %in% colnames(flagged))
    flagged[, 'Top_nonself_Hit_identity.percent'] <- ident > 75 | (ident < 25 & ident > 0)
  flagged
}
FUN.select.oxidoreductase = function(x){
  # Flag redox enzymes (P450s, oxidases, dehydrogenases, oxygenases,
  # reductases) mentioned in the annotation, domain, or top-hit text.
  # Returns a logical matrix with the dimnames of x.
  redox.pat <- 'oxidoreductase|P450|oxidase|dehydrogenase|oxygenase|reductase'
  hit <- function(txt) grepl(redox.pat, txt, ignore.case = TRUE)
  flagged <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  for (col in intersect(c('Existing.Anno', 'domains', 'top.5.hits'), colnames(flagged)))
    flagged[, col] <- hit(x[[col]])
  flagged
}
FUN.select.boring = function(x){
  # Mark genes whose existing annotation already reads "secondary metab..."
  # (i.e. already identified, nothing new to flag).
  flagged <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  if ('Existing.Anno' %in% colnames(flagged))
    flagged[, 'Existing.Anno'] <- grepl('secondary metab', x$Existing.Anno, ignore.case = TRUE)
  flagged
}
FUN.select.warning = function(x){
  # Flag suspicious gene models: unusually long introns (average > 100)
  # or unusually short exons (average < 100).
  # Returns a logical matrix with the dimnames of x.
  flagged <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  if ('average.intron.size' %in% colnames(flagged))
    flagged[, 'average.intron.size'] <- as.numeric(as.character(x$average.intron.size)) > 100
  if ('average.exon.size' %in% colnames(flagged))
    flagged[, 'average.exon.size'] <- as.numeric(as.character(x$average.exon.size)) < 100
  flagged
}
FUN.select.catabolism = function(x){
  # Flag biosynthetic/transfer enzymes (transferases, synthases,
  # synthetases, ligases) mentioned in the annotation, domain, or
  # top-hit text. Returns a logical matrix with the dimnames of x.
  cat.pat <- 'transferase|synthase|synthetase|ligase'
  hit <- function(txt) grepl(cat.pat, txt, ignore.case = TRUE)
  flagged <- matrix(FALSE, nrow(x), ncol(x), dimnames = dimnames(x))
  for (col in intersect(c('Existing.Anno', 'domains', 'top.5.hits'), colnames(flagged)))
    flagged[, col] <- hit(x[[col]])
  flagged
}
xlsx.extractSheet.NPGC <- function(xlsx.file='Pexpansum_MC29w20p0.005_DeepAnno.xlsx', header = T, na.strings = '|',extra.genes = 5){
  # YF Li, 20141126
  # FUN.select: a function to select the cells and return a logical matrix
  #
  # Split a per-cluster annotation workbook (one sheet per cluster) into
  # three workbooks by cluster category, judged from the genes inside each
  # sheet while ignoring the first and last `extra.genes` rows (flanking
  # genes outside the cluster proper):
  #   *_KU.xlsx      - sheets where FUN.select.KU() flags a backbone gene
  #   *_maybeUU.xlsx - sheets flagged only by the looser FUN.select.maybeKU()
  #   *_UU.xlsx      - all remaining sheets
  # 'UU' in sheet names is rewritten to the new category tag. Requires the
  # XLConnect package; jgc() and xlsx.color.NPGC() are helpers defined
  # elsewhere in this file.
  # NOTE(review): `header` is accepted but never used below - confirm intent.
  options(java.parameters = "-Xmx4g" )  # raise JVM heap before XLConnect starts
  require(XLConnect)
  # derive output file names by inserting a category tag before the extension
  xlsx.KU.file = sub(pattern = '.([^\\.]+$)', replacement = '_KU.\\1', xlsx.file)
  xlsx.maybeUU.file = sub(pattern = '.([^\\.]+$)', replacement = '_maybeUU.\\1', xlsx.file)
  xlsx.UU.file = sub(pattern = '.([^\\.]+$)', replacement = '_UU.\\1', xlsx.file)
  wb <- XLConnect::loadWorkbook(xlsx.file)
  n = length(XLConnect::getSheets(wb))
  is.KU <- is.maybeUU <- is.UU <- vector(mode = 'logical', length = n)
  jgc()  # presumably frees JVM memory between workbook operations - helper defined elsewhere
  for (i in 1:n){
    x = XLConnect::readWorksheet(wb, sheet = i) # 20141126
    x[x==na.strings] = NA
    # classify using only the cluster genes proper (trim flanking rows)
    is.KU[i] = any(FUN.select.KU(x)[(extra.genes+1):(nrow(x)-extra.genes),], na.rm = T)
    is.maybeUU[i] = any(FUN.select.maybeKU(x)[(extra.genes+1):(nrow(x)-extra.genes),], na.rm = T)
  }
  # make the three categories mutually exclusive and exhaustive
  is.maybeUU = is.maybeUU & ! is.KU
  is.UU = ! (is.KU | is.maybeUU)
  cat('Number of KU ', sum(is.KU))
  cat('\nNumber of maybe KU ', sum(is.maybeUU))
  cat('\nNumber of UU ', sum(is.UU))
  # Reload the source workbook, drop the unselected sheets, retag the
  # remaining sheet names, and save under the category-specific file name.
  extract.sheets <- function(xlsx.file, xlsx.out.file, to.keep, name.from = 'UU', name.to = 'KU'){
    wb = XLConnect::loadWorkbook(xlsx.file)
    sheetnames = XLConnect::getSheets(wb)
    XLConnect::clearSheet(wb, sheet = sheetnames[!to.keep])
    XLConnect::removeSheet(wb, sheet = sheetnames[!to.keep])
    sheetnames = XLConnect::getSheets(wb)
    renameSheet(wb, sheet = sheetnames, sub(name.from, name.to, sheetnames))
    XLConnect::saveWorkbook(wb, xlsx.out.file)
  }
  extract.sheets(xlsx.file, xlsx.KU.file, is.KU, name.from = 'UU', name.to = 'KU'); jgc()
  extract.sheets(xlsx.file, xlsx.maybeUU.file, is.maybeUU, name.from = 'UU', name.to = 'maybeUU'); jgc()
  extract.sheets(xlsx.file, xlsx.UU.file, is.UU, name.from = 'UU', name.to = 'UU'); jgc()
  # apply the project's conditional coloring to each output workbook
  xlsx.color.NPGC(xlsx.KU.file)
  xlsx.color.NPGC(xlsx.maybeUU.file)
  xlsx.color.NPGC(xlsx.UU.file)
}
gff.id.change <- function(gff.file, anno = NULL,
                          in.info = c(feature = 'CDS', id.type = 'ID'), in.ids = NULL,
                          out.info = c(feature = 'gene', id.type = 'ID'),
                          extra.nt = 2500,
                          out.type = c('nt', 'id')){
  # 20151012, YF LI
  # Map feature IDs of one GFF feature type to the overlapping features of
  # another type (default: CDS IDs -> the IDs of the overlapping genes).
  #
  # gff.file : GFF file to read (ignored when `anno` is supplied)
  # anno     : optional pre-loaded annotation (as returned by import.gff)
  # in.info  : feature type + ID attribute of the query features
  # in.ids   : IDs of the query features to map; a trailing '.<digit>'
  #            version suffix is stripped from annotation IDs before matching
  # out.info : feature type + ID attribute of the target features
  # extra.nt : flank size passed to geneRanges2ntRanges() when out.type='nt'
  # out.type : 'nt' returns nucleotide ranges (forces out.info to gene/ID);
  #            'id' returns the matched target IDs
  out.type = match.arg(out.type)
  if (out.type == 'nt')
    out.info = c(feature = 'gene', id.type = 'ID')
  if (is.null(anno)){
    gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)  # file extension as format
    # anno = read.gff3(gff.file, format=gff.format)
    anno = import.gff(gff.file) # 20160502
  }
  # sort.intervals() is a helper defined elsewhere in this file
  anno.in = sort.intervals(anno[anno$type==in.info['feature'], ])
  anno.out = sort.intervals(anno[anno$type==out.info['feature'], ])
  # keep only the requested query features ('\\.\\d$' drops version suffixes)
  anno.in = anno.in[sort(match(in.ids, sub('\\.\\d$', '', anno.in@elementMetadata[,in.info['id.type']])))]
  # target IDs = IDs of the out-features overlapping each query feature
  m = findOverlaps(anno.in, subject = anno.out)
  out.ids = anno.out@elementMetadata[m@subjectHits,out.info['id.type']]
  if (out.type == 'nt'){
    locs = geneRanges2ntRanges(anno, out.ids, extra.nt)  # helper defined elsewhere
    return(locs)
  }else{
    return(out.ids)
  }
}
gff.match <- gff.mapping <- function(gff.file = 'cS818_augoHintstop1.gff', gff.reference = 'cS818.gff', tag = 'mapped_', gff.to.out = paste(tag, gff.file, sep=''),
                                     geneID.re = '^([^\\.]+)(?:\\..+)?$', # extract gene IDs from element (e.g. exon) IDs
                                     match.by = 'gene', format='gff3', geneID2cdsID=identity){
  # Yong Fuga Li, 20141215
  # 20141231: add geneID2cdsID
  #
  # Rewrite the feature IDs (and Parent attributes) in `gff.file` so that
  # each gene carries the ID(s) of the overlapping `match.by` feature(s) in
  # `gff.reference`, then export the renamed annotation to `gff.to.out`.
  # Genes with no overlap keep their original ID; genes overlapping several
  # reference features get the reference IDs joined with '~'.
  # geneID2cdsID transforms reference IDs before use (default: identity).
  # NOTE(review): regexpr.match(), regexpr.match.loc() and unlist.multi()
  # are helpers defined elsewhere in this file.
  g = import(gff.file)
  g.ref = import(gff.reference)
  if (!any(match.by %in% g$type)) # 20151002
    stop(paste(g$type, 'is not a feature type for ', gff.file))
  if (!any(match.by %in% g.ref$type))
    stop(paste(g.ref$type, 'is not a feature type for ', gff.reference))
  g.slim = g[g$type==match.by]
  g.ref.slim = g.ref[g.ref$type==match.by]
  m = findOverlaps(g.slim, subject = g.ref.slim)
  g.ref.IDs = geneID2cdsID(g.ref.slim$ID[m@subjectHits])
  g.IDs = g.slim$ID[m@queryHits]
  g.IDs.extra = setdiff(g.slim$ID, g.IDs)  # unmatched genes map to themselves
  g.IDs = c(g.IDs, g.IDs.extra)
  g.ref.IDs = c(g.ref.IDs, g.IDs.extra)
  # one map entry per source gene ID; multiple reference hits joined by '~'
  ID.map = unlist(as.list(by(g.ref.IDs, regexpr.match(geneID.re, g.IDs), FUN = function(x){paste(unique(x), collapse = '~')})))
  # rewrite the gene-ID portion of every feature ID in place
  locs = regexpr.match.loc(geneID.re, g$ID)
  for (i in 1: length(locs)){
    if (is.na(g$ID[i]))
      next
    g$ID[i] = paste(substr(g$ID[i], 1, locs[[i]][1,1]-1), ID.map[substr(g$ID[i], locs[[i]][1,1], locs[[i]][1,2])], substr(g$ID[i], locs[[i]][1,2]+1,nchar(g$ID[i])), sep='')
  }
  # rewrite Parent attributes the same way
  pa = unlist.multi(g$Parent);
  locs = regexpr.match.loc(geneID.re, pa)
  for (i in 1: length(locs)){
    if (is.na(pa[i]))
      next
    g$Parent[[i]] = paste(substr(pa[i], 1, locs[[i]][1,1]-1), ID.map[substr(pa[i], locs[[i]][1,1], locs[[i]][1,2])], substr(pa[i], locs[[i]][1,2]+1,nchar(pa[i])), sep='')
  }
  export(g, gff.to.out, format = format)
}
NPGC.wclustering <- enzyme.wclustering <- function(gff.file, iprscan.tab.file = NULL, chromosome.specific=F,
anno = NULL,
ipr.anno = NULL,
pep.fasta.file=pep.fasta.file, filter.proteins = T, min.protein.length = 150,
gene.definition = c('gene', 'transcript', 'mRNA'), proteinID = 'ID',
annotation.by = c('OR', 'desc', 'domain'),
tag = 'A_nidulans_FGSC_A4', window.size = 20, log.scale = F,
simu.rep = 5,
method = c('TXTwe', 'TXTw', 'MC29e', 'MC29'),
enzyme.weight.file = paste('/Users/yongli/Universe/write/Project_Current/t.NPbioinformatics/enzyme_weighting/', method, '.txt', sep=''),
multi.match = c('max', 'mean'),
prediction.file='Top.Clusters', min.contig.len=window.size/2,
compare.against =c('simulation','theoretical'),
p.value.cutoff = 0.005,
outformat=c('csv', 'tab')){
# statistical analysis of weighted enzyme clustering in a genome
# chromosome.specific: estimate chromosome specific enzyme probability estimation
# simu.rep: simulated gene sequences
# Yong Fuga Li, 20141220, modified from enzyme.wclustering
compare.against = match.arg(compare.against)
gene.definition = match.arg(gene.definition) # 20141125
outformat = match.arg(outformat)
annotation.by = match.arg(annotation.by) # 20141125
method = match.arg(method);
require('rtracklayer')
require('genomeIntervals')
require(lattice)
if (is.null(anno)){
gff.format = sub('^.*\\.([^\\.]*$)', '\\1', gff.file)
# anno = read.gff3(gff.file, format=gff.format)
anno = import.gff(gff.file) # 20160502
}
chrs = as.character(unique(anno@seqnames))
## keep genes only
idx.gene = (anno$type==gene.definition) # 20141125
anno = anno[idx.gene, ]
anno = sort.intervals(anno)
colnames(anno@elementMetadata) = toupper(colnames(anno@elementMetadata)) # 20141125
if (!is.null(anno$NOTE)){ # 20141125
desc.fname = 'NOTE'
}else if (!is.null(anno$DESCRIPTION)){
desc.fname = 'DESCRIPTION'
}else{
warning('No description or Note field for the annotation of genes')
desc.fname = 'NOTE'
anno$NOTE = ''
}
# ### remove short chrs, 20141222
# to.remove = vector('logical', length(anno))
# for (i in 1:length(chrs)){
# chr = chrs[i]
# is.in.chr = as.vector(anno@seqnames==chr)
# if (sum(is.in.chr) < min.contig.len){
# to.remove[is.in.chr] = T
# }
# }
# read ipr anno: 20141125
if (is.null(ipr.anno)){
ipr.anno = iprscan.flat(iprscan.tab.file, na.strings = c('-', 'NA', 'NULL'))
}
ipr.anno = mat.fill.row(t(t(ipr.anno)), row.names = anno@elementMetadata[,toupper(proteinID)], default = '')[,1]
names(ipr.anno) = anno$ID
if (annotation.by %in% 'desc'){
annotation.text = as.character(as.vector(anno@elementMetadata[[toupper(desc.fname)]]))
}else if(annotation.by %in% 'domain'){
annotation.text = as.character(as.vector(ipr.anno));
}else if(annotation.by %in% c('OR')){
annotation.text = paste(as.character(as.vector(anno@elementMetadata[[toupper(desc.fname)]])), as.character(as.vector(ipr.anno)))
}
# filter proteins by length
cat('removing short unannotated protein\n')
if(filter.proteins){
len.pep = nchar(read.fasta(pep.fasta.file)[anno@elementMetadata[,toupper(proteinID)], 'seq']);
i.fake.protein = as.character(as.vector(ipr.anno)) == '' & (as.character(as.vector(anno@elementMetadata[[toupper(desc.fname)]])) == '' |
as.character(as.vector(anno@elementMetadata[[toupper(desc.fname)]])) == 'Protein of unknown function')
cat('Number of unannotated protein', sum(i.fake.protein), '\n')
i.fake.protein = i.fake.protein & (len.pep < min.protein.length)
anno = anno[!i.fake.protein];
ipr.anno = ipr.anno[!i.fake.protein];
annotation.text = annotation.text[!i.fake.protein];
cat('removed', sum(i.fake.protein), 'proteins\n')
}
chrs = as.character(unique(anno@seqnames))
### read weight file
w = read.table(file = enzyme.weight.file, header = T, sep = '\t')
# pat = paste('(', paste(w$enzyme, collapse = '|'), ')', sep='')
# # get gene weights
# ma = regexpr.match(pat = pat, txt = annotation.text, perl=T, ignore.case=T)
cat('Calculating gene weights\n')
is.enzyme = vector('numeric', length(annotation.text))
if (multi.match=='max')
is.enzyme = is.enzyme - Inf;
nmatches = 0;
for (p in 1:nrow(w)){
if (multi.match=='max'){
ma = c(NA, 1)[(regexpr(w$enzyme[p], text = annotation.text, perl=T)>0)+1];
is.enzyme = rowMax(cbind(is.enzyme, ma * w$AverageScore[p]), na.rm=T)
}else{
ma = (regexpr(w$enzyme[p], text = annotation.text, perl=T)>0)
is.enzyme = is.enzyme + ma * w$AverageScore[p]
nmatches = nmatches + ma
}
}
if (multi.match=='mean'){
is.enzyme = is.enzyme/(1E-10+nmatches)
}else{
is.enzyme[is.infinite(is.enzyme)] = 0
}
# get local max of enzyme weights in sliding windows
cat('Identify local max cluster scores\n')
epsilon = 1E-10
L.gene = list()
chr.ranges = matrix(0, length(chrs), ncol = 2, dimnames = list(chrs,c('start', 'end')))
labels.succ.local.all = is.enzyme
for (i in 1:length(chrs)){
chr = as.character(chrs[i])
is.in.chr = as.vector(anno@seqnames==chr)
chr.ranges[chr, c('start', 'end')] = range(which(is.in.chr))
L.gene[[chr]] = sum(is.in.chr)# number of genes in this chromosome
seq = is.enzyme[is.in.chr]
if (L.gene[[chr]] < window.size){
labels.succ.local.all[is.in.chr] = 0
next
}
labels.succ.local = label.successes.local.max(seq,window.size) # only keep the local max that are greater than 0
labels.succ.local[labels.succ.local<0] = 0;
labels.succ.local.all[is.in.chr] = labels.succ.local
}
labels.succ.local.all.wZeros = labels.succ.local.all
labels.succ.local.all = labels.succ.local.all[labels.succ.local.all>epsilon]
# simulations: get local max of enzyme weights in sliding windows
labels.succ.local.all.simus = list();
for (r in 1:simu.rep){
txt = paste('iteration', r);
cat(txt)
labels.succ.local.all.simu = is.enzyme;
is.enzyme.simu = is.enzyme;
if (!chromosome.specific){
is.enzyme.simu = is.enzyme.simu[sample.int(length(is.enzyme.simu))]
}
for (i in 1:length(chrs)){
chr = chrs[i]
idx = chr.ranges[chr,1]:chr.ranges[chr,2]
if (L.gene[[chr]] < window.size){
labels.succ.local.all.simu[idx] = 0;
next
}
if (chromosome.specific){
seq.simu = is.enzyme[idx]
seq.simu = seq.simu[sample.int(length(seq.simu))]
} else{
seq.simu = is.enzyme.simu[idx]
}
labels.succ.local = label.successes.local.max(seq.simu, window.size) # only keep the local max that are greater than 0
labels.succ.local[labels.succ.local<0] = 0;
labels.succ.local.all.simu[idx] = labels.succ.local
}
labels.succ.local.all.simus[[r]] = labels.succ.local.all.simu[labels.succ.local.all.simu>epsilon];
cat(paste(rep('\b',nchar(txt)), collapse = ''))
}
# dat = rbind(data.frame(score = unlist(labels.succ.local.all.simus), data ='simulation'),
# data.frame(score = labels.succ.local.all, data = 'real genome'))
#
# hist.by(dat$score, by = dat$data, by.name = '')
pdf(paste(tag, '_TrueClustersEstimates.pdf', sep=''), 5,4)
dd = distribution.diff(sample=labels.succ.local.all, null.samples=labels.succ.local.all.simus, tag = '')
dev.off()
################ output top predictions
anno.df = as.data.frame(anno)
for (i in 1:length(anno.df)){
if (class(anno.df[[i]])!='integer')
anno.df[[i]] = unlist2(anno.df[[i]])
}
anno.df[, 'score'] = labels.succ.local.all.wZeros
anno.df[, 'p.value'] = dd$score2pvalue(labels.succ.local.all.wZeros)
anno.df[, 'fdr'] = dd$score2fdr(labels.succ.local.all.wZeros)
anno.df[, '#true'] = dd$score2ntrue(labels.succ.local.all.wZeros)
# mark the whole clusters
anno.df[, 'cluster.ID'] = ''
l = window.size;
succ.loc.count = 0;
gene.ranges = c();
for (i in which(anno.df[, 'score']>0)){
succ.loc.count = succ.loc.count+1;
st = max((i-l+1),chr.ranges[anno.df$seqnames[i],'start'])
anno.df[st:i, 'score'] = rowMax(cbind(anno.df[st:i, 'score'], anno.df[i, 'score']))
anno.df[st:i, 'p.value'] = rowMin(cbind(anno.df[st:i, 'p.value'], anno.df[i, 'p.value']))
anno.df[st:i, 'cluster.ID'] = paste(anno.df[st:i, 'cluster.ID'], paste('S', succ.loc.count,sep=''))
gene.ranges = rbind(gene.ranges, c(start=anno.df$ID[st], end=anno.df$ID[i],
ID = paste('S', succ.loc.count,sep=''), p.value=anno.df[i, 'p.value']))
}
gene.ranges = as.data.frame(gene.ranges); gene.ranges$p.value = as.numeric(as.character(gene.ranges$p.value))
# select top window and run clusters
to.output.windows = anno.df[,'p.value'] < p.value.cutoff;
write.table(gene.ranges, file = paste(tag,'_geneRanges_all.tsv', sep=''), sep='\t')
gene.ranges = gene.ranges[gene.ranges$p.value < p.value.cutoff,]
write.table(gene.ranges, file = paste(tag,'_geneRanges_filtered.tsv', sep=''), sep='\t')
# how many top clusters are included?
s.names = anno.df[to.output.windows, 'cluster.ID']
s.names = strsplit(paste(s.names,collapse=' '), '\\s+',perl=T)[[1]];
uc = unique.count(s.names)
n.clusters.localwindows = sum(uc$counts.unique==window.size)
out.names = c(intersect(c('seqnames', 'start', 'end', 'ID', 'Note', 'orf_classification', 'Gene'),colnames(anno.df)),
colnames(anno.df)[ncol(anno.df)-5+c(5,1:4)])
if (outformat=='csv'){
write.table(anno.df[to.output.windows,out.names], file=paste('cluster.anno.', tag, '.p', p.value.cutoff, '.NWindowClusters',n.clusters.localwindows, '.csv',sep=''),sep=',', row.names=F)
}else if (outformat=='tab'){
write.table(anno.df[to.output.windows,out.names], file=paste('cluster.anno.', tag, '.p', p.value.cutoff, '.NWindowClusters',n.clusters.localwindows, '.tab',sep=''),sep='\t', row.names=F, quote = F)
}
# write clean per cluster output, 20140611
  # Summarize predicted NP gene clusters (NPGCs) into one tab-separated row per
  # cluster and evaluate them against known secondary-metabolism (SM) genes.
  # NOTE(review): nested closure -- the default arguments (to.output.windows,
  # window.size, tag, p.value.cutoff, n.clusters.localwindows) and the free
  # variable `dd` are resolved lazily from the enclosing function's environment.
  # Returns a named count vector: known/matched SM and PKS genes plus the number
  # of predicted clusters that contain a known SM gene.
  write.NPGC <- function(anno.df, i.new.NPG = to.output.windows, window.size=window.size,
                         file.out=paste('cluster.anno.clean', tag, '.p', p.value.cutoff,
                                        '.NWindowClusters',n.clusters.localwindows, '.tab',sep='')){
    # 20140613
    # Flag known SM genes and polyketide synthases from the free-text Note column.
    is.SM = regexpr(pattern='secondary metab', text = as.character(as.vector(anno.df$Note)), perl=T, ignore.case=T)>0
    is.PKS = regexpr(pattern='polyketide synthase', text = as.character(as.vector(anno.df$Note)), perl=T, ignore.case=T)>0
    # Collect all cluster IDs attached to the selected genes (space-separated
    # multi-membership is split apart), then keep only "full" window clusters,
    # i.e. IDs observed exactly window.size times.
    all.SID = anno.df$cluster.ID[i.new.NPG]
    all.SID = strsplit(paste(all.SID,collapse=' '), '\\s+',perl=T)[[1]];
    uc = unique.count(all.SID)
    cluster.names = names(uc$counts.unique[uc$counts.unique==window.size])
    clean.table = matrix('',nrow=length(cluster.names),ncol=8,
                         dimnames=list(cluster.names, c('cluster ID', 'chr', 'coordinate', 'gene range', 'min distance to SM genes', 'closest SM gene(s)', 'p-value', 'cluster gene annotations')));
    n.correct.cluster = 0;
    for (nc in cluster.names){
      i.match = regexpr(paste(nc,'(\\s|$)',sep=''), anno.df$cluster.ID)>0
      ## get closest SM
      # Gene-index distances from every cluster gene (rows) to every SM gene
      # on the same chromosome (columns).
      chr = unique(anno.df$seqnames[i.match])
      loc.SM = t(which(is.SM & anno.df$seqnames==chr))
      loc.cluster = t(t(which(i.match)))
      dist.to.SM = repmat(loc.cluster,1,length(loc.SM)) - repmat(loc.SM, length(loc.cluster),1)
      # Inf guard handles chromosomes that carry no known SM gene.
      min.dist.to.SM = min(c(Inf, abs(dist.to.SM)))
      #if (min.dist.to.SM)
      if (!min.dist.to.SM) # 20140720
        # Distance 0 means a known SM gene lies inside the predicted cluster.
        n.correct.cluster = n.correct.cluster + 1
      closest.SM = which(abs(dist.to.SM)==min.dist.to.SM,arr.ind=T)
      if (!is.null(closest.SM) && length(closest.SM)>0){
        min.dist.to.SM = paste(dist.to.SM[closest.SM], collapse='...')
        closest.SM = loc.SM[closest.SM[,2]]
      }
      # cluster coordinates
      min.l = min(c(anno.df$start[i.match], anno.df$end[i.match], Inf))
      max.l = max(c(anno.df$start[i.match], anno.df$end[i.match], Inf))
      # cluster gene ranges
      first.gene = anno.df$ID[min(which(i.match))]
      last.gene = anno.df$ID[max(which(i.match))]
      # cluster all gene annotations;
      cluster.anno = paste(anno.df$ID[i.match], anno.df$Note[i.match], sep='|', collapse='\t')
      matchedSM.anno = paste(anno.df$ID[closest.SM], anno.df$Note[closest.SM], sep='|', collapse='...')
      clean.table[nc, ] = c(nc,chr, paste(min.l, '-', max.l),
                            paste(first.gene, '-', last.gene), min.dist.to.SM,
                            matchedSM.anno, min(anno.df[i.match,'p.value']), cluster.anno)
    }
    # Emit commented header lines describing the output columns, then the table.
    write(x='#Some of the predicted clusters are overlapping. They may indicate a larger cluster if the clusters significantly overlap (according to the coordiates in column 3).', file=file.out, append=F)
    write(x='#Column 5 gives the distance of the cluster to the closest known secondary metabolite genes', file=file.out, append=T)
    write(x='#Column 5, 0 means known SM genes are within the predicted cluster', file=file.out, append=T)
    write(x='#Column 6 gives the gene names and annotations of the closest SM gene(s)', file=file.out, append=T)
    write(x='#Column 5 and column 6, when there are multiple closest SM genes, they are separated by ...', file=file.out, append=T)
    write(x='#Column 8+ gives the gene names and annotations of the genes in the predicted cluster', file=file.out, append=T)
    write(x=paste('#Estimated No. true NP gene clusters:',dd$n.pos), file=file.out, append=T)
    suppressWarnings(write.table(clean.table, file=file.out,sep='\t', row.names=F, quote = F, append=T))
    # n.SM.cluster = sum((diff(which(is.SM))>1) | (diff.str(anno.df$seqnames[is.SM])))+1
    # number of known SM gene clusters cannot be determined accurately
    # Evaluation counts returned to the caller.
    out = c(sum(is.SM),sum(is.PKS), sum(i.new.NPG & is.SM),
            sum(i.new.NPG & is.PKS), n.correct.cluster);
    names(out) = c('#known SM genes', '#known PKSs', '#matched SM genes', '#matched PKS genes',
                   '#matched SM clusters')
    return(out)
  }
a = write.NPGC(anno.df, i.new.NPG = to.output.windows, window.size=window.size,
file.out=paste('cluster.annoCompact.', tag, '.p', p.value.cutoff, '.NWindowClusters',n.clusters.localwindows, '.tab',sep=''))
n.unknowns = sum(regexpr(pattern='Protein of unknown function', text = annotation.text, perl=T)>0) # 20140529
n.genes = length(anno)
cat('number of genes', n.genes, '\n')
cat('number with unknown function', n.unknowns, '\n')
return(gene.ranges)
}
indicible.promoters <- function(gseID, platformID = ""){
  # Download a GEO expression series, map probes to gene symbols, and report
  # genes whose expression correlates with ADH2 (candidate inducible promoters).
  # gseID      : GEO series accession (e.g. "GSE12345").
  # platformID : optional GEO platform (GPL) accession used to pick the right
  #              sub-series when the GSE spans several platforms.
  # Side effects: writes <gseID>.xls, <gseID>_ADH2_associated.xls and
  # <gseID>clustering.pdf to the working directory; draws a histogram of
  # per-gene SDs. Relies on GEOquery::getGEO and the project helpers
  # exprs.gene, cor.wrap, heatmap.quick.geMat.
  # load series and platform data from GEO
  gset <- getGEO(gseID, GSEMatrix =TRUE)
  # Bug fix: the original used vectorized `&`; with platformID = NULL the
  # condition collapsed to logical(0) and if() raised an error. Short-circuit
  # && evaluates left-to-right and stops at the first FALSE.
  if (length(gset) > 1 && !is.null(platformID) && platformID != "") {
    idx <- grep(platformID, attr(gset, "names"))
  } else {
    idx <- 1
  }
  gset <- gset[[idx]]
  fvarLabels(gset) <- make.names(fvarLabels(gset))
  desc = gset@phenoData@data
  geMat = exprs.gene(gset, ID.type = 'symbol', remove.unmappable = T, coding.only = F)
  colnames(geMat) = desc$title
  write.table(geMat,file = paste(gseID, '.xls', sep=''), sep='\t', col.names = NA)
  SD = rowSds(geMat)
  hist(SD)
  # Correlate every gene with ADH2 and keep the positively associated ones
  # (correlation > 0.4).
  r = cor.wrap(t(geMat['ADH2',,drop=F]), t(geMat))
  g.inducible = sort(r[,r>0.4],decreasing = T)
  write.table(t(g.inducible), file = paste(gseID, '_ADH2_associated.xls', sep=''), sep='\t')
  pdf(paste(gseID, 'clustering.pdf',sep=''),width = 200,height = 200)
  heatmap.quick.geMat(geMat,col.by.all = T, id.type = 'symbol')
  heatmap.quick.geMat(geMat[g.inducible,],col.by.all = T,sd.cutoff = 0)
  dev.off()
  # ICA modules of genes
  ### ICA do -- ICA-based module detection was planned but never implemented.
}
get.codon.usage <- function(DNA.file, gff.file, using.existing.file = T){
  # 20120507, Yong Fuga Li
  # Compute a per-gene codon usage table (64 codons x genes) for a genome.
  # DNA.file : genome FASTA; gff.file : annotation consumed by AUGUSTUS'
  # getAnnoFasta.pl to extract coding sequences.
  # using.existing.file : if TRUE and the cached <gff basename>.cut table
  # exists, reuse it instead of re-extracting CDSs.
  # Returns the codon-count matrix (rows = 64 codons of GENETIC_CODE,
  # columns = genes); also caches it to the .cut file.
  cu.file = sub('.[^\\.]*$', '.cut', x = gff.file) # codon usage table (cache), derived from the gff file name
  # Bug fix: scalar if() condition now uses short-circuit || instead of the
  # vectorized | operator.
  if (!using.existing.file || !file.exists(cu.file)){
    # Extract coding sequences with AUGUSTUS' getAnnoFasta.pl helper script.
    system(paste('getAnnoFasta.pl --seqfile=', DNA.file, ' ', gff.file, sep=''))
    cds.file = list.files(pattern=sub('.[^\\.]*$', '.*.codingseq', x = gff.file))
    cdss = read.fasta(cds.file)
    gcounts = matrix(data = 0,64, nrow(cdss), dimnames = list(codon=names(GENETIC_CODE), gene = rownames(cdss)))
    codes = rownames(gcounts)
    # seq_len() is safe when no coding sequences were extracted (1:0 would
    # iterate twice).
    for (i in seq_len(nrow(cdss))){
      s = toupper(cdss[i,'seq']);
      l = nchar(s);
      # Split the CDS into consecutive, non-overlapping codons (3-mers).
      ss = substring(s, first = seq(1, l, 3), last = seq(3, l, 3))
      uc = unique.count(ss)$counts.unique
      # mat.fill.row aligns the observed codon counts onto all 64 codons.
      gcounts[,i] = mat.fill.row(uc, codes)
    }
    write.table(gcounts,file = cu.file, quote = F, sep = '\t', row.names = T, col.names = NA)
  }else{
    gcounts = as.matrix(read.table(file = cu.file, header = T, row.names = 1, check.names=F))
  }
  return(gcounts)
}
sdf2smiles <- function (sdf)
{
  # Convert a ChemmineR SDFset compound set to SMILES strings via OpenBabel's
  # convertFormat(). Returns a character vector of SMILES, named by compound
  # title when the conversion output provides one.
  require(ChemmineR)
  # Bug fix: use inherits() instead of `!class(sdf) == "SDFset"`. class() can
  # return a multi-element vector, which makes the == comparison unreliable
  # inside if() (an error since R 4.2).
  if (!inherits(sdf, "SDFset")) {
    stop("reference compound must be a compound of class \"SDFset\"")
  }
  if (1){ #(.haveOB()) { -- OpenBabel availability check disabled upstream; else branch is currently unreachable
    sdfstrList = as(as(sdf, "SDFstr"), "list")
    defs = paste(Map(function(x) paste(x, collapse = "\n"),
                     sdfstrList), collapse = "\n")
    # Each converted line is "<smiles>\t<name>"; rbind the split fields.
    # (Local renamed from `t`, which shadowed base::t.)
    parts = Reduce(rbind, strsplit(unlist(strsplit(convertFormat("SDF",
                                                                 "SMI", defs), "\n", fixed = TRUE)), "\t", fixed = TRUE))
    if (is.null(dim(parts))) {
      # Single compound: Reduce() returns a plain character vector, not a
      # matrix. (Bug fix: the old `class(t) == "character"` test yields a
      # length-2 logical for matrices under R >= 4.0.)
      smiles = parts[1]
      names(smiles) = parts[2]
    }
    else {
      smiles = parts[, 1]
      if (ncol(parts)>=2)  # name column may be absent
        names(smiles) = parts[, 2]
    }
    return(smiles)
  }
  else {
    message("ChemmineOB not found, falling back to web service version. This will be much slower")
    sdf2smilesWeb(sdf)
  }
}
# Find the best blastp hit, in a target proteome, for each protein in a
# consecutive block of a query proteome.
# NOTE(review): shells out to the NCBI BLAST+ binaries (blastp,
# blast_formatter), which must be on PATH; also relies on the project helpers
# read.fasta, write.fasta and geneRanges2allGenes.
# from.IDs : c(first protein ID, last protein ID, output tag) -- the first two
#            delimit the protein block, the third names the output files.
# Returns the per-query hit table (also written to <tag>.blastp.hits).
best.blast.hits <- function(from.file = 'GCA_000264905.1_Stehi1_protein.faa',
                            from.gff.file = NULL,
                            to.file='Stehi1_GeneCatalog_proteins_20101026.aa.fasta',
                            from.IDs = c('EIM85216', 'EIM85220', '2015KU8'),
                            gene.definition = 'CDS', id.type = 'protein_id'){ # for NCBI gff files
  # 20151001: get the best blast hits for a set of sequences
  # 20151007: add from gff file, so that the internal protein IDs can be extracted accurately from the start and end protein
  fa.from = read.fasta(fasta.files = from.file, type = 'AA')
  proIDs = sub('\\.\\d$', '', rownames(fa.from)) # remove version numbers
  from.IDs = sub('\\.\\d$', '', from.IDs)
  rownames(fa.from) = proIDs
  # Expand the two boundary IDs into the full protein list between them:
  # either by FASTA order, or by gene order in the GFF file when provided.
  if (is.null(from.gff.file)){
    i = sort(match(from.IDs[1:2], proIDs))
    all.IDs = proIDs[i[1]:i[2]]
  }else{
    all.IDs = geneRanges2allGenes(from.gff.file, from.IDs[1:2], gene.definition, id.type)
  }
  all.IDs = sub('\\.\\d$', '', all.IDs)
  fa.from.select = paste(from.IDs[3], '.from.fa', sep='')
  write.fasta(fa.from[all.IDs,], fa.from.select)
  blastp.asn.file = paste(from.IDs[3], '.blastp.asn', sep='')
  blastp.xml.file = paste(from.IDs[3], '.blastp.xml', sep='')
  blastp.hitList = paste(from.IDs[3], '.blastp.hits', sep='')
  no.top.hits = 1
  # Run blastp in archive (ASN.1) format, then reformat to a tabular
  # best-hit list with one subject per query.
  system(paste('blastp -query', fa.from.select, '-num_threads 6 -subject', to.file, '-outfmt 11 -out', blastp.asn.file, '-evalue 1 -max_target_seqs ', no.top.hits))
  # system(paste('blast_formatter -archive', blastp.asn.file, '-outfmt 5 -out', blastp.xml.file, '-max_target_seqs ', no.top.hits))
  system(paste('blast_formatter -archive ', blastp.asn.file, ' -outfmt \'6 qseqid sseqid length pident mismatch gaps\' -out', blastp.hitList, '-max_target_seqs 1'))
  hits = read.table(blastp.hitList, header = F, comment.char = '')
  colnames(hits) = c('qseqid', 'sseqid', 'length', 'pident', 'mismatch', 'gaps')
  # Merge multiple HSPs of the same query/subject pair: identifiers are
  # deduplicated, numeric columns are summed.
  hits = as.data.frame(lapply(hits, FUN = function(x){unlist(as.list(by(x, paste(hits$qseqid, hits$sseqid, sep=':'), # 20151016, handle the case where there are more than 1 hits per target protein
                                                                        function(y){
                                                                          if (is.character(y) | is.factor(y))
                                                                            return(unique(as.character(y)))
                                                                          else if(is.numeric(y))
                                                                            return(sum(y))
                                                                          else
                                                                            return(y[1])})))}))
  rownames(hits) = hits$qseqid
  hits = hits[all.IDs,] # 20151023: keep original gene orders
  # Recompute identity over the merged alignment and the fraction of each
  # query covered by its alignment.
  hits$pident = 1-(hits$mismatch+hits$gaps)/hits$length
  hits$paln = hits$length/sapply(fa.from[all.IDs,1], nchar)[rownames(hits)] # percentage of query sequences aligned
  write.table(hits, blastp.hitList, sep='\t', row.names = F, quote = F)
  return(hits)
}
lineage.map <- function(queries = c('Aspergillus','Penicillium', 'Epichloe', 'Fusarium'), Rank = 'class', SubTree = 'Fungi'){
  # Map each taxonomy query to its ancestor taxon of the requested rank
  # (e.g. genus -> class) within the given NCBI subtree.
  # Yong Fuga Li, 20151106
  # Returns a two-column (from, to) matrix aligned back onto the original,
  # possibly duplicated, query order via mat.fill.row().
  original.queries <- queries
  queries <- unique(queries)
  # Fetch all taxa of the target rank inside the subtree, plus the lineage
  # of every query.
  rank.taxa <- ncbiTaxonomy(paste(SubTree, '[SubTree] AND ', Rank, '[Rank]', sep=''), FALSE)
  query.lineage <- ncbiTaxonomy(queries, FALSE)
  # For each rank-level taxon, find the query lineages passing through it.
  hit.idx <- unlist.dupenames(sapply(rank.taxa$name, function(taxon) grep(taxon, query.lineage$lineage)))
  mapping <- cbind(from = query.lineage$name[hit.idx], to = names(hit.idx))
  rownames(mapping) <- mapping[,'from']
  mapping <- mat.fill.row(mapping, original.queries)
  return(mapping)
}
taxonomy.lineage.overlap.v1 <- function(species1, species2){
  # 20151006, YF Li
  # Pairwise lineage comparison between two species sets. For every pair it
  # takes the longest common character prefix of the two NCBI lineages
  # (lcprefix, from the `genomes` package) and counts how many
  # "; "-separated ranks that prefix spans.
  # Returns list(common.lineage = character matrix, overlaps = count matrix),
  # both indexed by species name.
  require('genomes')
  species1 <- unique(species1)
  species2 <- unique(species2)
  lin.a <- ncbiTaxonomy(species1, FALSE)
  lin.b <- ncbiTaxonomy(species2, FALSE)
  n.a <- nrow(lin.a)
  n.b <- nrow(lin.b)
  dims <- list(lin.a$name, lin.b$name)
  overlaps <- matrix(1, nrow = n.a, ncol = n.b, dimnames = dims)
  common <- matrix('', nrow = n.a, ncol = n.b, dimnames = dims)
  for (row in seq_len(n.a)){
    for (col in seq_len(n.b)){
      prefix.len <- lcprefix(lin.a$lineage[row], lin.b$lineage[col])
      common[row, col] <- substr(lin.a$lineage[row], 1, prefix.len)
      overlaps[row, col] <- length(strsplit(common[row, col], split = '; ')[[1]])
    }
  }
  return(list(common.lineage = common, overlaps = overlaps))
}
taxonomy.lineage.overlap <- function(species1, species2=NULL){
  # 20151006, YF Li
  # 20150324: allow species2 to be null
  # Pairwise NCBI-taxonomy lineage comparison between two species sets.
  # For every (species1[i], species2[j]) pair it records the shared lineage
  # prefix, its depth (number of shared ranks) and a length-normalized
  # similarity overlap / sqrt(len1 * len2).
  # species2 = NULL compares species1 against itself.
  # Returns list(common.lineage, overlaps, similarity), all matrices indexed
  # by the sorted unique species names.
  require('genomes')
  Sys.setenv(email='yonli@umail.iu.edu')
  species1 = sort(unique(species1))
  #lineage1 = ncbiTaxonomy(species1, FALSE)
  # lineage1 = t(sapply(species1, function(x) ncbiTaxonomy(x, FALSE)))
  lineage1 = t(sapply(species1, function(x){y = ncbiTaxonomy(x, FALSE); y[5] = paste(y[5],'; ',x, sep=''); return(y)})) # 20160324, the lineage does not contain the last level, adding it to obtain better species similarity matrix
  if (all(is.null(species2))){
    species2 = species1;
    lineage2 = lineage1;
  }else{
    species2 = sort(unique(species2))
    #lineage2 = ncbiTaxonomy(species2, FALSE)
    lineage2 = t(sapply(species2, function(x) {y = ncbiTaxonomy(x, FALSE); y[5] = paste(y[5],'; ',x, sep=''); return(y)})) # 20160324,
  }
  n1 = nrow(lineage1); n2 = nrow(lineage2)
  overlaps = matrix(1, nrow = n1, ncol= n2, dimnames = list(species1, species2))
  common = matrix('', nrow = n1, ncol= n2, dimnames = list(species1, species2))
  # Per-species lineage depths, filled in while looping; used for the
  # normalized similarity at the end.
  Len1 = vector('numeric', length = nrow(lineage1))
  Len2 = vector('numeric', length = nrow(lineage2))
  for (i in 1:n1){
    for (j in 1:n2){
      if (0){
        # Old (disabled) implementation: character-level common prefix.
        common[i,j] = substr(lineage1[i,'lineage'], 1, lcprefix(lineage1[i,'lineage'][[1]], lineage2[j,'lineage'][[1]]))
        overlaps[i,j] = length(strsplit(common[i,j], split = '; ')[[1]])
      }else{ # 20160324
        # Rank-level comparison: count the leading "; "-separated ranks the
        # two lineages share.
        L1 = strsplit(lineage1[i,'lineage'][[1]], split = '; ')[[1]]
        L2 = strsplit(lineage2[j,'lineage'][[1]], split = '; ')[[1]]
        Len1[i] = length(L1); Len2[j] = length(L2);
        l = min(Len1[i], Len2[j])
        # Index of the first mismatching rank minus one = shared depth;
        # the appended TRUE handles the "one lineage is a prefix" case.
        overlaps[i,j] = min(which(c(L1[1:l]!=L2[1:l], T)))-1
        common[i,j] = paste(L1[seq2(1, overlaps[i,j],1)], collapse = '; ')
      }
    }
  }
  # 20160324 & 20160527-- compute normalized similarity
  # max.overlaps = diag(1/sqrt(diag(overlaps)))
  sim = diag(1/sqrt(Len1), nrow = length(Len1)) %*% overlaps %*% diag(1/sqrt(Len2), nrow = length(Len2));
  rownames(sim) <- rownames(overlaps);
  colnames(sim) <- colnames(overlaps);
  return(list(common.lineage = common, overlaps = overlaps, similarity = sim))
}
select.ModelSpecies.v1 <- function(query.species){
  # 20151006, YF Li
  # version 1, do so manually using NCBI common Tree functionality
  # ref: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3901538/
  # ref: http://www.ncbi.nlm.nih.gov/books/NBK179288/
  # ref: http://www.chnosz.net/manual/taxonomy.html
  # Build NCBI Taxonomy browser URLs combining the query species/genera with a
  # hard-coded list of model organisms, so the user can inspect the common
  # tree manually. Returns list(URL.species, URL.genus). Prints the overlaps
  # between queries and model species/genera as a side effect.
  require('taxize')
  require('genomes')
  q.lineage = ncbiTaxonomy(query.species, FALSE)
  # system(paste('epost -db ', from.db, ' -id ', paste(uids[((k-1)*max.query+1):(min(k*max.query,length(uids)))], collapse = ','), '| ', ' elink -target ', to.db, ' -cmd neighbor | xtract -pattern LinkSet -element Id > ', out.file, sep='')) # -block Stat -match @type:PubMed -element @count
  # Genus = first whitespace-delimited token of each species name.
  query.genus = sub('^(\\S+)\\s.*$', '\\1',query.species) # sapply(strsplit(query.species, ' '), rbind)[1,]
  # Cryptococcus neoformans gattii ==> Cryptococcus gattii
  # Cryptococcus neoformans neoformans ==> Cryptococcus neoformans
  # Hard-coded model-organism list (intentional duplicates in the original).
  model.species = strsplit('Homo sapiens, Drosophila melanogaster, Arabidopsis thaliana, Brugia malayi, Aedes aegypti, Tribolium castaneum, Schistosoma mansoni, Tetrahymena thermophila, Galdieria sulphuraria, Zea mays, Toxoplasma gondii, Caenorhabditis elegans, Caenorhabditis elegans , Aspergillus fumigatus, Aspergillus nidulans, Aspergillus nidulans, Aspergillus oryzae, Aspergillus terreus, Botrytis cinerea, Candida albicans, Candida guilliermondii, Candida tropicalis, Chaetomium globosum, Coccidioides immitis, Coprinus cinereus, Coprinus cinereus, Nicotiana attenuata, Cryptococcus gattii, Cryptococcus neoformans, Debaryomyces hansenii, Encephalitozoon cuniculi, Eremothecium gossypii, Fusarium graminearum, Fusarium graminearum, Histoplasma capsulatum, Histoplasma capsulatum, Kluyveromyces lactis, Laccaria bicolor, Petromyzon marinus, Leishmania tarentolae, Lodderomyces elongisporus, Magnaporthe grisea, Neurospora crassa, Neurospora crassa, Phanerochaete chrysosporium, Phanerochaete chrysosporium, Pichia stipitis, Rhizopus oryzae, Saccharomyces cerevisiae, Saccharomyces cerevisiae, Saccharomyces cerevisiae, Schizosaccharomyces pombe, Thermoanaerobacter tengcongensis, Trichinella spiralis, Ustilago maydis, Ustilago maydis, Yarrowia lipolytica, Nasonia vitripennis, Solanum lycopersicum, Chlamydomonas reinhardtii, Amphimedon queenslandica, Pneumocystis jirovecii, Triticum aestivum, Gallus gallus, Danio rerio, Escherichia coli, Staphylococcus aureus', split = ', ')[[1]]
  model.genus = sub('^(\\S+)\\s.*$', '\\1',model.species)
  cat('\nOverlapping with model species:')
  cat(intersect(tolower(query.species), tolower(model.species)));
  cat('\nOverlapping with model genus:')
  cat(intersect(toupper(query.genus), toupper(model.genus)));
  cat('\n')
  # OR-joined Entrez taxonomy queries at species and genus level.
  q.species = paste(c(query.species, unique(model.species)), collapse = ' OR ')
  q.genus = paste(c(query.genus, unique(model.genus)), collapse = ' OR ')
  URL1 = paste('http://www.ncbi.nlm.nih.gov/taxonomy/?term=', q.species, sep='')
  URL2 = paste('http://www.ncbi.nlm.nih.gov/taxonomy/?term=', q.genus, sep='')
  return(list(URL.species = URL1, URL.genus = URL2))
}
select.ModelSpecies <- function(query.species, simplify = T){
  # 20151006, YF Li
  # version 2: do so automatically based on the largest linear overlaps
  # ref: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3901538/
  # ref: http://www.ncbi.nlm.nih.gov/books/NBK179288/
  # ref: http://www.chnosz.net/manual/taxonomy.html
  # Automatically match each query species to the model organism(s) whose NCBI
  # lineage shares the deepest prefix, using taxonomy.lineage.overlap().
  # simplify = TRUE swaps in a reduced model-organism list (fewer Aspergilli).
  # Returns list(matches, all.matches): the single best model species per
  # query, and all tied best matches.
  require('taxize')
  require('genomes')
  query.species = unique(query.species)
  query.genus = sub('^(\\S+)\\s.*$', '\\1',query.species) # sapply(strsplit(query.species, ' '), rbind)[1,]
  query.genus = unique(query.genus)
  # Cryptococcus neoformans gattii ==> Cryptococcus gattii
  # Cryptococcus neoformans neoformans ==> Cryptococcus neoformans
  model.species = strsplit('Homo sapiens, Drosophila melanogaster, Arabidopsis thaliana, Brugia malayi, Aedes aegypti, Tribolium castaneum, Schistosoma mansoni, Tetrahymena thermophila, Galdieria sulphuraria, Zea mays, Toxoplasma gondii, Caenorhabditis elegans, Aspergillus fumigatus, Aspergillus nidulans, Aspergillus oryzae, Aspergillus terreus, Botrytis cinerea, Candida albicans, Candida guilliermondii, Candida tropicalis, Chaetomium globosum, Coccidioides immitis, Coprinus cinereus, Coprinus cinereus, Nicotiana attenuata, Cryptococcus gattii, Cryptococcus neoformans, Debaryomyces hansenii, Encephalitozoon cuniculi, Eremothecium gossypii, Fusarium graminearum, Fusarium graminearum, Histoplasma capsulatum, Histoplasma capsulatum, Kluyveromyces lactis, Laccaria bicolor, Petromyzon marinus, Leishmania tarentolae, Lodderomyces elongisporus, Magnaporthe grisea, Neurospora crassa, Neurospora crassa, Phanerochaete chrysosporium, Phanerochaete chrysosporium, Pichia stipitis, Rhizopus oryzae, Saccharomyces cerevisiae, Saccharomyces cerevisiae, Saccharomyces cerevisiae, Schizosaccharomyces pombe, Thermoanaerobacter tengcongensis, Trichinella spiralis, Ustilago maydis, Ustilago maydis, Yarrowia lipolytica, Nasonia vitripennis, Solanum lycopersicum, Chlamydomonas reinhardtii, Amphimedon queenslandica, Pneumocystis jirovecii, Triticum aestivum, Gallus gallus, Danio rerio, Escherichia coli, Staphylococcus aureus', split = ', ')[[1]]
  if (simplify){
    # Reduced model list (a single Aspergillus representative, etc.).
    model.species = strsplit('Homo sapiens, Drosophila melanogaster, Arabidopsis thaliana, Brugia malayi, Aedes aegypti, Tribolium castaneum, Schistosoma mansoni, Tetrahymena thermophila, Galdieria sulphuraria, Zea mays, Toxoplasma gondii, Caenorhabditis elegans, Aspergillus nidulans, Botrytis cinerea, Candida albicans, Candida guilliermondii, Candida tropicalis, Chaetomium globosum, Coccidioides immitis, Coprinus cinereus, Coprinus cinereus, Nicotiana attenuata, Cryptococcus gattii, Cryptococcus neoformans, Debaryomyces hansenii, Encephalitozoon cuniculi, Eremothecium gossypii, Fusarium graminearum, Fusarium graminearum, Histoplasma capsulatum, Histoplasma capsulatum, Kluyveromyces lactis, Laccaria bicolor, Petromyzon marinus, Leishmania tarentolae, Lodderomyces elongisporus, Magnaporthe grisea, Neurospora crassa, Neurospora crassa, Phanerochaete chrysosporium, Phanerochaete chrysosporium, Pichia stipitis, Rhizopus oryzae, Saccharomyces cerevisiae, Saccharomyces cerevisiae, Saccharomyces cerevisiae, Schizosaccharomyces pombe, Thermoanaerobacter tengcongensis, Trichinella spiralis, Ustilago maydis, Ustilago maydis, Yarrowia lipolytica, Nasonia vitripennis, Solanum lycopersicum, Chlamydomonas reinhardtii, Amphimedon queenslandica, Pneumocystis jirovecii, Triticum aestivum, Gallus gallus, Danio rerio, Escherichia coli, Staphylococcus aureus', split = ', ')[[1]]
  }else{
  }
  model.species = unique(model.species)
  model.genus = sub('^(\\S+)\\s.*$', '\\1',model.species)
  model.genus = unique(model.genus)
  cat('\nOverlapping with model species:')
  cat(intersect(tolower(query.species), tolower(model.species)));
  cat('\nOverlapping with model genus:')
  cat(intersect(toupper(query.genus), toupper(model.genus)));
  cat('\n')
  # Lineage-depth overlap of every query against every model organism.
  overlap.mat = taxonomy.lineage.overlap(query.species, model.species)
  # Single best match per query (ties broken by which.max = first column).
  matches = cbind(# query.species = rownames(overlap.mat$overlaps),
    best.model.species = colnames(overlap.mat$overlaps)[apply(overlap.mat$overlaps, 1, FUN = which.max)],
    overlap = apply(overlap.mat$overlaps, 1, FUN = max))
  # All tied best matches per query.
  best.model.species = apply(overlap.mat$overlaps, 1, FUN = function(x)names(which(x==max(x))))
  if (is.matrix(best.model.species)){
    best.model.species = unlist.dupenames(as.list(as.data.frame(best.model.species, stringsAsFactors = F)))
  }
  all.matches = cbind(best.model.species = unlist.dupenames(best.model.species),
                      overlap = unlist.dupenames(apply(overlap.mat$overlaps, 1, FUN = function(x) x[x==max(x)])))
  return(list(matches, all.matches))
}
### HGT detection using phymltest (PhyML in ape): http://www.inside-r.org/packages/cran/ape/docs/phymltest
### ref: http://journals.plos.org/plosgenetics/article?id=10.1371/journal.pgen.1003035
### To confirm the non-metazoan origin of the sequences with hU≥30 and at least one significant metazoan hit, each transcript meeting these conditions was translated and aligned using ClustalW2 to the output (the best hits for each of the five taxa) of the previous blastx analysis. Each alignment was then trimmed to exclude regions where only one of the sequences was present, and phylogenetic trees were built in PhyML from amino-acids sequences using a JTT model [15];
gff.addgene <- function(gff.file, gene.definition = 'CDS', ID = 'proteinId', out.file){
  # 20151110, add gene features to gff files
  # Yong Fuga Li
  # Derive one 'gene' feature per protein by taking the range() of all its
  # `gene.definition` records (default: CDS), append those gene features to
  # the annotation and write it back out as GFF3.
  # gff.file        : input GFF/GFF3 path.
  # gene.definition : feature type to group into genes.
  # ID              : metadata column holding the grouping identifier.
  # out.file        : output GFF3 path.
  # anno = read.gff3(meta$gff.file[i])
  anno = import.gff(gff.file) # 20160502
  anno.c = anno[anno$type==gene.definition]
  # Bug fix: group on the column named by the `ID` argument. The original
  # hard-coded anno.c$proteinId, silently ignoring the parameter.
  # Each group is collapsed to one spanning range carrying the metadata of
  # its first record.
  a = tapply(anno.c, anno.c@elementMetadata[[ID]], FUN = function(x){y = range(x);
                                                                    y@elementMetadata = x@elementMetadata[1,];
                                                                    return(y)}, simplify=T)
  # Concatenate the per-protein ranges into one GRanges of gene features.
  anno.g = c()
  for (i in a){
    if (!length(anno.g))
      anno.g = i
    else
      anno.g = c(anno.g, i)
  }
  anno.g$type = 'gene'
  anno.g$phase = '.'
  anno.g$exonNumber = NA
  anno = c(anno, anno.g)
  anno = sort.intervals(anno)
  export.gff3(anno, out.file)
}
# Early scratch/prototype of HMM-based NP gene cluster (NPGC) learning.
# NOTE(review): unfinished development code -- it hard-codes machine-specific
# setwd() paths, overwrites its gff.files/iprscan.tab.files arguments,
# contains if (0) dead branches, and stops after defining n.hidden without
# training anything or returning a useful value. Kept for reference only.
NPscanv0 <- function(gff.files = NULL, iprscan.tab.files = NULL){
  # HMM based learning and prediction of NPGC based on domain annotation information
  require(HMM) # ref: http://web.stanford.edu/class/stats366/hmmR2.html;
  require(hmm.discnp)
  require(Rhmm)
  # require(NHMM)
  require(G1DBN)
  # require(HiddenMarkov)
  # require('ebdbNet')
  # dynamic naive bayes; dynamic bayesian network, generalized HMM
  # http://stackoverflow.com/questions/17696547/hidden-markov-models-package-in-r
  # http://a-little-book-of-r-for-bioinformatics.readthedocs.org/en/latest/src/chapter10.html
  # Dec.2015, Jan. 2016
  # Hard-coded default genome: Aspergillus zonatus v1.0 (JGI).
  setwd('/Users/yongli/Universe/write/Project_Current/9.O.NPbioinformatics/AllFungalGenomes/Aspergillus_Binary')
  gff.file = 'Aspzo1.filtered_proteins.GeneCatalog_2013_12_03_15_38.gff3' # Aspergillus zonatus v1.0
  iprscan = 'Aspzo1_GeneCatalog_proteins_20121010_IPR.tab'
  if (0){
    # Disabled alternative: Aspergillus nidulans (AspGD annotation).
    setwd('/Users/yongli/Universe/write/Project_Current/9.O.NPbioinformatics/AllFungalGenomes/Aspergillus_Binary')
    gff.file = 'Aspnid1.filtered_proteins.AspGD_genes.gff3' # Aspergillus nidulans from AspGD
    iprscan = 'Aspnid1_GeneCatalog_proteins_20110130_IPR.tab'
  }
  if (0){
    # Disabled alternative: Pleurotus ostreatus PC15 v2.0.
    setwd('/Users/yongli/Universe/write/Project_Current/9.O.NPbioinformatics/AllFungalGenomes/Pleurotus/')
    gff.file = 'PleosPC15_2.filtered_proteins.FilteredModels1.gff3' # Pleurotus ostreatus PC15 v2.0
    iprscan = 'PleosPC15_2_domaininfo_FilteredModels1.tab'
    signalP = 'PleosPC15_2_signalp_FilteredModels1.tab'
    ECpathway = 'PleosPC15_2_ecpathwayinfo_FilteredModels1.tab'
  }
  # anno = read.gff3(gff.file)
  anno = import.gff(gff.file) # 20160502
  ipr = iprscan.flat(iprscan, out.type = 'table')
  n.hidden = 2;
}
plot.hmm <- function(hmm = training.trace[[4]]$hmm){
  # 20160203
  # Draw the HMM's transition and emission matrices side by side as
  # white-to-red log-probability heatmaps (lattice levelplots).
  # NOTE(review): despite the dotted name this is used as a plain function,
  # not an S3 plot() method for class "hmm".
  require(lattice)
  require(latticeExtra)
  require(gridExtra)
  require(gplots)
  heat.cols <- colorRampPalette(c('white','red'))(256)
  # Transition matrix: rows = source state, columns = target state.
  trans.log <- log(hmm$transProbs)
  colnames(trans.log) <- NULL
  trans.panel <- levelplot(t(trans.log), col.regions=heat.cols,aspect = 'fill',
                           ylab = 'From', xlab = 'To')
  # Emission matrix: one row per state, one column per emitted domain symbol.
  emis.log <- log(hmm$emissionProbs)
  colnames(emis.log) <- NULL
  emis.panel <- levelplot(t(emis.log), ylab = '', aspect = 'fill',
                          xlab = 'Domains', col.regions=heat.cols)
  # One row, two panels; the (wider) emission panel gets more horizontal room.
  combined <- grid.arrange(trans.panel, emis.panel, nrow=1, ncol=2,
                           widths = c(0.45,0.8), heights = 1)
}
# HMM-based detection of NP (natural product) gene clusters from InterProScan
# domain annotations, v1.
# NOTE(review): work-in-progress code. The tail of the function (after the
# Baum-Welch loop) references several undefined names -- posterior/viterbi are
# called without valid arguments, and vt, gene.ranges, core.regions,
# window.extend, is.enzyme.all, ipr.anno, desc.fname and geneID2clusterID are
# never defined in this scope -- so it will error before returning. Documented
# as-is; do not call without finishing it.
NPscanv1 <- function(species = 'Aspergillus nidulans from AspGD', gff.file=NULL, iprscan.tab.file = NULL,
                     bin.file = NULL, out.file = 'A.nidu.xls',
                     domain.order = F, pseudocount = 0.5,
                     data.root = '/Users/yongli/Dropbox/NPGC/NPGCquery_data',
                     gene.definition = c('gene', 'transcript'), proteinID = 'ID'){
  # find a window of size 15 or less that meet the gene function query criteria
  # YF Li
  # 20141028, 20141111
  # 20150625-27: fixed a bug that occurs when no cluster is found, changed the interface and added species, bin.file
  # separate the gff and iprscan file parsing and the querying code
  require(seqHMM)
  require(HMM) # ref: http://web.stanford.edu/class/stats366/hmmR2.html;
  require(HiddenMarkov)
  require(depmixS4)
  require(CRF)
  require('R2HTML')
  require('xlsx')
  root = getwd()
  # NOTE(review): changes the working directory and never restores `root`.
  setwd(data.root)
  # Resolve the genome either from the metadata table (by species name) or
  # from an explicit binary/gff file.
  if (!is.null(species)){
    meta = read.table('NPGCquery_meta.txt',header = T,as.is = T, sep= '\t', row.names = 1)
    bin.file = meta[species, 'bin.file']
    gff.file=NULL; iprscan.tab.file = NULL;
    proteinID = meta[species, 'proteinID']
    gene.definition = meta[species, 'gene.definition']
  }
  # load() is expected to define anno, ipr.tab (and friends) in this scope.
  if (!is.null(bin.file)){
    load(bin.file)
  }else{
    bin.file = paste('NPGCquery', gff.file, '.RData', sep='')
    get.NPGC.query.bin(gff.file=gff.file, iprscan.tab.file = iprscan.tab.file, bin.file = bin.file,
                       gene.definition = gene.definition, proteinID = proteinID)
    load(bin.file)
  }
  if (0){
    domains = length(unique(ipr.tab$ipr.acc[ipr.tab$analysis %in% c('HMMPfam', 'HMMSmart', 'HMMTigr', 'HMMPIR', 'superfamily', 'BlastProDom')]))
    motifs = length(unique(ipr.tab$ipr.acc[ipr.tab$analysis %in% c('ProfileScan', 'FPrintScan', 'ScanRegExp')]))
  }
  # to.keep = ipr.tab$analysis %in% c('HMMPfam', 'HMMSmart', 'HMMTigr', 'HMMPIR', 'superfamily', 'BlastProDom')
  # Keep only hits that map to an InterPro accession.
  to.keep = ipr.tab$ipr.acc != ''
  ipr.tab = ipr.tab[to.keep,]
  emit.symbols = sort(unique(ipr.tab$ipr.acc))
  ##### prepare training data
  # ignor domain orders
  if (domain.order)
    stop('domain.order not implemented yet')
  # Build one long observation sequence: for every gene (chromosome by
  # chromosome) emit 'b', its unique domain accessions, then 'e'.
  seqs = c()
  accs = by(ipr.tab, ipr.tab$ID, FUN = function(x)unique(x$ipr.acc))
  nDomPerGene = sapply(accs[anno@elementMetadata[,toupper(proteinID)]], FUN = length)
  hist(nDomPerGene, xlab = '#Domains per Gene')
  for (chr in unique(anno@seqnames)){
    for (g in anno@elementMetadata[anno@seqnames == chr,toupper(proteinID)]){
      seqs = c(seqs, 'b', accs[[g]], 'e') # add 'b' and 'e' to indicate protein start and end
    }
  }
  domCounts = unique.count(seqs)$counts.unique[emit.symbols]
  hist(log(domCounts), xlab = 'log(#Genes per Domain)')
  ################
  ### initialize model
  ################
  # define enzymes
  # Two alternative keyword lists mark "enzyme-like" domains; the emission
  # profile of the NPG state is biased toward them.
  domain.anno = paste(ipr.tab$ipr.desc, ipr.tab$signature.desc, sep='~~~');
  domain.anno = by(domain.anno, INDICES = ipr.tab$ipr.acc, FUN = function(x){paste(unique(x), collapse = ';')})
  is.enzyme.EC6 = regexpr(pattern='(oxidoreductase|transferase|hydrolase|lyase|isomerase|ligase)', text =paste(ipr.tab$ipr.desc, ipr.tab$signature.desc), perl=T, ignore.case=T) > 0
  is.enzyme.EC6 = by(is.enzyme.EC6, ipr.tab$ipr.acc, FUN = any)
  is.enzyme.EC6 = is.enzyme.EC6[emit.symbols]
  is.enzyme.MC29e = regexpr(pattern='(oxidoreductase|hydrolase|dehydrogenase|synthase|reductase|transferase|methyltransferase|oxidase|synthetase|monooxygenase|isomerase|dehydratase|decarboxylase|deaminase|O\\-methyltransferase|transaminase|hydratase|acetyltransferase|N\\-acetyltransferase|dioxygenase|aminotransferase|O\\-acyltransferase|esterase|N\\-methyltransferase|acyltransferase|aldolase|O\\-acetyltransferase|cyclase|catalase|hydroxylase|P450|transporter|transcription factor)', text =paste(ipr.tab$ipr.desc, ipr.tab$signature.desc), perl=T, ignore.case=T) > 0
  is.enzyme.MC29e = by(is.enzyme.MC29e, ipr.tab$ipr.acc, FUN = any)
  is.enzyme.MC29e = is.enzyme.MC29e[emit.symbols]
  is.enzyme = is.enzyme.MC29e
  # Pseudocount-smoothed emission profiles for the NPG-domain and other-gene
  # states.
  NPG.initialProfile = domCounts * is.enzyme
  NPG.initialProfile = (NPG.initialProfile + pseudocount)/sum(NPG.initialProfile+pseudocount)
  initialProfile = (domCounts + pseudocount)/sum(domCounts+pseudocount)
  nH = 6; nE = length(emit.symbols);
  # Six-state HMM: {NPG, OG} x {gene begin 'b', domain 'd', gene end 'e'}.
  HMM = initHMM(States = c("NPG.b","NPG.d","NPG.e","OG.b","OG.d","OG.e"),
                Symbols = c('b','e',emit.symbols),
                startProbs = c(.25,0,0.25,0.25,0,0.25),
                transProbs = rbind(t(c(0,0.9,0.1,0,0,0)),
                                   t(c(0,0.8,0.2,0,0,0)),
                                   t(c(0.9,0,0,0.1,0,0)),
                                   t(c(0,0,0,0,0.9,0.1)),
                                   t(c(0,0,0,0,0.8,0.2)),
                                   t(c(0.1,0,0,0.9,0,0))),
                emissionProbs = rbind(t(c(1, rep(0,nE+1))),
                                      t(c(0,0,NPG.initialProfile)),
                                      t(c(0,1,rep(0, nE))),
                                      t(c(1, rep(0, nE+1))),
                                      t(c(0,0,initialProfile)),
                                      t(c(0,1,rep(0, nE)))))
  # HMM.ori
  # HMM.dom
  # HMM.dom.ori
  ################
  # unsupervised learning
  ################
  # Viterbi training in chunks of `step` iterations until convergence;
  # the \b backspaces overwrite the previous progress line on the console.
  step = 2; max.steps = 50; delta=1E-5
  training.trace = list()
  training.trace[['0']] = list(hmm = HMM, difference = Inf)
  s = 1
  while (1){
    cat((s-1)*step, training.trace[[paste(s-1)]]$difference)
    # training.trace[[paste(s)]] = baumWelch(training.trace[[paste(s-1)]]$hmm, observation=seqs, maxIterations=step, delta=delta, pseudoCount=0)
    training.trace[[paste(s)]] = viterbiTraining(training.trace[[paste(s-1)]]$hmm, observation=seqs, maxIterations=step, delta=delta, pseudoCount=pseudocount)
    if (all(training.trace[[paste(s)]]$difference < delta))
      break
    cat('\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b')
    s = s+1
  }
  require('lattice')
  plot.hmm(training.trace[[1]]$hmm)
  plot.hmm(training.trace[[s]]$hmm)
  # Inspect the most NPG-specific / OG-specific / differential domains.
  # NOTE(review): index [[4]] assumes at least 4 training chunks were run.
  top.NPG.domains = sort(training.trace[[4]]$hmm$emissionProbs['NPG.d',], decreasing = T)[1:100]
  top.NPG.domains = cbind(domain.anno[names(top.NPG.domains)], top.NPG.domains)
  top.OG.domains = sort(training.trace[[4]]$hmm$emissionProbs['OG.d',], decreasing = T)[1:100]
  top.OG.domains = cbind(domain.anno[names(top.OG.domains)], top.OG.domains)
  top.diff.domains = sort(training.trace[[4]]$hmm$emissionProbs['NPG.d',]/training.trace[[4]]$hmm$emissionProbs['OG.d',], decreasing = T)[1:100]
  top.diff.domains = cbind(domain.anno[names(top.diff.domains)], top.diff.domains)
  # Refine the Viterbi-trained model with Baum-Welch EM.
  step = 2; max.steps = 50; delta=1E-9
  training.traceBW = list()
  training.traceBW[['0']] = training.trace[[length(training.trace)]]
  s = 1
  while (1){
    cat((s-1)*step, training.traceBW[[paste(s-1)]]$difference)
    training.traceBW[[paste(s)]] = baumWelch(training.traceBW[[paste(s-1)]]$hmm, observation=seqs, maxIterations=step, delta=delta, pseudoCount=0.5)
    if (all(training.traceBW[[paste(s)]]$difference < delta))
      break
    cat('\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b')
    s = s+1
  }
  # NOTE(review): everything below is broken scratch code -- `hmm`,
  # `observation` and `vt` are undefined here.
  posterior(hmm, observation)
  viterbi(hmm, observation)
  image(vt$hmm$transProbs)
  image(HMM$transProbs)
  require(gplots)
  image(log(vt$hmm$emissionProbs),col=greenred(256))
  image(log(HMM$emissionProbs),col=greenred(256))
  # label KUs
  # semisupervised EM learning
  # Predictions
  #### output
  # NOTE(review): gene.ranges, core.regions, window.extend, is.enzyme.all,
  # desc.fname, ipr.anno and geneID2clusterID are undefined in this scope;
  # `anno$PARENT[1] ==c()` is a no-op comparison left over from debugging.
  cat('\n#Identified clusters: ', nrow(gene.ranges))
  to.keep.extend = extend.index(core.regions, window.extend, sides='both', do.unique=T)
  to.keep.extend = to.keep.extend[to.keep.extend<=length(anno) & to.keep.extend>=1]
  anno$PARENT[1] ==c()
  is.enzyme.all[] = c('', 'Yes')[is.enzyme.all+1]
  out = cbind(chr = as.character(anno@seqnames)[], gene=anno$ID, 'protein ID' = anno@elementMetadata[,toupper(proteinID)], Existing.Anno = anno@elementMetadata[,toupper(desc.fname)],
              is.enzyme.all, domains = ipr.anno)[to.keep.extend,]
  rownames(geneID2clusterID) = geneID2clusterID[,1];
  out = cbind(out, clusterID = mat.fill.row(geneID2clusterID, rownames(out), '')[,2])
  write.xlsx(out, out.file)
  # HTML(out, 'out.html')
  return(out)
}
# Scan one or more (fungal) genomes for natural-product (secondary metabolite,
# SM) gene clusters.
#
# Each gene is represented as the sequence of its InterPro domain accessions,
# flanked by begin/end symbols 'b'/'e'. A multi-state HMM is built with nNPG
# "NP gene" cluster types and nOG "other gene" types (each type has .b/.d/.e
# states). Emission profiles are initialized from keyword- or domain-based
# labels (init.truth), optionally diffused to neighboring genes (init.expand).
# The model is trained by Viterbi training (HMM package) and optionally
# refined by EM (depmixS4 / seqHMM / HMM); decoded states and posteriors are
# written out via output.NPscan.hmm().
#
# Arguments (selected):
#   genome.ID      genome key(s) into NPGCquery_meta.txt under data.root; when
#                  non-NULL, per-genome bin.file/proteinID/gene.definition are
#                  taken from that table.
#   nNPG, nOG      number of NP-gene and other-gene cluster types (HMM states
#                  = 3 * (nNPG + nOG)).
#   init.truth     how initial SM-gene labels are derived: curated IPR list,
#                  enzyme-keyword regex ('MC29e', 'EC6'), or annotation
#                  keywords via is.KU() ('note'/'domain'/'both').
#   init.expand    radius (in genes) over which initial labels are diffused;
#                  init.decay.rate / init.expand.combine control the decay and
#                  how overlapping diffused probabilities are merged.
#   eval.truth     label source used as evaluation ground truth.
#   EM.method      'null' skips EM; otherwise selects the EM implementation.
#   dom.freq.cutoff  domains seen in fewer genes than this are dropped.
#   pseudocount    added to emission counts when building initial profiles.
#   bin.file       preprocessed .RData file(s); built from gff/iprscan files
#                  when absent.
# Returns: only inside a disabled if(0) branch; in practice the function is
# run for its side effects (files written by output.NPscan.hmm()).
#
# NOTE(review): several variables used below (anno, ipr.anno, ipr.tab,
# desc.fname) are expected to come from load(bin.file); they are not defined
# in this file — confirm the bin files provide them.
NPscan <- function(genome.ID = c('A.nidu_AspGD', 'Aspergillus flavus NRRL3357',
'Aspergillus tubingensis v1.0', 'Penicillium expansum',
'Trichoderma virens'),
nNPG= 2, # number of NPG cluster types;
nOG = 1, # number of other gene cluster types;
init.truth = c('IPR', 'MC29e', 'EC6', 'note', 'domain', 'both'),
init.expand = 1, # add the neighbors of inital genes as potential SM genes
init.decay.rate = 1-1/8, init.expand.combine = c('prob', 'max'), # max, use max when combine two probability diffused to a location
init.weighted = F,
predict.extra = F, # output the prediction results for initial HMM and final HMM removing pseudocounts
eval.truth = c('note', 'domain', 'both', 'IPR'),
do.viterbi = T, remove.pseudocounts = F,
EM.method = c('null', 'depmixS4', 'seqHMM', 'HMM'), # null: no EM step is done
domain.order = F, pseudocount = 0.1, annotation.by = 'OR',
data.root = '/Users/yongli/Dropbox/NPGC/NPGCquery_data',
dom.freq.cutoff = 2, # only the emitted domains with frequency above this are retained for training
gff.file=NULL, iprscan.tab.file = NULL,
bin.file = NULL, out.tag =sub(' ', '_', genome.ID),
out.file = paste(out.tag, '.xls', sep=''), remove.glycan=T,
gene.definition = c('gene', 'transcript'), proteinID = 'ID'){
# YF Li
# 20160202-20160204
# v2: HMM is slow using seqHMM instead
# v3: 20160413, add init.truth, evaluation.truth,
# 'IPR+/-5': mark proteins based on IPR domains and then expand to +/- 5 genes as SM genes
# v4. 20160421: allow multiple chromosome, and multi-genomes
require('R.matlab')
require(seqHMM)
require(HMM) # ref: http://web.stanford.edu/class/stats366/hmmR2.html;
# require(HiddenMarkov)
require(depmixS4) # depmixS4 is super fast compared to HMM and seqHMM, which are similar in speed,
# although HMM provide viterbi training, which is order of magnitude faster
# require(CRF)
require('R2HTML')
require(xlsx)
root = getwd()
# Resolve the choice arguments to a single value each.
EM.method = match.arg(EM.method)
eval.truth = match.arg(eval.truth)
init.truth = match.arg(init.truth)
init.expand.combine = match.arg(init.expand.combine)
################
###Prepare data
################
if (0){ # compared to the JGI version, AspGD gff contains gene annotation
genome.ID = 'A.nidu_AspGD'
setwd('/Users/yongli/Universe/write/Project_Current/9.O.NPbioinformatics/Nidulans.SlidingWindow/Annotation')
DNA.file='A_nidulans_FGSC_A4_current_chromosomes.fasta'
gff.file="A_nidulans_FGSC_A4_current_features.gff"
pep.fasta.file = "A_nidulans_FGSC_A4_current_orf_trans_all.fasta"
iprscan.tab.file = 'A_nidulans_FGSC_A4_iprscan.out.txt';
proteinID = 'ID'; cds2gene = function(x)(sub('-T$', '', x, perl=T))
}
setwd(data.root)
# When genome IDs are given, look up the per-genome input files in the meta table.
if (!is.null(genome.ID)){
meta = read.table('NPGCquery_meta.txt',header = T,as.is = T, sep= '\t', row.names = 1)
bin.file = meta[genome.ID, 'bin.file']
gff.file=NULL; iprscan.tab.file = NULL;
proteinID = meta[genome.ID, 'proteinID']
gene.definition = meta[genome.ID, 'gene.definition']
}
# Load and concatenate the preprocessed annotation objects across genomes.
# Each bin file is expected to define `anno`, `ipr.anno` and `ipr.tab`.
# NOTE(review): anno.all / ipr.tab.all are assembled here but the code below
# keeps using `anno` / `ipr.tab` (i.e. the objects from the LAST loaded bin
# file) — with multiple genomes this looks like a bug; confirm.
ipr.anno.all = c(); ipr.tab.all = data.frame()
if (!is.null(bin.file)){
first = 1;
for (b in bin.file){
cat('loading', b, '\n')
load(b)
if (first){
first = 0;
anno.all = anno;
}else{
anno.all = c(anno.all, anno);
}
ipr.anno.all = c(ipr.anno.all, ipr.anno);
ipr.tab.all = rbind(ipr.tab.all, ipr.tab[c('ID', 'analysis', "ipr.acc", "ipr.desc", "signature.acc", "signature.desc")])
}
}else{
if (length(genome.ID)>1)
stop('Only allow one genome if binary files is not used!\n')
bin.file = paste(genome.ID, '.RData', sep='')
get.NPGC.query.bin(gff.file=gff.file, iprscan.tab.file = iprscan.tab.file, bin.file = bin.file,
gene.definition = 'gene', proteinID = proteinID)
load(bin.file)
}
# Per-gene identifiers and free-text annotation.
# NOTE(review): `chr` is reassigned as a loop variable further below.
chr = as.character(anno@seqnames)[]; gene.ID = anno$ID; prot.ID = anno@elementMetadata[,toupper(proteinID)];
anno.txt = unlist(anno@elementMetadata@listData[[toupper(desc.fname)]])
domain.txt = as.character(as.vector(ipr.anno))
# Choose which text is used for keyword-based annotation matching.
if (annotation.by %in% 'desc'){
annotation.text = anno.txt
}else if(annotation.by %in% 'domain'){
annotation.text = domain.txt;
}else if(annotation.by %in% c('OR')){
annotation.text = paste(anno.txt, domain.txt)
}
names(annotation.text) = names(ipr.anno)
# Ground-truth SM labels used for evaluation (is.KU = keyword-based labeling).
if (eval.truth == 'note'){ # 20160413
t.sm.evaluation = is.KU(anno.txt)
}else if (eval.truth == 'domain'){
t.sm.evaluation = is.KU(domain.txt = domain.txt)
}else if (eval.truth == 'both'){
t.sm.evaluation = is.KU(anno.txt, domain.txt)
}
# Keyword-based SM labels used for model initialization.
if (init.truth == 'note'){ # 20160413
is.ku = is.KU(anno.txt)
}else if (init.truth == 'domain'){
is.ku = is.KU(domain.txt = domain.txt)
}else if (init.truth == 'both'){
is.ku = is.KU(anno.txt, domain.txt)
}
if (0){
domains = length(unique(ipr.tab$ipr.acc[ipr.tab$analysis %in% c('HMMPfam', 'HMMSmart', 'HMMTigr', 'HMMPIR', 'superfamily', 'BlastProDom')]))
motifs = length(unique(ipr.tab$ipr.acc[ipr.tab$analysis %in% c('ProfileScan', 'FPrintScan', 'ScanRegExp', 'ProSitePatterns', 'ProSiteProfiles', 'PRINTS', 'ScanRegExp')]))
locations = c('TMHMM', 'SignalP_EUK', 'SignalP_GRAM_NEGATIVE', 'SignalP_GRAM_POSITIVE')
to.keep = ipr.tab$analysis %in% c('HMMPfam', 'HMMSmart', 'HMMTigr', 'HMMPIR', 'superfamily', 'BlastProDom')
ipr.tab = ipr.tab[to.keep,]
}
# Keep only domain-type analyses with a valid InterPro accession.
to.keep = toupper(ipr.tab$analysis) %in% toupper(c('HMMPfam', 'HMMSmart', 'HMMTigr', 'HMMPIR', 'superfamily', 'BlastProDom', 'Pfam', 'SMART', 'TIGRFAM','ProDom','PIRSF', 'Hamap', 'Gene3D'))
to.keep = to.keep & ipr.tab$ipr.acc != ''
ipr.tab = ipr.tab[to.keep,]
################
##### remove low frequency domains; 20160404
################
accs0 = by(ipr.tab, ipr.tab$ID, FUN = function(x){y = unique(x$ipr.acc); y[!is.na(y)]})
accs0 = do.call(list, accs0)
domCounts0 = unique.count(unlist(accs0))$counts.unique
domCounts0 = domCounts0[domCounts0 >= dom.freq.cutoff]
doms.tokeep = names(domCounts0)
# accs: per-gene list of retained unique domain accessions.
accs = by(ipr.tab, ipr.tab$ID, FUN = function(x){y = unique(x$ipr.acc); y[!is.na(y) & y %in% doms.tokeep]})
accs = do.call(list, accs)
# accs[setdiff(names(ipr.anno), names(accs))] = NA
# accs = accs[names(ipr.anno)]
idx = !is.na(ipr.tab$ipr.acc)
emit.symbols = sort(doms.tokeep)
# emit.symbols = emit.symbols[!is.na(doms.tokeep)]
### domain based NPG labeling, 20160413
# sm.ipr: look up the manually curated BGC confidence (0/0.5/1) for each
# InterPro accession; unknown accessions get 0.
sm.ipr <- function(ipr.acc=NULL, file = '/Users/yongli/Dropbox/NPGC/NPGCquery_data/SM.domains_manualAnno_v2.xlsx'){
SM.doms = read.xlsx2(file,sheetIndex = 1, as.is =T)
rownames(SM.doms) = SM.doms$id
SM.doms$BGC.confidence = as.numeric(as.character(SM.doms$BGC.confidence))
# sum(SM.doms$BGC.confidence>0)
if (0){
ipr.acc.confident = as.character(SM.doms$id[SM.doms$BGC.confidence==1])
ipr.acc.maybe = as.character(SM.doms$id[SM.doms$BGC.confidence==0.5])
}
if (is.null(ipr.acc)){
ipr.acc = SM.doms$id
}
x = SM.doms[ipr.acc,'BGC.confidence'];
x[is.na(x)] = 0;
names(x) = ipr.acc
return(x)
}
################
##### obtaining domain dependencies by association rule mining, 20160310
################
if (0){
require(arules)
require("arules");
require("arulesViz")
data("Adult")
accs.tr = as(accs, 'transactions')
rules <- apriori(accs.tr,
parameter = list(support=1/1000, conf = 1,minlen = 2, maxlen= 2,
target = 'rules')) # "maximally frequent itemsets"))
summary(rules)
inspect(rules)
# rules@items@itemsetInfo
rules1 <- subset(rules, subset = lift > 2)
}
################
##### prepare training data
################
# ignor domain orders
warning('Some domains has not ipr.acc')
nDomPerGene = sapply(accs[anno@elementMetadata[,toupper(proteinID)]], FUN = length)
hist(nDomPerGene, xlab = '#Domains per Gene')
if (domain.order)
stop('domain.order not implemented yet')
# Build the observation sequence: per chromosome, per gene, emit
# 'b', <domains...>, 'e'; geneseqs records which gene each symbol belongs to.
seqs = c()
seqs.nr = c() # non-redundant domain annotations
geneseqs = c()
for (chr in unique(anno@seqnames)){
for (g in anno@elementMetadata[anno@seqnames == chr,toupper(proteinID)]){
seqs = c(seqs, 'b', accs[[g]], 'e') # add 'b' and 'e' to indicate protein start and end
geneseqs = c(geneseqs, rep(g, length(accs[[g]])+2))
}
}
emit.symbols = emit.symbols[emit.symbols%in%unique(seqs)] # some in iprscan file are not in gff file...
domCounts = unique.count(seqs)$counts.unique[emit.symbols]
pdf('Domain_prevalence.pdf',5,4)
hist(log(domCounts), xlab = 'log(#Genes per Domain)', main ='')
dev.off()
################
### initialize model
################
# define enzymes
domain.anno = paste(ipr.tab$ipr.desc, ipr.tab$signature.desc, sep='~~~');
domain.anno = by(domain.anno, INDICES = ipr.tab$ipr.acc, FUN = function(x){paste(unique(x), collapse = ';')})
if (remove.glycan){ # 20160311
head(ipr.tab)
# Flag glycan-related and other contaminant domains by keyword so they can
# be excluded from the NPG initial emission profile.
is.glycan = regexpr(pattern='(glyco|galacto|fructo|gluco)', text =paste(ipr.tab$ipr.desc, ipr.tab$signature.desc), perl=T, ignore.case=T) > 0
is.othercontaminants = regexpr(pattern='(kinase|proteasome)', text =paste(ipr.tab$ipr.desc, ipr.tab$signature.desc), perl=T, ignore.case=T) > 0
# is.enzyme.KUnotGlycan = (ipr.tab$ID %in% anno@elementMetadata$ID[is.ku]) & !is.glycan & !is.othercontaminants
# is.enzyme.KUnotGlycan = by(is.enzyme.KUnotGlycan[idx], ipr.tab$ipr.acc[idx], FUN = any)
# is.enzyme.KUnotGlycan = is.enzyme.KUnotGlycan[emit.symbols]
is.glycanContam = by((is.glycan | is.othercontaminants)[idx], ipr.tab$ipr.acc[idx], FUN = any)
is.glycanContam = is.glycanContam[emit.symbols]
is.glycan = by(is.glycan[idx], ipr.tab$ipr.acc[idx], FUN = any)
is.glycan = is.glycan[emit.symbols]
}else{
is.glycan <- is.glycanContam <- zeros(length(emit.symbols))
}
is.SM = sm.ipr(emit.symbols); # domain based SM gene annotation
is.SM = max.by(is.SM[ipr.tab$ipr.acc], ipr.tab$ID, min = 0)[ipr.tab$ID] # keep weights
# Derive the initial "is enzyme / SM gene" labels according to init.truth.
if (init.truth == 'IPR'){
is.enzyme = sm.ipr(emit.symbols);
if (1){ # expand from domains to genes, 20160414
if (init.weighted){
is.enzyme = max.by(is.enzyme[ipr.tab$ipr.acc], ipr.tab$ID, min = 0)[ipr.tab$ID] # keep weights
}else{
is.enzyme = ipr.tab$ID %in% ipr.tab$ID[ipr.tab$ipr.acc %in% names(which(is.enzyme>0))]
}
}
}else if (init.truth == 'MC29e'){
is.enzyme = regexpr(pattern='(oxidoreductase|hydrolase|dehydrogenase|synthase|reductase|transferase|methyltransferase|oxidase|synthetase|monooxygenase|isomerase|dehydratase|decarboxylase|deaminase|O\\-methyltransferase|transaminase|hydratase|acetyltransferase|N\\-acetyltransferase|dioxygenase|aminotransferase|O\\-acyltransferase|esterase|N\\-methyltransferase|acyltransferase|aldolase|O\\-acetyltransferase|cyclase|catalase|hydroxylase|P450|transporter|transcription factor)', text =paste(ipr.tab$ipr.desc, ipr.tab$signature.desc), perl=T, ignore.case=T) > 0
if (1){ # expand from domains to genes, 20160414
is.enzyme = ipr.tab$ID %in% ipr.tab$ID[is.enzyme]
}
}else if (init.truth == 'EC6'){
is.enzyme = regexpr(pattern='(oxidoreductase|transferase|hydrolase|lyase|isomerase|ligase)', text =paste(ipr.tab$ipr.desc, ipr.tab$signature.desc), perl=T, ignore.case=T) > 0
if (1){ # expand from domains to genes, 20160414
is.enzyme = ipr.tab$ID %in% ipr.tab$ID[is.enzyme]
}
}else if (init.truth %in% c('note', 'domain', 'both')){ # based on KU and SM specific keywords
is.enzyme = ipr.tab$ID %in% anno@elementMetadata$ID[is.ku]
}else{
}
if (eval.truth=='IPR'){# expand from domains to genes, 20160414
t.sm.evaluation = sm.ipr(emit.symbols);
t.sm.evaluation = vector.fill(max.by(t.sm.evaluation[ipr.tab$ipr.acc], ipr.tab$ID, min = 0) > 0, prot.ID)
}
if (init.expand>0){ # expand to neighbor genes
is.enzyme = vector.fill(max.by(is.enzyme, ipr.tab$ID, min = 0), prot.ID);is.enzyme[is.na(is.enzyme)] = 0 # to gene level
if (init.expand.combine == 'prob'){
is.enzyme = diffuse.by(is.enzyme, init.expand, decay.rate = init.decay.rate, combine.fun = function(x,y)1-(1-x)*(1-y)) # diffuse to neighbor genes
}else if (init.expand.combine=='max'){
is.enzyme = diffuse.by(is.enzyme, init.expand, decay.rate = init.decay.rate, combine.fun = max2) # diffuse to neighbor genes
}
is.enzyme = is.enzyme[ipr.tab$ID] # to domains in the genes
is.enzyme[is.na(is.enzyme)] = 0
}
#is.enzyme = is.enzyme.MC29e; tag = 'MC29e'
#is.enzyme = is.enzyme.EC6; tag = 'EC6'
# Build the initial NPG emission profile over emit.symbols, either weighted
# by the per-domain enzyme scores or from raw domain counts of enzyme domains.
if (init.weighted){
is.enzyme = by(is.enzyme[idx], ipr.tab$ipr.acc[idx], FUN = sum) # use max to keep the weights if provided, as in 'IPR' method for init.truth
is.enzyme = is.enzyme[emit.symbols]
is.enzyme[is.na(is.enzyme)] = 0
NPG.initialProfile = is.enzyme
NPG.initialProfile = (NPG.initialProfile + pseudocount)/sum(NPG.initialProfile+pseudocount)
NPG.initialProfile = NPG.initialProfile[emit.symbols];
is.enzyme = is.enzyme > 0 # use binary for remaining initialization
}else{
is.enzyme = by(is.enzyme[idx], ipr.tab$ipr.acc[idx], FUN = max) # use max to keep the weights if provided, as in 'IPR' method for init.truth
is.enzyme = is.enzyme[emit.symbols]
is.enzyme[is.na(is.enzyme)] = 0
is.enzyme = is.enzyme > 0 # use binary for all initialization
NPG.initialProfile = domCounts * is.enzyme
NPG.initialProfile = (NPG.initialProfile + pseudocount)/sum(NPG.initialProfile+pseudocount)
NPG.initialProfile = NPG.initialProfile[emit.symbols];
}
# Variant profiles: enzymes excluding glycan/contaminant domains, and
# glycan-only domains (used for NPG type 1 and 2 respectively).
NPG.initialProfile.noGlycan = domCounts * (is.enzyme & !is.glycanContam)
NPG.initialProfile.noGlycan = (NPG.initialProfile.noGlycan + pseudocount)/sum(NPG.initialProfile.noGlycan+pseudocount)
NPG.initialProfile.noGlycan = NPG.initialProfile.noGlycan[emit.symbols];
NPG.initialProfile.glycan = domCounts * is.glycan
NPG.initialProfile.glycan = (NPG.initialProfile.glycan + pseudocount)/sum(NPG.initialProfile.glycan+pseudocount)
NPG.initialProfile.glycan = NPG.initialProfile.glycan[emit.symbols];
# Background profile for "other gene" states: overall domain frequencies.
initialProfile = (domCounts + pseudocount)/sum(domCounts+pseudocount)
initialProfile = initialProfile[emit.symbols];
nTypes = nNPG + nOG
# Encode all run parameters into the output tag so result files are traceable.
out.tag = paste(out.tag, 'it',toupper(init.truth),'ie',init.expand,'iw',c('F','T')[init.weighted+1], 'et', toupper(eval.truth),
'iec', toupper(init.expand.combine), 'idr', signif(init.decay.rate,2),
'rc', c('F','T')[remove.glycan+1],'rp', c('F','T')[remove.pseudocounts+1],
'npg', nNPG, 'og', nOG, 'domfr',
dom.freq.cutoff, 'pc',pseudocount, sep='')
# Each cluster type gets 3 hidden states: .b (emits 'b'), .d (emits domains),
# .e (emits 'e'). Emission rows below follow that order.
nH = nTypes * 3; nE = length(emit.symbols);
States = c()
emissionProbs = c()
for (i in 1:nNPG){
States = c(States, paste('NPG', i, c('.b', '.d', '.e'), sep=''))
# Small multiplicative/additive noise breaks symmetry between NPG types.
if (i == 1){
NPG.profile = (NPG.initialProfile.noGlycan + runif(nE)*mean(NPG.initialProfile.noGlycan) *0.5) * exp(rnorm(nE)*0.1)
}else if (i==2){
NPG.profile = (NPG.initialProfile.glycan + runif(nE)*mean(NPG.initialProfile.glycan) *0.5) * exp(rnorm(nE)*0.1)
}else{
NPG.profile = (NPG.initialProfile + runif(nE)*mean(NPG.initialProfile) *0.5) * exp(rnorm(nE)*0.1)
}
NPG.profile = NPG.profile/sum(NPG.profile)
emissionProbs = rbind(emissionProbs, t(c(1, rep(0,nE+1))),
t(c(0,0,NPG.profile)),
t(c(0,1,rep(0, nE))))
}
for (i in 1:nOG){
States = c(States, paste('OG', i, c('.b', '.d', '.e'), sep=''))
OG.profile = (initialProfile + runif(nE)*mean(initialProfile) *0.5) * exp(rnorm(nE)*0.1)
OG.profile = OG.profile/sum(OG.profile)
emissionProbs = rbind(emissionProbs,
t(c(1, rep(0, nE+1))),
t(c(0,0,OG.profile)),
t(c(0,1,rep(0, nE))))
}
# Block-structured transition matrix: within-type b->d->e cycling, and from
# each .e state a (1-p.i) chance to switch to another cluster type's .b state.
startProbs = c()
transProbs = c()
p.i = 0.9 # intra-state transition probability
for (i in 1:nTypes){
startProbs = c(startProbs, c(1/nTypes, 0, 0))
block.intra = rbind(t(c(0,0.9,0.1)),t(c(0,0.8,0.2)),t(c(p.i,0,0)))
block.inter = rbind(t(c(0,0,0)),t(c(0,0,0)),t(c((1-p.i)/(nTypes-1),0,0)))
transProbs1 = c()
for (j in 1: nTypes){
if (i == j){
transProbs1 = cbind(transProbs1, block.intra)
}else{
transProbs1 = cbind(transProbs1, block.inter)
}
}
transProbs = rbind(transProbs, transProbs1)
}
rownames(transProbs) <- colnames(transProbs) <- States
if(0){#
nH = 6; nE = length(emit.symbols);
HMM = initHMM(States = c("NPG.b","NPG.d","NPG.e","OG.b","OG.d","OG.e"),
Symbols = c('b','e',emit.symbols),
startProbs = c(.25,0,0.25,0.25,0,0.25),
transProbs = rbind(t(c(0,0.9,0.1,0,0,0)),
t(c(0,0.8,0.2,0,0,0)),
t(c(0.9,0,0,0.1,0,0)),
t(c(0,0,0,0,0.9,0.1)),
t(c(0,0,0,0,0.8,0.2)),
t(c(0.1,0,0,0.9,0,0))),
emissionProbs = rbind(t(c(1, rep(0,nE+1))),
t(c(0,0,NPG.initialProfile)),
t(c(0,1,rep(0, nE))),
t(c(1, rep(0, nE+1))),
t(c(0,0,initialProfile)),
t(c(0,1,rep(0, nE)))))
}
################
### viterbi training
################
HMM0 = initHMM(States = States,
Symbols = c('b','e',emit.symbols),
startProbs = startProbs,
transProbs = transProbs,
emissionProbs = emissionProbs)
if (do.viterbi){
# Run HMM::viterbiTraining in chunks of `step` iterations until the
# parameter change falls below delta or max.steps chunks are exceeded.
step = 2; max.steps = 50; delta=1E-5
training.trace = list()
training.trace[['0']] = list(hmm = HMM0, difference = Inf)
s = 1
ptm <- proc.time();
while (1){
cat((s-1)*step, training.trace[[paste(s-1)]]$difference)
# training.trace[[paste(s)]] = baumWelch(training.trace[[paste(s-1)]]$hmm, observation=seqs, maxIterations=step, delta=delta, pseudoCount=0)
training.trace[[paste(s)]] = viterbiTraining(training.trace[[paste(s-1)]]$hmm, observation=seqs, maxIterations=step, delta=delta, pseudoCount=0.1)
if (all(training.trace[[paste(s)]]$difference < delta) | s > max.steps) # |
# abs(training.trace[[paste(s)]]$difference - training.trace[[paste(s-1)]]$difference)[2] < delta |
# s > 15) # no longer improves
break
cat('\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b')
s = s+1
}
print(proc.time()-ptm)
s = s+1
# write.csv(anno@elementMetadata[is.ku,], 'KU.csv')
# write.csv(ipr.anno[is.ku], 'KU_ipr.csv')
### prediction
# NOTE(review): `s = s+1` above makes `training.trace[[s]]` index one past
# the last trained entry only because the list is 0-based by name and
# 1-based by position — confirm this indexes the final model as intended.
statePred = HMM::viterbi(training.trace[[s]]$hmm, seqs)
postP = HMM::posterior(training.trace[[s]]$hmm, seqs)
output.NPscan.hmm(HMM0, training.trace[[s]]$hmm, domain.anno, nNPG, nOG,
geneseqs, seqs, statePred,postP,anno,
anno.txt, domain.txt, t.sm.evaluation, out.tag= paste(out.tag,'_viterbi', sep=''))
if (predict.extra){
statePred0 = HMM::viterbi(HMM0, seqs)
postP0 = HMM::posterior(HMM0, seqs)
output.NPscan.hmm(HMM0, training.trace[[s]]$hmm, domain.anno, nNPG, nOG,
geneseqs, seqs, statePred0,postP0,anno,
anno.txt, domain.txt, t.sm.evaluation, out.tag= paste(out.tag,'_viterbi_init', sep=''))
# Rebuild the final HMM with pseudocount artifacts removed from the
# begin/end-state emissions and zero-structure restored from HMM0.
i.be = c(seq(1,nH,3),seq(3,nH,3)); # begin and end states remove pseudo counts;
finalHMM = training.trace[[s]]$hmm
finalHMM$emissionProbs[i.be, ] = HMM0$emissionProbs[i.be,]
finalHMM$emissionProbs[HMM0$emissionProbs==0] = 0;
finalHMM$emissionProbs = 1/rowSums(finalHMM$emissionProbs) * finalHMM$emissionProbs
finalHMM$transProbs[HMM0$transProbs==0] = 0
finalHMM$transProbs = 1/rowSums(finalHMM$transProbs) * finalHMM$transProbs
statePred.rp = HMM::viterbi(finalHMM, seqs)
postP.rp = HMM::posterior(finalHMM, seqs)
output.NPscan.hmm(HMM0, finalHMM, domain.anno, nNPG, nOG,
geneseqs, seqs, statePred.rp, postP.rp, anno,
anno.txt, domain.txt, t.sm.evaluation, out.tag= paste(out.tag,'_viterbi_rp', sep=''))
}
################
### remove undesired transition and emmision due to pseudocounts
################
HMM.nsc = training.trace[[s]]$hmm
HMM.nsc$transProbs = HMM.nsc$transProbs * (training.trace[[1]]$hmm$transProbs>0)
HMM.nsc$transProbs = 1/rowSums(HMM.nsc$transProbs) * HMM.nsc$transProbs
i.be = c(seq(1, nH, 3), seq(3, nH, 3)) # begin and end state, remove pseudo count information
HMM.nsc$emissionProbs[i.be,] = HMM.nsc$emissionProbs[i.be,] * (training.trace[[1]]$hmm$emissionProbs[i.be,] > 0)
HMM.nsc$emissionProbs[i.be,] = 1/rowSums(HMM.nsc$emissionProbs[i.be,]) * HMM.nsc$emissionProbs[i.be,]
}else{
HMM.nsc = HMM0
}
################
### EM training, based on the viterbi training output, if desired
################
if (EM.method == 'seqHMM'){
seq.formated <- seqdef(t(seqs), 1:length(seqs),
labels = c('b','e',emit.symbols))
HMM = build_hmm(state_names = States,
observations = seq.formated,
initial_probs = HMM.nsc$startProbs,
transition_probs = HMM.nsc$transProbs,
emission_probs = HMM.nsc$emissionProbs)
# HMM.reinit = build_hmm(state_names = States, # initialize using vertabi training output
# observations = seq.formated,
# initial_probs = training.trace[[s]]$hmm$startProbs,
# transition_probs = training.trace[[s]]$hmm$transProbs,
# emission_probs = training.trace[[s]]$hmm$emissionProbs)
# alphabet(seqs)[1:10]
# fit.HMM <- fit_model(HMM, threads=3, control_em = list(restart = list(times = 0)))
fit.HMM <- fit_model(HMM,control_em = list(maxeval = 100, restart = list(times = 0)),
global_step=T, control_global = list(maxtime=1000),
local_step=T)
# plot.hmm(fit.HMM$model)
statePred.EM = hidden_paths(fit.HMM$model)
statePred.EM = as.character(unlist(as.list(statePred.EM)))
postP.EM = posterior_probs(fit.HMM$model)
postP.EM = postP.EM[,,1]
# NOTE(review): this call passes `is.SM` as an extra positional argument;
# output.NPscan.hmm() has no such parameter, so the following arguments
# shift by one — likely a bug.
output.NPscan.hmm(seqHMM2HMM(HMM), seqHMM2HMM(fit.HMM$model), domain.anno, nNPG, nOG,
geneseqs, seqs, statePred.EM,postP.EM, anno, is.SM,
anno.txt, domain.txt, t.sm.evaluation, out.tag=paste(out.tag,'_viterbi_seqHMM_EM', sep=''))
}else if (EM.method == 'depmixS4'){ # speed similar to viterbi according to testing: analysis.HMM.speedComparison
if (0){
depmix0 <- depmix(list(obs~1), data=data.frame(obs = seqs),nstates=nH,
family=list(multinomial('identity')))
depmix0@prior@parameters$coefficients = HMM.nsc$startProbs
depmix0@init = t(HMM.nsc$startProbs)
depmix0@trDens[] = HMM.nsc$transProbs
for (i in 1:length(depmix0@transition)){
depmix0@transition[[i]]@parameters$coefficients = HMM.nsc$transProbs[i,]
}
for (i in 1:length(depmix0@response)){
depmix0@response[[i]][[1]]@parameters$coefficients = HMM.nsc$emissionProbs[i,]
}
}
dmHMM0.viterbi <- HMM2depmix(HMM.nsc, seqs)
set.seed(3)
ptm <- proc.time()
dmHMM.viterbi.EM <- fit(dmHMM0.viterbi, emc = em.control(rand=F)) # no random start, otherwise, em.depmix gives an error message Starting values not feasible; please provide them"
proc.time()-ptm
ptm <- proc.time()
dmHMM.viterbi.viterbi <- fit(dmHMM0.viterbi, emc = em.control(rand=F,classification='hard')) # no random start, otherwise, em.depmix gives an error message Starting values not feasible; please provide them"
proc.time()-ptm
# user system elapsed
# 33425.32 87016.23 128767.56 9 (1.5 days)
if (0){
dmHMM0 <- HMM2depmix(HMM0, seqs)
xx <- depmix2HMM(dmHMM0)
all(xx$Symbols == HMM0$Symbols)
all(xx$emissionProbs == HMM0$emissionProbs)
set.seed(3)
ptm <- proc.time()
dmHMM.EM <- fit(dmHMM0, emc = em.control(rand=F))
proc.time()-ptm
}
if (0){
# iteration 0 logLik: -126822.8
# iteration 5 logLik: -125822.1
# iteration 10 logLik: -125609
# iteration 15 logLik: -125575.9
# iteration 20 logLik: -125543.3
# iteration 25 logLik: -125514
# iteration 30 logLik: -125504.1
# iteration 35 logLik: -125495
# iteration 40 logLik: -125475.7
# iteration 45 logLik: -125464.7
# iteration 50 logLik: -125459.2
# iteration 55 logLik: -125456.1
# iteration 60 logLik: -125455.2
# iteration 65 logLik: -125453.6
# iteration 70 logLik: -125453.3
# iteration 75 logLik: -125453.1
# iteration 80 logLik: -125453
# iteration 85 logLik: -125452.9
# iteration 90 logLik: -125452.8
# iteration 95 logLik: -125452.7
# iteration 100 logLik: -125452.6
# iteration 105 logLik: -125452.6
# iteration 110 logLik: -125452.6
# iteration 115 logLik: -125452
# iteration 120 logLik: -125450.8
# iteration 125 logLik: -125450.7
# iteration 130 logLik: -125450.5
# iteration 135 logLik: -125448.6
# iteration 140 logLik: -125448.6
# iteration 145 logLik: -125448.6
# iteration 150 logLik: -125448.6
# iteration 155 logLik: -125448.6
# converged at iteration 156 with logLik: -125448.5
}
post = depmixS4::posterior(dmHMM.viterbi.EM)
statePred.depmix = post[,1]
statePred.depmix = HMM0$States[statePred.depmix] # statePred.depmix = HMM::viterbi(depmix2HMM(dmHMM.viterbi.EM), seqs)
postP.depmix = forwardbackward(dmHMM.viterbi.EM)$gamma # postP.depmix = HMM::posterior(depmix2HMM(dmHMM.viterbi.EM), seqs)
colnames(postP.depmix) = names(dmHMM.viterbi.EM@prior@parameters$coefficients)
postP.depmix = t(postP.depmix);
# postP.depmix = post[,2:ncol(post)] this is wrong
# NOTE(review): this call omits the nNPG/nOG arguments (compare the other
# output.NPscan.hmm() calls), so geneseqs/seqs/... shift into the wrong
# positional parameters — likely a bug.
output.NPscan.hmm(HMM.nsc, depmix2HMM(dmHMM.viterbi.EM), domain.anno,
geneseqs, seqs, statePred.depmix,postP.depmix,anno,
anno.txt, domain.txt, t.sm.evaluation, out.tag=paste(out.tag,'_viterbi_depmixEM', sep=''))
class(dmHMM.viterbi.viterbi) = 'depmix.fitted'
post = depmixS4::posterior(dmHMM.viterbi.viterbi)
statePred.depmix = post[,1]
statePred.depmix = HMM0$States[statePred.depmix] # statePred.depmix = HMM::viterbi(depmix2HMM(dmHMM.viterbi.EM), seqs)
postP.depmix = forwardbackward(dmHMM.viterbi.viterbi)$gamma # postP.depmix = HMM::posterior(depmix2HMM(dmHMM.viterbi.EM), seqs)
colnames(postP.depmix) = names(dmHMM.viterbi.viterbi@prior@parameters$coefficients)
postP.depmix = t(postP.depmix);
# postP.depmix = post[,2:ncol(post)] this is wrong
output.NPscan.hmm(HMM.nsc, depmix2HMM(dmHMM.viterbi.viterbi), domain.anno, nNPG, nOG,
geneseqs, seqs, statePred.depmix,postP.depmix,anno,
anno.txt, domain.txt, t.sm.evaluation, out.tag=paste(out.tag,'_viterbi_depmixviterbi', sep=''))
}else if (EM.method == 'HMM'){ # HMM
# Baum-Welch EM via the HMM package, chunked like the Viterbi loop above.
step = 2; max.steps = 50; delta=1E-9
training.traceBW = list()
training.traceBW[['0']] = list(hmm = HMM.nsc, difference = Inf)
s = 1
while (1){
cat((s-1)*step, training.traceBW[[paste(s-1)]]$difference)
training.traceBW[[paste(s)]] = baumWelch(training.traceBW[[paste(s-1)]]$hmm, observation=seqs, maxIterations=step, delta=delta, pseudoCount=0.5)
if (all(training.traceBW[[paste(s)]]$difference < delta))
break
cat('\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b')
s = s+1
}
statePred.EM = HMM::viterbi(training.traceBW[[s]]$hmm, seqs)
postP.EM = HMM::posterior(training.traceBW[[s]]$hmm, seqs)
output.NPscan.hmm(training.traceBW[[1]]$hmm, training.traceBW[[s]]$hmm, domain.anno, nNPG, nOG,
geneseqs, seqs, statePred.EM, postP.EM,anno,
anno.txt, domain.txt, t.sm.evaluation, out.tag= paste(out.tag,'_viterbi', sep=''))
}else if (EM.method == 'null'){
# nothing is done here
}
# HMM.ori
# HMM.dom
# HMM.dom.ori
if (0){
image(vt$hmm$transProbs)
image(HMM$transProbs)
require(gplots)
image(log(vt$hmm$emissionProbs),col=greenred(256))
image(log(HMM$emissionProbs),col=greenred(256))
# label KUs
# semisupervised EM learning
# Predictions
#### output
cat('\n#Identified clusters: ', nrow(gene.ranges))
to.keep.extend = extend.index(core.regions, window.extend, sides='both', do.unique=T)
to.keep.extend = to.keep.extend[to.keep.extend<=length(anno) & to.keep.extend>=1]
anno$PARENT[1] ==c()
is.enzyme.all[] = c('', 'Yes')[is.enzyme.all+1]
out = cbind(chr = chr, gene=gene.ID, 'protein ID' = prot.ID, Existing.Anno = anno@elementMetadata[,toupper(desc.fname)],
is.enzyme.all, domains = ipr.anno)[to.keep.extend,]
rownames(geneID2clusterID) = geneID2clusterID[,1];
out = cbind(out, clusterID = mat.fill.row(geneID2clusterID, rownames(out), '')[,2])
write.xlsx(out, out.file)
# HTML(out, 'out.html')
return(out)
}
}
viterbiTraining.depmix <- function(dmHMM0, seqs){
# implement fast viterbiTraining based on depmixS4:::viterbi
# NOTE(review): this looks incomplete/experimental — `s` is assigned twice
# (the depmixS4:::viterbi result is immediately discarded), `seqs` is unused
# after the first call, and the function implicitly returns logLik(dmHMM0)
# rather than a trained model. Do not rely on it until finished.
class(dmHMM0) = 'depmix.fitted'
s = depmixS4:::viterbi(seqs, dmHMM0)
s = depmixS4::posterior(dmHMM0)
logLik(dmHMM0)
}
mut.gene <- function(seq, mut.model){
# NOTE(review): empty stub. The original comment here ("implement fast
# viterbiTraining based on depmixS4:::viterbi") was copied from the previous
# function and did not describe this one. Presumably this was intended to
# mutate a gene/domain sequence `seq` according to `mut.model` — confirm
# before implementing.
}
HMM2depmix <- function(HMM, seqs=NULL){
# 20160331
# Convert an 'HMM'-package model into an (unfitted) depmixS4 model, using the
# HMM's start/transition/emission probabilities as the depmix starting values.
# When no observation sequence is supplied, the model's symbol set is used as
# a stand-in data vector.
if (is.null(seqs)){
seqs <- HMM$Symbols
}
n.states <- length(HMM$States)
# depmix() expects the response starting values as one flat vector, state by
# state; flattening the transposed emission matrix gives exactly that order.
# Each entry is labelled with the symbol it is the emission probability of.
emis.start <- as.vector(t(HMM$emissionProbs))
names(emis.start) <- rep(colnames(HMM$emissionProbs),
times = nrow(HMM$emissionProbs))
dm.model <- depmix(list(obs~1),
data = data.frame(obs = seqs),
nstates = n.states,
respstart = emis.start,
trstart = t(HMM$transProbs),
instart = t(HMM$startProbs),
family = list(multinomial('identity')))
# names(dm.model@prior@parameters$coefficients) = names(HMM$startProbs)
return(dm.model)
}
depmix2HMM <- function(depmixHMM){
# 20160331
# Convert a depmixS4 model back into an 'HMM'-package model by stacking the
# per-state multinomial response coefficients into an emission matrix and
# copying the initial/transition probabilities.
emissionProbs <- do.call(rbind, lapply(depmixHMM@response, function(resp){
resp[[1]]@parameters$coefficients
}))
return(initHMM(States = names(depmixHMM@prior@parameters$coefficients),
Symbols = colnames(emissionProbs),
startProbs = depmixHMM@init,
transProbs = t(depmixHMM@trDens[1,,]),
emissionProbs = emissionProbs))
}
seqHMM2HMM <- function(seqhmm.model = fit.HMM$model){
# 20160330, YFL
# Convert a seqHMM model into an 'HMM'-package model by copying its state and
# symbol names and its probability matrices verbatim.
# NOTE(review): the default argument references `fit.HMM`, a variable that
# only exists in the calling environment (see the seqHMM branch of NPscan) —
# calling this without an explicit argument elsewhere will fail. Always pass
# the model explicitly.
hmm.model = initHMM(States = seqhmm.model$state_names,
Symbols = seqhmm.model$symbol_names,
startProbs = seqhmm.model$initial_probs,
transProbs = seqhmm.model$transition_probs,
emissionProbs = seqhmm.model$emission_probs)
return(hmm.model)
}
# Write out all results for one trained HMM run:
#  - HMM_learning*.pdf        initial vs final model diagrams
#  - NPScan_Emission*.xlsx    top emitted domains per NPG/OG state, initial & final
#  - NPScan_GenePred2_*.csv   per-gene state calls, posteriors, cluster IDs,
#                             SM-type annotations and evaluation labels
#  - perf_*.pdf, prob_*plot.pdf  posterior histograms / per-chromosome score plots
#
# Arguments: initialHMM/finalHMM are 'HMM'-package models; statePred is the
# decoded state per observation symbol; postP is the states x observations
# posterior matrix; `truth` is the per-gene evaluation label vector.
# NOTE(review): the default `truth = t.sm.evaluation` refers to a variable in
# the caller's environment (NPscan) — pass it explicitly from elsewhere.
output.NPscan.hmm <- function(initialHMM, finalHMM, domain.anno,nNPG, nOG,
geneseqs, seqs, statePred, postP,anno,
anno.txt, domain.txt, truth = t.sm.evaluation, out.tag){
# 20160508, v2, output cluster types,
require('lattice')
pdf(paste('HMM_learning',out.tag, '.pdf', sep=''), 7,3)
plot.hmm(initialHMM)
plot.hmm(finalHMM)
dev.off()
# One xlsx sheet per state (initial and final emission profiles, sorted).
append=F
out.file = paste('NPScan_Emission', out.tag, '.xlsx', sep='')
for (i in 1:nNPG){
tag = paste('NPG', i, '.d', sep='')
top.NPG.domains = sort(initialHMM$emissionProbs[tag,], decreasing = T)
top.NPG.domains = cbind(domain.anno[names(top.NPG.domains)], top.NPG.domains)
write.xlsx2(top.NPG.domains, append=append, sheetName = paste(tag, '_init', sep=''), file = out.file)
tag = paste('NPG', i, '.d', sep='')
append = T
top.NPG.domains = sort(finalHMM$emissionProbs[tag,], decreasing = T)
top.NPG.domains = cbind(domain.anno[names(top.NPG.domains)], top.NPG.domains)
write.xlsx2(top.NPG.domains, append=append, sheetName = paste(tag, '_Final', sep=''), file = out.file)
}
for (i in 1:nOG){
tag = paste('OG', i, '.d', sep='')
top.NPG.domains = sort(initialHMM$emissionProbs[tag,], decreasing = T)
top.NPG.domains = cbind(domain.anno[names(top.NPG.domains)], top.NPG.domains)
write.xlsx2(top.NPG.domains, append=append, sheetName = paste(tag, '_init', sep=''), file = out.file)
tag = paste('OG', i, '.d', sep='')
top.NPG.domains = sort(finalHMM$emissionProbs[tag,], decreasing = T)
top.NPG.domains = cbind(domain.anno[names(top.NPG.domains)], top.NPG.domains)
write.xlsx2(top.NPG.domains, append=append, sheetName = paste(tag, '_Final', sep=''), file = out.file)
}
# top.diff.domains = sort(initialHMM$emissionProbs['NPG.d',]/initialHMM$emissionProbs['OG.d',], decreasing = T)[1:100]
# top.diff.domains = cbind(domain.anno[names(top.diff.domains)], top.diff.domains)
# write.csv(top.NPG.domains, 'top.NPG1.domains_init.csv')
# write.csv(top.OG1.domains, 'top.OG1.domains_init.csv')
# write.csv(top.OG2.domains, 'top.OG2.domains_init.csv')
# write.csv(top.diff.domains, 'top.diff.domains_init.csv')
# top.NPG.domains = sort(finalHMM$emissionProbs[tag,], decreasing = T)
nH = nrow(postP)/3
##### 20160508, computing the KU domains
# Map each emitted domain symbol to its curated SM (NP) class labels.
Type = vector(mode = 'character', length(statePred));
ipr2sm = IPR2SMtype();
i.domains = !(seqs%in%c('b','e'))
SMTypes = colnames(ipr2sm)
i.KU.domains = !is.na(match(seqs, rownames(ipr2sm)));
SMTypeMat = as.matrix(ipr2sm[seqs[i.KU.domains],])
Type[i.KU.domains] = apply(SMTypeMat, MARGIN = 1, function(x)paste(SMTypes[which(x==1)], ':',1, collapse = '; ', sep=''))
# geneID: ordinal gene index, derived from counting 'b' (gene-begin) symbols.
geneID = cumsum(seqs == 'b');
Type.gene = vector(mode = 'character', max(geneID))
# Aggregate SM-class counts of a gene's domains into one "Class:count" string.
a = tapply(1:sum(i.KU.domains), geneID[i.KU.domains], function(x){
x= colSums(SMTypeMat[x,,drop=F]);
# cat(x)
paste(SMTypes[which(x>0)], ':',x[x>0], collapse = '; ', sep='')
})
Type.gene[as.numeric(names(a))] = a;
# assign cluster ID
# A gene is an NP gene when its decoded begin state is not an OG*.b state;
# consecutive NP genes form a cluster, restarting at chromosome boundaries.
NP.Gene.pred = regexpr('^OG.*b', statePred[seqs=='b'])<=0
cluster.start = diff(c(F, NP.Gene.pred)) == 1;
chr = as.character(anno@seqnames)
chr.start = (chr != c('begin',chr[1:(length(chr)-1)]) )
cluster.start[chr.start & NP.Gene.pred] = T
cluster.ID = cumsum(cluster.start);
cluster.ID[!NP.Gene.pred] = 0
cluster.size = cluster.ID;
cluster.size[cluster.ID!=0] = (unique.count(cluster.ID[cluster.ID!=0])$counts.unique)[as.character(cluster.ID[cluster.ID!=0])]
# Per cluster, the maximum posterior of the decoded begin state among its genes.
cluster.max.p = cluster.ID;
ii = which(cluster.ID!=0)
cluster.max.p[cluster.ID!=0] = unlist(tapply(t(postP)[seqs=='b',][cbind(ii, match((statePred[seqs=='b'])[ii],rownames(postP)))], INDEX = cluster.ID[cluster.ID!=0], max))[as.character(cluster.ID[cluster.ID!=0])]
## aggregate gene NP type to the cluster level
Type.cluster = vector(mode = 'character', max(geneID));
gene.SMTypeMat = do.call(rbind, tapply(1:sum(i.KU.domains), geneID[i.KU.domains], function(x){
x= colSums(SMTypeMat[x,,drop=F])}, simplify = T));
a = tapply(1:nrow(gene.SMTypeMat), cluster.ID[as.numeric(rownames(gene.SMTypeMat))], function(x){
x= colSums(gene.SMTypeMat[x,,drop=F]);
paste(SMTypes[which(x>0)], ':',x[x>0], collapse = '; ', sep='')
})
a= unlist(a); a = a[setdiff(names(a), '0')]
i.matched = as.character(cluster.ID) %in% names(a)
Type.cluster[i.matched] = a[as.character(cluster.ID[i.matched])]
# Clusters with no curated-domain evidence are labelled 'UU' (unknown).
Type.cluster[cluster.ID!=0 & !i.matched] = 'UU'
Type.cluster[cluster.ID!=0 & !i.matched & regexpr('^NPG1.*b', statePred[seqs=='b'])<=0] = 'UU(NPG2)'
pred.domains = data.frame(Gene = geneseqs,
Feature = seqs,
Annotation = domain.anno[seqs],
State = statePred,
NP.Type = Type,
Posterior = t(postP))
if (0){ # domain level predictions
out.file = paste('NPScan_DomainPred_', out.tag, '.csv', sep='')
write.csv(pred.domains, row.names = F, file = out.file)
}
# Collapse to one row per gene (the 'b' rows) and attach gene/cluster info.
gene.out.file = paste('NPScan_GenePred2_', out.tag, '.csv', sep='')
pred.gene = data.frame(chr = as.character(anno@seqnames),
pred.domains[pred.domains[,'Feature']=='b',c(1,4, seq(6, ncol(pred.domains),3))],
Gene.NP.type = Type.gene,
Cluster.NP.type = Type.cluster,
Cluster.ID = cluster.ID,
Cluster.size = cluster.size,
Cluster.p.max = cluster.max.p,
known.KU = truth*1,
Gene.Anno = anno.txt,
Domains = domain.txt)
# Histograms of per-state posteriors (log-odds and raw), split by truth label.
pdf(paste('perf_', out.tag, '.pdf', sep=''),3,3) # 20160404
for (i in 3+(1:nH)){
s = sub('Posterior\\.(.+)\\.b', '\\1', colnames(pred.gene)[i])
print(hist.by(log10(pred.gene[,i]/(1-pred.gene[,i])), c('Other genes', 'True NP genes')[1+truth], xlab = paste('log odds ', s, sep=''), by.name = ''))
}
for (i in 3+(1:nH)){
print(hist.by(pred.gene[,i], c('Other genes', 'True NP genes')[1+truth], xlab = paste('probability ', s, sep=''), by.name = ''))
}
dev.off()
write.csv(pred.gene, row.names = F, file = gene.out.file)
### visualization of gene probability
pdf(paste('prob_', out.tag, 'plot.pdf', sep=''), 10,6)
dat = data.frame()
for (i in 3+(1:(nH-1))){
dat = rbind(dat, data.frame(gene = 1:nrow(pred.gene), score = pred.gene[[i]],
type = sub('Posterior\\.(.*)\\.b', '\\1', colnames(pred.gene)[i]),
chr = pred.gene$chr,
is.KU = pred.gene$known.KU))
}
print(ggplot(data = dat) + geom_line(mapping = aes(x=gene, y=score, color=type), alpha=0.3) +
geom_point(aes(x=gene, y=score),shape = 1,data = dat[dat$type=='NPG1' & dat$is.KU,]) +
facet_wrap(~chr, nrow=4, scales="free") + labs(color = 'scores')+
theme_bw() + theme(panel.grid.major = element_blank()))
print(ggplot(data = dat) + geom_line(mapping = aes(x=gene, y=log10(score/(1-score)), color=type), alpha=0.3) +
geom_point(aes(x=gene, y=log10(score/(1-score))),shape = 1,data = dat[dat$type=='NPG1' & dat$is.KU,]) +
facet_wrap(~chr, nrow=4, scales="free") + labs(color = 'scores') +
theme_bw() + theme(panel.grid.major = element_blank()))
dev.off()
# save(list = c('initialHMM', 'finalHMM', 'seqs', 'domain.anno', 'nNPG','nOG','geneseqs',
# 'seqs', 'statePred', 'postP', 'anno','anno.txt', 'domain.txt', 'truth', 'out.tag'), file = paste(out.tag, '.RData', sep=''))
# save(list = c('initialHMM', 'finalHMM', 'seqs', 'statePred', 'postP'), file = paste(out.tag, '.RData', sep=''))
}
IPR2SMtype <- function(file = '/Users/yongli/Dropbox/NPGC/NPGCquery_data/SM.domains_manualAnno_v2.xlsx'){
  # 20160508
  # Read the manually annotated SM-domain table and build an indicator matrix
  # (IPR domain id x SM type) via seq2mat().  'UU' is appended as an extra
  # type so the alphabet always includes the unknown/unknown class.
  dom.tab <- read.xlsx2(file, sheetIndex = 1, as.is = TRUE)
  dom.tab$BGC.confidence <- as.numeric(as.character(dom.tab$BGC.confidence))
  type.alphabet <- c(unique(unlist(strsplit(as.character(dom.tab$NP_Class), split = '; '))), 'UU')
  has.class <- dom.tab$NP_Class != ''
  class.list <- strsplit(as.character(dom.tab$NP_Class[has.class]), split = '; ')
  names(class.list) <- dom.tab$id[has.class]
  return(seq2mat(class.list, alphabet = type.alphabet))
}
NPscan.postprocess <- function(files = dir(pattern = '^NPScan_GenePred2_*'), tag = '', remove.TF=F, remove.transporter=F,
                               meta.file = '/Users/yongli/Dropbox/NPGC/NPGCquery_data/NPGCquery_meta.txt',
                               cluster.info.file = paste(tag, 'cluster_info.xlsx', sep=''),
                               length.cutoff = 5, length.cutoff.max = 25, p.cutoff = 0.99, extra.gene = 1, Walsh.only = F, verbose = F){
  # Post-process NPScan gene-level prediction files: classify each predicted
  # cluster as KU (contains a "sufficient" core SM enzyme domain), semiUU
  # (NP-typed genes but no core enzyme domain), or UU (Cluster.NP.type 'UU'),
  # then write per-class colored xlsx reports and accumulate a cluster.info
  # summary table across all input genomes.
  #
  # Args:
  #   files: NPScan_GenePred2_* csv files (one per genome).
  #   tag: prefix inserted into output file names.
  #   remove.TF / remove.transporter: exclude transcription factors /
  #     transporters when counting cluster size.
  #   meta.file: tab-separated genome metadata table.
  #   cluster.info.file: xlsx file receiving the cluster summary.
  #   length.cutoff / length.cutoff.max: min/max cluster size (genes).
  #   p.cutoff: minimum Cluster.p.max required for a cluster to be kept.
  #   extra.gene: number of flanking genes included around each cluster.
  #   Walsh.only: keep only clusters with under-studied oxidoreductases.
  #   verbose: print diagnostic gene lists.
  # History:
  # 20160508
  # 20160517: add semi-UU (without condensation and Keto-synthase),
  # and highlight special protein types
  # "radical.SAM"/'(FAD|Flavin)' & "oxygenase"/ IPR005123 - Fe(II) oxygenase, IPR014030/IPR014031 - KS domain, IPR001242 -- condensation
  # 20160526: add length.cutoff.max and Walsh.only
  # note that the cutoffs only applies to UUs and semiUUs
  cluster.info = c()
  for (f in files){
    dat = read.csv(f);
    if (0){ # dead code kept for reference: old selection purely by Cluster.NP.type
      idx = dat$Cluster.NP.type == 'UU' & dat$Cluster.size >= length.cutoff & dat$Cluster.size <= length.cutoff.max & dat$Cluster.p.max >=p.cutoff;
      idx = dat$Cluster.ID %in% unique(dat$Cluster.ID[idx])
      UU = dat[extend.index(which(idx), n = extra.gene),];
      idx = !(dat$Cluster.NP.type %in% c('UU', 'UU(NPG2)')) & dat$Cluster.NP.type!=''
      idx = dat$Cluster.ID %in% unique(dat$Cluster.ID[idx])
      KU = dat[extend.index(which(idx), n = extra.gene),]
    }
    # Glyco-type UU candidates: clusters typed 'UU(NPG2)' passing size/probability cutoffs.
    idx = dat$Cluster.NP.type == 'UU(NPG2)' & dat$Cluster.size >= length.cutoff & dat$Cluster.size <= length.cutoff.max & dat$Cluster.p.max >=p.cutoff
    idx = dat$Cluster.ID %in% unique(dat$Cluster.ID[idx])
    UU.NNPG2 = dat[extend.index(which(idx), n = extra.gene), ];
    # Core SM enzyme domains = IPR ids marked 'SufficientFor' in the manual
    # annotation table (hard-coded path; re-read on every iteration).
    file = '/Users/yongli/Dropbox/NPGC/NPGCquery_data/SM.domains_manualAnno_v2.xlsx'
    SM.doms = read.xlsx2(file,sheetIndex = 1, as.is =T)
    core.enzyme.IPR = paste(SM.doms$id[SM.doms$SufficientFor!=''], collapse = '|')
    # Gene-level flags: semiUU = NP-typed but no core enzyme domain; KU = has one.
    is.semiUU = as.character(dat$Gene.NP.type)!='' & regexpr(core.enzyme.IPR,as.character(dat$Domains), perl = T)<0
    is.KU = as.character(dat$Gene.NP.type)!='' & regexpr(core.enzyme.IPR,as.character(dat$Domains), perl = T)>0
    # Aggregate gene flags to cluster level (TRUE if any gene in cluster is flagged).
    is.semiUU = by(is.semiUU, INDICES = dat$Cluster.ID, FUN = sum) > 0
    is.KU = by(is.KU, INDICES = dat$Cluster.ID, FUN = sum) >0
    is.semiUU = is.semiUU & ! is.KU
    is.UU = by(dat$Cluster.NP.type == 'UU', INDICES = dat$Cluster.ID, FUN = sum) > 0
    if (verbose){
      cat('\nCore enzymes motifs but not core enzyme domains:\n')
      print(dat[as.character(dat$Gene.NP.type)=='' & regexpr(core.enzyme.IPR,as.character(dat$Domains), perl = T)>0,])
      cat('\nCore enzymes (motifs) outside clusters:\n')
      print(dat[regexpr(core.enzyme.IPR,as.character(dat$Domains), perl = T)>0 & dat$Cluster.ID == 0,])
    }
    if (Walsh.only){ # only select clusters that contain understudied oxidoreductase
      is.Walsh = regexpr('(radical.SAM|IPR005123)',as.character(dat$Domains), perl = T)>0 | (regexpr('(FAD|Flavin)',as.character(dat$Domains), perl = T)>0 & regexpr('oxygenase',as.character(dat$Domains), perl = T)>0)
      is.Walsh = by(is.Walsh, INDICES = dat$Cluster.ID, FUN = sum) >0
      is.KU = is.KU & is.Walsh;
      is.semiUU = is.semiUU & is.Walsh;
      is.UU = is.UU & is.Walsh;
    }
    # Optionally exclude TFs / transporters from the cluster-size count.
    is.enzyme = vector(mode = 'logical', length = nrow(dat)) | T
    if (remove.TF)
      is.enzyme = is.enzyme & !regexpr('Transcription factor',as.character(dat$Domains), perl = T, ignore.case = T)>0
    if (remove.transporter)
      is.enzyme = is.enzyme & !regexpr('Major facilitator superfamily|transporter',as.character(dat$Domains), perl = T, ignore.case = T)>0
    cluster.size = by(is.enzyme, dat$Cluster.ID, sum);
    # Size/probability cutoffs apply only to UU and semiUU clusters (not KU).
    to.keep = cluster.size >= length.cutoff & cluster.size <= length.cutoff.max & by(dat$Cluster.p.max >= p.cutoff, INDICES = dat$Cluster.ID, FUN = sum) > 0
    # Cluster ID 0 (inter-cluster genes) is excluded via setdiff(..., 0);
    # extend.index() adds extra.gene flanking rows around each cluster.
    UU = dat[extend.index(which(dat$Cluster.ID %in% setdiff(as.numeric(names(is.KU)[is.UU & to.keep]),0)), n = extra.gene),];
    semiUU = dat[extend.index(which(dat$Cluster.ID %in% setdiff(as.numeric(names(is.KU)[is.semiUU & to.keep]),0)), n = extra.gene),];
    KU = dat[extend.index(which(dat$Cluster.ID %in% setdiff(as.numeric(names(is.KU)[is.KU]),0)), n = extra.gene),];
    cat('KU cluster:', sum(is.KU & to.keep), '\nsemi-UU cluster:', sum(is.semiUU & to.keep), '\nUU:', sum(is.UU & to.keep), '\n')
    # is.KU1 = regexpr('(PK|NRP)',as.character(dat$Gene.NP.type), perl = T) < 0 & regexpr('(IPR014030|IPR014031|IPR001242)',as.character(dat$Domains), perl = T)>0
    # write.csv(UU, row.names = F, file = paste('UUselect_', f, sep=''))
    # write.csv(UU.NNPG2, row.names = F, file = paste('UUselect.NPG2_', f, sep=''))
    # write.csv(KU, row.names = F, file = paste('KUselect_', f, sep=''))
    # Write per-class xlsx reports; xlsx.color.NPscan() adds fills/borders
    # into a 'colored_'-prefixed copy, then the uncolored original is removed.
    if (nrow(UU)){
      write.xlsx2(UU, sheetName = 'UU', row.names = F, file = paste('UUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
      xlsx.color.NPscan(paste('UUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
      unlink(paste('UUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
    }
    if (nrow(UU.NNPG2)){
      write.xlsx2(UU.NNPG2, sheetName = 'UU.NPG2', row.names = F, file = paste('GlycoUUSelect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
      xlsx.color.NPscan(paste('GlycoUUSelect_',tag, sub('it.*.csv', '.xlsx', f), sep=''))
      unlink(paste('GlycoUUSelect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
    }
    if (nrow(semiUU)){
      write.xlsx2(semiUU, sheetName = 'semiUU', row.names = F, file = paste('semiUUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
      xlsx.color.NPscan(paste('semiUUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
      unlink(paste('semiUUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
    }
    if (nrow(KU)){
      write.xlsx2(KU, sheetName = 'KU', row.names = F, file = paste('KUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
      xlsx.color.NPscan(paste('KUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
      unlink(paste('KUselect_', tag, sub('it.*.csv', '.xlsx', f), sep=''))
    }
    # Locate this genome's metadata row by matching the file-name stem against
    # either genome.ID or species (spaces replaced by underscores).
    meta = read.csv(meta.file, sep = '\t', as.is = T)
    j = which(sub(' ', '_', meta$genome.ID) == sub('NPScan_GenePred2_(.+)it.+ie.+$', '\\1', f) |
                sub(' ', '_', meta$species) == sub('NPScan_GenePred2_(.+)it.+ie.+$', '\\1', f))
    in.house.genome = '';
    jgi.genome = ''
    if (meta$Source[j] == 'JGI'){
      jgi.genome = meta$genome.ID[j];
    }
    if (meta$Source[j] != 'JGI'){
      # In-house genomes: build a "key = 'value'" string of file locations,
      # prefixing each file with its folder when one is given.
      idx = c("gff.file", "iprscan.tab.file", "pep.fasta.file", "DNA.file", "gene.definition", "proteinID") # c(3,4,6,8,9,10)
      for (i in c("gff.file", "iprscan.tab.file", "pep.fasta.file", "DNA.file")){
        meta[meta$folder!='',i] = paste(meta$folder[meta$folder!=''], meta[meta$folder!='',i], sep='/')
      }
      in.house.genome = paste(idx, ' = \'', meta[j,idx], '\'', sep='', collapse = '; ')
    }
    # Append one summary row per UU / semiUU cluster (first and last protein
    # of each cluster, located via which.first.by / which.last.by).
    if (nrow(UU)){
      cluster.locs = data.frame(first = which.first.by(UU$Cluster.ID), last = which.last.by(UU$Cluster.ID))
      cluster.locs = cluster.locs[setdiff(rownames(cluster.locs), '0'),]
      cluster.info = rbind(cluster.info,
                           data.frame(ClusterID = paste('UU',UU$Cluster.ID[cluster.locs$first], sep=''), # , meta$genome.ID[j], '|'
                                      type = 'UU',
                                      'GenBank Genome' = '',
                                      'JGI Genome' = jgi.genome,
                                      'Same Genome' = '',
                                      'In House Genome'= in.house.genome,
                                      species = sub('(\\S+ \\S+).*$','\\1', meta$species[j]),
                                      'First Protein' = UU$Gene[cluster.locs$first],
                                      'Last Protein' = UU$Gene[cluster.locs$last]))
    }
    if (nrow(semiUU)){
      cluster.locs = data.frame(first = which.first.by(semiUU$Cluster.ID), last = which.last.by(semiUU$Cluster.ID))
      cluster.locs = cluster.locs[setdiff(rownames(cluster.locs), '0'),]
      cluster.info = rbind(cluster.info,
                           data.frame(ClusterID = paste('semiUU', semiUU$Cluster.ID[cluster.locs$first], sep=''), # meta$genome.ID[j], '|',
                                      type = 'semiUU',
                                      'GenBank Genome' = '',
                                      'JGI Genome' = jgi.genome,
                                      'Same Genome' = '',
                                      'In House Genome'= in.house.genome,
                                      species = sub('(\\S+ \\S+).*$','\\1', meta$species[j]),
                                      'First Protein' = semiUU$Gene[cluster.locs$first],
                                      'Last Protein' = semiUU$Gene[cluster.locs$last]))
    }
    ### 20160527: output cluster info
    # ClusterID	GenBank Genome	JGI Genome	Same Genome	In House Genome	species	First Protein	Last Protein
    # Ca157		iprscan.tab.file = 'CA_K87_contig_Anidulans.faa.tsv'; gff.file = 'CA_K87_contig_Anidulans.gff'; DNA.file = 'CA_K87_contig.fasta'; pep.fasta.file = 'CA_K87_contig_Anidulans.faa'; gene.definition = 'transcript'; proteinID = 'ID'	Calcarisporium arbuscula	g7062.t1	g7069.t1
    # Afu1g17740	Aspfu1		Aspergillus fumigatus	Afu1g17700	Afu1g17750
  }
  # Prefix cluster ids with the genome id and write the full / per-class summaries.
  cluster.info$ClusterID = paste(cluster.info$JGI.Genome, cluster.info$GenBank.Genome, '.', cluster.info$ClusterID, sep='')
  write.xlsx2(cluster.info, row.names = F, sheetName = 'cluster.info', file = cluster.info.file)
  write.xlsx2(cluster.info[cluster.info$type == 'semiUU',], row.names = F, sheetName = 'cluster.info', file = sub('.xls', '_semiUU.xls', cluster.info.file))
  write.xlsx2(cluster.info[cluster.info$type == 'UU',], row.names = F, sheetName = 'cluster.info', file = sub('.xls', '_UU.xls', cluster.info.file))
}
xlsx.color.NPscan <- function(xlsx.file = 'nidulans.deepAnno.all.xlsx', out.file=paste('colored_', xlsx.file, sep='')){
  # Yong Fuga Li, 20141004
  # Color an NPScan result spreadsheet in two passes:
  #   1) purple fill for semi-UU genes (selected by FUN.select.semiUU.NPScan);
  #   2) blue borders for "Walsh" oxidoreductase genes (FUN.select.Walsh.NPScan).
  # The second pass reads the first pass's output, so both write to out.file.
  xlsx.color(xlsx.file = xlsx.file, FUN.select = FUN.select.semiUU.NPScan, fill.color = 'purple', out.file = out.file, na.strings='|')
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.Walsh.NPScan, border.color = 'blue', out.file = out.file, na.strings='|')
}
FUN.select.semiUU.NPScan <- function(x){
  # Cell-selection predicate for xlsx.color(): flag the 'Gene.NP.type' and
  # 'Domains' cells of semi-UU genes, i.e. genes typed PK/NRP that lack the
  # core keto-synthase (IPR014030/IPR014031) and condensation (IPR001242)
  # domains.  Returns a logical matrix with the same dimensions as x.
  is.pk.nrp <- regexpr('(PK|NRP)', as.character(x$Gene.NP.type), perl = TRUE) >= 0
  lacks.core <- regexpr('(IPR014030|IPR014031|IPR001242)', as.character(x$Domains), perl = TRUE) < 0
  sel <- matrix(FALSE, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x))
  sel[, 'Gene.NP.type'] <- is.pk.nrp & lacks.core
  sel[, 'Domains'] <- sel[, 'Gene.NP.type']
  sel
}
FUN.select.Walsh.NPScan <- function(x){
  # Cell-selection predicate for xlsx.color(): flag genes carrying
  # under-studied oxidoreductase domains — radical SAM or Fe(II) oxygenase
  # (IPR005123), or a FAD/Flavin domain combined with an oxygenase domain.
  # Both the 'Gene.NP.type' and 'Domains' cells of matching rows are flagged.
  doms <- as.character(x$Domains)
  is.walsh <- regexpr('(radical.SAM|IPR005123)', doms, perl = TRUE) > 0 |
    (regexpr('(FAD|Flavin)', doms, perl = TRUE) > 0 &
       regexpr('oxygenase', doms, perl = TRUE) > 0)
  sel <- matrix(FALSE, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x))
  sel[, 'Gene.NP.type'] <- is.walsh
  sel[, 'Domains'] <- is.walsh
  sel
}
xlsx.color.mergedDeepAnno <- function(xlsx.file = 'nidulans.deepAnno.all.xlsx', out.file=paste('colored_', xlsx.file, sep='')){
  # Yong Fuga Li, 20141004
  # Style a merged deep-annotation workbook in four passes:
  #   1) grey borders on every cell; 2) Arial 12 on every cell;
  #   3) bold font on the header row; 4) purple fill on KU cells and
  #      blue borders on "Walsh" oxidoreductase cells.
  # Each pass reads the previous pass's output, so all write to out.file.
  xlsx.color(xlsx.file = xlsx.file, include.header=T, FUN.select = function(x){y = matrix(T, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x)); y},
             border.color = 'grey',
             out.file = out.file, na.strings='|') # change global style
  xlsx.color(xlsx.file = out.file, include.header=T, FUN.select = function(x){y = matrix(T, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x)); y},
             font=list(color = NULL, heightInPoints=12, name='Arial', isItalic=F, isBold=F, isStrikeout=F, underline=NULL),
             out.file = out.file, na.strings='|') # change global style
  xlsx.color(xlsx.file = out.file, header = F,include.header=F, FUN.select = function(x){y = matrix(F, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x)); y[1,]=T; y},
             font=list(color = NULL, heightInPoints=12, name='Arial', isItalic=F, isBold=T, isStrikeout=F, underline=NULL),
             out.file = out.file, na.strings='|') # bold headers
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.KU.mergedDeepAnno, fill.color = 'purple', out.file = out.file, na.strings='|')
  xlsx.color(xlsx.file = out.file, FUN.select = FUN.select.Walsh.mergedDeepAnno, border.color = 'blue', out.file = out.file, na.strings='|')
}
FUN.select.KU.mergedDeepAnno <- function(x){
  # Cell-selection predicate for xlsx.color(): flag KU (known-cluster) genes.
  # The 'IPR_Domain_Annotation' cell is flagged when is.KU.ipr() finds a core
  # SM-type IPR domain; the 'Manual_Annotation' cell when is.KU() recognizes
  # the manual annotation text.
  sel <- matrix(FALSE, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x))
  sel[, 'IPR_Domain_Annotation'] <- is.KU.ipr(x[, 'IPR_Domain_Annotation'])
  sel[, 'Manual_Annotation'] <- is.KU(x[, 'Manual_Annotation'])
  sel
}
FUN.select.Walsh.mergedDeepAnno <- function(x){
  # Cell-selection predicate for xlsx.color(): flag the
  # 'IPR_Domain_Annotation' cells of genes with under-studied oxidoreductase
  # domains — radical SAM or Fe(II) oxygenase (IPR005123), or a FAD/Flavin
  # domain combined with an oxygenase domain.
  anno <- as.character(x$IPR_Domain_Annotation)
  is.walsh <- regexpr('(radical.SAM|IPR005123)', anno, perl = TRUE) > 0 |
    (regexpr('(FAD|Flavin)', anno, perl = TRUE) > 0 &
       regexpr('oxygenase', anno, perl = TRUE) > 0)
  sel <- matrix(FALSE, nrow = nrow(x), ncol = ncol(x), dimnames = dimnames(x))
  sel[, 'IPR_Domain_Annotation'] <- is.walsh
  sel
}
is.KU.ipr <- function(txt){
  # TRUE for each element of txt that mentions an IPR domain mapped to at
  # least one of the first 10 SM types in the IPR -> SM-type matrix built by
  # IPR2SMtype().  Returns a logical vector the same length as txt.
  ipr2sm <- IPR2SMtype()
  core.ids <- rownames(ipr2sm)[rowSums(ipr2sm[, 1:10]) > 0]
  regexpr(paste(core.ids, collapse = '|'), txt, perl = TRUE) > 0
}
merge.deepAnno <- function(clusterIDs = c('semiUU174', 'semiUU204', 'semiUU559',
                                          'UU1', 'UU10', 'UU29', 'UU48'),
                           out.file = 'merged.xlsx',
                           root = '/Users/yongli/Dropbox/NPGC/NPGCquery_data'){
  # Yong Fuga Li, 20160604
  # Merge the colored per-cluster deep-annotation workbooks
  # ('colored_<id>.xlsx') into one workbook with a sheet per cluster,
  # keeping only the rows between the two 'Boundary' markers, then set
  # column widths / wrapped text and re-apply the mergedDeepAnno coloring.
  # NOTE(review): setwd(root) changes the working directory without
  # restoring it afterwards.
  require(xlsx)
  require('XLConnect')
  setwd(root)
  append=F
  for (i in clusterIDs){
    f = paste('colored_', i, '.xlsx', sep='');
    dat = read.xlsx2(f, sheetIndex = 1)
    # Rows between the first and second 'Boundary' markers delimit the cluster.
    i.range = which(dat$cluster.boundary == 'Boundary')
    dat.out = dat[i.range[1]:i.range[2], c('name', 'length', 'Existing.Anno', 'domains')];
    # Clean up the free-text annotation: strip boilerplate and extract the
    # quoted "description:" field when present.
    dat.out$Existing.Anno = sub('Uncharacterized ORF; ', '', dat.out$Existing.Anno)
    dat.out$Existing.Anno = sub('^Ortholog of .*$', '', dat.out$Existing.Anno)
    dat.out$Existing.Anno = sub('^.*description:\\"([^\\"]+)\\".*$', '\\1', dat.out$Existing.Anno)
    dat.out$name = sub('transcript:','', dat.out$name)
    colnames(dat.out) = c('Gene', 'Length', 'Manual_Annotation', 'IPR_Domain_Annotation')
    # First sheet creates the file; subsequent sheets are appended.
    write.xlsx2(dat.out, file = out.file, sheetName = i, append = append, row.names = F)
    append = T
  }
  wb <- loadWorkbook(out.file, create = TRUE)
  cs <- createCellStyle(wb)
  # Specify to wrap the text
  setWrapText(cs, wrap = TRUE)
  for (cID in clusterIDs){
    # Column widths are in 1/256ths of a character width (XLConnect convention).
    setColumnWidth(wb,sheet=cID,column=1,width=256*9)
    setColumnWidth(wb,sheet=cID,column=2,width=256*8)
    setColumnWidth(wb,sheet=cID,column=3,width=256*8*4)
    setColumnWidth(wb,sheet=cID,column=4,width=256*8*16)
    # Wrap text in the two annotation columns.
    for (r in 1:getLastRow(wb,cID)){
      setCellStyle(wb, sheet = cID, row = r, col = 3,
                   cellstyle = cs)
      setCellStyle(wb, sheet = cID, row = r, col = 4,
                   cellstyle = cs)
    }
    saveWorkbook(wb)
  }
  xlsx.color.mergedDeepAnno(out.file)
  # in excels, select all tabs together and then print "entire workbook" as pdf in landscape mode with 65% size.
}
get.NPScan.nchr <- function(NPScan.files = dir(pattern = 'NPScan_GenePred2_.*_viterbi.csv')){
  # get the number of chromosomes in a genome, 20160802
  # For each NPScan gene-prediction csv, count the distinct values of its
  # 'chr' column.  Returns a numeric vector named by file path.
  vapply(stats::setNames(NPScan.files, NPScan.files),
         function(f) length(unique(read.csv(f)$chr)),
         numeric(1))
}
|
4c6d819a5b50b4a302285d2956b1285104b5915a
|
d7c90718e8ddbad5f4b5be582f05177dfabc7c61
|
/scripts/global.R
|
d84da40d6cc36ab366d50580d8f1a3b14192ba86
|
[
"MIT"
] |
permissive
|
BPSTechServices/ARPA-Shootings-and-COVID-19
|
93cb56236e1e7d41887204d37f95e07d362f77d6
|
720926f72e8d15f1e9dc9e846baa12a8a9b0d975
|
refs/heads/main
| 2023-05-26T06:22:42.220017
| 2021-06-11T18:46:35
| 2021-06-11T18:46:35
| 375,158,384
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 675
|
r
|
global.R
|
library(tidyverse)
library(tidycensus)
library(sf)
library(mapview)
library(tigris)
library(geojsonsf)
library(areal)
library(effectsize)
library(corrplot)
library(GGally)
library(factoextra)
library(biscale)
library(cowplot)
library(extrafont) #; font_import()
library(viridis)
library(tmaptools)
library(tmap)
tmap_mode("view")
options(
tigris_use_cache = T,
tigris_class = "sf"
)
range01 <- function(x, ...){(x - min(x, ...)) / (max(x, ...) - min(x, ...))}
sfc_as_cols <- function(x, names = c("lon", "lat")) {
  # Append the point coordinates of an sf object as plain columns (default
  # "lon"/"lat"); pre-existing columns with those names are dropped first so
  # the new coordinate columns replace them.
  coords <- setNames(tibble::as_tibble(sf::st_coordinates(x)), names)
  kept <- x[, !names(x) %in% names]
  dplyr::bind_cols(kept, coords)
}
|
e5fd68c514c31fc01e1b65c0d32d7bbdb7c94d9e
|
e34e41af9dbaf18c572961627bef058672c8a785
|
/R/figures-maturity-ogive.R
|
f67eb625d3a5b44fda793efd3c7c84172a5986c5
|
[] |
no_license
|
aaronmberger-nwfsc/hake-assessment
|
a14bcaef0babafe7c20ab81a17100c799a39bc3c
|
ae469a25f7394cad97757e98b70dfa4fab35fc73
|
refs/heads/master
| 2022-02-19T15:06:04.240995
| 2022-02-12T13:43:13
| 2022-02-12T13:43:36
| 79,608,293
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,441
|
r
|
figures-maturity-ogive.R
|
maturity.ogive.figure <- function(model, useyears = 1975:(assess.yr - 1)){
  # Two-panel maturity figure for the hake assessment:
  #   top    - fraction mature at age, North vs South of Point Conception,
  #            with point size and printed number showing ovary sample size;
  #   bottom - mean weight-at-age (wtatage Fleet 1) and mean fecundity
  #            (wtatage Fleet -2), averaged over 'useyears'.
  # Relies on the globals `maturity.samples` and `assess.yr`.
  # maturity.samples is created by data-tables.r
  # which reads
  # maturity.samples.file <- "hake-maturity-data.csv"
  mat1 <- maturity.samples
  Amax <- 15 # plus group used in calculations below
  # subset for North and South of Point Conception (34.44 degrees N)
  mat1.N <- mat1[mat1$N_or_S_of_34.44 == "N",]
  mat1.S <- mat1[mat1$N_or_S_of_34.44 == "S",]
  # vector of ages and another to store maturity values
  # (starting at 0 as required by wtatage although no ovary samples for age 0 fish)
  age.vec <- 0:20
  mat.N.vec <- NA * age.vec
  mat.S.vec <- NA * age.vec
  nsamp.N.vec <- NA * age.vec
  nsamp.S.vec <- NA * age.vec
  # loop over ages
  for(a in 1:Amax){
    # subset for a given age
    mat1.N.age <- mat1.N[!is.na(mat1.N$Age) & mat1.N$Age == a,]
    mat1.S.age <- mat1.S[!is.na(mat1.S$Age) & mat1.S$Age == a,]
    # subset plus group for age Amax
    if(a==Amax){
      mat1.N.age <- mat1.N[!is.na(mat1.N$Age) & mat1.N$Age >= a,]
      mat1.S.age <- mat1.S[!is.na(mat1.S$Age) & mat1.S$Age >= a,]
    }
    # sample size
    nsamp.N <- nrow(mat1.N.age)
    nsamp.S <- nrow(mat1.S.age)
    # calculate y-values (fraction mature)
    y.N <- mean(mat1.N.age$Functional_maturity, na.rm=TRUE)
    y.S <- mean(mat1.S.age$Functional_maturity, na.rm=TRUE)
    # store maturities
    mat.N.vec[age.vec == a] <- y.N
    mat.S.vec[age.vec == a] <- y.S
    nsamp.N.vec[age.vec == a] <- nsamp.N
    nsamp.S.vec[age.vec == a] <- nsamp.S
    # apply plus-group values to all ages above Amax
    if(a==Amax){
      mat.N.vec[age.vec >= a] <- y.N
      nsamp.N.vec[age.vec >= a] <- nsamp.N
    }
  }
  nsamp.N.vec2 <- rep(0, 21) # vector similar to nsamp.N.vec but going to 21 instead of 15
  for(a in 0:20){
    nsamp.N.vec2[a + 1] <- sum(!is.na(mat1.N$Age) & mat1.N$Age == a)
  }
  # Mean weight-at-age (Fleet 1) and fecundity (Fleet -2) over the chosen
  # years; the "^\\d" grep selects the numeric age columns of wtatage.
  avg.wt <- apply(model$wtatage[model$wtatage$Fleet == 1 &
                                  model$wtatage$Yr %in% useyears,
                                grep("^\\d", colnames(model$wtatage))],
                  2,
                  mean)
  fec.vec.new <- apply(model$wtatage[model$wtatage$Fleet == -2 &
                                       model$wtatage$Yr %in% useyears,
                                     grep("^\\d", colnames(model$wtatage))],
                       2,
                       mean)
  # define colors
  col.N <- rgb(0.3, 0.3, 1, 0.8)
  col.S <- rgb(1, 0, 0, 0.5)
  # create empty plot with axes and grid lines
  par(mar = c(2, 4, 1, 1), mfrow = c(2, 1), oma = c(2, 0, 0, 0))
  plot(0, xlim=c(1, 20),
       ylim = c(0, 1.1),
       type = "n",
       xlab = "",
       ylab = "Fraction mature",
       axes = FALSE)
  lines(0:20, mat.N.vec, lwd = 2, col = col.N)
  lines(0:20, mat.S.vec, lwd = 2, col = col.S)
  axis(1, at = 1:14)
  axis(1, at = 15, label = "15+")
  axis(2, las = 1)
  abline(h = seq(0, 1, 0.2), col = "grey")
  # loop over ages
  for(a in 1:Amax){
    # add points to plot
    nsamp.N <- nsamp.N.vec[age.vec == a]
    nsamp.S <- nsamp.S.vec[age.vec == a]
    y.N <- mat.N.vec[age.vec == a]
    y.S <- mat.S.vec[age.vec == a]
    # point area scales with sample size; labels moved below/above when small
    points(a, y.N, cex = 0.3 * sqrt(nsamp.N), col = 1, bg = col.N, pch = 21)
    points(a, y.S, cex = 0.3 * sqrt(nsamp.S), col = 1, bg = col.S, pch = 21)
    text(a, y.N, cex = 0.8, labels = nsamp.N, pos = if(nsamp.N < 60) 1 else NULL)
    text(a, y.S, cex = 0.8, labels = nsamp.S, pos = if (nsamp.S < 60) 3 else NULL,
         col = rgb(1, 0, 0, 0.5))
  }
  # add legend
  legend("bottomright", legend=c("South of 34.44\u00B0", "North of 34.44\u00B0"),
         #title="Size/number indicates sample size")
         bg = "white", box.col = "grey", col = 1, pt.bg = c(col.S, col.N), pch = 21, pt.cex = 2)
  box()
  # second plot
  plot(0, type = "l", lwd = 3, xlim = c(1, 20),
       ylim = c(0, max(c(avg.wt, fec.vec.new)) * 1.05), #yaxs='i',
       xlab = "", ylab = "Weight (kg) or fecundity", axes = FALSE)
  axis(1, at = 1:20)
  axis(2, las = 1)
  abline(h = seq(0, 1, 0.2), col = "grey")
  # [-1] drops the age-0 column so lines start at age 1
  lines(1:20, avg.wt[-1], lwd = 2, col = 3)
  lines(1:20, fec.vec.new[-1], lwd = 4, col = rgb(0.8, 0, 0.8, 0.8))
  legend("bottomright", col = c(3, rgb(.8,0,0.8),1), lwd = c(2, 4, 4),
         bg = "white", box.col = "grey",
         legend = c("Mean weight at age",
                    paste0("Mean fecundity (maturity at age x weight at age)")))
  box()
  mtext(side = 1, line = 0, outer = TRUE, "Age")
}
|
fddfad51babb697c42b16d5e1e766b364637739c
|
6557f5c17490476c54eff0ebf1270773060afed9
|
/man/createCutoffsDF.Rd
|
9f5c5767156cdafa99604df4a96283e218a94064
|
[] |
no_license
|
cran/QuantileGradeR
|
8db9be8aad76e905ad7ecf669bc2ab719cc216f1
|
5a1903963062f97084baad15ddf549ba8b15516a
|
refs/heads/master
| 2021-01-09T06:18:00.827284
| 2017-02-06T20:22:48
| 2017-02-06T20:22:48
| 80,955,265
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,256
|
rd
|
createCutoffsDF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createCutoffsDF.R
\name{createCutoffsDF}
\alias{createCutoffsDF}
\title{Create Cutoffs Dataframe}
\usage{
createCutoffsDF(X, z, gamma, type)
}
\arguments{
\item{X}{Numeric matrix of size \code{n} x \code{p}, where \code{n} is the
number is restaurants to be graded and \code{p} is the number of inspections
to be used in grade assignment. Entry \code{X[i,j]} represents the
inspection score for the \code{i}th restaurant in the \code{j}th most recent
inspection.}
\item{z}{Character vector of length \code{n} representing ZIP codes (or other
subunits within a jurisdiction). \code{z[i]} is the ZIP code corresponding
to the restaurant with inspection scores in row \code{i} of \code{X}.}
\item{gamma}{Numeric vector representing absolute grade cutoffs or quantiles,
depending on \code{type} variable value. Entries in gamma should be
increasing, with \code{gamma[1] <= gamma[2]} etc (this is related to the
"Warning" section and larger scores being associated with higher risk). If
\code{type = "perc"} or \code{type = "perc.resolve.ties"}, gamma values
represent quantiles and should take on values between 0 and 1.}
\item{type}{Character string that is one of \code{"unadj"},
\code{"perc"}, or \code{"perc.resolve.ties"}, and that indicates the grading
algorithm to be implemented.}
}
\description{
\code{createCutoffsDF} is an internal function, which creates a dataframe with
identical cutoff values for all ZIP codes (if \code{type = "unadj"}), or
quantile cutoffs in a ZIP code (if \code{type = "perc"} or \code{type =
"perc.resolve.ties"}). This function is called extensively by the
\code{findCutoffs} function.
}
\details{
\code{createCutoffsDF} takes in a matrix of restaurants' scores and a vector
corresponding to restaurants' ZIP codes, and outputs a data frame of cutoff
scores to be used in grade classification. The returned ZIP code cutoff data
frame has one row for each unique ZIP code and has \code{(length(gamma)+1)}
columns, corresponding to one column for the ZIP code name, and
\code{(length(gamma))} cutoff scores separating the \code{(length(gamma)+1)}
grading categories. Across each ZIP code's row, cutoff scores increase and we
assume, as in the King County (WA) case, that greater risk is associated with
larger inspection scores. (If scores are decreasing in risk, users should
transform inspection scores before utilizing functions in the
\code{QuantileGradeR} package with a simple function such as \code{f(score) =
- score}.)
The way in which cutoff scores are calculated for each ZIP code depends on the
value of the \code{type} variable. The \code{type} variable can take one of
three values (see later).
}
\section{Modes}{
\code{type = "unadj"} creates a ZIP code cutoff data frame
with the same cutoff scores (meaningful values in a jurisdiction's
inspection system that are contained in the vector \code{gamma}) for all ZIP
codes. This ZIP code data frame can then be used to carry out "unadjusted"
grading, in which a restaurant's most recent routine inspection score is
compared to these cutoffs.
\code{type = "perc"} takes in a vector of quantiles,
\code{gamma}, and returns a data frame of the scores in each ZIP code
corresponding to these quantiles (using the "Nearest Rank" definition of
quantile).
\code{type = "perc.resolve.ties"} takes in a vector of
quantiles, \code{gamma}, and instead of returning (for B/C cutoffs, for
example) the scores in each ZIP code that result in \emph{at least}
(\code{gamma[2]} x 100)\% of restaurants in the ZIP code scoring less than
or equal to these cutoffs, \code{type = "perc.resolve.ties"} takes into
account the fact that ties exist in ZIP codes. Returned scores for A/B
cutoffs are those that result in the \emph{closest} percentage of
restaurants in the ZIP code scoring less than or equal to the A/B cutoff to
the desired percentage, (\code{gamma[1]} x 100)\%. Similarly, B/C cutoffs
are the scores in the ZIP code that result in the \emph{closest} percentage
of restaurants in the ZIP code scoring less than or equal to the B/C cutoff
and more than the A/B cutoff to the desired percentage, (\code{(gamma[2] -
gamma[1])} x 100)\%.
}
\keyword{internal}
|
e3d0e4d27b1382dd75fd331e985c0ef1a84efb9b
|
18b55cde3cc1050831d7567dd7bee9e5c1a12b82
|
/R/hello.R
|
7fb1822bef9318f5d311ff01f01e40e00c86dd8a
|
[] |
no_license
|
stla/AOV2F
|
3ddbfc558a6843ff2a6259d9a7a4a0444728cc63
|
6c3b3433264805d0f668404bf8b2cd7e38971780
|
refs/heads/master
| 2020-04-30T22:08:20.712223
| 2019-03-22T09:33:24
| 2019-03-22T09:33:24
| 177,110,998
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,288
|
r
|
hello.R
|
#' Run the two-way ANOVA child.
#'
#' @description Run knitr child for two-way ANOVA; to be used as inline code in
#' a knitr document.
#'
#' @param dat dataframe
#' @param factors the column names or indices of the factors
#' @param resp the column name or index of the response
#' @param factor.names the names of the factor for the report
#' (default value is the column names)
#' @param resp.name the name of the response for the report
#' (default value is the column name)
#' @param factor1.ref the reference level of factor1
#' @param factor2.ref the reference level of factor2
#' @param title study title
#'
#' @export
#' @importFrom knitr knit_child
childANOVA2F <- function(dat, factors=c(1,2), resp=3,
                         factor.names=names(dat[,factors]),
                         resp.name=names(dat[,resp,drop=FALSE]),
                         factor1.ref=levels(dat[,factors[1]])[1],
                         factor2.ref=levels(dat[,factors[2]])[1],
                         title="title") {
  # Coerce both factor columns to factors if needed, then drop unused levels.
  if(!is.factor(dat[, factors[1]])){
    dat[, factors[1]] <- factor(dat[, factors[1]])
  }
  if(!is.factor(dat[, factors[2]])){
    dat[, factors[2]] <- factor(dat[, factors[2]])
  }
  dat <- droplevels(dat)
  # Copy every formal argument (as evaluated in this call) into a fresh
  # environment so the child Rmd can reference them by name.
  myenv <- new.env()
  args <- formals(childANOVA2F)
  for(arg in names(args)) assign(arg, get(arg), envir=myenv)
  knitr::knit_child(
    system.file(package = "AOV2F", "knitr", "AOV2F_child.Rmd"),
    envir = myenv) # TODO: also offer a plain knit (full document) option
}
#' Simulate a two-way ANOVA design with random effects
#' @description Simulate a two-way ANOVA dataset with random effects.
#' @param I number of levels of first factor
#' @param J number of leveles of second factor
#' @param Kmin minimum number of repeats
#' @param Kmax maximum number of repeats
#' @param p numeric vector giving the probabilities to sample between
#' \code{Kmin} and \code{Kmax}
#' @param mu overall mean of the response
#' @param sigmaP standard deviation of first factor
#' @param sigmaO standard deviation of second factor
#' @param sigmaPO standard deviation of interaction
#' @param sigmaE residual standard deviation
#' @param factor.names names of the two factors
#' @param resp.name name of the response
#' @param keep.intermediate keep intermediate calculations in the output
#' @return A dataframe.
#'
#' @examples
#' SimDataAV2(I=3, J=2, Kmin=0, Kmax=2, p=c(0.1,0.2))
#'
#' @export
#' @importFrom stats rnorm
SimDataAV2 <- function(I, J, Kmin, Kmax, p=NULL, mu=0, sigmaP=1, sigmaO=1,
                       sigmaPO=1, sigmaE=1, factor.names=c("Operator","Part"),
                       resp.name="y", keep.intermediate=FALSE){
  # Draw the random effects once per Part (Pi), per Operator (Oj) and per
  # Part x Operator cell (POij) on the full I x J grid.
  Operator <- rep(1:J, each=I)
  Oj <- rep(rnorm(J, 0, sigmaO), each=I)
  Part <- rep(1:I, times=J)
  Pi <- rep(rnorm(I, 0, sigmaP), times=J)
  POij <- rnorm(I*J, 0, sigmaPO)
  simdata0 <- data.frame(Part, Operator, Pi, Oj, POij)
  # Zero-padded level labels so levels sort naturally (e.g. "01".."12").
  simdata0$Operator <- factor(simdata0$Operator)
  levels(simdata0$Operator) <- sprintf(paste0("%0", floor(log10(J))+1, "d"), 1:J)
  simdata0$Part <- factor(simdata0$Part)
  levels(simdata0$Part) <- sprintf(paste0("%0", floor(log10(I))+1, "d"), 1:I)
  # Resample the per-cell repeat counts Kij until every Part and Operator
  # level keeps at least one observation (Kij = 0 can drop a whole level).
  II <- 0 ; JJ <- 0
  while(II<I | JJ <J){
    if(Kmin < Kmax){
      Kij <- sample(Kmin:Kmax, I*J, replace=TRUE, prob=c(p,1-sum(p)))
    }else{
      Kij <- rep(Kmin, I*J)
    }
    simdata <- droplevels(
      as.data.frame(
        sapply(simdata0, function(v) rep(v, times=Kij), simplify=FALSE)))
    JJ <- length(levels(simdata$Operator)); II <- length(levels(simdata$Part))
  }
  # Add the residual term and assemble the response.
  Eijk <- rnorm(sum(Kij), 0, sigmaE)
  simdata <- cbind(simdata, Eijk)
  simdata[[resp.name]] <- mu + with(simdata, Oj+Pi+POij+Eijk)
  # Prefix levels ("A...", "B...") and rename the factor columns for output.
  levels(simdata[,1]) <- paste0("A", levels(simdata[,1]))
  levels(simdata[,2]) <- paste0("B", levels(simdata[,2]))
  names(simdata)[1:2] <- factor.names
  if(!keep.intermediate) simdata <- simdata[,c(factor.names,resp.name)]
  simdata
}
#' Format a table of type \code{ftable}
#' @description Format a table of type \code{ftable} for HTML printing.
#' @note This function is based on \code{R2HTML:::HTML.ftable}
#'
#' @param x a table of type \code{\link{ftable}}
#' @param digits number of digits to print
#' @return A table which can be used in \code{kable}.
#'
#' @export
format_ftable <- function(x, digits = getOption("digits"))
{
  if (!inherits(x, "ftable"))
    stop("x must be an `ftable'")
  # NOTE(review): ox is assigned but never used afterwards.
  ox <- x
  # Expand a list of level vectors into the flat-table label matrix: each
  # label is printed once at the start of its block, blanks elsewhere.
  makeLabels <- function(lst) {
    lens <- sapply(lst, length)
    cplensU <- c(1, cumprod(lens))
    cplensD <- rev(c(1, cumprod(rev(lens))))
    y <- NULL
    for (i in rev(seq(along = lst))) {
      ind <- 1 + seq(from = 0, to = lens[i] - 1) * cplensD[i+1]
      tmp <- character(length = cplensD[i])
      tmp[ind] <- lst[[i]]
      y <- cbind(rep(tmp, times = cplensU[i]), y)
    }
    y
  }
  # Names of the grouping variables, falling back to empty strings.
  makeNames <- function(x) {
    nmx <- names(x)
    if (is.null(nmx))
      nmx <- rep("", length = length(x))
    nmx
  }
  xrv <- attr(x, "row.vars")
  xcv <- attr(x, "col.vars")
  # LABS = left block (row-variable names + labels); DATA = column-variable
  # labels on top of the formatted cell values.
  LABS <- cbind(rbind(matrix("", nrow = length(xcv), ncol = length(xrv)),
                      makeNames(xrv), makeLabels(xrv)),
                c(makeNames(xcv), rep("", times = nrow(x) + 1)))
  DATA <- rbind(t(makeLabels(xcv)), rep("", times = ncol(x)),
                format(unclass(x), digits = digits))
  # Left-justify labels, right-justify data, and glue them side by side.
  cbind(apply(LABS, 2, format, justify = "left"),
        apply(DATA, 2, format, justify = "right"))
}
|
26352ac2b53f8c7b9802a459ad8abf13b30fa29c
|
69d690455a1a86f500efbd7815eb6b415c4c64ee
|
/aula1_.r
|
aa6111c0f6900f58c3df6f76f885d676aa215336
|
[] |
no_license
|
RAmluiz/compR_aulas
|
e56e99802861589a1338eb44bcd029eec7f378af
|
33ce59a7b3e2096c02723a71637aae6a4171f4f7
|
refs/heads/main
| 2023-04-07T13:08:42.215169
| 2021-04-18T23:22:52
| 2021-04-18T23:22:52
| 359,262,483
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 117
|
r
|
aula1_.r
|
library(tidyverse)
teste <- function(x, y) {
  # Raise x to the power 25 * y.
  x^(25 * y)
}
# Exercise teste() and combine the results; the last expression is
# auto-printed when the script is run interactively.
b <- teste(2, 5)
soma <- b * 25
soma + b
|
0465c2570fa890c1195a9fe26894de9e388b3736
|
51f64cbf05bb169808e618a9054619ff9d25bf7f
|
/R/4Factors.R
|
54b23e0d0ccb86442ede8e452029339378be1cb8
|
[] |
no_license
|
Vijayoswalia/R-R
|
8143865fc2dee900e60f9b0cd0f2e5ab567475fb
|
d15ae45f3d6baa0e061d9cec8c6c3352f6de22ae
|
refs/heads/master
| 2021-08-18T18:30:48.848192
| 2017-11-23T14:04:01
| 2017-11-23T14:04:01
| 105,735,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,062
|
r
|
4Factors.R
|
## Factor might be viewed as a vector but they spectify a discrete classification of the components of other vectors
##################################################################################
## Method 1 converting vector to factor
##################################################################################
x <-c(1,2,1,3,4,2,1,2)
xf<-factor(x)
> xf
[1] 1 2 1 3 4 2 1 2
Levels: 1 2 3 4
> str(xf)
Factor w/ 4 levels "1","2","3","4": 1 2 1 3 4 2 1 2
> length(xf)
[1] 8
## It will be determined by number of values in xf
##################################################################################
## Method 2
##################################################################################
x<-c(1,2,1,3,2,1,2)
xff<-factor(x,levels=c(1,2,3,4),labels=c("Quarter1","Quarter2","Quarter3","Quarter4"))
##by default it will set levels as per data but we can also set levels as per our requirement and later on add it in data.
## but we can not add a value in data which is not existing in levels
xff[6]<-4 ## This is fine
xff[6]<-5 ## it will error out
##################################################################################
## ts is a function that create timeseries data
##################################################################################
ts(data, start, end, frequency, deltat, ts.eps, class, names)
data = vector or matrix
start = the time of the first observation
end = the time of last observation
frequency = number of observation per unit of time
deltat= fraction of sampling period between successive observation
class = class to be given to result
names= vector of names
ts(data = NA, start = 1, end = numeric(), frequency = 1, deltat = 1, ts.eps = getOption("ts.eps"), class, names)
ts(1:10,start=2000, frequency=1) ## yearly data
ts(1:24,start=2000, frequency=12) ## monthly data (note: "calender" is not a ts() argument)
ts(1:12,start=2000, frequency=4) ## quarterly data
ts(1:24,start=2000, frequency=24) ## half-monthly (roughly 15-day) data
ts(1:52,start=2000, frequency=52) ## weekly data
plot(t)
plot(z, plot.type = "single", lty = 1:3)
|
76d2f916fa6b7e806317cddc48c4dec1433aca9a
|
02ad97dab34af39120a5fa3882090e2ccc0039cb
|
/Risk/src - DP/8.4 - Gating Rules - 2021.R
|
10df17b02bd3d48ffffee2b6b2af8fbf69dbdf0d
|
[] |
no_license
|
DineshPamanji/Python
|
7c674c274e728d79037d910a5bf857b28467b360
|
e0580ba41a2fa79fc6ccbf2789a86ffc20e2a503
|
refs/heads/master
| 2022-05-12T12:58:49.564546
| 2022-05-06T13:16:17
| 2022-05-06T13:16:17
| 170,642,127
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 39,451
|
r
|
8.4 - Gating Rules - 2021.R
|
############################################################################################
################## 8 - Gating Rules ######################################################
############################################################################################
## 0. Load helper functions & libraries ----------------------------------------------------
# NOTE(review): "load_libaries" is a typo for "load_libraries" but is used
# consistently on the next line, so it still works as written.
load_libaries <- file.path("src","utils","load_libraries.R")
source(load_libaries)
io_helper <- file.path("src","utils","io_helper.R")
source(io_helper)
options(scipen = 999)  # suppress scientific notation in printed numbers
# presumably validate::voptions (raise all validation problems as errors) —
# TODO confirm which sourced helper/package provides this function.
voptions(raise = "all")
# negated set-membership operator used throughout the gating rules below
`%notin%` <- Negate(`%in%`)
## 1. Define constants -----------------------------------------------------------------------
## 1.1 dataframe for month - day mapping (look-back horizon in months -> cutoff in days)
month_day_mapping <- data.frame(month = c(1,3,6,12,18,24),
                                days = c(30, 90, 180, 365, 545, 730))
## 1.2 vector of all months (look-back horizons used by every rule family)
months_vector <- c(1,3,6,12,18,24)
## 1.3 vector of all loan products (loan_notation codes in the bureau data)
loans_vector <- c('HL','PL','LAP','AL','BL','TW','Tractor','CV','CE','Gold')
## 1.4 vector for all DPD (days-past-due thresholds)
dpd_vector <- c(30,60,90)
## 1.5 Enquiry buffer days (added to product TAT when windowing enquiries)
buffer <- 7
## 1.6 TAT for product (turnaround-time offsets per product / new-vs-used)
tat_2W_new <- 0
tat_2W_used <- 0
tat_PV_new <- 0
tat_PV_used <- 7
## 2. Create Product Ownership Rules --------------------------------------------------------
## 2.1 load tradeline data
load_rdata_intermediate("ADS_data//bureau_tradelines_2021.rdata")
## 2.2 load application data
mar_2021_applications <- fread_raw("Mar 21 - Point 1.txt")
mar_2021_applications <- mar_2021_applications %>% filter(Type == 'app')
mar_2021_applications$loan_type <- paste0(mar_2021_applications$Product,"-",mar_2021_applications$Vehicle_Type)
# primary_customer_codes <- fread_raw("Customer Code Mar 21.txt")
#
# mar_2021_applications <- left_join(mar_2021_applications,primary_customer_codes,by = c('AppNo' = 'application_no'))
mar_2021_applications$applicant_id <- paste0(mar_2021_applications$AppNo,"_",mar_2021_applications$Customer_Code)
final_output <- distinct(mar_2021_applications %>% filter(loan_type %in% c('C-N','C-U','H-N','S-R')))
final_output$application_date <- as.Date(substr(final_output$app_syntime,1,10),
format = "%Y-%m-%d",
origin = "1970-01-01")
final_output <- distinct(final_output %>% dplyr::select(AppNo,
Customer_Code,
applicant_id,
Product,
Vehicle_Type,
loan_type,
Category,
Cibil_Score,
application_date))
final_output$Cibil_Score <- ifelse(final_output$Cibil_Score == '000-1', -1, final_output$Cibil_Score)
final_output$Cibil_Score <- as.numeric(final_output$Cibil_Score)
rm(mar_2021_applications)
colnames(final_output) <- c('application_no',
'customer_code',
'applicant_id',
'product',
'vehicle_type',
'loan_type',
'category',
'cibil_score',
'application_date')
required_data <- distinct(final_output %>% dplyr::select(application_no,customer_code,applicant_id))
application_df <- final_output %>% dplyr::select('application_no','customer_code','applicant_id','loan_type','cibil_score','application_date')
trades_clean_backup <- trades_clean
trades_clean <- inner_join(trades_clean,required_data,by = c('application_no' = 'application_no',
'customer_code' = 'customer_code',
'applicant_id' = 'applicant_id'))
## 2.2 create live loans flag
trades_clean$live_loan <- ifelse((trades_clean$loan_notation != 'CC' &
is.na(trades_clean$account_date_closed) &
trades_clean$current_balance > 0) |
(trades_clean$loan_notation == 'CC' &
is.na(trades_clean$account_date_closed) &
(trades_clean$current_balance > 0 | trades_clean$current_balance == 0)
),
1,0)
## 2.3 calculate days between date opened & date on which cibil report was pulled
trades_clean$days <- as.numeric(trades_clean$cibil_reported_date - trades_clean$account_date_opened)
## 2.4 create tradelines subset for adding rules
tradelines <- trades_clean %>% dplyr::select(application_no,
customer_code,
applicant_id,
high_credit_sanctioned_amount,
overdue_amount,
account_type,
account_date_opened,
cibil_reported_date,
loan_identifier,
payment_history_start_date,
payment_history_end_date,
current_balance,
payment_history,
loan_description,
loan_notation,
unsecured_flag,
account_date_closed,
yearmon_reported_date,
yearmon_account_open_date,
yearmon_payment_start_date,
live_loan,
days)
## 2.5 create rules function - loans taken in last x months
## Flag tradelines opened within the last x months, for each x in months_vector.
## Adds one 0/1 column "Rule_PO_months_<x>" per horizon and returns the
## augmented data frame. Reads the horizon -> day-cutoff lookup from the
## file-global `month_day_mapping`.
create_POR_loans_in_x_months <- function(tradelines, months_vector){
  for (horizon in months_vector) {
    # day cutoff for this month horizon, looked up from the global mapping
    cutoff_days <- unique(month_day_mapping$days[month_day_mapping$month == horizon])
    opened_recently <- tradelines$days > 0 & tradelines$days <= cutoff_days
    tradelines[, paste0('Rule_PO_months_', horizon)] <- ifelse(opened_recently, 1, 0)
  }
  return(tradelines)
}
## 2.6 create rules function - live loans taken in last x months
## Flag tradelines that are still live (live_loan == 1) and were opened within
## the last x months, for each x in months_vector. Adds one 0/1 column
## "Rule_PO_months_<x>_live" per horizon; cutoffs come from the file-global
## `month_day_mapping`.
create_POR_live_loans_in_x_months <- function(tradelines, months_vector){
  for (horizon in months_vector) {
    cutoff_days <- unique(month_day_mapping$days[month_day_mapping$month == horizon])
    recent_and_live <- tradelines$days > 0 &
      tradelines$days <= cutoff_days &
      tradelines$live_loan == 1
    tradelines[, paste0('Rule_PO_months_', horizon, '_live')] <- ifelse(recent_and_live, 1, 0)
  }
  return(tradelines)
}
## 2.7 create rules function - Specific loans taken in last x months
## Flag loans of each specific product (loan_notation) taken in the last x
## months. For every (x, product) pair adds a 0/1 column
## "Rule_PO_months_<x>_<product>".
##
## Fixes/improvements:
##   * `days_cutoff` is invariant in the inner product loop; it is now
##     computed once per month horizon instead of once per (month, product).
##   * The horizon lookup table is exposed as the `mapping` argument
##     (default = the file-global `month_day_mapping`), which keeps existing
##     callers unchanged while making the function self-contained and testable.
##
## Args:
##   tradelines: data frame with columns `days` (age of the account at the
##     CIBIL report date) and `loan_notation` (product code).
##   months_vector: look-back horizons in months.
##   loans_vector: product codes to flag.
##   mapping: data frame with columns `month` and `days`.
## Returns: tradelines with the rule columns appended.
create_POR_product_loans_in_x_months <- function(tradelines, months_vector, loans_vector,
                                                 mapping = month_day_mapping){
  for(month_value in months_vector){
    # loop-invariant w.r.t. loan type: look up once per horizon
    days_cutoff <- unique(mapping$days[mapping$month == month_value])
    for(loan_value in loans_vector){
      flag <- tradelines$days > 0 &
        tradelines$days <= days_cutoff &
        tradelines$loan_notation == loan_value
      tradelines[,paste0('Rule_PO_months_',month_value, '_',loan_value)] <- ifelse(flag, 1, 0)
    }
  }
  return(tradelines)
}
## 2.8 create rules function - Specific live loans taken in last x months
## Flag LIVE loans (live_loan == 1) of each specific product taken in the
## last x months. For every (x, product) pair adds a 0/1 column
## "Rule_PO_months_<x>_<product>_live".
##
## Fixes/improvements (mirrors create_POR_product_loans_in_x_months):
##   * `days_cutoff` hoisted out of the inner product loop (loop-invariant).
##   * Horizon lookup table exposed as the `mapping` argument with a
##     backward-compatible default of the file-global `month_day_mapping`.
##
## Args:
##   tradelines: data frame with columns `days`, `loan_notation`, `live_loan`.
##   months_vector: look-back horizons in months.
##   loans_vector: product codes to flag.
##   mapping: data frame with columns `month` and `days`.
## Returns: tradelines with the rule columns appended.
create_POR_product_live_loans_in_x_months <- function(tradelines, months_vector, loans_vector,
                                                      mapping = month_day_mapping){
  for(month_value in months_vector){
    # loop-invariant w.r.t. loan type: look up once per horizon
    days_cutoff <- unique(mapping$days[mapping$month == month_value])
    for(loan_value in loans_vector){
      flag <- tradelines$days > 0 &
        tradelines$days <= days_cutoff &
        tradelines$loan_notation == loan_value &
        tradelines$live_loan == 1
      tradelines[,paste0('Rule_PO_months_',month_value, '_',loan_value, '_live')] <- ifelse(flag, 1, 0)
    }
  }
  return(tradelines)
}
## 2.9 create rules function - unsecured loans taken in last x months
## Flag unsecured tradelines (unsecured_flag == 'unsecured') opened within the
## last x months, for each x in months_vector. Adds one 0/1 column
## "Rule_PO_months_<x>_unsec" per horizon; cutoffs come from the file-global
## `month_day_mapping`.
create_POR_unsec_loans_in_x_months <- function(tradelines, months_vector){
  for (horizon in months_vector) {
    cutoff_days <- unique(month_day_mapping$days[month_day_mapping$month == horizon])
    recent_unsecured <- tradelines$days > 0 &
      tradelines$days <= cutoff_days &
      tradelines$unsecured_flag == 'unsecured'
    tradelines[, paste0('Rule_PO_months_', horizon, '_unsec')] <- ifelse(recent_unsecured, 1, 0)
  }
  return(tradelines)
}
## 2.10 create rules function - unsecured live loans taken in last x months
## Flag live unsecured tradelines opened within the last x months, for each x
## in months_vector. Adds one 0/1 column "Rule_PO_months_<x>_live_unsec" per
## horizon; cutoffs come from the file-global `month_day_mapping`.
create_POR_unsec_live_loans_in_x_months <- function(tradelines, months_vector){
  for (horizon in months_vector) {
    cutoff_days <- unique(month_day_mapping$days[month_day_mapping$month == horizon])
    recent_live_unsecured <- tradelines$days > 0 &
      tradelines$days <= cutoff_days &
      tradelines$live_loan == 1 &
      tradelines$unsecured_flag == 'unsecured'
    tradelines[, paste0('Rule_PO_months_', horizon, '_live_unsec')] <- ifelse(recent_live_unsecured, 1, 0)
  }
  return(tradelines)
}
## 2.11 create rules function - unsecured loans taken in last x months excluding CC, CD
## Flag unsecured tradelines opened within the last x months, excluding credit
## cards (CC) and consumer-durable loans (CD). Adds one 0/1 column
## "Rule_PO_months_<x>_unsec_excl_CC_CD" per horizon; cutoffs come from the
## file-global `month_day_mapping`.
create_POR_unsec_loans_in_x_months_excl_CC_CD <- function(tradelines, months_vector){
  for (horizon in months_vector) {
    cutoff_days <- unique(month_day_mapping$days[month_day_mapping$month == horizon])
    flagged <- tradelines$days > 0 &
      tradelines$days <= cutoff_days &
      tradelines$unsecured_flag == 'unsecured' &
      !(tradelines$loan_notation %in% c('CD','CC'))
    tradelines[, paste0('Rule_PO_months_', horizon, '_unsec_excl_CC_CD')] <- ifelse(flagged, 1, 0)
  }
  return(tradelines)
}
## 2.12 call all rule creation functions
tradelines <- create_POR_loans_in_x_months(tradelines, months_vector)
tradelines <- create_POR_live_loans_in_x_months(tradelines, months_vector)
tradelines <- create_POR_product_loans_in_x_months(tradelines, months_vector, loans_vector)
tradelines <- create_POR_product_live_loans_in_x_months(tradelines, months_vector, loans_vector)
tradelines <- create_POR_unsec_loans_in_x_months(tradelines, months_vector)
tradelines <- create_POR_unsec_live_loans_in_x_months(tradelines, months_vector)
tradelines <- create_POR_unsec_loans_in_x_months_excl_CC_CD(tradelines, months_vector)
## 2.13 subset for required columns
req_cols <- names(tradelines)[grep("Rule_PO",names(tradelines))]
tradelines <- tradelines %>% dplyr::select(c('application_no', 'customer_code', 'applicant_id', req_cols))
## 2.14 aggregate rules at deal number level
rules_PO <- tradelines %>% group_by(application_no,customer_code,applicant_id) %>% summarise(across(everything(), list(sum)))
colnames(rules_PO) <- c('application_no', 'customer_code', 'applicant_id', req_cols)
rules_PO <- rules_PO %>% ungroup()
## 2.15 convert sum to flags
for(col in req_cols){
value_list <- c(1,2,3,4,5)
for(value in value_list){
new_col <- paste0(col, "_GE_",value)
rules_PO[,new_col] <- as.numeric(ifelse(rules_PO[,col] >= value, 1, 0))
}
}
## 2.16 select only flag columns
new_cols <- colnames(rules_PO)
new_cols <- new_cols[new_cols %notin% req_cols]
rules_PO <- rules_PO %>% dplyr::select(new_cols)
rm(trades_clean,tradelines)
## 3. Create delinquency rules ---------------------------------------------------------
## 3.1 load tradeline melt data
load_rdata_intermediate("ADS_data//trades_melt_2021.rdata")
trades_melt <- inner_join(trades_melt,required_data,by = c('application_no' = 'application_no',
'customer_code' = 'customer_code',
'applicant_id' = 'applicant_id'))
## 3.2 create subset of data with required columns
trades_dpd <- trades_melt %>% dplyr::select(application_no,
customer_code,
applicant_id,
high_credit_sanctioned_amount,
overdue_amount,
account_type,
account_date_opened,
account_date_closed,
cibil_reported_date,
payment_history_start_date,
payment_history_end_date,
loan_identifier,
current_balance,
yearmon_reported_date,
yearmon_account_open_date,
yearmon_payment_start_date,
loan_notation,
unsecured_flag,
variable,
diff_reported_payment,
dpd_num
) %>% filter(!is.na(dpd_num))
rm(trades_melt)
gc()
## 3.3 create rules function - x DPD in last y months
## Flag delinquency events: rows that reached at least `dpd_value` days past
## due within the last `month_value` months, for every (dpd, month)
## combination. Adds one 0/1 column "Rule_DR_all_<dpd>dpd_<mon>mon" each.
##
## Improvement: the hard-coded 3000 minimum outstanding balance is now the
## `min_balance` parameter (default 3000, so existing callers are unchanged).
##
## Args:
##   trades_dpd: one row per (tradeline, payment-history month) with columns
##     diff_reported_payment (months between the report date and the history
##     month), dpd_num (days past due in that month) and current_balance.
##   dpd_vector: DPD thresholds to flag.
##   months_vector: look-back windows in months.
##   min_balance: minimum current balance for a row to count toward a rule.
## Returns: trades_dpd with the rule columns appended.
create_DR_x_dpd_y_mon <- function(trades_dpd, dpd_vector, months_vector, min_balance = 3000){
  for(dpd_value in dpd_vector){
    for(month_value in months_vector){
      # within the look-back window, at/above the DPD threshold, and with a
      # material outstanding balance
      flag <- trades_dpd$diff_reported_payment >= 1 &
        trades_dpd$diff_reported_payment <= month_value &
        trades_dpd$dpd_num >= dpd_value &
        trades_dpd$current_balance >= min_balance
      trades_dpd[, paste0('Rule_DR_all_',dpd_value,'dpd_',month_value,'mon')] <- ifelse(flag, 1, 0)
    }
  }
  return(trades_dpd)
}
## 3.4 create rules function - x DPD in last y months - non CC, CD
## Flag delinquency events on non-credit-card, non-consumer-durable loans:
## rows reaching at least `dpd_value` days past due within the last
## `month_value` months. Adds one 0/1 column
## "Rule_DR_non_CC_CD_<dpd>dpd_<mon>mon" per combination.
##
## Improvements:
##   * hard-coded 3000 balance floor is now the `min_balance` parameter
##     (default 3000 keeps existing callers unchanged);
##   * the file-global `%notin%` operator is replaced by the equivalent
##     base-R `!(x %in% set)`, making the function self-contained.
##
## Args: as create_DR_x_dpd_y_mon, plus a `loan_notation` column.
## Returns: trades_dpd with the rule columns appended.
create_DR_x_dpd_y_mon_non_CC_CD <- function(trades_dpd, dpd_vector, months_vector, min_balance = 3000){
  for(dpd_value in dpd_vector){
    for(month_value in months_vector){
      flag <- trades_dpd$diff_reported_payment >= 1 &
        trades_dpd$diff_reported_payment <= month_value &
        trades_dpd$dpd_num >= dpd_value &
        !(trades_dpd$loan_notation %in% c('CD','CC')) &
        trades_dpd$current_balance >= min_balance
      trades_dpd[, paste0('Rule_DR_non_CC_CD_',dpd_value,'dpd_',month_value,'mon')] <- ifelse(flag, 1, 0)
    }
  }
  return(trades_dpd)
}
## 3.5 create rules function - x DPD in last y months - Gold
## Flag delinquency events by product family (Gold, Education, Agri, Mudra,
## CC) and their complements, for every (dpd, month) combination. Adds one
## 0/1 column "Rule_DR_<family>_<dpd>dpd_<mon>mon" per (family, dpd, month).
##
## Improvements over the original:
##   * twelve near-identical copy-pasted ifelse blocks are collapsed into a
##     single loop driven by `rule_specs` (column names and creation order
##     are preserved exactly);
##   * hard-coded 3000 balance floor is now the `min_balance` parameter
##     (default 3000 keeps existing callers unchanged);
##   * the file-global `%notin%` is replaced by base-R `!(x %in% set)`.
##
## Args: as create_DR_x_dpd_y_mon, plus an `account_type` column holding
##   CIBIL account-type codes.
## Returns: trades_dpd with the rule columns appended.
create_DR_x_dpd_y_mon_specific_loans <- function(trades_dpd, dpd_vector, months_vector, min_balance = 3000){
  ## CIBIL account_type code sets per product family (as in the original).
  gold_types      <- c(7)
  education_types <- c(8)
  agri_types      <- c(36, 53, 57)
  mudra_types     <- c(39)
  cc_types        <- c(10, 31, 35, 36)
  ## One spec per rule family, in the original column-creation order:
  ## label -> account-type set -> whether the rule requires membership
  ## (include = TRUE) or non-membership (include = FALSE) in that set.
  rule_specs <- list(
    list(label = 'Gold',                 types = gold_types,           include = TRUE),
    list(label = 'Education',            types = education_types,      include = TRUE),
    list(label = 'Agri',                 types = agri_types,           include = TRUE),
    list(label = 'Mudra',                types = mudra_types,          include = TRUE),
    list(label = 'CC',                   types = cc_types,             include = TRUE),
    list(label = 'non_Gold',             types = gold_types,           include = FALSE),
    list(label = 'non_Education',        types = education_types,      include = FALSE),
    list(label = 'non_Agri',             types = agri_types,           include = FALSE),
    list(label = 'non_Mudra',            types = mudra_types,          include = FALSE),
    list(label = 'non_CC',               types = cc_types,             include = FALSE),
    list(label = 'non_Gold_Edu_Agri',    types = c(7, 8, 36, 53, 57),  include = FALSE),
    # original exclusion list kept verbatim: 36 is covered via the Agri codes,
    # so only 10, 31, 35 of the CC codes are appended here
    list(label = 'non_Gold_Edu_Agri_CC', types = c(7, 8, 36, 53, 57, 10, 31, 35), include = FALSE)
  )
  for(dpd_value in dpd_vector){
    for(month_value in months_vector){
      # delinquency condition shared by every rule family at this
      # (dpd, month) combination: reported within the look-back window,
      # at/above the DPD threshold, with a material outstanding balance
      base_flag <- trades_dpd$diff_reported_payment >= 1 &
        trades_dpd$diff_reported_payment <= month_value &
        trades_dpd$dpd_num >= dpd_value &
        trades_dpd$current_balance >= min_balance
      for(spec in rule_specs){
        in_set <- trades_dpd$account_type %in% spec$types
        type_flag <- if(spec$include) in_set else !in_set
        col_name <- paste0('Rule_DR_', spec$label, '_', dpd_value, 'dpd_', month_value, 'mon')
        trades_dpd[, col_name] <- ifelse(base_flag & type_flag, 1, 0)
      }
    }
  }
  return(trades_dpd)
}
## 3.5 call all rule creation functions
trades_dpd <- create_DR_x_dpd_y_mon(trades_dpd, dpd_vector, months_vector)
trades_dpd <- create_DR_x_dpd_y_mon_non_CC_CD(trades_dpd, dpd_vector, months_vector)
trades_dpd <- create_DR_x_dpd_y_mon_specific_loans(trades_dpd, dpd_vector, months_vector)
gc()
## 3.6 subset for required columns
req_cols <- names(trades_dpd)[grep("Rule_DR",names(trades_dpd))]
trades_dpd <- trades_dpd %>% dplyr::select(c('application_no', 'customer_code', 'applicant_id', req_cols))
gc()
rules_numbers <- c(1:length(req_cols))
blocks_temp <- split(rules_numbers, ceiling(seq_along(rules_numbers)/50))
rules_DL <- distinct(trades_dpd %>% dplyr::select(application_no,customer_code,applicant_id))
for(temp_block in blocks_temp){
temp_rules <- req_cols[min(temp_block) : max(temp_block)]
temp_agg <- trades_dpd %>% dplyr::select(c('application_no', 'customer_code', 'applicant_id',temp_rules))
temp_agg <- temp_agg %>% group_by(application_no,customer_code,applicant_id) %>% summarise(across(everything(), list(sum)))
colnames(temp_agg) <- c('application_no', 'customer_code', 'applicant_id', temp_rules)
temp_agg <- temp_agg %>% ungroup()
rules_DL <- left_join(rules_DL,temp_agg,by = c('application_no' = 'application_no',
'customer_code' = 'customer_code',
'applicant_id' = 'applicant_id'))
gc()
}
rm(temp_agg,temp_rules,temp_block,blocks_temp,rules_numbers)
gc()
rm(trades_dpd)
gc()
## 3.8 convert sum to flags
for(col in req_cols){
value_list <- c(1,2,3,4,5)
for(value in value_list){
new_col <- paste0(col, "_GE_",value)
rules_DL[,new_col] <- as.numeric(ifelse(rules_DL[,col] >= value, 1, 0))
}
}
## 3.9 select only flag columns
new_cols <- colnames(rules_DL)
new_cols <- new_cols[new_cols %notin% req_cols]
rules_DL <- rules_DL %>% dplyr::select(new_cols)
gc()
## 4. Create enquiry variables -------------------------------------------------------------
## 4.1 load enquiry data
load_rdata_intermediate("cleaned_data//enquiry_data_2021.rdata")
enquiry_data_2021$applicant_id <- paste0(enquiry_data_2021$application_no,"_",enquiry_data_2021$customer_code)
enquiry_data <- inner_join(enquiry_data_2021,application_df,by = c('application_no' = 'application_no',
'customer_code' = 'customer_code',
'applicant_id' = 'applicant_id'))
# rm(application_df)
account_mapping <- fread_mapping("account_type_mapping.csv")
enquiry_data <- left_join(enquiry_data,account_mapping, by = c("enquiry_purpose" = "account_type"))
enquiry_data$account_description <- NULL
rm(enquiry_data_2021)
trades_clean <- distinct(trades_clean_backup %>% dplyr::select(application_no, customer_code, applicant_id,cibil_reported_date))
enquiry_data <- left_join(enquiry_data,trades_clean,by = c('application_no' = 'application_no',
'customer_code' = 'customer_code',
'applicant_id' = 'applicant_id'))
enquiry_data$cibil_reported_date <- fifelse(is.na(enquiry_data$cibil_reported_date), enquiry_data$application_date, enquiry_data$cibil_reported_date)
## 7.6 calculate days between enquiry date & date on which cibil report was pulled
enquiry_data$days <- as.numeric(enquiry_data$cibil_reported_date - enquiry_data$enquiry_date)
enquiry_data <- enquiry_data %>% filter(days >= 0)
# load_rdata_intermediate("ADS_data//enquiry_data_2021.rdata")
enquiry_data <- distinct(enquiry_data)
enquiry_data <- enquiry_data %>% filter(!is.na(loan_notation))
# test <- enquiry_data %>% filter(cibil_score == -1) %>% group_by(loan_type) %>% summarise(median_days = median(days,na.rm=T))
# test <- test %>% group_by()
# table(test$loan_type)
## 4.2 create rules function
## Build enquiry-count rule columns per applicant and look-back horizon.
## For each month horizon x in months_vector the function counts distinct
## enquiry dates per (application, customer, applicant, loan_notation),
## spreads them into one column per loan notation, then derives overall /
## non-CC / non-CD / non-CC-CD totals ("Rule_EN_enquiry_count_<x>m*").
## The enquiry window is shifted by `buffer` plus a product-specific TAT
## offset, chosen by the application's loan_type (H-N = 2W new, C-N = PV new,
## C-U = PV used, S-R = 2W refinance).
##
## Reads from the enclosing environment: tat_2W_new, tat_2W_used, tat_PV_new,
## tat_PV_used and month_day_mapping. `melt`/`dcast`/`rbindlist` are
## presumably data.table's (loaded by the sourced helpers) — TODO confirm.
## Returns a data.frame with one row per applicant and the rule columns;
## applicants with no enquiries in a window get NA (filled with 0 by the
## caller).
create_ER_x_enq_y_mon <- function(enquiry_data, months_vector, buffer){
  # get only deal number columns (one row per applicant; join target below)
  base_output <- distinct(enquiry_data %>% dplyr::select(application_no, customer_code, applicant_id))
  # effective window shift = generic buffer + product turnaround time
  buffer_2W_new <- buffer + tat_2W_new
  buffer_2W_used <- buffer + tat_2W_used
  buffer_PV_new <- buffer + tat_PV_new
  buffer_PV_used <- buffer + tat_PV_used
  # iterate over different months (one set of rule columns per horizon)
  for(month_value in months_vector){
    # get days cutoff for this horizon from the global month -> days mapping
    days_cutoff <- unique((month_day_mapping %>% filter(month == month_value))$days)
    # subset by the application's loan type so each gets its own buffer
    enquiry_agg_p1 <- enquiry_data %>% filter(loan_type == 'H-N')
    enquiry_agg_p2 <- enquiry_data %>% filter(loan_type == 'C-N')
    enquiry_agg_p3 <- enquiry_data %>% filter(loan_type == 'C-U')
    enquiry_agg_p4 <- enquiry_data %>% filter(loan_type == 'S-R')
    ### get count of enquiries at deal no x customer code x loan notation level
    # p1: 2W new (H-N) — window shifted by buffer_2W_new
    enquiry_agg_p1 <- enquiry_agg_p1 %>% filter((days > (0+buffer_2W_new)) & (days <= (days_cutoff+buffer_2W_new))) %>% group_by(application_no, customer_code, applicant_id,
                                                                                                                                loan_notation) %>% summarise(enquiry_count = length(unique(enquiry_date)))
    enquiry_agg_p1 <- enquiry_agg_p1 %>% ungroup()
    # p2: PV new (C-N) — window shifted by buffer_PV_new
    enquiry_agg_p2 <- enquiry_agg_p2 %>% filter((days > (0+buffer_PV_new)) & (days <= (days_cutoff+buffer_PV_new))) %>% group_by(application_no, customer_code, applicant_id,
                                                                                                                                loan_notation) %>% summarise(enquiry_count = length(unique(enquiry_date)))
    enquiry_agg_p2 <- enquiry_agg_p2 %>% ungroup()
    # p3: PV used (C-U) — window shifted by buffer_PV_used
    enquiry_agg_p3 <- enquiry_agg_p3 %>% filter((days > (0+buffer_PV_used)) & (days <= (days_cutoff+buffer_PV_used))) %>% group_by(application_no, customer_code, applicant_id,
                                                                                                                                  loan_notation) %>% summarise(enquiry_count = length(unique(enquiry_date)))
    enquiry_agg_p3 <- enquiry_agg_p3 %>% ungroup()
    # p4: 2W refinance (S-R) — window shifted by buffer_2W_used
    enquiry_agg_p4 <- enquiry_agg_p4 %>% filter((days > (0+buffer_2W_used)) & (days <= (days_cutoff+buffer_2W_used))) %>% group_by(application_no, customer_code, applicant_id,
                                                                                                                                  loan_notation) %>% summarise(enquiry_count = length(unique(enquiry_date)))
    enquiry_agg_p4 <- enquiry_agg_p4 %>% ungroup()
    # recombine the four product subsets
    enquiry_agg <- data.frame(rbindlist(l=list(enquiry_agg_p1,enquiry_agg_p2,enquiry_agg_p3,enquiry_agg_p4), use.names = T))
    # change column name to embed the horizon
    colnames(enquiry_agg)[colnames(enquiry_agg) == 'enquiry_count'] <- paste0('Rule_EN_enquiry_count_',month_value,'m')
    # melt data to get enquiry count column name as a separate column
    enquiry_agg <- melt(enquiry_agg, id.vars = c('application_no','customer_code','applicant_id','loan_notation'))
    # transpose rows to columns to get count across loan types
    enquiry_agg_dcast <- dcast(application_no+customer_code+applicant_id ~ variable+loan_notation,
                               data = enquiry_agg,
                               value.var = "value",
                               sum)
    # get columns names to calculate overall enquiry count
    enquiry_count_cols <- names(enquiry_agg_dcast)[grep("enquiry_count",names(enquiry_agg_dcast))]
    # get column names to calculate enquiry count excluding CC
    enquiry_count_cols_non_CC <- enquiry_count_cols[!grepl("_CC",enquiry_count_cols)]
    # get column names to calculate enquiry count excluding CD
    enquiry_count_cols_non_CD <- enquiry_count_cols[!grepl("_CD",enquiry_count_cols)]
    # get column names excluding CC & CD (filter _CC first, then _CD)
    enquiry_count_cols_non_CC_CD <- enquiry_count_cols[!grepl("_CC",enquiry_count_cols)]
    enquiry_count_cols_non_CC_CD <- enquiry_count_cols_non_CC_CD[!grepl("_CD",enquiry_count_cols_non_CC_CD)]
    # create overall enquiry columns (row totals over the selected notations)
    enquiry_agg_dcast[,paste0('Rule_EN_enquiry_count_',month_value,'m')] <- rowSums(enquiry_agg_dcast[,enquiry_count_cols])
    enquiry_agg_dcast[,paste0('Rule_EN_enquiry_count_',month_value,'m_non_CC')] <- rowSums(enquiry_agg_dcast[,enquiry_count_cols_non_CC])
    enquiry_agg_dcast[,paste0('Rule_EN_enquiry_count_',month_value,'m_non_CD')] <- rowSums(enquiry_agg_dcast[,enquiry_count_cols_non_CD])
    enquiry_agg_dcast[,paste0('Rule_EN_enquiry_count_',month_value,'m_non_CC_CD')] <- rowSums(enquiry_agg_dcast[,enquiry_count_cols_non_CC_CD])
    # join with base output (accumulates one column group per horizon;
    # applicants absent from this horizon get NA here)
    base_output <- left_join(base_output, enquiry_agg_dcast, by = c('application_no' = 'application_no',
                                                                    'customer_code' = 'customer_code',
                                                                    'applicant_id' = 'applicant_id'))
  }
  return(data.frame(base_output))
}
## 4.3 call enquiry rules creation function
rules_EN <- create_ER_x_enq_y_mon(enquiry_data, months_vector, buffer)
rules_EN[is.na(rules_EN)] <- 0
## 4.4 create rules from enquiry variables
req_cols <- names(rules_EN)[grep("Rule_EN_enquiry_count_",names(rules_EN))]
for(col in req_cols){
value_list <- c(3,6,5,7,9)
for(value in value_list){
new_col <- paste0(col, "_GE_",value)
rules_EN[,new_col] <- as.numeric(ifelse(rules_EN[,col] >= value, 1, 0))
}
}
## 4.5 subset for only rules
new_cols <- colnames(rules_EN)
new_cols <- new_cols[new_cols %notin% req_cols]
rules_EN <- rules_EN %>% dplyr::select(new_cols)
rm(enquiry_data,month_day_mapping)
## 5. Combine all rules ------------------------------------------------------------------------------
all_rules <- left_join(final_output,rules_PO, by = c('application_no' = 'application_no',
'customer_code' = 'customer_code',
'applicant_id' = 'applicant_id'))
all_rules <- left_join(all_rules, rules_DL, by = c('application_no' = 'application_no',
'customer_code' = 'customer_code',
'applicant_id' = 'applicant_id'))
all_rules <- left_join(all_rules, rules_EN, by = c('application_no' = 'application_no',
'customer_code' = 'customer_code',
'applicant_id' = 'applicant_id'))
all_rules[is.na(all_rules)] <- 0
rm(rules_DL,rules_EN,rules_PO)
# all_rules$cibil_score <- ifelse(all_rules$cibil_score == '000-1', -1, all_rules$cibil_score)
# all_rules$cibil_score <- as.numeric(all_rules$cibil_score)
###############################################################################################################
## 1. select 2W New rules
common_cols <- c('application_no','loan_type','category','cibil_score')
selected_rules <-
c(
'Rule_DR_non_Gold_Edu_Agri_90dpd_1mon_GE_1',
'Rule_DR_non_Gold_Edu_Agri_60dpd_6mon_GE_4',
'Rule_DR_non_Gold_Edu_Agri_90dpd_12mon_GE_3',
'Rule_DR_non_Gold_Edu_Agri_60dpd_1mon_GE_1',
'Rule_PO_months_1_TW_live_GE_1',
'Rule_PO_months_3_BL_live_GE_4',
'Rule_PO_months_3_CV_live_GE_2',
'Rule_PO_months_1_AL_live_GE_1',
'Rule_PO_months_3_Tractor_live_GE_1',
'Rule_PO_months_6_live_unsec_GE_4',
'Rule_PO_months_3_live_unsec_GE_2',
'Rule_PO_months_6_PL_live_GE_2',
'Rule_EN_enquiry_count_3m_GE_7',
'Rule_EN_enquiry_count_1m_GE_5',
'Rule_EN_enquiry_count_6m_GE_9'
)
req_cols <- c(common_cols,selected_rules)
rules_2W_New <- data.frame(all_rules %>% filter(loan_type == 'H-N') %>% dplyr::select(req_cols))
rules_2W_New[,'gated_flag'] <- apply(rules_2W_New[,selected_rules], 1, max)
## 2. select PV New rules
common_cols <- common_cols <- c('application_no','loan_type','category','cibil_score')
selected_rules <-
c(
'Rule_DR_non_Gold_Edu_Agri_90dpd_1mon_GE_1',
# 'Rule_PO_months_3_Tractor_live_GE_1',
'Rule_PO_months_3_live_unsec_GE_2',
'Rule_EN_enquiry_count_1m_GE_6'
)
req_cols <- c(common_cols,selected_rules)
rules_PV_New <- data.frame(all_rules %>% filter(loan_type == 'C-N') %>% dplyr::select(req_cols))
rules_PV_New[,'gated_flag'] <- apply(rules_PV_New[,selected_rules], 1, max)
## 3. select PV Used rules
common_cols <- common_cols <- c('application_no','loan_type','category','cibil_score')
selected_rules <-
c(
'Rule_DR_non_Gold_Edu_Agri_90dpd_1mon_GE_1',
'Rule_DR_non_Gold_Edu_Agri_90dpd_6mon_GE_3',
'Rule_PO_months_12_TW_live_GE_2',
'Rule_PO_months_3_live_unsec_GE_2',
'Rule_EN_enquiry_count_1m_GE_6'
)
req_cols <- c(common_cols,selected_rules)
rules_PV_Used <- data.frame(all_rules %>% filter(loan_type == 'C-U') %>% dplyr::select(req_cols))
rules_PV_Used[,'gated_flag'] <- apply(rules_PV_Used[,selected_rules], 1, max)
## 4. select 2W used rules
common_cols <- c('application_no','loan_type','category','cibil_score')
selected_rules <-
c(
"Rule_DR_non_Gold_Edu_Agri_60dpd_6mon_GE_5",
"Rule_DR_non_Gold_Edu_Agri_90dpd_3mon_GE_1",
"Rule_PO_months_3_BL_live_GE_2",
"Rule_PO_months_3_CV_live_GE_2",
"Rule_PO_months_3_live_unsec_GE_3",
"Rule_PO_months_3_PL_live_GE_1",
"Rule_PO_months_6_live_unsec_GE_4",
"Rule_PO_months_3_TW_live_GE_1",
"Rule_EN_enquiry_count_1m_GE_5",
"Rule_EN_enquiry_count_3m_GE_7",
"Rule_EN_enquiry_count_6m_GE_9"
)
req_cols <- c(common_cols,selected_rules)
rules_2W_Used <- data.frame(all_rules %>% filter(loan_type == 'S-R') %>% dplyr::select(req_cols))
rules_2W_Used[,'gated_flag'] <- apply(rules_2W_Used[,selected_rules], 1, max)
output_list <- list("rules_2W_New" = rules_2W_New,
"rules_PV_New" = rules_PV_New,
"rules_PV_Used" = rules_PV_Used,
"rules_2W_Refinance" = rules_2W_Used
)
save_xlsx_output(data = output_list, relative_path = "//gating_rules//post_workshop//Gating Rules - Mar 2021.xlsx")
|
dd5d09ac093e0c0f990aa29b7103868f5337b4c7
|
f247158fb166901454b8e4e6c4afe3340eed663b
|
/R/Distribution_DeletEffects.R
|
116ef9a35a5b9e01be32dfbb6cbfbee7c755b261
|
[] |
no_license
|
kjgilbert/aNEMOne
|
3126382f7588c4bfb998c1f23fa6b4a9718eea88
|
51701f2991c5693e71e19b1147c0c9a43599a86f
|
refs/heads/master
| 2020-06-02T18:18:31.577221
| 2017-04-05T08:41:09
| 2017-04-05T08:41:09
| 34,544,653
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,765
|
r
|
Distribution_DeletEffects.R
|
#'
#' Takes an output file from Nemo of deleterious loci genotypes, and plots the distribution of both the homozygous and heterozygous effects of these loci.
#'
#' @title Examine effect size distribution of deleterious loci from Nemo
#'
#'
#' @param file The file containing deleterious loci output from Nemo.
#'
#' @param num.loci The number of deleterious loci that were simulated in that file.
#'
#' @param xlim.ho The limits of the x-axis for the homozygous effect distribution, default is from 0 to 1.
#'
#' @param xlim.he The limits of the x-axis for the heterozygous effect distribution, default is from 0 to 1.
#'
#' @return
#'
#' Creates a plot of two histograms showing the distributions.
#'
#' @author Kimberly J Gilbert
#'
#' @references \href{http://nemo2.sourceforge.net/index.html}{Nemo} is created and maintained by Fred Guillaume. The manual and source files are available online.
#'
#' @export dist.delet.effects
dist.delet.effects <- function(file, num.loci, xlim.ho=c(0,1), xlim.he=c(0,1)){
	# Read the two rows after the header: row 1 = homozygous effects,
	# row 2 = heterozygous effects; each has num.loci values plus 5
	# bookkeeping fields appended by Nemo.
	delet.traits <- matrix(scan(file, skip=1, nlines=2, what="numeric()"), ncol=num.loci+5, byrow=TRUE)
	# strip Nemo's extra information: column 1 (pop ID) and the last
	# 4 columns (age, sex, ped, origin)
	delet.traits <- delet.traits[, -c(1,(num.loci+2):(num.loci+5))]
	# delet loci effect sizes:
	ho <- as.numeric(delet.traits[1,])
	he <- as.numeric(delet.traits[2,])
	# Fix: restore the caller's graphics settings on exit instead of
	# leaving mfrow permanently altered (original leaked par state).
	op <- par(mfrow=c(1,2))
	on.exit(par(op), add=TRUE)
	hist(as.matrix(ho), col="steelblue1", breaks=50, xlab="Homozygous Effect Size", main="", xlim=xlim.ho)
	hist(as.matrix(he), col="steelblue3", breaks=50, xlab="Heterozygous Effect Size", main="", xlim=xlim.he)
}
#'
#' Takes an output file from Nemo of deleterious loci genotypes, and plots the mean number of deleterious mutations per individual in a patch over the landscape.
#'
#' @title Look at numbers of deleterious mutations across the landscape
#'
#' @param del.file The file containing deleterious loci output from Nemo.
#'
#' @param num.loci The number of deleterious loci that were simulated in that file.
#'
#' @param patches.x The number of patches on the landscape along the x-axis (parallel to expansion).
#'
#' @param patches.y The number of patches on the landscape along the y-axis (perpendicular to expansion).
#'
#' @param count.type Whether to count homozygous, heterozygous, or total number of mutations.
#'
#' @return
#'
#' Creates a plot over the landscape (heat map style) for the mean number of deleterious mutations per individual in a patch.
#'
#' @author Kimberly J Gilbert
#'
#' @references \href{http://nemo2.sourceforge.net/index.html}{Nemo} is created and maintained by Fred Guillaume. The manual and source files are available online.
#'
#' @export delet.muts.over.landscape
delet.muts.over.landscape <- function(del.file, patches.x, patches.y, num.loci, count.type="total"){
# -- Colour ramps for the heat maps (white -> yellow -> red -> purple) --
# custom colors
purple=rgb(1,0,1)
red=rgb(1,0,0)
yellow=rgb(1,1,0)
green=rgb(0,1,0)
teal=rgb(0,1,1)
blue=rgb(0,0,1)
white=rgb(1,1,1)
# NOTE(review): teal, blue and greenToWhite are defined but never used below.
whiteToWhite <- colorRampPalette(white)
whiteToYellow <- colorRampPalette(c(white, yellow))
yellowToRed <- colorRampPalette(c(yellow, red))
redToPurple <- colorRampPalette(c(red, purple))
greenToWhite <- colorRampPalette(c(green, white))
# Read the Nemo genotype dump (first 3 header lines skipped). Each row is one
# individual: patch id, num.loci genotype codes, then 5 bookkeeping columns.
delet.muts <- matrix(scan(del.file, skip=3, what="character()"), ncol=num.loci+6, byrow=TRUE)
# strip the -1's from nemo's extra information
# because there are 1000 loci, last 5 spots - age, sex, ped, origin, and some other number
delet.muts <- delet.muts[, -c((num.loci+2):(num.loci+6))]
# First column is the patch (population) id; the remaining columns are the
# per-locus genotype strings.
pop.list <- as.numeric(delet.muts[,1])
delet.muts <- data.frame(delet.muts[,-1])
# Per-individual counts: "00" = wild type, "01"/"10" = heterozygous,
# "11" = homozygous; total counts each homozygous locus twice.
num.zero <- apply(delet.muts, MARGIN=1, FUN=function(x) length(which(x == "00")))
num.hets <- apply(delet.muts, MARGIN=1, FUN=function(x) length(which(x == "01" | x=="10")))
num.homs <- apply(delet.muts, MARGIN=1, FUN=function(x) length(which(x == "11")))
total.muts <- num.hets + (2*num.homs)
mut.counts <- data.frame(cbind(pop.list, num.zero, num.hets, num.homs, total.muts))
# Mean of every count column per patch (aggregate by patch id).
avg.mut.counts <- aggregate(mut.counts, by=list(mut.counts$pop.list), FUN=mean)
total.num.patches <- patches.x*patches.y
# there shouldn't be any ghost patches in the list because they're culled to pop size zero, but if errors arise down the line, that could be at fault
# NOTE(review): the following line is duplicated verbatim, so up to TWO rows
# beyond total.num.patches are dropped -- confirm this is intentional.
if(dim(avg.mut.counts)[1] > total.num.patches) avg.mut.counts <- avg.mut.counts[- (total.num.patches+1),]
if(dim(avg.mut.counts)[1] > total.num.patches) avg.mut.counts <- avg.mut.counts[- (total.num.patches+1),]
# if some patches are empty: pad them with zero rows so the matrices below
# contain exactly patches.x * patches.y values in patch order.
if(dim(avg.mut.counts)[1] < total.num.patches){
empty.patches <- setdiff(1:total.num.patches, avg.mut.counts$pop.list)
empty.rows <- data.frame(matrix(0, ncol=6, nrow=length(empty.patches)))
empty.rows[,1] <- empty.patches
empty.rows[,2] <- empty.patches
names(empty.rows) <- names(avg.mut.counts)
avg.mut.counts <- rbind(avg.mut.counts, empty.rows)
avg.mut.counts <- avg.mut.counts[order(avg.mut.counts$pop.list), ]
}
if(count.type == "total" | count.type == "all"){
# make total fitness into a matrix matched to the landscape
total.mut.mat <- matrix(avg.mut.counts$total.muts, nrow=patches.y, ncol=patches.x, byrow=FALSE)
# make the scale always the same: two corner cells are overwritten with the
# global max and 0 so all plots share one colour scale (corner data is lost).
total.mut.mat[1,1] <- max(avg.mut.counts[,4:6]); total.mut.mat[2,1] <- 0
image.plot(x=1:patches.x, y=1:patches.y, t(total.mut.mat), col=c(whiteToWhite(40), whiteToYellow(60), yellowToRed(60), redToPurple(15)), ylab="", xlab="Axis of expansion", main="Mean number total delet muts per ind (within a patch)")
}
if(count.type == "homozygous" | count.type == "all"){
# make quanti fitness into a matrix matched to the landscape
hom.mut.mat <- matrix(avg.mut.counts$num.homs, nrow=patches.y, ncol=patches.x, byrow=FALSE)
# make the scale always the same:
hom.mut.mat[1,1] <- max(avg.mut.counts[,4:6]); hom.mut.mat[2,1] <- 0
image.plot(x=1:patches.x, y=1:patches.y, t(hom.mut.mat), col=c(whiteToWhite(40), whiteToYellow(60), yellowToRed(60), redToPurple(15)), ylab="", xlab="Axis of expansion", main="Mean number homozygous delet muts per ind (within a patch)")
}
if(count.type == "heterozygous" | count.type == "all"){
# make delet fitness into a matrix matched to the landscape
het.mut.mat <- matrix(avg.mut.counts$num.hets, nrow=patches.y, ncol=patches.x, byrow=FALSE)
# make the scale always the same:
het.mut.mat[1,1] <- max(avg.mut.counts[,4:6]); het.mut.mat[2,1] <- 0
# NOTE(review): image.plot() comes from the 'fields' package (not attached here).
image.plot(x=1:patches.x, y=1:patches.y, t(het.mut.mat), col=c(whiteToWhite(40), whiteToYellow(60), yellowToRed(60), redToPurple(15)), ylab="", xlab="Axis of expansion", main="Mean number heterozygous delet muts per ind (within a patch)")
}
}
|
8a4d0e8d48d5f6c2fbd0fd7fc3711b530378bb13
|
7f691f36fe8c40efc2a01bf4dae10086140182fe
|
/qPCR expression graphs.R
|
c162f74fc3d2c15106e352499e407e42e9641061
|
[] |
no_license
|
nunngm/RNA-sequencing
|
5594400818b8e428bc36c33d0caf2d4cb342c578
|
4056982e449cce7812ba9b611ab9ef2cbe280f24
|
refs/heads/master
| 2023-07-19T22:03:02.746290
| 2023-07-18T16:22:50
| 2023-07-18T16:22:50
| 236,590,229
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,777
|
r
|
qPCR expression graphs.R
|
## Plotting qPCR data
## Reads a table of per-sample Ct values from the clipboard (row names encode
## age.treatment.hpi_rep, e.g. "Y.PST.06_1") and builds df.graph: the parsed
## design factors cbind'ed to the Ct columns.
library(tidyr)
library(ggplot2)
library(dplyr)
library(svglite)
library(agricolae)
library(car)
library(ggthemes)
setwd("C:\\Users\\garre\\OneDrive\\Documents\\Cameron Lab- McMaster University\\Data\\Data-ARR RNA-seq\\Exp-qRT-PCR\\Graphs")
# NOTE(review): "clipboard" only works on Windows; paste the data before running.
df.avg = read.table("clipboard", sep = "\t", row.names = 1,header=T)
samps = rownames(df.avg)
# Split "age.treatment.hpi_rep" into its four design components.
samps = t(as.data.frame(strsplit(samps, split = "_", fixed = T)))
samps = as.data.frame(cbind(t(as.data.frame(strsplit(samps[,1], split = ".", fixed = T))), samps[,2]))
colnames(samps) = c("age", "treatment","hpi", "rep")
# Fix factor level order so plots show Young before Mature, etc.
samps$age = factor(samps$age, levels = c("Y", "M"))
samps$treatment = factor(samps$treatment, levels = c("UN", "MO", "PST"))
samps$hpi = factor(samps$hpi, levels = c("00", "06","12"))
df.graph = cbind(samps, df.avg)
# Upper Tukey fence: Q3 + 1.5 * IQR, ignoring NAs.
upperbound <- function(x) {
  iqr <- IQR(x, na.rm = TRUE)
  quantile(x, 0.75, na.rm = TRUE) + 1.5 * iqr
}
# Lower Tukey fence: Q1 - 1.5 * IQR, ignoring NAs.
lowerbound <- function(x){
  iqr <- IQR(x, na.rm = TRUE)
  quantile(x, 0.25, na.rm = TRUE) - 1.5 * iqr
}
# Flag values outside the Tukey fences (Q1 - 1.5*IQR, Q3 + 1.5*IQR).
# Returns a logical vector the same length as x (NA for NA inputs).
is_outlier <- function(x) {
  lo <- quantile(x, 0.25, na.rm = TRUE) - 1.5 * IQR(x, na.rm = TRUE)
  hi <- quantile(x, 0.75, na.rm = TRUE) + 1.5 * IQR(x, na.rm = TRUE)
  x < lo | x > hi
}
# Ad-hoc check: relative expression of ALD1 vs SEC5A and Tukey HSD groups for
# the mature-plant samples only. 2^-(Ct_target - Ct_ref) is the usual
# delta-Ct relative-expression transform.
targetGeneName = "ALD1"
refGeneName = "SEC5A"
data = df.graph %>% mutate( sampGroup = paste(age, treatment, hpi, sep = "_"), .keep = "all") %>% mutate(target = 2^(-(get(targetGeneName)-get(refGeneName))))
anovaModel = aov(log2(target) ~ sampGroup, data = data[data$age=="M",])
print(HSD.test(anovaModel, alpha=0.05, "sampGroup", console=F)$groups)
## Graph for graphing 3 factor data of young and mature samples.
##
## Plots relative expression (2^-(Ct_target - Ct_ref)) of `targetGeneName`
## against `refGeneName` as treatment/hpi bars faceted by age, prints the
## Tukey HSD letter groups of a one-way ANOVA on log2 expression, and
## optionally saves the plot as an SVG.
##
## data           data frame with columns age, treatment, hpi and one Ct
##                column per gene (see the script preamble building df.graph).
## targetGeneName name of the target-gene Ct column in `data`.
## refGeneName    name of the reference-gene Ct column in `data`.
## exptID         experiment identifier embedded in the saved file name.
## colours        fill colours for the three treatment levels.
## width, height  saved-SVG dimensions in inches.
## graph          TRUE = save to file; FALSE = return/print the ggplot object.
qpcr3FGraph = function(data, targetGeneName, refGeneName, exptID = "temp", colours = c("red", "green", "blue"), width = 8, height = 6, graph = F){
# Relative expression per sample; sampGroup labels each age_treatment_hpi cell.
data = data %>% mutate( sampGroup = paste(age, treatment, hpi, sep = "_"), .keep = "all") %>% mutate(target = 2^(-(get(targetGeneName)-get(refGeneName))))
print(data)
anovaModel = aov(log2(target) ~ sampGroup, data = data)
print(HSD.test(anovaModel, alpha=0.05, "sampGroup", console=F)$groups)
p = data %>% group_by(sampGroup) %>%
#mutate(target = log10(target)) %>%
#mutate(inlier = ifelse(is_outlier(target), as.numeric(NA), target), outlier = ifelse(is_outlier(target), target, as.numeric(NA)) ) %>%
mutate(inlier = target) %>%
ggplot(., aes(x=hpi:treatment, y=inlier, fill = treatment)) +
stat_summary(fun = mean, geom = "bar", position = position_dodge(width = 1), colour = "#000000", size = 0.75) +
geom_jitter( size=2,#colour = df.graph$rep,
alpha = 0.5, position = position_jitterdodge(dodge.width = 1, jitter.width = 0.8)) + facet_grid(.~age, labeller = labeller(age = c(Y = "Young", M = "Mature"))) +
# Error bars: mean +/- sd, with the lower whisker clamped at zero.
stat_summary(fun = mean,
fun.min = function(x) {ifelse(mean(x) - sd(x)>0,
mean(x) - sd(x)
, 0 )
},
fun.max = function(x) {mean(x) + sd(x)},
geom = "errorbar", lty =1 , size =0.75, width = 0.25, colour = "#000000", position = position_dodge(width = 1)) +
scale_y_continuous(expand = expansion(c(0, 0.1)))+
theme(
legend.position="none",
plot.title = element_text(size=11),
axis.text.x = element_blank()
) + ylab(paste0(targetGeneName,"/", refGeneName)) + xlab("") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA, size = 1),
#strip.text.x = element_text(size = 15),
#strip.background = element_rect(colour = "black", fill = "#FFFFFF", size = 1),
axis.line = element_line(colour = "black", size=0),
axis.title.x=element_text(size=15),
#axis.text.x=element_blank()),
axis.ticks=element_line(colour = "black", size =1),
axis.ticks.length = unit(5,"points") ,
axis.title.y = element_text(size=15),
axis.text = element_text(color = "black", size=15),
strip.background.x = element_blank(),
strip.text.x = element_blank()) +
scale_fill_manual(values = colours)
# + theme_few()
if(graph ==T){
# BUGFIX: the original unconditionally re-read exptID from the console,
# silently discarding any exptID passed as an argument. Prompt only when the
# caller left exptID at its default and the session is interactive.
if(interactive() && identical(exptID, "temp")) exptID = readline(prompt = "Enter experimentID:")
ggsave(file = paste(targetGeneName, refGeneName, paste0(exptID, ".svg"), sep = "_"), plot = p, width = width, height = height)
} else{
p
}
}
# Example invocations for individual genes (graph = T writes the SVG).
qpcr3FGraph(df.graph, targetGeneName = "FMO1", refGeneName = "SEC5A",exptID = "ARR-PIP-22-1", height = 6, width = 7, colours = c("#54B031", "#0993AE" , "#F6A63C"), graph = F)
qpcr3FGraph(df.graph, targetGeneName = "ALD1", refGeneName = "SEC5A",exptID = "ARR-PIP-22-1", height = 6, width = 7, colours = c("#378717", "#6DFDFD" , "#FFFF00"), graph = T)
targetGeneName = "RLP28"
refGeneName = "SEC5A"
qpcr3FGraph(df.graph, targetGeneName = targetGeneName, refGeneName = refGeneName,exptID = "ARR-PIP-22-1", height = 6, width = 7, colours = c("#54B031", "#0993AE" , "#F6A63C"), graph = F)
# Manual ANOVA/Tukey follow-ups for RLP28: mature samples only, then all samples.
data = df.graph %>% mutate( sampGroup = paste(age, treatment, hpi, sep = "_"), .keep = "all") %>% mutate(target = 2^(-(get(targetGeneName)-get(refGeneName))))
anovaModel = aov(log2(target) ~ sampGroup, data = data[data$age=="M",])
HSD.test(anovaModel, alpha=0.05, "sampGroup", console=F)$groups
anovaModel = aov(log2(target) ~ sampGroup, data = data)
temp = HSD.test(anovaModel, alpha=0.05, "sampGroup", console=F)$groups
# Back-transform the group means from log2 scale to fold expression.
temp[,1] = 2^temp[,1]
temp
## Plot raw Ct values (no reference-gene normalisation) for one gene as
## treatment/hpi bars faceted by age; prints Tukey HSD groups of a one-way
## ANOVA on Ct and optionally saves the plot as "CtVal_<gene>_<exptID>.svg".
## Tukey-fence outliers are excluded from the plotted points.
qpcrCtGraph = function(data, targetGeneName, exptID = "exptID", colours = c("red", "green", "blue"), width = 8, height = 6, graph = F){
data = data %>% mutate(target = get(targetGeneName), sampGroup = paste(age, treatment, hpi, sep = "_"), .keep = "all")
print(data)
anovaModel = aov(target ~ sampGroup, data = data)
print(HSD.test(anovaModel, alpha=0.05, "sampGroup", console=F)$groups)
# Split each group's values into inliers (plotted) and outliers (dropped).
p = data %>% group_by(sampGroup) %>%
mutate(inlier = ifelse(is_outlier(target), as.numeric(NA), target), outlier = ifelse(is_outlier(target), target, as.numeric(NA)) ) %>%
ggplot(., aes(x=hpi:treatment, y=inlier, fill = treatment)) +
stat_summary(fun = mean, geom = "bar", position = position_dodge(width = 1), colour = "#000000", size = 0.75) +
geom_jitter( size=2, alpha = 0.5, position = position_jitterdodge(dodge.width = 1, jitter.width = 0.8)) + facet_grid(.~age, labeller = labeller(age = c(Y = "Young", M = "Mature"))) +
# Error bars: mean +/- sd, lower whisker clamped at zero.
stat_summary(fun = mean,
fun.min = function(x) {ifelse(mean(x) - sd(x)>0,mean(x) - sd(x),0 )},
fun.max = function(x) {mean(x) + sd(x)},
geom = "errorbar", lty =1 , size =0.75, width = 0.25, colour = "#000000", position = position_dodge(width = 1)) +
scale_y_continuous(expand = expansion(c(0, 0.1)))+
theme(
legend.position="right",
plot.title = element_text(size=11),
axis.text.x = element_blank()
) + ylab(paste0(targetGeneName,"(Ct)")) + xlab("") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA, size = 1),
strip.text.x = element_text(size = 15),
strip.background = element_rect(colour = "black", fill = "#FFFFFF", size = 1),
axis.line = element_line(colour = "black", size=0),
axis.title.x=element_text(size=15),
#axis.text.x=element_blank()),
axis.ticks=element_line(colour = "black", size =1),
axis.ticks.length = unit(5,"points") ,
axis.title.y = element_text(size=15),
axis.text = element_text(color = "black", size=15)) +
scale_fill_manual(values = colours)
# + theme_few()
if(graph ==T){
ggsave(file = paste("CtVal", targetGeneName, paste0(exptID, ".svg"), sep = "_"), plot = p, width = width, height = height)
} else{
p
}
}
# Save the ALD1 raw-Ct plot for experiment ARR-PIP-22-1.
qpcrCtGraph(df.graph, targetGeneName = "ALD1", exptID = "ARR-PIP-22-1", colours = c("#54B031", "#0993AE" , "#F6A63C"), graph = T)
# Moving on to weekly expression graphs
## Plot relative expression (2^-(Ct_target - Ct_ref)) as treatment/hpi bars
## faceted by age; prints Tukey HSD groups and optionally saves an SVG.
##
## BUGFIX: the original read `targetGeneName` and `refGeneName` as free
## variables (i.e. from the global environment), unlike its sibling
## qpcrWeeklyGraph(). They are now explicit trailing parameters; the defaults
## look the names up in the caller's environment, so existing calls that
## relied on globals keep working.
##
## data           data frame with columns age, treatment, hpi and Ct columns.
## exptID         experiment identifier embedded in the saved file name.
## colours        fill colours for the three treatment levels.
## width, height  saved-SVG dimensions in inches.
## graph          TRUE = save to file; FALSE = return/print the ggplot object.
## targetGeneName / refGeneName  gene column names (default: caller's values).
weeklyBacterialLevel = function(data, exptID = "exptID", colours = c("red", "green", "blue"), width = 8, height = 6, graph = F,
targetGeneName = get("targetGeneName", envir = parent.frame()),
refGeneName = get("refGeneName", envir = parent.frame())){
data = data %>% mutate(target = 2^(-(get(targetGeneName)-get(refGeneName))), sampGroup = paste(age, treatment, hpi, sep = "_"), .keep = "all")
print(data)
anovaModel = aov(log2(target) ~ sampGroup, data = data)
print(HSD.test(anovaModel, alpha=0.05, "sampGroup", console=F)$groups)
# Tukey-fence outliers are removed from the plotted points per group.
p = data %>% group_by(sampGroup) %>%
mutate(inlier = ifelse(is_outlier(target), as.numeric(NA), target), outlier = ifelse(is_outlier(target), target, as.numeric(NA)) ) %>%
ggplot(., aes(x=hpi:treatment, y=inlier, fill = treatment)) +
stat_summary(fun = mean, geom = "bar", position = position_dodge(width = 1), colour = "#000000", size = 0.75) +
geom_jitter( size=2, alpha = 0.5, position = position_jitterdodge(dodge.width = 1, jitter.width = 0.8)) + facet_grid(.~age, labeller = labeller(age = c(Y = "Young", M = "Mature"))) +
# Error bars: mean +/- sd, lower whisker clamped at zero.
stat_summary(fun = mean,
fun.min = function(x) {ifelse(mean(x) - sd(x)>0,mean(x) - sd(x),0 )},
fun.max = function(x) {mean(x) + sd(x)},
geom = "errorbar", lty =1 , size =0.75, width = 0.25, colour = "#000000", position = position_dodge(width = 1)) +
scale_y_continuous(expand = expansion(c(0, 0.1)))+
theme(
legend.position="right",
plot.title = element_text(size=11),
axis.text.x = element_blank()
) + ylab(paste0(targetGeneName,"/", refGeneName)) + xlab("") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA, size = 1),
strip.text.x = element_text(size = 15),
strip.background = element_rect(colour = "black", fill = "#FFFFFF", size = 1),
axis.line = element_line(colour = "black", size=0),
axis.title.x=element_text(size=15),
#axis.text.x=element_blank()),
axis.ticks=element_line(colour = "black", size =1),
axis.ticks.length = unit(5,"points") ,
axis.title.y = element_text(size=15),
axis.text = element_text(color = "black", size=15)) +
scale_fill_manual(values = colours)
# + theme_few()
if(graph ==T){
ggsave(file = paste(targetGeneName, refGeneName, paste0(exptID, ".svg"), sep = "_"), plot = p, width = width, height = height)
} else{
p
}
}
## Plot relative expression (2^-(Ct_target - Ct_ref)) of targetGeneName vs
## refGeneName as treatment/hpi bars faceted by age; prints Tukey HSD groups
## of a one-way ANOVA on log2 expression and optionally saves an SVG named
## "<target>_<ref>_<exptID>.svg". Tukey-fence outliers are dropped from the
## plotted points. (Same plot as weeklyBacterialLevel, with explicit
## gene-name parameters.)
qpcrWeeklyGraph = function(data, targetGeneName, refGeneName, exptID = "exptID", colours = c("red", "green", "blue"), width = 8, height = 6, graph = F){
data = data %>% mutate(target = 2^(-(get(targetGeneName)-get(refGeneName))), sampGroup = paste(age, treatment, hpi, sep = "_"), .keep = "all")
print(data)
anovaModel = aov(log2(target) ~ sampGroup, data = data)
print(HSD.test(anovaModel, alpha=0.05, "sampGroup", console=F)$groups)
# Split each group's values into inliers (plotted) and outliers (dropped).
p = data %>% group_by(sampGroup) %>%
mutate(inlier = ifelse(is_outlier(target), as.numeric(NA), target), outlier = ifelse(is_outlier(target), target, as.numeric(NA)) ) %>%
ggplot(., aes(x=hpi:treatment, y=inlier, fill = treatment)) +
stat_summary(fun = mean, geom = "bar", position = position_dodge(width = 1), colour = "#000000", size = 0.75) +
geom_jitter( size=2, alpha = 0.5, position = position_jitterdodge(dodge.width = 1, jitter.width = 0.8)) + facet_grid(.~age, labeller = labeller(age = c(Y = "Young", M = "Mature"))) +
# Error bars: mean +/- sd, lower whisker clamped at zero.
stat_summary(fun = mean,
fun.min = function(x) {ifelse(mean(x) - sd(x)>0,mean(x) - sd(x),0 )},
fun.max = function(x) {mean(x) + sd(x)},
geom = "errorbar", lty =1 , size =0.75, width = 0.25, colour = "#000000", position = position_dodge(width = 1)) +
scale_y_continuous(expand = expansion(c(0, 0.1)))+
theme(
legend.position="right",
plot.title = element_text(size=11),
axis.text.x = element_blank()
) + ylab(paste0(targetGeneName,"/", refGeneName)) + xlab("") +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
panel.border = element_rect(colour = "black", fill = NA, size = 1),
strip.text.x = element_text(size = 15),
strip.background = element_rect(colour = "black", fill = "#FFFFFF", size = 1),
axis.line = element_line(colour = "black", size=0),
axis.title.x=element_text(size=15),
#axis.text.x=element_blank()),
axis.ticks=element_line(colour = "black", size =1),
axis.ticks.length = unit(5,"points") ,
axis.title.y = element_text(size=15),
axis.text = element_text(color = "black", size=15)) +
scale_fill_manual(values = colours)
# + theme_few()
if(graph ==T){
ggsave(file = paste(targetGeneName, refGeneName, paste0(exptID, ".svg"), sep = "_"), plot = p, width = width, height = height)
} else{
p
}
}
# Ad-hoc plot: relative expression per age/hpi/treatment with outliers removed.
p = df.graph %>%
group_by(age, hpi, treatment) %>%
mutate(target = 2^(-(get(targetGeneName)-get(refGeneName))) , .keep = "unused") %>%
mutate(inlier = ifelse(is_outlier(target), as.numeric(NA), target), outlier = ifelse(is_outlier(target), target, as.numeric(NA)) ) %>%
ggplot(., aes(x=hpi:treatment, y=inlier, fill = treatment)) +
stat_summary(fun = mean, geom = "bar", position = position_dodge(width = 1), colour = "#000000", size = 0.75) +
stat_summary(fun = mean,
fun.min = function(x) {ifelse(mean(x) - sd(x)>0,mean(x) - sd(x),0 )},
fun.max = function(x) {mean(x) + sd(x)},
geom = "errorbar", lty =1 , size =0.75, width = 0.25, colour = "#000000", position = position_dodge(width = 1)) +
geom_jitter( size=2, alpha=1, position = position_jitterdodge(dodge.width = 1, jitter.width = 0)) + facet_grid(.~age) +
scale_y_continuous(expand = expansion(c(0, 0.1)))+
theme(
legend.position="none",
plot.title = element_text(size=11)
) + ylab(paste0(targetGeneName,"/SEC5A")) + theme_few()
# NOTE(review): this geom_text() call is a free-standing expression -- it is
# never added to a plot with `+`, and it references `Tukey` and `uptake_mean`,
# which are not defined in this script. It looks like leftover scratch code.
geom_text(aes(label=Tukey, y = uptake_mean + sd + 2), size = 3, color = "Gray25",
show.legend = FALSE,
position = position_dodge(0.9))
# Weekly expression plot over development (type = weeks post-germination).
# NOTE(review): `expression` must be a data frame created elsewhere in the
# session (it shadows base::expression); this block errors otherwise.
p = expression %>%
group_by(type) %>%
mutate(inlier = ifelse(is_outlier(!!as.name(targetGeneName)), as.numeric(NA), !!as.name(targetGeneName)), outlier = ifelse(is_outlier(!!as.name(targetGeneName)), !!as.name(targetGeneName), as.numeric(NA)) ) %>%
ggplot(., aes(x=type, y=inlier, colour = rep)) +
stat_summary(fun = mean, geom = "bar", fill = rep(c( "#444444",
"#666666", "#9A9A9A", "#CDCDCD", "#FFFFFF"),2), colour = "#000000", size = 0.75) +
stat_summary(fun = mean,
fun.min = function(x) {mean(x) - sd(x)},
fun.max = function(x) {mean(x) + sd(x)},
geom = "errorbar", lty =1 , size =0.75, width = 0.25, colour = "#000000") +
#geom_boxplot(fill = rep(c("#FFFFFF"), 5)) +
geom_jitter(width = 0.25, color= "#000000", size = 2, alpha = 0.4) +
# Outliers are drawn separately as asterisks (shape 8) rather than hidden.
geom_point(aes(x = type, y = outlier), size =2, alpha = 1, shape = 8, colour = "#000000") +
scale_y_continuous(expand = expansion(c(0, 0.1))) +
theme(
legend.position="none",
plot.title = element_text(size=11)
) +
xlab("Weeks post-germination (wpg)") + ylab(paste0(targetGeneName,"/SEC5A")) + theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(colour = "black", size=1),
axis.title.x=element_text(size=15),
#axis.text.x=element_blank()),
axis.ticks=element_line(colour = "black", size =1),
axis.ticks.length = unit(5,"points") ,
axis.title.y = element_text(size=15),
axis.text = element_text(color = "black", size=15)
)
p
ggsave(file = paste0(targetGeneName,"_WKEX-22-2.svg"), plot = p, width = 5, height = 4)
|
66fa3904510614753208a93a61a2ab9ed351d8a3
|
da71dedb5877dfb0807cf33cea66ac0817317d58
|
/ISRIC2CropSyst/src/CalculateSlope.R
|
03384f5006cd6e4c14eab0ba731456ac0430cfe5
|
[] |
no_license
|
sonthuybacha/misc_R
|
359bcc53612ac208c88c00641ead1102c5e1de45
|
03541711817eff8ad08be71aff690eea86ecaee7
|
refs/heads/master
| 2020-09-24T00:50:41.296265
| 2019-10-31T16:10:31
| 2019-10-31T16:10:31
| 225,622,353
| 1
| 0
| null | 2019-12-03T13:06:52
| 2019-12-03T13:06:51
| null |
UTF-8
|
R
| false
| false
| 432
|
r
|
CalculateSlope.R
|
# Calculate slope from a DEM raster and write it out as a GeoTIFF.
# author: John Mutua
# Load packages. library() errors immediately when a package is missing,
# whereas require() only warns and lets the script fail later with a less
# informative error.
library(raster)
library(rgdal)
# Set working directory containing DEM.tif
setwd("D:\\ToBackup\\Projects\\SWAT\\ArcSWAT_Projects\\Sasumua_data\\ISRIC2Cropsyst_Sasumua")
# List the available GeoTIFFs (kept for interactive inspection; not used below)
layers <- list.files(".", pattern = "tif")
# Read and display the digital elevation model
dem <- raster("DEM.tif")
plot(dem)
# Compute slope from the DEM (see ?raster::terrain for unit options)
slp <- terrain(dem, "slope")
plot(slp)
# Write the slope raster next to the DEM
writeRaster(slp, filename = "Slope.tif", format = "GTiff", overwrite = TRUE)
|
1e74d7c1ca433392a41993e71451eae5398cc339
|
895b3548b2dc255e0f544fd08e61973b5bde6e84
|
/BasicUnitTest.R
|
c7a5ed39f2462461e2a024539f9a2d2aa79b5f79
|
[] |
no_license
|
SuzanElbadry/Hamlet
|
e054eee13beed38112587ca4df195c81417fdcc1
|
89e1420824810633efdf19b15135876085706597
|
refs/heads/master
| 2020-09-13T03:28:39.156840
| 2020-01-15T05:41:33
| 2020-01-15T05:41:33
| 222,644,080
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,022
|
r
|
BasicUnitTest.R
|
library(testthat)
# Regression tests for Task1(): speaker names and line counts extracted from
# the first two scenes of Hamlet (the first 21039 characters of the play).
# NOTE(review): these tests rely on `Hamlet`, `Task1` and data.table() being
# provided by the surrounding test setup.
test_that('First 2 Scenes Speakers', {
  Sub <- substr(Hamlet, 0, 21039)
  SubHamletTest <- data.table(text = Sub)
  SpeakersPart <- setorder(Task1(SubHamletTest), -Total)
  RightSpeakersCount <- data.table(
    Speakers = c("HORATIO", "HAMLET", "KING CLAUDIUS", "MARCELLUS", "BERNARDO",
                 "FRANCISCO", "QUEEN GERTRUDE", "LAERTES", "LORD POLONIUS",
                 "CORNELIUS", "VOLTIMAND"),
    Total = c(149, 95, 93, 52, 38, 10, 10, 7, 4, 1, 1)
  )
  # expect_equal() replaces the deprecated expect_that(..., equals(...)) idiom.
  expect_equal(SpeakersPart[, 1], RightSpeakersCount[, 1])
})
test_that('First 2 Scenes Speakers Count', {
  Sub <- substr(Hamlet, 0, 21039)
  SubHamletTest <- data.table(text = Sub)
  SpeakersPart <- setorder(Task1(SubHamletTest), -Total)
  RightSpeakersCount <- data.table(
    Speakers = c("HORATIO", "HAMLET", "KING CLAUDIUS", "MARCELLUS", "BERNARDO",
                 "FRANCISCO", "QUEEN GERTRUDE", "LAERTES", "LORD POLONIUS",
                 "CORNELIUS", "VOLTIMAND"),
    Total = c(149, 95, 93, 52, 38, 10, 10, 7, 4, 1, 1)
  )
  expect_equal(SpeakersPart[, 2], RightSpeakersCount[, 2])
})
|
ce49c996c760e8e4c11124dd4459deccca827ad6
|
6b4fe2baa84e74af637f319ea5d887cb2fd6f9a2
|
/kevin/rimod-analysis/ENA_data_upload_renaming.R
|
ee9e07c1e3f95defd418007cce11e7841baaf38b
|
[] |
no_license
|
dznetubingen/analysis_scripts
|
1e27ca43a89e7ad6f8c222507549f72b1c4efc20
|
4fcac8a3851414c390e88b4ef4ac461887e47096
|
refs/heads/master
| 2021-06-25T10:47:40.562438
| 2021-01-04T16:02:34
| 2021-01-04T16:02:34
| 187,789,014
| 1
| 0
| null | 2020-09-03T11:37:25
| 2019-05-21T07:55:17
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 3,026
|
r
|
ENA_data_upload_renaming.R
|
## Rename RiMod sequencing FASTQ files to anonymised IDs for ENA upload.
## For each data directory the old sample ID is parsed out of the file name,
## mapped to its new ID via RiMod_ID_mapping.txt (columns old_id, new_id),
## and the file is renamed on disk.
## WARNING: file.rename() mutates the filesystem -- run each section once only.
library(stringr)
setwd("/media/kevin/89a56127-927e-42c0-80de-e8a834dc81e8/data_upload/")
# Load the master sample table and re-save it sorted by the numeric part of
# the "rimodNNN" sample UID.
md <- read.table("RiMod_master_sample_file.txt", sep="\t", header=T)
number <- as.numeric(gsub("rimod", "", md$Sample_UID))
md <- md[order(number),]
write.table(md, "ordered_master_table.txt", sep="\t", quote=F, row.names = F)
# load id-mapping table (old_id -> new_id)
idmap <- read.table("RiMod_ID_mapping.txt", sep="\t", header=T, stringsAsFactors = F)
####
# smRNA Tübingen renaming
#####
# Rename smRNA-seq Tübingen data
setwd("smrnaseq_frontal_tübingen/final_trimmed/")
files <- list.files()
nf <- files
# Strip the trimming prefix/suffix to recover the old sample ID.
nf <- gsub("final_5bp_trimmed_sample_", "", nf)
nf <- gsub("F.fastq.gz", "", nf)
nf <- gsub("_", "", nf)
# Reorder mapping rows to match the file order, then take the new IDs.
tmp <- idmap[idmap$old_id %in% nf,]
tmp <- tmp[match(nf, tmp$old_id),]
nf <- tmp$new_id
nf <- paste("smRNAseq_tuebingen_frontal_human_", nf, ".fastq.gz", sep="")
# do the renaming
file.rename(files, nf)
#=== end renaming smRNA Tübingen ===#
####
# smRNA Göttingen renaming
#####
setwd("/media/kevin/89a56127-927e-42c0-80de-e8a834dc81e8/data_upload/smrnaseq_frontal_göttingen/")
files <- list.files()
samples <- files
# Old sample ID = text before the disease label (NDC is recoded to FTD first).
samples <- gsub("RNAomeTb", "", samples)
samples <- gsub("NDC", "FTD", samples)
samples <- str_split(samples, pattern="FTD", simplify = T)[,1]
# rename 103277 and 110140 (known typos in the original file names)
samples[samples == "103277"] <- "10327"
samples[samples == "110140"] <- "11014"
# NOTE(review): this check only prints its result; consider stopifnot() so the
# script aborts before renaming when an ID is missing from the mapping.
all(samples %in% idmap$old_id)
tmp <- idmap[idmap$old_id %in% samples,]
tmp <- tmp[match(samples, tmp$old_id),]
nf <- tmp$new_id
nf <- paste("smRNAseq_goettingen_frontal_human_", nf, ".fastq.gz", sep="")
# do the renaming
file.rename(files, nf)
#======== end renaming smRNA Göttingen ===#
####
# Rename frontal CAGE-seq data from human post-mortem brain
####
setwd("/media/kevin/89a56127-927e-42c0-80de-e8a834dc81e8/data_upload/frontal_cageseq/cageseq_fastq/")
files <- list.files()
samples <- files
samples <- gsub("_fro.fastq.gz", "", samples)
# rename A144_12 (special-case sample ID)
samples[samples == "A144_12"] <- "0A144"
# NOTE(review): printed check only -- see note above.
all(samples %in% idmap$old_id)
tmp <- idmap[idmap$old_id %in% samples,]
tmp <- tmp[match(samples, tmp$old_id),]
nf <- tmp$new_id
nf <- paste("CAGEseq_frontal_", nf, ".fastq.gz", sep="")
# do the renaming
file.rename(files, nf)
#========== end renaming CAGE-seq frontal data ===========#
####
# Rename frontal RNA-seq data
#####
setwd("/media/kevin/89a56127-927e-42c0-80de-e8a834dc81e8/data_upload/frontal_rnaseq/")
files <- list.files()
# Adjust the A144 sample
files <- gsub("A144_12", "0A144", files)
# Old ID is the first underscore-delimited token of the file name.
samples <- str_split(files, pattern="_", simplify = T)[,1]
# NOTE(review): printed check only -- see note above.
all(samples %in% idmap$old_id)
tmp <- idmap[idmap$old_id %in% samples,]
tmp <- tmp[match(samples, tmp$old_id),]
nf <- tmp$new_id
# Rebuild each file name with the first token replaced by the new ID.
f <- str_split(files, pattern="_", simplify = T)
f[,1] <- nf
# flatten the files
new_filenames <- c()
for (i in 1:nrow(f)){
fname = f[i,]
fname = str_flatten(fname, collapse="_")
fname <- gsub(".gz_", ".gz", fname)
new_filenames <- c(new_filenames, fname)
}
# do the renaming
file.rename(files, new_filenames)
|
85c9ac37cb70357b164b90170496b8a116ca4c72
|
a069699b0d96c6083a202e5b82eb9026e10f5833
|
/R/helpers.R
|
b113501ef4bf0f9142541862b51b279ccc92811e
|
[] |
no_license
|
JonasMoss/SPQR
|
46c795b79648a7485b69eb246a5f7f660d88316b
|
aa669e7b42919310a985c291d007208928686971
|
refs/heads/master
| 2020-07-19T04:49:22.188854
| 2019-09-04T19:59:33
| 2019-09-04T19:59:33
| 206,376,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,367
|
r
|
helpers.R
|
#' Checks if a call references a name.
#'
#' Recursively walks the argument list of `call` and reports whether the
#' symbol `name` appears anywhere in it, including inside nested calls.
#'
#' @param call A call object (e.g. produced by `quote()`).
#' @param name A name/symbol to search for.
#' @return `TRUE` if `name` occurs among the (possibly nested) arguments of
#'   `call`, `FALSE` otherwise.
call_uses_name = function(call, name) {
  args = as.list(call)[-1]
  # vapply (unlike sapply) yields logical(0) for an empty argument list, so
  # zero-argument calls no longer crash which() -- sapply returned list().
  for(i in which(vapply(args, is.name, logical(1))))
    if(name == args[[i]])
      return(TRUE)
  # Recurse into nested calls; a match anywhere counts, so return early.
  for(i in which(vapply(args, is.call, logical(1))))
    if(call_uses_name(args[[i]], name))
      return(TRUE)
  FALSE
}
#' Checks if all the arguments of a call are bound.
#'
#' Builds an environment containing the call's named arguments: literal
#' (non-name, non-call) arguments are assigned eagerly; call-valued arguments
#' that do not reference the call's other argument names are assigned lazily
#' via `delayedAssign()`. Name-valued arguments are not added (see the
#' trailing comment in the body).
#'
#' @param call A call.
#' @param env An environment used as the parent of the returned environment.
#' @return An environment holding the resolvable arguments of `call`.
arguments_available = function(call, env = parent.frame()) {
args = as.list(call)[-1]
# Parse each argument NAME back into a symbol.
# NOTE(review): unnamed arguments produce an empty name, for which
# parse(text = "")[[1]] errors -- this function assumes all args are named.
formals = lapply(names(args), function(arg) parse(text = arg)[[1]])
names(formals) = names(args)
# NOTE(review): sapply() on an empty args list returns list(), which would
# make the logical indexing below error for zero-argument calls.
name_indices = sapply(args, is.name)
call_indices = sapply(args, is.call)
call_args = args[which(call_indices)]
# Environment of the literal (self-evaluating) arguments; chain lookups to env.
e = as.environment(args[!name_indices & !call_indices])
parent.env(e) = env
# Arguments with calls are included in two cases:
# 1.) They are self-evaluating, without reference to the other names.
# 2.) They are referenced name by another variable.
for(arg in names(call_args)) {
include = TRUE
for(name in formals)
if(call_uses_name(call_args[[arg]], name)) {
include = FALSE
break
}
# Lazy promise so evaluation happens only if/when the argument is used.
if(include) do.call(delayedAssign, list(arg, call_args[[arg]], e, e))
}
# The 'name' indices must also be added.
e
}
|
cc77dc86b641415eb65b47650802bd856714f1b3
|
8c5e07af84c21e5b252069d1232b269c44b4ca69
|
/core.R
|
8709e9bc00798b7fefbc45c2495d92c584e6c1fe
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
mysociety/councillor_participation_research
|
514356443efd30c0fab8293a21b7f64d3074bf77
|
8a5b71486d406b278ebe1e204f99e1fde9738972
|
refs/heads/master
| 2023-03-08T15:46:52.387388
| 2021-02-22T21:21:23
| 2021-02-22T21:21:23
| 332,840,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,328
|
r
|
core.R
|
# Package loading ----
# library() is preferred over require() for hard dependencies: require() only
# warns and returns FALSE when a package is missing, so the script would
# otherwise fail later with a less informative error.
library(MASS)
library(pscl)
library(car)
library(broom)
library(ggplot2)
library(weights)
# NOTE(review): clearing the workspace inside a script is discouraged (it can
# silently delete the user's objects); kept for backwards compatibility.
rm(list=ls())
# Weighted two-sample t-test: split the values `x` and their weights `w` by
# the two-level grouping variable `y`, then compare the two groups with
# weights::wtd.t.test().
weighted_t_test <- function(x, y, w) {
  values_by_group <- split(x, y)
  weights_by_group <- split(w, y)
  wtd.t.test(
    values_by_group[[1]], values_by_group[[2]],
    weights_by_group[[1]], weights_by_group[[2]]
  )
}
# Read the councillor survey (first column = respondent row names).
df = read.csv("data//survey_with_weights.csv",header=TRUE)
#restrict to completed current uses
# -- Shorten the verbose question-text column names --------------------------
colnames(df)[colnames(df)=="Should.participatory.processes.be.ad.hoc..convened.only.for.specific.purpose..or.permanent..a.recurring.process.on.a.policy.area.."] <- "adhoc"
colnames(df)[colnames(df)=="Should.participatory.processes.be.authoritative..citizen.recommendations.should.be.carried.out..or.consultative..results.are.reviewed.by.council.decision.making.processes.."] <- "consultative"
colnames(df)[colnames(df)=="As.a.Councillor..do.you.feel.that.citizen.participation.activities.overlap.with.your.role.as.an.elected.representative."] <- "overlap"
colnames(df)[colnames(df)=="Has.your.Local.Authority..during.your.tenure.as.a.Councillor..ever.conducted.any.participatory.exercises..beyond.typical.consulations.."] <- "done_exercise"
colnames(df)[colnames(df)=="Demographically.balanced.participants.In.general..how.important.are.the.following.in.terms.of.the.legitimacy.or.validity.of.the.process...1.is.low..5.is.high."] <- "balanced"
colnames(df)[colnames(df)=="Number.of.people.participating.In.general..how.important.are.the.following.in.terms.of.the.legitimacy.or.validity.of.the.process...1.is.low..5.is.high."] <- "numbers"
colnames(df)[colnames(df)=="Independent.conveners.In.general..how.important.are.the.following.in.terms.of.the.legitimacy.or.validity.of.the.process...1.is.low..5.is.high."] <- "independent"
colnames(df)[colnames(df)=="Length.of.exercise.In.general..how.important.are.the.following.in.terms.of.the.legitimacy.or.validity.of.the.process...1.is.low..5.is.high."] <- "length"
colnames(df)[colnames(df)=="Transparency.of.process.In.general..how.important.are.the.following.in.terms.of.the.legitimacy.or.validity.of.the.process...1.is.low..5.is.high."] <- "transparency"
colnames(df)[colnames(df)=="Quality.of.discussion.In.general..how.important.are.the.following.in.terms.of.the.legitimacy.or.validity.of.the.process...1.is.low..5.is.high."] <- "quality"
colnames(df)[colnames(df)=="Would.you.be.supportive.of.a.participatory.process.if.initiated.by.the.current.leadership.of.the.council."] <- "current_leadership"
colnames(df)[colnames(df)=="Does.holding.participatory.exercises.balance.representation.problems.in.one.party.councils."] <- "one_party_council"
colnames(df)[colnames(df)=="In.the.event.of.a.citizen.participation.exercise.drawing.conclusions...making.recommendations.that.conflict.with.your.own.views.as.a.Councillor..which.would.you.give.more.weight.to."] <- "conflict"
colnames(df)[colnames(df)=="Should.a.citizen.participation.exercise.be.conducted.by.your.council..would.you.expect.to.discuss.the.results.as.part.of.your.regular.council.meeting."] <- "discuss_meeting"
# -- Recode answers into numeric variables for the models ---------------------
# NOTE(review): the variable name says "prefer own", but it is set to 1 when
# the respondent would give more weight to the CITIZENS' view -- confirm the
# intended coding ("perfer" is sic; kept because downstream code may use it).
df$conflict_perfer_own <- 0
df$conflict_perfer_own[df$conflict=="The view agreed by the citizens participating in the Participatory Democracy exercise"] <- 1
# Ordinal scale: 3 = fully ad hoc ... 0 = fully permanent.
df$adhoc_scale <- 0
df$adhoc_scale[df$adhoc=="Ad hoc"] <- 3
df$adhoc_scale[df$adhoc=="Mixture / Ad hoc leaning"] <- 2
df$adhoc_scale[df$adhoc=="Mixture / Permanent leaning"] <- 1
df$adhoc_scale[df$adhoc=="Permanent"] <- 0
# Ordinal scale: 3 = fully consultative ... 0 = fully authoritative.
df$consult_scale <- 0
df$consult_scale[df$consultative=="Consultative"] <- 3
df$consult_scale[df$consultative=="Mixture / Consultative Leaning"] <- 2
df$consult_scale[df$consultative=="Mixture / Authoritative Leaning"] <- 1
df$consult_scale[df$consultative=="Authoritative"] <- 0
# Binary indicators (default 0 unless the stated condition holds).
df$pmajority <- 0
df$pmajority[df$part_of_majority=="TRUE"] <- 1
df$phasmajority <- 0
df$phasmajority[df$council_has_majority=="TRUE"] <- 1
# done_exercise: Yes = 1; No and Unsure both coded 0 (Unsure treated as No).
df$dexercise <- NA
df$dexercise[df$done_exercise=="No"] <- 0
df$dexercise[df$done_exercise=="Unsure"] <- 0
df$dexercise[df$done_exercise=="Yes"] <- 1
df$dmeeting <- 0
df$dmeeting[df$discuss_meeting=="Yes"] <- 1
df$cleadership <- 0
df$cleadership[df$current_leadership=="Yes"] <- 1
df$soverlap <- 0
df$soverlap[df$overlap=="Yes"] <- 1
# one_party_scale: Yes = 1, No = -1, other answers = 0; one_party_ok flags the
# middle (neither yes nor no) responses.
df$one_party_scale <- 0
df$one_party_scale[df$one_party_council=="Yes"] <- 1
df$one_party_scale[df$one_party_council=="No"] <- -1
df$one_party_ok <- 0
df$one_party_ok[df$one_party_scale==0] <- 1
|
0f8141176d95573b2eeeed34b54d6b1eb7d2f4a7
|
01bce092104ec1b0ae0eaa7daf3aa03e18d3d56d
|
/R/read_smf.R
|
357afb3926cc3b7edb8c4f03edc7d021179a7815
|
[] |
no_license
|
niszet/rmusicwork
|
a557d13afdf82e16e60c3aac365cade956aa7449
|
de1e4c8a5b5ab27e45199e8ff14051dd30e18a21
|
refs/heads/master
| 2020-04-05T14:33:56.825678
| 2017-08-26T00:49:21
| 2017-08-26T00:49:21
| 94,679,935
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,392
|
r
|
read_smf.R
|
#' SMF read function
#'
#' Reads a Standard MIDI File: the MThd header, then each MTrk chunk as a
#' track whose events are accumulated (with running absolute time) into a
#' data frame.
#'
#' @name read_smf
#' @param file input file path
#' @return An object of class "rsmf": a list with `header` (parsed MThd
#'   fields) and `tracks` (one list per track with `size` and `data`).
#' @export
#'
read_smf <- function(file){
con <- file(file, "rb")
# Guarantee the connection is closed even if parsing fails midway.
on.exit(close(con))
file_size <- file.info(file)[["size"]]
# smf_header <- data.frame(stringsAsFactors=FALSE)
# smf_header <- rbind(smf_header, c("fileSize", file_size), stringsAsFactors =FALSE)
# colnames(smf_header) <- c("item", "val")
# smf_header <- rbind(smf_header, read_header(con), stringsAsFactors=FALSE)
smf_header <- read_header(con)
# TODO the number of track is written in header. to clarify this point of view, it should be separate the each tracks.
# smf_data <- data.frame(stringsAsFactors=FALSE)
smf_data <- list()
# Read MTrk chunks until the file position reaches the end of the file.
# NOTE(review): read_mtrk(), read_track_data_size(), .read_dtime() and
# .read_ctrl() are defined elsewhere in this package.
while(file_size != seek(con, where=NA)){
tmp <- read_mtrk(con)
abs_time <- 0
# smf_track <- data.frame(stringsAsFactors=FALSE)
smf_track <- list()
smf_track$data <- data.frame(stringsAsFactors=FALSE)
if(all(is.na(tmp))){
stop("MTrk is needed")
}
# skipping to add MTrk
# smf_track <- rbind(smf_track, tmp, stringsAsFactors=FALSE)
# smf_data <- rbind(smf_data, tmp, stringsAsFactors=FALSE)
track_size <- read_track_data_size(con)
smf_track$size <- as.integer(track_size)
# skipping to add track_data_size
# smf_track <- rbind(smf_track, tmp, stringsAsFactors=FALSE)
# smf_data <- rbind(smf_data, tmp, stringsAsFactors=FALSE)
# track_end_point <- seek(con, where=NA) + as.integer(tmp[2])
# End of this track = current position + declared track data size.
track_end_point <- seek(con, where=NA) + as.integer(track_size)
# check the end of the track
while(seek(con, where=NA) < track_end_point){
# Delta time precedes each event; accumulate it into absolute time.
tmp <- .read_dtime(con)
# smf_data <- rbind(smf_data, tmp, stringsAsFactors=FALSE)
abs_time <- abs_time + tmp
# smf_track <- rbind(smf_track, tmp, stringsAsFactors=FALSE)
tmp <- .read_ctrl(con)
tmp[[length(tmp)+1]] <- abs_time
# smf_data <- rbind(smf_data, tmp, stringsAsFactors=FALSE)
# smf_track <- rbind(smf_track, tmp, stringsAsFactors=FALSE)
smf_track$data <- rbind(smf_track$data, tmp, stringsAsFactors=FALSE)
}
# colnames(smf_track) <- c("item", "ch", "type", "val", "com", "abs_time")
colnames(smf_track$data) <- c("item", "ch", "type", "val", "com", "abs_time")
# smf_track[["abstime"]] <- 0
# smf_data[[length(smf_data)+1]]$data <- smf_track
smf_data[[length(smf_data)+1]] <- smf_track
}
# colnames(smf_data) <- c("item", "ch", "type", "val", "com")
#smf <- list("header"=smf_header, "data"=smf_data)
smf <- c( list("header" = smf_header),
list("tracks" = smf_data))
class(smf) <- "rsmf"
smf
# print("EOF")
}
#' Read the header-chunk data-size field from a binary connection.
#'
#' Internal helper. Reads `an` integers of `asize` bytes each from `con`
#' (big-endian by default, as SMF requires) and returns them unchanged.
read_data_size <- function(con, an=1L, asize=4L, endian="big"){
  readBin(con, what = "integer", n = an, size = asize, endian = endian)
}
#' Read the 4-byte chunk identifier ("MThd") from a binary connection.
#'
#' Internal helper; returns the raw 4-character string without validating it.
read_mthd <- function(con){
  readChar(con, nchars = 4L, useBytes = TRUE)
}
#' Read the SMF format field (0, 1, or 2) from a binary connection.
#'
#' Internal helper: one 2-byte big-endian integer.
read_format <- function(con){
  readBin(con, what = "integer", n = 1L, size = 2L, endian = "big")
}
#' Read the number-of-tracks field from a binary connection.
#'
#' Internal helper: one 2-byte big-endian integer.
read_track <- function(con){
  readBin(con, what = "integer", n = 1L, size = 2L, endian = "big")
}
#' Read the SMF division (time-unit) field from a binary connection.
#'
#' Internal helper: one 2-byte big-endian integer
#' (typically ticks per quarter note).
read_time_unit <- function(con){
  readBin(con, what = "integer", n = 1L, size = 2L, endian = "big")
}
#' Read the complete SMF header chunk (MThd) into a named list.
#'
#' Internal helper. Reads, in file order: the chunk id, the declared header
#' length, the SMF format, the track count, and the time division. The reads
#' happen strictly left-to-right, so the connection position advances exactly
#' as in a sequential parse.
read_header <- function(con){
  list(
    mthd      = read_mthd(con),       # 4-byte chunk id, expected "MThd"
    data_size = read_data_size(con),  # header payload length (normally 6)
    format    = read_format(con),     # SMF format 0 / 1 / 2
    track     = read_track(con),      # number of MTrk chunks that follow
    time_unit = read_time_unit(con)   # division (ticks per quarter note)
  )
}
#' Read an MTrk chunk's payload length from a binary connection.
#'
#' Internal helper: one 4-byte big-endian integer.
read_track_data_size <- function(con){
  readBin(con, what = "integer", n = 1L, size = 4L, endian = "big")
}
#' Read and check a track chunk id from a binary connection.
#'
#' Internal helper. On success returns the character vector
#' c("MTrk", NA, NA, NA, NA, 0); on any other 4-byte id it returns
#' list(NA, NA), which callers detect via all(is.na(...)).
read_mtrk <- function(con){
  chunk_id <- readChar(con, 4L, useBytes = TRUE)
  if (chunk_id != "MTrk") {
    # Not a track chunk: signal failure with a list of NAs.
    return(list(NA, NA))
  }
  c("MTrk", NA, NA, NA, NA, 0)
}
#' Read one MIDI event (status byte plus payload) from a connection.
#'
#' Internal helper. Returns a 5-element list:
#'   list(status_byte, channel_or_meta_type, data1_or_length, value, comment)
#' The slot meanings depend on the event class; NA fills unused slots.
#' Byte reads happen in strict file order, so the connection position is
#' advanced exactly past this one event.
#'
#' BUG FIX: unknown 0xFF meta events were previously mislabeled with the
#' comment "track_end"; they are now labeled "unknown meta event".
.read_ctrl <- function(con){
  status <- readBinInt(con)
  hi <- bitops::bitShiftR(status, 4)  # upper nibble: event class
  lo <- bitops::bitAnd(status, 15)    # lower nibble: channel (0xF for meta)

  # Channel voice messages with two data bytes: 0x8n..0xBn.
  two_byte_com <- c(`8` = "Note off", `9` = "Note On",
                    `10` = "polyphonic key", `11` = "control change")
  if (hi >= 8 && hi <= 11) {
    d1 <- readBinInt(con)  # key / controller number
    d2 <- readBinInt(con)  # velocity / pressure / controller value
    return(list(status, lo, d1, d2, two_byte_com[[as.character(hi)]]))
  }

  # Channel voice messages with one data byte: 0xCn program change,
  # 0xDn channel pressure.
  if (hi == 12 || hi == 13) {
    val <- readBinInt(con)
    com <- if (hi == 12) "program change" else "channel pressure"
    return(list(status, lo, NA, val, com))
  }

  if (hi == 14) {
    # 0xEn pitch bend: 14-bit value, LSB byte first, then MSB.
    mm <- readBinInt(con)
    ll <- readBinInt(con)
    return(list(status, lo, NA, ll*128 + mm, "pitch bend"))
  }

  if (hi == 15) {
    if (lo == 15) {
      # 0xFF meta event: type byte, length byte, then `d_len` payload bytes.
      meta_event <- readBinInt(con)
      d_len <- readBinInt(con)
      me_data <- readBinInt(con, n = d_len)
      key <- as.character(meta_event)
      # Text-like meta events: payload decoded as text.
      text_com <- c(`0` = "Sequence Number", `1` = "Text",
                    `2` = "copy right", `3` = "Sequence Name",
                    `4` = "Instruments Name", `9` = "Device Name")
      if (key %in% names(text_com)) {
        return(list(status, meta_event, d_len, intToUtf8(me_data),
                    text_com[[key]]))
      }
      # Numeric meta events: payload folded into a big-endian integer.
      # Decimal type values correspond to the hex meta codes:
      # 84 = 0x54 SMPTE offset, 88 = 0x58 time signature ("haku"),
      # 89 = 0x59 key signature ("coard"), 81 = 0x51 tempo.
      num_com <- c(`84` = "SMPTE offset", `88` = "haku",
                   `89` = "coard", `81` = "tempo")
      if (key %in% names(num_com)) {
        return(list(status, meta_event, d_len, mbyte_to_int_big(me_data),
                    num_com[[key]]))
      }
      if (meta_event == 47) {  # 0x2F: end of track (empty payload)
        return(list(status, meta_event, d_len, NA, "track_end"))
      }
      # Unrecognized meta type: payload was already consumed above, so the
      # stream stays in sync. (Was wrongly labeled "track_end" before.)
      warning("unmatched FF")
      return(list(status, meta_event, d_len, NA, "unknown meta event"))
    }
    # 0xF0/0xF7 system exclusive and system common are not supported.
    warning("unmatched F*")
    return(list(status, NA, NA, NA, "F*"))
  }

  # Status byte < 0x80: running status is not supported by this reader.
  warning(paste0("unmatched ** ", status))
  return(list(status, NA, NA, NA, NA))
}
# Fold a vector of byte values into a single integer, little-endian:
# the first element is the least-significant byte.
mbyte_to_int_lit <- function(vec){
  powers <- 256**(seq_along(vec) - 1)
  sum(powers * vec)
}
# Fold a vector of byte values into a single integer, big-endian:
# the first element is the most-significant byte.
# Fixed: the original seq(length(vec)-1, 0, by=-1) errors ("wrong sign in
# 'by'") when vec is zero-length, which .read_ctrl can trigger for a meta
# event with a zero-length payload. Now returns 0 for empty input.
mbyte_to_int_big <- function(vec){
  sum(vec * 256**rev(seq_along(vec) - 1))
}
#' Decode one variable-length quantity (a MIDI delta time) from `con`.
#'
#' Internal helper. Bytes with the high bit set (value > 127) continue the
#' quantity; the final byte has the high bit clear. Each byte contributes
#' its low 7 bits, most-significant group first.
.read_dtime <- function(con){
  total <- 0
  byte <- readBinInt(con)
  while (byte > 127) {
    # Strip the continuation bit and shift the accumulator left by 7 bits.
    total <- (total + (byte - 128)) * 128
    byte <- readBinInt(con)
  }
  total + byte
}
# Thin wrapper around readBin with MIDI-friendly defaults:
# one unsigned single byte, big-endian.
readBinInt <- function(con, what="integer", n=1L, size=1L, endian="big", signed=FALSE){
  readBin(con, what, n, size, signed, endian)
}
|
a6ff8a35bcec2e8db78befc88a79a255af687034
|
d6dc738b26970938ab2311983d5f0cb32fb4c277
|
/saymyname/tests/testthat/test-saymyname.R
|
9099525f6c6b6ddd59c828960237802b2840bdfc
|
[] |
no_license
|
CavinWard/My_Name_Is
|
26d7a50522107bd28f8ae11c820a5f9102139f1d
|
b7f1217c90b656afad66c321277353cfa67c41e1
|
refs/heads/master
| 2021-05-04T08:37:44.226606
| 2016-10-11T18:03:29
| 2016-10-11T18:16:51
| 70,406,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
test-saymyname.R
|
# testthat unit tests for saymyname().
context("say my name")
# NOTE(review): `name` is assigned but never used by any test below.
name <- "Fred"
# Matching test_name/my_name should produce the greeting.
test_that("name correct", {
  expect_equal(saymyname(test_name="John",my_name="John"), "My name is John")
})
# Non-character input should be rejected with "What?".
test_that("integer: name is what?", {
  expect_equal(saymyname(1), "What?")
})
# NULL input should also be rejected with "What?".
test_that("NULL: name is what?", {
  expect_equal(saymyname(NULL), "What?")
})
# Mismatched names should produce "Who?".
test_that("wrong name?", {
  expect_equal(saymyname(test_name="John", my_name="Cavin"), "Who?")
})
|
be5c4d970ab2f0264ec8753af3c437a35775e69a
|
2c8c644d446e9ed0fb885ba28cf793b35ee4257a
|
/man/draw_legend.Rd
|
6526389f1294939acc9a9c19d35c54ea140ba67b
|
[] |
no_license
|
EngeLab/CIMseq
|
7a195fd6c400740ff30d417f223284bba9a09981
|
65adabfc662491a9e00c39ad8a443b9da95760ca
|
refs/heads/master
| 2023-04-17T05:02:11.676987
| 2022-01-20T12:24:05
| 2022-01-20T12:24:05
| 59,555,300
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 308
|
rd
|
draw_legend.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotExtras.R
\name{draw_legend}
\alias{draw_legend}
\title{draw_legend}
\usage{
draw_legend(l)
}
\arguments{
\item{l}{A grob containing a legend.}
}
\description{
Helper function for plotSwarmCircos.
}
\author{
Jason T. Serviss
}
|
5dab97e7c5f1229c732de6fd46d9bc7aae311598
|
308d107fd0cfffb6f13b9101f77bb6ed2f3fe9ae
|
/01 - Environmental data/02 - SoilVeg/Soil and fecundity/01 - Analysis.soil.R
|
33403d459e7491073f884949d040bfba80661e53
|
[] |
no_license
|
MarcoAndrello/Stoch_Demogr_Comp_Arabis
|
08a5a241c76550aed1e70fb2aecd2b56d4724fba
|
d327e434e3a7634f28f7efa4acc27de7e4f2f25d
|
refs/heads/master
| 2020-08-26T18:22:08.247883
| 2020-02-18T10:23:11
| 2020-02-18T10:23:11
| 217,101,255
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,998
|
r
|
01 - Analysis.soil.R
|
# Variance partitioning of soil variables (N, C, pH) across sites and
# quadrats-within-sites, using intercept-only MCMCglmm models, followed by
# per-site summary boxplots.
# NOTE(review): rm(list=ls()) wipes the user's workspace; avoid in shared
# scripts.
rm(list=ls())
library(MCMCglmm)
dataCN <- read.csv("CN.csv",h=T,sep=",")
# Nitrogen
hist(dataCN$N)
summary(dataCN$N)
# Intercept-only model; random effects for site and quadrat-in-site.
m1 <- MCMCglmm(N ~ 1,data=dataCN,random= ~site + SiteQuad,
               nitt=101000, thin=50, burnin=1000,verbose=F)
summary(m1)
autocorr.diag(m1$VCV)
plot(m1$VCV)
# Posterior proportion of total variance at each level
# (site / quadrat / residual "units").
vSite <- m1$VCV[, "site"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "site"] + m1$VCV[, "units"])
mean(vSite)
vSiteQuad <- m1$VCV[, "SiteQuad"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "site"] + m1$VCV[, "units"])
mean(vSiteQuad)
vUnits <- m1$VCV[, "units"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "site"] + m1$VCV[, "units"])
mean(vUnits)
# Carbon
hist(dataCN$C)
summary(dataCN$C)
m1 <- MCMCglmm(C ~ 1,data=dataCN,random= ~site + SiteQuad,
               nitt=101000, thin=50, burnin=1000,verbose=F)
summary(m1)
autocorr.diag(m1$VCV)
plot(m1$VCV)
vSite <- m1$VCV[, "site"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "site"] + m1$VCV[, "units"])
mean(vSite)
vSiteQuad <- m1$VCV[, "SiteQuad"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "site"] + m1$VCV[, "units"])
mean(vSiteQuad)
vUnits <- m1$VCV[, "units"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "site"] + m1$VCV[, "units"])
mean(vUnits)
# Spearman correlation between carbon and nitrogen.
cor.test(dataCN$C,dataCN$N,method="sp")
# pH
datapH <- read.csv("pH.csv",h=T,sep=",")
hist(datapH$pH)
summary(datapH$pH)
# NOTE(review): random effect is "Site" (capitalized) here vs "site" in
# CN.csv above -- presumably the column names differ between files; confirm.
m1 <- MCMCglmm(pH ~ 1,data=datapH,random= ~Site + SiteQuad,
               nitt=101000, thin=50, burnin=1000,verbose=F)
autocorr.diag(m1$VCV)
plot(m1$VCV)
summary(m1)
vSite <- m1$VCV[, "Site"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "Site"] + m1$VCV[, "units"])
mean(vSite)
vSiteQuad <- m1$VCV[, "SiteQuad"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "Site"] + m1$VCV[, "units"])
mean(vSiteQuad)
vUnits <- m1$VCV[, "units"]/(m1$VCV[, "SiteQuad"] + m1$VCV[, "Site"] + m1$VCV[, "units"])
mean(vUnits)
# Boxplots
par(mfrow=c(2,2))
boxplot(C~site,dataCN,main="Soil: carbon content")
boxplot(N~site,dataCN,main="Soil: nitrogen content")
boxplot(C/N~site,dataCN,main="Soil: carbon/nitrogen ratio")
boxplot(pH~site,datapH,ylim=c(4,8),ylab="pH",main="Soil acidity")
|
0a29654f2d0c9617c98cba6467c5077f5ff50104
|
b4eb6cc4124477bd884e2558bd6f9ac63aaab531
|
/gDNA_correction/test_imbalance.R
|
e1d6cf0ebbc5e4d3a9c83478be45b84a09990c6d
|
[] |
no_license
|
dagousket/cisreg
|
8a0e2af3be81216858a560ca163aa039fbfb61fb
|
ec351d0734a734442e71107ea0f7191ef0e24591
|
refs/heads/master
| 2021-05-24T10:17:18.807140
| 2020-04-10T17:54:25
| 2020-04-10T17:54:25
| 253,514,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,043
|
r
|
test_imbalance.R
|
#!/usr/bin/env Rscript
# Allelic-imbalance testing: for each count file, runs a two-sided exact
# binomial test per feature against a sample-specific H0 mean ASE, writes a
# per-sample results table, and extracts imbalanced/balanced feature lists.
library("optparse")
library("tools")
library("reshape2")
# Command-line interface.
option_list = list(
  make_option(c("-i", "--input"), type="character", default=NULL,
              help="list of count files to use in a txt file", metavar="character"),
  make_option(c("-o", "--out"), type="character", default="binom_test",
              help="file prefix to store the output as a tab-delimited file (2 files per sample) [default= %default]", metavar="character"),
  make_option(c("-x", "--chrx"), type="character", default = NULL,
              help="list of features in input files on chrX", metavar="character"),
  make_option(c("--chrx_treshold"), type="integer", default = 10,
              help="Minimum total coverage required for feature on chrx", metavar="character"),
  make_option(c("--autosome_treshold"), type="integer", default = 10,
              help="Minimum total coverage required for feature on autosomes", metavar="character"),
  make_option(c("-n", "--null_hypothesis_mean"), type="character",
              help="Table of mean value of ASE under the H0 hypothesis", metavar="character")
);
opt_parser = OptionParser(option_list=option_list, description = "An R script to perform a binomial test on count file and extract the imbalanced features.");
opt = parse_args(opt_parser);
##------------------------- Part 1 : Run binomial test
# Step 1 : set up environment and general variable you might want to modify
files = read.delim(opt$input,head=F, colClasses = "character")[,1]
h0_means = read.table(file = opt$null_hypothesis_mean, head = TRUE)
chrXFeat = read.delim(opt$chrx ,head=F, colClasses = "character")[,1]
# Step 2 : construct dataframe and apply binom.test (Warning : different test for chrX and autosomes SNPs)
# NOTE(review): seq(length(files),1) iterates in reverse and yields c(0, 1)
# when `files` is empty -- confirm empty input cannot occur, or guard it.
for (i in seq(length(files),1)){
  print(paste('Processing',files[i]))
  df<-data.frame(read.table(files[i],head=TRUE, fill = TRUE, colClasses = c('character','numeric','numeric'))[,1:3])
  # Compute ASE and keep coverage information
  df$coverage = df[,2] + df[,3]
  df$mASE = df[,2]/(df[,3] + df[,2]) # Assume virginizer is the second column
  df = df[!is.na(df$mASE),]
  # Load H0 hypothesis means (per sample; chrX and autosomes differ)
  mean_ase_autosomes = h0_means$mean_autosome[h0_means$sample == files[i]]
  mean_ase_chrx = h0_means$mean_chrx[h0_means$sample == files[i]]
  df$chr[df$feature %in% chrXFeat] = as.numeric(mean_ase_chrx)
  df$chr[!(df$feature %in% chrXFeat)] = as.numeric(mean_ase_autosomes)
  print("Header of the file :")
  print(head(df))
  # Two-sided exact binomial test per row. After dropping the feature-name
  # column: c[1], c[2] are the two allele counts; c[5] is the H0 mean ASE.
  raw_pvals = apply(as.matrix(df[,-1]),1,function(c) binom.test(x = as.numeric(c[1]), n = as.numeric(c[1])+as.numeric(c[2]), p = as.numeric(c[5]), alternative = 'two.sided')$p.value)
  df$pval = raw_pvals
  # FDR (Benjamini-Hochberg) correction across the sample's features.
  df$adj_pval = p.adjust(as.numeric(raw_pvals), method = "fdr")
  write.table(df, file = paste(opt$out,file_path_sans_ext(basename(files[i])),'_binom_test.tab',sep = ''), quote = FALSE, sep = "\t", row.names = FALSE)
  ##------------------------- Part 2 : Save the list of imbalanced SNPs
  # Step 1 : find the SNP files and extract imbalanced and balanced SNP names
  # Coverage filter uses the chrX threshold for chrX features and the
  # autosome threshold otherwise.
  dfi = df[(df$coverage >= opt$chrx_treshold & df[,1] %in% chrXFeat) | (df$coverage >= opt$autosome_treshold & !(df[,1] %in% chrXFeat)),]
  dfi = dfi[dfi$adj_pval <= 0.05,]
  the_snps = as.character(dfi$feature[!is.na(dfi$adj_pval)])
  write.table(the_snps, file = paste(opt$out,file_path_sans_ext(basename(files[i])),'_imbalanced_SNP_list.tab',sep = ''), quote = FALSE, row.names = FALSE, col.names = FALSE)
  dfb = df[(df$coverage >= opt$chrx_treshold & df[,1] %in% chrXFeat) | (df$coverage >= opt$autosome_treshold & !(df[,1] %in% chrXFeat)),]
  dfb = dfb[dfb$adj_pval > 0.05,]
  the_snps = as.character(dfb$feature[!is.na(dfb$adj_pval)])
  write.table(the_snps, file = paste(opt$out,file_path_sans_ext(basename(files[i])),'_balanced_SNP_list.tab',sep = ''), quote = FALSE, row.names = FALSE, col.names = FALSE)
}
# Tip : the python script 'remove_snp_from_list' wonderfully takes care of the next step, i.e. using this file to create a filtered SNP file for Peaks/Genes/... without the imbalanced SNPs
q('no')
|
79dfbcde066598065f28acb71e7cd8aa4fcda766
|
531086b7ef1d45aea9f7d7de065891a39ebd5c35
|
/jhu/regmod/manipulate.R
|
f76bc11d45e593213f9cbfa3d985fa8ea97bf596
|
[] |
no_license
|
githubfun/coursera
|
5d06463b646bf1ca4b4b6aac307945e93f34eada
|
209361035ac384eb4a9e31174fe3a26c92754ec8
|
refs/heads/master
| 2017-05-29T00:11:21.768914
| 2016-02-23T21:53:04
| 2016-02-23T21:53:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 310
|
r
|
manipulate.R
|
# Interactive least-squares demo: a slider over beta shows how the residual
# sum of squares of the no-intercept model y = beta * x changes.
library(ggplot2)   # NOTE(review): loaded but only base plot() is used below
library(manipulate)
# Toy data (n = 10).
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
# Scatterplot of (x, y) titled with the least-squares error at `beta`.
# Reads the globals x and y defined above.
myHist <- function(beta){
  lse <- sum((y-beta*x)^2)
  plot(x, y, main=lse)
}
# Re-draw the plot as beta moves over [0, 2] in steps of 0.1.
manipulate(myHist(beta), beta=slider(0, 2, step=0.1))
|
8ca78726944faf79b74cedc11e6a09f8e0531b70
|
c4ee384cbb2a7071832a87c27e19bdb9f1662d66
|
/alessandra_eda.R
|
62bf19ddca21b0df197ecc9819dd5d996ca1918f
|
[] |
no_license
|
naterowan00/cloth_filter
|
eb836ec1b0e22a910bd7ecadf26d8627dfe94843
|
e57eee69027d15172118dac80c50f597454cd4a1
|
refs/heads/master
| 2022-10-07T12:10:38.467989
| 2020-06-10T05:46:23
| 2020-06-10T05:46:23
| 269,202,652
| 2
| 1
| null | 2020-06-10T05:46:24
| 2020-06-03T21:55:41
|
Rich Text Format
|
UTF-8
|
R
| false
| false
| 1,893
|
r
|
alessandra_eda.R
|
# EDA of clarifier/filter water-quality exports: load CSVs, clean them,
# and widen the long (parameter, value) format into one column per parameter.
#######################
##set working directory
#######################
# NOTE(review): absolute user path makes the script non-portable.
setwd("/Users/alessandrarodriguez/Desktop/AQUACLOTH")
################
##load libraries
################
library("tidyverse"); theme_set(theme_minimal())
theme_update(panel.grid.minor = element_blank())
library(xts)
# NOTE(review): tidyverse is loaded a second time here (harmless no-op).
library(tidyverse)
##############
##loading data
##############
clar_eff <- read.csv(file = "clarifier_effluent.csv")
clar_inf <- read.csv(file = "clarifier_influent.csv")
filter_eff <- read.csv(file = "filter_effluent.csv")
filter_inf <- read.csv(file = "filter_influent.csv")
# Drop the first column of each table -- presumably a row-index column
# written by a previous save; TODO confirm against the CSVs.
clar_eff <- clar_eff[,-1]
clar_inf <- clar_inf[,-1]
filter_eff <- filter_eff[,-1]
filter_inf <- filter_inf[,-1]
# Parse the date column. NOTE(review): as_date() comes from lubridate;
# confirm the installed tidyverse attaches it (tidyverse >= 2.0 does).
clar_eff$date<- as_date(clar_eff$date)
clar_inf$date<- as_date(clar_inf$date)
filter_eff$date<- as_date(filter_eff$date)
filter_inf$date<- as_date(filter_inf$date)
######################
##spreading parameters
######################
##clar_eff##
unique(clar_eff$parameter)
clar_eff$time <- as.character(clar_eff$time)
clar_eff$value <- as.numeric(clar_eff$value)
# Long -> wide: one column per parameter.
clar_eff<- clar_eff %>%
  spread(., parameter, value)
##clar_inf##
unique(clar_inf$parameter)
clar_inf$time <- as.character(clar_inf$time)
clar_inf$value <- as.numeric(clar_inf$value)
clar_inf<- clar_inf %>%
  spread(., parameter, value)
##filter_eff##
unique(filter_eff$parameter)
filter_eff$time <- as.character(filter_eff$time)
filter_eff$value <- as.numeric(filter_eff$value)
# row_id disambiguates duplicated identifier combinations so spread()
# does not error on duplicate rows; it is dropped again afterwards.
filter_eff<- filter_eff %>%
  group_by_at(vars(-value)) %>%
  mutate(row_id=1:n()) %>%
  ungroup() %>%
  spread(., parameter, value) %>%
  select(-row_id)
##filter_inf##
unique(filter_inf$parameter)
filter_inf$time <- as.character(filter_inf$time)
filter_inf$value <- as.numeric(filter_inf$value)
filter_inf<- filter_inf %>%
  group_by_at(vars(-value)) %>%
  mutate(row_id=1:n()) %>%
  ungroup() %>%
  spread(., parameter, value) %>%
  select(-row_id)
|
a64a868150b56c29da79b176926ac8b424ed4f6b
|
d3dd96dc9a8d6ee708ef3430b384cf633d09bf32
|
/tests/test_rfci.R
|
cd78b72f2a6c4e3e75f1c8b9257992ab477158d3
|
[] |
no_license
|
cran/pcalg
|
3f0609f316139a29cae839cd798251e92b96d5ee
|
032fd6d1c51579a784f893a4c4838b0381dc9830
|
refs/heads/master
| 2023-01-06T08:18:03.758296
| 2022-12-20T23:20:05
| 2022-12-20T23:20:05
| 17,723,261
| 27
| 25
| null | 2021-12-15T22:20:59
| 2014-03-13T19:30:38
|
R
|
UTF-8
|
R
| false
| false
| 4,992
|
r
|
test_rfci.R
|
# Regression test for pcalg: on these examples RFCI must produce the same
# PAG adjacency matrix as FCI; any disagreement stops with an error.
library(pcalg)
doExtras <- pcalg:::doExtras()
source(system.file(package="Matrix", "test-tools-1.R", mustWork=TRUE))
##--> showProc.time(), assertError(), relErrV(), ...
# Session diagnostics (help package maintainers debug check failures).
R.home(); sessionInfo() # helping package maintainers to debug ...
.libPaths()
packageDescription("pcalg")
packageDescription("Matrix")
## load the functions for the simulations of this paper
## source("/u/colombo/Diss/RAusw/First_paper_RFCI/functions_for_the_simulations.R")
## RFCI improves the output
##______________________________________________
## Input: L1=1; L2=2; X1=6; X2=4; X3=3; X4=5; X5=7; X6=8
## Output: X1=4; X2=2; X3=1; X4=3; X5=5; X6=6
# Adjacency matrix of the 8-node example (nodes 1 and 2 become latent).
# NOTE(review): `amat` is only displayed below, not used by the test itself.
amat <- rbind(0,# 2 3 4 5 6 7 8
              c(0,0,0,0,1,1,0,0),
              c(0,1,0,1,0,1,0,0),
              c(1,0,0,0,0,1,0,0),
              c(0,0,0,0,0,1,0,0),
              c(1,1,0,0,0,0,0,0),
              c(0,0,0,1,1,0,0,0),
              0)
colnames(amat) <- rownames(amat) <- as.character(1:8)
Matrix::Matrix(amat) # to "visualize"
L <- c(1,2)
V <- as.character(1:8)
# Build the DAG as a graphNEL edge list with random positive edge weights.
edL <- setNames(vector("list", length=length(V)), V)
edL[[6]] <- list(edges=NULL, weights=NULL)
edL[[8]] <- list(edges=NULL, weights=NULL)
edL[[4]] <- list(edges=c(7,8), weights=c(abs(rnorm(1)),abs(rnorm(1))))
edL[[3]] <- list(edges=c(4,5,8), weights=c(abs(rnorm(1)),abs(rnorm(1)),abs(rnorm(1))))
edL[[5]] <- list(edges=c(6,8), weights=c(abs(rnorm(1)),abs(rnorm(1))))
edL[[7]] <- list(edges= 8, weights=abs(rnorm(1)))
edL[[1]] <- list(edges=c(4,6), weights=c(abs(rnorm(1)),abs(rnorm(1))))
edL[[2]] <- list(edges=c(5,7), weights=c(abs(rnorm(1)),abs(rnorm(1))))
g <- new("graphNEL", nodes=V, edgeL=edL, edgemode="directed")
if(dev.interactive())
  plot(g)
## Compute the true covariance matrix of g
cov.mat <- trueCov(g)
## Delete rows and columns which belong to L
true.cov <- cov.mat[-L,-L]
## Transform it in a correlation matrix
true.corr <- cov2cor(true.cov)
# Huge n makes the Gaussian CI test effectively population-level.
suffStat <- list(C=true.corr, n=10^9)
showSys.time(pop.fci1 <-
               fci(suffStat, gaussCItest, labels=V[-L],
                   alpha=0.9999, doPdsep=TRUE,verbose=FALSE)@amat)
showSys.time(pop.rfci1 <-
               rfci(suffStat, gaussCItest, labels=V[-L],
                    alpha=0.9999, verbose=FALSE)@amat)
if (any(pop.fci1 != pop.rfci1)) {
  stop("Test of RFCI wrong: small example!")
}
# NOTE(review): the big example below is permanently disabled by
# `if (FALSE)`; the commented line suggests it was once gated on doExtras.
## if (doExtras) {
if (FALSE) {
  ## Thomas' example (version number 8) about discriminating path orientation rule
  V <- as.character(1:25)
  edL <- setNames(vector("list", length=length(V)), V)
  edL[[ 1]] <- list(edges=c(14,18),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[ 2]] <- list(edges=c(16,18),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[ 3]] <- list(edges=c(16,24),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[ 4]] <- list(edges=c(18,24),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[ 5]] <- list(edges=c(15,25),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[ 6]] <- list(edges=c(17,19),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[ 7]] <- list(edges=c(14,19),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[ 8]] <- list(edges=c(14,24),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[ 9]] <- list(edges=c(19,20),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[10]] <- list(edges=c(20,25),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[11]] <- list(edges=c(23,25),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[12]] <- list(edges=c(22,24),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[13]] <- list(edges=c(21,23),weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[14]] <- list(edges=NULL, weights=NULL)
  edL[[15]] <- list(edges=c(16,17,24),weights=c(abs(rnorm(1)),abs(rnorm(1)),abs(rnorm(1))))
  edL[[16]] <- list(edges=c(19,25), weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[17]] <- list(edges=c(18,24,25),weights=c(abs(rnorm(1)),abs(rnorm(1)),abs(rnorm(1))))
  edL[[18]] <- list(edges=c(21,25), weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[19]] <- list(edges=c(23,24,25),weights=c(abs(rnorm(1)),abs(rnorm(1)),abs(rnorm(1))))
  edL[[20]] <- list(edges= 24, weights=abs(rnorm(1)))
  edL[[21]] <- list(edges=c(22,25), weights=c(abs(rnorm(1)),abs(rnorm(1))))
  edL[[22]] <- list(edges= 25, weights=abs(rnorm(1)))
  edL[[23]] <- list(edges= 24, weights=abs(rnorm(1)))
  edL[[24]] <- list(edges=NULL,weights=NULL)
  edL[[25]] <- list(edges=NULL,weights=NULL)
  (g <- new("graphNEL", nodes=V, edgeL=edL,edgemode="directed"))
  if(dev.interactive())
    plot(g)
  ## Latent variables (all having no parents):
  L <- c(1:13)
  ## Compute the true covariance matrix of g
  cov.mat <- trueCov(g)
  ## Delete rows and columns which belong to L
  true.cov <- cov.mat[-L,-L]
  ## Transform it in a correlation matrix
  true.corr <- cov2cor(true.cov)
  suffStat <- list(C=true.corr, n=10^9)
  p.tr <- dim(true.corr)[1]
  showSys.time(pop.fci2 <- fci(suffStat, gaussCItest, p=p.tr,
                               alpha=0.9999, doPdsep=TRUE)@amat)
  showSys.time(pop.rfci2 <- rfci(suffStat, gaussCItest, p=p.tr,
                                 alpha=0.9999)@amat)
  if (any(pop.fci2 != pop.rfci2)) {
    stop("Test of RFCI wrong: big example!")
  }
}
|
1ff80344021a33dcb5c2eb0775162c3b5626e516
|
a36271f5008e6178473337db948649a7fdce2027
|
/R/tex2rmd.r
|
33f34d56997e2035a200dc01bfbe1f6140b8b2e2
|
[] |
no_license
|
sctyner/tex2rmd
|
655638b6b1bfea7f40b763b759b2b33e984d1dc3
|
085ce4f3fbce1124518d60cc3a3fc6c2f0e3f5d7
|
refs/heads/master
| 2020-05-23T07:18:36.368874
| 2018-03-01T20:52:07
| 2018-03-01T20:52:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,057
|
r
|
tex2rmd.r
|
#' @title tex2rmd
#'
#' @description Converts a raw LaTeX file to RMarkdown format, then to Word
#' format. Can optionally convert to any format supported by RMarkdown.
#'
#' @param infile Full path to the input LaTeX file.
#'
#' @return The markdown code is written to a file named <root>.Rmd,
#' where \code{infile} is <root>.tex. The markdown code in the
#' form of a vector, one line per element, is invisibly returned.
#'
#' @details
#' The general workflow to convert a Latex document into Markdown,
#' and eventually Word, is as follows:
#' \enumerate{
#'   \item Compile the latex document, using pdftex or whatever,
#'   so that the .aux and .bbl files are generated.
#'   \item Run \code{tex2rmd} specifying the file containing
#'   the raw Latex code as the input. The associated .aux
#'   and .bbl files must be in the same directory. This will
#'   generate a markdown document of the same root name but
#'   with .Rmd extension.
#'   \item Compile the .Rmd file. In RStudio, open it and hit <ctrl>-<shift>-K.
#' }
#'
#' Relies on package-internal helpers (returnCommandArg, convertTexTag,
#' processBibliography, processTables, processFigures, processDisplayEqns,
#' processCrossRefs, processLists, processNewLines) defined elsewhere.
#'
#' @author Trent McDonald
#'
#' @examples
#' \dontrun{
#' tex2rmd("main.tex")
#' }
#' @export
tex2rmd <- function(infile){
  # Images can also be included using either raw HTML with img
  # tags (<img src = "" />) or using markdown directly (![](file.png)).
  #
  # For differencing text files, try online tools or suggestions here:
  #https://stackoverflow.com/questions/4078933/find-difference-between-two-text-files-with-one-item-per-line

  # NOTE(review): sub("\\..+$", ...) strips from the FIRST dot, so a path
  # containing dots (e.g. "v1.2/main.tex") truncates early -- confirm inputs.
  fileRoot <- sub("\\..+$","",infile)
  tex <- readLines(infile)

  # ---- Put in place holders for legit percent signs
  tex <- gsub("\\\\%","##",tex)
  # remove any comments
  tex <- sub("\\%.+$","",tex)
  # ---- Restore legit percent signs
  tex <- gsub("\\#\\#","%",tex)

  # ---- Remove Latex double quotes
  tex <- gsub("``","'",tex)
  tex <- gsub("''","'",tex)

  # Remove header (everything up to \begin{document})
  headPos <- grep("\\\\begin\\{document\\}",tex)
  header <- tex[1:headPos]
  tex <- tex[(headPos+1):length(tex)]

  # extract title; fall back to a generic title when none is declared
  title <- returnCommandArg(header, "title")
  if(nchar(title) == 0){
    title <- paste("Contents of", fileRoot)
  } else {
    title <- convertTexTag(title, "textbf", "")
  }

  # extract author(s); fall back to the current system user
  auth <- returnCommandArg(header, "author")
  if(nchar(auth) == 0){
    auth <- Sys.info()["user"]
  }
  # if there's a comma between authors, replace last with ", and"
  commas <- gregexpr(",", auth)[[1]]
  if( commas[1] > 0 ){
    if(length(commas) > 1){
      harvardComma <- ","
    } else {
      harvardComma <- ""
    }
    lastComma <- commas[length(commas)]
    auth <- paste0( substring(auth,1,lastComma-1), harvardComma,
                    " and", substring(auth,lastComma+1))
  }
  # Remove any textbf from author string
  auth <- convertTexTag(auth, "textbf", "")

  # extract date; fall back to today
  dt <- returnCommandArg(header, "date")
  if(nchar(dt) == 0){
    dt <- format(Sys.time(), "%d-%b-%Y")
  }

  # ---- Remove maketitle
  tex <- sub("\\\\maketitle","",tex)
  # ---- Remove end{document}
  tex <- sub("\\\\end\\{document\\}","",tex)

  # ---- Keywords: capture the braced argument, then drop the command
  keyw <- tex[grep("\\\\keywords\\{",tex)]
  if(length(keyw) > 0){
    tmp3 <- regexpr("\\{.+\\}",keyw)
    keyw <- substring(keyw,tmp3+1, tmp3 + attr(tmp3,"match.length") - 2)
    tex <- sub("\\\\keywords\\{.+\\}", "", tex)
  }

  # ---- Fix up Abstract: collapse the environment to one "**Abstract:**" line
  begline <- grep("\\\\begin\\{abstract\\}",tex)
  if( length(begline) >0){
    endline <- grep("\\\\end\\{abstract\\}",tex)
    abst <- paste(tex[begline:endline], collapse=" ")
    # 16 = nchar("\begin{abstract}"), 15 = nchar("\end{abstract}") + 1
    tmp3 <- regexpr("\\\\begin\\{abstract\\}.+\\\\end\\{abstract\\}", abst)
    abst <- substring(abst,tmp3+16, tmp3 + 16 + attr(tmp3,"match.length") - (16+15))
    abst <- paste("**Abstract:**", abst)
    tex[begline] <- abst
    tex <- tex[-((begline+1):endline)]
  }
  # Insert the keywords line right after the abstract
  if(length(keyw) > 0){
    tex <- c(tex[1:begline], " ", paste("*Keywords:*", keyw)," ", tex[(begline+1):length(tex)])
  }

  # ---- Fix up bibliography and citations
  # Do this here so that textbf and texttt get changed below.
  tex <- processBibliography(tex, fileRoot)

  # ---- Sections
  # Sections must be on a line by themselves. Can't have "\section{A} more text"
  seclines <- grep("\\\\section\\*?\\{", tex)
  secs <- tex[seclines]
  secs <- sub("\\\\section\\*?\\{","",secs)
  secs <- sub("\\}","", secs)
  tex[seclines] <- paste("#", secs)

  # ---- SubSections
  # Subsections must be on a line by themselves.
  seclines <- grep("\\\\subsection\\*?\\{", tex)
  secs <- tex[seclines]
  secs <- sub("\\\\subsection\\*?\\{","",secs)
  secs <- sub("\\}","", secs)
  tex[seclines] <- paste("##", secs)

  # ---- SubSubSections
  # Must be on a line by themselves.
  seclines <- grep("\\\\subsubsection\\*?\\{", tex)
  secs <- tex[seclines]
  secs <- sub("\\\\subsubsection\\*?\\{","",secs)
  secs <- sub("\\}","", secs)
  tex[seclines] <- paste("###", secs)

  # ---- Inline styles: \texttt -> `...`, \textit -> *...*, \textbf -> **...**
  tex <- convertTexTag(tex, "texttt", "`")
  tex <- convertTexTag(tex, "textit", "*")
  tex <- convertTexTag(tex, "textbf", "**")

  # ---- Process tables
  tex <- processTables(tex)
  # ---- Process Figures
  tex <- processFigures(tex)
  # ---- Process display equations
  tex <- processDisplayEqns(tex)
  # ---- Process crossRefs
  tex <- processCrossRefs(tex, fileRoot)

  # ---- Process labels
  # Just need to remove labels. All Table and Fig labels are taken care
  # of, and other labels should just be deleted.
  tex <- gsub("\\\\label\\{[^\\}]+\\}", "", tex)

  # ---- Process List environments
  tex <- processLists(tex)

  # ---- Remove double blanks at end of lines
  tex <- processNewLines(tex)

  # ---- add YAML header info to tex lines
  header <- c("---",
              paste0('title: "',title,'"'),
              paste0('author: "',auth,'"'),
              paste0('date: "',dt,'"'),
              "output: word_document",
              "---")
  tex <- c(header, tex)

  # Make outfile name
  outfile <- paste0(sub("\\..+$","",infile), ".Rmd")
  # write out all modified text
  writeLines(tex, outfile)
  invisible(tex)
}
|
ec99b1185331327691242a512379a6086b92d04d
|
032f396221e412ae04fb013e15fc310a17cc3e68
|
/climate/CWD_Hist.R
|
de57ef2df6719b3b56eb9381a65d138d5066cbec
|
[] |
no_license
|
tvpenha/sismoi
|
6a2f7fde2106c45f256a44cef158aa790f98a41f
|
1e6267b74faf7daf6f0de064c59cf230f945714e
|
refs/heads/master
| 2020-04-20T21:34:15.199323
| 2019-03-20T18:25:27
| 2019-03-20T18:25:27
| 169,112,900
| 0
| 0
| null | 2019-02-04T16:51:30
| 2019-02-04T16:51:29
| null |
ISO-8859-1
|
R
| false
| false
| 15,460
|
r
|
CWD_Hist.R
|
require(ncdf4)
require(ncdf4.helpers)
require(ncdf4.tools)
require(ggplot2)
require(raster)
require(rgdal)
require(spatial.tools)
################################################################################
setwd("C:/Users/inpe-eba/SISMOI/CWD/Historical")
# Abrir shapefile
brasil = readOGR("C:/Users/inpe-eba/SISMOI/Shapefiles/Brasil.shp")
grid = readOGR("C:/Users/inpe-eba/SISMOI/Shapefiles/Grid.shp")
# abrir um arquivo netCDF file
CWD_1 <- nc_open("CWDETCCDI_yr_ACCESS1-0_historical_r1i1p1_1850-2005.nc")
print(CWD_1)
# tempo
CWD_time <- nc.get.time.series(CWD_1, v="CWDETCCDI",
time.dim.name = "time")
head(CWD_time)
tail(CWD_time)
# get time
time <- ncvar_get(CWD_1, "time")
time <- as.vector(time)
tunits <- ncatt_get(CWD_1,"time","units")
nt <- dim(time)
# CWD analise
CWD <- ncvar_get(CWD_1, "CWDETCCDI")
head(CWD)
tail(CWD)
# ---- Per-model processing -------------------------------------------------
# All 23 CMIP5 historical runs go through the identical pipeline:
#   1. read the annual CWD NetCDF file as a RasterBrick;
#   2. rotate longitudes from 0..360 to -180..180;
#   3. slice the layers covering 1961-2005 (the index range depends on
#      each file's start year; NULL means the file already spans 1961-2005);
#   4. bilinearly resample onto the analysis grid.
# The original script repeated this block verbatim for every model and
# rebuilt the target grid 23 times; it is factored into one model table
# plus a helper.  NOTE: the second entry is BNU-ESM (the original comment
# mislabelled it as bcc-csm1).

# Target grid raster, built once — it is identical for every model.
r <- raster(ncol = 18, nrow = 16)
extent(r) <- extent(grid)
rp <- rasterize(grid, r)

# One entry per model: file name and the 1961-2005 layer range.
models <- list(
  list(file = "cwdETCCDI_yr_ACCESS1-0_historical_r1i1p1_1850-2005.nc",      layers = 112:156),
  list(file = "cwdETCCDI_yr_BNU-ESM_historical_r1i1p1_1950-2005.nc",        layers = 12:56),
  list(file = "cwdETCCDI_yr_CanCM4_historical_r1i1p1_1961-2005.nc",         layers = NULL),
  list(file = "cwdETCCDI_yr_CanESM2_historical_r1i1p1_1850-2005.nc",        layers = 112:156),
  list(file = "cwdETCCDI_yr_CCSM4_historical_r1i1p1_1850-2005.nc",          layers = 112:156),
  list(file = "cwdETCCDI_yr_CESM1-FASTCHEM_historical_r1i1p1_1850-2005.nc", layers = 112:156),
  list(file = "cwdETCCDI_yr_CMCC-CESM_historical_r1i1p1_1850-2005.nc",      layers = 112:156),
  list(file = "cwdETCCDI_yr_CMCC-CM_historical_r1i1p1_1850-2005.nc",        layers = 112:156),
  list(file = "cwdETCCDI_yr_CMCC-CMS_historical_r1i1p1_1850-2005.nc",       layers = 112:156),
  list(file = "cwdETCCDI_yr_CNRM-CM5_historical_r1i1p1_1850-2005.nc",       layers = 112:156),
  list(file = "cwdETCCDI_yr_CSIRO-Mk3-6-0_historical_r1i1p1_1850-2005.nc",  layers = 112:156),
  list(file = "cwdETCCDI_yr_FGOALS-s2_historical_r1i1p1_1850-2005.nc",      layers = 112:156),
  list(file = "cwdETCCDI_yr_GFDL-CM3_historical_r1i1p1_1860-2005.nc",       layers = 102:146),
  list(file = "cwdETCCDI_yr_HadCM3_historical_r1i1p1_1859-2005.nc",         layers = 103:147),
  list(file = "cwdETCCDI_yr_HadGEM2-CC_historical_r1i1p1_1859-2005.nc",     layers = 103:147),
  list(file = "cwdETCCDI_yr_HadGEM2-ES_historical_r1i1p1_1859-2005.nc",     layers = 103:147),
  list(file = "cwdETCCDI_yr_IPSL-CM5A-LR_historical_r1i1p1_1850-2005.nc",   layers = 112:156),
  list(file = "cwdETCCDI_yr_IPSL-CM5A-MR_historical_r1i1p1_1850-2005.nc",   layers = 112:156),
  list(file = "cwdETCCDI_yr_IPSL-CM5B-LR_historical_r1i1p1_1850-2005.nc",   layers = 112:156),
  list(file = "cwdETCCDI_yr_MIROC4h_historical_r1i1p1_1950-2005.nc",        layers = 12:56),
  list(file = "cwdETCCDI_yr_MIROC5_historical_r1i1p1_1850-2012.nc",         layers = 112:156),
  list(file = "cwdETCCDI_yr_MIROC-ESM_historical_r1i1p1_1850-2005.nc",      layers = 112:156),
  list(file = "cwdETCCDI_yr_MIROC-ESM-CHEM_historical_r1i1p1_1850-2005.nc", layers = 112:156)
)

# Read, rotate, (optionally) slice and resample one model file.
process_model <- function(file, layers) {
  b <- brick(file.path("C:/Users/inpe-eba/SISMOI/CWD/Historical", file))
  b <- rotate(b)  # longitudes 0..360 -> -180..180
  if (!is.null(layers)) {
    b <- subset(b, layers)  # temporal slice: keep only 1961-2005
  }
  resample(b, rp, method = "bilinear")
}

# Stack all adjusted models in table order (23 models x 45 annual layers).
CWD_Hist <- stack(lapply(models, function(m) process_model(m$file, m$layers)))
# ---- Multi-model ensemble mean --------------------------------------------
# CWD_Hist is stacked model-major (45 consecutive annual layers per model),
# so grouping layer values by rep(1:45, times = n_models) averages the
# same year across every model at each cell.
n_years <- 45
n_models <- nlayers(CWD_Hist) / n_years
rMean <- calc(CWD_Hist, fun = function(x) {
  by(x, rep(seq_len(n_years), times = n_models), mean, na.rm = TRUE)
})

# Convert to a data frame with lon/lat columns plus one column per year.
CWD_Hist_df <- as.data.frame(rMean, xy = TRUE)

# Column labels: one date per year of the 1961-2005 window.
dates <- seq(as.Date("1961/1/1"), by = "year", length.out = n_years)
head(dates)
tail(dates)
# BUG FIX: the original used paste(dates, as.character(), sep = "_"),
# whose zero-length second argument is recycled to "" and appended a
# stray trailing "_" to every year column name.
names(CWD_Hist_df) <- c("lon", "lat", as.character(dates))

# Export the ensemble-mean table as CSV in the working directory.
write.csv(CWD_Hist_df, file = "CWD_Historical_mean.csv")
|
c3c91a6c1fd08e685b25ff49c5e2f465463c6177
|
fb35ed59baa26c8945b4c8253c59cfb92db706c4
|
/cgatpipelines/Rtools/filtercounts.R
|
00ff92ef9e7527a294c83edabb359025137b5aab
|
[
"MIT"
] |
permissive
|
cgat-developers/cgat-flow
|
483f3c582e7ec72efab8440cfefed967cb521e79
|
7ae2e893a41f952c07f35b5cebb4c3c408d8477b
|
refs/heads/master
| 2023-04-13T22:47:19.627132
| 2022-04-27T08:57:44
| 2022-04-27T08:57:44
| 120,881,178
| 13
| 9
|
MIT
| 2022-04-27T08:57:45
| 2018-02-09T08:50:08
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 8,223
|
r
|
filtercounts.R
|
#' Basic filtering analysis Script
#'
#'
#' Example usage:
#'
#' Rscript PATH/TO/filtercounts.R
#'
#' input: directory containing read count files or a tsv file containing read counts
#' additional input variables: method used to generate file, model
#' output: `experiment_out.rds` is an experiment object after filtering
#'
suppressMessages(library(futile.logger))
suppressMessages(library(getopt))
suppressMessages(library(tidyverse))
suppressMessages(library(data.table))
suppressMessages(library(DESeq2))
suppressMessages(library(edgeR))
suppressMessages(library(csaw))
suppressMessages(library(rhdf5))
suppressMessages(library(tximport))
suppressMessages(library(DEXSeq))
source(file.path(Sys.getenv("R_ROOT"), "experiment.R"))
# Load expression data from the configured source, optionally filter
# low-count features, and write the experiment object plus counts tables.
#
# opt fields used: sampleData, source, method, model, counts_dir,
# counts_tsv, tx2gene, tx2gene_regex, flattenedFile, filter, outdir
# (all lower-cased for method/source by main()).
# Side effects: writes <outdir>/experiment_out.rds, Counts_Data.tsv and,
# for salmon/kallisto input, tpm.tsv.
run <- function(opt) {

  ### READING DATA ###

  # Sample sheet; only rows flagged include == 1 are kept.
  futile.logger::flog.info(paste("reading sampleData table from", normalizePath(opt$sampleData)))
  sampleData <- read.table(opt$sampleData, header = TRUE)
  sampleData <- sampleData[sampleData$include == 1, ]
  futile.logger::flog.info(paste("read sampleData ", paste(dim(sampleData), collapse = ",")))
  rownames(sampleData) <- sampleData$track

  futile.logger::flog.info(paste("reading in data from ", opt$source))

  if (opt$source %in% c("salmon", "kallisto")) {
    # Transcript-to-gene map, optionally dropping transcripts matched by
    # the user-supplied regex (e.g. spike-ins).
    tx2gene <- read_tsv(opt$tx2gene)
    colnames(tx2gene) <- c("ensembl_transcript_id", "ensembl_gene_id")
    if (opt$tx2gene_regex != "None") {
      tx2gene <- filter(tx2gene, !grepl(opt$tx2gene_regex, ensembl_transcript_id))
    }

    # Import transcript-level quantifications summarised to gene level.
    futile.logger::flog.info(opt$counts_dir)
    futile.logger::flog.info(sampleData$track)
    files <- file.path(opt$counts_dir, sampleData$track, "quant.sf")
    names(files) <- sampleData$track
    txi <- tximport(files, type = opt$source, tx2gene = tx2gene)

    if (opt$method == "deseq2") {
      dataset <- DESeqDataSetFromTximport(txi, colData = sampleData, design = formula(opt$model))
    } else if (opt$method == "edger") {
      # Build edgeR offsets from tximport's average transcript lengths,
      # following the tximport vignette.
      cts <- txi$counts
      normMat <- txi$length
      normMat <- normMat / exp(rowMeans(log(normMat)))
      normCts <- cts / normMat
      # BUG FIX: eff.lib was previously used without being defined
      # ("object 'eff.lib' not found"); compute the effective library
      # sizes as in the tximport documentation.
      eff.lib <- calcNormFactors(normCts) * colSums(normCts)
      normMat <- sweep(normMat, 2, eff.lib, "*")
      normMat <- log(normMat)
      dataset <- DGEList(counts = cts,
                         samples = sampleData)
      dataset <- scaleOffset(dataset, normMat)
    } else if (opt$method == "sleuth") {
      # BUG FIX: the comparison was against "Sleuth", which is
      # unreachable because main() lower-cases opt$method; "sleuth" now
      # reports the intended message instead of "Method not defined".
      stop("Sleuth method not yet implemented. Sorry.")
    } else {
      stop("Method not defined. Allowable methods are \"DESeq2\", \"EdgeR\" or \"Sleuth\"")
    }
  }

  if (opt$source == "dexseq") {
    # Exon-level counts produced by the DEXSeq HTSeq count scripts.
    files <- file.path(opt$counts_dir, paste0(sampleData$track, ".txt"))
    names(files) <- sampleData$track
    if (opt$method != "dexseq") {
      stop("DEXSeq input is handled by diffexonexpression. Please correct the method argument.")
    }
    dataset <- DEXSeqDataSetFromHTSeq(
      files,
      sampleData = sampleData,
      design = formula(opt$model),
      flattenedfile = opt$flattenedFile)
  } else if (opt$source == "counts_table") {
    # Pre-computed counts matrix (e.g. featureCounts output), possibly
    # gzip-compressed; first column holds the feature identifiers.
    # (row.names spelled out; the original relied on partial matching of
    # the abbreviated `row.name` argument.)
    raw <- read.table(file = gzfile(opt$counts_tsv), header = TRUE, row.names = 1)
    experiment_tsv <- raw[, sampleData$track, drop = FALSE]
    if (opt$method == "deseq2") {
      dataset <- DESeqDataSetFromMatrix(experiment_tsv, sampleData, design = formula(opt$model))
    } else if (opt$method == "edger") {
      dataset <- DGEList(counts = experiment_tsv, samples = sampleData)
    } else if (opt$method == "sleuth") {
      stop("Sleuth method not yet implemented. Sorry.")
    } else {
      stop("Method not defined. Allowable methods are \"DESeq2\", \"EdgeR\" or \"Sleuth\"")
    }
  }

  ### FILTERING ###

  if (isTRUE(opt$filter)) {
    futile.logger::flog.info(paste("filtering data ", opt$source))
    if (opt$method == "edger") {
      # edgeR's default expression filter.
      futile.logger::flog.info(paste("Counts before filtering ", paste(dim(dataset$counts), collapse = ",")))
      keep <- filterByExpr(dataset)
      dataset <- dataset[keep, , keep.lib.sizes = FALSE]
      futile.logger::flog.info(paste("Counts after filtering ", paste(dim(dataset$counts), collapse = ",")))
    } else if (opt$method == "deseq2") {
      # Basic row-sum filter: keep features with at least 10 reads total.
      futile.logger::flog.info(paste("Counts before filtering ", paste(dim(counts(dataset)), collapse = ",")))
      keep <- rowSums(counts(dataset)) >= 10
      dataset <- dataset[keep, ]
      futile.logger::flog.info(paste("Counts after filtering ", paste(dim(counts(dataset)), collapse = ",")))
    } else if (opt$method == "dexseq") {
      futile.logger::flog.info(paste("Filtering for DEXSeq not implemented "))
    }
  } else {
    futile.logger::flog.info(paste("No filtering on dataset performed.", opt$source))
  }

  ### SAVING DATA ###

  file <- get_output_filename(paste0(opt$outdir, "/experiment_out.rds"))
  flog.info(paste("saving experiment data to", file))
  saveRDS(dataset, file = file)

  flog.info("outputting counts data")
  # BUG FIX: counts() has no method for edgeR's DGEList; use the $counts
  # slot there and the counts() accessor for DESeq2/DEXSeq objects.
  counts_out <- if (opt$method == "edger") dataset$counts else counts(dataset)
  write.table(counts_out,
              file = paste0(opt$outdir, "/Counts_Data.tsv"),
              sep = "\t",
              quote = FALSE,
              row.names = TRUE,
              col.names = NA)

  if (opt$source %in% c("salmon", "kallisto")) {
    # TPM abundances are only available when importing via tximport.
    write.table(txi$abundance,
                file = paste0(opt$outdir, "/tpm.tsv"),
                sep = "\t",
                quote = FALSE,
                row.names = TRUE,
                col.names = NA)
  }
}
# Entry point: declare the command-line interface, parse the options via
# experiment_start() (sourced from experiment.R), normalise method/source
# strings, and dispatch to run().
main <- function() {
  # All supported command-line flags.  make_option() specs are consumed
  # by experiment_start() below.
  option_list <- list(
    # Directory of per-sample quantification output (salmon/kallisto
    # subdirectories or DEXSeq .txt files), for --source values that
    # read per-sample files.
    make_option(
      "--counts-dir",
      dest="counts_dir",
      type="character",
      help=paste("directory containing expression estimates",
                 "from salmon/kallisto/DEXSeq.")
    ),
    # Single counts matrix file, used when --source counts_table.
    make_option(
      "--counts-tsv",
      dest="counts_tsv",
      type="character",
      help=paste("file containing counts generated",
                 "by e.g. featurecounts.")
    ),
    # Sample sheet describing the experimental design; rows with
    # include == 1 are kept by run().
    make_option(
      c("-d", "--sampleData"),
      dest="sampleData",
      type="character",
      default = "",
      help=paste("input file with experimental design/sample info")
    ),
    make_option(
      c("--outdir"),
      dest="outdir",
      type="character",
      default = "results.dir",
      help=paste("output directory")
    ),
    # Model formula passed to DESeq2/DEXSeq designs, e.g. "~group".
    make_option(
      "--model",
      dest = "model",
      type = "character",
      default = "~group",
      help = paste("formula for multivariate model")
    ),
    make_option(
      c("-s", "--source"),
      dest="source",
      type="character",
      default="salmon",
      help=paste("Source of data. Possible options are ",
                 "\"salmon\", \"kallisto\", \"counts_table\", \"dexseq\"")
    ),
    # Transcript-to-gene map, required for salmon/kallisto import.
    make_option(
      c("--tx2gene"),
      dest="tx2gene",
      type="character",
      default="transcript2geneMap.tsv",
      help=paste("Path to transcript to gene tsv.")
    ),
    # "None" (the default) disables the transcript-removal regex.
    make_option(
      c("--tx2gene_regex"),
      dest="tx2gene_regex",
      type="character",
      default="None",
      help=paste("Regex/Prefix for removal of certain features from ",
                 "experiment (e.g. removal of spike-ins)")
    ),
    make_option(
      "--method",
      dest="method",
      type="character",
      default="deseq2",
      help=paste("differential expression method to apply ")
    ),
    make_option(
      "--filter",
      dest="filter",
      type="logical",
      default=TRUE,
      help=paste("adopt filtering strategy. ",
                 "For EDGER, the default strategy is applied. ",
                 "For DESeq2 basic rowsum filtering < 10 is applied.")
    ),
    make_option(
      "--dexseq-flattened-file",
      dest="flattenedFile",
      type="character",
      help=paste("directory containing flat gtf for dexseq. DEXSeq ",
                 "expects this to be generated by the",
                 "DEXSeq_prepare_annotations.py script")
    )
  )
  # NOTE(review): `description` is not defined in this file; presumably
  # provided by the sourced experiment.R — confirm.
  opt <- experiment_start(option_list = option_list,
                          description = description)
  # Lower-case user-supplied strings so the comparisons in run() are
  # case-insensitive.
  if (!is.null(opt$method)) {
    opt$method = str_to_lower(opt$method)
  }
  if (!is.null(opt$source)) {
    opt$source = str_to_lower(opt$source)
  }
  run(opt)
  experiment_stop()
}

main()
|
62c5de1ee7e20cc08f3f58b2042eb7024dd32a02
|
c36626e74b54e0c748f1da46904a58198415641e
|
/man/eco.2genepop.Rd
|
d8e91132e6432da311af6e1288b6a8924add05e2
|
[] |
no_license
|
jcassiojr/EcoGenetics
|
a3f3e5586bee15f2f87fc284b4ad8f3243db061e
|
9256797efd715f3eb3de960dcec03aa31e53510f
|
refs/heads/master
| 2021-01-24T08:32:49.115341
| 2016-07-15T20:02:10
| 2016-07-15T20:02:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,078
|
rd
|
eco.2genepop.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eco.2genepop.R
\name{eco.2genepop}
\alias{eco.2genepop}
\title{Exporting an ecogen genetic data frame into Genepop format}
\usage{
eco.2genepop(eco, name = "infile.genepop.txt", grp = NULL, nout = 3,
sep = "")
}
\arguments{
\item{eco}{Object of class "ecogen".}
\item{name}{The name of the output file.}
\item{grp}{The name of the S slot column containing the groups into which
the samples are divided (e.g., populations). If no groups are given (grp = NULL),
all individuals will be assigned to a single group.}
\item{nout}{Number of digits in the output file}
\item{sep}{Character separating alleles.}
}
\value{
A Genepop file in the working directory.
}
\description{
This function converts the genetic
data of an ecogen object into a Genepop input file.
}
\examples{
\dontrun{
data(eco.test)
eco.2genepop(eco, grp = "pop", name = "infile.genepop.txt")
# an output file "infile.genepop.txt" is generated in the working directory
}
}
\author{
Leandro Roser \email{leandroroser@ege.fcen.uba.ar}
}
|
7d4ebc1e5f5894e8289fdcbbb3735ec7fe066aaa
|
f25c5405790cf17a2b6e78b4ef58654810c8bb7b
|
/man/label_tooltip.Rd
|
48f0c2983eac82b88add1f7fed50284adfd922ab
|
[] |
no_license
|
moturoa/shintodashboard
|
15ad881ea4c72549b616a3021852a0db8c25f6fd
|
80385da221d370a563eb1cfe8946964acfacfe15
|
refs/heads/master
| 2023-05-31T07:05:11.026309
| 2021-06-28T12:55:32
| 2021-06-28T12:55:32
| 312,505,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 244
|
rd
|
label_tooltip.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tooltip.R
\name{label_tooltip}
\alias{label_tooltip}
\title{Handy tooltip for Shiny}
\usage{
label_tooltip(label, helptext)
}
\description{
Handy tooltip for Shiny
}
|
cd9cb5ef14488f215e3cf0142236e666aeb2cef8
|
b39cbdaadbc53903b9cd19cfe5d80002dab69c3b
|
/demo/convertGraph.R
|
3f1ead256e4e89cd152a58a238d88804adfd7fbd
|
[] |
no_license
|
cran/GGMselect
|
19b460696fbcd1a7553f7b8e299260c77e374b6d
|
c985831fbcb5f1e66f10ef5dd5fed15474057d79
|
refs/heads/master
| 2023-06-20T00:22:49.184652
| 2023-05-24T15:10:04
| 2023-05-24T15:10:04
| 17,679,431
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
convertGraph.R
|
# Demo: convert the neighbourhood representation of an estimated graph
# into its adjacency-matrix form and confirm both encodings agree.

# Problem dimensions and edge density for the simulated graph.
p <- 30
n <- 30
eta <- 0.11

# Draw a random graph and sample n Gaussian observations from its model.
Gr <- simulateGraph(p, eta)
X <- rmvnorm(n, mean = rep(0, p), sigma = Gr$C)

# Estimate the graph with the C01 family.
GRest <- selectFast(X, family = "C01")

# Neighb and G are two encodings of the same estimate; convert and compare.
a <- convertGraph(GRest$C01$Neighb)
cat("Is G equal to Neighb?\n")
print(all.equal(a, GRest$C01$G)) # TRUE

# Re-estimate via selectMyFam, using the converted graph as the family.
GMF <- selectMyFam(X, list(a))
cat("Is G the same?\n")
print(all.equal(a, GMF$G)) # TRUE
|
f4171ede963eb96e9fce185760932976cf1bc397
|
4a4a4dedf17b593c0ec206f0b6681d06d42ff62e
|
/tests/testthat/test_and.R
|
9ca3316513e70cd352aaba877f6cbfe2d4382a1f
|
[
"MIT"
] |
permissive
|
ellisvalentiner/DeepOperators
|
c2ffb56159fff1f82f87bc6e416090d854e47545
|
7043817af00492b7ea5ed820475df93828549141
|
refs/heads/master
| 2020-04-23T00:10:38.524510
| 2019-10-15T13:48:09
| 2019-10-15T13:48:09
| 170,769,482
| 1
| 1
|
NOASSERTION
| 2019-10-15T15:07:28
| 2019-02-14T22:55:25
|
R
|
UTF-8
|
R
| false
| false
| 298
|
r
|
test_and.R
|
# Truth-table tests for the `%&%` (deep AND) operator exported by
# DeepOperators: TRUE %&% TRUE is the only combination yielding TRUE.
context("AND operator")
library(DeepOperators)

# Both operands TRUE -> TRUE.
test_that("TRUE and TRUE is TRUE", {
  expect_true(TRUE %&% TRUE)
})

# Mixed operands -> FALSE, regardless of operand order.
test_that("TRUE and FALSE is FALSE", {
  expect_false(TRUE %&% FALSE)
  expect_false(FALSE %&% TRUE)
})

# Both operands FALSE -> FALSE.
test_that("FALSE and FALSE is FALSE", {
  expect_false(FALSE %&% FALSE)
})
|
c08df4d779c28d4d3ee094ac31f1cf31476e6f39
|
0f8ead4a8550b858634e62a85af4b2d2d999038f
|
/man/summarize_vdj.Rd
|
0e4833b4fadc5bbd4de6f655916cbf4419f073c5
|
[
"MIT"
] |
permissive
|
rnabioco/djvdj
|
b6d39e678d130986aa4c0c9944097512ab615ea0
|
4f27269103a3ed32d5e9149560b9ff38ddc9a047
|
refs/heads/master
| 2023-06-27T23:42:22.757221
| 2023-05-08T15:39:12
| 2023-05-08T15:39:12
| 249,536,177
| 23
| 3
|
NOASSERTION
| 2023-09-09T21:09:39
| 2020-03-23T20:23:59
|
R
|
UTF-8
|
R
| false
| true
| 3,420
|
rd
|
summarize_vdj.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mutate-vdj.R
\name{summarize_vdj}
\alias{summarize_vdj}
\title{Summarize V(D)J data for each cell}
\usage{
summarize_vdj(
input,
data_cols,
fn = NULL,
...,
chain = NULL,
chain_col = global$chain_col,
col_names = "{.col}",
return_df = FALSE,
sep = global$sep
)
}
\arguments{
\item{input}{Single cell object or data.frame containing V(D)J data. If a
data.frame is provided, the cell barcodes should be stored as row names.}
\item{data_cols}{meta.data column(s) containing V(D)J data to summarize for
each cell}
\item{fn}{Function to apply to each selected column, possible values can be
either a function, e.g. mean, or a purrr-style lambda, e.g. ~ mean(.x,
na.rm = TRUE). If NULL, the mean will be calculated for numeric values,
non-numeric columns will be combined into a single string.}
\item{...}{Additional arguments to pass to fn}
\item{chain}{Chain to use for summarizing V(D)J data}
\item{chain_col}{meta.data column(s) containing chains for each cell}
\item{col_names}{A glue specification that describes how to name the output
columns, use \{.col\} to refer to the original column name. If col_names is
NULL, the original column names will be used.}
\item{return_df}{Return results as a data.frame. If FALSE, results will be
added to the input object.}
\item{sep}{Separator used for storing per cell V(D)J data}
}
\value{
Object containing V(D)J data summarized for each cell
}
\description{
Summarize per-chain values for each cell using a function or purrr-style
lambda. This is useful for plotting or filtering cells based on the V(D)J
meta.data.
}
\examples{
# Summarize numeric columns
# by default the mean will be calculated for numeric columns
res <- summarize_vdj(
vdj_so,
data_cols = c("all_del", "all_ins")
)
head(slot(res, "meta.data"), 3)
# Specifying a different summary function
# this calculates the median number of insertions and deletions for each
# cell
res <- summarize_vdj(
vdj_sce,
data_cols = c("all_del", "all_ins"),
fn = stats::median
)
head(slot(res, "colData"), 3)
# Summarize values for a specific chain
res <- summarize_vdj(
vdj_so,
data_cols = c("all_del", "all_ins"),
chain = "IGK"
)
head(slot(res, "meta.data"), 3)
# Specifying new names for summarized columns
# use {.col} to refer to the original column name
res <- summarize_vdj(
vdj_sce,
data_cols = c("all_del", "all_ins"),
fn = stats::median,
col_names = "median_{.col}"
)
head(slot(res, "colData"), 1)
# Return a data.frame instead of adding the results to the input object
res <- summarize_vdj(
vdj_so,
data_cols = c("all_del", "all_ins"),
return_df = TRUE
)
head(res, 1)
# Using a lambda function to summarize values
# use '.x' to refer to values in the column
# this creates a new column showing the unique chains for each cell
res <- summarize_vdj(
vdj_sce,
data_cols = "chains",
fn = ~ paste0(unique(.x), collapse = "_"),
col_names = "unique_chains"
)
head(slot(res, "colData"), 3)
# Creating an index column to use for filtering/plotting
# this creates a column indicating which cells have no insertions
# the V(D)J data can be filtered based on this new column
res <- summarize_vdj(
vdj_so,
data_cols = "all_ins",
fn = ~ all(.x == 0),
col_names = "no_insertions"
)
res <- filter_vdj(
res,
filt = no_insertions
)
head(slot(res, "meta.data"), 3)
}
|
2cbfe45211a3fa82714435fac63c14a84b4c0a2c
|
af5228afb1a0b9edcd46025e0fb60b2434a7298a
|
/vresidual.r
|
ccd5ddcf97538de0cca34f1e2d02a88896490111
|
[
"MIT"
] |
permissive
|
yuting1214/2021_Fall_RA
|
3fdff4c0f79ad1b1b83a3e8977b125e2639a6adb
|
62bfa7d90f1330be5020eca551e1312130543fc8
|
refs/heads/master
| 2023-08-25T09:59:04.359051
| 2021-11-08T19:57:58
| 2021-11-08T19:57:58
| 425,974,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,476
|
r
|
vresidual.r
|
function(y,yfit,family=binomial(),variance=NULL)
{
  # Variance-function residual: for each observation, the arc length along the
  # curve (mu, V(mu)) between the fitted value yfit and the observed value y,
  # where V is the GLM variance function.
  #
  # Args:
  #   y        observed responses (numeric vector).
  #   yfit     fitted means, same length as y.
  #   family   a family object, a family generator function, or a family name
  #            (character); determines V(mu) unless `variance` is supplied.
  #   variance optional variance function of `mu`; overrides `family`.
  # Returns: numeric vector of arc-length residuals, one per observation.
  #
  # Requires the Deriv package (Deriv()) for non-standard variance functions.

  # Closed-form arc length along a quadratic variance function
  # V(mu) = v2*mu^2 + v1*mu + v0 (v0 does not affect the arc length).
  qvresidual <- function(y,yfit,v2,v1)
  {
    vpa <- 2*v2*yfit+v1
    svpa2 <- sqrt(1+vpa*vpa)
    vpb <- 2*v2*y+v1
    svpb2 <- sqrt(1+vpb*vpb)
    vr <- (log((vpb+svpb2)/(vpa+svpa2))+vpb*svpb2-vpa*svpa2)/(4*v2)
    vr
  }

  # Numerical arc length for an arbitrary variance function vfun(mu).
  # FIX: the derivative returned by Deriv() is a *function*; it must be
  # evaluated at mu before squaring (the original code squared the function
  # object itself, which errors at run time).
  arclength <- function(vfun)
  {
    dv <- Deriv(vfun,"mu")
    function(x) integrate(function(mu){sqrt(1+dv(mu)^2)},x[1],x[2])$value
  }

  if( is.character(family) )
    family <- get(family, mode="function", envir=parent.frame())
  # FIX: get() (or a bare `family=poisson`) yields the family *generator*
  # function; call it to obtain the family object, as stats::glm does.
  if( is.function(family) ) family <- family()
  cf <- family$family

  if( pmatch("Negative Binomial",cf,nomatch=FALSE) )
  {
    # Extract theta from a label of the form "Negative Binomial(theta)".
    theta <- as.numeric(gsub("(?<=\\()[^()]*(?=\\))(*SKIP)(*F)|.","", cf, perl=T))
    cf <- "negative.binomial"
  } else if( pmatch("Tweedie",cf,nomatch=FALSE) )
  {
    # For V(mu) = mu^p, V'(1) = p: recovers the Tweedie variance power.
    dv <- Deriv(family$variance,"mu")
    theta <- dv(1)
  }

  if( is.null(variance) )
  {
    switch(cf,
      # V = mu(1-mu)  =>  v2 = -1, v1 = 1.  Need modify for Y~Bin(n,p).
      binomial={DFUN<-function(x) qvresidual(x[1],x[2],-1,1)},
      # Constant variance: arc length reduces to the raw residual.
      gaussian={DFUN<-function(x) x[1]-x[2]},
      # V = mu^2.
      Gamma={DFUN<-function(x) qvresidual(x[1],x[2],1,0)},
      # V = mu + mu^2/theta.
      negative.binomial={DFUN<-function(x) qvresidual(x[1],x[2],1/theta,1)},
      poisson={DFUN<-function(x) x[1]-x[2]},
      quasibinomial={DFUN<-function(x) qvresidual(x[1],x[2],-1,1)},
      quasipoisson={DFUN<-function(x) x[1]-x[2]},
      # V = mu^3  =>  V' = 3*mu^2  =>  integrand sqrt(1 + 9*mu^4).
      inverse.gaussian={
        DFUN<-function(x) integrate(function(mu){sqrt(1+9*mu^4)},x[1],x[2])$value},
      Tweedie={ # var.power: 0, 1, (1,2), 2, >2
        if( (theta==0)|(theta==1) )
          DFUN<-function(x) x[1]-x[2]
        else if( theta==2 )
          DFUN<-function(x) qvresidual(x[1],x[2],1,0)
        else
          DFUN<-function(x) integrate(function(mu){sqrt(1+theta^2*mu^(2*theta-2))},x[1],x[2])$value},
      quasi={ # variance for quasi: "constant","mu(1-mu)","mu","mu^2","mu^3", or other
        if( (family$varfun=="constant")|(family$varfun=="mu") )
          DFUN <- function(x) x[1]-x[2]
        else if( family$varfun=="mu(1-mu)" )
          DFUN<-function(x) qvresidual(x[1],x[2],-1,1)
        else if( family$varfun=="mu^2" )
          DFUN<-function(x) qvresidual(x[1],x[2],1,0)
        else
          DFUN<-arclength(family$variance)})
  }
  else
    DFUN<-arclength(variance)

  vresidual <- apply(cbind(y,yfit),1,DFUN)
  vresidual
}
|
4f8af8f9bb7b511a726c9d80b583ae14540299d9
|
166745a0a997ccf5a6aa5d6fb0d3ff7dcf322ac9
|
/man/dcem_cluster_uv.Rd
|
7f42bb6b41105b486183f64c4021cd06f0b371b8
|
[] |
no_license
|
parichit/DCEM
|
21498e70e4fd03c04e5160370afa1677a091cc93
|
9c88ddaf031d3c572491f1d5952eca44ed8a5a36
|
refs/heads/master
| 2022-01-29T18:48:36.529380
| 2022-01-15T22:54:22
| 2022-01-15T22:54:22
| 149,658,612
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,524
|
rd
|
dcem_cluster_uv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dcem_cluster_uv.R
\name{dcem_cluster_uv}
\alias{dcem_cluster_uv}
\title{dcem_cluster_uv (univariate data): Part of DCEM package.}
\usage{
dcem_cluster_uv(data, meu, sigma, prior, num_clusters, iteration_count,
threshold, num_data, numcols)
}
\arguments{
\item{data}{(matrix): The dataset provided by the user (converted to matrix format).}
\item{meu}{(vector): The vector containing the initial meu.}
\item{sigma}{(vector): The vector containing the initial standard deviation.}
\item{prior}{(vector): The vector containing the initial prior.}
\item{num_clusters}{(numeric): The number of clusters specified by the user. Default is 2.}
\item{iteration_count}{(numeric): The maximum number of iterations for which the algorithm runs.
If convergence is not achieved within this many iterations, the algorithm stops.
Default: 200.}
\item{threshold}{(numeric): A small value to check for convergence (if the estimated meu(s)
are within the threshold then the algorithm stops).
\strong{Note: Choosing a very small value (0.0000001) for threshold can increase the runtime
substantially and the algorithm may not converge. On the other hand, choosing a larger
value (0.1) can lead to sub-optimal clustering. Default: 0.00001}.}
\item{num_data}{(numeric): The total number of observations in the data.}
\item{numcols}{(numeric): Number of columns in the dataset (After processing the
missing values).}
}
\value{
A list of objects. This list contains parameters associated with the
Gaussian(s) (posterior probabilities, meu, standard-deviation and prior)
\enumerate{
\item (1) Posterior Probabilities: \strong{prob}: A matrix of
posterior-probabilities.
\item (2) Meu(s): \strong{meu}: It is a vector of
meu. Each element of the vector corresponds to one meu.
\item (3) Sigma: Standard-deviation(s): \strong{sigma}: A vector of standard
deviation.
\item (4) prior: \strong{prior}: A vector of prior.
\item (5) Membership: \strong{membership}: A vector of
cluster membership for data.
}
}
\description{
Implements the Expectation Maximization algorithm for the univariate data. This function is internally
called by the dcem_train routine.
}
\references{
Parichit Sharma, Hasan Kurban, Mehmet Dalkilic DCEM: An R package for clustering big data via
data-centric modification of Expectation Maximization, SoftwareX, 17, 100944 URL
https://doi.org/10.1016/j.softx.2021.100944
}
|
cfcb9722f5487a4ed4427a85c12bba742bba7364
|
63625ddab551f84243149bb89e9686801a893f66
|
/R/etkpf_util_f90.R
|
4479fe1dd1578a994a18f6772b1a414b9e81246a
|
[] |
no_license
|
robertsy/ETKPF
|
6602d81b79f57017ed766563cb40478acf34787e
|
a3d9404e95834058a4a0ed4f13d6ff6e76721edc
|
refs/heads/master
| 2021-01-20T14:41:03.481210
| 2017-05-08T16:29:40
| 2017-05-08T16:29:40
| 90,648,252
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,069
|
r
|
etkpf_util_f90.R
|
## fortran implementation of basic etkpf functions
## see etkpf_util_R.R for the documentation
# Wmu part ---------------------------------------------------------------
# Compute the n x n Wmu matrix via the compiled Fortran routine 'get_Wmu',
# given the eigendecomposition of R (eigenvectors R_evc, eigenvalues R_evl)
# and the tuning parameter gam.
# NOTE: .Fortran passes arguments positionally; the names here only label the
# returned list. Do not reorder.
get_Wmu_f90 <- function(R_evc, R_evl, n, gam){
wmu <- matrix(0,n,n)
output <- .Fortran('get_Wmu',
R_evc = as.double(R_evc),
R_evl = as.double(R_evl),
n = as.integer(n),
Wmu = wmu, ## important to not pass as.double, no idea why...
gam = as.double(gam))
return(output$Wmu)
}
# Compute the length-n wmubar vector via the Fortran routine 'get_wmubar',
# given the eigendecomposition of R, the vector C and the parameter gam.
# Arguments are positional for .Fortran; do not reorder.
get_wmubar_f90 <- function(R_evc, R_evl, C, n, gam){
wmubar <- numeric(n)
output <- .Fortran('get_wmubar',
R_evc = as.double(R_evc),
R_evl = as.double(R_evl),
C = as.double(C),
n = as.integer(n),
wmubar= wmubar,
gam = as.double(gam))
return(output$wmubar)
}
# Walpha part --------------------------------------------------------------
# Compute particle weights via the Fortran routine 'get_alpha'.
# Returns a list with:
#   w   - length-n weight vector (alphai as filled by the Fortran code)
#   ess - scalar; effective sample size, per the variable name (NOTE(review):
#         semantics defined in the Fortran source — confirm there).
get_alpha_f90 <- function(R_evc, R_evl, C, n, gam){
alphai <- numeric(n)
ess <- 0
output <- .Fortran('get_alpha',
R_evc = as.double(R_evc),
R_evl = as.double(R_evl),
C = as.double(C),
n = as.integer(n),
alphai= alphai,
gam = as.double(gam),
ess = as.double(ess))
return(list(w=output$alphai, ess=output$ess))
}
# Balanced sampling of n indices proportional to the weights w, via the
# Fortran routine 'bal_sample'. `unif` is the single uniform draw used by the
# sampler (exposed as an argument for reproducibility).
# NOTE(review): parameter R is accepted but never used — the resample size is
# always length(w); kept for API compatibility.
# Returns list(N = per-index resampling counts, index = resampled indices).
bal_sample_f90 <- function(w, R=length(w), unif=runif(1)){
n <- length(w)
ind_resample <- as.integer(numeric(n))
output <- .Fortran('bal_sample',
pai = as.double(w),
n = as.integer(n),
ind = ind_resample,
unif = as.double(unif))
ind_resample <- output$ind
Ni <- numeric(n)
# Tally how many times each index was drawn (zero for indices never drawn).
Ni[sort(unique(ind_resample))] <- table(ind_resample)
return(list(N=Ni, index=ind_resample) )
}
# Rearrange resampled indices so that they match 1:n where possible
# (delegates to the Fortran routine 'reorder_ind'; positional arguments).
reorder_ind_f90 <- function(ind_resample, n){
ind_reordered <- as.integer(numeric(n))
output <- .Fortran('reorder_ind',
ind_in = as.integer(ind_resample),
out = ind_reordered,
n = as.integer(n))
return(output$out)
}
# Weps part: --------------------------------------------------------------
# Compute the stochastic Weps perturbation matrix via the Fortran routine
# 'get_Weps_stoch'.
get_Weps_stochastic_f90 <- function( R_evc, R_evl, n, gam, eps ){
## where eps is a nxn matrix of iid N(0,1)
## centering of eps is done in the fortran code, so not repeated here:
# epsbar <- apply(eps, 1, mean)
# eps <- eps - epsbar %*% t(rep(1,n))
Weps <- matrix(0,n,n)
output <- .Fortran('get_Weps_stoch',
R_evc = as.double(R_evc),
R_evl = as.double(R_evl),
n = as.integer(n),
Weps = Weps, ## important to not pass as.double, no idea why...
gam = as.double(gam),
eps = as.double(eps))
return(output$Weps)
}
# Compute Weps by solving a Riccati equation in the Fortran routine
# 'get_Weps_riccati', given Wmu and the resampling indices.
# NOTE(review): tol and maxit are accepted but not forwarded to the Fortran
# call — convergence control must be hard-coded on the Fortran side; confirm.
get_Weps_riccati_f90 <- function( R_evc, R_evl, n, gam, Wmu, ind_resample,
tol=10^-9, maxit=20){
Weps <- matrix(0,n,n)
output <- .Fortran('get_Weps_riccati',
R_evc = as.double(R_evc),
R_evl = as.double(R_evl),
n = as.integer(n),
Weps = Weps, ## important to not pass as.double, no idea why...
gam = as.double(gam),
Wmu = as.double(Wmu),
ind_resample = as.integer(ind_resample))
return(output$Weps)
}
# Solve the Lyapunov equation AX + XA' = C for X (n x n) via the Fortran
# routine 'lyap'.
# NOTE(review): the LAPACK-style status flag INFO is returned by the routine
# but never checked here — failures would go unnoticed.
lyapunov_f90 <- function(A,C,n){
## solves for AX + XA' = C
X <- matrix(0,n,n)
INFO <- 0
output <- .Fortran('lyap',
X = X,
A = as.double(A),
C = as.double(C),
n = as.integer(n),
INFO = as.integer(INFO))
return(output$X)
}
|
8371484a9a32033fec87bfc1ca2d56deb7c225ec
|
13457e168e5628a931e3dd3ab696a865e05327e5
|
/man/TestModularity.Rd
|
339e591109fa757e13ae3e7721fbae91486010c7
|
[
"MIT"
] |
permissive
|
aivuk/Morphometrics
|
3c74f652295796384b08becdca82452d074013b1
|
4371a964cf3dd52573560abded1e0f0861c2bf30
|
refs/heads/master
| 2021-04-12T08:59:55.965063
| 2015-02-15T03:06:57
| 2015-02-15T03:06:57
| 30,614,868
| 0
| 0
| null | 2015-02-10T21:14:12
| 2015-02-10T21:14:12
| null |
UTF-8
|
R
| false
| false
| 1,450
|
rd
|
TestModularity.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/TestModularity.R
\name{TestModularity}
\alias{CreateHipotMatrix}
\alias{TestModularity}
\title{Test modularity hypothesis}
\usage{
TestModularity(cor.matrix, modularity.hipot, iterations = 100)
CreateHipotMatrix(modularity.hipot)
}
\arguments{
\item{cor.matrix}{Correlation matrix}
\item{modularity.hipot}{Matrix of hypothesis. Each line represents a trait and each column a module.
if modularity.hipot[i,j] == 1, trait i is in module j.}
\item{iterations}{Number of iterations, to be passed to MantelCor}
}
\value{
Returns mantel correlation and associated probability for each modularity hypothesis, along with AVG+, AVG-, AVG Ratio for each module.
A total hypothesis combining all hypotesis is also tested.
}
\description{
Tests modularity hypotheses using the cor.matrix correlation matrix and trait groupings
}
\examples{
cor.matrix <- RandomMatrix(10)
rand.hipots <- matrix(sample(c(1, 0), 30, replace=TRUE), 10, 3)
mod.test <- TestModularity(cor.matrix, rand.hipots)
}
\author{
Diogo Melo, Guilherme Garcia
}
\references{
Porto, Arthur, Felipe B. Oliveira, Leila T. Shirai, Valderes Conto, and Gabriel Marroig. 2009. "The Evolution of Modularity in the Mammalian Skull I: Morphological Integration Patterns and Magnitudes." Evolutionary Biology 36 (1): 118-35. doi:10.1007/s11692-008-9038-3.
}
\seealso{
\code{\link{MantelCor}}
}
\keyword{mantel}
\keyword{modularity}
|
7ef601249e0652dc7059dec6fcbabd1ea875a0f6
|
24570c916b873579da36f0405789f8b374f8d1d4
|
/SNPtoAA.r
|
380fadbeec856aeb6a6b9076a264b6e4a1ff9d2f
|
[
"MIT"
] |
permissive
|
RILAB/siftmappR
|
4010d7cd8d27ee23226f26644baa76721a04d401
|
22ec698151533359c7c05b006f769733dd4dea93
|
refs/heads/master
| 2020-04-06T18:32:24.060731
| 2013-11-13T01:34:33
| 2013-11-13T01:34:33
| 13,351,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,873
|
r
|
SNPtoAA.r
|
#-----------------------------------------------------------------------------------
#XOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOX
#-----------------------------------------------------------------------------------
# This script gives the amino acid position (first transcript) of SNPs in exons
# It also creates input files for polydNdS and a summary file used when processing the polydNdS results
# This script was written to carry out a particular analysis; it may not be applied to another case without changes
# A header of each file is put under every file used
# Script written by Sofiane mezmouk (Ross-Ibarra laboratory)
#-----------------------------------------------------------------------------------
#XOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOXOX
#-----------------------------------------------------------------------------------
# Choose a chromosome to work with (1 to 10); the genotype input file name and
# all output file names below are built from this value.
Chr <- 10
#--------------
library(stringr)
library(gdata)
is.odd <- function(x) x %% 2 != 0
#--------------
#-----------------------------
# Translate one genotype's nucleotide sequence into an amino acid sequence:
# the nucleotides in x (one character each) are concatenated, cut into codons
# at positions debut..fin, and each codon is looked up in column 1 of the
# `codon` table, returning the matching amino acid from column 2.
# NOTE(review): relies on the globals `debut`, `fin` (codon start/end indices,
# set in the main loop just before apply()) and the `codon` lookup table.
transcript <- function(x){xtm <- as.vector(str_sub(paste(as.vector(x),collapse="",sep=""),debut,fin)); as.vector(codon[tapply(xtm,rep(1:length(xtm)),function(x){match(x,codon[,1])}),2])}
#-----------------------------
#--------------
# --- Load reference annotation and lookup tables ---------------------------
# Genome annotation (GFF): used to locate genes, CDS segments and strand.
gffall <- read.table("ZmB73_5b_FGS.gff")
# gffall format
#9 ensembl chromosome 1 156750706 . . . ID=9;Name=chromosome:AGPv2:9:1:156750706:1
#9 ensembl gene 66347 68582 . - . ID=GRMZM2G354611;Name=GRMZM2G354611;biotype=protein_coding
#9 ensembl mRNA 66347 68582 . - . ID=GRMZM2G354611_T01;Parent=GRMZM2G354611;Name=GRMZM2G354611_T01;biotype=protein_coding
#9 ensembl intron 68433 68561 . - . Parent=GRMZM2G354611_T01;Name=intron.1
#9 ensembl intron 67142 67886 . - . Parent=GRMZM2G354611_T01;Name=intron.2
# Codon -> amino acid translation table (column 1: codon, column 2: 1-letter AA).
codon <- read.table("Codons.txt", header=T, sep="\t")
#codon format
#Codon AA_1 AA_3 AA_Full AntiCodon
#TCA S Ser Serine TGA
#TCG S Ser Serine CGA
#TCC S Ser Serine GGA
#TCT S Ser Serine AGA
# Gene / transcript / protein name mapping.
genelist <- read.table("GeneProtNames", header=F, sep="\t")
#genelist format
#AC147602.5_FG004 AC147602.5_FGT004 AC147602.5_FGP004
#AC148152.3_FG001 AC148152.3_FGT001 AC148152.3_FGP001
#AC148152.3_FG005 AC148152.3_FGT005 AC148152.3_FGP005
#AC148152.3_FG006 AC148152.3_FGT006 AC148152.3_FGP006
#AC148152.3_FG008 AC148152.3_FGT008 AC148152.3_FGP008
# Proteins corresponding to first transcripts only.
transc <- as.vector(read.table("ListeProtFirstTranscrit", header=F, sep="\t")[,1])
#transc format
#AC147602.5_FGP004
#AC148152.3_FGP001
#AC148152.3_FGP005
#AC148152.3_FGP006
#AC148152.3_FGP008
# Keep only genes whose protein is a first transcript.
genelist <- genelist[as.vector(genelist[,3]%in%transc),]; rm(transc)
geneposi <- read.table("GenePositions", header=T, sep="\t")
#geneposi format
#Genes Chr Start End
#GRMZM2G059865 1 4854 9652
#GRMZM5G888250 1 9882 10387
#GRMZM2G093344 1 109519 111769
#GRMZM2G093399 1 136307 138929
# Restrict everything to the chromosome selected by `Chr`.
geneposi <- geneposi[geneposi[,2]==Chr,]
genelist <- genelist[as.vector(genelist[,1]) %in% as.vector(geneposi[,1]),]
#---------------
# Genotype matrix (HapMap format) for this chromosome.
geno <- read.table(paste("282_20120110_scv10mF8maf002_mgs_E1pLD5kpUn_imp95_1024_chr",Chr,".hmp.txt", sep=""), header=T, sep="\t")
# geno format
#rs alleles chrom pos strand assembly center protLSID assayLSID panelLSID QCcode 33-16 38-11 ...
#S1_2111 C/T 1 2111 + NA NA NA NA NA NA C C ...
#S1_10097 C/G 1 10097 + NA NA NA NA NA NA C C ...
#S1_10390 G/A 1 10390 + NA NA NA NA NA NA G G ...
# Drop metadata columns (keep position column 3 plus the genotype columns);
# mask heterozygous/ambiguous IUPAC calls as missing ("N").
geno <- as.matrix(geno[,-c(1:2,4:10)])
geno[is.element(geno, c("M","K","S","W","Y","R","V","H","D","B","H"))] <- "N"
#---------------
# Result table mapping real SNP positions to positions in the CDS sequences
# written for polydNdS (first all-NA row is removed at the end).
RespolydNdS <- matrix(NA,1,8, dimnames=list(NULL, c("gene","SNP","Chr","Position","SeqPosition","Sens","LengthCDS","NbSeq")))
# Result table with the amino acid polymorphisms corresponding to the SNPs,
# one column per genotype (first all-NA row is removed at the end).
resaa <- matrix(NA,1,(6+ncol(geno)),dimnames=list(NULL,c("gene","transcript","AAposition","SNP1","SNP2","SNP3","B73ref",dimnames(geno)[[2]][-1])))
# Genes whose re-translated B73 sequence disagrees with the reference protein.
problemes <- vector()
#---------------
#---------------
# Loop over genes: for each gene on the chosen chromosome, rebuild the CDS
# sequence of every genotype, write a FASTA input for polydNdS, and translate
# to amino acids to record SNP-induced protein polymorphisms.
for(i in 1:nrow(geneposi)){
if(nrow(geno[as.numeric(as.vector(geno[,1]))%in%c(geneposi[i,3]:geneposi[i,4]),,drop=F])>0){ # only process genes that contain SNPs
gff <- gffall[grep(geneposi[i,1],gffall[,9]),]
posgene <- as.vector(c(geneposi[i,3]:geneposi[i,4]))
posgene <- posgene[order(posgene)]
# Strand of the gene ("+" or "-") from the GFF "gene" record.
SENStransc <- as.vector(gff[grep("gene",gff[,3]),7])
# Collect all CDS positions of the first transcript.
posi <- gffall[grep(as.vector(genelist[match(geneposi[i,1],genelist[,1]),2]),gffall[,9]),]
posi <- posi[grep("CDS",posi[,3]),,drop=F]
CDS <- c(posi[1,4]:posi[1,5])
if (nrow(posi)>1)
{
for (j in 2:nrow(posi))
{
CDS <- c(CDS,c(posi[j,4]:posi[j,5]))
}
rm(j)
}
CDS <- CDS[order(CDS)]
rm(posi)
#----------------
if(nrow(geno[as.numeric(as.vector(geno[,1]))%in%CDS,,drop=F])>0){ # only if SNPs fall inside the CDS
# Reference gene sequence (FASTA; header line dropped, split to single bases).
geneseq <- readLines(paste("gene",geneposi[i,1],".fasta",sep=""))
# geneseq format for geneAC147602.5_FG004.fasta
#>AC147602.5_FG004 seq=gene; coord=3:178846540..178848005:-1
#ATGGAGATCGTCGCCACGCGCTCCCCGGCTTGCTGCGCCGCCGTGTCCTTCTCCCAGTCG
#TACAGGCCCAAGGTACGTACGGCACCTTCATATCTCGTGACTACTGTACGTAAGCGGAAA
#GTAGCAGCAGCTCGTCGCGCACACGTGCAGAAGCCTTAAGTTTGCTGATGATGTTGATGA
geneseq <- paste(geneseq[-1],collapse="", sep="")
geneseq <- strsplit(geneseq,split=character(1),fixed=T)[[1]]
# Reference protein sequence used to sanity-check the translation below.
tprot <- readLines(paste("tprot_",genelist[as.vector(genelist[,1])==as.vector(geneposi[i,1]),3],".fasta",sep=""))
#tprot format for tprot_AC147602.5_FGP004.fasta
#>AC147602.5_FGP004 seq=translation; coord=3:178846540..178848005:-1; parent_transcript=AC147602.5_FGT004; parent_gene=AC147602.5_FG004
#MEIVATRSPACCAAVSFSQSYRPKASRPPTTFYGESVRVNTARPLSARRQSKAASRAALS
#ARCEIGDSLEEFLTKATPDKNLIRLLICMGEAMRTIAFKVRTASCGGTACVNSFGDEQLA
#VDMLANKLLFEALEYSHVCKYACSEEVPELQDMGGPVEGS
tprot <- paste(tprot[-1],collapse="",sep="")
tprot <- strsplit(tprot, split = "", fixed = T, perl = FALSE, useBytes = FALSE)[[1]]
# Build the per-genotype nucleotide matrix (rows = genomic positions, columns
# = genotypes). For minus-strand genes the row names run in reverse so that
# the sequence reads in transcription order.
if(SENStransc=="-"){
sequ <- matrix(rep(geneseq,ncol(geno)), length(geneseq),ncol(geno), dimnames=list(rev(posgene),c("B73ref",dimnames(geno)[[2]][-1])))
}else
{
sequ <- matrix(rep(geneseq,ncol(geno)), length(geneseq),ncol(geno), dimnames=list(posgene,c("B73ref",dimnames(geno)[[2]][-1])))
}
rm(geneseq)
# Keep only CDS positions; overlay the observed SNP alleles.
sequ <- sequ[as.numeric(dimnames(sequ)[[1]])%in%CDS,,drop=F]
tmp <- geno[as.numeric(as.vector(geno[,1]))%in%CDS,, drop=F]
dimnames(tmp)[[1]] <- as.numeric(as.vector(tmp[,1])); tmp <- tmp[,-1,drop=F]
# Minus strand: complement the SNP alleles (incl. IUPAC ambiguity codes).
if(SENStransc=="-")
{
tmp2 <- tmp[,,drop=F]
tmp[tmp2=="A"] <- "T";tmp[tmp2=="T"] <- "A";tmp[tmp2=="C"] <- "G";tmp[tmp2=="G"] <- "C"
tmp[tmp2=="M"] <- "K";tmp[tmp2=="K"] <- "M";tmp[tmp2=="Y"] <- "R";tmp[tmp2=="R"] <- "Y"
rm(tmp2)
}
# Substitute each non-missing SNP call into the corresponding genotype column.
for(j in 1:nrow(tmp))
{
bof <- tmp[j,tmp[j,]!="N",drop=F]
sequ[match(dimnames(bof)[[1]],dimnames(sequ)[[1]]),match(dimnames(bof)[[2]],dimnames(sequ)[[2]])] <- bof
rm(bof)
}
rm(j)
#-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-
# Write an input file for polydNdS: unique CDS haplotypes in FASTA format
# (odd lines = headers, even lines = sequences).
bofseq <- apply(sequ, 2, function(x){paste(as.vector(x),collapse="",sep="")})
bofseq <-unique(bofseq)
bof <- vector()
bof[is.odd(1:(length(bofseq)*2))] <- paste("> sequenceNumber",c(1:length(bofseq)),sep="")
bof[!is.odd(1:(length(bofseq)*2))] <- bofseq
writeLines(bof,paste("seq_",as.vector(geneposi[i,1]),".fasta", sep="")); rm(bof)
#---------
# Record the mapping between genomic SNP positions and their positions within
# the CDS sequence just written.
bof <- cbind(as.vector(geneposi[i,1]),paste("S",Chr,"_",as.numeric(as.vector(dimnames(tmp)[[1]])),sep=""),Chr,as.numeric(as.vector(dimnames(tmp)[[1]])),
match(dimnames(tmp)[[1]],dimnames(sequ)[[1]]),SENStransc,nrow(sequ),length(bofseq))
dimnames(bof)[[2]] <- c("gene","SNP","Chr","Position","SeqPosition","Sens","LengthCDS","NbSeq")
RespolydNdS <- rbind(RespolydNdS, bof); rm(bof,bofseq)
#-X-X-X-X-X-X-X-X-X-X-X-X-X-X-X-
# Nucleotide -> amino acid: split the CDS into codons (debut/fin are the
# codon start/end indices consumed by the global `transcript` helper).
debut <- seq(1,nrow(sequ),3)
fin <- pmin(debut+2,nrow(sequ))
AA <- matrix(apply(sequ,2,transcript),ncol=ncol(geno), byrow=F)
AA <- cbind(c(1:nrow(AA)),dimnames(sequ)[[1]][debut],dimnames(sequ)[[1]][(debut+1)],dimnames(sequ)[[1]][fin],AA)
# Flag the gene if the re-translated B73 reference sequence differs from the
# reference protein file loaded above.
if(sum(as.numeric(as.vector(AA[,5])[1:length(tprot)]!=tprot),na.rm=T)!=0){
problemes[length(problemes)+1] <- as.vector(geneposi[i,1])
#print("!!!problem"); print(as.vector(as.matrix(genelistmp[ii,])))
}
# Keep only codons that contain at least one genotyped SNP position.
AA <- AA[(as.numeric(AA[,2])%in%as.numeric(as.vector(geno[,1])))|(as.numeric(AA[,3])%in%as.numeric(as.vector(geno[,1])))|(as.numeric(AA[,4])%in%as.numeric(as.vector(geno[,1]))),,drop=F]
if (nrow(AA)>0){
AA <- cbind(as.vector(geneposi[i,1]),as.vector(genelist[as.vector(genelist[,1])==as.vector(geneposi[i,1]),3]),AA)
dimnames(AA) <- list(NULL,c("gene","transcript","AAposition","SNP1","SNP2","SNP3",dimnames(sequ)[[2]]))
resaa <- rbind(resaa,AA)
}
rm(AA,debut,fin,tprot,sequ)
}
rm(gff,SENStransc,CDS)
}
}
# Drop the all-NA seed rows and write the outputs for this chromosome.
resaa <- resaa[-1,]
RespolydNdS <- RespolydNdS[-1,]
if(length(problemes)>0){write.table(problemes,paste("Problemes_Chr",Chr,sep=""), sep="\t", row.names=F, quote=F, col.names=F)}
write.table(RespolydNdS, paste("SummaryPolydNdS.Chr",Chr,sep=""), sep="\t", quote=F, row.names=F)
write.table(resaa,paste("NucToAA_Chr",Chr,".txt",sep=""), sep="\t", row.names=F, quote=F)
|
dcfebe86196fe5bf1d5b672dca00791f05a2ee24
|
17cd1bc89ddf0a7567dbc386329585b91e76f1fc
|
/data-raw/clean-clev.R
|
818d3876fb1ad6d2035490d74e25fea1476344bf
|
[
"MIT"
] |
permissive
|
VaishnavMenon/geodaData
|
be7fab8ac2c2f24f56de98a05ff6a8a503b7010f
|
9e7bba1d879e45c475cea52fb76715d06bf8c9c5
|
refs/heads/master
| 2020-08-17T20:17:18.386421
| 2019-10-17T05:08:29
| 2019-10-17T05:08:29
| 215,707,472
| 0
| 0
| null | 2019-10-17T05:06:40
| 2019-10-17T05:06:39
| null |
UTF-8
|
R
| false
| false
| 131
|
r
|
clean-clev.R
|
# Build the `clev_pts` package data set: read the Cleveland point shapefile
# from data-raw/ and store the resulting object via usethis::use_data().
library(sf)
library(usethis)
clev_pts <- st_read("data-raw/clev_sls_154_core.shp")
usethis::use_data(clev_pts, overwrite = TRUE)
|
6dc1ec1589f58650036162c7240f9842c3cf07d3
|
ecbf1722b5a6a8d100126b2995afa4b80e9b34f1
|
/docs/Dropout/AnalysisBasicsDataPrepetc.R
|
693f15cab314704a5039d7bff4d377ca097b4b90
|
[] |
no_license
|
bclavio/stats-on-grades
|
86e9aeeb7778c36e342804b8650f13adf7e25437
|
029d14a635a584833aa65f5f61b5c41d995fa5bc
|
refs/heads/master
| 2021-04-30T05:49:57.406611
| 2019-08-26T12:32:05
| 2019-08-26T12:32:05
| 121,425,406
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,213
|
r
|
AnalysisBasicsDataPrepetc.R
|
myWD<-if(grepl("BiancaClavio", getwd())){'C:/Users/BiancaClavio/Documents/stats-on-grades'} else {"~/git/AAU/DropOutProject/analysis/"}
setwd(myWD)
#when downloading from Qlikview remember to remove last three lines and upload download as cvs from google docs
source('importDataAndgetInShape.R')
### Comment: I get a warning that SD is 0 and corrplot prints no numbers:
M <- cor(dfMed2AalX)
corrplot(M, method="circle")
moddfMed2Aal.form<- "isDropOut~ MATGrade + jobHoursPerWeek +
ParentsEduMax +
ParentsEduAvg +
MedHappyWith +
MedBelongHere +
WantMScDeg"
dropOutModeldfMed2AalGLM<- glm(moddfMed2Aal.form ,dfMed2AalX,family=binomial())
summary(step(dropOutModeldfMed2AalGLM))
summary(dropOutModeldfMed2AalGLM)
step(dropOutModeldfMed2AalGLM)
summary(dropOutModeldfMed2AalGLM)
mean(dfMed2Aal$hoursWorkedPerWeek,na.rm = TRUE)
mean(dfMed2Aal[dfMed2Aal$DropOutQ999Combo==1,]$hoursWorkedPerWeek,na.rm = TRUE)
numCols <- sapply(dfMed2Aal, is.numeric)
### R suggests using `summarise_all()`, `summarise_at()` or `summarise_if()` instead.
med2DOOverview<-dfMed2Aal[,numCols] %>% group_by(DropOutQ999Combo) %>% summarise_each(funs(mean(.,na.rm=T)))
#predict cohort 2016 (in May 2017 with data up to Feb/Mar)
#glm()
#XXXXXXXXXXXXXXXXXXXX
# remove?? ----------------------------------------------------------------
#### OLD
SVNData<-if(grepl("BiancaClavio", getwd())){'C:/Users/BiancaClavio/Documents/SVN/01Projects/dropOut/data'} else {"~/SVN/01Projects/dropOut/data/"}
setwd(SVNData)
dfUD1 <-read.csv("RawDataOnlyUD1Engl.csv", header = TRUE, fill=TRUE, sep = ",",fileEncoding = "UTF-8")
dfUD1$status<-dfUD1$status2015
dfUD1$statusYear<-2015
df2016<-read.csv("RawDataOnlyUD1Engl2016.csv", header = TRUE, fill=TRUE, sep = ",",fileEncoding = "UTF-8")
df2016$status<-df2016$status2016
df2016$statusYear<-2016
df2016<-plyr::rename(df2016,c("cprnr"="cprnr", "optag_aar"="yearOfEnrolment", "efter_adgeksamen"="delayAfterGraduationFromGymnEtc", "ADGGRU"="ADGGRU", "kvotient"="waitTimeAdjustedGradeInclBonus", "geobag"="residenceBeforeEnrolment", "Aldop"="ageAtEnrolment", "NAVN"="FullName", "postnr"="zip", "geoinst"="GraduationSchoolArea", "institution"="graduationSchool", "campus"="campus", "kon"="gender", "type_optag"="degreeEnrolledFor", "ramme_retning_optag"="studyDirectionAtUniEnrolment", "ramme_2016"="studyDirectionInYear", "orlov2016"="studyLeaveInYear", "studienaevn2016"="studyboardResponsibleInYear", "MAT"="mathGrade", "Niveau_MAT"="mathLevel", "DAN"="DanishGrade", "Niveau_DAN"="DanishLevel", "ENG"="EnglishGrade", "NIveau_ENG"="EnglishLevel", "staa"="staa"
))
# can go? -----------------------------------------------------------------
dfUD1<-rbind.fill(dfUD1,df2016)
dfUD1$campus<-factor(dfUD1$campus,levels=c("Aalborg","Kbh.","Esbjerg"))
#dfUD1$isDropOut<-ifelse(dfUD1$status2015="Afbrudt",1,0)
lookupDropOutsVector=c(Afsluttet= 0, Afbrudt=1, Indskrevet=0, 'Afbrudt (School skift)'=1,'Afbrudt(Fak skift)'=1,'Afbrudt (SN skift)'=1)
lookupDropOutsiUniVector=c(Afsluttet= 0, Afbrudt=1, Indskrevet=0, 'Afbrudt (School skift)'=0,'Afbrudt(Fak skift)'=0,'Afbrudt (SN skift)'=0)
dfUD1$isDropOutButInUni<-lookupDropOutsiUniVector[as.character(dfUD1$status)]
dfUD1$isDropOut<-lookupDropOutsVector[as.character(dfUD1$status)]
dfUD1$isInternationalStudent<-ifelse(dfUD1$GraduationSchoolArea=="Ikke Danmark",1,0)
#dfUD1$yearsFromEnrolment<-2015-dfUD1$yearOfEnrolment
dfUD1$mathGradeBinned<-cut(dfUD1$mathGrade,breaks=c(-6,-1,1.5,3,5.5,8.5,11,18))
dfUD1$mathGradeBinHighGran<-cut(dfUD1$mathGrade,breaks=c(-6,-1,1.5,3,4.5,5.5,6.5,7.5,8.5,11,18))
dfUD1<-dfUD1[!is.na(dfUD1$mathGrade),]
dfUD1$mathLevelABC<-dfUD1$mathLevel
dfUD1$mathLevel<-ifelse(dfUD1$mathLevel %in% c("B","C"),"B","A" )
#super slow dfUD1 <- read.xlsx("RawDataOnly.xlsx", sheetName="UDDATA-1")
#dfUD2 <- read.xlsx("RawDataOnly.xlsx", sheetName="UDDATA-2")
#dfCourseGrades <- read.xlsx("RawDataOnly.xlsx", sheetName="UDDATA-3")
dfCG <-read.csv("RawDataOnlyUD3-googleDocs.csv", header = TRUE, fill=TRUE, sep = ",",fileEncoding = "UTF-8",check.names = FALSE)
dfCG$Kvotient<-NULL
#XXXXXXXXXXXXXXXXXXXX
#XXXXXXXXXXXXXXXXXXX
# keep figure prduction --------------------------------------------------------------------
### Comment: "geom_path: Each group consists of only one observation. Do you need to adjust the group aesthetic?"
dropOutByMathGradeByCampusBy<-sqldf("select mathGradeBinned, mathLevel, avg(isDropOut) as dropOutPct, count(campus) as CountOfStudents from dfM where mathLevel<>'' group by mathLevel,mathGradeBinned")
ggplot(dropOutByMathGradeByCampusBy,aes(mathGradeBinned,dropOutPct*100,colour=mathLevel))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10)) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% dropped out by 2017")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)#+facet_grid(. ~ campus)
#p<-predict(dropOutModel,newdata=test,type="response")
# dropModel only for complete years 2011/12/13 ---------------------------------------------------------------
### Comment: dfM3y not found
myvars<-names(dfM3y) %in% c("isDropOut","MAT_Niveau", "MATGrade")
dataForModel<-dfM3y[myvars]
dfM3y<-dfM[dfM$startaar %in% c(2011,2012,2013) & !is.na(dfM$isDropOut) ,]
dfM3y$MAT_Niveau<-as.factor(dfM3y$MAT_Niveau)
dfM3y$ENG_Niveau<-as.factor(dfM3y$ENG_Niveau)
dfM3y$DAN_Niveau<-as.factor(dfM3y$DAN_Niveau)
dfM3y$DANGradeX<-ifelse(is.na(dfM3y$DANGrade),dfM3y$ENGGrade,dfM3y$DANGrade)
#dfM3y<-dfM3y[!is.na(dfM3y$isDropOut),]
#find out when mandatory enrolment to exams happened
#dfTemp<-
# lm model with all sorts of vars -----------------------------------------
dropOutModel<- glm(isDropOut ~ #
#mathGrade +
MAT_Niveau
* MATGrade
+ ENG_Niveau* ENGGrade
+ DANGradeX
#+ GPRO_PassedBy1stAttempt
#+ GPRO_PassedBy2ndAttempt
#+ MMA_PassedBy1stAttempt
#+ MMA_PassedBy2ndAttempt
#+ PFI_PassedBy1stAttempt
#+ PFI_PassedBy2ndAttempt
#mathLevel
#+ EnglishGrade
#+ EnglishLevel
#+ DanishGrade
#+ yearsFromEnrolment
#+ADGGRU
+campus
#+ gender
+isIntl
,dfM[dfM$startaar==2016,],family=binomial())
#,dfM[dfM$yearOfEnrolment== & dfM$campus=="Aalborg" ,])
#,dfM[dfM$startaar %in% c(2011,2012,2013) ,])
summary(dropOutModel)
PFIModel<- lm(PFI_1g ~ #
MAT_Niveau
* MATGrade +
GPRO_Lg
#+ ENG_Niveau* ENGGrade
#+ DANGradeX
#+ GPRO_PassedBy1stAttempt
#+ GPRO_PassedBy2ndAttempt
#+ MMA_PassedBy1stAttempt
#+ MMA_PassedBy2ndAttempt
#+ PFI_PassedBy1stAttempt
#+ PFI_PassedBy2ndAttempt
#mathLevel
#+ EnglishGrade
#+ EnglishLevel
#+ DanishGrade
#+ yearsFromEnrolment
#+ADGGRU
+campus
#+ gender
#+isIntl
,dfM[dfM$startaar==2016 ,])
#,dfM[dfM$yearOfEnrolment== & dfM$campus=="Aalborg" ,])
#,dfM[dfM$startaar %in% c(2011,2012,2013) ,])
summary(PFIModel)
#
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) 1.044795 0.086623 12.061 < 2e-16 ***
# MAT_NiveauB 0.092581 0.081301 1.139 0.254811
# MATGrade -0.150067 0.011331 -13.244 < 2e-16 ***
# ENG_NiveauB 0.189702 0.046480 4.081 4.48e-05 ***
# ENGGrade -0.029819 0.009331 -3.196 0.001396 **
# DANGrade -0.147841 0.011416 -12.950 < 2e-16 ***
# campusKbh. -0.365480 0.046271 -7.899 2.82e-15 ***
# campusEsbjerg 0.164903 0.076888 2.145 0.031975 *
# isIntl -0.386010 0.114862 -3.361 0.000778 ***
# MAT_NiveauB:MATGrade 0.129486 0.014788 8.756 < 2e-16 ***
# proper model testing GLM based ----------------------------------------------------
mod.form<-"isDropOut ~MAT_Niveau + MATGrade + DANGradeX + ENGGrade + campus + MAT_Niveau:MATGrade"
dropOutModelGLM<- glm(mod.form ,dfM3y,family=binomial())
summary(dropOutModelGLM)
step(glm(isDropOut~1 ,data=dfM3y,family=binomial()),scope="~MAT_Niveau + MATGrade + DANGradeX + ENGGrade + campus + MAT_Niveau:MATGrade",direction = "forward")
#campus has does not sign. predict dropout
indx <- apply(dfM3y, 2, function(x) any(is.na(x)))
colnames[indx]
apply(dfM3y, 2, function(x) any(is.na(x)))
mod.formNonDan<-"isDropOut ~(MAT_Niveau*MATGrade)"
dropOutModelGLMNonDan<- glm(mod.formNonDan ,dfM3y[is.na(dfM3y$DANGrade),],family=binomial())
summary(dropOutModelGLMNonDan)
#for non-Danes (no Dan grades) the matgrade and MATH A B have no predictive power on dropout (motivated?)
dropOutNullModelGLM<-glm(isDropOut~1, dfM3y,family=binomial())
# MATGrade -0.15 , DanGrade -.13 and MATB:MATGrade 0.13 are sign. pred. of drop-out
#before taking exams at Medialogy
#following Andy Field book here page 332
modelCHI<-dropOutModelGLM$null.deviance-dropOutModelGLM$deviance
chidf<-dropOutModelGLM$df.null-dropOutModelGLM$df.residual
chisq.prob<-1-pchisq(modelCHI,chidf)
chisq.prob
dfM3y$predicted.prob<-fitted(dropOutModelGLM)
#predict drop out semester
dfM3ypid<-dfM[dfM$startaar %in% c(2012,2013) & !is.na(dfM$isDropOut) ,]
mod.form2<-"isDropOut ~ MMA_1+GPRO_2+PID_2"
mod.form2<-"MMA_1~MATGrade*MAT_Niveau"
mod.form2<-"GPRO_1~MATGrade*MAT_Niveau+DANGrade"
dropOutModelGLMpid<- glm(mod.form2 ,dfM,family=binomial())
#mod.form2<-"isDropOut ~ (MAT_Niveau*MATGrade)+MMA_1+GPRO_1+PID_1"
summary(dropOutModelGLMpid)
sqldf("select aktivitetshort, takenInYear, avg(isPassed) from dfAAUGrades where `Forsoeg.nr.`=1 group by aktivitetShort, takenInYear")
dfAAUGrades$
dfM3y$`A+2`<-NULL
mod.PF1<-"isDropOut ~ MAT_Niveau * MATGrade"
mod.GPRO1<-
mod.MMA1
#,dfM[dfM$yearOfEnrolment== & dfM$campus=="Aalborg" ,])
#,dfM[dfM$startaar %in% c(2011,2012,2013) ,])
anova(dropOutModelGLM,test="Chisq")
# ROC of Model ------------------------------------------------------------
dfM3y$predictedDO<-predict(dropOutModelGLM,type = "response")
pr<-prediction(dfM3y$predictedDO,dfM3y$isDropOut)
prf <- performance(pr, measure = "tpr", x.measure = "fpr")
plot(prf)
abline(0,1)
#run all models for dropout predicition
# Course-pass indicators used as "hurdle" predictors: passed by 1st/2nd attempt
# for the MMA, GPRO and PFI courses.
hurdleList<-c("MMA_1","MMA_2","GPRO_1","GPRO_2","PFI_1","PFI_2")
#average passing grade of re-exam (2nd) to check Martin's assumption hypothesis: grade rather high (maybe correlate with entry grades mathA/B)
#check average drop-out semester after 2nd semester, higher vs smaller.
# Logistic regression: drop-out predicted by passing GPRO/PFI/MMA at 1st attempt.
modpfi.form<-"isDropOut ~GPRO_1+PFI_1+MMA_1"
dropOutModelGLMpfi<- glm(modpfi.form ,dfPFI,family=binomial())
summary(dropOutModelGLMpfi)
# Coefficients recorded from an earlier run using the 2nd-attempt predictors:
# GPRO_2 -1.8110 0.7840 -2.310 0.020897 *
# PFI_2 -3.0485 0.8023 -3.800 0.000145 ***
# MMA_2 -1.1446 0.6067 -1.887 0.059225 .
#correlation between the courses
# Scatterplot matrix of 1st/2nd-attempt course grades (plotted twice, verbatim).
pairs(~GPRO_1+GPRO_2+MMA_1+MMA_2+PFI_1+PFI_2,data=dfPFI,
main="Simple Scatterplot Matrix")
pairs(~GPRO_1+GPRO_2+MMA_1+MMA_2+PFI_1+PFI_2,data=dfPFI,
main="Simple Scatterplot Matrix")
# Pairwise correlations visualised with the corrplot package (ellipse glyphs).
M<-cor(dfPFI[,c("GPRO_1","GPRO_2","PFI_1","PFI_2","MMA_1","MMA_2")],)
corrplot(M,method="ellipse")
# further plotting --------------------------------------------------------
# Drop-out percentage by (binned) high-school math grade and math level,
# cohorts 2011-2013, all campuses pooled.
dropOutByMathGradeAll<-sqldf("select mathGradeBinned, mathLevel, avg(isDropOut) as dropOutPct, count(campus) as CountOfStudents from dfM where mathLevel<>'' and startaar in (2011,2012,2013) group by mathLevel,mathGradeBinned")
ggplot(dropOutByMathGradeAll,aes(mathGradeBinned,dropOutPct*100,colour=mathLevel))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10)) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% dropped out by 2017")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)
ggsave("dropOutEngGradesANdLevels'11-'13cohorts.png",width=10,height = 7.3)
# NOTE(review): the plot below repeats the previous one verbatim.
ggplot(dropOutByMathGradeAll,aes(mathGradeBinned,dropOutPct*100,colour=mathLevel))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10)) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% dropped out by 2017")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)
# Same analysis with a finer grade binning (mathGradeBinHighGran).
dropOutByMathGradeAllHighGran<-sqldf("select mathGradeBinHighGran, mathLevel, avg(isDropOut) as dropOutPct, count(campus) as CountOfStudents from dfM where mathLevel<>'' and startaar in (2011,2012,2013) group by mathLevel,mathGradeBinHighGran")
ggplot(dropOutByMathGradeAllHighGran,aes(mathGradeBinHighGran,dropOutPct*100,colour=mathLevel))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10)) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% dropped out by Mar 2017")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)
# Drop-out by English entry grade/level (same structure as the math plots).
dropOutByEngGradeAllHighGran<-sqldf("select ENGGradeBinned, ENG_Niveau, avg(isDropOut) as dropOutPct, count(campus) as CountOfStudents from dfM where ENG_Niveau<>'' and startaar in (2011,2012,2013) group by ENG_Niveau, ENGGradeBinned")
ggplot(dropOutByEngGradeAllHighGran,aes(ENGGradeBinned,dropOutPct*100,colour=ENG_Niveau))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10)) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% dropped out by Mar 2017")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)
dropOutByEngGrade<-sqldf("select ENGGradeBinned, avg(isDropOut) as dropOutPct, count(campus) as CountOfStudents from dfM where ENG_Niveau<>'' and startaar in (2011,2012,2013) group by ENGGradeBinned")
ggplot(dropOutByEngGrade,aes(ENGGradeBinned,dropOutPct*100))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10)) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% dropped out by Mar 2017")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)
ggsave("dropOutEngGradesANdLevels.png")
#MATH grades vs. MMA grades
# NOTE(review): this reuses the name dropOutByMathGradeAllHighGran for a
# DIFFERENT quantity (average final MMA exam grade), clobbering the earlier table.
dropOutByMathGradeAllHighGran<-sqldf("select mathGradeBinHighGran, mathLevel, avg(MMA_FinExamGrade) as avgFinalMathExamGrade, count(campus) as CountOfStudents from dfM where mathLevel<>'' and startaar in (2011,2012,2013) group by mathLevel,mathGradeBinHighGran")
ggplot(dropOutByMathGradeAllHighGran,aes(mathGradeBinHighGran,avgFinalMathExamGrade,colour=mathLevel))+theme_bw()+geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("MMA final exam grade")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)
#correlation of math grades with MMA grades
# One row per student (duplicates removed), 2011-2013 cohorts; correlate entry
# math grade with the MMA course grade, separately for math level A and B.
dfMGr<-dfM[!duplicated(dfM$studienr) & dfM$startaar %in% c(2011,2012,2013),]
cor.test(dfMGr[dfMGr$MAT_Niveau=="A",]$MATGrade,dfMGr[dfMGr$MAT_Niveau=="A",]$MMA_Lg,use="complete")
cor.test(dfMGr[dfMGr$MAT_Niveau=="B",]$MATGrade,dfMGr[dfMGr$MAT_Niveau=="B",]$MMA_Lg,use="complete")
# Sanity checks: total student counts behind the two aggregate tables.
sum(dropOutByMathGradeAllHighGran$CountOfStudents)
sum(dropOutByMathGradeAll$CountOfStudents)
# Same drop-out breakdown, now faceted by campus (all cohorts).
dropOutByMathGradeByCampusBy<-sqldf("select mathGradeBinned, campus, mathLevel, avg(isDropOut) as dropOutPct, count(campus) as CountOfStudents from dfM where mathLevel<>'' group by campus, mathLevel,mathGradeBinned")
ggplot(dropOutByMathGradeByCampusBy,aes(mathGradeBinned,dropOutPct*100,colour=mathLevel))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10)) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% of cohort dropped out by 2017")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)+facet_grid(. ~ campus)
# 2012 cohort only, based on the per-student frame dfMGr.
dropOutByMathGrade<-sqldf("select mathGradeBinned, mathLevel, avg(isDropOut) as dropOutPct, count(campus) as CountOfStudents from dfMGr where mathLevel<>'' and yearOfEnrolment=2012 group by mathLevel,mathGradeBinned")
ggplot(dropOutByMathGrade,aes(mathGradeBinned,dropOutPct*100,colour=mathLevel))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10)) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% of cohort dropped out by 2015")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)
# data again --------------------------------------------------------------
### Comment: Do we want the figure in dropbox or in git?
#myWD2<-if(grepl("BiancaClavio", getwd())){'C:/Users/BiancaClavio/Documents/stats-on-grades/output'} else {"~/git/AAU/DropOutProject/analysis/output"}
#setwd(myWD2)
# Pick the figure output folder depending on whose machine this runs on.
# NOTE(review): setwd() mutates global state for the rest of the session.
myWD3 <- ifelse(grepl("BiancaClavio", getwd()), 'C:/Users/BiancaClavio/Dropbox/Apps/ShareLatex/MedialogyBSc re-design/figures', '~/Dropbox/Apps/ShareLatex/MedialogyBSc re-design/figures')
setwd(myWD3)
#dfCG$campus<-factor(dfCG$campus,levels=c("Aalborg","Kbh.","Esbjerg"))
# dfCG$isDropOut<-ifelse(dfCG$status2015=="Afbrudt",1,0)
# Rename exam-attempt columns to the names used throughout the analysis.
dfCG<-plyr::rename(dfCG, c(isLastEligibleExamAttempt="isLastAttemptAtExam",ExamGradeNum="FirstExamGradeNum"))
# "U" grade = did not show up for the exam.
dfCG$isNoShow<-ifelse(dfCG$ExamGradeText=="U",1,0)
# Factor-to-numeric conversion via levels(); non-numeric grade codes become NA.
dfCG$isNumericGrade<-!is.na(as.numeric(levels(dfCG$ExamGradeText))[dfCG$ExamGradeText])
dfCG<-dfCG[!is.na(dfCG$examAttempt),]
dfCG$examGradeNumeric<-ifelse(dfCG$isNumericGrade,as.numeric(levels(dfCG$ExamGradeText))[dfCG$ExamGradeText],NA)
# Danish scale: grades <= 0 (00 and -3) are fails.
dfCG$passed<-ifelse(dfCG$examGradeNumeric<=0,0,1)
dfCG$Kvotient<-NULL
#dfCG$isDropOut<-ifelse(dfCG)
# Lookup tables: grade text -> pass flag (-1 = not applicable) and
# "semester taken" label -> months into the study.
gradesPassedLUVec<-c('02'=1,'4'=1,'7'=1,'10'=1,'12'=1,'00'=0,'-3'=0,'B'=1,'EB'=-1,'U'=-1,'I'=-1)
monthsLookupVector <-c('< 0 md'=0,'6 md'=6,'12 md'=12,'18 md'=18,'24 md'=24, '30 md'=30, '36 md'=36, '42 md'=42)
dfCG$monthsIntoStudy<-monthsLookupVector[as.character(dfCG$takenInWhichSemesterInMonths)]
# Per student: latest exam with a pass/fail outcome, and whether the last
# eligible attempt at some exam was a fail ("final fail").
latestCommittment<-sqldf("select cprnr, max(monthsIntoStudy) as LatestExam from dfCG where passed in (0,1) group by cprnr ")
latestCommittment$LatestExamInSem<-ifelse(is.na(latestCommittment$LatestExam),0,latestCommittment$LatestExam)/6
finalFails<-sqldf("select cprnr, max(monthsIntoStudy) as FinalFailmonthsIntoStudy,1 as failedLast from dfCG where passed=0 and isLastAttemptAtExam='Ja' group by cprnr, failedLast")
latestCommittment<-merge(latestCommittment,finalFails,by="cprnr",all.x=TRUE)
#OLD dfM<-dfUD1[dfUD1$studyDirectionAtUniEnrolment=="Medialogi",]
minYear=2011
#min(dfM$startaar,na.rm = TRUE)
maxYear=max(dfM$startaar,na.rm = TRUE)
# Drop-out rate per campus/year, computed both via dplyr and via sqldf
# (dropOutByCampusByYear is then overwritten with the math-level breakdown).
dropOutByCampusByYear<-dfM %>% group_by(campus,startaar)%>%summarise(mean=mean(isDropOut))
dropOutByCampusByYearSQLDF<-sqldf("select campus, startaar, avg(isDropOut) as mean from dfM group by campus, startaar")
dropOutByCampusByYear<-sqldf("select startaar, campus, mathLevel, avg(isDropOut) as dropOutPct, count(campus) as CountOfStudents from dfM where mathLevel<>'' group by startaar, campus, mathLevel")
ggplot(dropOutByCampusByYear,aes(startaar,dropOutPct*100,colour=mathLevel))+theme_bw()+scale_y_continuous(limits=c(20,80),breaks=seq(0,100,10))+scale_x_continuous(limits=c(minYear,maxYear),breaks = minYear:maxYear) +geom_point(aes(size=CountOfStudents,alpha=.5))+geom_line()+ylab("% of cohort dropped out by 2015")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)+facet_grid(. ~ campus)
ggsave("DropOutByCampusByYear.png",width=9.71,height=8)
dfUD1<-merge(dfUD1,latestCommittment,by="cprnr")
# Cross-join each student with semesters 0..8 to build a per-student,
# per-semester panel, then flag in which semester the drop-out happened.
semesterScaffold<-data.frame(semesterNum= 0:8)
dfMBlowUp<-sqldf("select * from dfM, semesterScaffold")
# NOTE(review): is.odd() is not base R -- assumed to come from an attached
# helper package/script; confirm it is defined.
dfMBlowUp$isOddSemester<-is.odd(dfMBlowUp$semesterNum)
dfMBlowUp$YearsToAdd<-ceiling(dfMBlowUp$semesterNum/2)
# Semester cut-off dates: 1 Feb after odd semesters, 1 Sep after even ones.
dfMBlowUp$SemCutOffDate<-as.Date(ifelse(dfMBlowUp$isOddSemester, paste(as.character(dfMBlowUp$startaar+dfMBlowUp$YearsToAdd),"/2/1", sep = ""),
paste(as.character(dfMBlowUp$startaar+dfMBlowUp$YearsToAdd),"/9/1", sep = "")), format="%Y/%m/%d")
# A student counts as dropped out in a semester if their end date precedes
# that semester's cut-off date.
dfMBlowUp$IsDropOutInSem <-ifelse(is.na(dfMBlowUp$slutdatosn),0,ifelse(dfMBlowUp$slutdatosn < dfMBlowUp$SemCutOffDate,dfMBlowUp$isDropOut,0))
# Keep only semesters that had finished by March 2017 (the observation date).
dfMBlowUp<-dfMBlowUp[dfMBlowUp$SemCutOffDate<=as.Date("2017/3/1",format="%Y/%m/%d"),]
#dfMBlowUp<-dfMBlowUp[dfMBlowUp$semesterNum<= (2017-dfMBlowUp$startaar)*2,]
# more plotting ----------------------------------------------------------
#grade correlations
# BUG FIX: the subsets were written as dfPFI[dfPFI$campus=="Aalborg"] -- missing
# the row/column comma -- which indexes COLUMNS with a row-length logical vector
# and fails at runtime. Rows must be selected with dfPFI[<condition>, ].
# (The corrected form matches the duplicate plot on the following line.)
plot(jitter(dfPFI[dfPFI$campus=="Aalborg",]$GPRO_1g,1),jitter(dfPFI[dfPFI$campus=="Aalborg",]$PFI_Lg,1))
# Jittered scatterplots of GPRO 1st-attempt grade vs PFI / MMA / GPRO course
# grades, split by campus (jitter avoids over-plotting on the discrete scale).
plot(jitter(dfPFI[dfPFI$campus=="Aalborg",]$GPRO_1g,1),jitter(dfPFI[dfPFI$campus=="Aalborg",]$PFI_Lg,1))
plot(jitter(dfPFI[dfPFI$campus=="Kbh.",]$GPRO_1g,1),jitter(dfPFI[dfPFI$campus=="Kbh.",]$PFI_Lg,1))
# NOTE(review): the next two plots repeat the previous two verbatim.
plot(jitter(dfPFI[dfPFI$campus=="Aalborg",]$GPRO_1g,1),jitter(dfPFI[dfPFI$campus=="Aalborg",]$PFI_Lg,1))
plot(jitter(dfPFI[dfPFI$campus=="Kbh.",]$GPRO_1g,1),jitter(dfPFI[dfPFI$campus=="Kbh.",]$PFI_Lg,1))
plot(jitter(dfM[dfM$campus=="Kbh.",]$GPRO_1g,1),jitter(dfM[dfM$campus=="Kbh.",]$MMA_Lg,1))
plot(jitter(dfM[dfM$campus=="Aalborg",]$GPRO_1g,1),jitter(dfM[dfM$campus=="Aalborg",]$MMA_Lg,1))
plot(jitter(dfM[dfM$campus=="Aalborg",]$GPRO_1g,1),jitter(dfM[dfM$campus=="Aalborg",]$GPRO_Lg,1))
plot(jitter(dfM[dfM$campus=="Kbh.",]$GPRO_1g,1),jitter(dfM[dfM$campus=="Kbh.",]$GPRO_Lg,1))
# PFI course grade vs GPRO course grade, all campuses, with regression line.
plot(jitter(dfPFI$GPRO_Lg,1),jitter(dfPFI$PFI_Lg,1))
z1<-lm(PFI_Lg~GPRO_Lg,data = dfPFI)
abline(z1)
z1
cor(dfPFI$PFI_Lg,dfPFI$GPRO_Lg, use = "complete.obs")
# NOTE(review): z duplicates z1 (same model fitted twice).
z<-lm(PFI_Lg~GPRO_Lg,data = dfPFI)
abline(z)
# Survival-style drop-out curves per semester, by cohort / campus / math level,
# from the per-student-per-semester panel built above (dfMBlowUp).
dfMBlowUp$cohort<-as.factor(dfMBlowUp$startaar)
dfDbSem<-sqldf("select cohort,campus ,semesternum, mathlevel, avg(IsDropOutInSem) as dropOutPct, count(studienr) as numOfStudents from dfMBlowUp where isDropOutInSem in (0,1) and cohort in (2011,2012,2013,2014,2015,2016) group by cohort, campus, semesterNum, mathlevel")
ggplot(dfDbSem[!dfDbSem$mathLevel=="C" & !dfDbSem$campus=="Esbjerg",],aes(semesterNum,dropOutPct*100,colour=cohort))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10))+scale_x_continuous(limits=c(0,8)) +geom_point()+geom_line()+ylab("% of cohort dropped out")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)+facet_grid(. ~ campus*mathLevel)
ggsave("DropOutByCampusBySemesterByCohortByMathAB.png",width=9.71,height=5)
dfDbSemCamp<-sqldf("select cohort,campus ,semesternum, avg(IsDropOutInSem) as dropOutPct, count(studienr) as numOfStudents from dfMBlowUp where isDropOutInSem in (0,1) and cohort in (2011,2012,2013,2014,2015,2016) group by cohort, campus, semesterNum")
ggplot(dfDbSemCamp[ !dfDbSemCamp$campus=="Esbjerg",],aes(semesterNum,dropOutPct*100,colour=cohort))+theme_bw()+scale_y_continuous(limits=c(0,70),breaks=seq(0,100,10))+scale_x_continuous(limits=c(0,8)) +geom_point()+geom_line()+ylab("% of cohort dropped out")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE)+facet_grid(. ~ campus)
ggsave("DropOutByCampusBySemesterByCohort.png",width=9.71,height=5)
#now only math levels
# Drop-out per semester by math grade bin and math level, 2011-2013 cohorts.
dfDbSem<-sqldf("select semesternum, mathlevel, mathGradeBinned, avg(IsDropOutInSem) as dropOutPct, count(studienr) as numOfStudents from dfMBlowUp where isDropOutInSem in (0,1) and startaar in (2011,2012,2013) group by mathGradeBinned, semesterNum, mathlevel")
ggplot(dfDbSem[!dfDbSem$mathLevel=="C" ,],aes(semesterNum,dropOutPct*100,colour=mathGradeBinned))+theme_bw()+scale_y_continuous(limits=c(0,60),breaks=seq(0,60,10))+scale_x_continuous(limits=c(0,7)) +geom_point()+geom_line()+ylab("% of cohort dropped out")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"),plot.margin=unit(c(0,1,0,0),"lines") )+guides(alpha=FALSE) +facet_grid(. ~ mathLevel)
ggsave("DropOutBySemesterByMathLevel.png",width=9.71,height=5)
inDistributionByMathLevels<-sqldf("select mathLevel, count(studienr) from dfM where mathlevel<>'' group by mathLevel")
# Yearly enrolment composition per campus: math level shares.
MathEnrolmentByCampusByYear<-dfM[dfM$mathLevel!='',] %>% group_by(campus,startaar,mathLevel) %>% summarise (n = n()) %>% mutate(freq = n / sum(n))
ggplot(MathEnrolmentByCampusByYear,aes(startaar,freq*100,colour=mathLevel))+theme_bw()+theme(panel.spacing = unit(2, "lines"))+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10))+scale_x_continuous(limits=c(minYear,maxYear),breaks = minYear:maxYear) +geom_point()+geom_line()+ylab("percentage of enrolling students")+facet_grid(. ~ campus)
ggsave("MathEnrolmentByCampusByYear.png",width=6.71,height=2.5)
# Shares of international students, per campus and pooled, cohorts >= 2011.
intStudEnrolmentByCampusByYear<-dfM[dfM$startaar>=2011 & !is.na(dfM$isIntl),] %>% group_by(campus,startaar,isIntl) %>% summarise (n = n()) %>% mutate(freq = n / sum(n))
intStudEnrolmentByYear<-dfM[dfM$startaar>=2011 & !is.na(dfM$isIntl),] %>% group_by(startaar,isIntl) %>% summarise (n = n()) %>% mutate(freq = n / sum(n))
ggplot(intStudEnrolmentByCampusByYear,aes(startaar,freq*100,colour=factor(isIntl)))+theme_bw()+theme(panel.spacing = unit(2, "lines"))+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10))+scale_x_continuous(limits=c(minYear,maxYear),breaks = minYear:maxYear) +geom_point()+geom_line()+ylab("percentage of enrolling students")+facet_grid(. ~ campus)+
theme(panel.spacing = unit(2, "lines"),strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"),plot.margin = unit( c(0,0,0,0) , units = "lines" ), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16) )
ggplot(intStudEnrolmentByYear,aes(startaar,freq*100,colour=factor(isIntl)))+theme_bw()+theme(panel.spacing = unit(2, "lines"))+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10))+scale_x_continuous(limits=c(minYear,maxYear),breaks = minYear:maxYear) +geom_point()+geom_line()+ylab("percentage of enrolling students")+
theme(panel.spacing = unit(2, "lines"),strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"),plot.margin = unit( c(0,0,0,0) , units = "lines" ), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16) )
ggsave("intStudEnrolmentByYear.png",width=8.71,height=3.5)
# Average entry math grade over time, per campus and math level (A/B only).
AvgMathGradeByCampusByYear<-sqldf("select campus, startaar, MAT_Niveau, avg(MATGRade) as avgMATGRade from dfM where MAT_Niveau<>'' and MAT_Niveau<>'C' group by campus, startaar, MAT_Niveau ")
ggplot(AvgMathGradeByCampusByYear[AvgMathGradeByCampusByYear$MAT_Niveau!='',],aes(startaar, avgMATGRade ,colour=factor(MAT_Niveau)))+theme_bw()+scale_x_continuous(limits=c(minYear,maxYear),breaks = minYear:maxYear) +geom_point()+geom_line()+ylab("math grade avg")+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"),plot.margin = unit( c(0,0,0,0) , units = "lines" ), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16) , panel.spacing = unit(2, "lines") )+facet_grid(. ~ campus)
ggsave("MathGradeByCampusByYear.png",width=6.71,height=3.5)
# NOTE(review): this overwrites the intStudEnrolmentByCampusByYear table built
# above, this time split on the isInternationalStudent column.
intStudEnrolmentByCampusByYear<-dfM %>% group_by(campus,startaar,isInternationalStudent) %>% summarise (n = n()) %>% mutate(freq = n / sum(n))
# Gender composition of enrolment, per campus and year.
genderStudEnrolmentByCampusByYear<-dfM %>% group_by(campus,startaar,gender) %>% summarise (n = n()) %>% mutate(freq = n / sum(n))
ggplot(genderStudEnrolmentByCampusByYear,aes(startaar,freq*100,colour=gender))+theme_bw()+scale_y_continuous(limits=c(0,100),breaks=seq(0,100,10))+scale_x_continuous(limits=c(minYear,maxYear),breaks = minYear:maxYear) +geom_point()+geom_line()+ylab("percentage of enrolling students")+facet_grid(. ~ campus)
ggsave("genderStudEnrolmentByCampusByYear.png")
# Join the student-level data (dfUD1) onto the course-grade records.
dfCG<-merge(dfUD1,dfCG,by="cprnr")
#sqldf("select GraduationSchoolarea,count(zip) from dfM group by GraduationSchoolarea ")
# Medialogy records only, then per-course subsets (GPRO = basic programming,
# MMA = maths for multimedia applications).
dfCGM<-dfCG[dfCG$studyDirectionAtUniEnrolment=="Medialogi",]
dfGPRO<-dfCGM[dfCGM$activityName=="Grundlæggende programmering",]
dfMMA<-dfCGM[dfCGM$activityName=="Matematik til multimedie-applikationer",]
#dfPFI<-dfCGM[dfCGM$activityName=="Grundlæggende programmering",]
# Last eligible vs first exam attempts; columns 15:22 dropped
# (magic column range -- TODO: replace with named column selection).
dfCGlast<-dfCG[dfCG$isLastAttemptAtExam=="Ja",-(15:22)]
#use also for no-show
dfCGfirst<-dfCG[dfCG$examAttempt==1,-(15:22)]
#columns rød and år are not reading... need to change manually in CSV file then re-import after export from gdocs spreadsheet
df<-dfUD1
#merge(dfUD1,dfUD2,by="cprnr") #only import ECTS column
#for prior grade analysis remove all NAs from prior grades no show/
#dfCG$mathGradeBinned<-cut(dfCG$mathGrade,breaks=c(-6,-1,1.5,3,5.5,8.5,11,18))
#dfGPRO$mathGradeBinned<-cut(dfGPRO$mathGrade,breaks=c(-6,-1,1.5,3,5.5,8.5,11,18))
#replace(dfGPRO$mathGradeBinnedText, dfGPRO$mathGradeBinnedText== c("(-6,1.5]", "(1.5,3]", "(3,5.5]","(5.5,8.5]","(8.5,11]", "(11,18]"), c(0,2,4,7,10,12))
# Map cut() bin labels back to representative grades on the Danish 7-step scale.
lookUpGradesVector=c('(-6,-1]'="-2", '(-1,1.5]'="0", '(1.5,3]'="2", '(3,5.5]'="4",'(5.5,8.5]'="7",'(8.5,11]'="10", '(11,18]'="12")
dfGPRO$mathGradeBinnedNum<-as.numeric(lookUpGradesVector[dfGPRO$mathGradeBinned])
dfGPRO<-dfGPRO[!is.na(dfGPRO$mathGrade),]
#dflastExamAttempts<- sqldf("select cprnr, activityName, examattempt,examGradeNumeric from dfCGM where isLastAttemptAtExam='Ja'")
#dfFirstExamAttempts<-sqldf("select cprnr, activityName, examattempt,examGradeNumeric from dfCGM where examattempt=1 and ")
# No-show risk for GPRO by entry math grade and level.
dfNoShowRisk<-sqldf("select avg(isNoShow), count(mathgrade) as numOfStudents, mathGrade,mathlevel from dfGPRO group by mathgrade, mathlevel");dfNoShowRisk
ggplot(dfGPRO[dfGPRO$isNoShow==1,], aes(x=mathGrade,colour=mathLevel)) + geom_density()
# Probability of eventually passing GPRO (last eligible attempt) and of
# no-show, per binned math grade and level.
mathGPROpass<-sqldf("select mathLevel,mathGradeBinnedNum,avg(passed) as probabilityPassingGPROmed1,count(mathgrade) as numOfStudents from dfGPRO where isLastAttemptAtExam='Ja' group by mathGradeBinnedNum, mathLevel order by mathlevel, mathGradeBinnedNum")
#mathPFIpass<-sqldf("select mathLevel,mathGradeBinnedNum,avg(passed) as probabilityPassingGPROmed1,count(mathgrade) as numOfStudents from dfAAUGrades where isLastAttemptAtExam='Ja' group by mathGradeBinnedNum, mathLevel order by mathlevel, mathGradeBinnedNum")
mathGPROnoShow<-sqldf("select mathLevel,mathGradeBinnedNum,avg(isNoShow) as probabilityNoShowGPROmed1,count(mathgrade) as numOfStudents from dfGPRO group by mathGradeBinnedNum, mathLevel order by mathlevel, mathGradeBinnedNum")
ggplot(mathGPROpass,aes(mathGradeBinnedNum,probabilityPassingGPROmed1,colour=mathLevel))+theme_bw()+geom_point()+geom_line()+ylim(0,1)+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"),plot.margin = unit( c(0,0,0,0) , units = "lines" ), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16),panel.spacing = unit(2, "lines"))
ggsave("mathGPROpass.png",width=3.5,height=10.5)
ggplot(mathGPROnoShow,aes(mathGradeBinnedNum,probabilityNoShowGPROmed1,colour=mathLevel))+theme_bw()+geom_point(aes(size=numOfStudents))+geom_line()+ylim(0,0.5)+theme(strip.text.x = element_text(size = 18),legend.position = "bottom",panel.background=element_rect(fill = "white",color = "white"),plot.margin = unit( c(0,0,0,0) , units = "lines" ), axis.text.x = element_text(size=16),axis.text.y = element_text(size=16), panel.spacing = unit(2, "lines"))
ggsave("mathGPROnoShow.png",width=3.5,height=6.5)
#hist_cut + geom_bar(position="dodge")
# Ad-hoc queries: no-show rate per attempt for one activity code, and the list
# of distinct activity names.
sqldf("select activityName, examAttempt, avg(isNoShow) from dfCGM where activityCode='NJA110006L' group by activityName, examAttempt")
sqldf("select distinct activityName from dfCGM group by activityName")
ggplot(dfGPRO,aes(mathGrade,FirstExamGradeNum))+geom_point(aes(colour=mathLevel,alpha=0.05))
# Binary indicator: entry math grade at least 7 (1) or below 7 (0).
df$Mat7<-ifelse(df$mathGrade<7,0,1)
dfM$Mat7<-ifelse(dfM$mathGrade<7,0,1)
# Linear probability model of drop-out for the 2011-2013 cohorts; the active
# predictor is the math level x math grade interaction. The commented-out terms
# are alternative predictors explored during model building.
dropOutModel<- lm(isDropOut ~ #
#mathGrade +
MAT_Niveau
* MATGrade
#+ ENG_Niveau
#+ ENGGrade
#+ DAN_Niveau
#+ DANGrade
#+GPRO_PassedBy1stAttempt
#+GPRO_PassedBy2ndAttempt
#+MMA_PassedBy1stAttempt
#+MMA_PassedBy2ndAttempt
# +PFI_PassedBy1stAttempt
# +PFI_PassedBy2ndAttempt
#mathLevel
#+ EnglishGrade
#+ EnglishLevel
#+ DanishGrade
#+ yearsFromEnrolment
#+ADGGRU
#+campus
#+ gender
# isInternationalStudent
#,dfM[dfM$yearOfEnrolment== & dfM$campus=="Aalborg" ,])
,dfM[dfM$startaar %in% c(2011,2012,2013) ,])
summary(dropOutModel)
# Linear probability model: does having an entry math grade >= 7 (Mat7) and
# campus predict drop-out among math-level-B students enrolled before 2015?
# (Other candidate predictors previously explored here: EnglishGrade,
# EnglishLevel, DanishGrade, yearsFromEnrolment, ADGGRU, gender,
# isInternationalStudent.)
# BUG FIX: the row filter used the scalar operator `&&`, which inspects only the
# first element of each condition (and raises an error for vectors in R >= 4.3),
# so the intended rows were never selected. The element-wise `&` is required.
dropOutModel<- lm(isDropOut ~ Mat7 + campus,
dfM[dfM$startaar<2015 & dfM$mathLevel=="B",])
summary(dropOutModel)
# First-attempt GPRO exam grade predicted by entry math grade and level,
# 2014 cohort only.
GPROMathMod <- lm(FirstExamGradeNum~ mathGrade
+mathLevel,
dfGPRO[dfGPRO$startaar==2014,])
summary(GPROMathMod)
#MMAMathMod
#P4IMathMod
# Same model on the math-level-B subset.
# NOTE(review): dfGPROmB is not created anywhere in this script chunk --
# it must be defined elsewhere in the session; TODO confirm.
GPROMathModb <- lm(FirstExamGradeNum~ mathGrade,
dfGPROmB)
summary(GPROMathModb)
#create cdf by factor mathlevel
# NOTE(review): dflastExamAttempts is only defined in a commented-out sqldf
# call above, so this export fails unless it exists from an earlier session.
write.csv(dflastExamAttempts,"ForKasper.csv")
# Wide table: one column per course activity, last-attempt numeric grade summed
# per student/attempt. NOTE(review): reshape::cast is superseded -- consider
# reshape2::dcast or tidyr::pivot_wider in a future revision.
library(reshape)
castData<-cast(dflastExamAttempts, cprnr+examAttempt~activityName, value = "examGradeNumeric", sum)
|
5a128dcd7c6fe5babb601be189c0edabaf656a3d
|
e34b03c2bca6573c00dccf3906302371c5fae0bd
|
/man/dewpoint.Rd
|
0bcdfaf8409dd7dd08a15aa782de795a3a85d5bc
|
[] |
no_license
|
SnowHydrology/humidity
|
f3c274ce0e2aa2b0dfec851b2d19c6a38a7708a9
|
e1b144519340fc6499a288df581df55a59986cca
|
refs/heads/master
| 2020-06-26T19:12:05.447860
| 2019-07-30T21:40:19
| 2019-07-30T21:40:19
| 199,727,126
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 746
|
rd
|
dewpoint.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/humidity.R
\name{dewpoint}
\alias{dewpoint}
\title{Dew point temperature conversion}
\usage{
dewpoint(TAIR, RH)
}
\arguments{
\item{TAIR}{The air temperature in degrees Celsius}
\item{RH}{The relative humidity in percent}
}
\value{
The dew point temperature in degrees Celsius
}
\description{
Convert air temperature in degrees Celsius and relative humidity in percent
to dew point temperature in degrees Celsius
}
\details{
From \url{http://andrew.rsmas.miami.edu/bmcnoldy/Humidity.html}
Based on Alduchov and Eskridge (1996): "Improved Magnus form approximation of
saturation vapor pressure." Journal of Applied Meteorology, 35(4), 601-609.
}
\examples{
tdew <- dewpoint(5, 80);
}
|
65b102c4b3f4ae76aa83bc796edb9aa3d7629704
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/geophys/examples/DoMohrFig1.Rd.R
|
81f67a8994ffe8ef2eb3f2bf9c30e95174fe30fe
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 222
|
r
|
DoMohrFig1.Rd.R
|
library(geophys)

### Name: DoMohrFig1
### Title: Annotated Stress Tensor
### Aliases: DoMohrFig1
### Keywords: misc

### ** Examples

# Symmetric 2x2 stress tensor: normal stresses 50 and 10, shear stress 40.
stress.tensor <- matrix(c(50, 40, 40, 10), nrow = 2)

# Draw the annotated Mohr-circle figure for this tensor.
DoMohrFig1(stress.tensor)
|
e5f4e3c15f5f9bcba0e6ef7f301d402643f67302
|
20646e416b48befc8d8152d1b262b6886d19fed1
|
/R/8.1.f.metrics.R
|
0db8bc305af33da5f1af0de8a5dc5042da1a524f
|
[] |
no_license
|
lizhizhong1992/ENMwizard
|
d0bce677d0957564d22170572f65c8ad0889440e
|
a4d7e16e85f038ca2d86166b772314e81439083f
|
refs/heads/master
| 2023-01-31T07:13:14.074555
| 2020-12-11T17:41:05
| 2020-12-11T17:41:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,440
|
r
|
8.1.f.metrics.R
|
# # ##### 5. METRICS
# TO DO - get_tsa;
# In area.occ.spp[[sp]][] <- array(aperm(ar.mods.t.p, c(3, 2, 1))) :
# number of items to replace is not a multiple of replacement length
# TO DO - get_fpa
# Error in `[<-.data.frame`(`*tmp*`, , ncol(areas), value = c(0.526, 0.461, :
# replacement has 6 rows, data has 8
# # #### 4.6 compute area occupied
# #' Compute total suitable area
# #'
# #' General function description. A short paragraph (or more) describing what the function does.
# #' @inheritParams f.plot.mxnt.preds
# #' @param pred.nm name of prediction to be appended to the final name. Usually "pres", "past" or "fut".
# #' @param thrshld.i List of threshold criteria to be applied
# #' @return Stack or brick of thresholded predictions
# #' @examples
# #' areas.occ.lst <- f.area.occ(mtp.l)
# #' @export
# #### 5.1 compute area occupied at multiple scenarios
#' Compute species' total suitable area
#'
#' Compute total suitable area at multiple climatic scenario, threshold and model criteria.
#'
#' @inheritParams plot_mdl_diff_b
#' @inheritParams get_tsa
#' @seealso \code{\link[raster]{area}}, \code{\link{get_tsa}}, \code{\link{get_cont_permimport}},
#' \code{\link{get_fpa}}, \code{\link{get_cont_permimport_b}}, \code{\link{get_fpa_b}}
#' @return List (one element per species) of data.frames containing the total
#' suitable area for each climatic scenario, threshold and model criterion. The
#' combined table is also written to "3_out.MaxEnt/metric.totalArea.csv".
#' @examples
#' \dontrun{
#' areas.occ.lst <- get_tsa_b(mtp.l=mods.thrshld.lst)
#' }
#' @export
get_tsa_b <- function(mtp.l, restrict=NULL, digits=0){
  species.names <- names(mtp.l)
  # One area table per species, computed by the single-species worker get_tsa().
  per.species <- vector("list", length(mtp.l))
  for (i in seq_along(mtp.l)) {
    per.species[[i]] <- get_tsa(mtp.l[[i]], restrict, digits, species.names[i])
  }
  names(per.species) <- species.names
  # Stack all species into a single long table and persist it for inspection.
  combined <- data.table::rbindlist(per.species, idcol = "sp")
  utils::write.csv(combined, paste0("3_out.MaxEnt/metric.totalArea.csv"))
  per.species
}
#' Compute one species' total suitable area
#'
#' Compute the total suitable area of the thresholded predictions of a single
#' species, for every combination of climatic scenario, threshold criterion and
#' model selection criterion. The resulting table is also written to
#' "3_out.MaxEnt/Mdls.<sp.nm>/metric.totalArea<sp.nm>.csv".
#'
#' @inheritParams plot_mdl_diff
#' @param digits integer indicating the number of decimal places. see ?round for details.
#' @param restrict a raster to select a region to compute area.
#' @param sp.nm Species name; used to build the output csv path.
#' @seealso \code{\link[raster]{area}}, \code{\link{get_tsa_b}}, \code{\link{get_cont_permimport}}, \code{\link{get_fpa}},
#' \code{\link{get_cont_permimport_b}}, \code{\link{get_fpa_b}}
#' @return data.frame with one row per (Clim.scen, threshold, Model) combination
#' and the corresponding total suitable area in the TotSuitArea column.
#' @examples
#' \dontrun{
#' # called once per species by get_tsa_b():
#' areas.occ.lst <- get_tsa_b(mtp.l=mods.thrshld.lst)
#' }
#' @export
get_tsa <- function(mtp, restrict, digits, sp.nm){ # species, areas
# NOTE(review): `tnm` is not an argument -- it is assumed to be a package-level
# vector of threshold-criterion abbreviations available at run time; confirm.
thrshld.nms <- paste(paste0(".", tnm), collapse = "|")
# thrshld.nms <- paste(paste0(".", c("fcv1", "fcv5", "fcv10", "mtp", "x10ptp", "etss", "mtss", "bto", "eetd")), collapse = "|")
# Model-criterion names: strip the "Mod." prefix and threshold suffixes from
# the layer names of the first scenario/threshold stack.
c.nms <- gsub(paste0("Mod\\.|", gsub("\\.", "\\\\.", thrshld.nms)), "", names(mtp[[1]][[2]][[1]]))
c.nms2 <- vector("character", length(c.nms))
s.nms <- c("LowAIC", "ORmtp", "OR10", "AUCmtp", "AUC10", "^AvgAIC", "^EBPM", "^WAAUC", "^ESOR")
# Canonicalise the model-criterion names: append each matched selection
# criterion (the extra x/y/z arguments are unused; c.nms2 is updated via <<-).
invisible(sapply(seq_along(s.nms), function(i, x, y, z){
si <- grepl(s.nms[i], c.nms)
if(sum(si)>0){
c.nms2[si] <<- gsub("\\^|^\\.", "", paste(c.nms2[si], s.nms[i], sep = "."))
}
}, c.nms, s.nms, c.nms2))
c.nms <- c.nms2
# Pre-sized result array: scenario x threshold x model criterion.
areas <- array(dim=c(length(mtp), # rows for pred.scenario
length(mtp[[1]][[2]]), # cols for threshold criteria
raster::nlayers(mtp[[1]][[2]][[1]])), # sheet (3rd dim) for model criteria
dimnames = list(names(mtp), # pred.scenario
names(mtp[[1]][[2]]), # threshold criteria
c.nms )) # model criteria
thrshld.crit <- names(mtp[[1]][[1]])
# print(sp.nm)
# areas <- areas
# mtp <- mtp
# Nested iteration: scenario -> threshold criterion -> model layer; for each
# binary layer, sum the cell areas where the prediction equals 1.
ar.mods.t.p <- lapply(seq_along(mtp), function(sc, mtp, sp.nm, restrict, digits){ # , areas # pred.scenario
mtp.sc <- mtp[[sc]][[2]]
ar.mods.t <- sapply(seq_along(mtp.sc), function(t, mtp.sc, sp.nm, sc, restrict, digits){ # , areas # threshold criteria
mtp.sc.t <- mtp.sc[[t]]
ar.mods <- sapply(1:raster::nlayers(mtp.sc.t), function(m, mtp.sc.t, sp.nm, sc, t, restrict, digits){ # , areas # model criteria
ar <- mtp.sc.t[[m]]
# Optionally mask the prediction to the `restrict` raster before measuring.
if(grDevices::is.raster(restrict)){
# NOTE(review): raster::res() returns a length-2 (x, y) vector, so this `if`
# condition has length 2 -- a warning in older R and an error in R >= 4.2;
# should probably be any(res(ar) != res(restrict)). TODO confirm and fix.
if(raster::res(ar)!=raster::res(restrict)){
ar <- raster::resample(ar, restrict)
ar <- ar*restrict
}
}
# Total area (raster::area units, km^2 for lon/lat rasters) of suitable cells.
ar <- sum(raster::area(ar, na.rm=TRUE)[raster::getValues(ar)==1], na.rm=TRUE)
ar <- round(ar, digits = digits)
# areas[sc,t,m] <<- ar
return(ar) }, mtp.sc.t, sp.nm, sc, t, restrict, digits) # , areas # model criteria
return(ar.mods) }, mtp.sc, sp.nm, sc, restrict, digits) # , areas# threshold criteria
return(ar.mods.t) }, mtp, sp.nm, restrict, digits) # , areas # pred.scenario
ar.mods.t.p <- simplify2array(ar.mods.t.p) # transform list into array
# Normalise the collapsed dimensions (sapply/simplify2array drop length-1
# dims), then flatten in (model, threshold, scenario) order so the values line
# up with the expand.grid() below.
# NOTE(review): a known issue is reported in the file header ("number of items
# to replace is not a multiple of replacement length") for some input shapes.
if(length(dim(ar.mods.t.p))==3){
ar.mods.t.p <- array(aperm(ar.mods.t.p, c(3,2,1))) #,
} else if(length(dim(ar.mods.t.p))==2){
dim(ar.mods.t.p) <- c(dim(ar.mods.t.p), 1)
ar.mods.t.p <- array(aperm(ar.mods.t.p, c(3,2,1))) #,
} else if(length(dim(ar.mods.t.p))==1){
dim(ar.mods.t.p) <- c(dim(ar.mods.t.p), 1, 1)
ar.mods.t.p <- array(aperm(ar.mods.t.p, c(3,2,1))) #,
} # else if(is.null(dim(ar.mods.t.p))){
# ar.mods.t.p <- ar.mods.t.p
# }
# https://stackoverflow.com/questions/40921426/converting-array-to-matrix-in-r
# Long-format result: one row per scenario/threshold/model combination.
areas <- data.frame(expand.grid(Clim.scen=names(mtp), # pred.scenario
threshold=names(mtp[[1]][[2]]), # threshold criteria
Model=c.nms), # model criteria
TotSuitArea=ar.mods.t.p)
# areas <- as.data.frame(areas) #
# colnames(areas) <- paste(thrshld.crit, rep(c.nms, each=length(thrshld.crit)), sep = ".")
# areas <- data.table::melt(areas)
# colnames(areas) <- c("Clim.scen", "threshold", "Model", "TotSuitArea")
utils::write.csv(areas, paste0("3_out.MaxEnt/Mdls.", sp.nm, "/metric.totalArea", sp.nm, ".csv"))
return(areas)
}
# #### 4.7 extract model results
# ### 4.7.1 variable contribution and importance
#' Compute variable contribution and permutation importance
#'
#' Compute variable contribution and importance for each model
#'
# #' @param mcmp.l Stack or brick of predictions to apply the threshold
#' @inheritParams thrshld_b
#' @inheritParams get_tsa_b
#' @seealso \code{\link{get_cont_permimport}}, \code{\link{get_tsa}}, \code{\link{get_fpa}},
#' \code{\link{get_tsa_b}}, \code{\link{get_fpa_b}}, \code{\link[dismo]{maxent}}
#' @return List of arrays containing variable contribution and importance for each species
#' @examples
#' \dontrun{
#' get_cont_permimport_b(mcmp.l = mxnt.mdls.preds.lst)
#' }
#' @export
get_cont_permimport_b <- function(mcmp.l){
  # Batch wrapper over get_cont_permimport(): computes variable contribution
  # and permutation importance for every species in 'mcmp.l', then writes the
  # combined tables (all species stacked) into "3_out.MaxEnt".
  out.dir <- "3_out.MaxEnt"
  if(!dir.exists(out.dir)) dir.create(out.dir)
  sp.names <- names(mcmp.l)
  # One list element per species, each holding contribution + importance.
  var.contPermImp <- lapply(seq_along(mcmp.l), function(i){
    get_cont_permimport(mcmp.l[[i]], sp.names[i])
  })
  names(var.contPermImp) <- sp.names
  # Stack the per-species contribution tables and save.
  cont.all <- data.table::rbindlist(lapply(var.contPermImp, function(x) x[[1]]),
                                    idcol = "sp", fill = TRUE)
  utils::write.csv(cont.all, "3_out.MaxEnt/metric.var.Contribution.csv")
  # Same for the permutation-importance tables.
  imp.all <- data.table::rbindlist(lapply(var.contPermImp, function(x) x[[2]]),
                                   idcol = "sp", fill = TRUE)
  utils::write.csv(imp.all, "3_out.MaxEnt/metric.var.PermImportance.csv")
  return(var.contPermImp)
}
#' Compute variable contribution and permutation importance
#'
#' Compute variable contribution and importance for each model
#'
# #' @param mcmp.l Stack or brick of predictions to apply the threshold
#' @inheritParams thrshld
#' @inheritParams get_tsa
#' @seealso \code{\link{get_cont_permimport_b}}, \code{\link{get_tsa}}, \code{\link{get_fpa}},
#' \code{\link{get_tsa_b}}, \code{\link{get_fpa_b}}, \code{\link[dismo]{maxent}}
#' @return List of arrays containing variable contribution and importance for each species
#' @examples
#' \dontrun{
#' get_cont_permimport(mcmp = mxnt.mdls.preds)
#' }
#' @export
get_cont_permimport <- function(mcmp, sp.nm) {
  # Extract each selected model's variable contribution (%) and permutation
  # importance from the fitted MaxEnt objects, append weighted-average rows
  # for whichever ensemble criteria are present among the predictions
  # (AvgAIC, WAAUC, EBPM, ESOR), and write one csv per metric into the
  # species' output folder.
  # NOTE(review): assumes 'mcmp' carries 'mxnt.mdls' (list of dismo::maxent
  # fits), 'selected.mdls' (selection table with rm/features/sel.cri columns)
  # and 'mxnt.preds' as built upstream -- confirm against the calling code.
  mxnt.mdls <- mcmp$mxnt.mdls
  sel.mod.nms <- paste0("Mod.", mcmp$selected.mdls$sel.cri)
  mod.nms <- paste0("Mod_", format(mcmp$selected.mdls[, "rm"], digits=2), "_", mcmp$selected.mdls[, "features"]) #
  # mod.nms <- paste0("Mod.", mcmp$selected.mdls$settings)
  pred.nms <- names(mcmp$mxnt.preds[[1]])
  # Variable names recovered from the "<var>.contribution" rows of the
  # MaxEnt results matrix of the first fitted model.
  var.nms <- gsub( ".contribution", "", rownames(mxnt.mdls[[1]]@results)[grepl("contribution", rownames(mxnt.mdls[[1]]@results))])
  # w.mdls <- mcmp$selected.mdls$w.AIC
  # Weight vectors for each ensemble criterion; only built if that criterion
  # appears among the prediction names.  AIC ensembles use Akaike weights,
  # WAAUC uses mean test AUC, EBPM/ESOR use equal weights.
  if(sum(grepl("AvgAIC", pred.nms))>0) {
    wv.aic <- mcmp[["selected.mdls"]][grep("AIC_", mcmp[["selected.mdls"]]$sel.cri),"w.AIC"]
  }
  if(sum(grepl("WAAUC", pred.nms))>0) {
    wv.wa <- mcmp[["selected.mdls"]][grep("WAAUC_", mcmp[["selected.mdls"]]$sel.cri),"avg.test.AUC"]
  }
  if(sum(grepl("EBPM", pred.nms))>0) {
    wv.bp <- rep(1, length(grep("EBPM", mcmp[["selected.mdls"]]$sel.cri)))
  }
  if(sum(grepl("ESOR", pred.nms))>0) {
    wv.es <- rep(1, length(grep("ESOR_", mcmp[["selected.mdls"]]$sel.cri)))
  }
  ## variable contributions and importance
  # One row per fitted model, one column per environmental variable.
  var.cont.df <- matrix(nrow = length(mxnt.mdls), ncol = length(var.nms))
  rownames(var.cont.df) <- mod.nms
  colnames(var.cont.df) <- var.nms
  var.permImp.df <- var.cont.df
  # Fill each model's row with its contribution / permutation-importance
  # values taken from the MaxEnt results matrix.
  for(i in 1:nrow(var.cont.df)){
    var.cont.df[i,] <- mxnt.mdls[[i]]@results[grepl("contribution", rownames(mxnt.mdls[[i]]@results))]
    var.permImp.df[i,] <- mxnt.mdls[[i]]@results[grepl("permutation.importance", rownames(mxnt.mdls[[i]]@results))]
  }
  # Weighted mean of a metric across the models whose selection-criterion
  # name matches 'pattern'; returns a single named row for the ensemble.
  f.wm <- function(pattern="AIC_", pred.nms, sel.mod.nms, var.nms, wv, df, dimnames1="Mod.ensemble" ){
    matrix(apply(data.frame(matrix(df[grep(pattern, sel.mod.nms),],
      nrow = sum(grepl(pattern, sel.mod.nms)), byrow = FALSE ) ), 2, function(x, wv) {
        stats::weighted.mean(x, wv)
      }, wv), nrow = 1, dimnames = list(dimnames1, var.nms) )
  }
  # Prepend one ensemble row per criterion (when present) above the
  # individual-model rows.
  var.cont.df <- as.data.frame(rbind(
    if(sum(grepl("AvgAIC", pred.nms))>0){
      f.wm("AIC_", pred.nms, sel.mod.nms, var.nms, wv.aic, var.cont.df, dimnames1="Mod.AvgAIC")
    },
    if(sum(grepl("WAAUC", pred.nms))>0){
      f.wm("WAAUC_", pred.nms, sel.mod.nms, var.nms, wv.wa, var.cont.df, dimnames1="Mod.WAAUC")
    },
    if(sum(grepl("EBPM", pred.nms))>0){
      f.wm("EBPM_", pred.nms, sel.mod.nms, var.nms, wv.bp, var.cont.df, dimnames1="Mod.EBPM")
    },
    if(sum(grepl("ESOR", pred.nms))>0){
      f.wm("ESOR_", pred.nms, sel.mod.nms, var.nms, wv.es, var.cont.df, dimnames1="Mod.ESOR")
    },
    var.cont.df))
  var.permImp.df <- as.data.frame(rbind(
    if(sum(grepl("AvgAIC", pred.nms))>0){
      f.wm("AIC_", pred.nms, sel.mod.nms, var.nms, wv.aic, var.permImp.df, dimnames1="Mod.AvgAIC")
    },
    if(sum(grepl("WAAUC", pred.nms))>0){
      f.wm("WAAUC_", pred.nms, sel.mod.nms, var.nms, wv.wa, var.permImp.df, dimnames1="Mod.WAAUC")
    },
    if(sum(grepl("EBPM", pred.nms))>0){
      f.wm("EBPM_", pred.nms, sel.mod.nms, var.nms, wv.bp, var.permImp.df, dimnames1="Mod.EBPM")
    },
    if(sum(grepl("ESOR", pred.nms))>0){
      f.wm("ESOR_", pred.nms, sel.mod.nms, var.nms, wv.es, var.permImp.df, dimnames1="Mod.ESOR")
    },
    var.permImp.df))
  # Rows whose name is not an individual-model name are the ensemble rows
  # just added; include their names in the selection-criteria column.
  mnms.i <- is.na(match(rownames(var.cont.df), mod.nms))
  sel.mod.nms <- c(rownames(var.cont.df)[mnms.i], sel.mod.nms)
  var.cont.df <- cbind(sel.crit=sel.mod.nms, var.cont.df)
  # var.cont.df$sel.crit <- as.character(var.cont.df$sel.crit)
  # var.cont.df$sel.crit[!is.na(match(rownames(var.cont.df), mod.nms))] <- sel.mod.nms
  var.permImp.df <- cbind(sel.crit=sel.mod.nms, var.permImp.df)
  # var.contPermImp[[sp]] <- array(c(as.matrix(var.cont.df), as.matrix(var.permImp.df)), c(nrow(var.cont.df), ncol(var.cont.df), 2), dimnames = c(dimnames(var.cont.df), list(c("contribution", "permutation.importance") )))
  # Persist the per-species tables next to the other model metrics.
  utils::write.csv(var.cont.df, paste0("3_out.MaxEnt/Mdls.", sp.nm, "/metric.var.Contribution.", sp.nm, ".csv"))
  utils::write.csv(var.permImp.df, paste0("3_out.MaxEnt/Mdls.", sp.nm, "/metric.var.PermImportance", sp.nm, ".csv"))
  # var.contPermImp[[sp]] <- list(contribution=var.cont.df, permutation.importance=var.permImp.df)
  return(list(contribution=var.cont.df, permutation.importance=var.permImp.df))
}
#' Compute "Fractional predicted area" ('n of occupied pixels'/n)
#'
#' Compute "Fractional predicted area" ('n of occupied pixels'/total n) or ('area of occupied pixels'/total area)
#'
#' @inheritParams get_tsa_b
#' @seealso \code{\link{get_fpa}}, \code{\link{get_tsa}}, \code{\link{get_cont_permimport}}
#' @seealso \code{\link{get_tsa_b}}, \code{\link{get_cont_permimport_b}}
#' @return A list of species' FPAs computed for each climatic scenario, threshold and model criteria
#' @examples
#' \dontrun{
#' get_fpa_b(mtp.l=mods.thrshld.lst)
#' }
#' @export
get_fpa_b <- function(mtp.l, digits = 3){
  # Batch wrapper over get_fpa(): computes the fractional predicted area for
  # every species in 'mtp.l' and writes the combined table to disk.
  sp.names <- names(mtp.l)
  fpa.by.sp <- lapply(seq_along(mtp.l), function(i){
    get_fpa(mtp.l[[i]], digits, sp.names[i])
  })
  names(fpa.by.sp) <- sp.names
  # Stack one block of rows per species and persist the combined metric.
  combined <- data.table::rbindlist(fpa.by.sp, idcol = "sp")
  utils::write.csv(combined, "3_out.MaxEnt/metric.FracPredArea.csv")
  return(fpa.by.sp)
}
#' Compute "Fractional predicted area" ('n of occupied pixels'/n)
#'
#' Compute "Fractional predicted area" ('n of occupied pixels'/total n) or ('area of occupied pixels'/total area)
#'
#' @inheritParams get_tsa
#' @seealso \code{\link{get_fpa_b}}, \code{\link{get_tsa}}, \code{\link{get_cont_permimport}}
#' @return A list of species' FPAs computed for each climatic scenario, threshold and model criteria
#' @examples
#' \dontrun{
#' get_fpa(mtp.l=mods.thrshld.lst)
#' }
#' @export
get_fpa <- function(mtp, digits, sp.nm){ # species, areas
  # Fractional predicted area for one species: for every climate scenario x
  # threshold x model combination in 'mtp', the area of cells predicted
  # suitable (value == 1) divided by the total non-NA area of the raster.
  # Writes the resulting long-format table to the species' folder.
  # print(sp.nm)
  # areas <- array(dim=c(length(mtp), # rows for pred.scenario
  # length(mtp[[1]][[2]]), # cols for threshold criteria
  # raster::nlayers(mtp[[1]][[2]][[1]])), # sheet (3rd dim) for model criteria
  # dimnames = list(names(mtp), # pred.scenario
  # names(mtp[[1]][[2]]), # threshold criteria
  # gsub(paste(c(".mxnt.pred.", ".current.", "Mod.", "fcv1", "fcv5",
  # "fcv10", "mtp", "x10ptp", "etss", "mtss", "bto",
  # "eetd", paste0(".", names(mtp), ".") ), collapse = "|"), "", names(mtp[[1]][[2]][[1]]))
  # )) # model criteria
  # areas <- data.table::melt(areas)
  # colnames(areas)[1:4] <- c("Clim.scen", "threshold", "Model", "FPA")
  #
  # # areas <- areas
  # # mtp.l.sp <- mtp
  # areas <- expand.grid(Clim.scen=names(mtp),
  # threshold=names(mtp[[1]][[2]]),
  # Model=gsub(paste(c(".mxnt.pred.", ".current.", "Mod.", "fcv1", "fcv5",
  # "fcv10", "mtp", "x10ptp", "etss", "mtss", "bto",
  # "eetd", paste0(".", names(mtp), ".") ), collapse = "|"), "", names(mtp[[1]][[2]][[1]])),
  # FPA=NA)
  # Nested walk: scenario -> threshold -> model layer.  The innermost value
  # is (suitable area)/(total non-NA area) of the binary layer.
  # NOTE(review): '[[2]]' appears to select the binary predictions within
  # each scenario element -- confirm against the thrshld() output structure.
  fpa.mods.t.p <- lapply(seq_along(mtp), function(sc, mtp, sp.nm, digits){ # pred.scenario
    mtp.sc <- mtp[[sc]][[2]]
    fpa.mods.t <- sapply(seq_along(mtp.sc), function(t, mtp.sc, sp.nm,sc, digits){ # threshold criteria
      mtp.sc.t <- mtp.sc[[t]]
      fpa.mods <- sapply(1:raster::nlayers(mtp.sc.t), function(m, mtp.sc.t, sp.nm,sc,t, digits){ # model criteria
        ar <- mtp.sc.t[[m]]
        FPA <- (sum(raster::area(ar, na.rm=TRUE)[raster::getValues(ar)==1], na.rm=TRUE)/
          sum(raster::area(ar, na.rm=TRUE)[!is.na(raster::getValues(ar))], na.rm=TRUE) )
        return(FPA) }, mtp.sc.t, sp.nm,sc,t, digits) # model criteria
      return(fpa.mods) }, mtp.sc, sp.nm,sc, digits) # threshold criteria
    return(fpa.mods.t) }, mtp, sp.nm, digits) # pred.scenario
  fpa.mods.t.p <- simplify2array(fpa.mods.t.p)
  # Normalize the result to a 3-d array regardless of how many scenarios /
  # thresholds / models collapsed during simplify2array(), then flatten it
  # in the order expected by the expand.grid() rows below.
  if(length(dim(fpa.mods.t.p))==3){
    fpa.mods.t.p <- round(array(aperm(fpa.mods.t.p, c(3,2,1))), digits = digits) #,
  } else if(length(dim(fpa.mods.t.p))==2){
    dim(fpa.mods.t.p) <- c(dim(fpa.mods.t.p), 1)
    fpa.mods.t.p <- round(array(aperm(fpa.mods.t.p, c(3,2,1))), digits = digits) #,
  } else if(length(dim(fpa.mods.t.p))==1){
    dim(fpa.mods.t.p) <- c(dim(fpa.mods.t.p), 1, 1)
    fpa.mods.t.p <- round(array(aperm(fpa.mods.t.p, c(3,2,1))), digits = digits) #,
  } #else if(is.null(dim(fpa.mods.t.p))){
  # fpa.mods.t.p <- fpa.mods.t.p
  # }
  # Long-format table: one row per scenario x threshold x model; the model
  # names are cleaned of prediction/threshold prefixes.
  areas <- data.frame(expand.grid(Clim.scen=names(mtp),
    threshold=names(mtp[[1]][[2]]),
    Model=gsub(paste(c(".mxnt.pred.", ".current.", "Mod.", "fcv1", "fcv5",
      "fcv10", "mtp", "x10ptp", "etss", "mtss", "bto",
      "eetd", paste0(".", names(mtp), ".") ), collapse = "|"), "", names(mtp[[1]][[2]][[1]]))),
    FPA=fpa.mods.t.p)
  utils::write.csv(areas, paste0("3_out.MaxEnt/Mdls.", sp.nm, "/metric.FracPredArea.", sp.nm, ".csv")) # reorder ds
  return(areas)
}
#' Compute "Omission Rate"
#'
#' Compute "Omission Rate" of species occurence points for a climatic scenario (usually "current")
#'
#' @inheritParams get_tsa_b
#' @inheritParams ENMevaluate_b
#' @param clim.scn.nm name to locate climatic scenario from which Omission Rate will
#' be extracted. Usually the scenario used to calibrate maxent models
#' @seealso \code{\link{get_tsa}}, \code{\link{get_cont_permimport}}, \code{\link{get_fpa}}
#' @return A list of species' ORs computed for the selected (current) climatic scenario and
#' each threshold and model criteria
#' @examples
#' \dontrun{
#' get_OR(mtp.l=mods.thrshld.lst, occ.l=occ.locs)
#' }
# #'@export
get_OR <- function(mtp.l, occ.l, clim.scn.nm = "current", digits = 3){ # , save=TRUE
  # Omission rate (OmR) for each species: the proportion of that species'
  # occurrence points falling OUTSIDE the binary (thresholded) prediction,
  # i.e. 1 - sensitivity, computed for every threshold x model combination
  # of the climatic scenario matched by 'clim.scn.nm'.
  # Results are written per species and combined across species.
  if(is.null(clim.scn.nm)){
    stop("Need to specify 'clim.scn.nm'")
  }
  df.OmR <- vector("list")
  for(sp in names(mtp.l)){ # species
    occ.spdf <- occ.l[[sp]]
    # Coerce plain occurrence tables to SpatialPoints, guessing the
    # longitude/latitude columns by name.  inherits() is used instead of
    # class(x) %in% ... because class() may return multiple values.
    if(!inherits(occ.spdf, c("SpatialPoints", "SpatialPointsDataFrame"))){
      lon.col <- colnames(occ.spdf)[grep("^lon$|^long$|^longitude$", colnames(occ.spdf), ignore.case = TRUE)][1]
      lat.col <- colnames(occ.spdf)[grep("^lat$|^latitude$", colnames(occ.spdf), ignore.case = TRUE)][1]
      sp::coordinates(occ.spdf) <- c(lon.col, lat.col)
    }
    N.pts <- length(occ.spdf)
    # Locate the requested climatic scenario among this species' predictions.
    ci <- grep(clim.scn.nm, names(mtp.l[[sp]]))
    if(length(ci)<1){
      stop("No climatic scenario named as: ", clim.scn.nm)
    }
    trlds <- names(mtp.l[[sp]][[ci]]$binary)
    thrshld.nms <- paste0(".", trlds, collapse = "|") # c("fcv1", "fcv5", "fcv10", "mtp", "x10ptp", "etss", "mtss", "bto", "eetd")
    # Clean layer names down to the bare model identifiers.
    mdls <- gsub(paste(c(thrshld.nms, "Mod.", ".current"), collapse = "|"), "", names(mtp.l[[sp]][[ci]]$binary[[1]]))
    nr <- length(mdls)
    nc <- length(trlds)
    # One row per model, one column per threshold, plus the model label.
    df.OmR[[sp]] <- data.frame(matrix(nrow=nr, ncol=nc), Model=mdls)
    colnames(df.OmR[[sp]])[seq_len(nc)] <- trlds
    for(t in trlds){ # threshold criteria
      for(m in seq_len(raster::nlayers(mtp.l[[sp]][[ci]]$binary[[t]]))){ # model criteria
        # Fraction of presence points predicted absent (binary layer == 0).
        df.OmR[[sp]][m, t] <- round((1-(sum(raster::extract(mtp.l[[sp]][[ci]]$binary[[t]][[m]], occ.spdf), na.rm = TRUE)/N.pts) ), digits)
      } # model criteria
    } # threshold criteria
    # Reshape to long format: Model | threshold | OmR.
    df.OmR[[sp]] <- data.table::melt(data.table::data.table(df.OmR[[sp]]), id.vars="Model", variable.name="threshold", value.name="OmR")
    utils::write.csv(as.data.frame(df.OmR[[sp]]), paste0("3_out.MaxEnt/Mdls.", sp, "/metric.OmRate", sp, ".csv")) # reorder ds
  }
  # Combined table across species.
  df.OmR.c <- data.table::rbindlist(df.OmR, idcol = "sp")
  utils::write.csv(df.OmR.c, paste0("3_out.MaxEnt/metric.OmRate.csv")) # reorder ds
  return(df.OmR)
}
|
d135eb12f18320326e20dd2532e408a49803703c
|
25102e1b8cc367b03be176800fbfea9e3e6eeed6
|
/R/print.adf.r
|
d80091ee658033e28a5337f89f3bc8809a493797
|
[] |
no_license
|
kaneplusplus/adf
|
b63731157990a5ffc8b1928370e730d7dfb42977
|
16bbc3072da2effcb9faabf6cc1ab24e1f360cca
|
refs/heads/master
| 2021-05-02T08:45:44.939640
| 2018-03-23T14:39:46
| 2018-03-23T14:39:46
| 120,813,401
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 285
|
r
|
print.adf.r
|
print.adf <- function (x, ...) {
  # S3 print method for "adf" (abstract data frame) objects: shows the
  # number of columns, then one line per column with its name and class.
  # Column-name field width is clamped to [10, 40] characters.
  width <- min(max(nchar(x$colNames), 10L), 40L)
  n_cols <- length(x$colClasses)
  cat(sprintf(" An abstract data frame with %d columns:\n\n", n_cols))
  row_fmt <- paste0(" %-", width, "s %-10s")
  cat(sprintf(row_fmt, names(x$colClasses), x$colClasses), sep = "\n")
}
|
6380375ac7fe87ec5f18b6b389ba077024c4e3d3
|
de0c51e2035c743f8855b0c7041a34139eb1e4fe
|
/man/cambioInterAnual.Rd
|
9c0ff28bb8e49cd2162e238a160e8df726cbec81
|
[] |
no_license
|
hugoallan9/funcionesINE
|
6c163b55ccb445ecb0427c9bd93d252560a7946a
|
0031f6236b40a6c04adc7c7481f8a0525c79f380
|
refs/heads/master
| 2021-01-19T02:11:56.808791
| 2018-01-15T13:13:21
| 2018-01-15T13:13:21
| 29,080,328
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 559
|
rd
|
cambioInterAnual.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/funcionesINE.R
\name{cambioInterAnual}
\alias{cambioInterAnual}
\title{Función que calcula el cambio interanual en porcentaje para un data frame dado}
\usage{
cambioInterAnual(data, primeraPos = 5, ultimaPos = 9)
}
\arguments{
\item{data}{El data frame sobre el cual se desea hacer el calculo}
\item{primeraPos}{Posicion de la primera columna que se usa en el calculo}
\item{ultimaPos}{Posicion de la ultima columna que se usa en el calculo}
}
\value{
Cambio interanual
}
\description{
Función que calcula el cambio interanual en porcentaje para un data frame dado
}
|
84a699e32483eec065c8a75970808819a22f2a9b
|
f8a8acd017d7a3cf0891ab51b64cb67750805da0
|
/plot3.R
|
2b1c34f1ebade04e21f516418bee06ce1852db73
|
[] |
no_license
|
jrosenbl/NEI_data
|
414a61e94849adcfe910b8a68e4e479b30b2e674
|
b676bff42f6f2a954ec9328eeaf32705cf6de6ea
|
refs/heads/master
| 2016-09-06T07:22:21.034915
| 2014-07-27T22:41:13
| 2014-07-27T22:41:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,686
|
r
|
plot3.R
|
# plot 3
# Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad) variable,
# which of these four sources have seen decreases in emissions from 1999–2008 for Baltimore City?
# Which have seen increases in emissions from 1999–2008? Use the ggplot2 plotting system to make
# a plot to answer this question.
#
library(ggplot2)
library(plyr)
# Ask the user whether an already-loaded data frame should be reused.
#
# df_name: name (string) of the object to look for on the search path.
# Returns TRUE if the object exists and the user answers "Y" or "y" at the
# prompt; FALSE otherwise (including when the object does not exist, in
# which case no prompt is shown).
use_existing_dataframe <- function(df_name) {
  if (exists(df_name)) {
    # get() resolves the object by name; it replaces the less idiomatic
    # eval(as.symbol(df_name)) used previously.
    message('data frame ', df_name, ' with ', nrow(get(df_name)), ' rows exists')
    yn <- readline('use it? ')
    if (yn %in% c('Y', 'y')) {
      return(TRUE)
    }
  }
  FALSE
}
# Script configuration: file locations and output plot name.
PLOT_NUMBER = 3
NEI_DATA_FILE_NAME = 'summarySCC_PM25.rds'
IN_DIR = './'
PLOT_DIR = './'
nei_data_file = paste0(IN_DIR,NEI_DATA_FILE_NAME)
plot_name = sprintf('plot%s.png',PLOT_NUMBER)
plot_file = paste0(PLOT_DIR,plot_name)
# Load the NEI data only if the user does not want to reuse an already
# loaded 'nei' object from the global workspace (interactive prompt).
if (!use_existing_dataframe('nei')) {
  message('reading nei data')
  nei = readRDS(nei_data_file)
}
# subset to Baltimore Maryland (fips == 24510) and years 1999 to 2008
baltimore_nei = subset(nei, fips == 24510 & (year > 1998 & year < 2009))
# compute table of total Emissions by type and year
sum_by_type_yr = ddply(baltimore_nei, type ~ year, summarize, sum=sum(Emissions))
# plot a facet of annual totals for each type
ggplot(sum_by_type_yr, aes(x=factor(year), y=sum)) +
  facet_grid(.~type) +
  geom_bar(stat='identity') +
  geom_text(aes(label=round(sum,4)), size=2, vjust=-1) +
  theme(axis.text.x = element_text(size=8)) +
  labs(title='Total Emissions by Source and Year', x = 'Year', y = 'Total Emissions')
# ggsave() writes the most recently displayed ggplot to 'plot_file'.
ggsave(plot_file, width=8, height=8, units='in')
message(plot_name,' saved')
|
0318a31ce0ab8d2e17714b72e683bb301ef2ee52
|
d7ddcb1505b3df46d22578a704634e47a4f41155
|
/man/MTDrh.Rd
|
fcd2244690e1a9c8a064548dd4c8325f756d9fcc
|
[] |
no_license
|
cran/MTDrh
|
04474f82b24b0de69c9f1c6f5862fdba8d6b9d44
|
54b8d92ca1317c2fc72fe5b53fb6560408ac93ae
|
refs/heads/master
| 2021-01-12T08:47:44.164591
| 2016-12-16T23:31:52
| 2016-12-16T23:31:52
| 76,692,689
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,155
|
rd
|
MTDrh.Rd
|
\name{MTDrh}
\alias{MTDrh}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Construct Mass Transportation Distance Rank Histogram
}
\description{
Constructs a mass transportation distance rank histogram to assess the reliability of probabilistic scenarios using observations for a set of instances [1].
}
\usage{
MTDrh(scenarios, observation, prob = NULL, debias = FALSE, transformation = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{scenarios}{ A dataset that contains scenarios. It should be a 3 dimensional array:
%% ~~Describe \code{scenarios} here~~
(dimension of each scenario)x(number of scenarios per instance)x(number of instances)
}
\item{observation}{A dataset that contains observations. The dimension of each observation and the number of instances should match the dimension and number of instances of the scenarios. It should be a matrix:
%% ~~Describe \code{observation} here~~
(dimension of each observation)x(number of instances)
}
\item{prob}{ A dataset that contains the probability of each scenario for each instance.
If prob is not given, the default is that the scenarios have equal probabilities.
%% ~~Describe \code{prob} here~~
It should be a matrix:
(number of scenarios)x(number of instances)
}
\item{debias}{ If debias=TRUE, the data are debiased for each instance [1].
%% ~~Describe \code{debias} here~~
}
\item{transformation}{If transformation=TRUE, the data are transformed with Mahalanobis transformation for each instance [1].
%% ~~Describe \code{transformation} here~~
}
}
\value{
%% ~Describe the value returned
Returns an array of mass transportation ranks and a histogram plot.
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
[1] D. Sari, Y. Lee, S. Ryan, D. Woodruff. Statistical metrics for assessing the quality of
wind power scenarios for stochastic unit commitment. Wind Energy
19, 873-893 (2016)
doi:10.1002/we.1872
}
\author{ Didem Sari, Sarah M. Ryan
%% ~~who you are~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
#Generate 1000 instances of 10 scenarios and observation with dimension 8
#from the same normal distribution.
scen <- array(rnorm(8*10*1000,0,1),dim=c(8,10,1000))
obs <- array(rnorm(8*1000,0,1),dim=c(8,1000))
ranks <- MTDrh(scen,obs,prob=NULL,debias=FALSE,transformation=FALSE)
#Generate 1000 instances of 27 scenarios and observation with dimension 8
#from AR(1) processes. The marginal distributions of the scenarios and observation
#are the same but the autocorrelation levels are different. The Mahalanobis
#transformation is applied. See Figure 8 [1].
scen <- array(arima.sim(list(order=c(1,0,0),ar=0.10),n=8*27*1000,sd=1),dim=c(8,27,1000))
obs <- array(arima.sim(list(order=c(1,0,0),ar=0.90),n=8*1000,sd=0.45),dim=c(8,1000))
ranks<-MTDrh(scen,obs,prob=NULL,debias=FALSE,transformation=TRUE)
hist(ranks, breaks=c(0:28),xlab="bin",ylab="frequency",col="gray",main="MTD rh")
#Generate 1000 instances of 27 scenarios that have heterogeneous autocorrelation
#levels and corresponding observations with autocorrelation different
#from the scenarios.
#The marginal standard deviations of scenarios and observation match. See Figure 9 [1]
scen1 <- array(arima.sim(list(order=c(1,0,0),ar=0.10),n=8*10*1000,sd=1),dim=c(8,10,1000))
scen2 <- array(arima.sim(list(order=c(1,0,0),ar=0.80),n=8*17*1000,sd=0.64),dim=c(8,17,1000))
scen <- array(NA,dim=c(8,27,1000))
scen[,1:10,]<-scen1
scen[,11:27,]<-scen2
obs <- array(arima.sim(list(order=c(1,0,0),ar=0.50),n=8*1000,sd=0.86),dim=c(8,1000))
ranks<-MTDrh(scen,obs,prob=NULL,debias=FALSE,transformation=TRUE)
hist(ranks, breaks=c(0:28),xlab="bin",ylab="frequency",col="gray",main="MTD rh")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Mass Transportation Distance rank histogram }% use one of RShowDoc("KEYWORDS")
|
c12c444a9259972f79381bf6fac3d2dcded4626c
|
b247d884a1508bc5862d05c920e37b7cf4c805f0
|
/TransientSymmetry/PAA2018/Poster/ExampleBrute.R
|
0dd2401ca1cf91e09e5dd8ffab67b6fd2874299a
|
[] |
no_license
|
timriffe/TransientSymmetry
|
8967d7b66396b9e0c717a78709c45251da71fac4
|
9457c24767b1cae65c74c9b50d0c7ca2ec9e9805
|
refs/heads/master
| 2021-01-25T12:13:45.425307
| 2020-02-24T08:33:01
| 2020-02-24T08:33:01
| 123,459,141
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,104
|
r
|
ExampleBrute.R
|
# Author: tim
###############################################################################
if (system("hostname",intern=TRUE) %in% c("triffe-N80Vm", "tim-ThinkPad-L440")){
# if I'm on the laptop
setwd("/home/tim/git/TransientSymmetry/TransientSymmetry")
}
# -----------------
# gets all possible trajectories assuming nonzero
# transition probs. x is a character vector of states (one character per state)
# results is list with character vector of state sequence. In list because of
# variable length
# Enumerate every possible state trajectory of length 1..maxn over the
# states in 'x' (one character per state), assuming all transitions have
# nonzero probability.
#
# x: character vector of single-character state labels, e.g. c("H", "S").
# maxn: maximum trajectory length (maxn = 0 now returns an empty list
#   instead of erroring, thanks to seq_len()).
# Returns a named list of character vectors; each name is the trajectory
# collapsed to one string (e.g. "HSH").
get_traj <- function(x, maxn = 4){
  traj <- list()
  for (i in seq_len(maxn)) {  # seq_len() avoids the 1:0 pitfall of 1:maxn
    li <- as.matrix(expand.grid(rep(list(x), i)))
    # split() treats the matrix as a column-major vector; recycling the row
    # index over it groups each row's states into one element.
    traj <- c(traj, split(li, seq_len(nrow(li))))
  }
  names(traj) <- vapply(traj, paste, character(1), collapse = "")
  traj
}
# for a given trajectory and set of transition probabilities
# what is the total probability of observing this trajectory.
get_probsHS <- function(traj, probs){
  # Total probability of observing the trajectory 'traj' (followed by
  # death): birth-state probability times the product of the age-specific
  # transition probabilities along the path.
  # 'probs' holds rH/rS (birth-state probabilities) and Tout, a matrix with
  # one row per age and transition labels ("HS", "SD", ...) as columns.
  full_path <- c(traj, "D")  # append the absorbing death state
  n_steps <- length(full_path)
  # Probability of being born into the trajectory's first state.
  birth_prob <- probs[[paste0("r", full_path[1])]]
  # Transition labels, e.g. "HS" for an H -> S move at that age.
  trans_lbl <- paste0(full_path[-n_steps], full_path[-1])
  # diag() of the column-subset matrix picks Tout[age, label] pairs,
  # i.e. the age-matched probability of each successive transition.
  step_probs <- diag(probs$Tout[, trans_lbl, drop = FALSE])
  birth_prob * prod(step_probs)
}
# Tabulate time-spent (T) / time-left (A) values within spells of 'state'
# along one trajectory, weighted by the trajectory's probability scaled to
# 'radix'.  Each spell of duration d contributes the values d, d-1, ..., 0.
# NOTE(review): this definition is overridden by a later get_TA() further
# down in this file (non-rounding, fixed output length 7).
get_TA <- function(traj, state = "S", probs, radix = 1e5,n=4){
  # this assumes we have a large radix, otherwise we wouldn't round!
  # ideally radix chosen to match digit precision of probabilities.
  w <- round(get_probsHS(traj = traj, probs = probs) * radix)
  # get all spell durations
  rl <- rle(traj)
  # select to desired state
  dur <- rl$lengths[rl$values == state]
  # all T and A values
  TA <- c(unlist(sapply(dur,":",0)))
  # tabulate values and weight
  tab <- table(TA) * w
  # fit into standard-length output
  out <- rep(0,(n+1))
  names(out) <- 0:n
  out[names(tab)] <- c(tab)
  out
}
# draw a trajectory as horizontal rect, hard coded to H and S
# Draw one trajectory as a horizontal bar of colored segments (left-aligned
# at x = 0) on an already-open plot.  "H" cells get color H, "S" cells get
# color S; any other state label is used as a color verbatim.
drawTraj <- function(traj, H = "#399345", S = "#e89792", y = 0, h = 1){
  fill <- replace(traj, traj == "H", H)
  fill <- replace(fill, traj == "S", S)
  # Run-length encode so consecutive same-state cells become one rectangle.
  runs <- rle(fill)
  right <- cumsum(runs$lengths)
  left <- right - runs$lengths
  rect(left, y, right, y + h, border = NA, col = runs$values)
}
# same, but right-aligned. Used to show non-symmetry in poster
# Draw one trajectory as a horizontal bar of colored segments, right-aligned
# so the trajectory ends at x = maxl (used to align on death and show the
# lack of symmetry).  "H"/"S" map to the two colors; other labels are used
# as colors verbatim.
drawTrajr <- function(traj, H = "#399345", S = "#e89792", y = 0, h = 1, maxl = 7){
  fill <- replace(traj, traj == "H", H)
  fill <- replace(fill, traj == "S", S)
  # Run-length encode so consecutive same-state cells become one rectangle.
  runs <- rle(fill)
  right <- cumsum(runs$lengths)
  left <- right - runs$lengths
  # Shift the whole bar so its right edge sits at maxl.
  shift <- maxl - max(right)
  rect(left + shift, y, right + shift, y + h, border = NA, col = runs$values)
}
# draw a trajectory in PC diagonal, used for step 2 in poster
# Draw one trajectory along the diagonal of a period-cohort (Lexis-style)
# plot as a sequence of parallelogram segments of width 'w', offset
# horizontally by 'x'.  "H"/"S" map to the two colors; other labels are
# used as colors verbatim.  Extra arguments are passed on to polygon().
drawTrajPC <- function(traj,H = "#05872c", S =gray(.8),x=0,w=.2,...){
  cols <- traj
  cols[cols == "H"] <- H
  cols[cols == "S"] <- S
  # One polygon per run of identical states.
  rl <- rle(cols)
  len <- rl$lengths
  r <- cumsum(rl$lengths)
  n <- length(r)
  l <- c(0, r[-n])
  # Interleave the four corners of each parallelogram (NA separates the
  # polygons so one polygon() call draws them all).
  X <- c(rbind(l,l+len,l+len,l,NA)) + x
  Y <- c(rbind(l,l+len,l+len+w,l+w,NA))
  polygon(X,Y,border=NA,col=rl$values,xpd=TRUE,...)
}
# non-rounded TA, overwrite previous. Hard coded length!
# Tabulate time-spent (T) / time-left (A) values within spells of 'state'
# along one trajectory, weighted by the trajectory's probability scaled by
# 'radix' (no rounding; this definition supersedes the earlier get_TA).
# Each spell of duration d contributes the values d, d-1, ..., 0.
#
# traj: character vector of state labels.
# state: which state's spells to tabulate.
# probs: probability list consumed by get_probsHS().
# radix: scale factor for the trajectory weight.
# n: maximum tabulated T/A value; the output always has names 0..n
#   (n = 6 reproduces the previous hard-coded length-7 output).
get_TA <- function(traj, state = "S", probs, radix = 1, n = 6){
  w <- get_probsHS(traj = traj, probs = probs) * radix
  rl <- rle(traj)
  dur <- rl$lengths[rl$values == state]
  # Every spell of duration d yields d, d-1, ..., 0.
  TA <- c(unlist(sapply(dur, ":", 0)))
  tab <- table(TA) * w
  # Fit the weighted tabulation into a fixed-length named vector.
  out <- rep(0, n + 1)
  names(out) <- 0:n
  out[names(tab)] <- c(tab)
  out
}
#
# what about an example w 6 ages?
# Birth-state probabilities: born sick (rS) vs born healthy (rH).
rS <- .1
rH <- .9
# probs
# Age-specific transition probabilities for ages 0..5; everyone dies at the
# last age (HD/SD end in 1, HS/SH end in 0).
HS <- c(.1,.2,.3,.4,.6,0)
SH <- c(.6,.5,.4,.3,.2,.0)
HD <- c(.01,.05,.1,.2,.3,1)
SD <- c(.2,.3,.4,.5,.7,1)
# Staying probabilities are the complements.
HH <- 1 - (HS + HD)
SS <- 1 - c(SH + SD)
#transition probs in a single matrix, with directions coded in
# column names
Tout <- cbind(HH, HS, HD, SS, SH, SD)
# all necessary probabilities together
probs <- list(rH=rH,rS=rS,Tout=Tout)
# all possible trajectories assuming non-zero transition rates
# between the specified states
trajs <- get_traj(x=c("H","S"),6)
# Probability of each trajectory, collected into a lookup table TR.
probHS <- lapply(trajs,get_probsHS,probs=probs)
TR <- data.frame(traj = names(trajs),prob = unlist(probHS))
#which(TR$traj=="HSHHSS")
rownames(TR) <- NULL
library(xtable)
# for making transition probability line plot
trans <- colnames(probs$Tout)
names(trans) <- c("#92a654", "#4fa9c1", "#256676", "#c99084", "#d6061a", "#84241a")
# transition rate plot
pdf("PAA2018/Poster/Figures/ToyTrans.pdf")
plot(NULL,xlim = c(0,5),ylim=c(0,1),axes=FALSE,xlab = "",ylab="")
# Light gray panel with white gridlines, then one line per transition.
rect(0,0,5,1,border=NA,col=gray(.91))
segments(0,seq(.1,.9,by=.1),5,seq(.1,.9,by=.1),col = "white",lwd=.5)
segments(1:4,0,1:4,1,col = "white",lwd=.5)
matplot(0:5, probs$Tout, type = 'o',
  pch=16,
  lty=1,
  col = names(trans),add=TRUE
)
# Manual axis labels (axes=FALSE above).
text(0:5,0,0:5,pos=1,xpd=TRUE)
text(0,seq(0,1,by=.2),c("0.0","0.2","0.4","0.6","0.8","1.0"),pos=2,xpd=TRUE)
dev.off()
# now show all possible trajectories:
nseq <- length(trajs)
maxy <- 1
# Equal-height slots for the unweighted trajectory-space plot.
yat <- seq(maxy,0,length=(nseq+1))
# aspect ratio shared in several plots
asp <- 20
# the trajectory space
pdf("PAA2018/Poster/Figures/TrajSpace.pdf",width=5,height=12)
plot(NULL, xlim = c(0,8), ylim = c(0, maxy), axes = FALSE, xlab = "", ylab = "",asp=asp)
for (i in 1:nseq){
  # One bar per trajectory, plus a side bar showing its probability.
  drawTraj(trajs[[i]], y = yat[i+1],h= maxy/nseq,H="#05872c",S=gray(.8))
  rect(7,yat[i+1],7+TR$prob[i]*asp,yat[i],border=NA,col=gray(.2),xpd=TRUE)
}
segments(0:6,0,0:6,-.01,xpd=TRUE)
text(0:6,-.01,0:6,pos=1,xpd=TRUE)
segments(7,0,7+.1*asp,0,xpd=TRUE)
segments(c(7,7+.1*asp),0,c(7,7+.1*asp),-.01,xpd=TRUE)
text(c(7,7+.1*asp),-.01,c(0,"0.1"),pos=1,xpd=TRUE)
dev.off()
# the probability-weighted trajectory space, aka trajectory composition
#yat2 <- cumsum(c(0,TR$prob))
# Stacking positions: cumulative probabilities, stacked from the bottom.
yat2 <- rev(cumsum(c(0,rev(TR$prob))))
pdf("PAA2018/Poster/Figures/TrajProbs.pdf", width = 5, height = 12)
plot(NULL, xlim = c(0, 8), ylim = c(0, maxy), axes = FALSE, xlab = "", ylab = "",asp=asp)
for (i in 1:nseq){
  # Bar height now equals the trajectory's probability.
  drawTraj(trajs[[i]], y = yat2[i+1],h= TR$prob[i],H="#05872c",S=gray(.8))
}
segments(0:6,0,0:6,-.01,xpd=TRUE)
text(0:6,-.01,0:6,pos=1,xpd=TRUE)
dev.off()
# same, but aligned on death, shows non symmetry
pdf("PAA2018/Poster/Figures/TrajProbsTTD.pdf", width = 5, height = 12)
plot(NULL, xlim = c(0, 8), ylim = c(0, maxy), axes = FALSE, xlab = "", ylab = "",asp=asp)
for (i in 1:nseq){
  drawTrajr(trajs[[i]], y = yat2[i+1],h= TR$prob[i],H="#05872c",S=gray(.8))
}
# x axis counts time-to-death (6 down to 0).
segments(1:7,0,1:7,-.01,xpd=TRUE)
text(1:7,-.01,6:0,pos=1,xpd=TRUE)
dev.off()
#TR[which.max(TR$prob),]
#TAlist <- lapply(trajs,get_TA,state="S",probs=probs,radix=1e5)
#TA <- colSums(do.call("rbind",TAlist))
# now get prevalence as of a Markov model
# Hx / Sx: probability of being alive in state H / S at each age 1..6,
# obtained by summing the probabilities of all trajectories whose i-th
# state is H (resp. S).
Hx <- Sx <- rep(0,6)
for (i in 1:6){
  Hi <- unlist(lapply(trajs, function(traj,i){
    TF <- length(traj) >= i
    if (TF){
      TF <- traj[i] == "H"
    }
    TF
  },i=i))
  Hx[i] <- sum(TR$prob[Hi])
  Si <- unlist(lapply(trajs, function(traj,i){
    TF <- length(traj) >= i
    if (TF){
      TF <- traj[i] == "S"
    }
    TF
  },i=i))
  Sx[i] <- sum(TR$prob[Si])
}
# and repeat for TTD prevalence because we can!
# to show non-symmetry
# Hy / Sy: same, but indexing from the end of each trajectory, i.e. state
# occupancy classified by time-to-death rather than age.
Hy <- Sy <- rep(0,6)
for (i in 1:6){
  # traj <- c("H","S","H","S","S","H")
  Hi <- unlist(lapply(trajs, function(traj,i){
    TF <- length(traj) >= i
    if (TF){
      TF <- rev(traj)[i] == "H"
    }
    TF
  },i=i))
  Hy[i] <- sum(TR$prob[Hi])
  Si <- unlist(lapply(trajs, function(traj,i){
    TF <- length(traj) >= i
    if (TF){
      TF <- rev(traj)[i] == "S"
    }
    TF
  },i=i))
  Sy[i] <- sum(TR$prob[Si])
}
# the asymptotic prevalence functions (same as those
# returned by fundamental matrix)
# Stacked bars of survivorship split into H (green) and S (gray) by age.
pdf("PAA2018/Poster/Figures/TrajPrev.pdf", width = 5, height = 12)
plot(NULL, xlim = c(0, 8), ylim = c(0, maxy), axes = FALSE, xlab = "", ylab = "",asp=asp)
rect(0:5,0,1:6,Hx,col="#05872c",border=NA)
rect(0:5,Hx,1:6,Sx+Hx,col=gray(.8),border=NA)
# Step outline of total survivorship (H + S).
polygon(c(0,rep(1:5,each=2),6,6,rep(5:1,each=2),0),
  c(rep(Sx+Hx,each=2),rep(0,12)))
segments(0:6,0,0:6,-.01,xpd=TRUE)
text(0:6,-.01,0:6,pos=1,xpd=TRUE)
segments(0,c(0,1),-.01*asp,c(0,1),xpd=TRUE)
text(0,c(0,1),c(0,1),pos=2,xpd=TRUE)
dev.off()
# same prevalence right aligned (non-symmatrical)
# Same stacked bars, but classified by time-to-death (Hy / Sy).
pdf("PAA2018/Poster/Figures/TrajPrevTTD.pdf", width = 5, height = 12)
plot(NULL, xlim = c(0, 8), ylim = c(0, maxy), axes = FALSE, xlab = "", ylab = "",asp=asp)
rect(0:5,0,1:6,Hy,col="#05872c",border=NA)
rect(0:5,Hy,1:6,Hy+Sy,col=gray(.8),border=NA)
polygon(c(0,rep(1:5,each=2),6,6,rep(5:1,each=2),0),
  c(rep(Sy+Hy,each=2),rep(0,12)))
segments(0:6,0,0:6,-.01,xpd=TRUE)
text(0:6,-.01,0:6,pos=1,xpd=TRUE)
segments(0,c(0,1),-.01*asp,c(0,1),xpd=TRUE)
text(0,c(0,1),c(0,1),pos=2,xpd=TRUE)
dev.off()
# get actual proportions, show non-symmetry
# Proportion sick by age vs by time-to-death on the same axes.
pdf("PAA2018/Poster/Figures/PrevPropCompare.pdf")
plot(0:5,Sx/(Sx+Hx),type='l',ylim=c(0,1),axes=FALSE,xlab="",ylab="")
lines(0:5,Sy/(Sy+Hy))
axis(1);axis(2,las=1)
dev.off()
# how about spell duration prevalence
# this is trajectory nr 113, used as example in poster
pdf("PAA2018/Poster/Figures/TrajExample.pdf", width = 5, height = 5)
plot(NULL, xlim=c(0,6),ylim=c(0,1),xlab="",ylab = "",axes=FALSE)
drawTraj(c("H","S","H","H","S","S"),y=.4,h=.2,H="#05872c",S=gray(.8))
axis(1)
dev.off()
# same, drawn in PC in even steps. Narrower traj (actually has no width)
pdf("PAA2018/Poster/Figures/TrajExamplePC.pdf", width = 5, height = 12)
plot(NULL, xlim=c(0,13),ylim=c(0,7),xlab="",ylab = "",axes=FALSE,asp=1)
for (i in 0:8){
  # Same trajectory drawn repeatedly along the period-cohort diagonal.
  drawTrajPC(traj=c("H","S","H","H","S","S"),x=i,H="#05872c",S=gray(.8),w=.5)
}
#drawTrajPC(traj=c("H","S","H","H","S","S"),x=0,H="#05872c",S=gray(.8),w=1)
axis(1,at=0:12,pos=-.1)
axis(2,las=1,at=0:6,pos=-.1)
dev.off()
# zoom in to show period equivalency
pdf("PAA2018/Poster/Figures/TrajExamplePCzoom.pdf", width = .4*5, height = 2.5*5)
par(xaxs="i",yaxs="i",mai=c(0,0,0,0))
plot(NULL, xlim=c(4.8,5.2),ylim=c(2,4.5),xlab="",ylab = "",axes=FALSE,asp=1)
for (i in 0:8){
  drawTrajPC(traj=c("H","S","H","H","S","S"),x=i,H="#05872c",S=gray(.8),w=.5)
}
#drawTrajPC(traj=c("H","S","H","H","S","S"),x=0,H="#05872c",S=gray(.8),w=1)
#axis(1,at=0:12,pos=-.1)
#axis(2,las=1,at=0:6,pos=-.1)
dev.off()
# Weighted T/A tabulation over all trajectories, normalized to sum to 1.
TAlist <- lapply(trajs,get_TA,state="S",probs=probs,radix=1)
TA <- colSums(do.call("rbind",TAlist))
TA <- TA / sum(TA)
# used for final symmetry figure in poster
pdf("PAA2018/Poster/Figures/TAdist.pdf")
barplot(TA, space = 0,las=1, col = gray(.8))
dev.off()
# sickness distributions.
# how about spell duration distribution instead? Not used in poster.
#traj <- trajs[[100]]
#EPL <- colSums(do.call(rbind,lapply(trajs, function(traj,state="S",probs){
# w <- get_probsHS(traj = traj, probs = probs)
# durs<- rep(0,6)
# names(durs) <- 1:6
# rl <- rle(traj)
# val <- rl$values
# len <- rl$lengths
# episodes <- table(len[val == state]) * w
# if (length(episodes) > 0){
# durs[names(episodes)] <- c(episodes)
# }
# durs
# },probs=probs)))
#
#plot(1:6,EPL,type='o',pch=16,axes=FALSE,xlab="",ylab="")
#axis(1)
#axis(2,las=1)
#barplot(EPL,space=0,las=1)
# these were experiments for that final figure:
# really you can see variation better in logged plot,
# but trans probs end up making a straight line in log space!
#TA <- TA / sum(TA)
#plot(NULL,xlim=c(-6,6),ylim=c(0,.5),axes = FALSE,xlab="",ylab="")
#
#
#plot(-6:6,c(rev(TA),TA[-1]), type='l',
# pch=16,
# xlim = c(-6,6),
# axes=FALSE, xlab = "",ylab = "",
# log='y')
#axis(1,at=-6:6,labels=c(c(6:0,1:6)))
#axis(2,las=1,at=1/10^(0:5),labels=c("1","1/10","1/100","1/1000","1/10k","1/100k"),xpd=TRUE)
#abline(v=0)
#
#
#segments(-6,0,6,0)
#text(-6:6,0,c(6:0,1:6),pos=1,xpd=TRUE)
#
#pdf("Figures/ToyDist.pdf")
#par(mai=c(.5,.2,.5,0))
#plot(NULL, xlim = c(-4,4),ylim = c(0,57000), axes=FALSE, xlab = "",ylab = "")
#segments(-4.1,TA[-1],1:4,TA[-1],col=gray(.6))
#text(-3.7, TA[2:4],TA[2:4], pos = 3)
#text(-4.1,c(-2000,TA[5]+300),c(0,TA[5]),pos=2,xpd=TRUE)
#segments(-4.1,0,-4.2,-2000,xpd=TRUE)
#segments(-4.1,0,-4.2,TA[5]+300,xpd=TRUE)
#
#segments(0:4,TA,0:4,0)
#segments(0:-4,TA,0:-4,0)
#lines(-4:4,c(rev(TA[-1]),TA))
#segments(-4,0,4,0)
#text(-4:4,0,c(4:0,1:4),pos=1,xpd=TRUE)
#text(-2,-6000,"time spent",xpd=TRUE,cex=1.5)
#text(2,-6000,"time left",xpd=TRUE,cex=1.5)
#segments(0,-4000,0,-8000,xpd=TRUE)
#text(-3.9,61000,"Count",cex=1.5,xpd=TRUE)
#dev.off()
|
a7be1a80054e1eb0c44d8e31eec5396415bdca62
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mcmc/examples/foo.Rd.R
|
f96e782c49e467c3d58517d7fd0e8635c36514de
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 235
|
r
|
foo.Rd.R
|
# Auto-extracted example code for the 'foo' dataset shipped with the mcmc
# package: simulated logistic-regression data with binary response y and
# predictors x1, x2, x3.
library(mcmc)
### Name: foo
### Title: Simulated logistic regression data.
### Aliases: foo
### Keywords: datasets
### ** Examples
# (library(mcmc) is called again inside the extracted example; harmless.)
library(mcmc)
data(foo)
# Fit a logistic regression of y on x1..x3 and print the usual summary.
out <- glm(y ~ x1 + x2 + x3, family = binomial, data = foo)
summary(out)
|
371ebb5b692123e2063517ccc60d8b1c011dadcf
|
67af11952ff7ef35d9cdbf490351abfb020b34da
|
/man/pm_eloRunTourneyELO.Rd
|
ea3f9165e096b710c72da47f9669278256a85a51
|
[] |
no_license
|
quietsnooze/pmpackage
|
1c68d1f1aa70c53a81fc1abc2e0182dec6ce30b0
|
45bf5a3694cfb2c162f65855a2b8a827649198c4
|
refs/heads/master
| 2021-04-15T09:26:15.434185
| 2021-02-28T17:57:34
| 2021-02-28T17:57:34
| 126,635,991
| 1
| 1
| null | 2018-12-29T13:57:30
| 2018-03-24T20:05:07
|
R
|
UTF-8
|
R
| false
| true
| 1,001
|
rd
|
pm_eloRunTourneyELO.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eloFunctions.R
\name{pm_eloRunTourneyELO}
\alias{pm_eloRunTourneyELO}
\title{pm_eloRunTourneyELO}
\usage{
pm_eloRunTourneyELO(
tournamentSetup,
keyCols = c("roundNum", "player_name", "opponent_name", "match_date", "Tournament"),
simCols = c("predictions", "simWinner", "winnerName"),
roundeloDB,
roundmatchDB
)
}
\arguments{
\item{tournamentSetup}{A dataframe structured to describe the tournament}

\item{keyCols}{Key columns for reporting results (DO NOT CHANGE FOR NOW)}

\item{simCols}{Key columns for debugging results (DO NOT CHANGE FOR NOW)}
\item{eloDB}{a dictionary of player ELO scores, or NA to create a new one}
\item{matchDB}{a dictionary of matches a player has played, or NA}
}
\value{
A dataframe with winners (simWinner, winnerName) for each match in the Tournament
}
\description{
Run a simulated Tournament based on input ELO databases and a dataframe that describes the draw
}
\examples{
}
|
0cab03a890dda112ef0b0aebd816ad597f42808d
|
4a73d57edc5ef1ea7798549ef74210eb7be51883
|
/shotspotter.R
|
cb1c46b0a2c56e4e2bf4567d9a04e2c417c29b11
|
[] |
no_license
|
AlistairGluck/shotspotter
|
bd2be72c4fa90a951f651b74a5fa436cfac3c74f
|
e8f01782cd328229fc59b86a581a4106131376a6
|
refs/heads/master
| 2020-05-15T06:53:17.434225
| 2019-04-23T04:50:48
| 2019-04-23T04:50:48
| 182,131,656
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,847
|
r
|
shotspotter.R
|
# Animated map of ShotSpotter-detected gunfire incidents in Fresno, CA.
# Downloads the incident data from the Justice Tech Lab, keeps one row per
# detected incident, restricts to plausible coordinates, and renders an
# animated, cumulative map saved as a GIF for the Shiny app.
# (Idiom fix: assignments use `<-` consistently instead of `=`.)

# Loading packages
library(tigris)
library(tidyverse)
library(maps)
library(readr)
library(fs)
library(sf)
library(lubridate)
library(gganimate)
library(transformr)
library(ggthemes)

# Reading in the CSV file with data from Fresno; column types are declared
# explicitly so parsing problems surface immediately.
fresno <- read_csv("http://justicetechlab.org/wp-content/uploads/2018/09/fresno_sst.csv", col_types = cols(
  address = col_character(),
  city = col_character(),
  state = col_character(),
  datetime = col_character(),
  numrounds = col_double(),
  shotspotterflexid = col_double(),
  lat = col_double(),
  long = col_double()
))

# Used tigris package to obtain shape files for Fresno.
# NAME10 is the variable that corresponds to the municipality's name.
shapes <- urban_areas(class = "sf") %>%
  filter(NAME10 == "Fresno, CA")

# Converting the "datetime" variable to a POSIXct class rather than a
# character string.
fresno <- fresno %>%
  mutate(datetime = as.POSIXct(datetime, format = "%m/%d/%Y %H:%M:%OS"))

# Keep only the first observation per shotspotterflexid (the ID assigned to
# each shooting incident identified by the program), i.e. one row per incident.
fresno <- fresno[!duplicated(fresno$shotspotterflexid), ]

# Keep usable rows: drop missing coordinates, restrict to the Fresno bounding
# box, derive the calendar date of each incident, and sort chronologically so
# the animation frames advance in time.
fresno_final <- fresno %>%
  select(long, lat, numrounds, datetime) %>%
  filter(!is.na(lat)) %>%
  filter(!is.na(long)) %>%
  filter(long > -120 & long < -119.45) %>%
  filter(lat > 36.5 & lat < 37) %>%
  mutate(date_shot = date(datetime)) %>%
  arrange(date_shot)

# Convert incident coordinates into an sf point layer (EPSG 4326).
locations <- st_as_sf(fresno_final, coords = c("long", "lat"), crs = 4326)

# Creating a map out of the shapes data
ggplot(data = shapes) +
  geom_sf() +
  # Delineating the number of rounds fired in each shooting incident by colour
  # and decreasing the transparency to show overlap of the points
  geom_sf(data = locations, aes(colour = numrounds, alpha = 0.6)) +
  # Adding a source note
  labs(caption = "Source: Justice Tech Lab ShotSpotter Data") +
  # Removing the legend for alpha
  guides(alpha = FALSE) +
  # Changing the colour scale of the plot
  scale_colour_gradient(name = "Rounds Fired",
                        low = "mediumblue",
                        high = "orangered3") +
  # Moving the legend position so it doesn't cover the map
  theme(legend.position = c(0.8, 0.1)) +
  # Centering the title of the plot
  theme(plot.title = element_text(hjust = 0.5)) +
  # Applying the map theme
  theme_map() +
  # Making each date a different frame in the animation
  transition_time(date_shot) +
  ease_aes() +
  # Leaving each previous frame as a permanent mark on the map
  shadow_mark(past = TRUE) +
  # Adding a title that includes the date shown in the present frame
  ggtitle("Gunshots Fired in Fresno, California on {frame_time}")

# Save the animation as a GIF to include in the app
anim_save("shotspotter/fresno.gif", animation = last_animation())
|
ea0998fc20fb12c4f0995949d11aae7947943027
|
b6af1fe1c1ed1b3d2b56779f29644fd8d30fc4f2
|
/man/rpsblast.Rd
|
76038a6516f31a7bd2451dbda53e14f69efab56e
|
[] |
no_license
|
418704194/blastr
|
8f59a25f8433ad94afe585aa28a8472aa5e146a6
|
f4acc87bf0e082e8d58d7c6f254af90b08dc2583
|
refs/heads/master
| 2020-05-02T10:10:54.162009
| 2019-03-27T00:52:01
| 2019-03-27T00:52:01
| 177,890,346
| 1
| 0
| null | 2019-03-27T00:38:08
| 2019-03-27T00:38:08
| null |
UTF-8
|
R
| false
| true
| 1,431
|
rd
|
rpsblast.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blast.r
\name{rpsblast}
\alias{rpsblast}
\title{Wrapper for the NCBI Reversed Position Specific Blast}
\usage{
rpsblast(query, db = "Cdd", out = NULL, outfmt = "xml", max_hits = 20,
evalue = 10, remote = FALSE, ...)
}
\arguments{
\item{...}{Additional parameters passed on to the BLAST command line
tools. See \href{http://www.ncbi.nlm.nih.gov/books/NBK1763/#CmdLineAppsManual.4_User_manual}{here}
for a description of common options.}
\item{query}{Query sequences as path to a FASTA file,
an \code{\linkS4class{XStringSet}} object, or a character vector.}
\item{db}{The database to BLAST against (default: Cdd).}
\item{out}{(optional) Output file for alignment.
If \code{NULL} and the BLAST result is returned as
a \code{\linkS4class{BlastReport}} or \code{\linkS4class{BlastTable}}
object.}
\item{outfmt}{Output format, \code{'xml'} or \code{'table'}.}
\item{max_hits}{How many hits to return (default: 20).}
\item{evalue}{Expect value cutoff (default: 10).}
\item{remote}{Execute search remotely.}
}
\description{
Run \code{rpsblast()} without arguments to print usage and
arguments description.
}
\examples{
##
}
\seealso{
Other blast functions: \code{\link{blastn}},
\code{\link{blastp}}, \code{\link{blastx}},
\code{\link{makeblasttdb}}, \code{\link{qblast}},
\code{\link{tblastn}}, \code{\link{tblastx}}
}
\concept{blast functions}
|
87950f0e42621b89c5b82b0bf9bbc4d3242d19c9
|
be5a158c7571df8faa94c50ce174ea0d41c29c67
|
/TwoProps/ui.R
|
2b0a657d4036b98f05804d209b8f42de714af359
|
[] |
no_license
|
nxknuepp/ShinyPrograms
|
602dd136fbca71e88b16c4cea2a5cde717da25ae
|
7124aff1df776e95ba27e6cec0aebbf919173559
|
refs/heads/master
| 2020-07-21T12:41:16.303168
| 2019-09-08T20:54:57
| 2019-09-08T20:54:57
| 206,867,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 899
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#

library(shiny)

# UI for a two-proportion simulation: the user supplies sample sizes (n1, n2)
# and proportions (p1, p2) plus an observed difference of sample proportions;
# the server side (not shown in this file) draws the result in "propPlot".
shinyUI(fluidPage(

  # Application title
  titlePanel("Simulation: Difference of Two Proportions"),

  # Sidebar with numeric inputs for the two samples and the observed statistic
  sidebarLayout(
    sidebarPanel(
      # Sample 1: size and proportion
      numericInput("n1",
                   "n1", 50),
      numericInput("p1",
                   "p1", 0.5),
      # Sample 2: size and proportion
      numericInput("n2",
                   "n2", 50),
      numericInput("p2",
                   "p2", 0.5),
      # Observed p1-hat - p2-hat, drawn as a vertical reference line
      numericInput("value",
                   "Our p1-hat - p2-hat (solid, vertical line)",
                   -2),
      # submitButton: inputs are only sent to the server when clicked
      submitButton("Run Simulation")
    ),

    # Show a plot of the generated distribution
    mainPanel(
      plotOutput("propPlot")
    )
  )
))
|
5bc5f97cdde8e1ea1ebb22192096f209edffd384
|
388d7a62bbbd144f243438f9e6a5a456eb2cce3c
|
/R/tCol.R
|
feca2cf5428f83bc4ee79552dcbb1afd33558033
|
[] |
no_license
|
aspillaga/fishtrack3d
|
64c7dcb2a97a833ef830d845e8bfbc3aaf387827
|
2be695e0f88d97e095f074acd17240cb8878dbbc
|
refs/heads/master
| 2022-01-18T10:50:53.776454
| 2019-05-23T15:09:18
| 2019-05-23T15:09:18
| 118,634,135
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,261
|
r
|
tCol.R
|
#' Create colors with transparency
#'
#' This function easily creates transparent colors providing a color name and
#' the desired proportion of transparency
#'
#' @param color character vector with the names or codes of the original
#'     colors.
#' @param trans percentage of transparency to apply to the colors (0 to 100).
#'     If a unique value is provided, the same percentage of transparency will
#'     be applied to all the colors. If it is a vector of the same length as
#'     \code{color}, a given transparency will be applied to each color.
#' @param name optional name to give to the resulting colors.
#'
#' @return Invisibly, a character vector of hexadecimal color codes carrying
#'     the requested alpha channel (trans = 0 is opaque, trans = 100 is fully
#'     transparent).
#'
#' @importFrom grDevices col2rgb
#' @importFrom grDevices rgb
#'
#' @export
#'
#' @examples
#' plot(rnorm(1:100), rnorm(1:100), pch = 16, cex = 2, col = tCol("black", 70))
#' points(rnorm(1:100), rnorm(1:100), pch = 16, cex = 2, col = tCol("red", 40))
#' points(rnorm(1:100), rnorm(1:100), pch = 16, cex = 2, col = tCol("blue", 60))
#'
#'
tCol <- function(color, trans = 50, name = NULL) {

  # Check if arguments are correct =============================================
  # Use is.character() rather than comparing class() (class() may have
  # length > 1), and the scalar, short-circuiting `||`/`&&` in conditions.
  if (is.null(color) || !is.character(color)) {
    stop("Color names or codes must be provided as 'character'.",
         call. = FALSE)
  }
  if (length(trans) > 1 && length(trans) != length(color)) {
    stop(paste("Transparency values must be of length 1 or equal to the",
               "length of colors."), call. = FALSE)
  }
  if (any(trans < 0) || any(trans > 100)) {
    stop("Transparency values must be between 0 and 100.", call. = FALSE)
  }
  if (!is.null(name) && length(name) != length(color)) {
    stop("A unique name must be provided for each color.", call. = FALSE)
  }

  t.colors <- lapply(seq_along(color), function(c) {

    # Get the RGB values of the original color
    rgb.val <- col2rgb(color[c])

    # Transparency for this color: a single value is recycled, otherwise the
    # value in the matching position is used. Plain if/else replaces the
    # previous scalar ifelse() call (ifelse() is vectorized and only ever
    # returned the first element here).
    t <- if (length(trans) > 1) trans[c] else trans
    n <- if (is.null(name)) NULL else name[c]

    # Make a new color by setting the transparency with the alpha value
    # (0-255 scale; 100% transparency maps to alpha = 0).
    t.col <- rgb(rgb.val[1], rgb.val[2], rgb.val[3],
                 maxColorValue = 255,
                 alpha = (100 - t) * 255 / 100,
                 names = n)
    invisible(t.col)
  })

  invisible(unlist(t.colors))
}
|
e9eead68ec579dde2d7826a891f767711dd3ef42
|
2975fba6bf359214c55e7d936f896a5a4be3d8f5
|
/tests/testthat/test-plotRisk.R
|
c54359967040299977f37178ac3bb264870b4158
|
[] |
no_license
|
tagteam/riskRegression
|
6bf6166f098bbdc25135f77de60122e75e54e103
|
fde7de8ca8d4224d3a92dffeccf590a786b16941
|
refs/heads/master
| 2023-08-08T03:11:29.465567
| 2023-07-26T12:58:04
| 2023-07-26T12:58:04
| 36,596,081
| 38
| 14
| null | 2023-05-17T13:36:27
| 2015-05-31T09:22:16
|
R
|
UTF-8
|
R
| false
| false
| 2,186
|
r
|
test-plotRisk.R
|
### test-plotRisk.R ---
#----------------------------------------------------------------------
## Author: Thomas Alexander Gerds
## Created: Sep 15 2022 (16:04)
## Version:
## Last-Updated: Sep 16 2022 (12:56)
##           By: Thomas Alexander Gerds
##     Update #: 4
#----------------------------------------------------------------------
##
### Commentary:
## Regression test: Score()/plotRisk() must handle data with more than one
## competing risk, including degenerate censoring patterns (no censoring
## before the horizon, no censoring at all, no event-free at horizon).
### Change Log:
#----------------------------------------------------------------------
##
### Code:
library(riskRegression)
library(testthat)
library(rms)
library(prodlim)
library(cmprsk)
library(survival)
library(data.table)
library(lava)
test_that("More than one competing risk",{
    # Simulate competing-risk data, then relabel events 9:17 to a second
    # competing cause ("cr2") so there are two competing risks besides cause 1.
    set.seed(8)
    learndat = sampleData(80,outcome="competing.risk")
    testdat = sampleData(140,outcome="competing.risk")
    setkey(learndat,time)
    setkey(testdat,time)
    learndat[,event := as.character(event)]
    testdat[,event := as.character(event)]
    learndat[9:17,event := "cr2"]
    testdat[9:17,event := "cr2"]
    # Fine-Gray regression and cause-specific Cox models for cause 1.
    m1 = FGR(Hist(time,event)~X2+X7+X9,data=learndat,cause=1)
    m2 = CSC(Hist(time,event)~X2+X7+X9,data=learndat,cause=1)
    xcr=Score(list("FGR"=m1,"CSC"=m2),formula=Hist(time,event)~1,
              data=testdat,summary="risks",null.model=0L,times=c(3,5))
    plotRisk(xcr,times=3)
    # check when no censored before time horizon
    testdat[time <= 3 & event == 0, event := 1]
    xcr=Score(list("FGR"=m1,"CSC"=m2),formula=Hist(time,event)~1,
              data=testdat,summary="risks",null.model=0L,times=c(3,5))
    plotRisk(xcr,times=3)
    # check when no censored in all data
    testdat[event == 0, event := 1]
    xcr=Score(list("FGR"=m1,"CSC"=m2),formula=Hist(time,event)~1,
              data=testdat,summary="risks",null.model=0L,times=c(3,5))
    plotRisk(xcr,times=3)
    # check when no event-free at horizon
    testdat[event == 0, event := 1]
    Score(list("FGR"=m1,"CSC"=m2),formula=Hist(time,event)~1,
          data=testdat,summary="risks",null.model=0L,times=c(3,8))
    # all predicted risks of model m2 are NA
    # NOTE(review): `xcr=` below makes the expression a *named* argument of
    # expect_error() rather than the `object` to test; it likely should read
    # expect_error(Score(...)) — confirm intent before changing.
    expect_error(xcr=Score(list("FGR"=m1,"CSC"=m2),formula=Hist(time,event)~1,
                           data=testdat,summary="risks",null.model=0L,times=c(3,8)))
})


######################################################################
### test-plotRisk.R ends here
|
a23922d3d29aecbe7c0183c7dbb6c700c62a98ed
|
857aa1256af30137c47baeed06bc45b412a5a27c
|
/google_schoolar_scrapping/scrape.R
|
3779a8d8b4e71508467e906f772933d73c9c91e8
|
[] |
no_license
|
vinayvamshirr/r-blogs-examples
|
c2a367ecd0358cdbaffbdbf23c4341a627476fe6
|
fa7dfd9c0fc26479e8a475098cef67767289bfe0
|
refs/heads/master
| 2021-06-05T01:05:54.669298
| 2016-07-23T23:48:47
| 2016-07-23T23:48:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,900
|
r
|
scrape.R
|
library(rvest)
library(ggplot2)

# Scrape a Google Scholar profile: per-paper citation counts, then the
# coauthor list with citation counts and affiliations, and plot both.
# Fixes: TRUE/FALSE spelled out instead of T/F, and data-frame columns are
# named explicitly instead of relying on as.data.frame() deriving the column
# name from the variable name (ggplot below depends on the name "citations").

# ---- Per-paper citation counts -------------------------------------------
page <- read_html("https://scholar.google.com/citations?user=sTR9SIQAAAAJ&hl=en&oi=ao")

# CSS selectors were identified with http://selectorgadget.com/
citations <- page %>%
  html_nodes("#gsc_a_b .gsc_a_c") %>%
  html_text() %>%
  as.numeric()
citations

barplot(citations, main="How many times has each paper been cited?",
        ylab='Number of citations', col="skyblue", xlab="")

# ---- Coauthor names ------------------------------------------------------
page <- read_html("https://scholar.google.com/citations?view_op=list_colleagues&hl=en&user=sTR9SIQAAAAJ")

coauthor_names <- page %>%
  html_nodes(css=".gsc_1usr_name a") %>%
  html_text()

Coauthors <- data.frame(Coauthors = as.character(coauthor_names),
                        stringsAsFactors = FALSE)
head(Coauthors)
dim(Coauthors)
str(Coauthors)

# ---- Coauthor citation counts --------------------------------------------
page <- read_html("https://scholar.google.com/citations?view_op=list_colleagues&hl=en&user=sTR9SIQAAAAJ")

citation_text <- page %>%
  html_nodes(css = ".gsc_1usr_cby") %>%
  html_text()
citation_text

# Drop the "Cited by" prefix and keep the numeric counts. The column must be
# named "citations": ggplot() below maps aes(y = citations).
citations <- data.frame(citations = as.numeric(gsub('Cited by', '', citation_text)))

# ---- Coauthor affiliations -----------------------------------------------
page <- read_html("https://scholar.google.com/citations?view_op=list_colleagues&hl=en&user=sTR9SIQAAAAJ")

affiliation_text <- page %>%
  html_nodes(css = ".gsc_1usr_aff") %>%
  html_text()

# Column name 'Affilation' (sic) is kept for compatibility with any
# downstream code referring to it.
affilation <- data.frame(Affilation = affiliation_text, stringsAsFactors = FALSE)

# ---- Combine and plot ----------------------------------------------------
cauthors <- cbind(Coauthors, citations, affilation)
cauthors

# Reorder factor levels by citation count so the flipped bar chart is sorted.
cauthors$Coauthors <- factor(cauthors$Coauthors,
                             levels = cauthors$Coauthors[order(cauthors$citations, decreasing = FALSE)])

ggplot(cauthors, aes(x = Coauthors, y = citations)) +
  geom_bar(stat="identity", fill="#ff8c1a", size=7) +
  theme(axis.title.y = element_blank()) +
  ylab("# of citations") +
  theme(plot.title = element_text(size = 18, colour="blue"),
        axis.text.y = element_text(colour="grey20", size=12)) +
  ggtitle('Citations of his coauthors') +
  coord_flip()
|
3ec0c09b92c8c6ab026883a7e73501f003bcc515
|
885c4683202c5af87698f5ffbdb19a1905303737
|
/code/accuracy_vs_time/produce_plots.r
|
362b2a829652017e7367a2ff00bb17b14f431a7d
|
[] |
no_license
|
mauriziofilippone/preconditioned_GPs
|
ebc50cb85f06f59700257f25860ceaddc14a9775
|
d7bc09b6804ef002cc3fc6bbf936517578d7436e
|
refs/heads/master
| 2021-01-19T03:32:35.745442
| 2016-06-09T09:05:26
| 2016-06-09T09:05:26
| 51,930,826
| 14
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,561
|
r
|
produce_plots.r
|
## Code to produce plots of error versus time for GPs trained using CG and preconditioned CG

# Dataset selector: only the LAST uncommented assignment takes effect.
# Regression datasets: concrete / powerplant / protein.
# Classification datasets: credit / spam / eeg.
DATASET = "concrete"
DATASET = "powerplant"
DATASET = "protein"
DATASET = "credit"
DATASET = "spam"
DATASET = "eeg"

# Kernel selector (same comment-toggling convention as DATASET).
## KERNEL_TYPE = "RBF"
KERNEL_TYPE = "ARD"

# Global PostScript/PDF device defaults for all figures.
ps.options(width=10, height=8, paper="special", horizontal=F, pointsize=32)
pdf.options(width=10, height=8, pointsize=32)

TIME_IN_LOG = T	## Should the time axis of the plots be in log scale?

if(TIME_IN_LOG) XLAB = expression(log[10](seconds))
if(!TIME_IN_LOG) XLAB = "seconds"

# Y-axis labels per error measure; "RMSE" doubles as error rate for the
# classification datasets.
YLAB = list()
if(DATASET %in% c("concrete", "powerplant", "protein")) YLAB[["RMSE"]] = "RMSE"
if(DATASET %in% c("credit", "spam", "eeg")) YLAB[["RMSE"]] = "Error Rate"
YLAB[["NEG_LLIK"]] = "Negative Test Log-Lik"

# Mapping from this script's error-measure names to the file-name suffixes
# used by the GPstuff result files.
NAMES_ERROR_MEASURES_GPSTUFF = list()
NAMES_ERROR_MEASURES_GPSTUFF[["RMSE"]] = "MSE"
NAMES_ERROR_MEASURES_GPSTUFF[["NEG_LLIK"]] = "NMLL"

# Human-readable names for plot titles.
NAMES_KERNEL_TITLE_PLOT = list()
NAMES_KERNEL_TITLE_PLOT[["RBF"]] = "isotropic kernel"
NAMES_KERNEL_TITLE_PLOT[["ARD"]] = "ARD kernel"

NAMES_DATASET_TITLE_PLOT= list()
NAMES_DATASET_TITLE_PLOT[["concrete"]] = "Concrete"
NAMES_DATASET_TITLE_PLOT[["powerplant"]] = "Power Plant"
NAMES_DATASET_TITLE_PLOT[["protein"]] = "Protein"
NAMES_DATASET_TITLE_PLOT[["credit"]] = "Credit"
NAMES_DATASET_TITLE_PLOT[["spam"]] = "Spam"
NAMES_DATASET_TITLE_PLOT[["eeg"]] = "EEG"

## ## **************************************************
# Per-dataset run settings; these must match the file names produced by the
# experiments (STEPSIZE, number of probe vectors NRVECT, prediction
# intervals, number of cross-validation folds).
NRVECT = 4

if(DATASET == "concrete") {
    STEPSIZE = 1.0
    PREDICTEVERY = 5
    PREDICTEVERY_CHOL = 3
    NFOLDS = 5
}

if(DATASET == "powerplant") {
    STEPSIZE = 1.0
    PREDICTEVERY = 5
    PREDICTEVERY_CHOL = 3
    NFOLDS = 5
}

if(DATASET == "protein") {
    STEPSIZE = 1.0
    PREDICTEVERY = 5
    PREDICTEVERY_CHOL = 1
    NFOLDS = 3
}

if(DATASET == "credit") {
    STEPSIZE = 1.0
    PREDICTEVERY = 5
    PREDICTEVERY_CHOL = 3
    NFOLDS = 5
}

if(DATASET == "spam") {
    STEPSIZE = 1.0
    PREDICTEVERY = 5
    PREDICTEVERY_CHOL = 1
    NFOLDS = 5
}

if(DATASET == "eeg") {
    STEPSIZE = 1.0
    PREDICTEVERY = 5
    PREDICTEVERY_CHOL = 1
    NFOLDS = 3
}

# For each error measure: load the per-fold error-vs-time traces for each
# solver, average across folds (truncated to the shortest trace), add the
# GPstuff baselines (FITC/PITC/VAR), and write one comparison plot per
# measure into results/.
for(ERROR_MEASURE in c("RMSE", "NEG_LLIK")) {

    error_vs_time = list()

    # PCG/CG: average the fold traces; ntokeep is the shortest trace length
    # across folds so the matrices line up.
    for(SOLVER in c("PCG", "CG")) {
        ntokeep = Inf
        for(FOLD in 1:NFOLDS) {
            OPTIONS = paste(DATASET, KERNEL_TYPE, ERROR_MEASURE, SOLVER, "STEPSIZE", STEPSIZE, "Nr", NRVECT, "PREDICTEVERY", PREDICTEVERY, "FOLD", FOLD, sep="_")
            ntokeep = min(ntokeep, dim(read.table(paste("results/ERROR_VS_TIME_", OPTIONS, ".txt", sep="")))[1])
        }
        error_vs_time[[SOLVER]] = matrix(0, ntokeep, 2)
        for(FOLD in 1:NFOLDS) {
            OPTIONS = paste(DATASET, KERNEL_TYPE, ERROR_MEASURE, SOLVER, "STEPSIZE", STEPSIZE, "Nr", NRVECT, "PREDICTEVERY", PREDICTEVERY, "FOLD", FOLD, sep="_")
            error_vs_time[[SOLVER]] = error_vs_time[[SOLVER]] + read.table(paste("results/ERROR_VS_TIME_", OPTIONS, ".txt", sep=""))[1:ntokeep,] / NFOLDS
        }
    }

    # Same averaging for the Cholesky baseline (different file-name pattern).
    ntokeep = Inf
    for(FOLD in 1:NFOLDS) {
        OPTIONS = paste(DATASET, KERNEL_TYPE, ERROR_MEASURE, "CHOL", "PREDICTEVERY", PREDICTEVERY_CHOL, "FOLD", FOLD, sep="_")
        ntokeep = min(ntokeep, dim(read.table(paste("results/ERROR_VS_TIME_", OPTIONS, ".txt", sep="")))[1])
    }
    error_vs_time[['CHOL']] = matrix(0, ntokeep, 2)
    for(FOLD in 1:NFOLDS) {
        OPTIONS = paste(DATASET, KERNEL_TYPE, ERROR_MEASURE, "CHOL", "PREDICTEVERY", PREDICTEVERY_CHOL, "FOLD", FOLD, sep="_")
        error_vs_time[['CHOL']] = error_vs_time[['CHOL']] + read.table(paste("results/ERROR_VS_TIME_", OPTIONS, ".txt", sep=""))[1:ntokeep,] / NFOLDS
    }

    # Directory holding the corresponding GPstuff comparison results.
    if(KERNEL_TYPE == "RBF") {
        if(DATASET == "concrete") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/", KERNEL_TYPE, "/RBF_RESULTS_CONCRETE/", sep="")
        if(DATASET == "powerplant") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/", KERNEL_TYPE, "/RBF_RESULTS_POWER/", sep="")
        if(DATASET == "protein") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/", KERNEL_TYPE, "/RBF_RESULTS_PROTEIN/", sep="")
    }

    ## if(KERNEL_TYPE == "ARD") {
    ##     if(DATASET == "concrete") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/", KERNEL_TYPE, "/ARD_RESULTS_CONC/", sep="")
    ##     if(DATASET == "powerplant") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/", KERNEL_TYPE, "/ARD_RESULTS_POWER/", sep="")
    ##     if(DATASET == "protein") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/", KERNEL_TYPE, "/ARD_RESULTS_PROTEIN/", sep="")
    ##     if(DATASET == "credit") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_CREDIT/", sep="")
    ##     if(DATASET == "spam") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/CLASS/CL_RESULTS_SPAM/", sep="")
    ## }

    if(KERNEL_TYPE == "ARD") {
        if(DATASET == "concrete") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/post_submission_results/REG_CONC_RES/", sep="")
        if(DATASET == "powerplant") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/post_submission_results/REG_POWER_RES/", sep="")
        if(DATASET == "protein") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/post_submission_results/REG_PROT_RES/", sep="")
        if(DATASET == "credit") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/post_submission_results/CL_CREDIT_RES/", sep="")
        if(DATASET == "spam") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/post_submission_results/CL_SPAM_RES/", sep="")
        if(DATASET == "eeg") base_dir_gpstuff = paste("../pcgComparison/GpStuff Comparison/post_submission_results/CL_EEG_RES/", sep="")
    }

    # Log-transform the time axis of the traces loaded so far.
    # NOTE(review): this runs BEFORE FITC/PITC/VAR are added below, so their
    # time columns are never log10-transformed here — confirm the GPstuff
    # files already store log10(seconds).
    if(TIME_IN_LOG) {
        for(i in 1:length(error_vs_time)) error_vs_time[[i]][,1] = log10(error_vs_time[[i]][,1])
    }

    # GPstuff sparse-approximation baselines (first column of the files is
    # dropped).
    error_vs_time[['FITC']] = read.table(paste(base_dir_gpstuff, "FIC_", NAMES_ERROR_MEASURES_GPSTUFF[[ERROR_MEASURE]], ".txt", sep=""))[,-1]
    error_vs_time[['PITC']] = read.table(paste(base_dir_gpstuff, "PIC_", NAMES_ERROR_MEASURES_GPSTUFF[[ERROR_MEASURE]], ".txt", sep=""))[,-1]
    error_vs_time[['VAR']] = read.table(paste(base_dir_gpstuff, "VAR_", NAMES_ERROR_MEASURES_GPSTUFF[[ERROR_MEASURE]], ".txt", sep=""))[,-1]

    # Common axis limits across all methods.
    xlim = ylim = c(+Inf, -Inf)
    for(i in 1:length(error_vs_time)) {
        if(xlim[1] > min(error_vs_time[[i]][,1])) xlim[1] = min(error_vs_time[[i]][,1])
        if(xlim[2] < max(error_vs_time[[i]][,1])) xlim[2] = max(error_vs_time[[i]][,1])
        if(ylim[1] > min(error_vs_time[[i]][,2])) ylim[1] = min(error_vs_time[[i]][,2])
        if(ylim[2] < max(error_vs_time[[i]][,2])) ylim[2] = max(error_vs_time[[i]][,2])
    }

    MAIN = paste(NAMES_DATASET_TITLE_PLOT[[DATASET]], NAMES_KERNEL_TITLE_PLOT[[KERNEL_TYPE]], sep=" - ")

    # One dash pattern per method so lines remain distinguishable in print.
    linetypes = c("F1", "11", "22", "42", "2111", "3111")

    pdf(paste("results/PLOT_", DATASET, "_", KERNEL_TYPE, "_", ERROR_MEASURE, ".pdf", sep=""))
    par("mar"=c(3.0,3.0,1.1,0.3), "mgp"=c(1.8,0.6,0))
    # First method establishes the axes; remaining methods are overlaid.
    plot(error_vs_time[[1]], col=1, lwd=8, type="l", xlab=XLAB, ylab=YLAB[[ERROR_MEASURE]], xlim=xlim, ylim=ylim, main = MAIN)
    for(i in 2:length(error_vs_time)) {
        points(error_vs_time[[i]], col=i, lwd=8, lty=linetypes[i], type="l")
    }
    ## legend(0.6*(max(xlim)-min(xlim))+min(xlim), max(ylim), lwd=8, col=c(1:length(error_vs_time)), legend=names(error_vs_time))
    dev.off()
}

## ## ## ## ## ************************************************** Create a legend "box"
## pdf.options(width=9, height=0.7, pointsize=16)
## pdf("results/PLOT_COMPARE_ERROR_VS_TIME_LEGEND.pdf")
## par("mar"=c(0.2,0.2,0.3,0.2), "mgp"=c(0,0,0))
## plot(1, type = "n", axes=FALSE, xlab="", ylab="")
## plot_colors <- c("blue","black", "green", "orange", "pink")
## legend("top", inset=0, lwd=4, col=c(1:length(error_vs_time)), legend=names(error_vs_time), horiz = TRUE, box.lwd=2, lty=linetypes)
## dev.off()
|
3f74fbc5f6a266b5867a88f054a546e536744808
|
8c98d9d669743136fa8a11bc3751162a45518bb2
|
/R/01_early_examples/json_file.R
|
3eba92bee8e3501b06915c9b5f6323af7d9d57b0
|
[] |
no_license
|
retodomax/Bauland
|
e9e2c4b62c433ce3f6540f55f9e90d2613cfb34d
|
0a7d7a3285b4f97e123e363f354780b549ee950b
|
refs/heads/master
| 2022-03-09T05:11:32.876186
| 2019-10-26T21:16:43
| 2019-10-26T21:16:43
| 212,530,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,141
|
r
|
json_file.R
|
## Try to flip the y coordinate
# 1) import LV03 (CH1903) coordinates of municipals
##    --> flip coordinate
# 2) transform to WGS84
# 3) plot with leaflet

# 1) ----------------------------------------------------------------------
# Read the Swiss canton polygons as an sp object.
ch_cant <- geojsonio::geojson_read("swiss-maps/topo/ch-cantons.json",
                                   what = "sp")
library(sf)
library(leaflet)
ch_sf <- st_as_sf(ch_cant)
# NOTE(review): `nr <- 1` looks like a leftover from interactive testing;
# the loop below immediately overwrites nr.
nr <- 1
# Walk every polygon ring of each of the 26 cantons and negate its y
# coordinate.
for(nr in 1:26){
  for (i in 1:length(st_geometry(ch_sf)[[nr]])) {
    # NOTE(review): the next line assigns the x column to itself (a no-op);
    # only the y column is actually flipped.
    st_geometry(ch_sf)[[nr]][[i]][[1]][, 1] <- st_geometry(ch_sf)[[nr]][[i]][[1]][, 1]
    st_geometry(ch_sf)[[nr]][[i]][[1]][, 2] <- -st_geometry(ch_sf)[[nr]][[i]][[1]][, 2]
  }
}
# Back to an sp object for the CRS assignment below.
ch_cant <- as(ch_sf, 'Spatial')

# 2) ----------------------------------------------------------------------
# Declare the source CRS, then reproject to WGS84 for leaflet.
library(sp)
ch_cant@proj4string <- CRS("+init=epsg:21781") # CH1903 / LV03 seems to have this epsg code
ch_cant2 <- spTransform(ch_cant, CRS("+init=epsg:4326"))

# 3) ----------------------------------------------------------------------
# Interactive map with semi-transparent canton polygons over the base tiles.
leaflet(ch_cant2) %>%
  addTiles() %>%
  addPolygons(stroke = FALSE, smoothFactor = 0.3, fillOpacity = 0.3)
|
0d1c6d4f355e3c913921a93c60d52968a58208b6
|
92e979e0e55cf88078795becc261a20850004acb
|
/man/calc_ratio.Rd
|
1b332ea8b5071b54dcd6f5e2469363b6d85f5d07
|
[] |
no_license
|
rjnell/digitalPCRsimulations
|
c47becb1e19e2a3fb8f5056fc02f78d8c25e3f86
|
b341cf024b262cbc0cb77a6745c05b71247fd70d
|
refs/heads/master
| 2023-02-07T07:47:51.514474
| 2023-02-04T14:00:03
| 2023-02-04T14:00:03
| 295,371,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,156
|
rd
|
calc_ratio.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calc_ratio.R
\name{calc_ratio}
\alias{calc_ratio}
\title{Calculate ratio with confidence intervals of two values with known confidence intervals.}
\usage{
calc_ratio(input_a, input_b)
}
\arguments{
\item{input_a}{A vector specifying the value, lower and higher limit of the corresponding confidence interval.}
\item{input_b}{A vector specifying the value, lower and higher limit of the corresponding confidence interval.}
}
\value{
Ratio with confidence interval.
}
\description{
As described in Dube et al., the geometric interpretation of Fieller's theorem can be used to calculate the confidence intervals for a ratio of two values with known confidence intervals.
}
\examples{
# Create two default universes simulating 50 ng DNA input.
universe_target = universe(50)
universe_reference = universe(50)
# Take two samples
sample = sample(1:length(universe_target), 20000)
conc_target = sample_from_universe(universe_target, sample)
conc_reference = sample_from_universe(universe_reference, sample)
# Calculate the ratio
ratio = calc_ratio(conc_target, conc_reference)
ratio
}
|
a27994645690cea8f693f8d234427a63bb54f8b0
|
1ea35aa8adc3131f178d873800c1c818343b9dec
|
/src/R/shiny/ROMOPOmics_demo/ROMOPOmics/R/readInputFiles.R
|
be4a45b1225e37b7cc1a3622821b4cba6854104c
|
[
"MIT"
] |
permissive
|
NCBI-Codeathons/OMOPOmics
|
9afa7abd4f59baa48248b73a823d5e50d0197663
|
c6f0293f99189cc682d04aef9f40e43a8878ca8b
|
refs/heads/master
| 2020-12-06T04:54:42.723704
| 2020-06-04T16:45:14
| 2020-06-04T16:45:14
| 232,348,286
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,813
|
r
|
readInputFiles.R
|
#' readInputFiles
#'
#' Function reads in TSV files designed with a given mask in mind, with rows
#' for each field and table combination and columns for input data entries.
#' Output is an "exhaustive" table including all fields and tables from the
#' specified data model, including unused tables and fields.
#'
#' @param input_file Name of a TSV file containing required alias column names.
#' @param data_model Data model being used, typically as a tibble returned by loadDataModel().
#' @param mask_table Mask contained in a tibble, typically as a tibble loaded by loadModelMask().
#'
#' @return A tibble spanning the full data model: one row per table/field
#'   combination and one column per data entry in \code{input_file};
#'   unspecified values are \code{NA}.
#'
#' @import tibble
#' @import data.table
#' @import dplyr
#' @import magrittr
#' @importFrom stringr str_match
#'
#' @export

readInputFiles    <- function(input_file = input_files[[2]],
                              data_model = dm,
                              mask_table = msks$patient_sequencing){
  # NOTE(review): the defaults reference global objects (input_files, dm,
  # msks) and fail when those are absent; callers should pass all three
  # arguments explicitly.

  # File name (without the ".tsv" extension) to append to each data column so
  # entries from different input files stay distinguishable.
  fl_nm   <- str_match(basename(input_file),"(.+)\\.tsv$")[,2]

  # Read the headerless TSV (column 1 = aliases, remaining columns = data
  # entries), attach table/field names via the mask, and replace fread's
  # default V1, V2, ... column names with the file name.
  in_tab  <- fread(input_file,sep = "\t",header = FALSE,stringsAsFactors = FALSE) %>%
              rename(alias=1) %>%
              merge(.,select(mask_table,table,alias,field),all.x = TRUE, all.y=TRUE) %>%
              as_tibble() %>%
              rename_at(vars(starts_with("V")), function(x) gsub("V",fl_nm,x)) %>%
              select(table,field,everything(),-alias)

  # The "standard table" now is the entire data model with mapped inputs, all
  # unspecified values as NA. Each individual entry is stored in a unique
  # column. The pipeline result is the function value (the former
  # `%>% return()` tail was removed; returning the last expression is the
  # idiomatic form).
  data_model %>%
    select(field,table,required,type,description,table_index) %>%   #Only keep standard cols.
    mutate(table=toupper(table)) %>%
    merge(in_tab,all=TRUE) %>%
    as_tibble() %>%
    mutate_all(function(x) ifelse(x=="",NA,x))
}
|
7ab34c98c33e5b7989777103a5d6a45019b35521
|
b56eee2ac6a95d0e0eb9bdbdda40679de795de10
|
/microEMAResponseSummaries/plotParticipantResponsebehavior.R
|
c280d1d1600c3380659016323d75c1f950b9e00b
|
[] |
no_license
|
adityaponnada/microEMA-Preprocessing
|
1c658a46582753c1dae2fd1349a339a34e2868a0
|
d254a4fcd8b088399108261994b56c6cfe6b3424
|
refs/heads/master
| 2020-03-10T14:36:24.369331
| 2018-10-30T16:17:44
| 2018-10-30T16:17:44
| 129,430,763
| 0
| 0
| null | 2018-10-30T16:17:46
| 2018-04-13T16:57:48
|
R
|
UTF-8
|
R
| false
| false
| 567
|
r
|
plotParticipantResponsebehavior.R
|
#### Include libraries
# NOTE(review): library attach order matters here -- dplyr is loaded last and
# masks functions from MASS (e.g. select); keep this order when editing.
library(psych)
library(MASS)
library(ggplot2)
library(plotly)
library(reshape2)
library(dplyr)

#### Plot stacked histograms of response rates
# NOTE(review): uEMAResponseRate is not defined in this script; presumably it
# is produced by an earlier preprocessing step -- confirm before running.
# Columns to plot: participant ID plus compliance/completion rates.
RRSet <- c("USER_ID", "W1_COMPLIANCE", "TOTAL_COMPLIANCE", "W1_COMPLETION", "TOTAL_COMPLETION")
uEMARRSubset <- uEMAResponseRate[RRSet]
# Reshape to long format: one row per (participant, measure) pair for ggplot.
meltRRDataFrame <- melt(uEMARRSubset)
# Dodged bar chart: one bar per measure, grouped by participant.
ggplot(meltRRDataFrame, aes(x = USER_ID, y = value, fill = variable)) +
  geom_bar(stat = "identity", position = "dodge")+
  ggtitle("Response summary of all participants") +
  labs(x="Participants",y="Response rate (%)")
|
170a10d4c262203519f1855118c5ca0193994bc5
|
926b4a96b4a68250e1e44ab332bd5a80d050b5ec
|
/R/print.summary.lognlm.R
|
a00275167fc4ca4776d7e020c3e24af477e9530a
|
[] |
no_license
|
cran/logNormReg
|
037c51fea1ab7a72fa6ad6b577c7a9e5832aff2b
|
a5c9af006d98a3778090f4478ba8cdc470494853
|
refs/heads/master
| 2021-11-26T18:04:05.306516
| 2021-11-08T16:00:02
| 2021-11-08T16:00:02
| 153,307,292
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,411
|
r
|
print.summary.lognlm.R
|
# S3 print method for "summary.lognlm" objects.
#
# Prints the model call, the coefficient table, the estimated standard
# deviation (with its standard error when available from the covariance
# matrix), the goodness-of-fit figure (log likelihood or a residual sum of
# squares, depending on how the model was fitted) and the pseudo-R2 values.
# Returns its argument invisibly, as print methods conventionally do.
print.summary.lognlm <-
  function(x, digits = max(3L, getOption("digits") - 3L), signif.stars = getOption("show.signif.stars"), ...) {
    # Echo the original model call.
    cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"),
        "\n", sep = "")
    # Coefficient table, with significance stars if requested.
    cat("\nCoefficients:\n")
    coef.tab <- x$coefficients
    printCoefmat(coef.tab, digits = digits, signif.stars = signif.stars, na.print = "NA", ...)
    # Label for the fit measure reported below.
    if (x$lik) {
      fit.label <- "Log Likelihood:"
    } else if (length(x$weights) > 0) {
      fit.label <- "Sum of (weighted) squared residuals (logs):"
    } else {
      fit.label <- "Sum of squared Residuals (logs):"
    }
    # The covariance matrix carries one extra row/column for sigma when its
    # standard error was estimated; otherwise the standard error is NA.
    covmat <- x$cov
    sd.idx <- nrow(coef.tab) + 1
    sd.se <- if (nrow(covmat) == sd.idx) sqrt(covmat[sd.idx, sd.idx]) else NA
    cat("\nStandard deviation estimate: ", format(x$sigma, digits = max(5L, digits)),
        "(St.Err =", paste0(format(sd.se, digits = max(4, digits)), ")"))
    cat("\n")
    # Mention observations deleted by the na.action, if any.
    na.msg <- naprint(x$na.action)
    if (nzchar(na.msg)) cat(" (", na.msg, ")\n", sep = "")
    cat(fit.label, format(x$loglik, digits = max(5L, digits + 1L)), " (on", x$df.residual, "degrees of freedom)",
        "\npseudo-R2:", formatC(x$r.squared, digits = digits), " Adj pseudo-R2:", formatC(x$adj.r.squared, digits = digits)
    )
    cat("\n")
    invisible(x)
  }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.