blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
23d7d668bede7fc364475006b1682e394b693db3
|
c6b7ff06a37716fa19d982ce8924a1a95bc8973e
|
/cachematrix.R
|
34498092f3f681f5e2c715887fc26075d7bc91b1
|
[] |
no_license
|
Aramisisc/ProgrammingAssignment2
|
17be03111e571bdcc942d621ad19d8d0f75b6e42
|
af46523fa773bf8ea8fd15c4fe882c4b82e96175
|
refs/heads/master
| 2021-01-17T14:12:50.837253
| 2014-06-22T13:24:45
| 2014-06-22T13:24:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,547
|
r
|
cachematrix.R
|
#Create a first special object that stores a matrix and caches its inverse.
#The first function, makeCacheMatrix, creates a special "matrix" object,
#which is really a list containing functions to:
#-set the value of the matrix
#-get the value of the matrix
#-set the inverse matrix
#-get the inverse matrix
#We assume that the provided matrix is a square invertible matrix,
#f.i.: matrix(1:4,2,2)
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until setinverse() stores a value, and reset to
  # NULL whenever the underlying matrix is replaced.
  inv <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Accessor for the stored matrix.
  get <- function() x
  # Store / retrieve the cached inverse.
  setinverse <- function(solve) inv <<- solve
  getinverse <- function() inv
  # Expose the four closures as a named list (the "special matrix").
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
#The second function,cacheSolve(), calculates the inverse of the matrix that
#has been created with the makeCacheMatrix function.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" x created by
  ## makeCacheMatrix, reusing the cached inverse when one is available.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    ## Cache hit: skip the computation entirely.
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: invert the stored matrix and memoize the result.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinverse(inv)
  inv
}
|
c9eabe2fb39356a027d7dbccb8a975a7f3027a4e
|
adbebff4f9f4b6ebd17c30c237ed00b10b3d15d8
|
/test.R
|
ac839f83010e84ecc35fb1cf297dab15d680170b
|
[
"MIT"
] |
permissive
|
dowlir/nhm-test-project
|
2e73d5c9b38ea897add768292a1bab7b7e0ec0b6
|
e49476d8c34527179c02d993a528ace3ce6a3bce
|
refs/heads/master
| 2021-09-15T16:18:15.443777
| 2018-06-06T13:17:49
| 2018-06-06T13:17:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12
|
r
|
test.R
|
# Assign a value, then evaluate (and auto-print) a simple expression.
x <- 21
x + 3
|
ba91e3f6e6f7205b30c94b7c63c44c75d99d09d3
|
927e74b280556daae5b90929a1586aec858dbdb7
|
/man/get_daily_weather.Rd
|
2f6118d1d5667c1d8ce6f1bf5a9ae129be22c2c0
|
[] |
no_license
|
alfcrisci/rWeatherITA
|
37ff6ebcb2842e680d3419abdae3f270a771980d
|
8f4c39438894e2d6f94402c799392c0ce56c34c0
|
refs/heads/master
| 2020-12-24T06:42:21.603561
| 2016-11-28T15:39:39
| 2016-11-28T15:39:39
| 52,808,245
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 995
|
rd
|
get_daily_weather.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_daily_weather.r
\name{get_daily_weather}
\alias{get_daily_weather}
\title{get_daily_weather}
\usage{
get_daily_weather(typestaz = "WU-STAZ", year, idstaz = "LIRQ")
}
\arguments{
\item{typestaz}{Character Type of data "WU-STAZ" done by ICAO code of airport, or "WU-PWS" by id of personal weather station.}
\item{idstaz}{Character Weather station ID or code for identification}
\item{startdate}{Character Initial date of period in "YYYY-MM-DD" format.}
\item{enddate}{Character Final date of period in "YYYY-MM-DD" format.}
}
\value{
Return data.frame Data are returned as R data.frame object.
}
\description{
Retrieve meteorological data from two different source : ICAO wunderground network data or PWS by using weatherData R packages.
}
\author{
Istituto di Biometeorologia Firenze Italy Alfonso crisci \email{a.crisci@ibimet.cnr.it}.
}
\keyword{data}
\keyword{daily}
\keyword{wunderground}
\keyword{weather}
|
7ac240074d5fa69c2accd556a924d77902036fc1
|
23cb7aba69c3191582e862a1e43940d2ec6232ca
|
/man/checkHomopolymerCount.Rd
|
f557a4afcf4d71bdb730c2ee02e3f9138a909bb3
|
[] |
no_license
|
DKMS-LSL/dr2s
|
23278c630dda2b3f1110e9929bdfdf462f3e537a
|
5d999345426b4d13644d6711adda29733eeab15b
|
refs/heads/master
| 2021-06-11T07:09:45.889559
| 2021-03-12T08:58:08
| 2021-03-12T08:58:08
| 132,594,780
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 583
|
rd
|
checkHomopolymerCount.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sequtils.R
\name{checkHomopolymerCount}
\alias{checkHomopolymerCount}
\title{Get a distribution of homopolymer counts in alleles}
\usage{
checkHomopolymerCount(x, hpCount = 10)
}
\arguments{
\item{x}{a DR2S object.}
\item{hpCount}{the minimal length of a homopolymer to be checked (10).}
\item{map}{Which result to use. Either "mapFinal" or "remap".}
}
\value{
plot a pdf with length histogram and write mode to log
}
\description{
Get a distribution of homopolymer counts in alleles
}
\examples{
###
}
|
ea40976749f0d0faf255ffff9b18aa86214dd7d0
|
20bfcff74f158557d50f1293c8f70404ece0d5a5
|
/glmPR/tests/testthat.R
|
7f7c8b491b1a8ddcc5182e9c6e96f40274b43d03
|
[] |
no_license
|
Xia-Zhang/Poisson-Regression
|
76d047ccae6300841906929f5cfc875b4ab9258b
|
82ed7237db8cbade82b1dcf3cc36a40cbec0e2a0
|
refs/heads/master
| 2021-01-18T07:26:29.615945
| 2017-05-11T15:37:48
| 2017-05-11T15:37:48
| 84,288,908
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Test runner for the glmPR package: load testthat and the package under
# test, then execute every test under tests/testthat/.
library(testthat)
library(glmPR)
test_check("glmPR")
|
baf9c6ce85a7701460722d860ca14f2674125449
|
cf72b47122c4f991ff1c42daf67a6862c3658764
|
/clouds_example.R
|
57aada5caf2c8dc20a397da2cf1740dbe691ad50
|
[] |
no_license
|
jgabry/R2prior
|
5838c304c997e577b9834abd575676f8805b4f55
|
3ae190bb22b258410d02ce34c2357f812718af31
|
refs/heads/master
| 2020-04-05T23:00:40.425839
| 2018-05-30T17:28:20
| 2018-05-30T17:28:20
| 52,554,603
| 0
| 1
| null | 2016-03-31T03:41:53
| 2016-02-25T20:29:41
|
TeX
|
UTF-8
|
R
| false
| false
| 349
|
r
|
clouds_example.R
|
# Example: fit the HSAUR3 'clouds' cloud-seeding data by OLS and then by
# Bayesian regression (rstanarm::stan_lm) with a prior on R^2.
library(rstanarm)
library(HSAUR3)
data("clouds", package = "HSAUR3")
# Rainfall modeled by seeding interacted with cloud covariates, plus time.
mod <- rainfall ~ seeding * (sne+cloudcover+prewetness+echomotion) + time
# Maximum-likelihood (OLS) fit for comparison.
mle <- lm(formula = mod, data = clouds)
round(summary(mle)$coefficients[, 1:2], 1)
# Prior whose mode for R^2 is 0.2.
R2prior <- R2(location = 0.2, what = "mode")
post <- stan_lm(formula = mod, data = clouds, prior = R2prior)
print(post)
|
a575f31cdc44d00cdd01cdd12c88d995a31e8937
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/nse/examples/nse.geyer.Rd.R
|
f82a601c31fc757d0db70fe4a65cef82c85d7b41
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 417
|
r
|
nse.geyer.Rd.R
|
# Extracted example for nse::nse.geyer: simulate an AR(1) series and compute
# the numerical standard error of its mean with several Geyer variants.
library(nse)
### Name: nse.geyer
### Title: Geyer estimator
### Aliases: nse.geyer
### ** Examples
n = 1000
ar = 0.9
mean = 1
sd = 1
set.seed(1234)
# AR(1) sample with autocorrelation 0.9, shifted to mean 1.
x = as.vector(arima.sim(n = n, list(ar = ar), sd = sd) + mean)
nse.geyer(x = x, type = "bm", nbatch = 30)   # batch means
nse.geyer(x = x, type = "obm", nbatch = 30)  # overlapping batch means
nse.geyer(x = x, type = "iseq", iseq.type = "pos")
nse.geyer(x = x, type = "iseq.bm", iseq.type = "con")
|
423f7e60dcf81359308d832e25276e711158e594
|
8d657b14c43472f23bd8faaf0ed3aba9e151cea6
|
/R/vertical_Kparty_logistic.R
|
5acdbf707f857fb696fe3a1e5aa344c6c0d919c4
|
[] |
no_license
|
cran/vdra
|
42a73738350a38c6c95af0a3156ec877829ae694
|
54c44c79831ea3407fc5cbf6fbc0301ce8c2cda2
|
refs/heads/master
| 2023-07-16T08:14:03.433005
| 2021-09-09T05:20:02
| 2021-09-09T05:20:02
| 404,778,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35,056
|
r
|
vertical_Kparty_logistic.R
|
################### DISTRIBUTED LOGISTIC REGRESSION FUNCTIONS ###################
# Analysis-center step: read every data partner's shared products,
# half-shares, and column statistics from disk and assemble the full
# cross-product matrix M = t(S) %*% S over all partners' columns.
# Row/column 1 of M corresponds to the response held by partner 1.
GetProductsLogistic.AC = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "GetProductsLogistic.AC\n\n")
  readTime = 0
  readSize = 0
  p = 0     # running total of columns across partners (includes response)
  n = 0     # number of observations, taken from partner 1's half-share
  pi = c()  # per-partner column counts
  allproducts = rep(list(list()), params$numDataPartners)
  allhalfshare = rep(list(list()), params$numDataPartners)
  alltags = rep(list(list()), params$numDataPartners)
  # Placeholders for the objects that load() creates below.
  products = NULL
  halfshare = NULL
  tags = NULL
  allcolmin = allcolrange = allcolsum = allcolnames = NULL
  colmin = colrange = colsum = colnames = NULL
  party = NULL
  for (id in 1:params$numDataPartners) {
    readTime = readTime - proc.time()[3]
    # Each load() brings in products / halfshare / tags / col* for partner id.
    load(file.path(params$readPathDP[id], "products.rdata"))
    load(file.path(params$readPathDP[id], "halfshare.rdata"))
    load(file.path(params$readPathDP[id], "colstats.rdata"))
    readSize = readSize + sum(file.size(file.path(params$readPathDP[id],
                                                  c("products.rdata",
                                                    "halfshare.rdata",
                                                    "colstats.rdata"))))
    readTime = readTime + proc.time()[3]
    allproducts[[id]] = products
    allhalfshare[[id]] = halfshare
    alltags[[id]] = tags
    allcolmin = c(allcolmin, colmin)
    allcolrange = c(allcolrange, colrange)
    allcolsum = c(allcolsum, colsum)
    allcolnames = c(allcolnames, colnames)
    party = c(party, rep(paste0("dp", id), length(colnames)))
    p = p + ncol(halfshare)
    pi = c(pi, ncol(halfshare))
    if (id == 1) n = nrow(halfshare)
  }
  # Assemble the symmetric p x p cross-product matrix block by block.
  M = matrix(0, p, p)
  colnames(M) = allcolnames
  rownames(M) = allcolnames
  offset1 = 1
  params$pi = rep(0, params$numDataPartners)
  for (id1 in 1:params$numDataPartners) {
    p1 = ncol(allhalfshare[[id1]])
    params$pi[id1] = p1
    offset2 = offset1
    for (id2 in id1:params$numDataPartners) {
      p2 = ncol(allhalfshare[[id2]])
      if (id1 == id2) {
        # Diagonal block: the partner supplied its own product directly.
        M[offset1:(offset1 + p1 - 1), offset2:(offset2 + p2 - 1)] = allproducts[[id1]][[id2]]
      } else {
        # Off-diagonal block: combine both partners' stored products with
        # the half-share cross term, then mirror it for symmetry.
        temp = allproducts[[id1]][[id2]] + allproducts[[id2]][[id1]] +
          t(allhalfshare[[id1]]) %*% allhalfshare[[id2]]
        M[offset1:(offset1 + p1 - 1), offset2:(offset2 + p2 - 1)] = temp
        M[offset2:(offset2 + p2 - 1), offset1:(offset1 + p1 - 1)] = t(temp)
      }
      offset2 = offset2 + p2
    }
    offset1 = offset1 + p1
  }
  params$halfshare = allhalfshare
  # Split M into t(S)S, t(S)y and t(y)y; entry 1 is the response.
  params$sts = M[2:p, 2:p, drop = FALSE]
  params$sty = M[2:p, 1, drop = FALSE]
  params$yty = M[1, 1]
  params$meansy = allcolsum[1] / n
  params$means = allcolsum[-1] / n
  params$n = n
  params$p = p
  params$pi = pi
  # Drop the response's entry from the per-column statistics.
  params$colmin = allcolmin[-1]
  params$colrange = allcolrange[-1]
  params$colsum = allcolsum[-1]
  params$colnames = allcolnames[-1]
  params$party = party[-1]
  params$tags = alltags
  params = AddToLog(params, "GetProductsLogistic.AC", readTime, readSize, 0, 0)
  return(params)
}
# Analysis-center step: greedily drop covariates that make t(S) %*% S nearly
# singular, validate that every partner still has a usable covariate set,
# and write the retained indices to disk for the partners.
# (Note: "indicies" is the spelling used throughout this package.)
CheckColinearityLogistic.AC = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "CheckColinearityLogistic.AC\n\n")
  sts = params$sts
  sty = params$sty
  nrow = nrow(sts)
  indicies = c(1)
  # Keep column i only if adding it leaves the submatrix well conditioned
  # (reciprocal condition number comfortably above machine epsilon).
  for (i in 2:nrow) {
    tempIndicies = c(indicies, i)
    if (rcond(sts[tempIndicies, tempIndicies]) > 10^8 * .Machine$double.eps) {
      indicies = c(indicies, i)
    }
  }
  sts = sts[indicies, indicies, drop = FALSE]
  sty = sty[indicies, drop = FALSE]
  params$sts = sts
  params$sty = sty
  # Extract the indicies to keep for each party and check for errors.
  params$colmin = params$colmin[indicies]
  params$colrange = params$colrange[indicies]
  params$colsum = params$colsum[indicies]
  params$fullindicies = indicies # To be used when computing stats
  params$p = params$p - 1 # Get rid of the response from the count
  indicies = indicies + 1 # take into account that pi still counts sty, which we removed earlier.
  params$indicies = rep(list(list()), params$numDataPartners)
  tags = rep(list(list()), params$numDataPartners)
  min = 1
  # Translate global column indices into per-partner local indices.
  for (id in 1:params$numDataPartners) {
    max = min + params$pi[id] - 1
    idx = indicies[which(min <= indicies & indicies <= max)] - min + 1
    params$indicies[[id]] = idx
    if (id == 1) {
      # Partner 1's first column is the response; shift it out of the tags.
      idx = (idx - 1)[-1]
    }
    temp = params$tags[[id]]
    temp = temp[idx]
    tags[[id]] = temp
    min = max + 1
  }
  params$errorMessage = ""
  # Partner 1 must hold either no covariates, or >= 2 with one continuous.
  if ((length(unique(tags[[1]])) == 1) | (length(unique(tags[[1]])) >= 2 & !("numeric" %in% names(tags[[1]])))) {
    params$failed = TRUE
    params$errorMessage = "Data Partner 1 must have no covariates or at least 2 covariates at least one of which is continuous.\n"
  }
  # Every other partner must retain >= 2 covariates, one continuous.
  for (id in 2:params$numDataPartners) {
    if (length(unique(tags[[id]])) < 2) {
      params$failed = TRUE
      params$errorMessage = paste0(params$errorMessage,
                                   paste("After removing colinear covariates, Data Partner", id, "has 1 or fewer covariates.\n"))
    } else if (!("numeric" %in% names(tags[[id]]))) {
      params$failed = TRUE
      params$errorMessage = paste0(params$errorMessage,
                                   paste("After removing colinear covariates, Data Partner", id, "has no continuous covariates.\n"))
    }
  }
  indicies = params$indicies
  # Per-partner counts of retained columns.
  params$pReduct = c()
  for (id in 1:params$numDataPartners) {
    params$pReduct = c(params$pReduct, length(indicies[[id]]))
  }
  # Restrict the stored half-shares to the retained columns.
  for (id in 1:params$numDataPartners) {
    params$halfshare[[id]] = params$halfshare[[id]][, indicies[[id]], drop = FALSE]
  }
  writeTime = proc.time()[3]
  save(indicies, file = file.path(params$writePath, "indicies.rdata"))
  writeSize = file.size(file.path(params$writePath, "indicies.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "CheckColinearityLogistic.AC", 0, 0, writeTime, writeSize)
  return(params)
}
# Analysis-center step: compute initial beta estimates from the aggregated
# cross products and write each data partner's slice of the betas (plus the
# shared masking scalar u) to that partner's file.
ComputeInitialBetasLogistic.AC = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeInitialBetasLogistic.AC\n\n")
  writeTime = 0
  writeSize = 0
  # BUG FIX: the original referenced params$colran, which resolved to
  # params$colrange only through R's partial matching of list names ($);
  # spell the field name out so a future exact "colran" entry can't break it.
  colsumS = (params$colsum - params$n * params$colmin) / params$colrange
  # Initial step from beta = 0 (where fitted probabilities are 1/2 and the
  # IRLS weights are 1/4): beta = 4 * (StS)^-1 * (Sty - 0.5 * colsums).
  beta = 4 * solve(params$sts) %*% (params$sty - 0.5 * colsumS)
  # Random positive bound u used to mask the linear predictor downstream.
  u = sum(runif(length(beta), min = 1, max = 5) * abs(beta))
  params$u = u
  start = 1
  for (id in 1:params$numDataPartners) {
    # Slice off the betas for partner id's retained columns.
    end = start + length(params$indicies[[id]]) - 1
    betas = beta[start:end]
    writeTime = writeTime - proc.time()[3]
    save(u, betas, file = file.path(params$writePath, paste0("u_beta_", id, ".rdata")))
    writeSize = writeSize + file.size(file.path(params$writePath, paste0("u_beta_", id, ".rdata")))
    writeTime = writeTime + proc.time()[3]
    start = end + 1
  }
  params = AddToLog(params, "ComputeInitialBetasLogistic.AC", 0, 0, writeTime, writeSize)
  return(params)
}
# Data-partner step: read the retained column indices and this partner's
# slice of the initial betas (plus the masking scalar u) written by the
# analysis center, and store them in params.
UpdateParamsLogistic.DP = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "UpdateParamsLogistic.DP\n\n")
  # Placeholders for the objects that load() creates below.
  indicies = NULL
  u = NULL
  betas = NULL
  readTime = proc.time()[3]
  load(file.path(params$readPathAC, "indicies.rdata"))
  # Each partner reads only its own u_beta_<id> file.
  filename = paste0("u_beta_", params$dataPartnerID, ".rdata")
  load(file.path(params$readPathAC, filename))
  readSize = file.size(file.path(params$readPathAC, "indicies.rdata")) +
    file.size(file.path(params$readPathAC, filename))
  readTime = proc.time()[3] - readTime
  params$u = u
  params$betas = betas
  params$indicies = indicies
  params = AddToLog(params, "UpdateParamsLogistic.DP", readTime, readSize, 0, 0)
  return(params)
}
UpdateDataLogistic.DP = function(params, data) {
  # Restrict this partner's local data to the columns the analysis center
  # kept after the colinearity check; partner 1 also peels off the response.
  if (params$trace) cat(as.character(Sys.time()), "UpdateDataLogistic.DP\n\n")
  keep = params$indicies[[params$dataPartnerID]]
  if (params$dataPartnerID == 1) {
    # The first column of partner 1's X is the response vector.
    data$Y = data$X[, 1, drop = FALSE]
  }
  data$X = data$X[, keep, drop = FALSE]
  # Subset the per-column summary statistics to the retained columns.
  for (stat in c("colmin", "colmax", "colsum", "colrange")) {
    data[[stat]] = data[[stat]][keep]
  }
  return(data)
}
# Data-partner step: compute this partner's masked share of the linear
# predictor S %*% beta and write it for the analysis center.  Noise terms
# are generated from seeds shared across parties; presumably they cancel
# when all partners' shares are summed -- TODO confirm against protocol docs.
ComputeSbetaLogistic.DP = function(params, data) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeSbetaLogistic.DP\n\n")
  # Seed with the iteration counter so every iteration draws fresh but
  # reproducible noise.
  set.seed(params$seed + params$algIterationCounter, kind = "Mersenne-Twister")
  V = matrix(rnorm(params$n, mean = runif(n = 1, min = -1, max = 1), sd = 10), ncol = 1)
  Vsum = 0
  # Regenerate every partner's noise vector from the shared seeds.
  for (id in 1:params$numDataPartners) {
    set.seed(params$seeds[id] + params$algIterationCounter, kind = "Mersenne-Twister")
    Vsum = Vsum + matrix(rnorm(params$n, mean = runif(n = 1, min = -1, max = 1), sd = 10), ncol = 1)
  }
  # Shift/scale by the shared bound u, add own noise, subtract a weighted
  # portion of the summed noise.
  Sbeta = (data$X %*% params$betas + params$u) / (2 * params$u) + V - params$scaler / sum(params$scalers) * Vsum
  writeTime = proc.time()[3]
  save(Sbeta, file = file.path(params$writePath, "sbeta.rdata"))
  writeSize = file.size(file.path(params$writePath, "sbeta.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "ComputeSbetaLogistic.DP", 0, 0, writeTime, writeSize)
  return(params)
}
# Analysis-center step: sum the partners' masked shares of the linear
# predictor, unmask it, and compute the logistic fitted probabilities
# pi_ = 1 / (1 + exp(-sbeta)), which are written back for the partners.
ComputeWeightsLogistic.AC = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeWeightsLogistic.AC\n\n")
  Sbeta = 0   # placeholder for the object that load() creates below
  readTime = 0
  readSize = 0
  sbeta = 0
  for (id in 1:params$numDataPartners) {
    readTime = readTime - proc.time()[3]
    load(file.path(params$readPathDP[id], "sbeta.rdata"))
    readSize = readSize + file.size(file.path(params$readPathDP[id], "sbeta.rdata"))
    readTime = readTime + proc.time()[3]
    sbeta = sbeta + Sbeta
  }
  # Invert the (x + u) / (2u) masking each of the partners applied.
  sbeta = 2 * params$u * sbeta - params$numDataPartners * params$u
  pi_ = 1 / (1 + exp(-sbeta))
  params$pi_ = pi_
  writeTime = proc.time()[3]
  save(pi_, file = file.path(params$writePath, "pi.rdata"))
  writeSize = file.size(file.path(params$writePath, "pi.rdata"))
  writeTime = proc.time()[3] - writeTime
  # BUG FIX: the log label was misspelled "ComptueWeightsLogistic.AC".
  params = AddToLog(params, "ComputeWeightsLogistic.AC", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Data-partner step: compute this partner's shares of the weighted
# cross-product blocks t(S_i) %*% W %*% S_j, where W = pi * (1 - pi) are the
# logistic IRLS weights received from the analysis center.
ComputeStWSLogistic.DP = function(params, data) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeStWSLogistic.DP\n\n")
  pi_ = NULL   # placeholder for the object that load() creates below
  readTime = proc.time()[3]
  load(file.path(params$readPathAC, "pi.rdata"))
  readSize = file.size(file.path(params$readPathAC, "pi.rdata"))
  readTime = proc.time()[3] - readTime
  params$pi_ = pi_
  W = pi_ * (1 - pi_)   # IRLS weights for logistic regression
  C = rep(list(list()), params$numDataPartners)
  idx = params$indicies[[params$dataPartnerID]]
  # Regenerate this partner's own half-share from the shared seed rather
  # than storing it, restricted to the retained columns.
  set.seed(params$seed, kind = "Mersenne-Twister")
  halfshare = matrix(rnorm(params$n * params$p, sd = 20),
                     nrow = params$n, ncol = params$p)[, idx, drop = FALSE]
  for (id in 1:params$numDataPartners) {
    if (id < params$dataPartnerID) {
      # Partner id precedes us: regenerate its half-share from its seed and
      # build our contribution to the (id, us) block.
      set.seed(params$seeds[id], kind = "Mersenne-Twister")
      idx = params$indicies[[id]]
      halfshareDP = matrix(rnorm(params$n * params$ps[id], sd = 20),
                           nrow = params$n, ncol = params$ps[id])[, idx, drop = FALSE]
      C[[id]] = params$scaler / (params$scaler + params$scalers[id]) *
        t(halfshareDP) %*% MultiplyDiagonalWTimesX(W, halfshare) +
        t(halfshareDP) %*% MultiplyDiagonalWTimesX(W, data$X - halfshare)
    } else if (id == params$dataPartnerID) {
      # Our own diagonal block uses the actual data.
      C[[id]] = t(data$X) %*% MultiplyDiagonalWTimesX(W, data$X)
    } else { # id > params$dataPartnerID
      # Partner id follows us: same construction with roles transposed.
      set.seed(params$seeds[id], kind = "Mersenne-Twister")
      idx = params$indicies[[id]]
      halfshareDP = matrix(rnorm(params$n * params$ps[id], sd = 20),
                           nrow = params$n, ncol = params$ps[id])[, idx, drop = FALSE]
      C[[id]] = params$scaler / (params$scaler + params$scalers[id]) *
        t(halfshare) %*% MultiplyDiagonalWTimesX(W, halfshareDP) +
        t(data$X - halfshare) %*% MultiplyDiagonalWTimesX(W, halfshareDP)
    }
  }
  writeTime = proc.time()[3]
  save(C, file = file.path(params$writePath, "stwsshare.rdata"))
  writeSize = file.size(file.path(params$writePath, "stwsshare.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "ComputeStWSLogistic.DP", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Analysis-center step: assemble the full t(S) %*% W %*% S matrix from the
# partners' block shares plus the half-share cross terms, invert it, and
# distribute each partner's row slice of the inverse I and of
# I %*% (Sty - t(halfshare) %*% pi_) for the Newton update.
ComputeStWSLogistic.AC = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeStWSLogistic.AC\n\n")
  readTime = 0
  readSize = 0
  C = NULL   # placeholder for the object that load() creates below
  W = params$pi_ * (1 - params$pi_)   # IRLS weights
  StWS = matrix(0, sum(params$pReduct), sum(params$pReduct))
  for (id1 in 1:params$numDataPartners) {
    # Row-block index range for partner id1.
    end = sum(params$pReduct[1:id1])
    start = end - params$pReduct[id1] + 1
    idx1 = start:end
    readTime = readTime - proc.time()[3]
    load(file.path(params$readPathDP[id1], "stwsshare.rdata"))
    readSize = readSize + file.size(file.path(params$readPathDP[id1], "stwsshare.rdata"))
    readTime = readTime + proc.time()[3]
    # Accumulate partner id1's contributions into every block of StWS.
    for (id2 in 1:params$numDataPartners) {
      end = sum(params$pReduct[1:id2])
      start = end - params$pReduct[id2] + 1
      idx2 = start:end
      if (id1 < id2) {
        StWS[idx1, idx2] = StWS[idx1, idx2] + C[[id2]]
        StWS[idx2, idx1] = StWS[idx2, idx1] + t(C[[id2]])
      } else if (id1 == id2) {
        # Diagonal block was computed exactly by the partner itself.
        StWS[idx1, idx1] = C[[id1]]
      } else {
        StWS[idx2, idx1] = StWS[idx2, idx1] + C[[id2]]
        StWS[idx1, idx2] = StWS[idx1, idx2] + t(C[[id2]])
      }
    }
    # Add the half-share cross terms only the center can compute.
    if (id1 < params$numDataPartners) {
      for (id2 in (id1 + 1):params$numDataPartners) {
        end = sum(params$pReduct[1:id2])
        start = end - params$pReduct[id2] + 1
        idx2 = start:end
        temp = t(params$halfshare[[id1]]) %*% MultiplyDiagonalWTimesX(W, params$halfshare[[id2]])
        StWS[idx1, idx2] = StWS[idx1, idx2] + temp
        StWS[idx2, idx1] = StWS[idx2, idx1] + t(temp)
      }
    }
  }
  I = NULL
  # On a singular matrix, leave I NULL and report a descriptive error.
  tryCatch({I = solve(StWS)},
           error = function(err) { I = NULL }
  )
  if (is.null(I)) {
    params$failed = TRUE
    params$singularMatrix = TRUE
    params$errorMessage =
      paste0("The matrix t(X)*W*X is not invertible.\n",
             " This may be due to one of two possible problems.\n",
             " 1. Poor random initialization of the security matrices.\n",
             " 2. Near multicollinearity in the data\n",
             "SOLUTIONS: \n",
             " 1. Rerun the data analysis.\n",
             " 2. If the problem persists, check the variables for\n",
             " duplicates for both parties and / or reduce the\n",
             " number of variables used. Once this is done,\n",
             " rerun the data analysis.")
    params = AddToLog(params, "ComputeStWSLogistic.AC", readTime, readSize, 0, 0)
    return(params)
  }
  params$I = I
  # Concatenate all partners' half-shares column-wise.
  halfshare = params$halfshare[[1]]
  for (id in 2:params$numDataPartners) {
    halfshare = cbind(halfshare, params$halfshare[[id]])
  }
  IDt = I %*% (params$sty - t(halfshare) %*% params$pi_)
  Itemp = I
  IDttemp = IDt
  writeTime = 0
  writeSize = 0
  start = 1
  stop = params$pReduct[1]
  # Write each partner's row slice of I and IDt to its own file.
  for (id in 1:params$numDataPartners) {
    I = Itemp[start:stop, , drop = FALSE]
    IDt = IDttemp[start:stop, , drop = FALSE]
    writeTime = writeTime - proc.time()[3]
    save(I, IDt, file = file.path(params$writePath, paste0("ID", id, ".rdata")))
    writeSize = writeSize + file.size(file.path(params$writePath, paste0("ID", id, ".rdata")))
    writeTime = writeTime + proc.time()[3]
    start = stop + 1
    # NOTE(review): on the last iteration pReduct[id + 1] is NA, making stop
    # NA; harmless since the loop exits before stop is used again.
    stop = stop + params$pReduct[id + 1]
  }
  params = AddToLog(params, "ComputeStWSLogistic.AC", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Data-partner step: read this partner's slice of the inverse information
# matrix (I) and of the partial increment (IDt), regenerate all partners'
# half-shares from the shared seeds, and apply the Newton-Raphson update to
# the local betas.  Also reports a convergence measure back to the center.
UpdateBetaLogistic.DP = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "UpdateBetaLogistic.DP\n\n")
  I = IDt = NULL   # placeholders for the objects that load() creates below
  readTime = proc.time()[3]
  load(file.path(params$readPathAC, paste0("ID", params$dataPartnerID, ".rdata")))
  readSize = file.size(file.path(params$readPathAC, paste0("ID", params$dataPartnerID, ".rdata")))
  readTime = proc.time()[3] - readTime
  # Rebuild the concatenated half-share matrix for every partner from the
  # shared seeds (partner 1 first, then the rest appended column-wise).
  id = 1
  set.seed(params$seeds[id], kind = "Mersenne-Twister")
  idx = params$indicies[[id]]
  halfshareDP = matrix(rnorm(params$n * params$ps[id], sd = 20),
                       nrow = params$n, ncol = params$ps[id])[, idx, drop = FALSE]
  for (id in 2:params$numDataPartners) {
    set.seed(params$seeds[id], kind = "Mersenne-Twister")
    idx = params$indicies[[id]]
    halfshareDP = cbind(halfshareDP,
                        matrix(rnorm(params$n * params$ps[id], sd = 20),
                               nrow = params$n, ncol = params$ps[id])[, idx, drop = FALSE])
  }
  # Newton step: deltaBeta = IDt - I %*% t(halfshare) %*% pi_.
  D0 = t(halfshareDP) %*% params$pi_
  deltaBeta = IDt - I %*% D0
  params$betas = params$betas + deltaBeta
  # Max relative change; the +.1 guards against division by ~0 betas.
  maxdifference = max(abs(deltaBeta) / (abs(params$betas) + .1))
  # Fresh contribution to the masking bound u for the next iteration.
  utemp = sum(runif(length(deltaBeta), min = 1, max = 5) * abs(params$betas))
  writeTime = proc.time()[3]
  save(utemp, maxdifference, file = file.path(params$writePath, "u_converge.rdata"))
  writeSize = file.size(file.path(params$writePath, "u_converge.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "UpdateBetaLogistic.DP", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Analysis-center step: combine each partner's (utemp, maxdifference)
# report, declare convergence only if every partner's max relative change is
# below the cutoff, and broadcast the status plus the new mask bound u.
ComputeConvergeStatusLogistic.AC = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeConvergeStatusLogistic.AC\n\n")
  readTime = 0
  readSize = 0
  u = 0
  converged = TRUE
  # Placeholders for the objects that load() creates below.
  utemp = NULL
  maxdifference = NULL
  for (id in 1:params$numDataPartners) {
    readTime = readTime - proc.time()[3]
    load(file.path(params$readPathDP[id], "u_converge.rdata"))
    readSize = readSize + file.size(file.path(params$readPathDP[id], "u_converge.rdata"))
    readTime = readTime + proc.time()[3]
    # Sum mask contributions; AND the per-partner convergence checks.
    u = u + utemp
    converged = converged && (maxdifference < params$cutoff)
  }
  # Also stop when the iteration cap has been reached.
  maxIterExceeded = params$algIterationCounter >= params$maxIterations
  params$maxIterExceeded = maxIterExceeded
  params$u = u
  params$converged = converged
  writeTime = proc.time()[3]
  save(u, converged, maxIterExceeded, file = file.path(params$writePath, "u_converge.rdata"))
  writeSize = file.size(file.path(params$writePath, "u_converge.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "ComputeConvergeStatusLogistic.AC", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Data-partner step: read the convergence status (u, converged flag,
# iteration-cap flag) broadcast by the analysis center into params.
GetConvergeStatusLogistic.DP = function(params) {
  # BUG FIX: the trace label was miscapitalized ("Getconverge...").
  if (params$trace) cat(as.character(Sys.time()), "GetConvergeStatusLogistic.DP\n\n")
  # Placeholders for the objects that load() creates below.  (The original
  # initialized both "converged" and a misspelled, unused "converge".)
  u = converged = maxIterExceeded = NULL
  readTime = proc.time()[3]
  load(file.path(params$readPathAC, "u_converge.rdata"))
  readSize = file.size(file.path(params$readPathAC, "u_converge.rdata"))
  readTime = proc.time()[3] - readTime
  params$u = u
  params$converged = converged
  params$maxIterExceeded = maxIterExceeded
  params = AddToLog(params, "GetConvergeStatusLogistic.DP", readTime, readSize, 0, 0)
  return(params)
}
SendFinalBetasLogistic.DP = function(params) {
  # Write this partner's final beta estimates to disk for the analysis
  # center, logging the elapsed time and bytes written.
  if (params$trace) cat(as.character(Sys.time()), "SendFinalBetasLogistic.DP\n\n")
  outfile = file.path(params$writePath, "finalbetas.rdata")
  betas = params$betas
  writeTime = proc.time()[3]
  save(betas, file = outfile)
  writeSize = file.size(outfile)
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "SendFinalBetasLogistic.DP", 0, 0, writeTime, writeSize)
  return(params)
}
# Analysis-center step: after the final iteration, sum the partners' masked
# shares of S %*% beta and unmask to obtain the final linear predictor,
# which is written out for partner 1 to compute fit statistics.  Same
# unmasking as ComputeWeightsLogistic.AC, minus the logistic transform.
ComputeFinalSBetaLogistic.AC = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeFinalSBetaLogistic.AC\n\n")
  Sbeta = 0   # placeholder for the object that load() creates below
  readTime = 0
  readSize = 0
  sbeta = 0
  for (id in 1:params$numDataPartners) {
    readTime = readTime - proc.time()[3]
    load(file.path(params$readPathDP[id], "sbeta.rdata"))
    readSize = readSize + file.size(file.path(params$readPathDP[id], "sbeta.rdata"))
    readTime = readTime + proc.time()[3]
    sbeta = sbeta + Sbeta
  }
  # Invert the (x + u) / (2u) masking each of the partners applied.
  sbeta = 2 * params$u * sbeta - params$numDataPartners * params$u
  writeTime = proc.time()[3]
  save(sbeta, file = file.path(params$writePath, "sbeta.rdata"))
  writeSize = file.size(file.path(params$writePath, "sbeta.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "ComputeFinalSBetaLogistic.AC", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Data-partner-1 step: read the final linear predictor from the analysis
# center and compute the fit statistics that need the actual response Y:
# residual deviance, null deviance, Hosmer-Lemeshow, and ROC.
ComputeResultsLogistic.DP = function(params, data) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeResultsLogistic.DP\n\n")
  sbeta = NULL   # placeholder for the object that load() creates below
  readTime = proc.time()[3]
  load(file.path(params$readPathAC, "sbeta.rdata"))
  readSize = file.size(file.path(params$readPathAC, "sbeta.rdata"))
  readTime = proc.time()[3] - readTime
  n = params$n
  ct = sum(data$Y)   # event count; assumes Y is 0/1 -- TODO confirm upstream
  params$FinalFitted = sbeta
  # Residual deviance: -2 * logistic log-likelihood at the fitted predictor.
  resdev = -2 * (sum(data$Y * sbeta) - sum(log(1 + exp(sbeta))))
  # Null deviance for the intercept-only model.
  nulldev = -2 * (ct * log(ct / n) + (n - ct) * log(1 - ct / n))
  hoslem = HoslemInternal(params, data)
  ROC = RocInternal(params, data)
  writeTime = proc.time()[3]
  save(resdev, nulldev, hoslem, ROC, file = file.path(params$writePath, "logisticstats.rdata"))
  writeSize = file.size(file.path(params$writePath, "logisticstats.rdata"))
  writeTime = proc.time()[3] - writeTime
  params = AddToLog(params, "ComputeResultsLogistic.DP", readTime, readSize, writeTime, writeSize)
  return(params)
}
# Analysis-center step: collect each partner's final betas and the fit
# statistics computed by partner 1, transform the coefficients back to the
# original (unscaled) covariate units, and assemble/publish the final
# stats object.
ComputeResultsLogistic.AC = function(params) {
  if (params$trace) cat(as.character(Sys.time()), "ComputeResultsLogistic.AC\n\n")
  # Placeholders for the objects that load() creates below.
  nulldev = NULL
  resdev = NULL
  hoslem = NULL
  ROC = NULL
  readTime = proc.time()[3]
  load(file.path(params$readPathDP[1], "logisticstats.rdata"))
  readSize = file.size(file.path(params$readPathDP[1], "logisticstats.rdata"))
  readTime = proc.time()[3] - readTime
  coefficients = c()
  p = 0
  betas = NULL
  # Concatenate the partners' final beta slices in partner order.
  for (id in 1:params$numDataPartners) {
    readTime = readTime - proc.time()[3]
    load(file.path(params$readPathDP[id], "finalbetas.rdata"))
    readSize = readSize + file.size(file.path(params$readPathDP[id], "finalbetas.rdata"))
    readTime = readTime + proc.time()[3]
    coefficients = c(coefficients, betas)
    p = p + length(params$indicies[[id]])
  }
  # Undo the column scaling: slopes divided by the range, intercept shifted.
  # BUG FIX: the original used params$colran (three times below), which
  # resolved to colrange only through R's partial matching of list names;
  # spell the field name out explicitly.
  coefficients[2:p] = coefficients[2:p] / params$colrange[2:p]
  coefficients[1] = coefficients[1] - sum(coefficients[2:p] * params$colmin[2:p])
  serror = rep(0, p)
  serror[2:p] = sqrt(diag(params$I)[2:p]) / params$colrange[2:p]
  # Propagate the scaling through the covariance to get the intercept SE.
  d1 = diag(c(1, params$colmin[-1] / params$colrange[-1]))
  temp = d1 %*% params$I %*% d1
  serror[1] = sqrt(temp[1, 1] - 2 * sum(temp[1, 2:p]) + sum(temp[2:p, 2:p]))
  stats = params$stats
  stats$failed = FALSE
  stats$converged = params$converged
  # If xtwx were singular, it would have been caught in GetII.A2(), so we may
  # assume that xtwx is NOT singular and so we do not have to do a check.
  stats$party = params$party
  # Preallocate with NA; only retained (non-colinear) columns get values.
  stats$coefficients = rep(NA, params$p)
  stats$secoef = rep(NA, params$p)
  stats$tvals = rep(NA, params$p)
  stats$pvals = rep(NA, params$p)
  stats$n = params$n
  stats$nulldev = nulldev
  stats$resdev = resdev
  stats$aic = resdev + 2 * sum(params$pReduct)
  stats$bic = resdev + sum(params$pReduct) * log(params$n)
  stats$nulldev_df = params$n - 1
  stats$resdev_df = params$n - sum(params$pReduct)
  stats$coefficients[params$fullindicies] = coefficients
  stats$secoef[params$fullindicies] = serror
  # Wald z statistics and two-sided normal p-values.
  tvals = coefficients / serror
  pvals = 2 * pnorm(abs(tvals), lower.tail = FALSE)
  stats$tvals[params$fullindicies] = tvals
  stats$pvals[params$fullindicies] = pvals
  stats$hoslem = hoslem
  stats$ROC = ROC
  stats$iter = params$algIterationCounter - 1
  names(stats$coefficients) = params$colnames
  names(stats$party) = params$colnames
  names(stats$secoef) = params$colnames
  names(stats$tvals) = params$colnames
  names(stats$pvals) = params$colnames
  writeTime = proc.time()[3]
  save(stats, file = file.path(params$writePath, "stats.rdata"))
  writeSize = file.size(file.path(params$writePath, "stats.rdata"))
  writeTime = proc.time()[3] - writeTime
  params$stats = stats
  params = AddToLog(params, "ComputeResultsLogistic.AC", readTime, readSize, writeTime, writeSize)
  return(params)
}
GetResultsLogistic.DP = function(params, data) {
  # Load the final fit statistics published by the analysis center; data
  # partner 1 additionally attaches the response and the fitted linear
  # predictor (used for the Hosmer-Lemeshow and ROC summaries).
  if (params$trace) cat(as.character(Sys.time()), "GetResultsLogistic.DP\n\n")
  stats = NULL
  statsFile = file.path(params$readPathAC, "stats.rdata")
  readTime = proc.time()[3]
  load(statsFile)
  readSize = file.size(statsFile)
  readTime = proc.time()[3] - readTime
  if (params$dataPartnerID == 1) {
    stats$Y = data$Y                     # for Hoslem and ROC
    stats$FinalFitted = params$FinalFitted
  }
  params$stats = stats
  params = AddToLog(params, "GetResultsLogistic.DP", readTime, readSize, 0, 0)
  return(params)
}
############################## PARENT FUNCTIONS ###############################
# Data-partner side of the k-party distributed logistic regression protocol.
# Each partner holds its own covariate columns (partner 1 also holds the
# outcome), and all communication with the analysis center (AC) happens by
# exchanging files under monitorFolder, optionally mediated by PopMedNet.
#
# Args:
#   data            covariate data for this partner; partner 1 must include
#                   the response column named by `yname`
#   yname           response column name (used by data partner 1 only)
#   numDataPartners total number of data partners in the computation
#   dataPartnerID   this partner's 1-based ID (partner 1 is special)
#   monitorFolder   root folder used for file-based message passing
#   sleepTime       seconds between polls while waiting for incoming files
#   maxWaitingTime  maximum seconds to wait before giving up
#   popmednet       TRUE when transfers go through PopMedNet
#   trace, verbose  diagnostic output switches
#
# Returns: the fitted statistics (params$stats), or NULL if setup failed.
DataPartnerKLogistic = function(data,
                                yname = NULL,
                                numDataPartners = NULL,
                                dataPartnerID = NULL,
                                monitorFolder = NULL,
                                sleepTime = 10,
                                maxWaitingTime = 24 * 60 * 60,
                                popmednet = TRUE,
                                trace = FALSE,
                                verbose = TRUE) {
  # --- Setup: protocol parameters, logs, stamps, communication folders ----
  params = PrepareParams.kp("logistic", dataPartnerID, numDataPartners, ac = FALSE,
                            popmednet = popmednet, trace = trace, verbose = verbose)
  if (params$failed) {
    warning(params$errorMessage)
    return(invisible(NULL))
  }
  params = InitializeLog.kp(params)
  params = InitializeStamps.kp(params)
  params = InitializeTrackingTable.kp(params)
  Header(params)
  params = PrepareFolder.ACDP(params, monitorFolder)
  if (params$failed) {
    warning(params$errorMessage)
    return(invisible(NULL))
  }
  # --- Local data preparation (partner 1 carries the outcome) -------------
  if (dataPartnerID == 1) {
    # data = PrepareDataLogistic.DP(params, data, yname)
    data = PrepareDataLinLog.DP1(params, data, yname)
    params = AddToLog(params, "PrepareParamsLinLog.DP1", 0, 0, 0, 0)
  } else {
    data = PrepareDataLinLog.DPk(params, data)
    params = AddToLog(params, "PrepareParamsLinLog.DPk", 0, 0, 0, 0)
  }
  # NOTE(review): this third AddToLog runs in addition to one of the two
  # above, so preparation is logged twice -- confirm that is intended.
  params = AddToLog(params, "PrepareParamsLinear.DP", 0, 0, 0, 0)
  # If local preparation failed, broadcast the error to the AC and shut down.
  if (data$failed) {
    params$errorMessage = paste("Error processing data for data partner", params$dataPartnerID, "\n")
    MakeErrorMessage(params$writePath, params$errorMessage)
    files = "errorMessage.rdata"
    params = SendPauseContinue.kp(params, filesAC = files, from = "AC",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
    params$errorMessage = ReadErrorMessage(params$readPathAC)
    warning(params$errorMessage)
    params = SendPauseQuit.kp(params, sleepTime = sleepTime, waitForTurn = TRUE)
    return(params$stats)
  }
  # --- Handshake: share sample counts, wait for the AC's agreement check --
  params = SendBasicInfo.DP(params, data)
  files = "n_analysis.rdata"
  params = SendPauseContinue.kp(params, filesAC = files, from = "AC",
                                sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
  possibleError = ReceivedError.kp(params, from = "AC")
  if (possibleError$error) {
    params$errorMessage = possibleError$message
    warning(possibleError$message)
    params = SendPauseQuit.kp(params, sleepTime = sleepTime, waitForTurn = TRUE)
    return(params$stats)
  }
  # --- Exchange secret shares of the local covariate products -------------
  params = PrepareParamsLinear.DP(params, data)
  files = "p_scaler_seed.rdata"
  params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
  params = PrepareSharesLinear.DP(params, data)
  files = c("products.rdata", "halfshare.rdata", "colstats.rdata")
  params = SendPauseContinue.kp(params, filesAC = files, from = "AC",
                                sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
  possibleError = ReceivedError.kp(params, from = "AC")
  if (possibleError$error) {
    params$errorMessage = possibleError$message
    warning(possibleError$message)
    params = SendPauseQuit.kp(params, sleepTime = sleepTime, waitForTurn = TRUE)
    return(params$stats)
  }
  params = UpdateParamsLogistic.DP(params)
  data = UpdateDataLogistic.DP(params, data)
  params = AddToLog(params, "UpdateDataLogistic.DP", 0, 0, 0, 0)
  # --- IRLS iterations: exchange S*beta and S'WS shares until convergence -
  params$algIterationCounter = 1
  while (!params$converged && !params$maxIterExceeded) {
    BeginningIteration(params)
    params = ComputeSbetaLogistic.DP(params, data)
    files = "Sbeta.rdata"
    params = SendPauseContinue.kp(params, filesAC = files, from = "AC",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
    params = ComputeStWSLogistic.DP(params, data)
    files = "stwsshare.rdata"
    params = SendPauseContinue.kp(params, filesAC = files, from = "AC",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
    possibleError = ReceivedError.kp(params, from = "AC")
    if (possibleError$error) {
      params$errorMessage = possibleError$message
      warning(possibleError$message)
      params = SendPauseQuit.kp(params, sleepTime = sleepTime, waitForTurn = TRUE)
      return(params$stats)
    }
    params = UpdateBetaLogistic.DP(params)
    files = "u_converge.rdata"
    params = SendPauseContinue.kp(params, filesAC = files, from = "AC",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
    params = GetConvergeStatusLogistic.DP(params)
    EndingIteration(params)
    params$algIterationCounter = params$algIterationCounter + 1
  }
  # --- Wrap-up: send final betas, collect pooled statistics ---------------
  params = ComputeSbetaLogistic.DP(params, data)
  params = SendFinalBetasLogistic.DP(params)
  # NOTE(review): "sbeta.rdata" here vs "Sbeta.rdata" inside the loop above
  # -- on case-sensitive filesystems these are different files; confirm the
  # names written by ComputeSbetaLogistic.DP.
  files = c("sbeta.rdata", "finalbetas.rdata")
  params = SendPauseContinue.kp(params, filesAC = files, from = "AC",
                                sleepTime = sleepTime, maxWaitingTime = maxWaitingTime, waitForTurn = TRUE)
  if (dataPartnerID == 1) {
    # Partner 1 owns the outcome, so it computes the outcome-dependent
    # diagnostics and forwards them to the AC.
    params = ComputeResultsLogistic.DP(params, data)
    files = "logisticstats.rdata"
    params = SendPauseContinue.kp(params, filesAC = files, from = "AC",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
  }
  params = GetResultsLogistic.DP(params, data)
  params = SendPauseQuit.kp(params, sleepTime = sleepTime, waitForTurn = TRUE)
  return(params$stats)
}
# Analysis-center (AC) side of the k-party distributed logistic regression.
# Orchestrates the file-based protocol with numDataPartners data partners:
# collects secret shares, runs the IRLS iterations, checks convergence, and
# broadcasts the pooled results.
#
# Args:
#   numDataPartners number of participating data partners
#   monitorFolder   root folder used for file-based message passing
#   msreqid         PopMedNet request identifier string
#   cutoff          convergence threshold on the coefficient update
#   maxIterations   IRLS iteration cap
#   sleepTime       seconds between polls while waiting for incoming files
#   maxWaitingTime  maximum seconds to wait before giving up
#   popmednet       TRUE when transfers go through PopMedNet
#   trace, verbose  diagnostic output switches
#
# Returns: pooled fit statistics (params$stats), or NULL if setup failed.
AnalysisCenterKLogistic = function(numDataPartners = NULL,
                                   monitorFolder = NULL,
                                   msreqid = "v_default_0_000",
                                   cutoff = 1E-8,
                                   maxIterations = 25,
                                   sleepTime = 10,
                                   maxWaitingTime = 24 * 60 * 60,
                                   popmednet = TRUE,
                                   trace = FALSE,
                                   verbose = TRUE) {
  # One slot per data partner, filled when different files go to each one.
  filesList = rep(list(list()), numDataPartners)
  # --- Setup: protocol parameters, logs, communication folders ------------
  params = PrepareParams.kp("logistic", 0, numDataPartners, msreqid, cutoff, maxIterations, ac = TRUE,
                            popmednet = popmednet, trace = trace, verbose = verbose)
  if (params$failed) {
    warning(params$errorMessage)
    return(invisible(NULL))
  }
  params = InitializeLog.kp(params)
  params = InitializeStamps.kp(params)
  params = InitializeTrackingTable.kp(params)
  Header(params)
  params = PrepareFolder.ACDP(params, monitorFolder)
  if (params$failed) {
    warning(params$errorMessage)
    return(invisible(NULL))
  }
  # --- Wait for the partners' basic info; abort-and-broadcast on error ----
  params = PauseContinue.kp(params, from = "DP", maxWaitingTime = maxWaitingTime)
  possibleError = ReceivedError.kp(params, from = "DP")
  if (possibleError$error) {
    params$errorMessage = possibleError$message
    warning(possibleError$message)
    MakeErrorMessage(params$writePath, possibleError$message)
    files = "errorMessage.rdata"
    params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = SendPauseQuit.kp(params, sleepTime = sleepTime, job_failed = TRUE)
    SummarizeLog.kp(params)
    return(params$stats)
  }
  # Verify that all partners report consistent sample sizes / settings.
  params = CheckAgreement.AC(params)
  if (params$failed) {
    MakeErrorMessage(params$writePath, params$errorMessage)
    files = "errorMessage.rdata"
    warning(params$errorMessage)
    params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = SendPauseQuit.kp(params, sleepTime = sleepTime, job_failed = TRUE)
    SummarizeLog.kp(params)
    return(params$stats)
  }
  # Empty acknowledgement: tells partners to proceed to share exchange.
  files = "empty.rdata"
  params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
  # --- Collect product shares and screen for collinearity -----------------
  params = GetProductsLogistic.AC(params)
  params = CheckColinearityLogistic.AC(params)
  if (params$failed) {
    MakeErrorMessage(params$writePath, params$errorMessage)
    files = "errorMessage.rdata"
    warning(params$errorMessage)
    params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = SendPauseQuit.kp(params, sleepTime = sleepTime, job_failed = TRUE)
    SummarizeLog.kp(params)
    return(params$stats)
  }
  # Initial beta: each partner gets its own coefficient slice plus the
  # retained-column indices.
  params = ComputeInitialBetasLogistic.AC(params)
  for (id in 1:numDataPartners) {
    filesList[[id]] = c(paste0("u_beta_", id, ".rdata"), "indicies.rdata")
  }
  params = SendPauseContinue.kp(params, filesDP = filesList, from = "DP",
                                sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
  # --- IRLS loop: weights -> S'WS shares -> convergence check -------------
  params$algIterationCounter = 1
  while (!params$converged && !params$maxIterExceeded) {
    BeginningIteration(params)
    params = ComputeWeightsLogistic.AC(params)
    files = "pi.rdata"
    params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = ComputeStWSLogistic.AC(params)
    if (params$failed) {
      MakeErrorMessage(params$writePath, params$errorMessage)
      files = "errorMessage.rdata"
      warning(params$errorMessage)
      params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                    sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
      params = SendPauseQuit.kp(params, sleepTime = sleepTime, job_failed = TRUE)
      SummarizeLog.kp(params)
      return(params$stats)
    }
    # Per-partner coefficient updates.
    for (id in 1:numDataPartners) {
      filesList[[id]] = paste0("id", id, ".rdata")
    }
    params = SendPauseContinue.kp(params, filesDP = filesList, from = "DP",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    params = ComputeConvergeStatusLogistic.AC(params)
    files = "u_converge.rdata"
    params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                  sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
    EndingIteration(params)
    params$algIterationCounter = params$algIterationCounter + 1
  }
  # --- Wrap-up: only partner 1 (outcome holder) receives sbeta ------------
  params = ComputeFinalSBetaLogistic.AC(params)
  filesList = rep(list(list()), numDataPartners)
  filesList[[1]] = "sbeta.rdata"
  params = SendPauseContinue.kp(params, filesDP = filesList, from = "DP1",
                                sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
  # Pool the final statistics and broadcast them to every partner.
  params = ComputeResultsLogistic.AC(params)
  files = "stats.rdata"
  params = SendPauseContinue.kp(params, filesDP = files, from = "DP",
                                sleepTime = sleepTime, maxWaitingTime = maxWaitingTime)
  params = SendPauseQuit.kp(params, sleepTime = sleepTime)
  SummarizeLog.kp(params)
  return(params$stats)
}
|
f97179d0d28d5984e57a612ca111792988c8abe6
|
8a30b598326966214ea263e645aa240a074d12a5
|
/Analysis/for_live_session_4.R
|
07eb93e09723d4607e868004b2cb5553f7e44ccb
|
[] |
no_license
|
pankajti/timeseries
|
b2956ba132ce828e5b5699f5999a0df3dbbc070d
|
8206604ab18e9436a248e720c6d3ab9b2c718877
|
refs/heads/master
| 2020-11-28T19:42:46.406581
| 2020-04-12T07:19:00
| 2020-04-12T07:19:00
| 229,905,648
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 933
|
r
|
for_live_session_4.R
|
# Live session 4: ARMA simulation and spectral exploration with tswge.
library(tswge)
library(tidyverse)

# Factor the AR(2) characteristic polynomial, then simulate from that model
# and compare the sample (Parzen) spectrum with the true spectral density.
factor.wge(phi = c(-.5, -.6))
ar2_sim <- gen.arma.wge(n = 100, phi = c(-.5, -.6))
plotts.parzen.wge(ar2_sim)
plotts.true.wge(phi = c(-.5, -.6))

# Walmart sales for store 9, item 50.
# NOTE(review): absolute, machine-specific path -- parameterise before reuse.
wm_data <- read.csv('/Users/pankaj/dev/git/smu/timeseries/data/Walmart.csv')
wmst9it50 <- wm_data %>% filter(item == 50, store == 9)
plotts.sample.wge(wmst9it50$sales)

# MA(q) realisations of increasing order, each followed by its sample plots.
ma1_sim <- gen.arma.wge(1000, theta = c(0.967))
plotts.sample.wge(ma1_sim)
ma6_sim <- gen.arma.wge(1000, theta = c(1.452, -.453, -.294, .175, -.237, -.154))
plotts.sample.wge(ma6_sim)
ma8_sim <- gen.arma.wge(1000, theta = c(1.445, -.411, -.038, .170, .362, -.245, -.177, .213))
plotts.sample.wge(ma8_sim)
ma9_sim <- gen.arma.wge(1000, theta = c(1.384, -.359, -.309, .063, .317, -.140, -.0587, -.199, .2877))
plotts.sample.wge(ma9_sim)

# Same MA(6) coefficients with the sign of the first theta flipped.
spec_sim <- gen.arma.wge(1000, theta = c(-1.452, -.453, -.294, .175, -.237, -.154))
plotts.sample.wge(spec_sim)

# Factor the order-9 polynomial (passed as phi) to inspect its roots.
factor.wge(phi = c(1.384, -.359, -.309, .063, .317, -.140, -.0587, -.199, .2877))
|
1e097b2d4dc238a834a6e1847ef506e19b5367bd
|
d5004526b771aa11f2b564b15ccd47ca8a99b7c7
|
/man/beta_regress.Rd
|
de45d5506ca6b890bc44500430492c9c9fec3f15
|
[] |
no_license
|
xinchoubiology/Rcppsva
|
2d7031143f2627e3c8cf055cc6d8dcaddfb02a03
|
5140dbf361b4808c13addfd3342ca3ead092b401
|
refs/heads/master
| 2021-01-10T01:19:32.166223
| 2015-09-26T16:30:36
| 2015-09-26T16:30:36
| 36,160,015
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 711
|
rd
|
beta_regress.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{beta_regress}
\alias{beta_regress}
\title{beta_regress}
\usage{
beta_regress(M, pv, svs, full = 0L)
}
\arguments{
\item{M}{m x n expression matrix; each row represents a probe and each column represents a sample}
\item{pv}{n x B design matrix; each column holds the phenotype of interest, and if B >= 2,
the remaining columns are B-1 permutations of the covariate of interest}
\item{svs}{n x (p-1) design matrix; each row represents a sample and each column represents a parameter}
\item{full}{full output or coefficient only}
}
\description{
Linear regression on covariate of interest when comparing with other covariates
}
\author{
Xin Zhou
}
|
dbf64ae493db8cf75b617ff34bfe0f6bac5c51d0
|
6e8993b8d91bd4d5563efba0d88750900c42177f
|
/WeibinResults/tri_ccg_dist_hard_soft_compare_316/hist_edgenum_large.R
|
563b29908931e6e2b1183c628e8f36f8ce1f4806
|
[] |
no_license
|
Erich-McMillan/physics-research
|
346b0f2275bdda630d901991acd1db4276f3232b
|
969245148421d3bfe422fb85ba3f14898e48b37e
|
refs/heads/master
| 2021-07-08T05:00:57.775905
| 2017-09-25T14:58:22
| 2017-09-25T14:58:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 751
|
r
|
hist_edgenum_large.R
|
# Overlay edge-count histograms of the Chung-Lu (CL) and G(n,p) ensembles for
# fixed N across a range of mean degrees D, writing one PNG per D.
N <- 316
D <- 3        # initial value; immediately superseded by the loop variable below
Sq <- 0       # not used in this script; kept as-is from the original
SqTotal <- 1000
for (D in 11:66) {
  # Edge counts from the Chung-Lu vs SNM comparison run.
  finame <- sprintf("triangle_compare_ChungLu_Vs_SNM_N%dD%dSqTotal%d.txt", N, D, SqTotal)
  m_CL <- read.table(finame, header = TRUE)$m_CL
  # Edge counts from the G(n,p) run.
  finame <- sprintf("triangle_compare_gnp_N%dD%dSqTotal%d.txt", N, D, SqTotal)
  m_NP <- read.table(finame, header = TRUE)$m_NP
  # Shared break points spanning both samples: 50 equal-width bins.
  tRange <- range(m_CL, m_NP)
  zbreaks <- seq(tRange[1], tRange[2], (tRange[2] - tRange[1]) / 50)
  hCL <- hist(m_CL, breaks = zbreaks, plot = FALSE)
  hNP <- hist(m_NP, breaks = zbreaks, plot = FALSE)
  foname <- sprintf("edgenum_compare_SNM_RW_Rvl_N%dD%dSqTotal%d.png", N, D, SqTotal)
  png(foname)
  plot(hCL$mids, hCL$counts, col = "black", type = "l", main = foname)
  lines(hNP$mids, hNP$counts, col = "purple")
  abline(v = D * N / 2, col = "red")   # D*N/2 = number of edges at mean degree D
  legend("topright", c("CL", "NP"), col = c("black", "purple"), lty = 1)
  dev.off()
}
|
85e7ee03549c9403e3e25d8dec2fc1ff6ba67b3f
|
79abe59a6bf0b8ec95730268494f39ab7796a9f5
|
/cachematrix.R
|
49a2c61aec657c892761288454fedbffe64b4747
|
[] |
no_license
|
akalapodis/ProgrammingAssignment2
|
9db5402965e009de111cd36a20c5991f7617b88e
|
fd4c64baa04d950c10c3c9cb62fdade73c6a60c3
|
refs/heads/master
| 2020-12-30T18:38:22.004824
| 2014-07-24T17:50:09
| 2014-07-24T17:50:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,784
|
r
|
cachematrix.R
|
## Matrix inversion is usually a costly computation, and there may be some
## benefit to caching the inverse of a matrix rather than recomputing it.
## makeCacheMatrix and cacheSolve together cache the inverse of a matrix.

## makeCacheMatrix builds a "cache-aware matrix": a list of four closures
## sharing one enclosing environment, which stores both the matrix `x` and
## its cached inverse.  Re-assigning the matrix via set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL   # matrix changed, so any cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the special "matrix" produced by
## makeCacheMatrix.  The inverse is computed at most once: later calls (with
## an unchanged matrix) return the value memoised inside the object.
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    ## Cache miss: invert the stored matrix and memoise the result.
    ## solve(X) with a single square invertible argument returns X^-1.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
  } else {
    message("Getting cached data")
  }
  inverse
}
############ Test Run ############
# Demonstration: build a cached matrix, invert it once (computed), then invert
# it again (served from cache; prints "Getting cached data").
x <- matrix(c(1:4, 1, 6:9), 3, 3)
## [,1] [,2] [,3]
##[1,] 1 4 7
##[2,] 2 1 8
##[3,] 3 6 9
y <- makeCacheMatrix(x)
# First call: inverse is computed with solve() and stored.
cacheSolve(y)
## [,1] [,2] [,3]
##[1,] -0.8125 0.125 0.5208333
##[2,] 0.1250 -0.250 0.1250000
##[3,] 0.1875 0.125 -0.1458333
# Second call: same result, but retrieved from the cache.
cacheSolve(y)
##Getting cached data
## [,1] [,2] [,3]
##[1,] -0.8125 0.125 0.5208333
##[2,] 0.1250 -0.250 0.1250000
##[3,] 0.1875 0.125 -0.1458333
############ Test Run ############
|
67ff0d4911d0456049dc724d2e303a12bcfab5bc
|
f66031b70813e47120b974b012987e8453261ed4
|
/eda_Project1/1_Code/allPlots.R
|
ef557433c623c1cfc00e255a4ff82f20e84ad153
|
[] |
no_license
|
ckyau/ExData_Plotting1
|
2525b66053232d406ef41d9db241658167846484
|
7c09bbfa84afaa53416c2ca3162f7a0876ebc2c4
|
refs/heads/master
| 2020-08-29T09:49:32.473448
| 2020-02-20T10:11:05
| 2020-02-20T10:11:05
| 217,997,995
| 0
| 0
| null | 2019-10-28T08:21:48
| 2019-10-28T08:21:47
| null |
UTF-8
|
R
| false
| false
| 3,676
|
r
|
allPlots.R
|
# Load Libraries --------------------------------------------------------------------------------------------------------------------------------------
# NOTE(review): this block sources helpers from a relative project path and
# installs into a hard-coded Windows library -- it only works on the original
# author's machine.  Kept for provenance; confirm before reuse elsewhere.
for (func in list.files(path = "1_code/0_functions/")) { source(file = paste0("1_code/0_functions/", func)) }
installLoadPackages(c("tidyverse", "chron"), folderToInstallTo = "C:/Users/cyau/R/R-3.6.1/library")
rm(func, installLoadPackages)

# Install tidyverse only when it is missing: an unconditional
# install.packages() on every run is slow and requires network access.
if (!requireNamespace("tidyverse", quietly = TRUE)) {
  install.packages("tidyverse")
}
library(tidyverse)

# Download Data -----------------------------------------------------------
path <- paste0(getwd(), "/household_power_consumption.zip")
if (!file.exists(path)){
  fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileURL, path, method="curl")
  unzip(path)
}

# NOTE(review): unzip() above extracts into the working directory, but the
# file is read from "0_Data/" -- confirm the expected project layout.
dat <- read.table("0_Data/household_power_consumption.txt",sep = ";", header = 1, stringsAsFactors = FALSE)

# Keep the two days of interest and build proper date/datetime columns.
dat2 <- dat %>%
  mutate(date = lubridate::dmy(Date),
         dateTime = lubridate::dmy_hms(paste0(Date, " ", Time))) %>%
  filter(date >= "2007-02-01", date <= "2007-02-02")

# Coerce the measurement columns (3:9) to numeric.
for (i in 3:9) {
  dat2[[i]] <- as.numeric(dat2[[i]])
}

# Each plot is drawn on screen and then immediately copied to its own PNG,
# with the copy device closed right away.  (The original queued four
# dev.copy() calls followed by a single dev.off(), which left devices open
# and wrote the final plot's contents into every file.)

# Plot 1 ------------------------------------------------------------------
hist(dat2$Global_active_power,xlab = "Global Active Power (kilowatts)", main = "Global Active Power", col = "red", ylim = c(0, 1200))
dev.copy(png,filename = "plot1.png", width = 480, height = 480, units = "px")
dev.off()

# Plot 2 ------------------------------------------------------------------
plot(dat2$dateTime, dat2$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.copy(png,filename = "plot2.png", width = 480, height = 480, units = "px")
dev.off()

# Plot 3 ------------------------------------------------------------------
plot(dat2$dateTime, dat2$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(dat2$dateTime, dat2$Sub_metering_2, type = "l", xlab = "", ylab = "Energy sub metering", col = "red")
lines(dat2$dateTime, dat2$Sub_metering_3, type = "l", xlab = "", ylab = "Energy sub metering", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"))
dev.copy(png,filename = "plot3.png", width = 480, height = 480, units = "px")
dev.off()

# Plot 4 ------------------------------------------------------------------
par(mfcol = c(2, 2))
# Top left plot
plot(dat2$dateTime, dat2$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
# Bottom left plot
plot(dat2$dateTime, dat2$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(dat2$dateTime, dat2$Sub_metering_2, type = "l", xlab = "", ylab = "Energy sub metering", col = "red")
lines(dat2$dateTime, dat2$Sub_metering_3, type = "l", xlab = "", ylab = "Energy sub metering", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, col = c("black", "red", "blue"), bty = "n")
# Top right plot
plot(dat2$dateTime, dat2$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# Bottom right plot
plot(dat2$dateTime, dat2$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.copy(png,filename = "plot4.png", width = 480, height = 480, units = "px")
dev.off()

# Restore the default single-panel layout on the screen device.
par(mfcol = c(1, 1))
|
ab6a8217ffd766dec9ccb795b169c5d771fa3445
|
c3018f3062b72df8ac075707b832449f2798c555
|
/R/createtableoptions.R
|
53409673ac35290e23b692811d541a6550f2c63a
|
[] |
no_license
|
nachti/kudusparklyr
|
36f0e3cd3052514d0d0ffc10f4331c26277fe83e
|
0b099ada62dda31e8ba4724ff378c8835f75f8a5
|
refs/heads/master
| 2020-09-13T07:52:44.033653
| 2019-11-19T15:47:04
| 2019-11-19T15:47:04
| 222,701,328
| 0
| 0
| null | 2019-11-19T13:21:43
| 2019-11-19T13:21:42
| null |
UTF-8
|
R
| false
| false
| 1,700
|
r
|
createtableoptions.R
|
#' @title kudu_table_options
#' @description Create a fresh (empty) Kudu \code{CreateTableOptions} object
#'   on the JVM, to be configured with the \code{set_*}/\code{add_*} helpers
#'   in this file.
#'
#' @param sc spark connection
#'
#' @return A \code{jobj} wrapping \code{org.apache.kudu.client.CreateTableOptions}.
#'
#' @import magrittr
#' @export
kudu_table_options <- function(sc){
  invoke_new(sc, "org.apache.kudu.client.CreateTableOptions")
}
#' @title set_num_replicas
#' @description Set the number of tablet replicas for the table to be created.
#'
#' @param opts a \code{CreateTableOptions} jobj (see \code{kudu_table_options})
#' @param num_replicas integer containing the number of replicas
#' @export
set_num_replicas <- function(opts, num_replicas){
  # The Java API requires an int, so coerce before the call.
  invoke(opts, "setNumReplicas", as.integer(num_replicas))
}
#' @title add_hash_partitions
#' @description Add a hash-partitioning scheme over the given columns to the
#'   Kudu table options.
#'
#' @param opts a \code{CreateTableOptions} jobj (see \code{kudu_table_options})
#' @param columns character vector of column names to hash on
#' @param buckets number of hash buckets
#' @param seed seed used by the hash function
#'
#' @export
add_hash_partitions <- function(opts, columns, buckets,
                                seed = 0){
  # Build a java.util.ArrayList<String> of column names on the same
  # connection that `opts` lives on.  (Previously this referenced a global
  # `sc`, which failed with "no visible binding" / a runtime error when the
  # caller had no such object in scope.)
  sc <- spark_connection(opts)
  cols <- invoke_new(sc, "java.util.ArrayList")
  for(item in columns){
    cols %>% invoke("add", item)
  }
  opts %>% invoke("addHashPartitions", cols, as.integer(buckets),
                  as.integer(seed))
}
#' @title set_range_partition_columns
#' @description Set the columns used for range partitioning of the Kudu table.
#'
#' @param opts a \code{CreateTableOptions} jobj (see \code{kudu_table_options})
#' @param columns character vector of column names to range-partition on
#'
#' @export
set_range_partition_columns <- function(opts, columns){
  # Build the java.util.ArrayList<String> expected by the Kudu client, on
  # the connection that `opts` belongs to (was: an undefined global `sc`).
  sc <- spark_connection(opts)
  cols <- invoke_new(sc, "java.util.ArrayList")
  for(item in columns){
    cols %>% invoke("add", item)
  }
  # Pass the ArrayList that was just built -- the original passed the raw R
  # `columns` vector and left `cols` unused.
  opts %>% invoke("setRangePartitionColumns", cols)
}
|
b845fec9559a0f65e37111a4027b7a960a9baa43
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/vegan/examples/indpower.Rd.R
|
abab0c447e111cefa62e7148905b82c4d7cb429e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 835
|
r
|
indpower.Rd.R
|
library(vegan)
### Name: indpower
### Title: Indicator Power of Species
### Aliases: indpower
### Keywords: multivariate

### ** Examples

data(dune)

## Indicator-power matrix for the dune data.
ip <- indpower(dune)
## Total indicator power (TIP): row means excluding the self-pair diagonal.
diag(ip) <- NA
(TIP <- rowMeans(ip, na.rm = TRUE))

## p value calculation for one species, following Halme et al. 2009.
## `i` is the index (row) of the focal species.
i <- 1
fun <- function(x, i) indpower(x)[i, -i]
## The 'c0' null model randomizes species occurrences.
os <- oecosimu(dune, fun, "c0", i = i, nsimul = 99)
## z values taken from the oecosimu output.
z <- os$oecosimu$z
## Combined statistic over the z scores (labelled p in the original example).
(p <- sum(z) / sqrt(length(z)))
## 'heterogeneity' measure: squared deviations of z around its mean.
(chi2 <- sum((z - mean(z))^2))
pchisq(chi2, df = length(z) - 1)
## Summary output suggested by Halme et al.
out <- c(TIP = TIP[i],
         significance = p,
         heterogeneity = chi2,
         minIP = min(fun(dune, i = i)),
         varIP = sd(fun(dune, i = i)^2))
out
|
e4a52b4adebad012df1ca165c37d27b75d59a215
|
a2f1975e730afe2def76f7299dd4ca7ed0d61dd6
|
/DataBasesAnalysis/Sesion1_Estudiante.R
|
d729f9e594a142ac2f1392a57f9d9640be570b68
|
[] |
no_license
|
iRetray/R
|
9b5d7f1fd147b363d529db382300af08a9cda499
|
b452771c11fc312f8d3830fcf092fb299e408dd8
|
refs/heads/main
| 2023-05-06T06:48:44.568042
| 2021-05-31T02:43:15
| 2021-05-31T02:43:15
| 300,345,161
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,308
|
r
|
Sesion1_Estudiante.R
|
# DIRECTORIO DE TRABAJO.
#===============================================================================
getwd()
# R COMO CALCULADORA
#===============================================================================
# R reconoce los siguientes operadores matemáticos: suma(+), resta (-),
# multiplicación (*), división (/), exponentes (^), división entera (%/%)
# Ejercicio: Realizar algunas operaciones matemáticas.
# OPERADORES LÓGICOS EN R
#===============================================================================
# R trabaja con los siguientes operadores lógicos: menor o mayor (< o >),
# menor o igual (<=), mayor o igual (>=), exactamente igual (==),
# no es igual a (!=), no a (!a), a o b (a|b), a y b (a&b).
# Ejercicio:Revisar cómo funcionan los operadores lógicos.
# CREACIÓN DE VARIABLES
#===============================================================================
# Un objeto puede ser creado con el operador de asignación, el cual se denota
# como "<-" o con un "=".
# R es sensible a minúsculas y mayúsculas.
# Ejercicio: Crear algunas variables, por ejemplo: a <- 2.
# Si el objeto ya existe y se vuelve a asignar, su valor anterior es
# borrado después de la asignación (la modificación afecta solo objetos
# en memoria, no a los datos en el disco). El valor asignado puede
# ser el resultado de una operacion y/o de una función.
#Ejercicio: Reasignar variables y hacer operaciones con las variables.
# La función ls() simplemente lista los objetos en memoria:
# Solo se muestran los nombres de los mismos.
# Ejercicio: ¿Cuáles son los objetos que tenemos en la memoria?
# NOTA: Se puede usar punto y coma para separar comandos diferentes
# en la misma línea.
# Si se quiere listar solo aquellos objetos que contengan un carácter en
# particular, se puede usar la opción pattern (que se puede abreviar
# como pat).
# Ejercicio: Buscar todos los objetos con la siguiente opción ls(pat = "m")
# TIPOS DE DATOS EN R
#===============================================================================
# numeric: Número Decimal "4.5".
# numeric: Número Entero "4".
# logical: Boleano "TRUE - FALSE".
# characters: Texto o cadena de texto.
# Ejercicio: ¿Qué tipos de datos tengo?. Utilizar la función class()
|
0a1dcce9f787a60eae9ad8628ce117766c3f8578
|
666f1cc22538362c71a404c6977c05c54d09d587
|
/04_prepare-fixes.R
|
b74e280cb9d9dd1e17bba7ddf3fa974dd7921327
|
[] |
no_license
|
kuriwaki/cces_cumulative
|
09c8b2548006e5b0abf6dfdecaa1816dc50be2a4
|
2449f5c76fb29b1f0ea88060189357b12c9cb53e
|
refs/heads/main
| 2023-05-25T14:38:23.523698
| 2023-05-18T13:29:08
| 2023-05-18T13:29:08
| 93,090,050
| 19
| 1
| null | 2023-05-12T17:47:20
| 2017-06-01T19:00:36
|
TeX
|
UTF-8
|
R
| false
| false
| 2,688
|
r
|
04_prepare-fixes.R
|
# subsets to fix in 02
library(tidyverse)
library(dplyr)
library(haven)
load("data/output/01_responses/common_all.RData")
# Retrieve 2007 County -----
# Retrieve 2010 PID -------
pid3_cc10 <- cc10 |>
mutate(pid3 = V212a) |>
mutate(
pid3_char = as.character(as_factor(pid3)),
pid3_num = as.numeric(pid3)
) |>
mutate(
pid3_char = replace(pid3_char, pid3_char == "NaN", NA),
pid3_num = replace(pid3_num, is.nan(pid3_num), NA)
) |>
select(year, case_id, pid3_char, pid3_num)
# 2009 Economic retrospective recode
# Reverse the 1-5 scale of cc09_20 (economy retrospective) -- 1 becomes 5,
# 5 becomes 1 -- and attach a matching character label for each level.
econ_recoded <- cc09 |>
  select(year, case_id, cc09_20) |>
  mutate(economy_retro_num = recode(as.integer(haven::zap_labels(cc09_20)),
                                    `1` = 5L, `2` = 4L, `3` = 3L, `4` = 2L, `5` = 1L)) |>
  mutate(economy_retro_char = case_when(economy_retro_num == 5L ~ "Gotten Much Worse",
                                        economy_retro_num == 4L ~ "Gotten Worse / Somewhat Worse",
                                        economy_retro_num == 3L ~ "Stayed About The Same",
                                        economy_retro_num == 2L ~ "Gotten Better / Somewhat Better",
                                        economy_retro_num == 1L ~ "Gotten Much Better"
                                        )) |>
  select(-cc09_20)
# date time in 2006 and 2009 ----
# Parse timestamps of the form "Thu Nov  2 12:00:00 2006" into POSIXct.
fmt_date <- function(vec) {
  # %a abbreviated weekday, %b abbreviated month, %e space-padded day of
  # month, %T = %H:%M:%S, %Y four-digit year.  %a/%b parsing depends on the
  # session's LC_TIME locale.
  as.POSIXct(vec, format = "%a %b %e %T %Y")
}
cc06_time <- cc06 |>
mutate(starttime = fmt_date(starttime)) |>
select(year, case_id, starttime) |>
bind_rows(select(mit06_add, year, case_id, starttime))
cc09_time <- cc09 |>
mutate(starttime = as.POSIXct(v401)) |>
select(year, case_id, starttime)
# recode newsinterest to fit with 2008 - 2018
cc06_interest <- cc06 |>
transmute(year,
case_id,
interest = as.integer(v2042))
# save ---------
# Persist each fix-up table for downstream script 02.
write_rds(pid3_cc10, "data/output/01_responses/cc10_pid3.Rds")
write_rds(econ_recoded, "data/output/01_responses/cc09_econ_retro.Rds")
write_rds(cc06_time, "data/output/01_responses/cc06_datetime.Rds")
write_rds(cc09_time, "data/output/01_responses/cc09_datetime.Rds")
write_rds(cc06_interest, "data/output/01_responses/cc06_newsintnum.Rds")

# fs::file_copy("data/output/01_responses/cc06_newsintnum.Rds",
#               "~/Dropbox/CCES_representation/data/output/intermediate", overwrite = TRUE)

# 2009 split sample distinction
# One-off manual conversion of the 2009 recontact file to .dta; guarded by
# if (FALSE) so it never runs as part of the pipeline.
if (FALSE) {
  # NOTE(review): case_id is created from v100 and then dropped again by
  # select(-case_id) -- confirm which of the two is intended.
  p09_recontact <- read_dta("~/Dropbox/CCES_SDA/2009/Data/HUM/cces09_harvard_recontact_output.dta") %>%
    mutate(year = 2009, samp = "recontact", case_id = v100) %>%
    select(year, samp, everything()) %>%
    select(-case_id)

  write_dta(p09_recontact, "data/source/cces/2009_hum_recontact.dta")
}
|
67ed0ca59a84df9b388bbe43d5eaae478d400902
|
205e1e0a2e23f362b7987804ebe8e17a23ac6010
|
/inst/examples/input/selection/app.R
|
572bb30257c6898bc5a21049e593f87958b74a5b
|
[
"MIT"
] |
permissive
|
dreamRs/apexcharter
|
97f93ec61d2ad96f8bf2446fe50e2cb22f4824df
|
11d244e9922a9abe41aee90124224d8f5cababa9
|
refs/heads/master
| 2023-06-22T11:11:57.709837
| 2023-06-14T12:05:06
| 2023-06-14T12:05:06
| 142,926,526
| 135
| 15
|
NOASSERTION
| 2023-03-22T15:30:53
| 2018-07-30T20:47:09
|
R
|
UTF-8
|
R
| false
| false
| 1,027
|
r
|
app.R
|
library(shiny)
library(apexcharter)

data("economics", package = "ggplot2")

# One row of the layout: a chart on the left (8 columns) and the raw value of
# the Shiny input it drives on the right (4 columns).
selection_row <- function(label, chart_id, result_id) {
  fluidRow(
    column(
      width = 8,
      tags$b(label),
      apexchartOutput(chart_id)
    ),
    column(
      width = 4,
      verbatimTextOutput(result_id)
    )
  )
}

ui <- fluidPage(
  tags$h2("Retrieve selection information"),
  selection_row("Datetime", "chart1", "result1"),
  selection_row("Scatter", "chart2", "result2")
)

server <- function(input, output, session) {

  # Time-series line chart; a drag selection populates input$selection_ts.
  output$chart1 <- renderApexchart({
    apex(economics, aes(date, psavert), type = "line") %>%
      set_input_selection("selection_ts")
  })

  output$result1 <- renderPrint({
    input$selection_ts
  })

  # Scatter chart with a rectangular (x & y) selection box.
  output$chart2 <- renderApexchart({
    apex(iris, aes(Sepal.Length, Sepal.Width), type = "scatter") %>%
      set_input_selection("selection_scatter", type = "xy")
  })

  output$result2 <- renderPrint({
    input$selection_scatter
  })
}

shinyApp(ui, server)
|
5f5268586a2202089f89bbb211b1be613628e58a
|
65636eac47278518c1fc343559efa712eeb988fe
|
/tests/testthat/test_CoupledMWCA_common.R
|
c28cce57f4b3540bcf1d2f597f3f44238bf70f9e
|
[
"MIT"
] |
permissive
|
rikenbit/mwTensor
|
5a8b9bd15f4f11b0bdbad5274022b22cac43e498
|
36a9f19bab5952b33dd9a323d0def15cc917a310
|
refs/heads/main
| 2023-07-21T13:49:52.685004
| 2023-07-06T12:43:23
| 2023-07-06T12:43:23
| 381,632,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,509
|
r
|
test_CoupledMWCA_common.R
|
# Coupled-CP toy data: three tensors X1, X2, X3 coupled through shared modes
# (I2 between X1/X2, I4 between X2/X3).
Xs <- mwTensor::toyModel("coupled_CP_Easy")

# Fully specified parameter object; slot groups follow the numbered section
# comments below.
params <- new("CoupledMWCAParams",
    # 1. Data-wise setting
    Xs=Xs,
    mask=list(X1=NULL, X2=NULL, X3=NULL),
    pseudocount=1E-10,
    weights=list(X1=1, X2=1, X3=1),
    # 2. Common Model setting
    # Maps each tensor's modes (I1..I5) to shared factor matrices (A1..A5).
    common_model=list(
        X1=list(I1="A1", I2="A2"),
        X2=list(I2="A2", I3="A3", I4="A4"),
        X3=list(I4="A4", I5="A5")),
    # 3. Common Factor matrix-wise setting
    common_initial=list(A1=NULL, A2=NULL, A3=NULL, A4=NULL, A5=NULL),
    common_algorithms=list(A1="mySVD", A2="myALS_SVD", A3="myNMF",
        A4="myICA", A5="myCX"),
    common_iteration=list(A1=1, A2=10, A3=10, A4=10, A5=10),
    common_decomp=list(A1=TRUE, A2=TRUE, A3=TRUE, A4=TRUE, A5=TRUE),
    common_fix=list(A1=FALSE, A2=FALSE, A3=FALSE, A4=FALSE, A5=FALSE),
    common_dims=list(A1=3, A2=3, A3=5, A4=4, A5=4),
    common_transpose=list(A1=FALSE, A2=FALSE, A3=FALSE, A4=FALSE, A5=FALSE),
    common_coretype="Tucker",
    # 4. Specific Model setting
    specific_model=list(
        X1=list(J1="B1", J2="B2"),
        X2=list(J3="B3", J4="B4", J5="B5"),
        X3=list(J6="B6", J7="B7")),
    # 5. Specific Factor matrix-wise setting
    specific_initial=list(B1=NULL, B2=NULL, B3=NULL, B4=NULL, B5=NULL,
        B6=NULL, B7=NULL),
    specific_algorithms=list(B1="mySVD", B2="myALS_SVD", B3="myALS_SVD",
        B4="myNMF", B5="myICA", B6="myICA", B7="myCX"),
    specific_iteration=list(B1=1, B2=10, B3=10, B4=10, B5=10, B6=10, B7=10),
    specific_decomp=list(B1=TRUE, B2=TRUE, B3=TRUE, B4=TRUE, B5=TRUE,
        B6=TRUE, B7=TRUE),
    specific_fix=list(B1=FALSE, B2=FALSE, B3=FALSE, B4=FALSE, B5=FALSE,
        B6=FALSE, B7=FALSE),
    specific_dims=list(B1=1, B2=1, B3=1, B4=1, B5=1, B6=1, B7=1),
    specific_transpose=list(B1=FALSE, B2=FALSE, B3=FALSE, B4=FALSE,
        B5=FALSE, B6=FALSE, B7=FALSE),
    specific_coretype="Tucker",
    # 6. Other option
    # specific=FALSE disables the tensor-specific decomposition entirely,
    # which is why the specific_* output slots are checked as empty below.
    specific=FALSE,
    thr=1e-10,
    viz=FALSE,
    figdir=NULL,
    verbose=FALSE)

# Run the decomposition whose output slots are asserted in the tests below.
out <- CoupledMWCA(params)
# Test Output
# Data-wise setting
expect_equal(length(out@weights), 3)
# Common Factor Matrices: slot lengths mirror the 3 tensors / 5 factors of
# the params object above.
expect_equal(length(out@common_model), 3)
expect_equal(length(out@common_initial), 5)
expect_equal(length(out@common_algorithms), 5)
expect_equal(length(out@common_iteration), 5)
expect_equal(length(out@common_decomp), 5)
expect_equal(length(out@common_fix), 5)
expect_equal(length(out@common_dims), 5)
expect_equal(length(out@common_transpose), 5)
expect_identical(out@common_coretype, "Tucker")
# Each factor is (rank x mode-length); ranks come from common_dims.
expect_equal(dim(out@common_factors[[1]]), c(3, 20))
expect_equal(dim(out@common_factors[[2]]), c(3, 30))
expect_equal(dim(out@common_factors[[3]]), c(5, 30))
expect_equal(dim(out@common_factors[[4]]), c(4, 30))
expect_equal(dim(out@common_factors[[5]]), c(4, 25))
# Core tensors: one per input tensor, shaped by that tensor's factor ranks.
expect_equal(dim(out@common_cores[[1]]), c(3,3))
expect_equal(dim(out@common_cores[[2]]), c(3,5,4))
expect_equal(dim(out@common_cores[[3]]), c(4,4))
# Specific Factor Matrices: settings are echoed back, but because
# specific=FALSE the factor/core outputs stay empty (list(NULL)).
expect_equal(length(out@specific_model), 3)
expect_identical(out@specific_initial, list(NULL))
expect_equal(length(out@specific_algorithms), 7)
expect_equal(length(out@specific_iteration), 7)
expect_equal(length(out@specific_decomp), 7)
expect_equal(length(out@specific_fix), 7)
expect_equal(length(out@specific_dims), 7)
expect_equal(length(out@specific_transpose), 7)
expect_identical(out@specific_coretype, "Tucker")
expect_identical(out@specific_factors, list(NULL))
expect_identical(out@specific_cores, list(NULL))
# Other option: input options are carried through unchanged.
expect_identical(out@specific, FALSE)
expect_identical(out@thr, 1e-10)
expect_equal(out@viz, FALSE)
expect_equal(out@figdir, NULL)
expect_equal(out@verbose, FALSE)
# Iteration diagnostics are numeric traces.
expect_true(is.numeric(out@rec_error))
expect_true(is.numeric(out@train_error))
expect_true(is.numeric(out@test_error))
expect_true(is.numeric(out@rel_change))
# Test Xs
Xs_dummy <- Xs
Xs_dummy[[1]][1,1] <- NA
params_dummy <- params
params_dummy@Xs <- Xs_dummy
expect_error(CoupledMWCA(params_dummy))
Xs_dummy[[1]][1,1] <- NaN
params_dummy <- params
params_dummy@Xs <- Xs_dummy
expect_error(CoupledMWCA(params_dummy))
Xs_dummy[[1]][1,1] <- Inf
params_dummy <- params
params_dummy@Xs <- Xs_dummy
expect_error(CoupledMWCA(params_dummy))
# Test mask
Ms <- Xs
Ms[[1]][] <- 1
Ms[[2]][] <- 1
Ms[[3]][] <- 1
params_dummy <- params
params_dummy@mask <- Ms
expect_error(expect_error(CoupledMWCA(params_dummy)))
Ms[[1]][1,1] <- 2
params_dummy <- params
params_dummy@mask <- Ms
expect_error(CoupledMWCA(params_dummy))
Ms[[1]][1,1] <- NA
params_dummy <- params
params_dummy@mask <- Ms
expect_error(CoupledMWCA(params_dummy))
Ms[[1]][1,1] <- Inf
params_dummy <- params
params_dummy@mask <- Ms
expect_error(CoupledMWCA(params_dummy))
# Test weights
params_dummy <- params
params_dummy@weights <- list(X1=1, X3=1)
expect_error(CoupledMWCA(params_dummy))
# Test model
params_dummy <- params
params_dummy@common_model <- list(
X1=list(I1="A1", I2="A2"),
X2=list(I2="A2", I3="A2", I4="A4"),
X3=list(I4="A4", I5="A5"))
expect_error(CoupledMWCA(params_dummy))
# Test initial
A1 <- mwTensor:::.randMat(3, 20)
A2 <- mwTensor:::.randMat(3, 30)
A2_dummy <- mwTensor:::.randMat(4, 30)
A3 <- mwTensor:::.randMat(5, 30)
A4 <- mwTensor:::.randMat(4, 30)
A5 <- mwTensor:::.randMat(4, 25)
params_dummy <- params
params_dummy@common_initial <- list(A1=A1, A2=A2_dummy, A3=A3, A4=A4, A5=A5)
expect_error(CoupledMWCA(params_dummy))
# Test algorithms
params_dummy <- params
params_dummy@common_algorithms <- list(A1="mySVDD", A2="myALS_SVD", A3="myNMF",
A4="myICA", A5="myCX")
expect_error(CoupledMWCA(params_dummy))
params_dummy <- params
params_dummy@common_algorithms <- list(A1="mySVD", A2="myALS_SVD", A3="myNMF",
A4="myICA")
expect_error(CoupledMWCA(params_dummy))
# Test iteration
params_dummy <- params
params_dummy@common_iteration <- list(A1=1.1, A2=2, A3=3, A4=4, A5=5)
expect_error(CoupledMWCA(params_dummy))
# Test initial/fix
params_dummy <- params
params_dummy@common_initial <- list(A1=A1, A2=A2, A3=A3, A4=A4, A5=A5)
params_dummy@common_fix <- list(A1=TRUE, A2=FALSE, A3=TRUE, A4=FALSE, A5=TRUE)
out_dummy <- CoupledMWCA(params_dummy)
expect_equal(out_dummy@common_factors$A1, A1)
expect_false(identical(out_dummy@common_factors$A2, A2))
expect_equal(out_dummy@common_factors$A3, A3)
expect_false(identical(out_dummy@common_factors$A4, A4))
expect_equal(out_dummy@common_factors$A5, A5)
# Test decomp
params_dummy <- params
params_dummy@common_initial <- list(A1=A1, A2=A2, A3=A3, A4=A4, A5=A5)
params_dummy@common_decomp <- list(A1=FALSE, A2=TRUE, A3=FALSE,
A4=TRUE, A5=FALSE)
out_dummy <- CoupledMWCA(params_dummy)
expect_equal(diag(out_dummy@common_factors$A1), rep(1, 3))
expect_equal(diag(out_dummy@common_factors$A3), rep(1, 5))
expect_equal(diag(out_dummy@common_factors$A5), rep(1, 4))
# Test dims
params_dummy <- params
params_dummy@common_dims <- list(A1=300, A2=3, A3=5, A4=4, A5=4)
expect_error(CoupledMWCA(params_dummy))
# Test transpose
params_dummy <- params
params_dummy@common_transpose <- list(A1=FALSE, A2=TRUE, A3=FALSE,
A4=TRUE, A5=FALSE)
expect_error(expect_error(CoupledMWCA(params_dummy)))
params_dummy <- params
params_dummy@common_transpose <- list(A1="FALSE", A2=TRUE, A3=FALSE,
A4=TRUE, A5=FALSE)
expect_error(CoupledMWCA(params_dummy))
# Test specific
params_dummy <- params
expect_error(expect_error(params_dummy@specific <- TRUE) )
expect_error(expect_error(params_dummy@specific <- FALSE))
expect_error(params_dummy@specific <- "TRUE")
# Test coretype
params_dummy <- params
params_dummy@common_coretype <- "TUCKER"
expect_error(CoupledMWCA(params_dummy))
# Test thr
params_dummy <- params
params_dummy@thr <- 1E+100
expect_equal(length(CoupledMWCA(params_dummy)@rel_change), 2)
# Test CP
.diag <- function(out){
num_modes <- mwTensor:::.ndim(out@data)
min.s <- min(dim(out@data))
tmp <- rep(0, min.s)
cmd <- paste0("for(i in seq_len(min.s)){",
"tmp[i] <- out@data[",
paste(rep("i", length=num_modes), collapse=","), "]}")
eval(parse(text=cmd))
tmp
}
.nonDiagonal <- function(X, k=1){
allels <- unique(as.vector(X@common_cores[[k]]@data))
diagels <- unique(.diag(X@common_cores[[k]]))
setdiff(allels, diagels)
}
params_cp <- params
params_cp@common_dims <- list(A1=3, A2=3, A3=3, A4=3, A5=3)
params_cp@common_coretype <- "CP"
res_cp <- CoupledMWCA(params_cp)
expect_equal(.nonDiagonal(res_cp, 1), 0)
expect_equal(.nonDiagonal(res_cp, 2), 0)
expect_equal(.nonDiagonal(res_cp, 3), 0)
|
07482118bee11ad366ca9c5876941859e038a807
|
0dfe50e7f553927442a27ed4b1cf366216b06727
|
/pls/coefficient-plot-LDPE.R
|
866141271606f69204bca1eee032f569abdd8056
|
[] |
no_license
|
kgdunn/figures
|
3543d2bcb96cc61cc9c2217da3a4210dd23b1103
|
662076362df316069ba9c903a0a71344da887142
|
refs/heads/main
| 2021-07-06T06:59:34.977099
| 2021-06-14T20:47:11
| 2021-06-14T20:47:11
| 244,129,830
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
r
|
coefficient-plot-LDPE.R
|
# Bar chart of PLS regression coefficients (model with A = 6 components) for
# the LDPE data set, rendered to a 300 dpi PNG via the bitmap device.
coeff <- read.csv('coefficient-plot-LDPE-A-is-6.csv')
library(lattice)
bitmap('coefficient-plot-LDPE-A-is-6.png', type="png256", width=10, height=5, res=300, pointsize=14)
# NOTE(review): lattice graphics do not honour par() margin settings, so the
# call below most likely has no effect on the barchart -- confirm before
# relying on it. Also, if this script is ever source()'d rather than run at
# top level, barchart(...) must be wrapped in print() to actually draw.
par(mar=c(4.5, 4.5, 0.5, 0.5)) # (bottom, left, top, right); defaults are par(mar=c(5, 4, 4, 2) + 0.1)
# One horizontal bar per X-variable (labelled from coeff$Name), bar length =
# coefficient value relating that variable to y = Conversion.
barchart(as.matrix(coeff$Value), ylab=list(label="X-variables", cex=1.5), xlab=list(cex=1.5,label="Coefficients related to y=Conversion"),
scales=list(y=list(labels=coeff$Name),cex=1.25), col=0)
dev.off()
|
9a94a600eb8c81792eac4773cdeefababe1085e0
|
9c451420f6f06663e6a08002ad88eb8054ba405a
|
/cachematrix.R
|
662a67b0a1a6f19e6d7822ffedcd7522169f9e17
|
[] |
no_license
|
olivem/RProgrammingAssignment2
|
4dce9ab34a8791be093b44d3392335611a0a65ac
|
ee47f9baa367bc62371c486fe65a0b8537c4f668
|
refs/heads/master
| 2021-01-22T06:48:54.454467
| 2014-08-24T13:14:40
| 2014-08-24T13:14:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,476
|
r
|
cachematrix.R
|
#This function creates a special "matrix" object that can cache its inverse.
#The first function, makeCacheMatrix has a number of functions within that
#1.sets the value of the object
#2.gets the value of the object
#3.sets the value of the inverse and
#4.gets the value of the inverse
# Build a cache-aware wrapper around a matrix.
#
# The returned object is a list of four accessor closures that share this
# function's environment, which acts as the cache:
#   set(y)          -- replace the stored matrix and drop any cached inverse
#   get()           -- return the stored matrix
#   setinverse(inverse) -- store a computed inverse in the cache
#   getinverse()    -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    list(
        set = function(y) {
            # <<- writes into the enclosing environment, so replacing the
            # matrix also invalidates any previously cached inverse.
            x <<- y
            cached_inverse <<- NULL
        },
        get = function() x,
        setinverse = function(inverse) cached_inverse <<- inverse,
        getinverse = function() cached_inverse
    )
}
#The second function, cacheSolve calculates the inverse of the special "matrix"
#object created with the above function. However, it first checks to see if the
#inverse has already been calculated. If so, it gets the inverse from the cache
#and skips the computation. Otherwise, it calculates the inverse of the data
#and sets the value of the inverse in the cache via the setinverse function.
#'Solve' function used to calculate the inverse.
# Return the inverse of the special "matrix" object built by makeCacheMatrix().
#
# On a cache hit the stored inverse is returned immediately (after a
# diagnostic message); on a miss the inverse is computed with solve(),
# stored in the object's cache for future calls, and returned.
#
# x   -- a cache-aware matrix object (list of accessors from makeCacheMatrix)
# ... -- further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    cached <- x$getinverse()
    if (is.null(cached)) {
        # Cache miss: compute, remember, and return the inverse.
        cached <- solve(x$get(), ...)
        x$setinverse(cached)
        cached
    } else {
        message("getting cached data")
        cached
    }
}
# Below script to test function.
# Matrix Vector
#z <- matrix(-4:-1,nrow = 2, byrow = TRUE)
# makeVector result
#k <- makeCacheMatrix(z)
# cachemean result
#cacheSolve(k)
|
9e2299f6b845d335530daf29591b7b512827d25d
|
ecb1635b28a5489d810e1ac07ae26d13d2814a8e
|
/R/trim_urls.r
|
ae21d9d6b20675399f9ed3c68b27d214f9aee5bd
|
[] |
no_license
|
alfcrisci/rTwChannel
|
7570d0959aaef80debb6d11f6dad19f0141183f3
|
d99dec1d300b37290e985a26eca25a2f94193254
|
refs/heads/master
| 2021-01-19T01:57:04.044235
| 2020-03-18T16:46:21
| 2020-03-18T16:46:21
| 42,121,011
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 401
|
r
|
trim_urls.r
|
#' trim_urls
#'
#' @description Strip hyperlinks (any "http..." token running up to the next
#'   blank character) out of a tweet message.
#'
#' @param x Character. Message of tweet (vectorised over messages).
#' @return The tweet message(s) with links removed.
#' @author Istituto per la Bioeconomia Firenze Italy Alfonso Crisci \email{alfonso.crisci@@ibe.cnr.it}
#' @keywords retweet
#'
#' @export
trim_urls <- function(x) {
  # "http" followed by everything up to the next space/tab character.
  url_pattern <- 'http[^[:blank:]]+'
  stringr::str_replace_all(x, url_pattern, '')
}
|
1c36c83da3ba56637a8d68d3d9c7b4a5981749c4
|
79db8bc1987a57e7063907c1d1a0ed95d2774817
|
/OIB_Sandbox.R
|
59277e9742d880cf230da90677e51b9c0b560ce6
|
[] |
no_license
|
kingjml/OIBSandbox
|
6bd88b907de2867bf08afcac53ba54b899f8fa06
|
c239463b1655eb02b60f59822876d117ba7fe132
|
refs/heads/master
| 2021-01-11T02:41:17.212364
| 2016-10-14T18:16:07
| 2016-10-14T18:16:07
| 70,935,908
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,094
|
r
|
OIB_Sandbox.R
|
#####################################################################
# OIB footprint testing sandbox #
# JKing 13/10/16 #
# Input: #
# Output: #
#####################################################################
#Required libraries
require(gstat)
require(rgdal)
require(sp)
require(rgeos)
require(ggplot2)
require(raster)
#####################################################################
# FUNCTIONS #
#####################################################################
#Gives the bearing (in degrees) between two points
# Compass-style bearing (degrees clockwise from north, i.e. the +y axis) of
# the vector (xx, yy). atan2 gives the mathematical angle, counter-clockwise
# from the +x axis; subtracting from 90 converts the convention.
# NOTE(review): the result is not normalised into [0, 360) -- confirm the
# callers (cos/sin of the converted angle) are happy with that.
bearingDeg <- function(yy,xx) {
  math_angle_deg <- atan2(yy, xx) * 180 / pi
  90 - math_angle_deg
}
#Degrees to radians
# Convert an angle (or vector of angles) from degrees to radians.
deg2rad <- function(deg) deg * (pi / 180)
#Creates a maximum extent polygon of a set of spatial points
# extentPoly: build a rectangular SpatialPolygons object from a 2x2 bounding
# box (as produced by sp::bbox), grown on every side by `offset`.
#
# Arguments:
#   bbox   -- 2x2 matrix, rows = lon/lat (x/y), cols = min/max. Its dimnames
#             are overwritten below, so any incoming names are ignored.
#   offset -- amount added around the box; a NEGATIVE value shrinks the
#             extent (the caller below passes -10 for exactly that purpose).
#   GCS    -- proj4 string used when no projected CRS is supplied.
#   PCS    -- optional projected CRS string; takes precedence when non-NULL.
# Returns: an sp::SpatialPolygons object holding one rectangle named "bbox".
# NOTE(review): depends on the sp package (SpatialPolygons/Polygon/CRS/
# proj4string) attached at the top of the script.
extentPoly <- function(bbox, offset, GCS="+proj=longlat +ellps=WGS84", PCS=NULL){
rownames(bbox) <- c("lon","lat")
colnames(bbox) <- c('min','max')
# Expand mins downward and maxes upward by `offset` on both axes.
bbox[1,1] = bbox[1,1] - offset
bbox[2,1] = bbox[2,1] - offset
bbox[1,2] = bbox[1,2] + offset
bbox[2,2] = bbox[2,2] + offset
# clockwise, 5 points to close it
bboxMat <- rbind( c(bbox['lon','min'],bbox['lat','min']), c(bbox['lon','min'],
bbox['lat','max']), c(bbox['lon','max'],bbox['lat','max']), c(bbox['lon','max'],
bbox['lat','min']), c(bbox['lon','min'],bbox['lat','min']) )
bboxSP <- SpatialPolygons( list(Polygons(list(Polygon(bboxMat)),"bbox")))
# Tag the polygon with the projected CRS if one was given, else the GCS.
if(!is.null(PCS)){
proj4string(bboxSP) <-CRS(PCS)
} else {
proj4string(bboxSP) <- CRS(GCS)
}
return(bboxSP)
}
#########################################
# CONSTANTS #
#########################################
setwd("C:/Users/KingJ/Documents/R") #Set this directory to where the script is located
snowfile <- "April13LiDARSite_Magna_ArcReady.csv"
fpAlong <- 5 # in meters alongtrack radar footprint
fpAcross <- 10 # in meters acrosstrack radar footprint;
numLines <- 100 #must be even; number of flight lines;
lengthLine <- 200 #in meters; length of each flight line;
GCS <- "+proj=longlat +ellps=WGS84" #CRS geographic coordniate system WGS84;
PCS <- "+proj=utm +zone=16 +north +units=m +ellps=WGS84" #CRS projected coordniate system UTM16N/WGS84
#########################################
# PROCESS DATA #
#########################################
#Load raw magnaprobe data
snow <-read.csv(file=snowfile,head=TRUE,sep=",")
coordinates(snow) <- ~ Longitude+Latitude
#Identify and remove GPS duplicates
zd <- zerodist(snow, zero = 0.0)
snow <- snow[-zd[,2], ]
#Identify and remove maxed out measurements
snow <- snow[snow$DepthCm!=120,]
#Set coordinate system and project to UTM 16N
proj4string(snow) <- CRS(GCS)
snow <- spTransform(snow, CRS(PCS))
hist(snow$DepthCm, breaks=20,freq = FALSE, main="Magnaprobe distrobution", xlab="Snow depth (cm)", ylab="PDF")
mean(snow$DepthCm)
min(snow$DepthCm)
max(snow$DepthCm)
#Create random points within the observation area, match them as pairs, and build pseudo flight lines
mpExtent <- extentPoly(bbox(snow),-10, GCS,PCS)
randomP <- spsample(mpExtent, numLines*2,"random")
randomDF <- data.frame(ID=1:length(randomP))
randomDF$lineId[sample(1:nrow(randomDF), nrow(randomDF), FALSE)] <- rep(1:(numLines), rep(2, numLines))
randomPDF <- SpatialPointsDataFrame(randomP,randomDF)
fpList <- lapply(split(randomPDF, randomPDF$lineId), function(x) Lines(list(Line(coordinates(x))), x$lineId[1L])) #fp is short for flight path
fpLength <- unlist(lapply(1:length(fpList), function(x) {sqrt((coordinates(fpList[[x]])[[1]][1] - coordinates(fpList[[x]])[[1]][2])^2 + (coordinates(fpList[[x]])[[1]][3] - coordinates(fpList[[x]])[[1]][4])^2)}))
fpDirection <- unlist(lapply(1:length(fpList), function(x) {bearingDeg(coordinates(fpList[[x]])[[1]][1] - coordinates(fpList[[x]])[[1]][2],coordinates(fpList[[x]])[[1]][3] - coordinates(fpList[[x]])[[1]][4])}))
#Create spatial lines
fpLines <- SpatialLines(fpList)
fpData <- data.frame(line = unique(randomPDF$lineId), length=fpLength)
rownames(fpData)<-fpData$line
fpLines <- SpatialLinesDataFrame(fpLines, fpData)
proj4string(fpLines) <- CRS(PCS)
#Plot data
mpPlot.df = snow@data
mpPlot.df$x = coordinates(snow)[,1]
mpPlot.df$y = coordinates(snow)[,2]
dev.new()
ggplot(mpPlot.df, aes(x,y))+
geom_point(aes(colour = mpPlot.df$DepthCm), size = 1)+
scale_colour_gradient(low = "green", high = "red", limit=c(min( mpPlot.df$DepthCm),max( mpPlot.df$DepthCm)), name="Snow depth (cm)")+
labs(title = "Magnaprobe FYI Snow Grid", x="Northing (m)", y="Easting(m)")
#This extends the randomly generated lines to a min length specified in the constants
npDf <- data.frame(lineId = rep(0, length(fpList)*2), x= rep(0, length(fpList)), y = rep(0, length(fpList)))
for (a in 1:length(fpList)){
nx1 <- coordinates(fpList[[a]])[[1]][1]+((lengthLine - fpLength[a])/2)*cos(deg2rad(fpDirection[a]))
ny1 <- coordinates(fpList[[a]])[[1]][3]+((lengthLine - fpLength[a])/2)*sin(deg2rad(fpDirection[a]))
nx2 <- coordinates(fpList[[a]])[[1]][2]-((lengthLine - fpLength[a])/2)*cos(deg2rad(fpDirection[a]))
ny2 <- coordinates(fpList[[a]])[[1]][4]-((lengthLine - fpLength[a])/2)*sin(deg2rad(fpDirection[a]))
npDf[(a-1)*2+1,] = as.numeric(c(fpList[[a]]@ID,nx1,ny1))
npDf[(a-1)*2+2,] = as.numeric(c(fpList[[a]]@ID,nx2,ny2))
}
coordinates(npDf) <- ~x+y
nfpList <- lapply(split(npDf, npDf$lineId), function(x) Lines(list(Line(coordinates(x))), x$lineId[1L]))
nfpLines <-SpatialLines(nfpList)
nfpData <- data.frame(line = unique(npDf$lineId), length=lengthLine)
rownames(fpData)<-nfpData$line
nfpLines <- SpatialLinesDataFrame(nfpLines, nfpData)
proj4string(nfpLines) <- CRS(PCS)
#Plot the lines over the extent box.
dev.new()
plot(mpExtent)
lines(nfpLines)
#Generate the snow radar footprints
#TODO: Write a function that can mimic variations in flight altitude.
numFp <- lengthLine/fpAlong
radarPoint <- lapply(1:length(nfpLines), function(x) {spsample(nfpLines[x,], numFp,type="regular")})
radarBuffer <- lapply(1:length(radarPoint), function(x) gBuffer(radarPoint[[x]], width = fpAlong/2, byid=TRUE,capStyle="ROUND", quadsegs=10))
radarSeg <- lapply(1:length(radarBuffer), function(x) gIntersection(nfpLines[x,],radarBuffer[[x]], byid=TRUE))
radarFootprint <- lapply(1:length(radarSeg), function(x) gBuffer(radarSeg[[x]], width=fpAcross/2, byid=TRUE, capStyle="FLAT")) #TODO, adjust width dynamicaly based on topography
radarFootprint <- lapply(1:length(radarSeg), function(x) radarFootprint[[x]][-c(1,length(radarFootprint[[x]]))]) #last one might be the wrong size, remove incase
radarDf <- lapply(1:length(radarFootprint), function(x) data.frame(Line = rep(x,length(radarFootprint[[x]])))) #add barring of the flight line here
radarFootprint <- do.call(bind, radarFootprint)
radarDf <- do.call(bind, radarDf)
radarFootprint <- SpatialPolygonsDataFrame(radarFootprint,radarDf)
radarPointMerge <- lapply(1:length(radarPoint), function(x) radarPoint[[x]][-c(1,length(radarPoint[[x]]))]) #last one might be the wrong size, remove incase
radarPointMerge <- do.call(bind, radarPointMerge)
#Display the generated footprints along with the in situ observations
ggplot(mpPlot.df, aes(x,y))+
scale_colour_gradient(low = "green", high = "red", limit=c(min( mpPlot.df$DepthCm),max( mpPlot.df$DepthCm)), name="Snow depth (cm)")+
geom_polygon(data=radarFootprint, aes(x=long, y=lat,group=id), alpha=1,colour="darkgrey", fill=NA, size=0.1)+
geom_point(aes(colour = mpPlot.df$DepthCm), size = 1)+
labs(title = "Synthetic snowradar footprints", x="Northing (m)", y="Easting(m)")
#Extract point observations within each footprint and derive statistics
mpPolylist <- over(radarFootprint, snow, returnList = TRUE, fn=NULL) #get point list to do precentile
resultMp <- data.frame(count = rep(NA,length(mpPolylist)),mean = rep(NA,length(mpPolylist)), sd = rep(NA,length(mpPolylist)))
for ( i in 1:nrow(resultMp)){
mpPoints.df = as.data.frame(mpPolylist[[i]])
if(nrow( mpPoints.df)>0){
resultMp$mean[i] = mean(mpPoints.df$DepthCm)
resultMp$count[i] = nrow(mpPoints.df)
resultMp$sd[i] = sd(mpPoints.df$DepthCm)
}
}
#Extract NN in situ measurement for each footprint centroid, this emulates the CRESIS lat long phase center
distNN <- gDistance(radarPointMerge, snow,byid=TRUE)
NN <- apply(distNN , 2, function(X) rownames(distNN )[order(X)][1])
resultMp$nnDist <- round(apply(distNN , 2, function(X) min(X)),2)
resultMp$nnVal <- snow$DepthCm[as.numeric(NN)]
#Clean up analysis; remove footprints with fewer than 10 in situ measurements
resultMp <- resultMp[complete.cases(resultMp),]
resultMp <- resultMp[resultMp$count>10,]
#Plot relationship between nn and integrated sampling
plot(resultMp$nnVal, resultMp$mean,
xlab="NN depth (cm)", ylab="Footprint depth (cm)",
xlim=c(0, 120), ylim=c(0, 120))
abline(0,1)
rmseMp = sqrt(sum((resultMp$nnVal-resultMp$mean)^2)/nrow(resultMp))
biasMp = mean(resultMp$nnVal-resultMp$mean)
corMp = cor(resultMp$nnVal,resultMp$mean)
text(10, 105, paste0("R = ",round(corMp,2)), cex = .8)
text(10, 110, paste0("Bias = ",round(biasMp,2)), cex = .8)
text(10, 115, paste0("RMSE = ",round(rmseMp,2)), cex = .8)
|
1b9d3d1c0b1744033cbd7d7e3bf5772a8bd0aa50
|
1a43a1e23100142da1197f72a09c3b256600e8e8
|
/R/RcppExports.R
|
2a3a9a24b51481b7612c3351360a6d2cf5d52fa8
|
[
"BSD-3-Clause"
] |
permissive
|
ThomasCarroll/rhumba
|
d4e62d5027eeb87766447bf8a6031c0611cc943c
|
06c304c06c60236f52feda09c03da2f2ffbbd9e7
|
refs/heads/master
| 2023-01-12T07:06:42.790576
| 2020-10-13T06:12:33
| 2020-10-13T06:12:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 877
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is auto-generated; substantive changes (renames,
# docs) belong in the C++ sources' Rcpp attributes, not here. All wrappers
# below are thin shims over compiled `_rhumba_*` entry points and return
# their .Call result invisibly.

# Pin the conda version string used by the embedded context.
set_conda_version <- function(conda_version) {
invisible(.Call(`_rhumba_set_conda_version`, conda_version))
}
# Set the logging verbosity level.
set_verbosity <- function(lvl) {
invisible(.Call(`_rhumba_set_verbosity`, lvl))
}
# Set the root (base environment) prefix path.
set_root_prefix <- function(root_prefix) {
invisible(.Call(`_rhumba_set_root_prefix`, root_prefix))
}
# Set the package channels to resolve from.
set_channels <- function(channels) {
invisible(.Call(`_rhumba_set_channels`, channels))
}
# Set an arbitrary named context option to the given value.
set_opt <- function(option, value) {
invisible(.Call(`_rhumba_set_opt`, option, value))
}
# Print the current context configuration (side effect only).
print_context <- function() {
invisible(.Call(`_rhumba_print_context`))
}
# NOTE(review): this export masks base::list() wherever the package namespace
# is attached -- consider renaming the underlying C++ export.
list <- function() {
invisible(.Call(`_rhumba_list`))
}
# Install the given package specs; optionally create the environment first.
install <- function(specs, create_env = FALSE) {
invisible(.Call(`_rhumba_install`, specs, create_env))
}
|
7819dca11616f82d24a7dfb1a56714cb9db8d21e
|
3a50e82d1796b6d80d384b38e45fdce6a72c2c64
|
/Lectures/Lecture 6 - Matrices.R
|
284e8bcc1f02d572333578cc02be3e396257fa15
|
[] |
no_license
|
senanarci/CMPE140
|
5362161ac19ab9416f4f8c02049d9277a2c1fd22
|
b564832bbce66c32ad20208036f483348660a6cb
|
refs/heads/master
| 2020-03-19T12:16:20.291400
| 2018-05-15T08:56:36
| 2018-05-15T08:56:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,840
|
r
|
Lecture 6 - Matrices.R
|
# In R, a matrix is a vector that has two additional attributes:
# * Number of rows
# * Number of columns
#
# As with vectors, every element of a matrix must be of the same _mode_;
# either purely numeric, or purely text, etc.
#
# Creating a matrix from a vector
# ===============
#
# One way to create a matrix is to begin with a specific vector that holds the values.
# When we specify the number of rows and columns in the desired matrix,
# R can create a matrix structure to hold them.
m <- matrix( c(1,2,3,4), nrow=2, ncol=2)
m
attributes(m)
dim(m)
class(m)
# Note that the vector is broken into `ncol` number of _columns_,
# each of size `nrow`. The values from the vector `c(1,2,3,4)` are put in a columns.
# This is called the _column-major order_.
#
# We can instead force a _row-major order_ by setting the `byrow` parameter to `TRUE'.
matrix( c(1,2,3,4), nrow=2, ncol=2, byrow=TRUE)
# If we specify only `nrow` or only `ncol`, and the unspecified one will be determined
# using the length of the vector.
matrix( 1:6, nrow=2 )
matrix( 1:6, ncol=3 )
# If the specified matrix sizes are not compatible with the vector's length,
# the vector is _recycled_ until it fills the matrix.
matrix( 1:5, nrow=2, ncol=3)
# The same recycling is done also when one of the shape parameters is omitted.
matrix( 1:5, nrow=2 )
# Accessing matrix elements
# ======
# Once we have data stored in a matrix, we may want to access its elements, rows, or columns.
# Accessing individual elements
# ----------------
# The element in the `r`-th row and the `c`-th column of a matrix `m` can be accessed
# with the `m[r,c]` notation.
m <- matrix(1:6, nrow=2)
m
m[1,1]
m[2,3]
# Row and column access
# ----------
# We may instead want to access the `r`-th row in its entirety.
# Then, we use the `m[r,]` notation. Similarly, `m[,c]` gives all entries in column `c`.
m <- matrix(1:6, nrow=2)
m
m[1,] # first row, all columns
m[,1] # first column, all rows
# Accessing ranges of rows/columns
# --
# You may have noticed that the notation to access elements is similar
# between vectors and matrices. As in matrices, we can provide a vector of indices
# to specify rows and columns.
m <- matrix( 1:12, nrow=3 )
m
# Select rows 1 and 2, all columns:
m[1:2,]
# select rows 1 and 2, second column only.
m[1:2, 2]
# Select rows 1 and 2, and columns 1,4 and 3, in that order.
m[1:2, c(1,4,3)]
# Excluding some rows and columns
# ---
# As seen in the context of vectors, negative indices can be used to
# get a new matrix with some rows/columns removed.
m <- matrix( 1:12, nrow=3 )
m
# Remove 3rd row.
m[-3,]
# Remove 2nd column
m[,-2]
# Remove 1st row and 3rd column
m[-1,-3]
# Remove columns from 1 to 2.
m[,-1:-2]
# Setting and getting row and column names
# ==
# As with vectors, we can provide names to the rows and to the columns of a matrix.
m <- matrix( 1:6, nrow=2)
m
# The functions `rownames()` and `colnames()` are used to set the names
# for rows and columns, respectively.
rownames(m) <- c("row I", "row II")
colnames(m) <- c("col a", "col b", "col c")
m
# When called without an assignment, they return the existing names.
rownames(m)
colnames(m)
# These names provide an alternative method to access matrix elements.
m["row I", "col b"]
m["row I",]
m[,"col a"]
# Create a matrix by setting individual elements
# =============
# Sometimes we may not have all the data at hand at once. It is possible to start
# with an empty matrix, and fill it up element-by-element.
m <- matrix(nrow=2, ncol=2)
m[1,1] <- 1
m[2,1] <- 2
m[1,2] <- 3
m[2,2] <- 4
m
# Create a matrix by combining columns or rows
# =========
# When we have several different vectors, we can combine them in
# _columns_ using `cbind()`, or by _rows_ using `rbind()`.
cbind( c(1,2), c(3,4) )
rbind( c(1,2), c(3,4) )
# Add a row or a column to an existing matrix
# ===
# The functions `cbind()` and `rbind()` can also be used to extend an existing matrix.
m <- matrix( 1:4, nrow = 2)
m
# Add a new column at the end of the matrix.
cbind(m, c(10,11))
# Add a new column at the beginning of the matrix.
cbind(c(10,11), m)
# Add a new row at the end of the matrix
rbind(m, c(10,11))
# Add a new row at the beginning of the matrix.
rbind(c(10,11), m)
# Insert a row or a column into a matrix
# ===============
# Another application of `cbind()` and `rbind()` is inserting
# columns and rows to existing matrices. As with vectors,
# such insertion is not done on the original matrix.
# We generate a new matrix using existing rows/columns,
# combine them with `rbind()`/`cbind()`, and reassign to the variable.
m <- matrix( 1:9, nrow=3, ncol=3)
m
# Insert a row between second and third rows.
rbind(m[1:2,], c(-1, -2, -3), m[3,])
# Insert a column between first and second columns
cbind( m[,1], c(-4,-5,-6), m[,2:3] )
# Assign new values to submatrices
# ==
# A matrix can be changed in-place by selecting a submatrix
# using index notation, and assigning a new matrix to it.
m <- matrix( 1:9, nrow=3 )
m
m[ c(1,2), c(2,3) ] <- matrix(c(20,21,22,23))
m
# Removing rows and columns
# ====
# To remove some selected rows or colums, we just use the index notation to
# specify the rows and columns we want to keep,
# and assign the result to the variable's name.
m <- matrix( 1:9, nrow=3 )
m
# Remove 2nd row.
m <- m[c(1,3),]
m
# Remove 1st column.
m <- m[, c(2,3)]
m
m[ c(1,3), c(2,3)]
# Filtering on matrices
# ========
m <- matrix( c(2,9,4,7,5,3,6,1,8) , nrow=3 )
m
m >= 5
m[m>=5]
m[m[,1]>=5]
m[ m< 5] <- 0
m
# Matrix recycling
# ==========
# Remember that when two vectors of different lengths are combined in an operation,
# the shorter one is _recycled_ (i.e., elements repeated until the desired length).
c(1,1,1,1,1) + c(1,2,3) # converted to c(1,1,1,1,1) + c(1,2,3,1,2)
# The same procedure also applies to matrices.
m1 <- matrix(1:9, nrow=3)
m2 <- matrix( c(1,2,3), nrow=3,ncol=3)
m2
m1 + m2
# Matrix operations
# =========
# transpose
# ----------
m <- matrix(1:4, nrow=2)
m
t(m)
# elementwise product
# --
m
m * m
# matrix multiplication
# --
m
m %*% m
# multiply by a scalar
# --
m
3 * m
# matrix addition
# --
m
m + m
# Functions on matrices
# ==============
m <- matrix( 1:9, nrow=3 )
m
rowSums(m)
rowMeans(m)
colSums(m)
colMeans(m)
# The apply() function
# ------
# The `apply()` function and its relatives are quite common in R programming.
# Here, we provide a function to apply to rows or columns, and the resulting vector
# of numbers is returned.
m <- matrix( 1:9, nrow=3)
m
apply(m, 1, mean) # same as rowMeans()
apply(m, 2, mean) # same as colMeans()
# We can also use `apply()` with user-defined functions.
alt_inverse_sum <- function(x) {return(sum(c(1,-1)/x))}
m <- matrix(1:12, nrow=3)
m
apply(m,1,alt_inverse_sum)
apply(m,2,alt_inverse_sum)
|
1644dcf4a78e4fcb551aa90b8b24b2c52d411431
|
dfcafafc4ad5a8281a45d694723503c25fa60e08
|
/plot2.R
|
83999d8c8cae2eab827cb237edafaae4c3e154c3
|
[] |
no_license
|
espensvendsen/ExData_Plotting1
|
16f21bf5c973a224b0782e31f50889039ff5096d
|
f122780876b8d1fc3da5831a6eb8f9fec7920656
|
refs/heads/master
| 2021-01-18T06:42:03.674515
| 2016-09-11T12:54:35
| 2016-09-11T12:54:35
| 67,923,132
| 0
| 0
| null | 2016-09-11T10:07:57
| 2016-09-11T10:07:57
| null |
UTF-8
|
R
| false
| false
| 971
|
r
|
plot2.R
|
# plot2.R -- line chart of Global Active Power over 2007-02-01..2007-02-02
# (Exploratory Data Analysis course, plot 2). Output: plot2.png (480x480).

# Loading: fetch the archive only if it is not already on disk, so re-running
# the script does not repeat the large download.
zipFile <- "household_power_consumption.zip"
if (!file.exists(zipFile)) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, destfile = zipFile, method = "curl")
}
consumption <- read.table(unz(zipFile, "household_power_consumption.txt"),
                          header = TRUE, quote = "\"", sep = ";", na.strings = "?",
                          colClasses = c("character", "character", "double", "double",
                                         "double", "double", "double", "double", "numeric"))
# Formatting: use POSIXct, not POSIXlt, for a data frame column -- POSIXlt is
# a list-based class and is not meant to be stored in data frames; plotting
# behaviour is identical.
consumption$DateTime <- as.POSIXct(paste(consumption$Date, consumption$Time),
                                   format = "%d/%m/%Y %H:%M:%S")
consumption$Date <- as.Date(strptime(consumption$Date, format = "%d/%m/%Y"))
# Plot: restrict to the two target days and draw power vs. time as a line.
png("plot2.png", width = 480, height = 480)
consFeb <- subset(consumption, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
with(consFeb, plot(DateTime, Global_active_power,
                   type = "l", ylab = "Global Active Power (kilowatts)", xlab = NA))
dev.off()
|
86e17b83aefdf2b460560ec3d1d7900df653723f
|
6247b7c36ce15b0d08a7a2111e2a99981ba794e3
|
/coba.r
|
6222603d88972d71e324c429bdb4527f48774fd2
|
[] |
no_license
|
kireikharisma/kmmi_r
|
21871d8295817cac19fffd24e8973dea726f32d3
|
6fd54a94fa1c6deebc4d444b2adc8d071c505f52
|
refs/heads/main
| 2023-06-29T21:13:36.744862
| 2021-08-08T19:09:51
| 2021-08-08T19:09:51
| 394,012,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
coba.r
|
# Two greeting strings; the bare names at the end auto-print them when the
# script is run at top level. Uses `<-`, the conventional R assignment
# operator, instead of `=`.
teks1 <- "Welcome"
teks2 <- "Let's Explore R"
teks1
teks2
|
efc141029e65db30559105f3a965d9c2234ee30f
|
8adc4c6b545b6c0c5739bc57d0cbccfe162b30cc
|
/tests/testthat/test-two-channel.R
|
82029025c29707d5e7feb9855e2287e0609787b6
|
[] |
no_license
|
alexvpickering/crossmeta
|
b43f8f55806b2c175abf24f734b675613003ed34
|
4aa2fca1d381eebdf92277418450b454b0b4908a
|
refs/heads/master
| 2022-06-18T18:44:02.918536
| 2022-05-27T21:05:22
| 2022-05-27T21:05:22
| 51,627,594
| 3
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,740
|
r
|
test-two-channel.R
|
library(testthat)
library(Biobase)
library(limma)
# RG normalized as in load_agil for two-channel arrays
apoa1_path <- system.file('testdata', 'Apoa1.RData', package = 'crossmeta')
load(apoa1_path)
elist <- RG
elist <- limma::backgroundCorrect(elist, method="normexp", offset=50)
elist <- limma::normalizeWithinArrays(elist, method="loess")
elist <- limma::normalizeBetweenArrays(elist, method="Aquantile")
# setup dummy eset with required pdata columns needed for phenoData.ch2
targets <- elist$targets
colnames(elist) <- targets$geo_accession <- row.names(targets)
elist$genes$rn <- seq_len(nrow(elist))
targets$label_ch1 <- 'Cy3'
targets$label_ch2 <- 'Cy5'
targets$source_name_ch1 <- targets$Cy3
targets$source_name_ch2 <- targets$Cy5
targets$Cy3 <- targets$Cy5 <- NULL
eset <- ExpressionSet(elist$M,
phenoData = as(targets, 'AnnotatedDataFrame'),
featureData = as(elist$genes, 'AnnotatedDataFrame'))
pdata <- crossmeta::phenoData.ch2(eset)
# phenoData.ch2 expands each two-colour array into one row per channel;
# verify the resulting channel ordering and row count.
test_that("phenoData.ch2 orders all reds first then all greens", {
res <- pdata@data$label
# All Cy5 (red) channel rows should come first, then all Cy3 (green) rows.
target <- rep(c('Cy5', 'Cy3'), each = ncol(RG))
expect_equal(res, target)
})
test_that("phenoData.ch2 produces twice as many rows as arrays", {
res <- length(pdata@data$label)
# Two channels per physical array => 2 * ncol(RG) phenoData rows.
target <- ncol(RG)*2
expect_equal(res, target)
})
test_that("crossmeta produces similar results to limma", {
# setups eset for diff_expr
E <- crossmeta:::exprs.MA(elist)
eset <- ExpressionSet(E,
phenoData = pdata,
featureData = as(elist$genes, 'AnnotatedDataFrame'))
# avoid GUI selection
eset$group <- make.names(eset$source_name)
eset <- list(Apoa1 = eset)
prev <- crossmeta::setup_prev(eset, 'ApoAI...-C57BL.6')
data_dir = tempdir()
dir.create(file.path(data_dir, names(eset)))
res <- crossmeta::diff_expr(eset, prev_anals = prev, data_dir = data_dir, svanal = FALSE, annot='rn')
# cleanup
unlink('Rplots.pdf')
res <- res$Apoa1$top_tables$`Apoa1_ApoAI...-C57BL.6`
# as in limma user guide
MA <- backgroundCorrect(RG, method="normexp", offset=50)
MA <- normalizeWithinArrays(MA, method = 'loess')
MA <- normalizeBetweenArrays(MA, method = 'Aquantile')
design <- cbind("Control-Ref"=1,"KO-Control"=MA$targets$Cy5=="ApoAI-/-")
fit <- lmFit(MA, design)
fit <- eBayes(fit)
tt <- topTable(fit,coef=2, n = Inf)
tt <- tt[!is.na(tt$NAME), ]
# annotation by row name so should be unchanged
expect_equal(nrow(tt), nrow(res))
# correlation between logFCs
lfc1 <- res$logFC
names(lfc1) <- row.names(res)
lfc2 <- tt$logFC
names(lfc2) <- row.names(tt)
lfc2 <- lfc2[names(lfc1)]
cor12 <- cor(lfc1, lfc2)
expect_gt(cor12, 0.96)
})
|
8734711481f51bfea3db32b15cd316839df4af08
|
fda540791ba58168598b8320571356a565f9faf1
|
/libs/1_enoe.R
|
9ac433bc0dc0bc9fff5181871f71de846840207b
|
[] |
no_license
|
monzalo14/conciliacion
|
5c3e1272090d3575552ab9b58b5b514ab9cfe58f
|
5e4670ec32026a85f5bedd0f01decee1cec01394
|
refs/heads/master
| 2021-01-12T08:58:27.217523
| 2017-05-04T07:16:47
| 2017-05-04T07:16:47
| 76,738,998
| 1
| 3
| null | 2017-02-15T18:40:55
| 2016-12-17T18:05:28
|
R
|
UTF-8
|
R
| false
| false
| 5,399
|
r
|
1_enoe.R
|
library(foreign)
library(rformat)
library(dplyr)
## sobre el ampliado
# Locate the quarterly ENOE DBF files (questionnaire parts 1/2, household and
# sociodemographic tables) anywhere under ../data.
part1 <- dir("../data", pattern = "coe1t1.*\\.dbf$", full.names = TRUE, recursive = T)
part2 <- dir("../data", pattern = "coe2t1.*\\.dbf$", full.names = TRUE, recursive = T)
hogs <- dir("../data", pattern = "hogt1.*\\.dbf$", full.names = TRUE, recursive = T)
sdem <- dir("../data", pattern = "sdemt1.*\\.dbf$", full.names = TRUE, recursive = T)
# helper: coerce a factor column to character
fac2chr <- function(x){as.character(x)}
# NOTE(review): read.dbf expects a single file, but these are whole vectors of
# paths, and none of the *_df objects are used below (the loop re-reads each
# file) -- consider removing these four lines.  TODO confirm.
hogs_df <- read.dbf(hogs)
sdem_df <- read.dbf(sdem)
part1_df <- read.dbf(part1)
part2_df <- read.dbf(part2)
# accumulators: person-level (df.all) and household-level (df.h.all) bases
df.all <- NULL
df.h.all <- NULL
for (i in seq(part1)) {
print(paste0("Anio procesado: ", i))
# questionnaire part 1, dropping incomplete/eliminated interviews
p <- read.dbf(part1[i]) %>%
dplyr::filter(R_DEF == "00")
p2 <- read.dbf(part2[i])
h <- read.dbf(hogs[i])
sd <- read.dbf(sdem[i]) %>%
dplyr::filter(
R_DEF == "00" # incomplete or eliminated interviews
, C_RES != "2" # people who no longer live in the dwelling
# , EDA != "00", EDA != "11", EDA != "99" # (disabled) drop under-12s
)
# for each period: read, join, then stack everything at the end
# household table: keys as character, keep only the identifier columns
hh <- dplyr::mutate_each(h, funs(fac2chr), CD_A, ENT, CON, UPM, D_SEM,
N_PRO_VIV, V_SEL, N_HOG, H_MUD, N_ENT, EST, T_LOC, PER) %>%
dplyr::select(., CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV, V_SEL, N_HOG,
H_MUD, N_ENT, PER, EST, T_LOC)
print("hh cool")
# sociodemographic table: keys as character, weight FAC as integer
sd <- dplyr::mutate_each(sd, funs(fac2chr),
R_DEF, CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV,
V_SEL, N_HOG, H_MUD, N_ENT, PER, N_REN, EDA, EST, T_LOC, FAC) %>%
dplyr::select(.,
R_DEF, CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV, V_SEL, N_HOG,
H_MUD, N_ENT, PER, N_REN, EDA, EST, EST_D, T_LOC, FAC, INGOCUP, SALARIO, CLASE1, CLASE2) %>%
dplyr::mutate(
FAC = as.integer(FAC)
)
print("sd cool")
# build the questionnaire table, attaching demographics and household traits
df <- dplyr::inner_join(p, p2) %>%
dplyr::mutate_each(., funs(fac2chr),
R_DEF, CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV, V_SEL, N_HOG,
H_MUD, N_ENT, PER, N_REN, EDA)
df <- inner_join(df, hh)
df <- inner_join(df, sd)
names(df) <- normalize_names(names(df))
print("df cool")
# build a per-household base: member counts, earners and household income
df.h <- sd %>%
dplyr::group_by(ENT, CON, UPM, D_SEM, N_PRO_VIV, V_SEL, N_HOG, H_MUD, N_ENT, PER, EST, EST_D, T_LOC, FAC) %>%
dplyr::summarise(
integrantes = n(),
integrantes.m12 = sum(ifelse(EDA %in% c("00", "11", "99"), 1, 0)),
perceptores = sum(ifelse(INGOCUP > 0, 1, 0)),
ingocup = sum(INGOCUP, na.rm = T)
)
names(df.h) <- normalize_names(names(df.h))
print(paste0("dim de df.h: ", nrow(df.h)))
rm(p, p2, h, hh)
rm(sd)
# stack this quarter onto the accumulated bases
df.all <- plyr::rbind.fill(df.all, df)
df.h.all <- plyr::rbind.fill(df.h.all, df.h)
rm(df)
rm(df.h)
}
# persist both bases for downstream scripts
saveRDS(df.h.all, "../data/sdem_hogares.rds")
saveRDS(df.all, "../data/enoe_ampliado_raw.rds")
rm(list = ls())
# NO juegues con todos, la base esta grande, juega con estos para limpiar mas vars!
# p <- read.dbf("2014trim1/coe1t114.dbf") %>%
# dplyr::filter(R_DEF == "00")
# p2 <- read.dbf("2014trim1/coe2t114.dbf")
# h <- read.dbf("2014trim1/hogt114.dbf")
# sd <- read.dbf("2014trim1/sdemt114.dbf") %>%
# dplyr::filter(
# R_DEF == "00" # entrevistas incompletas o eliminadas
# , C_RES != "2" # gente que ya no habita en la vivienda
# # , EDA != "00", EDA != "11", EDA != "99" # eliminamos a menores de 12
# )
# # por cada periodo, hay que hacer el read y el join y luego pegamos todo
#
# fac2chr <- function(x){as.character(x)}
# hh <- dplyr::mutate_each(h, funs(fac2chr), CD_A, ENT, CON, UPM, D_SEM,
# N_PRO_VIV, V_SEL, N_HOG, H_MUD, N_ENT, EST, T_LOC, PER) %>%
# dplyr::select(., CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV, V_SEL, N_HOG,
# H_MUD, N_ENT, PER, EST, T_LOC)
# sd <- dplyr::mutate_each(sd, funs(fac2chr),
# R_DEF, CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV,
# V_SEL, N_HOG, H_MUD, N_ENT, PER, N_REN, EDA, EST, T_LOC, FAC) %>%
# dplyr::select(.,
# R_DEF, CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV, V_SEL, N_HOG,
# H_MUD, N_ENT, PER, N_REN, EDA, EST, T_LOC, FAC, INGOCUP) %>%
# dplyr::mutate(
# FAC = as.integer(FAC)
# )
#
# ## Creo tabla de cuestionarios pegandole demograficos y carac del hogar
# df <- dplyr::inner_join(p, p2) %>%
# dplyr::mutate_each(., funs(fac2chr),
# R_DEF, CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV, V_SEL, N_HOG,
# H_MUD, N_ENT, PER, N_REN, EDA)
# df <- inner_join(df, hh)
# df <- inner_join(df, sd)
# names(df) <- normalize_names(names(df))
#
# ## Creo una base por hogar
# df.h <- sd %>%
# dplyr::group_by(CD_A, ENT, CON, UPM, D_SEM, N_PRO_VIV, V_SEL, N_HOG, H_MUD, N_ENT, PER, EST, T_LOC, FAC) %>%
# dplyr::summarise(
# integrantes = n(),
# integrantes.m12 = sum(ifelse(EDA %in% c("00", "11", "99"), 1, 0)),
# perceptores = sum(ifelse(INGOCUP > 0, 1, 0)),
# ingocup = sum(INGOCUP, na.rm = T)
# )
# names(df.h) <- normalize_names(names(df.h))
#
# rm(p, p2, h, hh)
# rm(sd)
|
3c6ffdfe310e1f9a68cdd9a18cbe374e0292d1f0
|
79757f415f30c2a89ed5fb8d6b46824adc8ab5ed
|
/Scripts/Tweet_seniments.R
|
98b8f49bb3ac396b7d43b659711a992312a16e10
|
[] |
no_license
|
aksoyundan/Khutbas
|
8764d2fa5c359eb7bcbd1798af5748d420aae88f
|
4b0121af6e2c5242fb143da8af0713536409e059
|
refs/heads/main
| 2023-04-23T17:27:08.892396
| 2021-05-11T13:26:11
| 2021-05-11T13:26:11
| 335,759,549
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,190
|
r
|
Tweet_seniments.R
|
# Turkish locale so string handling/sorting matches the tweet text.
Sys.setlocale(category = "LC_ALL", locale = "Turkish")
options(stringsAsFactors=F)
library(rtweet)
library(tidyverse)
library(lubridate)
library(ggplot2)
#library(stringr)
# Per-tweet sentiment scores and topic indicator counts (com/fam/hlt/...).
tws1520_s <- readRDS("data/sent/tws1520_s.RDS")
tws1520 <- readRDS("data/tws1520_c.RDS")
tws1520_t <- tws1520 %>%
select(com, fam, hlt, nat, pat, tru, uma, id)
tws1520_s <- tws1520_s %>%
inner_join(tws1520_t, by ="id")
# Topic-weighted sentiment: count * sentiment when the topic appears, else NA
# (NA keeps topic-free tweets out of the topic means below).
tws1520_s <- tws1520_s %>%
mutate (scom = ifelse(com > 0, com*sentiment, NA)) %>%
mutate (sfam = ifelse(fam > 0, fam*sentiment, NA)) %>%
mutate (shlt = ifelse(hlt > 0, hlt*sentiment, NA)) %>%
mutate (snat = ifelse(nat > 0, nat*sentiment, NA)) %>%
mutate (spat = ifelse(pat > 0, pat*sentiment, NA)) %>%
mutate (stru = ifelse(tru > 0, tru*sentiment, NA)) %>%
mutate (suma = ifelse(uma > 0, uma*sentiment, NA))
# NOTE(review): overwrites the input file read above -- rerunning the script
# re-joins onto already-augmented data; verify this is intended.
saveRDS(tws1520_s, "data/sent/tws1520_s.RDS")
# Weeks start on Friday (week_start = 5) to align with Friday khutbas.
tws1520_s <- tws1520_s %>%
mutate(week = floor_date(date_tr, "week",
week_start = getOption("lubridate.week.start", 5)))
############ weekly data #################################
# Weekly mean sentiment per topic (NA-aware) plus the overall mean.
tws1520_ws <- tws1520_s %>%
group_by(week) %>%
filter(week > "2015-01-01") %>%
summarise(sbusiness = mean(scom, na.rm = TRUE),
sfamily = mean(sfam, na.rm = TRUE),
shealth = mean(shlt, na.rm = TRUE),
snationalism = mean(snat, na.rm = TRUE),
spatience = mean(spat, na.rm = TRUE),
strust = mean(stru, na.rm = TRUE),
summa = mean(suma, na.rm = TRUE),
sall = mean(sentiment, na.rm = TRUE)) %>%
ungroup()
saveRDS(tws1520_ws, "data/sent/tws1520_ws.RDS")
########################## friday only #######################################
# Same aggregation restricted to Fridays (wday == 6), by week and hour, then
# reshaped long for plotting.
tws1520_sfr <- tws1520_s %>%
filter(wday(date_tr) == 6) %>%
mutate(hour = hour(date_tr))
tws1520_sfr <- tws1520_sfr %>%
group_by(week, hour) %>%
filter(week > "2015-01-01") %>%
summarise(sbusiness = mean(scom, na.rm = TRUE),
sfamily = mean(sfam, na.rm = TRUE),
shealth = mean(shlt, na.rm = TRUE),
snationalism = mean(snat, na.rm = TRUE),
spatience = mean(spat, na.rm = TRUE),
strust = mean(stru, na.rm = TRUE),
summa = mean(suma, na.rm = TRUE),
sall = mean(sentiment, na.rm = TRUE)) %>%
ungroup()
tws1520_sfr <- tws1520_sfr %>%
pivot_longer(-c(week, hour), names_to = "group", values_to = "average")
saveRDS(tws1520_sfr, "data/sent/tws1520_sfr.RDS")
################# computing sentiments ##########################
#tws1520 <- readRDS("data/tws1520.RDS")
#tws1520$date <- ymd_hms(tws1520$created_at)
#tws1520$date_tr <- with_tz(tws1520$date, tzone ="Europe/Istanbul")#
#tws1520 <- tws1520 %>%
# mutate(year = floor_date(date_tr, "year"))
#sent_scores <- function(x) syuzhet::get_sentiment(plain_tweets(x), method = "nrc",
# language ="turkish") - .5
#system.time({
#tws1520_s <- tws1520 %>%
# mutate( sentiment = sent_scores(text) ) %>%
# select(sentiment, id, date_tr)
#saveRDS(tws1520_s, "data/sent/tws1520_s.RDS")
#})
|
cc0d8661274c15bb60acae28f4bd3295033a9d4a
|
1f64d43e7d2c9398664a97a9384442c656b5b01f
|
/man/cache_lookup_users.Rd
|
ff03c4b2b400ab1433e0781f44356b14e3514f49
|
[
"MIT"
] |
permissive
|
alexpghayes/twittercache
|
8b8a2b4232230e28dbf0d5380b9253fbc5b1c95b
|
c84c38b4d479926d28444d6d72fd2a0c339cb7e2
|
refs/heads/master
| 2021-07-02T13:02:27.213464
| 2020-10-07T20:09:58
| 2020-10-07T20:09:58
| 194,210,911
| 5
| 0
|
NOASSERTION
| 2020-11-09T18:32:12
| 2019-06-28T05:20:26
|
R
|
UTF-8
|
R
| false
| true
| 344
|
rd
|
cache_lookup_users.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cached-api-calls.R
\name{cache_lookup_users}
\alias{cache_lookup_users}
\title{Look up Twitter users through the cache}
\usage{
cache_lookup_users(users)
}
\arguments{
\item{users}{Character vector of Twitter screen names or user IDs to look up.}
}
\value{
}
\description{
note: new users are gonna burn get_friends and get_followers tokens
so be careful here!
}
|
b0056b1f8ee5e072bf2e53aaf326be118bf67387
|
8b5214771da43e53875e1666b2a63d37ad8b6a9f
|
/R/TCRseq/expanded/run_expanded.R
|
371aa8c99e9600eaed82b0447d92ab7e0aa570f5
|
[] |
no_license
|
janihuuh/melanomap_manu
|
763e9d704248875a8920eec14b832bb26519d34e
|
6ec1d82ecf421c05633366d1635f789e5b715cf3
|
refs/heads/main
| 2023-04-13T04:52:44.955381
| 2022-01-28T16:17:44
| 2022-01-28T16:17:44
| 433,838,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,255
|
r
|
run_expanded.R
|
source("src/jani/R/tcrb/expanded/fun_expanded.R")
## Analyse the expanded clonotypes
# For each dataset: build the sample matrix, turn it into pre/post sampling
# pairs, run the differential-abundance analysis per pair (da_analysis_aa),
# and write the stacked results plus the pair/matrix tables.
helsinki_mat <- get_sample_matrix(vdj_files = list.files("data/unselected_TCRseq/Helsinki/", pattern = "MNC"), dataset = "Helsinki")
helsinki_pairs <- sample_matrix_to_pairs(helsinki_mat)
helsinki_exp <- pbapply::pbapply(helsinki_pairs, 1, da_analysis_aa, folder = "data/unselected_TCRseq/Helsinki/")
helsinki_exp <- do.call(rbind, helsinki_exp)
# NOTE(review): `local_folder` is not defined in this script (presumably set
# in fun_expanded.R -- TODO confirm), and the path string starts with a space;
# the other write.table calls use a plain relative path.
write.table(helsinki_exp, paste0(local_folder," results/expansion/helsinki_expansion.txt"), sep = "\t", quote = F, row.names = F)
write.table(helsinki_pairs, "results/expansion/helsinki_sample_pairs.txt", sep = "\t", quote = F, row.names = F)
write.table(helsinki_mat, "results/expansion/helsinki_sample_matrix.txt", sep = "\t", quote = F, row.names = F)
# Tumeh cohort (same pipeline, MNC samples)
tumeh_mat <- get_sample_matrix(list.files("data/unselected_TCRseq/Tumeh/", pattern = "MNC"), dataset = "Tumeh")
tumeh_pairs <- sample_matrix_to_pairs(tumeh_mat)
tumeh_exp <- pbapply::pbapply(tumeh_pairs, 1, da_analysis_aa, folder = "data/unselected_TCRseq/Tumeh/")
tumeh_exp <- do.call(rbind, tumeh_exp)
write.table(tumeh_exp, paste0("results/expansion/tumeh_expansion.txt"), sep = "\t", quote = F, row.names = F)
write.table(tumeh_pairs, "results/expansion/tumeh_sample_pairs.txt", sep = "\t", quote = F, row.names = F)
# Yusko cohort, MNC fraction (note the doubled slash in the folder path)
yusko_mnc_mat <- get_sample_matrix(list.files("data/unselected_TCRseq/Yusko/MNC/", pattern = "MNC"), dataset = "Yusko")
yusko_mnc_pairs <- sample_matrix_to_pairs(yusko_mnc_mat)
yusko_mnc_exp <- pbapply::pbapply(yusko_mnc_pairs, 1, da_analysis_aa, folder = "data/unselected_TCRseq/Yusko/MNC//")
yusko_mnc_exp <- do.call(rbind, yusko_mnc_exp)
write.table(yusko_mnc_exp, paste0("results/expansion/yusko_mnc_expansion.txt"), sep = "\t", quote = F, row.names = F)
write.table(yusko_mnc_pairs, "results/expansion/yusko_mnc_sample_pairs.txt", sep = "\t", quote = F, row.names = F)
write.table(yusko_mnc_mat, "results/expansion/yusko_mnc_sample_matrix.txt", sep = "\t", quote = F, row.names = F)
# Yusko cohort, CD8-sorted fraction
yusko_cd8_mat <- get_sample_matrix(list.files("data/unselected_TCRseq/Yusko/CD8/", pattern = "CD8"), dataset = "Yusko")
yusko_cd8_pairs <- sample_matrix_to_pairs(yusko_cd8_mat)
yusko_cd8_exp <- pbapply::pbapply(yusko_cd8_pairs, 1, da_analysis_aa, folder = "data/unselected_TCRseq/Yusko/CD8/")
yusko_cd8_exp <- do.call(rbind, yusko_cd8_exp)
write.table(yusko_cd8_exp, paste0("results/expansion/yusko_cd8_expansion.txt"), sep = "\t", quote = F, row.names = F)
write.table(yusko_cd8_pairs, "results/expansion/yusko_cd8_sample_pairs.txt", sep = "\t", quote = F, row.names = F)
write.table(yusko_cd8_mat, "results/expansion/yusko_cd8_sample_matrix.txt", sep = "\t", quote = F, row.names = F)
# Robert cohort
# NOTE(review): files are listed from "Robert/" but da_analysis_aa reads from
# "robert/" -- on a case-sensitive filesystem one of these will fail; verify.
robert_mat <- get_sample_matrix(list.files("data/unselected_TCRseq/Robert/"), dataset = "Robert")
robert_pairs <- sample_matrix_to_pairs(robert_mat)
robert_exp <- pbapply::pbapply(robert_pairs, 1, da_analysis_aa, folder = "data/unselected_TCRseq/robert/")
robert_exp <- do.call(rbind, robert_exp)
write.table(robert_exp, paste0("results/expansion/robert_expansion.txt"), sep = "\t", quote = F, row.names = F)
write.table(robert_pairs, "results/expansion/robert_sample_pairs.txt", sep = "\t", quote = F, row.names = F)
# Riaz cohort
riaz_mat <- get_sample_matrix(list.files("data/unselected_TCRseq/Riaz/"), dataset = "Riaz")
riaz_pairs <- sample_matrix_to_pairs(riaz_mat)
riaz_exp <- pbapply::pbapply(riaz_pairs, 1, da_analysis_aa, folder = "data/unselected_TCRseq/Riaz/")
riaz_exp <- do.call(rbind, riaz_exp)
write.table(riaz_exp, paste0("results/expansion/riaz_expansion.txt"), sep = "\t", quote = F, row.names = F)
write.table(riaz_pairs, "results/expansion/riaz_sample_pairs.txt", sep = "\t", quote = F, row.names = F)
# Keep only significantly expanded clonotypes: up-regulated direction,
# Benjamini-Hochberg significant, and log2 fold-change in count > 1.
sigf_expanded <- function(df) {
  df %>% filter(direction == "Up" & BH.sigf == "Sigf" & log2_FC_count > 1)
}
# BUG FIX: the original assigned the first four filtered tables back to
# *_exp, but the annotation step below used *_expanded, which was never
# defined for helsinki/tumeh/yusko_mnc/yusko_cd8 ("object not found").
# Assign everything to *_expanded consistently.
helsinki_expanded  <- sigf_expanded(helsinki_exp)
tumeh_expanded     <- sigf_expanded(tumeh_exp)
yusko_mnc_expanded <- sigf_expanded(yusko_mnc_exp)
yusko_cd8_expanded <- sigf_expanded(yusko_cd8_exp)
robert_expanded    <- sigf_expanded(robert_exp)
riaz_expanded      <- sigf_expanded(riaz_exp)
# Annotate each expanded clonotype with parsed sample metadata (breakName)
# and a "<sample name>_<CDR3 aa>" identifier used downstream.
add_clonotype_name <- function(df) {
  df %>%
    bind_cols(breakName(df$sample1_name)) %>%
    mutate(clonotypename = paste(name, cdr3aa, sep = "_"))
}
helsinki_expanded  <- add_clonotype_name(helsinki_expanded)
tumeh_expanded     <- add_clonotype_name(tumeh_expanded)
yusko_mnc_expanded <- add_clonotype_name(yusko_mnc_expanded)
yusko_cd8_expanded <- add_clonotype_name(yusko_cd8_expanded)
robert_expanded    <- add_clonotype_name(robert_expanded)
riaz_expanded      <- add_clonotype_name(riaz_expanded)
# Persist the per-dataset significant-expansion tables.
write.table(helsinki_expanded, "results/expansion/expanded/helsinki_sigf_expanded.txt", sep = "\t", quote = F, row.names = F)
write.table(tumeh_expanded, "results/expansion/expanded/tumeh_sigf_expanded.txt", sep = "\t", quote = F, row.names = F)
write.table(yusko_mnc_expanded, "results/expansion/expanded/yusko_mnc_sigf_expanded.txt", sep = "\t", quote = F, row.names = F)
write.table(yusko_cd8_expanded, "results/expansion/expanded/yusko_cd8_sigf_expanded.txt", sep = "\t", quote = F, row.names = F)
write.table(robert_expanded, "results/expansion/expanded/robert_sigf_expanded.txt", sep = "\t", quote = F, row.names = F)
write.table(riaz_expanded, "results/expansion/expanded/riaz_sigf_expanded.txt", sep = "\t", quote = F, row.names = F)
|
f1957d84dccbe200ec2708e8811fe2076d4f73d9
|
59baf1491b149066ebe3451863778f04f334ea9c
|
/man/splom_select.Rd
|
4e9d214b76e2ae7811990da7e53055d6501bb574
|
[] |
no_license
|
amy-mcg/tephrochron
|
ef2de9dc5d795a856286ed1a8c433319e3de7e8e
|
469d5164033a1fe63597b82268451e154446250a
|
refs/heads/master
| 2023-01-28T04:33:03.234496
| 2020-12-11T10:40:42
| 2020-12-11T10:40:42
| 268,802,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 888
|
rd
|
splom_select.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splom_select.R
\name{splom_select}
\alias{splom_select}
\title{Scatterplot matrix of selected elements}
\usage{
splom_select(data, cols, colour, symbol)
}
\arguments{
\item{data}{A data frame containing element geochemistries}
\item{cols}{The names of columns containing selected element geochemistries}
\item{colour}{The variable which sets the plotting colour (e.g. sample id)}
\item{symbol}{The variable which sets the plotting symbol (e.g. site, volcano)}
}
\value{
A scatterplot matrix showing selected element geochemistries
}
\description{
Create a scatterplot matrix showing selected element geochemistries
}
\examples{
plot <- splom_select(data, cols = c("SiO2","K2O","Na2O"), Sample_ID);
data$K2O_Na2O <- data$K2O / data$Na2O
plot <- splom_select(data, cols = c("SiO2","K2O_Na2O"), Sample_ID);
}
|
bfbd7cbd6863dced84e447058700631ea3e399da
|
1cc9f664bd685d1dda000272a3e5df8903fd6b22
|
/code-zi-pi-plot.R
|
55441c21fc3f8dda3c6b97dfff2e542fe143dfa0
|
[] |
no_license
|
bing-g/AD-BW-network
|
df84a940a5c60a9ccc9aa1c888d91d7372d3269f
|
dd1983dc253815b9cfbc6695ec418fb1701c5554
|
refs/heads/main
| 2023-05-26T13:35:03.769230
| 2023-05-15T15:18:12
| 2023-05-15T15:18:12
| 367,874,988
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,730
|
r
|
code-zi-pi-plot.R
|
# calculate Pi, Zi
library(igraph)

# Within-module degree z-score (Zi) and among-module connectivity /
# participation coefficient (Pi) for every node, given a community partition
# (Guimera & Amaral 2005).  Replaces five copy-pasted versions of the same
# loop -- one per group -- with a single function.
#
# graph:       an igraph object.
# communities: a communities object (e.g. from fastgreedy clustering) whose
#              membership() matches the vertices of `graph`.
# Returns a matrix with columns degree, module, Pi, Zi (one row per node).
compute_zi_pi <- function(graph, communities) {
  seqdeg <- degree(graph)
  Nnodes <- length(seqdeg)
  Z <- seqdeg
  Z[] <- 0
  P <- Z
  Membership <- membership(communities)
  Seq <- seq(1:Nnodes)
  for (i in 1:Nnodes) {
    # L: mask of nodes in the same module as node i
    L <- Membership == Membership[i]
    neighbs <- neighbors(graph, i)
    # Kis: number of links from node i to nodes within its own module
    Kis <- sum(L[neighbs])
    SUM <- 0
    SUMsq <- 0
    SUMP <- 0
    Miv <- Seq[L]
    # first and second moments of within-module degree over the module
    for (j in 1:sum(L)) {
      neighbsj <- neighbors(graph, Miv[j])
      Kjs <- sum(L[neighbsj])
      SUM <- SUM + Kjs
      SUMsq <- SUMsq + Kjs^2
    }
    # Zi: z-score of Kis relative to its module's mean/sd
    Z[i] <- (Kis - SUM/sum(L)) / sqrt(SUMsq/sum(L) - (SUM/sum(L))^2)
    if (Kis - SUM/sum(L) == 0) { Z[i] <- 0 }
    # Pi: 1 - sum over modules k of (links of i into k / degree of i)^2
    for (k in 1:max(Membership)) {
      Lp <- Membership == k
      Kisp <- sum(Lp[neighbs])
      SUMP <- SUMP + (Kisp/seqdeg[i])^2
    }
    P[i] <- 1 - SUMP
  }
  cbind(degree = seqdeg, module = Membership, Pi = P, Zi = Z)
}

# One attribute table per group; variable names unchanged so the plotting
# section below keeps working.
attribute_node.group1 <- compute_zi_pi(igraph.group1, fc.group1)
attribute_node.group2 <- compute_zi_pi(igraph.group2, fc.group2)
attribute_node.group3 <- compute_zi_pi(igraph.group3, fc.group3)
attribute_node.group4 <- compute_zi_pi(igraph.group4, fc.group4)
attribute_node.group5 <- compute_zi_pi(igraph.group5, fc.group5)
###################################################################
# Zi-Pi scatter for all five groups on one plot.
# Columns: [,3] = Pi (among-module connectivity), [,4] = Zi (within-module).
par(mfrow=c(1,1),mar=c(4,4,2,8))
plot(attribute_node.group1[,3],attribute_node.group1[,4]
,xlim=c(0,1),ylim=c(-4,4),xlab="Among-module connectivity Pi",ylab=("Within-module connectivity Zi"),col=2,pch=1,cex=0.8)
# Classification thresholds: Pi = 0.62, Zi = 2.5 split the plane into
# peripherals / connectors / module hubs / network hubs.
abline(v=0.62,h=2.5,col=8)
points(attribute_node.group5[,3],attribute_node.group5[,4],
col="grey27",pch=0,cex=0.8)
points(attribute_node.group4[,3],attribute_node.group4[,4],
col="cadetblue1",pch=5,cex=0.8)
points(attribute_node.group2[,3],attribute_node.group2[,4],
col=3,pch=6,cex=0.8)
points(attribute_node.group3[,3],attribute_node.group3[,4],
col=4,pch=2,cex=0.8)
# Quadrant labels
text(0.15,4,"Module hubs")
text(0.8,4,"Network hubs")
text(0.15,-4,"Peripherals")
text(0.8,-4,"Connectors")
# Legend drawn outside the plot region (xpd = T); order matches the groups.
legend(1.05,4,legend=c("Ambient","Mesophilic Low-solid","Mesophilic","Mesophilic Co-digestion","Thermo"),
pch=c(0,2,5,6,1),col=c("grey27",4,"cadetblue1",3,2),xpd=T,bty="n",pt.lwd = 2)
|
b76058137907afc0e33bb25460d9bdc51dd9a6ff
|
a4fc7ca6720c4eab3065e6c0dedd0eb2895bbbb3
|
/4/34.R
|
af4b22e896365410724d5a8d6bc67913b4fb92b0
|
[] |
no_license
|
minoeru/100KnockR
|
38b3499f75e4d99a38fb947db13980d9bdcf24b1
|
7a01aab1b19ab9dd6181ee2142c425fc1718c04a
|
refs/heads/master
| 2020-09-26T13:39:15.086507
| 2020-08-12T08:48:25
| 2020-08-12T08:48:25
| 226,265,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
34.R
|
library(RMeCab)
library(magrittr)
# Print every "A no B" phrase (noun + の + noun) found in neko.txt, using
# RMeCab's tokenisation (token[1] = surface form, token[2] = part of speech).
# Fix: the original iterated with 1:length(x), which yields c(1, 0) for empty
# input (`1:0`) and would index out of range; iterate over elements directly.
Func34 <- function() {
  tokens <- RMeCabText("neko.txt")
  surfaces <- unlist(lapply(tokens, function(tok) tok[1]))
  pos_tags <- unlist(lapply(tokens, function(tok) tok[2]))
  # positions of the particle "の" and of nouns ("名詞")
  no_idx <- grep("^の$", surfaces)
  noun_idx <- grep("名詞", pos_tags)
  # starts p such that p is a noun, p + 1 is "の", and p + 2 is a noun
  starts <- intersect(intersect(no_idx - 1, noun_idx),
                      intersect(no_idx + 1, noun_idx) - 2)
  # print each phrase; return the list of printed strings invisibly,
  # matching the original's (invisible) return value
  out <- lapply(starts, function(s) print(paste0(surfaces[s:(s + 2)], collapse = "")))
  invisible(out)
}
Func34()
|
1a9c3f23a2273b62b9a5a61bfa5ca439611d40a0
|
4f63cfcfa3be78d33ecae08123d90fed61f98f8d
|
/R/get_bb_aspect_ratio.R
|
a130b48c7a87b01d4447135e238dbdebd42a1f73
|
[] |
no_license
|
Mallabarius/rmaps
|
6d1b1b91d82728bc42f919f282091e20738b02b7
|
c3b223a0b5bcf2cc4ff314fe5fb5640bd35cc4bc
|
refs/heads/master
| 2023-03-16T12:09:37.321704
| 2021-03-09T16:14:29
| 2021-03-09T16:14:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
get_bb_aspect_ratio.R
|
#' Get the aspect ratio of a bounding box
#'
#' Width and height are measured as distances between the box corners via
#' \code{sf::st_distance()}, so for geographic coordinates the ratio reflects
#' ground distance rather than raw degrees.
#'
#' @param bb bounding box: an object with \code{xmin}, \code{ymin},
#'   \code{xmax}, \code{ymax} components
#' @param crs coordinate reference system of the corner coordinates;
#'   defaults to 4326 (WGS 84 lon/lat), matching the previous behaviour
#'
#' @return the aspect ratio (width / height) as a single number
#' @export
#'
get_bb_aspect_ratio <- function (bb, crs = 4326) {
  # Corner points: lower-left/lower-right span the width,
  # lower-left/upper-left span the height.
  ll <- st_point(c(bb$xmin, bb$ymin))
  lr <- st_point(c(bb$xmax, bb$ymin))
  ul <- st_point(c(bb$xmin, bb$ymax))
  # st_distance() returns a pairwise matrix; [1, 2] is the corner-to-corner
  # distance in actual units (metres for geographic CRS).
  w <- st_distance(st_sfc(ll, lr, crs = crs))[1, 2]
  h <- st_distance(st_sfc(ll, ul, crs = crs))[1, 2]
  as.numeric(w / h)
}
|
4bb1865381e91648a02d880bc2029e0dc712ea95
|
385940a7794a7bf3beb84a30b24de7eb34f7efaf
|
/Afrobarometer - Stage Two Data Cleaning.r
|
8a3d38af71ce5136b96562e65f054cc9e2ee70fa
|
[] |
no_license
|
adelinelo/Ethnic-wives
|
dd5bba4602539324c2fdda1462bf41a37d137da4
|
3ce024d7d55375ad15386370980ec9359e6fd801
|
refs/heads/master
| 2020-03-26T22:36:06.074398
| 2018-08-20T21:34:30
| 2018-08-20T21:34:30
| 145,472,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,440
|
r
|
Afrobarometer - Stage Two Data Cleaning.r
|
###############################################
#
# The Spousal Bump:
# Do cross-ethnic marriages increase political support in multiethnic democracies?
#
# Data Replication
#
# Claire Adida
# Nathan Combes
# Adeline Lo
# Alex Verink
#
###############################################
###############################################
# This code reads in the merged and cleaned Afrobarometer
# Rounds 3 and 4 data, along with a dataset of the ethnicities of
# political opponents. It merges these two datasets, recodes
# the ethnicity variables incorporating new information, and
# creates a variable for the portion of the population constituted by
# each ethnic group.
###############################################
library(foreign)
# NOTE(review): rm(list=ls()) in a script wipes the caller's workspace --
# discouraged; consider removing once the replication workflow allows it.
rm(list=ls())
###############################################
# Read in Data
###############################################
# Merged Afrobarometer rounds 3/4 plus the opponent-ethnicity lookup table.
afbData <-read.dta(file="ABR3and4Data.dta")
ethData <- read.csv(file = "Opponents and Wives.csv")
###############################################
# Merge two datasets
###############################################
#change country label in afb to lower-case to match, preserve upper case in Country variable
afbData$country <- tolower(afbData$country)
#add the AfB code for the ethnicity(ethnicities) of the opponent to each row
afbDataMerge <- merge(afbData,ethData[,c("country","Round","Opponent.AfB.Ethnic.Code","Opponent.AfB.Ethnic.Code.2")],by.x=c("country","round"),by.y=c("country","Round"),all.x=T)
###############################################
# Create Opponent Coethnic Variable
###############################################
#replace NA with 0 for opponent ethnic code variables
afbDataMerge$Opponent.AfB.Ethnic.Code[is.na(afbDataMerge$Opponent.AfB.Ethnic.Code)]<-0
afbDataMerge$Opponent.AfB.Ethnic.Code.2[is.na(afbDataMerge$Opponent.AfB.Ethnic.Code.2)]<-0
# oppcoethnic is NA if there was no opponent ethnicity in the lookup data
# (both opponent code variables are zero); 1 if the respondent's round-specific
# ethnicity matches either opponent ethnicity code; 0 otherwise.
afbDataMerge$oppcoethnic <- ifelse(afbDataMerge$Opponent.AfB.Ethnic.Code+afbDataMerge$Opponent.AfB.Ethnic.Code.2==0,NA,
ifelse(afbDataMerge$round==3,
ifelse(afbDataMerge$Opponent.AfB.Ethnic.Code==afbDataMerge$R3Ethnic|
afbDataMerge$Opponent.AfB.Ethnic.Code.2==afbDataMerge$R3Ethnic,1,0),
ifelse(afbDataMerge$Opponent.AfB.Ethnic.Code==afbDataMerge$R4Ethnic|
afbDataMerge$Opponent.AfB.Ethnic.Code.2==afbDataMerge$R4Ethnic,1,0)))
# Set oppcoethnic to NA for invalid respondent ethnicity codes
# (negative, or > 990 -- missing/refused codes in the AfB codebook; confirm).
afbDataMerge$oppcoethnic <- ifelse(afbDataMerge$round==3,
ifelse(afbDataMerge$R3Ethnic<0|afbDataMerge$R3Ethnic>990,NA,afbDataMerge$oppcoethnic),
ifelse(afbDataMerge$R4Ethnic<0|afbDataMerge$R4Ethnic>990,NA,afbDataMerge$oppcoethnic))
###############################################
# Create ethfrac variable (ethnic group % of population)
###############################################
library(Zelig)
library(ZeligChoice)
afb <- afbDataMerge
rm(list=c("afbData","afbDataMerge","ethData"))
# Survey weights: Withinwt for round 4, withinwt for round 3; each row's
# weight is the number of individuals it represents, and withinwt sums to the
# total number of respondents per country-round.
# check Tanzania
vars<-c("country","oppcoethnic","round","q79")
tz<-afb[vars]
tz3<-tz[which(tz$round==3),] #Note madagascar has no oppcoethnic==1 (not15%), Zim has no oppcoethnic==1 (no Shona/Karanga)
tz4<-tz[which(tz$round==4),] #Note madagascar has no oppcoethnic==1 (not15%)
table(tz3$country,tz3$oppcoethnic)
# Sum weights over individuals of the same ethnicity, giving each ethnic
# group's weight out of the total number of individuals.
#test round 4
table(afb$country,afb$round)
#Withinwt<-as.numeric(levels(afb$Withinwt))[afb$Withinwt] #change factor Withinwt to numeric
#afb$Withinwt<-Withinwt
testa<-aggregate(afb$Withinwt, list(Country=afb$country, Round=afb$round), sum) #check that this sums up to # of respondents per country
testa<-testa[19:38,] #just round4
ethnicfrac<-aggregate(afb$Withinwt, list(Country=afb$country, Ethnic=afb$R4Ethnic), sum)
#check that summing across one country gets full # of obs per country
testb<-aggregate(ethnicfrac$x,list(Country=ethnicfrac$Country),sum) #testa$x should be same as testb$x
# ethnic group weight / country total ==> estimated population share of group
#each country # of individuals
#merge
ethnicfracR4<-merge(ethnicfrac,testa, by.x=c("Country"),by.y=c("Country"),all.x=T)
ethnicfracR4$ethnicpercent<-(ethnicfracR4$x.x/ethnicfracR4$x.y)
aggregate(ethnicfracR4$ethnicpercent, list(Country=ethnicfracR4$Country), sum) #check that ethnicpercent adds up to 1 for each country
####### do it for R3 ##
# Same computation for round 3 (lower-case withinwt variable).
testa<-aggregate(afb$withinwt, list(Country=afb$country, Round=afb$round), sum)#check that this sums up to # of respondents per country
testa<-testa[1:18,] #just round 3
ethnicfrac<-aggregate(afb$withinwt, list(Country=afb$country, Ethnic=afb$R3Ethnic), sum)
#check that summing across one country gets full # of obs per country
testb<-aggregate(ethnicfrac$x,list(Country=ethnicfrac$Country),sum) #testa$x should be same as testb$x
# ethnic group weight / country total ==> estimated population share of group
#each country # of individuals
#merge
ethnicfracR3<-merge(ethnicfrac,testa, by.x=c("Country"),by.y=c("Country"),all.x=T)
ethnicfracR3$ethnicpercent<-(ethnicfracR3$x.x/ethnicfracR3$x.y)
aggregate(ethnicfracR3$ethnicpercent, list(Country=ethnicfracR3$Country), sum) #check that ethnicpercent adds up to 1 for each country
#combine R3 and R4
ethnicfrac<-rbind(ethnicfracR3,ethnicfracR4)
#head(ethnicfrac)
#tail(ethnicfrac)
names<-c("Country", "ethniccode", "withinwt", "round","countryroundn", "ethnicpercent")
names(ethnicfrac)<-names
#head(ethnicfrac)
# merge in ethnic weights and percentages (round 4 keys on R4Ethnic,
# round 3 keys on R3Ethnic; then coalesce the .x/.y columns)
ethnicfrac4<-subset(ethnicfrac,round==4)
ethnicfrac3<-subset(ethnicfrac,round==3)
mergedata <- merge(afb,ethnicfrac4,by.x=c("country","round","R4Ethnic"),by.y=c("Country","round","ethniccode"),all.x=T)
#tail(mergedata)
#head(mergedata)
mergedata2 <- merge(mergedata,ethnicfrac3,by.x=c("country","round","R3Ethnic"),by.y=c("Country","round","ethniccode"),all.x=T)
mergedata2$ethnicpercent<-ifelse(is.na(mergedata2$ethnicpercent.y),mergedata2$ethnicpercent.x,mergedata2$ethnicpercent.y)
sum(is.na(mergedata2$ethnicpercent)) #should be no NAs
mergedata2$withinwt<-ifelse(is.na(mergedata2$withinwt.y),mergedata2$withinwt.x,mergedata2$withinwt.y)
sum(is.na(mergedata2$withinwt)) #should be no NAs
mergedata2$countryroundn<-ifelse(is.na(mergedata2$countryroundn.y),mergedata2$countryroundn.x,mergedata2$countryroundn.y)
sum(is.na(mergedata2$countryroundn)) #should be no NAs
#drop excess variables: withinwt.x, withinwt.y, Withinwt, ethnicpercent.x, ethnicpercent.y, countryroundn.x, countryroundn.y
exclude<-names(mergedata2) %in% c("withinwt.x", "withinwt.y", "Withinwt", "ethnicpercent.x", "ethnicpercent.y", "countryroundn.x", "countryroundn.y")
mergedata3<-mergedata2[!exclude]
#head(mergedata3)
###############################################
# Drop Unused Observations
###############################################
# Keep only mergedata3 in the workspace.
rm(list=ls()[ls()!="mergedata3"])
data <- mergedata3
names(data)
# Keep only 14 countries that are nonnational marriages in rounds 3 and 4: to verify, pls check "DatasetInclusionDecision.xlsx" file
#mozambique BOTH rounds
# NOTE: `&` binds tighter than `|`, so e.g. botswana and south africa are
# restricted to round 3 and zimbabwe to round 4, while the other countries
# enter in both rounds.
data1 <- data[which(data$country=='benin'|data$country=='botswana' & data$round==3| data$country=='ghana'|data$country=='kenya'
|data$country=='madagascar'|data$country=='mali'|data$country=='mozambique'#& data$round==4
|data$country=='namibia'|data$country=='nigeria'|data$country=='south africa'& data$round==3|data$country=='uganda'
|data$country=='tanzania'|data$country=='zambia'|data$country=='zimbabwe'&data$round==4), ]
#check that correct country/years included
table(data1$country,data1$round)
###############################################
# Recodes ethnicity variable given new information
###############################################
# Recode Botswana r3 (see Amanda Robinson email 4/9/15) so codes 140-144, 146,
# 149-150, 153, 156, and 159 (Tswana sub-tribes) count as presidential
# coethnics.  The original repeated an identical ifelse()/table() pair once
# per code; a single vectorized %in% recode is equivalent.  Using %in% (which
# never returns NA) also avoids clobbering prescoethnic with NA for Botswana
# round-3 rows whose q79 is missing, as the original `q79 == code` tests did.
table(data1$prescoethnic)  # before recodes
tswana.subtribes <- c(140:144, 146, 149, 150, 153, 156, 159)
data1$prescoethnic <- ifelse(data1$round==3 & data1$country=='botswana' & data1$q79 %in% tswana.subtribes,
                             1, data1$prescoethnic)
table(data1$prescoethnic)  # after Botswana recode
#Recode Benin R3 (anyone who is Ditamari (125) is coethnic with president)
data1$prescoethnic <- ifelse(data1$round==3 & data1$country=='benin' & data1$q79 %in% 125,
                             1, data1$prescoethnic)
table(data1$prescoethnic)
#Recode Tanzania R3 (such that anyone who is Mmakonde (387) is coethnic with president)
data1$prescoethnic <- ifelse(data1$round==3 & data1$country=='tanzania' & data1$q79 %in% 387,
                             1, data1$prescoethnic)
table(data1$prescoethnic)
###############################################
# Code cases where Opponent and Spouse are Coethnic
###############################################
# oppwifecoethnic = 1 when the main opponent is a coethnic of the president's
# wife, 0 when not, NA when the information is unavailable.
# Source: "Opponents and Wives - AB3 and 4.xlsx"; pairs per country-round are
# (opponent ethnicity, wife ethnicity):
# [1,0] benin-3: (Fon, Fon); benin-4: (Goun, Fon)
# [0] botswana-3: (Tswana/Mongwato, Kalanga)
# [1,1] ghana-3: (Akan, Akan); ghana-4: (Akan, Akan) # (Wife is Ashanti but coded as Akan since subgroup of Akan)
# [1,1] kenya-3: (Kikuyu, Kikuyu); kenya-4: (Kikuyu, Kikuyu)
# [NA,NA] madagascar-3: (*Not 15%, Merina); madagascar-4: (*Not 15%, Merina) [Note both treated as NA]
# [0,0] mali-3: (Sonrhai, Bambara); mali-4: (Sonrhai, Bambara)
# [1,1] mozambique-3: (Ndau, Ndau), mozambique-4: (Ndau, Ndau)
# [1,1] namibia-3: (Ovambo, Ovambo); namibia-4: (Ovambo, Ovambo)
# [0,1] nigeria-3: (Hausa, Edo); nigeria-4: (Hausa, Hausa-Fulani) [Note Nigeria-4 treated as 1]
# [0] safrica-3: (Afrikaner, Zulu)
# [1,0] tanzania-3: (Chaga, Chagga); tanzania-4: (Nyamwezi, Coastal Muslim)
#       NOTE(review): the original note "[tanzania-4 treated as 1]"
#       contradicted the code, which codes tanzania-4 as 0 per the [1,0] pair.
# [0,0] uganda-3: (Bunyankole, Ankole); uganda-4: (Bunyankole, Ankole) # check to make sure these are NOT same
# [0,0] zambia-3: (Tonga, Lenje); zambia-4: (Bisa, Tumbuka)
# [1,1] zimbabwe-3: (Shona/Karanga, Shona); (Shona/Karanga, Shona) [treated as 1]
#
# The original relied on `&` binding tighter than `|`; the country/round
# pairs are now parenthesized explicitly so the intent is unambiguous
# (behavior unchanged).
data1$oppwifecoethnic <- ifelse(
    # 1: opponent IS a coethnic of the president's wife
    (data1$country=="benin" & data1$round==3) |
    data1$country=="ghana" |
    data1$country=="kenya" |
    data1$country=="mozambique" |
    data1$country=="namibia" |
    (data1$country=="nigeria" & data1$round==4) |
    (data1$country=="tanzania" & data1$round==3) |
    data1$country=="zimbabwe",
    1, #code as 1 since oppwifecoethnic
    ifelse(
        # 0: opponent is NOT a coethnic of the president's wife
        (data1$country=="benin" & data1$round==4) |
        (data1$country=="botswana" & data1$round==3) |
        data1$country=="mali" |
        (data1$country=="nigeria" & data1$round==3) |
        (data1$country=="south africa" & data1$round==3) |
        (data1$country=="tanzania" & data1$round==4) |
        data1$country=="uganda" |
        data1$country=="zambia",
        0, #code as 0 since NOT oppwifecoethnic
        NA)) #otherwise code as NA since not all info available
###############################################
# Drop Unused Data and Save
###############################################
#drop unnecessary (empty) levels (data1 full)
data1<-droplevels(data1)
#keep only variables we need for models
keep<-c("wifecoethnic","ethnic","oppcoethnic", "ethnicpercent","round", "rural","race", "head","cecon","recon",
        "fcecon", "frecon","interest","favorOwnGroup","noFood","noWater","hasRadio","hasTV","hasVehicle","voted",
        "educ","iage","ieducation","igender","country","vote", "approval", "ethnicUnfair", "ethnicPolitical",
        "prescoethnic", "preswifesame","age","female","educ", "oppwifecoethnic", "prescoethnic2", "wifecoethnic2")
data2<-data1[keep]
data<-data2 #data with all cases
## Analysis subset: respondents coethnic with neither the president nor the
## first lady.
data1<-subset(data,(prescoethnic==0 & preswifesame==0)) #subversion of data
dataMAKUA<-subset(data,(prescoethnic2==0 & preswifesame==0)) #subversion of data for robustness check: Mozambique R3 president coded as MAKUA (not Ronga)
# dataMAKUA smaller dataset than data1 because there are more prescoethnics (adding in Mozambique R3 Makuas), so using only the prescoethnics==0 means fewer people
data1<-droplevels(data1)
dataMAKUA<-droplevels(dataMAKUA)
# save files
write.csv(data, "DataforAnalysisFull.csv") # full data, not subsetted to (prescoethnic==0 & preswifesame==0)
write.csv(data1,"DataforAnalysisSub.csv") #subset data (prescoethnic==0 & preswifesame==0)
write.csv(dataMAKUA, "DataforAnalysisSubMAKUA.csv") #subset data (prescoethnic2==0 & preswifesame==0)
|
45e2ae313b1db23fb8923cfff69617152320373b
|
08f82c108d01efcbc33139bdd80d18ec35d0fa4f
|
/Prefix.R
|
1454042a1cb543fda849e7aef24a6599c23553d1
|
[] |
no_license
|
STAT545-UBC-hw-2018-19/hw09-ecool50
|
543fa1c521b7692983b39b7fc8390dad67d1bdec
|
9db07a3e67475ba7259f801e4fd9c941cddf3725
|
refs/heads/master
| 2020-04-08T13:23:15.553810
| 2018-11-28T06:51:33
| 2018-11-28T06:51:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 599
|
r
|
Prefix.R
|
#load required libraries
library(tidyverse)
#load in the words text document
words <- readLines("words.txt")
#compute the three-letter prefix of every word
words_pre <- str_sub(words, start = 1, end = 3)
#convert it into a dataframe
# FIX: as.tibble() was deprecated (and is now defunct) in the tibble package;
# as_tibble() is the supported, behavior-identical replacement.
prefix_data <- as_tibble(words_pre)
#get the 30 most frequent prefixes
top_30 <- plyr::count(prefix_data) %>% top_n(30)
#get the frequencies for all the prefixes
all_freq <- plyr::count(prefix_data)
#write it to a tsv for later visualization
write.table(top_30, file = "top_30_prefixes.tsv", sep = "\t")
#write all the prefixes to a table
write.table(all_freq, file = "All_frequencies.tsv", sep = "\t")
|
7087e0d459eae5d33baf91182f4a891c816c5e8b
|
858a0c6bee8b6785d8e043028a74d82a76b3333a
|
/#7_Script_Boga.r
|
37e817db877ce4bcc94ee7f7a8a33e2a65306dfd
|
[] |
no_license
|
BellaBoga/Quantitative_Phonetics_with_R
|
e91cfd0e25b93b8bda48c4d9248f218ffa58cd2f
|
12f2004d6bdb6815faa2ff538c8b78e5fb5ceb57
|
refs/heads/main
| 2023-06-05T16:09:43.769448
| 2021-06-17T21:43:08
| 2021-06-17T21:43:08
| 377,966,790
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,674
|
r
|
#7_Script_Boga.r
|
############################################################
## TASK
############################################################
## Working directory containing the KEC corpus data.
## NOTE(review): hard-coded absolute path -- adjust per machine.
DIR = '/home/bella/Dropbox/Master/1st Semester/Quantitative Phonetics with R/Material Corpus etc/KEC/'
setwd(DIR)
## Loads DFbig2 (word-level TextGrid data with added information).
load('DF_textgrids_addedinformation.rda', verbose = TRUE)
head(DFbig2)
## Work on a copy and drop bigram columns left over from a previous run.
DFbig = DFbig2
DFbig$BG_AB=NULL
DFbig$BG_BC=NULL
DFbig$AB.counts=NULL
DFbig$AB.count=NULL
DFbig$BC.counts=NULL
DFbig$BC.count=NULL
head(DFbig)
#TASK 20#
#-------#
# Calculate the durations for the bigrams (AB: previous word + word,
# BC: word + next word) and add them to the data frame.
# Bug fixes vs. the original:
#   * FN was used before it was defined (it was only created in Task 21);
#     it is now defined here from the file names.
#   * each iteration appended with `rbind(DFbig3, tmp)`, but DFbig3 does not
#     exist at this point -- the accumulator must be DFtmp.
# NOTE(review): paste() turns the summed durations into character columns;
# confirm that is intended (the original did the same).
FN = unique(DFbig2$filename)
DFtmp = data.frame()
for (ifn in FN){
  tmp = DFbig2[DFbig2$filename == ifn,]
  Target = tmp$wordduration
  A = c(0, Target[1:(nrow(tmp)-1)])          # duration of previous word (0 at file start)
  C = c(Target[2:nrow(tmp)], 0)              # duration of next word (0 at file end)
  tmp$BiDurAB = paste(A + tmp$wordduration)
  tmp$BiDurBC = paste(tmp$wordduration + C)
  DFtmp = rbind(DFtmp, tmp)
}
DFbig2 = DFtmp
save(DFbig2, file = 'tmp.rda')
#TASK 21#
#-------#
# Create a column with trigrams ABC.
# Calculate the conditional probability of B given AC.
# Calculate the durations of the trigrams ABC.
# Bug fixes vs. the original:
#   * the trigram column was created as `Trigram` but read later as `TG_ABC`;
#     it is now consistently called TG_ABC.
#   * the loop accumulators appended to the wrong object (`rbind(DFbig2, tmp)`);
#     they now append to DFtmp.
#   * typos `AC:count[i]` and `DF.tmp` corrected to `AC.count[i]` / `DFtmp`.
FN = unique(DFbig$filename)
## Build trigram labels "A B C" for every word B (sentence edges padded with '#').
DFtmp = data.frame()
for (ifn in FN){
  tmp = DFbig[DFbig$filename == ifn,]
  Target = tmp$word
  A = c('#', Target[1:(nrow(tmp)-1)])
  C = c(Target[2:nrow(tmp)], '#')
  tmp$TG_ABC = paste(A, Target, C)
  DFtmp = rbind(DFtmp, tmp)
}
DFbig2 = DFtmp
save(DFbig2, file = 'tmp.rda')
##### Get BG_AC (the context bigram: word before + word after)
DFtmp = data.frame()
for(ifn in FN)
{
  tmp = DFbig2[DFbig2$filename == ifn,]
  Target = tmp$word
  A = c('#', Target[1:(nrow(tmp)-1)])
  C = c(Target[2:nrow(tmp)],'#')
  tmp$BG_AC = paste(A, C)
  DFtmp = rbind(DFtmp, tmp)
}
DFbig2 = DFtmp
save(DFbig2, file='tmp.rda')
## Get count of AC
DFtmp$AC.count = NA
AC.count = sort(table(DFtmp$BG_AC), decreasing = TRUE)
for(i in seq_along(AC.count))
{
  count = AC.count[i]
  which.word = names(count)
  where = DFtmp$BG_AC == which.word
  DFtmp$AC.count[where] = count
}
## Get count of ABC
DFtmp$ABC.count = NA
ABC.count = sort(table(DFtmp$TG_ABC), decreasing = TRUE)
for(i in seq_along(ABC.count))
{
  count = ABC.count[i]
  which.word = names(count)
  where = DFtmp$TG_ABC == which.word
  DFtmp$ABC.count[where] = count
}
DFbig2 = DFtmp
save(DFbig2, file = 'tmp.rda')
## Get prob of B given AC: P(B | A_C) = count(ABC) / count(A_C)
DFtmp$cndP.B_AC = DFtmp$ABC.count / DFtmp$AC.count
DFbig2 = DFtmp
save(DFbig2, file = 'tmp.rda')
## Get ABC durations (sum of previous, current and next word durations;
## 0 padding at file edges)
DFtmp = data.frame()
for(ifn in FN)
{
  tmp = DFbig2[DFbig2$filename == ifn,]
  A = c(0, tmp$wordduration[1:(nrow(tmp)-1)])
  C = c(tmp$wordduration[2:nrow(tmp)], 0)
  ABC = (A + tmp$wordduration + C)
  tmp$TriDurABC = paste(ABC)
  DFtmp = rbind(DFtmp, tmp)
}
DFbig2 = DFtmp
save(DFbig2, file = 'DFbig2.rda')
|
65a2688b6f86fbc0529570f1570188e26cb3ecdf
|
616d210404dc2bb124e3498b9f5d0e42c968481f
|
/R/labels.R
|
4d4211925f3620de15ffe5331596f47622f50846
|
[] |
no_license
|
hofnerb/papeR
|
67018ecd80e97b1e044b2962721cdb31bea209ac
|
28347a1c01d7339a94d56be7a275077fdb1d2974
|
refs/heads/master
| 2021-06-20T07:45:30.370771
| 2021-03-18T19:33:05
| 2021-03-22T13:15:33
| 27,235,121
| 27
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,939
|
r
|
labels.R
|
################################################################################
## Author: Benjamin Hofner, benjamin.hofner@fau.de
################################################################################
# Extract labels from data sets
## labels() method for data.frames.
##   object:     the data.frame.
##   which:      columns whose labels are wanted (NULL = all); validated by
##               check_which() (defined elsewhere in the package).
##   abbreviate: if TRUE, labels are shortened via base::abbreviate().
##   ...:        passed on to abbreviate().
## Returns a named character vector of labels; columns without a stored label
## fall back to their column name.
labels.data.frame <- function(object, which = NULL, abbreviate = FALSE, ...) {
    ## if no labels were properly set use alternative methods to specify labels:
    if (!is.ldf(object)) {
        ## if no labels specified temporarily set names as labels
        if (is.null(attr(object, "variable.labels"))) {
            labels(object) <- colnames(object)
        } else {
            ## clean labels
            object <- CLEAN_LABELS(object)
            ## set these labels temporarily as elementwise labels
            labels(object) <- attr(object, "variable.labels")
        }
    }
    ## which labels should be extracted?
    which <- check_which(which, object, "extract")
    ## now extract labels
    RET <- sapply(as.data.frame(object)[which], get_labels)
    ## fix non-existing labels: sapply() returns a list when some columns
    ## have no label; substitute the column name for those and flatten
    if (is.list(RET) && any(idx_null <- sapply(RET, is.null))) {
        nms <- colnames(object)
        if (is.character(which))
            names(nms) <- nms
        RET[idx_null] <- nms[which][idx_null]
        RET <- unlist(RET)
    }
    ## should labels be abbreviated?
    if (abbreviate) {
        ## abbreviate() renames its result by the input strings; preserve
        ## the original names
        nms <- names(RET)
        RET <- abbreviate(RET, ...)
        names(RET) <- nms
    }
    return(RET)
}
################################################################################
# Extract labels from labeled variables
## labels() method for a single labeled variable ("lv" object): returns the
## stored label, optionally shortened via base::abbreviate() (extra arguments
## are forwarded to abbreviate()).
labels.lv <- function(object, abbreviate = FALSE, ...) {
    label <- get_labels(object)
    if (!abbreviate)
        return(label)
    ## abbreviate() names its result by the input strings; keep the
    ## label's original names instead.
    setNames(abbreviate(label, ...), names(label))
}
################################################################################
# Sets labels
## Replacement function: `labels(data, which) <- value` stores one label per
## selected column as the column's "variable.label" attribute, tags each
## column with class "lv" and the data set with class "ldf".
## `value = NULL` clears the labels of the selected columns.
"labels<-" <- function(data, which = NULL, value){
    which <- check_which(which, data, "define")
    if (!is.null(value)) {
        if (length(which) != length(value))
            stop("One must supply a label for each _selected_ column of the data set.")
        if (is.character(which))
            names(value) <- which
    }
    for (i in seq_along(which)) {
        ## store the label on the column itself and mark it as a labeled variable
        attr(data[[which[i]]], "variable.label") <- value[[i]]
        class(data[[which[i]]]) <- c("lv", class(data[[which[i]]]))
    }
    ## remove attribute of data set if it exists (element-wise labels
    ## supersede the data.frame-level "variable.labels" attribute)
    if (!is.null(attr(data, "variable.labels")))
        attr(data, "variable.labels") <- NULL
    class(data) <- c("ldf", class(data))
    return(data)
}
## Convenience replacement form: `labels(data)[i] <- value` delegates to the
## main `labels<-` replacement function with `which = i`.
"labels[<-" <- function(data, i, value)
    labels(data, which = i) <- value
CLEAN_LABELS <- function(data) {
    ## Synchronize the "variable.labels" attribute with the columns of `data`:
    ## drop labels whose column no longer exists, add default labels (the
    ## column name itself) for unlabeled columns, and order the labels like
    ## the columns.  Returns the altered data set.
    labs <- attr(data, "variable.labels")
    ## 1) Remove labels for columns that have disappeared (or were renamed).
    stale <- !(names(labs) %in% names(data))
    if (any(stale)) {
        message("Note: Variables have been removed or label names and ",
                "column names don't match. ",
                "Corresponding variable labels are removed.")
        labs <- labs[!stale]
    }
    ## 2) Add name-as-label entries for columns that have no label yet.
    unlabeled <- setdiff(names(data), names(labs))
    if (length(unlabeled) > 0) {
        labs <- c(labs, setNames(unlabeled, unlabeled))
    }
    ## 3) Re-order to the column order and store the labels back.
    attr(data, "variable.labels") <- labs[names(data)]
    data
}
## define coercion function
## Generic: coerce an object to a labeled data.frame ("ldf").
as.ldf <- function(object, ...)
    UseMethod("as.ldf")
## data.frame method: re-assigning the extracted labels sets the element-wise
## label attributes and the "ldf"/"lv" classes.
as.ldf.data.frame <- function(object, ...) {
    labels(object) <- labels(object)
    object
}
## Backward-compatible alias for as.ldf.data.frame().
convert.labels <- function(object)
    as.ldf.data.frame(object)
## TRUE if at least one column carries a label (i.e. the data set already
## behaves as a labeled data.frame).
is.ldf <- function(object)
    !all(sapply(lapply(object, get_labels), is.null))
|
0a23dfb29b680d9eeeeb406875892ca0224ce233
|
fe79d8b8345565ca364d8603ec86cea6f2146e42
|
/GROseq/GROseq_snakemake/tools/GROseq_pausing.R
|
6ba08c3bd295c02e174e0144123bcc4228b6b3c5
|
[] |
no_license
|
iovinolab/dom-study_2020
|
6accac2a8e2a6f05d99b3aa2e477d7c2fa4819a3
|
6b2ef92c2a0ec071747fdbc4274c14f38a0ee935
|
refs/heads/main
| 2023-08-29T08:38:04.994284
| 2021-01-29T13:23:57
| 2021-01-29T13:23:57
| 305,729,978
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,168
|
r
|
GROseq_pausing.R
|
library(optparse)
## Command-line options: annotation gtf, TSS and downstream-region count
## tables, output prefix, and an optional R library location.
option_list <- list(
    make_option(c("-g", "--gtf"),
                help="Genome annotation file (gtf)"),
    make_option(c("-p", "--promoter"),
                help="Quantification in promoter region"),
    make_option(c("-d", "--downstream_feature"),
                help="Quantification in promoter region"),
    make_option(c('-o','--outprefix'),
                help = 'output prefix for GROseq pausing tables per sample (tsv)'),
    make_option(c("--lib.location"), default = NULL, type = 'character',
                help="Specify library location")
)
opt <- parse_args(OptionParser(option_list=option_list))
lib.loc = opt$lib.location
suppressPackageStartupMessages(library(rtracklayer, lib.loc = lib.loc))
suppressPackageStartupMessages(library(readr, lib.loc = lib.loc))
suppressPackageStartupMessages(library(dplyr, lib.loc = lib.loc))
suppressPackageStartupMessages(library(tibble, lib.loc = lib.loc))
suppressPackageStartupMessages(library(reshape2, lib.loc = lib.loc))
suppressPackageStartupMessages(library(ggplot2, lib.loc = lib.loc))
## debug
# opt$gtf = 'dm6_ensembl96.gtf'
# opt$promoter = 'pausing.bam_umi_dedup/test_run.tss.counts.tsv'
# opt$downstream_feature = 'pausing.bam_umi_dedup/test_run.tssds.counts.tsv'
#
# Rscript GROseq_pausing.R
#   -g dm6_ensembl96.gtf
#   -p pausing.bam_umi_dedup/test_run.tss.counts.tsv
#   -d pausing.bam_umi_dedup/test_run.tssds.counts.tsv
#   -o pausing.bam_umi_dedup/test_run
## Lookup tables from the annotation: transcript -> gene / biotype and
## gene -> biotype / symbol / width.
anno.transcript = import.gff(opt$gtf, feature.type = 'transcript')
tx2gid <- deframe(mcols(anno.transcript)[c('transcript_id','gene_id')])
tx2biotype = deframe(mcols(anno.transcript)[c('transcript_id','transcript_biotype')])
anno.genes <- import.gff(opt$gtf, feature.type = 'gene')
gid2biotype = deframe(mcols(anno.genes)[c('gene_id','gene_biotype')])
gid2symbol = deframe(mcols(anno.genes)[c('gene_id','gene_name')])
gid2width <- deframe(data.frame(mcols(anno.genes)$gene_id, width(anno.genes), stringsAsFactors = FALSE))
## Read both count tables ('#'-prefixed header lines skipped) and normalize
## sample column names by stripping the directory and ".bam" suffix.
tabset <- list(tss = opt$promoter, txunit = opt$downstream_feature)
quantTabSet <- lapply(tabset, read_tsv, col_names = TRUE, comment = '#')
# quantTabSet <- lapply(quantTabSet, function(x) {colnames(x) <- gsub('.*(T.*)\\.bam$','\\1',colnames(x));return(x)})
quantTabSet <- lapply(quantTabSet, function(x) {colnames(x) <- gsub('.bam$','',basename(colnames(x)));return(x)})
cols.anno = c('Geneid','Chr','Start','End','Strand','Length')
## Long format over both tables, then wide: one row per (transcript, sample)
## with columns tss / txunit.
dd0 <- melt(quantTabSet, id.vars = cols.anno)
colnames(dd0)[1] = 'Transcriptid'
head(dd0)
dd.wide <- dcast(dd0, formula(paste(paste(c('Transcriptid','variable'), collapse = '+')," ~ L1")))
# Active definition (active/inactive):
# - GRO-seq in TSS or TXunit
# Active TSS (active/inactive)
# - GRO-seq in TSS
# Paused (gradual, 3 groups) [Inf - 1, 1-0.5, 0.5 - -Inf]
# - log2 TSS/TXUNIT
dd.wide <- dd.wide %>%
  # pseudocount avoids division by zero for transcripts with no signal
  mutate(pausing.ratio = (tss + 1e-8)/(txunit + 1e-8)) %>%
  mutate(Geneid = tx2gid[Transcriptid],
         # Definition of 'active' run by RPKM + peak calling. Not necessary any more in this part
         tss.active = tss > 1e-2) %>%
  ## Be stricter with transcript_active
  ## reconsider txunit > 1e-3. Upstream TSS may be considered active for downstream TSS beig txunit
  mutate(transcript_active = tss > 1e-2 | txunit > 1e-2) %>%
  # group by signal in TSS
  group_by(Geneid) %>%
  mutate(gene_active = any(transcript_active)) %>%
  # not.paused := log2(pausing) <= 0.5
  # BUG FIX: the original tested `log2(pausing.ratio) > 0.5` before `> 1`,
  # so the 'strongly.paused' branch was unreachable (any ratio > 1 already
  # matched the first condition).  The stricter cutoff is now tested first.
  mutate(pausing.status = ifelse(transcript_active & log2(pausing.ratio) > 1, 'strongly.paused',
                                 # relaxed cutoff - e.g. 0.5
                                 ifelse(transcript_active & log2(pausing.ratio) > 0.5, 'paused', 'not.paused')),
         symbol = gid2symbol[Geneid]) %>%
  ungroup
## Write one pausing table per sample: <outprefix>.<sample>.tsv
dd.wide_per_sample = split(dd.wide %>% select(-variable), dd.wide$variable)
for(sample0 in names(dd.wide_per_sample)){
  write_tsv(dd.wide_per_sample[[sample0]], paste0(opt$outprefix,'.',sample0,'.tsv'))
}
|
2b64bb23533314d86619fe34e947499d6c0a6cfe
|
8d9e2a5319e96043b04203e2af8afecce20bc8e3
|
/R/csv/spe.R
|
c24fe2263463d5540c5a5d17fe93c23257034e8d
|
[
"Apache-2.0"
] |
permissive
|
gbif/analytics
|
098d9b071b435b3f581698c0cd7eb0605617700c
|
7f0ca9459f75909f6708e25facfaead97ad5468e
|
refs/heads/master
| 2023-08-08T20:44:12.524328
| 2023-07-24T14:09:53
| 2023-07-24T14:09:53
| 20,252,443
| 6
| 5
|
Apache-2.0
| 2021-01-28T17:36:30
| 2014-05-28T09:40:55
|
HiveQL
|
UTF-8
|
R
| false
| false
| 2,006
|
r
|
spe.R
|
source("R/csv/utils.R")
## Species-count CSV extraction.
## The original file repeated near-identical extractAreaCSV() /
## prepareGlobalCSV() calls; the call specifications are tabulated below and
## executed in the same order, which keeps behavior identical while removing
## the duplication.  The count column is always "speciesCount".
## Per-area extractions into "spe.csv".
area.specs <- list(
  list(areaType = "country",    sourceFile = "spe_country.csv",             group = "country",             groupLabel = "about"),
  list(areaType = "country",    sourceFile = "spe_publisherCountry.csv",    group = "publisherCountry",    groupLabel = "publishedBy"),
  list(areaType = "gbifRegion", sourceFile = "spe_gbifRegion.csv",          group = "gbifRegion",          groupLabel = "about"),
  list(areaType = "gbifRegion", sourceFile = "spe_publisherGbifRegion.csv", group = "publisherGbifRegion", groupLabel = "publishedBy")
)
for (spec in area.specs) {
  extractAreaCSV(
    areaType = spec$areaType,
    sourceFile = spec$sourceFile,
    sourceSchema = c("snapshot", spec$group, "speciesCount"),
    targetFile = "spe.csv",
    group = c(spec$group),
    groupLabel = c(spec$groupLabel)
  )
}
## These data are duplicated into the global folder for convenience; the
## source and target file names coincide (spe_<group>.csv).
global.groups <- c("country", "publisherCountry", "gbifRegion", "publisherGbifRegion")
for (grp in global.groups) {
  prepareGlobalCSV(
    sourceFile = paste0("spe_", grp, ".csv"),
    sourceSchema = c("snapshot", grp, "speciesCount"),
    targetFile = paste0("spe_", grp, ".csv")
  )
}
## Overall species counts per snapshot.
prepareGlobalCSV(
  sourceFile = "spe.csv",
  sourceSchema = c("snapshot", "speciesCount"),
  targetFile = "spe.csv"
)
|
df08feb9ea99749aaa0824d0c0b1bc1137306c5a
|
973748a68316c615fa2113cbad696d24ae4f91b4
|
/R_DeepestLearners/10_ufc_visualisations/vis.R
|
b9f745f25c5ead6ae7513b4c5ac2ec1ad0814afe
|
[] |
no_license
|
PratyakshGitArchived/UFCP
|
1e79cb13aa0a4c9de308f132138118d4849c8660
|
cce3409b11602cd933086145938d7e89512de660
|
refs/heads/master
| 2023-06-08T05:09:28.674780
| 2020-06-19T18:37:28
| 2020-06-19T18:37:28
| 273,562,515
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,591
|
r
|
vis.R
|
# ****************
# No License.
# The model is for educational purposes only
# ****************
# 2019 PRATICAL BUSINESS ANALYTICS - University of Surrrey
#
# Group Name : The Deepest Learners
# November-December 2019
# ****************
# Data Analysis for UFC fight data 1993-2019
# About This Script: - Generates set of figures in Plots and Viewer which some are interactive visualisations.
#
# ****************
# Set the current source file as Working Directory
sourceFile<- dirname(parent.frame(2)$ofile)
## NOTE(review): $ofile only exists when the script is run via source(),
## not via Rscript.
setwd(sourceFile)
gc() # garbage collection to automatically release memory
# clear plots and other graphics
if(!is.null(dev.list())) dev.off()
graphics.off()
# This clears all warning messages
assign("last.warning", NULL, envir = baseenv())
# clears the console area
cat("\014")
# clears all objects in "global environment"
rm(list=ls())
print("~~ VISUALISATION STARTED:")
# Global variables - i.e. available to all functions
DATA_FILE <- "../01_ufc_preprocess/UFC_FINAL.csv"
# Define and then load the libraries used in this project
MYLIBRARIES<-c(
  "plotly",
  "countrycode",
  "ggthemes",
  "gridExtra",
  "ggplot2",
  "dplyr"
)
library(pacman)
pacman::p_load(char= MYLIBRARIES,install=TRUE,character.only=TRUE)
print("Please Wait...")
## Default plotly text style and margins reused by the figures below.
t <- list(family = "sans-serif",size = 14,color = 'black') # Text style
m <- list(l = 8,r = 8,b = 35,t = 35,pad =1) # Margins
# *********************** #
# 1- KNN Accuracy Chart
## Line chart of average KNN accuracy per K, with and without PCA,
## read from the accuracy table produced by knn.r.
acc_df<-read.csv("../03_ufc_knn/acc_df.csv",encoding="UTF-8",stringsAsFactors = FALSE) # accuracy table from knn.r
acc_df <- acc_df[,c(1,2,4)]
names(acc_df) <- c("k_value","avg_accuracy","pca_avg_accuracy")
is.num <- sapply(acc_df, is.numeric) # Format to 3 Decimal Points
acc_df [is.num] <- lapply(acc_df [is.num], round, 3)
x <- acc_df$k_value
y1 <- acc_df$avg_accuracy
y2 <- acc_df$pca_avg_accuracy
## NOTE(review): t1/m1 are defined but never used -- the layout below uses
## the global t/m styles; confirm whether these were meant to be passed.
t1 <- list(family = "sans-serif",size = 16,color = 'black') # Text style
m1 <- list(l = 50,r = 50,b = 100,t = 100,pad = 4) # Margins
a<-plot_ly(acc_df,x=x, y=y1, type="scatter", mode="line", name="KNN") %>%
  add_trace(y = y2, name = 'PCA-KNN', mode = 'lines') %>%
  layout(
    title="KNN Average Accuracy on 30 runs per K",
    yaxis = list(
      title="Accuracy (%)",
      range=c(55,72)
    ),
    xaxis = list(
      title="K Value",
      range=c(0,105)
    ),
    font = t,
    margin = m
  )
print(a)
# *********************** #
# 2- Weight Class Donut
## Donut chart of fight counts per weight class from the raw UFC data.
ufc_data <- read.csv("../data.csv",encoding="UTF-8",stringsAsFactors = TRUE)
is.num <- sapply(ufc_data, is.numeric) # Format to 3 Decimal Points
ufc_data [is.num] <- lapply(ufc_data [is.num], round, 3)
weight_class <- ufc_data$weight_class
weight_class <- na.omit(weight_class) # drop na
weight_class <- as.data.frame(table(ufc_data$weight_class)) # frequency
## NOTE(review): t2/m2 defined but unused (layout uses global t/m).
t2 <- list(family = "sans-serif",size = 16,color = 'black') # Text style
m2 <- list(l = 50,r = 50,b = 100,t = 100,pad = 4) # Margins
b <- plot_ly(weight_class, labels = ~Var1, values = ~Freq)%>%add_pie(hole = 0.6) %>%
  layout(title = "UFC Weight Class 1993 - 2019",
         showlegend = T,
         xaxis = list(showgrid = FALSE, zeroline = FALSE,showticklabels = FALSE),
         yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
         font = t,
         margin = m
  )
print(b)
# *********************** #
# 3- Location Map Graph
## Choropleth of UFC event counts per country; the country name is the last
## comma-separated token of the location string.
ufc_data$location <- na.omit(ufc_data$location) # drop na
# Extract Country
countryList <- c()
for(loc in ufc_data[,]$location){
  country <- strsplit(loc,",") # split by ,
  country <- country[[1]][length(country[[1]])] # get country
  countryList <- c(countryList,country)
}
countryDF <- data.frame(countryList)
countryDF <- as.data.frame(table(countryDF)) # frequency
codes<-as.data.frame(countrycode(countryDF$countryDF, 'country.name', 'iso3c')) # get country codes
countryDF<- data.frame(countryDF,codes)
names(countryDF) <- c("country", "fights", "code")
## NOTE(review): t3/m3 defined but unused (layout uses global t/m).
t3 <- list(family = "sans-serif",size = 16,color = 'black') # Text style
m3 <- list(l = 50,r = 50,b = 100,t = 100,pad = 4) # Margins
l <- list(color = toRGB("grey"), width = 0.5) # light grey boundaries
g <- list(showframe = FALSE,showcoastlines = TRUE,projection = list(type = 'Mercator')) # specify map projection/options
c <- plot_geo(countryDF) %>%
  add_trace(
    z = ~fights, color = ~fights, colors = 'Blues',
    text = ~country, locations = ~code, marker = list(line = l)
  ) %>%
  colorbar(title = 'UFC Events') %>%
  layout(
    title = '1993 - 2019 UFC EVENTS WORLDWIDE',
    geo = g,
    font = t,
    margin = m
  )
print(c)
# *********************** #
# 4- Events vs Years BarChart
## Bar chart of matches per year (year = first '-'-separated token of the
## date column); one bar (index 22) is highlighted in a darker blue.
ufc_data$date <- na.omit(ufc_data$date) # drop na
# Extract Year
yearsList <- c()
for(date in ufc_data[,]$date){
  date <- strsplit(date,"-") # split by -
  date <- date[[1]][1] # get date
  yearsList <- c(yearsList,date)
}
yearsDF <- data.frame(yearsList)
yearsDF <- as.data.frame(table(yearsDF)) # frequency
names(yearsDF) <- c("year", "count")
x4 = yearsDF$year
y4 = yearsDF$count
## NOTE(review): t4/m4 defined but unused (layout uses global t/m).
t4 <- list(family = "sans-serif",size = 14,color = 'Black') # Text style
m4 <- list(l = 50,r = 50,b = 100,t = 100,pad = 4) # Margins
## 27 bars assumed (1993-2019); confirm which year index 22 maps to.
bar_color <- rep("#3caef2",27)
bar_color[22] <- '#07466c'
d <- plot_ly(yearsDF, x = ~x4, y = ~y4, type = 'bar',text=y4, textposition="auto",
             marker = list(color = bar_color)) %>%
  layout(title = "Number of UFC matches Over Years",
         xaxis = list(title = "Year"),
         yaxis = list(title = "No. Of matches"),
         font = t,
         margin = m)
print(d)
# *********************** #
# 5- Density plots
## 2x2 grid of density plots of fighter height, reach, weight and age,
## pooling the blue- and red-corner columns of each measure.
fighter_measures <- data.frame(
  # BUG FIX: the original concatenated B_Height_cms with itself; the second
  # half must come from the red corner (R_Height_cms) like the other measures.
  "height" = c(ufc_data$B_Height_cms, ufc_data$R_Height_cms),
  "reach" = c(ufc_data$B_Reach_cms, ufc_data$R_Reach_cms),
  "weight" = c(ufc_data$B_Weight_lbs, ufc_data$R_Weight_lbs),
  "age" = c(ufc_data$B_age, ufc_data$R_age))
fighter_measures <- na.omit(fighter_measures)
p1 <- ggplot(fighter_measures, aes(x=age))+
  geom_density(color="darkblue", fill="lightblue")
p2 <- ggplot(fighter_measures, aes(x=height))+
  geom_density(color="darkblue", fill="lightblue")
p3 <- ggplot(fighter_measures, aes(x=weight))+
  geom_density(color="darkblue", fill="lightblue")
p4 <- ggplot(fighter_measures, aes(x=reach))+
  geom_density(color="darkblue", fill="lightblue")
# Subcharts
grid.arrange(p1, p2, p3, p4, ncol=2, nrow=2)
# pie charts
## Per-fighter career statistics produced by the SQL step.
fighter_stat <- read.csv("../02_fighter_stat_SQL/master_fighter_recent_stats.csv",encoding="UTF-8",stringsAsFactors = TRUE)
is.num <- sapply(fighter_stat, is.numeric) # Format to 3 Decimal Points
fighter_stat [is.num] <- lapply(fighter_stat [is.num], round, 3)
# 6- Win Type
## Pie of total wins by method, summed over columns 10:15 of fighter_stat.
win_by <- c()
win_by_count <- c()
for (col in names(fighter_stat)[10:15]){
  win_by <- c(win_by, col)
  win_by_count <- c(win_by_count,(sum(fighter_stat[,col])))
}
win_by <- data.frame(win_by,win_by_count)
win_by$win_by <- c("Decision Majority", "Decision Split", "Decision Unanimous", "knock Out", "Submission", "Doctor Stoppage")
## NOTE(review): `colors` below is not defined in this script, so it resolves
## to the base function grDevices::colors -- probably a leftover; confirm
## whether a palette vector was intended.
f <- plot_ly(win_by, labels = ~win_by, values = ~win_by_count, type = 'pie',
             textposition = 'inside',
             textinfo = 'percent',
             insidetextfont = list(color = '#FFFFFF'),
             hoverinfo = 'text',
             text = ~paste(win_by_count, ' wins'),
             marker = list(colors = colors,
                           line = list(color = '#FFFFFF', width = 1))) %>%
  layout(title = 'Win Type',
         xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
         yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
         font = t,
         margin = m)
print(f)
# 7- Stance Type
## Pie of fighter stance frequencies, summed over columns 21:24.
stance <- c()
stance_count <- c()
for (col in names(fighter_stat)[21:24]){
  stance <- c(stance, col)
  stance_count <- c(stance_count,(sum(fighter_stat[,col])))
}
stance <- data.frame(stance,stance_count)
stance$stance <- c("Open Stance", "Orthodox", "SouthPaw", "Switch")
## NOTE(review): same undefined `colors` as above.
g <- plot_ly(stance, labels = ~stance, values = ~stance_count, type = 'pie',
             textposition = 'inside',
             textinfo = 'percent',
             insidetextfont = list(color = '#FFFFFF'),
             hoverinfo = 'text',
             text = ~paste(stance_count, stance),
             marker = list(colors = colors,
                           line = list(color = '#FFFFFF', width = 1))) %>%
  layout(title = 'Stance Type',
         xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
         yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
         font = t,
         margin = m)
print(g)
# 8- Models' Accuracy radar chart
## Radar chart comparing hard-coded final accuracies (in %) of the four
## models on normal vs. PCA-transformed data.
h <- plot_ly(type = 'scatterpolar', fill = 'toself', mode="markers") %>%
  add_trace(
    r = c(71.61, 69.11, 67.94, 67.42),
    theta = c('KNN','SVM','DNN', 'RF'),
    name = 'Normal Data'
  ) %>%
  add_trace(
    r = c(71.27, 68.77, 67.72, 66.67),
    theta = c('KNN','SVM','DNN', 'RF'),
    name = 'PCA-Performed Data'
  ) %>%
  layout(
    margin=c(l=1,r=1,t=2,b=1),
    title = "Models Accuracy Performance",
    polar = list(
      radialaxis = list(
        visible = T,
        range = c(65,72)
      )
    )
  )
print(h)
# 9- Models' Accuracy bar chart
## Grouped bar chart of model accuracies on a 0-1 scale.
## NOTE(review): these values differ from the radar chart above -- confirm
## which set is current.
models <- c("KNN", "SVM", "DNN", "RF")
normalt <- c(0.69, 0.692, 0.66, 0.66)
pca_accu <- c(0.68, 0.694, 0.67, 0.65)
data <- data.frame(models, normalt, pca_accu)
i <- plot_ly(data, x = ~models, y = ~normalt, type = 'bar', name = 'Normal Data') %>%
  add_trace(y = ~pca_accu, name = 'PCA-Performed Data') %>%
  layout(yaxis = list(title = 'Accuracy (%)',range=c(0.55,0.72)), barmode = 'group')
print(i)
print("~~ VISUALISATION ENDED:")
|
a7abbc931aa28fe3274edff36971cf39458bce1e
|
f6d5a97fcdda7d9621baed759186c3ac8fcf0a5d
|
/R/lib/phoneme/phoneme.R
|
7404b6d241ba91db9df050db7c6614b7d7530448
|
[] |
no_license
|
e155721/src
|
1245f988c4aa878dc976e92ef97df003f327e64d
|
f6b9dd1c19decd0789fe3dd3cf74654f00a18db1
|
refs/heads/master
| 2022-01-01T19:21:17.172143
| 2021-12-18T21:46:34
| 2021-12-18T21:46:34
| 155,394,876
| 3
| 1
| null | 2018-11-19T05:18:30
| 2018-10-30T14:00:34
|
R
|
UTF-8
|
R
| false
| false
| 2,059
|
r
|
phoneme.R
|
make_phone_vec <- function(feat_mat) {
  # Return the phoneme symbols of a feature matrix: its row names with the
  # alignment gap symbol "-" removed.
  #
  # feat_mat: a matrix whose row names are phoneme symbols (possibly
  #           including the gap symbol "-").
  #
  # BUG FIX: the original used vec[-which(vec == "-")], which returns an
  # EMPTY vector when no gap symbol is present (negative indexing with
  # integer(0) selects nothing).  Logical subsetting handles that case.
  vec <- dimnames(feat_mat)[[1]]
  vec[vec != "-"]
}
make_feat_mat <- function(file) {
  ## Read a phoneme feature table (UTF-8, whitespace separated) and return
  ## it as a character matrix: rows = phonemes, columns = features.
  feat_mat <- read.table(file, fileEncoding = "utf-8")
  feat_mat <- as.matrix(feat_mat)
  N <- dim(feat_mat)[2]
  for (j in 1:N) {
    ## NOTE(review): the pattern "*[ ]*" starts with an unescaped '*';
    ## presumably the intent is to strip asterisks/spaces from the cell
    ## values -- confirm against the feature files before changing it.
    feat_mat[, j] <- gsub(pattern = "*[ ]*", replacement = "", feat_mat[, j])
  }
  return(feat_mat)
}
add_attr <- function(x, attr) {
  # Append the entries of `attr` (a named list of new attributes) to the
  # attributes already present on `x` and return the modified object.
  attributes(x) <- c(attributes(x), attr)
  x
}
get_phone_info <- function(cons_file, vowel_file) {
  ## Read the consonant and vowel feature tables, build phoneme vectors and
  ## position-tagged feature matrices, and publish them as global variables:
  ## C, V, mat.C.feat, mat.V.feat -- plus CV and mat.CV.feat when both
  ## tables have the same number of feature columns.
  ## NOTE(review): writing results into .GlobalEnv via assign() is a side
  ## effect downstream code relies on; returning a list would be cleaner.
  # Consonant features
  mat.C.feat <- make_feat_mat(cons_file)
  # Vowel features
  mat.V.feat <- make_feat_mat(vowel_file)
  # Consonants
  C <- make_phone_vec(mat.C.feat)
  # Vowels
  V <- make_phone_vec(mat.V.feat)
  ## Prefix each feature value with its column index and a "C" tag so equal
  ## values in different columns stay distinct.
  N.cons <- dim(mat.C.feat)[2]
  for (j in 1:N.cons) {
    mat.C.feat[, j] <- paste(j, "C", mat.C.feat[, j], sep = "")
  }
  mat.C.feat <- add_attr(mat.C.feat, list(sound = "C"))
  ## Same for vowels, tagged "V".
  N.vowel <- dim(mat.V.feat)[2]
  for (j in 1:N.vowel) {
    mat.V.feat[, j] <- paste(j, "V", mat.V.feat[, j], sep = "")
  }
  mat.V.feat <- add_attr(mat.V.feat, list(sound = "V"))
  if (N.cons == N.vowel) {
    ## Combined matrix, rebuilt from the raw files so the prefixes use the
    ## "CV" tag instead of "C"/"V".
    mat.CV.feat <- rbind(make_feat_mat(cons_file), make_feat_mat(vowel_file))
    CV <- c(C, V)
    for (j in 1:N.cons) {
      mat.CV.feat[, j] <- paste(j, "CV", mat.CV.feat[, j], sep = "")
    }
    mat.CV.feat <- add_attr(mat.CV.feat, list(sound = "CV"))
    assign(x = "CV", value = CV, envir = .GlobalEnv)
    assign(x = "mat.CV.feat", value = mat.CV.feat, envir = .GlobalEnv)
  }
  # Make global variables.
  assign(x = "C", value = C, envir = .GlobalEnv)
  assign(x = "V", value = V, envir = .GlobalEnv)
  assign(x = "mat.C.feat", value = mat.C.feat, envir = .GlobalEnv)
  assign(x = "mat.V.feat", value = mat.V.feat, envir = .GlobalEnv)
}
## Feature table locations (relative to the R/ working directory) and the
## initial population of the global phoneme variables.
cons_file <- "lib/phoneme/features/consonants.txt"
vowel_file <- "lib/phoneme/features/vowels.txt"
get_phone_info(cons_file, vowel_file)
|
cf57e7cb7838762ef1af08f6ac1c7e643f66ab35
|
0b7d4e038db0794dce51813dd71c86dcf19c9878
|
/density_clustering_experiments.R
|
9df1e99c7857f305b37c0bb8d3a128edfdf65461
|
[] |
no_license
|
torebre/kanjiR
|
5f0af777eead9cd430b680d2d49296cd66c71b1f
|
07b9f1b26da5f5255f2159a09fe70c6188771d6a
|
refs/heads/master
| 2021-05-05T21:34:01.018072
| 2020-04-09T12:58:06
| 2020-04-09T12:58:06
| 115,754,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,234
|
r
|
density_clustering_experiments.R
|
# Exploratory script: kernel density estimation and mean-shift clustering of
# kanji line features.  Relies on objects created elsewhere in the project
# (kernel.density.estimate, filtered.matrix, filtered.lines, kanji.line.data,
# hsct.rgl, pt.col, DrawHighlightedLines, DrawLineKanji) -- run only after
# those are in the workspace.
library(MASS)
library(ks)
library(colorspace)
library(RColorBrewer)
library(rgl)
library(misc3d)
library(alphashape3d)
# Evaluate the pre-built kernel density estimate at every row of the feature
# matrix and inspect the spread of the resulting densities.
predictions <- predict(kernel.density.estimate, x = filtered.matrix)
max(predictions)
min(predictions)
truehist(predictions)
# TODO Find clustering
# kernel mean shift clustering
# Scale each of the three feature columns to [0, 1] by its own maximum.
filtered.matrix.normalized <- cbind(filtered.matrix[ , 1] / max(filtered.matrix[ , 1]),
filtered.matrix[ , 2] / max(filtered.matrix[ , 2]),
filtered.matrix[ , 3] / max(filtered.matrix[ , 3]))
# Small 5-point evaluation grid across the normalized range of each column.
evaluation.points <- cbind(seq(0, max(filtered.matrix.normalized[, 1]), length.out = 5),
seq(0, max(filtered.matrix.normalized[, 2]), length.out = 5),
seq(0, max(filtered.matrix.normalized[, 3]), length.out = 5))
# Bandwidth selection experiments (ks package); only the last assignment to
# bandwidth.matrix survives.
unique.rows <- unique(filtered.matrix.normalized[ , 1:3])
bandwidth.matrix <- Hlscv(x = unique.rows)
# bandwidth.matrix <- Hns(x = filtered.matrix.normalized)
start.matrix <- rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1))
bandwidth.matrix <- Hpi(x = filtered.matrix.normalized, pilot = 'dscalar', Hstart = start.matrix, bgridsize = c(0.0001, 0.0001, 0.0001), binned = T)
#filtered.matrix.kms <- kms(x = filtered.matrix.normalized, H = bandwidth.matrix, y = evaluation.points, verbose = T)
# NOTE(review): kms() below runs on the raw (unnormalized) matrix with the
# default bandwidth, so the normalization/bandwidth work above is not used by
# this call -- confirm whether that is intentional.
filtered.matrix.kms <- kms(x = filtered.matrix, verbose = T)
xlab <- "x"
ylab <- "y"
zlab <- "z"
# xlim <- c(0, max(filtered.matrix.normalized[, 1]))
# ylim <- c(0, max(filtered.matrix.normalized[, 2]))
# zlim <- c(0, max(filtered.matrix.normalized[, 3]))
xlim <- c(0, max(filtered.matrix[, 1]))
ylim <- c(0, max(filtered.matrix[, 2]))
zlim <- c(0, max(filtered.matrix[, 3]))
plot(filtered.matrix.kms)
# 3-D rgl rendering of the mean-shift clusters.
clear3d()
plot(filtered.matrix.kms, col=pt.col((1:filtered.matrix.kms$nclust)*2), splom=FALSE, size=8, axes=FALSE, alpha=(filtered.matrix.kms$label+1)^1.5/40, asp=1, xlim=xlim, ylim=ylim, zlim=zlim, xlab=xlab, ylab=ylab, zlab="")
hsct.rgl(zlab=zlab)
box3d()
# Density surface of the normalized data with the selected bandwidth.
density.experiment <- kde(x = filtered.matrix.normalized, H = bandwidth.matrix)
clear3d()
plot(density.experiment, xlim=xlim, ylim=ylim, zlim=zlim, xlab="", ylab="", zlab="", axes=FALSE)
hsct.rgl(xlab=xlab, ylab=ylab, zlab=zlab)
# Kernel feature significance on the raw matrix.
kernel.feature.significance <- kfs(filtered.matrix)
plot(kernel.feature.significance)
# Drill into the single highest-density row and draw the kanji it came from
# (columns 6/7/8 of filtered.lines appear to be kanji id and line numbers --
# TODO confirm against the data definition).
max.prediction <- which(predictions == max(predictions))
filtered.matrix[max.prediction, ]
max.line <- filtered.lines[max.prediction, ]
kanji.test <- kanji.line.data[which(kanji.line.data[ , 1] == max.line[6]), ]
DrawHighlightedLines(max.line[6], kanji.test, c(max.line[7], max.line[8]))
DrawLineKanji(max.line[6], kanji.line.data)
# Related lines sharing the densest example's from/to line numbers.  The
# hard-coded 32993 presumably identifies one kanji of interest.
lines.from <- filtered.lines[which(filtered.lines[ , 6] == 32993 & filtered.lines[ , 7] == max.line[7]), ]
lines.to <- filtered.lines[which(filtered.lines[ , 8] == max.line[8]), ]
lines.from[which(lines.from[ , 8] == 11), ]
# Using the max line as an example
# max.line involving 8 and 10
max.line
# Looking at 8
line.from.numbers <- which(filtered.lines[ , 6] == max.line[6] & filtered.lines[ , 7] == max.line[7])
lines.from <- filtered.lines[line.from.numbers, ]
lines.from.short <- filtered.matrix[line.from.numbers, ]
predict(kernel.density.estimate, x = lines.from.short)
|
a452f21fd00d76b471ccd76e045e781aaf85d9ac
|
69c6053763d1984b7a880e7b219bed2e7867de8f
|
/CFA_fit_examples/OpenMx/StarWars_OpenMx.R
|
88a1db18cfd3a81496c6209d7c787b7a43280574
|
[] |
no_license
|
SachaEpskamp/SEM-code-examples
|
273bff2350344666e2df7e51ae692a0a8ed343a9
|
7e65df05bbe72701d40c5bc935c97fbb927b7235
|
refs/heads/master
| 2022-05-10T02:23:39.059146
| 2022-03-15T11:34:57
| 2022-03-15T11:34:57
| 179,825,307
| 28
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,342
|
r
|
StarWars_OpenMx.R
|
# Confirmatory factor analysis of the Star Wars questionnaire via explicit
# LISREL-style matrix specification in OpenMx (Lambda/Psi/Theta).
# Load packages:
library("dplyr") # I always load this
library("semPlot")
# For info on OpenMx, see:
# https://openmx.ssri.psu.edu/
# Install with:
# install.packages("OpenMx")
library("OpenMx")
# This code shows an example of matrix specification in OpenMx, which is a clear benefit of OpenMx.
# You can also use path specification (see website above), but that generally does not work as nice
# as lavaan in my opinion.
# Read the data:
Data <- read.csv("StarWars.csv", sep = ",")
# This data encodes the following variables:
# Q1: I am a huge Star Wars fan! (star what?)
# Q2: I would trust this person with my democracy.
# Q3: I enjoyed the story of Anakin's early life.
# Q4: The special effects in this scene are awful (Battle of Geonosis).
# Q5: I would trust this person with my life.
# Q6: I found Darth Vader'ss big reveal in "Empire" one of the greatest moments in movie history.
# Q7: The special effects in this scene are amazing (Death Star Explosion).
# Q8: If possible, I would definitely buy this droid.
# Q9: The story in the Star Wars sequels is an improvement to the previous movies.
# Q10: The special effects in this scene are marvellous (Starkiller Base Firing).
# Q11: What is your gender?
# Q12: How old are you?
# Q13: Have you seen any of the Star Wars movies?
# Observed variables:
obsvars <- paste0("Q",1:10)
# Latents:
latents <- c("Prequels","Original","Sequels")
# Set the data (summary statistics, raw data is also possible):
# Max likelihood cov mat (divide by n rather than n-1):
n <- nrow(Data)
covMat <- (n-1)/n * cov(Data[,obsvars])
dataRaw <- mxData(observed=covMat, type="cov", numObs = nrow(Data))
# Lambda matrix: factor loading pattern -- Q1 loads on all three factors,
# Q2-Q4 on Prequels, Q5-Q7 on Original, Q8-Q10 on Sequels.
Lambda <- matrix(0, 10, 3)
Lambda[1:4,1] <- 1
Lambda[c(1,5:7),2] <- 1
Lambda[c(1,8:10),3] <- 1
mxLambda <- mxMatrix("Full",
nrow = 10,
ncol = 3,
free = Lambda!=0,
values = Lambda,
name = "lambda",
dimnames = list(obsvars, latents)
)
# Psi matrix: latent covariances free, variances fixed at 1 (identification).
# NOTE(review): this assignment shadows base::diag() for the rest of the
# script; `diag` is a 3x3 identity matrix from here on.
diag <- diag(3)
mxPsi <- mxMatrix("Symm",
nrow = 3,
ncol = 3,
values = diag,
free = diag != 1,
name = "psi",
dimnames = list(latents, latents)
)
# Theta matrix: diagonal residual variances, all free, bounded below by 0.
mxTheta <- mxMatrix("Diag",
nrow = 10,
ncol = 10,
name = "theta",
free = TRUE,
dimnames = list(obsvars, obsvars),
lbound = 0 # Lower bound
)
# Implied variance--covariance matrix: lambda psi lambda' + theta.
mxSigma <- mxAlgebra(lambda %*% psi %*% t(lambda) + theta, name = "sigma")
# Expectation (this tells OpenMx that sigma is the expected cov matrix).
# NOTE(review): `exp` shadows base::exp() from here on.
exp <- mxExpectationNormal( covariance="sigma",dimnames=obsvars)
# Fit function (max likelihood)
funML <- mxFitFunctionML()
# Combine everything in a big model:
model <- mxModel("Star Wars",
dataRaw,
mxLambda,
mxPsi,
mxTheta,
mxSigma,
exp,
funML)
# Run model:
model <- mxRun(model)
# Look at model summary:
summary(model)
# chi-square: χ² ( df=30 ) = 34.56062, p = 0.2589784
# very similar to lavaan and psychonetrics
# Modification indices (top 10 largest):
MIs <- mxMI(model)
sort(MIs$MI, decreasing = TRUE)[1:10]
|
5fe82c7e326dc441f062fbc43a16f0bcb058f05d
|
866a5bcef6311cd5f724086a4ef80bfe75485eaa
|
/main_r/1_course_category.R
|
45ae84865b4ff306816af8b411bd023bd6ccc2f9
|
[] |
no_license
|
Sandy4321/KDD2015-4
|
08dff1ca62bd2915e9664bf9282d7366631cf001
|
e60a1b13f9220485b3655fd2eb4f44b1475330d8
|
refs/heads/master
| 2020-04-19T05:49:51.443040
| 2015-06-16T12:29:16
| 2015-06-16T12:29:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 883
|
r
|
1_course_category.R
|
# Build per-course category-count features from object.csv and append them to
# the train/test enrollment tables, then persist the extended tables.
# NOTE(review): setwd()/rm(list = ls()) at the top of a script are generally
# discouraged; kept as-is to preserve behavior.
setwd('Google Drive/KDD2015')
rm(list = ls()); gc()
require(data.table)
load('data/new/raw_data_log.RData')
object <- fread('data/object.csv',data.table=F)
# Per-course counts of objects in each category (table() within course_id);
# the column rename strips aggregate()'s "x." prefix from the count columns.
nFeat <- as.matrix(aggregate(object$category,list(object$course_id),FUN=table))
colnames(nFeat) <- c('course_id',paste0('course_',sub("x.","",colnames(nFeat)[-1])))
# for(i in 1: ncol(nFeat[,2])){
# print(paste0(colnames(nFeat[,2])[i],': ',length(table(nFeat[,2][,i]))))
# }
# Drop two of the category columns -- TODO confirm which categories columns
# 5 and 6 correspond to and why they are excluded.
nFeat <- nFeat[,-c(5,6)]
# Left-join the per-course features onto train/test (merge on the common
# column, presumably course_id -- verify).
train <- merge(train,nFeat,sort=F,all.x=T)
test <- merge(test,nFeat,sort=F,all.x=T)
tail(train)
tail(test)
# Coerce the newly appended feature columns (character after as.matrix) back
# to numeric.  Column ranges are hard-coded -- TODO confirm they still match
# the current train/test layouts if the inputs change.
for(i in 59:71){
train[,i] <- as.numeric(train[,i])
}
for(i in 58:70){
test[,i] <- as.numeric(test[,i])
}
write.csv(train,file='data/new/train_extend.csv',quote=F, row.names=F)
write.csv(test,file='data/new/test_extend.csv',quote=F, row.names=F)
save(train, test, file='data/new/raw_data_extend.RData')
|
c5e4b9f05339dca1fa28726a03365637a4052a3d
|
3eb69d7a6d64650bec15b8cbe15f4c0ec22b6d14
|
/scripts/R/imp/impManhattan.R
|
1245862fb5d2917591d79a98334da20fd6c403fe
|
[] |
no_license
|
vujkovicm/mvp
|
df6aec06a3c4ecc4664772f83b6ea5509dfcd93c
|
33d563f2d47e890a3d3b3a335091bef1ff603e0e
|
refs/heads/master
| 2021-01-19T17:58:59.774768
| 2020-01-21T21:17:54
| 2020-01-21T21:17:54
| 101,102,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,164
|
r
|
impManhattan.R
|
rm(list=ls())
args = (commandArgs(TRUE))
info = read.table(paste0("/group/research/mvp001/snakemake/info/imputed.", args[4], ".info"), T, stringsAsFactors = F)
info$SNP = paste0("chr", info$SNP)
info = unlist(info)
##=============================
### data handling
###=============================
source('/group/research/mvp001/snakemake/scripts/R/imp/qqman.R')
in.data <- read.table(file = args[1], header = T, sep = " ", stringsAsFactors = FALSE)
# other names if metal or snptest or plink
names(in.data) = c("SNP", "CHR", "BP", "NEA", "EA", "N", "EAF", "BETA", "SE", "P")
### handle zero pvalue
in.data$P = as.numeric(in.data$P)
in.data[which(in.data$P == 0), "P"] <- NA #min(in.data[which(in.data$P > 0), "P"])
in.data[which(in.data$P >= 1), "P"] <- NA
in.data[which(in.data$P < 0), "P"] <- NA
in.data = in.data[in.data$CHR %in% c(1:22),]
in.data = in.data[which(is.na(in.data$P) == F),]
in.data = in.data[order(in.data$CHR, in.data$BP),]
in.data = in.data[which(in.data$SNP %in% info),]
# args[5] = "summary/imp/NAFLDadj/NAFLDadj.EUR.imp.info0.6.maf0.01.score.out"
#write.table(in.data, args[5], row.names = F, col.names = T, quote = F, sep = " ")
### remove the observations with non-valid p-values and not duplicated chrcbp
in.data <- in.data[!duplicated(in.data[,"SNP"]) & !is.na(in.data[,"P"]),]
write.table(in.data, args[5], row.names = F, col.names = T, quote = F, sep = " ")
# color the genome wide significant snps
highlight = unlist(in.data[which(in.data$P < 0.00000005),"SNP"])
png(file = args[2], width = 1000, height = 700)
manhattan(in.data, cex = 0.6, highlight = highlight)
dev.off()
in.data$stats <- qchisq(1 - in.data$P, 1)
LAMBDA <- median(in.data$stats) / 0.4549
print(LAMBDA)
lambdatext <- LAMBDA
### QQ plot
qqdata = in.data
obs = -log10(sort(qqdata$P))
exp = -log10(1:length(obs) / length(obs))
maxxy = max(max(obs), max(exp))
png(file = args[3])
plot(x = exp,
y = obs,
pch = 20,
xlim = c(0, maxxy + 1),
ylim = c(0, maxxy + 1),
xlab = "Expected -lg(p-value)",
ylab = "Observed -lg(p-value)",
sub = lambdatext
)
segments(x0 = 0,
y0 = 0,
x1 = maxxy,
y1 = maxxy,
lwd = 2,
col = "blue"
)
dev.off()
|
e7e52dfdd7cb6b767c65f8ba6d21bb0c464d11f5
|
27ce885b2fade2fb60e13c55e900d84423f78cf3
|
/man/GetModelJob.Rd
|
68e85c6d94c73abe923bcfa3af71f435a4b106ba
|
[] |
no_license
|
bgreenwell/datarobot
|
93d1a0bce65c733db47e23121d46dc5cab74dd3a
|
ff21e1efbb261fe80d1671a01e5913b5e5f10bf6
|
refs/heads/master
| 2020-04-22T23:02:15.962619
| 2019-02-12T08:55:40
| 2019-02-12T08:55:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,715
|
rd
|
GetModelJob.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ListModelJobs.R
\name{GetModelJob}
\alias{GetModelJob}
\title{Request information about a single model job}
\usage{
GetModelJob(project, modelJobId)
}
\arguments{
\item{project}{character. Either (1) a character string giving the unique alphanumeric
identifier for the project, or (2) a list containing the element projectId with this
identifier.}
\item{modelJobId}{Character string specifying the job id}
}
\value{
list with following elements:
\itemize{
\item status. Model job status; an element of JobStatus, e.g. JobStatus$Queue.
\item processes. List of character vectors describing any preprocessing applied.
\item projectId. Character string giving the unique identifier for the project.
\item samplePct. Numeric: the percentage of the dataset used for model building.
\item trainingRowCount. Integer. The number of rows of the project dataset used in training
the model.
\item modelType. Character string specifying the model this job builds.
\item modelCategory. Character string: what kind of model this is - 'prime' for DataRobot Prime
models, 'blend' for blender models, and 'model' for other models.
\item featurelistId. Character string: id of the featurelist used in fitting the model.
\item blueprintId. Character string: id of the DataRobot blueprint on which the model is based.
\item modelJobId. Character: id of the job.
}
}
\description{
Request information about a single model job
}
\examples{
\dontrun{
projectId <- "59a5af20c80891534e3c2bde"
initialJobs <- ListModelJobs(projectId)
job <- initialJobs[[1]]
modelJobId <- job$modelJobId
GetModelJob(projectId, modelJobId)
}
}
|
0edd722c55e898c09954456d11b9fcfc6d106234
|
046a8712f15b3fb39d8da5c2b81b77119a10eb99
|
/R/amtrak_analysis_lightweight.R
|
5e7891952aa357b8d1e05081580e0e2117f29aef
|
[] |
no_license
|
michaelgaunt404/amtrack_extack
|
d081595c0b5ad648322a83075589ff7268fc83d7
|
e005f9863966704e6961cebc2e7e18e68ea0bc1d
|
refs/heads/master
| 2021-04-24T11:20:59.725885
| 2020-05-05T19:37:19
| 2020-05-05T19:37:19
| 250,110,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 51,628
|
r
|
amtrak_analysis_lightweight.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This script performs the Amtrak merge verification analysis
#
# By: mike gaunt, michael.gaunt@wsp.com
#
# README: script is inteded to be lightweight
#-------- script intended to be sourced by a markdown file
#-------- for summary and reporting purposes
#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#package install and load~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
library(data.table)
library(magrittr)
library(dplyr)
library(stringr)
library(lubridate)
library(kableExtra)
library(formattable)
library(knitr)
library(ggplot2)
library(forcats)
library(visdat)
library(ggpubr)
library(treemapify)
# --- Global settings -------------------------------------------------------
# Which ASSETSPEC export to analyze (file name stem under ./data/):
# ASSETSPEC 2020-04-21
# ASSETSPEC 2020-03-16
file = "ASSETSPEC 2020-04-21"
# --- Path and data set-up --------------------------------------------------
# Set the working directory to the "matching" folder containing this script,
# derived from the RStudio editor path (requires running inside RStudio).
setwd("~/")
rstudioapi::getSourceEditorContext()$path %>%
as.character() %>%
gsub("(matching).*","\\1", .) %>%
path.expand() %>%
setwd()
# --- ASSETSPEC import ------------------------------------------------------
# Columns of the asset export that this analysis uses.
names_indx = c('assetspecid', 'assetnum', 'assetattrid',
'classstructureid', 'displaysequence', 'alnvalue',
'changedate', 'changeby', 'Data_Date')
# Load the asset data and keep only the columns named above.
file_path = paste0("./data/", file, ".csv")
asset = fread(file_path)
num_index = c(which(colnames(asset) %in% names_indx))
asset = asset %>%
.[, ..num_index]
# asset = asset %>%
# set_colnames(names_indx)
# Remove record families excluded from the verification (track segments,
# culvert spans, AMTMAX, JCH) and write them out for reference.
# NOTE(review): the trailing `== TRUE` applies only to the last grepl() in
# the chain and is redundant.
index = grepl("SEDTRACKSEGMENT", asset$assetattrid) |
grepl("SED_CULVERTSPAN_", asset$assetattrid) |
grepl("AMTMAX", asset$assetattrid) |
grepl("JCH", asset$assetattrid) == TRUE
asset_records_removed = asset[index,]
asset_records_removed %>%
fwrite(file = "./output/data/asset_records_removed.csv")
asset = asset[!index,]
# Duplicate check: (assetnum, assetattrid) pairs that occur more than once.
dupe_checkr = asset %>%
.[,.(.N), by = .(assetnum, assetattrid)] %>%
.[order(-N)] %>%
.[N >1]
# Duplicate counts per attribute, and the grand total of duplicated rows.
dupe_checkr %>%
.[, .(.N), by = assetattrid]
dupe_checkr %>%
.[, .(sum(N))]
# Inspect and persist the duplicate (assetnum, assetattrid) summary.
dupe_checkr
# BUG FIX: the original called fwrite(., ...) with the magrittr dot OUTSIDE of
# a pipe, which errors at run time (object '.' not found).  Pass the table
# explicitly instead.
fwrite(dupe_checkr, file = paste0("./output/data/",
"wtf", ".csv"))
#sed file import~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#loading source file information data~~~~~
# Load the SED source-file id tables and derive full paths to each source csv.
master_id = fread("./data/master_id.csv")
master_id_lookup = fread("./data/master_lookup.csv") %>%
.[,`:=`(full_src = paste0("./data/", src, ".csv"))]
# Remove source-file directories that are not part of this verification.
# NOTE(review): the pattern "FROGVERALTVER]" contains a literal ']' -- confirm
# this is not a typo for "FROGVERALTVER".
index = grepl("GEO", master_id_lookup$src) |
grepl("META", master_id_lookup$src) |
grepl("Speed", master_id_lookup$src) |
grepl("Route", master_id_lookup$src) |
grepl("Frogs", master_id_lookup$src) |
grepl("FROGVERALTVER]", master_id_lookup$src) == TRUE
master_id_lookup = master_id_lookup[!index,]
# Merge both files, keep the (ID, src) columns, and make ID an integer key.
all_source_file_ids = master_id %>%
merge(master_id_lookup) %>%
.[,c(2:3)] %>%
unique()
all_source_file_ids$ID = all_source_file_ids$ID %>% as.integer()
# --- ASSETSPEC-side EDA and munging ----------------------------------------
# SED_ID records map an assetnum to its SED extract id (alnvalue).
name_sub_index = names_indx[c(1,2,3,6)]
lookup_asset_num_id = asset[assetattrid == "SED_ID", ..name_sub_index] %>%
.[order(alnvalue)]
# lookup_asset_num_id = asset[assetattrid == "SED_ID",c(1,2,3,6)] %>%
# .[order(alnvalue)]
lookup_asset_num_id %>%
fwrite(file = "./output/data/lookup_asset_num_id.csv")
first_num = lookup_asset_num_id %>% nrow()
# alnvalue must be an integer id for the later merge; non-numeric values
# become NA here and are written off as incomplete below.
lookup_asset_num_id$alnvalue = lookup_asset_num_id$alnvalue %>%
as.integer()
# Incomplete records (any NA after coercion).
tmp = lookup_asset_num_id[!complete.cases(lookup_asset_num_id)]
bad_num = tmp %>% nrow()
# Keep complete cases only.
lookup_asset_num_id = lookup_asset_num_id[complete.cases(lookup_asset_num_id)]
#aggregating duplicated records per field~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Count the number of distinct values in a vector.
#'
#' @param list A vector (anything unique() accepts).  The parameter name
#'   shadows base::list() inside this function; it is kept unchanged for
#'   backward compatibility with any named callers.
#' @return Integer scalar: the number of distinct elements.
get_unique <- function(list) {
  # Base-R form of the original `list %>% unique() %>% length()` pipe.
  length(unique(list))
}
# Per-column distinct counts vs total complete records (row 4 dropped).
tmp_check = data.table(complete_records = lookup_asset_num_id %>% nrow(),
lookup_asset_num_id %>%
purrr::map_df(., get_unique)) %>%
melt.data.table(variable.name = "Number of Records per Item",
value.name = "Count") %>%
.[,`:=`(non_Distinct_Count = nrow(lookup_asset_num_id)-Count)] %>%
.[-4,]
# --- Duplicate extraction --------------------------------------------------
# SED ids (alnvalue) that appear on more than one asset record.
index_dupe_ids = unique(lookup_asset_num_id[duplicated(alnvalue)]$alnvalue)
# Asset-side records carrying a duplicated SED id.
lookup_asset_num_id_duplicated = lookup_asset_num_id[which(alnvalue %in% index_dupe_ids),]
# Source-file-side IDs that are themselves duplicated.
sfiles_duplicated_ids = all_source_file_ids[which(ID %in% unique(all_source_file_ids[duplicated(ID)]$ID)),] %>%
.[order(ID)]
# Source-file rows for the asset-side duplicated ids.
different_source_duplicates = all_source_file_ids[which(ID %in% index_dupe_ids)] %>%
.[order(ID)]
# Keep only ids that occur in more than one source file.
different_source_duplicates = setDT(different_source_duplicates)[, if(.N > 1) .SD, by = ID]
# --- SED extract IDs merged with non-empty asset IDs -----------------------
# Outer merge: source-file ids against asset SED ids.
raw_merge = all_source_file_ids %>%
merge(.,
lookup_asset_num_id,
by.x = "ID",
by.y = "alnvalue",
all = TRUE) %>%
data.table() %>%
unique()
duplicate_id_check = raw_merge %>%
semi_join(different_source_duplicates[,1], by = 'ID') %>%
data.table() %>%
.[order(ID, assetnum)]
# Merge diagnostics: rows missing either side of the join.
merge_check_NA = raw_merge[!complete.cases(raw_merge)] #all rows w/ NA
merge_check_NA_num_total = merge_check_NA %>% nrow()
merge_check_NA_src = merge_check_NA[is.na(src)] #records w/ asset data but no s.file date
merge_check_NA_src_num = merge_check_NA_src %>% nrow()
merge_check_NA_assetnum = merge_check_NA[is.na(assetnum)] #records w/ s.file date but no asset data
merge_check_NA_assetnum_num = merge_check_NA_assetnum %>% nrow()
# Matched IDs between SEDEXTRACT and ASSETSPEC: complete cases only.
master_matched_ids = raw_merge %>%
na.omit() %>%
unique()
duplicated_id_list = master_matched_ids[duplicated(ID),1] %>%
unique()
duplicated_record_list = master_matched_ids[duplicated(master_matched_ids),]
# Matched ids with all duplicated ids removed.
master_matched_ids_duplicate_rm = anti_join(master_matched_ids,
duplicated_id_list,
by = "ID") %>%
data.table() %>%
.[order(ID), c(1:4)]
# THIS IS IMPORTANT -- SHOULD THE DUPLICATES BE REMOVED??? THEY CURRENTLY
# ARE NOT: downstream work continues from master_matched_ids.
master_matched_ids_duplicate_only = semi_join(master_matched_ids,
duplicated_id_list,
by = "ID") %>%
data.table() %>%
.[order(ID),]
# --- Attribute extraction --------------------------------------------------
# asset_merged: all ASSETSPEC rows for assets whose SED_ID matched a source
# file, tagged with the matching (ID, src).
# asset_merged = lookup_asset_num_id %>%
# semi_join(asset[,c(1,2,3,6)], ., by = c("assetnum")) %>%
# merge(., master_matched_ids[,c(-3,-5)], by = c("assetnum")) %>%
# data.table() %>%
# unique()
asset_merged = lookup_asset_num_id %>%
semi_join(asset[, ..name_sub_index], ., by = c("assetnum")) %>%
merge(., master_matched_ids[,c(-3,-5)], by = c("assetnum")) %>%
data.table() %>%
unique()
asset_merged$ID = as.integer(asset_merged$ID)
# Merge diagnostics.
merge_check_NA = asset_merged[!complete.cases(asset_merged)] #all rows w/ NA
merge_check_NA_num_total = merge_check_NA %>% nrow()
# --- Check for duplicated column names in ASSETSPEC ------------------------
# Attributes containing "2" (e.g. FOO2) suggest a duplicated column; strip
# the "2" and look for the base attribute name in the same source files.
tmp = asset_merged %>%
.[str_detect(assetattrid, "2")] %>%
.[,.(assetattrid, src)] %>%
unique()
tmp$assetattrid = str_remove_all(tmp$assetattrid, pattern = "2") %>%
gsub('^\\.|\\_$', '', .) %>%
gsub('^\\.|\\-$', '', .)
yolo = asset_merged[grepl(paste0(tmp$assetattrid, collapse = "|"), assetattrid)] %>%
.[,.(src, assetattrid)] %>%
unique()
duplicated_colname_extract_side = yolo[which(src %in% tmp$src)] %>%
.[order(src, assetattrid)]
# --- Initial merge: extract SED source-file data ---------------------------
#removing SED_TUNNEL, has bad headers
# index = grepl("Tunnel", asset_merged$src)
# asset_merged = asset_merged[!index,]
index = grepl("ALTVER", asset_merged$src)
asset_merged = asset_merged[!index,]
# For every matched source file, read its csv (all columns as character),
# keep the matched ids, melt to long (ID, variable, value) form and prefix
# attribute names with "SED_" to align with ASSETSPEC's assetattrid.
# NOTE(review): the blanket suppressMessages/suppressWarnings hides any read
# problems in the loop -- consider logging instead.
suppressMessages({
suppressWarnings({
retreived_source_id_info = data.table()
print("Extracting data from the following source files:")
for (i in unique(asset_merged$src)){
print(i)
tmp = asset_merged %>%
.[src == i,ID] %>%
unique()
retreived_source_id_info = master_id_lookup[src == i,full_src] %>%
fread(., colClasses = 'character') %>%
.[ID %in% tmp,] %>%
.[,`:=`(id = ID)] %>%
purrr::map_df(as.character) %>%
reshape2::melt(id.vars = c("ID"),
variable.factor = FALSE,
warning = FALSE) %>%
data.table() %>%
.[,`:=`(assetattrid = paste0("SED_", str_to_upper(variable)))] %>%
bind_rows(retreived_source_id_info, .)
}
print("Done")
})
})
retreived_source_id_info$ID = retreived_source_id_info$ID %>%
as.integer()
# Merge diagnostics.
merge_check_NA = retreived_source_id_info[!complete.cases(retreived_source_id_info)] #all rows w/ NA
merge_check_NA_num_total = merge_check_NA %>% nrow()
# --- SED file and ASSETSPEC merge ------------------------------------------
# Pair every ASSETSPEC attribute value (alnvalue) with the corresponding
# source-file value (value), keyed on (ID, assetattrid).
colum_order_index = c("src", "assetnum", "ID", "assetspecid",
"assetattrid", "variable", "alnvalue", "value")
attribute_value_master = merge(asset_merged,
retreived_source_id_info,
by.x = c("ID", "assetattrid"),
by.y = c("ID", "assetattrid"), all = TRUE) %>%
unique() %>%
.[,..colum_order_index] %>%
.[order(src, ID, assetnum, assetattrid,)] %>%
.[,-1] %>%
merge(., all_source_file_ids, by = "ID" ) %>%
.[,..colum_order_index] %>%
.[order(src, ID, assetnum, assetattrid,)]
# Drop JCH attributes (excluded from verification).
index = grepl("JCH", attribute_value_master$assetattrid)
attribute_value_master = attribute_value_master[!index]
attribute_value_master_rows = attribute_value_master %>% nrow()
attribute_value_master %>%
.[duplicated(variable)]
# Merge diagnostics and NA pattern counts.
merge_check_NA = attribute_value_master[!complete.cases(attribute_value_master)] #all rows w/ NA
merge_check_NA_num_total = merge_check_NA %>% nrow()
na_count = is.na(merge_check_NA) %>%
data.table() %>%
.[,.(.N), by = .(src, ID, assetnum, assetspecid, assetattrid, alnvalue, variable, value)] %>%
.[,-c('src', 'ID', 'assetattrid')]
# --- Incomplete-record exploration -----------------------------------------
# Classify the NA rows: extract data but no asset data, asset data but no
# extract data, missing alnvalue, or missing extract value.
ffmerge_na_no_asset_data = merge_check_NA[is.na(assetnum) &
is.na(assetspecid) &
is.na(alnvalue) &
!is.na(variable) &
!is.na(value),]
ffmerge_na_no_extract_data = merge_check_NA[!is.na(assetnum) &
!is.na(assetspecid) &
!is.na(alnvalue) &
is.na(variable) &
is.na(value),]
ffmerge_na_no_alnvalue_value = merge_check_NA[!is.na(assetnum) &
!is.na(assetspecid) &
is.na(alnvalue) &
!is.na(variable) &
is.na(value),]
ffmerge_na_no_value = merge_check_NA[!is.na(assetnum) &
!is.na(assetspecid) &
!is.na(alnvalue) &
!is.na(variable) &
is.na(value),]
# --- Split on match type ---------------------------------------------------
# Complete rows only, then partition into value-mismatch and value-match.
attribute_value_master = attribute_value_master %>%
na.omit()
attribute_value_master_good_row = attribute_value_master %>% nrow()
attribute_value_notmatching = attribute_value_master %>%
.[alnvalue != value,] %>%
.[,..colum_order_index] %>%
.[order(src, ID, assetnum, assetattrid,)]
attribute_value_matching = attribute_value_master %>%
.[alnvalue == value,] %>%
.[,..colum_order_index] %>%
.[order(src, ID, assetnum, assetattrid,)]
# --- Type-1 error identification -------------------------------------------
# Peel away mismatches explained by formatting, leaving true value conflicts.
# (1) Case and whitespace: mismatches that disappear after lower-casing and
# trimming both sides.
index_character_mismatch = str_trim(str_to_lower(attribute_value_notmatching$alnvalue), side = "both") == str_trim(str_to_lower(attribute_value_notmatching$value),side = "both")
tmp = attribute_value_notmatching[!index_character_mismatch]
total_matched_example_case_whitespace_sensitive = attribute_value_notmatching[index_character_mismatch]
# (2) Numeric padding: mismatches where both sides parse to the same number
# (e.g. "007" vs "7").  Unparseable sides get distinct sentinels so they can
# never spuriously compare equal.
index_alnvalue = as.numeric(tmp$alnvalue)
index_alnvalue[is.na(index_alnvalue)] = 0
index_value = as.numeric(tmp$value)
index_value[is.na(index_value)] = -29348576.127
index_PADDING_mismatch = index_alnvalue == index_value
total_matched_example_padding = tmp[index_PADDING_mismatch]
tmp = tmp[!index_PADDING_mismatch]
vairable_type = total_matched_example_padding %>%
.[,.(count = .N), by = .(variable)] %>%
.[order(-count)]
# (3) Rounding: long/lat columns, shown previously to differ only by rounding.
index = grepl("LONG", tmp$assetattrid) | grepl("LATITUDE", tmp$assetattrid)
rounding = tmp[index]
tmp = tmp[!index]
# (4) Special characters: SED_DIMENSIONS mismatches attributed to bad
# character encoding are dropped from the "bad" set.
index_DIMENSIONS = tmp$assetattrid == "SED_DIMENSIONS"
total_matched_bad = tmp[!index_DIMENSIONS]
bad_special = nrow(tmp[index_DIMENSIONS])
# (5) Date format mismatches: a DATE attribute matches when the extract value
# equals the asset value interpreted either as an Excel serial day (origin
# 1899-12-30) or as an mdy-style "/" date.
tmp_time_dt = total_matched_bad[str_detect(total_matched_bad$assetattrid, "DATE") ,]
date_pos_match_excel = tmp_time_dt[value %>%
ymd_hms() %>%
date() == alnvalue %>%
as.numeric() %>%
as.Date(origin = "1899-12-30"),]
date_pos_match_format = tmp_time_dt[str_detect(alnvalue, "/")] %>%
.[value %>%
ymd_hms() %>%
date() == alnvalue %>%
mdy_hm(format = "%Y-%m-%d") %>%
as_date() %>%
na.omit(),]
# Whatever is left is a genuine date mismatch; remove the explained dates
# from the "bad" set and collect the explained ones.
tmp_time_dt_remaining = tmp_time_dt %>%
anti_join(date_pos_match_excel, by = c("ID", "assetspecid")) %>%
anti_join(date_pos_match_format, by = c("ID", "assetspecid")) %>%
data.table()
total_matched_bad = total_matched_bad %>%
anti_join(date_pos_match_excel, by = c("ID", "assetspecid")) %>%
anti_join(date_pos_match_format, by = c("ID", "assetspecid")) %>%
data.table()
tmp_good_date = date_pos_match_excel %>%
bind_rows(date_pos_match_format) %>%
data.table()
# --- Final data merge, sliced on match type --------------------------------
# Everything not classified as a genuine mismatch counts as good; write both
# sets out and compute the failure rate.
total_matched_good = attribute_value_master %>%
anti_join(total_matched_bad, by = c('assetnum', 'assetspecid', "assetattrid")) %>%
data.table()
total_matched_bad %>%
fwrite(file = "./output/data/total_matched_bad.csv")
total_matched_good %>%
fwrite(file = "./output/data/total_matched_good.csv")
# Percentage of attribute records that failed verification.
fail_percent = 100*nrow(total_matched_bad)/(nrow(total_matched_bad)+ nrow(total_matched_good))
#File Correction~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#function creation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Report whether a table that should be empty actually is.
#'
#' Emits a message(): "All Records Accounted for!" when `data` has zero rows,
#' otherwise "Records Missing!".
#'
#' @param data A data.frame / data.table of leftover records.
#' @return Invisible NULL (message()'s return value), as before.
checkr_empty <- function(data) {
  # Scalar condition: plain if/else instead of the original vectorized
  # ifelse() piped into message().
  if (nrow(data) == 0) {
    message("All Records Accounted for!")
  } else {
    message("Records Missing!")
  }
}
#' Report whether two tables have the same number of rows.
#'
#' Emits a message(): "All Records Accounted for!" when the row counts match,
#' otherwise "Records Missing!".
#'
#' @param data_one First data.frame / data.table.
#' @param data_two Second data.frame / data.table.
#' @return Invisible NULL (message()'s return value), as before.
checkr_compare <- function(data_one, data_two) {
  # Scalar condition: plain if/else instead of the original vectorized
  # ifelse() piped into message().
  if (nrow(data_one) == nrow(data_two)) {
    message("All Records Accounted for!")
  } else {
    message("Records Missing!")
  }
}
#' Check whether duplicated (assetnum, assetattrid) records disagree on a column.
#'
#' Counts the distinct values of `column` per (assetnum, assetattrid) pair;
#' any pair with more than one distinct value is a conflicting duplicate.
#'
#' @param data A data.table with columns assetnum, assetattrid and `column`.
#' @param column Character: name of the value column to compare.
#' @return Invisible NULL; side effects are a printed count and a message().
checkr_dupe_diff <- function(data, column) {
  # Distinct values of `column` per (assetnum, assetattrid) pair, then the
  # number of pairs with more than one distinct value.
  per_value <- data[, .(.N), by = c("assetnum", "assetattrid", column)]
  per_pair <- per_value[, .(.N), by = .(assetnum, assetattrid)]
  dupe_num <- nrow(per_pair[N > 1])
  # BUG FIX: the original print("Number of duplicates", dupe_num) passed the
  # count to print()'s second argument, so it was never displayed.
  print(paste("Number of duplicates:", dupe_num))
  if (dupe_num == 0) {
    message("All dupe records have same value!")
  } else {
    message("Different Values!!!!")
  }
}
#Date Correction~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Data Set-up~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Work queue pattern: start from every date record whose value/alnvalue pair
#did not match, then peel off one explainable category at a time. Each
#category is removed from `remaining_bad_dates` via anti_join on the record
#key (assetnum, ID, assetspecid) so the categories stay disjoint.
#NOTE(review): `tmp_time_dt_remaining` is built earlier in the file (not
#visible here); assumed to be a data.table with value/alnvalue columns and
#the three key columns — confirm.
remaining_bad_dates = tmp_time_dt_remaining
remaining_bad_dates$value = remaining_bad_dates$value %>%
  str_trim()
#Date Correction: Negative Matches~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Bad Dates: empty-string value -> corrected date is the literal "NULL"~~~~~~~~~~~
bad_date_empty_value = remaining_bad_dates %>%
  .[value == "",]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_empty_value, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
bad_date_empty_value$nu_date = "NULL"
# bad_date_empty_value %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_NULL)), ".csv"))
#Bad Dates: literal "NULL" value -> flagged for manual follow-up~~~~~~~~~~~~~~~~~
bad_date_NULL_value = remaining_bad_dates %>%
  .[value == "NULL",]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_NULL_value, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
bad_date_NULL_value$nu_date = "FURTHER WORK REQUIRED"
# bad_date_NULL_value %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_NULL)), ".csv"))
#Bad Dates: alnvalue "NULL", value is a bare 4-char year -> Jan 1 of that year~~~
bad_date_NULL_alnvalue_year = remaining_bad_dates %>%
  .[alnvalue == "NULL",] %>%
  .[str_count(value) == 4]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_NULL_alnvalue_year, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
bad_date_NULL_alnvalue_year = bad_date_NULL_alnvalue_year %>%
  .[,`:=`(nu_date = paste0(value, "-01-01 00:00:00"))]
# bad_date_NULL_alnvalue_year %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_NULL_alnvalue_year)), ".csv"))
#Bad Dates: alnvalue "NULL", value longer than a datetime -> truncate to 19 chars
bad_date_NULL_alnvalue_fulldate = remaining_bad_dates %>%
  .[alnvalue == "NULL",] %>%
  .[str_count(value) > 10]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_NULL_alnvalue_fulldate, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
bad_date_NULL_alnvalue_fulldate = bad_date_NULL_alnvalue_fulldate %>%
  .[,`:=`(nu_date = str_trunc(value, 19, "right", ellipsis = ""))]
# bad_date_NULL_alnvalue_fulldate %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_NULL_alnvalue)), ".csv"))
#Bad Dates: alnvalue "NULL", remaining values treated as Excel serial days~~~~~~~
bad_date_NULL_alnvalue_integer = remaining_bad_dates %>%
  .[alnvalue == "NULL",]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_NULL_alnvalue_integer, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX: Excel serial day (origin 1899-12-30), floored to whole days via %/% 1,
#then rendered as "YYYY-MM-DD 00:00:00"
bad_date_NULL_alnvalue_integer = bad_date_NULL_alnvalue_integer %>%
  .[,`:=`(nu_date = as.numeric(value)%/%1 %>%
            as_date(origin = "1899-12-30")),] %>%
  .[,`:=`(nu_date = nu_date %>%
            paste("00:00:00") %>%
            str_trunc(19, "right", ellipsis = "")),]
# bad_date_NULL_alnvalue_integer %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_NULL_alnvalue)), ".csv"))
#Bad Dates: alnvalue "0" -> keep spreadsheet value, truncated to 19 chars~~~~~~~~
bad_date_zero = remaining_bad_dates %>%
  .[alnvalue == "0",]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_zero, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
bad_date_zero = bad_date_zero %>%
  .[,`:=`(nu_date = str_trunc(value, 19, "right", ellipsis = ""))]
# bad_date_zero %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_zero)), ".csv"))
#Bad Dates: alnvalue looks datetime-formatted, short value -> Excel serial~~~~~~~
#NOTE(review): str_detect(alnvalue, ".") uses "." as a regex wildcard, so it
#matches ANY non-empty alnvalue and the second condition is a no-op; if a
#literal dot was intended, use fixed(".") — confirm.
bad_date_custom_format_w_integer = remaining_bad_dates %>%
  .[str_detect(alnvalue, ":") &
      str_detect(alnvalue, ".")] %>%
  .[str_count(value) < 9]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_custom_format_w_integer, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX: short value is an Excel serial day (origin 1899-12-30)
bad_date_custom_format_w_integer = bad_date_custom_format_w_integer %>%
  .[,`:=`(nu_date = as.numeric(value) %>%
            as_date(origin = "1899-12-30") %>%
            paste0(" 00:00:00")), ]
# bad_date_custom_format_w_integer %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_custom_format_w_integer)), ".csv"))
#Bad Dates: alnvalue datetime-formatted, value already date-like -> truncate~~~~~
bad_date_custom_format_w_date = remaining_bad_dates %>%
  .[str_detect(alnvalue, ":") &
      str_detect(alnvalue, ".")] %>%
  .[str_count(value) >= 9]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_custom_format_w_date, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
bad_date_custom_format_w_date$nu_date = bad_date_custom_format_w_date$value %>%
  str_trunc(19, "right", ellipsis = "")
# bad_date_custom_format_w_date %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_custom_format_w_date)), ".csv"))
#Bad Dates: long numeric alnvalue (>= 8 chars) -> Excel serial from alnvalue~~~~~
bad_date_decimallong = remaining_bad_dates %>%
  .[!is.na(as.integer(alnvalue)),] %>%
  .[str_count(alnvalue)>=8]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_decimallong, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX
bad_date_decimallong = bad_date_decimallong %>%
  .[,`:=`(nu_date = as.numeric(alnvalue)%/%1 %>%
            as_date(origin = "1899-12-30")),] %>%
  .[,`:=`(nu_date = nu_date %>%
            paste("00:00:00") %>%
            str_trunc(19, "right", ellipsis = "")),]
# bad_date_decimallong %>%
#   fwrite(., file = paste0("./output/data/outstanding/",
#                           deparse(substitute(bad_date_decimallong)), ".csv"))
#Bad Dates: 4-char value with a plausible year in alnvalue (1801-2024)~~~~~~~~~~~
bad_date_integers_year = remaining_bad_dates[str_count(value) == 4 ,] %>%
  .[as.integer(alnvalue) < 2025 &
      as.integer(alnvalue) > 1800]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_integers_year, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX: trust the year only when the two sides agree to within a decade
bad_date_integers_year$nu_date = ifelse(abs(as.integer(bad_date_integers_year$alnvalue)-as.integer(bad_date_integers_year$value)) < 10,
                                        paste0(bad_date_integers_year$value, "-01-01 00:00:00"),
                                        "FURTHER WORK REQUIRED")
#Bad Dates: everything left -> alnvalue interpreted as an Excel serial day~~~~~~~
#NOTE(review): the `:=` calls below modify remaining_bad_dates BY REFERENCE
#(data.table semantics), so bad_date_remaining and remaining_bad_dates are the
#same table here and the anti_join below necessarily empties the queue.
bad_date_remaining = remaining_bad_dates %>%
  .[,`:=`(value = str_trunc(paste(value, "00:00:00"),19, "right", ellipsis = "" ))] %>%
  .[,`:=`(nu_date = as.numeric(alnvalue) %>%
            as_date(origin = "1899-12-30") %>%
            paste0(" 00:00:00")), ] %>%
  .[alnvalue == 1 & value == "1900-01-01 00:00:00", `:=`(nu_date = "1900-01-01 00:00:00")]
remaining_bad_dates = remaining_bad_dates %>%
  anti_join(bad_date_remaining, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#final check: every bad date record must now be categorised~~~~~~~~~~~~~~~~~~~~~~
checkr_empty(remaining_bad_dates)
#Date Correction: Positive Matches~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Records whose date values DID match; they still need a normalised nu_date.
#Same peel-off pattern as the bad-date section above.
remaining_good_dates = total_matched_good[str_detect(assetattrid, "DATE")]
# remaining_good_dates[assetspecid == "8294503"]
# remaining_good_dates$value = remaining_good_dates$value %>%
#   str_trim()
# remaining_good_dates$alnvalue = remaining_good_dates$alnvalue %>%
#   str_trim()
#Good Dates: literal "NULL" kept as-is~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
good_date_NULL = remaining_good_dates[value == "NULL"]
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_NULL, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
good_date_NULL$nu_date = good_date_NULL$value
#Good Dates: semicolon-separated multi-year strings kept as-is~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
good_date_multi_year = remaining_good_dates %>%
  .[str_detect(alnvalue, ";")]
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_multi_year, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX
good_date_multi_year$nu_date = good_date_multi_year$value
#Good Dates: placeholder "X" kept as-is~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
good_date_X = remaining_good_dates %>%
  .[str_trim(value) == "X"]
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_X, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX
good_date_X$nu_date = good_date_X$value
#Good Dates: matching integers in a plausible year range (1801-2024) -> Jan 1~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# good_date_integers_year = remaining_good_dates %>%
#   .[str_count(alnvalue) == 4] %>%
#   .[str_count(value) == 4]
good_date_integers_year = remaining_good_dates[as.integer(alnvalue) == as.integer(value)] %>%
  .[as.integer(alnvalue) < 2025 &
      as.integer(alnvalue) > 1800]
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_integers_year, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX
good_date_integers_year = good_date_integers_year %>%
  .[,`:=`(nu_date = paste0(value, "-01-01 00:00:00"))]
#Good Dates: matching integers outside the year range -> Excel serial~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
good_date_integers = remaining_good_dates[as.integer(alnvalue) == as.integer(value)]
#NOTE(review): console leftover — prints 30 random rows; harmless but noisy.
good_date_integers %>% sample_n(30)
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_integers, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX: value is an Excel serial day (origin 1899-12-30)
good_date_integers = good_date_integers[,`:=`(nu_date = as.numeric(value) %>%
                                                as_date(origin = "1899-12-30") %>%
                                                paste0(" 00:00:00")), ]
#Good Dates: SED_SUBDATE holds a bare year -> Jan 1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
good_date_SUB = remaining_good_dates[assetattrid == "SED_SUBDATE"]
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_SUB, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
good_date_SUB = good_date_SUB %>%
  .[,`:=`(nu_date = paste0(value, "-01-01 00:00:00"))]
#Good Dates: SED_SUPDATE holds a bare year -> Jan 1~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
good_date_SUP = remaining_good_dates[assetattrid == "SED_SUPDATE"]
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_SUP, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX:
good_date_SUP = good_date_SUP %>%
  .[,`:=`(nu_date = paste0(value, "-01-01 00:00:00"))]
#Good Dates: FROG/POINT attributes use "Mon dd yyyy hh:mm" strings~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
good_date_frogpoint = remaining_good_dates[str_detect(assetattrid, "FROG") |
                                             str_detect(assetattrid, "POINT"),]
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_frogpoint, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX: strptime returns NA (hence nu_date NA) for values not in this format
good_date_frogpoint = good_date_frogpoint %>%
  .[,`:=`(nu_date = strptime(value, format = "%b %d %Y %H:%M") %>%
            as.character())]
#Good Dates: long already-formatted strings -> truncate to 19 chars~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
good_date_formatlong = remaining_good_dates %>%
  .[str_count(value)>=15]
remaining_good_dates = remaining_good_dates %>%
  anti_join(good_date_formatlong, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#FIX
good_date_formatlong = good_date_formatlong %>%
  .[,`:=`(nu_date = str_trunc(value,19, "right", ellipsis = "" ))]
#final check: every good date record must now be categorised~~~~~~~~~~~~~~~~~~~~~
checkr_empty(remaining_good_dates)
#Date Aggregation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Recombine every per-category table, tag its provenance, sanity-check the
#totals against the source tables, and write corrected vs outstanding
#records to disk.
all_bad_date = bind_rows(bad_date_custom_format_w_date,
                         bad_date_custom_format_w_integer,
                         bad_date_decimallong,
                         bad_date_empty_value,
                         bad_date_integers_year,
                         bad_date_NULL_alnvalue_fulldate,
                         bad_date_NULL_alnvalue_integer,
                         bad_date_NULL_value,
                         bad_date_NULL_alnvalue_year,
                         bad_date_remaining,
                         bad_date_zero) %>%
  .[,`:=`(match_type = "bad")]
#Fixed: removed a trailing comma after good_date_X, which passed an empty
#argument to bind_rows().
all_good_date = bind_rows(good_date_formatlong,
                          good_date_frogpoint,
                          good_date_integers,
                          good_date_integers_year,
                          good_date_multi_year,
                          good_date_NULL,
                          good_date_SUB,
                          good_date_SUP,
                          good_date_X) %>%
  .[,`:=`(match_type = "good")]
#checks: eyeball 30 random corrections from each side, then verify row counts~~~~
cbind(all_bad_date[,.(alnvalue, value, nu_date)] %>% sample_n(30),
      all_good_date[,.(alnvalue, value, nu_date)] %>% sample_n(30))
checkr_compare(all_bad_date, tmp_time_dt_remaining)
checkr_compare(all_good_date, total_matched_good[str_detect(assetattrid, "DATE")])
total_date = bind_rows(all_bad_date, all_good_date)
checkr_dupe_diff(total_date, "nu_date")
#Write fully corrected records and the ones flagged for manual follow-up.
#NOTE(review): `file` is a global set earlier in the script (it shadows
#base::file) — confirm it holds the input file name.
total_date[nu_date != "FURTHER WORK REQUIRED"] %>%
  .[,.(assetnum, assetspecid, assetattrid, nu_date)] %>%
  fwrite(., file = paste0("./output/data/corrected_records/",
                          "corrected_dates_",
                          file %>%
                            str_remove_all(" ") %>%
                            str_remove_all("-"),
                          ".csv"))
total_date[nu_date == "FURTHER WORK REQUIRED"] %>%
  .[,.(assetnum, assetspecid, assetattrid, nu_date)] %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          "outstanding_dates_",
                          file %>%
                            str_remove_all(" ") %>%
                            str_remove_all("-"),
                          ".csv"))
#Fixed: was `hdaaatotal_date` (stray keystrokes), an undefined object.
total_date %>%
  .[,.(.N), by = .(assetnum, assetspecid, assetattrid, value)] %>%
  .[order(-N)] %>%
  .[N > 1]
#Pull out the (assetnum, assetattrid, assetspecid) keys that carry more than
#one distinct corrected date, then fetch their full records for inspection.
column = "nu_date"
dupe_DT = total_date %>%
  .[,.(.N), by = c('assetnum', 'assetattrid', "assetspecid", column)] %>%
  .[order(-N)] %>%
  .[,.(.N), by = .(assetnum, assetattrid, assetspecid)] %>%
  .[order(-N)] %>%
  .[N > 1]
tmp = total_date %>%
  semi_join(dupe_DT, by = c("assetnum", 'assetattrid', 'assetspecid'))
#NOTE(review): the lines below are ad-hoc console exploration that was pasted
#into the script. Several fragments were syntactically or semantically broken
#(statements starting mid-pipe with a bare `.`, an incomplete `asse]`
#expression, a bare `assetnum[]`); those are commented out so the script can
#be sourced end to end. The runnable queries are kept byte-for-byte.
attribute_value_master[assetspecid == "5669921" &assetattrid == "SED_VALIDATIONDATE"]
asset[assetspecid == "5669921" &assetattrid == "SED_VALIDATIONDATE"]
asset[which(assetnum %in% tmp$assetnum[5:6])] %>%
  .[which(assetspecid %in% tmp$assetspecid[5:6]),]
asset[which(assetnum %in% tmp$assetnum[5:6])]
# %>%
#   .[which(assetspecid %in% tmp$assetspecid[5:6]),]
asset %>%
  .[,.(.N), by = .(assetnum, assetattrid)] %>%
  .[order(-N)] %>%
  .[N >1] %>%
  fwrite(., file = paste0("./output/data/",
                          "wtf", ".csv"))
asset %>%
  .[,.(Num_repeated_assetattrid_per_assetnum = .N), by = .(assetnum, assetattrid)] %>%
  # .[order(-N)] %>%
  .[,.(Count = .N), by = Num_repeated_assetattrid_per_assetnum] %>%
  .[order(-Count)]
#broken fragments (began mid-pipe; `.` is undefined outside a pipe):
# .[N >1] %>%
#   .[assetattrid == "SED_ID"]
# assetnum[]
asset %>%
  .[,.(.N), by = .(assetnum)] %>%
  .[N >1]
#trailing bare `.` removed above — `x %>% .` is not a valid pipe terminus
asset[assetnum == "AN LINE UG BRIDGE B ST. - NO 82.82"] %>%
  .[assetattrid == "SED_ID"]
#incomplete expression (`asse]`) commented out:
# asset_merged[assetnum == "AN LINE UG BRIDGE B ST. - NO 82.82" &
#                asse]
master_id_lookup
master_id[ID == "155412" |
            ID == "155659"]
#Non-Date Record Correction~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Data Set-up~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#splitting total bad to non-date
#Drop the date records (already handled above) from the mismatched set; the
#leftover non-date mismatches become the new work queue `remaining`.
total_matched_bad_cleaned = total_matched_bad %>%
  anti_join(tmp_time_dt_remaining, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
remaining = total_matched_bad_cleaned
#cross-check: the date rows removed here must equal the aggregated bad dates
total_matched_bad %>%
  .[str_detect(assetattrid, "DATE")] %>%
  checkr_compare(., all_bad_date)
#Bad empty: empty value with alnvalue "NULL" -> emit literal "NULL"~~~~~~~~~~~~~~
bad_empty_character_null = remaining %>%
  .[value == ""] %>%
  .[alnvalue == "NULL"]
remaining = remaining %>%
  anti_join(bad_empty_character_null, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_empty_character_null$nu_value = "NULL"
bad_empty_character_null %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_empty_character_null)), ".csv"))
#Bad empty: empty value but alnvalue present -> manual follow-up~~~~~~~~~~~~~~~~~
bad_empty_character_alnvalue = remaining %>%
  .[value == ""]
remaining = remaining %>%
  anti_join(bad_empty_character_alnvalue, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_empty_character_alnvalue$nu_value = "FURTHER WORK REQUIRED"
#Fixed: the output file was named after `bad_empty_character`, an object that
#does not exist — deparse(substitute()) quoted the wrong name, so the CSV was
#written under a misleading file name.
bad_empty_character_alnvalue %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_empty_character_alnvalue)), ".csv"))
#Bad values: value "N/A" with alnvalue "NULL" -> keep "N/A"~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_NA_character = remaining %>%
  .[value == "N/A"] %>%
  .[alnvalue == "NULL"]
remaining = remaining %>%
  anti_join(bad_NA_character, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_NA_character$nu_value = "N/A"
bad_NA_character %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_NA_character)), ".csv"))
#Bad values: alnvalue "NULL" but a real value present -> keep value~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Fixed: the anti_join and fwrite below referred to `bad_null_value`, an object
#that is never defined (the table is named bad_null_w_value), so this whole
#category errored at runtime.
bad_null_w_value = remaining[alnvalue == "NULL"] %>%
  .[value != "NULL"] %>%
  .[value != ""] %>%
  .[value != "N/A"]
remaining = remaining %>%
  anti_join(bad_null_w_value, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_null_w_value$nu_value = bad_null_w_value$value
bad_null_w_value %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_null_w_value)), ".csv"))
#Bad values: alnvalue present but value "NULL" -> manual follow-up~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_null_alnvalue = remaining[alnvalue != "NULL" & value == "NULL"]
remaining = remaining %>%
  anti_join(bad_null_alnvalue, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_null_alnvalue$nu_value = "FURTHER WORK REQUIRED"
bad_null_alnvalue %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_null_alnvalue)), ".csv"))
#Bad values: both sides truncate to the same integer -> keep value~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_integer_round = remaining[as.integer(alnvalue) == as.integer(value)]
remaining = remaining %>%
  anti_join(bad_integer_round, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_integer_round$nu_value = bad_integer_round$value
bad_integer_round %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_integer_round)), ".csv"))
#Bad Scientific: alnvalue equals value rendered in 2-digit scientific notation~~~
#(i.e. the export mangled the number into "1.23E+05"-style text)~~~~~~~~~~~~~~~~~
bad_scientific_notation_2d = remaining %>%
  data.table() %>%
  .[alnvalue == value %>%
      as.numeric() %>%
      formatC(format = "E", digits=2) %>%
      as.character(),]
remaining = remaining %>%
  anti_join(bad_scientific_notation_2d, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_scientific_notation_2d$nu_value = bad_scientific_notation_2d$value
bad_scientific_notation_2d %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_scientific_notation_2d)), ".csv"))
#Bad Scientific: alnvalue equals value rendered in 1-digit scientific notation~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_scientific_notation_1d = remaining %>%
  data.table() %>%
  .[alnvalue == value %>%
      as.numeric() %>%
      formatC(format = "E", digits=1) %>%
      as.character(),]
remaining = remaining %>%
  anti_join(bad_scientific_notation_1d, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
#Fixed: copy-paste bug — nu_value was taken from bad_scientific_notation_2d
#(a different table with a different row count); use this table's own values.
bad_scientific_notation_1d$nu_value = bad_scientific_notation_1d$value
bad_scientific_notation_1d %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_scientific_notation_1d)), ".csv"))
#Bad values: SED_DBTIMESTAMP -> keep the date part of value plus midnight~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_custom_format_timestamp = remaining %>%
  .[assetattrid == "SED_DBTIMESTAMP"]
remaining = remaining %>%
  anti_join(bad_custom_format_timestamp, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_custom_format_timestamp = bad_custom_format_timestamp %>%
  .[,`:=`(nu_value = str_trunc(value,10, "right", ellipsis = "" ) %>%
            paste("00:00:00"))]
bad_custom_format_timestamp %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_custom_format_timestamp)), ".csv"))
#Bad values: alnvalue carries a custom datetime format -> keep value~~~~~~~~~~~~~
#NOTE(review): str_detect(alnvalue, ".") is a regex wildcard that matches any
#non-empty string; use fixed(".") if a literal dot was meant — confirm.
bad_custom_format = remaining %>%
  .[str_detect(alnvalue, ":") &
      str_detect(alnvalue, ".") ]
remaining = remaining %>%
  anti_join(bad_custom_format, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_custom_format$nu_value = bad_custom_format$value
bad_custom_format %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_custom_format)), ".csv"))
#Bad values: alnvalue contains "#" -> keep value~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_hastags = remaining %>%
  .[str_detect(alnvalue,"#"),]
remaining = remaining %>%
  anti_join(bad_hastags, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_hastags$nu_value = bad_hastags$value
bad_hastags %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_hastags)), ".csv"))
#Bad values: both sides integer but NOT equal (equal pairs were removed above)~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_integers = remaining %>%
  .[!is.na(as.integer(alnvalue)),] %>%
  .[!is.na(as.integer(value)),]
remaining = remaining %>%
  anti_join(bad_integers, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_integers$nu_value = "FURTHER WORK REQUIRED"
bad_integers %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_integers)), ".csv"))
#Bad values: value contains "/" -> trust alnvalue~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#NOTE(review): despite the name, "/" is a forward slash, not a backslash.
bad_backslash = remaining %>%
  .[str_detect(value,"/"),]
remaining = remaining %>%
  anti_join(bad_backslash, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_backslash$nu_value = bad_backslash$alnvalue
bad_backslash %>%
  fwrite(., file = paste0("./output/data/outstanding/", deparse(substitute(bad_backslash)), ".csv"))
#Bad values: numeric alnvalue with a dashed value -> manual follow-up~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_numeric_dash = remaining %>%
  .[str_detect(value,"-"),] %>%
  .[!is.na(as.integer(alnvalue))]
remaining = remaining %>%
  anti_join(bad_numeric_dash, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_numeric_dash$nu_value = "FURTHER WORK REQUIRED"
bad_numeric_dash %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_numeric_dash)), ".csv"))
#Bad values: catch-all for everything left -> manual follow-up~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
bad_just_wrong = remaining
remaining = remaining %>%
  anti_join(bad_just_wrong, by = c('assetnum', 'ID', 'assetspecid')) %>%
  data.table()
#Fix
bad_just_wrong$nu_value = "FURTHER WORK REQUIRED"
bad_just_wrong %>%
  fwrite(., file = paste0("./output/data/outstanding/",
                          deparse(substitute(bad_just_wrong)), ".csv"))
#final check: remaining is empty by construction of the catch-all above~~~~~~~~~~
checkr_empty(remaining)
#Positive matches: matched records keep their value as nu_value~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#NOTE(review): this is the NON-date section, yet all_good keeps only rows whose
#assetattrid contains "DATE" — it looks like `!str_detect(...)` was intended;
#confirm before relying on the outputs below.
all_good = total_matched_good[str_detect(assetattrid, "DATE")] %>%
  .[,`:=`(nu_value = value)] %>%
  .[,`:=`(match_type = "good")]
#Date Aggregation~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Recombine every non-date category, tag provenance, verify counts, and write
#the de-duplicated corrections to disk.
all_bad = bind_rows(bad_empty_character_null,
                    bad_empty_character_alnvalue,
                    bad_NA_character,
                    bad_null_w_value,
                    bad_null_alnvalue,
                    bad_integer_round,
                    bad_scientific_notation_2d,
                    bad_scientific_notation_1d,
                    bad_custom_format_timestamp,
                    bad_custom_format,
                    bad_hastags,
                    bad_integers,
                    bad_backslash,
                    bad_numeric_dash,
                    bad_just_wrong) %>%
  .[,`:=`(match_type = "bad")]
checkr_compare(all_bad, total_matched_bad_cleaned)
total_date = bind_rows(all_good, all_bad)
checkr_dupe_diff(total_date, "nu_value")
total_date %>% unique() %>%
  .[,.(.N), by = .(assetnum, assetspecid, assetattrid, value)] %>%
  .[order(-N)]
#Fixed: the write below was severed from its pipe — `fwrite(., ...)` sat on its
#own line with a dangling `.` placeholder — and it selected `nu_date`, a column
#this (non-date) total_date does not carry; switched to nu_value. Confirm the
#intended output column.
total_date[,.(assetnum, assetattrid, nu_value)] %>%
  unique() %>%
  fwrite(., file = paste0("./output/data/corrected_records/", "corrected_dates", ".csv"))
column = "nu_value"
total_date[,..column]
total_date %>%
  .[,.(.N), by = c('assetnum', 'assetattrid', column)]
#Fixed: this redefinition of checkr_dupe_diff was never closed — the missing
#`}` swallowed the rest of the script into the function body. The function is
#closed here and the trailing statements restored to top level. Also fixed:
#`..column` inside c() is invalid (the `..` prefix only works as a whole
#j/by expression in data.table); the plain `column` string is used instead,
#and the result messages are restored from the original definition.
checkr_dupe_diff = function(data, column){
  dupe_num = data %>%
    .[,.(.N), by = c('assetnum', 'assetattrid', column)] %>%
    .[,.(.N), by = .(assetnum, assetattrid)] %>%
    .[N > 1] %>%
    nrow()
  message("Number of duplicates: ", dupe_num)
  ifelse(dupe_num == 0,
         "All dupe records have same value!",
         "Different Values!!!!") %>%
    message()
}
checkr_compare(all_bad_date, tmp_time_dt_remaining)
checkr_compare(all_good_date, total_matched_good[str_detect(assetattrid, "DATE")])
total_date = bind_rows(all_bad_date, all_good_date)
checkr_dupe_diff(total_date, "nu_date")
|
8215af290b930bcc78591505fdeec85bfe435f17
|
4014719bdbff8b7771ee42a6dae89a4e7b97167f
|
/preprocessing_test_2.R
|
ca2609c1ae9270ff8d3c2aa34dc4ad209250fb5c
|
[] |
no_license
|
macjankowski/pub_lda
|
1d7decf690429241c97847faec6554abf73c3b32
|
c718a1bdbd1ab647271ac75f6d85aecc5c8b644d
|
refs/heads/master
| 2021-01-21T14:29:00.859831
| 2019-04-29T10:05:41
| 2019-04-29T10:05:41
| 95,290,947
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 811
|
r
|
preprocessing_test_2.R
|
#LDA preprocessing smoke test: clean the politicians tweet data set, build a
#TF-IDF matrix with labels, fit an LDA topic model, then train/evaluate a
#random-forest-style classifier on the topic features.
#NOTE(review): hard-coded Windows working directory and data path.
setwd('C:/doc/s/sem2/chudy/repo/pub_lda')
source('./preprocessing_2.R')
source('./dimRed.R')
source('./classification.R')
tree_number <- 500
topic_number <- 30
filePath = 'C:/doc/s/sem2/chudy/repo/pub_lda/politiciants data.csv'
tweetsAll <- cleanData(filePath)
tweetsAllLabelsNumeric <- labelsToNumeric(tweetsAll)
tfIdfWithLabels <- prepareTfIdfWithLabels(tweetsAllLabelsNumeric)
partitioned <- partitionData(tfIdfWithLabels)
dim(partitioned$train)
dim(partitioned$test)
lda <- calculateLDA(partitioned, topic_number)
posterior(lda$topicmodel)[2]
#NOTE(review): `tfData` is never defined in this script — presumably created
#by one of the sourced files, or `partitioned` was intended; confirm.
res <- trainAndPredict(tree_number, lda$ldaTrainData, tfData$cleanedTrainLabels,
                       lda$ldaTestData, tfData$cleanedTestLabels)
plotResults(res$testResult$threshold, res$testResult$bridgeRatio, res$testResult$errorRatio)
res$model
|
c2319021627654f6a8ecb22e3bafc7e2db485ee8
|
cdbc1057868bef1b44b28e9a30d0fcbf86d98bbf
|
/ASCAT_AUS_MPI.R
|
4e234a557ae95d064a291201b53eaeff99249b1a
|
[] |
no_license
|
xwang234/ovarian
|
4f57ecfe16dc1f0a06afa72b1d5be5ed29ad3198
|
b23bdd340230b3d0145bd3e4b432397c4c1512ba
|
refs/heads/master
| 2021-07-08T17:14:48.542354
| 2017-10-05T21:09:22
| 2017-10-05T21:09:22
| 105,941,140
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,770
|
r
|
ASCAT_AUS_MPI.R
|
#!/usr/bin/env Rscript
#salloc -t 0-8 -n 82 mpirun -n 1 R --interactive
library(data.table)
library(ASCAT)
#NOTE(review): hard-coded working directory; per-sample outputs land here.
setwd("/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/tmp/")
#test the real homoLimit
ascat.predictGermlineGenotypes1=function (ASCATobj, platform = "AffySNP6")
{
  # Tumour-only germline genotype prediction (patched copy of ASCAT's
  # ascat.predictGermlineGenotypes that additionally returns the homozygosity
  # BAF cutoff used, so callers can log it).
  #
  # Args:
  #   ASCATobj: ASCAT data object (ascat.loadData) with Tumor_LogR, Tumor_BAF,
  #             chr / ch probe-index lists and a samples vector.
  #   platform: array platform name selecting the tuning constants below.
  # Returns:
  #   list(germlinegenotypes = logical matrix (TRUE = called homozygous),
  #        failedarrays = sample names whose cutoff was implausibly high
  #                       (> 0.35),
  #        homoLimit = the cutoff used. NOTE(review): with several samples
  #                    this is the cutoff of the LAST sample only, as in the
  #                    original code — confirm callers only use one sample.
  Homozygous = matrix(nrow = dim(ASCATobj$Tumor_LogR)[1],
                      ncol = dim(ASCATobj$Tumor_LogR)[2])
  colnames(Homozygous) = colnames(ASCATobj$Tumor_LogR)
  rownames(Homozygous) = rownames(ASCATobj$Tumor_LogR)
  # Per-platform tuning constants, replacing the original 150-line if/else
  # ladder. Order: maxHomozygous, proportionHetero, proportionHomo,
  # proportionOpen, segmentLength.
  platformSettings = list(
    Custom10k                = list(0.05, 0.590, 0.380, 0.020, 20),
    Illumina109k             = list(0.05, 0.350, 0.600, 0.020, 20),
    IlluminaCytoSNP          = list(0.05, 0.280, 0.620, 0.030, 100),
    Illumina610k             = list(0.05, 0.295, 0.670, 0.015, 30),
    Illumina660k             = list(0.05, 0.295, 0.670, 0.015, 30),
    Illumina700k             = list(0.05, 0.295, 0.670, 0.015, 30),
    Illumina1M               = list(0.05, 0.220, 0.740, 0.020, 100),
    `Illumina2.5M`           = list(0.05, 0.210, 0.745, 0.030, 100),
    IlluminaOmni5            = list(0.05, 0.130, 0.855, 0.010, 100),
    Affy10k                  = list(0.04, 0.355, 0.605, 0.025, 20),
    Affy100k                 = list(0.05, 0.270, 0.620, 0.090, 30),
    Affy250k_sty             = list(0.05, 0.260, 0.660, 0.050, 50),
    Affy250k_nsp             = list(0.05, 0.260, 0.660, 0.050, 50),
    AffySNP6                 = list(0.05, 0.250, 0.670, 0.040, 100),
    AffyOncoScan             = list(0.04, 0.355, 0.605, 0.025, 30),
    AffyCytoScanHD           = list(0.04, 0.320, 0.600, 0.030, 100),
    HumanCNV370quad          = list(0.05, 0.295, 0.670, 0.015, 20),
    HumanCore12              = list(0.05, 0.295, 0.670, 0.015, 20),
    HumanCoreExome24         = list(0.05, 0.175, 0.790, 0.020, 100),
    HumanOmniExpress12       = list(0.05, 0.295, 0.670, 0.015, 100),
    IlluminaOmniExpressExome = list(0.05, 0.350, 0.600, 0.030, 100))
  if (!platform %in% names(platformSettings)) {
    # Fixed: the original only print()ed here and then crashed later with an
    # obscure "object 'maxHomozygous' not found"; fail fast instead.
    stop("Error: platform unknown")
  }
  settings = platformSettings[[platform]]
  maxHomozygous    = settings[[1]]
  proportionHetero = settings[[2]]
  proportionHomo   = settings[[3]]
  proportionOpen   = settings[[4]]
  segmentLength    = settings[[5]]
  failedarrays = NULL
  homoLimit = NA  # keeps the return value defined even with zero samples
  for (i in 1:dim(ASCATobj$Tumor_LogR)[2]) {
    # BAF/LogR for sample i, restricted to probes with a non-missing BAF
    Tumor_BAF_noNA = ASCATobj$Tumor_BAF[!is.na(ASCATobj$Tumor_BAF[, i]), i]
    names(Tumor_BAF_noNA) = rownames(ASCATobj$Tumor_BAF)[!is.na(ASCATobj$Tumor_BAF[, i])]
    Tumor_LogR_noNA = ASCATobj$Tumor_LogR[names(Tumor_BAF_noNA), i]
    names(Tumor_LogR_noNA) = names(Tumor_BAF_noNA)
    # Re-index the chromosome (chr) and chromosome-arm (ch) probe lists onto
    # the NA-filtered vectors.
    chr_noNA = list()
    prev = 0
    for (j in 1:length(ASCATobj$chr)) {
      chrke = ASCATobj$chr[[j]]
      next2 = prev + sum(!is.na(ASCATobj$Tumor_BAF[chrke, i]))
      chr_noNA[[j]] = (prev + 1):next2
      prev = next2
    }
    ch_noNA = list()
    prev = 0
    for (j in 1:length(ASCATobj$ch)) {
      chrke = ASCATobj$ch[[j]]
      next2 = prev + sum(!is.na(ASCATobj$Tumor_BAF[chrke, i]))
      ch_noNA[[j]] = (prev + 1):next2
      prev = next2
    }
    # Mirrored BAF: distance from the nearest homozygous state (0 or 1)
    tbsam = Tumor_BAF_noNA
    bsm = ifelse(tbsam < 0.5, tbsam, 1 - tbsam)
    # Homozygosity cutoff: the proportionHomo quantile of mirrored BAF,
    # floored at maxHomozygous.
    homoLimit = max(sort(bsm)[round(length(bsm) * proportionHomo)],
                    maxHomozygous)
    if (homoLimit > 0.35) {
      failedarrays = c(failedarrays, ASCATobj$samples[i])
    }
    # TRUE = confidently homozygous, NA = undecided (no FALSE yet)
    Hom = ifelse(bsm < homoLimit, TRUE, NA)
    Homo = sum(Hom == TRUE, na.rm = TRUE)
    Undecided = sum(is.na(Hom))
    # Fixed: Hetero was undefined when extraHetero <= 0, crashing at the png
    # title below; with no reassignment there are zero heterozygous calls.
    Hetero = 0
    extraHetero = round(min(proportionHetero * length(Tumor_BAF_noNA),
                            Undecided - proportionOpen * length(Tumor_BAF_noNA)))
    if (extraHetero > 0) {
      # Promote the `extraHetero` undecided probes whose mirrored BAF is
      # furthest from the local (windowed) median of non-homozygous probes
      # to heterozygous.
      allProbes = 1:length(Tumor_BAF_noNA)
      nonHomoProbes = allProbes[is.na(Hom) | Hom == FALSE]
      lowestDist = NULL
      # mirrored BAF with confidently-homozygous probes masked out
      bsmHNA = bsm
      bsmHNA[!is.na(Hom) & Hom] = NA
      for (chrke in chr_noNA) {
        chrNonHomoProbes = intersect(nonHomoProbes, chrke)
        if (length(chrNonHomoProbes) > 5) {
          segmentLength2 = min(length(chrNonHomoProbes) - 1, segmentLength)
          # Sliding windows to the left, right and straddling each probe
          chrNonHomoProbesStartWindowLeft = c(rep(NA, segmentLength2),
                                              chrNonHomoProbes[1:(length(chrNonHomoProbes) - segmentLength2)])
          chrNonHomoProbesEndWindowLeft = c(NA, chrNonHomoProbes[1:(length(chrNonHomoProbes) - 1)])
          chrNonHomoProbesStartWindowRight = c(chrNonHomoProbes[2:length(chrNonHomoProbes)], NA)
          chrNonHomoProbesEndWindowRight = c(chrNonHomoProbes[(segmentLength2 + 1):length(chrNonHomoProbes)],
                                             rep(NA, segmentLength2))
          chrNonHomoProbesStartWindowMiddle = c(rep(NA, segmentLength2/2),
                                                chrNonHomoProbes[1:(length(chrNonHomoProbes) - segmentLength2/2)])
          chrNonHomoProbesEndWindowMiddle = c(chrNonHomoProbes[(segmentLength2/2 + 1):length(chrNonHomoProbes)],
                                              rep(NA, segmentLength2/2))
          chrLowestDist = NULL
          for (probeNr in 1:length(chrNonHomoProbes)) {
            probe = chrNonHomoProbes[probeNr]
            if (!is.na(chrNonHomoProbesStartWindowLeft[probeNr]) &
                !is.na(chrNonHomoProbesEndWindowLeft[probeNr])) {
              medianLeft = median(bsmHNA[chrNonHomoProbesStartWindowLeft[probeNr]:chrNonHomoProbesEndWindowLeft[probeNr]],
                                  na.rm = TRUE)
            }
            else {
              medianLeft = NA
            }
            if (!is.na(chrNonHomoProbesStartWindowRight[probeNr]) &
                !is.na(chrNonHomoProbesEndWindowRight[probeNr])) {
              medianRight = median(bsmHNA[chrNonHomoProbesStartWindowRight[probeNr]:chrNonHomoProbesEndWindowRight[probeNr]],
                                   na.rm = TRUE)
            }
            else {
              medianRight = NA
            }
            if (!is.na(chrNonHomoProbesStartWindowMiddle[probeNr]) &
                !is.na(chrNonHomoProbesEndWindowMiddle[probeNr])) {
              medianMiddle = median(c(bsmHNA[chrNonHomoProbesStartWindowMiddle[probeNr]:chrNonHomoProbesEndWindowLeft[probeNr]],
                                      bsmHNA[chrNonHomoProbesStartWindowRight[probeNr]:chrNonHomoProbesEndWindowMiddle[probeNr]]),
                                    na.rm = TRUE)
            }
            else {
              medianMiddle = NA
            }
            chrLowestDist[probeNr] = min(abs(medianLeft - bsm[probe]),
                                         abs(medianRight - bsm[probe]),
                                         abs(medianMiddle - bsm[probe]),
                                         Inf, na.rm = TRUE)
          }
        }
        else {
          # Too few probes for windowed medians: give all of them the same
          # (maximal) distance so they rank equally.
          chrLowestDist = NULL
          if (length(chrNonHomoProbes) > 0) {
            chrLowestDist[1:length(chrNonHomoProbes)] = 1
          }
        }
        lowestDist = c(lowestDist, chrLowestDist)
      }
      lowestDistUndecided = lowestDist[is.na(Hom[nonHomoProbes])]
      names(lowestDistUndecided) = names(Tumor_LogR_noNA)[nonHomoProbes[is.na(Hom[nonHomoProbes])]]
      sorted = sort(lowestDistUndecided)
      Hom[names(sorted[1:min(length(sorted), extraHetero)])] = FALSE
      Hetero = sum(Hom == FALSE, na.rm = TRUE)
      Homo = sum(Hom == TRUE, na.rm = TRUE)
      Undecided = sum(is.na(Hom))
    }
    # Diagnostic plot per sample: title shows sample name + hetero/homo counts
    png(filename = paste("tumorSep", colnames(ASCATobj$Tumor_LogR)[i],
                         ".png", sep = ""), width = 2000, height = 500, res = 200)
    title = paste(paste(colnames(ASCATobj$Tumor_BAF)[i], Hetero), Homo)
    ascat.plotGenotypes(ASCATobj, title, Tumor_BAF_noNA, Hom, ch_noNA)
    dev.off()
    # Any probe still undecided defaults to homozygous
    Hom[is.na(Hom)] = TRUE
    Homozygous[names(Hom), i] = Hom
  }
  return(list(germlinegenotypes = Homozygous, failedarrays = failedarrays, homoLimit=homoLimit))
}
# Tumor-only samples: sample sheet with one sample identifier per row
# (column 1 is the sample name used to locate the ASCAT input files).
tumorsamples <- read.table(
    "/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/ascat_AUS_samplelist.txt",
    stringsAsFactors = FALSE
)
# Run the full tumor-only ASCAT pipeline for a single sample.
#
# jobn: row index into the global `tumorsamples` table; selects which
#       sample's LogR / BAF input files are processed.
#
# Side effects: emits the raw-data and segmented-data plots, writes the
# predicted homozygosity limit, and — when ASCAT produces segments — writes
# the segment table plus a small summary (psi, aberrant cell fraction,
# ploidy, goodness of fit) under result/tmp/.
#
# Returns TRUE when ASCAT produced segments, FALSE otherwise.
mpi_ascat2 <- function(jobn) {
    succeeded <- FALSE
    library(ASCAT)

    sampleName <- tumorsamples[jobn, 1]
    logrFile <- paste0("/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/ascat_input/", sampleName, "_tumorlogr.txt")
    bafFile <- paste0("/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/ascat_input/", sampleName, "_tumorbaf.txt")

    bcData <- ascat.loadData(logrFile, bafFile)
    #bcData = ascat.GCcorrect(bcData,"/fh/fast/dai_j/CancerGenomics/Tools/database/other/GC_IlluminaOmniexpress.txt")
    ascat.plotRawData(bcData)

    Sys.time()
    # Tumor-only run: germline genotypes are predicted (patched predictor
    # broadcast to the slaves), not taken from a matched normal.
    germline <- ascat.predictGermlineGenotypes1(bcData, "Illumina2.5M")
    limitCon <- file(paste0("/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/tmp/", sampleName, ".ascat.homoLimit"), "w")
    writeLines(paste0(germline$homoLimit), limitCon)
    close(limitCon)

    Sys.time()
    #ascat.plotRawData(bcData)
    bcData <- ascat.aspcf(bcData, ascat.gg = germline)
    Sys.time()
    ascat.plotSegmentedData(bcData)
    #Sys.time()
    ascatFit <- ascat.runAscat(bcData)

    if (!is.null(ascatFit$segments)) {
        succeeded <- TRUE
        # Use the sample name as recorded by ASCAT itself for the output files.
        sampleName <- bcData$samples
        write.table(
            ascatFit$segments,
            file = paste0("/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/tmp/", sampleName, ".ascat.segment"),
            row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE
        )
        resCon <- file(paste0("/fh/fast/dai_j/CancerGenomics/Ovarian_Cancer/result/tmp/", sampleName, ".ascat.res"), "w")
        writeLines(paste0("psi ", ascatFit$psi), resCon)
        writeLines(paste0("aberrantcellfraction ", ascatFit$aberrantcellfraction), resCon)
        writeLines(paste0("ploidy ", ascatFit$ploidy), resCon)
        writeLines(paste0("goodnessOfFit ", ascatFit$goodnessOfFit), resCon)
        close(resCon)
    }
    return(succeeded)
}
library(Rmpi)
# One slave per available MPI slot (the master itself occupies one slot).
njobs=mpi.universe.size() - 1
print(njobs)
mpi.spawn.Rslaves(nslaves=njobs,needlog = F)
# Ship the patched genotype predictor and the sample sheet to every slave
# BEFORE dispatching jobs — mpi_ascat2 references both as globals.
mpi.bcast.Robj2slave(ascat.predictGermlineGenotypes1)
mpi.bcast.Robj2slave(tumorsamples)
#res=mpi.parSapply(X=1:(ncol(tumorbaf)-3),FUN=mpi_ascat,tumorlogr=tumorlogr,tumorbaf=tumorbaf,normallogr=normallogr,normalbaf=normalbaf,job.num=njobs)
# One job per sample row; each returns TRUE/FALSE for success.
res=mpi.parSapply(X=1:nrow(tumorsamples),FUN=mpi_ascat2,job.num=njobs)
## quit program
mpi.close.Rslaves()
mpi.quit()
# NOTE(review): unreachable — mpi.quit() already terminates the R session.
quit()
|
c9722a702ad4690d5f7a8bdf1f1ed7d4ab14b6a9
|
7bc0759b1a3f1a8a4c48e02f1c5e393a13e94eff
|
/R/import_data.R
|
fb046df953911ff6e63775ca00bc1ed3431c9457
|
[] |
no_license
|
kvittingseerup/IsoformSwitchAnalyzeR
|
b7e4a249572b3d487ffcdea62a409e800ac23fe3
|
5f471360da38101777d37d9eb91a99c3ac81eda4
|
refs/heads/master
| 2023-07-21T19:52:53.966098
| 2023-06-30T11:04:50
| 2023-06-30T11:04:50
| 88,636,530
| 66
| 20
| null | 2023-01-31T12:23:30
| 2017-04-18T14:46:47
|
R
|
UTF-8
|
R
| false
| false
| 233,024
|
r
|
import_data.R
|
### Actual import functions
# Import a complete Cufflinks/CuffDiff analysis (GTF, gene/isoform DE tables,
# FPKM trackings, read-group tracking, run info and optionally the splicing
# test and a transcript fasta) and assemble it into a switchAnalyzeRlist.
#
# Bug fixes relative to the previous revision:
#  - the per-condition isoform standard error for sample_1 was joined on
#    sample_2 a second time, so iso_stderr_1 duplicated iso_stderr_2;
#  - when multiple fasta files were supplied, every file was re-read once
#    per element of isoformNtFasta (the lapply ignored its loop variable);
#  - a progress message said "Step 4 of 3".
importCufflinksFiles <- function(
    ### Core arguments
    pathToGTF,
    pathToGeneDEanalysis,
    pathToIsoformDEanalysis,
    pathToGeneFPKMtracking,
    pathToIsoformFPKMtracking,
    pathToIsoformReadGroupTracking,
    pathToSplicingAnalysis = NULL,
    pathToReadGroups,
    pathToRunInfo,
    isoformNtFasta = NULL,
    ### Advanced arguments
    fixCufflinksAnnotationProblem = TRUE,
    addIFmatrix = TRUE,
    estimateDifferentialGeneRange = TRUE,
    quiet = FALSE
) {
    ### Test that files exist
    if (TRUE) {
        if( pathToGTF == '' ) {
            stop(
                paste(
                    'The \'pathToGTF\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
                    '\nDid you try to use the system.file("your/quant/dir/", package="IsoformSwitchAnalyzeR")',
                    'to import your own data? The system.file() should only be used',
                    'to access the example data stored in the IsoformSwitchAnalyzeR package.',
                    'To access your own data simply provide the string to the directory with the data as:',
                    '"path/to/quantification/".',
                    sep=' '
                )
            )
        }
        if( pathToGeneDEanalysis == '' ) {
            stop(
                paste(
                    'The \'pathToGeneDEanalysis\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
                    '\nDid you try to use the system.file("your/quant/dir/", package="IsoformSwitchAnalyzeR")',
                    'to import your own data? The system.file() should only be used',
                    'to access the example data stored in the IsoformSwitchAnalyzeR package.',
                    'To access your own data simply provide the string to the directory with the data as:',
                    '"path/to/quantification/".',
                    sep=' '
                )
            )
        }
        # pathToGTF
        if (!file.exists(pathToGTF)) {
            stop('The \'pathToGTF\' argument does not point to an acutal file')
        }
        if( !is.null(isoformNtFasta)) {
            if( !is.character( isoformNtFasta)) {
                stop('The \'isoformNtFasta\' argument must be a charachter string.')
            }
            if( any(isoformNtFasta == '') ) {
                stop(
                    paste(
                        'The \'isoformNtFasta\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
                        '\nDid you try to use the system.file("your/quant/dir/", package="IsoformSwitchAnalyzeR")',
                        'to import your own data? The system.file() should only be used',
                        'to access the example data stored in the IsoformSwitchAnalyzeR package.',
                        'To access your own data simply provide the string to the directory with the data as:',
                        '"path/to/quantification/".',
                        sep=' '
                    )
                )
            }
            if( any( ! sapply(isoformNtFasta, file.exists) ) ) {
                stop('At least one of the file(s) pointed to with \'isoformNtFasta\' seems not to exist.')
            }
            if( any(! grepl('\\.fa|\\.fasta|\\.fa.gz|\\.fasta.gz', isoformNtFasta)) ) {
                stop('The file pointed to via the \'isoformNtFasta\' argument does not seem to be a fasta file...')
            }
        }
        # DE
        if (!file.exists(pathToGeneDEanalysis)) {
            stop('The \'pathToGeneDEanalysis\' argument does not point to an acutal file')
        }
        if (!file.exists(pathToIsoformDEanalysis)) {
            stop('The \'pathToIsoformDEanalysis\' argument does not point to an acutal file')
        }
        # Tracking
        if (!file.exists(pathToGeneFPKMtracking)) {
            stop('The \'pathToGeneFPKMtracking\' argument does not point to an acutal file')
        }
        if (!file.exists(pathToIsoformFPKMtracking)) {
            stop(
                'The \'pathToIsoformFPKMtracking\' argument does not point to an acutal file'
            )
        }
        if (!file.exists(pathToIsoformReadGroupTracking)) {
            stop(
                'The \'pathToIsoformReadGroupTracking\' argument does not point to an acutal file'
            )
        }
        # splicing
        if (!is.null(pathToSplicingAnalysis)) {
            if (!file.exists(pathToSplicingAnalysis)) {
                stop(
                    'The \'pathToSplicingAnalysis\' argument does not point to an acutal file'
                )
            }
        }
        # info
        if (!file.exists(pathToReadGroups)) {
            stop('The \'pathToReadGroups\' argument does not point to an acutal file')
        }
        if (!file.exists(pathToRunInfo)) {
            stop('The \'pathToRunInfo\' argument does not point to an acutal file')
        }
    }
    ### Import the supplied files (not gtf)
    if (TRUE) {
        if (!quiet) { message('Step 1 of 5: Importing data...')}
        suppressMessages(
            geneDiffanalysis <-
                readr::read_tsv(
                    file = pathToGeneDEanalysis,
                    col_names = TRUE
                )
        )
        suppressMessages(
            isoformDiffanalysis <-
                readr::read_tsv(
                    file = pathToIsoformDEanalysis,
                    col_names = TRUE
                )
        )
        suppressMessages(
            geneAnnotation <-
                readr::read_tsv(
                    file = pathToGeneFPKMtracking,
                    col_names = TRUE
                )
        )
        suppressMessages(
            isoformAnnotation <-
                readr::read_tsv(
                    file = pathToIsoformFPKMtracking,
                    col_names = TRUE
                )
        )
        suppressMessages(
            isoRepExp <-
                read.table(
                    file = pathToIsoformReadGroupTracking,
                    header = TRUE,
                    sep='\t',
                    stringsAsFactors = FALSE
                )
        )
        if( !is.null(pathToSplicingAnalysis) ) {
            suppressMessages(
                cuffSplicing <-
                    readr::read_tsv(
                        file = pathToSplicingAnalysis,
                        col_names = TRUE
                    )
            )
        }
        suppressMessages(
            readGroup <-
                read.table(
                    file = pathToReadGroups,
                    sep='\t',
                    header = TRUE
                )
        )
        suppressMessages(
            runInfo <-
                readr::read_tsv(
                    file = pathToRunInfo,
                    col_names = TRUE
                )
        )
    }
    ### "Test" that the data.files are what they are supposed to be
    ### (done by comparing column names against the known CuffDiff layouts)
    if (TRUE) {
        ### gene diff analysis
        q1 <-
            !all(
                colnames(geneDiffanalysis) %in% c(
                    "test_id",
                    "gene_id",
                    "gene",
                    "locus",
                    "sample_1",
                    "sample_2",
                    "status",
                    "value_1",
                    "value_2",
                    "log2(fold_change)",
                    "test_stat",
                    "p_value",
                    "q_value",
                    "significant"
                )
            )
        if (q1) {
            stop(paste(
                'The file supplied to pathToGeneDEanalysis does not appear',
                'to be the result of the CuffDiff gene expression analysis.'
            ))
        }
        ### transcript diff analysis
        q1 <-
            !all(
                colnames(isoformDiffanalysis) %in% c(
                    "test_id",
                    "gene_id",
                    "gene",
                    "locus",
                    "sample_1",
                    "sample_2",
                    "status",
                    "value_1",
                    "value_2",
                    "log2(fold_change)",
                    "test_stat",
                    "p_value",
                    "q_value",
                    "significant"
                )
            )
        if (q1) {
            stop(paste(
                'The file supplied to isoformDiffanalysis does not appear to',
                'be the result of the CuffDiff transcript expression analysis.'
            ))
        }
        # TCONS ids indicate Cufflinks transcript reconstruction was used
        q2 <-
            sum(grepl(
                'TCONS', isoformDiffanalysis$test_id
            )) != nrow(isoformDiffanalysis)
        if (q2) {
            warning(paste(
                'It looks like you have NOT been doing transcript\n',
                'reconstruction/assembly with Cufflinks/Cuffdiff.\n',
                'If you have not reconstructed transcripts we receomend to use Kallisto or Salmon\n',
                'to do the quantification instead - they are more accurate and have better biase correction methods.'
            ))
        }
        ### gene annoation
        q1 <-
            !all(
                colnames(geneAnnotation)[1:8] %in% c(
                    "tracking_id",
                    "class_code",
                    "nearest_ref_id",
                    "gene_id",
                    "gene_short_name",
                    "tss_id",
                    "locus",
                    "length"
                )
            )
        if (q1) {
            stop(paste(
                'The file supplied to geneAnnotation does not appear to be the',
                'gene FPKM traccking of the CuffDiff gene FPKM trascking analysis.'
            ))
        }
        ### transcript annoation
        q1 <-
            !all(
                colnames(isoformAnnotation)[1:8] %in% c(
                    "tracking_id",
                    "class_code",
                    "nearest_ref_id",
                    "gene_id",
                    "gene_short_name",
                    "tss_id",
                    "locus",
                    "length"
                )
            )
        if (q1) {
            stop(paste(
                'The file supplied to isoformAnnotation does not appear to be',
                'the isoform FPKM tracking of the CuffDiff transcript analysis.'
            ))
        }
        ### rep expression
        q1 <-
            !all(
                colnames(isoRepExp)[1:4] %in% c(
                    "tracking_id", "condition", "replicate", "raw_frags"
                )
            )
        if (q1) {
            stop(paste(
                'The file supplied to pathToIsoformCountTracking does not',
                'appear to be the isoform count tracking of the CuffDiff',
                'transcript analysis.'
            ))
        }
        ### splicing analysis
        if( !is.null(pathToSplicingAnalysis) ) {
            q1 <-
                !all(
                    colnames(cuffSplicing) %in% c(
                        "test_id",
                        "gene_id",
                        "gene",
                        "locus",
                        "sample_1",
                        "sample_2",
                        "status",
                        "value_1",
                        "value_2",
                        "sqrt(JS)",
                        "test_stat",
                        "p_value",
                        "q_value",
                        "significant"
                    )
                )
            if (q1) {
                stop(
                    'The file supplied to cuffSplicing does not appear to be the',
                    'result of the CuffDiff differential analysis of alternative splicing.'
                )
            }
        }
        ### Read grous
        q1 <-
            !all(
                colnames(readGroup) %in% c(
                    "file",
                    "condition",
                    "replicate_num",
                    "total_mass",
                    "norm_mass",
                    "internal_scale",
                    "external_scale"
                )
            )
        q2 <-
            !all(readGroup$condition %in% unique(
                unlist(geneDiffanalysis[, c('sample_1', 'sample_2')]))
            )
        if (q1 | q2) {
            stop(paste(
                'The file supplied to readGroup does not appear to be the',
                'pathToReadGroups of the CuffDiff transcript analysis.'
            ))
        }
        ### Run info
        q1 <- !all(colnames(runInfo) %in% c("param", 'value'))
        q2 <-
            !all(
                runInfo$param %in% c(
                    "cmd_line",
                    "version",
                    "SVN_revision",
                    "boost_version"
                )
            )
        if (q1 | q2) {
            stop(paste(
                'The file supplied to runInfo does not appear to be',
                'the runInfo of the CuffDiff transcript analysis.'
            ))
        }
    }
    ### Massage and merge gene and isoform annoation and DE analysis
    if (TRUE) {
        if (!quiet) { message('Step 2 of 5: Merging gene and isoform expression...')}
        ### Design matrix: one row per sample, sample names are "<condition>_<rep>"
        readGroup$sample_name <-
            stringr::str_c(readGroup$condition, '_', readGroup$replicate_num)
        designMatrix <- readGroup[, c('sample_name', 'condition')]
        colnames(designMatrix) <- c('sampleID', 'condition')
        ### Massage data frames
        if (TRUE) {
            # gene
            geneDiffanalysis <-
                geneDiffanalysis[, -which(
                    colnames(geneDiffanalysis) %in%
                        c('test_id', 'gene', 'locus', 'test_stat')
                )]
            colnames(geneDiffanalysis)[4:ncol(geneDiffanalysis)] <-
                paste(
                    "gene_",
                    colnames(geneDiffanalysis)[4:ncol(geneDiffanalysis)] ,
                    sep = "") # add gene to the colnames so they can be destinquished from the gene diff data
            colnames(geneDiffanalysis) <- gsub(
                'gene_log2.fold_change.',
                'gene_log2_fold_change',
                colnames(geneDiffanalysis)
            )
            # info
            colnames(isoformAnnotation)[1] <- 'isoform_id'
            isoformAnnotation2 <-
                isoformAnnotation[, na.omit(match(
                    c(
                        'isoform_id',
                        'gene_id',
                        'gene_short_name',
                        'nearest_ref_id',
                        'class_code',
                        'tss_id',
                        'CDS_id',
                        'length',
                        'locus'
                    ),
                    colnames(isoformAnnotation)
                ))]
            colnames(isoformAnnotation2)[which(
                colnames(isoformAnnotation2) == 'gene_short_name'
            )] <- 'gene_name'
            # iso
            isoformDiffanalysis <- isoformDiffanalysis[, which(
                !colnames(isoformDiffanalysis) %in%
                    c('gene_id', 'gene', 'test_stat')
            )]
            colnames(isoformDiffanalysis)[5:ncol(isoformDiffanalysis)] <-
                paste(
                    "iso_",
                    colnames(isoformDiffanalysis)[5:ncol(isoformDiffanalysis)],
                    sep = "") # add gene to the colnames so they can be destinquished from the gene diff data
            colnames(isoformDiffanalysis)[1] <- 'isoform_id'
            colnames(isoformDiffanalysis) <-
                gsub(
                    'iso_log2.fold_change.',
                    'iso_log2_fold_change',
                    colnames(isoformDiffanalysis)
                )
            # rep expression: long -> wide (one column per sample), counts
            isoRepExp2 <-
                isoRepExp[, c("tracking_id",
                              "condition",
                              "replicate",
                              "raw_frags")]
            colnames(isoRepExp2)[1] <- 'isoform_id'
            isoRepExp2$rep <-
                paste(isoRepExp2$condition, isoRepExp2$replicate, sep = '_')
            isoRepExp2 <-
                reshape2::dcast(data = isoRepExp2,
                                isoform_id ~ rep,
                                value.var = 'raw_frags')
            ### rep fpkm
            # iso
            isoRepFpkm <- isoRepExp[, c(
                "tracking_id",
                "condition",
                "replicate",
                "FPKM"
            )]
            colnames(isoRepFpkm)[1] <- 'isoform_id'
            isoRepFpkm$rep <-
                paste(isoRepFpkm$condition, isoRepFpkm$replicate, sep = '_')
            isoRepFpkm <-
                reshape2::dcast(data = isoRepFpkm,
                                isoform_id ~ rep,
                                value.var = 'FPKM')
            ### Gene-level FPKM: sum isoform FPKM within each gene
            isoRepFpkm2 <- isoRepFpkm
            isoRepFpkm2$gene_id <- isoformAnnotation2$gene_id[match(
                isoRepFpkm2$isoform_id, isoformAnnotation2$isoform_id
            )]
            isoRepFpkm2$isoform_id <- NULL
            geneRepFpkm <- isoformToGeneExp(isoRepFpkm2, quiet = TRUE)
            ### Calculate means
            rownames(isoRepFpkm) <- isoRepFpkm$isoform_id
            isoRepFpkm$isoform_id <- NULL
            isoMean <- rowMeans(isoRepFpkm)
            rownames(geneRepFpkm) <- geneRepFpkm$gene_id
            geneRepFpkm$gene_id <- NULL
            geneMean <- rowMeans(geneRepFpkm)
            ### add means
            geneDiffanalysis$gene_overall_mean <- geneMean[match(
                geneDiffanalysis$gene_id, names(geneMean)
            )]
            isoformDiffanalysis$iso_overall_mean <- isoMean[match(
                isoformDiffanalysis$isoform_id, names(isoMean)
            )]
        }
        ### Extract standard error
        if (TRUE) {
            ### Tjek if the Isoform CI collums are switches
            ciLowColumn <-
                which(grepl('_conf_lo', colnames(isoformAnnotation)))[1]
            ciHighColumn <-
                which(grepl('_conf_hi', colnames(isoformAnnotation)))[1]
            if (all(isoformAnnotation[, ciHighColumn] >= isoformAnnotation[, ciLowColumn])) {
                highString <- '_conf_hi'
            } else {
                highString <- '_conf_lo'
            }
            ### extract isoform sddev from CI
            # fpkm
            isoformFPKM <-
                isoformAnnotation[, which(grepl(
                    'isoform_id|_FPKM', colnames(isoformAnnotation)
                ))]
            isoformFPKM <- reshape2::melt(isoformFPKM, id.vars = 'isoform_id')
            isoformFPKM$variable <-
                gsub('_FPKM$', '', isoformFPKM$variable)
            colnames(isoformFPKM)[3] <- 'expression'
            # ci high
            isoformFPKMciHi <-
                isoformAnnotation[, which(grepl(
                    paste('isoform_id|', highString, sep = ''),
                    colnames(isoformAnnotation)
                ))]
            isoformFPKMciHi <-
                reshape2::melt(isoformFPKMciHi, id.vars = 'isoform_id')
            isoformFPKMciHi$variable <-
                gsub(highString, '', isoformFPKMciHi$variable)
            colnames(isoformFPKMciHi)[3] <- 'ci_hi'
            # stderr
            isoformFPKMcombined <-
                dplyr::inner_join(isoformFPKM,
                                  isoformFPKMciHi,
                                  by = c('isoform_id', 'variable'))
            isoformFPKMcombined$iso_stderr <-
                (isoformFPKMcombined$ci_hi - isoformFPKMcombined$expression) / 2 # How it's done in cufflinks source code
            isoformFPKMcombined$expression <- NULL
            isoformFPKMcombined$ci_hi <- NULL
            colnames(isoformFPKMcombined) <-
                c('isoform_id', 'sample_name', 'iso_stderr')
            ### Tjek if the gene CI collums are switches
            ciLowColumn <-
                which(grepl('_conf_lo', colnames(geneAnnotation)))[1]
            ciHighColumn <-
                which(grepl('_conf_hi', colnames(geneAnnotation)))[1]
            if (all(
                geneAnnotation[, ciHighColumn] >= geneAnnotation[, ciLowColumn])
            ) {
                highString <- '_conf_hi'
            } else {
                highString <- '_conf_lo'
            }
            ### extract gene sddev from CI
            # fpkm
            geneFPKM <- geneAnnotation[, which(grepl(
                'tracking_id|_FPKM', colnames(geneAnnotation)
            ))]
            geneFPKM <- reshape2::melt(geneFPKM, id.vars = 'tracking_id')
            geneFPKM$variable <-
                gsub('_FPKM$', '', geneFPKM$variable)
            colnames(geneFPKM)[3] <- 'expression'
            # ci high
            geneFPKMciHi <- geneAnnotation[, which(grepl(
                paste('tracking_id|', highString, sep = ''),
                colnames(geneAnnotation)
            ))]
            geneFPKMciHi <- reshape2::melt(geneFPKMciHi, id.vars = 'tracking_id')
            geneFPKMciHi$variable <- gsub(highString, '', geneFPKMciHi$variable)
            colnames(geneFPKMciHi)[3] <- 'ci_hi'
            # stderr
            geneFPKMcombined <-
                dplyr::inner_join(geneFPKM,
                                  geneFPKMciHi,
                                  by = c('tracking_id', 'variable'))
            geneFPKMcombined$iso_stderr <-
                (geneFPKMcombined$ci_hi - geneFPKMcombined$expression) / 2 # how it's done in cufflinks sourece code
            geneFPKMcombined$expression <- NULL
            geneFPKMcombined$ci_hi <- NULL
            colnames(geneFPKMcombined) <-
                c('gene_id', 'sample_name', 'gene_stderr')
            ## Merge stderr with DE analysis
            # First join: stderr of the condition in sample_2
            isoformDiffanalysis <- suppressWarnings( dplyr::inner_join(
                isoformDiffanalysis,
                isoformFPKMcombined,
                by=c("sample_2" = "sample_name", "isoform_id" = "isoform_id")
            ) )
            colnames(isoformDiffanalysis)[which( grepl(
                'iso_stderr', colnames(isoformDiffanalysis))
            )] <- 'iso_stderr_2'
            # Second join: stderr of the condition in sample_1.
            # BUGFIX: this previously joined on sample_2 again, so
            # iso_stderr_1 was a copy of iso_stderr_2 (compare with the
            # gene-level joins below which use sample_2 then sample_1).
            isoformDiffanalysis <- suppressWarnings( dplyr::inner_join(
                isoformDiffanalysis,
                isoformFPKMcombined,
                by=c("sample_1" = "sample_name", "isoform_id" = "isoform_id")
            ) )
            colnames(isoformDiffanalysis)[which(grepl(
                'iso_stderr$',
                colnames(isoformDiffanalysis),
                perl = TRUE
            ))] <- c('iso_stderr_1')
            isoformDiffanalysis <-
                isoformDiffanalysis[, c(
                    'isoform_id',
                    'sample_1',
                    'sample_2',
                    'iso_status',
                    'iso_overall_mean',
                    'iso_value_1',
                    'iso_value_2',
                    'iso_stderr_1',
                    'iso_stderr_2',
                    'iso_log2_fold_change',
                    'iso_p_value',
                    'iso_q_value',
                    'iso_significant'
                )]
            ### Extract and add gene stderr
            geneDiffanalysis <- suppressWarnings( dplyr::inner_join(
                geneDiffanalysis,
                geneFPKMcombined,
                by=c("sample_2" = "sample_name", "gene_id" = "gene_id")
            ) )
            colnames(geneDiffanalysis)[ which(grepl(
                'gene_stderr', colnames(geneDiffanalysis)
            ))] <- 'gene_stderr_2'
            geneDiffanalysis <- suppressWarnings( dplyr::inner_join(
                geneDiffanalysis,
                geneFPKMcombined,
                by=c("sample_1" = "sample_name", "gene_id" = "gene_id")
            ) )
            colnames(geneDiffanalysis)[which(grepl(
                'gene_stderr$',
                colnames(geneDiffanalysis),
                perl = TRUE
            ))] <- c('gene_stderr_1')
            geneDiffanalysis <-
                geneDiffanalysis[, c(
                    'gene_id',
                    'sample_1',
                    'sample_2',
                    'gene_status',
                    'gene_overall_mean',
                    'gene_value_1',
                    'gene_value_2',
                    'gene_stderr_1',
                    'gene_stderr_2',
                    'gene_log2_fold_change',
                    'gene_p_value',
                    'gene_q_value',
                    'gene_significant'
                )]
        }
        ### Merge data
        if (TRUE) {
            ### Meger gene DE and annotation data
            isoformData <-
                dplyr::inner_join(isoformAnnotation2, geneDiffanalysis, by = 'gene_id')
            ### Merge with iso DE
            isoformData <-
                dplyr::inner_join(
                    isoformData,
                    isoformDiffanalysis,
                    by = c('isoform_id', 'sample_1', 'sample_2')
                )
            ### Massage again
            colnames(isoformData)[which(
                colnames(isoformData) == 'tss_id'
            )] <- 'TSS_group_id'
        }
    }
    ### Obtain transcript structure information
    if (TRUE) {
        if (!quiet) { message('Step 3 of 5: Obtaining annotation...')}
        ### Import file
        tmp <- capture.output(
            suppressWarnings(
                suppressMessages(
                    exonFeatures <- rtracklayer::import(pathToGTF, format = 'gtf')
                )
            )
        )
        if (length(exonFeatures) == 0)
            stop("No exon information extracted from GTF")
        ### Filter for what is needed
        exonFeatures <- exonFeatures[
            which(tolower(exonFeatures$type) == 'exon'),
            c('gene_id', 'transcript_id')
        ]
        ### rename
        colnames(exonFeatures@elementMetadata) <- gsub(
            'transcript_id', 'isoform_id',
            colnames(exonFeatures@elementMetadata)
        )
    }
    ### import nulceotide fasta file
    if(TRUE) {
        addIsoformNt <- FALSE
        if( !is.null(isoformNtFasta) ) {
            # BUGFIX: read each supplied fasta file once. Previously the
            # lapply ignored its loop variable and passed the whole
            # isoformNtFasta vector to readDNAStringSet, re-reading every
            # file length(isoformNtFasta) times.
            isoformNtSeq <- do.call(
                c,
                lapply(isoformNtFasta, function(aFile) {
                    Biostrings::readDNAStringSet(
                        filepath = aFile, format = 'fasta'
                    )
                })
            )
            if(!is(isoformNtSeq, "DNAStringSet")) {
                stop('The fasta file supplied to \'isoformNtFasta\' does not contain the nucleotide (DNA) sequence...')
            }
            ### Remove preceeding ref|
            if(
                sum(grepl('^ref\\|', names(isoformNtSeq))) == length( isoformNtSeq )
            ) {
                names(isoformNtSeq) <- gsub('^ref\\|', '', names(isoformNtSeq))
            }
            ### Remove potential name duplication
            isoformNtSeq <- isoformNtSeq[which(
                ! duplicated(names(isoformNtSeq))
            )]
            if( ! all(isoformData$isoform_id %in% names(isoformNtSeq)) ) {
                warning(
                    paste(
                        'The fasta file supplied to \'isoformNtFasta\' does not contain the',
                        'nucleotide (DNA) sequence for all isoforms annotated and will not be added!',
                        '\nSpecifically:\n',
                        length(unique(isoformData$isoform_id)), 'isoforms were annotated in the GTF\n',
                        length(unique(names(isoformNtSeq))), 'isoforms have a sequence.\n',
                        'Only', length(intersect(names(isoformNtSeq), isoformData$isoform_id)), 'overlap.\n',
                        length(setdiff(unique(isoformData$isoform_id), names(isoformNtSeq))), 'annoated isoforms isoforms had no corresponding nucleotide sequence\n',
                        '\nIf there is no overlap (as in zero or close) there are two options:\n',
                        '1) The files do not fit together (different databases, versions etc)',
                        '(no fix except using propperly paired files).\n',
                        '2) It is somthing to do with how the isoform ids are stored in the different files.',
                        'This problem might be solvable using some of the',
                        '\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n',
                        ' 3 Examples from GTF are :',
                        paste0( sample(unique(isoformData$isoform_id), min(3, nrow(isoformData)) ), collapse = ', '),'\n',
                        ' 3 Examples of isoform sequence are :',
                        paste0( sample(names(isoformNtSeq), min(3, length(isoformNtSeq)) ), collapse = ', '),'\n',
                        '\nIf there is a large overlap but still far from complete there are 3 possibilites:\n',
                        '1) The files do not fit together (different databases versions)',
                        '(no fix except using propperly paired files).\n',
                        '2) The isoforms quantified have their nucleotide sequence stored in multiple fasta files (common for Ensembl).',
                        'Just supply a vector with the path to each of them to the \'isoformNtFasta\' argument.\n',
                        '3) One file could contain non-chanonical chromosomes while the other do not',
                        '(might be solved using the \'removeNonConvensionalChr\' argument.)\n',
                        '4) It is somthing to do with how a subset of the isoform ids are stored in the different files.',
                        'This problem might be solvable using some of the',
                        '\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n\n',
                        sep = ' '
                    )
                )
            } else {
                addIsoformNt <- TRUE
            }
            ### Subset to annotated isoforms
            isoformNtSeq <- isoformNtSeq[which(
                names(isoformNtSeq) %in% isoformData$isoform_id
            )]
            #if( !length(isoformNtSeq) ) {
            #    addIsoformNt <- FALSE
            #}
        }
    }
    ### Check it is the same transcripts in transcript structure and expression info
    if (TRUE) {
        myUnion <-
            unique(c(
                isoformData$isoform_id,
                exonFeatures$isoform_id,
                isoRepExp2$isoform_id
            ))
        myIntersect <- intersect(
            intersect(isoformData$isoform_id, exonFeatures$isoform_id),
            isoRepExp2$isoform_id
        )
        # If there are descripencies
        if(length(myIntersect) == 0) {
            stop(
                paste(
                    'No overlap between isoform annotation',
                    'and isoform expression data was found',
                    sep=' '
                )
            )
        }
        if (length(myUnion) != length(myIntersect)) {
            # Restrict all three data sets to the shared transcripts
            isoformData <- isoformData[which(
                isoformData$isoform_id %in% myIntersect), ]
            exonFeatures <- exonFeatures[which(
                exonFeatures$isoform_id %in% myIntersect), ]
            isoRepExp2 <- isoRepExp2[which(
                isoRepExp2$isoform_id %in% myIntersect), ]
            if (!quiet) {
                message(
                    paste(
                        'There were discrepencies between the GTF and the',
                        'expression analysis files. To solve this',
                        abs(length(myUnion) - length(myIntersect)) ,
                        'transcripts were removed.',
                        sep = ' '
                    )
                )
            }
        }
    }
    ### Fix to correct for Cufflinks annotation problem where cufflinks assignes
    # transcripts from several annotated genes to 1 cuffgene
    if ( fixCufflinksAnnotationProblem ) {
        if (!quiet) { message('Step 4 of 5: Fixing cufflinks annotation problem...')}
        geneName <- unique(isoformData[, c('gene_id', 'gene_name')])
        geneNameSplit <-
            split(geneName$gene_name , f = geneName$gene_id)
        # remove all unique
        geneNameSplit <-
            geneNameSplit[which(sapply(geneNameSplit, function(x)
                length(unique(x))) > 1)]
        if (length(geneNameSplit) > 0) {
            # if there are any problems
            #get indexes of those affected
            geneNameIndexesData <-
                which(isoformData$gene_id %in% names(geneNameSplit))
            # NOTE(review): 'spliceR.gene_id'/'spliceR.gene_name' look like
            # leftovers from spliceR — exonFeatures only carries 'gene_id'
            # and 'isoform_id' here, so these two statements appear to be
            # no-ops; gene_id is re-derived from isoformData further down,
            # which is why this still works. TODO confirm before removing.
            geneNameIndexesFeatures <-
                which(exonFeatures$spliceR.gene_id %in% names(geneNameSplit))
            # combine names of cuffgenes and gene short name
            isoformData$gene_id[geneNameIndexesData] <-
                paste(isoformData$gene_id[geneNameIndexesData] ,
                      isoformData$gene_name[geneNameIndexesData],
                      sep = ':')
            exonFeatures$spliceR.gene_id[geneNameIndexesFeatures] <-
                paste(exonFeatures$spliceR.gene_id[geneNameIndexesFeatures],
                      exonFeatures$spliceR.gene_name[geneNameIndexesFeatures],
                      sep = ':')
            ## Correct gene expression levels and differntial analysis
            problematicGenes <-
                isoformData[geneNameIndexesData, c(
                    'isoform_id',
                    'gene_id',
                    'sample_1',
                    'sample_2',
                    'gene_overall_mean',
                    'gene_value_1',
                    'gene_value_2',
                    'gene_stderr_1',
                    'gene_stderr_2',
                    'gene_log2_fold_change',
                    'gene_p_value',
                    'gene_q_value',
                    'gene_significant',
                    'iso_status',
                    'iso_overall_mean',
                    'iso_value_1',
                    'iso_value_2'
                )]
            problematicGenesSplit <-
                split(problematicGenes, f = problematicGenes[
                    ,c('gene_id', 'sample_1', 'sample_2')], drop =TRUE)
            # Recompute gene-level expression as the sum of the member
            # isoforms and invalidate the (now meaningless) gene-level test.
            correctedGenes <-
                plyr::ldply(
                    problematicGenesSplit,
                    .fun = function(df) {
                        # df <- problematicGenesSplit[[1]]
                        df$gene_overall_mean <- sum(df$iso_overall_mean)
                        df$gene_value_1 <- sum(df$iso_value_1)
                        df$gene_value_2 <- sum(df$iso_value_2)
                        df$gene_stderr_1 <- NA
                        df$gene_stderr_2 <- NA
                        df$gene_log2_fold_change <- log2(
                            (df$gene_value_2[2] + 0.0001) /
                                (df$gene_value_1[1] + 0.0001)
                        )
                        df$gene_p_value <- 1
                        df$gene_q_value <- 1
                        df$gene_significant <- 'no'
                        df$iso_status <- 'NOTEST'
                        return(df)
                    }
                )
            # sort so genes end up being in correct order for overwriting
            correctedGenes <-
                correctedGenes[order(
                    correctedGenes$isoform_id,
                    correctedGenes$gene_id,
                    correctedGenes$sample_1,
                    correctedGenes$sample_2
                ), -1] # -1 removes the index created by ldply
            # overwrite problematic genes
            isoformData[geneNameIndexesData, c(
                'gene_id',
                'sample_1',
                'sample_2',
                'gene_overall_mean',
                'gene_value_1',
                'gene_value_2',
                'gene_stderr_1',
                'gene_stderr_2',
                'gene_log2_fold_change',
                'gene_p_value',
                'gene_q_value',
                'gene_significant',
                'iso_status',
                'iso_overall_mean',
                'iso_value_1',
                'iso_value_2'
            )] <- correctedGenes[, -1] # -1 removes the isoform id
            ### Add to exons
            exonFeatures$gene_id <- isoformData$gene_id[match(
                exonFeatures$isoform_id, isoformData$isoform_id
            )]
            if (!quiet) {
                message(
                    paste(
                        "    Cufflinks annotation problem was fixed for",
                        length(geneNameSplit),
                        "Cuff_genes",
                        sep = ' '
                    )
                )
            }
        } else {
            if (!quiet) {
                message(paste(
                    'No instances of a Cufflinks annotation',
                    'problem found - no changes were made'
                ))
            }
        }
    } # end of fix cufflinks annotatopn problem
    if ( ! fixCufflinksAnnotationProblem ) {
        if (!quiet) { message('Step 4 of 5: Skipped fixing of cufflinks annotation problem (due to fixCufflinksAnnotationProblem argument)...')}
    }
    if (!quiet) { message('Step 5 of 5: Creating switchAanalyzeRlist...')}
    ### Calculate IF values (isoform fraction of parent-gene expression)
    localAnnot <- unique(isoformData[,c('gene_id','isoform_id')])
    ifMatrix <- isoformToIsoformFraction(
        isoformRepExpression = isoRepFpkm,
        isoformGeneAnnotation = localAnnot,
        quiet = TRUE
    )
    ### Summarize IF
    myMeanIF <- rowMeans(ifMatrix[,designMatrix$sampleID,drop=FALSE], na.rm = TRUE)
    ifMeanDf <- plyr::ddply(
        .data = designMatrix,
        .variables = 'condition',
        .fun = function(aDF) { # aDF <- switchAnalyzeRlist$designMatrix[1:2,]
            tmp <- rowMeans(ifMatrix[,aDF$sampleID,drop=FALSE], na.rm = TRUE)
            data.frame(
                isoform_id=names(tmp),
                mean=tmp,
                stringsAsFactors = FALSE
            )
        }
    )
    isoformData$IF_overall <- myMeanIF[match(
        isoformData$isoform_id, names(myMeanIF)
    )]
    isoformData$IF1 <- ifMeanDf$mean[match(
        stringr::str_c(isoformData$isoform_id,isoformData$sample_1),
        stringr::str_c(ifMeanDf$isoform_id,  ifMeanDf$condition)
    )]
    isoformData$IF2 <- ifMeanDf$mean[match(
        stringr::str_c(isoformData$isoform_id,isoformData$sample_2),
        stringr::str_c(ifMeanDf$isoform_id,  ifMeanDf$condition)
    )]
    isoformData$dIF <- isoformData$IF2 - isoformData$IF1
    ### Add q-values
    if (!is.null(pathToSplicingAnalysis)) {
        ### Add cufflinks analysis isoform switch analysis results
        isoformData$isoform_switch_q_value <- NA
        isoformData$gene_switch_q_value <-
            cuffSplicing$q_value[match(
                paste(
                    isoformData$gene_id,
                    isoformData$sample_1,
                    isoformData$sample_2,
                    sep = '_'
                ),
                paste(
                    cuffSplicing$gene_id,
                    cuffSplicing$sample_1,
                    cuffSplicing$sample_2,
                    sep = '_'
                )
            )]
    } else {
        ### Add collumns for isoform switch analysis results
        isoformData$isoform_switch_q_value <- NA
        isoformData$gene_switch_q_value <- NA
    }
    ### Reorder a bit
    ofInterest <- c('isoform_id','gene_id','gene_name','sample_1','sample_2')
    isoformData <- isoformData[, c(
        match( ofInterest, colnames(isoformData)),
        which( ! colnames(isoformData) %in% ofInterest)
    )]
    colnames(isoformData)[4:5] <- c('condition_1', 'condition_2')
    isoformData <- as.data.frame(isoformData)
    ### Extract run info
    # cufflinks version
    cuffVersion <- runInfo$value[2]
    # replicate numbers
    nrRep <- table(readGroup$condition)
    nrRep <-
        data.frame(
            condition = names(nrRep),
            nrReplicates = as.vector(nrRep),
            row.names = NULL,
            stringsAsFactors = FALSE
        )
    # restore isoform_id as a regular first column
    isoRepFpkm$isoform_id <- rownames(isoRepFpkm)
    rownames(isoRepFpkm) <- NULL
    isoRepFpkm <- isoRepFpkm[,c(
        which(colnames(isoRepFpkm) == 'isoform_id'),
        which(colnames(isoRepFpkm) != 'isoform_id')
    )]
    # Return SpliceRList
    switchAnalyzeRlist <- createSwitchAnalyzeRlist(
        isoformFeatures = isoformData,
        exons = exonFeatures,
        designMatrix = designMatrix,
        isoformCountMatrix = isoRepExp2,
        isoformRepExpression = isoRepFpkm,
        sourceId = paste("cufflinks", cuffVersion , sep = '_')
    )
    if (!is.null(pathToSplicingAnalysis)) {
        if( nrow(cuffSplicing) ) {
            switchAnalyzeRlist$isoformSwitchAnalysis <- as.data.frame(cuffSplicing)
        }
    }
    if( addIFmatrix ) {
        ifMatrix$isoform_id <- rownames(ifMatrix)
        rownames(ifMatrix) <- NULL
        ifMatrix <- ifMatrix[,c(
            which(colnames(ifMatrix) == 'isoform_id'),
            which(colnames(ifMatrix) != 'isoform_id')
        )]
        switchAnalyzeRlist$isoformRepIF <- ifMatrix
    }
    if(addIsoformNt) {
        switchAnalyzeRlist$ntSequence <- isoformNtSeq[which(
            names(isoformNtSeq) %in% switchAnalyzeRlist$isoformFeatures$isoform_id
        )]
    }
    ### Estimate DTU
    if(estimateDifferentialGeneRange & !quiet) {
        localEstimate <- estimateDifferentialRange(switchAnalyzeRlist)
        message('The GUESSTIMATED number of genes with differential isoform usage are:')
        print(localEstimate)
    }
    if (!quiet) {
        message("Done")
    }
    return(switchAnalyzeRlist)
}
# Import transcript structure and annotation from a (gzipped) GTF or RefSeq GFF
# file and build a switchAnalyzeRlist with placeholder conditions/expression
# (no quantification data is imported by this function).
#
# Arguments:
#   pathToGTF           : Path (or URL) to a GTF/GFF file. GFF support is
#                         limited to RefSeq-style GFFs (gene id in 'gene' col).
#   isoformNtFasta      : Optional character vector of fasta file(s) with the
#                         isoform nucleotide sequences to attach.
#   extractAaSeq        : If TRUE (and annotated ORFs were added) also extract
#                         amino acid sequences via extractSequence().
#   addAnnotatedORFs    : Import CDS features, convert them to transcript
#                         coordinates and annotate PTC status.
#   onlyConsiderFullORF : Passed on to analyseCds() (GTF route only).
#   removeNonConvensionalChr : Drop exons on chromosomes containing '_' or '.'.
#   ignoreAfterBar/Space/Period : Truncate isoform ids at '|', ' ' or '.'
#                         (applied to both GTF ids and fasta names).
#   removeTECgenes      : Drop Ensembl/Gencode 'TEC' (To be Experimentally
#                         Confirmed) genes (GTF route only).
#   PTCDistance         : Premature-termination-codon distance cutoff (bp from
#                         last exon-exon junction).
#   removeFusionTranscripts / removeUnstrandedTranscripts / quiet : see docs.
#
# Returns: a switchAnalyzeRlist (with $orfAnalysis if addAnnotatedORFs, and
# $ntSequence if isoformNtFasta matched all annotated isoforms).
importGTF <- function(
    ### Core arguments
    pathToGTF,
    isoformNtFasta = NULL,
    ### Advanced arguments
    extractAaSeq = FALSE,
    addAnnotatedORFs = TRUE,
    onlyConsiderFullORF = FALSE,
    removeNonConvensionalChr = FALSE,
    ignoreAfterBar = TRUE,
    ignoreAfterSpace = TRUE,
    ignoreAfterPeriod = FALSE,
    removeTECgenes = TRUE,
    PTCDistance = 50,
    removeFusionTranscripts = TRUE,
    removeUnstrandedTranscripts = TRUE,
    quiet = FALSE
) {
    ### Test input
    if(TRUE) {
        ### Test existance of files
        if(TRUE) {
            if( pathToGTF == '' ) {
                stop(
                    paste(
                        'The \'pathToGTF\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
                        '\nDid you try to use the system.file("your/quant/dir/", package="IsoformSwitchAnalyzeR")',
                        'to import your own data? The system.file() should only be used',
                        'to access the example data stored in the IsoformSwitchAnalyzeR package.',
                        'To access your own data simply provide the string to the directory with the data as:',
                        '"path/to/quantification/".',
                        sep=' '
                    )
                )
            }
            # Accept either a local file or a URL
            if( ! (file.exists(pathToGTF) | RCurl::url.exists(pathToGTF)) ) {
                stop(
                    paste(
                        'The file pointed to with the \'pathToGTF\' argument does not exists.',
                        '\nDid you accidentially make a spelling mistake or added a unwanted "/" infront of the text string?',
                        sep=' '
                    )
                )
            }

            if( !is.null(isoformNtFasta)) {
                if( !is.character( isoformNtFasta)) {
                    stop('The \'isoformNtFasta\' argument must be a charachter string.')
                }
                if( any(isoformNtFasta == '') ) {
                    stop(
                        paste(
                            'The \'isoformNtFasta\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
                            '\nDid you try to use the system.file("your/quant/dir/", package="IsoformSwitchAnalyzeR")',
                            'to import your own data? The system.file() should only be used',
                            'to access the example data stored in the IsoformSwitchAnalyzeR package.',
                            'To access your own data simply provide the string to the directory with the data as:',
                            '"path/to/quantification/".',
                            sep=' '
                        )
                    )
                }
                if( any( ! sapply(isoformNtFasta, file.exists) ) ) {
                    stop('At least one of the file(s) pointed to with \'isoformNtFasta\' seems not to exist.')
                }
                if( any(! grepl('\\.fa|\\.fasta|\\.fa.gz|\\.fasta.gz', isoformNtFasta)) ) {
                    stop('The file pointed to via the \'isoformNtFasta\' argument does not seem to be a fasta file...')
                }
            }
        }

        if( ! grepl('\\.gtf$|\\.gtf\\.gz$|\\.gff$|\\.gff\\.gz$|\\.gff3$|\\.gff3\\.gz$', pathToGTF, ignore.case = TRUE) ) {
            warning('The file pointed to by the "pathToGTF" argument appearts not to be a GTF/GFF file as it does have the right suffix - are you sure it is the rigth file?')
        }

        # GTF and GFF are parsed differently below
        isGTF <- grepl('\\.gtf$|\\.gtf\\.gz$', pathToGTF, ignore.case = TRUE)
    }

    # Read in from GTF/GFF file
    if(TRUE) {
        if( isGTF ) {
            if (!quiet) {
                message('Importing GTF (this may take a while)...')
            }
            # capture.output() silences progress output from rtracklayer
            tmp <- capture.output(
                suppressWarnings(
                    suppressMessages(
                        mfGTF <- rtracklayer::import(pathToGTF, format='gtf', feature.type = c('CDS','exon'))
                    )
                )
            )

            ### Check GTF contains the two mandatory id columns
            if (!all(c('transcript_id', 'gene_id') %in% colnames(mfGTF@elementMetadata))) {
                collumnsMissing <- paste(
                    c('transcript_id', 'gene_id')[which(
                        !c('transcript_id', 'gene_id') %in%
                            colnames(mfGTF@elementMetadata)
                    )], collapse = ', ')
                stop(
                    paste(
                        'The GTF file must contain the folliwing collumns',
                        '\'transcript_id\' and \'gene_id\'.',
                        collumnsMissing,
                        'is missing.',
                        sep = ' '
                    )
                )
            }
        }
        if( ! isGTF ){
            if (!quiet) {
                message('importing GFF (this may take a while)...')
            }
            tmp <- capture.output(
                suppressWarnings(
                    suppressMessages(
                        mfGTF <- rtracklayer::import(pathToGTF, format='gff')
                    )
                )
            )

            ### Check GFF: only RefSeq style GFFs (gene id stored in 'gene')
            ### are supported. Presence of a 'gene_id' column indicates a
            ### non-RefSeq GFF and triggers an error below.
            geneIdPressent <- 'gene_id' %in% colnames(mfGTF@elementMetadata)

            if( ! geneIdPressent ) {
                ### Check for RefSeq "gene" (aka gene_id)
                if (!all(c('transcript_id', 'gene') %in% colnames(mfGTF@elementMetadata) )) {
                    collumnsMissing <- paste(
                        c('transcript_id', 'gene')[which(
                            !c('transcript_id', 'gene') %in%
                                colnames(mfGTF@elementMetadata)
                        )], collapse = ', ')
                    stop(
                        paste(
                            'The GFF file must contain the folliwing collumns',
                            '\'transcript_id\' and \'gene\'.',
                            collumnsMissing,
                            'is missing.',
                            sep = ' '
                        )
                    )
                }

                ### Rename RefSeq 'gene' column to 'gene_id'
                if( ! 'gene_id' %in% colnames(mcols(mfGTF)) ) {
                    if( 'gene' %in% colnames(mcols(mfGTF)) ) {
                        colnames(mcols(mfGTF))[which(
                            colnames(mcols(mfGTF)) == 'gene'
                        )] <- 'gene_id'
                    } else {
                        stop('Could not locate the gene id in the gff file.')
                    }
                }
            }
            if( geneIdPressent ) {
                stop(
                    paste0(
                        'This is not a RefSeq GFF file (from ftp://ftp.ncbi.nlm.nih.gov/genomes/).',
                        '\nIsoformSwitchAnalyzeR only handles RefSeq GFF files so please supply GTF file instead.',
                        '\n(for more info see FAQ about annotate databases in vignette).'
                    )
                )
            }
        }
    }

    ### Reduce if nessesary
    if (removeNonConvensionalChr) {
        # Non-conventional chromosomes are identified by '_' or '.' in the name
        mfGTF <- mfGTF[which( ! grepl('_' , as.character(mfGTF@seqnames))), ]
        mfGTF <- mfGTF[which( ! grepl('\\.', as.character(mfGTF@seqnames))), ]
        if (length(mfGTF) == 0) {
            stop('No exons were left after filtering',
                 'with \'removeNonConvensionalChr\'.')
        }
        seqlevels(mfGTF) <- as.character(mfGTF@seqnames@values)
    }
    if (removeUnstrandedTranscripts) {
        mfGTF <- mfGTF[which( ! grepl('\\*' , as.character(mfGTF@strand))), ]
        if (length(mfGTF) == 0) {
            stop('No exons were left after filtering',
                 'with \'removeUnstrandedTranscripts\'.')
        }
    }
    if( removeTECgenes ) {
        if(isGTF) {
            ### Ensembl stores gene biotype in 'gene_biotype'
            if( 'gene_biotype' %in% colnames(mcols(mfGTF)) ) {
                toExclude <- mfGTF$gene_biotype == 'TEC'
                if( any( toExclude ) ) {
                    mfGTF <- mfGTF[-which( toExclude ),]
                }
            }
            ### Gencode stores gene biotype in 'gene_type'
            if( 'gene_type' %in% colnames(mcols(mfGTF)) ) {
                toExclude <- mfGTF$gene_type == 'TEC'
                if( any( toExclude ) ) {
                    mfGTF <- mfGTF[-which( toExclude ),]
                }
            }
        }
    }

    ### Ensure seqlevels are ok after removal
    seqlevels(mfGTF) <- unique(as.character(mfGTF@seqnames@values))

    ### Potentially add version numbering (Ensembl style: id + '.' + version)
    if( TRUE ) {
        if(any( colnames(mcols(mfGTF)) == 'gene_version' )) {
            mfGTF$gene_id <- stringr::str_c(
                mfGTF$gene_id,
                '.',
                mfGTF$gene_version
            )
        }

        if(any( colnames(mcols(mfGTF)) == 'transcript_version' )) {
            mfGTF$transcript_id <- stringr::str_c(
                mfGTF$transcript_id,
                '.',
                mfGTF$transcript_version
            )
        }
    }

    ### Fix names (truncate ids at bar/space/period as requested)
    if( ignoreAfterBar | ignoreAfterSpace | ignoreAfterPeriod) {
        mfGTF$transcript_id <- fixNames(
            nameVec = mfGTF$transcript_id,
            ignoreAfterBar = ignoreAfterBar,
            ignoreAfterSpace = ignoreAfterSpace,
            ignoreAfterPeriod = ignoreAfterPeriod
        )
    }

    ### Make annoation
    if(TRUE) {
        if( isGTF ) {
            if (!quiet) {
                message('Massaging annoation...')
            }
            exonAnoationIndex <- which(mfGTF$type == 'exon')

            # Extract one row of annotation per isoform from the exon entries
            colsToExtract <- c(
                'transcript_id', 'gene_id', 'gene_name',
                'gene_type','gene_biotype', # respectively gencode and ensembl gene type col
                'transcript_biotype','transcript_type',
                'ref_gene_id' # for StringTie data
            )
            myIso <-
                as.data.frame(unique(mfGTF@elementMetadata[
                    exonAnoationIndex,
                    na.omit(match(colsToExtract, colnames(mfGTF@elementMetadata)))]
                ))

            ### Handle columns not extracted
            if (is.null(myIso$gene_name)) {
                myIso$gene_name <- NA
            }
            if (is.null(myIso$ref_gene_id)) {
                myIso$ref_gene_id <- myIso$gene_name
            }

            ### Handle columns with multiple options - if both the Gencode and
            ### Ensembl variants are present only use the first one found.
            geneTypeCol <- which(colnames(myIso) %in% c('gene_type','gene_biotype'))
            if( length(geneTypeCol) == 0 ) {
                myIso$geneType <- NA
            } else {
                myIso$geneType <- myIso[,geneTypeCol[1]]
            }

            isoTypeCol <- which(colnames(myIso) %in% c('transcript_biotype','transcript_type'))
            if( length(isoTypeCol) == 0 ) {
                myIso$isoType <- NA
            } else {
                # Bugfix: subset to the first matching column (consistent with
                # geneTypeCol above) - otherwise a GTF with both columns would
                # assign a 2-column data.frame here.
                myIso$isoType <- myIso[,isoTypeCol[1]]
            }

        }
        if( ! isGTF ) {
            if (!quiet) {
                message('converting GFF to switchAnalyzeRlist')
            }
            exonAnoationIndex <- which(mfGTF$type == 'exon')

            colsToExtract <- c(
                'Parent',
                'gene_id',
                'transcript_id'
            )

            # extract exon annot
            myIso <-
                as.data.frame(unique(mfGTF@elementMetadata[
                    exonAnoationIndex,
                    na.omit(match(colsToExtract, colnames(mfGTF@elementMetadata)))]
                ))

            if(any(is.na(myIso$transcript_id))) {
                nNa <- sum(is.na(myIso$transcript_id))

                warning(
                    paste(
                        'There were', nNa, 'annotated features without isoform_ids.',
                        'These were removed.'
                    )
                )

                myIso <- myIso[which(
                    ! is.na(myIso$transcript_id)
                ),]
            }

            # extract gene biotype (from the 'gene' features, if annotated)
            if( 'gene_biotype' %in% colnames(mcols(mfGTF)) ) {
                gffGeneAnnot <- mfGTF[which( mfGTF$type == 'gene' ),c(
                    'ID',
                    'gene_id',
                    'gene_biotype'
                )]

                myIso$gene_biotype <- gffGeneAnnot$gene_biotype[match(myIso$gene_id, gffGeneAnnot$gene_id)]
            }

            ### Handle columns not extracted
            if (is.null(myIso$gene_name)) {
                myIso$gene_name <- NA
            }

            ### Handle columns with multiple options (see GTF route above)
            geneTypeCol <- which(colnames(myIso) %in% c('gene_type','gene_biotype'))
            if( length(geneTypeCol) == 0 ) {
                myIso$geneType <- NA
            } else {
                myIso$geneType <- myIso[,geneTypeCol[1]]
            }

            isoTypeCol <- which(colnames(myIso) %in% c('transcript_biotype','transcript_type'))
            if( length(isoTypeCol) == 0 ) {
                myIso$isoType <- NA
            } else {
                # Bugfix: use only the first matching column (see GTF route)
                myIso$isoType <- myIso[,isoTypeCol[1]]
            }
        }

        ### Make annotation scaffold with placeholder conditions and
        ### placeholder expression/test values (no quantification imported).
        myIsoAnot <- data.frame(
            isoform_id = myIso$transcript_id,
            gene_id = myIso$gene_id,
            condition_1 = "plaseholder1",
            condition_2 = "plaseholder2",
            gene_name = myIso$gene_name,
            ref_gene_id = myIso$ref_gene_id,
            gene_biotype = myIso$geneType,
            iso_biotype = myIso$isoType,
            class_code = '=',
            gene_overall_mean = 0,
            gene_value_1 = 0,
            gene_value_2 = 0,
            gene_stderr_1 = NA,
            gene_stderr_2 = NA,
            gene_log2_fold_change = 0,
            gene_p_value = 1,
            gene_q_value = 1,
            iso_overall_mean = 0,
            iso_value_1 = 0,
            iso_value_2 = 0,
            iso_stderr_1 = NA,
            iso_stderr_2 = NA,
            iso_log2_fold_change = 0,
            iso_p_value = 1,
            iso_q_value = 1,
            IF_overall = NA,
            IF1 = NA,
            IF2 = NA,
            dIF = NA,
            isoform_switch_q_value = NA,
            gene_switch_q_value = NA,
            stringsAsFactors = FALSE
        )
    }

    ### Test for annoation problems typically introduced by transcript
    ### assembly: genes merged under one gene_id (multiple gene_names) or
    ### novel isoforms without an assigned gene_name.
    if(TRUE) {
        geneSummary <-
            myIsoAnot %>%
            dplyr::select(gene_id, gene_name) %>%
            dplyr::distinct() %>%
            group_by(gene_id) %>%
            dplyr::summarise(
                n_gene_names = length(na.omit(gene_name)),
                have_missing_gene_name = any(is.na(gene_name))
            )

        missingGeneProblem <- any(
            geneSummary$n_gene_names > 0 & geneSummary$have_missing_gene_name
        )
        mergedGeneProblem <- any(
            geneSummary$n_gene_names > 1
        )

        if( missingGeneProblem | mergedGeneProblem ) {
            warning(
                paste0(
                    '\nThe annotaion seems to have probelems that commonly occure',
                    '\n when transcript assembly is done (gene merging and unassigned novel isoforms).',
                    '\n These can be fixed and/or rescued by using the importRdata() function instead.',
                    '\n'
                )
            )
        }
    }

    ### Add CDS annoation from GTF file inc convertion to transcript coordinats
    if (addAnnotatedORFs) {
        if( isGTF ) {
            # test whether any CDS are found
            if (any(mfGTF$type == 'CDS')) {
                if (!quiet) {
                    message('Massaging annotated CDSs...')
                }
                ### Prepare CDS data
                myCDS <-
                    mfGTF[which(mfGTF$type == 'CDS'), 'transcript_id']
                colnames(mcols(myCDS)) <- 'isoform_id'

                ### Prepare exon data (only stranded exons of isoforms with CDS)
                localExons <- mfGTF[which(mfGTF$type == 'exon'), 'transcript_id']
                colnames(mcols(localExons)) <- 'isoform_id'

                localExons <-
                    localExons[which(
                        as.character(localExons@strand) %in% c('+', '-')), ]

                localExons <-
                    localExons[which(
                        localExons$isoform_id %in% myCDS$isoform_id
                    ), ]

                ### Analyze CDS (transcript coordinates + PTC annotation)
                orfInfo <- analyseCds(
                    myCDS = myCDS,
                    localExons = localExons,
                    onlyConsiderFullORF = onlyConsiderFullORF,
                    mfGTF = mfGTF,
                    PTCDistance = PTCDistance
                )

                # make sure all ORFs are annotated (with NAs)
                orfInfo <-
                    dplyr::full_join(
                        orfInfo,
                        unique(myIsoAnot[, 'isoform_id', drop = FALSE]),
                        by = 'isoform_id'
                    )
            } else {
                # if no CDS were found annotate everything with NAs
                warning(paste(
                    'No CDS was found in the GTF file. Please make sure the GTF',
                    'file have the CDS "feature" annotation. Adding NAs instead'
                ))

                orfInfo <- data.frame(
                    isoform_id = unique(myIsoAnot$isoform_id),
                    orfTransciptStart = NA,
                    orfTransciptEnd = NA,
                    orfTransciptLength = NA,
                    orfStarExon = NA,
                    orfEndExon = NA,
                    orfStartGenomic = NA,
                    orfEndGenomic = NA,
                    stopDistanceToLastJunction = NA,
                    stopIndex = NA,
                    PTC = NA,
                    stringsAsFactors = FALSE
                )
            }

            ### add to iso annotation
            myIsoAnot$PTC <-
                orfInfo$PTC[match(myIsoAnot$isoform_id, orfInfo$isoform_id)]
        }
        if( ! isGTF ) {
            # test whether any CDS are found
            if (any(mfGTF$type == 'CDS')) {
                if (!quiet) {
                    message('converting annotated CDSs')
                }

                ### Extract mRNA ids (needed to link CDS 'Parent' to isoforms)
                colsToExtract <- c(
                    'ID',
                    'gene_id',
                    'transcript_id'
                )
                myIso2 <-
                    as.data.frame(unique(mfGTF@elementMetadata[
                        which(mfGTF$type == 'mRNA'),
                        na.omit(match(colsToExtract, colnames(mfGTF@elementMetadata)))]
                    ))

                ### Extract CDS
                myCDS <- mfGTF[which(mfGTF$type == 'CDS'),c('ID','transcript_id','Parent')]
                myCDS$Parent <- sapply(myCDS$Parent, function(x) x[1])

                ### Transfer ids (CDS Parent -> mRNA ID -> transcript_id)
                myCDS <- myCDS[which(
                    myCDS$Parent %in% myIso2$ID
                ),]
                myCDS$transcript_id <- myIso2$transcript_id[match(
                    myCDS$Parent, myIso2$ID
                )]

                myCDS <- myCDS[which(
                    myCDS$transcript_id %in% myIsoAnot$isoform_id
                ),]

                ### Get the genomic start/end (edges) of each CDS
                myCDS <-
                    sort(myCDS[,'transcript_id'])
                myCDSedges <-
                    suppressMessages(unlist(range(
                        split(myCDS[, 0], f = myCDS$transcript_id)
                    ))) # Extract EDGEs
                myCDSedges$id <- names(myCDSedges)
                names(myCDSedges) <- NULL

                ### Extract Exons (stranded, belonging to isoforms with CDS)
                localExons <- mfGTF[exonAnoationIndex, 'transcript_id']

                localExons <-
                    localExons[which(
                        as.character(localExons@strand) %in% c('+', '-')), ]

                localExons <-
                    localExons[which(localExons$transcript_id %in% myCDSedges$id), ]

                localExons <-
                    localExons[order(localExons$transcript_id,
                                     start(localExons),
                                     end(localExons)), ]
                localExons$exon_id <-
                    paste('exon_', 1:length(localExons), sep = '')

                ### This is where I can remove the stop codon

                ### Extract strand specific ORF info
                cds <- as.data.frame(myCDSedges)

                # start (genomic CDS start depends on strand)
                plusIndex <- which(cds$strand == '+')
                annoatedStartGRangesPlus <-
                    GRanges(
                        cds$seqnames[plusIndex],
                        IRanges(
                            start = cds$start[plusIndex],
                            end = cds$start[plusIndex] #-3 # -3 since stop codon is included in GFF according to https://github.com/The-Sequence-Ontology/Specifications/blob/master/gff3.md
                        ),
                        strand = cds$strand[plusIndex],
                        id = cds$id[plusIndex]
                    )

                minusIndex <- which(cds$strand == '-')
                annoatedStartGRangesMinus <-
                    GRanges(
                        cds$seqnames[minusIndex],
                        IRanges(
                            start = cds$end[minusIndex],
                            end = cds$end[minusIndex] #+3 # +3 since stop codon is included in GFF according to https://github.com/The-Sequence-Ontology/Specifications/blob/master/gff3.md
                        ),
                        strand = cds$strand[minusIndex],
                        id = cds$id[minusIndex]
                    )

                annoatedStartGRanges <-
                    c(annoatedStartGRangesPlus,
                      annoatedStartGRangesMinus)
                annoatedStartGRanges$orf_id <-
                    paste('cds_', 1:length(annoatedStartGRanges), sep = '')

                # end (genomic CDS end, strand dependent)
                annoatedEndGRangesPlus <-
                    GRanges(
                        cds$seqnames[plusIndex],
                        IRanges(
                            start = cds$end[plusIndex],
                            end = cds$end[plusIndex]),
                        strand = cds$strand[plusIndex],
                        id = cds$id[plusIndex]
                    )
                annoatedEndGRangesMinus <-
                    GRanges(
                        cds$seqnames[minusIndex],
                        IRanges(
                            start = cds$start[minusIndex],
                            end = cds$start[minusIndex]),
                        strand = cds$strand[minusIndex],
                        id = cds$id[minusIndex]
                    )

                annoatedEndGRanges <-
                    c(annoatedEndGRangesPlus, annoatedEndGRangesMinus)
                annoatedEndGRanges$orf_id <-
                    paste('stop_', 1:length(annoatedEndGRanges), sep = '')

                # combine
                annotatedORFGR <-
                    c(annoatedStartGRanges, annoatedEndGRanges)

                ### Idenetify overlapping CDS and exons as well as the annoate transcript id
                suppressWarnings(overlappingAnnotStart <-
                                     as.data.frame(
                                         findOverlaps(
                                             query = localExons,
                                             subject = annotatedORFGR,
                                             ignore.strand = FALSE
                                         )
                                     ))
                if (!nrow(overlappingAnnotStart)) {
                    stop(
                        'No overlap between CDS and transcripts were found. This is most likely due to a annoation problem around chromosome name.'
                    )
                }

                # Annoate overlap ids
                overlappingAnnotStart$transcript_id <-
                    localExons$transcript_id[overlappingAnnotStart$queryHits]
                overlappingAnnotStart$exon_id <- localExons$exon_id[
                    overlappingAnnotStart$queryHits
                ]
                overlappingAnnotStart$cdsTranscriptID <- annotatedORFGR$id[
                    overlappingAnnotStart$subjectHits
                ]
                overlappingAnnotStart$orf_id <- annotatedORFGR$orf_id[
                    overlappingAnnotStart$subjectHits
                ]

                # subset to overlaps where exon and CDS belong to the same
                # transcript (other overlaps are coincidental)
                overlappingAnnotStart <-
                    overlappingAnnotStart[which(
                        overlappingAnnotStart$transcript_id ==
                            overlappingAnnotStart$cdsTranscriptID
                    ), c('transcript_id',
                         'exon_id',
                         'cdsTranscriptID',
                         'orf_id')]

                # annoate with genomic site
                overlappingAnnotStart$orfGenomic <-
                    start(annotatedORFGR)[match(
                        overlappingAnnotStart$orf_id, annotatedORFGR$orf_id
                    )]

                ### Enrich exon information (cumulative lengths etc. needed to
                ### convert genomic coordinates to transcript coordinates)
                myExons <-
                    as.data.frame(localExons[which(
                        localExons$transcript_id %in%
                            overlappingAnnotStart$transcript_id),])

                # Strand (minus strand exons must be traversed in reverse)
                myExonPlus <- myExons[which(myExons$strand == '+'), ]
                myExonMinus <- myExons[which(myExons$strand == '-'), ]

                plusSplit <-
                    split(myExonPlus$width, myExonPlus$transcript_id)
                minusSplit <-
                    split(myExonMinus$width, myExonMinus$transcript_id)

                # cumsum of exon widths = transcript position of exon starts
                myExonPlus$cumSum <-
                    unlist(sapply(plusSplit , function(aVec) {
                        cumsum(c(0, aVec))[1:(length(aVec))]
                    }))
                myExonMinus$cumSum <-
                    unlist(sapply(minusSplit, function(aVec) {
                        cumsum(c(0, rev(aVec)))[(length(aVec)):1] # reverse
                    }))

                # exon number
                myExonPlus$nrExon <-
                    unlist(sapply(plusSplit, function(aVec) {
                        1:length(aVec)
                    }))
                myExonMinus$nrExon <-
                    unlist(sapply(minusSplit, function(aVec) {
                        1:length(aVec)
                    }))

                # total nr exons / index of last exon
                myExonPlus$lastExonIndex <-
                    unlist(sapply(plusSplit, function(aVec) {
                        rep(length(aVec), length(aVec))
                    }))
                myExonMinus$lastExonIndex <-
                    unlist(sapply(minusSplit, function(aVec) {
                        rep(1, length(aVec))
                    }))

                # final exon exon junction trancipt position
                myExonPlus$finalJunctionPos <-
                    unlist(sapply(plusSplit , function(aVec) {
                        rep(cumsum(c(0, aVec))[length(aVec)], times = length(aVec))
                    }))
                myExonMinus$finalJunctionPos <-
                    unlist(sapply(minusSplit, function(aVec) {
                        rep(cumsum(c(0, rev(
                            aVec
                        )))[length(aVec)], times = length(aVec))
                    }))

                myExons2 <- rbind(myExonPlus, myExonMinus)

                ### Annoate with exon information
                matchIndex <-
                    match(overlappingAnnotStart$exon_id, myExons2$exon_id)
                overlappingAnnotStart$strand <- myExons2$strand[matchIndex]
                overlappingAnnotStart$exon_start <- myExons2$start[matchIndex]
                overlappingAnnotStart$exon_end <- myExons2$end[matchIndex]
                overlappingAnnotStart$exon_cumsum <- myExons2$cumSum[matchIndex]
                overlappingAnnotStart$exon_nr <- myExons2$nrExon[matchIndex]
                overlappingAnnotStart$lastExonIndex <-
                    myExons2$lastExonIndex[matchIndex]
                overlappingAnnotStart$finalJunctionPos <-
                    myExons2$finalJunctionPos[matchIndex]

                ### Annoate with transcript coordinats
                overlappingAnnotStartPlus <-
                    overlappingAnnotStart[which(
                        overlappingAnnotStart$strand == '+'), ]
                overlappingAnnotStartPlus$orfTranscript <-
                    overlappingAnnotStartPlus$exon_cumsum + (
                        overlappingAnnotStartPlus$orfGenomic -
                            overlappingAnnotStartPlus$exon_start
                    ) + 1
                overlappingAnnotStartPlus$junctionDistance <-
                    overlappingAnnotStartPlus$finalJunctionPos -
                    overlappingAnnotStartPlus$orfTranscript + 3 # +3 because the ORF does not include the stop codon - but it should in this calculation

                overlappingAnnotStartMinus <-
                    overlappingAnnotStart[which(
                        overlappingAnnotStart$strand == '-'), ]
                overlappingAnnotStartMinus$orfTranscript <-
                    overlappingAnnotStartMinus$exon_cumsum + (
                        overlappingAnnotStartMinus$exon_end -
                            overlappingAnnotStartMinus$orfGenomic
                    ) + 1
                overlappingAnnotStartMinus$junctionDistance <-
                    overlappingAnnotStartMinus$finalJunctionPos -
                    overlappingAnnotStartMinus$orfTranscript + 3 # +3 because the ORF does not include the stop codon - but it should in this calculation

                overlappingAnnotStart2 <-
                    rbind(overlappingAnnotStartPlus,
                          overlappingAnnotStartMinus)
                overlappingAnnotStart2 <-
                    overlappingAnnotStart2[order(
                        overlappingAnnotStart2$transcript_id,
                        overlappingAnnotStart2$exon_start,
                        overlappingAnnotStart2$exon_end
                    ), ]

                ### devide into start and stop
                starInfo <-
                    overlappingAnnotStart2[which(
                        grepl('^cds', overlappingAnnotStart2$orf_id)), ]
                stopInfo <-
                    overlappingAnnotStart2[which(
                        grepl('^stop', overlappingAnnotStart2$orf_id)), ]

                ### predict PTC: stop codon upstream of the last exon-exon
                ### junction by more than PTCDistance
                stopInfo$PTC <-
                    stopInfo$exon_nr != stopInfo$lastExonIndex &
                    stopInfo$junctionDistance > PTCDistance

                ### Merge the data
                starInfo2 <-
                    unique(starInfo[, c('transcript_id',
                                        'orfGenomic',
                                        'exon_nr',
                                        'orfTranscript')])
                colnames(starInfo2) <-
                    c('isoform_id',
                      'orfStartGenomic',
                      'orfStarExon',
                      'orfTransciptStart')

                stopInfo2 <-
                    unique(stopInfo[, c(
                        'transcript_id',
                        'orfGenomic',
                        'exon_nr',
                        'orfTranscript',
                        'junctionDistance',
                        'lastExonIndex',
                        'PTC'
                    )])
                colnames(stopInfo2) <-
                    c(
                        'isoform_id',
                        'orfEndGenomic',
                        'orfEndExon',
                        'orfTransciptEnd',
                        'stopDistanceToLastJunction',
                        'stopIndex',
                        'PTC'
                    )

                orfInfo <- dplyr::inner_join(starInfo2, stopInfo2, by = 'isoform_id')

                orfInfo$orfTransciptLength <-
                    orfInfo$orfTransciptEnd - orfInfo$orfTransciptStart + 1

                # reorder
                orfInfo <-
                    orfInfo[, c(
                        'isoform_id',
                        'orfTransciptStart',
                        'orfTransciptEnd',
                        'orfTransciptLength',
                        'orfStarExon',
                        'orfEndExon',
                        'orfStartGenomic',
                        'orfEndGenomic',
                        'stopDistanceToLastJunction',
                        'stopIndex',
                        'PTC'
                    )]

                # make sure all ORFs are annotated (with NAs)
                orfInfo <-
                    dplyr::full_join(orfInfo,
                                     unique(myIsoAnot[, 'isoform_id', drop = FALSE]),
                                     by = 'isoform_id')

            } else {
                # if no CDS were found annotate everything with NAs
                warning(paste(
                    'No CDS was found in the GTF file. Please make sure the GTF',
                    'file have the CDS "feature" annotation. Adding NAs instead'
                ))

                orfInfo <- data.frame(
                    isoform_id = unique(myIsoAnot$isoform_id),
                    orfTransciptStart = NA,
                    orfTransciptEnd = NA,
                    orfTransciptLength = NA,
                    orfStarExon = NA,
                    orfEndExon = NA,
                    orfStartGenomic = NA,
                    orfEndGenomic = NA,
                    stopDistanceToLastJunction = NA,
                    stopIndex = NA,
                    PTC = NA,
                    stringsAsFactors = FALSE
                )
            }

            ### add to iso annotation
            myIsoAnot$PTC <-
                orfInfo$PTC[match(myIsoAnot$isoform_id, orfInfo$isoform_id)]
        }
    }

    ### Handle sequence input
    if(TRUE) {
        addIsoformNt <- FALSE

        if( !is.null(isoformNtFasta) ) {
            # Bugfix: read each supplied fasta file once. The original lambda
            # passed the whole 'isoformNtFasta' vector to readDNAStringSet()
            # instead of 'aFile', re-reading every file per iteration.
            isoformNtSeq <- do.call(
                c,
                lapply(isoformNtFasta, function(aFile) {
                    Biostrings::readDNAStringSet(
                        filepath = aFile, format = 'fasta'
                    )
                })
            )

            if(!is(isoformNtSeq, "DNAStringSet")) {
                stop('The fasta file supplied to \'isoformNtFasta\' does not contain the nucleotide (DNA) sequence...')
            }

            ### Remove preceeding ref| (RefSeq style fasta headers)
            if(
                sum(grepl('^ref\\|', names(isoformNtSeq))) == length( isoformNtSeq )
            ) {
                names(isoformNtSeq) <- gsub('^ref\\|', '', names(isoformNtSeq))
            }

            ### Remove potential name duplication
            isoformNtSeq <- isoformNtSeq[which(
                ! duplicated(names(isoformNtSeq))
            )]

            ### Fix names the same way transcript ids were fixed
            if( ignoreAfterBar | ignoreAfterSpace | ignoreAfterPeriod) {
                names(isoformNtSeq) <- fixNames(
                    nameVec = names(isoformNtSeq),
                    ignoreAfterBar = ignoreAfterBar,
                    ignoreAfterSpace = ignoreAfterSpace,
                    ignoreAfterPeriod = ignoreAfterPeriod
                )
            }

            ### Subset to those in GTF file
            isoformNtSeq <- isoformNtSeq[which(
                names(isoformNtSeq) %in% myIsoAnot$isoform_id
            )]

            # Only attach sequences when ALL annotated isoforms have one -
            # otherwise warn with diagnostics and skip.
            if( ! all(myIsoAnot$isoform_id %in% names(isoformNtSeq)) ) {
                warning(
                    paste(
                        'The fasta file supplied to \'isoformNtFasta\' does not contain the',
                        'nucleotide (DNA) sequence for all isoforms annotated and will not be added!',
                        '\nSpecifically:\n',
                        length(unique(myIsoAnot$isoform_id)), 'isoforms were annotated in the GTF\n',
                        length(unique(names(isoformNtSeq))), 'isoforms have a sequence.\n',
                        'Only', length(intersect(names(isoformNtSeq), myIsoAnot$isoform_id)), 'overlap.\n',
                        length(setdiff(unique(myIsoAnot$isoform_id), names(isoformNtSeq))), 'annoated isoforms isoforms had no corresponding nucleotide sequence\n',
                        '\nIf there is no overlap (as in zero or close) there are two options:\n',
                        '1) The files do not fit together (different databases, versions etc)',
                        '(no fix except using propperly paired files).\n',
                        '2) It is somthing to do with how the isoform ids are stored in the different files.',
                        'This problem might be solvable using some of the',
                        '\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n',
                        ' 3 Examples from GTF are :',
                        paste0( sample(unique(myIsoAnot$isoform_id), min(3, nrow(myIsoAnot)) ), collapse = ', '),'\n',
                        ' 3 Examples of isoform sequence are :',
                        paste0( sample(names(isoformNtSeq), min(3, length(isoformNtSeq)) ), collapse = ', '),'\n',
                        '\nIf there is a large overlap but still far from complete there are 3 possibilites:\n',
                        '1) The files do not fit together (different databases versions)',
                        '(no fix except using propperly paired files).\n',
                        '2) The isoforms quantified have their nucleotide sequence stored in multiple fasta files (common for Ensembl).',
                        'Just supply a vector with the path to each of them to the \'isoformNtFasta\' argument.\n',
                        '3) One file could contain non-chanonical chromosomes while the other do not',
                        '(might be solved using the \'removeNonConvensionalChr\' argument.)\n',
                        '4) It is somthing to do with how a subset of the isoform ids are stored in the different files.',
                        'This problem might be solvable using some of the',
                        '\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n\n',
                        sep = ' '
                    )
                )
            } else {
                addIsoformNt <- TRUE
            }

            ### Subset to annotated isoforms
            isoformNtSeq <- isoformNtSeq[which(
                names(isoformNtSeq) %in% myIsoAnot$isoform_id
            )]

            #if( length(isoformNtSeq) ) {
            #    addIsoformNt <- FALSE
            #}
        }
    }

    ### Create exon_features grange
    myExons <-
        sort(mfGTF[exonAnoationIndex , c('transcript_id', 'gene_id')])
    colnames(myExons@elementMetadata) <- c('isoform_id', 'gene_id')
    myExons <- myExons[which(
        myExons$isoform_id %in% myIsoAnot$isoform_id
    ),]

    # Collaps ajecent exons (without any intron between)
    if(TRUE) {
        ### Reduce ajecent exons (per isoform)
        tmp <- unlist(
            GenomicRanges::reduce(
                split(
                    myExons,
                    myExons$isoform_id
                )
            )
        )

        ### Add isoform id (stored as names by unlist/split)
        tmp$isoform_id <- tmp@ranges@NAMES
        tmp@ranges@NAMES <- NULL

        ### add gene id
        tmp$gene_id <-myExons$gene_id[match(
            tmp$isoform_id, myExons$isoform_id
        )]

        ### sort
        tmp <- tmp[sort.list(tmp$isoform_id),]

        ### Overwrite
        myExons <- tmp
    }

    # create placeholder replicate info (no quantification imported)
    nrRep <-
        data.frame(
            condition = c('plaseholder1', 'plaseholder2'),
            nrReplicates = c(NA, NA),
            row.names = NULL,
            stringsAsFactors = FALSE
        )

    # create dummy count feature
    repExp <- data.frame(
        isoform_id = myIsoAnot$isoform_id,
        plaseholder1 = NA,
        plaseholder2 = NA,
        stringsAsFactors = FALSE
    )

    designMatrix <-
        data.frame(
            sampleID = c('plaseholder1', 'plaseholder2'),
            condition = c('plaseholder1', 'plaseholder2'),
            stringsAsFactors = FALSE
        )

    ### Create switchList
    if (!quiet) {
        message('Creating switchAnalyzeRlist...')
    }
    localSwichList <- createSwitchAnalyzeRlist(
        isoformFeatures = myIsoAnot,
        exons = myExons,
        designMatrix = designMatrix,
        isoformCountMatrix = repExp,
        removeFusionTranscripts = removeFusionTranscripts,
        sourceId = 'gtf'
    )

    if(addAnnotatedORFs) {
        # subset to those in list
        orfInfo <-
            orfInfo[which(orfInfo$isoform_id %in%
                              localSwichList$isoformFeatures$isoform_id), ]

        # Annotate ORF origin
        orfInfo$orf_origin <- 'Annotation'

        # check for negative ORF lengths - these indicate non-unique gene_ids
        # (same id used on multiple genomic loci)
        isoformsToRemove <-
            orfInfo$isoform_id[which(orfInfo$orfTransciptLength < 0)]

        if (length(isoformsToRemove)) {
            genesToRemove <-
                localSwichList$isoformFeatures$gene_id[which(
                    localSwichList$isoformFeatures$isoform_id %in%
                        isoformsToRemove)]

            localSwichList <-
                subsetSwitchAnalyzeRlist(
                    localSwichList,
                    !localSwichList$isoformFeatures$gene_id %in% genesToRemove
                )

            warning(
                paste(
                    length(genesToRemove),
                    'genes where removed due to negative ORF lengths. This',
                    'typically occures because gene_id are not unique',
                    '(meaning are found multiple places accorss the genome).',
                    'Please note there might still be duplicated gene_id',
                    'located on the same chromosome.',
                    sep = ' '
                )
            )
        }

        localSwichList$orfAnalysis <- orfInfo[which(
            orfInfo$isoform_id %in% localSwichList$isoformFeatures$isoform_id)
            ,]
    }

    ### Add nucleotide sequence
    if(addIsoformNt) {
        localSwichList$ntSequence <- isoformNtSeq[which(
            names(isoformNtSeq) %in% localSwichList$isoformFeatures$isoform_id
        )]

        if(addAnnotatedORFs & extractAaSeq) {
            localSwichList <- extractSequence(
                switchAnalyzeRlist = localSwichList,
                onlySwitchingGenes = FALSE,
                writeToFile = FALSE,
                extractNTseq = TRUE,
                extractAAseq = TRUE,
                addToSwitchAnalyzeRlist = TRUE
            )
        }
    }

    # Return switchAnalyzeRlist
    if (!quiet) {
        message('Done.')
    }

    return(localSwichList)
}
importIsoformExpression <- function(
### Core arguments
parentDir = NULL,
sampleVector = NULL,
### Advanced arguments
calculateCountsFromAbundance=TRUE,
addIsofomIdAsColumn=TRUE,
interLibNormTxPM=TRUE,
normalizationMethod='TMM',
pattern='',
invertPattern=FALSE,
ignore.case=FALSE,
ignoreAfterBar = TRUE,
ignoreAfterSpace = TRUE,
ignoreAfterPeriod = FALSE,
readLength = NULL,
showProgress = TRUE,
quiet = FALSE
) {
### To do
# Could get a "summarize to gene level" option
### Test
if(TRUE) {
if( all(c( is.null(parentDir), is.null(sampleVector) )) ) {
stop('Either the \'parentDir\' or the \'sampleVector\' argument must be used.')
}
if( !is.null(parentDir) & !is.null(sampleVector) ) {
stop('Only one of the the \'parentDir\' and \'sampleVector\' argument can be used.')
}
inputIsDir <- ! is.null(parentDir)
if( inputIsDir ) {
if( !is.character(parentDir) ) {
stop('The user should supply a sting to the \'parentDir\' argument.')
}
if( parentDir == '' ) {
stop(
paste(
'The \'parentDir\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
'\nDid you try to use the system.file("your/quant/dir/", package="IsoformSwitchAnalyzeR")',
'to import your own data? The system.file() should only be used',
'to access the example data stored in the IsoformSwitchAnalyzeR package.',
'To access your own data simply provide the string to the directory with the data as:',
'"path/to/quantification/".',
sep=' '
)
)
}
if( ! dir.exists(parentDir) ) {
if( file_test("-f", parentDir) ) {
stop(
paste(
'The file pointed to with the \'parentDir\' argument seems to be a file (not a directory).',
'Did you mean to use the \'sampleVector\' argument?',
'\nType "?importIsoformExpression" for more information.',
sep=' '
)
)
}
stop(
paste(
'The directory pointed to with the \'parentDir\' argument does not exists.',
'\nDid you accidentially make a spelling mistake or added a unwanted "/" infront of the text string?',
sep=' '
)
)
}
}
if( ! inputIsDir ) {
if( !is.character(sampleVector) ) {
stop('The user should supply a sting to the \'sampleVector\' argument.')
}
if( '' %in% sampleVector ) {
stop(
paste(
'The \'sampleVector\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
'\nDid you try to use the system.file("your/quant/dir/quant.file", package="IsoformSwitchAnalyzeR")',
'to import your own data? The system.file() should only be used',
'to access the example data stored in the IsoformSwitchAnalyzeR package.',
'To access your own data simply provide the string to the files with the data as:',
'"path/to/quantification/quantification.file".',
sep=' '
)
)
}
if( ! all(file.exists(sampleVector)) ) {
stop(
paste(
'One or more of the files pointed to with the \'sampleVector\' argument does not exists.',
'\nDid you accidentially make a spelling mistake or added a unwanted "/" infront of the text string?',
sep=' '
)
)
}
if( ! all(file_test("-f", sampleVector)) ) {
stop(
paste(
'One or more of the files pointed to with the \'sampleVector\' argument seems to be a directory.',
'Did you mean to use the \'parentDir\' argument?',
'\nType "?importIsoformExpression" for more information.',
sep=' '
)
)
}
}
}
### Initialize
if(TRUE) {
if( !normalizationMethod %in% c("TMM", "RLE", "upperquartile") ){
stop('Metod supplied to "normalizationMethod" must be one of "TMM", "RLE" or "upperquartile". See documentation of edgeR::calcNormFactors for more info')
}
analysisCount <- 2 +
as.integer(interLibNormTxPM)
if (showProgress & !quiet) {
progressBar <- 'text'
progressBarLogic <- TRUE
} else {
progressBar <- 'none'
progressBarLogic <- FALSE
}
### data.frame with nesseary info
supportedTypes <- data.frame(
orign = c('Kallisto' , 'Salmon' , 'RSEM' , 'StringTie' ),
fileName = c('abundance.tsv' , 'quant.sf' , 'isoforms.results', 't_data.ctab'),
eLengthCol = c('eff_length' , 'EffectiveLength', 'effective_length', ''),
stringsAsFactors = FALSE
)
### Add support for detection of compressed files
supportedTypes2 <- supportedTypes
supportedTypes2$fileName <- paste0(supportedTypes2$fileName, '.gz')
supportedTypes <- rbind(
supportedTypes,
supportedTypes2
)
headerTypes <- list(
Kallisto = c('target_id','length','eff_length','est_counts','tpm'),
Salmon = c('Name','Length','EffectiveLength','TPM','NumReads'),
RSEM = c('transcript_id','gene_id','length','effective_length','expected_count','TPM','FPKM','IsoPct'),
StringTie = c('t_id','chr','strand','start','end','t_name','num_exons','length','gene_id','gene_name','cov','FPKM')
)
}
### Handle directory input
if(inputIsDir) {
### Identify directories of interest
if (TRUE) {
if (!quiet) {
message('Step 1 of ', analysisCount, ': Identifying which algorithm was used...')
}
dirList <- split(
list.dirs(
path = parentDir,
full.names = FALSE,
recursive = FALSE
),
list.dirs(
path = parentDir,
full.names = FALSE,
recursive = FALSE
)
)
dirList <- dirList[which(sapply(dirList, nchar) > 0)]
if(length(dirList) == 0) {
stop('No subdirecories were found in the supplied folder. Please check and try again.')
}
### Extract those where there are files of interest
dirList <-
dirList[sapply(
dirList,
FUN = function(aDir) {
# aDir <- dirList[[6]]
localFiles <-
list.files(
paste0(parentDir, '/', aDir),
recursive = FALSE
)
if(length( localFiles )) {
fileOfInterest <- any(
sapply(
paste(supportedTypes$fileName, '$', sep = ''),
function(aFileName) {
grepl(pattern = aFileName, x = localFiles)
})
)
} else{
fileOfInterest <- FALSE
}
return(fileOfInterest)
}
)]
### Remove hidden directories
if( any( grepl('^\\.', names(dirList)) ) ) {
nHidden <- sum( grepl('^\\.', names(dirList)) )
nTotal <- length(dirList)
warning(
paste(
'The importIsoformExpression() function identified',
nHidden,
'hidden sub-directories',
paste0('(of a total ',nTotal,' sub-directories of interest)'),
'\nThese were identified as having the prefix "." and will be ignored.',
'\nIf you want to keep them you will have to re-name the sub-directories omitting the starting ".".',
sep=' '
)
)
dirList <- dirList[which(
! grepl('^\\.', names(dirList))
)]
}
if (length(dirList) == 0) {
stop(
paste(
'There were no directories containing the file names/suffixes',
'typically generated by Kallisto/Salmon/RSEM/StringTie.',
'Have you renamed the quantification files?',
'(if so you should probably use the "sampleVector" argument instead).',
sep=' '
)
)
}
}
### Identify input type
if(TRUE) {
dataAnalyed <- supportedTypes[which(
sapply(
paste0(supportedTypes$fileName,'$'),
function(aFileName) {
any(grepl(
pattern = aFileName,
x = list.files(paste0( parentDir, '/', dirList[[1]] ))
))
})
), ]
if (nrow(dataAnalyed) == 0) {
stop(
paste(
'Could not identify any files with the names/suffixes',
'typically generated by Kallisto/Salmon/RSEM/StringTie.',
'Have you renamed the quantification files?',
'(if so you should use the "sampleVector" argument instead).',
sep=' '
)
)
}
if (nrow(dataAnalyed) > 1) {
stop(
paste(
'Could not uniquely identify file type.',
'Does the subdirectory contain results from multiple different tools?',
'If so you should use the "sampleVector" argument instead.',
'If not please contact developer.'
)
)
}
if (!quiet) {
message(paste(' The quantification algorithm used was:', dataAnalyed$orign, sep = ' '))
}
if( dataAnalyed$orign == 'StringTie' & is.null(readLength)) {
stop(paste(
'When importing StringTie results the \'readLength\' argument',
'must be specified.\n',
'This argument must be set to the number of base pairs sequenced',
'(e.g. if the \n quantified data is 75 bp paired ends \'readLength\' should be set to 75.'
))
}
}
### Make paths for tximport
if(TRUE) {
### make vector with paths
localFiles <- sapply(
dirList,
function(aDir) {
list.files(
path = paste0( parentDir, '/', aDir, '/' ),
pattern = paste0(dataAnalyed$fileName, '$'),
full.names = TRUE
)
}
)
names(localFiles) <- names(dirList)
### Subset to those of interest
if( invertPattern ) {
localFiles <- localFiles[which(
! grepl(
pattern = pattern,
x = localFiles,
ignore.case=ignore.case
)
)]
} else {
localFiles <- localFiles[which(
grepl(
pattern = pattern,
x = localFiles,
ignore.case=ignore.case
)
)]
}
if( length(localFiles) == 0 ) {
stop('No files were left after filtering via the \'pattern\' argument')
}
if (!quiet) {
message(
paste0(
' Found ',
length(localFiles),
' quantification file(s) of interest'
)
)
}
### Test existence
if(TRUE) {
fileTest <- file.exists(localFiles)
if( !all(fileTest)) {
stop(
paste0(
'\nSomething went wrong with the file-path creation. Please contact developer with reproducible example.',
'\n One file which did not work out was:\n ',
localFiles[which( ! fileTest) [1]],
sep=''
)
)
}
}
}
}
### Handle file input
if( ! inputIsDir ) {
if (!quiet) {
message('Step 1 of ', analysisCount, ': Identifying which algorithm was used...')
}
### Identify input type
if(TRUE) {
dataAnalyedList <- plyr::llply(sampleVector, function(aFilePath) {
suppressMessages(
sampleFile <- readr::read_tsv(
aFilePath, col_names = TRUE, n_max = 2
)
)
localRes <- data.frame(
orign = names(headerTypes)[which(
sapply(headerTypes, function(x) {
all(x %in% colnames(sampleFile))
})
)],
stringsAsFactors = FALSE
)
return(localRes)
})
if( any(sapply(dataAnalyedList, nrow) != 1) ) {
stop(
paste(
'Some of the files pointed to are not quantification',
'files from Kallisto/Salmon/RSEM/StringTie.',
'They did no contain the column names',
'typically generated by Kallisto/Salmon/RSEM/StringTie.',
'Are you sure it is the right files?',
sep=' '
)
)
}
dataAnalyed <- unique(
do.call(
rbind,
dataAnalyedList
)
)
if (nrow(dataAnalyed) == 0) {
stop(
paste(
'None of the files had the column names',
'typically generated by Kallisto/Salmon/RSEM/StringTie.',
'Are you sure it is the right files?',
sep=' '
)
)
}
if (nrow(dataAnalyed) > 1) {
stop(
paste(
'Could not uniquely identify file type.',
'Does the files pointed to come from a mixture of multiple different tools?',
'That is neither recommended nor supported. Please use only one tool'
)
)
}
if (!quiet) {
message(paste(' The quantification algorithm used was:', dataAnalyed$orign, sep = ' '))
}
if( dataAnalyed$orign == 'StringTie' & is.null(readLength)) {
stop(paste(
'When importing StringTie results the \'readLength\' argument',
'must be specified.\n',
'This argument must be set to the number of base pairs sequenced',
'(e.g. if the \n quantified data is 75 bp paired ends \'readLength\' should be set to 75.'
))
}
}
### Add names
if(TRUE) {
localFiles <- sampleVector
fileNames <- names(localFiles)
### Add file names
if( is.null( fileNames ) | any(is.na( fileNames )) ) {
if(any(is.na( fileNames ))) {
if (!quiet) {
message(' NAs was found in the name vector IsoformSwitchAnalyzeR will try and create new once.')
}
}
fileNames <- sapply(strsplit(localFiles, '/'), function(x) {
tmp <- tail(x, 1)
tmp <- gsub('\\.tsv$|\\.sf$|\\.isoforms.results|\\.ctab$|\\.csv|\\.txt', '', tmp)
return(tmp)
})
}
if(any(duplicated(fileNames)) ) {
# fileNames <- c('t','t','b')
nameCount <- as.data.frame(table(fileNames))
fileNames <- plyr::ddply(nameCount, 'fileNames', function(aDF) {
if( aDF$Freq == 1) {
return(
data.frame(
newId = aDF$fileNames,
stringsAsFactors = FALSE
)
)
} else {
return(
data.frame(
newId = paste0(aDF$fileNames, '_', 1:aDF$Freq),
stringsAsFactors = FALSE
)
)
}
})$newId
}
if( any(duplicated(fileNames)) ){
stop(
paste(
'IsoformSwitchAnalyzeR could not fix the missing name problem.',
'Please assign names to the vector provided to',
'the \'sampleVector\' argument using the names() function.'
)
)
}
names(localFiles) <- fileNames
}
}
### Import files with txtimport
if(TRUE) {
if (!quiet) {
message('Step 2 of ', analysisCount, ': Reading data...')
}
### Use Txtimporter to import data
if (!quiet) {
localDataList <- tximport::tximport(
files = localFiles,
type = tolower(dataAnalyed$orign),
txOut = TRUE, # to get isoform expression
countsFromAbundance = ifelse(
test = calculateCountsFromAbundance,
yes= 'scaledTPM',
no='no'
),
ignoreAfterBar = ignoreAfterBar,
readLength=readLength
)
} else {
suppressMessages(
localDataList <- tximport::tximport(
files = localFiles,
type = tolower(dataAnalyed$orign),
txOut = TRUE, # to get isoform expression
countsFromAbundance = ifelse(
test = calculateCountsFromAbundance,
yes= 'scaledTPM',
no='no'
),
ignoreAfterBar = ignoreAfterBar,
readLength=readLength
)
)
}
}
    ### Test for failed libraries (samples with zero mapped reads)
if(TRUE) {
allZero <- apply(localDataList$abundance, 2, function(x) sum(x) == 0)
if(any(allZero)) {
toRemove <- names(allZero)[which(allZero)]
warning(
paste(
'Some quantifications appared to not have worked (zero reads mapped).',
'\nThe following libraries were therefore removed:',
paste(toRemove, collapse = ', ')
)
)
localDataList$abundance <- localDataList$abundance[,which(!allZero)]
localDataList$counts <- localDataList$counts[,which(!allZero)]
localDataList$length <- localDataList$length[,which(!allZero)]
if( ncol(localDataList$abundance) == 0 ) {
stop('No libraries left after failed quantifications were removed.')
}
}
}
    ### Normalize TxPM values based on effective counts
if(interLibNormTxPM) {
if( ncol(localDataList$abundance) >= 2) {
if (!quiet) {
message('Step 3 of 3: Normalizing abundance values (not counts) via edgeR...')
}
okIso <- rownames(localDataList$abundance)[which(
rowSums( localDataList$abundance > 1 ) > 1
)]
abundMat <- localDataList$abundance[which( rownames(localDataList$abundance) %in% okIso),]
### Calculate normalization factors
localDGE <- suppressMessages( suppressWarnings( edgeR::DGEList(abundMat, remove.zeros = TRUE) ) )
localDGE <- suppressMessages( suppressWarnings( edgeR::calcNormFactors(localDGE, method = normalizationMethod) ) )
### Apply normalization factors
localDataList$abundance <- t(t(localDataList$abundance) / localDGE$samples$norm.factors)
} else {
if (!quiet) {
message('Step 3 of 3: Normalizing skipped due to only 1 sample...')
}
}
}
### Massage data
if(TRUE) {
### massage
localDataList$abundance <- as.data.frame(localDataList$abundance)
localDataList$counts <- as.data.frame(localDataList$counts)
localDataList$length <- as.data.frame(localDataList$length)
localDataList$countsFromAbundance <- NULL # remove message of how it was imported
### Fix names
if( ignoreAfterBar | ignoreAfterSpace | ignoreAfterPeriod) {
### Test for duplication
rownames(localDataList$abundance) <- fixNames(
nameVec = rownames(localDataList$abundance),
ignoreAfterBar = ignoreAfterBar,
ignoreAfterSpace = ignoreAfterSpace,
ignoreAfterPeriod = ignoreAfterPeriod
)
rownames(localDataList$counts) <- fixNames(
nameVec = rownames(localDataList$counts),
ignoreAfterBar = ignoreAfterBar,
ignoreAfterSpace = ignoreAfterSpace,
ignoreAfterPeriod = ignoreAfterPeriod
)
rownames(localDataList$length) <- fixNames(
nameVec = rownames(localDataList$length),
ignoreAfterBar = ignoreAfterBar,
ignoreAfterSpace = ignoreAfterSpace,
ignoreAfterPeriod = ignoreAfterPeriod
)
}
### Add isoform id as col
if(addIsofomIdAsColumn) {
localDataList <- lapply(localDataList, function(x) {
x$isoform_id <- rownames(x)
rownames(x) <- NULL
return(x)
})
### Reorder
reorderCols <- function(x) {
x[,c( ncol(x), 1:(ncol(x)-1) )]
}
localDataList$abundance <- reorderCols( localDataList$abundance)
localDataList$counts <- reorderCols( localDataList$counts )
localDataList$length <- reorderCols( localDataList$length )
}
### Add options
localDataList$importOptions <- list(
'calculateCountsFromAbundance'= calculateCountsFromAbundance,
'interLibNormTxPM'= interLibNormTxPM,
'normalizationMethod'= normalizationMethod
)
if (!quiet) {
message('Done\n')
}
}
return(localDataList)
}
importRdata <- function(
### Core arguments
isoformCountMatrix,
isoformRepExpression = NULL,
designMatrix,
isoformExonAnnoation,
isoformNtFasta = NULL,
comparisonsToMake = NULL,
### Advanced arguments
detectUnwantedEffects = TRUE,
addAnnotatedORFs = TRUE,
onlyConsiderFullORF = FALSE,
removeNonConvensionalChr = FALSE,
ignoreAfterBar = TRUE,
ignoreAfterSpace = TRUE,
ignoreAfterPeriod = FALSE,
removeTECgenes = TRUE,
PTCDistance = 50,
foldChangePseudoCount = 0.01,
fixStringTieAnnotationProblem = TRUE,
fixStringTieViaOverlapInMultiGenes = TRUE,
fixStringTieMinOverlapSize = 50,
fixStringTieMinOverlapFrac = 0.2,
fixStringTieMinOverlapLog2RatioToContender = 0.65,
estimateDifferentialGeneRange = TRUE,
showProgress = TRUE,
quiet = FALSE
) {
### Test existence of files
if(TRUE) {
if( !is.null(isoformNtFasta)) {
if( ! is(isoformNtFasta, 'character') ) {
stop('The \'isoformNtFasta\' argument must be a string (or vector of strings) pointing to the fasta file on the disk.')
}
if( any( isoformNtFasta == '') ) {
stop(
paste(
'The \'isoformNtFasta\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
'\nDid you try to use the system.file("your/quant/dir/", package="IsoformSwitchAnalyzeR")',
'to import your own data? The system.file() should only be used',
'to access the example data stored in the IsoformSwitchAnalyzeR package.',
'To access your own data simply provide the string to the directory with the data as:',
'"path/to/quantification/".',
sep=' '
)
)
}
if( any( ! sapply(isoformNtFasta, file.exists) ) ) {
stop('At least one of the file(s) pointed to with \'isoformNtFasta\' seems not to exist.')
}
if( any(! grepl('\\.fa|\\.fasta|\\.fa.gz|\\.fasta.gz', isoformNtFasta)) ) {
stop('At least one of the file(s) pointed to with \'isoformNtFasta\' seems not to be a fasta file...')
}
}
if( class(isoformExonAnnoation) == 'character' ) {
if( isoformExonAnnoation == '' ) {
stop(
paste(
'The \'isoformExonAnnoation\' argument does not lead anywhere (acutally you just suppled "" to the argument).',
'\nDid you try to use the system.file("your/quant/dir/", package="IsoformSwitchAnalyzeR")',
'to import your own data? The system.file() should only be used',
'to access the example data stored in the IsoformSwitchAnalyzeR package.',
'To access your own data simply provide the string to the directory with the data as:',
'"path/to/quantification/".',
sep=' '
)
)
}
if( ! (file.exists(isoformExonAnnoation) | RCurl::url.exists(isoformExonAnnoation)) ) {
stop(
paste(
'The file pointed to with the \'isoformExonAnnoation\' argument does not exists.',
'\nDid you accidentially make a spelling mistake or added a unwanted "/" infront of the text string?',
sep=' '
)
)
}
}
}
### Test whether input data fits together
if (!quiet) { message('Step 1 of 10: Checking data...')}
if (TRUE) {
### Set up progress
if (showProgress & !quiet) {
progressBar <- 'text'
progressBarLogic <- TRUE
} else {
progressBar <- 'none'
progressBarLogic <- FALSE
}
### Test supplied expression
if(TRUE) {
countsSuppled <- ! is.null(isoformCountMatrix)
abundSuppled <- ! is.null(isoformRepExpression)
if( ! countsSuppled ) {
stop('You must supply a count matrix to \'isoformCountMatrix\'.')
}
if( ! class(isoformCountMatrix)[1] %in% c('data.frame','matrix','tbl_df')) {
stop('The input given as isoformCountMatrix must be a data.frame or matrix.')
}
if( abundSuppled ) {
isoformRepExpression <- as.data.frame(isoformRepExpression)
if( any( apply(isoformRepExpression[,which(colnames(isoformRepExpression) != 'isoform_id')],2, class) %in% c('character', 'factor') )) {
stop('The isoformCountMatrix contains character/factor column(s) (other than the isoform_id column)')
}
extremeValues <- range( isoformRepExpression[,which( colnames(isoformRepExpression) != 'isoform_id')], na.rm = TRUE )
if( max(extremeValues) < 30 ) {
warning('The expression data supplied to \'isoformRepExpression\' seems very small - please double-check that it is NOT log-transformed')
}
if( min(extremeValues) < 0 ) {
stop('The expression data supplied to \'isoformRepExpression\' contains negative values - please double-check that it is NOT log-transformed')
}
}
if( countsSuppled ) {
isoformCountMatrix <- as.data.frame(isoformCountMatrix)
if( any( apply(isoformCountMatrix[,which(colnames(isoformCountMatrix) != 'isoform_id')],2, class) %in% c('character', 'factor') )) {
stop('The isoformCountMatrix contains character/factor column(s) (other than the isoform_id column)')
}
extremeValues <- range( isoformCountMatrix[,which( colnames(isoformCountMatrix) != 'isoform_id')], na.rm = TRUE )
if( max(extremeValues) < 30 ) {
warning('The count data supplied to \'isoformCountMatrix\' seems very small - please double-check that it is NOT log-transformed')
}
if( min(extremeValues) < 0 ) {
stop('The count data supplied to \'isoformCountMatrix\' contains negative values - please double-check that it is NOT log-transformed')
}
}
}
### Contains the colums they should
if (TRUE) {
### Colnames
if( countsSuppled ) {
if (!any(colnames(isoformCountMatrix) == 'isoform_id')) {
#stop(paste(
# 'The data.frame passed to the \'isoformCountMatrix\'',
# 'argument must contain a \'isoform_id\' column'
#))
warning(
paste(
' Using row.names as \'isoform_id\' for \'isoformCountMatrix\'.',
'If not suitable you must add them manually.',
sep=' '
)
)
isoformCountMatrix$isoform_id <- rownames(isoformCountMatrix)
}
if(any(duplicated( isoformCountMatrix$isoform_id) )) {
stop('The \'isoform_id\' of the count matrix must have unique ids.')
}
}
if ( abundSuppled ) {
if (!any(colnames(isoformRepExpression) == 'isoform_id')) {
#stop(paste(
# 'The data.frame passed to the \'isoformRepExpression\'',
# 'argument must contain a \'isoform_id\' column'
#))
message(paste(
' Using row.names as \'isoform_id\' for \'isoformRepExpression\'. If not suitable you must add them manually.'
))
isoformRepExpression$isoform_id <- rownames(isoformRepExpression)
}
if(any(duplicated( isoformRepExpression$isoform_id) )) {
stop('The \'isoform_id\' of the expression matrix must have unique ids.')
}
}
### Potentially convert from tibble
if( class(designMatrix)[1] == 'tbl_df') {
designMatrix <- as.data.frame(designMatrix)
}
if (!all(c('sampleID', 'condition') %in% colnames(designMatrix))) {
stop(paste(
'The data.frame passed to the \'designMatrix\'',
'argument must contain both a \'sampleID\' and a',
'\'condition\' column'
))
}
if (length(unique(designMatrix$condition)) < 2) {
stop('The supplied \'designMatrix\' only contains 1 condition')
}
# test information content in design matrix
if( ncol(designMatrix) > 2 ) {
otherDesign <- designMatrix[,which(
! colnames(designMatrix) %in% c('sampleID', 'condition')
),drop=FALSE]
nonInformaticColms <- which(
apply(otherDesign, 2, function(x) {
length(unique(x)) == 1
})
)
if(length(nonInformaticColms)) {
stop(
paste(
'In the designMatrix the following column(s): ',
paste(names(nonInformaticColms), collapse = ', '),
'\n Contain constant information. Columns apart from \'sampleID\' and \'condition\'\n',
'must describe cofounding effects not if interest. See ?importRdata and\n',
'vignette ("How to handle cofounding effects (including batches)" section) for more information.',
sep=' '
)
)
}
}
# test comparisonsToMake
if (!is.null(comparisonsToMake)) {
if (!all(c('condition_1', 'condition_2') %in%
colnames(comparisonsToMake))) {
stop(paste(
'The data.frame passed to the \'comparisonsToMake\'',
'argument must contain both a \'condition_1\' and a',
'\'condition_2\' column indicating',
'the comparisons to make'
))
}
}
}
### Convert potential factors
if (TRUE) {
orgCond <- designMatrix$condition
designMatrix$sampleID <- as.character(designMatrix$sampleID)
designMatrix$condition <- as.character(designMatrix$condition)
if (!is.null(comparisonsToMake)) {
comparisonsToMake$condition_1 <-
as.character(comparisonsToMake$condition_1)
comparisonsToMake$condition_2 <-
as.character(comparisonsToMake$condition_2)
}
if (!is.null(isoformRepExpression)) {
isoformRepExpression$isoform_id <-
as.character(isoformRepExpression$isoform_id)
}
if (!is.null(isoformCountMatrix)) {
isoformCountMatrix$isoform_id <-
as.character(isoformCountMatrix$isoform_id)
}
}
        ### Check supplied data fits together
if (TRUE) {
if(countsSuppled) {
if (!all(designMatrix$sampleID %in% colnames(isoformCountMatrix))) {
stop(paste(
'Each sample stored in \'designMatrix$sampleID\' must have',
'a corresponding expression column in \'isoformCountMatrix\''
))
}
}
if ( abundSuppled ) {
if (!all(designMatrix$sampleID %in%
colnames(isoformRepExpression))) {
stop(paste(
'Each sample stored in \'designMatrix$sampleID\' must',
'have a corresponding expression column',
'in \'isoformRepExpression\''
))
}
}
if( abundSuppled & countsSuppled ) {
if( ! identical( colnames(isoformCountMatrix) , colnames(isoformRepExpression)) ) {
stop('The column name and order of \'isoformCountMatrix\' and \'isoformRepExpression\' must be identical')
}
if( ! identical( isoformCountMatrix$isoform_id , isoformCountMatrix$isoform_id ) ) {
stop('The ids and order of the \'isoform_id\' column in \'isoformCountMatrix\' and \'isoformRepExpression\' must be identical')
}
}
if (!is.null(comparisonsToMake)) {
if (!all(
c(
comparisonsToMake$condition_1,
comparisonsToMake$condition_2
) %in% designMatrix$condition
)) {
stop(paste(
'The conditions supplied in comparisonsToMake and',
'designMatrix does not match'
))
}
} else {
# create it
comparisonsToMake <-
allPairwiseFeatures(orgCond)
colnames(comparisonsToMake) <-
c('condition_1', 'condition_2')
}
}
### Test complexity of setup
if(TRUE) {
nCond <- length(unique(designMatrix$condition))
n <- nrow(designMatrix)
if( nCond/n > 2/3 ) {
warning(paste(
'The experimental design seems to be of very low complexity - very few samples per replicate.',
'Please check the supplied design matrixt to make sure no mistakes were made.'
))
}
nComp <- nrow(comparisonsToMake)
if( nComp > 6 ) {
warning(paste0(
'The number of comparisons (n=', nComp,') is unusually high.',
'\n - If this intended please note that with a large number of comparisons IsoformSwitchAnalyzeR might use quite a lot of memmory (aka running on a small computer might be problematic).',
'\n - If this was not intended please check the supplied design matrixt to make sure no mistakes were made.'
))
}
### Test conditions with n=1
cndCnt <- table(designMatrix$condition)
if( any(cndCnt == 1) ) {
warning(
paste0(
'\n!!! NB !!! NB !!! NB !!!NB !!! NB !!!',
'\nIsoformSwitchAnalyzeR is not made to work with conditions without indepdendet biological replicates and results will not be trustworthy!',
'\nAt best data without replicates should be analyzed as a pilot study before investing in more replicates.',
'\nPlase consult the "Analysing experiments without replicates" and "What constitute an independent biological replicate?" sections of the vignette.',
'\n!!! NB !!! NB !!! NB !!!NB !!! NB !!!\n'
)
)
}
### Test for full rank
isFullRank <- testFullRank( designMatrix )
if( ! isFullRank ) {
stop(
paste(
'The supplied design matrix will result in a model matrix that is not full rank',
'\nPlease make sure there are no co-linearities in the design'
)
)
}
}
### Test NT input
if(TRUE) {
if( !is.null( isoformNtFasta )) {
if( !is.character( isoformNtFasta)) {
stop('The \'isoformNtFasta\' argument must be a charachter string.')
}
if( any( ! sapply(isoformNtFasta, file.exists) ) ) {
stop('At least one of the file(s) pointed to with \'isoformNtFasta\' seems not to exist.')
}
if( any(! grepl('\\.fa|\\.fasta|\\.fa.gz|\\.fasta.gz', isoformNtFasta)) ) {
stop('The file pointed to via the \'isoformNtFasta\' argument does not seem to be a fasta file...')
}
}
}
}
    ### Give proper R names
if(TRUE) {
### Double check order
designMatrix <- designMatrix[,c(
match( c('sampleID','condition'), colnames(designMatrix) ),
which( ! colnames(designMatrix) %in% c('sampleID','condition') )
)]
tmp <- designMatrix
for( i in 2:ncol(designMatrix) ) { # i <- 2
if( class(designMatrix[,i]) %in% c('character','factor') ) {
designMatrix[,i] <- makeProperNames( designMatrix[,i] )
}
}
if( ! identical(tmp, designMatrix) ) {
message('Please note that some condition names were changed due to names not suited for modeling in R.')
}
if( !is.null(comparisonsToMake) ) {
comparisonsToMake$condition_1 <- makeProperNames(
comparisonsToMake$condition_1
)
comparisonsToMake$condition_2 <- makeProperNames(
comparisonsToMake$condition_2
)
}
}
### Fix names (done before input is handled and compared)
if( ignoreAfterBar | ignoreAfterSpace | ignoreAfterPeriod) {
if( countsSuppled ) {
isoformCountMatrix$isoform_id <- fixNames(
nameVec = isoformCountMatrix$isoform_id,
ignoreAfterBar = ignoreAfterBar,
ignoreAfterSpace = ignoreAfterSpace,
ignoreAfterPeriod = ignoreAfterPeriod
)
}
if ( abundSuppled ) {
isoformRepExpression$isoform_id <- fixNames(
nameVec = isoformRepExpression$isoform_id,
ignoreAfterBar = ignoreAfterBar,
ignoreAfterSpace = ignoreAfterSpace,
ignoreAfterPeriod = ignoreAfterPeriod
)
}
}
### Obtain isoform annotation
if (!quiet) { message('Step 2 of 10: Obtaining annotation...')}
if (TRUE) {
### Massage annoation input
if(TRUE) {
            ### Import GTF if necessary
if( class(isoformExonAnnoation) == 'character' ) {
gtfImported <- TRUE
### Test input
if(TRUE) {
if (length(isoformExonAnnoation) != 1) {
stop(paste(
'You can only supply 1 file to isoformExonAnnoation'
))
}
if( ! grepl('\\.gtf$|\\.gtf\\.gz$', isoformExonAnnoation, ignore.case = TRUE) ) {
warning('The file appearts not to be a GTF file as it does not end with \'.gtf\' or \'.gtf.gz\' - are you sure it is the rigth file?')
}
if (!quiet) {
message(' importing GTF (this may take a while)...')
}
}
### Note: Isoform names are fixed by importGTF
suppressWarnings(
gtfSwichList <- importGTF(
pathToGTF = isoformExonAnnoation,
addAnnotatedORFs = addAnnotatedORFs,
onlyConsiderFullORF = onlyConsiderFullORF,
removeNonConvensionalChr = FALSE,
ignoreAfterBar = ignoreAfterBar,
ignoreAfterSpace = ignoreAfterSpace,
ignoreAfterPeriod = ignoreAfterPeriod,
removeTECgenes = FALSE,
PTCDistance = PTCDistance,
removeFusionTranscripts = FALSE,
removeUnstrandedTranscripts = FALSE,
quiet = TRUE
)
)
### Extract isoforms which are quantified
if(TRUE) {
### Get genes with iso quantified
if( countsSuppled ) {
genesToKeep <- gtfSwichList$isoformFeatures$gene_id[which(
gtfSwichList$isoformFeatures$isoform_id %in% isoformCountMatrix$isoform_id
)]
### Ensure all isoforms quantified are kept
isoToKeep <- union(
gtfSwichList$isoformFeatures$isoform_id[which(
gtfSwichList$isoformFeatures$gene_id %in% genesToKeep
)],
isoformCountMatrix$isoform_id
)
} else {
genesToKeep <- gtfSwichList$isoformFeatures$gene_id[which(
gtfSwichList$isoformFeatures$isoform_id %in% isoformRepExpression$isoform_id
)]
### Ensure all isoforms quantified are kept
isoToKeep <- union(
gtfSwichList$isoformFeatures$isoform_id[which(
gtfSwichList$isoformFeatures$gene_id %in% genesToKeep
)],
isoformRepExpression$isoform_id
)
}
}
### Extract isoforms to remove due to non chanonical nature
if(TRUE) {
### Identify isoforms to remove
isoformsToRemove <- character()
### TEC genes
if( removeTECgenes & any(!is.na( gtfSwichList$isoformFeatures$gene_biotype)) ) {
isoformsToRemove <- c(
isoformsToRemove,
unique(gtfSwichList$isoformFeatures$isoform_id[which(
gtfSwichList$isoformFeatures$gene_biotype == 'TEC'
)])
)
}
### Strange chromosomes
if( removeNonConvensionalChr ) {
nonChanonicalChrsIso <- unique(
gtfSwichList$exons$isoform_id[which(
grepl('_|\\.' , as.character(gtfSwichList$exons@seqnames))
)]
)
isoformsToRemove <- unique(c(
isoformsToRemove,
nonChanonicalChrsIso
))
}
### Unstranded transcripts
if(TRUE) {
unstrandedIso <- unique(
gtfSwichList$exons$isoform_id[which(
grepl('\\*' , as.character(gtfSwichList$exons@strand))
)]
)
isoformsToRemove <- unique(c(
isoformsToRemove,
unstrandedIso
))
if(length(unstrandedIso)) {
warning(
paste0(
'We found ', length(unstrandedIso),
' (', round(
length(unstrandedIso) / length(isoToKeep) * 100,
digits = 2
),
'%) unstranded transcripts.',
'\n These were removed as unstranded transcripts cannot be analysed'
)
)
}
}
### Note:
# No need to extend to genes since they per definition are all genes
### Remove non chanonical isoforms
if(length(isoformsToRemove)) {
isoToKeep <- setdiff(
isoToKeep,
isoformsToRemove
)
}
}
### Subset to used data
if(TRUE) {
if( countsSuppled ) {
isoformCountMatrix <- isoformCountMatrix[which(
isoformCountMatrix$isoform_id %in% isoToKeep
),]
}
if( abundSuppled ) {
isoformRepExpression <- isoformRepExpression[which(
isoformRepExpression$isoform_id %in% isoToKeep
),]
}
if(any(isoToKeep %in% gtfSwichList$isoformFeatures$isoform_id)) {
gtfSwichList$isoformFeatures <- gtfSwichList$isoformFeatures[which(
gtfSwichList$isoformFeatures$isoform_id %in% isoToKeep
),]
gtfSwichList$exons <- gtfSwichList$exons[which(
gtfSwichList$exons$isoform_id %in% isoToKeep
),]
gtfSwichList$orfAnalysis <- gtfSwichList$orfAnalysis[which(
gtfSwichList$orfAnalysis$isoform_id %in% isoToKeep
),]
}
}
### Extract wanted annotation files form the GTF switchAnalyzeR object
if(TRUE) {
isoformExonStructure <-
gtfSwichList$exons[, c('isoform_id', 'gene_id')]
isoformExonStructure <- sort(isoformExonStructure)
colsToExtract <- c(
'isoform_id', 'gene_id', 'gene_name',
'ref_gene_id', # stringtie annotation
'gene_biotype','iso_biotype'
)
isoformAnnotation <-
unique(gtfSwichList$isoformFeatures[,na.omit(
match(colsToExtract , colnames(gtfSwichList$isoformFeatures))
)])
}
# where isoformAnnotation and isoformExonStructure is made
if (addAnnotatedORFs & gtfImported) {
isoORF <- gtfSwichList$orfAnalysis
if( all( is.na(isoORF$PTC)) ) {
warning(
paste(
' No CDS annotation was found in the GTF files meaning ORFs could not be annotated.\n',
' (But ORFs can still be predicted with the analyzeORF() function)'
)
)
addAnnotatedORFs <- FALSE
}
}
}
if( class(isoformExonAnnoation) != 'character' ) {
gtfImported <- FALSE
### Test input
if(TRUE) {
if( ! is(object = isoformExonAnnoation, 'GRanges') ) {
stop('When not using a GTF file (by supplying a text string with the path to the file) the "isoformExonAnnoation" argument must be a GRange.')
}
if( length(isoformExonAnnoation) == 0 ) {
stop('The GRange supplied to the "isoformExonAnnoation" argument have zero enteries (rows).')
}
### Test
if( !all( c('isoform_id', 'gene_id') %in% colnames(isoformExonAnnoation@elementMetadata) )) {
stop('The supplied annotation must contain to meta data collumns: \'isoform_id\' and \'gene_id\'')
}
### Test for other than exons by annotation
if(any( colnames(isoformExonAnnoation@elementMetadata) == 'type' )) {
stop(
paste(
'The \'type\' column of the data supplied to \'isoformExonAnnoation\'',
'indicate there are multiple levels of data.',
'Please fix this (providing only exon-level) or simply',
'\nprovide a string with the path to the GTF file to the \'isoformExonAnnoation\' - ',
'then IsoformSwitchAnalyzeR will import and massage the GTF file for you.'
)
)
}
### Test for other than exons by overlap of transcript features
localExonList <- split(isoformExonAnnoation@ranges, isoformExonAnnoation$isoform_id)
localExonListReduced <- GenomicRanges::reduce(localExonList)
if(
any( sapply( width(localExonList), sum) != sapply( width(localExonListReduced), sum) )
) {
stop(
paste(
'The data supplied to \'isoformExonAnnoation\' appears to be multi-leveled',
'(Fx both containing exon and CDS information for transcripts - which a GTF file does).',
'If your annotation data originate from a GTF file please supply a string',
'indicating the path to the GTF file to the \'isoformExonAnnoation\' argument',
'instead - then IsoformSwitchAnalyzeR will handle the multi-levels.'
)
)
}
}
### Fix names
if( ignoreAfterBar | ignoreAfterSpace | ignoreAfterPeriod) {
isoformExonAnnoation$isoform_id <- fixNames(
nameVec = isoformExonAnnoation$isoform_id,
ignoreAfterBar = ignoreAfterBar,
ignoreAfterSpace = ignoreAfterSpace,
ignoreAfterPeriod = ignoreAfterPeriod
)
}
                ### Collapse adjacent exons (i.e. those without any intron between them)
if(TRUE) {
### Reduce ajecent exons
tmp <- unlist(
GenomicRanges::reduce(
split(
isoformExonAnnoation,
isoformExonAnnoation$isoform_id
)
)
)
### Add isoform id
tmp$isoform_id <- tmp@ranges@NAMES
tmp@ranges@NAMES <- NULL
### add gene id
tmp$gene_id <-isoformExonAnnoation$gene_id[match(
tmp$isoform_id, isoformExonAnnoation$isoform_id
)]
### Add gene names if used
if('gene_name' %in% colnames(isoformExonAnnoation@elementMetadata)) {
tmp$gene_name <-isoformExonAnnoation$gene_name[match(
tmp$isoform_id, isoformExonAnnoation$isoform_id
)]
}
### sort
tmp <- tmp[sort.list(tmp$isoform_id),]
### Overwrite
isoformExonAnnoation <- tmp
}
### Extract isoforms which are quantified
if(TRUE) {
### Get genes with iso quantified
if( countsSuppled ) {
genesToKeep <- isoformExonAnnoation$gene_id[which(
isoformExonAnnoation$isoform_id %in% isoformCountMatrix$isoform_id
)]
### Ensure all isoforms quantified are kept
isoToKeep <- union(
isoformExonAnnoation$isoform_id[which(
isoformExonAnnoation$gene_id %in% genesToKeep
)],
isoformCountMatrix$isoform_id
)
} else {
genesToKeep <- isoformExonAnnoation$gene_id[which(
isoformExonAnnoation$isoform_id %in% isoformRepExpression$isoform_id
)]
### Ensure all isoforms quantified are kept
isoToKeep <- union(
isoformExonAnnoation$isoform_id[which(
isoformExonAnnoation$gene_id %in% genesToKeep
)],
isoformCountMatrix$isoform_id
)
}
}
### Subset to used data
if(TRUE) {
if( countsSuppled ) {
isoformCountMatrix <- isoformCountMatrix[which(
isoformCountMatrix$isoform_id %in% isoToKeep
),]
}
if( abundSuppled ) {
isoformRepExpression <- isoformRepExpression[which(
isoformRepExpression$isoform_id %in% isoToKeep
),]
}
if(any(isoToKeep %in% isoformExonAnnoation$isoform_id)) {
isoformExonAnnoation <- isoformExonAnnoation[which(
isoformExonAnnoation$isoform_id %in% isoToKeep
),]
}
}
                ### Divide the data
colsToUse <- c(
'isoform_id',
'gene_id',
'gene_name'
)
isoformExonStructure <-
isoformExonAnnoation[,na.omit(match(
colsToUse, colnames(isoformExonAnnoation@elementMetadata)
))]
isoformAnnotation <-
unique(as.data.frame(isoformExonAnnoation@elementMetadata))
if (!'gene_name' %in% colnames(isoformAnnotation)) {
isoformAnnotation$gene_name <- NA
}
isoformAnnotation <- isoformAnnotation[order(
isoformAnnotation$gene_id,
isoformAnnotation$gene_name,
isoformAnnotation$isoform_id
),]
}
}
### Test the columns of obtained annoation
if(TRUE) {
if (!all(c('isoform_id', 'gene_id', 'gene_name') %in%
colnames(isoformAnnotation))) {
stop(paste(
'The data.frame passed to the \'isoformAnnotation\' argument',
'must contain the following columns \'isoform_id\',',
'\'gene_id\' and \'gene_name\''
))
}
if (any(is.na(isoformAnnotation[, c('isoform_id', 'gene_id')]))) {
stop(paste(
'The \'isoform_id\' and \'gene_id\' columns in the data.frame',
'passed to the \'isoformAnnotation\' argument are not allowed',
'to contain NAs'
))
}
if (!'isoform_id' %in% colnames(isoformExonStructure@elementMetadata)) {
stop(paste(
'The GenomicRanges (GRanges) object passed to the',
'\'isoformExonStructure\' argument must contain both a',
'\'isoform_id\' and \'gene_id\' metadata column'
))
}
}
    ### Compare the isoform ids in the expression data against the annotation.
    ### A Jaccard similarity below the cutoff (or any quantified isoform
    ### without annotation) is fatal; a slight mismatch above the cutoff
    ### triggers a warning and the data is reduced to the common isoforms.
    if(TRUE) {
        # Prefer counts for the comparison when available
        if( countsSuppled ) {
            j1 <- jaccardSimilarity(
                isoformCountMatrix$isoform_id,
                isoformAnnotation$isoform_id
            )
            expIso <- isoformCountMatrix$isoform_id
        } else {
            j1 <- jaccardSimilarity(
                isoformRepExpression$isoform_id,
                isoformAnnotation$isoform_id
            )
            expIso <- isoformRepExpression$isoform_id
        }

        jcCutoff <- 0.925
        # Quantified isoforms with no annotation at all
        onlyInExp <- setdiff(expIso, isoformAnnotation$isoform_id)

        if (j1 != 1 ) {
            # Fatal: similarity too low OR quantified isoforms lack annotation
            if( j1 < jcCutoff | length(onlyInExp) ) {
                options(warning.length = 2000L)
                stop(
                    paste(
                        'The annotation and quantification (count/abundance matrix and isoform annotation)',
                        'seems to be different (Jaccard similarity < 0.925).',
                        '\nEither isforoms found in the annotation are',
                        'not quantifed or vise versa.',
                        '\nSpecifically:\n',
                        length(unique(expIso)), 'isoforms were quantified.\n',
                        length(unique(isoformAnnotation$isoform_id)), 'isoforms are annotated.\n',
                        'Only', length(intersect(expIso, isoformAnnotation$isoform_id)), 'overlap.\n',
                        length(setdiff(unique(expIso), isoformAnnotation$isoform_id)), 'isoforms quantifed had no corresponding annoation\n',
                        '\nThis combination cannot be analyzed since it will',
                        'cause discrepencies between quantification and annotation thereby skewing all analysis.\n',
                        '\nIf there is no overlap (as in zero or close) there are two options:\n',
                        '1) The files do not fit together (e.g. different databases, versions, etc)',
                        '(no fix except using propperly paired files).\n',
                        '2) It is somthing to do with how the isoform ids are stored in the different files.',
                        'This problem might be solvable using some of the',
                        '\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n',
                        ' Examples from expression matrix are :',
                        paste0( sample(expIso, min(c(3, length(expIso)))), collapse = ', '),'\n',
                        ' Examples of annoation are :',
                        paste0( sample(isoformAnnotation$isoform_id, min(c(3, length(isoformAnnotation$isoform_id)))), collapse = ', '),'\n',
                        ' Examples of isoforms which were only found im the quantification are :',
                        paste0( sample(onlyInExp, min(c(3, length(onlyInExp)))), collapse = ', '),'\n',
                        '\nIf there is a large overlap but still far from complete there are 3 possibilites:\n',
                        '1) The files do not fit together (e.g different databases versions etc.)',
                        '(no fix except using propperly paired files).\n',
                        '2) If you are using Ensembl data you have supplied the GTF without phaplotyps. You need to supply the',
                        '<Ensembl_version>.chr_patch_hapl_scaff.gtf file - NOT the <Ensembl_version>.chr.gtf\n',
                        '3) One file could contain non-chanonical chromosomes while the other do not',
                        '(might be solved using the \'removeNonConvensionalChr\' argument.)\n',
                        '4) It is somthing to do with how a subset of the isoform ids are stored in the different files.',
                        'This problem might be solvable using some of the',
                        '\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n\n',
                        '\nFor more info see the FAQ in the vignette.\n',
                        sep=' '
                    )
                )
            }
            # Non-fatal: warn and reduce all objects to the shared isoforms
            if( j1 >= jcCutoff ) {
                warning(
                    paste(
                        'The annotation and quantification (count/abundance matrix and isoform annotation)',
                        'Seem to be slightly different.',
                        '\nSpecifically:\n',
                        length(setdiff(isoformAnnotation$isoform_id, unique(expIso))), 'isoforms were only found in the annotation\n',
                        '\nPlease make sure this is on purpouse since differences',
                        'will cause inaccurate quantification and thereby skew all analysis.\n',
                        'If you have quantified with Salmon this could be normal since it as default only keep one copy of identical sequnces (can be prevented using the --keepDuplicates option)\n',
                        'We strongly encurage you to go back and figure out why this is the case.\n\n',
                        sep=' '
                    )
                )

                ### Reduce to those found in all
                if( countsSuppled ) {
                    isoformsUsed <- intersect(
                        isoformCountMatrix$isoform_id,
                        isoformAnnotation$isoform_id
                    )
                } else {
                    isoformsUsed <- intersect(
                        isoformRepExpression$isoform_id,
                        isoformAnnotation$isoform_id
                    )
                }

                isoformExonStructure <- isoformExonStructure[which(
                    isoformExonStructure$isoform_id %in% isoformsUsed
                ), ]
                isoformAnnotation <-isoformAnnotation[which(
                    isoformAnnotation$isoform_id %in% isoformsUsed
                ), ]

                if( countsSuppled ) {
                    isoformCountMatrix <-isoformCountMatrix[which(
                        isoformCountMatrix$isoform_id %in% isoformsUsed
                    ), ]
                }
                if( abundSuppled ) {
                    isoformRepExpression <-isoformRepExpression[which(
                        isoformRepExpression$isoform_id %in% isoformsUsed
                    ), ]
                }
            }
        }
    }
}
    ### Subset the design matrix and expression matrices to the samples that
    ### actually take part in the requested comparisons, and reorder columns
    ### so 'isoform_id' always comes first.
    if (TRUE) {
        # Keep only samples belonging to a condition used in a comparison
        designMatrix <-
            designMatrix[which(
                designMatrix$condition %in% c(
                    comparisonsToMake$condition_1,
                    comparisonsToMake$condition_2
                )
            ), ]

        if( countsSuppled ) {
            isoformCountMatrix <-
                isoformCountMatrix[, which(
                    colnames(isoformCountMatrix) %in%
                        c('isoform_id', designMatrix$sampleID))]
            # Move 'isoform_id' to the first column
            isoformCountMatrix <-
                isoformCountMatrix[,c(
                    which(colnames(isoformCountMatrix) == 'isoform_id'),
                    which(colnames(isoformCountMatrix) != 'isoform_id')
                )]
            rownames(isoformCountMatrix) <- NULL
        }

        if ( abundSuppled ) {
            isoformRepExpression <-
                isoformRepExpression[, which(
                    colnames(isoformRepExpression) %in%
                        c('isoform_id', designMatrix$sampleID)
                )]
            # Move 'isoform_id' to the first column
            isoformRepExpression <-
                isoformRepExpression[,c(
                    which(colnames(isoformRepExpression) == 'isoform_id'),
                    which(colnames(isoformRepExpression) != 'isoform_id')
                )]
            rownames(isoformRepExpression) <- NULL
        }
    }
    ### Remove isoforms with zero signal in every remaining sample; they add
    ### nothing to the analysis and would only slow the downstream steps.
    if (TRUE) {
        # Identify isoforms with any expression (prefer counts when supplied)
        if( countsSuppled ) {
            okIsoforms <- isoformCountMatrix$isoform_id[which(
                rowSums(isoformCountMatrix[,which( colnames(isoformCountMatrix) != 'isoform_id')]) > 0
            )]
            nTot <- nrow(isoformCountMatrix)
        } else {
            okIsoforms <-isoformRepExpression$isoform_id[which(
                rowSums(isoformRepExpression[,which( colnames(isoformRepExpression) != 'isoform_id')]) > 0
            )]
            nTot <- nrow(isoformRepExpression)
        }

        nOk <- length(okIsoforms)
        if( nOk != nTot ) {
            if (!quiet) {
                ### Message
                message(
                    paste(
                        ' ',
                        nTot - nOk,
                        paste0( '( ', round( (nTot - nOk) / nTot *100, digits = 2),'%)'),
                        'isoforms were removed since they were not expressed in any samples.'
                    )
                )
            }

            ### Subset expression
            if(abundSuppled) {
                isoformRepExpression <- isoformRepExpression[which(
                    isoformRepExpression$isoform_id %in% okIsoforms
                ),]
            }
            if(countsSuppled) {
                isoformCountMatrix <- isoformCountMatrix[which(
                    isoformCountMatrix$isoform_id %in% okIsoforms
                ),]
            }

            ### Annotation (keep structure, metadata and ORFs in sync)
            isoformExonStructure <- isoformExonStructure[which( isoformExonStructure$isoform_id %in% okIsoforms),]
            isoformAnnotation <- isoformAnnotation[which( isoformAnnotation$isoform_id %in% okIsoforms),]

            if (addAnnotatedORFs & gtfImported) {
                isoORF <- isoORF[which( isoORF$isoform_id %in% okIsoforms),]
            }
        }
    }
### Handle sequence input
if(TRUE) {
addIsoformNt <- FALSE
if(!is.null(isoformNtFasta)) {
isoformNtSeq <- do.call(
c,
lapply(isoformNtFasta, function(aFile) {
Biostrings::readDNAStringSet(
filepath = isoformNtFasta, format = 'fasta'
)
})
)
if(!is(isoformNtSeq, "DNAStringSet")) {
stop('The fasta file supplied to \'isoformNtFasta\' does not contain the nucleotide (DNA) sequence...')
}
### Fix names
if( ignoreAfterBar | ignoreAfterSpace | ignoreAfterPeriod) {
names(isoformNtSeq) <- fixNames(
nameVec = names(isoformNtSeq),
ignoreAfterBar = ignoreAfterBar,
ignoreAfterSpace = ignoreAfterSpace,
ignoreAfterPeriod = ignoreAfterPeriod
)
}
### Subset to used
isoSeqNames <- names(isoformNtSeq)
isoformNtSeq <- isoformNtSeq[which(
names(isoformNtSeq) %in% isoformCountMatrix$isoform_id
)]
### Remove potential duplication
isoformNtSeq <- isoformNtSeq[which(
! duplicated(names(isoformNtSeq))
)]
if(length(isoformNtSeq) == 0) {
stop(
paste(
'No sequences in the fasta files had IDs matching the expression data.',
'This problem might be solvable using some of the',
'\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n',
' 3 Examples from expression matrix are :',
paste0( sample(unique(isoformCountMatrix$isoform_id), min(c(3, length(isoformCountMatrix$isoform_id))) ), collapse = ', '),'\n',
' 3 Examples of sequence annotation are :',
paste0( sample(isoSeqNames, min(c(3, length( isoSeqNames ))) ), collapse = ', '),'\n',
sep = ' '
)
)
}
if( ! all( isoformCountMatrix$isoform_id %in% names(isoformNtSeq) ) ) {
options(warning.length = 2000L)
warning(
paste(
'The fasta file supplied to \'isoformNtFasta\' does not contain the',
'nucleotide (DNA) sequence for all isoforms quantified and will not be added!',
'\nSpecifically:\n',
length(unique(isoformCountMatrix$isoform_id)), 'isoforms were quantified.\n',
length(unique(names(isoformNtSeq))), 'isoforms have a sequence.\n',
'Only', length(intersect(names(isoformNtSeq), isoformCountMatrix$isoform_id)), 'overlap.\n',
length(setdiff(unique(isoformCountMatrix$isoform_id), names(isoformNtSeq))), 'isoforms quantifed isoforms had no corresponding nucleotide sequence\n',
'\nIf there is no overlap (as in zero or close) there are two options:\n',
'1) The files do not fit together (different databases, versions etc)',
'(no fix except using propperly paired files).\n',
'2) It is somthing to do with how the isoform ids are stored in the different files.',
'This problem might be solvable using some of the',
'\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n',
' 3 Examples from expression matrix are :',
paste0( sample(unique(isoformCountMatrix$isoform_id), min(c(3, length(isoformCountMatrix$isoform_id))) ), collapse = ', '),'\n',
' 3 Examples of sequence annotation are :',
paste0( sample(names(isoformNtSeq), min(c(3, length( isoformNtSeq ))) ), collapse = ', '),'\n',
'\nIf there is a large overlap but still far from complete there are 3 possibilites:\n',
'1) The files do not fit together (different databases versions)',
'(no fix except using propperly paired files).\n',
'2) The isoforms quantified have their nucleotide sequence stored in multiple fasta files (common for Ensembl).',
'Just supply a vector with the path to each of them to the \'isoformNtFasta\' argument.\n',
'3) One file could contain non-chanonical chromosomes while the other do not',
'(might be solved using the \'removeNonConvensionalChr\' argument.)\n',
'4) It is somthing to do with how a subset of the isoform ids are stored in the different files.',
'This problem might be solvable using some of the',
'\'ignoreAfterBar\', \'ignoreAfterSpace\' or \'ignoreAfterPeriod\' arguments.\n\n',
sep = ' '
)
)
} else {
addIsoformNt <- TRUE
}
}
}
### Rescue StringTie gene annotation
if(TRUE) {
if (!quiet) { message('Step 3 of 10: Fixing StringTie gene annoation problems...')}
        ### Add original gene_ids already assigned to gene_id back to
        ### ref_gene_id: isoforms that have a gene_name but no ref_gene_id
        ### get their current gene_id copied into ref_gene_id.
        if(TRUE) {
            indexToModify <- which(
                is.na( isoformAnnotation$ref_gene_id ) &
                    ! is.na( isoformAnnotation$gene_name )
            )
            isoformAnnotation$ref_gene_id[indexToModify] <- isoformAnnotation$gene_id[indexToModify]
        }
### Assign isoforms to ref_gene_id and gene_names
if( fixStringTieAnnotationProblem ) {
### variables for messages
anyFixed1 <- FALSE
anyFixed2 <- FALSE
anyFixed3 <- FALSE
anyFixed4 <- FALSE
            ### Fix missing ref_gene_id in two passes:
            ###  1) trivial case - the parent gene_id only contains one
            ###     ref_gene_id, so all its isoforms inherit it.
            ###  2) hard case - the gene_id mixes multiple ref_gene_ids; novel
            ###     isoforms are assigned via genomic exon overlap with the
            ###     annotated isoforms (subject to the fixStringTie* cutoffs).
            if( any(is.na(isoformAnnotation$ref_gene_id)) ) {
                ### Fix simple missing ref_gene_id (within single ref_gene_id gene_id)
                if( TRUE ) {
                    ### Make list with ref_gene_id (same order)
                    geneNameList <- split(isoformAnnotation$ref_gene_id, isoformAnnotation$gene_id)
                    geneNameList <- geneNameList[unique(isoformAnnotation$gene_id)]

                    ### Count problems (before the fix, for the message below)
                    nIsoWihoutNames <- sum(
                        is.na(isoformAnnotation$ref_gene_id)
                    )

                    ### Add ref_gene_ids to novel StringTie transcripts when possible (only 1 ref_gene_id candidate)
                    # unlist() preserves the per-gene order so assignment by
                    # position is safe here.
                    isoformAnnotation$ref_gene_id <- unlist(
                        lapply(
                            geneNameList,
                            function(geneNameVec) {
                                localGeneNames <- unique(na.omit( geneNameVec ))

                                if( length( localGeneNames ) == 1 ) {
                                    return(
                                        rep(localGeneNames, times = length(geneNameVec))
                                    )
                                } else {
                                    return(geneNameVec)
                                }
                            }
                        )
                    )

                    ### Re-count problems
                    nIsoWihoutNames2 <- sum(
                        is.na(isoformAnnotation$ref_gene_id)
                    )

                    anyFixed1 <- nIsoWihoutNames - nIsoWihoutNames2 > 0
                    if( anyFixed1 ) {
                        if (!quiet) {
                            message(
                                paste(
                                    '    ',
                                    nIsoWihoutNames - nIsoWihoutNames2,
                                    ' isoforms were assigned the ref_gene_id and gene_name of their associated gene_id.',
                                    '\n    This was only done when the parent gene_id were associated with a single ref_gene_id/gene_name.',
                                    #'\n',
                                    sep = ''
                                )
                            )
                        }
                    }
                }

                ### Fix non-simple missing ref_gene_ids (within multi ref_gene_ids gene_id)
                if(TRUE) {
                    ### Identify gene_ids with problems
                    if(TRUE) {
                        ### Summarize gene properties
                        multiGeneDf <-
                            isoformAnnotation %>%
                            dplyr::select(isoform_id, gene_id, ref_gene_id) %>%
                            dplyr::distinct()

                        geneNameSummary <-
                            multiGeneDf %>%
                            group_by(gene_id) %>%
                            dplyr::summarise(
                                n_ref_gene_ids = n_distinct(na.omit(ref_gene_id)),
                                n_iso_na = sum(is.na(ref_gene_id)),
                                .groups = 'drop'
                            )

                        ### Identify genes with both missing and multiple gene names
                        multiGenesWithNa <-
                            geneNameSummary %>%
                            dplyr::filter(
                                n_ref_gene_ids >= 2 &    # at least two genes
                                    n_iso_na > 0         # no novel once
                            )
                    }

                    ### Rescue via genomic overlap of known isoforms (aka with ref_gene_id annotation)
                    if(nrow(multiGenesWithNa) & fixStringTieViaOverlapInMultiGenes) {
                        ### Extract all isoforms with problems
                        genesWithProblems <-
                            multiGeneDf %>%
                            dplyr::filter(gene_id %in% multiGenesWithNa$gene_id)

                        ### Extract exons of interest
                        exonsOi <- isoformExonStructure[which(
                            isoformExonStructure$isoform_id %in% genesWithProblems$isoform_id
                        ),]

                        ### Modify seqnames to ensure only searching within same gene_id?
                        # Using gene_id as seqnames restricts findOverlaps()
                        # to isoforms of the same (merged) gene.
                        exonsOi <- GRanges(
                            seqnames = exonsOi$gene_id,
                            ranges = IRanges(
                                start = BiocGenerics::start(exonsOi),
                                end = BiocGenerics::end(exonsOi)
                            ),
                            strand = exonsOi@strand,
                            isoform_id = exonsOi$isoform_id,
                            gene_id = exonsOi$gene_id
                        )

                        ### Convert to list
                        exonsOiList <- split(exonsOi, exonsOi$isoform_id)

                        ### Devide into novel and known
                        knownIsoforms <- genesWithProblems$isoform_id[which(
                            ! is.na(genesWithProblems$ref_gene_id)
                        )]
                        novelIsoforms <- genesWithProblems$isoform_id[which(
                            is.na(genesWithProblems$ref_gene_id)
                        )]

                        knownList <- exonsOiList[ knownIsoforms ]
                        novelList <- exonsOiList[ novelIsoforms ]

                        novelLength <- sapply(width(novelList), sum)

                        ### Identify overlapping isoforms
                        novelIsoOverlap <- findOverlaps(query = novelList, subject = knownList)

                        novelIsoOverlapDf <- as.data.frame(novelIsoOverlap)
                        novelIsoOverlapDf$novel_iso <- names(novelList)[novelIsoOverlapDf$queryHits]
                        novelIsoOverlapDf$known_iso <- names(knownList)[novelIsoOverlapDf$subjectHits]
                        novelIsoOverlapDf <- novelIsoOverlapDf[,c('novel_iso','known_iso')]

                        novelIsoOverlapDf$known_ref_gene_id <- genesWithProblems$ref_gene_id[match(
                            novelIsoOverlapDf$known_iso, genesWithProblems$isoform_id
                        )]

                        ### Calculate overlap (nt and fraction of novel isoform)
                        novelOverlap <- intersect(novelList[queryHits(novelIsoOverlap)], knownList[subjectHits(novelIsoOverlap)])
                        novelIsoOverlapDf$nt_overlap <- sapply(width(novelOverlap), sum)

                        novelIsoOverlapDf$novel_length <- novelLength[match(
                            novelIsoOverlapDf$novel_iso, names(novelLength)
                        )]

                        novelIsoOverlapDf$frac_overlap <- novelIsoOverlapDf$nt_overlap / novelIsoOverlapDf$novel_length

                        ### For each novel isoform assign gene_name via cutoffs
                        novelAssigned <-
                            novelIsoOverlapDf %>%
                            as_tibble() %>%
                            group_by(novel_iso, known_ref_gene_id) %>%
                            ### For each known_gene extract top contender
                            dplyr::arrange(dplyr::desc(nt_overlap), .by_group = TRUE) %>%
                            dplyr::slice(1L) %>%
                            ### For each isoform calculate ratios betwen top genes
                            group_by(novel_iso) %>%
                            dplyr::arrange(dplyr::desc(nt_overlap), .by_group = TRUE) %>%
                            mutate(
                                log2_overlap_ratio = c(
                                    log2( nt_overlap[-length(nt_overlap)] / nt_overlap[-1] ),
                                    Inf # assign Inf if only 1 gene is overlapping
                                )
                            ) %>%
                            ### For each isoform Filter
                            dplyr::filter(
                                nt_overlap >= fixStringTieMinOverlapSize,
                                frac_overlap >= fixStringTieMinOverlapFrac,
                                log2_overlap_ratio >= fixStringTieMinOverlapLog2RatioToContender
                            ) %>%
                            ### For each isoform : Extract top contender
                            dplyr::slice(1L)

                        ### Modify annoation
                        toModify <- which(isoformAnnotation$isoform_id %in% novelAssigned$novel_iso)

                        isoformAnnotation$ref_gene_id[toModify] <- novelAssigned$known_ref_gene_id[match(
                            isoformAnnotation$isoform_id[toModify], novelAssigned$novel_iso
                        )]

                        ### Redo problem calculations
                        nIsoWihoutNames3 <- sum(
                            is.na(isoformAnnotation$ref_gene_id)
                        )

                        anyFixed2 <- nIsoWihoutNames2 - nIsoWihoutNames3
                        if( anyFixed2) {
                            if (!quiet) {
                                message(
                                    paste(
                                        '    ',
                                        nIsoWihoutNames2 - nIsoWihoutNames3,
                                        ' isoforms were assigned the ref_gene_id and gene_name of the most similar',
                                        '\n    annotated isoform (defined via overlap in genomic exon coordinates).',
                                        '\n    This was only done if the overlap met the requriements',
                                        '\n    indicated by the three fixStringTieViaOverlap* arguments.',
                                        #'\n',
                                        sep = ''
                                    )
                                )
                            }
                        }
                    }
                }
            }
            ### Remove non-assigned isoforms within known genes: if a gene_id
            ### contains both annotated (ref_gene_id) and unassigned (NA)
            ### isoforms, the unassigned ones are dropped so the remaining
            ### isoforms of the merged gene can still be analyzed.
            if(TRUE) {
                genesWithProblems <-
                    isoformAnnotation %>%
                    dplyr::select(gene_id, ref_gene_id) %>%
                    dplyr::distinct() %>%
                    group_by(gene_id) %>%
                    dplyr::summarise(
                        has_ref_gene_id = any(! is.na(ref_gene_id)),
                        has_novel_iso   = any(  is.na(ref_gene_id)),
                        .groups = 'drop'
                    ) %>%
                    dplyr::filter(
                        has_ref_gene_id,
                        has_novel_iso
                    )

                ### Extract isoforms to remove
                isoToRemove <- isoformAnnotation$isoform_id[which(
                    isoformAnnotation$gene_id %in% genesWithProblems$gene_id &
                        is.na(isoformAnnotation$ref_gene_id)
                )]

                anyFixed3 <- length(isoToRemove) > 0

                if(length(isoToRemove)) {
                    ### Remove from annotation and (when present) expression data
                    isoformAnnotation <- isoformAnnotation[which(
                        ! isoformAnnotation$isoform_id %in% isoToRemove
                    ),]
                    isoformExonStructure <- isoformExonStructure[which(
                        ! isoformExonStructure$isoform_id %in% isoToRemove
                    ),]

                    if(! is.null(isoformCountMatrix)) {
                        isoformCountMatrix <- isoformCountMatrix[which(
                            ! isoformCountMatrix$isoform_id %in% isoToRemove
                        ),]
                    }
                    if(! is.null(isoformRepExpression)) {
                        isoformRepExpression <- isoformRepExpression[which(
                            ! isoformRepExpression$isoform_id %in% isoToRemove
                        ),]
                    }

                    ### Write message
                    if (!quiet) {
                        message(
                            paste(
                                '    We were unable to assign', length(isoToRemove),
                                'isoforms (located within annotated genes) to a known ref_gene_id/gene_name.',
                                '\n    These were removed to enable analysis of the rest of the isoform from within the merged genes.'
                            )
                        )
                    }
                }
            }
            ### Split gene_ids of gene_id with mutiple gene_names: a StringTie
            ### gene_id covering several ref_gene_ids (all isoforms assigned)
            ### is split into one gene per ref_gene_id by concatenating
            ### '<gene_id>:<ref_gene_id>'.
            if(TRUE) {
                ### Summarize problem
                if(TRUE) {
                    multiGeneDf <-
                        isoformAnnotation %>%
                        dplyr::select(isoform_id, gene_id, ref_gene_id) %>%
                        dplyr::distinct()

                    multiGenes <-
                        multiGeneDf %>%
                        group_by(gene_id) %>%
                        dplyr::summarise(
                            n_ref_gene_ids = n_distinct(na.omit(ref_gene_id)),
                            n_iso_na = sum(is.na(ref_gene_id)),
                            .groups = 'drop'
                        ) %>%
                        dplyr::filter(
                            n_ref_gene_ids >= 2 &    # at least two genes
                                n_iso_na == 0        # no novel once
                        )

                    nProblems <- nrow(multiGenes)
                }

                ### Split gene_ids
                if(nrow(multiGenes)) {
                    ### Extract corresponding iso data
                    multiGeneDf <-
                        multiGeneDf %>%
                        dplyr::filter(gene_id %in% multiGenes$gene_id)

                    ### Create new gene_ids (by merging with ref_gene_id)
                    multiGeneDf$new_gene_id <- stringr::str_c(
                        multiGeneDf$gene_id,
                        ':',
                        multiGeneDf$ref_gene_id
                    )

                    ### Overwrite in annotation
                    indexToModify <- which(
                        isoformAnnotation$gene_id %in% multiGeneDf$gene_id
                    )
                    isoformAnnotation$gene_id[indexToModify] <-
                        multiGeneDf$new_gene_id[match(
                            isoformAnnotation$isoform_id[indexToModify], multiGeneDf$isoform_id
                        )]

                    ### Overwrite ref_gene_id and gene_ids in exon annotation
                    isoformExonStructure$gene_id <- isoformAnnotation$gene_id[match(
                        isoformExonStructure$isoform_id, isoformAnnotation$isoform_id
                    )]
                }

                ### Message summary
                if(TRUE) {
                    ### Redo problem calculations (should now find fewer/none)
                    multiGenes <-
                        isoformAnnotation %>%
                        dplyr::select(gene_id, ref_gene_id) %>%
                        dplyr::distinct() %>%
                        group_by(gene_id) %>%
                        dplyr::summarise(
                            n_ref_gene_ids = n_distinct(na.omit(ref_gene_id)),
                            n_iso_na = sum(is.na(ref_gene_id)),
                            .groups = 'drop'
                        ) %>%
                        dplyr::filter(
                            n_ref_gene_ids >= 2 &    # at least two genes
                                n_iso_na == 0        # no novel once
                        )

                    nProblems2 <- nrow(multiGenes)

                    anyFixed4 <- nProblems - nProblems2 > 0
                    if( anyFixed4 ) {
                        if (!quiet) {
                            message(
                                paste(
                                    '    ',
                                    nProblems - nProblems2 ,
                                    ' gene_ids which were associated with multiple ref_gene_id/gene_names',
                                    '\n    were split into mutliple genes via their ref_gene_id/gene_names.',
                                    #'\n',
                                    sep = ''
                                )
                            )
                        }
                    }
                }
            }
            ### Generalize ref_gene_id assignment to gene_names and update both
            ### annotation objects: look up each ref_gene_id's gene_name and
            ### propagate it to the metadata and exon annotation.
            if(TRUE) {
                # Lookup table: ref_gene_id -> gene_name (non-NA names only)
                geneNameDf <-
                    isoformAnnotation %>%
                    dplyr::select(gene_name, ref_gene_id) %>%
                    dplyr::filter(!is.na(gene_name)) %>%
                    dplyr::distinct()

                isoformAnnotation$gene_name <- geneNameDf$gene_name[match(
                    isoformAnnotation$ref_gene_id, geneNameDf$ref_gene_id
                )]

                # Keep the exon annotation in sync with the metadata
                isoformExonStructure$gene_name <- isoformAnnotation$gene_name[match(
                    isoformExonStructure$isoform_id, isoformAnnotation$isoform_id
                )]
            }
            # Inform the user when none of the four rescue steps changed anything
            if(
                ! anyFixed1 &
                ! anyFixed2 &
                ! anyFixed3 &
                ! anyFixed4
            ) {
                message(
                    paste(
                        '    There were no need to rescue any annotation',
                        sep = ' '
                    )
                )
            }
            ### Overwrite with original gene ids if doable: replace StringTie
            ### gene_ids by their ref_gene_id, but only for gene_ids that map
            ### unambiguously to exactly one ref_gene_id.
            if('ref_gene_id' %in% colnames(isoformAnnotation)) {
                ### Figure out those with potential
                geneIdsWithRef <- isoformAnnotation$gene_id[which(
                    ! is.na(isoformAnnotation$ref_gene_id)
                )]

                ### Devide
                isoAnnotAlreadCorrect <-
                    isoformAnnotation %>%
                    dplyr::filter(! gene_id %in% geneIdsWithRef)

                isoAnnotToCorrect <-
                    isoformAnnotation %>%
                    dplyr::filter(gene_id %in% geneIdsWithRef)

                ### Figure out which one can be corrected
                # n_ref = number of distinct ref_gene_ids within the gene_id
                isoAnnotToCorrect <-
                    isoAnnotToCorrect %>%
                    dplyr::group_by(gene_id) %>%
                    dplyr::mutate(
                        n_ref = n_distinct(na.omit(ref_gene_id))
                    ) %>%
                    dplyr::ungroup()

                ### Devide into those that can be corrected and those that cannot
                isoAnnotCannotBeCorrected <-
                    isoAnnotToCorrect %>%
                    dplyr::filter(n_ref != 1)

                isoAnnotCanBeCorrected <-
                    isoAnnotToCorrect %>%
                    dplyr::filter(n_ref == 1)

                if(nrow(isoAnnotCanBeCorrected)) {
                    message(
                        paste(
                            '    ',
                            length(unique(isoAnnotCanBeCorrected$gene_id)),
                            ' genes_id were assigned their original gene_id instead of the StringTie gene_id.',
                            '\n    This was only done when it could be done unambiguous.',
                            #'\n',
                            sep = ''
                        )
                    )
                }

                ### Correct annnotation (overwrite gene_id with the unique ref_gene_id)
                isoAnnotCorrected <-
                    isoAnnotCanBeCorrected %>%
                    dplyr::group_by(gene_id) %>%
                    dplyr::mutate(
                        gene_id = unique(na.omit(ref_gene_id))
                    ) %>%
                    dplyr::ungroup()

                # Drop the helper column before recombining
                isoAnnotCorrected$n_ref <- NULL
                isoAnnotCannotBeCorrected$n_ref <- NULL

                ### Combine the 3 datafames
                isoformAnnotationCorrected <- rbind(
                    isoAnnotAlreadCorrect,
                    isoAnnotCorrected,
                    isoAnnotCannotBeCorrected
                )

                ### Reorder (restore the original isoform order)
                isoformAnnotationCorrected <-
                    isoformAnnotationCorrected[match(
                        isoformAnnotation$isoform_id,
                        isoformAnnotationCorrected$isoform_id
                    ),]
                isoformAnnotationCorrected$ref_gene_id <- NULL

                ### Overwrite
                isoformAnnotation <- isoformAnnotationCorrected

                isoformExonStructure$gene_id <- isoformAnnotation$gene_id[match(
                    isoformExonStructure$isoform_id, isoformAnnotation$isoform_id
                )]
            }
}
        # User opted out of the StringTie annotation rescue
        if( ! fixStringTieAnnotationProblem ) {
            if (!quiet) { message('    Was skipped as instructed via the \"fixStringTieAnnotationProblem\" argument...')}
        }
}
    ### If necessary calculate RPKM values: when the user only supplied
    ### counts, derive RPKM/FPKM from counts and summed exon lengths
    ### (CPM divided by transcript length in kb).
    if(TRUE) {
        if (!quiet) { message('Step 4 of 10: Calculating expression estimates from count data...') }

        if ( ! abundSuppled ) {
            ### Extract isoform lengths (sum of exon widths per isoform)
            isoformLengths <- sapply(
                X = split(
                    isoformExonStructure@ranges@width,
                    f = isoformExonStructure$isoform_id
                ),
                FUN = sum
            )

            ### Calulate CPM (counts per million, library-size normalized)
            # convert to matrix
            localCM <- isoformCountMatrix
            rownames(localCM) <- localCM$isoform_id
            localCM$isoform_id <- NULL
            localCM <- as.matrix(localCM)

            myCPM <- t(t(localCM) / colSums(localCM)) * 1e6

            ### Calculate RPKM = CPM / (length in kb)
            isoformLengths <-
                isoformLengths[match(rownames(myCPM), names(isoformLengths))]

            isoformRepExpression <-
                as.data.frame(myCPM / (isoformLengths / 1e3))

            ### Massage back to the 'isoform_id'-first data.frame layout
            isoformRepExpression$isoform_id <-
                rownames(isoformRepExpression)
            isoformRepExpression <-
                isoformRepExpression[, c(
                    which(colnames(isoformRepExpression) == 'isoform_id'),
                    which(colnames(isoformRepExpression) != 'isoform_id')
                )]
            rownames(isoformRepExpression) <- NULL
        }
        if ( abundSuppled ) {
            if (!quiet) { message('    Skipped as user supplied expression via the \"isoformRepExpression\" argument...')}
        }
        # isoformRepExpression
    }
### Run SVA
if(TRUE) {
if (!quiet) { message('Step 5 of 10: Testing for unwanted effects...') }
### Ensure there are enougth samples to run SVA
enougthSamples <- min(table(designMatrix$condition)) >= 2
if( ! enougthSamples ) {
if (!quiet) { message(' Data was not corrected for unwanted effects since there are to few samples') }
}
if( enougthSamples ) {
### Massage
isoformRepExpressionLog <- isoformRepExpression
rownames(isoformRepExpressionLog) <- isoformRepExpressionLog$isoform_id
isoformRepExpressionLog$isoform_id <- NULL
isoformRepExpressionLog <- log2(isoformRepExpressionLog + 1)
### Filter on expression
smallestGroup <- min(table(designMatrix$condition))
if(smallestGroup > 10) {
smallestGroup <- smallestGroup * 0.7
}
minSamples <- max(c(
2,
smallestGroup
))
isoformRepExpressionLogFilt <- isoformRepExpressionLog[which(
rowSums( isoformRepExpressionLog > log2(1) ) >= minSamples
),]
### Make model matrix (which also take additional factors into account)
if(TRUE) {
localDesign <- designMatrix
### Convert group of interest to factors
localDesign$condition <- factor(
localDesign$condition,
levels=unique(localDesign$condition)
)
localFormula <- '~ 0 + condition'
### Check co-founders for group vs continous variables and add to fomula
if( ncol(localDesign) > 2 ) {
for(i in 3:ncol(localDesign) ) { # i <- 4
if( class(localDesign[,i]) %in% c('numeric', 'integer') ) {
if( uniqueLength( localDesign[,i] ) * 2 <= length(localDesign[,i]) ) {
localDesign[,i] <- factor(localDesign[,i])
}
} else {
localDesign[,i] <- factor(localDesign[,i])
}
}
### Make formula for model
localFormula <- paste(
localFormula,
'+',
paste(
colnames(localDesign)[3:ncol(localDesign)],
collapse = ' + '
),
sep=' '
)
}
localFormula <- as.formula(localFormula)
### Make model
localModel <- model.matrix(localFormula, data = localDesign)
indexToModify <- 1:length(unique( localDesign$condition ))
colnames(localModel)[indexToModify] <- gsub(
pattern = '^condition',
replacement = '',
x = colnames(localModel)[indexToModify]
)
### Test model
if( ! limma::is.fullrank(localModel) ) {
stop('The design matrix suggested by the "designMatrix" is not full rank and hence cannot be analyzed.')
}
}
# localModel
### Estimate SVAs
nSv = sva::num.sv(
dat = isoformRepExpressionLogFilt,
mod = localModel,
)
svaAdded <- FALSE
### Test if to may SVAs
if(TRUE) {
nSvaCutoff <- min(c(
10,
nrow(designMatrix) * 0.5
))
skipSvas <- nSv > nSvaCutoff
if(skipSvas) {
nSv <- 0
}
}
### Run SVA if necessary
if( nSv > 0 & detectUnwantedEffects ) {
### Run SVA
tmp <- capture.output(
localSv <- tryCatch({
sva::sva(
dat = as.matrix(isoformRepExpressionLogFilt),
mod = localModel,
n.sv = nSv
)$sv
}, error = {
function(x) {
NULL
}
})
)
### Test SVs
if( ! is.null(localSv) ) {
### Test for just diagnoal
notDiagonal <- which( apply(
localSv,
MARGIN = 2,
function(x) {
sum( x != 1 ) > 1 & sum( x != 0 ) > 1
}
) )
### Filter for correlation
notToHighCor <- which( apply(
localSv,
MARGIN = 2,
function(x) {
abs(cor(
x,
as.integer(as.factor(designMatrix$condition))
)) < 0.8
}
) )
## Subset
okSvs <- intersect(notDiagonal, notToHighCor)
}
if( is.null(localSv) ) {
okSvs <- integer()
}
### Add to design
if( length(okSvs) > 0 ) {
### Subset
localSv <- localSv[,okSvs,drop=FALSE]
colnames(localSv) <- paste0(
'sv', 1:ncol(localSv)
)
### Add SVs
designMatrix <- cbind(
designMatrix,
localSv
)
svaAdded <- TRUE
}
}
if( nSv > 0 & ! detectUnwantedEffects ) {
if (!quiet) { message(' Skipped due to \"detectUnwantedEffects=FALSE\". ') }
warning(
paste(
' We detected', nSv, 'batch/covariates in your data.',
'\n These will not be corrected in any downstream analysis due to \"detectUnwantedEffects=FALSE\". ',
'\n Unless you REALLY know what you are doing we recomend setting \"detectUnwantedEffects=TRUE\"'
)
)
}
### Send messages
if( ! skipSvas ) {
if( svaAdded ) {
if (!quiet) { message(paste(' Added', length(okSvs), 'batch/covariates to the design matrix')) }
}
if( ! svaAdded ) {
if( exists('localSv' )) {
if( is.null(localSv) ) {
if (!quiet) { message(c(
' \n SVA analysis failed. No unwanted effects were added.'
)) }
warning(paste0(
'\n',
'There were estimated unwanted effects in your dataset but the automatic sva run failed.',
'\n We highly reccomend you run sva yourself, add the nessesary surrogate variables',
'\n as extra columns in the \"designMatrix\" and re-run this function',
'\n'
))
} else {
if (!quiet) { message(' No unwanted effects added') }
}
} else {
if (!quiet) { message(' No unwanted effects added') }
}
}
}
if( skipSvas ) {
if( ! svaAdded ) {
if (!quiet) { message(' Data was not corrected for unwanted effects') }
}
warning(paste0(
'\n',
'We found MANY unwanted effects in your dataset!',
'\nTo many for IsoformSwitchAnalyzeR to be trusted with the correction.',
'\nWe therefore highly reccomend you run sva yourself and add',
'\nthe nessesary surrogate variables as extra columns in the \"designMatrix\"',
'\n'
))
}
}
}
    ### Batch correct if necessary: remove the effect of the extra design
    ### columns (covariates/SVs) from the expression values with
    ### limma::removeBatchEffect() on the log2 scale, then back-transform.
    if(TRUE) {
        if (!quiet) { message('Step 6 of 10: Batch correcting expression estimates...') }

        # Correction is only meaningful when covariates exist and SVA ran
        batchCorrectionNeeded <- ncol(designMatrix) >= 3 & enougthSamples

        if( batchCorrectionNeeded ) {
            ### Make new model matrix (which also take SVs into account)
            if(TRUE) {
                localDesign <- designMatrix

                ### Convert group of interest to factors
                localDesign$condition <- factor(localDesign$condition, levels=unique(localDesign$condition))

                ### Check co-founders for group vs continous variables
                if( ncol(localDesign) > 2 ) {
                    for(i in 3:ncol(localDesign) ) { # i <- 4
                        if( class(localDesign[,i]) %in% c('numeric', 'integer') ) {
                            # Treat numerics with many repeated values as groups
                            if( uniqueLength( localDesign[,i] ) * 2 <= length(localDesign[,i]) ) {
                                localDesign[,i] <- factor(localDesign[,i])
                            }
                        } else {
                            localDesign[,i] <- factor(localDesign[,i])
                        }
                    }
                }

                ### Make formula for model
                localFormula <- paste(
                    '~ 0 + condition',
                    '+',
                    paste(
                        colnames(localDesign)[3:ncol(localDesign)],
                        collapse = ' + '
                    ),
                    sep=' '
                )
                localFormula <- as.formula(localFormula)

                ### Make model
                localModel <- model.matrix(localFormula, data = localDesign)
                indexToModify <- 1:length(unique( localDesign$condition ))
                colnames(localModel)[indexToModify] <- gsub(
                    pattern = '^condition',
                    replacement = '',
                    x = colnames(localModel)[indexToModify]
                )
            }

            ### Batch correct expression matrix
            # design = condition columns (effects to keep),
            # covariates = everything else (effects to remove)
            suppressWarnings(
                isoRepBatch <- as.data.frame( limma::removeBatchEffect(
                    x = isoformRepExpressionLog,
                    design     = localModel[,which(   colnames(localModel) %in% localDesign$condition), drop=FALSE],
                    covariates = localModel[,which( ! colnames(localModel) %in% localDesign$condition), drop=FALSE],
                    method='robust'
                ))
            )

            ### Overwrite expression (undo log2(x+1); clamp negatives to zero)
            isoformRepExpression <- 2^isoRepBatch - 1
            isoformRepExpression[which( isoformRepExpression < 0, arr.ind = TRUE)] <- 0

            ### Massage back to the 'isoform_id'-first data.frame layout
            isoformRepExpression$isoform_id <- rownames(isoformRepExpression)
            rownames(isoformRepExpression) <- NULL
            isoformRepExpression <- isoformRepExpression[,c(
                which(colnames(isoformRepExpression) == 'isoform_id'),
                which(colnames(isoformRepExpression) != 'isoform_id')
            )]
        }

        if( ! batchCorrectionNeeded ) {
            if (!quiet) { message('    Skipped as no batch effects were found or annoated...')}
        }
    }
    ### Calculate gene expression and isoform fraction (IF) matrices
    if (!quiet) {
        message('Step 7 of 10: Extracting data from each condition...')
    }
    if(TRUE) {
        ### Sum to gene level gene expression - updated
        if(TRUE) {
            ### Temporarily add gene_id so isoforms can be summed per gene
            isoformRepExpression$gene_id <-
                isoformAnnotation$gene_id[match(isoformRepExpression$isoform_id,
                                                isoformAnnotation$isoform_id)]

            ### Sum to gene level
            geneRepExpression <- isoformToGeneExp(
                isoformRepExpression = isoformRepExpression,
                quiet = TRUE
            )

            ### Remove gene id again (keep the matrix purely isoform-level)
            isoformRepExpression$gene_id <- NULL
        }

        ### Calculate IF rep matrix (isoform expression / parent gene expression)
        if(TRUE) {
            isoformRepIF <- isoformToIsoformFraction(
                isoformRepExpression=isoformRepExpression,
                geneRepExpression=geneRepExpression,
                isoformGeneAnnotation=isoformAnnotation,
                quiet = TRUE
            )
        }
    }
    ### In each condition analyzed get mean and standard error of gene and
    ### isoform expression plus isoform fractions; one summary data.frame
    ### (joined with the annotation) per condition.
    if (TRUE) {
        # Sample ids grouped by condition
        conditionList <-
            split(designMatrix$sampleID, f = designMatrix$condition)

        conditionSummary <-
            plyr::llply(
                .data = conditionList,
                .progress = progressBar,
                .fun = function(sampleVec) {
                    # sampleVec <- conditionList[[1]]

                    ### Isoform and IF
                    isoIndex <-
                        which(colnames(isoformRepExpression) %in% sampleVec)
                    isoIndex2 <-
                        which(colnames(isoformRepIF) %in% sampleVec)

                    # *_overall_mean uses ALL samples; *_value only this condition
                    isoSummary <- data.frame(
                        isoform_id = isoformRepExpression$isoform_id,
                        iso_overall_mean = rowMeans(isoformRepExpression[,designMatrix$sampleID, drop=FALSE]),
                        iso_value = rowMeans(isoformRepExpression[, isoIndex, drop=FALSE]),
                        iso_std = apply( isoformRepExpression[, isoIndex, drop=FALSE], 1, sd),
                        IF_overall = rowMeans(isoformRepIF[,designMatrix$sampleID, drop=FALSE], na.rm = TRUE),
                        IF = rowMeans(isoformRepIF[, isoIndex2, drop=FALSE], na.rm = TRUE),
                        stringsAsFactors = FALSE
                    )
                    # Convert sd to standard error; drop the sd column
                    isoSummary$iso_stderr <-
                        isoSummary$iso_std / sqrt(length(sampleVec))
                    isoSummary$iso_std <- NULL

                    ### Gene
                    geneIndex <-
                        which(colnames(geneRepExpression) %in% sampleVec)
                    geneSummary <- data.frame(
                        gene_id = geneRepExpression$gene_id,
                        gene_overall_mean = rowMeans(geneRepExpression[,designMatrix$sampleID, drop=FALSE]),
                        gene_value = rowMeans(geneRepExpression[, geneIndex, drop=FALSE]),
                        gene_std = apply(geneRepExpression[, geneIndex, drop=FALSE], 1, sd),
                        stringsAsFactors = FALSE
                    )
                    geneSummary$gene_stderr <-
                        geneSummary$gene_std / sqrt(length(sampleVec))
                    geneSummary$gene_std <- NULL

                    ### Combine with the annotation
                    combinedData <-
                        dplyr::inner_join(isoformAnnotation, geneSummary, by = 'gene_id')
                    combinedData <-
                        dplyr::inner_join(combinedData, isoSummary, by = 'isoform_id')

                    ### return result
                    return(combinedData)
                }
            )
    }
### Use comparisonsToMake to create the isoform comparisons
if (!quiet) {
message('Step 8 of 10: Making comparisons...')
}
if (TRUE) {
isoAnnot <-
plyr::ddply(
.data = comparisonsToMake,
.variables = c('condition_1', 'condition_2'),
.drop = TRUE,
.progress = progressBar,
.fun = function(aDF) { # aDF <- comparisonsToMake[1,]
### Extract data
cond1data <- conditionSummary[[aDF$condition_1]]
cond2data <- conditionSummary[[aDF$condition_2]]
### modify colnames in condition 1
matchIndex <-
match(
c(
'gene_value',
'gene_stderr',
'iso_value',
'iso_stderr'
),
colnames(cond1data)
)
colnames(cond1data)[matchIndex] <-
paste(colnames(cond1data)[matchIndex], '_1', sep = '')
colnames(cond1data)[which( colnames(cond1data) == 'IF')] <- 'IF1'
### modify colnames in condition 2
matchIndex <-
match(
c(
'gene_value',
'gene_stderr',
'iso_value',
'iso_stderr'
),
colnames(cond2data)
)
colnames(cond2data)[matchIndex] <-
paste(colnames(cond2data)[matchIndex], '_2', sep = '')
colnames(cond2data)[which( colnames(cond2data) == 'IF')] <- 'IF2'
combinedIso <- dplyr::inner_join(
cond1data,
cond2data[, c(
'isoform_id',
'gene_value_2',
'gene_stderr_2',
'iso_value_2',
'iso_stderr_2',
'IF2'
)],
by = 'isoform_id'
)
### Add comparison data
combinedIso$condition_1 <- aDF$condition_1
combinedIso$condition_2 <- aDF$condition_2
return(combinedIso)
}
)
### Add comparison data
# Log2FC
ps <- foldChangePseudoCount
isoAnnot$gene_log2_fold_change <-
log2((isoAnnot$gene_value_2 + ps) / (isoAnnot$gene_value_1 + ps))
isoAnnot$iso_log2_fold_change <-
log2((isoAnnot$iso_value_2 + ps) / (isoAnnot$iso_value_1 + ps))
# qValues
isoAnnot$gene_q_value <- NA
isoAnnot$iso_q_value <- NA
# Isoform fraction values
isoAnnot$dIF <- isoAnnot$IF2 - isoAnnot$IF1
# Swich values
isoAnnot$isoform_switch_q_value <- NA
isoAnnot$gene_switch_q_value <- NA
### Sort
matchVector <-
c(
'isoform_id',
'gene_id',
'condition_1',
'condition_2',
'gene_name',
'class_code',
'gene_biotype',
'iso_biotype',
'gene_overall_mean',
'gene_value_1',
'gene_value_2',
'gene_stderr_1',
'gene_stderr_2',
'gene_log2_fold_change',
'gene_q_value',
'iso_overall_mean',
'iso_value_1',
'iso_value_2',
'iso_stderr_1',
'iso_stderr_2',
'iso_log2_fold_change',
'iso_q_value',
'IF_overall',
'IF1',
'IF2',
'dIF',
'isoform_switch_q_value',
'gene_switch_q_value'
)
matchVector <-
na.omit(match(matchVector, colnames(isoAnnot)))
isoAnnot <- isoAnnot[, matchVector]
}
### Create the swichList
if (!quiet) {
message('Step 9 of 10: Making switchAnalyzeRlist object...')
}
if (TRUE) {
if( countsSuppled ) {
isoformCountMatrix <- isoformCountMatrix[colnames(isoformRepExpression)]
### Create switchList
dfSwichList <- createSwitchAnalyzeRlist(
isoformFeatures = isoAnnot,
exons = isoformExonStructure,
designMatrix = designMatrix,
isoformCountMatrix = isoformCountMatrix, # nessesary for drimseq
isoformRepExpression = isoformRepExpression, # nessesary for limma
sourceId = 'data.frames'
)
} else {
### Create switchList
dfSwichList <- createSwitchAnalyzeRlist(
isoformFeatures = isoAnnot,
exons = isoformExonStructure,
designMatrix = designMatrix,
isoformRepExpression = isoformRepExpression, # nessesary for limma
sourceId = 'data.frames'
)
}
### Add orf if extracted
if (addAnnotatedORFs & gtfImported) {
dfSwichList$isoformFeatures$PTC <-
isoORF$PTC[match(dfSwichList$isoformFeatures$isoform_id,
isoORF$isoform_id)]
isoORF <-
isoORF[which(isoORF$isoform_id %in%
isoformRepExpression$isoform_id), ]
dfSwichList$orfAnalysis <- isoORF
}
### Add IF matrix
dfSwichList$isoformRepIF <- isoformRepIF[,c('isoform_id',designMatrix$sampleID)]
### Add nucleotide sequence
if(addIsoformNt) {
dfSwichList$ntSequence <- isoformNtSeq[which(
names(isoformNtSeq) %in% dfSwichList$isoformFeatures$isoform_id
)]
}
}
### Estimate DTU
if (!quiet) {
message('Step 10 of 10: Guestimating differential usage...')
}
if(estimateDifferentialGeneRange & !quiet) {
localEstimate <- estimateDifferentialRange(
switchAnalyzeRlist = dfSwichList
)
if( !is.null(localEstimate)) {
message(' The GUESSTIMATED number of genes with differential isoform usage are:')
print(localEstimate)
} else {
message(' The estimation of DTU failed. Please proceed with the normal workflow.')
}
} else {
if (!quiet) {
message(' Skipping due to the \"estimateDifferentialGeneRange\" argument...')
}
}
### Return switchList
if (!quiet) {
message('Done\n')
}
return(dfSwichList)
}
### Supporting tximeta
### Build the sample data.frame expected by importSalmonData() from a parent
### directory containing one Salmon output sub-directory per sample.
###
### Arguments:
###   parentDir     : path to a directory whose sub-directories each contain a
###                   Salmon quantification file ('quant.sf' or 'quant.sf.gz').
###   pattern       : regex used to keep (or, with invertPattern=TRUE, discard)
###                   a subset of the identified quantification files.
###   invertPattern : if TRUE, keep files NOT matching 'pattern'.
###   ignore.case   : passed to grepl() when applying 'pattern'.
###   quiet         : suppress progress messages.
###
### Returns a data.frame with columns 'files' (full file paths), 'names'
### (sub-directory names, used as sample ids) and 'condition' (all NA --
### the user is expected to fill these in manually).
prepareSalmonFileDataFrame <- function(
    ### Core arguments
    parentDir,
    ### Advanced arguments
    pattern='',
    invertPattern=FALSE,
    ignore.case=FALSE,
    quiet = FALSE
) {
    ### Initialize
    if(TRUE) {
        ### Description of the quantification files we can detect.
        ### Salmon writes 'quant.sf' files whose header is:
        ###   Name  Length  EffectiveLength  TPM  NumReads
        supportedTypes <- data.frame(
            orign = c('Salmon' ),
            fileName = c('quant.sf' ),
            eLengthCol = c('EffectiveLength'),
            stringsAsFactors = FALSE
        )

        ### Also support detection of gzip-compressed quantification files
        supportedTypes2 <- supportedTypes
        supportedTypes2$fileName <- paste0(supportedTypes2$fileName, '.gz')
        supportedTypes <- rbind(
            supportedTypes,
            supportedTypes2
        )
    }

    ### Identify directories of interest
    if (TRUE) {
        ### One list entry per immediate sub-directory of parentDir,
        ### named by the sub-directory itself.
        dirList <- split(
            list.dirs(
                path = parentDir,
                full.names = FALSE,
                recursive = FALSE
            ),
            list.dirs(
                path = parentDir,
                full.names = FALSE,
                recursive = FALSE
            )
        )
        dirList <- dirList[which(sapply(dirList, nchar) > 0)]

        if(length(dirList) == 0) {
            stop('No subdirecories were found in the supplied folder. Please check and try again.')
        }

        ### Keep only sub-directories that contain a supported
        ### quantification file.
        dirList <-
            dirList[sapply(
                dirList,
                FUN = function(aDir) {
                    localFiles <-
                        list.files(
                            paste0(parentDir, '/', aDir),
                            recursive = FALSE
                        )

                    if(length( localFiles )) {
                        # TRUE if any supported file name (anchored at the
                        # end of the file name) is present in this directory.
                        fileOfInterest <- any(
                            sapply(
                                paste(supportedTypes$fileName, '$', sep = ''),
                                function(aFileName) {
                                    grepl(pattern = aFileName, x = localFiles)
                                })
                        )
                    } else{
                        fileOfInterest <- FALSE
                    }

                    return(fileOfInterest)
                }
            )]

        ### Remove hidden directories (names starting with ".")
        if( any( grepl('^\\.', names(dirList)) ) ) {
            nHidden <- sum( grepl('^\\.', names(dirList)) )
            nTotal <- length(dirList)

            warning(
                paste(
                    'The importIsoformExpression() function identified',
                    nHidden,
                    'hidden sub-directories',
                    paste0('(of a total ',nTotal,' sub-directories of interest)'),
                    '\nThese were identified as having the prefix "." and will be ignored.',
                    '\nIf you want to keep them you will have to re-name the sub-directories omitting the starting ".".',
                    sep=' '
                )
            )

            dirList <- dirList[which(
                ! grepl('^\\.', names(dirList))
            )]
        }

        if (length(dirList) == 0) {
            stop(
                paste(
                    'There were no directories containing the file names/suffixes',
                    'typically generated by Kallisto/Salmon/RSEM/StringTie.',
                    'Have you renamed the quantification files?',
                    '(if so you should probably use the "sampleVector" argument instead).',
                    sep=' '
                )
            )
        }
    }

    ### Identify input type (based on the first remaining sub-directory)
    if(TRUE) {
        matchedTypes <- supportedTypes[which(
            sapply(
                paste0(supportedTypes$fileName,'$'),
                function(aFileName) {
                    any(grepl(
                        pattern = aFileName,
                        x = list.files(paste0( parentDir, '/', dirList[[1]] ))
                    ))
                })
        ), ]

        if (nrow(matchedTypes) == 0) {
            stop(
                paste(
                    'Could not identify any Salmon files.',
                    sep=' '
                )
            )
        }
        if (nrow(matchedTypes) > 1) {
            # More than one supported file type found in the same sample
            # directory - the input is ambiguous.
            stop(
                paste(
                    'Multiple quantification file types were identified in the',
                    'same sub-directory, making the input ambiguous.',
                    'Please make sure each sample directory contains only one',
                    'quantification file (e.g. either quant.sf or quant.sf.gz).',
                    sep=' '
                )
            )
        }
    }

    ### Make file paths for tximport/tximeta
    if(TRUE) {
        ### Vector with one quantification-file path per sample
        localFiles <- sapply(
            dirList,
            function(aDir) {
                list.files(
                    path = paste0( parentDir, '/', aDir, '/' ),
                    pattern = paste0(matchedTypes$fileName, '$'),
                    full.names = TRUE
                )
            }
        )
        names(localFiles) <- names(dirList)

        ### Subset to files of interest via the 'pattern' argument
        if( invertPattern ) {
            localFiles <- localFiles[which(
                ! grepl(
                    pattern = pattern,
                    x = localFiles,
                    ignore.case=ignore.case
                )
            )]
        } else {
            localFiles <- localFiles[which(
                grepl(
                    pattern = pattern,
                    x = localFiles,
                    ignore.case=ignore.case
                )
            )]
        }

        if( length(localFiles) == 0 ) {
            stop('No files were left after filtering via the \'pattern\' argument')
        }

        if (!quiet) {
            message(
                paste0(
                    'Found ',
                    length(localFiles),
                    ' Salmon quantifications of interest'
                )
            )
        }

        ### Sanity check: every constructed path must exist on disk
        if(TRUE) {
            fileTest <- file.exists(localFiles)
            if( !all(fileTest)) {
                stop(
                    paste0(
                        '\nSomething went wrong with the file-path creation. Please contact developer with reproducible example.',
                        '\n One file which did not work out was:\n ',
                        localFiles[which( ! fileTest) [1]],
                        sep=''
                    )
                )
            }
        }
    }

    ### Assemble the result data.frame; 'condition' is left as NA on purpose
    if(TRUE) {
        dfData <- data.frame(
            files = localFiles,
            names = names(localFiles),
            condition = NA,
            row.names = NULL,
            stringsAsFactors = FALSE
        )
    }

    ### Final message
    if (!quiet) {
        message(
            'Adding NAs as conditions. Please modify these manually.'
        )
    }

    return(dfData)
}
### Import Salmon quantifications (as listed by prepareSalmonFileDataFrame())
### into a switchAnalyzeRlist via tximeta.
###
### Arguments:
###   salmonFileDataFrame : data.frame with the columns 'files' (paths to
###       Salmon quant files), 'names' (sample ids) and 'condition'
###       (no NAs; at least two distinct conditions required).
###   comparisonsToMake : optional data.frame of condition comparisons,
###       forwarded to importRdata().
###   ignoreAfterBar / ignoreAfterSpace / ignoreAfterPeriod : isoform-id
###       trimming options, forwarded to fixNames() and importRdata().
###   showProgress, quiet : verbosity controls.
###   ... : additional arguments forwarded to importRdata().
###
### Returns a switchAnalyzeRlist containing counts, annotation (from the GTF
### recorded by tximeta for the quantification index) and isoform nucleotide
### sequences.
importSalmonData <- function(
    ### Core arguments
    salmonFileDataFrame,
    ### Advanced arguments
    comparisonsToMake=NULL,
    ignoreAfterBar = TRUE,
    ignoreAfterSpace = TRUE,
    ignoreAfterPeriod = FALSE,
    showProgress = TRUE,
    quiet = FALSE,
    ...
) {
    ### Test input data.frame: required columns present, conditions usable
    if(TRUE) {
        colsToHave <- c(
            "files" ,"names","condition"
        )
        if( ! all(colsToHave %in% colnames(salmonFileDataFrame)) ) {
            stop(
                paste(
                    'The \'salmonFileDataFrame\' needs to also contain these column(s)',
                    setdiff(colsToHave, colnames(salmonFileDataFrame))
                )
            )
        }
        # prepareSalmonFileDataFrame() fills 'condition' with NAs; the user
        # must have replaced them before calling this function.
        if( any(is.na(salmonFileDataFrame$condition)) ) {
            stop('The \'condition\' column cannot contain NAs')
        }
        # Differential analysis requires at least two conditions.
        if( length(unique(salmonFileDataFrame$condition)) == 1) {
            stop('There appear to be only 1 condition annoatated in the \'condition\' column.')
        }
    }

    ### Use tximeta to import data and meta data
    if(TRUE) {
        ### Message
        if (!quiet) { message('Importing quantification data...') }

        # tximeta identifies the transcriptome from the Salmon index checksum
        # and attaches annotation metadata. 'scaledTPM' converts abundances
        # to library-size-scaled counts.
        suppressMessages(
            suppressWarnings(
                localSe <- tximeta(
                    coldata = salmonFileDataFrame,
                    countsFromAbundance = 'scaledTPM'
                )
            )
        )

        # Path to the GTF tximeta associated with the quantification; used
        # below as the annotation source for importRdata().
        gtfPath <- metadata(localSe)$txomeInfo$gtf

        if (!quiet) { message('Importing annoation data...') }
        # NOTE(review): exon/CDS extraction via tximeta is intentionally
        # disabled (see the if(FALSE) blocks below); the GTF file is used
        # directly instead.
        #suppressMessages(
        #    suppressWarnings(
        #        localSe <- addExons(localSe)
        #    )
        #)
        #suppressMessages(
        #    suppressWarnings(
        #        localSe <- addCDS(localSe)
        #    )
        #)

        # Retrieve the cDNA (transcript nucleotide) sequences for the
        # quantified transcriptome.
        suppressMessages(
            suppressWarnings(
                localNtSeq <- retrieveCDNA(localSe)
            )
        )
    }

    ### Massage imported data
    if(TRUE) {
        if (!quiet) { message('Massaging data...') }

        ### Count data: convert assay matrix to the data.frame layout
        ### (isoform_id column first) expected by importRdata().
        if(TRUE) {
            localCm <-
                assay(localSe, "counts") %>%
                as.data.frame() %>%
                rownames_to_column('isoform_id')
        }

        ### NT sequence: trim sequence names the same way isoform ids are
        ### trimmed so they can be matched later.
        if(TRUE) {
            names(localNtSeq) <- fixNames(
                names(localNtSeq),
                ignoreAfterBar = ignoreAfterBar,
                ignoreAfterSpace = ignoreAfterSpace,
                ignoreAfterPeriod = ignoreAfterPeriod
            )
        }

        ### Exon data - intentionally disabled (annotation is instead read
        ### from the GTF file via importRdata() below).
        if(FALSE) {
            localGr <- unlist(rowRanges(localSe))
            localGr$isoform_id <- names(localGr)
            localGr$exon_id <- NULL
            localGr$exon_rank <- NULL

            localGr$gene_id <- rowData(localSe)$gene_id[match(
                localGr$isoform_id, rowData(localSe)$tx_id
            )]
            localGr$gene_name <- rowData(localSe)$gene_name[match(
                localGr$isoform_id, rowData(localSe)$tx_id
            )]
            names(localGr) <- NULL

            localGr$isoform_id <- fixNames(
                localGr$isoform_id,
                ignoreAfterBar = ignoreAfterBar,
                ignoreAfterSpace = ignoreAfterSpace,
                ignoreAfterPeriod = ignoreAfterPeriod
            )
        }

        ### Coding regions - intentionally disabled (ORFs are not annotated
        ### here; addAnnotatedORFs=FALSE is passed to importRdata() below).
        if(FALSE) {
            localCds <- unlist(rowData(localSe)$cds[which(rowData(localSe)$coding)])
            localCds$isoform_id <- names(localCds)
            localCds$exon_id <- NULL
            localCds$exon_rank <- NULL

            localCds$isoform_id <- fixNames(
                localCds$isoform_id,
                ignoreAfterBar = ignoreAfterBar,
                ignoreAfterSpace = ignoreAfterSpace,
                ignoreAfterPeriod = ignoreAfterPeriod
            )

            ### Analyze CDS
            orfInfo <- analyseCds(
                myCDS = localCds,
                localExons = localGr,
                onlyConsiderFullORF = FALSE,
                PTCDistance = 50
            )

            # make sure all ORFs are annotated (with NAs)
            orfInfo <-
                dplyr::full_join(
                    orfInfo,
                    localCm[, 'isoform_id', drop = FALSE],
                    by = 'isoform_id'
                )

            # Annotate ORF origin
            orfInfo$orf_origin <- 'Annotation'
        }

        ### Design matrix mapping each sample id to its condition
        if(TRUE) {
            localDesign <- data.frame(
                sampleID = salmonFileDataFrame$names,
                condition = salmonFileDataFrame$condition,
                stringsAsFactors = FALSE
            )
        }
    }

    ### Make switchAnalyzeRlist
    if(TRUE) {
        if (!quiet) { message('Making switchAnalyzeRlist...') }

        # Annotation is taken from the GTF file tximeta recorded rather than
        # the (disabled) in-memory exon extraction above.
        localSL <- importRdata(
            isoformCountMatrix = localCm,
            designMatrix = localDesign,
            #isoformExonAnnoation = localGr,
            isoformExonAnnoation = gtfPath,
            isoformNtFasta = NULL,
            comparisonsToMake=comparisonsToMake,
            addAnnotatedORFs=FALSE,
            ignoreAfterBar = ignoreAfterBar,
            ignoreAfterSpace = ignoreAfterSpace,
            ignoreAfterPeriod = ignoreAfterPeriod,
            showProgress=showProgress,
            quiet=quiet,
            ...
        )
    }

    ### Add extra annotation data: nucleotide sequences for the isoforms
    ### that survived importRdata().
    if(TRUE) {
        localNtSeq <- localNtSeq[which(
            names(localNtSeq) %in% localSL$isoformFeatures$isoform_id
        )]

        if( ! all(localSL$isoformFeatures$isoform_id %in% names(localNtSeq)) ) {
            stop('Something went wrong with obtaining the nucleotide sequence. Please make sure you link fasta files as well.')
        }

        localSL$ntSequence <- localNtSeq
        #localSL$orfAnalysis <- orfInfo
    }

    ### Subset to ensure everything is aligned across the list components
    if(TRUE) {
        localSL <- subsetSwitchAnalyzeRlist(
            localSL,
            TRUE
        )
    }

    ### Return
    return(localSL)
}
### Prefilter
### Remove lowly expressed / non-informative isoforms from a
### switchAnalyzeRlist before running the switch analysis.
###
### Filters applied (each optional via NULL/FALSE):
###   geneExpressionCutoff    : genes must exceed this expression in BOTH conditions.
###   isoformExpressionCutoff : isoforms must exceed this overall mean expression.
###   IFcutoff                : isoforms must exceed this overall isoform fraction.
###   acceptedGeneBiotype     : keep only these gene biotypes (if annotated).
###   acceptedIsoformClassCode: keep only these cufflinks class codes.
###   removeSingleIsoformGenes: drop genes with only one isoform left.
###   reduceToSwitchingGenes  : keep only genes with significant switches
###                             (requires a prior switch test; controlled by
###                             alpha and dIFcutoff, optionally reduced further
###                             to genes with consequence potential).
###   keepIsoformInAllConditions : if TRUE, an isoform passing in any
###                             comparison is kept in all comparisons.
###
### Returns the filtered switchAnalyzeRlist.
preFilter <- function(
    switchAnalyzeRlist,
    geneExpressionCutoff = 1,
    isoformExpressionCutoff = 0,
    IFcutoff = 0.01,
    acceptedGeneBiotype = NULL,
    acceptedIsoformClassCode = NULL,
    removeSingleIsoformGenes = TRUE,
    reduceToSwitchingGenes = FALSE,
    reduceFurtherToGenesWithConsequencePotential = FALSE,
    onlySigIsoforms = FALSE,
    keepIsoformInAllConditions = FALSE,
    alpha = 0.05,
    dIFcutoff = 0.1,
    quiet = FALSE
) {
    ### Test input
    if (TRUE) {
        # inherits() is robust to objects carrying multiple classes
        # (the original class(x) != '...' comparison is not).
        if (!inherits(switchAnalyzeRlist, 'switchAnalyzeRlist')) {
            stop(paste(
                'The object supplied to \'switchAnalyzeRlist\'',
                'must be a \'switchAnalyzeRlist\''
            ))
        }
        if( switchAnalyzeRlist$sourceId == 'preDefinedSwitches' ) {
            stop(
                paste(
                    'The switchAnalyzeRlist is made from pre-defined isoform switches.',
                    'All filtering should be done before you used the approach',
                    'that gave rise to the isoform pairs you have pre-defined',
                    sep = ' '
                )
            )
        }
        if( switchAnalyzeRlist$sourceId == 'gtf') {
            warning(
                paste(
                    'The switchAnalyzeRlist seems to be created from a gtf file',
                    'whereby expression is probably not annotated.',
                    'Running preFilter() might not be what you want.',
                    '\nIf expression info was manually added afterwards',
                    'please ignore this warning.',
                    sep=' '
                )
            )
        }
        # NOTE(review): the original code repeated the 'preDefinedSwitches'
        # check here with a nested all-NA IF_overall test; that branch was
        # unreachable (the first check above already stops) and has been
        # removed as dead code.

        if (!is.null(isoformExpressionCutoff)) {
            if (!is.numeric(isoformExpressionCutoff)) {
                stop('The isoformExpressionCutoff argument must be a numeric')
            }
        }
        if (!is.null(geneExpressionCutoff)) {
            if (!is.numeric(geneExpressionCutoff)) {
                stop('The geneExpressionCutoff argument must be a numeric')
            }
        }
        if (!is.null(IFcutoff)) {
            if (!is.numeric(IFcutoff)) {
                stop('The IFcutoff argument must be a numeric')
            }
        }
        if (!is.null(acceptedIsoformClassCode)) {
            if (!'class_code' %in%
                colnames(switchAnalyzeRlist$isoformFeatures)) {
                stop(paste(
                    'The filter on class codes can only be used if',
                    'the switchAnalyzeRlist was generated from cufflinks data'
                ))
            }
        }

        # Scalar condition: use && (short-circuit), not elementwise &
        if( !is.null(acceptedGeneBiotype) && 'gene_biotype' %in% colnames(switchAnalyzeRlist$isoformFeatures) ) {
            okBiotypes <- unique(switchAnalyzeRlist$isoformFeatures$gene_biotype)

            if( !all(acceptedGeneBiotype %in% okBiotypes) ) {
                # BUGFIX: was 'setdff' (undefined function) in the original.
                notAnnot <- setdiff(acceptedGeneBiotype, okBiotypes)
                warning(
                    paste(
                        'Some of the supplied biotypes are not found in the isoforms supplied and will be ignored\n',
                        'These are:', paste(notAnnot, collapse = ', ')
                    )
                )
            }
        }

        if (!is.logical(removeSingleIsoformGenes)) {
            stop('The removeSingleIsoformGenes must be either TRUE or FALSE')
        }

        if (reduceToSwitchingGenes) {
            # A switch test must have been run before reducing to switching genes
            hasTest <- any(!is.na(
                c(
                    switchAnalyzeRlist$isoformFeatures$isoform_switch_q_value,
                    switchAnalyzeRlist$isoformFeatures$gene_switch_q_value
                )
            ))
            if( ! hasTest) {
                # BUGFIX: the original message contained a malformed string
                # literal with an embedded newline and raw indentation.
                stop(
                    paste(
                        'The switchAnalyzeRlist does not contain the result',
                        'of a switch analysis.\nPlease turn off the',
                        '"reduceToSwitchingGenes" argument and try again.',
                        sep=' '
                    )
                )
            }
            if (alpha < 0 |
                alpha > 1) {
                stop('The alpha parameter must be between 0 and 1 ([0,1]).')
            }
            if (alpha > 0.05) {
                warning(paste(
                    'Most journals and scientists consider an alpha larger',
                    'than 0.05 untrustworthy. We therefore recommend using',
                    'alpha values smaller than or equal to 0.05'
                ))
            }
        }
    }

    ### Find which isoforms to keep
    if (TRUE) {
        ### Extract data to filter on
        if(TRUE) {
            columnsToExtract <-
                c(
                    'iso_ref',
                    'gene_ref',
                    'isoform_id',
                    'gene_id',
                    'gene_biotype',
                    'class_code',
                    'gene_value_1',
                    'gene_value_2',
                    'iso_overall_mean',
                    'iso_value_1',
                    'iso_value_2',
                    'IF_overall',
                    'IF1',
                    'IF2',
                    'dIF',
                    'gene_switch_q_value',
                    'isoform_switch_q_value'
                )
            # NOTE(review): the original indexed '$isoformFeature' (no 's'),
            # silently relying on R's partial matching of list names;
            # spelled out here.
            columnsToExtract <-
                na.omit(match(
                    columnsToExtract,
                    colnames(switchAnalyzeRlist$isoformFeatures)
                ))

            localData <-
                switchAnalyzeRlist$isoformFeatures[, columnsToExtract]

            # Count features before filtering (for the final report)
            transcriptCount <- length(unique(localData$isoform_id))
        }

        ### Reduce to genes with switches
        if (reduceToSwitchingGenes ) {
            if( reduceFurtherToGenesWithConsequencePotential ) {
                tmp <- extractSwitchPairs(
                    switchAnalyzeRlist,
                    alpha = alpha,
                    dIFcutoff = dIFcutoff,
                    onlySigIsoforms = onlySigIsoforms
                )
                deGenes <- unique(tmp$gene_ref)
            } else {
                # Prefer isoform-level q-values when available; otherwise
                # fall back to gene-level q-values.
                isoResTest <-
                    any(!is.na(
                        localData$isoform_switch_q_value
                    ))
                if (isoResTest) {
                    deGenes <- localData$gene_ref[which(
                        localData$isoform_switch_q_value < alpha &
                            abs(localData$dIF) > dIFcutoff
                    )]
                } else {
                    deGenes <- localData$gene_ref[which(
                        localData$gene_switch_q_value < alpha &
                            abs(localData$dIF) > dIFcutoff
                    )]
                }
            }

            localData <- localData[which(
                localData$gene_ref %in% deGenes
            ),]

            if (!nrow(localData)) {
                stop('No genes were left after filtering for switching genes')
            }
        }

        # Gene expression: require expression above cutoff in both conditions
        if (!is.null(geneExpressionCutoff)) {
            localData <- localData[which(
                localData$gene_value_1 > geneExpressionCutoff &
                    localData$gene_value_2 > geneExpressionCutoff
            ), ]
            if (!nrow(localData)) {
                stop('No genes were left after filtering for gene expression')
            }
        }

        # Isoform expression: filter on the overall mean across all samples
        if (!is.null(isoformExpressionCutoff)) {
            localData <- localData[which(
                localData$iso_overall_mean > isoformExpressionCutoff
            ), ]
            if (!nrow(localData)) {
                stop('No isoforms were left after filtering for isoform expression')
            }
        }

        # Isoform fraction: filter on the overall IF across all samples
        if (!is.null(IFcutoff)) {
            localData <- localData[which(
                localData$IF_overall > IFcutoff
            ), ]
            if (!nrow(localData)) {
                stop('No isoforms were left after filtering for isoform fraction (IF) values')
            }
        }

        if (!is.null(acceptedGeneBiotype)) {
            if( 'gene_biotype' %in% colnames(localData) ) {
                localData <- localData[which(localData$gene_biotype %in% acceptedGeneBiotype), ]
                if (!nrow(localData)) {
                    stop('No genes were left after filtering for acceptedGeneBiotype.')
                }
            } else {
                warning('Gene biotypes were not annotated so the \'acceptedGeneBiotype\' argument was ignored.')
            }
        }

        if (!is.null(acceptedIsoformClassCode)) {
            localData <- localData[which(localData$class_code %in% acceptedIsoformClassCode), ]
            if (!nrow(localData)) {
                stop('No genes were left after filtering for isoform class codes')
            }
        }

        # Optionally drop genes that only have a single isoform left
        if (removeSingleIsoformGenes) {
            transcriptsPrGene <-
                split(localData$iso_ref, f = localData$gene_ref)
            transcriptsPrGene <- lapply(transcriptsPrGene, unique)

            genesToKeep <-
                names(transcriptsPrGene)[which(sapply(transcriptsPrGene, function(x)
                    length(x) > 1))]

            if (!length(genesToKeep)) {
                stop('No genes were left after filtering for multiple transcript genes')
            }

            localData <-
                localData[which(localData$gene_ref %in% genesToKeep),]
        }
    }

    ### Do filtering
    if (keepIsoformInAllConditions) {
        # Keep an isoform in ALL comparisons if it passed in any of them
        switchAnalyzeRlist <- subsetSwitchAnalyzeRlist(
            switchAnalyzeRlist,
            switchAnalyzeRlist$isoformFeatures$isoform_id %in%
                localData$isoform_id
        )
    } else {
        # Keep only the exact isoform/comparison combinations that passed
        switchAnalyzeRlist <- subsetSwitchAnalyzeRlist(
            switchAnalyzeRlist,
            switchAnalyzeRlist$isoformFeatures$iso_ref %in% localData$iso_ref
        )
    }

    ### Report filtering
    transcriptsLeft <- length(unique(localData$isoform_id))
    transcriptsRemoved <- transcriptCount - transcriptsLeft
    percentRemoved <-
        round(transcriptsRemoved / transcriptCount, digits = 4) * 100
    if (!quiet) {
        message(
            paste(
                'The filtering removed ',
                transcriptsRemoved,
                ' ( ',
                percentRemoved,
                '% of ) transcripts. There are now ',
                transcriptsLeft,
                ' isoforms left',
                sep = ''
            )
        )
    }

    ### return result
    return(switchAnalyzeRlist)
}
|
a0c908c9c48fb585f8476ffd877da47277a843e1
|
00cc47dde2afd5a66293af0b8b07f380f993d2af
|
/Backup/rProject/rprojectSPLb/Charts/rprojectSPL_tidyquantChart02.R
|
1f93439cc7463b30dcd1fcf364c85404412218ab
|
[
"MIT"
] |
permissive
|
UTexas80/gitSPL
|
277ea78a08559115b15ff3e9046e1b0e18938280
|
212fe631d8162dabf4593768f7ff6aacc0457026
|
refs/heads/master
| 2021-01-25T09:04:31.709072
| 2019-04-30T14:35:47
| 2019-04-30T14:35:47
| 93,775,621
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,497
|
r
|
rprojectSPL_tidyquantChart02.R
|
# Load libraries
# BUGFIX: the leading '#' was missing, making 'Load libraries' a syntax error.
library(tidyquant)

# Fetch daily price history for ticker SPL.AX
SPL <- tq_get("SPL.AX")
#'
#' # SMA
SPL %>%
    ggplot(aes(x = date, y = adjusted)) +
    geom_line() +                         # Plot stock price
    geom_ma(ma_fun = SMA, n = 50) +                 # Plot 50-day SMA
    geom_ma(ma_fun = SMA, n = 200, color = "red") + # Plot 200-day SMA
    coord_x_date(xlim = c(today() - weeks(12), today()),
                 ylim = c(0.1, 2.0))                 # Zoom in

#'
#' # EVWMA
SPL %>%
    ggplot(aes(x = date, y = adjusted)) +
    geom_line() +                                           # Plot stock price
    geom_ma(aes(volume = volume), ma_fun = EVWMA, n = 50) + # Plot 50-day EVWMA
    coord_x_date(xlim = c(today() - weeks(12), today()),
                 ylim = c(0.50, 2.0))                       # Zoom in

SPL %>%
    ggplot(aes(x = date, y = adjusted)) +
    geom_line() +                                    # Plot stock price
    geom_ma(ma_fun = ZLEMA, n = 10, ratio = NULL) +  # Plot 10-day ZLEMA
    # geom_ma(ma_fun = ZLEMA, n = 200, color = "red") +    # Plot 200-day SMA
    coord_x_date(xlim = c(today() - weeks(12), today()),
                 ylim = c(0.3, 2.0))                 # Zoom in

# MACD for the FANG stocks.
# NOTE(review): FANG is not defined in this script -- presumably the example
# dataset shipped with tidyquant; confirm it is loaded/attached before this runs.
FANG_macd <- FANG %>%
    group_by(symbol) %>%
    tq_mutate(select     = close,
              mutate_fun = MACD,
              nFast      = 12,
              nSlow      = 26,
              nSig       = 9,
              maType     = SMA) %>%
    mutate(diff = macd - signal) %>%
    select(-(open:volume))

FANG_macd
|
51a02fe37c0b6ee6c35efffa2b26d26686dd0950
|
56b32941415e9abe063d6e52754b665bf95c8d6a
|
/R-Portable/App/R-Portable/library/igraph/tests/test-make.R
|
19f08a2a7f7bd18a5beca8cdd729445699485e6a
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"GPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"MIT"
] |
permissive
|
voltek62/seo-viz-install
|
37ed82a014fc36e192d9a5e5aed7bd45327c8ff3
|
e7c63f4e2e4acebc1556912887ecd6a12b4458a0
|
refs/heads/master
| 2020-05-23T08:59:32.933837
| 2017-03-12T22:00:01
| 2017-03-12T22:00:01
| 84,758,190
| 1
| 0
|
MIT
| 2019-10-13T20:51:49
| 2017-03-12T21:20:14
|
C++
|
UTF-8
|
R
| false
| false
| 1,692
|
r
|
test-make.R
|
context("Make API")

# The make_/sample_/graph_ wrappers should build the same graph regardless of
# the order their arguments are given in.

test_that("make_ works, order of arguments does not matter", {
  reference <- make_undirected_graph(1:10)
  candidates <- list(
    make_(undirected_graph(1:10)),
    make_(undirected_graph(), 1:10),
    make_(1:10, undirected_graph())
  )
  for (candidate in candidates) {
    expect_true(identical_graphs(reference, candidate))
  }
})

test_that("sample_, graph_ also work", {
  reference <- make_undirected_graph(1:10)

  sampled <- list(
    sample_(undirected_graph(1:10)),
    sample_(undirected_graph(), 1:10),
    sample_(1:10, undirected_graph())
  )
  for (candidate in sampled) {
    expect_true(identical_graphs(reference, candidate))
  }

  graphed <- list(
    graph_(undirected_graph(1:10)),
    graph_(undirected_graph(), 1:10),
    graph_(1:10, undirected_graph())
  )
  for (candidate in graphed) {
    expect_true(identical_graphs(reference, candidate))
  }
})

test_that("error messages are proper", {
  expect_error(make_(), "Don't know how to make_")
  expect_error(make_(1:10), "Don't know how to make_")

  expect_error(graph_(), "Don't know how to graph_")
  expect_error(graph_(1:10), "Don't know how to graph_")
  expect_error(graph_(directed_graph(), directed_graph()),
               "Don't know how to graph_")

  expect_error(sample_(), "Don't know how to sample_")
  expect_error(sample_(1:10), "Don't know how to sample_")
  expect_error(sample_(directed_graph(), directed_graph()),
               "Don't know how to sample_")
})

test_that("we pass arguments unevaluated", {
  built_directly <- graph_from_literal(A -+ B:C)
  built_via_wrapper <- graph_(from_literal(A -+ B:C))
  expect_true(identical_graphs(built_directly, built_via_wrapper))
})
|
81dc92f8cf5e9856d6b050057d631453bc962f18
|
b98ece6254219513180cc730f7e26f7f9a277124
|
/plot1.R
|
064234c324277f6aedfe2fed9f879b607530b502
|
[] |
no_license
|
OlgaRusyaeva/ExData_Plotting1
|
543fdf16bba72a3cac2d60170f56325b362a3e63
|
0c0988359a8be37155b5c0142b09eb3a9a26f6c5
|
refs/heads/master
| 2021-01-14T10:23:38.695401
| 2015-06-07T14:41:30
| 2015-06-07T14:41:30
| 35,160,355
| 0
| 0
| null | 2015-05-06T13:33:03
| 2015-05-06T13:33:03
| null |
UTF-8
|
R
| false
| false
| 439
|
r
|
plot1.R
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
library(sqldf)

# Read only the rows for the two dates of interest directly from the
# semicolon-separated text file via an SQL filter.
inputFile <- "household_power_consumption.txt"
powerData <- read.csv.sql(
  inputFile,
  sql = 'select * from file where Date="1/2/2007" OR Date="2/2/2007"',
  sep = ";",
  header = TRUE
)
closeAllConnections()

# Render the red histogram of Global Active Power to plot1.png.
png("plot1.png")
hist(
  powerData$Global_active_power,
  main = "Global Active Power",
  xlab = "Global Active Power (kilowatts)",
  col = "red"
)
dev.off()
|
c7c441e2a6e812dacec8008225f108199f05de65
|
0c023ee284603ab89bc221b90dd4531d9463ec9d
|
/man/ctabs.Rd
|
867436932c02d831e60f72a73d1ddc576826adcf
|
[] |
no_license
|
cran/libcoin
|
8e81c745d853b842e557a6f9a16cb1a0aba556bd
|
26c093c92b0697e5853d58e953e9f654f90ae272
|
refs/heads/master
| 2021-10-12T18:22:55.621417
| 2021-09-27T08:50:07
| 2021-09-27T08:50:07
| 76,068,198
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 964
|
rd
|
ctabs.Rd
|
\name{ctabs}
\alias{ctabs}
\title{
Cross Tabulation
}
\description{
Efficient weighted cross tabulation of two factors and a block
}
\usage{
ctabs(ix, iy = integer(0), block = integer(0), weights = integer(0),
subset = integer(0), checkNAs = TRUE)
}
\arguments{
  \item{ix}{an integer vector of positive values with zero indicating a missing value.}
  \item{iy}{an optional integer vector of positive values with zero
            indicating a missing value.}
  \item{block}{an optional blocking factor without missing values.}
\item{weights}{an optional vector of weights, integer or double.}
\item{subset}{an optional integer vector indicating a subset.}
\item{checkNAs}{a logical for switching off missing value checks.}
}
\details{
A faster version of \code{xtabs(weights ~ ix + iy + block, subset)}.
}
\value{
If \code{block} is present, a three-way table. Otherwise,
a one- or two-dimensional table.
}
\examples{
ctabs(ix = 1:5, iy = 1:5, weights = 1:5 / 5)
}
\keyword{univar}
|
1e5760ab704e6922eaa5d98a0822832662419dd5
|
2896f7f598ae89d712ae44db97fd7cd87c134880
|
/man/randomSampler.Rd
|
126d0c0fa7971eea28459758815d29e6ce23a155
|
[] |
no_license
|
cryptomanic/markovchain
|
ff1ba25b1415a5be79af1e8e3d3fa156031be8db
|
be07d293674f568c20b14189bdb3af447320b040
|
refs/heads/master
| 2020-02-26T13:48:51.546847
| 2016-03-23T06:03:33
| 2016-03-23T06:03:33
| 54,324,461
| 0
| 0
| null | 2016-03-20T15:16:14
| 2016-03-20T15:16:14
| null |
UTF-8
|
R
| false
| false
| 1,869
|
rd
|
randomSampler.Rd
|
\name{rmarkovchain}
\alias{rmarkovchain}
\alias{markovchainSequence}
\title{
Function to generate a sequence of states from homogeneous or non-homogeneous Markov chains.
}
\description{
Provided any \code{markovchain} or \code{markovchainList} objects, it returns a sequence of
states coming from the underlying stationary distribution.
}
\usage{
rmarkovchain(n, object, what="data.frame",...)
markovchainSequence(n, markovchain, t0 = sample(markovchain@states, 1),
include.t0 = FALSE)
}
\arguments{
\item{n}{
Sample size
}
\item{object}{
Either a \code{markovchain} or a \code{markovchainList} object.
}
\item{what}{
It specifies whether either a \code{data.frame} or a \code{matrix} (each rows represent a simulation) or a \code{list} is returned.
}
\item{\dots}{
additional parameters passed to the internal sampler
}
\item{markovchain}{
The \code{markovchain} object
}
\item{t0}{
The initial state.
}
\item{include.t0}{
Specify if the initial state shall be used.
}
}
\details{
When a homogeneous process is assumed (\code{markovchain} object), a sequence
of size n is sampled. When a non-homogeneous process is assumed,
n samples are taken, and the process is assumed to last from the beginning to
the end of the non-homogeneous Markov process.
}
\value{
Either a character vector or a data frame
}
\references{
A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010
}
\author{
Giorgio Spedicato
}
\note{
Check the type of input
}
\seealso{
\code{\link{markovchainFit}}
}
\examples{
#define the Markov chain
statesNames=c("a","b","c")
mcB<-new("markovchain", states=statesNames, transitionMatrix=matrix(c(0.2,0.5,0.3,
0,0.2,0.8,0.1,0.8,0.1),nrow=3, byrow=TRUE, dimnames=list(statesNames,statesNames)
))
#show the sequence
outs<-markovchainSequence(n=100,markovchain=mcB, t0="a")
outs2<-rmarkovchain(n=20, object=mcB)
}
|
0194f30299d757a22dc1b3881f6bf1c6639f2821
|
f373158a5273638d145bedf9d58301239c79e025
|
/mle.R
|
a4361ea8fa91d7b8cd18f66068a99473e257f214
|
[] |
no_license
|
him-28/prob_dist_analysis
|
259e5b684548cf3932a654421e5ddf3ddc4938a1
|
50094bff97db09fb01a5d3731e2a1e9b2798b74c
|
refs/heads/master
| 2021-01-11T01:15:01.538322
| 2016-08-25T04:36:48
| 2016-08-25T04:36:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 811
|
r
|
mle.R
|
# MLE function creation from http://www.r-bloggers.com/fitting-a-model-by-maximum-likelihood/
# Fits a Normal(mu, sigma) distribution by maximum likelihood to each
# (class, column) slice of `data`.
# Relies on globals defined earlier in the calling script -- TODO confirm:
#   data : numeric matrix with c*n rows and p columns
#   c    : number of classes (blocks of n consecutive rows)
#   p    : number of columns
#   n    : number of rows per class
library(stats4)  # hoisted: was loaded inside the inner loop on every iteration

mu_mle  <- matrix(NA_real_, nrow = c, ncol = p)
sig_mle <- matrix(NA_real_, nrow = c, ncol = p)
t_mle   <- matrix(NA_real_, nrow = c, ncol = p)

for (i in seq_len(p)) {
  for (j in seq_len(c)) {
    # Rows belonging to class j (classes are stored in consecutive blocks of n)
    row1 <- (j - 1) * n + 1
    row2 <- j * n
    x <- data[row1:row2, i]

    # Negative log-likelihood of x under Normal(mu, sigma); warnings from
    # non-positive sigma proposals during optimization are suppressed, as in
    # the original.
    LogLikelihood <- function(mu, sigma) {
      R <- suppressWarnings(dnorm(x, mu, sigma))
      -sum(log(R))
    }

    mleFit <- mle(LogLikelihood, start = list(mu = 0, sigma = 1))
    coefficients <- coef(mleFit)
    mu_mle[j, i]  <- coefficients[1]
    sig_mle[j, i] <- coefficients[2]
    # NOTE(review): sig_mle holds the fitted sigma (a standard deviation),
    # but it is used here as if it were a variance (sigma/n under the square
    # root) -- confirm which statistic was intended.
    t_mle[j, i] <- mu_mle[j, i] / sqrt(sig_mle[j, i] / n)
  }
}
|
98085dd74ef97deadd5c92ce6bebec2ec5ea3747
|
02b5125f6b2f94430176c1de91d4e65aef7e9ff5
|
/binomial/man/bin_mode.Rd
|
f6cffd22df9432254cb93db0d8b6925ef823bc47
|
[] |
no_license
|
stat133-sp19/hw-stat133-sp8268
|
1a3c4586e621becaa3547a06e334609cb9536947
|
9a6d68542ff0452bf4c222ac11cc06a3f1614de8
|
refs/heads/master
| 2020-04-28T06:17:40.574375
| 2019-05-02T06:07:14
| 2019-05-02T06:07:14
| 175,051,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 429
|
rd
|
bin_mode.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/measures.R
\name{bin_mode}
\alias{bin_mode}
\title{Binomial Mode}
\usage{
bin_mode(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability of success}
}
\value{
mode
}
\description{
Returns mode of binom dist with given trials and prob
}
\examples{
bin_variance(trials = 10, prob = .4)
bin_variance(110, prob = .56)
}
|
9750e732c60e34b3fde9d2c34d4931238f03cf07
|
a845065b2b31e2fde530ad5eb192780340c5f481
|
/man/ex11.03.Rd
|
af751f0fd4864c685b4a1519009f890ff550b3f3
|
[] |
no_license
|
cran/Devore7
|
319c8a7bd4daca50ba1b7f7acdd53e40957a55bb
|
9592edf605e0a7927bdce1e0081796b183b1b5ad
|
refs/heads/master
| 2021-01-13T16:10:38.867717
| 2014-03-25T00:00:00
| 2014-03-25T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
rd
|
ex11.03.Rd
|
\name{ex11.03}
\alias{ex11.03}
\docType{data}
\title{R Data set: ex11.03}
\description{
The \code{ex11.03} data frame has 17 rows and 3 columns.
}
\usage{data(ex11.03)}
\format{
A data frame with 17 observations on the following 3 variables.
\describe{
\item{\code{C1}}{a factor with levels \code{200} \code{226} \code{240} \code{261} \code{278} \code{312} \code{330} \code{369} \code{381} \code{416} \code{462} \code{500} \code{517} \code{575} \code{645} \code{733} \code{C1}}
\item{\code{C2}}{a factor with levels \code{1(200)} \code{2(400)} \code{3(700)} \code{4(1100)} \code{C2}}
\item{\code{C3}}{a factor with levels \code{1(190)} \code{2(250)} \code{3(300)} \code{4(400)} \code{C3}}
}
}
\details{
Consult the web site \url{http://www.thomsonedu.com/statistics/devore} for additional online resources that are available for this book.
}
\source{
Devore, J. L. (2008) \emph{Probability and Statistics for Engineering and the Sciences (7th Edition)}, ISBN-10: 0495382175 ISBN-13: 9780495382171
}
\examples{
data(ex11.03)
str(ex11.03)
}
\keyword{datasets}
|
8abb399d215076be6558c8293f77a33820394773
|
c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d
|
/urca/Rcmdr/Rcmdr-urca.R
|
c99f80a6fcf04a57930d4a5e01f5788e551b2e99
|
[
"MIT"
] |
permissive
|
solgenomics/R_libs
|
bcf34e00bf2edef54894f6295c4f38f1e480b3fc
|
e8cdf30fd5f32babf39c76a01df5f5544062224e
|
refs/heads/master
| 2023-07-08T10:06:04.304775
| 2022-05-09T15:41:26
| 2022-05-09T15:41:26
| 186,859,606
| 0
| 2
|
MIT
| 2023-03-07T08:59:16
| 2019-05-15T15:57:13
|
C++
|
UTF-8
|
R
| false
| false
| 32,149
|
r
|
Rcmdr-urca.R
|
library(urca)
#
# ADF-test
#
#
# ADF-test dialog for the R Commander GUI: lets the user pick a numeric
# variable, the deterministic-term specification and a maximum lag, then
# prints summary(ur.df(...)) for that variable.
#
urcadf <- function(){
    if (!checkActiveDataSet()) return()
    if (!checkNumeric(n=1)) return()
    initializeDialog(title="ADF Test")
    xBox <- variableListBox(top, Numeric(), title="Variable (pick one)")
    onOK <- function(){
        x <- getSelection(xBox)
        # Fully qualified name of the selected column, e.g. "myData$price"
        var <- paste(.activeDataSet, "$", x, sep="")
        ttype <- tclvalue(testtypeVariable)
        lags <- tclvalue(lagsVariable)
        if (length(x) == 0){
            errorCondition(recall=urcadf, message="You must select a variable.")
            return()
        }
        if (GrabFocus()) tkgrab.release(top)
        tkdestroy(top)
        doItAndPrint(paste("summary(ur.df(y= ", var, ", type = ", ttype, ", lags = ", lags, "))", sep=""))
        # BUGFIX: a second, redundant tkdestroy(top) followed doItAndPrint()
        # in the original; the window is already destroyed above.
        tkfocus(CommanderWindow())
    }
    OKCancelHelp(helpSubject="ur.df")
    # Deterministic-regressor choice: none / drift / drift+trend
    radioButtons(name="testtype", buttons=c("none", "drift", "trend"), values=c("'none'", "'drift'", "'trend'" ), labels=c("no deterministic regressor", "drift only", "drift and trend"), title="Type of test")
    rightFrame <- tkframe(top)
    lagsFrame <- tkframe(rightFrame)
    lagsVariable <- tclVar("4")
    lagsField <- tkentry(lagsFrame, width="2", textvariable=lagsVariable)
    tkgrid(getFrame(xBox), sticky="nw")
    tkgrid(tklabel(rightFrame, text=""))
    tkgrid(tklabel(lagsFrame, text="Maximum number of lags = ", fg="blue"), lagsField, sticky="w")
    tkgrid(lagsFrame, sticky="w")
    tkgrid(testtypeFrame, rightFrame, sticky="nw")
    tkgrid(buttonsFrame, columnspan=2, sticky="w")
    tkgrid.configure(lagsField, sticky="e")
    dialogSuffix(rows=4, columns=2)
}
#
# ERS-test
#
# Rcmdr dialog for the Elliott-Rothenberg-Stock unit-root test
# (urca::ur.ers).  The user picks one numeric variable, the test statistic,
# the model type and a maximum lag; the dialog echoes and runs
# summary(ur.ers(...)).  Depends on Rcmdr GUI helpers that create objects
# such as `top`, `testtypeVariable`, `modelVariable` and `buttonsFrame`
# in this frame as side effects.
urcaers <- function(){
if (!checkActiveDataSet()) return()
if (!checkNumeric(n=1)) return()
initializeDialog(title="ERS Test")
xBox <- variableListBox(top, Numeric(), title="Variable (pick one)")
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
# Expression "<dataset>$<variable>" embedded in the generated command.
var <- paste(.activeDataSet, "$", x, sep="")
ttype <- tclvalue(testtypeVariable)
modtype <- tclvalue(modelVariable)
lags <- tclvalue(lagsVariable)
if (length(x) == 0){
errorCondition(recall=urcaers, message="You must select a variable.")
return()
}
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
doItAndPrint(paste("summary(ur.ers(y= ", var, ", type = ", ttype, ", model = ", modtype, ", lag.max = ", lags, "))", sep=""))
# NOTE(review): `top` was already destroyed above; this second
# tkdestroy(top) looks redundant -- consider removing.
tkdestroy(top)
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="ur.ers")
# Side effect: creates testtypeVariable/testtypeFrame in this frame.
radioButtons(name="testtype", buttons=c("DFGLS", "Ptest"), values=c("'DF-GLS'", "'P-test'"),
labels=c("DF-GLS statistic", "P-test statistic"), title="Type of test")
# Side effect: creates modelVariable/modelFrame in this frame.
radioButtons(name="model", buttons=c("const", "trend"), values=c("'constant'", "'trend'"),
labels=c("Include constant", "Include constant + trend"), title="Model type")
rightFrame <- tkframe(top)
lagsFrame <- tkframe(rightFrame)
lagsVariable <- tclVar("4")
lagsField <- tkentry(lagsFrame, width="2", textvariable=lagsVariable)
tkgrid(getFrame(xBox), sticky="nw")
tkgrid(tklabel(rightFrame, text=""))
tkgrid(tklabel(lagsFrame, text="Maximum number of lags = ", fg="blue"), lagsField, sticky="w")
tkgrid(lagsFrame, sticky="w")
tkgrid(testtypeFrame, rightFrame, sticky="nw")
tkgrid(modelFrame, rightFrame, sticky="nw")
tkgrid(buttonsFrame, columnspan=2, sticky="w")
tkgrid.configure(lagsField, sticky="e")
dialogSuffix(rows=4, columns=2)
}
#
# KPSS-test
#
# Rcmdr dialog for the KPSS stationarity test (urca::ur.kpss).  Presents
# one numeric variable, a choice of deterministic part and a lag rule,
# then echoes and runs summary(ur.kpss(...)) in the Commander log.
# Rcmdr helpers (radioButtons, OKCancelHelp, ...) inject `top`,
# `testtypeVariable`, `lagsVariable` and the matching frames into this
# function's frame as side effects.
urcakpss <- function(){
  if (!checkActiveDataSet()) return()
  if (!checkNumeric(n=1)) return()
  initializeDialog(title="KPSS Test")
  variableBox <- variableListBox(top, Numeric(), title="Variable (pick one)")
  # Creates testtypeVariable/testtypeFrame in this frame.
  radioButtons(name="testtype", buttons=c("constant", "trend"),
               values=c("'mu'", "'tau'"), initialValue="'mu'",
               labels=c("Include constant", "Include trend"), title="Deterministic Part")
  # Creates lagsVariable/lagsFrame in this frame.
  radioButtons(name="lags", buttons=c("short", "long", "nil"),
               values=c("'short'", "'long'", "'nil'"), initialValue="'short'", labels=c("use short lags", "use long lags", "use no lags"), title="Lag Selection")
  # OK-button callback; bound by name via OKCancelHelp() below.
  onOK <- function(){
    chosen <- getSelection(variableBox)
    if (length(chosen) == 0){
      errorCondition(recall=urcakpss, message="You must select a variable.")
      return()
    }
    series <- paste0(.activeDataSet, "$", chosen)
    detPart <- tclvalue(testtypeVariable)
    lagRule <- tclvalue(lagsVariable)
    if (GrabFocus()) tkgrab.release(top)
    tkdestroy(top)
    doItAndPrint(paste0("summary(ur.kpss(", series, ", type = ", detPart, ", lags = ", lagRule, "))"))
    tkfocus(CommanderWindow())
  }
  OKCancelHelp(helpSubject="ur.kpss")
  tkgrid(getFrame(variableBox), sticky="nw")
  tkgrid(testtypeFrame, lagsFrame, sticky="w")
  tkgrid(buttonsFrame, columnspan=2, sticky="w")
  dialogSuffix(rows=3, columns=2)
}
#
# Schmidt-Phillips test
#
# Rcmdr dialog for the Schmidt-Phillips unit-root test (urca::ur.sp).
# The user picks one numeric variable, the test statistic, the polynomial
# degree and a significance level; the dialog echoes and runs
# summary(ur.sp(...)).  Rcmdr helpers inject `top`, `testtypeVariable`,
# `poldegVariable`, `signifVariable` and the matching frames here.
urcasp <- function(){
if (!checkActiveDataSet()) return()
if (!checkNumeric(n=1)) return()
initializeDialog(title="SP Test")
xBox <- variableListBox(top, Numeric(), title="Variable (pick one)")
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
# Expression "<dataset>$<variable>" embedded in the generated command.
var <- paste(.activeDataSet, "$", x, sep="")
ttype <- tclvalue(testtypeVariable)
pdtype <- tclvalue(poldegVariable)
sltype <- tclvalue(signifVariable)
if (length(x) == 0){
errorCondition(recall=urcasp, message="You must select a variable.")
return()
}
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
doItAndPrint(paste("summary(ur.sp(y= ", var, ", type = ", ttype, ", pol.deg = ", pdtype, ", signif = ", sltype, "))", sep=""))
# NOTE(review): second tkdestroy(top) looks redundant -- the window was
# destroyed just above; consider removing.
tkdestroy(top)
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="ur.sp")
# Side effects: each radioButtons() call creates <name>Variable and
# <name>Frame objects in this frame.
radioButtons(name="testtype", buttons=c("tau", "rho"), values=c("'tau'", "'rho'"),
labels=c("tau statistic", "rho statistic"), title="Type of test")
radioButtons(name="poldeg", buttons=c("pb1", "pb2", "pb3", "pb4"), values=c("1", "2", "3", "4"),
labels=c("1st", "2nd", "3rd", "4th"), title="Select pol. degree")
radioButtons(name="signif", buttons=c("sb1", "sb5", "sb10"), values=c("0.01", "0.05", "0.1"),
labels=c("alpha=1%", "alpha=5%", "alpha=10%"), title="Sig. Level")
tkgrid(getFrame(xBox), testtypeFrame, sticky="nw")
tkgrid(poldegFrame, signifFrame, sticky="nw")
tkgrid(buttonsFrame, columnspan=2, sticky="w")
dialogSuffix(rows=4, columns=2)
}
#
# Phillips-Perron test
#
# Rcmdr dialog for the Phillips-Perron unit-root test (urca::ur.pp).
# The user picks one numeric variable, the test statistic, the model type
# and a lag rule; the dialog echoes and runs summary(ur.pp(...)).
# Rcmdr helpers inject `top`, `testtypeVariable`, `modelVariable`,
# `lagsVariable` and the matching frames into this frame.
urcapp <- function(){
if (!checkActiveDataSet()) return()
if (!checkNumeric(n=1)) return()
initializeDialog(title="PP Test")
xBox <- variableListBox(top, Numeric(), title="Variable (pick one)")
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
# Expression "<dataset>$<variable>" embedded in the generated command.
var <- paste(.activeDataSet, "$", x, sep="")
ttype <- tclvalue(testtypeVariable)
modtype <- tclvalue(modelVariable)
lagtype <- tclvalue(lagsVariable)
if (length(x) == 0){
errorCondition(recall=urcapp, message="You must select a variable.")
return()
}
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
doItAndPrint(paste("summary(ur.pp(x= ", var, ", type = ", ttype, ", model = ", modtype, ", lags = ", lagtype, "))", sep=""))
# NOTE(review): second tkdestroy(top) looks redundant -- the window was
# destroyed just above; consider removing.
tkdestroy(top)
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="ur.pp")
# Side effects: each radioButtons() call creates <name>Variable and
# <name>Frame objects in this frame.
radioButtons(name="testtype", buttons=c("Zalpha", "Ztau"), values=c("'Z-alpha'", "'Z-tau'"),
labels=c("Z-alpha statistic", "Z-tau statistic"), title="Type of test")
radioButtons(name="model", buttons=c("const", "trend"), values=c("'constant'", "'trend'"),
labels=c("Include constant", "include constant + trend"), title="Model type")
radioButtons(name="lags", buttons=c("short", "long"), values=c("'short'", "'long'"),
labels=c("short lags", "long lags"), title="Lags for error correction")
tkgrid(getFrame(xBox), testtypeFrame, sticky="nw")
tkgrid(modelFrame, lagsFrame, sticky="nw")
tkgrid(buttonsFrame, columnspan=2, sticky="w")
dialogSuffix(rows=4, columns=2)
}
#
# Zivot-Andrews test
#
# Rcmdr dialog for the Zivot & Andrews unit-root test (urca::ur.za).
# The user picks one numeric variable, the model type, a lag order and
# whether to plot the test-statistic path.  The fitted object is stored
# temporarily as ZAstat in .GlobalEnv, summarised, optionally plotted in
# a new device, and then removed again.
urcaza <- function(){
if (!checkActiveDataSet()) return()
if (!checkNumeric(n=1)) return()
initializeDialog(title="Zivot & Andrews Test")
xBox <- variableListBox(top, Numeric(), title="Variable (pick one)")
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
# Expression "<dataset>$<variable>" embedded in the generated command.
var <- paste(.activeDataSet, "$", x, sep="")
modtype <- tclvalue(modelVariable)
lags <- tclvalue(lagsVariable)
# Checkbox state as the *string* "TRUE"/"FALSE".
ptype <- if(tclvalue(plotVariable) == 0) "FALSE" else "TRUE"
if (length(x) == 0){
errorCondition(recall=urcaza, message="You must select a variable.")
return()
}
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
command <- paste("ur.za(y= ", var, ", model = ", modtype, ", lag = ", lags, ")", sep="")
# Log the assignment, evaluate it, and keep the result as ZAstat so that
# summary() and plot() can refer to it by name in the echoed commands.
logger(paste("ZAstat <- ", command, sep=""))
assign("ZAstat", justDoIt(command), envir=.GlobalEnv)
doItAndPrint("summary(ZAstat)")
# Comparing the character "TRUE" with logical TRUE works here because R
# coerces TRUE to "TRUE" before comparing; "FALSE"==TRUE is then FALSE.
if(ptype==TRUE) {
justDoIt("x11()")
justDoIt("plot(ZAstat)")
}
# Clean up the temporary global object.
logger("remove(ZAstat)")
remove(ZAstat, envir=.GlobalEnv)
# NOTE(review): second tkdestroy(top) looks redundant -- the window was
# destroyed above; consider removing.
tkdestroy(top)
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="ur.za")
# Side effect: creates modelVariable/modelFrame in this frame.
radioButtons(name="model", buttons=c("const", "trend", "both"), values=c("'intercept'", "'trend'", "'both'"),
labels=c("Include constant", "Include trend", "Include both"), title="Model type")
# Side effect: creates plotVariable and plotFrame in this frame.
checkBoxes(frame="plotFrame", boxes="plot", initialValues="0", labels="Plot path of Zivot & Andrews Statistic?")
rightFrame <- tkframe(top)
lagsFrame <- tkframe(rightFrame)
lagsVariable <- tclVar("4")
lagsField <- tkentry(lagsFrame, width="2", textvariable=lagsVariable)
tkgrid(getFrame(xBox), sticky="nw")
tkgrid(tklabel(rightFrame, text=""))
tkgrid(tklabel(lagsFrame, text="Maximum number of lags = ", fg="blue"), lagsField, sticky="w")
tkgrid(lagsFrame, sticky="w")
tkgrid(modelFrame, rightFrame, sticky="nw")
tkgrid(plotFrame, sticky="nw")
tkgrid(buttonsFrame, columnspan=2, sticky="w")
tkgrid.configure(lagsField, sticky="e")
dialogSuffix(rows=3, columns=2)
}
#
# Phillips-Ouliaris test
#
# Rcmdr dialog for the Phillips & Ouliaris cointegration test
# (urca::ca.po).  Unlike the unit-root dialogs, the user picks a whole
# data set (a multivariate system), plus demeaning, lag rule, statistic
# and an optional tolerance; the dialog echoes and runs
# summary(ca.po(...)).
urcacapo <- function(){
dataSets <- listDataSets()
if (length(dataSets) == 0){
tkmessageBox(message="There are no data sets from which to choose.", icon="error", type="ok")
tkfocus(CommanderWindow())
return()
}
initializeDialog(title="Phillips & Ouliaris Test")
xBox <- variableListBox(top, dataSets, title="Data Sets (pick one)")
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
if (length(x) == 0){
errorCondition(recall=urcacapo, message="You must select a data set.")
return()
}
meantype <- tclvalue(demeanVariable)
lags <- tclvalue(lagsVariable)
ttype <- tclvalue(typeVariable)
tolvar <- tclvalue(tolVariable)
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
doItAndPrint(paste("summary(ca.po(z = ", x, ", demean = ", meantype, ", lag = ", lags, ", type = ", ttype, ", tol = ", tolvar, "))", sep=""))
# NOTE(review): second tkdestroy(top) looks redundant -- the window was
# destroyed just above; consider removing.
tkdestroy(top)
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="ca.po")
# Side effects: each radioButtons() call creates <name>Variable and
# <name>Frame objects in this frame.
radioButtons(name="demean", buttons=c("none", "constant", "trend"), values=c("'none'", "'constant'", "'trend'"),
labels=c("None", "Include constant", "Include trend"), title="Demean?")
radioButtons(name="lags", buttons=c("short", "long"), values=c("'short'", "'long'"),
labels=c("short lags", "long lags"), title="Lags for error correction")
radioButtons(name="type", buttons=c("Pu", "Pz"), values=c("'Pu'", "'Pz'"),
labels=c("Pu statistic", "Pz statistic"), title="Type of test")
rightFrame <- tkframe(top)
tolFrame <- tkframe(rightFrame)
# "NULL" as text means: pass tol = NULL (solve()'s default) to ca.po().
tolVariable <- tclVar("NULL")
tolField <- tkentry(tolFrame, width="8", textvariable=tolVariable)
tkgrid(getFrame(xBox), sticky="nw")
tkgrid(tklabel(rightFrame, text=""))
tkgrid(tklabel(tolFrame, text="Tolerance level = ", fg="blue"), tolField, sticky="w")
tkgrid(tolFrame, sticky="w")
tkgrid(demeanFrame, rightFrame, sticky="nw")
tkgrid(lagsFrame, typeFrame, sticky="nw")
tkgrid(buttonsFrame, columnspan=2, sticky="w")
tkgrid.configure(tolField, sticky="e")
dialogSuffix(rows=5, columns=2)
}
#
# Johansen-Procedure
#
# Rcmdr dialog for Johansen's cointegration procedure (urca::ca.jo).
# The user picks a data set and the test/VECM options; the fitted ca.jo
# object is stored in .GlobalEnv under a user-chosen model name (default
# "VECMmodel.<n>") and summarised.  A global model counter
# (UpdateModelNumber) is incremented up front and decremented again if
# the dialog is aborted.
urcacajo <- function(){
dataSets <- listDataSets()
if (length(dataSets) == 0){
tkmessageBox(message="There are no data sets from which to choose.", icon="error", type="ok")
tkfocus(CommanderWindow())
return()
}
initializeDialog(title="Johansen's Procedure")
xBox <- variableListBox(top, dataSets, title="Data Sets (pick one)")
# NOTE(review): UpdateModelNumber is *called* as a function here but used
# as a plain variable in the paste() below (and in urcacajolst) -- a
# closure cannot be pasted, so one of the two usages must be wrong for a
# given Rcmdr version.  TODO confirm against the targeted Rcmdr API.
assign("UpdateModelNumber", UpdateModelNumber() + 1, envir=.GlobalEnv)
modelName <- tclVar(paste("VECMmodel.", UpdateModelNumber, sep=""))
modelFrame <- tkframe(top)
model <- tkentry(modelFrame, width="20", textvariable=modelName)
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
if (length(x) == 0){
errorCondition(recall=urcacajo, message="You must select a data set.")
return()
}
ttype <- tclvalue(typeVariable)
spect <- tclvalue(specVariable)
ctype <- tclvalue(ecdetVariable)
lags <- tclvalue(lagVariable)
seas <- tclvalue(seasonVariable)
dummy <- tclvalue(dumVariable)
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
modelValue <- tclvalue(modelName)
# Reject syntactically invalid model names and roll back the counter.
if (!is.valid.name(modelValue)){
assign("UpdateModelNumber", UpdateModelNumber() - 1, envir=.GlobalEnv)
errorCondition(recall=urcacajo, message=paste('"', modelValue, '" is not a valid name.', sep=""))
return()
}
# If a model of this name exists, ask before overwriting; on "no",
# roll back the counter and restart the dialog.
if (is.element(modelValue, listVECMmodels())) {
if ("no" == tclvalue(checkReplace(modelValue, type="Model"))){
assign("UpdateModelNumber", UpdateModelNumber() - 1, envir=.GlobalEnv)
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
urcacajo()
return()
}
}
# Build, log and evaluate the ca.jo() call, then summarise the result.
command <- paste("ca.jo(x = ", x, ", type = ", ttype, ", ecdet = ", ctype, ", K = ",
lags, ", spec = ", spect, ", season = ", seas, ", dumvar = ", dummy , ")", sep="")
logger(paste(modelValue, " <- ", command, sep=""))
assign(modelValue, justDoIt(command), envir=.GlobalEnv)
doItAndPrint(paste("summary(", modelValue, ")", sep=""))
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="ca.jo")
# Side effects: each radioButtons() call creates <name>Variable and
# <name>Frame objects in this frame.
radioButtons(name="type", buttons=c("eigen", "trace"), values=c("'eigen'", "'trace'"),
labels=c("Eigenvalue statistic", "Trace statistic"), title="Type of statistic")
radioButtons(name="spec", buttons=c("long", "trans"), values=c("'longrun'", "'transitory'"),
labels=c("longrun specification", "transitory specification"), title="VECM specification")
radioButtons(name="season", buttons=c("none", "monthly", "quarterly"), values=c("NULL", "12", "4"),
labels=c("None", "Monthly seasonality", "Quarterly seasonality"), title="Seasonality")
radioButtons(name="ecdet", buttons=c("none", "const", "trend"), values=c("'none'", "'const'", "'trend'"),
labels=c("none", "constant", "trend"), title="Deterministic Variable in Cointegration")
rightFrame <- tkframe(top)
lagFrame <- tkframe(rightFrame)
lagVariable <- tclVar("2")
lagField <- tkentry(lagFrame, width="2", textvariable=lagVariable)
dumFrame <- tkframe(rightFrame)
# "NULL" as text means: pass dumvar = NULL (no dummies) to ca.jo().
dumVariable <- tclVar("NULL")
dumField <- tkentry(dumFrame, width="8", textvariable=dumVariable)
tkgrid(tklabel(modelFrame, text="Enter name for model:"), model, sticky="w")
tkgrid(modelFrame, sticky="w")
tkgrid(getFrame(xBox), sticky="nw")
tkgrid(tklabel(rightFrame, text=""), sticky="w")
tkgrid(tklabel(lagFrame, text="Lag order = ", fg="blue"), lagField, sticky="w")
tkgrid(lagFrame, sticky="w")
tkgrid(tklabel(dumFrame, text="Matrix of dummy variables = ", fg="blue"), dumField, sticky="w")
tkgrid(dumFrame, sticky="w")
tkgrid(typeFrame, rightFrame, sticky="nw")
tkgrid(specFrame, seasonFrame, sticky="nw")
tkgrid(ecdetFrame, rightFrame, sticky="w")
tkgrid(buttonsFrame, sticky="w")
tkgrid.configure(lagField, sticky="e")
tkgrid.configure(dumField, sticky="e")
dialogSuffix(rows=8, columns=2)
}
#
# Linear Trend test
#
# Rcmdr dialog for urca::lttest(): LR test for a linear trend in a fitted
# VECM (ca.jo object), given the cointegration rank r.  The user picks
# one fitted model and enters r; the command is echoed and run.
urcalttest <- function(){
  fittedModels <- listVECMmodels()
  if (length(fittedModels) == 0){
    tkmessageBox(message="There are no VECM models from which to choose.", icon="error", type="ok")
    tkfocus(CommanderWindow())
    return()
  }
  initializeDialog(title="Linear Trend Test")
  modelBox <- variableListBox(top, fittedModels, title="VECM models (pick one)")
  # OK-button callback; bound by name via OKCancelHelp() below.
  onOK <- function(){
    chosen <- getSelection(modelBox)
    if (length(chosen) == 0){
      errorCondition(recall=urcalttest, message="You must select a VECM model.")
      return()
    }
    rank <- tclvalue(rankVariable)
    if (GrabFocus()) tkgrab.release(top)
    tkdestroy(top)
    doItAndPrint(paste0("lttest(z = ", chosen, ", r = ", rank, ")"))
    tkfocus(CommanderWindow())
  }
  OKCancelHelp(helpSubject="lttest")
  rankFrame <- tkframe(top)
  rankVariable <- tclVar("1")   # default cointegration rank
  rankField <- tkentry(rankFrame, width="2", textvariable=rankVariable)
  tkgrid(getFrame(modelBox), sticky="nw")
  tkgrid(rankFrame, sticky="w")
  tkgrid(tklabel(rankFrame, text="Number of cointegrating relationships = ", fg="blue"), rankField, sticky="w")
  tkgrid(buttonsFrame, sticky="w")
  tkgrid.configure(rankField, sticky="e")
  dialogSuffix(rows=3, columns=2)
}
#
# Restrictions on Loading matrix
#
# Rcmdr dialog for urca::alrtest(): LR test of linear restrictions on the
# loading vectors (alpha) of a fitted VECM.  The user picks a fitted
# ca.jo model, a restriction matrix A and the cointegration rank r; the
# dialog echoes and runs summary(alrtest(...)).
urcaalrtest <- function(){
VECMmodels <- listVECMmodels()
matrices <- listMatrix()
if (length(VECMmodels) == 0){
tkmessageBox(message="There are no VECM models from which to choose.", icon="error", type="ok")
tkfocus(CommanderWindow())
return()
}
if (length(matrices) == 0){
tkmessageBox(message="There are no restriction matrices defined from which to choose.", icon="error", type="ok")
tkfocus(CommanderWindow())
return()
}
initializeDialog(title="Test Restrictions on Loading Vectors")
xBox <- variableListBox(top, VECMmodels, title="VECM models (pick one)")
yBox <- variableListBox(top, matrices, title="Restriction matrices (pick one)")
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
if (length(x) == 0){
errorCondition(recall=urcaalrtest, message="You must select a VECM model.")
return()
}
y <- getSelection(yBox)
if (length(y) == 0){
errorCondition(recall=urcaalrtest, message="You must select a restriction matrix.")
return()
}
rint <- tclvalue(rankVariable)
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
doItAndPrint(paste("summary(alrtest(z = ", x, ", A = ", y , ", r = ", rint, "))", sep=""))
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="alrtest")
rankFrame <- tkframe(top)
rankVariable <- tclVar("1")
rankField <- tkentry(rankFrame, width="2", textvariable=rankVariable)
tkgrid(getFrame(xBox), getFrame(yBox), sticky="nw")
tkgrid(rankFrame, sticky="w")
tkgrid(tklabel(rankFrame, text="Number of cointegrating relationships = ", fg="blue"), rankField, sticky="w")
tkgrid(buttonsFrame, sticky="w")
tkgrid.configure(rankField, sticky="e")
dialogSuffix(rows=3, columns=1)
}
#
# Restrictions on cointegration matrix
#
# Rcmdr dialog for urca::blrtest(): LR test of linear restrictions on the
# cointegration vectors (beta) of a fitted VECM.  The user picks a fitted
# ca.jo model, a restriction matrix H and the cointegration rank r; the
# dialog echoes and runs summary(blrtest(...)).
urcablrtest <- function(){
  VECMmodels <- listVECMmodels()
  matrices <- listMatrix()
  if (length(VECMmodels) == 0){
    tkmessageBox(message="There are no VECM models from which to choose.", icon="error", type="ok")
    tkfocus(CommanderWindow())
    return()
  }
  if (length(matrices) == 0){
    tkmessageBox(message="There are no restriction matrices defined from which to choose.", icon="error", type="ok")
    tkfocus(CommanderWindow())
    return()
  }
  initializeDialog(title="Test Restrictions on Cointegration Vectors")
  xBox <- variableListBox(top, VECMmodels, title="VECM models (pick one)")
  yBox <- variableListBox(top, matrices, title="Restriction matrices (pick one)")
  # OK-button callback; picked up by name by OKCancelHelp() below.
  onOK <- function(){
    x <- getSelection(xBox)
    if (length(x) == 0){
      # BUG FIX: recall previously pointed at urcaablrtest, so an input
      # error re-opened the wrong dialog; re-open *this* dialog instead.
      errorCondition(recall=urcablrtest, message="You must select a VECM model.")
      return()
    }
    y <- getSelection(yBox)
    if (length(y) == 0){
      # BUG FIX: recall previously pointed at urcaalrtest (wrong dialog).
      errorCondition(recall=urcablrtest, message="You must select a restriction matrix.")
      return()
    }
    rint <- tclvalue(rankVariable)   # cointegration rank r (text entry)
    if (GrabFocus()) tkgrab.release(top)
    tkdestroy(top)
    doItAndPrint(paste("summary(blrtest(z = ", x, ", H = ", y , ", r = ", rint, "))", sep=""))
    tkfocus(CommanderWindow())
  }
  OKCancelHelp(helpSubject="blrtest")
  rankFrame <- tkframe(top)
  rankVariable <- tclVar("1")
  rankField <- tkentry(rankFrame, width="2", textvariable=rankVariable)
  tkgrid(getFrame(xBox), getFrame(yBox), sticky="nw")
  tkgrid(rankFrame, sticky="w")
  tkgrid(tklabel(rankFrame, text="Number of cointegrating relationships = ", fg="blue"), rankField, sticky="w")
  tkgrid(buttonsFrame, sticky="w")
  tkgrid.configure(rankField, sticky="e")
  dialogSuffix(rows=3, columns=1)
}
#
# Restrictions for partly known cointegrating vectors
#
# Rcmdr dialog for urca::bh5lrtest(): test the validity of a partly known
# cointegration vector H in a fitted VECM.  The H candidate list offers
# both matrices and numeric vectors from the workspace.
urcabh5lrtest <- function(){
VECMmodels <- listVECMmodels()
matrices <- listMatrix()
numerics <- listNumeric()
# H may be supplied as a matrix or a plain numeric vector.
elements <- c(matrices, numerics)
if (length(VECMmodels) == 0){
tkmessageBox(message="There are no VECM models from which to choose.", icon="error", type="ok")
tkfocus(CommanderWindow())
return()
}
# NOTE(review): the guard only checks `matrices`; a workspace with
# numeric vectors but no matrices is rejected even though `elements`
# would be non-empty -- confirm whether that is intended.
if (length(matrices) == 0){
tkmessageBox(message="There are no restriction matrices defined from which to choose.", icon="error", type="ok")
tkfocus(CommanderWindow())
return()
}
initializeDialog(title="Test validity of partly known cointegrating Vectors")
xBox <- variableListBox(top, VECMmodels, title="VECM models (pick one)")
yBox <- variableListBox(top, elements, title="Restriction matrices (pick one)")
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
if (length(x) == 0){
errorCondition(recall=urcabh5lrtest, message="You must select a VECM model.")
return()
}
y <- getSelection(yBox)
if (length(y) == 0){
errorCondition(recall=urcabh5lrtest, message="You must select a restriction matrix.")
return()
}
rint <- tclvalue(rankVariable)
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
doItAndPrint(paste("summary(bh5lrtest(z = ", x, ", H = ", y , ", r = ", rint, "))", sep=""))
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="bh5lrtest")
rankFrame <- tkframe(top)
rankVariable <- tclVar("2")
rankField <- tkentry(rankFrame, width="2", textvariable=rankVariable)
tkgrid(getFrame(xBox), getFrame(yBox), sticky="nw")
tkgrid(rankFrame, sticky="w")
tkgrid(tklabel(rankFrame, text="Number of cointegrating relationships = ", fg="blue"), rankField, sticky="w")
tkgrid(buttonsFrame, sticky="w")
tkgrid.configure(rankField, sticky="e")
dialogSuffix(rows=3, columns=1)
}
#
# Restrictions for partly known cointegrating vectors
#
# Rcmdr dialog for urca::bh6lrtest(): test restrictions on a subset of
# the cointegration space of a fitted VECM.  The user picks a fitted
# ca.jo model, a restriction matrix (or numeric vector) H, the total rank
# r, the restricted rank r1, and the iteration controls of the
# estimation; the dialog echoes and runs summary(bh6lrtest(...)).
urcabh6lrtest <- function(){
  VECMmodels <- listVECMmodels()
  matrices <- listMatrix()
  numerics <- listNumeric()
  # H may be supplied as a matrix or a plain numeric vector.
  elements <- c(matrices, numerics)
  if (length(VECMmodels) == 0){
    tkmessageBox(message="There are no VECM models from which to choose.", icon="error", type="ok")
    tkfocus(CommanderWindow())
    return()
  }
  if (length(matrices) == 0){
    tkmessageBox(message="There are no restriction matrices defined from which to choose.", icon="error", type="ok")
    tkfocus(CommanderWindow())
    return()
  }
  initializeDialog(title="Test restrictions of partly known cointegrating Vectors")
  xBox <- variableListBox(top, VECMmodels, title="VECM models (pick one)")
  yBox <- variableListBox(top, elements, title="Restriction matrices (pick one)")
  # OK-button callback; picked up by name by OKCancelHelp() below.
  onOK <- function(){
    x <- getSelection(xBox)
    if (length(x) == 0){
      errorCondition(recall=urcabh6lrtest, message="You must select a VECM model.")
      return()
    }
    y <- getSelection(yBox)
    if (length(y) == 0){
      # BUG FIX: recall previously pointed at urcaalrtest, so an input
      # error re-opened a different dialog; re-open *this* dialog.
      errorCondition(recall=urcabh6lrtest, message="You must select a restriction matrix.")
      return()
    }
    rint <- tclvalue(rankVariable)      # total cointegration rank r
    r1int <- tclvalue(r1Variable)       # rank of the restricted subspace r1
    maxiter <- tclvalue(maxiterVariable)
    conv <- tclvalue(conVariable)
    if (GrabFocus()) tkgrab.release(top)
    tkdestroy(top)
    doItAndPrint(paste("summary(bh6lrtest(z = ", x, ", H = ", y , ", r = ", rint, ", r1 =", r1int, ", conv.val =", conv, ", max.iter =", maxiter, "))", sep=""))
    tkfocus(CommanderWindow())
  }
  OKCancelHelp(helpSubject="bh6lrtest")
  rankFrame <- tkframe(top)
  r1Frame <- tkframe(top)
  rankVariable <- tclVar("2")
  r1Variable <- tclVar("1")
  rightFrame <- tkframe(top)
  maxiterFrame <- tkframe(rightFrame)
  maxiterVariable <- tclVar("50")       # default max.iter for bh6lrtest
  maxiterField <- tkentry(maxiterFrame, width="4", textvariable=maxiterVariable)
  conFrame <- tkframe(rightFrame)
  conVariable <- tclVar("0.0001")       # default convergence criterion
  conField <- tkentry(conFrame, width="8", textvariable=conVariable)
  tkgrid(tklabel(rightFrame, text=""))
  rankField <- tkentry(rankFrame, width="2", textvariable=rankVariable)
  r1Field <- tkentry(r1Frame, width="2", textvariable=r1Variable)
  tkgrid(getFrame(xBox), getFrame(yBox), sticky="nw")
  tkgrid(rankFrame, rightFrame, sticky="w")
  tkgrid(tklabel(rankFrame, text="Number of cointegrating relationships = ", fg="blue"), rankField, sticky="w")
  tkgrid(r1Frame, rightFrame, sticky="w")
  tkgrid(tklabel(r1Frame, text="Number of restricted ci relationships = ", fg="blue"), r1Field, sticky="w")
  tkgrid(maxiterFrame, sticky="w")
  tkgrid(tklabel(maxiterFrame, text="Maximum number of iterations = ", fg="blue"), maxiterField, sticky="w")
  tkgrid(conFrame, sticky="w")
  tkgrid(tklabel(conFrame, text="Convergence criteria = ", fg="blue"), conField, sticky="w")
  tkgrid(buttonsFrame, sticky="w")
  tkgrid.configure(rankField, sticky="e")
  tkgrid.configure(r1Field, sticky="e")
  tkgrid.configure(maxiterField, sticky="e")
  tkgrid.configure(conField, sticky="e")
  dialogSuffix(rows=4, columns=2)
}
#
# Restrictions for loading and cointegration matrix
#
# Rcmdr dialog for urca::ablrtest(): joint LR test of restrictions on
# both the cointegration vectors (H) and the loading vectors (A) of a
# fitted VECM.  The user picks a fitted ca.jo model, one matrix for each
# restriction, and the cointegration rank r.
urcaablrtest <- function(){
VECMmodels <- listVECMmodels()
matrices <- listMatrix()
if (length(VECMmodels) == 0){
tkmessageBox(message="There are no VECM models from which to choose.", icon="error", type="ok")
tkfocus(CommanderWindow())
return()
}
if (length(matrices) == 0){
tkmessageBox(message="There are no restriction matrices defined from which to choose.", icon="error", type="ok")
tkfocus(CommanderWindow())
return()
}
initializeDialog(title="Test Restrictions on Loading & Cointegration vectors")
xBox <- variableListBox(top, VECMmodels, title="VECM models (pick one)")
yBox <- variableListBox(top, matrices, title="Restriction matrices for cointegration vectors (pick one)")
lBox <- variableListBox(top, matrices, title="Restriction matrices for loading vectors (pick one)")
# OK-button callback; picked up by name by OKCancelHelp() below.
onOK <- function(){
x <- getSelection(xBox)
if (length(x) == 0){
errorCondition(recall=urcaablrtest, message="You must select a VECM model.")
return()
}
y <- getSelection(yBox)
if (length(y) == 0){
errorCondition(recall=urcaablrtest, message="You must select a cointegration restriction matrix.")
return()
}
l <- getSelection(lBox)
if (length(l) == 0){
errorCondition(recall=urcaablrtest, message="You must select a loading restriction matrix.")
return()
}
rint <- tclvalue(rankVariable)
if (GrabFocus()) tkgrab.release(top)
tkdestroy(top)
doItAndPrint(paste("summary(ablrtest(z = ", x, ", H = ", y, ", A = ", l, ", r = ", rint, "))", sep=""))
tkfocus(CommanderWindow())
}
OKCancelHelp(helpSubject="ablrtest")
rankFrame <- tkframe(top)
rankVariable <- tclVar("1")
rankField <- tkentry(rankFrame, width="2", textvariable=rankVariable)
tkgrid(getFrame(xBox), sticky="nw")
tkgrid(getFrame(yBox), getFrame(lBox), sticky="nw")
tkgrid(rankFrame, sticky="w")
tkgrid(tklabel(rankFrame, text="Number of cointegrating relationships = ", fg="blue"), rankField, sticky="w")
tkgrid(buttonsFrame, sticky="w")
tkgrid.configure(rankField, sticky="e")
dialogSuffix(rows=3, columns=2)
}
#
# VECM with a structural shift
#
# Rcmdr dialog for urca::cajolst(): fit a VECM allowing for a level shift
# at an unknown point in time.  The user picks a data set, a lag order,
# a seasonality option, and whether the auxiliary regressions include a
# linear trend.  The fitted object is stored in .GlobalEnv under a
# user-chosen model name (default "VECMmodel.<n>") and summarised; the
# global model counter is rolled back if the dialog is aborted.
urcacajolst <- function(){
  dataSets <- listDataSets()
  if (length(dataSets) == 0){
    tkmessageBox(message="There are no data sets from which to choose.", icon="error", type="ok")
    tkfocus(CommanderWindow())
    return()
  }
  initializeDialog(title="VECM with level shift")
  xBox <- variableListBox(top, dataSets, title="Data Sets (pick one)")
  # NOTE(review): UpdateModelNumber is used as a plain variable here but
  # is *called* as a function in urcacajo() -- confirm which Rcmdr API
  # version this file targets.
  assign("UpdateModelNumber", UpdateModelNumber + 1, envir=.GlobalEnv)
  modelName <- tclVar(paste("VECMmodel.", UpdateModelNumber, sep=""))
  modelFrame <- tkframe(top)
  model <- tkentry(modelFrame, width="20", textvariable=modelName)
  # OK-button callback; picked up by name by OKCancelHelp() below.
  onOK <- function(){
    x <- getSelection(xBox)
    if (length(x) == 0){
      # BUG FIX: recall previously pointed at urcacajo, so an input error
      # re-opened the plain Johansen dialog; re-open *this* dialog.
      errorCondition(recall=urcacajolst, message="You must select a data set.")
      return()
    }
    # Checkbox state rendered as the string "TRUE"/"FALSE" for the command.
    ttype <- if(tclvalue(trendVariable) == 0) "FALSE" else "TRUE"
    lags <- tclvalue(lagVariable)
    seas <- tclvalue(seasonVariable)
    if (GrabFocus()) tkgrab.release(top)
    tkdestroy(top)
    modelValue <- tclvalue(modelName)
    # Reject syntactically invalid model names and roll back the counter.
    if (!is.valid.name(modelValue)){
      assign("UpdateModelNumber", UpdateModelNumber - 1, envir=.GlobalEnv)
      errorCondition(recall=urcacajolst, message=paste('"', modelValue, '" is not a valid name.', sep=""))
      return()
    }
    # If a model of this name exists, ask before overwriting; on "no",
    # roll back the counter and restart the dialog.
    if (is.element(modelValue, listVECMmodels())) {
      if ("no" == tclvalue(checkReplace(modelValue, type="Model"))){
        assign("UpdateModelNumber", UpdateModelNumber - 1, envir=.GlobalEnv)
        if (GrabFocus()) tkgrab.release(top)
        tkdestroy(top)
        urcacajolst()
        return()
      }
    }
    # Build, log and evaluate the cajolst() call, then summarise.
    command <- paste("cajolst(x = ", x, ", trend = ", ttype, ", K = ", lags, ", season = ", seas, ")", sep="")
    logger(paste(modelValue, " <- ", command, sep=""))
    assign(modelValue, justDoIt(command), envir=.GlobalEnv)
    doItAndPrint(paste("summary(", modelValue, ")", sep=""))
    tkfocus(CommanderWindow())
  }
  OKCancelHelp(helpSubject="cajolst")
  # Side effect: creates seasonVariable/seasonFrame in this frame.
  radioButtons(name="season", buttons=c("none", "monthly", "quarterly"), values=c("NULL", "12", "4"),
               labels=c("None", "Monthly seasonality", "Quarterly seasonality"), title="Seasonality")
  # Side effect: creates trendVariable and trendFrame in this frame.
  checkBoxes(frame="trendFrame", boxes="trend", initialValues="1", labels="Include linear trend in the auxiliary regressions?")
  lagFrame <- tkframe(top)
  lagVariable <- tclVar("2")   # default lag order K
  lagField <- tkentry(lagFrame, width="2", textvariable=lagVariable)
  tkgrid(tklabel(modelFrame, text="Enter name for model:"), model, sticky="w")
  tkgrid(modelFrame, sticky="w")
  tkgrid(getFrame(xBox), sticky="nw")
  tkgrid(tklabel(lagFrame, text="Lag order = ", fg="blue"), lagField, sticky="w")
  tkgrid(lagFrame, sticky="w")
  tkgrid(trendFrame, sticky="w")
  tkgrid(buttonsFrame, sticky="w")
  tkgrid.configure(lagField, sticky="e")
  dialogSuffix(rows=5, columns=1)
}
#
# Utility Functions
#
# List the names of all objects in `envir` whose primary class is "ca.jo"
# (fitted VECMs from urca::ca.jo / cajolst).
#
# FIX: replaced eval(parse(text = ...)) with get() (safer, also handles
# non-syntactic names) and the type-unstable sapply() with vapply().
# Returns NULL when the environment is empty, character(0) when it has
# objects but no VECM models (matching the original behavior).
listVECMmodels <- function(envir=.GlobalEnv, ...) {
  objects <- ls(envir=envir, ...)
  if (length(objects) == 0) return(NULL)
  is_vecm <- vapply(objects,
                    function(name) identical(class(get(name, envir=envir))[1], "ca.jo"),
                    logical(1))
  objects[is_vecm]
}
# List the names of all objects in `envir` whose primary class is
# "matrix" (candidate restriction matrices for the urca dialogs).
#
# FIX: replaced eval(parse(text = ...)) with get() and the type-unstable
# sapply() with vapply().  Returns NULL for an empty environment,
# character(0) when nothing matches (matching the original behavior).
listMatrix <- function(envir=.GlobalEnv, ...) {
  objects <- ls(envir=envir, ...)
  if (length(objects) == 0) return(NULL)
  is_matrix <- vapply(objects,
                      function(name) identical(class(get(name, envir=envir))[1], "matrix"),
                      logical(1))
  objects[is_matrix]
}
# List the names of all objects in `envir` whose primary class is
# "numeric" (double vectors; note integer vectors have class "integer"
# and are deliberately NOT matched, preserving the original behavior).
#
# FIX: replaced eval(parse(text = ...)) with get() and the type-unstable
# sapply() with vapply().  Returns NULL for an empty environment,
# character(0) when nothing matches.
listNumeric <- function(envir=.GlobalEnv, ...) {
  objects <- ls(envir=envir, ...)
  if (length(objects) == 0) return(NULL)
  is_numeric <- vapply(objects,
                       function(name) identical(class(get(name, envir=envir))[1], "numeric"),
                       logical(1))
  objects[is_numeric]
}
|
472ddef489de643b7915d3727f4cc3dc4e13485f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/plaqr/examples/predictInt.Rd.R
|
9acfc4244525e8a1c77cbda7b5bbe8109ee232c3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
predictInt.Rd.R
|
# Extracted example script for plaqr::predictInt(): fits a partially
# linear additive quantile regression on the bundled simData set and
# computes a 95% prediction interval from the fit.
library(plaqr)
### Name: predictInt
### Title: Prediction Interval for Quantile Regression
### Aliases: predictInt
### ** Examples
data(simData)
# Linear part: all remaining columns; nonlinear (additive) part: z1, z2.
fit <- plaqr(y~.,~z1+z2,data=simData)
predictInt(fit, level=.95)
|
26fcdd2c26bb4765da001d9660e7e8d34b9c7ad0
|
03fb214812a36c4408fd59107b333f144f4de1f8
|
/R/data.R
|
ac83354113772613729473375cc6ad1efe0eea40
|
[] |
no_license
|
braverock/quantstrat
|
e8a911fac4fd73d1dc6623706a3bcbec72c81d69
|
3e660300b322bb63dcb7659a26304fe4e8d4a693
|
refs/heads/master
| 2023-02-07T16:28:50.251525
| 2023-02-04T20:29:30
| 2023-02-04T20:29:30
| 58,736,659
| 282
| 132
| null | 2022-06-25T02:20:08
| 2016-05-13T12:07:31
|
R
|
UTF-8
|
R
| false
| false
| 840
|
r
|
data.R
|
#' AAPL time series of daily OHLCVA bars
#'
#' A dataset containing the daily OHLCVA values of AAPL from 2017-01-03 to
#' 2018-12-28. The code to reproduce the dataset from yahoo is:
#'
#' getSymbols(symbol, from = start_date, to = end_date, auto.assign = TRUE, index.class = "POSIXct", src = 'yahoo')
#' for(i in symbols) assign(i, adjustOHLC(get(i), use.Adjusted = TRUE))
#'
#' We have chosen to fix the dataset, since we have no control over changes at
#' the source, which would break our tests.
#'
#' @usage data(AAPL)
#'
#' @format An xts object with 501 rows and 6 variables:
#' \describe{
#'   \item{Open}{Daily Open prices}
#'   \item{High}{Daily High prices}
#'   \item{Low}{Daily Low prices}
#'   \item{Close}{Daily Close prices}
#'   \item{Volume}{Aggregate Daily volume traded}
#'   \item{Adjusted}{Adjusted prices}
#' }
#'
"AAPL"
|
8bb31430e1776b53eb1059c4b0c881b9fe7ed235
|
767958fe7b80b709e4a5e19fb4113c55e20d985a
|
/03.Report.R
|
a5d429e6e00719bbdb5ff16f667a2c24644ef4da
|
[] |
no_license
|
aashoaibi/mw
|
1a7a514b55cf2b81c1f286ef196f378bda94acbd
|
cca58ff200693fac14b1bb18f16ce8110446331f
|
refs/heads/master
| 2022-02-21T03:44:19.985777
| 2017-02-24T20:19:42
| 2017-02-24T20:19:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,965
|
r
|
03.Report.R
|
#!/usr/local/bin/Rscript
################################################################################
### Format files given as arguments and report results from simulation
##
## Created on: 2015-04-09
## Author: Kazuki Yoshida
################################################################################
### Prepare environment
################################################################################
## Configure sink(): when run via Rscript, commandArgs() contains a "--file="
## entry; mirror all console output into "<scriptname>.txt" as well.
if (sink.number() != 0) {sink()}
..scriptFileName.. <- gsub("^--file=", "", Filter(function(x) {grepl("^--file=", x)}, commandArgs()))
if (length(..scriptFileName..) == 1) {
  sink(file = paste0(..scriptFileName.., ".txt"), split = TRUE)
  options(width = 120)
}
## Record start time (total run time is reported at the end of the script)
start_time <- Sys.time()
cat("### Started  ", as.character(start_time), "\n")
## Load packages
library(magrittr)
library(dplyr)
library(reshape2)
library(tidyr)
library(ggplot2)
library(grid)
library(gridExtra)
## Project helpers: balance checking, simulation, and reporting functions
## (Report(), PickMatchNames(), Gather() used below come from these files)
source("./function_definitions/05.BalanceCheck.R")
source("./function_definitions/07.Simulate.R")
source("./function_definitions/08.ReportingFunctions.R")
cat("###
### Load data files
################################################################################\n")
## List all ./data/Result* files produced by the simulation step
resFiles <- system("ls ./data/Result*.RData", intern = TRUE)
## Stop if no file is specified
stopifnot(length(resFiles) > 0 & all(!is.na(resFiles)))
cat("### Files to be read\n")
print(resFiles)
## Load each result file into a list element carrying the per-iteration
## results (dfOut), scenario parameters (lstParams), iteration count (R)
## and scenario index (scenarioCount).
lstRes <- lapply(resFiles, function(file) {
  load(file)
  list(dfOut = dfOut, lstParams = lstParams, R = R, scenarioCount = scenarioCount)
})
## Summarize each scenario with Report() and stack into one data frame,
## one row per scenario.
reports <- lapply(lstRes, function(res) {
  Report(scenarioCount = res$scenarioCount,
         lstParams = res$lstParams,
         dfOut = res$dfOut)
}) %>% do.call(rbind, .) %>% as.data.frame
cat("###
### Name scenarios
################################################################################\n")
## Original sample size
reports$N <- factor(reports$N)
## Strength of covariate-treatment association (stronger = poor covariate overlap)
reports$XT_assoc <- factor(reports$XT_assoc, levels = c(1,5),
labels = c("Good overlap", "Poor overlap"))
## Exposure prevalence
reports$pExpo <- factor(reports$pT.2, levels = c(33,45,80),
labels = c("33:33:33", "10:45:45", "10:10:80"))
## Main effects
reports$effects <- factor((reports$betaT1 != 0), levels = c(FALSE,TRUE),
labels = c("Null main effects", "Non-null main effects"))
## Effect modification
reports$em <- factor((reports$betaTX1 != 0), levels = c(FALSE,TRUE),
labels = c("Modification (-)", "Modification (+)"))
## Disease prevalence
reports$pDis <- factor(reports$beta0, levels = sort(unique(reports$beta0)),
labels = exp(sort(unique(reports$beta0))))
## Combine with N
## reports$pDis <- interaction(reports$pDis, reports$N, sep = ":")
## Specify index variables
indexVars <- c("scenario","N","XT_assoc","pExpo","effects","em","pDis")
cat("### Show scenarios\n")
reports[indexVars]
save(reports, indexVars, file = "./data/Report.RData")
cat("###
### Numerical Examination
################################################################################\n")
cat("### Check if there are missing values (corrupt scenarios)\n")
colSumNA <- colSums(is.na(reports))
colSumNA[colSumNA != 0]
cat("### Check if caliper widening ever happened\n")
reports[reports$caliperPw > 0,c("scenario","XT_assoc",paste0("pT.", c(0,1,2)))]
cat("### Magnitude of bias\n")
## Better if closer to 1 (closer to 0 on log scale)
absLogBiasM <- abs(log(reports[,PickMatchNames(names(reports), "M.biasRR")]))
absLogBiasMw <- abs(log(reports[,PickMatchNames(names(reports), "Mw.biasRR")]))
absLogBiasRatio <- absLogBiasM / absLogBiasMw
cat("### Proportion M > Mw using entire sample true value\n")
mean(absLogBiasRatio[,1:3] > 1)
cat("### Proportion M > Mw using common support true value\n")
mean(absLogBiasRatio[,4:6] > 1)
cat("### Magnitude of variance\n")
## Better if closer to 1 (closer to 0 on log scale)
trueVarM <- reports[,PickMatchNames(names(reports), "M.trueV")]
trueVarMw <- reports[,PickMatchNames(names(reports), "Mw.trueV")]
trueVarIp <- reports[,PickMatchNames(names(reports), "Ip.trueV")]
cat("### Proportion M > Mw\n")
mean((trueVarM / trueVarMw)[,1:3] > 1)
cat("### Proportion Ip > Mw\n")
mean((trueVarIp / trueVarMw)[,1:3] > 1)
cat("### Proportion M > Ip\n")
mean((trueVarM / trueVarIp)[,1:3] > 1)
cat("### Magnitude of MSE\n")
## Better if closer to 1 (closer to 0 on log scale)
mseM <- reports[,PickMatchNames(names(reports), "M.mse\\.")]
mseMw <- reports[,PickMatchNames(names(reports), "Mw.mse\\.")]
mseIp <- reports[,PickMatchNames(names(reports), "Ip.mse\\.")]
cat("### Proportion M > Mw\n")
mean((mseM / mseMw)[,1:3] > 1)
cat("### Proportion Ip > Mw\n")
mean((mseIp / mseMw)[,1:3] > 1)
cat("### Proportion M > Ip\n")
mean((mseM / mseIp)[,1:3] > 1)
cat("### False positive rate\n")
## Better if closer to 1 (closer to 0 on log scale)
dat <- Gather(reports, indexVars, "pRej")
dat <- subset(dat, effects == "Null main effects" & em == "Modification (-)")
dat %>%
group_by(method) %>%
summarize(mean = mean(value),
totalNull = n(),
antiCon05 = sum(value > 0.05),
antiCon06 = sum(value > 0.06),
antiCon07 = sum(value > 0.07))
cat("###
### Graphical Examination
################################################################################\n")
## Name graph using the date and time
pdf(file = "./figures/_ExperimentalFigures.pdf", width = 14, height = 10, family = "sans")
## Graph prototype
gg <- ggplot(mapping = aes(x = key, y = value)) +
geom_point() +
labs(y = "", x = "") +
theme_bw() + theme(legend.key = element_blank(),
axis.text.x = element_text(angle = 90))
cat("### Sample size\n")
## dat <- Gather(reports, indexVars, "\\.n$")
## gg %+% dat +
## aes(group = scenario, color = pExpo, x = method) +
## geom_line(size = 0.1) +
## facet_grid(. ~ XT_assoc) +
## labs(title = "Sample size") +
## geom_hline(yintercept = reports$U.n[1])
## dat <- Gather(reports, indexVars, "\\.nCs$")
## gg %+% dat +
## aes(group = scenario, color = pExpo, x = method) +
## geom_line(size = 0.1) +
## facet_grid(. ~ XT_assoc) +
## labs(title = "Sample size (common support)") +
## geom_hline(yintercept = reports$U.n[1])
dat <- Gather(reports, indexVars, "\\.n$|\\.nCs$")
dat$key <- factor(gsub(".*\\.", "", as.character(dat$key)), levels = c("n","nCs"),
labels = c("All", "Within common support"))
gg %+% dat +
aes(group = scenario, color = pExpo, x = method) +
geom_line(size = 0.1) +
facet_grid(XT_assoc ~ key) +
labs(title = "Sample size (All vs Within common support)") +
scale_y_continuous(limit = c(0,NA)) +
geom_hline(yintercept = unique(reports$U.n))
cat("### SMD\n")
dat <- Gather(reports, indexVars, "smd")
dat$key <- gsub("^.*\\.", "", as.character(dat$key))
dat$key <- factor(dat$key, levels = paste0("X", 1:10))
gg %+% dat +
labs(title = "SMD") +
aes(group = scenario, color = pExpo) +
geom_line() +
facet_grid(XT_assoc ~ method) +
scale_y_continuous() +
coord_cartesian(ylim = c(0, 0.5)) +
geom_hline(yintercept = 0.10, size = 0.01) +
geom_hline(yintercept = 0.00)
cat("### Prevalence of modifier\n")
dat <- Gather(reports, indexVars, "pMod$")
gg %+% dat +
aes(group = scenario, color = pExpo, x = method) +
geom_line(alpha = 0.5) +
scale_y_continuous(limit = c(0, NA)) +
facet_grid(. ~ XT_assoc) +
labs(title = "Prevalence of modifier", x = "")
cat("### Bias related\n")
dat <- Gather(reports, indexVars, "coef")
dat$value <- exp(dat$value)
dat$trueValue <- Gather(reports, indexVars, "trueRR\\.")$value
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
labs(x = "") +
## To indicate the true values
geom_point(mapping = aes(y = trueValue), shape = 4, alpha = 1/5, size = 5) +
geom_line() +
scale_y_log10(breaks = c(1/4, 1/2, 1, 2, 4)) +
coord_cartesian(ylim = c(1/4, 4)) +
facet_grid(effects + XT_assoc ~ em + contrast) +
labs(title = "RR along with with true RR", x = "")
dat <- Gather(reports, indexVars, "trueRR\\.")
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
geom_line() +
scale_y_log10(breaks = c(1/4, 1/2, 1, 2, 4)) +
coord_cartesian(ylim = c(1/4, 4)) +
facet_grid(effects + XT_assoc ~ em + contrast) +
labs(title = "True RR", x = "")
dat <- Gather(reports, indexVars, "biasRR\\.")
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
geom_line() +
scale_y_log10(breaks = c(1/4, 1/2, 1, 2, 4)) +
facet_grid(effects + XT_assoc ~ em + contrast) +
coord_cartesian(ylim = c(1/4, 4)) +
geom_hline(yintercept = 1) +
labs(title = "Bias (RR / true RR)")
cat("### Variance\n")
dat <- Gather(reports, indexVars, "trueV")
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
geom_line() +
facet_grid(effects + XT_assoc ~ em + contrast) +
coord_cartesian(ylim = c(0, 2)) +
labs(title = "True variance", x = "")
dat <- Gather(reports, indexVars, "vars")
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
geom_line() +
facet_grid(effects + XT_assoc ~ em + contrast) +
coord_cartesian(ylim = c(0, 2)) +
labs(title = "Mean estimated variance", x = "")
datWide <-
reports[, c(PickMatchNames(names(reports), "vars"))] /
reports[, c(PickMatchNames(names(reports), "trueV"))]
datWide <- cbind(datWide, reports[, indexVars])
dat <- Gather(datWide, indexVars, "vars")
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
geom_line() +
facet_grid(effects + XT_assoc ~ em + contrast) +
geom_hline(yintercept = 1) +
labs(title = "Mean estimated variance / true variance", x = "")
cat("### MSE\n")
dat <- Gather(reports, indexVars, "mse\\.")
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
geom_line() +
facet_grid(effects + XT_assoc ~ em + contrast) +
coord_cartesian(ylim = c(0, 1.5)) +
labs(title = "MSE", x = "")
cat("### Alpha rate\n")
dat <- Gather(reports, indexVars, "pRej")
dat <- subset(dat, effects == "Null main effects" & em == "Modification (-)")
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
geom_line() +
facet_grid(XT_assoc ~ contrast) +
scale_y_continuous(breaks = seq(from = 0, to = 1, by = 0.2)) +
## coord_cartesian(ylim = c(0, 0.25)) +
geom_hline(yintercept = 0.05) +
labs(title = "Observed alpha error rate (null scenarios)", x = "")
cat("### Coverage\n")
dat <- Gather(reports, indexVars, "cvr\\.")
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = method) +
geom_line() +
facet_grid(effects + XT_assoc ~ em + contrast) +
## coord_cartesian(ylim = c(0.75, 1.00)) +
geom_hline(yintercept = 0.95) +
labs(title = "Coverage", x = "")
cat("### Minimum counts\n")
dat <- Gather(reports, indexVars, "NCases\\.")
dat$key <- gsub("minNCases\\.", "", as.character(dat$key))
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = key) +
geom_line() +
facet_grid(effects + XT_assoc ~ em + pDis) +
coord_cartesian(ylim = c(0, 25)) +
geom_hline(yintercept = 0) +
labs(title = "Minimum case counts", x = "")
dat <- Gather(reports, indexVars, "minX10\\.")
dat$key <- gsub("minX10\\.", "", as.character(dat$key))
gg %+% dat +
aes(group = scenario, color = pExpo, shape = pDis, x = key) +
geom_line() +
facet_grid(effects + XT_assoc ~ em + pDis) +
coord_cartesian(ylim = c(0, 25)) +
geom_hline(yintercept = 0) +
labs(title = "Minimum X10 counts", x = "")
dev.off()
cat("
###
### Production figures
################################################################################\n")
## http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/
## Color-blind friendly palette with black.
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#D55E00", "#CC79A7")[c(4,6,7)]
gg <- ggplot(mapping = aes(x = key, y = value)) +
labs(title = NULL, y = NULL, x = NULL) +
scale_color_manual(values = cbbPalette) +
scale_linetype_manual(values = c("solid", "dashed", "dotted")) +
theme_bw() + theme(legend.key = element_blank(),
legend.key.width = unit(1, "cm"),
legend.position = "bottom",
strip.background = element_blank(),
panel.margin = unit(0.5, "lines"))
### Figures
### Average sample size
pdf(file = "./figures/Figure2.pdf", width = 5, height = 5, family = "sans")
dat <- Gather(reports, indexVars, "\\.n$")
## Average across random variability with essentially same scenarios
dat <- dat %>%
group_by(pExpo,method,key,XT_assoc) %>%
summarize(value = mean(value))
gg %+% dat +
aes(group = pExpo, linetype = pExpo, x = method) +
geom_line() +
geom_point() +
facet_grid(. ~ XT_assoc) +
geom_hline(yintercept = c(reports$U.n[1], 0), alpha = 1/5) +
theme(panel.margin = unit(0, "lines"),
legend.margin = unit(0, "lines")) +
labs(title = NULL, x = NULL, y = "Sample Size")
dev.off()
### SMD
pdf(file = "./figures/Figure3.pdf", width = 5, height = 5, family = "sans")
dat <- Gather(reports, indexVars, "smd")
dat$key <- gsub("^.*\\.", "", as.character(dat$key))
dat$key <- factor(dat$key, levels = paste0("X", 1:10))
## Restrict to 3 covariates
dat <- subset(dat, key %in% paste0("X", c(1,4,7)))
## Average across random variability with essentially same scenarios
dat <- dat %>%
group_by(pExpo,method,key,XT_assoc) %>%
summarize(value = mean(value))
gg %+% dat +
aes(group = pExpo, linetype = pExpo, x = method) +
geom_line(alpha = 2/3) +
geom_point() +
facet_grid(XT_assoc ~ key, scales = "free") +
scale_y_continuous() +
coord_cartesian(ylim = c(0, 0.5)) +
geom_hline(yintercept = 0.10, size = 0.3, alpha = 3/5) +
geom_hline(yintercept = 0.00, alpha = 3/5) +
labs(title = NULL, x = NULL, y = "Absolute Standardized Mean Difference")
dev.off()
### eFigures
### Bias
pdf(file = "./figures/eFigure2.pdf", width = 10, height = 10, family = "sans")
dat <- Gather(reports, indexVars, "biasRR\\.")
gg %+% dat +
aes(group = scenario, linetype = pExpo, shape = pDis, x = method) +
geom_line(alpha = 2/3) +
geom_point(alpha = 2/3) +
scale_y_log10(breaks = c(1/4, 1/2, 3/4, 1, 1.5, 2, 3, 4)) +
facet_grid(effects + XT_assoc ~ em + contrast) +
coord_cartesian(ylim = c(3/4, 3)) +
geom_hline(yintercept = 1, alpha = 1/5) +
labs(title = NULL, x = NULL, y = "Bias (Estimated Risk Ratio / True Risk Ratio)")
dev.off()
### True RR
pdf(file = "./figures/eFigure3.pdf", width = 10, height = 10, family = "sans")
dat <- Gather(reports, indexVars, "trueRR\\.")
gg %+% dat +
aes(group = scenario, linetype = pExpo, x = method) +
geom_line(alpha = 2/3) +
scale_y_log10(breaks = c(1/4, 2/5, 1/2, 3/4, 1, 2, 4)) +
coord_cartesian(ylim = c(exp(-1), 1.1)) +
facet_grid(effects + XT_assoc ~ em + contrast) +
labs(title = NULL, x = NULL, y = "True Risk Ratio")
dev.off()
### True variance
pdf(file = "./figures/eFigure4.pdf", width = 10, height = 10, family = "sans")
dat <- Gather(reports, indexVars, "trueV")
gg %+% dat +
aes(group = scenario, linetype = pExpo, shape = pDis, x = method) +
geom_line() +
geom_point() +
facet_grid(effects + XT_assoc ~ em + contrast) +
coord_cartesian(ylim = c(0, 1)) +
labs(title = NULL, x = NULL, y = "True Variance")
dev.off()
### Mean estimated variance
pdf(file = "./figures/eFigure5.pdf", width = 10, height = 10, family = "sans")
dat <- Gather(reports, indexVars, "vars")
gg %+% dat +
aes(group = scenario, linetype = pExpo, shape = pDis, x = method) +
geom_line() +
geom_point() +
facet_grid(effects + XT_assoc ~ em + contrast) +
coord_cartesian(ylim = c(0, 1)) +
labs(title = NULL, x = NULL, y = "Estimated Variance")
dev.off()
### eFigure6.pdf is bootstrap variance figure created elsewhere
### MSE
pdf(file = "./figures/eFigure7.pdf", width = 10, height = 10, family = "sans")
dat <- Gather(reports, indexVars, "mse\\.")
gg %+% dat +
aes(group = scenario, linetype = pExpo, shape = pDis, x = method) +
geom_line() +
geom_point() +
facet_grid(effects + XT_assoc ~ em + contrast) +
coord_cartesian(ylim = c(0, 1.5)) +
labs(title = NULL, x = NULL, y = "Mean Squared Error")
dev.off()
### Type I error rate
pdf(file = "./figures/eFigure8.pdf", width = 5, height = 5, family = "sans")
dat <- Gather(reports, indexVars, "pRej")
dat <- subset(dat, effects == "Null main effects" & em == "Modification (-)")
gg %+% dat +
aes(group = scenario, linetype = pExpo, shape = pDis, x = method) +
geom_line() +
geom_point() +
facet_grid(XT_assoc ~ contrast) +
scale_y_continuous(breaks = seq(from = 0, to = 1, by = 0.2)) +
## coord_cartesian(ylim = c(0, 0.25)) +
geom_hline(yintercept = 0.05) +
labs(title = NULL, x = NULL, y = "Type I Error Rate")
dev.off()
### Coverage probability
pdf(file = "./figures/eFigure9.pdf", width = 10, height = 10, family = "sans")
dat <- Gather(reports, indexVars, "cvr\\.")
gg %+% dat +
aes(group = scenario, linetype = pExpo, shape = pDis, x = method) +
geom_line() +
geom_point() +
facet_grid(effects + XT_assoc ~ em + contrast) +
## coord_cartesian(ylim = c(0.75, 1.00)) +
geom_hline(yintercept = 0.95) +
labs(title = NULL, x = NULL, y = "Coverage Probability")
dev.off()
### eFigure10.pdf is empirical SMD figure created elsewhere
cat("
###
### Anomaly assessment
################################################################################\n")
### High variance for M
cat("### Assessment of very high variance in matching\n")
cat("### Show configuration for most extreme scenario\n")
scenarioNum <- which.max(reports$M.trueV.2v0)
reports[scenarioNum, c("pExpo","XT_assoc","effects","em","pDis")]
cat("### Extract iteration results\n")
res <- lstRes[[scenarioNum]]
cat("### Show coefficients distribution (very low coefficients)\n")
summary(res$dfOut[,PickMatchNames(names(reports), "coef")])
cat("### Show most extreme iterations\n")
res$dfOut[res$dfOut$M.coef.2v0 < min(res$dfOut$M.coef.2v0) + 1.0, ]
cat("### Iteration count of most extreme iteration\n")
as.numeric(rownames(res$dfOut[which.min(res$dfOut$M.coef.2v0),]))
### NA's in performance assessment (SMD for X10)
cat("### Check columns having NA's (This should be empty)\n")
## FIX: this previously used foreach() %do%, but the foreach package is not
## attached anywhere in this script (it would have to come in implicitly via
## a source()d helper). Base lapply() + rbind builds the same matrix -- one
## row per scenario of per-column NA counts -- without that hidden dependency.
countNa <- do.call(rbind, lapply(lstRes, function(lst) {
  colSums(is.na(lst$dfOut))
}))
## Show only the columns that contain at least one NA in some scenario
countNa[, colSums(countNa > 0) > 0]
### False positive scenarios
cat("### Check which scenarios are producing false positives\n")
reportsNull <- subset(reports, effects == "Null main effects" & em == "Modification (-)")
## List scenarios whose observed type I error rate exceeds given thresholds.
##
## indexVars:  character vector of scenario-identifying column names.
## data:       one-row-per-scenario report data frame (null scenarios only).
## pat:        regex passed to PickMatchNames() to pick the rejection-rate
##             (pRej) columns out of names(data).
## thresholds: numeric vector of alpha cut-offs to flag. The default
##             c(0.05, 0.06, 0.07) reproduces the previously hard-coded
##             behavior, so existing calls are unaffected.
##
## Returns a named list (names like "alpha005") of data frames, each holding
## the scenarios with at least one rejection rate above that threshold.
FalsePosScenarios <- function(indexVars, data, pat, thresholds = c(0.05, 0.06, 0.07)) {
  vars <- PickMatchNames(names(data), pat)
  out <- lapply(thresholds, function(thr) {
    data[rowSums(data[, vars] > thr) > 0, c(indexVars, vars)]
  })
  ## Name the elements alpha005, alpha006, ... exactly as before
  names(out) <- sprintf("alpha%03d", round(thresholds * 100))
  out
}
cat("### Violating scenarios for M\n")
FalsePosScenarios(indexVars, reportsNull, "M.pRej")
cat("### Violating scenarios for Mw\n")
FalsePosScenarios(indexVars, reportsNull, "Mw.pRej")
### Undercoverage scenarios
cat("### Check which scenarios are producing undercoverage\n")
## List scenarios with anomalous confidence-interval coverage.
##
## indexVars: character vector of scenario-identifying column names.
## data:      one-row-per-scenario report data frame.
## pat:       regex passed to PickMatchNames() to pick the coverage columns.
##
## Returns a named list of data frames:
##   cvr_all_gr097 / cvr_all_gr096: scenarios where more than two of the
##     selected coverage columns over-cover (above 0.97 / 0.96);
##   cvr_ls095 / cvr_ls094 / cvr_ls093: scenarios where any selected column
##     under-covers (below the stated level).
UnderCiScenarios <- function(indexVars, data, pat) {
  vars <- PickMatchNames(names(data), pat)
  ## All over coverage
  ## BUG FIX: "cvr_all_gr097" previously reused the 0.96 cut-off, making it an
  ## exact duplicate of "cvr_all_gr096"; it now uses 0.97 as its name states.
  list(cvr_all_gr097 = data[rowSums(data[,vars] > 0.97) > 2, c(indexVars, vars)],
       cvr_all_gr096 = data[rowSums(data[,vars] > 0.96) > 2, c(indexVars, vars)],
       ## Any undercoverage
       cvr_ls095 = data[rowSums(data[,vars] < 0.95) > 0, c(indexVars, vars)],
       cvr_ls094 = data[rowSums(data[,vars] < 0.94) > 0, c(indexVars, vars)],
       cvr_ls093 = data[rowSums(data[,vars] < 0.93) > 0, c(indexVars, vars)])
}
cat("### Violating scenarios for M\n")
UnderCiScenarios(indexVars, reports, "M.cvr\\.")
cat("### Violating scenarios for Mw\n")
UnderCiScenarios(indexVars, reports, "Mw.cvr\\.")
################################################################################
cat("\n### Record package versions\n")
print(sessionInfo())
## Record execution time
end_time <- Sys.time()
cat("### Started ", as.character(start_time), "\n")
cat("### Finished ", as.character(end_time), "\n")
print(end_time - start_time)
## Stop sinking to a file if active
if (sink.number() != 0) {sink()}
|
3798910eff9abb339b259ef2c49171e715dcc922
|
977e25b030bc27e923f52b08305a6dec2cfd02fd
|
/finance_basics_with_r/intro_r_finance/1_basics/financial_returns.R
|
2f6571592fec94e07c0e41eab3fd4bf1abc79450
|
[] |
no_license
|
printfCRLF/rr
|
d4cd813fafef7d64da2722ade9e14220c12e17ff
|
4116f726f5ad7a8cadbe6841d13abbdb998ee294
|
refs/heads/master
| 2021-04-15T15:08:37.032087
| 2019-07-12T08:29:26
| 2019-07-12T08:29:26
| 126,468,211
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,001
|
r
|
financial_returns.R
|
## Demonstrate a simple one-month return on a starting cash balance.
##
## Prints the end-of-January balance for a 5% return and then for a 10%
## return on an initial 200, and invisibly returns the 10%-return balance.
financial_returns1 <- function() {
  # Starting balance and January return (in percent)
  starting_cash <- 200
  jan_ret <- 5
  # Growth multiplier: a 5% return -> x1.05
  jan_mult <- 1 + (jan_ret / 100)
  # How much money do you have at the end of January?
  post_jan_cash <- starting_cash * jan_mult
  print(post_jan_cash)
  # Same calculation with a 10% January return.
  jan_ret_10 <- 10
  # BUG FIX: the multiplier previously hard-coded the literal 10 instead of
  # using jan_ret_10, leaving that variable unused.
  jan_mult_10 <- 1 + (jan_ret_10 / 100)
  post_jan_cash_10 <- starting_cash * jan_mult_10
  print(post_jan_cash_10)
  # Return the final balance invisibly (print() already returned it
  # invisibly in the original, so callers see the same value).
  invisible(post_jan_cash_10)
}
## Compound a starting balance through two months of returns.
##
## Prints (and invisibly returns) an initial 200 grown by 4% in January and
## 5% in February: 200 * 1.04 * 1.05.
financial_returns2 <- function() {
  # Initial balance and the two monthly returns, in percent
  starting_cash <- 200
  monthly_returns <- c(jan = 4, feb = 5)
  # Convert each percentage into a growth multiplier and compound them
  growth <- prod(1 + monthly_returns / 100)
  # Total cash at the end of the two months
  total_cash <- starting_cash * growth
  print(total_cash)
}
#financial_returns1()
financial_returns2()
|
9f26872b6fbc0f10afb2801ff64ed66c8b697674
|
efb8047b788654dd44c39c4e30008fd85022d390
|
/man/read.gantt.Rd
|
e667095b46a707bce2b80d2b4fd8630548258a13
|
[] |
no_license
|
marodtam/plan
|
1f318115bbe17c7ad33959ef4ddc5d5faba27743
|
44998e22a7ec0af0aa6e2b89c0d32d4b89f9e55f
|
refs/heads/master
| 2021-01-19T03:59:12.872536
| 2013-09-30T16:15:35
| 2013-09-30T16:15:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,161
|
rd
|
read.gantt.Rd
|
\name{read.gantt}
\alias{read.gantt}
\title{Read a gantt data file}
\description{Read a data file containing gantt information.}
\usage{read.gantt(file, debug=FALSE)}
\arguments{
\item{file}{a connection or a character string giving the name of the
file to load.}
\item{debug}{boolean, set to \code{TRUE} to print debugging
information.}
}
\details{Reads a \code{gantt} dataset.
The data format is strict, and deviations from it may lead to
error messages that are difficult to understand.
The first line is a header, and must contain the words \code{Key},
\code{Description}, \code{Start}, \code{End}, \code{Done}, and
\code{NeededBy}, written exactly in this way, with commas separating
the words. (Blanks are ignored in this line.)
Additional lines indicate the details of each of several sub-projects,
in comma-separated items, as follows:
\itemize{
\item A key for the task. These must be distinct, and are typically
just the numbers 1, 2, 3, etc.
\item A description of the task. (This may not contain commas!)
\item The start time for the task, in ISO 8601 format
(\code{YYYY-MM-DD} or \code{YYYY-MM-DD hh:mm:ss}).
\item The end time for the task, in the same format as the starting time.
\item A number indicating the percentage of this task that has been
completed to date.
\item A space-separated optional list of numbers that indicate the
keys of other tasks that depend on this one. This list is ignored
in the present version of \code{read.gantt}.
}
Executing the code
\preformatted{
library(plan)
data(gantt)
print(summary(gantt))
}
will create the following sample file, which may be read with
\code{\link{read.gantt}}:
\preformatted{
Key, Description, Start, End, Done, NeededBy
1, Assemble equipment, 2008-01-01, 2008-03-28, 90
2, Test methods, 2008-02-28, 2008-03-28, 30
3, Field sampling, 2008-04-01, 2008-08-14, 0
4, Analyse field data, 2008-06-30, 2008-11-14, 0
5, Write methods chapter, 2008-08-14, 2008-11-14, 0
6, Write results chapter, 2008-10-14, 2009-01-15, 0
7, Write other chapters, 2008-12-10, 2009-02-28, 0
8, Committee reads thesis, 2009-02-28, 2009-03-14, 0
9, Revise thesis, 2009-03-15, 2009-03-30, 0
10, Thesis on display, 2009-04-01, 2009-04-15, 0
11, Defend thesis, 2009-04-16, 2009-04-17, 0
12, Finalize thesis, 2009-04-18, 2009-05-07, 0
}
}
\value{An object of type \code{"gantt"}, which is a data frame containing
\code{"description"} (a character description of the task)
\code{"start"} (the task's start time),
\code{"end"} (the task's end time),
\code{"progress"} (a number giving the percent progress on this item, or
\code{NA} if none given), and
\code{"needed.by"} (the indices of the other tasks that rely
on this task, or \code{NA} if none given).
}
\seealso{\code{\link{summary.gantt}} and \code{\link{plot.gantt}}}
\examples{
\dontrun{
library(plan)
gantt <- read.gantt("demo/gantt.dat")
summary(gantt)
plot(gantt)
}
}
\author{Dan Kelley}
\keyword{misc}
|
f713307b57798b2ef184fc29274717079b4b3e11
|
cfb136e71d40639b8907373a27f52b634b502dae
|
/RNPN_download.R
|
ecfa3b2d3d76151c77c6afa501a057a3a227df89
|
[] |
no_license
|
mcrimmins/FallPhenology
|
26910db5724748de8bd90f7908a5d623b1d9fa19
|
a1b5bdde4484f108dc201be27d535fc727d306d6
|
refs/heads/master
| 2022-10-22T21:30:40.594144
| 2020-06-10T20:53:55
| 2020-06-10T20:53:55
| 270,773,879
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,994
|
r
|
RNPN_download.R
|
# NPN Fall Modeling
# R NPN package download
# MAC 05/29/20
# rnpn package
library(jsonlite)
library(RCurl)
library(reshape2)
library(tidyr)
library(dplyr)
library(geosphere)
library(data.table)
#library(DaymetR)
library(birk) # convert units
library(rnpn)
library(geosphere)
#spp<-npn_species()
# download data
dataQ<-npn_download_status_data(
request_source="FartFace2020",
years=c(seq(2012,2019,1)),
species_id=c("3"),
phenophase_ids = "498"
)
# adjust data types
dataQ$observation_date<-as.Date(dataQ$observation_date, format = "%Y-%m-%d")
dataQ$year<-as.numeric(format(dataQ$observation_date,"%Y"))
# thin data for mean model
dataQ<-dataQ[!(dataQ$intensity_value=="-9999"),]
dataQ<-dataQ[!(dataQ$intensity_value=="Less than 5%"),]
dataQ<-dataQ[(dataQ$day_of_year>=171),]
# only valid lat/lons
dataQ<-dataQ[!(dataQ$latitude>50),]
dataQ<-dataQ[!(dataQ$latitude<24),]
dataQ<-dataQ[(dataQ$longitude<(-66)),]
dataQ<-dataQ[(dataQ$longitude>(-124)),]
# thin out multiple first y's -- only one individual_id per year for a single phenophase
dataQ<-dataQ %>% group_by(individual_id,year) %>% filter(day_of_year==min(as.numeric(as.character(day_of_year))))
# site level mean
siteMean<-summarise(group_by(dataQ, site_id, year), mean(as.numeric(as.character(day_of_year))),
mean(as.numeric(as.character(latitude))),
mean(as.numeric(as.character(longitude))),
mean(as.numeric(as.character(elevation_in_meters))),
first(state),
first(species_id),
first(common_name),
first(phenophase_id))
colnames(siteMean)<-c("site_id","year","day_of_year","latitude","longitude","elevation_m","state","species_id","common_name","phenophase_id")
# more data cleaning
# only valid lat/lons
siteMean<-siteMean[!(siteMean$latitude<0),]
siteMean<-siteMean[!(siteMean$longitude>0),]
# drop AK using subset
#dataQ<-subset(dataQ, state != "AK" & state != "BC")
siteMean<-siteMean[!(siteMean$state %in% c("AK","BC","NS","ON","MB")),]
siteMean<-siteMean[!(siteMean$state==-9999),]
# Query gridded climate data from the RCC-ACIS web service (grid 21 = PRISM)
# for each site-year, accumulating degree-days and precipitation between the
# summer solstice and the observed phenophase date.
# ADD second gdd base of 50F
siteMean$prismGDD<-NA
siteMean$prismPCPN<-NA
siteMean$dayLength<-NA
#siteMean$daymetGDD<-NA
#dataQ$daymetPCPN<-NA
# add in necessary cols: rebuild the calendar date from year + day-of-year
siteMean$avgDate<-as.Date(paste0(siteMean$year,"-",siteMean$day_of_year),format="%Y-%j")
for(i in seq_len(nrow(siteMean))){
  lat<-siteMean$latitude[i]
  lon<-siteMean$longitude[i]
  base<-77 # SET base temperature, 25C/77F for Acer rubrum
  sdate<-paste0(siteMean$year[i],"-06-21") # SET to -01-01 for breaking leaf buds, -08-01
  edate<-siteMean$avgDate[i]
  if (as.Date(sdate)<as.Date(edate)){
    # Day length (hours) at this latitude on the phenophase day-of-year
    siteMean$dayLength[i]<-daylength(lat,as.numeric(as.character(siteMean$day_of_year[i])))
    # Build the ACIS GridData JSON request (elems = degree-days at `base` + pcpn)
    jsonQuery=paste0('{"loc":"',lon,',',lat,'","grid":"21","elems":"hdd',base,',pcpn","sdate":"',sdate,'","edate":"',edate,'"}')
    out<-postForm("http://data.rcc-acis.org/GridData",
                  .opts = list(postfields = jsonQuery,
                               httpheader = c('Content-Type' = 'application/json', Accept = 'application/json')))
    out<-fromJSON(out)
    temp<-as.data.frame(out$data)
    # V2 = daily degree-day value, V3 = daily precipitation; sum over the window
    siteMean$prismGDD[i]<-sum(as.numeric(as.character(temp$V2)))
    siteMean$prismPCPN[i]<-sum(as.numeric(as.character(temp$V3)))
  } else {
    # BUG FIX: this branch previously wrote to dataQ$prismGDD -- the wrong
    # (pre-aggregation) data frame; siteMean is the frame being filled here.
    siteMean$prismGDD[i]<-NA
  }
  # Crude progress indicator (percent of rows processed)
  print((i/nrow(siteMean))*100)
}
# convert variables to numeric
# write out GDD vars to attr
# set attributes
attr(siteMean,"start_date" )<-sdate
#attr(dataQ,"end_date" )<-end_date
attr(siteMean,"CDD_base_F" )<-base
save(siteMean, file="RNPN_CDD_498_quakingaspen_base77F_052920.Rdata")
|
f35740ea5c36c39368bb350303e1472a73dc6407
|
eb25159c0a063807e2bb0d190bc18f3edaa8a385
|
/1_R_functions/LiDARsim.R
|
760c87738598bc27349b30030d41c2539016c91c
|
[] |
no_license
|
florianhartig/ReForse
|
2d13fb9fbb460b6d6df2fa44bb502caf41c79c2c
|
9827a7f627cea7d7a2acd5d3dace390b9fa30f31
|
refs/heads/master
| 2020-03-16T19:17:07.397386
| 2017-11-06T16:27:03
| 2017-11-06T16:27:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 204
|
r
|
LiDARsim.R
|
#' @title LiDARsim - simulating trees
#' @name LiDARsim
#' @docType package
#' @description Package-level documentation for LiDARsim, a package for
#'   simulating trees. (NOTE(review): this replaces the "XXX" placeholder;
#'   expand with the package's actual scope when known.)
#' @details See index / vignette for details
#' @examples
#' vignette("LiDARsim", package="LiDARsim")
NULL
|
03eadb33a194a42045f682edfacef54567911e15
|
3b272f273999702fbf5393df6b3bd9cbc6f761d0
|
/tests/testthat.R
|
3f114ce9a05956dc0d8b6198e3dd6c0b7ee914aa
|
[
"MIT"
] |
permissive
|
cran/KHQ
|
ffdae2c24707f08483506036f932910051f0bedf
|
61b65514b471c7668c89adf43e7e8e69457c4001
|
refs/heads/master
| 2023-06-28T18:27:28.130994
| 2021-08-06T08:40:02
| 2021-08-06T08:40:02
| 387,839,436
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard tests/testthat.R entry point: run the full testthat suite for the
# KHQ package (executed automatically by R CMD check).
library(testthat)
library(KHQ)

test_check("KHQ")
|
438a84353e1ac97b7c654fa532b1abf1e12802dd
|
ca3fa26a219a1695dc8d30f447325148a2f9c6f5
|
/R/getPropertyUrlsMioAffitto.R
|
d3acc54c7862cbc509ccc800c5c152aeaeeed9d6
|
[] |
no_license
|
joshbrowning2358/romeHousePrices
|
fc0c2cca2bdd66c02c655e01ec1fbcf61ba98322
|
e3568316c7d515605f8d72255c825b4569f7ae61
|
refs/heads/master
| 2023-02-18T15:47:17.485520
| 2015-12-14T05:59:16
| 2015-12-14T05:59:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,573
|
r
|
getPropertyUrlsMioAffitto.R
|
##' Get Property urls for Mio Affitto
##'
##' This function gets the urls for each of the detailed, property listing
##' pages.
##'
##' @param numPages The number of pages which should be loaded (and 15 urls are
##' generally scraped per page). If numPages is more than the number of pages
##' available (likely something in the ~2000 range), this function ends at the
##' last one (with a warning).
##'
##' @return A character vector containing the urls for all the individual
##' listings.
##'
##' @export
##'
getPropertyUrlsMioAffitto = function(numPages){
    ## Data Quality Checks
    stopifnot(is.numeric(numPages))
    base = "http://www.mioaffitto.it/search?provincia=77&poblacion=70708"
    errors = 0
    ## Cap the request at the number of result pages the site reports
    totalPages = getNumPagesMioAffitto()
    if(numPages > totalPages){
        warning("Only ", totalPages, " pages available! numPages has been ",
                "adjusted down.")
        numPages = totalPages
    }
    ## Collect each page's urls in a preallocated list instead of growing a
    ## vector with c() on every iteration (avoids O(n^2) copying).
    pageResults = vector("list", numPages)
    for(i in seq_len(numPages)){
        fail = try({
            ## formatC(..., format = "f", digits = 0) makes sure i is never
            ## rendered in scientific notation inside the url
            url = paste0(base, "&page=", formatC(i, format = "f", digits = 0))
            mainHtml <- read_html(url)
            newPages = html_nodes(mainHtml, ".property-title")
            pageResults[[i]] = sapply(newPages, html_attr, name = "href")
        })
        ## inherits() is the canonical check for try() failures
        ## (no dependency on methods::is)
        if(inherits(fail, "try-error"))
            errors = errors + 1
    }
    if(errors > 0)
        ## BUG FIX: the message previously lacked spaces ("We had3errors...")
        warning("We had ", errors, " errors on loading pages.")
    ## Failed pages left NULL in pageResults simply drop out of unlist()
    listingPages = unlist(pageResults)
    return(listingPages)
}
|
1990d19f5a6e4f383723a8ce64ec33ddb2da5da9
|
d2e914bcc4d4c6aa4b8d99c072ff62ef95719ac9
|
/R/integrateFuture.R
|
876908619587084df8d7769cf428cb2f05a5d9c3
|
[] |
no_license
|
kristenkieta/northAreaDrought
|
be1516a9d029e3c84db7cfa6cc6583a7dd1709d8
|
172c87675e2e43b749d1e6cefdd746f26d2755e3
|
refs/heads/master
| 2023-03-19T07:37:37.659412
| 2020-01-06T18:58:03
| 2020-01-06T18:58:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,241
|
r
|
integrateFuture.R
|
# Integrate future data into current dataset for North.
# Exploratory script: computes ASMR values (asmr(), from forestDroughtTool)
# for one station / biogeoclimatic (BGC) unit, pastes the current-scenario
# results to the clipboard, and assembles a BGC substitution table.
library(forestDroughtTool)
library(tidyverse)

# Load current asmr North data plus the cleaned climate-station records
load(here::here("dat","asmrNorth.rda"))
load(here::here("dat","climData_cleaned.rda"))

# Station and BGC unit under study
stnName="DOME CREEK"
bgcName="ICHvk2"

stnData<- asmrNorth$current$stnYears

# Climate records for the chosen station only
clim<-
  climData_cleaned %>%
  filter(stn==stnName)

# Years available for this station: all columns whose name contains "year"
yearList<-
  filter(stnData,stn==stnName) %>%
  dplyr::select(contains("year")) %>%
  as.numeric()

# Run the ASMR calculation, including future scenarios, for those years
X<-asmr(stnData=clim,bgc=bgcName,future=TRUE,years=yearList)

# Copy the current-scenario rows to the clipboard
# NOTE(review): the "clipboard" connection is Windows-only; this line will
# fail on macOS/Linux.
filter(X$asmr,Scenario=="current") %>%
  write.table("clipboard", sep="\t")

filter(asmrNorth$current$asmr,bgc=="SBSmk1")

# Come up with a station and bgc list (one representative BGC per station)
climList<-
  climData_cleaned %>%
  group_by(stn) %>%
  summarise(bgc=first(bgc)) %>%
  arrange(bgc)

filter(climList,bgc=="SBSwk1")

# Stack the BGC substitution pairs (bgc.subF() presumably maps each unit to
# a proxy unit with climate data -- confirm against forestDroughtTool docs)
bgc.subF("SBSwk3","SBSwk1") %>%
  rbind(bgc.subF("ESSFmv4","ESSFmv2")) %>%
  rbind(bgc.subF("ESSFmv3","ESSFmv2")) %>%
  rbind(bgc.subF("ESSFmv1","ESSFmv2")) %>%
  rbind(bgc.subF("SBSmc1","SBSmc2")) %>%
  rbind(bgc.subF("SBSmc3","SBSmc2")) %>%
  rbind(bgc.subF("ICHmc1","ICHmc2")) %>%
  rbind(bgc.subF("ESSFwk2","ESSFwk1")) %>%
  rbind(bgc.subF("ICHwk3","ICHwk1")) %>%
  rbind(bgc.subF("ICHwk4","ICHwk1"))
|
7229263ca95c78d3310b32592b4b2cd856abe03c
|
7159e87cbe1ab2bcd84771922f3c291fd9cae317
|
/2018_04_17_sparklyr.R
|
c2124a06df15a9814cb377a9422d54e1df695348
|
[] |
no_license
|
Sta323-Sp18/exercises
|
0a5cdb5a2b7e983a52df829681d28437614d5a57
|
0940a70a7995ee0c7b054a86eb55ad9d18bd73e9
|
refs/heads/master
| 2021-05-11T15:15:49.545840
| 2018-04-19T18:52:51
| 2018-04-19T18:52:51
| 117,723,329
| 0
| 1
| null | 2018-02-23T15:04:50
| 2018-01-16T18:24:08
|
R
|
UTF-8
|
R
| false
| false
| 3,974
|
r
|
2018_04_17_sparklyr.R
|
# Exploratory analysis of NYC taxi trips (January 2017) via sparklyr
library(sparklyr)
library(dplyr)
library(dbplyr)
library(stringr)
library(ggplot2)

#spark_install()
#spark_available_versions()

# Connect to a local Spark 2.2.1 instance using 4 cores
sc = spark_connect(master = "local[4]", spark_home = "/data/spark/spark-2.2.1-bin-hadoop2.7/")

# Using readr (kept commented as a timing comparison against spark_read_csv)
#system.time({green = readr::read_csv("/data/nyc-taxi-data/nyc_taxi_2017/green_tripdata_2017-01.csv")})
#system.time({yellow = readr::read_csv("/data/nyc-taxi-data/nyc_taxi_2017/yellow_tripdata_2017-01.csv")})

# Register the green- and yellow-cab trip CSVs as Spark tables
green = spark_read_csv(sc, "green", path = "/data/nyc-taxi-data/nyc_taxi_2017/green_tripdata_2017-01.csv")
yellow = spark_read_csv(sc, "yellow", path = "/data/nyc-taxi-data/nyc_taxi_2017/yellow_tripdata_2017-01.csv")
fix_colnames = function(df) {
colnames(df) %>%
tolower() %>%
str_replace("[lt]pep_","") %>%
setNames(df, .)
}
green = fix_colnames(green)
yellow = fix_colnames(yellow)
# yellow %>%
# head(50) %>%
# mutate(
# dropoff_datetime = str_replace(dropoff_datetime, "\\d{4}-\\d{2}-\\d{2} ", "")
# )
#
# green %>%
# head(50) %>%
# mutate(
# dropoff_datetime = regexp_replace(dropoff_datetime, "[0-9]{4}-[0-9]{2}-[0-9]{2} ", "")
# )
wday_hour_summary = function(df, label) {
df %>%
dplyr::select(pickup_datetime, dropoff_datetime, trip_distance, fare_amount, tip_amount) %>%
mutate(
hour = hour(pickup_datetime),
trip_time = (unix_timestamp(dropoff_datetime) - unix_timestamp(pickup_datetime))/60,
wday = date_format(pickup_datetime, "E")
) %>%
group_by(hour, wday) %>%
summarize(
avg_dist = mean(trip_distance, na.rm=TRUE),
avg_time = mean(trip_time, na.rm=TRUE),
avg_fare = mean(fare_amount, na.rm=TRUE),
avg_tip_perc = mean(tip_amount / fare_amount, na.rm=TRUE)
) %>%
mutate(type = label) %>%
collect()
}
green_wday = wday_hour_summary(green, "green")
yellow_wday = wday_hour_summary(yellow, "yellow")
bind_rows(
green_wday,
yellow_wday
) %>%
ungroup() %>%
tidyr::gather(quantity, value, starts_with("avg")) %>%
mutate(wday = factor(wday, levels = c("Mon","Tue","Wed","Thu","Fri","Sat","Sun"))) %>%
ggplot(aes(x=hour, y=value, color=type)) +
geom_line(size=1.5) +
facet_grid(quantity ~ wday, scales = "free_y") +
scale_color_manual(values=c("green","yellow"))
### Zones
zones = sf::st_read("/data/nyc-taxi-data/taxi_zones/")
plot(sf::st_geometry(zones))
## Reddit data
reddit = spark_read_json(sc, "reddit", "/data/reddit/RC_2017-10-smaller")
### Popular subreddits
reddit %>%
select(author, subreddit, body) %>%
group_by(subreddit) %>%
summarize(n = n()) %>%
arrange(desc(n)) %>%
head(50) %>%
collect()
### Most posts
reddit %>%
select(author, subreddit, body) %>%
group_by(author) %>%
summarize(n = n()) %>%
arrange(desc(n)) %>%
head(50) %>%
collect()
### When do people post
reddit %>%
select(author, subreddit, body, created_utc) %>%
mutate(hour = hour(from_unixtime(created_utc))) %>%
count(hour) %>%
arrange(hour) %>%
collect() %>%
plot(type='l')
###
library(tidytext)
reddit %>%
select(author, subreddit, body) %>%
head(10000) %>%
collect() %>%
mutate(text = str_split(body," ")) %>%
tidyr::unnest() %>%
mutate(
text = tolower(text),
text = str_replace_all(text, "[^a-z]", "")
) %>%
count(text) %>%
arrange(desc(n)) %>%
mutate(word = text) %>%
anti_join(tidytext::get_stopwords())
reddit %>%
select(author, subreddit, body) %>%
mutate(body = regexp_replace(body, "[^A-Za-z ]","")) %>%
ft_tokenizer("body", "tokens") %>%
ft_stop_words_remover("tokens", "word") %>%
mutate(word = explode(word)) %>%
count(word) %>%
arrange(desc(n))
## Hamlet example
hamlet = spark_read_text(sc, "hamlet", "/data/Shakespeare/hamlet.txt")
hamlet %>%
mutate(line = regexp_replace(line, "[^A-Za-z ]","")) %>%
ft_tokenizer("line", "tokens") %>%
ft_stop_words_remover("tokens", "word") %>%
mutate(word = explode(word)) %>%
count(word) %>%
arrange(desc(n))
|
0d9295986ad7d54fd6c6e913b6ab1a3dfb0cd084
|
bdb9326615b0ca06a9cbbde60bfef7999f4e58ae
|
/choosing_quadrat_pts.R
|
0337415c575deeaa0a4c4731eca83b57fd9f319b
|
[] |
no_license
|
abigailgolden/quadrats
|
8c4605342bc904cb3c14a4f7d6b845ac9a564f63
|
f9997ce23d0fc2d8f2c92ad5da086b18e3cc2a31
|
refs/heads/master
| 2020-07-27T14:16:07.771761
| 2019-09-20T22:52:00
| 2019-09-20T22:52:00
| 209,120,131
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,240
|
r
|
choosing_quadrat_pts.R
|
# FOR CHOOSING POINTS
getwd()
setwd("C:/Users/jensen/Desktop/mongolia/2019 interdisciplinary")
verts1=read.csv("LC1_Hay2_points_real.csv")
verts <- verts1 %>% select("lat", "lon")
#convert to x,y grid coordinates
lat_min=min(verts$lat)
lon_min=min(verts$lon)
Y_vec=(verts$lat-lat_min)*60
X_vec=(verts$lon-lon_min)*38.326
plot(X_vec,Y_vec)
Y_rand=runif(n=5, min = 0, max=max(Y_vec))
X_rand=runif(n=5, min = 0, max=max(X_vec))
points(X_rand,Y_rand, col="red")
FOR DIVERSITY ANALYSIS
library(dplyr)
data <- read.csv("C:/Users/jensen/Desktop/mongolia/2019 interdisciplinary/SZ_quadrats.csv")
View(data)
diversity <- data %>%
select(quadrat,square,species) %>%
#group_by(quadrat) %>%
summarize(species_count = n_distinct(species))
rarefaction=matrix(nrow =10, ncol =10)
for (i in 1:10)
{
for (j in 1:10)
{
data_subset <-filter(data, quadrat == i |quadrat == j)
diversity <- data_subset %>%
select(quadrat,square,species) %>%
summarize(species_count = n_distinct(species))
rarefaction[i,j]=diversity$species_count
}
}
Soilrandom=runif(n=5, min = 1, max=25)
diversity <- data %>%
select_if(quadrat==2) %>%
#group_by(quadrat) %>%
summarize(species_count = n_distinct(species))
|
7ad0e715578ba887bd5b3b320ec161bc3881c3bd
|
815c362cb3c3796403bf3f8ad11c22772abbb85c
|
/Rcode/ACTG315.R
|
6a2453e9f4ed7d392561f76a3c740b73531c6f37
|
[] |
no_license
|
ygwang2018/DataSets-Longitudinal
|
9cfd50e40caa67aaec7b4e3995e53eee6d051724
|
9065ba39adf3ac8d398ca82bd342e31813bbd3a6
|
refs/heads/master
| 2020-05-25T19:22:15.765794
| 2019-05-23T03:05:36
| 2019-05-23T03:05:36
| 187,950,214
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,912
|
r
|
ACTG315.R
|
require(nlme)
######################### Beginning of Splus Code ########################
# Input Data: assume that the data at the end is stored in an ASCII file
# named "data".
workd<-read.table(file="ACTG315-Ding.dat",header=T,row.names=NULL)
# The measurements before treatment initiation are not used in the analysis.
#workd<-workd[workd$Day>0,]
# Impute below detectable viral measurement by 50.
workd$RNA[workd$RNA==100]<-50
workd<- groupedData(RNA~Day|ID, data=workd)
ID<-unique(workd$ID)
n<-length(ID)
with(workd,plot(Day,log10(RNA),type='n',xlab="Days"))
for (i in 1:n)
{xx<-(workd$ID==ID[i])
lines(workd$Day[xx],log10(workd$RNA[xx]),col=i)
}
# Define functions representing Model (12) and Model (13)
# of Wu and Ding (1999).
exp.model12<-function(p0, p1, d1, X)
{
P0 <- exp(p0)
P1 <- exp(p1)
P0 + P1 * exp( - d1 * X)
}
exp.model13<-function(p1, d1, p2, d2, X)
{
P1 <- exp(p1)
P2 <- exp(p2)
P1 * exp( - d1 * X) + P2 * exp( - d2 * X)
}
# Fit Model (12) and Model (13) by NLME
actg315.model12<-nlme(Y ~ log10(exp.model12(p0, p1, d1, X)),
fixed = list(p0~., p1~., d1~.),
random = list(p0~., p1~., d1~.),
cluster = ~ Z,
data = data.frame(Y=log10(workd$RNA), X=workd$Day, Z=workd$ID),
start = list(fixed= c(5,11,0.5)),verbose=T)
actg315.model13<-nlme(Y ~ log10(exp.model13(p1, d1, p2, d2, X)),
fixed = list(p1~., d1~., p2~., d2~.),
random = list(p1~., d1~., p2~., d2~.),
cluster = ~ Z,
data = data.frame(Y=log10(workd$RNA), X=workd$Day, Z=workd$ID),
start = list(fixed= c(12,0.4,7,0.03)),verbose=T)
anova(actg315.model12,actg315.model13)
###################### End of Splus Code ######################################
RESULTS:
--------
Model Df AIC BIC Loglik Test Lik.Ratio P value
actg315.model12 1 10 433.03 470.69 -206.52
actg315.model13 2 15 257.98 314.46 -113.99 1 vs. 2 185.05 0
Fixed Effects Estimates from Model (12):
LogP0 LogP1 delta_p
5.666933 11.10344 0.2246409
Fixed Effects Estimates from Model (13):
LogP1 delta_p LogP2 lamba_l
12.32537 0.4759455 7.945189 0.04134005
**********************************************************************************
8. Please cite the following references if appropriate when you use the data in your paper:
REFERENCES:
----------
1) Connick E, Lederman MM, Kotzin BL, et al. (2000), "Immune reconstitution in the first year of potent antiretroviral therapy and its relationship to virologic response," Journal of Infectious Diseases, 181:358-63.
2) Ding, A.A. and Wu, H. (1999), "Relationships between Antiviral Treatment Effects and Biphasic Viral Decay Rates in Modeling HIV Dynamics," Mathematical Biosciences, 160. 63-82.
3) Ding, A.A. and Wu, H. (2000), "A Comparison Study of Models and Fitting Procedures for Biphasic Viral Dynamics in HIV-1 Infected Patients Treated with Antiviral Therapies," Biometrics, 56, 293-300.
4) Lederman MM, Connick E, Landay A, et al. (1998), "Immunologic responses associated with 12 weeks of combination antiretroviral therapy consisting of zidovudine, lamivudine and ritonavir: results of AIDS Clinical Trials Group Protocol 315," Journal of Infectious Diseases, 178: 70-79.
5) Wu, H. and Ding, A. (1999), "Population HIV-1 Dynamics in Vivo: Applicable Models and Inferential Tools for Virological Data from AIDS Clinical Trials," Biometrics, 55, 410-418.
6) Wu, H. and Ding, A. and DeGruttola. V. (1998), "Estimation of HIV Dynamic Parameters," Statistics in Medicine, 17, 2463-2485.
7) Wu, H., Kuritzkes, D.R., and McClernon, D.R. et al. (1999), "Characterization of Viral Dynamics in Human Immunodeficiency Virus Type 1-Infected Patients Treated with Combination Antiretroviral Therapy: Relationships to Host Factors, Cellular Restoration and Virological Endpoints," Journal of Infectious Diseases, 179(4):799-807.
---------------------------------------------------------------------------
|
e5632fcd3d9d8987f18b69be806705010da6490c
|
e74bd112294ec916364fe29b0484bab716274af5
|
/.Rprofile
|
2b7818cc7fa201ef7b5774947e978f99906203ac
|
[] |
no_license
|
liao961120/collabin
|
49d410815313e39519b2a7911a8579126823e5d9
|
64fb78a8b5dfd6608547ff09b5a7911074ad5279
|
refs/heads/master
| 2021-09-24T05:11:32.567050
| 2021-09-16T07:40:25
| 2021-09-16T07:40:25
| 166,556,192
| 4
| 6
| null | 2020-06-05T05:04:00
| 2019-01-19T14:35:11
|
HTML
|
UTF-8
|
R
| false
| false
| 67
|
rprofile
|
.Rprofile
|
options(
blogdown.ext = ".Rmd",
blogdown.new_bundle = TRUE
)
|
24a49ff75e5ec9c62155bcb1f27f87f0638bccf1
|
488aa11a1a9a819f958f111d0b07313c171251db
|
/old_code/old/makeUnique.R
|
0fe4ac0db34b7aadf2d1b3c6c9312bc8cb9111ef
|
[] |
no_license
|
dat-nguyen/DataAnalysis_ML-SF
|
6807b999fb94a1011a726e4b21b68f4288d7bf11
|
b61048f3b0603a8eba16be9ac636f4e685bec738
|
refs/heads/master
| 2021-01-10T12:49:27.596029
| 2016-02-24T14:00:03
| 2016-02-24T14:00:03
| 52,097,375
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,270
|
r
|
makeUnique.R
|
#source("lib/lib.data.R")
#
concatName <- function(path, desc, method, trainset, state) {
if (desc == "credo")
return (paste(path, "JMJD2A_", trainset, "_", desc, "_", state, "_c12_RoF_RC_", method,".csv", sep=""))
else
return (paste(path, "JMJD2A_", trainset, "_", desc, "_", state, "_c12b12_RoF_RC_", method,".csv", sep=""))
}
path2Result = "/Users/knight/dev/classification_R/JMJD/Result/"
path2Name = "/Users/knight/dev/classification_R/JMJD/"
#path2Result = "/home/dat/WORK/DB/JMJD/Result/"
#path2Name = "/home/dat/WORK/DB/DESCRIPTORS/JMJD/"
descList = c("credo", "elements", "sybyl")
methodList = c("REP", "RT")
trainsetList = c("CASF12", "CASF13", "CSAR12")
stateList = c("actives", "inactives")
#
createMergeList <- function(desc, method, trainset, state) {
if (state == "actives") {
name = read.csv(paste(path2Name,"JMJD2A_dock_actives.csv",sep=""), sep = "\t")
}
else {
name = read.csv(paste(path2Name,"JMJD2A_dock_inactives_gold.csv",sep=""), sep = "\t")
}
result = read.csv(concatName(path2Result, desc, method, trainset, state))
name = makeUniqueList(name)
result = makeUniqueList(result[,2:3])
mergeList = merge(name, result, by.x=1 , by.y=1)
if (state == "actives") {
mergeList = data.frame(goldscore = rep(0, length(mergeList[,1])), mergeList)
}
else {
mergeList = data.frame(pIC50 = rep(0, length(mergeList[,1])), mergeList)
}
return (mergeList)
}
createSortedList <- function(desc, method, trainset) {
#activesList = createMergeList(descList[1], methodList[1], trainsetList[1], stateList[1])
activesList = createMergeList(desc, method, trainset, stateList[1])
inactivesList = createMergeList(desc, method, trainset, stateList[2])
totalList = rbind(activesList, inactivesList)
sortedList = totalList[ order(-totalList[,4]),]
return (sortedList)
}
calcFoundActivesPercent <- function(desc, method, trainset, cutoff) {
sortedList = createSortedList(desc, method, trainset)
foundActives = sum(sortedList[1:cutoff, "pIC50"] > 0)
foundActivesPercent = foundActives/cutoff
return (foundActivesPercent)
}
buildAnalysis <- function(cutoff) {
foundActives = list()
for (method in methodList) {
foundActives[[method]] = matrix(NA, nrow = length(trainsetList), ncol = length(descList))
rownames(foundActives[[method]]) = trainsetList
colnames(foundActives[[method]]) = descList
# for (trainset in trainsetList)
for (i in seq(length(trainsetList)))
# for (desc in descList)
for (j in seq(length(descList))) {
trainset = trainsetList[i]
desc = descList[j]
foundActives[[method]][i,j] = calcFoundActivesPercent(desc, method, trainset, cutoff)
}
# \TODO: fixing bugs, trainsetList verwechselt mit descList?
image(1:length(trainsetList), 1:length(descList), z = t(foundActives[[method]]), axes = FALSE, xlab = method, ylab = "", main = cutoff)
axis(1, 1:length(trainsetList), colnames(foundActives[[method]]))
axis(2, 1:length(descList), rownames(foundActives[[method]]))
}
}
path = "/Users/knight/"
pdf(paste(path,"JMJD2A_heatmap.pdf",sep=""), width = 8, height = 12)
par(mfrow = c(4,2))
cutoffList = c(10, 20, 30, 46)
sapply(cutoffList, buildAnalysis)
dev.off()
source("lib/lib.validation.extern.R")
plotActives <- function() {
for (trainset in trainsetList)
for (desc in descList)
for (method in methodList) {
result = read.csv(concatName(path2Result, desc, method, trainset, "actives"))
plot(result[,2], result[,3], xlab = "measured pIC50", ylab = "predicted pKd", main = paste(method,"_",trainset,"_",desc,sep=""))
legend("topleft", paste("R=",round(sqrt(calcValidationMetric(result[,2], result[,3])$r2.pearson), 3), sep=""))
}
}
pdf(paste(path,"JMJD2A_plot.pdf",sep=""), width = 16, height = 8)
par(mfrow = c(3,6))
plotActives()
dev.off()
#cutoff = 40
#activesList = createMergeList(desc, method, trainset, stateList[1])
#inactivesList = createMergeList(desc, method, trainset, stateList[2])
#totalList = rbind(activesList, inactivesList)
#sortedList = totalList[ order(-totalList[,4]),]
#foundActives = sum(sortedList[1:cutoff, "pIC50"] > 0)
#foundActivesPercent = foundActives/cutoff
|
93d62feb6467c2048b65f250f1f384c21e22797b
|
c418c599316af9658a21e111cd3259335dd019c7
|
/res/ADC-CLC.r
|
167b5c99c9523a9c992e4394bcb56f676429a37a
|
[] |
no_license
|
LucasRodolfo/MC861
|
534f0705544e70bd367a9324b5524363e516ee7d
|
dda343d2685f412e7db562a86d3cb8cabf8c17f9
|
refs/heads/master
| 2022-12-21T06:48:40.125911
| 2019-11-22T23:34:23
| 2019-11-22T23:34:23
| 201,146,981
| 0
| 0
| null | 2022-12-10T01:27:04
| 2019-08-08T00:20:48
|
TypeScript
|
UTF-8
|
R
| false
| false
| 616
|
r
|
ADC-CLC.r
|
| pc = 0xc001 | a = 0xc0 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc003 | a = 0x84 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 10110101 |
| pc = 0xc005 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 00110111 |
| pc = 0xc007 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc008 | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 00110110 |
| pc = 0xc00a | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc00c | a = 0x00 | x = 0x00 | y = 0x00 | sp = 0x00fd | p[NV-BDIZC] = 10110100 |
|
b56dad1a7bb40a39f1550adf184de78d87225a5f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PlackettLuce/examples/choices.Rd.R
|
41072c0513593a6e188b7005385a436e430c8134
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 558
|
r
|
choices.Rd.R
|
library(PlackettLuce)
### Name: choices
### Title: Choices Object
### Aliases: choices
### ** Examples
R <- matrix(c(1, 2, 0, 0,
4, 1, 2, 3,
2, 1, 1, 1,
1, 2, 3, 0,
2, 1, 1, 0,
1, 0, 3, 2), nrow = 6, byrow = TRUE)
colnames(R) <- c("apple", "banana", "orange", "pear")
actual_choices <- choices(R, names = TRUE)
coded_choices <- choices(R, names = FALSE)
attr(coded_choices, "objects")
## Coercion to tibble is straightforwards
if (require(tibble)){
as.tibble(coded_choices)
}
|
581c883b74676e8758e51d3c1fcef7d5f761475f
|
ee6e4d67425b493fe9f67fce795ee058a26cfadd
|
/Problem 3.R
|
9267f97340fa23d61e3c832ccf465ff466334b61
|
[] |
no_license
|
wkfunk/project-euler
|
d282513c0544a9428f711a9ba22e7867ce091591
|
8f9399bf5fe3d0522b25932fa51e9e65d793f798
|
refs/heads/master
| 2021-01-25T10:06:35.702630
| 2011-12-01T23:25:29
| 2011-12-01T23:25:29
| 2,884,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 576
|
r
|
Problem 3.R
|
##############################Problem 3:##############################
# Find the largest prime factor of a composite number.
# What is the largest prime factor of the number 600851475143 ?
findPrimeFactors <- function(num) {
primeFac <- c()
while (num>1){
if (num%%2 ==0) {
primeFac <- append(primeFac, 2)
num <- num/2
}
else {
seed <- 3
while (num%%seed != 0) {
seed <- seed+2
}
primeFac <- append(primeFac, seed)
num <- num/seed
}
}
return(primeFac)
}
max(findPrimeFactors(600851475143))
# [1] 6857
|
565849c8cfc76262f1b1f291ce0b027886f3f91a
|
622436e52cf1ffb105b2f9af141cabc22ac51246
|
/P7/P7T.R
|
cc11de3a0961e90afe292ae8c28212d8ae3e80fc
|
[] |
no_license
|
EdePB/Simulacion
|
6f25d97a0f3039f3a8c502fb5a13ec1b9db77206
|
1846530bbf16a99052a0400c7d799deb77bf1c5b
|
refs/heads/master
| 2020-12-24T07:04:04.794711
| 2017-12-12T08:36:40
| 2017-12-12T08:36:40
| 99,721,801
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,961
|
r
|
P7T.R
|
f <- function(x, y) {
return(((x + 0.5)^4 - 30 * x^2 - 20 * x + (y + 0.5)^4 - 30 * y^2 - 20 * y)/100)
}
low <- -2
high <- 2
step <- 0.25
replicas <- 100
best<-c()
t=35
WA<-0.0666822
po<-function (){
datos=data.frame()
resultados=data.frame()
currx <- runif(1, low, high)
curry <- runif(1, low, high)
best <- c(currx, curry)
for (tiempo in 1:t) {
delta <- runif(1, 0, step)
left <- currx - delta
right <- currx + delta
down <- curry - delta
up <- curry + delta
if (f(left,curry) > f(right,curry)) {
bestx <- c(left,curry)
} else {
bestx <- c(right,curry)
}
if (f(currx, up) > f(currx, down)) {
besty <-c(currx,up)
} else {
besty <- c(currx,down)
}
if(f (bestx[1],bestx[2])> f(besty[1], besty[2])){
currx<-bestx[1]
curry<-bestx[2]
}else{
currx<-besty[1]
curry<-besty[2]
}
if (f(currx, curry) > f(best[1],best[2])) {
best <- c(currx,curry)
}
datos=cbind(i,tiempo, currx, curry,f(best[1], best[2]))
resultados=rbind(resultados,datos)
}
return(resultados)
}
suppressMessages(library(doParallel))
registerDoParallel(makeCluster(detectCores() - 1))
y <- foreach(i = 1:10, .combine=rbind) %dopar% po()
stopImplicitCluster()
names(y)=c("corrida","tiempo","posx","posy", "Fevaluada")
y$corrida=as.factor(y$corrida)
#png("p7_2de.png",width=700, height = 700)
library(lattice) # lo mismo aplica con este paquete
png("p7_T.png", width=600, height=500)
xyplot(data=y,Fevaluada~tiempo, xlab="Pasos",ylab="f(x,y)",groups=corrida,
panel=function(x,y,subscripts,groups) {
panel.xyplot(x,y)
panel.stripplot(x,y,groups = groups,subscripts=subscripts, type="o", pch=16)
panel.abline(h=WA,col="green", lwd=2)
}
)
graphics.off()
|
149bb54c5933f70393ea421ff1e0891c3409f6cf
|
2f8eadc3086c263afdf2784e229f7e80a7dcf49e
|
/RMark/man/summary.mark.Rd
|
c45f9e1798ed15f6222592007308b8ee2f8b51b6
|
[] |
no_license
|
wchallenger/RMark
|
3edb66cbc924fbc37feaa53d22d99e8ac83ee47a
|
636a3a7a6c5ab5292c3a249a1b41dab580dda8ba
|
refs/heads/master
| 2021-01-17T22:35:11.482847
| 2012-01-27T17:16:51
| 2012-01-27T17:16:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,203
|
rd
|
summary.mark.Rd
|
\name{summary.mark}
\alias{coef.mark}
\alias{print.summary.mark}
\alias{summary.mark}
\title{Summary of MARK model parameters and results}
\usage{
\method{summary}{mark}(object,...,se=FALSE,vc=FALSE,showall=TRUE,show.fixed=FALSE,brief=FALSE)
\method{coef}{mark}(object,...)
\method{print}{summary.mark}(x,...)
}
\arguments{
\item{object}{a MARK model object}
\item{se}{if FALSE the real parameter estimates are
output in PIM format (eg. triangular format); if TRUE,
they are displayed as a list with se and confidence
interval}
\item{vc}{if TRUE the v-c matrix of the betas is
included}
\item{showall}{if FALSE it only returns the values of
each unique parameter value}
\item{show.fixed}{if FALSE, each fixed value given NA;
otherwise the fixed real value is used. If se=TRUE,
default for show.fixed=TRUE}
\item{brief}{if TRUE, does not show real parameter
estimates}
\item{...}{additional non-specified argument for S3
generic function}
\item{x}{list resulting from call to \code{summary}}
}
\value{
A list with each of the summarized objects that depends
on the argument values. Only the first 4 are given if it
is a summary of a model that has not been run.
\item{model}{type of model (e.g., CJS)} \item{title}{user
define title if any} \item{model.name}{descriptive name
of fitted model} \item{call}{call to make.mark.model used
to construct the model} \item{npar}{number of fitted
parameters} \item{lnl}{-2xLog Likelihood value}
\item{npar}{Number of parameters (always the number of
columns in design matrix)} \item{chat}{Value of
over-dispersion constant if not equal to 1}
\item{npar.unadjusted}{number of estimated parameters
from MARK if different than npar } \item{AICc}{Small
sample corrected AIC using npar; named qAICc if chat not
equal to 1} \item{AICc.unadjusted}{Small sample corrected
AIC using npar.unadjusted; prefix of q if chat not equal
to 1} \item{beta}{dataframe of beta parameters with
estimate, se, lcl, ucl} \item{vcv}{variance-covariance
matrix for beta} \item{reals}{list of lists, dataframes
or matrices depending on value of se and the type of
model (triangular versus square PIMS) (see details
above)}
}
\description{
Creates a summary object of either a MARK model input or
model output which includes number of parameters,
deviance, AICc, the beta and real parameter estimates and
optionally standard errors, confidence intervals and
variance-covariance matrices. If there are several
groups in the data, the output is structured by group.
}
\details{
The structure of the summary of the real parameters
depends on the type of model and the value of the
argument \code{se} and \code{showall}. If \code{se=F}
then only the estimates of the real parameters are shown
and they are summarized the result element \code{reals}
in PIM format. The structure of \code{reals} depends on
whether the PIMS are upper triangular ("Triang") or a row
("Square" although not really square). For the upper
triangular format, the values are passed back as a list
of matrices where the list is a list of parameter types
(eg Phi and p) and within each type is a list for each
group containing the pim as an upper triangular matrix
containing the real parameter estimate. For square
matrices, \code{reals} is a list of matrices with a list
element for each parameter type, but there is not a
second list layer for groups because in the returned
matrix each group is a row in the matrix of real
estimates. If \code{se=TRUE} then estimates, standard
error (se), lower and upper confidence limits (lcl, ucl)
and a "Fixed" indicator is passed for each real
parameter. If the pims for the model were simplified to
represent the unique real parameters (unique rows in the
design matrix), then it is possible to restict the
summary to only the unique parameters with
\code{showall=FALSE}. This argument only has an affect
if \code{se=TRUE}. If \code{showall=FALSE}, \code{reals}
is returned as a dataframe of the unique real parameters
specified in the model. This does not mean they will all
have unique values and it includes all "Fixed" real
parameters and any real parameters that cannot be
simplified in the case of parameters such as "pent" in
POPAN or "Psi" in "Multistrata" that use the multinomial
logit link. Use of \code{showall=FALSE} is of limited use
but provided for completeness. In most cases the default
of \code{showall=TRUE} will be satisfactory. In this
case, \code{reals} is a list of dataframes with a list
element for each parameter type. The dataframe contains
the estimate, se,lcl, ucl,fixed and the associated
default design data for that parameter (eg time,age,
cohort etc). The advantage of retrieving the reals in
this format is that it is the same regardless of the
model, so it enables model averaging the real parameters
over different models with differing numbers of unique
real parameters.
}
\author{
Jeff Laake
}
\keyword{utility}
|
d32f8d2c26818dcedf5a524369c8682b2460eab7
|
f6392b2ae66c3fc295335fd3d88ddb5711f1c294
|
/src/3-do/API/mrnaXmethylation/API_mrnaXmethylation.R
|
29b176f052bede28a3a0e0c95555723b07fddd7c
|
[] |
no_license
|
Leo-1992/multiomics
|
8c19d2ef9b8c3f9afa9971dd6e38dc28dc27120a
|
949c6e9cedf4a2526a74fa2db02646631b6a3205
|
refs/heads/master
| 2020-06-13T16:46:50.110842
| 2018-10-07T18:26:23
| 2018-10-07T18:26:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,597
|
r
|
API_mrnaXmethylation.R
|
#The methylation cgXXX is in methylation files of TCGA.
#We have also a file which correlates cgXXX with gene. Take into account that a cgXXX can do methilation over multiple genes and that a gene can be methylazed by multiple cgXXX.
#For doing it, we read a gen from the mrnaFile, then we look in the "methylationPlatformFile" all the cgXXX corresponding to this gene, and then we look for those CGXXX in the methilation file. It keeps the pairs (gene, cgXXX) which correlates negatively, considering the mrna file and the methylation file.
#Take into account that the platform file should Have two columns: GEN - CG. If a gen has goto multiple cgs, there will be many rows for this gene
methXMrnas <- function(mrna, meth, meth.platform, output.path="~/",
output.file.name="methylationXMrna.csv",
r.minimium=0.7,
pearsons.method = "pearson",
inc.progress = F,keep.pos.cor=T, keep.neg.cor=F){
ptm <- proc.time()
total.rows=nrow(mrna)*15
print(paste("Running pipeline methylaion_X_mrnas with", r.minimium,
"threshold and pearson's method:", pearsons.method, sep=" "))
###MDB: 26/2/2018 - P.ADJUST
#Columns are: "Gene","Location", "methylation-id", "Methylation-mRNA correlation", "p-value", "p_value_fdr_adjustedMeth_Mrna_Correlation", "ID"
num.of.result.columns<-7
position.of.adjusted.p.value<-6
####
# The result matix is created
res <- matrix(nrow=total.rows,ncol=num.of.result.columns)
colnames(res)<-(c("Gene","Location", "methylation-id", "Methylation-mRNA correlation", "p-value", "p-value-fdr-adjusted", "ID"))
###MDB: 26/2/2018 - P.ADJUST - Start on 0
actual<-0
actual.n.correlated<-1
print("Start process!")
###MDB: 26/2/2018 - P.ADJUST
p.values.all<-c()
p.values.positions.of.correlated.pairs<-c()
ids<-c()
###
ids.in.meth.dataset<-meth[,1]
print("Start process!")
for (i in 1:nrow(mrna)) {
actual.gen<-as.character(mrna[i,1])
#position.in.cnv.dataset<-which(ids.in.cnv.dataset == actual.gen)
#In meths i have each cg, which do methialtion over the gene.
cgs.for.this.gene<-get.cgs.for.this.genes(actual.gen,meth.platform)
#Se queda con el primer CNV
if (length(cgs.for.this.gene)>0){
for (actual.cg.index in 1:length(cgs.for.this.gene)){
actual<-actual+1
actual.cg<-cgs.for.this.gene[actual.cg.index]
#position.in.cnv.dataset<-position.in.cnv.dataset[1]
actual.mrna<-mrna[i,2:ncol(mrna)]
#position.in.meth.dataset<-which(ids.in.meth.dataset == actual.cg)
actual.meth<-meth[meth[,1] == actual.cg,2:ncol(meth)]
if (nrow(actual.meth)>0){
resultado.pearson<-cor.test(as.numeric(actual.mrna),
as.numeric(actual.meth),
method = pearsons.method)
###MDB: 26/2/2018 - P.ADJUST
p.values.all<-append(p.values.all, resultado.pearson$p.value)
#id<-paste(actual, actual.gen, actual.mirna, sep="-")
id<-actual
ids<-append(ids, id)
if (!is.na(abs(resultado.pearson$estimate))) {
if (abs(resultado.pearson$estimate) > r.minimium) {
if ((keep.pos.cor==T && resultado.pearson$estimate>0) || ((keep.neg.cor==T && resultado.pearson$estimate<0))){
location<-getGeneLocation(actual.gen);
newValue<-c(as.character(actual.gen), location, actual.cg,
resultado.pearson$estimate, resultado.pearson$p.value, -9999, id)
res[actual.n.correlated,1:num.of.result.columns] <- newValue
actual.n.correlated<-actual.n.correlated+1
###MDB: 26/2/2018 - P.ADJUST
p.values.positions.of.correlated.pairs<-append(p.values.positions.of.correlated.pairs, id)
}
}
}
if ((actual)%%500==0)print(paste("analised ", actual, " from ", total.rows))
if ((actual)%%1000==0) {
elapsedTime <- (proc.time() - ptm)[3]
print(paste(
"elapsed time: (seconds)", format2Print(elapsedTime),
" - (minutes)", format2Print(elapsedTime/60),
" - (hours)", format2Print(elapsedTime/60/60)
))
remainingTime <- ((total.rows*elapsedTime)/actual) - elapsedTime
print(paste("estimated remaining time (seconds)", format2Print(remainingTime),
" - (minutes)", format2Print(remainingTime/60),
" - (hours)", format2Print(remainingTime/60/60)
))
}
}
}
}
if(inc.progress) {
incProgress(1/nrow(mrna));
}
}
if (length(p.values.all)>0){
###MDB: 26/2/2018 - P.ADJUST
p.values.adjusted.fdr<-p.adjust(p.values.all, method="fdr", n=length(p.values.all))
names(p.values.adjusted.fdr)<-ids
###MDB: 26/2/2018 - P.ADJUST
res[res[,"ID"] %in% p.values.positions.of.correlated.pairs, position.of.adjusted.p.value]<-p.values.adjusted.fdr[as.character(p.values.positions.of.correlated.pairs)]
####
}
# deleting useless and unused rows
res <- res[c(1:actual.n.correlated-1),c(1:num.of.result.columns)]
#if (!(folder.exists(output.path))) {dir.create(output.path)}
file.path<-paste(output.path, output.file.name, sep="")
write.table(res, file.path, sep="\t", row.names=FALSE,
col.names=TRUE, quote=FALSE)
print(proc.time() - ptm)
return (convertVectorToMatrix(res))
}
methXMrnasWCGNA <- function(mrna, meth, meth.platform, output.path = "~/",
                            output.file.name = "methylationXMrna.csv",
                            r.minimium = 0.7,
                            inc.progress = FALSE, keep.pos.cor = FALSE,
                            keep.neg.cor = TRUE) {
  # Gene-by-gene correlation of methylation (CG probes) against mRNA
  # expression using WGCNA's correlation routine.
  #
  # mrna:          matrix/data.frame; column 1 = gene id, remaining columns = samples.
  # meth:          matrix/data.frame; column 1 = CG probe id, remaining columns = samples.
  # meth.platform: mapping table with (at least) columns `gene` and `cg`.
  # r.minimium:    minimum |correlation| forwarded to correlation.with.wcgna().
  # keep.pos.cor / keep.neg.cor: which correlation signs to keep in the
  #                per-gene correlation step; the final result is additionally
  #                restricted to negative correlations.
  # inc.progress:  kept for interface compatibility; not used in this function.
  #
  # Writes the result table to output.path/output.file.name and returns it
  # as a matrix.
  library("WGCNA")
  library("reshape2")
  library("data.table")
  ptm <- proc.time()
  print(paste("Running pipeline methylaion_X_mrnas with", r.minimium, "threshold", sep = " "))
  final.data.frame <- data.frame(matrix(ncol = 5, nrow = 0))
  colnames(final.data.frame) <- c("x", "y", "correlation", "p.value", "p.value.fdr.adjusted")
  # For each gene in the mRNA file
  for (i in seq_len(nrow(mrna))) {
    # Expression profile and id of the current gene
    actual.mrna <- mrna[i, 2:ncol(mrna)]
    actual.gen <- as.character(mrna[i, 1])
    # All CG probes associated with the current gene via the platform file
    cg.genes <- subset(meth.platform, gene == actual.gen)
    if (nrow(cg.genes) > 0) {
      # Rows of the methylation file belonging to those CG probes.
      # NOTE(review): `meth %in% cg.genes$cg` resolves inside subset() —
      # this assumes the first column of the methylation file is named
      # "meth"; verify against the actual input data.
      current.gen.meth.values.by.cg <- subset(meth, meth %in% cg.genes$cg)
      if (nrow(current.gen.meth.values.by.cg) > 0) {
        # Use the first column (CG ids) as row names, then drop that column
        rownames(current.gen.meth.values.by.cg) <- current.gen.meth.values.by.cg[, 1]
        current.gen.meth.values.by.cg <- current.gen.meth.values.by.cg[, 2:ncol(current.gen.meth.values.by.cg)]
        print("Correlation....")
        # Correlate the gene's expression against each of its CG probes
        correlation.result <- correlation.with.wcgna(actual.mrna, current.gen.meth.values.by.cg, r.minimium,
                                                     keep.pos.cor = keep.pos.cor, keep.neg.cor = keep.neg.cor)
        final.data.frame <- rbind(final.data.frame, correlation.result)
      }
    }
  }
  # Keep only negative correlations. This is specific to this correlation.
  colnames(final.data.frame) <- (c("Gene", "methylation-id", "Methylation_mRNA_correlation", "p-value", "p_value_fdr_adjusted"))
  final.data.frame <- subset(final.data.frame, Methylation_mRNA_correlation < 0)
  # For each row, look up the gene location
  if (nrow(final.data.frame) > 0) {
    # BUGFIX: apply() cannot be used here — `final.data.frame[, 1]` drops to
    # a plain vector (no dims), so apply() errors with
    # "dim(X) must have a positive length". Iterate element-wise instead.
    final.data.frame$Location <- sapply(final.data.frame[, 1], getGeneLocationFromFactor)
    final.data.frame <- final.data.frame[, c("Gene", "Location", "methylation-id", "Methylation_mRNA_correlation", "p-value", "p_value_fdr_adjusted")]
  }
  # Persist the result and return it
  write.to.file(final.data.frame, output.path, output.file.name)
  print(proc.time() - ptm)
  return(as.matrix(final.data.frame))
}
get.cgs.for.this.genes <- function(actual.gen, meth.platform) {
  # Return the CG probe ids (second column of the platform mapping) for all
  # rows whose `gene` column equals `actual.gen`, as a character vector.
  rows.for.gene <- meth.platform[meth.platform$gene == actual.gen, ]
  as.character(rows.for.gene[, 2])
}
|
3a18110ccb9766e117a5060eef63ae50126c40c9
|
bbd1dc1b9d3dd750d1501aedf11ef865b794c09c
|
/scripts/Java/greedy_algorithm_package/01_generate.R
|
3f224ac1dd4f4cf1ee66791220f5340312b07f74
|
[
"LicenseRef-scancode-mit-old-style",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
WansonChoi/GenomicGPS
|
5ad17a973b61c6ce05087cb1cd090987bbe74986
|
0e6f213d65ffbbecfa03cd5db920d94667649227
|
refs/heads/master
| 2020-07-02T00:32:27.382810
| 2019-07-30T04:11:03
| 2019-07-30T04:11:03
| 201,361,362
| 1
| 0
|
MIT
| 2019-08-09T00:58:35
| 2019-08-09T00:58:34
| null |
UTF-8
|
R
| false
| false
| 662
|
r
|
01_generate.R
|
#!/usr/bin/env Rscript
# Simulate a reference panel and one target sample of SNP genotypes, then
# write the simulated data under the output prefix given as the first
# command-line argument.
#
# Outputs (plain text):
#   <prefix>.sam  genotype of the single target sample (1 x nsnp)
#   <prefix>.ref  genotypes of the reference panel (nref x nsnp)
#   <prefix>.af   per-SNP allele frequencies used for the simulation
#   <prefix>.dv   squared genotype distance of each reference individual
#                 from the target sample

minMAF <- 0.3   # allele frequencies drawn uniformly in [minMAF, 1 - minMAF]
nsnp <- 1000    # number of simulated SNPs
nref <- 30      # number of reference individuals

args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1) {
  stop("usage: 01_generate.R <output-prefix>", call. = FALSE)
}
pref <- args[1]

ref <- matrix(NA, nrow = nref, ncol = nsnp)
sam <- matrix(NA, 1, nsnp)
af <- runif(nsnp, min = minMAF, max = 1 - minMAF)
# Genotypes are Binomial(2, af): 0/1/2 copies of the alternate allele.
for (j in seq_len(nsnp)) {
  ref[, j] <- rbinom(nref, 2, af[j])
  sam[, j] <- rbinom(1, 2, af[j])
}
# Squared Euclidean distance of each reference individual to the sample.
dv <- vapply(seq_len(nref), function(i) sum((ref[i, ] - sam)^2), numeric(1))

# Write everything to file.
write.table(sam, file = paste0(pref, ".sam"), row.names = FALSE, col.names = FALSE)
write.table(ref, file = paste0(pref, ".ref"), row.names = FALSE, col.names = FALSE)
write.table(af, file = paste0(pref, ".af"), row.names = FALSE, col.names = FALSE)
write.table(dv, file = paste0(pref, ".dv"), row.names = FALSE, col.names = FALSE)
|
6b6e9adc8a4c603a88d4629a37b06d031b1a19af
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/udpipe/vignettes/udpipe-train.R
|
8352df4fb6354087d77eb69b290e706b0bb25934
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,124
|
r
|
udpipe-train.R
|
## Vignette code (extracted from udpipe-train.Rmd): training UDPipe models.

## -- knitr setup ---------------------------------------------------------
options(width = 1000)
knitr::opts_chunk$set(echo = TRUE, message = FALSE, comment = NA, eval = TRUE)

## -- locate the bundled toy CoNLL-U training file ------------------------
file_conllu <- system.file(package = "udpipe", "dummydata", "traindata.conllu")
file_conllu
cat(head(readLines(file_conllu), 3), sep = "\n")

## -- train a toy model: tokenizer + tagger, no dependency parser ---------
library(udpipe)
m <- udpipe_train(
  file = "toymodel.udpipe",
  files_conllu_training = file_conllu,
  annotation_tokenizer = list(dimension = 16,
                              epochs = 1,
                              batch_size = 100,
                              dropout = 0.7),
  annotation_tagger = list(iterations = 1,
                           models = 1,
                           provide_xpostag = 1,
                           provide_lemma = 0,
                           provide_feats = 0),
  annotation_parser = "none")
m$file_model
## The model is now trained and saved in file toymodel.udpipe in the current
## working directory. Now we can use the model to annotate some text.
mymodel <- udpipe_load_model("toymodel.udpipe")
x <- udpipe_annotate(
  object = mymodel,
  x = "Dit is een tokenizer met POS tagging,
zonder lemmatisation noch laat deze dependency parsing toe.",
  parser = "none")
str(as.data.frame(x))

## -- the same training call using the package defaults (not evaluated) ---
# m <- udpipe_train(file = "toymodel.udpipe", files_conllu_training = file_conllu,
#                   annotation_tokenizer = "default",
#                   annotation_tagger = "default",
#                   annotation_parser = "default")

## -- fully specified hyper-parameters ------------------------------------
params <- list()
## Tokenizer training parameters
params$tokenizer <- list(dimension = 24,
                         epochs = 1,  # epochs = 100 for a real model
                         initialization_range = 0.1,
                         batch_size = 100, learning_rate = 0.005,
                         dropout = 0.1, early_stopping = 1)
## Tagger training parameters
params$tagger <- list(models = 2,
                      templates_1 = "tagger",
                      guesser_suffix_rules_1 = 8, guesser_enrich_dictionary_1 = 6,
                      guesser_prefixes_max_1 = 0,
                      use_lemma_1 = 0, use_xpostag_1 = 1, use_feats_1 = 1,
                      provide_lemma_1 = 0, provide_xpostag_1 = 1,
                      provide_feats_1 = 1, prune_features_1 = 0,
                      templates_2 = "lemmatizer",
                      guesser_suffix_rules_2 = 6, guesser_enrich_dictionary_2 = 4,
                      guesser_prefixes_max_2 = 4,
                      use_lemma_2 = 1, use_xpostag_2 = 0, use_feats_2 = 0,
                      provide_lemma_2 = 1, provide_xpostag_2 = 0,
                      provide_feats_2 = 0, prune_features_2 = 0)
## Dependency parser training parameters
params$parser <- list(iterations = 1,  # iterations = 30 for a real model
                      embedding_upostag = 20, embedding_feats = 20, embedding_xpostag = 0,
                      embedding_form = 50,
                      # embedding_form_file = "../ud-2.0-embeddings/nl.skip.forms.50.vectors",
                      embedding_lemma = 0, embedding_deprel = 20,
                      learning_rate = 0.01, learning_rate_final = 0.001, l2 = 0.5, hidden_layer = 200,
                      batch_size = 10, transition_system = "projective", transition_oracle = "dynamic",
                      structured_interval = 10)
## Train the model
m <- udpipe_train(file = "toymodel.udpipe",
                  files_conllu_training = file_conllu,
                  annotation_tokenizer = params$tokenizer,
                  annotation_tagger = params$tagger,
                  annotation_parser = params$parser)

## -- default hyper-parameters shipped with the package -------------------
data(udpipe_annotation_params)
str(udpipe_annotation_params$tokenizer)
## Example for training the tokenizer on the Dutch treebank
hyperparams_nl <- subset(udpipe_annotation_params$tokenizer, language_treebank == "nl")
as.list(hyperparams_nl)

## Example for training the tagger on the Dutch treebank
hyperparams_nl <- subset(udpipe_annotation_params$tagger, language_treebank == "nl")
as.list(hyperparams_nl)

## Example for training the dependency parser on the Dutch treebank
hyperparams_nl <- subset(udpipe_annotation_params$parser, language_treebank == "nl")
as.list(hyperparams_nl)

## -- cleanup -------------------------------------------------------------
invisible(file.remove(c("toymodel.udpipe")))
|
28d36d219d1b2fca149f3a502eeeb18703fc9b09
|
fe254ef6be0bd316d41b6796ef28f1c9e1d5551e
|
/R/impCoda.R
|
6c643771e1677ad977d77ee938d46e8ae11b758b
|
[] |
no_license
|
matthias-da/robCompositions
|
89b26d1242b5370d78ceb5b99f3792f0b406289f
|
a8da6576a50b5bac4446310d7b0e7c109307ddd8
|
refs/heads/master
| 2023-09-02T15:49:40.315508
| 2023-08-23T12:54:36
| 2023-08-23T12:54:36
| 14,552,562
| 8
| 6
| null | 2019-12-12T15:20:57
| 2013-11-20T09:44:25
|
C++
|
UTF-8
|
R
| false
| false
| 12,464
|
r
|
impCoda.R
|
#' Imputation of missing values in compositional data
#'
#' This function offers different methods for the imputation of missing values
#' in compositional data. Missing values are initialized with proper values.
#' Then iterative algorithms try to find better estimations for the former
#' missing values.
#'
#' eps: The algorithm is finished as soon as the imputed values stabilize, i.e.
#' until the sum of Aitchison distances from the present and previous iteration
#' changes only marginally (eps).\cr
#'
#' method: Several different methods can be chosen, such as \sQuote{ltsReg}:
#' least trimmed squares regression is used within the iterative procedure.
#' \sQuote{lm}: least squares regression is used within the iterative
#' procedure. \sQuote{classical}: principal component analysis is used within
#' the iterative procedure. \sQuote{ltsReg2}: least trimmed squares regression
#' is used within the iterative procedure. The imputed values are perturbed
#' in the direction of the predictor by values drawn from a normal distribution
#' with mean and standard deviation related to the corresponding residuals and
#' multiplied by \code{noise}. \sQuote{gmean} and \sQuote{meanClosed} perform
#' simple, non-iterative geometric-mean / column-mean imputation.
#'
#' @param x data frame or matrix
#' @param maxit maximum number of iterations
#' @param eps convergence criteria
#' @param method imputation method
#' @param closed imputation of transformed data (using ilr transformation) or
#' in the original space (\code{closed} equals TRUE)
#' @param init method for initializing missing values
#' @param k number of nearest neighbors (if init $==$ \dQuote{KNN})
#' @param dl detection limit(s), only important for the imputation of rounded
#' zeros
#' @param noise amount of adding random noise to predictors after convergence
#' @param bruteforce if TRUE, imputations over dl are set to dl. If FALSE,
#' truncated (Tobit) regression is applied.
#' @return \item{xOrig }{Original data frame or matrix} \item{xImp }{Imputed
#' data} \item{criteria }{Sum of the Aitchison distances from the present and
#' previous iteration} \item{iter }{Number of iterations} \item{maxit }{Maximum
#' number of iterations } \item{w }{Amount of imputed values} \item{wind
#' }{Index of the missing values in the data}
#' @author Matthias Templ, Karel Hron
#' @export
#' @importFrom VIM kNN
#' @importFrom robustbase ltsReg
#' @seealso \code{\link{impKNNa}}, \code{\link{pivotCoord}}
#' @references Hron, K., Templ, M., Filzmoser, P. (2010) Imputation of
#' missing values for compositional data using classical and robust methods
#' \emph{Computational Statistics and Data Analysis}, 54 (12),
#' 3095-3107.
#' @keywords robust multivariate iteration
#' @examples
#'
#' data(expenditures)
#' x <- expenditures
#' x[1,3]
#' x[1,3] <- NA
#' xi <- impCoda(x)$xImp
#' xi[1,3]
#' s1 <- sum(x[1,-3])
#' impS <- sum(xi[1,-3])
#' xi[,3] * s1/impS
#'
#' # other methods
#' impCoda(x, method = "lm")
#' impCoda(x, method = "ltsReg")
#'
`impCoda` <-
function(x, maxit=10, eps=0.5, method="ltsReg", closed=FALSE,
         init="KNN", k=5, dl=rep(0.05, ncol(x)), noise=0.1, bruteforce=FALSE){
  ## MT & KH, 1. Version April 2008
  ## MT 01. August 2008 (modification), 17. Oktober 2008 (adaption)
  ## Regression-based iteration: lm, ltsReg, ltsReg2.
  ## If closed == FALSE, the data are expressed in ilr (pivot) coordinates
  ## before each regression and back-transformed afterwards.

  ## Geometric mean of the positive, finite entries of a vector; 0 if any
  ## entry is exactly 0. Defined at function level (BUGFIX: it was previously
  ## defined only inside the method == "gmean" branch, so
  ## init == "geometricmean" failed with any regression method).
  geometricmean <- function (x) {
    if (any(na.omit(x == 0)))
      0
    else exp(mean(log(unclass(x)[is.finite(x) & x > 0])))
  }

  if( is.vector(x) ) stop("x must be a matrix or data frame")
  ## BUGFIX: "gmean" and "meanClosed" are implemented below, so they must be
  ## accepted here too (previously they were rejected, making those branches
  ## unreachable).
  stopifnot(method %in% c("ltsReg", "ltsReg2", "classical", "lm",
                          "roundedZero", "roundedZeroRobust",
                          "gmean", "meanClosed"))
  if( k > nrow(x)/4 ) warning("k might be too large")
  xcheck <- x

  ## logical index of the missing cells
  w <- is.na(x)

  if(method == "gmean"){
    ## simple geometric-mean imputation in the simplex (non-iterative)
    gm <- apply(x, 2, function(x) {
      geometricmean(as.numeric(x[complete.cases(x)]))
    })
    xmean <- x
    for(i in 1:ncol(x)){
      xmean[w[,i], i] <- gm[i]
    }
    res <- list(xOrig=xcheck, xImp=xmean, criteria=0, iter=0,
                maxit=maxit, w=length(which(w)), wind=w)
  } else if ( method=="meanClosed" ){
    ## simple column-wise mean imputation (non-iterative)
    xmean <- x
    impute <- function (x, what = c("median", "mean")) {
      what <- match.arg(what)
      if (what == "median") {
        retval <- apply(x, 2, function(z) {
          z[is.na(z)] <- median(z, na.rm = TRUE)
          z
        })
      } else if (what == "mean") {
        retval <- apply(x, 2, function(z) {
          z[is.na(z)] <- mean(as.numeric(z), na.rm = TRUE)
          z
        })
      } else {
        stop("`what' invalid")
      }
      retval
    }
    xmean <- impute(xmean)
    res <- list(xOrig=xcheck, xImp=xmean, criteria=0, iter=0,
                maxit=maxit, w=length(which(w)), wind=w)
  } else {
    ## sort the columns of the data according to the amount of missings
    ## in the variables (most missings first)
    indM <- sort(apply(x, 2, function(x) length(which(is.na(x)))),
                 index.return=TRUE, decreasing=TRUE)$ix

    ## first step - initialize all NAs with plausible starting values
    if(init=="KNN"){
      x <- impKNNa(x, k=k, metric="Aitchison", normknn=TRUE)$xImp
    }
    if(init=="KNNclosed"){
      x <- impKNNa(x, k=k, metric="Euclidean")$xImp
    }
    if(init=="roundedZero"){
      x[is.na(x)] <- 0.001
    }
    if(init=="geometricmean"){
      gm <- apply(x, 2, function(x) geometricmean(x[!is.na(x)]))
      for(i in 1:ncol(x)){
        x[is.na(x[,i]),i] <- gm[[i]]
      }
    }

    it <- 0
    criteria <- 10000000
    error <- rep(0, ncol(x))

    ## iterate until the imputed values stabilize (relative change < eps)
    ## or the iteration limit is reached
    while(it <= maxit && criteria >= eps){
      xold <- x
      it <- it+1
      for(i in 1:ncol(x)){
        ## swap the column with the i-th highest amount of NAs to position 1
        xNA <- x[,indM[i]]
        x1 <- x[,1]
        x[,1] <- xNA
        x[,indM[i]] <- x1
        if( closed == FALSE ) xilr <- pivotCoord(x) else xilr <- x
        if(method=="classical" || method =="mcd" || method == "gridMAD"){
          stop("currently not supported, please use method lm, ltsReg or ltsReg2")
        }
        if(method == "ltsReg" || method == "lm"){
          ## regress the first coordinate on all others and replace the
          ## formerly missing cells by the fitted values
          xilr <- data.frame(xilr)
          c1 <- colnames(xilr)[1]
          colnames(xilr)[1] <- "V1"
          reg1 <- get(method)(V1 ~ ., data=xilr)
          colnames(xilr)[1] <- c1
          xilr[w[, indM[i]], 1] <- fitted(reg1)[w[, indM[i]]]
        }
        if(method == "ltsReg2"){
          ## as above, but additionally remember a per-column noise scale
          ## for the final perturbation step
          xilr <- data.frame(xilr)
          c1 <- colnames(xilr)[1]
          colnames(xilr)[1] <- "V1"
          reg1 <- robustbase::ltsReg(V1 ~ ., data=xilr)
          colnames(xilr)[1] <- c1
          xilr[w[, indM[i]], 1] <- fitted(reg1)[w[, indM[i]]]
          error[indM[i]] <- noise*sd(xilr[,1])
        }
        if( closed == FALSE ) x <- pivotCoordInv(xilr) else x <- xilr
        ## swap the columns back into their original order
        xNA <- x[,1]
        x1 <- x[,indM[i]]
        x[,1] <- x1
        x[,indM[i]] <- xNA
      }
      ## relative squared change between two consecutive iterations
      criteria <- sum( ((xold - x)/x)^2, na.rm=TRUE)
      if(closed == FALSE) colnames(x) <- colnames(xcheck)
    }

    if( method == "ltsReg2"){
      ## finally, add random noise to the imputed cells (method ltsReg2 only)
      for(i in 1:ncol(x)){
        xNA <- x[,indM[i]]
        x1 <- x[,1]
        x[,1] <- xNA
        x[,indM[i]] <- x1
        if( closed == FALSE ) xilr <- -pivotCoord(x) else xilr <- x
        xilr <- data.frame(xilr)
        xilr[w[, indM[i]], 1] <- xilr[w[, indM[i]], 1] +
          rnorm(length(which(w[, indM[i]])), 0, sd=error[indM[i]])
        xilr <- data.frame(xilr)
        if( closed == FALSE ) x <- pivotCoordInv(-xilr) else x <- xilr
        xNA <- x[,1]
        x1 <- x[,indM[i]]
        x[,1] <- x1
        x[,indM[i]] <- xNA
      }
    }
    res <- list(xOrig=xcheck, xImp=x, criteria=criteria, iter=it,
                maxit=maxit, w=length(which(w)), wind=w)
  }
  class(res) <- "imp"
  invisible(res)
}
|
87762d08cbdfa440cfd80b1d1495be92ca8db7e2
|
cab285249f5e5e1fbd40897c522870ff97031a7b
|
/man/relativeBowlingERODTT.Rd
|
8f0f11da56f28f9efc86fac43740771c5f4b4058
|
[] |
no_license
|
dharmang/cricketr
|
61bd5a107fb143d471f36e256132b6f10092891a
|
4e09953d4f9d427771ce928bf8b815398a83206c
|
refs/heads/master
| 2020-05-26T09:22:51.262187
| 2019-03-08T01:24:46
| 2019-03-08T01:24:46
| 188,184,725
| 1
| 0
| null | 2019-05-23T07:36:12
| 2019-05-23T07:36:11
| null |
UTF-8
|
R
| false
| false
| 1,898
|
rd
|
relativeBowlingERODTT.Rd
|
\name{relativeBowlingERODTT}
\alias{relativeBowlingERODTT}
\title{
Compute and plot the relative mean Economy Rate (ER) of the bowlers for ODI or Twenty20
}
\description{
This function computes and plots the relative Economy Rate of the bowlers for ODI or Twenty20
}
\usage{
relativeBowlingERODTT(frames, names)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{frames}{
This is a list of <bowler>.csv files obtained with an initial getPlayerDataOD() or getPlayerTT()
}
\item{names}{
A list of bowlers names who need to be compared
}
}
\details{
More details can be found in my short video tutorial in Youtube
https://www.youtube.com/watch?v=q9uMPFVsXsI
}
\value{
None
}
\references{
http://www.espncricinfo.com/ci/content/stats/index.html\cr
https://gigadom.wordpress.com/
}
\author{
Tinniam V Ganesh
}
\note{
Maintainer: Tinniam V Ganesh <tvganesh.85@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{relativeBatsmanSRODTT}}
\code{\link{relativeRunsFreqPerfODTT}}
}
\examples{
# Get or use the <bowler>.csv obtained with getPlayerData()
# a <- getPlayerData(47492,file="steyn.csv",type="bowling",
# homeOrAway=c(1,2),result=c(1,2,4))
# Retrieve the file path of a data file installed with cricketr
steyn <- system.file("data", "steyn.csv", package = "cricketr")
mitchell <- system.file("data", "mitchell.csv", package = "cricketr")
southee <- system.file("data", "southee.csv", package = "cricketr")
malinga <- system.file("data", "malinga.csv", package = "cricketr")
frames <- list(steyn,mitchell,southee,malinga)
names <- c("Steyn","Mitchell","Southee","Malinga")
relativeBowlingERODTT(frames,names)
# Note: This example uses the /data directory for the files. However
# you can use any directory as long as the data files exists in that directory.
}
|
0399e65bf5a5c55554bc92435ed3e470e87145dc
|
730ec6f7b8046c842ee4b7d35bdace9cfd75f202
|
/man/collapseNoMismatch.Rd
|
8dbb7580b3541099cf3ced8b6341a3f341785848
|
[] |
no_license
|
cmsmoo/dada2
|
e0a4d8a4eef1727bc0bfaf155a57a2c30a812111
|
0c99d4e6cf6d71c8733cd46fa31ada5df683fa3d
|
refs/heads/master
| 2021-01-22T16:13:42.488161
| 2016-02-23T01:42:05
| 2016-02-23T01:42:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,964
|
rd
|
collapseNoMismatch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multiSample.R
\name{collapseNoMismatch}
\alias{collapseNoMismatch}
\title{Collapse together sequences that are identical up to shifts and/or length.}
\usage{
collapseNoMismatch(seqtab, minOverlap = 20, verbose = FALSE)
}
\arguments{
\item{seqtab}{(Required). A samples by sequence matrix, the return of \code{\link{makeSequenceTable}}.}
\item{minOverlap}{(Optional). \code{numeric(1)}. Default is 20.
The minimum amount of overlap between sequences required to collapse them together.}
\item{verbose}{(Optional). \code{logical(1)}. Default FALSE.
If TRUE, a summary of this function is printed to standard output.}
}
\value{
Integer \code{matrix}.
A row for each sample, and a column for each collapsed sequence across all the samples.
Note that the columns are named by the sequence which can make display a little unwieldy.
Columns are in the same order (modulo the removed columns) as in the input matrix.
}
\description{
This function takes as input a sequence table and returns a sequence table in which
any sequences that are identical up to shifts or length variation, i.e. that have
no mismatches or internal indels when aligned, are collapsed together. The most abundant
sequence is chosen as the representative of the collapsed sequences.
}
\details{
This function can be thought of as implementing greedy 100-percent OTU clustering, where end-gapping
is ignored. The current implementation relies on full alignments and is therefore much slower
than necessary. A better implementation would be good.
}
\examples{
derep1 <- derepFastq(system.file("extdata", "sam1F.fastq.gz", package="dada2"))
derep2 <- derepFastq(system.file("extdata", "sam2F.fastq.gz", package="dada2"))
dada1 <- dada(derep1, tperr1)
dada2 <- dada(derep2, tperr1)
seqtab <- makeSequenceTable(list(sample1=dada1, sample2=dada2))
collapseNoMismatch(seqtab)
}
\seealso{
\code{\link{makeSequenceTable}}
}
|
902dd7ca619c25228c755cf81517137629f03b68
|
9ffe93784d7f093b30151fb6053a6bec6184fde3
|
/reprex.R
|
5b1f69b608ed96bbf6db812a0814402b65063f26
|
[] |
no_license
|
restrellado/ca_school_distances
|
0847ceb5b2b9527523749fce9c01a56354771d82
|
b9a99b0eacd9b0f1c1b5699c7ed0c62b16fc9ea6
|
refs/heads/master
| 2020-03-21T13:30:47.628073
| 2018-06-29T20:47:30
| 2018-06-29T20:47:30
| 138,609,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 648
|
r
|
reprex.R
|
library(reprex)
#------------------------------------------------------------------------------
# Render a reproducible example: mean pairwise geodesic distance (in miles)
# between three schools, computed from a distance matrix of their coordinates.
reprex({
  library(tidyverse)
  library(geosphere)
  # School location data
  schools <- tibble(
    school = c("Roosevelt", "Lincoln", "Washington"),
    long = c(-117.1873, -117.1821, -117.1454),
    lat = c(32.69640, 32.69391, 32.64398)
  )
  # Create distance matrix (distm() returns meters)
  # NOTE: as.tibble() and funs() are deprecated; use as_tibble() and a
  # purrr-style lambda instead.
  mat <- distm(schools[, c(2, 3)], schools[, c(2, 3)]) %>%
    as_tibble(.name_repair = "unique") %>%
    # Convert meters to miles
    mutate_all(~ . * 0.000621371)
  # Summarize by finding the mean of each row (column-wise via map_dbl),
  # then taking the mean of those values
  mean(mat %>% map_dbl(mean))
})
|
5d06df13defa3d31284ce2c397ea83e681b911ff
|
3f94521e8fd715b749d50b3a8dac7cace1844eda
|
/Part 4 - Clustering/Section 25 - Hierarchical Clustering/hc - ADONIS.R
|
72fd79735a9b7c9892c1e727ecfbd4ae00d1f976
|
[] |
no_license
|
adonisDias/Machine-Learning-Basic---A-Z
|
70eead9217a1ff951ca6af4bcb1a15b6a2cc4e5e
|
7a92aeb4fc85c68a3f061d2273de63f669746092
|
refs/heads/master
| 2020-06-03T04:00:23.623177
| 2020-04-16T20:55:59
| 2020-04-16T20:55:59
| 191,430,074
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,420
|
r
|
hc - ADONIS.R
|
# Hierarchical Clustering
# Importing the mall dataset
dataset = read.csv('Mall_Customers.csv')
X = dataset[4:5]
# Using the dendrogram to find the optimal number of clusters
dendrogram = hclust(dist(X, method = 'euclidean'), method = 'ward.D') # the first argument "d" is the distance matrix of dataset X: for every pair of customers it holds the Euclidean distance between their two coordinate values
plot(dendrogram,
     main = paste('Dendrogram'),
     xlab = 'Customers',
     ylab = 'Euclidean distances')
# Fitting hierarchical clustering to the mall dataset
hc = hclust(dist(X, method = 'euclidean'), method = 'ward.D') # same distance matrix as above, clustered with Ward linkage ('ward.D')
y_hc = cutree(hc, 5) # cutree produces the vector telling, for each customer, which of the 5 clusters they belong to
# Visualising the clusters
library(cluster)
clusplot(X,
         y_hc,
         lines = 0,
         shade = TRUE,
         color = TRUE,
         labels = 2,
         plotchar = FALSE,
         span = TRUE,
         main = paste('Clusters of Clients'),
         xlab = 'Annual Income',
         ylab = 'Spending Score')
|
b51976b17c7c3b4ff07ab39329468e1c17960850
|
0fbfb9298e862d65bd4e076c9cc06198763b1a84
|
/Figure 1 Step1_part2_UPenn.R
|
a86ecfa91cbd38b851f085cb56f1ac8be0e59ddf
|
[
"MIT"
] |
permissive
|
kozlama/Sayed-Kodama-Fan-et-al-2021
|
18dd7475523515cd9baf164d27d301da3bcef280
|
332532c147f826760c9fb24cb2b2cf969aef5e20
|
refs/heads/main
| 2023-07-19T15:47:07.310109
| 2021-09-09T21:32:52
| 2021-09-09T21:32:52
| 404,477,614
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 117,629
|
r
|
Figure 1 Step1_part2_UPenn.R
|
###############################################################################################
# Pre-processing for data used to generate Figure 1 from Sayed, Kodama, Fan, et al. 2021
# Total of 55 human AD R47H vs CV samples and non-AD samples
# This script is: STEP 1 of 5 (PART 2 of 2) - 21 human AD R47H vs CV samples from UPenn
# Adapted from https://satijalab.org/seurat/v3.1/pbmc3k_tutorial.html
# by Li Fan
###############################################################################################
library(Seurat)
library(ggplot2)
library(DoubletFinder)
#set working directory ====
setwd("/athena/ganlab/scratch/lif4001/Human_UPenn/data_analysis/DF_2ndRound")
#load in data from Cell Ranger or other counts data ====
#for loading Cell Ranger counts:
Gan_43.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_43/outs/filtered_feature_bc_matrix")
Gan_43 <- CreateSeuratObject(counts = Gan_43.counts, project = "AD_WT_E3E3_F_1", min.cells = 3, min.features = 200)
Gan_43[["Condition"]] = c('AD_WT_E3E3_F')
Gan_43[["Condition_1"]] = c('WT_E3E3')
Gan_43[["Condition_2"]] = c('WT_F')
Gan_43[["Condition_3"]] = c('E3E3_F')
Gan_43[["TREM2"]] = c('WT')
Gan_43[["Dx"]] = c('AD')
Gan_43[["LBD"]] = c('N/A')
Gan_43[["Braak"]] = c('6')
Gan_43[["Thal"]] = c('N/A')
Gan_43[["TDP.43"]] = c('N/A')
Gan_43[["ClinicalDx"]] = c('Dementia with Lewy Bodies')
Gan_43[["APOE"]] = c('E3E3')
Gan_43[["Age"]] = c('83')
Gan_43[["Age_Onset"]] = c('77')
Gan_43[["Sex"]] = c('F')
Gan_43[["PMI"]] = c('4')
Gan_43[["INDDID"]] = c('108953')
rm(Gan_43.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_43[["percent.mt"]] <- PercentageFeatureSet(object = Gan_43, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_44.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_44/outs/filtered_feature_bc_matrix")
Gan_44 <- CreateSeuratObject(counts = Gan_44.counts, project = "AD_WT_E3E3_F_2", min.cells = 3, min.features = 200)
Gan_44[["Condition"]] = c('AD_WT_E3E3_F')
Gan_44[["Condition_1"]] = c('WT_E3E3')
Gan_44[["Condition_2"]] = c('WT_F')
Gan_44[["Condition_3"]] = c('E3E3_F')
Gan_44[["TREM2"]] = c('WT')
Gan_44[["Dx"]] = c('AD')
Gan_44[["LBD"]] = c('N/A')
Gan_44[["Braak"]] = c('6')
Gan_44[["Thal"]] = c('N/A')
Gan_44[["TDP.43"]] = c('N/A')
Gan_44[["ClinicalDx"]] = c('Probable Alzheimers Disease')
Gan_44[["APOE"]] = c('E3E3')
Gan_44[["Age"]] = c('87')
Gan_44[["Age_Onset"]] = c('78')
Gan_44[["Sex"]] = c('F')
Gan_44[["PMI"]] = c('16')
Gan_44[["INDDID"]] = c('105007')
rm(Gan_44.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_44[["percent.mt"]] <- PercentageFeatureSet(object = Gan_44, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_45.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_45/outs/filtered_feature_bc_matrix")
Gan_45 <- CreateSeuratObject(counts = Gan_45.counts, project = "AD_WT_E4E4_F", min.cells = 3, min.features = 200)
Gan_45[["Condition"]] = c('AD_WT_E4E4_F')
Gan_45[["Condition_1"]] = c('WT_E4E4')
Gan_45[["Condition_2"]] = c('WT_F')
Gan_45[["Condition_3"]] = c('E4E4_F')
Gan_45[["TREM2"]] = c('WT')
Gan_45[["Dx"]] = c('AD')
Gan_45[["LBD"]] = c('N/A')
Gan_45[["Braak"]] = c('5')
Gan_45[["Thal"]] = c('N/A')
Gan_45[["TDP.43"]] = c('N/A')
Gan_45[["ClinicalDx"]] = c('Probable Alzheimers Disease')
Gan_45[["APOE"]] = c('E4E4')
Gan_45[["Age"]] = c('72')
Gan_45[["Age_Onset"]] = c('66')
Gan_45[["Sex"]] = c('F')
Gan_45[["PMI"]] = c('5')
Gan_45[["INDDID"]] = c('102475')
rm(Gan_45.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_45[["percent.mt"]] <- PercentageFeatureSet(object = Gan_45, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_46.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_46/outs/filtered_feature_bc_matrix")
Gan_46 <- CreateSeuratObject(counts = Gan_46.counts, project = "AD_WT_E3E3_M", min.cells = 3, min.features = 200)
Gan_46[["Condition"]] = c('AD_WT_E3E3_M')
Gan_46[["Condition_1"]] = c('WT_E3E3')
Gan_46[["Condition_2"]] = c('WT_M')
Gan_46[["Condition_3"]] = c('E3E3_M')
Gan_46[["TREM2"]] = c('WT')
Gan_46[["Dx"]] = c('AD')
Gan_46[["LBD"]] = c('N/A')
Gan_46[["Braak"]] = c('5')
Gan_46[["Thal"]] = c('N/A')
Gan_46[["TDP.43"]] = c('N/A')
Gan_46[["ClinicalDx"]] = c('Dementia of undetermined etiology')
Gan_46[["APOE"]] = c('E3E3')
Gan_46[["Age"]] = c('82')
Gan_46[["Age_Onset"]] = c('74')
Gan_46[["Sex"]] = c('M')
Gan_46[["PMI"]] = c('39')
Gan_46[["INDDID"]] = c('107928')
rm(Gan_46.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_46[["percent.mt"]] <- PercentageFeatureSet(object = Gan_46, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_47.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_47/outs/filtered_feature_bc_matrix")
Gan_47 <- CreateSeuratObject(counts = Gan_47.counts, project = "AD_WT_E3E4_M_1", min.cells = 3, min.features = 200)
Gan_47[["Condition"]] = c('AD_WT_E3E4_M')
Gan_47[["Condition_1"]] = c('WT_E3E4')
Gan_47[["Condition_2"]] = c('WT_M')
Gan_47[["Condition_3"]] = c('E3E4_M')
Gan_47[["TREM2"]] = c('WT')
Gan_47[["Dx"]] = c('AD')
Gan_47[["LBD"]] = c('N/A')
Gan_47[["Braak"]] = c('6')
Gan_47[["Thal"]] = c('N/A')
Gan_47[["TDP.43"]] = c('N/A')
Gan_47[["ClinicalDx"]] = c('Probable Alzheimers Disease')
Gan_47[["APOE"]] = c('E3E4')
Gan_47[["Age"]] = c('64')
Gan_47[["Age_Onset"]] = c('60')
Gan_47[["Sex"]] = c('M')
Gan_47[["PMI"]] = c('6')
Gan_47[["INDDID"]] = c('122417')
rm(Gan_47.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_47[["percent.mt"]] <- PercentageFeatureSet(object = Gan_47, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_48.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_48/outs/filtered_feature_bc_matrix")
Gan_48 <- CreateSeuratObject(counts = Gan_48.counts, project = "AD_WT_E3E4_M_2", min.cells = 3, min.features = 200)
Gan_48[["Condition"]] = c('AD_WT_E3E4_M')
Gan_48[["Condition_1"]] = c('WT_E3E4')
Gan_48[["Condition_2"]] = c('WT_M')
Gan_48[["Condition_3"]] = c('E3E4_M')
Gan_48[["TREM2"]] = c('WT')
Gan_48[["Dx"]] = c('AD')
Gan_48[["LBD"]] = c('N/A')
Gan_48[["Braak"]] = c('6')
Gan_48[["Thal"]] = c('N/A')
Gan_48[["TDP.43"]] = c('N/A')
Gan_48[["ClinicalDx"]] = c('Probable Alzheimers Disease')
Gan_48[["APOE"]] = c('E3E4')
Gan_48[["Age"]] = c('71')
Gan_48[["Age_Onset"]] = c('62')
Gan_48[["Sex"]] = c('M')
Gan_48[["PMI"]] = c('4.5')
Gan_48[["INDDID"]] = c('111593')
rm(Gan_48.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_48[["percent.mt"]] <- PercentageFeatureSet(object = Gan_48, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_49.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_49/outs/filtered_feature_bc_matrix")
Gan_49 <- CreateSeuratObject(counts = Gan_49.counts, project = "AD_WT_E3E4_M_3", min.cells = 3, min.features = 200)
Gan_49[["Condition"]] = c('AD_WT_E3E4_M')
Gan_49[["Condition_1"]] = c('WT_E3E4')
Gan_49[["Condition_2"]] = c('WT_M')
Gan_49[["Condition_3"]] = c('E3E4_M')
Gan_49[["TREM2"]] = c('WT')
Gan_49[["Dx"]] = c('AD')
Gan_49[["LBD"]] = c('N/A')
Gan_49[["Braak"]] = c('6')
Gan_49[["Thal"]] = c('N/A')
Gan_49[["TDP.43"]] = c('N/A')
Gan_49[["ClinicalDx"]] = c('Probable Alzheimers Disease')
Gan_49[["APOE"]] = c('E3E4')
Gan_49[["Age"]] = c('78')
Gan_49[["Age_Onset"]] = c('64')
Gan_49[["Sex"]] = c('M')
Gan_49[["PMI"]] = c('6.5')
Gan_49[["INDDID"]] = c('107833')
rm(Gan_49.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_49[["percent.mt"]] <- PercentageFeatureSet(object = Gan_49, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_50.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_50/outs/filtered_feature_bc_matrix")
Gan_50 <- CreateSeuratObject(counts = Gan_50.counts, project = "AD_WT_E4E4_M", min.cells = 3, min.features = 200)
Gan_50[["Condition"]] = c('AD_WT_E4E4_M')
Gan_50[["Condition_1"]] = c('WT_E4E4')
Gan_50[["Condition_2"]] = c('WT_M')
Gan_50[["Condition_3"]] = c('E4E4_M')
Gan_50[["TREM2"]] = c('WT')
Gan_50[["Dx"]] = c('AD')
Gan_50[["LBD"]] = c('N/A')
Gan_50[["Braak"]] = c('6')
Gan_50[["Thal"]] = c('N/A')
Gan_50[["TDP.43"]] = c('N/A')
Gan_50[["ClinicalDx"]] = c('Probable Alzheimers Disease')
Gan_50[["APOE"]] = c('E4E4')
Gan_50[["Age"]] = c('78')
Gan_50[["Age_Onset"]] = c('70')
Gan_50[["Sex"]] = c('M')
Gan_50[["PMI"]] = c('6')
Gan_50[["INDDID"]] = c('100436')
rm(Gan_50.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_50[["percent.mt"]] <- PercentageFeatureSet(object = Gan_50, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_51.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_51/outs/filtered_feature_bc_matrix")
Gan_51 <- CreateSeuratObject(counts = Gan_51.counts, project = "AD_R47H_E2E4_F", min.cells = 3, min.features = 200)
Gan_51[["Condition"]] = c('AD_R47H_E2E4_F')
Gan_51[["Condition_1"]] = c('R47H_E2E4')
Gan_51[["Condition_2"]] = c('R47H_F')
Gan_51[["Condition_3"]] = c('E2E4_F')
Gan_51[["TREM2"]] = c('R47H')
Gan_51[["Dx"]] = c('AD')
Gan_51[["LBD"]] = c('N/A')
Gan_51[["Braak"]] = c('6')
Gan_51[["Thal"]] = c('N/A')
Gan_51[["TDP.43"]] = c('N/A')
Gan_51[["ClinicalDx"]] = c('PPA_Logopenic')
Gan_51[["APOE"]] = c('E2E4')
Gan_51[["Age"]] = c('71')
Gan_51[["Age_Onset"]] = c('62')
Gan_51[["Sex"]] = c('F')
Gan_51[["PMI"]] = c('4.5')
Gan_51[["INDDID"]] = c('107342')
rm(Gan_51.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_51[["percent.mt"]] <- PercentageFeatureSet(object = Gan_51, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_52.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_52/outs/filtered_feature_bc_matrix")
Gan_52 <- CreateSeuratObject(counts = Gan_52.counts, project = "AD_R47H_E3E3_F", min.cells = 3, min.features = 200)
Gan_52[["Condition"]] = c('AD_R47H_E3E3_F')
Gan_52[["Condition_1"]] = c('R47H_E3E3')
Gan_52[["Condition_2"]] = c('R47H_F')
Gan_52[["Condition_3"]] = c('E3E3_F')
Gan_52[["TREM2"]] = c('R47H')
Gan_52[["Dx"]] = c('AD_ALS')
Gan_52[["LBD"]] = c('N/A')
Gan_52[["Braak"]] = c('5')
Gan_52[["Thal"]] = c('N/A')
Gan_52[["TDP.43"]] = c('N/A')
Gan_52[["ClinicalDx"]] = c('Amyotrophic lateral sclerosis')
Gan_52[["APOE"]] = c('E3E3')
Gan_52[["Age"]] = c('87')
Gan_52[["Age_Onset"]] = c('63')
Gan_52[["Sex"]] = c('F')
Gan_52[["PMI"]] = c('4')
Gan_52[["INDDID"]] = c('107518')
rm(Gan_52.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_52[["percent.mt"]] <- PercentageFeatureSet(object = Gan_52, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_54.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_54/outs/filtered_feature_bc_matrix")
Gan_54 <- CreateSeuratObject(counts = Gan_54.counts, project = "AD_R47H_E3E3_M", min.cells = 3, min.features = 200)
Gan_54[["Condition"]] = c('AD_R47H_E3E3_M')
Gan_54[["Condition_1"]] = c('R47H_E3E3')
Gan_54[["Condition_2"]] = c('R47H_M')
Gan_54[["Condition_3"]] = c('E3E3_M')
Gan_54[["TREM2"]] = c('R47H')
Gan_54[["Dx"]] = c('AD')
Gan_54[["LBD"]] = c('N/A')
Gan_54[["Braak"]] = c('5')
Gan_54[["Thal"]] = c('N/A')
Gan_54[["TDP.43"]] = c('N/A')
Gan_54[["ClinicalDx"]] = c('Cerebrovascular disease')
Gan_54[["APOE"]] = c('E3E3')
Gan_54[["Age"]] = c('82')
Gan_54[["Age_Onset"]] = c('70')
Gan_54[["Sex"]] = c('M')
Gan_54[["PMI"]] = c('9')
Gan_54[["INDDID"]] = c('104332')
rm(Gan_54.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_54[["percent.mt"]] <- PercentageFeatureSet(object = Gan_54, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_55.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_55/outs/filtered_feature_bc_matrix")
Gan_55 <- CreateSeuratObject(counts = Gan_55.counts, project = "AD_R47H_E3E4_M_1", min.cells = 3, min.features = 200)
Gan_55[["Condition"]] = c('AD_R47H_E3E4_M')
Gan_55[["Condition_1"]] = c('R47H_E3E4')
Gan_55[["Condition_2"]] = c('R47H_M')
Gan_55[["Condition_3"]] = c('E3E4_M')
Gan_55[["TREM2"]] = c('R47H')
Gan_55[["Dx"]] = c('AD')
Gan_55[["LBD"]] = c('N/A')
Gan_55[["Braak"]] = c('6')
Gan_55[["Thal"]] = c('N/A')
Gan_55[["TDP.43"]] = c('N/A')
Gan_55[["ClinicalDx"]] = c('Probable Alzheimers Disease')
Gan_55[["APOE"]] = c('E3E4')
Gan_55[["Age"]] = c('77')
Gan_55[["Age_Onset"]] = c('68')
Gan_55[["Sex"]] = c('M')
Gan_55[["PMI"]] = c('14')
Gan_55[["INDDID"]] = c('113755')
rm(Gan_55.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_55[["percent.mt"]] <- PercentageFeatureSet(object = Gan_55, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_56.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_56/outs/filtered_feature_bc_matrix")
Gan_56 <- CreateSeuratObject(counts = Gan_56.counts, project = "AD_R47H_E3E4_M_2", min.cells = 3, min.features = 200)
Gan_56[["Condition"]] = c('AD_R47H_E3E4_M')
Gan_56[["Condition_1"]] = c('R47H_E3E4')
Gan_56[["Condition_2"]] = c('R47H_M')
Gan_56[["Condition_3"]] = c('E3E4_M')
Gan_56[["TREM2"]] = c('R47H')
Gan_56[["Dx"]] = c('AD')
Gan_56[["LBD"]] = c('N/A')
Gan_56[["Braak"]] = c('6')
Gan_56[["Thal"]] = c('N/A')
Gan_56[["TDP.43"]] = c('N/A')
Gan_56[["ClinicalDx"]] = c('PPA_Logopenic')
Gan_56[["APOE"]] = c('E3E4')
Gan_56[["Age"]] = c('60')
Gan_56[["Age_Onset"]] = c('56')
Gan_56[["Sex"]] = c('M')
Gan_56[["PMI"]] = c('12')
Gan_56[["INDDID"]] = c('100957')
rm(Gan_56.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_56[["percent.mt"]] <- PercentageFeatureSet(object = Gan_56, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_59.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_59/outs/filtered_feature_bc_matrix")
Gan_59 <- CreateSeuratObject(counts = Gan_59.counts, project = "Non_WT_E2E3_F_1", min.cells = 3, min.features = 200)
Gan_59[["Condition"]] = c('Non_WT_E2E3_F')
Gan_59[["Condition_1"]] = c('WT_E2E3')
Gan_59[["Condition_2"]] = c('WT_F')
Gan_59[["Condition_3"]] = c('E2E3_F')
Gan_59[["TREM2"]] = c('WT')
Gan_59[["Dx"]] = c('Normal')
Gan_59[["LBD"]] = c('N/A')
Gan_59[["Braak"]] = c('1')
Gan_59[["Thal"]] = c('N/A')
Gan_59[["TDP.43"]] = c('N/A')
Gan_59[["ClinicalDx"]] = c('N/A')
Gan_59[["APOE"]] = c('E2E3')
Gan_59[["Age"]] = c('72')
Gan_59[["Age_Onset"]] = c('N/A')
Gan_59[["Sex"]] = c('F')
Gan_59[["PMI"]] = c('6')
Gan_59[["INDDID"]] = c('119534')
rm(Gan_59.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_59[["percent.mt"]] <- PercentageFeatureSet(object = Gan_59, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_60.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_60/outs/filtered_feature_bc_matrix")
Gan_60 <- CreateSeuratObject(counts = Gan_60.counts, project = "Non_WT_E2E3_F_2", min.cells = 3, min.features = 200)
Gan_60[["Condition"]] = c('Non_WT_E2E3_F')
Gan_60[["Condition_1"]] = c('WT_E2E3')
Gan_60[["Condition_2"]] = c('WT_F')
Gan_60[["Condition_3"]] = c('E2E3_F')
Gan_60[["TREM2"]] = c('WT')
Gan_60[["Dx"]] = c('Normal')
Gan_60[["LBD"]] = c('N/A')
Gan_60[["Braak"]] = c('1')
Gan_60[["Thal"]] = c('N/A')
Gan_60[["TDP.43"]] = c('N/A')
Gan_60[["ClinicalDx"]] = c('N/A')
Gan_60[["APOE"]] = c('E2E3')
Gan_60[["Age"]] = c('83')
Gan_60[["Age_Onset"]] = c('N/A')
Gan_60[["Sex"]] = c('F')
Gan_60[["PMI"]] = c('3')
Gan_60[["INDDID"]] = c('112090')
rm(Gan_60.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_60[["percent.mt"]] <- PercentageFeatureSet(object = Gan_60, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_61.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_61/outs/filtered_feature_bc_matrix")
Gan_61 <- CreateSeuratObject(counts = Gan_61.counts, project = "Non_WT_NA_F", min.cells = 3, min.features = 200)
Gan_61[["Condition"]] = c('Non_WT_NA_F')
Gan_61[["Condition_1"]] = c('WT_NA')
Gan_61[["Condition_2"]] = c('WT_F')
Gan_61[["Condition_3"]] = c('NA_F')
Gan_61[["TREM2"]] = c('WT')
Gan_61[["Dx"]] = c('Normal')
Gan_61[["LBD"]] = c('N/A')
Gan_61[["Braak"]] = c('2')
Gan_61[["Thal"]] = c('N/A')
Gan_61[["TDP.43"]] = c('N/A')
Gan_61[["ClinicalDx"]] = c('N/A')
Gan_61[["APOE"]] = c('NA')
Gan_61[["Age"]] = c('75')
Gan_61[["Age_Onset"]] = c('N/A')
Gan_61[["Sex"]] = c('F')
Gan_61[["PMI"]] = c('12')
Gan_61[["INDDID"]] = c('125061')
rm(Gan_61.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_61[["percent.mt"]] <- PercentageFeatureSet(object = Gan_61, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_62.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_62/outs/filtered_feature_bc_matrix")
Gan_62 <- CreateSeuratObject(counts = Gan_62.counts, project = "Non_WT_E2E3_M_1", min.cells = 3, min.features = 200)
Gan_62[["Condition"]] = c('Non_WT_E2E3_M')
Gan_62[["Condition_1"]] = c('WT_E2E3')
Gan_62[["Condition_2"]] = c('WT_M')
Gan_62[["Condition_3"]] = c('E2E3_M')
Gan_62[["TREM2"]] = c('WT')
Gan_62[["Dx"]] = c('Normal')
Gan_62[["LBD"]] = c('N/A')
Gan_62[["Braak"]] = c('0')
Gan_62[["Thal"]] = c('N/A')
Gan_62[["TDP.43"]] = c('N/A')
Gan_62[["ClinicalDx"]] = c('N/A')
Gan_62[["APOE"]] = c('E2E3')
Gan_62[["Age"]] = c('68')
Gan_62[["Age_Onset"]] = c('N/A')
Gan_62[["Sex"]] = c('M')
Gan_62[["PMI"]] = c('14')
Gan_62[["INDDID"]] = c('118709')
rm(Gan_62.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_62[["percent.mt"]] <- PercentageFeatureSet(object = Gan_62, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_63.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_63/outs/filtered_feature_bc_matrix")
Gan_63 <- CreateSeuratObject(counts = Gan_63.counts, project = "Non_WT_E2E3_M_2", min.cells = 3, min.features = 200)
Gan_63[["Condition"]] = c('Non_WT_E2E3_M')
Gan_63[["Condition_1"]] = c('WT_E2E3')
Gan_63[["Condition_2"]] = c('WT_M')
Gan_63[["Condition_3"]] = c('E2E3_M')
Gan_63[["TREM2"]] = c('WT')
Gan_63[["Dx"]] = c('Normal')
Gan_63[["LBD"]] = c('N/A')
Gan_63[["Braak"]] = c('1')
Gan_63[["Thal"]] = c('N/A')
Gan_63[["TDP.43"]] = c('N/A')
Gan_63[["ClinicalDx"]] = c('N/A')
Gan_63[["APOE"]] = c('E2E3')
Gan_63[["Age"]] = c('72')
Gan_63[["Age_Onset"]] = c('N/A')
Gan_63[["Sex"]] = c('M')
Gan_63[["PMI"]] = c('17')
Gan_63[["INDDID"]] = c('120927')
rm(Gan_63.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_63[["percent.mt"]] <- PercentageFeatureSet(object = Gan_63, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_64.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_64/outs/filtered_feature_bc_matrix")
Gan_64 <- CreateSeuratObject(counts = Gan_64.counts, project = "Non_WT_E2E4_M", min.cells = 3, min.features = 200)
Gan_64[["Condition"]] = c('Non_WT_E2E4_M')
Gan_64[["Condition_1"]] = c('WT_E2E4')
Gan_64[["Condition_2"]] = c('WT_M')
Gan_64[["Condition_3"]] = c('E2E4_M')
Gan_64[["TREM2"]] = c('WT')
Gan_64[["Dx"]] = c('Normal')
Gan_64[["LBD"]] = c('N/A')
Gan_64[["Braak"]] = c('0')
Gan_64[["Thal"]] = c('N/A')
Gan_64[["TDP.43"]] = c('N/A')
Gan_64[["ClinicalDx"]] = c('N/A')
Gan_64[["APOE"]] = c('E2E4')
Gan_64[["Age"]] = c('61')
Gan_64[["Age_Onset"]] = c('N/A')
Gan_64[["Sex"]] = c('M')
Gan_64[["PMI"]] = c('6')
Gan_64[["INDDID"]] = c('100786')
rm(Gan_64.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_64[["percent.mt"]] <- PercentageFeatureSet(object = Gan_64, pattern = "^MT-") #recognize mitochondrial transcripts
#for loading Cell Ranger counts:
Gan_65.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_65/outs/filtered_feature_bc_matrix")
Gan_65 <- CreateSeuratObject(counts = Gan_65.counts, project = "Non_WT_E3E3_M_1", min.cells = 3, min.features = 200)
Gan_65[["Condition"]] = c('Non_WT_E3E3_M')
Gan_65[["Condition_1"]] = c('WT_E3E3')
Gan_65[["Condition_2"]] = c('WT_M')
Gan_65[["Condition_3"]] = c('E3E3_M')
Gan_65[["TREM2"]] = c('WT')
Gan_65[["Dx"]] = c('Normal')
Gan_65[["LBD"]] = c('N/A')
Gan_65[["Braak"]] = c('1')
Gan_65[["Thal"]] = c('N/A')
Gan_65[["TDP.43"]] = c('N/A')
Gan_65[["ClinicalDx"]] = c('N/A')
Gan_65[["APOE"]] = c('E3E3')
Gan_65[["Age"]] = c('75')
Gan_65[["Age_Onset"]] = c('N/A')
Gan_65[["Sex"]] = c('M')
Gan_65[["PMI"]] = c('17')
Gan_65[["INDDID"]] = c('113464')
rm(Gan_65.counts)
#vizualize QC metrics and filtering====
#mitochondrial transcripts - if the cell has high mitochondrial transcripts, it may signal a cell under stress/unhealthy
Gan_65[["percent.mt"]] <- PercentageFeatureSet(object = Gan_65, pattern = "^MT-") #recognize mitochondrial transcripts
# Load Cell Ranger filtered counts for sample Gan_66 and build its Seurat object.
Gan_66.counts <- Read10X(data.dir = "/athena/ganlab/scratch/lif4001/Human_UPenn/cellranger/Gan_66/outs/filtered_feature_bc_matrix")
Gan_66 <- CreateSeuratObject(counts = Gan_66.counts, project = "Non_WT_E3E3_M_2", min.cells = 3, min.features = 200)
# Per-sample annotations (condition labels, neuropath scores, demographics).
gan_66_meta <- list(
  Condition   = "Non_WT_E3E3_M",
  Condition_1 = "WT_E3E3",
  Condition_2 = "WT_M",
  Condition_3 = "E3E3_M",
  TREM2       = "WT",
  Dx          = "Normal",
  LBD         = "N/A",
  Braak       = "2",
  Thal        = "N/A",
  TDP.43      = "N/A",
  ClinicalDx  = "N/A",
  APOE        = "E3E3",
  Age         = "83",
  Age_Onset   = "N/A",
  Sex         = "M",
  PMI         = "6",
  INDDID      = "119767"
)
for (field in names(gan_66_meta)) {
  Gan_66[[field]] <- gan_66_meta[[field]]
}
rm(Gan_66.counts, gan_66_meta)
# QC metric: percentage of counts from mitochondrial genes ("MT-" prefix);
# a high value may signal a stressed/unhealthy cell.
Gan_66[["percent.mt"]] <- PercentageFeatureSet(object = Gan_66, pattern = "^MT-")
# Output directory for all per-sample PDFs and RDS checkpoints below.
# NOTE(review): setwd() in a script is fragile -- every pdf()/saveRDS()/readRDS()
# call that follows uses a path relative to this directory; prefer building
# absolute paths with file.path() instead.
setwd("/athena/ganlab/scratch/lif4001/Human_UPenn/data_analysis/DF_2ndRound")
###############################################################################################
# ---- Gan_43: QC, filtering, clustering, DoubletFinder, singlet reprocessing ----
all <- Gan_43
# QC violins: genes/cell, UMIs/cell, percent mitochondrial counts.
pdf("Gan_43_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# Scatter of UMI count vs percent.mt and vs gene count; the latter should
# track roughly 1:1 for healthy nuclei.
pdf("Gan_43_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
# NOTE(review): CombinePlots() is deprecated in recent Seurat releases
# (patchwork's `plot1 + plot2` replaces it); kept here to preserve behavior.
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Initial filter: 300 < genes < 9000, UMIs < 50000, percent.mt < 5.
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Standard pipeline: log-normalize -> 2000 HVGs -> scale -> PCA.
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_43_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
# Cluster and embed on the first 15 PCs.
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_43_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = TRUE)  # TRUE, not T (T is reassignable)
dev.off()
# saveRDS(all, "Gan_43_PCA.rds")  # optional checkpoint
# all <- readRDS("Gan_43_PCA.rds")
# DoubletFinder parameter sweep (no ground truth); see
# https://github.com/chris-mcginnis-ucsf/DoubletFinder for parameter details.
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_43_ggplot_pK.pdf", width=18, height=6)
# Choose pK at the BCmetric peak. Columns are mapped directly in aes()
# (not via `bcmvn_pbmc$`, per ggplot2 guidance); geom_col() is the idiomatic
# equivalent of geom_bar(stat = "identity").
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_col()
dev.off()
length(all@meta.data$seurat_clusters)
# Homotypic doublet proportion estimated from cluster composition.
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit-specific rate x recovered nuclei for this sample
# (hard-coded count -- update if the input data change).
nExp_poi <- round(0.046*6582)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# Two classification stringencies; the reused pANN column name encodes pN_pK_nExp.
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.24, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.24, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.24_303", sct = FALSE)
# Re-cluster and visualize doublet calls.
pdf("Gan_43_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims = 50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_43_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = TRUE)
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.24_303"  # singlet vs doublet labels
pdf("Gan_43_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = FALSE)
dev.off()
saveRDS(all, "Gan_43_after_doublet_detection.rds")
# Keep singlets only and reprocess them from scratch.
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_43_singlets.rds")
singlets <- readRDS("Gan_43_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_43_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims = 50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_43_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = TRUE)
dev.off()
saveRDS(singlets, "Gan_43_singlets_PCA.rds")
###############################################################################################
# ---- Gan_44: same QC/DoubletFinder/singlet pipeline, sample-specific params:
# expected multiplet count round(0.046*6516), pK = 0.01, pANN tag _300 ----
all <- Gan_44
# QC violins: genes/cell, UMIs/cell, percent mitochondrial counts.
pdf("Gan_44_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# UMI count vs percent.mt and vs gene count (expect ~1:1 for the latter).
pdf("Gan_44_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))  # NOTE(review): deprecated; patchwork `plot1 + plot2` replaces it
dev.off()
# Initial filter: 300 < genes < 9000, UMIs < 50000, percent.mt < 5.
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Log-normalize -> 2000 HVGs -> scale -> PCA.
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_44_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_44_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = TRUE)
dev.off()
# saveRDS(all, "Gan_44_PCA.rds")  # optional checkpoint
# all <- readRDS("Gan_44_PCA.rds")
# DoubletFinder sweep: https://github.com/chris-mcginnis-ucsf/DoubletFinder
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_44_ggplot_pK.pdf", width=18, height=6)
# Pick pK at the BCmetric peak; bare column mapping, geom_col() == geom_bar(stat="identity").
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_col()
dev.off()
length(all@meta.data$seurat_clusters)
# Homotypic doublet proportion from cluster composition.
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit rate x recovered nuclei (sample-specific, hard-coded).
nExp_poi <- round(0.046*6516)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.01, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.01, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.01_300", sct = FALSE)
# Re-cluster and visualize doublet calls.
pdf("Gan_44_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims = 50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_44_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = TRUE)
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.01_300"  # singlet vs doublet labels
pdf("Gan_44_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = FALSE)
dev.off()
saveRDS(all, "Gan_44_after_doublet_detection.rds")
# Keep singlets only and reprocess from scratch.
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_44_singlets.rds")
singlets <- readRDS("Gan_44_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_44_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims = 50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_44_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = TRUE)
dev.off()
saveRDS(singlets, "Gan_44_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_45: same QC/DoubletFinder/singlet pipeline, sample-specific params:
# expected multiplet count round(0.046*6648), pK = 0.27, pANN tag _306 ----
all <- Gan_45
# QC violins: genes/cell, UMIs/cell, percent mitochondrial counts.
pdf("Gan_45_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# UMI count vs percent.mt and vs gene count (expect ~1:1 for the latter).
pdf("Gan_45_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))  # NOTE(review): deprecated; patchwork `plot1 + plot2` replaces it
dev.off()
# Initial filter: 300 < genes < 9000, UMIs < 50000, percent.mt < 5.
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Log-normalize -> 2000 HVGs -> scale -> PCA.
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_45_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_45_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = TRUE)
dev.off()
# saveRDS(all, "Gan_45_PCA.rds")  # optional checkpoint
# all <- readRDS("Gan_45_PCA.rds")
# DoubletFinder sweep: https://github.com/chris-mcginnis-ucsf/DoubletFinder
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_45_ggplot_pK.pdf", width=18, height=6)
# Pick pK at the BCmetric peak; bare column mapping, geom_col() == geom_bar(stat="identity").
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_col()
dev.off()
length(all@meta.data$seurat_clusters)
# Homotypic doublet proportion from cluster composition.
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit rate x recovered nuclei (sample-specific, hard-coded).
nExp_poi <- round(0.046*6648)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.27, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.27, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.27_306", sct = FALSE)
# Re-cluster and visualize doublet calls.
pdf("Gan_45_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims = 50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_45_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = TRUE)
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.27_306"  # singlet vs doublet labels
pdf("Gan_45_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = FALSE)
dev.off()
saveRDS(all, "Gan_45_after_doublet_detection.rds")
# Keep singlets only and reprocess from scratch.
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_45_singlets.rds")
singlets <- readRDS("Gan_45_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_45_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims = 50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_45_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = TRUE)
dev.off()
saveRDS(singlets, "Gan_45_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_46: same QC/DoubletFinder/singlet pipeline, sample-specific params:
# expected multiplet count round(0.039*5649), pK = 0.005, pANN tag _220 ----
all <- Gan_46
# QC violins: genes/cell, UMIs/cell, percent mitochondrial counts.
pdf("Gan_46_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# UMI count vs percent.mt and vs gene count (expect ~1:1 for the latter).
pdf("Gan_46_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))  # NOTE(review): deprecated; patchwork `plot1 + plot2` replaces it
dev.off()
# Initial filter: 300 < genes < 9000, UMIs < 50000, percent.mt < 5.
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Log-normalize -> 2000 HVGs -> scale -> PCA.
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_46_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_46_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = TRUE)
dev.off()
# DoubletFinder sweep: https://github.com/chris-mcginnis-ucsf/DoubletFinder
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_46_ggplot_pK.pdf", width=18, height=6)
# Pick pK at the BCmetric peak; bare column mapping, geom_col() == geom_bar(stat="identity").
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_col()
dev.off()
length(all@meta.data$seurat_clusters)
# Homotypic doublet proportion from cluster composition.
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit rate x recovered nuclei (sample-specific, hard-coded).
nExp_poi <- round(0.039*5649)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.005, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.005, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.005_220", sct = FALSE)
# Re-cluster and visualize doublet calls.
pdf("Gan_46_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims = 50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_46_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = TRUE)
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.005_220"  # singlet vs doublet labels
pdf("Gan_46_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = FALSE)
dev.off()
saveRDS(all, "Gan_46_after_doublet_detection.rds")
# Keep singlets only and reprocess from scratch.
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_46_singlets.rds")
singlets <- readRDS("Gan_46_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_46_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims = 50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_46_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = TRUE)
dev.off()
saveRDS(singlets, "Gan_46_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_47: same QC/DoubletFinder/singlet pipeline, sample-specific params:
# expected multiplet count round(0.061*8564), pK = 0.03, pANN tag _522 ----
all <- Gan_47
# QC violins: genes/cell, UMIs/cell, percent mitochondrial counts.
pdf("Gan_47_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# UMI count vs percent.mt and vs gene count (expect ~1:1 for the latter).
pdf("Gan_47_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))  # NOTE(review): deprecated; patchwork `plot1 + plot2` replaces it
dev.off()
# Initial filter: 300 < genes < 9000, UMIs < 50000, percent.mt < 5.
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Log-normalize -> 2000 HVGs -> scale -> PCA.
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_47_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_47_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = TRUE)
dev.off()
# DoubletFinder sweep: https://github.com/chris-mcginnis-ucsf/DoubletFinder
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_47_ggplot_pK.pdf", width=18, height=6)
# Pick pK at the BCmetric peak; bare column mapping, geom_col() == geom_bar(stat="identity").
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_col()
dev.off()
length(all@meta.data$seurat_clusters)
# Homotypic doublet proportion from cluster composition.
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit rate x recovered nuclei (sample-specific, hard-coded).
nExp_poi <- round(0.061*8564)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.03, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.03, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.03_522", sct = FALSE)
# Re-cluster and visualize doublet calls.
pdf("Gan_47_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims = 50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_47_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = TRUE)
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.03_522"  # singlet vs doublet labels
pdf("Gan_47_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = FALSE)
dev.off()
saveRDS(all, "Gan_47_after_doublet_detection.rds")
# Keep singlets only and reprocess from scratch.
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_47_singlets.rds")
singlets <- readRDS("Gan_47_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_47_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims = 50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_47_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = TRUE)
dev.off()
saveRDS(singlets, "Gan_47_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_48: same QC/DoubletFinder/singlet pipeline, sample-specific params:
# expected multiplet count round(0.054*7751), pK = 0.16, pANN tag _419 ----
all <- Gan_48
# QC violins: genes/cell, UMIs/cell, percent mitochondrial counts.
pdf("Gan_48_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# UMI count vs percent.mt and vs gene count (expect ~1:1 for the latter).
pdf("Gan_48_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))  # NOTE(review): deprecated; patchwork `plot1 + plot2` replaces it
dev.off()
# Initial filter: 300 < genes < 9000, UMIs < 50000, percent.mt < 5.
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Log-normalize -> 2000 HVGs -> scale -> PCA.
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_48_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_48_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = TRUE)
dev.off()
# DoubletFinder sweep: https://github.com/chris-mcginnis-ucsf/DoubletFinder
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_48_ggplot_pK.pdf", width=18, height=6)
# Pick pK at the BCmetric peak; bare column mapping, geom_col() == geom_bar(stat="identity").
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_col()
dev.off()
length(all@meta.data$seurat_clusters)
# Homotypic doublet proportion from cluster composition.
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit rate x recovered nuclei (sample-specific, hard-coded).
nExp_poi <- round(0.054*7751)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.16, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.16, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.16_419", sct = FALSE)
# Re-cluster and visualize doublet calls.
pdf("Gan_48_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims = 50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_48_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = TRUE)
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.16_419"  # singlet vs doublet labels
pdf("Gan_48_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = FALSE)
dev.off()
saveRDS(all, "Gan_48_after_doublet_detection.rds")
# Keep singlets only and reprocess from scratch.
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_48_singlets.rds")
singlets <- readRDS("Gan_48_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_48_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims = 50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_48_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = TRUE)
dev.off()
saveRDS(singlets, "Gan_48_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_49: same QC/DoubletFinder/singlet pipeline, sample-specific params:
# expected multiplet count round(0.039*5548), pK = 0.25, pANN tag _216 ----
all <- Gan_49
# QC violins: genes/cell, UMIs/cell, percent mitochondrial counts.
pdf("Gan_49_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# UMI count vs percent.mt and vs gene count (expect ~1:1 for the latter).
pdf("Gan_49_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))  # NOTE(review): deprecated; patchwork `plot1 + plot2` replaces it
dev.off()
# Initial filter: 300 < genes < 9000, UMIs < 50000, percent.mt < 5.
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Log-normalize -> 2000 HVGs -> scale -> PCA.
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_49_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_49_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = TRUE)
dev.off()
# DoubletFinder sweep: https://github.com/chris-mcginnis-ucsf/DoubletFinder
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_49_ggplot_pK.pdf", width=18, height=6)
# Pick pK at the BCmetric peak; bare column mapping, geom_col() == geom_bar(stat="identity").
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_col()
dev.off()
length(all@meta.data$seurat_clusters)
# Homotypic doublet proportion from cluster composition.
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit rate x recovered nuclei (sample-specific, hard-coded).
nExp_poi <- round(0.039*5548)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.25, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.25, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.25_216", sct = FALSE)
# Re-cluster and visualize doublet calls.
pdf("Gan_49_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims = 50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_49_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = TRUE)
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.25_216"  # singlet vs doublet labels
pdf("Gan_49_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = FALSE)
dev.off()
saveRDS(all, "Gan_49_after_doublet_detection.rds")
# Keep singlets only and reprocess from scratch.
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_49_singlets.rds")
singlets <- readRDS("Gan_49_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_49_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims = 50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_49_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = TRUE)
dev.off()
saveRDS(singlets, "Gan_49_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_50: QC, filtering, DoubletFinder doublet removal, singlet re-processing ----
all <- Gan_50
# QC violin plots: genes/cell, UMIs/cell, percent mitochondrial reads.
# All plot calls are wrapped in print() so the PDFs are written even when this
# script is run via source() (ggplot objects are not auto-printed there).
pdf("Gan_50_QC.pdf", width = 12, height = 4)
print(VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0))
dev.off()
#plot of correlation between number of genes detected and number of transcripts detected - you generally want this to be 1:1
pdf("Gan_50_FeatureScatter.pdf", width = 12, height = 4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
print(CombinePlots(plots = list(plot1, plot2)))
dev.off()
#initial filtering step - keep cells with 300-9000 detected genes, <50k UMIs, <5% mitochondrial reads
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
#normalize counts=====
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_50_Elbow_1.pdf", width = 8, height = 6)
print(ElbowPlot(all))
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_50_UMAP.pdf", width = 8, height = 6)
print(DimPlot(all, reduction = "umap", label = TRUE))
dev.off()
#Doublet finder (no ground-truth) - please reference https://github.com/chris-mcginnis-ucsf/DoubletFinder for more information on parameters====
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_50_ggplot_pK.pdf", width = 18, height = 6)
# map data columns directly in aes() (not bcmvn_pbmc$...); look for pK at the initial BCmetric peak
print(ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity"))
dev.off()
# report cell count after filtering
print(length(all@meta.data$seurat_clusters))
#homotypic doublet proportion estimate
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
nExp_poi <- round(0.039*5488) # expected multiplets: kit doublet rate x nuclei loaded
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# doublet finder with different classification stringencies; the reuse.pANN string
# must match the pN/pK/nExp metadata column created by the first call
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.005, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.005, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.005_214", sct = FALSE)
#visualizing clusters and multiplet cells====
pdf("Gan_50_Elbow_2.pdf", width = 8, height = 6)
print(ElbowPlot(all, ndims = 50))
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_50_UMAP_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = TRUE))
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.005_214" # singlet vs doublet calls from DoubletFinder
pdf("Gan_50_3_UMAP_singlets_doublets_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = FALSE))
dev.off()
saveRDS(all, "Gan_50_after_doublet_detection.rds")
#processing singlets ====
#remove doublets
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_50_singlets.rds")
singlets <- readRDS("Gan_50_singlets.rds")
Idents(singlets) <- "seurat_clusters"
#normalization
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
#scaling the data
singlets <- ScaleData(object = singlets)
#perform and visualize PCA
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
#PC capture
pdf("Gan_50_Elbow_after_processing.pdf", width = 8, height = 6)
print(ElbowPlot(singlets, ndims = 50))
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_50_UMAP_singlets_after_processing.pdf", width = 8, height = 6)
print(DimPlot(object = singlets, reduction = "umap", label = TRUE))
dev.off()
saveRDS(singlets, "Gan_50_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_51: QC, filtering, DoubletFinder doublet removal, singlet re-processing ----
all <- Gan_51
# QC violin plots: genes/cell, UMIs/cell, percent mitochondrial reads.
# All plot calls are wrapped in print() so the PDFs are written even when this
# script is run via source() (ggplot objects are not auto-printed there).
pdf("Gan_51_QC.pdf", width = 12, height = 4)
print(VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0))
dev.off()
#plot of correlation between number of genes detected and number of transcripts detected - you generally want this to be 1:1
pdf("Gan_51_FeatureScatter.pdf", width = 12, height = 4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
print(CombinePlots(plots = list(plot1, plot2)))
dev.off()
#initial filtering step - keep cells with 300-9000 detected genes, <50k UMIs, <5% mitochondrial reads
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
#normalize counts=====
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_51_Elbow_1.pdf", width = 8, height = 6)
print(ElbowPlot(all))
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_51_UMAP.pdf", width = 8, height = 6)
print(DimPlot(all, reduction = "umap", label = TRUE))
dev.off()
#Doublet finder (no ground-truth) - please reference https://github.com/chris-mcginnis-ucsf/DoubletFinder for more information on parameters====
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_51_ggplot_pK.pdf", width = 18, height = 6)
# map data columns directly in aes() (not bcmvn_pbmc$...); look for pK at the initial BCmetric peak
print(ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity"))
dev.off()
# report cell count after filtering
print(length(all@meta.data$seurat_clusters))
#homotypic doublet proportion estimate
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
nExp_poi <- round(0.061*8028) # expected multiplets: kit doublet rate x nuclei loaded
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# doublet finder with different classification stringencies; the reuse.pANN string
# must match the pN/pK/nExp metadata column created by the first call
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.11, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.11, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.11_490", sct = FALSE)
#visualizing clusters and multiplet cells====
pdf("Gan_51_Elbow_2.pdf", width = 8, height = 6)
print(ElbowPlot(all, ndims = 50))
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_51_UMAP_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = TRUE))
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.11_490" # singlet vs doublet calls from DoubletFinder
pdf("Gan_51_3_UMAP_singlets_doublets_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = FALSE))
dev.off()
saveRDS(all, "Gan_51_after_doublet_detection.rds")
#processing singlets ====
#remove doublets
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_51_singlets.rds")
singlets <- readRDS("Gan_51_singlets.rds")
Idents(singlets) <- "seurat_clusters"
#normalization
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
#scaling the data
singlets <- ScaleData(object = singlets)
#perform and visualize PCA
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
#PC capture
pdf("Gan_51_Elbow_after_processing.pdf", width = 8, height = 6)
print(ElbowPlot(singlets, ndims = 50))
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_51_UMAP_singlets_after_processing.pdf", width = 8, height = 6)
print(DimPlot(object = singlets, reduction = "umap", label = TRUE))
dev.off()
saveRDS(singlets, "Gan_51_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_52: QC, filtering, DoubletFinder doublet removal, singlet re-processing ----
all <- Gan_52
# QC violin plots: genes/cell, UMIs/cell, percent mitochondrial reads.
# All plot calls are wrapped in print() so the PDFs are written even when this
# script is run via source() (ggplot objects are not auto-printed there).
pdf("Gan_52_QC.pdf", width = 12, height = 4)
print(VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0))
dev.off()
#plot of correlation between number of genes detected and number of transcripts detected - you generally want this to be 1:1
pdf("Gan_52_FeatureScatter.pdf", width = 12, height = 4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
print(CombinePlots(plots = list(plot1, plot2)))
dev.off()
#initial filtering step - keep cells with 300-9000 detected genes, <50k UMIs, <5% mitochondrial reads
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
#normalize counts=====
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_52_Elbow_1.pdf", width = 8, height = 6)
print(ElbowPlot(all))
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_52_UMAP.pdf", width = 8, height = 6)
print(DimPlot(all, reduction = "umap", label = TRUE))
dev.off()
#Doublet finder (no ground-truth) - please reference https://github.com/chris-mcginnis-ucsf/DoubletFinder for more information on parameters====
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_52_ggplot_pK.pdf", width = 18, height = 6)
# map data columns directly in aes() (not bcmvn_pbmc$...); look for pK at the initial BCmetric peak
print(ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity"))
dev.off()
# report cell count after filtering
print(length(all@meta.data$seurat_clusters))
#homotypic doublet proportion estimate
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
nExp_poi <- round(0.054*7528) # expected multiplets: kit doublet rate x nuclei loaded
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# doublet finder with different classification stringencies; the reuse.pANN string
# must match the pN/pK/nExp metadata column created by the first call
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.01, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.01, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.01_407", sct = FALSE)
#visualizing clusters and multiplet cells====
pdf("Gan_52_Elbow_2.pdf", width = 8, height = 6)
print(ElbowPlot(all, ndims = 50))
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_52_UMAP_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = TRUE))
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.01_407" # singlet vs doublet calls from DoubletFinder
pdf("Gan_52_3_UMAP_singlets_doublets_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = FALSE))
dev.off()
saveRDS(all, "Gan_52_after_doublet_detection.rds")
#processing singlets ====
#remove doublets
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_52_singlets.rds")
singlets <- readRDS("Gan_52_singlets.rds")
Idents(singlets) <- "seurat_clusters"
#normalization
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
#scaling the data
singlets <- ScaleData(object = singlets)
#perform and visualize PCA
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
#PC capture
pdf("Gan_52_Elbow_after_processing.pdf", width = 8, height = 6)
print(ElbowPlot(singlets, ndims = 50))
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_52_UMAP_singlets_after_processing.pdf", width = 8, height = 6)
print(DimPlot(object = singlets, reduction = "umap", label = TRUE))
dev.off()
saveRDS(singlets, "Gan_52_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_54: QC, filtering, DoubletFinder doublet removal, singlet re-processing ----
all <- Gan_54
# QC violin plots: genes/cell, UMIs/cell, percent mitochondrial reads.
# All plot calls are wrapped in print() so the PDFs are written even when this
# script is run via source() (ggplot objects are not auto-printed there).
pdf("Gan_54_QC.pdf", width = 12, height = 4)
print(VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0))
dev.off()
#plot of correlation between number of genes detected and number of transcripts detected - you generally want this to be 1:1
pdf("Gan_54_FeatureScatter.pdf", width = 12, height = 4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
print(CombinePlots(plots = list(plot1, plot2)))
dev.off()
#initial filtering step - keep cells with 300-9000 detected genes, <50k UMIs, <5% mitochondrial reads
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
#normalize counts=====
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_54_Elbow_1.pdf", width = 8, height = 6)
print(ElbowPlot(all))
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_54_UMAP.pdf", width = 8, height = 6)
print(DimPlot(all, reduction = "umap", label = TRUE))
dev.off()
#Doublet finder (no ground-truth) - please reference https://github.com/chris-mcginnis-ucsf/DoubletFinder for more information on parameters====
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_54_ggplot_pK.pdf", width = 18, height = 6)
# map data columns directly in aes() (not bcmvn_pbmc$...); look for pK at the initial BCmetric peak
print(ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity"))
dev.off()
# report cell count after filtering
print(length(all@meta.data$seurat_clusters))
#homotypic doublet proportion estimate
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
nExp_poi <- round(0.054*7219) # expected multiplets: kit doublet rate x nuclei loaded
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# doublet finder with different classification stringencies; the reuse.pANN string
# must match the pN/pK/nExp metadata column created by the first call
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.22, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.22, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.22_390", sct = FALSE)
#visualizing clusters and multiplet cells====
pdf("Gan_54_Elbow_2.pdf", width = 8, height = 6)
print(ElbowPlot(all, ndims = 50))
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_54_UMAP_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = TRUE))
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.22_390" # singlet vs doublet calls from DoubletFinder
pdf("Gan_54_3_UMAP_singlets_doublets_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = FALSE))
dev.off()
saveRDS(all, "Gan_54_after_doublet_detection.rds")
#processing singlets ====
#remove doublets
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_54_singlets.rds")
singlets <- readRDS("Gan_54_singlets.rds")
Idents(singlets) <- "seurat_clusters"
#normalization
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
#scaling the data
singlets <- ScaleData(object = singlets)
#perform and visualize PCA
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
#PC capture
pdf("Gan_54_Elbow_after_processing.pdf", width = 8, height = 6)
print(ElbowPlot(singlets, ndims = 50))
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_54_UMAP_singlets_after_processing.pdf", width = 8, height = 6)
print(DimPlot(object = singlets, reduction = "umap", label = TRUE))
dev.off()
saveRDS(singlets, "Gan_54_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_55: QC, filtering, DoubletFinder doublet removal, singlet re-processing ----
all <- Gan_55
# QC violin plots: genes/cell, UMIs/cell, percent mitochondrial reads.
# All plot calls are wrapped in print() so the PDFs are written even when this
# script is run via source() (ggplot objects are not auto-printed there).
pdf("Gan_55_QC.pdf", width = 12, height = 4)
print(VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0))
dev.off()
#plot of correlation between number of genes detected and number of transcripts detected - you generally want this to be 1:1
pdf("Gan_55_FeatureScatter.pdf", width = 12, height = 4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
print(CombinePlots(plots = list(plot1, plot2)))
dev.off()
#initial filtering step - keep cells with 300-9000 detected genes, <50k UMIs, <5% mitochondrial reads
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
#normalize counts=====
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_55_Elbow_1.pdf", width = 8, height = 6)
print(ElbowPlot(all))
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_55_UMAP.pdf", width = 8, height = 6)
print(DimPlot(all, reduction = "umap", label = TRUE))
dev.off()
#Doublet finder (no ground-truth) - please reference https://github.com/chris-mcginnis-ucsf/DoubletFinder for more information on parameters====
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_55_ggplot_pK.pdf", width = 18, height = 6)
# map data columns directly in aes() (not bcmvn_pbmc$...); look for pK at the initial BCmetric peak
print(ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity"))
dev.off()
# report cell count after filtering
print(length(all@meta.data$seurat_clusters))
#homotypic doublet proportion estimate
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
nExp_poi <- round(0.054*7132) # expected multiplets: kit doublet rate x nuclei loaded
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# doublet finder with different classification stringencies; the reuse.pANN string
# must match the pN/pK/nExp metadata column created by the first call
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.01, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.01, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.01_385", sct = FALSE)
#visualizing clusters and multiplet cells====
pdf("Gan_55_Elbow_2.pdf", width = 8, height = 6)
print(ElbowPlot(all, ndims = 50))
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_55_UMAP_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = TRUE))
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.01_385" # singlet vs doublet calls from DoubletFinder
pdf("Gan_55_3_UMAP_singlets_doublets_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = FALSE))
dev.off()
saveRDS(all, "Gan_55_after_doublet_detection.rds")
#processing singlets ====
#remove doublets
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_55_singlets.rds")
singlets <- readRDS("Gan_55_singlets.rds")
Idents(singlets) <- "seurat_clusters"
#normalization
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
#scaling the data
singlets <- ScaleData(object = singlets)
#perform and visualize PCA
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
#PC capture
pdf("Gan_55_Elbow_after_processing.pdf", width = 8, height = 6)
print(ElbowPlot(singlets, ndims = 50))
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_55_UMAP_singlets_after_processing.pdf", width = 8, height = 6)
print(DimPlot(object = singlets, reduction = "umap", label = TRUE))
dev.off()
saveRDS(singlets, "Gan_55_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_56: QC, filtering, DoubletFinder doublet removal, singlet re-processing ----
all <- Gan_56
# QC violin plots: genes/cell, UMIs/cell, percent mitochondrial reads.
# All plot calls are wrapped in print() so the PDFs are written even when this
# script is run via source() (ggplot objects are not auto-printed there).
pdf("Gan_56_QC.pdf", width = 12, height = 4)
print(VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0))
dev.off()
#plot of correlation between number of genes detected and number of transcripts detected - you generally want this to be 1:1
pdf("Gan_56_FeatureScatter.pdf", width = 12, height = 4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
print(CombinePlots(plots = list(plot1, plot2)))
dev.off()
#initial filtering step - keep cells with 300-9000 detected genes, <50k UMIs, <5% mitochondrial reads
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
#normalize counts=====
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_56_Elbow_1.pdf", width = 8, height = 6)
print(ElbowPlot(all))
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_56_UMAP.pdf", width = 8, height = 6)
print(DimPlot(all, reduction = "umap", label = TRUE))
dev.off()
#Doublet finder (no ground-truth) - please reference https://github.com/chris-mcginnis-ucsf/DoubletFinder for more information on parameters====
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_56_ggplot_pK.pdf", width = 18, height = 6)
# map data columns directly in aes() (not bcmvn_pbmc$...); look for pK at the initial BCmetric peak
print(ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity"))
dev.off()
# report cell count after filtering
print(length(all@meta.data$seurat_clusters))
#homotypic doublet proportion estimate
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
nExp_poi <- round(0.023*3681) # expected multiplets: kit doublet rate x nuclei loaded
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# doublet finder with different classification stringencies; the reuse.pANN string
# must match the pN/pK/nExp metadata column created by the first call
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.3, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.3, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.3_85", sct = FALSE)
#visualizing clusters and multiplet cells====
pdf("Gan_56_Elbow_2.pdf", width = 8, height = 6)
print(ElbowPlot(all, ndims = 50))
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_56_UMAP_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = TRUE))
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.3_85" # singlet vs doublet calls from DoubletFinder
pdf("Gan_56_3_UMAP_singlets_doublets_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = FALSE))
dev.off()
saveRDS(all, "Gan_56_after_doublet_detection.rds")
#processing singlets ====
#remove doublets
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_56_singlets.rds")
singlets <- readRDS("Gan_56_singlets.rds")
Idents(singlets) <- "seurat_clusters"
#normalization
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
#scaling the data
singlets <- ScaleData(object = singlets)
#perform and visualize PCA
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
#PC capture
pdf("Gan_56_Elbow_after_processing.pdf", width = 8, height = 6)
print(ElbowPlot(singlets, ndims = 50))
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_56_UMAP_singlets_after_processing.pdf", width = 8, height = 6)
print(DimPlot(object = singlets, reduction = "umap", label = TRUE))
dev.off()
saveRDS(singlets, "Gan_56_singlets_PCA.rds")
###############################################################################################
###############################################################################################
# ---- Gan_59: QC, filtering, DoubletFinder doublet removal, singlet re-processing ----
all <- Gan_59
# QC violin plots: genes/cell, UMIs/cell, percent mitochondrial reads.
# All plot calls are wrapped in print() so the PDFs are written even when this
# script is run via source() (ggplot objects are not auto-printed there).
pdf("Gan_59_QC.pdf", width = 12, height = 4)
print(VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0))
dev.off()
#plot of correlation between number of genes detected and number of transcripts detected - you generally want this to be 1:1
pdf("Gan_59_FeatureScatter.pdf", width = 12, height = 4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
print(CombinePlots(plots = list(plot1, plot2)))
dev.off()
#initial filtering step - keep cells with 300-9000 detected genes, <50k UMIs, <5% mitochondrial reads
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
#normalize counts=====
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_59_Elbow_1.pdf", width = 8, height = 6)
print(ElbowPlot(all))
dev.off()
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_59_UMAP.pdf", width = 8, height = 6)
print(DimPlot(all, reduction = "umap", label = TRUE))
dev.off()
#Doublet finder (no ground-truth) - please reference https://github.com/chris-mcginnis-ucsf/DoubletFinder for more information on parameters====
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_59_ggplot_pK.pdf", width = 18, height = 6)
# map data columns directly in aes() (not bcmvn_pbmc$...); look for pK at the initial BCmetric peak
print(ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity"))
dev.off()
# report cell count after filtering
print(length(all@meta.data$seurat_clusters))
#homotypic doublet proportion estimate
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
nExp_poi <- round(0.054*7225) # expected multiplets: kit doublet rate x nuclei loaded
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# doublet finder with different classification stringencies; the reuse.pANN string
# must match the pN/pK/nExp metadata column created by the first call
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.24, nExp = nExp_poi, reuse.pANN = FALSE, sct = FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.24, nExp = nExp_poi.adj, reuse.pANN = "pANN_0.25_0.24_390", sct = FALSE)
#visualizing clusters and multiplet cells====
pdf("Gan_59_Elbow_2.pdf", width = 8, height = 6)
print(ElbowPlot(all, ndims = 50))
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_59_UMAP_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = TRUE))
dev.off()
Idents(object = all) <- "DF.classifications_0.25_0.24_390" # singlet vs doublet calls from DoubletFinder
pdf("Gan_59_3_UMAP_singlets_doublets_2.pdf", width = 8, height = 6)
print(DimPlot(object = all, reduction = "umap", label = FALSE))
dev.off()
saveRDS(all, "Gan_59_after_doublet_detection.rds")
#processing singlets ====
#remove doublets
singlets <- subset(all, idents = c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_59_singlets.rds")
singlets <- readRDS("Gan_59_singlets.rds")
Idents(singlets) <- "seurat_clusters"
#normalization
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
#scaling the data
singlets <- ScaleData(object = singlets)
#perform and visualize PCA
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
#PC capture
pdf("Gan_59_Elbow_after_processing.pdf", width = 8, height = 6)
print(ElbowPlot(singlets, ndims = 50))
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_59_UMAP_singlets_after_processing.pdf", width = 8, height = 6)
print(DimPlot(object = singlets, reduction = "umap", label = TRUE))
dev.off()
saveRDS(singlets, "Gan_59_singlets_PCA.rds")
###############################################################################################
###############################################################################################
## ---- Sample Gan_60: QC, filtering, DoubletFinder, singlet re-processing ----
all <- Gan_60
# QC violin plots: genes/nucleus, UMIs/nucleus, percent mitochondrial reads
pdf("Gan_60_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# Scatter of UMI counts vs percent.mt and vs gene counts
# (nCount vs nFeature is expected to track roughly 1:1)
pdf("Gan_60_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Filter low-quality nuclei: 300 < genes < 9000, UMIs < 50000, mito < 5%
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Standard pipeline: normalize, variable features, scale, PCA (on all genes)
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_60_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
# Initial clustering and UMAP on the first 15 PCs
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_60_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = T)
dev.off()
# DoubletFinder parameter sweep (no ground truth); see
# https://github.com/chris-mcginnis-ucsf/DoubletFinder for parameter details
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_60_ggplot_pK.pdf", width=18, height=6)
# Bare column names inside aes() (not bcmvn_pbmc$...) is the correct ggplot2
# form; choose pK at the initial BCmetric peak
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity")
dev.off()
length(all@meta.data$seurat_clusters)  # number of nuclei after filtering
# Homotypic doublet proportion estimated from cluster composition
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit-specific rate x number of nuclei loaded
nExp_poi <- round(0.061*8735)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# Run DoubletFinder at two stringencies; build the pANN column name from
# nExp_poi instead of hard-coding it so the two calls cannot drift apart
all <- doubletFinder_v3(all, PCs=1:15, pN=0.25, pK=0.005, nExp=nExp_poi, reuse.pANN = FALSE, sct=FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.005, nExp = nExp_poi.adj,
                        reuse.pANN = paste0("pANN_0.25_0.005_", nExp_poi), sct = FALSE)
# Re-cluster and visualize singlet vs doublet calls
pdf("Gan_60_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims=50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_60_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = T)
dev.off()
# Use the classification from the unadjusted-nExp run (column name derived
# from nExp_poi, matching DoubletFinder's naming convention)
Idents(object = all) <- paste0("DF.classifications_0.25_0.005_", nExp_poi)
pdf("Gan_60_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = F)
dev.off()
saveRDS(all, "Gan_60_after_doublet_detection.rds")
# Keep singlets only and re-run the standard pipeline on them
singlets <- subset(all, idents=c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_60_singlets.rds")
singlets <- readRDS("Gan_60_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_60_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims=50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_60_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = T)
dev.off()
saveRDS(singlets, "Gan_60_singlets_PCA.rds")
###############################################################################################
###############################################################################################
## ---- Sample Gan_61: QC, filtering, DoubletFinder, singlet re-processing ----
all <- Gan_61
# QC violin plots: genes/nucleus, UMIs/nucleus, percent mitochondrial reads
pdf("Gan_61_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# Scatter of UMI counts vs percent.mt and vs gene counts
# (nCount vs nFeature is expected to track roughly 1:1)
pdf("Gan_61_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Filter low-quality nuclei: 300 < genes < 9000, UMIs < 50000, mito < 5%
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Standard pipeline: normalize, variable features, scale, PCA (on all genes)
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_61_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
# Initial clustering and UMAP on the first 15 PCs
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_61_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = T)
dev.off()
# DoubletFinder parameter sweep (no ground truth); see
# https://github.com/chris-mcginnis-ucsf/DoubletFinder for parameter details
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_61_ggplot_pK.pdf", width=18, height=6)
# Bare column names inside aes() (not bcmvn_pbmc$...) is the correct ggplot2
# form; choose pK at the initial BCmetric peak
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity")
dev.off()
length(all@meta.data$seurat_clusters)  # number of nuclei after filtering
# Homotypic doublet proportion estimated from cluster composition
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit-specific rate x number of nuclei loaded
nExp_poi <- round(0.023*3119)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# Run DoubletFinder at two stringencies; build the pANN column name from
# nExp_poi instead of hard-coding it so the two calls cannot drift apart
all <- doubletFinder_v3(all, PCs=1:15, pN=0.25, pK=0.02, nExp=nExp_poi, reuse.pANN = FALSE, sct=FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.02, nExp = nExp_poi.adj,
                        reuse.pANN = paste0("pANN_0.25_0.02_", nExp_poi), sct = FALSE)
# Re-cluster and visualize singlet vs doublet calls
pdf("Gan_61_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims=50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_61_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = T)
dev.off()
# Use the classification from the unadjusted-nExp run (column name derived
# from nExp_poi, matching DoubletFinder's naming convention)
Idents(object = all) <- paste0("DF.classifications_0.25_0.02_", nExp_poi)
pdf("Gan_61_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = F)
dev.off()
saveRDS(all, "Gan_61_after_doublet_detection.rds")
# Keep singlets only and re-run the standard pipeline on them
singlets <- subset(all, idents=c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_61_singlets.rds")
singlets <- readRDS("Gan_61_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_61_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims=50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_61_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = T)
dev.off()
saveRDS(singlets, "Gan_61_singlets_PCA.rds")
###############################################################################################
###############################################################################################
## ---- Sample Gan_62: QC, filtering, DoubletFinder, singlet re-processing ----
all <- Gan_62
# QC violin plots: genes/nucleus, UMIs/nucleus, percent mitochondrial reads
pdf("Gan_62_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# Scatter of UMI counts vs percent.mt and vs gene counts
# (nCount vs nFeature is expected to track roughly 1:1)
pdf("Gan_62_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Filter low-quality nuclei: 300 < genes < 9000, UMIs < 50000, mito < 5%
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Standard pipeline: normalize, variable features, scale, PCA (on all genes)
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_62_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
# Initial clustering and UMAP on the first 15 PCs
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_62_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = T)
dev.off()
# DoubletFinder parameter sweep (no ground truth); see
# https://github.com/chris-mcginnis-ucsf/DoubletFinder for parameter details
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_62_ggplot_pK.pdf", width=18, height=6)
# Bare column names inside aes() (not bcmvn_pbmc$...) is the correct ggplot2
# form; choose pK at the initial BCmetric peak
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity")
dev.off()
length(all@meta.data$seurat_clusters)  # number of nuclei after filtering
# Homotypic doublet proportion estimated from cluster composition
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit-specific rate x number of nuclei loaded
nExp_poi <- round(0.061*8825)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# Run DoubletFinder at two stringencies; build the pANN column name from
# nExp_poi instead of hard-coding it so the two calls cannot drift apart
all <- doubletFinder_v3(all, PCs=1:15, pN=0.25, pK=0.005, nExp=nExp_poi, reuse.pANN = FALSE, sct=FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.005, nExp = nExp_poi.adj,
                        reuse.pANN = paste0("pANN_0.25_0.005_", nExp_poi), sct = FALSE)
# Re-cluster and visualize singlet vs doublet calls
pdf("Gan_62_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims=50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_62_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = T)
dev.off()
# Use the classification from the unadjusted-nExp run (column name derived
# from nExp_poi, matching DoubletFinder's naming convention)
Idents(object = all) <- paste0("DF.classifications_0.25_0.005_", nExp_poi)
pdf("Gan_62_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = F)
dev.off()
saveRDS(all, "Gan_62_after_doublet_detection.rds")
# Keep singlets only and re-run the standard pipeline on them
singlets <- subset(all, idents=c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_62_singlets.rds")
singlets <- readRDS("Gan_62_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_62_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims=50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_62_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = T)
dev.off()
saveRDS(singlets, "Gan_62_singlets_PCA.rds")
###############################################################################################
###############################################################################################
## ---- Sample Gan_63: QC, filtering, DoubletFinder, singlet re-processing ----
all <- Gan_63
# QC violin plots: genes/nucleus, UMIs/nucleus, percent mitochondrial reads
pdf("Gan_63_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# Scatter of UMI counts vs percent.mt and vs gene counts
# (nCount vs nFeature is expected to track roughly 1:1)
pdf("Gan_63_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Filter low-quality nuclei: 300 < genes < 9000, UMIs < 50000, mito < 5%
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Standard pipeline: normalize, variable features, scale, PCA (on all genes)
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_63_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
# Initial clustering and UMAP on the first 15 PCs
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_63_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = T)
dev.off()
# DoubletFinder parameter sweep (no ground truth); see
# https://github.com/chris-mcginnis-ucsf/DoubletFinder for parameter details
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_63_ggplot_pK.pdf", width=18, height=6)
# Bare column names inside aes() (not bcmvn_pbmc$...) is the correct ggplot2
# form; choose pK at the initial BCmetric peak
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity")
dev.off()
length(all@meta.data$seurat_clusters)  # number of nuclei after filtering
# Homotypic doublet proportion estimated from cluster composition
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit-specific rate x number of nuclei loaded
nExp_poi <- round(0.046*6150)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# Run DoubletFinder at two stringencies; build the pANN column name from
# nExp_poi instead of hard-coding it so the two calls cannot drift apart
all <- doubletFinder_v3(all, PCs=1:15, pN=0.25, pK=0.01, nExp=nExp_poi, reuse.pANN = FALSE, sct=FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.01, nExp = nExp_poi.adj,
                        reuse.pANN = paste0("pANN_0.25_0.01_", nExp_poi), sct = FALSE)
# Re-cluster and visualize singlet vs doublet calls
pdf("Gan_63_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims=50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_63_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = T)
dev.off()
# Use the classification from the unadjusted-nExp run (column name derived
# from nExp_poi, matching DoubletFinder's naming convention)
Idents(object = all) <- paste0("DF.classifications_0.25_0.01_", nExp_poi)
pdf("Gan_63_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = F)
dev.off()
saveRDS(all, "Gan_63_after_doublet_detection.rds")
# Keep singlets only and re-run the standard pipeline on them
singlets <- subset(all, idents=c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_63_singlets.rds")
singlets <- readRDS("Gan_63_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_63_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims=50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_63_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = T)
dev.off()
saveRDS(singlets, "Gan_63_singlets_PCA.rds")
###############################################################################################
###############################################################################################
## ---- Sample Gan_64: QC, filtering, DoubletFinder, singlet re-processing ----
all <- Gan_64
# QC violin plots: genes/nucleus, UMIs/nucleus, percent mitochondrial reads
pdf("Gan_64_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# Scatter of UMI counts vs percent.mt and vs gene counts
# (nCount vs nFeature is expected to track roughly 1:1)
pdf("Gan_64_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Filter low-quality nuclei: 300 < genes < 9000, UMIs < 50000, mito < 5%
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Standard pipeline: normalize, variable features, scale, PCA (on all genes)
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_64_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
# Initial clustering and UMAP on the first 15 PCs
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_64_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = T)
dev.off()
# DoubletFinder parameter sweep (no ground truth); see
# https://github.com/chris-mcginnis-ucsf/DoubletFinder for parameter details
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_64_ggplot_pK.pdf", width=18, height=6)
# Bare column names inside aes() (not bcmvn_pbmc$...) is the correct ggplot2
# form; choose pK at the initial BCmetric peak
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity")
dev.off()
length(all@meta.data$seurat_clusters)  # number of nuclei after filtering
# Homotypic doublet proportion estimated from cluster composition
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit-specific rate x number of nuclei loaded
nExp_poi <- round(0.061*8047)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# Run DoubletFinder at two stringencies; build the pANN column name from
# nExp_poi instead of hard-coding it so the two calls cannot drift apart
all <- doubletFinder_v3(all, PCs=1:15, pN=0.25, pK=0.005, nExp=nExp_poi, reuse.pANN = FALSE, sct=FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.005, nExp = nExp_poi.adj,
                        reuse.pANN = paste0("pANN_0.25_0.005_", nExp_poi), sct = FALSE)
# Re-cluster and visualize singlet vs doublet calls
pdf("Gan_64_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims=50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_64_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = T)
dev.off()
# Use the classification from the unadjusted-nExp run (column name derived
# from nExp_poi, matching DoubletFinder's naming convention)
Idents(object = all) <- paste0("DF.classifications_0.25_0.005_", nExp_poi)
pdf("Gan_64_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = F)
dev.off()
saveRDS(all, "Gan_64_after_doublet_detection.rds")
# Keep singlets only and re-run the standard pipeline on them
singlets <- subset(all, idents=c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_64_singlets.rds")
singlets <- readRDS("Gan_64_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_64_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims=50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_64_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = T)
dev.off()
saveRDS(singlets, "Gan_64_singlets_PCA.rds")
###############################################################################################
###############################################################################################
## ---- Sample Gan_65: QC, filtering, DoubletFinder, singlet re-processing ----
all <- Gan_65
# QC violin plots: genes/nucleus, UMIs/nucleus, percent mitochondrial reads
pdf("Gan_65_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# Scatter of UMI counts vs percent.mt and vs gene counts
# (nCount vs nFeature is expected to track roughly 1:1)
pdf("Gan_65_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Filter low-quality nuclei: 300 < genes < 9000, UMIs < 50000, mito < 5%
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Standard pipeline: normalize, variable features, scale, PCA (on all genes)
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_65_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
# Initial clustering and UMAP on the first 15 PCs
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_65_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = T)
dev.off()
# DoubletFinder parameter sweep (no ground truth); see
# https://github.com/chris-mcginnis-ucsf/DoubletFinder for parameter details
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_65_ggplot_pK.pdf", width=18, height=6)
# Bare column names inside aes() (not bcmvn_pbmc$...) is the correct ggplot2
# form; choose pK at the initial BCmetric peak
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity")
dev.off()
length(all@meta.data$seurat_clusters)  # number of nuclei after filtering
# Homotypic doublet proportion estimated from cluster composition
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit-specific rate x number of nuclei loaded
nExp_poi <- round(0.054*7107)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# Run DoubletFinder at two stringencies; build the pANN column name from
# nExp_poi instead of hard-coding it so the two calls cannot drift apart
all <- doubletFinder_v3(all, PCs=1:15, pN=0.25, pK=0.005, nExp=nExp_poi, reuse.pANN = FALSE, sct=FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.005, nExp = nExp_poi.adj,
                        reuse.pANN = paste0("pANN_0.25_0.005_", nExp_poi), sct = FALSE)
# Re-cluster and visualize singlet vs doublet calls
pdf("Gan_65_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims=50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_65_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = T)
dev.off()
# Use the classification from the unadjusted-nExp run (column name derived
# from nExp_poi, matching DoubletFinder's naming convention)
Idents(object = all) <- paste0("DF.classifications_0.25_0.005_", nExp_poi)
pdf("Gan_65_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = F)
dev.off()
saveRDS(all, "Gan_65_after_doublet_detection.rds")
# Keep singlets only and re-run the standard pipeline on them
singlets <- subset(all, idents=c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_65_singlets.rds")
singlets <- readRDS("Gan_65_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_65_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims=50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_65_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = T)
dev.off()
saveRDS(singlets, "Gan_65_singlets_PCA.rds")
###############################################################################################
###############################################################################################
## ---- Sample Gan_66: QC, filtering, DoubletFinder, singlet re-processing ----
all <- Gan_66
# QC violin plots: genes/nucleus, UMIs/nucleus, percent mitochondrial reads
pdf("Gan_66_QC.pdf", width=12, height=4)
VlnPlot(object = all, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0)
dev.off()
# Scatter of UMI counts vs percent.mt and vs gene counts
# (nCount vs nFeature is expected to track roughly 1:1)
pdf("Gan_66_FeatureScatter.pdf", width=12, height=4)
plot1 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(all, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Filter low-quality nuclei: 300 < genes < 9000, UMIs < 50000, mito < 5%
all <- subset(x = all, subset = nFeature_RNA > 300 & nFeature_RNA < 9000 & nCount_RNA < 50000 & percent.mt < 5)
# Standard pipeline: normalize, variable features, scale, PCA (on all genes)
all <- NormalizeData(all, normalization.method = "LogNormalize", scale.factor = 10000)
all <- FindVariableFeatures(all, selection.method = "vst", nfeatures = 2000)
all <- ScaleData(object = all)
all <- RunPCA(object = all, features = rownames(x = all), verbose = FALSE)
pdf("Gan_66_Elbow_1.pdf", width=8, height=6)
ElbowPlot(all)
dev.off()
# Initial clustering and UMAP on the first 15 PCs
all <- FindNeighbors(all, dims = 1:15)
all <- FindClusters(all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_66_UMAP.pdf", width=8, height=6)
DimPlot(all, reduction = "umap", label = T)
dev.off()
# DoubletFinder parameter sweep (no ground truth); see
# https://github.com/chris-mcginnis-ucsf/DoubletFinder for parameter details
sweep.pbmc <- paramSweep_v3(all, PCs = 1:15, sct = FALSE)
sweep.stats_pbmc <- summarizeSweep(sweep.pbmc, GT = FALSE)
bcmvn_pbmc <- find.pK(sweep.stats_pbmc)
pdf("Gan_66_ggplot_pK.pdf", width=18, height=6)
# Bare column names inside aes() (not bcmvn_pbmc$...) is the correct ggplot2
# form; choose pK at the initial BCmetric peak
ggplot(bcmvn_pbmc, aes(x = pK, y = BCmetric)) + geom_bar(stat = "identity")
dev.off()
length(all@meta.data$seurat_clusters)  # number of nuclei after filtering
# Homotypic doublet proportion estimated from cluster composition
annotations <- all@meta.data$seurat_clusters
homotypic.prop <- modelHomotypic(annotations)
# Expected multiplets = kit-specific rate x number of nuclei loaded
nExp_poi <- round(0.061*8121)
nExp_poi.adj <- round(nExp_poi*(1-homotypic.prop))
# Run DoubletFinder at two stringencies; build the pANN column name from
# nExp_poi instead of hard-coding it so the two calls cannot drift apart
all <- doubletFinder_v3(all, PCs=1:15, pN=0.25, pK=0.22, nExp=nExp_poi, reuse.pANN = FALSE, sct=FALSE)
all <- doubletFinder_v3(all, PCs = 1:15, pN = 0.25, pK = 0.22, nExp = nExp_poi.adj,
                        reuse.pANN = paste0("pANN_0.25_0.22_", nExp_poi), sct = FALSE)
# Re-cluster and visualize singlet vs doublet calls
pdf("Gan_66_Elbow_2.pdf", width=8, height=6)
ElbowPlot(all, ndims=50)
dev.off()
all <- FindNeighbors(object = all, dims = 1:15)
all <- FindClusters(object = all, resolution = 0.1)
all <- RunUMAP(all, dims = 1:15)
pdf("Gan_66_UMAP_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = T)
dev.off()
# Use the classification from the unadjusted-nExp run (column name derived
# from nExp_poi, matching DoubletFinder's naming convention)
Idents(object = all) <- paste0("DF.classifications_0.25_0.22_", nExp_poi)
pdf("Gan_66_3_UMAP_singlets_doublets_2.pdf", width=8, height=6)
DimPlot(object = all, reduction = 'umap', label = F)
dev.off()
saveRDS(all, "Gan_66_after_doublet_detection.rds")
# Keep singlets only and re-run the standard pipeline on them
singlets <- subset(all, idents=c("Singlet"))
rm(all)
saveRDS(singlets, "Gan_66_singlets.rds")
singlets <- readRDS("Gan_66_singlets.rds")
Idents(singlets) <- "seurat_clusters"
singlets <- NormalizeData(singlets, normalization.method = "LogNormalize", scale.factor = 10000)
singlets <- FindVariableFeatures(singlets, selection.method = "vst", nfeatures = 2000)
singlets <- ScaleData(object = singlets)
singlets <- RunPCA(object = singlets, features = rownames(x = singlets), verbose = FALSE)
pdf("Gan_66_Elbow_after_processing.pdf", width=8, height=6)
ElbowPlot(singlets, ndims=50)
dev.off()
singlets <- FindNeighbors(object = singlets, dims = 1:15)
singlets <- FindClusters(object = singlets, resolution = 0.1)
singlets <- RunUMAP(object = singlets, dims = 1:15)
pdf("Gan_66_UMAP_singlets_after_processing.pdf", width=8, height=6)
DimPlot(object = singlets, reduction = 'umap', label = T)
dev.off()
saveRDS(singlets, "Gan_66_singlets_PCA.rds")
###############################################################################################
###############################################################################################
|
4807e873126711ebe328636936028407a7dbdadf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sampSurf/examples/gridCellEnhance.Rd.R
|
efa523b949d22f6f2f7ccbe0cea94b24f5ace9d3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 517
|
r
|
gridCellEnhance.Rd.R
|
# Machine-extracted example script for sampSurf::gridCellEnhance (purled from
# the package's Rd examples).
library(sampSurf)
### Name: gridCellEnhance
### Title: Add Grid Lines and Centers in Package "sampSurf"
### Aliases: gridCellEnhance
### Keywords: ~kwd1 ~kwd2
### ** Examples
#
# apply it after plotting a sampling surface...
#
tr = Tract(c(x=50,y=50), cellSize=0.5)
btr = bufferedTract(10, tr)
ssSA = sampSurf(2, btr, iZone = 'sausageIZ', plotRadius=3,
                buttDiam=c(20,40))
plot(ssSA, axes=TRUE)
gridCellEnhance(ssSA@tract, gridLines=TRUE)
#
# on a tract only...
#
plot(btr, axes=TRUE, gridCenters=TRUE)
|
cf68f2a73b8ca68acd8dcbd657da0698cf4aea17
|
6c6637636c4856ae9fa0e2f87596f6920e5e3f1f
|
/R/get_site_coordinates.R
|
3db726f7f634c9fd281bf8852926187cb9a4265d
|
[] |
no_license
|
LimnoTech/SWMPrExtension
|
177e8f7e34f4909a56b40e56320c3bbe8a610754
|
bfc36ce765dbc7d8ed7a6d39675dd61f3dda3f33
|
refs/heads/master
| 2020-03-21T23:17:46.758854
| 2018-06-27T18:21:01
| 2018-06-27T18:21:01
| 94,801,603
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,392
|
r
|
get_site_coordinates.R
|
#' Identify NERRS sampling locations from metadata
#'
#' Identify the latitude/longitude for sampling stations based on the metadata
#' in the data file
#'
#' @param data.file location of data
#' @param active logical. Only return active stations? Defaults to \code{TRUE}.
#'
#' @importFrom dplyr group_by summarise
#' @importFrom magrittr "%>%"
#' @importFrom rlang .data
#' @importFrom SWMPr site_codes
#'
#' @export
#'
#' @details This function is intended for internal use with the NERRS reserve level reporting scripts. It returns the names, station codes, and coordinates associated with the data in the user-specified data folder.
#'
#' @author Julie Padilla
#'
#' @concept reporting
#'
#' @return Returns a dataframe of station ids, station names, lat/long
#'
get_site_coordinates <- function(data.file, active = TRUE){

  # Download the CDMO station metadata once and subset to this reserve.
  # (The original duplicated the SWMPr::site_codes() web call in both
  # branches of the if/else; fetching once is equivalent and cheaper.)
  res_data <- SWMPr::site_codes()
  res_data <- res_data[res_data$nerr_site_id == get_site_code(data.file), ]

  # Optionally keep only stations flagged as active in the metadata.
  if (active) {
    res_data <- res_data[res_data$status == 'Active', ]
  }

  # Collapse to one row per station with its coordinates.
  sites <- res_data %>%
    dplyr::group_by(.data$nerr_site_id, .data$station_name
                    , .data$latitude, .data$longitude) %>%
    dplyr::summarise()

  # Coordinates arrive as factors/characters; convert to numeric. Longitude is
  # negated — presumably the metadata stores it as a positive magnitude for
  # the western hemisphere (TODO confirm against CDMO field definitions).
  sites$latitude <- as.numeric(as.character(sites$latitude))
  sites$longitude <- -as.numeric(as.character(sites$longitude))

  return(sites)
}
|
20941d80ad2e6f61de75bc9e864bf9c25ca02442
|
26cdd50a58304f4290ab1180b5b835b868f67f39
|
/man/feor93_08_crosswalk.Rd
|
b3e033af710b1e72810b1601a837641fa5151149
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
svraka/statoszt
|
148b7612b212de27dc0b9030f87cd546e8944ec3
|
aab2a0ae24e24bb98c7034d2896315aed11e82d4
|
refs/heads/master
| 2021-07-10T06:44:35.171332
| 2020-10-13T17:03:44
| 2020-10-13T17:03:44
| 202,533,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 405
|
rd
|
feor93_08_crosswalk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{feor93_08_crosswalk}
\alias{feor93_08_crosswalk}
\title{FEOR-93 to FEOR-08 crosswalk}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 817 rows and 2 columns.
}
\usage{
feor93_08_crosswalk
}
\description{
FEOR-93 to FEOR-08 crosswalk
}
\keyword{datasets}
|
2e916245b6175733e0da77960f219c30fefd05fb
|
ba13db93987c2b676e31def1c810cf2ffd53e59b
|
/plot2.R
|
862dc3bee458ec8ec8874d5b3f880b9852c78239
|
[] |
no_license
|
n-pandey/ExData_Plotting1
|
16a1154c20d8cb809f7d16a35506b62ac95041f0
|
2caab394e06bece30552d1fbbef7cdedeb99f19a
|
refs/heads/master
| 2021-05-04T15:57:21.213792
| 2018-02-05T02:18:33
| 2018-02-05T02:18:33
| 120,241,015
| 0
| 0
| null | 2018-02-05T01:41:40
| 2018-02-05T01:41:40
| null |
UTF-8
|
R
| false
| false
| 257
|
r
|
plot2.R
|
# Plot Graph 2
# Line chart of global active power over time. Assumes `CleanData` (with a
# date-time column `Date_Time` and numeric `Global_active_power`) already
# exists in the workspace — built by an earlier script in this assignment.
with(CleanData, plot(x = Date_Time, y = Global_active_power, type = "l",ylab = "Global Active Power (kilowatts)",xlab = NA))
# Copy to png file
# Copies the current screen device to a 480x480 PNG, then closes the PNG device.
dev.copy(png, "plot2.png", bg = "transparent", width = 480, height = 480 )
dev.off()
|
c3da7310aee4bdd66aad531b0bc44c5d952d1e51
|
732999ea64c95d1a1b539def6bd8564c1129bb52
|
/app.R
|
a5051ec17827f8981de2ca93bfa98ea703ec90b9
|
[
"CC-BY-4.0"
] |
permissive
|
nikolajthams/COVID-19
|
489737a854c860e98bed201d04811af75ff8f6f5
|
4fa1bbe9f1c509c07ade8cb6259660213ad94723
|
refs/heads/master
| 2021-07-12T01:53:20.230646
| 2021-05-31T09:05:19
| 2021-05-31T09:05:19
| 246,864,729
| 1
| 0
| null | 2020-03-12T15:10:07
| 2020-03-12T15:10:06
| null |
UTF-8
|
R
| false
| false
| 32,503
|
r
|
app.R
|
library(shiny)
library(reshape2)
library(ggplot2)
library(shiny)
library(tidyverse)
library(scales)
library(shinydashboard)
library(DT)
library(knitr)
library(plotly)
library(magrittr)
library(tidyselect)
library(shinyhelper)
library(lubridate)
theme_set(theme_minimal())
# Define data paths -------------------------------------------------------
source("code/data_paths.R")
# Function definitions ----------------------------------------------------
# Nonlinear least squares with brute-force / random-search starting values.
#
# NOTE(review): this appears to be a vendored copy of nls2::nls2()
# (G. Grothendieck) inlined to avoid the package dependency — confirm
# provenance/licensing. It relies on the unexported stats:::nlsModel /
# stats:::nlsModel.plinear internals.
#
# When `start` is a 2-row data frame it is treated as per-parameter ranges:
# for "brute-force"/"grid-search" the ranges are expanded into a grid of
# candidate starts; for "random-search" random starts are drawn. Every
# candidate start is fitted and, unless all = TRUE, the fit with the smallest
# deviance is returned.
nls2 <- function(formula, data = parent.frame(), start, control = nls.control(),
    algorithm = c("default", "plinear", "port", "brute-force",
        "grid-search", "random-search", "plinear-brute", "plinear-random"),
    trace = FALSE, weights, ..., all = FALSE)
{
    if (!inherits(formula, "formula"))
        formula <- as.formula(formula, env = parent.frame())
    L <- list(formula = formula, data = data, control = control,
        trace = trace)
    if (!missing(start)) {
        # An existing nls fit may be supplied as `start`; use its coefficients
        # (dropping the ".lin"-style linear parameters).
        if (inherits(start, "nls")) {
            start <- coef(start)
            start <- start[grep("^[^.]", names(start))]
        }
        L$start <- start
    }
    finIter <- NROW(L$start)
    L <- append(L, list(...))
    algorithm <- match.arg(algorithm)
    if (algorithm == "grid-search")
        algorithm <- "brute-force"
    call <- match.call()
    if (algorithm == "brute-force" || algorithm == "random-search" ||
        algorithm == "plinear-brute" || algorithm == "plinear-random") {
        # For the search algorithms, shadow stats::nls with a local version
        # that only evaluates the model at the given start (no iteration) and
        # silences the "singular gradient" error.
        nls <- function(formula, data, start, weights, ...) {
            nlsModel <- if (algorithm == "plinear-brute" || algorithm ==
                "plinear-random")
                stats:::nlsModel.plinear
            else stats:::nlsModel
            environment(nlsModel) <- environment()
            stop <- function(...) {
                msg <- "singular gradient matrix at initial parameter estimates"
                if (list(...)[[1]] == msg)
                    return()
                stop(...)
            }
            m <- if (missing(weights)) {
                nlsModel(formula, data, start)
            }
            else {
                wts <- eval(substitute(weights), data, environment(formula))
                nlsModel(formula, data, start, wts)
            }
            structure(list(m = m, call = call, convInfo = list(isConv = TRUE,
                finIter = finIter, finTol = NA)), class = "nls")
        }
    }
    else L$algorithm <- algorithm
    if (missing(start))
        return(do.call(nls, L))
    else L$start <- as.data.frame(as.list(start))
    # A single candidate start: delegate straight to nls().
    if (NROW(L$start) == 1)
        return(do.call(nls, L))
    # Two rows: interpret as (min, max) ranges per parameter.
    if (NROW(L$start) == 2) {
        if (algorithm == "brute-force" || algorithm == "plinear-brute") {
            rng <- as.data.frame(lapply(start, range))
            mn <- rng[1, ]
            mx <- rng[2, ]
            # Grid resolution k chosen so the grid size stays near maxiter.
            k1 <- pmax(ceiling(sum(mx > mn)), 1)
            k <- pmax(ceiling(control$maxiter^(1/k1)), 1)
            DF <- as.data.frame(rbind(mn, mx, k))
            finIter <- k^k1
            L$start <- expand.grid(lapply(DF, function(x) seq(x[1],
                x[2], length = x[3])))
        }
        else {
            # Random search: maxiter uniform draws inside the ranges.
            finIter <- control$maxiter
            u <- matrix(runif(finIter * NCOL(start)), NCOL(start))
            L$start <- t(u * unlist(start[1, ]) + (1 - u) * unlist(start[2,
                ]))
            L$start <- as.data.frame(L$start)
            names(L$start) <- names(start)
        }
    }
    # Fit (or attempt to fit) the model at every candidate start; failures
    # become NA entries.
    result <- apply(L$start, 1, function(start) {
        L$start <- start
        xx <- try(do.call(nls, L))
        yy <- if (inherits(xx, "try-error"))
            NA
        else xx
        if (trace)
            print(yy)
        yy
    })
    if (all) {
        for (i in seq_along(result)) result[[i]]$data <- substitute(data)
    }
    else {
        # Keep only the candidate with the smallest deviance.
        ss <- lapply(result, function(x) if (identical(x, NA))
            NA
        else deviance(x))
        result <- result[[which.min(ss)]]
        result$data <- substitute(data)
    }
    result
}
# Load confirmed data ---------------------------------------------------------------
# Pre-processed Johns Hopkins front-page data: one row per country per date,
# with smoothed new-case/new-death columns read as doubles.
data <- read_delim(
  "code/data/frontpage_data.csv",
  delim = ",",
  col_types=list("NewCasesSmooth" = col_double(), "NewDeathsSmooth" = col_double())
)
# Load Danish data from SSI -----------------------------------------------
# Daily Danish test counts (Statens Serum Institut), pre-processed upstream.
ssi <- read_delim(
  "code/data/ssi_processed_daily.csv",
  delim = ","
)
# Cumulative Danish test numbers broken down by age group.
agedata <- read_delim(
  "code/data/ssi_processed_agegroups.csv",
  delim = ","
)
# make shadow numbers:
# Estimated ("shadow") total-case numbers used by the wirvsvirus pane.
wvv.data <- read_delim(
  "code/data/wvvdata.csv",
  delim = ","
)
# Get countries w age data
# `deaths_path_age` comes from code/data_paths.R sourced above.
death.by.age <- read.csv(
  deaths_path_age,
  header=TRUE,
  stringsAsFactors = FALSE,
  sep = ";"
)
countries.w.age.data = death.by.age$Country
# Exponential growth models --------------------------------------------------------------
# Fit an exponential growth model  Cases + 1 = (1 + r)^(t - l)  per country.
#
# A log-linear lm() fit supplies starting values (l = intercept, r = slope)
# for nls(); when nls() fails to converge, a grid search over (l, r) via the
# nls2() helper defined above is used as a fallback.
#
# Args:
#   country: character vector of one or more Country.Region names.
#   dt: data frame with at least Country.Region, Cases and t columns.
#   get_convergence: if TRUE, also return per-country flags saying whether
#     plain nls() converged ("Yes") or the grid-search fallback was used ("No").
#
# Returns: a single nls fit when one country is given, otherwise a named list
#   of fits; with get_convergence = TRUE, list(fits, convergence flags).
.fit_nls <- function(country, dt, get_convergence = FALSE) {
  # Convergence flags. Previously `conv` was only created in the
  # multi-country branch, so get_convergence = TRUE with a single country
  # raised "object 'conv' not found".
  conv <- list()
  if (length(country) > 1) {
    mm <- list()
    fm0 <- list()
    for (i in country) {
      # Log-linear fit to seed the nonlinear optimisation.
      fm0[[i]] <- lm(
        I(log(Cases + 1)) ~ t,
        data = dt,
        subset = Country.Region == i
      ) %>% coef
      names(fm0[[i]]) <- c("l", "r")
      try({
        mm[[i]] <- nls(
          I(Cases + 1) ~ (1 + r)**(t - l),
          data = filter(
            dt,
            Country.Region == i
          ),
          start = fm0[[i]],
          control = nls.control(maxiter = 1e5, minFactor = 1 / 2**10)
        )
        conv[[i]] <- "Yes"
      }, silent = T)
      if (is.null(mm[[i]])) {
        # BUG FIX: the fallback previously filtered on the whole `country`
        # vector (Country.Region == country), so the grid search was fitted
        # on the wrong subset of rows; it must use the loop variable `i`.
        mm[[i]] <- nls2(
          I(Cases + 1) ~ (1 + r)**(t - l),
          data = filter(
            dt,
            Country.Region == i
          ),
          start = expand.grid(
            "l" = seq(1, 20, length.out = 40),
            "r" = seq(0, 1, length.out = 100)
          ),
          control = nls.control(maxiter = 1e3, minFactor = 1 / 2**10),
          algorithm = "grid-search"
        )
        conv[[i]] <- "No"
      }
    }
  } else {
    fm0 <- lm(
      I(log(Cases + 1)) ~ t,
      data = dt,
      subset = Country.Region %in% country
    ) %>% coef
    names(fm0) <- c("l", "r")
    mm <- NULL
    try({
      mm <- nls(
        I(Cases + 1) ~ (1 + r)**(t - l),
        data = filter(
          dt,
          Country.Region %in% country
        ),
        start = fm0,
        control = nls.control(maxiter = 1e5, minFactor = 1 / 2**10)
      )
      conv[[country]] <- "Yes"
    }, silent = T)
    if (is.null(mm)) {
      mm <- nls2(
        I(Cases + 1) ~ (1 + r)**(t - l),
        data = filter(
          dt,
          Country.Region %in% country
        ),
        start = expand.grid(
          "l" = seq(1, 20, length.out = 40),
          "r" = seq(0, 1, length.out = 100)
        ),
        control = nls.control(maxiter = 1e3, minFactor = 1 / 2**10),
        algorithm = "grid-search"
      )
      conv[[country]] <- "No"
    }
  }
  if (get_convergence) {
    return(
      list(mm, conv)
    )
  } else {
    return(
      mm
    )
  }
}
# Build an interactive plotly figure comparing actual cumulative cases with
# the exponential-model prediction from .fit_nls().
#
# Args:
#   model: a fitted nls object (single country) or a named list of fits
#     (multiple countries, keyed by country name).
#   country: character vector of country names matching `model`.
#   dt: data frame with Cases, t and Country.Region columns.
#   tmax: horizon (in days) for the prediction curve; defaults to the last
#     observed day.
#
# NOTE(review): the ggtitle() below calls coef(model) directly — with more
# than one country `model` is a list and coef() will error; also
# paste(country, sep = ",") does not join multiple names (collapse was likely
# intended). Confirm callers always pass a single country.
.get_plots <- function(model, country, dt, tmax = NULL) {
  # Observed data, labelled for the legend.
  plotdata <- dt %>%
    filter(
      Country.Region %in% country
    ) %>%
    select(
      Cases, t, Country.Region
    ) %>%
    mutate(
      "Method" = "Actual cases"
    )
  if (is.null(tmax)) tmax <- max(plotdata$t)
  # Prediction grid: one row per country per day 0..tmax.
  tmpdata <- expand.grid(
    "Country.Region" = country,
    "Method" = "Predicted cases\n(Assuming no interventions)",
    "t" = seq(0, tmax, by = 1)
  )
  # Model predicts Cases + 1, hence the "- 1" on the predictions.
  if (length(country) > 1) {
    predictions <- c()
    for (i in country) {
      predictions <- c(predictions, predict(model[[i]], filter(tmpdata, Country.Region == i)) - 1)
    }
    tmpdata$Cases <- predictions
  } else {
    tmpdata$Cases <- predict(model, tmpdata) - 1
  }
  plotdata <- rbind(
    plotdata,
    select(tmpdata, Cases, t, Method, Country.Region)
  )
  # Interaction factor distinguishing each Method x Country series.
  plotdata$Group <- as.factor(plotdata$Method):as.factor(plotdata$Country.Region)
  return({ggplotly(
    ggplot(
      data = plotdata,
      aes(x = t-1, y = Cases, color = Method)
    ) +
      geom_line(lwd = 1, alpha = 0.5) +
      xlab("Days from first case") +
      ylab("Cumulative cases") +
      ggtitle(
        paste(
          "Country: ",
          paste(country, sep = ","),
          "\n",
          "Estimated infection rate: ",
          round(coef(model)[2], 3),
          "\n",
          "Estimated lag-phase duration (days): ",
          round(abs(coef(model)[1]), 3),
          sep = ""
        )
      ) +
      theme_minimal() +
      theme(text = element_text(size = 12),legend.position="bottom") +
      labs(colour ="Method:")
  )})
}
# UI ----------------------------------------------------------------------
# shinydashboard layout with four panes: country plots ("plots"), Danish SSI
# test data ("ssidat"), estimated total cases ("wirvsvirus") and an About
# page ("mainpage"). Most inputs carry shinyhelper question-mark popups.
ui <- dashboardPage(
  dashboardHeader(title = "COVID19"),
  dashboardSidebar(width = 250,
    sidebarMenu(
      menuItem(
        text = "Plots", tabName = "plots", icon = icon("bar-chart-o")
      ),
      menuItem(
        text = "Danish data on COVID19 tests", tabName = "ssidat", icon = icon("bar-chart-o")
      ),
      menuItem(
        text = "Estimated number of total cases", tabName = "wirvsvirus", icon = icon("file-alt")
      ),
      menuItem(text = "About", tabName = "mainpage", icon = icon("file-alt"))
    )
  ),
  dashboardBody(
    # Google Analytics tag (the string intentionally spans two lines).
    tags$head(HTML("<script async src='https://www.googletagmanager.com/gtag/js?id=UA-160709431-1'
></script>")),
    tags$head(includeScript("code/analytics.js")),
    tabItems(
      # Welcome page
      tabItem(
        tabName = "mainpage",
        fluidPage(
          withMathJax(
            includeMarkdown("code/docs/mainpage.Rmd")
          )
        )
      ),
      # Pane with country plots
      tabItem(
        tabName = "plots",
        fluidPage(
          sidebarLayout(
            sidebarPanel(
              radioButtons(
                "log",
                "Y-axis scale",
                choices = c(
                  "Original scale" = "unscaled",
                  "Logarithmic scale" = "log"
                ),
                selected = "unscaled"
              ) %>% helper(
                icon = "question",
                type = "inline",
                content = c(
                  "<b>Change the y-axis to be on a logarithmic scale</b><br>",
                  "Logarithmic scales are useful when visualizing numbers that are vastly different,
              because very large nominal differences are represented as ratios.",
                  "This means, for example, that the vertical distance between 1 and 10 looks the same as the vertical distance between 1000 and 10.000 -- because they both differ by a factor of 10."
                )
              ),
              selectInput(
                "countries",
                "Countries",
                choices = data$Country.Region,
                selected = c("Denmark", "Sweden", "Norway"),
                multiple = T
              ) %>% helper(
                icon = "question",
                type = "inline",
                content = c(
                  "Here you can choose the countries you wish to see in the graph.",
                  "Multiple countries can be chosen at once. Simply search for the country you wish to add and click it.",
                  "To remove countries from the graph, remove them from the search field as you would normally delete text."
                )
              ),
              # Quick-select buttons handled by observeEvent() in the server.
              actionButton("western.countries", "Major western countries"),
              actionButton("scandinavia", "Nordic countries"),
              actionButton("asia", "Asia"),
              actionButton("latin", "Latin America"),
              actionButton("africa", "Africa"),
              actionButton("clear.countries", "Clear"),
              checkboxInput("rebase", "View graph from death number x", F) %>%
                helper(
                  type = "inline",
                  icon = "question",
                  content = c(
                    "This feature changes the x-axis from dates to 'Days since death number ${x}$'.",
                    "Viewing the data in this way makes it easy to compare the timeline of different countries, even if the outbreaks started months apart from each other."
                  )
                ),
              conditionalPanel("!input.rebase",
                dateInput("left.date", "Start date", value = as.Date("2020-03-01"), max=today())%>%
                  helper(
                    type = "inline",
                    icon = "question",
                    content = c(
                      "For many countries, it is not so interesting to see data prior to March.",
                      "This option sets the default starting date. You can still drag the x-axis with the mouse to go back in time."
                    )
                  )),
              conditionalPanel("input.rebase",
                numericInput('rebase.value', 'Death number', value=10, min=1, step=20)%>%
                  helper(
                    type = "inline",
                    icon = "question",
                    content = c(
                      "Showing the development of the data since the day of death number {x} in that country"
                    )
                  )),
              # Values here are column names of `data`; see the `yaxislab`
              # lookup below the UI definition.
              radioButtons(
                "output",
                "Output",
                choices = c(
                  "Total deaths" = "Deaths",
                  "Total confirmed cases" = "Cases",
                  "New deaths" = "NewDeaths",
                  "New deaths (smoothed)" = "NewDeathsSmooth",
                  "New confirmed cases" = "NewCases",
                  "New confirmed cases (smoothed)" = "NewCasesSmooth",
                  "Still infected" = "StillInfected",
                  "Recovered" = "Recovered",
                  "Percentage of population infected" = "PercentageOfPopulation",
                  "Percentage of population deceased" = "MortalityRatePop",
                  "Proportion of deaths among infected" = "MortalityRate",
                  "Proportion of recoveries among infected" = "RecoveryRate"
                ),
                selected = "NewCasesSmooth"
              ) %>%
                helper(
                  type = "inline",
                  icon = "question",
                  content = "Choose what output you want to see."
                ),
              checkboxInput("barchart", "View as bar chart (works well with 'New deaths' and 'New cases' when few countries are selected)", FALSE),
              downloadButton("downloadData", "Download Selected Data")
            ),
            mainPanel(
              div(
                style = "position:relative",
                plotlyOutput("country_plot"),
                # hover = hoverOpts("plot_hover", delay = 100, delayType = "debounce")),
                uiOutput("hover_info")
              ),
              fluidPage(
                withMathJax(
                  includeMarkdown("code/docs/text_below_plot.md")
                ),
                fluidRow(
                  br(),
                  # Change log box, shown only until 2020-06-08.
                  if(today() <= as.Date("2020-06-08")){
                    box(
                      withMathJax(
                        includeMarkdown("code/docs/log.md")
                      ),
                      title="Change log", status="info",
                      width=NULL, collapsible = T, collapsed = T)
                  })
              )
            )
          ),
          textOutput("JH_data_lastupdate")
        )
      ),
      # Pane with Danish data:
      tabItem(
        tabName = "ssidat",
        fluidPage(
          box(
            includeMarkdown("code/docs/ssi_doc.Rmd"),
            width = 12
          ),
          radioButtons(
            "ageYN", "View:",
            choices = c(
              "Daily number of tests and percentage positive" = "tot",
              "Cumulative Number of tests and percentage positive by age group" = "age"
            )
          ),
          plotlyOutput("ssiplot")
        )
      ),
      # Pane with wvv data
      tabItem(
        tabName = "wirvsvirus",
        (
          sidebarLayout(
            sidebarPanel(
              radioButtons(
                "wvv.log",
                "Y-axis scale",
                choices = c("Original scale" = "unscaled", "Logarithmic scale" = "log"),
                selected = "unscaled"
              ),
              radioButtons(
                "wvv.compare_ouput",
                "Compare estimator to",
                choices = c("Deaths" = "deaths", "Confirmed cases" = "confirmed_cases"),
                selected="confirmed_cases"
              ),
              selectInput(
                "wvv.countries",
                "Countries",
                choices = data$Country.Region,
                selected = c("Denmark", "Sweden", "Norway"),
                multiple = T
              ),
              numericInput(
                "wvv.death_delay",
                "Days from illness onset to death",
                value = 20,
                min = 1
              )
              # ,
              # textInput(
              #   "wvv.death_rate",
              #   "Case fatality rate for each age group (0-9, 10-19, ...) [comma-separated]",
              #   value=c("0.0, 0.0, 0.0, 0.001, 0.001, 0.006, 0.017, 0.070, 0.183")
              # )
              ,
              h5("Case fatality rate numbers from South Korea are used in the estimation.")
              # textInput(
              #   "wvv.rel_risk",
              #   "Relative risk (comma-separated)",
              #   value=c("0, 0, 0, 0.0014,0.004,0.013,0.065,0.274,0.641")
              # ),
              # sliderInput(
              #   "wvv.dr1",
              #   "Death rate 0-9",
              #   min=0, max=1, value=0
              # )
            ),
            mainPanel(
              div(
                style = "position:relative",
                plotlyOutput("wirvsvirus"),
              ),
              h6("Solid curves indicate confirmed numbers. Shaded regions are estimated number of infected, measured from illness onset."),
              verbatimTextOutput("countries.age.data"),
              # fluidPage(
              withMathJax(
                includeMarkdown("code/docs/wvv_explanation.md")
              )
              # )
            )
          )
        )
      )
    )
  )
)
# Lookup table mapping human-readable plot labels (names) to the column names
# of `data` (values); used by the server for the country plot's title and
# y-axis label.
yaxislab <- c(
  "Total confirmed cases" = "Cases",
  "New confirmed cases" = "NewCases",
  "Still infected" = "StillInfected",
  "Cumulative recovered patients" = "Recovered",
  "Cumulative deaths" = "Deaths",
  "Population infected (%)" = "PercentageOfPopulation" ,
  "Population deceased (%)" = "MortalityRatePop" ,
  "Mortality rate (%)" = "MortalityRate",
  "Recovery rate (%)" = "RecoveryRate",
  "New deaths" = "NewDeaths",
  "New deaths (smoothed)" = "NewDeathsSmooth",
  "New confirmed cases (smoothed)" = "NewCasesSmooth")
# Server ------------------------------------------------------------------
# Shiny server: wires the quick-select country buttons, a reactive filtered
# dataset, the main country plot with hover tooltip, CSV download, the
# shadow-number ("wirvsvirus") plot and the Danish SSI plots.
server <- function(input, output, session) {
  observe_helpers(withMathJax = TRUE)
  # Add predefined lists
  # Each button appends a fixed country set to the current selection;
  # "Clear" empties it.
  observeEvent(input$western.countries,
    updateSelectInput(session, "countries", selected=c(input$countries,
      c("US", "United Kingdom", "Germany", "Italy", "Spain", "France")))
  )
  observeEvent(input$scandinavia,
    updateSelectInput(session, "countries", selected=c(input$countries,
      c("Denmark", "Sweden", "Norway", "Iceland", "Finland")))
  )
  observeEvent(input$asia,
    updateSelectInput(session, "countries", selected=c(input$countries,
      c("China", "India", "Indonesia", "Japan", "Korea, South")))
  )
  observeEvent(input$africa,
    updateSelectInput(session, "countries", selected=c(input$countries,
      c("Nigeria", "Ethiopia", "Egypt", "Congo (Kinshasa)", "Tanzania")))
  )
  observeEvent(input$latin,
    updateSelectInput(session, "countries", selected=c(input$countries,
      c("Brazil", "Mexico", "Colombia", "Argentina", "Peru")))
  )
  observeEvent(input$clear.countries,
    updateSelectInput(session, "countries", selected=character(0))
  )
  # source("code/make_wvv_data_v2.R", local = T)
  number_ticks <- function(n) {
    function(limits)
      pretty(limits, n)
  }
  # Reactive: `data` restricted to the selected countries, optionally
  # truncated to start at death number `rebase.value`, with a per-country
  # day counter `t` and a percentage-of-population column added.
  datasetInput <- reactive({
    data_tmp <- data %>%
      filter(
        Country.Region %in% c(input$countries)
      ) %>%
      mutate(
        Country.Region = factor(Country.Region, levels = c(input$countries))
      ) %>%
      group_by(Country.Region) %>%
      mutate(
        LeadCases = ifelse(
          is.na(lead(Cases)),
          Inf,
          lead(Cases)
        ),
        LeadDeaths = ifelse(
          is.na(lead(Deaths)),
          Inf,
          lead(Deaths)
        )
      ) %>%
      filter(
        LeadDeaths >= ifelse(input$rebase == TRUE, input$rebase.value, 0)
      ) %>%
      # ungroup %>% {
      #   LastDayBecoreConfirmedCase <-
      #     (.) %>% arrange(Date) %>% filter(LeadCases > ifelse(input$rebase == TRUE, input$rebase.value, 0)) %>% summarize(min(Date)) %>% pull()
      #   (.) %>% filter(Date >= LastDayBecoreConfirmedCase)
      # } %>%
      select(-c(LeadCases, LeadDeaths)) %>%
      mutate(
        "t" = (Date - as.Date(min(Date))) %>% as.numeric,
        "PercentageOfPopulation" = (Cases / Population) * 100,
        Country = Country.Region
      ) %>% ungroup
  })
  # Summary of which selected wvv countries have age-specific death data.
  output$countries.age.data <- renderText({
    paste(paste("Countries with age specific data: \t", paste(intersect(input$wvv.countries, countries.w.age.data), collapse = ", ")),
      paste("Countries with no age specific data:\t", paste(setdiff(input$wvv.countries, countries.w.age.data), collapse = ", ")),
      sep = "\n")
  })
  output$JH_data_lastupdate <- renderText({
    paste(
      "Johns Hopkins data was last updated at:",
      file.info(cases_path)$mtime,
      "(Central European Time)",
      sep = " "
    )
  })
  # Main country plot: line/bar chart of the chosen output column, with
  # date or rebased ("days since death number x") x-axis.
  output$country_plot <- renderPlotly({
    patient.x.name = paste("Days since death number", input$rebase.value)
    if(input$rebase == TRUE){
      # Rename `t` so plotly tooltips show the rebased axis label; the
      # backticks are needed because the name contains spaces.
      p <- ggplot(datasetInput()%>% rename(!!patient.x.name:="t"),
        aes_string(
          x = paste("`", patient.x.name, "`", sep = ""),
          y = input$output,
          colour = "Country.Region",
          Country = "Country",
          label = "Date"
        )) +
        xlab(paste("Days since death number ", input$rebase.value)) + scale_x_continuous(breaks=c(0, seq(7,1000,7)))# + geom_point(aes_string(text = hover.date))
    } else {
      p <- ggplot(datasetInput(),
        aes_string(
          x = "Date",
          y = input$output,
          colour = "Country.Region",
          Country = "Country"
        )) +
        scale_x_date(breaks = date_breaks("week"), date_labels = "%b %d")
    }
    if (input$log == "log") {
      p <- p + scale_y_log10(labels = comma)
    } else {
      p <- p + scale_y_continuous(labels = comma)
    }
    p <- p + theme_minimal() +
      ggtitle(names(yaxislab)[yaxislab == input$output]) +
      theme(plot.title = element_text(hjust = 0.5)) +
      ylab(names(yaxislab)[yaxislab == input$output]) +
      labs(colour="Country")
    if (input$barchart){#(input$output %in% c("NewCases", "NewDeaths")){
      p = p + geom_bar(aes(fill=Country.Region), position="dodge", stat="identity", alpha=1, lwd = 0.1)
    } else {
      p = p + geom_line() + geom_point(alpha=0.5, size=0.4)
    }
    # For smoothed series, overlay the raw daily points faintly.
    if(input$output == "NewDeathsSmooth"){
      p = p + geom_point(aes(y=NewDeaths), alpha=0.3, size=0.4)
    }
    if(input$output == "NewCasesSmooth"){
      p = p + geom_point(aes(y=NewCases), alpha=0.3, size=0.4)
    }
    p <- ggplotly(
      p,
      tooltip = c(
        "x",
        "y",
        "Country"
      )
    )
    if(input$rebase == FALSE){
      # Default the visible date window to [left.date, today].
      p <- p %>% layout(xaxis = list(range = c(
        # as.numeric(as.Date("2020-03-01")),
        as.numeric(input$left.date),
        as.numeric(today())
      )))
    }
    p
  })
  # Hover tooltip positioned over the plot.
  # NOTE(review): depends on input$plot_hover, but the hoverOpts wiring in
  # the UI is commented out — confirm this output is still reachable.
  output$hover_info <- renderUI({
    hover <- input$plot_hover
    point <- nearPoints(datasetInput(), hover, threshold = 10, maxpoints = 1, addDist = TRUE)
    if (nrow(point) == 0) return(NULL)
    # calculate point position INSIDE the image as percent of total dimensions
    # from left (horizontal) and from top (vertical)
    left_pct <- (hover$x - hover$domain$left) / (hover$domain$right - hover$domain$left)
    top_pct <- (hover$domain$top - hover$y) / (hover$domain$top - hover$domain$bottom)
    # calculate distance from left and bottom side of the picture in pixels
    left_px <- hover$range$left + left_pct * (hover$range$right - hover$range$left)
    top_px <- hover$range$top + top_pct * (hover$range$bottom - hover$range$top)
    # create style property for tooltip
    # color is set so tooltip is a bit transparent
    # z-index is set so we are sure are tooltip will be on top
    style <- paste0("position:absolute; z-index:100; background-color: rgba(245, 245, 245, 0.85); ",
      "left:", left_px + 2, "px; top:", top_px + 2, "px;")
    # actual tooltip created as wellPanel
    wellPanel(
      style = style,
      p(HTML(paste0("<b> Country: </b>", point$Country.Region, "<br/>",
        "<b> Date: </b>", point$Date, "<br/>",
        "<b> Value: </b>", point[,input$output], "<br/>"
      )))
    )
  })
  output$dynamic <- renderPrint({
    req(input$plot_hover)
    verbatimTextOutput("vals")
  })
  output$vals <- renderPrint({
    hover <- input$plot_hover
    HoverData <- nearPoints(datasetInput(),input$plot_hover) %>% select(Country.Region,Date,input$output)
    req(nrow(HoverData) != 0)
    knitr::kable(HoverData, "pandoc")
  })
  # CSV download of the currently filtered dataset.
  output$downloadData <- downloadHandler(
    filename = function() {
      paste("COVID19_", paste(sort(input$countries), collapse = "_"), ".csv", sep =
        "")
    },
    content = function(file) {
      write.csv(datasetInput(), file, row.names = TRUE)
    }
  )
  # Shadow-number plot: confirmed cases or deaths (solid lines) versus the
  # estimated infected band (ribbon), shifted back by the illness-to-death
  # delay chosen by the user.
  output$wirvsvirus <- renderPlotly({
    make_estimate_plot <- function(input) {
      if (input$wvv.compare_ouput == "confirmed_cases") {
        firstDate <-
          wvv.data %>%
          filter(
            ConfirmedCases != 0,
            Country %in% input$wvv.countries
          ) %>%
          pull(Date) %>%
          min
      } else {
        firstDate <-
          wvv.data %>%
          filter(
            Deaths != 0,
            Country %in% input$wvv.countries
          ) %>%
          pull(Date) %>%
          min
      }
      # "Date " (note the trailing space) is the estimate's date axis,
      # shifted back by the onset-to-death delay.
      wvv.data %<>% filter(
        Country %in% input$wvv.countries
        # ,
        # Date >= firstDate
      ) %>%
        mutate(
          "Date " = as.Date(Date - input$wvv.death_delay)
        )
      cutoff_date <- wvv.data %>%
        filter(Cases.high > 0) %>%
        pull(`Date `) %>%
        min %>%
        as.Date
      wvv.data2 <- wvv.data %>%
        filter(`Date ` >= cutoff_date)
      p <- ggplot(
        data = wvv.data %>% filter(Date >= cutoff_date),
        aes(
          colour = Country
        )
      ) +
        scale_x_date(breaks = date_breaks("week"), date_labels = "%b %d") +
        ylab("Number of cases")
      if (input$wvv.compare_ouput == "confirmed_cases") {
        p <- p +
          geom_line(
            aes(
              x = Date, y = ConfirmedCases
            )
          )
      } else {
        p <- p +
          geom_line(
            aes(
              x = Date, y = Deaths
            )
          )
      }
      # wvv.data %<>% mutate(Date2 = as.Date(Date - input$wvv.death_delay))
      # Estimated infected band (low/high bounds) on the shifted date axis.
      p <- p + geom_ribbon(
        aes(
          x = `Date `,
          ymin = Cases.low,
          ymax = Cases.high,
          fill = Country,
          text1 = Cases.high,
          text2 = Cases.low
        ),
        data = wvv.data2,
        alpha = 0.3
      )
      if (input$wvv.log == "log") {
        p <- p + scale_y_log10(labels = comma
          # labels = function(x) format(x, scientific = F),
          # oob = squish_infinite
        )
      } else {
        p <- p + scale_y_continuous(labels = comma)
      }
      # else {
      #   p <- p + scale_y_continuous(
      #     labels = function(x) format(x, scientific = F)
      #   )
      # }
      # NOTE(review): gg_color_hue is defined but never used here.
      gg_color_hue <- function(n) {
        hues = seq(15, 375, length = n + 1)
        hcl(h = hues, l = 65, c = 100)[1:n]
      }
      return(p)
    }
    p <- make_estimate_plot(input)
    p <- ggplotly(p, tooltip = c(
      "colour", "x", "y", "text1", "text2"
    ))
    # Clean up plotly's auto-generated legend entries: ribbon traces (whose
    # legendgroup contains ",1") become "<Country>", lines become
    # "<Country>, confirmed" / "<Country>, estimated".
    for (i in 1:length(p$x$data)) {
      if (grepl(",1", p$x$data[[i]]$legendgroup)) {
        p$x$data[[i]]$legendgroup <- gsub(
          "[()]|,|1",
          "",
          p$x$data[[i]]$legendgroup
        )
        p$x$data[[i]]$name <- paste(
          gsub(
            "[()]|,|1",
            "",
            p$x$data[[i]]$name
          ),
          ", confirmed",
          sep = ""
        )
      } else {
        p$x$data[[i]]$name <- paste(
          p$x$data[[i]]$name,
          ", estimated",
          sep = ""
        )
      }
    }
    p
  })
  # Danish SSI plots: either daily totals/positivity or cumulative numbers
  # split by age group, depending on input$ageYN.
  output$ssiplot <- renderPlotly({
    if (input$ageYN == "tot") {
      p <- ggplot(
        data = ssi %>%
          mutate(
            InfectionRate = round(InfectionRate * 100, 4)
          ) %>%
          rename(
            "Number of tested people, daily" = Tested,
            "Percentage of tests positive, daily" = InfectionRate
          ) %>%
          dplyr::select(
            Date,
            "Number of tested people, daily",
            "Percentage of tests positive, daily"
          ) %>%
          melt(
            id.vars = "Date"
          ),
        aes(
          x = Date,
          y = value,
          colour = T
        )
      ) +
        scale_x_date(breaks = date_breaks("week"), date_labels = "%b %d") +
        geom_line() +
        geom_point() +
        facet_wrap(
          ~ variable,
          scales = "free_y"
        ) +
        ylab("") +
        theme_minimal()
      p <- ggplotly(p, tooltip = c("Date", "value"))
      for (i in 1:length(p$x$data)){
        # p2$x$data[[i]]$text <- c(p$x$data[[i]]$text, "")
        p$x$data[[i]]$showlegend <- FALSE
      }
      p
    } else {
      plotdata <- agedata %>%
        mutate(
          "Percentage of tests positive, total" = round(Laboratoriebekræftede / `Antal testede personer` * 100, 4)
        ) %>%
        rename(
          "Number of tested people, total" = "Antal testede personer"
        ) %>%
        dplyr::select(
          Date,
          Aldersgrupper,
          "Number of tested people, total",
          "Percentage of tests positive, total"
        ) %>%
        melt(
          id.vars = c("Date", "Aldersgrupper")
        )
      p <- ggplot(
        data = plotdata,
        aes(
          x = Date,
          y = value,
          colour = Aldersgrupper
        )
      ) +
        geom_line() +
        geom_point() +
        scale_x_date(labels = date_format("%b %d")) +
        facet_wrap(
          ~ variable,
          scales = "free"
        ) +
        theme_minimal() +
        theme(
          text = element_text(size = 12),
          legend.position = "bottom"
        ) +
        ylab("")
      ggplotly(p) %>%
        layout(
          legend = list(
            orientation = "h",
            y = -0.2
          )
        )
    }
  })
}
# Launch the application.
shinyApp(ui = ui, server = server)
|
ca392b8a1b2a15cc482a53b0ef10c2659a93a989
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggguitar/examples/notes_for_frets.Rd.R
|
053de54d71a33065705475698b9f3aa15613cb11
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 496
|
r
|
notes_for_frets.Rd.R
|
# Machine-extracted example script for ggguitar::notes_for_frets (purled from
# the package's Rd examples).
library(ggguitar)
### Name: notes_for_frets
### Title: This function is similar to 'chord_for_frets' but also handles
### scales. Unlike chords_for_frets, this function removes NAs. This
### means there are no muted strings identified if a chord is passed in
### the frets argument.
### Aliases: notes_for_frets
### ** Examples
# 18-element fret vector (presumably three 6-string rows of a G-major scale
# shape, NA = string not played — TODO confirm against the package docs).
G_M_scale <- c(3, 0, 0, 0, NA, NA,
               NA, 2, 2, NA, NA, NA,
               NA, 3, 4, NA, NA, NA)
notes_for_frets(G_M_scale)
|
f8d3dc2fac230ed401db2aee5a46554b0c797cff
|
ad7840c706ee1a0df54a4203fabe51d85cf42f51
|
/Functions/xGChainTeamFunction.R
|
16304b480845e6b640b420ed540d58ec4e17111f
|
[] |
no_license
|
davidp6/ASAShootingApp
|
18269810f27e11aa955a826c5553f8059bf87e35
|
ce75dd6ff159dedb761c422ebbd04187e1e26c4a
|
refs/heads/master
| 2022-10-04T05:14:24.871203
| 2020-06-09T05:18:51
| 2020-06-09T05:18:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,387
|
r
|
xGChainTeamFunction.R
|
# Summarize team chain data across games and seasons
library(dplyr)

teamchaindata.game <- readRDS("AppData/TeamxGChainData.rds")

# Aggregate per-game team xG-chain data to one summary row per team.
#
# NOTE(review): the original body was unfinished — it contained a "DO STUFF!"
# placeholder and an orphaned pipeline outside the (empty) function body,
# which made this file unparseable. The parseable core of that pipeline is
# restored below. The filtering arguments (date1, date2, season, even,
# pattern, pergame, advanced, venue, byseasons, confview, plot) are kept for
# interface compatibility but are not yet applied — TODO implement.
teamchains.func <- function(teamchaindata = teamchaindata.game,
                            date1 = as.Date('2000-01-01'),
                            date2 = as.Date('9999-12-31'),
                            season = 2011:2017,
                            even = F,
                            pattern = unique(teamxgoals$patternOfPlay.model),
                            pergame = F,
                            advanced = F,
                            venue = c('Home', 'Away'),
                            byseasons = T,
                            confview = T,
                            plot = F){

  # One row per team: games played and chains for/against.
  teamchaindata %>%
    group_by(team) %>%
    summarize(Games = length(unique(gameID)),
              ChainsF = sum(num.chains_f),
              ChainsA = sum(num.chains_a)) %>%
    data.frame()

  # TODO(review): the original fragment also computed per-chain rates —
  # chains.game = Chains/Games, passes.chain = sum(num.passes)/Chains,
  # dribbles.chain, counters.game, width.chain, width.median, start.def.pct,
  # start.mid.pct, start.att.pct, avg.xStart, med.xStart — but referenced
  # columns/intermediates (Chains, num.passes, num.chains, ...) that do not
  # match the summarize above, so they are left unimplemented rather than
  # guessed at.
}
|
0e21b16b06c74595eb542c6c4463cd19c366f109
|
d717103c595efda13336bc9349a3f32f0d36122e
|
/inst/doc/Rraven.R
|
274658a41a8f13436f28825e2df6625a2ff418a2
|
[] |
no_license
|
cran/Rraven
|
297d0997a35c10210c93183b20a8744a7d0bee49
|
db48cc8a1d826274c61774ffeaacf5f1d034b7d1
|
refs/heads/master
| 2023-04-06T05:10:46.136693
| 2021-04-21T05:10:06
| 2021-04-21T05:10:06
| 111,541,108
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,407
|
r
|
Rraven.R
|
## ---- echo = FALSE, message = FALSE-----------------------------------------------------------------------------------------------------------------
# NOTE(review): this file is knitr::purl() output from the Rraven vignette —
# edits here will be overwritten when the vignette is re-extracted.
# remove all objects
rm(list = ls())
# unload all non-based packages
out <- sapply(paste('package:', names(sessionInfo()$otherPkgs), sep = ""), function(x) try(detach(x, unload = FALSE, character.only = TRUE), silent = T))
#load packages
library(warbleR)
library(Rraven)
library(knitr)
library(kableExtra)
# Global knitr/output options used when rendering the vignette.
options(knitr.table.format = "html")
opts_chunk$set(comment = "")
opts_knit$set(root.dir = tempdir())
options(width = 150, max.print = 100)
#website to fix gifs
#https://ezgif.com/optimize
## ----eval = FALSE-----------------------------------------------------------------------------------------------------------------------------------
#
# download.file(
# url = "https://raw.githubusercontent.com/maRce10/Rraven/master/gifs/Rraven.hitgub.html",
# destfile = "Rraven.github.html")
#
## ---- eval = FALSE----------------------------------------------------------------------------------------------------------------------------------
#
# devtools::install_github("maRce10/warbleR")
#
# devtools::install_github("maRce10/Rraven")
#
# #from CRAN would be
# #install.packages("warbleR")
#
# #load packages
# library(warbleR)
# library(Rraven)
#
## ----eval= F, echo=T--------------------------------------------------------------------------------------------------------------------------------
#
# setwd(tempdir())
#
# #load example data
# data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4", "selec.table", "selection_files"))
#
# #save sound files in temporary directory
# writeWave(Phae.long1, "Phae.long1.wav", extensible = FALSE)
# writeWave(Phae.long2, "Phae.long2.wav", extensible = FALSE)
# writeWave(Phae.long3, "Phae.long3.wav", extensible = FALSE)
# writeWave(Phae.long4, "Phae.long4.wav", extensible = FALSE)
#
# #save Raven selection tables in the temporary directory
# out <- lapply(1:4, function(x)
# writeLines(selection_files[[x]], con = names(selection_files)[x]))
#
# #this is the temporary directory location (of course different each time is run)
# getwd()
#
## ----eval= T, echo=F--------------------------------------------------------------------------------------------------------------------------------
#load example data
data(list = c("Phae.long1", "Phae.long2", "Phae.long3", "Phae.long4", "selec.table", "selection_files"))
#save sound files in temporary directory
writeWave(Phae.long1, file.path(tempdir(), "Phae.long1.wav"), extensible = FALSE) #save sound files
writeWave(Phae.long2, file.path(tempdir(), "Phae.long2.wav"), extensible = FALSE)
writeWave(Phae.long3, file.path(tempdir(), "Phae.long3.wav"), extensible = FALSE)
writeWave(Phae.long4, file.path(tempdir(), "Phae.long4.wav"), extensible = FALSE)
#save Raven selection tables in temporary directory
out <- lapply(1:4, function(x)
writeLines(selection_files[[x]], con = file.path(tempdir(), names(selection_files)[x])))
#providing the name of the column with the sound file names
# rvn.dat <- imp_raven(sound.file.col = "Begin.File", all.data = FALSE)
#this is the temporary directory location (of course different each time is run)
# getwd()
## ---- eval=T, echo=T--------------------------------------------------------------------------------------------------------------------------------
list.files(path = tempdir(), pattern = "\\.txt$")
## ---- eval=FALSE------------------------------------------------------------------------------------------------------------------------------------
#
# #providing the name of the column with the sound file names
# rvn.dat <- imp_raven(all.data = TRUE, path = tempdir())
#
# head(rvn.dat)
#
## ---- eval=TRUE, echo=F, message=F------------------------------------------------------------------------------------------------------------------
#providing the name of the column with the sound file names
rvn.dat <- imp_raven(all.data = TRUE, path = tempdir())
kbl <- kable(head(rvn.dat), align = "c", row.names = F, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = FALSE, font_size = 11)
scroll_box(kbl, width = "808px",
box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---- eval=FALSE------------------------------------------------------------------------------------------------------------------------------------
#
# rvn.dat <- imp_raven(all.data = TRUE, waveform = TRUE,
# path = tempdir())
#
## ---- eval=FALSE------------------------------------------------------------------------------------------------------------------------------------
# #providing the name of the column with the sound file names
# rvn.dat <- imp_raven(sound.file.col = "End.File",
# warbler.format = TRUE, path = tempdir())
#
# head(rvn.dat)
#
## ---- eval=TRUE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
#providing the name of the column with the sound file names
rvn.dat <- imp_raven(sound.file.col = "End.File",
warbler.format = TRUE, path = tempdir())
kbl <- kable(head(rvn.dat), align = "c", row.names = F, escape = FALSE)
kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = TRUE, font_size = 12)
# scroll_box(kbl, width = "808",
# box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---- eval=FALSE, echo=TRUE-------------------------------------------------------------------------------------------------------------------------
#
# # convert to class selection.table
# rvn.dat.st <- selection_table(rvn.dat, path = tempdir())
#
# sp <- spectro_analysis(X = rvn.dat, bp = "frange", wl = 150,
# pb = FALSE, ovlp = 90, path = tempdir())
#
# head(sp)
#
## ---- eval=TRUE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
# convert to class selection.table
rvn.dat.st <- selection_table(rvn.dat)
sp <- spectro_analysis(X = rvn.dat, bp = "frange", wl = 150, pb = FALSE, ovlp = 90, path = tempdir())
kbl <- kable(head(sp), align = "c", row.names = F, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = FALSE, font_size = 11)
scroll_box(kbl, width = "808px",
box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---- eval = FALSE----------------------------------------------------------------------------------------------------------------------------------
#
# # create a color palette
# trc <- function(n) terrain.colors(n = n, alpha = 0.3)
#
# # plot catalog
# catalog(X = rvn.dat.st[1:9, ], flim = c(1, 10), nrow = 3, ncol = 3,
# same.time.scale = TRUE, spec.mar = 1, box = FALSE,
# ovlp = 90, parallel = 1, mar = 0.01, wl = 200,
# pal = reverse.heat.colors, width = 20,
# labels = c("sound.files", "selec"), legend = 1,
# tag.pal = list(trc), group.tag = "sound.files", path = tempdir())
#
## ---- eval=FALSE------------------------------------------------------------------------------------------------------------------------------------
#
# #remove previous raven data files
# unlink(list.files(pattern = "\\.txt$", path = tempdir()))
#
# #save Raven selection table in the temporary directory
# writeLines(selection_files[[5]], con = file.path(tempdir(),
# names(selection_files)[5]))
#
# rvn.dat <- imp_raven(all.data = TRUE, path = tempdir())
#
# # Peak freq contour dif length
# fcts <- extract_ts(X = rvn.dat, ts.column = "Peak Freq Contour (Hz)")
#
# head(fcts)
#
## ---- eval=T, echo=FALSE----------------------------------------------------------------------------------------------------------------------------
#remove previous raven data files
unlink(list.files(pattern = "\\.txt$", path = tempdir()))
#save Raven selection table in the temporary directory
writeLines(selection_files[[5]], con = file.path(tempdir(), names(selection_files)[5]))
#save Raven selection table in the temporary directory
rvn.dat <- imp_raven(all.data = TRUE, path = tempdir())
# Peak freq contour dif length
fcts <- extract_ts(X = rvn.dat, ts.column = "Peak Freq Contour (Hz)")
kbl <- kable(head(fcts), align = "c", row.names = F, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = FALSE, font_size = 11)
scroll_box(kbl, width = "808px",
box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---- eval=F, echo=T--------------------------------------------------------------------------------------------------------------------------------
#
# # Peak freq contour equal length
# fcts <- extract_ts(X = rvn.dat, ts.column = "Peak Freq Contour (Hz)", equal.length = TRUE)
#
# #look at the last rows wit no NAs
# head(fcts)
#
## ---- eval=T, echo = F------------------------------------------------------------------------------------------------------------------------------
# Peak freq contour equal length
fcts <- extract_ts(X = rvn.dat, ts.column = "Peak Freq Contour (Hz)",
equal.length = TRUE)
kbl <- kable(head(fcts), align = "c", row.names = F, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = FALSE, font_size = 11)
scroll_box(kbl, width = "808px",
box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---- eval=FALSE------------------------------------------------------------------------------------------------------------------------------------
#
# # Peak freq contour equal length 10 measurements
# fcts <- extract_ts(X = rvn.dat, ts.column = "Peak Freq Contour (Hz)",
# equal.length = T, length.out = 10)
#
# head(fcts)
#
## ---- eval=TRUE, echo=FALSE-------------------------------------------------------------------------------------------------------------------------
# Peak freq contour equal length 10 measurements
fcts <- extract_ts(X = rvn.dat, ts.column = "Peak Freq Contour (Hz)",
equal.length = T, length.out = 10)
kbl <- kable(head(fcts), align = "c", row.names = F, escape = FALSE)
kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = FALSE, font_size = 14)
# scroll_box(kbl, width = "900px",
# box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---- eval=F, echo=T--------------------------------------------------------------------------------------------------------------------------------
#
# freq_DTW(ts.df = fcts, path = tempdir())
#
## ---- eval=T, echo=F--------------------------------------------------------------------------------------------------------------------------------
kbl <- kable(freq_DTW(ts.df = fcts, path = tempdir()), align = "c", row.names = T, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = T, font_size = 12)
# row_spec(0, angle = 0)
scroll_box(kbl, height = "500px", width = "808px",
box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---- eval = F, echo = T----------------------------------------------------------------------------------------------------------------------------
#
# #to simplify the example select a subset of the columns
# st1 <- rvn.dat[ ,1:7]
#
# #check original column names
# st1
## ---- eval = T, echo = F----------------------------------------------------------------------------------------------------------------------------
#to simplify the example select a subset of the columns
st1 <- rvn.dat[ ,1:7]
#check original column names
kbl <- kable(st1, align = "c", row.names = F, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = FALSE, font_size = 14)
## ---- eval = F, echo = T----------------------------------------------------------------------------------------------------------------------------
# # Relabel the basic columns required by warbleR
# relabel_colms(st1)
#
## ---- eval = T, echo = F----------------------------------------------------------------------------------------------------------------------------
rc <- relabel_colms(st1)
#check original column names
kbl <- kable(rc, align = "c", row.names = F, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = FALSE, font_size = 14)
## ---- eval = F, echo = T----------------------------------------------------------------------------------------------------------------------------
#
# # 2 additional column
# relabel_colms(st1, extra.cols.name = "View",
# extra.cols.new.name = "Raven view")
#
## ---- eval = T, echo = F----------------------------------------------------------------------------------------------------------------------------
# plus 2 additional column
rc <- relabel_colms(st1, extra.cols.name = "View",
"Raven view")
kbl <- kable(rc, align = "c", row.names = F, escape = FALSE)
kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = FALSE, font_size = 14)
## ---- eval=F, echo=T--------------------------------------------------------------------------------------------------------------------------------
#
# #create new folder to put cuts
# dir.create(file.path(tempdir(), "cuts"))
#
# # add a rowname column to be able to match cuts and selections
# selec.table$rownames <- sprintf("%02d",1:nrow(selec.table))
#
# # cut files
# cut_sels(X = selec.table, mar = 0.05, path = tempdir(), dest.path =
# file.path(tempdir(), "cuts"),
# labels = c("rownames", "sound.files", "selec"), pb = FALSE)
#
# #list cuts
# list.files(path = file.path(tempdir(), "cuts"))
#
## ---- eval=F, echo=T--------------------------------------------------------------------------------------------------------------------------------
#
# # Import output (change the name of the file if you used a different one)
# xcorr.rav <- imp_corr_mat(file = "BatchCorrOutput.txt", path = tempdir())
#
## ---- eval=T, echo=F--------------------------------------------------------------------------------------------------------------------------------
#save Raven selection table in the temporary directory
writeLines(selection_files[[6]], con = file.path(tempdir(), names(selection_files)[6]))
# Import output (change the name of the file if you used a different one)
xcorr.rav <- imp_corr_mat(file = "BatchCorrOutput.txt", path = tempdir())
## ---- eval=F----------------------------------------------------------------------------------------------------------------------------------------
#
# xcorr.rav$correlation
#
## ---- eval=T, echo=F--------------------------------------------------------------------------------------------------------------------------------
kbl <- kable(xcorr.rav$correlation, align = "c", row.names = T, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = T, font_size = 12)
# row_spec(0, angle = 0)
scroll_box(kbl, height = "500px", width = "808px",
box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---- eval=F----------------------------------------------------------------------------------------------------------------------------------------
# xcorr.rav$`lag (s)`
#
## ---- eval=T, echo=F--------------------------------------------------------------------------------------------------------------------------------
kbl <- kable(xcorr.rav$`lag (s)`, align = "c", row.names = T, escape = FALSE)
kbl <- kable_styling(kbl, bootstrap_options = c("striped", "hover", "condensed", "responsive"), full_width = T, font_size = 12)
scroll_box(kbl, height = "500px", width = "808px",
box_css = "border: 1px solid #ddd; padding: 5px; ", extra_css = NULL)
## ---------------------------------------------------------------------------------------------------------------------------------------------------
# Convert the Raven cross-correlation matrix to a dissimilarity (1 - r)
xcorr.rvn <- 1- xcorr.rav$correlation
# Reorder rows/columns alphabetically so they match the selection table order
xcorr.rvn <- xcorr.rvn[order(rownames(xcorr.rvn)), order(colnames(xcorr.rvn))]
# Coerce to a 'dist' object, as required by vegan::mantel()
xcorr.rvn <- as.dist(xcorr.rvn)
# Measure spectral parameters with warbleR (specan is the legacy name of
# spectro_analysis -- TODO confirm it is still exported by this warbleR version)
sp.wrblR <- specan(selec.table, bp = c(1, 11), wl = 150,
           pb = FALSE, path = tempdir())
# Euclidean distances between selections in acoustic-parameter space
dist.sp.wrblR <- dist(sp.wrblR)
# Mantel test: association between cross-correlation and parameter distances
vegan::mantel(xcorr.rvn, dist.sp.wrblR)
## ---- eval=FALSE, echo=T----------------------------------------------------------------------------------------------------------------------------
# # Select data for a single sound file
# st1 <- selec.table[selec.table$sound.files == "Phae.long1.wav", ]
#
# # Export data of a single sound file
# exp_raven(st1, file.name = "Phaethornis 1", khz.to.hz = TRUE, path = tempdir())
## ---- eval=FALSE, echo=T----------------------------------------------------------------------------------------------------------------------------
# # Select data for a single sound file
# st1 <- selec.table[selec.table$sound.files == "Phae.long1.wav",]
#
# # Export data of a single sound file
# exp_raven(st1, file.name = "Phaethornis 1", khz.to.hz = TRUE,
# sound.file.path = tempdir(), path = tempdir())
#
## ---- eval=FALSE, echo=T----------------------------------------------------------------------------------------------------------------------------
#
# exp_raven(X = selec.table, file.name = "Phaethornis multiple sound files",
# sound.file.path = tempdir(), single.file = TRUE, path = tempdir())
## ---- eval=FALSE, echo=T----------------------------------------------------------------------------------------------------------------------------
# # here replace with the path where Raven is install in your computer
# raven.path <- "PATH_TO_RAVEN_DIRECTORY_HERE"
#
# # run function
# run_raven(raven.path = raven.path, sound.files = c("Phae.long1.wav", "Phae.long2.wav", "Phae.long3.wav", "Phae.long4.wav"),
# import = TRUE, all.data = TRUE, path = tempdir())
#
## ---- eval=FALSE, echo=T----------------------------------------------------------------------------------------------------------------------------
#
# detec.res <- raven_batch_detec(raven.path = raven.path,
# sound.files = "BlackCappedVireo.aif",
# path = file.path(raven.path, "Examples"))
#
## ---- eval=T, echo=F--------------------------------------------------------------------------------------------------------------------------------
unlink(list.files(pattern = "\\.wav$|\\.txt$", ignore.case = TRUE, path = tempdir()))
## ----session info, echo=F---------------------------------------------------------------------------------------------------------------------------
sessionInfo()
|
d46585f512850c0a5dfacc80decc3863c4054350
|
c7cb5f38754c00cb28c172a2a8a33b552f03c7dc
|
/generateMAR_maxPat_study2_p2_ts.R
|
0129d07ce611a8959e6003038d6fe79c4222f01f
|
[] |
no_license
|
cathyxijuan/missing-data-project-2
|
92fd9ee60c85f94016d17059f848614f3a911a0f
|
9875e076469e49f80e82450a1b2d081473863264
|
refs/heads/master
| 2021-08-17T07:02:49.980457
| 2021-01-01T07:08:55
| 2021-01-01T07:08:55
| 237,735,428
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26,938
|
r
|
generateMAR_maxPat_study2_p2_ts.R
|
library(lavaan)
setwd("/Volumes/SP PHD U3/missing-data-project-2")
source("functions.R")
source("Models_WM.R") # done
# Simulate complete data from a lavaan population model, then impose *strong*
# MAR missingness on two indicators: for i in 1:2, the value in column i + 6
# (x7, x8) is deleted whenever the same row's value in column i (x1, x2)
# exceeds the standard-normal cutoff.
# NOTE(review): the original header claimed missingness on x11/x12 depending
# on x7/x8; the code actually deletes x7/x8 based on x1/x2 -- confirm intent.
#
# Arguments:
#   model              - lavaan model syntax defining the population model
#   sample.nobs        - number of complete observations to simulate
#   missing.percentage - expected proportion of deleted values per target column
# Returns: a data.frame with columns x1..x12 containing NAs in x7 and x8.
MARStrong_2Var <- function(model, sample.nobs = 1000000, missing.percentage = 0.5){
  data <- simulateData(model, sample.nobs = sample.nobs)
  # keep the twelve indicators in a fixed x1..x12 order
  simuData <- data[, paste0("x", 1:12)]
  # P(Z > cutoff) = missing.percentage, so the expected missing rate matches
  cutoff <- qnorm(missing.percentage, lower.tail = FALSE)
  for (i in 1:2) {
    simuData[simuData[, i] > cutoff, i + 6] <- NA
  }
  simuData
}
# Simulate complete data, then impose *weak* MAR missingness on two
# indicators: for i in 1:2, the value in column i + 6 (x7, x8) is deleted
# with probability 0.75 when the row's column-i value (x1, x2) lies above the
# standard-normal cutoff and with probability 0.25 when it lies below,
# weakening the dependency relative to MARStrong_2Var.
# NOTE(review): the original header claimed missingness on x11/x12 depending
# on x7/x8; the code actually deletes x7/x8 based on x1/x2 -- confirm intent.
#
# Arguments:
#   model              - lavaan model syntax defining the population model
#   sample.nobs        - number of complete observations to simulate
#   missing.percentage - targeted overall proportion of deleted values
# Returns: a data.frame with columns x1..x12 containing NAs in x7 and x8.
MARWeak_2Var <- function(model, sample.nobs = 1000000, missing.percentage = 0.5){
  data <- simulateData(model, sample.nobs = sample.nobs)
  simuData <- data[, paste0("x", 1:12)]
  cutoff <- qnorm(missing.percentage, lower.tail = FALSE)
  for (i in 1:2) {
    # rows above the cutoff: delete the target value with probability 0.75
    above <- which(simuData[, i] > cutoff)
    del.above <- as.logical(sample(0:1, length(above), replace = TRUE,
                                   prob = c(0.25, 0.75)))  # TRUE = delete
    # rows below the cutoff: delete with probability 0.25 (rows exactly at
    # the cutoff -- probability ~0 for continuous data -- are never deleted)
    below <- which(simuData[, i] < cutoff)
    del.below <- as.logical(sample(0:1, length(below), replace = TRUE,
                                   prob = c(0.75, 0.25)))  # TRUE = delete
    simuData[c(above[del.above], below[del.below]), i + 6] <- NA
  }
  simuData
}
# Simulate complete data, then impose *strong* MAR missingness on four
# indicators: for i in 1:4, the value in column i + 6 (x7..x10) is deleted
# whenever the same row's value in column i (x1..x4) exceeds the cutoff.
# NOTE(review): the original header claimed x9/x11 depend on x7 and x10/x12
# on x8; the code deletes x7..x10 based on x1..x4 -- confirm intent.
#
# Arguments:
#   model              - lavaan model syntax defining the population model
#   sample.nobs        - number of complete observations to simulate
#   missing.percentage - expected proportion of deleted values per target column
# Returns: a data.frame with columns x1..x12 containing NAs in x7..x10.
MARStrong_4Var <- function(model, sample.nobs = 1000000, missing.percentage = 0.5){
  data <- simulateData(model, sample.nobs = sample.nobs)
  simuData <- data[, paste0("x", 1:12)]
  # P(Z > cutoff) = missing.percentage, so the expected missing rate matches
  cutoff <- qnorm(missing.percentage, lower.tail = FALSE)
  for (i in 1:4) {
    simuData[simuData[, i] > cutoff, i + 6] <- NA
  }
  simuData
}
# Simulate complete data, then impose *weak* MAR missingness on four
# indicators: for i in 1:4, the value in column i + 6 (x7..x10) is deleted
# with probability 0.75 when the row's column-i value (x1..x4) lies above the
# standard-normal cutoff and with probability 0.25 when it lies below.
# NOTE(review): the original header claimed x9/x11 depend on x7 and x10/x12
# on x8; the code deletes x7..x10 based on x1..x4 -- confirm intent.
#
# Arguments:
#   model              - lavaan model syntax defining the population model
#   sample.nobs        - number of complete observations to simulate
#   missing.percentage - targeted overall proportion of deleted values
# Returns: a data.frame with columns x1..x12 containing NAs in x7..x10.
MARWeak_4Var <- function(model, sample.nobs = 1000000, missing.percentage = 0.5){
  data <- simulateData(model, sample.nobs = sample.nobs)
  simuData <- data[, paste0("x", 1:12)]
  cutoff <- qnorm(missing.percentage, lower.tail = FALSE)
  for (i in 1:4) {
    # rows above the cutoff: delete the target value with probability 0.75
    above <- which(simuData[, i] > cutoff)
    del.above <- as.logical(sample(0:1, length(above), replace = TRUE,
                                   prob = c(0.25, 0.75)))  # TRUE = delete
    # rows below the cutoff: delete with probability 0.25 (rows exactly at
    # the cutoff -- probability ~0 for continuous data -- are never deleted)
    below <- which(simuData[, i] < cutoff)
    del.below <- as.logical(sample(0:1, length(below), replace = TRUE,
                                   prob = c(0.75, 0.25)))  # TRUE = delete
    simuData[c(above[del.above], below[del.below]), i + 6] <- NA
  }
  simuData
}
# Simulate complete data, then impose *strong* MAR missingness on six
# indicators: for i in 1:6, the value in column i + 6 (x7..x12) is deleted
# whenever the same row's value in column i (x1..x6) exceeds the cutoff.
# NOTE(review): the original header claimed x9/x11 depend on x7 and x10/x12
# on x8; the code deletes x7..x12 based on x1..x6 -- confirm intent.
#
# Arguments:
#   model              - lavaan model syntax defining the population model
#   sample.nobs        - number of complete observations to simulate
#   missing.percentage - expected proportion of deleted values per target column
# Returns: a data.frame with columns x1..x12 containing NAs in x7..x12.
MARStrong_6Var <- function(model, sample.nobs = 1000000, missing.percentage = 0.5){
  data <- simulateData(model, sample.nobs = sample.nobs)
  simuData <- data[, paste0("x", 1:12)]
  # P(Z > cutoff) = missing.percentage, so the expected missing rate matches
  cutoff <- qnorm(missing.percentage, lower.tail = FALSE)
  for (i in 1:6) {
    simuData[simuData[, i] > cutoff, i + 6] <- NA
  }
  simuData
}
# Simulate complete data, then impose *weak* MAR missingness on six
# indicators: for i in 1:6, the value in column i + 6 (x7..x12) is deleted
# with probability 0.75 when the row's column-i value (x1..x6) lies above the
# standard-normal cutoff and with probability 0.25 when it lies below.
# NOTE(review): the original header claimed x9/x11 depend on x7 and x10/x12
# on x8; the code deletes x7..x12 based on x1..x6 -- confirm intent.
#
# Arguments:
#   model              - lavaan model syntax defining the population model
#   sample.nobs        - number of complete observations to simulate
#   missing.percentage - targeted overall proportion of deleted values
# Returns: a data.frame with columns x1..x12 containing NAs in x7..x12.
MARWeak_6Var <- function(model, sample.nobs = 1000000, missing.percentage = 0.5){
  data <- simulateData(model, sample.nobs = sample.nobs)
  simuData <- data[, paste0("x", 1:12)]
  cutoff <- qnorm(missing.percentage, lower.tail = FALSE)
  for (i in 1:6) {
    # rows above the cutoff: delete the target value with probability 0.75
    above <- which(simuData[, i] > cutoff)
    del.above <- as.logical(sample(0:1, length(above), replace = TRUE,
                                   prob = c(0.25, 0.75)))  # TRUE = delete
    # rows below the cutoff: delete with probability 0.25 (rows exactly at
    # the cutoff -- probability ~0 for continuous data -- are never deleted)
    below <- which(simuData[, i] < cutoff)
    del.below <- as.logical(sample(0:1, length(below), replace = TRUE,
                                   prob = c(0.75, 0.25)))  # TRUE = delete
    simuData[c(above[del.above], below[del.below]), i + 6] <- NA
  }
  simuData
}
#Arguments:
#pop.model.list: a list of lavaan models for the population
#fitted.mod: the lavaan model fitted to each simulated data set
#sample.nobs: numeric; sample size without missing data
#missing.percentage: numeric; the proportion of missing data
#missing.type: a character: "strong" or "weak"
#var.with.missing: the number of variables with missing data; it can be 2, 4, or 6
#simu.num: the number of simulation rounds
#' For each population model, generate MAR-missing data and collect the
#' test-statistic components of `fitted.mod` fitted to that data set.
#'
#' pop.model.list: list of lavaan population-model syntaxes; the hard-coded
#'   column labels below assume exactly 9 models (factor correlations 1..0.2).
#' fitted.mod: lavaan model syntax fitted to each simulated data set.
#' sample.nobs: complete-data sample size.
#' missing.percentage: proportion of missing data per affected variable.
#' missing.type: "strong" for strong MAR dependence; anything else means "weak"
#'   (same fallback as the original nested if/else chain).
#' var.with.missing: 2, 4, or 6 variables with missing values; any other value
#'   falls back to the 6-variable version (same fallback as the original).
#' simu.num: number of simulation replications.
#' Returns a list of length simu.num, each element a 30 x 9 rounded matrix of
#' fit components (one column per population model).
fit.components.simu <- function(pop.model.list, fitted.mod, sample.nobs = 1000000,
                                missing.percentage, missing.type, var.with.missing,
                                simu.num = 1){
  # Resolve the MAR data-generating function once instead of re-branching
  # inside the double loop; the fallbacks reproduce the original dispatch.
  n.miss <- if (var.with.missing == 2) 2 else if (var.with.missing == 4) 4 else 6
  type <- if (missing.type == "strong") "Strong" else "Weak"
  mar.fun <- get(paste0("MAR", type, "_", n.miss, "Var"))
  fit.indices.list <- vector(mode = "list", length = simu.num)
  for (j in seq_len(simu.num)) {
    # NOTE(review): 30 must equal the length of the ts.components() output
    # vector -- confirm against its definition elsewhere in the file.
    fit.indices.MAR <- matrix(nrow = 30, ncol = 0)
    for (i in seq_along(pop.model.list)) {
      simuData <- mar.fun(pop.model.list[[i]], sample.nobs, missing.percentage)
      fit.ind.vector <- ts.components(fitted.mod, dataset = simuData)
      fit.indices.MAR <- cbind(fit.indices.MAR, fit.ind.vector)
    }
    # NOTE(review): labels assume length(pop.model.list) == 9; colnames()
    # errors for any other length.
    colnames(fit.indices.MAR) <- paste("FC =", c("1", "0.9", "0.8", "0.7", "0.6",
                                                 "0.5", "0.4", "0.3", "0.2"))
    fit.indices.MAR <- round(fit.indices.MAR, 8)
    fit.indices.list[[j]] <- fit.indices.MAR
    print(j)  # progress indicator, kept from the original
  }
  fit.indices.list
}
# Reproducibility seed and output directory for the saved .RData files.
set.seed(111)
# NOTE(review): machine-specific absolute path; consider making this configurable.
setwd("/Volumes/SP PHD U3/missing-data-project-2/Simu results TS")
# The original script unrolled the same four-step stanza twelve times:
# (2 | 4 | 6 variables with missing data) x (strong | weak MAR) x (20% | 50%
# missing). The loops below run the conditions in EXACTLY the original order
# (2-var: strong 20, strong 50, weak 20, weak 50; then 4-var; then 6-var), so
# the random-number stream -- and therefore every saved object -- is unchanged.
for (n.var.missing in c(2, 4, 6)) {
  for (mar.type in c("strong", "weak")) {
    for (pct.label in c("20", "50")) {
      miss.pct <- as.numeric(pct.label) / 100
      # Historical object-name stem, e.g.
      # "fitMAR_Strong_maxPat_20PerMiss_2VarMiss_WM_ts".
      base <- sprintf("fitMAR_%s_maxPat_%sPerMiss_%dVarMiss_WM_ts",
                      if (mar.type == "strong") "Strong" else "Weak",
                      pct.label, n.var.missing)
      compo.name  <- paste0(base, "_compo_n1000000")
      fit.name    <- paste0(base, "_n1000000")
      checks.name <- paste0(base, "_checks_n1000000")
      compo  <- fit.components.simu(pop.model.list = pop.mod,
                                    fitted.mod = fitted.mod,
                                    missing.percentage = miss.pct,
                                    sample.nobs = 1000000,
                                    missing.type = mar.type,
                                    var.with.missing = n.var.missing)
      fit    <- ts.fit(compo)
      checks <- ts.checks(compo, fit)
      # Bind results to the historical global names and save each under
      # "<name>.RData" (compo, then fit, then checks -- the original order).
      assign(compo.name, compo)
      assign(fit.name, fit)
      assign(checks.name, checks)
      for (nm in c(compo.name, fit.name, checks.name)) {
        save(list = nm, file = paste0(nm, ".RData"))
      }
    }
  }
}
# (Trailing non-R text -- a dataset-viewer page artifact -- commented out so the file parses:)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.