blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
974ebb715659c071e7de1d64b33a2e549e033841
|
ebb359239eaa04c8e02ecdd0a32b260737c7c1d0
|
/R/shared_functions.R
|
465deb108373f63fe4dd950ac92a128d4a7128f9
|
[] |
no_license
|
Halvee/rvatk
|
ffd9cd200f5f63090a2e877683569f75fdd42c16
|
dea66eba71b52f5397e4c9684a588e4285412e2a
|
refs/heads/master
| 2021-01-12T08:10:12.717410
| 2018-04-06T18:11:15
| 2018-04-06T18:11:15
| 76,492,599
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,045
|
r
|
shared_functions.R
|
# GLOBAL VARS
# Canonical column names used by genotype / collapsing tables in this package.
kGeneNameCol <- "Gene Name"
kVariantIdCol<- "Variant ID"
kSampleNameCol <- "Sample Name"
kGenotypeCol <- "Genotype"
# Columns that together identify a qualifying variant call.
kLvgQualvarCols <- c(kGeneNameCol, kVariantIdCol, kSampleNameCol)
kCphtGeneNameCol <- "Gene"
# Columns shared by every case/control burden-test result row.
kCasectrlAllColnames <- c("name","method","alternative")
# Result schema for the Fisher's-exact (collapsing) branch of BurdenTest.
kCasectrlFetColnames <- c(kCasectrlAllColnames,
                          "n.case.qual", "n.case.nonqual",
                          "n.ctrl.qual", "n.ctrl.nonqual",
                          "odds.ratio",
                          "conf.int.lower", "conf.int.upper",
                          "p.value")
# Result schema for the Poisson (variant-rate) branch of BurdenTest.
kCasectrlPoissonColnames<- c(kCasectrlAllColnames,
                             "n.var.case", "n.case", "var.rate.case",
                             "n.var.ctrl", "n.ctrl", "var.rate.ctrl",
                             "conf.int.lower", "conf.int.upper",
                             "p.value")
#' read in a file where each line is a separate entry into a vector.
#'
#' @param filename name of input file.
#' @return character vector with one element per whitespace-separated token.
#'
#' @export
ReadListFile <- function(filename) {
  # quiet=TRUE suppresses scan()'s "Read n items" console message.
  mylist <- scan(filename, what=character(), quiet=TRUE)
  return(mylist)
}
#' read in a .condition file, where each line represents conditions to apply.
#'
#' Each line is expected to hold three whitespace-separated fields:
#' column name, relational operator, and threshold value.
#'
#' @param condition.filename name of .condition file.
#' @return data frame with columns col.name, rel.operator, val.
#' @export
ReadConditionFile <- function(condition.filename) {
  # stringsAsFactors spelled out (TRUE/FALSE are reassignment-proof, T/F not).
  cnd <- read.table(condition.filename, stringsAsFactors=FALSE)
  colnames(cnd) <- c("col.name", "rel.operator", "val")
  return(cnd)
}
#' add a leave-one-out minor allele freq. column with values to genotype table.
#'
#' For each row, "loo maf" is the number of OTHER rows carrying the same
#' variant divided by n.samples (i.e. the carrier count excluding the row
#' itself); singleton variants get 0.
#'
#' @param geno data frame of genotypes.
#' @param n.samples total number of samples.
#' @param variant.id.col name of the variant ID column in `geno`.
#' @return `geno` with a numeric "loo maf" column added.
#' @export
AddLooMafCol <- function(geno, n.samples, variant.id.col="Variant ID") {
  variant.ids <- as.character(geno[[variant.id.col]])
  # One table() pass replaces the original per-count loop; the unused
  # max.loo.count variable is gone. (count - 1) is the leave-one-out
  # carrier count, which is 0 for singletons -- matching the old default.
  var.counts <- table(variant.ids)
  loo.counts <- as.numeric(var.counts[variant.ids]) - 1
  geno[["loo maf"]] <- loo.counts / n.samples
  return(geno)
}
#' subset on table based on rows in condition data frame.
#'
#' Applies every condition row in turn; the surviving rows satisfy ALL
#' conditions.
#'
#' @param tbl data frame of values with column names.
#' @param cnd data frame of conditions to subset tbl on
#'            (columns: col.name, rel.operator, val).
#' @return the subset of `tbl` passing every condition.
#' @export
TblCndSubset <- function(tbl, cnd) {
  # seq_len() is safe for an empty condition table (1:nrow(cnd) would
  # iterate over c(1, 0) and index out of bounds).
  for (i in seq_len(nrow(cnd))) {
    tbl <- ApplyThreshold(tbl,
                          cnd[i, "col.name"],
                          cnd[i, "rel.operator"],
                          cnd[i, "val"])
  }
  return(tbl)
}
#' split string to vector
#'
#' @param str input string; NULL yields an empty vector.
#' @param type type of variable that each output vector element should be.
#' @param delim delimiter character in input string.
#' @return vector of the split elements, coerced to `type`.
#' @export
StringToSet <- function(str,
                        type="character",
                        delim=",") {
  if (is.null(str)) {
    return(c())
  }
  str.set <- strsplit(str, delim)[[1]]
  # Convert the whole vector at once. The original converted element-by-
  # element INTO a character vector, so each converted value was coerced
  # straight back to character and `type` had no effect; it also indexed
  # 1:length() which misbehaves on an empty split result.
  if (type != "character" && length(str.set) > 0) {
    str.set <- as(str.set, type)
  }
  return(str.set)
}
#' subset on input table on a user defined condition.
#'
#' @param tbl input data frame.
#' @param col.name column name to subset input data frame on.
#' @param cmp string representing which operator to apply on df column:
#'        "in"/"notin" (comma-separated set membership), "grep"/"grepv"
#'        (regex match / inverse match), "eq"/"==", "noteq"/"!=",
#'        "gt"/">", "gte"/">=", "lt"/"<", "lte"/"<=". Any other value
#'        returns `tbl` unchanged (matching the original behavior).
#' @param val value linked to input tbl by cmp operator.
#' @param na.numeric.convert value substituted for NA before numeric
#'        comparisons (default 0).
#' @return the subset of `tbl` rows satisfying the condition.
#' @export
ApplyThreshold <- function(tbl, col.name, cmp, val,
                           na.numeric.convert=0) {
  column <- tbl[[col.name]]
  if (cmp == "in") {
    keep <- column %in% StringToSet(val)
  } else if (cmp == "notin") {
    keep <- !(column %in% StringToSet(val))
  } else if (cmp == "grep") {
    keep <- grepl(val, column)
  } else if (cmp == "grepv") {
    keep <- !grepl(val, column)
  } else if (cmp %in% c("eq", "==")) {
    keep <- column == val
  } else if (cmp %in% c("noteq", "!=")) {
    keep <- column != val
  } else if (cmp %in% c("gt", ">", "gte", ">=", "lt", "<", "lte", "<=")) {
    # Shared setup for the four numeric comparisons (was repeated 4x):
    # coerce to double and replace NA by na.numeric.convert.
    threshold <- as.double(val)
    values <- as.double(column)
    values[is.na(values)] <- na.numeric.convert
    keep <- switch(cmp,
                   "gt" = , ">" = values > threshold,
                   "gte" = , ">=" = values >= threshold,
                   "lt" = , "<" = values < threshold,
                   "lte" = , "<=" = values <= threshold)
  } else {
    # Unknown operator: pass the table through untouched.
    return(tbl)
  }
  # which() drops NA comparison results, as the original did.
  return(tbl[which(keep), , drop=FALSE])
}
#' read a large table (can be gzipped and/or GB in size) into a data frame.
#' @param filename name of table file; a ".gz" suffix is streamed through
#'        `gunzip -c` before being handed to fread.
#' @param ... args to be passed to data.table fread function
#' @return data frame (data.table=FALSE) with the file contents.
#' @importFrom data.table fread
#' @export
ReadLargeTable <- function(filename, ...) {
  # Escape the dot: the old pattern ".gz$" matched ANY character before
  # "gz" (e.g. "data.tgz"), not just a literal ".gz" extension.
  if (grepl("\\.gz$", filename)) {
    filename.full <- paste("gunzip -c",
                           filename,
                           sep=" ")
  } else {
    filename.full <- filename
  }
  mat <- fread(filename.full,
               data.table=FALSE,
               ...)
  return(mat)
}
#' write large table to output file, with support for gzip.
#'
#' @param tbl data frame to be written to file.
#' @param filename name of output file to write to, ".gz" at end for gzip.
#' @param ... parameters to be passed on to write.table function.
#' @export
WriteLargeTable <- function(tbl, filename, ...) {
  # Escape the dot: the old pattern ".gz$" also matched names like
  # "data.tgz"; we only want a literal ".gz" extension.
  if (grepl("\\.gz$", filename)) {
    # write.table opens and closes the gz connection itself.
    filename.full <- gzfile(filename)
  } else {
    filename.full <- filename
  }
  write.table(tbl, file=filename.full,
              ...)
}
#' perform a case/control burden test on aggregated rare variant sequence data.
#'
#' @param mat collapsing matrix data structure.
#' @param genes list of genes to include in burden test.
#' @param cases vector of case names.
#' @param ctrls vector of ctrl names.
#' @param collapse.thresh integer threshold for inclusion of sample genotype,
#'                        if >0 then collapsing analyses are used, else
#'                        poisson tests used (case vs. ctrl variant rate).
#' @param ... additional args for fisher.test or poisson.test
#' @return named list of results. The poisson branch fills the fields of
#'         kCasectrlPoissonColnames, the Fisher branch those of
#'         kCasectrlFetColnames (each minus "name", which callers set).
#' @export
BurdenTest <- function(mat, genes,
                       cases, ctrls,
                       collapse.thresh=0,
                       ...) {
  # NOTE(review): the denominators n.cases/n.ctrls are taken BEFORE
  # intersecting with the matrix columns, so samples absent from `mat`
  # contribute 0 variants yet still inflate n -- confirm this is intended.
  n.cases <- length(cases)
  n.ctrls <- length(ctrls)
  cases <- intersect(cases, colnames(mat))
  ctrls <- intersect(ctrls, colnames(mat))
  genes <- intersect(genes, rownames(mat))
  res <- list()
  # Per-sample total of qualifying variant counts across the chosen genes.
  mat.g.counts <- colSums(mat[genes, , drop=F])
  if (collapse.thresh == 0) {
    # Rate-based branch: test case variant counts against the rate
    # observed in controls via a one-sample Poisson test.
    mat.g.counts.cases <- sum(mat.g.counts[cases])
    mat.g.counts.ctrls <- sum(mat.g.counts[ctrls])
    var.rate.cases <- mat.g.counts.cases / n.cases
    var.rate.ctrls <- mat.g.counts.ctrls / n.ctrls
    res.full <- poisson.test(mat.g.counts.cases, n.cases,
                             var.rate.ctrls, ...)
    res$n.var.case <- mat.g.counts.cases
    res$n.case <- n.cases
    res$var.rate.case <- var.rate.cases
    res$n.var.ctrl <- mat.g.counts.ctrls
    res$n.ctrl <- n.ctrls
    res$var.rate.ctrl <- var.rate.ctrls
    res$conf.int.lower <- res.full$conf.int[1]
    res$conf.int.upper <- res.full$conf.int[2]
    res$p.value <- res.full$p.value
  } else {
    # Collapsing branch: dichotomize each sample (carrier yes/no at the
    # threshold) and run Fisher's exact test on the 2x2 table.
    mat.g.counts <- ifelse(mat.g.counts >= collapse.thresh, 1, 0)
    mat.g.counts.cases <- sum(mat.g.counts[cases])
    mat.g.counts.ctrls <- sum(mat.g.counts[ctrls])
    n.case.qual <- mat.g.counts.cases
    n.case.nonqual <- n.cases - mat.g.counts.cases
    n.ctrl.qual <- mat.g.counts.ctrls
    n.ctrl.nonqual <- n.ctrls - mat.g.counts.ctrls
    FET.tbl <- data.frame(case=c(n.case.qual, n.case.nonqual),
                          ctrl=c(n.ctrl.qual, n.ctrl.nonqual))
    res.full <- fisher.test(FET.tbl, ...)
    res$n.case.qual <- n.case.qual
    res$n.case.nonqual <- n.case.nonqual
    res$n.ctrl.qual <- n.ctrl.qual
    res$n.ctrl.nonqual <- n.ctrl.nonqual
    res$odds.ratio <- res.full$estimate
    res$conf.int.lower <- res.full$conf.int[1]
    res$conf.int.upper <- res.full$conf.int[2]
    res$p.value <- res.full$p.value
  }
  # Normalize htest strings ("Fisher's Exact Test..." etc.) for table use.
  res$method <- ProcessString(res.full$method)
  res$alternative <- ProcessString(res.full$alternative)
  return(res)
}
#' convert process input string into a form more agreeable with R.
#'
#' Replaces every space with a dot and strips single quotes (e.g. the
#' `method` strings found on htest objects).
#' @param str input string.
#' @return the transformed string.
#' @export
ProcessString <- function(str) {
  gsub("'", "", gsub(" ", ".", str))
}
#' read genesets file to a list of genesets.
#'
#' Each line is expected to be "<geneset name>\t<gene1,gene2,...>".
#'
#' @param genesets.file name of .genesets file.
#' @return named list mapping geneset name -> character vector of genes.
#' @export
ReadGenesetsFile <- function(genesets.file) {
  genesets <- list()
  # readLines(path) opens and closes the connection itself; the unused
  # `rows` accumulator is gone, and iterating over the lines directly
  # avoids the 1:length() trap on an empty file.
  for (line in readLines(genesets.file)) {
    fields <- strsplit(line, "\t")[[1]]
    genesets[[ fields[1] ]] <- strsplit(fields[2], ",")[[1]]
  }
  return(genesets)
}
#' update input table with values of res.list at row i
#' @param tbl input table.
#' @param row.i row number in table to edit.
#' @param res.list list of results to iterate through and add vals to table.
#' @return `tbl` with row `row.i` overwritten from `res.list`, matched
#'         by column name.
#' @export
UpdateTable <- function(tbl, row.i, res.list) {
  # Fill each column of the target row from the equally-named list entry.
  for (column in colnames(tbl)) {
    tbl[row.i, column] <- res.list[[column]]
  }
  return(tbl)
}
#' table sampleped data frame and get list of individual ID -> family ID.
#' @param sampleped data frame of sample data in sampleped format
#'        (column 1 = family ID, column 2 = individual ID).
#' @return named list mapping individual ID -> family ID; a duplicated
#'         individual ID keeps its last row's family ID.
#' @export
SamplepedIidFidList <- function(sampleped) {
  iid.fid <- list()
  # seq_len() is safe for an empty sampleped (1:nrow would yield c(1, 0)).
  for (i in seq_len(nrow(sampleped))) {
    iid.fid[[ sampleped[i, 2] ]] <- sampleped[i, 1]
  }
  return(iid.fid)
}
#' for input variant calls and genotypes, return genotype counts, taking into
#' account the chromosome and sample gender for each variant call. For
#' example, a 'homozygous' call on an X chromosome of a male is an allele
#' count of only 1, rather than 2, whereas a female homozygous call on an X
#' chromosome is an allele count of 2. Such rules do not apply to
#' autosomes, where each sample should have two copies of each.
#' @param sample.ID vector of samples IDs
#' @param variant.ID vector of variant IDs; X-chromosome variants are
#'        recognized by an "X-" substring in the ID.
#' @param genotype vector of genotype strings (het / hom)
#' @param sampleped data frame of samples in sampleped format
#'        (column 2 = individual ID, column 5 = gender, 1 = male)
#' @return numeric vector of allele counts parallel to the input vectors.
#' @export
GetAlleleCounts <- function(sample.ID,
                            variant.ID,
                            genotype,
                            sampleped) {
  # "hom" contributes two allele copies, anything else ("het") one.
  allele.count <- ifelse(genotype=="hom", 2, 1)
  # Build an IID -> gender lookup. fromLast=TRUE mirrors the original
  # row loop, where a duplicated IID kept its last row's gender.
  iids <- as.character(sampleped[, 2])
  keep <- !duplicated(iids, fromLast=TRUE)
  gender.lookup <- setNames(as.numeric(sampleped[keep, 5]), iids[keep])
  gender <- unname(gender.lookup[as.character(sample.ID)])
  # Male (gender == 1) hom calls on the X chromosome are hemizygous:
  # only one copy exists. which() drops NA genders (samples missing
  # from the ped), where the old NA logical subscript would error;
  # the old redundant nrow() pre-check is unnecessary since assigning
  # through an empty index is a no-op.
  hemizygous <- which(grepl("X-", variant.ID) &
                      gender == 1 &
                      allele.count == 2)
  allele.count[hemizygous] <- 1
  return(allele.count)
}
#' load expression text from a string, or from the first line of a file.
#'
#' @param expression.str expression text, or a file path when `is.file`.
#' @param is.file if TRUE, `expression.str` names a file whose first line
#'        holds the expression text.
#' @return result of `as.expression` on the text: a one-element expression
#'         vector holding the character string.
LoadExpression<-function(expression.str, is.file=FALSE) {
  if (is.file) {
    # quiet=TRUE suppresses scan()'s "Read n items" console chatter.
    lines <- scan(expression.str, what=character(), sep="\n", quiet=TRUE)
    expr.text <- lines[1]
  } else {
    expr.text <- expression.str
  }
  # NOTE(review): as.expression() on a character string yields an
  # expression CONTAINING the string, not parsed code; callers wanting
  # evaluable code would need parse(text=...). Confirm intended.
  expr <- as.expression(expr.text)
  return(expr)
}
|
a6a9341769eaf9920195a3bfd8c9b00b4d6dea4e
|
4c8bc2a54b8126162adb7ded82fe92c14ef78236
|
/USArrests.R.R
|
7b55d46fbd559cb7de1c9bf63e97ebe1cf4d6b2b
|
[] |
no_license
|
fall2018-wallace/snehab_dataviz
|
f77fdab1836c6fb7bb99d14a5733574ad210a14e
|
c7469d222b55206595bdabc2543a0daf017eb734
|
refs/heads/master
| 2020-04-02T18:32:59.593654
| 2018-10-25T17:23:55
| 2018-10-25T17:23:55
| 154,704,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 79
|
r
|
USArrests.R.R
|
# Copy the built-in USArrests dataset into a data frame and expose the
# state names (stored in the row names) as a regular column.
arrests<-data.frame(USArrests)
arrests$stateNames<-rownames(arrests)
# Auto-print the resulting data frame.
arrests
|
5d6fcb5ea8191f553995d0a30a0662e2d16d5c16
|
32902f92d3a16b13b1790ffdb8b2d553d08d5a12
|
/Code/LimpiezaTweets.R
|
0b5552e190c046a4bdc6320387163b01ff6386d6
|
[] |
no_license
|
jnm733/Andotter
|
d0b70af2ea40c22a2b339f121ad74b6d5d67495d
|
fa055d93f9b0fe62760ee849ba1d212e5efa1750
|
refs/heads/master
| 2020-12-25T15:08:41.163460
| 2016-09-11T19:49:00
| 2016-09-11T19:49:00
| 66,074,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,556
|
r
|
LimpiezaTweets.R
|
library(stringr)
# Safe lowercasing helper.
# Returns tolower(x), or NA if tolower() raises an error (e.g. on an
# invalid multibyte string), instead of propagating the error.
catch.error = function(x)
{
  # tryCatch both detects the error and supplies the NA fallback; the
  # original captured the error object, tested it with inherits(), and
  # then recomputed tolower(x) a second time on success.
  y = tryCatch(tolower(x), error=function(e) NA)
  return(y)
}
# Tweet-cleaning function: strips URLs, retweet markers, hashtags,
# mentions, punctuation, digits and runs of whitespace, repairs some
# mojibake bytes, de-accents Spanish vowels, and lowercases the result.
cleanTweets = function(tweet){
  # Remove http/https/ftp URLs.
  tweet = gsub("(f|ht)(tp)(s?)(://)(.*)[.|/](.*)", " ", tweet)
  # Remove "RT @user" / "via @user" retweet prefixes.
  tweet = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", " ", tweet)
  # Remove hashtags and @mentions.
  tweet = gsub("#\\w+", " ", tweet)
  tweet = gsub("@\\w+", " ", tweet)
  # NOTE(review): the next block looks like byte-level mojibake repair
  # for Latin-1 text read as UTF-8; "ñ" -> "ñ" is a no-op as written,
  # and "â" -> "¿" / "ã" -> "i" are hard to verify -- confirm these
  # replacements against real input data before changing anything.
  tweet = gsub("â", "¿", tweet)
  tweet = gsub("á", "a", tweet)
  tweet = gsub("é", "e", tweet)
  tweet = gsub("ó", "o", tweet)
  tweet = gsub("ñ", "ñ", tweet)
  # Strip punctuation (must run after the URL/mention rules above).
  tweet = gsub("[[:punct:]]", " ", tweet)
  tweet = gsub("ã", "i", tweet)
  tweet = gsub("Ã ", "i", tweet)
  # Strip digits, newlines, and collapse repeated whitespace.
  tweet = gsub("[[:digit:]]", " ", tweet)
  tweet = gsub("\n", " ", tweet)
  tweet = gsub("[ \t]{2,}", " ", tweet)
  # Trim leading/trailing whitespace.
  tweet = gsub("^\\s+|\\s+$", "", tweet)
  # De-accent the Spanish vowels.
  tweet = chartr('áéíóú','aeiou', tweet)
  # Lowercase via the error-safe wrapper (returns NA on failure).
  tweet = catch.error(tweet)
  tweet
}
# Clean a vector of tweets, then drop failed (NA) results and duplicates.
cleanTweetsAndRemoveNAs = function(Tweets) {
  cleaned = sapply(Tweets, cleanTweets)
  # cleanTweets() yields NA when lowercasing failed; discard those.
  cleaned = cleaned[!is.na(cleaned)]
  # Drop the names sapply() attached, then de-duplicate.
  names(cleaned) = NULL
  unique(cleaned)
}
|
5ffc976cc1013de311169f540ce180863e43ee92
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/coala/tests/testthat/test-tools.R
|
254642c65c565ce73d5e45b26b7f0cf0021d620d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 165
|
r
|
test-tools.R
|
# testthat suite for the package's tool helpers.
context("Tools")
test_that("it checks for packages", {
  skip_on_cran()
  # require_package() should succeed for a package that is installed...
  expect_true(require_package("coala"))
  # ...and raise an error for a package name that cannot exist.
  expect_error(require_package("2l3ihjrpaiwhf"))
})
|
6137ce2ccc4e84a69ccdf2e90f9ed33f7077d12d
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/corpustools/man/tCorpus-cash-set_special.Rd
|
32e6c10b453a5150f873d6f4a7f14314aec9ead8
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,073
|
rd
|
tCorpus-cash-set_special.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation_methods.r
\name{tCorpus$set_special}
\alias{tCorpus$set_special}
\alias{set_special}
\title{Designate column as columns with special meaning (token, lemma, POS, relation, parent)}
\arguments{
\item{token}{Name of the column that will be designated as the token, and renamed to 'token'}
\item{lemma}{Name of the column that will be designated as the lemma of the token, and renamed to 'lemma'}
\item{pos}{Name of the column that will be designated as the part-of-speech tag of the token, and renamed to 'POS'}
\item{relation}{Name of the column that will be designated as the dependency relation of the token to its parent, and renamed to 'relation'}
\item{parent}{Name of the column that will be designated as the parent of the token, and renamed to 'parent'}
}
\description{
\strong{Usage:}
}
\details{
## R6 method for class tCorpus. Use as tc$method (where tc is a tCorpus object).
\preformatted{set_special(token=NULL, lemma=NULL, POS=NULL, relation=NULL, parent=NULL)}
}
|
d8853cb09ba59590ec7e28962c1df89bb1397ea6
|
dacc730b25c72f29be816010e7aa67ae36f51f0a
|
/Store_Item_Demand_Forecasting_Challenge/src/models/arima.R
|
6e8c63b8ba6bb9b33aa7f3f57ff7fb4812167fd7
|
[] |
no_license
|
Mattias99/Kaggle
|
7601981db30810401da285374275772d45681708
|
8d1d76a36b0fe6081ccada61ffed09c9f29df354
|
refs/heads/master
| 2020-03-23T03:56:15.257100
| 2019-03-21T09:02:34
| 2019-03-21T09:02:34
| 125,730,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,743
|
r
|
arima.R
|
# ARIMA
# Evaluation of data on store = 1, item = 1
# NOTE(review): this script relies on objects defined elsewhere
# (train_one, train_xreg, test_one, test_xreg, mape) and on magrittr's
# %>% / %T>% pipes being attached.
# Determine ACF and PACF
# Non stationary
train_one$sales %T>%
  acf(main = "Orginal Time-Serie") %>%
  pacf(main = "Orginal Time-Serie")
# Transformation with non-season length
train_one$sales %>% diff(lag = 1) %T>%
  acf(main = "One-Diff Time-Serie") %>%
  pacf(main = "One-Diff Time-Serie")
# Transformation with non-season and season length
train_one$sales %>% diff(lag = 1) %>% diff(lag = 365) %T>%
  acf(main = "One-Diff and Seasonal-Diff") %>%
  pacf(main = "One-Diff and Seasonal-Diff")
# Line Plot for stationary sales
train_one$sales %>%
  diff(lag = 1) %>%
  plot(main = "Orginal Time-Serie", type = "l")
# Line Plot for season
train_one$sales %>%
  diff(lag = 1) %>%
  diff(lag = 365) %>%
  plot(main = "One-Diff and Sesonal-Diff", type = "l")
# PACF suggests a AR(5) with seasonal spikes at lag 6, 13, 20, 27
# ACF suggests MA(1) with seasonal spikes at lag 7, 14, 21, 28
# Comment: Seasonal pattern looks like a weekly pattern.
# i.e. Higher sales at weekends
# ARIMA (p, d , q) (P, D, Q)s
# ARIMA (5, 1, 1) (3, 0, 0)S
# NOTE(review): the header above says (5, 1, 1) but the call below fits
# order = c(5, 1, 0), i.e. no MA term -- confirm which was intended.
one_arima <- arima(x = train_one$sales,
                   order = c(5, 1, 0),
                   seasonal = list(order = c(3, 0, 0),
                                   period = 7), xreg = train_xreg)
# Diagnostics, Residual Plot
plot(residuals(one_arima), type = "l",
     main = "Residual Plot. Store = 1, Item = 1")
# Prediction
one_pred <- predict(one_arima, n.ahead = 90,newxreg = test_xreg)
# Evaluation
mape(actual = test_one$sales,
     predicted = one_pred$pred)
plot(x = 1:90, y = test_one$sales, col = "green",
     type = "l",
     main = "ARIMA\nStore = 1, Item = 1\nBlack = Prediction")
lines(x = 1:90, y = one_pred$pred)
|
309335f2f311be75cf9bb51578f80db48237504d
|
13a5d2deae8247072c637d3437be888027c66c87
|
/11-15/opgave11_4.R
|
cbb84596e17f584b7313fcf92aaadfeb05162149
|
[] |
no_license
|
wildered/R_code
|
26a1f74f90eac709cc474dd4022cb7316755b49c
|
1bc440fefe7e2a181ba0e8da315aca3650e8ee91
|
refs/heads/master
| 2021-01-12T14:40:14.790768
| 2016-10-26T20:04:54
| 2016-10-26T20:04:54
| 72,039,912
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 381
|
r
|
opgave11_4.R
|
# Kernel density estimation on gamma(3, 1) samples: overlay density
# estimates with several bandwidth adjustments on a probability histogram.
n <- 20
x <- rgamma(n, 3, 1)
hist(x, probability = TRUE)
# `count` is a color index, advanced by 7 per curve for distinct colors.
count <- 10
for (v in c(1/2, 3/4, 1, 5/4, 6/4, 2)){
  d <- density(x, adjust=v)
  lines(d, col=count)
  count <- count + 7
}
# Repeat with a much larger sample; the estimates should stabilise
# around the true gamma density regardless of bandwidth.
n <- 2000
x <- rgamma(n, 3, 1)
hist(x, probability = TRUE)
count <- 10
for (v in c( 1/4, 1/2, 3/4, 1, 5/4, 6/4, 2)){
  d <- density(x, adjust=v)
  lines(d, col=count)
  count <- count + 7
}
|
14ce9d0af3f7931dc0f1f6b730ffb3d7ee875afa
|
9ab05b7f8d8697fe99e6d4e7917fcb2b3234269c
|
/man/SiteRisksetsByStrata.Rd
|
64132929c87c7432115e3ad0bce39b3c6cc9b65d
|
[] |
no_license
|
kaz-yos/distributed
|
87ba8da54be2379c06fe244f4f570db4555770d7
|
46e53316e7ed20bcb8617e238b1b776fbeb364e2
|
refs/heads/master
| 2021-05-05T17:31:45.076267
| 2018-06-27T14:37:17
| 2018-06-27T14:37:17
| 103,559,562
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,184
|
rd
|
SiteRisksetsByStrata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02.ParepareDataWithinSites.R
\name{SiteRisksetsByStrata}
\alias{SiteRisksetsByStrata}
\title{Construct risk set data stratifying on a variable}
\usage{
SiteRisksetsByStrata(time = NULL, event, A, W = NULL, strata = NULL)
}
\arguments{
\item{time}{vector of the observed time variable. If omitted, taken to be all 0. Omit when \code{event} is a binary outcome variable without an accompanying time variable.}
\item{event}{vector of the event status binary variable (must be 0, 1). A binary outcome variable can also be used if the outcome of interest is such a variable.}
\item{A}{vector of the exposure status variable (must be 0, 1).}
\item{W}{vector of the weights. Omit if not weighting.}
\item{strata}{vector of stratifying variable}
}
\value{
reduced stratified risk set data frame having one row for each unique evaluation time for each strata.
}
\description{
Gives a stratified risk set data. See \code{\link{SiteRisksetsHelper}} for details
about the risk set data within each stratum. If no \code{strata} vector is supplied,
one stratum with value NA is assumed.
}
\author{
Kazuki Yoshida
}
|
13effb125e1ebd1851f8bd7b9787ca87c425c049
|
5aa7bc82cb400833a0b103d1d447ca9ac927aa39
|
/additional_data/general_use/mmap/man/make.fixedwidth.Rd
|
9c01540c50ce64ad81243d65c8acc64934dcbcef
|
[] |
no_license
|
gearslaboratory/gears-singularity
|
f4c6cfa47c043a387316fd4463a1e434d1cfad4c
|
f77ca9a87d3e8bf647deb353dfdfc3aec525995e
|
refs/heads/master
| 2021-05-13T14:23:30.228684
| 2021-02-18T18:29:39
| 2021-02-18T18:29:39
| 116,738,122
| 4
| 6
| null | 2018-07-11T13:04:41
| 2018-01-08T22:57:33
|
C
|
UTF-8
|
R
| false
| false
| 1,534
|
rd
|
make.fixedwidth.Rd
|
\name{make.fixedwidth}
\alias{make.fixedwidth}
\title{
Convert Character Vectors From Variable To Constant Width
}
\description{
Utility function to convert a vector of character strings
to one where each element has exactly \sQuote{width}-bytes.
}
\usage{
make.fixedwidth(x, width = NA, justify = c("left", "right"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A character vector.
}
\item{width}{
Maximum width of each element. width=NA (default)
will expand each element to the width required
to contain the largest element of x without loss
of information.
}
\item{justify}{
How should the results be padded? \sQuote{left} will
add spacing to the right of shorter elements in the vector
(left-justified), \sQuote{right} will do the opposite.
}
}
\details{
The current implementation of mmap only handles fixed-width
strings (nul-terminated). To simplify conversion of (potentially)
variable-width strings in a character vector, all
elements will be padded to the length of the longest string
in the vector or set to length \code{width} if specified.
All new elements will be left or right justified based on the
\code{justify} argument.
}
\value{
A character vector where each element is of fixed-width.
}
\author{
Jeffrey A. Ryan
}
\note{
Future implementations will possibly support variable-width character
vectors.
}
\examples{
month.name
make.fixedwidth(month.name)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ utilities }
|
e9a32fb66cb28d4b85bfb188f5044156c3758f00
|
ab7d15d06ed92cd51cc383dc9e98ae2a8fa41eaa
|
/man/trav_reverse_edge.Rd
|
afbceba98bd29b5bd5d90631f714f698b90751b0
|
[
"MIT"
] |
permissive
|
rich-iannone/DiagrammeR
|
14c46eb994eb8de90c50166a5d2d7e0668d3f7c5
|
218705d52d445c5d158a04abf8107b425ea40ce1
|
refs/heads/main
| 2023-08-18T10:32:30.784039
| 2023-05-19T16:33:47
| 2023-05-19T16:33:47
| 28,556,914
| 1,750
| 293
|
NOASSERTION
| 2023-07-10T20:46:28
| 2014-12-28T08:01:15
|
R
|
UTF-8
|
R
| false
| true
| 2,632
|
rd
|
trav_reverse_edge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trav_reverse_edge.R
\name{trav_reverse_edge}
\alias{trav_reverse_edge}
\title{Traverse to any reverse edges}
\usage{
trav_reverse_edge(graph, add_to_selection = FALSE)
}
\arguments{
\item{graph}{A graph object of class \code{dgr_graph}.}
\item{add_to_selection}{An option to either add the reverse edges to the
active selection of edges (\code{TRUE}) or switch the active selection entirely
to those reverse edges (\code{FALSE}, the default case).}
}
\value{
A graph object of class \code{dgr_graph}.
}
\description{
From an active selection of edges in a graph object of class \code{dgr_graph},
traverse to any available reverse edges between the nodes common to the
selected edges. For instance, if an active selection has the edge \code{1->2} but
there is also an (not selected) edge \code{2->1}, then this function can either
switch to the selection of \code{2->1}, or, incorporate both those edges into the
active selection of edges.
This traversal function makes use of an active selection of edges. After the
traversal, depending on the traversal conditions, there will either be a
selection of edges or no selection at all.
Selections of edges can be performed using the following selection
(\verb{select_*()}) functions: \code{\link[=select_edges]{select_edges()}}, \code{\link[=select_last_edges_created]{select_last_edges_created()}},
\code{\link[=select_edges_by_edge_id]{select_edges_by_edge_id()}}, or \code{\link[=select_edges_by_node_id]{select_edges_by_node_id()}}.
Selections of edges can also be performed using the following traversal
(\verb{trav_*()}) functions: \code{\link[=trav_out_edge]{trav_out_edge()}}, \code{\link[=trav_in_edge]{trav_in_edge()}},
\code{\link[=trav_both_edge]{trav_both_edge()}}, or \code{\link[=trav_reverse_edge]{trav_reverse_edge()}}.
}
\examples{
# Create a node data frame (ndf)
ndf <-
create_node_df(
n = 4,
type = "basic",
label = TRUE)
# Create an edge data frame (edf)
edf <-
create_edge_df(
from = c(1, 4, 2, 3, 3),
to = c(4, 1, 3, 2, 1))
# Create a graph with the
# ndf and edf
graph <-
create_graph(
nodes_df = ndf,
edges_df = edf)
# Explicitly select the edges
# `1`->`4` and `2`->`3`
graph <-
graph \%>\%
select_edges(
from = 1,
to = 4) \%>\%
select_edges(
from = 2,
to = 3)
# Get the initial edge selection
graph \%>\% get_selection()
# Traverse to the reverse edges
# (edges `2`: `4`->`1` and
# `4`:`3`->`2`)
graph <-
graph \%>\%
trav_reverse_edge()
# Get the current selection of edges
graph \%>\% get_selection()
}
|
7eed2431a5dc7b7cd128b24a7591a5428a955176
|
8369681fc1c33fab4b7aca6e4514d4782271adbb
|
/R/HW5.R
|
0174b37ffd94a1f6a79b52af73ebe5420c9a597d
|
[] |
no_license
|
mshagena89/IAA-Code
|
e90772197abd4f24c78bf3e119158c6069abef7b
|
7c64686fe37f3c60c092bfc22a8f4b2bc1d280db
|
refs/heads/master
| 2021-01-01T15:30:31.566197
| 2014-12-09T22:47:23
| 2014-12-09T22:47:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,153
|
r
|
HW5.R
|
#Solutions for Data Mining HW5
#variance-covariance Matrix
sigma = matrix(c(100, -40, -40, 25), nrow=2, ncol=2)
#calculate sigma inverse, used later in mahalanobis function
sigmaInv = solve(sigma)
#calculating Euclidean Distance between individual and Means
#distance between individual and non-defaulter
#(row 1 = non-defaulter mean, row 2 = individual)
nondef <- matrix(c(5,5,8,8), nrow=2,ncol=2, byrow=TRUE)
dist(nondef, method="euclidean")
#non defaulter distance = 4.24
#distance between individual and defaulter
defaulter <- matrix(c(5,5,15,6), nrow=2,ncol=2, byrow=TRUE)
dist(defaulter, method="euclidean")
#defaulter distance = 10.04988
#looks like a non-defaulter, based on euclidean distance!
#mahalanobis distance calculations
individual <- matrix(c(8,8), nrow=1)
defaulterMean <- matrix(c(15,6), nrow=1)
nonDefMean <- matrix(c(5,5), nrow=1)
#calculate mahalanobis for Individual and Defaulter Mean
mahalanobis(individual, defaulterMean, sigmaInv, inverted=TRUE)
#0.5611111 (original comment said .0561111 -- decimal point was misplaced)
#calculate mahalanobis for Individual and non-Defaulter Mean
mahalanobis(individual, nonDefMean, sigmaInv, inverted=TRUE)
#2.05
#Based on Mahalanobis Distance, Individual is more likely to be a defaulter!
|
74f0ad15de12994c59ced1630d80476c71a06288
|
62e8659296b80ffad17a90b30bc9aea688df485f
|
/experiments/test_exponential.r
|
00fb4933e0bdadf0b0d59e6df86eeb886551ccbe
|
[] |
no_license
|
matheushjs/dealing-with-popmin
|
b63bde3b63f895dad2300fb0bd2d9488c000902a
|
fb375ed9777128e35dfc4cb8b95840a721108380
|
refs/heads/master
| 2023-03-01T21:03:50.368606
| 2021-02-06T19:50:33
| 2021-02-06T19:50:33
| 259,324,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,828
|
r
|
test_exponential.r
|
# Simulation study: compare five estimators of the population minimum of an
# exponential(RATE) distribution across sample sizes, by (a) distance from a
# low quantile of the minimum's distribution and (b) profile log-likelihood
# of the shifted data. Requires the colorspace package for the palette.
require(colorspace);
ALL.N = c(10, 25, 50, 75, 100, 200);
#ALL.N = 100*c(100, 200);
ITERATIONS = 200;
RATE = 1/3
plotMeans = NULL;
plotConf = NULL;
df = NULL;
for(N in ALL.N){
  for(idx in 1:ITERATIONS){
    # Median of the distribution of the sample minimum (currently unused).
    min.median = qexp(1 - (1 - 0.5)**(1/N), rate=RATE);
    # 1% and 5% quantiles of the minimum distribution
    quantile05 = qexp(1 - (1 - 0.05)**(1/N), rate=RATE);
    # NOTE(review): quantile01 is computed with 0.05, not 0.01 -- this
    # looks like a copy/paste bug (it makes quantile01 == quantile05).
    quantile01 = qexp(1 - (1 - 0.05)**(1/N), rate=RATE);
    data = list();
    data$samples = rexp(rate=RATE, n=N);
    data$min = min(data$samples);
    data$stddev = sd(data$samples);
    data$mean = mean(data$samples);
    # Five candidate estimators of the distribution's lower endpoint:
    # the raw minimum plus four downward-corrected variants.
    estim = list();
    estim[[1]] = data$min;
    estim[[2]] = data$min - data$min * (data$stddev / data$mean) / log10(N);
    estim[[3]] = data$min - data$min * (data$stddev) / N;
    estim[[4]] = data$min - data$min * (data$stddev) * sqrt(log(log(N)) / (2*N));
    estim[[5]] = data$min - data$min * (data$stddev) * sqrt(-log(0.05/2) / (2*N));
    # Signed distance of each estimator from the reference quantile.
    dist01 = rep(0, 5);
    for(i in 1:5){
      dist01[i] = (estim[[i]] - quantile01);
    }
    #print(dist01);
    # Profile log-likelihood: shift data by each estimate, fit the rate.
    lik = rep(0, 5);
    for(i in 1:5){
      samples = data$samples - estim[[i]];
      lik.f = function(param){
        -sum(dexp(samples, rate=param[1], log=TRUE));
      }
      # NOTE(review): "L-BFGS" only works via partial matching of optim's
      # method argument to "L-BFGS-B" -- spell it out to be explicit.
      result = optim(c(1/mean(samples)), lik.f, method="L-BFGS", upper=c(Inf), lower=c(0));
      lik[[i]] = -result$value;
    }
    # NOTE(review): `samples` here is the leftover from the i == 5 loop
    # iteration (data shifted by estim[[5]]), not data$samples -- confirm
    # the baseline likelihood is meant to use shifted data.
    likBase = sum(dexp(samples, rate=RATE, log=TRUE));
    #print(lik);
    df = rbind(df, c(N, dist01, lik, likBase));
  }
  # Per-N summary: mean and a confidence half-width for each column.
  slice = df[df[,1] == N,];
  means = colMeans(slice);
  slice[,7:11] = slice[,7:11] - slice[,12];
  stds = apply(slice, 2, function(col) sd(col));
  #delta = stds;
  #delta = qt(0.05, df=ITERATIONS) * stds / ITERATIONS;
  # NOTE(review): qnorm(0.01) is negative (~ -2.33), so delta < 0; the
  # arrows() endpoints below are effectively swapped -- verify intended.
  delta = qnorm(0.01) * stds / ITERATIONS;
  #delta = apply(slice[,-1], 2, function(col){
  #  b = boot(col, function(data, idx) mean(data[idx]), R=100, sim="balanced");
  #  ci = boot.ci(b, type="norm");
  #  print(b);
  #  print(ci);
  #  0.05;
  #});
  plotMeans = rbind(plotMeans, means[-1]);
  plotConf = rbind(plotConf, delta[-1]);
}
colnames(df) = c("N", paste("dist", 1:5, sep=""), paste("lik", 1:5, sep=""), "likBase");
#print(df);
# Figure 1: likelihood of each estimator relative to the baseline, vs N.
graphics.off();
dev.new(width=0.8*8, height=0.8*5.4);
#print(plotMeans);
base = plotMeans[,11];
par(lwd=3, mar=c(3, 3, 0.2, 0.2));
palette(qualitative_hcl(palette="Dark 3", n=5));
plot(ALL.N, plotMeans[,6] - base, type="l", ylim=c(0.4, 1.7), col=1, log="x", xlab="", ylab="");
lines(ALL.N, plotMeans[,7] - base, col=2, lty=2);
lines(ALL.N, plotMeans[,8] - base, col=3, lty=3);
lines(ALL.N, plotMeans[,9] - base, col=4, lty=4);
lines(ALL.N, plotMeans[,10] - base, col=5, lty=5);
for(i in 6:10){
  arrows(x0=ALL.N, y0=plotMeans[,i] - plotConf[,i] - base, y1=plotMeans[,i] + plotConf[,i] - base, angle=90, code=3, length=0, col="#00000099");
}
title(xlab="sample size", line=2);
title(ylab="likelihood", line=2);
legend("topright", c(expression(textstyle(min)), expression(c1), expression(c2), expression(c3), expression(c4)), col=1:5, lwd=3, lty=1:5, seg.len=4, bg="#FFFFFFBB");
#savePlot("test-exponential-likelihood.png");
# Figure 2: distance of each estimator from the reference quantile, vs N.
dev.new(width=0.8*8, height=0.8*5);
#print(plotMeans);
#base = plotMeans[,11];
par(lwd=3, mar=c(3, 3, 0.2, 0.2));
palette(qualitative_hcl(palette="Dark 3", n=5));
plot(ALL.N, plotMeans[,1], type="l", ylim=c(-0.1, 0.35), col=1, log="x", xlab="", ylab="");
lines(ALL.N, plotMeans[,2], col=2, lty=2);
lines(ALL.N, plotMeans[,3], col=3, lty=3);
lines(ALL.N, plotMeans[,4], col=4, lty=4);
lines(ALL.N, plotMeans[,5], col=5, lty=5);
for(i in 1:5){
  arrows(x0=ALL.N, y0=plotMeans[,i] - plotConf[,i], y1=plotMeans[,i] + plotConf[,i], angle=90, code=3, length=0);
}
title(xlab="sample size", line=2);
title(ylab="estimate minus the 0.05 quantile", line=2);
legend("topright", c(expression(textstyle(min)), expression(c1), expression(c2), expression(c3), expression(c4)), col=1:5, lwd=3, lty=1:5, seg.len=4, bg="#FFFFFFBB");
|
a0d80c61fe5ef8bddb4698116fa73e3d8490ed4b
|
56e22dd051b4ecc6bf96a7fa93d6dcecc74eebcb
|
/R/shortcuts.R
|
301425dc4e383447108de0a8029612f500de4ffb
|
[
"MIT"
] |
permissive
|
yjunechoe/hrbragg
|
44337b14d1a23b3d3757ac5dae3c2899913ff01b
|
8f8b1f098d02329632fff26a89083237fa8cc75f
|
refs/heads/master
| 2023-06-14T13:32:21.889462
| 2021-07-10T12:36:19
| 2021-07-10T12:36:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
r
|
shortcuts.R
|
#' Shortcut for `element_blank`
#'
#' Alias for [ggplot2::element_blank()], which draws nothing and assigns
#' no space to the theme element it is applied to.
#' @return An S3 object of class element, rel, or margin.
#' @export
elb <- ggplot2::element_blank
#' Shortcut for `element_line`
#'
#' Alias for [ggplot2::element_line()].
#' @param colour,size,linetype,lineend,color,arrow,inherit.blank
#'   See [ggplot2::element_line()]
#' @return An S3 object of class element, rel, or margin.
#' @export
ell <- ggplot2::element_line
#' Shortcut for `element_text`
#'
#' Alias for [ggplot2::element_text()].
#' @param family,face,colour,size,hjust,vjust,angle,lineheight,color,margin,debug,inherit.blank
#'   See [ggplot2::element_text()]
#' @return An S3 object of class element, rel, or margin.
#' @export
elt <- ggplot2::element_text
#' Shortcut for `element_rect`
#'
#' Alias for [ggplot2::element_rect()].
#' @param fill,colour,size,linetype,color,inherit.blank
#'   See [ggplot2::element_rect()]
#' @return An S3 object of class element, rel, or margin.
#' @export
elr <- ggplot2::element_rect
|
d633b34b0ccca0c469602d0468e0d5359f9f2d77
|
7f7c55fce129ce299358e22b4f82757d5bfb111e
|
/R/ApiKey.R
|
836fb6032cf31b972eabcad63fea440a379f03d2
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
RickPack/urlshorteneR
|
32c43e797b7720d647947b23277d230c0de8b5a0
|
040c218e3670ed21ee44363f5edd580271759298
|
refs/heads/master
| 2020-04-28T00:47:33.950190
| 2019-03-10T17:50:24
| 2019-03-10T17:50:24
| 169,720,605
| 0
| 0
|
Apache-2.0
| 2019-02-21T10:53:57
| 2019-02-08T10:48:18
|
R
|
UTF-8
|
R
| false
| false
| 3,048
|
r
|
ApiKey.R
|
# Package-private environment used to cache the OAuth token set by bitly_auth().
.state <- new.env(parent = emptyenv())
# Declare the token symbol so R CMD check does not flag its global use in doRequest().
globalVariables(c("bitly_token"))
# API versions kept for reference; currently unused.
# Bitly_api_version <- "v4"
# Isgd_api_version <- "v2019"
#' @title Assign API tokens using OAuth2.0
#'
#' @description You should register an application in order to get Client ID and Client Secret code.
#' For Bit.ly, go to \url{https://bitly.com/a/oauth_apps}. Click \code{REGISTERED OAUTH APPLICATIONS},
#' then \code{GET REGISTRATION CODE}. Open the email you will receive and click \code{COMPLETE REGISTRATION}.
#' Make up an \code{APPLICATION NAME:} that is unique. Unless you know to do otherwise,
#' type "http://localhost:1410" in both fields \code{APPLICATION LINK:} and \code{REDIRECT URIs:}.
#' Type what you like in \code{APPLICATION DESCRIPTION:}.
#'
#' @param key - Client ID
#' @param secret - Client Secret
#'
#' @return An \code{httr} OAuth2.0 token, also cached in the package-private
#' \code{.state} environment for later requests.
#'
#' @seealso See \url{http://dev.bitly.com/rate_limiting.html}
#' @seealso See \url{http://dev.bitly.com/authentication.html}
#'
#' @examples
#' \dontrun{
#' bitly_token <-
#'   bitly_auth(key = "be03aead58f23bc1aee6e1d7b7a1d99d62f0ede8",
#'              secret = "e12dfc2482c76512b9a497e965abf4e082d1ffeb")
#' }
#'
#' @import httr
#' @export
bitly_auth <- function(key = "", secret = "") {
  # Bit.ly's OAuth2.0 endpoints for the authorization-code flow.
  endpoint <- httr::oauth_endpoint(
    authorize = "https://bitly.com/oauth/authorize",
    access = "https://api-ssl.bitly.com/oauth/access_token"
  )
  app <- httr::oauth_app("bitly", key = key, secret = secret)
  # cache = TRUE stores the token on disk (.httr-oauth) so the browser
  # round-trip only happens once per project.
  token <- httr::oauth2.0_token(endpoint, app, cache = TRUE)
  # Also remember the token inside the package state for subsequent requests.
  .state$token <- token
  token
}
#' @title Generalized function for executing GET/POST requests
#'
#' @param verb - a string, either "GET" or "POST", selecting the HTTP method
#' @param url - which is used for the request
#' @param queryParameters - parameters that are used for building a URL
#' (sent as the query string for GET and as a JSON body for POST)
#' @param showURL - for debugging purposes only: it shows what URL has been called
#'
#' @return The parsed JSON response as an R object.
#'
#' @import httr
#' @import jsonlite
#'
#' @noRd
#' @keywords internal
doRequest <- function(verb, url, queryParameters = NULL, showURL = NULL) {
  return_request <- switch(verb,
    "GET" = {
      httr::GET(url, query = queryParameters, httr::config(token = bitly_token))
    },
    "POST" = {
      httr::POST(url, body = queryParameters, encode = "json",
                 httr::content_type_json(), httr::config(token = bitly_token))
    },
    # Fail fast with a clear message instead of a confusing
    # "object 'return_request' not found" error on an unsupported verb.
    stop("Unsupported HTTP verb: ", verb, call. = FALSE)
  )
  json_response <- NULL
  if (http_error(return_request) == FALSE) {
    text_response <- content(return_request, as = "text")
    json_response <- fromJSON(text_response)
    # Bit.ly embeds its own status code in the payload; surface it to the user.
    if (is.null(json_response$status_code) == FALSE && json_response$status_code >= 400) {
      message(sprintf("Code: %s - %s", json_response$status_code, json_response$status_txt))
    }
    if (identical(showURL, TRUE)) {
      cat("The requested URL has been this: ", return_request$request$url, "\n")
    }
  } else {
    # Transport-level failure: raise the httr error for the status code.
    stop_for_status(return_request)
  }
  return(json_response)
}
|
c87e16368607911cdac0295054860071af9e1c9f
|
0284cda1023b82fcb23f46373fe8d406273494dd
|
/man/emHMM.Rd
|
d6b6614f7f4786ee467e656a60ebe835c1d7044a
|
[] |
no_license
|
flyingxiang/CCRWvsLW
|
b206569dd094a795a391c1bda8f8e821bf221f5b
|
fdccfa229695f7d92ffd6a709cb524a43d25e013
|
refs/heads/master
| 2021-01-22T17:39:56.325678
| 2016-06-10T20:36:30
| 2016-06-10T20:36:30
| 65,390,605
| 1
| 0
| null | 2016-08-10T14:45:43
| 2016-08-10T14:45:42
| null |
UTF-8
|
R
| false
| false
| 2,414
|
rd
|
emHMM.Rd
|
\name{emHMM}
\alias{emHMM}
\alias{EMHMM}
\title{EM-algorithm to fit a hidden Markov model representing the CCRW
}
\description{
emHMM finds the maximum likelihood estimate for the parameters of the CCRW by fitting the hidden Markov model through an Expectation Maximization (EM) algorithm.
}
\usage{
emHMM(SL, TA, missL, SLmin, lambda, gamm, delta=c(0.5,0.5),
kapp, notMisLoc, maxiter=10000, tol=1e-5)
}
\arguments{
\item{SL}{numeric vector containing the step lengths}
\item{TA}{numeric vector containing the turning angles}
\item{missL}{integer vector containing the number of time steps between two steps. If no missing location it will be 1.}
\item{SLmin}{one numeric value representing the minimum step length}
\item{lambda}{numeric vector of length 2 containing the starting value for the lambdas of the two behaviors}
\item{gamm}{2x2 matrix containing the starting value for the transition probability matrix}
\item{delta}{numeric vector value for the probability of starting in each of the two behaviors, default value c(0.5,0.5), which means that you have an equal chance of starting in each behavior}
\item{kapp}{one numeric value representing the starting value for the kappa of the von Mises distribution describing the extensive search behavior}
\item{notMisLoc}{integer vector containing the index of the locations that are not missing}
\item{maxiter}{one integer value representing the maximum number of iterations the EM algorithm will go through. Default = 10000.}
\item{tol}{double: value that indicates the maximum allowed difference between the parameters.}
}
\details{Returns the maximum likelihood parameter estimates and the minimised negative log-likelihood.}
\references{
Please refer to Auger-Methe, M., A.E. Derocher, M.J. Plank, E.A. Codling, M.A. Lewis (2015-In Press) Differentiating the Levy walk from a composite correlated random walk. Methods in Ecology and Evolution. Preprint available at \url{http://arxiv.org/abs/1406.4355}
For more information on the EM-algorithm please refer to Zucchini W. and I.L. MacDonald (2009) Hidden Markov Models for Time Series: An Introduction Using R. Chapman and Hall/CRC}
\examples{
simPath <- simmCCRW(500,0.9,0.9,0.1,0.01,5,1)
formPath <- movFormat(simPath)
emHMM(formPath$SL,formPath$TA, formPath$missL, formPath$SLmin,
lambda=c(0.1,0.1), gamm=matrix(c(0.8,0.2,0.2,0.8),nrow=2),
kapp=10,notMisLoc=formPath$notMisLoc)
}
|
2701f222199004ea1374338ddae0a4d0d31fb0a8
|
bf8458974cd7c5daa0882bba005d37b795aadeff
|
/man/independencetests.Rd
|
3d3290c6c1bfede7a8115b8786b0072a12c09eed
|
[] |
no_license
|
cran/SpatialNP
|
67e43d86a7e00a1ca2f218ae09e57346329183a0
|
57669fbaf6c594659c25439fe7c7b530e0a2e96e
|
refs/heads/master
| 2021-12-14T12:48:51.284359
| 2021-12-08T11:50:05
| 2021-12-08T11:50:05
| 17,693,745
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,953
|
rd
|
independencetests.Rd
|
\name{Independence tests}
\alias{sr.indep.test}
\title{Multivariate test of independence based on spatial signs or
ranks}
\description{ Test of independence between two sets of
variables. Inference is based on the spatial signs of the
observations, symmetrized signs of the observations or spatial signed
ranks of the observations. }
\usage{ sr.indep.test(X, Y = NULL, g = NULL, score = c("sign",
"symmsign", "rank"), regexp = FALSE, cond = FALSE, cond.n = 1000,
na.action = na.fail) }
\arguments{
\item{X}{a matrix or a data frame}
\item{Y}{an optional matrix or a data frame}
\item{g}{a factor giving the two sets of variables, or numeric vector or vector of column names giving the first set of variables. See details}
\item{score}{a character string indicating which transformation of the observations should be used}
\item{regexp}{logical. Is \code{g} a regular expression?}
\item{cond}{logical. Should the conditionally distribution free test be used?}
\item{cond.n}{Number of permutations to use in the conditionally distribution free test}
\item{na.action}{a function which indicates what should happen when the data
contain 'NA's. Default is to fail.}
}
\details{\code{X} should contain the first set of variables and
\code{Y} the second with matching rows. Alternatively, \code{X} should
contain both sets and \code{g} should be a factor of length equal to
number of columns of \code{X}, or, \code{g} should be a numeric or
character vector naming the variables in the first set. If \code{g} is
a character vector it is assumed to name all wanted columns exactly,
unless \code{regexp} is \code{TRUE}.}
\value{
A list with class 'htest' containing the following components:
\item{statistic }{the value of the statistic}
\item{parameter}{the degrees of freedom for the statistic or the number of replications if conditionally distribution free p-value was used}
\item{p.value}{the p-value for the test}
\item{null.value}{the specified hypothesized value of the measure of dependence (always 0)}
\item{alternative}{a character string with the value 'two.sided'.}
\item{method}{a character string indicating what type of test was performed}
\item{data.name}{a character string giving the name of the data (and grouping vector)}
}
\author{Seija Sirkia, \email{seija.sirkia@iki.fi}}
\references{\cite{Taskinen, S., Oja, H., Randles R. (2004) Multivariate Nonparametric Tests of Independence. \emph{JASA}, 100, 916-925}}
\seealso{\link[=spatial.rank]{Spatial signs and ranks}}
\examples{
A<-matrix(c(1,2,-3,4,3,-2,-1,0,4),ncol=3)
X<-matrix(rnorm(3000),ncol=3)\%*\%t(A)
Y<-cbind(X+runif(3000,-1,1),runif(1000))
sr.indep.test(X,Y)
#alternative calls:
Z<-cbind(X,Y)
colnames(Z)<-c("a1","a2","a3","b1","b2","b3","b4")
g<-factor(c(rep(1,3),rep(2,4)))
sr.indep.test(Z,g=g)
sr.indep.test(Z,g=c("b"),regexp=TRUE)
sr.indep.test(Z,g=1:3)
}
\keyword{multivariate}
\keyword{nonparametric}
\keyword{htest}
|
071894307309259fd95c9ff0698192c21d4d1a8d
|
894024e86cc9f5a0b95df606f80547fc78a59d1a
|
/R/gpuApply_Funcs.R
|
06cce59d332cd8fa35bef12acddfcab3cdc86b0b
|
[] |
no_license
|
Jiefei-Wang/gpuMagic
|
08047002c398eb303deeddd702139d67529ce555
|
0d6f49dccff7b4826afc685aff1e25ec2032e8d4
|
refs/heads/master
| 2022-05-10T00:20:41.729900
| 2022-03-15T00:18:21
| 2022-03-15T00:18:21
| 151,973,314
| 11
| 2
| null | 2022-03-15T00:18:22
| 2018-10-07T18:40:04
|
R
|
UTF-8
|
R
| false
| false
| 12,017
|
r
|
gpuApply_Funcs.R
|
# Create a cacheable snapshot of a compiled GPUcode object: the (potentially
# large) parameter data is dropped and only the parameter names are kept, and
# every mutable hash table in varInfo is deep-copied so later edits to the
# live object cannot leak into the cache.
saveGPUcode <- function(GPUcode) {
  snapshot <- GPUcode
  snapshot$parms <- NULL
  for (idx in seq_along(snapshot$varInfo)) {
    entry <- snapshot$varInfo[[idx]]
    # hash objects have reference semantics; copy() makes them independent.
    if (is(entry, "hash")) {
      snapshot$varInfo[[idx]] <- copy(entry)
    }
  }
  snapshot$parmsName <- names(GPUcode$parms)
  snapshot
}
# Rehydrate a cached GPUcode snapshot: look it up by key in the package-level
# gpuApplyFuncList, attach the caller-supplied parameters, and restore the
# parameter names that saveGPUcode() recorded.
loadGPUcode <- function(key, parms) {
  cached <- gpuApplyFuncList[[key]]
  cached$parms <- parms
  names(cached$parms) <- cached$parmsName
  cached
}
# Build a cache-key signature for a gpuSapply call. Two calls with the same
# signature can reuse the same compiled kernel. The signature combines:
# the precision type of each gpuMatrix argument, the dimension-match rule
# from processDimTbl(), the configured default device types, and digests of
# the user function and the optimization options.
# NOTE(review): `.device` is currently unused here; kept for interface
# stability with the other create*Signature helpers.
createSapplySignature <- function(parms, FUN, .macroParms, .device, .options) {
  sig = c()
  res = processDimTbl(parms, .macroParms)
  matchRule = res$rule
  # skip the first parameter(the parameter that will be looped on)
  for (i in seq_len(length(parms) - 1) + 1) {
    # Type of the parameters
    varSig = ""
    # Precision type of the parameter when it is a gpuMatrix class
    if (is(parms[[i]], "gpuMatrix")) {
      varSig = paste0(varSig, .type(parms[[i]]))
    }
    sig = c(sig, varSig)
  }
  sig = c(sig, matchRule)
  # Default variable type
  sig = c(sig, paste(GPUVar$default_float, GPUVar$default_int, GPUVar$default_index_type,
    sep = ","))
  # gpuSapply options
  sig = c(sig, digest(FUN), digest(.options$sapplyOptimization))
  sig
}
# Summarize a matrix-pool table (one row per matrix, columns size1, size2,
# sizeInByte) into the layout info the C side needs:
#   matrixOffset - byte offset of each matrix inside the pool
#   size1/size2  - per-matrix dimensions, with NA (undetermined) mapped to 0
#   dim          - c(size1, size2) concatenated for the kernel size table
#   totalSize    - total pool size in bytes (at least 1 so a device
#                  allocation is always valid)
#   matrixNum    - number of matrices in the pool
getVarSizeInfo_C_level <- function(sizeMatrix) {
  size1 = sizeMatrix$size1
  size2 = sizeMatrix$size2
  size1[is.na(size1)] = 0
  size2[is.na(size2)] = 0
  n = nrow(sizeMatrix)
  bytes = sizeMatrix$sizeInByte
  # Offset of matrix i is the cumulative size of all preceding matrices.
  # Vectorized cumsum replaces the original grow-in-a-loop construction.
  if (n > 0) {
    matrixOffset = c(0, cumsum(bytes)[-n])
    totalSize = sum(bytes)
  } else {
    matrixOffset = c()
    totalSize = 0
  }
  # An empty pool still gets one byte so the allocation is non-degenerate.
  if (totalSize == 0)
    totalSize = 1
  res = list(matrixOffset = matrixOffset, size1 = size1, size2 = size2,
    dim = c(size1, size2), totalSize = totalSize, matrixNum = n)
  return(res)
}
# Prepare all device-side arguments for a compiled kernel: coerce every host
# parameter to a gpuMatrix of the precision the code expects, compute the
# per-pool memory layout (gp = global private, gs = global shared,
# lp = local private, ls = local shared), and allocate the device buffers.
# Returns GPUcode1 with $device_argument attached.
fillGPUdata <- function(GPUcode1, .options, .device) {
  parms = GPUcode1$parms
  varInfo = GPUcode1$varInfo
  # Convert all the parameters into the gpuMatrix objects
  for (varName in names(parms)) {
    if (is(parms[[varName]], "gpuMatrix")) {
      curInfo = getVarInfo(varInfo, varName)
      # A user-supplied gpuMatrix must already have the compiled precision.
      if (curInfo$precisionType != .type(parms[[varName]])) {
        stop("The data type of the variable ", varName, " are not compatible with the code,\n",
          "expected type: ", curInfo$precisionType, ", variable type:",
          .type(parms[[varName]]), "\n")
      }
      if (.device(parms[[varName]]) == .device) {
        # Already resident on the target device: nothing to do.
        next
      } else {
        # Wrong device: pull the data back to the host and re-upload below.
        warning("You supplied a gpu memory object but it does not belong to the device that the code will be run on.")
        parms[[varName]] = as.matrix(parms[[varName]])
      }
    }
    curInfo = getVarInfo(varInfo, varName, 1)
    curType = curInfo$precisionType
    parms[[varName]] = gpuMatrix(parms[[varName]], type = curType,
      device = .device)
  }
  kernel_args = list()
  # return size, gp,gs,lp offset, gp,gs,lp,ls number, gp,gs,lp,ls dim(row,col)
  kernel_args$sizeInfo = NULL
  returnSize = 0
  # The size of the matrix in gp,gs,lp,ls
  matrix_size_info = c()
  sizeInfo_gp = getVarSizeInfo_C_level(varInfo$matrix_gp)
  # Total size per worker
  matrix_size_info = c(matrix_size_info, sizeInfo_gp$dim)
  sizeInfo_gs = getVarSizeInfo_C_level(varInfo$matrix_gs)
  matrix_size_info = c(matrix_size_info, sizeInfo_gs$dim)
  sizeInfo_lp = getVarSizeInfo_C_level(varInfo$matrix_lp)
  matrix_size_info = c(matrix_size_info, sizeInfo_lp$dim)
  sizeInfo_ls = getVarSizeInfo_C_level(varInfo$matrix_ls)
  matrix_size_info = c(matrix_size_info, sizeInfo_ls$dim)
  # Determine the per-worker return size from the profiled return info.
  if (!is.null(varInfo$returnInfo)) {
    returnInfo = varInfo$returnInfo
    if (sum(is.na(returnInfo$designSize)) > 0) {
      warning("Undetermined return size has been found!")
      returnInfo = returnInfo[!is.na(returnInfo$designSize), ]
    }
    if (length(returnInfo$designSize) != 0) {
      if (sum(returnInfo$designSize[1] != returnInfo$designSize) > 0)
        warning("Multiple return size has been found!")
      returnSize = max(max(returnInfo$designSize), 1)
    }
  }
  # One worker per element of the looped (first) parameter.
  totalWorkerNum = length(parms[[1]])
  # Global-private offsets are per-worker, so scale by the worker count.
  global_gp_offset = sizeInfo_gp$matrixOffset*totalWorkerNum
  matrix_offset = c(global_gp_offset, sizeInfo_gs$matrixOffset, sizeInfo_ls$matrixOffset)
  kernel_args$sizeInfo = c(returnSize, totalWorkerNum, matrix_offset, matrix_size_info)
  if (length(kernel_args$sizeInfo) == 0) kernel_args$sizeInfo = 0
  # Allocate the gpu memory
  IntType = GPUVar$default_index_type
  device_argument = list()
  # Buffers are allocated as int (4-byte) rows, hence the division by 4.
  device_argument$gp_data = gpuEmptMatrix(row = ceiling(sizeInfo_gp$totalSize *totalWorkerNum/4),
    col = 1, type = "int", device = .device)
  device_argument$gs_data = gpuEmptMatrix(row = ceiling(sizeInfo_gs$totalSize/4),
    type = "int", device = .device)
  device_argument$ls_data = kernel.getSharedMem(sizeInfo_ls$totalSize,
    type = "char")
  # The return size for each thread
  if (returnSize != 0) {
    device_argument$return_var = gpuEmptMatrix(returnSize, totalWorkerNum, type = GPUVar$default_float, device = .device)
  } else {
    # Dummy 1x1 buffer so the kernel signature stays uniform.
    device_argument$return_var = gpuEmptMatrix(1, 1, type = GPUVar$default_float, device = .device)
  }
  device_argument$sizeInfo = gpuMatrix(kernel_args$sizeInfo, type = IntType, device = .device)
  # Kernel argument order: user parameters first, then the internal buffers.
  device_argument = c(parms, device_argument)
  GPUcode1$device_argument = device_argument
  GPUcode1
}
# Wrap the generated kernel body in a complete OpenCL kernel definition:
# builds the signature (user arguments + internal buffers), prepends the
# #define lines for the default types, and enables fp64 when needed.
# Side effect: increments GPUVar$functionCount to keep kernel names unique.
completeGPUcode <- function(GPUcode) {
  varInfo = GPUcode$varInfo
  # NOTE(review): `profile` appears unused below — candidate for removal.
  profile = varInfo$profile
  GPUVar$functionCount = GPUVar$functionCount + 1
  kernelName = paste0(GPUVar$functionName, "_", GPUVar$functionCount)
  # Define the kernel function name
  code = paste0("kernel void ", kernelName, "(")
  # The function arguments: one global pointer per required user variable.
  kernel_arg_code = c()
  for (curName in varInfo$requiredVar) {
    curInfo = getVarInfo(varInfo, curName)
    curType = curInfo$precisionType
    kernel_arg_code = c(kernel_arg_code, paste0("global ", curType,
      "* ", curName))
  }
  code = paste0(code, paste0(kernel_arg_code, collapse = ","))
  # The working memory space: address-space qualifier per internal buffer.
  arg_prefix_list = c(
    "global", "global", "local",
    "global", "global")
  arg_list = c(
    GPUVar$global_private_data, GPUVar$global_shared_data,
    GPUVar$local_shared_data, GPUVar$return_variable, GPUVar$size_info)
  indType = GPUVar$default_index_type
  floatType = GPUVar$default_float
  arg_type_list = c("char", "char", "char", floatType, indType)
  for (i in seq_along(arg_list)) {
    curCode = paste0(arg_prefix_list[i], " ", arg_type_list[i], "* ",
      arg_list[i])
    # NOTE(review): this conditional paste0 is a no-op — likely leftover
    # from a version that appended a separator; candidate for removal.
    if (i != length(arg_list))
      curCode = paste0(curCode)
    code = c(code, curCode)
  }
  # NOTE(review): the next line's result is discarded — apparent dead code.
  paste0(arg_prefix_list, " ", arg_type_list, "* ", arg_list)
  code = paste0(code, collapse = ",\n")
  # add the kernel function definition
  code = paste0(
    code,
    "){\n",
    paste0(GPUcode$gpu_code, collapse = "\n"),
    "\n}")
  # Prepend the type aliases the generated body refers to.
  code = c(
    paste0("#define default_index_type ", GPUVar$default_index_type),
    paste0("#define default_float ", GPUVar$default_float),
    paste0("#define default_int ", GPUVar$default_int),
    code
  )
  # Add the double vector support if applicable
  if (GPUVar$default_float == "double")
    code = c("#pragma OPENCL EXTENSION cl_khr_fp64:enable",
      code)
  code = paste0(code, collapse = "\n")
  GPUcode$gpu_code = code
  GPUcode$kernel = kernelName
  GPUcode
}
# Evaluate the symbolic size expressions stored as strings in a profile table
# (columns size1, size2 and optionally sizeInByte) into concrete numbers, and
# derive designSize = size1 * size2.
# The expressions are evaluated in this function's environment, so they may
# refer to `parms` — presumably forms like nrow(parms[[1]]); that is why
# `parms` is a parameter even though it is never referenced directly.
# NOTE(review): eval(parse(...)) executes arbitrary code from the table; safe
# only because the expressions are generated by the compiler itself.
evaluateProfileTbl <- function(parms, table) {
  if (is.null(table) || nrow(table) == 0)
    return(table)
  table$size1 = vapply(as.list(parse(text = table$size1)), eval, numeric(1),
    envir = environment())
  table$size2 = vapply(as.list(parse(text = table$size2)), eval, numeric(1),
    envir = environment())
  if (!is.null(table$sizeInByte))
    table$sizeInByte = vapply(as.list(parse(text = table$sizeInByte)), eval,
      numeric(1), envir = environment())
  table$designSize = table$size1*table$size2
  return(table)
}
# Evaluate all symbolic size tables in GPUExp3$varInfo against the actual
# call parameters: parameters are first coerced to matrices (same rule as
# formatParms(), which is reused here instead of a duplicated inline loop —
# the previous copy also emitted a stray debug message() on every coercion),
# then each profile table's size expressions are resolved to numbers.
completeProfileTbl <- function(GPUExp3) {
  parms = formatParms(GPUExp3$parms)
  varInfo = GPUExp3$varInfo
  varInfo$matrix_gs = evaluateProfileTbl(parms, varInfo$matrix_gs)
  varInfo$matrix_gp = evaluateProfileTbl(parms, varInfo$matrix_gp)
  varInfo$matrix_ls = evaluateProfileTbl(parms, varInfo$matrix_ls)
  varInfo$matrix_lp = evaluateProfileTbl(parms, varInfo$matrix_lp)
  varInfo$returnInfo = evaluateProfileTbl(parms, varInfo$returnInfo)
  GPUExp3$varInfo = varInfo
  GPUExp3
}
# Run the compile-time error checks recorded in GPUcode$errorCheck: each row
# holds a check expression (string), a severity level, a message, and the
# offending code. Checks that evaluate TRUE raise a warning or error.
# The check expressions are evaluated in this environment and therefore may
# reference `parms` — presumably why it is bound here despite no direct use.
CheckCodeError <- function(GPUcode) {
  parms = GPUcode$parms
  errorCheckInfo = GPUcode$errorCheck
  if (is.null(errorCheckInfo)) return()
  for (i in seq_len(nrow(errorCheckInfo))) {
    info = errorCheckInfo[i, ]
    # Empty check string means nothing to verify for this row.
    if (info$check == "")
      next
    # NOTE(review): eval(parse(...)) on compiler-generated expressions only.
    error = eval(parse(text = info$check))
    # Inconclusive checks (NA/NULL result) are skipped.
    # NOTE(review): is.na(NULL) is logical(0), so a NULL result would make
    # this `||` error in recent R versions — confirm checks never yield NULL.
    if (is.na(error) || is.null(error))
      next
    if (error) {
      if (info$level == "warning") {
        warning(info$msg, ": \n", info$code)
      } else {
        stop(info$msg, ": \n", info$code)
      }
    }
  }
}
# Match the user-supplied parameter list against FUN's formal arguments.
# The looped data X is bound to FUN's argument named "X" when one exists,
# otherwise to FUN's first argument; remaining unnamed parameters are filled
# positionally from the still-unmatched formals. Errors when the counts or
# names cannot be reconciled.
matchParms <- function(X, parms, FUN) {
  argNames = names(funcToExp(FUN)$args)
  # Loop over "X" when FUN declares it, otherwise over the first formal.
  loopVar = if ("X" %in% argNames) "X" else argNames[1]
  parms = c(list(loopVar = X), parms)
  names(parms)[1] = loopVar
  # Formals not yet claimed by an explicitly named parameter.
  unmatched = setdiff(argNames, names(parms))
  finalNames = names(parms)
  for (pos in seq_along(finalNames)) {
    if (finalNames[pos] != "") next
    if (length(unmatched) == 0) stop("The function arguments does not match")
    finalNames[pos] = unmatched[1]
    unmatched = unmatched[-1]
  }
  # Leftover formals mean the caller supplied too few parameters.
  if (length(unmatched) > 0) {
    stop("The function arguments does not match")
  }
  names(parms) = finalNames
  parms
}
# Normalize a parameter list: anything that is neither a gpuMatrix nor a
# plain matrix (e.g. a vector or data frame) is coerced via as.matrix(),
# so downstream code can assume matrix-like inputs throughout.
formatParms <- function(parms) {
  needsCoercion = !vapply(
    parms,
    function(parm) is(parm, "gpuMatrix") || is(parm, "matrix"),
    logical(1)
  )
  parms[needsCoercion] = lapply(parms[needsCoercion], as.matrix)
  parms
}
# =========================optimization functions==============================
# Thread-number optimization: the code generator leaves a marker comment in
# the kernel source at the point where a worker-count guard may be inserted.
# When the optimization is enabled, the marker is replaced by an
# `if(global_id < N){` guard (closed at the end of the kernel body) so excess
# threads skip the work; when disabled, the marker is simply removed.
opt_workerNumber <- function(varInfo, code, .options) {
  # Marker emitted by the code generator; must be present exactly as-is.
  targetCode = "//Thread number optimization\n"
  if (!grepl(targetCode, code, fixed = TRUE)) {
    stop("Unable to find the location of the thread number optimization code\n",
      "This error should never happen\n", "Please contact the author")
  }
  loopedVar_length = GPUVar$gpu_global_size
  if (.options$sapplyOptimization$thread.number) {
    # Guard: only threads whose global id is in range execute the body.
    insertedCode = paste0("if(", GPUVar$gpu_global_id, "<", loopedVar_length, "){\n")
    insertedCode = paste0(targetCode, insertedCode)
    endCode = "\n}"
  } else {
    insertedCode = ""
    endCode = ""
  }
  code = sub(targetCode, insertedCode, code, fixed = TRUE)
  paste0(code, endCode)
}
|
bce11c79f274b6cd6fc6987445e62f67cf449048
|
bcc3bf661017041dfc14adc9be256996ce0fce56
|
/2019-accountability/cdf_to_student_level.R
|
2bc45cfdd37321f9a806c999b2829c4b1da8116b
|
[] |
no_license
|
tnedu/accountability
|
60bc867c76342bc43e66464150439b10360331e1
|
395c4d880d02cede1ff37ee3d4980046e0bcf783
|
refs/heads/master
| 2021-11-02T03:30:30.545850
| 2019-10-23T18:39:11
| 2019-10-23T18:39:11
| 42,124,295
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,560
|
r
|
cdf_to_student_level.R
|
library(acct)
library(tidyverse)
## TODO: School Numbers for 964/964 and 970/970
# MSAA (alternate assessment) CDF: drop withdrawn (WDR) / not-long-enrolled
# (NLE) records; proficiency only counts for tested ("TES") records.
msaa <- read_csv("N:/ORP_accountability/data/2019_cdf/2019_msaa_cdf.csv") %>%
  filter(!(reporting_status %in% c("WDR", "NLE"))) %>%
  mutate(
    test = "MSAA",
    semester = "Spring",
    special_ed = 1L,
    performance_level = if_else(reporting_status != "TES", NA_character_, performance_level)
  )
# Alternate Social Studies CDF: normalize ALL-CAPS district names and map
# Level 1-3 onto the standard performance-level labels.
alt_ss <- read_csv("N:/ORP_accountability/data/2019_cdf/2019_alt_ss_cdf.csv") %>%
  filter(school != 0) %>%
  mutate(
    system_name = if_else(system_name == str_to_upper(system_name), str_to_title(system_name), system_name),
    test = "Alt-Social Studies",
    semester = "Spring",
    special_ed = 1L,
    performance_level = case_when(
      performance_level == "Level 3" ~ "Mastered",
      performance_level == "Level 2" ~ "On Track",
      performance_level == "Level 1" ~ "Approaching"
    )
  )
# General-assessment CDFs; col_types pins each column's class so IDs and
# codes are not silently re-guessed between files.
fall_eoc <- read_csv("N:/ORP_accountability/data/2019_cdf/2019_fall_eoc_cdf.csv",
    col_types = "iciccccdiccccdiiiiciiciiciiciiiiiicc") %>%
  mutate(
    test = "EOC",
    semester = "Fall"
  )
spring_eoc <- read_csv("N:/ORP_accountability/data/2019_cdf/2019_spring_eoc_cdf.csv",
    col_types = "iciccccdiccccdiiiiciiciiciiciiiiiicc") %>%
  mutate(
    test = "EOC",
    semester = "Spring"
  )
tn_ready <- read_csv("N:/ORP_accountability/data/2019_cdf/2019_3_8_cdf.csv",
    col_types = "iciccccdiccccdiiiiciiciiciiciiiiiicc") %>%
  mutate(
    test = "TNReady",
    semester = "Spring"
  )
# Combine all general-assessment files and expand the numeric
# reason-not-tested / RI (irregularity) codes into boolean flags; map
# content-area codes to human-readable subject names.
cdf <- bind_rows(fall_eoc, spring_eoc, tn_ready, alt_ss) %>%
  mutate(
    # An absence (reason 1) should not also count as failed attemptedness.
    ri_status = if_else(reason_not_tested == 1 & ri_status == 6, 0, ri_status),
    performance_level = if_else(performance_level == "On track", "On Track", performance_level),
    absent = reason_not_tested == 1,
    not_enrolled = reason_not_tested == 2,
    not_scheduled = reason_not_tested == 3,
    medically_exempt = reason_not_tested == 4,
    residential_facility = reason_not_tested == 5,
    tested_alt = reason_not_tested == 6,
    did_not_submit = reason_not_tested == 7,
    breach_adult = ri_status == 1,
    breach_student = ri_status == 2,
    irregular_admin = ri_status == 3,
    incorrect_grade_subject = ri_status == 4,
    refused_to_test = ri_status == 5,
    failed_attemptedness = ri_status == 6,
    original_subject = case_when(
      content_area_code == "ENG" ~ "ELA",
      content_area_code == "MAT" ~ "Math",
      content_area_code == "SOC" ~ "Social Studies",
      content_area_code == "A1" ~ "Algebra I",
      content_area_code == "A2" ~ "Algebra II",
      content_area_code == "E1" ~ "English I",
      content_area_code == "E2" ~ "English II",
      content_area_code == "G1" ~ "Geometry",
      content_area_code == "M1" ~ "Integrated Math I",
      content_area_code == "M2" ~ "Integrated Math II",
      content_area_code == "M3" ~ "Integrated Math III",
      content_area_code == "U1" ~ "US History"
    )
  )
# EOC subject groupings used when converting subjects for grades 3-8.
math_eoc <- c("Algebra I", "Algebra II", "Geometry", "Integrated Math I", "Integrated Math II", "Integrated Math III")
english_eoc <- c("English I", "English II")
# Integrated Math districts for reassigning MSAA subjects: a district is
# "Integrated Math" when M1 records outnumber A1 records.
int_math_systems <- cdf %>%
  filter(content_area_code %in% c("A1", "M1")) %>%
  count(system, content_area_code) %>%
  group_by(system) %>%
  mutate(temp = max(n)) %>%
  filter(n == temp, content_area_code == "M1") %>%
  magrittr::extract2("system")
# Build the raw student-level file: combine general and alternate CDFs,
# derive demographic groups, apply the tested/enrolled flag hierarchy, and
# convert subjects per accountability business rules.
student_level <- bind_rows(cdf, msaa) %>%
  transmute(
    system,
    system_name,
    school,
    school_name,
    test,
    semester,
    original_subject,
    subject = original_subject,
    original_performance_level = performance_level,
    performance_level,
    scale_score,
    state_student_id = unique_student_id,
    last_name,
    first_name,
    grade,
    gender,
    reported_race,
    # BHN = Black, Hispanic, or Native American subgroup.
    bhn_group = reported_race %in% c("Black or African American", "Hispanic/Latino", "American Indian/Alaska Native"),
    economically_disadvantaged,
    el,
    el_recently_arrived = (el_arrived_year_1 == 1 | el_arrived_year_2 == 1),
    # T1-T4: transitional EL statuses (1-4 years post-exit).
    t1234 = t1234 %in% 1:4,
    special_ed,
    functionally_delayed,
    gifted,
    migrant,
    enrolled_50_pct_district,
    enrolled_50_pct_school,
    teacher_of_record_tln,
    reporting_status,
    breach_adult, breach_student, irregular_admin, incorrect_grade_subject, refused_to_test, failed_attemptedness,
    absent, not_enrolled, not_scheduled, medically_exempt, residential_facility, tested_alt, did_not_submit
  ) %>%
  mutate_at(vars(bhn_group, t1234, el_recently_arrived), as.integer) %>%
  # rowwise() so any() evaluates the flags within each single record.
  rowwise() %>%
  # Apply testing flag hierarchy
  mutate(
    enrolled = case_when(
      any(breach_adult, breach_student, irregular_admin, incorrect_grade_subject, refused_to_test, failed_attemptedness) ~ 0,
      any(not_enrolled, not_scheduled) ~ 0,
      TRUE ~ 1
    ),
    # EL Recently Arrived students with missing proficiency are not considered tested
    tested = case_when(
      test == "MSAA" & reporting_status == "DNT" ~ 0,
      any(breach_adult, breach_student, irregular_admin, incorrect_grade_subject, refused_to_test, failed_attemptedness) ~ 0,
      any(absent, not_enrolled, not_scheduled) ~ 0,
      el_recently_arrived == 1L & is.na(original_performance_level) ~ 0,
      TRUE ~ 1
    ),
    # EL Recently Arrived students performance level are converted to missing
    performance_level = case_when(
      any(breach_adult, breach_student, irregular_admin, incorrect_grade_subject, refused_to_test, failed_attemptedness) ~ NA_character_,
      any(absent, not_enrolled, not_scheduled, medically_exempt, residential_facility, did_not_submit) ~ NA_character_,
      el_recently_arrived == 1 ~ NA_character_,
      TRUE ~ performance_level
    )
  ) %>%
  ungroup() %>%
  mutate(
    # Modify subject for MSAA tests in grades >= 9 (6.8)
    subject = case_when(
      original_subject == "Math" & test == "MSAA" & grade >= 9 & system %in% int_math_systems ~ "Integrated Math I",
      original_subject == "Math" & test == "MSAA" & grade >= 9 & !(system %in% int_math_systems) ~ "Algebra I",
      original_subject == "ELA" & test == "MSAA" & grade >= 9 ~ "English II",
      TRUE ~ subject
    ),
    # Convert subjects per accountability rules
    subject = case_when(
      grade %in% 3:8 & original_subject %in% math_eoc ~ "Math",
      grade %in% 3:8 & original_subject %in% english_eoc ~ "ELA",
      grade %in% 3:8 & original_subject == "US History" ~ "Social Studies",
      TRUE ~ subject
    )
  )
# Records from Alternative, CTE, Adult HS are dropped from student level
cte_alt_adult <- read_csv("N:/ORP_accountability/data/2019_tdoe_provided_files/cte_alt_adult_schools.csv") %>%
  transmute(system = as.numeric(DISTRICT_NUMBER), school = as.numeric(SCHOOL_NUMBER))
# Deduplicate to one record per student/subject by a sequence of tie-breaks:
# test type > performance level > scale score > administration recency >
# non-missing race > non-missing grade. In each step, `temp == -Inf` keeps
# groups where every record's priority is NA (max(..., na.rm = TRUE) yields
# -Inf there), so such records are never dropped outright.
dedup <- student_level %>%
  anti_join(cte_alt_adult, by = c("system", "school")) %>%
  # For students with multiple records across test types, MSAA has priority, then EOC, then 3-8
  mutate(
    test_priority = case_when(
      test %in% c("MSAA", "Alt-Social Studies") ~ 3,
      test == "EOC" ~ 2,
      test == "TNReady" ~ 1
    )
  ) %>%
  group_by(state_student_id, subject) %>%
  mutate(temp = max(test_priority, na.rm = TRUE)) %>%
  filter(test_priority == temp | temp == -Inf) %>%
  select(-test_priority, -temp) %>%
  ungroup() %>%
  # For students with multiple records within the same test, take highest performance level
  mutate(
    prof_priority = case_when(
      performance_level %in% c("Below", "Below Basic") ~ 1,
      performance_level %in% c("Approaching", "Basic") ~ 2,
      performance_level %in% c("On Track", "Proficient") ~ 3,
      performance_level %in% c("Mastered", "Advanced") ~ 4
    )
  ) %>%
  group_by(state_student_id, original_subject, test) %>%
  mutate(temp = max(prof_priority, na.rm = TRUE)) %>%
  filter(prof_priority == temp | temp == -Inf) %>%
  select(-prof_priority, -temp) %>%
  ungroup() %>%
  # For students with multiple records within the same performance level, take highest scale score
  group_by(state_student_id, original_subject, test, performance_level) %>%
  mutate(temp = max(scale_score, na.rm = TRUE)) %>%
  filter(scale_score == temp | temp == -Inf) %>%
  select(-temp) %>%
  ungroup() %>%
  # For students with multiple test records with the same proficiency across administrations, take the most recent
  mutate(
    semester_priority = case_when(
      test %in% c("MSAA", "Alt-Social Studies", "Achievement") | (test == "EOC" & semester == "Spring") ~ 2,
      test == "EOC" & semester == "Fall" ~ 1
    )
  ) %>%
  group_by(state_student_id, original_subject, test) %>%
  mutate(temp = max(semester_priority, na.rm = TRUE)) %>%
  filter(semester_priority == temp | temp == -Inf) %>%
  select(-semester_priority, -temp) %>%
  ungroup() %>%
  # For students with multiple test records with the same original subject, performance level, scale score
  # Deduplicate by missing race/ethnicity
  group_by(state_student_id, original_subject, test, performance_level, scale_score, semester) %>%
  mutate(
    n = n(), # Tag duplicates by id, subject, test, performance level, scale score, semester
    temp = mean(is.na(reported_race)) # Check whether one among duplicates has non-missing race/ethnicity
  ) %>%
  filter(!(n > 1 & temp != 0 & is.na(reported_race))) %>%
  ungroup() %>%
  select(-n, -temp) %>%
  # For students multiple test records with the same original subject, performance level, scale score, demographics
  # Deduplicate for non-missing grade
  group_by(state_student_id, original_subject, test, performance_level, scale_score, semester, reported_race) %>%
  mutate(
    n = n(), # Tag duplicates by id, subject, test, performance level, scale score, semester
    temp = mean(is.na(grade)) # Check whether one among duplicates has non-missing grade
  ) %>%
  filter(!(n > 1 & temp != 0 & is.na(grade))) %>%
  ungroup() %>%
  select(-n, -temp) %>%
  # Valid test if there is a proficiency level
  mutate(valid_test = as.integer(not_na(performance_level)))
# Lookup of accountability system/school reassignments, joined on below so a
# student's accountability attribution can differ from the testing school.
# Reassigned schools for accountability
enrollment <- read_csv("N:/ORP_accountability/data/2019_final_accountability_files/enrollment.csv")
# Students who took the WIDA ELPA growth assessment; they are forced to EL = 1 below.
elpa <- read_csv("N:/ORP_accountability/data/2019_ELPA/wida_growth_standard_student.csv") %>%
  select(student_id)
# Final student-level file: reorder columns, compute within-grade/subject
# percentiles, attach accountability system/school, and force EL = 1 for
# ELPA test-takers.
# FIX: the mutate() assigning acct_system/acct_school was previously never
# closed before the next mutate(), which made the whole pipeline a syntax
# error; the missing `) %>%` is restored. Also spell out `ties.method`
# (was partially matched as `ties =`).
student_level <- dedup %>%
  select(
    system, system_name, school, school_name, test, original_subject, subject, semester,
    original_performance_level, performance_level, scale_score, enrolled, tested, valid_test,
    state_student_id, last_name, first_name, grade, gender, reported_race, bhn_group, teacher_of_record_tln,
    functionally_delayed, special_ed, economically_disadvantaged, gifted, migrant, el, t1234, el_recently_arrived,
    enrolled_50_pct_district, enrolled_50_pct_school, absent, refused_to_test, residential_facility
  ) %>%
  mutate_at(vars(absent, refused_to_test, residential_facility), as.integer) %>%
  # Percentiles by grade and original subject for 3-8
  group_by(test, original_subject, grade) %>%
  mutate(
    rank = if_else(not_na(scale_score), rank(scale_score, ties.method = "max"), NA_integer_),
    denom = sum(not_na(scale_score)),
    percentile = if_else(test == "TNReady", round5(100 * rank/denom, 1), NA_real_)
  ) %>%
  # Percentiles by original subject for EOCs
  group_by(test, original_subject) %>%
  mutate(
    rank = if_else(not_na(scale_score), rank(scale_score, ties.method = "max"), NA_integer_),
    denom = sum(not_na(scale_score)),
    percentile = if_else(test == "EOC", round5(100 * rank/denom, 1), percentile)
  ) %>%
  ungroup() %>%
  select(-rank, -denom) %>%
  arrange(system, school, state_student_id) %>%
  # Add system and school for accountability purposes
  left_join(enrollment, by = "state_student_id") %>%
  mutate(
    # Fall back to the testing system/school when no reassignment exists.
    acct_system = if_else(is.na(acct_system), system, acct_system),
    acct_school = if_else(is.na(acct_school), school, acct_school)
  ) %>%
  # Assign EL = 1 if student tested ELPA
  mutate(
    el = if_else(state_student_id %in% elpa$student_id, 1, el)
  )
# Write the statewide student-level file, then one split file per district.
write_csv(student_level, "N:/ORP_accountability/projects/2019_student_level_file/2019_student_level_file.csv", na = "")

# Split student level file
district_numbers <- sort(unique(student_level$system))

# Split files should contain either students with assessment or accountability school number
split_by_district <- function(s) {
  filter(student_level, system == s | acct_system == s)
}

# One CSV per district, named by district number.
walk(
  district_numbers,
  ~ write_csv(
    split_by_district(.x),
    path = paste0("N:/ORP_accountability/data/2019_assessment_files/Split/", .x, "_StudentLevelFiles_30Jul2019.csv"),
    na = ""
  )
)
|
4416ec54ef21e9db290050bb8d4f0be873730e98
|
5dc064f8f0df5f9dc0251dd789617402bb648ece
|
/functions.R
|
6648acdc6ae220ce49b719881618ec5ed67248da
|
[] |
no_license
|
fbetteo/dm-HyadesCluster
|
192fb42c6493dc062f090c1150ee7721636b5a06
|
15b8f5fdb73056471c37c9ad018042440d222e43
|
refs/heads/master
| 2020-04-03T13:45:44.936591
| 2018-10-29T23:57:48
| 2018-10-29T23:57:48
| 155,297,021
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,535
|
r
|
functions.R
|
source("libraries.R")
# Infix operator for string concatenation with no separator: "a" %+% "b" == "ab".
# Vectorized, as paste0() is.
"%+%" <- function(a, b) {
  paste0(a, b)
}
# Min-max normalization: linearly rescale x onto [0, 1].
# NOTE(review): assumes max(x) > min(x) and no NAs; constant input divides by 0.
minmax <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Drop the n_rem most outlying rows of df: keep only the rows that
# MASS::cov.rob() flags as the "best" (most central) subset of size
# nrow(df) - n_rem.
remove_outliers <- function(df, n_rem) {
  keep_idx <- MASS::cov.rob(df, cor = FALSE, quantile.used = nrow(df) - n_rem)$best
  df[keep_idx, ]
}
# Euclidean distance from every row of `mat` (matrix or data frame) to the
# vector `vec`. Optionally attach `rownames` as names on the result; with the
# default NULL the result is unnamed.
rowdist <- function(mat, vec, rownames = NULL) {
  diffs <- sweep(mat, 2, vec)
  out <- sqrt(rowSums(diffs * diffs))
  names(out) <- rownames
  out
}
# TRUE at every position whose value occurs more than once in x -- unlike
# duplicated(), which leaves each value's first occurrence FALSE.
every_dup <- function(x) {
  duplicated(x) | duplicated(x, fromLast = TRUE)
}
# Pairwise Euclidean distances between the rows of x and the rows of y
# (matrices or data frames). Returns an nrow(x) x nrow(y) matrix; optional
# dimnames via names_x / names_y.
# Algebra from: https://www.r-bloggers.com/pairwise-distances-in-r/
# The 1e11 scaling works around results rounding to zero for tiny inputs
# (per the original author's note); it is undone before returning.
pwdist <- function(x, y, names_x = NULL, names_y = NULL) {
  xm <- as.matrix(x) * 1e11
  n_x <- nrow(xm)
  ym <- as.matrix(y) * 1e11
  n_y <- nrow(ym)
  # per-row sums of squares: ||x_i||^2 and ||y_j||^2
  x_sc <- apply(xm, 1, function(vec) crossprod(vec, vec))
  y_sc <- apply(ym, 1, function(vec) crossprod(vec, vec))
  # tmp[i, j] = ||x_i||^2 + ||y_j||^2
  tmp1 <- matrix(rep(x_sc, n_y), nrow = n_x)
  tmp2 <- matrix(rep(y_sc, n_x), nrow = n_x, byrow = TRUE)
  tmp <- tmp1 + tmp2
  # ||x_i - y_j||^2 = ||x_i||^2 + ||y_j||^2 - 2<x_i, y_j>.
  # BUG FIX: floating-point rounding can make this difference slightly
  # negative (notably for near-identical rows), so sqrt() produced NaN;
  # clamp at zero before taking the root.
  sq <- pmax(tmp - 2 * tcrossprod(xm, ym), 0)
  out <- sqrt(sq) / 1e11
  dimnames(out) <- list(names_x, names_y)
  return(out)
}
|
e94889d65f3ec75a5982deaeb076a1f2563c512b
|
f2b85324be5786e64007d6569ee0f859cd4e7890
|
/utils_plot_fcts.R
|
618b42391610a18efc708d59c53acde7111fdd17
|
[] |
no_license
|
marzuf/Cancer_HiC_data_TAD_DA
|
17313c4bd142b1d47b9c544e642cc248e7329dd6
|
67d62faba164678c7e4751f20131b34ecd4d11a5
|
refs/heads/master
| 2020-04-15T21:39:51.365591
| 2019-06-20T07:47:52
| 2019-06-20T07:47:52
| 165,041,352
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,685
|
r
|
utils_plot_fcts.R
|
# Print "<x> = <value>", where x is a variable name (or any R expression)
# given as a string; the string is parsed and evaluated before printing.
# NOTE(review): eval() resolves names through printVar's own frame first, so a
# shadowed name (e.g. printVar("x")) reports the local argument -- confirm
# callers only pass names not used inside this function.
printVar <- function(x){
  cat(paste0(x, " = ", eval(parse(text=x)), "\n"))
}
# Reverse-cumulative match plot for one column of dt: for each threshold
# x = 1..max(dt[, tomatch]), count how many entries have a match count >= x,
# and draw the counts as a line. The column name is written above the plot.
plot_cumMatch <- function(dt, tomatch){
  # Drop missing match counts for this column.
  curr_match <- na.omit(dt[, tomatch])
  xvect <- seq_len(max(curr_match))
  # Number of entries meeting or exceeding each threshold.
  yvect <- sapply(xvect, function(x){
    sum(curr_match >= x)
  })
  plot(x = xvect,
       y = yvect,
       xlab = paste0("# datasets in which matching signif. TAD"),
       ylab = paste0("# query TAD"),
       type="l")
  # Column name as a subtitle over the plot area.
  mtext(side=3, text=paste0(tomatch))
}
# Dodged bar chart of `yvar` against the discrete `xvar` of `barDT`; returns
# the ggplot object (caller must print it). If `xcolvar` names a column of
# per-category colors, those colors are applied to the x-axis tick labels;
# otherwise the labels are black. Bars are filled with `barCol`.
ggplot_barplot_hicdsexprds <- function(barDT, xvar, yvar,
                                       xcolvar = NULL,
                                       myxlab="", myylab="", myTit="", mySub="",
                                       barCol="dodgerblue3") {
  # Tick-label colors: one per category from xcolvar, or plain black.
  if(is.null(xcolvar)) {
    plotcols <- "black"
  }else {
    plotcols <- barDT[,xcolvar]
  }
  # aes_string allows xvar/yvar to arrive as column-name strings.
  p_ref <- ggplot(barDT, aes_string(x = xvar, y = yvar)) +
    ggtitle(myTit, subtitle = mySub)+
    geom_bar(stat="identity", position = "dodge", fill = barCol)+
    scale_x_discrete(name=myxlab)+
    scale_y_continuous(name=myylab,
                       breaks = scales::pretty_breaks(n = 10))+
    labs(fill = "")+
    theme( # Increase size of axis lines
      # top, right, bottom and left
      plot.margin = unit(c(1, 1, 1, 1), "lines"),
      plot.title = element_text(hjust = 0.5, face = "bold", size=16),
      plot.subtitle = element_text(hjust = 0.5, face = "italic", size=10),
      panel.grid = element_blank(),
      # Vertical tick labels, individually colored when xcolvar is supplied.
      axis.text.x = element_text( hjust=1,vjust = 0.5, size=8, angle = 90, color = plotcols),
      axis.line.x = element_line(size = .2, color = "black"),
      axis.line.y = element_line(size = .3, color = "black"),
      # axis.ticks.x = element_blank(),
      axis.text.y = element_text(color="black", hjust=1,vjust = 0.5),
      axis.title.y = element_text(color="black", size=12),
      axis.title.x = element_text(color="black", size=12),
      panel.border = element_blank(),
      panel.background = element_rect(fill = "transparent"),
      legend.background = element_rect(),
      legend.key = element_blank()
    )
  return(p_ref)
}
# Boxplots of `yvar` for each level of the discrete `xvar` of `barDT`, with
# the x categories reordered by decreasing mean of `yvar`; returns the ggplot
# object (caller must print it). If `colvar` names a column, its values color
# the x-axis tick labels. Rows containing any NA are dropped before plotting.
ggplot_boxplot_hicdsexprds <- function(barDT, xvar, yvar, colvar,
                                       myxlab="", myylab="", myTit="", mySub="",
                                       barCol="dodgerblue3") {
  # Order the x categories by their mean y, highest first.
  avg_barDT <- aggregate(as.formula(paste0(yvar, "~", xvar)), data = barDT, FUN=mean, na.rm=TRUE)
  xvar_order <- as.character(avg_barDT[,xvar][order(avg_barDT[,yvar], decreasing=TRUE)])
  stopifnot(!is.na(xvar_order))
  barDT[, xvar] <- factor(as.character(barDT[,xvar]), levels = xvar_order)
  # na.omit drops entire rows with any missing value.
  plotDT <- na.omit(barDT)
  if(is.null(colvar)){
    mycols <- "black"
  }else {
    mycols <- plotDT[,colvar]
  }
  p_ref <- ggplot(plotDT, aes_string(x = xvar, y = yvar)) +
    ggtitle(myTit, subtitle = mySub)+
    geom_boxplot(fill = barCol)+
    scale_x_discrete(name=myxlab)+
    scale_y_continuous(name=myylab,
                       breaks = scales::pretty_breaks(n = 10))+
    labs(fill = "")+
    theme( # Increase size of axis lines
      # top, right, bottom and left
      plot.margin = unit(c(1, 1, 1, 1), "lines"),
      plot.title = element_text(hjust = 0.5, face = "bold", size=16),
      plot.subtitle = element_text(hjust = 0.5, face = "italic", size=10),
      panel.grid = element_blank(),
      # Vertical tick labels, individually colored when colvar is supplied.
      axis.text.x = element_text( hjust=1,vjust = 0.5, size=8, angle = 90, color = mycols),
      axis.line.x = element_line(size = .2, color = "black"),
      axis.line.y = element_line(size = .3, color = "black"),
      # axis.ticks.x = element_blank(),
      axis.text.y = element_text(color="black", hjust=1,vjust = 0.5),
      axis.title.y = element_text(color="black", size=12),
      axis.title.x = element_text(color="black", size=12),
      panel.border = element_blank(),
      panel.background = element_rect(fill = "transparent"),
      legend.background = element_rect(),
      legend.key = element_blank()
    )
  return(p_ref)
}
# Overlay kernel density curves, one per vector in size_list (NAs removed
# before density estimation). The legend shows each element's name with its
# non-NA count; colors default to the palette indices 1..k unless my_cols is
# given.
plot_multiDens_setcols <- function(size_list, plotTit="", legTxt=NULL, legPos="topright", my_ylab="density", my_xlab="", my_cols = NULL) {
  dens <- lapply(size_list, function(x) density(na.omit(x)))
  names(dens) <- names(size_list)
  # Non-NA sample size per curve, used in the default legend text.
  lengthDens <- unlist(lapply(size_list, function(x) length(na.omit(x))))
  if(is.null(my_cols)) my_cols <- 1:length(dens)
  # Empty canvas spanning the union of all curves' x/y ranges, then draw each.
  plot(NA, xlim=range(sapply(dens, "[", "x")), ylim=range(sapply(dens, "[", "y")),
       main=plotTit, xlab=my_xlab, ylab=my_ylab)
  foo <- mapply(lines, dens, col=my_cols)
  if(is.null(legTxt)){
    # legTxt <- names(dens)
    legTxt <- paste0(names(dens), " (n=", lengthDens, ")")
  }
  legend(legPos, legend=legTxt, fill=my_cols, bty='n')
}
|
ad00c64dfbd9304cc6fc3842c6aca2b83589b7a1
|
9bf7e9b202b2b02d65ad8eed6346a50584392b1a
|
/R/NglWidget.R
|
2c485b87198435e3b2c09e3cb813fce8a8079a4d
|
[] |
no_license
|
paul-shannon/nglShinyS4
|
53be289f1148bf6711d2783a17461fc588175940
|
a96fd5c79057ed05c3acf7be60d7c9aa623d31f3
|
refs/heads/master
| 2022-09-17T15:21:25.091519
| 2020-05-31T17:03:12
| 2020-05-31T17:03:12
| 267,874,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,117
|
r
|
NglWidget.R
|
#' @import shiny
#' @import htmlwidgets
#' @import shinyjs
#' @importFrom methods new
#'
#' @title NglWidget
#------------------------------------------------------------------------------------------------------------------------
#' @name NglWidget-class
#' @rdname NglWidget-class
#' @aliases NglWidget
#'
## @import methods
## S4 class behind the NGL widget. Slots:
##   pdbID            -- Protein Data Bank id of the displayed structure
##   htmlContainer    -- id of the DOM element hosting the widget
##   width, height    -- initial widget dimensions
##   componentOptions -- list of option lists for named structure components
##   state            -- environment for mutable state (holds the htmlwidget)
##   quiet            -- suppress trace output?
.NglWidget <- setClass("NglWidget",
    representation = representation(
       pdbID="character",
       htmlContainer="character",
       width="numeric",
       height="numeric",
       componentOptions="list",
       state="environment",
       quiet="logical"
       )
    )
#------------------------------------------------------------------------------------------------------------------------
# C-style formatted printing: sprintf the arguments, strip quotes, print.
printf <- function(...) {
  print(noquote(sprintf(...)))
}
#------------------------------------------------------------------------------------------------------------------------
# Generic declarations for the S4 methods implemented further down this file.
setGeneric('getWidget', signature='obj', function(obj) standardGeneric('getWidget'))
setGeneric('shinyOutput', signature='obj', function(obj) standardGeneric('shinyOutput'))
setGeneric('renderWidget', signature='obj', function(obj) standardGeneric('renderWidget'))
# NOTE(review): no getPdbID method appears in this file.
setGeneric('getPdbID', signature='obj', function(obj) standardGeneric('getPdbID'))
# NOTE(review): this generic is later overwritten by the plain function
# fit(session, htmlContainer) defined below -- confirm which is intended.
setGeneric('fit', signature='obj', function(obj) standardGeneric('fit'))
#------------------------------------------------------------------------------------------------------------------------
#' NglWidget
#'
#'
#' @export
#'
#' @param pdbID character
#' @param componentOptions list of lists, specifying named components of the molecular structure
#' @param htmlContainer character, the name of the DOM element, typically a div
#' @param width integer initial width of the widget.
#' @param height integer initial height of the widget.
#'
#' @return a reference to an htmlwidget.
#'
# Construct an NglWidget S4 object wrapping an htmlwidget for the NGL viewer.
# pdbID: structure id; htmlContainer: DOM element id; componentOptions: option
# lists for the structure components; width/height: initial size in pixels;
# quiet: suppress the constructor trace line.
NglWidget <- function(pdbID, htmlContainer, componentOptions=list(),
                      width = NA_integer_, height = NA_integer_, quiet=TRUE)
{
   # BUG FIX: `quiet` was accepted but never consulted; honor it for the trace.
   if (!quiet) {
      printf("--- ~/github/nglShinyS4/R/NglWidget s4 ctor")
   }
   widget <- htmlwidgets::createWidget(
      name = 'NGL',
      # BUG FIX: the original passed the bare symbol `options` -- i.e. the base
      # R function -- as the widget payload; the component options were clearly
      # intended here.
      componentOptions,
      width = width,
      height = height,
      package = 'nglShiny',
      elementId = htmlContainer
      )
   # Mutable state lives in an environment (reference semantics), so methods
   # can update the widget without copying the S4 object.
   state <- new.env(parent=emptyenv())
   state[["widget"]] <- widget
   obj <- .NglWidget(pdbID=pdbID,
                     htmlContainer=htmlContainer,
                     width=width,
                     height=height,
                     componentOptions=componentOptions,
                     state=state,
                     quiet=quiet)
   obj

} # NglWidget constructor
#----------------------------------------------------------------------------------------------------
# Accessor: the htmlwidget instance kept in the object's state environment.
setMethod('getWidget', 'NglWidget',
    function(obj) {
       obj@state[["widget"]]
       })
#----------------------------------------------------------------------------------------------------
#' Standard shiny ui rendering construct
#'
#' @param obj an NglWidget instance
#' @return a reference to an htmlwidget
#'
#' @examples
#' \dontrun{
#' mainPanel(shinyOutput(nglWidget)), width=10)
#' }
#'
#' @aliases NglWidgetOutput
#' @rdname NglWidgetOutput
#'
#' @export
#'
setMethod('shinyOutput', 'NglWidget',
    function(obj) {
       # BUG FIX: the widget is registered by createWidget() under name "NGL"
       # in package "nglShiny" (see the constructor above); shinyWidgetOutput
       # must use the same name/package pair or the JavaScript binding is
       # never found. The original passed "NglWidget" for both.
       htmlwidgets::shinyWidgetOutput(obj@htmlContainer, 'NGL', obj@width, obj@height,
                                      package = 'nglShiny')
       })
#----------------------------------------------------------------------------------------------------
#' More shiny plumbing - an NglWidget wrapper for htmlwidget standard rendering operation
#'
#' @param expr an expression that generates an HTML widget.
#' @param env environment in which to evaluate expr.
#' @param quoted logical specifies whether expr is quoted ("useuful if you want to save an expression in a variable").
#'
#' @return not sure
#'
#' @aliases renderWidget
#' @rdname renderWidget
#'
#' @export
#'
# Shiny server-side rendering wrapper for the widget.
# NOTE(review): `env` is captured here (the method's caller) and handed to
# shinyRenderWidget with quoted = TRUE, yet the first argument is the
# already-evaluated widget rather than a quoted expression. This looks
# inconsistent with the shinyRenderWidget(expr, outputFunction, env, quoted)
# contract -- confirm against a working app before changing.
setMethod('renderWidget', 'NglWidget',
   function(obj) {
     env = parent.frame()
     htmlwidgets::shinyRenderWidget(getWidget(obj), shinyOutput(obj), env, quoted = TRUE)
     })
#----------------------------------------------------------------------------------------------------
#' Set zoom and center so that the current model nicely fills the display.
#'
#' @param session a Shiny server session object.
#' @param htmlContainer a character string used to identify the NglWidget instance, the id of html element
#'
#' @examples
#' \dontrun{
#' fit(session)
#'}
#'
#' @aliases fit
#' @rdname fit
#'
#'
#' @export
#'
# Ask the browser-side widget (identified by htmlContainer) to zoom/center so
# the current model fills the display.
fit <- function(session, htmlContainer)
{
   payload <- list(htmlContainer = htmlContainer)
   session$sendCustomMessage("fit", message = payload)

} # fit
#----------------------------------------------------------------------------------------------------
# Send a "setRepresentation" message (e.g. a representation name) to the
# browser-side widget handler.
setRepresentation <- function(session, rep)
{
   payload <- list(rep)
   session$sendCustomMessage("setRepresentation", payload)

} # setRepresentation
#----------------------------------------------------------------------------------------------------
#' Using the specified representation and colorScheme, display the portion of selection
#'
#' @param session a Shiny server session object.
#' @param representation todo
#' @param selection todo
#' @param colorScheme todo
#' @param name character string, used for subsequent show/hide
#'
#' @examples
#' \dontrun{
#' showSelection(session, "cartoon", "helix", "residueIndex")
#'}
#'
#' @aliases showSelection
#' @rdname showSelection
#'
#' @export
#'
# Display `selection` using the given representation and color scheme, under
# `name` (which allows later show/hide via setVisibility()).
showSelection <- function(session, representation, selection, name, colorScheme="residueIndex")
{
   payload <- list(
      representation = representation,
      selection = selection,
      colorScheme = colorScheme,
      name = name
   )
   session$sendCustomMessage("showSelection", payload)

} # showSelection
#----------------------------------------------------------------------------------------------------
#' hide or show the named selection
#'
#' @param session a Shiny server session object.
#' @param representationName a previously assigned character string
#'
#' @examples
#' \dontrun{
#' setVisibility(session, "chromaphore", FALSE)
#'}
#'
#' @aliases setVisibility
#' @rdname setVisibility
#'
#' @export
#'
# Show or hide a previously named representation in the browser-side widget.
setVisibility <- function(session, representationName, newVisibilityState)
{
   payload <- list(
      representationName = representationName,
      newState = newVisibilityState
   )
   session$sendCustomMessage("setVisibility", payload)

} # setVisibility
#----------------------------------------------------------------------------------------------------
# Change the widget's color scheme.
# NOTE(review): this sends the "setRepresentation" custom message -- the same
# one setRepresentation() sends -- rather than a dedicated "setColorScheme"
# message. Looks like a copy/paste slip; confirm against the JavaScript
# handler names before changing.
setColorScheme <- function(session, newColorScheme)
{
  session$sendCustomMessage("setRepresentation", list(newColorScheme))

} # setColorScheme
#----------------------------------------------------------------------------------------------------
|
c02b971ae9cffbc7f3a81d4dc35f8e58de1c806c
|
4cf1e9eb8178a3c30575f13c6e889b40d758a709
|
/cachematrix.R
|
d59c7b320f6f4a27deca787b5f1e5ae0094e9b33
|
[] |
no_license
|
jgbarberena/ProgrammingAssignment2
|
84885975da2f7ee5e939718aeced352d44af8485
|
ead374e6ce0bb71e91a1e6270abd6f8dd39da576
|
refs/heads/master
| 2021-01-09T08:08:40.071613
| 2014-11-18T20:08:01
| 2014-11-18T20:08:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,364
|
r
|
cachematrix.R
|
## makeCacheMatrix: wrap a matrix together with a cache for its inverse.
## Returns a list of four closures sharing state:
##   set(y)    -- replace the stored matrix and clear the cached inverse
##   get()     -- return the stored matrix
##   setinv(i) -- record a computed inverse in the cache
##   getinv()  -- return the cached inverse, or NULL if none has been stored
makeCacheMatrix <- function(x = matrix()) {
        inverse_cache <- NULL
        set <- function(y) {
                x <<- y
                # A new matrix invalidates any previously cached inverse.
                inverse_cache <<- NULL
        }
        get <- function() {
                x
        }
        setinv <- function(solve) {
                inverse_cache <<- solve
        }
        getinv <- function() {
                inverse_cache
        }
        list(
                set = set,
                get = get,
                setinv = setinv,
                getinv = getinv
        )
}
## cacheSolve: inverse of a cache-matrix built by makeCacheMatrix(). The first
## call computes the inverse with solve() and stores it via setinv(); later
## calls return the cached copy until set() replaces the matrix. Usage:
##   m <- makeCacheMatrix(rbind(c(a, b), c(c, d)))  # must be invertible
##   cacheSolve(m)   # computes and caches the inverse
##   cacheSolve(m)   # served from the cache
cacheSolve <- function(x, ...) {
        cached <- x$getinv()
        if (!is.null(cached)) {
                message("getting cached data")
                return(cached)
        }
        message("calculating matrix inverse")
        fresh <- solve(x$get())
        x$setinv(fresh)
        fresh
}
|
cc691c8b1786c8296a5f38cdf7460476157ec6de
|
e6d0aa42a4e601e8acf1bb5838668e4f0c906f9e
|
/plot3.R
|
1973c7800be86aec4c90e8745e550dd4198c410f
|
[] |
no_license
|
DylanLennard/Coursera_JHU_EDA_Final_Project
|
886d47dd4ff65fe522f44613a38838f70ce3de12
|
ae0e842304c44a0f38c773770a7fb24d7ac0ab21
|
refs/heads/master
| 2021-01-12T14:08:46.897859
| 2016-10-05T14:07:46
| 2016-10-05T14:07:46
| 69,757,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 891
|
r
|
plot3.R
|
### plot3 ###
# EDA course project, plot 3: total PM2.5 emissions in Baltimore City
# (fips 24510) by source type, one dodged bar group per year.
# NOTE(review): setwd() to a personal path makes this script non-portable.
setwd("~/Desktop/Statistical_Programming/Coursera/Exploratory_Data_Analysis/EDA_Final_Project")
library("readr")
library("tidyverse")
library("data.table")
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
fileNames <- c("summarySCC_PM25.rds", "Source_Classification_Code.rds")
# Download and unpack the data only if either RDS file is missing.
if(!file.exists(fileNames[1]) || !file.exists(fileNames[2])){
  download.file(url, destfile = "project.zip", method = 'curl')
  unzip("project.zip")
  file.remove("project.zip")
}
NEI <- readRDS(fileNames[1])
SCC <- readRDS(fileNames[2])  # loaded but unused in this particular plot
# Total emissions per year and source type, Baltimore City only.
plot3DF <- NEI %>%
  group_by(year, type) %>%
  filter(fips == "24510") %>%
  summarise(Total_Emissions = sum(Emissions)) %>%
  arrange(year, type)
#order from nonpoint, point, non-road, on-road
# NOTE(review): if this script is run via source(), the ggplot object below is
# not auto-printed, and no png device is opened -- confirm how the figure gets
# saved.
ggplot(aes(year, Total_Emissions, fill = type), data = plot3DF) +
  geom_bar(stat = 'identity', position = 'dodge')
|
95edaefafac0155884af99b7b1497368e75a37b2
|
4f038917144f89bcdb949346c92ae90782ab6f72
|
/man/CONST_LOGIN_TYPES.Rd
|
656f810b9cda438e42dbe7c9d795beb238ce0a51
|
[
"MIT"
] |
permissive
|
isabella232/rauth0
|
a4df725b2eb1f0f351140e878e419ff44d9e862c
|
07972e009ed95e1d7980de32ba8ef54b6ff97c5f
|
refs/heads/master
| 2023-06-09T14:00:41.898109
| 2021-07-01T08:58:08
| 2021-07-01T08:58:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 376
|
rd
|
CONST_LOGIN_TYPES.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variables.R
\docType{data}
\name{CONST_LOGIN_TYPES}
\alias{CONST_LOGIN_TYPES}
\title{Login types}
\format{An object of class \code{character} of length 9.}
\usage{
CONST_LOGIN_TYPES
}
\description{
Types of events which count as active users for usage consumption, as a vector
}
\keyword{datasets}
|
79ec78f63f678cf895e9d907ce952d41dddc97fb
|
0382fe6ebf899ce56eecfdb7f234a477e7c26800
|
/scripts/CRSeafood Functions-example.R
|
15555da00b1317eb2397c24746ac6d6cf837f3b6
|
[] |
no_license
|
fishmgt/fishmgt.github.io
|
a3eaa117bd6292f99c7fd22797720ee8befa2a0f
|
e0f595ed9aaa4d427138e9e3a03b6b78056c74e0
|
refs/heads/master
| 2016-08-11T05:30:05.804920
| 2016-03-29T01:25:50
| 2016-03-29T01:25:50
| 49,733,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,601
|
r
|
CRSeafood Functions-example.R
|
#===================================================================
# Using optim in R to fit a logistic growth model to Costa Rican snapper
# 'catches.csv' -> gives regional (Guanacaste) catches between 1990 - 2013
# 'CPUE.csv' -> gives local catch per unit effort (number caught per trip) between 2007 - 2013
# Modified by Kat Millage from a script written by Trevor A. Branch tbranch@uw.edu
# Last revised 2/2/16
#====================================================================
#===PART 1=======================================
# Loading packages
#================================================
library(ggplot2)
library(plyr)
library(dplyr)
library(tidyr)
#===PART 2=======================================
# Logistic growth equation, takes parameters
# Uses CPUE data to calculate NLL for those years
# N0 (numbers in 1990), r (rate of increase), K (carrying capacity), startyear and endyear
#================================================
# Negative log-likelihood of a logistic (Schaefer) biomass-dynamics model fit
# to CPUE observations with lognormal error.
#
# par:       c(r, K, q) -- growth rate, carrying capacity, catchability.
# catches:   data frame; column 1 = Year, column 2 = catch, one row per year.
# CPUE:      data frame; column 1 = Year, column 2 = observed CPUE, column 3 = CV.
# plot:      if TRUE, draw the biomass and catch trajectories.
# init_frac: initial biomass as a fraction of K. Generalizes the former
#            hard-coded 0.8; the default preserves the original behavior.
#
# Returns the NLL summed over the years that have a CPUE observation.
getNLL <- function(par, catches, CPUE, plot=FALSE, init_frac=0.8) {
   r <- par[1]
   K <- par[2]
   q <- par[3]
   start.yr <- min(catches$Year)
   end.yr <- max(catches$Year)
   years <- start.yr:end.yr
   nyr <- length(years)

   B <- vector(mode = 'numeric', length = nyr)
   B[1] <- K * init_frac
   NLL <- vector(mode = 'numeric', length = nyr)
   obsB <- vector(mode = 'numeric', length = nyr)
   obsCPUE <- vector(mode = 'numeric', length = nyr)

   # Logistic growth projection; max() keeps the stock from collapsing to 0.
   for (i in seq_len(nyr - 1)) {
      B[i+1] <- max(0.001, B[i] + r*B[i]*(1 - B[i]/K) - catches[i, 2])
   }

   # Lognormal observation likelihood for every model year with a CPUE record.
   for (i in seq_len(nyr)) {
      current.yr <- years[i]
      if (current.yr %in% CPUE[, 1]) {
         obsCPUE[i] <- CPUE[CPUE[, 1] == current.yr, 2]
         obsCV <- CPUE[CPUE[, 1] == current.yr, 3]
         predCPUE <- B[i] * q
         NLL[i] <- (log(obsCPUE[i]/predCPUE))^2 / (2*obsCV^2)
         obsB[i] <- obsCPUE[i]/q   # implied "observed" biomass, for plotting
      } else {
         NLL[i] <- NA
         obsB[i] <- NA
      }
   }

   # Year-by-year trajectory, kept for the diagnostic plot below.
   Results <- data.frame(Year = years, Catch = catches[, 2], Biomass = B, ObsB = obsB, Like = NLL)
   NLLSum <- sum(Results$Like, na.rm = TRUE)

   # Optional plot: predicted biomass (line) vs implied observed biomass
   # (points), plus the catch series.
   if (plot == TRUE) {
      par(mfrow=c(2,1), mar=c(2,1,1,1), oma=c(3,3,1,1))
      plot(x=years, y=Results$Biomass, lwd=2, col="black", type="l", yaxs="i",
           ylim=c(0, 1.05*max(Results$Biomass)), ylab = "Biomass (kg)", xlab = "Year")
      points(x=years, y=Results$ObsB, col="black", pch=16)
      mtext(side=2, "Biomass (kg)", line=2.5)
      plot(x=years, y=Results$Catch, lwd=2, col="blue", type="l", yaxs="i",
           ylim=c(0, 1.1*max(Results$Catch)), xlab = "Year", ylab = 'Catch (kg)')
      mtext(side=2, "Catch (kg)", line=2.5)
   }

   # BUG FIX: the original evaluated invisible(Results) immediately before
   # return(NLLSum); that value was simply discarded, so the line (and its
   # comment claiming to return the trajectory) was dead code. Removed.
   return(NLLSum)
}
#===PART 3=======================================
# Optimizing parameters for model
#================================================
# Fit the logistic model by minimizing getNLL over par = c(r, K, q).
# NOTE(review): `catches` and `CPUE` must already exist in the workspace; this
# script chunk does not load them.
# NOTE(review): supplying lower/upper without method = "L-BFGS-B" makes optim()
# switch methods with a warning -- consider naming the method explicitly.
BestModel <- optim(par = c(.5,1000*max(catches$catch, na.rm = T),.00001),
                   fn = getNLL,
                   CPUE = CPUE,
                   catches = catches,
                   plot=F,
                   lower = c(0.1,max(catches$catch, na.rm=T),1e-6) ,
                   upper = c(3,1000*max(catches$catch, na.rm = T),1),
                   control=list(maxit=20000))

#Possibles <- function()

# NOTE(review): exploratory leftover. .05:.6 and 0.0000001:1 are step-1
# sequences (a single value each) while 1000000:10000000 enumerates every
# integer K, so this is not a useful parameter grid; and BestModel[i] indexes
# the optim() result list, not candidate parameter sets. Consider removing.
possibles <- expand.grid(r = .05:.6,K = 1000000:10000000,q = 0.0000001:1)
for (i in 1:dim(possibles)[1])
{
  Parameters <- BestModel[i]
}
#===PART 3=======================================
# Making likelihood profiles for our parameters
#================================================
# NLL of the logistic model with r held fixed -- the objective used by
# Rprofile() below to build a likelihood profile over r. Duplicates getNLL()'s
# population dynamics, with par = c(K, q) and r passed separately.
Rprofile.NLL <- function(par, catches, CPUE, r) {
  K <- par[1]
  q <- par[2]
  yrs <- min(catches$Year):max(catches$Year)
  n_yrs <- length(yrs)

  biomass <- numeric(n_yrs)
  biomass[1] <- 0.8 * K
  nll_by_year <- rep(NA_real_, n_yrs)

  # Logistic biomass projection; floor at 0.001 prevents a zero stock.
  for (i in seq_len(n_yrs - 1)) {
    surplus <- r * biomass[i] * (1 - biomass[i] / K)
    biomass[i + 1] <- max(0.001, biomass[i] + surplus - catches[i, 2])
  }

  # Lognormal observation likelihood for years with a CPUE record; other
  # years contribute nothing (NA, dropped by na.rm below).
  for (i in seq_len(n_yrs)) {
    if (yrs[i] %in% CPUE[, 1]) {
      obs <- CPUE[CPUE[, 1] == yrs[i], 2]
      cv <- CPUE[CPUE[, 1] == yrs[i], 3]
      pred <- q * biomass[i]
      nll_by_year[i] <- (log(obs / pred))^2 / (2 * cv^2)
    }
  }

  sum(nll_by_year, na.rm = TRUE)
}
#Rprofile.NLL(par = c(7000000, 0.00001), catches, CPUE, r=0.25)
#optim(par = c(7000000, 0.00001), fn=Rprofile.NLL, catches = catches, CPUE = CPUE, method="L-BFGS-B", r=0.25, lower=c(1000000, 0.000001), upper=c(10000000, 0.1))
# Likelihood profile over r: for each candidate r in R.vec, minimize the NLL
# over (K, q) with L-BFGS-B, then plot the profile with a reference line 1.92
# NLL units above the minimum (chi-square 95% CI). Returns the minimized NLLs.
# NOTE(review): relies on `catches` and `CPUE` existing in the calling
# environment (they are not parameters).
Rprofile <- function(R.vec, lower=c(1000000, 0.000001), upper=c(10000000, 0.1)) {
  nR <- length(R.vec)
  saved.NLL <- numeric(nR)
  for (i in seq_len(nR)) {
    x <- optim(par = c(7000000, 0.00001),
               fn=Rprofile.NLL,
               catches = catches,
               CPUE = CPUE,
               method="L-BFGS-B",
               r=R.vec[i],
               # BUG FIX: the bounds were hard-coded inside this call, silently
               # ignoring the function's `lower`/`upper` arguments (the
               # defaults reproduce the old values).
               lower=lower,
               upper=upper)
    print(paste("Run",i,"of",nR))
    saved.NLL[i] <- x$value
  }
  plot(x=R.vec, y=saved.NLL, type="l",xaxs="i", las=1, ylab="NLL", xlab="r values",
       lwd=2, col="blue")
  abline(h=min(saved.NLL), lty=2, col="gray50")
  abline(h=min(saved.NLL)+1.92, lty=2, col="gray50") #95% CI
  return(saved.NLL)
}
# Profile over K: NLL of the logistic model with K held fixed -- the objective
# used by Kprofile() below. Mirrors Rprofile.NLL(), with par = c(r, q) and K
# passed separately.
Kprofile.NLL <- function(par, catches, CPUE, K) {
  r <- par[1]
  q <- par[2]
  yrs <- min(catches$Year):max(catches$Year)
  n_yrs <- length(yrs)

  biomass <- numeric(n_yrs)
  biomass[1] <- 0.8 * K
  nll_by_year <- rep(NA_real_, n_yrs)

  # Logistic biomass projection; floor at 0.001 prevents a zero stock.
  for (i in seq_len(n_yrs - 1)) {
    surplus <- r * biomass[i] * (1 - biomass[i] / K)
    biomass[i + 1] <- max(0.001, biomass[i] + surplus - catches[i, 2])
  }

  # Lognormal observation likelihood for years with a CPUE record; other
  # years contribute nothing (NA, dropped by na.rm below).
  for (i in seq_len(n_yrs)) {
    if (yrs[i] %in% CPUE[, 1]) {
      obs <- CPUE[CPUE[, 1] == yrs[i], 2]
      cv <- CPUE[CPUE[, 1] == yrs[i], 3]
      pred <- q * biomass[i]
      nll_by_year[i] <- (log(obs / pred))^2 / (2 * cv^2)
    }
  }

  sum(nll_by_year, na.rm = TRUE)
}
# Likelihood profile over K: for each candidate K in K.vec, minimize the NLL
# over (r, q) with L-BFGS-B, then plot the profile with a reference line 1.92
# NLL units above the minimum (chi-square 95% CI). Returns the minimized NLLs.
# NOTE(review): relies on `catches` and `CPUE` existing in the calling
# environment (they are not parameters).
Kprofile <- function(K.vec, lower=c(0.05, 0.000001), upper=c(0.6, 0.1)) {
  nR <- length(K.vec)
  saved.NLL <- numeric(nR)
  for (i in seq_len(nR)) {
    # BUG FIX: the starting values were c(7000000, 0.00001) -- a (K, q) start
    # copied from Rprofile() -- but this objective's par is (r, q), and 7e6
    # lies far outside the r bounds [0.05, 0.6], breaking the bounded search.
    # Start inside the box instead.
    y <- optim(par = c(0.3, 0.001),
               fn=Kprofile.NLL,
               catches = catches,
               CPUE = CPUE,
               method="L-BFGS-B",
               K=K.vec[i],
               # BUG FIX: bounds were hard-coded inside this call, silently
               # ignoring the function's `lower`/`upper` arguments (the
               # defaults reproduce the old values).
               lower=lower,
               upper=upper)
    print(paste("Run",i,"of",nR))
    saved.NLL[i] <- y$value
  }
  plot(x=K.vec, y=saved.NLL, type="l",xaxs="i", las=1, ylab="NLL", xlab="K values",
       lwd=2, col="red")
  abline(h=min(saved.NLL), lty=2, col="gray50")
  abline(h=min(saved.NLL)+1.92, lty=2, col="gray50") #95% CI
  return(saved.NLL)
}
##===PART 4=======================================
# Plotting the catch and biomass projections
#================================================
# Reconstruct the historical biomass/effort trajectory under par = c(r, K, q),
# then project biomass and catch forward to proj.yr.end under a constant
# future effort E.proj, optionally plotting both trajectories.
# NOTE(review): B[1] is hard-coded at 0.8*K, matching getNLL().
Project.Model <- function(par, catches, CPUE, proj.yr.end, E.proj, plot=T) {
  r <- par[1]
  K <- par[2]
  q <- par[3]
  start.yr <- min(catches$Year)
  end.yr <- max(catches$Year)
  years <- start.yr:end.yr
  nyr <- length(years)

  B <- vector(mode = 'numeric',length = nyr)
  B[1] <- K*0.8
  E <- vector(mode = 'numeric', length=nyr)
  obsB <- vector(mode = 'numeric', length=nyr)
  obsCPUE <- vector(mode = 'numeric', length=nyr)

  #Logistic growth model fitting. max() ensures it does not go to 0
  for(i in 1:(nyr-1)){
    B[i+1] = max(0.001, B[i] + r*B[i]*(1-B[i]/K) - catches[i,2])
  }
  # Implied historical effort from the catch equation C = q*E*B.
  for(i in 1:(nyr)){
    E[i] = catches[i,2]/(q*B[i])
  }

  #Finds current year of the model and checks to see if there is CPUE for that year
  for (i in 1:nyr) {
    current.yr <- years[i]
    #If there is CPUE data in that year then NLL is calculated
    if (current.yr %in% CPUE[,1]) {
      obsCPUE[i] <- CPUE[CPUE[,1]==current.yr,2]
      obsCV <- CPUE[CPUE[,1]==current.yr,3]
      predCPUE <- B[i]*q
      obsB[i] <- obsCPUE[i]/q
      # NOTE(review): predC is assigned but never used.
      predC <- predCPUE
    }
    else {obsB[i]=NA}
  }

  Results1 <- data.frame(Year = years, Catch = catches[,2], Biomass = B, ObsB = obsB, Effort = E)

  #Combines predicted biomass and calculated NLL for each year into a data frame
  proj.yr.start <- max(catches$Year)
  proj.years <- proj.yr.start:proj.yr.end
  proj.nyr <- length(proj.years)
  B.proj <- vector(mode = 'numeric',length = proj.nyr)
  B.proj[1] <- B[nyr]
  C.proj <- vector(mode = 'numeric', length = proj.nyr)
  obsCPUE <- vector(mode = 'numeric', length=proj.nyr)

  #Logistic growth model fitting. max() ensures it does not go to 0
  # NOTE(review): this projection loop subtracts C.proj[i], which is still all
  # zeros here -- the projected catches are only computed in the next loop, so
  # the biomass projection effectively ignores future catch. Confirm whether
  # the catch assignment should happen inside this loop instead.
  for(i in 1:(proj.nyr-1)){
    B.proj[i+1] = max(0.001, B.proj[i] + r*B.proj[i]*(1-B.proj[i]/K) - C.proj[i])
  }
  # Projected catch under constant effort: C = q * E.proj * B.
  for(i in 1:(proj.nyr)){
    C.proj[i] = q*E.proj*B.proj[i]
  }

  Results2 <- data.frame(Year = proj.years, Catch = C.proj, Biomass = B.proj)

  #PLOTTING PART
  #if (plot==T) {
  if (plot==T) {
    xrange <- start.yr:proj.yr.end
    # NOTE(review): xrange spans history + projection, but Results1$Biomass and
    # Results2$Biomass each cover only part of that span -- these x/y lengths
    # differ, which plot()/lines() reject. Likely `years` and `proj.years`
    # should be the respective x vectors.
    plot(x=xrange,y=Results1$Biomass, lwd=2,col="black",type="l", yaxs="i",
         ylim=c(0,1.2*max(Results1$Biomass)), ylab = "Biomass (kg)", xlab = "Year")
    lines(x=xrange, y=Results2$Biomass, lwd=2, col="blue")

    # par(mfrow=c(2,1), mar=c(2,1,1,1), oma=c(3,3,1,1))
    #plot(x=years,y=Results1$Biomass, lwd=2,col="black",type="l", yaxs="i",
    #     ylim=c(0,1.05*max(Results1$Biomass)), ylab = "Biomass (kg)", xlab = "Year")
    #mtext(side=2,"Biomass (kg)", line=2.5)
    #plot(x=proj.years,y=Results2$Biomass, lwd=2,col="blue",type="l",yaxs="i",
    #     ylim=c(0,1.1*max(Results2$Biomass)), xlab = "Year", ylab = 'Catch (kg)')
    #mtext(side=2,"Catch (kg)", line=2.5)
  }
  invisible(Results1) #return the biomass and NLL
}
|
b7a7d1ddc8eaaa271073efdd8ff174998b17a91f
|
aaf8222e2e7c1ca3480092387472ed539e79985a
|
/man/JiraTime.Rd
|
5458731bc087c5af9338dc4b4d1b90772ce4d8e3
|
[] |
no_license
|
M3SOulu/MozillaApacheDataset-Rpackage
|
57e7028f2d2ee9a6a672a9775f20bf40af9e4f4a
|
3644dbd266325309be4bfdf1ac926ae8859ebd19
|
refs/heads/master
| 2022-06-23T11:56:58.580415
| 2022-06-20T11:03:39
| 2022-06-20T11:03:39
| 238,914,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 429
|
rd
|
JiraTime.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jira.R
\name{JiraTime}
\alias{JiraTime}
\title{Jira Time}
\usage{
JiraTime(table, name)
}
\arguments{
\item{table}{data.frame containing raw time fields}
\item{name}{Name of the column in which to add the parsed time.}
}
\value{
The table with the time parsed as POSIXct added as a
column.
}
\description{
Parses Jira raw time fields as POSIXct.
}
|
71c0ac0df78222c817e348985bdbbcfb59acaadc
|
0a3111aa5626916b3517627c1f280f082a12db3d
|
/tests/test-get_aligns.R
|
4c85bf93e53e9480da9d90c22d0d338724683020
|
[] |
no_license
|
naszim/homework-02
|
27c37c9310336dbea083206e31a578f6a3dc5acc
|
5a8b541d5c61a416e8651cb6be460b50e6c11cc5
|
refs/heads/master
| 2020-05-25T14:44:16.684720
| 2017-03-15T20:01:24
| 2017-03-15T20:01:24
| 84,941,013
| 0
| 0
| null | 2017-03-14T11:24:20
| 2017-03-14T11:24:20
| null |
UTF-8
|
R
| false
| false
| 825
|
r
|
test-get_aligns.R
|
## Load testthat -----------------------------------------------------------------
library(testthat)
## get_aligns returns a zero-length vector when a number is passed as the argument
test_that("get_aligns does not work with numeric argument",
{expect_identical(get_aligns(alignment = 5), character(0))
})
## get_aligns returns a character vector as its result
test_that("get_aligns returns a character vector",
{expect_type(get_aligns(alignment = "Good Characters"), "character")
})
## get_aligns returns a vector longer than 100 elements for Neutral Characters
test_that("get_aligns returns more than 100 cases for Neutral Characters",
{expect_gt(length(get_aligns(alignment = "Neutral Characters")), 100)
})
## END OF EXERCISE ----------------------------------------------------------------
|
0987b6dbc7938a0708b3648eb29a7c318cb16d9c
|
3f6dd3134f16de2f08aa6ec52e772d7e5c5422c0
|
/tests/testthat/test_chisquare_ndist.R
|
7d34e3100a4e314a1661d335bb54fd7113fb8989
|
[
"MIT"
] |
permissive
|
imbi-heidelberg/blindrecalc
|
1ee7045d042f20b2d3392753838fb41a41a2017f
|
b6df80d1ff7b9605fbb6ee1adc38a7aac32f08e5
|
refs/heads/master
| 2022-11-24T15:18:40.995647
| 2022-11-22T14:30:31
| 2022-11-22T14:30:31
| 228,565,854
| 8
| 2
|
NOASSERTION
| 2022-11-22T14:30:33
| 2019-12-17T08:10:34
|
R
|
UTF-8
|
R
| false
| false
| 1,569
|
r
|
test_chisquare_ndist.R
|
# Unit tests for n_dist() with a ChiSquare design: argument validation plus
# consistency of the summary table / plot output for vector-valued inputs.
context("test n_dist for ChiSquare test")

test_that("error messages are thrown correctly", {
  design <- setupChiSquare(alpha = 0.025, beta = 0.2, r = 1, delta = 0.1)
  # a nuisance proportion above 1 must be rejected
  expect_error(n_dist(design, n1 = 20, nuisance = 1.1, TRUE))
  design2 <- setupChiSquare(alpha = 0.025, beta = 0.2, r = 2, delta = 0.1, n_max = 301)
  # invalid argument combinations for exact allocation must error
  expect_error(n_dist(design2, 21, 0.5, TRUE, "exact"))
  design2@n_max <- 300
  expect_error(n_dist(design2, 20, 0.5, TRUE, "exact"))
  # supplying both vector-valued n1 and vector-valued nuisance must error
  expect_error(n_dist(design2, c(21, 30), c(0.6, 0.7), TRUE, "approximate"))
})

test_that("n_dist works for multiple n1 values", {
  design <- setupChiSquare(alpha = 0.025, beta = 0.2, r = 1, delta = 0.2)
  dists <- n_dist(design, n1 = c(20, 40), nuisance = 0.4, summary = FALSE)
  dist_table <- n_dist(design, n1 = c(20, 40), nuisance = 0.4, summary = TRUE)
  # the summary table must match summaries of the raw distributions
  expect_equal(as.vector(dist_table), as.vector(c(summary(dists$`n1 = 20`), summary(dists$`n1 = 40`))))
  dist_plot <- n_dist(design, n1 = c(20, 40), nuisance = 0.4, allocation = "approximate",
                      summary = FALSE, plot = TRUE)
  expect_equal(class(dist_plot), "list")
})

test_that("n_dist works for multiple nuisance values", {
  design <- setupChiSquare(alpha = 0.025, beta = 0.2, r = 1, delta = 0.2)
  dists <- n_dist(design, n1 = 30, nuisance = c(0.35, 0.4), summary = FALSE)
  dist_table <- n_dist(design, n1 = 30, nuisance = c(0.35, 0.4), summary = TRUE)
  expect_equal(as.vector(dist_table), as.vector(c(summary(dists$`p = 0.35`), summary(dists$`p = 0.4`))))
  dist_plot <- n_dist(design, n1 = 30, nuisance = c(0.35, 0.4), allocation = "approximate",
                      summary = FALSE, plot = TRUE)
  expect_equal(class(dist_plot), "list")
})
|
d7982db0c0227b190202d394a563078de93933dc
|
222ddcb4176c06aa122588179cb5395652653d2d
|
/archive/simulations/TMLE_ATT.R
|
14f0705decdf57a76247d254f90734bafd512832
|
[
"MIT"
] |
permissive
|
ck37/acic-tmle-2018
|
f0b1bc9732b10edfa4809f6d5e1729b2e8c338b8
|
471bcdf1e46bea804d62a1c4e1a1d92ef47ff32d
|
refs/heads/master
| 2022-01-08T14:37:52.497376
| 2019-01-23T20:09:11
| 2019-01-23T20:09:11
| 98,351,394
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,508
|
r
|
TMLE_ATT.R
|
# Susan Gruber and Mark van der Laan
# October 26, 2015
# Supplemental Materials for One-Step Targeted Minimum Loss-based Estimation
# Based on Universal Least Favorable One- Dimensional Submodels.
# R source code for
# - iterative TMLE for ATT parameter using one epsilon
# - one-Step TMLE for ATT parameter
# - Simulation Study 1 from the paper
# - Simulation Study 2 from the paper
#--------------Iterative TMLE for ATT parameter (using one epsilon) -------------------------------
# Iterative TMLE of the Average Treatment effect on the Treated (ATT).
# Both the outcome regression Q and the propensity score g1W are fluctuated
# with a single shared epsilon per iteration, until |epsilon| <= tol or
# maxIter is reached.
#
# Arguments:
#   Y        binary outcome vector
#   A        binary treatment indicator vector
#   Q        n x 3 matrix with columns "QAW", "Q0W", "Q1W" (initial outcome fits)
#   g1W      initial propensity score estimates P(A=1|W)
#   tol      convergence tolerance on epsilon.
#            NOTE(review): the default `tol = tol` is self-referential and
#            errors if tol is not supplied -- effectively a required argument.
#   maxIter  maximum number of targeting iterations
#   gbounds, Qbounds  truncation bounds applied after each update
#
# NOTE(review): `q` is a free variable (not an argument and not defined in
# this function); presumably the treated fraction mean(A) set in the calling
# environment -- confirm before reuse (base R's `q` is the quit function).
#
# Returns a list: psi (ATT estimate), var.psi (influence-curve variance / n),
# conv (whether |epsilon| <= tol), iter (iterations used).
calcATT.iter <- function(Y, A, Q, g1W, tol = tol, maxIter=50, gbounds, Qbounds){
iter <- 0
epsilon <- Inf
# Clever covariate for the propensity-score fluctuation: centered
# difference of the predicted counterfactual outcomes.
calc_h2 <- function(Q){
psi <- mean((Q[,"Q1W"] - Q[, "Q0W"]) * g1W/q)
return(Q[,"Q1W"] - Q[,"Q0W"] - psi)
}
while(iter <= maxIter & (abs(epsilon) > tol)){
iter <- iter + 1
# Clever covariates for the outcome regression, by treatment arm
h1.A1 <- rep(1, length(Y))
h1.A0 <- -g1W/(1-g1W)
h1 <- h1.A1
h1[A == 0] <- h1.A0[A==0]
Q.off <- qlogis(Q[,"QAW"])
h2 <- calc_h2(Q)
g1W.off <- qlogis(g1W)
# Stack the outcome (Y on h1) and treatment (A on h2) regressions so a
# single epsilon is fit jointly via a no-intercept offset logistic model.
d <- data.frame(X = c(Y, A), offset = c(Q.off, g1W.off), h = c(h1, h2))
epsilon <- coef(glm(X ~ -1 + offset(offset) + h, data = d, family = "binomial"))
epsilon[is.na(epsilon)] <- 0
# Fluctuate Q and g1W on the logit scale, then truncate to the bounds
Q <- .bound(plogis(qlogis(Q) + epsilon * cbind(h1, h1.A0, h1.A1)), Qbounds)
g1W <- .bound(plogis(g1W.off + epsilon * h2), gbounds)
}
# Plug-in ATT estimate and its influence curve (IC)
psi <- mean((Q[,"Q1W"] - Q[, "Q0W"]) * g1W/q)
IC <- ( (A - (1-A)*g1W / (1-g1W))*(Y-Q[, "QAW"]) + A *(Q[,"Q1W"]-Q[,"Q0W"] - psi)) / mean(A)
return(list(psi = psi, var.psi = var(IC)/length(Y), conv = abs(epsilon) <= tol, iter=iter))
}
#-----------------------------------One-Step TMLE for ATT parameter ----------------------------------------
# One-step TMLE for the ATT. Instead of iteratively solving for epsilon,
# take fixed-size steps depsilon along the universal least favorable
# submodel until the empirical mean of the influence curve is near zero
# (|deriv| < 1/N), the joint loss stops decreasing, or max_iter is reached.
#
# Arguments:
#   Y, A, Q, g1W      as in calcATT.iter
#   depsilon          step size for the fluctuation parameter
#   max_iter          cap on the number of steps
#   gbounds, Qbounds  truncation bounds for g1W and Q
#   N                 sample-size scale for the stopping rule (defaults to length(Y))
#
# Returns a list with the ATT estimate (psi), the total fluctuation
# (epsilon), the influence curve (IC) and the Q matrix from the last
# accepted step.
oneStepATT <- function(Y, A, Q, g1W, depsilon, max_iter, gbounds, Qbounds, N=NULL){
n <- length(Y)
if (is.null(N)) N = n
# Marginal probability of treatment, P(A=1)
q <- mean(A)
# Joint negative log-likelihood of the outcome and treatment regressions
calcLoss <- function(Q, g1W){
-mean(Y * log(Q[,"QAW"]) + (1-Y) * log(1 - Q[,"QAW"]) + A * log(g1W) + (1-A) * log(1 - g1W))
}
psi.prev <- psi <- mean((Q[,"Q1W"] - Q[, "Q0W"]) * g1W/q)
H1.AW = (A -(1-A) * g1W / (1-g1W))/q
IC.prev <- IC.cur <- H1.AW* (Y-Q[, "QAW"]) + A*(Q[,"Q1W"]-Q[,"Q0W"] - psi)/q
deriv <- deriv.prev <- mean(IC.cur)
# Step in the direction that drives the IC mean toward zero
if (deriv > 0) { depsilon <- -depsilon}
loss.prev <- Inf
loss.cur <- calcLoss(Q, g1W)
# A non-finite initial loss disables the loop (loss.prev <= loss.cur)
if(is.nan(loss.cur) | is.na(loss.cur) | is.infinite(loss.cur)) {
loss.cur <- Inf
loss.prev <- 0
}
iter <- 0
while (loss.prev > loss.cur & iter < max_iter){
# Save the current state: if the next step increases the loss, these
# "prev" values are what gets returned.
IC.prev <- IC.cur
Q.prev <- Q
g1W.prev <- g1W
psi.prev <- psi
loss.prev <- loss.cur
iter = iter+1
deriv.prev=deriv
# Stop once the IC estimating equation is solved to within 1/N
if (abs(deriv.prev) < 1/N) break
# Clever covariates for the outcome regression (per treatment level)...
H1 <- cbind(HAW = A/q - (1-A) * g1W / (q * (1-g1W)),
H0W = - g1W/(q * (1-g1W)),
H1W = 1/q)
# ...and for the propensity score
H2 <- (Q[,"Q1W"]-Q[,"Q0W"]-psi)/q
# Take one depsilon step on the logit scale, truncating to the bounds
g1W <- .bound(plogis(qlogis(g1W)-depsilon*H2),gbounds)
Q <- .bound(plogis(qlogis(Q) - depsilon * H1), Qbounds)
psi <- mean((Q[,"Q1W"] - Q[, "Q0W"]) * g1W/q)
loss.prev <- loss.cur   # NOTE(review): redundant -- already assigned at the top of the loop
loss.cur <- calcLoss(Q, g1W)
IC.cur <- ((A - (1-A) * g1W / (1-g1W)) * (Y-Q[, "QAW"]) + A *(Q[,"Q1W"]-Q[,"Q0W"] - psi))/q
if(is.nan(loss.cur) | is.infinite(loss.cur) | is.na(loss.cur)) {loss.cur <- Inf}
deriv = mean(IC.cur)
print(psi.prev)   # NOTE(review): debugging output left in; prints every step
}
# NOTE(review): Q.prev/IC.prev are only assigned inside the loop; if the
# loop body never runs (non-finite initial loss), Q.prev is undefined here.
return(list(psi=psi.prev, epsilon = (iter-1)*depsilon,IC=IC.prev,
Q=Q.prev))
}
#------------- bound function --------------
.bound <- function(x, bds){
  # Truncate every element of x to the closed interval [min(bds), max(bds)];
  # elements already inside the interval (and object attributes such as
  # matrix dimensions) are left unchanged.
  upper <- max(bds)
  lower <- min(bds)
  x[x > upper] <- upper
  x[x < lower] <- lower
  x
}
# ---- Simulation example ----
# Simulate n observations: two confounders (W1, W2), a binary treatment A
# whose probability depends on the confounders, and a binary outcome Y whose
# logit is shifted by -0.4 under treatment.
n=1000
W1 = rnorm(n)
W2 = abs(rnorm(n))
A = rbinom(n,1,plogis(rnorm(n)+W1+W2))
Y = A*rbinom(n,1,plogis(W2+W1-.4*A))+(1-A)*rbinom(n,1,plogis(W2+W1))
X = data.frame(W1=W1,W2=W2,A=A)
# Counterfactual design matrices with treatment forced to 0 and to 1
X0 = X
X0$A=0
X1=X
X1$A=1
# Stacked prediction matrix: rows 1..n are the observed data, rows
# (n+1)..2n are the A=1 copies (X1), rows (2n+1)..3n are the A=0 copies (X0).
newX=rbind(X,X1,X0)
# Confounders only, for the propensity-score fit
W=X
W$A=NULL
library(SuperLearner)
# Outcome regression Q(A,W): fit on observed data, predict on stacked rows
Qkfit = SuperLearner(Y=Y, X=X, newX = newX, family = binomial(),
                     SL.library="SL.glm",method = "method.NNLS",
                     id = NULL, verbose = FALSE, control = list(), cvControl = list(),
                     obsWeights = NULL)
# Propensity score g1W = P(A=1|W)
gkfit = SuperLearner(Y=A, X=W, newX = W, family = binomial(),
                     SL.library="SL.glm",method = "method.NNLS",
                     id = NULL, verbose = FALSE, control = list(), cvControl = list(),
                     obsWeights = NULL)
g1W = gkfit$SL.predict
QAW = Qkfit$SL.predict[1:n]
# BUG FIX: newX = rbind(X, X1, X0) places the A=1 predictions in rows
# (n+1):(2n) and the A=0 predictions in rows (2n+1):(3n); the original code
# assigned these blocks to Q0W and Q1W respectively, swapping the
# counterfactual outcome estimates (and hence flipping the ATT's sign).
Q1W = Qkfit$SL.predict[(n+1):(2*n)]
Q0W = Qkfit$SL.predict[(2*n+1):(3*n)]
Q = cbind(QAW,Q0W,Q1W)
colnames(Q)=c("QAW","Q0W","Q1W")
# One-step TMLE for the ATT using the fitted initial estimators; report
# the total fluctuation applied.
res = oneStepATT(Y=Y, A=A, Q=Q, g1W=g1W, depsilon=.0001, max_iter=100000,
                 gbounds=c(1e-4,1-1e-4), Qbounds=c(1e-4,1-1e-4), N=NULL)
res$epsilon
|
6cac0ec5dcec030467c67ec34ef1375cd2eb675e
|
82c0113017734464b1b8d92f27b255368969155a
|
/R/cellassign.R
|
9a959cfb4c5f68a6e482f397d93b0c14ea125e6e
|
[
"Apache-2.0"
] |
permissive
|
kieranrcampbell/cellassign
|
030d16048b22ddd8b38f638a64e97f7f75e21af4
|
ea5329e2f6e343a58e7e4ac60289c40f391140a2
|
refs/heads/master
| 2020-06-16T01:16:28.983066
| 2020-04-14T13:33:02
| 2020-04-14T13:33:02
| 195,441,954
| 1
| 0
|
NOASSERTION
| 2019-11-25T12:43:46
| 2019-07-05T16:51:28
|
R
|
UTF-8
|
R
| false
| false
| 14,777
|
r
|
cellassign.R
|
#' Annotate cells to cell types using cellassign
#'
#' Automatically annotate cells to known types based
#' on the expression patterns of
#' a priori known marker genes.
#'
#' @param exprs_obj Either a matrix representing gene
#' expression counts or a \code{SummarizedExperiment}.
#' See details.
#' @param marker_gene_info Information relating marker genes to cell types.
#' See details.
#' @param s Numeric vector of cell size factors
#' @param min_delta The minimum log fold change a marker gene must
#' be over-expressed by in its cell type
#' @param X Numeric matrix of external covariates. See details.
#' @param B Number of bases to use for RBF dispersion function
#' @param shrinkage Logical - should the delta parameters
#' have hierarchical shrinkage?
#' @param n_batches Number of data subsample batches to use in inference
#' @param dirichlet_concentration Dirichlet concentration parameter for cell
#' type abundances
#' @param rel_tol_adam The change in Q function value (in pct) below which
#' each optimization round is considered converged
#' @param rel_tol_em The change in log marginal likelihood value (in pct)
#' below which the EM algorithm is considered converged
#' @param max_iter_adam Maximum number of ADAM iterations
#' to perform in each M-step
#' @param max_iter_em Maximum number of EM iterations to perform
#' @param learning_rate Learning rate of ADAM optimization
#' @param verbose Logical - should running info be printed?
#' @param return_SCE Logical - should a SingleCellExperiment be returned
#' with the cell
#' type annotations added? See details.
#' @param sce_assay The \code{assay} from the input
#' \code{SingleCellExperiment} to use: this assay
#' should always represent raw counts.
#' @param num_runs Number of EM optimizations to perform (the one with the maximum
#' log-marginal likelihood value will be used as the final).
#' @param threads Maximum number of threads used by the algorithm
#' (defaults to the number of cores available on the machine)
#'
#'
#'
#'
#' @importFrom methods is
#' @importFrom SummarizedExperiment assays
#'
#'
#' @details
#' \strong{Input format}
#' \code{exprs_obj} should be either a
#' \code{SummarizedExperiment} (we recommend the
#' \code{SingleCellExperiment} package) or a
#' cell (row) by gene (column) matrix of
#' \emph{raw} RNA-seq counts (do \strong{not}
#' log-transform or otherwise normalize).
#'
#' \code{marker_gene_info} should either be
#' \itemize{
#' \item A gene by cell type binary matrix, where a 1 indicates that a gene is a
#' marker for a cell type, and 0 otherwise
#' \item A list with names corresponding to cell types, where each entry is a
#' vector of marker gene names. These are converted to the above matrix using
#' the \code{marker_list_to_mat} function.
#' }
#'
#' \strong{Cell size factors}
#' If the cell size factors \code{s} are
#' not provided they are computed using the
#' \code{computeSumFactors} function from
#' the \code{scran} package.
#'
#' \strong{Covariates}
#' If \code{X} is not \code{NULL} then it should be
#' an \code{N} by \code{P} matrix
#' of covariates for \code{N} cells and \code{P} covariates.
#' Such a matrix would typically
#' be returned by a call to \code{model.matrix}
#' \strong{with no intercept}. It is also highly
#' recommended that any numerical (ie non-factor or one-hot-encoded)
#' covariates be standardized
#' to have mean 0 and standard deviation 1.
#'
#' \strong{cellassign}
#' A call to \code{cellassign} returns an object
#' of class \code{cellassign}. To access the
#' MLE estimates of cell types, call \code{fit$cell_type}.
#' To access all MLE parameter
#' estimates, call \code{fit$mle_params}.
#'
#' \strong{Returning a SingleCellExperiment}
#'
#' If \code{return_SCE} is true, a call to \code{cellassign} will return
#' the input SingleCellExperiment, with the following added:
#' \itemize{
#' \item A column \code{cellassign_celltype} to \code{colData(sce)} with the MAP
#' estimate of the cell type
#' \item A slot \code{sce@metadata$cellassign} containing the cellassign fit.
#' Note that a \code{SingleCellExperiment} must be provided as \code{exprs_obj}
#' for this option to be valid.
#' }
#'
#'
#' @examples
#' data(example_sce)
#' data(example_marker_mat)
#'
#' fit <- em_result <- cellassign(example_sce[rownames(example_marker_mat),],
#' marker_gene_info = example_marker_mat,
#' s = colSums(SummarizedExperiment::assay(example_sce, "counts")),
#' learning_rate = 1e-2,
#' shrinkage = TRUE,
#' verbose = FALSE)
#'
#' @importFrom basilisk basiliskStart basiliskRun basiliskStop
#'
#'
#' @export
#'
#' @return
#' An object of class \code{cellassign}. See \code{details}
cellassign <- function(exprs_obj,
marker_gene_info,
s = NULL,
min_delta = 2,
X = NULL,
B = 10,
shrinkage = TRUE,
n_batches = 1,
dirichlet_concentration = 1e-2,
rel_tol_adam = 1e-4,
rel_tol_em = 1e-4,
max_iter_adam = 1e5,
max_iter_em = 20,
learning_rate = 0.1,
verbose = TRUE,
sce_assay = "counts",
return_SCE = FALSE,
num_runs = 1,
threads = 0) {
# Work out rho: the gene x cell-type binary marker matrix. A list input
# is converted with marker_list_to_mat(); anything else is rejected.
rho <- NULL
if(is.matrix(marker_gene_info)) {
rho <- marker_gene_info
} else if(is.list(marker_gene_info)) {
rho <- marker_list_to_mat(marker_gene_info, include_other = FALSE)
} else {
stop("marker_gene_info must either be a matrix or list. See ?cellassign")
}
# Logical as to whether input is SCE (required when return_SCE = TRUE)
is_sce <- is(exprs_obj, "SummarizedExperiment")
if(return_SCE && !is_sce) {
stop("return_SCE is set to TRUE but the input object is not a SummarizedExperiment")
}
# Get expression input as a count matrix (from matrix or SCE assay)
Y <- extract_expression_matrix(exprs_obj, sce_assay = sce_assay)
# Check X is correct: covariates must be a numeric matrix when supplied
if(!is.null(X)) {
if(!(is.matrix(X) && is.numeric(X))) {
stop("X must either be NULL or a numeric matrix")
}
}
# ---- Input validation ----
stopifnot(is.matrix(Y))
stopifnot(is.matrix(rho))
if(!is.null(s)) {
stopifnot(length(s) == nrow(Y))
}
if(any(colSums(Y) == 0)) {
warning("Genes with no mapping counts are present. Make sure this is expected -- this can be valid input in some cases (e.g. when cell types are overspecified).")
}
if(any(rowSums(Y) == 0)) {
warning("Cells with no mapping counts are present. You might want to filter these out prior to using cellassign.")
}
# Fill in generic gene / cell-type names when rho is unnamed
if(is.null(rownames(rho))) {
warning("No gene names supplied - replacing with generics")
rownames(rho) <- paste0("gene_", seq_len(nrow(rho)))
}
if(is.null(colnames(rho))) {
warning("No cell type names supplied - replacing with generics")
colnames(rho) <- paste0("cell_type_", seq_len(ncol(rho)))
}
# Model dimensions: N cells, G genes, C cell types, P covariates
N <- nrow(Y)
X <- initialize_X(X, N, verbose = verbose)
G <- ncol(Y)
C <- ncol(rho)
P <- ncol(X)
if(G > 100) {
warning(paste("You have specified", G, "input genes. Are you sure these are just your markers? Only the marker genes should be used as input"))
}
# Check the dimensions add up
if(nrow(X) != N) {
stop("Number of rows of covariate matrix must match number of cells provided")
}
if(nrow(rho) != G) {
stop("Number of genes provided in marker_gene_info should match number of genes in exprs_obj and be marker genes only")
}
# Compute size factors for each cell
if (is.null(s)) {
message("No size factors supplied - computing from matrix. It is highly recommended to supply size factors calculated using the full gene set")
s <- scran::computeSumFactors(t(Y))
}
# Make sure all size factors are positive
if (any(s <= 0)) {
stop("Cells with size factors <= 0 must be removed prior to analysis.")
}
# Make Dirichlet concentration parameter symmetric if not otherwise specified
if (length(dirichlet_concentration) == 1) {
dirichlet_concentration <- rep(dirichlet_concentration, C)
}
res <- NULL
# One random seed per EM run so repeated optimizations differ
seeds <- sample(.Machine$integer.max - 1, num_runs)
# Run the tensorflow inference num_runs times inside the basilisk
# environment declared by cellassign_env
cl <- basiliskStart(cellassign_env)
run_results <- lapply(seq_len(num_runs), function(i) {
res <- basiliskRun(cl, inference_tensorflow,
Y = Y,
rho = rho,
s = s,
X = X,
G = G,
C = C,
N = N,
P = P,
B = B,
shrinkage = shrinkage,
verbose = verbose,
n_batches = n_batches,
rel_tol_adam = rel_tol_adam,
rel_tol_em = rel_tol_em,
max_iter_adam = max_iter_adam,
max_iter_em = max_iter_em,
learning_rate = learning_rate,
min_delta = min_delta,
dirichlet_concentration = dirichlet_concentration,
random_seed = seeds[i],
threads = as.integer(threads))
return(structure(res, class = "cellassign"))
})
basiliskStop(cl)
# Return best result: the run with the largest final log marginal likelihood
res <- run_results[[which.max(sapply(run_results, function(x) x$lls[length(x$lls)]))]]
if(return_SCE) {
# Now need to parse this into a SingleCellExperiment -
# note that we know the input (exprs_obj) is (at least) a
# SummarizedExperiment to get this far
if("cellassign_celltype" %in% names(SummarizedExperiment::colData(exprs_obj))) {
warning("Field 'cellassign_celltype' exists in colData of the SCE. Overwriting...")
}
SummarizedExperiment::colData(exprs_obj)[['cellassign_celltype']] <- res$cell_type
exprs_obj@metadata$cellassign <- res
return(exprs_obj)
}
return(res)
}
#' Print a \code{cellassign} fit
#'
#' @param x An object of class \code{cellassign}
#' @param ... Additional arguments (unused)
#'
#' @examples
#' data(example_cellassign_fit)
#' print(example_cellassign_fit)
#'
#' @return Prints a structured representation of the \code{cellassign}
#'
#' @export
print.cellassign <- function(x, ...) {
# Dimensions read off the MLE parameter matrices:
# gamma is cells x cell types, delta has one row per marker gene.
N <- nrow(x$mle_params$gamma)
C <- ncol(x$mle_params$gamma)
G <- nrow(x$mle_params$delta)
# beta's extra column is subtracted -- presumably an intercept; confirm
# against the inference code.
P <- ncol(x$mle_params$beta) - 1
# Note: the first two string literals contain embedded (source-level) newlines
cat(sprintf("A cellassign fit for %i cells, %i genes, %i cell types with %i covariates
", N, G, C, P),
"To access cell types, call celltypes(x)
",
"To access cell type probabilities, call cellprobs(x)\n\n")
}
#' @rdname celltypes
#' @export
celltypes <- function(x, assign_prob = 0.95) {
# S3 generic: dispatches on class(x) (see celltypes.cellassign below)
UseMethod("celltypes")
}
#' @rdname cellprobs
#' @export
cellprobs <- function(x) {
# S3 generic for the assignment-probability accessor
UseMethod("cellprobs", x)
}
#' @rdname mleparams
#' @export
mleparams <- function(x) {
# S3 generic for the MLE parameter-list accessor
UseMethod("mleparams", x)
}
#' Get the cell type assignments of a \code{cellassign} fit
#'
#' Get the MLE cell type estimates for each cell
#'
#' @return A character vector with the MLE cell type for each cell, if the probability
#' is greater than \code{assign_prob}.
#'
#' @param x An object of class \code{cellassign} returned by a call to \code{cellassign(...)}
#' @param assign_prob The probability threshold above which a cell is assigned to a given cell type,
#' otherwise "unassigned"
#'
#' @rdname celltypes
#' @export
#'
#' @examples
#' data(example_cellassign_fit)
#' celltypes(example_cellassign_fit)
celltypes.cellassign <- function(x, assign_prob = 0.95) {
  # Label each cell with its maximum-probability cell type; cells whose
  # top probability falls below `assign_prob` become "unassigned".
  stopifnot(is(x, 'cellassign'))
  prob_matrix <- cellprobs(x)
  assignments <- get_mle_cell_type(prob_matrix)
  below_threshold <- matrixStats::rowMaxs(prob_matrix) < assign_prob
  assignments[below_threshold] <- "unassigned"
  assignments
}
#' Get the cell assignment probabilities of a \code{cellassign} fit
#'
#' Get the MLE cell type assignment probabilities for each cell
#'
#' @param x An object of class \code{cellassign}
#' returned by a call to \code{cellassign(...)}
#'
#' @return A cell by celltype matrix with assignment probabilities
#'
#' @rdname cellprobs
#' @export
#'
#' @examples
#' data(example_cellassign_fit)
#' cellprobs(example_cellassign_fit)
cellprobs.cellassign <- function(x) {
  # Accessor: the cell x cell-type assignment-probability matrix (gamma)
  # stored in the fit's MLE parameter list.
  stopifnot(is(x, 'cellassign'))
  mle <- x$mle_params
  mle$gamma
}
#' Get the MLE parameter list of a \code{cellassign} fit
#'
#' @return A list of MLE parameter estimates from cellassign
#'
#' @param x An object of class \code{cellassign} returned
#' by a call to \code{cellassign(...)}
#'
#' @rdname mleparams
#' @export
#'
#' @examples
#' data(example_cellassign_fit)
#' mleparams(example_cellassign_fit)
#' @export
mleparams.cellassign <- function(x) {
  # Accessor: return the full list of maximum-likelihood parameter
  # estimates stored on the fit.
  stopifnot(is(x, 'cellassign'))
  params <- x$mle_params
  params
}
#' Example SingleCellExperiment
#'
#' An example \code{SingleCellExperiment} for 10 marker genes and 500 cells.
#'
#' @seealso example_cellassign_fit
#' @examples
#' data(example_sce)
"example_sce"
#' Example cell marker matrix
#'
#' An example matrix for 10 genes and 2 cell types showing the membership
#' of marker genes to cell types
#'
#' @seealso example_cellassign_fit
#' @examples
#' data(example_marker_mat)
"example_marker_mat"
#' Example cellassign fit
#'
#' An example fit of calling \code{cellassign} on both
#' \code{example_marker_mat} and \code{example_sce}
#'
#' @seealso example_cellassign_fit
#' @examples
#' data(example_cellassign_fit)
"example_cellassign_fit"
#' Example tumour microevironment markers
#'
#' A set of example marker genes for commonly profiling the
#' human tumour mircoenvironment
#'
#' @examples
#' data(example_TME_markers)
"example_TME_markers"
#' Example bulk RNA-seq data
#'
#' An example bulk RNA-seq dataset from Holik et al. Nucleic Acids Research 2017 to
#' demonstrate deriving marker genes
#' @examples
#' data(holik_data)
"holik_data"
#'
#' @keywords local
construct_cellassign_env <- function() {
  # Build the basilisk environment descriptor for cellassign's Python
  # dependencies. On macOS the tensorflow packages are installed via pip;
  # on other platforms they are requested as conda packages directly.
  tf_packages <- c("tensorflow==2.1.0",
                   "tensorflow-probability==0.7.0")
  if (Sys.info()[["sysname"]] == "Darwin") {
    BasiliskEnvironment("cellassign_env",
                        pkgname = "cellassign",
                        packages = c("python==3.7.2"),
                        pip = tf_packages)
  } else {
    BasiliskEnvironment("cellassign_env",
                        pkgname = "cellassign",
                        packages = c("python==3.7.2", tf_packages))
  }
}
#' Basilisk environment
#'
#' Package-level singleton: the environment descriptor consumed by
#' \code{basiliskStart()}/\code{basiliskRun()} inside \code{cellassign()},
#' built once at package load via \code{construct_cellassign_env()}.
#'
#' @keywords local
#'
#' @importFrom basilisk BasiliskEnvironment
cellassign_env <- construct_cellassign_env()
|
552f9733e54dddfc8a38622fd61163464e59ac58
|
20f36a6ec7a216b425ea26bdcac23314d846bfd8
|
/Analysis/IWTomics/low_resolution/IWTomicsData_low_resolution.r
|
aa9f234fb1e9af5369d3637ba927243a6b61e1ba
|
[
"MIT"
] |
permissive
|
makovalab-psu/L1_Project
|
0ffd46c9544f20112c05dfba977ea69c51a08494
|
d8a3ae36dbe65fc48a4cee7a3ccf27a594e0c039
|
refs/heads/master
| 2021-05-24T10:33:25.312392
| 2020-07-13T20:45:55
| 2020-07-13T20:45:55
| 253,520,011
| 1
| 2
|
MIT
| 2020-04-07T17:55:45
| 2020-04-06T14:21:06
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 38,986
|
r
|
IWTomicsData_low_resolution.r
|
# Union class so the `test` slot below can hold either test results (a
# list) or NULL when no test has been performed.
setClassUnion(".listOrNULL",c("list","NULL"))
# S4 container pairing genomic region datasets (a GRangesList) with feature
# measurements (per feature, a list of matrices -- one matrix per region
# dataset), plus the alignment type and optional stored test results.
setClass("IWTomicsData",
slots=c(metadata="list",
regions="GRangesList",
alignment="character",
features="list",
length_features="list",
test=".listOrNULL"),
prototype=list(metadata=list(region_datasets=data.frame(),
feature_datasets=data.frame()),
regions=GRangesList(),
alignment='center',
features=list(),
length_features=list(),
test=NULL),
# Validity: alignment must be one of the four supported types; the names
# of regions/features/length_features must match the stored region and
# feature IDs; any stored test must reference existing IDs and use a
# supported test statistic.
validity=function(object){
if(!(alignment(object) %in% c('left','right','center','scale')))
return(paste0('invalid alignment type \'',alignment(object),'\'. Available alignments are \'left\', \'right\', \'center\' and \'scale\'.'))
if(nRegions(object)!=0)
if(!identical(names(object@regions),idRegions(object)))
return('invalid regions. Names should be identical to region IDs.')
if(nFeatures(object)!=0){
if(!identical(names(object@features),idFeatures(object)))
return('invalid features. Names should be identical to feature IDs.')
if(TRUE %in% lapply(object@features,function(x) !identical(names(x),idRegions(object))))
return('invalid features. Each feature should have names identical to region IDs.')
}
if(length(object@length_features)!=0){
if(!identical(names(object@length_features),idFeatures(object)))
return('invalid length_features. Names should be identical to feature IDs.')
if(TRUE %in% lapply(object@length_features,function(x) !identical(names(x),idRegions(object))))
return('invalid length_features. Each feature should have names identical to region IDs.')
}
if(!is.null(object@test)){
input=testInput(object)
if(sum(!(input$id_region1 %in% idRegions(object))))
return('invalid test id_region1. The IDs provided are not region IDs.')
if(!is.null(input$id_region2)){
if(length(input$id_region2)!=length(input$id_region1))
return('invalid test id_region2. It must have the same length of id_region1.')
if(sum(!(setdiff(input$id_region2,'') %in% idRegions(object))))
return('invalid test id_region2. The IDs provided are not region IDs.')
}
if(sum(!(input$id_features_subset %in% idFeatures(object))))
return('invalid test id_features_subset. The IDs provided are not feature IDs.')
if(!(input$statistics %in% c('mean','median','variance','quantile')))
return('invalid \'statistics\'. Available test statistics are \'mean\', \'median\', \'variance\' and \'quantile\'.')
}
return(TRUE)
})
# Constructor method
# S4 generic: dispatch on (x = regions, y = features); the supported
# concrete signatures are defined in the setMethod calls below.
setGeneric("IWTomicsData",function(x,y,...) standardGeneric("IWTomicsData"))
# Constructor from in-memory objects: x is a GRangesList of region datasets,
# y is a list (one element per feature) of matrix lists (one matrix per
# region dataset). Builds the metadata data frames, infers measured region
# lengths and window resolution, and returns a validated IWTomicsData.
setMethod("IWTomicsData",c("GRangesList","list"),
function(x,y,alignment='center',
id_regions=NULL,name_regions=NULL,
id_features=NULL,name_features=NULL,length_features=NULL){
if(!(alignment %in% c('left','right','center','scale')))
stop('invalid alignment type \'',alignment,'\'. Available alignments are \'left\', \'right\', \'center\' and \'scale\'.')
regions=x
features=y
# Features must be a list of lists of matrices, with one matrix per
# region dataset; matrices of the same feature must share a row count.
if((FALSE %in% unlist(lapply(features,is.list)))|
(FALSE %in% unlist(lapply(features,function(feature) lapply(feature,is.matrix)))))
stop('invalid \'features\'. List of matrix lists expected.')
if(sum(unlist(lapply(features,length))!=length(regions)))
stop('Invalid \'features\'. A matrix for each region dataset expected.')
if(FALSE %in% unlist(lapply(features,function(feature) length(unique(unlist(lapply(feature,nrow))))==1)))
stop('Invalid \'features\'. Matrices for the same feature should have the same row number.')
# Region IDs/names default to the list names, then to generated 'rgn<i>'
if(is.null(id_regions))
id_regions=names(regions)
if(is.null(id_regions))
id_regions=paste0('rgn',seq_along(regions))
if(is.null(name_regions))
name_regions=id_regions
region_datasets=data.frame(name=name_regions,file=NA,size=unlist(lapply(regions,length)),row.names=id_regions,stringsAsFactors=FALSE)
# Duplicated regions are an error; overlapping regions only a warning
for(id_region in id_regions){
if(TRUE %in% duplicated(regions[[id_region]]))
stop('duplicated regions in ',id_region,'.')
if(!identical(disjoin(regions[[id_region]]),sort(regions[[id_region]])))
warning('overlapping regions in ',id_region,'.')
}
# Feature IDs/names default to the list names, then to generated 'ftr<i>'
if(is.null(id_features))
id_features=names(features)
if(is.null(id_features))
id_features=paste0('ftr',seq_along(features))
if(is.null(name_features))
name_features=id_features
feature_datasets=data.frame(name=name_features,row.names=id_features,stringsAsFactors=FALSE)
feature_datasets=cbind(feature_datasets,matrix(NA,ncol=length(id_regions),nrow=length(id_features)))
names(feature_datasets)=c('name',paste0('file_',id_regions))
# For every feature x region dataset, determine the measured length of
# each region (number of non-NA windows per matrix column) and derive
# the window resolution.
length_features_new=list()
for(id_feature in id_features){
length_feature=list()
resolution=c()
for(id_region in id_regions){
if(is.null(length_features[[id_feature]][[id_region]])){
# Length inferred from the first and last non-NA entry of each column
ranges=apply(features[[id_feature]][[id_region]],2,
function(feature){
notNA=which(!is.na(feature))
return(c(notNA[1],notNA[length(notNA)]))
})
length_feature[[id_region]]=ranges[2,]-ranges[1,]+1
} else {
length_feature[[id_region]]=length_features[[id_feature]][[id_region]]
}
names(length_feature[[id_region]])=NULL
# Resolution = region width divided by the number of measured windows
feature_resolution=unique(width(regions[[id_region]])/length_feature[[id_region]])
if(length(feature_resolution)>1)
warning('different size windows for feature \'',id_feature,'\'.')
resolution=c(resolution,feature_resolution[1])
}
resolution=unique(resolution)
if(length(resolution)>1)
warning(paste0('Different size windows for feature \'',id_feature,'\'.'))
feature_datasets[id_feature,'resolution']=resolution[1]
# Matrices must be exactly as tall as the longest measured region
if(max(unlist(length_feature))!=nrow(features[[id_feature]][[1]]))
stop(paste0('Invalid features. Row of NA in the matrices of feature \'',id_feature,'\'.'))
length_features_new[[id_feature]]=length_feature
}
new("IWTomicsData",metadata=list(region_datasets=region_datasets,feature_datasets=feature_datasets),
regions=regions,alignment=alignment,features=features,length_features=length_features_new)
})
setMethod("IWTomicsData",c("character","data.frame"),
function(x,y,alignment='center',
id_regions=NULL,name_regions=NULL,id_features=NULL,name_features=NULL,
path=NULL,start.are.0based=TRUE,header=FALSE,...){
if(!(alignment %in% c('left','right','center','scale')))
stop('invalid alignment type \'',alignment,'\'. Available alignments are \'left\', \'right\', \'center\' and \'scale\'.')
file_regions=x
if(is.null(id_regions))
id_regions=file_path_sans_ext(file_regions)
if(is.null(name_regions))
name_regions=id_regions
region_datasets=data.frame(name=name_regions,file=file_regions,row.names=id_regions,stringsAsFactors=FALSE)
names(name_regions)=id_regions
file_features=y
if(ncol(file_features)<length(file_regions))
stop('invalid file_features.')
dataset.in.files=id_regions %in% names(file_features)
if(FALSE %in% dataset.in.files){
if(TRUE %in% dataset.in.files)
stop('invalid file_features.')
names(file_features)=id_regions
}
if(nrow(file_features)!=1){
file_features=data.frame(apply(file_features,2,as.character),stringsAsFactors=FALSE)
}else{
file_features=data.frame(t(apply(file_features,2,as.character)),stringsAsFactors=FALSE)
}
if(is.null(id_features))
id_features=file_path_sans_ext(file_features[,1])
if(is.null(name_features))
name_features=id_features
feature_datasets=data.frame(name=name_features,file=file_features,row.names=id_features,stringsAsFactors=FALSE)
colnames(feature_datasets)[-1]=paste0('file_',id_regions)
names(name_features)=id_features
if(!is.null(path)){
file_regions=file.path(path,file_regions)
if(nrow(file_features)!=1){
file_features=data.frame(apply(file_features,2,function(file) file.path(path,file)),stringsAsFactors=FALSE)
}else{
file_features=data.frame(t(apply(file_features,2,function(file) file.path(path,file))),stringsAsFactors=FALSE)
}
}
names(file_regions)=id_regions
rownames(file_features)=id_features
regions=list()
for(id_region in id_regions){
message('Reading region dataset \'',name_regions[id_region],'\'...')
file=file_regions[id_region]
tmp=read.delim(file,header=header,stringsAsFactors=FALSE,...)
if(ncol(tmp)<3)
stop('invalid format in ',file,'.')
if(header){
if(sum(!(c('chr','start','end') %in% names(tmp))))
stop('invalid variable names in ',file,'. Variable names should be \'chr\', \'start\' and \'end\'.')
}else{
names(tmp)[1:3]=c('chr','start','end')
}
regions[[id_region]]=makeGRangesFromDataFrame(tmp,starts.in.df.are.0based=start.are.0based)
rm(tmp)
if(TRUE %in% duplicated(regions[[id_region]]))
stop('duplicated regions in ',file,'.')
if(!isDisjoint(regions[[id_region]]))
warning('overlapping regions in ',file,'.')
}
region_datasets$size=unlist(lapply(regions,length))
regions=GRangesList(regions)
features=list()
length_features=list()
for(id_feature in id_features){
message('Reading feature \'',name_features[id_feature],'\'...')
feature.matrices=list()
resolution=c()
for(id_region in id_regions){
message(' Region dataset \'',name_regions[id_region],'\'...')
tmp=read.delim(file_features[id_feature,id_region],header=header,stringsAsFactors=FALSE,...)
if(ncol(tmp)<4)
stop('invalid format in ',file_features[id_feature,id_region],'.')
measure.window=ifelse(ncol(tmp)==4,TRUE,FALSE)
if(header){
if(sum(!(c('chr','start','end') %in% names(tmp))))
stop('Invalid variable names in ',file_features[id_feature,id_region],'. Variable names should be \'chr\', \'start\' and \'end\'.')
}else{
names(tmp)[1:4]=c('chr','start','end','measure')
}
tmp=makeGRangesFromDataFrame(tmp,starts.in.df.are.0based=start.are.0based,keep.extra.columns=TRUE)
if(TRUE %in% duplicated(tmp))
stop('duplicated windows in ',file_features[id_feature,id_region],'.')
if(!isDisjoint(tmp))
if(measure.window){
stop('overlapping windows in ',file_features[id_feature,id_region],'.')
}else{
warning('overlapping windows in ',file_features[id_feature,id_region],'.')
}
if(measure.window){
feature.resolution=unique(width(tmp))
}else{
match.region=match(regions[[id_region]],tmp)
if(sum(is.na(match.region)))
stop('not all regions in datasets ',id_region,' are present in ',file_features[id_feature,id_region],'.')
tmp=tmp[match.region,]
skip=list(...)[['skip']]
if(is.null(skip))
skip=0
length.tmp=(count.fields(file_features[id_feature,id_region],sep="\t",skip=header+skip)-3)[match.region]
feature.resolution=unique(width(regions[[id_region]])/length.tmp)
}
if(length(feature.resolution)>1)
if(measure.window){
warning('different size windows in ',file_features[id_feature,id_region],'.')
}else{
warning('different size windows in ',file_features[id_feature,id_region],'.')
}
resolution=c(resolution,feature.resolution[1])
if(length(setdiff(regions[[id_region]],tmp))>0)
warning('windows in ',file_features[id_feature,id_region],' do not cover all regions.
Put NA in the file to indicate Not Available measurements.')
if(measure.window){
if(region_datasets[id_region,'size']>100){
core.number <- min(floor(region_datasets[id_region,'size']/50),detectCores()) # at least 50 regions each node
n_group=max(floor(region_datasets[id_region,'size']/core.number),50)
groups=c(rep.int(1,region_datasets[id_region,'size']-core.number*n_group),rep(seq.int(core.number),each=n_group))
cl <- makeCluster(core.number)
feature.matrices[[id_region]]=Reduce(c,parLapply(cl,split(regions[[id_region]],groups),
function(region,tmp) lapply(region,function(region,tmp) subsetByOverlaps(tmp,region)$measure,tmp=tmp),tmp=tmp))
stopCluster(cl)
}else{
feature.matrices[[id_region]]=lapply(regions[[id_region]],
function(region) subsetByOverlaps(tmp,region)$measure)
}
}else{
feature.matrices[[id_region]]=mapply(function(tmp,length.tmp) tmp[seq_len(length.tmp)],split(as.matrix(mcols(tmp)),seq_along(length.tmp)),length.tmp,SIMPLIFY=FALSE)
}
names(feature.matrices[[id_region]])=NULL
rm(tmp)
}
resolution=unique(resolution)
if(length(resolution)>1)
if(measure.window){
warning('Different size windows for feature \'',id_feature,'\'.')
resolution=min(resolution)
}else{
warning('Different size windows for feature \'',id_feature,'\'.')
resolution=min(resolution)
}
feature_datasets[id_feature,'resolution']=resolution
length=lapply(feature.matrices,function(feature.matrix) unlist(lapply(feature.matrix,length),use.names=FALSE))
if(length(unique(unlist(length)))==1){
feature.matrices=lapply(feature.matrices,function(feature) do.call(cbind,feature))
}else{
length.max=max(unlist(length))
if(alignment %in% c('left','scale')){
length.NA.right=lapply(length,function(length) length.max-length)
for(id_region in id_regions)
feature.matrices[[id_region]]=mapply(function(feature,length.NA.right) c(feature,rep.int(NA,length.NA.right)),
feature.matrices[[id_region]],length.NA.right[[id_region]])
}
if(alignment=='right'){
length.NA.left=lapply(length,function(length) length.max-length)
for(id_region in id_regions)
feature.matrices[[id_region]]=mapply(function(feature,length.NA.left) c(rep.int(NA,length.NA.left),feature),
feature.matrices[[id_region]],length.NA.left[[id_region]])
}
if(alignment=='center'){
center=lapply(length,'%/%',2)
# the alignment is approximate if there are regions with an odd number of windows and regions with an even number of regions
length.NA.right=lapply(mapply('-',length,center,SIMPLIFY=FALSE),function(lenght_center) (length.max-length.max%/%2)-lenght_center)
length.NA.left=lapply(center,function(center) length.max%/%2-center)
for(id_region in id_regions)
feature.matrices[[id_region]]=mapply(function(feature,length.NA.left,length.NA.right) as.matrix(c(rep.int(NA,length.NA.left),feature,rep.int(NA,length.NA.right))),
feature.matrices[[id_region]],length.NA.left[[id_region]],length.NA.right[[id_region]])
}
}
features[[id_feature]]=feature.matrices
length_features[[id_feature]]=length
}
new("IWTomicsData",metadata=list(region_datasets=region_datasets,feature_datasets=feature_datasets),
regions=regions,alignment=alignment,features=features,length_features=length_features)
})
# Constructor dispatch: a character vector of feature files is recycled into
# a data.frame with one column per region dataset, then delegated to the
# (character, data.frame) method.
setMethod("IWTomicsData",c("character","character"),
          function(x,y,...){
            y=data.frame(matrix(y,ncol=length(x),nrow=length(y)),stringsAsFactors=FALSE)
            IWTomicsData(x,y,...)
          })
# Constructor dispatch: a matrix of feature file names is coerced to a
# data.frame and delegated to the (character, data.frame) method.
setMethod("IWTomicsData",c("character","matrix"),
          function(x,y,...){
            y=data.frame(y,stringsAsFactors=FALSE)
            IWTomicsData(x,y,...)
          })
# Accessors
# Simple read-only accessors for the slots and metadata of an IWTomicsData
# object (counts, ids, names, window resolution and alignment type).
setGeneric("nRegions",function(x,...) standardGeneric("nRegions"))
setMethod("nRegions","IWTomicsData",function(x) length(x@regions))
setGeneric("nFeatures",function(x,...) standardGeneric("nFeatures"))
setMethod("nFeatures","IWTomicsData",function(x) length(x@features))
# dim(x) is c(number of region datasets, number of features).
setMethod("dim","IWTomicsData",function(x) c(nRegions(x),nFeatures(x)))
setGeneric("lengthRegions",function(x,...) standardGeneric("lengthRegions"))
setMethod("lengthRegions","IWTomicsData",function(x) unlist(lapply(x@regions,length)))
setGeneric("lengthFeatures",function(x,...) standardGeneric("lengthFeatures"))
setMethod("lengthFeatures","IWTomicsData",function(x) x@length_features)
# Window resolution per feature, as a named numeric vector.
setGeneric("resolution",function(x,...) standardGeneric("resolution"))
setMethod("resolution","IWTomicsData",
          function(x){
            res=metadata(x)$feature_datasets$resolution
            names(res)=idFeatures(x)
            return(res)
          })
setMethod("metadata","IWTomicsData",function(x) x@metadata)
setGeneric("regions",function(x,...) standardGeneric("regions"))
setMethod("regions","IWTomicsData",function(x) x@regions)
setGeneric("features",function(x,...) standardGeneric("features"))
setMethod("features","IWTomicsData",function(x) x@features)
# Ids are the row names of the metadata tables.
setGeneric("idRegions",function(x,...) standardGeneric("idRegions"))
setMethod("idRegions","IWTomicsData",function(x) row.names(metadata(x)$region_datasets))
setGeneric("idFeatures",function(x,...) standardGeneric("idFeatures"))
setMethod("idFeatures","IWTomicsData",function(x) row.names(metadata(x)$feature_datasets))
# Human-readable names, indexed by id.
setGeneric("nameRegions",function(x,...) standardGeneric("nameRegions"))
setMethod("nameRegions","IWTomicsData",
          function(x){
            name=metadata(x)$region_datasets$name
            names(name)=idRegions(x)
            name
          })
setGeneric("nameFeatures",function(x,...) standardGeneric("nameFeatures"))
setMethod("nameFeatures","IWTomicsData",
          function(x){
            name=metadata(x)$feature_datasets$name
            names(name)=idFeatures(x)
            name
          })
setGeneric("alignment",function(x,...) standardGeneric("alignment"))
setMethod("alignment","IWTomicsData",function(x) x@alignment)
# Accessors for test results stored in the object's @test slot.
setGeneric("testInput",function(x,...) standardGeneric("testInput"))
setMethod("testInput","IWTomicsData",function(x) x@test$input)
setGeneric("nTests",function(x,...) standardGeneric("nTests"))
setMethod("nTests","IWTomicsData",function(x) length(testInput(x)$id_region1))
# For each requested test, return the pair of region dataset ids involved
# ('' as second id when no second region dataset is recorded).
setGeneric("idRegionsTest",function(x,test,...) standardGeneric("idRegionsTest"))
setMethod("idRegionsTest",c("IWTomicsData","vector"),
          function(x,test){
            if(nTests(x)==0)
              return(NULL)
            if(sum(!(test %in% 1:nTests(x))))
              stop('invalid test number.')
            id=lapply(test,
                      function(i) c(testInput(x)$id_region1[i],
                                    ifelse(is.null(testInput(x)$id_region2)||(testInput(x)$id_region2[i]==''),'',testInput(x)$id_region2[i])))
            names(id)=paste0('test',test)
            return(id)
          })
# Missing `test` defaults to all stored tests.
setMethod("idRegionsTest",c("IWTomicsData","missing"),
          function(x){
            if(nTests(x)==0)
              return(NULL)
            idRegionsTest(x,1:nTests(x))
          })
setGeneric("idFeaturesTest",function(x,...) standardGeneric("idFeaturesTest"))
setMethod("idFeaturesTest","IWTomicsData",function(x) testInput(x)$id_features_subset)
# adjusted_pval: extract adjusted p-value curves from stored test results.
# The dispatch table covers every combination of supplied/missing `test`,
# `id_features_subset` and `scale_threshold`; missing arguments default to
# all tests, all tested features, and the stored `adjusted_pval` vector.
setGeneric("adjusted_pval",function(x,test,id_features_subset,scale_threshold,...) standardGeneric("adjusted_pval"))
setMethod("adjusted_pval",c("IWTomicsData","vector","character","vector"),
          function(x,test,id_features_subset,scale_threshold){
            if(nTests(x)==0)
              return(NULL)
            if(sum(!(test %in% 1:nTests(x))))
              stop('invalid test number.')
            if(sum(!(id_features_subset %in% idFeaturesTest(x))))
              stop('invalid id_features_subset.')
            # Normalise scale_threshold to one named list of thresholds
            # (one entry per feature) for each requested test.
            if(is.list(scale_threshold)){
              scale_threshold=lapply(scale_threshold,
                                     function(scale){
                                       scale=as.list(rep(scale,length.out=length(id_features_subset)))
                                       names(scale)=id_features_subset
                                       return(scale)
                                     })
            }else{
              scale_threshold=lapply(test,
                                     function(i){
                                       scale=as.list(rep(scale_threshold,length.out=length(id_features_subset)))
                                       names(scale)=id_features_subset
                                       return(scale)
                                     })
            }
            # For each test/feature pair, pick the row of the adjusted p-value
            # matrix corresponding to the requested scale; out-of-range scales
            # fall back to the test's own max_scale with a warning.
            pval=mapply(function(results,scale) mapply(function(result,scale){
              pval=result$adjusted_pval_matrix
              if((scale<1)||(result$max_scale<scale)){
                warning('invalid scale_threshold. Setting it to the default value.',call.=FALSE,immediate.=TRUE)
                scale=result$max_scale
              }
              pval=pval[ncol(pval)-scale+1,]
              return(pval)
            },results,scale,SIMPLIFY=FALSE),
            .testResults(x,test,id_features_subset),scale_threshold,SIMPLIFY=FALSE)
            names(pval)=paste0('test',test)
            return(pval)
          })
setMethod("adjusted_pval",c("IWTomicsData","missing","character","vector"),
          function(x,test,id_features_subset,scale_threshold){
            if(nTests(x)==0)
              return(NULL)
            adjusted_pval(x,1:nTests(x),id_features_subset,scale_threshold)
          })
setMethod("adjusted_pval",c("IWTomicsData","vector","missing","vector"),
          function(x,test,id_features_subset,scale_threshold){
            if(nTests(x)==0)
              return(NULL)
            adjusted_pval(x,test,idFeaturesTest(x),scale_threshold)
          })
setMethod("adjusted_pval",c("IWTomicsData","missing","missing","vector"),
          function(x,test,id_features_subset,scale_threshold){
            if(nTests(x)==0)
              return(NULL)
            adjusted_pval(x,1:nTests(x),idFeaturesTest(x),scale_threshold)
          })
# No scale_threshold supplied: return the stored adjusted_pval vectors as-is.
setMethod("adjusted_pval",c("IWTomicsData","vector","character","missing"),
          function(x,test,id_features_subset){
            if(nTests(x)==0)
              return(NULL)
            if(sum(!(test %in% 1:nTests(x))))
              stop('invalid test number.')
            if(sum(!(id_features_subset %in% idFeaturesTest(x))))
              stop('invalid id_features_subset.')
            pval=lapply(x@test$result[test],function(results) lapply(results[id_features_subset],function(result) result$adjusted_pval))
            names(pval)=paste0('test',test)
            return(pval)
          })
setMethod("adjusted_pval",c("IWTomicsData","missing","character","missing"),
          function(x,test,id_features_subset){
            if(nTests(x)==0)
              return(NULL)
            adjusted_pval(x,1:nTests(x),id_features_subset)
          })
setMethod("adjusted_pval",c("IWTomicsData","vector","missing","missing"),
          function(x,test){
            if(nTests(x)==0)
              return(NULL)
            adjusted_pval(x,test,idFeaturesTest(x))
          })
setMethod("adjusted_pval",c("IWTomicsData","missing","missing","missing"),
          function(x){
            if(nTests(x)==0)
              return(NULL)
            adjusted_pval(x,1:nTests(x),idFeaturesTest(x))
          })
# Internal helper: extract the raw per-feature result objects for the given
# tests. Missing arguments default to all tests / all tested features.
setGeneric(".testResults",function(x,test,id_features_subset,...) standardGeneric(".testResults"))
setMethod(".testResults",c("IWTomicsData","vector","character"),
          function(x,test,id_features_subset){
            if(nTests(x)==0)
              return(NULL)
            if(sum(!(test %in% 1:nTests(x))))
              stop('invalid test number.')
            if(sum(!(id_features_subset %in% idFeaturesTest(x))))
              stop('invalid id_features_subset.')
            lapply(x@test$result[test],function(results) results[id_features_subset])
          })
setMethod(".testResults",c("IWTomicsData","missing","character"),
          function(x,test,id_features_subset){
            if(nTests(x)==0)
              return(NULL)
            .testResults(x,1:nTests(x),id_features_subset)
          })
setMethod(".testResults",c("IWTomicsData","vector","missing"),
          function(x,test,id_features_subset){
            if(nTests(x)==0)
              return(NULL)
            .testResults(x,test,idFeaturesTest(x))
          })
setMethod(".testResults",c("IWTomicsData","missing","missing"),
          function(x){
            if(nTests(x)==0)
              return(NULL)
            .testResults(x,1:nTests(x),idFeaturesTest(x))
          })
# Subset method
# Subset method: x[i, j] selects region datasets (i) and features (j),
# by numeric index or by id.
setMethod("[",c("IWTomicsData","ANY","ANY","ANY"),
          function(x,i,j,...,drop=TRUE){
            if(!missing(i)){
              if(is.numeric(i)&(FALSE %in% (i %in% seq_len(nRegions(x)))))
                stop('undefined regions selected')
              if(is.character(i)&(FALSE %in% (i %in% idRegions(x))))
                stop('undefined regions selected')
            }
            if(!missing(j)){
              if(is.numeric(j)&(FALSE %in% (j %in% seq_len(nFeatures(x)))))
                stop('undefined features selected')
              if(is.character(j)&(FALSE %in% (j %in% idFeatures(x))))
                stop('undefined features selected')
            }
            region_datasets=x@metadata$region_datasets[i,]
            feature_datasets=x@metadata$feature_datasets[j,c('name',paste0('file_',row.names(region_datasets)),'resolution')]
            regions_new=x@regions[row.names(region_datasets)]
            length_features=lapply(x@length_features[row.names(feature_datasets)],function(length) length[row.names(region_datasets)])
            length_features_max=lapply(length_features,function(length) max(unlist(length)))
            # Trim each feature matrix to the maximum number of windows among
            # the selected region datasets, respecting the alignment type.
            features_new=mapply(function(feature,length){
              feature=feature[row.names(region_datasets)]
              if(alignment(x) %in% c('left','scale'))
                return(lapply(feature,function(M) as.matrix(M[seq_len(length),])))
              if(alignment(x)=='right')
                return(lapply(feature,function(M) as.matrix(M[seq_len(length)+nrow(M)-length,])))
              if(alignment(x)=='center')
                return(lapply(feature,function(M) as.matrix(M[seq_len(length)+nrow(M)%/%2-length%/%2,])))
            },x@features[row.names(feature_datasets)],length_features_max,SIMPLIFY=FALSE)
            # Subsetting invalidates any stored test results (test=NULL).
            initialize(x,metadata=list(region_datasets=region_datasets,feature_datasets=feature_datasets),
                       regions=regions_new,features=features_new,length_features=length_features,test=NULL)
          })
# Combine methods
# Combine two or more IWTomicsData objects into one, merging their region
# datasets and features after extensive compatibility checks.
#
# Fix: the original computed alignment_new with unique(alignment(x),alignment(y)),
# but unique()'s second argument is `incomparables`, so alignment(y) was
# silently ignored and the "different types of alignment" check could never
# trigger. unique(c(...)) makes the check effective.
setMethod("c","IWTomicsData",
          function(x,...){
            elements=list(x,...)
            if(length(elements)>2){
              # More than two objects: combine pairwise, left to right.
              c(elements[[1]],c(...))
            }else{
              x=elements[[1]]
              y=elements[[2]]
              # Both objects must use the same alignment type.
              alignment_new=unique(c(alignment(x),alignment(y)))
              if(length(alignment_new)>1)
                stop('merging not possible, different types of alignment present.')
              regions_id=union(idRegions(x),idRegions(y))
              features_id=union(idFeatures(x),idFeatures(y))
              regions_common=intersect(idRegions(x),idRegions(y))
              # Region datasets shared by both objects must contain the same
              # regions; reorder y's copy to match x's order where needed.
              for(region_id in regions_common){
                if(length(setdiff(regions(x)[[region_id]],regions(y)[[region_id]])))
                  stop('merging not possible, region dataset \'',region_id,'\' differs in the IWTomicsData objects.')
                overlaps=as.matrix(findOverlaps(regions(x)[[region_id]],regions(y)[[region_id]]))
                if(sum(overlaps[,1]!=1:length(regions(x)[[region_id]])))
                  stop('merging not possible, region dataset \'',region_id,'\' differs in the IWTomicsData objects.')
                if(sum(overlaps[,2]!=1:length(regions(y)[[region_id]]))){
                  y@regions[[region_id]]=y@regions[[region_id]][overlaps[,2]]
                  for(feature_id in idFeatures(y)){
                    y@features[[feature_id]][[region_id]]=y@features[[feature_id]][[region_id]][,overlaps[,2]]
                    y@length_features[[feature_id]][[region_id]]=y@length_features[[feature_id]][[region_id]][overlaps[,2]]
                  }
                }
              }
              # Features shared by both objects must have identical resolution
              # and identical measurements on the common region datasets.
              features_common=intersect(idFeatures(x),idFeatures(y))
              for(feature_id in features_common){
                if(metadata(x)$feature_datasets[feature_id,'resolution']!=metadata(y)$feature_datasets[feature_id,'resolution'])
                  stop('merging not possible, feature \'',feature_id,'\' resolution differs in the IWTomicsData objects.')
                for(region_id in regions_common){
                  if(!identical(features(x[region_id,feature_id]),features(y[region_id,feature_id])))
                    stop('merging not possible, feature \'',feature_id,'\' in region dataset \'',region_id,'\' differs in the IWTomicsData objects.')
                }
              }
              # Every feature must be available for every region dataset.
              for(region_id in regions_id){
                features_id_present=c()
                if(region_id %in% idRegions(x))
                  features_id_present=idFeatures(x)
                if(region_id %in% idRegions(y))
                  features_id_present=c(features_id_present,idFeatures(y))
                if(!isEmpty(setdiff(features_id,features_id_present)))
                  stop('merging not possible, not all features are present for region dataset \'',region_id,'\'.')
              }
              # If one object is entirely contained in the other, return the other.
              if(isEmpty(setdiff(idRegions(x),regions_common))&isEmpty(setdiff(idFeatures(x),features_common)))
                return(y)
              if(isEmpty(setdiff(idRegions(y),regions_common))&isEmpty(setdiff(idFeatures(y),features_common)))
                return(x)
              # Build the merged metadata tables.
              region_datasets=rbind(x@metadata$region_datasets,y@metadata$region_datasets[setdiff(idRegions(y),regions_common),])
              feature_datasets=as.data.frame(matrix(NA,nrow=length(features_id),ncol=length(regions_id)+2))
              row.names(feature_datasets)=features_id
              colnames(feature_datasets)=c('name',paste0('file_',regions_id),'resolution')
              feature_datasets[idFeatures(x),c('name',paste0('file_',idRegions(x)),'resolution')]=x@metadata$feature_datasets[idFeatures(x),c('name',paste0('file_',idRegions(x)),'resolution')]
              feature_datasets[idFeatures(y),c('name',paste0('file_',idRegions(y)),'resolution')]=y@metadata$feature_datasets[idFeatures(y),c('name',paste0('file_',idRegions(y)),'resolution')]
              regions_new=c(regions(x),regions(y)[setdiff(idRegions(y),regions_common)])
              # Preallocate the feature and length containers, then fill from
              # x first and y second (y overwrites identical common entries).
              features_new=lapply(features_id,
                                  function(feature_id){
                                    feature=vector('list',length(regions_id))
                                    names(feature)=regions_id
                                    return(feature)
                                  })
              names(features_new)=features_id
              length_features=features_new
              for(feature_id in idFeatures(x))
                for(region_id in idRegions(x)){
                  features_new[[feature_id]][[region_id]]=x@features[[feature_id]][[region_id]]
                  length_features[[feature_id]][[region_id]]=x@length_features[[feature_id]][[region_id]]
                }
              for(feature_id in idFeatures(y))
                for(region_id in idRegions(y)){
                  features_new[[feature_id]][[region_id]]=y@features[[feature_id]][[region_id]]
                  length_features[[feature_id]][[region_id]]=y@length_features[[feature_id]][[region_id]]
                }
              # Pad feature matrices with NA rows so all region datasets share
              # the same number of windows, respecting the alignment type.
              for(feature_id in features_id){
                length=lapply(features_new[[feature_id]],nrow)
                if(length(unique(unlist(length)))!=1){
                  length.max=max(unlist(length))
                  if(alignment_new %in% c('left','scale')){
                    length.NA.right=lapply(length,function(length) length.max-length)
                    for(region_id in regions_id)
                      if(length(length.NA.right[[region_id]])>0)
                        features_new[[feature_id]][[region_id]]=rbind(features_new[[feature_id]][[region_id]],
                                                                     matrix(NA,nrow=length.NA.right[[region_id]],ncol=ncol(features_new[[feature_id]][[region_id]])))
                  }
                  if(alignment_new=='right'){
                    length.NA.left=lapply(length,function(length) length.max-length)
                    for(region_id in regions_id)
                      if(length(length.NA.left[[region_id]])>0)
                        features_new[[feature_id]][[region_id]]=rbind(matrix(NA,nrow=length.NA.left[[region_id]],ncol=ncol(features_new[[feature_id]][[region_id]])),
                                                                     features_new[[feature_id]][[region_id]])
                  }
                  if(alignment_new=='center'){
                    center=lapply(length,'%/%',2)
                    # the alignment is approximate if there are regions with an odd number of windows and regions with an even number of regions
                    length.NA.right=lapply(mapply('-',length,center,SIMPLIFY=FALSE),function(lenght_center) (length.max-length.max%/%2)-lenght_center)
                    length.NA.left=lapply(center,function(center) length.max%/%2-center)
                    for(region_id in regions_id)
                      if(length(length.NA.right[[region_id]])>0)
                        features_new[[feature_id]][[region_id]]=rbind(matrix(NA,nrow=length.NA.left[[region_id]],ncol=ncol(features_new[[feature_id]][[region_id]])),
                                                                     features_new[[feature_id]][[region_id]],
                                                                     matrix(NA,nrow=length.NA.right[[region_id]],ncol=ncol(features_new[[feature_id]][[region_id]])))
                  }
                }
              }
              # Merging invalidates any stored test results (test=NULL).
              new("IWTomicsData",metadata=list(region_datasets=region_datasets,feature_datasets=feature_datasets),
                  regions=regions_new,alignment=alignment_new,features=features_new,length_features=length_features,test=NULL)
            }
          })
# merge() is an alias for c(): combine two IWTomicsData objects.
setMethod("merge",c("IWTomicsData","IWTomicsData"),function(x,y,...) c(x,y,...))
# rbind: combine IWTomicsData objects that carry exactly the same features
# (i.e. stack region datasets). The actual merge is delegated to c().
setMethod("rbind","IWTomicsData",
          function(...){
            objs <- list(...)
            ids <- lapply(objs, idFeatures)
            # Every object must have an identical feature id vector.
            for (k in seq_along(ids)[-1]) {
              if (!identical(ids[[k - 1]], ids[[k]]))
                stop('merging not possible, features differs in the IWTomicsData objects.')
            }
            c(...)
          })
# cbind: combine IWTomicsData objects that carry exactly the same region
# datasets (i.e. add features). The actual merge is delegated to c().
setMethod("cbind","IWTomicsData",
          function(...){
            objs <- list(...)
            ids <- lapply(objs, idRegions)
            # Every object must have an identical region id vector.
            for (k in seq_along(ids)[-1]) {
              if (!identical(ids[[k - 1]], ids[[k]]))
                stop('merging not possible, locations differs in the IWTomicsData objects.')
            }
            c(...)
          })
|
21fb3561ccbbe0c622d150255d35faaf82a0b015
|
87760ba06690cf90166a879a88a09cd2e64f3417
|
/man/recipe_helpers.Rd
|
c4493f0b5e3ed1886f54edfb08a1a842418b0cde
|
[
"MIT"
] |
permissive
|
topepo/modeltime
|
1189e5fe6c86ee3a70aec0f100387a495f8add5f
|
bff0b3784d1d8596aa80943b221eb621481534e1
|
refs/heads/master
| 2022-12-27T07:11:58.979836
| 2020-10-08T16:07:27
| 2020-10-08T16:07:27
| 289,933,114
| 1
| 0
|
NOASSERTION
| 2020-08-24T13:17:10
| 2020-08-24T13:17:10
| null |
UTF-8
|
R
| false
| true
| 1,469
|
rd
|
recipe_helpers.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dev-xregs.R
\name{recipe_helpers}
\alias{recipe_helpers}
\alias{juice_xreg_recipe}
\alias{bake_xreg_recipe}
\title{Developer Tools for processing XREGS (Regressors)}
\usage{
juice_xreg_recipe(recipe, format = c("tbl", "matrix"))
bake_xreg_recipe(recipe, new_data, format = c("tbl", "matrix"))
}
\arguments{
\item{recipe}{A prepared recipe}
\item{format}{One of:
\itemize{
\item \code{tbl}: Returns a tibble (data.frame)
\item \code{matrix}: Returns a matrix
}}
\item{new_data}{Data to be processed by a recipe}
}
\value{
Data in either the \code{tbl} (data.frame) or \code{matrix} formats
}
\description{
Wrappers for using \code{recipes::bake} and \code{recipes::juice} to process data
returning data in either \verb{data frame} or \code{matrix} format (Common formats needed
for machine learning algorithms).
}
\examples{
library(dplyr)
library(timetk)
library(recipes)
library(lubridate)
predictors <- m4_monthly \%>\%
filter(id == "M750") \%>\%
select(-value) \%>\%
mutate(month = month(date, label = TRUE))
predictors
# Create default recipe
xreg_recipe_spec <- create_xreg_recipe(predictors, prepare = TRUE)
# Extracts the preprocessed training data from the recipe (used in your fit function)
juice_xreg_recipe(xreg_recipe_spec)
# Applies the prepared recipe to new data (used in your predict function)
bake_xreg_recipe(xreg_recipe_spec, new_data = predictors)
}
|
532f64fb16cd73d983aa279f5446db7815f347e3
|
3ca84ee0818caa8ba642431fce356f523fc4a8d4
|
/stackoverflow/anova.R
|
c4e8e1f6f891bb0935399d7d1e2d000b5d6c9c3f
|
[] |
no_license
|
Zedseayou/reprexes
|
0f15109ee57e2d4da5538c47a826ebd126bfbfd9
|
a38a88acaff05bd7b9cd9c4ba3b77899b5f33b81
|
refs/heads/master
| 2021-05-03T10:57:11.405154
| 2019-05-22T23:29:32
| 2019-05-22T23:29:32
| 120,541,074
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 325
|
r
|
anova.R
|
# Reprex: per-group ANOVA with dplyr::do().
# NOTE(review): grouping by `number` and then modelling value ~ number makes
# the predictor constant within each group -- presumably intentional for the
# question this script reproduces; verify against the original post.
library(tidyverse)
tbl <- read_table2(
  "id number value
1 1 2
1 2 1
1 3 4
2 1 4
2 2 3
2 3 4"
)
# First attempt: do() requires each result to be a data frame, and a raw
# anova table is not one.
tbl %>%
  group_by(number) %>%
  do(anova(lm(value ~ number, data = .)))
library(broom)
# Working version: broom::tidy() converts the anova table to a data frame.
tbl %>%
  group_by(number) %>%
  do(tidy(anova(lm(value ~ number, data = .))))
|
7a388cc05aeda87002ea418aff3743fa8a85a0df
|
ebef50b72699404ed4e523bdd702806b108f4fab
|
/man/ShowOptDesign.Rd
|
b5b57bc250ea0485a63ea0f11b5f2fdd1e41d9eb
|
[] |
no_license
|
cran/hiPOD
|
a4691dc4f657132ee40dad821a47907aa189242d
|
85448d9356e7a9451b846392b9eb8afdc333bf74
|
refs/heads/master
| 2021-01-22T17:47:39.699768
| 2012-04-27T00:00:00
| 2012-04-27T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,920
|
rd
|
ShowOptDesign.Rd
|
\name{ShowOptDesign}
\alias{ShowOptDesign}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Print the top choices of designs
}
\description{
Show the top [num.designs] choices of valid designs.
}
\usage{
ShowOptDesign(opt.design.results, num.designs = 10)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{opt.design.results}{
The result object returned by \code{\link{FindOptPower}}, containing the
evaluated candidate designs and their predicted power.
}
  \item{num.designs}{
The number of top-ranked valid designs to display. Defaults to 10.
}
}
\value{
a dataframe including the top choices.
}
\author{
Wei E. Liang
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{FindOptPower}}, \code{\link{PlotOptPower}}
}
\examples{
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
######## Example 1: A simple example, with very rough grid points (only 20X20 grid points)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
##### Find the optimal design
example.1 <- FindOptPower(cost=700000, sample.size=5000, MAF=0.03, OR=2, error=0.01, upper.P=200, Number.Grids=50)
##### assign a directory to store the contour plots
##### with your own choice
proj.Dir <- paste(getwd(), "/hiPOD_examples", sep="")
if(!file.exists(proj.Dir)) dir.create(proj.Dir)
##### Inferences on the optimal designs
PlotOptPower(example.1, save.contour=FALSE, plot.3d=FALSE)
ShowOptDesign(example.1, 5)
ShowOptDesign(example.1, 10)
## The function is currently defined as
function(opt.design.results, num.designs=10)
{
designs.good <- subset(opt.design.results[[5]], subset=(is.valid.design & upper.sample.good & Xmean.good))
head(designs.good[order(-designs.good$pred.power), ], num.designs)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ design }
\keyword{ optimize }% __ONLY ONE__ keyword per line
|
7c19873b45fbd2ba0e5de62dfea265d223ba3973
|
f64fea318bda54ddf7a18aab6ea6683d2b2c94e1
|
/exploratory/explore_3_viz_text.R
|
d264a57478d562faf41949d4b4bb10c6fbdeb721
|
[] |
no_license
|
SportsTribution/doing_data
|
75faedc24fe467120cbb2e46892e98db219d2e54
|
c728afee4d3cb4fdf7d25cf319cf220497e9eb87
|
refs/heads/master
| 2018-01-08T08:10:22.206196
| 2016-02-24T16:19:00
| 2016-02-24T16:19:00
| 52,455,390
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,629
|
r
|
explore_3_viz_text.R
|
# On the first pass (j==1): build a corpus from the text column i of DFfac,
# compute overall term frequencies, and plot the top words for the dataset.
# NOTE(review): relies on i, j, DFfac, fieldName, tableName, folderName and
# the colIsFactor*/colIsLogical vectors being defined by an enclosing
# script -- this file is not self-contained.
if (j==1){
  DFfieldText <- paste(as.character(DFfac[,i]), collapse=" ")
  DFfieldText <- Corpus(VectorSource(DFfieldText))
  # Normalise encoding, then strip case, punctuation, numbers and whitespace.
  DFfieldText <- tm_map(DFfieldText,content_transformer(function(x) iconv(x, to='UTF-8', sub='byte')), mc.cores=1)
  DFfieldText <- tm_map(DFfieldText, content_transformer(tolower))
  DFfieldText <- tm_map(DFfieldText, content_transformer(removePunctuation), mc.cores=1)
  DFfieldText <- tm_map(DFfieldText, content_transformer(removeNumbers))
  DFfieldText <- tm_map(DFfieldText, content_transformer(stripWhitespace))
  # create term document matrix
  dtm <- DocumentTermMatrix(DFfieldText )
  dtm2 <- data.frame(as.matrix(dtm))
  frequency <- colSums(dtm2)
  frequency <- sort(frequency, decreasing=TRUE)
  # Keep only words occurring at least 20 times.
  frequency <- frequency[frequency>=20]
  if (length(frequency)>0){
    Top_freq <- data.frame(frequency=frequency[1:30])
    Top_freq[,"Top_Word"] <- rownames(Top_freq)
    p<-ggplot(Top_freq,aes(Top_Word,frequency))
    p<-p+geom_bar(stat="identity",width = 0.8)
    p <- p+geom_text(aes(y=frequency, label=frequency),
                     color="black",
                     vjust=-1.0, size=2)+
      theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=0.5))+
      ggtitle(fieldName)#+
    # NOTE(review): `p` is never printed before ggsave(), which defaults to
    # last_plot() -- confirm the intended plot is the one being saved.
    ggsave(paste("exploratory/Summary_",tableName,"/",i,"_",fieldName,"_INFO.png",sep=""),
           width = 10,
           height = 5,
           dpi = 150)
    ggsave(paste(folderName,"/","0_",fieldName,"_INFO.png",sep=""),
           width = 10,
           height = 5,
           dpi = 150)
  }
}
# For factor/logical grouping columns: compare each group's word usage to the
# rest of the corpus with the log-likelihood (G2) keyness statistic, rank the
# significant words, and save a per-group label plot.
if ((colIsFactorChar[j] | colIsFactorNum[j] | colIsLogical[j])& length(frequency)>0 ){
  LL.df <- NULL
  nameGroup<-levels(DFfac[,j])
  # textInfo: per-group word counts plus an "All_" margin row/column.
  textInfo<-data.frame(matrix(data =0 , nrow = length(nameGroup)+1, ncol = length(frequency)+1))
  textLL<-data.frame(matrix(data =0 , nrow = length(nameGroup), ncol = length(frequency)))
  colnames(textInfo)<-c(names(frequency),"All_")
  rownames(textInfo)<-c(nameGroup,"All_")
  textInfo["All_",]<-c(frequency,sum(frequency))
  colnames(textLL)<-c(names(frequency))
  rownames(textLL)<-c(nameGroup)
  for (jGroup in nameGroup){
    # Same corpus cleaning pipeline as above, restricted to this group's rows.
    DFfieldText <- paste(as.character(DFfac[DFfac[,j]==jGroup,i]), collapse=" ")
    DFfieldText <- Corpus(VectorSource(DFfieldText))
    DFfieldText <- tm_map(DFfieldText,content_transformer(function(x) iconv(x, to='UTF-8', sub='byte')), mc.cores=1)
    DFfieldText <- tm_map(DFfieldText, content_transformer(tolower))
    DFfieldText <- tm_map(DFfieldText, content_transformer(removePunctuation), mc.cores=1)
    DFfieldText <- tm_map(DFfieldText, content_transformer(removeNumbers))
    DFfieldText <- tm_map(DFfieldText, content_transformer(stripWhitespace))
    dtm <- DocumentTermMatrix(DFfieldText )
    dtm2 <- data.frame(as.matrix(dtm))
    # Keep only words that made the overall frequency cut.
    dtm2 <- dtm2[names(dtm2) %in% names(textInfo)]
    if (dim(dtm2)[2]>0){
      textInfo[jGroup,names(dtm2)]<-dtm2
      textInfo[jGroup,"All_"]<-sum(dtm2)
      speakerTMP<-textInfo[jGroup,1:length(frequency)]
      otherTMP<-textInfo["All_",1:length(frequency)]-speakerTMP
      # Avoid log(0) by flooring zero counts.
      speakerTMP[speakerTMP==0]<-0.0001
      otherTMP[otherTMP==0]<-0.0001
      # Expected counts under the null of homogeneous word use.
      E1 <- (textInfo[jGroup,"All_"]*textInfo["All_",1:length(frequency)])/textInfo["All_","All_"]
      E2 <- ((textInfo["All_","All_"]-textInfo[jGroup,"All_"])*
               textInfo["All_",1:length(frequency)])/textInfo["All_","All_"]
      textLL[jGroup,] <- 2*(speakerTMP*log(speakerTMP/E1) + otherTMP*log(otherTMP/E2))
      tmpLL <- data.frame(textLL[jGroup,])
      # Zero out under-represented words and those below the LL threshold (10).
      tmpLL[,E1>speakerTMP]<-0
      tmpLL[,tmpLL<10]<-0
      if (sum(tmpLL>0)>1){
        rankTMP<-order(tmpLL,decreasing = TRUE)
        tmpLL[,rankTMP[1:sum(tmpLL>0)]]<-1:sum(tmpLL>0)
        # Keep at most the top 20 ranked words for plotting.
        LLmelt<-melt(tmpLL[1,tmpLL>0 & tmpLL<21])
        LLmelt[,"Group"]<-jGroup
      } else if (sum(tmpLL>0)==1){
        LLmelt <- data.frame(variable=names(tmpLL)[tmpLL>0],value=1,Group=jGroup)
      } else {
        LLmelt <- data.frame(variable="No significant text",value=0,Group=jGroup)
      }
    } else{
      LLmelt <- data.frame(variable="No significant text",value=0,Group=jGroup)
    }
    LL.df<-rbind(LL.df,LLmelt)
    # row <- data.frame(speaker, word, word.total, speaker.total, speaker.word, E1, E2, LL)
    # LL.df <- rbind(LL.df, row)
  }
  names(LL.df)[2:3]<-c("rank",fieldName2)
  ggplot(LL.df,aes_string(fieldName2,"rank"))+
    geom_point(color="white")+
    geom_label(aes_string(label="variable",fill=fieldName2), color='white', fontface='bold', size=2)+ guides(fill=FALSE)
  ggsave(paste(folderName,"/",j,"_",fieldName,"_",fieldName2,"_INFO.png",sep=""),
         width = 8,
         height = 4.5,
         dpi = 150)
}
|
d2ac83d5ee3768b66b73a203ac8c3fe0afbe48b4
|
2e4f3592e872117efc1559b0219299ba7d6e840c
|
/tests/testthat.R
|
1d1c6fe0d09c5424d912293b98f4d6b88ae98c09
|
[
"MIT"
] |
permissive
|
gl2668/whichState
|
1d9134f1998af3764d987feed1894e5ed7015ab4
|
afc5523e75cee28b191665c3a05c6700b3c46967
|
refs/heads/master
| 2020-08-22T19:53:40.592257
| 2019-11-25T19:36:00
| 2019-11-25T19:36:00
| 216,468,112
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Standard testthat entry point: runs all tests under tests/testthat/
# for the whichState package.
library(testthat)
library(whichState)
test_check("whichState")
|
af4a9cf6061e487ba8bee242ac7691166fa1c480
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/VGAM/examples/genrayleigh.Rd.R
|
936f43b940169fc477bbbb50a402776cb0787be2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
genrayleigh.Rd.R
|
# Script extracted from the VGAM package's genrayleigh Rd examples:
# simulate generalized Rayleigh data and recover the parameters with vglm().
library(VGAM)
### Name: genrayleigh
### Title: Generalized Rayleigh Distribution Family Function
### Aliases: genrayleigh
### Keywords: models regression
### ** Examples
Scale <- exp(1); shape <- exp(1)
rdata <- data.frame(y = rgenray(n = 1000, scale = Scale, shape = shape))
fit <- vglm(y ~ 1, genrayleigh, data = rdata, trace = TRUE)
# Compare the sample mean with the first fitted (model-implied) value.
c(with(rdata, mean(y)), head(fitted(fit), 1))
coef(fit, matrix = TRUE)
Coef(fit)
summary(fit)
|
a1ae3ff9d29df1957189bf3f3fe47ff753546a6e
|
4df4f7d46f919516073166fa5f4f654bb54a1d02
|
/man/pld.Rd
|
5d938a3b7816da5b2c79156cf853efefbc6dea4c
|
[] |
no_license
|
cran/polysat
|
feb78fd54904f91d909fbc9b9e35e84d067eab36
|
c39682b0f45a3f889e44514a8629f4947210ed1f
|
refs/heads/master
| 2022-09-08T14:09:00.861300
| 2022-08-23T13:10:02
| 2022-08-23T13:10:02
| 17,698,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,693
|
rd
|
pld.Rd
|
\name{pld}
\alias{pld}
\alias{pld<-}
\alias{plCollapse}
\title{
Accessor, Replacement, and Manipulation Functions for \code{"ploidysuper"} Objects
}
\description{
\code{pld} accesses and replaces the \code{pld} slot of objects of
\code{"\linkS4class{ploidysuper}"} subclasses. \code{plCollapse} tests
whether an object of one of these classes can be converted to an object
of a simpler one of these classes, and optionally returns the converted
object. These are generic functions with methods for the subclasses of
\code{"ploidysuper"}. These functions are primarily for internal use.
}
\usage{
pld(object, samples, loci)
pld(object) <- value
plCollapse(object, na.rm, returnvalue)
}
\arguments{
\item{object}{
A \code{"ploidysuper"} object.
}
\item{samples}{
An optional character or numeric vector indexing the samples for which
to return ploidy values.
}
\item{loci}{
An optional character or numeric vector indexing the loci for which
to return ploidy values.
}
\item{value}{
A numeric vector or matrix that can be coerced to integers. These
represent the ploidies to store in the \code{object@pld} slot.
}
\item{na.rm}{Boolean. If \code{TRUE}, \code{NA} values are ignored when
testing to see if the ploidy format can be simplified. If the sample,
locus, or entire dataset all has one ploidy aside from \code{NA}, the
\code{NA} values will be overwritten by that ploidy when simplifying
the ploidy format. If \code{FALSE}, \code{NA} is treated as a unique
ploidy.
}
\item{returnvalue}{Boolean. If \code{TRUE}, a \code{"ploidysuper"}
object will be returned if the ploidy format can be simplified, and
\code{FALSE} will be returned if it cannot be simplified. If
\code{FALSE}, only \code{TRUE} or \code{FALSE} will be returned to
indicate if the ploidy format can be simplified or not.
}
}
\value{
\code{pld} returns the vector or matrix containing the ploidy values.
This is the contents of \code{object@pld}.
\code{plCollapse} either returns a Boolean value indicating whether the
ploidy can be changed to a simpler format, or a new \code{"ploidysuper"}
object with all of the ploidy data of \code{object} put into a simpler
format. If \code{object} is a \code{"ploidymatrix"} object, a
\code{"ploidysample"}, \code{"ploidylocus"}, or \code{"ploidyone"}
object can be returned depending on how many unique ploidy values there
are and how they are distributed. If \code{object} is a
\code{"ploidysample"} or \code{"ploidylocus"} object, a
\code{"ploidyone"} object can be returned.
}
\author{
Lindsay V. Clark
}
\seealso{
\code{\link{reformatPloidies}}, \code{\link{Ploidies}}
}
\examples{
test <- new("ploidymatrix", samples=c("a","b","c"),
loci=c("l1","l2","l3"))
pld(test) # view the ploidies
pld(test) <- 2 # make it diploid at all samples and loci
pld(test)["a",] <- c(2,4,4) # change the ploidies for sample a
pld(test, samples=c("a","b")) # view ploidies at a subset of samples
# test to see if the ploidies can be simplified
p <- plCollapse(test, na.rm=FALSE, returnvalue=TRUE)
p
# now change a ploidy and repeat the test
pld(test)["a","l1"] <- 4
p <- plCollapse(test, na.rm=FALSE, returnvalue=TRUE)
p
# change something else and collapse it further
pld(p)["a"] <- 2
p2 <- plCollapse(p, na.rm=FALSE, returnvalue=TRUE)
p2
# if na.rm=FALSE, NA values are not ignored:
pld(test)["a","l1"] <- NA
pld(test)
plCollapse(test, na.rm=FALSE, returnvalue=TRUE)
# NA values are ignored with na.rm=TRUE
plCollapse(test, na.rm=TRUE, returnvalue=TRUE)
}
\keyword{ methods }
\keyword{ manip }% __ONLY ONE__ keyword per line
|
f8a7e2d82f85f38e7e3805be573a83294eb8afea
|
e1b685959f55e5556adda5e25520b15c2f338705
|
/src/main/R/libyogiseq.R
|
d5af6e3cbd189d2ec7e6a83b4430fc81c9a79531
|
[] |
no_license
|
jweile/screen_pipeline
|
e5f13a771d7e6f374144447b0a10aed40142eff5
|
7e73babeec0a1e5f56d6cd7ac71b347a9769ae9d
|
refs/heads/master
| 2020-09-24T08:04:12.539267
| 2017-03-27T17:43:36
| 2017-03-27T17:43:36
| 225,709,768
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 31,577
|
r
|
libyogiseq.R
|
# Split a string into a character vector of its individual characters.
# Uses strsplit instead of sapply over 1:nchar(str): the old form broke on
# the empty string, because 1:0 yields c(1, 0) rather than an empty range.
to.char.array <- function(str) strsplit(str, "")[[1]]
char.at <- function(str,i) substr(str,i,i)
# Construct an immutable sequence object of S3 class "yogiseq".
#
# sequence: the sequence string itself.
# qual:     optional numeric quality track (one value per character).
# id:       optional identifier string.
#
# The returned object exposes three accessors:
#   toString()    - the raw sequence string
#   getQuality(is)- quality values at positions 'is' (all values when 'is'
#                   is omitted), or NULL when no quality track exists
#   getID()       - the identifier, or NULL when none was given
new.sequence <- function(sequence, qual=NULL, id=NULL) {
	stored.seq <- sequence
	stored.qual <- qual
	stored.id <- id
	fetch.string <- function() {
		stored.seq
	}
	fetch.quality <- function(is) {
		# A missing 'is' falls through to stored.qual[] and selects all values.
		if (is.null(stored.qual)) {
			NULL
		} else {
			stored.qual[is]
		}
	}
	fetch.id <- function() {
		if (is.null(stored.id)) {
			NULL
		} else {
			stored.id
		}
	}
	structure(
		list(
			toString=fetch.string,
			getQuality=fetch.quality,
			getID=fetch.id
		),
		class="yogiseq"
	)
}
# S3 print method for yogiseq: show a short tag containing the sequence ID.
print.yogiseq <- function(s) print(paste("<YogiSeq:",s$getID(),">"))
# S3 summary method: named character vector of id, sequence string, and the
# quality track collapsed into a comma-separated string.
summary.yogiseq <- function(s) c(id=s$getID(),sequence=s$toString(),phred=paste(s$getQuality(),collapse=","))
# S3 length method: number of characters in the sequence string.
length.yogiseq <- function(s) nchar(s$toString())
# Reverse-complement a DNA sequence given as a plain string or a yogiseq
# object. Supports the IUPAC codes A/C/G/T/N/R/Y/S/W/K/M. For a yogiseq
# input, the quality track is reversed alongside and the ID is preserved.
reverseComplement <- function(seq) {
	complement <- c(A='T',C='G',G='C',T='A',N='N',R='Y',Y='R',S='S',W='W',K='M',M='K')
	# complement each character, then reverse and re-join into one string
	revcomp.of <- function(s) {
		paste(rev(complement[to.char.array(s)]), collapse="")
	}
	if (any(class(seq) == "yogiseq")) {
		new.sequence(
			revcomp.of(seq$toString()),
			qual=rev(seq$getQuality()),
			id=seq$getID()
		)
	} else {
		revcomp.of(seq)
	}
}
# Extract the subsequence [from, to] of a yogiseq object, carrying along the
# matching slice of the quality track (when present) and the original ID.
# Raises an error when 's' is not a yogiseq object.
subseq <- function(s,from,to) {
	if (!any(class(s) == "yogiseq")) {
		stop("First argument must be a YogiSeq object")
	}
	sliced.qual <- if (is.null(s$getQuality())) NULL else s$getQuality(from:to)
	new.sequence(substr(s$toString(), from, to), sliced.qual, s$getID())
}
# Write a list of sequences to an open connection in FASTA format.
#
# con:  an open, writable connection.
# seqs: a list of yogiseq objects and/or plain character strings; for plain
#       strings the list element's name supplies the FASTA header.
# Elements of any other type are skipped with a warning.
writeFASTA <- function(con,seqs) {
	for (i in 1:length(seqs)) {
		s <- seqs[[i]]
		if (class(s) == "yogiseq") {
			# yogiseq objects carry their own ID for the header line
			writeLines(c(
				paste(">",s$getID(),sep=""),
				s$toString()
			),con)
		} else if (class(s) == "character") {
			# bare strings take their header from the list element's name
			writeLines(c(
				paste(">",names(seqs)[i],sep=""),
				s
			),con)
		} else {
			warning("Skipping unsupported data type",class(s))
		}
	}
}
# Read all sequences from an open FASTA connection.
#
# con: an open, readable connection.
# Returns a list of yogiseq objects (without quality tracks). Multi-line
# sequence bodies are concatenated into one string. A running count of
# parsed sequences is printed to the console.
readFASTA <- function(con) {
	out <- list()
	id <- NULL
	seq <- NULL
	i <- 0
	while(length(line <- readLines(con, n=1)) > 0) {
		if (substr(line,1,1)==">") {
			#if old sequence exists, add it to the output
			if (!is.null(id)) {
				out[[length(out)+1]] <- new.sequence(seq,id=id)
				cat(paste("\r Read",i <- i+1,"sequences        "))
			}
			#new sequence
			id <- substr(line,2,nchar(line))
			seq <- ""
		} else {
			# continuation line: append to the current sequence body
			seq <- paste(seq,line,sep="")
		}
	}
	#add last sequence to output
	if (!is.null(id)) {
		out[[length(out)+1]] <- new.sequence(seq,id=id)
		cat(paste("\r Read",i <- i+1,"sequences        \n"))
	}
	out
}
# Write a list of yogiseq objects (with quality tracks) to an open
# connection in FASTQ format (4 lines per record: @id, sequence, +, quality).
writeFASTQ <- function(con, seqs) {
	#function for decoding phred quality scores
	qualScale <- to.char.array("!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~")
	# qualScale[q-32] maps a numeric quality q back to its ASCII character;
	# this is the inverse of string2phred() in new.fastq.parser, which stores
	# qualities as raw ASCII codes (phred+33). NOTE(review): sam2pileup()
	# instead decodes qualities as charToRaw()-33 (true phred) — confirm the
	# two scales are not mixed by callers.
	qual2string <- function(qual) paste(qualScale[qual-32],collapse="")
	writeLines(unlist(lapply(seqs, function(s) {
		c(
			paste("@",s$getID(),sep=""),
			s$toString(),
			"+",
			qual2string(s$getQuality())
		)
	})),con)
}
# Create a streaming FASTQ parser bound to an open connection. The returned
# object's parse.next(n, ignore.quality) method reads up to n records and
# returns them as a list of yogiseq objects.
new.fastq.parser <- function(con) {
	.con <- con
	#function for decoding phred quality scores
	qualScale <- to.char.array("!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~")
	# Convert a quality string to numeric scores. Values are raw ASCII codes
	# (position in qualScale + 32, i.e. phred+33); this is the inverse of
	# qual2string() in writeFASTQ.
	string2phred <- function(string) {
		out <- sapply(to.char.array(string), function(x) which(qualScale == x))
		names(out) <- NULL
		out+32
	}
	#function for parsing the next n entries from the open fastq file (or less if less than n remain)
	parse.next <- function(n=10,ignore.quality=FALSE) {
		contents <- list()
		i <- 0
		while ((i <- i+1) <= n && length(lines <- readLines(.con, n=4)) > 0) {
			# each FASTQ record is exactly 4 lines: @id, sequence, +, quality
			if (length(lines) < 4 || substr(lines[1],1,1) != "@" || substr(lines[3],1,1) != "+") {
				stop("Corrupt read:\n",paste(lines,collapse="\n"))
			}
			# the ID is the first space-delimited token after the '@'
			id <- strsplit(substr(lines[1],2,nchar(lines[1])), " ", fixed=TRUE)[[1]][1]
			sequence <- lines[2]
			quality <- if (ignore.quality) NULL else string2phred(lines[4])
			contents[[length(contents)+1]] <- new.sequence(sequence,id=id,qual=quality)
		}
		contents
	}
	structure(list(parse.next=parse.next),class="yogi.fastq.parser")
}
#alignment algorithm requires bitwise operations
library(bitops)
##
# Needleman-Wunsch global alignment of two sequences (plain strings or
# yogiseq objects) under unit edit costs (Levenshtein distance).
#
# Returns an object of class "yogialign" exposing:
#   getMatrix()      - the full dynamic-programming score matrix
#   getDistance()    - the edit distance between s1 and s2
#   getMutations()   - matrix of differences: (s1 char, s1 pos, s2 pos, s2 char),
#                      with "-" marking insertions/deletions
#   getMappings()    - matrix of aligned position pairs (NA marks a gap)
#   printAlignment() - pretty-print the alignment, wrapped at 70 columns
#
# Both sequences are prefixed with a sentinel "$" so that matrix row/column 1
# represents the empty prefix; the traceback is computed lazily on first use.
#
new.alignment <- function(s1, s2) {
	if (any(class(s1)=="yogiseq")) {
		c1 <- c("$",to.char.array(s1$toString()))
	} else {
		c1 <- c("$",to.char.array(s1))
	}
	if (any(class(s2)=="yogiseq")) {
		c2 <- c("$",to.char.array(s2$toString()))
	} else {
		c2 <- c("$",to.char.array(s2))
	}
	#init score matrix
	# The sentinels always match, so the subtraction makes the first
	# row/column 0..n-1 (gap costs from the empty prefix).
	mat <- matrix(nrow=length(c1), ncol=length(c2))
	mat[1,] <- 1:length(c2) - (c1[1] == c2[1])
	mat[,1] <- 1:length(c1) - (c1[1] == c2[1])
	#init trace matrix
	# Trace cells are bitmasks: 1 = diagonal (match/mismatch),
	# 2 = up (deletion from s1), 4 = left (insertion into s1).
	trace <- matrix(0, nrow=length(c1), ncol=length(c2))
	trace[1,] <- 4
	trace[,1] <- 2
	trace[1,1] <- 0
	#compute alignment matrix
	for (i in 2:length(c1)) {
		for (j in 2:length(c2)) {
			options <- c(
				rep = mat[i-1,j-1] + (c1[i] != c2[j]),
				del = mat[i-1,j] + 1,
				ins = mat[i,j-1] + 1
			)
			mat[i,j] <- min(options)
			# record every co-optimal direction in the trace bitmask
			tr.bitmasks <- 2^(which(options == min(options))-1)
			for (mask in tr.bitmasks) {
				trace[i,j] <- bitOr(trace[i,j],mask)
			}
		}
	}
	# Accessor: raw DP matrix.
	getMatrix <- function() {
		mat
	}
	# Accessor: edit distance (bottom-right DP cell).
	getDistance <- function() {
		mat[length(c1),length(c2)]
	}
	# Caches filled lazily by run.trace().
	.mutations <- NULL
	.mapping <- NULL
	# Walk the trace matrix from the bottom-right corner back to the origin,
	# recording differences (.mutations) and aligned positions (.mapping).
	# When several directions are co-optimal, the diagonal is preferred.
	run.trace <- function() {
		rep <- 1
		del <- 2
		ins <- 4
		muts <- list()
		map <- list()
		i <- length(c1)
		j <- length(c2)
		while (i > 1 || j > 1) {
			if (bitAnd(trace[i,j], rep) > 0) {
				if (c1[i] != c2[j]) {
					muts[[length(muts)+1]] <- c(c1[i], i-1, j-1, c2[j])
				}
				map[[length(map)+1]] <- c(i-1, j-1)
				i <- i-1
				j <- j-1
			} else if (bitAnd(trace[i,j], del)) {
				muts[[length(muts)+1]] <- c(c1[i], i-1, j-1, "-")
				map[[length(map)+1]] <- c(i-1, NA)
				i <- i-1
			} else if (bitAnd(trace[i,j], ins)) {
				muts[[length(muts)+1]] <- c("-", i-1, j-1, c2[j])
				map[[length(map)+1]] <- c(NA, j-1)
				j <- j-1
			} else {
				stop("uninitialized trace at ",i,j)
			}
		}
		# if (c1[1] != c2[1]) {
		# 	muts[[length(muts)+1]] <- c(c1[1],i,c2[1])
		# }
		# Traceback runs end-to-start, so reverse before materializing.
		.mapping <<- do.call(rbind,rev(map))
		.mutations <<- do.call(rbind,rev(muts))
	}
	getMutations <- function() {
		if (is.null(.mutations)) run.trace()
		.mutations
	}
	getMappings <- function() {
		if (is.null(.mapping)) run.trace()
		.mapping
	}
	# Print the alignment in three rows (s1, match markers, s2):
	# "|" = match, "." = mismatch, " " = gap; wrapped at 70 columns.
	printAlignment <- function() {
		if (is.null(.mapping)) run.trace()
		chars <- do.call(cbind,lapply(1:nrow(.mapping), function(k) {
			i <- .mapping[k,1]
			j <- .mapping[k,2]
			char1 <- if (is.na(c1[i+1])) '-' else c1[i+1]
			char2 <- if (is.na(c2[j+1])) '-' else c2[j+1]
			matchChar <- if (is.na(c1[i+1]) || is.na(c2[j+1])) " "
				 else if (c1[i+1] == c2[j+1]) "|" else "."
			c(char1,matchChar,char2)
		}))
		cat("\nLevenstein distance:",getDistance(),"\n")
		for (wrap in 0:(ncol(chars)/70)) {
			startcol <- wrap*70 + 1
			endcol <- if (startcol+69 > ncol(chars)) ncol(chars) else startcol+69
			cat("\n",paste(apply(chars[,startcol:endcol],1,paste,collapse=""),collapse="\n"),"\n",sep="")
		}
	}
	structure(list(
		getMatrix=getMatrix,
		getDistance=getDistance,
		getMutations=getMutations,
		getMappings=getMappings,
		printAlignment=printAlignment
	),class="yogialign")
}
##
# Creates a new translator object for translating nucleotide strings to
# amino acid strings, using a tab-separated codon table file with columns:
# three-letter AA code, one-letter AA code, '|'-separated codon list.
# Returns a list with one member, translate(nucl).
#
init.translator <- function(ctable.file="codontable.txt") {
	##
	# Creates a new codon table object from an open connection; exposes
	# getSingleForCodon(codon) -> one-letter AA code (or NULL if unknown).
	#
	init.codon.table <- function(con) {
		nc2single <- list()
		nc2triple <- list()
		while (length(line <- readLines(con, n=1, warn=FALSE)) > 0) {
			cols <- strsplit(line,"\t")[[1]]
			aa3 <- cols[1]
			aa1 <- cols[2]
			codons <- strsplit(cols[3], "|", fixed=TRUE)[[1]]
			for (codon in codons) {
				nc2single[codon] <- aa1
				nc2triple[codon] <- aa3
			}
		}
		# Return single-letter code of aminoacid that is encoded by the given codon
		getSingleForCodon <- function(codon) {
			nc2single[[codon]]
		}
		structure(list(
			getSingleForCodon=getSingleForCodon
		),class="codonTable")
	}
	tryCatch({
		con <- file(ctable.file, open="r")
		codons <- init.codon.table(con)
	},
	error = function(ex) {
		# NOTE(review): cat() cannot print a condition object directly;
		# this handler likely fails itself and masks the original error —
		# consider cat(conditionMessage(ex)). Left unchanged here.
		cat(ex)
	},
	finally = {
		if (isOpen(con)) {
			close(con)
		}
	})
	# Translate a nucleotide sequence (string or yogiseq) to amino acids.
	# Translation stops at the first stop codon ("*", which is retained);
	# unknown codons become "". For a yogiseq with a quality track, a
	# per-codon quality (minimum of the three base qualities) is attached
	# as attribute "quality" on the result.
	translate <- function(nucl) {
		aaQual <- NULL
		if (any(class(nucl)=="yogiseq")) {
			ncSeq <- nucl$toString()
			if (!is.null(nucl$getQuality())) {
				aaQual <- sapply(seq(1,length(nucl),3), function(i) min(nucl$getQuality(i:(i+2))))
			}
		} else {
			ncSeq <- nucl
		}
		if (nchar(ncSeq) == 0) stop("translate: empty string! ",ncSeq)
		aa <- paste(sapply(
			seq(1,nchar(ncSeq),3),
			function(i) {
				a <- codons$getSingleForCodon(substr(ncSeq,i,i+2))
				if(is.null(a)) "" else a
			}
		), collapse="")
		# truncate everything after (and including) the first stop codon
		aaseq <- to.char.array(aa)
		if (any(aaseq == "*")) {
			cutoff <- min(which(aaseq == "*"))
			aa <- paste(aaseq[1:cutoff], collapse="")
		}
		if (!is.null(aaQual)) {
			attr(aa,"quality") <- aaQual
		}
		aa
	}
	list(translate=translate)
}
###
# Run bowtie2 in local-alignment mode on a FASTQ file and return the results.
#
# fastq.file = FASTQ with query sequences
# db.file    = location of the bowtie reference DB (index prefix)
# clip3/clip5= number of bases to trim from the 3'/5' ends
# short      = use permissive seed/score settings suited for short reads
# purge      = delete the intermediate SAM file after parsing
# parse      = if TRUE return a parsed data.frame, otherwise the SAM path
# header     = include SAM header lines in the output file
#
# The bowtie binary path is taken from the $BowtieBin environment variable,
# expanded by the shell that system() invokes. 'logger' is an object defined
# elsewhere in this project.
#
bowtie <- function(fastq.file, db.file,
		clip3=0, clip5=0, short=TRUE,
		purge=TRUE, debug.mode=FALSE, parse=TRUE, header=FALSE) {
	# output SAM path derived from the input FASTQ path
	sam.file <- sub("\\.fastq",".sam",fastq.file)
	tryCatch(
		exitCode <- system(paste(
			#"/home/rothlab/jweile/bin/bowtie2",
			"$BowtieBin",
			ifelse(clip3>0,paste("-3",clip3),""),
			ifelse(clip5>0,paste("-5",clip5),""),
			ifelse(short,"-L 4 -N 1 -i C,1 --score-min C,0",""),
			"--local",
			ifelse(header,"","--no-head"),
			"-x",db.file,
			"-U",fastq.file,
			"-S",sam.file
		)),
		error=function(e) {
			logger$fatal(e)
			stop(e)
		}
	)
	if (exitCode != 0) {
		e <- simpleError("Error executing Bowtie!")
		logger$fatal(e)
		stop(e)
	}
	if (parse) {
		# parse the headerless SAM into a data.frame with the 11 mandatory
		# columns plus a catch-all "tags" column
		sam <- read.delim(sam.file,header=FALSE,stringsAsFactors=FALSE)
		colnames(sam) <- c(
			"cname","flag","rname","pos","mapq","cigar","mrnm","mpos",
			"isize","seq","qual","tags"
		)
		if(purge && !debug.mode) {
			file.remove(sam.file)
		}
		sam
	} else {
		sam.file
	}
}
library("hash")
###
# A class for searching query strings against an index of k-mers built from
# a template FASTA library. Returns a list with build.index(fasta.file),
# load.index(index.file) and search(queries, ...). Requires the 'hash'
# package (loaded above).
#
new.kmer.search <- function(k=5) {
	#extract all k-mers from given string (plain or yogiseq)
	kmers <- function(s) {
		if (class(s)=="yogiseq") {
			sapply(1:(length(s)-k+1),function(i) subseq(s,i,i+k-1)$toString())
		} else {
			sapply(1:(nchar(s)-k+1),function(i) substr(s,i,i+k-1))
		}
	}
	#Fields storing the index (a hash mapping kmers to template indices)
	.kmer.index <- NULL
	#Template names
	.kmer.index.names <- NULL
	#Template sequences
	.template.seqs <- NULL
	# Build the k-mer index from a FASTA library file and cache it both in
	# this object and in an .rdata file next to the FASTA file.
	build.index <- function(fasta.file) {
		tryCatch({
			con <- file(fasta.file, open="r")
			seqs <- readFASTA(con)
		},
		error = function(ex) {
			# logger$fatal(ex)
			stop(ex)
		},
		finally = {
			if (exists("con") && isOpen(con)) {
				close(con)
			}
		})
		kmer.index <- hash()
		kmer.index.names <- sapply(seqs,function(s)s$getID())
		# map each k-mer to the indices of all templates containing it
		for (j in 1:length(seqs)) {
			s <- seqs[[j]]
			kms <- kmers(s)
			for (i in 1:length(kms)) {
				kmer.index[[kms[[i]]]] <- c(kmer.index[[kms[[i]]]],j)
			}
		}
		# NOTE(review): the pattern ".fa" is an unanchored regex (the dot
		# matches any character) — confirm it cannot hit elsewhere in the path.
		index.file <- sub(".fa","_index.rdata",fasta.file)
		# the saved names must match what load.index() expects below
		save(kmer.index,kmer.index.names,seqs,file=index.file)
		.kmer.index <<- kmer.index
		.kmer.index.names <<- kmer.index.names
		.template.seqs <<- seqs
	}
	# Restore a previously built index; load() populates kmer.index,
	# kmer.index.names and seqs in this function's local frame.
	load.index <- function(index.file) {
		load(index.file)
		.kmer.index <<- kmer.index
		.kmer.index.names <<- kmer.index.names
		.template.seqs <<- seqs
	}
	# Search each query against the index. A template is reported only when
	# it accumulates at least min.hits shared k-mers; with useAlignment=TRUE
	# the top 3 candidates are verified by alignment and must lie within
	# edit distance max.d. Ambiguous or failed lookups yield NA.
	search <- function(queries,min.hits=3,max.d=3,useAlignment=TRUE) {
		sapply(queries, function(s) {
			if (is.null(s) || is.na(s) || length(s) == 0 || nchar(s) == 0) {
				return(NA)
			}
			# cat(s$getID(),"\n")
			kms <- kmers(s)
			#Filter out kmers that don't occur in library
			kms <- kms[kms %in% keys(.kmer.index)]
			#No result if no kmers occur in library
			if (length(kms)==0) {
				return(NA)
			}
			#table showing number of hits per template id
			nhits <- table(do.call(c,values(.kmer.index,kms,simplify=FALSE)))
			#filter out best match(es) if it fulfills minimum #hits requirement
			if (useAlignment) {
				#top 3 matches
				top.nhits <- head(sort(nhits[nhits >= min.hits],decreasing=TRUE),3)
				idxs <- as.integer(names(top.nhits))
				#perform alignments for top 3 matches and report distance
				d <- sapply(idxs,function(idx) {
					tseq <- .template.seqs[[idx]]
					new.alignment(tseq,s)$getDistance()
				})
				top.match <- idxs[which(d <= max.d & d==min(d))]
				if (length(top.match) == 1) {
					.kmer.index.names[[top.match]]
				} else {
					NA
				}
			} else {
				top.nhits <- nhits[nhits >= min.hits & nhits==max(nhits)]
				if (length(top.nhits) == 1) {
					.kmer.index.names[[as.integer(names(top.nhits))]]
				} else {
					#in case nothing gets over minimum or there are multiple choices
					NA
				}
			}
		})
	}
	list(
		build.index=build.index,
		load.index=load.index,
		search=search
	)
}
# call.variants <- function(sam.file, ref.file) {
# pileup.file <- sub(".sam$",".pileup",sam.file)
# tryCatch({
# exitCode <- system(paste(
# "$SAMtoolsBin view -b -S",sam.file,"|",
# "$SAMtoolsBin sort -o - - |",
# "$SAMtoolsBin mpileup -s -f",ref.file,"- >",
# pileup.file
# ))
# if (exitCode != 0) {
# stop("Error executing SAMtools!")
# }
# con <- file(ref.file,open="r")
# ref.length <- length(readFASTA(con)[[1]])
# },
# error=function(e) {
# logger$fatal(e)
# stop(e)
# },
# finally={
# if (exists("con") && isOpen(con)) {
# close(con)
# }
# })
# pu <- parsePileup(pileup.file)
# var.call(simplifyPileup(pu),toupper(pu$ref),pu$indels,ref.length)
# }
# parsePileup <- function(f) {
# #read file
# pu <- read.delim(f,stringsAsFactors=FALSE,header=FALSE,quote="")
# colnames(pu) <- c("refname","pos","ref","depth","matches","rqual","mqual")
# #convert quality scores
# pu$rqual <- lapply(pu$rqual, function(qstr) as.integer(charToRaw(qstr))-33)
# pu$mqual <- lapply(pu$mqual, function(qstr) as.integer(charToRaw(qstr))-33)
# #parse matches
# re <- "(\\^.)?([\\+-]\\d+)?([\\.,actgnACTGN\\*])(\\$)?"
# parsed <- global.extract.groups(pu$matches,re)
# #clean up indels
# matches <- lapply(parsed, function(m) {
# indel.starts <- which(m[,2] != "")
# if (length(indel.starts) > 0) {
# for (i in 1:length(indel.starts)) {
# is <- indel.starts[[i]]
# indel <- m[is,2]
# l <- as.integer(substr(indel,2,nchar(indel)))
# val <- paste(substr(indel,1,1),paste(m[is:(is+l-1),3],collapse=""),sep="")
# m[is,3] <- val
# if (l > 1) {
# m <- m[-((is+1):(is+l-1)),]
# indel.starts[(i+1):length(indel.starts)] <- indel.starts[(i+1):length(indel.starts)] -l + 1
# }
# }
# }
# m[,3]
# })
# #split indels from matches
# indels <- lapply(matches, function(m){
# idx <- substr(m,1,1) %in% c("+","-")
# m[idx]
# })
# matches <- lapply(matches,function(m){
# idx <- substr(m,1,1) %in% c("+","-")
# m[!idx]
# })
# pu$matches <- matches
# pu$indels <- ""
# pu$indels <- indels
# pu
# }
# simplifyPileup <- function(pu,onlyFwd=FALSE,disregardMqual=TRUE) {
# piles <- lapply(1:nrow(pu), function(i) {
# ref <- toupper(pu$ref[[i]])
# pile <- to.df(do.call(rbind,mapply(
# function(m,rqual,mqual) {
# if (onlyFwd && m %in% c(",","a","c","g","t")) {
# return(NULL)
# } else {
# p <- if (disregardMqual) {
# 10^(-rqual/10)
# } else {
# 1-(1-10^(-rqual/10))*(1-10^(-mqual/10))
# }
# if (m %in% c(",",".")) {
# return(list(base=ref,p=p))
# } else {
# return(list(base=toupper(m),p=p))
# }
# }
# },
# m=pu$matches[[i]],
# rqual=pu$rqual[[i]],
# mqual=pu$mqual[[i]],
# SIMPLIFY=FALSE
# )))
# #remove absolutes
# pile$p[pile$p == 0] <- 0.0001
# pile$p[pile$p == 1] <- 0.999
# pile
# })
# names(piles) <- pu$pos
# piles
# }
# Call variants from a SAM alignment against a single-sequence reference
# FASTA: builds a pileup via sam2pileup() and delegates to var.call().
# 'logger' is defined elsewhere in this project.
call.variants <- function(sam.file, ref.file) {
	tryCatch({
		con <- file(ref.file,open="r")
		# only the first sequence of the reference FASTA is used
		ref.seq <- readFASTA(con)[[1]]
	},
	error=function(e) {
		logger$fatal(e)
		stop(e)
	},
	finally={
		if (exists("con") && isOpen(con)) {
			close(con)
		}
	})
	pu <- sam2pileup(sam.file,ref.file)
	var.call(
		pu$pileup,
		to.char.array(toupper(ref.seq$toString())),
		pu$indel.track,
		length(ref.seq)
	)
}
# Parse a SAM file into a data.frame, skipping '@' header lines. Rows with
# fewer fields than the widest row are right-padded with NA, so optional
# tag columns beyond "tags" get numeric column names. Relies on to.df(),
# a helper defined elsewhere in this project.
read.sam <- function(sam.file) {
	tryCatch({
		sam.con <- file(sam.file,open="r")
		lines <- readLines(sam.con)
		# drop SAM header lines
		lines <- lines[substr(lines,1,1)!="@"]
		split <- strsplit(lines,"\t")
		# pad every row to the maximum field count so rbind lines up
		ncol <- max(sapply(split,length))
		sam <- do.call(rbind,lapply(split,function(row) c(row,rep(NA,ncol-length(row)))))
		colnames(sam) <- c(
			"cname","flag","rname","pos","mapq","cigar","mrnm","mpos",
			"isize","seq","qual","tags",13:ncol
		)
		sam <- to.df(sam)
		# coerce the numeric SAM columns from character
		sam$flag <- as.integer(sam$flag)
		sam$pos <- as.integer(sam$pos)
		sam$mapq <- as.integer(sam$mapq)
		sam$mpos <- as.integer(sam$mpos)
		sam$isize <- as.integer(sam$isize)
		sam$mapq <- as.integer(sam$mapq)
		sam
	},
	error=function(e) {
		logger$fatal(e)
		stop(e)
	},
	finally={
		if (exists("sam.con") && isOpen(sam.con)) {
			close(sam.con)
		}
	})
}
# Build a per-position pileup from a SAM file aligned to a single-sequence
# reference FASTA.
#
# Returns list(pileup=..., indel.track=...): 'pileup' is a list (one entry
# per reference position, named "1".."L") of data.frames with columns
# base (aligned base, "*" for deletion) and p (error probability derived
# from the base quality); 'indel.track' holds inserted base strings per
# position. Reads flagged unmapped and apparent PCR duplicates (identical
# start/end coordinates) are skipped; a deduplicated SAM is written to
# "nodup.sam" in the input file's directory. Relies on the project helpers
# to.df() and global.extract.groups() defined elsewhere.
sam2pileup <- function(sam.file,ref.file) {
	tryCatch({
		ref.con <- file(ref.file,open="r")
		ref.seq <- readFASTA(ref.con)[[1]]
	},
	error=function(e) {
		logger$fatal(e)
		stop(e)
	},
	finally={
		if (exists("ref.con") && isOpen(ref.con)) {
			close(ref.con)
		}
	})
	# sam <- read.delim(sam.file,header=FALSE,stringsAsFactors=FALSE,skip=3)
	sam <- read.sam(sam.file)
	# decode the SAM FLAG bitfield into named logical columns
	flagMasks <- c(
		multiSegment=0x1, allSegmentsOK=0x2, segmentUnmapped=0x4,
		nextSegmentUnmapped=0x8, revComp=0x10, nextRevComp=0x20,
		firstSegment=0x40, lastSegment=0x80, secondary=0x100,
		failQC=0x200, duplicate=0x400, supplementary=0x800
	)
	flags <- do.call(rbind,lapply(sam$flag,function(x)bitAnd(x,flagMasks)>0))
	colnames(flags) <- names(flagMasks)
	flags <- to.df(flags)
	#CIGAR: S=Soft clip, H=Hard clip, N=Intron skip, M=Match, D=Deletion, I=Insertion, P=Padded
	cigar <- global.extract.groups(sam$cigar,"(\\d+)([SHNMDIP]{1})")
	# compute each read's reference start/end (M and D operations consume
	# reference bases) to detect PCR duplicates by identical coordinates
	start.stop <- do.call(rbind,lapply(1:nrow(sam),function(i) {
		if (flags$segmentUnmapped[[i]]) {
			return(c(start=NA,end=NA))
		}
		l <- sum(as.integer(cigar[[i]][cigar[[i]][,2] %in% c("M","D"),1]))
		c(start=sam$pos[[i]],sam$pos[[i]]+l)
	}))
	pcr.dup <- apply(is.na(start.stop),1,any) | duplicated(start.stop)
	out.sam <- sub("/[^/]+\\.sam$","/nodup.sam",sam.file)
	write.table(sam[!pcr.dup,],out.sam,sep="\t",quote=FALSE,row.names=FALSE)
	# per-reference-position accumulators: observed bases, their qualities,
	# and inserted sequences
	pileup <- list(
		bases=replicate(length(ref.seq),character()),
		qual=replicate(length(ref.seq),numeric()),
		ins=replicate(length(ref.seq),character())
	)
	for (i in 1:nrow(sam)) {
		if (flags$segmentUnmapped[[i]] || pcr.dup[[i]]) {
			next
		}
		# base qualities decoded as true phred (ASCII - 33)
		qtrack <- as.integer(charToRaw(sam$qual[[i]]))-33
		read <- to.char.array(sam$seq[[i]])
		tp <- sam$pos[[i]] #template position
		rp <- 1 #read position
		# walk the CIGAR, advancing template and/or read positions per op
		for (cigrow in 1:nrow(cigar[[i]])) {
			k <- as.integer(cigar[[i]][cigrow,1])
			op <- cigar[[i]][cigrow,2]
			if (op=="M") {
				mstart <- rp
				while (rp < mstart+k) {
					pileup$bases[[tp]][[length(pileup$bases[[tp]])+1]] <- read[[rp]]
					pileup$qual[[tp]][[length(pileup$qual[[tp]])+1]] <- qtrack[[rp]]
					rp <- rp+1
					tp <- tp+1
				}
			} else if (op=="D") {
				# deletion: record "*" with the read's mapping quality as proxy
				mstart <- rp
				for (.dummy in 1:k) {
					pileup$bases[[tp]][[length(pileup$bases[[tp]])+1]] <- "*"
					pileup$qual[[tp]][[length(pileup$qual[[tp]])+1]] <- sam$mapq[[i]]
					tp <- tp+1
				}
			} else if (op=="I") {
				# insertion: stash the inserted bases at the current position
				ins.bases <- paste(read[rp:(rp+k-1)],collapse="")
				pileup$ins[[tp]][[length(pileup$ins[[tp]])+1]] <- ins.bases
				rp <- rp + k
			} else if (op %in% c("S","H")) {
				# clipping consumes read bases only
				# tp <- tp + k
				rp <- rp + k
			} else {
				warning("Unsupported cigar character: ",op, sam$cigar[[i]])
				tp <- tp + k
			}
		}
	}
	# convert qualities to error probabilities: p = 10^(-Q/10)
	pu <- mapply(function(bases, qual){
		data.frame(base=bases,p=10^(-qual/10))
	},bases=pileup$bases,qual=pileup$qual,SIMPLIFY=FALSE)
	names(pu) <- 1:length(ref.seq)
	list(pileup=pu,indel.track=pileup$ins)
}
# Call SNVs and indels from a pileup.
#
# piles:       list of per-position data.frames (base, p) as produced by
#              sam2pileup(); list names are reference positions.
# ref:         reference sequence as an uppercase character array.
# indel.track: per-position list of inserted/deleted base strings.
# ref.length:  total reference length (for the full depth vector).
# threshold:   both the per-base error-probability cutoff and the minimum
#              allele frequency for a call.
#
# Returns list(calls=..., depth=...): 'calls' has columns ref/pos/alt/freq,
# 'depth' covers every reference position (0 where uncovered). Relies on
# the project helper to.df() defined elsewhere.
var.call <- function(piles, ref, indel.track, ref.length, threshold=.05) {
	bases <- c("A","C","G","T","*")
	# per-position base counts, keeping only confident observations (p < threshold)
	freqs <- do.call(rbind,lapply(piles, function(pile.i) {
		fpile <- pile.i[pile.i$p < threshold,]
		table(factor(fpile$base,levels=bases))
	}))
	# total depth per position includes insertion observations
	d <- apply(freqs,1,sum) + sapply(indel.track,length)
	names(d) <- names(piles)
	#check indels
	indel.track <- lapply(indel.track, toupper)
	indel.idxs <- which(sapply(indel.track,length) > 0)
	called.indels <- to.df(do.call(rbind,lapply(indel.idxs, function(i) {
		indel.freqs <- table(toupper(indel.track[[i]]))
		do.call(rbind,lapply(names(indel.freqs), function(indel) {
			f <- indel.freqs[[indel]]
			# an indel needs at least 2 observations and frequency > threshold
			if (f > 1 && f/d[[i]] > threshold) {
				list(ref=ref[[i]],pos=names(piles)[[i]],alt=indel,freq=f/d[[i]])
			} else {
				NULL
			}
		}))
	})))
	#check SNVs
	# zero out singleton counts so one stray read cannot produce a call
	skimmed.freqs <- apply(freqs,c(1,2), function(x)if(x < 2) 0 else x)
	calls <- lapply(1:nrow(freqs), function(i) {
		f <- skimmed.freqs[i,]/d[[i]]
		# only non-reference alleles above the frequency threshold qualify
		nonref <- f[setdiff(bases,ref[[i]])]
		nonref[!is.na(nonref) & nonref > threshold]
	})
	idxs <- which(sapply(calls,length) > 0)
	called.snvs <- to.df(do.call(rbind,lapply(idxs, function(i) {
		pos <- as.numeric(names(piles)[[i]])
		vars <- calls[[i]]
		ref <- ref[[i]]
		do.call(rbind,lapply(names(vars), function(base)
			list(ref=ref,pos=pos,alt=base,freq=vars[[base]])
		))
	})))
	#create depth vector for all positions (including those not in alignment)
	d.all <- sapply(as.character(1:ref.length), function(pos) if (pos %in% names(d)) d[[pos]] else 0)
	list(calls=rbind(called.snvs,called.indels),depth=d.all)
}
# Compute posterior probabilities for each candidate base (A/C/G/T/*) at
# every pileup position via a naive-Bayes-style log-odds over all observed
# symbols. Returns a matrix with one row per position and one column per
# candidate base; bases never observed at a position get probability 0.
# NOTE(review): the exact form of the Bayes factors (pij/3 terms, uniform
# 1/5 prior) is assumed from the code itself — verify against the intended
# statistical model.
base.posteriors <- function(piles) {
	do.call(rbind,lapply(piles, function(pile.i) {
		#possible bases
		qis <- c("A","C","G","T","*")
		posteriors <- sapply(qis, function(qi) {
			#skip impossible bases
			if (!(qi %in% pile.i$base)) {
				return (0)
			}
			# compute the log-odds by iterating over all symbols at the pileup position
			lo.i <- sum(sapply(1:nrow(pile.i), function(j){
				#the base symbol
				bij <- pile.i$base[[j]]
				#the error probability
				pij <- pile.i$p[[j]]
				#calculate the Bayes factor
				if (qi==bij) {
					log(1-pij) - log(pij/3)
				} else {
					log(pij/3) - log(1/3)
				}
			})) + log(1/length(qis))
			# then transform the log-odds to the probability
			# being careful to avoid NaNs
			if (lo.i > 38) 1 else exp(lo.i)/(1+exp(lo.i))
		})
	}))
}
# Convert per-clone lists of mutation descriptor strings (e.g. "A20G",
# "silent", "truncation", "nonsense") into an amino-acid-by-position
# count matrix.
#
# mutations: list of character vectors, one per clone/molecule.
# num.aa:    number of amino acid positions (columns of the result).
# Returns a 21 x num.aa matrix (rows: 20 amino acids plus "*"). Clones
# carrying a "truncation" or "nonsense" event are excluded entirely;
# "silent" entries are skipped.
mutlist2matrix <- function(mutations, num.aa) {
  aa.symbols <- c('A','C','D','E','F','G','H','I','K','L','M','N',
                  'P','Q','R','S','T','V','W','Y','*')
  counts <- matrix(0, nrow = 21, ncol = num.aa,
                   dimnames = list(aa.symbols, 1:num.aa))
  # keep only non-empty clones without truncation/nonsense events
  usable <- sapply(mutations, function(m) {
    length(m) > 0 && !any(m == "truncation" | m == "nonsense")
  })
  for (descriptor in unlist(mutations[usable])) {
    if (descriptor == "silent") next
    # descriptor format: <from-AA><position><to-AA>, e.g. "A20G"
    position <- as.numeric(substr(descriptor, 2, nchar(descriptor) - 1))
    target.aa <- substr(descriptor, nchar(descriptor), nchar(descriptor))
    counts[target.aa, position] <- counts[target.aa, position] + 1
  }
  counts
}
# Plot the mutational coverage of a coding sequence.
#
# change.matrix: 21 x num.aa count matrix of observed amino acid changes
#                (as produced by mutlist2matrix()).
# sequence:      the coding DNA sequence (plain string or yogiseq object).
# translator:    a translator object from init.translator() used to
#                determine codon translations.
# all:           if TRUE, coverage is computed over all 20 substitutions per
#                position; otherwise over the substitutions reachable by a
#                single nucleotide change.
# main:          plot title.
#
# Draws a two-panel figure: a per-position coverage barplot on top and a
# position-by-amino-acid tile diagram below (observed changes shaded by
# count, wildtype residues gray, single-nucleotide-reachable residues
# outlined when all=FALSE).
plotMutCoverage <- function(change.matrix, sequence, translator, all=FALSE, main="") {
	# Use a plain if/else instead of ifelse(): ifelse() is for vectorized
	# selection and is fragile when applied to a scalar condition / object.
	.sequence <- if (any(class(sequence) == "yogiseq")) sequence$toString() else sequence
	#translate sequence to protein
	# BUGFIX: previously called the global 'trans' object instead of the
	# 'translator' parameter, ignoring the caller's argument.
	protein <- translator$translate(sequence)
	#a function that returns the i'th codon from the template
	codon.at <- function(dna, i) substr(dna,3*i-2, 3*i)
	#init reachability matrix: NA = unreachable, 0 = reachable by one
	#nucleotide change, -1 = wildtype residue
	reach.matrix <- matrix(NA,nrow=21,ncol=ncol(change.matrix),
		dimnames=list(
			c('A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y','*'),
			1:nchar(protein)
		)
	)
	# fill reachability matrix: for each codon, try every single-nucleotide
	# substitution and mark the resulting amino acid as reachable
	for (i in 1:ncol(change.matrix)) {
		codon <- codon.at(.sequence,i)
		for (pos in 1:3) {
			for (nc in c('A','C','G','T')) {
				mut.codon <- codon
				substr(mut.codon,pos,pos) <- nc
				aa <- translator$translate(mut.codon)
				reach.matrix[aa,i] <- 0
			}
		}
		reach.matrix[char.at(protein,i),i] <- -1
	}
	# compute coverage per position: fraction of (reachable or all) amino
	# acid changes that were actually observed
	if (!all) {
		# denominator excludes the wildtype residue itself (hence -1)
		coverage <- (apply(change.matrix,2,function(x) sum(na.omit(x) > 0))
			/ (apply(reach.matrix,2,function(x) sum(!is.na(x)))-1))
	} else {
		coverage <- apply(change.matrix,2,function(x) sum(na.omit(x) > 0)) / 20
	}
	#define drawing layout, set drawing color to gray, adjust margins
	layout(matrix(c(1,2),ncol=1), heights=c(1,3))
	op <- par(fg="gray",mar=c(0,4.1,4.1,2.1))
	# draw a bar plot for coverage
	ylim <- if (max(coverage) > 1) c(0,max(coverage)) else c(0,1)
	barplot(coverage,
		main=main,
		xlab="Position",
		ylab="Coverage",
		ylim=ylim,
		border=NA,
		names.arg=NA,
		col="darkolivegreen3"
	)
	# Compute a color gradient to represent the mutation counts
	maxVal <- max(apply(change.matrix,1,function(x) max(na.omit(x))))
	colors <- colorRampPalette(c("white", "orange"))(5)
	### Draw the diagram
	# use horizontal axis labels
	op <- c(op,par(las=1))
	par(mar=c(5.1,4.1,0,2.1))
	# create an empty plot
	plot(0,
		type='n',
		axes=FALSE,
		xlim=c(0,ncol(change.matrix)),
		ylim=c(0,21),
		xlab="Position",
		ylab="Amino acid"
	)
	# iterate over each matrix entry and draw the contents on the plot
	for (x in 1:ncol(change.matrix)) {
		for (y in 1:21) {
			if (change.matrix[y,x] > 0) {
				#observed mutations are drawn in a color shade corresponding to their count
				col <- colors[ceiling(5*change.matrix[y,x]/maxVal)+1]
				rect(x-1,22-y,x,21-y,col=col, lty="blank")
			}
		}
	}
	for (x in 1:ncol(change.matrix)) {
		for (y in 1:21) {
			if (!is.na(reach.matrix[y,x])) {
				if (reach.matrix[y,x] == -1) {
					#original amino acids are marked in gray
					rect(x-1,22-y,x,21-y,col="gray")
				} else if (!all) {
					#reachable aminoacids are marked with dotted outline
					rect(x-1,22-y,x,21-y, lty="dashed",lwd=2)
				}
			}
		}
	}
	# draw axes
	axis(1, at=c(1,seq(5,ncol(change.matrix),5))-.5, labels=c(1,seq(5,ncol(change.matrix),5)))
	axis(2, at=(1:21)-.5, labels=rev(rownames(change.matrix)) )
	par(op)
}
##
# Mutagenic PCR simulation function (error-prone PCR with Mutazyme-II-like
# nucleotide bias and substitution spectrum).
#
# sequence    = original DNA sequence (string or yogiseq); should start with
#               a start codon and end with a stop codon
# translator  = translator object from init.translator(), used to classify
#               substitutions as silent/missense/truncation
# cycles      = number of PCR cycles to simulate. Should be > 1, but too
#               large numbers will affect runtime and memory usage exponentially
# init.amount = initial amount of template molecules to use in the simulation
# etr         = enzyme-to-template ratio. Defaults to 1/1
# mut.rate    = mutations per bp introduced by the enzyme per replication
#
# Returns the pool of product molecules (original templates excluded), each
# represented as a vector of mutation descriptors ("A20G", "silent",
# "truncation", "nonsense" for indels).
#
pcr.sim <- function(sequence, translator, cycles=10, init.amount=100, etr=1, mut.rate=1/2000) {
	.sequence <- ifelse(any(class(sequence) == "yogiseq"), sequence$toString(), sequence)
	enzyme.amount <- round(init.amount * etr)
	#calculate sampling bias based on Mutazyme II bias and sequence bias
	pol.bias <- c(A=0.2675, C=0.2325, G=0.2325, T=0.2675)
	seq.bias <- table(to.char.array(.sequence)) / nchar(.sequence)
	bias <- pol.bias * seq.bias / sum(pol.bias * seq.bias)
	# cumulative bias vector for inverse-CDF sampling of nucleotides
	cbias <- c(bias[1],sapply(2:4, function(i) sum(bias[1:i])))
	names(cbias) <- names(bias)
	#make index of nucleotide positions
	nuc.positions <- sapply(c('A','C','G','T'), function(nuc) which(to.char.array(.sequence) == nuc))
	#mutation transition matrix based on Mutazyme II
	mut <- cbind(
		rbind(
			A=c(A=0,   C=.047,G=.175,T=.285),
			C=c(A=.141,C=0,   G=.041,T=.255),
			G=c(A=.255,C=.041,G=0,   T=.141),
			T=c(A=.285,C=.175,G=.047,T=0   )
		) * .5,
		DEL=rep(.048,4)/4,
		INS=rep(.008,4)/4
	) * 4
	# normalize rows, then build per-row cumulative distributions
	mut <- mut / apply(mut,1,sum)
	cmut <- cbind(mut[,1],sapply(2:ncol(mut), function(i) apply(mut[,1:i],1,sum)))
	dimnames(cmut) <- dimnames(mut)
	#seed molecule pool with templates (empty mutation lists)
	pool <- list()
	for (i in 1:init.amount) pool[[i]] <- list()
	#perform PCR cycles
	for (c in 1:cycles) {
		# each cycle, at most 'enzyme.amount' molecules get replicated
		num.reactions <- min(length(pool),enzyme.amount)
		templates <- sample(pool, num.reactions)
		# number of new mutations per replication is Poisson-distributed
		num.muts <- rpois(num.reactions, nchar(.sequence) * mut.rate)
		new.mutations <- sapply(num.muts, function(num.mut) {
			if (num.mut == 0) {
				return(list())
			}
			# use bias table to figure out how many of each nucleotide to pick for mutating
			# NOTE(review): names(which.min(which(u < cbias))) picks the first
			# cumulative bin exceeding u — confirm this inverse-CDF sampling
			# is intended rather than e.g. sample(..., prob=bias).
			to.sample <- table(sapply(1:num.mut, function(i) {
				names(which.min(which(runif(1,0,1) < cbias)))
			}))
			#pick positions to mutate
			to.mutate <- sapply(names(to.sample), function(nuc) {
				sample(nuc.positions[[nuc]], to.sample[nuc])
			})
			#implement mutations
			unlist(sapply(names(to.mutate), function(nuc) {
				sapply(to.mutate[[nuc]], function(pos) {
					#sample mutation outcome from this nucleotide's cumulative row
					to.nuc <- names(which.min(which(runif(1,0,1) < cmut[nuc,])))
					if (to.nuc == "DEL" || to.nuc == "INS") {
						# indels shift the reading frame: classified as nonsense
						return("nonsense")
					} else {
						# classify the substitution by its effect on the codon
						codon.number <- floor((pos-1) / 3) + 1
						codon.start <- 3*codon.number - 2
						from.codon <- substr(.sequence,codon.start,codon.start+2)
						change.pos <- pos - codon.start + 1
						to.codon <- from.codon
						substr(to.codon,change.pos,change.pos) <- to.nuc
						from.aa <- translator$translate(from.codon)
						to.aa <- translator$translate(to.codon)
						if (from.aa == to.aa) {
							return("silent")
						} else if (to.aa == "*") {
							return("truncation")
						} else {
							return(paste(from.aa,codon.number,to.aa,sep=""))
						}
					}
				})
			}))
		})
		names(new.mutations) <- NULL
		#add mutagenized copies to pool (template mutations plus new ones)
		pool[(length(pool)+1):(length(pool)+num.reactions)] <- sapply(1:num.reactions, function(i) {
			c(templates[[i]], new.mutations[[i]])
		})
	}
	#return pool without original templates
	pool[-(1:init.amount)]
}
# Default error handler used by protect(): print the condition, attempt a
# traceback, and abort without a further message.
default.error <- function(ex) {
	print(ex)
	# NOTE(review): traceback() expects a deparsed call stack (or NULL), not
	# a condition object — confirm this prints anything useful here.
	traceback(ex)
	stop()
}
# Open 'filename' in the given mode, apply 'fun' to the resulting
# connection and return fun's value, guaranteeing that the connection is
# closed afterwards. On failure the 'error' handler is invoked; it defaults
# to default.error, which prints the condition and aborts.
protect <- function(filename, fun, mode="r",error=default.error) {
	con <- NULL
	tryCatch({
		con <- file(filename, open=mode)
		fun(con)
	},
	error = error,
	finally = {
		# close only if file() succeeded and the connection is still open
		if (!is.null(con) && isOpen(con)) {
			close(con)
		}
	})
}
# processFile <- function(file,f) {
# tryCatch({
# con <- file(file, open="r")
# f(con)
# },
# error = function(ex) {
# traceback(ex)
# },
# finally = {
# if (exists("con") && isOpen(con)) {
# close(con)
# }
# })
# }
# test1 <- NULL
# processFile("test1.fastq",function(con) {
# test1 <<- parseFASTQ(con)
# })
# test2 <- NULL
# processFile("test2.fastq",function(con) {
# test2 <<- parseFASTQ(con)
# })
|
8584cd4541163e061c014ea9cd83b00baa565a0a
|
91484bf347364a04d7eab95629840cd6f2c9e823
|
/ui.R
|
718ff04108dfb785cb4bb65b6a713344a8de04ad
|
[] |
no_license
|
matschmitz/NOMIREG
|
b7976242dcfd5a5da190cba3a1f81d82f1fc69da
|
99a6c16ca265f37bb8790267e3a18ce6446570e9
|
refs/heads/master
| 2023-01-11T04:46:36.962730
| 2020-11-12T10:25:53
| 2020-11-12T10:25:53
| 312,241,387
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,261
|
r
|
ui.R
|
# Shiny UI for the nominal multiple regression demo app.
# NOTE(review): matrixInput() and plotlyOutput() are not provided by shiny —
# they presumably come from the shinyMatrix and plotly packages, which must
# be attached elsewhere (e.g. in server.R or global.R); confirm.
library(shiny)
fluidPage(theme = "myCSS.css",
          headerPanel(title = "", windowTitle = "Régression multiple nominale"),
          h2("Régression multiple nominale"),
          sidebarLayout(
              # Left panel: contrast-code matrix input, projection toggles,
              # and text outputs for descriptives and the model summary.
              sidebarPanel(width = 4,
                           matrixInput("codes", class = "numeric",
                                       value = matrix(c(-1, +1, 0,
                                                        1, 1, -2),
                                                      dimnames = list(c("feed_NvsP", "feed_AvsNP"),
                                                                      c("N", "P", "A")),
                                                      nrow = 2, byrow = TRUE),
                                       rows = list(names = TRUE),
                                       cols = list(names = TRUE)),
                           checkboxInput('projectX1', 'note~feed_NvsP'),
                           checkboxInput('projectX2', 'note~feed_AvsNP'),
                           verbatimTextOutput("descriptives"),
                           verbatimTextOutput("mdlSummary")
              ),
              # Right panel: the interactive main plot.
              mainPanel(width = 5,
                        plotlyOutput('mainPlot', height = "600px")
              )
          )
)
|
a53143485a087a50e43577b106f67385c504beca
|
dd470dfb159767bb3d2fade6697e6f25568cbe0e
|
/R_Files/Chapter_17_Neural_Network.R
|
b64cbc1b82b9aa13c9e29277fffff39ee87f2548
|
[
"MIT"
] |
permissive
|
djdhiraj/Data_Science_In_R
|
2011c3304f56fb9b96cc62ff84ca362d6e9ff134
|
4bf78a8542689c87f14072885a292e4762f390ac
|
refs/heads/master
| 2020-04-02T04:22:40.700705
| 2019-02-09T12:45:25
| 2019-02-09T12:45:25
| 154,013,949
| 1
| 0
|
MIT
| 2018-12-04T13:00:10
| 2018-10-21T13:55:24
|
R
|
UTF-8
|
R
| false
| false
| 1,284
|
r
|
Chapter_17_Neural_Network.R
|
#install.packages("neuralnet")
# Chapter 17: fit a small neural network to the built-in `infert` data set
# (case ~ age + parity + induced + spontaneous) using the neuralnet package,
# evaluate training misclassification, and inspect generalized weights.
library(neuralnet)
dim(infert)
View(infert)
# Two hidden units, cross-entropy error, logistic output (classification).
nn<-neuralnet(case~age+parity+induced+spontaneous,data=infert,hidden=2,err.fct = "ce",
linear.output = FALSE)
nn
plot(nn)
# Inspect the fitted object: fitted probabilities and weights.
nn$net.result
nn$weights
# NOTE(review): `results` is not a component of a neuralnet object; this
# line returns NULL (likely meant `result.matrix`, printed next).
nn$results
nn$result.matrix
nn$covariate
infert$case
nn$net.result[[1]]
# Threshold the fitted probabilities at 0.5 to get hard 0/1 predictions.
nn1<-ifelse(nn$net.result[[1]]>0.5,1,0)
nn1
# Training-set misclassification rate (predictions vs observed cases).
misClassificationError=mean(infert$case !=nn1)
print(misClassificationError)
OutPutVsPred<-cbind(infert$case,nn1)
OutPutVsPred
# Refit the same model with plain backpropagation and a fixed learning rate.
nn.bp<-neuralnet(formula=case~age+parity+induced+spontaneous,data=infert,hidden=2,
learningrate = 0.01,algorithm = "backprop",err.fct = "ce",
linear.output = FALSE)
nn.bp
# Predict three new cases (age, parity, induced, spontaneous per row).
new_output<-compute(nn,covariate = matrix(c(22,1,0,0,
22,1,1,0,
22,1,1,1),
byrow = TRUE,
ncol = 4))
new_output$net.result
# Confidence intervals for the weights at alpha = 0.5.
ci<-confidence.interval(nn,alpha=0.5)
ci
# Generalized-weight plots, one panel per covariate.
par(mfrow=c(2,2))
gwplot(nn,selected.covariate = "age",min=2.5,max=5)
gwplot(nn,selected.covariate = "parity",min = 2.5,max=5)
gwplot(nn,selected.covariate = "induced",min=2.5,max=5)
gwplot(nn,selected.covariate = "spontaneous",min = 2.5,max=5)
|
87ffb45f51a593c1391c11b95ceb4221d60d125a
|
ee2a149e5a84006dc3a3672d70eaf02bbec30b4b
|
/R/plot_simple_fit.R
|
e20facaa257a960f06ba1eb97dc2238018dc02e0
|
[] |
no_license
|
annube/Chifit
|
57d55c31aa54d1880432693aa3f82517e10b4642
|
a4381fef2ab6d8fc47bf5f4bc7d9e972aa01219e
|
refs/heads/master
| 2021-01-01T17:17:31.136458
| 2013-08-14T19:59:41
| 2013-08-14T19:59:41
| 3,256,011
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
plot_simple_fit.R
|
## library(hadron)
# Plot data points with error bars together with a fitted curve, saving the
# result to a pdf file.
#
# Args:
#   data_x:  numeric vector of x positions.
#   data_y:  numeric vector of y values (same length as data_x).
#   data_dy: numeric vector of y uncertainties passed to plotwitherror().
#   fitfn:   vectorized function of x returning fitted y values.
#   file:    output pdf file name.
#
# Side effects: writes `file`.  plotwitherror() comes from the hadron
# package (see the commented library() call above) and is assumed to be
# attached by the caller.
plot_simple_fit <- function(data_x, data_y, data_dy, fitfn, file="simple_fit.pdf")
{
  pdf(file=file)
  # BUGFIX: close the device even if plotting or fitfn() fails; the
  # original only called dev.off() on the success path, leaving a stale
  # open pdf device behind on error.
  on.exit(dev.off(), add = TRUE)
  plotwitherror(data_x, data_y, data_dy)
  # Evaluate the fit on a fine grid spanning the data range.
  xs <- seq(min(data_x), max(data_x), length.out = 100)
  ys <- fitfn(xs)
  lines(xs, ys)
  invisible(NULL)
}
|
e087f100429413e49d87db599338836c92ac9608
|
2bb1abc51dd9746776948143f91e2cfdd4463a7e
|
/R/plot.R
|
7fbcca744283e7fdac9ebddf22276350a7e5a22f
|
[
"BSD-2-Clause"
] |
permissive
|
parenthetical-e/boldR
|
90c53f09fdca36098ed744380959680391e1eb4b
|
8ae1b67cf1764a1a789d612fc7bf7ba0c1250980
|
refs/heads/master
| 2016-09-05T19:36:07.421610
| 2014-06-24T03:45:27
| 2014-06-24T03:45:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,678
|
r
|
plot.R
|
library("ggplot2")
library("reshape2")
# Heatmap of raw signal values: one tile per (index, voxel) cell, faceted by
# condition and -- unless seperate_by == "dataname" -- by dataname as well.
#
# Args:
#   bolddf: long-format data.frame with columns index, voxel, data, cond
#           and dataname (the layout used throughout this file).
#   name: pdf file name, or NA to draw on the current device.
#   seperate_by: NA for a single faceted plot, or "dataname" to draw one
#                plot (one pdf page) per dataname level.
#   height, width: pdf page size in inches; 0 means pdf() defaults.
#   returnp: also return the last ggplot object? (T or F)
#   title: optional plot title.
plot.bolddf.homogeneity <- function(bolddf, name, seperate_by=NA,
        height=0, width=0, returnp=FALSE, title=NA){
    if(! is.na(name)){ .pdf.device.setup(name, height, width) }
    # else { dev.new(width=width, height=height) }
    if (is.na(seperate_by)){
        # Single plot: datanames as facet rows, conditions as columns.
        p <- ggplot(data=bolddf,
                aes(x=factor(index), y=voxel, fill=data)) +
            geom_tile() +
            scale_fill_continuous(low="black", high="pink") +
            facet_grid(dataname~cond) +
            ylab("Voxel") +
            xlab("Index") +
            theme_bw() +
            theme(
                axis.text.y=element_blank(),
                axis.ticks.y=element_blank(),
                strip.text.y = element_text(angle=0)
            )
        if (! is.na(title)) { p <- p + ggtitle(title) }
        print(p)
    } else if (seperate_by == "dataname") {
        # One page per dataname level, faceted by condition only.
        for (rname in unique(as.character(bolddf$dataname))){
            print(paste("Plotting", rname, sep=" "))
            p <- ggplot(data=bolddf[rname == bolddf$dataname,],
                    aes(x=factor(index), y=voxel, fill=data)) +
                geom_tile() +
                scale_fill_continuous(low="black", high="pink") +
                facet_grid(.~cond) +
                ggtitle(rname) +
                ylab("Voxel") +
                xlab("Index") +
                theme_bw() +
                theme(
                    axis.text.y=element_blank(),
                    axis.ticks.y=element_blank(),
                    strip.text.y = element_text(angle=0)
                )
            # NOTE: a user-supplied title overrides the per-dataname
            # ggtitle(rname) set just above.
            if (! is.na(title)) { p <- p + ggtitle(title) }
            print(p)
        }
    } else {
        stop("Argument 'seperate_by' not recognized. Try NA, or 'dataname'.")
    }
    if (! is.na(name)) { dev.off() }
    if (returnp) {return(p) }
}
# Median timecourse per condition: one line per cond (median of `data` at
# each index across datanames), faceted by voxel with free y scales.
#
# Args:
#   bolddf: long-format data.frame (index, voxel, data, cond, dataname).
#   name: pdf file name, or NA to draw on the current device.
#   height, width: pdf page size in inches; 0 means pdf() defaults.
#   returnp: also return the ggplot object? (T or F)
#   title: optional plot title.
#   drop_cond: optional vector of cond levels to exclude before plotting.
#   vlines: optional x positions for light-grey vertical reference lines.
#   defcolor: keep ggplot default colours instead of the BrBG palette?
plot.bolddf.mediantc <- function(bolddf, name=NA, height=0, width=0,
        returnp=FALSE, title=NA, drop_cond=NULL, vlines=NULL, defcolor=FALSE){
    if(! is.na(name)){ .pdf.device.setup(name, height, width) }
    # else { dev.new(width=width, height=height) }
    if (! is.null(drop_cond)) {
        for(dc in drop_cond){
            bolddf <- bolddf[bolddf$cond != dc, ]
        }
    }
    p <- ggplot(data=bolddf, aes(x=index, y=data, colour=cond, fill=cond))
    # NOTE(review): `fun.y` is deprecated in ggplot2 >= 3.3.0 in favour of
    # `fun`; it still works with a warning on modern ggplot2.
    p <- p + stat_summary(fun.y=median, geom="line", size=1.5) +
        facet_grid(voxel~., scales = "free_y") +
        ylab("BOLD signal (AU)") + xlab("Time (TR)") + theme_bw() +
        scale_x_continuous(breaks=1:max(bolddf$index)) +
        theme(
            plot.background = element_blank(),   ## Main facet bkg
            panel.grid.major = element_blank(),  ## Major grid lines
            panel.grid.minor = element_blank(),  ## Minor grid lines
            panel.border = element_blank(),      ## Facet border
            panel.background = element_blank(),  ## Facet bkg
            # axis.text.y=element_blank(),       ## y lab
            # axis.ticks.y=element_blank(),      ## y ticks
            strip.text.y = element_text(angle=0),## facet name rotate
            strip.background = element_blank()   ## Frame background (grey+box)
        )
    if (! defcolor) {
        p <- p + scale_colour_brewer(palette="BrBG")
    }
    if (! is.null(vlines)) {
        p <- p + geom_vline(xintercept = vlines, colour="light grey")
    }
    if (! is.na(title)) { p <- p + ggtitle(title) }
    print(p)
    if(! is.na(name)){ dev.off() }
    if (returnp) { return(p) }
}
# All individual timecourses: one translucent line per dataname, in a
# voxel-by-condition facet grid with free y scales.
#
# Args match plot.bolddf.mediantc(): `name` (pdf file or NA), page size,
# `drop_cond` to exclude conditions, `vlines` for reference lines,
# `defcolor` to keep default ggplot colours, `returnp`/`title` as usual.
plot.bolddf.tc <- function(bolddf, name=NA, height=0, width=0,
        returnp=FALSE, title=NA, drop_cond=NULL, vlines=NULL,
        defcolor=FALSE){
    if(! is.na(name)){ .pdf.device.setup(name, height, width) }
    if (! is.null(drop_cond)) {
        for(dc in drop_cond){
            bolddf <- bolddf[bolddf$cond != dc, ]
        }
    }
    # group=dataname draws a separate line per dataname within each facet.
    p <- ggplot(data=bolddf, aes(x=index, y=data, colour=cond, group=dataname)) +
        geom_line(alpha=0.25) +
        facet_grid(voxel~cond, scales="free_y") +
        ylab("BOLD signal (AU)") + xlab("Time (TR)") + theme_minimal() +
        scale_x_continuous(breaks=1:max(bolddf$index)) +
        # Strip off all the boxes
        theme(
            plot.background = element_blank(),
            panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(),
            panel.border = element_blank(),
            panel.background = element_blank(),
            axis.text.y=element_blank(),
            axis.ticks.y=element_blank(),
            axis.text.x = element_blank(),
            strip.text.y = element_text(angle=0)
        )
    if (! defcolor) {
        p <- p + scale_colour_brewer(palette="BrBG")
    }
    if (! is.null(vlines)) {
        p <- p + geom_vline(xintercept = vlines, colour="light grey")
    }
    if (! is.na(title)) { p <- p + ggtitle(title) }
    print(p)
    if(! is.na(name)){ dev.off() }
    if (returnp) { return(p) }
}
# Scatter version of the timecourse plot: translucent points (all datanames
# pooled) in a voxel-by-condition facet grid with free y scales.
#
# Args:
#   bolddf: long-format data.frame (index, voxel, data, cond, dataname).
#   name: pdf file name, or NA to draw on the current device.
#   height, width: pdf page size in inches; 0 means pdf() defaults.
#   returnp: also return the ggplot object? (T or F)
#   title: optional plot title.
plot.bolddf.tcpoint <- function(bolddf, name=NA, height=0, width=0,
        returnp=FALSE, title=NA){
    if(! is.na(name)){ .pdf.device.setup(name, height, width) }
    # else { dev.new(width=width, height=height) }
    p <- ggplot(data=bolddf, aes(x=index, y=data, colour=cond)) +
        geom_point(alpha=0.25) +
        facet_grid(voxel~cond, scales = "free_y") +
        ylab("BOLD signal (AU)") + xlab("Time (TR)") + theme_bw() +
        scale_x_continuous(breaks=1:max(bolddf$index)) +
        theme(
            plot.background = element_blank(),   ## Main facet bkg
            panel.grid.major = element_blank(),  ## Major grid lines
            panel.grid.minor = element_blank(),  ## Minor grid lines
            panel.border = element_blank(),      ## Facet border
            panel.background = element_blank(),  ## Facet bkg
            # axis.text.y=element_blank(),       ## y lab
            # axis.ticks.y=element_blank(),      ## y ticks
            strip.text.y = element_text(angle=0),## facet name rotate
            strip.background = element_blank()   ## Frame background (grey+box)
        )
    if (! is.na(title)) { p <- p + ggtitle(title) }
    print(p)
    if(! is.na(name)){ dev.off() }
    if (returnp) { return(p) }
}
# Boxplot of the data distribution at each index, faceted by voxel; boxes
# are coloured by condition (default) or split into one facet column per
# condition (seperate_by == "cond").
#
# Args:
#   bolddf: long-format data.frame (index, voxel, data, cond, dataname).
#   name: pdf file name, or NA to draw on the current device.
#   seperate_by: NULL to colour by cond in one grid; "cond" for a
#                voxel-by-cond facet grid without colouring.
#   height, width: pdf page size in inches; 0 means pdf() defaults.
#   returnp: also return the ggplot object? (T or F)
#   title: optional plot title.
#   drop_cond: optional vector of cond levels to exclude before plotting.
#   notch: draw notched boxplots? (passed to geom_boxplot)
#   defcolor: keep ggplot default colours instead of the BrBG palette?
plot.bolddf.boxplot <- function(bolddf, name=NA, seperate_by=NULL,
        height=0, width=0, returnp=FALSE, title=NA, drop_cond=NULL,
        notch=FALSE, defcolor=FALSE){
    # Plots timecourses for each voxel in a grid. Conds are separately colored.
    if(! is.na(name)){ .pdf.device.setup(name, height, width) }
    # else { dev.new(width=width, height=height) }
    if (! is.null(drop_cond)) {
        for(dc in drop_cond){
            bolddf <- bolddf[bolddf$cond != dc, ]
        }
    }
    p <- NULL
    if (is.null(seperate_by)) {
        p <- ggplot(data=bolddf, aes(x=factor(
                index), y=data, colour=cond, fill=cond)) +
            facet_grid(voxel~., scales = "free_y")
    } else if (seperate_by == "cond") {
        p <- ggplot(data=bolddf, aes(x=factor(index), y=data)) +
            facet_grid(voxel~cond, scales = "free_y")
    } else {
        stop("seperate_by was not valid")
    }
    if (! defcolor) {
        p <- p + scale_colour_brewer(palette="BrBG") +
            scale_fill_brewer(palette="BrBG")
    }
    p <- p + geom_boxplot(alpha=0.5, outlier.colour="light grey", notch=notch)
    p <- p + ylab("BOLD signal (AU)") + xlab("Index") + theme_bw()
    if (! is.na(title)) { p <- p + ggtitle(title) }
    p <- p + theme(
        plot.background = element_blank(),   ## Main facet bkg
        panel.grid.major = element_blank(),  ## Major grid lines
        panel.grid.minor = element_blank(),  ## Minor grid lines
        panel.border = element_blank(),      ## Facet border
        panel.background = element_blank(),  ## Facet bkg
        #axis.text.y=element_blank(),        ## y lab
        #axis.ticks.y=element_blank(),       ## y ticks
        axis.text.x=element_blank(),         ## x lab
        #axis.ticks.x=element_blank(),       ## x ticks
        strip.text.y = element_text(angle=0),## facet name rotate
        strip.background = element_blank()   ## Frame background (grey+box)
    )
    print(p)
    if(! is.na(name)){ dev.off() }
    if (returnp) { return(p) }
}
# Per-voxel distribution of a summary statistic across datanames, as a
# flipped boxplot or violin plot coloured by condition; optionally adds a
# resampled null distribution as an extra "null" condition.
#
# Args:
#   bolddf: long-format data.frame (index, voxel, data, cond, dataname).
#   stat: the statistic to compute, forwarded to bolddf.stat(); it is also
#         pasted into the y-axis label, so it must coerce to character --
#         presumably a statistic *name*, not a function object (TODO confirm
#         against bolddf.stat(), defined elsewhere in this package).
#   name: pdf file name, or NA to draw on the current device.
#   height, width: pdf page size in inches; 0 means pdf() defaults.
#   nulldist: add a voxel-specific null distribution (100 resamples via
#             bolddf.nulldist(), defined elsewhere)? (T or F)
#   geom: "boxplot" or "violin"; anything else stops with an error.
#   returnp: also return the ggplot object? (T or F)
#   title: optional plot title.
plot.bolddf.stat <- function(bolddf, stat, name=NA, height=0, width=0,
        nulldist=FALSE, geom="boxplot", returnp=FALSE, title=NA){
    # For every voxel plot the named stat, as a boxplot, coloring based on cond.
    # NOTE(review): contrary to the original comment here, the ggplot object
    # is returned only when returnp=TRUE, independent of `name`.
    if(! is.na(name)){ .pdf.device.setup(name, height, width) }
    # else { dev.new(width=width, height=height) }
    print("Creating stats.")
    bolddf <- bolddf.stat(bolddf, stat)
    # Create the nulldist data?
    if (nulldist) {
        print("Creating voxel specfic null distribution stats.")
        # Build the null distribution and add a "null" level to cond
        # denoting the null-distribution rows.
        n_samples <- 100
        print(paste("Taking", n_samples, "samples."))
        bolddf_nulldist <- bolddf.nulldist(bolddf, stat, n_samples)
        bolddf_nulldist[["cond"]] <- factor(
                rep("null", nrow(bolddf_nulldist)))
        bolddf <- rbind(bolddf, bolddf_nulldist)
    }
    # Init the plot aes
    p <- ggplot(data=bolddf, aes(x=voxel, y=data, colour=cond))
    # What geom of plot to use?
    if(geom == "boxplot"){
        p <- p + geom_boxplot(notch=FALSE, outlier.colour="light grey")
    } else if(geom == "violin"){
        p <- p + geom_violin()
    } else {
        stop("Geom not valid. Try 'boxplot' or 'violin'.")
    }
    # Finish the plot config
    p <- p +
        theme_bw() +
        ylab(paste("Distribution of dataname ", stat, "'s", sep="")) +
        xlab("Voxel") +
        theme(
            plot.background = element_blank(),
            panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(),
            panel.border = element_blank(),
            panel.background = element_blank(),
            axis.text.y=element_blank(),
            axis.ticks.y=element_blank(),
            strip.text.y = element_text(angle=0)
        ) +
        coord_flip()
    if (! is.na(title)) { p <- p + ggtitle(title) }
    print(p)
    # Plot or return p
    if(! is.na(name)){
        dev.off()
    }
    if (returnp) { return(p) }
}
# Plot all the available summary statistics, saving each as a page inside
# the same pdf file (when `name` is given).
#
# Args mirror plot.bolddf.stat(); `title` is accepted for interface
# compatibility but each page keeps plot.bolddf.stat()'s default labels.
# Returns the last ggplot object when returnp=TRUE.
plot.bolddf.allstats <- function(bolddf, name=NA, height=0, width=0,
        nulldist=FALSE, geom="boxplot", returnp=FALSE, title=NA){
    if(! is.na(name)){ .pdf.device.setup(name, height, width) }
    # else { dev.new(width=width, height=height) }
    # Keep the stats as names so the progress message prints the stat's
    # name (the original print() on a bare function object dumped its
    # source) and so plot.bolddf.stat()'s paste()-based axis label works.
    # "time.to.max" is a helper defined elsewhere in this package.
    statsnames <- c("mean", "var", "diff", "median", "time.to.max")
    p <- NULL
    for(statname in statsnames){
        print(statname)
        # BUGFIX: the original called bolddf.stat(bolddf, statname, NA,
        # height, width, nulldist, geom), but bolddf.stat() is called with
        # only (bolddf, stat) elsewhere in this file; this argument list
        # matches plot.bolddf.stat(), the plotting routine intended here.
        # returnp=TRUE so the page's plot object is captured as before.
        p <- plot.bolddf.stat(bolddf, statname, NA, height, width,
            nulldist, geom, returnp=TRUE)
    }
    if (! is.na(name)) { dev.off() }
    if (returnp) { return(p) }
}
# Pairwise pointwise RMS difference between conditions, per (voxel,
# dataname), shown as flipped boxplots coloured by condition pair.
#
# Args:
#   bolddf: long-format data.frame (index, voxel, data, cond, dataname).
#   stat: used only in the y-axis label ("RMS <stat>"); must coerce to
#         character.
#   name: pdf file name, or NA to draw on the current device.
#   height, width: pdf page size in inches; 0 means pdf() defaults.
#   returnp: also return the ggplot object? (T or F)
#   title: optional plot title.
#
# NOTE(review): uses ddply() and .() from plyr, which is not loaded in
# this file -- presumably attached elsewhere in the package; confirm.
plot.bolddf.rmsdifference <- function(bolddf, stat, name=NA, height=0, width=0,
        returnp=FALSE, title=NA){
    if(! is.na(name)){ .pdf.device.setup(name, height, width) }
    # else { dev.new(width=width, height=height) }
    # Are there enough cond?  (Exactly two conditions would yield a single
    # pair, which the author deemed not useful.)
    conds <- as.character(unique(bolddf$cond))
    if (length(conds) <= 2) {
        stop("This plot needs more than 2 cond to be of use.")
    }
    condpairs <- combn(conds, 2)
    ## Each col in the condpairs matrix is a pair of condition names.
    bolddf_rmsdiff <- ddply(
        bolddf,
        .(voxel, dataname),
        # Anonymous fn: pointwise RMS differences for all condition pairs
        # within one (voxel, dataname) cell.
        function(bolddf, condpairs){
            rmss <- NULL
            pairnames <- NULL
            for(j in 1:ncol(condpairs)){
                # Pull both conditions' data, take the pointwise RMS, and
                # join the condition names into a pair label.
                c1 = condpairs[1,j]
                c2 = condpairs[2,j]
                v1 = bolddf[c1 == bolddf$cond, "data"]
                v2 = bolddf[c2 == bolddf$cond, "data"]
                rmss <- c(rmss, sqrt((v1 - v2)^2))
                pairnames <- c(pairnames, paste(c1, c2, sep="-"))
            }
            # NOTE(review): pairnames (one entry per pair) and the voxel /
            # dataname columns are recycled against rmss; this is only
            # correct when rmss' length is an exact multiple -- i.e. each
            # condition contributes the same number of rows per cell.
            N <- length(rmss)
            data.frame(
                data=rmss,
                condpair=pairnames,
                voxel=rep(bolddf$voxel, N),
                dataname=rep(bolddf$dataname, N)
            )
        },
        condpairs)
    # Finally we plot...  NOTE(review): the y range is hard-coded to
    # (0, 0.002); values outside it are silently dropped by ylim().
    p <- ggplot(data=bolddf_rmsdiff, aes(x=voxel, y=data,
            fill=condpair, colour=condpair)) +
        geom_boxplot(alpha=0.8, notch=FALSE, outlier.colour="light grey") +
        theme_bw() +
        theme(
            plot.background = element_blank(),
            panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(),
            panel.border = element_blank(),
            panel.background = element_blank(),
            axis.text.y=element_blank(),
            axis.ticks.y=element_blank(),
            strip.text.y = element_text(angle=0)
        ) +
        ylab(paste("RMS ", stat)) +
        ylim(0,0.002) +
        coord_flip()
    if (! is.na(title)) { p <- p + ggtitle(title) }
    print(p)
    if(! is.na(name)){ dev.off() }
    if (returnp) { return(p) }
}
.pdf.device.setup <- function(name, height, width){
    # Open a pdf() graphics device writing to `name`.  A custom page size
    # is used only when BOTH dimensions are positive; otherwise pdf()'s
    # built-in defaults apply.
    use.custom.size <- (height > 0) && (width > 0)
    if (use.custom.size) {
        pdf(file=name, height=height, width=width)
    } else {
        pdf(file=name)
    }
}
|
a4a62c2f4232743312eb96287f35d59f1167cf5c
|
37cc63b9708638db1fd1e01c0b3e52013654986c
|
/analysis/Brendan/Gene Filter/gene_groups.R
|
534bac67d333589fa98bfb58720723dbe1c68088
|
[] |
no_license
|
brendan4/sravandevanathan
|
e7082bd5892ccc5cf679132aaa06c88abe4c17fc
|
00c54737e09ea29b517a2d008a420748af43d9b7
|
refs/heads/master
| 2022-03-26T08:02:45.299082
| 2019-12-13T21:09:21
| 2019-12-13T21:09:21
| 170,389,554
| 0
| 0
| null | 2019-02-12T20:52:38
| 2019-02-12T20:52:37
| null |
UTF-8
|
R
| false
| false
| 6,394
|
r
|
gene_groups.R
|
# Interactive exploration of FPKM distributions and per-gene log-variance
# for selected gene groups.  Relies on objects created elsewhere in this
# project: the expression matrices `expressed.genes` / `expressed.trans`
# and `Ribo.filter`, plus the helpers filter.genes(), pretty.gene.name()
# and var.samples() -- TODO confirm their exact contracts; they are not
# defined in this file.
# Pseudocount added before log2 so zero FPKMs stay finite.
min_nonzero = 1
gene.list <- c("MDM2","RPL11", "TP53", "GATA1", "PML", "MYC", "CDKN2A")
# two options: expressed genes and expressed trans datasets
filtered.data <- filter.genes(expressed.genes,gene.list) # expressed genes dataset
filtered.data <- filter.genes(expressed.trans,gene.list) # expressed trans dataset
# Interesting proteins from gene.list: FPKM distribution per library.
par(mar=c(7,4,4,2)+0.1)
png(filename='Distribution of FPKMs.png', width=800, height=750)
boxplot(log2(filtered.data+min_nonzero),
names=colnames(filtered.data), las=2, ylab="log2(FPKM)",
main="Distribution of FPKMs for all libraries")
graphics.off()
# Distribution in ribosomal proteins.
par(mar=c(7,4,4,2)+0.1)
png(filename='Distribution of Ribo FPKMs.png', width=800, height=750)
boxplot(log2(Ribo.filter+min_nonzero),
names=colnames(filtered.data), las=2, ylab="log2(FPKM)",
main="Distribution of Ribo FPKMs")
graphics.off()
sub <- rownames(na.omit(filtered.data[which(filtered.data > 100),])) # RPL11 highest abundance in trans
RPL11 <- expressed.trans[grep("^RPL11",rownames(expressed.trans)),] # all RPL11 in trans
#text(Ribo.filter, Ribo.filter,RPL11, cex=0.6, pos=4, col="red")
# A look at MYC: per-row variance of log2 expression, pretty row names.
filt <- expressed.genes[grep("^MYC", rownames(expressed.genes)),]
filt$var <- apply(log2(filt +0.01),1, var)
filt <- pretty.gene.name(filt)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# A look at RPL11.
filt <- expressed.genes[grep("^RPL11", rownames(expressed.genes)),]
filt$var <- apply(log2(filt +0.01),1, var)
filt <- pretty.gene.name(filt)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
# PML.
filt <- expressed.genes[grep("^PML", rownames(expressed.genes)),]
filt$var <- apply(log2(filt +0.01),1, var)
filt <- pretty.gene.name(filt)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
# A look at all genes in gene.list (note: log10 here, log2 elsewhere).
filtered.data$var <- apply(log10(filtered.data),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
filt.p <- na.omit(filt.p)
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# Append per-library column sums as an extra row.
colSum <- as.data.frame(colSums(filt.p))
colSum <- t(colSum)
filt.p <- rbind(filt.p, colSum)
# A look at hemoglobin-related proteins.
gene.list <- c('HBA', "HBB", "HBG1", "HBG2", "HBE", "HBD", "SLC4A1", "SNCA", "BPGM")
filtered.data <- filter.genes(expressed.genes, gene.list = gene.list)
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# All genes with the HB prefix.
filtered.data <- filter.genes(expressed.genes, "HB")
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# H2BFXP.
filtered.data <- filter.genes(expressed.genes, "H2BFXP")
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
# FNDC4.
filtered.data <- filter.genes(expressed.genes, "FNDC4")
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
werid <- c("NPHP1", "MORN4","POTEI", "GPRC5B")
# Immune proteins.
filtered.data <- filter.genes(expressed.genes, c("JCHAIN","MX1","IGLL5","RSAD2","CMPK2", "IFI44L", "MZB1", "IFIT1", "OAS3"))
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# AHSP, FAM132B, HEMGN, and TRIM10: from the GATA1 diff DBA paper : blood cell formation
filtered.data <- filter.genes(expressed.genes, c("AHSP", "FAM132B", "HEMGN", "TRIM10"))
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# Heme proteins ALAS2, FECH, CPOX, PPOX, and UROS (GATA1 paper).
filtered.data <- filter.genes(expressed.genes, c("ALAS2", "FECH", "CPOX", "PPOX", "UROS"))
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# IL8, IL1R1, CXCR4, ICAM3, MPO, TNFSF10, and TLR4 genes with IL6, TNF.
filtered.data <- filter.genes(expressed.genes, c("IL8", "IL1R1", "CXCR4", "ICAM3", "MPO", "TLR4", "IL6", "TNF"))
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# RNA-prefixed genes (e.g. RNR1).
filtered.data <- filter.genes(expressed.genes, c("RNA"))
filtered.data$var <- apply(log2(filtered.data +0.1),1, var)
filt <- pretty.gene.name(filtered.data)
rownames(filt) <- filt$pretty
filt.p <- filt[,-which(colnames(filt) %in% c("pretty"))]
ggplot(filt.p, aes(x = rownames(filt.p) , y = var)) + geom_bar(stat="identity", fill="tomato3")
# Hemoglobin genes via var.samples(); drop pseudogene/lookalike rows and
# the var row, then scatter HBB against HBG1 per sample.
filtered.data <- var.samples(expressed.genes, gene.list = c('HBA', "HBB", "HBG1", "HBG2", "HBE", "HBD"), pretty.names = TRUE, graph = TRUE)
filtered.data <- filtered.data[-which(rownames(filtered.data) %in% c("HBBP1","HBEGF", "var")),]
filtered.data <- as.data.frame(t(filtered.data))
ggplot(filtered.data, aes(x = HBB , y = HBG1)) +
geom_point() +
geom_text(aes(label = rownames(filtered.data)), hjust = 0, vjust = 0)
|
bafcab8f473f287d49d8815f77796d4400c2a7f1
|
703ad20be09e009077863d8a571c3585f9e2a061
|
/man/as_node.Rd
|
338fe66898a3538d269a12922ae5a0a668f64955
|
[
"MIT"
] |
permissive
|
russHyde/s3tree
|
ccbfde0cd8451834ae3f41c3ae8c88d7b12281a0
|
711831f9c0736439bc3eb950643dc4c64d4249e8
|
refs/heads/master
| 2020-04-19T06:03:50.949064
| 2019-01-30T11:21:10
| 2019-01-30T11:21:10
| 168,007,301
| 0
| 0
|
NOASSERTION
| 2019-01-30T11:21:11
| 2019-01-28T17:45:36
|
R
|
UTF-8
|
R
| false
| true
| 328
|
rd
|
as_node.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/node.R
\name{as_node}
\alias{as_node}
\title{Convert a `list` into a `Node`}
\usage{
as_node(x)
}
\arguments{
\item{x}{A list of data. This must contain entries named
`name`, `parent` and `children`.}
}
\description{
Convert a `list` into a `Node`
}
|
9c9815d949a644a91ea44c530ef35687065e87e2
|
d03baca03096273c73c930b29e654836f0d3f0b8
|
/man/PlotHist.Rd
|
d6416191fc68fa2fabcf64c0ff8ba936c16a2c39
|
[] |
no_license
|
ericschulz/rpawl
|
fb2235112126d988f1e5ec09d22915d6affa390c
|
0e5ea45356644ffc79c6381ef002e5599242c676
|
refs/heads/master
| 2016-08-11T07:29:18.270112
| 2013-06-07T04:25:56
| 2013-06-07T04:25:56
| 43,148,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 732
|
rd
|
PlotHist.Rd
|
\name{PlotHist}
\alias{PlotHist}
\title{Plot a histogram of one component of the chains}
\usage{
PlotHist(results, component)
}
\arguments{
\item{results}{Object of class \code{"list"}: either the
output of \code{\link{pawl}} or of
\code{\link{adaptiveMH}}.}
\item{component}{Object of class \code{"numeric"}:
specifies the index of the component to plot on the
x-axis.}
}
\value{
The function returns a ggplot2 object.
}
\description{
This function takes the result of
\code{\link{adaptiveMH}} or of \code{\link{pawl}}, and a
component index, and draws a histogram of it.
}
\author{
Luke Bornn <bornn@stat.harvard.edu>, Pierre E. Jacob
<pierre.jacob.work@gmail.com>
}
\seealso{
\code{\link{ggplot}}
}
|
dfa9c38cb07baf07b2bf101f7921b30575bdee0a
|
7bf3503de3ccf77a0f57491b4bccfaa8aeff5b84
|
/R Scripts/KNNAlgorithm.R
|
65b3d1e7ffbaec904edb5303d84513dfddacb569
|
[] |
no_license
|
augaonkar/Yelp-Dataset-Analysis
|
dbb193a49e4b52d5b758ef8c54bfb1c7f26a32a0
|
ad9234f9c652570b51bdaffcf86035945d3afab5
|
refs/heads/master
| 2021-01-21T10:04:59.189758
| 2017-02-28T01:27:25
| 2017-02-28T01:27:25
| 83,370,719
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,789
|
r
|
KNNAlgorithm.R
|
# K-nearest-neighbours classification of Yelp restaurant star ratings
# (open Arizona restaurants) from business attributes.
#
# Requires the Yelp academic business JSON-lines file at the hard-coded
# path below; caches the flattened table in 'yelpdata.rdata'.
library(rjson)
library(plyr)
library(dplyr)
library(ggplot2)
library(knitr)
library(glmnet)
library(googleVis)
library(DT)
library(scales)
library(varhandle)
# BUGFIX: do not install packages on every run of an analysis script;
# run install.packages("caret") once by hand instead.
# install.packages("caret")
library(ISLR)
library(caret)
# --- Load and flatten the JSON-lines business records --------------------
con <- file("C:/Users/yesha/OneDrive/Documents/yelp_academic_dataset_business.json", "r")
input <- readLines(con, -1L)
close(con)
yelpdata <- input %>%
    lapply(function(x) t(unlist(fromJSON(x)))) %>%
    ldply()
save(yelpdata, file= 'yelpdata.rdata')
load("yelpdata.rdata")
# Normalise column names: drop non-alphanumerics and lower-case.
clean.names <- function(df){
    colnames(df) <- gsub("[^[:alnum:]]", "", colnames(df))
    colnames(df) <- tolower(colnames(df))
    return(df)
}
yelpdata <- clean.names(yelpdata)
yelpdata <- yelpdata[,!duplicated(colnames(yelpdata))]
# Keep businesses tagged "Restaurants" in any of the category columns.
all_restaurants <- filter(yelpdata, categories == "Restaurants" |
                              categories1 == "Restaurants" |
                              categories2 == "Restaurants"|
                              categories3 == "Restaurants"|
                              categories4 == "Restaurants"|
                              categories5 == "Restaurants"|
                              categories6 == "Restaurants"|
                              categories7 == "Restaurants"|
                              categories8 == "Restaurants"|
                              categories9 == "Restaurants"|
                              categories10 == "Restaurants")
# Open Arizona restaurants only.
Restaurants_city <- filter(all_restaurants,state == "AZ")
Restaurants_city <- Restaurants_city[Restaurants_city$open==TRUE,]
View(all_restaurants)
View(Restaurants_city)
names(Restaurants_city)
# Model frame: outcome (stars) plus attribute predictors.
NewData<- subset(Restaurants_city, select=c(stars,attributespricerange,attributesalcohol,attributesnoiselevel,
                                            attributesattire,attributesgoodforgroups,
                                            attributesacceptscreditcards,attributesoutdoorseating,
                                            attributesgoodforkids,
                                            attributesdelivery,attributestakeout,
                                            attributestakesreservations,attributesparkinglot
))
View(NewData)
NewData<-na.omit(NewData)
NewData[,2] <- as.numeric(NewData[,2])   # price range -> numeric
NewData[,1]<-unfactor(NewData[,1])       # stars: factor -> numeric
# Bin the numeric star rating into three classes.
NewData$stars <- ifelse(NewData$stars < 3.0, 'LOW',
                        ifelse(NewData$stars >= 3.0 & NewData$stars <=4,'MEDIUM','HIGH'))
table(NewData$stars)
# Recode logical attributes to 0/1 (anything other than TRUE -> 0).
NewData$attributestakesreservations <- ifelse(NewData$attributestakesreservations=='TRUE', 1,
                                              ifelse(NewData$attributestakesreservations == 'FALSE' ,0,0))
NewData$attributesparkinglot <- ifelse(NewData$attributesparkinglot=='TRUE', 1,
                                       ifelse(NewData$attributesparkinglot == 'FALSE' ,0,0))
NewData$attributesgoodforgroups <- ifelse(NewData$attributesgoodforgroups=='TRUE', 1,
                                          ifelse(NewData$attributesgoodforgroups == 'FALSE' ,0,0))
NewData$attributesacceptscreditcards <- ifelse(NewData$attributesacceptscreditcards=='TRUE', 1,
                                               ifelse(NewData$attributesacceptscreditcards == 'FALSE' ,0,0))
NewData$attributesoutdoorseating <- ifelse(NewData$attributesoutdoorseating=='TRUE', 1,
                                           ifelse(NewData$attributesoutdoorseating == 'FALSE' ,0,0))
NewData$attributesgoodforkids <- ifelse(NewData$attributesgoodforkids=='TRUE', 1,
                                        ifelse(NewData$attributesgoodforkids == 'FALSE' ,0,0))
# BUGFIX: the original tested 'full_bar' twice, leaving the value 3
# unreachable and coding 'beer_and_wine' as 0.  Recode as
# none=1, beer_and_wine=2, full_bar=3 (other/NA -> 0).
NewData$attributesalcohol <- ifelse(NewData$attributesalcohol=='none', 1,
                                    ifelse(NewData$attributesalcohol == 'beer_and_wine' , 2,
                                           ifelse(NewData$attributesalcohol == 'full_bar' ,3,0)))
NewData$attributesnoiselevel <- ifelse(NewData$attributesnoiselevel=='quiet', 1,
                                       ifelse(NewData$attributesnoiselevel=='average', 2,
                                              ifelse(NewData$attributesnoiselevel=='loud', 3,
                                                     ifelse(NewData$attributesnoiselevel == 'very_loud' ,0,0))))
# BUGFIX: 'causal' was a typo for 'casual', so casual restaurants were
# silently coded 0 instead of 3.
NewData$attributesattire <- ifelse(NewData$attributesattire=='formal', 1,
                                   ifelse(NewData$attributesattire=='dressy', 2,
                                          ifelse(NewData$attributesattire == 'casual' ,3,0)))
NewData$attributesdelivery <- ifelse(NewData$attributesdelivery=='TRUE', 1,
                                     ifelse(NewData$attributesdelivery == 'FALSE' ,0,0))
NewData$attributestakeout <- ifelse(NewData$attributestakeout=='TRUE', 1,
                                    ifelse(NewData$attributestakeout == 'FALSE' ,0,0))
d1<-NewData
# Min-max scale a numeric vector to [0, 1].
normalize <- function(x) {
    y <- (x - min(x))/(max(x) - min(x))
    y
}
View(d1)
# Normalise the ordinal columns (2:5); binary columns (6:13) are kept as-is.
# (The original also called View(New_Dataset) here, before the object
# existed, which errors -- removed.)
New_Dataset <- as.data.frame(lapply(d1[2:5], normalize))
New_dataset2 <- as.data.frame(d1[6:13])
New_Dataset<-cbind( stars=d1$stars,New_Dataset,New_dataset2)
View(New_Dataset)
NewData<-New_Dataset
# 70/30 train/test split.  NOTE(review): add set.seed() here for a
# reproducible split.
temp <- sample(nrow(NewData),as.integer(0.70 * nrow(NewData)))
Training <- NewData[temp,]
View(Training)
Test <- NewData[-temp,]
View(Test)
library(class)
table(Test[,1])
table(Training[,1])
# ?knn  # interactive help; not needed when sourcing the script
# Predict Test labels from the Training neighbours (k = 69).
# Renamed from `predict` to avoid shadowing stats::predict.
knn.pred<-knn(Training[,-1],Test[,-1],Training[,1],k=69 )
View(knn.pred)
# BUGFIX: accuracy must compare predictions with the *Test* labels; the
# original compared against Training[,1], which has a different length
# and silently recycled, giving a meaningless number.
mean(knn.pred==Test[,1])
results<-cbind(Test, as.character(knn.pred))
View(results)
# Confusion matrix and error rate.
table(Actual=results[,1],Prediction=results[,14])
wrong<-results[,1]!=results[,14]
plot(results[,1],results[,14])
rate<-sum(wrong)/length(wrong)
rate
table(results[,14])
table(Test[,1])
# Test accuracy as a function of k (1..70).
accuracy <- rep(1, 70)
k <- 1:70
for(x in k){
    prediction <- knn(Training[,-1],Test[,-1],Training[,1],k=x )
    # BUGFIX: compare with the Test labels (see above).
    accuracy[x] <- mean(prediction == Test[,1])
}
plot(k, accuracy, type = 'b')
|
5ed3e128c0db43544862ce08e5b2f73f512cba43
|
721d96cf611a8ee4ee224e6dae7c92a9faf180c0
|
/man/mwStatsGrid.Rd
|
9a84a8c6a9e361ca0a869175c876a075b2a148c0
|
[] |
no_license
|
cran/astrochron
|
5b05df5f4f114cdf6df630cdfe0f649e5f45dbbf
|
c86d83297c0b75ce1ab1a026a25fe05e0e6cb3fe
|
refs/heads/master
| 2023-09-01T06:57:37.447424
| 2023-08-26T13:40:02
| 2023-08-26T14:30:40
| 21,361,937
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,177
|
rd
|
mwStatsGrid.Rd
|
\name{mwStatsGrid}
\alias{mwStatsGrid}
\title{'Dynamic window' moving average, median and variance of stratigraphic series, using evenly spaced spatial/temporal grid}
\description{
'Dynamic window' moving average, median and variance of stratigraphic series.
This routine adjusts the number of data points in the window so it has a constant duration in time or space, for use with unevenly sampled data.
The results are output on an evenly spaced spatial/temporal grid (this contrasts with mwStats).
}
\usage{
mwStatsGrid(dat,cols=NULL,win=NULL,step=NULL,start=NULL,end=NULL,output=T,norm=F,
palette=6,ncolors=100,genplot=1,verbose=T)
}
\arguments{
\item{dat}{Your data frame containing stratigraphic data; any number of columns (variables) are permitted, but the first column should be a location identifier (e.g., depth, height, time).}
\item{cols}{A vector that identifies the variable column to be extracted (first column automatically extracted).}
\item{win}{Moving window size, in units of space or time.}
\item{step}{Moving window step size, in units of space or time.}
\item{start}{Starting point for analysis, in units of space or time.}
\item{end}{Ending point for analysis, in units of space or time.}
\item{norm}{Normalize density estimates to maximum value? (T or F). If false, density estimates are normalized to unit area.}
\item{output}{Output results? (T or F)}
\item{palette}{What color palette would you like to use? (1) rainbow, (2) grayscale, (3) blue, (4) red, (5) blue-white-red, (6) viridis}
\item{ncolors}{Number of colors to use in plot.}
\item{genplot}{Generate summary plots? (0=none, 1=all time series, 2=kernel density estimates for each window, 3=kernel density estimates with median, 4=kernel density estimates with mean)}
\item{verbose}{Verbose output? (T or F)}
}
\value{
A data frame with four columns: Center of window, Average, Median, Variance
}
\examples{
# generate example series from ar1 noise, 5 kyr sampling interval
ex = ar1(npts=1001,dt=5)
# jitter sampling times
ex[1]=ex[1]+rnorm(1001,sd=1)
# sort
ex = ex[order(ex[,1],na.last=NA,decreasing=FALSE),]
# run mwStats
mwStatsGrid(ex,win=100)
}
|
9f3d2d9ceff46932dd29b8a5d2121139d7569d62
|
14d2d4e7cacb355ed158ae9b55cab59444b8d400
|
/R/studentGrowthPercentiles.R
|
582ccf53ba08fcd0e985fe72c3db1f47b36ed89c
|
[] |
no_license
|
shangyi/SGP.experiment
|
8bfdcf988f54d94a6bb621b44b2dcc5b60dc36bc
|
597bc09c8e4b5ec754acaca49c8056338980c9ba
|
refs/heads/master
| 2016-09-09T20:41:22.051372
| 2014-05-09T21:48:57
| 2014-05-09T21:48:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64,131
|
r
|
studentGrowthPercentiles.R
|
## experimental version for simexing aggregate sgp
`studentGrowthPercentiles` <-
function(panel.data, ## REQUIRED
sgp.labels, ## REQUIRED
panel.data.vnames,
additional.vnames.to.return=NULL,
grade.progression,
content_area.progression=NULL,
year.progression=NULL,
year_lags.progression=NULL,
num.prior,
max.order.for.percentile=NULL,
subset.grade,
percentile.cuts=NULL,
growth.levels,
use.my.knots.boundaries,
use.my.coefficient.matrices=NULL,
calculate.confidence.intervals=NULL,
print.other.gp=FALSE,
print.sgp.order=FALSE,
calculate.sgps=TRUE,
rq.method="br",
knot.cut.percentiles=c(0.2,0.4,0.6,0.8),
knots.boundaries.by.panel=FALSE,
exact.grade.progression.sequence=FALSE,
drop.nonsequential.grade.progression.variables=TRUE,
convert.0and100=TRUE,
sgp.quantiles="Percentiles",
sgp.loss.hoss.adjustment=NULL,
sgp.cohort.size=NULL,
percuts.digits=0,
isotonize=TRUE,
convert.using.loss.hoss=TRUE,
goodness.of.fit=TRUE,
goodness.of.fit.minimum.n=NULL,
return.prior.scale.score=TRUE,
return.prior.scale.score.standardized=TRUE,
return.norm.group.identifier=TRUE,
return.norm.group.scale.scores=NULL,
print.time.taken=TRUE,
parallel.config=NULL,
calculate.simex=NULL,
sgp.percentiles.set.seed=314159,
verbose.output=FALSE) {
started.at <- proc.time()
started.date <- date()
##########################################################
###
### Internal utility functions
###
##########################################################
## Smooth and (optionally) isotonize one student's row of 100 predicted
## percentile scores. Missing entries are filled by linear interpolation
## (approx) over the row; when 'iso' is TRUE the row is sorted so the
## predicted scores are non-decreasing across percentiles.
## Closure dependencies: sgp.loss.hoss.adjustment, sgp.labels, tmp.last and
## Knots_Boundaries from the enclosing studentGrowthPercentiles call.
.smooth.isotonize.row <- function(x, iso=isotonize) {
if (!is.null(sgp.loss.hoss.adjustment)) {
## Cap predictions at the upper boundary (bnd[2], the "hoss") for the
## final grade before interpolating/sorting.
my.path.knots.boundaries <- get.my.knots.boundaries.path(sgp.labels$my.subject, as.character(sgp.labels$my.year))
bnd <- eval(parse(text=paste("Knots_Boundaries", my.path.knots.boundaries, "[['loss.hoss_", tmp.last, "']]", sep="")))
x[x > bnd[2]] <- bnd[2]
}
## Replace NAs with linearly interpolated values at the missing positions.
x[which(is.na(x))] <- approx(x, xout=which(is.na(x)))$y
if (iso) return(sort(x))
else return(x)
}
## Bound, smooth and (optionally) isotonize one row of percentile
## predictions: values are clamped to the loss/hoss range stored in
## panel.data[['Knots_Boundaries']] for the given grade/year/content area,
## then rounded to 5 digits. When taus are missing ('missing.taus'), the
## sorted non-NA values are placed at the 'na.replace' positions of a
## 100-long NA row; otherwise NAs are interpolated and the row is sorted.
.smooth.bound.iso.row <- function(x, grade, tmp.year, tmp.content_area, iso=isotonize, missing.taus, na.replace) {
## Look up c(loss, hoss) for this grade from the supplied panel.data.
bnd <- eval(parse(text=paste("panel.data[['Knots_Boundaries']]", get.my.knots.boundaries.path(tmp.content_area, tmp.year), "[['loss.hoss_", grade, "']]", sep="")))
x[x < bnd[1]] <- bnd[1] ; x[x > bnd[2]] <- bnd[2]
if (!iso) return(round(x, digits=5)) # Results are the same whether NAs present or not...
if (iso & missing.taus) {
## Keep NA placeholders where taus were not computed; fill the rest
## with the sorted (isotonized) non-missing values.
na.row <- rep(NA,100)
na.row[na.replace] <- round(sort(x[!is.na(x)]), digits=5)
return(na.row)
} else {
## Interpolate any interior NAs, then sort to enforce monotonicity.
x[which(is.na(x))] <- approx(x, xout=which(is.na(x)))$y
return(round(sort(x), digits=5))
}
}
## Build a normalized lookup name from a list of SGP labels.
##
## Selects the requested label components (missing ones contribute
## nothing), coerces each to character, joins them with ".", strips any
## trailing dots, upper-cases the result, and converts the first space
## (if any) to an underscore -- the naming convention used for the list
## elements of panel.data (e.g. "MATHEMATICS.2013.BASELINE").
.create.path <- function(labels, pieces=c("my.subject", "my.year", "my.extra.label")) {
    label.values <- unlist(sapply(labels[pieces], as.character))
    joined <- paste(label.values, collapse=".")
    trimmed <- sub('\\.+$', '', joined)
    sub(' ', '_', toupper(trimmed))
}
## Stack the prior panels of the wide data into long format
## (VALID_CASE/CONTENT_AREA/GRADE/SCALE_SCORE/YEAR) and compute knots and
## boundaries from it via createKnotsBoundaries. The last panel (the
## current score) is excluded -- only priors contribute.
## Closure dependencies: knots.boundaries.by.panel, sgp.labels, tmp.gp,
## content_area.progression, knot.cut.percentiles.
.get.knots.boundaries <- function(data, by.grade) {
## Wide layout is: ID, then num.panels grade columns, then num.panels
## score columns.
num.panels <- (dim(data)[2]-1)/2
if (knots.boundaries.by.panel) {
## Tag each prior panel with its own (decremented) year so knots are
## computed per panel.
tmp.years <- rep(yearIncrement(sgp.labels$my.year, (-num.panels+1):-1), each=dim(data)[1])
} else {
tmp.years <- rep(sgp.labels$my.year, dim(data)[1]*(num.panels-1))
}
if (by.grade) {
## Grades come from the data's own grade columns.
tmp.grades <- as.vector(sapply(data[,2:(2+num.panels-2), with=FALSE], as.character))
} else {
## Grades are imputed from the supplied grade progression.
tmp.grades <- rep(head(tmp.gp, -1), each=dim(data)[1])
}
tmp.stack <- data.table(
VALID_CASE="VALID_CASE",
CONTENT_AREA=rep(head(content_area.progression, -1), each=dim(data)[1]),
GRADE=tmp.grades,
SCALE_SCORE=as.vector(sapply(data[,(2+num.panels):(2+2*num.panels-2), with=FALSE], as.numeric)),
YEAR=tmp.years, key=c("VALID_CASE", "CONTENT_AREA", "YEAR"))
createKnotsBoundaries(tmp.stack, knot.cut.percentiles)
}
## Subset the wide panel data to students with complete (non-missing)
## scores over the last k+1 panels and -- when by.grade -- the exact
## grade progression in tmp.gp. Returns ID plus the k+1 relevant score
## columns. The filter is built as a string and eval(parse())'d.
## Closure dependencies: num.panels, tmp.gp.
.get.panel.data <- function(tmp.data, k, by.grade) {
str1 <- str2 <- str3 <- NULL
for (i in 0:k) {
## str1: non-missing score in panel (last - i);
## str2: grade in that panel matches the grade progression;
## str3: column indices of the retained score columns.
str1 <- paste(str1, " & !is.na(tmp.data[[", 1+2*num.panels-i, "]])", sep="")
str2 <- paste(str2, " & tmp.data[[", 1+num.panels-i, "]]=='", rev(as.character(tmp.gp))[i+1], "'", sep="")
str3 <- c(1+2*num.panels-i, str3)
}
if (by.grade) {
tmp.data[eval(parse(text=paste(substring(str1, 4), str2, sep="")))][, c(1, str3), with=FALSE]
} else {
## substring(.., 4) trims the leading " & " from the built condition.
tmp.data[eval(parse(text=substring(str1, 4)))][, c(1, str3), with=FALSE]
}
}
## Resolve which sub-list of Knots_Boundaries to use for a given content
## area and year, returned as a string of "[['...']]" subscripts suitable
## for pasting into an eval(parse()) expression. Handles three layouts:
## no content-area-specific entries at all, year-specific entries
## ("CONTENT.YEAR"), and plain content-area entries ("CONTENT"). For
## year-specific entries, an exact year match wins; years before all
## stored years fall back to the unadorned entry; later years use the
## most recent stored year.
## Closure dependencies: Knots_Boundaries, tmp.path.knots.boundaries.
get.my.knots.boundaries.path <- function(content_area, year) {
## Entries whose name starts with this content area (before any ".").
tmp.knots.boundaries.names <-
names(Knots_Boundaries[[tmp.path.knots.boundaries]])[content_area==sapply(strsplit(names(Knots_Boundaries[[tmp.path.knots.boundaries]]), "[.]"), '[', 1)]
if (length(tmp.knots.boundaries.names)==0) {
return(paste("[['", tmp.path.knots.boundaries, "']]", sep=""))
} else {
## Second dot-separated token (if present) is the year qualifier.
tmp.knots.boundaries.years <- sapply(strsplit(tmp.knots.boundaries.names, "[.]"), function(x) x[2])
if (any(!is.na(tmp.knots.boundaries.years))) {
if (year %in% tmp.knots.boundaries.years) {
return(paste("[['", tmp.path.knots.boundaries, "']][['", content_area, ".", year, "']]", sep=""))
} else {
if (year==sort(c(year, tmp.knots.boundaries.years))[1]) {
## Requested year precedes all stored years: use base entry.
return(paste("[['", tmp.path.knots.boundaries, "']][['", content_area, "']]", sep=""))
} else {
## Otherwise use the most recent stored year.
return(paste("[['", tmp.path.knots.boundaries, "']][['", content_area, ".", rev(sort(tmp.knots.boundaries.years))[1], "']]", sep=""))
}
}
} else {
return(paste("[['", tmp.path.knots.boundaries, "']][['", content_area, "']]", sep=""))
}
}
}
## Fit the order-k quantile-regression coefficient matrix: the current
## score is regressed on a B-spline basis of each of the k prior scores,
## at every tau, either sequentially or distributed over taus via the
## configured parallel backend. Returns NULL when no complete cases
## exist, the string "Insufficient N" when below sgp.cohort.size, and
## otherwise a 'splineMatrix' S4 object carrying the coefficients plus
## the knots/boundaries and progression metadata used to fit it.
## Closure dependencies: tmp.gp, content_area.progression,
## year.progression, year_lags.progression, tmp.slot.gp, taus,
## sgp.cohort.size, rq.method, parallel.config, Knots_Boundaries.
.create.coefficient.matrices <- function(data, k, by.grade) {
tmp.data <- .get.panel.data(data, k, by.grade)
if (dim(tmp.data)[1]==0) return(NULL)
if (dim(tmp.data)[1] < sgp.cohort.size) return("Insufficient N")
tmp.num.variables <- dim(tmp.data)[2]
## Build the rq() model formula and the splineMatrix slot constructors
## as strings, one bs() term per prior grade.
mod <- character()
s4Ks <- "Knots=list("
s4Bs <- "Boundaries=list("
tmp.gp.iter <- rev(tmp.gp)[2:(k+1)]
for (i in seq_along(tmp.gp.iter)) {
my.path.knots.boundaries <- get.my.knots.boundaries.path(rev(content_area.progression)[i+1], yearIncrement(rev(year.progression)[i+1], 0))
## Fail fast if knots/boundaries for this prior grade are absent.
.check.knots.boundaries(names(eval(parse(text=paste("Knots_Boundaries", my.path.knots.boundaries, sep="")))), tmp.gp.iter[i])
knt <- paste("Knots_Boundaries", my.path.knots.boundaries, "[['knots_", tmp.gp.iter[i], "']]", sep="")
bnd <- paste("Knots_Boundaries", my.path.knots.boundaries, "[['boundaries_", tmp.gp.iter[i], "']]", sep="")
mod <- paste(mod, " + bs(tmp.data[[", tmp.num.variables-i, "]], knots=", knt, ", Boundary.knots=", bnd, ")", sep="")
s4Ks <- paste(s4Ks, "knots_", tmp.gp.iter[i], "=", knt, ",", sep="")
s4Bs <- paste(s4Bs, "boundaries_", tmp.gp.iter[i], "=", bnd, ",", sep="")
}
if (is.null(parallel.config)) {
## Sequential: one rq() call over all taus at once.
tmp.mtx <- eval(parse(text=paste("rq(tmp.data[[", tmp.num.variables, "]] ~ ", substring(mod,4), ", tau=taus, data=tmp.data, method=rq.method)[['coefficients']]", sep="")))
} else {
## Parallel: distribute subsets of taus to workers and cbind results.
par.start <- startParallel(parallel.config, 'TAUS', qr.taus=taus) # Need new argument here - default to missing
if (toupper(parallel.config[["BACKEND"]]) == "FOREACH") {
tmp.mtx <- foreach(j = iter(par.start$TAUS.LIST), .combine = "cbind", .packages="quantreg", .inorder=TRUE,
.options.mpi=par.start$foreach.options, .options.multicore=par.start$foreach.options) %dopar% {
eval(parse(text=paste("rq(tmp.data[[", tmp.num.variables, "]] ~ ", substring(mod,4), ", tau=j, data=tmp.data, method=rq.method)[['coefficients']]", sep="")))
}
} else {
if (par.start$par.type == 'MULTICORE') {
tmp.mtx <- mclapply(par.start$TAUS.LIST, function(x) eval(parse(text=paste("rq(tmp.data[[", tmp.num.variables, "]] ~ ",
substring(mod,4), ", tau=x, data=tmp.data, method=rq.method)[['coefficients']]", sep=""))), mc.cores=par.start$workers, mc.preschedule = FALSE)
tmp.mtx <- do.call(cbind, tmp.mtx)
}
if (par.start$par.type == 'SNOW') {
tmp.mtx <- parLapplyLB(par.start$internal.cl, par.start$TAUS.LIST, function(x) eval(parse(text=paste("rq(tmp.data[[",
tmp.num.variables, "]] ~ ", substring(mod,4), ", tau=x, data=tmp.data, method=rq.method)[['coefficients']]", sep=""))))
tmp.mtx <- do.call(cbind, tmp.mtx)
}
}
stopParallel(parallel.config, par.start)
}
## Package coefficients + knots/boundaries + progression metadata into a
## splineMatrix object; s4Ks/s4Bs strings are closed here (trailing ","
## stripped via substring()).
tmp.version <- list(SGP_Package_Version=as.character(packageVersion("SGP")), Date_Prepared=date(), Matrix_Information=list(N=dim(tmp.data)[1]))
eval(parse(text=paste("new('splineMatrix', tmp.mtx, ", substring(s4Ks, 1, nchar(s4Ks)-1), "), ", substring(s4Bs, 1, nchar(s4Bs)-1), "), ",
"Content_Areas=list(as.character(tail(content_area.progression, k+1))), ",
"Grade_Progression=list(as.character(tail(tmp.slot.gp, k+1))), ",
"Time=list(as.character(tail(year.progression, k+1))), ",
"Time_Lags=list(as.numeric(tail(year_lags.progression, k))), ",
"Version=tmp.version)", sep="")))
} ### END .create.coefficient.matrices
## Verify that both a knots and a boundaries entry exist for a grade.
##
## 'names' is the vector of element names from a Knots_Boundaries list
## (e.g. "knots_5", "boundaries_5", "loss.hoss_5"). Stops with an
## informative error when either the "knots_<grade>" or the
## "boundaries_<grade>" entry is missing; otherwise returns invisibly.
.check.knots.boundaries <- function(names, grade) {
    name.parts <- do.call(rbind, strsplit(names, "_"))
    knot.grades <- name.parts[name.parts[,1]=="knots", 2]
    boundary.grades <- name.parts[name.parts[,1]=="boundaries", 2]
    if (!grade %in% knot.grades) {
        stop(paste("knots_", grade, " not found in Knots_Boundaries.", sep=""))
    }
    if (!grade %in% boundary.grades) {
        stop(paste("boundaries_", grade, " not found in Knots_Boundaries.", sep=""))
    }
}
## Translate the sgp.quantiles argument into the vector of taus at which
## the quantile regressions are fit: the character option "PERCENTILES"
## yields the 100 percentile midpoints (0.005, 0.015, ..., 0.995); a
## numeric vector is passed through unchanged.
.create_taus <- function(sgp.quantiles) {
    if (is.numeric(sgp.quantiles)) {
        taus <- sgp.quantiles
    }
    if (is.character(sgp.quantiles)) {
        taus <- switch(sgp.quantiles,
            PERCENTILES = (seq_len(100)-0.5)/100)
    }
    taus
}
## Compose the list-element name under which an order-k coefficient
## matrix is stored: "qrmatrix_<final grade>_<order>".
get.coefficient.matrix.name <- function(tmp.last, k) {
    paste0("qrmatrix_", tmp.last, "_", k)
}
## Predict the 100 percentile scale scores for every student in my.data
## from a fitted splineMatrix: builds the same B-spline design matrix the
## matrix was fit with (intercept + one bs() term per prior, using the
## matrix's stored Knots/Boundaries slots), multiplies, then smooths and
## isotonizes each student's row via .smooth.isotonize.row. Returns an
## n x 100 numeric matrix rounded to 5 digits.
.get.percentile.predictions <- function(my.data, my.matrix) {
SCORE <- NULL
mod <- character()
int <- "cbind(rep(1, dim(my.data)[1]),"
## One basis term per time lag (i.e. per prior score) in the matrix;
## priors are taken from the rightmost columns of my.data, newest first.
for (k in seq_along(my.matrix@Time_Lags[[1]])) {
knt <- paste("my.matrix@Knots[[", k, "]]", sep="")
bnd <- paste("my.matrix@Boundaries[[", k, "]]", sep="")
mod <- paste(mod, ", bs(my.data[[", dim(my.data)[2]-k, "]], knots=", knt, ", Boundary.knots=", bnd, ")", sep="")
}
## Design %*% coefficients gives one predicted score per (student, tau).
tmp <- eval(parse(text=paste(int, substring(mod, 2), ") %*% my.matrix", sep="")))
## Reshape to long, smooth/isotonize per student, reshape back to n x 100.
return(round(matrix(data.table(ID=rep(seq(dim(tmp)[1]), each=100), SCORE=as.vector(t(tmp)))[,.smooth.isotonize.row(SCORE), by=ID][['V1']], ncol=100, byrow=TRUE), digits=5))
}
## Convert predicted percentile scores into each student's growth
## percentile: data1 is the n x 100 matrix of predicted cuts, data2 the
## observed scores. The percentile is the count of predicted cuts below
## the observed score (the appended FALSE column makes which.min land at
## 101, i.e. percentile 100, when all cuts are below). With
## sgp.loss.hoss.adjustment set, students scoring exactly at the hoss are
## instead assigned via the count of cuts strictly above their score,
## which favors the higher percentile at the censored top of the scale.
## Optionally maps 0 -> 1 and 100 -> 99 (convert.0and100).
.get.quantiles <- function(data1, data2) {
TMP_TF <- NULL
tmp <- data.table(ID=rep(seq(dim(data1)[1]), each=101), TMP_TF=as.vector(t(cbind(data1 < data2, FALSE))))[,which.min(TMP_TF)-1, by=ID][['V1']]
if (!is.null(sgp.loss.hoss.adjustment)) {
my.path.knots.boundaries <- get.my.knots.boundaries.path(sgp.labels$my.subject, as.character(sgp.labels$my.year))
tmp.hoss <- eval(parse(text=paste("Knots_Boundaries", my.path.knots.boundaries, "[['loss.hoss_", tmp.last, "']][2]", sep="")))
tmp.index <- which(data2==tmp.hoss)
if (length(tmp.index) > 0) {
## Recompute only for hoss scorers using strictly-greater cuts.
tmp[tmp.index] <- apply(cbind(data1 > data2, TRUE)[tmp.index,,drop=FALSE], 1, function(x) which.max(x)-1)
}
}
if (convert.0and100) {
tmp[tmp==0] <- 1
tmp[tmp==100] <- 99
}
return(as.integer(tmp))
}
## Extract the requested percentile cut scores for each student from the
## n x 100 prediction matrix: selects the columns named in
## percentile.cuts (0-100; column index is cut+1), rounds to
## percuts.digits, and -- when convert.using.loss.hoss -- clamps results
## to the loss/hoss range for the final grade. Returns a matrix with
## columns "PERCENTILE_CUT_<p>".
.get.percentile.cuts <- function(data1) {
tmp <- round(data1[ , percentile.cuts+1, drop=FALSE], digits=percuts.digits)
if (convert.using.loss.hoss) {
my.path.knots.boundaries <- get.my.knots.boundaries.path(sgp.labels$my.subject, as.character(sgp.labels$my.year))
bnd <- eval(parse(text=paste("Knots_Boundaries", my.path.knots.boundaries, "[['loss.hoss_", tmp.last, "']]", sep="")))
tmp[tmp < bnd[1]] <- bnd[1]
tmp[tmp > bnd[2]] <- bnd[2]
}
colnames(tmp) <- paste("PERCENTILE_CUT_", percentile.cuts, sep="")
return(tmp)
}
## SIMEX measurement-error correction for student growth percentiles.
## For each requested prior order k and each lambda > 0, perturbs the
## prior scores with N(0, lambda * CSEM^2) noise (B replicates), refits
## the quantile-regression matrices on the perturbed data (or reuses
## previously computed SIMEX matrices), and records the resulting SGPs.
## Returns 'fitted': per order, a (1 + length(lambda)) x n matrix whose
## row 1 holds student IDs, row 2 the naive (lambda = 0) SGPs, and row
## 1 + which(lambda == L) the SGPs for each perturbation level L.
## CSEMs come either from SGPstateData (via 'state') or from a supplied
## lookup ('variable'). Closure dependencies: sgp.labels, tmp.gp,
## content_area.progression, year.progression, year_lags.progression,
## ss.data, by.grade, Coefficient_Matrices, tmp.path.coefficient.matrices,
## knot.cut.percentiles, rq.method, parallel.config, num.prior,
## coefficient.matrix.priors, use.my.coefficient.matrices,
## exact.grade.progression.sequence, sgp.quantiles, tmp.slot.gp.
##
## FIXES (review): the parallel (par.start$par.type) SGP branch previously
## (1) wrote results at row which(lambda==L) -- clobbering the naive row
## and misaligning all lambdas relative to the sequential branch, and
## (2) accumulated tmp.fitted[[1]] twice while never adding the last
## replicate (loop over seq_along(sim.iters[-1])). Both are corrected to
## match the sequential code path.
.simex.sgp <- function(state, variable, lambda, B, simex.sample.size, extrapolation, save.matrices, simex.use.my.coefficient.matrices=NULL, calculate.simex.sgps) {
GRADE <- CONTENT_AREA <- YEAR <- V1 <- Lambda <- tau <- b <- .SD <- TEMP <- NULL ## To avoid R CMD check warnings
my.path.knots.boundaries <- get.my.knots.boundaries.path(sgp.labels$my.subject, as.character(sgp.labels$my.year))
fitted <- extrap <- tmp.quantiles.simex <- simex.coef.matrices <- list()
## loss/hoss per prior grade (columns), used to clamp perturbed scores.
loss.hoss <- matrix(nrow=2,ncol=length(tmp.gp)-1)
if (!is.null(state)) {
for (g in 1:ncol(loss.hoss)) {
loss.hoss[,g] <- SGPstateData[[state]][["Achievement"]][["Knots_Boundaries"]][[rev(content_area.progression)[-1][g]]][[paste("loss.hoss_", rev(tmp.gp)[-1][g], sep="")]]
}}
if (!is.null(variable)) {
for (g in 1:ncol(loss.hoss)) {
loss.hoss[,g] <- variable[[paste("loss.hoss_", rev(tmp.gp)[-1][g], sep="")]]
}}
## Fit one quantile-regression coefficient matrix on perturbed data
## ('prior_i' columns of rqdata) using lambda-specific knots/boundaries.
rq.mtx <- function(tmp.gp.iter, lam, rqdata) {
mod <- character()
s4Ks <- "Knots=list("
s4Bs <- "Boundaries=list("
for (i in seq_along(tmp.gp.iter)) {
knt <- paste("Knots_Boundaries", my.path.knots.boundaries, "[['Lambda_", lam, "']][['knots_", tmp.gp.iter[i], "']]", sep="")
bnd <- paste("Knots_Boundaries", my.path.knots.boundaries, "[['Lambda_", lam, "']][['boundaries_", tmp.gp.iter[i], "']]", sep="")
mod <- paste(mod, " + bs(prior_", i, ", knots=", knt, ", Boundary.knots=", bnd, ")", sep="")
s4Ks <- paste(s4Ks, "knots_", tmp.gp.iter[i], "=", knt, ",", sep="")
s4Bs <- paste(s4Bs, "boundaries_", tmp.gp.iter[i], "=", bnd, ",", sep="")
}
tmp.mtx <-eval(parse(text=paste("rq(final.yr ~", substring(mod,4), ", tau=taus, data = rqdata, method=rq.method)[['coefficients']]", sep="")))
tmp.version <- list(SGP_Package_Version=as.character(packageVersion("SGP")), Date_Prepared=date(), Matrix_Information=list(N=dim(rqdata)[1]))
eval(parse(text=paste("new('splineMatrix', tmp.mtx, ", substring(s4Ks, 1, nchar(s4Ks)-1), "), ", substring(s4Bs, 1, nchar(s4Bs)-1), "), ",
"Content_Areas=list(as.character(tail(content_area.progression, k+1))), ",
"Grade_Progression=list(as.character(tail(tmp.slot.gp, k+1))), ",
"Time=list(as.character(tail(year.progression, k+1))), ",
"Time_Lags=list(as.numeric(tail(year_lags.progression, k))), ",
"Version=tmp.version)", sep="")))
}
## Determine which prior orders to process; taus must be (re)created when
## reusing matrices since they are otherwise set during matrix fitting.
if (!is.null(use.my.coefficient.matrices)) { # Passed implicitly from studentGrowthPercentiles arguments
taus <- .create_taus(sgp.quantiles)
if (exact.grade.progression.sequence) {
simex.matrix.priors <- num.prior
} else {
simex.matrix.priors <- seq(num.prior)
}
} else simex.matrix.priors <- coefficient.matrix.priors
for (k in simex.matrix.priors) {
tmp.data <- .get.panel.data(ss.data, k, by.grade)
tmp.num.variables <- dim(tmp.data)[2]
tmp.gp.iter <- rev(tmp.gp)[2:(k+1)]
tmp.ca.iter <- rev(content_area.progression)[2:(k+1)]
tmp.yr.iter <- rev(year.progression)[2:(k+1)]
csem.int <- matrix(nrow=dim(tmp.data)[1], ncol=length(tmp.gp.iter)) # build matrix to store interpolated csem
colnames(csem.int) <- paste("icsem", tmp.gp.iter, tmp.ca.iter, tmp.yr.iter, sep="")
# interpolate csem for all scale scores except that of the last grade
if (!is.null(state)) {
for (g in seq_along(tmp.gp.iter)) {
if ("YEAR" %in% names(SGPstateData[[state]][["Assessment_Program_Information"]][["CSEM"]])) {
CSEM_Data <- subset(SGPstateData[[state]][["Assessment_Program_Information"]][["CSEM"]],
GRADE==tmp.gp.iter[g] & CONTENT_AREA== tmp.ca.iter[g] & YEAR==tmp.yr.iter[g])
} else {
CSEM_Data <- subset(SGPstateData[[state]][["Assessment_Program_Information"]][["CSEM"]],
GRADE==tmp.gp.iter[g] & CONTENT_AREA== tmp.ca.iter[g])
}
if (dim(CSEM_Data)[1] == 0) stop(paste('CSEM data for', tmp.ca.iter[g], 'Grade', tmp.gp.iter[g], 'is required to use SIMEX functionality, but is not available in SGPstateData. Please contact package administrators to add CSEM data.'))
## Natural spline through the published CSEM table gives a CSEM for
## every observed prior score.
CSEM_Function <- splinefun(CSEM_Data[["SCALE_SCORE"]], CSEM_Data[["SCALE_SCORE_CSEM"]], method="natural")
csem.int[, paste("icsem", tmp.gp.iter[g], tmp.ca.iter[g], tmp.yr.iter[g], sep="")] <- CSEM_Function(tmp.data[[tmp.num.variables-g]])
}
}
if (!is.null(variable)){
for (g in seq_along(tmp.gp.iter)) {
csem.int[, paste("icsem", tmp.gp.iter[g], tmp.ca.iter[g], tmp.yr.iter[g], sep="")] <- variable[[paste("CSEM.grade", tmp.gp.iter[g], ".", tmp.ca.iter[g], sep="")]]
}
}
# naive model (lambda = 0): reuse the standard coefficient matrix.
if (calculate.simex.sgps) {
fitted[[paste("order_", k, sep="")]] <- matrix(0, nrow=1+length(lambda), ncol=dim(tmp.data)[1])
tmp.matrix <- getsplineMatrices(
Coefficient_Matrices[[tmp.path.coefficient.matrices]],
tail(content_area.progression, k+1),
tail(grade.progression, k+1),
tail(year.progression, k+1),
tail(year_lags.progression, k),
my.matrix.order=k)[[1]]
naive.fitted<-.get.percentile.predictions(tmp.data, tmp.matrix)
fitted[[paste("order_", k, sep="")]][1,] <-t(tmp.data[["ID"]])
fitted[[paste("order_", k, sep="")]][2,] <-.get.quantiles(naive.fitted, tmp.data[[tmp.num.variables]])
}
# perturb data: B noisy replicates per lambda level (skipping lambda[1]=0).
for (L in lambda[-1]) {
big.data <- rbindlist(replicate(B, tmp.data, simplify = FALSE))
big.data[, Lambda := rep(L, each=dim(tmp.data)[1]*B)]
big.data[, b := rep(1:B, each=dim(tmp.data)[1])]
setnames(big.data, tmp.num.variables, "final.yr")
for (g in seq_along(tmp.gp.iter)) {
col.index <- tmp.num.variables-g
setkeyv(big.data, c(names(big.data)[col.index], "final.yr", "b"))
## Perturb only unique (score, final.yr, b) rows, then join back --
## avoids redundant rnorm draws for duplicated rows.
big.data.uniques <- unique(big.data)
big.data.uniques.indices <- which(!duplicated(big.data))
big.data.uniques[, paste("icsem", tmp.gp.iter[g], tmp.ca.iter[g], tmp.yr.iter[g], sep="") :=
rep(csem.int[, paste("icsem", tmp.gp.iter[g], tmp.ca.iter[g], tmp.yr.iter[g], sep="")], B)[big.data.uniques.indices]]
big.data.uniques[, TEMP :=
eval(parse(text=paste("big.data.uniques[[", tmp.num.variables-g, "]]+sqrt(big.data.uniques[['Lambda']])*big.data.uniques[['icsem",
tmp.gp.iter[g], tmp.ca.iter[g], tmp.yr.iter[g], "']] * rnorm(dim(big.data.uniques)[1])", sep="")))]
big.data.uniques[big.data.uniques[[col.index]] < loss.hoss[1,g], col.index := loss.hoss[1,g], with=FALSE]
big.data.uniques[big.data.uniques[[col.index]] > loss.hoss[2,g], col.index := loss.hoss[2,g], with=FALSE]
if (is.null(key(big.data.uniques))) setkeyv(big.data.uniques, key(big.data))
big.data[, tmp.num.variables-g := big.data.uniques[,c(key(big.data), "TEMP"), with=FALSE][big.data][['TEMP']]]
## Lambda-specific knots/boundaries/loss-hoss from the perturbed scores.
ks <- big.data[, as.list(as.vector(unlist(round(quantile(big.data[[col.index]], probs=knot.cut.percentiles, na.rm=TRUE), digits=3))))] # Knots
bs <- big.data[, as.list(as.vector(round(extendrange(big.data[[col.index]], f=0.1), digits=3)))] # Boundaries
lh <- big.data[, as.list(as.vector(round(extendrange(big.data[[col.index]], f=0.0), digits=3)))] # LOSS/HOSS
eval(parse(text=paste("Knots_Boundaries", my.path.knots.boundaries, "[['Lambda_", L, "']][['knots_", tmp.gp.iter[g],
"']] <- c(ks[,V1], ks[,V2], ks[,V3], ks[,V4])", sep="")))
eval(parse(text=paste("Knots_Boundaries", my.path.knots.boundaries, "[['Lambda_", L, "']][['boundaries_", tmp.gp.iter[g],
"']] <- c(bs[,V1], bs[,V2])", sep="")))
eval(parse(text=paste("Knots_Boundaries", my.path.knots.boundaries, "[['Lambda_", L, "']][['loss.hoss_", tmp.gp.iter[g],
"']] <- c(lh[,V1], lh[,V2])", sep="")))
setnames(big.data, tmp.num.variables-g, paste("prior_",g,sep=""))
setkey(big.data, b, ID)
}
## Establish the simulation iterations - either 1) 1:B, or 2) a sample of either B or the number of previously computed matrices
sim.iters <- 1:B
if (!is.null(simex.use.my.coefficient.matrices)) { # Element from the 'calculate.simex' argument list.
available.matrices <- unlist(getsplineMatrices(
Coefficient_Matrices[[paste(tmp.path.coefficient.matrices, '.SIMEX', sep="")]][[
paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]],
tail(content_area.progression, k+1),
tail(grade.progression, k+1),
tail(year.progression, k+1),
tail(year_lags.progression, k),
my.exact.grade.progression.sequence=TRUE,
return.multiple.matrices=TRUE,
my.matrix.order=k), recursive=FALSE)
if (length(available.matrices) > B) sim.iters <- sample(1:length(available.matrices), B) # Stays as 1:B when length(available.matrices) == B
if (length(available.matrices) < B) sim.iters <- sample(1:length(available.matrices), B, replace=TRUE)
}
setkey(big.data, b)
if (is.null(parallel.config)) { # Sequential
if (is.null(simex.use.my.coefficient.matrices)) {
for (z in seq_along(sim.iters)) {
if (is.null(simex.sample.size) || dim(tmp.data)[1] <= simex.sample.size) {
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]][[z]] <-
rq.mtx(tmp.gp.iter[1:k], lam=L, rqdata=big.data[list(z)])
} else {
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]][[z]] <-
rq.mtx(tmp.gp.iter[1:k], lam=L, rqdata=big.data[list(z)][sample(seq(dim(tmp.data)[1]), simex.sample.size)])
}
}
} else simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]] <- available.matrices[sim.iters]
tmp.fitted <- matrix(0,nrow=dim(tmp.data)[1],ncol=length(taus))
if (calculate.simex.sgps) {
## Average the predicted percentile scores over the B replicates.
for (z in seq_along(sim.iters)) {
tmp.fitted<- tmp.fitted +
.get.percentile.predictions(big.data[list(z)][,which(names(big.data[list(z)]) %in% c("ID", paste('prior_', k:1, sep=""), "final.yr")), with=FALSE],
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]][[z]])/B
}
fitted[[paste("order_", k, sep="")]][(1+which(lambda==L)),]<-.get.quantiles(tmp.fitted, tmp.data[[tmp.num.variables]])
}
} else { # Parallel over sim.iters
if (toupper(parallel.config[["BACKEND"]]) == "FOREACH") {
## Don't offer this option now. But this could ultimately be the BEST option for this because we could have
## nested foreach loops around Lambda, B and even the priors/orders if we have access to enough cores (cluster)
message("\t\tNOTE: FOREACH backend is not currently available for SIMEX. Changing to BACKEND='PARALLEL' and TYPE will be set to OS default.")
parallel.config[["BACKEND"]] <- "PARALLEL"
}
par.start <- startParallel(parallel.config, 'SIMEX')
## Note, that if you use the parallel.config for SIMEX here, you can also use it for TAUS in the naive analysis
## Example parallel.config argument: '... parallel.config=list(BACKEND="PARALLEL", TYPE="SOCK", WORKERS=list(SIMEX = 4, TAUS = 4))'
## Calculate coefficient matricies (if needed/requested)
if (is.null(simex.use.my.coefficient.matrices)) {
if (par.start$par.type == 'MULTICORE') {
if (is.null(simex.sample.size) || dim(tmp.data)[1] <= simex.sample.size) {
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]] <-
mclapply(sim.iters, function(z) big.data[list(z)][,rq.mtx(tmp.gp.iter[1:k], lam=L, rqdata=.SD)], mc.cores=par.start$workers)
} else {
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]] <-
mclapply(sim.iters, function(z) big.data[list(z)][sample(seq(dim(tmp.data)[1]), simex.sample.size)][,
rq.mtx(tmp.gp.iter[1:k], lam=L, rqdata=.SD)], mc.cores=par.start$workers)
}
}
if (par.start$par.type == 'SNOW') {
if (is.null(simex.sample.size) || dim(tmp.data)[1] <= simex.sample.size) {
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]] <-
parLapply(par.start$internal.cl, sim.iters, function(z) big.data[list(z)][,rq.mtx(tmp.gp.iter[1:k], lam=L, rqdata=.SD)])
} else {
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]] <-
parLapply(par.start$internal.cl, sim.iters, function(z) big.data[list(z)][sample(seq(dim(tmp.data)[1]), simex.sample.size)][,
rq.mtx(tmp.gp.iter[1:k], lam=L, rqdata=.SD)])
}
}
} else {
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]] <- available.matrices[sim.iters]
}
## get percentile predictions from coefficient matricies
if (calculate.simex.sgps) {
if (par.start$par.type == 'MULTICORE') {
tmp.fitted <- mclapply(seq_along(sim.iters), function(z) { as.vector(.get.percentile.predictions(
big.data[list(z)][,
which(names(big.data[list(z)]) %in% c("ID", paste('prior_', k:1, sep=""), "final.yr")), with=FALSE],
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]][[z]])/B)
}, mc.cores=par.start$workers
)
## FIX: write to row 1+which(lambda==L) (row 1 = IDs, row 2 = naive)
## and sum ALL replicates exactly once, matching the sequential path.
fitted[[paste("order_", k, sep="")]][(1+which(lambda==L)),] <- tmp.fitted[[1]]
for (s in seq_along(sim.iters)[-1]) {
fitted[[paste("order_", k, sep="")]][(1+which(lambda==L)),] <- fitted[[paste("order_", k, sep="")]][(1+which(lambda==L)),] + tmp.fitted[[s]]
}
}
if (par.start$par.type == 'SNOW') {
tmp.fitted <- parLapply(par.start$internal.cl, seq_along(sim.iters), function(z) { as.vector(.get.percentile.predictions(
big.data[list(z)][,
which(names(big.data[list(z)]) %in% c("ID", paste('prior_', k:1, sep=""), "final.yr")), with=FALSE],
simex.coef.matrices[[paste("qrmatrices", tail(tmp.gp,1), k, sep="_")]][[paste("lambda_", L, sep="")]][[z]])/B)}
)
## FIX: same row-offset and replicate-summation corrections as above.
fitted[[paste("order_", k, sep="")]][(1+which(lambda==L)),] <- tmp.fitted[[1]]
for (s in seq_along(sim.iters)[-1]) {
fitted[[paste("order_", k, sep="")]][(1+which(lambda==L)),] <- fitted[[paste("order_", k, sep="")]][(1+which(lambda==L)),] + tmp.fitted[[s]]
}
}
}
stopParallel(parallel.config, par.start)
}
} ### END for (L in lambda[-1])
} ### END for (k in simex.matrix.priors)
return(fitted)
} ### END .simex.sgp function
split.location <- function(years) sapply(strsplit(years, '_'), length)[1]
############################################################################
###
### Data Preparation & Checks
###
############################################################################
ID <- tmp.messages <- ORDER <- SCALE_SCORE_PRIOR <- TEMP_SGP_SIM <- NULL
if (missing(panel.data)) {
stop("User must supply student achievement data for student growth percentile calculations. NOTE: data is now supplied to function using panel.data argument. See help page for details.")
}
if (!(is.matrix(panel.data) | is.list(panel.data))) {
stop("Supplied panel.data not of a supported class. See help for details of supported classes")
}
if (identical(class(panel.data), "list")) {
if (!("Panel_Data" %in% names(panel.data))) {
stop("Supplied panel.data missing Panel_Data")
}
}
if (identical(class(panel.data), "list")) {
if (!is.data.frame(panel.data[["Panel_Data"]]) & !is.data.table(panel.data[["Panel_Data"]])) {
stop("Supplied panel.data$Panel_Data is not a data.frame or a data.table")
}
}
if (identical(class(panel.data), "list") & !is.null(panel.data[['Coefficient_Matrices']])) {
panel.data[['Coefficient_Matrices']] <- checksplineMatrix(panel.data[['Coefficient_Matrices']])
}
if (!missing(sgp.labels)) {
if (!is.list(sgp.labels)) {
stop("Please specify an appropriate list of SGP function labels (sgp.labels). See help page for details.")
}}
if (!identical(names(sgp.labels), c("my.year", "my.subject")) &
!identical(names(sgp.labels), c("my.year", "my.subject", "my.extra.label"))) {
stop("Please specify an appropriate list for sgp.labels. See help page for details.")
}
sgp.labels <- lapply(sgp.labels, toupper)
tmp.path <- .create.path(sgp.labels)
if (!missing(growth.levels)) {
tmp.growth.levels <- list()
if (!is.list(growth.levels) & !is.character(growth.levels)) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: growth.levels must be supplied as a list or character abbreviation. See help page for details. studentGrowthPercentiles will be calculated without augmented growth.levels\n")
tf.growth.levels <- FALSE
}
if (is.list(growth.levels)) {
if (!identical(names(growth.levels), c("my.cuts", "my.levels"))) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Please specify an appropriate list for growth.levels. See help page for details. Student growth percentiles will be calculated without augmented growth.levels\n")
tf.growth.levels <- FALSE
} else {
tmp.growth.levels <- growth.levels
tf.growth.levels <- TRUE
}
}
if (is.character(growth.levels)) {
if (is.null(SGPstateData[[growth.levels]][["Growth"]][["Levels"]])) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Growth Levels are currently not specified for the indicated state. \n\tPlease contact the SGP package administrator to have your state's data included in the package. Student growth percentiles will be calculated without augmented growth levels\n")
tf.growth.levels <- FALSE
} else {
tmp.growth.levels[["my.cuts"]] <- SGPstateData[[growth.levels]][["Growth"]][["Cutscores"]][["Cuts"]]
tmp.growth.levels[["my.levels"]] <- SGPstateData[[growth.levels]][["Growth"]][["Levels"]]
tf.growth.levels <- TRUE
}
}
} else {
tf.growth.levels <- FALSE
}
if (!missing(use.my.knots.boundaries)) {
if (!is.list(use.my.knots.boundaries) & !is.character(use.my.knots.boundaries)) {
stop("use.my.knots.boundaries must be supplied as a list or character abbreviation. See help page for details.")
}
if (is.list(use.my.knots.boundaries)) {
if (!identical(class(panel.data), "list")) {
stop("use.my.knots.boundaries is only appropriate when panel data is of class list. See help page for details.")
}
if (!identical(names(use.my.knots.boundaries), c("my.year", "my.subject")) &
!identical(names(use.my.knots.boundaries), c("my.year", "my.subject", "my.extra.label"))) {
stop("Please specify an appropriate list for use.my.knots.boundaries. See help page for details.")
}
tmp.path.knots.boundaries <- .create.path(use.my.knots.boundaries, pieces=c("my.subject", "my.year"))
if (is.null(panel.data[["Knots_Boundaries"]]) | is.null(panel.data[["Knots_Boundaries"]][[tmp.path.knots.boundaries]])) {
stop("Knots and Boundaries indicated by use.my.knots.boundaries are not included.")
}
}
if (is.character(use.my.knots.boundaries)) {
if (is.null(SGPstateData[[use.my.knots.boundaries]][["Achievement"]][["Knots_Boundaries"]])) {
tmp.messages <- c(tmp.messages, paste("\t\tNOTE: Knots and Boundaries are currently not implemented for the state indicated (",
use.my.knots.boundaries, "). Knots and boundaries will be calculated from the data.", "
Please contact the SGP package administrator to have your Knots and Boundaries included in the package\n", sep=""))
}
tmp.path.knots.boundaries <- .create.path(sgp.labels, pieces=c("my.subject", "my.year"))
}
} else {
tmp.path.knots.boundaries <- .create.path(sgp.labels, pieces=c("my.subject", "my.year"))
}
if (!is.null(use.my.coefficient.matrices) & !identical(use.my.coefficient.matrices, TRUE)) {
if (!identical(class(panel.data), "list")) {
stop("use.my.coefficient.matrices is only appropriate when panel data is of class list. See help page for details.")
}
if (!is.list(use.my.coefficient.matrices)) {
stop("Please specify an appropriate list for argument 'use.my.coefficient.matrices'. See help page for details.")
}
if (!identical(names(use.my.coefficient.matrices), c("my.year", "my.subject")) &
!identical(names(use.my.coefficient.matrices), c("my.year", "my.subject", "my.extra.label"))) {
stop("Please specify an appropriate list for argument 'use.my.coefficient.matrices'. See help page for details.")
}
tmp.path.coefficient.matrices <- .create.path(use.my.coefficient.matrices, pieces=c("my.subject", "my.year"))
if (is.null(panel.data[["Coefficient_Matrices"]]) | is.null(panel.data[["Coefficient_Matrices"]][[tmp.path.coefficient.matrices]])) {
stop("Coefficient matrices indicated by argument 'use.my.coefficient.matrices' are not included.")
}
} else {
tmp.path.coefficient.matrices <- tmp.path
}
if (is.character(sgp.quantiles)) {
sgp.quantiles <- toupper(sgp.quantiles)
if (sgp.quantiles != "PERCENTILES") {
stop("Character options for sgp.quantiles include only Percentiles at this time. Other options available by specifying a numeric quantity. See help page for details.")
}}
if (is.numeric(sgp.quantiles)) {
if (!(all(sgp.quantiles > 0 & sgp.quantiles < 1))) {
stop("Specify sgp.quantiles as as a vector of probabilities between 0 and 1.")
}}
if (!is.null(percentile.cuts)) {
if (sgp.quantiles != "PERCENTILES") {
stop("percentile.cuts only appropriate for growth percentiles. Set sgp.quantiles to Percentiles to produce requested percentile.cuts.")
}
if (!all(percentile.cuts %in% 0:100)) {
stop("Specified percentile.cuts must be integers between 0 and 100.")
}}
if (!calculate.sgps & (is.character(goodness.of.fit) | goodness.of.fit==TRUE)) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Goodness-of-Fit tables only produced when calculating SGPs.\n")
}
if (!is.null(calculate.confidence.intervals)) {
csem.tf <- TRUE
if (!is.character(calculate.confidence.intervals) & !is.list(calculate.confidence.intervals)) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Please supply an appropriate state acronym, variable or list containing details to calculate.confidence.intervals. See help page for details. SGPs will be calculated without confidence intervals.\n")
csem.tf <- FALSE
}
if (is.list(calculate.confidence.intervals)) {
if (!(("state" %in% names(calculate.confidence.intervals)) | ("variable" %in% names(calculate.confidence.intervals)))) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Please specify an appropriate list for calculate.confidence.intervals including state/csem variable, confidence.quantiles, simulation.iterations, distribution and round. See help page for details. SGPs will be calculated without confidence intervals.\n")
csem.tf <- FALSE
}
if ("variable" %in% names(calculate.confidence.intervals) & missing(panel.data.vnames)) {
stop("To utilize a supplied CSEM variable for confidence interval calculation you must specify the variables to be used for student growth percentile calculations with the panel.data.vnames argument. See help page for details.")
}
if (all(c("state", "variable") %in% names(calculate.confidence.intervals))) {
stop("Please specify EITHER a state OR a CSEM variable for SGP confidence interval calculation. See help page for details.")
}
}
if (is.character(calculate.confidence.intervals)) {
if (!calculate.confidence.intervals %in% c(objects(SGPstateData), names(panel.data[['Panel_Data']]))) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Please provide an appropriate state acronym or variable name in supplied data corresponding to CSEMs. See help page for details. SGPs will be calculated without confidence intervals.\n")
csem.tf <- FALSE
}
if (calculate.confidence.intervals %in% objects(SGPstateData)) {
if ("YEAR" %in% names(SGPstateData[[calculate.confidence.intervals]][["Assessment_Program_Information"]][["CSEM"]])) {
if (!sgp.labels$my.year %in% unique(SGPstateData[[calculate.confidence.intervals]][["Assessment_Program_Information"]][["CSEM"]][["YEAR"]])) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: SGPstateData contains year specific CSEMs but year requested is not available. Simulated SGPs and confidence intervals will not be calculated.\n")
csem.tf <- FALSE
}
}
if (!sgp.labels$my.subject %in% unique(SGPstateData[[calculate.confidence.intervals]][["Assessment_Program_Information"]][["CSEM"]][["CONTENT_AREA"]])) {
tmp.messages <- c(tmp.messages, paste("\t\tNOTE: SGPstateData does not contain content area CSEMs for requested content area '", sgp.labels$my.subject, "'. Simulated SGPs and confidence intervals will not be calculated.\n", sep=""))
csem.tf <- FALSE
}
calculate.confidence.intervals <- list(state=calculate.confidence.intervals)
}
if (calculate.confidence.intervals %in% names(panel.data[['Panel_Data']])) {
calculate.confidence.intervals <- list(variable=calculate.confidence.intervals)
}
}
} else {
csem.tf <- FALSE
}
if (!is.null(calculate.simex)) {
simex.tf <- TRUE
if (!is.character(calculate.simex) & !is.list(calculate.simex)) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Please supply an appropriate state acronym, variable or list containing details to calculate.simex. See help page for details. SGPs will be calculated without measurement error correction.\n")
simex.tf <- FALSE
}
if (is.list(calculate.simex)) {
if (!("state" %in% names(calculate.simex)) & !("variable" %in% names(calculate.simex))) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Please specify an appropriate list for calculate.simex including state/csem variable, simulation.iterations, lambda and extrapolation. See help page for details. SGPs will be calculated without measurement error correction.\n")
simex.tf <- FALSE
}
if (all(c("state", "variable") %in% names(calculate.simex))) {
stop("Please specify EITHER a state OR a CSEM variable for SGP measurement error correction. See help page for details.")
}
if (!is.null(calculate.simex$lambda)) {
if (!is.numeric(calculate.simex$lambda)) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Please supply numeric values to lambda. See help page for details. SGPs will be calculated without measurement error correction.\n")
simex.tf <- FALSE
}
if (any(calculate.simex$lambda < 0)) {
message("lambda should not contain negative values. Negative values will be ignored", call. = FALSE)
lambda <- calculate.simex$lambda[calculate.simex$lambda >= 0]
} else lambda=calculate.simex$lambda
}
}
if (is.character(calculate.simex)) {
if (!calculate.simex %in% c(objects(SGPstateData), names(panel.data))) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Please provide an appropriate state acronym or variable name in supplied data corresponding to CSEMs. See help page for details. SGPs will be calculated without measurement error correction.\n")
simex.tf <- FALSE
}
if (calculate.simex %in% objects(SGPstateData)) {
if ("YEAR" %in% names(SGPstateData[[calculate.simex]][["Assessment_Program_Information"]][["CSEM"]])) {
if (!sgp.labels$my.year %in% unique(SGPstateData[[calculate.simex]][["Assessment_Program_Information"]][["CSEM"]][["YEAR"]])) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: SGPstateData contains year specific CSEMs but year requested is not available. SGPs will be calculated without measurement error correction.\n")
simex.tf <- FALSE
}
}
if (!sgp.labels$my.subject %in% unique(SGPstateData[[calculate.simex]][["Assessment_Program_Information"]][["CSEM"]][["CONTENT_AREA"]])) {
tmp.messages <- c(tmp.messages, paste("\t\tNOTE: SGPstateData does not contain content area CSEMs for requested content area '",
sgp.labels$my.subject, "'. SGPs will be calculated without measurement error correction.\n", sep=""))
simex.tf <- FALSE
}
calculate.simex <- list(state=calculate.simex)
}
if (calculate.simex %in% names(panel.data)) {
calculate.simex <- list(variable=calculate.simex)
}
}
if (is.null(calculate.simex$simulation.iterations)) calculate.simex$simulation.iterations <- 20
if (!is.null(calculate.simex$simex.sample.size) && !is.numeric(calculate.simex$simex.sample.size)) calculate.simex$simulation.sample.size <- NULL
if (is.null(calculate.simex$lambda)) calculate.simex$lambda <- seq(0,2,0.5)
if (is.null(calculate.simex$extrapolation)) {
calculate.simex$extrapolation <- "LINEAR"
} else {
calculate.simex$extrapolation <- toupper(calculate.simex$extrapolation)
}
if (!any(calculate.simex$extrapolation == c("QUADRATIC", "LINEAR", "NATURAL"))) {
message("\t\tNOTE: Extrapolation not implemented. Using: linear", call. = FALSE)
calculate.simex$extrapolation <- "LINEAR"
}
} else {
simex.tf <- FALSE
}
if (!is.null(additional.vnames.to.return)) {
if (!all(names(additional.vnames.to.return) %in% names(panel.data[["Panel_Data"]]))) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Supplied 'additional.vnames.to.return' are not all contained in supplied panel.data. No additional variables will be returned.\n")
additional.vnames.to.return <- NULL
}
}
if (is.null(sgp.cohort.size)) sgp.cohort.size <- 0
if (is.null(goodness.of.fit.minimum.n)) goodness.of.fit.minimum.n <- 250
if (!is.null(sgp.percentiles.set.seed)) set.seed(as.integer(sgp.percentiles.set.seed))
### Create object to store the studentGrowthPercentiles objects
tmp.objects <- c("Coefficient_Matrices", "Cutscores", "Goodness_of_Fit", "Knots_Boundaries", "Panel_Data", "SGPercentiles", "SGProjections", "Simulated_SGPs")
Coefficient_Matrices <- Cutscores <- Goodness_of_Fit <- Knots_Boundaries <- Panel_Data <- SGPercentiles <- SGProjections <- Simulated_SGPs <- SGP_STANDARD_ERROR <- Verbose_Messages <- NULL
SGP_SIMEX <- SGP_NORM_GROUP_SCALE_SCORES <- NULL
if (identical(class(panel.data), "list")) {
for (i in tmp.objects) {
if (!is.null(panel.data[[i]])) {
assign(i, panel.data[[i]])
}
}
## Check class and construction of coefficient matrices
if (!is.null(panel.data[['Coefficient_Matrices']])) {
tmp.matrices <- Coefficient_Matrices; tmp.changes <- FALSE
for (i in names(tmp.matrices)) {
splineMatrix.tf <- sapply(tmp.matrices[[i]], validObject, test=TRUE)==TRUE
if (!any(splineMatrix.tf)) {
tmp.changes <- TRUE
tmp.content_area <- unlist(strsplit(i, "[.]"))[1]; tmp.year <- unlist(strsplit(i, "[.]"))[2]
for (j in names(tmp.matrices[[i]])[!splineMatrix.tf]) {
message(paste("\t\tUpdating Existing Coefficient Matrix", i, j, "to new splineMatrix class."))
tmp.matrices[[i]][[j]] <- as.splineMatrix(matrix_argument=tmp.matrices[[i]][[j]],
matrix_argument_name=j, content_area=tmp.content_area, year=tmp.year, sgp_object=panel.data)
}
}
}
if (tmp.changes) {
Coefficient_Matrices <- tmp.matrices
}
}
} ### if (identical(class(panel.data), "list"))
### Create Panel_Data based upon class of input data
if (is.matrix(panel.data)) {
Panel_Data <- panel.data <- as.data.frame(panel.data, stringsAsFactors=FALSE)
}
if (identical(class(panel.data), "list")) {
if (!identical(class(panel.data[["Panel_Data"]]), "data.frame")) {
Panel_Data <- as.data.frame(panel.data[["Panel_Data"]], stringsAsFactors=FALSE)
}}
if (identical(class(panel.data), "data.frame")) {
Panel_Data <- panel.data
}
if (identical(class(panel.data), "list")) {
Panel_Data <- panel.data[["Panel_Data"]]
}
### Create ss.data from Panel_Data
if (!missing(panel.data.vnames)) {
if (!all(panel.data.vnames %in% names(Panel_Data))) {
tmp.messages <- c(tmp.messages, "\t\tNOTE: Supplied 'panel.data.vnames' are not all in the supplied Panel_Data. Analyses will continue with the intersection names contain in Panel_Data.\n")
}
ss.data <- Panel_Data[,intersect(panel.data.vnames, names(Panel_Data))]
} else {
ss.data <- Panel_Data
}
if (dim(ss.data)[2] %% 2 != 1) {
stop(paste("Number of columns of supplied panel data (", dim(ss.data)[2], ") does not conform to data requirements. See help page for details."))
}
num.panels <- (dim(ss.data)[2]-1)/2
### Rename variables in ss.data based upon grade progression
if (!missing(grade.progression)) {
tmp.gp <- grade.progression
by.grade <- TRUE
if (length(tmp.gp[!is.na(tmp.gp)]) > num.panels) {
tmp.messages <- c(tmp.messages, paste("\t\tNOTE: Supplied grade progression, grade.progress=c(", paste(grade.progression, collapse=","), "), exceeds number of panels (", num.panels, ") in provided data.\n\t\t Analyses will utilize maximum number of priors supplied by the data.\n", sep=""))
tmp.gp <- tail(grade.progression, num.panels)
}}
if (!missing(subset.grade) & missing(grade.progression)) {
tmp.gp <- (subset.grade-num.panels+1):subset.grade
by.grade <- TRUE
}
if (missing(subset.grade) & missing(grade.progression)) {
tmp.gp <- 1:num.panels
by.grade <- FALSE
}
if (!missing(num.prior) & !exact.grade.progression.sequence) {
if (length(num.prior) > 1 | !((num.prior-round(num.prior)) < .Machine$double.eps^0.5) | num.prior <= 0) {
stop("Specified num.prior not positive integer(s)")
}
if (num.prior > length(tmp.gp[!is.na(tmp.gp)])-1) {
tmp.messages <- c(tmp.messages, paste("\t\tNOTE: Specified argument num.prior (", num.prior, ") exceeds number of panels of data supplied. Analyses will utilize maximum number of priors possible.\n", sep=""))
num.prior <- length(tmp.gp[!is.na(tmp.gp)])-1
} else {
tmp.gp <- grade.progression <- tail(tmp.gp[!is.na(tmp.gp)], num.prior+1)
if (!is.null(content_area.progression) && length(content_area.progression > num.prior+1)) content_area.progression <- tail(content_area.progression, num.prior+1)
}} else {
num.prior <- length(tmp.gp[!is.na(tmp.gp)])-1
}
if (exact.grade.progression.sequence){
tmp.gp <- grade.progression
by.grade <- TRUE
num.prior <- length(tmp.gp[!is.na(tmp.gp)])-1
}
if (is.character(tmp.gp)) {
tmp.slot.gp <- tmp.gp
tmp.gp <- tmp.gp[!is.na(tmp.gp)]
} else {
tmp.slot.gp <- grade.progression
}
if (!is.null(max.order.for.percentile)) {
tmp.gp <- tail(tmp.gp, max.order.for.percentile+1)
num.prior <- min(num.prior, max.order.for.percentile)
if (!is.null(content_area.progression)) content_area.progression <- tail(content_area.progression, length(tmp.gp))
if (!is.null(year.progression)) year.progression <- year.progression.for.norm.group <- tail(year.progression, length(tmp.gp))
}
if (is.numeric(tmp.gp) & drop.nonsequential.grade.progression.variables && any(diff(tmp.gp) > 1)) {
ss.data <- ss.data[,c(1, (num.panels+1)-rev(c(1, cumsum(rev(diff(tmp.gp)))+1)-1), (2*num.panels+1)-rev(c(1, cumsum(rev(diff(tmp.gp)))+1)-1))]
num.panels <- (dim(ss.data)[2]-1)/2
}
## Run this check before the setup of ss.data - otherwise function chokes on negative subscripts
if (exact.grade.progression.sequence & num.prior > num.panels) {
tmp.messages <- paste("\t\tNOTE: Supplied data together with EXACT grade progression contains fewer panel years than required. \n\t\t
Check data, function arguments and see help page for details.\n")
message(paste("\tStarted studentGrowthPercentiles", started.date))
message(paste("\t\tSubject: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ",
paste(tmp.slot.gp, collapse=", "), " ", sgp.labels$my.extra.label, sep=""))
message(paste(tmp.messages, "\tStudent Growth Percentile Analysis NOT RUN", date(), "\n"))
return(
list(Coefficient_Matrices=Coefficient_Matrices,
Cutscores=Cutscores,
Goodness_of_Fit=Goodness_of_Fit,
Knots_Boundaries=Knots_Boundaries,
Panel_Data=Panel_Data,
SGPercentiles=SGPercentiles,
SGProjections=SGProjections,
Simulated_SGPs=Simulated_SGPs))
}
### Create ss.data
tmp.last <- tail(tmp.gp, 1)
ss.data <- data.table(ss.data[,c(1, (1+num.panels-num.prior):(1+num.panels), (1+2*num.panels-num.prior):(1+2*num.panels))], key=names(ss.data)[1])
num.panels <- (dim(ss.data)[2]-1)/2
if (is.factor(ss.data[[1]])) ss.data[[1]] <- as.character(ss.data[[1]])
if (exact.grade.progression.sequence) tmp.num.prior <- num.prior else tmp.num.prior <- 1
max.cohort.size <- dim(.get.panel.data(ss.data, tmp.num.prior, by.grade))[1]
if (max.cohort.size == 0) {
tmp.messages <- "\t\tNOTE: Supplied data together with grade progression contains no data. Check data, function arguments and see help page for details.\n"
message(paste("\tStarted studentGrowthPercentiles", started.date))
message(paste("\t\tSubject: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ",
paste(tmp.slot.gp, collapse=", "), " ", sgp.labels$my.extra.label, sep=""))
message(paste(tmp.messages, "\tFinished SGP Student Growth Percentile Analysis", date(), "in", timetaken(started.at), "\n"))
return(
list(Coefficient_Matrices=Coefficient_Matrices,
Cutscores=Cutscores,
Goodness_of_Fit=Goodness_of_Fit,
Knots_Boundaries=Knots_Boundaries,
Panel_Data=Panel_Data,
SGPercentiles=SGPercentiles,
SGProjections=SGProjections,
Simulated_SGPs=Simulated_SGPs))
}
if (max.cohort.size < sgp.cohort.size) {
tmp.messages <- paste("\t\tNOTE: Supplied data together with grade progression contains fewer than the minimum cohort size. \n\t\tOnly", max.cohort.size,
"valid cases provided with", sgp.cohort.size, "indicated as minimum cohort N size. Check data, function arguments and see help page for details.\n")
message(paste("\tStarted studentGrowthPercentiles", started.date))
message(paste("\t\tSubject: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ",
paste(tmp.slot.gp, collapse=", "), " ", sgp.labels$my.extra.label, sep=""))
message(paste(tmp.messages, "\tStudent Growth Percentile Analysis NOT RUN", date(), "\n"))
return(
list(Coefficient_Matrices=Coefficient_Matrices,
Cutscores=Cutscores,
Goodness_of_Fit=Goodness_of_Fit,
Knots_Boundaries=Knots_Boundaries,
Panel_Data=Panel_Data,
SGPercentiles=SGPercentiles,
SGProjections=SGProjections,
Simulated_SGPs=Simulated_SGPs))
}
### PROGRESSION variable creation:
grade.progression <- tmp.gp
if (is.null(content_area.progression)) {
content_area.progression <- rep(sgp.labels$my.subject, length(tmp.gp))
} else {
if (!identical(class(content_area.progression), "character")) {
stop("content_area.progression should be a character vector. See help page for details.")
}
if (!identical(tail(content_area.progression, 1), sgp.labels[['my.subject']])) {
stop("The last element in the content_area.progression must be identical to 'my.subject' of the sgp.labels. See help page for details.")
}
if (length(content_area.progression) != length(tmp.gp)) {
tmp.messages <- c(tmp.messages, "\tNOTE: The content_area.progression vector does not have the same number of elements as the grade.progression vector.\n")
}
}
if (is.null(year.progression) & is.null(year_lags.progression)) {
if (is.character(type.convert(as.character(grade.progression), as.is=TRUE))) {
stop("\tNOTE: Non-numeric grade progressions must be accompanied by arguments 'year.progression' and 'year_lags.progression'")
} else {
year.progression <- year.progression.for.norm.group <- rev(yearIncrement(sgp.labels[['my.year']], c(0, -cumsum(rev(diff(type.convert(as.character(grade.progression))))))))
}
}
if (is.null(year.progression) & !is.null(year_lags.progression)) {
if (!identical(sgp.labels[['my.extra.label']], "BASELINE")) {
year.progression <- year.progression.for.norm.group <- rev(yearIncrement(sgp.labels[['my.year']], c(0, -cumsum(rev(year_lags.progression)))))
}
if (identical(sgp.labels[['my.extra.label']], "BASELINE")) {
year.progression <- rep("BASELINE", length(tmp.gp))
year.progression.for.norm.group <- rev(yearIncrement(sgp.labels[['my.year']], c(0, -cumsum(rev(year_lags.progression)))))
}
if (!identical(class(year.progression), "character")) {
stop("year.area.progression should be a character vector. See help page for details.")
}
if (!identical(sgp.labels[['my.extra.label']], "BASELINE") & !identical(tail(year.progression, 1), sgp.labels[['my.year']])) {
stop("The last element in the year.progression must be identical to 'my.year' of the sgp.labels. See help page for details.")
}
if (length(year.progression) != length(tmp.gp)) {
tmp.messages <- c(tmp.messages, "\tNOTE: The year.progression vector does not have the same number of elements as the grade.progression vector.\n")
}
}
if (!is.null(year.progression) & is.null(year_lags.progression)) {
if (year.progression[1] == "BASELINE") {
year_lags.progression <- rep(1, length(year.progression)-1)
year.progression.for.norm.group <- year.progression
} else {
year_lags.progression <- diff(as.numeric(sapply(strsplit(year.progression, '_'), '[', split.location(year.progression))))
year.progression.for.norm.group <- year.progression
}
}
### Create Knots and Boundaries if requested (uses only grades in tmp.gp)
if (missing(use.my.knots.boundaries)) {
tmp.knots <- c(Knots_Boundaries[[tmp.path.knots.boundaries]], .get.knots.boundaries(ss.data, by.grade))
Knots_Boundaries[[tmp.path.knots.boundaries]] <- tmp.knots[!duplicated(names(tmp.knots))]
} else {
if (is.character(use.my.knots.boundaries)) {
if (!is.null(SGPstateData[[use.my.knots.boundaries]][["Achievement"]][["Knots_Boundaries"]])) {
for (h in unique(content_area.progression)) {
for (i in grep(h, names(SGPstateData[[use.my.knots.boundaries]][["Achievement"]][["Knots_Boundaries"]]), value=TRUE)) {
Knots_Boundaries[[tmp.path.knots.boundaries]][[i]] <- SGPstateData[[use.my.knots.boundaries]][["Achievement"]][["Knots_Boundaries"]][[i]]
}
}
}
}
}
### QR Calculations: coefficient matrices are saved/read into/from panel.data[["Coefficient_Matrices"]]
if (is.null(use.my.coefficient.matrices)) {
taus <- .create_taus(sgp.quantiles)
if (exact.grade.progression.sequence) {
coefficient.matrix.priors <- num.prior
} else {
coefficient.matrix.priors <- seq(num.prior)
}
for (k in coefficient.matrix.priors) {
Coefficient_Matrices[[tmp.path.coefficient.matrices]][['TMP_NAME']] <- .create.coefficient.matrices(ss.data, k, by.grade)
if (identical(Coefficient_Matrices[[tmp.path.coefficient.matrices]][['TMP_NAME']], "Insufficient N")) {
tmp.messages <- c(tmp.messages, paste("\tNOTE: Some grade progressions contain fewer than the minimum cohort size.",
"\n\t\tOnly analyses with MAX grade progression", paste(rev(rev(tmp.gp)[1:k]), collapse = ', '), "will be produced given", sgp.cohort.size,
"indicated as minimum cohort N size. \n\t\tCheck data, function arguments and see help page for details.\n"))
Coefficient_Matrices[[tmp.path.coefficient.matrices]][['TMP_NAME']] <- NULL
grade.progression <- tmp.gp <- rev(rev(tmp.gp)[1:k])
# num.prior <- length(tmp.gp[2:k]) # Force lots of warnings (?)
break
}
names(Coefficient_Matrices[[tmp.path.coefficient.matrices]])[length(Coefficient_Matrices[[tmp.path.coefficient.matrices]])] <- get.coefficient.matrix.name(tmp.last, k)
if (verbose.output) {
tmp.coefficient.matrix.name <- get.coefficient.matrix.name(tmp.last, k)
tmp.grade.names <- paste("Grade",
rev(head(unlist(Coefficient_Matrices[[tmp.path.coefficient.matrices]][[tmp.coefficient.matrix.name]]@Grade_Progression), -1)))
for (l in seq_along(tmp.grade.names)) {
tmp.knots <- paste(tmp.grade.names[l], Coefficient_Matrices[[tmp.path.coefficient.matrices]][[tmp.coefficient.matrix.name]]@Knots[l])
tmp.boundaries <- paste(tmp.grade.names[l], Coefficient_Matrices[[tmp.path.coefficient.matrices]][[tmp.coefficient.matrix.name]]@Boundaries[l])
Verbose_Messages <- c(Verbose_Messages, paste("\t\tNOTE: Coefficient Matrix ", tmp.coefficient.matrix.name,
" created using Knots: ", tmp.knots, " and Boundaries: ", tmp.boundaries, ".\n", sep=""))
}
}
}
}
### Calculate SIMEX corrected coefficient matrices and percentiles (if requested)
if (simex.tf) {
fitted.simex <- .simex.sgp(
state=calculate.simex$state,
variable=calculate.simex$variable,
lambda=calculate.simex$lambda,
B=calculate.simex$simulation.iterations,
simex.sample.size=calculate.simex$simex.sample.size,
extrapolation=calculate.simex$extrapolation,
save.matrices=calculate.simex$save.matrices,
simex.use.my.coefficient.matrices=calculate.simex$simex.use.my.coefficient.matrices,
calculate.simex.sgps=calculate.sgps)
}
### Start/Finish Message & Return SGP Object
if (print.time.taken) {
message(paste("\tStarted studentGrowthPercentiles:", started.date))
if (calculate.sgps) {
message(paste("\t\tContent Area: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ",
paste(tmp.slot.gp, collapse=", "), " ", sgp.labels$my.extra.label, " (N=", format(dim(quantile.data)[1], big.mark=","), ")", sep=""))
} else {
message(paste("\t\tContent Area: ", sgp.labels$my.subject, ", Year: ", sgp.labels$my.year, ", Grade Progression: ",
paste(tmp.slot.gp, collapse=", "), " ", sgp.labels$my.extra.label, sep=""))
}
if (verbose.output) message(Verbose_Messages)
message(c(tmp.messages, "\tFinished SGP Student Growth Percentile Analysis: ", date(), " in ", timetaken(started.at), "\n"))
}
return(fitted.simex)
} ### END studentGrowthPercentiles Function
|
29cf510cc14caf1d31364c191859e77d33fadbfa
|
5127927036020b809569d9b57581518f13e4c548
|
/10XGenomics/R_scripts/01_new_gtf.R
|
0df611d50fa7217340d203c94fac70bbe43b9f21
|
[
"MIT"
] |
permissive
|
zamanianlab/Bmsinglecell-ms
|
f2abacefbe74feb3041df06076b71fc740ea8779
|
ebcf3846212be1a97583a7777663910033a76484
|
refs/heads/main
| 2023-06-30T09:25:14.631569
| 2023-05-01T23:25:16
| 2023-05-01T23:25:16
| 528,939,348
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,820
|
r
|
01_new_gtf.R
|
# Determine the ideal 3' extension length for each gene to maximize mapping
# rates across the transcriptome.
# NOTE: future.globals.maxSize below allows ~8 GB of exported globals
# (8000 * 1024^2 bytes); raise it if parallel workers need to export more.
options(future.globals.maxSize = 8000 * 1024^2)

# Install packages only when missing: unconditional install.packages() calls
# hit CRAN on every run of the script.
for (pkg in c("hdf5r", "readbitmap")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}

# Load libraries.
library(tidyverse)
library(DropletUtils)
library(ggplot2)
library(Matrix)
library(rjson)
library(cowplot)
library(RColorBrewer)
library(grid)
library(readbitmap)
library(Seurat)
library(dplyr)
library(rhdf5)
library(data.table)

# Set working directory.
# NOTE(review): setwd() ties the script to one machine; consider
# project-relative paths (e.g. the `here` package).
setwd("~/Library/CloudStorage/Box-Box/ZamanianLab/SeqLibraries/Mapping/singlecell/210518_BH7FNCDRXY")
# One filtered_feature_bc_matrix directory exists per tested read-extension
# length (in bp). Build the nine paths from a single base directory rather
# than repeating the full path literal for each one.
base.dir <- "~/Library/CloudStorage/Box-Box/ZamanianLab/SeqLibraries/Mapping/singlecell/210518_BH7FNCDRXY"
ext.dir <- function(bp) paste0(base.dir, "/outs_", bp, "bp_ext/filtered_feature_bc_matrix/")
data.dir.0 <- ext.dir(0)
data.dir.50 <- ext.dir(50)
data.dir.100 <- ext.dir(100)
data.dir.150 <- ext.dir(150)
data.dir.200 <- ext.dir(200)
data.dir.250 <- ext.dir(250)
data.dir.300 <- ext.dir(300)
data.dir.400 <- ext.dir(400)
data.dir.500 <- ext.dir(500)
# Load the filtered count matrix for each extension length into variables
# filt_0, filt_50, ..., filt_500.
for (bp in c(0, 50, 100, 150, 200, 250, 300, 400, 500)) {
  assign(paste0("filt_", bp), Read10X(get(paste0("data.dir.", bp))))
}
# Wrap each count matrix in a Seurat object (bma_0, bma_50, ..., bma_500).
for (bp in c(0, 50, 100, 150, 200, 250, 300, 400, 500)) {
  assign(paste0("bma_", bp),
         Seurat::CreateSeuratObject(counts = get(paste0("filt_", bp))))
}
# Write each count matrix (genes x cells) from its Seurat object to a CSV.
# NOTE: the original assigned the result of write.table() back into
# `bma_<bp>_cts`, but write.table() returns NULL invisibly, so those
# assignments only stored NULL; call it purely for its side effect.
for (bp in c(0, 50, 100, 150, 200, 250, 300, 400, 500)) {
  seurat.obj <- get(paste0("bma_", bp))
  write.table(as.matrix(GetAssayData(object = seurat.obj, slot = "counts")),
              paste0("~/Desktop/bma_", bp, "_counts.csv"),
              sep = ",", row.names = TRUE, col.names = TRUE, quote = FALSE)
}
# Re-import each count matrix CSV. read.csv() places the gene IDs in the row
# names (the header written above has one fewer field than the data rows),
# so move them into an explicit "genes" column.
# BUG FIX: the original called rownames_to_column() twice on bma_150_cts;
# the second call fails because the "genes" column already exists after the
# first one.
for (bp in c(0, 50, 100, 150, 200, 250, 300, 400, 500)) {
  cts <- read.csv(paste0("~/Desktop/bma_", bp, "_counts.csv"))
  assign(paste0("bma_", bp, "_cts"), rownames_to_column(cts, var = "genes"))
}
# Append a per-gene total to each count matrix: the sum of counts across all
# cells (every column except the leading "genes" column).
# BUG FIX: the original computed bma_50_total from bma_0_cts, so the 50 bp
# totals were silently derived from the wrong (0 bp) matrix.
for (bp in c(0, 50, 100, 150, 200, 250, 300, 400, 500)) {
  cts <- get(paste0("bma_", bp, "_cts"))
  cts[[paste0("total_", bp, "bp")]] <- rowSums(cts[, -1])
  assign(paste0("bma_", bp, "_total"), cts)
}
# Collect gene IDs plus the per-gene totals for every extension length into
# one data.frame, one column per extension.
ext.lens <- c("0", "50", "100", "150", "200", "250", "300", "400", "500")
ext_all <- data.frame(bma_0_total$genes,
                      bma_0_total$total_0bp,
                      bma_50_total$total_50bp,
                      bma_100_total$total_100bp,
                      bma_150_total$total_150bp,
                      bma_200_total$total_200bp,
                      bma_250_total$total_250bp,
                      bma_300_total$total_300bp,
                      bma_400_total$total_400bp,
                      bma_500_total$total_500bp)
colnames(ext_all) <- c("gene_id", ext.lens)
# Per gene, pick the extension column holding the largest total count; ties
# go to the leftmost (shortest) extension via ties.method = "first".
ext_all$final <- as.numeric(ext.lens[max.col(ext_all[, ext.lens], ties.method = "first")])
# Keep only the gene ID and its chosen extension length.
ext_all <- ext_all[, c("gene_id", "final")]
# Read the B. malayi GTF: nine unnamed tab-separated columns (V1..V9),
# where V9 holds the attribute string.
gtf <- read.table("~/Desktop/geneset.gtf", sep = "\t", header = FALSE, quote = "")
# Parse gene_id out of the attribute column, attach each gene's chosen
# extension length from ext_all, and default genes with no match to 0 bp.
gtf_new <- gtf %>%
  mutate(gene_id = str_match(V9, "gene_id \"(.*?)\";")[, 2]) %>%
  left_join(ext_all, by = "gene_id") %>%
  mutate(final = replace_na(final, as.numeric(0)))
# Extend the gene, transcript, terminal exon, and 3' UTR records of each
# transcript by the gene's chosen extension length ("final"), strand-aware:
# "+" features grow at the end coordinate (V5), "-" features grow at the
# start (V4). Extended starts are clamped to 1 so no feature runs off the
# left edge of its contig.
gtf.ext <- gtf_new %>%
# Pull transcript_id and exon_number out of the attribute column (V9).
mutate(transcript_id = str_match(V9, "transcript_id \"(.*?)\";")[,2]) %>%
mutate(exon_number = str_match(V9, "exon_number \"(\\d+)\";")[,2]) %>%
# Features without an exon_number (gene/transcript/UTR rows) get 0 so the
# per-transcript max below is well defined.
mutate(exon_number = replace_na(exon_number, as.character(0))) %>%
mutate(exon_number = as.numeric(exon_number)) %>%
# Flag the LAST exon of each transcript with group_max == "yes"; only that
# exon is extended.
group_by(transcript_id) %>%
mutate(group_max = max(exon_number)) %>%
mutate(group_max = ifelse(exon_number == group_max & V3 == "exon", "yes", "no")) %>%
ungroup() %>%
# Terminal exon: push end right on "+" strand, start left on "-" strand,
# then clamp the start at 1.
mutate(V5 = ifelse(group_max == "yes" & V7 == "+", as.numeric(V5) + as.numeric(final), V5)) %>%
mutate(V4 = ifelse(group_max == "yes" & V7 == "-", as.numeric(V4) - as.numeric(final), V4)) %>%
mutate(V4 = ifelse(group_max == "yes" & V4 < 1, 1, V4)) %>%
# 3' UTR records: same strand-aware extension and clamping.
mutate(V5 = ifelse(V3 == "three_prime_utr" & V7 =="+", as.numeric(V5) + as.numeric(final), V5)) %>%
mutate(V4 = ifelse(V3 == "three_prime_utr" & V7 =="-", as.numeric(V4) - as.numeric(final), V4)) %>%
mutate(V4 = ifelse(V3 == "three_prime_utr" & V4 < 1, 1, V4)) %>%
# Transcript records.
mutate(V5 = ifelse(V3 == "transcript" & V7 =="+", as.numeric(V5) + as.numeric(final), V5)) %>%
mutate(V4 = ifelse(V3 == "transcript" & V7 =="-", as.numeric(V4) - as.numeric(final), V4)) %>%
mutate(V4 = ifelse(V3 == "transcript" & V4 < 1, 1, V4)) %>%
# Gene records.
mutate(V5 = ifelse(V3 == "gene" & V7 =="+", as.numeric(V5) + as.numeric(final), V5)) %>%
mutate(V4 = ifelse(V3 == "gene" & V7 =="-", as.numeric(V4) - as.numeric(final), V4)) %>%
mutate(V4 = ifelse(V3 == "gene" & V4 < 1, 1, V4))
# Name the nine standard GTF fields plus the helper columns added above.
colnames(gtf.ext) <- c("seqname","source","feature","start","end","score","strand","frame","attribute", "gene_id", "final", "transcript_id", "exon_number", "group_max")
# Contig lengths: one row per contig (seqname, len), tab-separated.
contig.len <- read.table("~/Desktop/contig_lengths.txt", sep = "\t", header = FALSE, quote = "")
colnames(contig.len) <- c("seqname","len")
# Clamp plus-strand extensions at the right edge of their contig, then drop
# the helper columns so only the nine standard GTF fields remain.
gtf.ext <- gtf.ext %>%
  left_join(contig.len, by = "seqname") %>%
  mutate(end = ifelse(strand == "+" & end >= len, len, end)) %>%
  select(-len, -group_max, -exon_number, -transcript_id, -gene_id, -final)
# Write the extended GTF: tab-separated, no quoting, no row/column names.
write.table(gtf.ext,"~/Desktop/geneset.ext.gtf", sep = "\t", quote = FALSE, row.names = FALSE, col.names = FALSE)
|
ddb270049c2ac30c3a3ad3b1368ce41521d59a16
|
c555092c911699a657b961a007636208ddfa7b1b
|
/tests/testthat/test-stat-align.R
|
c727073daabd168a9a40d26c6a18803391a51c73
|
[] |
no_license
|
cran/ggplot2
|
e724eda7c05dc8e0dc6bb1a8af7346a25908965c
|
e1b29e4025de863b86ae136594f51041b3b8ec0b
|
refs/heads/master
| 2023-08-30T12:24:48.220095
| 2023-08-14T11:20:02
| 2023-08-14T12:45:10
| 17,696,391
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 986
|
r
|
test-stat-align.R
|
test_that("standard alignment works", {
  # Two groups sampled at interleaved x positions; stat_align must
  # interpolate both series onto a shared x grid before stacking.
  dat <- tibble::tibble(
    g = rep(c("a", "b"), each = 3),
    x = c(1, 3, 5, 2, 4, 6),
    y = c(2, 5, 1, 3, 6, 7)
  )
  plot <- ggplot(dat, aes(x, y, fill = g)) + geom_area(color = "black")
  expect_doppelganger("align two areas", plot)
})
test_that("alignment with cliffs works", {
  # Group "b" repeats x = 4 with two different y values, producing a
  # vertical "cliff" that alignment has to handle without interpolating.
  dat <- tibble::tibble(
    g = c("a", "a", "a", "b", "b", "b", "b"),
    x = c(1, 3, 5, 2, 4, 4, 6),
    y = c(2, 5, 1, 3, 3, 6, 7)
  )
  plot <- ggplot(dat, aes(x, y, fill = g)) + geom_area(color = "black")
  expect_doppelganger("align two areas with cliff", plot)
})
test_that("alignment with negative and positive values works", {
  # Series that cross zero: alignment must behave with mixed-sign y.
  dat <- tibble::tibble(
    g = c(rep("a", 4), rep("b", 2)),
    x = c(1, 2, 3, 8, 2, 6),
    y = c(1, 4, -4, 0, 4, -4)
  )
  plot <- ggplot(dat, aes(x, y, fill = g)) + geom_area(color = "black")
  expect_doppelganger("align two areas with pos/neg y", plot)
})
|
e4c7dd55383d52f7557132fcb400e2e9155b197b
|
b08b7e3160ae9947b6046123acad8f59152375c3
|
/Programming Language Detection/Experiment-2/Dataset/Train/R/sum-of-squares.r
|
4e0140025307fe5f7d6d5f6c5d11edf9b59fbd52
|
[] |
no_license
|
dlaststark/machine-learning-projects
|
efb0a28c664419275e87eb612c89054164fe1eb0
|
eaa0c96d4d1c15934d63035b837636a6d11736e3
|
refs/heads/master
| 2022-12-06T08:36:09.867677
| 2022-11-20T13:17:25
| 2022-11-20T13:17:25
| 246,379,103
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41
|
r
|
sum-of-squares.r
|
# Sum of squares of a small numeric vector.
arr <- c(1, 2, 3, 4, 5)
# x * x is the elementwise equivalent of x^2 for numeric vectors
result <- sum(arr * arr)
|
deb01e4247b7d66bcfb7f14cff97ce9705c28456
|
4b76f1a19c6fc9a8a2263ddda140aff00ec3396a
|
/R/normal_rejection.R
|
90eaa2a4032d5b4c7eb83e23ee21c78aa46903eb
|
[] |
no_license
|
cran/tmvmixnorm
|
db90df7285039e701fde8ce91665fd133327313a
|
cbb7edbc4b3c255eea3ceeaea9e77947c6422ddd
|
refs/heads/master
| 2021-06-27T21:36:10.016413
| 2020-09-18T17:00:02
| 2020-09-18T17:00:02
| 145,902,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
normal_rejection.R
|
#' Normal rejection sampling
#'
#' \code{norm_rej} draws from a standard normal truncated to \code{[a, b]}
#' by repeatedly sampling N(0, 1) until a draw lands inside the interval.
#'
#' @param a lower bound (may be \code{-Inf})
#' @param b upper bound; must satisfy \code{b >= a} (default \code{Inf})
#'
#' @return \code{norm_rej} returns a list
#' \code{x}: sampled value; and
#' \code{acc}: total number of draw used.
#'
#' @examples
#' set.seed(1)
#' norm_rej(a=1, b=Inf)
#'
norm_rej <- function(a, b = Inf) {
  # Validate inputs up front: an empty interval (a > b) would make the
  # rejection loop below spin forever.
  stopifnot(is.numeric(a), is.numeric(b),
            length(a) == 1L, length(b) == 1L, a <= b)
  acc <- 0
  repeat {
    x <- stats::rnorm(1)
    acc <- acc + 1  # accept x at acc-th draw
    # scalar comparison, so use the short-circuiting && form
    if (x >= a && x <= b) {
      return(list(x = x, acc = acc))
    }
  }
}
|
56fb082457271bcf28cca6faac84161a7a592c59
|
2244c60d2e337787768123af843783825496e14a
|
/man/BlaAlt.Rd
|
30f3a62e49d662c82631495b8f11508e3ba07e44
|
[] |
no_license
|
cran/pvrank
|
a46f30b5cfc4df678937f4e05ea9d6c3346b16a7
|
c03170854caf8df1eabc7e831ee0f82deeae3db8
|
refs/heads/master
| 2020-12-09T09:09:57.472522
| 2018-05-17T07:38:40
| 2018-05-17T07:38:40
| 31,172,247
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,062
|
rd
|
BlaAlt.Rd
|
\name{BlaAlt}
\alias{BlaAlt}
\docType{data}
\title{Pairs of measurements of forced expiratory volume}
\description{ A problem with the use of the correlation coefficient between the first and second measurements is that there is no reason to suppose that their order is important. If the order were important the measurements would not be repeated observations of the same thing. We could reverse the order of any of the pairs and get a slightly different value of the correlation coefficient between repeated measurements.}
\usage{data(BlaAlt)}
\format{
A data frame with 20 observations on the following two variables.
\describe{
\item{Fev1}{First FEV (litres).}
\item{Fev2}{Second Fev (litres).}
}
}
\details{Pairs of measurements of forced expiratory volume (litres) a few weeks apart from 20 Scottish school children.}
\examples{
data(BlaAlt)
head(BlaAlt)
}
\source{
Bland, M. J. and Altman, D. G. (1996). "Measurement error and correlation coefficients". \emph{BMJ: British Medical Journal}, \bold{313}, 41--42.
}
\keyword{datasets}
|
22b192431de25ab3c9f5ace8800c5ada20cbcd2b
|
f43db456bb3b51ab6bc71d8a4adf5752df699109
|
/R/offenses_known.R
|
a569c0d0350f3830dfd5e37c8a08b8e3609c1ec9
|
[] |
no_license
|
sefabey/crime_data
|
333ebf79d95a80af1ef4d452b136f19ad4115d3c
|
f1c2b8d77dd794782387c70d377d0ac49c7022da
|
refs/heads/master
| 2020-09-10T16:13:02.647384
| 2019-10-27T01:40:40
| 2019-10-27T01:40:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,337
|
r
|
offenses_known.R
|
# Top-level driver: clean UCR "offenses known" data (monthly + yearly) and
# archive the results. All worker functions come from the sourced files below.
source(here::here('R/crosswalk.R'))
source(here::here('R/utils/global_utils.R'))
source(here::here('R/make_sps/make_offenses_known_sps.R'))
source(here::here('R/utils/offenses_known_utils.R'))
crosswalk <- read_merge_crosswalks()
# NOTE(review): get_all_return_a_monthly is defined *below* this call in the
# same file; sourcing this script top-to-bottom fails unless one of the files
# sourced above also defines it -- verify the intended definition order.
get_all_return_a_monthly(crosswalk)
offenses_known_yearly <- get_data_yearly("offenses_known",
"1960_2017",
"offenses_known_yearly_",
crosswalk)
global_checks(offenses_known_yearly)
# NOTE(review): setwd() changes process-global state; prefer full paths.
setwd(here::here("clean_data/offenses_known"))
save_as_zip("ucr_offenses_known_monthly_1960_2017_", pattern = "month")
save_as_zip("ucr_offenses_known_yearly_1960_2017_", pattern = "year")
# Read every raw Return A (offenses known) fixed-width file, clean it, and
# save one monthly dataset per input file.
#
# @param crosswalk data frame of agency identifiers joined in by "ori"
#   (from read_merge_crosswalks) -- exact schema defined elsewhere.
# Side effects: changes working directory, writes files, sleeps between
# iterations. Helpers (read_ascii_setup, fix_years, save_files, ...) come
# from the sourced utils files.
get_all_return_a_monthly <- function(crosswalk) {
setwd(here::here("raw_data/offenses_known_from_fbi"))
files <- list.files()
# skip SPSS setup files that live alongside the data
files <- files[!grepl("sps", files)]
for (file in files) {
# re-enter the raw-data dir each pass (save_files below changes it)
setwd(here::here("raw_data/offenses_known_from_fbi"))
data <- read_ascii_setup(file, here::here("setup_files/ucr_return_a.sps"))
# drop rows with no agency identifier and unneeded columns
data <-
data %>%
dplyr::filter(!is.na(ori)) %>%
dplyr::select(-identifier_code,
-population_source,
-contains("last_population"),
-contains("under50"),
-population_1_county,
-population_1_msa,
-population_2_county,
-population_2_msa,
-population_3_county,
-population_3_msa,
-population_source,
-sequence_number,
-agency_state_name,
-covered_by_population_group,
-contains("blank"),
-population_group_in_previous_year) %>%
dplyr::mutate_at(dplyr::vars(tidyselect::matches("card")), remove_special_characters) %>%
dplyr::mutate_at(dplyr::vars(tidyselect::matches("mail")), crime_remove_special_characters) %>%
dplyr::mutate_if(is.character, tolower) %>%
# total population = sum of the three population components
dplyr::mutate(year = fix_years(year),
population = rowSums(.[, grepl("population_[1-3]",
names(.))]),
ori = toupper(ori),
state_abb = make_state_abb(state),
covered_by_ori = as.character(covered_by_ori)) %>%
dplyr::select(-population_1,
-population_2,
-population_3)
data <- fix_all_negatives(data)
data <- fix_outliers(data)
data <- month_wide_to_long(data)
# manual correction: NA out bad burglary counts for agency LANPD00 in 2017
if (data$year[1] == 2017) {
data$unfound_burg_attempted[data$ori %in% "LANPD00"] <- NA
data$unfound_burg_total[data$ori %in% "LANPD00"] <- NA
}
data <- make_agg_assault(data)
data <- make_index_crimes(data)
# juvenile_age of 0 is treated as missing
data$juvenile_age[data$juvenile_age == 0] <- NA
data <- dplyr::left_join(data, crosswalk, by = "ori")
data <- reorder_columns(data, crosswalk)
# Save the data in various formats
setwd(here::here("clean_data/offenses_known"))
save_files(data = data,
year = data$year[1],
file_name = "offenses_known_monthly_",
save_name = "offenses_known_monthly_")
# free memory before the next (large) file; sleep presumably lets the
# GC/filesystem settle -- TODO confirm the sleep is still needed
rm(data); gc(); Sys.sleep(3)
}
}
|
53462820a7db31a9e4c6d85a7ff15cab50c0876c
|
55c59b150b49de2123191bbd9e62cc5baed5c52b
|
/man/reg.mle.lda.Rd
|
02e7bcd0ddb072d85ecae62da66e0d6f43ee8f05
|
[] |
no_license
|
RfastOfficial/Rfast2
|
b45c43d627571f5f1c5bbf293454d92643853146
|
23c639c345d526ac05ce8b1613d9671975a8402b
|
refs/heads/master
| 2023-08-08T01:15:34.684148
| 2023-07-21T10:48:12
| 2023-07-21T10:48:12
| 213,210,384
| 27
| 4
| null | 2023-01-20T11:23:56
| 2019-10-06T17:14:37
|
C++
|
UTF-8
|
R
| false
| false
| 1,391
|
rd
|
reg.mle.lda.Rd
|
\name{Regularised maximum likelihood linear discriminant analysis}
\alias{reg.mle.lda}
\title{Regularised maximum likelihood linear discriminant analysis
}
\description{
Regularised maximum likelihood linear discriminant analysis.
}
\usage{
reg.mle.lda(xnew, x, ina, lambda)
}
\arguments{
\item{xnew}{
A numerical vector or a matrix with the new observations, continuous data.
}
\item{x}{
A matrix with numerical data.
}
\item{ina}{
A numerical vector or factor with consecutive numbers indicating the group to which each
observation belongs to.
}
\item{lambda}{
A vector of regularization values \eqn{\lambda} such as (0, 0.1, 0.2,...).
}
}
\details{
Regularised maximum likelihood linear discriminant analysis is performed. The function is not extremely fast, yet is pretty fast.
}
\value{
A matrix with the predicted group of each observation in "xnew".
Every column corresponds to a \eqn{\lambda} value. If you have just on value of \eqn{\lambda}, then
you will have one column only.
}
%\references{
%
%}
\author{
Michail Tsagris.
R implementation and documentation: Michail Tsagris \email{mtsagris@uoc.gr}.
}
%\note{
%% ~~further notes~~
%}
\seealso{
\code{ \link{regmlelda.cv} \link{mle.lda}, \link{fisher.da}, \link{big.knn}, \link{weibull.nb} }
}
\examples{
x <- as.matrix(iris[, 1:4])
ina <- iris[, 5]
a <- reg.mle.lda(x, x, ina, lambda = seq(0, 1, by = 0.1) )
}
|
80bebfd18099b6eabdb0de2ba60b6875c52b0409
|
407a7bb9c951051c93805b580056832f8d32d5bd
|
/src/R/init.R
|
35e40d679b2570d2137e7dde3d41a6de6752e942
|
[
"MIT"
] |
permissive
|
qutang/stereotypy_ubicomp_14
|
ea8bbfcb47f27fde42663bd168638152f3b85588
|
a9c1d2e114d74d2b34f62e223811d4d480479559
|
refs/heads/master
| 2023-01-29T17:55:43.201258
| 2020-12-06T23:51:10
| 2020-12-06T23:51:10
| 319,152,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,917
|
r
|
init.R
|
#!/usr/bin/env r
# NOTE(review): rm(list=ls()) wipes the caller's workspace; acceptable for a
# standalone entry-point script, but avoid if this file is ever source()d.
rm(list=ls())
### Set up root path
source("src/R/setRoot.R")
### Check libraries, if not install it ====
# Ensure the given packages are installed, installing any that are missing,
# then attach every one of them.
#
# @param package1 first package name (character)
# @param ... further package names (character)
# Side effects: may install from CRAN; attaches each package; prints status.
check_required_libraries <- function (package1, ...) {
  # convert arguments to vector
  packages <- c(package1, ...)
  # Query the installed-package index once, not once per package:
  # installed.packages() scans every library path and is slow.
  installed <- rownames(installed.packages())
  for (package in packages) {
    if (package %in% installed) {
      print(paste("Found", package))
    } else {
      print(paste("Installing", package))
      install.packages(package, verbose = FALSE,
                       repos = "http://cran.us.r-project.org")
    }
    # character.only = TRUE lets library() take the name from a variable,
    # replacing the original do.call("library", ...) indirection.
    library(package, character.only = TRUE)
  }
}
# NOTE(review): rJava is attached here, but java.parameters is only set at
# the bottom of this file -- JVM options must be set *before* rJava starts
# the JVM, so -Xmx16g may not take effect. Verify the intended order.
check_required_libraries("SOAR", "colorspace", "foreach", "R.matlab", "plyr", "caret", "sampling", "e1071", "rChoiceDialogs", "rJava", "RWeka")
### Constants ====
source("src/R/constants.R")
### Caching feature sets ====
source("src/R/utils/load_featureset_mat.stereotypy.R")
# study1_folder / study2_folder are presumably defined in constants.R -- confirm
study1_session_folders = list.files(study1_folder, include.dirs=TRUE, full.names=TRUE, no..=TRUE)
study2_session_folders = list.files(study2_folder, include.dirs=TRUE, full.names=TRUE, no..=TRUE)
feature_filename = "featureVectorAndLabels.mat"
# "nothing" matches no folder name, so the skip branch below is currently dead
exclude_files = "nothing" # no bad dataset
for(folder in study1_session_folders){
if(!grepl(pattern=exclude_files, x=folder, ignore.case=TRUE, perl=TRUE)){
load_featureset_mat(folder, feature_filename)
}else{
print(paste("Bad dataset skipped:", basename(folder)))
}
}
for(folder in study2_session_folders){
if(!grepl(pattern=exclude_files, x=folder, ignore.case=TRUE, perl=TRUE)){
load_featureset_mat(folder, feature_filename)
}else{
print(paste("Bad dataset skipped:", basename(folder)))
}
}
### Set the maximum memory of JVM to be 16 gigabit
options( java.parameters = "-Xmx16g" )
|
18b76359380fca0b21e748e20a6d6f967a980aca
|
595e5a281c84fbb3cc2f52f00b1c326af533019c
|
/man/pdf_doc.Rd
|
2d132a5bfdcf4a81a2c3bec34a30edbf2d1961f8
|
[] |
no_license
|
jiang-hang/rwp
|
e23a3e0f17d48fdc58e2451caf15f7838854c22f
|
2e0a91020fe23c117b9a380e9cec618e7ec2fd78
|
refs/heads/master
| 2021-01-10T09:07:53.192403
| 2016-10-16T20:09:33
| 2016-10-16T20:09:33
| 54,447,467
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 310
|
rd
|
pdf_doc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rwordpress.R
\name{pdf_doc}
\alias{pdf_doc}
\title{replacement of rmarkdown::pdf_document}
\usage{
pdf_doc(inputfile)
}
\arguments{
\item{inputfile}{value}
}
\value{
returndes
}
\description{
description
}
\examples{
x=c(1,2,3)
}
|
0eb501c897f57acae576b8e0b115419b2a1eeaa1
|
428dad6718179a377e250c40d0adf0c5070e7d51
|
/functions/cv_analysis.R
|
b89dfc66f44619b9900dfec038ae1954a9f7cb60
|
[] |
no_license
|
michbur/malarial_signal_peptides
|
5156ea01192768cc92c0580b0e7e7c9db860fa23
|
aa6651cacfa39970963f60d605f59e185b115967
|
refs/heads/master
| 2020-04-10T03:56:10.127236
| 2016-09-28T04:43:41
| 2016-09-28T04:43:41
| 40,606,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,609
|
r
|
cv_analysis.R
|
#' Get performance measures of cross-validation
#'
#' Averages per-group metrics (from \code{calc_metrics}) within each fold,
#' then stacks all folds of all repetitions into one long-format data frame.
#'
#' @param folds cross-validation results (very large list): a list of
#'   repetitions, each a list of folds, each a list of per-group matrices
#'   with columns \code{"cs_real"} and \code{"prob"}.
#' @inheritParams calc_metrics
#'
#' @return a data frame of cross-validation performance measures with
#'   columns repetition, encoding, measure, value.
perf_rep <- function(folds, threshold = 0.5) {
  # seq_along() is safe when folds is empty; the original 1L:length(folds)
  # would yield c(1, 0) and index out of bounds.
  do.call(rbind, pblapply(seq_along(folds), function(repetition_id) {
    res <- t(sapply(folds[[repetition_id]], function(single_fold)
      rowMeans(sapply(single_fold, function(single_group)
        # calc_metrics is defined in benchmark_functions.R
        unlist(calc_metrics(as.numeric(!is.na(single_group[, "cs_real"])),
                            single_group[, "prob"], threshold))
      ))
    ))
    res <- melt(res)  # reshape2: metric matrix -> long data frame
    colnames(res) <- c("encoding", "measure", "value")
    res[["encoding"]] <- factor(res[["encoding"]])
    cbind(repetition = factor(rep(repetition_id, nrow(res))), res)
  }))
}
#' Create data for cvplot
#'
#' Create data for cvplot (cross-validation results for all encodings):
#' per-encoding means of AUC/Sens/Spec/MCC in wide format, with the
#' best-sensitivity and best-specificity encodings labelled "1" and "2".
#' @param rep_res results cross-validation as produced by \code{\link{perf_rep}}
#'
#' @return a data frame ready for p1 plot: one row per encoding with columns
#'   encoding (label "1"/"2"/""), AUC, MCC, Sens, Spec.
#compute results of cv analysis
create_cvplotdat <- function(rep_res) {
# mean of each measure per encoding across repetitions/folds
mean_res <- rep_res %>% filter(measure %in% c("AUC", "Sens", "Spec", "MCC")) %>%
group_by(encoding, measure) %>% summarise(mean_value = mean(value, na.rm = TRUE)) %>% ungroup
# NOTE(review): best_enc (top decile per measure) is computed but no longer
# used below -- the filter that consumed it is commented out. Confirm it
# can be removed.
best_enc <- lapply(c("AUC", "Sens", "Spec", "MCC"), function(i)
mean_res %>% filter(measure == i) %>%
filter(mean_value > quantile(mean_value, 0.9, na.rm = TRUE)) %>%
droplevels %>%
#arrange(desc(mean_value)) %>%
select(encoding) %>%
unlist %>%
as.character %>%
as.numeric)
# reshape to one row per encoding, one column per measure
p1_dat <- rep_res %>% filter(#encoding %in% unique(unlist(best_enc)),
measure %in% c("AUC", "Spec", "Sens", "MCC")) %>% droplevels %>%
group_by(encoding, measure) %>% summarize(mean = mean(value)) %>%
dcast(encoding ~ measure, value.var = "mean")
# drop encodings with identical mean profiles, keeping the first
p1_dat <- p1_dat[!duplicated(p1_dat[, -1]), ]
# blank out labels, then tag the best-Spec ("2") and best-Sens ("1") rows
p1_dat[, "encoding"] <- rep("", nrow(p1_dat))
p1_dat[p1_dat[, "Spec"] == max(p1_dat[, "Spec"]), "encoding"] <- "2"
#p1_dat[p1_dat[, "Sens"] > 0.85 & p1_dat[, "Spec"] > 0.94, "encoding"] <- "3"
p1_dat[p1_dat[, "Sens"] == max(p1_dat[, "Sens"]), "encoding"] <- "1"
p1_dat[, "encoding"] <- as.factor(p1_dat[, "encoding"])
p1_dat
}
#' Create p1 plot
#'
#' Create cross-validation plot and dynamically generate caption as well as table.
#' @param p1_dat processed data for the plot as produced by \code{\link{create_cvplotdat}}.
#'
#' @return a list, the first element is a plot, the second is a caption, the third is
#' the table (LaTeX via xtable).
plot_cvplot <- function(p1_dat) {
# sensitivity/specificity scatter; labelled points are the "best" encodings
p1 <- ggplot(p1_dat, aes(x = Sens, y = Spec, label = encoding, colour = encoding == "", fill = encoding == "")) +
geom_point(size = 5, shape = 21) +
geom_text(size = 6, hjust = -0.75, vjust = 1) +
scale_colour_manual(values = c("red","blue")) +
scale_fill_manual(values = c(adjustcolor("red", 0.25), adjustcolor("blue", 0.25))) +
scale_x_continuous("Sensitivity\n") +
scale_y_continuous("Specificity") +
my_theme +
guides(colour = FALSE, fill = FALSE)
#caption for cvres
cpt <- paste0("Results of cross-validation. 1. The encoding providing the best sensitivity (AUC = ",
round(mean(p1_dat[p1_dat[, "encoding"] == "1", "AUC"]), 4),
", MCC = ",
round(mean(p1_dat[p1_dat[, "encoding"] == "1", "MCC"]), 4),
"). 2. The encoding providing the best specificity (AUC = ",
round(mean(p1_dat[p1_dat[, "encoding"] == "2", "AUC"]), 4),
", MCC = ",
round(mean(p1_dat[p1_dat[, "encoding"] == "2", "MCC"]), 4),
").")
#cv table for the best encoding (69)
# NOTE(review): rep_res here is NOT the p1_dat argument -- it is read from
# the calling environment (a hidden global dependency), and encoding 69 is
# hard-coded. Consider passing both in explicitly.
tab <- filter(rep_res, encoding == 69, measure %in% c("AUC", "Sens", "Spec", "MCC")) %>%
group_by(measure) %>%
summarize(m_value = mean(value), sd_value = sd(value)) %>%
droplevels %>%
data.frame
levels(tab[["measure"]]) <- c("AUC", "Sensitivity", "Specificity", "MCC")
# shade every other row of the LaTeX table
rws <- seq(1, nrow(tab) - 1, by = 2)
colnames(tab) <- c("Measure", "Mean", "SD")
col <- rep("\\rowcolor[gray]{0.85}", length(rws))
xtab <- print(xtable(tab, caption = "Performance measures for the best encoding. 60 repetitions of cross-validation.",
label = "tab:perfmeas", digits = 4), include.rownames = FALSE, booktabs = TRUE,
add.to.row = list(pos = as.list(rws), command = col), print.results = FALSE,
caption.placement = "top")
list(plot = p1,
cpt = cpt,
xtab = xtab)
}
|
53df24aaaa3c9282d818eb2206561fe54629d990
|
1f5590d3276d541e8a916bc6d589d6dd98562854
|
/man/display.sym.table.Rd
|
503c84312b3c8b7a7a5e6d6fbf6229e2e0b9de03
|
[] |
no_license
|
Frenchyy1/RSDA
|
0e8c683e79c73c214b3487991ea3d6e2b8f8b621
|
60445a2749d8f009a748a158f89f53d022edb6f0
|
refs/heads/master
| 2020-04-14T14:27:57.888698
| 2018-10-10T19:36:39
| 2018-10-10T19:36:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,018
|
rd
|
display.sym.table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/display.sym.table.R
\name{display.sym.table}
\alias{display.sym.table}
\title{display.sym.table}
\usage{
display.sym.table(sym.data)
}
\arguments{
\item{sym.data}{Shoud be a Symbolic Data table that have been read with read.sym.table(...)}
}
\value{
Not value.
}
\description{
This function display a symbolic data table tha have been read by read.sym.table(...)
}
\details{
The output will be the symbolic data table in the screen.
}
\examples{
data(example3)
display.sym.table(example3)
}
\references{
Billard, L and Diday, E. (2007). Symbolic Data Analysis: Conceptual Statistics and Data
Mining (Wiley Series in Computational Statistics).
Billard, L and Diday, E. (2003). From the Statistics of Data to the Statistics of
Knowledge: Symbolic Data Analysis. Journal of the American of the Statistical Association, USA.
}
\seealso{
read.sym.table
}
\author{
Oldemar Rodriguez Rojas
}
\keyword{Display}
\keyword{Symbolic}
\keyword{Table}
|
fafb9550046c81aee96a6c7fcf0dbe7c0bec34e5
|
e635f968dc35fda610f15836a8e293bf4a7cce81
|
/TheoreticalRSquared.R
|
02677e703d9babb8c28571917045a91c0c1d552d
|
[] |
no_license
|
ggazzola/DBC-RCPI
|
40a0199ea46a11b22df36e693db1c481e46614de
|
4ff6973a37ba82c2cef9b2368b368bc42480bbe4
|
refs/heads/master
| 2022-01-09T01:30:12.135012
| 2019-05-02T15:46:06
| 2019-05-02T15:46:06
| 117,899,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,883
|
r
|
TheoreticalRSquared.R
|
# Theoretical X-Y correlations and covariances for a linear model.
#
# Assumes Y = beta[1]*X_1 + ... + beta[p]*X_p + betaNoise*Eps with
# Var(Eps) = 1 and Eps independent of X_1, ..., X_p; CovMat is the
# theoretical covariance matrix of the X's.
#
# @param beta vector of theoretical regression coefficients (length p)
# @param betaNoise coefficient on Eps, i.e. noise variance is betaNoise^2
# @param CovMat p x p theoretical covariance matrix of the predictors
# @return list with:
#   theoXYCorVect  - Cor(X_j, Y) for each predictor
#   theoXYCovVect  - Cov(X_j, Y) for each predictor
#   theoXSumVar    - Var(beta' X), variance of the noise-free output
#   theoXStdevVect - theoretical standard deviation of each X_j
TheoXYCorCov <- function(beta, betaNoise, CovMat) {
  # Var(beta' X) = beta' CovMat beta -- matrix form replaces the original
  # O(p^2) double loop.
  theoXSumVar <- as.numeric(t(beta) %*% CovMat %*% beta)
  # standard deviation of Y (betaNoise^2 is the variance of Eps)
  theoYStdev <- sqrt(theoXSumVar + betaNoise^2)
  # Cov(X, Y) = CovMat %*% beta, since Cov(X_a, Eps) = 0 for every a
  theoXYCovVect <- as.numeric(CovMat %*% beta)
  theoXStdevVect <- sqrt(diag(CovMat))
  theoXYCorVect <- theoXYCovVect / (theoXStdevVect * theoYStdev)
  res <- list(theoXYCorVect = theoXYCorVect, theoXYCovVect = theoXYCovVect,
              theoXSumVar = theoXSumVar, theoXStdevVect = theoXStdevVect)
  return(res)
}
# Compute the betaNoise value that yields a theoretical R^2 of desiredRsq.
# theoXYCovVect, theoXSumVar and theoXStdevVect come from TheoXYCorCov
# (they do not depend on betaNoise); CovMat must match that call.
CalibrateBetaNoise <- function(theoXYCovVect, theoXSumVar, theoXStdevVect, CovMat, desiredRsq) {
  # Rounding to many digits keeps the sqrt argument from dipping below
  # zero through numerical micro-errors when desiredRsq = 1.
  precision <- 32
  scaled_cov <- theoXYCovVect / theoXStdevVect^2
  gamma <- round(t(scaled_cov) %*% solve(CovMat) %*% scaled_cov, precision)
  clean_var <- round(theoXSumVar, precision)
  as.numeric(sqrt(gamma / desiredRsq - clean_var))
}
# Theoretical R^2 of the linear model
# Y = beta[1]*X_1 + ... + beta[p]*X_p + betaNoise*Eps
# (same assumptions as TheoXYCorCov). theoXYCorVect is the correlation
# vector from a TheoXYCorCov call; CovMat must match that call.
TheoRSquared <- function(theoXYCorVect, CovMat) {
  # R^2 = rho' CovMat^{-1} rho, where rho = Cor(X, Y)
  quad_form <- t(theoXYCorVect) %*% solve(CovMat) %*% theoXYCorVect
  as.numeric(quad_form)
}
# Convenience wrapper: the betaNoise value giving a theoretical R^2 of
# desiredRsq for coefficients beta and predictor covariance CovMat.
NoiseLevel <- function(beta, CovMat, desiredRsq) {
  # betaNoise does not affect the X-side quantities, so pass 0 here.
  parts <- TheoXYCorCov(beta, betaNoise = 0, CovMat)
  CalibrateBetaNoise(parts$theoXYCovVect, parts$theoXSumVar,
                     parts$theoXStdevVect, CovMat, desiredRsq)
}
#dims = 12
#Cov <- matrix(0, dims, dims)
#Cov[1:4,1:4] = 0.9
#diag(Cov)[] <- 1
#beta = c(5,5,2,0,-5,-5,-2,0, 0, 0, 0, 0)
#desiredRsq = 0.69
#TheoXYCorCovOut = TheoXYCorCov(beta, betaNoise = 0, Cov)
#theoXYCovVect = TheoXYCorCovOut$theoXYCovVect
#theoXSumVar = TheoXYCorCovOut$theoXSumVar
#theoXStdevVect = TheoXYCorCovOut$theoXStdevVect
#betaNoiseVal = CalibrateBetaNoise(theoXYCovVect, theoXSumVar, theoXStdevVect , Cov, desiredRsq)
#TheoXYCorCovOut = TheoXYCorCov(beta, betaNoise = betaNoiseVal, Cov)
#theoXYCorVect = TheoXYCorCovOut$theoXYCorVect
#resultingRsq = TheoRSquared(theoXYCorVect, Cov)
#nPts = 100000
#X = mvrnorm(nPts, rep(0, nrow(Cov)), Sigma = Cov)
#cleanY = X%*%beta
#Y = cleanY + rnorm(nPts, 0, betaNoiseVal)
#(cor(cleanY, Y))^2
|
a545eeda1fb5cedb813b5ea7e8997812bc751400
|
51703d55be207df29decc17441c323be93b8adaf
|
/HW2/Solutions/8.R
|
75b902087850db21882148e3f1988771c3bfb840
|
[] |
no_license
|
Mahbodmajid/DataAnalysis
|
b4ee16f39e91dff0bbeea058e92162f91f28d84c
|
127e59a101d4847171fcb7728db38f4405a10e38
|
refs/heads/master
| 2021-06-10T02:04:52.153255
| 2019-11-25T00:58:18
| 2019-11-25T00:58:18
| 141,756,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 256
|
r
|
8.R
|
# Scatterplot of handset weight against battery capacity, plus their Pearson
# correlation. Assumes a data frame `mobile` with columns battery_mah and
# weight, and that dplyr/ggplot2 are attached -- TODO confirm upstream.
valid_mob <- mobile %>%
filter(!is.na(battery_mah), !is.na(weight))
# NOTE(review): na.rm = T uses the reassignable alias T; prefer TRUE.
ggplot(valid_mob, aes(x = battery_mah, y = weight)) +
geom_point(na.rm = T, size = 0.5)+
xlab("Battery (m A.h)")+
ylab("Weight (g)")
# correlation computed on the complete cases kept above
cor(valid_mob$battery_mah, valid_mob$weight)
|
8cbc9fe9faf171c97c4f59600056361c90ca4c98
|
b7cb6c3b387515f1969278137899a158b75b79ae
|
/json/285.r
|
75f044b9702d25ab24c709cc9774326e9ee368c7
|
[] |
no_license
|
rweekly/rweekly.org
|
92d8528cde9336cfcf7dfd307de116c61ac73741
|
719d2bff2e16d716d200561384111655d772f829
|
refs/heads/gh-pages
| 2023-09-03T19:17:18.733983
| 2023-09-01T08:49:24
| 2023-09-01T08:49:24
| 59,336,738
| 676
| 559
| null | 2023-09-14T15:33:23
| 2016-05-21T02:03:54
|
R
|
UTF-8
|
R
| false
| false
| 7,876
|
r
|
285.r
|
[
{
"title": "A Talk About Campaign Finance in Brazil",
"href": "https://feedproxy.google.com/~r/danielmarcelino/~3/qWUMB985JaA/"
},
{
"title": "Applied Statistical Theory: Quantile Regression",
"href": "https://mathewanalytics.com/2015/11/13/applied-statistical-theory-quantile-regression/"
},
{
"title": "Laplace’s liberation army",
"href": "https://gianlubaio.blogspot.com/2012/10/laplaces-liberation-army.html"
},
{
"title": "R/Shiny for clinical trials: simple randomization tables",
"href": "https://talesofr.wordpress.com/2016/06/09/rshiny-for-simple-randomization-tables/"
},
{
"title": "Fix missing dates with R",
"href": "http://plausibel.blogspot.com/2011/09/try-to-do-this-in-stata.html"
},
{
"title": "Chord progressions of 5 000 songs!",
"href": "http://amitkohli.com/chord-progressions-of-5-000-songs/"
},
{
"title": "48 Industries (Dendrogram Ordered) Over 50 Years",
"href": "http://timelyportfolio.blogspot.com/2012/08/48-industries-dendrogram-ordered-over.html"
},
{
"title": "Access attribute_hidden Functions in R Packages",
"href": "http://biostatmatt.com/archives/280"
},
{
"title": "A better way of visualising income distributions with zeroes and negatives",
"href": "http://ellisp.github.io/blog/2015/08/21/visualising-distributions"
},
{
"title": "Sentiment analysis on my girlfriend’s text messages",
"href": "http://amitkohli.com/sentiment-analysis-on-my-girlfriends-text-messages/"
},
{
"title": "Citations in markdown using knitr",
"href": "https://web.archive.org/web/http://www.carlboettiger.info/wordpress/archives/4352"
},
{
"title": "highlight R syntax in wordpress using wp-codebox",
"href": "https://web.archive.org/web/http://www.cwcity.de/fehler/404.php"
},
{
"title": "REIT Momentum in Quantstrat",
"href": "http://timelyportfolio.blogspot.com/2011/06/reit-momentum-in-quantstrat.html"
},
{
"title": "gtrendsR 1.3.3",
"href": "http://dirk.eddelbuettel.com/blog/2016/03/08/"
},
{
"title": "Maps with R, part… n+1",
"href": "http://blog.free.fr/"
},
{
"title": "Cricket fever again",
"href": "http://www.rcasts.com/2011/05/cricket-fever-again.html"
},
{
"title": "Handling required and missing R packages in Microsoft R Services",
"href": "https://tomaztsql.wordpress.com/2016/08/11/handling-required-and-missing-r-packages-in-microsoft-r-services/"
},
{
"title": "Machine Learning: Definition of %Var(y) in R’s randomForest package’s regression method",
"href": "http://hack-r.com/machine-learning-definition-of-vary-in-rs-randomforest-packages-regression-method/"
},
{
"title": "Embed R Code with Syntax Highlighting on your Blog",
"href": "http://www.gettinggeneticsdone.com/2010/09/embed-rstats-code-with-syntax.html"
},
{
"title": "THE FINAL FOUR – Drag Race season 5, episode 11 predictions",
"href": "http://badhessian.org/2013/04/the-final-four-drag-race-season-5-episode-11-predictions/"
},
{
"title": "New R User Group in Chicago",
"href": "https://web.archive.org/web/http://blog.revolution-computing.com/2010/04/new-r-user-group-in-chicago.html"
},
{
"title": "R Recommendation Contest Launches on Kaggle",
"href": "http://www.johnmyleswhite.com/notebook/2010/10/10/r-recommendation-contest-launches-on-kaggle/"
},
{
"title": "Scheduling R Tasks via Windows Task Scheduler",
"href": "https://trinkerrstuff.wordpress.com/2015/02/11/scheduling-r-tasks-via-windows-task-scheduler/"
},
{
"title": "R: parallel processing using multicore package",
"href": "http://www.compbiome.com/2010/04/r-parallel-processing-using-multicore.html"
},
{
"title": "Integrating R with C++: Rcpp, RInside, and RProtobuf",
"href": "https://opensource.googleblog.com/2010/10/integrating-r-with-c-rcpp-rinside-and.html"
},
{
"title": "HIBPwned on CRAN",
"href": "https://itsalocke.com/hibpwned-on-cran/"
},
{
"title": "R 3.0.3 is released",
"href": "https://www.r-statistics.com/2014/03/r-3-0-3-is-released/"
},
{
"title": "Cascading style sheets for R plots (via the Rcssplot package)",
"href": "https://www.r-bloggers.com/cascading-style-sheets-for-r-plots-via-the-rcssplot-package/"
},
{
"title": "Because it’s Friday: How Machines Work",
"href": "http://blog.revolutionanalytics.com/2010/08/because-its-friday-how-machines-work.html"
},
{
"title": "R for Developers course",
"href": "http://www.quantide.com/courses-rdev/"
},
{
"title": "Mebane Faber Tactical Asset Allocation in R",
"href": "http://petewerner.blogspot.com/2012/04/mebane-faber-tactical-asset-allocation.html"
},
{
"title": "Using parameter and multiparameters with sp_execute_external_script",
"href": "https://tomaztsql.wordpress.com/2016/08/01/using-parameter-and-multiparameters-with-sp_execute_external_script/"
},
{
"title": "Exposing R-script as API",
"href": "http://www.dataperspective.info/2015/04/r-as-service.html"
},
{
"title": "Loops, Conditionals and Variables: A Basic Simulation in R",
"href": "http://www.psychwire.co.uk/2011/05/loops-conditionals-and-variables-a-basic-simulation-in-r/"
},
{
"title": "Venn Colors: Happy Bday, John Venn",
"href": "http://data-steve.github.io/venn-colors-happy-bday-john-venn/"
},
{
"title": "A Chart of Recent Comrades Marathon Winners",
"href": "http://www.exegetic.biz/blog/2013/07/a-chart-of-recent-comrades-marathon-winners/"
},
{
"title": "GoodReads: Exploratory data analysis and sentiment analysis (Part 2)",
"href": "http://datascienceplus.com/exploratory-data-analysis-and-sentiment-analysis/"
},
{
"title": "R is now the number two statistical computing program in scholarly use",
"href": "http://www.decisionsciencenews.com/2016/06/20/r-now-number-two-package-scholarly-use/"
},
{
"title": "Data Manipulation with dplyr",
"href": "http://datascienceplus.com/data-manipulation-with-dplyr/"
},
{
"title": "A Grid Search for The Optimal Setting in Feed-Forward Neural Networks",
"href": "https://statcompute.wordpress.com/2013/02/03/a-grid-search-for-the-optimal-setting-in-feed-forward-neural-networks/"
},
{
"title": "Flexmix taks long time…",
"href": "http://sgsong.blogspot.com/2010/07/flexmix-taks-long-time.html"
},
{
"title": "an email exchange about integral representations",
"href": "https://xianblog.wordpress.com/2015/04/08/an-email-exchange-about-integral-representations/"
},
{
"title": "Less than negative?",
"href": "http://the-praise-of-insects.blogspot.com/2010/09/less-than-negative.html"
},
{
"title": "ogdindiar: R package to easily access Open Government Data from India Portal",
"href": "http://justanotherdatablog.blogspot.com/2015/06/ogdindiar-r-package-to-easily-access.html"
},
{
"title": "Continuous Integration with OpenCPU",
"href": "https://www.opencpu.org/posts/continuous-integration-of-R-packages/"
},
{
"title": "Tab completion",
"href": "http://thedatamonkey.blogspot.com/2011/01/tab-completion.html"
},
{
"title": "Hurricanes and Reproducible Research",
"href": "https://web.archive.org/web/http://pirategrunt.com/2013/11/08/hurricanes-and-reproducible-research/"
},
{
"title": "Easy cell statistics for factorial designs",
"href": "https://statmethods.wordpress.com/2011/12/02/easy-cell-statistics-for-factorial-designs/"
},
{
"title": "Units and metadata",
"href": "http://mkao006.blogspot.com/2012/08/units-and-metadata.html"
},
{
"title": "Sabermetrics Meets R Meetup",
"href": "http://princeofslides.blogspot.com/2011/10/sabermetrics-meets-r-meetup.html"
}
]
|
55ddbc34d2ba01b155f6543963aad317eb784403
|
14ea5102d208aad4d1983627f69bcfb65ce954eb
|
/getLiDAR.R
|
5722d18c84e7f7a664b1d0bb2ef76bcaebe88ff0
|
[] |
no_license
|
barnabasharris/rspatial
|
e7d51eb67921cbd0bdff1e6de10121502186892c
|
12467990f8d5c8b6a036d0f1fca8f1e75bd50b0d
|
refs/heads/master
| 2023-01-24T12:03:46.090179
| 2020-12-11T20:17:07
| 2020-12-11T20:17:07
| 294,397,586
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,571
|
r
|
getLiDAR.R
|
# Query the Environment Agency LiDAR download portal for tiles intersecting
# `bufferedPoly` and download the matching product files.
#
# Drives a headless Firefox session (RSelenium) through the Defra/EA survey
# portal: uploads the polygon as a zipped shapefile, walks the
# product/year/resolution drop-downs, harvests the tile download URLs, writes
# them to a wget list, then downloads and unzips the tiles into per-year
# folders.
#
# Arguments:
#   bufferedPoly    - sf polygon; should carry `tile50k_name_char` (falls back
#                     to `layer`), used to name outputs.
#   whichProd       - portal product: "LIDAR Tiles DTM" or "LIDAR Point Cloud".
#   whichYears      - survey years to fetch (used by the point-cloud branch).
#   minSurvey       - keep only tiles surveyed in at least this many years
#                     (0 disables the filter).
#   userDataDirRoot - sub-folder (under the global `userDataDir`) to download
#                     into.
#   overwrite       - if TRUE, wipe and recreate the target folder first.
#
# Returns the download folder path, or an error string if the portal rejects
# the shapefile or no qualifying tiles are found.
#
# NOTE(review): depends on a global `userDataDir` and on external `zip`,
# `wget`, GNU `parallel` and `unzip` binaries being on the PATH — confirm
# these exist before running in a new environment.
getLidar2 <- function(bufferedPoly,
                      whichProd="LIDAR Tiles DTM",
                      whichYears,
                      minSurvey = 2,
                      userDataDirRoot='tmp',
                      overwrite=T) {
  # Fall back to `layer` when the 50k tile name column is absent.
  if (is.null(bufferedPoly$tile50k_name_char)) {
    bufferedPoly$tile50k_name_char <- bufferedPoly$layer
  }
  # define chrome options (kept for reference; Firefox is used below)
  eCaps <- list(chromeOptions = list(
    args = c(
      '--disable-gpu'
      ,'--headless',
      '--window-size=1280,800'
    )
  ))
  # rD <- rsDriver(browser = "chrome",
  #                chromever = "81.0.4044.138",
  #                extraCapabilities = eCaps,
  #                port =
  #                  as.integer(base::sample(seq(32768,65535, by=1),1)))
  # Start a headless Firefox on a random high port (avoids clashes when
  # several sessions run on the same machine).
  rD <- RSelenium::rsDriver(
    browser = "firefox",
    extraCapabilities = list(
      "moz:firefoxOptions" = list(
        args = list('--headless')
      )
    ),
    port =
      as.integer(base::sample(seq(32768,65535, by=1),1))
  )
  remDr <- rD[["client"]]
  # Upload the polygon to the portal and wait for the tile list to come back.
  # Returns the string 'shapefile error' when the portal never responds.
  browseQuery <- function(remDr,bufferedPoly) {
    # The portal wants a single (multi)polygon: dissolve and write a temp
    # shapefile, then zip it for upload.
    bufferedPoly <- st_union(bufferedPoly) %>% st_sf
    st_write(bufferedPoly, dsn=paste0('data/temp.shp'),
             delete_dsn=T)
    simplePoly <- paste0(getwd(),'/data/temp.shp')
    simplePolyZip <- paste0(getwd(),'/data/temp.zip')
    simplePolyFiles <- paste0(getwd(),'/data/temp*')
    # zip shapefiles into archive
    system(paste0('zip ',simplePolyZip,' ',simplePolyFiles))
    # navigate to the EA lidar portal
    remDr$navigate("https://environment.data.gov.uk/DefraDataDownload/?Mode=survey")
    # Poll until the upload widget exists (page is JS-rendered).
    suppressMessages({
      try(remDr$findElement("id", "fileid"),TRUE)
    })
    while (remDr$status == 7) {
      Sys.sleep(2)
      print('waiting for portal to load...')
      suppressMessages({
        try(remDr$findElement("id", "fileid"),silent=TRUE)
      })
    }
    webElem <- remDr$findElement("id", "fileid")
    webElem$sendKeysToElement(list(simplePolyZip))
    print('uploading shape file...')
    # wait for upload to complete
    Sys.sleep(5)
    # find and click the 'Get Tiles' button
    getTiles <- remDr$findElement(using = 'css selector', ".grid-item-container")
    getTiles$clickElement()
    # Poll for the results container; give up after ~60 s (12 x 5 s).
    suppressMessages({
      try(remDr$findElement(using = 'css selector', '.data-ready-container'),TRUE)
    })
    i <- 0
    while (remDr$status == 7) {
      Sys.sleep(5)
      print(paste0('waiting for tiles to be returned...'))
      suppressMessages({
        try(remDr$findElement(using = 'css selector', '.data-ready-container'),TRUE)
      })
      i <- i + 1
      if (i > 12) {
        print('error with shape file...')
        return('shapefile error')
      }
    }
    print('tiles returned!')
  } # / browseQuery
  browseResult <- browseQuery(remDr,bufferedPoly)
  if (browseResult == 'shapefile error') {
    print(browseResult)
    return(browseResult)
  }
  # Quick visual sanity-check of the query polygon.
  l <- leaflet() %>% addTiles() %>%
    addPolygons(data=st_transform(bufferedPoly,4326))
  print(l)
  print('searching available tiles...')
  # Select the requested product in the product drop-down.
  desiredProds <- whichProd
  # desiredProds <- "LIDAR Tiles DTM"
  # desiredProds <- "LIDAR Point Cloud"
  prodElem <- remDr$findElement(using = 'css selector', '#productSelect')
  prodList <- unique(prodElem$selectTag()$text)
  prodsIndex <- which(prodList %in% desiredProds)
  xP <- paste0('//*[@id="productSelect"]/option[',prodsIndex,']')
  webElem <- remDr$findElement(using = 'xpath',
                               value = xP)
  webElem$clickElement()
  webElem$getElementText()
  # Years available for this product.
  yrElem <- remDr$findElement(using = 'css selector', '#yearSelect')
  yrList <- unique(yrElem$selectTag()$text)
  if (desiredProds == "LIDAR Tiles DTM") {
    # DTM branch: cycle through every year, pick the 1 m resolution where it
    # exists, and record the tile download links per year.
    tileList <- lapply(1:length(yrList), function(x) {
      yr <- yrList[x]
      xP <- paste0('//*[@id="yearSelect"]/option[',x,']')
      webElem <- remDr$findElement(using = 'xpath',
                                   value = xP)
      webElem$clickElement()
      # now cycle through resolutions
      resElem <- remDr$findElement(using = 'css selector', '#resolutionSelect')
      resVec <- unique(resElem$selectTag()$text)
      # pick only 1m; skip years without it
      if (length(which(resVec == 'DTM 1M')) == 0) {
        return(NULL) } else { r <- which(resVec == 'DTM 1M') }
      resElem$clickElement() # open drop down
      xP <- paste0('//*[@id="resolutionSelect"]/option[',r,']')
      webElem <- remDr$findElement(using = 'xpath',
                                   value = xP)
      webElem$clickElement() # select 1m res
      tileLinks <- remDr$findElement(using = 'css selector', '.data-ready-container')
      tileLinks.a <- tileLinks$findChildElements('tag', 'a')
      tiles <- unlist(lapply(tileLinks.a, function(x) x$getElementAttribute('href')))
      return(tiles)
    })
    # name list by years, drop years with no 1m data
    names(tileList) <- yrList
    tileList[unlist(lapply(tileList,is.null))] <- NULL
  }
  if (desiredProds == "LIDAR Point Cloud") {
    # Point-cloud branch: only the explicitly requested years.
    tileList <- lapply(whichYears, function(yr) {
      x <- which(yrList==yr)
      if (length(x) == 0) {
        print(paste0('year ',yr,' not available as LAZ'))
        return(NULL)
      }
      xP <- paste0('//*[@id="yearSelect"]/option[',x,']')
      webElem <- remDr$findElement(using = 'xpath',
                                   value = xP)
      webElem$clickElement()
      tileLinks <- remDr$findElement(using = 'css selector', '.data-ready-container')
      tileLinks.a <- tileLinks$findChildElements('tag', 'a')
      tiles <- unlist(lapply(tileLinks.a, function(x) x$getElementAttribute('href')))
      return(tiles)
    })
    # name list by years, drop years with no data
    names(tileList) <- whichYears
    tileList[unlist(lapply(tileList, is.null))] <- NULL
  }
  # Extract 6-character tile names from the download URLs (the tile name
  # follows the "<year>-" token in each URL).
  tileNames <- lapply(names(tileList), function(x) {
    unlist(lapply(str_split(tileList[[x]],
                            paste0(x,'-')),function(y) substr(y[2],1,6)))
  })
  names(tileNames) <- names(tileList)
  # Invert the mapping: for each tile, the list of years it was surveyed.
  tilesYears <- lapply(unique(unlist(tileNames)), function(tile) {
    allYears <- lapply(names(tileNames), function(year) {
      if (tile %in% tileNames[[year]]) year
    })
    allYears[unlist(lapply(allYears,is.null))] <- NULL
    return(unlist(allYears))
  })
  names(tilesYears) <- unique(unlist(tileNames))
  # Keep only tiles surveyed in at least `minSurvey` years.
  if (minSurvey > 0) {
    tilesYears[unlist(lapply(tilesYears, function(x) length(x) < minSurvey))] <- NULL
    if (length(tilesYears) == 0) {
      er <- 'no tiles with sequential surveys found...'
      print(er)
      return(er)
    }
  }
  allLinks <- as.character(unlist(tileList))
  dlLinks <- allLinks[str_detect(allLinks,paste(names(tilesYears),collapse = '|'))]
  # Output URLs as a list file for wget.
  # BUG FIX: '_list.txt' was previously passed as the *second argument of
  # unique()* (its `incomparables` parameter) instead of to paste0(), so the
  # suffix was silently dropped and the list file was named after the tile
  # alone. The suffix is now appended as intended.
  fileName <- paste0(unique(bufferedPoly$tile50k_name_char), '_list.txt')
  write.table(dlLinks,
              file=paste0('wget/',fileName),
              quote = F,row.names=F,col.names = F)
  print(paste0('written download list to ... wget/',fileName))
  # close selenium
  remDr$close()
  rD$server$stop()
  gc()
  # Create the per-year folder structure under the (global) userDataDir.
  folderPath <- paste0(userDataDir,'/',userDataDirRoot,'/',unique(bufferedPoly$tile50k_name_char))
  if (!dir.exists(folderPath)) {
    dir.create(folderPath)
    lapply(unique(unlist(tilesYears)),function(x) dir.create(paste0(folderPath,'/',x)))
  }
  if (overwrite) {
    # Wipe any previous download for this tile set and start clean.
    system(paste0('rm ',folderPath,' -R'))
    dir.create(folderPath)
    lapply(unique(unlist(tilesYears)),function(x) dir.create(paste0(folderPath,'/',x)))
  }
  # Download all links in parallel via wget + GNU parallel.
  system(paste0('cat ',getwd(),'/',paste0('wget/',fileName),' | parallel --gnu ',
                shQuote(paste0('wget {} -P ',folderPath))))
  # Unzip each year's archives into that year's folder (-n: never overwrite).
  yrs <- unique(unlist(tilesYears))
  lapply(yrs, function(x) {
    zips <- paste0(folderPath,'/',
                   list.files(folderPath)[grep(paste0("*(",x,").*zip$"),list.files(folderPath))])
    if (length(zips) > 0) {
      lapply(zips, function(y) {
        system(paste0("unzip -n ",y,
                      ' -d ',folderPath,'/',x,'/'))
      })
    }
  })
  return(folderPath)
}
|
90d015b94f95197ba78426f8a4e794346c9f15c3
|
26080c27d35e63e7b2ac501f65d3f606806b34a6
|
/man/try_read_xml.Rd
|
ba397aa6e7ba6b04e7857592d3c59dbf7462692b
|
[
"MIT"
] |
permissive
|
lawine90/datagokR
|
1fb953a1a2ef91ee0300a3787c0903e3acf9695f
|
b4026238ab7b307c9d079f117c9412f3bbd12985
|
refs/heads/master
| 2021-07-03T23:33:50.207804
| 2020-09-23T07:46:11
| 2020-09-23T07:46:11
| 180,745,810
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 430
|
rd
|
try_read_xml.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/try_read_xml.R
\name{try_read_xml}
\alias{try_read_xml}
\title{Retry xml2::read_xml up to 5 times; on repeated failure, save the error message.}
\usage{
try_read_xml(url, times = 5)
}
\arguments{
\item{url}{url want to read.}
\item{times}{trying times.}
}
\description{
Retry \code{xml2::read_xml} on \code{url} up to \code{times} times (default 5); if every attempt fails, the error message is saved.
}
|
6e0c208fab8f8b718865282dea3879c904bc6ee9
|
649bac45381b4bd84d39d6991327cfd056153b36
|
/Visulation.R
|
d4e622ec0f73a730a4adad2d3bae25930c292adf
|
[
"Apache-2.0"
] |
permissive
|
basakstuff/UFC-Machine-Learning-Project
|
3b862a6a23a06703223afdf0eb35ffd4264225dc
|
6a983994f9b32359d50b47c9fd6927444625d245
|
refs/heads/main
| 2023-06-11T05:53:48.640409
| 2021-06-30T14:46:04
| 2021-06-30T14:46:04
| 327,935,048
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,369
|
r
|
Visulation.R
|
# Packages required for the analysis: install any that are missing, then load
# them all.
load.libraries <- c('data.table', 'testthat', 'gridExtra', 'corrplot', 'GGally',
                    'ggplot2', 'e1071', 'dplyr', 'Hmisc', 'tidyverse', 'funModeling',
                    'plotly','psych','rattle','caret','tree', 'rpart','magrittr',
                    'class','formattable','randomForest')
install.lib <- load.libraries[!load.libraries %in% installed.packages()]
# BUG FIX: `dependences` was a misspelling of `dependencies`. Because it is
# not a (prefix of a) real install.packages() argument, it was silently
# absorbed by `...` and package dependencies were never installed.
for(libs in install.lib) install.packages(libs, dependencies = TRUE)
# character = TRUE partially matches require()'s `character.only` argument.
sapply(load.libraries, require, character = TRUE)
data <- read.csv("data.csv")
setDT(data)
# Visualise missingness in a data frame as a tile map: one tile per cell,
# black = observed, white = missing. Columns are ordered left-of-legend by
# how many values they are missing (most-missing columns first).
#
# Args:
#   data_in - data frame (or matrix) to inspect.
#   title   - optional plot title.
# Returns a ggplot object.
plot_Missing <- function(data_in, title = NULL){
  # 0/1 presence indicator per cell, columns sorted by completeness.
  presence <- as.data.frame(ifelse(is.na(data_in), 0, 1))
  presence <- presence[, order(colSums(presence))]
  # Long-format grid: one row per (row index, column name) pair.
  tile_grid <- expand.grid(list(x = seq_len(nrow(presence)), y = colnames(presence)))
  tile_grid$m <- as.vector(as.matrix(presence))
  tile_grid <- data.frame(x = unlist(tile_grid$x),
                          y = unlist(tile_grid$y),
                          m = unlist(tile_grid$m))
  ggplot(tile_grid) +
    geom_tile(aes(x = x, y = y, fill = factor(m))) +
    scale_fill_manual(values = c("white", "black"),
                      name = "Missing\n(0=Yes, 1=No)") +
    theme_light() +
    ylab("") +
    xlab("") +
    ggtitle(title)
}
cat('Data has', dim(data)[1], 'rows and', dim(data)[2], 'columns.')
str(data)
cat_var <- names(data)[which(sapply(data, is.character))] #kategorik
numeric_var <- names(data)[which(sapply(data, is.numeric))] #numeric
colSums(sapply(data, is.na))
colSums(sapply(data[,.SD, .SDcols = cat_var], is.na)) #kategorikte null kontrolu
colSums(sapply(data[,.SD, .SDcols = numeric_var], is.na)) #numericte null kontrolu
plot_Missing(data)
#############################
ufc_data <- data %>%
select(date, Winner, title_bout, weight_class,B_fighter, B_Height_cms, B_Reach_cms, B_age, B_current_lose_streak, B_current_win_streak,B_longest_win_streak, B_losses,B_wins,B_total_rounds_fought, B_total_title_bouts,B_win_by_KO.TKO,B_win_by_Submission, B_win_by_Decision_Majority,B_win_by_Decision_Split,B_win_by_Decision_Unanimous,B_win_by_TKO_Doctor_Stoppage,
R_fighter, R_Height_cms, R_Reach_cms, R_age,
R_current_lose_streak, R_current_win_streak,R_longest_win_streak, R_losses,R_wins,R_total_rounds_fought,
R_total_title_bouts,R_win_by_KO.TKO,R_win_by_Submission,
R_win_by_Decision_Majority,R_win_by_Decision_Split,R_win_by_Decision_Unanimous,R_win_by_TKO_Doctor_Stoppage)
ufc_data <- subset.data.frame(ufc_data, subset= date >= "2010-01-01")
dim(ufc_data)
############################
cat_ufc <- names(ufc_data)[which(sapply(ufc_data, is.character))] #kategorik
numeric_ufc <- names(ufc_data)[which(sapply(ufc_data, is.numeric))] #numeric
colSums(sapply(ufc_data, is.na))
plot_Missing(ufc_data)
################################ delete missing
ufc_data <- na.omit(ufc_data)
plot_Missing(ufc_data)
dim(ufc_data)
colnames(ufc_data)
write.csv(ufc_data,"ufc_data.csv", row.names = FALSE)
# ************************************************
# perform_PCA()
# ************************************************
perform_PCA <- function(df) {
  # Principal-component variable selection.
  #
  # Runs a centred/scaled PCA on `df`, keeps the components with eigenvalue
  # >= 1 (Kaiser criterion), draws a scree plot (side effect), Varimax-rotates
  # the loading matrix, and returns the names of the variables whose rotated
  # loading on any retained component exceeds 0.2 in magnitude.
  #
  # Args:
  #   df - data frame or matrix of numeric columns.
  # Returns:
  #   character vector of selected variable names (a variable may appear once
  #   per component it loads on, matching the original behaviour).
  df.prc = prcomp(df, center = TRUE,scale = TRUE)
  # Eigenvalues of the correlation matrix.
  variance = df.prc$sdev ^ 2
  # Kaiser criterion: retain components with eigenvalue >= 1.
  pca_vars = variance[variance >= 1]
  number_of_PCAs = length(pca_vars)
  # Scree Plot (side effect only)
  screeplot(df.prc,type = "line",main = "PCA: Scree Plot")
  # Varimax rotation of the full loading matrix.
  df.varimax_rotation = varimax(df.prc$rotation)
  test = data.frame(unclass(df.varimax_rotation$loadings))
  test = cbind(rownames(test),test)
  row.names(test)<-1:nrow(test)
  # BUG FIX: the original indexed columns 2:number_of_PCAs, which — because
  # column 1 holds the variable names — dropped the *last* retained
  # component; and for number_of_PCAs == 1 the reversed sequence 2:1 pulled
  # in the name column itself. The retained PCs occupy columns
  # 2..(number_of_PCAs + 1).
  colnames_test = names(test)[seq_len(number_of_PCAs) + 1]
  # Collect variables with |loading| > 0.2 on any retained component,
  # column by column (duplicates deliberately kept, as before).
  selected_variables = unlist(lapply(colnames_test, function(pc) {
    hit = test[[pc]] > 0.2 | test[[pc]] <= -0.2
    as.character(test[hit, 1])
  }))
  return(selected_variables)
}
df <- read.csv("ufc_data.csv")
df <- subset(df, select=-c(R_fighter,B_fighter, date))
df$title_bout <- as.numeric(factor(df$title_bout))
df$weight_class <- as.numeric(factor(df$weight_class))
df$Winner<-factor(df$Winner)
PCA_cols <- perform_PCA(df[,-1])
normalised <- function(x) {return((x - min(x,rm.na=TRUE))/(max(x,rm.na=TRUE)-min(x,rm.na=TRUE)))}
UFC_PCA<-data.frame(df[1],df[,PCA_cols]) # concat 1st column
cat("[1] Principle Components: \n", PCA_cols,"\n",sep =" | ")
write.csv(UFC_PCA,"UFC_PCA.csv", row.names = FALSE)
### Visualization of numeric column information
plot_num(ufc_data)
### Histogram for Blue Fighter Reach Difference
ggplot(ufc_data, aes(x=(B_Reach_cms - R_Reach_cms), y =..density.., fill=..count..)) + geom_histogram(binwidth = 5, ) + labs(x = "Reach Difference (cm)", title = "Histogram for Blue Fighter Reach Difference") + scale_fill_gradient("Count", low="#87CEEB", high="#00008B")
### Histogram for Red Fighter Reach Difference
ggplot(ufc_data, aes(x=(R_Reach_cms - B_Reach_cms), y =..density.., fill=..count..)) + geom_histogram(binwidth = 5, ) + labs(x = "Reach Difference (cm)", title = "Histogram for Red Fighter Reach Difference") + scale_fill_gradient("Count", low="#FFA07A", high="#FF0000")
### Barplot of Blue Fighter Age
ggplot(ufc_data, aes(x = B_age)) + geom_bar(fill = "#0000FF") #B_age
### Barplot of Blue Fighter Age
ggplot(ufc_data, aes(x = R_age)) + geom_bar(fill = "#FF0000") #R_age
### List of Blue fighter's winning average
temp <- ufc_data %>% select(B_fighter,B_wins)
temp <- temp %>% group_by(B_fighter) %>% summarise(avg=mean(B_wins))
temp <- temp %>% arrange(desc(avg))
temp <- temp[1:10,]
temp %>%
formattable(list(avg = color_bar("#85C1E9")), align = 'l')
### List of Red fighter's winning average
temp <- ufc_data %>% select(R_fighter,R_wins)
temp <- temp %>% group_by(R_fighter) %>% summarise(avg=mean(R_wins))
temp <- temp %>% arrange(desc(avg))
temp <- temp[1:10,]
temp %>%
formattable(list(avg = color_bar("#FF0000")), align = 'l')
##The winning Blue fighter according to weight_class
ufc_data %>% filter(Winner == "Blue") %>% count(weight_class) #weight_class'a göre kazanan blue
##The winning Red fighter according to weight_class
ufc_data %>% filter(Winner == "Red") %>% count(weight_class) #weight_class'a göre kazanan red
### Splitting columns containing numeric data
numeric_data <- select_if(ufc_data, is.numeric)
### Correlation Matrix
cor_data <- cor(numeric_data)
corrplot(cor_data, method = "color", type = "upper", tl.col = "black", tl.cex = 0.75, order="hclust", tl.col.cex=0.30)
### Pie chart showing the winning fighter
custom_col <- c("blue", "green", "red")
ggplot(ufc_data, aes(x = "", y="", fill = factor(Winner))) +
geom_bar(width = 1, stat = "identity") +
theme(axis.line = element_blank(),
plot.title = element_text(hjust=0.5,size=22)) +
labs(fill="Winner",
x=NULL,
y=NULL,
title="Pie Chart of Winners") + coord_polar(theta = "y", start=0)+
scale_fill_manual(values=custom_col)
################
t <- list(family = "sans-serif",size = 14,color = 'black') # Text style
m <- list(l = 8,r = 8,b = 35,t = 35,pad =1) # Magins
################
### Weight Class Donut
is.num <- sapply(ufc_data, is.numeric) # Format to 3 Decimal Points
ufc_data [is.num] <- lapply(ufc_data [is.num], round, 3)
weight_class <- ufc_data$weight_class
weight_class <- as.data.frame(table(ufc_data$weight_class)) # frequency
t2 <- list(family = "sans-serif",size = 16,color = 'black') # Text style
m2 <- list(l = 50,r = 50,b = 100,t = 100,pad = 4) # Magins
b <- plot_ly(weight_class, labels = ~Var1, values = ~Freq)%>%add_pie(hole = 0.6) %>%
layout(title = "UFC Weight Class 2010 - 2019",
showlegend = T,
xaxis = list(showgrid = FALSE, zeroline = FALSE,showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
font = t,
margin = m
)
print(b)
# *********************** #
# Events vs Years BarChart
# Extract Year
yearsList <- c()
for(date in data[,]$date){
date <- strsplit(date,"-") # split by -
date <- date[[1]][1] # get date
yearsList <- c(yearsList,date)
}
yearsDF <- data.frame(yearsList)
yearsDF <- as.data.frame(table(yearsDF)) # frequency
names(yearsDF) <- c("year", "count")
x4 = yearsDF$year
y4 = yearsDF$count
t4 <- list(family = "sans-serif",size = 14,color = 'Black') # Text style
m4 <- list(l = 50,r = 50,b = 100,t = 100,pad = 4) # Magins
bar_color <- rep("#3caef2",27)
bar_color[22] <- '#07466c'
d <- plot_ly(yearsDF, x = ~x4, y = ~y4, type = 'bar',text=y4, textposition="auto",
marker = list(color = bar_color)) %>%
layout(title = "Number of UFC Matches Over Years",
xaxis = list(title = "Year"),
yaxis = list(title = "No. Of matches"),
font = t,
margin = m)
print(d)
# *********************** #
# Density plots of fighter age, height and reach, pooling the blue- and
# red-corner fighters into a single sample per measure.
fighter_measures <- data.frame(
  # BUG FIX: the original concatenated B_Height_cms with *itself*, so
  # red-corner heights were never included; the second term is now
  # R_Height_cms, matching the reach and age rows below.
  "height" = c(ufc_data$B_Height_cms, ufc_data$R_Height_cms),
  "reach" = c(ufc_data$B_Reach_cms, ufc_data$R_Reach_cms),
  "age" = c(ufc_data$B_age, ufc_data$R_age))
fighter_measures <- na.omit(fighter_measures)
# One density plot per measure, stacked in a single column.
p1 <- ggplot(fighter_measures, aes(x=age))+
  geom_density(color="darkblue", fill="lightblue")
p2 <- ggplot(fighter_measures, aes(x=height))+
  geom_density(color="darkblue", fill="lightblue")
p3 <- ggplot(fighter_measures, aes(x=reach))+
  geom_density(color="darkblue", fill="lightblue")
grid.arrange(p1, p2, p3, nrow=3)
print("~~ VISUALISATION ENDED:")
|
ebec70e83d9b93d85cd0f0155fa4089c6893b523
|
2036e35b23e6f77296a11523360a65a6e8252580
|
/scripts/04_aoh_bat_spatial_data.R
|
066b7f2d87494150762a355138457157a25e1447
|
[
"CC0-1.0",
"MIT"
] |
permissive
|
ecohealthalliance/sars_cov_risk
|
6ee585cf788daf068226eb3efd78ea7dac4431ec
|
9512e010d90502b96d86907e19943fe8bdcb5b7b
|
refs/heads/main
| 2023-04-09T09:18:29.347436
| 2022-05-11T17:44:10
| 2022-05-11T17:44:10
| 398,210,464
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,898
|
r
|
04_aoh_bat_spatial_data.R
|
# Create the spatial input files used by the AOH (area-of-habitat) analyses:
# a country-boundary shapefile, habitat rasters (with and without karst), and
# an elevation raster, all cropped to the South/South-East Asia study region.
source(here("R/prepSEAfiles.R"))
# Habitat types global map (Jung et al. composite, level-2 classes).
# The 1km-resolution map is easier/faster to work with, and the authors
# recommend working at the coarsened resolution.
hab.ras <- raster(here("data-raw/iucn_habitatclassification_composite_1km_ver001/iucn_habitatclassification_composite_1km_lvl2_ver001.tif"))
# Carbonate rock outcrops as an sf object. Cave habitats are suitable for
# many bat species, but Jung et al. doesn't include cave data, so carbonate
# rock (karst) is used as a proxy.
karst.sf <- st_read(here("data-raw/karst_wgs.shp"))
# Countries of interest for the study region.
SEAcountries <- c("Bangladesh", "Bhutan", "Brunei", "Cambodia", "China",
                  "Hong Kong", "India", "Indonesia", "Laos", "Macao",
                  "Malaysia", "Myanmar", "Nepal", "Philippines", "Singapore",
                  "Sri Lanka", "Taiwan", "Thailand", "East Timor", "Vietnam")
# get necessary habitat, elevation files set up---------------------------------
# prepSEAfiles returns a list: [[1]] country boundaries, [[2]] cropped habitat
# raster, [[3]] habitat raster with karst added, [[4]] elevation raster.
preppedFiles <- prepSEAfiles(continent = "asia", countryNames = SEAcountries,
                             habitatMap = hab.ras, karst.sf = karst.sf)
# save all the outputs
# 1. shapefile of country boundaries
# NOTE(review): writeOGR is from the retired rgdal package — consider
# migrating to sf::st_write; confirm against the project's dependency policy.
writeOGR(preppedFiles[[1]], dsn = here("data/"), layer = "SEA",
         driver = "ESRI Shapefile", overwrite_layer = TRUE)
# 2a. habitat raster cropped to countries of interest
writeRaster(preppedFiles[[2]], here("data/SEAhabitat.tif"), datatype = "INT2U",
            overwrite = TRUE)
# 2b. habitat raster with karst added, cropped to countries of interest
writeRaster(preppedFiles[[3]], here("data/SEAhabitatKarst.tif"),
            datatype = "INT2U", overwrite = TRUE)
# 3. elevation raster
# note that no elevation data were available for Singapore
writeRaster(preppedFiles[[4]], here("data/SEAelevation.tif"),
            datatype = "INT2S", overwrite = TRUE)
|
fb3e3b9722b7fac295dc949b34ade054e7fbe115
|
7f873c96ced1c3b57c378102febbdcda3bc8e0c4
|
/statistics_economics/examples/contingency_table.R
|
cbd33136fab065c69b6cf447a09042ff1b7626cd
|
[] |
no_license
|
IanMadlenya/teaching
|
ec71610301c2f23f4e38b0e73c7f052edd71c984
|
6ca5a296b07b5b42e80ff56070edd4d72a561a8f
|
refs/heads/master
| 2020-03-14T03:32:16.376964
| 2018-04-25T14:09:56
| 2018-04-25T14:09:56
| 131,422,094
| 1
| 0
| null | 2018-04-28T15:38:38
| 2018-04-28T15:38:37
| null |
UTF-8
|
R
| false
| false
| 932
|
r
|
contingency_table.R
|
## Example contingency table
# Simulate data relating revolutions to prior military defeat, then explore
# the relationship with cross-tabulations and proportion tables.
set.seed(42)
R<-rgamma(200,1,7) # Baseline risk of revolution
defeat<-rbinom(200,1,prob=.12) # Probability of military defeat
R<-R+.4*defeat+rnorm(200,.01,.001) # Defeat raises the latent risk by ~0.4
revolution<-ifelse(R>.5,1,0) # Dummy variable: revolution if risk > 0.5
# Look at the cross-tabulation of defeat vs revolution
#table(defeat);table(revolution)
table(defeat,revolution)
# Calculate conditional probabilities by hand. NOTE: the counts below are
# hard-coded from the table produced under set.seed(42); they only hold for
# that seed.
7/(168+7) # P(revolution | no defeat)
12/(13+12) # P(revolution | military defeat)
# Save the table and look at proportions
DR<-table(defeat,revolution)
prop.table(DR)
prop.table(DR,1) # Row as margin (condition on defeat)
prop.table(DR,2) # Column as margin (condition on revolution)
# A Chi-square test of independence can be run via 'summary'
#summary(DR)
# Produce a mosaicplot of the table
#plot(DR,main="Proportions",col=c("grey90","grey60"))
|
1b1df2422338184c58205b51490870343c2616ec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hamlet/examples/mem.plotresid.Rd.R
|
69b75d95f15c563f5c4ec8f4446656abfef99d8f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 721
|
r
|
mem.plotresid.Rd.R
|
library(hamlet)
### Name: mem.plotresid
### Title: Plot residuals of a mixed-effects model along with trend lines
### Aliases: mem.plotresid
### Keywords: regression
### ** Examples
# Longitudinal PSA data shipped with the hamlet package.
data(vcaplong)
# Keep only the Vehicle and ARN treatment groups.
exdat <- vcaplong[vcaplong[,"Group"] %in% c("Vehicle", "ARN"),]
library(lme4)
# Four candidate mixed-effects models of log2 PSA over weeks on drug:
# f0: random intercept + slope per ID, no treatment term
f0 <- lmer(log2PSA ~ 1 + DrugWeek + (1 + DrugWeek|ID), data = exdat)
# f1: adds a treatment-by-week interaction (ARN effect on the slope)
f1 <- lmer(log2PSA ~ 1 + DrugWeek + DrugWeek:ARN + (1 + DrugWeek|ID), data = exdat)
# f2: as f1 but with uncorrelated random intercept and slope
f2 <- lmer(log2PSA ~ 1 + DrugWeek + DrugWeek:ARN + (1|ID) + (0 + DrugWeek|ID), data = exdat)
# f3: random intercept at the Submatch level instead of the individual ID
f3 <- lmer(log2PSA ~ 1 + DrugWeek + DrugWeek:ARN + (1|Submatch) + (0 + DrugWeek|ID), data = exdat)
# Compare the residual plots of the four fits in a 2x2 grid.
par(mfrow=c(2,2))
mem.plotresid(f0)
mem.plotresid(f1)
mem.plotresid(f2)
mem.plotresid(f3)
|
b21f704c4737995e572feadd0b82cc16da15262e
|
e4b9f3d8f5f45eeea21591455b5de77c1454548d
|
/teste1b.R
|
f66828df21267e494678e4431118197f34160233
|
[] |
no_license
|
JamesJnJ/analisededadosExerc01
|
1b11c1ea8dd1598f7b900cc11e230bbbc11a1066
|
0412bcf9d6fe911238c4c48a8932ebb12c8c26d4
|
refs/heads/master
| 2020-04-25T07:40:45.223481
| 2019-02-26T23:00:05
| 2019-02-26T23:00:05
| 172,621,601
| 0
| 0
| null | 2019-02-26T02:22:54
| 2019-02-26T02:22:53
| null |
UTF-8
|
R
| false
| false
| 3,016
|
r
|
teste1b.R
|
########################################
# Teste 1B
# Nome(s): James Andrade Moreno Jr e Renann Camargo
########################################
#ATENÇÃO: Você precisa fazer o download do arquivo chustomer_churn.csv e
# deixá-lo na mesma pasta que o arquivo teste1b.R
# Depois, Fornecer o caminho completo até o arquivo csv.
# Exemplos:
# -Windows:
# "C:/Users/Andre/Downloads/customer_churn.csv"
# -Ubuntu
# "/home/andre/Downloads/customer_churn.csv"
# -Mac OS
# "/Users/andre/Downloads/customer_churn.csv"
customer_churn = read.table("customer_churn.csv", sep=",", header = TRUE, stringsAsFactors= FALSE)
########################################
# Item 1 (0.5 ponto)
########################################
customer_churn #7500 clientes
customer_churn <- unique(customer_churn)
customer_churn #7043 clientes exclusivos
########################################
# Item 2 (0.5 ponto)
########################################
# Convert "Yes"/"No" text columns to logical flags. The `==` comparison
# already returns a logical vector, so the as.logical() wrapper used in the
# original was redundant and has been dropped; NA inputs stay NA either way.
customer_churn$Partner <- customer_churn$Partner == "Yes"
customer_churn$Dependents <- customer_churn$Dependents == "Yes"
customer_churn$Churn <- customer_churn$Churn == "Yes"
customer_churn
########################################
# Item 3 (0.5 ponto)
########################################
#max_tenure <- #salvar resultado nessa variável
########################################
# Item 4 (1.0 ponto)
########################################
#max_tenure_50 <- #salvar resultado nessa variável
########################################
# Item 5 (1.0 ponto)
########################################
#min_tenure_mtm <- #salvar resultado nessa variável
########################################
# Item 6a (1.0 ponto)
########################################
#total_mtm <- #salvar resultado nessa variável
#total_year <- #salvar resultado nessa variável
#total_two_year <- #salvar resultado nessa variável
########################################
# Item 6b (0.5 ponto)
########################################
#regular_customers <- #salvar resultado nessa variável
########################################
# Item 7a (0.5 ponto)
########################################
#customers_with_dependents <- #salvar resultado nessa variável
########################################
# Item 7b (0.5 ponto)
########################################
#customers_mtm <- #salvar resultado nessa variável
#customers_year <- #salvar resultado nessa variável
#customers_two_year <- #salvar resultado nessa variável
########################################
# Item 7c (0.5 ponto)
########################################
#customers_two_years <- #salvar resultado nessa variável
########################################
# Item 7d (0.5 ponto)
########################################
#accumulated_discount <- #salvar resultado nessa variável
|
b1fbf21e9ba6dc2dfaaa54231db25e0be9396494
|
9f0c7a45e1dae89638c46bf241f4a8ddf4a023d6
|
/task/analysis/CompareAlgorithms.R
|
a21a8d388994e949f1b89600201c6cd8e792616c
|
[] |
no_license
|
danielbrianbennett/jsbandit
|
953b0738ff8496a162735086785dc821ee8028e1
|
c1fc5ede9a27cb4c1659e29176ae17df822177e5
|
refs/heads/master
| 2021-01-10T12:47:40.075416
| 2018-09-14T16:54:16
| 2018-09-14T16:54:16
| 49,531,899
| 0
| 0
| null | 2016-01-22T03:41:03
| 2016-01-12T22:05:04
|
JavaScript
|
UTF-8
|
R
| false
| false
| 11,645
|
r
|
CompareAlgorithms.R
|
# load relevant packages
library(ggplot2)
library(boot)
# clear workspace
rm(list = ls())
# set version
version <- "v2point2" # either v2point2, v3, or v4
fileDir <- "~/Documents/Git/jsbandit/task/data/"
# load file
filename <- paste0(fileDir, "banditData_", version, ".RData")
load(filename)
# load list of filtered IDs
filename <- paste0(fileDir, "filteredIDs_", version, ".RData")
load(filename)
# retain only participants with an ID in the white-list
sorted.data <- sorted.data[sorted.data$ID %in% filtered.IDs,]
nParticipants <- length(filtered.IDs)
# calculate cumulative winnings
nSims <- 1000
winnings <- array(data = NA, dim = c(3,30,nParticipants))
winnings_random <- array(data = NA, dim = c(3,30,nParticipants))
winnings_perfect <- array(data = NA, dim = c(3,30,nParticipants))
for (i in 1:nParticipants){
winnings[,,i] <- t(matrix(sorted.data[sorted.data$ID == filtered.IDs[i],]$pointsWon, nrow = 30, ncol = 3))
payoffs <- unsorted.data[unsorted.data$ID == filtered.IDs[i],]$payoffs
payoffs <- payoffs[which(nchar(payoffs) == max(nchar(payoffs)))]
payoffs <- scan(text = gsub("[^0-9,]", "", payoffs),sep=",")
payoffs <- aperm(array(data = payoffs, dim = c(30,3,4)), perm = c(3,1,2))
# perfect knowledge
bestOption <- matrix(data = NA,nrow = 3, ncol = 30)
for (j in 1:3){
for (k in 1:30){
bestOption[j,k] <- max(payoffs[,k,j])
}
}
winnings_perfect[,,i] <- bestOption
# random choices
randomTotal <- array(data = NA,dim = c(3,30,nSims))
for (sim in 1:nSims){
randomOption <- matrix(data = NA,nrow = 3, ncol = 30)
for (j in 1:3){
for (k in 1:30){
randomOption[j,k] <- (payoffs[sample(1:4,1),k,j])
}
}
randomTotal[,,sim] <- randomOption
}
winnings_random[,,i] <- apply(randomTotal, c(1,2), mean)
}
winningsAsProportion <- colMeans(colMeans(winnings))/colMeans(colMeans(winnings_random))
meanWinnings_random <- rowMeans(colMeans(winnings_random))
ciWinnings_random <- 1.96 * apply(colMeans(winnings_random),c(1), sd) / sqrt(nParticipants)
meanWinnings_perfect <- rowMeans(colMeans(winnings_perfect))
ciWinnings_perfect <- 1.96 *apply(colMeans(winnings_perfect),c(1), sd) / sqrt(nParticipants)
meanWinnings <- rowMeans(colMeans(winnings))
ciWinnings <- 1.96 *apply(colMeans(winnings),c(1), sd) / sqrt(nParticipants)
## make plot 1
df <- data.frame(meanPoints = c(meanWinnings_random, meanWinnings, meanWinnings_perfect),
ciPoints = c(ciWinnings_random, ciWinnings, ciWinnings_perfect),
algorithm = c(rep("Random",30), rep("Behaviour", 30), rep("Maximum", 30)),
trialNo = rep(1:30,3))
cols <- c("Random" = "#000000", "Behaviour" = "#0059b3", "Maximum" = "#000000")
p1 <- ggplot(data = df, aes(x = trialNo, y = meanPoints, group = algorithm)) +
geom_ribbon(aes(ymin = meanPoints - ciPoints, ymax = meanPoints + ciPoints),
fill = "grey", alpha = 0.6) +
geom_line(aes(color = algorithm, linetype = algorithm), size = 1) +
geom_point(aes(color = algorithm, shape = algorithm),size = 3, fill = "#235ce1") +
labs(x = "\nTrial number", y = "Points per trial\n")
p1 + scale_color_manual(values = cols, labels = c("Behaviour","Omniscient choices", "Random choices") ) +
scale_linetype_manual(values = c("solid","solid","dotted"), labels = c("Behaviour","Omniscient choices", "Random choices") ) +
scale_shape_manual(values = c(21,32,32), labels = c("Behaviour","Omniscient choices", "Random choices") ) +
guides(color = guide_legend(nrow = 3)) +
theme(panel.grid = element_blank(),
panel.background = element_rect(fill = "white"),
axis.line = element_line(color = "black", size = 0.3),
axis.title = element_text(size = 28),
axis.text = element_text (size = 20),
legend.title = element_blank(),
legend.text = element_text(size = 24),
legend.position = c(.5, .15),
legend.key.size = unit(2, 'lines'),
plot.margin = margin(1, 1, 1, 1, "cm")) +
scale_x_continuous(breaks = c(5,10,15,20,25,30), expand = c(0,0), limits = c(0,31)) +
scale_y_continuous(breaks = c(30, 40, 50, 60, 70, 80), expand = c(0,0), limits = c(30,90))
## make plot 2
blockMeans <- apply(winnings,c(1,3),mean)
overallBlockMean <- apply(blockMeans,1,mean)
overallBlockCI <- 1.96 * apply(blockMeans,1,sd) / sqrt(nParticipants)
df <- data.frame(meanPoints = overallBlockMean,
ciPoints = overallBlockCI,
block = c("Block 1","Block 2","Block 3"))
p2 <- ggplot(data = df, aes(x = block, y = meanPoints)) +
geom_col(width = 0.6, fill = "white",colour = "black", size = 1.5) +
geom_errorbar(aes(y = meanPoints, ymin = meanPoints - ciPoints, ymax = meanPoints + ciPoints, width = 0.2)) +
labs(x = "\nBlock", y = "Points per trial\n")
p2 + theme(panel.grid = element_blank(),
panel.background = element_rect(fill = "white"),
axis.line = element_line(color = "black", size = 0.3),
axis.title = element_text(size = 28),
axis.text = element_text (size = 20)) +
scale_y_continuous(breaks = c(30, 40, 50, 60, 70, 80), expand = c(0,0)) +
coord_cartesian(ylim = c(30,90))
# make plot 3
p3 <- ggplot(data.frame(winningsAsProportion),aes(y = winningsAsProportion, x = 1))
p3 + geom_violin(adjust = 1, color = "#0059b3",trim = TRUE, size = 1.5, width = 1.2) +
geom_jitter(height = .02, width = .03,color = "#0059b3") +
ylim(.4,2) +
labs(x = "\n", y = "Total points ratio\n") +
theme(panel.grid = element_blank(),
panel.background = element_rect(fill = "white"),
axis.line = element_line(color = "black", size = 0.3),
axis.title = element_text(size = 28),
axis.text.y = element_text(size = 20),
axis.ticks.x = element_blank(),
axis.text.x = element_blank()) +
geom_hline(yintercept = 1,linetype = "dotted",size = 1)
# retrieve only data from 10 trials before to 10 trials after a change
# (the original comment said 5 before / 7 after, but the filter is +/-10)
proximal.data <- subset(sorted.data, sorted.data$changeLag < 11 & sorted.data$changeLag > -11)
# aggregate choice proportions by lag number across participants
choice.by.lag.short <- aggregate(proximal.data$filledChosen, by = list(proximal.data$changeLag, proximal.data$ID), FUN = mean)
# get mean and sd choice proportion by lag number
mean.choice.prop.short <- tapply(choice.by.lag.short$x, choice.by.lag.short$Group.1, FUN = mean)
# NOTE(review): mode = "logical" is odd — these are filled with numerics in
# the loop below, which silently coerces them to numeric. `dim()` works here
# because tapply returns a 1-d array.
lower.ci.choice.prop.short <- vector(mode = "logical", length = dim(mean.choice.prop.short))
upper.ci.choice.prop.short <- vector(mode = "logical", length = dim(mean.choice.prop.short))
# Bootstrap statistic: mean of the resampled observations.
# data    : numeric vector handed in by boot(data = ...)
# indices : resample indices supplied by boot() on each replicate
# Fixed: the original ignored its `data` argument and read the global `d`,
# which only worked because callers happened to assign `d` immediately
# before each boot() call.
boot.fun <- function(data, indices){
  return(mean(data[indices]))
}
# Bootstrap a 95% CI for the mean choice proportion at each change lag.
# NOTE(review): `indices` skips lags -1 and 0, and `putLoc` leaves slot 10
# unfilled for lags > -2 — presumably aligning with the levels tapply
# produced above; confirm against the actual changeLag values in the data.
indices <- c(-10:-2, 1:10)
for (i in 1:length(indices)){
  print(i)
  d <- choice.by.lag.short[choice.by.lag.short$Group.1 == indices[i],]$x
  results <- boot(data = d,statistic = boot.fun, R = 1000)
  results.95.ci <- boot.ci(results)
  if (indices[i] > -2){
    putLoc <- i + 1
  } else{
    putLoc <- i
  }
  # normal[2]/normal[3] are the lower/upper normal-approximation bounds
  lower.ci.choice.prop.short[putLoc] = results.95.ci$normal[2]
  upper.ci.choice.prop.short[putLoc] = results.95.ci$normal[3]
}
choice.prop.short <- data.frame(mean.choice.prop.short, lower.ci.choice.prop.short,upper.ci.choice.prop.short)
# Plot positions: shift negative lags right by one so the change point
# falls between -1 and 1 (plotted at 0.5)
plotLabs <- as.numeric(rownames(choice.prop.short))
plotLocs <- plotLabs
plotLocs[plotLocs < 0] <- plotLocs[plotLocs < 0] + 1
choice.prop.short <- data.frame(plotLocs,plotLabs,mean.choice.prop.short, lower.ci.choice.prop.short,upper.ci.choice.prop.short)
# create short plot
# Line + ribbon of novel-choice proportion around the change point; the
# dotted vertical line at 0.5 marks the change itself.
p4 <- ggplot(choice.prop.short,
             aes(x = plotLocs, y = mean.choice.prop.short)) +
  geom_ribbon(aes(ymin = lower.ci.choice.prop.short, ymax = upper.ci.choice.prop.short),
              colour = "white",fill = "gray", alpha = 0.7) +
  geom_line(size = 2) +
  geom_point(size = 2.5, shape = 21, fill = "white") +
  labs(x = "\nChange lag", y = "Novel choice proportion\n") +
  scale_x_continuous(breaks = c(-9, -4, 0.5, 5, 10), labels = c(-10, -5, 0, 5, 10)) +
  scale_y_continuous(expand = c(0,0), limits = c(0,0.5)) +
  geom_vline(xintercept = 0.5, lty = "dotted", size = 1) +
  theme(panel.grid = element_blank(),
        panel.background = element_rect(fill = "white"),
        axis.line = element_line(color = "black", size = 0.3),
        axis.title = element_text(size = 28),
        axis.text = element_text (size = 20),
        legend.title = element_blank(),
        legend.text = element_text(size = 24),
        plot.margin = margin(1, 1, 1, 1, "cm"))
# build short plot
p4
# get mean and sd choice proportion by lag number and block number
# (same aggregation as above, but additionally split by block: Group.1 = lag,
# Group.2 = block, Group.3 = participant)
choice.by.lag.short <- aggregate(proximal.data$filledChosen, by = list(proximal.data$changeLag, proximal.data$block, proximal.data$ID), FUN = mean)
# One row per block, one column per lag slot
mean.choice.prop.short <- matrix(data = 0, nrow = 3, ncol = 20)
lower.ci.choice.prop.short <- matrix(data = 0, nrow = 3, ncol = 20)
upper.ci.choice.prop.short <- matrix(data = 0, nrow = 3, ncol = 20)
# Bootstrap statistic (redefined for the block-wise analysis): mean of the
# resampled observations.
# Fixed: like the first definition, the original read the global `d`
# instead of its `data` argument; boot() passes the data in explicitly.
boot.fun <- function(data, indices){
  return(mean(data[indices]))
}
# Per-block version of the bootstrap-CI loop above.
# NOTE(review): the tapply result is assigned into a 20-wide matrix row —
# this assumes Group.1 has exactly 20 levels per block; confirm.
indices <- c(-10:-2, 1:10)
for (j in 1:3){
  temp.data <- subset(choice.by.lag.short, choice.by.lag.short$Group.2 == j)
  mean.choice.prop.short[j,] <- tapply(temp.data$x, temp.data$Group.1, FUN = mean)
  for (i in 1:length(indices)){
    print(i)
    d <- temp.data[temp.data$Group.1 == indices[i],]$x
    results <- boot(data = d,statistic = boot.fun, R = 1000)
    results.95.ci <- boot.ci(results)
    if (indices[i] > -2){
      putLoc <- i + 1
    } else{
      putLoc <- i
    }
    lower.ci.choice.prop.short[j,putLoc] = results.95.ci$normal[2]
    upper.ci.choice.prop.short[j,putLoc] = results.95.ci$normal[3]
  }
}
# Flatten the 3 x 20 matrices row-by-row into long format for ggplot
mean.choice.prop.short <- c(mean.choice.prop.short[1,],mean.choice.prop.short[2,],mean.choice.prop.short[3,])
lower.ci.choice.prop.short <- c(lower.ci.choice.prop.short[1,],lower.ci.choice.prop.short[2,],lower.ci.choice.prop.short[3,])
upper.ci.choice.prop.short <- c(upper.ci.choice.prop.short[1,],upper.ci.choice.prop.short[2,],upper.ci.choice.prop.short[3,])
block.number <- c(rep(1,20), rep(2,20),rep(3,20))
choice.prop.short <- data.frame(mean.choice.prop.short, lower.ci.choice.prop.short,upper.ci.choice.prop.short,block.number)
# Same change-point shift as the pooled plot: negatives move right by one
plotLabs <- rep(c(-10:-1,1:10),3)
plotLocs <- plotLabs
plotLocs[plotLocs < 0] <- plotLocs[plotLocs < 0] + 1
# create short plot
# Per-block version of the change-lag plot; the ribbon layer is left
# commented out in favour of one line per block.
cols <- c("1" = "#003e7d", "2" = "#0059b3", "3" = "#b2cde8")
p5 <- ggplot(choice.prop.short,
             aes(x = plotLocs, y = mean.choice.prop.short, colour = factor(block.number))) +
  # geom_ribbon(aes(ymin = lower.ci.choice.prop.short, ymax = upper.ci.choice.prop.short, group = block.number),
  #             colour = "gray",fill = "gray", alpha = 0.3) +
  geom_line(size = 2) +
  geom_point(size = 2.5, shape = 21, fill = "white") +
  labs(x = "\nChange lag", y = "Novel choice proportion\n") +
  scale_x_continuous(breaks = c(-9, -4, 0.5, 5, 10), labels = c(-10, -5, 0, 5, 10)) +
  scale_y_continuous(expand = c(0,0), limits = c(0,0.5)) +
  geom_vline(xintercept = 0.5, lty = "dotted", size = 1) +
  theme(panel.grid = element_blank(),
        panel.background = element_rect(fill = "white"),
        axis.line = element_line(color = "black", size = 0.3),
        axis.title = element_text(size = 28),
        axis.text = element_text (size = 20),
        legend.title = element_blank(),
        legend.text = element_text(size = 24),
        legend.key = element_rect(fill = 'white'),
        legend.key.size = unit(2, 'lines'),
        plot.margin = margin(1, 1, 1, 1, "cm"),
        legend.position = c(.2, .9))
# build short plot
p5 + scale_color_manual(name = "Block",values = cols, labels = c("Block 1","Block 2", "Block 3") )
|
4882e1a14f45f5d3c4ede772dc4fce07087620ab
|
a831455edcc95ffa62747cb06e166920c974afce
|
/analysis.R
|
c6cc02530a5169dcae882f30db877508391e7056
|
[] |
no_license
|
abentsui/hku-admission-2015
|
46a142e5bb47b3d2486642d19fd58b4514e8cb2c
|
b5c73460bc16bc167315a8fbc05c1494ea370545
|
refs/heads/master
| 2022-12-21T23:54:35.285796
| 2020-09-24T14:30:55
| 2020-09-24T14:30:55
| 298,302,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,272
|
r
|
analysis.R
|
#Library
library(tidyverse)
library(pdftools)
library(ggplot2)
library(tidyr)
#Set directory to this project directory
#Step 1: Read the table from HKU report
# Page 28 of the HKU undergraduate report holds the household-income table;
# split the page text into lines on Windows line endings.
path1 <- "data/HKU_2015_2016_UgReport.pdf"
txt1 <- pdf_text(path1)
txt1 <- txt1[[28]]
tab1 <- str_split(txt1, "\r\n")
tab1 <- tab1[[1]]
#As the header names spread over two rows, I need to manually input the entries
header_names1 <- c("HKD", "Arch", "Arts", "Bus & Econ", "Dent", "Ed", "Eng", "Law", "LKS Med", "Sci", "Soc Sci", "hku_percent")
hku_local_stu <- 3236
# Parse rows 28-46 of the page: normalise digit-whitespace-digit runs to a
# single space, split on runs of 2+ spaces into columns, convert to numbers.
# Fixed: the misspelled `stringAsFactor` argument was silently swallowed by
# `...`; it must be `stringsAsFactors` (matters on R < 4.0, where the
# default would otherwise create factors and break parse_number()).
uni_house_income <- tab1[28:46] %>%
  str_trim() %>%
  str_replace_all("(\\d)\\s(\\d)", "\\1 \\2") %>%
  str_split("\\s{2,}", simplify = TRUE) %>%
  as.data.frame(stringsAsFactors = FALSE) %>%
  setNames(header_names1) %>%
  mutate_at(-1, parse_number) %>%
  mutate(hku_num = round(hku_local_stu * hku_percent / 100)) %>%
  select(HKD, hku_percent, hku_num)
# Merge adjacent income bands so the HKU table matches the government
# table's coarser brackets, then drop the now-redundant rows and relabel.
#Combine row 2 and row 3 to for < 4,000
uni_house_income[3,2] <- uni_house_income[3,2] + uni_house_income[2,2]
uni_house_income[3,3] <- uni_house_income[3,3] + uni_house_income[2,3]
#Combine row 14 and row 15 to for 60,000 - 79,999
uni_house_income[15,2] <- uni_house_income[15,2] + uni_house_income[14,2]
uni_house_income[15,3] <- uni_house_income[15,3] + uni_house_income[14,3]
#Combine row 16 and row 17 to for 80,000 - 99,999
uni_house_income[17,2] <- uni_house_income[17,2] + uni_house_income[16,2]
uni_house_income[17,3] <- uni_house_income[17,3] + uni_house_income[16,3]
#The need of defining hkd range is because the "-" symbol is different
#between the two pdf
hkd_range <- c("Not applicable", "Under 2,000", "2,000 - 3,999",
               "4,000 - 5,999", "6,000 - 7,999",
               "8,000 - 9,999", "10,000 - 14,999",
               "15,000 - 19,999", "20,000 - 24,999",
               "25,000 - 29,999", "30,000 - 39,999",
               "40,000 - 49,999", "50,000 - 59,999",
               "60,000 - 69,999", "70,000 - 79,999",
               "80,000 - 89,999", "90,000 - 99,999",
               ">= 100,000", "Total")
# Drop the rows that were folded into their neighbours above and rename
# the surviving rows to the merged bracket labels.
uni_house_income_2 <- uni_house_income %>%
  mutate(HKD = hkd_range) %>%
  filter(!HKD %in% c("Under 2,000", "60,000 - 69,999", "80,000 - 89,999")) %>%
  mutate(HKD = recode(HKD,
                      '2,000 - 3,999' = "< 4,000",
                      '70,000 - 79,999' = "60,000 - 79,999",
                      '90,000 - 99,999' = "80,000 - 99,999"))
uni_house_income_2
#Step 2: Read table from HKGov Census and Statistics Dept Report
# Page 142 of the Census & Statistics quarterly report holds the
# population household-income table.
path2 <- "data/HKGOV_2016Q1_Report.pdf"
txt2 <- pdf_text(path2)
txt2 <- txt2[[142]]
tab2 <- str_split(txt2, "\r\n")
tab2 <- tab2[[1]]
header_names2 <- c("HKD", "2015", "2016")
# Keep only lines that look like "count (percent)" entries
ind <- str_which(tab2, "\\d+\\s*\\(\\d+.\\d\\)")
hk_house_income <- tab2[ind] %>%
  str_trim() %>%
  str_split("\\s{2,}", simplify = TRUE)
#Select the 1-3 columns and replace row entries
hk_house_income <- hk_house_income[,1:3]
hk_house_income[16,1] <- ">= 100,000"
hk_house_income[17,1] <- "Total"
# Split each "count (percent)" cell into separate numeric columns.
# Fixed: `stringAsFactor` was a misspelling of `stringsAsFactors` and was
# silently ignored via `...` (matters on R < 4.0).
hk_house_income <- hk_house_income %>%
  as.data.frame(stringsAsFactors = FALSE) %>%
  setNames(header_names2) %>%
  extract(`2015`, c("2015_num", "2015_percent"), "(\\d+.\\d)\\s*\\((\\d+.\\d)\\)") %>%
  extract(`2016`, c("2016_num", "2016_percent"), "(\\d+.\\d)\\s*\\((\\d+.\\d)\\)") %>%
  mutate_at(-1, parse_number) %>%
  select(`HKD`, `2015_percent`, `2015_num`)
#Combine row 9 and 10; Combine row 11 and 12.
hk_house_income[10,2] <- hk_house_income[10,2] + hk_house_income[9,2]
hk_house_income[10,3] <- hk_house_income[10,3] + hk_house_income[9,3]
hk_house_income[12,2] <- hk_house_income[12,2] + hk_house_income[11,2]
hk_house_income[12,3] <- hk_house_income[12,3] + hk_house_income[11,3]
# Drop folded rows and relabel to the merged brackets
hk_house_income_2 <- hk_house_income %>%
  filter(!HKD %in% c("30,000 - 34,999", "40,000 - 44,999")) %>%
  mutate(HKD = recode(HKD,
                      '35,000 - 39,999' = "30,000 - 39,999",
                      '45,000 - 49,999' = "40,000 - 49,999"))
hk_house_income_2
#Step 3: From hk_house_income to deduce income for 2015 DSE candidates
uni_house_income_2
hk_house_income_2
#Use hk_house_income to approximate the income distribution of 2015 DSE candidates
# (assumes DSE candidates' households follow the population distribution)
total_student <- 74131
stu_house_income <- hk_house_income_2 %>%
  mutate(`2015_student_num` = round(total_student * `2015_percent` / 100))
jt_income <- uni_house_income_2 %>% left_join(stu_house_income, by = 'HKD')
#Step 4: Plot of the percent of income distribution
# Explicit bracket ordering so the x-axis is sorted by income, not
# alphabetically
order_hkd <- c("Not applicable", "< 4,000", "4,000 - 5,999", "6,000 - 7,999",
               "8,000 - 9,999", "10,000 - 14,999", "15,000 - 19,999",
               "20,000 - 24,999", "25,000 - 29,999",
               "30,000 - 39,999", "40,000 - 49,999",
               "50,000 - 59,999", "60,000 - 79,999",
               "80,000 - 99,999", ">= 100,000", "Total")
jt_income %>%
  mutate(HKD = factor(HKD, levels = order_hkd)) %>%
  filter(!HKD %in% c("Not applicable", "Total")) %>%
  select(HKD, `hku_percent`, `2015_percent`) %>%
  rename(`population_percent` = `2015_percent`) %>%
  gather(key = "Type", value = "Percent", c(`hku_percent`, `population_percent`)) %>%
  ggplot(aes(HKD, Percent, color = Type)) +
  geom_point() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
#Numeric form of HKD
# Replace each bracket label with a representative numeric value (roughly
# the bracket midpoint) so the distributions can be drawn as lines.
jt_income_num <- jt_income %>%
  filter(!HKD %in% c("Not applicable", "Total")) %>%
  mutate(HKD = recode(HKD,
                      '< 4,000' = '4000',
                      '4,000 - 5,999' = '5000',
                      '6,000 - 7,999' = '8000',
                      '8,000 - 9,999' = '9000',
                      '10,000 - 14,999' = '12500',
                      '15,000 - 19,999' = '17500',
                      '20,000 - 24,999' = '22500',
                      '25,000 - 29,999' = '27500',
                      '30,000 - 39,999' = '35000',
                      '40,000 - 49,999' = '45000',
                      '50,000 - 59,999' = '55000',
                      '60,000 - 79,999' = '70000',
                      '80,000 - 99,999' = '90000',
                      '>= 100,000' = '100,000')) %>%
  mutate_at(1, parse_number)
jt_income_num %>%
  select(HKD, `hku_percent`, `2015_percent`) %>%
  rename(`HKU Student` = `hku_percent`) %>%
  rename(`Population` = `2015_percent`) %>%
  gather(key = "Type", value = "Percent", c(`HKU Student`, `Population`)) %>%
  ggplot(aes(HKD, Percent, color = Type)) +
  geom_line() +
  ggtitle("Monthly Household Income Distribution of HKU Students and Population in 2015") +
  xlab("Monthly Household Income (HKD)") +
  ylab("Percentage") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
# ggsave() with no plot argument saves the last displayed plot — the chain
# above auto-prints at top level, so this works in script mode too.
ggsave("figs/linePlot.png")
#Step 5: Calculate odds ratio
# Compare the odds of HKU admission for households below vs above the
# (approximate) median monthly income.  No bracket value equals 25000
# exactly, so the two filters partition all rows.
median_income <- 25000
below_median_group <- jt_income_num %>%
  filter(HKD < median_income) %>%
  mutate(not_admit = `2015_student_num` - hku_num) %>%
  summarize(ttl_hku_num = sum(hku_num), ttl_not_admit = sum(not_admit), ttl_stu_num = sum(`2015_student_num`))
# or1 / or2 are the odds of admission in each group; their ratio is the OR
or1 <- below_median_group$ttl_hku_num/below_median_group$ttl_not_admit
above_median_group <- jt_income_num %>%
  filter(HKD > median_income) %>%
  mutate(not_admit = `2015_student_num` - hku_num) %>%
  summarize(ttl_hku_num = sum(hku_num), ttl_not_admit = sum(not_admit), ttl_stu_num = sum(`2015_student_num`))
or2 <- above_median_group$ttl_hku_num/above_median_group$ttl_not_admit
or1/or2
|
6958f389e8015abf51fe9f4124bf0eaddfbbd492
|
e5cbbdd1afba5eae30369a2ff7bfbcb42afcae29
|
/clusters_data.R
|
f11249b46151bf786542675a4dcca379c6da4e5a
|
[] |
no_license
|
JonathanJohann/EE364BFinalProject
|
3ca76c2ff84a3c9e34045d35acb621a6c9002dbb
|
09989a695d92367946be38489b1e1d382a2d904f
|
refs/heads/master
| 2020-05-25T11:19:56.158116
| 2019-06-05T10:44:27
| 2019-06-05T10:44:27
| 187,777,660
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,951
|
r
|
clusters_data.R
|
library(ggplot2)
library(Matrix)
library(MASS)
source("l2_map.R")
dataset = "clusters"
# Draw n_per_cluster points from a unit-covariance multivariate Gaussian
# around each row of mean_vals.
#
# mean_vals     : k x p matrix; each row is a cluster centre.
# n_per_cluster : number of samples to draw per cluster.
# Returns a (k * n_per_cluster) x (p + 1) matrix whose last column is the
# integer cluster label (row index of mean_vals).
#
# Fixed: removed the unused `labels` local and replaced the O(k^2)
# rbind-in-a-loop growth with a single do.call(rbind, ...).  RNG call
# order (one mvrnorm per cluster, in row order) is unchanged.
generate_clusters <- function(mean_vals, n_per_cluster){
  p <- ncol(mean_vals)
  clusters <- lapply(seq_len(nrow(mean_vals)), function(i){
    pts <- MASS::mvrnorm(n_per_cluster, mu = mean_vals[i, ], Sigma = diag(p))
    cbind(pts, rep(i, n_per_cluster))
  })
  do.call(rbind, clusters)
}
# Build the synthetic "clusters" dataset: 50 points around each corner of
# the hypercube {0,5}^4 (16 clusters), with the label column stripped.
# seed : RNG seed for reproducibility.
# Returns a (16 * 50) x 4 numeric matrix.
clusters_data <- function(seed = 123123){
  set.seed(seed)
  corner <- c(5, 0)
  centres <- as.matrix(expand.grid(corner, corner, corner, corner))
  dat <- generate_clusters(centres, 50)
  # drop the trailing cluster-label column
  dat[, -5]
}
# Generate a d1 x d2 integer grid X (columns x1, x2) and its random linear
# projection Z = X %*% P into d3 dimensions, where P has iid N(0,1) entries.
# Returns list(X = grid matrix, Z = projected matrix).
mat_data <- function(d1 = 5, d2 = 5, d3 = 5){
  grid <- expand.grid(x1 = 1:d1, x2 = 1:d2)
  X <- as.matrix(grid)
  projection <- matrix(rnorm(2 * d3), nrow = 2, ncol = d3)
  list(X = X, Z = X %*% projection)
}
# Deterministic toy dataset: the 8 vertices of the cube [-3, 3]^3.
# Returns list(X = 8 x 3 matrix with columns x1, x2, x3).
cube_data <- function(){
  corners <- c(-3, 3)
  vertices <- expand.grid(x1 = corners, x2 = corners, x3 = corners)
  list(X = as.matrix(vertices))
}
set.seed(123123)
# Load the dataset selected at the top of the script: synthetic clusters,
# or MNIST (last column 785 is the label, dropped).
if(dataset=="clusters"){
  X <- clusters_data()
}else{
  X <- read.csv("mnist_data.csv")
  X <- as.data.frame(X)
  X <- X[,-785]
}
seeds <- sample(999999,size=50,replace=TRUE)
# Initialise the running "best score" (best_<v> = -Inf) and "best
# embedding" (<v>_star = 0) trackers for every method variant:
# x2 = MDS, x3 = Sammon, and x4..x7 each with 5 numbered sub-variants.
# Replaces 52 hand-written assignments with a loop over generated names;
# the resulting global variables are identical.
variant_names <- c("x2", "x3",
                   unlist(lapply(4:7, function(k)
                     c(paste0("x", k), paste0("x", k, "_", 1:5)))))
for(nm in variant_names){
  assign(paste0("best_", nm), -Inf)
  assign(paste0(nm, "_star"), 0)
}
# Iteratively re-weighted low-dimensional embedding: start from classical
# MDS, then repeatedly refit l2_map with weights equal to the inverse
# squared residual between high- and low-dimensional distances (capped at
# 1e+6 to avoid blow-up where the fit is exact).
#
# X      : data matrix to embed.
# d      : target embedding dimension (default 2).
# niter  : number of re-weighting iterations.
# niter2 : iterations per inner l2_map fit.
# ...    : forwarded to l2_map (e.g. method, beta).
#
# Fixed: `d` was accepted but ignored (the body hard-coded 2); it is now
# honoured, with the default preserving the old behaviour.  dist(X) is
# also hoisted out of the loop since it never changes.
iterative_map <- function(X, d = 2, niter = 1, niter2 = 1000, ...){
  DX <- dist(X)
  x_temp <- cmdscale(DX, k = d)
  for(i in seq_len(niter)){
    eps <- 1/(DX - dist(x_temp))^2
    eps <- ifelse(eps > 1e+6, 1e+6, eps)
    x_temp <- l2_map(X, d = d, W = eps, niter = niter2, ...)$X
  }
  x_temp
}
# Disabled inverse-probability weighting helper (kept for reference; the
# corresponding x7 variants below are also commented out).
#inverse_p <- function(Z){
#  P = ecdf(Z[Z!=0])
#  eps = 1e-06
#  out <- ifelse(P(Z)!=0,1/P(Z),1/eps)
#  return(out)
#}
#ip_weights <- inverse_p(dist(X))
# k for the range_kept evaluations: all other points (n - 1 neighbours)
n = dim(X)[1]-1
X = as.matrix(X)
# Run the embedding comparison (loop kept at 1:1 for a single seed, but
# structured for easy extension to more seeds).  Per seed: classical MDS
# (x2), Sammon mapping (x3), and l2_map under three weighting schemes —
# x4 = inverse-square weights from the MDS fit, x5 = unweighted,
# x6 = iterative re-weighting — each with 6 step-size/descent sub-variants.
# Each variant's range_kept score is compared against its running best.
#
# Fixed in review: three typo'd tracker names (best_4_x4, best_5_x4,
# best_6_x4) which created stray globals instead of updating
# best_x4_4 / best_x5_4 / best_x6_4, plus a duplicated x4_4 if-block.
for(i in 1:1){
  set.seed(seeds[i])
  x2 <- cmdscale(dist(X),k=2)
  x3 <- sammon(dist(X),k = 2)$points
  # inverse-square residual weights from the MDS solution, capped at 1e+6
  eps = 1/(dist(X) - dist(x2))^2
  eps = ifelse(eps>1e+6,1e+6,eps)
  #single iteration
  x4 <- l2_map(X,d=2,W=eps,niter=1000,method=0)$X
  x4_1<- l2_map(X,d=2,W=eps,niter=1000,method=1)$X
  x4_2<- l2_map(X,d=2,W=eps,niter=1000,method=2)$X
  x4_3<- l2_map(X,d=2,W=eps,niter=1000,method=3,beta=0.3)$X
  x4_4<- l2_map(X,d=2,W=eps,niter=1000,method=3,beta=0.7)$X
  x4_5 <- l2_map(X,d=2,W=eps,niter=1000,method=4)$X
  #normal
  x5 <- l2_map(X,d=2,niter=1000,method=0)$X
  x5_1<- l2_map(X,d=2,niter=1000,method=1)$X
  x5_2<- l2_map(X,d=2,niter=1000,method=2)$X
  x5_3<- l2_map(X,d=2,niter=1000,method=3,beta=0.3)$X
  x5_4<- l2_map(X,d=2,niter=1000,method=3,beta=0.7)$X
  x5_5 <- l2_map(X,d=2,niter=1000,method=4)$X
  #iterative
  x6 <- iterative_map(X=X,niter=10,method=0)
  x6_1 <- iterative_map(X=X,niter=10,method=1)
  x6_2<- iterative_map(X=X,niter=10,method=2)
  x6_3<- iterative_map(X=X,niter=10,method=3,beta=0.3)
  x6_4 <- iterative_map(X=X,niter=10,method=3,beta=0.7)
  x6_5<- iterative_map(X=X,niter=10,method=4)
  #inverse probability (disabled, see inverse_p above)
  #x7 <- l2_map(X,d=2,W=ip_weights,niter=1000,method=0)$X
  #x7_1 <- l2_map(X,d=2,W=ip_weights,niter=1000,method=1)$X
  #x7_2 <- l2_map(X,d=2,W=ip_weights,niter=1000,method=2)$X
  #x7_3 <- l2_map(X,d=2,W=ip_weights,niter=1000,method=3,beta=0.3)$X
  #x7_4 <- l2_map(X,d=2,W=ip_weights,niter=1000,method=3,beta=0.7)$X
  #x7_5 <- l2_map(X,d=2,W=ip_weights,niter=1000,method=4)$X
  # Evaluate every embedding against the original data
  rk2 <- range_kept(X1=x2,X2=X,k=n)
  rk3 <- range_kept(X1=x3,X2=X,k=n)
  rk4 <- range_kept(X1=x4,X2=X,k=n)
  rk4_1 <- range_kept(X1=x4_1,X2=X,k=n)
  rk4_2 <- range_kept(X1=x4_2,X2=X,k=n)
  rk4_3 <- range_kept(X1=x4_3,X2=X,k=n)
  rk4_4 <- range_kept(X1=x4_4,X2=X,k=n)
  rk4_5 <- range_kept(X1=x4_5,X2=X,k=n)
  rk5 <- range_kept(X1=x5,X2=X,k=n)
  rk5_1 <- range_kept(X1=x5_1,X2=X,k=n)
  rk5_2 <- range_kept(X1=x5_2,X2=X,k=n)
  rk5_3 <- range_kept(X1=x5_3,X2=X,k=n)
  rk5_4 <- range_kept(X1=x5_4,X2=X,k=n)
  rk5_5 <- range_kept(X1=x5_5,X2=X,k=n)
  rk6 <- range_kept(X1=x6,X2=X,k=n)
  rk6_1 <- range_kept(X1=x6_1,X2=X,k=n)
  rk6_2 <- range_kept(X1=x6_2,X2=X,k=n)
  rk6_3 <- range_kept(X1=x6_3,X2=X,k=n)
  rk6_4 <- range_kept(X1=x6_4,X2=X,k=n)
  rk6_5 <- range_kept(X1=x6_5,X2=X,k=n)
  #rk7 <- range_kept(X1=x7,X2=X,k=n)
  #rk7_1 <- range_kept(X1=x7_1,X2=X,k=n)
  #rk7_2 <- range_kept(X1=x7_2,X2=X,k=n)
  #rk7_3 <- range_kept(X1=x7_3,X2=X,k=n)
  #rk7_4 <- range_kept(X1=x7_4,X2=X,k=n)
  #rk7_5 <- range_kept(X1=x7_5,X2=X,k=n)
  # Keep the best-scoring embedding per variant across seeds
  if(rk2>best_x2){
    best_x2 = rk2
    x2_star = x2
  }
  if(rk3>best_x3){
    best_x3 = rk3
    x3_star = x3
  }
  if(rk4>best_x4){
    best_x4 = rk4
    x4_star = x4
  }
  if(rk4_1>best_x4_1){
    best_x4_1 = rk4_1
    x4_1_star = x4_1
  }
  if(rk4_2>best_x4_2){
    best_x4_2 = rk4_2
    x4_2_star = x4_2
  }
  if(rk4_3>best_x4_3){
    best_x4_3 = rk4_3
    x4_3_star = x4_3
  }
  if(rk4_4>best_x4_4){
    best_x4_4 = rk4_4
    x4_4_star = x4_4
  }
  if(rk4_5>best_x4_5){
    best_x4_5 = rk4_5
    x4_5_star = x4_5
  }
  if(rk5>best_x5){
    best_x5 = rk5
    x5_star = x5
  }
  if(rk5_1>best_x5_1){
    best_x5_1 = rk5_1
    x5_1_star = x5_1
  }
  if(rk5_2>best_x5_2){
    best_x5_2 = rk5_2
    x5_2_star = x5_2
  }
  if(rk5_3>best_x5_3){
    best_x5_3 = rk5_3
    x5_3_star = x5_3
  }
  if(rk5_4>best_x5_4){
    best_x5_4 = rk5_4
    x5_4_star = x5_4
  }
  if(rk5_5>best_x5_5){
    best_x5_5 = rk5_5
    x5_5_star = x5_5
  }
  if(rk6>best_x6){
    best_x6 = rk6
    x6_star = x6
  }
  if(rk6_1>best_x6_1){
    best_x6_1 = rk6_1
    x6_1_star = x6_1
  }
  if(rk6_2>best_x6_2){
    best_x6_2 = rk6_2
    x6_2_star = x6_2
  }
  if(rk6_3>best_x6_3){
    best_x6_3 = rk6_3
    x6_3_star = x6_3
  }
  if(rk6_4>best_x6_4){
    best_x6_4 = rk6_4
    x6_4_star = x6_4
  }
  if(rk6_5>best_x6_5){
    best_x6_5 = rk6_5
    x6_5_star = x6_5
  }
  #if(rk7>best_x7){
  #  best_x7 = rk7
  #  x7_star = x7
  #}
  #if(rk7_1>best_x7_1){
  #  best_x7_1 = rk7_1
  #  x7_1_star = x7_1
  #}
  #if(rk7_2>best_x7_2){
  #  best_x7_2 = rk7_2
  #  x7_2_star = x7_2
  #}
  #if(rk7_3>best_x7_3){
  #  best_x7_3 = rk7_3
  #  x7_3_star = x7_3
  #}
  #if(rk7_4>best_x7_4){
  #  best_x7_4 = rk7_4
  #  x7_4_star = x7_4
  #}
  #if(rk7_5>best_x7_5){
  #  best_x7_5 = rk7_5
  #  x7_5_star = x7_5
  #}
  print("Done")
}
# Assemble one row per method variant (scores are from the last/only
# seed's rk* values) and write to "<dataset><seed>.csv".
method <- c("MDS","Sammon",rep("Inverse Square",6),rep("Iterative Inverse Square",6),rep("L2 Map",6))
stepsize <- c("-","-",rep(c("0.1","1/k","1/sqrt(k)","0.1","0.1","0.1"),3))
descent_method <- c("-","-",rep(c("-","-","-","beta=0.3","beta=0.7","adagrad"),3))
eval <- c(rk2,rk3,rk4,rk4_1,rk4_2,rk4_3,rk4_4,rk4_5,rk5,rk5_1,rk5_2,rk5_3,rk5_4,rk5_5,rk6,rk6_1,rk6_2,rk6_3,rk6_4,rk6_5)
df <- data.frame(method=method,
                 stepsize=stepsize,
                 descent_method=descent_method,
                 eval=eval)
df[,"seed"] <- seeds[1]
# Fixed: write.csv() has no `filename` argument — passing it raised an
# "unused argument" error in write.table(); the argument is `file`.
write.csv(df, file = paste0(dataset, seeds[1], ".csv"))
|
ede1566e942cc4dcdf72ff9305dfb00caa60ad4c
|
407c55b957ae702ce8f738498f9d0b8a8bf2b52f
|
/vis/smartphones/smartphones.r
|
48880f56daf06c19e2a3336db0fd52d654964ea7
|
[
"MIT"
] |
permissive
|
edwardoughton/pytal
|
0fc25072d14238a6796e5683b8c5fa7d85cc1239
|
aebab40178f2f7f1599ec1efdac6def25f0834ba
|
refs/heads/master
| 2022-07-11T02:43:21.762818
| 2022-02-18T15:49:17
| 2022-02-18T15:49:17
| 197,574,539
| 6
| 3
|
MIT
| 2021-08-11T11:56:51
| 2019-07-18T11:31:51
|
Python
|
UTF-8
|
R
| false
| false
| 2,713
|
r
|
smartphones.r
|
#Spectrum costs
library(tidyverse)
library(ggpubr)
#get folder directory
# NOTE(review): rstudioapi only works when run from RStudio — confirm this
# script is never run via Rscript.
folder <- dirname(rstudioapi::getSourceEditorContext()$path)
folder_inputs = file.path(folder, "data_inputs")
# Fixed: list.files() takes a regex, not a glob — "*.csv" matched any name
# containing "csv" anywhere; "\\.csv$" anchors the extension at the end.
files = list.files(path=folder_inputs, pattern="\\.csv$")
# Read every per-country CSV and stack them into one data frame
data <-
  do.call("rbind",
          lapply(files,
                 function(x)
                   read.csv(file.path(folder_inputs, x),
                            stringsAsFactors = FALSE)))
# Order countries by cluster and attach display labels; order settlement
# type Urban before Rural for faceting.
data$country = factor(data$country, levels=c("UGA",
                                             "MWI",
                                             "KEN",
                                             "SEN",
                                             "PAK",
                                             "ALB",
                                             "PER",
                                             "MEX"),
                      labels=c("Uganda\n(Cluster 1)",
                               "Malawi\n(Cluster 1)",
                               "Kenya\n(Cluster 2)",
                               "Senegal\n(Cluster 2)",
                               "Pakistan\n(Cluster 3)",
                               "Albania\n(Cluster 4)",
                               "Peru\n(Cluster 5)",
                               "Mexico\n(Cluster 6)"))
data$settlement_type = factor(data$settlement_type,
                              levels=c("urban",
                                       "rural"),
                              labels=c("Urban",
                                       "Rural"
                              ))
# Faceted (Urban/Rural) line plot of smartphone penetration per country,
# 2020-2030, with a dashed reference line at the forecast start.
# NOTE(review): scale_shape_manual supplies 9 values for 8 countries —
# the extra value is unused and harmless.
subscriptions <- ggplot(data, aes(x=year, y=penetration, group=country)) +
  geom_point(aes(shape=country, color=country), size=2.5) +
  geom_line(aes(color=country)) +
  scale_shape_manual(values=c(0, 1, 2, 3, 4, 5, 6, 7, 8)) +
  scale_color_manual(values=c("#F0E442", "#F0E442","#E69F00", "#E69F00","#D55E00", "#0072B2", "#56B4E9","#009E73"))+
  geom_vline(xintercept=2020, linetype="dashed", color = "grey", size=.5) +
  scale_x_continuous(expand = c(0, 0), limits = c(2020,2030), breaks = seq(2020,2030,1)) +
  scale_y_continuous(expand = c(0, 0), limits = c(0,95)) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1),
        legend.position = "bottom", legend.title=element_blank()) +
  labs(title = "Smartphone Penetration",
       subtitle = "Forecast: 2020-2030 ",
       x = NULL, y = "Smartphone Adoption (%)") +
  guides(colour=guide_legend(ncol=4)) +
  facet_wrap(~settlement_type, ncol=1)
path = file.path(folder, 'figures', 'smartphones.png')
# Fixed: pass the plot explicitly — ggsave() defaults to the last
# *displayed* plot, and `subscriptions` has not been printed yet at this
# point, so the default could save the wrong plot (or fail) in a fresh
# session.
ggsave(path, plot = subscriptions, units="in", width=7, height=7, dpi=300)
print(subscriptions)
dev.off()
|
c7e148075a20b5327e373d612462617d8a54ee8d
|
3a6b47ec44a959bba312709cc1bee52ac3270092
|
/Scrape Data.R
|
5e8b61be5095efad50ba3238496b1a58b9c62371
|
[] |
no_license
|
bmewing/garthbrooks
|
f7c81dc0694f46625002d833b0cd6ced5b21a295
|
2eeba7cf7f4082c6fc07137c1fdfda320bdd4564
|
refs/heads/master
| 2021-01-18T13:10:36.402102
| 2017-02-02T14:00:04
| 2017-02-02T14:00:04
| 80,728,837
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,603
|
r
|
Scrape Data.R
|
library(RSelenium)
library(rvest)
library(magrittr)
library(dplyr)
#helper functions for zip codes
source('Zip Codes/get all zip codes needed.R')
#It seems that brickseek uses a radius of 100 miles based on observation
#This function will generate a (conservative) grid of zip codes that provide complete coverage of the US for a given radius
ziplist = getZipList(100)
#This function takes a row of the table returned by brickseek and extracts the Store and Saleable quantity information
# row : an rvest node for one <tr>; cell 2 holds the store, cell 4 the
# quantity. Parenthesised notes are stripped from every cell.
# (dplyr::data_frame is soft-deprecated in favour of tibble(), kept here
# to preserve behaviour byte-for-byte.)
getDetails = function(row){
  cells <- row %>%
    html_nodes(css = "td") %>%
    html_text()
  cells <- gsub("\\(.*?\\)", "", cells)
  return(dplyr::data_frame(Store = cells[2], Qty = cells[4]))
}
#This function manages the Selenium browser to reload the Garth Brooks page, insert the zip code, process results and return them in a data frame
# zip   : zip code as a string, typed into the search form.
# remDr : an open RSelenium remoteDriver.
# Side effects: navigates the browser; sleeps 10s as a crude wait for the
# results table to render (no explicit-wait API is used).
getCounts = function(zip,remDr){
  remDr$navigate("http://brickseek.com/target-inventory-checker/?sku=012-05-0331")
  elem = remDr$findElement(using = "css",value = "input[name='zip']")
  elem$sendKeysToElement(list(zip))
  # second submit button on the page is the inventory search
  elem = remDr$findElement(using = "xpath",value = "(//input[@type='submit'])[2]")
  elem$submitElement()
  Sys.sleep(10)
  source = remDr$getPageSource()
  page = read_html(source[[1]])
  results = page %>%
    html_nodes(css = "tr[class='store_row']")
  # one data_frame per store row, stacked into a single data frame
  return(lapply(results,getDetails) %>%
    do.call(dplyr::bind_rows,.))
}
remDr = remoteDriver()
remDr$open()
out = list()
#For loops are bad
#However, at some point brickseek will check if you're a robot and it will break the results
#This is setup to allow for easy recovery from a break by updating what zip codes are in the loop
#There may be sexier ways of doing this which I'd love to see
ziplist2 = ziplist[!(ziplist %in% names(out))]
# Fixed: the loop iterated over undefined `ziplist3` (object-not-found
# error on a fresh run); it should walk the not-yet-scraped zips in
# `ziplist2`.
for(i in ziplist2){
  out[[i]] = getCounts(i,remDr)
}
remDr$close()
#Clean up the results, make it into one big data frame with no duplicates
finalOut = out %>%
  do.call(bind_rows,.) %>%
  distinct() %>%
  mutate(State = gsub(".*?, ([A-Z]+) [0-9\\-]+","\\1",Store))
#Summarize the output
summarisedCounts = finalOut %>%
  group_by(State) %>%
  dplyr::summarise(nTargets = n(), Avg = mean(as.numeric(Qty)),Total = sum(as.numeric(Qty)))
# Fixed: replaced the row-wise grep() loop (self-described as "the worst
# possible way") with a vectorised exact lookup.  match() is exact, so it
# cannot mis-match on partial patterns, and returns NA (instead of erroring
# with a zero-length replacement) for non-state codes like "DC".
summarisedCounts$FullState <- tolower(state.name[match(summarisedCounts$State, state.abb)])
#Make the choropleth map
library(choroplethr)
# NOTE(review): the title says "Average" but `value = Total` plots the
# summed quantity — confirm which is intended (Avg is also computed above).
summarisedCounts %>%
  select(region = FullState, value = Total) %>%
  state_choropleth(title = 'Average Limited Edition Garth Brooks CD Sets in a Target by State')
|
d40931ec74e0fa7f8cad70b646874103df0c6f83
|
a3c9774bfc4fae322e706fd83a4caf15775ed39a
|
/inst/doc/geex_intro.R
|
3101676b955419434d341c10902fbf2f0bd59f98
|
[] |
no_license
|
BarkleyBG/geex
|
b3d2ba8bcb0f683d9784f3300449982e672977d6
|
a70c010a72698aa2a9f88d746d4c9d75ceceecd3
|
refs/heads/master
| 2021-05-01T02:04:28.712327
| 2017-02-06T15:13:33
| 2017-02-06T15:13:33
| 79,879,625
| 0
| 0
| null | 2017-01-24T04:51:24
| 2017-01-24T04:51:24
| null |
UTF-8
|
R
| false
| false
| 17,003
|
r
|
geex_intro.R
|
## ---- echo = FALSE, message = FALSE, warning=FALSE-----------------------
# knitr-purled vignette code: the "## ---- ... ----" markers delimit the
# original Rmd chunks; the duplicated library() calls below are an
# artefact of purl, not a mistake to remove.
library(geex)
library(dplyr)
library(inferference)
library(sandwich)
library(xtable)
library(moments)
library(MASS)
library(knitr)
library(rprojroot)
# child.path <- normalizePath(paste0(find_package_root_file(), '/vignettes/examples/'))
opts_knit$set(progress = TRUE, verbose = TRUE, child.path = 'examples/')
# library(microbenchmark)
## ---- echo = FALSE, message = FALSE, warning=FALSE-----------------------
library(geex)
library(dplyr)
library(inferference)
library(sandwich)
library(xtable)
# library(microbenchmark)
## ----functions_results, echo = FALSE-------------------------------------
# Format a numeric matrix (or vector, promoted to a 1-row matrix) as an
# inline LaTeX pmatrix string, e.g. "$\begin{pmatrix}...\end{pmatrix}$".
# Uses xtable with all decoration suppressed; print.results = FALSE makes
# print() return the string instead of emitting it.
print_pmatrix <- function(object, digits = 4){
  if(!is.matrix(object)){
    object <- matrix(object, nrow = 1)
  }
  tab <- xtable(object, align = rep("", ncol(object) + 1), digits = digits)
  body <- print(tab, comment = FALSE, floating = FALSE,
                tabular.environment = "pmatrix", hline.after = NULL,
                include.rownames = FALSE, include.colnames = FALSE,
                print.results = FALSE)
  paste0('$', body, '$')
}
# Decimal place of the first significant digit of |x|: 2 for 0.01,
# 1 for 0.5, -2 for 123.  Vectorised over x.
first_diff_dec <- function(x){
  magnitude <- floor(log10(abs(x)))
  -magnitude
}
# Cat a LaTeX table comparing closed-form vs geex estimates.
# results : list with $cls and $geex, each holding $parameters and $vcov.
# label, caption : LaTeX label/caption text (the caption is wrapped in
#   literal double quotes in the output).
# NOTE(review): `d = 0` relies on partial matching to print_pmatrix's
# `digits` argument — spell it out if this is ever refactored.
print_results <- function(results, label, caption){
  r <- results
  cat('\\begin{table}[H] \n',
      '\\centering \n',
      '\\label{', label, '} \n',
      '\\caption{"', caption, '"} \n',
      '\\begin{tabular}{lcc} \n',
      ' & $\\hat{\\theta}$ & $\\hat{\\Sigma}$ \\\\ \n',
      'Closed form &', print_pmatrix(r$cls$parameters), '&', print_pmatrix(r$cls$vcov), '\\\\ \n',
      'geex &', print_pmatrix(r$geex$parameters), '&', print_pmatrix(r$geex$vcov), '\\\\ \n',
      'Decimal of difference &', print_pmatrix(first_diff_dec(r$cls$parameters - r$geex$parameters), d = 0), '&',
      print_pmatrix(first_diff_dec(r$cls$vcov - r$geex$vcov), d = 0), '\\\\ \n',
      '\\end{tabular} \n',
      '\\end{table}')
}
## ----SB1_setup, echo=FALSE-----------------------------------------------
# Example 1 data: n iid Normal(mu, sigma) observations, one unit per row
n <- 100
mu <- 5
sigma <- 2
dt <- data.frame(Y = rnorm(n, mean = mu, sd = sigma), id = 1:n)
## ----SB1_eefun, echo=FALSE, results='hide'-------------------------------
# Estimating-function factory for example 1 (mean and variance):
# given one unit's data, returns psi(theta) where theta = (mean, variance).
SB1_eefun <- function(data){
  function(theta){
    resid <- data$Y - theta[1]
    c(resid,
      resid^2 - theta[2])
  }
}
## ----SB1_run, echo=TRUE--------------------------------------------------
# M-estimate theta = (mean, variance) and its sandwich variance via geex
estimates <- estimate_equations(eeFUN = SB1_eefun,
                                data = dt, units = 'id',
                                roots = c(1,1))
## ----SB1_clsform, echo=FALSE---------------------------------------------
## Compare to closed form ##
# Sandwich pieces: A = bread (identity here), B = meat (empirical moments)
A <- diag(1, nrow = 2)
B <- with(dt, {
  Ybar <- mean(Y)
  B11 <- mean( (Y - Ybar)^2 )
  B12 <- mean( (Y - Ybar) * ((Y - Ybar)^2 - B11) )
  B22 <- mean( ((Y - Ybar)^2 - B11)^2 )
  matrix(
    c(B11, B12,
      B12, B22), nrow = 2
  )
})
## closed form roots
# note that var() divides by n - 1, not n
theta_cls <- summarize(dt, p1 = mean(Y), p2 = var(Y) * (n() - 1)/ n() )
# closed form
Sigma_cls <- (solve(A) %*% B %*% t(solve(A))) / n
## ----SB1_results, echo = FALSE, results = 'asis'-------------------------
results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
print_results(results, 'test', 'test')
## ----SB_setup, echo=FALSE------------------------------------------------
# Example 2 data: independent normals Y and X; target includes the ratio
# of their means
n <- 100
muY <- 5
sigmaY <- 2
muX <- 2
sigmaX <- 0.2
dt <- data.frame(Y = rnorm(n, mean = muY, sd = sigmaY),
                 X = rnorm(n, mean = muX, sd = sigmaX),
                 id = 1:n)
## ----SB2_eefun, echo = FALSE---------------------------------------------
# Estimating-function factory for example 2: theta = (mean Y, mean X,
# ratio mean(Y)/mean(X)); the third equation ties the ratio to the means.
SB2_eefun <- function(data){
  function(theta){
    c(data$Y - theta[1],
      data$X - theta[2],
      theta[1] - (theta[3] * theta[2]))
  }
}
## ----SB2_run, echo = TRUE------------------------------------------------
estimates <- estimate_equations(eeFUN = SB2_eefun,
                                data = dt, units = 'id',
                                roots = c(1, 1, 1))
## ----SB2_clsform, echo = FALSE-------------------------------------------
## Compare to closed form ##
# Bread matrix A: Jacobian of the estimating equations at the estimates
A <- with(dt, {
  matrix(
    c(1 , 0, 0,
      0 , 1, 0,
      -1, mean(Y)/mean(X), mean(X)),
    byrow = TRUE, nrow = 3)
})
# Meat matrix B: covariance of the estimating functions
B <- with(dt, {
  matrix(
    c(var(Y) , cov(Y, X), 0,
      cov(Y, X), var(X) , 0,
      0, 0, 0),
    byrow = TRUE, nrow = 3)
})
## closed form roots
theta_cls <- summarize(dt, p1 = mean(Y), p2 = mean(X), p3 = p1/p2)
## closed form covariance
Sigma_cls <- (solve(A) %*% B %*% t(solve(A))) / n
## ----SB2_results, echo = FALSE, results = 'asis'-------------------------
results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
print_results(results, 'test', 'test')
## ----SB3_setup, echo=FALSE-----------------------------------------------
# Example 3 data: delta-method targets sqrt and log of the variance
n <- 100
mu <- 5
sigma <- 4
set.seed(100) # running into issue where sqrt(theta2) and log(theta2) return NaN for some seeds
dt <- data.frame(Y = rnorm(n, mean = mu, sd = sigma),
                 id = 1:n)
## ----SB3_eefun, echo = FALSE---------------------------------------------
# Estimating-function factory for example 3:
# theta = (mean, variance, sd = sqrt(variance), log(variance)).
SB3_eefun <- function(data){
  function(theta){
    dev <- data$Y - theta[1]
    c(dev,
      dev^2 - theta[2],
      sqrt(theta[2]) - theta[3],
      log(theta[2]) - theta[4])
  }
}
## ----SB3_run, echo = TRUE------------------------------------------------
estimates <- estimate_equations(eeFUN= SB3_eefun,
                                data = dt, units = 'id',
                                roots = c(1, 1, 1, 1))
## ----SB3_clsform, echo = FALSE-------------------------------------------
## closed form roots
theta_cls <- summarize(dt, p1 = mean(Y), p2 = sum((Y - p1)^2)/n(), p3 = sqrt(p2), p4 = log(p2))
## Compare to closed form ##
# Closed-form covariance built from the 3rd/4th central moments; the
# commented A/B construction below is the equivalent sandwich derivation.
theta2 <- theta_cls$p2
mu3 <- moments::moment(dt$Y, order = 3, central = TRUE)
mu4 <- moments::moment(dt$Y, order = 4, central = TRUE)
# A <- matrix(c(1, 0, 0, 0,
#               0, 1, 0, 0,
#               0, -1/(2 * sqrt(theta2)), 1, 0,
#               0, -1/theta2, 0, 1),
#             byrow = TRUE, nrow = 4)
# B <- matrix(c(1/theta2, mu3/(2 * theta2^3), 0, 0,
#               mu3/(2 * theta2^3), (mu4 - theta2^2)/(4 * theta2^4), 0, 0,
#               0, 0, 0, 0,
#               0, 0, 0, 0),
#             byrow = TRUE, nrow = 4)
## closed form covariance
Sigma_cls <- matrix(
  c(theta2, mu3, mu3/(2*sqrt(theta2)), mu3/theta2,
    mu3, mu4 - theta2^2, (mu4 - theta2^2)/(2*sqrt(theta2)), (mu4 - theta2^2)/theta2,
    mu3/(2 * sqrt(theta2)), (mu4 - theta2^2)/(2*sqrt(theta2)), (mu4 - theta2^2)/(4*theta2), (mu4 - theta2^2)/(2*theta2^(3/2)),
    mu3/theta2, (mu4 - theta2^2)/theta2, (mu4 - theta2^2)/(2*theta2^(3/2)), (mu4/theta2^2) - 1) ,
  nrow = 4, byrow = TRUE) / n
## closed form covariance
# Sigma_cls <- (solve(A) %*% B %*% t(solve(A))) / n
## ----SB3_results, echo = FALSE, results = 'asis'-------------------------
results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
print_results(results, 'ex3_results', 'Example 3')
## ----SB4_setup, echo=FALSE-----------------------------------------------
# Example 4 data: instrumental-variable setup — Y depends on latent X;
# W is X measured with error; T_ is an instrument correlated with X.
n <- 100
# Oracle parms
alpha <- 2
beta <- 3
gamma <- 2
delta <- 1.5
# NOTE(review): e1, e2, e3 are all set to the SAME draw here, making the
# three error terms perfectly correlated — confirm this is intended.
e1 <- e2 <- e3 <- rnorm(n)
sigma_e <- 1
sigma_U <- .25
sigma_tau <- 1
### Random variables
# X is overwritten immediately: the gamma draw is discarded in favour of
# a standard normal.
X <- rgamma(n, shape = 5)
X <- rnorm(n, sd = 1)
dt <- data.frame(Y = alpha + (beta * X) + (sigma_e * e1),
                 W = X + (sigma_U * e2),
                 T_ = gamma + (delta * X) + (sigma_tau * e3),
                 id = 1:n)
## ----SB4_eefun, echo = FALSE---------------------------------------------
# Estimating-function factory for example 4:
# theta = (mean T_, mean W, naive slope of Y on W, IV-corrected slope).
SB4_eefun <- function(data){
  function(theta){
    c(theta[1] - data$T_,
      theta[2] - data$W,
      (data$Y - (theta[3] * data$W)) * (theta[2] - data$W),
      (data$Y - (theta[4] * data$W)) * (theta[1] - data$T_))
  }
}
## ----SB4_run, echo = TRUE------------------------------------------------
estimates <- estimate_equations(eeFUN = SB4_eefun,
data = dt, units = 'id',
roots = c(1, 1, 1, 1))
## ----SB4_clsform, echo = TRUE--------------------------------------------
YW_model <- lm(Y ~ W, data = dt)
YT_model <- lm(Y ~ T_, data = dt)
WT_model <- lm(W ~ T_, data = dt)
## closed form roots
theta_cls <- c(theta1 = mean(dt$T_),
theta2 = mean(dt$W),
theta3 = coef(YW_model)[2],
theta4 = coef(YT_model)[2]/coef(WT_model)[2])
## closed form covariance
# Not sure how compute SB's closed form since it depends on X, which is
# supposed to be unobserved.
Sigma_cls <- matrix(NA, nrow = 2, ncol = 2)
## ----SB4_results, echo = FALSE, results = 'asis'-------------------------
# primary interest lies in the lower 2 x 2 submatrix of the asymptotic variance matrix
estimates$vcov <- estimates$vcov[3:4, 3:4]
results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
print_results(results, 'Example 4', 'Example 4')
## ----SB5_setup, echo=FALSE-----------------------------------------------
## Stefanski & Boos example 5: Hodges-Lehmann-type location estimator via its
## influence curve, plus the sample mean.
n <- 100
theta0 <- 0
theta_tru <- 2
sigma <- 1
dt <- data.frame(X = rnorm(n, mean = 2, sd = sigma),
                 id = 1:n)
## ----SB5_eefun, echo = TRUE----------------------------------------------
## F0/f0: CDF and density of the null (mean-0) distribution.
F0 <- function(y, theta0, distrFUN = pnorm){
  distrFUN(y - theta0, mean = 0)
}
f0 <- function(y, densFUN){
  densFUN(y, mean = 0)
}
## Integral of f0^2 — the denominator of the HL influence curve.
integrand <- function(y, densFUN = dnorm){
  f0(y, densFUN = densFUN)^2
}
IC_denom <- integrate(integrand, lower = -Inf, upper = Inf)$value
SB5_eefun <- function(data, theta0 = 0){
  Xi <- data$X
  IC_HL <- (1/IC_denom) * (F0(Xi, theta0) - 0.5)
  function(theta){
    c(IC_HL - (theta[1] - theta0),
      Xi - theta[2])
  }
}
## ----SB5_run, echo = TRUE------------------------------------------------
estimates <- estimate_equations(eeFUN = SB5_eefun,
                                data = dt, units = 'id',
                                roots = c(1, 1))
## ----SB5_clsform, echo = TRUE--------------------------------------------
## NOTE(review): this uses only adjacent-pair means, not the full set of
## pairwise Walsh averages usually used for Hodges-Lehmann — confirm intended.
X <- dt$X
pair_means <- numeric(length(X) - 1)
for(i in 1:(length(X) - 1)){
  pair_means[i] <- (X[i] + X[i + 1])/2
}
theta_cls <- c(median(pair_means), mean(X))
## closed form covariance (only the HL variance term is known here)
# Not sure how compute SB's closed form since it depends on X, which is
# supposed to be unobserved.
Sigma_cls <- matrix(c(1/(12 * IC_denom^2) / n, NA, NA, NA),
                    nrow = 2, ncol = 2, byrow = TRUE)
## ----SB5_results, echo = FALSE, results = 'asis'-------------------------
results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
print_results(results, 'Example 5', 'Example 5')
## ----SB6_setup, echo=FALSE-----------------------------------------------
## Stefanski & Boos example 6: Huber M-estimator of location.
n <- 100
theta_tru <- 2
sigma <- 1
dt <- data.frame(Y = rnorm(n, mean = 2, sd = sigma),
                 id = 1:n)
## ----SB6_eefun, echo = FALSE---------------------------------------------
## Huber psi function applied per unit (each unit holds one scalar Y, so the
## scalar if/else is safe here).
SB6_eefun <- function(data, k = 1.5){
  function(theta){
    x <- data$Y - theta[1]
    if(abs(x) <= k) x else sign(x) * k
  }
}
## ----SB6_run, echo = TRUE------------------------------------------------
estimates <- estimate_equations(eeFUN = SB6_eefun,
                                data = dt, units = 'id',
                                roots = 1)
## ----SB6_clsform, echo = TRUE--------------------------------------------
theta_cls <- MASS::huber(dt$Y)$mu
psi_k <- function(x, k = 1.5){
  if(abs(x) <= k) x else sign(x) * k
}
## A = mean of -psi'(residual); B = mean of psi(residual)^2.
A <- lapply(dt$Y, function(y){
  x <- y - theta_cls
  -numDeriv::grad(psi_k, x = x)
}) %>% unlist() %>% mean()
B <- lapply(dt$Y, function(y){
  x <- y - theta_cls
  psi_k(x = x)^2
}) %>% unlist() %>% mean()
## closed form covariance
## NOTE(review): for a scalar sandwich this is usually B / A^2; A * B * A only
## matches when A is its own inverse — verify this formula.
Sigma_cls <- matrix(A * B * A / n)
## ----SB6_results, echo = FALSE, results = 'asis'-------------------------
results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
print_results(results, 'Example 6', 'Example 6')
## ----SB7_setup, echo=FALSE-----------------------------------------------
## Stefanski & Boos example 7: sample quantiles (50th and 65th percentiles) as
## roots of non-smooth estimating equations.  The geex run is disabled
## (eval=FALSE) — presumably because the indicator-based psi is not
## differentiable, so the numeric root/derivative steps fail; confirm.
n <- 100
theta_tru <- 2
sigma <- 1
dt <- data.frame(Y = rnorm(n, mean = 2, sd = sigma),
                 id = 1:n)
## ----SB7_eefun, echo = FALSE---------------------------------------------
SB7_eefun <- function(data){
  function(theta){
    with(data,
      c(0.5 - (Y <= theta[1]),
        0.65 - (Y <= theta[2]))
    )
  }
}
## ----SB7_run, echo = TRUE, eval=FALSE------------------------------------
# estimates <- estimate_equations(eeFUN = SB7_eefun,
#                                 data = dt, units = 'id',
#                                 roots = c(.5, .65))
## ----SB7_clsform, echo = TRUE--------------------------------------------
theta_cls <- c(quantile(dt$Y, 0.5), quantile(dt$Y, 0.65))
## ----SB7_results, echo = FALSE, results = 'asis', eval=FALSE-------------
# results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
# print_results(results, 'Example 7', 'Example 7')
## ----SB8_setup, echo=FALSE-----------------------------------------------
## Stefanski & Boos example 8: robust (Huber) regression via M-estimation.
n <- 50
beta <- c(0.5, 2)
dt <- data_frame(X = rep(0:1, each = n/2),
                 e = rnorm(n),
                 Y = as.numeric(cbind(1, X) %*% beta) + e,
                 id = 1:n)
## ----SB8_eefun, echo = FALSE---------------------------------------------
## Huber psi with the conventional k = 1.345 tuning constant.
psi_k <- function(x, k = 1.345){
  if(abs(x) <= k) x else sign(x) * k
}
SB8_eefun <- function(data){
  Yi <- data$Y
  xi <- model.matrix(Y ~ X, data = data)
  function(theta){
    r <- Yi - xi %*% theta    # per-unit residual
    c(psi_k(r) %*% xi)        # psi(residual) times the design row
  }
}
## ----SB8_run, echo = TRUE------------------------------------------------
estimates <- estimate_equations(eeFUN = SB8_eefun,
                                data = dt, units = 'id',
                                roots = c(1, 1))
## ----SB8_clsform, echo = TRUE--------------------------------------------
## Reference fit from MASS::rlm (Huber M-estimation) for comparison.
m <- MASS::rlm(Y ~ X, data = dt, method = 'M')
theta_cls <- coef(m)
Sigma_cls <- vcov(m)
## ----SB8_results, echo = FALSE, results = 'asis'-------------------------
results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
print_results(results, 'Example 8', 'Example 8')
## ----SB9_setup, echo=FALSE-----------------------------------------------
## Stefanski & Boos example 9: logistic-regression score equations, compared
## against glm() with a sandwich variance.
n <- 100
beta <- c(0.5, 2, .1)
dt <- data_frame(X1 = rep(0:1, each = n/2),
                 X2 = rep(0:1, times = n/2),
                 Y = rbinom(n, 1, prob = as.numeric(plogis(cbind(1, X1, X2) %*% beta))),
                 id = 1:n)
## ----SB9_eefun, echo = FALSE---------------------------------------------
## GEE-style score per unit: D' V^{-1} (Y - mu) with logit mean model.
SB9_eefun <- function(data){
  Yi <- data$Y
  xi <- model.matrix(Y ~ X1 + X2, data = data, drop = FALSE)
  function(theta){
    lp <- xi %*% theta          # linear predictor
    mu <- plogis(lp)            # mean
    D <- t(xi) %*% dlogis(lp)   # derivative of mu wrt theta
    V <- mu * (1 - mu)          # Bernoulli variance
    D %*% solve(V) %*% (Yi - mu)
  }
}
## ----SB9_run, echo = TRUE------------------------------------------------
estimates <- estimate_equations(eeFUN = SB9_eefun,
                                data = dt, units = 'id',
                                roots = c(1, 1, 1))
## ----SB9_clsform, echo = TRUE--------------------------------------------
m <- glm(Y ~ X1 + X2, data = dt, family = binomial(link = 'logit'))
theta_cls <- coef(m)
Sigma_cls <- sandwich(m)
## ----SB9_results, echo = FALSE, results = 'asis'-------------------------
results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
print_results(results, 'Example 9', 'Example 9')
## ----SB10_setup, echo=FALSE----------------------------------------------
## Stefanski & Boos example 10: Shaquille O'Neal free-throw data — testing
## whether per-game free throws behave binomially with a common p.
shaq <- data_frame(game = 1:23,
                   ft_made = c(4, 5, 5, 5, 2, 7, 6, 9, 4, 1, 13, 5, 6, 9, 7, 3, 8, 1, 18, 3, 10, 1, 3),
                   ft_attp = c(5, 11, 14, 12, 7, 10, 14, 15, 12, 4, 27, 17, 12, 9, 12, 10, 12, 6, 39, 13, 17, 6, 12))
## ----SB10_eefun, echo = FALSE--------------------------------------------
## theta[1]: mean of the per-game Pearson-type chi-square contribution;
## theta[2]: common success probability p.
SB10_eefun <- function(data){
  Y <- data$ft_made
  n <- data$ft_attp
  function(theta){
    p <- theta[2]
    c(((Y - (n * p))^2)/(n * p * (1 - p)) - theta[1],
      Y - n * p)
  }
}
## ----SB10_run, echo = TRUE-----------------------------------------------
estimates <- estimate_equations(eeFUN = SB10_eefun,
                                data = shaq, units = 'game',
                                roots = c(.5, .5))
## ----SB10_clsform, echo = TRUE-------------------------------------------
## Closed-form asymptotic variance of the overdispersion statistic (theta1)
## evaluated at success probability p.
##
## Arguments:
##   p    - assumed common free-throw success probability.
##   data - data frame with an ft_attp column (attempts per game); defaults
##          to the `shaq` data built above, so existing calls like V11(.45)
##          are unchanged.
##
## BUG FIX: k was computed as length(nrow(shaq)), which is always 1 (the
## length of a single integer), not the number of games.  That inflated
## term2 * (1/k) * sumn_inv and produced the negative variance the original
## author complained about.  k is now nrow(data).
V11 <- function(p, data = shaq) {
  k <- nrow(data)              # number of games
  sumn <- sum(data$ft_attp)    # total attempts
  sumn_inv <- sum(1/data$ft_attp)
  term2_n <- 1 - (6 * p) + (6 * p^2)
  term2_d <- p * (1 - p)
  term2 <- term2_n/term2_d
  print(term2)                 # retained diagnostic output
  term3 <- ((1 - 2 * p)^2)/( (sumn/k) * p * (1 - p))
  print(term3)                 # retained diagnostic output
  2 + (term2 * (1/k) * sumn_inv) - term3
}
### NOTE(review): the original author reported a negative V11 here; check the
### definition of k inside V11 — `length(nrow(shaq))` evaluates to 1, whereas
### k should be the number of games, nrow(shaq).
## Pooled estimate of the free-throw success probability.
p_tilde <- sum(shaq$ft_made)/sum(shaq$ft_attp)
## NOTE(review): V is evaluated at 0.45 rather than at p_tilde — confirm.
V <- V11(.45)
V
pnorm(estimates$parameters[1], mean = 1, sd = sqrt(V))
## ----SB10_results, echo = FALSE, results = 'asis', eval = FALSE----------
# results <- list(geex = estimates, cls = list(parameters = theta_cls, vcov = Sigma_cls))
# print_results(results, 'Example 10', 'Example 10')
|
a5e0046070330a7520da9a24d0e8c9b06b57c3bf
|
d6d0ba8ddab5eb7cfd52781802fca21241611bb0
|
/BulkRNASeq/ISB025_mapping.R
|
01646db77114d528d56b59c6aebc72d067f1a3ca
|
[
"Apache-2.0"
] |
permissive
|
cemalley/Jovanovic_methods
|
dc3a9de74230dec25868cdba17318469a77f0948
|
475728042d1bba1195c02c5bc58a9a862b4e0dec
|
refs/heads/main
| 2023-08-23T16:13:00.563835
| 2021-10-12T15:51:19
| 2021-10-12T15:51:19
| 366,406,313
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,463
|
r
|
ISB025_mapping.R
|
library(data.table)
library(readxl)
library(stringr)
## Read the lane-concatenation workbook (sheet 1) listing samples and their
## per-lane FASTQ files.  NOTE: each dataset section below re-reads its own
## sheet, so these three lines are repeated immediately in the PC_siRNA
## section.
setwd('/Volumes/ncatssctl/NGS_related/BulkRNA/ISB025/')
files <- as.data.table(readxl::read_xlsx('ISB025_lane_concatenating.xlsx', sheet=1))
samples <- unique(files$Sample)
# ~~~~~~~PC_siRNA~~~~~~~~-------------------
## This section does not run the pipeline itself: every loop below cat()s
## shell commands (mkdir, cat-for-lane-concatenation, trim/map/count) that
## are pasted into swarm scripts for the cluster (see the swarm comments).
setwd('/Volumes/ncatssctl/NGS_related/BulkRNA/ISB025/')
files <- as.data.table(readxl::read_xlsx('ISB025_lane_concatenating.xlsx'))
samples <- unique(files$Sample)
# lane concatenation-----
## Emit one mkdir per sample (output dirs for the concatenated FASTQs).
for (sample in samples){
  cat(paste0('mkdir ', sample), sep='\n')
}
## Diagnostic: print which workbook rows belong to each sample.
for (sample in samples){
  rows <- grep(sample, files$Sample)
  cat(sample)
  cat('\t')
  cat(rows)
  cat('\n')
}
## Sheet 2 maps each sample to the space-separated row numbers of its files.
rows <- readxl::read_xlsx('ISB025_lane_concatenating.xlsx', sheet=2)
rows <- as.data.table(rows)
rows
samples <- unique(rows$Sample)
#R1------
## Emit `cat lane1 lane2 ... > concatenated` commands for the R1 reads.
## NOTE(review): `for (row in files.temp)` iterates over the spreadsheet row
## NUMBERS and then also uses them as indices into files.temp — this only
## works while those values are valid indices; verify against the workbook.
for (sample in samples){
  files.row <- grep(sample, rows$Sample)
  files.temp <- unlist(as.numeric(str_split_fixed(rows$Rows[files.row], " ", Inf)))
  files.temp.out <- c()
  cat('cat ', sep='')
  for (row in files.temp){
    files.temp[row] <- files$File[row]
    dir.row <- grep(files.temp[row],files$File)
    files.temp.out[row] <- paste0('/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_siRNA/Raw_data/FASTQ/' ,files$Folder[dir.row], '/', files$File[dir.row])
    cat(paste0(files.temp.out[row], '_R1_001.fastq.gz '))
  }
  out.sample <- files$Sample[dir.row]
  cat(paste0('> ', '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_siRNA/Raw_data/concatenated_lanes/', out.sample, '/', out.sample, '_R1_001.fastq.gz'))
  cat("\n\n", sep="")
  rm(files.temp.out, files.row, files.temp, dir.row)
}
# R2----
## Same as above for the R2 mates.
for (sample in samples){
  files.row <- grep(sample, rows$Sample)
  files.temp <- unlist(as.numeric(str_split_fixed(rows$Rows[files.row], " ", Inf)))
  files.temp.out <- c()
  cat('cat ', sep='')
  for (row in files.temp){
    files.temp[row] <- files$File[row]
    dir.row <- grep(files.temp[row],files$File)
    files.temp.out[row] <- paste0('/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_siRNA/Raw_data/FASTQ/' ,files$Folder[dir.row], '/', files$File[dir.row])
    cat(paste0(files.temp.out[row], '_R2_001.fastq.gz '))
  }
  out.sample <- files$Sample[dir.row]
  cat(paste0('> ', '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_siRNA/Raw_data/concatenated_lanes/', out.sample, '/', out.sample, '_R2_001.fastq.gz'))
  cat("\n\n", sep="")
  rm(files.temp.out, files.row, files.temp, dir.row)
}
# swarm -f fastq_cat_081820.sh -t 1 -g 1 --time=8:00:00 #
# mapping-------
## Emit one Trimmomatic -> STAR -> htseq-count command line per sample.
samples <- unique(files$Sample)
dir <- '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_siRNA'
for (sample in samples){
  cat(paste0('cd ',dir,'/trimmomatic && java -jar $TRIMMOJAR PE -phred33 ', dir ,'/Raw_data/concatenated_lanes/',sample,'/', sample,'_R1_001.fastq.gz ', dir, '/Raw_data/concatenated_lanes/',sample, '/',
             sample, '_R2_001.fastq.gz ', '-baseout ', sample, '.fastq.gz ',
             'ILLUMINACLIP:/usr/local/apps/trimmomatic/Trimmomatic-0.36/adapters/TruSeq3-PE-2.fa:2:30:10 LEADING:10 TRAILING:5 MAXINFO:50:0.97 MINLEN:36 && cd ',dir,'/BAM && STAR --runThreadN $SLURM_CPUS_PER_TASK --genomeDir /fdb/STAR_current/GENCODE/Gencode_human/release_27/genes-150 --sjdbOverhang 150 --outSAMunmapped Within --outFilterType BySJout --outFilterMultimapNmax 20 --outFilterMismatchNmax 999 --outFilterMismatchNoverLmax 0.04 --alignIntronMin 20 --alignIntronMax 1000000 --alignMatesGapMax 1000000 --alignSJoverhangMin 8 --alignSJDBoverhangMin 1 --sjdbScore 1 --readFilesIn ',dir,'/trimmomatic/',sample,'_1P.fastq.gz ',dir,'/trimmomatic/',sample,'_2P.fastq.gz --readFilesCommand zcat --outSAMtype BAM SortedByCoordinate --outFileNamePrefix ', sample,'_hg38 && htseq-count -f bam -r pos -s no -t exon -m union ',dir,'/BAM/',sample,'_hg38Aligned.sortedByCoord.out.bam /fdb/GENCODE/Gencode_human/release_27/gencode.v27.annotation.gtf --idattr gene_name > ',dir,'/htseq/',sample,'_htseq_counts.txt'), sep="\n")
  cat("\n")
}
# swarm -g 40 -t 12 --time=48:00:00 --module=trimmomatic,STAR,htseq -f ISB025_PC_siRNA_pipeline.sh #
# ~~~~~~~PC_LA_WNT~~~~~~~~-------------------
## Same command-generation workflow as the PC_siRNA section, driven by
## sheets 3 (files) and 4 (row map), writing under ISB025_PC_LA_WNT.
setwd('/Volumes/ncatssctl/NGS_related/BulkRNA/ISB025/')
files <- as.data.table(readxl::read_xlsx('ISB025_lane_concatenating.xlsx', sheet=3))
samples <- unique(files$Sample)
# lane concatenation-----
for (sample in samples){
  cat(paste0('mkdir ', sample), sep='\n')
}
## Diagnostic: workbook rows per sample.
for (sample in samples){
  rows <- grep(sample, files$Sample)
  cat(sample)
  cat('\t')
  cat(rows)
  cat('\n')
}
rows <- readxl::read_xlsx('ISB025_lane_concatenating.xlsx', sheet=4)
rows <- as.data.table(rows)
rows
samples <- unique(rows$Sample)
#R1------
## Emit `cat ... > concatenated_R1` commands (see PC_siRNA section notes).
for (sample in samples){
  files.row <- grep(sample, rows$Sample)
  files.temp <- unlist(as.numeric(str_split_fixed(rows$Rows[files.row], " ", Inf)))
  files.temp.out <- c()
  cat('cat ', sep='')
  for (row in files.temp){
    files.temp[row] <- files$File[row]
    dir.row <- grep(files.temp[row],files$File)
    files.temp.out[row] <- paste0('/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_LA_WNT/Raw_data/FASTQ/' ,files$Folder[dir.row], '/', files$File[dir.row])
    cat(paste0(files.temp.out[row], '_R1_001.fastq.gz '))
  }
  out.sample <- files$Sample[dir.row]
  cat(paste0('> ', '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_LA_WNT/Raw_data/concatenated_lanes/', out.sample, '/', out.sample, '_R1_001.fastq.gz'))
  cat("\n\n", sep="")
  rm(files.temp.out, files.row, files.temp, dir.row)
}
# R2----
## Same for the R2 mates.
for (sample in samples){
  files.row <- grep(sample, rows$Sample)
  files.temp <- unlist(as.numeric(str_split_fixed(rows$Rows[files.row], " ", Inf)))
  files.temp.out <- c()
  cat('cat ', sep='')
  for (row in files.temp){
    files.temp[row] <- files$File[row]
    dir.row <- grep(files.temp[row],files$File)
    files.temp.out[row] <- paste0('/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_LA_WNT/Raw_data/FASTQ/' ,files$Folder[dir.row], '/', files$File[dir.row])
    cat(paste0(files.temp.out[row], '_R2_001.fastq.gz '))
  }
  out.sample <- files$Sample[dir.row]
  cat(paste0('> ', '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_LA_WNT/Raw_data/concatenated_lanes/', out.sample, '/', out.sample, '_R2_001.fastq.gz'))
  cat("\n\n", sep="")
  rm(files.temp.out, files.row, files.temp, dir.row)
}
# swarm -f fastq_cat_081820.sh -t 1 -g 1 --time=8:00:00 #
# mapping-------
## One Trimmomatic -> STAR -> htseq-count command per sample.
samples <- unique(files$Sample)
dir <- '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_PC_LA_WNT'
for (sample in samples){
  cat(paste0('cd ',dir,'/trimmomatic && java -jar $TRIMMOJAR PE -phred33 ', dir ,'/Raw_data/concatenated_lanes/',sample,'/', sample,'_R1_001.fastq.gz ', dir, '/Raw_data/concatenated_lanes/',sample, '/',
             sample, '_R2_001.fastq.gz ', '-baseout ', sample, '.fastq.gz ',
             'ILLUMINACLIP:/usr/local/apps/trimmomatic/Trimmomatic-0.36/adapters/TruSeq3-PE-2.fa:2:30:10 LEADING:10 TRAILING:5 MAXINFO:50:0.97 MINLEN:36 && cd ',dir,'/BAM && STAR --runThreadN $SLURM_CPUS_PER_TASK --genomeDir /fdb/STAR_current/GENCODE/Gencode_human/release_27/genes-150 --sjdbOverhang 150 --outSAMunmapped Within --outFilterType BySJout --outFilterMultimapNmax 20 --outFilterMismatchNmax 999 --outFilterMismatchNoverLmax 0.04 --alignIntronMin 20 --alignIntronMax 1000000 --alignMatesGapMax 1000000 --alignSJoverhangMin 8 --alignSJDBoverhangMin 1 --sjdbScore 1 --readFilesIn ',dir,'/trimmomatic/',sample,'_1P.fastq.gz ',dir,'/trimmomatic/',sample,'_2P.fastq.gz --readFilesCommand zcat --outSAMtype BAM SortedByCoordinate --outFileNamePrefix ', sample,'_hg38 && htseq-count -f bam -r pos -s no -t exon -m union ',dir,'/BAM/',sample,'_hg38Aligned.sortedByCoord.out.bam /fdb/GENCODE/Gencode_human/release_27/gencode.v27.annotation.gtf --idattr gene_name > ',dir,'/htseq/',sample,'_htseq_counts.txt'), sep="\n")
  cat("\n")
}
# swarm -g 40 -t 12 --time=48:00:00 --module=trimmomatic,STAR,htseq -f ISB025_PC_LA_WNT_pipeline.sh #
# ~~~~~~~Vukasin~~~~~~~~-------------------
## Same command-generation workflow, driven by sheets 5 (files) and
## 6 (row map), writing under ISB025_Vukasin.
setwd('/Volumes/ncatssctl/NGS_related/BulkRNA/ISB025/')
files <- as.data.table(readxl::read_xlsx('ISB025_lane_concatenating.xlsx', sheet=5))
samples <- unique(files$Sample)
# lane concatenation-----
for (sample in samples){
  cat(paste0('mkdir ', sample), sep='\n')
}
## Diagnostic: workbook rows per sample.
for (sample in samples){
  rows <- grep(sample, files$Sample)
  cat(sample)
  cat('\t')
  cat(rows)
  cat('\n')
}
rows <- readxl::read_xlsx('ISB025_lane_concatenating.xlsx', sheet=6)
rows <- as.data.table(rows)
rows
samples <- unique(rows$Sample)
#R1------
## Emit `cat ... > concatenated_R1` commands (see PC_siRNA section notes).
for (sample in samples){
  files.row <- grep(sample, rows$Sample)
  files.temp <- unlist(as.numeric(str_split_fixed(rows$Rows[files.row], " ", Inf)))
  files.temp.out <- c()
  cat('cat ', sep='')
  for (row in files.temp){
    files.temp[row] <- files$File[row]
    dir.row <- grep(files.temp[row],files$File)
    files.temp.out[row] <- paste0('/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_Vukasin/Raw_data/FASTQ/' ,files$Folder[dir.row], '/', files$File[dir.row])
    cat(paste0(files.temp.out[row], '_R1_001.fastq.gz '))
  }
  out.sample <- files$Sample[dir.row]
  cat(paste0('> ', '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_Vukasin/Raw_data/concatenated_lanes/', out.sample, '/', out.sample, '_R1_001.fastq.gz'))
  cat("\n\n", sep="")
  rm(files.temp.out, files.row, files.temp, dir.row)
}
# R2----
## Same for the R2 mates.
for (sample in samples){
  files.row <- grep(sample, rows$Sample)
  files.temp <- unlist(as.numeric(str_split_fixed(rows$Rows[files.row], " ", Inf)))
  files.temp.out <- c()
  cat('cat ', sep='')
  for (row in files.temp){
    files.temp[row] <- files$File[row]
    dir.row <- grep(files.temp[row],files$File)
    files.temp.out[row] <- paste0('/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_Vukasin/Raw_data/FASTQ/' ,files$Folder[dir.row], '/', files$File[dir.row])
    cat(paste0(files.temp.out[row], '_R2_001.fastq.gz '))
  }
  out.sample <- files$Sample[dir.row]
  cat(paste0('> ', '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_Vukasin/Raw_data/concatenated_lanes/', out.sample, '/', out.sample, '_R2_001.fastq.gz'))
  cat("\n\n", sep="")
  rm(files.temp.out, files.row, files.temp, dir.row)
}
# swarm -f fastq_cat_081820.sh -t 1 -g 1 --time=8:00:00 #
# mapping-------
## One Trimmomatic -> STAR -> htseq-count command per sample.
samples <- unique(files$Sample)
dir <- '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_Vukasin'
for (sample in samples){
  cat(paste0('cd ',dir,'/trimmomatic && java -jar $TRIMMOJAR PE -phred33 ', dir ,'/Raw_data/concatenated_lanes/',sample,'/', sample,'_R1_001.fastq.gz ', dir, '/Raw_data/concatenated_lanes/',sample, '/',
             sample, '_R2_001.fastq.gz ', '-baseout ', sample, '.fastq.gz ',
             'ILLUMINACLIP:/usr/local/apps/trimmomatic/Trimmomatic-0.36/adapters/TruSeq3-PE-2.fa:2:30:10 LEADING:10 TRAILING:5 MAXINFO:50:0.97 MINLEN:36 && cd ',dir,'/BAM && STAR --runThreadN $SLURM_CPUS_PER_TASK --genomeDir /fdb/STAR_current/GENCODE/Gencode_human/release_27/genes-150 --sjdbOverhang 150 --outSAMunmapped Within --outFilterType BySJout --outFilterMultimapNmax 20 --outFilterMismatchNmax 999 --outFilterMismatchNoverLmax 0.04 --alignIntronMin 20 --alignIntronMax 1000000 --alignMatesGapMax 1000000 --alignSJoverhangMin 8 --alignSJDBoverhangMin 1 --sjdbScore 1 --readFilesIn ',dir,'/trimmomatic/',sample,'_1P.fastq.gz ',dir,'/trimmomatic/',sample,'_2P.fastq.gz --readFilesCommand zcat --outSAMtype BAM SortedByCoordinate --outFileNamePrefix ', sample,'_hg38 && htseq-count -f bam -r pos -s no -t exon -m union ',dir,'/BAM/',sample,'_hg38Aligned.sortedByCoord.out.bam /fdb/GENCODE/Gencode_human/release_27/gencode.v27.annotation.gtf --idattr gene_name > ',dir,'/htseq/',sample,'_htseq_counts.txt'), sep="\n")
  cat("\n")
}
# swarm -g 40 -t 12 --time=48:00:00 --module=trimmomatic,STAR,htseq -f ISB025_Vukasin_pipeline.sh #
# ~~~~~~~CT~~~~~~~~-------------------
## Same command-generation workflow, driven by sheets 7 (files) and
## 8 (row map), writing under ISB025_CT.
setwd('/Volumes/ncatssctl/NGS_related/BulkRNA/ISB025/')
files <- as.data.table(readxl::read_xlsx('ISB025_lane_concatenating.xlsx', sheet=7))
samples <- unique(files$Sample)
# lane concatenation-----
for (sample in samples){
  cat(paste0('mkdir ', sample), sep='\n')
}
## Diagnostic: workbook rows per sample.
for (sample in samples){
  rows <- grep(sample, files$Sample)
  cat(sample)
  cat('\t')
  cat(rows)
  cat('\n')
}
rows <- readxl::read_xlsx('ISB025_lane_concatenating.xlsx', sheet=8)
rows <- as.data.table(rows)
rows
samples <- unique(rows$Sample)
#R1------
## Emit `cat ... > concatenated_R1` commands (see PC_siRNA section notes).
for (sample in samples){
  files.row <- grep(sample, rows$Sample)
  files.temp <- unlist(as.numeric(str_split_fixed(rows$Rows[files.row], " ", Inf)))
  files.temp.out <- c()
  cat('cat ', sep='')
  for (row in files.temp){
    files.temp[row] <- files$File[row]
    dir.row <- grep(files.temp[row],files$File)
    files.temp.out[row] <- paste0('/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_CT/Raw_data/FASTQ/' ,files$Folder[dir.row], '/', files$File[dir.row])
    cat(paste0(files.temp.out[row], '_R1_001.fastq.gz '))
  }
  out.sample <- files$Sample[dir.row]
  cat(paste0('> ', '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_CT/Raw_data/concatenated_lanes/', out.sample, '/', out.sample, '_R1_001.fastq.gz'))
  cat("\n\n", sep="")
  rm(files.temp.out, files.row, files.temp, dir.row)
}
# R2----
## Same for the R2 mates.
for (sample in samples){
  files.row <- grep(sample, rows$Sample)
  files.temp <- unlist(as.numeric(str_split_fixed(rows$Rows[files.row], " ", Inf)))
  files.temp.out <- c()
  cat('cat ', sep='')
  for (row in files.temp){
    files.temp[row] <- files$File[row]
    dir.row <- grep(files.temp[row],files$File)
    files.temp.out[row] <- paste0('/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_CT/Raw_data/FASTQ/' ,files$Folder[dir.row], '/', files$File[dir.row])
    cat(paste0(files.temp.out[row], '_R2_001.fastq.gz '))
  }
  out.sample <- files$Sample[dir.row]
  cat(paste0('> ', '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_CT/Raw_data/concatenated_lanes/', out.sample, '/', out.sample, '_R2_001.fastq.gz'))
  cat("\n\n", sep="")
  rm(files.temp.out, files.row, files.temp, dir.row)
}
# swarm -f fastq_cat_081820.sh -t 1 -g 1 --time=8:00:00 #
# mapping-------
## One Trimmomatic -> STAR -> htseq-count command per sample.
samples <- unique(files$Sample)
dir <- '/data/NCATS_ifx/data/mRNASeq/ISB025/ISB025_CT'
for (sample in samples){
  cat(paste0('cd ',dir,'/trimmomatic && java -jar $TRIMMOJAR PE -phred33 ', dir ,'/Raw_data/concatenated_lanes/',sample,'/', sample,'_R1_001.fastq.gz ', dir, '/Raw_data/concatenated_lanes/',sample, '/',
             sample, '_R2_001.fastq.gz ', '-baseout ', sample, '.fastq.gz ',
             'ILLUMINACLIP:/usr/local/apps/trimmomatic/Trimmomatic-0.36/adapters/TruSeq3-PE-2.fa:2:30:10 LEADING:10 TRAILING:5 MAXINFO:50:0.97 MINLEN:36 && cd ',dir,'/BAM && STAR --runThreadN $SLURM_CPUS_PER_TASK --genomeDir /fdb/STAR_current/GENCODE/Gencode_human/release_27/genes-150 --sjdbOverhang 150 --outSAMunmapped Within --outFilterType BySJout --outFilterMultimapNmax 20 --outFilterMismatchNmax 999 --outFilterMismatchNoverLmax 0.04 --alignIntronMin 20 --alignIntronMax 1000000 --alignMatesGapMax 1000000 --alignSJoverhangMin 8 --alignSJDBoverhangMin 1 --sjdbScore 1 --readFilesIn ',dir,'/trimmomatic/',sample,'_1P.fastq.gz ',dir,'/trimmomatic/',sample,'_2P.fastq.gz --readFilesCommand zcat --outSAMtype BAM SortedByCoordinate --outFileNamePrefix ', sample,'_hg38 && htseq-count -f bam -r pos -s no -t exon -m union ',dir,'/BAM/',sample,'_hg38Aligned.sortedByCoord.out.bam /fdb/GENCODE/Gencode_human/release_27/gencode.v27.annotation.gtf --idattr gene_name > ',dir,'/htseq/',sample,'_htseq_counts.txt'), sep="\n")
  cat("\n")
}
# swarm -g 40 -t 12 --time=48:00:00 --module=trimmomatic,STAR,htseq -f ISB025_CT_pipeline.sh #
|
1978fa225f4ea315bb95895131b55e705ffc971f
|
cf9e51a70485c84874479cc5e9797bd3352f5545
|
/outLocate() function.R
|
4a49682c3ea04bc118a79728e6c958e001799c34
|
[] |
no_license
|
GourabNath/DataCleaning
|
7fb7acd70db903c672060870033c3f3c783f139f
|
c3a2040f44bcf84bec437b0d16e90bcfbdebc4af
|
refs/heads/master
| 2021-01-10T16:42:19.592643
| 2016-04-18T19:34:53
| 2016-04-18T19:34:53
| 54,949,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,780
|
r
|
outLocate() function.R
|
## Locating Outliers:
## outLocate() function
##
## Outlier is definitely a matter of interest. Therefore it is required that we should know where exactly this outlier is located for a
## particular variable. This function will help to do so.
##
## The function outLocate takes two arguments -
## data - a data frame of interest
## bench - the benchmark.
## There are four benchmarks used to construct this function:
## b1 - Based on quartile and interquartile range. (Upper BM, Lower BM) = (Q1 - 1.5*IQR, Q3 + 1.5*IQR)
## b2 - Based on mean and sd. (Upper BM, Lower BM) = (mean - 2*sd, mean + 2*sd)
## b3 - Based on mean and sd. (Upper BM, Lower BM) = (mean - 2.5*sd, mean + 2.5*sd)
## b4 - Based on mean and sd. (Upper BM, Lower BM) = (mean - 3*sd, mean + 3*sd)
##
## The function returns a list. Each element of the list corresponds to a variable in the data frame.
## If a particular variable of the data frame contains outliers, then the corresponding element of the list is a vector
## of the indices at which the outliers are located in that variable.
## If a particular variable of the data frame contains no outliers, or is a categorical variable, then the corresponding
## element of the list shows a hyphen (indicating not applicable).
## outLocate(): locate outliers in every numeric variable of a data frame.
##
## Arguments:
##   data      - a data frame; non-numeric (e.g. factor) columns are dropped.
##   benchmark - which outlier benchmark to use (see the header comments):
##               "b1" (default): (Q1 - 1.5*IQR, Q3 + 1.5*IQR)
##               "b2": mean +/- 2*sd; "b3": mean +/- 2.5*sd; "b4": mean +/- 3*sd
##
## Returns a noquote()-wrapped named list with one element per numeric
## variable: the indices of its outliers (values above the upper benchmark
## first, then values below the lower benchmark, matching the original
## ordering), or "-" when it has none.
outLocate <- function(data, benchmark = c("b1", "b2", "b3", "b4"))
{
  ## Resolve the choice. (BUG FIX: the original compared the full length-4
  ## default vector with `==`, which errors on a default call in modern R.)
  benchmark <- match.arg(benchmark)
  ## Keep only the numeric columns. (BUG FIX: the original removed factor
  ## columns while iterating over the original column count, which skips
  ## columns after a removal and can index out of range.)
  data <- data[, vapply(data, is.numeric, logical(1)), drop = FALSE]
  outLoc <- vector("list", ncol(data))
  for (i in seq_len(ncol(data))) {
    x <- data[, i]
    ok <- x[!is.na(x)]           # benchmarks are computed on non-NA values
    ## Lower/upper benchmarks for this variable.  (BUG FIX: b1 now uses the
    ## full 1.5*IQR documented above; the original divided the IQR by 2.)
    bounds <- switch(benchmark,
      b1 = quantile(ok, c(0.25, 0.75)) + c(-1.5, 1.5) * IQR(ok),
      b2 = mean(ok) + c(-2, 2) * sd(ok),
      b3 = mean(ok) + c(-2.5, 2.5) * sd(ok),
      b4 = mean(ok) + c(-3, 3) * sd(ok))
    ## which() drops NA comparisons, so NA entries are never flagged.
    idx <- c(which(x > bounds[2]), which(x < bounds[1]))
    outLoc[[i]] <- if (length(idx) > 0) idx else "-"
  }
  ## Name the elements after the (numeric) variables of the data frame.
  names(outLoc) <- names(data)
  ## noquote() so the "-" placeholders display without quotes.
  return(noquote(outLoc))
}
|
d08307eea6140e6e2961b52d4936d7247704cd0f
|
4d3672136d43264176fe42ea42196f113532138d
|
/R/nsize.R
|
1ba3c28c114041ac64bf32b1181d085e23ef8338
|
[] |
no_license
|
alanarnholt/BSDA
|
43c851749a402c6fe73213c31d42c26fa968303e
|
2098ae86a552d69e4af0287c8b1828f7fa0ee325
|
refs/heads/master
| 2022-06-10T10:52:15.879117
| 2022-05-14T23:58:15
| 2022-05-14T23:58:15
| 52,566,969
| 5
| 13
| null | 2017-07-27T02:06:33
| 2016-02-26T00:28:07
|
R
|
UTF-8
|
R
| false
| false
| 2,827
|
r
|
nsize.R
|
#' Required Sample Size
#'
#' Function to determine required sample size to be within a given margin of
#' error.
#'
#' Answer is based on a normal approximation when using type \code{"pi"}.
#'
#' @param b the desired bound (margin of error).
#' @param sigma population standard deviation. Required when using type
#' \code{"mu"}; not required if using type \code{"pi"}.
#' @param p estimate for the population proportion of successes. Not required
#' if using type \code{"mu"}.
#' @param conf.level confidence level for the problem, restricted to lie
#' between zero and one.
#' @param type character string, one of \code{"mu"} or \code{"pi"}, or just the
#' initial letter of each, indicating the appropriate parameter. Default value
#' is \code{"mu"}.
#' @return Invisibly returns the required sample size (it is also reported
#' with \code{cat()}).
#' @author Alan T. Arnholt
#' @keywords univar
#' @examples
#'
#' nsize(b=.03, p=708/1200, conf.level=.90, type="pi")
#' # Returns the required sample size (n) to estimate the population
#' # proportion of successes with a 0.9 confidence interval
#' # so that the margin of error is no more than 0.03 when the
#' # estimate of the population proportion of successes is 708/1200.
#' # This is problem 5.38 on page 257 of Kitchen's BSDA.
#'
#' nsize(b=.15, sigma=.31, conf.level=.90, type="mu")
#' # Returns the required sample size (n) to estimate the population
#' # mean with a 0.9 confidence interval so that the margin
#' # of error is no more than 0.15. This is Example 5.17 on page
#' # 261 of Kitchen's BSDA.
#'
#' @export nsize
nsize <-
function(b, sigma = NULL, p = 0.5, conf.level = 0.95, type = "mu")
{
    ## Resolve (possibly abbreviated) type via partial matching.
    choices <- c("mu", "pi")
    alt <- pmatch(type, choices)
    type <- choices[alt]
    if(length(type) > 1 || is.na(type))
        stop("type must be one of \"mu\", \"pi\"")
    ## Enforce the documented restriction on conf.level (was not checked).
    if(length(conf.level) != 1 || is.na(conf.level) ||
       conf.level <= 0 || conf.level >= 1)
        stop("conf.level must be a single number between zero and one")
    if(!missing(b))
        if(length(b) != 1 || is.na(b))
            stop("b must be a single number")
    if(type == "pi" && b > 1)
        stop("b must be less than 1")
    ## Two-sided normal critical value for the requested confidence level.
    z <- qnorm(1 - (1 - conf.level)/2)
    if(type == "mu") {
        if(is.null(sigma))
            stop("sigma must be supplied when type = \"mu\"")
        ## n >= (z * sigma / b)^2, rounded up.
        n <- ceiling(((z * sigma)/b)^2)
        cat("\n")
        cat("The required sample size (n) to estimate the population",
            "\n")
        cat("mean with a", conf.level,
            "confidence interval so that the margin", "\n")
        cat("of error is no more than", b, "is", n, ".", "\n")
        cat("\n\n")
    }
    else {
        ## n >= p * (1 - p) * (z / b)^2, rounded up.
        n <- ceiling(p * (1 - p) * (z/b)^2)
        cat("\n")
        cat("The required sample size (n) to estimate the population",
            "\n")
        cat("proportion of successes with a", conf.level,
            "confidence interval", "\n")
        cat("so that the margin of error is no more than", b, "is",
            n, ".", "\n")
        cat("\n\n")
    }
    ## Return the computed size invisibly (previously nothing was returned).
    invisible(n)
}
|
d80b7e880476e033a81866db37208fe7be658e8d
|
7179a563634bc94ecd2322777c91203ad2dc7245
|
/app/ui.R
|
6b4f9661264e5d390b4b000ed05876e5a357f542
|
[] |
no_license
|
hoangtv1899/DataScienceCapstone
|
9a65d68b3ebf2239a7c99b6ad19b98bbc60fd0fc
|
edc3a8251517c244bd2d301bed5a4d37b778fa94
|
refs/heads/master
| 2021-09-01T12:20:12.644261
| 2017-12-27T00:20:15
| 2017-12-27T00:20:15
| 115,463,946
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,126
|
r
|
ui.R
|
library(shiny)
library(markdown)
# Define UI for the next-word prediction app: a sidebar with one free-text
# input and a main panel showing the prediction, the echoed input, and logos.
shinyUI(fluidPage(
  # Application title
  titlePanel("DATA SCIENCE CAPSTONE - PREDICTING NEXT WORD"),
  # Sidebar holding the single text input for the phrase to complete.
  # (The original "slider input for number of bins" comment was a leftover
  # from the Shiny template.)
  sidebarLayout(
    sidebarPanel(
      helpText("Enter a word or a sentence to preview next word prediction"),
      hr(),
      # inputText is read by the server to produce the outputs below.
      textInput("inputText", "Enter the word/ sentence here", value=""),
      hr()
    ),
    # Main panel: predicted word, echoed input, n-gram result, and branding.
    mainPanel(
      h2("The predicted next word at this box:"),
      verbatimTextOutput("prediction"),
      strong("You entered:"),
      strong(code(textOutput('sentence1'))),
      br(),
      strong("Using search at n-grams to show the next word:"),
      strong(code(textOutput('sentence2'))),
      hr(),
      hr(),
      hr(),
      img(src='swiftkey_logo.jpg',height=50,width=250),
      img(src='jhu_logo.jpg',height=100,width=250),
      hr(),
      hr(),
      img(src='coursera_logo.png',height=122,width=467),
      hr()
    )
  )
))
|
0e4b3a058bc443ed5dd38a43dc9158d252531e7f
|
fc132d038f07aeba9c15ef544c11a3aca0bc6f43
|
/findDepthOffset.R
|
d5d9e7a62537a3213f5231e191df915559bc62d7
|
[] |
no_license
|
sherrillmix/turtleSurvival
|
071538ca0f0c3a483a049079c4a3d8ff4dcb3637
|
f0d3eee8533c908bd1689b91bedd7c975adbb437
|
refs/heads/master
| 2021-01-10T03:47:56.967801
| 2016-11-22T14:49:40
| 2016-11-22T14:49:40
| 49,825,523
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,762
|
r
|
findDepthOffset.R
|
## Per-tag depth-offset estimation.  For each PTT id this fits/plots the
## reported status depths over days-since-deployment and builds a daily
## depth-adjustment vector (depthOffsets), writing one diagnostic page per
## tag to out/depthOffset.pdf.  Assumes statusData and info exist in the
## calling environment.
statusDepths<-statusData[!is.na(statusData$Depth)&statusData$deployDay>-5,]
depthRange<-range(statusDepths$Depth[!is.na(statusDepths$Depth)])
#NOTE generating plot and adjustment info in one step
pdf('out/depthOffset.pdf')
depthOffsets<-lapply(info$PTTID,function(ptt){
  message(ptt)
  thisInfo<-info[info$PTTID==ptt,]
  #thisDepth<-statusDepths[statusDepths$Ptt==ptt&statusDepths$deployDay<thisInfo$lastDay+5,]
  thisDepth<-statusDepths[statusDepths$Ptt==ptt,]
  times<-thisDepth$deployDay
  ## Apply the tag-reported zero-depth offset, halved when the sensor scale
  ## is unknown (DepthSensor NA defaults to .5).
  depths<-thisDepth$Depth + ifelse(is.na(thisDepth$ZeroDepthOffset),0,thisDepth$ZeroDepthOffset) * ifelse(is.na(thisDepth$DepthSensor),.5,thisDepth$DepthSensor)
  ## No depth messages at all: return an all-zero adjustment covering
  ## -100..(last day + 100).
  if(length(depths)==0){
    safeMax<-round(max(c(thisInfo$lastDay,thisInfo$releaseDays,100),na.rm=TRUE))
    out<-rep(0,safeMax+201)
    names(out)<--100:(safeMax+100)
    return(out)
  }
  ## Diagnostic plot: raw offsets, zero line, linear trend (red).
  plot(times,depths,main=ptt,ylim=depthRange,ylab='Depth',xlab='Days after deployment',xlim=c(-10,thisInfo$lastDay+10))
  abline(h=0,lty=2)
  depthReg<-lm(depths~times)
  coefs<-depthReg$coefficient[c('(Intercept)','times')]
  coefs[is.na(coefs)]<-0
  abline(coefs[1],coefs[2],col='#FF000077')
  approxDates<-seq(0,ceiling(max(c(times,thisInfo$lastDay)))+50,1)
  if(length(depths)==1){
    ## A single observation: constant adjustment.
    depthAdjust<-rep(depths,length(approxDates))
    depthAdjust2<-rep(depths,length(approxDates))
  } else {
    ## Piecewise-linear interpolation of the raw offsets (green), and a
    ## smoother version built from rolling medians on a 10-day grid (blue).
    depthAdjust<-approx(times,depths,approxDates,rule=2)$y
    #depthAdjust2<-spline(times,depths,xout=approxDates)$y
    #depthAdjust2<-(depthAdjust+coefs[1]+coefs[2]*approxDates)/2
    weeks<-seq(0,max(times),10)
    weekMedians<-sapply(weeks,function(x){
      ## NOTE(review): presence is tested within +/-5 days but the median is
      ## taken over +/-7 days — confirm the two windows are intentional.
      selector<-abs(times-x)<5
      if(any(selector))return(median(depths[abs(times-x)<7]))
      else return(NA)
    })
    if(sum(!is.na(weekMedians))>1)depthAdjust2<-approx(weeks,weekMedians,approxDates,rule=2)$y
    else depthAdjust2<-rep(median(depths),length(approxDates))
  }
  lines(approxDates,depthAdjust,col='#00FF0077')
  lines(approxDates,depthAdjust2,col='#0000FF77')
  abline(v=c(0,thisInfo$lastDay),lty=3)
  ## The rolling-median adjustment, named by day, is the returned offset.
  out<-depthAdjust2
  names(out)<-approxDates
  return(out)
})
dev.off()
names(depthOffsets)<-info$PTTID
tadOffsets<-lapply(info$PTTID,function(ptt){
thisInfo<-info[info$PTTID==ptt,]
#thisDepth<-statusDepths[statusDepths$Ptt==ptt&statusDepths$deployDay<thisInfo$lastDay+5,]
thisTad<-tad[tad$Ptt==ptt,]
times<-thisTad$deployDay
minDepths<-sapply(times,function(x){
nonZeros<-which(apply(thisTad[abs(times-x)<3,binCols],2,sum,na.rm=TRUE)>0)
if(min(nonZeros)!=max(nonZeros))return(min(nonZeros))
else return(NA)
})
if(any(is.na(minDepths))){
minDepths[is.na(minDepths)]<-approx(times,minDepths,times[is.na(minDepths)],rule=2)$y
}
names(minDepths)<-times
return(minDepths)
})
names(tadOffsets)<-info$PTTID
|
2e1578ce6ab64127b14a645746e2e4ebbe2253b6
|
599e4da9d94e1f4970680a1389cb4e0b96adb544
|
/Analysis/crump.r
|
e3735c90346f2b93ec39b504e2c23e9f3ed760d6
|
[] |
no_license
|
mcefalu/PPTA
|
73ea053a46918481501c062536d9c72571bc8bb7
|
0834bd2ee87138321fdc00665c5113cb32c602d0
|
refs/heads/master
| 2017-12-01T01:28:02.296009
| 2017-10-25T18:53:53
| 2017-10-25T18:53:53
| 108,310,193
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 638
|
r
|
crump.r
|
#####################################
## Implementation of Crump's method ##
#
# Trims the sample by propensity-score overlap (Crump, Hotz, Imbens &
# Mitnik, 2009) and fits a weighted marginal structural model for the
# treatment effect on the retained units.
#
# Y is the outcome
# A is the treatment indicator
# PS is the propensity score
#
# NOTE(review): requires the `survey` package for svyglm()/svydesign();
# assumed to be attached by the calling script -- confirm.
# Returns a list with the point estimate (est), its confidence interval
# (CI) and the trimming/IP weights (weight). If no cutoff satisfies the
# criterion the loop exhausts and the function returns NULL invisibly.
crump <- function(Y,A,PS){
  # PS*(1-PS): the variance term used in the trimming criterion.
  PS2 = PS*(1-PS)
  # Fold scores around 0.5 so each candidate cutoff is a distance to 0/1;
  # sorting lets the loop try the least restrictive cutoffs first.
  PStemp = PS*(PS<=.5) + (1-PS)*(PS>.5)
  PStemp = sort(PStemp)
  for (alpha in PStemp){
    alpha2=alpha*(1-alpha)
    # Crump's inequality: accept the first (smallest) alpha for which
    # 1/alpha2 <= 2 * average of 1/PS2 over the retained region.
    if( (1/alpha2) <= (2*sum( (PS2>=alpha2)*1/PS2 ) / sum(PS2>=alpha2)) ){
      # Inverse-probability weights, zeroed outside the retained region.
      w =A*(PS2>=alpha2)/PS + (1-A)*(PS2>=alpha2)/(1-PS)
      msm <- svyglm(Y ~ A, design = svydesign(~ 1, weights = ~ w , data=data.frame(A=A,Y=Y,w=w)))
      return(list(est=coef(msm)[2],CI=confint(msm)[2,], weight=w))
    }
  }
}
|
943e93092e7f558d2dd1e2b3179309aa8e62db02
|
1a9eeed43a99e1b01b61233d6013746b17e7286b
|
/scripts/plot2.R
|
abe96f4222ecaf5ad63fd014078f2c3ccc383087
|
[] |
no_license
|
srrussel74/ExData_Plotting1
|
b46ad1787817d28ec1005ad5c51038cca9ac4343
|
177a2e8ab7bd25f3ae9130f56aa6b3c005f3cd4c
|
refs/heads/master
| 2021-01-14T12:44:46.680762
| 2014-09-06T11:52:54
| 2014-09-06T11:52:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,141
|
r
|
plot2.R
|
# plot2.R -- Exploratory Data Analysis course, plot 2.
# Plots Global Active Power over time for 1-2 Feb 2007 and saves it as
# plot2.png (480x480 px, transparent background).
library(datasets)
#Load data with given options
data<-read.table("./household_power_consumption.txt", header=TRUE, sep=";");
# NOTE(review): missing values are not declared via na.strings, so the
# numeric columns arrive as factors; that is why the script converts via
# as.character()/as.numeric() further down.
#Convert class factor to class Date for col Date and to char for col Time
data$Date<-as.Date(data$Date,format="%d/%m/%Y");
#Subset data with 01 and 02 february 2007
subdata<-subset(data, Date=="2007-02-01" | Date=="2007-02-02");
#Convert class factor Time to char, then paste with Date to convert it class POSIXlt/POSIXt
subdata$Time<-as.character(subdata$Time);
tmp<-paste(subdata$Date,subdata$Time);
DateTime<-strptime(tmp,"%Y-%m-%d %H:%M:%S");
#Convert class factor to class numeric along as.character
GAP<-as.numeric(as.character(subdata$Global_active_power))
## Open PNG file
png(filename="plot2.png", width=480, height=480, units="px", bg="transparent")
with(subdata, plot(
  DateTime, # data year-month-day time
  GAP, #data Global Active Power
  ylab="Global Active Power (kilowatts)",
  xlab=NA, # no x-title
  type="l" # plot lines instead of points
  )
)
dev.off() ## Close PNG file
|
1b8b44a13d3558bd2ee7421947159ba3a1ebb98c
|
5c55e61748fd4d288cf74692d438dd23a0b36cb2
|
/exdata-012/project-2/plot4.R
|
f34cb2dbfe449ff56d016a9c9e9515dae7640a3e
|
[] |
no_license
|
maxim5/r-stuff
|
aa807ddec61513bbb3af45951128a18c25e57a40
|
92f63307068cb92b59e23142e1147f0702aa4e46
|
refs/heads/master
| 2021-04-02T10:57:24.012000
| 2015-08-28T12:10:57
| 2015-08-28T12:10:57
| 248,266,917
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,300
|
r
|
plot4.R
|
# plot4.R -- total PM2.5 emissions from coal combustion-related sources,
# by year: a bar plot (plot4.png) plus an alternative line plot
# (plot4-alt.png).
#
# NOTE(review): relies on common.R to provide `scc_mapping`, `inventory`,
# the colour constants (BAR_COLOR, BAR_BORDER_COLOR, PLOT_LINE_COLOR,
# POINT_FILL_COLOR) and to attach dplyr/ggplot2 -- confirm before
# running standalone.
source("common.R")
# Coal-related sources: any EI sector whose name contains "Coal".
sectors = unique(scc_mapping$EI.Sector)
coal_sectors = sectors[grep("Coal", sectors)]
coal_mapping = scc_mapping %>% filter(EI.Sector %in% coal_sectors)
# Sum emissions per year over the coal-related SCC codes (in 1000 tones).
coal_summary = inventory %>%
  filter(SCC %in% coal_mapping$SCC) %>%
  group_by(year) %>%
  summarise(total_emission=sum(Emissions) / 1000)
# Labels and theme shared by both plots.
TITLE = expression("Emissions from PM"[2.5]*" from coal combustion-related sources")
XLAB = ""
YLAB = "Total emission (in 1000 tones)"
THEME = theme(plot.margin=unit(c(1,1,1,1), "cm"),
              plot.title=element_text(vjust=2),
              axis.title.y=element_text(angle=90, vjust=2))
# View as a barplot
ggplot(coal_summary,
       aes(x=as.factor(year), y=total_emission)) +
  geom_bar(stat="identity", fill=BAR_COLOR, colour=BAR_BORDER_COLOR) +
  labs(title=TITLE, x=XLAB, y=YLAB) +
  THEME
ggsave("plot4.png")
# Alternative view - a line plot
ggplot(coal_summary,
       aes(x=year, y=total_emission)) +
  geom_line(colour=PLOT_LINE_COLOR) +
  geom_point(size=4, shape=21, fill=POINT_FILL_COLOR) +
  scale_x_continuous(breaks = seq(min(coal_summary$year), max(coal_summary$year), by=3)) +
  scale_y_continuous(limits=c(0, max(coal_summary$total_emission))) +
  labs(title=TITLE, x=XLAB, y=YLAB) +
  THEME
ggsave("plot4-alt.png")
|
5e3837f93dff2a9fe0cdf9cfaefa82dbf2e85ca5
|
8d25028ef38f747bc7ee94ce571588dc70f6748f
|
/tests/testthat/test-discrete_by_quantile.R
|
1c5c738773148b5f8fdecadeb218d1f0e44a3aaf
|
[
"MIT"
] |
permissive
|
nhejazi/nima
|
645130b400e24f25258a36ceed081f11376274a0
|
b31aaeef4f9a44e725cdd3004ab4b4d8e3b52f76
|
refs/heads/master
| 2020-12-25T09:08:03.780186
| 2020-03-06T02:01:37
| 2020-03-06T02:01:37
| 52,858,693
| 1
| 3
|
NOASSERTION
| 2019-12-11T18:07:20
| 2016-03-01T07:53:36
|
R
|
UTF-8
|
R
| false
| false
| 315
|
r
|
test-discrete_by_quantile.R
|
# Unit tests for discrete_by_quantile() (defined elsewhere in the package).
context("Discretizing vectors by quantile")

# 100 normal draws should be mapped onto integer bins spanning 1..4
# (apparently quartile bins -- the expected range is c(1, 4)).
test_that("range of discretization matches quantiles for numerics", {
  expect_equal(range(discrete_by_quantile(rnorm(100))), c(1, 4))
})

# Character input is neither numeric nor factor, so it must error.
test_that("discretization fails for non-numeric and non-factor inputs", {
  expect_error(discrete_by_quantile(c("foo", "bar")))
})
|
7fa65204d2b3eb113507e6dfcfcc4e2614169081
|
c5ed03ffbb6b7a6f298e48dda384343166483a47
|
/Psoralen/sacCer3_analysis/linker_lengths.r
|
bd70af80302727054da2b384ef9d3002dd66230f
|
[] |
no_license
|
HJeffery/Thesis_supplementary
|
d9ae1dc18de47823a9399285187e66de67936574
|
b6af00c42c749da6bfcfc2ea149f1f3e534ac64c
|
refs/heads/master
| 2023-04-21T18:27:21.935040
| 2021-05-16T17:11:41
| 2021-05-16T17:11:41
| 367,930,591
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
linker_lengths.r
|
# Script to make a plot of linker sizes
# Written by Heather Jeffery
# 29th April 2020
library(ggplot2)
library(plyr)
# Input: one linker length (in bases) per row, no header.
linker_lengths <- read.csv("2009_Jiang_linker_sizes_sacCer3.csv", header = FALSE)
colnames(linker_lengths) <- c("Lengths")
# Frequency table of the lengths, printed for reference only.
summary <- count(linker_lengths, vars = "Lengths")
print(summary)
# Plot
# NOTE(review): guides(fill=FALSE) is deprecated in ggplot2 >= 3.3.4;
# prefer guides(fill = "none") when this code is next touched.
p <- ggplot(linker_lengths, aes(x = Lengths)) +
  geom_histogram(fill = "#006600", color = "black", bins = 100) +
  guides(fill=FALSE) +
  scale_y_continuous(expand = c(0,0), limits = c(0,6000)) + # This sets the x axis to be zero on the y axis
  scale_x_continuous(expand = c(0,0)) +
  labs(title = "Linker lengths in sacCer3 (Jiang 2009)", x = "Linker length (bases)", y = "Frequency") +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5)) +
  theme(text = element_text(size = 20))
print(p)
ggsave("Linker_lengths.png", width = 12)
# Console summary: mean linker length and linker count.
print(paste0("Mean lengths = ", mean(linker_lengths$Lengths)))
print(paste0("Number of linkers = ", length(linker_lengths$Lengths)))
|
1ae8f488da913eb92d80fc3bded0856ac7b2833c
|
f02e02d6d797a7da2879b04022d088024798187d
|
/Advanced R Programming/lab03/lab03_result/tests/testthat/test-my-test.R
|
6dc8fd1b625a65162776d5b0f7392bac887d27d2
|
[
"MIT"
] |
permissive
|
lennartsc/MSc-Statistics-and-Machine-Learning
|
aa0d920955f12daf79c01d3233fc6381d5923672
|
57905f689db794ca9bfe4775859106942d80456a
|
refs/heads/master
| 2023-05-26T23:48:08.461875
| 2020-07-07T10:16:57
| 2020-07-07T10:16:57
| 214,446,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,295
|
r
|
test-my-test.R
|
# Unit tests for euclidean() and the two Dijkstra implementations
# (dijkstra_man / dijkstra_adv), exercised on the package's wiki_graph.
# FIX: the file previously called context("test-my-test.R") twice in a
# row; the duplicate has been removed.
context("test-my-test.R")

test_that("package works", {
  expect_equal(euclidean(100, 1000), 100)
  expect_equal(euclidean(123612, 13892347912), 4)
  expect_equal(euclidean(-100, 1000), 100)
  expect_equal(dijkstra_man(wiki_graph, 1), c(0, 7, 9, 20, 20, 11))
  expect_equal(dijkstra_man(wiki_graph, 3), c(9, 10, 0, 11, 11, 2))
  expect_equal(dijkstra_adv(wiki_graph, 1), c(0, 7, 9, 20, 20, 11))
  expect_equal(dijkstra_adv(wiki_graph, 3), c(9, 10, 0, 11, 11, 2))
})

test_that("Error messages are returned for erronous input in the Dijkstra algorithm.", {
  wiki_wrong_graph <- wiki_graph
  # NOTE(review): this assigns the single string "v1, v3, w" as the first
  # column name (the rest become NA); possibly c("v1", "v3", "w") was
  # intended. Either way the graph is invalidly named, which is exactly
  # what this negative test needs, so behavior is kept as-is.
  names(wiki_wrong_graph) <- c("v1, v3, w")
  expect_error(dijkstra_man(wiki_wrong_graph, 3))
  expect_error(dijkstra_adv(wiki_wrong_graph, 3))
  # A graph missing its weight column must also be rejected.
  wiki_wrong_graph <- wiki_graph[1:2]
  expect_error(dijkstra_man(wiki_wrong_graph, 3))
  expect_error(dijkstra_man(wiki_graph, 7))
  expect_error(dijkstra_man(as.matrix(wiki_graph), 3))
  expect_error(dijkstra_adv(wiki_wrong_graph, 3))
  expect_error(dijkstra_adv(wiki_graph, 7))
  expect_error(dijkstra_adv(as.matrix(wiki_graph), 3))
})

test_that("Wrong input throws an error.", {
  expect_error(euclidean("100", 1000))
  expect_error(euclidean(0, 1000))
  expect_error(euclidean(0, NA))
  expect_error(euclidean(100, "1000"))
  expect_error(euclidean(TRUE, "1000"))
})
|
1758698b550fdceebbd95f76a7c60b42ac8ae32e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BAMMtools/examples/computeBayesFactors.Rd.R
|
9a3f584af5bab01e47db378b72d875d25a12c9bb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 252
|
r
|
computeBayesFactors.Rd.R
|
# Extracted example for BAMMtools::computeBayesFactors.
library(BAMMtools)
### Name: computeBayesFactors
### Title: Compute Bayes Factors
### Aliases: computeBayesFactors
### Keywords: models
### ** Examples
# Compute Bayes factors for the whale MCMC output shipped with BAMMtools,
# with a prior expectation of 1 shift and the first 10% discarded as burn-in.
data(mcmc.whales)
computeBayesFactors(mcmc.whales, expectedNumberOfShifts = 1, burnin = 0.1)
|
87427423ff861dfb1db408bcfbb92b7cef8af738
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/A_github/sources/authors/4648/Bagidis/BAGIDIS.dist.BD.r
|
972f5d09d542f9d8d5b241a04eee49aeadaab53b
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,633
|
r
|
BAGIDIS.dist.BD.r
|
# BAGIDIS dissimilarity (semi-distance) between the BAGIDIS expansions of
# two series, given their detail coefficients and breakpoints.
#
# Args:
#   Details1, Breakpoints1: detail/breakpoint vectors of the first series.
#   Details2, Breakpoints2: detail/breakpoint vectors of the second series
#                           (same length as the first).
#   p:     order of the norm combining the breakpoint and detail
#          differences at each rank k (p = Inf uses the sup norm).
#   wk:    optional rank weights; defaults to log(N+1-k)/log(N+1),
#          decreasing with rank.
#   Param: balance in [0, 1] between the breakpoint (Param) and detail
#          (1 - Param) contributions.
#
# Returns: a single numeric dissimilarity value, sum_k wk[k] * dk[k].
#
# BUG FIX: for p = Inf the original code used max(), which collapses the
# per-rank differences into ONE scalar; the p -> Inf limit of the finite-p
# branch is the elementwise sup norm, i.e. pmax().
BAGIDIS.dist.BD <- function(Details1, Breakpoints1,
                            Details2, Breakpoints2,
                            p = 2,
                            wk = NULL,
                            Param = 0.5) {
  #=====================================================================
  # Scale details and breakpoints of both series by the balance parameter
  #=====================================================================
  if (p != Inf) {
    det.S1 <- ((1 - Param)^(1 / p)) * Details1
    break.S1 <- (Param^(1 / p)) * Breakpoints1
    det.S2 <- ((1 - Param)^(1 / p)) * Details2
    break.S2 <- (Param^(1 / p)) * Breakpoints2
  } else {
    # if p = Inf
    det.S1 <- (1 - Param) * Details1
    break.S1 <- Param * Breakpoints1
    det.S2 <- (1 - Param) * Details2
    break.S2 <- Param * Breakpoints2
  }
  #=====================================================================
  # Default rank weights (when not supplied in the call)
  #=====================================================================
  if (is.null(wk)) {
    N <- length(det.S1)
    wk <- log(N + 1 - seq_len(N)) / log(N + 1)
  }
  #=====================================================================
  # Per-rank partial dissimilarities in p-norm
  #=====================================================================
  if (p != Inf) {
    dk <- ((abs(break.S1 - break.S2))^p + (abs(det.S1 - det.S2))^p)^(1 / p)
  } else {
    # Elementwise sup norm: pmax, NOT max (max() would collapse the
    # per-rank vector to a single scalar).
    dk <- pmax(abs(break.S1 - break.S2), abs(det.S1 - det.S2))
  }
  #=====================================================================
  # Weighted sum over ranks = the semi-distance
  #=====================================================================
  sum(wk * dk)
}
|
120f0e0c779bbe6fb711b048b1852db353de1c06
|
f51ad1cfad4ef6c28062aae4b5a41c46c2bcac4e
|
/shiny.R
|
41ed8a51011f471344a968847a35ff88e4f3bf0e
|
[] |
no_license
|
sarasarasun/Rtemp
|
03ccb05fbb1828723063ac3f2b930edb451564e3
|
11d0f5fc6bcdb3e6f24b5e71ef7a82abc2941ae8
|
refs/heads/master
| 2021-01-19T04:22:00.510882
| 2016-08-16T04:19:22
| 2016-08-16T04:19:22
| 65,777,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,343
|
r
|
shiny.R
|
#R Code for Shiny Tutorial for Boston Data Con
# NOTE(review): this is an interactive tutorial meant to be run
# chunk-by-chunk; each shinyApp() call launches an app and holds the R
# session until the app window is closed.
#Load the dataset
#install.packages('ElemStatLearn') #loaded during session
library(ElemStatLearn)
data("SAheart")
?SAheart
names(SAheart)
summary(SAheart)
# Treat the chronic-heart-disease outcome as a categorical variable.
SAheart$chd<-factor(SAheart$chd)
#First install and load the package
#install.packages("shiny") # loaded during session
library(shiny)
#Three parts of shiny
#1.Inputs
#2.Server
#3.Application
#Inputs
#Let's try a slider
?sliderInput
#ui<-fluidPage( sliderInput())
#We need a max
ui<-fluidPage(sliderInput(inputId ="num", max = 100, min = 2, value = 5, label= "Breaks"))
#We also need a min!
server <- function(input,output){}
shinyApp(ui = ui, server = server) #This launches our first shiny app!
#Try changing the values in our ui to see how it changes the output
#Outputs
ui<-fluidPage(sliderInput(inputId ="num", max = 100, min = 2, value = 5, label= "Breaks"),
              plotOutput("hist"))
server <- function(input,output){}
shinyApp(ui = ui, server = server)
#No change! (the "hist" output has no render function yet)
#Server
?renderPlot
ui<-fluidPage(sliderInput(inputId ="num", max = 100, min = 2, value = 5, label= "Breaks"),
              plotOutput("hist"))
server <- function(input,output){
  output$hist<-renderPlot({hist(SAheart$sbp)})
}
shinyApp(ui = ui, server = server) #We have a histogram displayed!
#Let's add some interaction: the slider now drives the number of breaks
ui<-fluidPage(sliderInput(inputId ="num", max = 100, min = 2, value = 5, label= "Breaks"),
              plotOutput("hist"))
server <- function(input,output){
  output$hist<-renderPlot({hist(SAheart$sbp, breaks = input$num)})
}
shinyApp(ui = ui, server = server)
#Let's clean it up
ui<-fluidPage(sliderInput(inputId ="num", max = 85, min = 1, value = 5, label= "Histogram Breaks"),
              plotOutput("hist"))
server <- function(input,output){
  output$hist<-renderPlot({hist(SAheart$sbp, breaks = input$num, main = "Histogram of Blood Pressure", xlab = "Systolic Blood Pressure (mm Hg)")})
}
shinyApp(ui = ui, server = server)
################################
#Let's change our slider to a text box
ui<-fluidPage(numericInput(inputId ="num", min = 1, value = 5, label= "Histogram Breaks"),
              plotOutput("hist"))
server <- function(input,output){
  output$hist<-renderPlot({hist(SAheart$sbp, breaks = input$num, main = "Histogram of Blood Pressure", xlab = "Systolic Blood Pressure (mm Hg)")})
}
shinyApp(ui = ui, server = server)
#Let's try out text input (user supplies the plot title)
?textInput
ui<-fluidPage(numericInput(inputId ="num", min = 1, value = 5, label= "Histogram Breaks"),
              textInput(inputId = "text", label = "Histogram Title"),
              plotOutput("hist"))
server <- function(input,output){
  output$hist<-renderPlot({hist(SAheart$sbp, breaks = input$num, main = input$text, xlab = "Systolic Blood Pressure (mm Hg)")})
}
shinyApp(ui = ui, server = server)
#Now for a more complex example
#We'll use the app to explore the relationship between the independent variables and the dependent variable Chronic Heart Disease (chd)
# The dropdown offers every column except the factor outcome (columns 1:4, 6:9).
ui<-fluidPage(selectInput(inputId = "independent", label = "Independent Variable", choices = colnames(SAheart[,c(1:4,6:9)])),
              plotOutput("boxplot"))
server<-function(input,output){
  output$boxplot<-renderPlot({boxplot(SAheart[,input$independent]~SAheart$chd, xlab = "Chronic Heart Disease", ylab = input$independent)})
}
shinyApp(ui = ui, server = server)
|
0937fe4e71be0a28484d8a450b678b140e8722c7
|
ae7b89639ac8befcfcb72b497482df70f853a897
|
/man/wirecost.Rd
|
8ce0c4dd54c35d6878ac286b0351826d64163582
|
[] |
no_license
|
Pralhad-Analyst/wireharness
|
09a31772168f71e234ecc2e959be4301a83a1691
|
b0dbbf89d2f62fb92ebb5c30a832c19a0ff222e1
|
refs/heads/master
| 2022-12-19T09:57:46.073626
| 2020-09-25T05:34:29
| 2020-09-25T05:34:29
| 294,894,888
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 474
|
rd
|
wirecost.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wirecost.R
\name{wirecost}
\alias{wirecost}
\title{Calculate Raw Wire Cost}
\usage{
wirecost(data_gauge, wirelengthh, Awg)
}
\arguments{
\item{data_gauge}{data set of gauge rates}
\item{wirelengthh}{numeric value giving the total wire length}
\item{Awg}{numeric value giving the gauge (AWG) of the wire}
}
\value{
Raw Wire Cost (mostly in dollars)
}
\description{
Calculate Raw Wire Cost
}
|
7c9fabe88445c47afbb8af39b02c553e8b67c7db
|
2099a2b0f63f250e09f7cd7350ca45d212e2d364
|
/DUC-Dataset/Summary_p100_R/D086.AP900802-0135.html.R
|
5e68c1c137e1d6887a5df98ab3c1b989923bcf92
|
[] |
no_license
|
Angela7126/SLNSumEval
|
3548301645264f9656b67dc807aec93b636778ef
|
b9e7157a735555861d2baf6c182e807e732a9dd6
|
refs/heads/master
| 2023-04-20T06:41:01.728968
| 2021-05-12T03:40:11
| 2021-05-12T03:40:11
| 366,429,744
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
r
|
D086.AP900802-0135.html.R
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:6, WORD_NUM:103">
</head>
<body bgcolor="white">
<a href="#0" id="0">Iraq Invades Kuwait; U.S. Responds With Sanctions; Kuwait radio, 1st graf, a0571.</a>
<a href="#1" id="1">Foreigners reached by telephone said they watched from their windows or dived for cover as the invasion began.</a>
<a href="#2" id="2">Troops set up roadblocks at major intersections and shot at cars that did not stop.</a>
<a href="#3" id="3">_The Soviet Union, Iraq's main arms supplier, halted weapons sales.</a>
<a href="#4" id="4">Iraq has emerged as the Arab world's strongest and most militant military power, and Saddam has a vast arsenal of chemical and conventional weapons.</a>
<a href="#5" id="5">There are about 4,000 Americans among the large expatriate community in Kuwait, where more than 60 percent of the 1.8 million residents are foreigners.</a>
</body>
</html>
|
d9b3d73d5f4eb9c6f8700f58d34306d6cc704661
|
92d54f598099f13f7150d8a6fbf39d14e7371ff4
|
/R/default.R
|
6edc61707244d1d77348955f6e53183aaeb4f13a
|
[
"MIT"
] |
permissive
|
r-dbi/RPostgres
|
3c44d9eabe682e866411b44095a4671cbad275af
|
58a052b20f046c95723c332a0bb06fdb9ed362c4
|
refs/heads/main
| 2023-08-18T09:48:04.523198
| 2023-07-11T02:17:42
| 2023-07-11T02:17:42
| 28,823,976
| 230
| 66
|
NOASSERTION
| 2023-08-31T08:20:25
| 2015-01-05T17:43:02
|
R
|
UTF-8
|
R
| false
| false
| 1,434
|
r
|
default.R
|
#' Check if default database is available.
#'
#' RPostgres examples and tests connect to a default database via
#' `dbConnect(`[RPostgres::Postgres()]`)`. This function checks if that
#' database is available, and if not, displays an informative message.
#'
#' @param ... Additional arguments passed on to [dbConnect()]
#' @export
#' @examples
#' if (postgresHasDefault()) {
#' db <- postgresDefault()
#' print(dbListTables(db))
#' dbDisconnect(db)
#' } else {
#' message("No database connection.")
#' }
postgresHasDefault <- function(...) {
  tryCatch(
    {
      # Try a throwaway connection: success means a default DB is reachable.
      con <- connect_default(...)
      dbDisconnect(con)
      TRUE
    },
    error = function(...) {
      # Connection failed: tell the user which environment variables
      # configure the default connection, then report FALSE.
      message(
        "Could not initialise default postgres database. If postgres is running\n",
        "check that the environment variables PGHOST, PGPORT, \n",
        "PGUSER, PGPASSWORD, and PGDATABASE, are defined and\n",
        "point to your database."
      )
      FALSE
    })
}
#' @description
#' `postgresDefault()` works similarly but returns a connection on success and
#' throws a testthat skip condition on failure, making it suitable for use in
#' tests.
#' @export
#' @rdname postgresHasDefault
postgresDefault <- function(...) {
  tryCatch(
    {
      # Return a live connection to the default database on success.
      connect_default(...)
    },
    error = function(...) {
      # On failure, raise a testthat skip condition (rather than an error)
      # so test suites degrade gracefully when no database is available.
      testthat::skip("Test database not available")
    })
}
# Internal helper: open a connection to the default Postgres database,
# forwarding all arguments to dbConnect().
connect_default <- function(...) {
  dbConnect(Postgres(), ...)
}
|
aa592c2dac22701defae84caf71599d9b0a9a64d
|
1d4edd65cac511c7d9b559087f5ffa253562cf46
|
/man/summary.Rd
|
41e13c2ef9ca599ffc001ba581984798ad177109
|
[] |
no_license
|
TobieSurette/gulf.stats
|
1f775524ad0a0fee5f6baba031fe957429163813
|
3f1497acdd292d2b9ae57e0d07689df8489db2fa
|
refs/heads/master
| 2023-01-06T09:59:25.834213
| 2022-12-23T17:53:53
| 2022-12-23T17:53:53
| 253,641,836
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 461
|
rd
|
summary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{summary.ked}
\alias{summary.ked}
\title{Summary for Statistical Models}
\usage{
\method{summary}{ked}(x, polygon, ...)
}
\description{
Generate a summary for a statistical model.
}
\section{Methods (by class)}{
\itemize{
\item \code{summary(ked)}: Generate a summary of a 'ked' object. The output includes sample and kriged estimates,
along with error estimates.
}}
|
4157c2cc102c984cfd03d07202811b45cadd74c4
|
b7e3b2977a19a5ea95d832a54d0443fb8acae6f3
|
/rasqualTools/man/tabixFetchGenes.Rd
|
56c51adb4acf1caf6f1e65bd9b567c66de33f6c2
|
[] |
no_license
|
kauralasoo/rasqual
|
f29ad9e74795dc1dd07e11bed95e1785def31b96
|
05c3a1e38f75679cce5e0806640cfd250363eede
|
refs/heads/master
| 2021-01-18T01:57:38.996084
| 2017-07-28T12:32:08
| 2017-07-28T12:32:08
| 53,346,349
| 9
| 5
| null | 2016-03-07T17:53:05
| 2016-03-07T17:53:05
| null |
UTF-8
|
R
| false
| true
| 575
|
rd
|
tabixFetchGenes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_export_files.R
\name{tabixFetchGenes}
\alias{tabixFetchGenes}
\title{Fetch particular genes from tabix indexed Rasqual output file.}
\usage{
tabixFetchGenes(gene_ranges, tabix_file)
}
\arguments{
\item{gene_ranges}{GRanges object with coordinates of the cis regions around genes.}
\item{tabix_file}{Tabix-indexed Rasqual output file.}
}
\value{
List of data frames containing Rasqual results for each gene.
}
\description{
Fetch particular genes from tabix indexed Rasqual output file.
}
|
80c93ac3d581fd3c5b6e3c04606af31a99ab3f41
|
3eb3ddae9516b87d63e66c4cd4db49f1a878564b
|
/R Library/R/Tinn-R/sample/french/Tinn-R_exemple de script.r
|
18c6586b617797d0023642f0d4fab03ef0e79bcb
|
[] |
no_license
|
openefsa/C-TSEMM
|
7903b2e73c5c202312a92af71962984471f6ff56
|
34da6db9f6bbfdd27687d3326f02812f5ed18549
|
refs/heads/master
| 2021-01-20T18:24:04.496167
| 2016-06-01T12:15:49
| 2016-06-01T12:15:49
| 60,173,311
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 2,767
|
r
|
Tinn-R_exemple de script.r
|
#===================================//=========================================#
# Mail: <<< joseclaudio.faria@gmail.com >>>
# <<< phgrosjean@sciviews.org >>>
#
# Plus: <<< http://zoonek2.free.fr/UNIX/48_R/all.html >>>
#===================================//=========================================#
# (Tinn-R sample script: a short guided tour of R basics.
# Comments translated from the original French to English.)
# help.start()
# Starts the online help interface (using your machine's default web
# browser). You should explore this help briefly to get familiar with it
# if you are new to R.
# Minimize the help window now so you can continue the demo.
x <- rnorm(50)
x
y <- rnorm(50)
y
# Generates two vectors of pseudo-random numbers for the x and y coordinates.
x11(w=4, h=4); bringToTop(s=T);
plot(x, y)
# Scatter plot. A graphics window appears automatically.
ls()
# Lists the objects currently in the R user workspace.
rm(x, y)
# Removes objects that are no longer needed.
x <- 1:20
# Creates x = (1, 2, ..., 20).
w <- 1 + sqrt(x)/2
# A vector of 'weights': standard deviations that grow with x.
dummy <- data.frame(x=x, y=x + rnorm(x)*w)
# dummy is a data frame with two variables, x and y.
fm <- lm(y ~ x, data=dummy)
summary(fm)
# Fits a simple linear regression of y on x, and inspects the result.
fm1 <- lm(y ~ x, data=dummy, weight=1/w^2)
summary(fm1)
# Knowing the standard deviations, we now fit a weighted regression.
lrf <- with(dummy, lowess(x, y))
# Fits a nonparametric local regression.
with(dummy, plot(x, y))
# Scatter plot.
lines(x, lrf$y)
# Adds a curve showing the local regression.
abline(0, 1, lty=3)
# The true regression line of the model (intercept 0, slope 1).
abline(coef(fm))
# The unweighted regression line.
abline(coef(fm1), col="red")
# The weighted regression line.
plot(fitted(fm), resid(fm),
     xlab="Fitted values",
     ylab="Residuals",
     main="Residuals vs Fitted")
# A basic diagnostic plot of the residuals, to look for
# heteroscedasticity. Can you see it?
qqnorm(resid(fm), main="Residuals Rankit Plot")
# A quantile-quantile plot, to look for skewness, kurtosis or outliers
# in the distribution of the residuals. (Nothing significant here.)
rm(w, fm, fm1, lrf, x, dummy)
# Cleans up the variables again.
graphics.off()
# Closes any open graphics window(s).
# q()
# Quits R. You will be asked whether you want to save the current session.
# For this demo, you probably do not want to save anything.
|
86ffac3e4f1a1b3ae22692250b7c22c8daf27c9a
|
97ae4842948e48a5843d7866f94efc44f099f3ae
|
/R/colorlegend.R
|
3f74fa347a756b915d3afc4a2917c52b988408c5
|
[
"MIT"
] |
permissive
|
taiyun/corrplot
|
d60e54742baba0d25ac2e2ff2b1343dffc917d12
|
0497ad6717c2909fb1ed0c75ff6d269605107808
|
refs/heads/master
| 2023-07-05T14:51:00.802057
| 2022-08-31T04:54:03
| 2022-08-31T04:54:03
| 2,910,722
| 274
| 105
|
NOASSERTION
| 2022-08-31T04:54:04
| 2011-12-04T15:15:45
|
R
|
UTF-8
|
R
| false
| false
| 3,362
|
r
|
colorlegend.R
|
#' Draw color legend.
#'
#' @param colbar Vector, color of colbar.
#' @param labels Vector, numeric or character to be written.
#' @param at Numeric vector (quantile), the position to put labels. See examples
#' for details.
#' @param xlim See in \code{\link{plot}}
#' @param ylim See in \code{\link{plot}}
#' @param vertical Logical, whether the colorlegend is vertical or horizon.
#' @param ratio.colbar The width ratio of colorbar to the total colorlegend
#' (including colorbar, segments and labels).
#' @param lim.segment Vector (quantile) of length 2, the elements should be in
#' [0,1], giving segments coordinates ranges. If the value is NULL or 'auto',
#' then the ranges are derived automatically.
#' @param align Character, alignment type of labels, \code{'l'} means left,
#' \code{'c'} means center and \code{'r'} right.
#' Only valid when \code{vertical} is \code{TRUE}.
#' @param addlabels Logical, whether add text label or not.
#' @param \dots Additional arguments, passed to \code{\link{plot}}
#'
#' @example vignettes/example-colorlegend.R
#' @keywords hplot
#' @author Taiyun Wei
#' @export
colorlegend = function(
  colbar,
  labels,
  at = NULL,
  xlim = c(0, 1),
  ylim = c(0, 1),
  vertical = TRUE,
  ratio.colbar = 0.4,
  lim.segment = 'auto', # NOTE: NULL treated as 'auto'
  align = c('c', 'l', 'r'),
  addlabels = TRUE,
  ...)
{
  # Default label positions: evenly spaced over [0, 1] (quantile scale).
  if (is.null(at) && addlabels) {
    at = seq(0L, 1L, length = length(labels))
  }
  # 'auto' (or NULL): put the tick segments just beyond the colourbar,
  # at a fifth of its width.
  if (any(is.null(lim.segment)) || any(lim.segment == 'auto')) {
    lim.segment = ratio.colbar + c(0, ratio.colbar * .2)
  }
  # ---- input validation ----
  if (any(at < 0L) || any(at > 1L)) {
    stop('at should be between 0 and 1')
  }
  if (length(lim.segment) != 2) {
    stop('lim.segment should be a vector of length 2')
  }
  if (any(lim.segment < 0L) || any(lim.segment > 1L)) {
    stop('lim.segment should be between 0 and 1')
  }
  align = match.arg(align)
  xgap = diff(xlim)   # width of the legend region
  ygap = diff(ylim)   # height of the legend region
  len = length(colbar)
  rat1 = ratio.colbar
  rat2 = lim.segment
  if (vertical) {
    # Map quantile positions into user y-coordinates.
    at = at * ygap + ylim[1]
    # One rectangle per colour, stacked bottom-to-top.
    yyy = seq(ylim[1], ylim[2], length = len + 1)
    rect(rep(xlim[1], len), yyy[1:len],
         rep(xlim[1] + xgap * rat1, len), yyy[-1],
         col = colbar, border = colbar)
    # Black outline around the whole colourbar.
    rect(xlim[1], ylim[1], xlim[1] + xgap * rat1, ylim[2], border = 'black')
    # Tick segments at the label positions.
    segments(xlim[1] + xgap * rat2[1], at, xlim[1] + xgap * rat2[2], at)
    if (addlabels) {
      # Labels go right of both the colourbar and the tick segments.
      pos.xlabel = rep(xlim[1] + xgap * max(rat2, rat1), length(at))
      if(align == 'l') {
        text(pos.xlabel, y = at, labels = labels, pos = 4, ...)
      }
      if(align == 'r') {
        text(xlim[2], y = at, labels = labels, pos = 2, ...)
      }
      if(align == 'c') {
        text((pos.xlabel + xlim[2]) / 2, y = at, labels = labels, ...)
      }
    }
  } else {
    # Horizontal layout: colourbar along the top edge, labels below.
    at = at * xgap + xlim[1]
    xxx = seq(xlim[1], xlim[2], length = len + 1)
    rect(xxx[1:len], rep(ylim[2] - rat1 * ygap, len),
         xxx[-1], rep(ylim[2], len),
         col = colbar, border = colbar)
    rect(xlim[1], ylim[2] - rat1 * ygap, xlim[2], ylim[2], border = 'black')
    segments(at, ylim[2] - ygap * rat2[1], at, ylim[2] - ygap * rat2[2])
    if (addlabels) {
      pos.ylabel = rep(ylim[2] - ygap * max(rat2, rat1), length(at))
      text(x = at, y = pos.ylabel, labels = labels, pos = 1, ...)
    }
  }
}
|
c987ea835722f7a6e25bfbbc3d2474a1331c421c
|
da74c7c306abb829fd272b5fd45191ad68fc9d67
|
/ProgAssignment3-data/code/Best.R
|
dfbcc8e11583fa39bed3c27c09c9cb3473985768
|
[] |
no_license
|
JPaul23/datasciencecoursera
|
ab08ec88e1199d423436240cb0d19d7e236f0fc5
|
030f72580f8dfb176db806f3ad03423cbe714c39
|
refs/heads/master
| 2023-03-27T06:23:20.767729
| 2021-03-29T23:02:19
| 2021-03-29T23:02:19
| 342,956,831
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,279
|
r
|
Best.R
|
# Find the best hospital in a state for a given outcome.
#
# Reads "outcome-of-care-measures.csv" from the working directory and
# returns the name(s) of the hospital(s) in `state` with the lowest rate
# for `outcome`; ties are all returned, sorted alphabetically.
#
# Args:
#   state:   state abbreviation as it appears in the data, e.g. "TX".
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#
# Returns: character vector of hospital name(s) with the minimum rate.
#
# FIXES vs the original:
#  * `sd[, eval(outcome)]` used eval() on a plain string (a no-op
#    anti-pattern); indexing with the string directly is equivalent.
#  * the local `sd` shadowed stats::sd; renamed to `state_data`.
#  * non-numeric entries (e.g. "Not Available") become NA on coercion;
#    the warning is now suppressed deliberately.
best <- function(state, outcome) {
  data <- read.csv("outcome-of-care-measures.csv",
                   colClasses = "character", header = TRUE)
  # Keep only the needed columns: hospital name, state and the three
  # outcome columns.
  dt <- as.data.frame(cbind(data[, 2],   # Hospital name
                            data[, 7],   # State
                            data[, 11],  # Heart attack
                            data[, 17],  # Heart failure
                            data[, 23]), # Pneumonia
                      stringsAsFactors = FALSE)
  colnames(dt) <- c("hospital", "state",
                    "heart attack", "heart failure", "pneumonia")
  # Validate inputs (error messages kept identical to the original).
  if (!state %in% dt[, "state"]) {
    stop('Invalid state !')
  } else if (!outcome %in% c("heart attack", "heart failure", "pneumonia")) {
    stop('Invalid outcome!')
  }
  state_data <- dt[dt[, "state"] == state, ]
  rates <- suppressWarnings(as.numeric(state_data[, outcome]))
  val_min <- min(rates, na.rm = TRUE)
  results <- state_data[, "hospital"][which(rates == val_min)]
  # All tied hospitals, alphabetically sorted (same as the original).
  results[order(results)]
}
|
f941802b1d84029d04fd46db0ca0dd426e0c7dc8
|
6710ed3f613c15aa016796fb4acd26d7cb1ad6e1
|
/man/barycenter_unbalanced_stabilized.Rd
|
15cb08d63a91e0b8a408b8e0611179b439b439f5
|
[] |
no_license
|
shizhan666/ROT
|
defcd2b2586b45ca0bb2490fb540dc29936e6986
|
6f387b60643043d17d1302410b0b45ca508565c7
|
refs/heads/main
| 2023-02-13T16:07:31.943526
| 2021-01-12T08:18:23
| 2021-01-12T08:18:23
| 328,911,614
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,834
|
rd
|
barycenter_unbalanced_stabilized.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{barycenter_unbalanced_stabilized}
\alias{barycenter_unbalanced_stabilized}
\title{unbalanced barycenter}
\usage{
barycenter_unbalanced_stabilized(
A,
M,
weights,
reg = 0.1,
reg_m = 1,
numItermax = 1000L,
stopThr = 1e-08,
verbose = FALSE
)
}
\arguments{
\item{A}{matrix(dim, n). n training distributions a_i of dimension dim.}
\item{M}{matrix(dim, dim). ground metric matrix for OT.}
\item{reg}{Entropy regularization term > 0.}
\item{reg_m}{Marginal relaxation term > 0.}
\item{numItermax}{int. Max number of iterations.}
\item{stopThr}{float. Stop threshold on error (> 0).}
\item{verbose}{bool. Print information along iterations.}
\item{weights}{vector(n). Weight of each distribution (barycentric coordinates).}
}
\value{
vector(dim). Unbalanced Wasserstein barycenter
}
\description{
Compute the entropic unbalanced wasserstein barycenter of A.
}
\examples{
a=dnorm(1:100,20,10)
a=a/sum(a)
b=dnorm(1:100, mean=60, sd=2)
b=b/sum(b)*2
dab = matrix(c(a, b), 100, 2)
distance= dists(1:100,1:100)
distance = distance/max(distance)
res = barycenter_unbalanced_stabilized(dab, distance, reg = 1, reg_m = 1, weights = c(0.5, 0.5))
p = data.frame(ind = 1:100, a = a, b = b, c = res)
ggplot(data = p)+geom_line(aes(x = ind, y = a), color = "blue")+
geom_line(aes(x = ind, y = b), color = "red")+
geom_line(aes(x = ind, y = res), color = "green")
}
\references{
[1] Benamou, J. D., Carlier, G., Cuturi, M., Nenna, L., & Peyre, G.(2015). Iterative Bregman projections for regularized transportation problems. SIAM Journal on Scientific Computing, 37(2), A1111-A1138.
[2] Chizat, L., Peyre, G., Schmitzer, B., & Vialard, F. X. (2016). Scaling algorithms for unbalanced transport problems. arXiv preprint arXiv:1607.05816.
}
|
c5b4ab6fad84ebbd6f17f16af098315a94235552
|
0198bd016fc0867660639ff7b5979c088e42c6c7
|
/man/Kommunegrense.Rd
|
d105dbcf31852e696f14f1aa2ac29c7edf93040a
|
[
"CC-BY-4.0"
] |
permissive
|
hmalmedal/N5000
|
62f4f046ccbd28e8d3e5a05dacab8f755a6c5950
|
6b7e55001998e3ea05fcb26c564069d6862d7a63
|
refs/heads/master
| 2023-05-12T12:08:37.036517
| 2019-02-21T19:12:57
| 2023-05-01T08:11:06
| 171,929,948
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,848
|
rd
|
Kommunegrense.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datadoc.R
\docType{data}
\name{Kommunegrense}
\alias{Kommunegrense}
\title{Kommunegrense}
\format{
\if{html}{\out{<div class="sourceCode">}}\preformatted{Simple feature collection with 865 features and 3 fields
Geometry type: LINESTRING
Dimension: XY
Bounding box: xmin: -92079.54 ymin: 6428945 xmax: 1111593 ymax: 7962288
Projected CRS: ETRS89 / UTM zone 33N
# A tibble: 865 × 4
målemetode nøyaktighet oppdateringsdato geometry
* <int> <int> <date> <LINESTRING [m]>
1 64 100000 2021-12-28 (314938.8 6614098, 315396.1 6618167, 314551.…
2 64 100000 2021-12-28 (164812.4 6891640, 158580.5 6892916, 154277.…
3 64 100000 2021-12-28 (283937.1 6733179, 286511 6731071, 286832.9 …
4 64 100000 2021-12-28 (165097.2 6514877, 181722.6 6499979)
5 64 100000 2021-12-28 (150671.7 6523133, 151912.3 6522401, 161237.…
6 64 100000 2021-12-28 (150671.7 6523133, 154256.5 6531598)
7 64 100000 2021-12-28 (368189.6 6712383, 367110.1 6712427, 363265.…
8 64 100000 2021-12-28 (173726 6591437, 175834.4 6592177, 180923.6 …
9 64 100000 2021-12-28 (285788.6 6685184, 284407.7 6683836, 282096.…
10 64 100000 2021-12-28 (285788.6 6685184, 284189.2 6694608)
# ℹ 855 more rows
# ℹ Use `print(n = ...)` to see more rows
}\if{html}{\out{</div>}}
}
\source{
\code{Basisdata_0000_Norge_25833_N5000AdministrativeOmrader_GML.gml}
}
\usage{
Kommunegrense
}
\description{
Kommunegrense
}
\author{
© \href{https://kartverket.no/}{Kartverket}
}
\keyword{datasets}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.