Dataset schema (one record per source file): blob_id, directory_id, content_id, snapshot_id, revision_id (40-char strings); path, branch_name, repo_name, filename, extension, src_encoding, language, license_type, gha_license_id, gha_language (strings); detected_licenses (list); visit_date, revision_date, committer_date, gha_event_created_at, gha_created_at (timestamp[us], nullable); github_id, star_events_count, fork_events_count, length_bytes (int64); is_vendor, is_generated (bool); content (string).

Each record below is given as: repo | path | license | language | encoding | size | revised date | stars | forks, followed by the file content.
repo: farcego/rbl | path: /man/elt_delim.Rd | license: MIT (permissive) | language: R | encoding: UTF-8 | size: 716 bytes | revised: 2017-06-15 | stars: 0 | forks: 0

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funs_tdr.r
\name{elt_delim}
\alias{elt_delim}
\title{Extract indices corresponding to a symbol from a "delim" table}
\usage{
elt_delim(elt, no, delim)
}
\arguments{
\item{elt}{A single character to match against delim table.}
\item{no}{Dive numbers to extract.}
\item{delim}{A delim table (the "delim" slot of a "ses" object).}
}
\value{
A data frame with the row numbers of the start/end of the desired periods.
}
\description{
Extract indices corresponding to a symbol from a "delim" table
}
\examples{
data(exses)
elt_delim('-', 60:62, exses$delim)
elt_delim("/", no = c(62, 62:60), delim = exses$delim)
}
\keyword{internal}
\keyword{tdr_extract}

repo: npp97-field/robustmeta | path: /man/broad.Rd | license: none stated | language: R | encoding: UTF-8 | size: 401 bytes | revised: 2013-04-07 | stars: 0 | forks: 0

\docType{data}
\name{broad}
\alias{broad}
\title{Broad vs. narrow selectivity data for the robustmeta package}
\description{
Broad vs. narrow selectivity data for robustmeta package.
Based on in-progress work with a NESCent working group.
Not to be considered final data.
}
\keyword{data}
\keyword{datasets}

repo: cran/RClickhouse | path: /tests/testthat/test-int64.R | license: none stated | language: R | encoding: UTF-8 | size: 248 bytes | revised: 2023-04-02 | stars: 0 | forks: 0

context("int64")
library(DBI, warn.conflicts=F)
library(dplyr, warn.conflicts=F) # for tibble()
source("utils.R")
test_that("reading & writing int64", {
writeReadTest(as.data.frame(tibble(x=bit64::as.integer64(c("9007199254740993")))))
})
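# For reference: 9007199254740993 is 2^53 + 1, the smallest positive integer that a
# double cannot represent exactly, so this round-trip genuinely exercises 64-bit integers.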

repo: Mehranmzn/LMMELSM | path: /man/nlist.Rd | license: MIT (permissive) | language: R | encoding: UTF-8 | size: 314 bytes | revised: 2021-03-18 | stars: 0 | forks: 0

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility.R
\name{nlist}
\alias{nlist}
\title{Creates a named list.}
\usage{
nlist(...)
}
\arguments{
\item{...}{Objects for list.}
}
\value{
A named list.
}
\description{
Creates a named list.
}
\author{
Stephen R. Martin
}
\keyword{internal}

repo: bolongtan/isocyanate | path: /man/GenCondGroups.Rd | license: MIT (permissive) | language: R | encoding: UTF-8 | size: 483 bytes | revised: 2018-11-18 | stars: 0 | forks: 0

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shoot.R
\name{GenCondGroups}
\alias{GenCondGroups}
\title{Generate conditional groups based on bool signals and a variable/return xts}
\usage{
GenCondGroups(ret, bools)
}
\arguments{
\item{ret}{An xts variable/return series.}
\item{bools}{A 2-d boolean xts (conceptually a vector of 1-d boolean xts series).}
}
\value{
A list of 1-d xts objects, the same size as \code{bools}.
}
\description{
Generate conditional groups based on bool signals and a variable/return xts
}

repo: dpastoor/Kmisc | path: /man/registerFunctions.Rd | license: none stated | language: R | encoding: UTF-8 | size: 1,464 bytes | revised: 2014-04-25 | stars: 1 | forks: 0

% Generated by roxygen2 (4.0.0): do not edit by hand
\name{registerFunctions}
\alias{registerFunctions}
\title{Automatically Register C/C++ Functions in a Package}
\usage{
registerFunctions(prefix = "C_")
}
\arguments{
\item{prefix}{A prefix to append to the exported name, so that a function
called \code{myfun} is registered as \code{<prefix>myfun}.}
}
\description{
This function can be used to automatically register the native routines
in a package. It searches all of the \code{.c} and \code{.cpp} files in
\code{src}, excluding the file \code{<pkgname>_init.c}, finds functions
annotated with \code{// [[register]]}, and extracts the
required information needed to register routines in the package.
The necessary routines are written to a file called
\code{src/<pkgname>_init.c}.
}
\details{
This function should be called from the base directory of an
\R package you are developing.
Currently, the assumption is that all functions in a package use the
\code{.Call} interface; i.e., there are no functions using the \code{.C},
\code{.Fortran}, or \code{.External} interfaces -- this may be
added in a future version.
After calling this function, ensure that you have
\code{useDynLib(<pkg>, .registration=TRUE)} in your \code{NAMESPACE}.
If you use \code{roxygen} to document your package, you can
use
\describe{
\item{ }{\code{##' @useDynLib <pkg>, .registration=TRUE}}
}
somewhere in your \code{roxygen} documentation to achieve the same effect.
}
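A minimal usage sketch (hypothetical package layout; the C function name is illustrative, not from the documentation above):

# in src/utils.c, annotate each .Call routine:
#   // [[register]]
#   SEXP myfun(SEXP x) { ... }
# then, from the package's base directory:
registerFunctions(prefix = "C_")   # writes src/<pkgname>_init.c
# and ensure NAMESPACE contains: useDynLib(<pkg>, .registration=TRUE)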

repo: brandenkmurray/kaggle | path: /springleaf/ensembling.R | license: none stated | language: R | encoding: UTF-8 | size: 3,902 bytes | revised: 2017-08-29 | stars: 30 | forks: 17

library(caretEnsemble)
library(caret)       # train(), trainControl(), createMultiFolds()
library(xgboost)
library(pROC)
library(caTools)     # colAUC()
library(doParallel)  # makeCluster(), registerDoParallel()
library(RPushbullet)
setwd("/home/branden/Documents/kaggle/springleaf")
# load("xgb13.rda")
varnames <- names(train[,grepl("VAR", names(train))])
# xgb13Imp <- xgb.importance(feature_names = NULL, model= xgb13)
# write.csv(xgb13Imp, "xgb13Imp.csv")
## TESTING CARET MODELS BEFORE ENSEMBLING
glmnetCtrl <- trainControl(method="cv",
number=2,
classProbs = TRUE,
# allowParallel=TRUE,
# index=createMultiFolds(ts1$Hazard[ts1$split==0], k=2, times=2),
selectionFunction="best",
summaryFunction=twoClassSummary)
varnamessub <- names(train[2:50])
set.seed(2015)
ensCtrl <- trainControl(method="cv",
number=2,
classProbs=TRUE,
savePredictions=TRUE,
# allowParallel=TRUE,
index=createMultiFolds(train$target, k=2, times=2),
selectionFunction="best",
summaryFunction=twoClassSummary)
glmnet <- train(#x=train[,varnames],
#y=factor(make.names(train$target)),
factor(make.names(target)) ~ .,
data=train[,c(varnamessub, "target")],
method="glmnet",
trControl=ensCtrl,
metric="ROC",
preProcess=c("center","scale"),
tuneGrid=expand.grid(alpha=c(.1,.2), lambda=c(.05,.2)))
rf1 <- train(#x=train[,varnames],
#y=factor(make.names(train$target)),
factor(make.names(target)) ~ .,
data=train[,c(varnamessub, "target")],
method="rf",
trControl=ensCtrl,
metric="ROC",
tuneGrid=expand.grid(mtry=c(5)),
nodesize=20,
ntree=20)
tme <- Sys.time()
xgbEns <- train(factor(make.names(target)) ~ .,
data=train[,c(varnames, "target")],
method="xgbTree",
metric="ROC",
tuneGrid=expand.grid(max_depth = c(16),
nrounds = c(20000),
eta = c(.005)),
min_child_weight=1,
subsample=1,
colsample_bytree=1)
(Sys.time() - tme)
save(xgbEns, file="xgbEns.rda")
cl <- makeCluster(6)
registerDoParallel(cl)
tme <- Sys.time()
model_list <- caretList(
factor(make.names(target)) ~ .,
data=train[,c("target",varnames)],
# x=train[,2:50],
# y=factor(make.names(train$target)),
trControl=ensCtrl,
metric="ROC",
tuneList=list(
#XGB tuned so that it creates a random forest model
rf2=caretModelSpec(method="rf",
tuneGrid=expand.grid(mtry=c(17)),
nodesize=20,
ntree=2000),
xgb1=caretModelSpec(method="xgbTree",
tuneGrid=expand.grid(max_depth = c(16),
nrounds = c(20000),
eta = c(.005)),
min_child_weight=1,
subsample=1,
colsample_bytree=1)
)
)
stopCluster(cl)
Sys.time() - tme
pbPost("note", "Ensemble", "Finished.")
save(model_list, file="model_list-RF-XGB-GBM-GLMNET-SVM-08-21-2015.rda")
model_list <- list(glmnet=glmnet, rf1=rf1)
class(model_list) <- "caretList"
xyplot(resamples(model_list))
modelCor(resamples(model_list))
greedy_ensemble <- caretEnsemble(model_list)
summary(greedy_ensemble)
glm_ensemble <- caretStack(
model_list,
method='glm',
metric='ROC',
trControl=trainControl(
method='boot',
number=10,
savePredictions=TRUE,
classProbs=TRUE,
summaryFunction=twoClassSummary
)
)
# model_preds (per-model test-set predictions) is assumed to exist from an earlier step
model_preds2 <- model_preds
model_preds2$ensemble <- predict(glm_ensemble, newdata=testing, type='prob')$M
CF <- coef(glm_ensemble$ens_model$finalModel)[-1]
colAUC(model_preds2, testing$Class)

repo: wcwetzel/gall-spatial-bayes | path: /spatial-galls-testing-pois.bug.R | license: none stated | language: R | encoding: UTF-8 | size: 520 bytes | revised: 2011-11-22 | stars: 0 | forks: 0

# spatial-galls.bug.R
# JAGS testing
# 4 Nov 2011
### this is to fit data from
### gall-spatial-sim.R
### see if I can recover simulated parms
model {
for(i in 1:N) {
galls[i] ~ dpois(lambda[i])
lambda[i] <- exp(lambdaNE[i])
lambdaNE[i] ~ dmnorm( logmu[i], precV)
}
V <- pow(sigma, 2) * exp(1)^distcorr
precV <- 1/V
distcorr <- gamma * pow(D, 2)
logmu <- log(mu)
# priors
sigma ~ dunif(0, 100)
gamma ~ dunif(-10, 0)
mu ~ dgamma(0.001, 0.001)
}
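# For reference: with gamma < 0 (see its dunif(-10, 0) prior), V = sigma^2 * exp(gamma * D^2)
# is a Gaussian-decay spatial covariance that shrinks with squared distance D.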
# look for seed dispersal bugs code

repo: ropensci/rrricanes | path: /man/get_storms.Rd | license: MIT (permissive) | language: R | encoding: UTF-8 | size: 1,173 bytes | revised: 2022-12-31 | stars: 19 | forks: 9

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_storms.R
\name{get_storms}
\alias{get_storms}
\title{get_storms}
\format{
A 4xN dataframe
\describe{
\item{Year}{Numeric, four-digit year of the storm}
\item{Name}{Character, name of storm mixed-case}
\item{Basin}{AL (Atlantic) or EP (East Pacific)}
\item{Link}{URL to storms' product pages}
}
}
\source{
\url{http://www.nhc.noaa.gov/archive/2016/}
}
\usage{
get_storms(years = format(Sys.Date(), "\%Y"), basins = c("AL", "EP"))
}
\arguments{
\item{years}{numeric or vector, four digits (\%Y format)}
\item{basins}{One or both of c("AL", "EP")}
}
\value{
Dataframe of storms.
}
\description{
Returns storms and product link.
}
\details{
By default returns all storms for the current year. If no storms
have developed will return an empty dataframe.
}
\examples{
# Get all storms for 2016, both basins
\dontrun{
storms <- get_storms(years = 2016, basins = c("AL", "EP"))
# Get storms for two different years
storms.2010 <- get_storms(c(2010, 2015))
# Get storms for three consecutive years, Atlantic basin only
storms.al.2005 <- get_storms(2005:2007, basins = "AL")
}
}

repo: mrjoh3/gallerier | path: /R/lightbox.R | license: none stated | language: R | encoding: UTF-8 | size: 2,818 bytes | revised: 2021-07-27 | stars: 15 | forks: 1

#' @title Create Lightbox Gallery
#' @param df data.frame of images; must contain a \code{src} column of image file names (a \code{uid} column is generated if absent)
#' @param gallery character label identifying gallery
#' @param css character file path to css file. If missing default style is used
#' @param path character folder path, sometimes some frameworks like shiny and
#' blogdown can look for files in a specific location.
#' @param width integer thumbnail image size in pixels
#' @param display character css display value for the gallery container, e.g. 'block' or 'none'
#'
#' @importFrom shiny tags includeScript includeCSS
#' @importFrom glue glue glue_collapse
#' @importFrom digest sha1
#' @importFrom fs dir_copy
#'
#' @return An HTML tag object (a \code{shiny::tags$div}) containing the gallery markup.
#' @export
#'
#' @examples
lightbox_gallery <- function(df, gallery, css, path = '', width = 80, display = 'block'){
dir.create('www')
if (missing(css)) {
css <- file.path(system.file('css', package = 'gallerier'),
"styles.css")
}
if (!(dir.exists('www/lightbox-2-2.11.3'))) {
fs::dir_copy(system.file('js/lightbox-2-2.11.3', package = 'gallerier'),
'www/lightbox-2-2.11.3')
}
# ensure all required columns exist in df
#if (!('description' %in% colnames(df))) df$description <- NA
if (!('uid' %in% colnames(df))) df$uid <- strtrim(digest::sha1(df$src), 5)
tags$div(style = sprintf('display: %s;', display),
tagList(tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "www/lightbox-2-2.11.3/css/lightbox.min.css")
),
tags$div(class = 'card-deck',
lapply(seq_len(nrow(df)), function(i){
tags$div(`data-type`="template", class = 'card',
tags$a(id = df$uid[i],
href = paste0(path, df$src[i]),
`data-lightbox` = gallery, # this identifies gallery group
`data-title` = glue_collapse(df[i,], sep = ' - '), # this is where complex title (glue) added
tags$img(class = 'card-img-top',
src = paste0(path, df$src[i]),
width = glue('{width}px'),
height = 'auto'))
)
})
),
includeScript("www/lightbox-2-2.11.3/js/lightbox.min.js"),
includeCSS(css)
))
}
#' Lightbox Dependencies
#'
#' @return Writes the lightbox CSS and JS tags to the output; intended for R Markdown chunks with results='asis'.
#'
#' @examples
lightbox_rmd <- function(){
cat('<link rel="stylesheet" href="www/lightbox-2-2.11.3/css/lightbox.min.css">\n',
'<script src="www/lightbox-2-2.11.3/js/lightbox.min.js"></script>')
}

repo: TeganWhitehead/ReportCard | path: /WaterQualityCalc.R | license: none stated | language: R | encoding: UTF-8 | size: 6,496 bytes | revised: 2019-09-25 | stars: 0 | forks: 0

# ------------------------
# Directory where all the data files are relative to this script.
data.path <- "data"
WQFreshEstuaryData.file <- file.path(data.path,"WaterQualityFreshEstuaryData201819.csv")
WQGuidelines.file <- file.path(data.path,"WaterQualityGuidelines.csv")
# Location of where all the files generated by this script are saved. These files
# are not saved to the Git code repository. (excluded in the .gitignore)
outputs.path <- file.path("output")
# -------------------------------------------------------------------
#Functions
# -------------------------------------------------------------------
standardisedScoreToGrade <- function(score) {
grade <- ifelse (score>=81,"A",
ifelse(score>=61,"B",
ifelse(score>=41,"C",
ifelse(score>=21,"D",
"E"))))
return(grade)
}
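# Example: standardisedScoreToGrade(c(85, 50, 10)) returns c("A", "C", "E")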
# -------------------------------------------------------------------
# Start of main code
# -------------------------------------------------------------------
# ------------------------
# Test if tidyr is already installed. If not then try to install it.
if("tidyr" %in% rownames(installed.packages()) == FALSE) {
print("tidyr library is not installed on this machine. Attempting to install.")
install.packages("tidyr")
stop("Rerun the script once tidyr installation is complete.")
} else {
print("Required tidyr is installed.")
}
library(dplyr)
library(lubridate)
#this means if outputs folder does not exist, create one
if (!dir.exists(outputs.path)) {
dir.create(outputs.path, showWarnings = TRUE)
print(paste("Created output directory: ",outputs.path))
}
# -------------------------------------
# Read in the data and guidelines
print("Attempting to load input water quality data")
if (!file.exists(WQFreshEstuaryData.file)) {
print(paste("Could not find ",WQFreshEstuaryData.file))
stop("Ensure that your data is saved in CSV format and is saved to the correct location")
}
WQFreshEstuaryData <- read.csv(WQFreshEstuaryData.file, header=TRUE, sep=",", stringsAsFactors=FALSE)
print(paste("Loading successful. Found",nrow(WQFreshEstuaryData),"rows of data"))
print("Attempting to load input water quality guidelines table file")
if (!file.exists(WQGuidelines.file)) {
print(paste("Could not find ",WQGuidelines.file))
stop("Ensure that your Water Quality Guidelines file is saved in CSV format and is saved to the correct location")
}
WQGuidelines <- read.csv(WQGuidelines.file, header=TRUE, sep=",", stringsAsFactors=FALSE)
print(paste("Loading successful. Found",nrow(WQGuidelines),"rows of data"))
# ---------------------------------
# Add on DIN
# ---------------------------------
WQFreshEstuaryData$DIN <- WQFreshEstuaryData$Nox+WQFreshEstuaryData$Ammonia
WQFreshEstuaryData$Month <- format(dmy(WQFreshEstuaryData$Date),"%B")
# --------------------------------------------------------------------------------------
# Calculating the median for each month for each site
# --------------------------------------------------------------------------------------
WQFreshEstuaryData.monthlymedian <- WQFreshEstuaryData %>% group_by(Site, Month) %>%
summarise_at(vars(Turbidity, TP, FRP, DO, DIN, TSS, Chlorophyll), function(x) median(x, na.rm=TRUE))
# --------------------------------------------------------------------------------------
# Calculate the annual median of the monthly medians
#this code means group the data by site and then summarise by variables Turbidity, Phosphorus etc and then on each of these variables calculate the median
#Mackay-whitsunday and Wet Tropics report on TSS and Chlorophyll a so this needs to be included as well
# --------------------------------------------------------------------------------------
WQFreshEstuaryData.annualmedian <- WQFreshEstuaryData.monthlymedian %>%
group_by(Site) %>%
summarise_at(vars(Turbidity, TP, FRP, DO, DIN, TSS, Chlorophyll), function(x) median(x, na.rm=TRUE))
# -----------------------------------------------------------
# Convert the WQGuidelines from long to wide format
# -----------------------------------------------------------
# Expand the seasons to have months
seasons <- data.frame(
Month=c('October','November','December','January','February','March',
'April','May','June','July','August','September'),
Season=c(rep("Wet",6),rep("Dry",6)))
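# NOTE: the long-to-wide step that creates WQGuidelines.wide is missing from this
# script. A sketch, assuming the guidelines are long-format with hypothetical
# columns 'Measure' and 'GuidelineValue':
# WQGuidelines.wide <- tidyr::spread(WQGuidelines, key = Measure, value = GuidelineValue)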
WQGuidelines.allmonths <- merge(WQGuidelines.wide,seasons)
WQGuidelines.allmonths <- rename(WQGuidelines.allmonths, Turbidity.WQO=Turbidity,Chlorophyll.WQO=Chlorophyll,DIN.WQO=DIN,FRP.WQO=FRP,TSS.WQO=TSS,TP.WQO=TP)
# Attach all the matching guideline values (by month and site)
WQFreshEstuaryData.GV <- merge(WQFreshEstuaryData.monthlymedian,WQGuidelines.allmonths)
#Calculating the percent of months below GVs - generic function
belowGVFreq <- function(x,measure,WQO) {
#x <- WQFreshEstuaryData.GV
#measure <- "Turbidity"
#WQO <- "Turbidity.WQO"
x$belowGV <- x[,measure]<x[,WQO]
monthsBelowGV.site <- x %>% group_by(Site) %>% summarise_at(vars(belowGV), sum)
numMonths.site <- x %>% group_by(Site) %>% summarise_at(vars(belowGV), length)
#monthsBelowGV.site$belowGV/numMonths.site
monthsBelowGV.site$fracBelowGV <- monthsBelowGV.site$belowGV/numMonths.site$belowGV
return(monthsBelowGV.site)
}
#This is applying the function for Turbidity
turbidity.belowGVFreq <- belowGVFreq(WQFreshEstuaryData.GV,"Turbidity","Turbidity.WQO")
# Number of monthly median values at or below (≤) the guideline values
#WQFreshEstuaryData.MediansBelowGVs<-WQFreshEstuaryData.monthlymedian%>%
# group_by(Site) %>%
# summarise_at(vars(Turbidity, TP, FRP, DO, DIN, TSS, Chlorophyll), function(x) sum(x<= , na.rm=TRUE))
#WQFreshEstuaryData.MediansBelowGVs
#NEED TO WORK OUT HOW TO PUT IN THE GUIDELINE VALUES IN EACH OF THE ABOVE
#Length of each variable data column
#WQFreshEstuaryData.Lengthmonthlymedian<-WQFreshEstuaryData.monthlymedian%>%(summarise_each(funs(length))
#Percent of median values ≤ GV - calculated by number of median monthly values below GV divided by number of monthly values
#WQFreshEstuaryData.PercentBelowGVs<-WQFreshEstuaryData.MediansBelowGVs/
#This is calculating the 80th percentile for each variable
#WQFreshEstuaryData.monthlymedian %>% group_by(Site) %>% summarise_at(vars(Turbidity, TP, FRP, DO, DIN, TSS, Chlorophyll), function(WQFreshEstuaryData.monthlymedian) (quantile(WQFreshEstuaryData.monthlymedian, prob=c(0.8),na.rm=TRUE)))

repo: UW-Biol419-Wi2016/HIV | path: /419 r.R | license: none stated | language: R | encoding: UTF-8 | size: 2,569 bytes | revised: 2016-03-15 | stars: 0 | forks: 0

# install.packages("seqinr")
library(seqinr)
traindata = read.csv(file="training_data.csv",header=TRUE,sep=",")
# traindata and testdata are "data frames" that contain variables of different types (numeric, character, etc)
# dim(traindata) = 1000 by 6
prseq = traindata[,3]
# length(prseq) = 920
rtseq = traindata[,4]
# 1000
train0 = traindata[traindata[,2]==0,]
# 794 rows (subjects)
train1 = traindata[traindata[,2]==1,]
# 206 rows (subjects)
levels(traindata$PR.Seq)
# 920
traindata[,3]=as.character(traindata[,3])
traindata[,4]=as.character(traindata[,4])
# convert factors in column 3 and 4 into character strings
testdata = read.csv(file="test_data.csv",header=TRUE,sep=",")
# dim(testdata) = 692 by 6
# seqinr package has write.fasta() function
# write.fasta(sequences, names, nbchar = 60, file.out, open = "w")
# writing fasta file for all subjects
prlist=traindata[1:920,3]
rtlist=traindata[,4]
for (i in 1:920){
if (i==1){
write.fasta(prlist[1],1,nbchar=60,file.out="pr",open="w")
} else{
write.fasta(prlist[i],i,nbchar=60,file.out="pr",open="a")
}
}
for (i in 1:1000){
if (i==1){
write.fasta(rtlist[1],1,nbchar=60,file.out="rt",open="w")
} else{
write.fasta(rtlist[i],i,nbchar=60,file.out="rt",open="a")
}
}
# Get row index of 0 patients and 1 patients
pr0index=c()
pr1index=c()
for (i in 1:920){
if (traindata[i,2]==0){
pr0index=c(pr0index,i)
} else {
pr1index=c(pr1index,i)
}
}
rt0index=c()
rt1index=c()
for (i in 1:1000){
if (traindata[i,2]==0){
rt0index=c(rt0index,i)
} else {
rt1index=c(rt1index,i)
}
}
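# Equivalently, the index-building loops above can be vectorized:
# pr0index <- which(traindata[1:920, 2] == 0); pr1index <- which(traindata[1:920, 2] == 1)
# rt0index <- which(traindata[1:1000, 2] == 0); rt1index <- which(traindata[1:1000, 2] == 1)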
# write fasta file for 0 and 1 patients separately
pr0=traindata[pr0index,3]
pr1=traindata[pr1index,3]
rt0=traindata[rt0index,4]
rt1=traindata[rt1index,4]
# pr0 and pr1
for (i in 1:length(pr0index)){
if (i==1){
write.fasta(pr0[1],pr0index[1],nbchar=60,file.out="pr0",open="w")
} else{
write.fasta(pr0[i],pr0index[i],nbchar=60,file.out="pr0",open="a")
}
}
for (i in 1:length(pr1index)){
if (i==1){
write.fasta(pr1[1],pr1index[1],nbchar=60,file.out="pr1",open="w")
} else{
write.fasta(pr1[i],pr1index[i],nbchar=60,file.out="pr1",open="a")
}
}
# rt0 and rt1
for (i in 1:length(rt0index)){
if (i==1){
write.fasta(rt0[1],rt0index[1],nbchar=60,file.out="rt0",open="w")
} else{
write.fasta(rt0[i],rt0index[i],nbchar=60,file.out="rt0",open="a")
}
}
for (i in 1:length(rt1index)){
if (i==1){
write.fasta(rt1[1],rt1index[1],nbchar=60,file.out="rt1",open="w")
} else{
write.fasta(rt1[i],rt1index[i],nbchar=60,file.out="rt1",open="a")
}
}

repo: karencode/DistributionApp | path: /ui.R | license: none stated | language: R | encoding: UTF-8 | size: 2,646 bytes | revised: 2016-02-26 | stars: 0 | forks: 0

library(shiny)
# Define UI for the p-value calculator application
shinyUI(fluidPage(
# Application title
titlePanel("P-value Calculator"),
# Sidebar with inputs for the test statistic and type of test
sidebarLayout(
sidebarPanel(
helpText(strong("Obtain a p-value for a test statistic that follows a standard normal distribution.")),
numericInput("testStat",
label = h5("Your computed test statistic (z*):"),
value = 0.00, step=.01),
radioButtons("numTails", label = h5("Type of Test:"),
choices = list("Lower-Tailed"="lower", "Upper-Tailed" = "upper",
"Two-Tailed" = "two"),selected = "two")
),
# Show a plot of the generated distribution
mainPanel(tabsetPanel(
tabPanel("Results", div(h4(textOutput("thePvalue")), style = "color:blue"),
br(),
plotOutput("distPlot")
),
tabPanel("Help/Example", withMathJax(),
helpText("Use this App to compute a p-value for a hypothesis test whose test statistic follows a standard normal distribution.
For example, suppose you are conducting a one sample z test with a known population standard deviation \\(\\sigma\\) = 4."),
helpText("Your sample mean M = 8 and sample size n = 25. Your null and alternative hypotheses are: $$H_0:\\mu = 10; H_a:\\mu < 10$$"),
helpText("Then your test statistic z* is:$$z^* = \\frac{M-\\mu}{\\left(\\frac{\\sigma}{\\sqrt{n}}\\right)} = \\frac{8-10}{\\left(\\frac{4}{\\sqrt{25}}\\right)} = -2.5$$"),
helpText("Enter z* = -2.5 in the test statistic box. Since \\(H_a\\) is <, select Lower-Tailed as the type of test. (Use Upper-Tailed when \\(H_a\\) is > and Two-Tailed when \\(H_a\\) is \\(\\neq\\).)"),
helpText("The results give the P-value as 0.0062 and shades the corresponding area under the z curve to help you visualize how a probability corresponds to an area under the z curve.
You can interpret this P-value as describing the probability of drawing a sample that produces a sample mean of M = 8.0 or smaller, if the actual population mean is \\(\\mu\\) = 10.
If your significance level is say \\(\\alpha\\) = 0.05, you would reject \\(H_0\\) because 0.0062 < 0.05. That is, you would conclude that \\(\\mu\\) < 10.")
)
)
)
)))
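A hypothetical companion server.R sketch (not part of this file) showing how the displayed p-value could be computed from the inputs defined above:

# p <- switch(input$numTails,
#             lower = pnorm(input$testStat),
#             upper = 1 - pnorm(input$testStat),
#             two   = 2 * pnorm(-abs(input$testStat)))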

repo: surayaaramli/typeRrh | path: /data/genthat_extracted_code/crmPack/examples/update-LogisticIndepBeta-method.Rd.R | license: none stated | language: R | encoding: UTF-8 | size: 899 bytes | revised: 2019-04-25 | stars: 0 | forks: 0

library(crmPack)
### Name: update,LogisticIndepBeta-method
### Title: Update method for the 'LogisticIndepBeta' model class. This is a
### method to update the modal estimates of the model parameters phi_1
### (phi1) and phi_2 (phi2) when new data or new observations of
### responses are available and added in.
### Aliases: update,LogisticIndepBeta-method
### Keywords: methods
### ** Examples
##Update the 'LogisticIndepBeta' model with new data
## first define the data and the model
emptydata<-Data(doseGrid=seq(25,300,25))
data<-emptydata
model<-LogisticIndepBeta(binDLE=c(1.05,1.8),DLEweights=c(3,3),DLEdose=c(25,300),data=data)
##Then we have some new observations data
data<-Data(x=c(25,50,50,75,100,100,225,300),
y=c(0,0,0,0,1,1,1,1),
doseGrid=seq(from=25,to=300,by=25))
##update the model to get new estimates
newModel <- update(object=model,data=data)

repo: cran/ICS | path: /R/internals.R | license: none stated | language: R | encoding: UTF-8 | size: 1,046 bytes | revised: 2023-05-30 | stars: 0 | forks: 2

### Internal functions for cov4 and ics
### covariance matrix based on 4th moments w.r.t. the mean vector
### subroutine of cov4
###
.cov4moments.mean<-function(X)
{
p<-dim(X)[2]
n<-dim(X)[1]
data.centered<-sweep(X,2,colMeans(X),"-")
Sigma.data.sqrt<-mat.sqrt(cov(X))
radius<-sqrt(rowSums((data.centered %*% solve(Sigma.data.sqrt))^2))
y<-radius*data.centered
V<-(1/(n*(p+2)))*crossprod(y)
return(V)
}
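### For reference, the estimator implemented above is
###   COV4(X) = 1/(n*(p+2)) * sum_i r_i^2 (x_i - xbar)(x_i - xbar)'
### where r_i is the Mahalanobis distance of observation x_i from the column means.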
### covariance matrix based on 4th moments w.r.t. the origin
### subroutine of cov4
###
.cov4moments.origin<-function(X)
{
p<-dim(X)[2]
n<-dim(X)[1]
Sigma.data.sqrt<-mat.sqrt(covOrigin(X))
radius<-sqrt(rowSums((X %*% solve(Sigma.data.sqrt))^2))
V<-(1/(p+2))*covOrigin(radius*X)
return(V)
}
### Sign of the maximum element of a vector
### returns 1 if the absolute largest value is positive and -1 otherwise
### subroutine in ics
.sign.max<-function(x)
{
ifelse(identical(max(x),max(abs(x))),1,-1)
}

repo: klkerr/abcd_ed_gi | path: /abcd_ed_gi_lmes.R | license: none stated | language: R | encoding: UTF-8 | size: 5,982 bytes | revised: 2021-01-14 | stars: 0 | forks: 0

#!/usr/bin/Rscript
library(psych)
library(lmerTest)
library(lme4)
library(dplyr)
library(MuMIn)
library(r2glmm)
library(effectsize)
print("Loading Rds files...")
abcd.ed.gi=readRDS("abcd_ed_gi.Rds")
abcd.ed.gi.m_crpbi=readRDS("abcd_ed_gi_mcrpbi.Rds")
abcd.ed.gi.f_crpbi=readRDS("abcd_ed_gi_fcrpbi.Rds")
sink("abcd_ed_gi_gam_output.txt",split=TRUE)
########################################
########### Regression ###############
########################################
const.vars=c("subjectid","abcd_site.x","rel_family_id.x","race_ethnicity.x","gender","married.or.livingtogether.x","household.income.x")
#Center interaction terms
abcd.ed.gi$b.gi_sum.centered = scale(abcd.ed.gi$b.gi_sum,center=TRUE,scale=FALSE)
abcd.ed.gi.m_crpbi$b.gi_sum.centered = scale(abcd.ed.gi.m_crpbi$b.gi_sum,center=TRUE,scale=FALSE)
abcd.ed.gi.f_crpbi$b.gi_sum.centered = scale(abcd.ed.gi.f_crpbi$b.gi_sum,center=TRUE,scale=FALSE)
abcd.ed.gi.m_crpbi$b.crpbi_mother.centered = scale(abcd.ed.gi.m_crpbi$b.crpbi_mother,center=TRUE,scale=FALSE)
abcd.ed.gi.f_crpbi$b.crpbi_father.centered = scale(abcd.ed.gi.f_crpbi$b.crpbi_father,center=TRUE,scale=FALSE)
abcd.ed.gi$b.cbcl_scr_dsm5_anxdisord_t.centered = scale(abcd.ed.gi$b.cbcl_scr_dsm5_anxdisord_t,center=TRUE,scale=FALSE)
abcd.ed.gi=abcd.ed.gi[abcd.ed.gi$gender=="F" | abcd.ed.gi$gender=="M",]
abcd.ed.gi.m_crpbi = abcd.ed.gi.m_crpbi[abcd.ed.gi.m_crpbi$gender=="F" | abcd.ed.gi.m_crpbi$gender=="M",]
abcd.ed.gi.f_crpbi = abcd.ed.gi.f_crpbi[abcd.ed.gi.f_crpbi$gender=="F" | abcd.ed.gi.f_crpbi$gender=="M",]
writeLines("#########################################")
writeLines("Whole sample correlation matrix (Table 3)")
writeLines("#########################################")
#Make correlation matrices
cont.vars=c("b.bmi","b.interview_age","b.gi_sum")
data=abcd.ed.gi[c(cont.vars,"b.cbcl_scr_dsm5_anxdisord_t")]
corr.test(data, adjust = "none", use = "pairwise")
data=abcd.ed.gi.m_crpbi[c(cont.vars,"b.cbcl_scr_dsm5_anxdisord_t","b.crpbi_mother")]
corr.test(data, adjust = "none", use = "pairwise")
data=abcd.ed.gi.f_crpbi[c(cont.vars,"b.cbcl_scr_dsm5_anxdisord_t","b.crpbi_father")]
corr.test(data, adjust = "none", use = "pairwise")
abcd.m.temp = abcd.ed.gi.m_crpbi[c("subjectid","b.crpbi_mother")]
abcd.m.temp=abcd.m.temp[complete.cases(abcd.m.temp),]
abcd.f.temp = abcd.ed.gi.f_crpbi[c("subjectid","b.crpbi_father")]
abcd.f.temp=abcd.f.temp[complete.cases(abcd.f.temp),]
abcd.m.f.temp = merge(abcd.m.temp,abcd.f.temp,by="subjectid")
cor.test(abcd.m.f.temp$b.crpbi_mother,abcd.m.f.temp$b.crpbi_father,use="pairwise.complete.obs",method="pearson")
writeLines("###########################################")
writeLines("Paternal Acceptance LME (Table 4; Table S3)")
writeLines("###########################################")
#Paternal acceptance
data=abcd.ed.gi.f_crpbi[c(const.vars,"b.ed_sum","b.gi_sum.centered","y1.ed_sum","b.crpbi_father.centered","b.cbcl_scr_dsm5_anxdisord_t")]
data=data[complete.cases(data),]
y1.ed_pred_b.gi_b.fcrpbi=lmer(y1.ed_sum ~ b.ed_sum + b.gi_sum.centered + race_ethnicity.x + gender + married.or.livingtogether.x + household.income.x + b.cbcl_scr_dsm5_anxdisord_t + b.crpbi_father.centered +
gender * b.crpbi_father.centered * b.gi_sum.centered + (1|abcd_site.x/rel_family_id.x),data=data)
summary(y1.ed_pred_b.gi_b.fcrpbi)
r.squaredGLMM(y1.ed_pred_b.gi_b.fcrpbi)
standardize(y1.ed_pred_b.gi_b.fcrpbi)
writeLines("###########################################")
writeLines("Maternal Acceptance LME (Table 5; Table S2)")
writeLines("###########################################")
#Maternal acceptance
data=abcd.ed.gi.m_crpbi[c(const.vars,"b.ed_sum","b.gi_sum.centered","y1.ed_sum","b.crpbi_mother.centered","b.cbcl_scr_dsm5_anxdisord_t")]
data=data[complete.cases(data),]
y1.ed_pred_b.gi_b.mcrpbi=lmer(y1.ed_sum ~ b.ed_sum + b.gi_sum.centered + race_ethnicity.x + gender + married.or.livingtogether.x + household.income.x + b.cbcl_scr_dsm5_anxdisord_t + b.crpbi_mother.centered +
gender * b.crpbi_mother.centered * b.gi_sum.centered + (1|abcd_site.x/rel_family_id.x),data=data)
writeLines("Results for GI symptoms and maternal acceptance:")
summary(y1.ed_pred_b.gi_b.mcrpbi)
r.squaredGLMM(y1.ed_pred_b.gi_b.mcrpbi)
standardize(y1.ed_pred_b.gi_b.mcrpbi)
writeLines("##################################################")
writeLines("Maternal acceptance LME results by participant sex")
writeLines("##################################################")
writeLines("\n##################")
writeLines("Females (Table S5)")
writeLines("##################")
girls.abcd.ed.gi.m_crpbi=abcd.ed.gi.m_crpbi[abcd.ed.gi.m_crpbi$gender=="F",]
boys.abcd.ed.gi.m_crpbi=abcd.ed.gi.m_crpbi[abcd.ed.gi.m_crpbi$gender=="M",]
data=girls.abcd.ed.gi.m_crpbi[c(const.vars,"b.ed_sum","b.gi_sum.centered","y1.ed_sum","b.crpbi_mother.centered","b.cbcl_scr_dsm5_anxdisord_t")]
data=data[complete.cases(data),]
g.y1.ed_pred_b.gi_b.mcrpbi=lmer(y1.ed_sum ~ b.ed_sum + b.gi_sum.centered + race_ethnicity.x + married.or.livingtogether.x + household.income.x + b.cbcl_scr_dsm5_anxdisord_t + b.crpbi_mother.centered +
b.crpbi_mother.centered * b.gi_sum.centered + (1|abcd_site.x/rel_family_id.x),data=data)
summary(g.y1.ed_pred_b.gi_b.mcrpbi)
r.squaredGLMM(g.y1.ed_pred_b.gi_b.mcrpbi)
standardize(g.y1.ed_pred_b.gi_b.mcrpbi)
writeLines("##################")
writeLines("Males (Table S6)")
writeLines("##################")
data=boys.abcd.ed.gi.m_crpbi[c(const.vars,"b.ed_sum","b.gi_sum.centered","y1.ed_sum","b.crpbi_mother.centered","b.cbcl_scr_dsm5_anxdisord_t")]
data=data[complete.cases(data),]
b.y1.ed_pred_b.gi_b.mcrpbi=lmer(y1.ed_sum ~ b.ed_sum + b.gi_sum.centered + race_ethnicity.x + married.or.livingtogether.x + household.income.x + b.cbcl_scr_dsm5_anxdisord_t + b.crpbi_mother.centered +
b.crpbi_mother.centered * b.gi_sum.centered + (1|abcd_site.x/rel_family_id.x),data=data)
summary(b.y1.ed_pred_b.gi_b.mcrpbi)
r.squaredGLMM(b.y1.ed_pred_b.gi_b.mcrpbi)
standardize(b.y1.ed_pred_b.gi_b.mcrpbi)
sink()

repo: fRI-Research/LandWeb_output | path: /LandWeb_output.R | license: none stated | language: R | encoding: UTF-8 | size: 15,025 bytes | revised: 2022-11-08 | stars: 0 | forks: 1

defineModule(sim, list(
name = "LandWeb_output",
description = "Summarize the output for the LandWeb natural range of variation (NRV).",
keywords = c("LandWeb", "NRV"),
authors = c(
person(c("Eliot", "J", "B"), "McIntire", email = "eliot.mcintire@nrcan-rncan.gc.ca", role = c("aut", "cre")),
person("Yong", "Luo", email = "yluo1@lakeheadu.ca", role = "aut"),
person(c("Alex", "M."), "Chubaty", email = "achubaty@for-cast.ca", role = c("ctb"))
),
childModules = character(0),
version = list(LandWeb_output = numeric_version("1.3.3")),
spatialExtent = raster::extent(rep(NA_real_, 4)),
timeframe = as.POSIXlt(c(NA, NA)),
timeunit = "year",
citation = list("citation.bib"),
documentation = list("README.txt", "LandWeb_output.Rmd"),
reqdPkgs = list("data.table", "raster", "SpaDES.tools",
"PredictiveEcology/LandR@development (>= 1.0.9.9001)",
"PredictiveEcology/pemisc@development"),
parameters = rbind(
defineParameter("mixedType", "numeric", 2,
desc = paste("How to define mixed stands: 1 for any species admixture;",
"2 for deciduous > conifer. See ?vegTypeMapGenerator.")),
defineParameter("sppEquivCol", "character", "LandWeb", NA, NA,
desc = "The column in sim$specieEquivalency data.table to use as a naming convention"),
defineParameter("summaryInterval", "numeric", 50, NA, NA,
desc = "This describes summary interval for this module"),
defineParameter("vegLeadingProportion", "numeric", 0.8, 0, 1,
desc = "a number that define whether a species is leading for a given pixel"),
defineParameter(".plotInitialTime", "numeric", start(sim), NA, NA,
desc = "This describes the simulation time at which the first plot event should occur"),
defineParameter(".plotInterval", "numeric", 1, NA, NA,
desc = "This describes the simulation time interval between plot events"),
defineParameter(".plots", "character", default = "object",
desc = paste("Passed to `types` in `Plots` (see `?Plots`). There are a few plots that are made within this module, if set.",
"Note that plots (or their data) saving will ONLY occur at `end(sim)`.",
"If `NA`, plotting is turned off completely (this includes plot saving).")),
defineParameter(".useCache", "logical", FALSE, NA, NA,
desc = paste("Should this entire module be run with caching activated?",
"This is generally intended for data-type modules,",
"where stochasticity and time are not relevant"))
),
inputObjects = bindrows(
expectsInput("cohortData", "data.table",
desc = paste("age cohort-biomass table hooked to pixel group map by `pixelGroupIndex` at",
"succession time step, this is imported from forest succession module."),
sourceURL = NA),
expectsInput("fireReturnInterval", "Raster",
desc = paste("A raster layer that is a factor raster,",
"with at least 1 column called fireReturnInterval,",
"representing the fire return interval in years.")),
expectsInput("pixelGroupMap", "RasterLayer",
desc = "updated community map at each succession time step",
sourceURL = NA),
expectsInput("rasterToMatch", "RasterLayer",
desc = paste("this raster contains two pieces of information:",
"Full study area with fire return interval attribute."), ## TODO: is this correct?
sourceURL = NA),
expectsInput("rstTimeSinceFire", "Raster",
desc = "a time since fire raster layer",
sourceURL = NA),
expectsInput("species", "data.table",
desc = paste("a table that of invariant species traits with adjusted values"),
sourceURL = "https://raw.githubusercontent.com/dcyr/LANDIS-II_IA_generalUseFiles/master/speciesTraits.csv"),
expectsInput("sppColorVect", "character",
desc = paste("A named vector of colors to use for plotting.",
"The names must be in `sim$speciesEquivalency[[sim$sppEquivCol]]`,",
"and should also contain a color for 'Mixed'"),
sourceURL = NA),
expectsInput("sppEquiv", "data.table",
desc = "table of species equivalencies. See `LandR::sppEquivalencies_CA`.",
sourceURL = NA),
expectsInput("speciesLayers", "RasterStack",
desc = "biomass percentage raster layers by species in Canada species map",
sourceURL = "http://tree.pfc.forestry.ca/kNN-Species.tar"),
expectsInput("standAgeMap", "RasterLayer",
desc = "stand age map in study area, default is Canada national stand age map",
sourceURL = "http://tree.pfc.forestry.ca/kNN-StructureStandVolume.tar"),
expectsInput("studyArea", "SpatialPolygonsDataFrame",
desc = paste("multipolygon to use as the study area,",
"with attribute LTHFC describing the fire return interval.",
"Defaults to a square shapefile in Southwestern Alberta, Canada."),
sourceURL = NA),
expectsInput("studyAreaLarge", "SpatialPolygonsDataFrame",
desc = paste("Polygon to use as the parametrisation study area.",
"Note that `studyAreaLarge` is only used for parameter estimation, and",
"can be larger than the actual study area used for LandR simulations",
"(e.g., larger than `studyArea` in LandR Biomass_core)."),
sourceURL = NA),
expectsInput("studyAreaReporting", "SpatialPolygonsDataFrame",
desc = paste("multipolygon (typically smaller/unbuffered than studyArea) to use for plotting/reporting.",
"Defaults to an area in Southwestern Alberta, Canada."),
sourceURL = NA),
expectsInput("summaryPeriod", "numeric",
desc = "a numeric vector contains the start year and end year of summary",
sourceURL = NA)
),
outputObjects = bindrows(
createsOutput("vegTypeMap", "Raster", desc = NA)
)
))
doEvent.LandWeb_output <- function(sim, eventTime, eventType, debug = FALSE) {
if (eventType == "init") {
sim <- scheduleEvent(sim, P(sim)$.plotInitialTime, "LandWeb_output", "initialConditions",
eventPriority = 1)
sim <- scheduleEvent(sim, P(sim)$.plotInitialTime, "LandWeb_output", "otherPlots",
eventPriority = 1)
# sim <- scheduleEvent(sim, 0, "LandWeb_output", "allEvents", eventPriority = 7.5)
sim <- scheduleEvent(sim, sim$summaryPeriod[1], "LandWeb_output", "allEvents",
eventPriority = 7.5)
} else if (eventType == "initialConditions") {
if (anyPlotting(P(sim)$.plots) && ("screen" %in% P(sim)$.plots)) {
devCur <- dev.cur()
## if current plot dev is too small, open a new one
if (is.null(dev.list())) {
dev(x = devCur + 1, height = 7, width = 14)
clearPlot()
} else {
if (dev.size()[2] < 14) {
dev(x = devCur + 1, height = 7, width = 14)
clearPlot()
}
}
plotVTM(speciesStack = raster::mask(sim$speciesLayers, sim$studyAreaReporting) %>% raster::stack(),
vegLeadingProportion = P(sim)$vegLeadingProportion,
sppEquiv = sim$sppEquiv,
sppEquivCol = P(sim)$sppEquivCol,
colors = sim$sppColorVect,
title = "Initial Types")
dev(devCur)
## plot initial age map
ageMap <- raster::mask(sim$standAgeMap, sim$studyAreaReporting) %>% raster::stack()
Plot(ageMap, title = "Initial stand ages")
}
} else if (eventType == "allEvents") {
if (time(sim) >= sim$summaryPeriod[1] && time(sim) <= sim$summaryPeriod[2]) {
sim <- AllEvents(sim)
sim <- scheduleEvent(sim, time(sim) + P(sim)$summaryInterval,
"LandWeb_output", "allEvents", eventPriority = 7.5)
}
} else if (eventType == "otherPlots") {
if (anyPlotting(P(sim)$.plots) && ("screen" %in% P(sim)$.plots)) {
## average age by FRI polygon
mod$tsfOverTime <- ggPlotFn(sim$rstTimeSinceFire, sim$studyAreaReporting,
sim$fireReturnInterval, current(sim)$eventTime, end(sim),
mod$tsfOverTime, P(sim)$.plotInitialTime, P(sim)$.plotInterval,
outputPath(sim))
## schedule future plots
sim <- scheduleEvent(sim, times(sim)$current + P(sim)$.plotInterval, "LandWeb_output",
"otherPlots", eventPriority = 1)
}
} else {
warning(paste("Undefined event type: '", current(sim)[1, "eventType", with = FALSE],
"' in module '", current(sim)[1, "moduleName", with = FALSE], "'", sep = ""))
}
return(invisible(sim))
}
## event functions
# - keep event functions short and clean, modularize by calling subroutines from section below.
AllEvents <- function(sim) {
sim$vegTypeMap <- vegTypeMapGenerator(sim$cohortData, sim$pixelGroupMap,
P(sim)$vegLeadingProportion, mixedType = P(sim)$mixedType,
sppEquiv = sim$sppEquiv, sppEquivCol = P(sim)$sppEquivCol,
colors = sim$sppColorVect,
doAssertion = getOption("LandR.assertions", TRUE))
return(invisible(sim))
}
.inputObjects <- function(sim) {
cacheTags <- c(currentModule(sim), "function:.inputObjects")
dPath <- asPath(getOption("reproducible.destinationPath", dataPath(sim)), 1)
message(currentModule(sim), ": using dataPath '", dPath, "'.")
if (!suppliedElsewhere("studyArea", sim)) {
message("'studyArea' was not provided by user. Using a polygon in southwestern Alberta, Canada,")
sim$studyArea <- randomStudyArea(seed = 1234)
}
if (!suppliedElsewhere("studyAreaLarge", sim)) {
message("'studyAreaLarge' was not provided by user. Using the same as 'studyArea'.")
sim$studyAreaLarge <- sim$studyArea
}
if (!suppliedElsewhere("studyAreaReporting", sim)) {
message("'studyAreaReporting' was not provided by user. Using the same as 'studyArea'.")
sim$studyAreaReporting <- sim$studyArea
}
if (!suppliedElsewhere("fireReturnInterval", sim))
stop("fireReturnInterval map must be supplied.")
if (!suppliedElsewhere("rasterToMatch", sim))
stop("rasterToMatch must be supplied.")
if (!suppliedElsewhere("summaryPeriod", sim))
sim$summaryPeriod <- c(1000, 1500)
if (!suppliedElsewhere("cohortData", sim))
sim$cohortData <- data.table()
if (!suppliedElsewhere("pixelGroupMap", sim))
sim$pixelGroupMap <- raster()
if (!suppliedElsewhere("species", sim)) {
sim$species <- getSpeciesTable(dPath, cacheTags)
}
if (!suppliedElsewhere("sppEquiv", sim)) {
sim$sppEquiv <- LandR::sppEquivalencies_CA
## By default, Abies_las is renamed to Abies_sp
sim$sppEquiv[KNN == "Abie_Las", LandR := "Abie_sp"]
## add default colors for species used in model
if (!is.null(sim$sppColorVect))
stop("If you provide sppColorVect, you MUST also provide sppEquiv")
sim$sppColorVect <- sppColors(sim$sppEquiv, P(sim)$sppEquivCol, newVals = "Mixed", palette = "Accent")
}
if (!suppliedElsewhere("speciesLayers", sim)) {
#opts <- options(reproducible.useCache = "overwrite")
speciesLayersList <- Cache(loadkNNSpeciesLayers,
dPath = dPath,
rasterToMatch = sim$rasterToMatch,
studyArea = sim$studyAreaLarge,
sppEquiv = sim$sppEquiv,
knnNamesCol = "KNN",
sppEquivCol = P(sim)$sppEquivCol,
# thresh = 10,
url = extractURL("speciesLayers"),
cachePath = cachePath(sim),
userTags = c(cacheTags, "speciesLayers"))
#options(opts)
writeRaster(speciesLayersList$speciesLayers,
file.path(outputPath(sim), "speciesLayers.grd"),
overwrite = TRUE)
sim$speciesLayers <- speciesLayersList$speciesLayers
}
if (!suppliedElsewhere("standAgeMap", sim)) {
sim$standAgeMap <- Cache(prepInputs, #notOlderThan = Sys.time(),
targetFile = basename(standAgeMapFilename),
archive = asPath(c("kNN-StructureStandVolume.tar",
"NFI_MODIS250m_kNN_Structure_Stand_Age_v0.zip")),
destinationPath = dPath,
url = extractURL("standAgeMap"),
fun = "raster::raster",
studyArea = sim$studyAreaLarge,
rasterToMatch = sim$rasterToMatch,
method = "bilinear",
datatype = "INT2U",
filename2 = TRUE, overwrite = TRUE,
userTags = c("stable", currentModule(sim)))
sim$standAgeMap[] <- asInteger(sim$standAgeMap[])
}
return(invisible(sim))
}
ggPlotFn <- function(rstTimeSinceFire, studyAreaReporting, fireReturnInterval,
currTime, endTime, tsfOverTime, plotInitialTime, plotInterval, outPath) {
tsfMap <- raster::mask(rstTimeSinceFire, studyAreaReporting)
tsfDF <- data.table(tsf = tsfMap[], FRI = fireReturnInterval[]) %>% na.omit()
tsfDF <- tsfDF[, list(
time = as.numeric(currTime),
meanAge = mean(tsf, na.rm = TRUE)), by = FRI]
tsfDF[, FRI := factor(FRI)]
tsfOverTime <- rbindlist(list(tsfOverTime, tsfDF))
tsfOverTime <- tsfOverTime[!is.na(tsfOverTime$meanAge), ]
if (length(unique(tsfOverTime$time)) > 1) {
gg_tsfOverTime <- ggplot(tsfOverTime, aes(x = time, y = meanAge, col = FRI, ymin = 0)) +
geom_line(size = 1.5) +
theme(legend.text = element_text(size = 14))
firstPlot <- isTRUE(currTime == plotInitialTime + plotInterval)
title1 <- if (firstPlot) "Average age (TSF) by FRI polygon" else ""
Plot(gg_tsfOverTime, title = title1, new = TRUE, addTo = "ageOverTime")
if (currTime == endTime) {
checkPath(file.path(outPath, "figures"), create = TRUE)
ggsave(file.path(outPath, "figures", "average_age_(TSF)_by_FRI_polygon.png"), gg_tsfOverTime)
}
}
return(tsfOverTime)
}

repo: gb13faithless/root_node | path: /Vac_Scholarship_R/Constrained_Tree_MAx.R | license: none stated | language: R | encoding: UTF-8 | size: 1,627 bytes | revised: 2018-02-09 | stars: 0 | forks: 0

library(tree)
library(ape)
library(phangorn)
library(seqinr)
library("Biostrings")
library("ggplot2")
library("ggtree")
library("devtools")
library("strataG")
library("phylobase")
# read in the fasta sequences
S <- read.phyDat("test.fa",format="fasta", type="DNA")
#read in the fsc tree
T <-read.nexus("1PopDNA_1_true_trees.trees")
#pull out an example tree
T <- T$NumGen_tree_1_1_pos_0
plot(T)
n<- length(S)
names<- c(rep(1:2))
for( i in 1:2) {
names[i] <- paste( i, 1, sep = ".")
}
names(S) <- names
fit = pml(T, data=S, rearrangement="none", rate = 0.00002)
fit
library(ape)
tree <- T # the fsc tree loaded above
tree$edge.length[1]
t1 <- tree$edge.length[1]
t2 <- tree$edge.length[2]
t3 <- tree$edge.length[3]
# Let A denoted by 1, C 2, G 3, T 4
# transition probabilities function for JC model
transition_prob <- function(i,j,t,u){
if(i==j){ output <- exp(-u*t) + (1-exp(-u*t))*0.25
} else{
output <- (1-exp(-u*t))*0.25
}
return(output)
}
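# For reference: this is the Jukes-Cantor transition probability
#   P(i -> j, t) = 1/4 + 3/4 * exp(-u*t)  if i == j
#   P(i -> j, t) = 1/4 - 1/4 * exp(-u*t)  otherwise,
# so each row of the transition matrix sums to 1.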
transition_prob(1,1,t1,1)
# all transition equilibria are 0.25
# mutation rate = 1
0.25*transition_prob(1,4,t1,1)
getAnywhere('pml')
getAnywhere('optim.pml')
getAnywhere('pml.fit')
fit = pml(T, data=S, rearrangement="none", rate = 0.00002)
tree <- reorder(tree, "postorder")
tree
#ggtree
Tr <- ggtree(tree)
Tr
# Add Scale
Tr <- Tr + geom_treescale()
# Internal Node Numbers
Tr <- Tr +geom_text2(aes(subset=!isTip, label=node), hjust=-.3)
# Tip Labels
Tr <- Tr + geom_tiplab()
# Tip Points
Tr <- Tr + geom_tippoint()
# Tip Points
Tr +geom_text2(aes(subset=!isTip, label=node), hjust=1)

repo: LeoCai/MultiDeviceAlign-R | path: /preprocess.R | license: none stated | language: R | encoding: UTF-8 | size: 51 bytes | revised: 2017-02-20 | stars: 0 | forks: 0

source("../handshake_9_11/model/GloableConvert.R")

repo: ameliabedelia/amelia | path: /R/data.R | license: none stated | language: R | encoding: UTF-8 | size: 873 bytes | revised: 2023-06-28 | stars: 0 | forks: 0

#' Geospatial data of Landkreise in Germany
#'
#' A tidy dataframe containing the latitudes and longitudes of Landkreise
#' in Germany for use with ggplot2::geom_polygon.
#'
#' \describe{
#' \item{long}{longitude}
#' \item{lat}{latitude}
#' \item{rs}{Landkreise code}
#' \item{gen}{Landkreis name}
#' \item{hole}{Logical. Whether polygon is a hole or not}
#' ...
#' }
"landkreis"
#' Geospatial data of Bundesländer in Germany
#'
#' A tidy dataframe containing the latitudes and longitudes of Bundesländer
#' in Germany for use with ggplot2::geom_polygon.
#'
#' \describe{
#' \item{long}{longitude}
#' \item{lat}{latitude}
#' \item{id}{Numeric code used as governmental ID and prefix for regional codes}
#' \item{gen}{Bundesland name}
#' \item{hole}{Logical. Whether polygon is a hole or not}
#' ...
#' }
"bundesland"

repo: cbielow/PTXQC | path: /R/FilenameMapper.R | license: BSD-3-Clause (permissive) | language: R | encoding: UTF-8 | size: 14,558 bytes | revised: 2023-05-17 | stars: 41 | forks: 30

#'
#' Make sure to call $readMappingFile(some_file) if you want to support a user-defined file mapping.
#' Otherwise, calls to $getShortNames() will create/augment the mapping for filenames.
#'
#'
#'
#' @field raw_file_mapping Data.frame with columns 'from', 'to' and maybe 'best.effort' (if shorting was unsuccessful)
#' @field mapping.creation how the current mapping was obtained (user or auto)
#' @field external.mapping.file Filename of user-defined mapping file; only defined if readMappingFile() was called
#'
#' @import ggplot2
#'
#' @exportClass FilenameMapper
#' @export FilenameMapper
#'
#' @examples
#' a = FilenameMapper$new()
#' a$readMappingFile('filenamemapping.txt')
#'
FilenameMapper = setRefClass("FilenameMapper",
fields = list(raw_file_mapping = "data.frame", ## with cols 'from', to' and maybe 'best.effort' (if shorting was unsuccessful)
mapping.creation = "character", ## how the current mapping was obtained (user or auto)
external.mapping.file = "character" ## filename of user-defined mapping file; only defined if readMappingFile() was called
),
methods = list(
initialize=function() {
.self$raw_file_mapping = data.frame()
.self$mapping.creation = NA_character_
.self$external.mapping.file = NA_character_
return(.self)
},
specrefToRawfile = function(.self, specrefs)
{
"Return a DF with 'ms_run', 'raw.file' and 'fc.raw.file' given a vector of spectraReferences, e.g. 'ms_run[1]:...', '...'"
res = data.frame(ms_run = sub(":.*$", "", specrefs)) ## keep the 'ms_run[i]' part before the colon
return (cbind(res, .self$msrunToRawfile(res$ms_run)))
},
msrunToRawfile = function(.self, ms_runs)
{
"Given a vector of ms_runs, c('ms_run[1]', ...), return a data.frame of identical length with columns 'raw.file' and 'fc.raw.file'."
if (!"ms.run" %in% colnames(.self$raw_file_mapping)) stop("Mapping is missing 'ms.run' from mzTab!")
res = .self$getRawfm()[ match(ms_runs, .self$raw_file_mapping$ms.run), c("from", "to")]
colnames(res) = c("raw.file", "fc.raw.file")
return (res)
},
getShortNames = function(.self, raw_filenames, max_length = 10, ms_runs = NULL)
{
"Uses the internal mapping (or re-creates it if current one is incomplete) and maps the input raw names to shorter output names.
Returns a vector of the same length."
#rf <<- raw_filenames
#raw_filenames = rf
if (!is.null(ms_runs) && length(ms_runs) != length(raw_filenames)) stop("raw_filenames and ms_runs do not have the same length!")
cat(paste0("Adding fc.raw.file column ..."))
## if there is no mapping, or if its incomplete (outdated mapping file)
has_mapping = (nrow(.self$raw_file_mapping) != 0)
incomplete_mapping = has_mapping && any(is.na(match(raw_filenames, .self$raw_file_mapping$from)))
if (!has_mapping || incomplete_mapping)
{
## if the mapping is 'auto', we got handed an incomplete/different txt file before, which does not match
## the current file. Some files got mixed up, so we stop!
if (incomplete_mapping)
{
if (is.na(.self$mapping.creation))
{
stop("mapping.creation member not properly initialized!")
}
## we had NA's in auto mode ... bad
if (.self$mapping.creation == .self$getMappingCreation()['auto'])
{ ## mapping is incomplete
missing = unique(raw_filenames[is.na(match(raw_filenames, .self$raw_file_mapping$from))])
stop(paste0("Hithero unknown Raw files: ", paste(missing, collapse=", ", sep=""), " encountered which were not present in previous data files.\nDid you mix output files from different analyses?"))
}
}
## --> redo
rfm = .self$getShortNamesStatic(unique(raw_filenames), max_length)
if (!is.null(ms_runs)) {
rfm$ms.run = ms_runs[ match(rfm$from, raw_filenames) ]
}
## and remember it
.self$raw_file_mapping = rfm
cat("Created a new filename mapping:\n")
print(rfm)
## indicate to outside that a new table is ready
.self$mapping.creation = .self$getMappingCreation()['auto']
}
## do the mapping
v.result = as.factor(.self$raw_file_mapping$to[match(raw_filenames, .self$raw_file_mapping$from)])
cat(paste0(" done\n"))
return (v.result)
},
getShortNamesStatic = function(raw.files, max_len, fallbackStartNr = 1)
{
"Static method: Shorten a set of Raw file names and return a data frame with the mappings.
      Mapping will have: $from, $to and optionally $best.effort (if shortening was unsuccessful and numbers had to be used)
\\itemize{
\\item{\\verb{raw.files} Vector of Raw files.}
\\item{\\verb{max_len} Maximal length of shortening results, before resorting to canonical names (file 1,...).}
\\item{\\verb{fallbackStartNr} Starting index for canonical names.}
}
\\subsection{Return Value}{ data.frame with mapping.}
"
rf_name = raw.files
## remove prefix
rf_name_s = delLCP(rf_name,
min_out_length = 8,
add_dots = TRUE)
## remove infix (2 iterations)
rf_name_s = simplifyNames(rf_name_s,
2,
min_LCS_length = 7,
min_out_length = 8)
## check if shorter filenames are still unique (they should be.. if not we have a problem!!)
if (length(rf_name) != length(unique(rf_name_s)))
{
cat("\nOriginal names:\n")
cat(rf_name)
cat("\nShort names:\n")
cat(rf_name_s)
cat("\n")
stop("While loading MQ data: shortened raw filenames are not unique! This should not happen. Please contact the developers and provide the above names!")
}
df.mapping = data.frame(from = rf_name, to = rf_name_s, stringsAsFactors = FALSE)
## always include 'best.effort' column
df.mapping[, "best.effort"] = df.mapping$to
## check if the minimal length was reached
if (max(nchar(df.mapping$to)) > max_len)
{ ## resort to short naming convention
cat("Filenames are longer than the maximal allowed size of '" %+% max_len %+% "'. Resorting to short versions 'file X'.\n\n")
maxl = length(raw.files) - 1 + fallbackStartNr
df.mapping$to = paste("file", sprintf(paste0("%0", nchar(maxl), "d"), fallbackStartNr:maxl)) ## with leading 0's if required
}
return(df.mapping)
},
plotNameMapping = function(.self)
{
"Plots the current mapping of Raw file names to their shortened version.
Convenience function to plot the mapping (e.g. to a PDF device for reporting).
The data frame can be accessed directly via \\verb{.self$raw_file_mapping}.
If no mapping exists, the function prints a warning to console and returns NULL (which is safe to use in print(NULL)).
@return if mapping is available, returns a list of plots 'plots' and a Html table string 'htmlTable' ; 'NULL' otherwise.
"
if (nrow(.self$raw_file_mapping) == 0)
{
cat("No mapping found. Omitting plot.")
return (NULL);
}
table_header = c("original", "short\nname")
xpos = c(9, 11)
extra = ""
has_best_effort = FALSE
if ("best.effort" %in% colnames(.self$raw_file_mapping))
{
has_best_effort = TRUE
table_header = c(table_header, "best\neffort")
xpos = c(9, 11, 13)
if (all(.self$raw_file_mapping$to != .self$raw_file_mapping$best.effort)) {
extra = "\n(automatic shortening of names was not sufficient - see 'best effort')"
}
}
#mq_mapping = mq$raw_file_mapping
mq_mapping = .self$raw_file_mapping
pl_title = "Mapping of Raw files to their short names\nMapping source: " %+% .self$mapping.creation %+% extra;
mappingChunk = function(mq_mapping)
{
mq_mapping$ypos = -(1:nrow(mq_mapping))
head(mq_mapping)
## convert factors to string, because they will all end up in a common 'value' column
mq_mapping.s = data.frame(lapply(mq_mapping, function(x) if (is.factor(x)) as.character(x) else {x}), stringsAsFactors= FALSE)
mq_mapping.long = reshape2::melt(mq_mapping.s, id.vars = c("ypos"), value.name = "value")
head(mq_mapping.long)
mq_mapping.long$variable = as.character(mq_mapping.long$variable)
mq_mapping.long$col = "#000000";
mq_mapping.long$col[mq_mapping.long$variable=="to"] = "#5F0000"
mq_mapping.long$variable[mq_mapping.long$variable=="from"] = xpos[1]
mq_mapping.long$variable[mq_mapping.long$variable=="to"] = xpos[2]
mq_mapping.long$variable[mq_mapping.long$variable=="best.effort"] = xpos[3]
mq_mapping.long$variable = as.numeric(mq_mapping.long$variable)
mq_mapping.long$size = 2;
df.header = data.frame(ypos = 1, variable = xpos, value = table_header, col = "#000000", size=3)
mq_mapping.long2 = rbind(mq_mapping.long, df.header)
mq_mapping.long2$hpos = 0 ## left aligned, 1=right aligned
mq_mapping.long2$hpos[mq_mapping.long2$variable==xpos[1]] = 1
mq_mapping.long2$hpos[mq_mapping.long2$variable==xpos[2]] = 0
mqmap_pl = ggplot(mq_mapping.long2, aes_string(x = "variable", y = "ypos")) +
geom_text(aes_string(label="value"), color = mq_mapping.long2$col, hjust=mq_mapping.long2$hpos, size=mq_mapping.long2$size) +
coord_cartesian(xlim=c(0,20)) +
theme_bw() +
theme(plot.margin = grid::unit(c(1,1,1,1), "cm"), line = element_blank(),
axis.title = element_blank(), panel.border = element_blank(),
axis.text = element_blank(), strip.text = element_blank(), legend.position = "none") +
ggtitle(pl_title)
return(mqmap_pl)
}
l_plots = byXflex(mq_mapping, 1:nrow(mq_mapping), 20, mappingChunk, sort_indices = FALSE);
return (list(plots = l_plots, htmlTable = getHTMLTable(.self$raw_file_mapping, pl_title)))
},
getRawfm = function(.self)
{
"Wrapper function for member 'raw_file_mapping', ensuring that $to is a factor"
tmp = .self$raw_file_mapping
tmp$to = factor(tmp$to)
return(tmp)
},
readMappingFile = function(.self, filename)
{
"Reads a mapping table of full Raw file names to shortened names.
The internal structure \\verb{raw_file_mapping} is created using this file.
If the file is missing, nothing is done and FALSE is returned.
If the file contains contradictory information (different set of $from files) compared to
the current mapping (if present), the internal mapping wins (filemapping is ignored) and FALSE is returned.
The file must have two columns named: 'orig.Name' and 'new.Name' and use Tab as separator.
This file can be used to manually substitute Raw file names within the report.
The ordering of Raw files in the report can be changed by re-arranging the rows.
I.e.
\\preformatted{
orig.Name new.Name
2011_05_30_ALH_OT_21_VIL_TMT_FR01 myfile A
2011_05_30_ALH_OT_22_VIL_TMT_FR02 another B
}
@param filename Source filename to read.
@return Returns \\verb{TRUE} if file was read, \\verb{FALSE} if it does not exist.
"
if (file.exists(filename))
{
message(paste0("Reading mapping file '", filename, "'\n"))
dfs = read.delim(filename, comment.char="#", stringsAsFactors = FALSE)
colnames(dfs) = gsub("_", ".", colnames(dfs)) ## legacy support for old "best_effort" column (now "best.effort")
req_cols = c(from = "orig.Name", to = "new.Name")
if (!all(req_cols %in% colnames(dfs)))
{
stop("Input file '", filename, "' does not contain the columns '", paste(req_cols, collapse="' and '"), "'.",
" Please fix and re-run PTXQC!")
}
req_cols = c(req_cols, best.effort = "best.effort", ms.run = "ms.run") ## augment
colnames(dfs) = names(req_cols)[match(colnames(dfs), req_cols)]
if (any(duplicated(dfs$from)) | any(duplicated(dfs$to)))
{
dups = c(dfs$from[duplicated(dfs$from)], dfs$to[duplicated(dfs$to)])
stop("Input file '", filename_sorting, "' has duplicate entries ('", paste(dups, collapse=", "), ")'!",
" Please fix and re-run PTXQC!")
}
dfs$to = factor(dfs$to, levels = unique(dfs$to), ordered = TRUE) ## keep the order
dfs$from = factor(dfs$from, levels = unique(dfs$from), ordered = TRUE) ## keep the order
## set internal mapping
if (nrow(.self$raw_file_mapping) > 0 & ## was initialized before...
!setequal(.self$raw_file_mapping$from, dfs$from)) ## .. and has different data
{
print(paste0("Raw filename mapping in file '", filename, "' has different set of raw files than current data. Mapping file will be ignored and overwritten!",
"\nold filenames in mapping:\n ", paste(dfs$from, collapse="\n "),
"\nnew filenames from data:\n ", paste(.self$raw_file_mapping$from, collapse="\n ")))
return (FALSE)
}
.self$raw_file_mapping = dfs
## set who defined it
.self$mapping.creation = .self$getMappingCreation()['user']
.self$external.mapping.file = filename; ## remember filename for later error messages
return (TRUE)
}
return (FALSE)
},
writeMappingFile = function(.self, filename)
{
"Writes a mapping table of full Raw file names to shortened names.
The internal structure \\verb{raw_file_mapping} is written to the
file specified.
File is only created if mapping exists (in .self$raw_file_mapping).
@param filename Target filename to create.
    @return Returns \\verb{TRUE} on success, \\verb{FALSE} (or \\verb{NULL}) if no mapping exists.
"
if (nrow(.self$raw_file_mapping) == 0)
{
cat("No mapping found. Writing mapping file '", filename, "' not possible!")
return (FALSE)
}
dfs = data.frame(orig.Name = .self$raw_file_mapping$from, new.Name = .self$raw_file_mapping$to)
if (nrow(dfs) == 0) return(NULL)
if ("best.effort" %in% colnames(.self$raw_file_mapping)) {
dfs$best.effort = .self$raw_file_mapping[, "best.effort"]
}
if ("ms.run" %in% colnames(.self$raw_file_mapping)) {
dfs$ms.run = .self$raw_file_mapping[,"ms.run"]
}
## use a file handle to avoid warning from write.table() when appending
## a table with column names 'Warning(): appending column names to file'
FH = file(filename, "w")
cat(file = FH,
"# This file can be used to manually substitute Raw file names within the report.",
"# The ordering of Raw files in the report can be changed by re-arranging the rows.",
sep = "\n")
write.table(x = dfs, file = FH, quote = FALSE, sep="\t", row.names = FALSE)
close(FH) ## flush
return (TRUE)
},
getMappingCreation = function(.self)
{
"A static function"
return(c(user = 'file (user-defined)', auto = 'automatic'))
}
) ## end methods list
) ## end RefClass
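## Illustrative usage sketch (kept as comments so the package file stays free of
## top-level side effects; the raw file names are taken from the mapping-file
## example shown in readMappingFile()):
# fm = FilenameMapper$new()
# short_names = fm$getShortNames(c("2011_05_30_ALH_OT_21_VIL_TMT_FR01",
#                                  "2011_05_30_ALH_OT_22_VIL_TMT_FR02"))
# fm$writeMappingFile("filenamemapping.txt") ## same format that readMappingFile() reads back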
|
9e95ad7fd1fc72a6cddc98094a7c0bc22ffd3632
|
446373433355171cdb65266ac3b24d03e884bb5d
|
/man/saga_soiltextureclassification.Rd
|
a1c31094047e4ff87c14050b7e52ee78c088df30
|
[
"MIT"
] |
permissive
|
VB6Hobbyst7/r_package_qgis
|
233a49cbdb590ebc5b38d197cd38441888c8a6f3
|
8a5130ad98c4405085a09913b535a94b4a2a4fc3
|
refs/heads/master
| 2023-06-27T11:52:21.538634
| 2021-08-01T01:05:01
| 2021-08-01T01:05:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,445
|
rd
|
saga_soiltextureclassification.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saga_soiltextureclassification.R
\name{saga_soiltextureclassification}
\alias{saga_soiltextureclassification}
\title{QGIS algorithm Soil texture classification}
\usage{
saga_soiltextureclassification(
SAND = qgisprocess::qgis_default_value(),
SILT = qgisprocess::qgis_default_value(),
CLAY = qgisprocess::qgis_default_value(),
TEXTURE = qgisprocess::qgis_default_value(),
SUM = qgisprocess::qgis_default_value(),
...,
.complete_output = TRUE
)
}
\arguments{
\item{SAND}{\code{raster} - Sand. Path to a raster layer.}
\item{SILT}{\code{raster} - Silt. Path to a raster layer.}
\item{CLAY}{\code{raster} - Clay. Path to a raster layer.}
\item{TEXTURE}{\code{rasterDestination} - Soil Texture. Path for new raster layer.}
\item{SUM}{\code{rasterDestination} - Sum. Path for new raster layer.}
\item{...}{further parameters passed to \code{qgisprocess::qgis_run_algorithm()}}
\item{.complete_output}{logical specifying if the complete output of \code{qgisprocess::qgis_run_algorithm()} should be used (\code{TRUE}) or only the first output (most likely the main one) should be read (\code{FALSE}). Default value is \code{TRUE}.}
}
\description{
QGIS Algorithm provided by SAGA Soil texture classification (saga:soiltextureclassification)
}
\details{
\subsection{Outputs description}{
\itemize{
\item TEXTURE - outputRaster - Soil Texture
\item SUM - outputRaster - Sum
}
}
}
|
f7f235094252e9c49bcd08ab1358ec826aad6030
|
03a1d1efb4fb7323088201474bf6500159c0bb4b
|
/dist2coast.R
|
97ac088f686909d26356dfd7eac45f90d8c5dd4a
|
[] |
no_license
|
markolipka/dist2coast
|
8a5f181640c1d7b8ebecb813dbdff06ff395d7af
|
71cd14bf2cde6d3158cdf04b8ba06936eccb0567
|
refs/heads/main
| 2023-02-07T20:30:14.348219
| 2020-12-28T01:33:07
| 2020-12-28T01:33:07
| 324,579,004
| 0
| 0
| null | 2020-12-26T15:08:47
| 2020-12-26T15:08:46
| null |
UTF-8
|
R
| false
| false
| 3,529
|
r
|
dist2coast.R
|
## calculate distance between geo points and coastline
## distance is in m
## transforming to UTM32 and also cropping does not seem to have a measurable performance benefit with moderate numbers of points (100) and spatial extent (North Sea scale)
## the output = "distmat" option might be quite useless if the linestrings of the coastline are not also output
## the coastline = "mapdata" uses map('world') because high res maps are too slow
### download and process high resolution coastline geodata:
# http://www.soest.hawaii.edu/pwessel/gshhg/gshhg-shp-2.3.7.zip
# read_sf("~/Downloads/gshhg-shp-2/GSHHS_shp/f/GSHHS_f_L1.shp") %>%
# st_cast("MULTILINESTRING") %>%
# st_combine() %>%
# st_write("worldCoastlines/wcl_fine_GSHHS.gpkg")
dist2coast <- function(lons,
lats,
                       coastline_crop = NULL, # if not NULL, the coastline is cropped to a padded bounding box around the input points before the distance calculation (can speed things up); the older interface taking an explicit c(xmin, ymin, xmax, ymax) vector survives in the commented-out block below
coastline = "ne", # coastline source; "ne" for Natural Earth (www.naturalearthdata.com) (faster), "mapdata" for using map('world') (more precise)
as_utm32 = FALSE, # transform both points and coast to UTM32, i.e. a planar projection, for more speed
#output = "mindist", # "mindist" for distance to nearest coastline, "distmat" for matrix of distances between points (in rows) and every line (in cols)
plot = FALSE
) {
suppressWarnings( suppressPackageStartupMessages(library(dplyr, quietly = TRUE)) )
suppressWarnings( suppressPackageStartupMessages(library(mapdata, quietly = TRUE)) )
suppressWarnings( suppressPackageStartupMessages(library(sf, quietly = TRUE)) )
# make sf features for points and coastline
points <- data.frame(lons, lats) %>%
st_as_sf(coords = c("lons", "lats"), crs = 4326)
switch(coastline,
"ne" = coast <- rnaturalearth::ne_coastline(scale = 110, returnclass = "sf") %>%
st_set_crs(4326) %>%
st_combine(),
"mapdata" = coast <- st_as_sf(map('world', plot = FALSE, fill = TRUE)) %>%
st_set_crs(4326) %>%
st_combine() %>%
st_cast("MULTILINESTRING"),
"gshhg" = coast <- st_read("worldCoastlines/wcl_fine_GSHHS.gpkg", quiet = TRUE) # already st_combine()ed
)
# crop coastline
# if(!is.null(coastline_crop)) {
# if(is.null(names(coastline_crop))) {names(coastline_crop) <- c("xmin", "ymin", "xmax", "ymax")}
# suppressMessages(suppressWarnings(
# coast <- st_crop(coast, coastline_crop)
# ))
# }
  if(!is.null(coastline_crop)){ # the original `if(coastline_crop)` errors on the NULL default
bbox <- st_bbox(points)
extended_bbox <- bbox + c(-40, -20, 40, 20)
coast <- st_crop(coast, extended_bbox)
}
# transform to UTM 32
if(as_utm32) {
points <- points %>% st_transform(25832)
coast <- coast %>% st_transform(25832)
}
# calculate distances
dist <- st_distance(points, coast)
if(plot) {
suppressWarnings(library(ggplot2, quietly = TRUE))
p1 <- ggplot() +
geom_sf(data = coast, fill = "grey70") +
geom_point(aes(lons, lats, col = round(as.numeric(dist) / 1000))) +
scale_x_continuous(expand = c(0,0)) +
scale_y_continuous(expand = c(0,0)) +
scale_color_gradientn(name = "distance to\nshore [km]", colours = c("blue", "red")) +
theme_bw()
print(p1)
}
return(dist)
}
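## Minimal usage sketch (not run; requires internet access for the Natural Earth
## coastline, and the coordinates are arbitrary North Sea example points):
# d <- dist2coast(lons = c(4.5, 8.1), lats = c(54.2, 55.3), plot = TRUE)
# round(as.numeric(d) / 1000) # distances in km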
|
53cb30e931be42e9b4c5ff7c920b394264b2d76f
|
10142a4fd35068f2675373580ecd8ed7aaa8f7d0
|
/PreguntaPersonal.R
|
b134f025d7ca6042eefe26ce0c7df08f252fa8fd
|
[] |
no_license
|
caroljanethjp/Suicide-Rates
|
96002a83d79c09233c28be67f444a983e9f5494e
|
32bdb9a962b23fc9e75fb6c5acddb030a51c606a
|
refs/heads/master
| 2020-05-03T09:03:40.810358
| 2019-04-02T14:40:00
| 2019-04-02T14:40:00
| 178,543,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,709
|
r
|
PreguntaPersonal.R
|
#
# This script was built from the dataset produced in the data-cleaning step
# and narrows the focus to the 10 countries worldwide with the strongest
# upward trend in suicides in recent years (2010-2015).
# From there, it shows in which age groups and gender this phenomenon is most
# concentrated, so that prevention efforts can be targeted more precisely.
#
########## Libraries ############
library(tidyverse)
library(broom)
library(gridExtra) # needed for grid.arrange() at the end of the script
########## Read data ############
dt <- read_csv("data/master_clean.csv")
########## Suicide trends for each country ############
##########        from 1985 to 2015        ############
country_year <- dt %>%
group_by(country, year) %>%
summarize(suicides = sum(suicides_no),
population = sum(population),
suicide_per_100k = (suicides / population) * 100000)
country_year_trends <- country_year %>%
ungroup()%>%
nest(-country)%>%
mutate(model = map(data, ~ lm(suicide_per_100k ~ year, data = .)),
tidied = map(model, tidy))%>%
unnest(tidied)
# keep only the statistically significant model terms (p < 0.05)
country_year_sig_trends <- country_year_trends %>%
filter(term == "year") %>%
mutate(p.adjusted = p.adjust(p.value, method = "holm")) %>%
filter(p.adjusted < .05) %>%
arrange(estimate)
# take the 10 countries with the steepest increasing trend in the whole dataset
top10_increasing <- tail(country_year_sig_trends$country, 10)
########## Breaking down the top 10 countries by age group and gender ############
data_filtered <- dt %>%
filter(country %in% top10_increasing)
# by gender
top_10_gender <- data_filtered %>%
filter(year >= 2010) %>%
group_by(country, sex) %>%
summarize(suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000)
# by age
top_10_age <- data_filtered %>%
filter(year >= 2010) %>%
group_by(country, age) %>%
summarize(suicide_per_100k = (sum(as.numeric(suicides_no)) / sum(as.numeric(population))) * 100000)
# turning the age column into an ordered factor
data_filtered$age <- factor(data_filtered$age,
ordered = T,
levels = c("5-14",
"15-24",
"25-34",
"35-54",
"55-74",
"75+"))
########## Visualisations ############
countries_top_10 <- country_year %>%
filter(country %in% top10_increasing) %>%
ggplot(aes(x = year, y = suicide_per_100k, col = country)) +
geom_point() +
geom_smooth(method = "lm") +
facet_wrap(~ country) +
theme(legend.position = "none") +
ggtitle("Top 10 de países con tendencia más creciente")+
labs(x = "Año",
y = "Suicidios por 100k")
countries_top_10
countries_top_10_gender <- ggplot(top_10_gender, aes(x = country, y = suicide_per_100k, fill = sex)) +
geom_bar(position = "fill", stat = "identity") +
scale_y_continuous(labels = scales::percent) +
labs(title = "Porcentaje de suicidios por género",
subtitle = "en los 10 países top, 2010 - 2015",
x = "País",
y = "",
fill = "Género")
countries_top_10_age <- ggplot(top_10_age, aes(x = country, y = suicide_per_100k, fill = age)) +
geom_bar(position = "dodge", stat = "identity") +
labs(title = "Por edad",
subtitle = "2010 - 2015 ",
x = "País",
y = "Suicidios por 100k",
fill = "Edad")
grid.arrange(countries_top_10_gender, countries_top_10_age, nrow = 2)
|
2c250b652bfa3ec9e51a4cccb8a01f43c7e22db8
|
17b84acca3b2e3d1de130f0803d4d7f6a8cecc3e
|
/server.R
|
621ad6e2ade6142b28db3fc3a2ff0553e8a3c98c
|
[] |
no_license
|
brunamdalmoro/shiny-imoveis-poa
|
982dce34496f603cb173dba06440d954cb332f3e
|
1d26b94e894f2a1603ee5fe5effd62fba5d63692
|
refs/heads/master
| 2021-01-23T00:14:37.754606
| 2017-03-22T18:24:20
| 2017-03-22T18:24:20
| 85,706,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,307
|
r
|
server.R
|
library(shiny)
library(leaflet)
library(randomForest)
source("helper.R")
server <- function(input, output){
  v <- reactiveValues(doPlot = FALSE) # starts out FALSE
observeEvent(input$atualizar, {
v$doPlot <- input$atualizar
  }) # set to TRUE once the button is clicked
output$mapa <- renderLeaflet({
dados <- data.frame(Area = input$area, Dormitorios = input$dormitorios,
Vagas = input$vagas, Valor = NA)
resultado <- rf(dados, input$bairro)
    if (v$doPlot == FALSE) return() # if the button has not been clicked yet, render nothing
isolate({
coordenadas <- location(input$bairro)
icons <- awesomeIcons(
icon = 'ion-social-usd',
iconColor = 'black',
library = 'ion',
markerColor = "blue"
)
leaflet(coordenadas) %>% addTiles() %>%
# addProviderTiles(providers$Hydda) %>%
setView(lat=coordenadas$lat, lng=coordenadas$lng, zoom=11) %>%
addAwesomeMarkers(lng = ~lng, lat = ~lat,
label = ~paste0(bairro," - R$",round(resultado, digits=2)),
icon=icons)
})
})
}
|
be3b94c48e4edc3e88f6d016dfb9624bacc26fa6
|
0ed00ba01bc3c5256df40233b3de58dc433a5fc0
|
/Data Management.R
|
93a769107f692598004a4bd412f8b6ae8e76a109
|
[] |
no_license
|
zaq0718/PISA-2015-Well-being
|
9214923f8ccd3dec1a342a2deff1d03632f40c26
|
12ce7360cad7c183a9cd3e2f7af661f441522889
|
refs/heads/master
| 2021-05-19T08:00:50.716024
| 2021-04-01T08:42:36
| 2021-04-01T08:42:36
| 251,596,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,390
|
r
|
Data Management.R
|
library(dplyr)
library(foreign)
library(sjmisc)
library(srvyr)
library(ggplot2)
# 1. Load Data ------------------------------------------------------------
setwd("~\\PISA 2015\\Correlations_PISA 2015_Well-being\\Well-being")
Survey <- c("Option_ICTQ","Option_PQ","ADMINMODE")
Demo <- c("ST004D01T","IMMIG","ESCS")
Student <- c("MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PV1SCIE","PV2SCIE","PV3SCIE",
"PV4SCIE","PV5SCIE","PV6SCIE","PV7SCIE","PV8SCIE","PV9SCIE","PV10SCIE")
Well_being <- c("ST016Q01NA")
weight <- c("W_FSTUWT","SENWT")
list <- dir(pattern = "*.sav")
ldf <- list()
for (k in 1:length(list)){
ldf[[k]] <- read.spss(list[k],use.value.labels = FALSE,use.missings = TRUE,to.data.frame = TRUE)%>%
select(Survey,Demo,Student,Well_being,weight )
}
names(ldf) <- list
for(i in 1:length(list)){
assign(list[i], as.data.frame(ldf[[i]]))
}
# 2. Data Management by cultures -------------------------------------------
Anglo <- rbind(IRL.sav,USA.sav,GBR.sav)
Latin_Eu <- rbind(BEL.sav,FRA.sav,ITA.sav,PRT.sav,ESP.sav,QES.sav)
Nordic <- rbind(FIN.sav,ISL.sav)
German <- rbind(AUT.sav,DEU.sav,LUX.sav,NLD.sav,CHE.sav)
East_EU <- rbind(CZE.sav,GRC.sav,HUN.sav,LVA.sav,POL.sav,RUS.sav,
SVK.sav,BGR.sav,EST.sav,HRV.sav,LTU.sav,MNE.sav,SVN.sav)
Latin_A <- rbind(BRA.sav,MEX.sav,URY.sav,CHL.sav,COL.sav,CRI.sav,DOM.sav,PER.sav)
Middle_E <- rbind(TUN.sav,TUR.sav,ARE.sav,QAT.sav)
South_A <- THA.sav
East_A <- rbind(HKG.sav,JPN.sav,KOR.sav,MAC.sav,QCH.sav,TAP.sav)
# 3. Frequency :"MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP", and Well-being -------------
# with wieght
culture <- list(Anglo,Latin_Eu,Nordic,German ,East_EU,Latin_A,Middle_E,South_A,East_A)
Anglo_F <- culture[[1]]%>%as_survey_design(weights=SENWT)%>%
select(c("MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","ST016Q01NA"))%>%
summarise_all(list(~ survey_mean(., na.rm = TRUE)))
Freq <- function(Data){
Data_F <- Data%>%as_survey_design(weights=SENWT)%>%
select(c("MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","ST016Q01NA"))%>%
summarise_all(list(~ round(survey_mean(., na.rm = TRUE),digits = 3)))
return(Data_F)
}
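# e.g. Freq(Anglo) returns one row of weighted means and standard errors for the
# five scales plus life satisfaction (ST016Q01NA) in the Anglo region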
results <- NULL
for( i in 1:length(culture)){
Des <- Freq (Data = culture[[i]])
results<- rbind(results,Des)
}
Sample <-unlist(lapply(culture, nrow))
results <- cbind(results,Sample)
rownames(results) <- c("Anglo","Latin_Eu","Nordic","German" ,"East_EU","Latin_A","Middle_E","South_A","East_A")
write.csv(results,file="Frequency_Social.csv",sep=",")
#4. 95% CI Plots ----------------------------------------------------------------
lower_ci <- function(mean, se, n, conf_level = 0.95){
  round(mean - qt(1 - ((1 - conf_level) / 2), n - 1) * se, digits = 3)
}
upper_ci <- function(mean, se, n, conf_level = 0.95){
  round(mean + qt(1 - ((1 - conf_level) / 2), n - 1) * se, digits = 3)
}
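# quick sanity check of the CI helpers (illustrative numbers, not from the data):
# lower_ci(mean = 0.5, se = 0.02, n = 1000) # ~0.461
# upper_ci(mean = 0.5, se = 0.02, n = 1000) # ~0.539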
# MOTIVAT
max(results$MOTIVAT);min(results$MOTIVAT)#0.708 and -0.334
lower <- NULL
upper <- NULL
for( i in 1:nrow(results)){
res <- lower_ci(mean=results[i,1],se=results[i,2],n=results[i,13])
res_u <- upper_ci(mean=results[i,1],se=results[i,2],n=results[i,13])
lower <- rbind(lower,res)
upper <- rbind(upper, res_u)
}
Regions <- c("Anglo","Latin Europe","Nordic","German" ,"East Europe","Latin America ","Middle East","South Asia","East Asia")
MOTIVAT <- as.data.frame(cbind(Regions,results$MOTIVAT,results$MOTIVAT_se,lower,upper))
colnames(MOTIVAT) <- c("Regions","Mean","SE","lower","upper")
MOTIVAT[,2:5] <- apply(MOTIVAT[,2:5],2,as.numeric)
ggplot(MOTIVAT,aes(x=as.factor(Regions),y=Mean,label=as.factor(Regions)))+
geom_point()+
geom_errorbar(aes(x=as.factor(Regions),ymin=lower,ymax=upper))+
geom_text(size=4,hjust="center", vjust=3,aes(color=as.factor(Regions)))+
theme(legend.position = "none")+
ylab("Achievement Motivation")+
theme(axis.title.x=element_blank(),axis.text.x=element_blank(),axis.ticks.x=element_blank(),
panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent",colour = NA),
axis.line = element_line(colour = "black"))+
scale_y_continuous(limits = c(-0.5, 0.8), breaks = seq(-0.5,0.8, by = 0.1))
# ANXTEST
max(results$ANXTEST);min(results$ANXTEST)#0.379 and -0.313
lower <- NULL
upper <- NULL
for( i in 1:nrow(results)){
res <- lower_ci(mean=results[i,3],se=results[i,4],n=results[i,13])
res_u <- upper_ci(mean=results[i,3],se=results[i,4],n=results[i,13])
lower <- rbind(lower,res)
upper <- rbind(upper, res_u)
}
Regions <- c("Anglo","Latin Europe","Nordic","German" ,"East Europe","Latin America ","Middle East","South Asia","East Asia")
ANXTEST <- as.data.frame(cbind(Regions,results$ANXTEST,results$ANXTEST_se,lower,upper))
colnames(ANXTEST) <- c("Regions","Mean","SE","lower","upper")
ANXTEST[,2:5] <- apply(ANXTEST[,2:5],2,as.numeric)
ggplot(ANXTEST,aes(x=as.factor(Regions),y=Mean,label=as.factor(Regions)))+
geom_point()+
geom_errorbar(aes(x=as.factor(Regions),ymin=lower,ymax=upper))+
geom_text(size=4,hjust="center", vjust=3,aes(color=as.factor(Regions)))+
theme(legend.position = "none")+
ylab("Test Anxiety")+
theme(axis.title.x=element_blank(),axis.text.x=element_blank(),axis.ticks.x=element_blank(),
panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent",colour = NA),
axis.line = element_line(colour = "black"))+
scale_y_continuous(limits = c(-0.5, 0.8), breaks = seq(-0.5,0.8, by = 0.1))
# EMOSUPS
max(results$EMOSUPS);min(results$EMOSUPS)#0.187 and -0.427
lower <- NULL
upper <- NULL
for( i in 1:nrow(results)){
res <- lower_ci(mean=results[i,5],se=results[i,6],n=results[i,13])
res_u <- upper_ci(mean=results[i,5],se=results[i,6],n=results[i,13])
lower <- rbind(lower,res)
upper <- rbind(upper, res_u)
}
Regions <- c("Anglo","Latin Europe","Nordic","German" ,"East Europe","Latin America ","Middle East","South Asia","East Asia")
EMOSUPS <- as.data.frame(cbind(Regions,results$EMOSUPS,results$EMOSUPS_se,lower,upper))
colnames(EMOSUPS) <- c("Regions","Mean","SE","lower","upper")
EMOSUPS[,2:5] <- apply(EMOSUPS[,2:5],2,as.numeric)
ggplot(EMOSUPS,aes(x=as.factor(Regions),y=Mean,label=as.factor(Regions)))+
geom_point()+
geom_errorbar(aes(x=as.factor(Regions),ymin=lower,ymax=upper))+
geom_text(size=4,hjust="center", vjust=1.5,aes(color=as.factor(Regions)))+
theme(legend.position = "none")+
ylab("Emotional Support")+
theme(axis.title.x=element_blank(),axis.text.x=element_blank(),axis.ticks.x=element_blank(),
panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent",colour = NA),
axis.line = element_line(colour = "black"))+
scale_y_continuous(limits = c(-0.5, 0.8), breaks = seq(-0.5,0.8, by = 0.1))
#BELONG
max(results$BELONG);min(results$BELONG)#0.278 and -0.354
lower <- NULL
upper <- NULL
for( i in 1:nrow(results)){
res <- lower_ci(mean=results[i,7],se=results[i,8],n=results[i,13])
res_u <- upper_ci(mean=results[i,7],se=results[i,8],n=results[i,13])
lower <- rbind(lower,res)
upper <- rbind(upper, res_u)
}
Regions <- c("Anglo","Latin Europe","Nordic","German" ,"East Europe","Latin America ","Middle East","South Asia","East Asia")
BELONG <- as.data.frame(cbind(Regions,results$BELONG,results$BELONG_se,lower,upper))
colnames(BELONG ) <- c("Regions","Mean","SE","lower","upper")
BELONG [,2:5] <- apply(BELONG [,2:5],2,as.numeric)
ggplot(BELONG,aes(x=as.factor(Regions),y=Mean,label=as.factor(Regions)))+
geom_point()+
geom_errorbar(aes(x=as.factor(Regions),ymin=lower,ymax=upper))+
geom_text(size=3.5,hjust="center", vjust=2.5,aes(color=as.factor(Regions)))+
theme(legend.position = "none")+
ylab("Belongness ")+
theme(axis.title.x=element_blank(),axis.text.x=element_blank(),axis.ticks.x=element_blank(),
panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent",colour = NA),
axis.line = element_line(colour = "black"))+
scale_y_continuous(limits = c(-0.5, 0.8), breaks = seq(-0.5,0.8, by = 0.1))
#TEACHSUP
max(results$TEACHSUP);min(results$TEACHSUP)#0.391 and -0.346
lower <- NULL
upper <- NULL
for( i in 1:nrow(results)){
res <- lower_ci(mean=results[i,9],se=results[i,10],n=results[i,13])
res_u <- upper_ci(mean=results[i,9],se=results[i,10],n=results[i,13])
lower <- rbind(lower,res)
upper <- rbind(upper, res_u)
}
Regions <- c("Anglo","Latin Europe","Nordic","German" ,"East Europe","Latin America ","Middle East","South Asia","East Asia")
TEACHSUP <- as.data.frame(cbind(Regions,results$TEACHSUP,results$TEACHSUP_se,lower,upper))
colnames(TEACHSUP) <- c("Regions","Mean","SE","lower","upper")
TEACHSUP[,2:5] <- apply(TEACHSUP[,2:5],2,as.numeric)
ggplot(TEACHSUP,aes(x=as.factor(Regions),y=Mean,label=as.factor(Regions)))+
geom_point()+
geom_errorbar(aes(x=as.factor(Regions),ymin=lower,ymax=upper))+
geom_text(size=4,hjust="center", vjust=2.5,aes(color=as.factor(Regions)))+
theme(legend.position = "none")+
ylab("Belongness ")+
theme(axis.title.x=element_blank(),axis.text.x=element_blank(),axis.ticks.x=element_blank(),
panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent",colour = NA),
axis.line = element_line(colour = "black"))+
scale_y_continuous(limits = c(-0.5, 0.8), breaks = seq(-0.5,0.8, by = 0.1))
#ST016Q01NA
max(results$ST016Q01NA);min(results$ST016Q01NA)#7.871 and 6.61
lower <- NULL
upper <- NULL
for( i in 1:nrow(results)){
res <- lower_ci(mean=results[i,11],se=results[i,12],n=results[i,13])
res_u <- upper_ci(mean=results[i,11],se=results[i,12],n=results[i,13])
lower <- rbind(lower,res)
upper <- rbind(upper, res_u)
}
Regions <- c("Anglo","Latin Europe","Nordic","German" ,"East Europe","Latin America ","Middle East","South Asia","East Asia")
Wellbeing <- as.data.frame(cbind(Regions,results$ST016Q01NA,results$ST016Q01NA_se,lower,upper))
colnames(Wellbeing) <- c("Regions","Mean","SE","lower","upper")
Wellbeing[,2:5] <- apply(Wellbeing[,2:5],2,as.numeric)
ggplot(Wellbeing,aes(x=as.factor(Regions),y=Mean,label=as.factor(Regions)))+
geom_point()+
geom_errorbar(aes(x=as.factor(Regions),ymin=lower,ymax=upper))+
geom_text(size=4,hjust="center", vjust=3,aes(color=as.factor(Regions)))+
theme(legend.position = "none")+
ylab("Subjective Well-being")+
theme(axis.title.x=element_blank(),axis.text.x=element_blank(),axis.ticks.x=element_blank(),
panel.grid.major = element_blank(),panel.grid.minor = element_blank(),
panel.background = element_rect(fill = "transparent",colour = NA),
axis.line = element_line(colour = "black"))+
scale_y_continuous(limits = c(6, 8), breaks = seq(6,8, by = 1))
# 5. Plots between social support and well-being --------------------------
culture <- list(Anglo,Latin_Eu,Nordic,German ,East_EU,Latin_A,Middle_E,South_A,East_A)
ggplot(data=German,aes(x=as.numeric(EMOSUPS),y=as.numeric(BELONG)))+geom_point()
# 4. Function -------------------------------------------------------------
PISA <- function(Data,folder,file){
Demo <- c("ST004D01T","IMMIG","ESCS")
Student <- c("MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PV1SCIE","PV2SCIE","PV3SCIE",
"PV4SCIE","PV5SCIE","PV6SCIE","PV7SCIE","PV8SCIE","PV9SCIE","PV10SCIE")
Well_being <- c("ST016Q01NA")
weight <- c("W_FSTUWT","SENWT")
Data <- Data%>%
select(Demo,Student,Well_being,weight)%>%mutate_at("IMMIG",as.factor)%>%mutate(ST004D01T=recode(ST004D01T,'1'="1",
'2'="0"))
#Male is a reference group
Dummy <- to_dummy(Data$IMMIG)%>%select("IMMIG_2","IMMIG_3")%>%mutate_all(as.numeric)
#Native is a reference group
FData <- cbind(Data,Dummy)
FData <- FData%>%mutate_at(c("IMMIG","IMMIG_2","IMMIG_3"),as.numeric)
FData <- FData%>%mutate_all(~replace(., is.na(.), 9999))
colnames(FData) <- c("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG",
"TEACHSUP","PVSCIE1","PVSCIE2","PVSCIE3",
"PVSCIE4","PVSCIE5","PVSCIE6","PVSCIE7","PVSCIE8","PVSCIE9","PVSCIE10",
"ST016Q01NA","W_FSTUWT","SENWT","IMMIG_2","IMMIG_3")
PV1<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE1",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV2<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE2",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV3<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE3",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV4<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE4",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV5<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE5",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV6<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE6",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV7<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE7",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV8<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE8",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV9<- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE9",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
PV10 <- FData%>%select("ST004D01T","IMMIG","ESCS","MOTIVAT","ANXTEST","EMOSUPS","BELONG","TEACHSUP","PVSCIE10",
"ST016Q01NA","SENWT","IMMIG_2","IMMIG_3")
dataname <- list(PV1,PV2,PV3,PV4,PV5,PV6,PV7,PV8,PV9,PV10)
names(dataname) <- c("PV1","PV2","PV3","PV4","PV5","PV6","PV7","PV8","PV9","PV10")
dir.create(paste0("~\\PISA 2015\\Analysis\\",folder))
for(i in 1:length(dataname)){
write.table(dataname[i], file = paste0("~\\PISA 2015\\Analysis\\",file,"\\",names(dataname[i]), ".txt"), sep = "\t",row.names = FALSE,col.names = FALSE,quote = FALSE)
}
}
Anglo_Data <- PISA(Data = Anglo, folder = "Anglo", file = "Anglo")
Latin_Eu_Data_revised <- PISA(Data =Latin_Eu,folder = "Latin_Europe_Revised",file = "Latin_Europe_Revised")
Nordic_Data <- PISA(Data =Nordic,folder = "Nordic",file = "Nordic")
German_Data <- PISA(Data =German,folder = "German",file = "German")
East_EU_Data <- PISA(Data =East_EU,folder = "Eastern_Europe_Revised",file = "Eastern_Europe_Revised")
Latin_A_Data <- PISA(Data =Latin_A,folder = "Latin_America_Revised",file = "Latin_America_Revised")
Middle_E_Data <- PISA(Data =Middle_E,folder = "Middle_East_Revised",file = "Middle_East_Revised")
South_A_Data <- PISA(Data =South_A,folder = "Southern_Asia",file = "Southern_Asia")
East_A_Data <- PISA(Data =East_A,folder = "Confucian_Asia",file = "Confucian_Asia")
|
cc7a1f1e31102b457756a23cc91115a07ea9ffd4
|
161fc5151a7d816086b9c68e1cb5d591053253c0
|
/statistical_analysis_of_datasets.R
|
4e617f4a22f7264f984a7c1750536caec595edf4
|
[] |
no_license
|
RichardCurran/Temp-rainfall-Ireland
|
e58309207a7070ad7a6577bf617222a0ac8b8ab8
|
115c4edf18c079b160361663b8d99226d6599032
|
refs/heads/master
| 2021-02-09T05:27:51.992630
| 2020-03-02T00:33:01
| 2020-03-02T00:33:01
| 244,246,557
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,120
|
r
|
statistical_analysis_of_datasets.R
|
library(ggpubr)
library(pwr)
library(dplyr)
library(tibble)
#install.packages("finalfit")
library(finalfit)
#install.packages("naniar")
library(naniar)
#importing data
rainfall <- read.csv('rainfall.csv',
header = TRUE,
sep = ",",
strip.white = TRUE,
stringsAsFactors = FALSE)
temp <- read.csv('temp.csv',
header = TRUE,
sep = ",",
strip.white = TRUE,
stringsAsFactors = FALSE)
str(rainfall)
# keeping only the wanted rows: any row NOT containing
# a certain string is removed
# only want the mean (given in dataset)
temp <- temp[grepl("Mean", temp$X.1),]
rainfall <- rainfall[grepl("Total", rainfall$X.1),]
#merging two columns and removing old column
temp$X.1 <- paste(temp$X, temp$X.1)
temp <- temp[,-1]
rainfall$X.1 <- paste(rainfall$X, rainfall$X.1)
rainfall <- rainfall[,-1]
# dealing with non-interger values
temp[temp == ".."] <- NA
rainfall[rainfall== ".."] <- NA
# making rainfall and temp the variables in each dataframe
# by transposing the columns and rows
temp <-temp %>%
t() %>%
as.data.frame(stringsAsFactors = F) %>%
rownames_to_column("value") %>%
`colnames<-`(.[1,]) %>%
.[-1,] %>%
`rownames<-`(NULL)
rainfall <-rainfall %>%
t() %>%
as.data.frame(stringsAsFactors = F) %>%
rownames_to_column("value") %>%
`colnames<-`(.[1,]) %>%
.[-1,] %>%
`rownames<-`(NULL)
#for visualising the missing data and giving percentage that is missing
# 7 of the 15 weather stations have a substantial number of missing values
# it is not known what caused these NA values
# but they will be ignored in the analysis and will be dropped
# imputation would be the preferred choice over dropping the variables
# but the variables that remain should give a decent view of the data even
# with the missing data being dropped
# this will of course affect the accuracy of the study
par(mfrow=c(2,4))
gg_miss_var(rainfall)
gg_miss_var(temp)
pct_miss(rainfall)
pct_miss(temp)
#do not want to lose any data relating to the years,
# so will swap the rows and columns again in order
# to drop the weather stations that have NA rather than
# dropping the years as this would greatly affect
# the granularity of the data
temp <-temp %>%
t() %>%
as.data.frame(stringsAsFactors = F) %>%
rownames_to_column("value") %>%
`colnames<-`(.[1,]) %>%
.[-1,] %>%
`rownames<-`(NULL)
rainfall <-rainfall %>%
t() %>%
as.data.frame(stringsAsFactors = F) %>%
rownames_to_column("value") %>%
`colnames<-`(.[1,]) %>%
.[-1,] %>%
`rownames<-`(NULL)
#removing na
temp <- na.omit(temp)
rainfall <- na.omit((rainfall))
#transposing again
temp <-temp %>%
t() %>%
as.data.frame(stringsAsFactors = F) %>%
rownames_to_column("value") %>%
`colnames<-`(.[1,]) %>%
.[-1,] %>%
`rownames<-`(NULL)
rainfall <-rainfall %>%
t() %>%
as.data.frame(stringsAsFactors = F) %>%
rownames_to_column("value") %>%
`colnames<-`(.[1,]) %>%
.[-1,] %>%
`rownames<-`(NULL)
# renaming variables
colnames(temp) <- c("Month & Year", "Belmullet Temp","Valentia Temp", "Casement Temp", "Cork Temp",
"Dublin Temp","Malin Head Temp", "Mullingar Temp", "Shannon Temp")
colnames(rainfall) <-c("Month & Year","Belmullet Rain","Valentia Rain", "Casement Rain", "Cork Rain",
"Dublin Rain","Malin Head Rain", "Mullingar Rain", "Shannon Rain")
# merging the two datasets by the X.1 colum (yearly and monthly data)
total <- merge(temp, rainfall, by = "Month & Year")
total_data <- merge(temp, rainfall, by = "Month & Year")
total <-total_data[,-1]
rownames(total) <- total_data[,1]
total[, 2:ncol(total)] <- sapply(total[, 2:ncol(total)],
as.numeric)
#####ignore##### this is for yearly data, replacing each variable with the mean for that year######
#sorting out the years
#only want to know averagae yearly values for temp and rain
# so edit each value in Month & Yeart varible to only contain the year
# find.list <- list("X", "M01", "M02", "M03",
# "M04", "M05", "M06", "M07",
# "M08", "M09", "M10", "M11", "M12")
# find.string <- paste(unlist(find.list), collapse = "|")
# # #
# total_data$`Month & Year` <- gsub(find.string, replacement = "", x = total_data$`Month & Year`)
# total_data$`Month & Year` <- as.numeric(total_data$`Month & Year`)
#levels(total_data$`Month & Year`)
# rm(Ulster)
#
# #making the first colum the index (years and months)
# # <-total_data[,-1]
# # rownames(total) <- total_data[,1]
# #
# #
# # #new dataframe containg the average temp for each station
# # # convert all columns excpet first to numeric
# # # calculate the mean values for each factor variable
# # # making the first colum the index (years)
# total[, 1:ncol(total)] <- sapply(total[, 1:ncol(total)],
# as.numeric)
# #
# yearly1 <- aggregate(total[,2:ncol(total)],
# by = list(total_data$`Month & Year`),
# FUN = mean)
# summary(total_data)[["1st Qu."]]
# outvals <- boxplot(Munster, plot = FALSE)$out
#
# outvals
# summary(total)
# splitting into two groups; before the millenium and after
# #1990s
# nineties_avg <- yearly1 %>%
# filter(Group.1 < 2000)
#
# as.factor(nineties_avg$Group.1)
# boxplot(`Malin Head Rain`~ Group.1, data = nineties_avg)
# plot(nineties_avg$Group.1, nineties_avg$`Malin Head Rain`)
#
# # 2000's
# millenia_avg <-yearly1 %>%
# filter(Group.1 > 1999)
#
#
#
# yearly <- yearly1[,-1]
# rownames(yearly) <- yearly1[,1]
#
#
#rainfall is the dependent variable and temperature is the independent
# want to see if rainfall is affected by temp change
#make dataframe numeric
total_data[, 1:ncol(total)] <- sapply(total_data[, 1:ncol(total_data)],
as.numeric)
str(total_data)
# before Splitting dataset into groups by province,
# checking each weather station data in Connaucht (only one with stations >2)
# they are positively skewed. from the histograms and density graph
par(mfrow = c(2,2))
hist(total$`Belmullet Rain`, main = "Belmullet rain hist.")
hist(total$`Mullingar Rain`, main = "Mullingar rain hist.")
hist(total$`Shannon Rain`, main = "Shannon rain hist.")
#median is a better approximation and so will be used to
# represent the province as a whole
plot(density(total$`Belmullet Rain`),main = "Example of Connaucht skewness")
abline(v =c(median(total$`Belmullet Rain`), mean(total$`Belmullet Rain`)),
col = c('red','blue'), lty = 2)
# splitting dataset into data pertaining to each province
Ulster <- total %>% select(`Malin Head Temp`,`Malin Head Rain`)
Munster <- total %>% select(`Valentia Temp`,`Valentia Rain`)
Leinster <- total %>% select(`Casement Temp`, `Casement Rain`,
`Dublin Temp`, `Dublin Rain`)
Connaucht <- total %>% select(`Belmullet Temp`, `Belmullet Rain`,
`Mullingar Temp`, `Mullingar Rain`,
`Shannon Temp`, `Shannon Rain`)
#mean rain and temp for connaucht and leinster for each year
# as they contain > 1 weather station
# this will unfortunately affect granularity
Connaucht$Mean_Temp <- apply(Connaucht[,c("Belmullet Temp","Mullingar Temp",
"Shannon Temp")], 1, median)
Connaucht$Mean_Rain <- apply(Connaucht[,c("Belmullet Rain", "Mullingar Rain",
"Shannon Rain")], 1, median)
Leinster$Mean_Temp <- apply(Leinster[,c("Casement Temp", "Dublin Temp")], 1, mean)
Leinster$Mean_Rain <- apply(Leinster[,c("Casement Rain", "Dublin Rain")], 1, mean)
# median, mean, stdev, skew and kurtosis for each province
my_stats <- function(x, na.omit = FALSE){
if(na.omit)
x <- x[!is.na(x)] #omits missing values
med <- median(x)
m <- mean(x)
n <- length(x)
s <- sd(x)
skew <- sum((x-m) ^ 3 / s ^ 3)/n
kurt <- sum((x-m) ^ 4 / s ^ 4)/n - 3
return(c(median= med,n = n, mean = m, stdev = s, skew = skew,
kurtosis = kurt))
}
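# quick illustrative check on simulated data (not part of the analysis):
# set.seed(1); my_stats(rnorm(1000)) # skew ~ 0 and (excess) kurtosis ~ 0 for a normal sample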
sapply(Connaucht[,7:ncol(Connaucht)], my_stats)
sapply(Munster, my_stats)
sapply(Ulster, my_stats)
sapply(Leinster[,5:ncol(Leinster)], my_stats)
#each has kurtosis below that of a normal distribution, indicating a platykurtic
# distribution with fewer extreme values at the tails
# further edging us toward a non-parametric test
#function telling how many outliers are present in each group
# there are very few outliers per group compared to the amount of data
outvals <- function(x){
  outliers <- boxplot(x, plot = FALSE)$out
  return(length(outliers)) # count every outlier, not just the positive-valued ones
}
sapply(Ulster, outvals)
sapply(Connaucht, outvals)
sapply(Munster, outvals)
sapply(Leinster, outvals)
#density plots and histograms for each province
# show the skewness of the dependent variable
d_u <- density(Ulster$`Malin Head Rain`)
d_c <- density(Connaucht$Mean_Rain)
d_m <- density(Munster$`Valentia Rain`)
d_l <- density(Leinster$Mean_Rain)
par(mfrow=c(2,2))
#density plots; a vertical dashed line is drawn at the median
# (rather than the mean) because of the positive skewness
plot(d_u, main = "Ulster Rain dist.")
abline(v = 87.8,#median
lty = 2, col = "blue")
plot(d_c, main = "Connaucht Rain dist.")
abline(v =79.5 ,#median
col ="blue", lty = 2)
plot(d_m, main = "Munster Rain dist.")
abline(v = 122.6,#median
lty = 2, col = "blue")
plot(d_l, main = "Leinster Rain dist.")
abline(v = 60.3,#median
lty = 2, col = "blue")
hist(Ulster$`Malin Head Rain`, main = "Ulster Rain hist.")
hist(Connaucht$Mean_Rain, main = "Connaucht Rain hist.")
hist(Munster$`Valentia Rain`, main = "Munster Rain hist.")
hist(Leinster$Mean_Rain, main = "Leinster Rain hist.")
# positively skewed; best to use median.
#effect size for the analysis is 0.3
# correlation is the analysis type
cohen.ES(test = "r", size = "medium")
# type 2 error tolerance of 0.1 so Power = 0.90
# type 1 error tolerance is usually 0.05. anything
# greater than this and H0 must be accepted, or we risk
# making a type 1 error
# two sided test (default); using two tail test because
# regardless of the direction of the relationship, I am
# testing for the possibility of a relationship in both directions
r_test <- pwr.r.test(r = 0.3, sig.level = 0.05, power = 0.9, alternative = "two.sided")
plot(r_test)
#taking a sample of 150, above the size recommended by the power analysis (n ≈ 113)
set.seed(1234)
sample <- dplyr::sample_n(Ulster, 150)
sample_c <- dplyr::sample_n(Connaucht, 150)
sample_m <- dplyr::sample_n(Munster, 150)
sample_l <- dplyr::sample_n(Leinster, 150)
#normality plot
# to see if the data follows a normal distribution.
# (the central limit theorem only guarantees approximate normality of the sampling
# distribution of the mean for n > 30, not of the raw data itself)
ggqqplot(sample$`Malin Head Rain`, main ="Ulster Normality")
ggqqplot(sample_c$Mean_Rain, main = "Connaucht Normality")
ggqqplot(sample_m$`Valentia Rain`, main = "Munster Normality")
ggqqplot(sample_l$Mean_Rain, main = "Leinster Normality")
#shapiro test for normality, as visually determining normality is unreliable with
# large samples
# p value is << 0.05 meaning a non-parametric test must be used
# also the plots shows the data does not follow a normal distribution - high kurtosis
shapiro.test(sample$`Malin Head Rain`)
shapiro.test(sample_c$Mean_Rain)
shapiro.test(sample_m$`Valentia Rain`)
shapiro.test(sample_l$Mean_Rain)
# correlation test: spearman test
# ulster
cor.test(sample$`Malin Head Temp`, sample$`Malin Head Rain`,
method = "spearman", exact = FALSE)
# munster
cor.test(sample_m$`Valentia Temp`, sample_m$`Valentia Rain`,
method = "spearman", exact = FALSE)
#connacht
cor.test(sample_c$Mean_Temp, sample_c$Mean_Rain,
method = "spearman", exact =FALSE)
#
# #cor.test(sample_c$`Belmullet Temp`, sample_c$`Belmullet Rain`,
# method = "spearman", exact =FALSE)
#
# cor.test(sample_c$`Mullingar Temp`, sample_c$`Mullingar Rain`,
# method = "spearman", exact =FALSE)
# cor.test(sample_c$`Shannon Temp`, sample_c$`Shannon Rain`,
# method = "spearman", exact =FALSE)
# Leinster
cor.test(sample_l$Mean_Temp, sample_l$Mean_Rain,
method = "spearman", exact = FALSE)
#Leinster accepts the null hypothesis as p-value > 0.05; avoiding a type 1 error
# each of the other provinces has p-value << 0.05 and so accepts the alternate hypothesis;
# avoiding a type 2 error (accepting H0)
library(forecast)
write.csv(Ulster, file = "ulster.csv")
y_range <- c(min(sample_m$`Valentia Rain`), max(sample_m$`Valentia Rain`))
plot(ma(sample_m$`Valentia Rain`, 15), ylim = y_range)
Acf(Ulster$`Malin Head Rain`) # Acf() expects a numeric series, not a data frame
library(tseries)
adf.test(Ulster$`Malin Head Rain`)
Pacf(Ulster$`Malin Head Rain`)
ndiffs(Ulster$`Malin Head Rain`) # the original `h` was undefined; assuming the rainfall series was meant
|
534f5e964712e688e77fa8dc1c67a8fc86b1f628
|
806a2b55bdbadb4e377eb3a4ec610df73e4ec413
|
/Airbnb_NYC_dash/global.R
|
f6bda07087c5ec992eae78300b71e10419fab3c7
|
[] |
no_license
|
staneaurelius/Airbnb-Shiny-Dashboard
|
e9686944e20f8382f751ad7d727a8849712a9b9d
|
f45b029c0c5be8387b193c57beb5ffbf2bc67dc5
|
refs/heads/main
| 2023-07-01T14:36:40.078778
| 2021-08-03T13:11:41
| 2021-08-03T13:11:41
| 391,693,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 700
|
r
|
global.R
|
library(shiny)
library(shinythemes)
library(shinydashboard)
library(shinyWidgets)
library(leaflet)
library(tidyverse)
library(glue)
library(scales)
library(DT)
library(plotly)
# Data
ab_nyc <- read.csv("AB_NYC_2019.csv")
ab_nyc <- ab_nyc %>%
select(-c(id, host_id, last_review, reviews_per_month)) %>%
rename(borough = neighbourhood_group) %>%
mutate(across(c(borough, neighbourhood, room_type),
factor)) %>%
mutate(room_type = recode(room_type,
"Entire home/apt" = "Entire Home/Apartment",
"Private room" = "Private Room",
"Shared room" = "Shared Room"))
|
5c434746855d98aa680a431da2d559e135331052
|
87d3e0f0468d71a362025ca235164608e84d08d7
|
/sound-transit/soundtransit-cities.R
|
2fb994017b164aea6234989fc229648efe7f59e7
|
[] |
no_license
|
psrc/ofm
|
eaeb2fe0adc0adedc103966dfff6aa5c82bdbc9e
|
a439f11f248f4dc907aee33a2f46c0f5674accbd
|
refs/heads/master
| 2021-06-17T12:59:08.124433
| 2021-03-12T18:26:31
| 2021-03-12T18:26:31
| 170,179,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,886
|
r
|
soundtransit-cities.R
|
# Top 10 cities with largest absolute & % growth
curr.dir <- getwd()
this.dir <- dirname(rstudioapi::getSourceEditorContext()$path)
setwd(this.dir)
source("soundtransit-settings.R")
juris.col <- paste0("Juris", max(years))
cols <- c("COUNTYFP10", juris.col)
juris.part.cols <- c("Auburn", "Bothell", "Enumclaw", "Milton", "Pacific")
# find ST cities (based on block assignments)
setDT(blocks)
blocks[, GEOID10 := as.character(GEOID10)]
stdf <- unique(ofm[GEOID10 %in% blocks$GEOID10, ..cols])
# clean
stdf[, county_name := switch(COUNTYFP10, "033" = "King", "035" = "Kitsap", "053" = "Pierce", "061" = "Snohomish"), by = COUNTYFP10
][, (juris.col) := str_to_title(get(eval(juris.col)))]
stdf[get(eval(juris.col)) %in% juris.part.cols, (juris.col) := paste(get(eval(juris.col)), "(part)")]
stdf[get(eval(juris.col)) == "Seatac", (juris.col) := "SeaTac"]
stdf[get(eval(juris.col)) == "Dupont", (juris.col) := "DuPont"]
stdf[get(eval(juris.col)) %like% "Unincorporated", (juris.col) := paste(get(eval(juris.col)), "County")]
setnames(stdf, juris.col, "juris")
# read in april 1 pop ests
pofm <- read.xlsx("ofm_april1_population_final.xlsx", sheet = "Population", start = 5, colNames = TRUE)
setDT(pofm)
pop.cols <- colnames(pofm)[str_which(colnames(pofm), "Population")]
pofm <- pofm[Filter != ".",][, lapply(.SD, as.numeric), .SDcols = pop.cols, by = .(Filter, County, Jurisdiction)]
podt <- pofm[stdf, on = c("County" = "county_name", "Jurisdiction" = "juris")]
setnames(podt, pop.cols, yrs.cols)
# aggregate parts
ptdt <- podt[Jurisdiction %like% "(part)"
][, lapply(.SD, sum), .SDcols = yrs.cols, by = .(Filter, Jurisdiction)
][, Jurisdiction := str_replace(Jurisdiction, "\\(part\\)", "(all)")]
odt <- rbindlist(list(podt, ptdt), use.names = T, fill = T)
# calculate
cols1 <- rep(max(yrs.cols), 2)
cols2 <- c(yrs.cols[length(yrs.cols)-1],min(yrs.cols))
delta.cols <- paste0("delta_", cols1, "-", cols2)
share.cols <- paste0("share_", delta.cols)
odt[, (delta.cols) := mapply(function(x, y) .SD[[x]]-.SD[[y]], cols1, cols2, SIMPLIFY = F)]
odt[, (share.cols) := mapply(function(x, y) .SD[[x]]/.SD[[y]], delta.cols, cols2, SIMPLIFY = F)]
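## e.g. with yrs.cols = c("2010", "2019", "2020") this yields "delta_2020-2019" and
## "delta_2020-2010" plus the matching "share_delta_..." growth-rate columns
## (year values here are illustrative; the real ones come from soundtransit-settings.R)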
dt <- odt[!(Jurisdiction %like% "Uninc") & !(Jurisdiction %like% "part")]
# top 10 lists
sort.cols <- c(delta.cols, share.cols)
sel.cols1 <- c(cols2[1], cols1[1])
sel.cols2 <- c(cols2[2], cols1[2])
all.sel.cols <- rep(list(sel.cols1, sel.cols2), 2)
calc.cols1 <- c(delta.cols[1], share.cols[1])
calc.cols2 <- c(delta.cols[2], share.cols[2])
all.calc.cols <- rep(list(calc.cols1, calc.cols2), 2)
dts <- NULL
for (i in 1:length(sort.cols)) {
t <- dt[order(-get(eval(sort.cols[i])))][1:10,]
tcols <- c("Jurisdiction", "County", all.sel.cols[[i]], all.calc.cols[[i]])
tt <- t[, ..tcols]
setnames(tt, "Jurisdiction", "Municipality")
dts[[i]] <- tt
}
write.xlsx(dts, "sound_transit_top10lists.xlsx")
|
9719b0947a841dbb30e87f23a2abaa4992cc8e36
|
919b62802e62e5647c898870e702f20f9a02271c
|
/plot2.R
|
86e412e906570571ee14d0b00ded664f0b31ce08
|
[] |
no_license
|
przemo/ExData_Plotting1
|
3e7074548bf5274cf2fd5d091752712edce82246
|
226a6a663e67c9e5fa422aa401d6d7c3f3109fed
|
refs/heads/master
| 2021-01-17T09:08:53.233999
| 2014-11-09T21:29:38
| 2014-11-09T21:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
plot2.R
|
## data frame is in the object 'dt'
png(filename = "plot2.png", width = 480, height = 480)
plot( dt$DateTime, dt$Global_active_power, main="", ylab="Global Active Power (kilowatts)", xlab="", type="l")
dev.off()
|
768d129d7b2df8c694e28a78d1bbd7f0810c3c70
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rcosmo/examples/plot.variogram.Rd.R
|
1ee4a106cdd8b202451102acb2bd9edd751f6195
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
plot.variogram.Rd.R
|
library(rcosmo)
### Name: plot.variogram
### Title: Plot sample variogram
### Aliases: plot.variogram
### ** Examples
## Download the map first
# downloadCMBMap(foreground = "smica", nside = 1024)
# df <- CMBDataFrame("CMB_map_smica1024.fits")
# cmbdf <- sampleCMB(df, sample.size = 100000)
# varcmb <- variogramCMB(cmbdf, max.dist = 0.1, num.bins = 30, sample.size=1000)
# plot(varcmb)
|
157cc7735bfccce65969c41019e79d1117a5f3fa
|
6e417befc45a906d5d918ff1bd59a6de25a406fb
|
/2017数据科学训练营作业第三题陈轶伦数理统计.R
|
f512f3a093a372511dec90f06a64bc7983116206
|
[] |
no_license
|
Magellen/movie-analyse
|
51346953b784f8ae6bec54cdc276dbbc26428a26
|
16a2747831e86d58ad5975e16c2912475dd97366
|
refs/heads/master
| 2021-01-01T20:02:54.948449
| 2017-07-29T18:18:27
| 2017-07-29T18:18:27
| 98,751,129
| 0
| 1
| null | null | null | null |
GB18030
|
R
| false
| false
| 4,304
|
r
|
2017数据科学训练营作业第三题陈轶伦数理统计.R
|
rm(list=ls())
setwd("D:/大四下/夏令营/作业题目/三.电影评论分析")
library(tidyverse)
library(stringr)
library(ggplot2)
library(jiebaR)
library(text2vec)
library(glmnet)
data<-read.csv("douban.csv")
data[,2]<-data[,2]%>%as.character()
for (i in 1:length(data[,2]))
{
data[i,2]<-str_sub(data[i,2],2,5)
}
data[,2]%>%is.na()%>%sum
which(is.na(data[,2]))
data1<-data[c("上映年份","评分","评价人数")]
qplot(data1[,1],data1[,2])
qplot(data1[,1],data1[,3])
hero<-data%>%select(主演)%>%as.data.frame()
hero<-apply(hero,1,as.character)
herodist<-list[""]
herodist<-strsplit(hero,"/")
for (i in 1:1810)
{
for(j in 1:length(herodist[[i]]))
{
herodist[[i]][j]<-gsub(" ","",herodist[[i]][j])
}
}
mtype<-data%>%select(类型)%>%as.data.frame()
mtype<-apply(mtype,1,as.character)
mtypedist<-list[""]
mtypedist<-strsplit(mtype,"\\n")
mtypedist%>%unlist%>%unique()%>%length()
mtypem<-data.frame()
typename<-mtypedist%>%unlist%>%unique()
for(i in 1:1810)
{
  # one-hot encode each movie's genres (the original `==` recycled vectors of unequal length; %in% is the intended semantics)
  if (length(mtypedist[[i]]) > 0) {
    mtypem <- rbind(mtypem, as.numeric(typename %in% mtypedist[[i]]))
  } else {
    mtypem <- rbind(mtypem, rep(0, 32))
  }
}
colnames(mtypem)<-c(typename)
typesummary<-mtypedist%>%summary
typesummary<-typesummary[,1]%>%as.numeric()
which(typesummary==0)
mplot<-data%>%select(剧情简介)%>%as.data.frame()
mplot<-apply(mplot,1,as.character)
for(j in 1:length(mplot))
{
mplot[j]<-str_sub(mplot[j],3,nchar(mplot[j]))
}
length(mplot)
fenci<-worker(bylines = TRUE,type = "mix",stop_word = "chinese_stopword.txt")
fencidist<-list[""]
fencidist<-segment(mplot,fenci)
lapply(fencidist, write.table, "test2.txt", append=TRUE)
# Time-factor analysis
dim(data1) # 1810 movies
qplot(data1[,1],data1[,2]) # rating over time
qplot(data1[,1],data1[,3]) # review count over time
qplot(data1[,1],log(data1[,3]))
qplot(data1[,1],log(data1[,3]),colour=data1[,2]) # review count over time, coloured by rating
data1%>%group_by(上映年份)%>%count()%>%plot() # the number of domestic films grows exponentially
quantile<-data1%>%group_by(上映年份)%>%count()
data%>%filter(上映年份>=2017)%>%select(片名) # titles listed ahead of release
qplot(data1[,2],log(data1[,3])) # review count is unrelated to rating
data2000<-data%>%filter(上映年份>=2000&上映年份<2017)
View(data2000)
sub2000<-data2000%>%select(上映年份,评分,评价人数)
qplot(sub2000[,1],sub2000[,2])
p<-ggplot(sub2000,aes(上映年份,log(评价人数)))
p+geom_jitter()
qplot(sub2000[,2],log(sub2000[,3]))
summary(sub2000)
sub2000[,1]<-sub2000[,1]%>%as.integer()
sub2000_1<-data2000%>%group_by(上映年份)%>%select(上映年份,评价人数)
ggplot(sub2000_1,aes(x=log(评价人数)))+geom_histogram()+facet_grid(~上映年份)
data2000$导演%>%length()
data2000$导演%>%unique()
#text to vector
it <- itoken(iterable = fencidist)
vocab <- create_vocabulary(it)
vectorizer <- vocab_vectorizer(vocab)
corpus <- create_corpus(it,vectorizer)
vocab$vocab
dtm <- corpus$get_dtm()
dim(dtm)
dtm[1:20,2500:2530]
dtm_tfidf <- TfIdf$new()
dtm_tfidf$fit(dtm)
dtm_tfidf$transform(dtm)
dtm_tfidfm<-dtm_tfidf$transform(dtm)
vocab = create_vocabulary(it, ngram = c(1L, 2L))
vocab = vocab %>% prune_vocabulary(term_count_min = 2)
bigram_vectorizer = vocab_vectorizer(vocab)
dtm_train = create_dtm(it, bigram_vectorizer)
vocab <- create_vocabulary(it)
vocab <- prune_vocabulary(vocab, term_count_min = 2)
vectorizer <- vocab_vectorizer(vocab,skip_grams_window = 2L)
tcm <- create_tcm(it, vectorizer)
dtm
glove = GlobalVectors$new(word_vectors_size = 20, vocabulary = vocab, x_max = 100)
glove$fit(tcm, n_iter = 200)
word_vectors <- glove$get_word_vectors()
dim(word_vectors)
vec<-list()
for(i in 1:length(fencidist))
{
for(j in 1:length(fencidist[[i]]))
{
vec[[i]]<-cbind(vec[[i]],word_vectors[rownames(word_vectors)==fencidist[[i]][j],])
}
}
#TF-IDF
dtm_tfidfm%>%str()
tfidfm<-as.matrix(dtm_tfidfm)%>%as.data.frame()
dim(tfidfm)
write_csv(tfidfm,"tfidfm.csv")
write_csv(mtypem,"typem.csv")
#LDA
lda_model =
LDA$new(n_topics = 50, vocabulary = vocab,
doc_topic_prior = 0.1, topic_word_prior = 0.01)
doc_topic_distr =
lda_model$fit_transform(dtm, n_iter = 100, convergence_tol = 0.01,
check_convergence_every_n = 10)
|
91648dd32403f941a2e5e08a518c694ea1046177
|
5395cdc191ff5a30d1c59e68ca0f95a288892c8b
|
/R/Oyster_Volumes.R
|
243e9ca2a3d50e1f02c98248d44ea6a7eb0c2fcf
|
[] |
no_license
|
nielsjdewinter/ShellTrace
|
fe16bb69b8981211bd24ef120627fc38d283db66
|
34dd076d72bb0812f251c986b1aad04b6849261b
|
refs/heads/master
| 2021-07-23T13:37:20.750368
| 2017-11-02T08:57:26
| 2017-11-02T08:57:26
| 105,881,428
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,835
|
r
|
Oyster_Volumes.R
|
#' @export
Oyster_Volumes <-
function(subincr_matrix, Z_mat, IncG, Xstep=0.1){
IncGAnet<-data.frame(matrix(0,ncol=length(IncG[1,]),nrow=length(IncG[,1])))
colnames(IncGAnet)<-colnames(IncG)
rownames(IncGAnet)<-rownames(IncG)
VolC<-vector()
As2<-vector()
Y1s2<-vector()
Ytops2<-vector()
Y0s2<-vector()
rs2<-vector()
Ytopbs2<-vector()
Y0bs2<-vector()
rbs2<-vector()
for(t in 1:length(IncGAnet[1,])){
cat(paste("Calculating volume for subincrement: ",t),"\r")
As<-vector()
Y1s<-vector()
Ytops<-vector()
Y0s<-vector()
rs<-vector()
Ytopbs<-vector()
Y0bs<-vector()
rbs<-vector()
for (x in 1:(length(IncGAnet[,1]))){
# Universal parameters Z and Y1
# Of which Y1 is the height of the base oval that forms the bottom of the shell under the increment, and Z is the distance from the x-axis to the side of the ellipse
Z<-Z_mat[x,t]
Y1<-subincr_matrix[t,6] + ((subincr_matrix[t,7]-subincr_matrix[t,6])/(subincr_matrix[t,3]-subincr_matrix[t,2])) * (as.numeric(rownames(IncGAnet)[x])-subincr_matrix[t,2])
if(is.na(Y1)){Y1<-0}
# Parameters increment
# Of which Ytop is the maximum height of the cross section of the shell under the increment, which is found for the X value by linear interpolation in the XY trace of the shell increment
# Ytop is the Y value calculated in the incremental growth matrix
# Y0 is the Y value of the center of the virtual circle that connects the top of the shell with the two points at the edge of the ellipse (Z1 and Z)
# Formula of circle x^2 + y^2 = r^2 with points [-Z,Y1], [0,Ytop], and [Z,Y1], and center of circle = [0, Y0] gives:
# (0-0)^2 + (Ytop-Y0)^2 = (-Z-0)^2 + (Y1 - Y0)^2 = (Z- 0)^2 + (Y1 - Y0)^2
# Y0 = (Ytop^2 - Y1^2 - Z^2) / (2*(Ytop - Y1))
# r is the radius of said circle
# r = (x^2 - y^2)^0.5 in point [Z, Y1] and center [0, Y0] gives:
# r = (Z^2 + (Y1 - Y0)^2)^0.5
Ytop<-IncG[x,t]
Y0<-(Ytop^2-Y1^2-Z^2)/(2*(Ytop-Y1))
r<-(Z^2+(Y1-Y0)^2)^0.5
# Parameters shell top (b)
# Ytopb and rb are calculated in the same way as Ytop and r above
Ytopb<-IncG[x,1]
Y0b<-(Ytopb^2-Y1^2-Z^2)/(2*(Ytopb-Y1))
rb<-(Z^2+(Y1-Y0b)^2)^0.5
# Prevent negative areas if the increment goes above the shell top
if(Ytopb<Ytop){Ytop<-Ytopb}
# Calculate area under shell increment
# The area of the circle segment that is under the growth increment is found by
# subtracting the area of the triangle formed by the center of the circle and points P2[-Z,Y1] and P3[Z,Y1] from that of the corresponding circle sector
# A_circle = A_sector - A_triangle
# = angle/(2*pi) * pi * r^2 - rsin(angle)*rcos(angle)
# = 0.5*angle*r^2 - 0.5*r^2*sin(angle)
# = 0.5*r^2*(angle - sin(angle))
# In which angle is the angle between the lines between the two points P2[-Z,Y1] and P3[Z,Y1] and the center of the circle (Pc)
# The total area under the shell increment is always equal to the area between the circle segment and the x-axis
# Area between the shell and the x-axis is not taken into account as the difference between top and shell increment is taken later on
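# Sanity check of the segment-area formula (comment only, not executed):
# for a semicircle (angle = pi, sin(pi) = 0) it gives 0.5*r^2*pi, i.e. half the circle's area, as expected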
if(is.na(r)|r==abs(Inf)){
angle<-0
r<-0
}
else angle<-asin(Z/r)*2
# If Y1 is higher than the top of the shell, the circle flips and has to be subtracted from the rectangle between Y0 and the x-axis
if(Y1>Ytop){
A<-(2*Z*Y1)-0.5*r^2*(angle - sin(angle))
}
else{
# If Y0 is in between Y1 and Ytop, then the area is calculated as the full circle formed by Y0 (center) and Ytop minus the circle section under Y1
if(Y0>Y1 & !is.na(Y1) & !is.na(Y0)){
A<-r^2*pi-(0.5*r^2*(angle - sin(angle)))+(2*Z*Y1)
}
else A<-0.5*r^2*(angle - sin(angle))+(2*Z*Y1)
}
# Calculate area under shell top in the same way as the area under the increment was calculated
if(is.na(rb)|rb==abs(Inf)){
angleb<-0
rb<-0
}
else angleb<-asin(Z/rb)*2
if(Y1>Ytopb){
Ab<-(2*Z*Y1)-0.5*rb^2*(angleb - sin(angleb))
}
else{
# If Y0b is in between Y1 and Ytopb, then the area is calculated as the full circle between Y0b (center) and Ytopb minus the circle section under Y1
if(Y0b>Y1 & !is.na(Y1) & !is.na(Y0b)){
Ab<-rb^2*pi-(0.5*rb^2*(angleb - sin(angleb)))+(2*Z*Y1)
}
else Ab<-0.5*rb^2*(angleb - sin(angleb))+(2*Z*Y1)
}
# Prevent cases where increment overlaps with top of shell
if(Ytop==Ytopb){
Ab<-A
}
# Prevent cases of negative area
if(Ab<A){
Ab<-A
}
# Calculate area relative to shell top by subtracting area under top of shell from that under the shell (sub)increment
IncGAnet[x,t]<-Ab-A
As<-append(As,Ab-A)
Y1s<-append(Y1s,Y1)
Ytops<-append(Ytops,Ytop)
Y0s<-append(Y0s,Y0)
rs<-append(rs,r)
Ytopbs<-append(Ytopbs,Ytopb)
Y0bs<-append(Y0bs,Y0b)
rbs<-append(rbs,rb)
}
# Calculate total volume relative to zero line per time increment by adding up the areas of all perpendicular (YZ)cross sections under the same increment and multiplying by the increment width
VolC<-append(VolC,sum(As)*Xstep)
As2<-cbind(As2,As)
Y1s2<-cbind(Y1s2,Y1s)
Ytops2<-cbind(Ytops2,Ytops)
Y0s2<-cbind(Y0s2,Y0s)
rs2<-cbind(rs2,rs)
Ytopbs2<-cbind(Ytopbs2,Ytopbs)
Y0bs2<-cbind(Y0bs2,Y0bs)
rbs2<-cbind(rbs2,rbs)
}
# Calculate volume gain per time increment by taking the difference between each cumulative volume value and its predecessor.
# WARNING: changes in the slope of Y1 will be amplified into large oscillations of VolI. It is advisable to smooth VolI before further analysis
VolI<-c(VolC,0)-c(0,VolC)
VolI<-VolI[-length(VolI)]
# Prevent negative volume increments
VolI[VolI<0]<-0
VolC<-cumsum(VolI)
subincr_matrix<-cbind(subincr_matrix,VolI,VolC)
dev.new(); plot(subincr_matrix[,c(1,17)], type = "l")
diagL<-list(subincr_matrix,IncGAnet)
return(diagL)
}
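# Illustrative call only (not from the original documentation); the shapes of the
# inputs are assumed from their use above: 'subincr_matrix' needs at least columns
# 1-7, 'Z_mat' is indexed as [x, t], and 'IncG' rows correspond to X positions.
# res <- Oyster_Volumes(subincr_matrix, Z_mat, IncG, Xstep = 0.1)
# res[[1]] # subincr_matrix with VolI and VolC columns appended
# res[[2]] # the matrix of net cross-section areas (IncGAnet)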
|
78dbdeee19dcbda38bbb6e9d895e2dc162a0d2cf
|
22bf6ffb3cf1f5ab81dab271b686a1cdf9c3c996
|
/R/asymptoticComplexityClass.R
|
7f8601c81d0d73c7445cbadebff4b19971fbd5e6
|
[
"MIT"
] |
permissive
|
Anirban166/testComplexity
|
0eea40ffd81760b7d645090e403dc6c2992ea268
|
97fc7ea93e7a14eb047b4cd6c9f7c7858f90b511
|
refs/heads/master
| 2023-04-28T19:31:15.124945
| 2022-05-07T08:40:27
| 2023-04-18T08:40:27
| 228,646,265
| 44
| 1
|
NOASSERTION
| 2020-08-23T17:20:59
| 2019-12-17T15:30:22
|
R
|
UTF-8
|
R
| false
| false
| 1,901
|
r
|
asymptoticComplexityClass.R
|
#' Function to classify the complexity trend between two selected parameters from the data frame provided as input here
#'
#' @title Asymptotic Complexity Classification function
#'
#' @param df A data frame comprising at least two columns: one containing the output sizes and one containing the data sizes.
#'
#' @param output.size A string specifying the column name in the passed data frame to be used as the output size.
#'
#' @param data.size A string specifying the column name in the passed data frame to be used as the data size.
#'
#' @return A string specifying the resultant complexity class. (Eg: 'Linear', 'Log-linear', 'Quadratic')
#'
#' @details For more information regarding its implementation or functionality/usage, please check https://anirban166.github.io//Generalized-complexity/
#'
#' @export
#'
#' @examples
#' # Avoiding for CRAN since computation time might exceed 5 seconds sometimes:
#' \dontrun{
#' # Running the quick sort algorithm with sampling against a set of increasing input data sizes:
#' data.sizes = 10^seq(1, 3, by = 0.5)
#' df <- asymptoticTimings(sort(sample(1:100, data.sizes, replace = TRUE), method = "quick"), data.sizes)
#' # Classifying the complexity trend between the data contained in the columns
#' # 'Timings' and 'Data sizes' from the data frame obtained above:
#' asymptoticComplexityClass(df, output.size = "Timings", data.size = "Data sizes")
#' # For quick sort, the log-linear time complexity class is expected.
#' }
asymptoticComplexityClass = function(df, output.size, data.size)
{
  if(is.data.frame(df) && output.size %in% colnames(df) && data.size %in% colnames(df))
{
d <- data.frame('output' = df[[output.size]], 'size' = df[[data.size]])
asymptoticComplexityClassifier(d)
}
else stop("Input parameter must be a data frame containing the two specified columns passed as parameters")
}
|
9aed5e45fd7c812106234a2dcb7b3262f949de57
|
ebee9629abd81143610a6352288ceb2296d111ac
|
/tests/53-nn-density.R
|
03137efef81872e37b46220027c81fa67bdb2b8e
|
[] |
no_license
|
antiphon/Kdirectional
|
76de70805b4537a5aff0636486eb387cb64069b0
|
98ab63c3491f1497d6fae8b7b096ddd58afc4b29
|
refs/heads/master
| 2023-02-26T02:19:41.235132
| 2023-02-12T13:07:11
| 2023-02-12T13:07:11
| 37,183,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 619
|
r
|
53-nn-density.R
|
# test nn angle density, IPSS p253
library(devtools)
load_all(".")
if(!exists("x")) x <- matrix(runif(200), nc=2)
par(mfrow=c(1,3))
# 0-2pi
f <- nnangle.density(x, antipodal = F)
plot(f, type ="l", ylim=yl<-c(0,.6))
abline(h=1/(2*pi))
# wrapping working?
plot(f, type ="l", xlim=c(-pi,3*pi), ylim=yl)
lines(f$angle+pi*2, f$density, col=2)
lines(f$angle-pi*2, f$density, col=2)
# 0-pi
fa <- nnangle.density(x, antipodal = T)
plot(fa, type ="l", ylim=yl)
abline(h=1/(pi))
# data input:
D <- nnangle.density(x, justData = T , antipodal=T)
fb <- nnangle.density(data = D, antipodal = T)
lines(fb, col=2, lty=3)
|
3f13cb5c8cb232f5050a4d219de0a7aa5e3b2f3e
|
3eb3c7f82edbe3e5dc027443113f93d47fabd3d1
|
/man/make_table.Rd
|
5afcb83af4cf914325a42cf0d830746cbf865720
|
[
"MIT"
] |
permissive
|
dpc-nickwilliams/djprlabourdash
|
3a6a95a2a3307400168a3a394238aa36d81b7c1b
|
8be8cba6a550b5fbba469542d194aea5bad87b2d
|
refs/heads/main
| 2023-07-16T02:13:43.236199
| 2021-08-31T05:49:10
| 2021-08-31T05:49:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,765
|
rd
|
make_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_table.R
\name{make_table}
\alias{make_table}
\title{Create a table for the dashboard or a briefing document}
\usage{
make_table(
data,
destination = Sys.getenv("R_DJPRLABOURDASH_TABLEDEST", unset = "dashboard"),
years_in_sparklines = 3,
row_order = NULL,
highlight_rows = NULL,
notes = NULL,
title = "",
rename_indicators = TRUE
)
}
\arguments{
\item{data}{A data frame containing data to summarise}
\item{destination}{"dashboard" or "briefing"}
\item{years_in_sparklines}{Period of time to include in the sparkline line
charts.}
\item{row_order}{Vector of series IDs, in the order in which you wish the
corresponding rows to be included in the output table}
\item{highlight_rows}{Vector of series IDs, corresponding to rows
in the table to highlight.
Highlighted rows are bolded and have a top border; non-highlighted rows
are indented. If \code{NULL} then all rows are non-bold, non-indented.}
\item{notes}{Optional notes to add to caption. Source will be inferred
automatically based on the data using \code{caption_auto()}.}
\item{title}{Character vector to use as the table title. Will only be used
when \code{destination} is "briefing".}
\item{rename_indicators}{logical; default is \code{TRUE}. If \code{TRUE}, the
\code{rename_indicators()} function will be used to rename certain indicators.}
}
\description{
Create a table for the dashboard or a briefing document
}
\examples{
# dash_data <- load_dash_data()
\dontrun{
make_table(
data = filter_dash_data(series_ids = c(
"A84423354L",
"A84423242V",
"A84423466F",
)),
row_order = c(
"A84423354L",
"A84423242V",
"A84423466F"
),
highlight_rows = c("A84426256L")
)
}
}
|
459a156e38030e2e768fcfe42f4043c3133608c4
|
af4645ff58fe1a3d4505ba26a999eb33910595b3
|
/R/apiUtils.r
|
7f97f298df6f26abde399617c51a25499bc1e8fb
|
[
"MIT"
] |
permissive
|
rwalrond/razdatalake
|
0d373200eb5b2e471cdd07189746f4d14acc1aa8
|
e6e4933773c7d4be70fd8e8856bcae80f9384f3b
|
refs/heads/master
| 2020-03-13T00:12:12.731779
| 2018-04-30T04:21:53
| 2018-04-30T04:21:53
| 130,882,128
| 0
| 0
| null | 2018-04-24T16:16:52
| 2018-04-24T16:16:51
| null |
UTF-8
|
R
| false
| false
| 3,988
|
r
|
apiUtils.r
|
#' getToken
#'
#' @export
#'
#' @param tenantID String the tenant Id.
#' @param clientID String the client Id.
#' @param apiKey String the API secret.
#'
#' @return String the access token
getToken <- function(tenantID, clientID, apiKey) {
body <- list(
grant_type="client_credentials",
resource="https://management.core.windows.net/",
client_id=clientID,
client_secret=apiKey
)
url <- paste0("https://login.microsoftonline.com/", tenantID, "/oauth2/token")
res <- httr::POST(url, body = body)
if (res$status_code == 200) {
js <- httr::content(res, as = "parsed")
js$access_token
}
else {
print(res)
stop("HTTP Error")
}
}
#' listFolder
#'
#' @export
#'
#' @param datalake String the data lake name.
#' @param folder String the subfolder name.
#' @param token String the Bearer token.
#'
#' @return List complete contents of a folder.
listFolder <- function(datalake, folder, token) {
url <- paste0("https://", datalake, ".azuredatalakestore.net/webhdfs/v1/", folder, "?op=LISTSTATUS")
res <- httr::GET(url, httr::add_headers(Authorization = paste0("Bearer ", token)))
if (res$status_code == 200) {
js <- httr::content(res, as = "parsed")
js$FileStatuses[[1]]
}
else {
print(res)
stop("HTTP Error")
}
}
#' listFiles
#'
#' @export
#'
#' @param datalake String the data lake name.
#' @param folder String the subfolder name.
#' @param token String the Bearer token.
#'
#' @return List pathSuffix for each file in the folder.
listFiles <- function(datalake, folder, token) {
folders <- listFolder(datalake, folder, token)
vapply(folders, function(x) { x$pathSuffix }, FUN.VALUE = character(1))
}
#' getFile
#'
#' @export
#'
#' @param datalake String the data lake name.
#' @param folder String the subfolder name.
#' @param filename String the file name.
#' @param token String the Bearer token.
#'
#' @return String text contents of a file.
getFile <- function(datalake, folder, filename, token) {
url <- paste0("https://", datalake, ".azuredatalakestore.net/webhdfs/v1/", folder, "/", filename, "?op=OPEN&read=true")
res <- httr::GET(url, httr::add_headers(Authorization = paste0("Bearer ", token)))
if (res$status_code == 200) {
js <- httr::content(res, as = "text")
js
}
else {
print(res)
stop("HTTP Error")
}
}
#' putFile
#'
#' @description NOTE: File must be stored on disk in the current directory.
#'
#' @export
#'
#' @param datalake String the data lake name.
#' @param folder String the subfolder name.
#' @param filename String the file name.
#' @param contents string contents to save
#' @param token String the Bearer token.
#'
#' @return List the `httr` response.
putFile <- function(datalake, folder, filename, token, contents = NULL) {
url <- paste0("https://", datalake, ".azuredatalakestore.net/webhdfs/v1/", folder, "/", filename, "?op=CREATE&write=true&overwrite=true")
res <- httr::PUT(
url,
    body = if (is.null(contents)) httr::upload_file(filename) else contents, # use if/else, not ifelse(): ifelse() strips the attributes of the form_file object
config = httr::add_headers(
Authorization = paste0("Bearer ", token),
`Transfer-Encoding` = "chunked"
),
httr::content_type_json()
)
if (res$status_code == 201) {
res
}
else {
print(res)
stop("HTTP Error")
}
}
#' downloadFiles
#'
#' @description Downloads all files in a datalake folder. Assumes files are ndjson format.
#'
#' @export
#'
#' @param datalake String the data lake name.
#' @param folder String the subfolder name.
#' @param token String the Bearer token.
#' @param fileList object a filtered list to download.
#'
#' @return List of data frames, one per file.
downloadFiles <- function(datalake, folder, token, fileList = NULL) {
if (is.null(fileList)) {
fileList <- listFiles(datalake, folder, token)
}
xs <- lapply(fileList, function(filename){
jsonlite::stream_in(textConnection(getFile(datalake, folder, filename, token)))
})
  ##### data.table::rbindlist(xs) crashes for me; returning the list of data frames as-is
  xs
}
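# Illustrative end-to-end usage (a sketch; 'mytenant', 'myclient', 'mykey',
# 'mylake' and 'raw/events' are placeholders, not values from this package):
# token <- getToken("mytenant", "myclient", "mykey")
# listFiles("mylake", "raw/events", token)
# dfs <- downloadFiles("mylake", "raw/events", token)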
|
33c43a0f31a5374267254766dbfc95934b15c3b0
|
36df8d41ca3fd4e6b6d3fbf2e07049dd7781dea8
|
/mutationProfiles.R
|
a9c873c45325c5d7f9d2fbf1bc48a044acbcf871
|
[] |
no_license
|
nriddiford/mutationProfiles
|
58ec365582bb7a1e1c16f0b2abc6891f2627de2d
|
4222426e0de2a18df86745ffdb9ad79d47fca53a
|
refs/heads/master
| 2021-11-07T21:06:49.786958
| 2021-11-04T10:49:40
| 2021-11-04T10:49:40
| 98,303,005
| 4
| 3
| null | 2019-10-09T10:12:29
| 2017-07-25T12:20:07
|
R
|
UTF-8
|
R
| false
| false
| 75,406
|
r
|
mutationProfiles.R
|
list.of.packages <- c('ggplot2', 'dplyr', 'plyr', 'RColorBrewer',
'BSgenome.Dmelanogaster.UCSC.dm6', 'deconstructSigs',
'reshape', 'data.table', 'ggpubr', 'plotly', 'grid', 'VennDiagram')
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)){
cat('Installing missing packages...\n')
install.packages(new.packages)
}
cat('Silently loading packages...')
suppressMessages(library(ggplot2))
suppressMessages(library(dplyr))
suppressMessages(library(plyr))
suppressMessages(library(RColorBrewer))
suppressMessages(library(BSgenome.Dmelanogaster.UCSC.dm6))
suppressMessages(library(deconstructSigs))
suppressMessages(library(reshape))
suppressMessages(library(data.table))
suppressMessages(library(ggpubr))
suppressMessages(library(plotly))
suppressMessages(library(grid))
suppressMessages(library(VennDiagram))
set.seed(42)
#' getData
#'
#' Function to read and clean annotated SNV calls
#' @param infile File to process [Required]
#' @keywords get
#' @import dplyr
#' @export
#' @return Dataframe
getData <- function(infile = "data/annotated_snvs.txt", expression_data='data/isc_genes_rnaSeq.csv'){
snv_data<-read.delim(infile, header = T)
colnames(snv_data)=c("sample", "chrom", "pos", "ref", "alt", "tri", "trans", "decomposed_tri", "grouped_trans", "a_freq", "caller", "variant_type", "status", "snpEff_anno", "feature", "gene", "id")
# Read in tissue specific expression data
seq_data<-read.csv(header = F, expression_data)
colnames(seq_data)<-c('id', 'fpkm')
snv_data <- join(snv_data,seq_data,"id", type = 'left')
snv_data$fpkm <- ifelse(is.na(snv_data$fpkm), 0, round(as.numeric(snv_data$fpkm), 1))
# Order by FPKM
snv_data<- dplyr::arrange(snv_data, desc(fpkm))
  # Find vars called by both Mutect2 and Varscan2
  # Must also filter one of these calls out...
snv_data$dups<-duplicated(snv_data[,1:3])
snv_data<-mutate(snv_data, caller = ifelse(dups == "TRUE", 'varscan2_mutect2' , as.character(caller)))
##############
## Filters ###
##############
# Filter for calls made by both V and M
# snv_data<-filter(snv_data, caller == 'mutect2' | caller == 'varscan2_mutect2')
# Filter for old/new data
# cat("Filtering for old/new data\n")
# snv_data <- filter(snv_data, !grepl("^A|H", sample))
# Filter for genes expressed in RNA-Seq data
# cat("Filtering out non-expressed genes\n")
# snv_data<-filter(snv_data, !is.na(fpkm) & fpkm > 0.1)
# Filter for genes NOT expressed in RNA-Seq data
# cat("Filtering out expressed genes\n")
# snv_data<-filter(snv_data, fpkm == 0)
# Filter on allele freq
# cat("Filtering on allele frequency\n")
#snv_data<-filter(snv_data, is.na(a_freq))
# snv_data<-filter(snv_data, a_freq >= 0.20)
# Filter out samples
# snv_data<-filter(snv_data, sample != "A373R1" & sample != "A373R7" & sample != "A512R17" )
# snv_data <- filter(snv_data, !sample %in% c("A373R1", "A373R7", "A512R17", "A373R11", "A785-A788R1", "A785-A788R11", "A785-A788R3", "A785-A788R5", "A785-A788R7", "A785-A788R9"))
# snv_data<-filter(snv_data, sample != "A373R11" & sample != 'A373R13')
# snv_data <- snv_data %>%
# filter(!sample %in% c("A373R1", "A373R7", "A512R17", "A373R11", "D050R07-2")) %>%
# droplevels()
# snv_data <- snv_data %>%
# filter(sample %in% c("D050R01", "D050R03", "D050R05", "D050R07-1")) %>%
# droplevels()
dir.create(file.path("plots"), showWarnings = FALSE)
return(snv_data)
}
#' cleanTheme
#'
#' Clean theme for plotting
#' @param base_size Base font size [Default 12]
#' @import ggplot2
#' @keywords theme
#' @export
cleanTheme <- function(base_size = 12){
theme(
plot.title = element_text(hjust = 0.5, size = 20),
panel.background = element_blank(),
plot.background = element_rect(fill = "transparent",colour = NA),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
axis.text = element_text(size=12),
axis.title = element_text(size=30)
)
}
#' genTris
#'
#' This function returns all possible trinucleotide combinations
#' @keywords trinucleotides
#' @export
#' @return Character string containing all 96 trinucleotides
#' genTris()
genTris <- function(){
all.tri = c()
for(i in c("A", "C", "G", "T")){
for(j in c("C", "T")){
for(k in c("A", "C", "G", "T")){
if(j != k){
for(l in c("A", "C", "G", "T")){
tmp = paste(i, "[", j, ">", k, "]", l, sep = "")
all.tri = c(all.tri, tmp)
}
}
}
}
}
all.tri <- all.tri[order(substr(all.tri, 3, 5))]
return(all.tri)
}
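# For reference, head(genTris(), 3) returns "A[C>A]A", "A[C>A]C", "A[C>A]G";
# the 96 classes are ordered by the six substitution types (C>A, C>G, C>T, T>A, T>C, T>G).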
#' setCols
#'
#' Get colours for n levels
#' @import RColorBrewer
#' @param df Dataframe [Required]
#' @param col Column of snv_dataframe. Colours will be set to levels(df$cols) [Required]
#' @keywords cols
#' @export
setCols <- function(df, col){
names<-levels(df[[col]])
cat("Setting colour levles:", names, "\n")
level_number<-length(names)
mycols<-brewer.pal(level_number, "Set2")
names(mycols) <- names
colScale <- scale_fill_manual(name = col,values = mycols)
return(colScale)
}
#' snvStats
#'
#' Calculate some basic stats for the SNV data
#' @import dplyr
#' @keywords stats
#' @export
snvStats <- function(){
snv_data<-getData()
cat("sample", "snvs", sep='\t', "\n")
rank<-sort(table(snv_data$sample), decreasing = TRUE)
rank<-as.array(rank)
total=0
scores=list()
for (i in 1:nrow(rank)){
cat(names(rank[i]), rank[i], sep='\t', "\n")
total<-total + rank[i]
scores[i]<-rank[i]
}
cat('--------------', '\n')
scores<-unlist(scores)
mean<-as.integer(mean(scores))
med<-as.integer(median(scores))
cat('total', total, sep='\t', '\n')
cat('samples', nrow(rank), sep='\t', '\n')
cat('--------------', '\n')
cat('mean', mean, sep='\t', '\n')
cat('median', med, sep='\t', '\n')
cat('\n')
all_ts<-nrow(filter(snv_data, trans == "A>G" | trans == "C>T" | trans == "G>A" | trans == "T>C"))
all_tv<-nrow(filter(snv_data, trans != "A>G" & trans != "C>T" & trans != "G>A" & trans != "T>C"))
ts_tv<-round((all_ts/all_tv), digits=2)
cat("ts/tv = ", ts_tv, sep='', '\n')
}
#' rainfall
#'
#' Plot log10 distances between snvs as rainfall plot
#' @import ggplot2
#' @keywords rainfall
#' @export
rainfall <- function(){
snv_data <- getData()
distances <- do.call(rbind, lapply(split(snv_data[order(snv_data$chrom, snv_data$pos),], snv_data$chrom[order(snv_data$chrom, snv_data$pos)]),
function(a)
data.frame(a,
dist=c(diff(a$pos), NA),
logdist = c(log10(diff(a$pos)), NA))
)
)
distances$logdist[is.infinite(distances$logdist)] <- 0
distances<-filter(distances, chrom != 4)
p<-ggplot(distances)
p<-p + geom_point(aes(pos/1000000, logdist, colour = grouped_trans))
p <- p + cleanTheme() +
theme(axis.text.x = element_text(angle=45, hjust = 1),
panel.grid.major.y = element_line(color="grey80", size = 0.5, linetype = "dotted"),
strip.text = element_text(size=20)
)
p<-p + facet_wrap(~chrom, scale = "free_x", ncol = 6)
#p<-p + scale_x_continuous("Mbs", breaks = seq(0,33,by=1), limits = c(0, 33), expand = c(0.01, 0.01))
p<-p + scale_x_continuous("Mbs", breaks = seq(0,max(distances$pos),by=10))
rainfall_out<-paste("rainfall.pdf")
cat("Writing file", rainfall_out, "\n")
ggsave(paste("plots/", rainfall_out, sep=""), width = 20, height = 5)
p
}
#' samplesPlot
#'
#' Plot the snv distribution for each sample
#' @import ggplot2
#' @param count Output total counts instead of frequency if set [Default no]
#' @keywords spectrum
#' @export
samplesPlot <- function(count=NA){
snv_data<-getData()
mut_class<-c("C>A", "C>G", "C>T", "T>A", "T>C", "T>G")
p<-ggplot(snv_data)
if(is.na(count)){
p<-p + geom_bar(aes(x = grouped_trans, y = (..count..)/sum(..count..), group = sample, fill = sample), position="dodge",stat="count")
p<-p + scale_y_continuous("Relative contribution to total mutation load", expand = c(0.0, .001))
tag='_freq'
}
else{
p<-p + geom_bar(aes(x = grouped_trans, y = ..count.., group = sample, fill = sample), position="dodge",stat="count")
p<-p + scale_y_continuous("Count", expand = c(0.0, .001))
tag='_count'
}
p<-p + scale_x_discrete("Mutation class", limits=mut_class)
p<-p + cleanTheme() +
theme(panel.grid.major.y = element_line(color="grey80", size = 0.5, linetype = "dotted"),
axis.title = element_text(size=20),
strip.text.x = element_text(size = 10)
)
p<-p + facet_wrap(~sample, ncol = 4, scale = "free_x" )
samples_mut_spect<-paste("mutation_spectrum_samples", tag, ".pdf", sep = '')
cat("Writing file", samples_mut_spect, "\n")
ggsave(paste("plots/", samples_mut_spect, sep=""), width = 20, height = 10)
p
}
# calledSnvs - Venn diagram of the overlap between Mutect2 and Varscan2 calls
calledSnvs <- function(){
snv_data<-getData()
calls<-table(snv_data$caller)
calls<-as.data.frame(unlist(calls))
calls$Var1 <- as.factor(calls$Var1)
grid.newpage()
draw.pairwise.venn(area1 = calls$Freq[calls$Var1 == 'mutect2'],
area2 = calls$Freq[calls$Var1 == 'varscan2'],
cross.area = calls$Freq[calls$Var1 == 'varscan2_mutect2'],
category = c("Mutect2","Varscan2"),
#lty = rep('blank', 2),
lwd = rep(0.3, 2),
cex = rep(2, 3),
cat.cex = rep(2, 2),
fill = c("#E7B800", "#00AFBB"),
alpha = rep(0.4, 2),
cat.pos = c(0, 0),
#cat.dist = rep(0.025, 2)
                     ext.text = FALSE
)
}
#' mutSigs
#'
#' Calculate and plot the mutational signatures across samples using the package `deconstructSigs`
#' @param samples Calculates and plots mutational signatures on a per-sample basis [Default no]
#' @param pie Plot a pie chart showing the contribution of each signature to the overall profile [Default no]
#' @import deconstructSigs
#' @import BSgenome.Dmelanogaster.UCSC.dm6
#' @keywords signatures
#' @export
mutSigs <- function(samples=NULL, pie=FALSE, write=FALSE){
if(!exists('scaling_factor')){
cat("calculating trinucleotide frequencies in genome\n")
scaling_factor <-triFreq()
}
snv_data<-getData()
genome <- BSgenome.Dmelanogaster.UCSC.dm6
if(missing(samples)){
cat("Plotting for all samples\n")
snv_data$tissue = 'All'
sigs.input <- mut.to.sigs.input(mut.ref = snv_data, sample.id = "tissue", chr = "chrom", pos = "pos", alt = "alt", ref = "ref", bsg = genome)
sig_plot<-whichSignatures(tumor.ref = sigs.input, signatures.ref = signatures.cosmic, sample.id = 'All',
contexts.needed = TRUE,
tri.counts.method = scaling_factor
)
if(write){
cat("Writing to file 'plots/all_signatures.pdf'\n")
pdf('plots/all_signatures.pdf', width = 20, height = 10)
plotSignatures(sig_plot)
dev.off()
}
plotSignatures(sig_plot)
if(pie){
makePie(sig_plot)
}
}
else{
sigs.input <- mut.to.sigs.input(mut.ref = snv_data, sample.id = "sample", chr = "chrom", pos = "pos", alt = "alt", ref = "ref", bsg = genome)
cat("sample", "snv_count", sep="\t", "\n")
for(s in levels(snv_data$sample)) {
snv_count<-nrow(filter(snv_data, sample == s))
if(snv_count > 50){
cat(s, snv_count, sep="\t", "\n")
sig_plot<-whichSignatures(tumor.ref = sigs.input, signatures.ref = signatures.cosmic, sample.id = s,
contexts.needed = TRUE,
tri.counts.method = scaling_factor)
if(write){
outfile<-(paste('plots/', s, '_signatures.pdf', sep = ''))
cat("Writing to file", outfile, "\n")
pdf(outfile, width = 20, height = 10)
plotSignatures(sig_plot)
dev.off()
}
plotSignatures(sig_plot)
if(pie){
makePie(sig_plot)
}
}
}
}
}
#' sigTypes
#'
#' Calculate the per-sample contribution of each mutational signature (via `deconstructSigs`) and plot as stacked bars
#' @param write Write the plot to 'plots/sigTypes.pdf' if TRUE [Default FALSE]
#' @import deconstructSigs
#' @import data.table
#' @import reshape
#' @import forcats
#' @import BSgenome.Dmelanogaster.UCSC.dm6
#' @keywords signatures
#' @export
sigTypes <- function(write=FALSE){
suppressMessages(require(BSgenome.Dmelanogaster.UCSC.dm6))
suppressMessages(require(deconstructSigs))
if(!exists('scaling_factor')){
cat("Calculating trinucleotide frequencies in genome\n")
scaling_factor <-triFreq()
}
snv_data<-getData()
genome <- BSgenome.Dmelanogaster.UCSC.dm6
sigs.input <- mut.to.sigs.input(mut.ref = snv_data, sample.id = "sample", chr = "chrom", pos = "pos", alt = "alt", ref = "ref", bsg = genome)
l = list()
for(s in levels(snv_data$sample)) {
snv_count<-nrow(filter(snv_data, sample == s))
if(snv_count > 50){
sig_plot<-whichSignatures(tumor.ref = sigs.input, signatures.ref = signatures.cosmic, sample.id = s,
contexts.needed = TRUE,
tri.counts.method = scaling_factor)
l[[s]] <- sig_plot
}
}
mutSigs<-do.call(rbind, l)
mutSigs<-as.data.frame(mutSigs)
mutWeights<-mutSigs$weights
mutData<-melt(rbindlist(mutWeights, idcol = 'sample'),
id = 'sample', variable.name = 'signature', value.name = 'score')
mutData <- mutData %>%
filter(score > 0.1) %>%
group_by(sample) %>%
mutate(total = sum(score))
p <- ggplot(mutData)
p <- p + geom_bar(aes(fct_reorder(sample, -total), score, fill=signature),colour="black", stat = "identity")
p <- p + scale_x_discrete("Sample")
p <- p + scale_y_continuous("Signature contribution", expand = c(0.01, 0.01), breaks=seq(0, 1, by=0.1))
p <- p + cleanTheme() +
theme(axis.text.x = element_text(angle = 45, hjust=1),
axis.text = element_text(size=30)
)
if(write){
sigTypes<-paste("sigTypes.pdf")
cat("Writing file", sigTypes, "\n")
ggsave(paste("plots/", sigTypes, sep=""), width = 20, height = 10)
}
p
}
####
# sigTypesPie
####
sigPie <- function() {
df <- data.frame(
group = c("Sig3", "Sig5", "Sig8", "Unknown"),
value = c(21, 14, 25, 40),
cols = c('#DB8E00', '#64B200', '#00BD5C', '#00BADE'))
all <- data.frame(
group = c("Sig3", "Sig8", "Sig9", "Sig21", "Sig25", "Unknown"),
value = c(29, 17, 10, 7, 7, 30),
cols = c('#E68613', '#0CB702', '#00BE67', '#ED68ED', '#FF61CC', 'grey'))
  bp <- ggplot(all, aes(x="", y=value, fill = group)) +
    geom_bar(width = 1, stat = "identity", colour = "white") +
    scale_fill_manual(values = setNames(as.character(all$cols), all$group)) # map each signature to its intended colour; fill = cols with levels() reorders alphabetically and scrambles the legend
pie <- bp + coord_polar("y", start=0)
pie + cleanTheme() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
}
#' mutSpectrum
#'
#' Plots the mutation spectrum for all samples combined
#' @import ggplot2
#' @keywords spectrum
#' @export
mutSpectrum <- function(write=FALSE, max_y=25){
snv_data<-getData()
cat("Showing global contribution of tri class to mutation load", "\n")
p <- ggplot(snv_data)
p <- p + geom_bar(aes(x = decomposed_tri, y = (..count..)/sum(..count..), group = decomposed_tri, fill = grouped_trans), position="dodge",stat="count")
p <- p + scale_y_continuous("Contribution to mutation load", limits = c(0, max_y/100), breaks=seq(0,max_y/100,by=0.025), labels=paste0(seq(0,max_y,by=2.5), "%"), expand = c(0.0, .0005))
p <- p + scale_x_discrete("Genomic context", expand = c(.005, .005))
p <- p + cleanTheme() +
theme(panel.grid.major.y = element_line(color="grey80", size = 0.5, linetype = "dotted"),
axis.text.x = element_text(angle = 90, hjust=1),
axis.text.y = element_text(size=15),
axis.title = element_text(size=20),
strip.text.x = element_text(size = 15)
)
p <- p + facet_wrap(~grouped_trans, ncol = 6, scale = "free_x" )
p <- p + guides(grouped_trans = FALSE)
if(write){
mut_spectrum<-paste("mutation_spectrum.pdf")
cat("Writing file", mut_spectrum, "\n")
ggsave(paste("plots/", mut_spectrum, sep=""), width = 20, height = 5)
}
p
}
#' featureEnrichment
#'
#' Function to calculate enrichment of snv hits in genomic features
#' @description Calculate the enrichment of snv hits in genomic features
#' A 'features' file must be provided with the follwing format:
#' feature length percentage
#' This can be generated using the script 'script/genomic_features.pl' and a genome .gtf file
#' The defualt genome length is set to the mappable regions of the Drosophila melanogastor Dmel6.12 genome (GEM mappability score > .5)
#' (118274340). The full, assembled genome legnth for chroms 2/3/4/X/Y is 137547960
#' @param features File containing total genomic lengths of features [Default 'data/genomic_features.txt']
#' @param genome_length The total legnth of the genome [Default 118274340 (mappable regions on chroms 2, 3, 4, X & Y for Drosophila melanogastor Dmel6.12)]
#' @keywords enrichment
#' @import dplyr ggpubr
#' @return A data frame with log2 fold-change scores for each genomic feature
#' @export
featureEnrichment <- function(features='data/genomic_features.txt', genome_length=118274340, write=FALSE){
genome_features<-read.delim(features, header = T)
snv_data<-getData()
mutCount<-nrow(snv_data)
# To condense exon counts into "exon"
snv_data$feature<-as.factor(gsub("exon_.*", "exon", snv_data$feature))
classCount<-table(snv_data$feature)
classLengths<-setNames(as.list(genome_features$length), genome_features$feature)
fun <- function(f) {
# Calculate the fraction of geneome occupied by each feature
featureFraction<-classLengths[[f]]/genome_length
# How many times should we expect to see this feature hit in our snv_data (given number of obs. and fraction)?
featureExpect<-(mutCount*featureFraction)
# observed/expected
fc<-classCount[[f]]/featureExpect
Log2FC<-log2(fc)
featureExpect<-round(featureExpect,digits=3)
# Binomial test
if(!is.null(classLengths[[f]])){
if(classCount[f] >= featureExpect){
stat<-binom.test(x = classCount[f], n = mutCount, p = featureFraction, alternative = "greater")
test<-"enrichment"
}
else{
stat<-binom.test(x = classCount[f], n = mutCount, p = featureFraction, alternative = "less")
test<-"depletion"
}
sig_val <- ifelse(stat$p.value <= 0.001, "***",
ifelse(stat$p.value <= 0.01, "**",
ifelse(stat$p.value <= 0.05, "*", "")))
p_val<-format.pval(stat$p.value, digits = 3, eps=0.0001)
list(feature = f, observed = classCount[f], expected = featureExpect, Log2FC = Log2FC, test = test, sig = sig_val, p_val = p_val)
}
}
enriched<-lapply(levels(snv_data$feature), fun)
enriched<-do.call(rbind, enriched)
featuresFC<-as.data.frame(enriched)
# Sort by FC value
featuresFC<-dplyr::arrange(featuresFC,desc(abs(as.numeric(Log2FC))))
featuresFC$Log2FC<-round(as.numeric(featuresFC$Log2FC), 1)
if(write){
featuresFC <- filter(featuresFC, observed >= 5)
first.step <- lapply(featuresFC, unlist)
second.step <- as.data.frame(first.step, stringsAsFactors = F)
ggpubr::ggtexttable(second.step, rows = NULL, theme = ttheme("mGreen"))
feat_enrichment_table <- paste("feature_enrichment_table.tiff")
cat("Writing to file: ", 'plots/', feat_enrichment_table, sep = '')
ggsave(paste("plots/", feat_enrichment_table, sep=""), width = 5.5, height = (nrow(featuresFC)/3), dpi=300)
} else{
return(featuresFC)
}
}
featureEnrichmentPlot <- function(write=FALSE) {
feature_enrichment<-featureEnrichment()
feature_enrichment$feature <- as.character(feature_enrichment$feature)
feature_enrichment$Log2FC <- as.numeric(feature_enrichment$Log2FC)
feature_enrichment <- transform(feature_enrichment, feature = reorder(feature, -Log2FC))
feature_enrichment <- filter(feature_enrichment, observed >= 5)
# Custom sorting
# feature_enrichment$feature <- factor(feature_enrichment$feature, levels=c("intron", "intergenic", "exon", "3UTR", "ncRNA", "5UTR"))
p<-ggplot(feature_enrichment)
p<-p + geom_bar(aes(feature, Log2FC, fill = as.character(test)), stat="identity")
p<-p + guides(fill=FALSE)
p<-p + ylim(-2,2)
p<-p + cleanTheme() +
theme(panel.grid.major.y = element_line(color="grey80", size = 0.5, linetype = "dotted"),
axis.text.x = element_text(angle = 45, hjust=1),
axis.text = element_text(size=20)
)
if(write){
feat_plot <- paste("feat_plot.pdf")
cat("Writing file", feat_plot, "\n")
ggsave(paste("plots/", feat_plot, sep=""), width = 5, height = 10)
}
p
}
#' geneEnrichment
#'
#' Function to calculate fold change enrichment in a set of snv calls correcting for gene length
#' @description Calculate the enrichment of snv hits in length-corrected genes
#' A 'gene_lengths' file must be provided with the following fields (cols 1..6 required)
#' gene length chrom start end tss scaling_factor
#' This can be generated using the script 'script/genomic_features.pl' and a genome .gtf file
#' The default genome length is set to the mappable regions of the Drosophila melanogaster Dmel6.12 genome (GEM mappability score > .5)
#' (118274340). The full, assembled genome length for chroms 2/3/4/X/Y is 137547960
#' @param gene_lengths File containing all genes and their lengths (as generated by 'script/genomefeatures.pl') [Default 'data/gene_lengths.txt']
#' @param n The number of times we need to have seen a gene in our snv_data to view its enrichment score [Default 10]
#' @param genome_length The total length of the genome [Default 118274340 (mappable regions on chroms 2, 3, 4, X & Y for Drosophila melanogaster Dmel6.12)]
#' @keywords enrichment
#' @import dplyr
#' @import ggpubr
#' @return A data frame with FC scores for all genes seen at least n times in the SNV data
#' @export
geneEnrichment <- function(gene_lengths_in="data/gene_lengths.txt", n=10, genome_length=118274340, write=FALSE){
snv_data <- getData() %>%
dplyr::filter(gene != "intergenic") %>%
droplevels()
snv_count<-nrow(snv_data)
gene_lengths <- read.delim(gene_lengths_in, header = T)
gene_lengths <- gene_lengths %>%
dplyr::filter(length > 1000) %>%
dplyr::select(gene, length) %>%
droplevels()
genes<-setNames(as.list(gene_lengths$length), gene_lengths$gene)
snv_data<-join(gene_lengths, snv_data, 'gene', type = 'left')
snv_data$fpkm <- ifelse(snv_data$fpkm=='NULL' | snv_data$fpkm=='NA' | is.na(snv_data$fpkm), 0, snv_data$fpkm)
snv_data$observed <- ifelse(is.numeric(snv_data$observed), snv_data$observed, 0)
hit_genes<-table(factor(snv_data$gene, levels = levels(snv_data$gene) ))
expression<-setNames(as.list(snv_data$fpkm), snv_data$gene)
fun <- function(g) {
# Calculate the fraction of geneome occupied by each gene
genefraction<-genes[[g]]/genome_length
# How many times should we expect to see this gene hit in our snv_data (given number of obs. and fraction of genome)?
gene_expect<-snv_count*(genefraction)
# observed/expected
fc<-hit_genes[[g]]/gene_expect
log2FC = log2(fc)
if (hit_genes[[g]] >= gene_expect) {
stat <- binom.test(x = hit_genes[[g]], n = snv_count, p = genefraction, alternative = "greater")
test <- "enrichment"
} else {
stat <- binom.test(x = hit_genes[[g]], n = snv_count, p = genefraction, alternative = "less")
test <- "depletion"
}
sig_val <- ifelse(stat$p.value <= 0.001, "***",
ifelse(stat$p.value <= 0.01, "**",
ifelse(stat$p.value <= 0.05, "*", "")))
sig_val <- ifelse(stat$p.value > 0.05, "-", sig_val)
p_val <- format.pval(stat$p.value, digits = 3)
gene_expect<-round(gene_expect,digits=3)
list(gene = g, length = genes[[g]], fpkm = expression[[g]], test=test, observed = hit_genes[g], expected = gene_expect, fc = fc, log2FC = log2FC, sig_val=sig_val, p_val=p_val)
}
enriched<-lapply(levels(snv_data$gene), fun)
enriched<-do.call(rbind, enriched)
genesFC<-as.data.frame(enriched)
# Filter for genes with few observations
genesFC <- genesFC %>%
dplyr::filter(observed >= n) %>%
dplyr::mutate(expected = round(as.numeric(expected),digits=3)) %>%
dplyr::mutate(log2FC = round(as.numeric(log2FC),digits=2)) %>%
dplyr::mutate(p_val = as.numeric(p_val)) %>%
dplyr::mutate(eScore = abs(log2FC) * -log10(p_val)) %>%
dplyr::mutate(eScore = round(as.numeric(eScore),digits=2)) %>%
dplyr::select(gene, observed, expected, log2FC, test, sig_val, p_val, eScore) %>%
dplyr::arrange(-eScore, p_val, -abs(log2FC)) %>%
droplevels()
if(write){
cat("printing")
first.step <- lapply(genesFC, unlist)
second.step <- as.data.frame(first.step, stringsAsFactors = F)
arrange(second.step,desc(as.integer(log2FC)))
ggpubr::ggtexttable(second.step, rows = NULL, theme = ttheme("mGreen"))
gene_enrichment_table <- paste("gene_enrichment_table.tiff")
ggsave(paste("plots/", gene_enrichment_table, sep=""), width = 5.2, height = (nrow(genesFC)/3), dpi=300)
} else{
return(genesFC)
}
}
#' EnrichmentVolcano
#'
#' Plot the enrichment of SNVs in genes features
#' @keywords enrichment
#' @import dplyr
#' @import plotly
#' @export
EnrichmentVolcano <- function(d){
gene_enrichment <- d
minPval <- min(gene_enrichment$p_val[gene_enrichment$p_val>0])
gene_enrichment$p_val <- ifelse(gene_enrichment$p_val==0, minPval/abs(gene_enrichment$log2FC), gene_enrichment$p_val)
gene_enrichment$p_val <- ifelse(gene_enrichment$p_val==0, minPval, gene_enrichment$p_val)
maxLog2 <- max(abs(gene_enrichment$log2FC[is.finite(gene_enrichment$log2FC)]))
maxLog2 <- as.numeric(round_any(maxLog2, 1, ceiling))
ax <- list(
size = 25
)
ti <- list(
size = 25
)
p <- plot_ly(data = gene_enrichment,
x = ~log2FC,
y = ~-log10(p_val),
type = 'scatter',
# showlegend = FALSE,
mode = 'markers',
# height = 1200,
# width = 1000,
# frame = ~p_val,
text = ~paste("Gene: ", gene, "\n",
"Observed: ", observed, "\n",
"Expected: ", expected, "\n",
"P-val: ", p_val, "\n"),
color = ~log10(p_val),
colors = "Spectral",
size = ~-log10(p_val)
) %>%
layout(
xaxis = list(title="Log2(FC)", titlefont = ax, range = c(-maxLog2, maxLog2)),
yaxis = list(title="-Log10(p)", titlefont = ax)
)
p
}
#' snvinGene
#'
#' Plot all snvs found in a given gene
#' @description Plot all snvs found in a given gene.
#' A 'gene_lengths' file must be provided with the following fields (cols 1..6 required)
#' gene length chrom start end tss scaling_factor
#' This can be generated using the script 'script/genomic_features.pl' and a genome .gtf file
#' @param gene_lengths File containing all genes and their lengths (as generated by 'script/genomefeatures.pl') [Default 'data/gene_lengths.txt']
#' @param gene2plot Name of the gene to plot
#' @import ggplot2 dplyr
#' @keywords gene
#' @export
snvinGene <- function(gene_lengths="data/gene_lengths.txt", gene2plot='kuz', annotated=TRUE, col_by_status=TRUE, write=FALSE){
gene_lengths <- read.delim(gene_lengths, header = T)
region <- gene_lengths %>%
dplyr::filter(gene == gene2plot) %>%
droplevels()
gene_length <-(region$end-region$start)
wStart<-(region$start - gene_length/10)
wEnd<-(region$end + gene_length/10)
wChrom<-as.character(region$chrom)
wTss<-suppressWarnings(as.numeric(levels(region$tss))[region$tss])
snv_data<-getData() %>%
dplyr::filter(chrom == wChrom & pos >= wStart & pos <= wEnd)
if(nrow(snv_data) == 0){
stop(paste("There are no snvs in", gene2plot, "- Exiting", "\n"))
}
snv_data$colour_var <- snv_data$feature
if(annotated){
snv_data$colour_var <- snv_data$variant_type
if(col_by_status)
snv_data$colour_var <- snv_data$status
}
p <- ggplot(snv_data)
p <- p + geom_point(aes(pos/1000000, sample, colour = colour_var, size = 1.5), position=position_jitter(width=0, height=0.2))
p <- p + guides(size = FALSE, sample = FALSE)
p <- p + cleanTheme() +
theme(axis.title.y=element_blank(),
panel.grid.major.y = element_line(color="grey80", size = 0.5, linetype = "dotted"),
axis.text.y = element_text(size = 30)
)
p <- p + scale_x_continuous("Mbs", expand = c(0,0), breaks = seq(round(wStart/1000000, digits = 2),round(wEnd/1000000, digits = 2),by=0.05), limits=c(wStart/1000000, wEnd/1000000))
p <- p + annotate("rect", xmin=region$start/1000000, xmax=region$end/1000000, ymin=0, ymax=0.3, alpha=.2, fill="skyblue")
p <- p + geom_vline(xintercept = wTss/1000000, colour="red", alpha=.7, linetype="solid")
p <- p + geom_segment(aes(x = wTss/1000000, y = 0, xend= wTss/1000000, yend = 0.1), colour="red")
middle<-((wEnd/1000000+wStart/1000000)/2)
p <- p + annotate("text", x = middle, y = 0.15, label=gene2plot, size=6)
p <- p + ggtitle(paste("Chromosome:", wChrom))
if(write){
hit_gene<-paste(gene2plot, "_hits.pdf", sep='')
cat("Writing file", hit_gene, "\n")
ggsave(paste("plots/", hit_gene, sep=""), width = 10, height = 10)
}
p
}
#' featuresHit
#'
#' Show top hit features
#' @import ggplot2
#' @keywords features
#' @export
featuresHit <- function(..., write=FALSE){
snv_data<-getData(...)
# To condense exon counts into "exon"
snv_data$feature<-as.factor(gsub("exon_.*", "exon", snv_data$feature))
  # Reorders descending
snv_data$feature<-factor(snv_data$feature, levels = names(sort(table(snv_data$feature), decreasing = TRUE)))
snv_data <- snv_data %>%
dplyr::group_by(feature) %>%
dplyr::add_tally() %>%
ungroup() %>%
dplyr::filter(n >= 5) %>%
droplevels()
#cols<-setCols(snv_data, "feature")
p <- ggplot(snv_data)
p <- p + geom_bar(aes(feature, fill = feature))
#p<-p + cols
p <- p + cleanTheme() +
theme(axis.title.x=element_blank(),
panel.grid.major.y = element_line(color="grey80", size = 0.5, linetype = "dotted"))
p <- p + scale_x_discrete(expand = c(0.01, 0.01))
p <- p + scale_y_continuous(expand = c(0.01, 0.01))
# colour to a pub palette:
# p<-p + ggpar(p, palette = 'jco')
if(write){
features_outfile<-paste("hit_features_count.pdf")
cat("Writing file", features_outfile, "\n")
ggsave(paste("plots/", features_outfile, sep=""), width = 20, height = 10)
}
p
}
#' geneHit
#'
#' Show top hit genes
#' @import dplyr
#' @keywords gene
#' @param n Show top n hits [Default 10]
#' @export
geneHit <- function(..., n=10){
snv_data<-getData(...)
snv_data<-filter(snv_data, gene != "intergenic")
hit_count<-as.data.frame(sort(table(unlist(snv_data$gene)), decreasing = T))
colnames(hit_count)<- c("gene", "count")
head(hit_count, n)
}
#' triFreq
#'
#' This function counts the number of times each trinucleotide is found in a supplied genome
#' @param genome BSgenome object; defaults to BSgenome.Dmelanogaster.UCSC.dm6
#' @param count Output total counts instead of frequency if set [Default no]
#' @import dplyr
#' @keywords trinucleotides
#' @export
#' @return Dataframe of trinucs and freqs (or counts if count=1)
triFreq <- function(genome=NULL, count=FALSE){
if(missing(genome)){
cat("No genome specfied, defaulting to 'BSgenome.Dmelanogaster.UCSC.dm6'\n")
library(BSgenome.Dmelanogaster.UCSC.dm6, quietly = TRUE)
genome <- BSgenome.Dmelanogaster.UCSC.dm6
}
  params <- new("BSParams", X = genome, FUN = trinucleotideFrequency, exclude = c("M", "_"), simplify = TRUE) # use the supplied genome rather than the hard-coded Dmelanogaster object
snv_data<-as.data.frame(bsapply(params))
snv_data$genome<-as.integer(rowSums(snv_data))
snv_data$genome_adj<-(snv_data$genome*2)
if(count){
tri_count<-snv_data['genome_adj']
tri_count<-cbind(tri = rownames(tri_count), tri_count)
colnames(tri_count) <- c("tri", "count")
rownames(tri_count) <- NULL
return(tri_count)
}
else{
snv_data$x <- (1/snv_data$genome)
scaling_factor<-snv_data['x']
return(scaling_factor)
}
}
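# Typical use with deconstructSigs, as in mutSigs() above: the returned one-column
# data frame is passed as a custom scaling factor via 'tri.counts.method', e.g.
# sf <- triFreq()
# whichSignatures(tumor.ref = sigs.input, signatures.ref = signatures.cosmic,
#                 sample.id = 'All', contexts.needed = TRUE, tri.counts.method = sf)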
# Functions to calculate the distance
# from each breakpoint to user-provided loci (e.g. TSS)
#' generateData
#' Prepare data for dist2motif
#' @keywords simulate
#' @import ggplot2
#' @import dplyr
#' @import colorspace
#' @import RColorBrewer
#' @export
generateData <- function(..., breakpoints=NA, sim=NA, keep=NULL){
if(is.na(breakpoints)){
# if(!missing(keep)){
# real_data <- notchFilt(..., keep=keep)
# } else {
    real_data <- getData(...) %>%
      dplyr::filter(!sample %in% c("A373R7", "A512R17", "A785-A788R1", "A785-A788R11", "A785-A788R3", "A785-A788R5", "A785-A788R7", "A785-A788R9")) # getData() takes no filter expressions, so apply the sample exclusion here
# }
real_data <- real_data %>%
      dplyr::filter(chrom == "2L" | chrom == "2R" | chrom == "3L" | chrom == "3R" | chrom == "X" ) %>%
      dplyr::select(chrom, pos) %>% # SNV calls already carry a 'pos' column (there is no 'bp' column to derive it from)
droplevels()
} else{
real_data <- read.table(breakpoints, header = F)
if(is.null(real_data$V3)){
real_data$V3 <- real_data$V2 + 2
}
colnames(real_data) <- c("chrom", "start", "end")
real_data <- real_data %>%
dplyr::filter(chrom == "2L" | chrom == "2R" | chrom == "3L" | chrom == "3R" | chrom == "X" ) %>%
dplyr::mutate(pos = (end+start)/2) %>%
dplyr::select(chrom, pos) %>%
droplevels()
}
if (!is.na(sim)) {
byIteration <- list()
#run each iteration
for (i in 1:sim){
cat("Running simulation", i, "of", sim, "\n")
simByChrom <- list()
for (c in levels(real_data$chrom)){
hitCount <- nrow(real_data[real_data$chrom== c,])
hitCount <- (hitCount*10)
if (i == 1){
cat(paste("Simulating", hitCount, "breakpoints on chromosome", c), "\n")
}
bp_data <- bpSim(nSites = hitCount, byChrom = c)
bp_data$iteration <- i
simByChrom[[c]] <- bp_data
}
result <- as.data.frame(do.call(rbind, simByChrom))
rownames(result) <- NULL
byIteration[[i]] <- result
}
#combine each iteration into one data frame
# final <- dplyr::bind_rows(byIteration)
final <- as.data.frame(do.call(rbind, byIteration))
final$iteration <- as.factor(final$iteration)
return(final)
} else{
cat("Using real data", "\n")
real_data$iteration <- as.factor(1)
return(real_data)
}
}
#' dist2Motif2
#' Calculate the distance from each breakpoint to closest motif in a directory of files
#' @keywords motif
#' @import ggplot2 dplyr tidyr RColorBrewer
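#' @details Assumes every .bed file in featureDir has at least three columns (chrom, start, end); only the first three are used.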
#' @export
dist2motif2 <- function(..., breakpoints = NA, featureDir = 'rawdata/features/', sim=NA, keep=NULL, position = 'centre') {
  bp_data <- generateData(..., breakpoints=breakpoints, sim=sim, keep=keep) # renamed from snv_data: everything below refers to bp_data
cat("Calculating distances to", position, 'of regions', sep = " ", "\n")
svCount <- table(bp_data$chrom)
bp_data <- subset(bp_data, chrom %in% names(svCount[svCount >= 5]))
# bp_data <- droplevels(bp_data)
minDist <- function(p) {
index <- which.min(abs(tss_df$pos - p))
closestTss <- tss_df$pos[index]
chrom <- as.character(tss_df$chrom[index])
dist <- (p - closestTss)
list(p, closestTss, dist, chrom)
}
scores <- list()
fileNames <- dir(featureDir, pattern = ".bed")
# cat("Analysing all files in directory:", bedFiles, "\n")
for (i in 1:length(fileNames)){
filename <- basename(tools::file_path_sans_ext(fileNames[i]))
parts <- unlist(strsplit(filename, split = '\\.'))
feature <- parts[1]
cat("Analysing file:", fileNames[i], 'with feature:', feature, "\n")
feature_locations <- read.table(paste(featureDir, fileNames[i], sep='/'), header = F)
feature_locations <- feature_locations[,c(1,2,3)]
colnames(feature_locations) <- c("chrom", "start", "end")
# fCount <- table(feature_locations$chrom)
#
# bp_data <- subset(bp_data, chrom %in% names(svCount[svCount >= 5]))
#
feature_locations <- feature_locations %>%
dplyr::filter(chrom %in% levels(bp_data$chrom))
if(position == 'centre'){
feature_locations <- feature_locations %>%
dplyr::mutate(end = as.integer(((end+start)/2)+1)) %>%
dplyr::mutate(pos = as.integer(end-1)) %>%
dplyr::select(chrom, pos)
} else if(position == 'edge'){
feature_locations <- feature_locations %>%
tidyr::gather(c, pos, start:end, factor_key=TRUE) %>%
dplyr::select(chrom, pos)
}
byIteration <- list()
for (j in levels(bp_data$iteration)){
byChrom <- list()
df1 <- dplyr::filter(bp_data, iteration == j)
for (c in levels(bp_data$chrom)) {
df <- dplyr::filter(df1, chrom == c)
tss_df <- dplyr::filter(feature_locations, chrom == c)
dist2tss <- lapply(df$pos, minDist)
dist2tss <- do.call(rbind, dist2tss)
new <- data.frame(matrix(unlist(dist2tss), nrow=nrow(df)))
new$iteration <- j
new$feature <- as.factor(feature)
colnames(new) <- c("bp", "closest_tss", "min_dist", "chrom", "iteration", "feature")
byChrom[[c]] <- new
}
perIter <- do.call(rbind, byChrom)
byIteration[[j]] <- perIter
}
dist2feat <- do.call(rbind, byIteration)
scores[[i]] <- dist2feat
}
final <- do.call(rbind, scores)
rownames(final) <- NULL
final$iteration <- as.factor(final$iteration)
final$chrom <- as.character(final$chrom)
final$min_dist <- as.numeric(as.character(final$min_dist))
return(final)
}
# distOverlay
#'
#' Calculate the distance from each breakpoint to closest motif
#' Overlay the same number of random simulated breakpoints
#' @keywords motif
#' @import dplyr
#' @import ggplot2
#' @import ggpubr
#' @import RColorBrewer
#' @export
distOverlay2 <- function(..., breakpoints = NA, featureDir = 'rawdata/features/', from='bps', lim=2.5, n=2, plot = TRUE, keep=NULL, position = 'centre') {
scaleFactor <- lim*1000
real_data <- dist2motif2(..., breakpoints = breakpoints, featureDir = featureDir, keep=keep, position = position)
sim_data <- dist2motif2(..., featureDir = featureDir, sim = n, position = position)
real_data$Source <- as.factor("Real")
sim_data$Source <- as.factor("Sim")
dummy_iterations <- list()
for (i in levels(sim_data$iteration)){
real_data$iteration <- as.factor(i)
dummy_iterations[[i]] <- real_data
}
real_data <- do.call(rbind, dummy_iterations)
rownames(real_data) <- NULL
real_data$iteration <- factor(real_data$iteration, levels = 1:n)
sim_data$iteration <- factor(sim_data$iteration, levels = 1:n)
# Perform significance testing
pVals_and_df <- simSig2(r = real_data, s = sim_data, max_dist = scaleFactor)
combined <- pVals_and_df[[1]]
pVals <- pVals_and_df[[2]]
if(plot==T){
    print(plotdistanceOverlay2(..., d=combined, from=from, lim=lim, n=n, position=position)) # facetPlot/byChrom dropped: plotdistanceOverlay2() has no such parameters
print(pVals)
}else{
print(pVals)
return(list(combined, pVals))
}
}
#' plotdistanceOverlay
#'
#' Plot the distance overlay
#' @param d Dataframe containing combined real + sim data (d <- distOverlay())
#' @import dplyr ggplot2 RColorBrewer scales colorspace cowplot
#' @keywords distance
#' @export
plotdistanceOverlay2 <- function(..., d, from='bps', lim=2.5, n=2, position='centre', histo=FALSE, binWidth = 500){
grDevices::pdf(NULL)
scaleFactor <- lim*1000
scale <- "(Kb)"
lims <- c(as.numeric(paste("-", scaleFactor, sep = '')), scaleFactor)
brks <- c(as.numeric(paste("-", scaleFactor, sep = '')),
as.numeric(paste("-", scaleFactor/10, sep = '')),
scaleFactor/10,
scaleFactor)
labs <- as.character(brks/1000)
expnd <- c(0, 0)
new <- d %>%
mutate(iteration = as.factor(ifelse(Source=='Real', 0, iteration)))
real_fill <- '#3D9DEB'
iterFill <- colorspace::rainbow_hcl(n)
colours <- c(real_fill, iterFill)
plts <- list()
for (i in 1:(length(levels(new$feature)))){
d <- new %>%
filter(feature == levels(new$feature)[i])
p <- ggplot(d)
if(histo) {
p <- p + geom_histogram(data=d[d$Source=="Sim",], aes(min_dist, fill = Source, group = iteration), alpha = 0.1, binwidth = binWidth, position="identity")
p <- p + geom_histogram(data=d[d$Source=="Real",], aes(min_dist, fill = Source, group = iteration), alpha = 0.5, binwidth = binWidth, position="identity")
p <- p + scale_fill_manual(values=colours)
p <- p + scale_y_continuous(paste("Count per", binWidth, "bp bins"))
} else {
p <- p + geom_line(data=d[d$Source=="Real",], aes(min_dist, colour = iteration), size=2, stat='density')
p <- p + geom_line(aes(min_dist, group = interaction(iteration, Source), colour = iteration), alpha = 0.7, size=1, stat='density')
p <- p + scale_color_manual(values=colours)
}
p <- p + scale_x_continuous(
limits = lims,
breaks = brks,
expand = expnd,
labels = labs
)
p <- p +
theme(
legend.position = "none",
panel.background = element_blank(),
plot.background = element_rect(fill = "transparent", colour = NA),
axis.line.x = element_line(color = "black", size = 0.5),
axis.text.x = element_text(size = 16),
axis.line.y = element_line(color = "black", size = 0.5),
plot.title = element_text(size=22, hjust = 0.5)
)
    # use the single feature level, not the whole column, as the title
    p <- p + labs(title = paste(levels(new$feature)[i], "\n", position))
plts[[i]] <- p
}
cat("Plotting", length(levels(new$feature)), "plots", "\n")
grDevices::dev.off()
cowplot::plot_grid(plotlist=plts)
}
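# Example usage sketch, given 'res' from distOverlay2(..., plot = FALSE):
# plotdistanceOverlay2(d = res[[1]], lim = 2.5, n = 5, histo = TRUE, binWidth = 250)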
simSig2 <- function(r, s, test=NA, max_dist=5000){
cat("Calculating descriptive statistics\n")
arrange_data <- function(x){
x <- x %>%
group_by(iteration, feature) %>%
dplyr::mutate( count = n(),
median = median(min_dist),
mean = mean(min_dist),
sd = sd(min_dist),
Source = Source) %>%
dplyr::filter(abs(min_dist) <= max_dist ) %>%
ungroup()
return(x)
}
simulated <- arrange_data(s)
real <- arrange_data(r)
combined <- suppressWarnings(dplyr::full_join(real, simulated))
combined$Source <- as.factor(combined$Source)
simbyFeat = list()
for (f in levels(combined$feature)){
pVals = list()
c <- dplyr::filter(combined, feature==f)
for(i in levels(c$iteration)){
df <- dplyr::filter(c, iteration==i)
rl <- dplyr::filter(df, Source == "Real")
sm <- dplyr::filter(df, Source == "Sim")
      result1 <- tryCatch(suppressWarnings(ks.test(rl$min_dist, sm$min_dist)), error=function(err) NA)
      ksPval <- if (inherits(result1, "htest")) round(result1$p.value, 4) else NA
result2 <- car::leveneTest(df$min_dist, df$Source, center='median')
result3 <- stats::bartlett.test(df$min_dist, df$Source)
bPval <- round(result3$p.value, 4)
lPval <- round(result2$`Pr(>F)`[1], 4)
rmed <- round(median(rl$min_dist)/1000, 2)
smed <- round(median(sm$min_dist)/1000, 2)
rsd <- round(sd(rl$min_dist)/1000, 2)
ssd <- round(sd(sm$min_dist)/1000, 2)
      # kurtosis() and skewness() are not base R; they are assumed to come
      # from an attached package such as 'moments' or 'e1071'
      rKurtosis <- round(kurtosis(rl$min_dist), 2)
      sKurtosis <- round(kurtosis(sm$min_dist), 2)
      rSkew <- round(skewness(rl$min_dist), 2)
      sSkew <- round(skewness(sm$min_dist), 2)
# fStat <- var.test(min_dist ~ Source , df, alternative = "two.sided")
# fRatio <- round(fStat$statistic, 2)
# fStat <- round(fStat$p.value, 4)
sig <- ifelse(lPval <= 0.001, "***",
ifelse(lPval <= 0.01, "**",
ifelse(lPval <= 0.05, "*", "")))
vals <- data.frame(iteration = i,
feature = f,
KS = ksPval,
Levenes = lPval,
# Bartlett = bPval,
# Fstat_ratio = fRatio,
# Fstat = fStat,
real_median = rmed,
sim_median = smed,
real_sd = rsd,
sim_sd = ssd,
real_kurtosis = rKurtosis,
sim_kurtosis = sKurtosis,
real_skew = rSkew,
sim_skew = sSkew,
sig = sig)
pVals[[i]] <- vals
}
pVals_df <- do.call(rbind, pVals)
simbyFeat[[f]] <- pVals_df
}
combined_sig_vals <- do.call(rbind, simbyFeat)
rownames(combined_sig_vals) <- NULL
combined_sig_vals <- combined_sig_vals %>%
arrange(Levenes, KS)
# print(pVals_df, row.names = FALSE)
## Boxplot per chrom
# colours <- c("#E7B800", "#00AFBB")
# cat("Plotting qq plot of min distances\n")
# qqnorm(combined$min_dist)
# qqline(combined$min_dist, col = 2)
# p <- ggplot(combined)
# p <- p + geom_boxplot(aes(chrom, min_dist, fill = Source), alpha = 0.6)
# p <- p + scale_y_continuous("Distance", limits=c(-5000, 5000))
# p <- p + facet_wrap(~iteration, ncol = 2)
# p <- p + scale_fill_manual(values = colours)
# p
return(list(combined, combined_sig_vals))
}
#################
## Development ##
#################
geneEnrichmentPlot <- function(n=0, highlight='kuz', write=FALSE) {
gene_enrichment<-geneEnrichment(n=n)
#gene_enrichment<-filter(gene_enrichment, fpkm > 0)
gene_enrichment <- gene_enrichment %>%
dplyr::mutate(gene = as.character(gene)) %>%
dplyr::mutate(log2FC = as.numeric(log2FC)) %>%
dplyr::mutate(test = as.character(ifelse(log2FC>=0, "enriched", "depleted")))
gene_enrichment <- transform(gene_enrichment, gene = reorder(gene, -abs(log2FC)))
highlightedGene <- dplyr::filter(gene_enrichment, gene == highlight)
highlightedGene <- droplevels(highlightedGene)
p <- ggplot(gene_enrichment)
p <- p + geom_bar(aes(gene, log2FC, fill = as.character(test)), stat="identity")
#p<-p + geom_bar(data=highlightedGene, aes(gene, log2FC, fill="red"), colour="black", stat="identity")
p <- p + guides(fill=FALSE)
p <- p + scale_x_discrete("Gene")
p <- p + cleanTheme() +
theme(panel.grid.major.y = element_line(color="grey80", size = 0.5, linetype = "dotted"),
axis.text.x = element_text(angle = 90, hjust=1),
axis.text = element_text(size=7)
)
#p<-p + coord_flip()
#p<-p + scale_y_reverse()
if(write){
gene_enrichment_plot <- paste("gene_enrichment.pdf")
cat("Writing file", gene_enrichment_plot, "\n")
ggsave(paste("plots/", gene_enrichment_plot, sep=""), width = 25, height = 5)
}
p
}
### Development
geneLenPlot <- function(n=0,gene_lengths_in="data/gene_lengths.txt"){
gene_enrichment<-geneEnrichment(n=n)
gene_lengths<-read.delim(gene_lengths_in, header = T)
gene_enrichment$length<-as.numeric(gene_enrichment$length)
gene_enrichment$log2FC<-as.numeric(gene_enrichment$log2FC)
gene_enrichment$fc<-as.numeric(gene_enrichment$fc)
gene_enrichment$observed<-as.numeric(gene_enrichment$observed)
# Var in x explained by Y
# par(mfrow=c(2,2))
# plot(enrichment_lm)
# Set new col 'col' to indicate enrichment/depletion
gene_enrichment$col<-as.factor(ifelse(gene_enrichment$log2FC > 0, 'enrichment', 'depletion'))
# Only keep relevant cols
gene_enrichment<-gene_enrichment[,c("gene","fpkm","observed",'expected','fc', 'log2FC', 'col')]
gene_enrichment<-droplevels(gene_enrichment)
  # Join both df on 'gene' (join() comes from plyr)
  gene_lengths_df<-plyr::join(as.data.frame(gene_lengths), gene_enrichment, 'gene', type = "left")
# Clean up null/na vals
gene_lengths_df$fpkm <- ifelse(gene_lengths_df$fpkm=='NULL' | gene_lengths_df$fpkm=='NA' | is.na(gene_lengths_df$fpkm), 0, gene_lengths_df$fpkm)
gene_lengths_df$level <- ifelse(gene_lengths_df$fpkm == 0 , 'not_expressed', 'expressed')
gene_lengths_df$observed <- ifelse(gene_lengths_df$observed == 'NULL' | gene_lengths_df$observed == 'NA', 0, gene_lengths_df$observed)
gene_lengths_df$level<-as.factor(gene_lengths_df$level)
# Allow colouring of expressed/enriched/depleted
gene_lengths_df$col <- ifelse(is.na(gene_lengths_df$col), 'NA', as.character(gene_lengths_df$col))
gene_lengths_df$col <- ifelse(gene_lengths_df$fpkm > 0, 'expressed', gene_lengths_df$col)
# New col log10 length
  gene_lengths_df$log10length<-log10(gene_lengths_df$length) # use the joined data frame's own length column
gene_lengths_df<-filter(gene_lengths_df, length >= 1000, length < 200000)
gene_lengths_df<-droplevels(gene_lengths_df)
#
  # Linear model (predicted ~ predictor)
  enrichment_lm <- lm(observed ~ length, data = gene_lengths_df)
  # Exponential model (restricted to observed > 0, since log(0) would break lm)
  enrichment_exp <- lm(log(observed) ~ length, data = subset(gene_lengths_df, observed > 0))
# plot( observed ~ length, data = gene_enrichment)
lmRsq<-round(summary(enrichment_lm)$adj.r.squared, 2)
expRsq<-round(summary(enrichment_exp)$adj.r.squared, 2)
#summary(pois <- glm(observed ~ length, family="poisson", data=gene_lengths_df))
gene_lengths_p<-filter(gene_lengths_df, col != 'NA')
p <- ggplot(gene_lengths_p, aes(log10length, observed))
p <- p + geom_jitter(aes( colour = col, alpha = 0.8))
p <- p + scale_color_manual(values=c("#F8766D", "#00AFBB", "#E7B800"))
p <- p + scale_x_continuous("Log10 Kb", limits=c(3, max(gene_lengths_p$log10length)))
p <- p + scale_y_continuous("Count", limits=c(0,max(gene_lengths_p$observed)))
p <- p + scale_size_continuous(range=c(0, abs(max(gene_lengths_p$log2FC))))
p <- p + annotate(x = 3.5, y = 20, geom="text", label = paste('Lin:R^2:', lmRsq), size = 7,parse = TRUE)
p <- p + annotate(x = 3.5, y = 17, geom="text", label = paste('Exp:R^2:', expRsq), size = 7,parse = TRUE)
# Default model is formula = y ~ x
# How much variation in X is explained by Y
  # How much var in length is explained by observation
p <- p + geom_smooth(method=lm, show.legend = FALSE) # linear
p <- p + geom_smooth(method=lm, formula = y ~ poly(x, 2), colour = "orange", show.legend = FALSE) #Quadratic
#p <- p + geom_smooth(method=glm, method.args = list(family = "poisson"), colour = "red", se=T)
#p <- p + geom_smooth(colour="orange") # GAM
p <- p + cleanTheme()
p <- p + geom_rug(aes(colour=col,alpha=.8),sides="b")
p <- p + guides(alpha = FALSE)
colours<-c( "#E7B800", "#00AFBB")
p2<-ggplot(gene_lengths_df)
p2<-p2 + geom_density(aes(log10length, fill=level),alpha = 0.4)
p2 <- p2 + cleanTheme()
p2 <- p2 + scale_x_continuous("Log10 Kb", limits=c(3, max(gene_lengths_df$log10length)))
p2 <- p2 + guides(alpha = FALSE)
#p2 <- p2 + geom_rug(inherit.aes = F, aes(log10length,colour=level),alpha=0.2, sides = "tb")
p2 <- p2 + geom_rug(data=subset(gene_lengths_df,level=="expressed"), aes(log10length,colour=level),alpha=0.7, sides = "b")
p2 <- p2 + geom_rug(data=subset(gene_lengths_df,level=="not_expressed"), aes(log10length,colour=level),alpha=0.2, sides = "t")
p2 <- p2 + scale_fill_manual(values=colours)
p2 <- p2 + scale_colour_manual(values=colours)
# p<-ggscatter(gene_lengths_p, x = "log10length", y = "observed",
# color = "col", size = "log2FC"
# )
# p2<-ggdensity(gene_lengths_df, x = "log10length",
# add = "mean", rug = TRUE,
# color = "level", fill = "level",
# palette = c("#00AFBB", "#E7B800")
# )
#
combined_plots <- ggarrange(p, p2,
labels = c("A", "B"),
ncol = 1, nrow = 2)
gene_len<-paste("gene_lengths_count_model_log10.pdf")
cat("Writing file", gene_len, "\n")
ggsave(paste("plots/", gene_len, sep=""), width = 10, height = 10)
combined_plots
}
getPromoter <- function(gene_lengths_in="data/gene_lengths.txt"){
gene_lengths<-read.delim(gene_lengths_in, header = T)
gene_lengths$promoter<-ifelse(gene_lengths$start<gene_lengths$end,
gene_lengths$start- 1500,
gene_lengths$end + 1500)
gene_lengths<-gene_lengths[,c("chrom", "promoter")]
colnames(gene_lengths)<-NULL
return(gene_lengths)
}
dist2Feat <- function(feature_file="data/tss_locations.txt",sim=NA, print=0,send=0, feature='tss'){
if(is.na(sim)){
snv_data<-getData()
}
else{
cat("Generating simulated snv_data\n")
hit_count<-nrow(getData())
snv_data<-snvSim(N=hit_count, write=print)
colnames(snv_data)<-c("chrom", "pos", "v3", "v4", "v5")
snv_data<-filter(snv_data, chrom == "2L" | chrom == "2R" | chrom == "3L" | chrom == "3R" | chrom == "X" | chrom == "Y" | chrom == "4")
snv_data<-droplevels(snv_data)
}
feature<-paste(toupper(substr(feature, 1, 1)), substr(feature, 2, nchar(feature)), sep='')
if(feature=='Promoter'){
feature_locations<-getPromoter()
cat("Getting gene promoter locations...\n")
}
else{
feature_locations<-read.delim(feature_file, header = F)
cat("Reading in file:", feature_file, sep =' ', "\n")
}
cat("Calculating distances to", feature, sep=' ', "\n")
colnames(feature_locations)<-c("chrom", "pos")
feature_locations$pos<-as.integer(feature_locations$pos)
# Will throw error if SVs don't exist on a chrom...
# Removes chroms with fewer than 10 observations
svCount <- table(snv_data$chrom)
snv_data <- subset(snv_data, chrom %in% names(svCount[svCount >= 10]))
snv_data<-droplevels(snv_data)
feature_locations <- subset(feature_locations, chrom %in% levels(snv_data$chrom))
feature_locations<-droplevels(feature_locations)
fun2 <- function(p) {
index<-which.min(abs(tss_df$pos - p))
closestTss<-tss_df$pos[index]
# browser()
chrom<-as.character(tss_df$chrom[index])
gene<-as.character(tss_df$gene[index])
dist<-(p-closestTss)
list(p, closestTss, dist, chrom, gene)
}
l <- list()
for (c in levels(snv_data$chrom)){
df<-filter(snv_data, chrom == c)
tss_df<-filter(feature_locations, chrom == c)
dist2tss<-lapply(df$pos, fun2)
dist2tss<-do.call(rbind, dist2tss)
dist2tss<-as.data.frame(dist2tss)
colnames(dist2tss)=c("bp", "closest_tss", "min_dist", "chrom", "closest_gene")
dist2tss$min_dist<-as.numeric(dist2tss$min_dist)
l[[c]] <- dist2tss
}
dist2tss<-do.call(rbind, l)
dist2tss<-as.data.frame(dist2tss)
dist2tss$chrom<-as.character(dist2tss$chrom)
dist2tss<-arrange(dist2tss,(abs(min_dist)))
if(send==1){
return(dist2tss)
}
else{
p<-ggplot(dist2tss)
p<-p + geom_density(aes(min_dist, fill = chrom), alpha = 0.3)
p<-p + scale_x_continuous(paste("Distance to", feature, "(Kb)", sep=' '),
limits=c(-10000, 10000),
breaks=c(-10000,-1000, 1000, 10000),
expand = c(.0005, .0005),
labels=c("-10", "-1", "1", "10") )
p<-p + scale_y_continuous("Density")
p<-p + geom_vline(xintercept = 0, colour="black", linetype="dotted")
#p<-p + facet_wrap(~chrom, scale = "free_x", ncol = 5)
p <- p + geom_rug(aes(min_dist, colour=chrom))
p<-p + cleanTheme() +
theme(strip.text = element_text(size=20),
legend.position="top")
p<-p + facet_wrap(~chrom, ncol = 3, scales = "free_y")
if(is.na(sim)){
distout<-paste("snv", feature, 'dist.pdf', sep='')
}
else{
distout<-paste("snv", feature, 'dist_sim.pdf', sep='')
}
cat("Writing file", distout, "\n")
ggsave(paste("plots/", distout, sep=""), width = 20, height = 10)
p
}
}
distOverlay <- function(feature_file="data/tss_locations.txt", feature='tss', lim=10,all=NA){
  feature<-paste(toupper(substr(feature, 1, 1)), substr(feature, 2, nchar(feature)), sep='')
  # feature is capitalised above, so test against 'Promoter'
  if(feature=='Promoter'){
real_data<-dist2Feat(send=1, feature=feature)
sim_data<-dist2Feat(feature=feature, sim=1, send=1)
}
else{
real_data<-dist2Feat(feature_file=feature_file, send=1, feature=feature)
sim_data<-dist2Feat(feature_file=feature_file, feature=feature, sim=1, send=1)
}
real_data$Source<-"Real"
sim_data$Source<-"Sim"
sim_data<-filter(sim_data, chrom != "Y", chrom != 4)
sim_data<-droplevels(sim_data)
real_data<-filter(real_data, chrom != "Y", chrom != 4)
real_data<-droplevels(real_data)
colours<-c( "#E7B800", "#00AFBB")
scale<-"(Kb)"
if(lim==0.1){
cat("Setting limits to -+100bp\n")
lims=c(-100, 100)
brks=c(-100, -10, 10, 100)
expnd = c(.0005, .0005)
labs=c("-100", "-10", "10", "100")
scale<-"(bp)"
}
else if(lim==0.5){
cat("Setting limits to -+0.5kb\n")
lims=c(-500, 500)
brks=c(-500, -100,100, 500)
expnd = c(.0005, .0005)
labs=c("-500", "-100", "100", "500")
scale<-"(bp)"
}
else if(lim==1){
cat("Setting limits to -+1kb\n")
lims=c(-1000, 1000)
brks=c(-1000, 1000)
expnd = c(.0005, .0005)
labs=c("-1", "1")
}
else{
cat("Setting limits to -+10kb\n")
lims=c(-10000, 10000)
brks=c(-10000,-1000, 1000, 10000)
expnd = c(.0005, .0005)
labs=c("-10", "-1", "1", "10")
}
p<-ggplot()
p<-p + geom_density(data=real_data,aes(min_dist, fill = Source), alpha = 0.4)
p<-p + geom_density(data=sim_data,aes(min_dist, fill = Source), alpha = 0.4)
if(is.na(all)){
p<-p + facet_wrap(~chrom, ncol = 3, scales = "free_y")
}
p<-p + scale_x_continuous(paste("Distance to", feature, scale, sep=' '),
limits=lims,
breaks=brks,
expand=expnd,
labels=labs )
p<-p + scale_y_continuous("Density")
p<-p + geom_vline(xintercept = 0, colour="black", linetype="dotted")
p <- p + geom_rug(data=real_data,aes(min_dist, colour=Source),sides="b")
p <- p + geom_rug(data=sim_data,aes(min_dist, colour=Source),sides="t")
p <- p + scale_fill_manual(values=colours)
p <- p + scale_colour_manual(values=colours)
p<-p + cleanTheme() +
theme(strip.text = element_text(size=20),
legend.position="top")
overlay<-paste("snv", feature, 'dist_overlay.pdf', sep='')
cat("Writing file", overlay, "\n")
ggsave(paste("plots/", overlay, sep=""), width = 25, height = 10)
p
}
#' tssDist
#'
#' Plot distance to TSS distribution
#' @param tss_pos File containing all TSS positions in genome: "gene chrom tss" [Default: 'data/tss_positions.txt']
#' @param sim Simulate random SNVs across genomic intervals? [Default: NO]
#' @param print Write the simulated random SNVs to a bed file ('data/simulatedSNVs.bed')? [Default: NO]
#' @import ggplot2
#' @keywords tss
#' @export
tssDist <- function(tss_pos="data/tss_positions.txt",sim=NA, print=0,return=0){
tss_locations<-read.delim(tss_pos, header = T)
tss_locations$tss<-as.integer(tss_locations$tss)
if(is.na(sim)){
snv_data<-getData()
}
else{
cat("Generating simulated snv_data\n")
hit_count<-nrow(getData())
snv_data<-snvSim(N=hit_count, write=print)
colnames(snv_data)<-c("chrom", "pos", "v3", "v4", "v5")
snv_data<-filter(snv_data, chrom == "2L" | chrom == "2R" | chrom == "3L" | chrom == "3R" | chrom == "X" | chrom == "Y" | chrom == "4")
snv_data<-droplevels(snv_data)
}
  # Will throw an error if SVs don't exist on a chrom...
  # Removes chroms with 30 or fewer observations
svCount <- table(snv_data$chrom)
snv_data <- subset(snv_data, chrom %in% names(svCount[svCount > 30]))
snv_data<-droplevels(snv_data)
tss_locations <- subset(tss_locations, chrom %in% levels(snv_data$chrom))
tss_locations<-droplevels(tss_locations)
fun2 <- function(p) {
index<-which.min(abs(tss_df$tss - p))
closestTss<-tss_df$tss[index]
chrom<-as.character(tss_df$chrom[index])
gene<-as.character(tss_df$gene[index])
dist<-(p-closestTss)
list(p, closestTss, dist, chrom, gene)
}
l <- list()
for (c in levels(snv_data$chrom)){
df<-filter(snv_data, chrom == c)
tss_df<-filter(tss_locations, chrom == c)
dist2tss<-lapply(df$pos, fun2)
dist2tss<-do.call(rbind, dist2tss)
dist2tss<-as.data.frame(dist2tss)
colnames(dist2tss)=c("bp", "closest_tss", "min_dist", "chrom", "closest_gene")
dist2tss$min_dist<-as.numeric(dist2tss$min_dist)
l[[c]] <- dist2tss
}
dist2tss<-do.call(rbind, l)
dist2tss<-as.data.frame(dist2tss)
dist2tss$chrom<-as.character(dist2tss$chrom)
dist2tss<-arrange(dist2tss,(abs(min_dist)))
# Removes chroms with fewer than 20 observations
# svCount <- table(dist2tss$chrom)
# dist2tss <- subset(dist2tss, chrom %in% names(svCount[svCount > 10]))
if(return==1){
return(dist2tss)
}
else{
p<-ggplot(dist2tss)
p<-p + geom_density(aes(min_dist, fill = chrom), alpha = 0.3)
p<-p + scale_x_continuous("Distance to TSS (Kb)",
limits=c(-10000, 10000),
breaks=c(-10000,-1000, 1000, 10000),
expand = c(.0005, .0005),
labels=c("-10", "-1", "1", "10") )
p<-p + scale_y_continuous("Density")
p<-p + geom_vline(xintercept = 0, colour="black", linetype="dotted")
#p<-p + facet_wrap(~chrom, scale = "free_x", ncol = 5)
p <- p + geom_rug(aes(min_dist, colour=chrom))
p<-p + cleanTheme() +
theme(strip.text = element_text(size=20),
legend.position="top")
p<-p + facet_wrap(~chrom, ncol = 3, scales = "free_y")
if(is.na(sim)){
tssDistout<-paste("bpTSSdist.pdf")
}
else{
tssDistout<-paste("bpTSSdist_sim.pdf")
}
cat("Writing file", tssDistout, "\n")
ggsave(paste("plots/", tssDistout, sep=""), width = 20, height = 10)
p
}
}
tssDistOverlay <- function(){
real_data<-tssDist(return=1)
real_data$Source<-"Real"
  sim_data<-tssDist(sim=1, return=1)
sim_data$Source<-"Sim"
sim_data<-filter(sim_data, chrom != "Y", chrom != 4)
sim_data<-droplevels(sim_data)
real_data<-filter(real_data, chrom != "Y", chrom != 4)
real_data<-droplevels(real_data)
colours<-c( "#E7B800", "#00AFBB")
p<-ggplot()
p<-p + geom_density(data=real_data,aes(min_dist, fill = Source), alpha = 0.4)
p<-p + geom_density(data=sim_data,aes(min_dist, fill = Source), alpha = 0.4)
p<-p + facet_wrap(~chrom, ncol = 3, scales = "free_y")
p<-p + scale_x_continuous("Distance to TSS (Kb)",
limits=c(-100000, 100000),
breaks=c(-100000,-10000,-1000, 1000, 10000, 100000),
expand = c(.0005, .0005),
labels=c("-100", "-10", "-1", "1", "10", "100") )
p<-p + scale_y_continuous("Density")
p<-p + geom_vline(xintercept = 0, colour="black", linetype="dotted")
#p<-p + facet_wrap(~chrom, scale = "free_x", ncol = 5)
p <- p + geom_rug(data=real_data,aes(min_dist, colour=Source),sides="b")
p <- p + geom_rug(data=sim_data,aes(min_dist, colour=Source),sides="t")
p <- p + scale_fill_manual(values=colours)
p <- p + scale_colour_manual(values=colours)
p<-p + cleanTheme() +
theme(strip.text = element_text(size=20),
legend.position="top")
p<-p + facet_wrap(~chrom, ncol = 3, scales = "free_y")
tssDistout<-paste("bpTSSdist_overlay.pdf")
cat("Writing file", tssDistout, "\n")
ggsave(paste("plots/", tssDistout, sep=""), width = 20, height = 10)
p
}
#' snvSim
#'
#' Generate simulated SNV hits across genomic regions (e.g. mappable regions)
#' @param intervals File containing genomic regions within which to simulate SNVs [Default: 'data/intervals.bed']
#' @param N Number of random SNVs to generate [Default: nrow(snv_data)]
#' @import GenomicRanges rtracklayer
#' @keywords sim
#' @export
snvSim <- function(intervals="data/intervals.bed", N=1000, write=F){
  suppressPackageStartupMessages(require(GenomicRanges))
  # import.bed()/export.bed() come from rtracklayer, not GenomicRanges
  suppressPackageStartupMessages(require(rtracklayer))
  intFile <- import.bed(intervals)
space <- sum(width(intFile))
positions <- sample(c(1:space), N)
cat("Simulating", N, "SNVs", sep = ' ', "\n")
new_b <- GRanges(seqnames=as.character(rep(seqnames(intFile), width(intFile))),
ranges=IRanges(start=unlist(mapply(seq, from=start(intFile), to=end(intFile))), width=1))
bedOut<-new_b[positions]
if(write){
export.bed(new_b[positions], "data/simulatedSNVs.bed")
}
remove(new_b)
return(data.frame(bedOut))
}
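# Example usage sketch (the intervals BED file is an illustrative assumption):
# sim_snvs <- snvSim(intervals = "data/intervals.bed", N = 5000)
# head(sim_snvs)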
svDist <- function(svs="data/all_bps_filtered.txt",sim=NA, print=0){
svBreaks<-read.delim(svs, header = F)
colnames(svBreaks) <- c("event", "bp_no", "sample", "chrom", "bp", "gene", "feature", "type", "length")
svBreaks$bp<-as.integer(svBreaks$bp)
svBreaks<-filter(svBreaks, sample != "A373R1" & sample != "A373R7" & sample != "A512R17" )
svBreaks <- droplevels(svBreaks)
snv_data<-getData()
if(!is.na(sim)){
simrep<-nrow(snv_data)
cat("Generating simulated snv_data for", simrep, "SNVs", "\n")
snv_data<-snvSim(N=simrep, write=print)
snv_data$end<-NULL
snv_data$width<-NULL
snv_data$strand<-NULL
snv_data$sample <- as.factor(sample(levels(svBreaks$sample), size = nrow(snv_data), replace = TRUE))
#snv_data$type <- sample(levels(svBreaks$type), size = nrow(snv_data), replace = TRUE)
colnames(snv_data)<-c("chrom", "pos", "sample")
snv_data<-filter(snv_data, chrom == "2L" | chrom == "2R" | chrom == "3L" | chrom == "3R" | chrom == "X" | chrom == "Y" | chrom == "4")
snv_data<-droplevels(snv_data)
}
snv_data <- subset(snv_data, sample %in% levels(svBreaks$sample))
snv_data <- droplevels(snv_data)
snv_data <- subset(snv_data, chrom %in% levels(svBreaks$chrom))
snv_data <- droplevels(snv_data)
fun3 <- function(p) {
index<-which.min(abs(sv_df$bp - p))
closestBp<-as.numeric(sv_df$bp[index])
chrom<-as.character(sv_df$chrom[index])
gene<-as.character(sv_df$gene[index])
sample<-as.character(sv_df$sample[index])
type<-as.character(sv_df$type[index])
dist<-(p-closestBp)
list(p, closestBp, dist, chrom, gene, type, sample)
}
  l <- list()
  for (c in levels(snv_data$chrom)){
    for (s in levels(snv_data$sample)){
      df<-filter(snv_data, chrom == c & sample == s)
      sv_df<-filter(svBreaks, chrom == c & sample == s)
      dist2bp<-lapply(df$pos, fun3)
      dist2bp<-do.call(rbind, dist2bp)
      dist2bp<-as.data.frame(dist2bp)
      colnames(dist2bp)=c("snp", "closest_bp", "min_dist", "chrom", "closest_gene", "type", "sample")
      dist2bp$min_dist<-as.numeric(dist2bp$min_dist)
      # key results by chrom + sample; keying by sample alone would overwrite
      # results from earlier chromosomes
      l[[paste(c, s, sep="_")]] <- dist2bp
    }
  }
dist2bp<-do.call(rbind, l)
dist2bp<-as.data.frame(dist2bp)
dist2bp$chrom<-as.character(dist2bp$chrom)
dist2bp$type<-as.character(dist2bp$type)
snvCount <- table(dist2bp$chrom)
dist2bp <- subset(dist2bp, chrom %in% names(snvCount[snvCount > 25]))
dist2bp<-arrange(dist2bp,(abs(min_dist)))
dist2bp <- dist2bp %>% na.omit()
p<-ggplot(dist2bp)
p<-p + geom_density(aes(min_dist, fill=type), alpha = 0.3)
# p<-p + scale_x_continuous("Distance to SV BP (Kb)",
# limits=c(-10000000, 10000000),
# breaks=c(-10000000, -100000, -10000, -1000, 0, 1000, 10000, 100000, 10000000),
# expand = c(.0005, .0005),
# labels=c("-10000", "-100", "-10", "-1", 0, "1", "10", "100", "10000") )
p<-p + scale_y_continuous("Density", expand = c(0, 0))
p<-p + geom_vline(xintercept = 0, colour="black", linetype="dotted")
p<-p + cleanTheme()
p<-p + facet_wrap(~chrom, ncol = 2, scales = "free_y")
p
# p<-ggplot(dist2bp)
# p<-p + geom_histogram(aes(as.numeric(min_dist, fill = type)), alpha = 0.6, binwidth = 1000)
# p<-p + scale_x_continuous("Distance to TSS", limits=c(-1000000, 1000000))
# p<-p + geom_vline(xintercept = 0, colour="black", linetype="dotted")
# p
}
g4Dist <- function(g4_pos="data/g4_positions.txt",sim=NA, print=0,return=0){
  g4_locations<-read.delim(g4_pos, header = T)
g4_locations$g4<-as.integer(g4_locations$g4)
if(is.na(sim)){
snv_data<-getData()
}
else{
cat("Generating simulated snv_data\n")
hit_count<-nrow(getData())
snv_data<-snvSim(N=hit_count, write=print)
colnames(snv_data)<-c("chrom", "pos", "v3", "v4", "v5")
snv_data<-filter(snv_data, chrom == "2L" | chrom == "2R" | chrom == "3L" | chrom == "3R" | chrom == "X" | chrom == "Y" | chrom == "4")
snv_data<-droplevels(snv_data)
}
  # Will throw an error if SVs don't exist on a chrom...
  # Removes chroms with 30 or fewer observations
svCount <- table(snv_data$chrom)
snv_data <- subset(snv_data, chrom %in% names(svCount[svCount > 30]))
snv_data<-droplevels(snv_data)
g4_locations <- subset(g4_locations, chrom %in% levels(snv_data$chrom))
g4_locations<-droplevels(g4_locations)
fun2 <- function(p) {
index<-which.min(abs(g4_df$g4 - p))
closestTss<-g4_df$g4[index]
chrom<-as.character(g4_df$chrom[index])
gene<-as.character(g4_df$gene[index])
dist<-(p-closestTss)
list(p, closestTss, dist, chrom, gene)
}
l <- list()
for (c in levels(snv_data$chrom)){
df<-filter(snv_data, chrom == c)
g4_df<-filter(g4_locations, chrom == c)
dist2g4<-lapply(df$pos, fun2)
dist2g4<-do.call(rbind, dist2g4)
dist2g4<-as.data.frame(dist2g4)
colnames(dist2g4)=c("bp", "closest_g4", "min_dist", "chrom", "closest_gene")
dist2g4$min_dist<-as.numeric(dist2g4$min_dist)
l[[c]] <- dist2g4
}
dist2g4<-do.call(rbind, l)
dist2g4<-as.data.frame(dist2g4)
dist2g4$chrom<-as.character(dist2g4$chrom)
dist2g4<-arrange(dist2g4,(abs(min_dist)))
# Removes chroms with fewer than 20 observations
# svCount <- table(dist2g4$chrom)
# dist2g4 <- subset(dist2g4, chrom %in% names(svCount[svCount > 10]))
if(return==1){
return(dist2g4)
}
else{
p<-ggplot(dist2g4)
p<-p + geom_density(aes(min_dist, fill = chrom), alpha = 0.3)
p<-p + scale_x_continuous("Distance to G4 (Kb)",
limits=c(-10000, 10000),
breaks=c(-10000,-1000, 1000, 10000),
expand = c(.0005, .0005),
labels=c("-10", "-1", "1", "10") )
p<-p + scale_y_continuous("Density")
p<-p + geom_vline(xintercept = 0, colour="black", linetype="dotted")
#p<-p + facet_wrap(~chrom, scale = "free_x", ncol = 5)
p <- p + geom_rug(aes(min_dist, colour=chrom))
p<-p + cleanTheme() +
theme(strip.text = element_text(size=20),
legend.position="top")
p<-p + facet_wrap(~chrom, ncol = 3, scales = "free_y")
if(is.na(sim)){
g4Distout<-paste("bpG4dist.pdf")
}
else{
g4Distout<-paste("bpG4dist_sim.pdf")
}
cat("Writing file", g4Distout, "\n")
ggsave(paste("plots/", g4Distout, sep=""), width = 20, height = 10)
p
}
}
g4DistOverlay <- function(){
real_data<-g4Dist(return=1)
real_data$Source<-"Real"
  sim_data<-g4Dist(sim=1, return=1)
sim_data$Source<-"Sim"
sim_data<-filter(sim_data, chrom != "Y", chrom != 4)
sim_data<-droplevels(sim_data)
real_data<-filter(real_data, chrom != "Y", chrom != 4)
real_data<-droplevels(real_data)
colours<-c( "#E7B800", "#00AFBB")
p<-ggplot()
p<-p + geom_density(data=real_data,aes(min_dist, fill = Source), alpha = 0.4)
p<-p + geom_density(data=sim_data,aes(min_dist, fill = Source), alpha = 0.4)
p<-p + facet_wrap(~chrom, ncol = 3, scales = "free_y")
p<-p + scale_x_continuous("Distance to G4 (Kb)",
limits=c(-10000, 10000),
breaks=c(-10000,-1000, 1000, 10000),
expand = c(.0005, .0005),
labels=c("-10", "-1", "1", "10") )
p<-p + scale_y_continuous("Density")
p<-p + geom_vline(xintercept = 0, colour="black", linetype="dotted")
#p<-p + facet_wrap(~chrom, scale = "free_x", ncol = 5)
p <- p + geom_rug(data=real_data,aes(min_dist, colour=Source),sides="b")
p <- p + geom_rug(data=sim_data,aes(min_dist, colour=Source),sides="t")
p <- p + scale_fill_manual(values=colours)
p <- p + scale_colour_manual(values=colours)
p<-p + cleanTheme() +
theme(strip.text = element_text(size=20),
legend.position="top")
p<-p + facet_wrap(~chrom, ncol = 3, scales = "free_y")
g4Distout<-paste("bpG4dist_overlay.pdf")
cat("Writing file", g4Distout, "\n")
ggsave(paste("plots/", g4Distout, sep=""), width = 20, height = 10)
p
}
#' chromDist
#'
#' Plot genome-wide snv distribution
#' @import ggplot2
#' @keywords distribution
#' @export
chromDist <- function(object=NA, notch=0){
snv_data<-getData()
ext<-'.pdf'
  if(is.na(object)){
    object<-'grouped_trans'
  }
  if(object=='grouped_trans'){
    cols<-setCols(snv_data, "grouped_trans") # needed whenever the default grouping is plotted
  }
if(notch){
snv_data<-exclude_notch()
ext<-'_excl.N.pdf'
}
cat("Plotting snvs by", object, "\n")
p<-ggplot(snv_data)
p<-p + geom_histogram(aes(pos/1000000, fill = get(object)), binwidth=0.1, alpha = 0.8)
p<-p + facet_wrap(~chrom, scale = "free_x", ncol = 2)
p<-p + scale_x_continuous("Mbs", breaks = seq(0,33,by=1), limits = c(0, 33),expand = c(0.01, 0.01))
p<-p + scale_y_continuous("Number of snvs", expand = c(0.01, 0.01))
p<-p + cleanTheme() +
theme(axis.text.x = element_text(angle = 45, hjust=1),
axis.text = element_text(size=12),
axis.title = element_text(size=20),
strip.text.x = element_text(size = 15)
)
if (object == 'grouped_trans'){
p<-p + cols
}
chrom_outfile<-paste("snv_dist_genome_by_", object, ext, sep = "")
cat("Writing file", chrom_outfile, "\n")
ggsave(paste("plots/", chrom_outfile, sep=""), width = 20, height = 10)
p
}
mutationTypes <- function(allele_frequency = 0.1){
  library(dplyr) # load before the pipe is used below
  snv_data<-getData()
  snv_data <- snv_data %>%
    filter(dups!="TRUE") %>%
    filter(a_freq>=allele_frequency)
mutCounts <- snv_data %>%
group_by(sample, grouped_trans) %>%
summarise(count=n()) %>%
mutate(perc=count/sum(count))
p <- ggplot(mutCounts)
p <- p + geom_bar(aes(sample, perc*100, fill=grouped_trans), stat="identity")
p <- p + cleanTheme() +
theme(panel.grid.major.y = element_line(color="grey80", size = 0.5, linetype = "dotted"),
axis.text.x = element_text(angle = 90, hjust=1),
axis.text.y = element_text(size=15),
axis.title = element_text(size=20),
strip.text.x = element_text(size = 15)
)
p <- p + ylab("% contribution")
p
}
###########
## Misc ##
###########
# Gene lengths on accross chroms
#gene_lengths="data/gene_lengths.txt"
#gene_lengths<-read.delim(gene_lengths, header = T)
#p<-ggplot(gene_lengths)
#p<-p + geom_point(aes( x=(start+(length/2)), y=log10(length), colour = chrom))
#p<-p + facet_wrap(~chrom, scale = "free_x")
#p
|
be4608272aa21a146abb600499e3f562e19d9b4e
|
3315ba96bf388f9a22dc2249daf50c5234b0ada0
|
/man/model_test.Rd
|
bc1bff0d53fe2f30eb377ad99d514af7f776ad79
|
[
"MIT"
] |
permissive
|
cunybpl/bRema
|
ec78adcd99677fd3e7ecc52dd224af07867014fb
|
4d189940c7b8ca4bff4b0f92e38000433d1ddf40
|
refs/heads/master
| 2021-06-21T15:29:22.907408
| 2019-08-22T19:39:49
| 2019-08-22T19:39:49
| 140,751,149
| 1
| 0
|
MIT
| 2019-06-06T17:31:47
| 2018-07-12T18:34:24
|
R
|
UTF-8
|
R
| false
| true
| 1,242
|
rd
|
model_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeller.R
\name{model_test}
\alias{model_test}
\title{Tests if a model passes tTest, population test, and shape test}
\usage{
model_test(x, y, bestvalue, model, n = 4)
}
\arguments{
\item{x}{A vector. Independent variables.}
\item{y}{A vector. Dependent variables.}
\item{bestvalue}{A list containing information about parameters such as slopes, change-points, and stats such as RMSE.}
\item{model}{A character string. Model such as '2P', '3PH', '3PC', '4P' or '5P'.}
\item{n}{A numeric value that determines the threshold for the population test: \code{threshold = number_of_independent_variables/n}. Defaults to 4. See \code{\link{pop_test}}.}
}
\description{
This function determines whether a given model passes the tTest, population test, and shape test. The output is a nested list. If the model passes all three tests, 'Pass' is returned as the \code{main_test} result; otherwise 'Fail' is returned.
}
\examples{
util = subset(unretrofit_utility, unretrofit_utility$bdbid == 'f3acce86'
& unretrofit_utility$energy_type == 'Elec')
temp = sort_matrix(util$OAT,util$usage)
bestvalue = create_model(temp$x, temp$y, '5P')
test_result = model_test(temp$x, temp$y, bestvalue, '5P')
}
|
13adee5f9bfb595583f20b387a00af454bf23a3d
|
e9e5a348573f0099d8a6c03ab90ca93d7e6df9ca
|
/bLogistic.R
|
47a319974a7cc19c63c5ee08fa153987b78a9add
|
[] |
no_license
|
nxskok/stad29-notes
|
a39f73502e18f92b12024a910a3e4f83b3929c15
|
a8a887e621b84fdadb974bf50c384ba65d2a8383
|
refs/heads/master
| 2021-06-08T11:21:53.709889
| 2021-04-26T23:02:15
| 2021-04-26T23:02:15
| 161,848,845
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,847
|
r
|
bLogistic.R
|
### R code from vignette source '/home/ken/teaching/d29/notes/bLogistic.Rnw'
###################################################
### code chunk number 1: bLogistic.Rnw:41-45
###################################################
rats=read.table("rat.txt",header=T)
rats
attach(rats)
rats.1=glm(status~dose,family="binomial")
###################################################
### code chunk number 2: bLogistic.Rnw:62-63
###################################################
summary(rats.1)
###################################################
### code chunk number 3: bLogistic.Rnw:85-87
###################################################
p=predict(rats.1,type="response")  # predicted probabilities
p=predict(rats.1)                  # default scale: linear predictor (log-odds); this overwrites p
cbind(rats,p)
# separation: status is perfectly predicted by dose
rats$status=factor(c("lived","lived","died","died","died","died"))
rats
str(rats)
rats.1x=glm(status~dose,family="binomial",data=rats)
summary(rats.1x)
p=predict(rats.1x,type="response")
cbind(rats,p)
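# With complete separation the maximum likelihood estimate does not exist:
# glm() warns that fitted probabilities numerically 0 or 1 occurred, and the
# estimated slope and its standard error are huge.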
###################################################
### code chunk number 4: bLogistic.Rnw:123-129
###################################################
detach(rats)
rat2=read.table("rat2.txt",header=T)
rat2
attach(rat2)
response=cbind(lived,died)
response2=cbind(died,lived) # reversed column order would model P(died) instead
response
rat2.1=glm(response~dose,family="binomial")
summary(rat2.1)
###################################################
### code chunk number 5: bLogistic.Rnw:145-146
###################################################
summary(rat2.1)
###################################################
### code chunk number 6: bLogistic.Rnw:156-158
###################################################
pred=predict(rat2.1,type="response")
pred
cbind(rat2,pred)
###################################################
### code chunk number 7: bLogistic.Rnw:201-207
###################################################
detach(rat2)
sepsis=read.table("sepsis.txt",header=T)
sepsis
dim(sepsis)
str(sepsis)
head(sepsis)
attach(sepsis)
sepsis.1=glm(death~shock+malnut+alcohol+age+
bowelinf,family="binomial")
summary(sepsis.1)
###################################################
### code chunk number 8: bLogistic.Rnw:216-217
###################################################
summary(sepsis.1)$coefficients
###################################################
### code chunk number 9: bLogistic.Rnw:232-235
###################################################
sepsis.2=update(sepsis.1,.~.-malnut)
# the same model, refitted explicitly:
sepsis.2=glm(death~shock+alcohol+age+
bowelinf,family="binomial")
summary(sepsis.2)
###################################################
### code chunk number 10: bLogistic.Rnw:268-271
###################################################
sepsis.pred=predict(sepsis.2,type="response")
myrows=c(4,1,2,11,32)
cbind(sepsis[myrows,],p=sepsis.pred[myrows])
###################################################
### code chunk number 11: seppo (eval = FALSE)
###################################################
## r=residuals(sepsis.2)
## plot(r~age)
###################################################
### code chunk number 12: virtusentella
###################################################
r=residuals(sepsis.2)
plot(r~age)
###################################################
### code chunk number 13: bLogistic.Rnw:362-364
###################################################
cc=exp(coef(sepsis.2)[-1])
round(cc,2)
###################################################
### code chunk number 14: bLogistic.Rnw:375-376
###################################################
detach(sepsis)
###################################################
### code chunk number 15: bLogistic.Rnw:395-397
###################################################
(od1=0.02/0.98)
(od2=0.01/0.99)
###################################################
### code chunk number 16: bLogistic.Rnw:402-403
###################################################
od1/od2 # very close to 2
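# For a rare outcome the odds p/(1-p) is close to p itself, so the odds ratio
# approximates the risk ratio; here the risk doubles (0.02 vs 0.01), giving an
# odds ratio near 2.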
###################################################
### code chunk number 17: bLogistic.Rnw:456-458
###################################################
freqs=read.table("miners-tab.txt",header=T)
freqs
###################################################
### code chunk number 18: bLogistic.Rnw:470-473
###################################################
total=apply(freqs[,-1],1,sum)
total
obsprop=freqs[,-1]/total
obsprop
cbind(exposure=freqs[,1],obsprop)
###################################################
### code chunk number 19: bLogistic.Rnw:486-488
###################################################
prop.table(freqs[-1],1) # error: prop.table() needs a matrix or table, not a data frame
m=as.matrix(freqs[,-1])
prop.table(m,1) # 1 for rows, like apply
###################################################
### code chunk number 20: bLogistic.Rnw:525-533
###################################################
freqs
ex=freqs[,1] # exposures
sev=c("None","Moderate","Severe") # severities
names(freqs)
sev=names(freqs)[-1]
sev
obsprop
plot(ex,obsprop[,1],type="n",xlab="Exposure",
ylab="Observed proportion", ylim=c(0,1))
lines(ex,obsprop[,1],type="b",col=1,pch=1)
lines(ex,obsprop[,2],type="b",col=2,pch=2)
lines(ex,obsprop[,3],type="b",col=3,pch=3)
legend("topright",sev,col=1:3,pch=1:3)
###################################################
### code chunk number 21: bLogistic.Rnw:543-544
###################################################
freqs
###################################################
### code chunk number 22: dartmiff
###################################################
#library(dplyr)
library(tidyr)
suppressMessages(library(dplyr))
###################################################
### code chunk number 23: kingswear
###################################################
freqs
library(dplyr)
library(tidyr)
miners=freqs %>% gather(severity,frequency,None:Severe)
head(miners,n=10)
str(miners)
factor(miners$severity)
severity.ordered=ordered(miners$severity,c("None","Moderate","Severe"))
severity.ordered
###################################################
### code chunk number 24: bLogistic.Rnw:628-630
###################################################
library(MASS)
miners.1=polr(severity.ordered~Exposure,weights=frequency,data=miners)
###################################################
### code chunk number 25: bLogistic.Rnw:638-639
###################################################
summary(miners.1)
miners
###################################################
### code chunk number 26: bLogistic.Rnw:650-652
###################################################
miners.0=polr(severity.ordered~1,weights=frequency,data=miners) # null model on the same ordered response
anova(miners.0,miners.1)
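# anova() here is a likelihood-ratio test of the Exposure model against the
# intercept-only model; a small p-value indicates that exposure shifts the
# probabilities toward different severity categories.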
###################################################
### code chunk number 27: bLogistic.Rnw:668-671
###################################################
freqs$Exposure
miners.new=data.frame(Exposure=freqs$Exposure)
miners.new
p=predict(miners.1,miners.new,type="p")
p
cbind(miners.new,p)
###################################################
### code chunk number 28: bLogistic.Rnw:680-682 (eval = FALSE)
###################################################
## plot(ex,obsprop[,1],type="n",ylim=c(0,1),
## xlab="Exposure",ylab="Probability")
###################################################
### code chunk number 29: bLogistic.Rnw:688-691 (eval = FALSE)
###################################################
## points(ex,obsprop[,1],col=1,pch=1)
## points(ex,obsprop[,2],col=2,pch=2)
## points(ex,obsprop[,3],col=3,pch=3)
###################################################
### code chunk number 30: bLogistic.Rnw:697-700 (eval = FALSE)
###################################################
## lines(ex,p[,1],col=1)
## lines(ex,p[,2],col=2)
## lines(ex,p[,3],col=3)
###################################################
### code chunk number 31: bLogistic.Rnw:706-707 (eval = FALSE)
###################################################
## legend("topright",sev,col=1:3,pch=1:3)
###################################################
### code chunk number 32: bLogistic.Rnw:733-742
###################################################
plot(ex,obsprop[,1],type="n",ylim=c(0,1),
xlab="Exposure",ylab="Probability")
points(ex,obsprop[,1],col=1,pch=1)
points(ex,obsprop[,2],col=2,pch=2)
points(ex,obsprop[,3],col=3,pch=3)
lines(ex,p[,1],col=1)
lines(ex,p[,2],col=2)
lines(ex,p[,3],col=3)
legend("topright",sev,col=1:3,pch=1:3)
###################################################
### code chunk number 33: bLogistic.Rnw:764-766
###################################################
brandpref=read.csv("mlogit.csv",header=T)
head(brandpref)
###################################################
### code chunk number 34: bLogistic.Rnw:780-782
###################################################
attach(brandpref)
class(sex)
class(factor(sex))
brandpref$sex=factor(brandpref$sex)
brandpref$brand=factor(brandpref$brand)
###################################################
### code chunk number 35: bLogistic.Rnw:790-792
###################################################
library(nnet)
brands.both=multinom(brand~age+sex,data=brandpref)
###################################################
### code chunk number 36: bLogistic.Rnw:802-804
###################################################
brands.age=multinom(brand~age,data=brandpref)
brands.sex=multinom(brand~sex,data=brandpref)
###################################################
### code chunk number 37: bLogistic.Rnw:815-817
###################################################
anova(brands.age,brands.both)
anova(brands.sex,brands.both)
brands.int=update(brands.both,.~.+sex*age)
anova(brands.both,brands.int) # not significant
###################################################
### code chunk number 38: bLogistic.Rnw:836-839
###################################################
summary(brandpref)
new=expand.grid(age=c(24,28,32,35,38),sex=factor(0:1))
p=predict(brands.both,new,type="probs")
cbind(new,p)
###################################################
### code chunk number 39: bLogistic.Rnw:860-863 (eval = FALSE)
###################################################
## plot(new$age,p[,1],type="n",xlab="age",
## ylab="predicted probability")
## mycol=ifelse(new$sex==1,"red","blue")
###################################################
### code chunk number 40: bLogistic.Rnw:867-871 (eval = FALSE)
###################################################
## for (i in 1:3)
## {
## text(new$age,p[,i],i,col=mycol)
## }
###################################################
### code chunk number 41: bLogistic.Rnw:875-877 (eval = FALSE)
###################################################
## legend("topright",legend=levels(new$sex),
## fill=c("blue","red"))
###################################################
### code chunk number 42: bLogistic.Rnw:900-907
###################################################
plot(new$age,p[,1],type="n",xlab="age",ylab="predicted probability")
mycol=ifelse(new$sex==1,"red","blue")
for (i in 1:3)
{
text(new$age,p[,i],i,col=mycol)
}
legend("topright",legend=levels(new$sex),fill=c("blue","red"))
###################################################
### code chunk number 43: bLogistic.Rnw:954-957
###################################################
attach(brandpref)
tb=table(brand,age,sex)
tb
###################################################
### code chunk number 44: bLogistic.Rnw:972-975
###################################################
b=as.data.frame(tb)
b
b[21:30,]
detach(brandpref)
###################################################
### code chunk number 45: bLogistic.Rnw:993-997
###################################################
b$sex=factor(b$sex)
b$brand=factor(b$brand)
b.both=multinom(brand~age+sex,data=b,weights=Freq)
b.age=multinom(brand~age,data=b,weights=Freq)
###################################################
### code chunk number 46: bLogistic.Rnw:1010-1011
###################################################
anova(b.age,b.both)
str(b)
str(brandpref)
|
9198a5bb7067dde5bf049ce5505289c44a473c3f
|
b72f90ef0cde82cf132c45525cc7eab7877f23cb
|
/R/layer.R
|
8bea5c5a5bc396547b4330e9b47826385fd843bc
|
[] |
no_license
|
zhaoleibupt/REcharts3
|
4ce8eb3b3372d328081d8ecfa39c0572f1f100a1
|
84c058cb1a8ba3acddc2457f8caf74a5e8c33f0a
|
refs/heads/master
| 2021-01-20T08:08:28.489367
| 2016-07-15T03:35:58
| 2016-07-15T03:35:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,103
|
r
|
layer.R
|
# type = 'heatmap'
# ncol = NULL; nrow = NULL; facets.fontSize = 14; facets.top = 6;
# label = NULL; label.show = F; label.position = 'inside';
# opacity = 0.7; symbolSize = 'formatFunction_symbolSize'
# tooltip.show = T;
# type = 'bar'; stack = F; color = .plotColor;
# title = NULL; title.fontSize = 18; title.top = 0; title.left = 'left';
# subtext = NULL; subTitle.fontSize = 14;subTitle.color = '#888',
# legend = NULL; legend.show = T; legend.orient = c('horizontal', 'vertical');
# legend.left = 'center'; legend.top = '6%';
# legend.right = NULL; legend.bottom = NULL; legend.width = NULL; legend.height = NULL;
# grid.left = NULL; grid.top = NULL; grid.right = NULL; grid.bottom = NULL; grid.margin.x = 5; grid.margin.y = 5;
# yAxis.max = NULL;
# xAxis.inverse = F; axisLabel.interval.x = NULL; axisLabel.interval.y = NULL;
# toolbox.show = F; dataZoom.show = T; dataView.show = T; dataView.readOnly = T; restore.show = T; saveAsImage.show = T;
# width = NULL; height = NULL;
# chart.radius = '70%'; chart.position = c('50%', '55%')
# draggable = T;repulsion = 200;gravity = 0.1;edgeLength = 50;layoutAnimation = F;focusNodeAdjacency = F
setLayer = function(dataList, type = 'bar',
...,
stack = F, color = .plotColor, opacity = 1, symbolSize = 10,
ncol = NULL, nrow = NULL, facets.fontSize = 14, facets.top = 6,
label = NULL, label.show = F, label.position = 'inside',
tooltip.show = T,
title = NULL, title.fontSize = 18, title.top = 0, title.left = 'left',
subTitle = NULL, subTitle.fontSize = 14, subTitle.color = '#888',
legend = NULL, legend.show = T, legend.orient = c('horizontal', 'vertical'),legend.left = 'center', legend.top = '5.5%',
legend.right = NULL, legend.bottom = NULL, legend.width = NULL, legend.height = NULL,
grid.left = NULL, grid.top = NULL, grid.right = NULL, grid.bottom = NULL, grid.margin.x = 5, grid.margin.y = 5,
xAxis.min = NULL, xAxis.max = NULL, xAxis.inverse = F,
yAxis.min = NULL, yAxis.max = NULL, yAxis.inverse = F,
axisLabel.interval.x = NULL, axisLabel.interval.y = NULL,
toolbox.show = F, dataZoom.show = T, dataView.show = T, dataView.readOnly = T, restore.show = T, saveAsImage.show = T,
width = NULL, height = NULL){
optionList = list()
# seriesSet
optionList$series = .setSeries(dataList,
type = type, color = color, stack = stack,
label.show = label.show, label.position = label.position,
opacity = opacity, symbolSize = symbolSize,
...)
# legendSet
if(type != 'heatmap'){
legendData = if(type != 'pie') dataList@seriesName else dataList@xLevelsName
optionList$legend = .legendSet(data = legendData,
legend.show = legend.show,
legend.left = legend.left, legend.top = legend.top,
legend.right = legend.right, legend.bottom = legend.bottom,
legend.width = legend.width, legend.height = legend.height,
legend.orient = legend.orient[1])
}
  # gridSet
  # note whether the user left grid.top unset before applying the default,
  # so the no-custom-grid branch below can still trigger
  grid.top.unset = is.null(grid.top)
  if(grid.top.unset){
    if(!'facets' %in% dataList@var) grid.top = 10 else grid.top = 16
  }
  if(!'facets' %in% dataList@var & is.null(grid.left) & grid.top.unset & is.null(grid.right) & is.null(grid.bottom)){
gridSet = NULL
} else if('facets' %in% dataList@var){
gridSet = .gridSet_facets(length(dataList@facetsName), ncol = ncol, nrow = nrow,
grid.left = grid.left, grid.top = grid.top,
grid.right = grid.right, grid.bottom = grid.bottom,
grid.margin.x = grid.margin.x, grid.margin.y = grid.margin.y)
} else {
gridSet = .gridSet(grid.left = grid.left, grid.top = grid.top,
grid.right = grid.right, grid.bottom = grid.bottom)
}
optionList$grid = gridSet
# titleSet
if(!is.null(title)){
optionList$title = list(list(text = title, fontSize = title.fontSize,
top = title.top, left = title.left,
subtext = subTitle,
subtextStyle = list(fontSize = subTitle.fontSize, color = subTitle.color)
))
}
if('facets' %in% dataList@var){
g = attr(gridSet, 'grid')
addTitle = mapply(function(ir, it, x){ # ir = i.grid[1 ,1]; it = i.grid[1 ,2]
o = list(left = ir, top = it, text = x, fontSize = facets.fontSize)
o[1:2] = lapply(o[1:2], paste0, '%')
o
}, g[ ,1], g[ ,2] - facets.top, as.list(dataList@facetsName), SIMPLIFY = F, USE.NAMES = F)
optionList$title = c(optionList$title, addTitle)
}
# Axis
if(type %in% c('bar', 'his', 'line', 'scatter', 'heatmap')){
optionList$xAxis = list()
optionList$yAxis = list()
    for(i in 1:length(dataList@facetsName)){
      if(i < 1) next # guards the 1:0 case when facetsName is empty
      optionList$xAxis[[i]] = list(gridIndex = i - 1,
                                   min = xAxis.min, max = xAxis.max, inverse = xAxis.inverse,
                                   axisLabel = list(interval = axisLabel.interval.x))
      if(type %in% c('line', 'bar', 'his', 'heatmap')) optionList$xAxis[[i]]$data = dataList@xLevelsName
      optionList$yAxis[[i]] = list(gridIndex = i - 1,
                                   min = yAxis.min, max = yAxis.max, inverse = yAxis.inverse,
                                   axisLabel = list(interval = axisLabel.interval.y))
      if(type %in% c('heatmap')) optionList$yAxis[[i]]$data = dataList@yLevelsName
    }
names(optionList$xAxis) = NULL
names(optionList$yAxis) = NULL
}
optionList$tooltip = list(show = tooltip.show, formatter = 'formatFunction_tooltip')
optionList$toolbox = .toolboxSet(toolbox.show = toolbox.show,
dataZoom.show = dataZoom.show,
dataView.show = dataView.show, dataView.readOnly = dataView.readOnly,
restore.show = restore.show,
saveAsImage.show = saveAsImage.show)
p = new("REcharts3")
p@id = paste('ID', format(Sys.time(), "%y%m%d%H%M%S"), substring(runif(1), 3, 5), type, sep = '_')
p@id = gsub('\\..*', '', p@id)
p@type = type
p@option = rmNULL(optionList)
p@xLevelsName = dataList@xLevelsName
p@yLevelsName = dataList@yLevelsName
p@seriesName = dataList@seriesName
p@facetsName = dataList@facetsName
p@plotOption = list(width = ifelse(!is.null(width), width, 0),
height = ifelse(!is.null(height), height, 0))
if(type %in% c('line', 'bar', 'his', 'graph', 'heatmap')){
p@formatFunction_label = 'function(params){return params.data.label}'
p@formatFunction_tooltip = 'function(params){return params.name + \':<br>\' + params.seriesName + \' : \' + params.data.label}'
} else if(type == 'pie'){
p@formatFunction_label = '\\"{b}: {c} ({d}%)\\"'
p@formatFunction_tooltip = '\\"{a} <br/>{b}: {c} ({d}%)\\"'
} else if(type %in% c('scatter', 'lines')){
p@formatFunction_label = 'function(params){return params.data.label}'
p@formatFunction_tooltip = 'function(params){return params.seriesName + \' : \' + params.data.label}'
p@formatFunction_symbolSize = 'function (data){ return data[2]; }'
}
p
}
# p = setLayer(dat1, feed, weight, label = round(weight, 1))
# p = bar(dat2, wool, breaks, tension, label = breaks*10)
bar = function(dat, x, y, z = NULL, facets = NULL, label = NULL,
label.show = F, barGap = '10%', legend.left = 'center', ...){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
dataList = .dataList(dat, type = 'bar')
  # auto-enable labels when a label mapping is supplied and the caller did not
  # set label.show explicitly (presumed intent)
  if(!is.null(expr$label) && missing(label.show)) label.show = T
p = setLayer(dataList, type = 'bar', xAxis.inverse = T,
label.show = label.show, barGap = barGap, legend.left = legend.left, ...)
coord_rotate(p)
}
his = function(dat, x, y, z = NULL, facets = NULL, label = NULL,
label.show = F, barGap = '10%', legend.left = 'center', ...){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
dataList = .dataList(dat, type = 'bar')
  if(!is.null(expr$label) && missing(label.show)) label.show = T
p = setLayer(dataList, type = 'bar', label.show = label.show, barGap = barGap, legend.left = legend.left, ...)
p
}
line = function(dat, x, y, z = NULL, facets = NULL, label = NULL,
label.show = F, legend.left = 'center', ...){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
dataList = .dataList(dat, type = 'line')
  if(!is.null(expr$label) && missing(label.show)) label.show = T
p = setLayer(dataList, type = 'line', label.show = label.show, legend.left = legend.left, ...)
p
}
scatter = function(dat, x, y, z = NULL, facets = NULL, label = NULL, size = NULL,
label.show = F, legend.left = 'center', opacity = 0.7, ...){
expr = match.call()
expr[[1]] = as.name('.dataParse')
expr[['type']] = 'scatter'
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
dataList = .dataList(dat, type = 'scatter')
  if(!is.null(expr$label) && missing(label.show)) label.show = T
p = setLayer(dataList, type = 'scatter',
label.show = label.show, legend.left = legend.left,
opacity = opacity, ...)
p
}
pie = function(dat, x, y, facets = NULL, label = NULL,
label.show = T, label.position = 'outside',
chart.radius = '70%', chart.position = c('50%', '55%'),
...){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
dataList = .dataList(dat, type = 'pie')
# if(!is.null(expr$label) & is.null(expr$label)) label.show = T
p = setLayer(dataList, type = 'pie',
label.show = label.show,
label.position = label.position,
radius = chart.radius,
center = chart.position,
...)
p
}
donut = function(dat, x, y, facets = NULL, label = NULL,
label.show = T, label.position = 'outside',
chart.radius = c('40%', '60%'), chart.position = c('50%', '55%'),
...){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
dataList = .dataList(dat, type = 'pie')
# if(!is.null(expr$label) & is.null(expr$label)) label.show = T
p = setLayer(dataList, type = 'pie',
label.show = label.show,
label.position = label.position,
radius = chart.radius,
center = chart.position,
...)
p
}
force = function(dat, x, y, z = NULL, facets = NULL, label = NULL,
draggable = T, repulsion = 200, gravity = 0.1, edgeLength = 50, layoutAnimation = T,
focusNodeAdjacency = F,
...){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
parList[['type']] = 'graph'
dat = eval(expr, parent.frame())
dataList = .dataList(dat, type = 'graph')
# if(!is.null(expr$label) & is.null(expr$label)) label.show = T
p = setLayer(dataList, type = 'graph', layout = 'force',
draggable = draggable, focusNodeAdjacency = focusNodeAdjacency,
force = list(repulsion = repulsion, gravity = gravity,
edgeLength = edgeLength, layoutAnimation = layoutAnimation),
...)
p
}
mapLines = function(dat, x, y, z = NULL, label = NULL,
center = NULL, zoom = 14, line.width = 0.1,
label.show = F, legend.left = 'center', ...){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
dataList = .dataList(dat, type = 'lines')
  if(!is.null(expr$label) && missing(label.show)) label.show = T
p = setLayer(dataList, type = 'lines', label.show = label.show, legend.left = legend.left,
...,
coordinateSystem = 'bmap', polyline = T, lineStyle = list(width = line.width))
p@option$bmap = .setBmap(center, zoom)
p
}
markScatter = function(p, dat, x, y, z, color = .plotColor[1]){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
if(is.null(dat$z)) dat$z = NA
toList_markScatter = function(d)(
mapply(function(x, y, z){
list(name = z, value = c(x, y))
},
d$x, d$y, d$z,
SIMPLIFY = F, USE.NAMES = F)
)
m = length(p@option$series) + 1
p@option$series[[m]] = list(
type = 'scatter',
coordinateSystem = 'bmap',
data = toList_markScatter(dat),
label = list(
normal = list(show = T, position = 'inside', formatter = '{b}')
),
itemStyle = list(normal = list(color = color, size = 10))
)
p
}
markPoint = function(p, dat, x, y, z, color = .plotColor[1], seriesIndex = 1){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
if(is.null(dat$z)) dat$z = NA
toList_markPoint = function(d)(
mapply(function(x, y, z){
list(name = z, coord = c(x, y))
},
d$x, d$y, d$z,
SIMPLIFY = F, USE.NAMES = F)
)
p@option$series[[seriesIndex]]$markPoint = list(
data = toList_markPoint(dat),
label = list(
normal = list(show = T, position = 'inside', formatter = '{b}')
),
itemStyle = list(normal = list(color = color))
)
p
}
markAxisLine = function(p, dat, x, y, type = 'xAxis',
color = .plotColor[1], seriesIndex = 1){
expr = match.call()
expr[[1]] = as.name('.dataParse')
parList = as.list(expr[-1])
dat = eval(expr, parent.frame())
if(is.null(dat$z)) dat$z = NA
if(type == 'xAxis'){
ds = mapply(function(x, y){
list(xAxis = x, label = y)
}, dat$x, dat$y, SIMPLIFY = F, USE.NAMES = F)
} else {
ds = mapply(function(x, y){
list(yAxis = x, label = y)
}, dat$x, dat$y, SIMPLIFY = F, USE.NAMES = F)
}
markLine = list(data = ds,
lineStyle = list(normal = list(color = color)),
label = list(normal = list(formatter = 'formatFunction_label')))
p@option$series[[seriesIndex]]$markLine = markLine
p
}
|
2951d2ad4c011dc4d8bea0c5f7b1cdfa969237b1
|
8fd26f923edc0ccb31fcd0dc109c23a059ec0886
|
/R/sensitivitypuc.R
|
66107a27a4d46e424a1789158dba83f2df646c40
|
[] |
no_license
|
matteobonvini/sensitivitypuc
|
37b290662ed8e9edb6c9b120c900f53af7e5a6ad
|
edfd5452a6bf8fef650cc1a8ddc9f7167f2c28b3
|
refs/heads/master
| 2021-06-26T20:47:00.438209
| 2020-12-03T10:27:45
| 2020-12-03T10:27:45
| 182,876,912
| 5
| 3
| null | 2019-12-06T12:44:29
| 2019-04-22T22:22:44
|
R
|
UTF-8
|
R
| false
| false
| 164
|
r
|
sensitivitypuc.R
|
#' echseffects: A package to perform sensitivity analysis via the proportion of
#' unmeasured confounding.
#'
#' @docType package
#'
#' @name sensAteBounds
NULL
|
157e43ff8e709e584b2dcfe8fc225899f6013c92
|
0c668165f5b60c1545cff3636cc9dfe7a020bc0f
|
/bma.R
|
2044c23118062d4d282dfd764c850df2c7377804
|
[
"MIT"
] |
permissive
|
wethepeopleonline/r-scripts
|
4253433c349caee7b2c7d6acc8365a68000375c7
|
dc20093c43f40f2babddfab9f272ef87053ad761
|
refs/heads/master
| 2020-05-07T17:14:36.470032
| 2017-09-25T11:43:53
| 2017-09-25T11:43:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,329
|
r
|
bma.R
|
#
#
############### Bayesian Model Averaging examples ###############
#
# This script assumes you have worked through all the previous notes from
# the web page and you have downloaded, installed, and updated all available
# R packages.
### Upload the SAS SEM Example data ("SEMData.sav") which is SIMULATION DATA.
library(foreign)
exsem <- read.spss("http://www.unt.edu/rss/class/Jon/R_SC/Module10/SEMData.sav", use.value.labels=TRUE,
max.value.labels=Inf, to.data.frame=TRUE)
summary(exsem)
head(exsem)
cor(exsem)
# The goal of the this example is to choose the best set of predictor variables for a linear (OLS) prediction
# model with Extroversion (extro) as the outcome.
# Basic linear (OLS) regression model.
reg.1 <- lm(extro~abstruse+agree+block+cognitive+common+cultural+open+physical+series+sets+social+vocab, data=exsem)
summary(reg.1)
# The BMA (Bayesian Model Averaging) package/library was designed specifically to use Bayesian Model
# Averaging to address the variable selection problem in the context of several types of models (e.g.
# GLM, LM, and Survival Models).
library(BMA)
# The function for conducting BMA with respect to linear regression is 'bicreg'.
# The 'bicreg' function requires a matrix of predictor variables as input.
attach(exsem)
predictors <- as.matrix(cbind(open, agree, social, cognitive, physical, cultural, vocab, abstruse,
block, common, sets, series))
detach(exsem)
# Conduct the BMA using the 'bicreg' function by submitting the matrix of predictors (predictors)
# and the outcome variable (exsem$extro).
bma1 <- bicreg(predictors, exsem$extro)
summary(bma1)
# Based on the first column of the output we can see that "open", "agree", and "series" are the most
# important variables; the column 'p!=0' indicates the percentage/probability that the coefficient
# for a given predictor is NOT zero. We can also see that the first model 'model 1' (which includes
# only 'open', 'agree', & 'series') is the best because it has the lowest BIC and the largest
# posterior probability.
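# The same information is also available programmatically; 'probne0' and
# 'postprob' are documented components of the bicreg output object:
bma1$probne0   # posterior probability (in %) that each coefficient is non-zero
bma1$postprob  # posterior probabilities of the selected models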
# The 'ols' part of the output (not printed by default) gives a matrix, with each model as a row and
# each predictor variable as a column; listing the estimated (OLS) coefficient for each variable in
# a given model.
bma1$ols
# Likewise, the 'se' part of the output produces a similar matrix, with the standard errors for each
# coefficient (for each variable/model combination).
bma1$se
# The 'postmean' part of the output (not printed by default) contains the average posterior coefficient
# for each predictor. The 'postsd' provides the standard deviation of each average posterior coefficient.
bma1$postmean
bma1$postsd
# The 'which' part of the output (not provided by default) contains a matrix, with each model as a row and
# each predictor variable as a column; listing whether a variable was included in each model.
bma1$which
# The BMA package also contains a plot function for displaying the posterior distributions of the
# coefficients.
plot(bma1)
# For a complete description of the 'bicreg' function:
help(bicreg)
# Bayesian model averaging can also be conducted when attempting to identify the best set of predictors
# for a Generalized Linear Model. The 'bic.glm' function is very similar to 'bicreg'; you must
# supply a matrix or data frame of the independent variables (predictors) and the outcome variable.
# Results of the following model mirror those above. However, the obvious benefit of the 'bic.glm' function
# is the ability to specify non-normal error distributions (i.e. non-Gaussian; e.g. binomial).
bma2 <- bic.glm(predictors, exsem$extro, glm.family = "gaussian")
summary(bma2)
# Notice that when specifying "Gaussian" the estimation of the posterior standard
# deviations is slightly off; therefore, it is best to use 'bicreg' when family =
# "Gaussian".
bma1$postsd
bma2$postsd
plot(bma2)
# For a complete description of the 'bic.glm' function and its arguments:
help(bic.glm)
### Example of Binomial Logistic Regression using 'bic.glm'.
# Read in the data:
logreg <- read.table("http://www.unt.edu/rss/class/Jon/R_SC/Module9/logreg1.txt",
header=TRUE, sep="", na.strings="NA", dec=".", strip.white=TRUE)
summary(logreg)
# Create a matrix of the predictor variables.
attach(logreg)
predictors.logist <- as.matrix(cbind(x1,x2,x3,x4))
detach(logreg)
# Run the 'bic.glm' function specifying the binomial family.
bma2 <- bic.glm(predictors.logist, logreg$y, glm.family = "binomial")
summary(bma2)
plot(bma2)
### Example of Multinomial Logistic Regression using library 'mlogitBMA' and function 'bic.mlogit'.
library(mlogitBMA)
# Read in the data from the web (data is an SPSS.sav file, so the 'foreign' package is necessary).
library(foreign)
mdata1 <-
read.spss("http://www.unt.edu/rss/class/Jon/R_SC/Module9/MultiNomReg.sav",
use.value.labels=TRUE, max.value.labels=Inf, to.data.frame=TRUE)
summary(mdata1)
# Apply the 'bic.logit' function; supplying the formula in standard format, choices represent
# the choices on the outcome variable (i.e. the categories of the outcome).
mlog.1 <- bic.mlogit(y ~ x1 + x2 + x3, data = mdata1, choices = 1:3)
summary(mlog.1)
# To see all the arguments of the 'bic.mlogit' function
help(bic.mlogit)
|
65b3f4f7c557ff40ad09d610aaac231048a862c1
|
a71b66c5093942f7c7bae59dac35dc13ab33b0c5
|
/utility_functions.R
|
3fce5d6b3dd097701c739484d72a4e40b5d5850a
|
[] |
no_license
|
bertcarnell/bracketr
|
2d2dc312c3961c87fa3d742326f3c18a84369392
|
41fde1010618414a69fe0b730f6c009eaa535a6c
|
refs/heads/main
| 2023-04-27T05:50:43.776737
| 2021-05-16T03:23:17
| 2021-05-16T03:23:17
| 358,048,857
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,186
|
r
|
utility_functions.R
|
drawBracket <- function(structureList, P, winner = NA)
{
X <- matrix(NA, nrow = nrow(P), ncol = ncol(P))
for (i in seq_along(structureList))
{
ind <- structureList[[i]]
if (ind[1,2] == ncol(X))
{
if (is.na(winner))
{
X[ind] <- rmultinom(1, size = 1, prob = P[ind])
} else
{
X[ ,ncol(P)] <- 0
X[winner, ncol(P)] <- 1
}
} else
{
current_column <- ind[1,2]
higher_column <- current_column + 1
higher_ind <- ind
higher_ind[,2] <- higher_column
if (any(X[higher_ind] == 1))
{
X[ind] <- X[higher_ind]
} else
{
X[ind] <- rmultinom(1, size = 1, prob = P[ind])
}
}
}
return(X)
}
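# Note on inputs (inferred from the logic above, not documented in the original):
# each element of structureList is an arr.ind-style index matrix for one game
# (rows = the team rows in play, column = the round). The list must be ordered
# so the final (column ncol(P)) comes first; earlier rounds then inherit
# already-drawn winners through the X[higher_ind] check.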
drawMaxLikelihoodBracket <- function(structureList, P, winner=NA)
{
X <- matrix(NA, nrow = nrow(P), ncol = ncol(P))
for (i in seq_along(structureList))
{
ind <- structureList[[i]]
if (ind[1,2] == ncol(X))
{
if (is.na(winner))
{
ind2 <- which.max(P[ind])
temp <- rep(0, length=nrow(ind))
temp[ind2] <- 1
X[ind] <- temp
} else
{
X[,ncol(X)] <- 0
X[winner,ncol(X)] <- 1
}
} else
{
current_column <- ind[1,2]
higher_column <- current_column + 1
higher_ind <- ind
higher_ind[,2] <- higher_column
if (any(X[higher_ind] == 1))
{
X[ind] <- X[higher_ind]
} else
{
ind2 <- which.max(P[ind])
temp <- rep(0, length=nrow(ind))
temp[ind2] <- 1
X[ind] <- temp
}
}
}
return(X)
}
drawEquiprobableBracket <- function(n_teams, n_rounds, structureList)
{
M <- matrix(NA, nrow = n_teams, ncol = n_rounds)
# a team reaches round i with probability 2^(n_rounds - i) / n_teams;
# the loop fills every column, so no hard-coded column assignments are needed
for (i in 1:n_rounds)
{
M[,i] <- rep(2^(n_rounds-i)/n_teams, n_teams)
}
return(drawBracket(structureList, M))
}
scoreBracket <- function(M, Mtruth)
{
# ESPN tracks March Madness brackets using a 10-20-40-80-160-320 scoring system.
nc <- ncol(M)
sum(apply(M*Mtruth, 2, sum)*(10*2^(0:(nc-1))))
}
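# Worked check of the scoring weights: in a 6-round (64-team) bracket a perfect
# bracket earns 32*10 + 16*20 + 8*40 + 4*80 + 2*160 + 1*320 = 320 per round,
# i.e. 1920 points in total, which is what scoreBracket(M, M) returns for any
# valid 6-column bracket matrix M.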
# method 1 swaps rows from a game
# method 2 draws after a game is swapped
permuteBracket <- function(M, method = 1)
{
#set.seed(193939)
#M <- X[[1]]
#n_games <- 63
#game_num <- 62
# change the outcome of one of the games
n_games <- nrow(M) - 1
game_num <- sample(1:n_games, size = 1)
ind <- which(Bstruct == game_num, arr.ind = TRUE)
rnd <- ind[1,2]
if (method == 1)
{
if (rnd > 1) # if not the first round
{
ind_back <- ind
ind_back[,2] <- rnd - 1
winner <- which(M[ind] == 1)
loser <- base::setdiff(which(M[ind_back] == 1), winner)
winner <- ind[winner,1]
loser <- ind[loser,1]
} else # if the first round, there are only two options
{
winner <- ind[1,1]
loser <- ind[2,1]
}
temp <- M[winner,]
M[winner,] <- M[loser,]
M[loser,] <- temp
return(M)
} else if (method == 2)
{
if (rnd == 1)
{
#swap
M[ind] <- rev(M[ind])
} else if (rnd > 1)
{
ind_back <- ind
ind_back[,2] <- rnd - 1
winner <- which(M[ind] == 1)
loser <- base::setdiff(which(M[ind_back] == 1), winner)
winner <- ind[winner,1]
loser <- ind[loser,1]
#swap
temp <- M[winner,rnd]
M[winner,rnd] <- M[loser,rnd]
M[loser,rnd] <- temp
}
if (rnd <= 5)
{
for (k in rnd:5)
{
game_num <- floor(game_num / 2)
ind2 <- which(Bstruct == game_num, arr.ind = TRUE)
back_ind2 <- ind2
back_ind2[,2] <- ind2[1,2] - 1
M[ind2] <- rmultinom(1, size = 1, prob = M[back_ind2] * P[ind2])
}
}
return(M)
} else
{
stop("method not implemented")
}
}
if (FALSE) #####################################################################
{
M1 <- X[[1]]
for (i in 1:1000) {
M1 <- permuteBracket(M1, method = 1)
checkBracket(structureList, M1)
}
for (i in 1:1000){
M1 <- permuteBracket(M1, method = 2)
checkBracket(structureList, M1)
}
} ##############################################################################
# Yscore is a global variable
# n_truth_brackets is a global variable
probabilityOfWin <- function(Z)
{
Zscore <- sapply(X, function(Mtruth) scoreBracket(Z, Mtruth))
ind <- which(mapply(function(a, b)
{
all(a > b)
}, a = Zscore, b = Yscore))
length(ind) / n_truth_brackets
}
# X is a global draw from the truth distribution
# Y is a global draw from the adversary distribution
# n_truth_brackets is a global variable
# Yscores are a global object
geneticBracketSearch <- function(seedBracket, pRate, nchildren, ngenerations, debug = FALSE, method = 1)
{
cat(paste(Sys.time(), "\n"))
current_best <- seedBracket
children <- vector("list", length = nchildren)
current_best_score <- mean(sapply(X, function(truth_list_element) scoreBracket(seedBracket, truth_list_element)))
current_best_pwin <- probabilityOfWin(current_best)
start_score <- current_best_score
start_pwin <- current_best_pwin
children_scores <- numeric(nchildren)
children_pwin <- numeric(nchildren)
n_since_change <- 0
for (i in 1:ngenerations)
{
# want to cool the permutation rate with generations unless
# there have been to changes for 10 generations, then warm it up again
if (n_since_change < 10)
{
pRate <- (0.5 - pRate) / (ngenerations - 1) * (i - 1) + pRate
} else
{
pRate <- 1.1 * pRate
}
if (debug)
cat(paste("\tGeneration", i, "Poisson Rate", round(pRate, 3), "P(win)",
current_best_pwin, "\n"))
for (j in 1:nchildren)
{
nmutations <- rpois(1, pRate) + 1
children[[j]] <- current_best
for (k in 1:nmutations)
{
children[[j]] <- permuteBracket(children[[j]], method)
}
children_scores[j] <- mean(sapply(X, function(truth_list_element) scoreBracket(children[[j]], truth_list_element)))
children_pwin[j] <- probabilityOfWin(children[[j]])
}
ind <- which.max(children_pwin)
if (children_pwin[ind] > current_best_pwin)
{
current_best_pwin <- children_pwin[ind]
current_best_score <- children_scores[ind]
current_best <- children[[ind]]
n_since_change <- 0
} else
{
n_since_change <- n_since_change + 1
}
}
cat(paste(Sys.time(), "\n"))
return(list(current_best = current_best,
score = current_best_score,
start_score = start_score,
pwin = current_best_pwin,
start_pwin = start_pwin,
scores = scoreTruthSampleList(current_best, X),
cov = correlateTruthSampleList(current_best, X),
covA = correlateAdversarySampleList(current_best, Y, n_truth_brackets)))
}
drawTruthSampleList <- function(P, structureList, n_truth_brackets)
{
return(lapply(1:n_truth_brackets, function(i) drawBracket(structureList, P)))
}
checkTruthSampleList <- function(X, structureList)
{
dummy <- sapply(X, function(M) checkBracket(structureList, M))
}
drawWinnerSampleList <- function(P, structureList, n_truth_brackets, winner)
{
return(lapply(1:n_truth_brackets, function(i) drawBracket(structureList, P, winner)))
}
drawAdversarySampleList <- function(Th, structureList, n_truth_brackets, n_adversaries)
{
Y <- vector("list", length = n_truth_brackets)
Y <- lapply(1:n_truth_brackets, function(i)
{
lapply(1:n_adversaries, function(j)
{
drawBracket(structureList, Th)
})
})
return(Y)
}
checkAdversarySampleList <- function(Y, structureList, n_truth_brackets, n_adversaries)
{
dummy <- lapply(1:n_truth_brackets, function(i)
{
lapply(1:n_adversaries, function(j)
{
checkBracket(structureList, Y[[i]][[j]])
})
})
}
scoreTruthSampleList <- function(Z, X)
{
return(sapply(X, function(Mtruth) scoreBracket(Z, Mtruth)))
}
scoreAdversarySampleList <- function(Y, X, n_truth_brackets)
{
Yscore <- vector("list", n_truth_brackets)
for (i in seq_along(Y))
{
Yscore[[i]] <- sapply(Y[[i]], function(M) scoreBracket(M, X[[i]]))
}
return(Yscore)
}
correlateTruthSampleList <- function(Z, X)
{
return(sapply(X, function(Mtruth) cov(rowSums(Mtruth), rowSums(Z))))
}
correlateAdversarySampleList <- function(Z, Y, n_truth_brackets)
{
return(sapply(Y, function(ML)
{
mean(sapply(ML, function(M) cov(rowSums(M), rowSums(Z))))
}))
}
checkBracket <- function(structureList, M)
{
if (any(is.na(M)) | any(is.na(unlist(structureList))))
{
print("Diagnositcs")
print("")
print(M)
print("")
print(structureList)
}
for (i in seq_along(structureList))
{
ind <- structureList[[i]]
if (abs(sum(M[ind]) - 1) > 1E-6)
{
print("Diagnostics")
print("")
print(structureList[[i]])
print("")
print(M[ind])
print("")
print(sum(M[ind]))
stop("Section does not sum to 1")
}
}
}
normalizeBracket <- function(structureList, M)
{
for (i in seq_along(structureList))
{
ind <- structureList[[i]]
M[ind] <- M[ind] / sum(M[ind])
}
return(M)
}
|
f05ef53bed55f3e764508b5b0ef50c5edfbb10d9
|
5ba74bc0140276f2ac42383107ceb82bcdeb6e5d
|
/man/get_reactome_methods.Rd
|
0f9b8c19291fbae0e30e338f21cebb1761fd521f
|
[] |
no_license
|
reactome/ReactomeGSA
|
01e4bd76134d1daf83dae2cf09b6977d3b9fe806
|
044eb7b1352c6c6893839258d02e6238579cf67d
|
refs/heads/master
| 2023-04-29T04:15:48.445386
| 2023-04-13T13:34:17
| 2023-04-13T13:34:17
| 194,657,526
| 21
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,111
|
rd
|
get_reactome_methods.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_methods.R
\name{get_reactome_methods}
\alias{get_reactome_methods}
\title{get_reactome_methods}
\usage{
get_reactome_methods(
print_methods = TRUE,
print_details = FALSE,
return_result = FALSE,
method = NULL,
reactome_url = NULL
)
}
\arguments{
\item{print_methods}{If set to \code{TRUE} (default) a (relatively) nice formatted version of the result is printed.}
\item{print_details}{If set to \code{TRUE} detailed information about every method, including available parameters and
description are displayed. This does not affect the data returned if \code{return_result} is \code{TRUE}.}
\item{return_result}{If set to \code{TRUE}, the result is returned as a data.frame (see below)}
\item{method}{If set to a method's id, only information for this method will be shown. This is especially useful if
detailed information about a single method should be retrieved. This does not affect the data returned
if \code{return_result} is \code{TRUE}.}
\item{reactome_url}{URL of the Reactome API Server. Overwrites the URL set in the 'reactome_gsa.url' option.
Specific ports can be set using the standard URL specification (for example http://your.service:1234)}
}
\value{
If \code{return_result} is set to \code{TRUE}, a data.frame with one row per method. Each method has a name, description, and
(optional) a list of parameters. Parameters again have a name, type, and description.
}
\description{
Returns all available analysis methods from the Reactome analysis service.
}
\details{
Every method has a type, a scope, and sometimes a list of allowed values. The type (string, int = integer, float) define
the expected data type. The \strong{scope} defines at what level the parameter can be set. \emph{dataset} level parameters
can be set at the dataset level (using the \code{\link{add_dataset}} function) or at the analysis request level (using
\code{\link{set_parameters}}). If these parameters are set at the analysis request level, this overwrites the default
value for all datasets. \emph{analysis} and \emph{global} level parameters must only be set at the analysis request level
using \code{\link{set_parameters}}. The difference between these two types of parameters is that while \emph{analysis}
parameters influence the results, \emph{global} parameters only influence the behaviour of the analysis system (for example
whether a Reactome visualization is created).
}
\examples{
# retrieve the available methods only in an object
available_methods <- get_reactome_methods(print_methods = FALSE, return_result = TRUE)
# print all method names
available_methods$name
# list all parameters for the first method
first_method_parameters <- available_methods[1, "parameters"]
first_method_parameters
# simply print the available methods
get_reactome_methods()
# get the details for PADOG
get_reactome_methods(print_details = TRUE, method = "PADOG")
}
\seealso{
Other Reactome Service functions:
\code{\link{get_reactome_data_types}()}
}
\author{
Johannes Griss
}
\concept{Reactome Service functions}
|
01fca5a4ddaf2020192df634767ed2369aef494e
|
3432e3ed5d85e49fca0be9a5213a339cb29c01cf
|
/scripts/helper/filter_by_snps.R
|
ab4c11d05522d8a30f78a1d0c82f5517c37dd871
|
[] |
no_license
|
bogdanlab/BEAVR
|
ed451f32f79fec722207e90bebb30c57a473c714
|
b6df2c7402ca5bf2543bffdc9ed38b651829224e
|
refs/heads/master
| 2021-07-19T08:59:18.253341
| 2020-10-04T22:45:58
| 2020-10-04T22:45:58
| 219,420,560
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 364
|
r
|
filter_by_snps.R
|
#!/usr/bin/env Rscript
library(dplyr)
args = commandArgs(trailingOnly=TRUE)
gwas_file<-args[1]
snp_file<-args[2]
out_file<-args[3]
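# Usage (illustrative): Rscript filter_by_snps.R gwas.txt snps.txt filtered.txt
# where gwas.txt has a header including a SNP column and snps.txt is a
# headerless list with one SNP id per line (read into column V1 below).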
gwas_df <- read.table(gwas_file, header=T)
snp_df <- read.table(snp_file, header=F)
snp_df$SNP <- snp_df$V1
df_filter <- inner_join(gwas_df, snp_df, by="SNP")
write.table(df_filter, out_file, quote=F, row.names=F, col.names=T)
|
0d928fec3457307d5c7649fd45798260c4abd543
|
d2b171da278dbfe70f99fba2aa20d3cc9a319960
|
/03_prediction.R
|
c5e107ac14b68d208840b8f035beca3541dacf9d
|
[] |
no_license
|
sky94520/introduction_predictiv_modelling
|
99ddd42910ced9733de1fe1d89bf2ff47d473e40
|
66de1f77a7ef0cb506d57ee17c91b1f4b3121905
|
refs/heads/master
| 2022-11-16T10:33:37.809660
| 2020-07-10T19:57:31
| 2020-07-10T19:57:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,708
|
r
|
03_prediction.R
|
############################################################################
# Preamble
############################################################################
rm(list=ls())
set.seed(1337)
### Load packages Standard
library(tidyverse) # Collection of all the good stuff like dplyr, ggplot2 etc.
library(magrittr)
### Load extra packages
library(tidymodels)
############################################################################
# Load data
############################################################################
rm(list=ls())
data <- readRDS("../temp/data_full.rds")
############################################################################
# training setup
############################################################################
data %<>% slice_sample(n = 1000000)
# Normalize variables by technology field/year cohort
data %<>%
group_by(tech_field_main, appln_filing_year) %>%
mutate(across(c(tech_field_n, scope, family_size_docdb, family_size_inpadoc, cit_bwd, cit_nlp, claims, originality, nb_applicants, nb_inventors), scale)) %>%
ungroup()
# Factors
data %<>%
mutate(tech_field_main = tech_field_main %>% factor())
# Train & test split
data_split <- data %>%
select(-appln_filing_year, -cit_fwd5) %>% # Deselect what we dont want
initial_split(prop = 0.75, strata = breakthrough01)
data_train <- data_split %>% training()
data_test <- data_split %>% testing()
rm(data_split)
# Create recipes -> the recipes package makes preprocessing easy.
# One recipe per breakthrough definition; the other two outcome columns are
# dropped each time. Recipes are left untrained here so they can also be
# attached to the workflows below; prep() trains them for juice()/bake().
make_recipe <- function(df, target, drop_vars) {
  df %>%
    rename(y = !!rlang::sym(target)) %>%
    mutate(y = as.factor(y)) %>% # parsnip classification engines expect a factor outcome
    select(-all_of(drop_vars)) %>%
    recipe(y ~ .) %>%
    step_dummy(tech_field_main) %>%
    step_zv(all_predictors()) # get rid of zero variance vars
}
recipe_01 <- make_recipe(data_train, "breakthrough01", c("breakthrough25", "breakthrough50"))
recipe_25 <- make_recipe(data_train, "breakthrough25", c("breakthrough01", "breakthrough50"))
recipe_50 <- make_recipe(data_train, "breakthrough50", c("breakthrough01", "breakthrough25"))
# finalize recipe preparation
data_train01 <- recipe_01 %>% prep() %>% juice()
data_test01 <- recipe_01 %>% prep() %>% bake(data_test %>% select(-breakthrough25, -breakthrough50))
data_train25 <- recipe_25 %>% prep() %>% juice()
data_test25 <- recipe_25 %>% prep() %>% bake(data_test %>% select(-breakthrough01, -breakthrough50))
data_train50 <- recipe_50 %>% prep() %>% juice()
data_test50 <- recipe_50 %>% prep() %>% bake(data_test %>% select(-breakthrough01, -breakthrough25))
############################################################################
# Model setup
############################################################################
# Null
model_00 <- null_model(mode = "classification")
# Logistic
model_lg <- logistic_reg(mode = 'classification') %>%
set_engine('glm', family = binomial)
# Elastic net
model_en <- logistic_reg(mode = 'classification',
                         mixture = tune(),
                         penalty = tune()) %>%
  set_engine('glmnet') # the glm engine cannot tune penalty/mixture; elastic net requires glmnet
# Decision tree
model_dt <- decision_tree(mode = 'classification',
cost_complexity = tune(),
tree_depth = tune(),
min_n = tune()) %>%
set_engine('rpart')
# Random Forest
model_rf <- rand_forest(mode = 'classification',
trees = 100,
mtry = tune(),
min_n = tune()) %>%
set_engine('ranger', importance = 'impurity')
# XGBoost
model_xg <- boost_tree(mode = 'classification',
trees = 100,
mtry = tune(),
min_n = tune(),
tree_depth = tune(),
learn_rate = tune()) %>%
set_engine('xgboost')
# neural net
model_nn <- mlp(mode = 'classification',
hidden_units = tune(),
penalty = tune(),
epochs = tune()) %>%
set_engine('nnet')
############################################################################
# Hyperparameter Tuning
############################################################################
### Resamples
data_resample01 <- bootstraps(data_train01, strata = y, times = 5)
data_resample25 <- bootstraps(data_train25, strata = y, times = 5)
data_resample50 <- bootstraps(data_train50, strata = y, times = 5)
### Parallelprocessing
all_cores <- parallel::detectCores(logical = FALSE)
library(doParallel)
cl <- makePSOCKcluster(all_cores - 1)
registerDoParallel(cl)
# Breakthrough50: tune the elastic net on the matching resamples
tune_el50 <- tune_grid(model_en, y ~ ., resamples = data_resample50, grid = 10)
############################################################################
# Workflow setup
############################################################################
# General reciepes
workflow_01 <- workflow() %>% add_recipe(recipe_01)
workflow_25 <- workflow() %>% add_recipe(recipe_25)
workflow_50 <- workflow() %>% add_recipe(recipe_50)
# breakthrough01
workflow_01_lg <- workflow_01 %>% add_model(model_lg)
workflow_01_en <- workflow_01 %>% add_model(model_en)
workflow_01_dt <- workflow_01 %>% add_model(model_dt)
workflow_01_rf <- workflow_01 %>% add_model(model_rf)
workflow_01_xg <- workflow_01 %>% add_model(model_xg)
workflow_01_nn <- workflow_01 %>% add_model(model_nn)
# breakthrough25
workflow_25_lg <- workflow_25 %>% add_model(model_lg)
workflow_25_en <- workflow_25 %>% add_model(model_en)
workflow_25_dt <- workflow_25 %>% add_model(model_dt)
workflow_25_rf <- workflow_25 %>% add_model(model_rf)
workflow_25_xg <- workflow_25 %>% add_model(model_xg)
workflow_25_nn <- workflow_25 %>% add_model(model_nn)
# breakthrough50
workflow_50_lg <- workflow_50 %>% add_model(model_lg)
workflow_50_en <- workflow_50 %>% add_model(model_en)
workflow_50_dt <- workflow_50 %>% add_model(model_dt)
workflow_50_rf <- workflow_50 %>% add_model(model_rf)
workflow_50_xg <- workflow_50 %>% add_model(model_xg)
workflow_50_nn <- workflow_50 %>% add_model(model_nn)
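# Sketch (not in the original script; the metric choice is an assumption):
# after tuning, the usual tidymodels steps are select_best() on the tuning
# results, finalize_workflow() to pin the winning penalty/mixture, then a refit
# on the raw training data that the recipe expects.
best_en50 <- select_best(tune_el50, metric = "roc_auc")
fit_50_en <- workflow_50_en %>%
  finalize_workflow(best_en50) %>%
  fit(data = data_train %>%
        rename(y = breakthrough50) %>%
        mutate(y = as.factor(y)) %>%
        select(-breakthrough01, -breakthrough25))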
|
6f43a6fd281f714ee87cf7ddf0fa69577c20b3cd
|
358fda5b2a499273537f39bf278450c060f42573
|
/code/gb_wrapper_stacking.R
|
3b62ed09619a80fc278d1b8d4a80f48c0c572b9a
|
[] |
no_license
|
schreckf/NIC_Schreck
|
7a7886f70a99428f58a7408ad27128fb0cad14c1
|
6ae88f71c924733dc067305e20abb2f4f7794213
|
refs/heads/master
| 2020-03-18T19:38:24.174405
| 2018-07-15T19:38:02
| 2018-07-15T19:38:02
| 135,166,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,546
|
r
|
gb_wrapper_stacking.R
|
###### Feature selection for the Gradient Boosting model. A wrapper approach #####
# A model building approach with sequential forward selection is
# established in order to find the best subset of features.
# Each model is built with cross-validation on the AUC measure.
# One-hot-encoding of categorical features
train_wrapper2 <- mlr::createDummyFeatures(train, target = "customer")
gb_task_wrapper <- makeClassifTask(data = train_wrapper2,
target = "customer",
positive = "good")
gb_learner_wrapper <- makeLearner("classif.xgboost",
predict.type = "prob")
gb_ctrl_wrapper <- makeFeatSelControlSequential(method = "sfs",
alpha = 0.00001)
gb_rdesc_wrapper <- makeResampleDesc("CV", iters = 3)
gb_sfeats <- selectFeatures(learner = gb_learner_wrapper,
task = gb_task_wrapper,
resampling = gb_rdesc_wrapper,
control = gb_ctrl_wrapper,
show.info = TRUE,
measures = mlr::auc)
# Performance score for each combination of features
analyzeFeatSelResult(gb_sfeats)
# Next, I store the optimal set of features to later use it
# in the model building part file "gb_model_stacking.R"
vars$gb2 <- c("customer", gb_sfeats$x)
|
80916ce2f31a87556921423372b88f1368ce239d
|
3d0cc57b1908da75fc1bd5a1ad3b074a32835055
|
/R/listMetaGenomes.R
|
e41e137fb37e322c1622c72bc22a93b7211272e8
|
[] |
no_license
|
flopezo/biomartr
|
c964109dfa2356559ae7f566664313615080ef86
|
cbd022783c7e1e779096ea3e8abbc2a790b5cb8b
|
refs/heads/master
| 2020-12-24T12:40:15.877281
| 2016-10-31T17:00:35
| 2016-10-31T17:00:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,145
|
r
|
listMetaGenomes.R
|
#' @title List available metagenomes on NCBI Genbank
#' @description List available metagenomes on NCBI Genbank. NCBI Genbank allows users
#' to download entire metagenomes of several metagenome projects. This function lists
#' all available metagenomes that can then be downloaded via \code{\link{getMetaGenomes}}.
#' @param details a boolean value specifying whether only the scientific names of stored metagenomes shall be returned
#' (\code{details = FALSE}) or all information such as "organism_name","bioproject", etc (\code{details = TRUE}).
#' @author Hajk-Georg Drost
#' @examples
#' \dontrun{
#' # retrieve available metagenome projects at NCBI Genbank
#' listMetaGenomes()
#'
#' # retrieve detailed information on available metagenome projects at NCBI Genbank
#' listMetaGenomes(details = TRUE)
#' }
#' @seealso \code{\link{getMetaGenomes}}, \code{\link{getMetaGenomeSummary}}
#' @export
#'
listMetaGenomes <- function(details = FALSE) {
metagenome.summary <- getMetaGenomeSummary()
if (!details)
return(unique(metagenome.summary$organism_name))
if (details)
return(metagenome.summary)
}
|
dc77327c390f65902ec2827ab311ea3a9513d351
|
4e781036d765e18949e09daaac2e4a6a6568b409
|
/Plot1.R
|
a03c0847991b8b6f13f3a7d4f10a2f6cd2a5b400
|
[] |
no_license
|
MohsinTunio/ExData_Plotting1
|
e4079fbba185094fe8076f6814c322adf06a0df5
|
78a718aecbc0005dbb11b881705ffde52647dbe6
|
refs/heads/master
| 2021-01-25T13:41:19.422616
| 2018-03-02T20:32:12
| 2018-03-02T20:32:12
| 123,605,002
| 0
| 0
| null | 2018-03-02T16:46:14
| 2018-03-02T16:46:13
| null |
UTF-8
|
R
| false
| false
| 1,143
|
r
|
Plot1.R
|
#------------------------------------------------------------------
# ****Plot1****
#------------------------------------------------------------------
# I suppose the data have been extracted into the working directory
# This script creates an image with .png extension in the working directory -- it uses the png()
# graphics device and dev.off() to achieve this
#library(mise) # this clears the console -- removing wordy mess (Optional)
rm(list=ls()) # this clears existing variables and datasets from memory
#mise()
dta<- read.table("household_power_consumption.txt", header=T, sep=";", na.strings="?")
## set time variable
filtered_data <- dta[dta$Date %in% c("1/2/2007","2/2/2007"),]
SetTime <-strptime(paste(filtered_data$Date, filtered_data$Time, sep=" "),"%d/%m/%Y %H:%M:%S")
filtered_data <- cbind(SetTime, filtered_data)
##
## Histogram generation as suggested
png("Plot1.png", width = 5*480,height = 5*480, res = 300,pointsize = 15)
hist(filtered_data$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off() # This creates a file in the working directory
|
2b669859f4ca24242920dc89d07bea1979aacf8d
|
7d6907d2cd8b81e79300fe52bf420281b419f5d7
|
/Friedmantest.R
|
07c4103c24bcefb79e61302c5aa7a6eea37928bf
|
[] |
no_license
|
Helma-T/CrossvalidatedData
|
b6d4a9078fd63104b23fff3a358d5ff2586e2246
|
1f1e5fe576709f491ac52416dd5c394cdc074f4f
|
refs/heads/master
| 2022-09-24T04:31:18.002900
| 2020-05-29T10:53:14
| 2020-05-29T10:53:14
| 267,825,414
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 128
|
r
|
Friedmantest.R
|
anonymized <- read.csv("anonymized.csv")
library(dplyr)
x=select(anonymized, "A_1", "A_2", "A_3")
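# friedman.test on a matrix treats rows as blocks (respondents) and columns as
# treatments (here A_1..A_3), returning a chi-squared statistic and p-value.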
friedman.test(data.matrix(x))
|
6695cf442bdc31b8c530cf088421867796c51bac
|
874aa96cc13571e2378de93742ba81c42cd768f6
|
/app.R
|
7ca984684689dea82d0a05c0769b82c5812a8531
|
[] |
no_license
|
jve19/chem_life
|
3cc9d49e3a7b335e9e472a9171e59c067518bda1
|
9143e6afaab4ee0dd7d1d2a20928cc168be2abc0
|
refs/heads/master
| 2020-12-11T10:09:26.333616
| 2020-01-14T10:48:12
| 2020-01-14T10:48:12
| 233,818,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,048
|
r
|
app.R
|
#setwd("C:\\Users\\Gebruiker\\Documents\\R\\shiny\\trial glasgow")
setwd("C:\\Users\\Gebruiker\\Documents\\R\\shiny\\tortilla_script_070120")
library(shiny)
library(ggplot2)
# Define the fields we want to save from the form
#fields <- c("text_demo", 'secret?')
fields <- c("t_where", 't_which', 'opinion', 'rating')
outputDir <- 'responses'
############################################################
#source('trial glasgow\\f_save_Data.R')
source("C:\\Users\\Gebruiker\\Documents\\R\\shiny\\trial\\trial glasgow\\f_save_Data.R")
############################################################
source("C:\\Users\\Gebruiker\\Documents\\R\\shiny\\trial\\trial glasgow\\f_load_Data.R")
# answer text questions
t_where <- textInput("t_where", "Where did you eat tortilla?")
# textarea_demo <- textAreaInput("textarea_demo", "what tortilla did you get")
t_which <- textInput("t_which", "what type of tortilla?")
opinion <- textInput('opinion', 'how did you like it?')
# make a slider
rating <- sliderInput(
"rating",
"Which mark would you give it?",
min = 0, max = 10, step = 1, value = 1
)
slider_demo <- sliderInput(
"slider_demo",
"How would you rate the 80s musically, on a scale from 0-100?",
min = 0, max = 100, step = 1, value = 50
)
action_demo <- actionButton("clear", "Clear Form")
download_demo <- downloadButton("download", "Download")
# file_demo <- fileInput("file_demo", "Upload a PDF", accept = "pdf")
# help_demo <- helpText("You can write help text in your form this way")
#source('trial glasgow\\f_reset_form.R')
source("C:\\Users\\Gebruiker\\Documents\\R\\shiny\\trial\\trial glasgow\\f_reset_form.R")
# Set up questionnaire interface ----
ui <- fluidPage(
title = "Questionnaire Framework",
# CSS ----
# stop the default input containers being 300px, which is ugly
tags$head(
tags$style(HTML("
.shiny-input-container:not(.shiny-input-container-inline) {
width: 100%;
max-width: 100%;
}
"))
),
# App title ----
h3("BEEEEEH"),
p("pues nasa... tortilla time"),
fluidRow(
column(width=6, t_where),
column(width=6, t_which),
column(width=6, opinion)
),
rating,
actionButton("submit", "Submit"),
action_demo
)
# Reactive functions ----
outputDir <- "responses"
server = function(input, output, session) {
# When the Submit button is clicked, save the form data
observeEvent(input$submit, {
saveData(input)
resetForm(session)
# thank the user
n_responses <- length(list.files(outputDir))
response <- paste0("Thank you for eating tortilla! You supplied tortilla nr ",
n_responses, ".")
showNotification(response, duration = 0, type = "message")
})
# clear the fields
observeEvent(input$clear, {
resetForm(session)
})
}
shinyApp(ui, server)
|
adff837a82dc6c85becbe5a91e2c6cfcb6c706b4
|
4b5aa81508cd3900e6e46c67897b7c81d6d199eb
|
/chick_cars_tasks.R
|
5f91b1205015227f26e081edaf1c00a4e2d3977e
|
[] |
no_license
|
vladyslavchernikov/r_history
|
a4b6853c715c5767e649c3ff2402f4986802ad84
|
a671e4d5603049fe4c8e12aaed30d6c58c184715
|
refs/heads/main
| 2023-07-19T09:53:10.183675
| 2021-09-15T14:14:29
| 2021-09-15T14:14:29
| 406,790,348
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,671
|
r
|
chick_cars_tasks.R
|
library("psych")
library("dplyr")
library("ggplot2")
library("Ggally")
library("Ecdat")
library("lmtest")
library("car")
chick <- ChickWeight
summary(chick)
table(chick)
subset(chick, Time == 10)
glimpse(chick)
mean(chick$weight[chick$Time == 10])
summary(subset(chick, Time == 10))
View(chick)
qplot(data=chick, Time, weight)
a <- subset(chick, Time<11, select = c(weight, Time))
a <- subset(a, Time>9, select = c(weight, Time))
a
summary(chick)
mean(a$weight)
d1 <- subset(chick, Time < 22 , select = c(weight, Time, Diet))
d1 <- subset(d1, Time > 20 , select = c(weight, Time, Diet))
d1
reg <-lm(data=chick,weight~Time+Diet)
summary(reg)
diamonds <-diamonds
qplot(data = diamonds, price,fill=cut)+facet_grid(~clarity)
qplot(data = diamonds, log(price),fill=cut)+facet_grid(~cut)
qplot(data = diamonds, log(price),color=cut)+facet_grid(~cut)
qplot(data = diamonds, price,fill=cut)+facet_wrap(~clarity)
lmlm <- lm(data=diamonds, price~carat+table+x+y+z+depth)
summary(lmlm)
lmtest<-lm(data=diamonds,price~carat+y+x)
summary(lmtest)
help(diamonds)
confint(lmtest,level=0.9)
lmtest<-lm(data=diamonds,price~carat+y+x+table+depth)
confint(lmtest,level=0.9)
food <- BudgetFood
lmfood <- lm(data=food,wfood~totexp+size)
help(BudgetFood)
nw<-data.frame(totexp=700000,size=4)
predict(lmfood, newdata = nw, interval = "prediction")
resettest(lmfood)
h <- na.omit(BudgetFood)
lmfood2 <- lm(data=h,wfood~totexp+size)
waldtest(lmfood, lmfood2)
d<-ChickWeight
d2<-d[d$Time==21,]
mean(d2[d2$Diet==4,]$weight)
qplot(data = diamonds, log(price),color=cut)+facet_grid(~cut)
mtcars <-mtcars
help(mtcars)
mllm<-lm(data=mtcars,mpg~cyl+disp)
mllm2<-lm(data=mtcars,mpg~cyl+disp+hp)
summary(mllm)
summary(mllm2)
glimpse(mtcars)
mllm3<-lm(data=mtcars,mpg~disp+hp+wt)
vif(mllm3)
h.pca <- prcomp(mtcars, scale = TRUE)
pca1 <- h.pca$x[, 1]
head(pca1)
v1 <- h.pca$rotation[, 1]
norm(v1, type="2")
h <- BudgetFood
h2 <- BudgetFood
h <- na.omit(BudgetFood)
lmh<-lm(data=h, wfood~totexp+size)
lmh2<-lm(data=h2, wfood~totexp+size)
resettest(lmh)
resettest(lmh2)
model_r<-lm(data=h,wfood~totexp+size)
model_ur<-lm(data=h,wfood~totexp*sex+size*sex)
waldtest(model_r,model_ur)
nd<-data.frame(size=4,totexp=700000)
predict(lmh2,newdata = nd, interval ="prediction", level=0.9)
car<-mtcars
lmcar<-lm(data=car,mpg~disp+hp)
lmcar2<-lm(data=car,mpg~disp+hp+wt)
summary(lmcar)
summary(lmcar2)
|
c7abae5616af53331e1f439ce0cce5c4f02606b7
|
16f9d05fa1d0b6aadd313cc2896d7730d0e0d7c6
|
/RGtkGen/man/genRCode.Rd
|
63c14971a8946cbddfd13610ca330795f23f595a
|
[] |
no_license
|
statTarget/RGtk2
|
76c4b527972777c567fb115587418f9dea61bf29
|
42c2d5bc7c8a462274ddef2bec0eb0f51dde1b53
|
refs/heads/master
| 2023-08-24T05:13:35.291365
| 2021-10-24T18:27:46
| 2021-10-24T19:24:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,199
|
rd
|
genRCode.Rd
|
\name{genRCode}
\alias{genRCode}
\alias{genCCode}
\title{Generate S and C interface code for a C routine}
\description{
These functions generate S and C code that interface to a
Gtk function defined in the .defs files. These are the glue
between the interactive S language and the raw Gtk method
definined in the C library.
}
\usage{
genRCode(fun, defs, name, sname, className = NULL, package = "RGtk")
genCCode(fun, defs, name)
}
\arguments{
\item{fun}{the definition of the C routine for which we are generating
either the S or C interface. This is an object of class
\code{FunctionDef} that is created by reading the .defs files.}
\item{defs}{the collection of all the definitions read from .defs
files, including classes, functions, enumerations, etc.}
\item{name}{the name of the C routine for which the interface is being generated.}
\item{sname}{the name of the S function to create.}
\item{className}{not used.}
\item{package}{the name of the S package in which the S code will be
located. This is used as the value of \code{PACKAGE} argument
when calling the associated C interface routine via a \code{\link[RGtk]{.GtkCall}}.}
}
\value{
\code{genRCode} returns string giving the full definition of the S function
that interfaces to the C routine. This includes the assignment to the
appropriate function name.
\code{genCCode} returns a list with two elements:
\item{code}{a string giving the full definition of the S-C interface routine
that interfaces to the underlying C routine. This can be put into a .c
file, compiled and accessed from S via \code{\link[RGtk]{.GtkCall}}.}
\item{decl}{a string giving the declaration of the S-C interface routine
which can be used in a header file (.h). This can be used
when creating the registration table of routines in the generated package.}
}
\references{
\url{http://www.omegahat.net/RGtk/},
\url{http://www.omegahat.net/RGtkBindingGenerator},
\url{http://www.gtk.org}
\url{http://www.pygtk.org}(?)
}
\author{Duncan Temple Lang <duncan@research.bell-labs.com>}
\seealso{
\code{\link{genCode}}
\code{\link{generateCodeFiles}}
}
\examples{
}
\keyword{programming}
|
71eafdf48dc461b526dae3b4e4f8aa72fb6f944f
|
92a2eabe000e965687ae574e8ef834d993345a3a
|
/R_learning/conditionals.R
|
482f14cd8a433bb13739a7cf04f727782f1f9c2e
|
[] |
no_license
|
amendizabalm/R_learning
|
dab86a396ff0f59f0a23fd53cc123a046ac57f95
|
defa428ecdb5b8d8afac09e0e4dd269ac0bcb4fd
|
refs/heads/master
| 2022-06-25T10:26:26.282432
| 2020-05-07T00:42:33
| 2020-05-07T00:42:33
| 261,912,586
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,630
|
r
|
conditionals.R
|
#conditionals
a=0
if(a!=0){
print(1/a)
} else{
print("No recirpocal for 0.")
}
# an example that tells us which states, if any, have a murder rate less than 0.5
library(dslabs)
data(murders)
murder_rate <- murders$total / murders$population*100000
ind<- which.min(murder_rate)
#which states, if any, have murder rates lower than 0.5
if(murder_rate[ind] <0.5){
print(murders$state[ind])
} else{
print("No state has murder state that low")
}
if(murder_rate[ind] <0.25){
print(murders$state[ind])
} else{
print("No state has murder state that low")
}
#ifelse function. This function takes three arguments, a logical, and two possible answers.
#If the logical is true, the first answer is returned.
#If it's false, the second answer is returned.
a<- 0
ifelse(a>0, 1/a, NA)
a<- 3
ifelse(a>0, 1/a, NA)
#in vectors
a <- c (0, 1, 2, -4, 5)
result <-ifelse(a>0, 1/a, NA)
result
#how to use this function to replace all the missing values in a vector with zeros; we are going to use an example
data("na_example")
na_example
#to know how many NAs are in the data we use
sum(is.na(na_example))
#to replace all the NAs with zero we use
no_nas <-ifelse(is.na(na_example), 0, na_example)
no_nas
#to check out there are no NAs
sum(is.na(no_nas))
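#An equivalent, idiomatic alternative uses logical indexing instead of ifelse:
no_nas2 <- na_example
no_nas2[is.na(no_nas2)] <- 0
all(no_nas == no_nas2)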
#The any function takes a vector of logicals and it returns true
#if any of the entries is true.
z<- c(TRUE,TRUE,FALSE)
any(z)
z<-c(FALSE, FALSE, FALSE)
any(z)
#The all function takes a vector of logicals and returns
#TRUE if all the entries are true.
z<- c(TRUE,TRUE,FALSE)
all(z)
z<- c(TRUE, TRUE, TRUE)
all(z)
|
f6723686476c4db7dd81cc7bac423d7b545d7d1c
|
db340ce61b6e8e513fb2805fa5fe8ccdea5c4043
|
/man/fragPlot.Rd
|
ba93944684f407c95c20911ea079ac69cc238a04
|
[
"MIT"
] |
permissive
|
bakerwm/demo
|
763ef7fe03e44df6939889f82757c2aaea31553a
|
ba4e1c33909dea96142ad6cf41ee2e25b9c2659b
|
refs/heads/master
| 2020-12-02T15:32:20.602059
| 2019-12-31T09:31:58
| 2019-12-31T09:31:58
| 140,517,077
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 233
|
rd
|
fragPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{fragPlot}
\alias{fragPlot}
\title{fragPlot}
\usage{
fragPlot(df)
}
\arguments{
\item{df}{data.frame for plotting}
}
\description{
fragPlot
}
|
cd28a9dbf53a00c4a4c95af7ec9f4fc863ce1bb0
|
0aaecd6991a7f16759a1f8d2b3be6093f8a183af
|
/R/probplot.R
|
d3a43d8a2009291beb20f6a789d63746c87c9855
|
[] |
no_license
|
cran/fastR
|
3f0e3959dad4e5d361c341eb6bea670eab6bfdcc
|
572a5dc31e5aa85af4126662f95268329179c87b
|
refs/heads/master
| 2021-01-21T04:55:14.927487
| 2017-07-27T19:52:06
| 2017-07-27T19:52:06
| 17,695,981
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,451
|
r
|
probplot.R
|
#' @export
probplot <-
function (x, qdist = qnorm, probs = NULL, line = TRUE, xlab = NULL,
ylab = "Percentile", ...)
{
DOTARGS <- as.list(substitute(list(...)))[-1]
DOTARGS <- paste(names(DOTARGS), DOTARGS, sep = "=", collapse = ", ")
    if (is.null(xlab)) xlab <- deparse(substitute(x)) # honor a user-supplied xlab
x <- sort(x)
QNAME <- deparse(substitute(qdist))
DOTS <- list(...)
qdist <- match.fun(qdist)
QFUN <- function(p) {
args = DOTS
args$p = p
do.call("qdist", args)
}
y <- QFUN(ppoints(length(x)))
if (is.null(probs)) {
probs <- c(0.01, 0.05, seq(0.1, 0.9, by = 0.1), 0.95,
0.99)
if (length(x) >= 1000)
probs <- c(0.001, probs, 0.999)
}
qprobs <- QFUN(probs)
plot(x, y, axes = FALSE, type = "n", ylim = range(c(y, qprobs)),
xlab = xlab, ylab = ylab)
box()
abline(h = qprobs, col = "grey")
axis(1)
axis(2, at = qprobs, labels = 100 * probs)
points(x, y)
QTEXT <- paste("Quantile: ", QNAME, sep = "")
if (nchar(DOTARGS))
QTEXT <- paste(QTEXT, DOTARGS, sep = ", ")
mtext(QTEXT, side = 1, line = 3, adj = 1)
xl <- quantile(x, c(0.25, 0.75))
yl <- qdist(c(0.25, 0.75), ...)
slope <- diff(yl)/diff(xl)
int <- yl[1] - slope * xl[1]
if (line) {
abline(int, slope, col = "lightskyblue3")
}
z <- list(qdist = QFUN, int = int, slope = slope)
class(z) <- "probplot"
invisible(z)
}
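# Illustrative usage (kept as comments, since this is package source):
# probplot(rnorm(100))                       # normal probability plot
# probplot(rexp(100), qdist = qexp, rate = 1) # exponential quantiles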
|
4ac4261972ce17126745bb277b5106dbcdff1cc6
|
5a5976e18d7f3681ba7d02861fcd627ec0642099
|
/docs/correlations.R
|
aae902c8e0868de0469a19eb5e30ce7d823ae2ee
|
[] |
no_license
|
mloop/kdiff-type1-error-rate
|
a8f79631142b83778092af187a611a7efec71399
|
21dfb7455bec9ec8f448e81c0ffb815e050aad8c
|
refs/heads/master
| 2016-09-05T19:32:33.305474
| 2016-01-19T14:50:01
| 2016-01-19T14:50:01
| 26,125,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,839
|
r
|
correlations.R
|
# Purpose: Create figure 2 for manuscript. It's the correlation matrix of the $\hat{D}(h_i)$ values, for different values of $h_i$
# Packages
library(dplyr)
library(ggplot2)
# Load data
results <- tbl_df(read.table(file = "../analysis/results.txt", sep = "\t", header = TRUE))
results_s <- arrange(
filter(results, condition > 4),
condition, iteration, range)
results_s <- data.frame(results_s)
# Function to calculate the correlation matrices
range_corr <- function(x, n.tests){
small <- filter(x, tests == n.tests)
u1 <- small[small$iteration == 1, "h"]
u2 <- u1
range_combos <- expand.grid(h1 = u1, h2 = u2)
correlation <- vector()
for(i in 1:nrow(range_combos)){
x <- select(
filter(small, h == range_combos[i, 1]),
reject)
y <- select(
filter(small, h == range_combos[i, 2]),
reject)
stopifnot(nrow(x) <= 2000, nrow(x) >= 1997, nrow(y) <= 2000, nrow(y) >= 1997)
correlation[i] <- cor(x, y)
}
corr_table <- tbl_df(data.frame(range_combos, correlation))
return(corr_table)
}
# Calculate correlations
hundred <- range_corr(results_s, 100)
## Plot correlation matrix
p100 <- ggplot(hundred, aes(x = h1, y = h2, fill = correlation)) +
geom_tile() +
ggtitle("Correlations among tests when testing 100 ranges") +
xlab("Range (miles)") +
ylab("Range (miles)") +
xlim(min(hundred$h1), max(hundred$h1)) +
ylim(min(hundred$h2), max(hundred$h2)) +
scale_fill_continuous(name = "Correlation") +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_blank()) # https://felixfan.github.io/page8/
# Combine into 1 plot
png(file = "correlations.png", res = 300, width = 7, height = 7, units =
'in')
p100
dev.off()
|
78a74670fdfec507f7636c2950c8f788ce7441d5
|
8d4aba8553de7ee99ebda55c6e4ab224ae1d0118
|
/Neural networks.R
|
05958f9c326a5dad24b997a730107c38fc39d394
|
[] |
no_license
|
raghavendrarba03/Neural_networks
|
8daa18e1fdb8de37d566d16588fa7448ea800d66
|
57cc838fff20128ba585c4b4436e6f05e768caab
|
refs/heads/master
| 2020-04-05T18:28:09.207721
| 2018-11-11T17:24:54
| 2018-11-11T17:24:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 588
|
r
|
Neural networks.R
|
str(iris)
set.seed(123); iris_mixed=iris[order(runif(150)), ]
iris_train=iris_mixed[1:120, ]
iris_test=iris_mixed[121:150, ]
library(nnet)
nn=nnet(Species~.,data=iris_train,size=2)
library(NeuralNetTools)
plotnet(nn)
nn_pre=predict(nn,iris_test[,1:4],type="class")
library("caret")
nn_pre<-as.factor(nn_pre)
confusionMatrix(nn_pre,iris_test[,5])
#Improving the model performance
set.seed(345); nn_imp=train(Species~.,data=iris_train,method="nnet"); nn_imp
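# Note: with method = "nnet", caret::train tunes the size and decay parameters
# over a small default grid using bootstrap resampling, which is why it can
# outperform the single nnet fit above.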
plotnet(nn_imp)
nn_imp_pre=predict(nn_imp,iris_test[,1:4],type="raw")
confusionMatrix(nn_imp_pre,iris_test[,5])
|
5f6218779dc650240567427f8576d35f7a0fbe67
|
1c4c928f6ed84d80b18efe369cc0cc9ee25269d8
|
/cachematrix.R
|
d209b5670a906154132cf2424f1cb605e232a028
|
[] |
no_license
|
rafigueroapr/ProgrammingAssignment2
|
1f07aa7d50edf29c37699e6bc6eb74d534ebde8b
|
ac37344fe14ea4fdcd648bc3b19f1a5ab50b04a7
|
refs/heads/master
| 2021-01-17T09:18:51.525618
| 2014-06-20T22:50:26
| 2014-06-20T22:50:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,622
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix will create a special matrix object, and then cacheSolve will
## calculate the inverse of the matrix.
## If the matrix inverse has already been calculated, the function will
## find it in the cache and return it, and not calculate it again. If it is not
## calculated then the funtion will calculate the matrix inverse.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL ## to store the cached inverse matrix
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
## return the matrix
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## The function cacheSolve returns the inverse of a matrix created with
## the makeCacheMatrix function. If the cached inverse is available,
## cacheSolve retrieves it, if not, it computes, caches, and returns it.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
# If the inverse is already calculated, the function will return it
if (!is.null(inv)) {
message("getting cached data")
return(inv)
}
# When the inverse is not yet calculated, the function calculate it
data <- x$get()
inv <- solve(data, ...)
# Cache the inverse
x$setinv(inv)
inv
}
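## Quick illustrative check (example matrix chosen arbitrarily):
demo_matrix <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(demo_matrix) # computes and caches the inverse, diag(0.5, 2)
cacheSolve(demo_matrix) # second call prints "getting cached data"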
|
448b25ab6b088bc2e6e4879e7ea5785a639811e4
|
86174891548989b75e92678588b3a532ff8c6b99
|
/man/table_reactable.Rd
|
95863afe73a10da251503b0f0c116c8a8bcc9704
|
[] |
no_license
|
ScottishCovidResponse/SCRCshinyApp
|
777d0c84adea17b5ae36a68da8bf8a840803776a
|
b447fc209c95fa31ee3a32263400a963144dc290
|
refs/heads/master
| 2022-11-13T11:35:35.541366
| 2020-06-27T09:38:14
| 2020-06-27T09:38:14
| 273,044,757
| 0
| 0
| null | 2020-06-17T18:00:22
| 2020-06-17T18:00:21
| null |
UTF-8
|
R
| false
| true
| 245
|
rd
|
table_reactable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_reactable.R
\name{table_reactable}
\alias{table_reactable}
\title{table_reactable}
\usage{
table_reactable(covid_dat, all_dat)
}
\description{
table_reactable
}
|
850b596a052c6251ab0597fcd47927868838b15a
|
7fa55be282daf5d7bfdf0fb1b86532ca6d422bb9
|
/R/model_score.r
|
a1b75eb16a1acf5a5c9286a2db539320967b244e
|
[] |
no_license
|
vishalbelsare/autovar
|
3fe5e4b032938d26faae49ac367c4dbaa2fa127d
|
6277fdf8552246aebb98f4f2c0f6b8c7c199e2f4
|
refs/heads/master
| 2023-01-13T20:20:22.049310
| 2023-01-05T22:39:04
| 2023-01-05T22:39:04
| 163,805,863
| 0
| 0
| null | 2023-01-06T11:34:26
| 2019-01-02T06:59:48
|
R
|
UTF-8
|
R
| false
| false
| 2,116
|
r
|
model_score.r
|
# Goodness of fit tests
model_score <- function(varest) {
# low values == better models
es <- estat_ic(varest)
if (av_state_criterion(varest) == 'BIC') {
es$BIC
} else {
es$AIC
}
}
normalized_model_score <- function(av_state,model) {
normalized_model <- create_model(model,normalized=TRUE)
varest <- calc_varest(av_state,normalized_model)
model_score(varest)
}
printed_model_score <- function(varest) {
# low values == better models
es <- estat_ic(varest)
if (apply_log_transform(varest)) {
paste("(AIC: ",round(es$AIC,digits=3)," (orig: ",round(es$orig_AIC,digits=3),")",
", BIC: ",round(es$BIC,digits=3)," (orig: ",round(es$orig_BIC,digits=3),")",")",sep='')
} else {
paste("(AIC: ",round(es$AIC,digits=3),
", BIC: ",round(es$BIC,digits=3),")",sep='')
}
}
estat_ic <- function(varest) {
varsum <- summary(varest)
nobs <- varsum$obs
k <- nr_parameters_est(varest)
if (apply_log_transform(varest)) {
ll <- logLik_for_logtransformed(varest)
} else {
ll <- varsum$logLik
}
llreal <- varsum$logLik
# k = tparms
# nobs = T
aic <- -2*ll + 2*k
realaic <- -2*llreal + 2*k
bic <- -2*ll + log(nobs)*k
realbic <- -2*llreal + log(nobs)*k
res <- data.frame(Obs=nobs,
ll=ll,
df=k,
AIC=aic,
BIC=bic,
orig_AIC=realaic,
orig_BIC=realbic,
stringsAsFactors = TRUE)
res
}
logLik_for_logtransformed <- function(object) {
# http://webspace.qmul.ac.uk/aferreira/lect2-var2_handout.pdf
# http://www.unc.edu/courses/2010fall/ecol/563/001/docs/lectures/lecture15.htm#transformation
obs <- object$obs
K <- object$K
resids <- resid(object)
Sigma <- crossprod(resids)/obs
r <- -(obs * K/2) * log(2 * pi) - (obs/2) * log(det(Sigma)) -
(1/2) * sum(diag(resids %*% solve(Sigma) %*% t(resids)))
  # Jacobian adjustment: the VAR was fit on y = log(x), so subtracting sum(y)
  # maps the log-likelihood back to the scale of the untransformed series
  r <- r - sum(object$y)
class(r) <- "logLik"
return(r)
}
nr_parameters_est <- function(varest) {
r <- 0
for (lm in varest$varresult) {
r <- r+length(lm$coefficients)
}
r
}
|
fcad03ac785752003c89e7fa376992017cd73b97
|
4c0394633c8ceb95fc525a3594211636b1c1981b
|
/R/nest_by.R
|
b867c64b711291802744420cd83d835b47e4c427
|
[
"MIT"
] |
permissive
|
markfairbanks/tidytable
|
8401b92a412fdd8b37ff7d4fa54ee6e9b0939cdc
|
205c8432bcb3e14e7ac7daba1f4916d95a4aba78
|
refs/heads/main
| 2023-09-02T10:46:35.003118
| 2023-08-31T19:16:36
| 2023-08-31T19:16:36
| 221,988,616
| 357
| 33
|
NOASSERTION
| 2023-09-12T20:07:14
| 2019-11-15T19:20:49
|
R
|
UTF-8
|
R
| false
| false
| 1,595
|
r
|
nest_by.R
|
#' Nest data.tables
#'
#' @description
#' Nest data.tables by group.
#'
#' Note: `nest_by()` _does not_ return a rowwise tidytable.
#'
#' @param .df A data.frame or data.table
#' @param ... Columns to group by. If empty nests the entire data.table.
#' `tidyselect` compatible.
#' @param .key Name of the new column created by nesting.
#' @param .keep Should the grouping columns be kept in the list column.
#'
#' @export
#'
#' @examples
#' df <- data.table(
#' a = 1:5,
#' b = 6:10,
#' c = c(rep("a", 3), rep("b", 2)),
#' d = c(rep("a", 3), rep("b", 2))
#' )
#'
#' df %>%
#' nest_by()
#'
#' df %>%
#' nest_by(c, d)
#'
#' df %>%
#' nest_by(where(is.character))
#'
#' df %>%
#' nest_by(c, d, .keep = TRUE)
nest_by <- function(.df, ..., .key = "data", .keep = FALSE) {
UseMethod("nest_by")
}
#' @export
nest_by.tidytable <- function(.df, ..., .key = "data", .keep = FALSE) {
if (is_true(.keep)) {
split_list <- group_split(.df, ..., .keep = .keep)
.df <- distinct(.df, ...)
.df <- mutate(.df, !!.key := .env$split_list)
} else {
.df <- summarize(.df, !!.key := list(.SD), .by = c(...))
}
.df
}
#' @export
nest_by.grouped_tt <- function(.df, ..., .key = "data", .keep = FALSE) {
.by <- group_vars(.df)
out <- ungroup(.df)
out <- nest_by(out, any_of(.by), .key = .key, .keep = .keep)
group_by(out, any_of(.by))
}
#' @export
nest_by.data.frame <- function(.df, ..., .key = "data", .keep = FALSE) {
.df <- as_tidytable(.df)
nest_by(.df, ..., .key = .key, .keep = .keep)
}
tt_nest_by <- function(.df, ..., .key = "data", .keep = FALSE) {
}
|
3dc1c46727aa33ff2b33b64654c2b1288ba91c77
|
5b67a3e15e7d0f4474ea029c9a56bd49953d04e5
|
/faultDetectionSkySpark/data-raw/cooling_set_outrange_distribute_min_reg.R
|
57f0943f30aabc2e0007423415eca3c964fd3e37
|
[] |
no_license
|
yujiex/gsa_2018
|
9ae9b978331f93ae8c6a7e8199f5545a5255b06c
|
ff7a2f1dd879c833d320b6e22f66d35174ee849e
|
refs/heads/master
| 2020-03-18T02:52:27.096253
| 2019-05-24T15:51:04
| 2019-05-24T15:51:04
| 134,211,125
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,039
|
r
|
cooling_set_outrange_distribute_min_reg.R
|
library("dplyr")
library("ggplot2")
## gsalink_buildings = list.files(path="buildling_fault_start_end", pattern = "*.rda")
## gsalink_buildings <- gsub(".rda", "", gsalink_buildings)
## gsalink_buildings = c("CA0167ZZ")
gsalink_buildings = c("CA0167ZZ", "IN0048ZZ", "TN0088ZZ")
devtools::load_all("../../db.interface")
## devtools::load_all("../../get.noaa.weather")
## devtools::load_all("../../lean.analysis")
head(gsalink_buildings)
start_str = "2018-01-01"
end_str = "2018-12-31"
path = "~/Dropbox/gsa_2017/faultDetectionSkySpark/data-raw/gsalink_weather"
v = "TAVG"
## devtools::load_all("../../roiForECM")
## devtools::load_all("../../get.noaa.weather")
devtools::load_all("../../summarise.and.plot")
for (b in gsalink_buildings) {
  print(b)
  years = as.integer(substr(start_str, 1, 4)):(as.integer(substr(end_str, 1, 4)))
  print(years)
  if (!file.exists(sprintf("gsalink_weather/%s_%s.csv", b, years[[1]]))) {
    result = get.noaa.weather::compile_weather_isd_main(useSavedData=FALSE, years=years, building=b, step="hour")
    result %>%
      readr::write_csv(sprintf("gsalink_weather/%s_%s.csv", b, years[[1]]))
  }
}
r = "Occupied Cooling Setpoint Out of Range"
energytype = "kWh Del Int"
time.min.str="2018-06-01"
time.max.str="2018-09-01"
namelookup = readr::read_csv("../data/gsalink_name_in_rulefile.csv") %>%
tibble::as_data_frame() %>%
{.}
component.group.lookup = readr::read_csv("building_component.csv") %>%
tibble::as_data_frame() %>%
dplyr::rename(equipRef=equip) %>%
{.}
## plot damn kwh per deg F during spark vs not
for (b in gsalink_buildings[1:3]) {
  ## change here when real occupancy-hour info becomes available
occ_hour_start = 8
occ_hour_end = 17
weekdays = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
name = paste0(namelookup[namelookup$building==b,]$name, " ")
dfenergy = readr::read_csv(sprintf("building_energy/%s_%s.csv", b, energytype))
dfrule = readr::read_csv(sprintf("ruleStartEndByBuilding/%s_2018.csv", b))
## change to setpoint temperature
dfweather = readr::read_csv(sprintf("gsalink_weather/%s_2018.csv", b))
dfweather <- dfweather %>%
dplyr::mutate(Timestamp=sprintf("%s %s:00:00", date, hour)) %>%
dplyr::mutate(Timestamp=as.POSIXct(Timestamp, format="%Y%m%d %H:%M:%S", tz="UTC")) %>%
dplyr::rename(`F`=`wt_temperatureFhour`) %>%
dplyr::select(Timestamp, F) %>%
{.}
tz = dfrule[["tz"]][1]
time.min=as.POSIXct(time.min.str, tz=tz)
time.max=as.POSIXct(time.max.str, tz=tz)
dfleft = tibble::tibble(Timestamp=seq(from=time.min, to=time.max, by="mins"))
dfleft.whole = tibble::tibble(Timestamp=seq(from=min(dfrule$startPosix), to=max(dfrule$endPosix), by="mins"))
dfenergy <- dfenergy %>%
dplyr::mutate(Timestamp=as.POSIXct(Timestamp, format="%m/%d/%Y %I:%M:%S %p", tz=tz)) %>%
dplyr::filter(Timestamp>=time.min) %>%
dplyr::filter(Timestamp<=time.max) %>%
dplyr::filter(!!rlang::sym(energytype) >= 0) %>%
dplyr::right_join(dfleft, by="Timestamp") %>%
dplyr::mutate(groupvar = ifelse(!is.na(!!rlang::sym(energytype)), Timestamp, NA)) %>%
tidyr::fill(groupvar) %>%
dplyr::group_by(groupvar) %>%
dplyr::mutate(!!rlang::sym(energytype):=first(!!rlang::sym(energytype)) / n()) %>%
dplyr::ungroup() %>%
dplyr::select(-groupvar) %>%
{.}
## print(dfenergy, n=30)
dfrule <- dfrule %>%
dplyr::filter(rule==r) %>%
dplyr::mutate(equipRef=substr(equipRef, 32, nchar(equipRef))) %>%
dplyr::mutate(equipRef=gsub(name, "", equipRef)) %>%
dplyr::left_join(component.group.lookup, by=c("building", "equipRef")) %>%
dplyr::mutate(count=1) %>%
dplyr::mutate(ecost.per.min=eCost/(durationSecond/60)) %>%
{.}
dfresult = summarise.and.plot::agg_interval(df=dfrule, start="startPosix", end="endPosix",
group="group", value="count",
time.epsilon=0) %>%
dplyr::rename(Timestamp=time)
summarise.and.plot::scan_agg(df=dfrule, start="startPosix", end="endPosix",
group="group", value="count",
time.epsilon=0.1)
ggplot2::ggsave(sprintf("../plots/%s_%s_2018.png", b, r, energytype))
extra.times = setdiff(dfresult[["Timestamp"]], dfleft.whole[["Timestamp"]])
if (length(extra.times) != 0L) {
stop (paste0("The data had unexpected values in the time column; some are: ",
paste(head(extra.times), collapse=", ")))
}
dfresult <- dfresult %>%
dplyr::filter(row.kind!="pre-delta") %>%
tidyr::complete(group, Timestamp=dfleft.whole$Timestamp) %>%
dplyr::group_by(group) %>%
dplyr::arrange(Timestamp) %>%
dplyr::mutate(value.agg=zoo::na.locf0(value.agg)) %>%
dplyr::mutate(value.agg=zoo::na.fill(value.agg, 0)) %>%
dplyr::ungroup(group) %>%
dplyr::filter(Timestamp>=time.min) %>%
dplyr::filter(Timestamp<=time.max) %>%
{.}
dfweather <- dfweather %>%
dplyr::right_join(dfleft, by="Timestamp") %>%
tidyr::fill(F) %>%
{.}
df <- dfenergy %>%
dplyr::left_join(dfresult, by="Timestamp") %>%
dplyr::left_join(dfweather, by="Timestamp") %>%
dplyr::mutate(`rulePresent`=ifelse(value.agg>0, "Yes", "No")) %>%
dplyr::mutate(hour=as.numeric(format(Timestamp, "%H"))) %>%
dplyr::mutate(day=format(Timestamp, "%A")) %>%
dplyr::mutate(is.occupied=ifelse((hour <= occ_hour_end) & (hour >= occ_hour_start) & (day %in% weekdays), "Occupied", "Un-occupied")) %>%
{.}
df %>%
dplyr::rename(`num.component.has.warning`=`value.agg`) %>%
dplyr::select(-`row.kind`) %>%
readr::write_csv(sprintf("building_rule_energy_weather/%s_%s_%s_2018.csv",
b, r, energytype))
their.estimate <-
summarise.and.plot::agg_interval(df=dfrule, start="startPosix",
end="endPosix", group="group",
value="ecost.per.min", time.epsilon=0) %>%
dplyr::filter(row.kind!="pre-delta") %>%
dplyr::rename(Timestamp=time) %>%
tidyr::complete(group, Timestamp=dfleft.whole$Timestamp) %>%
dplyr::group_by(group) %>%
dplyr::arrange(Timestamp) %>%
dplyr::mutate(value.agg=zoo::na.locf0(value.agg)) %>%
dplyr::mutate(value.agg=zoo::na.fill(value.agg, 0)) %>%
dplyr::ungroup(group) %>%
dplyr::filter(Timestamp>=time.min) %>%
dplyr::filter(Timestamp<=time.max) %>%
dplyr::mutate(hour=as.numeric(format(Timestamp, "%H"))) %>%
dplyr::mutate(day=format(Timestamp, "%A")) %>%
dplyr::mutate(is.occupied=ifelse((hour <= occ_hour_end) & (hour >= occ_hour_start) & (day %in% weekdays), "Occupied", "Un-occupied")) %>%
{.}
their.estimate %>%
dplyr::rename(ecost.per.min=value.agg) %>%
readr::write_csv(sprintf("rule_ecost_minute/%s_%s_%s_2018.csv",
b, r, energytype))
p <- df %>%
ggplot2::ggplot(aes(x=F, y=(!!rlang::sym(energytype)), colour=rulePresent)) +
ggplot2::geom_point(size=0.3) +
geom_smooth(method='lm', size=1.5) +
ggplot2::ggtitle(label=sprintf("%s %s, %s", b, energytype, r),
subtitle = sprintf("%s -- %s", time.min, time.max)) +
ggplot2::facet_wrap(group~is.occupied) +
ggplot2::ylab(sprintf("%s per minute", energytype)) +
ggplot2::xlab("outdoor temperature (F)") +
ggplot2::theme()
print(p)
ggplot2::ggsave(sprintf("../plots/%s_%s_%s_2018.png", b, r, energytype))
}
## read in the data from file
for (b in gsalink_buildings) {
df =
readr::read_csv(sprintf("building_rule_energy_weather/%s_%s_%s_2018.csv",
b, r, energytype)) %>%
dplyr::rename(value.agg=num.component.has.warning)
head(df)
## compute counterfactual
dfcmp = df %>%
dplyr::group_by(group, is.occupied) %>%
dplyr::do({
dfnospark = .[.$value.agg==0,]
dfwithspark = .[.$value.agg>0,]
y = dfnospark[[energytype]]
x = dfnospark[["F"]]
model.no.spark = lm(y ~ x)
print(summary(model.no.spark))
new=data.frame(x=dfwithspark$F)
## counterfactual
yhat = predict(model.no.spark, newdata = new)
y = dfwithspark[[energytype]]
x = dfwithspark[["F"]]
model.with.spark = lm(y ~ x)
print(summary(model.with.spark))
yfitted = fitted.values(model.with.spark)
asdf <- data.frame(F=x, modeled.with.spark=yfitted, modeled.no.spark=yhat)
asdf
}) %>%
## dplyr::distinct(group, F, modeled.with.spark, modeled.no.spark) %>%
{.}
dfcmp %>%
tidyr::gather(status, !!rlang::sym(energytype), modeled.with.spark:modeled.no.spark) %>%
ggplot2::ggplot(aes(x=F, y=(!!rlang::sym(energytype)), colour=status)) +
ggplot2::geom_point(size=0.3) +
ggplot2::ggtitle(label=sprintf("%s %s, %s", b, energytype, r),
subtitle = sprintf("%s -- %s", time.min, time.max)) +
ggplot2::facet_wrap(group~is.occupied) +
ggplot2::ylab(sprintf("%s per minute", energytype)) +
ggplot2::xlab("outdoor temperature (F)") +
ggplot2::theme()
df1 = dfcmp %>%
dplyr::mutate(with.minus.without = modeled.with.spark - modeled.no.spark) %>%
dplyr::group_by(group, is.occupied) %>%
dplyr::summarise(with.minus.without.dollar = sum(with.minus.without) * 0.1) %>%
dplyr::rename(costdiff = with.minus.without.dollar) %>%
dplyr::mutate(status="model estimate") %>%
{.}
df2 <- readr::read_csv(sprintf("rule_ecost_minute/%s_%s_%s_2018.csv",
b, r, energytype),
col_types = readr::cols(ecost.per.min = readr::col_double())) %>%
dplyr::group_by(group, is.occupied) %>%
dplyr::summarise(costdiff = sum(ecost.per.min)) %>%
dplyr::ungroup() %>%
dplyr::mutate(status="their estimate") %>%
{.}
df1 %>%
dplyr::bind_rows(df2) %>%
tidyr::spread(status, costdiff) %>%
readr::write_csv(sprintf("cmp/%s_%s_%s_2018.csv",
b, r, energytype))
}
|
bc713f374b080a11779400d558a5a11fdadb20e7
|
5baa8b42d2718a20223b85d94693d01825ea961e
|
/MODELING.R
|
ccd47ba598fcc4a1bb4945885a0c783a1787ce08
|
[] |
no_license
|
DSM-LAC/Bolivia-taller
|
a9d4672a26f8e44f15483f8e390665b5f02c4bb2
|
968e0dc0d63cddd6a2d1dccda61e6156f03f83fc
|
refs/heads/master
| 2020-04-16T19:21:57.960831
| 2019-04-23T14:41:39
| 2019-04-23T14:41:39
| 165,856,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,285
|
r
|
MODELING.R
|
# IMPORT THE ENVIRONMENTAL COVARIATES
library(raster)
covs <- stack("covs5km.tif")
names(covs) <- readRDS('worldgridsCOVS_names.rds')
# EXTRACT THE COVARIATE VALUES AT THE SAMPLE POINTS
e <- extract(covs, training[c('x','y')])
training <- cbind(training, data.frame(e))
# CAP SAND (ARENA) VALUES ABOVE 65% AT 65
training$ARENA[training$ARENA>65] <- 65
# REMOVE THE ZEROS
training$ARENA[training$ARENA==0] <- NA
training <- na.omit(training)
# REMOVE THE CATEGORICAL VARIABLES
cat1 <- grep('igb', names(training))[1:6]
cat2 <- grep('esa', names(training))[23]
cat <- c(cat1, cat2)
t <- na.omit(training[-cat])
# FIND THE VARIABLES BEST CORRELATED WITH SAND (ARENA)
COR <- cor(as.matrix(t[,3]), as.matrix(t[-c(1, 2, 3)]))
library(reshape)
x <- subset(melt(COR), value != 1 | value != NA)
x <- x[with(x, order(-abs(x$value))),]
names(x)[1] <- 'country'
names(x)[2] <- 'predictor'
names(x)[3] <- 'correlation'
bestCor <- data.frame(country = character(), predictor = character(),
correlation = numeric())
bestCor$country <- as.character(bestCor$country)
bestCor$predictor <- as.character(bestCor$predictor)
bestCor <- rbind (bestCor, x[1:10,])
idx <- as.character(x$predictor[1:10])
print(bestCor)
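# The block above ranks every covariate by |Pearson r| with ARENA and keeps the
# top 10 as predictors; a minimal equivalent sketch (same idea, base R only):
#   r <- abs(cor(t[, 3], t[-(1:3)])[1, ])
#   idx <- names(sort(r, decreasing = TRUE))[1:10]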
# KEEP ONLY THE BEST-CORRELATED VARIABLES
train <- training[idx]
train$ARENA <- training$ARENA
train <- na.omit(train)
COVS <- covs[[idx]]
# TRAIN A LINEAR MODEL
model.MLR <- lm(log(ARENA) ~ ., data = train)
predLM <- predict(COVS, model.MLR)
# TRAIN A NON-LINEAR MODEL (Random Forests)
library(randomForest)
arbolReg <- randomForest(ARENA~., train)
predRF <- predict(COVS, arbolReg)
# VISUALIZE THE PREDICTIONS
library(rasterVis)
plot(exp(predLM))
plot(predRF)
# SAVE THE MAPS AS TIF
writeRaster(predRF, file='prediccionArbolRegARENA.tif')
writeRaster(exp(predLM), file='prediccionLinearModelARENA.tif')
#####
##### UP TO HERE
library(caret)
## DEFINE THE CROSS-VALIDATION CONTROLS FOR THE MODEL
fitControl <- trainControl(## 10-fold CV
method = "repeatedcv",
number = 5,
savePredictions = TRUE,
repeats = 5)
# CROSS-VALIDATION OF THE NON-LINEAR FIT
set.seed(825)
(ajusteRandomForest <- train(ARENA ~ ., data = train,
method = "rf",
trControl = fitControl,
verbose = FALSE))
# CROSS-VALIDATION OF THE LINEAR MODEL
set.seed(825)
(ajusteModeloLineal <- train(ARENA ~ ., data = train,
method = "lm",
trControl = fitControl,
verbose = FALSE))
# EXTRACT OBSERVED AND MODELED VALUES FOR BOTH FITS
RFpred <- ajusteRandomForest$pred$pred
obsRF <- ajusteRandomForest$pred$obs
LMpred <- ajusteModeloLineal$pred$pred
obsLM <- ajusteModeloLineal$pred$obs
validacionLM <- data.frame(obs=obsLM, mod=LMpred, model='Linear')
validacionRF <- data.frame(obs=obsRF, mod=RFpred, model='RF')
validacion <- rbind(validacionLM, validacionRF)
# PLOT THE RELATIONSHIP BETWEEN OBSERVED AND MODELED VALUES
library(openair)
conditionalQuantile(validacion, obs = "obs", mod = "mod", type='model')
###
### ENSEMBLE OF BOTH MODELS
library(caretEnsemble)
library(doParallel)
library(doMC)
set.seed(102)
ctrl <- trainControl(method="repeatedcv", number=5, repeats=5, savePredictions = TRUE)
cl <- makeCluster(detectCores(), type='SOCK')
registerDoParallel(cl)
models <- caretList(train[-11], train[,11], trControl=ctrl ,
methodList=c("rf", "lm", "kknn", "pls"))
ens <- caretEnsemble(models)
stopCluster(cl = cl)
### PREDICTIONS
# LINEAR FIT
PREDLM <- predict(COVS, ajusteModeloLineal)
# NON-LINEAR FIT
PREDRF <- predict(COVS, ajusteRandomForest)
# MODEL ENSEMBLE
ENSAMBLE <- predict(COVS, ens)
#STACK PREDICTIONS
models <- stack(PREDLM, PREDRF, ENSAMBLE)
#CONVERT TO DATA FRAME
DF <- na.omit(as.data.frame(models))
names(DF) <- c('Linear', 'RandomForests', 'Ensamble')
# CORRELATION BETWEEN PREDICTIONS
library(psych)
pairs.panels(DF,
method = "pearson", # correlation method
hist.col = "#00AFBB",
density = TRUE, # show density plots
ellipses = TRUE) # show correlation ellipses
# VARIANCE ACROSS PREDICTIONS
library(rasterVis)
names(models) <- c('Linear', 'RandomForests', 'Ensamble')
densityplot(models)
SD <- calc(models , sd)
library(plotKML)
plotKML(SD)
####
#### up to here!
# TRANSFORM THE DATA FOR REGRESSION KRIGING
# Project point data
dat <- spTransform(dat, CRS("+init=epsg:6204"))
# Project covariates to VN-2000 UTM 48N
COVS <- projectRaster(COVS, crs = CRS("+init=epsg:6204"),
method='ngb')
set.seed(102)
ctrl <- trainControl(savePred=T, method="repeatedcv", number=5, repeats=5)
cl <- makeCluster(detectCores(), type='SOCK')
registerDoParallel(cl)
models <- caretList(training[-11], training[,11], trControl=ctrl ,
methodList=c("rf", "svmLinear"))
ens <- caretEnsemble(models)
stopCluster(cl = cl)
# REMOVE VARIABLES WITH MANY GAPS
training$ln2dms3a <- NULL
training$lnmdms3a <- NULL
# IMPUTE MISSING VALUES WITH THE COLUMN MEDIAN
NA2mean <- function(x) replace(x, is.na(x), median(x, na.rm = TRUE))
training[] <- lapply(training, NA2mean)
# CAP UNREALISTIC SAND VALUES AT 65
training$ARENA[training$ARENA>65] <- 65
# REMOVE THE ZEROS
training$ARENA[training$ARENA==0] <- NA
training <- na.omit(training)
# PREPARE THE COVARIATES
x <- as(covs,'SpatialPixelsDataFrame')
x@data[] <- lapply(x@data, NA2mean)
covs <- raster::stack(covs)
#
dat <- training
coordinates(dat) <- ~ x + y
class(dat)
dat@proj4string <- CRS(projargs = "+init=epsg:4326")
dat@proj4string
library(raster)
datdf <- dat@data
datdf <- datdf[, c("ARENA", names(covs))]
# Fit a multiple linear regression model between the log transformed
# values of ARENA and the top 20 covariates
model.MLR <- lm(log(ARENA) ~ ., data = datdf)
mapLM <- predict(model.MLR, covs)
# stepwise variable selection
model.MLR.step <- step(model.MLR, direction="both")
# summary and anova of the new model using stepwise covariates
# selection
summary(model.MLR.step)
anova(model.MLR.step)
# Graphical diagnosis of the regression analysis
par(mfrow=c(2,2))
plot(model.MLR.step)
# Project point data
dat <- spTransform(dat, CRS("+init=epsg:6204"))
# Project covariates to VN-2000 UTM 48N
covs <- projectRaster(covs, crs = CRS("+init=epsg:6204"),
method='ngb')
|
81c551b8c96cc511d12d81d14d71ec112e21ef7e
|
1e23f82ad3aea5958fb4e60136ad0bf07d18edb8
|
/server.R
|
52fe8afdb3e0fab300705e5ae537054aa3aa01ff
|
[] |
no_license
|
cmitter/Spurious_Coorrelation_Demo
|
64e4a7123aac34e89af0c6b05f435abbea6a3a94
|
d9bf28e450ebc82a3d63eb3cbe4b14a5f3088b79
|
refs/heads/master
| 2016-08-11T16:06:40.996428
| 2016-02-14T20:10:27
| 2016-02-14T20:10:27
| 51,711,156
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,029
|
r
|
server.R
|
library(shiny)
library(MASS)
rt2 <- function(n=500,dft=15){ rt(n=n,df=dft) }
formals(rgamma)[1:2] <- c(500,1)
rchisq2 <- function(n=500,dfx=1){ rchisq(n=n,df=dfx) }
formals(rf)[1:3] <- c(500,1,15)
rexp2 <- function(n=500,rate2=1){ rexp(n=n,rate=rate2) }
formals(rbeta)[1:3] <- c(500,2,2)
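# Note: the formals() assignments above bake default argument values (sample
# size n and the distribution parameters) into the r* generators, so the
# reactive below can call each generator with only the user-chosen arguments.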
shinyServer(function(input,output){
dat <- reactive({
dist <- switch(input$dist,
norm=rnorm, unif=runif, t=rt2, F=rf, gam=rgamma, exp=rexp2, chisq=rchisq2, lnorm=rlnorm, beta=rbeta)
def.args <- switch(input$dist,
norm=c(input$mean,input$sd), unif=c(input$min,input$max), t=c(input$dft), F=c(input$df1,input$df2),
gam=c(input$shape,input$rate), exp=c(input$rate2), chisq=c(input$dfx), lnorm=c(input$meanlog,input$sdlog), beta=c(input$shape1,input$shape2))
f <- formals(dist); f <- f[names(f)!="n"]; len <- min(length(f),3-1); f <- f[1:len]
argList <- list(n=input$n*input$var)
for(i in 1:len) argList[[names(f)[i]]] <- def.args[i]
return(list(do.call(dist,argList),names(f)))
})
output$dist1 <- renderUI({
input$dist
isolate({
lab <- switch(input$dist,
norm="Mean:", unif="Minimum:", t="Degrees of freedom:", F="Numerator degrees of freedom:", gam="Shape:", exp="Rate:",
chisq="Degrees of freedom:", lnorm="Mean(log):", beta="Alpha:")
ini <- switch(input$dist,
norm=0, unif=0, t=15, F=1, gam=1, exp=1, chisq=1, lnorm=0, beta=2)
numericInput(dat()[[2]][1],lab,ini)
})
})
output$dist2 <- renderUI({
input$dist
isolate({
lab <- switch(input$dist,
norm="Standard deviation:", unif="Maximum:", F="Denominator degrees of freedom:", gam="Rate:", lnorm="Standard deviation(log)", beta="Beta:")
ini <- switch(input$dist,
norm=1, unif=1, F=15, gam=1, lnorm=1, beta=2)
if(any(input$dist==c("norm","unif","F","gam","lnorm","beta"))) numericInput(dat()[[2]][2],lab,ini)
})
})
output$dldat <- downloadHandler(
filename = function() { paste(input$dist, '.csv', sep='') },
content = function(file) {
write.csv(data.frame(x=dat()[[1]]), file)
}
)
output$plot <- renderPlot({
hist(dat()[[1]],main="",xlab="Observations",col="orange",cex.axis=2,cex.lab=1.2,prob=T)
if(input$density) lines(density(dat()[[1]],adjust=input$bw),lwd=2)
})
output$plot1 <- renderPlot({
# transform random values into matrix
mat<-matrix(as.numeric(dat()[[1]]),input$n)
cormat<-cor(mat)
# take only values of the upper triangular matrix
ut <- upper.tri(cormat)
# vectorized correlations coefficients
corr_m =cormat[ut]
hist(corr_m,main="",xlab="Correlation Coefficients",col="orange",cex.axis=2,cex.lab=1.2,prob=T)
})
output$plot2 <- renderPlot({
X<-matrix(as.numeric(dat()[[1]]),input$n)
nr_class1 <- floor(input$n/2)
nr_class2 <- input$n - nr_class1
Y<- c(rep(0,nr_class1),rep(1,nr_class1))
df <- data.frame(X,Y)
Acc <- rep(0,input$var)
for (i in 1:input$var){
fit<-lda(Y~X[,i],data=df)
t<-table(df$Y, predict(fit,df)$class)
Acc[i] <- (t[1,1]+t[2,2])/input$n
}
hist(Acc,main="",xlab="Accuracy estimates",col="orange",cex.axis=2,cex.lab=1.2,prob=T)
})
output$summary <- renderPrint({
#print("table to big")
})
output$usage <- renderPrint({
txt1<-'1. Choose distribution type <BR> 2. Depending on the distribition choose parameters<BR>'
txt2<-'3. Choose Sample size and Number of variables<P><P>'
txt3<-'The distributions are calculated instantaneously<P>'
txt4<-'Distr. of random variables shows the distribution of the raw data.<br>
Distr. of Correlations shows the distribution of the correlations coefficients of each variables with all others.<br>
Distr. of Accuracy values shows the accuracy values after an prediction with linar discriminant analysis with an random 2-class asssignment'
txt5<-'Source: Input navigation was build on <A href="https://github.com/ua-snap/shiny-apps/tree/master/RV_distributions">RV_distributions by leonawicz'
HTML(c(txt1,txt2,txt3,txt4,txt5))
})
output$pageviews <- renderText({
if (!file.exists("pageviews.Rdata")) pageviews <- 0 else load(file="pageviews.Rdata")
pageviews <- pageviews + 1
save(pageviews,file="pageviews.Rdata")
paste("Visits:",pageviews)
})
})
|
2bb9d8e862b10f44cf667bfe605c03abc3e992a0
|
1ff3b2a37707ab1049a34c61f72898db95864b30
|
/Cross Validation algorithm.R
|
f44c218853ff486f9a7f53dbaa51abb2c3f6d6e0
|
[] |
no_license
|
NarendarK29/Model-Selection
|
bbd04860a9405c25916e6496a10662c98d70e298
|
41dda6d857f3bd48e6bb77cd4f86028497467d99
|
refs/heads/master
| 2020-09-23T07:10:14.030442
| 2019-12-02T18:11:16
| 2019-12-02T18:11:16
| 225,435,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,641
|
r
|
Cross Validation algorithm.R
|
###########################################################
# Data Mining #
# Narendar Kumar #
###########################################################
gen_reg=function(n){
x=rnorm(n,0,1)
u=rnorm(n,0,1)
y= 1+x^3+u
gen_reg=cbind.data.frame(y,x)
return(gen_reg)
}
set.seed(90)
n=200
data = gen_reg(n)
train=sample(n, 0.8*n)
##plot(train_x,train_y)
plot(y ~ x, data = data, subset = train)  # in a formula, x^3 reduces to x (use I(x^3) for a cube)
curve(1+x^3, add= T)
#train_y=data[,1][train]
#train_x=data[,2][train]
##z=lm.fit$coefficients[-1]
##y_pred=sum(x[-train])*z+lm.fit$coefficients[1]
## EX_2(a)
MSE= function(actual, predicted){
MSE= mean((actual - predicted)^2)
return(MSE)
}
lm_fit= glm(y ~ 1+I(x^3), data=data, subset=train)
y_pred= predict(lm_fit, data)[-train]
y_act= data[,1][-train]
y_act
MSE(y_act, y_pred)
## EX_2(b)
library(boot)
my.kfold = function(model, data, k){
  if (nrow(data) %% k != 0) stop("k must divide nrow(data)")
  my.kfoldk = rep(0, k)
  for (i in 1:k){
    # cv.glm refits the supplied model under K-fold CV; delta[1] is the raw
    # cross-validation estimate of the prediction error
    my.kfoldk[i] = cv.glm(data, model, K = k)$delta[1]
  }
  return(my.kfoldk)
}
k = 10  # hypothetical value; k must divide nrow(data) = 200
my.kfold(lm_fit, data, k)
## EX_3
seednum = 1:100
n=200
MSE_training= c()
MSE_Val= c()
MSE_CV = c()
my.kfold <- function(data,K){
cv.error <- c()
if(nrow(data)%%K==0){
kfold= sample(nrow(data), nrow(data) - nrow(data)/K, replace=FALSE)
cv.error=glm(y ~ 1 + poly(x,j), data=data, subset = kfold)
}else{
print("Error change K")
cv.error=NA
}
return(cv.error)
}
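## Minimal usage sketch (hypothetical values; K must divide nrow(data) = 200):
## fit_subset <- my.kfold(data, K = 10, j = 3)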
|
cf4a44cf0cb999e0aa5290d2863f5956d11838a6
|
9739f7e3c1aaa3b3f6cd4fae073d8c9baf0a9e53
|
/R/bootInclude.R
|
53b358175f2635d86ab793402ac2494d85c693db
|
[] |
no_license
|
SachaEpskamp/bootnet
|
127d9b460c6833d7562e1876bf31e1f5fbe3de1a
|
d5af7f30e57c00b0313dbd2479ae379857559ab5
|
refs/heads/master
| 2023-08-07T22:32:06.086492
| 2023-08-02T08:52:28
| 2023-08-02T08:52:28
| 30,825,303
| 23
| 18
| null | 2023-05-16T11:44:24
| 2015-02-15T11:14:46
|
R
|
UTF-8
|
R
| false
| false
| 2,022
|
r
|
bootInclude.R
|
# Function to create include probability network
bootInclude <- function(bootobject,verbose=TRUE){
# Check if object is bootnet object:
if (!is(bootobject,"bootnet")){
stop("'bootobject' must be an object of class 'bootnet'")
}
# Check type:
if (bootobject$type != "nonparametric" & bootobject$type != "parametric"){
stop("Bootstrap type must be 'nonparametric' or 'parametric'")
}
# Extract the network object:
Network <- bootobject$sample
# Dummy for multiple graphs:
if (!is.list(Network$graph)){
Graphs <- list(Network$graph)
Directed <- list(Network$directed)
Intercepts <- list(Network$intercepts)
names(Graphs) <- names(Directed) <- names(Intercepts) <-
unique(bootobject$bootTable$graph)
} else {
Graphs <- Network$graph
Directed <- Network$directed
Intercepts <- Network$intercepts
}
# For every graph:
for (g in seq_along(Graphs)){
graphName <- names(Graphs)[g]
# Summary table of edge weights:
bootSummary <- bootobject$bootTable %>%
dplyr::filter(.data[['type']] == "edge", .data[['graph']] == graphName) %>%
dplyr::group_by(.data[['node1']],.data[['node2']]) %>%
dplyr::summarize(
propNonZero=mean(value != 0)
)
# Reweight network:
    Graphs[[graphName]][] <- 0
    # seq_len() yields an empty loop when bootSummary has no rows, the case the
    # old "if (nrow(bootSummary) > 0)" guard was meant to handle
    for (i in seq_len(nrow(bootSummary))){
Graphs[[graphName]][Network$labels == bootSummary$node1[i],Network$labels == bootSummary$node2[i]] <- bootSummary$propNonZero[i]
if (!Directed[[graphName]]){
Graphs[[graphName]][Network$labels == bootSummary$node2[i],Network$labels == bootSummary$node1[i]] <- bootSummary$propNonZero[i]
}
}
}
# Return to network object:
if (length(Graphs) == 1){
Network$graph <- Graphs[[1]]
Network$intercepts <- NULL
} else {
Network$graph <- Graphs
Network$intercepts <- NULL
}
# Add indicator network is about include proportions:
Network$bootInclude <- TRUE
# Return network:
return(Network)
}
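# Usage sketch (typical bootnet workflow; object names are illustrative):
# boots <- bootnet(Network, nBoots = 1000, type = "nonparametric")
# inclusion <- bootInclude(boots) # edge weights become inclusion proportions
# plot(inclusion)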
|
6511e4398491347891c7b137b6800607d249284f
|
d28508911e5a2f5c3d8d849d7d2a97c687dbffd9
|
/Chapter07/spacy_pre_trained_embeddings.R
|
6ce294f41d8bb6671d612006f46a0751ebcd5cec
|
[
"MIT"
] |
permissive
|
PacktPublishing/Hands-on-Deep-Learning-with-R
|
10032fb0aceed0b315cf7bb399f53e07885df8f7
|
6e3766377395d4e2a853f787d1f595e4d8d28fa5
|
refs/heads/master
| 2023-02-11T11:05:47.140350
| 2023-01-30T09:37:44
| 2023-01-30T09:37:44
| 124,351,189
| 21
| 15
|
MIT
| 2020-04-09T06:29:03
| 2018-03-08T07:03:57
|
R
|
UTF-8
|
R
| false
| false
| 128
|
r
|
spacy_pre_trained_embeddings.R
|
library(spacyr)
spacy_install()
spacy_initialize(model = "en_core_web_sm")
spacy_parse(twenty_newsgroups$text[1], entity = TRUE, lemma = TRUE)
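# A minimal follow-up sketch, assuming `twenty_newsgroups` is a data frame with
# a character `text` column (it is not defined in this snippet):
# parsed <- spacy_parse(twenty_newsgroups$text[1], entity = TRUE, lemma = TRUE)
# entity_extract(parsed) # spacyr helper that pulls out the named entities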
|
fef3c9563a7660d7100cf9cf47a57ba2cac854c3
|
81d93db227dac663a9a460e9ba97b6bfe54e9411
|
/man/plotStability.Rd
|
4223c79f28ccc9bb1f2c2dd46267f0c008a7448d
|
[] |
no_license
|
powellgenomicslab/ascend
|
792d53a054e789cdf8f033bcba71e39c081abb1a
|
48c235d3fa02b7c1403f5acbc530036dd12d5fb9
|
refs/heads/master
| 2020-06-06T03:12:44.266851
| 2019-08-19T06:30:28
| 2019-08-19T06:30:28
| 192,622,263
| 12
| 10
| null | 2020-02-29T21:13:52
| 2019-06-18T22:48:51
|
R
|
UTF-8
|
R
| false
| true
| 673
|
rd
|
plotStability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ascend_plots.R
\name{plotStability}
\alias{plotStability}
\title{plotStability}
\usage{
plotStability(object)
}
\arguments{
\item{object}{An \code{\linkS4class{EMSet}} object that has undergone clustering.}
}
\value{
A line graph generated by ggplot2's geom_line function
}
\description{
Plots Stability, Consecutive RI and Rand Index. This can be used to determine
the optimal resolution of the clustering results.
}
\examples{
# Load example EMSet that has undergone processing
em_set <- ascend::analyzed_set
# Use function to plot stability scores
stability_plot <- plotStability(em_set)
}
|
e991891e163c87446d9656c7284d2befa4f9be11
|
d86268c2fdd4195208c3fd5aecab31c324af7bca
|
/omd/man/my_basic_setdiff.Rd
|
354cdcc1f80f2422beaa1e76683eef0eac910d93
|
[] |
no_license
|
bio-datascience/omd
|
0e2edc61e86c135383b5d4bf29c14c95af026f5f
|
5f2f532dfe077388f7911cc7999622c4b6a3f8b8
|
refs/heads/master
| 2023-08-28T21:44:27.488641
| 2021-11-02T15:25:02
| 2021-11-02T15:25:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 309
|
rd
|
my_basic_setdiff.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/landcrossing.R
\name{my_basic_setdiff}
\alias{my_basic_setdiff}
\title{More basic version of \code{my_setdiff()}.}
\usage{
my_basic_setdiff(a, b, check_if_a_in_b = TRUE)
}
\description{
More basic version of \code{my_setdiff()}.
}
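\examples{
# a minimal sketch, assuming setdiff-like semantics on atomic vectors
my_basic_setdiff(c(1, 2, 3), c(2, 3))
}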
|
8473bc40f2515b0c9b3832dc5dc0400aefb7e2b5
|
03f9b872f9e89453d1faf9b545d23fbad83bb303
|
/R/summary_reads.R
|
ae90ca077f928b38b0a7e7b5ce1209631ccf8b75
|
[] |
no_license
|
kawu001/stackr
|
99fa54f4b4e1c8194550752bb238864597442c08
|
684b29b9895c773f48d0e58cba3af22fc2c98a56
|
refs/heads/master
| 2023-01-06T06:47:55.234575
| 2020-11-05T13:51:20
| 2020-11-05T13:51:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,447
|
r
|
summary_reads.R
|
#' @name summary_reads
#' @title Summarise the reads for indel and GC content and produce the read depth plot
#' @description Still in dev, but works nicely.
#' Summarise the reads for indel and GC content and produce the read depth plot
#'
#' @param fq.files (character, path) The path to the individual fastq file to check,
#' or the entire fq folder.
#' @param output.dir (path) Write the output in a specific directory.
#' Default: \code{output.dir = NULL}, uses the working directory.
#' @param read.depth.plot (logical) To get the interesting summaries on
#' the read content, the function is very close to similar to
#' \code{\link{read_depth_plot}}, so you can produce it for each sample with
#' minimal cost on time.
#' Default: \code{read.depth.plot = TRUE}.
#' @inheritParams read_depth_plot
#' @rdname summary_reads
#' @export
#' @return The function returns the read depth groups plot and the read stats overall
#' and by read depth groups.
#' @examples
#' \dontrun{
#' require(vroom)
#' sum <- summary_reads(fq.files = "my_fq_folder")
#' }
summary_reads <- function(
fq.files,
output.dir = NULL,
read.depth.plot = TRUE,
min.coverage.fig = 7L,
parallel.core = parallel::detectCores() - 1
) {
opt.change <- getOption("width")
options(width = 70)
cat("#######################################################################\n")
cat("####################### stackr::summary_reads #########################\n")
cat("#######################################################################\n")
timing <- proc.time()
if (!"vroom" %in% utils::installed.packages()[,"Package"]) {
rlang::abort('Please install vroom for this option:\n
install.packages("vroom")')
}
if (assertthat::is.string(fq.files) && assertthat::is.dir(fq.files)) {
fq.files <- stackr::list_sample_file(f = fq.files, full.path = TRUE, recursive = TRUE)
message("Analysing ", length(fq.files), " samples...")
}
if (length(fq.files) > 1) {
future::plan(future::multisession, workers = parallel.core)
p <- progressr::progressor(steps = length(fq.files))
res <- furrr::future_map(
.x = fq.files,
.f = reads_stats,
read.depth.plot = read.depth.plot,
min.coverage.fig = min.coverage.fig,
output.dir = output.dir,
parallel.core = parallel.core,
p = p,
verbose = FALSE,
.progress = FALSE
)
} else {
res <- reads_stats(
fq.files = fq.files,
read.depth.plot = read.depth.plot,
min.coverage.fig = min.coverage.fig,
output.dir = output.dir,
parallel.core = parallel.core,
verbose = TRUE
)
}
timing <- proc.time() - timing
options(width = opt.change)
message("\nComputation time: ", round(timing[[3]]), " sec")
cat("############################## completed ##############################\n")
return(res)
}#summary_reads
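# Usage sketch: to display the progress bar, wrap the call site with progressr,
# e.g. progressr::with_progress(res <- summary_reads(fq.files = "my_fq_folder"))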
# Internal function required ---------------------------------------------------
#' @title reads_stats
#' @description Main function to summarise the reads
#' @rdname reads_stats
#' @export
#' @keywords internal
reads_stats <- function(
fq.files,
read.depth.plot = TRUE,
min.coverage.fig = 7L,
output.dir = NULL,
parallel.core = parallel::detectCores() - 1,
  p = NULL,
  verbose = TRUE
) {
  # p is a progressr progressor handed in by summary_reads(); default to NULL
  # so reads_stats() can also be called directly without erroring
  if (!is.null(p)) p()
clean.names <- stackr::clean_fq_filename(basename(fq.files))
if (verbose) message("Sample name: ", clean.names)
read.stats <- vroom::vroom(
file = fq.files,
col_names = "READS",
col_types = "c",
delim = "\t",
num_threads = parallel.core,
progress = TRUE
) %>%
dplyr::mutate(
INFO = seq.int(from = 1L, to = n()),
SEQ = rep(1:4, n() / 4)
) %>%
dplyr::filter(SEQ == 2L)
total.sequences <- nrow(read.stats)
if (verbose) message("Number of reads: ", total.sequences)
# stats ----------------------------------------------------------------------
if (verbose) message("Calculating reads stats...")
read.stats %<>% dplyr::count(READS, name = "DEPTH")
# detect indel and / or low quality reads...----------------------------------
# Number of N
# GC-content/ratio (or guanine-cytosine content), proportion of nitrogenous bases in a DNA
# DNA with low GC-content is less stable than DNA with high GC-content
indel <- read.stats %>%
dplyr::mutate(
LENGTH = stringi::stri_length(str = READS),
N = stringi::stri_count_fixed(str = READS, pattern = "N"),
N_PROP = round(N / LENGTH, 2),
GC = stringi::stri_count_fixed(str = READS, pattern = "C") +
stringi::stri_count_fixed(str = READS, pattern = "G"),
GC_PROP = round(GC / LENGTH, 2)
)
indel.stats.by.depth.group <- stats_stackr(data = indel, x = "N", group.by = "DEPTH")
gc.ratio.by.depth.group <- stats_stackr(data = indel, x = "GC_PROP", group.by = "DEPTH")
stats.overall <- dplyr::bind_rows(
stats_stackr(data = indel, x = "N") %>% dplyr::mutate(GROUP = "INDEL", .before = 1L),
stats_stackr(data = indel, x = "GC_PROP") %>% dplyr::mutate(GROUP = "GC", .before = 1L)
) %>%
tibble::add_column(.data = ., INDIVIDUALS = clean.names, .before = 1L) %>%
tibble::add_column(.data = ., TOTAL_READS = total.sequences, .before = 2L)
if (!is.null(output.dir)) {
vroom::vroom_write(x = stats.overall, path = file.path(output.dir, paste0(clean.names, "_stats.overall.tsv")))
}
read.stats %<>% dplyr::count(DEPTH, name = "NUMBER_DISTINCT_READS")
depth.group.levels <- c("low coverage", "target", "high coverage", "distinct reads")
max.coverage.fig <- min(read.stats$DEPTH[read.stats$NUMBER_DISTINCT_READS == 1L]) - 1
read.stats %<>%
dplyr::mutate(
DEPTH_GROUP = dplyr::case_when(
NUMBER_DISTINCT_READS == 1L ~ "distinct reads",
DEPTH < min.coverage.fig ~ "low coverage",
DEPTH >= min.coverage.fig & DEPTH <= max.coverage.fig ~ "target",
DEPTH > max.coverage.fig ~ "high coverage"
),
DEPTH_GROUP = factor(x = DEPTH_GROUP, levels = depth.group.levels, ordered = TRUE)
)
distinct.sequences <- total.number.distinct.reads <- sum(read.stats$NUMBER_DISTINCT_READS)
# read_depth_plot ------------------------------------------------------------
if (read.depth.plot) {
color.tibble <- tibble::tibble(
DEPTH_GROUP = c("low coverage", "target", "high coverage", "distinct reads"),
LABELS = c("low coverage", paste0("target [", min.coverage.fig, " - ", max.coverage.fig, "]"), "high coverage > 1 reads", "high coverage, unique reads"),
GROUP_COLOR = c("red", "green", "yellow", "orange")
) %>%
dplyr::mutate(
DEPTH_GROUP = factor(x = DEPTH_GROUP, levels = depth.group.levels, ordered = TRUE)
)
hap.read.depth.group.stats <- read.stats %>%
dplyr::mutate(DISTINCT_READS_DEPTH = DEPTH * NUMBER_DISTINCT_READS) %>%
dplyr::group_by(DEPTH_GROUP) %>%
dplyr::summarise(
NUMBER_READS_PROP = round((sum(DISTINCT_READS_DEPTH) / total.sequences), 4),
.groups = "drop"
) %>%
dplyr::left_join(color.tibble, by = "DEPTH_GROUP") %>%
dplyr::mutate(
LABELS = stringi::stri_join(LABELS, " (", as.character(format(NUMBER_READS_PROP, scientific = FALSE)), ")")
)
base_breaks <- function(n = 10){
function(x) {
grDevices::axisTicks(log10(range(x, na.rm = TRUE)), log = TRUE, n = n)
}
}
read.depth.plot <- ggplot2::ggplot(
data = read.stats, ggplot2::aes(x = DEPTH, y = NUMBER_DISTINCT_READS)
) +
ggplot2::geom_point(ggplot2::aes(colour = DEPTH_GROUP)) +
ggplot2::labs(
title = paste0("Read Depth Groups for sample: ", clean.names),
subtitle = paste0("Total reads: ", total.sequences),
x = "Depth of sequencing (log10)",
y = "Number of distinct reads (log10)"
) +
ggplot2::annotation_logticks() +
ggplot2::scale_colour_manual(
name = "Read coverage groups",
labels = hap.read.depth.group.stats$LABELS,
values = hap.read.depth.group.stats$GROUP_COLOR
) +
ggplot2::scale_x_log10(breaks = c(1, 5, 10, 25, 50, 75, 100, 250, 500, 1000)) +
ggplot2::scale_y_log10(breaks = base_breaks()) +
ggplot2::theme(
axis.title = ggplot2::element_text(size = 16, face = "bold"),
legend.title = ggplot2::element_text(size = 16, face = "bold"),
legend.text = ggplot2::element_text(size = 16, face = "bold"),
legend.position = c(0.7,0.8)
)
if (!is.null(output.dir)) {
ggplot2::ggsave(
plot = read.depth.plot,
filename = file.path(output.dir, paste0(clean.names, "_hap_read_depth.png")),
width = 25,
height = 15,
dpi = 300,
units = "cm"
)
}
} else {
read.depth.plot <- NULL
}
# results ------------------------------------------------------------------
read.stats %<>%
dplyr::left_join(indel.stats.by.depth.group, by = "DEPTH") %>%
dplyr::left_join(gc.ratio.by.depth.group, by = "DEPTH", suffix = c("_INDEL", "_GC")) %>%
tibble::add_column(.data = ., INDIVIDUALS = clean.names, .before = 1L) %>%
tibble::add_column(.data = ., TOTAL_READS = total.sequences, .before = 2L)
if (!is.null(output.dir)) {
vroom::vroom_write(x = read.stats, path = file.path(output.dir,paste0(clean.names, "_stats.by.depth.groups.tsv")))
}
return(list(overall = stats.overall, by.read.depth.groups = read.stats, plot = read.depth.plot))
}# End reads_stats
|
464215cceb7af480293d11da8401b12f632982c2
|
e717b2e829667fd3b6691c923ab4633af4d5ef74
|
/plot2.R
|
4a4877f2ee455675f767472f2661cc526361e9bc
|
[] |
no_license
|
Emil-png/Exploratory_Data_Analysis_Assignment
|
30568390a28b7e57293c37c2f2158e8a6c47e92d
|
a44333694c863fe3d3d0ab127d9bbcdb08a501f8
|
refs/heads/main
| 2023-01-23T02:09:02.929418
| 2020-12-06T00:05:35
| 2020-12-06T00:05:35
| 318,914,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,295
|
r
|
plot2.R
|
#set working directory
setwd("C:/Users/Dell/Desktop/Cursos/Data_Science_course_Jopkins/Exploratory_data_analysis/Assignments")
if(!file.exists("./dataStore")){dir.create("./dataStore")}
#now get the data for the project from the url
get.data.project <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(get.data.project, destfile = "./dataStore/exdata_data_NEI_data.zip", method = "auto")
#unzip data files
unzip(zipfile = "./dataStore/exdata_data_NEI_data.zip")
if(!exists("NEI")){
NEI <- readRDS("summarySCC_PM25.rds")
}
if(!exists("SCC")){
SCC <- readRDS("Source_Classification_Code.rds")
}
#Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510") from 1999 to 2008?
#Use the base plotting system to make a plot answering this question.
NEIsubset <- NEI[NEI$fips=="24510", ]
total_annual_emissions_Balt <- aggregate(Emissions ~ year, NEIsubset, FUN = sum)
#create png file
png("plot2.png")
plot2 <- barplot(height = total_annual_emissions_Balt$Emissions/1000, names.arg = total_annual_emissions_Balt$year, xlab = "years", ylab = expression("total PM"[2.5]*" emission"), main = expression("Total PM"[2.5]*" emissions in Baltimore in time"), col= 2:(length(total_annual_emissions_Balt$year)+1))
dev.off()
|
95b15ae915d516a81266df27a5fe3941a899b7fc
|
f25c5405790cf17a2b6e78b4ef58654810c8bb7b
|
/man/columnFilter.Rd
|
90238ef5cdfc8bd4604ba13b346c3be71ff3e2aa
|
[] |
no_license
|
moturoa/shintodashboard
|
15ad881ea4c72549b616a3021852a0db8c25f6fd
|
80385da221d370a563eb1cfe8946964acfacfe15
|
refs/heads/master
| 2023-05-31T07:05:11.026309
| 2021-06-28T12:55:32
| 2021-06-28T12:55:32
| 312,505,839
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 368
|
rd
|
columnFilter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/columnFilter.R
\name{columnFilterUI}
\alias{columnFilterUI}
\alias{columnFilter}
\title{Shiny module to filter a column}
\usage{
columnFilterUI(id, data, preset = NULL)
columnFilter(input, output, session, data, preset = NULL)
}
\description{
Shiny module to filter a column
}
|
f1955ebbde05035ba8bda7f1d4aada4f2e66af4b
|
d2bca2cec889f3ebd2ecfaf9157b9412702d58df
|
/miRNA/rfam.R
|
208fad0a5fef5a5c6cec93519ef4877bbadeaf8a
|
[] |
no_license
|
czheluo/R-Script
|
471fee99b3aa7e5942d42e78bd0d4e6e194f2297
|
caa2968bcf3a767d51e2c2c948c8c20f99a2dcb0
|
refs/heads/master
| 2023-04-15T09:30:29.758206
| 2023-04-06T07:53:29
| 2023-04-06T07:53:29
| 183,537,154
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,182
|
r
|
rfam.R
|
gro<-read.table("group.list",header=F)
g<-unique(gro[,1])
for (i in 1:length(g)){
print(g[i])
loc1<-read.table(paste(g[i],"_1_loc_bias_per.xls",sep=""),sep="\t",header=T)
loc2<-read.table(paste(g[i],"_2_loc_bias_per.xls",sep=""),sep="\t",header=T)
loc3<-read.table(paste(g[i],"_3_loc_bias_per.xls",sep=""),sep="\t",header=T)
p1<- t(loc1)[-1,]
colnames(p1) <- t(loc1)[1,]
p2<- t(loc2)[-1,]
colnames(p2) <- t(loc2)[1,]
p3<- t(loc3)[-1,]
colnames(p3) <- t(loc3)[1,]
p <- (p1+p2+p3)/3
write.table(t(p),paste(g[i],"_loc_bias_per.xls",sep=""),sep="\t",row.names=T,col.names=NA,quote=F)
pdf(paste(g[i],"_loc_bias_per.pdf",sep=""),10,6)
par(pin=c(5,1.8),fig=c(0,0.95,0,1),xpd=T,cex.axis=0.8)
barplot(p,col=c("blue","yellow","red","purple"),yaxt="n",ann=FALSE)
title(main="miRNA nucleotide bias at each position")
axis(2,at=seq(0,1,0.25),labels=seq(0,100,25))
legend(32+6,1,c("A","G","C","U"),col=c("blue","yellow","red","purple"),pch=c(15),bty="n",cex=0.7)
mtext("Percent(%)",side=2,las=0,cex.lab=0.7,line=2)
mtext("Position",side=1,las=0,cex.lab=0.7,line=2)
segments(-1.5,0,32+5,0)
dev.off()
loc1<-read.table(paste(g[i],"_1_first_bias_per.xls",sep=""),sep="\t",header=T)
loc2<-read.table(paste(g[i],"_2_first_bias_per.xls",sep=""),sep="\t",header=T)
loc3<-read.table(paste(g[i],"_3_first_bias_per.xls",sep=""),sep="\t",header=T)
p1<- t(loc1)[-1,]
colnames(p1) <- t(loc1)[1,]
p2<- t(loc2)[-1,]
colnames(p2) <- t(loc2)[1,]
p3<- t(loc3)[-1,]
colnames(p3) <- t(loc3)[1,]
p <- (p1+p2+p3)/3
write.table(t(p),paste(g[i],"_first_bias_per.xls",sep=""),sep="\t",row.names=T,col.names=NA,quote=F)
pdf(paste(g[i],"_first_bias_per.pdf",sep=""),10,6)
par(pin=c(5,1.8),fig=c(0,0.95,0,1),xpd=T,cex.axis=0.8)
barplot(p,col=c("blue","yellow","red","purple"),yaxt="n",ann=FALSE)
title(main="miRNA nucleotide bias at each position")
axis(2,at=seq(0,1,0.25),labels=seq(0,100,25))
legend(32+6,1,c("A","G","C","U"),col=c("blue","yellow","red","purple"),pch=c(15),bty="n",cex=0.7)
mtext("Percent(%)",side=2,las=0,cex.lab=0.7,line=2)
mtext("Position",side=1,las=0,cex.lab=0.7,line=2)
segments(-1.5,0,32+5,0)
dev.off()
}
|
ed2377256eb91d871e25ea8dd1c1f9d97db3418e
|
fc744a2378b8b1f61f8e0a509f84b5551bb2eeeb
|
/R/fix_lmer.R
|
199c272eef60d58f856c11d0f782cf2fff1f2cc1
|
[] |
no_license
|
devoges/fitLMM
|
d8950a3aad2ab808b3f79be93b1776ab8aa68cda
|
12b1692402fd9f6f4c02e20d295b6c8102475079
|
refs/heads/master
| 2020-04-27T15:02:52.525236
| 2019-07-19T19:01:58
| 2019-07-19T19:01:58
| 174,429,590
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,696
|
r
|
fix_lmer.R
|
##-- found online at
##-- https://rstudio-pubs-static.s3.amazonaws.com/305732_7a0cfc5b535c41c8b56ffdc2e322a51d.html
#' @importFrom Matrix Matrix bdiag
#' @importFrom stats rmultinom
#' @importFrom methods as
myReTrms<-function(ListZ,ListVar=NULL){
reTrms<-list()
reTrms$Zt <- Matrix::Matrix(t(Reduce('cbind',ListZ)),sparse=TRUE)
reTrms$Zt <- as(reTrms$Zt, "dgCMatrix")
reTrms$theta <- rep(1,length(ListZ)) # Initial Value of the covariance parameters
reTrms$Lind <- rep(1:length(ListZ),unlist(lapply(ListZ,ncol))) #an integer vector of indices determining the mapping of the elements of the theta vector to the "x" slot of Lambdat
reTrms$Gp <- as.integer(unname(cumsum(c(0,unlist(lapply(ListZ,ncol))))))
reTrms$lower <- rep(0,length(ListZ)) # lower bounds on the covariance parameters
if (is.null(ListVar)) {
reTrms$Lambdat <- Matrix(diag(rep(1,sum(unlist(lapply(ListZ,ncol))))),sparse=TRUE)
} else {
reTrms$Lambdat <- bdiag(lapply(ListVar,chol))
}
reTrms$Ztlist <- lapply(ListZ,function(Z) as(Matrix(t(Z),sparse=T), "dgCMatrix"))
reTrms$cnms <- as.list(names(ListZ)) ; names(reTrms$cnms)<- names(ListZ)
# Flist is Not very clean (to say the least... )
reTrms$flist <- lapply(ListZ,function(Z) {flist.factor<- as.factor(colnames(Z)[apply(Z,1,function(x) which(rmultinom(n=1,size=1,prob =abs(x)+0.1)==1) )]);
levels(flist.factor)<-colnames(Z); return(flist.factor)}) #NULL # list of grouping factors used in the random-effects terms (used for computing initial variance ??)
return(reTrms)
}
#' @importFrom stats sigma
myranef<-function(model,condVar=TRUE){
re.cond.mode<-tapply(model@u,mymod@pp$Lind,function(x) x)
names(re.cond.mode)<- names(model@cnms)
if (condVar) {
Zt<-model@pp$Zt
D <- sigma(model)* t(model@pp$Lambdat) %*% model@pp$Lambdat
Sigma<- t(Zt)%*% D %*%Zt + sigma(model)*diag(rep(1,ncol(Zt)))
var.cond <- D - Zt %*%solve(Sigma) %*% t(Zt)
var.cond <- diag(var.cond)
var.cond.mode <- tapply(var.cond,mymod@pp$Lind,function(x) x)
}
for (i in 1:length(re.cond.mode)) {
re.cond.mode[[i]]<-data.frame(re.cond.mode[i])
names(re.cond.mode[[i]])<-names(re.cond.mode[i])
row.names(re.cond.mode[[i]]) <- levels(model@flist[[i]])
if (condVar) attr(re.cond.mode[[i]],"postVar")=array(var.cond.mode[[i]],c(1,1,nrow(re.cond.mode[[i]])))
}
attr(re.cond.mode,"class") <- "ranef.mer"
re.cond.mode
}
#' @importFrom stats model.frame setNames logLik
#' @importFrom lme4 mkLmerDevfun optimizeLmer mkMerMod getME VarCorr
#' @importFrom dplyr case_when mutate
#' @importFrom magrittr %>%
#' @export
fit_lmer <- function(Y,
X,
Zlist,
REML = TRUE) {
fr <- model.frame(Y ~ .,
data.frame(Response = Y,
X))
reTrms <- myReTrms(Zlist, NULL)
reTrms$Zt <- as(reTrms$Zt, "dgCMatrix")
devfun <- mkLmerDevfun(fr, X, reTrms, REML = REML)
opt <- optimizeLmer(devfun)
model <- mkMerMod(environment(devfun), opt, reTrms, fr = fr)
beta <- getME(model, name = "fixef")
beta <- beta[order(names(beta))]
foo <- as.data.frame(VarCorr(model))[, c("var1", "vcov")] %>%
mutate(var1 = case_when(is.na(var1) ~ "Error",
TRUE ~ var1))
vcs <- setNames(foo$vcov, foo$var1)
vcs <- vcs[order(names(vcs))]
ll_REML <- logLik(model, REML = TRUE)[1]
ll_ML <- logLik(model, REML = FALSE)[1]
rss <- unname(getME(model, "devcomp")$cmp["pwrss"])
list("type" = "lme4",
"model" = model,
"beta" = beta,
"vcs" = vcs,
"ll_REML" = ll_REML,
"ll_ML" = ll_ML,
"rss" = rss,
"method" = ifelse(REML, "REML", "ML"))
}
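# Minimal usage sketch (hypothetical toy data, not part of the package):
# grp <- gl(5, 20)
# Z <- model.matrix(~ 0 + grp)
# X <- cbind(`(Intercept)` = 1, x = rnorm(100))
# Y <- as.vector(X %*% c(1, 2) + Z %*% rnorm(5, sd = 0.5) + rnorm(100))
# fit <- fit_lmer(Y, X, Zlist = list(grp = Z))
# fit$beta; fit$vcs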
|
576e82faa11d5c93ee9fb32a2a9ba27859a82c72
|
120de1ae49850f8212efc39ab9fa266f175dc4c6
|
/man/covar.Rd
|
6957e7c96f8578bde38976d0d0e5f5aa7c9f57a3
|
[] |
no_license
|
vsrimurthy/EPFR
|
168aed47aa2c48c98be82e3d8c833d89e1d11e04
|
544471a8d0cf75c7d65a195b9f6e95d6b1d6800f
|
refs/heads/master
| 2023-08-02T14:50:25.754990
| 2023-07-29T13:56:39
| 2023-07-29T13:56:39
| 118,918,801
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 270
|
rd
|
covar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{covar}
\alias{covar}
\title{covar}
\usage{
covar(x)
}
\arguments{
\item{x}{= a matrix}
}
\description{
efficient estimated covariance between the columns of <x>
}
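\examples{
# a minimal sketch, assuming covar() mirrors stats::cov over matrix columns
x <- matrix(rnorm(40), ncol = 4)
covar(x)
}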
\keyword{covar}
|
2b00dc77e83a3963ed783806c716cd8b2e7fda3a
|
ccafbf8e73a96465174e94000f91ecbe21169487
|
/04_CNV_genotype/scripts/fun_models.R
|
f7199ba2c120f32558c489fde609ea87833caa70
|
[] |
no_license
|
SinomeM/ensembleCNV
|
8c284ee3f73f4599541ff1ec671c3552b08f4caf
|
8470619a35f079eb4a0b27b8f7253969e03c114e
|
refs/heads/master
| 2021-07-16T12:49:06.291644
| 2020-06-30T15:26:37
| 2020-06-30T15:26:37
| 184,261,992
| 0
| 0
| null | 2019-04-30T12:54:47
| 2019-04-30T12:54:47
| null |
UTF-8
|
R
| false
| false
| 39,033
|
r
|
fun_models.R
|
mode_zz <- function(dt_cnvr) {
dt <- dt_cnvr
cutoffs <- quantile(dt$LRR_median, c(0.1, 0.9))
dt1 <- dt[which(dt$LRR_median >= cutoffs[1] & dt$LRR_median <= cutoffs[2]), ]
md1 <- mlv(x = dt1$LRR_median, method = "parzen", kernel = "gaussian")
#m1 <- md1$M
sd1 <- sd(dt1$LRR_median)
return(list(mu = md1, sigma = sd1))
}
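# Usage sketch: mode_zz() trims LRR_median to its 10th-90th percentiles and
# returns the kernel-density mode (mu) and SD (sigma) of the trimmed values:
#   ctr <- mode_zz(dt_cnvr = subset(dt_cnvr, CN == 2))
#   ctr$mu; ctr$sigma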
# Case: CN = 1 and CN = 3 both have more than 10 samples
model1_zz <- function(dt_cnvr, paras_LRR) {
numsnp <- unique(dt_cnvr$numSNP)
# init paras_model
paras_model <- list()
paras_model$stage1 <- list()
paras_model$stage2 <- list()
dt_cnvr <- dt_cnvr[order(dt_cnvr$CN), ]
n <- nrow(dt_cnvr) # number of samples
numsnp <- unique(dt_cnvr$numSNP) # numsnp
cnvr_id <- unique(dt_cnvr$CNVR_ID)
chr <- unique(dt_cnvr$Chr)
tbl1 <- table(dt_cnvr$CN)
# first round gmm model
lambdas1 <- prop.table(tbl1)
model1 <- list()
n1s <- as.vector(tbl1) ##
# add here percent of CN = 2 < 50%
if (lambdas1[2] <= 0.5) {
# exclude P/Q/PQ
idxs <- which(dt_cnvr$alg %in% c("I", "IP", "IPQ", "IQ"))
dt_new <- dt_cnvr[idxs, ]
dt2_new <- NULL
if (length(idxs) == 0) {
dt2_new <- dt_cnvr
} else {
dt2_new <- dt_cnvr[-idxs, ]
}
m2 <- mode_zz(dt_cnvr = dt2_new)
mu2 <- m2$mu
sigma2 <- m2$sigma
dt1_new <- subset(dt_new, CN == 1)
dt3_new <- subset(dt_cnvr, CN == 3)
n1s <- c(nrow(dt1_new), nrow(dt2_new), nrow(dt3_new)) ## n1s
if (nrow(dt1_new) >= 60) {
mu1 <- mean(dt1_new$LRR_median)
sigma1 <- sd(dt1_new$LRR_median)
} else {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
}
if (nrow(dt3_new) >= 60) {
mu3 <- mean(dt3_new$LRR_median)
sigma3 <- sd(dt3_new$LRR_median)
} else {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
if ((mu2 - mu1) < abs(0.2*paras_LRR$LRR_mean$CN_1)) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
num_permute <- 60
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
LRR_median_cn1_permute <- rnorm(num_permute, mean = mu1, sd = sigma1)
dt_cn1_permute <- data.frame(Sample_ID = paste0("Sample_CN1_", 1:num_permute),
CNVR_ID = rep(cnvr_id, num_permute),
LRR_median = LRR_median_cn1_permute,
Chr = chr, alg = rep("permute", num_permute),
CN = 1, numSNP = numsnp, stringsAsFactors = FALSE)
dt_cnvr <- rbind(dt_cnvr, dt_cn1_permute)
n1s[1] <- n1s[1] + 60
}
lambdas1 <- n1s/sum(n1s)
mus1 <- c(mu1, mu2, mu3)
sigmas1 <- c(sigma1, sigma2, sigma3)
# init parameters
paras_model$stage1$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = lambdas1)
if (nrow(dt1_new) < 10 & nrow(dt3_new) < 10) {
model1 <- normalmixEM(x = dt_cnvr$LRR_median, mu = mus1,
mean.constr = c(mu1, NA, mu3),
sigma = sigmas1, k = 3)
} else if (nrow(dt1_new) < 10 & nrow(dt3_new) >= 10) {
model1 <- normalmixEM(x = dt_cnvr$LRR_median, mu = mus1,
mean.constr = c(mu1, NA, NA),
sigma = sigmas1, k = 3)
} else if (nrow(dt1_new >= 10) & nrow(dt3_new) < 10) {
model1 <- normalmixEM(x = dt_cnvr$LRR_median, mu = mus1,
mean.constr = c(NA, NA, mu3),
sigma = sigmas1, k = 3)
} else {
model1 <- normalmixEM(x = dt_cnvr$LRR_median, mu = mus1,
sigma = sigmas1, k = 3)
}
} else {
dt2 <- subset(dt_cnvr, CN == 2)
paras2 <- mode_zz(dt_cnvr = dt2)
mu2 <- paras2$mu
sigma2 <- paras2$sigma
# CN = 1
dt1 <- subset(dt_cnvr, CN == 1)
mu1 <- mean(dt1$LRR_median)
sigma1 <- sd(dt1$LRR_median)
# CN = 3
dt3 <- subset(dt_cnvr, CN == 3)
mu3 <- mean(dt3$LRR_median)
sigma3 <- sd(dt3$LRR_median)
if ((mu2 - mu1) < abs(0.2*paras_LRR$LRR_mean$CN_1)) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
num_permute <- 60
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
LRR_median_cn1_permute <- rnorm(num_permute, mean = mu1, sd = sigma1)
dt_cn1_permute <- data.frame(Sample_ID = paste0("Sample_CN1_", 1:num_permute),
CNVR_ID = rep(cnvr_id, num_permute),
LRR_median = LRR_median_cn1_permute,
Chr = chr, alg = rep("permute", num_permute),
CN = 1, numSNP = numsnp, stringsAsFactors = FALSE)
dt_cnvr <- rbind(dt_cnvr, dt_cn1_permute)
n1s[1] <- n1s[1] + num_permute
}
mus1 <- c(mu1, mu2, mu3)
sigmas1 <- c(sigma1, sigma2, sigma3)
lambdas1 <- n1s/sum(n1s)
# init parameters
paras_model$stage1$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = lambdas1)
model1 <- normalmixEM(x = dt_cnvr$LRR_median, mu = mus1,
sigma = sigmas1, lambda = lambdas1, k = 3)
}
# model parameters
paras_model$stage1$model <- list(mu = model1$mu,
sigma = model1$sigma,
lambda = model1$lambda)
#--------------------------------------------
cns_pred <- apply(model1$posterior, MARGIN = 1, which.max)
probs <- model1$posterior
# CN = 1
probs1 <- as.vector(probs[, 1])
idxs1 <- which(cns_pred == 1 & probs1 >= 0.9)
n1 <- length(idxs1)
n1
# CN = 3
probs3 <- as.vector(probs[, 3])
idxs3 <- which(cns_pred == 3 & probs3 >= 0.9)
n3 <- length(idxs3)
n3
# CN = 2
idxs2 <- setdiff(1:nrow(dt_cnvr), union(idxs1, idxs3))
dt22 <- dt_cnvr[idxs2, ]
m2 <- mode_zz(dt_cnvr = dt22)
mu2 <- m2$mu
sigma2 <- m2$sigma
n2 <- nrow(dt22)
# add
n2s <- c(n1, n2, n3)
flag1 <- ifelse(n2s[1]/n1s[1] >= 0.8,TRUE,FALSE)
flag3 <- ifelse(n2s[3]/n1s[3] >= 0.8,TRUE,FALSE) ## prop
# parameters
mu1 <- NA
sigma1 <- NA
percent.dist12 = abs(model1$mu[2] - model1$mu[1])/abs(paras_LRR$LRR_mean$CN_1)
if ( percent.dist12 <= 0.2) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
n1 = 0
} else {
if (n1 >= 10) {
mu1 <- mean(dt_cnvr$LRR_median[idxs1])
sigma1 <- sd(dt_cnvr$LRR_median[idxs1])
} else {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
}
}
mu3 <- NA
sigma3 <- NA
if (n3 >= 10) {
mu3 <- mean(dt_cnvr$LRR_median[idxs3])
sigma3 <- sd(dt_cnvr$LRR_median[idxs3])
} else {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
if (mu3 < mu2) {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
lambdas2 <- c(n1, n2, n3)/n
paras_model$stage2$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = lambdas2)
model2 <- list()
if (n1 >= 10 & n3 >= 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3), lambda = lambdas2,
k = 3)
} else if (n1 >= 10 & n3 < 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
mean.constr = c(NA, NA, mu3), k = 3)
} else if (n1 < 10 & n3 >= 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
mean.constr = c(mu1, NA, mu3), k = 3)
} else if (n1 < 10 & n3 < 10) {
mu2 <- median(dt22$LRR_median)
sigma2 <- sd(dt22$LRR_median)
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
if(n1 == 0) {
sigma1 <- Inf
} else {
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
}
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
if (n3 == 0) {
sigma3 <- Inf
} else {
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
model2$mu <- c(mu1, mu2, mu3)
model2$sigma <- c(sigma1, sigma2, sigma3)
model2$lambda <- c(1, 1, 1)
}
# check parameters
cutoff1 <- 0.5
cutoff3 <- 0.8
mu1f <- model2$mu[1]
mu2f <- model2$mu[2]
mu3f <- model2$mu[3]
f1 <- (abs(mu1f - mu2f) > cutoff1*abs(paras_LRR$LRR_mean$CN_1))
f3 <- (abs(mu2f - mu3f) > cutoff3*abs(paras_LRR$LRR_mean$CN_3))
f1 <- f1|flag1
f3 <- f3|flag3
if (f1 == FALSE & f3 == FALSE) {
mu2 <- median(dt_cnvr$LRR_median)
sd2 <- sd(dt_cnvr$LRR_median)
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else if (f1 == FALSE & f3 == TRUE) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else if (f1 == TRUE & f3 == FALSE) {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else {
mu22 <- NA
if (n1 <= 5 & n3 <= 5) {
mu22 <- mu2
}
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, mu22, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
}
res <- list(paras_all = paras_all,
paras_model = paras_model)
return(res) # return
}
model2_zz <- function(dt_cnvr, paras_LRR) {
# init paras_model
paras_model <- list()
paras_model$stage1 <- list()
paras_model$stage2 <- list()
dt_cnvr <- dt_cnvr[order(dt_cnvr$CN), ]
n <- nrow(dt_cnvr) # number of samples
numsnp <- unique(dt_cnvr$numSNP) # numsnp
cnvr_id <- unique(dt_cnvr$CNVR_ID)
chr <- unique(dt_cnvr$Chr)
cn_factor <- factor(dt_cnvr$CN, levels = c(1, 2, 3))
v <- as.vector(table(cn_factor))
lambdas1 <- v/n ##
n1s <- v
dt1 <- subset(dt_cnvr, CN == 1)
dt2 <- subset(dt_cnvr, CN == 2)
dt3 <- subset(dt_cnvr, CN == 3)
mu1 <- mean(dt1$LRR_median)
sigma1 <- sd(dt1$LRR_median)
paras1 <- mode_zz(dt_cnvr = dt2)
mu2 <- paras1$mu
sigma2 <- paras1$sigma
mu3 <- mean(dt3$LRR_median)
sigma3 <- sd(dt3$LRR_median)
# add 8_cc here
if ((mu2 - mu1) < abs(0.2*paras_LRR$LRR_mean$CN_1)) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
num_permute <- 20
# num_permute <- 60
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
LRR_median_cn1_permute <- rnorm(num_permute, mean = mu1, sd = sigma1)
dt_cn1_permute <- data.frame(Sample_ID = paste0("Sample_CN1_", 1:num_permute),
CNVR_ID = rep(cnvr_id, num_permute),
LRR_median = LRR_median_cn1_permute,
Chr = chr, alg = rep("permute", num_permute),
CN = 1, numSNP = numsnp, stringsAsFactors = FALSE)
dt_cnvr <- rbind(dt_cnvr, dt_cn1_permute)
n1s[1] <- n1s[1] + num_permute
}
lambdas1 <- n1s/sum(n1s) ## add
paras_model$stage1$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = lambdas1)
model1 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3), k = 3)
paras_model$stage1$model <- list(mu = model1$mu,
sigma = model1$sigma,
lambda = model1$lambda)
cns_pred <- apply(model1$posterior, MARGIN = 1, which.max)
probs <- model1$posterior
# CN = 1
probs1 <- as.vector(probs[, 1])
idxs1 <- which(probs1 >= 0.9 & cns_pred == 1)
n1 <- length(idxs1)
n1
# CN = 3
probs3 <- as.vector(probs[, 3])
idxs3 <- which(probs3 >= 0.9 & cns_pred == 3)
n3 <- length(idxs3)
n3
idxs <- union(idxs1, idxs3)
idxs2 <- setdiff(1:nrow(dt_cnvr), idxs)
dt22 <- dt_cnvr[idxs2, ]
n2 <- nrow(dt22)
paras2 <- mode_zz(dt_cnvr = dt22)
mu2 <- paras2$mu
sigma2 <- paras2$sigma
n2s <- c(n1, n2, n3)
flag1 <- ifelse(n2s[1]/n1s[1] >= 0.8, TRUE, FALSE)
flag3 <- ifelse(n2s[3]/n1s[3] >= 0.8, TRUE, FALSE)
# parameters
mu1 <- NA
sigma1 <- NA
if (n1 >= 10) {
mu1 <- mean(dt_cnvr$LRR_median[idxs1])
sigma1 <- sd(dt_cnvr$LRR_median[idxs1])
} else {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1
}
mu3 <- NA
sigma3 <- NA
if (n3 >= 10) {
mu3 <- mean(dt_cnvr$LRR_median[idxs3])
sigma3 <- sd(dt_cnvr$LRR_median[idxs3])
} else {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3
}
## add
if (mu3 < mu2) {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3
}
n2 <- n - n1 - n3
lambdas2 <- c(n1, n2, n3)/n
# init parameters
paras_model$stage2$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = lambdas2)
model2 <- list()
if (n1 >= 10 & n3 >= 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
mean.constr = c(mu1, NA, mu3),
k = 3)
} else if (n1 >= 10 & n3 < 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
mean.constr = c(mu1, NA, mu3), k = 3)
} else if (n1 < 10 & n3 >= 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
mean.constr = c(mu1, NA, mu3), k = 3)
} else if (n1 < 10 & n3 < 10) {
mu2 <- median(dt22$LRR_median)
sigma2 <- sd(dt22$LRR_median)
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
if(n1 == 0) {
sigma1 <- Inf
} else {
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
}
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
if (n3 == 0) {
sigma3 <- Inf
} else {
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
model2$mu <- c(mu1, mu2, mu3)
model2$sigma <- c(sigma1, sigma2, sigma3)
model2$lambda <- c(1, 1, 1)
}
# check parameters
cutoff1 <- 0.5
cutoff3 <- 0.8
mu1f <- model2$mu[1]
mu2f <- model2$mu[2]
mu3f <- model2$mu[3]
f1 <- (abs(mu1f - mu2f) > cutoff1*abs(paras_LRR$LRR_mean$CN_1))
f3 <- (abs(mu2f - mu3f) > cutoff3*abs(paras_LRR$LRR_mean$CN_3))
##
f1 <- f1|flag1
f3 <- f3|flag3
if (f1 == FALSE & f3 == FALSE) {
mu2 <- median(dt_cnvr$LRR_median)
sd2 <- sd(dt_cnvr$LRR_median)
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else if (f1 == FALSE & f3 == TRUE) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else if (f1 == TRUE & f3 == FALSE) {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else {
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
}
res <- list(paras_all = paras_all,
paras_model = paras_model)
return(res) # return
}
model3_zz <- function(dt_cnvr, paras_LRR) {
# init paras_model
paras_model <- list()
paras_model$stage1 <- list()
paras_model$stage2 <- list()
dt_cnvr <- dt_cnvr[order(dt_cnvr$CN), ]
n <- nrow(dt_cnvr) # number of samples
numsnp <- unique(dt_cnvr$numSNP) # numsnp
cn_factor <- factor(dt_cnvr$CN, levels = c(1, 2, 3))
v <- as.vector(table(cn_factor))
lambdas1 <- v/n ##
n1s <- v
dt1 <- subset(dt_cnvr, CN == 1)
dt2 <- subset(dt_cnvr, CN == 2)
dt3 <- subset(dt_cnvr, CN == 3)
mu3 <- mean(dt3$LRR_median)
sigma3 <- sd(dt3$LRR_median)
mu2 <- NA
sigma2 <- NA
  flag1 <- TRUE # TRUE: enough CN = 2 samples to estimate the mode from CN = 2 alone
  if (nrow(dt2) >= 200) {
paras1 <- mode_zz(dt_cnvr = dt2)
mu2 <- paras1$mu
sigma2 <- paras1$sigma
} else {
paras1 <- mode_zz(dt_cnvr = dt_cnvr)
mu2 <- paras1$mu
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
flag1 <- FALSE # changed flag1
}
# init paras
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1
paras_model$stage1$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = lambdas1)
model1 <- NULL
flag2 <- TRUE
if (flag1) {
model1 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
k = 3)
} else {
    while(flag2) { # refit until the separation checks below pass; may not terminate if EM keeps converging to the same fit
model1 <- normalmixEM(x = dt_cnvr$LRR_median,
mu= c(mu1, mu2, mu3),
k = 3)
mu1 <- model1$mu[1]
mu2 <- model1$mu[2]
mu3 <- model1$mu[3]
d21 <- mu2 - mu1
d32 <- mu3 - mu2
f21 <- d21 >= 0.5*(-paras_LRR$LRR_mean$CN_1)
f32 <- d32 >= 0.8*(paras_LRR$LRR_mean$CN_3)
if (f21 & f32) {
flag2 = FALSE
}
}
}
paras_model$stage1$model <- list(mu = model1$mu,
sigma = model1$sigma,
lambda = model1$lambda)
  if (flag2) { # stage 2 runs only when stage 1 used the full initialization (flag1 TRUE); otherwise the stage-1 fit is reused below
cns_pred <- apply(model1$posterior, MARGIN = 1, which.max)
probs <- model1$posterior
# CN = 1
probs1 <- as.vector(probs[, 1])
idxs1 <- which(probs1 >= 0.9 & cns_pred == 1)
n1 <- length(idxs1)
# CN = 3
probs3 <- as.vector(probs[, 3])
idxs3 <- which(probs3 >= 0.9 & cns_pred == 3)
n3 <- length(idxs3)
idxs <- union(idxs1, idxs3)
idxs2 <- setdiff(1:nrow(dt_cnvr), idxs)
dt22 <- dt_cnvr[idxs2, ]
n2 <- nrow(dt22)
paras2 <- mode_zz(dt_cnvr = dt22)
mu2 <- paras2$mu
sigma2 <- paras2$sigma
# parameters
mu1 <- NA
sigma1 <- NA
if (n1 >= 10) {
mu1 <- mean(dt_cnvr$LRR_median[idxs1])
sigma1 <- sd(dt_cnvr$LRR_median[idxs1])
} else {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
}
mu3 <- NA
sigma3 <- NA
if (n3 >= 10) {
mu3 <- mean(dt_cnvr$LRR_median[idxs3])
sigma3 <- sd(dt_cnvr$LRR_median[idxs3])
} else {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
## add
if (mu3 < mu2) {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
if (mu1 > mu2) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
}
n2 <- n - n1 - n3
lambdas2 <- c(n1, n2, n3)/n
n2s <- c(n1, n2, n3)
flag1 <- ifelse(n2s[1]/n1s[1] >= 0.8, TRUE, FALSE)
flag3 <- ifelse(n2s[3]/n1s[3] >= 0.8, TRUE, FALSE)
# init parameters
paras_model$stage2$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = lambdas2)
model2 <- list()
    # all cases with n1 >= 10 or n3 >= 10 use the same constrained fit
    if (n1 >= 10 | n3 >= 10) {
      model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
                            sigma = c(sigma1, sigma2, sigma3),
                            mean.constr = c(mu1, NA, mu3),
                            k = 3)
    } else { # n1 < 10 & n3 < 10
mu2 <- median(dt22$LRR_median)
sigma2 <- sd(dt22$LRR_median)
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
if(n1 == 0) {
sigma1 <- Inf
} else {
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
}
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
if (n3 == 0) {
sigma3 <- Inf
} else {
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
model2$mu <- c(mu1, mu2, mu3)
model2$sigma <- c(sigma1, sigma2, sigma3)
model2$lambda <- c(1, 1, 1)
}
# check parameters
cutoff1 <- 0.5
cutoff3 <- 0.8
mu1f <- model2$mu[1]
mu2f <- model2$mu[2]
mu3f <- model2$mu[3]
f1 <- (abs(mu1f - mu2f) > cutoff1*abs(paras_LRR$LRR_mean$CN_1))
f3 <- (abs(mu2f - mu3f) > cutoff3*abs(paras_LRR$LRR_mean$CN_3))
f1 <- f1|flag1 ## add
f3 <- f3|flag3
if (f1 == FALSE & f3 == FALSE) {
mu2 <- median(dt_cnvr$LRR_median)
sd2 <- sd(dt_cnvr$LRR_median)
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else if (f1 == FALSE & f3 == TRUE) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else if (f1 == TRUE & f3 == FALSE) {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else {
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
}
    # paras_model$stage2$model has been set from model3 in each branch above
res <- list(paras_all = paras_all,
paras_model = paras_model)
return(res) # return
} else {
paras_model$stage2$init <- list(mu = model1$mu,
sigma = model1$sigma,
lambda = model1$lambda)
paras_model$stage2$model <- list(mu = model1$mu,
sigma = model1$sigma,
lambda = model1$lambda)
paras_all <- list(mus = model1$mu,
sigmas = model1$sigma,
lambdas = model1$lambda)
res <- list(paras_all = paras_all,
paras_model = paras_model)
return(res) # return
}
}
model4_zz <- function(dt_cnvr, paras_LRR) {
# init paras_model
paras_model <- list()
paras_model$stage1 <- list()
paras_model$stage2 <- list()
# only use one round gmm model
dt_cnvr <- dt_cnvr[order(dt_cnvr$CN), ]
n <- nrow(dt_cnvr) # number of samples
numsnp <- unique(dt_cnvr$numSNP) # numsnp
cn_factor <- factor(dt_cnvr$CN, levels = c(1, 2, 3))
ns <- as.vector(table(cn_factor))
n1 <- ns[1]
n2 <- ns[2]
n3 <- ns[3]
# start
mu2 <- NA
sigma2 <- NA
dt1 <- subset(dt_cnvr, CN == 1)
dt2 <- subset(dt_cnvr, CN == 2)
dt3 <- subset(dt_cnvr, CN == 3)
  flag1 <- TRUE # TRUE: enough CN = 2 samples to estimate the mode from CN = 2 alone
  if (nrow(dt2) >= 200) {
paras1 <- mode_zz(dt_cnvr = dt2)
mu2 <- paras1$mu
sigma2 <- paras1$sigma
} else {
paras1 <- mode_zz(dt_cnvr = dt_cnvr)
mu2 <- paras1$mu
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
flag1 <- FALSE # changed flag1
}
  mu1 <- mean(dt1$LRR_median)   # dt1 and dt3 were already subset above
  sigma1 <- sd(dt1$LRR_median)
  mu3 <- mean(dt3$LRR_median)
  sigma3 <- sd(dt3$LRR_median)
if (!flag1) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
# init paras
paras_model$stage1$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = c(n1, n2, n3)/n)
model1 <- NULL
flag2 <- TRUE
if (flag1) {
model1 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
k = 3) ## need to change
} else {
    while(flag2) { # refit until the separation checks below pass; may not terminate if EM keeps converging to the same fit
model1 <- normalmixEM(x = dt_cnvr$LRR_median,
mu= c(mu1, mu2, mu3),
k = 3)
mu1 <- model1$mu[1]
mu2 <- model1$mu[2]
mu3 <- model1$mu[3]
d21 <- mu2 - mu1
d32 <- mu3 - mu2
f21 <- d21 >= 0.5*(-paras_LRR$LRR_mean$CN_1)
f32 <- d32 >= 0.8*(paras_LRR$LRR_mean$CN_3)
if (f21 & f32) {
flag2 = FALSE
}
}
}
paras_model$stage1$model <- list(mu = model1$mu,
sigma = model1$sigma,
lambda = model1$lambda)
  if (flag2) { # stage 2 runs only when stage 1 used the full initialization (flag1 TRUE); otherwise the stage-1 fit is reused below
# number in CN = 1 and CN = 3
cns_pred <- apply(model1$posterior, MARGIN = 1, which.max)
probs <- model1$posterior
probs1 <- as.vector(probs[, 1])
idxs1 <- which(cns_pred == 1 & probs1 >= 0.99)
n1 <- length(idxs1)
probs3 <- as.vector(probs[, 3])
idxs3 <- which(cns_pred == 3 & probs3 >= 0.99)
n3 <- length(idxs3)
idxs2 <- setdiff(1:nrow(dt_cnvr), union(idxs1, idxs3))
dt22 <- dt_cnvr[idxs2, ]
paras2 <- mode_zz(dt_cnvr = dt22)
mu2 <- paras2$mu
sigma2 <- paras2$sigma
n2 <- nrow(dt22)
mu1 <- 0
sigma1 <- 0
if (n1 >= 10) {
dt21 <- dt_cnvr[idxs1, ]
mu1 <- mean(dt21$LRR_median)
sigma1 <- sd(dt21$LRR_median)
} else {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
}
mu3 <- 0
sigma3 <- 0
if (n3 >= 10) {
dt23 <- dt_cnvr[idxs3, ]
mu3 <- mean(dt23$LRR_median)
sigma3 <- sd(dt23$LRR_median)
} else {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
}
## add
if (mu3 < mu2) {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3
}
n2 <- n - n1 - n3
lambdas2 <- c(n1, n2, n3)/n
# init parameters
paras_model$stage2$init <- list(mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
lambda = lambdas2)
model2 <- list()
if (n1 >= 10 & n3 >= 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
k = 3)
} else if (n1 >= 10 & n3 < 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
mean.constr = c(NA, NA, mu3), k = 3)
} else if (n1 < 10 & n3 >= 10) {
model2 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
sigma = c(sigma1, sigma2, sigma3),
mean.constr = c(mu1, NA, mu3), k = 3)
} else if (n1 < 10 & n3 < 10) {
mu2 <- median(dt22$LRR_median)
sigma2 <- sd(dt22$LRR_median)
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
model5 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 2)
model2$mu <- model5$mu
model2$sigma <- model5$sigma
model2$lambda <- model5$lambda
}
# check parameters
cutoff1 <- 0.5
cutoff3 <- 0.8
mu1f <- model2$mu[1]
mu2f <- model2$mu[2]
mu3f <- model2$mu[3]
f1 <- (abs(mu1f - mu2f) > cutoff1*abs(paras_LRR$LRR_mean$CN_1))
f3 <- (abs(mu2f - mu3f) > cutoff3*abs(paras_LRR$LRR_mean$CN_3))
if (f1 == FALSE & f3 == FALSE) {
mu2 <- median(dt_cnvr$LRR_median)
sd2 <- sd(dt_cnvr$LRR_median)
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else if (f1 == FALSE & f3 == TRUE) {
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else if (f1 == TRUE & f3 == FALSE) {
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
model3 <- normalmixEM(x = dt_cnvr$LRR_median, mu = c(mu1, mu2, mu3),
mean.constr = c(mu1, NA, mu3), k = 3)
paras_model$stage2$model <- list(mu = model3$mu,
sigma = model3$sigma,
lambda = model3$lambda)
paras_all <- list(mus = model3$mu,
sigmas = model3$sigma,
lambdas = model3$lambda)
} else {
## change mu1 mu2 mu3 value
if (model2$mu[3] < model2$mu[2]) {
model2$mu[3] = model2$mu[2] + paras_LRR$LRR_mean$CN_3
}
if (model2$mu[1] > model2$mu[2]) {
model2$mu[1] = model2$mu[2] - abs(paras_LRR$LRR_mean$CN_1)
}
paras_model$stage2$model <- list(mu = model2$mu,
sigma = model2$sigma,
lambda = model2$lambda)
paras_all <- list(mus = model2$mu,
sigmas = model2$sigma,
lambdas = model2$lambda)
}
res <- list(paras_all = paras_all,
paras_model = paras_model)
return(res) # return
} else {
paras_model$stage2$init <- list(mu = model1$mu,
sigma = model1$sigma,
lambda = model1$lambda)
paras_model$stage2$model <- list(mu = model1$mu,
sigma = model1$sigma,
lambda = model1$lambda)
paras_all <- list(mus = model1$mu,
sigmas = model1$sigma,
lambdas = model1$lambda)
res <- list(paras_all = paras_all,
paras_model = paras_model)
return(res) # return
}
}
# main function
train_model_zz <- function(dt_cnvr, paras_LRR) {
cnvr_id <- unique(dt_cnvr$CNVR_ID)
chr <- unique(dt_cnvr$Chr)
dt_cnvr <- dt_cnvr[order(dt_cnvr$CN), ]
  numsnp <- unique(dt_cnvr$numSNP)
  cns_factor <- factor(dt_cnvr$CN, levels = c(1, 2, 3))
  tbl <- table(cns_factor)
  nums <- as.vector(tbl)
n1 <- nums[1]
n3 <- nums[3]
# add permute data
dt2 <- subset(dt_cnvr, CN == 2)
mu2 <- NULL
flag <- TRUE
if (nrow(dt2) <= 100) { # search the mode for all data
m2 <- mode_zz(dt_cnvr = dt_cnvr)
mu2 <- m2$mu
flag = FALSE
} else {
m2 <- mode_zz(dt_cnvr = dt2)
mu2 <- m2$mu
}
mu1 <- mu2 + paras_LRR$LRR_mean$CN_1
sigma1 <- paras_LRR$LRR_sd$CN_1/sqrt(numsnp)
mu3 <- mu2 + paras_LRR$LRR_mean$CN_3
sigma3 <- paras_LRR$LRR_sd$CN_3/sqrt(numsnp)
  num_permute <- 60 # number of permuted samples per under-represented CN class (alternative: 20)
LRR_median_cn1_permute <- rnorm(num_permute, mean = mu1, sd = sigma1)
LRR_median_cn3_permute <- rnorm(num_permute, mean = mu3, sd = sigma3)
dt_cn1_permute <- data.frame(Sample_ID = paste0("Sample_CN1_", 1:num_permute),
CNVR_ID = rep(cnvr_id, num_permute),
LRR_median = LRR_median_cn1_permute,
Chr = chr, alg = rep("permute", num_permute),
CN = 1, numSNP = numsnp, stringsAsFactors = FALSE)
dt_cn3_permute <- data.frame(Sample_ID = paste0("Sample_CN3_", 1:num_permute),
CNVR_ID = rep(cnvr_id, num_permute),
LRR_median = LRR_median_cn3_permute,
Chr = chr, alg = rep("permute", num_permute),
CN = 3, numSNP = numsnp, stringsAsFactors = FALSE)
# cutoff of minimum number of samples in each CN
  if (n1 >= 30 & n3 >= 30) { # no permuted samples needed
paras <- model1_zz(dt_cnvr = dt_cnvr, paras_LRR = paras_LRR)
return(paras)
} else if (n1 >= 30 & n3 < 30) {
dt_cnvr <- rbind(dt_cnvr, dt_cn3_permute)
paras <- model2_zz(dt_cnvr = dt_cnvr, paras_LRR = paras_LRR)
return(paras)
} else if (n1 < 30 & n3 >= 30) {
if (flag == FALSE) {
dt_cnvr <- rbind(dt_cnvr, dt_cn1_permute, dt_cn3_permute)
} else {
dt_cnvr <- rbind(dt_cnvr, dt_cn1_permute)
}
paras <- model3_zz(dt_cnvr = dt_cnvr, paras_LRR = paras_LRR)
return(paras)
} else if (n1 < 30 & n3 < 30) {
dt_cnvr <- rbind(dt_cnvr, dt_cn1_permute, dt_cn3_permute)
paras <- model4_zz(dt_cnvr = dt_cnvr, paras_LRR = paras_LRR)
return(paras)
}
}
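# Usage sketch (not part of the original source; column and list names below are
# inferred from how train_model_zz() uses its arguments, and the values are illustrative):
# dt_cnvr <- data.frame(Sample_ID = paste0("S", 1:300), CNVR_ID = "CNVR_1", Chr = 1,
#                       alg = "observed",
#                       CN = sample(1:3, 300, replace = TRUE, prob = c(0.1, 0.8, 0.1)),
#                       LRR_median = rnorm(300), numSNP = 25, stringsAsFactors = FALSE)
# paras_LRR <- list(LRR_mean = list(CN_1 = -0.45, CN_3 = 0.3),
#                   LRR_sd   = list(CN_1 = 0.2,  CN_3 = 0.2))
# fit <- train_model_zz(dt_cnvr = dt_cnvr, paras_LRR = paras_LRR)
# fit$paras_all   # fitted mus/sigmas/lambdas for CN = 1/2/3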
|
fec196321ca9b10c5d1536abae3bdc3f2c82f5a9
|
5921cca6628a5842d1035e304b9f824768a21cea
|
/run_analysis.R
|
883dceb4b779786b4fae62a45b1336b6971200cd
|
[] |
no_license
|
MayDS/TidyDataRepo
|
ee0980d0cd9cced090b9ab8669f8a09b9e56d60e
|
7d9bfb07dde6ade53702cbc04115fb27f219862a
|
refs/heads/master
| 2021-01-10T16:19:24.862317
| 2016-02-29T02:58:24
| 2016-02-29T02:58:24
| 52,759,739
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,753
|
r
|
run_analysis.R
|
## Code for Course Project in Getting and Cleaning Data course
## The output summary data file name for the run_analysis function can be entered
## as an argument.
run_analysis <- function(outputfile = "tidydata.csv"){
library(plyr, quietly = TRUE)
library(dplyr)
library(reshape2)
## Download file
temp <- tempfile()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile = temp, method = "libcurl")
## Import and combine X data
con1 <- unz(temp, "UCI HAR Dataset/train/X_train.txt")
con2 <- unz(temp, "UCI HAR Dataset/test/X_test.txt")
Xtrain <- read.table(con1)
Xtest <- read.table(con2)
Xdata <- rbind(Xtrain, Xtest)
## Import and combine Y data
con1 <- unz(temp, "UCI HAR Dataset/train/y_train.txt")
con2 <- unz(temp, "UCI HAR Dataset/test/y_test.txt")
Ytrain <- read.table(con1)
Ytest <- read.table(con2)
Ydata <- rbind(Ytrain, Ytest)
## Import and combine subject data
con1 <- unz(temp, "UCI HAR Dataset/train/subject_train.txt")
con2 <- unz(temp, "UCI HAR Dataset/test/subject_test.txt")
Strain <- read.table(con1)
Stest <- read.table(con2)
subjects <- rbind(Strain, Stest)
## Import labels
con1 <- unz(temp, "UCI HAR Dataset/activity_labels.txt")
con2 <- unz(temp, "UCI HAR Dataset/features.txt")
activities <- read.table(con1)
features <- read.table(con2)
unlink(temp)
## Find features for mean and std measurements
validCols <- sort(c(grep("mean\\(\\)", features$V2), grep("std\\(\\)", features$V2)))
## Remove unneeded columns from Xdata and unneeded labels
Xdata <- Xdata[, validCols]
features <- features[validCols,]
## Clean up labels and add to Xdata
features$V2 <- tolower(features$V2)
features$V2 <- sub("\\(\\)","", features$V2)
features$V2 <- gsub("\\-","",features$V2)
features$V2 <- sub("acc","acceleration",features$V2)
colnames(Xdata) <- features[,2]
## Convert Ydata to activity names
Ydata <- join(Ydata, activities, match = "all")
Ydata <- select(Ydata, activity = V2)
Ydata$activity <- tolower(Ydata$activity)
## rename subject column
subjects <- select(subjects, subjectid = V1)
## combine X, Y, and subject columns
data <- cbind(subjects, Ydata, Xdata)
## create data set with means for each variable for each subject/activity pair
datamelt <- melt(data,id=c("subjectid","activity"))
output <- dcast(datamelt, subjectid + activity ~ variable, mean)
## write output file
write.csv(output, file = outputfile)
}
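## Usage sketch (assumes internet access, since the function downloads the UCI HAR
## zip itself, and write permission in the working directory):
## run_analysis("tidydata.csv")
## tidy <- read.csv("tidydata.csv")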
|
1a42380fb04b6ddd19d0807674dd5652e1bac0e2
|
d095d504fbb0b6a055586e71b4d0a6522425cd20
|
/R_functions.R
|
cacdc875d39cf640b1e27f92387c512d0e61110a
|
[] |
no_license
|
breakerkun/FLM-clustering
|
41855350b78ef872962212f43f14f56dd33ea39a
|
66d99b6edd34b45e3887682412bee1601524f609
|
refs/heads/main
| 2023-07-01T20:53:21.857344
| 2021-08-04T15:42:43
| 2021-08-04T15:42:43
| 385,279,962
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,313
|
r
|
R_functions.R
|
library(MASS)   # ginv() used in GCV()
library(fda)    # eval.basis(), getbasisrange(), eval.fd() used below
library(Matrix) # sparse Matrix() used in create_adjacency()
# some examples of coefficient functions
Beta89 <- function(t) {
beta89.t <- rep(0,length(t))
for(i in 1:length(t))
{
beta89.t[i] = 2-6*t[i]
}
return(beta89.t)
}
Betacubic <- function(t) {
betacubic.t <- rep(0,length(t))
for(i in 1:length(t))
{
betacubic.t[i] = -3+10*t[i]*t[i]-10*t[i]+6*(t[i])^3;
}
return(betacubic.t)
}
Betaexp <- function(t) {
betaexp.t <- rep(0,length(t))
for(i in 1:length(t))
{
betaexp.t[i] = 1.5-exp(2*t[i])+3*sin(2*t[i]);
}
return(betaexp.t)
}
# L2 distance between two functions
L2_dist_func <- function(Beta1,Beta2){
flog <- function(t) {(Beta1(t)-Beta2(t))^2}
in.prod <- integrate(flog,0,1)
return(sqrt(in.prod$value))
}
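# Quick check (sketch): L2 distance between two of the coefficient functions above.
# L2_dist_func(Beta89, Betacubic)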
####################### Calculate InnerProd #######################
InnerProd <- function(Betaf,basisfd,j) {
# compute the <beta_j, B_j>, integral of beta_j and B_j.
# Betaf: beta function, i.e. Beta1, Beta2, Beta3, Beta4, predefined.
# basisfd: basis function, jth column of basismatrix (eval.basis) (t rows and nbasis columns)
rng <- getbasisrange(basisfd)
knots <- basisfd$param
knots <- c(rng[1], knots, rng[2])
nbasis <- basisfd$nbasis
norder <- nbasis - length(knots) + 2
a <-rng[1]
if(j-norder > 0) {a <- knots[j-norder+1]}
b <- rng[2]
if(j <= (nbasis-norder)) {b <- knots[j+1]}
BFun <- function(t) {
basismatrix <- eval.basis(t,basisfd) # 71 by 74 matrix, t rows and nbasis column
basismatrix.j <- t(basismatrix[,j]) #get jth column of basismatrix
return(basismatrix.j)
}
flog <- function(t) {Betaf(t)*BFun(t)}
in.prod <- integrate(flog,a,b)
return(in.prod$value)
}
knots_eq3 <- function(x, k, m){
#external knots are on boundary
#return boundary with internal knots only
#used in bs or bsplineS
c(min(x), seq(from=min(x), to=max(x), length.out=m+2)[-c(1,m+2)], max(x))
}
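# Example (sketch): m = 3 equally spaced internal knots on [0, 1]; note that the
# spline-order argument k is accepted but not used in the body above.
# knots_eq3(seq(0, 1, by = 0.01), k = 4, m = 3)  # 0.00 0.25 0.50 0.75 1.00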
create_adjacency <- function(V,n) {
differences <- apply(V,2,FUN=function(x) {norm(as.matrix(x),'f')})
connected_ix <- which(differences == 0);
index = t(combn(n,2));
i <- index[connected_ix,1]
j <- index[connected_ix,2]
A <- Matrix(0, nrow = n, ncol = n, sparse = TRUE)
A[(j-1)*n + i] <- 1
return(A)
}
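# Example (sketch): V has choose(n, 2) columns, one per sample pair in combn(n, 2)
# order; an all-zero column marks a merged pair. With n = 3 the pair order is
# (1,2), (1,3), (2,3), so the V below connects pairs (1,2) and (2,3):
# V <- cbind(c(0, 0), c(1, -1), c(0, 0))
# create_adjacency(V, 3)   # sparse 3x3 matrix with A[1,2] = A[2,3] = 1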
# calculate GCV
# NB: relies on objects from the calling environment (k_final, cls_final, Uvec,
# gamma1, D, n); the K_est argument is not used in the body
GCV <- function(estimatey,K_est){
fenzi = sum((estimatey[,1]-estimatey[,2])^2);
Ulist_K = list()
trace = 0
for(i in 1:k_final){
Ulist_K[[i]] = matrix(Uvec[groups(cls_final)[[i]],],nrow=length(groups(cls_final)[[i]]));
trace = trace + sum(diag(Ulist_K[[i]]%*%ginv(t(Ulist_K[[i]])%*%Ulist_K[[i]]
+2*gamma1*D)%*%t(Ulist_K[[i]])))
}
fenmu = (1-trace/n)^2
gcv = fenzi/fenmu
return(gcv)
}
# BIC
cal_bic2_y <- function(estimatey,K_est){
comp1 = log(sum((estimatey[,1]-estimatey[,2])^2)/n);
dfsum = K_est*p*log(log(n+p));
comp2 = (log(n)*dfsum)/n;
bic2 = comp1 + comp2;
return(bic2)
}
# calculate errors
error_inprod <- function(Beta,fdobj){
fd_func <- function(t){
eval.fd(fdobj,t)
}
flog <- function(t) {(Beta(t)-fd_func(t))^2}
in.prod <- integrate(flog,0,1)
return(in.prod$value)
}
error_list <- function(Beta,fdobj_all){
fdlist = list();
for(i in 1:n){
fdlist[[i]] = fdobj_all[i];
}
return(as.numeric(Map(
function(fn, value)
{
error_inprod(fn,value)
},
Beta,
fdlist
)))
}
|
1fae35b83c096ab2778345547c0d67830893ccf8
|
dd2f731943b17b9087c2141dfde592b7733ac33f
|
/man/normalizedKendallTauDistance.Rd
|
8a83bc2978d56bd075d485ac5182aa18e7a835de
|
[] |
no_license
|
katal24/PairwiseComparisons_work
|
2b87d059db30ead31f376953a6c7c5c8434e7dee
|
466aa7af135954c400dde0fd44be76d850e20af7
|
refs/heads/master
| 2021-05-01T12:30:48.409704
| 2016-11-29T14:03:34
| 2016-11-29T14:03:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 582
|
rd
|
normalizedKendallTauDistance.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairwiseComparisons.R
\name{normalizedKendallTauDistance}
\alias{normalizedKendallTauDistance}
\title{Normalized Kendall Tau distance for two vectors}
\usage{
normalizedKendallTauDistance(list1, list2)
}
\arguments{
\item{list1}{- first rank to compare}
\item{list2}{- second rank to compare}
}
\value{
the proportion of the Kendall Tau distance to the number of all possible pairs
}
\description{
Computes the Kendall (bubble sort) distance between two rank vectors and divides it by the number of all possible pairs
}
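% Example added as a sketch; the ranks are assumed to be same-length numeric vectors.
\examples{
normalizedKendallTauDistance(c(1, 2, 3, 4), c(2, 1, 4, 3))
}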
|
43f480de29974f35a772013c79df969f1437437c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RMTL/examples/calcError.Rd.R
|
ca4aaa784bb3e7b4a5b72c179b917517087b8c13
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 509
|
r
|
calcError.Rd.R
|
library(RMTL)
### Name: calcError
### Title: Calculate the prediction error
### Aliases: calcError
### ** Examples
#create example data
data<-Create_simulated_data(Regularization="L21", type="Regression")
#train a model
model<-MTL(data$X, data$Y, type="Regression", Regularization="L21",
Lam1=0.1, Lam2=0, opts=list(init=0, tol=10^-6, maxIter=1500))
#calculate the training error
calcError(model, newX=data$X, newY=data$Y)
#calculate the test error
calcError(model, newX=data$tX, newY=data$tY)
|
68f2e8766d9d9d8069aaccd520e877cf1e4cc866
|
8afb6488566bc68c33766c1c5cc45c4d4bf07560
|
/Test17.R
|
fbafb29f13f373849fe5e642bd661bae56be6ba5
|
[] |
no_license
|
RajithaGayashan/R
|
e6eee46c900aa7b63f1d2995b218193ef7e24b7b
|
4612fb218e701c9b1c644a3b15f855d3a5c25a4b
|
refs/heads/master
| 2020-12-05T11:07:46.805763
| 2020-02-03T10:43:05
| 2020-02-03T10:43:05
| 232,090,770
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 138
|
r
|
Test17.R
|
#Data summarization - graphical method
#Scatter Diagram
plot(iris$Sepal.Length,iris$Sepal.Width)
plot(iris$Petal.Length,iris$Petal.Width)
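#For all pairwise scatter diagrams at once (sketch, base R):
#pairs(iris[, 1:4])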
|
9b84fd8672b7f68af1d2f09bba92e96cf17b18c2
|
f35cd0a30aefe6faa72b21b8519706858c9f7e3b
|
/R/ic_find.R
|
53e19cdddf21a2bb1b1be4d7cfbbf440b2c9f7ab
|
[
"Apache-2.0"
] |
permissive
|
ATFutures/calendar
|
66a816c8e60a6abcdb6c8b1e5011ff7a51f257e3
|
d06f8febadc8c0c0f2770399a866bb7de4a4e02d
|
refs/heads/master
| 2021-07-24T10:12:46.718376
| 2021-07-16T12:14:48
| 2021-07-16T12:14:48
| 143,872,302
| 22
| 8
|
NOASSERTION
| 2021-07-16T12:14:48
| 2018-08-07T12:47:25
|
R
|
UTF-8
|
R
| false
| false
| 354
|
r
|
ic_find.R
|
#' Find contents of iCal fields
#'
#' @param x Lines read in from an iCal file
#' @param pattern A text string to search for (an iCal field)
#'
#' @export
#' @examples
#' pattern = "DTSTAMP"
#' ic_find(ical_example, pattern)
ic_find <- function(x, pattern) {
pattern <- paste0(pattern, ":")
  locations <- grepl(pattern, x)
locations
}
|
1727342453a20994692d5a718c1b8b533a7c7d10
|
4ea5205aa0b2e6e201e5fbd5070688a3e5b311f2
|
/Data preparation Assignment.R
|
787217fd18361674b0cb213331932517b2e3906c
|
[] |
no_license
|
Keerthbeth/BusinessAnalytics
|
bfbbd792afe910c6c53944ee8c10ed5dfe7cc859
|
2385398e32c8d428e16b2f8a0a8438e36c68708c
|
refs/heads/master
| 2020-04-14T08:10:40.820031
| 2019-01-01T14:42:31
| 2019-01-01T14:42:31
| 163,730,644
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,143
|
r
|
Data preparation Assignment.R
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#1 Data Preparation and imputations
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
data1<-read.csv("D:/assignment_data_prep/assignment_data_prep/Assignment_Data.csv")
data2<-read.csv("D:/assignment_data_prep/assignment_data_prep/Assignment_Data2.csv")
library(pacman)
p_load(plyr)
#Join the datasets on full level
data<-join(data1,data2,type="full")
rm(data1,data2)
p_load(magrittr,tidyverse,dplyr)
#Sort by timestamp and unit id
data<-data%>%
arrange(date_time)
#Sensor 36 may be the first operational sensor!
#Cross-check indices - manual
#which(data$unitid == 'SS0029')[1]
#write.csv(data,file="D:/assignment_data_prep/assignment_data_prep/Sorted_Data.csv")
#Minute impute the dataset to make it ready for time series imputation
data_imp<-data%>%
mutate(date_time_hm=format(as.POSIXct(date_time,format="%Y-%m-%d %H:%M:%S"),'%Y-%m-%d %H:%M'))
#?complete
data_imp<-data_imp%>%
mutate(date_time_hm=as.POSIXct(date_time_hm))%>%
complete(date_time_hm=seq(min(as.POSIXct(date_time_hm)),max(as.POSIXct(date_time_hm)),by = '1 min'))%>%
select(-one_of("date_time"))
#If the above code errors or runs slowly, clear the workspace and re-run the script in a fresh R session
#write.csv(data_imp,file="D:/assignment_data_prep/assignment_data_prep/Minute_imputed_Data.csv")
p_load(zoo)
p_load(imputeTS)
#Imputing TS as linear interpolation from endpoints via a zoo TS
for (i in 3:length(colnames(data_imp))) {
if (i > length(colnames(data_imp))){
break
} else {
x <- zoo(data_imp[colnames(data_imp)[i]],data_imp$date_time_hm)
x <- na.interpolation(x, option = "linear")
df<-as.data.frame(x)
data_imp[colnames(data_imp)[i]]<-df[,1]
}
}
#Manual check for imputations
#write.csv(data_imp,file="D:/assignment_data_prep/assignment_data_prep/Minute_imputed_Data.csv")
rm(df,i,x,data)
#Trying different imputations for unit id to verify with support in non imputed dataset
data_impute_1<-data_imp #Mirror preceding
data_impute_2<-data_imp #Linear Discriminant Analysis
data_impute_3<-data_imp #k Nearest Neighbours
data_impute_4<-data_imp #LOCF
#A. Impute unit id using mirror preceding(assuming the first entry for unit id is not empty)
for (i in 2:length(data_impute_1$unitid)){
if(is.na(data_impute_1$unitid[i]) == TRUE){
if(is.na(data_impute_1$unitid[i-1]) == FALSE){
data_impute_1$unitid[i] = data_impute_1$unitid[i-1]
}
}
}
rm(i)
#write.csv(data_impute_1,file = "D:/assignment_data_prep/assignment_data_prep/Minute_imputed_Data_mirror.csv")
#B. Impute unit id by using LDA
p_load(MASS)
lda_model<-lda(unitid ~ Temperature + Noise + Light + Co2 + VOC + Humidity,data = data_impute_2)
predictions<-predict(lda_model,newdata=data_impute_2[is.na(data_impute_2$unitid) == FALSE,c(3:8)])$class
table(predictions,data_impute_2[is.na(data_impute_2$unitid) == FALSE,]$unitid)
#heavy misclassifications on level SS0050
for (i in 1:length(data_impute_2$unitid)){
if(is.na(data_impute_2$unitid[i]) == TRUE){
data_impute_2$unitid[i]<-predict(lda_model,newdata = data_impute_2[i,c(3:8)])$class
}
}
rm(i,lda_model,predictions)
#Manual Verification
#write.csv(data_impute_2,file = "D:/assignment_data_prep/assignment_data_prep/Minute_imputed_Data_lda.csv")
#Imputes wrongly! wont be used!
#C. Impute unitid using k-Nearest Neighbours - this algorithm considers and compares entire row slices
#(all columns) to make imputations on unitid (i.e. using nearest non-NA unitids with similar Temp, Noise, ... patterns)
p_load(VIM,ggplot2)
#Below code may take up to approx. 3 mins to execute... please wait
data_impute_3<-kNN(data_impute_3,variable = "unitid",k=5)
#Manual Verification
#write.csv(data_impute_3,file = "D:/assignment_data_prep/assignment_data_prep/Minute_imputed_Data_knn.csv")
#Support for each unitid preserved in imputed dataset
#D. Impute unitid using LOCF
p_load(stringr)
ids<-as.numeric(str_extract(data_impute_4$unitid,"[0-9]+"))
ids_imp<-na.locf(ids)%>%
sapply(function(x) paste('SS00',x,sep=''))
data_impute_4$unitid<-ids_imp
#Manual verification
#write.csv(data_impute_4,file = "D:/assignment_data_prep/assignment_data_prep/Minute_imputed_Data_locf.csv")
rm(ids,ids_imp)
#Delete 31st March 2017 entry - entire day imputed with 1440 datapoints!
data_impute_3<-data_impute_3[format(data_impute_3$date_time_hm,"%Y-%m-%d") != "2017-03-31",]#KNN - best imputation!
data_impute_4<-data_impute_4[format(data_impute_4$date_time_hm,"%Y-%m-%d") != "2017-03-31",]#LOCF
data_impute_1<-data_impute_1[format(data_impute_1$date_time_hm,"%Y-%m-%d") != "2017-03-31",]#Mirror
#We will be using only kNN from here forth
rm(data_impute_4,data_impute_1,data_impute_2)
#--------------------------------------------------------------------------------------------------------------
#Observations on dataset level
#--------------------------------------------------------------------------------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#2. Check if variables measured are correlated!
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p_load(corrplot)
corrplot(cor(data_impute_3[,3:8]),method = 'number',type = 'upper')
#As we can see VOC and Co2 are positively 'PERFECTLY CORRELATED'.
#Also temperature and humidity are positively correlated.
#Temperature and Light are negatively correlated and Light and Humidity are negatively correlated
#Remember to compare with WHO levels
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#3. Comparing recording levels for each sensor
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Temperature
data_impute_3%>%
group_by(unitid)%>%
ggplot(aes(x=factor(unitid),y=Temperature)) + geom_boxplot() + labs(x="Sensor ID")
summary(aov(data_impute_3$Temperature~data_impute_3$unitid))
#Light
data_impute_3%>%
group_by(unitid)%>%
ggplot(aes(x=factor(unitid),y=Light)) + geom_boxplot() + labs(x="Sensor ID")
summary(aov(data_impute_3$Light~data_impute_3$unitid))
#Noise
data_impute_3%>%
group_by(unitid)%>%
ggplot(aes(x=factor(unitid),y=Noise)) + geom_boxplot() + labs(x="Sensor ID")
summary(aov(data_impute_3$Noise~data_impute_3$unitid))
data_impute_3%>%
group_by(unitid)%>%
ggplot(aes(x=factor(unitid),y=Co2)) + geom_boxplot() + labs(x="Sensor ID")
#Room 29 has highest CO2 levels and is a common office space
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#4. Hourly Plot - To identify trends and habits on any given random day
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
data_impute_3<-data_impute_3%>%
mutate(hour=format(date_time_hm,format = "%H"))
#Temperature
data_impute_3%>%
group_by(hour)%>%
summarise(aver_temp=mean(Temperature))%>%
ggplot(aes(hour,aver_temp)) + geom_point(aes(size = 12)) + labs(y="Mean Temperature(C)") + theme(text = element_text(size = 20),legend.position = "none")
#CO2
data_impute_3%>%
group_by(hour)%>%
summarise(C=mean(Co2))%>%
ggplot(aes(hour,C)) + geom_point(aes(size = 12)) + labs(y="Mean CO2(ppm)") + theme(text = element_text(size = 20),legend.position = "none")
#Noise
data_impute_3%>%
group_by(hour)%>%
summarise(N=mean(Noise))%>%
ggplot(aes(hour,N)) + geom_point(aes(size = 12)) + labs(y="Mean Noise(dB)") + theme(text = element_text(size = 20),legend.position = "none")
#Light
data_impute_3%>%
group_by(hour)%>%
summarise(L=mean(Light))%>%
ggplot(aes(hour,L)) + geom_point(aes(size = 12)) + labs(y="Mean Light(LUX)") + theme(text = element_text(size = 20),legend.position = "none")
#Humidity
data_impute_3%>%
group_by(hour)%>%
summarise(H=mean(Humidity))%>%
ggplot(aes(hour,H)) + geom_point(aes(size = 12)) + labs(y="Mean Humidity(%)") + theme(text = element_text(size = 20),legend.position = "none")
#VOC
data_impute_3%>%
group_by(hour)%>%
summarise(V=mean(VOC))%>%
ggplot(aes(hour,V)) + geom_point(aes(size = 12)) + labs(y="VOC(ppm)") + theme(text = element_text(size = 20),legend.position = "none")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#5. Daily plot using time-series and moving average
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p_load(lubridate,scales)
#------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------
#Create moving average for Temperature,Noise,Light,CO2,VOC,Humidity with POSIXct Timestamp
p_load(zoo,tidyverse,magrittr,dplyr,scales,xts)
#--------------------------------------------------------------------------------------------------------------------------
l_zoo<-zoo(data_impute_3$Temperature,data_impute_3$date_time_hm)
m_av<-rollmean(l_zoo,1440,fill = list(NA,NULL,NA))
data_impute_3$mv_temp=coredata(m_av)
l_zoo<-zoo(data_impute_3$Noise,data_impute_3$date_time_hm)
m_av<-rollmean(l_zoo,1440,fill = list(NA,NULL,NA))
data_impute_3$mv_noise=coredata(m_av)
l_zoo<-zoo(data_impute_3$Light,data_impute_3$date_time_hm)
m_av<-rollmean(l_zoo,1440,fill = list(NA,NULL,NA))
data_impute_3$mv_light=coredata(m_av)
l_zoo<-zoo(data_impute_3$Co2,data_impute_3$date_time_hm)
m_av<-rollmean(l_zoo,1440,fill = list(NA,NULL,NA))
data_impute_3$mv_co2=coredata(m_av)
l_zoo<-zoo(data_impute_3$VOC,data_impute_3$date_time_hm)
m_av<-rollmean(l_zoo,1440,fill = list(NA,NULL,NA))
data_impute_3$mv_voc=coredata(m_av)
l_zoo<-zoo(data_impute_3$Humidity,data_impute_3$date_time_hm)
m_av<-rollmean(l_zoo,1440,fill = list(NA,NULL,NA))
data_impute_3$mv_hum=coredata(m_av)
rm(l_zoo,m_av)
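#A compact alternative to the six repeated blocks above (sketch; the helper name is ours):
#roll_day <- function(df, col, width = 1440) {
#  z <- zoo(df[[col]], df$date_time_hm)
#  coredata(rollmean(z, width, fill = list(NA, NULL, NA)))
#}
#data_impute_3$mv_temp <- roll_day(data_impute_3, "Temperature")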
#--------------------------------------------------------------------------------------------------------------------------
#Note: 28/Feb - 01/Mar is 00:00-23:59 of 01/Mar
#MARCH - Daily timeseries plot with trend-line using moving average rolled up to 1440 min => 1 day
#--------------------------------------------------------------------------------------------------------------------------
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") < "04",]%>%
ggplot(aes(date_time_hm,Temperature)) + geom_line() + geom_line(aes(date_time_hm,mv_temp),color="blue") +
labs(y="Temperature(C)",x="Day of March 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") < "04",]%>%
ggplot(aes(date_time_hm,Noise)) + geom_line() + geom_line(aes(date_time_hm,mv_noise),color="blue") +
labs(y="Noise(dB)",x="Day of March 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") < "04",]%>%
ggplot(aes(date_time_hm,Light)) + geom_line() + geom_line(aes(date_time_hm,mv_light),color="blue") +
labs(y="Light(LUX)",x="Day of March 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") < "04",]%>%
ggplot(aes(date_time_hm,Co2)) + geom_line() + geom_line(aes(date_time_hm,mv_co2),color = "blue") +
labs(y="CO2(ppm)",x="Day of March 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") < "04",]%>%
ggplot(aes(date_time_hm,VOC)) + geom_line() + geom_line(aes(date_time_hm,mv_voc),color = "blue") +
labs(y="VOC(ppm)",x="Day of March 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") < "04",]%>%
ggplot(aes(date_time_hm,Humidity)) + geom_line() + geom_line(aes(date_time_hm,mv_hum),color = "blue") +
labs(y="Humidity(%)",x="Day of March 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
#---------------------------------------------------------------------------------------------------------------------------
#APRIL - Daily timeseries plot with trend-line using moving average rolled up to 1440 minutes => 1 day
#---------------------------------------------------------------------------------------------------------------------------
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") == "04",]%>%
ggplot(aes(date_time_hm,Temperature)) + geom_line() + geom_line(aes(date_time_hm,mv_temp),color="blue") +
labs(y="Temperature(C)",x="Day of April 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") == "04",]%>%
ggplot(aes(date_time_hm,Noise)) + geom_line() + geom_line(aes(date_time_hm,mv_noise),color="blue") +
labs(y="Noise(dB)",x="Day of April 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") == "04",]%>%
ggplot(aes(date_time_hm,Light)) + geom_line() + geom_line(aes(date_time_hm,mv_light),color="blue") +
labs(y="Light(LUX)",x="Day of April 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") == "04",]%>%
ggplot(aes(date_time_hm,Co2)) + geom_line() + geom_line(aes(date_time_hm,mv_co2),color = "blue") +
labs(y="CO2(ppm)",x="Day of April 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") == "04",]%>%
ggplot(aes(date_time_hm,VOC)) + geom_line() + geom_line(aes(date_time_hm,mv_voc),color = "blue") +
labs(y="VOC(ppm)",x="Day of April 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
data_impute_3[format(data_impute_3$date_time_hm,format = "%m") == "04",]%>%
ggplot(aes(date_time_hm,Humidity)) + geom_line() + geom_line(aes(date_time_hm,mv_hum),color = "blue") +
labs(y="Humidity(%)",x="Day of April 2017") +
scale_x_datetime(breaks = date_breaks("1 day"),labels = date_format("%d")) + theme(axis.text.x = element_text(angle = 90))
#--------------------------------------------------------------------------------------------------------------
#Verification using plot.xts's moving average builtin
#--------------------------------------------------------------------------------------------------------------
#help("plot.zoo")
#l_xts<-as.xts(l_zoo)
#plot(l_xts)
#lines(TTR::SMA(l_xts,n=1440),col = "blue")
#--------------------------------------------------------------------------------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Extra.Interactive Viewer for minute details
#We have not excluded outliers since these observations may record anomalous behaviour of indoor systems
#and our collection of data from these systems is intended to monitor them and act on such anomalies
#Excluding data points blindly without this consideration defeats the purpose of IoT monitoring!
#We will examine the data in minute detail in this section using dygraphs and report on any such
#anomalies found
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
p_load(dygraphs)
#--------------------------------------------------------------------------------------------------------------
xts_temp<-xts(data_impute_3$Temperature, order.by = data_impute_3$date_time_hm)
xts_humi<-xts(data_impute_3$Humidity, order.by = data_impute_3$date_time_hm)
xts_data<-cbind(xts_temp, xts_humi)
rm(xts_temp,xts_humi)
dygraph(xts_data)%>%
dyRangeSelector()
#There is an abnormally low humidity value of 46.4 on 2nd March at 05:36 am, with a minor surge of temperature.
#This phenomenon started at 5:35 am and receded by 5:37 am; humidity stabilized by 5:45 am
#This is infact the global minima for humidity!
data_impute_3[which(data_impute_3$Humidity == min(data_impute_3$Humidity)),"date_time_hm"]
#There is an erratic pattern of humidity fluctuations observed on 16th April from 6:13 am all the way upto 8:48 am.
#This is accompanied by small fluctuations in temperature also. Humidity seems to have adhered thereafter to cyclicity
#---------------------------------------------------------------------------------------------------------------------
xts_co2<-xts(data_impute_3$Co2,order.by = data_impute_3$date_time_hm)
xts_voc<-xts(data_impute_3$VOC,order.by = data_impute_3$date_time_hm)
xts_data<-cbind(xts_co2,xts_voc)
rm(xts_co2,xts_voc)
dygraph(xts_data)%>%
dyRangeSelector()
#A comparison across the two months reveals that the pattern gradually spreads out, and hence
#finding outliers here would eliminate important information such as the spike in both CO2 and VOC on 25th April
#at 4:29 pm, which marks the global maxima and indicates a flash rise in emissions detected by the system due to
#some event near the sensor SS0029. The maxima being reached together, along with their sample correlation of
#1, justifies the observation's non-exclusion as an outlier
data_impute_3[which(data_impute_3$VOC == max(data_impute_3$VOC)),c("date_time_hm","unitid")]
#---------------------------------------------------------------------------------------------------------------------
#We will be comparing Light and Noise separately each of which may be indicative of activity
xts_light<-xts(data_impute_3$Light,order.by = data_impute_3$date_time_hm)
dygraph(xts_light)%>%
dyRangeSelector()
#There is a sudden and random dimming at 10:44 pm followed by spiked brightening of light source upto
#11:02 pm on 10th March. This phenomenon may be indicative of issues with neutral wiring of the light source
#and may warrant inspection
#There was an anomalous usage of light on 11 & 12 March Sunday & Monday: at 5:41 to 6:10 pm on 11th and 11:05pm
#to 02:35 am on 11-12th. Similarly on 1st April 5:17pm to 6:10 pm, 14th April(Good Friday) 5:04pm to 8:10 pm and
#22nd April 9:23pm to 10:46 pm. More prominently, the intensity of usage of light seems to follow a decreasing trend
#---------------------------------------------------------------------------------------------------------------------
rm(xts_light)
xts_noise<-xts(data_impute_3$Noise,order.by = data_impute_3$date_time_hm)
dygraph(xts_noise)%>%
dyRangeSelector()
#As discussed earlier there is a minute but sharp drop in background noise from 16th April onwards. This may be a deliberate
#calibration effort by the administration team to improve quality of noise recorded over background noise.
#A global maxima is reached on 2nd April at 5:38 pm
data_impute_3[which(data_impute_3$Noise == max(data_impute_3$Noise)),"date_time_hm"]
#Thanks :)
|
54346ff31e06fc0e1fcc0f21191ec80bc4305721
|
0afc779287f863d2de305a7b95243c988c2aefcb
|
/Lab4.R
|
b32c54d8c7d309e63978ccb4d833869273ee5969
|
[] |
no_license
|
Jacob-Snyder/Lab-5
|
d91f0f255e36fd71496e07f65678803fb1c1505f
|
afde6b77ee974a8a9ec086583a44f0a2567ed61a
|
refs/heads/master
| 2022-11-10T23:52:27.346329
| 2020-07-03T20:18:54
| 2020-07-03T20:18:54
| 276,967,973
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,794
|
r
|
Lab4.R
|
library(tidyverse)
#NB: assumes a data frame 'fish' (fish parcel survey data) is already loaded in the workspace
s.t. <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = mean)
s.t.
data.frame(s.t.)
s.t.$mean <- row.names(s.t.)
s.t.
s.t. <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = mean)
data.frame(s.t.)
s.t.
st2 <- data.frame(s.t.)
st2$mean <- row.names(st2)
st2
s.d. <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = sd)
s.d.
data.frame(s.d.)
s.d.
sd2 <- data.frame(s.d.)
sd2$standard_deviation <-row.names(sd2)
sd2
merge(x = sd2,y = st2,by = "transect.id")
sd2$transect.id <- row.names(sd2)
sd2
standard_deviation <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = sd)
data.frame(standard_deviation)
sd2 <- data.frame(standard_deviation)
sd2$transect.id <- row.names(sd2)
sd2
mean <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = mean)
data.frame(mean)
st2 <- data.frame(mean)
st2$transect.id <- row.names(st2)
st2
sd2
stand.dev <-tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = sd)
data.frame(stand.dev)
sd2 <- data.frame(stand.dev)
sd2$transect.id <- row.names(sd2)
sd2
#merge(x = mean,y = stand.dev,... = "transect.id")   # invalid call ('...' is not an argument name); the working merge is below
sdt <- merge(x = sd2,y = st2,by = "transect.id")
sdt
count <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = sum)
count
data.frame(count)
ct <- data.frame(count)
ct$transect.id <- row.names(ct)
ct
merge(x = ct,y = sdt,by = "transect.id")
#install.packages("tidyverse")   # one-time install only
Mean <- fish %>% group_by (transect.id) %>% summarise(mean=mean(parcel.density.m3))
Mean
data.frame(Mean)
Mean$mean.pd <- row.names(Mean)
Mean
Mean <- fish %>% group_by (transect.id) %>% summarise(mean.pd=mean(parcel.density.m3))
data.frame(Mean)
#Console output of data.frame(Mean):
#   transect.id  mean.pd
# 1  OST14-1E-D 1.901954
# 2  OST14-1E-M 1.770026
# 3  OST14-1E-S 2.098717
# 4  OST14-1W-M 1.768641
# 5  OST14-1W-S 1.983198
# 6  OST14-2C-D 2.086289
# 7  OST14-2C-M 1.535166
# 8  OST14-2C-S 1.673188
# 9  OST14-2E-D 1.892880
# 10 OST14-2E-M 2.033088
# 11 OST14-2E-S 2.402454
# 12 OST14-3C-D 2.009058
# 13 OST14-3C-M 2.046898
# 14 OST14-3C-S 1.564965
# 15 OST14-3W-D 1.603930
# 16 OST14-3W-M 1.785874
# 17 OST14-3W-S 2.143646
# 18 OST14-4W-D 2.015992
# 19 OST14-4W-M 1.969309
# 20 OST14-4W-S 1.518438
# 21 OST14-5W-D 1.850275
# 22 OST14-5W-M 6.321862
# 23 OST14-5W-S 1.747230
# 24 OST15-10W-M 1.636471
# 25 OST15-10W-S 1.791265
# 26 OST15-6E-M 2.590905
# 27 OST15-6E-S 1.378165
# 28 OST15-6W-M 1.560250
# 29 OST15-6W-S 1.509818
# 30 OST15-7C-M 2.457173
# 31 OST15-7C-S 1.666048
# 32 OST15-7E-M 2.491521
# 33 OST15-7E-S 3.048735
# 34 OST15-7W-M 1.994220
# 35 OST15-7W-S 2.326290
# 36 OST15-8W-M 1.917494
# 37 OST15-8W-S 1.702851
# 38 OST15-9W-M 2.882370
# 39 OST15-9W-S 1.599573
st.dev <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = sd)
data.frame(st.dev)
mpd <- data.frame(Mean)
sdpd <- data.frame(st.dev)
sdpd$transect.id <- row.names(sdpd)
sdpd
?join
inner_join(x = mpd,y = sdpd,"transect.id")
Count <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = sum)
data.frame(Count)
Count <- data.frame(Count)
Count$transect.id <- row.names(Count)
Count
inner_join(x = sdt,y = Count,"transect.id")
minimum <- tapply(X = fish$parcel.length.m,INDEX = fish$depth_fac,FUN = min)
data.frame(minimum)
MINIMUM <- data.frame(minimum)
MINIMUM$depth <- row.names(MINIMUM)
median <- tapply(X = fish$parcel.length.m,INDEX = fish$depth_fac,FUN = median)
data.frame(median)
MEDIAN <- data.frame(median)
MEDIAN$depth <- row.names(MEDIAN)
mean.pl <- tapply(X = fish$parcel.length.m,INDEX = fish$depth_fac,FUN = mean)
data.frame(mean.pl)
MEAN <-data.frame(mean.pl)
MEAN$depth <- row.names(MEAN)
maximum <- tapply(X = fish$parcel.length.m,INDEX = fish$depth_fac,FUN = max)
data.frame(maximum)
MAXIMUM <- data.frame(maximum)
MAXIMUM$depth <- row.names(MAXIMUM)
MaxMin <- inner_join(x = MAXIMUM,y = MINIMUM,"depth")
MedMean <- inner_join(x = MEDIAN,y = MEAN,"depth")
MaxMin.MedMean <- inner_join(x = MaxMin,y = MedMean,"depth")
MaxMin.MedMean
max <- tapply(X = fish$parcel.length.m,INDEX = fish$area_fac,FUN = max)
data.frame(max)
MAX.A <- data.frame(max)
MAX.A$area <- row.names(MAX.A)
MAX.A
min <- tapply(X = fish$parcel.length.m,INDEX = fish$area_fac,FUN = min)
data.frame(min)
MIN.A <- data.frame(min)
MIN.A$area <- row.names(MIN.A)
MIN.A
med <- tapply(X = fish$parcel.length.m,INDEX=fish$area_fac,FUN = median)
data.frame(med)
MED.A <- data.frame(med)
MED.A$area <- row.names(MED.A)
MED.A
m.e.a.n <- tapply(X = fish$parcel.length.m,INDEX = fish$area_fac,FUN = mean)
data.frame(m.e.a.n)
MEAN.A <- data.frame(m.e.a.n)
MEAN.A$area <- row.names(MEAN.A)
MEAN.A
MaxMin.A <- inner_join(x = MAX.A,y = MIN.A,"area")
MedMean.A <- inner_join(x = MED.A,y = MEAN.A,"area")
A.MaxMinMedMean <- inner_join(x = MaxMin.A,y = MedMean.A,"area")
A.MaxMinMedMean
merge(x = sd2,y = st2,b.y=mean)
s.d. <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = sd)
fish
s.d. <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = sd)
rm(`fish_data.(1)`)
data.frame(s.d.)
sd2 <- data.frame(s.d.)
sd2$standard_deviation <-row.names(sd2)
merge(x = sd2,y = st2,by = "mean")
s.t. <- tapply(X = fish$parcel.density.m3,INDEX = fish$transect.id,FUN = mean)
data.frame(s.t.)
st2 <- data.frame(s.t.)
st2$mean <- row.names(st2)
merge(x = sd2,y = st2,by = "mean")
head(sd2)
head (st2)
merge(x = sd2,y = st2,by.x = "standard_deviation",by.y = "mean")
combo <- merge(x = sd2,y = st2,by.x = "standard_deviation",by.y = "mean")
head(combo)
?join()
?rename()
rename(sd2,standard_deviation=transect.id)
rename(st2,mean=transect.id)
sd2<-rename(sd2, transect.id=standard_deviation)
st2<-rename(st2,transect.id=mean)
head(sd2)
head(st2)
combo2 <- inner_join(x = sd2,y = st2,"transect.id")
combo2 <- combo2[,c("transect.id","s.d.","s.t.")]
combo2
minimum <- tapply(X = fish$parcel.length.m,INDEX = fish$depth_fac,FUN = min)
data.frame(minimum)
MINIMUM <- data.frame(minimum)
MINIMUM$depth <- row.names(MINIMUM)
maximum <- tapply(X = fish$parcel.length.m,INDEX = fish$depth_fac,FUN = max)
data.frame(maximum)
MAXIMUM <- data.frame(maximum)
MAXIMUM$depth <- row.names(MAXIMUM)
median <- tapply(X = fish$parcel.length.m,INDEX = fish$depth_fac,FUN = median)
data.frame(median)
MEDIAN <- data.frame(median)
MEDIAN$depth <- row.names(MEDIAN)
?group_by()
minimum <- group_by(fish,depth,area)
names(fish)
minimum <- group_by(fish,tow.depth,area_fac)
minimum
summarise(minimum,min.p=min(parcel.length.m))
summarise(minimum,max.p=max(parcel.length.m),min.p=min(parcel.length.m))
summarise(minimum,max.p=max(parcel.length.m),min.p=min(parcel.length.m),med.p=median(parcel.length.m))
?quantile()
quantile(x = fish$parcel.length.m,probs = c(.05,.95))
summarise(minimum,max.p=max(parcel.length.m),min.p=min(parcel.length.m),med.p=median(parcel.length.m),upper95.p=quantile(x = parcel.length.m,.05),lower95.p=quantile(x = parcel.length.m,.95))
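#A single dplyr pipeline reproducing the tapply/merge workflow above (sketch):
#fish %>%
#  group_by(transect.id) %>%
#  summarise(mean.pd = mean(parcel.density.m3),
#            sd.pd   = sd(parcel.density.m3),
#            sum.pd  = sum(parcel.density.m3))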
|
05cc6d41fdafcc2de3a25bdbd9a3869f1fd0ebb5
|
9689cd916145b4f477003353a681f25f22045bd2
|
/rf_predict.R
|
8767bde28c565757f373bb92ae92e66c23dfba8c
|
[] |
no_license
|
dingiser/DTB100China
|
8108b979dde4c5c11e6c95651d8e17a154947a6e
|
6198cd88565a52ae18b44cd08c7b6ad64d61e46e
|
refs/heads/master
| 2022-03-01T01:31:46.960372
| 2019-12-03T04:03:31
| 2019-12-03T04:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,471
|
r
|
rf_predict.R
|
### apply spatial prediction using random forest
library(randomForest)
library(rgdal)
library(sp)
rm(list = ls())
gc()
memory.limit(size = 100*1024)
load("F:/New/RData/rf.RData")
load("F:/China/Final/blockList_1h.RData")
### predict DTB for every 1° × 1° block
predictFunForParallel<-function(bList)
{
library(randomForest)
library(rgdal)
prj.str<-"+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
lon=round(bList@bbox[1,1])
lat=round(bList@bbox[2,1])
blockName<-paste0(as.character(lon),"_",as.character(lat))
filePath<-"F:/New/Results/rf_1h" # result output path
fileList<-list.files(filePath)
if(paste0(blockName,".tif") %in% fileList)
return()
  if((lon>=133 && lon<=136 && lat>=18 && lat<=54) | 1==1) # '| 1==1' forces TRUE: the bounding-box filter is effectively disabled
{
print(blockName)
blockPath<-paste0("F:/China/Covariates/BlockCovariates/",blockName)
blockCovariatesNameList<-list.files(blockPath)
covaDf<-data.frame()
for(i in blockCovariatesNameList)
{
strList<-strsplit(i,"[.]")
covaName<-strList[[1]][1]
blockCovariates<-readGDAL(paste0(blockPath,"\\",i))
proj4string(blockCovariates)<-prj.str
ov.grid<-over(bList,blockCovariates)
colnames(ov.grid)<-covaName
if(ncol(covaDf)==0)
{
covaDf<-data.frame(no=1:nrow(ov.grid))
}
covaDf<-cbind(covaDf,ov.grid)
}
covariateNum<-ncol(covaDf)-1
for(l in 2:(covariateNum+1))
{
ave<-mean(covaDf[,l],na.rm = TRUE)
if(is.nan(ave))
{
ave=0
}
covaDf[,l]<-ifelse(is.na(covaDf[,l]),ave,covaDf[,l])
}
if(anyNA(covaDf))
{
return()
}
pre<-vector()
pre<-predict(rf,newdata = covaDf[,-1],na.action=na.pass)
#pre<-ifelse(pre>=0,pre,0)
bList@data<-data.frame(pred=pre)
if(!dir.exists(filePath))
dir.create(filePath)
name<-paste0(as.character(round(bList@bbox[1,1])),"_",as.character(round(bList@bbox[2,1])))
writeGDAL(bList,paste0(filePath,"/",as.character(name),".tif"))
#rm(dfTemp)
#flag=flag+1
    gc()
}
#Sys.time()
}
### parallel computing for predicting
library(parallel)
coresNum<-detectCores(logical = F)
cl <- makeCluster(getOption("cl.cores", coresNum))
clusterExport(cl,varlist="rf")
system.time({
res <- parLapply(cl, blockList,predictFunForParallel)
})
stopCluster(cl)
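### optional post-step (sketch, assuming the 'raster' package): stitch the
### per-block GeoTIFFs written above into a single map
# tifs <- list.files("F:/New/Results/rf_1h", pattern = "\\.tif$", full.names = TRUE)
# blocks <- lapply(tifs, raster::raster)
# dtb_map <- do.call(raster::merge, blocks)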
|
0cb6be76f24f92ebc6e1edca3c70700cfe82f33e
|
50fd6e2baeac8c557023daf23afb48a02bb1a103
|
/R/generics.R
|
ba36f38aaea212ce5d286f32a210cdc3abbe1158
|
[
"Apache-2.0"
] |
permissive
|
aszekMosek/CVXR
|
1efa4ce7a290fe026f5afbfd79c12e7d754ed350
|
e8bd7e417bd868341edf487f108fbf57d24247a1
|
refs/heads/master
| 2020-09-23T01:06:33.713597
| 2019-11-07T19:48:05
| 2019-11-07T19:48:05
| 225,362,059
| 0
| 0
|
Apache-2.0
| 2019-12-02T11:44:34
| 2019-12-02T11:44:33
| null |
UTF-8
|
R
| false
| false
| 33,906
|
r
|
generics.R
|
#'
#' Sign Properties
#'
#' Determine if an expression is positive, negative, or zero.
#'
#' @param object An \linkS4class{Expression} object.
#' @return A logical value.
#' @examples
#' pos <- Constant(1)
#' neg <- Constant(-1)
#' zero <- Constant(0)
#' unknown <- Variable()
#'
#' is_zero(pos)
#' is_zero(-zero)
#' is_zero(unknown)
#' is_zero(pos + neg)
#'
#' is_positive(pos + zero)
#' is_positive(pos * neg)
#' is_positive(pos - neg)
#' is_positive(unknown)
#'
#' is_negative(-pos)
#' is_negative(pos + neg)
#' is_negative(neg * zero)
#' is_negative(neg - pos)
#' @name sign-methods
NULL
#' @rdname sign-methods
#' @export
setGeneric("is_zero", function(object) { standardGeneric("is_zero") })
#' @rdname sign-methods
#' @export
setGeneric("is_positive", function(object) { standardGeneric("is_positive") })
#' @rdname sign-methods
#' @export
setGeneric("is_negative", function(object) { standardGeneric("is_negative") })
#'
#' Curvature of Expression
#'
#' The curvature of an expression.
#'
#' @param object An \linkS4class{Expression} object.
#' @return A string indicating the curvature of the expression, either "CONSTANT", "AFFINE", "CONVEX, "CONCAVE", or "UNKNOWN".
#' @examples
#' x <- Variable()
#' c <- Constant(5)
#'
#' curvature(c)
#' curvature(x)
#' curvature(x^2)
#' curvature(sqrt(x))
#' curvature(log(x^3) + sqrt(x))
#' @docType methods
#' @rdname curvature
#' @export
setGeneric("curvature", function(object) { standardGeneric("curvature") })
#'
#' Curvature Properties
#'
#' Determine if an expression is constant, affine, convex, concave, quadratic, or piecewise linear (pwl).
#'
#' @param object An \linkS4class{Expression} object.
#' @return A logical value.
#' @examples
#' x <- Variable()
#' c <- Constant(5)
#'
#' is_constant(c)
#' is_constant(x)
#'
#' is_affine(c)
#' is_affine(x)
#' is_affine(x^2)
#'
#' is_convex(c)
#' is_convex(x)
#' is_convex(x^2)
#' is_convex(sqrt(x))
#'
#' is_concave(c)
#' is_concave(x)
#' is_concave(x^2)
#' is_concave(sqrt(x))
#'
#' is_quadratic(x^2)
#' is_quadratic(sqrt(x))
#'
#' is_pwl(c)
#' is_pwl(x)
#' is_pwl(x^2)
#' @name curvature-methods
NULL
#' @rdname curvature-methods
#' @export
setGeneric("is_constant", function(object) { standardGeneric("is_constant") })
#' @rdname curvature-methods
#' @export
setGeneric("is_affine", function(object) { standardGeneric("is_affine") })
#' @rdname curvature-methods
#' @export
setGeneric("is_convex", function(object) { standardGeneric("is_convex") })
#' @rdname curvature-methods
#' @export
setGeneric("is_concave", function(object) { standardGeneric("is_concave") })
#' @rdname curvature-methods
#' @export
setGeneric("is_quadratic", function(object) { standardGeneric("is_quadratic") })
#' @rdname curvature-methods
#' @export
setGeneric("is_pwl", function(object) { standardGeneric("is_pwl") })
#'
#' DCP Compliance
#'
#' Determine if a problem or expression complies with the disciplined convex programming rules.
#'
#' @param object A \linkS4class{Problem} or \linkS4class{Expression} object.
#' @return A logical value indicating whether the problem or expression is DCP compliant, i.e. no unknown curvatures.
#' @examples
#' x <- Variable()
#' prob <- Problem(Minimize(x^2), list(x >= 5))
#' is_dcp(prob)
#' solve(prob)
#' @docType methods
#' @rdname is_dcp
#' @export
setGeneric("is_dcp", function(object) { standardGeneric("is_dcp") })
#'
#' Size of Expression
#'
#' The size of an expression.
#'
#' @param object An \linkS4class{Expression} object.
#' @return A vector with two elements \code{c(row, col)} representing the dimensions of the expression.
#' @examples
#' x <- Variable()
#' y <- Variable(3)
#' z <- Variable(3,2)
#'
#' size(x)
#' size(y)
#' size(z)
#' size(x + y)
#' size(z - x)
#' @docType methods
#' @rdname size
#' @export
setGeneric("size", function(object) { standardGeneric("size") })
#'
#' Size Properties
#'
#' Determine if an expression is a scalar, vector, or matrix.
#'
#' @param object An \linkS4class{Expression} object.
#' @return A logical value.
#' @examples
#' x <- Variable()
#' y <- Variable(3)
#' z <- Variable(3,2)
#'
#' is_scalar(x)
#' is_scalar(y)
#' is_scalar(x + y)
#'
#' is_vector(x)
#' is_vector(y)
#' is_vector(2*z)
#'
#' is_matrix(x)
#' is_matrix(y)
#' is_matrix(z)
#' is_matrix(z - x)
#' @name size-methods
NULL
#' @rdname size-methods
#' @export
setGeneric("is_scalar", function(object) { standardGeneric("is_scalar") })
#' @rdname size-methods
#' @export
setGeneric("is_vector", function(object) { standardGeneric("is_vector") })
#' @rdname size-methods
#' @export
setGeneric("is_matrix", function(object) { standardGeneric("is_matrix") })
# The value of the objective given the solver primal value.
setGeneric("primal_to_result", function(object, result) { standardGeneric("primal_to_result") })
#'
#' Get or Set Value
#'
#' Get or set the value of a variable, parameter, expression, or problem.
#'
#' @param object A \linkS4class{Variable}, \linkS4class{Parameter}, \linkS4class{Expression}, or \linkS4class{Problem} object.
#' @param value A numeric scalar, vector, or matrix to assign to the object.
#' @return The numeric value of the variable, parameter, or expression. If any part of the mathematical object is unknown, return \code{NA}.
#' @examples
#' lambda <- Parameter()
#' value(lambda)
#'
#' value(lambda) <- 5
#' value(lambda)
#' @name value-methods
NULL
#' @rdname value-methods
#' @export
setGeneric("value", function(object) { standardGeneric("value") })
#' @rdname value-methods
#' @export
setGeneric("value<-", function(object, value) { standardGeneric("value<-") })
# Internal method for saving the value of an expression
setGeneric("save_value", function(object, value) { standardGeneric("save_value") })
#'
#' Get Expression Data
#'
#' Get information needed to reconstruct the expression aside from its arguments.
#'
#' @param object A \linkS4class{Expression} object.
#' @return A list containing data.
#' @docType methods
#' @rdname get_data
#' @export
setGeneric("get_data", function(object) { standardGeneric("get_data") })
#'
#' Variable, Parameter, or Expression Name
#'
#' The string representation of a variable, parameter, or expression.
#'
#' @param object A \linkS4class{Variable}, \linkS4class{Parameter}, or \linkS4class{Expression} object.
#' @return For \linkS4class{Variable} or \linkS4class{Parameter} objects, the value in the name slot. For \linkS4class{Expression} objects, a string indicating the nested atoms and their respective arguments.
#' @docType methods
#' @rdname name
#' @examples
#' x <- Variable()
#' y <- Variable(3, name = "yVar")
#'
#' name(x)
#' name(y)
#' @export
setGeneric("name", function(object) { standardGeneric("name") })
#'
#' Parts of an Expression
#'
#' List the variables, parameters, or constants in a canonical expression.
#'
#' @param object A \linkS4class{Canonical} expression.
#' @return A list of \linkS4class{Variable}, \linkS4class{Parameter}, or \linkS4class{Constant} objects.
#' @examples
#' m <- 50
#' n <- 10
#' beta <- Variable(n)
#' y <- matrix(rnorm(m), nrow = m)
#' X <- matrix(rnorm(m*n), nrow = m, ncol = n)
#' lambda <- Parameter()
#'
#' expr <- sum_squares(y - X %*% beta) + lambda*p_norm(beta, 1)
#' variables(expr)
#' parameters(expr)
#' constants(expr)
#' lapply(constants(expr), function(c) { value(c) })
#' @name expression-parts
NULL
#' @rdname expression-parts
#' @export
setGeneric("variables", function(object) { standardGeneric("variables") })
#' @rdname expression-parts
#' @export
setGeneric("parameters", function(object) { standardGeneric("parameters") })
#' @rdname expression-parts
#' @export
setGeneric("constants", function(object) { standardGeneric("constants") })
#'
#' Sub/Super-Gradient
#'
#' The (sub/super)-gradient of the expression with respect to each variable.
#' Matrix expressions are vectorized, so the gradient is a matrix. \code{NA} indicates variable values are unknown or outside the domain.
#'
#' @param object An \linkS4class{Expression} object.
#' @return A list mapping each variable to a sparse matrix.
#' @examples
#' x <- Variable(2, name = "x")
#' A <- Variable(2, 2, name = "A")
#'
#' value(x) <- c(-3,4)
#' expr <- p_norm(x, 2)
#' grad(expr)
#'
#' value(A) <- rbind(c(3,-4), c(4,3))
#' expr <- p_norm(A, 0.5)
#' grad(expr)
#'
#' value(A) <- cbind(c(1,2), c(-1,0))
#' expr <- abs(A)
#' grad(expr)
#' @docType methods
#' @rdname grad
#' @export
setGeneric("grad", function(object) { standardGeneric("grad") })
#'
#' Domain
#'
#' A list of constraints describing the closure of the region where the expression is finite.
#'
#' @param object An \linkS4class{Expression} object.
#' @return A list of \linkS4class{Constraint} objects.
#' @examples
#' a <- Variable(name = "a")
#' dom <- domain(p_norm(a, -0.5))
#' prob <- Problem(Minimize(a), dom)
#' result <- solve(prob)
#' result$value
#'
#' b <- Variable()
#' dom <- domain(kl_div(a, b))
#' result <- solve(Problem(Minimize(a + b), dom))
#' result$getValue(a)
#' result$getValue(b)
#'
#' A <- Variable(2, 2, name = "A")
#' dom <- domain(lambda_max(A))
#' A0 <- rbind(c(1,2), c(3,4))
#' result <- solve(Problem(Minimize(norm2(A - A0)), dom))
#' result$getValue(A)
#'
#' dom <- domain(log_det(A + diag(rep(1,2))))
#' prob <- Problem(Minimize(sum(diag(A))), dom)
#' result <- solve(prob, solver = "SCS")
#' result$value
#' @docType methods
#' @rdname domain
#' @export
setGeneric("domain", function(object) { standardGeneric("domain") })
#'
#' Validate Value
#'
#' Check that the value satisfies a \linkS4class{Leaf}'s symbolic attributes.
#'
#' @param object A \linkS4class{Leaf} object.
#' @param val The assigned value.
#' @return The value converted to proper matrix type.
#' @docType methods
#' @rdname validate_val
setGeneric("validate_val", function(object, val) { standardGeneric("validate_val") })
#'
#' Canonicalize
#'
#' Computes the graph implementation of a canonical expression.
#'
#' @param object A \linkS4class{Canonical} object.
#' @return A list of \code{list(affine expression, list(constraints))}.
#' @docType methods
#' @name canonicalize
NULL
#' @rdname canonicalize
#' @export
setGeneric("canonicalize", function(object) { standardGeneric("canonicalize") })
#' @rdname canonicalize
setGeneric("canonical_form", function(object) { standardGeneric("canonical_form") })
#
# Gradient of an Atom
#
# The (sub/super) gradient of the atom with respect to each argument. Matrix expressions are vectorized, so the gradient is a matrix.
#
# @param object An \linkS4class{Atom} object.
# @param values A list of numeric values for the arguments.
# @return A list of sparse matrices or \code{NA}.
setGeneric(".grad", function(object, values) { standardGeneric(".grad") })
#
# Domain of an Atom
#
# The constraints describing the domain of the atom.
#
# @param object An \linkS4class{Atom} object.
# @return A list of \linkS4class{Constraint} objects.
setGeneric(".domain", function(object) { standardGeneric(".domain") })
#
# Gradient of an AxisAtom
#
# The (sub/super) gradient of the atom with respect to each argument. Matrix expressions are vectorized, so the gradient is a matrix. Takes the axis into account.
#
# @param values A list of numeric values for the arguments.
# @return A list of sparse matrices or \code{NA}.
setGeneric(".axis_grad", function(object, values) { standardGeneric(".axis_grad") })
#
# Column Gradient of an Atom
#
# The (sub/super) gradient of the atom with respect to a column argument. Matrix expressions are vectorized, so the gradient is a matrix.
# @param value A numeric value for a column.
# @return A sparse matrix or \code{NA}.
setGeneric(".column_grad", function(object, value) { standardGeneric(".column_grad") })
# Positive semidefinite inequalities (PSD constraints)
#' @rdname PSDConstraint-class
#' @export
setGeneric("%>>%", function(e1, e2) { standardGeneric("%>>%") })
#' @rdname PSDConstraint-class
#' @export
setGeneric("%<<%", function(e1, e2) { standardGeneric("%<<%") })
#'
#' Atom Size
#'
#' Determine the size of an atom based on its arguments.
#'
#' @param object A \linkS4class{Atom} object.
#' @return A numeric vector \code{c(row, col)} indicating the size of the atom.
#' @rdname size_from_args
setGeneric("size_from_args", function(object) { standardGeneric("size_from_args") })
#'
#' Atom Sign
#'
#' Determine the sign of an atom based on its arguments.
#'
#' @param object An \linkS4class{Atom} object.
#' @return A logical vector \code{c(is positive, is negative)} indicating the sign of the atom.
#' @rdname sign_from_args
setGeneric("sign_from_args", function(object) { standardGeneric("sign_from_args") })
#'
#' Validate Arguments
#'
#' Validate an atom's arguments, returning an error if any are invalid.
#'
#' @param object An \linkS4class{Atom} object.
#' @docType methods
#' @rdname validate_args
setGeneric("validate_args", function(object) { standardGeneric("validate_args") })
#'
#' Numeric Value of Atom
#'
#' Returns the numeric value of the atom evaluated on the specified arguments.
#'
#' @param object An \linkS4class{Atom} object.
#' @param values A list of arguments to the atom.
#' @return A numeric scalar, vector, or matrix.
#' @docType methods
#' @rdname to_numeric
setGeneric("to_numeric", function(object, values) { standardGeneric("to_numeric") })
#'
#' Curvature of an Atom
#'
#' Determine if an atom is convex, concave, or affine.
#'
#' @param object A \linkS4class{Atom} object.
#' @return A logical value.
#' @examples
#' x <- Variable()
#'
#' is_atom_convex(x^2)
#' is_atom_convex(sqrt(x))
#' is_atom_convex(log(x))
#'
#' is_atom_concave(-abs(x))
#' is_atom_concave(x^2)
#' is_atom_concave(sqrt(x))
#'
#' is_atom_affine(2*x)
#' is_atom_affine(x^2)
#' @name curvature-atom
NULL
#' @rdname curvature-atom
#' @export
setGeneric("is_atom_convex", function(object) { standardGeneric("is_atom_convex") })
#' @rdname curvature-atom
#' @export
setGeneric("is_atom_concave", function(object) { standardGeneric("is_atom_concave") })
#' @rdname curvature-atom
#' @export
setGeneric("is_atom_affine", function(object) { standardGeneric("is_atom_affine") })
#'
#' Curvature of Composition
#'
#' Determine whether a composition is non-decreasing or non-increasing in an index.
#'
#' @param object A \linkS4class{Atom} object.
#' @param idx An index into the atom.
#' @return A logical value.
#' @examples
#' x <- Variable()
#' is_incr(log(x), 1)
#' is_incr(x^2, 1)
#' is_decr(min(x), 1)
#' is_decr(abs(x), 1)
#' @name curvature-comp
NULL
#' @rdname curvature-comp
#' @export
setGeneric("is_incr", function(object, idx) { standardGeneric("is_incr") })
#' @rdname curvature-comp
#' @export
setGeneric("is_decr", function(object, idx) { standardGeneric("is_decr") })
#'
#' Graph Implementation
#'
#' Reduces the atom to an affine expression and list of constraints.
#'
#' @param object An \linkS4class{Expression} object.
#' @param arg_objs A list of linear expressions for each argument.
#' @param size A vector with two elements representing the size of the resulting expression.
#' @param data A list of additional data required by the atom.
#' @return A list of \code{list(LinOp for objective, list of constraints)}, where LinOp is a list representing the linear operator.
#' @docType methods
#' @rdname graph_implementation
setGeneric("graph_implementation", function(object, arg_objs, size, data) { standardGeneric("graph_implementation") })
#'
#' Identification Number
#'
#' A unique identification number used internally to keep track of variables and constraints. Should not be modified by the user.
#'
#' @param object A \linkS4class{Variable} or \linkS4class{Constraint} object.
#' @return A non-negative integer identifier.
#' @seealso \code{\link[CVXR]{get_id}} \code{\link[CVXR]{setIdCounter}}
#' @examples
#' x <- Variable()
#' constr <- (x >= 5)
#' id(x)
#' id(constr)
#' @docType methods
#' @rdname id
#' @export
setGeneric("id", function(object) { standardGeneric("id") })
#'
#' Constraint Residual
#'
#' The residual expression of a constraint, i.e. the amount by which it is violated, and the value of that violation.
#' For instance, if our constraint is \eqn{g(x) \leq 0}, the residual is \eqn{max(g(x), 0)} applied elementwise.
#'
#' @param object A \linkS4class{Constraint} object.
#' @return A \linkS4class{Expression} representing the residual, or the value of this expression.
#' @docType methods
#' @name residual-methods
NULL
#' @rdname residual-methods
setGeneric("residual", function(object) { standardGeneric("residual") })
#' @rdname residual-methods
setGeneric("violation", function(object) { standardGeneric("violation") })
#'
#' Second-Order Cone Methods
#'
#' The number of elementwise cones or the size of a single cone in a second-order cone constraint.
#'
#' @param object An \linkS4class{SOCAxis} object.
#' @return The number of cones, or the size of a cone.
#' @docType methods
#' @name cone-methods
NULL
#' @rdname cone-methods
setGeneric("num_cones", function(object) { standardGeneric("num_cones") })
#' @rdname cone-methods
setGeneric("cone_size", function(object) { standardGeneric("cone_size") })
#
# Dual Value
#
# The value of the dual variable in a constraint.
#
# @param object A \linkS4class{Constraint} object.
# @return The numeric value of the dual variable. Defaults to \code{NA} if unknown.
# @rdname dual_value
setGeneric("dual_value", function(object) { standardGeneric("dual_value") })
#'
#' Format Constraints
#'
#' Format constraints for the solver.
#'
#' @param object A \linkS4class{Constraint} object.
#' @param eq_constr A list of the equality constraints in the canonical problem.
#' @param leq_constr A list of the inequality constraints in the canonical problem.
#' @param dims A list with the dimensions of the conic constraints.
#' @param solver A string representing the solver to be called.
#' @return A list containing equality constraints, inequality constraints, and dimensions.
#' @rdname format_constr
setGeneric("format_constr", function(object, eq_constr, leq_constr, dims, solver) { standardGeneric("format_constr") })
# Constraint generic methods
setGeneric("constr_type", function(object) { standardGeneric("constr_type") })
setGeneric("constr_id", function(object) { standardGeneric("constr_id") })
# Nonlinear constraint generic methods
setGeneric("block_add", function(object, mat, block, vert_offset, horiz_offset, rows, cols, vert_step, horiz_step) { standardGeneric("block_add") })
setGeneric("place_x0", function(object, big_x, var_offsets) { standardGeneric("place_x0") })
setGeneric("place_Df", function(object, big_Df, Df, var_offsets, vert_offset) { standardGeneric("place_Df") })
setGeneric("place_H", function(object, big_H, H, var_offsets) { standardGeneric("place_H") })
setGeneric("extract_variables", function(object, x, var_offsets) { standardGeneric("extract_variables") })
# Problem generic methods
#'
#' Parts of a Problem
#'
#' Get and set the objective, constraints, or size metrics (get only) of a problem.
#'
#' @param object A \linkS4class{Problem} object.
#' @param value The value to assign to the slot.
#' @return For getter functions, the requested slot of the object.
#' @examples
#' x <- Variable()
#' prob <- Problem(Minimize(x^2), list(x >= 5))
#' objective(prob)
#' constraints(prob)
#' size_metrics(prob)
#'
#' objective(prob) <- Maximize(sqrt(x))
#' constraints(prob) <- list(x <= 10)
#' objective(prob)
#' constraints(prob)
#' @name problem-parts
NULL
#' @rdname problem-parts
#' @export
setGeneric("objective", function(object) { standardGeneric("objective") })
#' @rdname problem-parts
#' @export
setGeneric("objective<-", function(object, value) { standardGeneric("objective<-") })
#' @rdname problem-parts
#' @export
setGeneric("constraints", function(object) { standardGeneric("constraints") })
#' @rdname problem-parts
#' @export
setGeneric("constraints<-", function(object, value) { standardGeneric("constraints<-") })
#' @rdname problem-parts
#' @export
setGeneric("size_metrics", function(object) { standardGeneric("size_metrics") })
setGeneric("status", function(object) { standardGeneric("status") })
setGeneric("status<-", function(object, value) { standardGeneric("status<-") })
setGeneric("solver_stats", function(object) { standardGeneric("solver_stats") })
setGeneric("solver_stats<-", function(object, value) { standardGeneric("solver_stats<-") })
#'
#' Get Problem Data
#'
#' Get the problem data used in the call to the solver.
#'
#' @param object A \linkS4class{Problem} object.
#' @param solver A string indicating the solver that the problem data is for. Call \code{installed_solvers()} to see all available.
#' @return A list of arguments for the solver.
#' @examples
#' a <- Variable(name = "a")
#' data <- get_problem_data(Problem(Maximize(exp(a) + 2)), "SCS")
#' data[["dims"]]
#' data[["c"]]
#' data[["A"]]
#'
#' x <- Variable(2, name = "x")
#' data <- get_problem_data(Problem(Minimize(p_norm(x) + 3)), "ECOS")
#' data[["dims"]]
#' data[["c"]]
#' data[["A"]]
#' data[["G"]]
#' @rdname get_problem_data
#' @export
setGeneric("get_problem_data", function(object, solver) { standardGeneric("get_problem_data") })
#'
#' Solve a DCP Problem
#'
#' Solve a DCP compliant optimization problem.
#'
#' @param object,a A \linkS4class{Problem} object.
#' @param solver,b (Optional) A string indicating the solver to use. Defaults to "ECOS".
#' @param ignore_dcp (Optional) A logical value indicating whether to override the DCP check for a problem.
#' @param warm_start (Optional) A logical value indicating whether the previous solver result should be used to warm start.
#' @param verbose (Optional) A logical value indicating whether to print additional solver output.
#' @param parallel (Optional) A logical value indicating whether to solve in parallel if the problem is separable.
#' @param ... Additional options that will be passed to the specific solver. In general, these options will override any default settings imposed by CVXR.
#' @return A list containing the solution to the problem:
#' \describe{
#' \item{\code{status}}{The status of the solution. Can be "optimal", "optimal_inaccurate", "infeasible", "infeasible_inaccurate", "unbounded", "unbounded_inaccurate", or "solver_error".}
#' \item{\code{value}}{The optimal value of the objective function.}
#' \item{\code{solver}}{The name of the solver.}
#' \item{\code{solve_time}}{The time (in seconds) it took for the solver to solve the problem.}
#' \item{\code{setup_time}}{The time (in seconds) it took for the solver to set up the problem.}
#' \item{\code{num_iters}}{The number of iterations the solver had to go through to find a solution.}
#' \item{\code{getValue}}{A function that takes a \linkS4class{Variable} object and retrieves its primal value.}
#' \item{\code{getDualValue}}{A function that takes a \linkS4class{Constraint} object and retrieves its dual value(s).}
#' }
#' @examples
#' a <- Variable(name = "a")
#' prob <- Problem(Minimize(norm_inf(a)), list(a >= 2))
#' result <- psolve(prob, solver = "ECOS", verbose = TRUE)
#' result$status
#' result$value
#' result$getValue(a)
#' result$getDualValue(constraints(prob)[[1]])
#' @docType methods
#' @aliases psolve solve
#' @rdname psolve
#' @export
setGeneric("psolve", function(object, solver, ignore_dcp = FALSE, warm_start = FALSE, verbose = FALSE, parallel = FALSE, ...) { standardGeneric("psolve") })
#'
#' Is Problem a QP?
#'
#' Determine if a problem is a quadratic program.
#'
#' @param object A \linkS4class{Problem} object.
#' @return A logical value indicating whether the problem is a quadratic program.
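#' @examples
#' ## A hedged sketch in the style of the other examples in this file:
#' ## a least-squares objective with linear constraints should be a QP.
#' x <- Variable(2)
#' prob <- Problem(Minimize(sum_squares(x)), list(x >= 0))
#' is_qp(prob)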
#' @docType methods
#' @rdname is_qp
#' @export
setGeneric("is_qp", function(object) { standardGeneric("is_qp") })
#'
#' Parse output from a solver and updates problem state
#'
#' Updates problem status, problem value, and primal and dual variable values
#'
#' @param object A \linkS4class{Problem} object.
#' @param solver A character string specifying the solver such as "ECOS", "SCS" etc.
#' @param results_dict the solver output
#' @return A list containing the solution to the problem:
#' \describe{
#' \item{\code{status}}{The status of the solution. Can be "optimal", "optimal_inaccurate", "infeasible", "infeasible_inaccurate", "unbounded", "unbounded_inaccurate", or "solver_error".}
#' \item{\code{value}}{The optimal value of the objective function.}
#' \item{\code{solver}}{The name of the solver.}
#' \item{\code{solve_time}}{The time (in seconds) it took for the solver to solve the problem.}
#' \item{\code{setup_time}}{The time (in seconds) it took for the solver to set up the problem.}
#' \item{\code{num_iters}}{The number of iterations the solver had to go through to find a solution.}
#' \item{\code{getValue}}{A function that takes a \linkS4class{Variable} object and retrieves its primal value.}
#' \item{\code{getDualValue}}{A function that takes a \linkS4class{Constraint} object and retrieves its dual value(s).}
#' }
#' @examples
#' \dontrun{
#' x <- Variable(2)
#' obj <- Minimize(x[1] + cvxr_norm(x, 1))
#' constraints <- list(x >= 2)
#' prob1 <- Problem(obj, constraints)
#' # Solve with ECOS.
#' ecos_data <- get_problem_data(prob1, "ECOS")
#' # Call ECOS solver interface directly
#' ecos_output <- ECOSolveR::ECOS_csolve(
#' c = ecos_data[["c"]],
#' G = ecos_data[["G"]],
#' h = ecos_data[["h"]],
#' dims = ecos_data[["dims"]],
#' A = ecos_data[["A"]],
#' b = ecos_data[["b"]]
#' )
#' # Unpack raw solver output.
#' res1 <- unpack_results(prob1, "ECOS", ecos_output)
#' # Without DCP validation (so be sure of your math), above is equivalent to:
#' # res1 <- solve(prob1, solver = "ECOS")
#' X <- Semidef(2)
#' Fmat <- rbind(c(1,0), c(0,-1))
#' obj <- Minimize(sum_squares(X - Fmat))
#' prob2 <- Problem(obj)
#' scs_data <- get_problem_data(prob2, "SCS")
#' scs_output <- scs::scs(
#' A = scs_data[['A']],
#' b = scs_data[['b']],
#' obj = scs_data[['c']],
#' cone = scs_data[['dims']]
#' )
#' res2 <- unpack_results(prob2, "SCS", scs_output)
#' # Without DCP validation (so be sure of your math), above is equivalent to:
#' # res2 <- solve(prob2, solver = "SCS")
#' }
#' @docType methods
#' @rdname unpack_results
#' @export
setGeneric("unpack_results", function(object, solver, results_dict) { standardGeneric("unpack_results") })
setGeneric(".handle_no_solution", function(object, status) { standardGeneric(".handle_no_solution") })
setGeneric("Problem.save_values", function(object, result_vec, objstore, offset_map) { standardGeneric("Problem.save_values") })
setGeneric(".save_dual_values", function(object, result_vec, constraints, constr_types) { standardGeneric(".save_dual_values") })
setGeneric(".update_problem_state", function(object, results_dict, sym_data, solver) { standardGeneric(".update_problem_state") })
# Problem data generic methods
setGeneric("get_objective", function(object) { standardGeneric("get_objective") })
setGeneric("get_eq_constr", function(object) { standardGeneric("get_eq_constr") })
setGeneric("get_ineq_constr", function(object) { standardGeneric("get_ineq_constr") })
setGeneric("get_nonlin_constr", function(object) { standardGeneric("get_nonlin_constr") })
# Solver generic methods
#'
#' Import Solver
#'
#' Import the R library that interfaces with the specified solver.
#'
#' @param solver A \linkS4class{Solver} object.
#' @examples
#' import_solver(ECOS())
#' import_solver(SCS())
#' @rdname import_solver
#' @docType methods
#' @export
setGeneric("import_solver", function(solver) { standardGeneric("import_solver") })
setGeneric("is_installed", function(solver) { standardGeneric("is_installed") })
#
# Choose MOSEK Solution
#
# Chooses between the basic and interior point solution from MOSEK. Solutions are ranked optimal > near_optimal > anything else > None.
# As long as interior solution is not worse, take it (for backward compatibility).
#
# @param solver A \linkS4class{MOSEK} object.
# @param results_dict A list of the results returned by the solver.
# @return A list containing the preferred solution (\code{solist}) and status of the preferred solution (\code{solsta}).
# @rdname choose_solution
# setGeneric("choose_solution", function(solver, results_dict) { standardGeneric("choose_solution") })
setGeneric("nonlin_constr", function(solver) { standardGeneric("nonlin_constr") })
#'
#' Validate Solver
#'
#' Raises an exception if the solver cannot solve the problem.
#'
#' @param solver A \linkS4class{Solver} object.
#' @param constraints A list of canonicalized constraints
#' @docType methods
#' @rdname validate_solver
setGeneric("validate_solver", function(solver, constraints) { standardGeneric("validate_solver") })
#
# Validate Cache
#
# Clears the cache if the objective or constraints changed.
#
# @param solver A \linkS4class{Solver} object.
# @param objective A list representing the canonicalized objective.
# @param constraints A list of canonicalized constraints.
# @param cached_data A list mapping solver name to cached problem data.
# @return The updated \code{cached_data}.
setGeneric("validate_cache", function(solver, objective, constraints, cached_data) { standardGeneric("validate_cache") })
#
# Get Symbolic Data
#
# Returns the symbolic data for the problem.
#
# @param solver A \linkS4class{Solver} object.
# @param objective A list representing the canonicalized objective.
# @param constraints A list of canonicalized constraints.
# @param cached_data A list mapping solver name to cached problem data.
# @return A \linkS4class{SymData} object holding the symbolic data for the problem.
setGeneric("get_sym_data", function(solver, objective, constraints, cached_data) { standardGeneric("get_sym_data") })
#
# Get Matrix Data
#
# Returns the numeric data for the problem.
#
# @param solver A \linkS4class{Solver} object.
# @param objective A list representing the canonicalized objective.
# @param constraints A list of canonicalized constraints.
# @param cached_data A list mapping solver name to cached problem data.
# @return An object holding the numeric (matrix) data for the problem.
setGeneric("get_matrix_data", function(solver, objective, constraints, cached_data) { standardGeneric("get_matrix_data") })
#
# Solver: Get Problem Data
#
# Returns the argument for the call to the solver.
#
# @param solver A \linkS4class{Solver} object.
# @param objective A list representing the canonicalized objective.
# @param constraints A list of canonicalized constraints.
# @param cached_data A list mapping solver name to cached problem data.
# @return A list of the arguments needed for the solver.
setGeneric("Solver.get_problem_data", function(solver, objective, constraints, cached_data) { standardGeneric("Solver.get_problem_data") })
#
# Extract Constraints
#
# Extracts the equality, inequality, and nonlinear constraints.
#
# @param solver A \linkS4class{Solver} object.
# @param constr_map A list of canonicalized constraints.
# @return A list of equality, inequality, and nonlinear constraints.
setGeneric("split_constr", function(solver, constr_map) { standardGeneric("split_constr") })
#'
#' Call to Solver
#'
#' Returns the result of the call to the solver.
#'
#' @param solver A \linkS4class{Solver} object.
#' @param objective A list representing the canonicalized objective.
#' @param constraints A list of canonicalized constraints.
#' @param cached_data A list mapping solver name to cached problem data.
#' @param warm_start A logical value indicating whether the previous solver result should be used to warm start.
#' @param verbose A logical value indicating whether to print solver output.
#' @param ... Additional arguments to the solver.
#' @return A list containing the status, optimal value, primal variable, and dual variables for the equality and inequality constraints.
#' @docType methods
#' @aliases Solver.solve
#' @rdname Solver-solve
setGeneric("Solver.solve", function(solver, objective, constraints, cached_data, warm_start, verbose, ...) { standardGeneric("Solver.solve") })
#'
#' Format Solver Results
#'
#' Converts the solver output into standard form.
#'
#' @param solver A \linkS4class{Solver} object.
#' @param results_dict A list containing the solver output.
#' @param data A list containing information about the problem.
#' @param cached_data A list mapping solver name to cached problem data.
#' @return A list containing the solver output in standard form.
#' @docType methods
#' @rdname format_results
setGeneric("format_results", function(solver, results_dict, data, cached_data) { standardGeneric("format_results") })
#'
#' Solver Capabilities
#'
#' Determine if a solver is capable of solving a linear program (LP), second-order cone program (SOCP), semidefinite program (SDP), exponential cone program (EXP), or mixed-integer program (MIP).
#'
#' @param solver A \linkS4class{Solver} object.
#' @return A logical value.
#' @examples
#' lp_capable(ECOS())
#' socp_capable(ECOS())
#' sdp_capable(ECOS())
#' exp_capable(ECOS())
#' mip_capable(ECOS())
#' @name Solver-capable
NULL
#' @rdname Solver-capable
#' @export
setGeneric("lp_capable", function(solver) { standardGeneric("lp_capable") })
#' @rdname Solver-capable
#' @export
setGeneric("socp_capable", function(solver) { standardGeneric("socp_capable") })
#' @rdname Solver-capable
#' @export
setGeneric("sdp_capable", function(solver) { standardGeneric("sdp_capable") })
#' @rdname Solver-capable
#' @export
setGeneric("exp_capable", function(solver) { standardGeneric("exp_capable") })
#' @rdname Solver-capable
#' @export
setGeneric("mip_capable", function(solver) { standardGeneric("mip_capable") })
# Map of solver status code to CVXR status.
setGeneric("status_map", function(solver, status) { standardGeneric("status_map") })
|
8b9dc6ed819d73a5ed42ca89013350991f3e9421
|
e0792d444ced51db843c659be798fad158ac1e93
|
/simulation/summary/preProcessiCOBRA.R
|
7d1355a774f976d7e2135901fd2fcb2437be357f
|
[] |
no_license
|
cfc424/rna-seq_tcComp
|
9cf34e9a3c04655a66dcdf5a70941599270c1eae
|
2146f415b37ccf35aeb3d3dacb4febb4a8183acc
|
refs/heads/master
| 2021-12-30T12:53:37.250504
| 2018-02-09T10:03:10
| 2018-02-09T10:03:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,101
|
r
|
preProcessiCOBRA.R
|
#!/bin/Rscript
############ path to directories ############
path <- "~/Documents/phd/data/simulation/study"
outPath <- paste0(path,"/summary")
system(paste0("mkdir -p ",outPath))
methods <- dir(file.path(path,"results"))[-c(4,6)] # exclude noise and GSE data set
files <- sub(paste0(methods[3],".txt"),"",list.files(file.path(path,"results",methods[3])))
############ writing ground truth file ############
## include category for fractioning
ids <- read.table(paste0(path,"/stats/SIM_IDs.txt"),stringsAsFactors=F)[3]
table <- cbind(ids,c(rep(1,1200),rep(0,nrow(ids)-1200)))
cat_time <- c(
rep(c(rep("early",100),rep("mid",100),rep("late",100)),2), rep(c(rep("early",50),rep("mid",50)),2), rep("mid",100), rep("grad",200), rep("mixed",100),rep("non-DEG",nrow(ids)-1200))
cat_type <- c(rep(c(rep("low",50),rep("high",50)),6), rep("slow",200),rep("fast",100), rep("grad",200), rep("slow",50),rep("fast",50),rep("non-DEG",nrow(ids)-1200))
truth <- cbind(table,cat_time,cat_type)
colnames(truth) <- c("feature","DEG","cat_time","cat_type")
write.table(truth, paste0(path,"/stats/SIM_truth.txt"), quote=FALSE, row.names=FALSE, sep="\t")
############ combine results for each kind of test ############
for (file in files)
{
## list of candidates identified by each method
results <- list()
for (type in methods)
{
comp <- paste0(path,"/results/",type,"/",file,type,".txt")
if (!file.exists(comp)) next
data <- cbind(read.table(comp,stringsAsFactors=F)[1],0.01)
colnames(data) <- c("feature",paste0(type,":adjP"))
results <- c(results,list(data))
names(results)[length(results)] <- type
}
outData <- Reduce(function(x,y) merge(x,y,by="feature",all=T),results)
## add negative IDs for complete graphs
N_IDs <- setdiff(truth$feature,outData$feature)
N <- cbind(N_IDs,data.frame((matrix(NA,nrow=length(N_IDs),ncol=ncol(outData)-1))))
colnames(N) <- colnames(outData)
outData <- rbind(outData,N)
outData[is.na(outData)] <- "1.00"
write.table(outData, paste0(outPath,"/",file,"results.txt"), quote=FALSE, row.names=FALSE, sep="\t")
}
|
51d888f2ac178c4532755e8137050cf0dfe4dc7a
|
0cc47aef60324272a954309ecc8cf08f2ad38813
|
/inst/doc/recommenderlab.R
|
4ef74a0269158b1b7d0f8a7423694aac1ee40f79
|
[] |
no_license
|
cran/recommenderlab
|
fff485dc29ebfe25ac47b90283ab4a5b6dcb9568
|
9c2d2f7f6d7e06094a2641c60a31e771c527668b
|
refs/heads/master
| 2023-06-28T00:47:49.557436
| 2023-06-20T21:00:02
| 2023-06-20T21:00:02
| 17,699,104
| 20
| 26
| null | 2018-05-22T16:33:16
| 2014-03-13T06:04:13
|
R
|
UTF-8
|
R
| false
| false
| 11,097
|
r
|
recommenderlab.R
|
### R code from vignette source 'recommenderlab.Rnw'
###################################################
### code chunk number 1: recommenderlab.Rnw:88-91
###################################################
options(scipen=3, digits=4, prompt="R> ", eps=FALSE, width=75)
### for sampling
set.seed(1234)
###################################################
### code chunk number 2: recommenderlab.Rnw:1107-1108
###################################################
library("recommenderlab")
###################################################
### code chunk number 3: recommenderlab.Rnw:1116-1121
###################################################
m <- matrix(sample(c(as.numeric(0:5), NA), 50,
replace=TRUE, prob=c(rep(.4/6,6),.6)), ncol=10,
dimnames=list(user=paste("u", 1:5, sep=''),
item=paste("i", 1:10, sep='')))
m
###################################################
### code chunk number 4: recommenderlab.Rnw:1129-1132
###################################################
r <- as(m, "realRatingMatrix")
r
getRatingMatrix(r)
###################################################
### code chunk number 5: recommenderlab.Rnw:1137-1138
###################################################
identical(as(r, "matrix"),m)
###################################################
### code chunk number 6: recommenderlab.Rnw:1145-1147
###################################################
as(r, "list")
head(as(r, "data.frame"))
###################################################
### code chunk number 7: recommenderlab.Rnw:1162-1165
###################################################
r_m <- normalize(r)
r_m
getRatingMatrix(r_m)
###################################################
### code chunk number 8: recommenderlab.Rnw:1169-1170
###################################################
denormalize(r_m)
###################################################
### code chunk number 9: recommenderlab.Rnw:1176-1178 (eval = FALSE)
###################################################
## image(r, main = "Raw Ratings")
## image(r_m, main = "Normalized Ratings")
###################################################
### code chunk number 10: image1
###################################################
print(image(r, main = "Raw Ratings"))
###################################################
### code chunk number 11: image2
###################################################
print(image(r_m, main = "Normalized Ratings"))
###################################################
### code chunk number 12: recommenderlab.Rnw:1211-1214
###################################################
r_b <- binarize(r, minRating=4)
r_b
as(r_b, "matrix")
###################################################
### code chunk number 13: recommenderlab.Rnw:1226-1228
###################################################
data(Jester5k)
Jester5k
###################################################
### code chunk number 14: recommenderlab.Rnw:1237-1240
###################################################
set.seed(1234)
r <- sample(Jester5k, 1000)
r
###################################################
### code chunk number 15: recommenderlab.Rnw:1247-1250
###################################################
rowCounts(r[1,])
as(r[1,], "list")
rowMeans(r[1,])
###################################################
### code chunk number 16: hist1
###################################################
hist(getRatings(r), breaks=100)
###################################################
### code chunk number 17: hist2
###################################################
hist(getRatings(normalize(r)), breaks=100)
###################################################
### code chunk number 18: hist3
###################################################
hist(getRatings(normalize(r, method="Z-score")), breaks=100)
###################################################
### code chunk number 19: hist4
###################################################
hist(rowCounts(r), breaks=50)
###################################################
### code chunk number 20: hist5
###################################################
hist(colMeans(r), breaks=20)
###################################################
### code chunk number 21: recommenderlab.Rnw:1339-1340
###################################################
recommenderRegistry$get_entries(dataType = "realRatingMatrix")
###################################################
### code chunk number 22: recommenderlab.Rnw:1348-1350
###################################################
r <- Recommender(Jester5k[1:1000], method = "POPULAR")
r
###################################################
### code chunk number 23: recommenderlab.Rnw:1354-1356
###################################################
names(getModel(r))
getModel(r)$topN
###################################################
### code chunk number 24: recommenderlab.Rnw:1371-1373
###################################################
recom <- predict(r, Jester5k[1001:1002], n=5)
recom
###################################################
### code chunk number 25: recommenderlab.Rnw:1378-1379
###################################################
as(recom, "list")
###################################################
### code chunk number 26: recommenderlab.Rnw:1385-1388
###################################################
recom3 <- bestN(recom, n = 3)
recom3
as(recom3, "list")
###################################################
### code chunk number 27: recommenderlab.Rnw:1396-1399
###################################################
recom <- predict(r, Jester5k[1001:1002], type="ratings")
recom
as(recom, "matrix")[,1:10]
###################################################
### code chunk number 28: recommenderlab.Rnw:1410-1413
###################################################
recom <- predict(r, Jester5k[1001:1002], type="ratingMatrix")
recom
as(recom, "matrix")[,1:10]
###################################################
### code chunk number 29: recommenderlab.Rnw:1428-1431
###################################################
e <- evaluationScheme(Jester5k[1:1000], method="split", train=0.9,
given=15, goodRating=5)
e
###################################################
### code chunk number 30: recommenderlab.Rnw:1437-1442
###################################################
r1 <- Recommender(getData(e, "train"), "UBCF")
r1
r2 <- Recommender(getData(e, "train"), "IBCF")
r2
###################################################
### code chunk number 31: recommenderlab.Rnw:1449-1453
###################################################
p1 <- predict(r1, getData(e, "known"), type="ratings")
p1
p2 <- predict(r2, getData(e, "known"), type="ratings")
p2
###################################################
### code chunk number 32: recommenderlab.Rnw:1459-1464
###################################################
error <- rbind(
UBCF = calcPredictionAccuracy(p1, getData(e, "unknown")),
IBCF = calcPredictionAccuracy(p2, getData(e, "unknown"))
)
error
###################################################
### code chunk number 33: recommenderlab.Rnw:1477-1480
###################################################
scheme <- evaluationScheme(Jester5k[1:1000], method="cross", k=4, given=3,
goodRating=5)
scheme
###################################################
### code chunk number 34: recommenderlab.Rnw:1487-1490
###################################################
results <- evaluate(scheme, method="POPULAR", type = "topNList",
n=c(1,3,5,10,15,20))
results
###################################################
### code chunk number 35: recommenderlab.Rnw:1501-1502
###################################################
getConfusionMatrix(results)[[1]]
###################################################
### code chunk number 36: recommenderlab.Rnw:1514-1515
###################################################
avg(results)
###################################################
### code chunk number 37: roc1
###################################################
plot(results, annotate=TRUE)
###################################################
### code chunk number 38: precrec1
###################################################
plot(results, "prec/rec", annotate=TRUE)
###################################################
### code chunk number 39: recommenderlab.Rnw:1562-1578
###################################################
set.seed(2016)
scheme <- evaluationScheme(Jester5k[1:1000], method="split", train = .9,
given=-5, goodRating=5)
scheme
algorithms <- list(
"random items" = list(name="RANDOM", param=NULL),
"popular items" = list(name="POPULAR", param=NULL),
"user-based CF" = list(name="UBCF", param=list(nn=50)),
"item-based CF" = list(name="IBCF", param=list(k=50)),
"SVD approximation" = list(name="SVD", param=list(k = 50))
)
## run algorithms
results <- evaluate(scheme, algorithms, type = "topNList",
n=c(1, 3, 5, 10, 15, 20))
###################################################
### code chunk number 40: recommenderlab.Rnw:1583-1584
###################################################
results
###################################################
### code chunk number 41: recommenderlab.Rnw:1590-1592
###################################################
names(results)
results[["user-based CF"]]
###################################################
### code chunk number 42: roc2
###################################################
plot(results, annotate=c(1,3), legend="bottomright")
###################################################
### code chunk number 43: precrec2
###################################################
plot(results, "prec/rec", annotate=3, legend="topleft")
###################################################
### code chunk number 44: recommenderlab.Rnw:1634-1636
###################################################
## run algorithms
results <- evaluate(scheme, algorithms, type = "ratings")
###################################################
### code chunk number 45: recommenderlab.Rnw:1641-1642
###################################################
results
###################################################
### code chunk number 46: real
###################################################
plot(results, ylim = c(0,100))
###################################################
### code chunk number 47: recommenderlab.Rnw:1664-1670
###################################################
Jester_binary <- binarize(Jester5k, minRating=5)
Jester_binary <- Jester_binary[rowCounts(Jester_binary)>20]
Jester_binary
scheme_binary <- evaluationScheme(Jester_binary[1:1000],
method="split", train=.9, k=1, given=3)
scheme_binary
###################################################
### code chunk number 48: recommenderlab.Rnw:1673-1675
###################################################
results_binary <- evaluate(scheme_binary, algorithms,
type = "topNList", n=c(1,3,5,10,15,20))
###################################################
### code chunk number 49: roc3
###################################################
plot(results_binary, annotate=c(1,3), legend="topright")
|
310460a48906d32a335a3aee27afa8a125a6d851
|
55544bfa2ef73067d9af918c0da41cec66ff368e
|
/R/globalstatsforgame.R
|
6732225a0957bcfd733f03360b0e34621bd74f5e
|
[] |
no_license
|
drewlake/steamR
|
c65fb79417fcb37394f54125dd7120e96ef83c63
|
29e31d537ed66d24e689788557a49dd1233e2ea9
|
refs/heads/master
| 2016-08-11T10:02:50.702583
| 2016-04-04T10:59:53
| 2016-04-04T10:59:53
| 45,043,830
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 586
|
r
|
globalstatsforgame.R
|
#' Global stats for a game
#'
#' Returns a list of global stats for a game
#' @export
#' @import data.table
#' @import rjson
#' @param gameid Steam game ID
#' @param stat name of the achievement or stat
glbstats <- function (gameid = "17740", stat = "global.map.emp_isle") {
json_file1 <-
"http://api.steampowered.com/ISteamUserStats/GetGlobalStatsForGame/v0001/?format=json&appid="
json_file2 <- "&count=1&name[0]="
json_file <-
paste(json_file1,gameid,json_file2,stat,sep = "")
  res <-
    suppressWarnings(fromJSON(paste(readLines(json_file), collapse = "")))
  res
}
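## Usage sketch (hits the live Steam Web API; the values are the defaults above):
## res <- glbstats(gameid = "17740", stat = "global.map.emp_isle")
## str(res)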
|
ad4746e559d413b3d4ecd068a3413d19c126249e
|
1f0025783a4e015ecb6d9b2a1a0451d948812240
|
/man/get_data_range.Rd
|
838589ee3e796c2bb591023e35f1e60ea7ad516e
|
[] |
no_license
|
dipterix/rutabaga
|
9965d33dc136da837af1e95067bf722efb3e3244
|
e47423870b34ca17a1736ce8d4fe146b2697de19
|
refs/heads/master
| 2022-07-08T00:19:08.625715
| 2022-06-22T23:03:12
| 2022-06-22T23:03:12
| 137,968,822
| 0
| 1
| null | 2021-09-05T23:07:15
| 2018-06-20T02:18:12
|
R
|
UTF-8
|
R
| false
| true
| 455
|
rd
|
get_data_range.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_helpers.R
\name{get_data_range}
\alias{get_data_range}
\title{Get Data Range From A Collection Of Named Lists
(questioning)}
\usage{
get_data_range(ll, name = "range", ...)
}
\arguments{
\item{ll}{list}
\item{name}{element name}
\item{...}{additional params for \link{get_list_elements}}
}
\description{
Get Data Range From A Collection Of Named Lists
(questioning)
}
|
cb756916125d626d516c25e5d70e1913064c32b7
|
8ef48830a3d4beef21187e60d8d79148df8a5e1c
|
/week_5/hw/speech.R
|
baf5d72dff63b8ac0ffe3403a02aeae66e2c1952
|
[] |
no_license
|
richlay/Rlanguage
|
cac987762a7d46e97366e89acce7b72735aa6c26
|
d9d0f1388a894c2f230392a014509ea937d95f93
|
refs/heads/master
| 2021-08-07T20:54:48.588550
| 2020-04-10T19:22:40
| 2020-04-10T19:22:40
| 148,768,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,422
|
r
|
speech.R
|
library(tm)
library(tmcn)
library(jiebaR)
library(stringr)
library("SnowballC")
library("wordcloud")
library("RColorBrewer")
library(Rwordseg)
cutter <- worker(stop_word = "停用詞.txt") #initialize jiebaR workers
#for (y in 91:107) {
# nam <- paste("speech", y, sep = "")
# assign(nam, readLines(paste(y, '.txt' , sep = ""), encoding = "UTF-8"))
#}
#docs <- cutter[speech107]
wordstop <- readLines("停用詞.txt", encoding= "UTF-8")
d.corpus <- Corpus(DirSource("txt",encoding = "UTF-8"), list(language = NA))
d.corpus <- tm_map(d.corpus, removePunctuation)
d.corpus <- tm_map(d.corpus[1:100], segmentCN, nature = TRUE)
d.corpus <- tm_map(d.corpus, function(sentence) {
noun <- lapply(sentence, function(w) {
w[names(w) == "n"]
})
unlist(noun)
})
d.corpus <- tm_map(d.corpus, removeWords, wordstop) # wordstop is already a character vector of stop words
d.corpus <- Corpus(VectorSource(d.corpus))
tdm = TermDocumentMatrix(d.corpus, control = list(wordLengths = c(2, Inf)))
inspect(tdm[1:10, 1:2])
## `docs` was produced by the commented-out block near the top of the script;
## reconstruct it here so the frequency table and word cloud below can run:
speech107 <- readLines("107.txt", encoding = "UTF-8")
docs <- cutter[speech107]
sort(table(docs), decreasing = TRUE)
tb <- table(docs)
library(plyr)
tableWord <- count(docs) #Equivalent to as.data.frame(table(x))
str(tableWord)
wordcloud(tableWord[,1], tableWord[,2], min.freq = 3, random.order = FALSE, rot.per = 0, colors = rainbow(nrow(tableWord)))
## Author: 拿笔的小鑫
## Link: https://www.jianshu.com/p/30460f38b774
## Source: Jianshu. Copyright belongs to the author; for any form of
## reproduction, please contact the author for permission and cite the source.
|
7f4fad061e6ba9e79ba912082e43b5e50121fca6
|
e4c6377ccbb4675ca75ded549de3a2e1cab721ed
|
/man/predict.fastVAR.VAR.Rd
|
b6f3a02e59827ace316bf7a9424f6fefac8016a9
|
[] |
no_license
|
skyu1226/fastVAR
|
090f71a54d7bc9b24f74b50fd818b18197030b00
|
a3f627267e916a3e2fd26189d8abc401f720be0d
|
refs/heads/master
| 2021-01-20T14:35:54.247635
| 2013-01-30T17:56:17
| 2013-01-30T17:56:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 602
|
rd
|
predict.fastVAR.VAR.Rd
|
\name{predict.fastVAR.VAR}
\alias{predict.fastVAR.VAR}
\title{VAR Predict}
\usage{
\method{predict}{fastVAR.VAR} (VAR, n.ahead, threshold,
...)
}
\arguments{
\item{VAR}{an object of class fastVAR.VAR returned from
VAR}
\item{n.ahead}{number of steps to predict}
\item{threshold}{threshold prediction values to be
greater than this value}
\item{...}{extra parameters to pass into the coefficients
method for objects of type fastVAR.VAR}
}
\description{
Predict n steps ahead from a fastVAR.VAR object
}
\examples{
data(Canada)
predict(VAR(Canada, p = 3, intercept = F), 1)
}
|
f63862dcc57da9cfce176888aa1b9964162f2500
|
0338a652966267cd47c13b74fce75d0d8b4dd335
|
/R/compile.R
|
6855cafee2ad0b9c618e4c342f8335ea02c960a6
|
[] |
no_license
|
duncantl/R2llvm
|
1fb3086b499b4d187491107923a9ff4eb064ef02
|
bbfe1b4eeadafbbee562fed97e177fde2471c303
|
refs/heads/master
| 2021-01-21T14:39:21.044144
| 2017-07-03T18:50:50
| 2017-07-03T18:50:50
| 95,319,119
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48,588
|
r
|
compile.R
|
## compile.R - compile R functions
# Some of this was inspired by Tierney's compiler package.
MathOps = c("+", "-", "*", "/", "%/%", "^")
LogicOps = c("<", ">", "<=", ">=", "!=", "==", "!")
getBasicType =
function(call)
{
id = as.character(call[[1]])
switch(id,
integer = Int32Type,
string = ,
character = StringType,
numeric = DoubleType,
logical = Int1Type,
float = FloatType)
}
isPrimitiveConstructor =
function(call)
{
if(is.call(call) && as.character(call[[1]]) %in% c("integer", "string", "character", "numeric", "logical", "float"))
TRUE
else
FALSE
}
setGeneric("isSubsettingAssignment", function(call) standardGeneric("isSubsettingAssignment"))
tmp =
function(call) {
length(call) > 1 &&
is.call(tmp <- call[[2]]) &&
as.character(tmp[[1]]) %in% c("[", "[[")
}
setMethod("isSubsettingAssignment", "call", tmp)
setMethod("isSubsettingAssignment", "=", tmp)
setMethod("isSubsettingAssignment", "<-", tmp)
setMethod("isSubsettingAssignment", "Assign", function(call) FALSE)
tmp =
function(call)
{
length(call$args) > 0 &&
is(tmp <- call$args[[1]], "Call") &&
as.character(tmp$fn$name) %in% c("[", "[[")
}
setOldClass("Call")
setOldClass("Replacement")
setMethod("isSubsettingAssignment", "Call", tmp)
setMethod("isSubsettingAssignment", "Replacement", function(call) TRUE)
assignHandler = `compile.=` = # `compile.<-`
# Second version here so I don't mess the other one up.
#
# XXX This is now getting too long. Break it up and streamline.
#
function(call, env, ir, ..., .targetType = NULL, .useHandler = TRUE)
{
# if(.useHandler && !is.na(i <- match(as.character(call[[1]]), names(env$.compilerHandlers))))
# return(env$.compilerHandlers[[i]](call, env, ir, ...))
if(is(call, "Replacement"))
call = asRCall(call)
if(is(call, "Call"))
args = call$args
else if(is(call, "Assign"))
args = list(call$write, call$read)
else
args = as.list(call[-1]) # drop the = or <-
stringLiteral = FALSE
type = NULL
if(is(args[[2]], "Symbol")) {
# Experimenting with mapping an SSA name to its basename
# when we have never allocated a variable for the ssa name.
# See tests2/ assignSubset.R and also list.R.
#
if(args[[2]]$basename %in% names(env$.params))
           args[[2]] = env$.params[[ args[[2]]$basename ]]
}
#XXX may not need to do this but maybe can compile the RHS as usual.
if(isSubsettingAssignment(call) && is(ty <- getElementAssignmentContainerType(args[[1]], env), "STRSXPType"))
return(assignToSEXPElement(args[[1]], args[[2]], env, ir, type = ty))
#!!!!!XXX The Replacement object is different from the way R represents the assignment
# In R, x[1L] = 10 is represented as =(x[1L], 10) - so a call with 2 arguments.
       # The Replacement class inherits from Call and the function being called is [<- and
# There are 3 arguments to this call x, 1L, and 10
# In an expression such as x[1, 2] = foo(3, 4)
# we get 4 arguments x, 1, 2 and foo(3, 4).
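       # Illustration (plain R, evaluated at the R level rather than compiled):
       #   e1 <- quote(x[1L] <- 10)       # class "<-": e1[[2]] is x[1L], e1[[3]] is 10
       #   e2 <- quote(`[<-`(x, 1L, 10))  # Replacement-style call: three args x, 1L, 10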
# look at the RHS - is this the RHS?
rhs = args[[length(args)]]
if(isLiteral(rhs)) { #!! these are the args, not the call - so first element is not = or <-, but the LHS.
# Do we need to eval() this. If it is really literal, no.
tmp = val = if(is(rhs, "Literal")) rhs$value else eval(rhs)
ctx = getContext(env$.module)
lhs.type = getDataType(args[[1]], env)
# What is this doing?
# So not a character, and we don't know the lhs type or we know it isn't
if( !is.character(tmp) && (is.null(lhs.type) || !sameType(DoubleType, lhs.type)) && env$.integerLiterals && val == floor(val) )
tmp = val = as.integer(val)
if(!is.null(lhs.type))
type = lhs.type
else
type = getDataType(I(val), env)
#XXX What if this is an expression??
val = makeConstant(ir, val, type, ctx)
type = getDataType(val, env)
if(is.character(tmp))
stringLiteral = TRUE
} else if(FALSE && isPrimitiveConstructor(args[[2]])) { # what does skipping this break? any code where we have an integer() or whatever and use it as int *
# so this is probably just defining a variable.
# Use the type of the RHS to create the variable.
# Perhaps just change compile.call to handle these functions
# specially and return a val.
#
# Need to know if we need to create a SEXP or just the corresponding native type
# i.e. an INTSXP or an int [n]
type = getBasicType(args[[2]])
val = NULL
} else
val = compile(args[[2]], env, ir)
if(is(args[[1]], "Symbol"))
args[[1]] = as.name(args[[1]]$name)
if(is.name(args[[1]])) {
var = as.character(args[[1]])
#XXXX Temp to check something
# We don't search parameters for the var name, since we don't
# want to try to assign over a parameter name.
#XXX I think we do want to mimic that behaviour but understand which local variable that corresponds to.
ref <- getVariable(var, env, ir, load = FALSE, search.params = FALSE)
if(is.null(ref)) {
# No existing variable; detect type and create one.
if(is.null(type))
type = env$.localVarTypes[[var]]
if(is.null(type))
type = env$.types[[var]]
if(is.null(type))
type = getDataType(var, env)
# didn't get a type from the variable, so look at the RHS.
if(is.null(type))
type = getDataType(val, env, args[[2]])
if (is.null(type)) {
# Variable not found in env or global environments; get type via Rllvm
if (is(val, "StoreInst")) {
# This is from the val = compile(); probably from a
# statement like: y <- 4L; x <- y. When args[[2]] is
# compiled above, getVariable returns an object of class
# StoreInst. We ignore the current val, and instead query
# the type from the variable.
type = getDataType(args[[2]], env)
}
if(is(val, "Value"))
type = getDataType(val, env)
}
#XXXX Merge with compile.character
if(stringLiteral) { # isStringType(type))
gvar = createGlobalVariable(sprintf(".%s", var), env$.module, type, val, TRUE, PrivateLinkage)
val = getGetElementPtr(gvar, ctx = ctx)
type = StringType
}
ref <- createFunctionVariable(type, var, env, ir)
env$newAlloc(var, ref)
#XXX assign(var, ref, envir = env)
## Todo fix type ???
if(!(var %in% names(env$.types)))
env$.types[[var]] = type
}
} else {
expr = args[[1]]
# XXX have to be a lot more general here, but okay to be simple for now (Apr 26 2013).
if(is(ty <- getElementAssignmentContainerType(expr, env), "SEXPType")
&& is.null(expr <- assignToSEXPElement(expr, val, env, ir, ty)))
return(val)
#XXXX What does removing load = FALSE affect. Find examples of where this breaks matters.
# fuseLoops?
ref = compile(expr, env, ir, ..., load = FALSE)
}
if(!is.null(val)) {
if(all(sapply(args, is.name))) {
# Temporary hack for dealing with ._return_1 = .return_1 in rw2d.R
tmp = env$.types[ sapply(args, as.character) ]
if(!sameType(tmp[[1]], tmp[[2]]))
val = createCast(env, ir, getElementType(type[[1]]), tmp[[2]], val)
else if(is(val, "AllocaInst"))
val = ir$createLoad(val)
} else if(!sameType(getType(val), getElementType(getType(ref)))) {
#XXX
#cat("fix this cast\n")
# val = Rllvm::createCast(ir, "SIToFP", val, getElementType(getType(ref)))
to = getElementType(getType(ref))
from = getType(val)
val = createCast(env, ir, to, from, val)
}
store = ir$createStore(val, ref)
if(!is.null(tmp <- attr(val, "zeroBasedCounting"))) {
attr(ref, "zeroBasedCounting") = tmp
if(is.name(call[[2]]))
env$.zeroBased[as.character(args[[1]])] = TRUE
}
}
# Probably don't want to set this anymore, or certainly not for x[1] but only when is.name(args[[1]])
setVarType(env, args[[1]], val)
val # return value - I (Vince) changed this to val from ans (the createStore() return).
# There seems to be very little we can do with the object of class
# StoreInst. Note: this seems to be the way it's done here
# too: http://llvm.org/docs/tutorial/LangImpl7.html
}
setVarType =
#
# Records the type of the RHS value for the variable named on the LHS
# in env$.localVarTypes, and optionally as module metadata.
#
function(env, lhs, rhs, meta = FALSE)
{
if(is.name(lhs)) {
lhs = as.character(lhs)
ty = getType(rhs)
if(!is.null(tmp <- attr(rhs, "RType")))
ty = get(tmp, globalenv(), inherits = TRUE)
env$.localVarTypes[[ lhs ]] = ty
if(meta)
setMetadata(env$.module, lhs, class(ty))
}
}
getElementAssignmentContainerType =
#
# called from just above for x[.....]
#
function(call, env)
{
var = if(is.name(call))
call
else {
if(is(call, "Call"))
call$args[[1]] # [[2]]
else
call[[2]]
}
getDataType(var, env)
}
createFunctionVariable =
#
# create a local variable, but put it in the entry block of the function
# rather than in the current block. The idea is that we don't
# want to allocate variables in a loop and so end up repeating that instruction.
#
function(type, id, env, ir)
{
cur = getInsertBlock(ir)
setInsertPoint(ir, env$.entryBlock)
on.exit(setInsertPoint(ir, cur))
var = createLocalVariable(ir, type, id, TRUE) # to insert before terminator.
env$newAlloc(id, var)
var
}
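# Illustration (hypothetical R input): for a loop such as
#    for(i in 1:n) { tmp = x[i] * 2; total = total + tmp }
# a naive translation would emit a fresh alloca for tmp on every iteration.
# By temporarily moving the IRBuilder's insert point to the entry block,
# createFunctionVariable() gives each local exactly one alloca, which is also
# the layout LLVM's mem2reg promotion pass expects.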
compile <-
function(e, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandler = TRUE)
{
if(is(e, "RC++Reference")) # for already compiled objects, i.e. Value.
return(e)
if(.useHandler && is.call(e)) {
tmp = dispatchCompilerHandlers(e, env$.compilerHandlers, env, ir, ...)
if(!is.null(tmp))
return(tmp)
}
# This doesn't always seem to dispatch on <-, i.e. in the code generated by rewriteSApply(). That is just a call.
if(is.call(e) && as.character(e[[1]]) %in% c("<-", "=", "<<-"))
`compile.=`(e, env, ir, ...)
else
UseMethod("compile")
}
compile.character =
# See varargs.R which is a call to printf() with a constant format
function(e, env, ir, ..., .targetType = NULL)
{
ctxt = getContext(env$.module)
ty = arrayType(Int8Type, nchar(e))
strVar = createGlobalVariable(".tmpString", env$.module, val = e, constant = TRUE, linkage = PrivateLinkage)
return(getGetElementPtr(strVar, ctx = ctxt))
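   # NB: the early return above makes the remaining lines unreachable; they
   # sketch an alternative that stores the string behind a pointer variable.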
ptrVar = createFunctionVariable(StringType, ".tmpStringPtr", env, ir)
setInitializer(ptrVar, strVar)
createLoad(ir, ptrVar)
}
`compile.{` = compileExpressions =
#
# This compiles a group of expressions.
# It handles moving from block to block with a block for
# each expression. <Is this still true? or is it more sophisticated about blocks now?>
function(exprs, env, ir, fun = env$.fun, name = getName(fun), .targetType = NULL, ..., afterBlock = NULL, nextBlock = NULL)
{
#insertReturn(exprs)
given.afterBlock = !missing(afterBlock)
if(as.character(exprs[[1]]) != "{")
compile(exprs, env, ir, fun = fun, name = name)
else {
oldVals = env$.remainingExpressions
on.exit(env$.remainingExpressions <- oldVals)
exprs = exprs[-1]
idx = seq_along(exprs)
for (i in idx) {
cur = ir$getInsertBlock()
if(length(getTerminator(cur)))
break
env$.remainingExpressions = exprs[ - (1:i) ]
pop = FALSE
if(is.call(exprs[[i]]) && (is(exprs[[i]], "if") || is(exprs[[i]], "for") || is(exprs[[i]], "while")) ) {
if(i < length(idx)) # length(afterBlock) == 0 &&
afterBlock = if(length(afterBlock)) afterBlock else Block(env$.fun, sprintf("after.%s", deparse(exprs[[i]])))
else {
afterBlock = nextBlock
if(is(exprs[[i]], "if")) {
# THIS SEEMS ugly. It handles the case of a while() { } where the last expression in the {}
# is an if() expr with no else.
# We need to return to the while() condition, not jump to nextBlock.
if(length(env$.loopStack) && env$.loopStack[1] == "while") {
tmp = list(...)$nextIterBlock
if(!is.null(tmp))
afterBlock = tmp
else
stop("probably something wrong!!!")
}
}
}
pop = TRUE
#pushNextBlock(env, afterBlock)
}
compile(exprs[[i]], env, ir, fun = fun, name = name, nextBlock = afterBlock, ...)
if(pop) {
# Do we setInsertBlock() for this next block?
#popNextBlock(env) # popping the wrong thing!
b = afterBlock
afterBlock = NULL
if(!is.null(b))
setInsertBlock(ir, b)
}
# # One approach to handling the lack of an explicit return is to
# # create the return instruction ourselves, or to add a return
# # around the call before we compile. The advantage of the latter
# # is that any code generation that we write to ensure the correct
# # return type on the expressions e.g. return(x + 1) will do the correct
# # thing on the x + 1 part, not later converting the value.
# if(i == idx[length(idx)] && !is.call(exprs[[i]]) || exprs[[i]][[1]] != as.name('return')) { # last one
# ir$createReturn(val)
# } else
# val
}
}
}
compile.name <-
function(e, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL)
{
getVariable(e, env, ir, searchR = TRUE, load = TRUE, ...)
}
compile.integer <-
function(e, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL)
{
if(length(e) == 1)
createIntegerConstant(e, type = .targetType)
else
stop("not compiling integer vector (multiple values) for now")
}
compile.logical <-
function(e, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL)
{
# compile(as(e, "integer"), env, ir, ..., fun = fun, name = name)
createLogicalConstant(e)
}
compile.numeric <-
function(e, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL)
{
if(length(e) == 1) {
if(length(.targetType))
createConstant(val = e, type = .targetType)
else
createDoubleConstant(e)
}
else
stop("not compiling numeric vector for now")
}
compile.Value <-
# This is just an LLVM value
function(e, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL)
e
if(FALSE) {
compile.ASTNode =
function(e, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL)
construct_ir(e, env, ir, env$.types)
}
compile.default <-
function(e, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL)
{
if(is(e, "("))
return(compile(e[[2]], env, ir, .targetType = .targetType))
if(is(e, "Value") || is(e, "Instruction"))
return(e)
if (is.call(e)) {
dispatchCompilerHandlers(e, env$.compilerHandlers, env, ir, ...)
} else if (is.symbol(e)) {
var <- as.character(e)
return(var) ## TODO: lookup here, or in OP function?
} else if(is.character(e))
return(compile.character(e, env, ir, ...))
else
stop("can't compile objects of class ", class(e))
}
dispatchCompilerHandlers =
#XXX Dispatch across lists.
function(e, handlers, env, ir, ...)
{
# Recursively compile arguments
call.op <- findCall(e[[1]], env$.compilerHandlers)
if(is.null(call.op))
return(NULL)
if(is.list(call.op)) {
for(f in call.op) {
tmp = f(e, env, ir, ...)
if(!is.null(tmp))
return(tmp)
}
} else {
if (typeof(call.op) != "closure" && is.na(call.op))
call.op = findCall("call", env$.compilerHandlers)
if(!is.list(call.op) && !is.function(call.op) && is.na(call.op))
return(NULL)
# XXX Dispatch across list here if we get back a list.
call.op(e, env, ir, ...)
}
}
findGlobals=
function(fun, merge = FALSE, ignoreDefaultArgs = TRUE)
{
ans = codetools::findGlobals(fun, merge)
if(!merge && ignoreDefaultArgs) {
formals(fun) = lapply(formals(fun), function(x) NULL)
ans$functions = codetools::findGlobals(fun, FALSE)$functions
}
ans
}
addArgSEXPTypeMetadata =
function(className, id, module)
{
name = sprintf("%s.SEXPType", id)
setMetadata(module, name, list("SEXPType", className))
}
addSEXPTypeMetadata =
function(module, argTypes)
{
k = sapply(argTypes, class)
i = (k != "SEXPType")
if(any(i))
mapply(addArgSEXPTypeMetadata, k[i], names(argTypes)[i], MoreArgs = list(module = module))
any(i)
}
compileFunction <-
function(fun,
cfg = to_cfg(fun),
types = infer_types(cfg),
returnType = return_type(types),
module = Module(name),
name = NULL,
compiler = makeCompileEnv(),
NAs = FALSE,
asFunction = FALSE, asList = FALSE,
optimize = TRUE, ...,
.functionInfo = list(...),
.routineInfo = list(),
.compilerHandlers = getCompilerHandlers(),
.globals = getGlobals(if(isClosure) fun else to_r(fun),
names(.CallableRFunctions),
.ignoreDefaultArgs, .assert = .assert, .debug = .debug), # would like to avoid processing default arguments.
# findGlobals(fun, merge = FALSE, .ignoreDefaultArgs),
.insertReturn = !identical(returnType, VoidType),
.builtInRoutines = getBuiltInRoutines(),
.constants = getConstants(),
.vectorize = character(), .execEngine = NULL,
structInfo = list(),
.ignoreDefaultArgs = TRUE,
.useFloat = FALSE,
.zeroBased = TRUE,
.localVarTypes = list(),
.fixIfAssign = TRUE,
.CallableRFunctions = list(),
.RGlobalVariables = character(),
.debug = TRUE, .assert = TRUE,
.addSymbolMetaData = TRUE,
.readOnly = constInputs(if(is(fun, "ASTNode")) eval(to_r(fun)) else fun),
.integerLiterals = TRUE,
.loadExternalRoutines = TRUE,
.rewriteAST = missing(cfg)
) # .duplicateParams = TRUE
{
if(missing(name))
name = deparse(substitute(fun))
if(is.logical(.assert))
.assert = if(.assert) ".assert" else character()
#this probably goes
if(!missing(types) && !is.list(types))
types = structure(list(types), names = names(formals(fun))[1])
if(is.logical(.execEngine) && .execEngine)
.execEngine = ExecutionEngine(module)
if(!missing(fun) && .fixIfAssign)
fun = fixIfAssign(fun)
isClosure <- typeof(fun) == "closure"
if (isClosure || is(fun, "Function")) {
      # In the case that rewriteAST changes the computational nature of the code,
# we compute the types from the original code. We can then augment the resulting
# types computed from the rewritten AST and CFG with any from the types computed here that are missing from the
# rewrites.
pre.types = infer_types(fun, error = FALSE)
if(.rewriteAST && missing(cfg)) {
# What if person specified the cfg?
if(is(fun, "ASTNode"))
ast = fun
else
ast = to_ast(fun)
rewriteAST(ast)
cfg = to_cfg(ast)
}
# Should this be before .rewriteAST?
if(missing(cfg) && .insertReturn && isClosure)
fun = insertReturn(fun) # do we need env??
# Doing this here because we need to insert the returns before the CFG and types.
#if there is no type information but the author put the type information on the function itself, use that.
.typeInfo = attr(fun, "llvmTypes")
if( (missing(returnType) || missing(types)) && !is.null(.typeInfo)) {
if(missing(types))
types = .typeInfo$parms
if(missing(returnType))
returnType = .typeInfo$returnType
}
# for checking against types; TODO
args <- if(isClosure) formals(fun) else fun$params
if(length(types) == 0 && length(args) > 0) {
types = getTypeInfo(fun)
returnType = types[[1]]
types = types[[2]]
}
# Merge pre.types with types
# We probably just want the parameters from pre.types.
ptypes = pre.types[names(args)]
a = intersect(names(pre.types), names(args))
m = !(a %in% names(types))
if(any(m))
types[ a[m] ] = pre.types[a[m]]
if(length(args) > length(types))
stop("need to specify the types for all of the arguments for the ", name, " function")
types = lapply(types, translate_type)
if(is(returnType, "typesys::list|Type"))
returnType = translate_type(returnType)
#XX Revisit when the switch to the new types is working
if(FALSE) {
# See if we have some SEXP types for which we may need to know the length.
# This might go as we can call Rf_length(). nrow()
rVecTypes = sapply(types, isRVectorType)
if(any(rVecTypes)) {
lengthVars = sprintf("_%s_length", names(types)[rVecTypes])
types[lengthVars] = replicate(length(lengthVars), Int32Type)
} else
lengthVars = character()
}
# Grab types, including return. Set up Function, block, and params.
isDimensionedType = sapply(types, is, "DimensionedType")
# The dimTypes need to have names or we need to be able to map them back to the particular arguments. They do!
if(any(isDimensionedType)) {
dimTypes = types[isDimensionedType]
types[isDimensionedType] = replicate(sum(isDimensionedType), SEXPType) #XXXX Not necessarily a SEXPType anymore.
} else
dimTypes = list()
# Create the LLVM Function.
# not all the types, just the parameter types and translate them from .
#??? Probably best to convert all the types to Rllvm types now and also to change
# the names of the parameters to SSA form now. Could change the type and CFG to use the parameter names for
# first reference to parameter rather than appending _1
argTypes <- getFunParamTypes(types, names(args))
llvm.fun <- Function(name, returnType, argTypes, module)
if(any( i <- sapply(argTypes, is, "SEXPType")))
addSEXPTypeMetadata(module, argTypes[i])
# if we picked up any .R() expressions in the function, add the resulting types
# to the .CallableRFunctions.
if(length(.globals$skippedExpressions) &&
(i <- names(.globals$skippedExpressions) == ".R")) {
z = structure(lapply(.globals$skippedExpressions[i],
function(x)
eval(x[[3]])),
                      names = sapply(.globals$skippedExpressions[i], function(x) as.character(x[[2]][[1]]))) # have to deal with obj$f() in x[[2]][[1]]
.CallableRFunctions = c(.CallableRFunctions, z)
}
if(any(.globals$functions %in% names(.builtInRoutines))) {
i = match(.globals$functions, names(.builtInRoutines), 0)
.routineInfo = .builtInRoutines[ i ]
.globals$functions = .globals$functions[i == 0]
}
if(length(.CallableRFunctions)) {
i = match(names(.CallableRFunctions), .globals$functions, 0)
.globals$functions = .globals$functions[ i == 0]
}
if(name %in% .globals$functions && !(name %in% names(.functionInfo)))
.functionInfo[[name]] = list(returnType = returnType, params = types)
cfg.blocks = cfg$blocks[ rev(rstatic::postorder(cfg)) ]
# names(cfg.blocks)[1] = "entry"
blocks = lapply(names(cfg.blocks), function(i) Block(llvm.fun, i))
names(blocks) = names(cfg.blocks)
params <- getParameters(llvm.fun) # TODO need to load these into nenv
# probably don't need this later but we do set it in the nenv for now.
block = blocks[[1]]
ir <- IRBuilder(block)
#XXX temporary to see if we should declare and load these individually when we encounter them
# Really need the user to specify the DLL not just the name in case of ambiguities, so often easier to do this separately.
if(.loadExternalRoutines && length(.routineInfo))
processExternalRoutines(module, .funcs = .routineInfo, .addMetaData = .addSymbolMetaData)
if(length(.globals$functions))
compileCalledFuncs(.globals, module, .functionInfo, optimize = FALSE)
if(length(.globals$variables)) {
.globals$variables = setdiff(.globals$variables, c(ExcludeGlobalVariables, .RGlobalVariables))
i = .globals$variables %in% names(module)
if(any(i)) {
#XXX should check that they are actual variables and not functions.
.globals$variables = .globals$variables[!i]
}
#XXX nenv & ir are not yet defined. What do we want here?
compileGlobalVariables(.globals$variables, module, nenv, ir)
}
compiler = compiler(.compilerHandlers, NAs, .builtInRoutines, .functionInfo, structInfo, .zeroBased,
.integerLiterals, .useFloat, .debug, .assert, .addSymbolMetaData, .CallableRFunctions,
compiler = compiler)
nenv = compiler
nenv$.fun = llvm.fun
nenv$.params = params
nenv$.types = types
nenv$.returnType = returnType
nenv$.entryBlock = block
nenv$.funName = name # name of the routine being compiled.
nenv$.ExecEngine = .execEngine
nenv$.module = module
nenv$.localVarTypes = .localVarTypes
nenv$.Constants = .constants
nenv$.dimensionedTypes = dimTypes
nenv$blocks = blocks
nenv$cfg.blocks = cfg.blocks
nenv$.cfg = cfg
# Doing gymnastics that should be done in rstatic. XXX Remove later
phiVarNames = unlist(findPhiAssignVarNames(cfg))
nenv$.phiVarInstructions = structure(vector("list", length(phiVarNames)), names = phiVarNames)
if(isClosure) fbody <- body(fun)
if(FALSE) {
# Will insertReturn fix this?
last = fbody[[length(fbody)]]
if(sameType(VoidType, returnType) && is.call(last) && as.character(last[[1]]) == "if" && length(last) == 3)
fbody[[ length(fbody) + 1L ]] = quote(return( ))
}
nenv$.Rfun = fun
if(length(.readOnly)) {
k = .readOnly
# mayMutate = setdiff(names(formals(fun)), k)
if(length(k)) {
idx = match(k, names(argTypes))
if(any(is.na(idx)))
stop("mismatch in parameter names and types")
mapply(function(type, arg) {
if(isPointerType(type))
setParamAttributes(arg, LLVMAttributes["ReadOnly"]) # 28L, TRUE)
}, argTypes[k], getFunctionArgs(llvm.fun)[idx])
}
}
### here we are going to work on the blocks in the call graph and use a different approach.
lapply(cfg.blocks, compileCFGBlock, types, nenv, ir, llvm.fun, blocks)
# Fix the phiForwardRefs. May need to find the nodes
# in additional places. Make separate function.
w = ( names(nenv$.phiForwardRefs) %in% names(params))
if(any(w))
nenv$.phiVarInstructions[ names(nenv$.phiForwardRefs)[w] ] = params[names(nenv$.phiForwardRefs)[w]]
actualNodes = nenv$.phiVarInstructions[names(nenv$.phiForwardRefs)]
if(any(sapply(actualNodes, is.null)))
stop("Problem with phi node forward references")
mapply(replaceAllUsesWith,
nenv$.phiForwardRefs,
actualNodes)
#GONE compileExpressions(fbody, nenv, ir, llvm.fun, name)
# the second condition occurs when we have an if() with no else as the last expression
# in the function. We add a return() so the compiler handlers work, and then we don't
# add the return here.
# Could also check for a terminator with: getTerminator(getInsertBlock(ir))
if(FALSE) {
# Shouldn't need with CFG
if(identical(returnType, VoidType) && !identical(fbody[[length(fbody)]], quote(return())))
ir$createReturn()
}
if(length(nenv$.SetCallFuns)) {
# This is for the callbacks to R. We have to get the expressions for the callback to the module.
# We have a way to set them, another way to create them (although this doesn't handle complex arguments)
# We might eliminate lots of these and just go with serializing the expression and deserializing
# when it is needed.
lapply(nenv$.SetCallFuns,
function(x)
compileSetCall(x$var, x$name, module))
lapply(nenv$.SetCallFuns,
function(x)
compileCreateCallRoutine(nenv, ir, x$call, sprintf("create_%s", x$var), x$var))
if(!is.null(nenv$.ExecEngine)) # don't use .ee as this field in the compiler(env) may have been set as a side effect of compile()
lapply(nenv$.SetCallFuns,
function(x) {
.llvm( module[[x$name]], x$call, .ee = nenv$.ExecEngine)
})
lapply(nenv$.SetCallFuns,
function(x)
createDeserializeCall(nenv, ir, x$call, x$deserializeCallFun))
}
## This may ungracefully cause R to exit, but it's still
  ## preferable to the crash Optimize() creates on an unverified module
if(optimize && verifyModule(module))
Optimize(module, execEngine = .execEngine)
if(asFunction)
makeFunction(fun, llvm.fun, .vectorize = .vectorize, .execEngine = .execEngine, .lengthVars = lengthVars)
else if (asList)
list(mod = module, fun = llvm.fun, env = nenv)
else
llvm.fun
} else if(!isIntrinsic(name))
stop("compileFunction can currently only handle closures. Failing on ", name)
}
compilerStartFunction =
function(env, ir, name, retType, paramTypes = list())
{
if(name %in% names(env$.module))
f = env$.module[[name]]
else
f = Function(name, retType, paramTypes, module = env$.module)
b = Block(f, "createCallEntry")
ir$setInsertBlock(b)
env$.localVarTypes = list()
env$.returnType = if(missing(retType)) getReturnType(f) else retType # want the user to specify this to be able to distinguish different classes of SEXP types.
env$.fun = f
env$.entryBlock = b # vital to set this so that the local variables go into this block.
# otherwise go into the entry block of the original function being compiled
# Need to generalize, e.g. add a method to the compiler to create a new Function
TRUE
}
Rf_routines = c("length")
RewrittenRoutineNames = c("numeric", "integer", "logical", "character", "list", "double")
mapRoutineName =
function(name)
{
w = name %in% Rf_routines
name[w] = sprintf("Rf_%s", name[w])
name
}
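# e.g. mapRoutineName(c("length", "printf")) yields c("Rf_length", "printf"):
# only the names listed in Rf_routines get the Rf_ prefix.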
isRVectorType =
# This is transient and intended to identify if the
# type corresponds to an R vector, rather than an R scalar.
# For now this is an arrayType(, 0), i.e. with zero elements
function(type)
{
is(type, "ArrayType") && getNumElements(type) == 0
}
processExternalRoutines =
function(mod, ..., .funcs = list(...), .lookup = TRUE, .addMetaData = TRUE)
{
names(.funcs) = mapRoutineName(names(.funcs))
w = !duplicated(names(.funcs))
.funcs = .funcs[w]
.funcs = .funcs[setdiff(names(.funcs) , RewrittenRoutineNames)]
ans = mapply(declareFunction, .funcs, names(.funcs), MoreArgs = list(mod))
if(.lookup) {
syms = lapply(names(.funcs),
function(x) {
info = tryCatch(getNativeSymbolInfo(x),
error = function(e)
as(x, "NativeSymbol"))
if(.addMetaData && is(info, "NativeSymbolInfo")) {
pkg = info$package
# do we need the path? yes if it is not part of a package.
# Some of these will be "wrong", i.e. too specific
# e.g. finding printf in RLLVMCompile since libc is linked to RLLVMCompile.so.
# But that is R's problem, i.e. should be fixed there or we should specify
# where it is in the registration information in this package for known
# external routines
setMetadata(mod, sprintf("symbolInfo.%s", info$name),
list("package", pkg[["name"]], "path", pkg[["path"]]))
}
if(is(info, "NativeSymbolInfo"))
info$address
else
info
})
llvmAddSymbol(.syms = structure( syms, names = names(.funcs)))
}
ans
}
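# Hypothetical usage: declare an external routine by giving its signature
# (return type first), then let the lookup resolve its address by name.
#   processExternalRoutines(mod, puts = list(Int32Type, StringType))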
getSymbolInfoMetadata =
function(module, id = character())
{
if(length(id) == 0) {
# get all the metadata
# get the names of all metadata, find those named symbolInfo\\..* and then call this function in an lapply()
all = getMetadata(module)
i = grepl("^symbolInfo\\.", names(all))
return(lapply(all[i], function(node) getSymbolInfoMetadata(module, node)))
}
if(length(id) > 1) {
ans = lapply(id, function(x) getSymbolInfoMetadata(module, x))
if(is.character(id))
            names(ans) = id
return(ans)
}
   md = if(is.character(id))
            getMetadata(module, sprintf("symbolInfo.%s", id))
        else
            id
if(is.null(md))
stop("no symbolInfo metadata for ", id)
a = md[[1]]
vals = names(a[])
i = seq(1, length(vals)-1, by = 2)
structure(vals[i+1], names = vals[i])
}
getConstants =
function(..., .defaults = ConstantInfo)
{
vals = list(...)
.defaults[names(vals)] = vals
.defaults
}
getBuiltInRoutines =
#
# See FunctionTypeInfo also
#
function(..., env = NULL, useFloat = FALSE)
{
if(!is.null(env) && exists(".builtInRoutines", env))
return(get(".builtInRoutines", env))
SEXPType = getSEXPType()
basic = if(useFloat)
list(exp = list(FloatType, FloatType),
log = list(FloatType, FloatType),
pow = list(FloatType, FloatType, FloatType),
sqrt = list(FloatType, FloatType))
else
list(exp = list(DoubleType, DoubleType),
log = list(DoubleType, DoubleType),
pow = list(DoubleType, DoubleType, DoubleType),
sqrt = list(DoubleType, DoubleType))
ans = list(
Rf_runif = list(DoubleType, DoubleType, DoubleType),
length = list(Int32Type, getSEXPType("REAL")),
Rf_length = list(Int32Type, getSEXPType("REAL")), # same as length. Should rewrite name length to Rf_length.
INTEGER = list(Int32PtrType, getSEXPType("INT")),
REAL = list(DoublePtrType, getSEXPType("REAL")),
Rf_allocVector = list(SEXPType, Int32Type, Int32Type), #XXXX 64 or 32 type depends on platform.
Rf_protect = list(VoidType, SEXPType),
Rf_unprotect = list(VoidType, Int32Type),
Rf_unprotect_ptr = list(VoidType, SEXPType),
R_PreserveObject = list(VoidType, SEXPType),
R_ReleaseObject = list(VoidType, SEXPType),
Rf_mkChar = list(getSEXPType("CHAR"), StringType),
Rf_PrintValue = list(VoidType, SEXPType),
STRING_ELT = list(getSEXPType("CHAR"), getSEXPType("STR"), Int32Type), # long vectors?
SET_STRING_ELT = list(SEXPType, getSEXPType("STR"), Int32Type, getSEXPType("CHAR")), # XXX may need different type for the index for long vector support.
SET_VECTOR_ELT = list(SEXPType, getSEXPType("VEC"), Int32Type, SEXPType), # XXX may need different type for the index for long vector support.
# R_SET_VECTOR_ELT = list(SEXPType, getSEXPType("VEC"), Int32Type, SEXPType), # XXX may need different type for the index for long vector support.
VECTOR_ELT = list(SEXPType, getSEXPType("VEC"), Int32Type),
# R_VECTOR_ELT = list(SEXPType, getSEXPType("VEC"), Int32Type),
SETCAR = list(SEXPType, SEXPType, SEXPType),
SETCDR = list(SEXPType, SEXPType, SEXPType),
SET_TAG = list(VoidType, SEXPType, SEXPType),
Rf_install = list(SEXPType, StringType),
CDR = list(SEXPType, SEXPType),
Rf_nrows = list(Int32Type, SEXPType),
Rf_ncols = list(Int32Type, SEXPType),
numeric = list(REALSXPType, Int32Type),
integer = list(INTSXPType, Int32Type),
logical = list(LGLSXPType, Int32Type),
             character = list(getSEXPType("STR"), Int32Type), # was LGLSXPType, an apparent copy/paste from the logical entry
Rf_ScalarInteger = list(SEXPType, Int32Type),
Rf_ScalarReal = list(SEXPType, DoubleType),
Rf_ScalarLogical = list(SEXPType, Int32Type),
Rf_mkString = list(SEXPType, StringType),
Rprintf = list(VoidType, StringType, "..." = TRUE),
printf = list(Int32Type, StringType, "..." = TRUE),
Rf_eval = list(SEXPType, SEXPType, SEXPType),
Rf_asInteger = list(Int32Type, SEXPType),
#XXX the following are not correct and need some thinking.
nrow = list(Int32Type, c("matrix", "data.frame")),
ncol = list(Int32Type, c("matrix", "data.frame")),
dim = list(quote(matrix(Int32Type, 2)), c("matrix", "data.frame")),
strdup = list(StringType, StringType),
R_CHAR = list(StringType, SEXPType),
R_loadRObjectFromString = list(SEXPType, StringType),
Rf_error = list(VoidType, StringType, "..." = TRUE),
R_raiseStructuredError = list(VoidType, StringType, pointerType(StringType), Int32Type),
R_va_raiseStructuredError = list(VoidType, StringType, Int32Type, "..." = TRUE) ,
memcpy = list(pointerType(VoidType), pointerType(VoidType), pointerType(VoidType), Int32Type),
strcmp = list(Int32Type, StringType, StringType),
             puts = list(Int32Type, StringType)
            )
ans[names(basic)] = basic
others = list(...)
ans[names(others)] = others
ans
}
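# Example (a sketch; hypot is a hypothetical external routine, not one of the
# defaults above): callers can extend or override the table by passing their
# own signatures, each a list of the return type followed by the argument types.
#   routines = getBuiltInRoutines(hypot = list(DoubleType, DoubleType, DoubleType))
#   routines$hypot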
# Should this just be names of CompilerHandlers? No, need more than those.
# Although we could add these items to CompilerHandlers and have them map
# to the existing handlers, e.g., sapply = ...
# But this is not a good idea. printf is just a regular call.
ExcludeCompileFuncs = c("{", "sqrt", "return", MathOps,
LogicOps, "||", "&&", # add more here &, |
":", "=", "<-", "<<-", "[<-", '[', "[[", "for", "if", "while",
"repeat", "(", "!", "^", "$", "$<-",
"sapply", "lapply",
"printf",
"break", "next",
".R", ".typeInfo", ".signature", ".varDecl", ".pragma",
".assert", ".debug",
"stop", "warning",
"logical", "integer", "numeric", "list",
"mkList"
) # for now
compileCalledFuncs =
#
# The .functionInfo
#
function(globalInfo, mod, .functionInfo = list(), ...)
{
funs = setdiff(globalInfo$functions, ExcludeCompileFuncs)
# Skip the ones we already have in the module.
# Possibly have different types!
funs = funs[!(funs %in% names(getModuleFunctions(mod))) ]
funs = funs[!(sapply(funs, isIntrinsic))]
funs = structure(lapply(funs, get), names = funs)
lapply(names(funs),
function(id) {
if(id %in% names(.functionInfo)) {
types = .functionInfo[[id]]
compileFunction(funs[[id]],
types$returnType,
types = types$params,
module = mod, name = id,
...
)
} else
compileFunction(funs[[id]], module = mod, name = id)
})
}
makeFunction =
function(fun, compiledFun, .vectorize = character(), .execEngine = NULL, .lengthVars = character())
{
e = new.env()
e$.fun = compiledFun
e$.irCode = showModule(compiledFun, TRUE)
if(is.null(.execEngine))
.execEngine = ExecutionEngine(as(compiledFun, "Module")) # evaluate this now or quote it.
args = c(as.name('.fun'), lapply(names(formals(fun)), as.name), as.name("..."))
k = call('.llvm')
k[2:(length(args) + 1)] = args
if(length(.lengthVars))
k[.lengthVars] = lapply(gsub("_(.*)_length", "\\1", .lengthVars),
function(id)
substitute(length(x), list(x = as.name(id))))
k[[".ee"]] = as.name('.ee')
formals(fun)$.ee = .execEngine
formals(fun) = c(formals(fun), formals(function(...){}))
body(fun) = k
environment(fun) = e
fun
}
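# Hypothetical usage: compile with asFunction = TRUE to get the callable R
# wrapper that makeFunction() builds around .llvm().
#   f = function(x) x + 1L
#   cf = compileFunction(f, types = list(x = Int32Type),
#                        returnType = Int32Type, asFunction = TRUE)
#   cf(41L)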
isMutableRObject =
function(var)
{
  where = sapply(var, function(x) find(x)[1])
  if(any(is.na(where)))
     stop("Cannot find variable", if(sum(is.na(where)) > 1) "s", " ", paste(var[is.na(where)], collapse = ", "))
  # A variable found in the global environment is potentially mutable;
  # one found in a package namespace is treated as constant.
  where == ".GlobalEnv"
}
compileGlobalVariables =
function(varNames, mod, env, ir,
mutable = sapply(varNames, isMutableRObject))
{
ctx = getGlobalContext()
sapply(varNames[!mutable],
function(var) {
val = createConstant(ir, get(var), context = ctx)
createGlobalVariable(var, val = val, mod, constant = TRUE)
})
#
#XX create variables for the mutable ones.
}
getTypeInfo =
function(fun)
{
b = body(fun)
if(is(b, "{"))
e = b[[2]]
else
e = b
if(is.call(e) && as.character(e[[1]]) == ".typeInfo")
eval(e) # , globalenv())
else
stop("no .typeInfo() call")
}
.typeInfo = .signature =
# We might process this as an unlisted collection and regroup them into list(returnType = , params = )
function(..., .types = list(...))
{
i = sapply(.types, function(x) is(x, "externalptr") || is(x, "Type"))
if(all(i))
.types = list(.types[[1]], .types[-1])
else
.types
}
.varDecl =
function(..., .types = list(...))
{
.types
}
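# A minimal sketch of the convention getTypeInfo() expects: the first
# expression of the body is a .typeInfo() call giving the return type
# followed by one type per parameter (assuming Rllvm's Int32Type is loaded).
#   incr = function(x) {
#      .typeInfo(Int32Type, x = Int32Type)
#      return(x + 1L)
#   }
#   getTypeInfo(incr)   # list(Int32Type, list(x = Int32Type))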
compile.Integer =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
{
createIntegerConstant(call$value, type = .targetType)
}
compile.Numeric =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
{
# Prototype for casting. Needs to be more comprehensive.
val = call$value
if(!is.null(.targetType)) {
if(sameType(.targetType, Int32Type))
val = as.integer(val)
}
createConstant(ir, val)
}
compile.Call =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
{
# FIXME: What if $fn is not a symbol?
idx = match(call$fn$name, names(env$.compilerHandlers))
if(is.na(idx))
compile.call(call, env, ir, .targetType = .targetType)
else
env$.compilerHandlers[[idx]](call, env, ir, .targetType = .targetType)
}
compile.BrTerminator =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
ir$createBr(call$dest)
compile.RetTerminator =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
{
stop("This needs to be fixed")
Rllvm::createReturn(ir, createLoad(ir, helper$alloc_table[["._return__1"]])) # helper$alloc_table from corsair. REPLACE.
}
compile.Assign =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
{
# For dealing with assignments that are actually for Phi nodes.
if(call$write$name %in% names(env$.phiVarInstructions)) {
.targetType = env$.types[[call$write$name]]
i = compile(call$read, env, ir, .targetType = .targetType)
env$.phiVarInstructions[[call$write$name]] = i
return(i)
}
# call = asRCall(call)
#call=node
return(`compile.=`(call, env, ir, .targetType = .targetType))
}
compile.Symbol =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
{
v = env$getAlloc(call$name)
if(is.null(v)) {
v = env$.params[[ call$name ]]
return(v)
}
#XXX Do we only load this if it is an AllocaInst?
if(!is(v, "PHINode"))
ir$createLoad( v )
else
v
}
compile.Replacement =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
{
   browser() # XXX debugging aid left in; remove once compile.Replacement is settled.
e = to_r(call)
var = call$write$basename
if(!( var %in% names(env$.params)) )
var = paste0(var, "_1")
e[[2]][[2]] = as.name(var)
return(`compile.=`(e, env, ir))
# idx = match(node$fn$name, names(cmp$.compilerHandlers))
# if (is.na(idx))
# compile(node, cmp, helper)
# else
# cmp$.compilerHandlers[[idx]](node, cmp, helper)
}
compile.Phi =
function(call, env, ir, ..., fun = env$.fun, name = getName(fun), .targetType = NULL, .useHandlers = TRUE)
{
#XXX Fix
node = call
cmp = env
helper = ir
types = env$.types
#XXXX
# Don't insert phis for shadowed globals.
# if ( any(is_global(node$read)) )
# return (NULL)
phiName = node$write$name
type = types[[node$write$name]]
numIncoming = length(node$blocks)
phi = Rllvm::createPhi(helper, type, numIncoming, id = node$write$name)
# Add the incoming. We have them in the node$blocks and in node$read
# We may not have created all of the incoming values for the phi nodes at this point
  # as the order of the CFG may put creation of the incoming after the Phi node
# into which it comes.
# So we have to make an incomplete Phi node and add its identity and what it is waiting
# for (i.e. the variable in the Assignment) and when we process that we check to see if
# we need to add it to any of the incomplete Phi nodes.
mapply(function(var, block) {
val = cmp$.phiVarInstructions[[ var$name ]]
if(is.null(val)) {
# so the intruction has not been processed yet.
# We create a dummy Value and then arrange to replace
# it with the actual instruction at the end of the module/
# routine construction. This is the purpose of
# the llvm function replaceAllUsesWith()
# This approach is what llvm uses itself when we create
# the c++ API code from a .ll file. i.e., we are copying
# that directly.
val = .Call("R_createFwdRef_for_phi", type)
cmp$.phiForwardRefs[[ var$name ]] = val
}
addIncoming(phi, val, block)
}, node$read, cmp$blocks[node$blocks])
if(node$write$name %in% names(cmp$.phiVarInstructions))
cmp$.phiVarInstructions[[ phiName ]] <- phi
else
cmp$.allocVars[[ phiName ]] <- phi
phi
}
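# Sketch of the patch-up step (mirroring the code near the end of
# compileFunction() above): once all blocks are compiled, each dummy forward
# reference is swapped for the real instruction via replaceAllUsesWith(),
# completing phi nodes built before their incoming values existed.
#   mapply(replaceAllUsesWith, nenv$.phiForwardRefs,
#          nenv$.phiVarInstructions[names(nenv$.phiForwardRefs)])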
#########################
#??? Kill off
XXXX.assignHandler =
function(call, env, ir)
{
args = call[-1]
val = compile(args[[2]], env, ir)
# CreateLocalVariable, with a reference in the
# environment, and then CreateStore of the value.
   checkArgs(args, list(c('character', 'symbol'), 'ANY'), '<-')
   # XXX may not be a variable
   var <- args[[1]]
   if(is.name(var))
      var <- as.character(var)
if (is.na(findVar(var, env))) {
# Create new local store, TODO remove the type here and infer it
type = getDataType(val, env)
assign(var, createFunctionVariable(type, var, env, ir), envir = env) ## Todo fix type
env$.types[[var]] = type
}
ref <- get(var, envir = env)
# Now, create store. TODO: how does this work *without*
# constants? Where is evaluation handled... probably not
# here?
createStore(ir, val, ref)
}
|
6817b313f26cf7d98259a7d70a5ba32e4c433caf
|
d5d8785c91ddab62b125aab6b4a740826127ae2b
|
/man/vcf2QTLseq.Rd
|
55373f894772a15499e19cc78381f9b96f21621e
|
[] |
no_license
|
shankarkshakya/mypackage
|
b7cd7e7bd220cce75b991713dc57ceb119ecbeb4
|
853b316d4d532dcb226584837801ed15f4c2983b
|
refs/heads/master
| 2023-07-26T04:00:11.923306
| 2021-03-09T21:45:29
| 2021-03-09T21:45:29
| 115,390,638
| 1
| 2
| null | 2021-09-09T19:55:22
| 2017-12-26T05:59:35
|
R
|
UTF-8
|
R
| false
| true
| 434
|
rd
|
vcf2QTLseq.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcf2QTLseq.R
\name{vcf2QTLseq}
\alias{vcf2QTLseq}
\title{Convert vcfR object to QTLseqR format.}
\usage{
vcf2QTLseq(vcf)
}
\arguments{
\item{vcf}{A vcfR object.}
}
\value{
A text file in QTLseqR format.
}
\description{
Converts a vcfR object to a text-format file that can be used by the QTLseqR package.
}
\details{
Converts the vcfR data format to a text file for use by QTLseqR.
}
|
9c06eb37b402cc6d106aa6cacb2399b20477a710
|
4cb5426e8432d4af8f6997c420520ffb29cefd3e
|
/R58.R
|
041c26c2930f4919dc522e02a73b34db77945a8e
|
[
"CC0-1.0"
] |
permissive
|
boyland-pf/MorpheusData
|
8e00e43573fc6a05ef37f4bfe82eee03bef8bc6f
|
10dfe4cd91ace1b26e93235bf9644b931233c497
|
refs/heads/master
| 2021-10-23T03:47:35.315995
| 2019-03-14T21:30:03
| 2019-03-14T21:30:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,252
|
r
|
R58.R
|
# making table data sets
library(dplyr)
library(tidyr)
library(MorpheusData)
#############benchmark 61
dat <- read.table(text=
"
ID p_2012 p_2010 p_2008 p_2006 c_2012 c_2010 c_2008 c_2006
1 160 162 163 165 37.3 37.3 37.1 37.1
2 163 164 164 163 2.6 2.6 2.6 2.6
", header=T)
write.csv(dat, "data-raw/r58_input1.csv", row.names=FALSE)
df_out = dat %>%
gather(key,value,-ID) %>%
separate(key,c("category","year")) %>%
spread(category,value)
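# The pipeline reshapes the wide table (one column per category/year pair)
# into one row per ID and year: gather() stacks the eight value columns,
# separate() splits keys such as "p_2012" into category "p" and year "2012",
# and spread() widens the category back out into c and p columns.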
write.csv(df_out, "data-raw/r58_output1.csv", row.names=FALSE)
r58_output1 <- read.csv("data-raw/r58_output1.csv", check.names = FALSE)
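# Normalize column classes so the saved .rdata matches the CSV: factor
# columns become character, integer columns become numeric (double).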
fctr.cols <- sapply(r58_output1, is.factor)
int.cols <- sapply(r58_output1, is.integer)
r58_output1[, fctr.cols] <- sapply(r58_output1[, fctr.cols], as.character)
r58_output1[, int.cols] <- sapply(r58_output1[, int.cols], as.numeric)
save(r58_output1, file = "data/r58_output1.rdata")
r58_input1 <- read.csv("data-raw/r58_input1.csv", check.names = FALSE)
fctr.cols <- sapply(r58_input1, is.factor)
int.cols <- sapply(r58_input1, is.integer)
r58_input1[, fctr.cols] <- sapply(r58_input1[, fctr.cols], as.character)
r58_input1[, int.cols] <- sapply(r58_input1[, int.cols], as.numeric)
save(r58_input1, file = "data/r58_input1.rdata")
|
bf0812f812801da13fd346d1a9a3612d57dd2f9e
|
1df29054dba27843aeb8b46286e9347dac7dd6b1
|
/WaterQuality/Scripts/RunSWQUpdate.R
|
08cf7afbc9f1062c567fa793212390b9e6f8a838
|
[] |
no_license
|
lukefullard/LAWA2021
|
9e40fab111493361de1a95a0572e05a7d9410a7a
|
db3528005791fba016b40cbd8dd268dba4ed2b6e
|
refs/heads/main
| 2023-07-19T16:00:29.563125
| 2021-09-27T19:50:27
| 2021-09-27T19:50:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 113
|
r
|
RunSWQUpdate.R
|
source("./scripts/SWQprepWFS.R")
source("./scripts/SWQloadAndCompile.R")
source("./scripts/SWQ_state.R")
|
456bc7175433147b242719ed9c500e3447c534a4
|
93c52470855de917e759adb0e42940133e52070b
|
/.ipynb_checkpoints/Help_Functions-checkpoint.r
|
61f8cf5262697db158dbaab10b3a9ab6c0153254
|
[] |
no_license
|
Vasco27/UCI_Heart_Disease
|
02b02cbc3f51c7de020882e647b2038bda5d111f
|
1cbc8dd14d6c7dcaf59097ec371fea8138e2ac87
|
refs/heads/master
| 2022-04-23T17:06:24.118452
| 2020-04-19T17:18:35
| 2020-04-19T17:18:35
| 255,373,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,690
|
r
|
Help_Functions-checkpoint.r
|
#Help functions
to_numeric = function(x) {
return(as.numeric(as.character(x)))
}
#Splits dataset into a train and test set with given percent split
train_test_split = function(data, train_size = 0.7) {
set.seed(1)
#train_idx = createDataPartition(data$target, p = train_size, list = FALSE, times = 1)
train_idx = sample(1:nrow(data), train_size*nrow(data))
tr = data[train_idx, ]
te = data[-train_idx, ]
return(list("train" = tr, "test" = te))
}
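# Hypothetical usage (any data frame with a target column works):
#   ret = train_test_split(uci_heart, train_size = 0.8)
#   train = ret$train; test = ret$test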
#Make logistic regression predictions; set probs = TRUE to return probabilities instead. With feature.selection = TRUE it also handles lasso/ridge (glmnet) models.
make_preds = function(model, data.test, threshold = 0.5, probs = FALSE, feature.selection = FALSE) {
if(feature.selection == TRUE) {
glm.probs = predict(model, newx = model.matrix(target ~ ., data = data.test)[, -1], type = "response")
} else {
glm.probs = predict(model, newdata = data.test, type = "response")
}
if(probs == TRUE) {
return(glm.probs)
}
    glm.preds = ifelse(glm.probs > threshold, 1, 0)
    return(factor(glm.preds)) # y.test is no longer needed here
}
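# Example (hypothetical fitted model and test set), using a stricter cutoff:
#   glm.fit = glm(target ~ ., data = train, family = binomial)
#   preds = make_preds(glm.fit, test, threshold = 0.6)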
#Predict for the lda model
lda_preds = function(model, x.test) {
lda.pred = predict(model, x.test)
return(lda.pred$class)
}
#Evaluate the various models passed as parameters and return their metrics (should be able to receive a function like "make_preds" for each type of model).
#If feature.selection = TRUE, models.reduced contains the feature selection models
evaluate_models = function(models.complete, models.reduced, predict_function = make_preds, feature.selection = FALSE, plot.width = 24, plot.height = 12, x.models = c("Complete", "Complete_step", "Reduced", "Reduced_step")) {
#train test spliting for both types of datasets
ret = train_test_split(uci_heart, train_size = 0.7); train.complete = ret$train; test.complete = ret$test
if(feature.selection == FALSE) {
ret = train_test_split(df.reduced, train_size = 0.7); train.reduced = ret$train ;test.reduced = ret$test
}
#It is the same for both datasets
y_train = train.complete$target; y_test = test.complete$target
    #Evaluate each model and retrieve the important metrics
recalls = c(); precisions = c(); f1scores = c(); specificities = c(); accs = c(); AUCs = c(); type = c()
i = 1
for(model in models.complete){
preds = predict_function(model, test.complete)
confm = caret::confusionMatrix(preds, y_test, positive = "1")
        recalls[i] = confm$byClass["Recall"]; precisions[i] = confm$byClass["Precision"]; f1scores[i] = confm$byClass["F1"]; specificities[i] = confm$byClass["Specificity"]; accs[i] = confm$overall["Accuracy"];
probs = predict_function(model, test.complete, probs = TRUE)
AUCs[i] = calc_AUC(probs, y_test)$auc
type[i] = "Complete"
i = i + 1
}
for(model in models.reduced) {
if(feature.selection == TRUE) {
preds = predict_function(model, test.complete, feature.selection = TRUE)
probs = predict_function(model, test.complete, feature.selection = TRUE, probs = TRUE)
type[i] = "Feature Selection"
} else {
preds = predict_function(model, test.reduced)
probs = predict_function(model, test.reduced, probs = TRUE)
type[i] = "Reduced"
}
confm = caret::confusionMatrix(preds, y_test, positive = "1")
        recalls[i] = confm$byClass["Recall"]; precisions[i] = confm$byClass["Precision"]; f1scores[i] = confm$byClass["F1"]; specificities[i] = confm$byClass["Specificity"]; accs[i] = confm$overall["Accuracy"];
AUCs[i] = calc_AUC(probs, y_test)$auc
i = i + 1
}
metrics = data.frame(Recall = recalls, Precision = precisions, F1 = f1scores, Specificity = specificities, Accuracy = accs, AUC = AUCs, Type = type, Models = x.models, row.names = NULL)
#visualization
print(visualize_metrics(metrics, plot.width, plot.height))
return(metrics)
}
#Visualize all the metrics of a model; the metrics data frame must have the structure [Metrics, Model Type, Model Name]
visualize_metrics = function(metrics, plot.width = 24, plot.height = 12) {
options(repr.plot.width = plot.width, repr.plot.height = plot.height)
#melt for facet wrap
metrics.melt = melt(metrics, id.vars = c("Models", "Type"), value.name = "Count", variable.name = "Variable")
p = ggplot(data = metrics.melt, aes(x = Models, y = Count)) +
geom_bar(aes(fill = Type), stat = "identity", width = 0.5, position = "dodge") + facet_wrap("Variable") +
geom_text(aes(label = round(Count, 2)), position = position_dodge(width = 0.9), vjust = 1.25, size = 10) +
scale_y_continuous(limits = c(0.65, 0.95), oob=rescale_none) +
labs(title = "Different metrics for each model", y = "Metric Percentage") +
theme(text = element_text(size = 20), plot.title = element_text(size = 30, face = "bold", hjust = 0.5), panel.grid.minor = element_blank(),
panel.grid.major = element_blank(), axis.text.x = element_text(angle = 45, vjust = 0.75))
return(p)
}
#Plots the ROC Curve of all the models in the parameters "models.complete" and "models.reduced".
#"x.models" must have the same size as the number of models passed.
plot_ROCCurve = function(models.complete, models.reduced, predict_function = make_preds, x.models = c("Complete", "Complete_step", "Reduced", "Reduced_step"), plots.display = c(2,2), plot.width = 24, plot.height = 12) {
options(repr.plot.width = plot.width, repr.plot.height = plot.height)
par(cex.main = 1.75, cex.axis = 1.35, cex.lab = 1.5, lwd = 2.5, mfrow = plots.display)
#train test spliting for both types of datasets
ret = train_test_split(uci_heart, train_size = 0.7); train.complete = ret$train; test.complete = ret$test
ret = train_test_split(df.reduced, train_size = 0.7); train.reduced = ret$train ;test.reduced = ret$test
#It is the same for both datasets
y_train = train.reduced$target; y_test = test.reduced$target
i = 1
for(model in models.complete) {
ret = calc_AUC(predict_function(model, test.complete, probs = TRUE), y_test)
perf <- performance(ret$pred,"tpr","fpr")
plot(perf, colorize = TRUE, main = paste("ROC Curve for", x.models[i], "\nAUC =", ret$auc))
i = i + 1
}
for(model in models.reduced) {
ret = calc_AUC(predict_function(model, test.reduced, probs = TRUE), y_test)
perf <- performance(ret$pred,"tpr","fpr")
plot(perf, colorize = TRUE, main = paste("ROC Curve for", x.models[i], "\nAUC =", ret$auc))
i = i + 1
}
}
#Calculate the AUC using the ROCR package
calc_AUC = function(y.pred, y.test) {
pred <- prediction(to_numeric(y.pred), y.test)
auc.tmp = performance(pred, "auc")
return(list("auc" = as.numeric(auc.tmp@y.values), "pred" = pred))
}
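# Example: given predicted probabilities and the true labels,
#   calc_AUC(probs, y_test)$auc
# returns the area under the ROC curve as a plain numeric.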
#Best prediction-probability threshold, according to the ROC curve cutoffs
roc_cutoff = function(predict_function = make_preds, model, x.test, y.test, tpr.threshold = 0.8, fpr.threshold = 0.2) {
pred = calc_AUC(predict_function(model, x.test, probs = TRUE), y.test)$pred
perf = performance(pred, "tpr", "fpr")
cutoffs = data.frame(cutoff = perf@alpha.values[[1]], fpr = perf@x.values[[1]], tpr = perf@y.values[[1]]) #get the cutoff, tpr and fpr values
cutoffs = cutoffs[order(cutoffs$tpr, decreasing = TRUE), ] #The best cutoff for the max tpr will be the first
return(cutoffs[(cutoffs$tpr >= tpr.threshold) & (cutoffs$fpr <= fpr.threshold), ])
}
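# Hypothetical usage: keep only cutoffs achieving TPR >= 0.85 at FPR <= 0.15
#   roc_cutoff(model = glm.fit, x.test = test, y.test = test$target,
#              tpr.threshold = 0.85, fpr.threshold = 0.15)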
|
0322e09f3eef755c35e7e6a5457cebe61c273536
|
a73d721a7fd75c2df60ed088208ed2899d0927ba
|
/HTSevents/archive/v1_0/populate_failure_modes.R
|
93264346d55aa172daabbac922ff943bb21c06d6
|
[] |
no_license
|
jpwalker625/tutorials
|
1d4c9e0bfe03128412518ec09065b1eb1d37191b
|
2553890f1d214558d2f6008793a26e38b2fd1179
|
refs/heads/master
| 2021-01-23T01:06:59.845623
| 2019-02-12T00:44:09
| 2019-02-12T00:44:09
| 85,870,953
| 0
| 0
| null | 2017-08-24T15:47:12
| 2017-03-22T20:04:09
|
HTML
|
UTF-8
|
R
| false
| false
| 507
|
r
|
populate_failure_modes.R
|
#!/usr/bin/env Rscript
#
# Jabus Tyerman
source("/var/shiny-server/www/HTSevents/load_packages.R")
source("/var/shiny-server/www/HTSevents/hts_functions.R")
wd <- "/var/shiny-server/www/HTSevents/"
setwd(wd)
x <- read.csv("HTS_events_failure_modes_list_jabus_2014-01-14.csv",stringsAsFactors = FALSE, header=TRUE)
for (i in 1:nrow(x)){
sql <- paste("INSERT INTO easybake.hts_et_failure_modes VALUES ('",x[i,1], "','", x[i,2], "','", x[i,3],"', NULL, '", x[i,5], "')", sep="")
res<- pull(sql)
}
|