content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## makeCacheMatrix and cacheSolve are a pair of functions that can
## be used together to store an invertible matrix along with its
## calculated inverse.
##
## Example:
##
## m <- matrix(c(rnorm(3), runif(3), rnorm(3, 1)), 3, 3)
## m
## cm <- makeCacheMatrix(m)
## cm$get()
## cacheSolve(cm)
## im <- cacheSolve(cm)
## im
## icm <- makeCacheMatrix(im)
## icm$get()
## cacheSolve(icm)
## cacheSolve(icm)
## Makes a cache-matrix encapsulating an invertible matrix and its
## inverse and providing accessor functions to both i.e.
##
## set
## get
## setinverse
## getinverse
##
## Using the $set accessor will clear the cached inverse, thus the
## inverse will need to be NULL-checked; it is recommended to
## access the inverse with the companion cacheSolve function.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of x; NULL until setinverse() stores a value, and reset
  # to NULL whenever the stored matrix is replaced via set().
  cachedInverse <- NULL
  set <- function(y) {
    x <<- y
    cachedInverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cachedInverse <<- inverse
  }
  getinverse <- function() {
    cachedInverse
  }
  # Accessor list; prefer cacheSolve() over calling getinverse() directly,
  # since the cached value is NULL until the inverse has been computed.
  list(set = set, get = get, getinverse = getinverse, setinverse = setinverse)
}
## Retrieves the inverse matrix of the provided cache-matrix,
## calculating and caching the inverse if it is not present.
##
## The solve function is used to calculate the matrix' inverse.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix stored in the
  ## cache-matrix 'x', reusing the cached inverse when one is available.
  ## Extra arguments in ... are forwarded to solve().
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: compute the inverse with solve() and store it for next time.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("Getting cached inverse")
  }
  cached
}
| /cachematrix.R | no_license | DarrenBishop/ProgrammingAssignment2 | R | false | false | 1,576 | r | ## makeCacheMatrix and cacheSolve are a pair of functions that can
## be used together to store an invertible matrix along with its
## calculated inverse.
##
## Example:
##
## m <- matrix(c(rnorm(3), runif(3), rnorm(3, 1)), 3, 3)
## m
## cm <- makeCacheMatrix(m)
## cm$get()
## cacheSolve(cm)
## im <- cacheSolve(cm)
## im
## icm <- makeCacheMatrix(im)
## icm$get()
## cacheSolve(icm)
## cacheSolve(icm)
## Makes a cache-matrix encapsulating an invertible matrix and its
## inverse and providing accessor functions to both i.e.
##
## set
## get
## setinverse
## getinverse
##
## Using the $set accessor will clear the cached inverse, thus the
## inverse will need to be NULL-checked; it is recommended to
## access the inverse with the companion cacheSolve function.
makeCacheMatrix <- function(x = matrix()) {
  # inv holds the cached inverse (NULL until cacheSolve() stores one).
  inv <- NULL
  # Replacing the matrix invalidates any previously cached inverse.
  set <- function(y) {
    x   <<- y
    inv <<- NULL
  }
  get        <- function() x
  setinverse <- function(value) inv <<- value
  getinverse <- function() inv
  # Expose the four accessors; callers should normally use cacheSolve()
  # rather than reading the inverse directly (it may still be NULL).
  list(set = set, get = get, getinverse = getinverse, setinverse = setinverse)
}
## Retrieves the inverse matrix of the provided cache-matrix,
## calculating and caching the inverse if it is not present.
##
## The solve function is used to calculate the matrix' inverse.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', computing it with solve()
  ## and caching it inside the cache-matrix on first use.
  result <- x$getinverse()
  if (!is.null(result)) {
    message("Getting cached inverse")
    return(result)
  }
  result <- solve(x$get(), ...)
  x$setinverse(result)
  result
}
|
# test.R: print the names of files in the working directory whose names match
# the pattern supplied as the last command-line argument (a leading "-" is
# stripped from it first), e.g.:  Rscript test.R -csv
args <- commandArgs(trailingOnly = F)
myargument <- args[length(args)]
filemask <- sub("-","",myargument)
print(filemask)
files <- dir(pattern = filemask)
# Bug fix: seq_along() yields an empty sequence when no file matches;
# the original 1:length(files) would wrongly iterate over c(1, 0).
for(i in seq_along(files)){
print(files[i])
} | /test.R | no_license | rsettlage/useful_R | R | false | false | 207 | r | args <- commandArgs(trailingOnly = F)
# Take the last command-line argument, strip its leading "-", and print every
# file in the working directory whose name matches it as a pattern.
myargument <- args[length(args)]
filemask <- sub("-","",myargument)
print(filemask)
files <- dir(pattern = filemask)
# Bug fix: seq_along() is empty when no file matches, whereas the original
# 1:length(files) iterated over c(1, 0) in that case.
for(i in seq_along(files)){
print(files[i])
} |
#######################
## PlotRandom.R
## Generate a random sample of IID normal values, plot their histogram, and
## overlay a vertical line at the sample mean.
##
## Inputs:  numpts    = number of points to generate
##          mu        = theoretical mean of the normal distribution
##          sigma     = theoretical standard deviation of the normal distribution
##          numbins   = number of bins in the histogram
##          meanColor = color of the overlaid mean line
##          seed      = random number generator seed, used to ensure reproducibility
## Outputs: a list with the following elements
##          Random_values = vector of the generated random normal values
##          Mean_x        = sample mean of Random_values
##          SD_x          = sample standard deviation of Random_values
plotRandomNormals <- function(numpts = 100,
                              mu = 0, sigma = 1,
                              numbins = 10,
                              meanColor = "navy",
                              seed = 10062021) {
  set.seed(seed)
  rand_x <- rnorm(numpts, mean = mu, sd = sigma)
  mean_x <- mean(rand_x)
  hist(rand_x, breaks = numbins)
  abline(v = mean_x, col = meanColor, lwd = 3)
  # Bug fix: the sample standard deviation must be computed over the sample,
  # not over the scalar mean -- sd(mean_x) is always NA.
  list(Random_values = rand_x,
       Mean_x = mean_x,
       SD_x = sd(rand_x))
}
| /PlotRandom.R | no_license | dwetzel7/PlotRandomNormalFunc | R | false | false | 1,227 | r | #######################
## PlotRandom.R
## Generate a random sample of IID normal values, plot their histogram, and
## overlay a vertical line at the sample mean.
##
## Inputs:  numpts    = number of points to generate
##          mu        = theoretical mean of the normal distribution
##          sigma     = theoretical standard deviation of the normal distribution
##          numbins   = number of bins in the histogram
##          meanColor = color of the overlaid mean line
##          seed      = random number generator seed, used to ensure reproducibility
## Outputs: a list with the following elements
##          Random_values = vector of the generated random normal values
##          Mean_x        = sample mean of Random_values
##          SD_x          = sample standard deviation of Random_values
plotRandomNormals <- function(numpts = 100,
                              mu = 0, sigma = 1,
                              numbins = 10,
                              meanColor = "navy",
                              seed = 10062021) {
  set.seed(seed)
  rand_x <- rnorm(numpts, mean = mu, sd = sigma)
  mean_x <- mean(rand_x)
  hist(rand_x, breaks = numbins)
  abline(v = mean_x, col = meanColor, lwd = 3)
  # Bug fix: the sample standard deviation must be computed over the sample,
  # not over the scalar mean -- sd(mean_x) is always NA.
  list(Random_values = rand_x,
       Mean_x = mean_x,
       SD_x = sd(rand_x))
}
|
\name{HET}
\alias{HET}
\title{
Heterozygosity Calculator
}
\description{
This function calculates the Expected Heterozygosity (HET; called PIC in earlier versions and in the paper describing this package) of a set of genotypes.
}
\usage{
HET(data)
}
\arguments{
\item{data}{
A matrix of genotypes, where each column is one individual, each row is one marker, and marker values are 1, 0, or -1, or NA, where 0 represents a heterozygous marker, and NA represents missing data. Note that this coding is different from the earlier PicCalc, which cannot handle heterozygous markers. All data in this matrix must be numeric.
}
}
\value{
The mean Heterozygosity of all markers for the given set of genotypes.
}
\author{
Ryan C. Graebner
}
\note{
The ability to recognize heterozygous markers was included in HET, resulting in a slightly different genotype coding scheme than the earlier PicCalc.
}
\examples{
data("genotypes")
HET(genotypes)
}
\keyword{ misc }
| /man/HET.Rd | no_license | cran/GeneticSubsetter | R | false | false | 960 | rd | \name{HET}
\alias{HET}
\title{
Heterozygosity Calculator
}
\description{
This function calculates the Expected Heterozygosity (HET; called PIC in earlier versions and in the paper describing this package) of a set of genotypes.
}
\usage{
HET(data)
}
\arguments{
\item{data}{
A matrix of genotypes, where each column is one individual, each row is one marker, and marker values are 1, 0, or -1, or NA, where 0 represents a heterozygous marker, and NA represents missing data. Note that this coding is different from the earlier PicCalc, which cannot handle heterozygous markers. All data in this matrix must be numeric.
}
}
\value{
The mean Heterozygosity of all markers for the given set of genotypes.
}
\author{
Ryan C. Graebner
}
\note{
The ability to recognize heterozygous markers was included in HET, resulting in a slightly different genotype coding scheme than the earlier PicCalc.
}
\examples{
data("genotypes")
HET(genotypes)
}
\keyword{ misc }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{AdaBoost.RQ}
\alias{AdaBoost.RQ}
\title{AdaBoost.RQ}
\usage{
AdaBoost.RQ(form, train, test, t_final = 100, power = 2, ...)
}
\arguments{
\item{form}{The model formula.}
\item{train}{A data.frame with the training data.}
\item{test}{A data.frame with the test data.}
\item{t_final}{The number of maximum boosting iterations. Default is 100.}
\item{power}{Type of loss function, e.g. linear (1), squared (2). Default is 2.}
\item{...}{Dots are passed to rpart}
}
\value{
Returns a vector with the predictions made by AdaBoost.RQ.
}
\description{
AdaBoost.RQ
}
\examples{
data(Boston,package="MASS")
idx <- sample(1:nrow(Boston),nrow(Boston)*0.75)
form <- medv ~ .
train <- Boston[idx,]
test <- Boston[-idx,]
preds <- AdaBoost.RQ(form,train,test)
}
\references{
}
| /man/AdaBoost.RQ.Rd | no_license | nunompmoniz/ReBoost | R | false | true | 866 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{AdaBoost.RQ}
\alias{AdaBoost.RQ}
\title{AdaBoost.RQ}
\usage{
AdaBoost.RQ(form, train, test, t_final = 100, power = 2, ...)
}
\arguments{
\item{form}{The model formula.}
\item{train}{A data.frame with the training data.}
\item{test}{A data.frame with the test data.}
\item{t_final}{The number of maximum boosting iterations. Default is 100.}
\item{power}{Type of loss function, e.g. linear (1), squared (2). Default is 2.}
\item{...}{Dots are passed to rpart}
}
\value{
Returns a vector with the predictions made by AdaBoost.RQ.
}
\description{
AdaBoost.RQ
}
\examples{
data(Boston,package="MASS")
idx <- sample(1:nrow(Boston),nrow(Boston)*0.75)
form <- medv ~ .
train <- Boston[idx,]
test <- Boston[-idx,]
preds <- AdaBoost.RQ(form,train,test)
}
\references{
}
|
# Author: Andrea Payne
#
# Fit a cross-validated LASSO (alpha = 1) regression of the COVID-19 death
# proportion on standardized covariates, report R-squared on the training
# data, and print the coefficients retained at lambda.min.
#
# NOTE(review): assumes `nonStandardizedRegressionMatrix` already exists in
# the session (it is created elsewhere, not in this script) -- confirm the
# upstream script has been run first.
library(tidyverse)
library(car)
library(glmnet)
# Running LASSO regression; fixed seed so the CV fold assignment is reproducible
set.seed(29)
# Creating the response variable
deathProportion = nonStandardizedRegressionMatrix$deathProportion
# Reducing covariates to a manageable level: drop the correlated proportion
# variables listed below (and the response itself), then standardize the rest
StandardizedRegressionMatrixWODeathProp = scale(nonStandardizedRegressionMatrix %>%
                                  select(-c(No_Certificate_Diploma_Degree_Proportion,
                                            Secondary_School_Or_Equivalent_Proportion,
                                            Trades_Certificate_Proportion,
                                            Certificate_Of_Apprenticeship_or_Qualification_Proportion,
                                            College_Or_CEGEP_Diploma_Proportion,
                                            University_Diploma_Below_Bachelor_Proportion,
                                            University_Bachelors_Degree_Proportion,
                                            University_Masters_Degree_Proportion,
                                            University_Earned_Doctorate_Proportion,
                                            Apartment_Greater_Or_Equal_To_Five_Storeys_Proportion,
                                            Apartment_Less_Than_Five_Storeys_Proportion,
                                            Multi_Census_Households_Proportion,
                                            LICOAT, deathProportion)))
# Running the cross-validated LASSO fit (gaussian response, L1 penalty)
cvLassoFit = cv.glmnet(StandardizedRegressionMatrixWODeathProp, deathProportion,
                       family = 'gaussian', alpha = 1)
# Plotting: CV error vs lambda, and the coefficient paths
plot(cvLassoFit)
plot(cvLassoFit$glmnet.fit)
# Predicting the death proportion at the lambda that minimizes CV error
deathPredicted <- predict(cvLassoFit, s = "lambda.min",
                          StandardizedRegressionMatrixWODeathProp)
# Sum of Squares Total and Error
sst <- sum((deathProportion- mean(deathProportion))^2)
sse <- sum((deathPredicted - deathProportion)^2)
# R squared (computed on the training data, so it is an optimistic estimate)
rsq <- 1 - (sse / sst)
rsq
# Printing the fitted coefficients at lambda.min.
# NOTE(review): the row index (1:33) hard-codes the expected coefficient count
# (intercept + predictors) -- confirm it matches the number of covariates kept.
(lassoCoef = predict(cvLassoFit, type = "coefficients", s = "lambda.min")[(1:33),])
| /scripts/LASSO Regression.R | no_license | Alex-Lehmann/SSC-COVID-19-Case-Study | R | false | false | 2,361 | r | # Author: Andrea Payne
library(tidyverse)
library(car)
library(glmnet)
# Running LASSO regression (alpha = 1); fixed seed so the CV folds are reproducible.
# NOTE(review): assumes `nonStandardizedRegressionMatrix` already exists in the
# session (it is created elsewhere, not in this script).
set.seed(29)
# Creating the response variable
deathProportion = nonStandardizedRegressionMatrix$deathProportion
# Reducing covariates to a manageable level: drop the correlated proportion
# variables listed below (and the response itself), then standardize the rest
StandardizedRegressionMatrixWODeathProp = scale(nonStandardizedRegressionMatrix %>%
                                  select(-c(No_Certificate_Diploma_Degree_Proportion,
                                            Secondary_School_Or_Equivalent_Proportion,
                                            Trades_Certificate_Proportion,
                                            Certificate_Of_Apprenticeship_or_Qualification_Proportion,
                                            College_Or_CEGEP_Diploma_Proportion,
                                            University_Diploma_Below_Bachelor_Proportion,
                                            University_Bachelors_Degree_Proportion,
                                            University_Masters_Degree_Proportion,
                                            University_Earned_Doctorate_Proportion,
                                            Apartment_Greater_Or_Equal_To_Five_Storeys_Proportion,
                                            Apartment_Less_Than_Five_Storeys_Proportion,
                                            Multi_Census_Households_Proportion,
                                            LICOAT, deathProportion)))
# Running the cross-validated LASSO fit (gaussian response, L1 penalty)
cvLassoFit = cv.glmnet(StandardizedRegressionMatrixWODeathProp, deathProportion,
                       family = 'gaussian', alpha = 1)
# Plotting: CV error vs lambda, and the coefficient paths
plot(cvLassoFit)
plot(cvLassoFit$glmnet.fit)
# Predicting the death proportion at the lambda that minimizes CV error
deathPredicted <- predict(cvLassoFit, s = "lambda.min",
                          StandardizedRegressionMatrixWODeathProp)
# Sum of Squares Total and Error
sst <- sum((deathProportion- mean(deathProportion))^2)
sse <- sum((deathPredicted - deathProportion)^2)
# R squared (computed on the training data, so it is an optimistic estimate)
rsq <- 1 - (sse / sst)
rsq
# Printing the fitted coefficients at lambda.min.
# NOTE(review): the row index (1:33) hard-codes the expected coefficient count
# (intercept + predictors) -- confirm it matches the number of covariates kept.
(lassoCoef = predict(cvLassoFit, type = "coefficients", s = "lambda.min")[(1:33),])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algorithm.R
\name{measure_cluster}
\alias{measure_cluster}
\title{Metrics of the cluster algorithm}
\usage{
measure_cluster()
}
\value{
list with the metrics
}
\description{
Metrics of the cluster algorithm
}
\keyword{internal}
| /man/measure_cluster.Rd | no_license | cran/Clustering | R | false | true | 306 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/algorithm.R
\name{measure_cluster}
\alias{measure_cluster}
\title{Metrics of the cluster algorithm}
\usage{
measure_cluster()
}
\value{
list with the metrics
}
\description{
Metrics of the cluster algorithm
}
\keyword{internal}
|
#' Time series A of the Santa Fe Time Series Competition
#'
#' A univariate time series derived from laser-generated data recorded from a
#' Far-Infrared-Laser in a chaotic state.
#'
#' The main benchmark of the Santa Fe Time Series Competition, time series A,
#' is composed of a clean low-dimensional nonlinear and stationary time series
#' with 1,000 observations. Competitors were asked to correctly predict the
#' next 100 observations (\code{\link{SantaFe.A.cont}}). The performance
#' evaluation done by the Santa Fe Competition was based on the NMSE errors of
#' prediction found by the competitors.
#'
#' @name SantaFe.A
#' @docType data
#' @format A data frame with 1000 observations on the following variable.
#' \describe{ \item{V1}{a numeric vector containing the observations of
#' the univariate time series A of the Santa Fe Time Series Competition.} }
#' @seealso \code{\link{SantaFe.A.cont}}, \code{\link{SantaFe.D}},
#' \code{\link{SantaFe.D.cont}} ~
#' @references A.S. Weigend, 1993, Time Series Prediction: Forecasting The
#' Future And Understanding The Past. Reading, MA, Westview Press.
#'
#' @keywords datasets Santa Fe Time Series Competition
#' @examples
#'
#' data(SantaFe.A)
#' str(SantaFe.A)
#' plot(ts(SantaFe.A))
#'
"SantaFe.A"
#> [1] "SantaFe.A" | /R/SantaFe.A.r | no_license | cran/TSPred | R | false | false | 1,319 | r | #' Time series A of the Santa Fe Time Series Competition
#'
#' A univariate time series derived from laser-generated data recorded from a
#' Far-Infrared-Laser in a chaotic state.
#'
#' The main benchmark of the Santa Fe Time Series Competition, time series A,
#' is composed of a clean low-dimensional nonlinear and stationary time series
#' with 1,000 observations. Competitors were asked to correctly predict the
#' next 100 observations (\code{\link{SantaFe.A.cont}}). The performance
#' evaluation done by the Santa Fe Competition was based on the NMSE errors of
#' prediction found by the competitors.
#'
#' @name SantaFe.A
#' @docType data
#' @format A data frame with 1000 observations on the following variable.
#' \describe{ \item{V1}{a numeric vector containing the observations of
#' the univariate time series A of the Santa Fe Time Series Competition.} }
#' @seealso \code{\link{SantaFe.A.cont}}, \code{\link{SantaFe.D}},
#' \code{\link{SantaFe.D.cont}} ~
#' @references A.S. Weigend, 1993, Time Series Prediction: Forecasting The
#' Future And Understanding The Past. Reading, MA, Westview Press.
#'
#' @keywords datasets Santa Fe Time Series Competition
#' @examples
#'
#' data(SantaFe.A)
#' str(SantaFe.A)
#' plot(ts(SantaFe.A))
#'
"SantaFe.A"
#> [1] "SantaFe.A" |
# Practical Machine Learning course project: predict the "classe" variable
# from accelerometer measurements using K-fold cross-validated gbm models,
# then write one submission file per test case.
# NOTE(review): hard-coded absolute path -- the script only runs as-is on the
# original author's machine; consider running from the project root instead.
setwd("~/Desktop/Courses_Online/Data_Science_Specialization/08_PracticalMachineLearning/08_august/course_project_github")
rm( list = ls() )
# Read the data from file; empty strings count as missing values
training <- read.csv("pml-training.csv", na.strings = c("NA",""))
testing <- read.csv("pml-testing.csv" , na.strings = c("NA",""))
# Cleaning the data: drop every column that has any NA in the test set.
# (A nonzero NA count is truthy, so !columns_with_NA keeps only fully
# observed columns.)
columns_with_NA <- colSums(is.na(testing))
training <- training[, !columns_with_NA ]
testing <- testing[ , !columns_with_NA ]
rm("columns_with_NA")
# Select variables:
# roll_belt, gyros_belt_x , accel_belt_x
# roll_arm, pitch_arm , yaw_arm
# roll_forearm, pitch_forearm , yaw_forearm
# roll_dumbbell, pitch_dumbbell, yaw_dumbbell
my_vars <- c(8,12,21,22,23,47,48,49, 34, 35, 36)
train_subset <- training[,c(60, my_vars)]   # column 60 is "classe"
test_subset <- testing[ ,c( my_vars)]
rm(my_vars)
# Cross Validation
library(caret)
set.seed(1)
K = 4 # it takes too long in my computer...
cv_subsets <- createFolds( y=train_subset$classe, k = K, list=TRUE, returnTrain=TRUE)
# Preallocate the result containers instead of growing them inside the loop
cv_models <- vector("list", K)
cv_accuracy <- numeric(K)
for (k in seq_len(K)) {
  cvk_train <- train_subset[ cv_subsets[[k]],]
  cvk_test <- train_subset[-cv_subsets[[k]],]
  # I use the "gbm" method to train the classifiers
  cv_models[[k]] <- train( classe ~ ., method="gbm", data=cvk_train, verbose=FALSE)
  cvk_classe <- predict(cv_models[[k]], cvk_test)
  cv_accuracy[k] <- sum(cvk_classe == cvk_test$classe)/length(cvk_classe)
}
# Out-of-sample accuracy.
# Bug fix: cv_accuracy is now a numeric vector, so plot() and mean() work;
# the original list version made mean() return NA (with a warning) and
# plot() fail.
plot(y=cv_accuracy,x=1:K)
mean(cv_accuracy)
# Submission information: predictions from the last cross-validation model.
# Bug fix: the original wrapped the predictions in list() and then called
# as.character() on the list, collapsing all predictions into one string;
# as.character() on the factor vector gives one answer per test case.
predicted_classe <- predict(cv_models[[K]], test_subset)
answers = as.character( predicted_classe )
pml_write_files = function(x){
  # Write each answer to its own problem_id_<i>.txt file for submission
  n = length(x)
  for(i in seq_len(n)){
    filename = paste0("problem_id_",i,".txt")
    write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
  }
}
pml_write_files(answers)
| /course_project_solution.R | no_license | DoloresZurdo/08_PracticalMachineLearning_CourseProject | R | false | false | 2,019 | r | setwd("~/Desktop/Courses_Online/Data_Science_Specialization/08_PracticalMachineLearning/08_august/course_project_github")
# Practical Machine Learning course project: predict the "classe" variable
# from accelerometer measurements using K-fold cross-validated gbm models,
# then write one submission file per test case.
rm( list = ls() )
# Read the data from file; empty strings count as missing values
training <- read.csv("pml-training.csv", na.strings = c("NA",""))
testing <- read.csv("pml-testing.csv" , na.strings = c("NA",""))
# Cleaning the data: drop every column that has any NA in the test set.
# (A nonzero NA count is truthy, so !columns_with_NA keeps only fully
# observed columns.)
columns_with_NA <- colSums(is.na(testing))
training <- training[, !columns_with_NA ]
testing <- testing[ , !columns_with_NA ]
rm("columns_with_NA")
# Select variables:
# roll_belt, gyros_belt_x , accel_belt_x
# roll_arm, pitch_arm , yaw_arm
# roll_forearm, pitch_forearm , yaw_forearm
# roll_dumbbell, pitch_dumbbell, yaw_dumbbell
my_vars <- c(8,12,21,22,23,47,48,49, 34, 35, 36)
train_subset <- training[,c(60, my_vars)]   # column 60 is "classe"
test_subset <- testing[ ,c( my_vars)]
rm(my_vars)
# Cross Validation
library(caret)
set.seed(1)
K = 4 # it takes too long in my computer...
cv_subsets <- createFolds( y=train_subset$classe, k = K, list=TRUE, returnTrain=TRUE)
# Preallocate the result containers instead of growing them inside the loop
cv_models <- vector("list", K)
cv_accuracy <- numeric(K)
for (k in seq_len(K)) {
  cvk_train <- train_subset[ cv_subsets[[k]],]
  cvk_test <- train_subset[-cv_subsets[[k]],]
  # I use the "gbm" method to train the classifiers
  cv_models[[k]] <- train( classe ~ ., method="gbm", data=cvk_train, verbose=FALSE)
  cvk_classe <- predict(cv_models[[k]], cvk_test)
  cv_accuracy[k] <- sum(cvk_classe == cvk_test$classe)/length(cvk_classe)
}
# Out-of-sample accuracy.
# Bug fix: cv_accuracy is now a numeric vector, so plot() and mean() work;
# the original list version made mean() return NA (with a warning) and
# plot() fail.
plot(y=cv_accuracy,x=1:K)
mean(cv_accuracy)
# Submission information: predictions from the last cross-validation model.
# Bug fix: the original wrapped the predictions in list() and then called
# as.character() on the list, collapsing all predictions into one string;
# as.character() on the factor vector gives one answer per test case.
predicted_classe <- predict(cv_models[[K]], test_subset)
answers = as.character( predicted_classe )
pml_write_files = function(x){
  # Write each answer to its own problem_id_<i>.txt file for submission
  n = length(x)
  for(i in seq_len(n)){
    filename = paste0("problem_id_",i,".txt")
    write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
  }
}
pml_write_files(answers)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crossingFunc.R
\name{randomMateAll}
\alias{randomMateAll}
\title{randomMateAll}
\usage{
randomMateAll(popSize, geno, pos)
}
\arguments{
\item{popSize}{the number of progeny to return}
\item{geno}{matrix of haplotypes}
\item{pos}{position of markers/QTLs}
}
\description{
randomMateAll
}
| /man/randomMateAll.Rd | no_license | jeanlucj/BreedingSchemeLanguage | R | false | true | 367 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crossingFunc.R
\name{randomMateAll}
\alias{randomMateAll}
\title{randomMateAll}
\usage{
randomMateAll(popSize, geno, pos)
}
\arguments{
\item{popSize}{the number of progeny to return}
\item{geno}{matrix of haplotypes}
\item{pos}{position of markers/QTLs}
}
\description{
randomMateAll
}
|
\name{Plot.clm.par}
\alias{Plot.clm.par}
\title{Plotting the estimated chain ladder parameters
}
\description{Show a plot with the estimates of the chain ladder parameters and the development factors
}
\usage{
Plot.clm.par( clm.par )
}
\arguments{
\item{clm.par }{A list object with the estimated chain ladder parameters: the value returned by the functions \code{clm}.
}
}
\value{ No returned value
}
\references{
Martinez-Miranda, M.D., Nielsen, J.P. and Verrall, R. (2012) Double Chain Ladder. \emph{Astin Bulletin}, 42/1, 59-76.
}
\author{
M.D. Martinez-Miranda, J.P. Nielsen and R. Verrall
}
\seealso{
\code{\link{clm}}, \code{\link{dcl.estimation}}, \code{\link{Plot.triangle}}
}
\examples{
data(NtriangleDCL)
my.clm.par<-clm(NtriangleDCL)
Plot.clm.par(my.clm.par)
}
\keyword{Graphics}
| /man/Plot.clm.par.Rd | no_license | cran/DCL | R | false | false | 835 | rd | \name{Plot.clm.par}
\alias{Plot.clm.par}
\title{Plotting the estimated chain ladder parameters
}
\description{Show a plot with the estimates of the chain ladder parameters and the development factors
}
\usage{
Plot.clm.par( clm.par )
}
\arguments{
\item{clm.par }{A list object with the estimated chain ladder parameters: the value returned by the functions \code{clm}.
}
}
\value{ No returned value
}
\references{
Martinez-Miranda, M.D., Nielsen, J.P. and Verrall, R. (2012) Double Chain Ladder. \emph{Astin Bulletin}, 42/1, 59-76.
}
\author{
M.D. Martinez-Miranda, J.P. Nielsen and R. Verrall
}
\seealso{
\code{\link{clm}}, \code{\link{dcl.estimation}}, \code{\link{Plot.triangle}}
}
\examples{
data(NtriangleDCL)
my.clm.par<-clm(NtriangleDCL)
Plot.clm.par(my.clm.par)
}
\keyword{Graphics}
|
# OutputBiallelicSingleSNPs.R
#
# Reads the single-SNP genotype table (one row per sample, first column =
# sample name, remaining columns = one SNP each) and writes two filtered
# tables:
#   TempFiles/AllBiallelicSNPsRaw.txt      - SNPs with exactly two genotypes
#   TempFiles/UnlinkedBiallelicSNPsRaw.txt - first SNP per locus (locus name
#                                            = column name with its ".<n>"
#                                            SNP index removed)
DataTableFromFile <- read.table("TempFiles/SingleSNPsAll.txt", header = TRUE, sep = "\t")
DataMatrix <- as.matrix(DataTableFromFile)
DataFrame <- data.frame(DataMatrix)
NumRowsInMatrix <- nrow(DataMatrix)
NumColsInMatrix <- ncol(DataMatrix)
NamesCol <- DataMatrix[, 1]

# ---- Biallelic SNPs: keep the columns with exactly two distinct genotypes ----
# First output column: empty header cell over the sample names.
BiallelicMatrix <- matrix(c("", NamesCol), ncol = 1, nrow = NumRowsInMatrix + 1)
for (a in 2:NumColsInMatrix) {       # column 1 holds sample names; skip it
  CurrentLocus <- colnames(DataFrame)[a]
  genotypes <- as.character(DataFrame[[a]])
  # unique() replaces the original O(rows x haplotypes) matching loops;
  # exactly two distinct values means the SNP is biallelic.
  if (length(unique(genotypes)) == 2) {
    BiallelicMatrix <- cbind(BiallelicMatrix, c(CurrentLocus, genotypes))
  }
}
write.table(BiallelicMatrix, file = "TempFiles/AllBiallelicSNPsRaw.txt", sep = "\t", row.names = FALSE, col.names = FALSE)

# ---- Unlinked SNPs: keep only the first SNP column seen for each locus ----
UnlinkedBiallelicMatrix <- matrix(c("", NamesCol), ncol = 1, nrow = NumRowsInMatrix + 1)
# Locus name = column name with its first ".<digits>" (the SNP index) removed,
# same substitution the original applied per column.
LocusOfColumn <- sub("\\.[0-9]+", "", colnames(DataFrame)[2:NumColsInMatrix], perl = TRUE)
# duplicated() preserves first occurrences, matching the original's
# "seen before?" loop over accumulated locus names.
for (j in which(!duplicated(LocusOfColumn))) {
  UnlinkedBiallelicMatrix <- cbind(UnlinkedBiallelicMatrix,
                                   c(LocusOfColumn[j], as.character(DataFrame[[j + 1]])))
}
write.table(UnlinkedBiallelicMatrix, file = "TempFiles/UnlinkedBiallelicSNPsRaw.txt", sep = "\t", row.names = FALSE, col.names = FALSE)
| /AftrRADv5.0.0.1/RScripts/OutputBiallelicSingleSNPs.R | no_license | mikesovic/AftrRAD | R | false | false | 2,337 | r | DataTableFromFile<-read.table("TempFiles/SingleSNPsAll.txt", header=TRUE, sep = "\t")
# Filter the single-SNP genotype table (already read into DataTableFromFile:
# one row per sample, first column = sample name, one SNP per remaining
# column) and write two tables: all biallelic SNPs, and one SNP per locus.
DataMatrix <- as.matrix(DataTableFromFile)
DataFrame <- data.frame(DataMatrix)
NumRowsInMatrix <- nrow(DataMatrix)
NumColsInMatrix <- ncol(DataMatrix)
NamesCol <- DataMatrix[, 1]

# ---- Biallelic SNPs: keep the columns with exactly two distinct genotypes ----
# First output column: empty header cell over the sample names.
BiallelicMatrix <- matrix(c("", NamesCol), ncol = 1, nrow = NumRowsInMatrix + 1)
for (a in 2:NumColsInMatrix) {       # column 1 holds sample names; skip it
  CurrentLocus <- colnames(DataFrame)[a]
  genotypes <- as.character(DataFrame[[a]])
  # unique() replaces the original O(rows x haplotypes) matching loops;
  # exactly two distinct values means the SNP is biallelic.
  if (length(unique(genotypes)) == 2) {
    BiallelicMatrix <- cbind(BiallelicMatrix, c(CurrentLocus, genotypes))
  }
}
write.table(BiallelicMatrix, file = "TempFiles/AllBiallelicSNPsRaw.txt", sep = "\t", row.names = FALSE, col.names = FALSE)

# ---- Unlinked SNPs: keep only the first SNP column seen for each locus ----
UnlinkedBiallelicMatrix <- matrix(c("", NamesCol), ncol = 1, nrow = NumRowsInMatrix + 1)
# Locus name = column name with its first ".<digits>" (the SNP index) removed,
# same substitution the original applied per column.
LocusOfColumn <- sub("\\.[0-9]+", "", colnames(DataFrame)[2:NumColsInMatrix], perl = TRUE)
# duplicated() preserves first occurrences, matching the original's
# "seen before?" loop over accumulated locus names.
for (j in which(!duplicated(LocusOfColumn))) {
  UnlinkedBiallelicMatrix <- cbind(UnlinkedBiallelicMatrix,
                                   c(LocusOfColumn[j], as.character(DataFrame[[j + 1]])))
}
write.table(UnlinkedBiallelicMatrix, file = "TempFiles/UnlinkedBiallelicSNPsRaw.txt", sep = "\t", row.names = FALSE, col.names = FALSE)
|
#' Population at the town level for selected Danish towns
#'
#' A dataset containing the population of selected Danish towns during the
#' period they had cholera epidemics. Data for years ending in "0" or "5" come
#' from census data. Any years in between were imputed assuming a linear trend
#' between the two census data points. Census data comes via
#' danishfamilysearch.com
#'
#'
#' @format A data frame with 10 rows and 3 variables:
#' \describe{
#' \item{\bold{city}}{character vector of the name of the city/town.}
#' \item{\bold{year}}{Integer vector of the year the population data is for.}
#' \item{\bold{pop}}{Numeric vector of the census population or the estimated
#' population.}
#' }
#' @source {Danish census data via danishfamilysearch.com. Data entered by Mads Perner and Matthew Phelps}
"dk_population"
| /R/dk_population.R | no_license | matthew-phelps/CholeraDataDK | R | false | false | 834 | r | #' Population at the town level for selected Danish towns
#'
#' A dataset containing the population of selected Danish towns during the
#' period they had cholera epidemics. Data for years ending in "0" or "5" come
#' from census data. Any years in between were imputed assuming a linear trend
#' between the two census data points. Census data comes via
#' danishfamilysearch.com
#'
#'
#' @format A data frame with 10 rows and 3 variables:
#' \describe{
#' \item{\bold{city}}{character vector of the name of the city/town.}
#' \item{\bold{year}}{Integer vector of the year the population data is for.}
#' \item{\bold{pop}}{Numeric vector of the census population or the estimated
#' population.}
#' }
#' @source {Danish census data via danishfamilysearch.com. Data entered by Mads Perner and Matthew Phelps}
"dk_population"
|
#---------------------------------------------------------------------------
#
# This class setup is for container objects for the objects that are
# subclasses of the "Stem" virtual class.
#
# 1. StemContainer -- virtual
# 2. downLogs -- for objects of class "downLog"
# 3. standingTrees -- for objects of class "StandingTree"
#
#Author... Date: 25-Oct-2011
# Jeffrey H. Gove
# USDA Forest Service
# Northern Research Station
# 271 Mast Road
# Durham, NH 03824
# jhgove@unh.edu
# phone: 603-868-7667 fax: 603-868-7604
#---------------------------------------------------------------------------
#
#=================================================================================================
#=================================================================================================
#
# 1. The StemContainer virtual class is a container class for any number of subclass objects...
#
setClass('StemContainer',
         representation(units = 'character', #English or metric units
                        bbox = 'matrix', #the overall containing bbox matrix limits
                        stats = 'data.frame', #summary of volume, etc. of Stems in collection
                        description = 'character' #a short description of the object
                       ),
         ## default: metric units and a degenerate all-zero 2x2 bbox with the
         ## required dimnames; .StemEnv is a package-internal environment
         ## defined elsewhere in the package
         prototype = list(units = .StemEnv$msrUnits$metric,
                          bbox = matrix(rep(0,4), nrow=2, dimnames=list(c('x','y'), c('min','max'))),
                          description = ''
                         ),
         contains = 'VIRTUAL', #virtual: only subclasses are ever instantiated
         validity = function(object) {
             ## units must be one of the recognized unit systems in .StemEnv
             if(!(object@units %in% .StemEnv$msrUnits))
               return('units of measure must be "English" or "metric"')
         # check on bbox matrix format...
             #matrix inherits from array (3-Feb-2020)...
             #if(!class(object@bbox) == 'matrix') #bad practice!
             if(!is(object@bbox, 'matrix'))
               return('bbox slot must be a 2x2 matrix')
             ## NOTE(review): despite the message above, only the dimnames are
             ## validated below -- dim(object@bbox) is never checked to be
             ## c(2,2); confirm whether that is intended.
             bboxNames = match(rownames(object@bbox), c('x','y'))
             if(any(is.na(bboxNames)))
               return('slot bbox rownames must be "x", "y"!')
             bboxNames = match(colnames(object@bbox), c('min','max'))
             if(any(is.na(bboxNames)))
               return('slot bbox colnames must be "min", "max"!')
             return(TRUE)
         } #validity check
        ) #class StemContainer
#=================================================================================================
#=================================================================================================
#
# 2. The downLogs class (plural) is a container class for any number of "downLog" objects...
#
setClass('downLogs',
         representation(logs = 'list' #the "downLog" objects as a list
                        ##units = 'character', #English or metric units
                        ##bbox = 'matrix',
                        ##stats = 'data.frame' #summary of volume, etc. of logs in collection
                        #numLogs = 'numeric'#, #number of log objects in logs
                        #spLogs = 'SpatialPolygons' #for simplicity in plotting
                       ),
         ## note: the empty prototype would itself fail the validity check
         ## ('no logs in collection!'), but validity only runs via new()/
         ## validObject(), so the prototype alone is unaffected
         prototype = list(logs = list(), #empty, zero-length list
                          bbox = matrix(rep(0,4), nrow=2, dimnames=list(c('x','y'), c('min','max')))
                         ),
         contains='StemContainer', #a subclass of the virtual 'StemContainer' class
         validity = function(object) {
             ## units must be one of the recognized unit systems in .StemEnv
             if(!(object@units %in% .StemEnv$msrUnits))
               return('units of measure must be "English" or "metric"')
             numLogs = length(object@logs)
             if(numLogs < 1)
               return('no logs in collection!')
             ## each element must itself be a valid object
             for(i in seq_len(numLogs))
               validObject(object@logs[[i]])
             ## every log must carry the container's units
             for(i in seq_len(numLogs))
               if(object@units != object@logs[[i]]@units)
                 return('At least one log has the wrong units!')
         # check on bbox matrix format...
             #matrix inherits from array (3-Feb-2020)...
             #if(!class(object@bbox) == 'matrix') #bad practice!
             if(!is(object@bbox, 'matrix'))
               return('bbox slot must be a 2x2 matrix')
             ## NOTE(review): only the dimnames are validated below --
             ## dim(object@bbox) is never checked to be c(2,2); confirm.
             bboxNames = match(rownames(object@bbox), c('x','y'))
             if(any(is.na(bboxNames)))
               return('slot bbox rownames must be "x", "y"!')
             bboxNames = match(colnames(object@bbox), c('min','max'))
             if(any(is.na(bboxNames)))
               return('slot bbox colnames must be "min", "max"!')
         # consistent units check...
             ## NOTE(review): this pairwise-units loop is largely redundant
             ## with the container-vs-log units loop above; kept as-is.
             units = object@logs[[1]]@units
             for(i in seq_len(numLogs))
               if(object@logs[[i]]@units != units)
                 return('You can not mix measurement units within a population of logs!')
             return(TRUE)
         } #validity check
        ) #class downLogs
#=================================================================================================
#=================================================================================================
#
# 3. The standingTrees class (plural) is a container class for any number of "standingTree"
# objects...
#
setClass('standingTrees',
         representation(trees = 'list' #the standingTree objects as a list
                        #numTrees = 'numeric'#, #number of standingTree objects in trees
                       ),
         prototype = list(trees = list(), #empty, zero-length list
                          description = 'container of standingTree objects'
                         ),
         contains='StemContainer', #a subclass of the virtual 'StemContainer' class
         validity = function(object) {
             numTrees = length(object@trees)
             if(numTrees < 1)
               return('no "standingTree" objects found in "trees" slot!')
             ## each element must itself be a valid object
             for(i in seq_len(numTrees))
               validObject(object@trees[[i]])
             ## every tree must carry the container's units
             for(i in seq_len(numTrees))
               if(object@units != object@trees[[i]]@units)
                 return('At least one "standingTree" object has the wrong units!')
         # consistent class check...
             ## NOTE(review): class() can return a vector of length > 1, in
             ## which case this elementwise != inside if() misbehaves;
             ## is()/identical() would be the softer/safer comparison.
             class = class(object@trees[[1]])
             for(i in seq_len(numTrees))
               if(class(object@trees[[i]]) != class) #could use is() for softer comparison w/ inheritance?
                 return('You can not mix "Stem" classes in the population!')
         # consistent units check...
             units = object@trees[[1]]@units
             for(i in seq_len(numTrees))
               if(object@trees[[i]]@units != units)
                 return('You can not mix measurement units within a population of trees!')
             return(TRUE)
         } #validity check
        ) #class standingTrees
| /R/StemContainerClass.R | no_license | cran/sampSurf | R | false | false | 7,392 | r | #---------------------------------------------------------------------------
#
# This class setup is for container objects for the objects that are
# subclasses of the "Stem" virtual class.
#
# 1. StemContainer -- virtual
# 2. downLogs -- for objects of class "downLog"
# 3. standingTrees -- for objects of class "StandingTree"
#
#Author... Date: 25-Oct-2011
# Jeffrey H. Gove
# USDA Forest Service
# Northern Research Station
# 271 Mast Road
# Durham, NH 03824
# jhgove@unh.edu
# phone: 603-868-7667 fax: 603-868-7604
#---------------------------------------------------------------------------
#
#=================================================================================================
#=================================================================================================
#
# 1. The StemContainer virtual class is a container class for any number of subclass objects...
#
setClass('StemContainer',
#
#  Virtual base class for collections of "Stem" subclass objects; it holds the
#  slots shared by every concrete container (e.g., downLogs, standingTrees).
#  NOTE: .StemEnv is a package-internal environment defined elsewhere.
#
         representation(units = 'character',       #English or metric units
                        bbox = 'matrix',           #the overall containing bbox matrix limits
                        stats = 'data.frame',      #summary of volume, etc. of Stems in collection
                        description = 'character'  #a short description of the object
                       ),
         prototype = list(units = .StemEnv$msrUnits$metric,
                          bbox = matrix(rep(0,4), nrow=2, dimnames=list(c('x','y'), c('min','max'))),
                          description = ''
                         ),
         contains = 'VIRTUAL',
         validity = function(object) {
             if(!(object@units %in% .StemEnv$msrUnits))
               return('units of measure must be "English" or "metric"')
             # check on bbox matrix format...
             # matrix inherits from array (3-Feb-2020), so test with is()...
             if(!is(object@bbox, 'matrix'))
               return('bbox slot must be a 2x2 matrix')
             # require exactly two matched names: match() on a NULL dimname
             # vector returns integer(0), which would otherwise let a bbox
             # with missing dimnames slip through the any(is.na()) test...
             bboxNames = match(rownames(object@bbox), c('x','y'))
             if(length(bboxNames) != 2 || any(is.na(bboxNames)))
               return('slot bbox rownames must be "x", "y"!')
             bboxNames = match(colnames(object@bbox), c('min','max'))
             if(length(bboxNames) != 2 || any(is.na(bboxNames)))
               return('slot bbox colnames must be "min", "max"!')
             return(TRUE)
         } #validity check
        ) #class StemContainer
#=================================================================================================
#=================================================================================================
#
# 2. The downLogs class (plural) is a container class for any number of "downLog" objects...
#
setClass('downLogs',
#
#  Container class holding a collection of "downLog" objects in the logs slot;
#  units, bbox, stats and description are inherited from StemContainer.
#  NOTE: .StemEnv is a package-internal environment defined elsewhere.
#
         representation(logs = 'list'   #the log objects as a list
                       ),
         prototype = list(logs = list(),  #empty, zero-length list
                          bbox = matrix(rep(0,4), nrow=2, dimnames=list(c('x','y'), c('min','max')))
                         ),
         contains='StemContainer',   #a subclass of the virtual 'StemContainer' class
         validity = function(object) {
             if(!(object@units %in% .StemEnv$msrUnits))
               return('units of measure must be "English" or "metric"')
             #an empty collection is never valid...
             numLogs = length(object@logs)
             if(numLogs < 1)
               return('no logs in collection!')
             #each element must itself pass its own validity check...
             for(i in seq_len(numLogs))
               validObject(object@logs[[i]])
             #each log's units must agree with the container's units slot...
             for(i in seq_len(numLogs))
               if(object@units != object@logs[[i]]@units)
                 return('At least one log has the wrong units!')
             # check on bbox matrix format...
             # matrix inherits from array (3-Feb-2020), so test with is()...
             if(!is(object@bbox, 'matrix'))
               return('bbox slot must be a 2x2 matrix')
             # require exactly two matched names: match() on a NULL dimname
             # vector returns integer(0), which would otherwise let a bbox
             # with missing dimnames slip through the any(is.na()) test...
             bboxNames = match(rownames(object@bbox), c('x','y'))
             if(length(bboxNames) != 2 || any(is.na(bboxNames)))
               return('slot bbox rownames must be "x", "y"!')
             bboxNames = match(colnames(object@bbox), c('min','max'))
             if(length(bboxNames) != 2 || any(is.na(bboxNames)))
               return('slot bbox colnames must be "min", "max"!')
             # consistent units check among the logs themselves...
             units = object@logs[[1]]@units
             for(i in seq_len(numLogs))
               if(object@logs[[i]]@units != units)
                 return('You can not mix measurement units within a population of logs!')
             return(TRUE)
         } #validity check
        ) #class downLogs
#=================================================================================================
#=================================================================================================
#
# 3. The standingTrees class (plural) is a container class for any number of "standingTree"
# objects...
#
setClass('standingTrees',
#
#  A container ("plural") class holding a collection of standingTree objects
#  in the trees slot; every other slot comes from the virtual StemContainer
#  superclass.
#
         representation(trees = 'list'        #the standingTree objects, stored as a plain list
                       ),
         prototype = list(trees = list(),     #empty, zero-length list
                          description = 'container of standingTree objects'
                         ),
         contains = 'StemContainer',          #inherits units, bbox, stats, description
         validity = function(object) {
             theTrees = object@trees
             nTrees = length(theTrees)
             #an empty container is never valid...
             if(nTrees < 1)
                 return('no "standingTree" objects found in "trees" slot!')
             #every element must itself pass its own validity check...
             for(idx in seq_len(nTrees))
                 validObject(theTrees[[idx]])
             #each tree's units must agree with the container's units slot...
             for(idx in seq_len(nTrees))
                 if(theTrees[[idx]]@units != object@units)
                     return('At least one "standingTree" object has the wrong units!')
             #all trees must share the class of the first one...
             refClass = class(theTrees[[1]])
             for(idx in seq_len(nTrees))
                 if(class(theTrees[[idx]]) != refClass)
                     return('You can not mix "Stem" classes in the population!')
             #and all trees must agree on measurement units among themselves...
             refUnits = theTrees[[1]]@units
             for(idx in seq_len(nTrees))
                 if(theTrees[[idx]]@units != refUnits)
                     return('You can not mix measurement units within a population of trees!')
             return(TRUE)
         } #validity check
        ) #class standingTrees
|
## makeCacheMatrix and cacheSolve work together to cache a matrix
## alongside its inverse, so the inverse is computed at most once.
## makeCacheMatrix creates a special matrix object,
## which contains a list of functions to set and get a matrix
## i.e. store in current environment as "c_x",
## and set and get the inverted matrix stored as "inverse_x"
##
## Create a special "cache matrix": a list of accessor functions
## (set/get/setinverse/getinverse) closing over a stored matrix and its
## cached inverse. Use cacheSolve() to retrieve the inverse.
makeCacheMatrix <- function(x = matrix()) {
        ## FIX: the cached values must be bound in this function's
        ## environment. Without these local bindings, `<<-` inside the
        ## accessors created GLOBAL variables, so every cache-matrix
        ## object shared (and clobbered) the same c_x/inverse_x state.
        c_x <- NULL        # the stored matrix
        inverse_x <- NULL  # its cached inverse (NULL until computed)
        ## replace the stored matrix and invalidate any cached inverse
        set <- function(y) {
                c_x <<- y
                inverse_x <<- NULL
        }
        set(x)
        get <- function() c_x
        setinverse <- function(inverse) inverse_x <<- inverse
        getinverse <- function() inverse_x
        list(set = set, get = get,
             setinverse = setinverse,
             getinverse = getinverse)
}
## cacheSolve returns the inverse of the special matrix created by
## makeCacheMatrix. If a previous call of cacheSolve already cached the
## inverse, it is retrieved; otherwise the inverse is computed, cached,
## and returned.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x', reusing the
        ## cached inverse when one has already been computed.
        cached <- x$getinverse()
        if (is.null(cached)) {
                ## cache miss: invert the stored matrix and remember it
                cached <- solve(x$get(), ...)
                x$setinverse(cached)
        } else {
                message("getting cached inverse matrix")
        }
        cached
}
## Test workflow
## Create matrix: x<-matrix(c(2,3,5,6,7,8,10,12,13),nrow=3,ncol=3)
## make cache matrix: cm<-makeCacheMatrix(x)
## cm is a list with 4 values, the functions $set, $get, $setinverse, $getinverse
## makeCacheMatrix calls cm$set(x) to store x in the global variable c_x
## and make inverse_x<<-NULL
## cacheSolve(cm) either retrieves the cached value of inverse_x via $getinverse()
## or calculates inv_X and stores it in inverse_x via $setinverse(inv_x)
## the second call of cacheSolve(cm) displays a message about the cached matrix
| /cachematrix.R | no_license | Camphausen/ProgrammingAssignment2 | R | false | false | 1,676 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix creates a special matrix object,
## which contains a list of functions to set and get a matrix
## i.e. store in current environment as "c_x",
## and set and get the inverted matrix stored as "inverse_x"
##
## Create a special "cache matrix": a list of accessor functions
## (set/get/setinverse/getinverse) closing over a stored matrix and its
## cached inverse. Use cacheSolve() to retrieve the inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## FIX: the cached values must be bound in this function's environment.
  ## Without these local bindings, `<<-` inside the accessors created
  ## GLOBAL variables, so every cache-matrix object shared (and
  ## clobbered) the same c_x/inverse_x state.
  c_x <- NULL        # the stored matrix
  inverse_x <- NULL  # its cached inverse (NULL until computed)
  ## replace the stored matrix and invalidate any cached inverse
  set <- function(y) {
    c_x <<- y
    inverse_x <<- NULL
  }
  set(x)
  get <- function() c_x
  setinverse <- function(inverse) inverse_x <<- inverse
  getinverse <- function() inverse_x
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve returns the inverse of the special matrix created by
## makeCacheMatrix. If a previous call of cacheSolve already cached the
## inverse, it is retrieved; otherwise the inverse is computed, cached,
## and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'; a previously cached
  ## inverse is reused, otherwise it is computed and stored.
  hit <- x$getinverse()
  if (!is.null(hit)) {
    message("getting cached inverse matrix")
    return(hit)
  }
  ## cache miss: invert the stored matrix and remember the result
  result <- solve(x$get(), ...)
  x$setinverse(result)
  result
}
## Test workflow
## Create matrix: x<-matrix(c(2,3,5,6,7,8,10,12,13),nrow=3,ncol=3)
## make cache matrix: cm<-makeCacheMatrix(x)
## cm is a list with 4 values, the functions $set, $get, $setinverse, $getinverse
## makeCacheMatrix calls cm$set(x) to store x in the global variable c_x
## and make inverse_x<<-NULL
## cacheSolve(cm) either retrieves the cached value of inverse_x via $getinverse()
## or calculates inv_X and stores it in inverse_x via $setinverse(inv_x)
## the second call of cacheSolve(cm) displays a message about the cached matrix
|
#EFS split per date
# ============================================================================
# EFS data set: load, clean, normalize, split, and fit SVM regression models
# predicting Number_Of_Containers.
#
# NOTE(review): this file reads like a console transcript -- several variables
# (e.g. traind/testd below) are used before they are assigned, and none of the
# required packages (readxl, dplyr, caret, e1071, forecast) are loaded here;
# it assumes an interactive session where they are already attached.
# ============================================================================
efs<-read_xlsx("Desktop/FinalMerge.xlsx")
# drop the vessel identifier and convert categorical/numeric types
efs$IMO<-NULL
efs$Date<-as.factor(efs$Date)
efs$ID_Vessel<-as.factor(efs$ID_Vessel)
efs$Number_Of_Containers<-as.numeric(efs$Number_Of_Containers)
# min-max normalize the numeric columns with range01 (defined further down in
# this file); Number_Of_Containers is normalized twice, which is redundant but
# harmless (the second call maps [0,1] onto itself)
efs$Number_Of_Containers<-range01(efs$Number_Of_Containers)
efs$Time_UntilNext_StackDate<-range01(efs$Time_UntilNext_StackDate)
efs$Time_UntilNext_DockDate<-range01(efs$Time_UntilNext_DockDate)
efs$Wind_Knots<-range01(efs$Wind_Knots)
efs$Number_Of_Containers<-range01(efs$Number_Of_Containers)
efs$Next_StackOpen_Date<-NULL
efs$Next_Dock_Date<-NULL
# chronological split: everything before 2020-03-14 is training data.
# NOTE(review): `<` is not meaningful for unordered factors (Date was
# converted to a factor above), so this comparison yields NA with a warning
# and the selection is likely empty -- compare as Date/character instead
trainefs <- efs[ which(efs$Date <'2020-03-14'), ]
View(efs)
library(Hmisc)
# NOTE(review): 557 is a raw count, but Number_Of_Containers was already
# rescaled to [0, 1] above, so this replacement can no longer match --
# confirm the intended order of operations
efs$Number_Of_Containers[efs$Number_Of_Containers==557] <- mean(efs$Number_Of_Containers)#replace outlier with mean
testefs<-efs[which(efs$Date>='2020-03-14'),]
View(testd)
# move Number_Of_Containers to the last column (dplyr); traind/testd are
# only assigned further below -- out-of-order console history
traind<-traind%>%select(-Number_Of_Containers,everything())
testd<-testd%>%select(-Number_Of_Containers,everything())
library(e1071)
trainefs$IMO<-NULL
testefs$IMO<-NULL
trainefs$ID_Vessel<-as.factor(trainefs$ID_Vessel)
testefs$ID_Vessel<-as.factor(testefs$ID_Vessel)
trainefs$Date<-as.factor(trainefs$Date)
testefs$Date<-as.factor(testefs$Date)
trainefs$Next_StackOpen_Date<-NULL
trainefs$Next_Dock_Date<-NULL
testefs$Next_StackOpen_Date<-NULL
testefs$Next_Dock_Date<-NULL
set.seed(222)
#Split randomly
# 70/30 random split into traind/testd (independent of the date split above)
indd <- sample(2, nrow(efs), replace = TRUE, prob = c(0.7, 0.3))
View(traind)
traind <- efs[indd==1,]
testd <- efs[indd==2,]
traind$ID_Vessel<-as.factor(traind$ID_Vessel)
testd$ID_Vessel<-as.factor(testd$ID_Vessel)
traind$Next_StackOpen_Date<-NULL
testd$Next_StackOpen_Date<-NULL
traind$Next_Dock_Date<-NULL
testd$Next_Dock_Date<-NULL
# drop the wind predictor from all four splits
trainefs$Wind_Knots<-NULL
testefs$Wind_Knots<-NULL
traind$Wind_Knots<-NULL
testd$Wind_Knots<-NULL
View(efs)
# train an svm model, consider further tuning parameters for lower MSE
# (train() is from caret -- presumably attached in the session)
svmodel <- train(Number_Of_Containers ~ .,data=traind, method = "svmRadial", cost=100, gamma=0.01,preProcess = c("center","scale"),tuneLength = 10)
#linear kernel function
# grid search over the cost parameter for a linear-kernel SVM (e1071)
linear.tunee<-tune.svm(Number_Of_Containers~.,data=traind,kernel="linear",cost = c(.001,.01,.1,1,5,10))
summary(linear.tunee)
best.lineare<-linear.tunee$best.model
tune.teste<-predict(best.lineare,newdata=testd)
plot(tune.teste,testd$Number_Of_Containers)
# test-set mean squared error of the best linear model
tune.test.reside<-tune.teste-testd$Number_Of_Containers
mean(tune.test.reside^2)
#polynomial kernel function
# Tune a polynomial-kernel SVM over degree and coef0 and evaluate its
# test-set MSE.
# FIX: the kernel argument was misspelled "kernal", so it was not recognized
# by tune.svm and the search silently fell back to the default (radial)
# kernel, making the degree/coef0 grid meaningless.
set.seed(123)
poly.tunee<-tune.svm(Number_Of_Containers~.,data = traind,kernel="polynomial",degree = c(3,4,5),coef0 = c(.1,.5,1,2,3,4))
best.polye<-poly.tunee$best.model
summary(poly.tunee)
poly.teste<-predict(best.polye,newdata=testd)
plot(poly.teste,testd$Number_Of_Containers)
# test-set mean squared error of the best polynomial model
poly.test.reside<-poly.teste-testd$Number_Of_Containers
mean(poly.test.reside^2)
#radial kernel function
# grid search over gamma for an RBF-kernel SVM
set.seed(123)
rbf.tune<-tune.svm(Number_Of_Containers~.,data=traind,kernel="radial",gamma = c(.1,.5,1,2,3,4))
summary(rbf.tune)
best.rbf<-rbf.tune$best.model
rbf.test<-predict(best.rbf,newdata=testd)
plot(rbf.test,testd$Number_Of_Containers)
# test-set mean squared error of the best radial model
rbf.test.resid<-rbf.test-testd$Number_Of_Containers
mean(rbf.test.resid^2)
#sigmoid kernel function
# grid search over gamma and coef0 for a sigmoid-kernel SVM
set.seed(123)
sigmoid.tune<-tune.svm(Number_Of_Containers~., data=traind,kernel="sigmoid",gamma = c(.1,.5,1,2,3,4),coef0 = c(.1,.5,1,2,3,4))
summary(sigmoid.tune)
best.sigmoid<-sigmoid.tune$best.model
sigmoid.test<-predict(best.sigmoid,newdata=testd)
plot(sigmoid.test,testd$Number_Of_Containers)
#predict
# predictions from the caret model trained above
prognoza <- predict(svmodel, newdata=testd)
View(prognoza)
#plot the results
# overlay observed (blue line) and predicted (red points) values;
# NOTE(review): `p` is never defined in this file -- confirm where
# p$base/p$prognoza come from
ylim <- c(min(testefs$Number_Of_Containers), max(testefs$Number_Of_Containers))
xlim <- c(min(testefs$Date), max(testefs$Date))
plot(testefs$Date,testefs$Number_Of_Containers, col="blue", ylim=ylim, xlim=xlim, type="l")
par(new=TRUE)
plot(p$base, p$prognoza, col="red", ylim=ylim, xlim=xlim)
#accuracy of predictions
# accuracy() is from the forecast package -- presumably attached
accuracy(sigmoid.test, testd$Number_Of_Containers)
################################################################################################################
#mfs split per date
# ============================================================================
# MFS data set: load, clean an outlier, one-hot encode, normalize, and fit
# SVM regression models with a chronological train/test split.
# ============================================================================
mfs<-read_xlsx("Desktop/FinalMFS.xlsx")
# replace the single outlying count (755) with the column mean
mfs$NumberOfContainers[mfs$NumberOfContainers==755] <- mean(mfs$NumberOfContainers)#replace outlier with mean
mfs$WeekDay<-as.factor(mfs$WeekDay)
range01 <- function(x) {
    ## Linearly rescale a numeric vector onto [0, 1]:
    ## min(x) maps to 0 and max(x) maps to 1.
    lo <- min(x)
    (x - lo) / (max(x) - lo)
}
# normalize the target
mfs$NumberOfContainers<-range01(mfs$NumberOfContainers)
# NOTE(review): out-of-order console history -- tune.test and testnorm are
# only created further below; this line back-transforms normalized
# predictions onto the original scale of NumberOfContainers
testdenorm <- mapply(function(x, y) (x*(max(y)-min(y)))+min(y), tune.test, testnorm$NumberOfContainers)
# one-hot encode the factor columns (one_hot is from mltools,
# as.data.table from data.table -- presumably attached)
new<-one_hot(as.data.table(mfs))
new$Year<-as.numeric(new$Year)
new$Month<-as.numeric(new$Month)
new$Day<-as.numeric(new$Day)
new$Week<-as.numeric(new$Week)
# min-max normalize the calendar and weather predictors
new$Year<-range01(new$Year)
new$Month<-range01(new$Month)
new$Day<-range01(new$Day)
new$Week<-range01(new$Week)
new$Knots<-range01(new$Knots)
new$NumberOfContainers<-range01(new$NumberOfContainers)
mfs$WeekDay<-as.factor(mfs$WeekDay)
# move the target to the last column
new<-new%>%select(-NumberOfContainers,everything())
#split per date
# chronological split: first 525 rows train, remaining 225 rows test
testnorm<-new[526:750,]
View(new)
trainm<-new[1:525,]
testm<-new[526:750,]
trainm$Date<-NULL
testm$Date<-NULL
summary(testm)
#model
# eps-regression SVM with an RBF kernel (e1071)
svmodelm <- svm(NumberOfContainers ~ .,data=trainm, type="eps-regression",kernel="radial",cost=1000, gamma=0.001)
#linear kernel
# grid search over the cost parameter for a linear-kernel SVM
linear.tune<-tune.svm(NumberOfContainers~.,data=trainm,kernel="linear",cost = c(.001,.01,.1,1,5,10))
summary(linear.tune)
best.linear<-linear.tune$best.model
tune.test<-predict(best.linear,newdata=testm)
plot(tune.test,testm$NumberOfContainers)
# test-set mean squared error of the best linear model
tune.test.resid<-tune.test-testm$NumberOfContainers
mean(tune.test.resid^2)
#polynomial kernel
# Tune a polynomial-kernel SVM over degree and coef0.
# FIX: the kernel argument was misspelled "kernal", so it was not recognized
# by tune.svm and the search silently fell back to the default (radial)
# kernel, making the degree/coef0 grid meaningless.
set.seed(123)
poly.tune<-tune.svm(NumberOfContainers~.,data = trainm,kernel="polynomial",degree = c(3,4,5),coef0 = c(.1,.5,1,2,3,4))
best.poly<-poly.tune$best.model
summary(poly.tune)
poly.test<-predict(best.poly,newdata=testm)
plot(poly.test,testm$NumberOfContainers)
# test-set mean squared error of the best polynomial model
poly.test.resid<-poly.test-testm$NumberOfContainers
mean(poly.test.resid^2)
#radial kernel
# grid search over gamma for an RBF-kernel SVM
set.seed(123)
rbf.tunem<-tune.svm(NumberOfContainers~.,data=trainm,kernel="radial",gamma = c(.1,.5,1,2,3,4))
summary(rbf.tunem)
best.rbfm<-rbf.tunem$best.model
rbf.testm<-predict(best.rbfm,newdata=testm)
plot(rbf.testm,testm$NumberOfContainers)
# test-set mean squared error of the best radial model
rbf.test.residm<-rbf.testm-testm$NumberOfContainers
mean(rbf.test.residm^2)
#sigmoid kernel
# grid search over gamma and coef0 for a sigmoid-kernel SVM
set.seed(123)
sigmoid.tunem<-tune.svm(NumberOfContainers~., data=trainm,kernel="sigmoid",gamma = c(.1,.5,1,2,3,4),coef0 = c(.1,.5,1,2,3,4))
summary(sigmoid.tunem)
best.sigmoidm<-sigmoid.tunem$best.model
sigmoid.testm<-predict(best.sigmoidm,newdata=testm)
plot(sigmoid.testm,testm$NumberOfContainers)
#predict
prognozam <- predict(svmodelm, newdata=testm)
#accuracy of predictions
# accuracy() is from the forecast package -- presumably attached
accuracy(sigmoid.testm, testm$NumberOfContainers)
################################################################################################################
#MFS split random
# ============================================================================
# MFS data set again, this time with a random 70/30 train/test split.
# ============================================================================
mfs<-read_xlsx("Desktop/FinalMFS.xlsx")
range01 <- function(x) {
    ## Min-max normalization: map the smallest value of x to 0 and the
    ## largest to 1.
    rng <- range(x)
    (x - rng[1]) / (rng[2] - rng[1])
}
# normalize the target and encode the weekday factor
mfs$NumberOfContainers<-range01(mfs$NumberOfContainers)
mfs$WeekDay<-as.factor(mfs$WeekDay)
# one-hot encode the factor columns
new<-one_hot(as.data.table(mfs))
View(trainmf)
#split randomly
# 70/30 random split into trainmf/testmf
set.seed(222)
indmfs <- sample(2, nrow(new), replace = TRUE, prob = c(0.7, 0.3))
trainmf <- new[indmfs==1,]
testmf <- new[indmfs==2,]
# move the target to the last column and drop unused date columns
# (the Date removals are repeated -- the second pair is a no-op)
trainmf<-trainmf%>%select(-NumberOfContainers,everything())
testmf<-testmf%>%select(-NumberOfContainers,everything())
trainmf$Year<-NULL
trainmf$Month<-NULL
trainmf$Day<-NULL
testmf$Year<-NULL
testmf$Month<-NULL
testmf$Day<-NULL
trainmf$Date<-NULL
testmf$Date<-NULL
trainmf$Date<-NULL
testmf$Date<-NULL
#model
# eps-regression SVM with an RBF kernel (e1071)
svmodelmf <- svm(NumberOfContainers ~ .,data=trainmf, type="eps-regression",kernel="radial",cost=100, gamma=0.001)
#predict
prognozamf <- predict(svmodelmf, newdata=testmf)
#accuracy of predictions
# accuracy() is from the forecast package -- presumably attached
accuracy(prognozamf, testmf$NumberOfContainers)
| /SupportVectorRegression.R | no_license | izanneodendaal/Predictive-Suite | R | false | false | 7,660 | r | #EFS split per date
# ============================================================================
# EFS data set: load, clean, normalize, split, and fit SVM regression models
# predicting Number_Of_Containers.
#
# NOTE(review): this file reads like a console transcript -- several variables
# (e.g. traind/testd below) are used before they are assigned, and none of the
# required packages (readxl, dplyr, caret, e1071, forecast) are loaded here;
# it assumes an interactive session where they are already attached.
# ============================================================================
efs<-read_xlsx("Desktop/FinalMerge.xlsx")
# drop the vessel identifier and convert categorical/numeric types
efs$IMO<-NULL
efs$Date<-as.factor(efs$Date)
efs$ID_Vessel<-as.factor(efs$ID_Vessel)
efs$Number_Of_Containers<-as.numeric(efs$Number_Of_Containers)
# min-max normalize the numeric columns with range01 (defined further down in
# this file); Number_Of_Containers is normalized twice, which is redundant but
# harmless (the second call maps [0,1] onto itself)
efs$Number_Of_Containers<-range01(efs$Number_Of_Containers)
efs$Time_UntilNext_StackDate<-range01(efs$Time_UntilNext_StackDate)
efs$Time_UntilNext_DockDate<-range01(efs$Time_UntilNext_DockDate)
efs$Wind_Knots<-range01(efs$Wind_Knots)
efs$Number_Of_Containers<-range01(efs$Number_Of_Containers)
efs$Next_StackOpen_Date<-NULL
efs$Next_Dock_Date<-NULL
# chronological split: everything before 2020-03-14 is training data.
# NOTE(review): `<` is not meaningful for unordered factors (Date was
# converted to a factor above), so this comparison yields NA with a warning
# and the selection is likely empty -- compare as Date/character instead
trainefs <- efs[ which(efs$Date <'2020-03-14'), ]
View(efs)
library(Hmisc)
# NOTE(review): 557 is a raw count, but Number_Of_Containers was already
# rescaled to [0, 1] above, so this replacement can no longer match --
# confirm the intended order of operations
efs$Number_Of_Containers[efs$Number_Of_Containers==557] <- mean(efs$Number_Of_Containers)#replace outlier with mean
testefs<-efs[which(efs$Date>='2020-03-14'),]
View(testd)
# move Number_Of_Containers to the last column (dplyr); traind/testd are
# only assigned further below -- out-of-order console history
traind<-traind%>%select(-Number_Of_Containers,everything())
testd<-testd%>%select(-Number_Of_Containers,everything())
library(e1071)
trainefs$IMO<-NULL
testefs$IMO<-NULL
trainefs$ID_Vessel<-as.factor(trainefs$ID_Vessel)
testefs$ID_Vessel<-as.factor(testefs$ID_Vessel)
trainefs$Date<-as.factor(trainefs$Date)
testefs$Date<-as.factor(testefs$Date)
trainefs$Next_StackOpen_Date<-NULL
trainefs$Next_Dock_Date<-NULL
testefs$Next_StackOpen_Date<-NULL
testefs$Next_Dock_Date<-NULL
set.seed(222)
#Split randomly
# 70/30 random split into traind/testd (independent of the date split above)
indd <- sample(2, nrow(efs), replace = TRUE, prob = c(0.7, 0.3))
View(traind)
traind <- efs[indd==1,]
testd <- efs[indd==2,]
traind$ID_Vessel<-as.factor(traind$ID_Vessel)
testd$ID_Vessel<-as.factor(testd$ID_Vessel)
traind$Next_StackOpen_Date<-NULL
testd$Next_StackOpen_Date<-NULL
traind$Next_Dock_Date<-NULL
testd$Next_Dock_Date<-NULL
# drop the wind predictor from all four splits
trainefs$Wind_Knots<-NULL
testefs$Wind_Knots<-NULL
traind$Wind_Knots<-NULL
testd$Wind_Knots<-NULL
View(efs)
# train an svm model, consider further tuning parameters for lower MSE
# (train() is from caret -- presumably attached in the session)
svmodel <- train(Number_Of_Containers ~ .,data=traind, method = "svmRadial", cost=100, gamma=0.01,preProcess = c("center","scale"),tuneLength = 10)
#linear kernel function
# grid search over the cost parameter for a linear-kernel SVM (e1071)
linear.tunee<-tune.svm(Number_Of_Containers~.,data=traind,kernel="linear",cost = c(.001,.01,.1,1,5,10))
summary(linear.tunee)
best.lineare<-linear.tunee$best.model
tune.teste<-predict(best.lineare,newdata=testd)
plot(tune.teste,testd$Number_Of_Containers)
# test-set mean squared error of the best linear model
tune.test.reside<-tune.teste-testd$Number_Of_Containers
mean(tune.test.reside^2)
#polynomial kernel function
# Tune a polynomial-kernel SVM over degree and coef0 and evaluate its
# test-set MSE.
# FIX: the kernel argument was misspelled "kernal", so it was not recognized
# by tune.svm and the search silently fell back to the default (radial)
# kernel, making the degree/coef0 grid meaningless.
set.seed(123)
poly.tunee<-tune.svm(Number_Of_Containers~.,data = traind,kernel="polynomial",degree = c(3,4,5),coef0 = c(.1,.5,1,2,3,4))
best.polye<-poly.tunee$best.model
summary(poly.tunee)
poly.teste<-predict(best.polye,newdata=testd)
plot(poly.teste,testd$Number_Of_Containers)
# test-set mean squared error of the best polynomial model
poly.test.reside<-poly.teste-testd$Number_Of_Containers
mean(poly.test.reside^2)
#radial kernel function
# grid search over gamma for an RBF-kernel SVM
set.seed(123)
rbf.tune<-tune.svm(Number_Of_Containers~.,data=traind,kernel="radial",gamma = c(.1,.5,1,2,3,4))
summary(rbf.tune)
best.rbf<-rbf.tune$best.model
rbf.test<-predict(best.rbf,newdata=testd)
plot(rbf.test,testd$Number_Of_Containers)
# test-set mean squared error of the best radial model
rbf.test.resid<-rbf.test-testd$Number_Of_Containers
mean(rbf.test.resid^2)
#sigmoid kernel function
# grid search over gamma and coef0 for a sigmoid-kernel SVM
set.seed(123)
sigmoid.tune<-tune.svm(Number_Of_Containers~., data=traind,kernel="sigmoid",gamma = c(.1,.5,1,2,3,4),coef0 = c(.1,.5,1,2,3,4))
summary(sigmoid.tune)
best.sigmoid<-sigmoid.tune$best.model
sigmoid.test<-predict(best.sigmoid,newdata=testd)
plot(sigmoid.test,testd$Number_Of_Containers)
#predict
# predictions from the caret model trained above
prognoza <- predict(svmodel, newdata=testd)
View(prognoza)
#plot the results
# overlay observed (blue line) and predicted (red points) values;
# NOTE(review): `p` is never defined in this file -- confirm where
# p$base/p$prognoza come from
ylim <- c(min(testefs$Number_Of_Containers), max(testefs$Number_Of_Containers))
xlim <- c(min(testefs$Date), max(testefs$Date))
plot(testefs$Date,testefs$Number_Of_Containers, col="blue", ylim=ylim, xlim=xlim, type="l")
par(new=TRUE)
plot(p$base, p$prognoza, col="red", ylim=ylim, xlim=xlim)
#accuracy of predictions
# accuracy() is from the forecast package -- presumably attached
accuracy(sigmoid.test, testd$Number_Of_Containers)
################################################################################################################
#mfs split per date
# ============================================================================
# MFS data set: load, clean an outlier, one-hot encode, normalize, and fit
# SVM regression models with a chronological train/test split.
# ============================================================================
mfs<-read_xlsx("Desktop/FinalMFS.xlsx")
# replace the single outlying count (755) with the column mean
mfs$NumberOfContainers[mfs$NumberOfContainers==755] <- mean(mfs$NumberOfContainers)#replace outlier with mean
mfs$WeekDay<-as.factor(mfs$WeekDay)
range01 <- function(x) {
    ## Rescale x so its smallest value becomes 0 and its largest becomes 1.
    shifted <- x - min(x)
    shifted / max(shifted)
}
# normalize the target
mfs$NumberOfContainers<-range01(mfs$NumberOfContainers)
# NOTE(review): out-of-order console history -- tune.test and testnorm are
# only created further below; this line back-transforms normalized
# predictions onto the original scale of NumberOfContainers
testdenorm <- mapply(function(x, y) (x*(max(y)-min(y)))+min(y), tune.test, testnorm$NumberOfContainers)
# one-hot encode the factor columns (one_hot is from mltools,
# as.data.table from data.table -- presumably attached)
new<-one_hot(as.data.table(mfs))
new$Year<-as.numeric(new$Year)
new$Month<-as.numeric(new$Month)
new$Day<-as.numeric(new$Day)
new$Week<-as.numeric(new$Week)
# min-max normalize the calendar and weather predictors
new$Year<-range01(new$Year)
new$Month<-range01(new$Month)
new$Day<-range01(new$Day)
new$Week<-range01(new$Week)
new$Knots<-range01(new$Knots)
new$NumberOfContainers<-range01(new$NumberOfContainers)
mfs$WeekDay<-as.factor(mfs$WeekDay)
# move the target to the last column
new<-new%>%select(-NumberOfContainers,everything())
#split per date
# chronological split: first 525 rows train, remaining 225 rows test
testnorm<-new[526:750,]
View(new)
trainm<-new[1:525,]
testm<-new[526:750,]
trainm$Date<-NULL
testm$Date<-NULL
summary(testm)
#model
# eps-regression SVM with an RBF kernel (e1071)
svmodelm <- svm(NumberOfContainers ~ .,data=trainm, type="eps-regression",kernel="radial",cost=1000, gamma=0.001)
#linear kernel
# grid search over the cost parameter for a linear-kernel SVM
linear.tune<-tune.svm(NumberOfContainers~.,data=trainm,kernel="linear",cost = c(.001,.01,.1,1,5,10))
summary(linear.tune)
best.linear<-linear.tune$best.model
tune.test<-predict(best.linear,newdata=testm)
plot(tune.test,testm$NumberOfContainers)
# test-set mean squared error of the best linear model
tune.test.resid<-tune.test-testm$NumberOfContainers
mean(tune.test.resid^2)
#polynomial kernel
# Tune a polynomial-kernel SVM over degree and coef0.
# FIX: the kernel argument was misspelled "kernal", so it was not recognized
# by tune.svm and the search silently fell back to the default (radial)
# kernel, making the degree/coef0 grid meaningless.
set.seed(123)
poly.tune<-tune.svm(NumberOfContainers~.,data = trainm,kernel="polynomial",degree = c(3,4,5),coef0 = c(.1,.5,1,2,3,4))
best.poly<-poly.tune$best.model
summary(poly.tune)
poly.test<-predict(best.poly,newdata=testm)
plot(poly.test,testm$NumberOfContainers)
# test-set mean squared error of the best polynomial model
poly.test.resid<-poly.test-testm$NumberOfContainers
mean(poly.test.resid^2)
#radial kernel
# grid search over gamma for an RBF-kernel SVM
set.seed(123)
rbf.tunem<-tune.svm(NumberOfContainers~.,data=trainm,kernel="radial",gamma = c(.1,.5,1,2,3,4))
summary(rbf.tunem)
best.rbfm<-rbf.tunem$best.model
rbf.testm<-predict(best.rbfm,newdata=testm)
plot(rbf.testm,testm$NumberOfContainers)
# test-set mean squared error of the best radial model
rbf.test.residm<-rbf.testm-testm$NumberOfContainers
mean(rbf.test.residm^2)
#sigmoid kernel
# grid search over gamma and coef0 for a sigmoid-kernel SVM
set.seed(123)
sigmoid.tunem<-tune.svm(NumberOfContainers~., data=trainm,kernel="sigmoid",gamma = c(.1,.5,1,2,3,4),coef0 = c(.1,.5,1,2,3,4))
summary(sigmoid.tunem)
best.sigmoidm<-sigmoid.tunem$best.model
sigmoid.testm<-predict(best.sigmoidm,newdata=testm)
plot(sigmoid.testm,testm$NumberOfContainers)
#predict
prognozam <- predict(svmodelm, newdata=testm)
#accuracy of predictions
# accuracy() is from the forecast package -- presumably attached
accuracy(sigmoid.testm, testm$NumberOfContainers)
################################################################################################################
#MFS split random
# ============================================================================
# MFS data set again, this time with a random 70/30 train/test split.
# ============================================================================
mfs<-read_xlsx("Desktop/FinalMFS.xlsx")
range01 <- function(x) {
    ## Normalize a numeric vector to the unit interval [0, 1].
    extremes <- range(x)
    (x - extremes[1]) / (extremes[2] - extremes[1])
}
# normalize the target and encode the weekday factor
mfs$NumberOfContainers<-range01(mfs$NumberOfContainers)
mfs$WeekDay<-as.factor(mfs$WeekDay)
# one-hot encode the factor columns
new<-one_hot(as.data.table(mfs))
View(trainmf)
#split randomly
# 70/30 random split into trainmf/testmf
set.seed(222)
indmfs <- sample(2, nrow(new), replace = TRUE, prob = c(0.7, 0.3))
trainmf <- new[indmfs==1,]
testmf <- new[indmfs==2,]
# move the target to the last column and drop unused date columns
# (the Date removals are repeated -- the second pair is a no-op)
trainmf<-trainmf%>%select(-NumberOfContainers,everything())
testmf<-testmf%>%select(-NumberOfContainers,everything())
trainmf$Year<-NULL
trainmf$Month<-NULL
trainmf$Day<-NULL
testmf$Year<-NULL
testmf$Month<-NULL
testmf$Day<-NULL
trainmf$Date<-NULL
testmf$Date<-NULL
trainmf$Date<-NULL
testmf$Date<-NULL
#model
# eps-regression SVM with an RBF kernel (e1071)
svmodelmf <- svm(NumberOfContainers ~ .,data=trainmf, type="eps-regression",kernel="radial",cost=100, gamma=0.001)
#predict
prognozamf <- predict(svmodelmf, newdata=testmf)
#accuracy of predictions
# accuracy() is from the forecast package -- presumably attached
accuracy(prognozamf, testmf$NumberOfContainers)
|
# FILE: ERROR_MESSAGES.r
# AUTHOR: Zeljko Jovan Dzakula
# DATE: 7/21/2014
################################################################################
################################################################################
################################################################################
################################################################################
# Error message shown when `reference` (bound by the sourcing script) is not
# one of the supported genome references listed in `referenceChoices`.
# FIX: "valuse" -> "values" and a missing space after "include", making the
# message consistent with binSizeError below.
referenceError <- paste0( "Illegal argument - unknown reference: ", reference, ". ",
                          "Currently, the only supported values include ", referenceChoices, "." );
                          #"Valid values are hg19, hg38, and BioNano" );
# Error message shown when `binSize` is not one of the supported bin sizes
# listed in `binSizeChoices`.
binSizeError <- paste0( "Illegal argument - unknown bin size choice: ", binSize, ". ",
                        "Currently, the only supported values include ", binSizeChoices, "." );
                        #"Valid values are hg19, hg38, and BioNano" );
| /scripts/Analysis/SV/CopyNumberProfiles/ERROR_MESSAGES.r | no_license | splaisan/BionanoThinkmate-NC | R | false | false | 916 | r | # FILE: ERROR_MESSAGES.r
# AUTHOR: Zeljko Jovan Dzakula
# DATE: 7/21/2014
################################################################################
################################################################################
################################################################################
################################################################################
# Error message shown when `reference` (bound by the sourcing script) is not
# one of the supported genome references listed in `referenceChoices`.
# FIX: "valuse" -> "values" and a missing space after "include", making the
# message consistent with binSizeError below.
referenceError <- paste0( "Illegal argument - unknown reference: ", reference, ". ",
                          "Currently, the only supported values include ", referenceChoices, "." );
                          #"Valid values are hg19, hg38, and BioNano" );
# Error message shown when `binSize` is not one of the supported bin sizes
# listed in `binSizeChoices`.
binSizeError <- paste0( "Illegal argument - unknown bin size choice: ", binSize, ". ",
                        "Currently, the only supported values include ", binSizeChoices, "." );
                        #"Valid values are hg19, hg38, and BioNano" );
|
\name{setestimator}
\alias{setestimator}
\alias{setoptimizer}
\alias{usecpp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Convenience functions
}
\description{
These functions can be used to change some estimator options.
}
\usage{
setestimator(x, estimator)
setoptimizer(x, optimizer = c("default","nlminb","ucminf","cpp_L-BFGS-B",
"cpp_BFGS","cpp_CG","cpp_SANN","cpp_Nelder-Mead"))
usecpp(x, use = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A \code{psychonetrics} model.
}
\item{estimator}{
A string indicating the estimator to be used
}
\item{optimizer}{
The optimizer to be used. Can be one of \code{"nlminb"} (the default R \code{nlminb} function), \code{"ucminf"} (from the \code{optimr} package), and C++ based optimizers \code{"cpp_L-BFGS-B"}, \code{"cpp_BFGS"}, \code{"cpp_CG"}, \code{"cpp_SANN"}, and \code{"cpp_Nelder-Mead"}. The C++ optimizers are faster but slightly less stable. Defaults to \code{"nlminb"}.
}
\item{use}{
Logical indicating if C++ should be used (currently only used in FIML)
}
}
\value{
An object of the class psychonetrics (\link{psychonetrics-class})
}
\author{
Sacha Epskamp
}
| /psychonetrics/man/convenience.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 1,203 | rd | \name{setestimator}
\alias{setestimator}
\alias{setoptimizer}
\alias{usecpp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Convenience functions
}
\description{
These functions can be used to change some estimator options.
}
\usage{
setestimator(x, estimator)
setoptimizer(x, optimizer = c("default","nlminb","ucminf","cpp_L-BFGS-B",
"cpp_BFGS","cpp_CG","cpp_SANN","cpp_Nelder-Mead"))
usecpp(x, use = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
A \code{psychonetrics} model.
}
\item{estimator}{
A string indicating the estimator to be used
}
\item{optimizer}{
The optimizer to be used. Can be one of \code{"nlminb"} (the default R \code{nlminb} function), \code{"ucminf"} (from the \code{optimr} package), and C++ based optimizers \code{"cpp_L-BFGS-B"}, \code{"cpp_BFGS"}, \code{"cpp_CG"}, \code{"cpp_SANN"}, and \code{"cpp_Nelder-Mead"}. The C++ optimizers are faster but slightly less stable. Defaults to \code{"nlminb"}.
}
\item{use}{
Logical indicating if C++ should be used (currently only used in FIML)
}
}
\value{
An object of the class psychonetrics (\link{psychonetrics-class})
}
\author{
Sacha Epskamp
}
|
# Fit a cross-validated elastic net (alpha = 0.3, 10-fold CV, MAE loss) on
# the "bone" training set and append the fitted glmnet object's summary to
# the model log file.
library(glmnet)
bone_data <- read.table("./TrainingSet/AvgRank/bone.csv", head = TRUE, sep = ",")
# column 1 is the response; columns 4 onward are the predictors
predictors <- as.matrix(bone_data[, 4:ncol(bone_data)])
response <- as.matrix(bone_data[, 1])
# fixed seed so the CV fold assignment is reproducible
set.seed(123)
cv_fit <- cv.glmnet(predictors, response, nfolds = 10, type.measure = "mae",
                    alpha = 0.3, family = "gaussian", standardize = TRUE)
sink('./Model/EN/AvgRank/bone/bone_043.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/AvgRank/bone/bone_043.R | no_license | leon1003/QSMART | R | false | false | 345 | r | library(glmnet)
mydata = read.table("./TrainingSet/AvgRank/bone.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.3,family="gaussian",standardize=TRUE)
sink('./Model/EN/AvgRank/bone/bone_043.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_visuals.R
\name{spatDimPlot2D}
\alias{spatDimPlot2D}
\title{spatDimPlot2D}
\usage{
spatDimPlot2D(
gobject,
plot_alignment = c("vertical", "horizontal"),
dim_reduction_to_use = "umap",
dim_reduction_name = "umap",
dim1_to_use = 1,
dim2_to_use = 2,
sdimx = "sdimx",
sdimy = "sdimy",
spat_enr_names = NULL,
cell_color = NULL,
color_as_factor = T,
cell_color_code = NULL,
cell_color_gradient = c("blue", "white", "red"),
gradient_midpoint = NULL,
gradient_limits = NULL,
select_cell_groups = NULL,
select_cells = NULL,
dim_point_size = 1,
dim_point_border_col = "black",
dim_point_border_stroke = 0.1,
spat_point_size = 1,
spat_point_border_col = "black",
spat_point_border_stroke = 0.1,
dim_show_cluster_center = F,
dim_show_center_label = T,
dim_center_point_size = 4,
dim_center_point_border_col = "black",
dim_center_point_border_stroke = 0.1,
dim_label_size = 4,
dim_label_fontface = "bold",
spat_show_cluster_center = F,
spat_show_center_label = F,
spat_center_point_size = 4,
spat_label_size = 4,
spat_label_fontface = "bold",
show_NN_network = F,
nn_network_to_use = "sNN",
network_name = "sNN.pca",
nn_network_alpha = 0.05,
show_spatial_network = F,
spat_network_name = "spatial_network",
spat_network_color = "blue",
spat_network_alpha = 0.5,
show_spatial_grid = F,
spat_grid_name = "spatial_grid",
spat_grid_color = "blue",
show_other_cells = T,
other_cell_color = "lightgrey",
dim_other_point_size = 1,
spat_other_point_size = 1,
spat_other_cells_alpha = 0.5,
dim_show_legend = F,
spat_show_legend = F,
legend_text = 8,
axis_text = 8,
axis_title = 8,
show_plot = NA,
return_plot = NA,
save_plot = NA,
save_param = list(),
default_save_name = "spatDimPlot2D"
)
}
\arguments{
\item{gobject}{giotto object}
\item{plot_alignment}{direction to align plot}
\item{dim_reduction_to_use}{dimension reduction to use}
\item{dim_reduction_name}{dimension reduction name}
\item{dim1_to_use}{dimension to use on x-axis}
\item{dim2_to_use}{dimension to use on y-axis}
\item{sdimx}{spatial dimension to use on x-axis}
\item{sdimy}{spatial dimension to use on y-axis}
\item{spat_enr_names}{names of spatial enrichment results to include}
\item{cell_color}{color for cells (see details)}
\item{color_as_factor}{convert color column to factor}
\item{cell_color_code}{named vector with colors}
\item{cell_color_gradient}{vector with 3 colors for numeric data}
\item{gradient_midpoint}{midpoint for color gradient}
\item{gradient_limits}{vector with lower and upper limits}
\item{select_cell_groups}{select subset of cells/clusters based on cell_color parameter}
\item{select_cells}{select subset of cells based on cell IDs}
\item{dim_point_size}{size of points in dim. reduction space}
\item{dim_point_border_col}{border color of points in dim. reduction space}
\item{dim_point_border_stroke}{border stroke of points in dim. reduction space}
\item{spat_point_size}{size of spatial points}
\item{spat_point_border_col}{border color of spatial points}
\item{spat_point_border_stroke}{border stroke of spatial points}
\item{dim_show_cluster_center}{show the center of each cluster}
\item{dim_show_center_label}{provide a label for each cluster}
\item{dim_center_point_size}{size of the center point}
\item{dim_center_point_border_col}{border color of center point}
\item{dim_center_point_border_stroke}{stroke size of center point}
\item{dim_label_size}{size of the center label}
\item{dim_label_fontface}{font of the center label}
\item{spat_show_cluster_center}{show the center of each cluster}
\item{spat_show_center_label}{provide a label for each cluster}
\item{spat_center_point_size}{size of the center point}
\item{spat_label_size}{size of the center label}
\item{spat_label_fontface}{font of the center label}
\item{show_NN_network}{show underlying NN network}
\item{nn_network_to_use}{type of NN network to use (kNN vs sNN)}
\item{network_name}{name of NN network to use, if show_NN_network = TRUE}
\item{nn_network_alpha}{column to use for alpha of the edges}
\item{show_spatial_network}{show spatial network}
\item{spat_network_name}{name of spatial network to use}
\item{spat_network_color}{color of spatial network}
\item{show_spatial_grid}{show spatial grid}
\item{spat_grid_name}{name of spatial grid to use}
\item{spat_grid_color}{color of spatial grid}
\item{show_other_cells}{display not selected cells}
\item{other_cell_color}{color of not selected cells}
\item{dim_other_point_size}{size of not selected dim cells}
\item{spat_other_point_size}{size of not selected spat cells}
\item{spat_other_cells_alpha}{alpha of not selected spat cells}
\item{dim_show_legend}{show legend of dimension reduction plot}
\item{spat_show_legend}{show legend of spatial plot}
\item{legend_text}{size of legend text}
\item{axis_text}{size of axis text}
\item{axis_title}{size of axis title}
\item{show_plot}{show plot}
\item{return_plot}{return ggplot object}
\item{save_plot}{directly save the plot [boolean]}
\item{save_param}{list of saving parameters from \code{\link{all_plots_save_function}}}
\item{default_save_name}{default save name for saving, don't change, change save_name in save_param}
}
\value{
ggplot
}
\description{
Visualize cells according to spatial AND dimension reduction coordinates 2D
}
\details{
Description of parameters.
}
\examples{
spatDimPlot2D(gobject)
}
\seealso{
\code{\link{spatDimPlot3D}}
}
| /man/spatDimPlot2D.Rd | permissive | zorrodong/Giotto | R | false | true | 5,588 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spatial_visuals.R
\name{spatDimPlot2D}
\alias{spatDimPlot2D}
\title{spatDimPlot2D}
\usage{
spatDimPlot2D(
gobject,
plot_alignment = c("vertical", "horizontal"),
dim_reduction_to_use = "umap",
dim_reduction_name = "umap",
dim1_to_use = 1,
dim2_to_use = 2,
sdimx = "sdimx",
sdimy = "sdimy",
spat_enr_names = NULL,
cell_color = NULL,
color_as_factor = T,
cell_color_code = NULL,
cell_color_gradient = c("blue", "white", "red"),
gradient_midpoint = NULL,
gradient_limits = NULL,
select_cell_groups = NULL,
select_cells = NULL,
dim_point_size = 1,
dim_point_border_col = "black",
dim_point_border_stroke = 0.1,
spat_point_size = 1,
spat_point_border_col = "black",
spat_point_border_stroke = 0.1,
dim_show_cluster_center = F,
dim_show_center_label = T,
dim_center_point_size = 4,
dim_center_point_border_col = "black",
dim_center_point_border_stroke = 0.1,
dim_label_size = 4,
dim_label_fontface = "bold",
spat_show_cluster_center = F,
spat_show_center_label = F,
spat_center_point_size = 4,
spat_label_size = 4,
spat_label_fontface = "bold",
show_NN_network = F,
nn_network_to_use = "sNN",
network_name = "sNN.pca",
nn_network_alpha = 0.05,
show_spatial_network = F,
spat_network_name = "spatial_network",
spat_network_color = "blue",
spat_network_alpha = 0.5,
show_spatial_grid = F,
spat_grid_name = "spatial_grid",
spat_grid_color = "blue",
show_other_cells = T,
other_cell_color = "lightgrey",
dim_other_point_size = 1,
spat_other_point_size = 1,
spat_other_cells_alpha = 0.5,
dim_show_legend = F,
spat_show_legend = F,
legend_text = 8,
axis_text = 8,
axis_title = 8,
show_plot = NA,
return_plot = NA,
save_plot = NA,
save_param = list(),
default_save_name = "spatDimPlot2D"
)
}
\arguments{
\item{gobject}{giotto object}
\item{plot_alignment}{direction to align plot}
\item{dim_reduction_to_use}{dimension reduction to use}
\item{dim_reduction_name}{dimension reduction name}
\item{dim1_to_use}{dimension to use on x-axis}
\item{dim2_to_use}{dimension to use on y-axis}
\item{sdimx}{spatial dimension to use on x-axis}
\item{sdimy}{spatial dimension to use on y-axis}
\item{spat_enr_names}{names of spatial enrichment results to include}
\item{cell_color}{color for cells (see details)}
\item{color_as_factor}{convert color column to factor}
\item{cell_color_code}{named vector with colors}
\item{cell_color_gradient}{vector with 3 colors for numeric data}
\item{gradient_midpoint}{midpoint for color gradient}
\item{gradient_limits}{vector with lower and upper limits}
\item{select_cell_groups}{select subset of cells/clusters based on cell_color parameter}
\item{select_cells}{select subset of cells based on cell IDs}
\item{dim_point_size}{size of points in dim. reduction space}
\item{dim_point_border_col}{border color of points in dim. reduction space}
\item{dim_point_border_stroke}{border stroke of points in dim. reduction space}
\item{spat_point_size}{size of spatial points}
\item{spat_point_border_col}{border color of spatial points}
\item{spat_point_border_stroke}{border stroke of spatial points}
\item{dim_show_cluster_center}{show the center of each cluster}
\item{dim_show_center_label}{provide a label for each cluster}
\item{dim_center_point_size}{size of the center point}
\item{dim_center_point_border_col}{border color of center point}
\item{dim_center_point_border_stroke}{stroke size of center point}
\item{dim_label_size}{size of the center label}
\item{dim_label_fontface}{font of the center label}
\item{spat_show_cluster_center}{show the center of each cluster}
\item{spat_show_center_label}{provide a label for each cluster}
\item{spat_center_point_size}{size of the center point}
\item{spat_label_size}{size of the center label}
\item{spat_label_fontface}{font of the center label}
\item{show_NN_network}{show underlying NN network}
\item{nn_network_to_use}{type of NN network to use (kNN vs sNN)}
\item{network_name}{name of NN network to use, if show_NN_network = TRUE}
\item{nn_network_alpha}{column to use for alpha of the edges}
\item{show_spatial_network}{show spatial network}
\item{spat_network_name}{name of spatial network to use}
\item{spat_network_color}{color of spatial network}
\item{show_spatial_grid}{show spatial grid}
\item{spat_grid_name}{name of spatial grid to use}
\item{spat_grid_color}{color of spatial grid}
\item{show_other_cells}{display not selected cells}
\item{other_cell_color}{color of not selected cells}
\item{dim_other_point_size}{size of not selected dim cells}
\item{spat_other_point_size}{size of not selected spat cells}
\item{spat_other_cells_alpha}{alpha of not selected spat cells}
\item{dim_show_legend}{show legend of dimension reduction plot}
\item{spat_show_legend}{show legend of spatial plot}
\item{legend_text}{size of legend text}
\item{axis_text}{size of axis text}
\item{axis_title}{size of axis title}
\item{show_plot}{show plot}
\item{return_plot}{return ggplot object}
\item{save_plot}{directly save the plot [boolean]}
\item{save_param}{list of saving parameters from \code{\link{all_plots_save_function}}}
\item{default_save_name}{default save name for saving, don't change, change save_name in save_param}
}
\value{
ggplot
}
\description{
Visualize cells according to spatial AND dimension reduction coordinates 2D
}
\details{
Description of parameters.
}
\examples{
spatDimPlot2D(gobject)
}
\seealso{
\code{\link{spatDimPlot3D}}
}
|
## ----nomessages, echo = FALSE-------------------------------------------------
knitr::opts_chunk$set(
warning = FALSE,
message = FALSE,
fig.height = 5,
fig.width = 5
)
options(digits=4)
par(mar=c(3,3,1,1)+.1)
## ----echo=FALSE---------------------------------------------------------------
set.seed(1)
## -----------------------------------------------------------------------------
library(SimDesign)
# SimFunctions(comments=FALSE)
Design <- createDesign(N = c(10,20,30))
## -----------------------------------------------------------------------------
Generate <- function(condition, fixed_objects = NULL) {
    # Simulate one data set for this design row: N iid standard-normal
    # draws, with N taken from the design condition.
    with(condition, rnorm(N))
}
Analyse <- function(condition, dat, fixed_objects = NULL) {
    # Demonstration analysis that randomly succeeds or fails, so the vignette
    # can show how runSimulation() catches and tabulates errors/warnings.
    # whc picks the outcome: 0 = success (70%), 1 = t.test() error from
    # missing arguments (20%), 2 = t.test() error from an invalid argument
    # (5%), 3 = manually thrown error (5%).
    whc <- sample(c(0,1,2,3), 1, prob = c(.7, .20, .05, .05))
    if(whc == 0){
        ret <- mean(dat)
    } else if(whc == 1){
        ret <- t.test() # missing arguments
    } else if(whc == 2){
        ret <- t.test('invalid') # invalid arguments
    } else if(whc == 3){
        # throw error manually
        stop('Manual error thrown')
    }
    # manual warnings: sampled independently of the error branches above,
    # so a successful replication can still emit one or both warnings
    if(sample(c(TRUE, FALSE), 1, prob = c(.1, .9)))
        warning('This warning happens rarely')
    if(sample(c(TRUE, FALSE), 1, prob = c(.5, .5)))
        warning('This warning happens much more often')
    ret
}
Summarise <- function(condition, results, fixed_objects = NULL) {
    # Collapse the replications into summary statistics: the bias of the
    # replicate estimates relative to the true value of 0.
    # bias() is supplied by the SimDesign package.
    ret <- c(bias = bias(results, 0))
    ret
}
## ----include=FALSE------------------------------------------------------------
set.seed(1)
## -----------------------------------------------------------------------------
result <- runSimulation(Design, replications = 100,
generate=Generate, analyse=Analyse, summarise=Summarise)
## -----------------------------------------------------------------------------
print(result)
## -----------------------------------------------------------------------------
SimExtract(result, what = 'errors')
## ----eval=FALSE---------------------------------------------------------------
# runSimulation(..., debug = 'error-4')
## -----------------------------------------------------------------------------
seeds <- SimExtract(result, what = 'error_seeds')
head(seeds[,1:3])
## ----eval=FALSE---------------------------------------------------------------
# picked_seed <- seeds$Design_row_1.1..Error.in.t.test.default..invalid.....not.enough..x..observations.
#
# # debug analyse() for first row of Design object via debug='analyse-1'
# runSimulation(Design, replications = 100, load_seed=picked_seed, debug='analyse-1',
# generate=Generate, analyse=Analyse, summarise=Summarise)
| /inst/doc/Catch_errors.R | no_license | cran/SimDesign | R | false | false | 2,655 | r | ## ----nomessages, echo = FALSE-------------------------------------------------
knitr::opts_chunk$set(
warning = FALSE,
message = FALSE,
fig.height = 5,
fig.width = 5
)
options(digits=4)
par(mar=c(3,3,1,1)+.1)
## ----echo=FALSE---------------------------------------------------------------
set.seed(1)
## -----------------------------------------------------------------------------
library(SimDesign)
# SimFunctions(comments=FALSE)
Design <- createDesign(N = c(10,20,30))
## -----------------------------------------------------------------------------
Generate <- function(condition, fixed_objects = NULL) {
    # Simulate one data set for this design row: N iid standard-normal
    # draws, with N taken from the design condition.
    with(condition, rnorm(N))
}
Analyse <- function(condition, dat, fixed_objects = NULL) {
    # Demonstration analysis that randomly succeeds or fails, so the vignette
    # can show how runSimulation() catches and tabulates errors/warnings.
    # whc picks the outcome: 0 = success (70%), 1 = t.test() error from
    # missing arguments (20%), 2 = t.test() error from an invalid argument
    # (5%), 3 = manually thrown error (5%).
    whc <- sample(c(0,1,2,3), 1, prob = c(.7, .20, .05, .05))
    if(whc == 0){
        ret <- mean(dat)
    } else if(whc == 1){
        ret <- t.test() # missing arguments
    } else if(whc == 2){
        ret <- t.test('invalid') # invalid arguments
    } else if(whc == 3){
        # throw error manually
        stop('Manual error thrown')
    }
    # manual warnings: sampled independently of the error branches above,
    # so a successful replication can still emit one or both warnings
    if(sample(c(TRUE, FALSE), 1, prob = c(.1, .9)))
        warning('This warning happens rarely')
    if(sample(c(TRUE, FALSE), 1, prob = c(.5, .5)))
        warning('This warning happens much more often')
    ret
}
Summarise <- function(condition, results, fixed_objects = NULL) {
    # Collapse the replications into summary statistics: the bias of the
    # replicate estimates relative to the true value of 0.
    # bias() is supplied by the SimDesign package.
    ret <- c(bias = bias(results, 0))
    ret
}
## ----include=FALSE------------------------------------------------------------
set.seed(1)
## -----------------------------------------------------------------------------
result <- runSimulation(Design, replications = 100,
generate=Generate, analyse=Analyse, summarise=Summarise)
## -----------------------------------------------------------------------------
print(result)
## -----------------------------------------------------------------------------
SimExtract(result, what = 'errors')
## ----eval=FALSE---------------------------------------------------------------
# runSimulation(..., debug = 'error-4')
## -----------------------------------------------------------------------------
seeds <- SimExtract(result, what = 'error_seeds')
head(seeds[,1:3])
## ----eval=FALSE---------------------------------------------------------------
# picked_seed <- seeds$Design_row_1.1..Error.in.t.test.default..invalid.....not.enough..x..observations.
#
# # debug analyse() for first row of Design object via debug='analyse-1'
# runSimulation(Design, replications = 100, load_seed=picked_seed, debug='analyse-1',
# generate=Generate, analyse=Analyse, summarise=Summarise)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FuncSpectraModel.R
\docType{class}
\name{FuncSpectraModel}
\alias{FuncSpectraModel}
\alias{FuncSpectraModel-class}
\title{Definition of the [\code{\linkS4class{FuncSpectraModel}}] class}
\description{
This class defines a Gaussian model for the spectrum with mean
and variance varying over time. It inherits
from [\code{\linkS4class{ICloHeModel}}].
}
\section{Slots}{
\describe{
\item{\code{mu}}{matrix with the sampled (100 times) mean spectrum.}
\item{\code{sigma2}}{vector with the variance of each spectrum}
}}
\examples{
getSlots("FuncSpectraModel")
}
\seealso{
[\code{\linkS4class{ICloHeModel}}] class
}
\author{
Serge Iovleff
}
| /CloHe/man/FuncSpectraModel-class.Rd | no_license | joblion/scisc | R | false | true | 722 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FuncSpectraModel.R
\docType{class}
\name{FuncSpectraModel}
\alias{FuncSpectraModel}
\alias{FuncSpectraModel-class}
\title{Definition of the [\code{\linkS4class{FuncSpectraModel}}] class}
\description{
This class defines a Gaussian model for the spectrum with mean
and variance varying along the time. It inherits
from [\code{\linkS4class{ICloHeModel}}].
}
\section{Slots}{
\describe{
\item{\code{mu}}{matrix with the sampled (100 times) mean spectrum.}
\item{\code{sigma2}}{vector with the variance of each spectrum}
}}
\examples{
getSlots("FuncSpectraModel")
}
\seealso{
[\code{\linkS4class{ICloHeModel}}] class
}
\author{
Serge Iovleff
}
|
g.sim <- simulate(network(16) ~ edges + mutual, coef=c(0, 0))
summary(g.sim ~ edges + mutual)
#################
n = 5;
# random network object:
mat = matrix(round(runif(n^2, max = 0.8, min = -0.4)), nrow = n)
net <- network(mat, directed = FALSE)
# adding attributes:
net%v%"race" = c("W","W", "B", "H", "O")
net%v%"gender" = c("F", "M", "F", "F", "M")
g.sim <- simulate(~edges+kstar(2), coef=c(-1.8,0.03),
basis=net, control=control.simulate(
MCMC.burnin=100000,
MCMC.interval=1000))
logit<-function(p)log(p/(1-p))
coef.form.f<-function(coef.diss,density) -log(((1+exp(coef.diss))/(density/(1-density)))-1)
# Construct a network with 20 nodes and 20 edges
n<-20
target.stats<-edges<-20
g0<-network.initialize(n,dir=TRUE)
g1<-san(g0~edges,target.stats=target.stats,verbose=TRUE)
S<-10
# To get an average duration of 10...
duration<-10
coef.diss<-logit(1-1/duration)
# To get an average of 20 edges...
dyads<-network.dyadcount(g1)
density<-edges/dyads
coef.form<-coef.form.f(coef.diss,density)
# ... coefficients.
print(coef.form)
print(coef.diss)
# Simulate a networkDynamic
dynsim<-simulate(g1,formation=~edges,dissolution=~edges,coef.form=coef.form,coef.diss=coef.diss,time.slices=S,verbose=TRUE)
# "Resume" the simulation.
dynsim2<-simulate(dynsim,time.slices=S,verbose=TRUE)
| /Simulation.R | no_license | smtwtfs/paperRelated | R | false | false | 1,378 | r | g.sim <- simulate(network(16) ~ edges + mutual, coef=c(0, 0))
summary(g.sim ~ edges + mutual)
#################
n = 5;
# random network object:
mat = matrix(round(runif(n^2, max = 0.8, min = -0.4)), nrow = n)
net <- network(mat, directed = FALSE)
# adding attributes:
net%v%"race" = c("W","W", "B", "H", "O")
net%v%"gender" = c("F", "M", "F", "F", "M")
g.sim <- simulate(~edges+kstar(2), coef=c(-1.8,0.03),
basis=net, control=control.simulate(
MCMC.burnin=100000,
MCMC.interval=1000))
logit<-function(p)log(p/(1-p))
coef.form.f<-function(coef.diss,density) -log(((1+exp(coef.diss))/(density/(1-density)))-1)
# Construct a network with 20 nodes and 20 edges
n<-20
target.stats<-edges<-20
g0<-network.initialize(n,dir=TRUE)
g1<-san(g0~edges,target.stats=target.stats,verbose=TRUE)
S<-10
# To get an average duration of 10...
duration<-10
coef.diss<-logit(1-1/duration)
# To get an average of 20 edges...
dyads<-network.dyadcount(g1)
density<-edges/dyads
coef.form<-coef.form.f(coef.diss,density)
# ... coefficients.
print(coef.form)
print(coef.diss)
# Simulate a networkDynamic
dynsim<-simulate(g1,formation=~edges,dissolution=~edges,coef.form=coef.form,coef.diss=coef.diss,time.slices=S,verbose=TRUE)
# "Resume" the simulation.
dynsim2<-simulate(dynsim,time.slices=S,verbose=TRUE)
|
/KGE_boxplot.R | no_license | ElinLangsholt/Rskript | R | false | false | 2,286 | r | ||
# gifmaker.org
# location of images
rfolder <- "D:/s2/mci_resample20"
safe_folder <- "D:/s2/raw"
mci_files <- list.files(rfolder, pattern = ".data")
mci_files_jlake <- unique(grep("17SPV", mci_files, value = TRUE)) # 9
mci_files_utah <- unique(grep("12TVK", mci_files, value = TRUE)) # none
jlake_dir <- "O:/PRIV/NERL_ORD_CYAN/Salls_working/Presentations/AGU2018/data/jordanlake"
#file.copy(from = file.path(rfolder, mci_files_jord), to = jlake_dir, recursive = TRUE) # too slow!!
library(raster)
imglist <- list()
for (i in seq_along(mci_files_jlake)) {
rast <- raster(file.path(rfolder, mci_files_jlake[i], "MCI.img"))
imglist <- c(imglist, rast)
plot(rast)
}
# ---------------------------------------------
library(rgdal)
library(raster)
# set image folder paths
#img_folder <- "C:/Users/WSalls/Desktop/s2_imgs_agu/mci/jordan" #jordan or utah
#img_folder <- "/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/data/mci/jordan"
#img_folder <- "O:/PRIV/NERL_ORD_CYAN/Salls_working/Presentations/AGU2018/data/mci/jordan"
img_folder <- "O:/PRIV/NERL_ORD_CYAN/Sentinel2/Images/jordan_imagery"
# specify MCI img location
#imgs <- list.files(img_folder, pattern = ".data")
#imgs <- c("mci_resample20_S2B_MSIL1C_20180429T155859_N0206_R097_T17SPV_20180429T194054.data") # L1C
imgs <- c("mci_rayleigh_resample20_S2B_MSIL1C_20180429T155859_N0206_R097_T17SPV_20180429T194054.data") # BRR
lakename <- "jordan" # jordan OR utah
coeffs <- "ontario" # ontario OR erie
# load lake shp
#lakes <- readOGR("O:/PRIV/NERL_ORD_CYAN/Salls_working/geospatial_general/resolvableLakes/NHD_NLA_shoredist", "nhd_nla_subset_shore_dist")
#lake_poly_raw <- lakes[which(lakes$COMID == 166755060), ] #166755060 for jordan; xx for utah
lake_poly_raw <- readOGR("O:/PRIV/NERL_ORD_CYAN/Sentinel2/Images/jordan_imagery", "JordanLake")
rast <- raster(file.path(img_folder, imgs[1], "MCI.img"))
lake_poly <- spTransform (lake_poly_raw, crs(rast))
##
#rast_out_dir <- file.path("/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/data/mci_cropped/", lakename)
#rast_out_dir <- file.path("/Users/wilsonsalls/Desktop/EPA/Sentinel2/Images/mci_demo_paper")
rast_out_dir <- "O:/PRIV/NERL_ORD_CYAN/Sentinel2/Images/jordan_imagery"
# specify BRR img location (for baseline slope calc)
brr_dir <- "O:/PRIV/NERL_ORD_CYAN/Sentinel2/Images/jordan_imagery"
## clip, remove edges, convert to chlorophyll, save new rasters ---------------
for (i in seq_along(imgs)) {
  # Date stamp embedded at a fixed position in the .data directory name.
  idate <- substr(imgs[i], 36, 43)
  print(sprintf("image %s of %s: %s", i, length(imgs), idate))
  # load MCI raster
  mci <- raster(file.path(img_folder, imgs[i], "MCI.img"))
  # clip to lake: mask() blanks values outside the polygon, crop() shrinks extent
  mci_mask <- mask(mci, lake_poly) # MCI raster values
  mci_crop <- crop(mci_mask, lake_poly) # MCI raster extent
  # remove edges: the focal weights keep only the centre cell, but 0 * NA is
  # still NA, so any pixel touching an NA neighbour becomes NA (one-pixel erosion)
  mci_crop <- focal(mci_crop, matrix(c(0,0,0,0,1,0,0,0,0), nrow = 3))
  # convert MCI to chlorophyll with a lake-specific linear calibration:
  # chl = (MCI - intercept) / slope
  if (coeffs == "ontario") {
    chl <- (mci_crop - (-0.0012)) / 0.0002 # ontario
  } else if (coeffs == "erie") {
    chl <- (mci_crop - (-0.0021)) / 0.0004 # erie
  }
  # negative chlorophyll is not meaningful; flag with a -1 sentinel for later use
  values(chl)[which(values(chl) < 0)] <- -1
  ## remove sediment-affected water
  # load the Rayleigh-corrected band 4 and band 6 rasters for this scene
  brr_folder <- file.path(brr_dir, sub("mci_", "", imgs[i]))
  b4 <- raster(file.path(brr_folder, list.files(brr_folder, "*_B4.img")))
  b6 <- raster(file.path(brr_folder, list.files(brr_folder, "*_B6.img")))
  brr_brick <- brick(c(b4, b6))
  brr_brick_mask <- mask(brr_brick, mask = lake_poly)
  brr_brick_lake <- crop(brr_brick_mask, lake_poly)
  # baseline slope between 655 nm (B4) and 740 nm (B6), expressed as 10^-4 nm-1
  baseline_slope <- (brr_brick_lake[[2]] - brr_brick_lake[[1]]) / (740 - 655) * 10000
  # strongly negative slope flags sediment-affected pixels; mark them -1 as well
  chl[baseline_slope < -1.5] <- -1
  # write raster with a .tif extension so the GeoTIFF output matches the
  # "GTiff" format argument AND is found later by list.files(pattern = ".tif");
  # the original ".png" name was never picked up downstream
  writeRaster(chl, file.path(rast_out_dir,
                             sprintf("chlorophyll_BRR_%s_%s_%s.tif", coeffs, lakename, idate)), "GTiff")
}
## --------------------------------------------------------
### plot
# set location to save images
#setwd("/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/imgs")
#setwd("/Users/wilsonsalls/Desktop/EPA/Sentinel2/Images/mci_demo_paper")
setwd(rast_out_dir)
# set location to read rasters from
chl_rasts <- list.files(rast_out_dir, pattern = ".tif")
## plot chlorophyll, save ---------------
# preset min and max
#minc <- NA
#maxc <- NA
library(viridis)
rast_crop <- chl
rast_crop[rast_crop < 0] <- NA
maxc <- max(values(chl), na.rm = TRUE)
hist(chl)
max.color.val <- 70.5
# plot
# Track the observed chlorophyll range across rasters while plotting each date.
# minc must exist before the first min() call inside the loop; its original
# initialisation was commented out above, which made the loop fail with
# "object 'minc' not found" on the first iteration.
minc <- Inf
for (i in seq_along(chl_rasts)) {
  # date stamp is the 8 characters preceding the ".tif" extension
  idate <- substr(chl_rasts[i], nchar(chl_rasts[i]) - 11, nchar(chl_rasts[i]) - 4)
  print(sprintf("image %s of %s: %s", i, length(chl_rasts), idate))
  # read raster
  rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
  # plot: viridis ramp with 1-unit breaks up to max.color.val, a final bin to
  # maxc + 0.1 catching everything above, axes/legend suppressed
  jpeg(sprintf("chl_%s_%s_%s.jpg", coeffs, lakename, idate), width = 1800, height = 3600, res = 300)
  plot(rast_crop,
       cex.main = 4,
       xaxt = "n", yaxt = "n", box = FALSE, bty = "n",
       col = viridis(max.color.val),
       breaks = c(seq(0, max.color.val, by = 1), maxc + 0.1),
       legend = FALSE,
       colNA = NA)
  plot(lake_poly, add = TRUE)
  dev.off()
  # update the running min and max to inform later plotting choices
  minc <- min(minc, minValue(rast_crop), na.rm = TRUE)
  maxc <- max(maxc, maxValue(rast_crop), na.rm = TRUE)
}
minc
maxc
# for legend
#rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
values(rast_crop)[values(rast_crop) > max.color.val] <- NA
jpeg(sprintf("chl_leg_%s_%s_%s.jpg", coeffs, lakename, idate), width = 750, height = 500, res = 300)
plot(rast_crop,
#main = paste0(substr(idate, 1, 4), "-", substr(idate, 5, 6), "-", substr(idate, 7, 8)),
cex.main = 4,
xaxt = "n", yaxt = "n", box = FALSE, bty = "n",
#col = colorRampPalette(c("blue", "green", "yellow"))(255),
col = viridis(max.color.val),
#breaks = c(seq(1, max.color.val, length.out = max.color.val), 120),
colNA = NA)
dev.off()
#
## plot trophic state, save ---------------
# Classify the chlorophyll raster into trophic-state bins (breaks at 2, 7,
# and 30) and save one JPEG per entry in chl_rasts.
rast_crop <- chl
rast_crop[rast_crop < 0] <- NA   # drop the -1 "masked" sentinel values
# plot
for (i in seq_along(chl_rasts)) {
  # NOTE(review): substr(., 28, 35) assumes a fixed filename layout; the
  # chlorophyll loop above derives the date from nchar() instead -- confirm
  # which is correct for the current file names.
  idate <- substr(chl_rasts[i], 28, 35)
  print(sprintf("image %s of %s: %s", i, length(chl_rasts), idate))
  # read raster
  # NOTE(review): the per-file raster() read below is commented out, so every
  # iteration plots the same in-memory chl-derived raster under a different
  # file name -- confirm this is intended.
  #rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
  # plot: four plasma colours, one per trophic-state bin; axes and legend off
  jpeg(sprintf("trophic_%s_%s_%s.jpg", coeffs, lakename, idate), width = 1800, height = 3600, res = 300)
  plot(rast_crop,
       cex.main = 4,
       xaxt = "n", yaxt = "n", box = FALSE, bty = "n",
       col = plasma(4),
       breaks = c(0, 2, 7, 30, max.color.val),
       legend = FALSE,
       colNA = NA)
  plot(lake_poly, add = TRUE)
  dev.off()
}
# colors used
plasma(4)
#
# blank plot
rast_blank <- rast_crop
values(rast_blank) <- NA
jpeg("xblank_white.png", width = 600, height = 1200)
plot(rast_blank, colNA = NA, xaxt = "n", yaxt = "n", box = FALSE, bty = "n")
dev.off()
jpeg("xblank_black.png", width = 600, height = 1200)
plot(rast_blank, colNA = "black", xaxt = "n", yaxt = "n", box = FALSE, bty = "n")
dev.off()
## pie charts----------------------------
#setwd("/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/imgs/pie")
rast_crop <- chl
clrs <- plasma(4)
#trophic_counts <- data.frame()
for (i in seq_along(chl_rasts)) {
idate <- substr(chl_rasts[i], 28, 35)
print(sprintf("image %s of %s: %s", i, length(chl_rasts), idate))
# read raster
rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
# remove NAs
rast_crop[rast_crop < 0] <- NA
vals <- values(rast_crop)[!is.na(values(rast_crop))]
oligotrophic <- sum(vals < 2)
mesotrophic <- sum(vals >= 2 & vals < 7)
eutrophic <- sum(vals >= 7 & vals < 30)
hypereutrophic <- sum(vals >= 30)
#total <- length(vals)
#trophic_counts <- rbind(trophic_counts, data.frame(oligotrophic, mesotrophic, eutrophic, hypereutrophic, total))
jpeg(sprintf("pie_%s_%s_%s.jpg", coeffs, lakename, idate), width = 600, height = 600)
pie(c(oligotrophic, mesotrophic, eutrophic, hypereutrophic),
clockwise = TRUE, init.angle = 90, labels=NA, col=clrs, main = sprintf("%s: %s", lakename, idate))
dev.off()
}
## pie charts, show NA ----------------------------
#setwd("/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/imgs/pie")
rast_crop <- chl
clrs <- plasma(4)
#trophic_counts <- data.frame()
#rast_crop <- raster("chlorophyll_BRR_jordan_sed_NAneg1_20180429.tif")
#for (i in seq_along(chl_rasts)) {
idate <- substr(chl_rasts[i], 28, 35)
print(sprintf("image %s of %s: %s", i, length(chl_rasts), idate))
# read raster
#rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
# count NAs
na <- sum(values(rast_crop) == -1, na.rm = TRUE)
# remove NAs
vals <- values(rast_crop)[!is.na(values(rast_crop))]
oligotrophic <- sum(vals < 2 & vals >= 0)
mesotrophic <- sum(vals >= 2 & vals < 7)
eutrophic <- sum(vals >= 7 & vals < 30)
hypereutrophic <- sum(vals >= 30)
#total <- length(vals)
#trophic_counts <- rbind(trophic_counts, data.frame(oligotrophic, mesotrophic, eutrophic, hypereutrophic, total))
jpeg(sprintf("pie_%s_%s_NAs_%s.jpg", coeffs, lakename, idate), width = 1800, height = 1800, res = 300)
pie(c(oligotrophic, mesotrophic, eutrophic, hypereutrophic, na),
clockwise = TRUE, init.angle = 90, labels=NA, col= c(clrs, "white")) #, main = sprintf("%s: %s", lakename, idate)
dev.off()
#}
# where to change raster options
showMethods("plot")
getMethod("plot", c("Raster", "ANY"))
getAnywhere(".plotraster2")
getAnywhere(".rasterImagePlot")
args(raster:::.rasterImagePlot)
| /old/make_imgs.R | no_license | wbsalls/Sent2 | R | false | false | 9,921 | r | # gifmaker.org
# location of images
rfolder <- "D:/s2/mci_resample20"
safe_folder <- "D:/s2/raw"
mci_files <- list.files(rfolder, pattern = ".data")
mci_files_jlake <- unique(grep("17SPV", mci_files, value = TRUE)) # 9
mci_files_utah <- unique(grep("12TVK", mci_files, value = TRUE)) # none
jlake_dir <- "O:/PRIV/NERL_ORD_CYAN/Salls_working/Presentations/AGU2018/data/jordanlake"
#file.copy(from = file.path(rfolder, mci_files_jord), to = jlake_dir, recursive = TRUE) # too slow!!
library(raster)
imglist <- list()
for (i in seq_along(mci_files_jlake)) {
rast <- raster(file.path(rfolder, mci_files_jlake[i], "MCI.img"))
imglist <- c(imglist, rast)
plot(rast)
}
# ---------------------------------------------
library(rgdal)
library(raster)
# set image folder paths
#img_folder <- "C:/Users/WSalls/Desktop/s2_imgs_agu/mci/jordan" #jordan or utah
#img_folder <- "/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/data/mci/jordan"
#img_folder <- "O:/PRIV/NERL_ORD_CYAN/Salls_working/Presentations/AGU2018/data/mci/jordan"
img_folder <- "O:/PRIV/NERL_ORD_CYAN/Sentinel2/Images/jordan_imagery"
# specify MCI img location
#imgs <- list.files(img_folder, pattern = ".data")
#imgs <- c("mci_resample20_S2B_MSIL1C_20180429T155859_N0206_R097_T17SPV_20180429T194054.data") # L1C
imgs <- c("mci_rayleigh_resample20_S2B_MSIL1C_20180429T155859_N0206_R097_T17SPV_20180429T194054.data") # BRR
lakename <- "jordan" # jordan OR utah
coeffs <- "ontario" # ontario OR erie
# load lake shp
#lakes <- readOGR("O:/PRIV/NERL_ORD_CYAN/Salls_working/geospatial_general/resolvableLakes/NHD_NLA_shoredist", "nhd_nla_subset_shore_dist")
#lake_poly_raw <- lakes[which(lakes$COMID == 166755060), ] #166755060 for jordan; xx for utah
lake_poly_raw <- readOGR("O:/PRIV/NERL_ORD_CYAN/Sentinel2/Images/jordan_imagery", "JordanLake")
rast <- raster(file.path(img_folder, imgs[1], "MCI.img"))
lake_poly <- spTransform (lake_poly_raw, crs(rast))
##
#rast_out_dir <- file.path("/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/data/mci_cropped/", lakename)
#rast_out_dir <- file.path("/Users/wilsonsalls/Desktop/EPA/Sentinel2/Images/mci_demo_paper")
rast_out_dir <- "O:/PRIV/NERL_ORD_CYAN/Sentinel2/Images/jordan_imagery"
# specify BRR img location (for baseline slope calc)
brr_dir <- "O:/PRIV/NERL_ORD_CYAN/Sentinel2/Images/jordan_imagery"
## clip, remove edges, convert to chlorophyll, save new rasters ---------------
for (i in seq_along(imgs)) {
  # Date stamp embedded at a fixed position in the .data directory name.
  idate <- substr(imgs[i], 36, 43)
  print(sprintf("image %s of %s: %s", i, length(imgs), idate))
  # load MCI raster
  mci <- raster(file.path(img_folder, imgs[i], "MCI.img"))
  # clip to lake: mask() blanks values outside the polygon, crop() shrinks extent
  mci_mask <- mask(mci, lake_poly) # MCI raster values
  mci_crop <- crop(mci_mask, lake_poly) # MCI raster extent
  # remove edges: the focal weights keep only the centre cell, but 0 * NA is
  # still NA, so any pixel touching an NA neighbour becomes NA (one-pixel erosion)
  mci_crop <- focal(mci_crop, matrix(c(0,0,0,0,1,0,0,0,0), nrow = 3))
  # convert MCI to chlorophyll with a lake-specific linear calibration:
  # chl = (MCI - intercept) / slope
  if (coeffs == "ontario") {
    chl <- (mci_crop - (-0.0012)) / 0.0002 # ontario
  } else if (coeffs == "erie") {
    chl <- (mci_crop - (-0.0021)) / 0.0004 # erie
  }
  # negative chlorophyll is not meaningful; flag with a -1 sentinel for later use
  values(chl)[which(values(chl) < 0)] <- -1
  ## remove sediment-affected water
  # load the Rayleigh-corrected band 4 and band 6 rasters for this scene
  brr_folder <- file.path(brr_dir, sub("mci_", "", imgs[i]))
  b4 <- raster(file.path(brr_folder, list.files(brr_folder, "*_B4.img")))
  b6 <- raster(file.path(brr_folder, list.files(brr_folder, "*_B6.img")))
  brr_brick <- brick(c(b4, b6))
  brr_brick_mask <- mask(brr_brick, mask = lake_poly)
  brr_brick_lake <- crop(brr_brick_mask, lake_poly)
  # baseline slope between 655 nm (B4) and 740 nm (B6), expressed as 10^-4 nm-1
  baseline_slope <- (brr_brick_lake[[2]] - brr_brick_lake[[1]]) / (740 - 655) * 10000
  # strongly negative slope flags sediment-affected pixels; mark them -1 as well
  chl[baseline_slope < -1.5] <- -1
  # write raster with a .tif extension so the GeoTIFF output matches the
  # "GTiff" format argument AND is found later by list.files(pattern = ".tif");
  # the original ".png" name was never picked up downstream
  writeRaster(chl, file.path(rast_out_dir,
                             sprintf("chlorophyll_BRR_%s_%s_%s.tif", coeffs, lakename, idate)), "GTiff")
}
## --------------------------------------------------------
### plot
# set location to save images
#setwd("/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/imgs")
#setwd("/Users/wilsonsalls/Desktop/EPA/Sentinel2/Images/mci_demo_paper")
setwd(rast_out_dir)
# set location to read rasters from
chl_rasts <- list.files(rast_out_dir, pattern = ".tif")
## plot chlorophyll, save ---------------
# preset min and max
#minc <- NA
#maxc <- NA
library(viridis)
rast_crop <- chl
rast_crop[rast_crop < 0] <- NA
maxc <- max(values(chl), na.rm = TRUE)
hist(chl)
max.color.val <- 70.5
# plot
for (i in seq_along(chl_rasts)) {
idate <- substr(chl_rasts[i], nchar(chl_rasts[i]) - 11, nchar(chl_rasts[i]) - 4)
print(sprintf("image %s of %s: %s", i, length(chl_rasts), idate))
# read raster
rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
# plot
jpeg(sprintf("chl_%s_%s_%s.jpg", coeffs, lakename, idate), width = 1800, height = 3600, res = 300) #, width = 600, height = 1200, res = 300
plot(rast_crop,
#main = paste0(substr(idate, 1, 4), "-", substr(idate, 5, 6), "-", substr(idate, 7, 8)),
cex.main = 4,
xaxt = "n", yaxt = "n", box = FALSE, bty = "n",
#col = colorRampPalette(c("blue", "green", "yellow"))(255),
col = viridis(max.color.val),
breaks = c(seq(0, max.color.val, by = 1), maxc + 0.1),
legend = FALSE,
colNA = NA)
plot(lake_poly, add = TRUE)
dev.off()
# get min and max to improve plotting
minc <- min(minc, minValue(rast_crop), na.rm = T)
maxc <- max(maxc, maxValue(rast_crop), na.rm = T)
}
minc
maxc
# for legend
#rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
values(rast_crop)[values(rast_crop) > max.color.val] <- NA
jpeg(sprintf("chl_leg_%s_%s_%s.jpg", coeffs, lakename, idate), width = 750, height = 500, res = 300)
plot(rast_crop,
#main = paste0(substr(idate, 1, 4), "-", substr(idate, 5, 6), "-", substr(idate, 7, 8)),
cex.main = 4,
xaxt = "n", yaxt = "n", box = FALSE, bty = "n",
#col = colorRampPalette(c("blue", "green", "yellow"))(255),
col = viridis(max.color.val),
#breaks = c(seq(1, max.color.val, length.out = max.color.val), 120),
colNA = NA)
dev.off()
#
## plot trophic state, save ---------------
rast_crop <- chl
rast_crop[rast_crop < 0] <- NA
# plot
for (i in seq_along(chl_rasts)) {
idate <- substr(chl_rasts[i], 28, 35)
print(sprintf("image %s of %s: %s", i, length(chl_rasts), idate))
# read raster
#rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
# plot
jpeg(sprintf("trophic_%s_%s_%s.jpg", coeffs, lakename, idate), width = 1800, height = 3600, res = 300)
plot(rast_crop,
#main = paste0(substr(idate, 1, 4), "-", substr(idate, 5, 6), "-", substr(idate, 7, 8)),
cex.main = 4,
xaxt = "n", yaxt = "n", box = FALSE, bty = "n",
col = plasma(4),
breaks = c(0, 2, 7, 30, max.color.val),
legend = FALSE,
colNA = NA)
plot(lake_poly, add = TRUE)
dev.off()
}
# colors used
plasma(4)
#
# blank plot
rast_blank <- rast_crop
values(rast_blank) <- NA
jpeg("xblank_white.png", width = 600, height = 1200)
plot(rast_blank, colNA = NA, xaxt = "n", yaxt = "n", box = FALSE, bty = "n")
dev.off()
jpeg("xblank_black.png", width = 600, height = 1200)
plot(rast_blank, colNA = "black", xaxt = "n", yaxt = "n", box = FALSE, bty = "n")
dev.off()
## pie charts----------------------------
#setwd("/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/imgs/pie")
rast_crop <- chl
clrs <- plasma(4)
#trophic_counts <- data.frame()
for (i in seq_along(chl_rasts)) {
idate <- substr(chl_rasts[i], 28, 35)
print(sprintf("image %s of %s: %s", i, length(chl_rasts), idate))
# read raster
rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
# remove NAs
rast_crop[rast_crop < 0] <- NA
vals <- values(rast_crop)[!is.na(values(rast_crop))]
oligotrophic <- sum(vals < 2)
mesotrophic <- sum(vals >= 2 & vals < 7)
eutrophic <- sum(vals >= 7 & vals < 30)
hypereutrophic <- sum(vals >= 30)
#total <- length(vals)
#trophic_counts <- rbind(trophic_counts, data.frame(oligotrophic, mesotrophic, eutrophic, hypereutrophic, total))
jpeg(sprintf("pie_%s_%s_%s.jpg", coeffs, lakename, idate), width = 600, height = 600)
pie(c(oligotrophic, mesotrophic, eutrophic, hypereutrophic),
clockwise = TRUE, init.angle = 90, labels=NA, col=clrs, main = sprintf("%s: %s", lakename, idate))
dev.off()
}
## pie charts, show NA ----------------------------
#setwd("/Users/wilsonsalls/Desktop/EPA/Presentations/AGU2018/imgs/pie")
rast_crop <- chl
clrs <- plasma(4)
#trophic_counts <- data.frame()
#rast_crop <- raster("chlorophyll_BRR_jordan_sed_NAneg1_20180429.tif")
#for (i in seq_along(chl_rasts)) {
idate <- substr(chl_rasts[i], 28, 35)
print(sprintf("image %s of %s: %s", i, length(chl_rasts), idate))
# read raster
#rast_crop <- raster(file.path(rast_out_dir, chl_rasts[i]))
# count NAs
na <- sum(values(rast_crop) == -1, na.rm = TRUE)
# remove NAs
vals <- values(rast_crop)[!is.na(values(rast_crop))]
oligotrophic <- sum(vals < 2 & vals >= 0)
mesotrophic <- sum(vals >= 2 & vals < 7)
eutrophic <- sum(vals >= 7 & vals < 30)
hypereutrophic <- sum(vals >= 30)
#total <- length(vals)
#trophic_counts <- rbind(trophic_counts, data.frame(oligotrophic, mesotrophic, eutrophic, hypereutrophic, total))
jpeg(sprintf("pie_%s_%s_NAs_%s.jpg", coeffs, lakename, idate), width = 1800, height = 1800, res = 300)
pie(c(oligotrophic, mesotrophic, eutrophic, hypereutrophic, na),
clockwise = TRUE, init.angle = 90, labels=NA, col= c(clrs, "white")) #, main = sprintf("%s: %s", lakename, idate)
dev.off()
#}
# where to change raster options
showMethods("plot")
getMethod("plot", c("Raster", "ANY"))
getAnywhere(".plotraster2")
getAnywhere(".rasterImagePlot")
args(raster:::.rasterImagePlot)
|
# Validate the delineation stored in `ECGData` against a reference
# annotation file (table with V2 = sample position, V3 = mnemonic).
#
# Args:
#   ECGData: list with the algorithm's annotations/delineation; the
#     validation result is stored back into ECGData$validation.
#   reference.annotation.file: name of the reference annotation table.
#   path: directory containing the reference file (concatenated as-is,
#     so it must end with a path separator).
#   mnemonic: wave to validate -- "N" (QRS), "p" (P wave) or "t" (T wave).
#   ...: unused; kept for call compatibility.
#
# Returns: ECGData with ECGData$validation$qrs / $p / $t filled in.
ECGDelineatorValidation <- function(ECGData, reference.annotation.file, path="./", mnemonic = "N", ...){
  match.window <- 0.15 * ECGData$samplingFreq # 0.15 s matching window, in samples
  file <- paste(path, reference.annotation.file, sep = "")
  reference.annotations <- read.table(file)
  algorithm.annotations <- getECGDataAnnotations(ECGData, mnemonic)
  # Keep only the algorithm annotations inside the reference time window.
  algorithm.annotations <- filterECGDataAnnotations( reference.annotations, algorithm.annotations )
  # The row before each peak mnemonic is taken as its onset and the row
  # after it as its end (reference files annotate waves as triplets).
  reference.annotations.peaks <- reference.annotations$V2[which(reference.annotations$V3 == mnemonic)]
  reference.annotations.onsets <- reference.annotations$V2[which(reference.annotations$V3 == mnemonic) - 1]
  reference.annotations.ends <- reference.annotations$V2[which(reference.annotations$V3 == mnemonic) + 1]
  mnemonics.onsets <- reference.annotations$V3[which(reference.annotations$V3 == mnemonic) - 1]
  number.of.annotations <- length(reference.annotations.peaks)
  validation <- createValidationList(number.of.annotations)
  validation <- applyValidation( number.of.annotations, algorithm.annotations$peaks, reference.annotations.peaks, algorithm.annotations$onsets,
                                 reference.annotations.onsets, algorithm.annotations$ends, reference.annotations.ends, mnemonics.onsets,
                                 match.window, validation )
  # Store the result under the slot matching the validated wave.
  if( mnemonic == "N"){
    ECGData$validation$qrs <- validation
  }
  if( mnemonic == "p"){
    ECGData$validation$p <- validation
  }
  if( mnemonic == "t"){
    ECGData$validation$t <- validation
  }
  return(ECGData)
}
# Collect the algorithm's peak/onset/end position vectors for the
# requested wave mnemonic ("N" = QRS, "p" = P wave, "t" = T wave).
# Returns an empty list for any other mnemonic.
getECGDataAnnotations <- function(ECGData, mnemonic){
  result <- list()
  if (mnemonic == "N") {
    result$peaks <- ECGData$annot$pos
    result$onsets <- ECGData$delineation$qrs.onset.positions
    result$ends <- ECGData$delineation$qrs.end.positions
  } else if (mnemonic == "p") {
    result$peaks <- ECGData$delineation$p.peak.positions
    result$onsets <- ECGData$delineation$p.onset
    result$ends <- ECGData$delineation$p.end
  } else if (mnemonic == "t") {
    result$peaks <- ECGData$delineation$t.peak.positions
    result$onsets <- ECGData$delineation$t.onset
    result$ends <- ECGData$delineation$t.end
  }
  result
}
# Obtain the first and last positions of the reference annotation file and select the valid algorithm positions within them.
# Restrict the algorithm annotations to the time window covered by the
# reference file: only peaks strictly between the first and last
# reference positions are kept (onsets/ends follow their peak's index).
filterECGDataAnnotations <- function( reference.annotations, algorithm.annotations ){
  window.start <- reference.annotations$V2[1]
  window.end <- reference.annotations$V2[length(reference.annotations$V2)]
  in.window <- which(algorithm.annotations$peaks > window.start &
                     algorithm.annotations$peaks < window.end)
  algorithm.annotations$peaks <- algorithm.annotations$peaks[in.window]
  algorithm.annotations$onsets <- algorithm.annotations$onsets[in.window]
  algorithm.annotations$ends <- algorithm.annotations$ends[in.window]
  algorithm.annotations
}
# Match algorithm annotations against reference annotations and record
# the onset/peak/end differences (in samples) for every matched beat.
#
# Both position vectors are walked with two cursors; a pair of peaks
# closer than `match.window` samples counts as a match.  Onsets are only
# compared when the reference onset mnemonic is "(" -- the T wave may
# lack an annotated onset.
#
# Args:
#   number.of.annotations: number of reference annotations to process.
#   peak.positions / onset.positions / end.positions: algorithm output.
#   peak.positions.ref / onset.positions.ref / end.positions.ref:
#     reference (gold standard) positions.
#   mnemonics.onsets: reference mnemonic preceding each peak row.
#   match.window: maximum peak distance (samples) for a match.
#   validation: preallocated result list (see createValidationList).
#
# Returns: `validation` with the per-match difference vectors plus the
#   `number.of.matchs` and `number.total` counters filled in.
applyValidation <- function(number.of.annotations, peak.positions, peak.positions.ref, onset.positions, onset.positions.ref,
                            end.positions, end.positions.ref, mnemonics.onsets, match.window, validation ){
  pos <- 1
  ref.pos <- 1
  cont.validas <- 1
  # Walk every annotation of the reference file.  Also stop once the
  # algorithm annotations are exhausted: the original `ref.pos` bound
  # alone let `pos` index past `peak.positions`, yielding NA and a
  # runtime error in the comparisons below.
  while (ref.pos <= number.of.annotations && pos <= length(peak.positions)) {
    peak.pos <- peak.positions[pos]
    peak.pos.ref <- peak.positions.ref[ref.pos]
    is.valid.mnemonic <- mnemonics.onsets[ref.pos] == "("
    if (is.valid.mnemonic) { # onset may be absent (T-wave case)
      onset.pos <- onset.positions[pos]
      onset.pos.ref <- onset.positions.ref[ref.pos]
    }
    end.pos <- end.positions[pos]
    end.pos.ref <- end.positions.ref[ref.pos]
    # Advance the cursor pointing at the smaller peak; with a positive
    # match.window at least one of the two conditions always holds.
    if (peak.pos + match.window > peak.pos.ref) {
      ref.pos <- ref.pos + 1
    }
    if (peak.pos.ref + match.window > peak.pos) {
      pos <- pos + 1
    }
    # If the peaks match, record the delineation differences.
    if (abs(peak.pos - peak.pos.ref) < match.window) {
      if (is.valid.mnemonic) { # onset may be absent (T-wave case)
        validation$onset.differences[cont.validas] <- onset.pos - onset.pos.ref
      }
      validation$end.differences[cont.validas] <- end.pos - end.pos.ref
      validation$peak.differences[cont.validas] <- peak.pos - peak.pos.ref
      # Keep the matched positions themselves (useful for debugging).
      validation$annot[cont.validas] <- peak.pos
      validation$annot.ref[cont.validas] <- peak.pos.ref
      if (is.valid.mnemonic) {
        validation$annot.onset[cont.validas] <- onset.pos
        validation$annot.onset.ref[cont.validas] <- onset.pos.ref
      }
      validation$annot.end[cont.validas] <- end.pos
      validation$annot.end.ref[cont.validas] <- end.pos.ref
      cont.validas <- cont.validas + 1
    }
  }
  validation$number.of.matchs <- cont.validas - 1
  validation$number.total <- number.of.annotations
  return(validation)
}
createValidationList <- function(number.of.annotations){
validation <- list()
# Pruebas
validation$annot <- mat.or.vec(number.of.annotations, 1)
validation$annot.ref <- mat.or.vec(number.of.annotations, 1)
validation$annot.onset <- mat.or.vec(number.of.annotations, 1)
validation$annot.onset.ref <- mat.or.vec(number.of.annotations, 1)
validation$annot.end <- mat.or.vec(number.of.annotations, 1)
validation$annot.end.ref <- mat.or.vec(number.of.annotations, 1)
# Result arrays
validation$onset.differences <- mat.or.vec(number.of.annotations, 1)
validation$end.differences <- mat.or.vec(number.of.annotations, 1)
return(validation)
} | /RECG/R/ECGDelineatorValidation.R | no_license | milegroup/Recg | R | false | false | 6,073 | r | ECGDelineatorValidation <- function(ECGData, reference.annotation.file, path="./", mnemonic = "N", ...){
match.window <- 0.15 * ECGData$samplingFreq # 0.15 s de ventana
file <- paste(path, reference.annotation.file, sep = "")
reference.annotations <- read.table(file)
algorithm.annotations <- getECGDataAnnotations(ECGData, mnemonic)
algorithm.annotations <- filterECGDataAnnotations( reference.annotations, algorithm.annotations )
reference.annotations.peaks <- reference.annotations$V2[which(reference.annotations$V3 == mnemonic)]
reference.annotations.onsets <- reference.annotations$V2[which(reference.annotations$V3 == mnemonic) - 1]
reference.annotations.ends <- reference.annotations$V2[which(reference.annotations$V3 == mnemonic) + 1]
mnemonics.onsets <- reference.annotations$V3[which(reference.annotations$V3 == mnemonic) - 1]
number.of.annotations <- length(reference.annotations.peaks)
validation <- createValidationList(number.of.annotations)
validation <- applyValidation( number.of.annotations, algorithm.annotations$peaks, reference.annotations.peaks, algorithm.annotations$onsets,
reference.annotations.onsets, algorithm.annotations$ends, reference.annotations.ends, mnemonics.onsets,
match.window, validation )
if( mnemonic == "N"){
ECGData$validation$qrs <- validation
}
if( mnemonic == "p"){
ECGData$validation$p <- validation
}
if( mnemonic == "t"){
ECGData$validation$t <- validation
}
return(ECGData)
}
# Look up the algorithm's peak/onset/end vectors that correspond to
# `mnemonic` ("N" = QRS, "p" = P wave, "t" = T wave); any other
# mnemonic yields an empty list.
getECGDataAnnotations <- function(ECGData, mnemonic){
  del <- ECGData$delineation
  ann <- list()
  if (mnemonic == "N") {
    ann$peaks <- ECGData$annot$pos
    ann$onsets <- del$qrs.onset.positions
    ann$ends <- del$qrs.end.positions
  }
  if (mnemonic == "p") {
    ann$peaks <- del$p.peak.positions
    ann$onsets <- del$p.onset
    ann$ends <- del$p.end
  }
  if (mnemonic == "t") {
    ann$peaks <- del$t.peak.positions
    ann$onsets <- del$t.onset
    ann$ends <- del$t.end
  }
  return(ann)
}
# Obtain the first and last positions of the reference annotation file and select the valid algorithm positions within them.
# Keep only the algorithm annotations whose peak lies strictly inside
# the time span covered by the reference annotations.
filterECGDataAnnotations <- function( reference.annotations, algorithm.annotations ){
  ref.positions <- reference.annotations$V2
  lower <- ref.positions[1]
  upper <- ref.positions[length(ref.positions)]
  keep <- which(algorithm.annotations$peaks > lower & algorithm.annotations$peaks < upper)
  # Subset the three parallel vectors with the same index set.
  for (field in c("peaks", "onsets", "ends")) {
    algorithm.annotations[[field]] <- algorithm.annotations[[field]][keep]
  }
  algorithm.annotations
}
# Match algorithm annotations against reference annotations and record
# onset/peak/end differences (in samples) for every matched beat.
# Peaks closer than `match.window` samples are considered a match;
# onsets are only compared when the reference onset mnemonic is "("
# (the T wave may lack an annotated onset).
#
# NOTE(review): `pos` is only bounded indirectly, via the `ref.pos`
# loop condition; if the algorithm produced fewer peaks than the
# reference, `peak.positions[pos]` can become NA and the `if`
# comparisons below will raise a runtime error -- confirm inputs
# always cover the reference window.
applyValidation <- function(number.of.annotations, peak.positions, peak.positions.ref, onset.positions, onset.positions.ref,
                            end.positions, end.positions.ref, mnemonics.onsets, match.window, validation ){
  pos <- 1
  ref.pos <- 1
  cont.validas <- 1
  # Walk every annotation of the reference file
  while( ref.pos <= number.of.annotations){
    peak.pos <- peak.positions[pos]
    peak.pos.ref <- peak.positions.ref[ref.pos]
    is.valid.mnemonic = mnemonics.onsets[ref.pos] == "("
    if( is.valid.mnemonic ){ # In case the onset does not exist (T-wave case)
      onset.pos <- onset.positions[pos]
      onset.pos.ref <- onset.positions.ref[ref.pos]
    }
    end.pos <- end.positions[pos]
    end.pos.ref <- end.positions.ref[ref.pos]
    # Advance the index of the smaller peak
    #msg <- paste("\nDebugging: \npeak.pos", peak.pos, "\nmatch.window", match.window, "\npeak.pos.ref", peak.pos.ref)
    #cat(msg)
    if( peak.pos + match.window > peak.pos.ref ){
      ref.pos <- ref.pos + 1
    }
    if( peak.pos.ref + match.window > peak.pos ){
      pos <- pos + 1
    }
    # If the peaks match, compare the delineation
    if( abs(peak.pos - peak.pos.ref) < match.window ){
      if( is.valid.mnemonic ){ # In case the onset does not exist (T-wave case)
        validation$onset.differences[cont.validas] <- onset.pos - onset.pos.ref # abs(onset.pos - onset.pos.ref)
      }
      validation$end.differences[cont.validas] <- end.pos - end.pos.ref #abs(end.pos - end.pos.ref)
      validation$peak.differences[cont.validas] <- peak.pos - peak.pos.ref # abs(peak.pos - peak.pos.ref)
      # Store the peak positions themselves (for testing)
      validation$annot[cont.validas] <- peak.pos
      validation$annot.ref[cont.validas] <- peak.pos.ref
      if( is.valid.mnemonic ){ # In case the onset does not exist (T-wave case)
        validation$annot.onset[cont.validas] <- onset.pos
        validation$annot.onset.ref[cont.validas] <- onset.pos.ref
      }
      validation$annot.end[cont.validas] <- end.pos
      validation$annot.end.ref[cont.validas] <- end.pos.ref
      cont.validas <- cont.validas + 1
    }
  }
  validation$number.of.matchs <- cont.validas - 1
  validation$number.total <- number.of.annotations
  return(validation)
}
# Preallocate the result structure filled in by applyValidation: one
# zero vector per recorded quantity, each of length
# `number.of.annotations` (only the first `number.of.matchs` entries
# are meaningful after validation).
# NOTE(review): peak.differences is written by applyValidation but is
# not preallocated here; R grows it on first assignment -- confirm
# whether that asymmetry is intentional.
createValidationList <- function(number.of.annotations){
  validation <- list()
  # Matched positions, kept for testing/debugging
  validation$annot <- mat.or.vec(number.of.annotations, 1)
  validation$annot.ref <- mat.or.vec(number.of.annotations, 1)
  validation$annot.onset <- mat.or.vec(number.of.annotations, 1)
  validation$annot.onset.ref <- mat.or.vec(number.of.annotations, 1)
  validation$annot.end <- mat.or.vec(number.of.annotations, 1)
  validation$annot.end.ref <- mat.or.vec(number.of.annotations, 1)
  # Result arrays
  validation$onset.differences <- mat.or.vec(number.of.annotations, 1)
  validation$end.differences <- mat.or.vec(number.of.annotations, 1)
  return(validation)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_LA101.EIA_SEDS.R
\name{module_gcamusa_LA101.EIA_SEDS}
\alias{module_gcamusa_LA101.EIA_SEDS}
\title{module_gcamusa_LA101.EIA_SEDS}
\usage{
module_gcamusa_LA101.EIA_SEDS(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L101.EIA_use_all_Bbtu}, \code{L101.inEIA_EJ_state_S_F}. The corresponding file in the
original data system was \code{LA101.EIA_SEDS.R} (gcam-usa level1).
}
\description{
Produce two output tables from the EIA state energy database:
\itemize{
\item{L101.inEIA_EJ_state_S_F: Energy data by GCAM sector and fuel, state, and year; energy units in EJ, years from 1971-2010, includes only rows that have a defined sector and fuel}
\item{L101.EIA_use_all_Bbtu: Energy data by EIA sector and fuel code, GCAM sector and fuel, MSN, state, and year; energy units in Billion BTU, years from 1960-2011, includes all original data}
}
}
\details{
See above
}
\author{
AS April 2017
}
| /man/module_gcamusa_LA101.EIA_SEDS.Rd | permissive | Liyang-Guo/gcamdata | R | false | true | 1,216 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_LA101.EIA_SEDS.R
\name{module_gcamusa_LA101.EIA_SEDS}
\alias{module_gcamusa_LA101.EIA_SEDS}
\title{module_gcamusa_LA101.EIA_SEDS}
\usage{
module_gcamusa_LA101.EIA_SEDS(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L101.EIA_use_all_Bbtu}, \code{L101.inEIA_EJ_state_S_F}. The corresponding file in the
original data system was \code{LA101.EIA_SEDS.R} (gcam-usa level1).
}
\description{
Produce two output tables from the EIA state energy database:
\itemize{
\item{L101.inEIA_EJ_state_S_F: Energy data by GCAM sector and fuel, state, and year; energy units in EJ, years from 1971-2010, includes only rows that have a defined sector and fuel}
\item{L101.EIA_use_all_Bbtu: Energy data by EIA sector and fuel code, GCAM sector and fuel, MSN, state, and year; energy units in Billion BTU, years from 1960-2011, includes all original data}
}
}
\details{
See above
}
\author{
AS April 2017
}
|
# Auto-generated fuzz-test case (RcppDeepState style): calls
# DLMtool::LBSPRgen with extreme/denormal argument values to probe for
# crashes rather than to produce a meaningful result.
testlist <- list(Beta = 0, CVLinf = -1.37672045511001e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615828929-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 487 | r | testlist <- list(Beta = 0, CVLinf = -1.37672045511001e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(DT)
# UI: a static table (t1), two interactive DT tables (dt1, dt2) -- x4
# echoes the rows selected in dt1, p5 plots iris with the rows selected
# in dt2 highlighted -- plus two color pickers driving the plot colors.
ui <- fluidPage(
  h3("t1"),
  tableOutput("t1"),
  hr(),
  fluidRow(
    column(9, h3("dt1"), dataTableOutput("dt1")),
    column(3, h3("x4"), verbatimTextOutput("x4"))),
  hr(),
  selectInput(inputId = "selectColor1", label = "Please select a color for the plot", choices = colors(), selected = "black",multiple = FALSE),
  selectInput(inputId = "selectColor2", label = "Please select a color for highlighted points", choices = colors(), selected = "black", multiple = FALSE),
  fluidRow(
    column(8, h3("dt2"), dataTableOutput("dt2")),
    column(4, h3("p5"), plotOutput("p5")))
)
options(error = function() traceback(2))
# Server: renders the static and interactive tables and reacts to row
# selections -- dt1's selection is printed in x4, dt2's selection is
# highlighted in the scatter plot p5.
server <- function(input, output, session) {
  # Static striped table of the first 10 iris rows.
  output$t1 <- renderTable(iris[1:10, ], striped = TRUE, hover = TRUE)
  output$dt1 <- renderDataTable(iris, options = list(pageLength = 5))
  # Echo the rows currently selected in dt1.
  output$x4 <- renderPrint({
    s <- input$dt1_rows_selected
    if (length(s) > 0) {
      cat('These rows were selected:\n\n')
      cat(s, sep = ', ')
    }
  })
  # server = FALSE keeps the full data client-side so selection works
  # without server round trips.
  output$dt2 <- renderDataTable(iris,
                                options = list(pageLength = 5),
                                server = FALSE)
  # Scatter plot; rows selected in dt2 are redrawn in the highlight color.
  output$p5 <- renderPlot({
    s <- input$dt2_rows_selected
    plot(iris$Sepal.Length, iris$Sepal.Width, pch = 19, cex = 1, col = input$selectColor1)
    if (length(s) > 0) {
      points(iris[s, c("Sepal.Length", "Sepal.Width"), drop = FALSE],
             pch = 19, cex = 1, col = input$selectColor2)
    }
  })
}
# Launch the Shiny application with the UI and server defined above.
shinyApp(ui = ui, server = server)
| /2018/Assignment/AY2018 FE8828 Assignment - Wu Siying/WuSiyingAssignment2/WuSiying_24R/app.R | no_license | leafyoung/fe8828 | R | false | false | 1,732 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(DT)
# UI layout: static table t1; DT tables dt1 (selection echoed in x4)
# and dt2 (selection highlighted in plot p5); two color pickers.
ui <- fluidPage(
  h3("t1"),
  tableOutput("t1"),
  hr(),
  fluidRow(
    column(9, h3("dt1"), dataTableOutput("dt1")),
    column(3, h3("x4"), verbatimTextOutput("x4"))),
  hr(),
  selectInput(inputId = "selectColor1", label = "Please select a color for the plot", choices = colors(), selected = "black",multiple = FALSE),
  selectInput(inputId = "selectColor2", label = "Please select a color for highlighted points", choices = colors(), selected = "black", multiple = FALSE),
  fluidRow(
    column(8, h3("dt2"), dataTableOutput("dt2")),
    column(4, h3("p5"), plotOutput("p5")))
)
options(error = function() traceback(2))
# Server logic: static table t1, interactive tables dt1/dt2; prints the
# dt1 selection into x4 and highlights the dt2 selection inside plot p5.
server <- function(input, output, session) {
  # First 10 iris rows as a static, striped table.
  output$t1 <- renderTable(iris[1:10, ], striped = TRUE, hover = TRUE)
  output$dt1 <- renderDataTable(iris, options = list(pageLength = 5))
  # Report which dt1 rows are selected.
  output$x4 <- renderPrint({
    s <- input$dt1_rows_selected
    if (length(s) > 0) {
      cat('These rows were selected:\n\n')
      cat(s, sep = ', ')
    }
  })
  # Client-side table (server = FALSE) so selection state stays local.
  output$dt2 <- renderDataTable(iris,
                                options = list(pageLength = 5),
                                server = FALSE)
  # Base scatter plot, with dt2-selected points overdrawn in the
  # highlight color.
  output$p5 <- renderPlot({
    s <- input$dt2_rows_selected
    plot(iris$Sepal.Length, iris$Sepal.Width, pch = 19, cex = 1, col = input$selectColor1)
    if (length(s) > 0) {
      points(iris[s, c("Sepal.Length", "Sepal.Width"), drop = FALSE],
             pch = 19, cex = 1, col = input$selectColor2)
    }
  })
}
# Launch the Shiny application defined above.
shinyApp(ui = ui, server = server)
|
library(ggplot2)

# Bar chart of the per-column totals of the ISSJ detection/distance
# matrix `Xall.100m` (one bar per column).
# NOTE(review): Xall.100m must already exist in the calling environment.
ISSJ <- as.data.frame(Xall.100m)
# colSums() is the idiomatic (and faster) equivalent of apply(x, 2, sum)
# for an all-numeric data frame.
sumdata <- data.frame(value = colSums(ISSJ))
sumdata$key <- rownames(sumdata)
ggplot(data = sumdata, aes(x = key, y = value)) +
  geom_bar(colour = "black", stat = "identity")
| /Hierarchical Modeling code folder/Hierarchical Modeling projects/ISSJ plot of distances.R | no_license | tmclaren1/tmclaren1 | R | false | false | 224 | r | library(ggplot2)
# Convert the ISSJ distance data to a data frame; Xall.100m is assumed
# to exist in the calling environment -- TODO confirm.
ISSJ<-as.data.frame(Xall.100m)
# Column totals, one value per column of the matrix.
sumdata=data.frame(value=apply(ISSJ,2,sum))
sumdata$key=rownames(sumdata)
# Bar chart of the totals.
ggplot(data=sumdata, aes(x=key, y=value)) +
  geom_bar(colour="black", stat="identity")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/config.R
\name{configure}
\alias{configure}
\title{Library configuration tool.}
\usage{
configure(auth_token = NULL)
}
\arguments{
\item{auth_token}{data.world's API authentication token.}
}
\description{
Configuration is not persistent and must be performed for
every new R session.
}
\section{DO NOT SHARE YOUR AUTHENTICATION TOKEN}{
For your security, do not include your API authentication token in code that
is intended to be shared with others.
Always call this function from the interactive console whenever possible.
If you must call it in code do not include the actual API token.
Instead, pass the token via a variable in .Renviron, and do not share
your .Renviron file. For example:
\code{
dwapi::configure(auth_token = Sys.getenv("DW_AUTH_TOKEN"))
}
}
\examples{
dwapi::configure(auth_token = "YOUR_API_TOKEN_HERE")
}
| /man/configure.Rd | permissive | datadotworld/dwapi-r | R | false | true | 896 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/config.R
\name{configure}
\alias{configure}
\title{Library configuration tool.}
\usage{
configure(auth_token = NULL)
}
\arguments{
\item{auth_token}{data.world's API authentication token.}
}
\description{
Configuration is not persistent and must be performed for
every new R session.
}
\section{DO NOT SHARE YOUR AUTHENTICATION TOKEN}{
For your security, do not include your API authentication token in code that
is intended to be shared with others.
Always call this function from the interactive console whenever possible.
If you must call it in code do not include the actual API token.
Instead, pass the token via a variable in .Renviron, and do not share
your .Renviron file. For example:
\code{
dwapi::configure(auth_token = Sys.getenv("DW_AUTH_TOKEN"))
}
}
\examples{
dwapi::configure(auth_token = "YOUR_API_TOKEN_HERE")
}
|
## Interactive cleaning script: reads raw DNA-index (D.I.) CSV exports
## for OSCC samples, iteratively strips the lower-ploidy populations and
## saves the remaining (aneuploid) distribution per sample as an .rda.
## Depends on helper functions sourced from cleaningFuncs.R
## (peak.quick, getPopWIndex, getFirstPop, cleanFirstPop, ...).
library(Rlab)
## OS specific directories:
mac.os <- "/Users/li11/"
linux <- "~/"
windows <- "X:/"
##=============================================
# Three criteria
# aneuploidy: > 2.3c --> three populations
# mitotic: >1.7c --> two populations
# normal??
aneuThresh = 2.3
mitoThresh = 1.7
##=============================================
## Read in data
##=============================================
#root <- windows
root <- mac.os
source (paste (root, "myGit/mixturemodel/Scripts/cleaningFuncs.R", sep = ""))
rawFiles <- list.files (paste(root, "myGit/mixturemodel/data/dt_01232014/OSCC/",sep=""), pattern = "csv")
rawFiles
# NOTE(review): leftover interactive assignments -- the loop below
# overrides i anyway, so these have no lasting effect.
i = 3
i = 4
# Main cleaning loop: for each raw CSV, repeatedly locate the lowest
# density peak of the DNA-index distribution, subtract that population,
# record its stats, and save what remains (aneuploid cells) to disk.
for (i in 1:length(rawFiles))
{
  # NOTE(review): hard-coded debug override -- every iteration processes
  # file 6; remove to actually loop over all rawFiles.
  i=6
  print (rawFiles[i])
  #cat ("\n")
  #}
  fileName <- paste("myGit/mixturemodel/data/dt_01232014/OSCC/", rawFiles[i], sep ="")
  ##========================================================
  # On a real D.I. value data
  ##=========================================================
  f_IN <- paste (root, fileName, sep ="")
  # Derive the sample name from the file name (strip path and .csv).
  nameSplit <- strsplit(f_IN, "/")[[1]]
  sampleName <- nameSplit[length(nameSplit)]
  sampleName <- sub(".csv", "", sampleName)
  sampleName
  cleanedSample <- list("sample" = sampleName)
  #cleanedSample
  ##============================
  # read in the raw D.I. value
  ##============================
  dt <- read.csv (f_IN)
  ## determine how many families are we dealing with
  numOfFamily <- 1 # minimun one family
  if (length(which(as.vector(dt$DNA_Index) > aneuThresh)) > 1)
  {
    numOfFamily = 3
  }else if (length(which(as.vector(dt$DNA_Index) > mitoThresh)) > 1)
  {
    numOfFamily = 2
  }
  ##===================================================================
  # Density peaks of the raw DNA-index distribution (bare expressions
  # like `peaks` below only print when run interactively).
  get.den <- density(as.vector(dt$DNA_Index))
  peaks <- peak.quick (get.den$x, get.den$y)
  peaks
  ##===================================================
  ## Determine where to start the first population
  ## There could be more small peaks less than 1
  ## Try to get the first one peaks > 1 but < 1.2
  ##====================================================
  index = 1
  length(which(peaks < 1))
  if (peaks[length(which(peaks<1)) + 1] < 1.2)
  {
    index = length(which(peaks<1)) + 1
  }else { index = length(which(peaks<1)) }
  index
  ##============================================
  ## clean starts here with first population
  ##============================================
  dt.raw <- as.vector (dt$DNA_Index)
  firstDT <- getPopWIndex (dt.raw, index)
  ## Save first population dt
  FP_dt_primary <- firstDT + peaks[index]
  #FP_mean <- mean(firstDT + peaks[index])
  #FP_std <- sd(firstDT + peaks[index])
  dt.cleaned <- cleanFirstPop(peaks[index], firstDT, dt.raw)
  #plot(density(dt.cleaned))
  #str(dt.cleaned)
  ##================================
  ## Extra cleaning if necessary
  ##================================
  firstDT <- getFirstPop(dt.cleaned)
  #plot(density(firstDT))
  peaks <- peak.quick(density(dt.cleaned)$x, density(dt.cleaned)$y)
  peaks
  ##=========================================
  ## Follow the same protocol, but just
  ## carry out one more cleaning cycle
  ## if there is any peaks less than 1.2
  ##===========================================
  ##Need to add the "cleaned back to population one"!!
  if (peaks[1] < 1.2)
  {
    dt.another.clean <- cleanFirstPop(peaks[1], firstDT, dt.cleaned)
    # plot(density(dt.another.clean))
    dt.1pop.cleaned <- dt.another.clean
  }else{
    dt.1pop.cleaned <- dt.cleaned
  }
  # First-population (diploid) statistics and count of removed events.
  FP_mean <- mean(FP_dt_primary)
  FP_std <- sd(FP_dt_primary)
  FP_count <- (length(dt.raw) - length(dt.1pop.cleaned))
  FP <- list ("FP_mean" = FP_mean, "FP_std" = FP_std, "FP_count" = FP_count)
  cleanedSample <- c(cleanedSample, FP)
  #cleanedSample
  ##===========================================
  ## Here comes the cleaning for the
  ## second population and store the stats
  ##===========================================
  num.of.DI.left <- length(dt.1pop.cleaned)
  get.den <- density(dt.1pop.cleaned)
  peaks <- peak.quick (get.den$x, get.den$y)
  peaks
  ## Determine where to start the first population
  index = 1
  #which(peaks > 1.5)[1]
  # NOTE(review): if no peak is below 1.5 this whole branch is skipped
  # and secondDT.cleaned stays undefined, so the density() call after
  # the branch would fail -- confirm inputs always have such a peak.
  if (length(which(peaks < 1.5)) >=1 )
  {
    index <-which(peaks > 1.5)[1]
    #secondDT <- getSecondPop(dt.1pop.cleaned)
    secondDT <- getPopWIndex (dt.1pop.cleaned, index)
    #plot(density(secondDT))
    ## Save first population stats
    SP_dt_primary <- (secondDT + peaks[index])
    # SP_mean <- mean(secondDT + peaks[index])
    # SP_std <- sd(secondDT + peaks[index])
    #plot(density(secondDT + peaks[index]))
    secondDT.cleaned <- cleanFirstPop(peaks[index], secondDT, dt.1pop.cleaned)
    #str(secondDT.cleaned)
    #plot(density(secondDT.cleaned))
  }
  ##==================================
  ## Need another round of cleaning
  ##==================================
  get.den <- density(secondDT.cleaned)
  peaks <- peak.quick (get.den$x, get.den$y)
  peaks
  ## Determine where to start the first population
  index = 0
  third_round = 0
  if (length(peaks) > 1 & length(which(peaks < 2)) >= 1)
  {
    index = which(peaks < 2) [length(which(peaks < 2))]
  }
  #stats (secondDT.cleaned)
  # secondDT.1 <- getFirstPop(secondDT.cleaned)
  #secondDT <- getSecondPop(dt.1pop.cleaned)
  if (index >=1)
  {
    secondDT.1 <- getPopWIndex (secondDT.cleaned, index)
    #plot(density(secondDT.1))
    #plot(density(secondDT.1 + peaks[index]))
    secondDT.2.cleaned <- cleanFirstPop(peaks[index], secondDT.1, secondDT.cleaned)
    third_round = 1
    #str(secondDT.2.cleaned)
    #plot(density(secondDT.2.cleaned))
    #stats (secondDT.2.cleaned)
  }else{
    secondDT.2.cleaned <- secondDT.cleaned
  }
  ## third round??
  if (third_round)
  {
    get.den <- density(secondDT.2.cleaned)
    peaks <- peak.quick (get.den$x, get.den$y)
    #peaks
    index = 0
    if (length(peaks) > 1 & length(which(peaks < 1.8)) >= 1)
    {
      index = which(peaks < 1.8) [length(which(peaks < 1.8))]
    }
    if (index > 0)
    {
      secondDT.2.sub <- getFirstPop(secondDT.2.cleaned)
      secondDT.3.cleaned <- cleanFirstPop(peaks[1], secondDT.2.sub , secondDT.2.cleaned)
    }else{
      secondDT.3.cleaned <- secondDT.2.cleaned
    }
    # plot(density(secondDT.3.cleaned))
    # stats(secondDT.3.cleaned)
    #get.den <- density(secondDT.3.cleaned)
    #peak.quick(get.den$x, get.den$y)
    # length(secondDT.3.cleaned)
    SP_count <- num.of.DI.left - length(secondDT.3.cleaned)
  }
  # NOTE(review): SP_mean/SP_std are never assigned in this script (the
  # assignments above are commented out); this relies on values left in
  # the workspace from a previous session -- confirm intent.
  SP <- list ("SP_mean" = SP_mean, "SP_std" = SP_std, "SP_count" = SP_count)
  cleanedSample <- c(cleanedSample, SP)
  #cleanedSample
  ##=====================================
  # aneuploidy population of interest
  ##=====================================
  aneup.pop <- secondDT.3.cleaned
  plot(density(aneup.pop))
  stats(aneup.pop)
  aneu <- list ("AneuLeft" = aneup.pop)
  cleanedSample <- c(cleanedSample, aneu)
  cleanedSample
  #cleanedSample$sample
  ##==========================
  ## Saving the results
  ##==========================
  storage.dir <- paste (root, "myGit/mixturemodel/cleanedData/OSCC/", sep = "")
  file2save <- paste (storage.dir, "cleaned_", cleanedSample$sample, ".rda", sep="")
  #file2save
  save (cleanedSample, file = file2save)
}
| /mixturemodel/ScriptsDraft/density2DI_finalized_OSCC.R | no_license | ImageRecognitionMaster/myOCRI-iii | R | false | false | 7,138 | r | library(Rlab)
## density2DI_finalized_OSCC.R -- per-sample cleaning of DNA-index (D.I.)
## distributions for OSCC samples.  For each raw .csv the script:
##   1) peels off the first (diploid) population and records mean/sd/count,
##   2) peels off the second population (with up to two extra cleaning rounds),
##   3) keeps the remainder as the aneuploid population of interest,
## then saves the summaries to cleaned_<sample>.rda.
## Helper functions (peak.quick, getFirstPop, getPopWIndex, cleanFirstPop,
## stats, ...) come from cleaningFuncs.R, sourced below.
## OS specific directories:
mac.os <- "/Users/li11/"
linux <- "~/"
windows <- "X:/"
##=============================================
# Population-count criteria on the D.I. scale:
#   aneuploidy: > 2.3c --> three populations
#   mitotic:    > 1.7c --> two populations
##=============================================
aneuThresh <- 2.3
mitoThresh <- 1.7
##=============================================
## Read in data
##=============================================
#root <- windows
root <- mac.os
source(paste(root, "myGit/mixturemodel/Scripts/cleaningFuncs.R", sep = ""))
rawFiles <- list.files(paste(root, "myGit/mixturemodel/data/dt_01232014/OSCC/", sep = ""), pattern = "csv")
rawFiles
for (i in seq_along(rawFiles))
{
  ## BUG FIX: the original loop body started with a leftover debug override
  ## `i=6` (plus `i = 3` / `i = 4` before the loop), so every iteration
  ## re-processed file 6 and overwrote the same output file.  Removed.
  print(rawFiles[i])
  fileName <- paste("myGit/mixturemodel/data/dt_01232014/OSCC/", rawFiles[i], sep = "")
  ##========================================================
  # On a real D.I. value data
  ##=========================================================
  f_IN <- paste(root, fileName, sep = "")
  ## Sample name = file name without path and .csv extension.
  nameSplit <- strsplit(f_IN, "/")[[1]]
  sampleName <- nameSplit[length(nameSplit)]
  sampleName <- sub(".csv", "", sampleName)
  cleanedSample <- list("sample" = sampleName)
  ##============================
  # read in the raw D.I. values
  ##============================
  dt <- read.csv(f_IN)
  ## Expected number of populations from the D.I. range (currently informational).
  numOfFamily <- 1  # minimum one family
  if (length(which(as.vector(dt$DNA_Index) > aneuThresh)) > 1)
  {
    numOfFamily <- 3
  } else if (length(which(as.vector(dt$DNA_Index) > mitoThresh)) > 1)
  {
    numOfFamily <- 2
  }
  ##===================================================================
  get.den <- density(as.vector(dt$DNA_Index))
  peaks <- peak.quick(get.den$x, get.den$y)
  ##===================================================
  ## Locate the first population: skip the small peaks < 1 and take the
  ## first peak >= 1 only if it is still < 1.2.
  ## NOTE(review): assumes such a peak exists -- confirm for samples with
  ## no near-diploid peak, otherwise peaks[...] indexes out of range.
  ##====================================================
  index <- 1
  if (peaks[length(which(peaks < 1)) + 1] < 1.2)
  {
    index <- length(which(peaks < 1)) + 1
  } else { index <- length(which(peaks < 1)) }
  ##============================================
  ## Clean the first population
  ##============================================
  dt.raw <- as.vector(dt$DNA_Index)
  firstDT <- getPopWIndex(dt.raw, index)
  ## First-population values shifted back to the original D.I. scale.
  FP_dt_primary <- firstDT + peaks[index]
  dt.cleaned <- cleanFirstPop(peaks[index], firstDT, dt.raw)
  ##================================
  ## Extra cleaning round if a residual peak < 1.2 remains.
  ##================================
  firstDT <- getFirstPop(dt.cleaned)
  peaks <- peak.quick(density(dt.cleaned)$x, density(dt.cleaned)$y)
  if (peaks[1] < 1.2)
  {
    dt.another.clean <- cleanFirstPop(peaks[1], firstDT, dt.cleaned)
    dt.1pop.cleaned <- dt.another.clean
  } else {
    dt.1pop.cleaned <- dt.cleaned
  }
  ## First-population summary statistics.
  FP_mean <- mean(FP_dt_primary)
  FP_std <- sd(FP_dt_primary)
  FP_count <- (length(dt.raw) - length(dt.1pop.cleaned))
  FP <- list("FP_mean" = FP_mean, "FP_std" = FP_std, "FP_count" = FP_count)
  cleanedSample <- c(cleanedSample, FP)
  ##===========================================
  ## Second population: peel it off and record its stats.
  ## NOTE(review): assumes a peak > 1.5 exists whenever one < 1.5 does;
  ## otherwise secondDT.cleaned / SP_dt_primary stay undefined below --
  ## confirm this holds for all input samples.
  ##===========================================
  num.of.DI.left <- length(dt.1pop.cleaned)
  get.den <- density(dt.1pop.cleaned)
  peaks <- peak.quick(get.den$x, get.den$y)
  index <- 1
  if (length(which(peaks < 1.5)) >= 1)
  {
    index <- which(peaks > 1.5)[1]
    secondDT <- getPopWIndex(dt.1pop.cleaned, index)
    ## Second-population values shifted back to the original D.I. scale.
    SP_dt_primary <- (secondDT + peaks[index])
    secondDT.cleaned <- cleanFirstPop(peaks[index], secondDT, dt.1pop.cleaned)
  }
  ## BUG FIX: the original never assigned SP_mean / SP_std (the assignments
  ## were commented out), so building the SP list below failed with
  ## "object 'SP_mean' not found".
  SP_mean <- mean(SP_dt_primary)
  SP_std <- sd(SP_dt_primary)
  ##==================================
  ## Second cleaning round for the second population.
  ##==================================
  get.den <- density(secondDT.cleaned)
  peaks <- peak.quick(get.den$x, get.den$y)
  index <- 0
  third_round <- 0
  if (length(peaks) > 1 & length(which(peaks < 2)) >= 1)
  {
    index <- which(peaks < 2)[length(which(peaks < 2))]
  }
  if (index >= 1)
  {
    secondDT.1 <- getPopWIndex(secondDT.cleaned, index)
    secondDT.2.cleaned <- cleanFirstPop(peaks[index], secondDT.1, secondDT.cleaned)
    third_round <- 1
  } else {
    secondDT.2.cleaned <- secondDT.cleaned
  }
  ## Third round, only if the second round actually removed something.
  if (third_round)
  {
    get.den <- density(secondDT.2.cleaned)
    peaks <- peak.quick(get.den$x, get.den$y)
    index <- 0
    if (length(peaks) > 1 & length(which(peaks < 1.8)) >= 1)
    {
      index <- which(peaks < 1.8)[length(which(peaks < 1.8))]
    }
    if (index > 0)
    {
      secondDT.2.sub <- getFirstPop(secondDT.2.cleaned)
      secondDT.3.cleaned <- cleanFirstPop(peaks[1], secondDT.2.sub, secondDT.2.cleaned)
    } else {
      secondDT.3.cleaned <- secondDT.2.cleaned
    }
  } else {
    ## BUG FIX: without a third round the original left secondDT.3.cleaned
    ## (and SP_count) undefined, crashing at the aneuploid step below.
    secondDT.3.cleaned <- secondDT.2.cleaned
  }
  SP_count <- num.of.DI.left - length(secondDT.3.cleaned)
  SP <- list("SP_mean" = SP_mean, "SP_std" = SP_std, "SP_count" = SP_count)
  cleanedSample <- c(cleanedSample, SP)
  ##=====================================
  # Aneuploid population of interest = whatever survived all cleaning rounds.
  ##=====================================
  aneup.pop <- secondDT.3.cleaned
  plot(density(aneup.pop))
  stats(aneup.pop)
  aneu <- list("AneuLeft" = aneup.pop)
  cleanedSample <- c(cleanedSample, aneu)
  ##==========================
  ## Save the per-sample summary.
  ##==========================
  storage.dir <- paste(root, "myGit/mixturemodel/cleanedData/OSCC/", sep = "")
  file2save <- paste(storage.dir, "cleaned_", cleanedSample$sample, ".rda", sep = "")
  save(cleanedSample, file = file2save)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis_pancan_value.R
\name{vis_toil_TvsN}
\alias{vis_toil_TvsN}
\title{Visualize Pan-cancer TPM (tumor (TCGA) vs Normal (TCGA & GTEx))}
\usage{
vis_toil_TvsN(
Gene = "TP53",
Mode = "Boxplot",
data_type = "mRNA",
Show.P.value = TRUE,
Show.P.label = TRUE,
Method = "wilcox.test",
values = c("#DF2020", "#DDDF21"),
TCGA.only = FALSE,
draw_quantiles = c(0.25, 0.5, 0.75),
trim = TRUE
)
}
\arguments{
\item{Gene}{a molecular identifier (e.g., "TP53") or a formula specifying
genomic signature (\code{"TP53 + 2 * KRAS - 1.3 * PTEN"}).}
\item{Mode}{"Boxplot" or "Violinplot" to represent data}
\item{data_type}{choose gene profile type,
including "mRNA", "transcript", "protein", "mutation", "cnv" (-2, -1, 0, 1, 2),
"cnv_gistic2", "methylation", "miRNA".}
\item{Show.P.value}{\code{TRUE} or \code{FALSE} whether to compute the P value}
\item{Show.P.label}{\code{TRUE} or \code{FALSE} present p value with number or label \code{*}, \verb{**}, \verb{***} and \verb{****}}
\item{Method}{default method is wilcox.test}
\item{values}{the color to fill tumor or normal}
\item{TCGA.only}{include samples only from TCGA dataset}
\item{draw_quantiles}{draw quantiles for violinplot}
\item{trim}{whether trim the violin}
}
\value{
a \code{ggplot} object
}
\description{
Visualize Pan-cancer TPM (tumor (TCGA) vs Normal (TCGA & GTEx))
}
\examples{
\dontrun{
p <- vis_toil_TvsN(Gene = "TP53", Mode = "Violinplot", Show.P.value = FALSE, Show.P.label = FALSE)
p <- vis_toil_TvsN(Gene = "TP53", Mode = "Boxplot", Show.P.value = FALSE, Show.P.label = FALSE)
}
}
| /man/vis_toil_TvsN.Rd | permissive | fei0810/UCSCXenaShiny | R | false | true | 1,639 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis_pancan_value.R
\name{vis_toil_TvsN}
\alias{vis_toil_TvsN}
\title{Visualize Pan-cancer TPM (tumor (TCGA) vs Normal (TCGA & GTEx))}
\usage{
vis_toil_TvsN(
Gene = "TP53",
Mode = "Boxplot",
data_type = "mRNA",
Show.P.value = TRUE,
Show.P.label = TRUE,
Method = "wilcox.test",
values = c("#DF2020", "#DDDF21"),
TCGA.only = FALSE,
draw_quantiles = c(0.25, 0.5, 0.75),
trim = TRUE
)
}
\arguments{
\item{Gene}{a molecular identifier (e.g., "TP53") or a formula specifying
genomic signature (\code{"TP53 + 2 * KRAS - 1.3 * PTEN"}).}
\item{Mode}{"Boxplot" or "Violinplot" to represent data}
\item{data_type}{choose gene profile type,
including "mRNA", "transcript", "protein", "mutation", "cnv" (-2, -1, 0, 1, 2),
"cnv_gistic2", "methylation", "miRNA".}
\item{Show.P.value}{\code{TRUE} or \code{FALSE} whether to count P value}
\item{Show.P.label}{\code{TRUE} or \code{FALSE} present p value with number or label \code{*}, \verb{**}, \verb{***} and \verb{****}}
\item{Method}{default method is wilcox.test}
\item{values}{the color to fill tumor or normal}
\item{TCGA.only}{include samples only from TCGA dataset}
\item{draw_quantiles}{draw quantiles for violinplot}
\item{trim}{whether trim the violin}
}
\value{
a \code{ggplot} object
}
\description{
Visualize Pan-cancer TPM (tumor (TCGA) vs Normal (TCGA & GTEx))
}
\examples{
\dontrun{
p <- vis_toil_TvsN(Gene = "TP53", Mode = "Violinplot", Show.P.value = FALSE, Show.P.label = FALSE)
p <- vis_toil_TvsN(Gene = "TP53", Mode = "Boxplot", Show.P.value = FALSE, Show.P.label = FALSE)
}
}
|
# Model definition consumed by caret::train() for bagged FDA, with the
# product degree as the only tuning parameter (pruning is handled
# internally by earth's GCV, hence no nprune parameter).
modelInfo <- list(label = "Bagged FDA using gCV Pruning",
                  library = "earth",
                  type = c("Classification"),
                  # Tuning grid metadata: a single numeric "degree" parameter.
                  parameters = data.frame(parameter = c('degree'),
                                          class = c("numeric"),
                                          label = c('Product Degree')),
                  grid = function(x, y, len = NULL, search = "grid") data.frame(degree = 1),
                  loop = NULL,
                  fit = function(x, y, wts, param, lev, last, classProbs, ...) {
                    ## Assemble a data frame with the outcome column and
                    ## delegate to bagFDA via the formula interface.
                    dat <- if(is.data.frame(x)) x else as.data.frame(x)
                    dat$.outcome <- y
                    bagFDA(.outcome ~ .,
                           data = dat,
                           degree = param$degree,
                           weights = wts,
                           ...)
                    },
                  predict = function(modelFit, newdata, submodels = NULL)
                    predict(modelFit , newdata),
                  prob = function(modelFit, newdata, submodels = NULL)
                    predict(modelFit, newdata, type= "probs"),
                  predictors = function(x, ...) {
                    ## Union of predictors used across all bagged fits.
                    fdaPreds <- function(x) {
                      code <- getModelInfo("earth", regex = FALSE)[[1]]$predictors
                      tmp <- predictors(x$terms)
                      ## BUG FIX: use inherits() instead of class(...) == "earth"
                      ## (class() can return a vector, making == unreliable).
                      out <- if(inherits(x$fit, "earth")) code(x$fit) else tmp
                      out
                      }
                    eachFit <- lapply(x$fit, fdaPreds)
                    unique(unlist(eachFit))
                  },
                  varImp = function(object, ...) {
                    ## Average the per-bag variable-importance scores.
                    allImp <- lapply(object$fit, varImp, ...)
                    impDF <- as.data.frame(allImp)
                    meanImp <- apply(impDF, 1, mean)
                    out <- data.frame(Overall = meanImp)
                    rownames(out) <- names(meanImp)
                    out
                  },
                  levels = function(x) x$levels,
                  ## BUG FIX: the original list declared `tags` twice (an
                  ## incomplete copy earlier in the list); only the complete
                  ## entry is kept so modelInfo$tags is unambiguous.
                  tags = c("Multivariate Adaptive Regression Splines", "Ensemble Model",
                           "Implicit Feature Selection", "Bagging", "Accepts Case Weights"),
                  sort = function(x) x[order(x$degree),],
                  oob = function(x) apply(x$oob, 2, function(x) quantile(x, probs = .5)))
| /models/files/bagFDAGCV.R | no_license | JackStat/caret | R | false | false | 2,533 | r | modelInfo <- list(label = "Bagged FDA using gCV Pruning",
library = "earth",
type = c("Classification"),
parameters = data.frame(parameter = c('degree'),
class = c("numeric"),
label = c('Product Degree')),
grid = function(x, y, len = NULL, search = "grid") data.frame(degree = 1),
loop = NULL,
fit = function(x, y, wts, param, lev, last, classProbs, ...) {
dat <- if(is.data.frame(x)) x else as.data.frame(x)
dat$.outcome <- y
bagFDA(.outcome ~ .,
data = dat,
degree = param$degree,
weights = wts,
...)
},
tags = c("Multivariate Adaptive Regression Splines", "Ensemble Model",
"Implicit Feature Selection", "Bagging"),
predict = function(modelFit, newdata, submodels = NULL)
predict(modelFit , newdata),
prob = function(modelFit, newdata, submodels = NULL)
predict(modelFit, newdata, type= "probs"),
predictors = function(x, ...) {
fdaPreds <- function(x) {
code <- getModelInfo("earth", regex = FALSE)[[1]]$predictors
tmp <- predictors(x$terms)
out <- if(class(x$fit) == "earth") code(x$fit) else tmp
out
}
eachFit <- lapply(x$fit, fdaPreds)
unique(unlist(eachFit))
},
varImp = function(object, ...) {
allImp <- lapply(object$fit, varImp, ...)
impDF <- as.data.frame(allImp)
meanImp <- apply(impDF, 1, mean)
out <- data.frame(Overall = meanImp)
rownames(out) <- names(meanImp)
out
},
levels = function(x) x$levels,
tags = c("Multivariate Adaptive Regression Splines", "Ensemble Model",
"Implicit Feature Selection", "Bagging", "Accepts Case Weights"),
sort = function(x) x[order(x$degree),],
oob = function(x) apply(x$oob, 2, function(x) quantile(x, probs = .5)))
|
#daraz searcher and Scrapper
#required Packages
library(jsonlite)
library(httr)
library(shiny)
library(shinythemes)
library(DT)
library(stringi)
library(taRifx)
library(sqldf)
library(xml2)
library(rvest)
library(stringr)
#########################################################################
#########################InterFace#######################################
#########################################################################
## Shiny app: searches Daraz and/or Pickaboo for a keyword, scrapes product
## name / prices / rating / review counts, and renders a sortable result
## table with clickable product images.
shinyApp(
  ui = fluidPage(
    shinythemes::themeSelector(), # <--- Add this somewhere in the UI
    sidebarPanel(
      textInput("txt", "Enter Searching Terms:", placeholder = "type here"),
      h4("E-Commerce Sites"),
      checkboxInput("cbDARAZ", label = "DARAZ", value = TRUE),
      checkboxInput("cbPICKABOO",label = "PICKABOO", value = TRUE),
      radioButtons("radio",h4("ORDERS"),
                   c("Default" = "default",
                     "Rating->Review->Price" = "RaRePr",
                     "Review->Rating->Price" = "ReRaPr",
                     "Price->Review->Rating" = "PrReRa",
                     "Price->Rating->Review" = "PrRaRe"
                   )),
      actionButton("action", "Search")
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Results"),
        fluidRow(
          column(12,
                 dataTableOutput('table')
          )
        )
      )
    )
  ),
  server = function(input, output, session) {
    observeEvent(input$action, {
      ###############################DARAZ SCRAPPER############################################
      df=NULL
      keyword = input$txt
      if(input$cbDARAZ==TRUE || input$cbPICKABOO==TRUE){
        if(input$cbDARAZ==TRUE){
          ## BUG FIX: the original used `sep = ` (an empty argument), which
          ## falls back to paste()'s default sep = " " and injects a space
          ## into the search URL.  sep = "" is the intended concatenation.
          link = paste("https://www.daraz.com.bd/catalog/?q=", keyword, sep = "")
          # Extract the JSON blob embedded in the page's window.pageData script.
          j = sub("[ ]+window.pageData\\((.+)\\)" , "\\1",
                  grep("window.pageData", readLines(link), value=TRUE),
                  perl=TRUE)
          # Strip the surrounding <script> wrapper.
          j = gsub('<script>window.pageData=|</script>','',j)
          # Parse JSON and pull the product listing array as a data frame.
          df = fromJSON(j)
          df= as.data.frame(df$mods$listItems)
          ##################################PRODUCT DESCRIPTION#########################
          # Flatten each product's description list into one string; use a
          # placeholder when a product has no description.
          productDescription= df$description
          pdsLength=length(productDescription)
          for (i in 1:pdsLength) {
            pd=stri_join_list(productDescription[i], sep = "", collapse = NULL)
            if(length(pd)){
              productDescription[i] =pd}
            else{
              productDescription[i]=" not found"
            }
          }
          productDescription=unlist(productDescription)
          ################################################################################
          productURL= df$productUrl
          df =as.data.frame( cbind( ProductName= df$name,Source="DARAZ",originalPrice= df$originalPrice,price= df$price,ratingScore = df$ratingScore,review = df$review ,image = df$image))
          # Render the image as a link to the product page, with the
          # description as a hover tooltip.
          df$image <- paste0("<a title='",productDescription,"' target='_blank' href='",productURL,"'>","<img src='", df$image, "' style='height:60px'>","</a>")
          ####################################REMOVE FACTOR####################
          df=remove.factors(df)
          ###################################DATA MANUPULATION#############################################
          # Rescale ratings to 0-100 and backfill missing original prices
          # with the current price.
          MaxRating = max(df$ratingScore)
          MaxReview = max(df$review)  # NOTE(review): computed but unused (review rescaling is disabled)
          dfLength=length(df$originalPrice)
          for(i in 1:dfLength){
            df$ratingScore[i]= round((as.numeric(df$ratingScore[i]) /as.numeric(MaxRating))*100)
            if( is.na(df$originalPrice[i])){
              df$originalPrice[i]=df$price[i]
            }
          }
        }
        ####################PICKABOO######################
        #####################################################
        if( input$cbPICKABOO == TRUE){
          link = paste("https://www.pickaboo.com/search/result/?q=",keyword, sep = "")
          webpage <- read_html(link)
          data<- html_nodes(webpage, 'div#em-grid-mode')
          product = html_nodes(data,'h2 a')
          #################NAMES ##############
          Names = html_text(product)
          #################LINKS ##############
          ProductLinks = html_attr(product,"href")
          #################REVIEW ##############
          reviews = gsub('\\(|)', '', html_text(html_nodes(data,'span.amount')))
          #################RATING ##############
          ratings=strsplit(toString (html_nodes(data,'div.rating')),"," )
          ratings = as.numeric(gsub("\\D", "", unlist(ratings)))
          ratings[is.na(ratings)] <- 0
          #################PIKABOO PRICE EXTRECTION ##############
          # Each price box holds either a single price or an old/new pair.
          allprice = html_nodes(data,'div.price-box')
          priceLength = length(allprice)
          pdf<-data.frame(old=1:priceLength,new=1:priceLength)
          for(i in 1:priceLength){
            p=html_text(html_nodes(allprice[i],'span.price'))
            if(length(p)==1){
              pdf$old[i]=gsub("\\D", "", p)
              pdf$new[i]=pdf$old[i]
            }
            else{
              p=gsub("\\D", "", p)
              pdf$old[i]=p[1]
              pdf$new[i]=p[2]
            }
          }
          #################IMAGE ##############
          images = html_attr(html_nodes(data,'img.em-alt-org'),'src')
          images <- paste0("<a target='_blank' href='",ProductLinks,"'>","<img src='", images, "' style='height:60px'>","</a>")
          pickaboo =as.data.frame( cbind( ProductName= Names,Source="PICKABOO",originalPrice= pdf$old,price= pdf$new,ratingScore = ratings,review = reviews ,image = images))
          # Append to the Daraz rows, or start the table if Daraz was skipped.
          if(is.null(df))
          {
            df=pickaboo
          }
          else{
            df=rbind(df,pickaboo)
          }
        }
        ############################################################################
        # Apply the requested ordering via sqldf ("default" leaves df as-is).
        if(input$radio=="RaRePr"){
          df = sqldf("select * from df order by ratingScore desc, review desc, price asc")
        }
        if(input$radio=="ReRaPr"){
          df = sqldf("select * from df order by review desc,ratingScore desc, price asc")
        }
        if(input$radio=="PrReRa"){
          df = sqldf("select * from df order by price asc,review desc,ratingScore desc ")
        }
        if(input$radio=="PrRaRe"){
          ## BUG FIX: the original repeated the PrReRa ordering here;
          ## Price->Rating->Review must sort by rating before review.
          df = sqldf("select * from df order by price asc, ratingScore desc, review desc")
        }
        output$table <- renderDataTable(datatable(df,escape = FALSE))
      }
    })
  }
)
| /IntelligentProductSummarizer.R | no_license | MahmudRifath/web-scraper | R | false | false | 9,752 | r | #daraz searcher and Scrapper
#required Packages
library(jsonlite)
library(httr)
library(shiny)
library(shinythemes)
library(DT)
library(stringi)
library(taRifx)
library(sqldf)
library(xml2)
library(rvest)
library(stringr)
#########################################################################
#########################InterFace#######################################
#########################################################################
shinyApp(
ui = fluidPage(
shinythemes::themeSelector(), # <--- Add this somewhere in the UI
sidebarPanel(
textInput("txt", "Enter Searching Terms:", placeholder = "type here"),
h4("E-Commerce Sites"),
checkboxInput("cbDARAZ", label = "DARAZ", value = TRUE),
checkboxInput("cbPICKABOO",label = "PICKABOO", value = TRUE),
radioButtons("radio",h4("ORDERS"),
c("Default" = "default",
"Rating->Review->Price" = "RaRePr",
"Review->Rating->Price" = "ReRaPr",
"Price->Review->Rating" = "PrReRa",
"Price->Rating->Review" = "PrRaRe"
)),
actionButton("action", "Search")
#actionButton("action2", "Button2", class = "btn-primary")
),
mainPanel(
tabsetPanel(
tabPanel("Results"),
fluidRow(
column(12,
dataTableOutput('table')
)
)
)
)
),
server = function(input, output, session) {
observeEvent(input$action, {
###############################DARAZ SCRAPPER############################################
#write key word to search
#validate( need(input$txt, 'Check at least one letter!') )
df=NULL
keyword = input$txt
if(input$cbDARAZ==TRUE || input$cbPICKABOO==TRUE){
if(input$cbDARAZ==TRUE){
#joining keyword with link to query
link = paste("https://www.daraz.com.bd/catalog/?q=",keyword,sep = )
#getting JSON embaded in html of the page
j = sub("[ ]+window.pageData\\((.+)\\)" , "\\1",
grep("window.pageData", readLines(link), value=TRUE),
perl=TRUE)
#removing Script tag
j = gsub('<script>window.pageData=|</script>','',j)
#parsing JSON
df = fromJSON(j)
#getting product list array as dataFrame
df= as.data.frame(df$mods$listItems)
#selecting only Specific Columns
##################################PRODUCT DESCRIPTION#########################
productDescription= df$description
pdsLength=length(productDescription)
for (i in 1:pdsLength) {
pd=stri_join_list(productDescription[i], sep = "", collapse = NULL)
if(length(pd)){
productDescription[i] =pd}
else{
productDescription[i]=" not found"
}
}
productDescription=unlist(productDescription)
################################################################################
productURL= df$productUrl
df =as.data.frame( cbind( ProductName= df$name,Source="DARAZ",originalPrice= df$originalPrice,price= df$price,ratingScore = df$ratingScore,review = df$review ,image = df$image))
#df$image<-paste0("<img src='",df$image,"' height='52'></img> ",sep="")
df$image <- paste0("<a title='",productDescription,"' target='_blank' href='",productURL,"'>","<img src='", df$image, "' style='height:60px'>","</a>")
# View(df$image)
#exporting to csv
#write.csv(ex,'C:\\Users\\MAHMUD\\Desktop\\MyData.csv', row.names = FALSE)
#View(df)
####################################REMOVE FACTOR####################
df=remove.factors(df)
###########################################################
###################################DATA MANUPULATION#############################################
#df$originalPrice[is.na(df$originalPrice)]=0
MaxRating = max(df$ratingScore)
MaxReview = max(df$review)
dfLength=length(df$originalPrice)
for(i in 1:dfLength){
df$ratingScore[i]= round((as.numeric(df$ratingScore[i]) /as.numeric(MaxRating))*100)
# df$review[i]= (df$review[i]/MaxReview)*100
if( is.na(df$originalPrice[i])){
df$originalPrice[i]=df$price[i]
}
}
}
####################PICKABOO######################
#####################################################
if( input$cbPICKABOO == TRUE){
link = paste("https://www.pickaboo.com/search/result/?q=",keyword, sep = "")
webpage <- read_html(link)
data<- html_nodes(webpage, 'div#em-grid-mode')
product = html_nodes(data,'h2 a')
#################NAMES ##############
Names = html_text(product)
#################LINKS ##############
ProductLinks = html_attr(product,"href")
#################REVIEW ##############
reviews = gsub('\\(|)', '', html_text(html_nodes(data,'span.amount')))
#################RATING ##############
ratings=strsplit(toString (html_nodes(data,'div.rating')),"," )
ratings = as.numeric(gsub("\\D", "", unlist(ratings)))
ratings[is.na(ratings)] <- 0
#################PIKABOO PRICE EXTRECTION ##############
allprice = html_nodes(data,'div.price-box')
priceLength = length(allprice)
pdf<-data.frame(old=1:priceLength,new=1:priceLength)
for(i in 1:priceLength){(i)
p=html_text(html_nodes(allprice[i],'span.price'))
if(length(p)==1){
pdf$old[i]=gsub("\\D", "", p)
pdf$new[i]=pdf$old[i]
}
else{
p=gsub("\\D", "", p)
pdf$old[i]=p[1]
pdf$new[i]=p[2]
}
}
#################IMAGE ##############
images = html_attr(html_nodes(data,'img.em-alt-org'),'src')
images <- paste0("<a target='_blank' href='",ProductLinks,"'>","<img src='", images, "' style='height:60px'>","</a>")
pickaboo =as.data.frame( cbind( ProductName= Names,Source="PICKABOO",originalPrice= pdf$old,price= pdf$new,ratingScore = ratings,review = reviews ,image = images))
if(is.null(df))
{
df=pickaboo
}
else{
df=rbind(df,pickaboo)
}
}
############################################################################
if(input$radio=="RaRePr"){
df = sqldf("select * from df order by ratingScore desc, review desc, price asc")
}
if(input$radio=="ReRaPr"){
df = sqldf("select * from df order by review desc,ratingScore desc, price asc")
}
if(input$radio=="PrReRa"){
df = sqldf("select * from df order by price asc,review desc,ratingScore desc ")
}
if(input$radio=="PrRaRe"){
df = sqldf("select * from df order by price asc,review desc,ratingScore desc ")
}
output$table <- renderDataTable(datatable(df,escape = FALSE))
}
})
}
)
|
#' NBR2 - Normalized Burn Ratio 2
#'
#' NBR2 modifies the Normalized Burn Ratio to highlight water sensitivity in vegetation and may be useful in post-fire recovery studies.
#'
#' @param SWIR1 A raster layer object with the reflectance values for the Short Wave Infrared 1 band.
#' @param SWIR2 A raster layer object with the reflectance values for the Short Wave Infrared 2 band.
#' @return NBR2 - Normalized Burn Ratio 2.
#'
#' @examples
#' library(raster)
#' path_files <- system.file("extdata/", package="nightmares")
#' bands <- stack(list.files(path_files,".tif", full.names=TRUE))
#' x <- ref_oli(bands, sun.elev= 67.97)
#' NBR2(x[[6]], x[[7]])
#'
#' @references
#' \url{https://www.usgs.gov/core-science-systems/nli/landsat/landsat-surface-reflectance-derived-spectral-indices}.
#' \url{https://www.geo.university/pages/spectral-indices-with-multispectral-satellite-data}.
#' @export
#' @import raster
NBR2 <- function (SWIR1, SWIR2) {
  # Fail fast with the documented messages if either band is absent.
  if (missing(SWIR1)) {
    stop("Required data missing. Please, select the reflectance values for the Short Wave Infrared 1 band")
  }
  if (missing(SWIR2)) {
    stop("Required data missing. Please, enter the reflectance values for the Short Wave Infrared 2 band")
  }
  # BUG FIX: return the index visibly.  The original ended with
  # `NBR2 <- (...)`, and the value of an assignment is returned invisibly,
  # so `NBR2(x, y)` at top level computed the index but printed nothing.
  (SWIR1 - SWIR2) / (SWIR1 + SWIR2)
}
| /R/NBR2.R | no_license | cran/nightmares | R | false | false | 1,288 | r | #' NBR2 - Normalized Burn Ratio 2
#'
#' NBR2 modifies the Normalized Burn Ratio to highlight water sensitivity in vegetation and may be useful in post-fire recovery studies.
#'
#' @param SWIR1 A raster layer object with the reflectance values for the Short Wave Infrared 1 band.
#' @param SWIR2 A raster layer object with the reflectance values for the Short Wave Infrared 2 band.
#' @return NBR2 - Normalized Burn Ratio 2.
#'
#' @examples
#' library(raster)
#' path_files <- system.file("extdata/", package="nightmares")
#' bands <- stack(list.files(path_files,".tif", full.names=TRUE))
#' x <- ref_oli(bands, sun.elev= 67.97)
#' NBR2(x[[6]], x[[7]])
#'
#' @references
#' \url{https://www.usgs.gov/core-science-systems/nli/landsat/landsat-surface-reflectance-derived-spectral-indices}.
#' \url{https://www.geo.university/pages/spectral-indices-with-multispectral-satellite-data}.
#' @export
#' @import raster
NBR2 <- function (SWIR1, SWIR2) {
  # Validate that both SWIR bands were supplied before computing anything.
  if (missing(SWIR1)) {
    stop("Required data missing. Please, select the reflectance values for the Short Wave Infrared 1 band")
  }
  if (missing(SWIR2)) {
    stop("Required data missing. Please, enter the reflectance values for the Short Wave Infrared 2 band")
  }
  # Normalized difference of the two SWIR bands; the trailing assignment
  # mirrors the original (the value is returned invisibly).
  band_diff <- SWIR1 - SWIR2
  band_sum <- SWIR1 + SWIR2
  NBR2 <- band_diff / band_sum
}
|
## DataCamp-style dplyr exercises on the `hflights` flight data, plus a demo
## of dplyr's remote-table interface against a hosted MySQL course database.
## NOTE(review): assumes the `hflights` data set is already on the search
## path (e.g. via library(hflights)) -- confirm before sourcing.
library(data.table)
library(dplyr)
## group_by
# Unite and conquer using group_by: per-carrier cancellation percentage and
# mean arrival delay, ordered by delay and then cancellation rate.
hflights %>%
  group_by(UniqueCarrier) %>%
  summarise(p_canc = mean(Cancelled == 1) * 100,
            avg_delay = mean(ArrDelay, na.rm = T)) %>%
  arrange(avg_delay, p_canc)
# Ordered overview of average arrival delays per carrier
# (only genuinely delayed flights: ArrDelay present and positive).
filter(hflights, !is.na(ArrDelay), ArrDelay > 0) %>%
  group_by(UniqueCarrier) %>%
  summarise(avg = mean(ArrDelay)) %>%
  mutate(rank = rank(avg)) %>%
  arrange(rank)
# How many airplanes only flew to one destination?
# n_distinct() counts unique destinations per tail number.
hflights %>%
  group_by(TailNum) %>%
  summarise(ndest = n_distinct(Dest)) %>%
  filter(ndest == 1) %>%
  summarise(nplanes = n())
# Find the most visited destination for each carrier
# (rank(desc(n)) == 1 keeps the top destination; ties keep several rows).
hflights %>%
  group_by(UniqueCarrier, Dest) %>%
  summarise(n = n()) %>%
  mutate(rank = rank(desc(n))) %>%
  filter(rank == 1)
## dplyr and databases
# dplyr deals with different types: the same verbs work on a data.table.
hflights2 <- as.data.table(hflights)
summarise(hflights2, n_carrier = n_distinct(UniqueCarrier))
# dplyr and mySQL databases
# Set up a connection to the mysql database
# (public read-only course credentials; NOTE(review): src_mysql() is
# deprecated in recent dplyr in favour of DBI + dbplyr -- confirm version).
my_db <- src_mysql(dbname = "dplyr",
                   host = "courses.csrrinzqubik.us-east-1.rds.amazonaws.com",
                   port = 3306,
                   user = "student",
                   password = "datacamp")
# Reference a table within that source: nycflights
nycflights <- tbl(my_db, "dplyr")
# glimpse at nycflights
glimpse(nycflights)
# Ordered, grouped summary of nycflights (translated to SQL and computed
# lazily in the database, not in R).
nycflights %>%
  group_by(carrier) %>%
  summarise(n_flights = n(), avg_delay = mean(arr_delay)) %>%
  arrange(avg_delay)
| /DataCamp/Data-Manipulation-in-R-with-dplyr/group-by-and-working-with-db.R | no_license | r-repos-org/R-2 | R | false | false | 1,601 | r | library(data.table)
library(dplyr)
## group_by
# Unite and conquer using group_by
hflights %>%
group_by(UniqueCarrier) %>%
summarise(p_canc = mean(Cancelled == 1) * 100,
avg_delay = mean(ArrDelay, na.rm = T)) %>%
arrange(avg_delay, p_canc)
# Ordered overview of average arrival delays per carrier
filter(hflights, !is.na(ArrDelay), ArrDelay > 0) %>%
group_by(UniqueCarrier) %>%
summarise(avg = mean(ArrDelay)) %>%
mutate(rank = rank(avg)) %>%
arrange(rank)
# How many airplanes only flew to one destination?
hflights %>%
group_by(TailNum) %>%
summarise(ndest = n_distinct(Dest)) %>%
filter(ndest == 1) %>%
summarise(nplanes = n())
# Find the most visited destination for each carrier
hflights %>%
group_by(UniqueCarrier, Dest) %>%
summarise(n = n()) %>%
mutate(rank = rank(desc(n))) %>%
filter(rank == 1)
## dplyr and databases
# dplyr deals with different types
hflights2 <- as.data.table(hflights)
summarise(hflights2, n_carrier = n_distinct(UniqueCarrier))
# dplyr and mySQL databases
# Set up a connection to the mysql database
my_db <- src_mysql(dbname = "dplyr",
host = "courses.csrrinzqubik.us-east-1.rds.amazonaws.com",
port = 3306,
user = "student",
password = "datacamp")
# Reference a table within that source: nycflights
nycflights <- tbl(my_db, "dplyr")
# glimpse at nycflights
glimpse(nycflights)
# Ordered, grouped summary of nycflights
nycflights %>%
group_by(carrier) %>%
summarise(n_flights = n(), avg_delay = mean(arr_delay)) %>%
arrange(avg_delay)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modify_columns.R
\name{cols_merge_range}
\alias{cols_merge_range}
\title{Merge two columns to a value range column}
\usage{
cols_merge_range(data, col_begin, col_end)
}
\arguments{
\item{data}{a table object that is created using the \code{\link{gt}()}
function.}
\item{col_begin}{a column that contains values for the start of the range.}
\item{col_end}{a column that contains values for the end of the range.}
}
\value{
an object of class \code{gt_tbl}.
}
\description{
Merge two columns to a value range column
}
\section{Figures}{
\if{html}{\figure{man_cols_merge_range_1.svg}{options: width=100\%}}
}
\examples{
# Use `gtcars` to create a gt table,
# keeping only the `model`, `mpg_c`,
# and `mpg_h` columns; merge the mpg
# columns together as a single range
# column (which is labeled as MPG,
# in italics)
tab_1 <-
gtcars \%>\%
dplyr::select(model, starts_with("mpg")) \%>\%
dplyr::slice(1:8) \%>\%
gt() \%>\%
cols_merge_range(
col_begin = vars(mpg_c),
col_end = vars(mpg_h)) \%>\%
cols_label(
mpg_c = md("*MPG*")
)
}
\seealso{
Other column modification functions: \code{\link{cols_align}},
\code{\link{cols_hide}}, \code{\link{cols_label}},
\code{\link{cols_merge_uncert}},
\code{\link{cols_merge}}, \code{\link{cols_move_to_end}},
\code{\link{cols_move_to_start}},
\code{\link{cols_move}}, \code{\link{cols_split_delim}}
}
\concept{column modification functions}
| /man/cols_merge_range.Rd | permissive | mustafaascha/gt | R | false | true | 1,492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modify_columns.R
\name{cols_merge_range}
\alias{cols_merge_range}
\title{Merge two columns to a value range column}
\usage{
cols_merge_range(data, col_begin, col_end)
}
\arguments{
\item{data}{a table object that is created using the \code{\link{gt}()}
function.}
\item{col_begin}{a column that contains values for the start of the range.}
\item{col_end}{a column that contains values for the end of the range.}
}
\value{
an object of class \code{gt_tbl}.
}
\description{
Merge two columns to a value range column
}
\section{Figures}{
\if{html}{\figure{man_cols_merge_range_1.svg}{options: width=100\%}}
}
\examples{
# Use `gtcars` to create a gt table,
# keeping only the `model`, `mpg_c`,
# and `mpg_h` columns; merge the mpg
# columns together as a single range
# column (which is labeled as MPG,
# in italics)
tab_1 <-
gtcars \%>\%
dplyr::select(model, starts_with("mpg")) \%>\%
dplyr::slice(1:8) \%>\%
gt() \%>\%
cols_merge_range(
col_begin = vars(mpg_c),
col_end = vars(mpg_h)) \%>\%
cols_label(
mpg_c = md("*MPG*")
)
}
\seealso{
Other column modification functions: \code{\link{cols_align}},
\code{\link{cols_hide}}, \code{\link{cols_label}},
\code{\link{cols_merge_uncert}},
\code{\link{cols_merge}}, \code{\link{cols_move_to_end}},
\code{\link{cols_move_to_start}},
\code{\link{cols_move}}, \code{\link{cols_split_delim}}
}
\concept{column modification functions}
|
## botanist effects
## Tile plot of which recorder surveyed each turf in each year, one panel
## per site, with turfs ordered by treatment so recorder patterns line up.
turf_environment2 %>%
  mutate(TTtreat = as.factor(TTtreat)) %>%
  mutate(turfID = reorder(factor(turfID), as.numeric(TTtreat))) %>%
  ggplot(aes(x = year, y = turfID, fill = recorder)) +
  geom_tile() +
  facet_wrap(~siteID, scales = "free_y")
# non- randomised | /community/DataChecks/botanist_effects.R | no_license | chencaf/PFTC_1_2_China | R | false | false | 313 | r | ##botanist effects
turf_environment2 %>%
mutate(
turfID = factor(turfID),
TTtreat = as.factor(TTtreat),
turfID = reorder(turfID, as.numeric(TTtreat))
) %>%
ggplot(aes(x = year, y = turfID, fill = recorder)) +
geom_tile() +
facet_wrap(~siteID, scales = "free_y")
# non- randomised |
# Working directory holding the RSEM quantification outputs for this project.
setwd("~/06_mm10_SNUH_radiation/21_RSEM")
#devtools::install_github("hadley/tidyverse")
#if (!requireNamespace("BiocManager", quietly = TRUE))
#install.packages("BiocManager")
# NOTE(review): package installation is mixed into the analysis script, so
# every run re-triggers an install.
BiocManager::install("tximport", version = "3.8")
library(tximport)
library(tidyverse)
library("readr")
BiocManager::install("tximportData")
library("tximportData")
# Walk-through of the tximport vignette example (salmon quantifications
# shipped with the tximportData package).
dir <- system.file("extdata", package="tximportData")
dir
samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
head(samples)
# Two conditions, three replicates each, in file order.
samples$condition <- factor(rep(c("A","B"),each=3))
rownames(samples) <- samples$run
samples[,c("pop","center","run","condition")]
# One quant.sf.gz per run, named by run ID so tximport labels columns.
files <- file.path(dir,"salmon", samples$run, "quant.sf.gz")
files %>% head()
names(files) <- samples$run
# Transcript-to-gene map used to summarize transcript counts to gene level.
tx2gene <- read_csv(file.path(dir, "tx2gene.gencode.v27.csv"))
head(tx2gene)
txi <- tximport(files, type="salmon", tx2gene=tx2gene)
head(txi)
# Build a DESeq2 dataset keyed on condition.
ddsTxi <- DESeqDataSetFromTximport(txi,
                                   colData = samples,
                                   design = ~ condition)
# One-time installation of the analysis packages.
# NOTE(review): `cts` and `coldata` below are never defined in this script --
# this section appears to be pasted from the DESeq2 vignette; confirm before
# running it as-is.
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
BiocManager::install("DESeq2", version = "3.8")
BiocManager::install("copynumber", version = "3.8")
library(DESeq2)  # BUG FIX: was `nlibrary(DESeq2)`, a typo that errors at runtime
library("copynumber")
require("DESeq2")  # redundant with library(DESeq2) above; kept for parity
dds <- DESeqDataSetFromMatrix(countData = cts,
                              colData = coldata,
                              design= ~ batch + condition)
dds <- DESeq(dds)
resultsNames(dds) # lists the coefficients
res <- results(dds, name="condition_trt_vs_untrt")
# or to shrink log fold changes association with condition:
res <- lfcShrink(dds, coef="condition_trt_vs_untrt", type="apeglm")
# BUG FIX: `R --version` is a shell command, not R code (it errored with
# "object 'R' not found"); `version` below prints the same information from R.
version
if (!requireNamespace("BiocManager", quietly = TRUE))
  install.packages("BiocManager")
BiocManager::install("DESeq2")
install.packages("readtext")
install.packages("devtools")
devtools::install_github('kevinblighe/EnhancedVolcano')
### TPM of Myc in mouse fallopian tube samples: BFB vs control (n = 3 each)
bfb <- c(185.31, 162.25, 128.76)
con <- c(54.86, 55.30, 58.82)
t.test(bfb, con)
wilcox.test(bfb, con)
summary(bfb)
summary(con)
mean(bfb) / mean(con)  # fold change of mean TPM

# Same comparison using Myc counts from the DESeq data
bfb_count <- c(3105.00, 2992.00, 2237.00)
control_count <- c(1094.00, 1031.00, 975.00)
t.test(bfb_count, control_count)
summary(bfb_count)

boxplot(con, bfb, ylim = c(0, 200),
        names = c("control", "mm_study4_fallopian_tube_SO2"),
        ylab = "TPM", boxwex = 0.25)
stripchart(list(con, bfb), vertical = TRUE,
           method = "jitter", add = TRUE, pch = 20, col = alpha('black', 0.2))

# 95% confidence half-widths (normal approximation), per group
con_ci <- 1.96 * sd(con) / sqrt(length(con))
bfb_ci <- 1.96 * sd(bfb) / sqrt(length(bfb))
pdf("BFB_Myc_TPM.pdf")
barplot(c(mean(con), mean(bfb)), space = 10, ylim = c(0, 200), border = FALSE,
        names.arg = c("control", "mm_study4_fallopian_tube_SO2"),
        ylab = "TPM", col = 'gray')
arrows(10.5, mean(con) - con_ci, 10.5, mean(con) + con_ci,
       length = 0.1, angle = 90, code = 3, col = alpha('gray', 1), lwd = 2)
# BUG FIX: the BFB error bar previously used con_ci; bfb_ci was computed
# but never used.
arrows(21.5, mean(bfb) - bfb_ci, 21.5, mean(bfb) + bfb_ci,
       length = 0.1, angle = 90, code = 3, col = alpha('gray', 1), lwd = 2)
dev.off()
# NOTE(review): rc_del, frc_del, frc_radio and total_sv are not defined
# anywhere in this script -- this fragment looks pasted from a different
# SV analysis. Also, the first line fills rc_del while the next two read
# frc_del$number; confirm which object was intended.
rc_del$number<-table(total_sv$experiment)
frc_del$ci<-1.96*frc_del$sd/sqrt(frc_del$number)
frc_del$se<-frc_del$sd/sqrt(frc_del$number)
# Standard-error bars at x = 1..3 for the three radiation groups.
arrows(c(1,2,3),frc_radio$mean-frc_radio$se,c(1,2,3),frc_radio$mean+frc_radio$se,length = 0.05,angle=90,code=3,col = alpha('black',1))
### mouse fallopian tube DEG: BFB vs control ####
#set working directory
setwd("~/06_mm10_SNUH_radiation/21_RSEM")
rm(list=ls())
library(DESeq2)
library(tximport)
#BiocManager::install("tximport")
library(tidyverse)
library(NMF)
#library(tibble)
#install.packages("NMF")
#rm(list=ls())

# Sample sheet: three BFB and three control libraries, matching the
# <sample>_rsem.genes.results files in the working directory.
samples <- matrix(c('BFB-1','BFB-2','BFB-3','control-R','control-F','control-B'),ncol = 1)
samples <- samples %>% as.data.frame()
samples$condition <- rep(c('BFB','control'),each=3)
samples
files <- file.path(paste(samples$V1,"_rsem.genes.results",sep=""))
names(files) <- samples$V1
txi <- tximport(files,type = 'rsem',txIn=FALSE,txOut = FALSE)
str(txi)
# RSEM reports zero effective length for unexpressed genes; DESeq2 cannot
# handle zero lengths, so clamp them to 1.
txi$length[txi$length == 0] <- 1
samples <- samples %>% as.data.frame() %>% column_to_rownames('V1')
samples
dds <- DESeqDataSetFromTximport(txi, colData = samples, design = ~condition)
dds
keep <- rowSums(counts(dds)) >= 10 # for saving memory
dds <- dds[keep,]
dds$condition <- relevel(dds$condition, ref = "control")
dds <- DESeq(dds)
res <- results(dds, tidy = TRUE)
#vsd <- vst(dds, blind=FALSE)
rld <- rlog(dds, blind=FALSE)
# NOTE(review): this path looks like it is missing a "/" before
# "mart_export.txt" -- verify the file name on disk before running.
biomart <- read.table("~/06_mm10_SNUH_radiation/21_RSEMmart_export.txt",sep='\t',header = TRUE)
head(biomart)
res
# Flag significant genes and attach gene annotation from biomart.
res21 <- res %>% mutate(sig = padj < 0.05) %>% as_tibble()  # tbl_df() is defunct
colnames(res21)[1] <- "row"
res22 <- res21 %>% arrange(padj) %>% left_join(biomart, by = c("row"="Gene.stable.ID.1"))
biomart <- biomart[,c(3,4,1,2)]
biomart
res21
write.table(res22,"BFB_mouse_fallopian_DESeq2_result.txt",sep='\t')
table(is.na(res22$Gene.name))
# supervised heatmap of the top 50 genes by adjusted p-value.
# BUG FIX: labels previously read res21$'Gene name', a column that does not
# exist (the annotation lives in res22 as Gene.name), so rows were unlabeled.
top50 <- arrange(res22, padj, pvalue)
pdf("Heatmap.pdf")
aheatmap(assay(rld)[top50$row[1:50], ], labRow = top50$Gene.name[1:50], scale="row",
         distfun="euclidean", annCol=select(samples,condition), col=c("green","black","black","red"))
aheatmap(assay(rld)[top50$row[1:50], ], labRow = top50$Gene.name[1:50], scale="none",
         distfun="euclidean", annCol=select(samples,condition), col=c("green","black","black","red"))
dev.off()
?aheatmap
?dist
# plotCounts per gene of interest.
# BUG FIX: these previously referenced dds2, which is never defined here.
plotCounts(dds,gene="ENSMUSG00000017146", intgroup = "condition",main = 'Brca1')
plotCounts(dds,gene="ENSMUSG00000041147", intgroup = "condition",main = 'Brca2')
plotCounts(dds,gene="ENSMUSG00000059552", intgroup = "condition",main = 'Trp53')
plotCounts(dds,gene="ENSMUSG00000026187", intgroup = "condition",main = 'Xrcc5')
# volcano plot using ggplot2
res21 %>% ggplot(aes(log2FoldChange, -1*log10(pvalue), col=sig)) + geom_point() + ggtitle("Volcano plot")
res21 %>% ggplot(aes(baseMean, -1*log10(pvalue), col=sig)) + geom_point() + ggtitle("Volcano plot")
#devtools::install_github("hadley/tidyverse")
#if (!requireNamespace("BiocManager", quietly = TRUE))
#install.packages("BiocManager")
BiocManager::install("tximport", version = "3.8")
library(tximport)
library(tidyverse)
library("readr")
BiocManager::install("tximportData")
library("tximportData")
dir <- system.file("extdata", package="tximportData")
dir
samples <- read.table(file.path(dir,"samples.txt"), header=TRUE)
head(samples)
samples$condition <- factor(rep(c("A","B"),each=3))
rownames(samples) <- samples$run
samples[,c("pop","center","run","condition")]
files <- file.path(dir,"salmon", samples$run, "quant.sf.gz")
files %>% head()
names(files) <- samples$run
tx2gene <- read_csv(file.path(dir, "tx2gene.gencode.v27.csv"))
head(tx2gene)
txi <- tximport(files, type="salmon", tx2gene=tx2gene)
head(txi)
ddsTxi <- DESeqDataSetFromTximport(txi,
colData = samples,
design = ~ condition)
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("DESeq2", version = "3.8")
BiocManager::install("copynumber", version = "3.8")
nlibrary(DESeq2)
library("copynumber")
require("DESeq2")
dds <- DESeqDataSetFromMatrix(countData = cts,
colData = coldata,
design= ~ batch + condition)
dds <- DESeq(dds)
resultsNames(dds) # lists the coefficients
res <- results(dds, name="condition_trt_vs_untrt")
# or to shrink log fold changes association with condition:
res <- lfcShrink(dds, coef="condition_trt_vs_untrt", type="apeglm")
R --version
version
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
BiocManager::install("DESeq2")
install.packages("readtext")
install.packages("devtools")
devtools::install_github('kevinblighe/EnhancedVolcano')
###TPM of Myc in mfallopian tube sample
bfb<-c(185.31,162.25,128.76)
con<-c(54.86,55.30,58.82)
t.test(bfb,con)
wilcox.test(bfb,con)
summary(bfb)
summary(con)
mean(bfb)/mean(con)
#difference using Myc count of DESeq data
bfb_count<-c(3105.00,2992.00,2237.00)
control_count<-c(1094.00,1031.00,975.00)
t.test(bfb_count,control_count)
summary(bfb_count)
boxplot(con,bfb,ylim=c(0,200),names = c("control","mm_study4_fallopian_tube_SO2"),ylab= "TPM",boxwex=0.25)
stripchart(list(con,bfb),vertical = TRUE,
method = "jitter", add = TRUE, pch = 20, col = alpha('black',0.2))
con_ci<-1.96*sd(con)/sqrt(3)
bfb_ci<-1.96*sd(bfb)/sqrt(3)
pdf("BFB_Myc_TPM.pdf")
barplot(c(mean(con),mean(bfb)),space = 10,ylim=c(0,200),border = F,names.arg = c("control","mm_study4_fallopian_tube_SO2"),ylab= "TPM",col='gray')
arrows(10.5,mean(con)-con_ci,10.5,mean(con)+con_ci,length = 0.1,angle=90,code=3,col = alpha('gray',1),lwd = 2)
arrows(21.5,mean(bfb)-con_ci,21.5,mean(bfb)+con_ci,length = 0.1,angle=90,code=3,col = alpha('gray',1),lwd=2)
dev.off()
rc_del$number<-table(total_sv$experiment)
frc_del$ci<-1.96*frc_del$sd/sqrt(frc_del$number)
frc_del$se<-frc_del$sd/sqrt(frc_del$number)
arrows(c(1,2,3),frc_radio$mean-frc_radio$se,c(1,2,3),frc_radio$mean+frc_radio$se,length = 0.05,angle=90,code=3,col = alpha('black',1))
###mfallopian tube DEG####
#set working directory
setwd("~/06_mm10_SNUH_radiation/21_RSEM")
rm(list=ls())
library(DESeq2)
library(tximport)
#BiocManager::install("tximport")
library(tidyverse)
library(NMF)
#library(tibble)
#install.packages("NMF")
#rm(list=ls())
samples <- matrix(c('BFB-1','BFB-2','BFB-3','control-R','control-F','control-B'),ncol = 1)
samples <- samples %>% as.data.frame()
samples$condition <- rep(c('BFB','control'),each=3)
samples
files <- file.path(paste(samples$V1,"_rsem.genes.results",sep=""))
names(files) <- samples$V1
txi <- tximport(files,type = 'rsem',txIn=FALSE,txOut = FALSE)
str(txi)
txi$length[txi$length == 0] <- 1
samples <- samples %>% as.data.frame() %>% column_to_rownames('V1')
samples
dds <- DESeqDataSetFromTximport(txi, colData = samples, design = ~condition)
dds
keep <- rowSums(counts(dds)) >= 10 # for saving memory
dds <- dds[keep,]
dds$condition <- relevel(dds$condition, ref = "control")
dds <- DESeq(dds)
res <- results(dds,tidy=TRUE)
#vsd <- vst(dds, blind=FALSE)
rld <- rlog(dds, blind=FALSE)
biomart <- read.table("~/06_mm10_SNUH_radiation/21_RSEMmart_export.txt",sep='\t',header = T)
head(biomart)
res
res21 <- res %>% mutate(sig=padj<0.05) %>% tbl_df()
colnames(res21)[1] <- "row"
res22 <- res21 %>% arrange(padj) %>% left_join(biomart, by = c("row"="Gene.stable.ID.1"))
biomart <- biomart[,c(3,4,1,2)]
biomart
res21
write.table(res22,"BFB_mouse_fallopian_DESeq2_result.txt",sep='\t')
table(is.na(res22$Gene.name))
# supervised heatmap using aheatmap function
pdf("Heatmap.pdf")
aheatmap(assay(rld)[arrange(res21, padj, pvalue)$row[1:50], ], labRow = arrange(res21, padj, pvalue)$'Gene name'[1:50], scale="row",
distfun="euclidean", annCol=select(samples,condition), col=c("green","black","black","red"))
aheatmap(assay(rld)[arrange(res21, padj, pvalue)$row[1:50], ], labRow = arrange(res21, padj, pvalue)$'Gene name'[1:50], scale="none",
distfun="euclidean", annCol=select(samples,condition), col=c("green","black","black","red"))
dev.off()
?aheatmap
?dist
# plotCounts per a gene
plotCounts(dds2,gene="ENSMUSG00000017146", intgroup = "condition",main = 'Brca1')
plotCounts(dds2,gene="ENSMUSG00000041147", intgroup = "condition",main = 'Brca2')
plotCounts(dds2,gene="ENSMUSG00000059552", intgroup = "condition",main = 'Trp53')
plotCounts(dds2,gene="ENSMUSG00000026187", intgroup = "condition",main = 'Xrcc5')
# volcano plot using ggplot2
res21 %>% ggplot(aes(log2FoldChange, -1*log10(pvalue), col=sig)) + geom_point() + ggtitle("Volcano plot")
res21 %>% ggplot(aes(baseMean, -1*log10(pvalue), col=sig)) + geom_point() + ggtitle("Volcano plot") |
#' Tabsets for RMD
#'
#' Builds a CSS-only tabset: one hidden radio input + label per tab title,
#' plus one section per tab's content. Panel visibility is driven by the
#' accompanying styles file.
#'
#' @param tab_titles
#'
#' The titles of the tabs (must be a list and same length/order as content)
#'
#' @param tab_contents
#'
#' The contents of the tabs (must be a list and same length/order as titles)
#'
#' @param name
#'
#' Unique name for the tabset (used to namespace all element ids)
#'
#' @examples
#'
#' ztabs(name = 'name', tab_contents= list('panel1 content', 'panel2 content') , tab_titles =list('title1','title2') )
#'
#'You can also add in little icons with fontawesome
#'
#' tab_titles= list(tags$div(fa(name = 'venus-mars', fill="steelblue"), "Sex")
#'
#'** note...make sure you have access to the proper styles file.
#' @export
#'
#'
#'
ztabs<-function(tab_titles, tab_contents, name){
  # A silent title/content count mismatch used to produce orphaned tabs
  # or panels; fail fast instead.
  stopifnot(length(tab_titles) == length(tab_contents))
  # One radio button + label per tab. Ids are "<i>id<name>" and each input
  # controls panel "<i><name>". NOTE(review): ids that start with a digit are
  # awkward to target from CSS selectors; confirm the styles file copes.
  inp <- lapply(seq_along(tab_titles), function(i){
    radio_attrs <- list(type = "radio",
                        name = name,
                        id = paste0(i, "id", name),
                        'aria-controls' = paste0(i, name))
    if (i == 1) radio_attrs$checked <- NA  # first tab selected by default
    tagList(
      do.call(tags$input, radio_attrs),
      tags$label('for' = paste0(i, "id", name),
                 tab_titles[[i]]))
  })
  # One content panel per tab, id-matched to the aria-controls above.
  sec <- lapply(seq_along(tab_contents), function(i){
    tags$section(id = paste0(i, name),
                 class = "tab-panel", tab_contents[[i]])
  })
  tags$div(class = "tabset",
           tagList(inp), tags$div(class = "tab-panels", tagList(sec))
  )
}
| /R/ztabs.R | no_license | ctzn-pub/ztools | R | false | false | 1,925 | r |
#' Tabsets for RMD
#'
#' @param tab_contents
#'
#' The contents of the tabs (must be a list and same length/order as titles)
#'
#' @param tab_titles
#'
#' The titles of the tabs (must be a list and same length/order as content)
#'
#' @param name
#'
#'Unique name for the tabset
#'
#' @examples
#'
#' ztabs(name = 'name', tab_contents= list('panel1 content', 'panel2 content') , tab_titles =list('title1','title2') )
#'
#'You can also add in little icons with fontawesome
#'
#' tab_titles= list(tags$div(fa(name = 'venus-mars', fill="steelblue"), "Sex")
#'
#'** note...make sure you have access to the proper styles file.
#' @export
#'
#'
#'
ztabs<-function(tab_titles, tab_contents, name){
inp<- lapply(1:length(tab_titles),function(tabtitle){
if(tabtitle == 1){ inputtag<- tags$input(type="radio",
name=name,
id=paste0( tabtitle, "id", name),
'aria-controls'=paste0(tabtitle, name) ,
checked=NA)}else{inputtag<- tags$input(type="radio",
name=name,
id=paste0( tabtitle, "id", name),
'aria-controls'=paste0(tabtitle, name)
)}
tagList(
inputtag,
tags$label('for'=paste0(tabtitle, "id", name),
tab_titles[[tabtitle]]))})
sec<- lapply(1:length(tab_contents),function(tab_content){
tags$section( id=paste0( tab_content , name) ,
class="tab-panel", tab_contents[[tab_content]])})
tags$div(class="tabset",
tagList(inp),tags$div(class="tab-panels", tagList(sec) )
)
}
|
# Plot 1: histogram of Global Active Power from the UCI household power
# consumption data.
file = "./household_power_consumption.txt"
# Read the header row only, to recover the column names.
cols <- read.csv2(file, header=TRUE, nrows = 1)
# Read just the target window of rows.
# NOTE(review): skip = 66637 / nrows = 2879 presumably selects the
# 2007-02-01 -- 2007-02-02 observations -- verify against the raw file.
df <- read.csv2(file, skip = 66637, nrows = 2879, col.names = colnames(cols))
# Coerce to numeric; "?" missing-value markers become NA (with a warning).
GAP <- as.numeric(df$Global_active_power)
png('Plot1.png', width = 480, height = 480)
hist(GAP, col='red', main='Global Active Power', xlab='Global Active Power (kilowatts)')
dev.off()
| /Plot1.r | no_license | aladdin-oct/EDA-Assignment1 | R | false | false | 365 | r | file = "./household_power_consumption.txt"
cols <- read.csv2(file, header=TRUE, nrows = 1)
df <- read.csv2(file, skip = 66637, nrows = 2879, col.names = colnames(cols))
GAP <- as.numeric(df$Global_active_power)
png('Plot1.png', width = 480, height = 480)
hist(GAP, col='red', main='Global Active Power', xlab='Global Active Power (kilowatts)')
dev.off()
|
# Parse a cluster file (one tab-separated ortholog cluster per line) into a
# long-format CSV of (cluster_id, species_name, gene_name), then load it
# back for protein-ID -> gene-ID conversion below.
library(magrittr)
library(getopt)
library(dplyr)
# Command-line options: -i input cluster file, -o long-format output CSV,
# -t output CSV after protein->gene ID conversion.
spec <- matrix(
  c(
    "input", "i", 2, "character", "Input file",
    "output", "o", 2, "character", "Output file",
    "output_convertedID", "t", 2, "character", "Output file after ID convertion"
  ),
  byrow = TRUE, ncol = 5
)
opt <- getopt(spec = spec)
input <- opt$input
output <- opt$output
output_convertedID <- opt$output_convertedID
in_file <- file(input, "r")
# Write an empty 3-column table first so the loop below can append rows.
out_file <- structure(
  .Data = matrix(nrow = 0, ncol = 3),
  dimnames = list(c(), c("cluster_id", "species_name", "gene_name"))
)
write.table(out_file, output,
  row.names = FALSE, sep = ","
)
# gene_num_vector <- c()
run_loop <- TRUE
run_loop_num <- 1
# Stream the cluster file one line at a time (input files can be large).
while (run_loop) {
  in_content <- readLines(in_file, n = 1)
  run_loop <- (length(in_content) > 0)
  if (!run_loop) break
  # Strip quoting and split the line into cluster header + member genes.
  temp <- in_content %>%
    gsub(pattern = '"', replacement = "") %>%
    strsplit(split = "\t") %>%
    extract2(1)
  # Cluster id is the header text before the first "(".
  cluster_id <- temp[1] %>%
    strsplit(split = "[(]") %>%
    extract2(1) %>%
    extract(1)
  # Gene count parsed from the header (currently unused; see commented lines).
  gene_num <- temp[1] %>%
    strsplit(split = "[:|,]") %>%
    extract2(1) %>%
    extract(2) %>%
    as.numeric()
  # Member entries look like "<Spp>_<gene>(...)": keep the part before "(".
  temp_gene <- temp[-1] %>%
    strsplit(split = "[(]") %>%
    sapply(extract, 1)
  # Every member must be "XXX_<gene>" with a 3-letter species prefix.
  stopifnot(all(regexpr("_", temp_gene) == 4)) # TRUE
  species_name <- substr(temp_gene, 1, 3)
  gene_name <- substring(temp_gene, 5)
  out_dataframe <- data.frame(cluster_id, species_name, gene_name)
  # gene_num_vector[run_loop_num] <- gene_num
  write.table(
    out_dataframe, output,
    row.names = FALSE, col.names = FALSE,
    append = TRUE, quote = FALSE, sep = ","
  )
  cat("\r", run_loop_num)
  run_loop_num %<>% add(1)
}
close(in_file)
# sum(gene_num_vector)
## Load the ortholog table (long data format)
ortho_table <- read.csv(output)
# dim(ortho_table) # 478525 3
## Convert the protein ID into gene ID
### Define the function for ID extracting
# Return the prefix of `raw_id` up to (but not including) its
# `which_point`-th "." character.
#
# Args:
#   raw_id: a single ID string, e.g. "Potri.001G000400.v4.1".
#   which_point: which "." to cut at (default 2, matching the
#     "<gene>.<version>" style suffixes used below).
#
# Returns NA if raw_id has fewer dots than which_point (gregexpr yields an
# out-of-range NA index), matching the original magrittr-chain behavior.
# Rewritten in base R -- the magrittr extract2/extract chain obscured a
# simple two-step index.
extract_prefix_before_point <- function(raw_id, which_point = 2) {
  dot_positions <- gregexpr("[.]", raw_id)[[1]]
  cut_position <- dot_positions[which_point]
  substr(raw_id, 1, cut_position - 1)
}
### Replace Ptr peptide ID with gene ID
# Read the Populus trichocarpa primary-transcript protein FASTA; only the
# ">" header lines are needed to map protein IDs to gene IDs.
Ptr_protein_fasta <-
  scan(paste0(
    "/home/woodformation/HDD1/GenomicsData/Ptrichocarpa_533_v4.1/",
    "Ptrichocarpa_533_v4.1.protein_primaryTranscriptOnly.fa"
  ),
  what = "", sep = "\n"
  )
fasta_info <- grep(">", Ptr_protein_fasta, value = TRUE)
# Field 1 of each header (minus the ">") is the protein ID.
Ptr_protein_ID <-
  strsplit(fasta_info, " ") %>%
  sapply(extract, 1) %>%
  sub(">", "", .)
# Field 5 carries "ID=<gene>.<...>"; keep the prefix before the 2nd dot.
Ptr_gene_ID <-
  strsplit(fasta_info, " ") %>%
  sapply(extract, 5) %>%
  sub("ID=", "", .) %>%
  sapply(extract_prefix_before_point, 2)
# Every PoT entry in the ortholog table must be a known protein ID.
stopifnot(
  all(filter(ortho_table, species_name == "PoT")$gene_name %in% Ptr_protein_ID)
)
Ptr_row_indices <- which(ortho_table$species_name == "PoT")
ortho_table[Ptr_row_indices, "gene_name"] <-
  paste0(
    Ptr_gene_ID[match(
      ortho_table[Ptr_row_indices, "gene_name"],
      Ptr_protein_ID
    )],
    ".v4.1"
  )
### Replace Egr peptide ID with gene ID
# NOTE(review): "1.p" is a regex, so "." matches any character (e.g. "1xp"),
# not just a literal dot -- confirm "1\\.p" was not intended.
Egr_row_indices <- which(ortho_table$species_name == "EuG")
ortho_table[Egr_row_indices, "gene_name"] <-
  sub("1.p", "v2.0", ortho_table[Egr_row_indices, "gene_name"])
### Replace Tar peptide ID with gene ID
# Map Trochodendron transcript IDs to gene IDs via the GTF attributes column.
Tar_gtf <-
  read.table(paste0(
    "/home/woodformation/HDD1/GenomicsData/Taralioides_20200702/",
    "Trochodendron_aralioides_chromosomes_pasa2.longest.filter.gtf"
  ), sep = "\t", header = FALSE)
ID_info <- unique(Tar_gtf$V9)
# Attribute field 1: 'transcript_id <id>'; field 2: ' gene_id <id>'.
Tar_transcript_id <-
  strsplit(ID_info, ";") %>%
  sapply(extract, 1) %>%
  sub("transcript_id ", "", .)
Tar_gene_id <-
  strsplit(ID_info, ";") %>%
  sapply(extract, 2) %>%
  sub(" gene_id ", "", .)
# sub("evm.TU.", "", .)
# Dr. Ku remove prefix of gene id during quantification
stopifnot(
  all(
    filter(ortho_table, species_name == "TrA")$gene_name %in% Tar_transcript_id
  )
)
Tar_row_indices <- which(ortho_table$species_name == "TrA")
ortho_table[Tar_row_indices, "gene_name"] <-
  Tar_gene_id[match(
    ortho_table[Tar_row_indices, "gene_name"],
    Tar_transcript_id
  )]
# Write the converted long-format ortholog table.
write.csv(
  ortho_table,
  file = output_convertedID,
  quote = FALSE,
  row.names = FALSE
)
library(getopt)
library(dplyr)
spec <- matrix(
c(
"input", "i", 2, "character", "Input file",
"output", "o", 2, "character", "Output file",
"output_convertedID", "t", 2, "character", "Output file after ID convertion"
),
byrow = TRUE, ncol = 5
)
opt <- getopt(spec = spec)
input <- opt$input
output <- opt$output
output_convertedID <- opt$output_convertedID
in_file <- file(input, "r")
out_file <- structure(
.Data = matrix(nrow = 0, ncol = 3),
dimnames = list(c(), c("cluster_id", "species_name", "gene_name"))
)
write.table(out_file, output,
row.names = FALSE, sep = ","
)
# gene_num_vector <- c()
run_loop <- TRUE
run_loop_num <- 1
while (run_loop) {
in_content <- readLines(in_file, n = 1)
run_loop <- (length(in_content) > 0)
if (!run_loop) break
temp <- in_content %>%
gsub(pattern = '"', replacement = "") %>%
strsplit(split = "\t") %>%
extract2(1)
cluster_id <- temp[1] %>%
strsplit(split = "[(]") %>%
extract2(1) %>%
extract(1)
gene_num <- temp[1] %>%
strsplit(split = "[:|,]") %>%
extract2(1) %>%
extract(2) %>%
as.numeric()
temp_gene <- temp[-1] %>%
strsplit(split = "[(]") %>%
sapply(extract, 1)
stopifnot(all(regexpr("_", temp_gene) == 4)) # TRUE
species_name <- substr(temp_gene, 1, 3)
gene_name <- substring(temp_gene, 5)
out_dataframe <- data.frame(cluster_id, species_name, gene_name)
# gene_num_vector[run_loop_num] <- gene_num
write.table(
out_dataframe, output,
row.names = FALSE, col.names = FALSE,
append = TRUE, quote = FALSE, sep = ","
)
cat("\r", run_loop_num)
run_loop_num %<>% add(1)
}
close(in_file)
# sum(gene_num_vector)
## Load the ortholog table (long data format)
ortho_table <- read.csv(output)
# dim(ortho_table) # 478525 3
## Convert the protein ID into gene ID
### Define the function for ID extracting
extract_prefix_before_point <- function(raw_id, which_point = 2) {
cut_position <-
raw_id %>%
gregexpr(pattern = "[.]") %>%
extract2(1) %>%
extract(which_point)
extracted_id <- substr(raw_id, 1, cut_position - 1)
extracted_id
}
### Replace Ptr peptide ID with gene ID
Ptr_protein_fasta <-
scan(paste0(
"/home/woodformation/HDD1/GenomicsData/Ptrichocarpa_533_v4.1/",
"Ptrichocarpa_533_v4.1.protein_primaryTranscriptOnly.fa"
),
what = "", sep = "\n"
)
fasta_info <- grep(">", Ptr_protein_fasta, value = TRUE)
Ptr_protein_ID <-
strsplit(fasta_info, " ") %>%
sapply(extract, 1) %>%
sub(">", "", .)
Ptr_gene_ID <-
strsplit(fasta_info, " ") %>%
sapply(extract, 5) %>%
sub("ID=", "", .) %>%
sapply(extract_prefix_before_point, 2)
stopifnot(
all(filter(ortho_table, species_name == "PoT")$gene_name %in% Ptr_protein_ID)
)
Ptr_row_indices <- which(ortho_table$species_name == "PoT")
ortho_table[Ptr_row_indices, "gene_name"] <-
paste0(
Ptr_gene_ID[match(
ortho_table[Ptr_row_indices, "gene_name"],
Ptr_protein_ID
)],
".v4.1"
)
### Replace Egr peptide ID with gene ID
Egr_row_indices <- which(ortho_table$species_name == "EuG")
ortho_table[Egr_row_indices, "gene_name"] <-
sub("1.p", "v2.0", ortho_table[Egr_row_indices, "gene_name"])
### Replace Tar peptide ID with gene ID
Tar_gtf <-
read.table(paste0(
"/home/woodformation/HDD1/GenomicsData/Taralioides_20200702/",
"Trochodendron_aralioides_chromosomes_pasa2.longest.filter.gtf"
), sep = "\t", header = FALSE)
ID_info <- unique(Tar_gtf$V9)
Tar_transcript_id <-
strsplit(ID_info, ";") %>%
sapply(extract, 1) %>%
sub("transcript_id ", "", .)
Tar_gene_id <-
strsplit(ID_info, ";") %>%
sapply(extract, 2) %>%
sub(" gene_id ", "", .)
# sub("evm.TU.", "", .)
# Dr. Ku remove prefix of gene id during quantification
stopifnot(
all(
filter(ortho_table, species_name == "TrA")$gene_name %in% Tar_transcript_id
)
)
Tar_row_indices <- which(ortho_table$species_name == "TrA")
ortho_table[Tar_row_indices, "gene_name"] <-
Tar_gene_id[match(
ortho_table[Tar_row_indices, "gene_name"],
Tar_transcript_id
)]
write.csv(
ortho_table,
file = output_convertedID,
quote = FALSE,
row.names = FALSE
) |
\name{smargin.table}
\alias{smargin.table}
\title{For a contingency table in array form, compute the sum of table entries for a given index.}
\usage{
smargin.table(x, margin = NULL, na.rm = TRUE)
}
\arguments{
\item{x}{an array}
\item{margin}{index number (1 for rows, etc.)}
\item{na.rm}{logical. Should missing values be removed?
Passed to sum()}
}
\description{
Redefines margin.table to deal with NA values
}
\examples{
##m <- matrix(1:4,2)
##surveyor:::smargin.table(m, 1)
##surveyor:::smargin.table(m, 2)
}
\keyword{##}
\keyword{internal}
| /man/smargin.table.Rd | no_license | andrie/surveyor | R | false | false | 560 | rd | \name{smargin.table}
\alias{smargin.table}
\title{For a contingency table in array form, compute the sum of table entries for a given index.}
\usage{
smargin.table(x, margin = NULL, na.rm = TRUE)
}
\arguments{
\item{x}{an array}
\item{margin}{index number (1 for rows, etc.)}
\item{na.rm}{logical. Should missing values be removed?
Passed to sum()}
}
\description{
Redefines margin.table to deal with NA values
}
\examples{
##m <- matrix(1:4,2)
##surveyor:::smargin.table(m, 1)
##surveyor:::smargin.table(m, 2)
}
\keyword{##}
\keyword{internal}
|
# Bayesian estimate of a birth-outcome probability via Stan.
library(rstan)

set.seed(42)
# Simulate 1e5 Bernoulli births with success probability 0.485
# (presumably P(boy or girl) -- confirm against birth_data.stan).
births <- rbinom(1e5, 1, 0.485)

# Keep the data list and the fitted model in separate objects: the original
# reused `mcmc` for both, clobbering the data list with the fit.
mcmc_data <- list(
  N = length(births),
  births = births)
mcmc <- stan("birth_data.stan", data = mcmc_data, pars = c("prob"), iter = 200, chains = 1)
res <- summary(mcmc)$summary
| /learn_stan/birth_data.R | no_license | andymckenzie/R-plots | R | false | false | 232 | r | library(rstan)
set.seed(42)
births = rbinom(1e5, 1, 0.485)
mcmc = list(
N = length(births),
births = births)
mcmc = stan("birth_data.stan", data = mcmc, pars = c("prob"), iter = 200, chains = 1)
res = summary(mcmc)$summary
|
#### libraries-and-functions_debate-in-the-wild.r: Part of `debate_in_the_wild.Rmd` ####
#
# This script sets the working directory, loads libraries, creates a number of
# additional functions to facilitate data prep and analysis.
#
# Written by: A. Paxton (University of California, Berkeley)
#
#####################################################################################
#### Set working directory ####
# NOTE(review): a hard-coded absolute setwd() ties this script to one
# machine layout; consider relative paths when revisiting.
setwd('/debate-in-the-wild/')
#####################################################################################
#### Load libraries we'll need ####
library(doBy)
library(dplyr)
library(e1071)
library(ggplot2)
library(grid)
library(gridExtra)
library(gtable)
library(gtools)
library(Hmisc)
library(kernlab)
library(languageR)
library(lme4)
library(pander)
library(piecewiseSEM)
library(purrr)
library(plotrix)
library(plyr)
library(tsne)
#####################################################################################
#### Create functions we'll need ####
# "%notin%": infix complement of %in% -- TRUE where an element of x is
# absent from y (match-based, so NA in x simply fails to match)
`%notin%` <- Negate(`%in%`)
# "pander_lme": simplify lme4 printouts (available on GitHub: https://github.com/a-paxton/stats-tools)
# Renders the fixed-effect table of an lme4 model as a markdown table with
# two-tailed p-values (normal approximation to the t statistic) and
# significance stars; optionally captions it with pseudo-R-squared values.
pander_lme = function(lme_model_name, stats.caption){

  # load in pander
  library(pander)

  # convert the model summary to a dataframe
  neat_output = data.frame(summary(lme_model_name)$coefficient)

  # get it to display p-values and asterisks based on significance
  neat_output$p = 2*(1-pnorm(abs(neat_output$t.value)))
  neat_output$sig = ' '
  neat_output$sig[neat_output$p < .1] = '.'
  neat_output$sig[neat_output$p < .05] = '*'
  neat_output$sig[neat_output$p < .01] = '**'
  neat_output$sig[neat_output$p < .001] = '***'

  # set a caption that includes R-squared values
  if (isTRUE(stats.caption)){  # isTRUE guards against non-logical input

    # use piecewiseSEM to calculate R-squared; computed once, not twice
    # (sem.model.fits refits the model and was previously called per value)
    library(piecewiseSEM)
    model_fits = sem.model.fits(lme_model_name)
    model_marginal_r_squared = model_fits$Marginal
    model_conditional_r_squared = model_fits$Conditional
    neat_caption = paste('**Marginal *R*-squared: ',
                         round(model_marginal_r_squared,2),
                         "; Conditional *R*-squared: ",
                         round(model_conditional_r_squared,2), ".**",sep="")

    # return the table
    return(pander(neat_output, split.table = Inf, caption = neat_caption, style = 'rmarkdown'))
  } else { # or return a table without it
    return(pander(neat_output, split.table = Inf, style = 'rmarkdown'))
  }
}
# "pander_lm": simplify lm printouts and include adjusted R-squared and F-stats
# Renders the coefficient table of an lm model as a markdown table with
# two-tailed p-values (normal approximation) and significance stars.
pander_lm = function(lm_model_name, stats.caption){

  # load in pander
  library(pander)

  # summarize the model once and reuse it (it was previously recomputed
  # for the coefficients, the R-squared, and the F-statistic)
  model_summary = summary(lm_model_name)

  # convert the model summary to a dataframe
  neat_output = data.frame(model_summary$coefficient)

  # get it to display p-values and asterisks based on significance
  neat_output$p = 2*(1-pnorm(abs(neat_output$t.value)))
  neat_output$sig = ' '
  neat_output$sig[neat_output$p < .1] = '.'
  neat_output$sig[neat_output$p < .05] = '*'
  neat_output$sig[neat_output$p < .01] = '**'
  neat_output$sig[neat_output$p < .001] = '***'

  # set a caption that includes R-squared values
  if (isTRUE(stats.caption)){

    # grab stats F-stats and adjusted R-squared
    model_adj_r_squared = model_summary$adj.r.squared
    model_fstatistics = model_summary$fstatistic
    neat_caption = paste('**Adjusted *R*-squared: ',
                         round(model_adj_r_squared,2), "; *F*(",
                         model_fstatistics[2],",",model_fstatistics[3],
                         ") = ",round(model_fstatistics[1],2),"**",sep="")

    # return the table
    return(pander(neat_output, split.table = Inf, caption = neat_caption, style = 'rmarkdown'))
  }else{ # or return a table without it
    # BUG FIX: `style` was previously passed twice in this call, which errors
    # once forwarded to pandoc.table ("matched by multiple actual arguments")
    return(pander(neat_output, split.table = Inf, style = 'rmarkdown'))
  }
}
# "significant.effects": list the model terms that reach p <= .05
significant.effects = function(lm_model_name){

  # pull the coefficient table and attach two-tailed p-values
  # (normal approximation to the t statistic)
  coef.table = data.frame(summary(lm_model_name)$coefficient)
  coef.table$p = 2*(1-pnorm(abs(coef.table$t.value)))

  # keep only the rows at or below the .05 threshold
  sig.rows = data.frame(coef.table[coef.table$p <= .05,])
  sig.rows$component = rownames(sig.rows)

  # reduce to term name and p-value, with clean sequential row names
  sig.rows = sig.rows[, c("component","p")]
  rownames(sig.rows) = seq(length=nrow(sig.rows))

  # hand back a markdown table of the significant terms
  return(pander(sig.rows, style = 'rmarkdown'))
}
# "trending.effects": list the model terms in the trend band (.05 < p <= .10)
trending.effects = function(lm_model_name){

  # pull the coefficient table and attach two-tailed p-values
  # (normal approximation to the t statistic)
  coef.table = data.frame(summary(lm_model_name)$coefficient)
  coef.table$p = 2*(1-pnorm(abs(coef.table$t.value)))

  # keep only rows strictly above .05 and at or below .10
  trend.rows = data.frame(coef.table[coef.table$p > .05 &
                                       coef.table$p <= .10,])
  trend.rows$component = rownames(trend.rows)

  # reduce to term name and p-value, with clean sequential row names
  trend.rows = trend.rows[, c("component","p")]
  rownames(trend.rows) = seq(length=nrow(trend.rows))

  # hand back a markdown table of the trending terms
  return(pander(trend.rows, style = 'rmarkdown'))
}
# 'component.factors': show the top or bottom weighted factors in a given
# SVD component.
#
# Args:
#   original.df: data frame whose column names supply factor labels.
#   svd.df: an svd()-style result; rows of svd.df$v are assumed to
#     correspond, in order, to the retained columns of original.df
#     (TODO confirm upstream).
#   factor.list: names of columns in original.df to consider.
#   component: which component (column of svd.df$v) to rank.
#   top.bottom: 'top' for the 5 largest loadings, anything else for
#     the 5 smallest.
#
# Returns a 5-row data frame with columns component, name, weight.
component.factors = function(original.df, svd.df, factor.list, component, top.bottom){

  # subset original dataframe to include only those in our list of factors
  subset.data = original.df[names(original.df) %in% factor.list]

  # specify how many values to return from the top/bottom
  slice.n = 5

  # create the component label (e.g., "c1")
  component.name = paste('c',component,sep='')

  # loadings for this component
  loadings = as.numeric(svd.df$v[,component])

  # rank descending for 'top', ascending otherwise; this collapses the two
  # near-identical branches of the original and spells out index.return
  # (the original relied on partial matching of `index =`)
  ranked = sort(loadings, decreasing = (top.bottom == 'top'),
                index.return = TRUE)$ix[1:slice.n]
  sorted.factors.vals = loadings[ranked]
  sorted.factors.names = colnames(subset.data)[ranked]

  # then spit out our list
  sorted.factors = data.frame(component.name,
                              sorted.factors.names,
                              sorted.factors.vals)
  names(sorted.factors) = c('component','name','weight')
  return(sorted.factors)
}
# 'summarySE': Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
# (Thanks to http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/)
# Args:
#   data: data frame to summarize.
#   measurevar: name (string) of the column to summarize.
#   groupvars: character vector of grouping columns (NULL = whole data).
#   na.rm: drop NAs when counting/averaging?
#   conf.interval: confidence level for the t-based CI half-width.
#   .drop: passed through to plyr::ddply.
# Returns one row per group with columns N, <measurevar>, sd, se, ci.
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
                      conf.interval=.95, .drop=TRUE) {
  library(plyr)
  # New version of length which can handle NA's: if na.rm==T, don't count them
  length2 <- function (x, na.rm=FALSE) {
    if (na.rm) sum(!is.na(x))
    else length(x)
  }
  # This does the summary. For each group's data frame, return a vector with
  # N, mean, and sd
  datac <- ddply(data, groupvars, .drop=.drop,
                 .fun = function(xx, col) {
                   c(N = length2(xx[[col]], na.rm=na.rm),
                     mean = mean (xx[[col]], na.rm=na.rm),
                     sd = sd (xx[[col]], na.rm=na.rm)
                   )
                 },
                 measurevar
  )
  # Rename the "mean" column
  datac <- plyr::rename(datac, c("mean" = measurevar))
  datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
  # Confidence interval multiplier for standard error
  # Calculate t-statistic for confidence interval:
  # e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
  ciMult <- qt(conf.interval/2 + .5, datac$N-1)
  datac$ci <- datac$se * ciMult
  return(datac)
}
#####################################################################################
#### Group variables ####
# "ratings.variables": list of variables derived from headshot ratings
debater.ratings = c('attractive','intelligent','expert','biased','liberal','hostile','trustworthy')
debater.ratings.group = paste("group.", debater.ratings, sep = "")
debater.vars = c(debater.ratings,debater.ratings.group)
# "desc.variables": list of variables about descriptions about who's speaking, etc.
desc.variables = c('debate','speaker','group')
# "outcome.variables": list of variables about vote outcomes
outcome.variables = c('before_for','before_against','before_undecided',
'after_for','after_against','after_undecided',
'change_for','change_against','change_undecided',
'winner','deltaV')
# "detailed.variables": list of variables about more detailed vote metrics
detailed.variables = c('for_for','against_against','undecided_undecided',
'for_against','for_undecided',
'against_for','against_undecided',
'undecided_for','undecided_against')
# "summedLIWC.variables": list of LIWC-related variables that will not be included in the typical LIWC content
summedLIWC.variables = c('eos','WPS','WC','charnum')
# "text.variables": list of string variables to be excluded from numeric operations
text.variables = c('word','Filename','Seg','transcript')
# 'unused.variables': list of variables that are not included in standard LIWC
unused.variables = c('politics','liberal.1','conservative','demo','hedge','repub','turn')
unused.var.prop = paste("prop.", unused.variables, sep = "")
unused.var.group = paste("group.", unused.variables, sep = "")
unused.var.group.prop = paste("group.prop.",unused.variables, sep = "")
unused.variables = c(unused.variables,
unused.var.prop,
unused.var.group,
unused.var.group.prop) | /supplementary/libraries-and-functions_debate-in-the-wild.r | no_license | a-paxton/debate-in-the-wild | R | false | false | 10,989 | r | #### libraries-and-functions_debate-in-the-wild.r: Part of `debate_in_the_wild.Rmd` ####
#
# This script sets the working directory, loads libraries, creates a number of
# additional functions to facilitate data prep and analysis.
#
# Written by: A. Paxton (University of California, Berkeley)
#
#####################################################################################
#### Set working directory ####
setwd('/debate-in-the-wild/')
#####################################################################################
#### Load libraries we'll need ####
library(doBy)
library(dplyr)
library(e1071)
library(ggplot2)
library(grid)
library(gridExtra)
library(gtable)
library(gtools)
library(Hmisc)
library(kernlab)
library(languageR)
library(lme4)
library(pander)
library(piecewiseSEM)
library(purrr)
library(plotrix)
library(plyr)
library(tsne)
#####################################################################################
#### Create functions we'll need ####
# "%notin%": identify values from one list (x) not included in another (y)
'%notin%' <- function(x,y) !(x %in% y)
# "pander_lme": simplify lme4 printouts (available on GitHub: https://github.com/a-paxton/stats-tools)
pander_lme = function(lme_model_name, stats.caption){
# load in pander
library(pander)
# convert the model summary to a dataframe
neat_output = data.frame(summary(lme_model_name)$coefficient)
# get it to display p-values and asterisks based on significance
neat_output$p = 2*(1-pnorm(abs(neat_output$t.value)))
neat_output$sig = ' '
neat_output$sig[neat_output$p < .1] = '.'
neat_output$sig[neat_output$p < .05] = '*'
neat_output$sig[neat_output$p < .01] = '**'
neat_output$sig[neat_output$p < .001] = '***'
# set a caption that includes R-squared values
if (stats.caption == TRUE){
# use piecewiseSEM to calculate R-squared
library(piecewiseSEM)
model_marginal_r_squared = sem.model.fits(lme_model_name)$Marginal
model_conditional_r_squared = sem.model.fits(lme_model_name)$Conditional
neat_caption = paste('**Marginal *R*-squared: ',
round(model_marginal_r_squared,2),
"; Conditional *R*-squared: ",
round(model_conditional_r_squared,2), ".**",sep="")
# return the table
return(pander(neat_output, split.table = Inf, caption = neat_caption, style = 'rmarkdown'))
} else { # or return a table without it
return(pander(neat_output, split.table = Inf, style = 'rmarkdown'))
}
}
# "pander_lm": simplify lm printouts and include adjusted R-squared and F-stats
pander_lm = function(lm_model_name, stats.caption){
# load in pander
library(pander)
# convert the model summary to a dataframe
neat_output = data.frame(summary(lm_model_name)$coefficient)
# get it to display p-values and asterisks based on significance
neat_output$p = 2*(1-pnorm(abs(neat_output$t.value)))
neat_output$sig = ' '
neat_output$sig[neat_output$p < .1] = '.'
neat_output$sig[neat_output$p < .05] = '*'
neat_output$sig[neat_output$p < .01] = '**'
neat_output$sig[neat_output$p < .001] = '***'
# set a caption that includes R-squared values
if (stats.caption==TRUE){
# grab stats F-stats and adjusted R-squared
model_adj_r_squared = summary(lm_model_name)$adj.r.squared
model_fstatistics = summary(lm_model_name)$fstatistic
neat_caption = paste('**Adjusted *R*-squared: ',
round(model_adj_r_squared,2), "; *F*(",
model_fstatistics[2],",",model_fstatistics[3],
") = ",round(model_fstatistics[1],2),"**",sep="")
# return the table
return(pander(neat_output, split.table = Inf, caption = neat_caption, style = 'rmarkdown'))
}else{# or return a table without it
return(pander(neat_output, style="rmarkdown",split.table = Inf, style = 'rmarkdown'))
}
}
# "significant.effects": identify significant effects in a model
significant.effects = function(lm_model_name){
# convert the model summary to a dataframe
model.summary = data.frame(summary(lm_model_name)$coefficient)
# create significance values
model.summary$p = 2*(1-pnorm(abs(model.summary$t.value)))
# identify only effects below p = .05
significant.effects = data.frame(model.summary[model.summary$p <= .05,])
significant.effects$component = rownames(significant.effects)
# grab only the component name and p-value, then remove row names
significant.effects = significant.effects %>%
select(component,p)
rownames(significant.effects) = seq(length=nrow(significant.effects))
# return the names of the significant effects
return(pander(significant.effects, style = 'rmarkdown'))
}
# "trending.effects": identify trends in a model (.05 < p <= .10)
trending.effects = function(lm_model_name){
# convert the model summary to a dataframe
model.summary = data.frame(summary(lm_model_name)$coefficient)
# create significance values
model.summary$p = 2*(1-pnorm(abs(model.summary$t.value)))
# identify only effects between .05 < p <= .10
trending.effects = data.frame(model.summary[model.summary$p > .05 &
model.summary$p <= .10,])
trending.effects$component = rownames(trending.effects)
# grab only the component name and p-value, then remove row names
trending.effects = trending.effects %>%
select(component,p)
rownames(trending.effects) = seq(length=nrow(trending.effects))
# return the names of the significant effects
return(pander(trending.effects, style = 'rmarkdown'))
}
# 'component.factors': a function to show the bottom or top factors in a given component
component.factors = function(original.df, svd.df, factor.list, component, top.bottom){
# subset original dataframe to include only those in our list of factors
subset.data = original.df[names(original.df) %in% factor.list]
# specify the top/lowest n values
slice.n = 5
# create name
component.name = paste('c',component,sep='')
# use the option to decide whether to...
if (top.bottom == 'top'){ # ... return top slice.n factors...
sorted.factors.vals = as.numeric(svd.df$v[,component])[sort(as.numeric(svd.df$v[,component]),
decreasing = TRUE,
index = TRUE)$ix[1:slice.n]]
sorted.factors.names = colnames(subset.data)[sort(svd.df$v[,component],
decreasing = TRUE,
index = TRUE)$ix[1:slice.n]]
} else { # ... or return bottom slice.n factors
sorted.factors.vals = as.numeric(svd.df$v[,component])[sort(as.numeric(svd.df$v[,component]),
decreasing = FALSE,
index = TRUE)$ix[1:slice.n]]
sorted.factors.names = colnames(subset.data)[sort(svd.df$v[,component],
decreasing = FALSE,
index = TRUE)$ix[1:slice.n]]
}
# then spit out our list
sorted.factors = data.frame(component.name,
sorted.factors.names,
sorted.factors.vals)
names(sorted.factors) = c('component','name','weight')
return(sorted.factors)
}
# 'summarySE': Gives count, mean, standard deviation, standard error of the mean, and confidence interval (default 95%).
# (Thanks to http://www.cookbook-r.com/Graphs/Plotting_means_and_error_bars_(ggplot2)/)
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
# New version of length which can handle NA's: if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- plyr::rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
#####################################################################################
#### Group variables ####
# "ratings.variables": list of variables derived from headshot ratings
debater.ratings = c('attractive','intelligent','expert','biased','liberal','hostile','trustworthy')
debater.ratings.group = paste("group.", debater.ratings, sep = "")
debater.vars = c(debater.ratings,debater.ratings.group)
# "desc.variables": list of variables about descriptions about who's speaking, etc.
desc.variables = c('debate','speaker','group')
# "outcome.variables": list of variables about vote outcomes
outcome.variables = c('before_for','before_against','before_undecided',
'after_for','after_against','after_undecided',
'change_for','change_against','change_undecided',
'winner','deltaV')
# "detailed.variables": list of variables about more detailed vote metrics
detailed.variables = c('for_for','against_against','undecided_undecided',
'for_against','for_undecided',
'against_for','against_undecided',
'undecided_for','undecided_against')
# "summedLIWC.variables": list of LIWC-related variables that will not be included in the typical LIWC content
summedLIWC.variables = c('eos','WPS','WC','charnum')
# "text.variables": list of string variables to be excluded from numeric operations
text.variables = c('word','Filename','Seg','transcript')
# 'unused.variables': list of variables that are not included in standard LIWC
unused.variables = c('politics','liberal.1','conservative','demo','hedge','repub','turn')
unused.var.prop = paste("prop.", unused.variables, sep = "")
unused.var.group = paste("group.", unused.variables, sep = "")
unused.var.group.prop = paste("group.prop.",unused.variables, sep = "")
unused.variables = c(unused.variables,
unused.var.prop,
unused.var.group,
unused.var.group.prop) |
raster4LivePca <- function(year, month, idxvalues, map, viewType, latitude, longitude){
# create raster object from matrix
raster <- toRaster(map, longitude, latitude)
# set raster NA value
raster[is.na(raster)] <- NA
# trim raster to relevant extent
raster <- trim(raster, padding=1)
# get event id
event_id <- idxvalues[4]
# get index value
idx_val <- idxvalues[3]
# write raster as geoTiff
writeRaster(raster, NAflag=9999, filename = getTmpTifFile(), format="GTiff", overwrite=TRUE)
# write idx files to db
writeViewToDB(year, month, viewType, getTmpTifFile(), event_id = event_id, idx_val = idx_val)
}
| /regmodR/R/regmod/raster4LivePca.R | permissive | LeunamBk/MSC | R | false | false | 659 | r | raster4LivePca <- function(year, month, idxvalues, map, viewType, latitude, longitude){
# create raster object from matrix
raster <- toRaster(map, longitude, latitude)
# set raster NA value
raster[is.na(raster)] <- NA
# trim raster to relevant extent
raster <- trim(raster, padding=1)
# get event id
event_id <- idxvalues[4]
# get index value
idx_val <- idxvalues[3]
# write raster as geoTiff
writeRaster(raster, NAflag=9999, filename = getTmpTifFile(), format="GTiff", overwrite=TRUE)
# write idx files to db
writeViewToDB(year, month, viewType, getTmpTifFile(), event_id = event_id, idx_val = idx_val)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds.GWAS.R
\name{ds.GWAS}
\alias{ds.GWAS}
\title{Genome-wide association analysis (GWAS)}
\usage{
ds.GWAS(genoData, model, family = "binomial", snpBlock = 10000,
datasources = NULL, ...)
}
\arguments{
\item{genoData}{a \code{GenotypeData} object which is a container for storing genotype data
from a GWAS toghether with the metadata associated with the subjects (i.e. phenotypes and/or covariates)
and SNPs}
\item{model}{formula indicating the condition (left side) and other covariates to be adjusted for
(i.e. condition ~ covar1 + ... + covar2). The fitted model is: snp ~ condition + covar1 + ... + covarN}
\item{family}{A description of the generalized linear model used. The defatul is "binomial" that is defined
for case/control studies. Quantitative traits can be analyzed by using "gaussian". Other values are accepted.}
\item{snpBlock}{an integer specifying the number of SNPs in an iteration block. See \code{GenotypeIterator}
function in GWASTools package.}
\item{...}{other arguments of \code{fitNullModel} function in GENESIS package}
}
\value{
a matrix with SNPs ordered by p-values
}
\description{
Performs GWAS using GENESIS
}
\author{
Gonzalez, JR.
}
| /man/ds.GWAS.Rd | permissive | epigeny/dsOmicsClient | R | false | true | 1,253 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds.GWAS.R
\name{ds.GWAS}
\alias{ds.GWAS}
\title{Genome-wide association analysis (GWAS)}
\usage{
ds.GWAS(genoData, model, family = "binomial", snpBlock = 10000,
datasources = NULL, ...)
}
\arguments{
\item{genoData}{a \code{GenotypeData} object which is a container for storing genotype data
from a GWAS toghether with the metadata associated with the subjects (i.e. phenotypes and/or covariates)
and SNPs}
\item{model}{formula indicating the condition (left side) and other covariates to be adjusted for
(i.e. condition ~ covar1 + ... + covar2). The fitted model is: snp ~ condition + covar1 + ... + covarN}
\item{family}{A description of the generalized linear model used. The defatul is "binomial" that is defined
for case/control studies. Quantitative traits can be analyzed by using "gaussian". Other values are accepted.}
\item{snpBlock}{an integer specifying the number of SNPs in an iteration block. See \code{GenotypeIterator}
function in GWASTools package.}
\item{...}{other arguments of \code{fitNullModel} function in GENESIS package}
}
\value{
a matrix with SNPs ordered by p-values
}
\description{
Performs GWAS using GENESIS
}
\author{
Gonzalez, JR.
}
|
log <- file(snakemake@log[[1]], open="wt")
sink(log)
sink(log, type="message")
library("DESeq2")
parallel <- FALSE
if (snakemake@threads > 1) {
library("BiocParallel")
# setup parallelization
register(MulticoreParam(snakemake@threads))
parallel <- TRUE
}
dds <- readRDS(snakemake@input[["rds"]])
########################################################
#deseq2-diff-gene
########################################################
#差异分析结果 multiple use contrast
contrast <- c("condition", snakemake@params[["contrast"]])
res = results(dds,contrast=contrast)
# Sort the results data frame by the padj and foldChange columns.
sorted = res[with(res, order(padj, -log2FoldChange)), ]
# Turn it into a dataframe to have proper column names.
sorted.df = data.frame("id"=rownames(sorted),sorted)
# Write the table out.
write.table(sorted.df, file=snakemake@output[["all_tab"]], row.names = FALSE,sep="\t", quote=FALSE)
########significantly different genes
#padj(<0.05)和log2 fold (>1)
regSig <- subset(res, padj < 0.05)
regSig2 <- subset(regSig, abs(log2FoldChange) > 1)
sorted_regSig2 = regSig2[with(regSig2, order(-log2FoldChange)), ]
sig = data.frame("id"=rownames(regSig2),regSig2)
write.table(sig, file= snakemake@output[["sig_tab"]], sep="\t", row.name=FALSE, col.names=TRUE,quote=FALSE) | /Archive/diffexp.R | no_license | Iceylee/NGS-Pacbio | R | false | false | 1,323 | r | log <- file(snakemake@log[[1]], open="wt")
sink(log)
sink(log, type="message")
library("DESeq2")
parallel <- FALSE
if (snakemake@threads > 1) {
library("BiocParallel")
# setup parallelization
register(MulticoreParam(snakemake@threads))
parallel <- TRUE
}
dds <- readRDS(snakemake@input[["rds"]])
########################################################
#deseq2-diff-gene
########################################################
#差异分析结果 multiple use contrast
contrast <- c("condition", snakemake@params[["contrast"]])
res = results(dds,contrast=contrast)
# Sort the results data frame by the padj and foldChange columns.
sorted = res[with(res, order(padj, -log2FoldChange)), ]
# Turn it into a dataframe to have proper column names.
sorted.df = data.frame("id"=rownames(sorted),sorted)
# Write the table out.
write.table(sorted.df, file=snakemake@output[["all_tab"]], row.names = FALSE,sep="\t", quote=FALSE)
########significantly different genes
#padj(<0.05)和log2 fold (>1)
regSig <- subset(res, padj < 0.05)
regSig2 <- subset(regSig, abs(log2FoldChange) > 1)
sorted_regSig2 = regSig2[with(regSig2, order(-log2FoldChange)), ]
sig = data.frame("id"=rownames(regSig2),regSig2)
write.table(sig, file= snakemake@output[["sig_tab"]], sep="\t", row.name=FALSE, col.names=TRUE,quote=FALSE) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trig_radian_check.R
\name{trig_radian_check}
\alias{trig_radian_check}
\title{Check that argument is in radians}
\usage{
trig_radian_check(ex, radian_val,
message = "Could not find call to a trigonometric function", eps = 0.001)
}
\arguments{
\item{ex}{A \code{checkr_result}, presumably containing a call to a trig function.}
\item{radian_val}{a number: the desired angle in radians}
\item{message}{character string message to produce on failure}
\item{eps}{precision of comparison}
}
\description{
Check that argument is in radians
}
\examples{
ex <- for_checkr(quote(15 * sin(3)))
trig_radian_check(ex, 3)
trig_radian_check(ex, 3 * pi / 180)
trig_radian_check(ex, 4*pi/180)
}
| /man/trig_radian_check.Rd | permissive | statnmap/checkr | R | false | true | 763 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trig_radian_check.R
\name{trig_radian_check}
\alias{trig_radian_check}
\title{Check that argument is in radians}
\usage{
trig_radian_check(ex, radian_val,
message = "Could not find call to a trigonometric function", eps = 0.001)
}
\arguments{
\item{ex}{A \code{checkr_result}, presumably containing a call to a trig function.}
\item{radian_val}{a number: the desired angle in radians}
\item{message}{character string message to produce on failure}
\item{eps}{precision of comparison}
}
\description{
Check that argument is in radians
}
\examples{
ex <- for_checkr(quote(15 * sin(3)))
trig_radian_check(ex, 3)
trig_radian_check(ex, 3 * pi / 180)
trig_radian_check(ex, 4*pi/180)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Reads in list of years}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{A list of years}
}
\value{
edits the data files with names ending in provided year range
}
\description{
Reads in a list of years and for each file whose name ends in a year within the
provided year range changes the year in that file.
Throws an error if the year range contains a year which doesn't correspond to
a file name with that year in its name.
}
| /man/fars_read_years.Rd | no_license | csmatyi/fars | R | false | true | 587 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Reads in list of years}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{A list of years}
}
\value{
edits the data files with names ending in provided year range
}
\description{
Reads in a list of years and for each file whose name ends in a year within the
provided year range changes the year in that file.
Throws an error if the year range contains a year which doesn't correspond to
a file name with that year in its name.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lassosumR.R
\name{lassosumR}
\alias{lassosumR}
\title{Function to obtain LASSO estimates of a regression problem given summary statistics
and a reference panel (without PLINK bfile)}
\usage{
lassosumR(cor, refpanel, lambda = exp(seq(log(0.001), log(0.1),
length.out = 20)), shrink = 0.9, ridge = F, thr = 1e-04,
init = NULL, trace = 0, maxiter = 10000, blocks = NULL)
}
\arguments{
\item{cor}{A vector of correlations (\eqn{r})}
\item{refpanel}{reference panel as \code{data.frame} or \code{matrix}}
\item{lambda}{A vector of \eqn{\lambda}s (the tuning parameter)}
\item{shrink}{The shrinkage parameter \eqn{s} for the correlation matrix \eqn{R}}
\item{ridge}{Produce ridge regression results also (slow if nrow(refpanel) > 2000)}
\item{thr}{convergence threshold for \eqn{\beta}}
\item{init}{Initial values for \eqn{\beta}}
\item{trace}{An integer controlling the amount of output generated.}
\item{maxiter}{Maximum number of iterations}
\item{blocks}{A vector to split the genome by blocks (coded as c(1,1,..., 2, 2, ..., etc.))}
}
\value{
A list with the following
\item{lambda}{same as the lambda input}
\item{beta}{A matrix of estimated coefficients}
\item{conv}{A vector of convergence indicators. 1 means converged. 0 not converged.}
\item{pred}{\eqn{=(1-s)X\beta}}
\item{loss}{\eqn{=(1-s)\beta'X'X\beta/n - 2\beta'r}}
\item{fbeta}{\eqn{=\beta'R\beta - 2\beta'r + 2\lambda||\beta||_1}}
\item{sd}{The standard deviation of the reference panel SNPs}
\item{shrink}{same as input}
\item{nparams}{Number of non-zero coefficients}
\item{ridge}{ridge regression estimates}
}
\description{
Function to obtain LASSO estimates of a regression problem given summary statistics
and a reference panel (without PLINK bfile)
}
\details{
A function to find the minimum of \eqn{\beta} in
\deqn{f(\beta)=\beta'R\beta - 2\beta'r + 2\lambda||\beta||_1}
where
\deqn{R=(1-s)X'X/n + sI}
is a shrunken correlation matrix, with \eqn{X} being standardized reference panel.
\eqn{s} should take values in (0,1]. \eqn{r} is a vector of correlations.
}
\note{
\itemize{
\item Missing values in \code{refpanel} are filled with 0.
\item Unlike lassosum, we do not provide the options keep/remove/extract/exclude.
It is thus up to the user to ensure the SNPs in the reference panel corresponds
to those in the correlations.
}
}
\keyword{#@export}
\keyword{internal}
| /man/lassosumR.Rd | permissive | mattwarkentin/lassosum | R | false | true | 2,436 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lassosumR.R
\name{lassosumR}
\alias{lassosumR}
\title{Function to obtain LASSO estimates of a regression problem given summary statistics
and a reference panel (without PLINK bfile)}
\usage{
lassosumR(cor, refpanel, lambda = exp(seq(log(0.001), log(0.1),
length.out = 20)), shrink = 0.9, ridge = F, thr = 1e-04,
init = NULL, trace = 0, maxiter = 10000, blocks = NULL)
}
\arguments{
\item{cor}{A vector of correlations (\eqn{r})}
\item{refpanel}{reference panel as \code{data.frame} or \code{matrix}}
\item{lambda}{A vector of \eqn{\lambda}s (the tuning parameter)}
\item{shrink}{The shrinkage parameter \eqn{s} for the correlation matrix \eqn{R}}
\item{ridge}{Produce ridge regression results also (slow if nrow(refpanel) > 2000)}
\item{thr}{convergence threshold for \eqn{\beta}}
\item{init}{Initial values for \eqn{\beta}}
\item{trace}{An integer controlling the amount of output generated.}
\item{maxiter}{Maximum number of iterations}
\item{blocks}{A vector to split the genome by blocks (coded as c(1,1,..., 2, 2, ..., etc.))}
}
\value{
A list with the following
\item{lambda}{same as the lambda input}
\item{beta}{A matrix of estimated coefficients}
\item{conv}{A vector of convergence indicators. 1 means converged. 0 not converged.}
\item{pred}{\eqn{=(1-s)X\beta}}
\item{loss}{\eqn{=(1-s)\beta'X'X\beta/n - 2\beta'r}}
\item{fbeta}{\eqn{=\beta'R\beta - 2\beta'r + 2\lambda||\beta||_1}}
\item{sd}{The standard deviation of the reference panel SNPs}
\item{shrink}{same as input}
\item{nparams}{Number of non-zero coefficients}
\item{ridge}{ridge regression estimates}
}
\description{
Function to obtain LASSO estimates of a regression problem given summary statistics
and a reference panel (without PLINK bfile)
}
\details{
A function to find the minimum of \eqn{\beta} in
\deqn{f(\beta)=\beta'R\beta - 2\beta'r + 2\lambda||\beta||_1}
where
\deqn{R=(1-s)X'X/n + sI}
is a shrunken correlation matrix, with \eqn{X} being standardized reference panel.
\eqn{s} should take values in (0,1]. \eqn{r} is a vector of correlations.
}
\note{
\itemize{
\item Missing values in \code{refpanel} are filled with 0.
\item Unlike lassosum, we do not provide the options keep/remove/extract/exclude.
It is thus up to the user to ensure the SNPs in the reference panel corresponds
to those in the correlations.
}
}
\keyword{#@export}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simer.Utility.R
\name{write.file}
\alias{write.file}
\title{Write files of simer}
\usage{
write.file(pop, geno, map, out.geno.index, out.pheno.index,
out = "simer", outpath, out.format, verbose)
}
\arguments{
\item{pop}{population information of generation, family index, within-family index, index, sire, dam, sex, phenotpye}
\item{geno}{genotype matrix of population}
\item{map}{map information of markers}
\item{out.geno.index}{indice of individuals outputting genotype}
\item{out.pheno.index}{indice of individuals outputting phenotype}
\item{out}{prefix of output file name}
\item{outpath}{path of output files}
\item{out.format}{format of output, "numeric" or "plink"}
\item{verbose}{whether to print detail}
}
\value{
None
}
\description{
Build date: Jan 7, 2019
Last update: Oct 16, 2019
}
\examples{
\donttest{
data(simdata)
nmrk <- nrow(input.map)
pos.map <- check.map(input.map = input.map, num.marker = nmrk, len.block = 5e7)
basepop <- getpop(nind = 100, from = 1, ratio = 0.1)
basepop.geno <- rawgeno
basepop.geno <- as.big.matrix(basepop.geno)
effs <-
cal.effs(pop.geno = basepop.geno,
cal.model = "A",
num.qtn.tr1 = c(2, 6, 10),
sd.tr1 = c(0.4, 0.2, 0.02, 0.02, 0.02, 0.02, 0.02, 0.001),
dist.qtn.tr1 = rep("normal", 6),
prob.tr1 = c(0.5, 0.5, 0.5, 0.5, 0.5, 0.5),
shape.tr1 = c(1, 1, 1, 1, 1, 1),
scale.tr1 = c(1, 1, 1, 1, 1, 1),
multrait = FALSE,
num.qtn.trn = matrix(c(18, 10, 10, 20), 2, 2),
sd.trn = diag(c(1, 0.5)),
qtn.spot = rep(0.1, 10),
maf = 0,
verbose = TRUE)
str(basepop)
pop.pheno <-
phenotype(effs = effs,
pop = basepop,
pop.geno = basepop.geno,
pos.map = NULL,
h2.tr1 = c(0.3, 0.1, 0.05, 0.05, 0.05, 0.01),
gnt.cov = matrix(c(1, 2, 2, 15), 2, 2),
h2.trn = c(0.3, 0.5),
sel.crit = "pheno",
pop.total = basepop,
sel.on = TRUE,
inner.env = NULL,
verbose = TRUE)
basepop <- pop.pheno$pop
pop.pheno$pop <- NULL
idx <- basepop$index
# convert (0, 1) to (0, 1, 2)
basepop.geno <- geno.cvt1(basepop.geno)
basepop.geno <- as.big.matrix(basepop.geno)
write.file(pop = basepop, geno = basepop.geno, map = pos.map,
out.geno.index = idx, out.pheno.index = idx,
outpath = tempdir(), out.format = "numeric", verbose = TRUE)
file.remove(file.path(tempdir(), "simer.geno.id"))
file.remove(file.path(tempdir(), "simer.map"))
file.remove(file.path(tempdir(), "simer.ped"))
file.remove(file.path(tempdir(), "simer.phe"))
}
}
\author{
Dong Yin
}
| /man/write.file.Rd | permissive | ntduc11/SIMER | R | false | true | 2,804 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simer.Utility.R
\name{write.file}
\alias{write.file}
\title{Write files of simer}
\usage{
write.file(pop, geno, map, out.geno.index, out.pheno.index,
out = "simer", outpath, out.format, verbose)
}
\arguments{
\item{pop}{population information of generation, family index, within-family index, index, sire, dam, sex, phenotpye}
\item{geno}{genotype matrix of population}
\item{map}{map information of markers}
\item{out.geno.index}{indice of individuals outputting genotype}
\item{out.pheno.index}{indice of individuals outputting phenotype}
\item{out}{prefix of output file name}
\item{outpath}{path of output files}
\item{out.format}{format of output, "numeric" or "plink"}
\item{verbose}{whether to print detail}
}
\value{
None
}
\description{
Build date: Jan 7, 2019
Last update: Oct 16, 2019
}
\examples{
\donttest{
data(simdata)
nmrk <- nrow(input.map)
pos.map <- check.map(input.map = input.map, num.marker = nmrk, len.block = 5e7)
basepop <- getpop(nind = 100, from = 1, ratio = 0.1)
basepop.geno <- rawgeno
basepop.geno <- as.big.matrix(basepop.geno)
effs <-
cal.effs(pop.geno = basepop.geno,
cal.model = "A",
num.qtn.tr1 = c(2, 6, 10),
sd.tr1 = c(0.4, 0.2, 0.02, 0.02, 0.02, 0.02, 0.02, 0.001),
dist.qtn.tr1 = rep("normal", 6),
prob.tr1 = c(0.5, 0.5, 0.5, 0.5, 0.5, 0.5),
shape.tr1 = c(1, 1, 1, 1, 1, 1),
scale.tr1 = c(1, 1, 1, 1, 1, 1),
multrait = FALSE,
num.qtn.trn = matrix(c(18, 10, 10, 20), 2, 2),
sd.trn = diag(c(1, 0.5)),
qtn.spot = rep(0.1, 10),
maf = 0,
verbose = TRUE)
str(basepop)
pop.pheno <-
phenotype(effs = effs,
pop = basepop,
pop.geno = basepop.geno,
pos.map = NULL,
h2.tr1 = c(0.3, 0.1, 0.05, 0.05, 0.05, 0.01),
gnt.cov = matrix(c(1, 2, 2, 15), 2, 2),
h2.trn = c(0.3, 0.5),
sel.crit = "pheno",
pop.total = basepop,
sel.on = TRUE,
inner.env = NULL,
verbose = TRUE)
basepop <- pop.pheno$pop
pop.pheno$pop <- NULL
idx <- basepop$index
# convert (0, 1) to (0, 1, 2)
basepop.geno <- geno.cvt1(basepop.geno)
basepop.geno <- as.big.matrix(basepop.geno)
write.file(pop = basepop, geno = basepop.geno, map = pos.map,
out.geno.index = idx, out.pheno.index = idx,
outpath = tempdir(), out.format = "numeric", verbose = TRUE)
file.remove(file.path(tempdir(), "simer.geno.id"))
file.remove(file.path(tempdir(), "simer.map"))
file.remove(file.path(tempdir(), "simer.ped"))
file.remove(file.path(tempdir(), "simer.phe"))
}
}
\author{
Dong Yin
}
|
# GENERAL MODEL DESCRIPTIONS -----
#' Get model descriptions for parsnip, workflows & modeltime objects
#'
#'
#' @param object Parsnip or workflow objects
#' @param upper_case Whether to return upper or lower case model descriptions
#' @param indicate_training Whether or not to indicate if the model has been trained
#'
#' @examples
#' library(dplyr)
#' library(timetk)
#' library(parsnip)
#' library(modeltime)
#'
#' # Model Specification ----
#'
#' arima_spec <- arima_reg() %>%
#' set_engine("auto_arima")
#'
#' get_model_description(arima_spec, indicate_training = TRUE)
#'
#' # Fitted Model ----
#'
#' m750 <- m4_monthly %>% filter(id == "M750")
#'
#' arima_fit <- arima_spec %>%
#' fit(value ~ date, data = m750)
#'
#' get_model_description(arima_fit, indicate_training = TRUE)
#'
#'
#' @export
get_model_description <- function(object, indicate_training = FALSE, upper_case = TRUE) {
UseMethod("get_model_description", object)
}
#' @export
get_model_description.default <- function(object, indicate_training = FALSE, upper_case = TRUE) {
    # Unsupported class: fail with an informative message
    # (glubort = glue-formatted abort, defined elsewhere in the package).
    glubort("No method for class '{class(object)[1]}'. Expecting an object of class 'workflow', 'model_spec', or 'model_fit'.")
}
#' @export
get_model_description.model_fit <- function(object, indicate_training = FALSE, upper_case = TRUE) {

    # Prefer the description the engine stored on the fit, when present.
    description <- tryCatch(object$fit$desc, error = function(e) NULL)

    # Fall back to the declared engine, then to the fitted object's class.
    if (is.null(description)) {
        description <- object$spec$engine[1]
        if (is.null(description)) {
            description <- class(object$fit)[1]
        }
    }

    if (indicate_training) {
        description <- stringr::str_c(description, " (Trained)")
    }

    # Normalise case before returning.
    if (upper_case) toupper(description) else tolower(description)
}
#' @export
get_model_description.model_spec <- function(object, indicate_training = FALSE, upper_case = TRUE) {

    # Use the engine name if one was set; otherwise fall back to the spec's
    # class (e.g. "arima_reg").
    description <- object$engine[1]
    if (is.null(description)) {
        description <- class(object)[1]
    }

    # An unfitted spec can never be trained.
    if (indicate_training) {
        description <- stringr::str_c(description, " (Not Trained)")
    }

    if (upper_case) toupper(description) else tolower(description)
}
#' @export
get_model_description.workflow <- function(object, indicate_training = FALSE, upper_case = TRUE) {

    # Resolve a description by trying, in order:
    #   1. the modeltime fit's stored description,
    #   2. the fitted spec's engine name,
    #   3. the fitted model's class,
    #   4. the un-fitted workflow action's spec class.
    description <- tryCatch(object$fit$fit$fit$desc, error = function(e) NULL)

    if (is.null(description)) {
        description <- tryCatch(object$fit$fit$spec$engine[1], error = function(e) NULL)
    }

    if (is.null(description) && !is.null(object$fit$fit$fit)) {
        description <- class(object$fit$fit$fit)[1]
    }

    if (is.null(description) && !is.null(object$fit$actions$model$spec)) {
        description <- class(object$fit$actions$model$spec)[1]
    }

    if (indicate_training) {
        # Workflows carry an explicit trained flag.
        suffix <- if (object$trained) " (Trained)" else " (Not Trained)"
        description <- stringr::str_c(description, suffix)
    }

    if (upper_case) toupper(description) else tolower(description)
}
#' @export
get_model_description.recursive <- function(object, indicate_training = FALSE, upper_case = TRUE) {
    # Drop the recursive wrapper classes so dispatch reaches the wrapped
    # model's own method.
    class(object) <- class(object)[3:length(class(object))]
    # FIX: forward the caller's `indicate_training` (it was hard-coded to
    # FALSE, silently dropping the "(Trained)"/"(Not Trained)" suffix for
    # recursive models). Casing is re-applied below, so the inner call's
    # upper_case value is irrelevant.
    desc <- get_model_description(object, indicate_training = indicate_training, upper_case = TRUE)
    desc <- paste("RECURSIVE", desc)
    if (upper_case) {
        desc <- toupper(desc)
    } else {
        desc <- tolower(desc)
    }
    return(desc)
}
#' @export
get_model_description.recursive_panel <- function(object, indicate_training = FALSE, upper_case = TRUE) {
    # Drop the recursive-panel wrapper classes so dispatch reaches the
    # wrapped model's own method.
    class(object) <- class(object)[3:length(class(object))]
    # FIX: forward the caller's `indicate_training` (it was hard-coded to
    # FALSE, silently dropping the "(Trained)"/"(Not Trained)" suffix for
    # recursive panel models). Casing is re-applied below.
    desc <- get_model_description(object, indicate_training = indicate_training, upper_case = TRUE)
    desc <- paste("RECURSIVE", desc)
    if (upper_case) {
        desc <- toupper(desc)
    } else {
        desc <- tolower(desc)
    }
    return(desc)
}
#' @export
get_model_description.NULL <- function(object, indicate_training = FALSE, upper_case = TRUE) {
    # A NULL model slot has no description; report it literally.
    "NULL"
}
# ARIMA Model Descriptions ----
#' Get model descriptions for Arima objects
#'
#' @param object Objects of class `Arima`
#' @param padding Whether or not to include padding
#'
#' @source
#' - Forecast R Package, `forecast:::arima.string()`
#'
#' @examples
#' library(forecast)
#'
#' arima_fit <- forecast::Arima(1:10)
#'
#' get_arima_description(arima_fit)
#'
#'
#' @export
get_arima_description <- function(object, padding = FALSE) {
    # S3 generic: only a method for class "Arima" exists; every other class
    # falls through to the erroring default.
    UseMethod("get_arima_description", object)
}
#' @export
get_arima_description.default <- function(object, padding = FALSE) {
    # Unsupported class: fail with an informative message
    # (glubort = glue-formatted abort, defined elsewhere in the package).
    glubort("No method for class '{class(object)[1]}'. Expecting an object of class 'Arima'.")
}
#' @export
get_arima_description.Arima <- function(object, padding = FALSE) {
    # Reorder forecast's $arma slot -- stored as (p, q, P, Q, m, d, D) --
    # into (p, d, q, P, D, Q, m) so the indexing below reads naturally.
    # (Port of forecast:::arima.string; see @source above.)
    order <- object$arma[c(1, 6, 2, 3, 7, 4, 5)]
    m <- order[7]  # seasonal period
    result <- paste("ARIMA(", order[1], ",", order[2], ",", order[3], ")", sep = "")
    # Append the seasonal "(P,D,Q)[m]" part only when a seasonal period
    # exists and at least one of P, D, Q is non-zero.
    if (m > 1 && sum(order[4:6]) > 0) {
        result <- paste(result, "(", order[4], ",", order[5], ",", order[6], ")[", m, "]", sep = "")
    }
    # When padding is requested, pad a non-seasonal description to the width
    # a seasonal one would have: 9 spaces for "(P,D,Q)[]" plus 1-3 spaces
    # depending on how many digits the period m would occupy.
    if (padding && m > 1 && sum(order[4:6]) == 0) {
        result <- paste(result, "         ", sep = "")
        if (m <= 9) {
            result <- paste(result, " ", sep = "")
        } else if (m <= 99) {
            result <- paste(result, "  ", sep = "")
        } else {
            result <- paste(result, "   ", sep = "")
        }
    }
    # Describe the deterministic/regression part. The trailing spaces in the
    # literals pad the alternatives to equal width for aligned display.
    if (!is.null(object$xreg)) {
        if (NCOL(object$xreg) == 1 && is.element("drift", names(object$coef))) {
            result <- paste(result, "with drift        ")
        } else {
            result <- paste("Regression with", result, "errors")
        }
    } else {
        if (is.element("constant", names(object$coef)) || is.element("intercept", names(object$coef))) {
            result <- paste(result, "with non-zero mean")
        } else if (order[2] == 0 && order[5] == 0) {
            result <- paste(result, "with zero mean    ")
        } else {
            result <- paste(result, "                  ")
        }
    }
    if (!padding) {
        # Strip trailing spaces when aligned output is not requested.
        result <- gsub("[ ]*$", "", result)
    }
    return(result)
}
# TBATS Model Descriptions ----
#' Get model descriptions for TBATS objects
#'
#' @param object Objects of class `tbats`
#'
#' @source
#' - Forecast R Package, `forecast:::as.character.tbats()`
#'
#'
#' @export
get_tbats_description <- function(object) {
    # Only bats/tbats fits carry a printable model string; anything else is
    # rejected with an informative error. inherits() with a vector `what`
    # is TRUE when the object inherits from any of the listed classes.
    if (!inherits(object, c("tbats", "bats"))) {
        glubort("No method for class '{class(object)[1]}'. Expecting an object of class 'bats' or 'tbats'.")
    }
    as.character(object)
}
| /R/dev-model_descriptions.R | permissive | silverf62/modeltime | R | false | false | 7,059 | r |
# GENERAL MODEL DESCRIPTIONS -----
#' Get model descriptions for parsnip, workflows & modeltime objects
#'
#'
#' @param object Parsnip or workflow objects
#' @param upper_case Whether to return upper or lower case model descriptions
#' @param indicate_training Whether or not to indicate if the model has been trained
#'
#' @examples
#' library(dplyr)
#' library(timetk)
#' library(parsnip)
#' library(modeltime)
#'
#' # Model Specification ----
#'
#' arima_spec <- arima_reg() %>%
#' set_engine("auto_arima")
#'
#' get_model_description(arima_spec, indicate_training = TRUE)
#'
#' # Fitted Model ----
#'
#' m750 <- m4_monthly %>% filter(id == "M750")
#'
#' arima_fit <- arima_spec %>%
#' fit(value ~ date, data = m750)
#'
#' get_model_description(arima_fit, indicate_training = TRUE)
#'
#'
#' @export
get_model_description <- function(object, indicate_training = FALSE, upper_case = TRUE) {
UseMethod("get_model_description", object)
}
#' @export
get_model_description.default <- function(object, indicate_training = FALSE, upper_case = TRUE) {
glubort("No method for class '{class(object)[1]}'. Expecting an object of class 'workflow', 'model_spec', or 'model_fit'.")
}
#' @export
get_model_description.model_fit <- function(object, indicate_training = FALSE, upper_case = TRUE) {
x <- object
desc <- tryCatch({
x$fit$desc
}, error = function(e) {
NULL
})
if (is.null(desc)) {
desc <- x$spec$engine[1]
if (is.null(desc)) {
desc <- class(x$fit)[1]
}
}
if (indicate_training) {
desc <- stringr::str_c(desc, " (Trained)")
}
if (upper_case) {
desc <- toupper(desc)
} else {
desc <- tolower(desc)
}
return(desc)
}
#' @export
get_model_description.model_spec <- function(object, indicate_training = FALSE, upper_case = TRUE) {
spec <- object
# Try to get engine
desc <- spec$engine[1]
# Get class of spec
if (is.null(desc)) {
desc <- class(spec)[1]
}
if (indicate_training) {
desc <- stringr::str_c(desc, " (Not Trained)")
}
if (upper_case) {
desc <- toupper(desc)
} else {
desc <- tolower(desc)
}
return(desc)
}
#' @export
get_model_description.workflow <- function(object, indicate_training = FALSE, upper_case = TRUE) {
x <- object
# Fitted Modeltime - Try to grab model description
desc <- tryCatch({
x$fit$fit$fit$desc
}, error = function(e) {
NULL
})
# Fitted Workflow - Try to grab engine from spec
if (is.null(desc)) {
desc <- tryCatch({
x$fit$fit$spec$engine[1]
}, error = function(e) {
NULL
})
}
# Fitted Workflow - Try to grab class from model
if (is.null(desc)) {
if (!is.null(x$fit$fit$fit)) {
desc <- class(x$fit$fit$fit)[1]
}
}
# Un-Fitted Workflow - Try to grab class from model engine
if (is.null(desc)) {
if (!is.null(x$fit$actions$model$spec)) {
desc <- class(x$fit$actions$model$spec)[1]
}
}
if (indicate_training) {
if (x$trained) {
desc <- stringr::str_c(desc, " (Trained)")
} else {
desc <- stringr::str_c(desc, " (Not Trained)")
}
}
if (upper_case) {
desc <- toupper(desc)
} else {
desc <- tolower(desc)
}
return(desc)
}
#' @export
get_model_description.recursive <- function(object, indicate_training = FALSE, upper_case = TRUE) {
class(object) <- class(object)[3:length(class(object))]
desc <- get_model_description(object, indicate_training = FALSE, upper_case = TRUE)
desc <- paste("RECURSIVE", desc)
if (upper_case) {
desc <- toupper(desc)
} else {
desc <- tolower(desc)
}
return(desc)
}
#' @export
get_model_description.recursive_panel <- function(object, indicate_training = FALSE, upper_case = TRUE) {
class(object) <- class(object)[3:length(class(object))]
desc <- get_model_description(object, indicate_training = FALSE, upper_case = TRUE)
desc <- paste("RECURSIVE", desc)
if (upper_case) {
desc <- toupper(desc)
} else {
desc <- tolower(desc)
}
return(desc)
}
#' @export
get_model_description.NULL <- function(object, indicate_training = FALSE, upper_case = TRUE) {
"NULL"
}
# ARIMA Model Descriptions ----
#' Get model descriptions for Arima objects
#'
#' @param object Objects of class `Arima`
#' @param padding Whether or not to include padding
#'
#' @source
#' - Forecast R Package, `forecast:::arima.string()`
#'
#' @examples
#' library(forecast)
#'
#' arima_fit <- forecast::Arima(1:10)
#'
#' get_arima_description(arima_fit)
#'
#'
#' @export
get_arima_description <- function(object, padding = FALSE) {
UseMethod("get_arima_description", object)
}
#' @export
get_arima_description.default <- function(object, padding = FALSE) {
glubort("No method for class '{class(object)[1]}'. Expecting an object of class 'Arima'.")
}
#' @export
get_arima_description.Arima <- function(object, padding = FALSE) {
order <- object$arma[c(1, 6, 2, 3, 7, 4, 5)]
m <- order[7]
result <- paste("ARIMA(", order[1], ",", order[2], ",", order[3], ")", sep = "")
if (m > 1 && sum(order[4:6]) > 0) {
result <- paste(result, "(", order[4], ",", order[5], ",", order[6], ")[", m, "]", sep = "")
}
if (padding && m > 1 && sum(order[4:6]) == 0) {
result <- paste(result, " ", sep = "")
if (m <= 9) {
result <- paste(result, " ", sep = "")
} else if (m <= 99) {
result <- paste(result, " ", sep = "")
} else {
result <- paste(result, " ", sep = "")
}
}
if (!is.null(object$xreg)) {
if (NCOL(object$xreg) == 1 && is.element("drift", names(object$coef))) {
result <- paste(result, "with drift ")
} else {
result <- paste("Regression with", result, "errors")
}
} else {
if (is.element("constant", names(object$coef)) || is.element("intercept", names(object$coef))) {
result <- paste(result, "with non-zero mean")
} else if (order[2] == 0 && order[5] == 0) {
result <- paste(result, "with zero mean ")
} else {
result <- paste(result, " ")
}
}
if (!padding) {
# Strip trailing spaces
result <- gsub("[ ]*$", "", result)
}
return(result)
}
# TBATS Model Descriptions ----
#' Get model descriptions for TBATS objects
#'
#' @param object Objects of class `tbats`
#'
#' @source
#' - Forecast R Package, `forecast:::as.character.tbats()`
#'
#'
#' @export
get_tbats_description <- function(object) {
if (!(inherits(object, "tbats") || inherits(object, "bats"))) {
glubort("No method for class '{class(object)[1]}'. Expecting an object of class 'bats' or 'tbats'.")
}
as.character(object)
}
|
## plot1.R: histogram of household Global Active Power restricted to
## 2007-02-01 and 2007-02-02, saved as a 480x480 PNG.
DF <- read.table("household_power_consumption.txt", header=TRUE, sep= ";", na.strings = c("?","")) ## semicolon-delimited; "?" and empty strings mark missing values
DF$Date <- as.Date(DF$Date, format = "%d/%m/%Y") ## parse day/month/year strings into Date class
DF$timetemp <- paste(DF$Date, DF$Time) ## combine date and time into one string column
DF$Time <- strptime(DF$timetemp, format = "%Y-%m-%d %H:%M:%S") ## parse the combined string into POSIXlt timestamps
DF <- DF[DF$Date %in% as.Date(c('2007-02-01', '2007-02-02')), ] ## keep only the two required days
## Histogram of Global_active_power with the assignment's titles/labels:
hist(DF$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.copy(png, file = "plot1.png", width = 480, height = 480) ## copy the screen plot to a 480x480 PNG device
dev.off() ## close the PNG device | /plot1.R | no_license | bmarquetant/ExData_Plotting1 | R | false | false | 937 | r | DF <- read.table("household_power_consumption.txt", header=TRUE, sep= ";", na.strings = c("?","")) ## read in text file into an R dataframe
DF$Date <- as.Date(DF$Date, format = "%d/%m/%Y") ## change class of Date column to date
DF$timetemp <- paste(DF$Date, DF$Time) ## paste contents of Date and Time column in a new column
DF$Time <- strptime(DF$timetemp, format = "%Y-%m-%d %H:%M:%S") ## convert the content of the above column into class "POSIXlt"
DF <- DF[DF$Date %in% as.Date(c('2007-02-01', '2007-02-02')), ] ## filters dataframe for the data corresponding to the required 2 days
## create a histogram for the Global_active_power variable with required annotations format:
hist(DF$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)", ylab = "Frequency")
dev.copy(png, file = "plot1.png", width = 480, height = 480) ## copy plot to png file
dev.off() ## close the PNG device |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{csd.situations}
\alias{csd.situations}
\title{CSD Situations}
\format{
A \code{\link{data.frame}}.
}
\source{
Tim Bock.
}
\usage{
csd.situations
}
\description{
Binary associations between CSD brands and the situations in which they are used, collected in Sydney circa 2001 among University of Sydney students.
}
\keyword{datasets}
| /man/csd.situations.Rd | no_license | Displayr/flipExampleData | R | false | true | 444 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\docType{data}
\name{csd.situations}
\alias{csd.situations}
\title{CSD Situations}
\format{
A \code{\link{data.frame}}.
}
\source{
Tim Bock.
}
\usage{
csd.situations
}
\description{
Binary associations between CSD brands and the situations in which they are used, collected in Sydney c2001 amongst University of Sydney students.
}
\keyword{datasets}
|
## Case-study script (purled from an Rmd): gapminder GDP-per-capita plots.
# Emit the Rmd metadata's reading/tasks lists as markdown bullets
# (md_bullet is provided by the course materials -- TODO confirm source).
md_bullet(rmarkdown::metadata$reading)
md_bullet(rmarkdown::metadata$tasks)
library(gapminder)
library(dplyr)
library(ggplot2)
library(scales)
# Exclude Kuwait -- presumably because its extreme gdpPercap distorts the
# plots; confirm intent.
gapminder <- gapminder %>% filter(country != "Kuwait")
# Continent-year aggregates: population-weighted mean GDP per capita and
# total population (as.numeric guards against integer overflow in the sum).
gapminder_continent <- gapminder %>%
  group_by(continent, year) %>%
  summarise(gdpPercap = weighted.mean(x = gdpPercap, w = pop),
            pop = sum(as.numeric(pop)))
# Plot 1: life expectancy vs GDP per capita (sqrt y-scale), one panel per
# year, point size proportional to population.
p1=ggplot(data = gapminder, aes(color = continent, x = lifeExp, y = gdpPercap, size = pop/100000)) +
  geom_point() +
  facet_wrap(~year, nrow = 1) +
  scale_y_continuous(trans = "sqrt") +
  theme_bw() +
  labs(x = "Life Expectancy", y = "GDP per capita", size = "Population (100k)", color = "continent")
# Plot 2: GDP per capita over time; per-country trajectories colored by
# continent, with black continent-level (weighted) trajectories overlaid,
# one panel per continent.
p2=ggplot(data = gapminder, aes(color = continent, x = year, y = gdpPercap)) +
  geom_point(aes(size = pop/100000)) +
  geom_line(aes(group = country)) +
  geom_point(data = gapminder_continent, color = "black", aes(size = pop/100000)) +
  geom_line(data = gapminder_continent, color = "black") +
  facet_wrap(~continent, nrow = 1) +
  theme_bw() +
  labs(x = "Year", y = "GDP per capita", color = "Continent", size = "Population (100k)")
p1
p2
# ggsave(file="week_04/case_study/plot1.png",plot=p1,width = 15,height=8)
# ggsave(file="week_04/case_study/plot2.png",plot=p2,width = 15,height=8)
| /scripts/CS_03_nocomments.R | permissive | darunabas/GEO511 | R | false | false | 1,355 | r | md_bullet(rmarkdown::metadata$reading)
md_bullet(rmarkdown::metadata$tasks)
library(gapminder)
library(dplyr)
library(ggplot2)
library(scales)
gapminder <- gapminder %>% filter(country != "Kuwait")
gapminder_continent <- gapminder %>%
group_by(continent, year) %>%
summarise(gdpPercap = weighted.mean(x = gdpPercap, w = pop),
pop = sum(as.numeric(pop)))
p1=ggplot(data = gapminder, aes(color = continent, x = lifeExp, y = gdpPercap, size = pop/100000)) +
geom_point() +
facet_wrap(~year, nrow = 1) +
scale_y_continuous(trans = "sqrt") +
theme_bw() +
labs(x = "Life Expectancy", y = "GDP per capita", size = "Population (100k)", color = "continent")
p2=ggplot(data = gapminder, aes(color = continent, x = year, y = gdpPercap)) +
geom_point(aes(size = pop/100000)) +
geom_line(aes(group = country)) +
geom_point(data = gapminder_continent, color = "black", aes(size = pop/100000)) +
geom_line(data = gapminder_continent, color = "black") +
facet_wrap(~continent, nrow = 1) +
theme_bw() +
labs(x = "Year", y = "GDP per capita", color = "Continent", size = "Population (100k)")
p1
p2
# ggsave(file="week_04/case_study/plot1.png",plot=p1,width = 15,height=8)
# ggsave(file="week_04/case_study/plot2.png",plot=p2,width = 15,height=8)
|
# 1. Merge the training and the test sets to create one data set.
# FIX: the original paths used "\ " escape sequences, which are a parse
# error in R ("unrecognized escape in character string"); file.path()
# builds the same paths portably instead.
train_X_train <- read.table(file.path("UCI HAR Dataset", "train", "X_train.txt"), header = FALSE)
train_subject_train <- read.table(file.path("UCI HAR Dataset", "train", "subject_train.txt"), header = FALSE)
train_y_train <- read.table(file.path("UCI HAR Dataset", "train", "y_train.txt"), header = FALSE)
train <- cbind(train_X_train, train_subject_train, train_y_train)
test_X_test <- read.table(file.path("UCI HAR Dataset", "test", "X_test.txt"), header = FALSE)
test_subject_test <- read.table(file.path("UCI HAR Dataset", "test", "subject_test.txt"), header = FALSE)
test_y_test <- read.table(file.path("UCI HAR Dataset", "test", "y_test.txt"), header = FALSE)
test <- cbind(test_X_test, test_subject_test, test_y_test)
combined <- rbind(train, test)
# 2. Extract the mean/std measurements: read the feature names and label
# the combined data set (feature columns + subject id + activity code),
# so the hard-coded column subset below can select by name.
features <- read.table(file.path("UCI HAR Dataset", "features.txt"), sep = " ", header = FALSE)
features <- as.character(features[, 2])
colnames(combined) <- c(features, "student", "activity")
dataset1 <- combined[,c(
"tBodyAcc-mean()-X",
"tBodyAcc-mean()-Y",
"tBodyAcc-mean()-Z",
"tGravityAcc-mean()-X",
"tGravityAcc-mean()-Y",
"tGravityAcc-mean()-Z",
"tBodyAccJerk-mean()-X",
"tBodyAccJerk-mean()-Y",
"tBodyAccJerk-mean()-Z",
"tBodyGyro-mean()-X",
"tBodyGyro-mean()-Y",
"tBodyGyro-mean()-Z",
"tBodyGyroJerk-mean()-X",
"tBodyGyroJerk-mean()-Y",
"tBodyGyroJerk-mean()-Z",
"tBodyAccMag-mean()",
"tGravityAccMag-mean()",
"tBodyAccJerkMag-mean()",
"tBodyGyroMag-mean()",
"tBodyGyroJerkMag-mean()",
"fBodyAcc-mean()-X",
"fBodyAcc-mean()-Y",
"fBodyAcc-mean()-Z",
"fBodyAcc-meanFreq()-X",
"fBodyAcc-meanFreq()-Y",
"fBodyAcc-meanFreq()-Z",
"fBodyAccJerk-mean()-X",
"fBodyAccJerk-mean()-Y",
"fBodyAccJerk-mean()-Z",
"fBodyAccJerk-meanFreq()-X",
"fBodyAccJerk-meanFreq()-Y",
"fBodyAccJerk-meanFreq()-Z",
"fBodyGyro-mean()-X",
"fBodyGyro-mean()-Y",
"fBodyGyro-mean()-Z",
"fBodyGyro-meanFreq()-X",
"fBodyGyro-meanFreq()-Y",
"fBodyGyro-meanFreq()-Z",
"fBodyAccMag-mean()",
"fBodyAccMag-meanFreq()",
"fBodyBodyAccJerkMag-mean()",
"fBodyBodyAccJerkMag-meanFreq()",
"fBodyBodyGyroMag-mean()",
"fBodyBodyGyroMag-meanFreq()",
"fBodyBodyGyroJerkMag-mean()",
"fBodyBodyGyroJerkMag-meanFreq()",
"tBodyAcc-std()-X",
"tBodyAcc-std()-Y",
"tBodyAcc-std()-Z",
"tGravityAcc-std()-X",
"tGravityAcc-std()-Y",
"tGravityAcc-std()-Z",
"tBodyAccJerk-std()-X",
"tBodyAccJerk-std()-Y",
"tBodyAccJerk-std()-Z",
"tBodyGyro-std()-X",
"tBodyGyro-std()-Y",
"tBodyGyro-std()-Z",
"tBodyGyroJerk-std()-X",
"tBodyGyroJerk-std()-Y",
"tBodyGyroJerk-std()-Z",
"tBodyAccMag-std()",
"tGravityAccMag-std()",
"tBodyAccJerkMag-std()",
"tBodyGyroMag-std()",
"tBodyGyroJerkMag-std()",
"fBodyAcc-std()-X",
"fBodyAcc-std()-Y",
"fBodyAcc-std()-Z",
"fBodyAccJerk-std()-X",
"fBodyAccJerk-std()-Y",
"fBodyAccJerk-std()-Z",
"fBodyGyro-std()-X",
"fBodyGyro-std()-Y",
"fBodyGyro-std()-Z",
"fBodyAccMag-std()",
"fBodyBodyAccJerkMag-std()",
"fBodyBodyGyroMag-std()",
"fBodyBodyGyroJerkMag-std()",
"student","activity")]
# 3. Replace the numeric activity codes 1-6 with descriptive names.
# (The first assignment coerces the column to character; subsequent
# comparisons like activity==2 still match via R's implicit coercion.)
dataset1$activity[dataset1$activity==1] <- "WALKING"
dataset1$activity[dataset1$activity==2] <- "WALKING_UPSTAIRS"
dataset1$activity[dataset1$activity==3] <- "WALKING_DOWNSTAIRS"
dataset1$activity[dataset1$activity==4] <- "SITTING"
dataset1$activity[dataset1$activity==5] <- "STANDING"
dataset1$activity[dataset1$activity==6] <- "LAYING"
# 4. Label the data set with descriptive variable names.
# (Already done in step 2 when assigning colnames, to facilitate subsetting.)
# 5. Create a second, independent tidy data set with the average of each
# variable for each (student, activity) pair: melt to long form, then cast
# back taking the mean per cell.
library(reshape2)
datamelt <- melt(dataset1, id=c("student","activity"))
dataset2 <- dcast(datamelt, student + activity ~ variable, mean)
# 6. Export the tidy data set to a space-delimited text file.
write.table(dataset2,file="tidydata2.txt",sep=" ")
| /run_analysis.R | no_license | anon1776/Cleaning-data-course-project | R | false | false | 4,027 | r | # 1.Merges the training and the test sets to create one data set.
train_X_train <- read.table("UCI\ HAR\ Dataset\\train\\X_train.txt",header=FALSE)
train_subject_train <- read.table("UCI\ HAR\ Dataset/train/subject_train.txt",header=FALSE)
train_y_train <- read.table("UCI\ HAR\ Dataset/train/y_train.txt",header=FALSE)
train <- cbind(train_X_train,train_subject_train,train_y_train)
test_X_test <- read.table("UCI\ HAR\ Dataset\\test\\X_test.txt",header=FALSE)
test_subject_test <- read.table("UCI\ HAR\ Dataset\\test\\subject_test.txt",header=FALSE)
test_y_test <- read.table("UCI\ HAR\ Dataset/test/y_test.txt",header=FALSE)
test <- cbind(test_X_test,test_subject_test,test_y_test)
combined <- rbind(train,test)
# 2.Extracts only the measurements on the mean and standard deviation for each measurement.
features <- read.table("UCI\ HAR\ Dataset\\features.txt",sep=" ",header=FALSE)
features <- as.character(features[,2])
colnames(combined) <- c(features, "student", "activity")
dataset1 <- combined[,c(
"tBodyAcc-mean()-X",
"tBodyAcc-mean()-Y",
"tBodyAcc-mean()-Z",
"tGravityAcc-mean()-X",
"tGravityAcc-mean()-Y",
"tGravityAcc-mean()-Z",
"tBodyAccJerk-mean()-X",
"tBodyAccJerk-mean()-Y",
"tBodyAccJerk-mean()-Z",
"tBodyGyro-mean()-X",
"tBodyGyro-mean()-Y",
"tBodyGyro-mean()-Z",
"tBodyGyroJerk-mean()-X",
"tBodyGyroJerk-mean()-Y",
"tBodyGyroJerk-mean()-Z",
"tBodyAccMag-mean()",
"tGravityAccMag-mean()",
"tBodyAccJerkMag-mean()",
"tBodyGyroMag-mean()",
"tBodyGyroJerkMag-mean()",
"fBodyAcc-mean()-X",
"fBodyAcc-mean()-Y",
"fBodyAcc-mean()-Z",
"fBodyAcc-meanFreq()-X",
"fBodyAcc-meanFreq()-Y",
"fBodyAcc-meanFreq()-Z",
"fBodyAccJerk-mean()-X",
"fBodyAccJerk-mean()-Y",
"fBodyAccJerk-mean()-Z",
"fBodyAccJerk-meanFreq()-X",
"fBodyAccJerk-meanFreq()-Y",
"fBodyAccJerk-meanFreq()-Z",
"fBodyGyro-mean()-X",
"fBodyGyro-mean()-Y",
"fBodyGyro-mean()-Z",
"fBodyGyro-meanFreq()-X",
"fBodyGyro-meanFreq()-Y",
"fBodyGyro-meanFreq()-Z",
"fBodyAccMag-mean()",
"fBodyAccMag-meanFreq()",
"fBodyBodyAccJerkMag-mean()",
"fBodyBodyAccJerkMag-meanFreq()",
"fBodyBodyGyroMag-mean()",
"fBodyBodyGyroMag-meanFreq()",
"fBodyBodyGyroJerkMag-mean()",
"fBodyBodyGyroJerkMag-meanFreq()",
"tBodyAcc-std()-X",
"tBodyAcc-std()-Y",
"tBodyAcc-std()-Z",
"tGravityAcc-std()-X",
"tGravityAcc-std()-Y",
"tGravityAcc-std()-Z",
"tBodyAccJerk-std()-X",
"tBodyAccJerk-std()-Y",
"tBodyAccJerk-std()-Z",
"tBodyGyro-std()-X",
"tBodyGyro-std()-Y",
"tBodyGyro-std()-Z",
"tBodyGyroJerk-std()-X",
"tBodyGyroJerk-std()-Y",
"tBodyGyroJerk-std()-Z",
"tBodyAccMag-std()",
"tGravityAccMag-std()",
"tBodyAccJerkMag-std()",
"tBodyGyroMag-std()",
"tBodyGyroJerkMag-std()",
"fBodyAcc-std()-X",
"fBodyAcc-std()-Y",
"fBodyAcc-std()-Z",
"fBodyAccJerk-std()-X",
"fBodyAccJerk-std()-Y",
"fBodyAccJerk-std()-Z",
"fBodyGyro-std()-X",
"fBodyGyro-std()-Y",
"fBodyGyro-std()-Z",
"fBodyAccMag-std()",
"fBodyBodyAccJerkMag-std()",
"fBodyBodyGyroMag-std()",
"fBodyBodyGyroJerkMag-std()",
"student","activity")]
# 3.Uses descriptive activity names to name the activities in the data set
dataset1$activity[dataset1$activity==1] <- "WALKING"
dataset1$activity[dataset1$activity==2] <- "WALKING_UPSTAIRS"
dataset1$activity[dataset1$activity==3] <- "WALKING_DOWNSTAIRS"
dataset1$activity[dataset1$activity==4] <- "SITTING"
dataset1$activity[dataset1$activity==5] <- "STANDING"
dataset1$activity[dataset1$activity==6] <- "LAYING"
# 4.Appropriately labels the data set with descriptive variable names.
# done already in step 2 to facilitate subsetting
# 5.Creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(reshape2)
datamelt <- melt(dataset1, id=c("student","activity"))
dataset2 <- dcast(datamelt, student + activity ~ variable, mean)
# 6. Export tidy data set to a text file
write.table(dataset2,file="tidydata2.txt",sep=" ")
|
#' Data Generating process for Monte Carlo study
#' Possible to module the non-linearity
#'
#' CREATED: 19 avril 2017
#' EDITED: 24 juillet 2017
#'
#' @param n number of observations wanted
#' @param p is the number of covariates
#' @param delta is the order of the legendre polynomial (should be an integer)
#' @param Ry gives the R-square wanted in outcome equation
#' @param Rd gives the R-square wanted in treatment equation
#' @param rho parametrizes the covariance between the covariates
#' @param a is the treatment effect value
#'
#' @autor Jeremy LHour
NLmatchDGP <- function(n=20,p=2,delta=1,Ry=.5,Rd=.2,rho=.5,a=0){
library("MASS")
library("orthopolynom")
### Covariate variance matrix
Sigma = matrix(0,nrow=p, ncol=p)
for(k in 1:p){
for(j in 1:p){
Sigma[k,j] = rho^abs(k-j)
}
}
### Treatment variable coefficient
gamma = rep(0,p)
for(j in 1:p){
gamma[j] = 1*(-1)^(j) / j^2
}
### Outcome equation coefficients
b = gamma
for(j in 1:p){
b[j] = (-1)^(j+1) / (p-j+1)^2
}
### Adjustment to match R.squared
c = sqrt((1/t(gamma)%*%Sigma%*%gamma)*(Rd/(1-Rd)))
gamma = c*gamma
c = sqrt((1/t(b)%*%Sigma%*%b)*(Ry/(1-Ry)))
b = c*b
X = mvrnorm(n = n, mu=rep(0,p), Sigma)
d = as.numeric(runif(n) < pnorm(X%*%gamma))
### Create outcome function
poly = legendre.polynomials(delta)
polypart = unlist(polynomial.values(poly[delta+1],X%*%b))
y = ifelse(abs(X%*%b)<1,(X%*%b+1)/2,polypart) + a*d + rnorm(n)
return(list(X=X,
y=y,
d=d,
b=b,
g=gamma))
} | /functions/NLmatchDGP.R | no_license | predt/regsynth | R | false | false | 1,601 | r | #' Data Generating process for Monte Carlo study
#' Possible to module the non-linearity
#'
#' CREATED: 19 avril 2017
#' EDITED: 24 juillet 2017
#'
#' @param n number of observations wanted
#' @param p is the number of covariates
#' @param delta is the order of the legendre polynomial (should be an integer)
#' @param Ry gives the R-square wanted in outcome equation
#' @param Rd gives the R-square wanted in treatment equation
#' @param rho parametrizes the covariance between the covariates
#' @param a is the treatment effect value
#'
#' @autor Jeremy LHour
NLmatchDGP <- function(n=20,p=2,delta=1,Ry=.5,Rd=.2,rho=.5,a=0){
library("MASS")
library("orthopolynom")
### Covariate variance matrix
Sigma = matrix(0,nrow=p, ncol=p)
for(k in 1:p){
for(j in 1:p){
Sigma[k,j] = rho^abs(k-j)
}
}
### Treatment variable coefficient
gamma = rep(0,p)
for(j in 1:p){
gamma[j] = 1*(-1)^(j) / j^2
}
### Outcome equation coefficients
b = gamma
for(j in 1:p){
b[j] = (-1)^(j+1) / (p-j+1)^2
}
### Adjustment to match R.squared
c = sqrt((1/t(gamma)%*%Sigma%*%gamma)*(Rd/(1-Rd)))
gamma = c*gamma
c = sqrt((1/t(b)%*%Sigma%*%b)*(Ry/(1-Ry)))
b = c*b
X = mvrnorm(n = n, mu=rep(0,p), Sigma)
d = as.numeric(runif(n) < pnorm(X%*%gamma))
### Create outcome function
poly = legendre.polynomials(delta)
polypart = unlist(polynomial.values(poly[delta+1],X%*%b))
y = ifelse(abs(X%*%b)<1,(X%*%b+1)/2,polypart) + a*d + rnorm(n)
return(list(X=X,
y=y,
d=d,
b=b,
g=gamma))
} |
#'
#' @title Summary function for maximum a posteriori estimate
#'
#' @description Takes as argument a return from a call to \code{\link{map}} and summarises the mean, sd and quantile values.
#'
#' @param object return from a call to \code{\link{map}}
#' @param pars parameters to be summarised
#'
#' @note This function will fail of the \code{draws} argument is not specified in the optimisation i.e. \code{optimizing(..., draws = <n>)}, where \code{<n>} is a large number.
#'
#' @include map.R
#'
#' @method summary map
#'
#' @examples
#' require(rstan)
#'
#' mdl <- "data{ int n; vector[n] x; } parameters{ real mu; real sigma;} model{ x ~ normal(mu, sigma);} generated quantities{ vector[n] x_sim; for (i in 1:n) x_sim[i] = normal_rng(mu, sigma);} \n"
#' mdl <- stan_model(model_code = mdl)
#' n <- 20
#' x <- rnorm(n, 0, 2)
#'
#' mdl.fit <- optimizing(mdl, data = list(n = n, x = x), init = list(mu = 0, sigma = 1), draws = 2000)
#'
#' mdl.map <- map(mdl.fit, pars = c("mu", "sigma", "x_sim"), dims = list("mu" = 0, "sigma" = 0, "x_sim" = n))
#'
#' summary(mdl.map, pars = c("mu", "sigma"))
#'
#' cbind(x = x, x_sim = summary(mdl.map, pars = "x_sim")[[1]][, "mean"])
#'
#' @export
"summary.map" <- function(object, pars) {
    # Summarise a maximum a posteriori fit produced by map(): for each
    # requested parameter, report the point estimate ("mean"), the standard
    # deviation and the 2.5/50/97.5% quantiles derived from the draws.
    if (missing(pars)) {
        pars <- names(object$estimate)  # default: summarise every parameter
    }
    out <- list()
    if (is.null(object$sd)) {
        # sd/quantiles exist only when optimizing(..., draws = <n>) was used
        stop("Must specify 'draws' argument in call to optimizing to calculate standard errors and credibility intervals")
    } else {
        for (x in pars) {
            n <- length(object$estimate[[x]])
            if (n > 1) {
                # Vector parameter: one summary row per element.
                # NOTE(review): assumes object$quantiles[[x]] rows are
                # ordered 50%, 2.5%, 97.5% -- confirm against the quantile
                # probs used by the map() constructor.
                out[[x]] <- cbind("mean" = object$estimate[[x]],
                                  "sd" = object$sd[[x]],
                                  "2.5%" = object$quantiles[[x]][2,],
                                  "50%" = object$quantiles[[x]][1,],
                                  "97.5%" = object$quantiles[[x]][3,])
                rownames(out[[x]]) <- paste0(x, "[", 1:n, "]")
            } else {
                # Scalar parameter: a single named summary vector.
                out[[x]] <- c("mean" = object$estimate[[x]],
                              "sd" = object$sd[[x]],
                              object$quantiles[[x]][2],
                              object$quantiles[[x]][1],
                              object$quantiles[[x]][3])
            }
        }
    }
    return(out)
}
| /R/summary.R | no_license | cttedwards/bde | R | false | false | 2,584 | r | #'
#' @title Summary function for maximum a posteriori estimate
#'
#' @description Takes as argument a return from a call to \code{\link{map}} and summarises the mean, sd and quantile values.
#'
#' @param object return from a call to \code{\link{map}}
#' @param pars parameters to be summarised
#'
#' @note This function will fail of the \code{draws} argument is not specified in the optimisation i.e. \code{optimizing(..., draws = <n>)}, where \code{<n>} is a large number.
#'
#' @include map.R
#'
#' @method summary map
#'
#' @examples
#' require(rstan)
#'
#' mdl <- "data{ int n; vector[n] x; } parameters{ real mu; real sigma;} model{ x ~ normal(mu, sigma);} generated quantities{ vector[n] x_sim; for (i in 1:n) x_sim[i] = normal_rng(mu, sigma);} \n"
#' mdl <- stan_model(model_code = mdl)
#' n <- 20
#' x <- rnorm(n, 0, 2)
#'
#' mdl.fit <- optimizing(mdl, data = list(n = n, x = x), init = list(mu = 0, sigma = 1), draws = 2000)
#'
#' mdl.map <- map(mdl.fit, pars = c("mu", "sigma", "x_sim"), dims = list("mu" = 0, "sigma" = 0, "x_sim" = n))
#'
#' summary(mdl.map, pars = c("mu", "sigma"))
#'
#' cbind(x = x, x_sim = summary(mdl.map, pars = "x_sim")[[1]][, "mean"])
#'
#' @export
"summary.map" <- function(object, pars) {
if (missing(pars)) {
pars <- names(object$estimate)
}
out <- list()
if (is.null(object$sd)) {
stop("Must specify 'draws' argument in call to optimizing to calculate standard errors and credibility intervals")
} else {
for (x in pars) {
n <- length(object$estimate[[x]])
if (n > 1) {
# par is a vector
out[[x]] <- cbind("mean" = object$estimate[[x]],
"sd" = object$sd[[x]],
"2.5%" = object$quantiles[[x]][2,],
"50%" = object$quantiles[[x]][1,],
"97.5%" = object$quantiles[[x]][3,])
rownames(out[[x]]) <- paste0(x, "[", 1:n, "]")
} else {
# par is a real value
out[[x]] <- c("mean" = object$estimate[[x]],
"sd" = object$sd[[x]],
object$quantiles[[x]][2],
object$quantiles[[x]][1],
object$quantiles[[x]][3])
}
}
}
return(out)
}
|
######################################################################
# study.R
#
# Brian S Yandell
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License,
# version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose. See the GNU
# General Public License, version 3, for more details.
#
# A copy of the GNU General Public License, version 3, is available
# at http://www.r-project.org/Licenses/GPL-3
#
# Contains: count.thr, exceed.thr,
# get.hotspot, filter.threshold,
# NL.counts, N.WW.counts, mycat
######################################################################
## This function computes the error rates for the NL-, N- and West/Wu methods
## out of nSim simulations. (It also returns the method's thresholds).
## At each simulation iteration this function: (1) generates a null dataset
## cross; (2) perform Haley-Knott mapping for all traits; (3) applies the LOD
## drop interval computation to the scanone object; (4) determine the NL-, N-
## and West/Wu approaches' thresholds using the scanone results from step 3;
## (5) for each of the three methods it computes the proportion of times out
## of the nSim simulations we detected at least one false hotspot anywhere in
## the genome.
##
count.thr <- function(scan, lod.thrs, droptwo = TRUE)
{
  ## Count number of traits with LOD strictly above each value of lod.thrs
  ## at each locus (exceed.thr compares with '>').
  ## apply() over rows returns one column per locus, so the result is an
  ## n.thr x n.loci matrix (thresholds in rows, loci in columns).
  ## When droptwo = TRUE, drop the first two columns (chr and pos in a
  ## scanone object) so only the per-trait LOD columns remain.
  if(droptwo)
    scan <- scan[, -c(1,2), drop = FALSE]
  apply(scan, 1, exceed.thr, lod.thrs)
}
exceed.thr <- function(x, y)
{
  ## For each threshold in y, count how many values in x are strictly
  ## larger than it.  Thresholds are visited in ascending order so that
  ## x can be pruned as we go; once nothing exceeds the current
  ## threshold, every remaining (larger) threshold keeps a count of zero.
  counts <- numeric(length(y))
  for(pos in order(y)) {
    x <- x[x > y[pos]]
    n.above <- length(x)
    if(n.above == 0)
      break
    counts[pos] <- n.above
  }
  counts
}
#################################################################################
get.hotspot <- function(filenames,
                        ## Following supplied in filenames[1].
                        n.quant, out.sim)
{
  ## Aggregate NL-, N- and West/Wu detection counts over a set of saved
  ## simulation results (one .RData file per simulation) into error rates.
  ## filenames = list.files(".", paste(prefix, latent.eff, sets, "RData", sep = "."))
  ## latent.eff = 0, prefix = "Pilot", sets = "[0-9][0-9]*"
  ## Get stored n.quant value, and out.sim to get alpha.levels and lod.thrs.
  ## NOTE(review): load() places the stored 'n.quant' and 'out.sim' objects
  ## into this frame, shadowing the formal arguments of the same names.
  load(filenames[1])
  nSim <- length(filenames)
  n.quant <- n.quant ## Null action to make sure n.quant is valued.
  s.quant <- seq(n.quant)
  ## Threshold grid is recovered from the dimnames of the stored N.thrs.
  tmp <- dimnames(out.sim$N.thrs)
  lod.thrs <- as.numeric(tmp[[1]])
  alpha.levels <- as.numeric(tmp[[2]])
  ## May not have names on rows and columns.
  nalpha <- ncol(out.sim$N.thrs)
  nlod <- nrow(out.sim$N.thrs)
  ## outputs count the number of times we detected
  ## a hotspot using the respective method
  outNL <- matrix(0, n.quant, nalpha)
  dimnames(outNL) <- list(NULL, alpha.levels)
  outN <- outWW <- matrix(0, nlod, nalpha)
  dimnames(outN) <- dimnames(outWW) <- list(lod.thrs, alpha.levels)
  ## we are saving the thresholds of each simulation
  thrNL <- array(dim=c(n.quant, nalpha, nSim))
  thrN <- thrWW <- array(dim=c(nlod, nalpha, nSim))
  dimnames(thrN) <- dimnames(thrWW) <- list(lod.thrs, alpha.levels, NULL)
  dimnames(thrNL) <- list(NULL, alpha.levels, NULL)
  for(k in 1:nSim) {
    mycat(k, TRUE, TRUE)
    ## Each file holds its own 'out.sim'; record its thresholds and
    ## accumulate its detection counts.
    load(filenames[k])
    thrNL[,,k] <- out.sim$NL.thrs
    thrN[,,k] <- out.sim$N.thrs
    thrWW[,,k] <- out.sim$WW.thrs
    outNL <- outNL + out.sim$NL
    outN <- outN + out.sim$N.counts
    outWW <- outWW + out.sim$WW.counts
  }
  ## Convert detection counts into error rates (proportion of simulations
  ## in which at least one false hotspot was detected).
  NL.err <- outNL/nSim
  dimnames(NL.err) <- list(as.factor(s.quant), as.factor(alpha.levels))
  N.err <- outN / nSim
  dimnames(N.err) <- list(as.factor(lod.thrs), as.factor(alpha.levels))
  WW.err <- outWW / nSim
  dimnames(WW.err) <- list(as.factor(lod.thrs), as.factor(alpha.levels))
  list(nSim = nSim, NL.err=NL.err, N.err=N.err, WW.err=WW.err, thrNL=thrNL, thrN=thrN,
       thrWW=thrWW)
}
########################################################################################
filter.threshold <- function(cross, pheno.col, latent.eff, res.var,
                             lod.thrs, drop.lod = 1.5,
                             s.quant, n.perm, alpha.levels,
                             qh.thrs = summary(hotperm(cross, max(s.quant), n.perm, alpha.levels,
                               lod.thrs, verbose = verbose)),
                             ww.thrs = summary(ww.perm(highobj, n.perm, lod.thrs, alpha.levels)),
                             addcovar = NULL, intcovar = NULL,
                             verbose = FALSE, ...)
{
  ## Run a Haley-Knott genome scan for all traits, reduce it to high-LOD
  ## records, and compute hotspot detection counts for the NL-, N- and
  ## West/Wu methods at the supplied thresholds.
  ## NOTE(review): the defaults for qh.thrs and ww.thrs are lazy promises,
  ## so they can legitimately refer to 'highobj' and 'verbose' defined in
  ## the body below.  latent.eff and res.var are accepted but not used here.
  mycat("scanone", verbose)
  scanmat <- scanone(cross, pheno.col = pheno.col, method = "hk",
                     addcovar = addcovar, intcovar = intcovar, ...)
  ## Reduce to high LOD scores.
  mycat("highlod", verbose)
  highobj <- highlod(scanmat, min(lod.thrs), drop.lod, restrict.lod = TRUE)
  ## The full scan matrix can be large; free it once reduced.
  rm(scanmat)
  gc()
  ## computes an array of size n.quant by nalpha by npos.
  ## showing for each s.quant size and alpha level, the
  ## hotspot sizes at each genomic location.
  mycat("NL.counts", verbose)
  NL.thrs <- qh.thrs[[1]]
  N.thrs <- qh.thrs[[2]]
  n.quant <- length(s.quant)
  NL <- NL.counts(highobj, n.quant, NL.thrs)
  ## computes a matrix of size nlod by npos.
  ## showing for each lod threshold the
  ## hotspot sizes at each genomic location.
  mycat("N.WW.counts", verbose)
  N.WW <- N.WW.counts(highobj, lod.thrs, N.thrs, ww.thrs)
  list(NL.thrs = NL.thrs, N.thrs = N.thrs, WW.thrs = ww.thrs, NL = NL,
       N.counts = N.WW$N, WW.counts = N.WW$WW)
}
## Computes an array of size n.quant (number of spurious hotspots sizes) by
## nalpha (number of significance levels) by npos (number of locus), and for
## each spurious hotspot size/significance level threshold, it computes the
## number of traits mapping with LOD higher than the threshold at each one
## of the genomic positions.
##
NL.counts <- function(highobj, n.quant, NL.thrs)
{
  ## get the maximum spurious hotspot size (N-method)
  ## for different QTL mapping significance levels
  ## NOTE(review): quantile() dispatches on the class of 'highobj'
  ## (a highlod object), not the base numeric method.
  XX <- quantile(highobj, n.quant = n.quant)
  ## For each significance level (column of NL.thrs), flag which of the
  ## first length(x) observed quantiles exceed the corresponding threshold.
  NL.counts <- apply(NL.thrs, 2,
                     function(x,y) (x < y[seq(x)]),
                     XX)
  ## dimnames(NL.counts)[[2]] <- seq(n.quant)
  NL.counts
}
## Computes a matrix of size nlod (number of mapping thresholds) by npos
## (number of locus), and for each LOD threshold, it computes the number
## of traits mapping with LOD higher than the threshold at each one of
## the genomic positions. The same counts are used by the N- and WW-methods.
##
N.WW.counts <- function(highobj, lod.thrs, N.thrs, WW.thrs)
{
  ## XX = genome position by number of traits above LOD threshold.
  ## NOTE(review): max() dispatches on the class of 'highobj'; it is
  ## expected to yield the maximum hotspot size per LOD threshold.
  XX <- max(highobj, lod.thr = lod.thrs)
  ## N.counts[lod,alpha] = TRUE if max hotspot size using lod is above alpha perm threshold.
  N.counts <- apply(N.thrs, 2, function(x,y) (x < y), XX)
  ## WW.counts[lod,alpha] = TRUE if max hotspot size using lod is above alpha perm threshold.
  WW.counts <- apply(WW.thrs, 2, function(x,y) (x < y), XX)
  dimnames(N.counts) <- dimnames(WW.counts) <- dimnames(N.thrs)
  list(N = N.counts, WW = WW.counts)
}
mycat <- function(title, verbose = FALSE, init = FALSE, last = "\n")
{
  ## Progress/status printer.  Silent unless 'verbose' is truthy; at
  ## verbose levels above 1 it first emits a timing prefix taken from
  ## proc.time() (or a column-header row when init = TRUE), then the
  ## message itself followed by 'last'.
  if(!verbose)
    return(invisible(NULL))
  if(verbose > 1) {
    if(init) {
      cat("user system elapsed time\n")
    } else {
      timings <- round(as.numeric(proc.time()[1:3]))
      cat(timings, "")
    }
  }
  cat(title, last)
}
| /qtlhot/R/study.R | no_license | ingted/R-Examples | R | false | false | 7,784 | r | ######################################################################
# study.R
#
# Brian S Yandell
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License,
# version 3, as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but without any warranty; without even the implied warranty of
# merchantability or fitness for a particular purpose. See the GNU
# General Public License, version 3, for more details.
#
# A copy of the GNU General Public License, version 3, is available
# at http://www.r-project.org/Licenses/GPL-3
#
# Contains: count.thr, exceed.thr,
# get.hotspot, filter.threshold,
# NL.counts, N.WW.counts, mycat
######################################################################
## This function computes the error rates for the NL-, N- and West/Wu methods
## out of nSim simulations. (It also returns the method's thresholds).
## At each simulation iteration this function: (1) generates a null dataset
## cross; (2) perform Haley-Knott mapping for all traits; (3) applies the LOD
## drop interval computation to the scanone object; (4) determine the NL-, N-
## and West/Wu approaches' thresholds using the scanone results from step 3;
## (5) for each of the three methods it computes the proportion of times out
## of the nSim simulations we detected at least one false hotspot anywhere in
## the genome.
##
count.thr <- function(scan, lod.thrs, droptwo = TRUE)
{
  ## Count number of traits with LOD strictly above each value of lod.thrs
  ## at each locus (exceed.thr compares with '>').
  ## apply() over rows returns one column per locus, so the result is an
  ## n.thr x n.loci matrix (thresholds in rows, loci in columns).
  ## When droptwo = TRUE, drop the first two columns (chr and pos in a
  ## scanone object) so only the per-trait LOD columns remain.
  if(droptwo)
    scan <- scan[, -c(1,2), drop = FALSE]
  apply(scan, 1, exceed.thr, lod.thrs)
}
exceed.thr <- function(x, y)
{
  ## For each threshold in y, count how many values in x are strictly
  ## larger than it.  Thresholds are visited in ascending order so that
  ## x can be pruned as we go; once nothing exceeds the current
  ## threshold, every remaining (larger) threshold keeps a count of zero.
  counts <- numeric(length(y))
  for(pos in order(y)) {
    x <- x[x > y[pos]]
    n.above <- length(x)
    if(n.above == 0)
      break
    counts[pos] <- n.above
  }
  counts
}
#################################################################################
get.hotspot <- function(filenames,
                        ## Following supplied in filenames[1].
                        n.quant, out.sim)
{
  ## Aggregate NL-, N- and West/Wu detection counts over a set of saved
  ## simulation results (one .RData file per simulation) into error rates.
  ## filenames = list.files(".", paste(prefix, latent.eff, sets, "RData", sep = "."))
  ## latent.eff = 0, prefix = "Pilot", sets = "[0-9][0-9]*"
  ## Get stored n.quant value, and out.sim to get alpha.levels and lod.thrs.
  ## NOTE(review): load() places the stored 'n.quant' and 'out.sim' objects
  ## into this frame, shadowing the formal arguments of the same names.
  load(filenames[1])
  nSim <- length(filenames)
  n.quant <- n.quant ## Null action to make sure n.quant is valued.
  s.quant <- seq(n.quant)
  ## Threshold grid is recovered from the dimnames of the stored N.thrs.
  tmp <- dimnames(out.sim$N.thrs)
  lod.thrs <- as.numeric(tmp[[1]])
  alpha.levels <- as.numeric(tmp[[2]])
  ## May not have names on rows and columns.
  nalpha <- ncol(out.sim$N.thrs)
  nlod <- nrow(out.sim$N.thrs)
  ## outputs count the number of times we detected
  ## a hotspot using the respective method
  outNL <- matrix(0, n.quant, nalpha)
  dimnames(outNL) <- list(NULL, alpha.levels)
  outN <- outWW <- matrix(0, nlod, nalpha)
  dimnames(outN) <- dimnames(outWW) <- list(lod.thrs, alpha.levels)
  ## we are saving the thresholds of each simulation
  thrNL <- array(dim=c(n.quant, nalpha, nSim))
  thrN <- thrWW <- array(dim=c(nlod, nalpha, nSim))
  dimnames(thrN) <- dimnames(thrWW) <- list(lod.thrs, alpha.levels, NULL)
  dimnames(thrNL) <- list(NULL, alpha.levels, NULL)
  for(k in 1:nSim) {
    mycat(k, TRUE, TRUE)
    ## Each file holds its own 'out.sim'; record its thresholds and
    ## accumulate its detection counts.
    load(filenames[k])
    thrNL[,,k] <- out.sim$NL.thrs
    thrN[,,k] <- out.sim$N.thrs
    thrWW[,,k] <- out.sim$WW.thrs
    outNL <- outNL + out.sim$NL
    outN <- outN + out.sim$N.counts
    outWW <- outWW + out.sim$WW.counts
  }
  ## Convert detection counts into error rates (proportion of simulations
  ## in which at least one false hotspot was detected).
  NL.err <- outNL/nSim
  dimnames(NL.err) <- list(as.factor(s.quant), as.factor(alpha.levels))
  N.err <- outN / nSim
  dimnames(N.err) <- list(as.factor(lod.thrs), as.factor(alpha.levels))
  WW.err <- outWW / nSim
  dimnames(WW.err) <- list(as.factor(lod.thrs), as.factor(alpha.levels))
  list(nSim = nSim, NL.err=NL.err, N.err=N.err, WW.err=WW.err, thrNL=thrNL, thrN=thrN,
       thrWW=thrWW)
}
########################################################################################
filter.threshold <- function(cross, pheno.col, latent.eff, res.var,
                             lod.thrs, drop.lod = 1.5,
                             s.quant, n.perm, alpha.levels,
                             qh.thrs = summary(hotperm(cross, max(s.quant), n.perm, alpha.levels,
                               lod.thrs, verbose = verbose)),
                             ww.thrs = summary(ww.perm(highobj, n.perm, lod.thrs, alpha.levels)),
                             addcovar = NULL, intcovar = NULL,
                             verbose = FALSE, ...)
{
  ## Run a Haley-Knott genome scan for all traits, reduce it to high-LOD
  ## records, and compute hotspot detection counts for the NL-, N- and
  ## West/Wu methods at the supplied thresholds.
  ## NOTE(review): the defaults for qh.thrs and ww.thrs are lazy promises,
  ## so they can legitimately refer to 'highobj' and 'verbose' defined in
  ## the body below.  latent.eff and res.var are accepted but not used here.
  mycat("scanone", verbose)
  scanmat <- scanone(cross, pheno.col = pheno.col, method = "hk",
                     addcovar = addcovar, intcovar = intcovar, ...)
  ## Reduce to high LOD scores.
  mycat("highlod", verbose)
  highobj <- highlod(scanmat, min(lod.thrs), drop.lod, restrict.lod = TRUE)
  ## The full scan matrix can be large; free it once reduced.
  rm(scanmat)
  gc()
  ## computes an array of size n.quant by nalpha by npos.
  ## showing for each s.quant size and alpha level, the
  ## hotspot sizes at each genomic location.
  mycat("NL.counts", verbose)
  NL.thrs <- qh.thrs[[1]]
  N.thrs <- qh.thrs[[2]]
  n.quant <- length(s.quant)
  NL <- NL.counts(highobj, n.quant, NL.thrs)
  ## computes a matrix of size nlod by npos.
  ## showing for each lod threshold the
  ## hotspot sizes at each genomic location.
  mycat("N.WW.counts", verbose)
  N.WW <- N.WW.counts(highobj, lod.thrs, N.thrs, ww.thrs)
  list(NL.thrs = NL.thrs, N.thrs = N.thrs, WW.thrs = ww.thrs, NL = NL,
       N.counts = N.WW$N, WW.counts = N.WW$WW)
}
## Computes an array of size n.quant (number of spurious hotspots sizes) by
## nalpha (number of significance levels) by npos (number of locus), and for
## each spurious hotspot size/significance level threshold, it computes the
## number of traits mapping with LOD higher than the threshold at each one
## of the genomic positions.
##
NL.counts <- function(highobj, n.quant, NL.thrs)
{
  ## get the maximum spurious hotspot size (N-method)
  ## for different QTL mapping significance levels
  ## NOTE(review): quantile() dispatches on the class of 'highobj'
  ## (a highlod object), not the base numeric method.
  XX <- quantile(highobj, n.quant = n.quant)
  ## For each significance level (column of NL.thrs), flag which of the
  ## first length(x) observed quantiles exceed the corresponding threshold.
  NL.counts <- apply(NL.thrs, 2,
                     function(x,y) (x < y[seq(x)]),
                     XX)
  ## dimnames(NL.counts)[[2]] <- seq(n.quant)
  NL.counts
}
## Computes a matrix of size nlod (number of mapping thresholds) by npos
## (number of locus), and for each LOD threshold, it computes the number
## of traits mapping with LOD higher than the threshold at each one of
## the genomic positions. The same counts are used by the N- and WW-methods.
##
N.WW.counts <- function(highobj, lod.thrs, N.thrs, WW.thrs)
{
  ## XX = genome position by number of traits above LOD threshold.
  ## NOTE(review): max() dispatches on the class of 'highobj'; it is
  ## expected to yield the maximum hotspot size per LOD threshold.
  XX <- max(highobj, lod.thr = lod.thrs)
  ## N.counts[lod,alpha] = TRUE if max hotspot size using lod is above alpha perm threshold.
  N.counts <- apply(N.thrs, 2, function(x,y) (x < y), XX)
  ## WW.counts[lod,alpha] = TRUE if max hotspot size using lod is above alpha perm threshold.
  WW.counts <- apply(WW.thrs, 2, function(x,y) (x < y), XX)
  dimnames(N.counts) <- dimnames(WW.counts) <- dimnames(N.thrs)
  list(N = N.counts, WW = WW.counts)
}
}
mycat <- function(title, verbose = FALSE, init = FALSE, last = "\n")
{
  ## Progress/status printer.  Silent unless 'verbose' is truthy; at
  ## verbose levels above 1 it first emits a timing prefix taken from
  ## proc.time() (or a column-header row when init = TRUE), then the
  ## message itself followed by 'last'.
  if(!verbose)
    return(invisible(NULL))
  if(verbose > 1) {
    if(init) {
      cat("user system elapsed time\n")
    } else {
      timings <- round(as.numeric(proc.time()[1:3]))
      cat(timings, "")
    }
  }
  cat(title, last)
}
|
#' Compare two phylogenetic trees
#'
#' @param phylo1 A multiphylo object with two trees to be compared between them or a single phylo object to be compared to phylo2
#' @param phylo2 A phylo object to be compared to phylo1
#' @return A named list with are_same_tree property set to either TRUE or FALSE
#' @examples
#' are_same <- phylo_compare(c(ape::rcoal(5), ape::rcoal(5)))
#' @seealso \url{https://github.com/phylotastic/phylo_services_docs/tree/master/ServiceDescription}
# phylo_compare relied upon a server which is now offline. The code is commented out below.
# phylo_compare <- function(phylo1, phylo2 = NULL) {
# url <- paste0(get_base_url(), "md/dp/compare_trees")
# if(is.list(phylo1) & length(phylo1) == 2){
# trees <- phylo1
# } else {
# if(inherits(phylo1, "phylo") & inherits(phylo2, "phylo")){
# trees <- list(phylo1, phylo2)
# # class(trees) <- "multiPhylo" # this is unnecessary
# }
# }
# if(length(trees)!=2) {
# stop("Must have two trees as input")
# }
# body <- list(tree1_nwk = ape::write.tree(trees[[1]]), tree2_nwk = ape::write.tree(trees[[2]]))
# response <- httr::POST(url, body = body, encode = "json")
# result <- httr::content(response,"parsed")
# return(result$are_same_tree)
# }
| /R/comparetrees.R | no_license | phylotastic/rphylotastic | R | false | false | 1,268 | r | #' Compare two phylogenetic trees
#'
#' @param phylo1 A multiphylo object with two trees to be compared between them or a single phylo object to be compared to phylo2
#' @param phylo2 A phylo object to be compared to phylo1
#' @return A named list with are_same_tree property set to either TRUE or FALSE
#' @examples
#' are_same <- phylo_compare(c(ape::rcoal(5), ape::rcoal(5)))
#' @seealso \url{https://github.com/phylotastic/phylo_services_docs/tree/master/ServiceDescription}
# phylo_compare relied upon a server which is now offline. The code is commented out below.
# phylo_compare <- function(phylo1, phylo2 = NULL) {
# url <- paste0(get_base_url(), "md/dp/compare_trees")
# if(is.list(phylo1) & length(phylo1) == 2){
# trees <- phylo1
# } else {
# if(inherits(phylo1, "phylo") & inherits(phylo2, "phylo")){
# trees <- list(phylo1, phylo2)
# # class(trees) <- "multiPhylo" # this is unnecessary
# }
# }
# if(length(trees)!=2) {
# stop("Must have two trees as input")
# }
# body <- list(tree1_nwk = ape::write.tree(trees[[1]]), tree2_nwk = ape::write.tree(trees[[2]]))
# response <- httr::POST(url, body = body, encode = "json")
# result <- httr::content(response,"parsed")
# return(result$are_same_tree)
# }
|
library(dashBootstrapComponents)
library(dashHtmlComponents)
# Reusable card body: a header plus a titled paragraph.
card_content_1 <- list(
  dbcCardHeader("Card header"),
  dbcCardBody(
    list(
      htmlH5("Card title", className = "card-title"),
      htmlP(
        "This is some card content that we'll reuse",
        className = "card-text"
      )
    )
  )
)
# Card body holding a styled blockquote with an attributed footer.
card_content_2 <- dbcCardBody(
  list(
    htmlBlockquote(
      list(
        htmlP(
          "A learning experience is one of those things that says,
          'You know that thing you just did? Don't do that.'"
        ),
        htmlFooter(
          htmlSmall("Douglas Adams", className = "text-muted")
        )
      ),
      className = "blockquote"
    )
  )
)
# Card with a top image, descriptive text and a call-to-action button.
card_content_3 <- list(
  dbcCardImg(src = "/static/images/placeholder286x180.png", top = TRUE),
  dbcCardBody(
    list(
      htmlH5("Card with image", className = "card-title"),
      htmlP("This card has an image on top, and a button below",
        className = "card-text",
      ),
      dbcButton("Click me!", color = "primary")
    )
  )
)
# Masonry-style card columns demonstrating the contextual color palette;
# inverse = TRUE switches to light text for dark card backgrounds.
cards <- dbcCardColumns(
  list(
    dbcCard(card_content_1, color = "primary", inverse = TRUE),
    dbcCard(card_content_2, body = TRUE),
    dbcCard(card_content_1, color = "secondary", inverse = TRUE),
    dbcCard(card_content_3, color = "info", inverse = TRUE),
    dbcCard(card_content_1, color = "success", inverse = TRUE),
    dbcCard(card_content_1, color = "warning", inverse = TRUE),
    dbcCard(card_content_1, color = "danger", inverse = TRUE),
    dbcCard(card_content_3, color = "light"),
    dbcCard(card_content_1, color = "dark", inverse = TRUE)
  )
)
| /docs/components_page/components/card/layout/columns.R | permissive | goranstojkoski/dash-bootstrap-components | R | false | false | 1,617 | r | library(dashBootstrapComponents)
library(dashHtmlComponents)
# Reusable card body: a header plus a titled paragraph.
card_content_1 <- list(
  dbcCardHeader("Card header"),
  dbcCardBody(
    list(
      htmlH5("Card title", className = "card-title"),
      htmlP(
        "This is some card content that we'll reuse",
        className = "card-text"
      )
    )
  )
)
# Card body holding a styled blockquote with an attributed footer.
card_content_2 <- dbcCardBody(
  list(
    htmlBlockquote(
      list(
        htmlP(
          "A learning experience is one of those things that says,
          'You know that thing you just did? Don't do that.'"
        ),
        htmlFooter(
          htmlSmall("Douglas Adams", className = "text-muted")
        )
      ),
      className = "blockquote"
    )
  )
)
# Card with a top image, descriptive text and a call-to-action button.
card_content_3 <- list(
  dbcCardImg(src = "/static/images/placeholder286x180.png", top = TRUE),
  dbcCardBody(
    list(
      htmlH5("Card with image", className = "card-title"),
      htmlP("This card has an image on top, and a button below",
        className = "card-text",
      ),
      dbcButton("Click me!", color = "primary")
    )
  )
)
# Masonry-style card columns demonstrating the contextual color palette;
# inverse = TRUE switches to light text for dark card backgrounds.
cards <- dbcCardColumns(
  list(
    dbcCard(card_content_1, color = "primary", inverse = TRUE),
    dbcCard(card_content_2, body = TRUE),
    dbcCard(card_content_1, color = "secondary", inverse = TRUE),
    dbcCard(card_content_3, color = "info", inverse = TRUE),
    dbcCard(card_content_1, color = "success", inverse = TRUE),
    dbcCard(card_content_1, color = "warning", inverse = TRUE),
    dbcCard(card_content_1, color = "danger", inverse = TRUE),
    dbcCard(card_content_3, color = "light"),
    dbcCard(card_content_1, color = "dark", inverse = TRUE)
  )
)
|
\name{ensemble}
\alias{relation_ensemble}
\alias{as.relation_ensemble}
\alias{is.relation_ensemble}
\title{Relation Ensembles}
\description{Creation and manipulation of relation ensembles.}
\usage{
relation_ensemble(..., list = NULL)
as.relation_ensemble(x)
is.relation_ensemble(x)
}
\arguments{
\item{\dots}{\R objects representing relations, or coercible to such.}
\item{list}{a list of \R objects as in \code{\dots}.}
\item{x}{for coercion with \code{as.relation_ensemble()}, an \R object
as in \code{\dots}; for testing with \code{is.relation_ensemble()},
an arbitrary \R object.}
}
\details{
\code{relation_ensemble()} creates non-empty \dQuote{relation
ensembles}, i.e., collections of relations \eqn{R_i = (D, G_i)} with
the same domain \eqn{D} and possibly different graphs \eqn{G_i}.
Such ensembles are implemented as suitably classed lists of relation
objects, making it possible to use \code{lapply()} for computations on
the individual relations in the ensemble. Available methods for
relation ensembles include those for subscripting, \code{c()},
\code{t()}, \code{rep()}, and \code{print()}.
}
\examples{
data("Cetacea")
## Consider each variable an equivalence relation on the objects.
## Note that 2 variables (LACHRYMAL_AND_JUGAL_BONES and HEAD_BONES) have
## missing values, and hence are excluded.
ind <- sapply(Cetacea, function(s) all(!is.na(s)))
relations <- as.relation_ensemble(Cetacea[, ind])
## This gives a relation ensemble of length 14 (number of complete
## variables in the data set).
print(relations)
## Are there any duplicated relations?
any(duplicated(relations))
## Replicate and combine ...
thrice <- c(rep(relations, 2), relations)
## Extract unique elements again:
all.equal(unique(thrice), relations)
## Note that unique() does not preserve attributes, and hence names.
## In case we want otherwise:
all.equal(thrice[!duplicated(thrice)], relations)
## Subscripting:
relation_dissimilarity(relations[1 : 2], relations["CLASS"])
## Which relation is "closest" to the classification?
d <- relation_dissimilarity(relations)
sort(as.matrix(d)[, "CLASS"])[-1]
}
\keyword{math}
| /man/ensemble.Rd | no_license | cran/relations | R | false | false | 2,146 | rd | \name{ensemble}
\alias{relation_ensemble}
\alias{as.relation_ensemble}
\alias{is.relation_ensemble}
\title{Relation Ensembles}
\description{Creation and manipulation of relation ensembles.}
\usage{
relation_ensemble(..., list = NULL)
as.relation_ensemble(x)
is.relation_ensemble(x)
}
\arguments{
\item{\dots}{\R objects representing relations, or coercible to such.}
\item{list}{a list of \R objects as in \code{\dots}.}
\item{x}{for coercion with \code{as.relation_ensemble()}, an \R object
as in \code{\dots}; for testing with \code{is.relation_ensemble()},
an arbitrary \R object.}
}
\details{
\code{relation_ensemble()} creates non-empty \dQuote{relation
ensembles}, i.e., collections of relations \eqn{R_i = (D, G_i)} with
the same domain \eqn{D} and possibly different graphs \eqn{G_i}.
Such ensembles are implemented as suitably classed lists of relation
objects, making it possible to use \code{lapply()} for computations on
the individual relations in the ensemble. Available methods for
relation ensembles include those for subscripting, \code{c()},
\code{t()}, \code{rep()}, and \code{print()}.
}
\examples{
data("Cetacea")
## Consider each variable an equivalence relation on the objects.
## Note that 2 variables (LACHRYMAL_AND_JUGAL_BONES and HEAD_BONES) have
## missing values, and hence are excluded.
ind <- sapply(Cetacea, function(s) all(!is.na(s)))
relations <- as.relation_ensemble(Cetacea[, ind])
## This gives a relation ensemble of length 14 (number of complete
## variables in the data set).
print(relations)
## Are there any duplicated relations?
any(duplicated(relations))
## Replicate and combine ...
thrice <- c(rep(relations, 2), relations)
## Extract unique elements again:
all.equal(unique(thrice), relations)
## Note that unique() does not preserve attributes, and hence names.
## In case we want otherwise:
all.equal(thrice[!duplicated(thrice)], relations)
## Subscripting:
relation_dissimilarity(relations[1 : 2], relations["CLASS"])
## Which relation is "closest" to the classification?
d <- relation_dissimilarity(relations)
sort(as.matrix(d)[, "CLASS"])[-1]
}
\keyword{math}
|
#-------------------Q1---------------------
#We want to explore the tissue_gene_expression predictors by plotting them.
data("tissue_gene_expression")
dim(tissue_gene_expression$x)
#We want to get an idea of which observations are close to each other, but, as you can see from the
#dimensions, the predictors are 500-dimensional, making plotting difficult. Plot the first two principal
#components with color representing tissue type.
#Which tissue is in a cluster by itself?
x <- tissue_gene_expression$x
pc <- prcomp(x)
# BUG FIX: the PCA object is named 'pc', not 'pca'; 'pca$x' would error
# (or silently pick up a stale object left over in the workspace).
dat <- as.data.frame(pc$x) %>% select(PC1, PC2)
dat %>% mutate(tissue = tissue_gene_expression$y) %>% ggplot(aes(x = PC1, y = PC2, color = tissue)) + geom_point()
#-------------------Q2---------------------
#The predictors for each observation are measured using the same device and experimental procedure.
#This introduces biases that can affect all the predictors from one observation. For each observation,
#compute the average across all predictors, and then plot this against the first PC with color
#representing tissue. Report the correlation.
#What is the correlation?
avgs <- rowMeans(tissue_gene_expression$x)
data.frame(pc_1 = pc$x[,1], avg = avgs,
           tissue = tissue_gene_expression$y) %>%
  ggplot(aes(avgs, pc_1, color = tissue)) +
  geom_point()
cor(avgs, pc$x[,1])
#-------------------Q3---------------------
#We see an association with the first PC and the observation averages. Redo the PCA but only after
#removing the center. Part of the code is provided for you.
means <- rowMeans(x)
x <- with(tissue_gene_expression, sweep(x, 1, means)) #remember that sweep has FUN="-" as default
pc <- prcomp(x)
data.frame(pc_1 = pc$x[,1], pc_2 = pc$x[,2],
           tissue = tissue_gene_expression$y) %>%
  ggplot(aes(pc_1, pc_2, color = tissue)) +
  geom_point()
##This is an example to understand the use of sweep
mat <- matrix(seq(from = 2, to = 18, by = 2), 3, 3)
sweep(mat, 1, rowMeans(mat))
sweep(mat, 1, mean(mat))
#-------------------Q4---------------------
#For the 7th PC, which two tissues have the second greatest median difference?
data.frame(pc_7 = pc$x[,7], tissue = tissue_gene_expression$y) %>%
  ggplot(aes(tissue, pc_7)) + geom_boxplot()
for(i in 1:10){
  boxplot(pc$x[,i] ~ tissue_gene_expression$y, main = paste("PC", i))
}
#-------------------Q5---------------------
#Plot the percent variance explained by PC number. Hint: use the summary function.
#How many PCs are required to reach a cumulative percent variance explained greater than 50%?
#A learner's answer
importance_df <- data.frame(summary(pc)$importance)
importance_df <- importance_df[2,] %>%
  gather(key = pc, value = importance)
importance_df <- importance_df %>% mutate(pc_index = as.integer(str_remove(importance_df$pc, "PC")))
importance_df$pc <- factor(importance_df$pc, levels = importance_df$pc[order(importance_df$pc_index)])
importance_df <- importance_df %>% mutate(cum_sum = cumsum(importance))
importance_df %>%
  filter(pc_index < 20) %>%
  arrange(pc_index, cum_sum) %>%
  ggplot(aes(x = pc, y = cum_sum, fill = pc)) +
  geom_col() +
  scale_y_continuous(breaks = seq(0, 1, 0.1)) +
  theme_grey()
#The staff's answer
plot(summary(pc)$importance[3,])
| /8. Machine learning/Comprehension checks/11.2 Dimension reduction MNIST.R | no_license | aquijanoruiz/Harvardx_data_science | R | false | false | 3,197 | r | #-------------------Q1---------------------
#We want to explore the tissue_gene_expression predictors by plotting them.
data("tissue_gene_expression")
dim(tissue_gene_expression$x)
#We want to get an idea of which observations are close to each other, but, as you can see from the
#dimensions, the predictors are 500-dimensional, making plotting difficult. Plot the first two principal
#components with color representing tissue type.
#Which tissue is in a cluster by itself?
x<- tissue_gene_expression$x
pc <- prcomp(x)
dat <- as.data.frame(pca$x) %>% select(PC1,PC2)
dat %>% mutate(tissue = tissue_gene_expression$y) %>% ggplot(aes(x=PC1, y=PC2,color=tissue)) + geom_point()
#-------------------Q2---------------------
#The predictors for each observation are measured using the same device and experimental procedure.
#This introduces biases that can affect all the predictors from one observation. For each observation,
#compute the average across all predictors, and then plot this against the first PC with color
#representing tissue. Report the correlation.
#What is the correlation?
avgs <- rowMeans(tissue_gene_expression$x)
data.frame(pc_1 = pc$x[,1], avg = avgs,
tissue = tissue_gene_expression$y) %>%
ggplot(aes(avgs, pc_1, color = tissue)) +
geom_point()
cor(avgs, pc$x[,1])
#-------------------Q3---------------------
#We see an association with the first PC and the observation averages. Redo the PCA but only after
#removing the center. Part of the code is provided for you.
means <- rowMeans(x)
x <- with(tissue_gene_expression, sweep(x, 1, means)) #remember that sweep has FUN="-" as default
pc <- prcomp(x)
data.frame(pc_1 = pc$x[,1], pc_2 = pc$x[,2],
tissue = tissue_gene_expression$y) %>%
ggplot(aes(pc_1, pc_2, color = tissue)) +
geom_point()
##This is an example to understand the use of sweep
mat <- matrix(seq(from=2,to=18,by=2),3,3)
sweep(mat, 1, rowMeans(mat))
sweep(mat, 1, mean(mat))
#-------------------Q4---------------------
#For the 7th PC, which two tissues have the second greatest median difference?
data.frame(pc_7=pc$x[,7], tissue=tissue_gene_expression$y) %>%
ggplot(aes(tissue,pc_7)) + geom_boxplot()
for(i in 1:10){
boxplot(pc$x[,i] ~ tissue_gene_expression$y, main = paste("PC", i))
}
#-------------------Q5---------------------
#Plot the percent variance explained by PC number. Hint: use the summary function.
#How many PCs are required to reach a cumulative percent variance explained greater than 50%?
#A learner's answer
importance_df <- data.frame(summary(pc)$importance)
importance_df <- importance_df[2,] %>%
gather(key = pc, value = importance)
importance_df <- importance_df %>% mutate(pc_index = as.integer(str_remove(importance_df$pc, "PC")))
importance_df$pc <- factor(importance_df$pc, levels = importance_df$pc[order(importance_df$pc_index)])
importance_df <- importance_df %>% mutate(cum_sum = cumsum(importance))
importance_df %>%
filter(pc_index < 20) %>%
arrange(pc_index, cum_sum) %>%
ggplot(aes(x = pc, y = cum_sum, fill=pc)) +
geom_col() +
scale_y_continuous(breaks = seq(0,1,0.1)) +
theme_grey()
#The staff's answer
plot(summary(pc)$importance[3,])
|
#' @title Restriction enzyme recognition sites in spacer sequences
#'
#' @description Add restriction site enzymes annotation.
#'
#' @param object A \linkS4class{GuideSet} or a
#' \linkS4class{PairedGuideSet} object.
#' @param enzymeNames Character vector of enzyme names.
#' @param patterns Optional named character vector for custom restriction site
#' patterns. Vector names are treated as enzymes names. See example.
#' @param includeDefault Should commonly-used enzymes be included?
#' TRUE by default.
#' @param flanking5,flanking3 Character string indicating the 5' or 3' flanking
#'     sequence, respectively, of the spacer sequence in the lentiviral vector.
#' @param ... Additional arguments, currently ignored.
#'
#' @return Adds a DataFrame indicating
#' whether cutting sites for the specified enzymes are found in the gRNA
#' cassette (flanking sequences + spacer sequences).
#'
#' @details Restriction enzymes are often used for cloning purposes during the
#' oligonucleotide synthesis of gRNA lentiviral constructs. Consequently,
#' it is often necessary to avoid restriction sites of the used restriction
#' enzymes in and around the spacer sequences.
#' \code{addRestrictionEnzymes} allows for
#' flagging problematic spacer sequences by searching for restriction sites
#' in the [flanking5][spacer][flanking3] sequence.
#'
#' The following enzymes are included when \code{includeDefault=TRUE}:
#' EcoRI, KpnI, BsmBI, BsaI, BbsI, PacI, and MluI.
#'
#' Custom recognition sequences in \code{patterns} may use the IUPAC
#' nucleotide code, excluding symbols indicating gaps. Avoid providing
#' enzyme names in \code{patterns} that are already included by default (if
#' \code{includeDefault=TRUE}) or given by \code{enzymeNames}. Patterns
#' with duplicated enzyme names will be silently ignored, even if the
#' recognition sequence differs. See example.
#'
#'
#' @author Jean-Philippe Fortin, Luke Hoberecht
#'
#' @seealso \code{\link{enzymeAnnotation}} to retrieve existing enzyme
#' annotation from a \linkS4class{GuideSet} object.
#'
#' @examples
#' data(SpCas9, package="crisprBase")
#' seq <- c("ATTTCCGGAGGCGAATTCGGCGGGAGGAGGAAGACCGG")
#' guideSet <- findSpacers(seq, crisprNuclease=SpCas9)
#'
#' # Using default enzymes:
#' guideSet <- addRestrictionEnzymes(guideSet)
#'
#' # Using custom enzymes:
#' guideSet <- addRestrictionEnzymes(guideSet,
#' patterns=c(enz1="GGTCCAA",
#' enz2="GGTCG"))
#'
#' # Avoid duplicate enzyme names
#' guideSet <- addRestrictionEnzymes(guideSet,
#' patterns=c(EcoRI="GANNTC")) # ignored
#'
#' @export
#' @rdname addRestrictionEnzymes
#' @importFrom S4Vectors split mcols<-
setMethod("addRestrictionEnzymes", "GuideSet", function(object,
                                                        enzymeNames=NULL,
                                                        patterns=NULL,
                                                        includeDefault=TRUE,
                                                        flanking5="ACCG",
                                                        flanking3="GTTT"
){
    # One row per guide: logical flags, one column per enzyme, marking
    # whether that enzyme's recognition site occurs in the gRNA cassette.
    enzymeAnnotation <- getRestrictionEnzymes(object,
                                              enzymeNames=enzymeNames,
                                              patterns=patterns,
                                              includeDefault=includeDefault,
                                              flanking5=flanking5,
                                              flanking3=flanking3)
    # Split into a per-guide list; the explicit factor levels preserve the
    # guide order of `object` in the resulting split.
    dfs <- S4Vectors::split(enzymeAnnotation,
                            f=factor(rownames(enzymeAnnotation),
                                     levels=names(object)))
    # Store the annotation as a metadata column on the GuideSet.
    S4Vectors::mcols(object)[["enzymeAnnotation"]] <- dfs
    return(object)
})
#' @export
#' @rdname addRestrictionEnzymes
setMethod("addRestrictionEnzymes",
          "PairedGuideSet", function(object,
                                     enzymeNames=NULL,
                                     patterns=NULL,
                                     includeDefault=TRUE,
                                     flanking5="ACCG",
                                     flanking3="GTTT"
){
    object <- .validatePairedGuideSet(object)
    # Flatten the pair into a single GuideSet so the annotation is computed
    # once, then copy the new columns back via .addColumnsFromUnifiedGuideSet.
    unifiedGuideSet <- .pairedGuideSet2GuideSet(object)
    unifiedGuideSet <- addRestrictionEnzymes(unifiedGuideSet,
                                             enzymeNames=enzymeNames,
                                             patterns=patterns,
                                             includeDefault=includeDefault,
                                             flanking5=flanking5,
                                             flanking3=flanking3)
    out <- .addColumnsFromUnifiedGuideSet(object,
                                          unifiedGuideSet)
    return(out)
})
#' @rdname addRestrictionEnzymes
#' @export
# NULL method: annotating an absent (NULL) object is a no-op returning NULL.
setMethod("addRestrictionEnzymes", "NULL", function(object){
    return(NULL)
})
# Core engine to get restriction enzymes annotation
#' @importFrom S4Vectors DataFrame
getRestrictionEnzymes <- function(guideSet,
                                  enzymeNames=NULL,
                                  patterns=NULL,
                                  includeDefault=TRUE,
                                  flanking5="ACCG",
                                  flanking3="GTTT"
){
    guideSet <- .validateGuideSet(guideSet)
    # Named vector of regex patterns, one per enzyme (matches either strand).
    enzymeMotifs <- .enzymeMotifs(includeDefault=includeDefault,
                                  enzymeNames=enzymeNames,
                                  patterns=patterns)
    # The search space is the full cassette: flanking5 + spacer + flanking3.
    spacers <- .spacersWithFlankingRegions(guideSet=guideSet,
                                           flanking5=flanking5,
                                           flanking3=flanking3)
    # One logical vector per enzyme: TRUE where its site occurs in a cassette.
    enzymeAnnotation <- lapply(enzymeMotifs,
                               grepl,
                               x=spacers)
    enzymeAnnotation <- DataFrame(enzymeAnnotation,
                                  row.names=names(guideSet))
    return(enzymeAnnotation)
}
# Generate a final list of enzyme motifs from different user inputs
# Generate the final named vector of enzyme regex motifs from user inputs.
#
# includeDefault: logical(1); prepend the default enzyme set when TRUE.
# enzymeNames: character vector of enzyme names known to crisprBase, or NULL.
# patterns: optional named character vector of custom recognition sequences.
#
# Returns a named character vector with one regex pattern per enzyme.
.enzymeMotifs <- function(includeDefault,
                          enzymeNames,
                          patterns
){
    # isTRUE/isFALSE validate type, length and NA in one step; the previous
    # `%in% c(TRUE, FALSE) && length(...)` form raised an unrelated
    # "length > 1" error for vector inputs under R >= 4.3 because the
    # vectorized `%in%` result reached `&&` before the length check ran.
    stopifnot("includeDefault must be TRUE or FALSE" = {
        isTRUE(includeDefault) || isFALSE(includeDefault)
    })
    if (includeDefault){
        enzymeNames <- unique(c(.defaultEnzymeNames, enzymeNames))
    }
    motifs <- .getEnzymeMotifs(enzymeNames)
    motifs <- .addCustomEnzymes(motifs, patterns)
    stopifnot("no restriction enzymes found" = {
        length(motifs) > 0
    })
    # Convert each IUPAC motif into a regex matching either strand.
    motifs <- vapply(motifs, .enzymeMotif2RegexPattern, FUN.VALUE=character(1))
    return(motifs)
}
# Enzyme set used when includeDefault=TRUE; see ?addRestrictionEnzymes.
.defaultEnzymeNames <- c("EcoRI", "KpnI", "BsmBI",
                         "BsaI", "BbsI", "PacI","MluI")
# Get enzyme motifs from a vector of enzyme names
#' @importFrom crisprBase motifs
.getEnzymeMotifs <- function(enzymeNames){
    # Load crisprBase's bundled `restrictionEnzymes` dataset into this
    # function's frame only, so the caller's environment is not polluted.
    data("restrictionEnzymes",
         package="crisprBase",
         envir=environment())
    .checkEnzymeNames(enzymeNames, restrictionEnzymes)
    # One recognition motif string per requested enzyme, named by enzyme.
    motifs <- vapply(enzymeNames, function(x){
        crisprBase::motifs(restrictionEnzymes[[x]],
                           as.character=TRUE)
    }, FUN.VALUE=character(1))
    return(motifs)
}
# Make sure the enzyme names are available in a pre-calculated set of
# restriction enzymes
# Validate that every requested enzyme name exists in `restrictionEnzymes`.
# Errors on a non-character input or on unknown names; an empty request is
# accepted as-is. Returns NULL invisibly.
.checkEnzymeNames <- function(enzymeNames,
                              restrictionEnzymes){
    # Guard clause: nothing requested, nothing to validate.
    if (length(enzymeNames) == 0){
        return(invisible(NULL))
    }
    stopifnot("enzymeNames must be a character vector" = {
        is.vector(enzymeNames, mode="character")
    })
    unknown <- setdiff(enzymeNames, names(restrictionEnzymes))
    if (length(unknown) > 0){
        stop("restriction enzyme name(s) not found: ",
             paste(unknown, collapse=', '))
    }
    invisible(NULL)
}
# Append validated custom patterns to `motifs`, silently dropping any whose
# name is already present (previously-added enzymes take precedence).
.addCustomEnzymes <- function(motifs,
                              patterns
){
    # Guard clause: no custom patterns supplied.
    if (is.null(patterns)){
        return(motifs)
    }
    custom <- as.list(.validateCustomEnzymes(patterns))
    newNames <- setdiff(names(custom), names(motifs))
    c(motifs, custom[newNames])
}
# Validate a user-supplied named character vector of custom enzyme patterns.
#
# Requirements: a character vector whose names are present, non-empty,
# non-NA, and unique. The sequences themselves are then validated by
# .validateDNACharacterVariable(). Returns the validated patterns.
.validateCustomEnzymes <- function(patterns
){
    stopifnot("patterns must be a character vector" = {
        is.vector(patterns, mode="character")
    })
    stopifnot("patterns vector must have names" = {
        !is.null(names(patterns)) &&
            !any(c(NA, "") %in% names(patterns))
    })
    # anyDuplicated() returns 0 iff all names are unique. The previous
    # all.equal() comparison only worked by accident: on mismatch it returns
    # a character string, which stopifnot happens to reject as non-logical.
    stopifnot("patterns vector must have unique names" = {
        anyDuplicated(names(patterns)) == 0
    })
    patterns <- .validateDNACharacterVariable(patterns,
                                              "patterns",
                                              exactBases=FALSE)
    return(patterns)
}
# Transform string to a regex motif for sequence search
# Turn one IUPAC recognition motif into a single regex that matches the motif
# on either strand: forward pattern OR reverse-complement pattern (deduplicated
# for palindromic sites).
.enzymeMotif2RegexPattern <- function(motif
){
    strands <- c(motif, .revCompBs(motif))
    regexes <- vapply(strands, .iupacCode2RegexPattern,
                      FUN.VALUE=character(1), USE.NAMES=FALSE)
    paste0(unique(regexes), collapse="|")
}
#' @importFrom Biostrings DNA_BASES IUPAC_CODE_MAP
# Expand IUPAC ambiguity codes in `seq` into regex character classes, one per
# base, e.g. "GANNTC" -> "GA[ACGT][ACGT]TC"; plain A/C/G/T are kept verbatim.
.iupacCode2RegexPattern <- function(seq
){
    bases <- strsplit(seq, '')[[1]]
    isPlain <- bases %in% Biostrings::DNA_BASES
    expanded <- ifelse(isPlain,
                       bases,
                       paste0("[", Biostrings::IUPAC_CODE_MAP[bases], "]"))
    paste0(expanded, collapse="")
}
# Add flanking sequences to spacer sequences
# Reconstruct the gRNA cassette context for each guide by concatenating the
# 5' flank, spacer, and 3' flank. Returns a named character vector parallel
# to `guideSet`.
.spacersWithFlankingRegions <- function(guideSet,
                                        flanking5,
                                        flanking3
){
    spacers <- spacers(guideSet,
                       as.character=TRUE)
    # Each flank must validate as a single DNA string.
    flanking5 <- .validateDNACharacterVariable(flanking5, "flanking5", len=1)
    flanking3 <- .validateDNACharacterVariable(flanking3, "flanking3", len=1)
    # recycle0=TRUE makes an empty guideSet yield character(0) instead of a
    # single flank-only string.
    spacers <- paste0(flanking5, spacers, flanking3, recycle0=TRUE)
    names(spacers) <- names(guideSet)
    return(spacers)
}
| /R/addRestrictionEnzymes.R | permissive | crisprVerse/crisprDesign | R | false | false | 10,355 | r | #' @title Restriction enzyme recognition sites in spacer sequences
#'
#' @description Add restriction site enzymes annotation.
#'
#' @param object A \linkS4class{GuideSet} or a
#' \linkS4class{PairedGuideSet} object.
#' @param enzymeNames Character vector of enzyme names.
#' @param patterns Optional named character vector for custom restriction site
#' patterns. Vector names are treated as enzymes names. See example.
#' @param includeDefault Should commonly-used enzymes be included?
#' TRUE by default.
#' @param flanking5,flanking3 Character string indicating the 5' or 3' flanking
#'     sequence, respectively, of the spacer sequence in the lentiviral vector.
#' @param ... Additional arguments, currently ignored.
#'
#' @return Adds a DataFrame indicating
#' whether cutting sites for the specified enzymes are found in the gRNA
#' cassette (flanking sequences + spacer sequences).
#'
#' @details Restriction enzymes are often used for cloning purposes during the
#' oligonucleotide synthesis of gRNA lentiviral constructs. Consequently,
#' it is often necessary to avoid restriction sites of the used restriction
#' enzymes in and around the spacer sequences.
#' \code{addRestrictionEnzymes} allows for
#' flagging problematic spacer sequences by searching for restriction sites
#' in the [flanking5][spacer][flanking3] sequence.
#'
#' The following enzymes are included when \code{includeDefault=TRUE}:
#' EcoRI, KpnI, BsmBI, BsaI, BbsI, PacI, and MluI.
#'
#' Custom recognition sequences in \code{patterns} may use the IUPAC
#' nucleotide code, excluding symbols indicating gaps. Avoid providing
#' enzyme names in \code{patterns} that are already included by default (if
#' \code{includeDefault=TRUE}) or given by \code{enzymeNames}. Patterns
#' with duplicated enzyme names will be silently ignored, even if the
#' recognition sequence differs. See example.
#'
#'
#' @author Jean-Philippe Fortin, Luke Hoberecht
#'
#' @seealso \code{\link{enzymeAnnotation}} to retrieve existing enzyme
#' annotation from a \linkS4class{GuideSet} object.
#'
#' @examples
#' data(SpCas9, package="crisprBase")
#' seq <- c("ATTTCCGGAGGCGAATTCGGCGGGAGGAGGAAGACCGG")
#' guideSet <- findSpacers(seq, crisprNuclease=SpCas9)
#'
#' # Using default enzymes:
#' guideSet <- addRestrictionEnzymes(guideSet)
#'
#' # Using custom enzymes:
#' guideSet <- addRestrictionEnzymes(guideSet,
#' patterns=c(enz1="GGTCCAA",
#' enz2="GGTCG"))
#'
#' # Avoid duplicate enzyme names
#' guideSet <- addRestrictionEnzymes(guideSet,
#' patterns=c(EcoRI="GANNTC")) # ignored
#'
#' @export
#' @rdname addRestrictionEnzymes
#' @importFrom S4Vectors split mcols<-
setMethod("addRestrictionEnzymes", "GuideSet", function(object,
                                                        enzymeNames=NULL,
                                                        patterns=NULL,
                                                        includeDefault=TRUE,
                                                        flanking5="ACCG",
                                                        flanking3="GTTT"
){
    # One row per guide: logical flags, one column per enzyme, marking
    # whether that enzyme's recognition site occurs in the gRNA cassette.
    enzymeAnnotation <- getRestrictionEnzymes(object,
                                              enzymeNames=enzymeNames,
                                              patterns=patterns,
                                              includeDefault=includeDefault,
                                              flanking5=flanking5,
                                              flanking3=flanking3)
    # Split into a per-guide list; the explicit factor levels preserve the
    # guide order of `object` in the resulting split.
    dfs <- S4Vectors::split(enzymeAnnotation,
                            f=factor(rownames(enzymeAnnotation),
                                     levels=names(object)))
    # Store the annotation as a metadata column on the GuideSet.
    S4Vectors::mcols(object)[["enzymeAnnotation"]] <- dfs
    return(object)
})
#' @export
#' @rdname addRestrictionEnzymes
setMethod("addRestrictionEnzymes",
          "PairedGuideSet", function(object,
                                     enzymeNames=NULL,
                                     patterns=NULL,
                                     includeDefault=TRUE,
                                     flanking5="ACCG",
                                     flanking3="GTTT"
){
    object <- .validatePairedGuideSet(object)
    # Flatten the pair into a single GuideSet so the annotation is computed
    # once, then copy the new columns back via .addColumnsFromUnifiedGuideSet.
    unifiedGuideSet <- .pairedGuideSet2GuideSet(object)
    unifiedGuideSet <- addRestrictionEnzymes(unifiedGuideSet,
                                             enzymeNames=enzymeNames,
                                             patterns=patterns,
                                             includeDefault=includeDefault,
                                             flanking5=flanking5,
                                             flanking3=flanking3)
    out <- .addColumnsFromUnifiedGuideSet(object,
                                          unifiedGuideSet)
    return(out)
})
#' @rdname addRestrictionEnzymes
#' @export
# NULL method: annotating an absent (NULL) object is a no-op returning NULL.
setMethod("addRestrictionEnzymes", "NULL", function(object){
    return(NULL)
})
# Core engine to get restriction enzymes annotation
#' @importFrom S4Vectors DataFrame
getRestrictionEnzymes <- function(guideSet,
                                  enzymeNames=NULL,
                                  patterns=NULL,
                                  includeDefault=TRUE,
                                  flanking5="ACCG",
                                  flanking3="GTTT"
){
    guideSet <- .validateGuideSet(guideSet)
    # Named vector of regex patterns, one per enzyme (matches either strand).
    enzymeMotifs <- .enzymeMotifs(includeDefault=includeDefault,
                                  enzymeNames=enzymeNames,
                                  patterns=patterns)
    # The search space is the full cassette: flanking5 + spacer + flanking3.
    spacers <- .spacersWithFlankingRegions(guideSet=guideSet,
                                           flanking5=flanking5,
                                           flanking3=flanking3)
    # One logical vector per enzyme: TRUE where its site occurs in a cassette.
    enzymeAnnotation <- lapply(enzymeMotifs,
                               grepl,
                               x=spacers)
    enzymeAnnotation <- DataFrame(enzymeAnnotation,
                                  row.names=names(guideSet))
    return(enzymeAnnotation)
}
# Generate a final list of enzyme motifs from different user inputs
# Generate the final named vector of enzyme regex motifs from user inputs.
#
# includeDefault: logical(1); prepend the default enzyme set when TRUE.
# enzymeNames: character vector of enzyme names known to crisprBase, or NULL.
# patterns: optional named character vector of custom recognition sequences.
#
# Returns a named character vector with one regex pattern per enzyme.
.enzymeMotifs <- function(includeDefault,
                          enzymeNames,
                          patterns
){
    # isTRUE/isFALSE validate type, length and NA in one step; the previous
    # `%in% c(TRUE, FALSE) && length(...)` form raised an unrelated
    # "length > 1" error for vector inputs under R >= 4.3 because the
    # vectorized `%in%` result reached `&&` before the length check ran.
    stopifnot("includeDefault must be TRUE or FALSE" = {
        isTRUE(includeDefault) || isFALSE(includeDefault)
    })
    if (includeDefault){
        enzymeNames <- unique(c(.defaultEnzymeNames, enzymeNames))
    }
    motifs <- .getEnzymeMotifs(enzymeNames)
    motifs <- .addCustomEnzymes(motifs, patterns)
    stopifnot("no restriction enzymes found" = {
        length(motifs) > 0
    })
    # Convert each IUPAC motif into a regex matching either strand.
    motifs <- vapply(motifs, .enzymeMotif2RegexPattern, FUN.VALUE=character(1))
    return(motifs)
}
# Enzyme set used when includeDefault=TRUE; see ?addRestrictionEnzymes.
.defaultEnzymeNames <- c("EcoRI", "KpnI", "BsmBI",
                         "BsaI", "BbsI", "PacI","MluI")
# Get enzyme motifs from a vector of enzyme names
#' @importFrom crisprBase motifs
.getEnzymeMotifs <- function(enzymeNames){
    # Load crisprBase's bundled `restrictionEnzymes` dataset into this
    # function's frame only, so the caller's environment is not polluted.
    data("restrictionEnzymes",
         package="crisprBase",
         envir=environment())
    .checkEnzymeNames(enzymeNames, restrictionEnzymes)
    # One recognition motif string per requested enzyme, named by enzyme.
    motifs <- vapply(enzymeNames, function(x){
        crisprBase::motifs(restrictionEnzymes[[x]],
                           as.character=TRUE)
    }, FUN.VALUE=character(1))
    return(motifs)
}
# Make sure the enzyme names are available in a pre-calculated set of
# restriction enzymes
# Validate that every requested enzyme name exists in `restrictionEnzymes`.
# Errors on a non-character input or on unknown names; an empty request is
# accepted as-is. Returns NULL invisibly.
.checkEnzymeNames <- function(enzymeNames,
                              restrictionEnzymes){
    # Guard clause: nothing requested, nothing to validate.
    if (length(enzymeNames) == 0){
        return(invisible(NULL))
    }
    stopifnot("enzymeNames must be a character vector" = {
        is.vector(enzymeNames, mode="character")
    })
    unknown <- setdiff(enzymeNames, names(restrictionEnzymes))
    if (length(unknown) > 0){
        stop("restriction enzyme name(s) not found: ",
             paste(unknown, collapse=', '))
    }
    invisible(NULL)
}
# Append validated custom patterns to `motifs`, silently dropping any whose
# name is already present (previously-added enzymes take precedence).
.addCustomEnzymes <- function(motifs,
                              patterns
){
    # Guard clause: no custom patterns supplied.
    if (is.null(patterns)){
        return(motifs)
    }
    custom <- as.list(.validateCustomEnzymes(patterns))
    newNames <- setdiff(names(custom), names(motifs))
    c(motifs, custom[newNames])
}
# Validate a user-supplied named character vector of custom enzyme patterns.
#
# Requirements: a character vector whose names are present, non-empty,
# non-NA, and unique. The sequences themselves are then validated by
# .validateDNACharacterVariable(). Returns the validated patterns.
.validateCustomEnzymes <- function(patterns
){
    stopifnot("patterns must be a character vector" = {
        is.vector(patterns, mode="character")
    })
    stopifnot("patterns vector must have names" = {
        !is.null(names(patterns)) &&
            !any(c(NA, "") %in% names(patterns))
    })
    # anyDuplicated() returns 0 iff all names are unique. The previous
    # all.equal() comparison only worked by accident: on mismatch it returns
    # a character string, which stopifnot happens to reject as non-logical.
    stopifnot("patterns vector must have unique names" = {
        anyDuplicated(names(patterns)) == 0
    })
    patterns <- .validateDNACharacterVariable(patterns,
                                              "patterns",
                                              exactBases=FALSE)
    return(patterns)
}
# Transform string to a regex motif for sequence search
# Turn one IUPAC recognition motif into a single regex that matches the motif
# on either strand: forward pattern OR reverse-complement pattern (deduplicated
# for palindromic sites).
.enzymeMotif2RegexPattern <- function(motif
){
    strands <- c(motif, .revCompBs(motif))
    regexes <- vapply(strands, .iupacCode2RegexPattern,
                      FUN.VALUE=character(1), USE.NAMES=FALSE)
    paste0(unique(regexes), collapse="|")
}
#' @importFrom Biostrings DNA_BASES IUPAC_CODE_MAP
# Expand IUPAC ambiguity codes in `seq` into regex character classes, one per
# base, e.g. "GANNTC" -> "GA[ACGT][ACGT]TC"; plain A/C/G/T are kept verbatim.
.iupacCode2RegexPattern <- function(seq
){
    bases <- strsplit(seq, '')[[1]]
    isPlain <- bases %in% Biostrings::DNA_BASES
    expanded <- ifelse(isPlain,
                       bases,
                       paste0("[", Biostrings::IUPAC_CODE_MAP[bases], "]"))
    paste0(expanded, collapse="")
}
# Add flanking sequences to spacer sequences
# Reconstruct the gRNA cassette context for each guide by concatenating the
# 5' flank, spacer, and 3' flank. Returns a named character vector parallel
# to `guideSet`.
.spacersWithFlankingRegions <- function(guideSet,
                                        flanking5,
                                        flanking3
){
    spacers <- spacers(guideSet,
                       as.character=TRUE)
    # Each flank must validate as a single DNA string.
    flanking5 <- .validateDNACharacterVariable(flanking5, "flanking5", len=1)
    flanking3 <- .validateDNACharacterVariable(flanking3, "flanking3", len=1)
    # recycle0=TRUE makes an empty guideSet yield character(0) instead of a
    # single flank-only string.
    spacers <- paste0(flanking5, spacers, flanking3, recycle0=TRUE)
    names(spacers) <- names(guideSet)
    return(spacers)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{to_exceed}
\alias{to_exceed}
\title{to_exceed converts numeric pmf vectors to log10 exceedance distributions of equal length}
\usage{
to_exceed(pmf)
}
\arguments{
\item{pmf}{is a numeric vector of probabilities summing to one}
}
\value{
a numeric vector of the log10 exceedance distribution
}
\description{
Given a numeric pmf, returns a numeric vector of the log10 exceedance distribution
}
\seealso{
to_cdf
}
| /man/to_exceed.Rd | no_license | crumplecup/muddier | R | false | true | 503 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{to_exceed}
\alias{to_exceed}
\title{to_exceed converts numeric pmf vectors to log10 exceedance distributions of equal length}
\usage{
to_exceed(pmf)
}
\arguments{
\item{pmf}{is a numeric vector of probabilities summing to one}
}
\value{
a numeric vector of the log10 exceedance distribution
}
\description{
Given a numeric pmf, returns a numeric vector of the log10 exceedance distribution
}
\seealso{
to_cdf
}
|
The R code which Rafa runs in this video is available here:
https://github.com/genomicsclass/labs/blob/master/intro/getting_started.Rmd
You can download the individual Rmd scripts from Github by clicking on the filename, and then the 'Raw' button. Save this file to your computer, and then open it in RStudio.
Running Lab Code
All software used for the class is free and open source:
R can be downloaded and installed from CRAN (Comprehensive R Archive Network). If possible download the latest release.
We recommend using RStudio, a slick visual interface for R.
First assessment: Exercises
Bookmark this page
To download R and R studio, please visit the Course Materials section of the course.
If you have not done so already, download, install and load the swirl package.
install.packages("swirl")
library(swirl)
Go through the R Programming Basic Building Blocks tutorial and then use the skills you have just learned to answer the following questions.
Textbook
We will be using the textbook Data Analysis for the Life Science which is freely available here (link to textbook). The book is written in R markdown which includes the R code used for several of the analysis shown in the course. The book includes links to the specific R markdown documents but you can access all of these in this GitHub repository described below. We also provide an html version of the book here.
https://leanpub.com/dataanalysisforthelifesciences
https://rmarkdown.rstudio.com/
https://github.com/genomicsclass/labs
http://genomicsclass.github.io/book/
https://github.com/genomicsclass
https://github.com/genomicsclass/dagdata/tree/master/inst/extdata
library(dplyr)
## Filter and Select: extract control and treatment body weights
dat <- read.csv("femaleMiceWeights.csv")
dat
View(dat)
controls <- filter (dat, Diet=="chow")
select(dat, Bodyweight)
controls <- select(dat, Bodyweight)
unlist(controls)
# Chain the steps: chow-diet rows -> Bodyweight column -> numeric vector
controls <- filter(dat, Diet=="chow")%>%select(Bodyweight)%>%unlist
controls
mean(controls)
treatment <- filter(dat, Diet=="hf")%>% select(Bodyweight)%>%unlist
treatment
mean(treatment)
mean(treatment)-mean(controls)
population <- read.csv("femaleControlsPopulation.csv")
population <- unlist(population) # convert the one-column data.frame to a numeric vector
sample(population, 12)
mean(sample(population, 12)) # rerunning gives a different mean each time (sampling variability)
# Observed difference between the treatment and control means
obs <- mean(treatment)- mean(controls)
class(population)
# Null hypothesis: the high-fat ("hf") diet has no effect.
control <- sample (population, 12)
treatment <- sample (population, 12) # not a real treatment group:
# both samples come from the same (control) population
mean(treatment)-mean(control) # rerunning the three lines above gives a
# different difference each time
# For loop: build the null distribution of the difference in means
n <- 1000
nulls <- vector("numeric", n)
for (i in 1:n) {
control<- sample(population, 12)
treatment <- sample (population, 12)
nulls[i]<- mean(treatment)-mean(control)
}
# p-value logic
nulls > obs # TRUE wherever a null difference exceeds the observed one
sum(nulls > obs) # count the TRUEs
sum(nulls>obs)/n # proportion of TRUEs: the one-sided p-value, i.e. the
# proportion of null differences at least as large as obs
mean(nulls>obs) # the same one-sided p-value, written as a mean of logicals
mean (abs(nulls)>obs) # two-sided p-value: proportion of null differences
# whose magnitude exceeds the observed difference
# (when the null hypothesis -- hf has no effect -- is true)
max (nulls)
hist(nulls)
| /Code.R | no_license | mmaravi1/homework-1 | R | false | false | 3,755 | r | The R code which Rafa runs in this video is available here:
https://github.com/genomicsclass/labs/blob/master/intro/getting_started.Rmd
You can download the individual Rmd scripts from Github by clicking on the filename, and then the 'Raw' button. Save this file to your computer, and then open it in RStudio.
Running Lab Code
All software used for the class is free and open source:
R can be downloaded and installed from CRAN (Comprehensive R Archive Network). If possible download the latest release.
We recommend using RStudio, a slick visual interface for R.
First assessment: Exercises
Bookmark this page
To download R and R studio, please visit the Course Materials section of the course.
If you have not done so already, download, install and load the swirl package.
install.packages("swirl")
library(swirl)
Go through the R Programming Basic Building Blocks tutorial and then use the skills you have just learned to answer the following questions.
Textbook
We will be using the textbook Data Analysis for the Life Science which is freely available here (link to textbook). The book is written in R markdown which includes the R code used for several of the analysis shown in the course. The book includes links to the specific R markdown documents but you can access all of these in this GitHub repository described below. We also provide an html version of the book here.
https://leanpub.com/dataanalysisforthelifesciences
https://rmarkdown.rstudio.com/
https://github.com/genomicsclass/labs
http://genomicsclass.github.io/book/
https://github.com/genomicsclass
https://github.com/genomicsclass/dagdata/tree/master/inst/extdata
library(dplyr)
## Filter and Select
dat <- read.csv("femaleMiceWeights.csv")
dat
View(dat)
controls <- filter (dat, Diet=="chow")
select(dat, Bodyweight)
controls <- select(dat, Bodyweight)
unlist(controls)
controls <- filter(dat, Diet=="chow")%>%select(Bodyweight)%>%unlist
controls
mean(controls)
treatment <- filter(dat, Diet=="hf")%>% select(Bodyweight)%>%unlist
treatment
mean(treatment)
mean(treatment)-mean(controls)
population <- read.csv("femaleControlsPopulation.csv")
population <- unlist(population) # to convert data.frame to numeric
sample(population, 12)
mean(sample(population, 12)) # run several times it gives differ means
# observation
obs <- mean(treatment)- mean(controls)
class(population)
# Null Hypothesis, high fiber ,"hf", has not effect.
control <- sample (population, 12)
treatment <- sample (population, 12) # treatment is not treatment,
# treatment comes from same population
mean(treatment)-mean(control) # running the 3 commands over and over gives different
# differences
# For loops
n <- 1000
nulls <- vector("numeric", n)
for (i in 1:n) {
control<- sample(population, 12)
treatment <- sample (population, 12)
nulls[i]<- mean(treatment)-mean(control)
}
# p value logic
nulls > obs # how many times nulls >obs is TRUE
sum(nulls > obs) # count the TRUES
sum(nulls>obs)/n # proportion of times of the TRUES. THIS IS THE P VALUE, proortion
# of times nulls is TRUE when null hypothesis is true
mean(nulls>obs) # the same. THIS IS THE P VALUE, proportion
# of times nulls is TRUE when null hypothesis is true
mean (abs(nulls)>obs) # the same , actually better , "two tails".proportion
# of times nulls is higher or less than obs
# when null hypothesis is true (that hf has no effect )
max (nulls)
hist(nulls)
|
#------------------------------------------------------------------------------
# THIS R SCRIPT GENERATES PLOT 1 AND SAVES TO FILE IN PNG FORMAT
#------------------------------------------------------------------------------
# Load sqldf (provides read.csv.sql) and move to the project directory.
# NOTE(review): a hard-coded setwd() makes the script machine-specific;
# consider running it from the project root instead.
library(sqldf)
setwd("C:/Users/catherine/projects/ExData_Plotting1")
#------------------------------------------------------------------------------
# Read only the rows whose Date is 1/2/2007 or 2/2/2007, filtering in SQL so
# the rest of the file is never materialized as an R data frame.
#------------------------------------------------------------------------------
file <- "./household_power_consumption.txt"
pdata <- read.csv.sql(file, sql="select * from file where Date in
('1/2/2007','2/2/2007')", sep=";", header=TRUE,
stringsAsFactors=F, colClasses=c("character","character",
"numeric","numeric","numeric","numeric","numeric",
"numeric","numeric"))
#------------------------------------------------------------------------------
# Plot 1: Histogram of global active power
#------------------------------------------------------------------------------
## Keep only complete observations (drop rows containing any NA)
pdata <- pdata[complete.cases(pdata),]
#mar_defaults <- par(mar=c(5.1, 4.1, 4.1, 2))
par(mar=c(5.1, 4.1, 2.1, 2))
hist(pdata$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", col="red")
## Copy the screen device to a 480x480 PNG, then close the PNG device
dev.copy(png,"plot1.png", width=480, height=480)
dev.off()
| /plot1.R | no_license | yunshun26/ExData_Plotting1 | R | false | false | 1,481 | r | #------------------------------------------------------------------------------
# THIS R SCRIPT GENERATES PLOT 1 AND SAVES TO FILE IN PNG FORMAT
#------------------------------------------------------------------------------
# Set library and current working directory
library(sqldf)
setwd("C:/Users/catherine/projects/ExData_Plotting1")
#------------------------------------------------------------------------------
# Read section of file from date 1/2/2007 to 2/2/2007
#------------------------------------------------------------------------------
file <- "./household_power_consumption.txt"
pdata <- read.csv.sql(file, sql="select * from file where Date in
('1/2/2007','2/2/2007')", sep=";", header=TRUE,
stringsAsFactors=F, colClasses=c("character","character",
"numeric","numeric","numeric","numeric","numeric",
"numeric","numeric"))
#------------------------------------------------------------------------------
# Plot 1: Histogram
#------------------------------------------------------------------------------
## Remove incomplete observations
pdata <- pdata[complete.cases(pdata),]
#mar_defaults <- par(mar=c(5.1, 4.1, 4.1, 2))
# Shrink the top margin (default 4.1) to leave less blank space above the title.
par(mar=c(5.1, 4.1, 2.1, 2))
hist(pdata$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", col="red")
##Save file in png format and close device
# dev.copy() duplicates the on-screen plot into a 480x480 PNG device;
# dev.off() closes that device, which is what actually writes plot1.png.
dev.copy(png,"plot1.png", width=480, height=480)
dev.off()
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{bgbb.PlotFrequencyInHoldout}
\alias{bgbb.PlotFrequencyInHoldout}
\title{BG/BB Plot Frequency in Holdout}
\usage{
bgbb.PlotFrequencyInHoldout(params, n.cal, rf.matrix.holdout, censor = NULL,
plotZero = TRUE, title = "Frequency of Repeat Transactions",
xlab = "Holdout period transactions", ylab = "Customers")
}
\arguments{
\item{params}{BG/BB parameters - a vector with alpha, beta, gamma, and delta, in that order. Alpha and beta are unobserved parameters for the beta-Bernoulli transaction process. Gamma and delta are unobserved parameters for the beta-geometric dropout process.}
\item{n.cal}{number of transaction opportunities in the calibration period.}
\item{rf.matrix.holdout}{holdout period recency-frequency
matrix. It must contain columns for frequency in the holdout
period ("x.star"), the number of transaction opportunities in the
holdout period ("n.star"), and the number of customers with each
frequency ("custs").}
\item{censor}{optional. Any calibration period frequency at this
number, or above it, will be binned together. If the censor number
is greater than the maximum recency in the recency-frequency
matrix, the maximum recency will be used as the censor number.}
\item{plotZero}{If FALSE, the histogram will exclude the zero bin.}
\item{title}{title placed on the top-center of the plot.}
\item{xlab}{descriptive label for the x axis.}
\item{ylab}{descriptive label for the y axis.}
}
\value{
Holdout period repeat transaction frequency comparison
matrix (actual vs. expected).
}
\description{
Plots the actual and expected number of customers who made a
certain number of transactions in the holdout period, binned
according to holdout period frequencies. Also returns a matrix
with this comparison and the number of customers in each bin.
}
\examples{
data(donationsSummary)
rf.matrix <- donationsSummary$rf.matrix
rf.matrix.holdout <- donationsSummary$rf.matrix.holdout
# donationsSummary$rf.matrix and donationsSummary$rf.matrix.holdout already
# have appropriate column names
# starting-point parameters
startingparams <- c(1, 1, 0.5, 3)
# estimated parameters
est.params <- bgbb.EstimateParameters(rf.matrix, startingparams)
# number of periods in the calibration period
n.cal = max(rf.matrix[,"n.cal"])
bgbb.PlotFrequencyInHoldout (est.params, n.cal, rf.matrix.holdout)
}
\references{
Fader, Peter S., Bruce G.S. Hardie, and Jen Shang. \dQuote{Customer-Base Analysis in a Discrete-Time Noncontractual Setting.} \emph{Marketing Science} 29(6), pp. 1086-1108. 2010. INFORMS. \url{http://www.brucehardie.com/papers/020/}
}
| /man/bgbb.PlotFrequencyInHoldout.Rd | no_license | jamespaul007/BTYD | R | false | false | 2,629 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{bgbb.PlotFrequencyInHoldout}
\alias{bgbb.PlotFrequencyInHoldout}
\title{BG/BB Plot Frequency in Holdout}
\usage{
bgbb.PlotFrequencyInHoldout(params, n.cal, rf.matrix.holdout, censor = NULL,
plotZero = TRUE, title = "Frequency of Repeat Transactions",
xlab = "Holdout period transactions", ylab = "Customers")
}
\arguments{
\item{params}{BG/BB parameters - a vector with alpha, beta, gamma, and delta, in that order. Alpha and beta are unobserved parameters for the beta-Bernoulli transaction process. Gamma and delta are unobserved parameters for the beta-geometric dropout process.}
\item{n.cal}{number of transaction opportunities in the calibration period.}
\item{rf.matrix.holdout}{holdout period recency-frequency
matrix. It must contain columns for frequency in the holdout
period ("x.star"), the number of transaction opportunities in the
holdout period ("n.star"), and the number of customers with each
frequency ("custs").}
\item{censor}{optional. Any calibration period frequency at this
number, or above it, will be binned together. If the censor number
is greater than the maximum recency in the recency-frequency
matrix, the maximum recency will be used as the censor number.}
\item{plotZero}{If FALSE, the histogram will exclude the zero bin.}
\item{title}{title placed on the top-center of the plot.}
\item{xlab}{descriptive label for the x axis.}
\item{ylab}{descriptive label for the y axis.}
}
\value{
Holdout period repeat transaction frequency comparison
matrix (actual vs. expected).
}
\description{
Plots the actual and expected number of customers who made a
certain number of transactions in the holdout period, binned
according to holdout period frequencies. Also returns a matrix
with this comparison and the number of customers in each bin.
}
\examples{
data(donationsSummary)
rf.matrix <- donationsSummary$rf.matrix
rf.matrix.holdout <- donationsSummary$rf.matrix.holdout
# donationsSummary$rf.matrix and donationsSummary$rf.matrix.holdout already
# have appropriate column names
# starting-point parameters
startingparams <- c(1, 1, 0.5, 3)
# estimated parameters
est.params <- bgbb.EstimateParameters(rf.matrix, startingparams)
# number of periods in the calibration period
n.cal = max(rf.matrix[,"n.cal"])
bgbb.PlotFrequencyInHoldout (est.params, n.cal, rf.matrix.holdout)
}
\references{
Fader, Peter S., Bruce G.S. Hardie, and Jen Shang. \dQuote{Customer-Base Analysis in a Discrete-Time Noncontractual Setting.} \emph{Marketing Science} 29(6), pp. 1086-1108. 2010. INFORMS. \url{http://www.brucehardie.com/papers/020/}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DL_Soccerway_season_logo.R
\name{DL_Soccerway_season_logo}
\alias{DL_Soccerway_season_logo}
\title{DL_Soccerway_season_logo}
\usage{
DL_Soccerway_season_logo(countryCode, url)
}
\arguments{
\item{countryCode}{e.g. "ESP"}
\item{url}{e.g. "http://nl.soccerway.com/national/spain/segunda-division/20152016/regular-season/r32028/map"}
}
\value{
tibble with columns CountryCode ("ESP"), Team ("UD Almería"), LogoURL ("http://cache.images.core.optasports.com/soccer/teams/150x150/2049.png")
}
\description{
Creates a tibble with one logo URL per club. The \code{url} input usually comes from \code{DL_Soccerway_season_data()}, specifically its \code{URL.logo} column. Example:
df = DL_Soccerway_data("NLD1"); DL_Soccerway_season_logo("NLD",df$URL.logo)
}
| /man/DL_Soccerway_season_logo.Rd | no_license | tristanbains/rFootballAnalysis | R | false | true | 775 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DL_Soccerway_season_logo.R
\name{DL_Soccerway_season_logo}
\alias{DL_Soccerway_season_logo}
\title{DL_Soccerway_season_logo}
\usage{
DL_Soccerway_season_logo(countryCode, url)
}
\arguments{
\item{countryCode}{e.g. "ESP"}
\item{url}{e.g. "http://nl.soccerway.com/national/spain/segunda-division/20152016/regular-season/r32028/map"}
}
\value{
tibble with columns CountryCode ("ESP"), Team ("UD Almería"), LogoURL ("http://cache.images.core.optasports.com/soccer/teams/150x150/2049.png")
}
\description{
Creates a tibble with one logo URL per club. The \code{url} input usually comes from \code{DL_Soccerway_season_data()}, specifically its \code{URL.logo} column. Example:
df = DL_Soccerway_data("NLD1"); DL_Soccerway_season_logo("NLD",df$URL.logo)
}
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.99939408264402e-241, 1.97274569258757e-154, 5.49464572566663e+109, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 9L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_beta_interleaved_matrices/AFL_communities_individual_based_sampling_beta_interleaved_matrices/communities_individual_based_sampling_beta_interleaved_matrices_valgrind_files/1615838887-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 394 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.99939408264402e-241, 1.97274569258757e-154, 5.49464572566663e+109, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 9L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta_interleaved_matrices,testlist)
str(result) |
\name{cdf.discrete}
\alias{cdf.discrete}
\title{
Cumulative Distribution Function of a known discrete distribution
}
\description{
Returns an object similar to what is produced by \code{\link{ecdf}}, but based on a known discrete distribution.
}
\usage{
cdf.discrete(x, dist = c("binom", "geom", "hyper", "nbinom", "pois"), ...)
}
\arguments{
\item{x}{
numeric vector of the observations.
}
\item{dist}{
character string naming a discrete distribution (\code{"binom"} by default).
}
\item{\dots}{
parameters of the distribution specified by \code{dist}.
}
}
\details{
The function is intended to be used in goodness-of-fit tests for discrete distributions, such as those proposed in the \code{dgof} package.
}
\author{
Maxime HERVE <maxime.herve@univ-rennes1.fr>
}
\examples{
if(require(dgof)) {
set.seed(1124)
resp <- rpois(20,2)
cvm.test(resp,cdf.discrete(resp,"pois",2))
}
} | /man/cdf.discrete.Rd | no_license | cran/RVAideMemoire | R | false | false | 918 | rd | \name{cdf.discrete}
\alias{cdf.discrete}
\title{
Cumulative Distribution Function of a known discrete distribution
}
\description{
Returns an object similar to what is produced by \code{\link{ecdf}}, but based on a known discrete distribution.
}
\usage{
cdf.discrete(x, dist = c("binom", "geom", "hyper", "nbinom", "pois"), ...)
}
\arguments{
\item{x}{
numeric vector of the observations.
}
\item{dist}{
character string naming a discrete distribution (\code{"binom"} by default).
}
\item{\dots}{
parameters of the distribution specified by \code{dist}.
}
}
\details{
The function is intended to be used in goodness-of-fit tests for discrete distributions, such as those proposed in the \code{dgof} package.
}
\author{
Maxime HERVE <maxime.herve@univ-rennes1.fr>
}
\examples{
if(require(dgof)) {
set.seed(1124)
resp <- rpois(20,2)
cvm.test(resp,cdf.discrete(resp,"pois",2))
}
} |
## Computes or retrieve cached inverse matrix
##
## This function creates a special "matrix" object that can cach its inverse
## Build a cache-aware matrix wrapper.
##
## Returns a list of closures sharing this function's environment:
##   set(y)          -- store a new matrix y and drop any cached inverse
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse (used by cacheSolve)
##   getinverse()    -- return the cached inverse, or NULL if not yet set
##
## x: the matrix to wrap (assumed invertible by cacheSolve; not checked here)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inv <<- NULL  # a new matrix invalidates the cached inverse
  }
  get <- function() x
  # Parameter renamed from 'solve' to avoid shadowing base::solve().
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get, setinverse = setinverse,
       getinverse = getinverse)
}
## This function computes the inverse of the special "matrix" or retrieves.
## Return the inverse of the cache-matrix 'x' (a list produced by
## makeCacheMatrix), reusing the cached value when one is available and
## computing (then caching) it otherwise.  Extra arguments in ... are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    inverse
  } else {
    message("getting cached data")
    cached
  }
}
| /cachematrix.R | no_license | gwencrey/ProgrammingAssignment2 | R | false | false | 917 | r | ## Computes or retrieve cached inverse matrix
##
## This function creates a special "matrix" object that can cach its inverse
## Constructor for a matrix wrapper that can memoise its inverse.
## Returns a list of four accessor closures sharing this call's
## environment: set(), get(), setinverse(), getinverse().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setinverse = function(inv) {
      cached_inverse <<- inv
    },
    getinverse = function() {
      cached_inverse
    }
  )
}
## This function computes the inverse of the special "matrix" or retrieves.
## Return the inverse of the cache-matrix 'x' (a list made by
## makeCacheMatrix).  Extra arguments in ... are passed on to solve().
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
# Cache hit: reuse the stored inverse instead of recomputing it.
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# Cache miss: invert the stored matrix and remember the result.
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
|
## Generating Discrete Random Variables ##
##############################################
getwd() # determine the working directory
setwd("./Documents/Teaching")
# change the working directory
# . refers to the current directory
getwd()
##############################################
## The Inverse Transform Method
# Example 4a Ross (2006)
x<-1:4
p<-c(0.2,0.15,0.25,0.40)
Fx<-cumsum(p)
U<-runif(1)
X<-1
while (Fx[X]<U){
X<-X+1
}
print(X)
# Simulation study
N<-5000
set.seed(1)
U<-runif(N)
X<-rep(0,N)
for (i in 1:N){
j<-1
while (Fx[j]<U[i]){
j<-j+1
}
X[i]<-j
}
#print(X)
freq<-rep(0,4)
for (i in x) freq[i]<-sum(X==i)/N
#freq<-as.numeric(table(X))/N
plot(x,freq,type="h",lwd=3,ylim=c(0,max(p,freq)))
lines(x+0.05,p,type="h",col="red",lwd=3)
# pdf("inv_discr1.pdf",paper="special")
# plot(x,freq,type="h",lwd=3,ylim=c(0,max(p,freq)),axes=F,main="Simulation study")
# lines(x+0.05,p,type="h",col="red",lwd=3)
# axis(1,1:4)
# axis(2)
# box()
# legend("topleft",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
# Using of sample()
X<-sample(x,size=N,replace=T,prob=p)
freq<-as.numeric(table(X))/N
plot(x,freq,type="h",lwd=3,ylim=c(0,max(p,freq)))
lines(x+0.05,p,type="h",col="red",lwd=3)
## Uniform discrete rv's
# Using 'floor()'
?floor
N<-5000
n<-10
U<-runif(N)
X<-floor(n*U)+1
freq<-as.numeric(table(X))/N
plot(1:n,freq,type="h",lwd=3,ylim=c(0,max(1/n,freq)))
lines(1:n+0.05,rep(1/n,n),type="h",col="red",lwd=3)
# Using 'sample()'
X<-sample(1:n,size=N,replace=T)
freq<-as.numeric(table(X))/N
plot(1:n,freq,type="h",lwd=3,ylim=c(0,max(1/n,freq)))
lines(1:n+0.05,rep(1/n,n),type="h",col="red",lwd=3)
## Random permutation
# Es 4b, Ross (2006)
# take 1
n<-10
pi.0<-1:n
pi<-rep(0,n)
k<-n
while (k>1){
#cat("k=",k,"\n")
U<-runif(1)
I<-floor(k*U)+1
#cat("I=",I,"\n")
pi[k]<-pi.0[I]
pi.0<-pi.0[-I]
#cat("pi=",pi,"\n")
#cat("pi.0=",pi.0,"\n")
k<-k-1
}
pi[1]<-pi.0
print(pi)
# take 2
n<-10
pi<-1:n
k<-n
while (k>1){
#cat("k=",k,"\n")
U<-runif(1)
I<-floor(k*U)+1
#cat("I=",I,"\n")
x<-pi[k]
pi[k]<-pi[I]
pi[I]<-x
#cat("pi=",pi,"\n")
k<-k-1
}
print(pi)
# Using 'sample()'
pi<-sample(1:n)
print(pi)
# random subset of size r<= n/2
# take 1
n<-10
r<-4
pi.0<-1:n
pi<-rep(0,r)
k<-n
while (k>(n-r)){
#cat("k=",k,"\n")
U<-runif(1)
I<-floor(k*U)+1
#cat("I=",I,"\n")
pi[k-n+r]<-pi.0[I]
pi.0<-pi.0[-I]
#cat("pi=",pi,"\n")
#cat("pi.0=",pi.0,"\n")
k<-k-1
}
print(pi)
# take 2
n<-10
r<-4
pi<-1:n
k<-n
while (k>(n-r)){
#cat("k=",k,"\n")
U<-runif(1)
I<-floor(k*U)+1
#cat("I=",I,"\n")
x<-pi[k]
pi[k]<-pi[I]
pi[I]<-x
#cat("pi=",pi,"\n")
k<-k-1
}
print(pi[(n-r+1):n])
# Using 'sample()'
pi<-sample(1:n,size=r)
print(pi)
## Binomial random variable
# calculate cdf of binom rv
## Cumulative distribution function of a binomial(n, p) random variable:
## P(X <= x), built directly from the pmf terms choose(n, k) p^k (1-p)^(n-k).
## Behaves like pbinom(x, n, p); written out explicitly for teaching.
##
## x: upper summation bound (non-negative integer)
## n: number of trials;  p: success probability
binom.cdf <- function(x, n, p) {
  k <- 0:x
  # Vectorised: evaluate all pmf terms at once instead of accumulating in a loop.
  sum(choose(n, k) * p^k * (1 - p)^(n - k))
}
n<-10
p<-0.5
binom.cdf(1,n,p)
pbinom(1,size=n,prob=p)
# simulate X ~ F
## Simulate one draw from the discrete distribution with cdf F (supported
## on 0, 1, 2, ...) by the inverse transform method: draw U ~ Unif(0, 1)
## and return the smallest x with F(x) >= U.  Arguments in ... are passed
## on to F.
cdf.sim <- function(F, ...) {
  u <- runif(1)
  x <- 0
  # Linear search upwards from 0 for the first x with F(x) >= u.
  repeat {
    if (F(x, ...) >= u) {
      return(x)
    }
    x <- x + 1
  }
}
cdf.sim(binom.cdf,n,p)
rbinom(1,size=n,prob=p)
# Simulation study
N<-5000
n<-10
p<-0.5
X<-rep(0,N)
set.seed(1)
for (i in 1:N){
X[i]<-cdf.sim(binom.cdf,n,p)
}
#print(X)
freq<-rep(0,n+1)
for (i in 0:n) freq[i+1]<-sum(X==i)/N
#freq<-as.numeric(table(X))/N
p.t<-dbinom(0:n,size=n,prob=p)
plot(0:n,freq,type="h",lwd=3,ylim=c(0,max(freq,p.t)))
lines(0:n+0.05,p.t,type="h",col="red",lwd=3)
# pdf("binom1.pdf",paper="special")
# plot(0:n,freq,type="h",lwd=3,ylim=c(0,max(freq,p.t)),main="binom(n=10,p=0.5)")
# lines(0:n+0.05,p.t,type="h",col="red",lwd=3)
# legend("topleft",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
# combine loop in cdf.sim with
# the loop in binom.cdf
## One draw from binomial(n, p) via the inverse transform method, updating
## the pmf with the recursion P(X = x) = P(X = x-1) * (n-x+1) p / (x (1-p))
## so no factorials are recomputed along the way.
binom.sim <- function(n, p) {
  u <- runif(1)
  x <- 0
  prob <- (1 - p)^n  # P(X = 0)
  cdf <- prob        # running value of F(x)
  while (cdf < u) {
    x <- x + 1
    prob <- prob * ((n - x + 1) * p) / (x * (1 - p))
    cdf <- cdf + prob
  }
  x
}
set.seed(1)
system.time( # returns CPU time taken for execution
for (i in 1:N){
X[i]<-cdf.sim(binom.cdf,n,p)
}
)
# check higher efficiency, i.e.
# less computing time
set.seed(1)
system.time(
for (i in 1:N){
X[i]<-binom.sim(n,p)
}
)
## Sequences of independent trials
p<-0.5
U<-runif(1)
# simulate B ~ Bernulli(p)
if (U<p) {
B<-1
} else B<-0
print(B)
# simulate n iid B_i ~ Bernulli(p)
n<-10
B<-rep(0,n)
for (i in 1:n) {
U<-runif(1)
if (U<p) {
B[i]<-1
} else B[i]<-0
}
print(B)
# simulate X ~ binom(n,p) as
# X=sum_{i=1}^n B_i
n<-10
p<-0.5
X<-0
for (i in 1:n){
U<-runif(1)
if (U<p) X<-X+1
}
print(X)
X<-sum(runif(n)<p) # simpler
print(X)
# Geometric random variable
p<-0.5
# simulate Y ~ geom(p) as smaller i
# such that B_j=0 for j=1,...,i-1
# and B_i=1
Y<-0
success <-FALSE
while (!success) {
U<-runif(1)
if (U<p) {
success <-TRUE
} else {
Y<-Y+1
}
}
# using inverse transform method
U<-runif(1)
Y<-floor(log(U)/log(1-p))+1
print(Y)
# Simulation study
N<-5000
set.seed(100)
U<-runif(N)
Y<-floor(log(U)/log(1-p))+1
#print(Y)
y.max<-max(Y)
freq<-rep(0,y.max)
for (i in 1:y.max) freq[i]<-sum(Y==i)/N
plot(1:y.max,freq,type="h",lwd=3,ylim=c(0,p))
lines(1:y.max+0.05,dgeom(0:(y.max-1),prob=p),type="h",col="red",lwd=3)
# check '?dgeom'
# pdf("geom1.pdf",paper="special")
# plot(1:y.max,freq,type="h",lwd=3,ylim=c(0,p),main="geom(p=0.5)")
# lines(1:y.max+0.05,dgeom(0:(y.max-1),prob=p),type="h",col="red",lwd=3)
# legend("topright",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
## Poisson random variable
l<-3 # set value of parameter lambda
# inverse transform algorithm
# Simulate one draw from Poisson(l) by the inverse transform method,
# searching upwards from X = 0 and using the pmf recursion
# P(X = x) = P(X = x-1) * l / x.  Prints the simulated value and the
# number of search steps (for comparison with pois.sim1).
# l: the Poisson mean (lambda)
pois.sim <- function(l){
X<-0
px<-exp(-l)  # P(X = 0) = exp(-l)
Fx<-px       # running cdf value F(X)
U<-runif(1)
iter<-0 # dummy var, counts how many searches
while (Fx<U) {
iter<-iter+1
X<-X+1
px<-px*l/X  # pmf recursion: P(X = x) = P(X = x-1) * l / x
Fx<-Fx+px
}
cat("X=",X,"\n")
cat("num searches=",iter,"\n")
return(X)
}
pois.sim(l)
# Simulation study
set.seed(5)
N<-1000
X<-rep(0,N)
for (i in 1:N){
X[i]<-pois.sim(l)
}
#print(X)
x.max<-max(X)+5
freq<-rep(0,x.max+1)
for (i in 0:x.max) freq[i+1]<-sum(X==i)/N
plot(0:x.max,freq,type="h",lwd=3)
lines(0:x.max+0.1,dpois(0:x.max,lambda=l),type="h",col="red",lwd=3)
# pdf("pois1.pdf",paper="special")
# plot(0:x.max,freq,type="h",lwd=3,xlab="x",ylab="prob",
# main=expression(paste("Pois(",lambda,"=3), ",N==1000)))
# lines(0:x.max+0.1,dpois(0:x.max,lambda=l),type="h",col="red",lwd=3)
# legend("topright",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
# 2 take
# inverse transform
# with more efficient search (?)
# Inverse transform simulation of Poisson(l) with a two-sided search.
# The cdf search starts at X = floor(l) (near the distribution's mode)
# and walks up or down depending on where U falls, which typically needs
# fewer steps than always starting from 0 (compare with pois.sim).
# Prints the simulated value and the number of search steps taken.
#
# l: the Poisson mean (lambda)
pois.sim1 <- function(l){
  X <- floor(l)
  # Preallocate pmf storage; the "+ 1" guarantees at least one slot, so
  # l < 1 (X = 0) no longer yields px = numeric(0) and Fx = NA, which
  # made the original version error out in the comparisons below.
  px <- rep(exp(-l), 3*X + 1)
  Fx <- px[1]  # running value of F(X); px[1] = P(X = 0) = exp(-l)
  # Build the pmf up to the starting point via p(i) = p(i-1) * l / i.
  # seq_len() (unlike 1:X) correctly iterates zero times when X = 0.
  for (i in seq_len(X)){
    px[i+1] <- px[i]*l/i
    Fx <- Fx + px[i+1]
  }
  U <- runif(1)
  iter <- 0  # counts how many search steps were needed
  if (Fx < U) {
    # U lies above F(X): walk upwards, extending the cdf term by term.
    while (Fx < U) {
      iter <- iter + 1
      X <- X + 1
      px[X+1] <- px[X]*l/X
      Fx <- Fx + px[X+1]
    }
  } else {
    # U lies at or below F(X): walk downwards, peeling terms off the cdf.
    while (Fx >= U) {
      iter <- iter + 1
      Fx <- Fx - px[X+1]
      X <- X - 1
    }
    X <- X + 1
  }
  cat("X=",X,"\n")
  cat("num searches=",iter,"\n")
  return(X)
}
l<-100
pois.sim1(l)
# check that pois.sim1 is equivalent
# to pois.sim by setting the same
# seed. Note smaller num of searches
# in particular for l=100
seed<-1
seed<-seed+1
set.seed(seed)
pois.sim1(l)
set.seed(seed)
pois.sim(l)
# check higher efficiency, i.e.
# less computing time
seed<-seed+1
set.seed(seed)
system.time(
pois.sim1(l)
)
set.seed(seed)
system.time(
pois.sim(l)
)
# not clear from CPU time
## Rejection Method
# Example 4f, Ross (2006)
n<-10
p<-c(0.11,0.12,0.09,0.08,0.12,0.10,0.09,0.09,0.10,0.10)
q<-rep(1/n,n)
#sum(p);sum(q)
c<-max(p/q)
print(c) # [1] 1.2
prob.accept<-0
iter<-0
U<-1
while (U>=prob.accept){
Y<-floor(n*runif(1))+1
prob.accept<-p[Y]/(c*q[Y])
U<-runif(1)
#cat("prob accept=",prob.accept,"\n")
#cat("U=",U,"\n")
iter<-iter+1
}
X<-Y
print(X)
cat("num of iter=",iter,"\n")
set.seed(50)
N<-10000
num.iter<-rep(0,N)
X<-rep(0,N)
for (i in 1:N) {
prob.accept<-0
iter<-0
U<-1
while (U>=prob.accept){
Y<-floor(n*runif(1))+1
prob.accept<-p[Y]/(c*q[Y])
U<-runif(1)
iter<-iter+1
}
X[i]<-Y
num.iter[i]<-iter
}
#print(X)
freq<-rep(0,n)
for (i in 1:n) freq[i]<-sum(X==i)/N
plot(1:n,freq,ylim=c(0,max(c(0.12,freq))),type="h",lwd=3)
lines(1:n+0.1,p,type="h",col="red",lwd=3)
#print(num.iter)
mean(num.iter)
c
# pdf("accept1.pdf",paper="special")
# plot(1:n,freq,ylim=c(0,max(c(0.12,freq))),
# type="h",lwd=3,main="Example 4f, Ross (2006)")
# lines(1:n+0.1,p,type="h",col="red",lwd=3)
# legend("topright",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
| /lect_6/script_6.R | no_license | marjokaci/Numerical-and-Statistical-Methods-for-Finance-in-R | R | false | false | 11,545 | r |
## Generating Discrete Random Variables ##
##############################################
getwd() # determine the working directory
setwd("./Documents/Teaching")
# change the working directory
# . refers to the current directory
getwd()
##############################################
## The Inverse Transform Method
# Example 4a Ross (2006)
x<-1:4
p<-c(0.2,0.15,0.25,0.40)
Fx<-cumsum(p)
U<-runif(1)
X<-1
while (Fx[X]<U){
X<-X+1
}
print(X)
# Simulation study
N<-5000
set.seed(1)
U<-runif(N)
X<-rep(0,N)
for (i in 1:N){
j<-1
while (Fx[j]<U[i]){
j<-j+1
}
X[i]<-j
}
#print(X)
freq<-rep(0,4)
for (i in x) freq[i]<-sum(X==i)/N
#freq<-as.numeric(table(X))/N
plot(x,freq,type="h",lwd=3,ylim=c(0,max(p,freq)))
lines(x+0.05,p,type="h",col="red",lwd=3)
# pdf("inv_discr1.pdf",paper="special")
# plot(x,freq,type="h",lwd=3,ylim=c(0,max(p,freq)),axes=F,main="Simulation study")
# lines(x+0.05,p,type="h",col="red",lwd=3)
# axis(1,1:4)
# axis(2)
# box()
# legend("topleft",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
# Using of sample()
X<-sample(x,size=N,replace=T,prob=p)
freq<-as.numeric(table(X))/N
plot(x,freq,type="h",lwd=3,ylim=c(0,max(p,freq)))
lines(x+0.05,p,type="h",col="red",lwd=3)
## Uniform discrete rv's
# Using 'floor()'
?floor
N<-5000
n<-10
U<-runif(N)
X<-floor(n*U)+1
freq<-as.numeric(table(X))/N
plot(1:n,freq,type="h",lwd=3,ylim=c(0,max(1/n,freq)))
lines(1:n+0.05,rep(1/n,n),type="h",col="red",lwd=3)
# Using 'sample()'
X<-sample(1:n,size=N,replace=T)
freq<-as.numeric(table(X))/N
plot(1:n,freq,type="h",lwd=3,ylim=c(0,max(1/n,freq)))
lines(1:n+0.05,rep(1/n,n),type="h",col="red",lwd=3)
## Random permutation
# Es 4b, Ross (2006)
# take 1
n<-10
pi.0<-1:n
pi<-rep(0,n)
k<-n
while (k>1){
#cat("k=",k,"\n")
U<-runif(1)
I<-floor(k*U)+1
#cat("I=",I,"\n")
pi[k]<-pi.0[I]
pi.0<-pi.0[-I]
#cat("pi=",pi,"\n")
#cat("pi.0=",pi.0,"\n")
k<-k-1
}
pi[1]<-pi.0
print(pi)
# take 2
n<-10
pi<-1:n
k<-n
while (k>1){
#cat("k=",k,"\n")
U<-runif(1)
I<-floor(k*U)+1
#cat("I=",I,"\n")
x<-pi[k]
pi[k]<-pi[I]
pi[I]<-x
#cat("pi=",pi,"\n")
k<-k-1
}
print(pi)
# Using 'sample()'
pi<-sample(1:n)
print(pi)
# random subset of size r<= n/2
# take 1
n<-10
r<-4
pi.0<-1:n
pi<-rep(0,r)
k<-n
while (k>(n-r)){
#cat("k=",k,"\n")
U<-runif(1)
I<-floor(k*U)+1
#cat("I=",I,"\n")
pi[k-n+r]<-pi.0[I]
pi.0<-pi.0[-I]
#cat("pi=",pi,"\n")
#cat("pi.0=",pi.0,"\n")
k<-k-1
}
print(pi)
# take 2
n<-10
r<-4
pi<-1:n
k<-n
while (k>(n-r)){
#cat("k=",k,"\n")
U<-runif(1)
I<-floor(k*U)+1
#cat("I=",I,"\n")
x<-pi[k]
pi[k]<-pi[I]
pi[I]<-x
#cat("pi=",pi,"\n")
k<-k-1
}
print(pi[(n-r+1):n])
# Using 'sample()'
pi<-sample(1:n,size=r)
print(pi)
## Binomial random variable
# calculate cdf of binom rv
## P(X <= x) for X ~ binomial(n, p), accumulated term by term from the
## pmf values choose(n, i) p^i (1-p)^(n-i) for i = 0, ..., x.
binom.cdf <- function(x, n, p) {
  terms <- vapply(
    0:x,
    function(i) choose(n, i) * p^i * (1 - p)^(n - i),
    numeric(1)
  )
  sum(terms)
}
n<-10
p<-0.5
binom.cdf(1,n,p)
pbinom(1,size=n,prob=p)
# simulate X ~ F
# Simulate one draw from the discrete distribution with cdf F, supported
# on 0, 1, 2, ...: draw U ~ Unif(0, 1) and return the smallest X with
# F(X) >= U (inverse transform method).  Arguments in ... are passed to F.
cdf.sim<-function(F,...){
X<-0
U<-runif(1)
# Linear search upwards from 0 for the first X with F(X) >= U.
while (F(X,...)<U){
X<-X+1
}
return(X)
}
cdf.sim(binom.cdf,n,p)
rbinom(1,size=n,prob=p)
# Simulation study
N<-5000
n<-10
p<-0.5
X<-rep(0,N)
set.seed(1)
for (i in 1:N){
X[i]<-cdf.sim(binom.cdf,n,p)
}
#print(X)
freq<-rep(0,n+1)
for (i in 0:n) freq[i+1]<-sum(X==i)/N
#freq<-as.numeric(table(X))/N
p.t<-dbinom(0:n,size=n,prob=p)
plot(0:n,freq,type="h",lwd=3,ylim=c(0,max(freq,p.t)))
lines(0:n+0.05,p.t,type="h",col="red",lwd=3)
# pdf("binom1.pdf",paper="special")
# plot(0:n,freq,type="h",lwd=3,ylim=c(0,max(freq,p.t)),main="binom(n=10,p=0.5)")
# lines(0:n+0.05,p.t,type="h",col="red",lwd=3)
# legend("topleft",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
# combine loop in cdf.sim with
# the loop in binom.cdf
# One draw from binomial(n, p) by the inverse transform method.  The pmf
# is updated with the recursion P(X = x) = P(X = x-1) * (n-x+1)p / (x(1-p)),
# avoiding repeated factorial evaluation (more efficient than cdf.sim
# combined with binom.cdf, which recomputes the cdf at every step).
binom.sim <- function(n,p){
X<-0
px<-(1-p)^n  # P(X = 0)
Fx<-px       # running cdf value F(X)
U<-runif(1)
while (Fx<U) {
X<-X+1
px<-px*((n-X+1)*p)/(X*(1-p)) # compute px via recursive formula
Fx<-Fx+px
}
return(X)
}
set.seed(1)
system.time( # returns CPU time taken for execution
for (i in 1:N){
X[i]<-cdf.sim(binom.cdf,n,p)
}
)
# check higher efficiency, i.e.
# less computing time
set.seed(1)
system.time(
for (i in 1:N){
X[i]<-binom.sim(n,p)
}
)
## Sequences of independent trials
p<-0.5
U<-runif(1)
# simulate B ~ Bernulli(p)
if (U<p) {
B<-1
} else B<-0
print(B)
# simulate n iid B_i ~ Bernulli(p)
n<-10
B<-rep(0,n)
for (i in 1:n) {
U<-runif(1)
if (U<p) {
B[i]<-1
} else B[i]<-0
}
print(B)
# simulate X ~ binom(n,p) as
# X=sum_{i=1}^n B_i
n<-10
p<-0.5
X<-0
for (i in 1:n){
U<-runif(1)
if (U<p) X<-X+1
}
print(X)
X<-sum(runif(n)<p) # simpler
print(X)
# Geometric random variable
p<-0.5
# simulate Y ~ geom(p) as smaller i
# such that B_j=0 for j=1,...,i-1
# and B_i=1
Y<-0
success <-FALSE
while (!success) {
U<-runif(1)
if (U<p) {
success <-TRUE
} else {
Y<-Y+1
}
}
# using inverse transform method
U<-runif(1)
Y<-floor(log(U)/log(1-p))+1
print(Y)
# Simulation study
N<-5000
set.seed(100)
U<-runif(N)
Y<-floor(log(U)/log(1-p))+1
#print(Y)
y.max<-max(Y)
freq<-rep(0,y.max)
for (i in 1:y.max) freq[i]<-sum(Y==i)/N
plot(1:y.max,freq,type="h",lwd=3,ylim=c(0,p))
lines(1:y.max+0.05,dgeom(0:(y.max-1),prob=p),type="h",col="red",lwd=3)
# check '?dgeom'
# pdf("geom1.pdf",paper="special")
# plot(1:y.max,freq,type="h",lwd=3,ylim=c(0,p),main="geom(p=0.5)")
# lines(1:y.max+0.05,dgeom(0:(y.max-1),prob=p),type="h",col="red",lwd=3)
# legend("topright",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
## Poisson random variable
l<-3 # set value of parameter lambda
# inverse transform algorithm
# Simulate one draw from Poisson(l) by the inverse transform method,
# searching upwards from X = 0 and using the pmf recursion
# P(X = x) = P(X = x-1) * l / x.  Prints the simulated value and the
# number of search steps (for comparison with pois.sim1).
# l: the Poisson mean (lambda)
pois.sim <- function(l){
X<-0
px<-exp(-l)  # P(X = 0) = exp(-l)
Fx<-px       # running cdf value F(X)
U<-runif(1)
iter<-0 # dummy var, counts how many searches
while (Fx<U) {
iter<-iter+1
X<-X+1
px<-px*l/X  # pmf recursion: P(X = x) = P(X = x-1) * l / x
Fx<-Fx+px
}
cat("X=",X,"\n")
cat("num searches=",iter,"\n")
return(X)
}
pois.sim(l)
# Simulation study
set.seed(5)
N<-1000
X<-rep(0,N)
for (i in 1:N){
X[i]<-pois.sim(l)
}
#print(X)
x.max<-max(X)+5
freq<-rep(0,x.max+1)
for (i in 0:x.max) freq[i+1]<-sum(X==i)/N
plot(0:x.max,freq,type="h",lwd=3)
lines(0:x.max+0.1,dpois(0:x.max,lambda=l),type="h",col="red",lwd=3)
# pdf("pois1.pdf",paper="special")
# plot(0:x.max,freq,type="h",lwd=3,xlab="x",ylab="prob",
# main=expression(paste("Pois(",lambda,"=3), ",N==1000)))
# lines(0:x.max+0.1,dpois(0:x.max,lambda=l),type="h",col="red",lwd=3)
# legend("topright",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
# 2 take
# inverse transform
# with more efficient search (?)
# Inverse transform simulation of Poisson(l) with a two-sided search.
# The cdf search starts at X = floor(l) (near the distribution's mode)
# and walks up or down depending on where U falls, which typically needs
# fewer steps than always starting from 0 (compare with pois.sim).
# Prints the simulated value and the number of search steps taken.
#
# l: the Poisson mean (lambda)
pois.sim1 <- function(l){
  X <- floor(l)
  # Preallocate pmf storage; the "+ 1" guarantees at least one slot, so
  # l < 1 (X = 0) no longer yields px = numeric(0) and Fx = NA, which
  # made the original version error out in the comparisons below.
  px <- rep(exp(-l), 3*X + 1)
  Fx <- px[1]  # running value of F(X); px[1] = P(X = 0) = exp(-l)
  # Build the pmf up to the starting point via p(i) = p(i-1) * l / i.
  # seq_len() (unlike 1:X) correctly iterates zero times when X = 0.
  for (i in seq_len(X)){
    px[i+1] <- px[i]*l/i
    Fx <- Fx + px[i+1]
  }
  U <- runif(1)
  iter <- 0  # counts how many search steps were needed
  if (Fx < U) {
    # U lies above F(X): walk upwards, extending the cdf term by term.
    while (Fx < U) {
      iter <- iter + 1
      X <- X + 1
      px[X+1] <- px[X]*l/X
      Fx <- Fx + px[X+1]
    }
  } else {
    # U lies at or below F(X): walk downwards, peeling terms off the cdf.
    while (Fx >= U) {
      iter <- iter + 1
      Fx <- Fx - px[X+1]
      X <- X - 1
    }
    X <- X + 1
  }
  cat("X=",X,"\n")
  cat("num searches=",iter,"\n")
  return(X)
}
l<-100
pois.sim1(l)
# check that pois.sim1 is equivalent
# to pois.sim by setting the same
# seed. Note smaller num of searches
# in particular for l=100
seed<-1
seed<-seed+1
set.seed(seed)
pois.sim1(l)
set.seed(seed)
pois.sim(l)
# check higher efficiency, i.e.
# less computing time
seed<-seed+1
set.seed(seed)
system.time(
pois.sim1(l)
)
set.seed(seed)
system.time(
pois.sim(l)
)
# not clear from CPU time
## Rejection Method
# Example 4f, Ross (2006)
n<-10
p<-c(0.11,0.12,0.09,0.08,0.12,0.10,0.09,0.09,0.10,0.10)
q<-rep(1/n,n)
#sum(p);sum(q)
c<-max(p/q)
print(c) # [1] 1.2
prob.accept<-0
iter<-0
U<-1
while (U>=prob.accept){
Y<-floor(n*runif(1))+1
prob.accept<-p[Y]/(c*q[Y])
U<-runif(1)
#cat("prob accept=",prob.accept,"\n")
#cat("U=",U,"\n")
iter<-iter+1
}
X<-Y
print(X)
cat("num of iter=",iter,"\n")
set.seed(50)
N<-10000
num.iter<-rep(0,N)
X<-rep(0,N)
for (i in 1:N) {
prob.accept<-0
iter<-0
U<-1
while (U>=prob.accept){
Y<-floor(n*runif(1))+1
prob.accept<-p[Y]/(c*q[Y])
U<-runif(1)
iter<-iter+1
}
X[i]<-Y
num.iter[i]<-iter
}
#print(X)
freq<-rep(0,n)
for (i in 1:n) freq[i]<-sum(X==i)/N
plot(1:n,freq,ylim=c(0,max(c(0.12,freq))),type="h",lwd=3)
lines(1:n+0.1,p,type="h",col="red",lwd=3)
#print(num.iter)
mean(num.iter)
c
# pdf("accept1.pdf",paper="special")
# plot(1:n,freq,ylim=c(0,max(c(0.12,freq))),
# type="h",lwd=3,main="Example 4f, Ross (2006)")
# lines(1:n+0.1,p,type="h",col="red",lwd=3)
# legend("topright",c("observed","theoretical"),lty=c(1,1),
# lwd=c(3,3),col=c(1,2),bty="n")
# dev.off()
|
#' Calculate M
#'
#' Determines the largest integer M such that 2^M <= N, where N is the
#' length of the original series (equivalently, 2^(M+1) > N).  Returns -1
#' when the series is empty.
#'
#' @param d vector of points in the series
#'
#' @return integer scalar M
#'
#' @export
#'
#' @author W. S. Drysdale
calc_M <- function(d) {
  M <- 0
  while (2^M <= length(d)) {
    M <- M + 1
  }
  M - 1
}
| /R/calc_M.R | no_license | willdrysdale/MultiresDecomp | R | false | false | 270 | r | #' Calculate M
#'
#' Determines the largest integer M such that 2^M <= N, where N is the
#' length of the original series (equivalently, 2^(M+1) > N).  Returns -1
#' when the series is empty.
#'
#' @param d vector of points in the series
#'
#' @return integer scalar M
#'
#' @export
#'
#' @author W. S. Drysdale
calc_M <- function(d) {
  M <- 0
  while (2^M <= length(d)) {
    M <- M + 1
  }
  M - 1
}
|
#### log(count+1) instead of removing zero values
#### when validation, eat.model should use data=data_est
### VIF is missing
# ---- 1.Data transform and fit the full model ----
#read table
bike <- read.csv("hourv2.csv", header = TRUE)
#factoring variables
bike$season = as.factor(bike$season)
bike$mnth = as.factor(bike$mnth)
bike$weekday = as.factor(bike$weekday)
bike$holiday = as.factor(bike$holiday)
bike$workingday = as.factor(bike$workingday)
bike$weathersit = as.factor(bike$weathersit)
bike$hr = as.factor(bike$hr)
bike$yr = as.factor(bike$yr)
bike$weekday = as.factor(bike$weekday)
bike$type = as.factor(bike$type)
# ---- 1.1 split data ---
#split
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(bike$user,SplitRatio = 0.8)
training_set = subset(bike,split == T)
test_set = subset(bike,split == FALSE)
# ---- 1.2 fit a full multiple linear regression model ----
bike.fit1 <- lm(user ~ season + yr + mnth + hr + holiday + weekday +
workingday + weathersit + temp + atemp + hum + windspeed + type, data = training_set)
summary(bike.fit1)
# ---- 2.Perform a thorough residual analysis of this model. ----
par(mfrow=c(1,2))
qqnorm(rstandard(bike.fit1))
abline(0,1)
plot(fitted.values(bike.fit1), rstandard(bike.fit1), main = "Residual plot")
abline(0,0)
# ---- 3.Transform Y --- Boxcox method ----
par(mfrow=c(1,1))
bike.fitboxcox <- lm(bike$user1 ~ season + yr + mnth + hr + holiday + weekday +
workingday + weathersit + temp + atemp + hum + windspeed + type, data = bike)
library(MASS)
boxcox(bike.fitboxcox)
# ---- 4.new model ----
#remove user = 0
bike.subset <- subset(bike, user > 0)
bike.fit2 <-lm(log(user) ~ season + yr + mnth + hr + holiday + weekday +
workingday + weathersit + temp + atemp + hum + windspeed + type, data = bike.subset)
summary(bike.fit2)
# Residual analysis of the log-response model: normal Q-Q plot of the
# standardised residuals alongside a residuals-vs-fitted plot.
# (Fixes the misspelled plot title "Redisual plot".)
par(mfrow=c(1,2))
qqnorm(rstandard(bike.fit2))
abline(0,1)
plot(fitted.values(bike.fit2), rstandard(bike.fit2), main = "Residual plot")
abline(0,0)
# ---- 5.Variable selection ----
#divide variable hr into 5 category
#level 1 [0-5], level 2 [6-9], level 3 [10-15], level 4 [16-20], level 5 [21-23]
levels(bike.subset$hr) <- c(1,1,1,1,1,1,2,2,2,2,3,3,3,3,3,3,4,4,4,4,4,5,5,5)
levels(bike.subset$hr)
##forward selection
library(leaps)
# Forward stepwise search over the 13 candidate predictors, keeping the
# best model of each size. attach() removed: every column below is fully
# qualified as bike.subset$..., and repeated attach() calls only mask
# earlier copies on the search path with warnings.
predictors <- cbind(bike.subset$season, bike.subset$yr, bike.subset$mnth,
                    bike.subset$hr, bike.subset$holiday, bike.subset$weekday,
                    bike.subset$workingday, bike.subset$weathersit,
                    bike.subset$temp, bike.subset$atemp, bike.subset$hum,
                    bike.subset$windspeed, bike.subset$type)
forward <- regsubsets(x = predictors, y = log(bike.subset$user),
                      method = "forward", nbest = 1)
summary(forward)
# Comparison criteria per subset size.
Cp <- summary(forward)$cp
AdjR2 <- summary(forward)$adjr2
SSRes <- summary(forward)$rss
R2 <- summary(forward)$rsq
Matrix <- summary(forward)$which
p <- apply(Matrix, 1, sum)                 # parameters per model
MSE <- SSRes/(nrow(bike.subset) - p)       # residual mean square (n = 33153)
output1 <- cbind(p, Matrix, R2, AdjR2, SSRes, Cp)
colnames(output1)[3:15] <- c("season","yr","mnth","hr","holiday","weekday","workingday","weathersit","temp","atemp","hum","windspeed","type")
output1
###forward variable selection order: 1.type;2.hr;3.temp;4.hum;5.yr;6.season;7.workingday;8.atemp
##backward selection
library(leaps)
# Backward elimination over the same 13 predictors (attach() removed:
# all references are fully qualified, so it had no effect beyond
# cluttering the search path).
predictors <- cbind(bike.subset$season, bike.subset$yr, bike.subset$mnth,
                    bike.subset$hr, bike.subset$holiday, bike.subset$weekday,
                    bike.subset$workingday, bike.subset$weathersit,
                    bike.subset$temp, bike.subset$atemp, bike.subset$hum,
                    bike.subset$windspeed, bike.subset$type)
backward <- regsubsets(x = predictors, y = log(bike.subset$user),
                       method = "backward", nbest = 1)
summary(backward)
Cp <- summary(backward)$cp
AdjR2 <- summary(backward)$adjr2
SSRes <- summary(backward)$rss
R2 <- summary(backward)$rsq
Matrix <- summary(backward)$which
p <- apply(Matrix, 1, sum)                 # parameters per model
MSE <- SSRes/(nrow(bike.subset) - p)       # residual mean square (n = 33153)
output2 <- cbind(p, Matrix, R2, AdjR2, SSRes, Cp)
colnames(output2)[3:15] <- c("season","yr","mnth","hr","holiday","weekday","workingday","weathersit","temp","atemp","hum","windspeed","type")
output2
###backward variable drop order: season+yr+hr+workingday+atemp+hum+windspeed+type
####drop order:1.windspeed;2.workingday;3.season;4.yr;5.hum;6.atemp;7.hr
##exhaustive selection
library(leaps)
# Exhaustive (all-subsets) search, keeping the 3 best models per size.
# attach() removed for the same reason as the forward/backward searches.
predictors <- cbind(bike.subset$season, bike.subset$yr, bike.subset$mnth,
                    bike.subset$hr, bike.subset$holiday, bike.subset$weekday,
                    bike.subset$workingday, bike.subset$weathersit,
                    bike.subset$temp, bike.subset$atemp, bike.subset$hum,
                    bike.subset$windspeed, bike.subset$type)
exhaustive <- regsubsets(x = predictors, y = log(bike.subset$user),
                         method = "exhaustive", nbest = 3)
summary(exhaustive)
Cp <- summary(exhaustive)$cp
AdjR2 <- summary(exhaustive)$adjr2
SSRes <- summary(exhaustive)$rss
R2 <- summary(exhaustive)$rsq
Matrix <- summary(exhaustive)$which
p <- apply(Matrix, 1, sum)                 # parameters per model
MSE <- SSRes/(nrow(bike.subset) - p)       # residual mean square (n = 33153)
output3 <- cbind(p, Matrix, R2, AdjR2, SSRes, Cp)
colnames(output3)[3:15] <- c("season","yr","mnth","hr","holiday","weekday","workingday","weathersit","temp","atemp","hum","windspeed","type")
output3
###exhaustive selection order: 1.type 2.type+hr 3.type+hr+temp; 4.type+hr+atemp+hum;
####5.type+hr+atemp+hum+yr; 6.+season; 7.+workingday; 8.+windspeed
######conclusions: 1. Interesting finding: we expected temp to be a key driver of
################# ridership, but the selection keeps atemp (feels-like temperature)
################# over temp, i.e. atemp is the more informative of the two.
################# 2. Some variables (e.g. weathersit) we expected are not selected;
################## scatterplots against the response are needed to see why.
################# 3. The R2/AdjR2/SSRes/Cp values are mediocre, possibly because of
################# the functional form of the predictors -- further analysis needed.
#4.Variable selection---"By-hand"
#named variables: y is the log response; x1..x13 alias the 13 predictors
# so the add1/drop1 formulas below stay short.
y <- log(bike.subset$user)
x1 <- bike.subset$season
x2 <- bike.subset$yr
x3 <- bike.subset$mnth
x4 <- bike.subset$hr
x5 <- bike.subset$holiday
x6 <- bike.subset$weekday
x7 <- bike.subset$workingday
x8 <- bike.subset$weathersit
x9 <- bike.subset$temp
x10 <- bike.subset$atemp
x11 <- bike.subset$hum
x12 <- bike.subset$windspeed
x13 <- bike.subset$type
##Forward variable selection
# Start from the intercept-only model; at each step add1() reports the
# partial F-test for every candidate, and the best one is added manually.
fit.0 <- lm( y ~ 1, data = bike.subset)
add1(fit.0, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.1 <- lm(y ~ x13, data = bike.subset)
add1(fit.1, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.2 <- lm(y ~ x13 + x4, data = bike.subset)
add1(fit.2, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.3 <- lm(y ~ x13 + x4 + x10, data = bike.subset)
add1(fit.3, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.4 <- lm(y ~ x13 + x4 + x10 + x2, data = bike.subset)
add1(fit.4, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.5 <- lm(y ~ x13 + x4 + x10 + x2 + x11, data = bike.subset)
add1(fit.5, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.6 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7, data = bike.subset)
add1(fit.6, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.7 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1, data = bike.subset)
add1(fit.7, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.8 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8, data = bike.subset)
add1(fit.8, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.9 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8 + x5, data = bike.subset)
add1(fit.9, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.10 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8 + x5 + x9, data = bike.subset)
add1(fit.10, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.11 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8 + x5 + x9 + x6, data = bike.subset)
add1(fit.11, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.12 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8 + x5 + x9 + x6 + x3 , data = bike.subset)
add1(fit.12, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
##add order:1.type;2.hr;3.atemp;4.yr;5.hum;6.workingday;7.season;8.weathersit;9.holiday;10.temp;11.weekday;12.mnth;13.windspeed
##Backward variable selection
# Start from the full model; drop1() reports the partial F-test for each
# term, and the least significant term is removed at every step.
fit.13 <- lm(y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, data = bike.subset)
drop1(fit.13,y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
#x5 and x7's F value is 0
fit.12 <- lm(y ~ x1 + x2 + x3 + x4 + x6 + x8 + x9 + x10 + x11 + x12 + x13, data = bike.subset)
drop1(fit.12,y ~ x1 + x2 + x3 + x4 + x6 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.11 <- lm(y ~ x1 + x2 + x3 + x4 + x6 + x8 + x9 + x10 + x11 + x13, data = bike.subset)
drop1(fit.11,y ~ x1 + x2 + x3 + x4 + x6 + x8 + x9 + x10 + x11 + x13, test = "F")
fit.10 <- lm(y ~ x1 + x2 + x4 + x6 + x8 + x9 + x10 + x11 + x13, data = bike.subset)
drop1(fit.10,y ~ x1 + x2 + x4 + x6 + x8 + x9 + x10 + x11 + x13, test = "F")
fit.9 <- lm(y ~ x1 + x2 + x4 + x6 + x8 + x10 + x11 + x13, data = bike.subset)
drop1(fit.9,y ~ x1 + x2 + x4 + x6 + x8 + x10 + x11 + x13, test = "F")
#Drop out order:holiday, workingday, windspeed, mnth, temp
##Stepwise variable selection
# Alternate add1()/drop1() at each step: add the best candidate, then
# verify no previously-added term has become redundant.
fit.0 <- lm(y ~ 1, data = bike.subset)
add1(fit.0, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.1 <- lm(y ~ x13, data = bike.subset)
drop1(fit.1, y ~ x13, test = "F")
add1(fit.1, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.2 <- lm(y ~ x13 + x4, data = bike.subset)
drop1(fit.2, y ~ x13 + x4, test = "F")
add1(fit.2, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.3 <- lm(y ~ x13 + x4 + x10, data = bike.subset)
drop1(fit.3, y ~ x13 + x4 + x10, test = "F")
add1(fit.3, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.4 <- lm(y ~ x13 + x4 + x10 + x2, data = bike.subset)
drop1(fit.4, y ~ x13 + x4 + x10 + x2, test = "F")
add1(fit.4, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
# NOTE(review): fit.5 swaps x2 (yr) out for x11 (hum) here; x2 is re-added
# at fit.6 -- presumably intentional, but worth double-checking the trace.
fit.5 <- lm(y ~ x13 + x4 + x10 + x11, data = bike.subset)
drop1(fit.5, y ~ x13 + x4 + x10 + x11, test = "F")
add1(fit.5, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.6 <- lm(y ~ x13 + x4 + x10 + x11 + x2, data = bike.subset)
drop1(fit.6, y ~ x13 + x4 + x10 + x11 + x2, test = "F")
add1(fit.6, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.7 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7, data = bike.subset)
drop1(fit.7, y ~ x13 + x4 + x10 + x11 + x2 + x7, test = "F")
add1(fit.7, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.8 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1, data = bike.subset)
drop1(fit.8, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1, test = "F")
add1(fit.8, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.9 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8, data = bike.subset)
drop1(fit.9, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8, test = "F")
add1(fit.9, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.10 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5, data = bike.subset)
drop1(fit.10, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5, test = "F")
add1(fit.10, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.11 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5 + x9, data = bike.subset)
drop1(fit.11, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5 + x9, test = "F")
add1(fit.11, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.12 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5 + x9 + x6, data = bike.subset)
drop1(fit.12, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5 + x9 + x6, test = "F")
##conclusion:no drop variable
###1.type;2.hr;3.atemp;4.hum;5.yr;6.workingday;7.season;8.weathersit;9.holiday;10.temp;11.weekday
# ---- 6. Fit model and assumption checking ----
# Reduced model using the predictors retained by the selection procedures.
bike.fit3 <- lm(log(user) ~ season + yr + hr + workingday + weathersit + atemp + hum + type, data = bike.subset)
summary(bike.fit3)
#checking: normal QQ plot and residuals-vs-fitted plot
par(mfrow=c(1,2))
qqnorm(rstandard(bike.fit3))
abline(0,1)
plot(fitted.values(bike.fit3), rstandard(bike.fit3), main = "Residual plot")  # fixed typo "Redisual"
abline(0,0)
#testing: partial F-test of the reduced model against the full log model
anova(bike.fit2,bike.fit3)
#6.add interaction--final model
#according to ggplot, we divide hr into two level(add ggplot code)
# Final model: three-way interaction workingday x type x hr, plus the
# main effects retained by the selection step.
bike.fit4 <- lm(log(user) ~ season + yr + workingday*type*hr + weathersit + atemp + hum, data = bike.subset)
summary(bike.fit4)
# ---- 7. model validation ----
# Split the data into an estimation set (32000 rows) and a validation
# set (1153 rows), fit on the former, predict the latter.
set.seed(5)
obs <- seq_len(nrow(bike.subset))        # 33153 usable observations
sample.est <- sort(sample(obs, 32000))
sample.val <- obs[-sample.est]
bike.est <- bike.subset[sample.est, ]
bike.val <- bike.subset[sample.val, ]
# FIX: fit on the estimation rows only. The original used
# data = bike.subset, which leaked the validation rows into the fit
# (flagged in the note at the top of the file).
fit.est <- lm(log(user) ~ season + yr + workingday*type*hr + weathersit + atemp + hum, data = bike.est)
summary(fit.est)
coefficients(fit.est)
y_hat <- predict(fit.est, bike.val[,c(3,4,6,9,10,12,13,16)])
pred_error <- log(bike.val[,15]) - y_hat
sum(pred_error^2)/length(sample.val)  # MSP (~0.496) vs model MSE (~0.5)
anova(fit.est)
##repeat 100 times
# Single trial of the split/fit/predict cycle (the full 100-rep loop follows):
# 32153 estimation rows, 1000 validation rows.
sample.est <- sort(sample(obs,32153))
sample.val <- (1:33153)[-sample.est]
bike.est <- bike.subset[sample.est,]
bike.val <- bike.subset[sample.val,]
# FIX: fit on the estimation rows only (was data = bike.subset, which
# leaked the 1000 validation rows into the fit).
fit.est <- lm(log(user) ~ season + yr + workingday*type*hr + weathersit + atemp + hum, data = bike.est)
coefficients(fit.est)
y_hat <- predict(fit.est, bike.val[,c(3,4,6,9,10,12,13,16)])
pred_error <- log(bike.val[,15]) - y_hat
sum(pred_error^2)/length(sample.val)  # MSP over the 1000 held-out rows
# Repeat the split/fit/predict cycle 100 times, collecting the 29 fitted
# coefficients and the mean squared prediction error of every repetition.
# A preallocated 100 x 29 matrix replaces the original 29 separate
# growing beta vectors (the model has 29 coefficients: intercept, season,
# yr, weathersit, atemp, hum, and the workingday*type*hr interaction terms).
n_rep <- 100
coefs <- matrix(NA_real_, nrow = n_rep, ncol = 29)
MSP <- numeric(n_rep)
for (i in seq_len(n_rep)) {
  sample.est <- sort(sample(obs, 32153))
  sample.val <- (1:33153)[-sample.est]
  bike.est <- bike.subset[sample.est, ]
  bike.val <- bike.subset[sample.val, ]
  # FIX: fit on the estimation rows only (was data = bike.subset, which
  # leaked the 1000 validation rows into every fit).
  fit.est <- lm(log(user) ~ season + yr + workingday*type*hr + weathersit + atemp + hum,
                data = bike.est)
  y_hat <- predict(fit.est, bike.val[, c(3,4,6,9,10,12,13,16)])
  pred_error <- log(bike.val[, 15]) - y_hat
  coefs[i, ] <- coef(fit.est)
  MSP[i] <- sum(pred_error^2) / length(sample.val)
}
# Keep the original beta0..beta28 names available for any downstream use.
for (j in 0:28) assign(paste0("beta", j), coefs[, j + 1])
summary(MSP) # Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
##############0.4610 0.5082 0.5328 0.5337 0.5552 0.6285 | /(analysis)bikesharing.R | no_license | minfang423/Regression-Analysis-of-Bike-Sharing | R | false | false | 16,769 | r | #### log(count+1) instead of removing zero values
#### when validating, fit.est should be fitted with data = bike.est (not the full data set)
### VIF is missing
# ---- 1.Data transform and fit the full model ----
#read table
bike <- read.csv("hourv2.csv", header = TRUE)
#factoring variables: convert every categorical predictor in one pass.
# (The original converted `weekday` twice; as.factor is idempotent, so
# listing each column exactly once is equivalent and avoids the duplicate.)
factor_cols <- c("season", "mnth", "weekday", "holiday", "workingday",
                 "weathersit", "hr", "yr", "type")
bike[factor_cols] <- lapply(bike[factor_cols], as.factor)
# ---- 1.1 split data ----
# Hold out 20% of the observations, stratified on the response
# via caTools::sample.split.
# install.packages('caTools')
library(caTools)
set.seed(123)
split <- sample.split(bike$user, SplitRatio = 0.8)
# Use TRUE/FALSE literals, never the reassignable aliases T/F.
training_set <- subset(bike, split == TRUE)
test_set <- subset(bike, split == FALSE)
# ---- 1.2 fit a full multiple linear regression model ----
# Full model: every candidate predictor, fitted on the 80% training split.
bike.fit1 <- lm(user ~ season + yr + mnth + hr + holiday + weekday +
workingday + weathersit + temp + atemp + hum + windspeed + type, data = training_set)
summary(bike.fit1)
# ---- 2.Perform a thorough residual analysis of this model. ----
# Side by side: normal QQ plot of the standardized residuals, and
# residuals versus fitted values (normality and constant-variance checks).
par(mfrow=c(1,2))
qqnorm(rstandard(bike.fit1))
abline(0,1)
plot(fitted.values(bike.fit1), rstandard(bike.fit1), main = "Residual plot")
abline(0,0)
# ---- 3.Transform Y --- Boxcox method ----
par(mfrow=c(1,1))
library(MASS)
# NOTE(review): `user1` is presumably a shifted response (user + 1) so the
# Box-Cox profile is defined at zero counts -- confirm the column exists.
# Referencing the column by bare name keeps the formula consistent with
# the `data = bike` argument (the original mixed `bike$user1` with data=).
bike.fitboxcox <- lm(user1 ~ season + yr + mnth + hr + holiday + weekday +
workingday + weathersit + temp + atemp + hum + windspeed + type, data = bike)
boxcox(bike.fitboxcox)
# ---- 4.new model ----
#remove user = 0
# NOTE(review): dropping the zero counts discards data; the file header
# suggests modelling log(user + 1) on the full data instead -- confirm.
bike.subset <- subset(bike, user > 0)
bike.fit2 <-lm(log(user) ~ season + yr + mnth + hr + holiday + weekday +
workingday + weathersit + temp + atemp + hum + windspeed + type, data = bike.subset)
summary(bike.fit2)
#qqplot and residual plot for the log-response model
par(mfrow=c(1,2))
qqnorm(rstandard(bike.fit2))
abline(0,1)
plot(fitted.values(bike.fit2), rstandard(bike.fit2), main = "Redisual plot")
abline(0,0)
# ---- 5.Variable selection ----
#divide variable hr into 5 category
#level 1 [0-5], level 2 [6-9], level 3 [10-15], level 4 [16-20], level 5 [21-23]
# Collapse the 24 hour levels into 5 bands (counts: 6,4,6,5,3 hours).
levels(bike.subset$hr) <- c(1,1,1,1,1,1,2,2,2,2,3,3,3,3,3,3,4,4,4,4,4,5,5,5)
levels(bike.subset$hr)
##forward selection
library(leaps)
# Forward stepwise search over the 13 candidate predictors, keeping the
# best model of each size. attach() removed: every column below is fully
# qualified as bike.subset$..., and repeated attach() calls only mask
# earlier copies on the search path with warnings.
predictors <- cbind(bike.subset$season, bike.subset$yr, bike.subset$mnth,
                    bike.subset$hr, bike.subset$holiday, bike.subset$weekday,
                    bike.subset$workingday, bike.subset$weathersit,
                    bike.subset$temp, bike.subset$atemp, bike.subset$hum,
                    bike.subset$windspeed, bike.subset$type)
forward <- regsubsets(x = predictors, y = log(bike.subset$user),
                      method = "forward", nbest = 1)
summary(forward)
# Comparison criteria per subset size.
Cp <- summary(forward)$cp
AdjR2 <- summary(forward)$adjr2
SSRes <- summary(forward)$rss
R2 <- summary(forward)$rsq
Matrix <- summary(forward)$which
p <- apply(Matrix, 1, sum)                 # parameters per model
MSE <- SSRes/(nrow(bike.subset) - p)       # residual mean square (n = 33153)
output1 <- cbind(p, Matrix, R2, AdjR2, SSRes, Cp)
colnames(output1)[3:15] <- c("season","yr","mnth","hr","holiday","weekday","workingday","weathersit","temp","atemp","hum","windspeed","type")
output1
###forward variable selection order: 1.type;2.hr;3.temp;4.hum;5.yr;6.season;7.workingday;8.atemp
##backward selection
library(leaps)
# Backward elimination over the same 13 predictors (attach() removed:
# all references are fully qualified, so it had no effect beyond
# cluttering the search path).
predictors <- cbind(bike.subset$season, bike.subset$yr, bike.subset$mnth,
                    bike.subset$hr, bike.subset$holiday, bike.subset$weekday,
                    bike.subset$workingday, bike.subset$weathersit,
                    bike.subset$temp, bike.subset$atemp, bike.subset$hum,
                    bike.subset$windspeed, bike.subset$type)
backward <- regsubsets(x = predictors, y = log(bike.subset$user),
                       method = "backward", nbest = 1)
summary(backward)
Cp <- summary(backward)$cp
AdjR2 <- summary(backward)$adjr2
SSRes <- summary(backward)$rss
R2 <- summary(backward)$rsq
Matrix <- summary(backward)$which
p <- apply(Matrix, 1, sum)                 # parameters per model
MSE <- SSRes/(nrow(bike.subset) - p)       # residual mean square (n = 33153)
output2 <- cbind(p, Matrix, R2, AdjR2, SSRes, Cp)
colnames(output2)[3:15] <- c("season","yr","mnth","hr","holiday","weekday","workingday","weathersit","temp","atemp","hum","windspeed","type")
output2
###backward variable drop order: season+yr+hr+workingday+atemp+hum+windspeed+type
####drop order:1.windspeed;2.workingday;3.season;4.yr;5.hum;6.atemp;7.hr
##exhaustive selection
library(leaps)
# Exhaustive (all-subsets) search, keeping the 3 best models per size.
# attach() removed for the same reason as the forward/backward searches.
predictors <- cbind(bike.subset$season, bike.subset$yr, bike.subset$mnth,
                    bike.subset$hr, bike.subset$holiday, bike.subset$weekday,
                    bike.subset$workingday, bike.subset$weathersit,
                    bike.subset$temp, bike.subset$atemp, bike.subset$hum,
                    bike.subset$windspeed, bike.subset$type)
exhaustive <- regsubsets(x = predictors, y = log(bike.subset$user),
                         method = "exhaustive", nbest = 3)
summary(exhaustive)
Cp <- summary(exhaustive)$cp
AdjR2 <- summary(exhaustive)$adjr2
SSRes <- summary(exhaustive)$rss
R2 <- summary(exhaustive)$rsq
Matrix <- summary(exhaustive)$which
p <- apply(Matrix, 1, sum)                 # parameters per model
MSE <- SSRes/(nrow(bike.subset) - p)       # residual mean square (n = 33153)
output3 <- cbind(p, Matrix, R2, AdjR2, SSRes, Cp)
colnames(output3)[3:15] <- c("season","yr","mnth","hr","holiday","weekday","workingday","weathersit","temp","atemp","hum","windspeed","type")
output3
###exhaustive selection order: 1.type 2.type+hr 3.type+hr+temp; 4.type+hr+atemp+hum;
####5.type+hr+atemp+hum+yr; 6.+season; 7.+workingday; 8.+windspeed
######conclusions: 1. Interesting finding: we expected temp to be a key driver of
################# ridership, but the selection keeps atemp (feels-like temperature)
################# over temp, i.e. atemp is the more informative of the two.
################# 2. Some variables (e.g. weathersit) we expected are not selected;
################## scatterplots against the response are needed to see why.
################# 3. The R2/AdjR2/SSRes/Cp values are mediocre, possibly because of
################# the functional form of the predictors -- further analysis needed.
#4.Variable selection---"By-hand"
#named variables: y is the log response; x1..x13 alias the 13 predictors
# so the add1/drop1 formulas below stay short.
y <- log(bike.subset$user)
x1 <- bike.subset$season
x2 <- bike.subset$yr
x3 <- bike.subset$mnth
x4 <- bike.subset$hr
x5 <- bike.subset$holiday
x6 <- bike.subset$weekday
x7 <- bike.subset$workingday
x8 <- bike.subset$weathersit
x9 <- bike.subset$temp
x10 <- bike.subset$atemp
x11 <- bike.subset$hum
x12 <- bike.subset$windspeed
x13 <- bike.subset$type
##Forward variable selection
# Start from the intercept-only model; at each step add1() reports the
# partial F-test for every candidate, and the best one is added manually.
fit.0 <- lm( y ~ 1, data = bike.subset)
add1(fit.0, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.1 <- lm(y ~ x13, data = bike.subset)
add1(fit.1, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.2 <- lm(y ~ x13 + x4, data = bike.subset)
add1(fit.2, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.3 <- lm(y ~ x13 + x4 + x10, data = bike.subset)
add1(fit.3, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.4 <- lm(y ~ x13 + x4 + x10 + x2, data = bike.subset)
add1(fit.4, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.5 <- lm(y ~ x13 + x4 + x10 + x2 + x11, data = bike.subset)
add1(fit.5, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.6 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7, data = bike.subset)
add1(fit.6, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.7 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1, data = bike.subset)
add1(fit.7, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.8 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8, data = bike.subset)
add1(fit.8, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.9 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8 + x5, data = bike.subset)
add1(fit.9, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.10 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8 + x5 + x9, data = bike.subset)
add1(fit.10, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.11 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8 + x5 + x9 + x6, data = bike.subset)
add1(fit.11, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.12 <- lm(y ~ x13 + x4 + x10 + x2 + x11 + x7 + x1 + x8 + x5 + x9 + x6 + x3 , data = bike.subset)
add1(fit.12, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
##add order:1.type;2.hr;3.atemp;4.yr;5.hum;6.workingday;7.season;8.weathersit;9.holiday;10.temp;11.weekday;12.mnth;13.windspeed
##Backward variable selection
# Start from the full model; drop1() reports the partial F-test for each
# term, and the least significant term is removed at every step.
fit.13 <- lm(y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, data = bike.subset)
drop1(fit.13,y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
#x5 and x7's F value is 0
fit.12 <- lm(y ~ x1 + x2 + x3 + x4 + x6 + x8 + x9 + x10 + x11 + x12 + x13, data = bike.subset)
drop1(fit.12,y ~ x1 + x2 + x3 + x4 + x6 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.11 <- lm(y ~ x1 + x2 + x3 + x4 + x6 + x8 + x9 + x10 + x11 + x13, data = bike.subset)
drop1(fit.11,y ~ x1 + x2 + x3 + x4 + x6 + x8 + x9 + x10 + x11 + x13, test = "F")
fit.10 <- lm(y ~ x1 + x2 + x4 + x6 + x8 + x9 + x10 + x11 + x13, data = bike.subset)
drop1(fit.10,y ~ x1 + x2 + x4 + x6 + x8 + x9 + x10 + x11 + x13, test = "F")
fit.9 <- lm(y ~ x1 + x2 + x4 + x6 + x8 + x10 + x11 + x13, data = bike.subset)
drop1(fit.9,y ~ x1 + x2 + x4 + x6 + x8 + x10 + x11 + x13, test = "F")
#Drop out order:holiday, workingday, windspeed, mnth, temp
##Stepwise variable selection
# Alternate add1()/drop1() at each step: add the best candidate, then
# verify no previously-added term has become redundant.
fit.0 <- lm(y ~ 1, data = bike.subset)
add1(fit.0, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.1 <- lm(y ~ x13, data = bike.subset)
drop1(fit.1, y ~ x13, test = "F")
add1(fit.1, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.2 <- lm(y ~ x13 + x4, data = bike.subset)
drop1(fit.2, y ~ x13 + x4, test = "F")
add1(fit.2, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.3 <- lm(y ~ x13 + x4 + x10, data = bike.subset)
drop1(fit.3, y ~ x13 + x4 + x10, test = "F")
add1(fit.3, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.4 <- lm(y ~ x13 + x4 + x10 + x2, data = bike.subset)
drop1(fit.4, y ~ x13 + x4 + x10 + x2, test = "F")
add1(fit.4, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
# NOTE(review): fit.5 swaps x2 (yr) out for x11 (hum) here; x2 is re-added
# at fit.6 -- presumably intentional, but worth double-checking the trace.
fit.5 <- lm(y ~ x13 + x4 + x10 + x11, data = bike.subset)
drop1(fit.5, y ~ x13 + x4 + x10 + x11, test = "F")
add1(fit.5, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.6 <- lm(y ~ x13 + x4 + x10 + x11 + x2, data = bike.subset)
drop1(fit.6, y ~ x13 + x4 + x10 + x11 + x2, test = "F")
add1(fit.6, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.7 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7, data = bike.subset)
drop1(fit.7, y ~ x13 + x4 + x10 + x11 + x2 + x7, test = "F")
add1(fit.7, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.8 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1, data = bike.subset)
drop1(fit.8, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1, test = "F")
add1(fit.8, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.9 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8, data = bike.subset)
drop1(fit.9, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8, test = "F")
add1(fit.9, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.10 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5, data = bike.subset)
drop1(fit.10, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5, test = "F")
add1(fit.10, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.11 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5 + x9, data = bike.subset)
drop1(fit.11, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5 + x9, test = "F")
add1(fit.11, y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + x12 + x13, test = "F")
fit.12 <- lm(y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5 + x9 + x6, data = bike.subset)
drop1(fit.12, y ~ x13 + x4 + x10 + x11 + x2 + x7 + x1 + x8 + x5 + x9 + x6, test = "F")
##conclusion:no drop variable
###1.type;2.hr;3.atemp;4.hum;5.yr;6.workingday;7.season;8.weathersit;9.holiday;10.temp;11.weekday
# ---- 6. Fit model and assumption checking ----
# Reduced model using the predictors retained by the selection procedures.
bike.fit3 <- lm(log(user) ~ season + yr + hr + workingday + weathersit + atemp + hum + type, data = bike.subset)
summary(bike.fit3)
#checking: normal QQ plot and residuals-vs-fitted plot
par(mfrow=c(1,2))
qqnorm(rstandard(bike.fit3))
abline(0,1)
plot(fitted.values(bike.fit3), rstandard(bike.fit3), main = "Residual plot")  # fixed typo "Redisual"
abline(0,0)
#testing: partial F-test of the reduced model against the full log model
anova(bike.fit2,bike.fit3)
#6.add interaction--final model
#according to ggplot, we divide hr into two level(add ggplot code)
# Final model: three-way interaction workingday x type x hr, plus the
# main effects retained by the selection step.
bike.fit4 <- lm(log(user) ~ season + yr + workingday*type*hr + weathersit + atemp + hum, data = bike.subset)
summary(bike.fit4)
# ---- 7. model validation ----
# Hold out 1153 observations for validation; fit on the remaining 32000.
set.seed(5)
obs <- c(1:33153)
sample.est <- sort(sample(obs, 32000))
sample.val <- (1:33153)[-sample.est]
bike.est <- bike.subset[sample.est, ]
bike.val <- bike.subset[sample.val, ]
# FIX: fit on the estimation subset only. The original used
# data = bike.subset, which leaked the validation rows into the fit and
# invalidated the prediction-error estimate (the inline comment already
# flagged "#data = bike.est").
fit.est <- lm(log(user) ~ season + yr + workingday*type*hr + weathersit + atemp + hum, data = bike.est)
summary(fit.est)
coefficients(fit.est)
# Predict on the held-out rows (columns 3,4,6,9,10,12,13,16 are the
# predictors used in the formula above).
y_hat <- predict(fit.est, bike.val[, c(3, 4, 6, 9, 10, 12, 13, 16)])
pred_error <- log(bike.val[, 15]) - y_hat
sum(pred_error^2) / 1153  # mean squared prediction error over the 1153 held-out rows
anova(fit.est)
## One more split by hand (1000-row validation set) before the 100-fold loop.
sample.est <- sort(sample(obs, 32153))
sample.val <- (1:33153)[-sample.est]
bike.est <- bike.subset[sample.est, ]
bike.val <- bike.subset[sample.val, ]
# FIX: fit on the estimation subset only (was data = bike.subset, which
# leaked the 1000 validation rows into the fit).
fit.est <- lm(log(user) ~ season + yr + workingday*type*hr + weathersit + atemp + hum, data = bike.est)
coefficients(fit.est)
y_hat <- predict(fit.est, bike.val[, c(3, 4, 6, 9, 10, 12, 13, 16)])
pred_error <- log(bike.val[, 15]) - y_hat
sum(pred_error^2) / 1000  # mean squared prediction error (1000 validation rows)
# Preallocate storage for the 100 validation repetitions: one vector per
# fitted coefficient (intercept + 28 model terms -> beta0..beta28) plus the
# mean squared prediction error. Preallocating to the known length avoids
# growing each vector element-by-element inside the loop below.
for (j in 0:28) {
  assign(paste0("beta", j), numeric(100))
}
MSP <- numeric(100)
## Repeat the split/fit/predict cycle 100 times to estimate the sampling
## distribution of the coefficients (beta0..beta28) and of the mean squared
## prediction error (MSP).
for (i in 1:100){
  sample.est <- sort(sample(obs, 32153))
  sample.val <- (1:33153)[-sample.est]
  bike.est <- bike.subset[sample.est, ]
  bike.val <- bike.subset[sample.val, ]
  # FIX: fit on the estimation rows only (was data = bike.subset, which
  # included the 1000 validation rows and biased the MSP downwards).
  fit.est <- lm(log(user) ~ season + yr + workingday*type*hr + weathersit + atemp + hum, data = bike.est)
  y_hat <- predict(fit.est, bike.val[, c(3, 4, 6, 9, 10, 12, 13, 16)])
  pred_error <- log(bike.val[, 15]) - y_hat
  # Store coefficient j of this repetition into the global vector beta<j>,
  # replacing the original 29 copy-pasted assignment lines.
  cf <- coef(fit.est)
  for (j in 0:28) {
    bj <- get(paste0("beta", j))
    bj[i] <- cf[j + 1]
    assign(paste0("beta", j), bj)
  }
  MSP[i] <- sum(pred_error^2) / 1000  # 1000 = nrow(bike.val)
}
summary(MSP) # distribution of the mean squared prediction error over the 100 repetitions
##############0.4610 0.5082 0.5328 0.5337 0.5552 0.6285 |
#' @rdname ml-tuning
#' @param train_ratio Ratio between train and validation data. Must be between 0 and 1. Default: 0.75
#' @export
ml_train_validation_split <- function(x, estimator = NULL, estimator_param_maps = NULL,
                                      evaluator = NULL, train_ratio = 0.75,
                                      collect_sub_models = FALSE, parallelism = 1,
                                      seed = NULL, uid = random_string("train_validation_split_"),
                                      ...) {
  # S3 generic: dispatches on the class of `x` (spark_connection,
  # ml_pipeline, or tbl_spark methods are defined below).
  UseMethod("ml_train_validation_split")
}
#' @export
ml_train_validation_split.spark_connection <- function(x, estimator = NULL, estimator_param_maps = NULL,
                                                       evaluator = NULL, train_ratio = 0.75,
                                                       collect_sub_models = FALSE, parallelism = 1,
                                                       seed = NULL, uid = random_string("train_validation_split_"),
                                                       ...) {
  # Validate and coerce the user-supplied arguments.
  .args <- ml_validator_train_validation_split(list(
    estimator = estimator,
    estimator_param_maps = estimator_param_maps,
    evaluator = evaluator,
    train_ratio = train_ratio,
    collect_sub_models = collect_sub_models,
    parallelism = parallelism,
    seed = seed
  ))
  # Construct the Spark-side TrainValidationSplit object.
  jobj <- ml_new_validator(
    x, "org.apache.spark.ml.tuning.TrainValidationSplit", uid,
    .args[["estimator"]], .args[["evaluator"]], .args[["estimator_param_maps"]], .args[["seed"]]
  )
  jobj <- invoke(jobj, "setTrainRatio", .args[["train_ratio"]])
  # These two parameters are only applied on Spark >= 2.3.0 (handled by
  # maybe_set_param, with defaults FALSE and 1 respectively).
  jobj <- maybe_set_param(jobj, "setCollectSubModels", .args[["collect_sub_models"]], "2.3.0", FALSE)
  jobj <- maybe_set_param(jobj, "setParallelism", .args[["parallelism"]], "2.3.0", 1)
  new_ml_train_validation_split(jobj)
}
#' @export
ml_train_validation_split.ml_pipeline <- function(x, estimator = NULL, estimator_param_maps = NULL,
                                                  evaluator = NULL, train_ratio = 0.75,
                                                  collect_sub_models = FALSE, parallelism = 1,
                                                  seed = NULL, uid = random_string("train_validation_split_"),
                                                  ...) {
  # Build the tuning stage against the pipeline's connection, then append
  # it to the pipeline.
  tvs <- ml_train_validation_split.spark_connection(
    x = spark_connection(x),
    estimator = estimator,
    estimator_param_maps = estimator_param_maps,
    evaluator = evaluator,
    train_ratio = train_ratio,
    collect_sub_models = collect_sub_models,
    parallelism = parallelism,
    seed = seed,
    uid = uid,
    ...
  )
  ml_add_stage(x, tvs)
}
#' @export
ml_train_validation_split.tbl_spark <- function(x, estimator = NULL, estimator_param_maps = NULL,
                                                evaluator = NULL, train_ratio = 0.75,
                                                collect_sub_models = FALSE, parallelism = 1,
                                                seed = NULL, uid = random_string("train_validation_split_"),
                                                ...) {
  # Build the tuning stage against the table's connection, then fit it
  # immediately on the supplied data.
  tvs <- ml_train_validation_split.spark_connection(
    x = spark_connection(x),
    estimator = estimator,
    estimator_param_maps = estimator_param_maps,
    evaluator = evaluator,
    train_ratio = train_ratio,
    collect_sub_models = collect_sub_models,
    parallelism = parallelism,
    seed = seed,
    uid = uid,
    ...
  )
  ml_fit(tvs, x)
}
# Validate the argument list shared by all ml_train_validation_split
# methods: run the common tuning checks, then coerce train_ratio to a
# scalar double. Returns the checked argument list.
ml_validator_train_validation_split <- function(.args) {
  checked <- validate_args_tuning(.args)
  checked[["train_ratio"]] <- cast_scalar_double(checked[["train_ratio"]])
  checked
}
# Wrap a Spark TrainValidationSplit jobj as an `ml_train_validation_split`,
# caching the train ratio read back from the Spark object.
new_ml_train_validation_split <- function(jobj) {
  ratio <- invoke(jobj, "getTrainRatio")
  new_ml_tuning(
    jobj,
    train_ratio = ratio,
    subclass = "ml_train_validation_split"
  )
}
# Wrap a fitted Spark TrainValidationSplitModel jobj as an R
# `ml_train_validation_split_model`.
new_ml_train_validation_split_model <- function(jobj) {
  # Metric values computed on the validation split, one per param map.
  validation_metrics <- invoke(jobj, "validationMetrics")
  # Metric name from the evaluator, as a symbol so it can be spliced into
  # the dplyr verbs below with `!!` / `:=`.
  metric_name <- jobj %>%
    invoke("getEvaluator") %>%
    invoke("getMetricName") %>%
    rlang::sym()
  new_ml_tuning_model(
    jobj,
    train_ratio = invoke(jobj, "getTrainRatio"),
    metric_name = metric_name,
    validation_metrics = validation_metrics,
    # Data frame with one row per candidate param map; the metric column
    # is placed first.
    validation_metrics_df = ml_get_estimator_param_maps(jobj) %>%
      param_maps_to_df() %>%
      dplyr::mutate(!!metric_name := validation_metrics) %>%
      dplyr::select(!!metric_name, dplyr::everything()),
    # sub_models is a thunk evaluated on demand; try_null presumably maps a
    # Spark-side failure (e.g. sub-models not collected) to NULL -- confirm.
    sub_models = function() {
      try_null(jobj %>%
        invoke("subModels") %>%
        purrr::map(ml_constructor_dispatch)
      )
    },
    subclass = "ml_train_validation_split_model")
}
#' @export
print.ml_train_validation_split <- function(x, ...) {
  print_tuning_info(x, "tvs")
  # Print methods should return their argument invisibly (R convention,
  # see ?print), so the object flows through pipes and assignments.
  invisible(x)
}
#' @export
print.ml_train_validation_split_model <- function(x, ...) {
  print_tuning_info(x, "tvs")
  print_best_model(x)
  # Return the printed object invisibly, per the print-method convention.
  invisible(x)
}
#' @export
summary.ml_train_validation_split_model <- function(object, ...) {
  print_tuning_summary(object, "tvs")
  # Return the object invisibly so the call is pipe-friendly.
  invisible(object)
}
| /R/ml_tuning_train_validation_split.R | permissive | awblocker/sparklyr | R | false | false | 4,955 | r | #' @rdname ml-tuning
#' @param train_ratio Ratio between train and validation data. Must be between 0 and 1. Default: 0.75
#' @export
ml_train_validation_split <- function(x, estimator = NULL, estimator_param_maps = NULL,
evaluator = NULL, train_ratio = 0.75,
collect_sub_models = FALSE, parallelism = 1,
seed = NULL, uid = random_string("train_validation_split_"),
...) {
UseMethod("ml_train_validation_split")
}
#' @export
ml_train_validation_split.spark_connection <- function(x, estimator = NULL, estimator_param_maps = NULL,
evaluator = NULL, train_ratio = 0.75,
collect_sub_models = FALSE, parallelism = 1,
seed = NULL, uid = random_string("train_validation_split_"),
...) {
.args <- list(
estimator = estimator,
estimator_param_maps = estimator_param_maps,
evaluator = evaluator,
train_ratio = train_ratio,
collect_sub_models = collect_sub_models,
parallelism = parallelism,
seed = seed
) %>%
ml_validator_train_validation_split()
ml_new_validator(
x, "org.apache.spark.ml.tuning.TrainValidationSplit", uid,
.args[["estimator"]], .args[["evaluator"]], .args[["estimator_param_maps"]], .args[["seed"]]
) %>%
invoke("setTrainRatio", .args[["train_ratio"]]) %>%
maybe_set_param("setCollectSubModels", .args[["collect_sub_models"]], "2.3.0", FALSE) %>%
maybe_set_param("setParallelism", .args[["parallelism"]], "2.3.0", 1) %>%
new_ml_train_validation_split()
}
#' @export
ml_train_validation_split.ml_pipeline <- function(x, estimator = NULL, estimator_param_maps = NULL,
evaluator = NULL, train_ratio = 0.75,
collect_sub_models = FALSE, parallelism = 1,
seed = NULL, uid = random_string("train_validation_split_"),
...) {
stage <- ml_train_validation_split.spark_connection(
x = spark_connection(x),
estimator = estimator,
estimator_param_maps = estimator_param_maps,
evaluator = evaluator,
train_ratio = train_ratio,
collect_sub_models = collect_sub_models,
parallelism = parallelism,
seed = seed,
uid = uid,
...
)
ml_add_stage(x, stage)
}
#' @export
ml_train_validation_split.tbl_spark <- function(x, estimator = NULL, estimator_param_maps = NULL,
evaluator = NULL, train_ratio = 0.75,
collect_sub_models = FALSE, parallelism = 1,
seed = NULL, uid = random_string("train_validation_split_"),
...) {
stage <- ml_train_validation_split.spark_connection(
x = spark_connection(x),
estimator = estimator,
estimator_param_maps = estimator_param_maps,
evaluator = evaluator,
train_ratio = train_ratio,
collect_sub_models = collect_sub_models,
parallelism = parallelism,
seed = seed,
uid = uid,
...
)
stage %>%
ml_fit(x)
}
ml_validator_train_validation_split <- function(.args) {
.args <- validate_args_tuning(.args)
.args[["train_ratio"]] <- cast_scalar_double(.args[["train_ratio"]])
.args
}
new_ml_train_validation_split <- function(jobj) {
new_ml_tuning(
jobj,
train_ratio = invoke(jobj, "getTrainRatio"),
subclass = "ml_train_validation_split"
)
}
new_ml_train_validation_split_model <- function(jobj) {
validation_metrics <- invoke(jobj, "validationMetrics")
metric_name <- jobj %>%
invoke("getEvaluator") %>%
invoke("getMetricName") %>%
rlang::sym()
new_ml_tuning_model(
jobj,
train_ratio = invoke(jobj, "getTrainRatio"),
metric_name = metric_name,
validation_metrics = validation_metrics,
validation_metrics_df = ml_get_estimator_param_maps(jobj) %>%
param_maps_to_df() %>%
dplyr::mutate(!!metric_name := validation_metrics) %>%
dplyr::select(!!metric_name, dplyr::everything()),
sub_models = function() {
try_null(jobj %>%
invoke("subModels") %>%
purrr::map(ml_constructor_dispatch)
)
},
subclass = "ml_train_validation_split_model")
}
#' @export
print.ml_train_validation_split <- function(x, ...) {
print_tuning_info(x, "tvs")
}
#' @export
print.ml_train_validation_split_model <- function(x, ...) {
print_tuning_info(x, "tvs")
print_best_model(x)
}
#' @export
summary.ml_train_validation_split_model <- function(object, ...) {
print_tuning_summary(object, "tvs")
}
|
library(RSNNS)
# Helper functions
# Plot the evolution of the training (black) and test (red) MSE over the
# training cycles. `iterativeErrors` is a data frame with one row per cycle;
# column 1 is the train MSE and column 2 the test MSE.
graficaError <- function(iterativeErrors){
  ciclos <- seq_len(nrow(iterativeErrors))
  # FIX: the title string was mojibake ("Evoluci???n del error").
  plot(ciclos, iterativeErrors[, 1], type = "l", main = "Evolución del error",
       ylab = "MSE (3 salidas)", xlab = "Ciclos",
       ylim = c(min(iterativeErrors), max(iterativeErrors)))
  lines(ciclos, iterativeErrors[, 2], col = "red")
}
# Overall accuracy of a confusion matrix: correctly classified cases
# (the diagonal) divided by the total number of cases.
accuracy <- function(cm) {
  correct <- sum(diag(cm))
  correct / sum(cm)
}
set.seed(2)
# Network hyperparameter selection
topologia <- c(20,30)
razonAprendizaje <- 0.02
ciclosMaximos <- 2500
# Data loading
# change to fold 2 and 3 for the other cross-validation runs
fold <- 1
trainSet <- read.csv(paste("DatosProcesados/Modelo_",fold,"/datosNubes_entrenamiento.txt",sep=""),dec=".",sep=",",header = T)
testSet <- read.csv(paste("DatosProcesados/Modelo_",fold,"/datosNubes_test.txt",sep=""),dec=".",sep=",",header = T)
# Output selection: column number of the target (last column)
nTarget <- ncol(trainSet)
# Split the inputs from the output
trainInput <- trainSet[,-nTarget]
testInput <- testSet[,-nTarget]
# One-hot encode the discrete target (matrix with one column per label,
# a 1 in each row in the column of its class)
trainTarget <- decodeClassLabels(trainSet[,nTarget])
testTarget <- decodeClassLabels(testSet[,nTarget])
# Convert the input data frames to matrices, as required by mlp:
trainInput <- as.matrix(trainInput)
testInput <- as.matrix(testInput )
## Build a file-name stem that encodes the hyperparameters
fileID <- paste("fold_",fold,"_topol",paste(topologia,collapse="-"),"_ra",
                razonAprendizaje,"_iter",ciclosMaximos,sep="")
# Run the training and build the model
model <- mlp(x= trainInput,
             y= trainTarget,
             inputsTest= testInput,
             targetsTest= testTarget,
             size= topologia,
             maxit=ciclosMaximos,
             learnFuncParams=c(razonAprendizaje),
             shufflePatterns = F
)
# Error-evolution plot
#
#plotIterativeError(model)
# Per-cycle train and test MSE (errors divided by the number of patterns)
iterativeErrors <- data.frame(MSETrain= (model$IterativeFitError/nrow(trainSet)),
                              MSETest= (model$IterativeTestError/nrow(testSet)))
graficaError(iterativeErrors)
# Generate the raw predictions (real-valued network outputs)
trainPred <- predict(model,trainInput)
testPred <- predict(model,testInput)
# Set column names "cieloDespejado" "multinube" "nube"
colnames(testPred)<-colnames(testTarget)
colnames(trainPred)<-colnames(testTarget)
# Map the three real-valued columns to class 1,2,3 via the column holding
# the maximum value in each row.
trainPredClass<-as.factor(apply(trainPred,1,which.max))
testPredClass<-as.factor(apply(testPred,1,which.max))
# Relabel "1", "2", "3" as "cieloDespejado" "multinube" "nube".
# NOTE(review): this assumes all three classes appear among the predictions;
# if one class is never predicted the level/label mapping shifts -- confirm.
levels(testPredClass)<-c("cieloDespejado", "multinube","nube")
levels(trainPredClass)<-c("cieloDespejado", "multinube","nube")
# Confusion matrices (targets first, predictions second)
trainCm <- confusionMatrix(trainTarget,trainPred)
testCm <- confusionMatrix(testTarget, testPred)
trainCm
testCm
# Vector of accuracies
accuracies <- c(TrainAccuracy= accuracy(trainCm), TestAccuracy= accuracy(testCm))
accuracies
# Final MSE on train and test
MSEtrain <-sum((trainTarget - trainPred)^2)/nrow(trainSet)
MSEtest <-sum((testTarget - testPred)^2)/nrow(testSet)
MSEtrain
MSEtest
# Saving results
# The model itself
saveRDS(model, paste("nnet_",fileID,".rds",sep=""))
# Accuracy rates
write.csv(accuracies, paste("finalAccuracies_",fileID,".csv",sep=""))
# Evolution of the MSE errors
write.csv(iterativeErrors,paste("iterativeErrors_",fileID,".csv",sep=""))
# Expected test outputs with the class label (last column of the test file)
write.csv( testSet[,nTarget] , paste("TestTarget_",fileID,".csv",sep=""), row.names = TRUE)
# Expected test outputs one-hot encoded in three columns
write.csv(testTarget , paste("TestTargetCod_",fileID,".csv",sep=""), row.names = TRUE)
# Raw test outputs (real numbers)
write.csv(testPred , paste("TestRawOutputs_",fileID,".csv",sep=""), row.names = TRUE)
# Test outputs with the class label
write.csv(testPredClass, paste("TestClassOutputs_",fileID,".csv",sep=""),row.names = TRUE)
# Confusion matrices
write.csv(trainCm, paste("trainCm_",fileID,".csv",sep=""))
write.csv(testCm, paste("testCm_",fileID,".csv",sep=""))
| /Perceptrón Multicapa/Enunciado (no incluir)/PM_Clas20-21.R | no_license | clau-hb9/RNA_P2 | R | false | false | 4,476 | r | library(RSNNS)
# Funciones
graficaError <- function(iterativeErrors){
plot(1:nrow(iterativeErrors),iterativeErrors[,1], type="l", main="Evoluci???n del error",
ylab="MSE (3 salidas)",xlab="Ciclos",
ylim=c(min(iterativeErrors),max(iterativeErrors)))
lines(1:nrow(iterativeErrors),iterativeErrors[,2], col="red")
}
accuracy <- function (cm) sum(diag(cm))/sum(cm)
set.seed(2)
#SELECCION DE LOS HIPERPARAMETROS DE LA RED
topologia <- c(20,30)
razonAprendizaje <- 0.02
ciclosMaximos <- 2500
#CARGA DE LOS DATOS
# cambiar a fold 2 y 3
fold <- 1
trainSet <- read.csv(paste("DatosProcesados/Modelo_",fold,"/datosNubes_entrenamiento.txt",sep=""),dec=".",sep=",",header = T)
testSet <- read.csv(paste("DatosProcesados/Modelo_",fold,"/datosNubes_test.txt",sep=""),dec=".",sep=",",header = T)
#SELECCION DE LA SALIDA. Num de columna del target.
nTarget <- ncol(trainSet)
#SEPARAR ENTRADA DE LA SALIDA
trainInput <- trainSet[,-nTarget]
testInput <- testSet[,-nTarget]
#TRANSFORMAR LA SALIDA DISCRETA A NUMERICA (Matriz con columnas, una por etiqueta, hay un 1 por cada fila en la columna que pertenece a la clase)
trainTarget <- decodeClassLabels(trainSet[,nTarget])
testTarget <- decodeClassLabels(testSet[,nTarget])
# transformar las entradas de dataframe en matrix para mlp:
trainInput <- as.matrix(trainInput)
testInput <- as.matrix(testInput )
## generar un nombre de fichero que incluya los hiperpar???metros
fileID <- paste("fold_",fold,"_topol",paste(topologia,collapse="-"),"_ra",
razonAprendizaje,"_iter",ciclosMaximos,sep="")
#EJECUCION DEL APRENDIZAJE Y GENERACION DEL MODELO
model <- mlp(x= trainInput,
y= trainTarget,
inputsTest= testInput,
targetsTest= testTarget,
size= topologia,
maxit=ciclosMaximos,
learnFuncParams=c(razonAprendizaje),
shufflePatterns = F
)
#GRAFICO DE LA EVOLUCION DEL ERROR
#
#plotIterativeError(model)
#TABLA CON LOS ERRORES POR CICLO de train y test correspondientes a las 4 salidas
iterativeErrors <- data.frame(MSETrain= (model$IterativeFitError/nrow(trainSet)),
MSETest= (model$IterativeTestError/nrow(testSet)))
graficaError(iterativeErrors)
#GENERAR LAS PREDICCIONES en bruto (valores reales)
trainPred <- predict(model,trainInput)
testPred <- predict(model,testInput)
#poner nombres de columnas "cieloDespejado" "multinube" "nube"
colnames(testPred)<-colnames(testTarget)
colnames(trainPred)<-colnames(testTarget)
# transforma las tres columnas reales en la clase 1,2,3 segun el maximo de los tres valores.
trainPredClass<-as.factor(apply(trainPred,1,which.max))
testPredClass<-as.factor(apply(testPred,1,which.max))
#transforma las etiquetas "1", "2", "3" en "cieloDespejado" "multinube" "nube"
levels(testPredClass)<-c("cieloDespejado", "multinube","nube")
levels(trainPredClass)<-c("cieloDespejado", "multinube","nube")
#CALCULO DE LAS MATRICES DE CONFUSION
trainCm <- confusionMatrix(trainTarget,trainPred)
testCm <- confusionMatrix(testTarget, testPred)
trainCm
testCm
#VECTOR DE PRECISIONES
accuracies <- c(TrainAccuracy= accuracy(trainCm), TestAccuracy= accuracy(testCm))
accuracies
# calcular errores finales MSE
MSEtrain <-sum((trainTarget - trainPred)^2)/nrow(trainSet)
MSEtest <-sum((testTarget - testPred)^2)/nrow(testSet)
MSEtrain
MSEtest
#GUARDANDO RESULTADOS
#MODELO
saveRDS(model, paste("nnet_",fileID,".rds",sep=""))
#tasa de aciertos (accuracy)
write.csv(accuracies, paste("finalAccuracies_",fileID,".csv",sep=""))
#Evoluci???n de los errores MSE
write.csv(iterativeErrors,paste("iterativeErrors_",fileID,".csv",sep=""))
#salidas esperadas de test con la clase (Target) (???ltima columna del fichero de test)
write.csv( testSet[,nTarget] , paste("TestTarget_",fileID,".csv",sep=""), row.names = TRUE)
#salidas esperadas de test codificadas en tres columnas (Target)
write.csv(testTarget , paste("TestTargetCod_",fileID,".csv",sep=""), row.names = TRUE)
#salidas de test en bruto (nums reales)
write.csv(testPred , paste("TestRawOutputs_",fileID,".csv",sep=""), row.names = TRUE)
#salidas de test con la clase
write.csv(testPredClass, paste("TestClassOutputs_",fileID,".csv",sep=""),row.names = TRUE)
# matrices de confusi???n
write.csv(trainCm, paste("trainCm_",fileID,".csv",sep=""))
write.csv(testCm, paste("testCm_",fileID,".csv",sep=""))
|
library(urca)
data("Raotbl3")
# NOTE(review): attach() puts the data set's columns on the search path;
# prefer with()/explicit indexing in new code.
attach(Raotbl3)
# Quarterly series lc from Raotbl3, 1966Q4-1991Q2 (presumably log real
# consumption -- confirm against ?Raotbl3).
lc<-ts(lc, start=c(1966,4), end=c(1991,2), frequency = 4)
# Phillips-Perron unit root test: Z-tau statistic, deterministic trend,
# "long" lag truncation.
pp<-ur.pp(lc, type="Z-tau", model = "trend",
          lags = "long")
pp@teststat
summary(pp)
# Augmented Dickey-Fuller test with trend; lag order selected by AIC.
# NOTE(review): `df` shadows stats::df (the F density) for the session.
df<-ur.df(y=lc, type="trend", selectlags = "AIC")
summary(df)
| /Material/Raotbl3_pptest.r | no_license | susanasierrac/Economia_Experimental | R | false | false | 261 | r | library(urca)
data("Raotbl3")
attach(Raotbl3)
lc<-ts(lc, start=c(1966,4), end=c(1991,2), frequency = 4)
pp<-ur.pp(lc, type="Z-tau", model = "trend",
lags = "long")
pp@teststat
summary(pp)
df<-ur.df(y=lc, type="trend", selectlags = "AIC")
summary(df)
|
## Two-sided permutation test for a difference in the mean of `y` between
## groups `g1` and `g2`.
##  y:     numeric vector of observations
##  group: vector of group labels, same length as y
##  g1:    label of the first group
##  g2:    label of the second group
##  n:     number of random permutations (default 10000)
## Returns a list with the p-value (`p`), the permutation test statistics
## (`permStats`) and the observed statistic (`origStat`); NULL (with a
## message) on invalid input.
permTest <- function(y, group, g1, g2, n = 10000) {
  # Validate the inputs before doing any work.
  if (length(y) != length(group)) {
    message("ERROR: y and group have different lengths")
    return()
  }
  if (!any(group == g1)) {
    message("no samples in g1")
    return()
  }
  if (!any(group == g2)) {
    message("no samples in g2")
    return()
  }
  # Keep only the two groups under test.
  keep <- group %in% c(g1, g2)
  gsub <- group[keep]
  ysub <- y[keep]
  # Test statistic: difference of group means (g2 minus g1).
  testStat <- function(var, gr) {
    mean(var[gr == g2]) - mean(var[gr == g1])
  }
  origStat <- testStat(ysub, gsub)
  # Test statistic under n random relabellings of the group vector.
  # vapply (not sapply) guarantees a numeric vector result.
  permStats <- vapply(seq_len(n),
                      function(i) testStat(ysub, sample(gsub)),
                      numeric(1))
  # Two-sided p-value. FIX: use >= so that permutations tying the observed
  # statistic count as "at least as extreme", and apply the add-one
  # correction (b + 1) / (n + 1), which keeps p > 0 and removes the
  # downward bias of mean(abs(permStats) > abs(origStat))
  # (Phipson & Smyth, 2010).
  p <- (sum(abs(permStats) >= abs(origStat)) + 1) / (n + 1)
  list(p = p, permStats = permStats, origStat = origStat)
}
} | /permTest.R | no_license | hpykala/statistical-inference-project | R | false | false | 1,508 | r | ##Function to perform two sided permutation test to test if the mean of variable y is different
##between groups g1 and g2
## y: vector containing samples for the variable
## group: vector containing group values for y
## g1: value of the first group
## g2: value of the second group
permTest <- function(y, group, g1, g2, n = 10000) {
# test validity of parameters
if(length(y) != length(group)) {
message("ERROR: y and group have different lengths")
return()
}
if(!any(group == g1)) {
message("no samples in g1")
return()
}
if(!any(group == g2)) {
message("no samples in g2")
return()
}
#subset only the groups being tested
gsub <- group[group %in% c(g1,g2)]
ysub <- y[group %in% c(g1,g2)]
# test statistic is the difference of the mean of y in groups g1 and g2
testStat <- function(var,gr) {mean(var[gr==g2]) - mean(var[gr == g1])}
#original permutation
origStat <- testStat(ysub,gsub)
#test statistic for n permutations
permStats <- sapply(1:n, function(i) testStat(ysub, sample(gsub)))
#p value for two sided test is calculated as the percentage of permutations
#having more extreme value than the original regardless of the sign
p <- mean(abs(permStats) > abs(origStat))
# return p-value, test statistics of permutations and the original test statistic
list(p = p, permStats = permStats, origStat = origStat)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_vector_laplace_diag}
\alias{tfd_vector_laplace_diag}
\title{The vectorization of the Laplace distribution on \code{R^k}}
\usage{
tfd_vector_laplace_diag(loc = NULL, scale_diag = NULL,
scale_identity_multiplier = NULL, validate_args = FALSE,
allow_nan_stats = TRUE, name = "VectorLaplaceDiag")
}
\arguments{
\item{loc}{Floating-point Tensor. If this is set to NULL, loc is
implicitly 0. When specified, may have shape \code{[B1, ..., Bb, k]} where
b >= 0 and k is the event size.}
\item{scale_diag}{Non-zero, floating-point Tensor representing a diagonal
matrix added to scale. May have shape \code{[B1, ..., Bb, k]}, b >= 0,
and characterizes b-batches of k x k diagonal matrices added to
scale. When both scale_identity_multiplier and scale_diag are
NULL then scale is the Identity.}
\item{scale_identity_multiplier}{Non-zero, floating-point Tensor representing
a scaled-identity-matrix added to scale. May have shape
\code{[B1, ..., Bb]}, b >= 0, and characterizes b-batches of scaled
k x k identity matrices added to scale. When both
scale_identity_multiplier and scale_diag are NULL then scale is
the Identity.}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\value{
a distribution instance.
}
\description{
The vector laplace distribution is defined over \code{R^k}, and parameterized by
a (batch of) length-k loc vector (the means) and a (batch of) k x k
scale matrix: \code{covariance = 2 * scale @ scale.T}, where @ denotes
matrix-multiplication.
}
\details{
Mathematical Details
The probability density function (pdf) is,\preformatted{pdf(x; loc, scale) = exp(-||y||_1) / Z
y = inv(scale) @ (x - loc)
Z = 2**k |det(scale)|
}
where:
\itemize{
\item \code{loc} is a vector in \code{R^k},
\item \code{scale} is a linear operator in \code{R^{k x k}}, \code{cov = scale @ scale.T},
\item \code{Z} denotes the normalization constant, and,
\item \code{||y||_1} denotes the \code{l1} norm of \code{y}, \code{sum_i |y_i|}.
}
A (non-batch) \code{scale} matrix is:\preformatted{scale = diag(scale_diag + scale_identity_multiplier * ones(k))
}
where:
\itemize{
\item \code{scale_diag.shape = [k]}, and,
\item \code{scale_identity_multiplier.shape = []}.
Additional leading dimensions (if any) will index batches.
If both \code{scale_diag} and \code{scale_identity_multiplier} are \code{NULL}, then
\code{scale} is the Identity matrix.
}
About VectorLaplace and Vector distributions in TensorFlow
The VectorLaplace is a non-standard distribution that has useful properties.
The marginals Y_1, ..., Y_k are \emph{not} Laplace random variables, due to
the fact that the sum of Laplace random variables is not Laplace.
Instead, Y is a vector whose components are linear combinations of Laplace
random variables. Thus, Y lives in the vector space generated by vectors
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting loc and scale), while preserving some properties of
the Laplace distribution. In particular, the tails of Y_i will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of Y_i is the convolution of
the pdf of k independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
}
\seealso{
For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
Other distributions: \code{\link{tfd_autoregressive}},
\code{\link{tfd_batch_reshape}},
\code{\link{tfd_bernoulli}}, \code{\link{tfd_beta}},
\code{\link{tfd_binomial}},
\code{\link{tfd_categorical}}, \code{\link{tfd_cauchy}},
\code{\link{tfd_chi2}}, \code{\link{tfd_chi}},
\code{\link{tfd_cholesky_lkj}},
\code{\link{tfd_deterministic}},
\code{\link{tfd_dirichlet_multinomial}},
\code{\link{tfd_dirichlet}}, \code{\link{tfd_empirical}},
\code{\link{tfd_exponential}},
\code{\link{tfd_gamma_gamma}}, \code{\link{tfd_gamma}},
\code{\link{tfd_gaussian_process_regression_model}},
\code{\link{tfd_gaussian_process}},
\code{\link{tfd_geometric}}, \code{\link{tfd_gumbel}},
\code{\link{tfd_half_cauchy}},
\code{\link{tfd_half_normal}},
\code{\link{tfd_hidden_markov_model}},
\code{\link{tfd_horseshoe}},
\code{\link{tfd_independent}},
\code{\link{tfd_inverse_gamma}},
\code{\link{tfd_inverse_gaussian}},
\code{\link{tfd_joint_distribution_named}},
\code{\link{tfd_joint_distribution_sequential}},
\code{\link{tfd_kumaraswamy}}, \code{\link{tfd_laplace}},
\code{\link{tfd_linear_gaussian_state_space_model}},
\code{\link{tfd_lkj}}, \code{\link{tfd_log_normal}},
\code{\link{tfd_logistic}},
\code{\link{tfd_mixture_same_family}},
\code{\link{tfd_mixture}}, \code{\link{tfd_multinomial}},
\code{\link{tfd_multivariate_normal_diag_plus_low_rank}},
\code{\link{tfd_multivariate_normal_diag}},
\code{\link{tfd_multivariate_normal_full_covariance}},
\code{\link{tfd_multivariate_normal_linear_operator}},
\code{\link{tfd_multivariate_normal_tri_l}},
\code{\link{tfd_multivariate_student_t_linear_operator}},
\code{\link{tfd_negative_binomial}},
\code{\link{tfd_normal}},
\code{\link{tfd_one_hot_categorical}},
\code{\link{tfd_pareto}},
\code{\link{tfd_poisson_log_normal_quadrature_compound}},
\code{\link{tfd_poisson}}, \code{\link{tfd_quantized}},
\code{\link{tfd_relaxed_bernoulli}},
\code{\link{tfd_relaxed_one_hot_categorical}},
\code{\link{tfd_sample_distribution}},
\code{\link{tfd_sinh_arcsinh}},
\code{\link{tfd_student_t_process}},
\code{\link{tfd_student_t}},
\code{\link{tfd_transformed_distribution}},
\code{\link{tfd_triangular}},
\code{\link{tfd_truncated_normal}},
\code{\link{tfd_uniform}},
\code{\link{tfd_variational_gaussian_process}},
\code{\link{tfd_vector_diffeomixture}},
\code{\link{tfd_vector_exponential_diag}},
\code{\link{tfd_vector_exponential_linear_operator}},
\code{\link{tfd_vector_laplace_linear_operator}},
\code{\link{tfd_vector_sinh_arcsinh_diag}},
\code{\link{tfd_von_mises_fisher}},
\code{\link{tfd_von_mises}}, \code{\link{tfd_wishart}},
\code{\link{tfd_zipf}}
}
\concept{distributions}
| /man/tfd_vector_laplace_diag.Rd | permissive | iMarcello/tfprobability | R | false | true | 6,797 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distributions.R
\name{tfd_vector_laplace_diag}
\alias{tfd_vector_laplace_diag}
\title{The vectorization of the Laplace distribution on \code{R^k}}
\usage{
tfd_vector_laplace_diag(loc = NULL, scale_diag = NULL,
scale_identity_multiplier = NULL, validate_args = FALSE,
allow_nan_stats = TRUE, name = "VectorLaplaceDiag")
}
\arguments{
\item{loc}{Floating-point Tensor. If this is set to NULL, loc is
implicitly 0. When specified, may have shape \code{[B1, ..., Bb, k]} where
b >= 0 and k is the event size.}
\item{scale_diag}{Non-zero, floating-point Tensor representing a diagonal
matrix added to scale. May have shape \code{[B1, ..., Bb, k]}, b >= 0,
and characterizes b-batches of k x k diagonal matrices added to
scale. When both scale_identity_multiplier and scale_diag are
NULL then scale is the Identity.}
\item{scale_identity_multiplier}{Non-zero, floating-point Tensor representing
a scaled-identity-matrix added to scale. May have shape
\code{[B1, ..., Bb]}, b >= 0, and characterizes b-batches of scaled
k x k identity matrices added to scale. When both
scale_identity_multiplier and scale_diag are NULL then scale is
the Identity.}
\item{validate_args}{Logical, default FALSE. When TRUE distribution parameters are checked
for validity despite possibly degrading runtime performance. When FALSE invalid inputs may
silently render incorrect outputs. Default value: FALSE.}
\item{allow_nan_stats}{Logical, default TRUE. When TRUE, statistics (e.g., mean, mode, variance)
use the value NaN to indicate the result is undefined. When FALSE, an exception is raised if
one or more of the statistic's batch members are undefined.}
\item{name}{name prefixed to Ops created by this class.}
}
\value{
a distribution instance.
}
\description{
The vector laplace distribution is defined over \code{R^k}, and parameterized by
a (batch of) length-k loc vector (the means) and a (batch of) k x k
scale matrix: \code{covariance = 2 * scale @ scale.T}, where @ denotes
matrix-multiplication.
}
\details{
Mathematical Details
The probability density function (pdf) is,\preformatted{pdf(x; loc, scale) = exp(-||y||_1) / Z
y = inv(scale) @ (x - loc)
Z = 2**k |det(scale)|
}
where:
\itemize{
\item \code{loc} is a vector in \code{R^k},
\item \code{scale} is a linear operator in \code{R^{k x k}}, \code{cov = scale @ scale.T},
\item \code{Z} denotes the normalization constant, and,
\item \code{||y||_1} denotes the \code{l1} norm of \code{y}, \code{sum_i |y_i|}.
}
A (non-batch) \code{scale} matrix is:\preformatted{scale = diag(scale_diag + scale_identity_multiplier * ones(k))
}
where:
\itemize{
\item \code{scale_diag.shape = [k]}, and,
\item \code{scale_identity_multiplier.shape = []}.
Additional leading dimensions (if any) will index batches.
If both \code{scale_diag} and \code{scale_identity_multiplier} are \code{NULL}, then
\code{scale} is the Identity matrix.
}
About VectorLaplace and Vector distributions in TensorFlow
The VectorLaplace is a non-standard distribution that has useful properties.
The marginals Y_1, ..., Y_k are \emph{not} Laplace random variables, due to
the fact that the sum of Laplace random variables is not Laplace.
Instead, Y is a vector whose components are linear combinations of Laplace
random variables. Thus, Y lives in the vector space generated by vectors
of Laplace distributions. This allows the user to decide the mean and
covariance (by setting loc and scale), while preserving some properties of
the Laplace distribution. In particular, the tails of Y_i will be (up to
polynomial factors) exponentially decaying.
To see this last statement, note that the pdf of Y_i is the convolution of
the pdf of k independent Laplace random variables. One can then show by
induction that distributions with exponential (up to polynomial factors) tails
are closed under convolution.
}
\seealso{
For usage examples see e.g. \code{\link[=tfd_sample]{tfd_sample()}}, \code{\link[=tfd_log_prob]{tfd_log_prob()}}, \code{\link[=tfd_mean]{tfd_mean()}}.
Other distributions: \code{\link{tfd_autoregressive}},
\code{\link{tfd_batch_reshape}},
\code{\link{tfd_bernoulli}}, \code{\link{tfd_beta}},
\code{\link{tfd_binomial}},
\code{\link{tfd_categorical}}, \code{\link{tfd_cauchy}},
\code{\link{tfd_chi2}}, \code{\link{tfd_chi}},
\code{\link{tfd_cholesky_lkj}},
\code{\link{tfd_deterministic}},
\code{\link{tfd_dirichlet_multinomial}},
\code{\link{tfd_dirichlet}}, \code{\link{tfd_empirical}},
\code{\link{tfd_exponential}},
\code{\link{tfd_gamma_gamma}}, \code{\link{tfd_gamma}},
\code{\link{tfd_gaussian_process_regression_model}},
\code{\link{tfd_gaussian_process}},
\code{\link{tfd_geometric}}, \code{\link{tfd_gumbel}},
\code{\link{tfd_half_cauchy}},
\code{\link{tfd_half_normal}},
\code{\link{tfd_hidden_markov_model}},
\code{\link{tfd_horseshoe}},
\code{\link{tfd_independent}},
\code{\link{tfd_inverse_gamma}},
\code{\link{tfd_inverse_gaussian}},
\code{\link{tfd_joint_distribution_named}},
\code{\link{tfd_joint_distribution_sequential}},
\code{\link{tfd_kumaraswamy}}, \code{\link{tfd_laplace}},
\code{\link{tfd_linear_gaussian_state_space_model}},
\code{\link{tfd_lkj}}, \code{\link{tfd_log_normal}},
\code{\link{tfd_logistic}},
\code{\link{tfd_mixture_same_family}},
\code{\link{tfd_mixture}}, \code{\link{tfd_multinomial}},
\code{\link{tfd_multivariate_normal_diag_plus_low_rank}},
\code{\link{tfd_multivariate_normal_diag}},
\code{\link{tfd_multivariate_normal_full_covariance}},
\code{\link{tfd_multivariate_normal_linear_operator}},
\code{\link{tfd_multivariate_normal_tri_l}},
\code{\link{tfd_multivariate_student_t_linear_operator}},
\code{\link{tfd_negative_binomial}},
\code{\link{tfd_normal}},
\code{\link{tfd_one_hot_categorical}},
\code{\link{tfd_pareto}},
\code{\link{tfd_poisson_log_normal_quadrature_compound}},
\code{\link{tfd_poisson}}, \code{\link{tfd_quantized}},
\code{\link{tfd_relaxed_bernoulli}},
\code{\link{tfd_relaxed_one_hot_categorical}},
\code{\link{tfd_sample_distribution}},
\code{\link{tfd_sinh_arcsinh}},
\code{\link{tfd_student_t_process}},
\code{\link{tfd_student_t}},
\code{\link{tfd_transformed_distribution}},
\code{\link{tfd_triangular}},
\code{\link{tfd_truncated_normal}},
\code{\link{tfd_uniform}},
\code{\link{tfd_variational_gaussian_process}},
\code{\link{tfd_vector_diffeomixture}},
\code{\link{tfd_vector_exponential_diag}},
\code{\link{tfd_vector_exponential_linear_operator}},
\code{\link{tfd_vector_laplace_linear_operator}},
\code{\link{tfd_vector_sinh_arcsinh_diag}},
\code{\link{tfd_von_mises_fisher}},
\code{\link{tfd_von_mises}}, \code{\link{tfd_wishart}},
\code{\link{tfd_zipf}}
}
\concept{distributions}
|
#' Update Ace Editor
#'
#' Update the styling or mode of an aceEditor component.
#'
#' @param session The Shiny session to whom the editor belongs
#' @param editorId The ID associated with this element
#' @param value The initial text to be contained in the editor.
#' @param mode The Ace \code{mode} to be used by the editor. The \code{mode}
#' in Ace is often the programming or markup language that you're using and
#' determines things like syntax highlighting and code folding. Use the
#' \code{\link{getAceModes}} function to enumerate all the modes available.
#' @param theme The Ace \code{theme} to be used by the editor. The \code{theme}
#' in Ace determines the styling and coloring of the editor. Use
#' \code{\link{getAceThemes}} to enumerate all the themes available.
#' @param readOnly If set to \code{TRUE}, Ace will disable client-side editing.
#' If \code{FALSE} (the default), it will enable editing.
#' @param fontSize If set, will update the font size (in px) used in the editor.
#' Should be an integer.
#' @param showLineNumbers If set to \code{TRUE}, Ace will show line numbers.
#' @param wordWrap If set to \code{TRUE}, Ace will enable word wrapping.
#' Default value is \code{FALSE}.
#' @param tabSize Set tab size. Default value is 4
#' @param useSoftTabs Replace tabs by spaces. Default value is TRUE
#' @param showInvisibles Show invisible characters (e.g., spaces, tabs, newline characters).
#' Default value is FALSE
#' @param showPrintMargin Show print margin. Default value is TRUE
#' @param border Set the \code{border} 'normal', 'alert', or 'flash'.
#' @param autoComplete Enable/Disable code completion. See \code{\link{aceEditor}}
#' for details.
#' @param autoCompleters Character vector of completers to enable. If set to \code{NULL},
#' all completers will be disabled.
#' @param autoCompleteList If set to \code{NULL}, existing static completions
#' list will be unset. See \code{\link{aceEditor}} for details.
#' @examples \dontrun{
#' shinyServer(function(input, output, session) {
#' observe({
#' updateAceEditor(session, "myEditor", "Updated text for editor here",
#' mode = "r", theme = "ambiance")
#' })
#' }
#' }
#'
#' @author Jeff Allen \email{jeff@@trestletech.com}
#'
#' @export
updateAceEditor <- function(
session, editorId, value, theme, readOnly, mode,
fontSize, showLineNumbers, wordWrap, useSoftTabs, tabSize, showInvisibles, showPrintMargin,
border = c("normal", "alert", "flash"),
autoComplete = c("disabled", "enabled", "live"),
autoCompleters = c("snippet", "text", "keyword", "static", "rlang"),
autoCompleteList = NULL
) {
if (missing(session) || missing(editorId)) {
stop("Must provide both a session and an editorId to update Ace editor settings")
}
if (!all(autoComplete %in% c("disabled", "enabled", "live"))) {
stop("updateAceEditor: Incorrectly formatted autoComplete parameter")
}
if (!all(border %in% c("normal", "alert", "flash"))) {
stop("updateAceEditor: Incorrectly formatted border parameter")
}
if (!is.empty(autoCompleters) && !all(autoCompleters %in% c("snippet", "text", "keyword", "static", "rlang"))) {
stop("updateAceEditor: Incorrectly formatted autoCompleters parameter")
}
theList <- list(id = session$ns(editorId))
if (!missing(value)) theList["value"] <- value
if (!missing(theme)) theList["theme"] <- theme
if (!missing(mode)) theList["mode"] <- mode
if (!missing(readOnly)) theList["readOnly"] <- readOnly
if (!missing(fontSize)) theList["fontSize"] <- fontSize
if (!missing(showLineNumbers)) theList["showLineNumbers"] <- showLineNumbers
if (!missing(wordWrap)) theList["wordWrap"] <- wordWrap
if (!missing(tabSize)) theList["tabSize"] <- tabSize
if (!missing(useSoftTabs)) theList["useSoftTabs"] <- useSoftTabs
if (!missing(showInvisibles)) theList["showInvisibles"] <- showInvisibles
if (!missing(showPrintMargin)) theList["showPrintMargin"] <- showPrintMargin
if (!missing(border)) {
border <- match.arg(border)
theList["border"] <- paste0("ace", border)
}
if (!missing(autoComplete)) {
if (is.empty(autoCompleters)) {
autoComplete <- "disabled"
} else {
autoComplete <- match.arg(autoComplete)
}
theList["autoComplete"] <- autoComplete
}
if (!missing(autoCompleters) && !is.empty(autoCompleters)) {
theList <- c(theList, list(autoCompleters = match.arg(autoCompleters, several.ok = TRUE)))
}
if (!missing(autoCompleteList)) {
# NULL can only be inserted via c()
theList <- c(theList, list(autoCompleteList = autoCompleteList))
}
session$sendCustomMessage("shinyAce", theList)
} | /R/update-ace-editor.R | permissive | trestletech/shinyAce | R | false | false | 4,660 | r | #' Update Ace Editor
#'
#' Update the styling or mode of an aceEditor component.
#'
#' @param session The Shiny session to whom the editor belongs
#' @param editorId The ID associated with this element
#' @param value The initial text to be contained in the editor.
#' @param mode The Ace \code{mode} to be used by the editor. The \code{mode}
#' in Ace is often the programming or markup language that you're using and
#' determines things like syntax highlighting and code folding. Use the
#' \code{\link{getAceModes}} function to enumerate all the modes available.
#' @param theme The Ace \code{theme} to be used by the editor. The \code{theme}
#' in Ace determines the styling and coloring of the editor. Use
#' \code{\link{getAceThemes}} to enumerate all the themes available.
#' @param readOnly If set to \code{TRUE}, Ace will disable client-side editing.
#' If \code{FALSE} (the default), it will enable editing.
#' @param fontSize If set, will update the font size (in px) used in the editor.
#' Should be an integer.
#' @param showLineNumbers If set to \code{TRUE}, Ace will show line numbers.
#' @param wordWrap If set to \code{TRUE}, Ace will enable word wrapping.
#' Default value is \code{FALSE}.
#' @param tabSize Set tab size. Default value is 4
#' @param useSoftTabs Replace tabs by spaces. Default value is TRUE
#' @param showInvisibles Show invisible characters (e.g., spaces, tabs, newline characters).
#' Default value is FALSE
#' @param showPrintMargin Show print margin. Default value is TRUE
#' @param border Set the \code{border} 'normal', 'alert', or 'flash'.
#' @param autoComplete Enable/Disable code completion. See \code{\link{aceEditor}}
#' for details.
#' @param autoCompleters Character vector of completers to enable. If set to \code{NULL},
#' all completers will be disabled.
#' @param autoCompleteList If set to \code{NULL}, existing static completions
#' list will be unset. See \code{\link{aceEditor}} for details.
#' @examples \dontrun{
#' shinyServer(function(input, output, session) {
#' observe({
#' updateAceEditor(session, "myEditor", "Updated text for editor here",
#' mode = "r", theme = "ambiance")
#' })
#' }
#' }
#'
#' @author Jeff Allen \email{jeff@@trestletech.com}
#'
#' @export
updateAceEditor <- function(
  session, editorId, value, theme, readOnly, mode,
  fontSize, showLineNumbers, wordWrap, useSoftTabs, tabSize, showInvisibles, showPrintMargin,
  border = c("normal", "alert", "flash"),
  autoComplete = c("disabled", "enabled", "live"),
  autoCompleters = c("snippet", "text", "keyword", "static", "rlang"),
  autoCompleteList = NULL
) {
  # Both a session and a target editor id are required to route the update.
  if (missing(session) || missing(editorId)) {
    stop("Must provide both a session and an editorId to update Ace editor settings")
  }

  # Validate the enumerated arguments up front so a malformed call fails
  # before any message is sent to the client.
  if (!all(autoComplete %in% c("disabled", "enabled", "live"))) {
    stop("updateAceEditor: Incorrectly formatted autoComplete parameter")
  }
  if (!all(border %in% c("normal", "alert", "flash"))) {
    stop("updateAceEditor: Incorrectly formatted border parameter")
  }
  if (!is.empty(autoCompleters) && !all(autoCompleters %in% c("snippet", "text", "keyword", "static", "rlang"))) {
    stop("updateAceEditor: Incorrectly formatted autoCompleters parameter")
  }

  # Build the message payload: only settings the caller actually supplied
  # are included, so the client updates just those fields.
  # `[[<-` (rather than `[<-`) stores each value intact even when it has
  # length > 1; single-bracket assignment on a list would silently truncate
  # such a value to its first element with a warning.
  theList <- list(id = session$ns(editorId))
  if (!missing(value)) theList[["value"]] <- value
  if (!missing(theme)) theList[["theme"]] <- theme
  if (!missing(mode)) theList[["mode"]] <- mode
  if (!missing(readOnly)) theList[["readOnly"]] <- readOnly
  if (!missing(fontSize)) theList[["fontSize"]] <- fontSize
  if (!missing(showLineNumbers)) theList[["showLineNumbers"]] <- showLineNumbers
  if (!missing(wordWrap)) theList[["wordWrap"]] <- wordWrap
  if (!missing(tabSize)) theList[["tabSize"]] <- tabSize
  if (!missing(useSoftTabs)) theList[["useSoftTabs"]] <- useSoftTabs
  if (!missing(showInvisibles)) theList[["showInvisibles"]] <- showInvisibles
  if (!missing(showPrintMargin)) theList[["showPrintMargin"]] <- showPrintMargin
  if (!missing(border)) {
    border <- match.arg(border)
    # The client-side handler expects the border class prefixed with "ace".
    theList[["border"]] <- paste0("ace", border)
  }
  if (!missing(autoComplete)) {
    if (is.empty(autoCompleters)) {
      # With no completers enabled, enabled/live completion is meaningless.
      autoComplete <- "disabled"
    } else {
      autoComplete <- match.arg(autoComplete)
    }
    theList[["autoComplete"]] <- autoComplete
  }
  if (!missing(autoCompleters) && !is.empty(autoCompleters)) {
    theList <- c(theList, list(autoCompleters = match.arg(autoCompleters, several.ok = TRUE)))
  }
  if (!missing(autoCompleteList)) {
    # NULL can only be inserted via c(); [[<- with NULL would drop the entry.
    theList <- c(theList, list(autoCompleteList = autoCompleteList))
  }
  session$sendCustomMessage("shinyAce", theList)
}
\name{get_lit_cheminfo}
\alias{get_lit_cheminfo}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get literature Chemical Information.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
This function provides the information specified in "info=" for all chemicals with data from the Wetmore et al. (2012) and (2013) publications and other literature.
}
\usage{
get_lit_cheminfo(info="CAS",species="Human")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{info}{A single character vector (or collection of character vectors) from "Compound","CAS","MW","Raw.Experimental.Percentage.Unbound","Entered.Experimental.Percentage.Unbound","Fub","source_PPB","Renal_Clearance","Met_Stab","Met_Stab_entered" ,"r2","p.val","Concentration..uM.","Css_lower_5th_perc.mg.L.","Css_median_perc.mg.L.","Css_upper_95th_perc.mg.L.","Css_lower_5th_perc.uM.","Css_median_perc.uM.","Css_upper_95th_perc.uM.", and "Species".}
\item{species}{Species desired (either "Rat" or default "Human").}
}
\value{
\item{info}{Table/vector containing values specified in "info" for valid chemicals.}
}
\references{
Wetmore, B.A., Wambaugh, J.F., Ferguson, S.S., Sochaski, M.A., Rotroff, D.M., Freeman, K., Clewell, H.J., Dix, D.H., Andersen, M.E., Houck, K.A., Allen, B., Judson, R.S., Sing, R., Kavlock, R.J., Richard, A.M., and Thomas, R.S., "Integration of Dosimetry, Exposure and High-Throughput Screening Data in Chemical Toxicity Assessment," Toxicological Sciences 125 157-174 (2012)
Wetmore, B.A., Wambaugh, J.F., Ferguson, S.S., Li, L., Clewell, H.J. III, Judson, R.S., Freeman, K., Bao, W, Sochaski, M.A., Chu T.-M., Black, M.B., Healy, E, Allen, B., Andersen M.E., Wolfinger, R.D., and Thomas R.S., "The Relative Impact of Incorporating Pharmacokinetics on Predicting in vivo Hazard and Mode-of-Action from High-Throughput in vitro Toxicity Assays" Toxicological Sciences, 132:327-346 (2013).
Wetmore, B. A., Wambaugh, J. F., Allen, B., Ferguson, S. S., Sochaski, M. A., Setzer, R. W., Houck, K. A., Strope, C. L., Cantwell, K., Judson, R. S., LeCluyse, E., Clewell, H.J. III, Thomas, R.S., and Andersen, M. E. (2015). "Incorporating High-Throughput Exposure Predictions with Dosimetry-Adjusted In Vitro Bioactivity to Inform Chemical Toxicity Testing" Toxicological Sciences, kfv171.
}
\author{
John Wambaugh
}
\examples{
\dontrun{
get_lit_cheminfo()
get_lit_cheminfo(info=c('CAS','MW'))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Literature}
\keyword{Retrieval}% __ONLY ONE__ keyword per line
| /man/get_lit_cheminfo.Rd | no_license | HQData/httk | R | false | false | 2,675 | rd | \name{get_lit_cheminfo}
\alias{get_lit_cheminfo}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Get literature Chemical Information.
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
This function provides the information specified in "info=" for all chemicals with data from the Wetmore et al. (2012) and (2013) publications and other literature.
}
\usage{
get_lit_cheminfo(info="CAS",species="Human")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{info}{A single character vector (or collection of character vectors) from "Compound","CAS","MW","Raw.Experimental.Percentage.Unbound","Entered.Experimental.Percentage.Unbound","Fub","source_PPB","Renal_Clearance","Met_Stab","Met_Stab_entered" ,"r2","p.val","Concentration..uM.","Css_lower_5th_perc.mg.L.","Css_median_perc.mg.L.","Css_upper_95th_perc.mg.L.","Css_lower_5th_perc.uM.","Css_median_perc.uM.","Css_upper_95th_perc.uM.", and "Species".}
\item{species}{Species desired (either "Rat" or default "Human").}
}
\value{
\item{info}{Table/vector containing values specified in "info" for valid chemicals.}
}
\references{
Wetmore, B.A., Wambaugh, J.F., Ferguson, S.S., Sochaski, M.A., Rotroff, D.M., Freeman, K., Clewell, H.J., Dix, D.H., Andersen, M.E., Houck, K.A., Allen, B., Judson, R.S., Sing, R., Kavlock, R.J., Richard, A.M., and Thomas, R.S., "Integration of Dosimetry, Exposure and High-Throughput Screening Data in Chemical Toxicity Assessment," Toxicological Sciences 125 157-174 (2012)
Wetmore, B.A., Wambaugh, J.F., Ferguson, S.S., Li, L., Clewell, H.J. III, Judson, R.S., Freeman, K., Bao, W, Sochaski, M.A., Chu T.-M., Black, M.B., Healy, E, Allen, B., Andersen M.E., Wolfinger, R.D., and Thomas R.S., "The Relative Impact of Incorporating Pharmacokinetics on Predicting in vivo Hazard and Mode-of-Action from High-Throughput in vitro Toxicity Assays" Toxicological Sciences, 132:327-346 (2013).
Wetmore, B. A., Wambaugh, J. F., Allen, B., Ferguson, S. S., Sochaski, M. A., Setzer, R. W., Houck, K. A., Strope, C. L., Cantwell, K., Judson, R. S., LeCluyse, E., Clewell, H.J. III, Thomas, R.S., and Andersen, M. E. (2015). "Incorporating High-Throughput Exposure Predictions with Dosimetry-Adjusted In Vitro Bioactivity to Inform Chemical Toxicity Testing" Toxicological Sciences, kfv171.
}
\author{
John Wambaugh
}
\examples{
\dontrun{
get_lit_cheminfo()
get_lit_cheminfo(info=c('CAS','MW'))
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{Literature}
\keyword{Retrieval}% __ONLY ONE__ keyword per line
|
library(dplyr)
library(EnhancedVolcano)
library(ggplot2)
library(monocle3)
library(Seurat)
## Load the data
# To mount the remote data locally:
# sshfs psamareh@mercury.pmacs.upenn.edu:/home/psamareh ~/Desktop/mount
# Location of counts data -- should have barcodes.tsv.gz, features.tsv.gz, matrix.mtx.gz
input_folder <- "/Users/parisasamareh/Downloads/filtered_gene_bc_matrices"
# Location of the folder you want your plots to be in
output_folder <- "~/Desktop/work_for_charly/07_20_2020_metascapeforday0ND388"
# Read in the 10x data
CART_exh.data <- Read10X(data.dir = input_folder)

## Initialize the Seurat object with the raw (non-normalized data).
# Only keep genes expressed in at least 3 cells and cells that express at least 200 genes.
CART_exh <- CreateSeuratObject(counts = CART_exh.data, project = "CART_exh", min.cells = 3, min.features = 200)
# Percentage of each cell's counts mapping to mitochondrial genes (names
# starting with "MT-"); used below as a QC filter.
CART_exh[["percent.mt"]] <- PercentageFeatureSet(CART_exh, pattern = "^MT-")

## Pre-processing and QC
# Violin plot of number of features per cell, number of counts per cell,
# and percentage of counts in the cell that map to the mitochondrial genome
pdf(paste(output_folder, "/violin_plot_QC.pdf", sep = ""))
VlnPlot(CART_exh, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0.01)
dev.off()
# Correlation between percent mito and RNA count as well as number of genes expressed in a cell and RNA count.
pdf(paste(output_folder, "/feature_scatter_QC.pdf", sep = ""))
plot1 <- FeatureScatter(CART_exh, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(CART_exh, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Number of cells before the mitochondrial and gene filters
print("Number of cells without mito and gene filter:\n")
print(dim(CART_exh@meta.data))
# Keep cells expressing between 200 and 5000 genes with less than 5% mito DNA
CART_exh <- subset(CART_exh, subset = nFeature_RNA > 200 & nFeature_RNA < 5000 & percent.mt < 5)
# Number of cells after the mitochondrial and gene filters
print("Number of cells with mito and gene filter:\n")
print(dim(CART_exh@meta.data))
# Write the per-cell gene expression matrix (cells x genes) to a csv.
# NOTE(review): no normalization has been run yet at this point, so the RNA
# @data slot still holds raw counts here, and the output path is hard-coded
# to a different location than output_folder -- confirm both are intended.
write.csv(t(as.matrix(CART_exh[["RNA"]]@data)), paste("/Users/parisasamareh/Desktop/temp_gene_exp", "/gene_exp_day20_mito5.csv", sep = ""))
# Perform SCTransform normalization (replaces NormalizeData/ScaleData/
# FindVariableFeatures; results are stored in the SCT assay)
CART_exh <- SCTransform(CART_exh)
# Run PCA on the variable features
CART_exh <- RunPCA(CART_exh, features = VariableFeatures(object = CART_exh))
# If you want to print genes associated with each PC: print(CART_exh[["pca"]], dims = 1:5, nfeatures = 10)
# Plot PC 1 and 2 against each other
pdf(paste(output_folder, "/pca_plot.pdf", sep = ""))
DimPlot(CART_exh, reduction = "pca")
dev.off()
# Plot heatmap of up and down regulated genes for each of the first 9 PCs using 500 cells
pdf(paste(output_folder, "/heatmap_PCs.pdf", sep = ""))
DimHeatmap(CART_exh, dims = 1:9, cells = 500, balanced = TRUE)
dev.off()
# Plot standard deviation each PC encompasses to determine the most informative PCs
pdf(paste(output_folder, "/elbow.pdf", sep = ""))
ElbowPlot(CART_exh)
dev.off()
# Construct a KNN graph based on the euclidean distance in PCA space,
# and refine the edge weights between any two cells based on the shared overlap in their local neighborhoods (Jaccard similarity).
# dims = 1:10 presumably chosen from the elbow plot above -- TODO confirm
CART_exh <- FindNeighbors(CART_exh, dims = 1:10)
# Apply the Louvain algorithm to cluster cells by iteratively grouping them together, optimizing modularity
# The higher the resolution, the more clusters: "0.4-1.2 typically returns good results for single-cell datasets of around 3K cells"
CART_exh <- FindClusters(CART_exh, resolution = 0.2)
# Run non-linear dimensionality reduction to visualize data: use the same number of PCs as in clustering
CART_exh <- RunUMAP(CART_exh, dims = 1:10)
CART_exh <- RunTSNE(CART_exh, dims = 1:10)
# Plot UMAP colored by cluster
pdf(paste(output_folder, "/umap.pdf", sep = ""))
DimPlot(CART_exh, reduction = "umap")
dev.off()
# Plot tSNE colored by cluster
pdf(paste(output_folder, "/tsne.pdf", sep = ""))
DimPlot(CART_exh, reduction = "tsne")
dev.off()
## Gene markers
# Positive markers for each cluster: a gene must be detected in at least 25%
# of the cluster's cells and have a log fold-change of at least 0.25.
CART_exh.markers <- FindAllMarkers(CART_exh, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
# Save the per-cluster markers in a csv
write.csv(CART_exh.markers, paste(output_folder, "/cluster_markers.csv", sep = ""))

# Markers for clusters 0 and 3 combined versus all other cells; no log
# fold-change filter so the full volcano can be drawn.
CART_exh.markers.03 <- FindMarkers(CART_exh, ident.1 = c(0,3), logfc.threshold = 0)
# Save the volcano to a pdf like every other figure in this script
# (previously it was only drawn on the default device); print() makes the
# ggplot render when the script is source()d as well as under Rscript.
pdf(paste(output_folder, "/volcano_clusters_0_3.pdf", sep = ""))
print(EnhancedVolcano(CART_exh.markers.03, subtitle = "Differentially Expressed Genes in Clusters 0 and 3",
                lab = rownames(CART_exh.markers.03),
                x = 'avg_logFC',
                y = 'p_val',
                xlim = c(-3, 3), pCutoff = 0.05, FCcutoff = 0.5))
dev.off()

# A shortened list of the top 10 gene markers for each cluster based on log FC
top10 <- CART_exh.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
# Heatmap of the gene expression for the top 10 gene markers in each cluster
pdf(paste(output_folder, "/cluster_heatmap.pdf", sep = ""))
print(DoHeatmap(CART_exh, features = top10$gene) + NoLegend())
dev.off()
## Plots to investigate particular genes
# Violin plot of raw counts for a panel of genes of interest, split by cluster
pdf(paste(output_folder, "/vlnplot_counts.pdf", sep = ""))
VlnPlot(CART_exh, features = c("GNLY", "ENTPD1","LAYN","KLRC1", "KLRC2", "KLRB1", "KLRD1", "PHLDA1", "SRGAP3"), slot = "counts", log = TRUE)
dev.off()

# UMAP of gene expression for the following genes: one pdf saved per gene.
# Genes are checked against the rownames of the object (fixes the previously
# undefined `all.genes`); absent genes are reported and skipped instead of
# crashing FeaturePlot. unique() avoids rendering the same gene (GNLY was
# listed twice) more than once.
all.genes <- rownames(CART_exh)
for (i in unique(c("KLRC1", "TOX",
            "GNLY",
            "LAYN",
            "CCL4",
            "ENTPD1",
            "TIGIT",
            "PHLDA1",
            "GZMA"))){
  if(!(i %in% all.genes)){
    # Gene not detected in this dataset -- report it and move on.
    print(i)
    next
  }
  pdf(paste(c(output_folder, "/", i, ".pdf"), collapse = ""))
  curr <- FeaturePlot(CART_exh, features = c(i))
  print(curr)
  dev.off()
}

# Dot plot where dot size corresponds to the percentage of cells in that cluster expressing the gene
# and color corresponds to the average gene expression level of those expressing cells
pdf(paste(output_folder, "/dot_plot.pdf", sep = ""))
DotPlot(CART_exh,
        features = c("TNFRSF18", "GNLY", "ENTPD1", "PHLDA1",
                     "SRGAP3", "SOX4")) + coord_flip()
dev.off()
#--------------------------------------------------------------------------------------------------------
## Monocle
# Pull the raw, QC-subsetted counts out of the Seurat object as a sparse matrix
data <- as(as.matrix(CART_exh@assays$RNA@counts), 'sparseMatrix')
# Per-cell metadata (mito percentage, seurat cluster, etc.)
cell_metadata <- CART_exh@meta.data
# Per-gene metadata; monocle expects a gene_short_name column
gene_metadata <- data.frame(gene_short_name = row.names(data), row.names = row.names(data))
# Create the monocle cell_data_set object
cds <- new_cell_data_set(expression_data = data,
                         cell_metadata = cell_metadata,
                         gene_metadata = gene_metadata)
# Normalizes data by log and size factor,
# then calculates a lower dimensional space using PCA that is used as input downstream
cds <- preprocess_cds(cds, num_dim = 100)
# Perform non-linear dimensionality reduction: UMAP (the default) ...
cds <- reduce_dimension(cds)
# ... and tSNE as well, so the tSNE-based plots below have an embedding to
# draw (this step was previously only a comment, which made
# plot_cells(..., reduction_method = "tSNE") fail).
cds <- reduce_dimension(cds, reduction_method = 'tSNE')
# Perform clustering: by default clusters on the UMAP embedding
cds <- cluster_cells(cds)
# Plot UMAP of cells with monocle clusters
pdf(paste(output_folder, "/monocle_umap_clusters.pdf", sep = ""))
plot_cells(cds, reduction_method="UMAP")
dev.off()
# Plot tSNE of cells with monocle clusters
pdf(paste(output_folder, "/monocle_tsne_clusters.pdf", sep = ""))
plot_cells(cds, reduction_method="tSNE")
dev.off()
## Compare to seurat: monocle dim reduction colored by the seurat clusters
# (drawn on the default device, not saved to output_folder)
plot_cells(cds, color_cells_by="seurat_clusters")
plot_cells(cds, color_cells_by="seurat_clusters", reduction_method = "tSNE")
## Plot gene expression on UMAP to see if expression is cluster associated
plot_cells(cds, genes=c("KLRC1"))
plot_cells(cds, genes=c("LAYN"))
## Marker genes for monocle clusters
marker_test_res_mon <- top_markers(cds, group_cells_by="cluster")
write.csv(marker_test_res_mon, file = paste(output_folder, "/monocle_markers_for_monocle_clusters.csv", sep = ""), quote = FALSE)
## Marker genes for seurat clusters
marker_test_res_seur <- top_markers(cds, group_cells_by="seurat_clusters")
write.csv(marker_test_res_seur, file = paste(output_folder, "/monocle_markers_for_seurat_clusters.csv", sep = ""), quote = FALSE)
## Gene modules for the entire data set
# Moran's I tells you whether cells at nearby positions will have similar
# (or dissimilar) expression levels for the gene being tested:
# +1 means nearby cells have perfectly similar expression, -1 is
# anti-correlated, 0 is no correlation.
pr_graph_test_res <- graph_test(cds, neighbor_graph="knn", cores=8)
# Keep genes with positive spatial autocorrelation and a significant q-value
pr_deg_ids <- row.names(subset(pr_graph_test_res, morans_I > 0.01 & q_value < 0.05))
# Group the significant genes into co-expression modules
gene_module_df <- find_gene_modules(cds[pr_deg_ids,], resolution = 0.01)
# Save module assignments to csv
write.csv(gene_module_df, file = paste(output_folder, "/modules_all.csv", sep = ""), quote = FALSE)
# Plot umap with aggregate module expression for each module
pdf(paste(output_folder, "/modules_umap.pdf", sep = ""))
plot_cells(cds, genes=gene_module_df,
           show_trajectory_graph=FALSE,
           label_cell_groups=FALSE)
dev.off()
## Plot module expression on the seurat UMAP.
# Add one module score per monocle gene module to the Seurat object
# (previously this used an undefined `seurat_object`; the Seurat object in
# this script is CART_exh). AddModuleScore appends a numeric suffix, so
# module i produces a metadata column named "mod_<i>1".
for (i in levels(gene_module_df$module)){
  CART_exh <- AddModuleScore(CART_exh, list(subset(gene_module_df, module == i)$id), name = paste("mod_", i, sep = ""))
}
# One pdf per module-score column; the pattern is anchored so only the
# columns created above are plotted.
x <- colnames(CART_exh@meta.data)
for (i in subset(x, grepl("^mod_", x))){
  pdf(paste(c(output_folder, "/", i, ".pdf"), collapse = ""))
  curr <- FeaturePlot(CART_exh, features = c(i))
  print(curr)
  dev.off()
}
## Trajectory analysis
# Fit a principal graph through the cells in UMAP space
cds <- learn_graph(cds)
# NOTE(review): order_cells() without a root_cells/root_pr_nodes argument
# launches an interactive root-node picker and will fail in a
# non-interactive Rscript run -- confirm how this script is executed, or
# choose roots programmatically.
cds <- order_cells(cds)
# Trajectory colored by the seurat clusters, legend pinned to the top right
pdf(paste(output_folder, "/trajectories_seurat_clusters.pdf", sep = ""))
plot_cells(cds,
           color_cells_by = "seurat_clusters",
           label_groups_by_cluster=FALSE,
           label_leaves=TRUE,
           label_branch_points=TRUE)+ theme(
             legend.position = c(0.95, 0.95),
             legend.justification = c("right", "top")
           )
dev.off()
# Trajectory colored by the monocle clusters, legend pinned to the top right
pdf(paste(output_folder, "/trajectories_monocle_clusters.pdf", sep = ""))
plot_cells(cds,
           color_cells_by = "cluster",
           label_groups_by_cluster=FALSE,
           label_leaves=TRUE,
           label_branch_points=TRUE)+ theme(
             legend.position = c(0.95, 0.95),
             legend.justification = c("right", "top")
           )
dev.off()
| /sc_pipeline_seurat_monocle.R | no_license | parisasamareh/InVitroCARTexh_code_2020 | R | false | false | 11,134 | r | library(dplyr)
library(Seurat)
library(ggplot2)
library(monocle3)
## Load the datas
# sshfs psamareh@mercury.pmacs.upenn.edu:/home/psamareh ~/Desktop/mount
# Location of counts data -- should have barcodes.tsv.gz, features.tsv.gz, matrix.mtx.gz
input_folder = "/Users/parisasamareh/Downloads/filtered_gene_bc_matrices"
# Location of the folder you want your plots to be in
output_folder = "~/Desktop/work_for_charly/07_20_2020_metascapeforday0ND388"
# Read in the 10x data
CART_exh.data <- Read10X(data.dir = input_folder)
## Initialize the Seurat object with the raw (non-normalized data).
# Only keep genes expressed in at least 3 cells and cells that express at least 200 genes.
CART_exh <- CreateSeuratObject(counts = CART_exh.data, project = "CART_exh", min.cells = 3, min.features = 200)
CART_exh[["percent.mt"]] <- PercentageFeatureSet(CART_exh, pattern = "^MT-")
## Pre-processing and QC
# Violin plot of number of features per cell, number of counts per cell,
# and percentage of genes in the cell that map to the mitochondiral genome
pdf(paste(output_folder, "/violin_plot_QC.pdf", sep = ""))
VlnPlot(CART_exh, features = c("nFeature_RNA", "nCount_RNA", "percent.mt"), ncol = 3, pt.size = 0.01)
dev.off()
# Correlation between percent mito and RNA count as well as number of genes expressed in a cell and RNA count.
pdf(paste(output_folder, "/feature_scatter_QC.pdf", sep = ""))
plot1 <- FeatureScatter(CART_exh, feature1 = "nCount_RNA", feature2 = "percent.mt")
plot2 <- FeatureScatter(CART_exh, feature1 = "nCount_RNA", feature2 = "nFeature_RNA")
CombinePlots(plots = list(plot1, plot2))
dev.off()
# Printing the number of cells we have without the mitochondrial and gene filter
print("Number of cells without mito and gene filter:\n")
print(dim(CART_exh@meta.data))
# Each cell expresses between 200 and 5000 genes and has less than 5% mito dna
CART_exh <- subset(CART_exh, subset = nFeature_RNA > 200 & nFeature_RNA < 5000 & percent.mt < 5)
# Printing the number of cells we have with the mitochondrial and gene filter
print("Number of cells with mito and gene filter:\n")
print(dim(CART_exh@meta.data))
# Printing out the normalized gene expression data to a csv
write.csv(t(as.matrix(CART_exh[["RNA"]]@data)), paste("/Users/parisasamareh/Desktop/temp_gene_exp", "/gene_exp_day20_mito5.csv", sep = ""))
# Perform SCTransform normalization
CART_exh <- SCTransform(CART_exh)
# Run PCA
CART_exh <- RunPCA(CART_exh, features = VariableFeatures(object = CART_exh))
# If you want to print genes associated with each PC: print(CART_exh[["pca"]], dims = 1:5, nfeatures = 10)
# Plot PC 1 and 2 against each other
pdf(paste(output_folder, "/pca_plot.pdf", sep = ""))
DimPlot(CART_exh, reduction = "pca")
dev.off()
# Plot heatmatmap of up and down regulated genes for each PC using 500 cells
pdf(paste(output_folder, "/heatmap_PCs.pdf", sep = ""))
DimHeatmap(CART_exh, dims = 1:9, cells = 500, balanced = TRUE)
dev.off()
# Plot standard deviation each PC encompasses to determine the most informative PCs
pdf(paste(output_folder, "/elbow.pdf", sep = ""))
ElbowPlot(CART_exh)
dev.off()
# Construct a KNN graph based on the euclidean distance in PCA space,
# and refine the edge weights between any two cells based on the shared overlap in their local neighborhoods (Jaccard similarity).
CART_exh <- FindNeighbors(CART_exh, dims = 1:10)
# Apply the Louvain algorithm to cluster cells by iteratively grouping them together, optimizing modularity
# The higher the resolution, the more clusters: "0.4-1.2 typically returns good results for single-cell datasets of around 3K cells"
CART_exh <- FindClusters(CART_exh, resolution = 0.2)
# Run non-linear dimensionality reduction to visualize data: use the same number of PCs as in clustering
CART_exh <- RunUMAP(CART_exh, dims = 1:10)
CART_exh <- RunTSNE(CART_exh, dims = 1:10)
# Plot UMAP
pdf(paste(output_folder, "/umap.pdf", sep = ""))
DimPlot(CART_exh, reduction = "umap")
dev.off()
# Plot tSNE
pdf(paste(output_folder, "/tsne.pdf", sep = ""))
DimPlot(CART_exh, reduction = "tsne")
dev.off()
## Gene markers
# Find the positive markers for each cluster in the single cell data: must be detected in at least 25% of the cells and have a log fc of 0.25
CART_exh.markers <- FindAllMarkers(CART_exh, only.pos = TRUE, min.pct = 0.25, logfc.threshold = 0.25)
# Save the markers in a csv
write.csv(CART_exh.markers, paste(output_folder, "/cluster_markers.csv", sep = ""))
# If you want to find the markers for more than one cluster.
# cluster23.markers <- FindMarkers(CART_exh, ident.1 = c(2,3), min.pct = 0.25, logfc.threshold = 0.25)
# write.csv(cluster23.markers, paste(output_folder, "/cluster23_markers.csv", sep = ""))
# Differential expression between clusters 0 and 3 combined vs. all other
# cells; logfc.threshold = 0 so every gene is tested (needed for a
# complete volcano plot, not just the strongest markers).
CART_exh.markers.03 <- FindMarkers(CART_exh, ident.1 = c(0,3), logfc.threshold = 0)
# NOTE(review): EnhancedVolcano is never attached in this script (no
# library(EnhancedVolcano) above) -- confirm it is loaded elsewhere or
# add it to the library() block. Unlike the other plots, this one is not
# wrapped in pdf()/dev.off(), so it renders to the default device only.
EnhancedVolcano(CART_exh.markers.03, subtitle = "Differentially Expressed Genes in Clusters 0 and 3",
                lab = rownames(CART_exh.markers.03),
                x = 'avg_logFC',
                y = 'p_val',
                xlim = c(-3, 3), pCutoff = 0.05, FCcutoff = 0.5)
# A shortened list of the top 10 gene markers for each cluster based of log FC
top10 <- CART_exh.markers %>% group_by(cluster) %>% top_n(n = 10, wt = avg_logFC)
# Heatmap of the gene expression for the top 10 gene markers in each cluster
pdf(paste(output_folder, "/cluster_heatmap.pdf", sep = ""))
DoHeatmap(CART_exh, features = top10$gene) + NoLegend()
dev.off()
## Plots to investigate particular genes
# Create violin plot of expression of these genes, separated by cluster.
pdf(paste(output_folder, "/vlnplot_counts.pdf", sep = ""))
VlnPlot(CART_exh, features = c("GNLY", "ENTPD1","LAYN","KLRC1", "KLRC2", "KLRB1", "KLRD1", "PHLDA1", "SRGAP3"), slot = "counts", log = TRUE)
dev.off()
# UMAP of gene expression for selected genes: one PDF saved per gene.
# Fixes vs. original: the guard referenced `all.genes`, which is never
# defined in this script (runtime error); it also only printed the gene
# name without skipping, so a gene absent from the object would make
# FeaturePlot() abort the script. Duplicate entries ("GNLY") are removed.
genes_of_interest <- unique(c("KLRC1", "TOX",
                              "GNLY",
                              "LAYN",
                              "CCL4",
                              "ENTPD1",
                              "TIGIT",
                              "PHLDA1",
                              "GZMA"))
for (i in genes_of_interest) {
  # Skip genes that are not present in the (filtered) expression matrix.
  if (!(i %in% rownames(CART_exh))) {
    message("Gene not found in object, skipping: ", i)
    next
  }
  pdf(paste(c(output_folder, "/", i, ".pdf"), collapse = ""))
  curr <- FeaturePlot(CART_exh, features = c(i))
  print(curr)
  dev.off()
}
# Dot plot where dot size corresponds to the percentage of cells in that cluster expressing the gene
# and color corresponds to the average gene expression level of those expressing cells
pdf(paste(output_folder, "/dot_plot.pdf", sep = ""))
DotPlot(CART_exh,
features = c("TNFRSF18", "GNLY", "ENTPD1", "PHLDA1",
"SRGAP3", "SOX4")) + coord_flip()
dev.off()
#--------------------------------------------------------------------------------------------------------
## Monocle
# Get the raw, subsetted (for mito etc.) counts from the seurat object
data <- as(as.matrix(CART_exh@assays$RNA@counts), 'sparseMatrix')
# Get the cell meta data, mito percentage, seurat cluster, etc.)
cell_metadata <- CART_exh@meta.data
# Gene names
gene_metadata <- data.frame(gene_short_name = row.names(data), row.names = row.names(data))
# Create the monocle object
cds <- new_cell_data_set(expression_data = data,
cell_metadata = cell_metadata,
gene_metadata = gene_metadata)
# Normalizes data by log and size factor
# then calculates a lower dimensional space using PCA that will be used as input in the future
cds <- preprocess_cds(cds, num_dim = 100)
# Perform dimensionality reduction, UMAP is default
cds <- reduce_dimension(cds)
# If you want to perform tsne as well, cds <- reduce_dimension(cds, reduction_method = 'tSNE')
# Perform clustering: default dim reduction it clusters on is UMAP
cds = cluster_cells(cds)
# Plot UMAP of cells with monocle clusters
pdf(paste(output_folder, "/monocle_umap_clusters.pdf", sep = ""))
plot_cells(cds, reduction_method="UMAP")
dev.off()
# Plot tSNE of cells with monocle clusters
pdf(paste(output_folder, "/monocle_tsne_clusters.pdf", sep = ""))
plot_cells(cds, reduction_method="tSNE")
dev.off()
## Can see how these plots compare to seurat: monocle dim reduction with seurat clusters colored
plot_cells(cds, color_cells_by="seurat_clusters")
plot_cells(cds, color_cells_by="seurat_clusters", reduction_method = "tSNE")
## Plot gene expression on UMAP to see if expression is cluster associated
plot_cells(cds, genes=c("KLRC1"))
plot_cells(cds, genes=c("LAYN"))
## Marker genes for monocle clusters
marker_test_res_mon <- top_markers(cds, group_cells_by="cluster")
write.csv(marker_test_res_mon, file = paste(output_folder, "/monocle_markers_for_monocle_clusters.csv", sep = ""), quote = FALSE)
## Marker genes for seurat clusters
marker_test_res_seur <- top_markers(cds, group_cells_by="seurat_clusters")
write.csv(marker_test_res_seur, file = paste(output_folder, "/monocle_markers_for_seurat_clusters.csv", sep = ""), quote = FALSE)
## Modules for entire data set
pr_graph_test_res <- graph_test(cds, neighbor_graph="knn", cores=8)
# Filter genes based on Morans values:
# tells you whether cells at nearby positions on a trajectory will have similar (or dissimilar) expression levels for the gene being tested
# +1 means nearby cells will have perfectly similar expression, -1 is anti-correlated, 0 is no correlation
# Also want siginificant q-value
pr_deg_ids <- row.names(subset(pr_graph_test_res, morans_I > 0.01 & q_value < 0.05))
# Find modules using the genes that showed to be significant with the Morans I test
gene_module_df <- find_gene_modules(cds[pr_deg_ids,], resolution = 0.01)
# Save module csv
write.csv(gene_module_df, file = paste(output_folder, "/modules_all.csv", sep = ""), quote = FALSE)
# Plot umap with module expression for each module
pdf(paste(output_folder, "/modules_umap.pdf", sep = ""))
plot_cells(cds, genes=gene_module_df,
show_trajectory_graph=FALSE,
label_cell_groups=FALSE)
dev.off()
## Plot per-module expression scores on the Seurat UMAP, one PDF each.
## Fix vs. original: the loop assigned to and read from `seurat_object`,
## which is never defined anywhere in this script -- the Seurat object
## here is `CART_exh`.
for (i in levels(gene_module_df$module)) {
  # AddModuleScore appends a "mod_<i>1"-style score column to meta.data.
  CART_exh <- AddModuleScore(CART_exh,
                             list(subset(gene_module_df, module == i)$id),
                             name = paste("mod_", i, sep = ""))
}
meta_cols <- colnames(CART_exh@meta.data)
for (i in subset(meta_cols, grepl("mod", meta_cols))) {
  pdf(paste(c(output_folder, "/", i, ".pdf"), collapse = ""))
  curr <- FeaturePlot(CART_exh, features = c(i))
  print(curr)
  dev.off()
}
## Trajectory analysis
cds <- learn_graph(cds)
cds <- order_cells(cds)
pdf(paste(output_folder, "/trajectories_seurat_clusters.pdf", sep = ""))
plot_cells(cds,
color_cells_by = "seurat_clusters",
label_groups_by_cluster=FALSE,
label_leaves=TRUE,
label_branch_points=TRUE)+ theme(
legend.position = c(0.95, 0.95),
legend.justification = c("right", "top")
)
dev.off()
pdf(paste(output_folder, "/trajectories_monocle_clusters.pdf", sep = ""))
plot_cells(cds,
color_cells_by = "cluster",
label_groups_by_cluster=FALSE,
label_leaves=TRUE,
label_branch_points=TRUE)+ theme(
legend.position = c(0.95, 0.95),
legend.justification = c("right", "top")
)
dev.off()
|
#!/usr/bin/env Rscript
#
# @package MiGA
# @license Artistic-2.0
#
#= Load stuff
argv <- commandArgs(trailingOnly=T)
suppressPackageStartupMessages(library(ape))
suppressPackageStartupMessages(library(phytools))
suppressPackageStartupMessages(library(phangorn))
suppressPackageStartupMessages(library(enveomics.R))
#= Main function
# Build a midpoint-rooted BioNJ tree from a tab-separated ANI table,
# write it as Newick, and render a fan plot with the query dataset's
# tip highlighted in red.
ref_tree <- function(ani_file, out_base, q_dataset) {
  ani_tbl  <- read.table(ani_file, sep="\t", header=TRUE, as.is=TRUE)
  ani_dist <- enve.df2dist(ani_tbl[, 1:3], default.d=0.9, max.sim=100)
  phy <- midpoint(bionj(ani_dist))
  write.tree(phy, paste0(out_base, ".nwk"))
  # Query tip in red, every other tip in black.
  tip_cols <- rep("black", length(phy$tip.label))
  tip_cols[phy$tip.label == q_dataset] <- "red"
  pdf(paste0(out_base, ".nwk.pdf"), 7, 7)
  plot(phy, cex=1/3, type="fan", tip.color=tip_cols)
  add.scale.bar()
  dev.off()
}
# Ancilliary functions
# Midpoint-root an unrooted tree: find the two tips with the largest
# patristic (branch-length) distance and insert a new root node halfway
# along the path between them.
#
# NOTE(review): this local definition masks phytools::midpoint /
# phangorn::midpoint, which are attached above -- presumably kept for
# compatibility with older package versions; confirm before removing.
# It also calls phangorn internals (:::reroot, :::reorderPruning), which
# may break across phangorn releases.
midpoint <- function(tree){
  # All pairwise tip-to-tip branch-length distances.
  dm = cophenetic(tree)
  tree = unroot(tree)
  # Id for the new root node: one past the largest node id in use.
  rn = max(tree$edge)+1
  # Tree diameter and one pair of tips realizing it.
  maxdm = max(dm)
  ind = which(dm==maxdm,arr=TRUE)[1,]
  # Temporarily reroot at the first tip's parent so the path toward the
  # second tip can be walked with Ancestors().
  tmproot = Ancestors(tree, ind[1], "parent")
  tree = phangorn:::reroot(tree, tmproot)
  edge = tree$edge
  el = tree$edge.length
  children = tree$edge[,2]
  # Row of the edge leading to the first diameter tip.
  left = match(ind[1], children)
  # Node path from the second tip up toward (excluding) the temp root.
  tmp = Ancestors(tree, ind[2], "all")
  tmp= c(ind[2], tmp[-length(tmp)])
  right = match(tmp, children)
  if(el[left]>= (maxdm/2)){
    # Midpoint lies on the edge to the first tip: split that edge by
    # hanging the tip off the new root node rn.
    edge = rbind(edge, c(rn, ind[1]))
    edge[left,2] = rn
    el[left] = el[left] - (maxdm/2)
    el = c(el, maxdm/2)
  }else{
    # Otherwise accumulate edge lengths along the path toward the second
    # tip until the half-diameter is passed, and split that edge.
    sel = cumsum(el[right])
    i = which(sel>(maxdm/2))[1]
    edge = rbind(edge, c(rn, tmp[i]))
    edge[right[i],2] = rn
    eltmp = sel[i] - (maxdm/2)
    el = c(el, el[right[i]] - eltmp)
    el[right[i]] = eltmp
  }
  # Install the modified edge matrix/lengths and account for the node
  # added above, then reroot at it and restore canonical edge ordering.
  tree$edge.length = el
  tree$edge=edge
  tree$Nnode = tree$Nnode+1
  phangorn:::reorderPruning(phangorn:::reroot(tree, rn))
}
#= Main
ref_tree(ani_file=argv[1], out_base=argv[2], q_dataset=argv[3])
| /utils/ref-tree.R | permissive | jianshu93/miga | R | false | false | 1,849 | r | #!/usr/bin/env Rscript
#
# @package MiGA
# @license Artistic-2.0
#
#= Load stuff
argv <- commandArgs(trailingOnly=T)
suppressPackageStartupMessages(library(ape))
suppressPackageStartupMessages(library(phytools))
suppressPackageStartupMessages(library(phangorn))
suppressPackageStartupMessages(library(enveomics.R))
#= Main function
ref_tree <- function(ani_file, out_base, q_dataset) {
a <- read.table(ani_file, sep="\t", header=TRUE, as.is=TRUE)
ani.d <- enve.df2dist(a[,1:3], default.d=0.9, max.sim=100)
ani.ph <- midpoint(bionj(ani.d))
write.tree(ani.ph, paste(out_base, ".nwk", sep=""))
pdf(paste(out_base, ".nwk.pdf", sep=""), 7, 7)
plot(ani.ph, cex=1/3, type='fan',
tip.color=c('red', 'black')[ifelse(ani.ph$tip.label==q_dataset, 1, 2)])
add.scale.bar()
dev.off()
}
# Ancilliary functions
midpoint <- function(tree){
dm = cophenetic(tree)
tree = unroot(tree)
rn = max(tree$edge)+1
maxdm = max(dm)
ind = which(dm==maxdm,arr=TRUE)[1,]
tmproot = Ancestors(tree, ind[1], "parent")
tree = phangorn:::reroot(tree, tmproot)
edge = tree$edge
el = tree$edge.length
children = tree$edge[,2]
left = match(ind[1], children)
tmp = Ancestors(tree, ind[2], "all")
tmp= c(ind[2], tmp[-length(tmp)])
right = match(tmp, children)
if(el[left]>= (maxdm/2)){
edge = rbind(edge, c(rn, ind[1]))
edge[left,2] = rn
el[left] = el[left] - (maxdm/2)
el = c(el, maxdm/2)
}else{
sel = cumsum(el[right])
i = which(sel>(maxdm/2))[1]
edge = rbind(edge, c(rn, tmp[i]))
edge[right[i],2] = rn
eltmp = sel[i] - (maxdm/2)
el = c(el, el[right[i]] - eltmp)
el[right[i]] = eltmp
}
tree$edge.length = el
tree$edge=edge
tree$Nnode = tree$Nnode+1
phangorn:::reorderPruning(phangorn:::reroot(tree, rn))
}
#= Main
ref_tree(ani_file=argv[1], out_base=argv[2], q_dataset=argv[3])
|
#' Compute UTM coordinates for each individual tree
#'
#' Georeferences every individualID in a NEON vegetation structure (VST)
#' survey table by joining the surveyed plot/point reference coordinates
#' and offsetting each stem by its recorded distance and azimuth.
#'
#' @param dat data.frame of VST records; must contain siteID, plotID,
#'   pointID, stemDistance and stemAzimuth columns.
#' @return The input data.frame joined to the plot-point reference data,
#'   with itcEasting/itcNorthing columns appended. Rows missing
#'   stemAzimuth (or whose coordinates could not be computed) are
#'   dropped with a warning.
#' @importFrom magrittr "%>%"
#' @import sf
#' @import dplyr
#' @import stringr
#'
#' @examples
#' \dontrun{
#' field_tag <- retrieve_coords_itc(dat)
#' }
#'
retrieve_coords_itc <- function(dat){
  # Reference shapefile with plot and pointID coordinates for NEON
  # vegetation structure plots (keep only plots with the "vst" module).
  # NOTE(review): hard-coded relative path -- assumes the working
  # directory is the project root; confirm.
  plots<-sf::st_read("./meta/All_NEON_TOS_Plot_Points.shp") %>% data.frame %>%
    dplyr::filter(str_detect(appMods,"vst"))
  # Coerce the join keys to character on both sides so inner_join
  # matches regardless of factor/character encoding.
  plots<-plots %>%
    mutate(pointID=as.character(pointID))%>%
    mutate(siteID=as.character(siteID))%>%
    mutate(plotID=as.character(plotID))
  dat<-dat %>%
    mutate(pointID=as.character(pointID))%>%
    mutate(siteID=as.character(siteID))%>%
    mutate(plotID=as.character(plotID)) %>%
    inner_join(plots,by=c("plotID","pointID", "siteID"))
  # Drop individualIDs missing the azimuth: they cannot be georeferenced.
  if(sum(is.na(dat["stemAzimuth"]))>0){
    warning(paste(sum(is.na(dat["stemAzimuth"])),
                  "entries could not be georeferenced
and will be discarded."))
    dat <- dat[!is.na(dat["stemAzimuth"]), ]
  }
  # Translate each (distance, azimuth) offset from the point's
  # (easting, northing) into per-stem UTM coordinates.
  # NOTE(review): retrieve_dist_to_utm() is defined elsewhere in the
  # package; assumed to return c(easting, northing) -- confirm.
  dat_apply <- dat %>%
    dplyr::select(c(stemDistance, stemAzimuth, easting, northing))
  coords <- apply(dat_apply,1,
                  function(p)retrieve_dist_to_utm(p[1],p[2], p[3], p[4])) %>%
    t %>%
    data.frame
  colnames(coords) <- c('itcEasting', 'itcNorthing')
  #colnames(dat)[54:55] <- c("plotEasting", "plotNorthing")
  #colnames(dat)[27:28] <- c("geoEasting", "geoNorthing")
  field_tag <- cbind(dat, coords) %>% filter(!is.na(itcEasting))
  return(field_tag)
}
| /R/retrieve_coords_itc.R | permissive | vscholl/neonVegWrangleR | R | false | false | 1,925 | r | #' get individualID coordinates
#'
#'function to calculate the coordinates for each
#'individual tree in the vegetation structure survay
#' @return a dataframe
#' @seealso [geoNEON::loadByProduct()] which this function wraps.
#' @importFrom magrittr "%>%"
#' @import sf
#' @import dplyr
#' @import stringr
#'
#' @examples
#' retrieve_VST_data("OSBS")
#' retrieve_VST_data("all", 2017, 2019)
#'
#'
retrieve_coords_itc <- function(dat){
#import shapefile with the coordinates of plots and pointIDs for NEON vegetation structure
plots<-sf::st_read("./meta/All_NEON_TOS_Plot_Points.shp") %>% data.frame %>%
dplyr::filter(str_detect(appMods,"vst"))
plots<-plots %>%
mutate(pointID=as.character(pointID))%>%
mutate(siteID=as.character(siteID))%>%
mutate(plotID=as.character(plotID))
# mutate point and plot id into factors, and remove multiple entries
dat<-dat %>%
mutate(pointID=as.character(pointID))%>%
mutate(siteID=as.character(siteID))%>%
mutate(plotID=as.character(plotID)) %>%
inner_join(plots,by=c("plotID","pointID", "siteID"))
# check if there are individualIDs missing teh azimuth, remove them in case
if(sum(is.na(dat["stemAzimuth"]))>0){
warning(paste(sum(is.na(dat["stemAzimuth"])),
"entries could not be georeferenced
and will be discarded."))
dat <- dat[!is.na(dat["stemAzimuth"]), ]
}
#calculate UTM coordinates for individualIDs
dat_apply <- dat %>%
dplyr::select(c(stemDistance, stemAzimuth, easting, northing))
coords <- apply(dat_apply,1,
function(p)retrieve_dist_to_utm(p[1],p[2], p[3], p[4])) %>%
t %>%
data.frame
colnames(coords) <- c('itcEasting', 'itcNorthing')
#colnames(dat)[54:55] <- c("plotEasting", "plotNorthing")
#colnames(dat)[27:28] <- c("geoEasting", "geoNorthing")
field_tag <- cbind(dat, coords) %>% filter(!is.na(itcEasting))
return(field_tag)
}
|
# Animated graphs
#
# Animated map of species observations over a year, built with gganimate.
# NOTE(review): `dat` (observation records with Ostkoordinat/Nordkoordinat
# coordinates, Id, and a Startdatum date) and `dat_swe_border` (Sweden
# border outline with columns X1/X2) are assumed to be created by an
# earlier script in this project -- confirm before running standalone.
library(gganimate)
library(dplyr)
library(lubridate)
## Points over time: each observation appears on its start date, fades
## in, and stays on screen (shadow_mark), colored by month. The x
## coordinate is offset by month * 700000 so each month renders as its
## own side-by-side strip of the map.
g <- dat %>%
  ggplot(aes(Ostkoordinat + month(Startdatum) * 700000, Nordkoordinat, color = month(Startdatum, label = T))) +
  geom_point(aes(group = seq_along(Id))) +
  geom_path(aes(X1, X2), data = dat_swe_border, inherit_aes = F) +
  coord_equal() +
  #facet_wrap(dat %>% pull(Startdatum) %>% ymd() %>% month(label = T)) +
  transition_time(as.numeric(date(Startdatum))) +
  enter_fade() +
  exit_fade() +
  shadow_mark(past = T, future = F) +
  labs(title = 'Dag: {as_date(frame_time)}', x = "", y = "") +
  theme_bw() +
  theme(legend.position = "none", axis.text = element_blank(), panel.grid = element_blank(), axis.ticks = element_blank())
# Render a 30-second, 1000-frame animation that pauses on the last
# frame, then save it as a GIF.
animate(g, duration = 30, end_pause = 30, nframes = 1000, height = 400, width = 1000)
anim_save("Species portal/Output/Sorgmantel2019.gif")
| /Species portal/Script/Animated graphs.R | no_license | adamflr/Visuals | R | false | false | 884 | r | # Animated graphs
library(gganimate)
library(dplyr)
library(lubridate)
## Points over time
g <- dat %>%
ggplot(aes(Ostkoordinat + month(Startdatum) * 700000, Nordkoordinat, color = month(Startdatum, label = T))) +
geom_point(aes(group = seq_along(Id))) +
geom_path(aes(X1, X2), data = dat_swe_border, inherit_aes = F) +
coord_equal() +
#facet_wrap(dat %>% pull(Startdatum) %>% ymd() %>% month(label = T)) +
transition_time(as.numeric(date(Startdatum))) +
enter_fade() +
exit_fade() +
shadow_mark(past = T, future = F) +
labs(title = 'Dag: {as_date(frame_time)}', x = "", y = "") +
theme_bw() +
theme(legend.position = "none", axis.text = element_blank(), panel.grid = element_blank(), axis.ticks = element_blank())
animate(g, duration = 30, end_pause = 30, nframes = 1000, height = 400, width = 1000)
anim_save("Species portal/Output/Sorgmantel2019.gif")
|
cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(scales)
library(readxl)
library(lubridate)
library(ggrepel)
library(gganimate)
library(tidylog)
library(zoo)
library(tidyverse)
library(tidylog)
folder_name <- "0159_ycharts_mcap_sp500_stocks"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)
########################## Start Program Here ######################### #
# Read the YCharts market-cap export: one row per company, one column per
# date (as an Excel serial number), which is gathered to long form and
# converted back to a Date.
# NOTE(review): Excel serial dates correspond to origin "1899-12-30" in
# R; origin = "1900-01-01" shifts every date by 2 days. All downstream
# filters (month == 12, date == "2017-12-02") were written against these
# shifted dates, so correcting the origin requires coordinated changes
# throughout the script -- flagged here rather than changed.
raw <- read_excel(paste0(importdir, "0159_ycharts_mcap_sp500_stocks/Historical Market Cap Data.xlsx"), skip = 5) %>%
  rename(symbol = Symbol,
         name = Name) %>%
  select(-Metric) %>%
  # Wide (one column per date) -> long (one row per symbol-date).
  gather(-symbol, -name, key=key, value=value) %>%
  mutate(date = as.Date(as.numeric(key), origin = "1900-01-01"),
         year = year(date),
         month = month(date),
         name = str_remove_all(str_remove_all(name, " Corp"), " Inc")) %>%
  # Drop blank symbols and missing values; "GOOG" is excluded,
  # presumably to avoid double-counting Alphabet's two share
  # classes -- confirm GOOGL is the retained ticker.
  filter(symbol != "", !is.na(value), symbol != "GOOG") %>%
  rename(mcap_millions = value) %>%
  select(date, year, month, symbol, name, mcap_millions) %>%
  arrange(symbol, date) %>%
  filter(year(date) >= 2010) %>%
  arrange(date, -mcap_millions) %>%
  # Rank companies by market cap within each date and tag the FAAMG
  # names (after normalizing a few display names).
  group_by(date) %>%
  mutate(rank = row_number(),
         name = case_when(name == "International Business Machines" ~ "IBM",
                          name == "Alphabet" ~ "Alphabet (Google)",
                          TRUE ~ name),
         faamg = ifelse(name %in% c("Facebook", "Apple", "Amazon.com", "Microsoft", "Alphabet (Google)"), 1, 0)) %>%
  ungroup()
# Plot FAAMG market share over time
all_mcap <- raw %>%
group_by(date) %>%
summarize(mcap_total = sum(mcap_millions)) %>%
ungroup()
to_plot <- raw %>%
filter(faamg == 1) %>%
group_by(date) %>%
summarize(mcap_faamg = sum(mcap_millions)) %>%
ungroup() %>%
left_join(all_mcap) %>%
mutate(pct_faamg = mcap_faamg/mcap_total)
file_path <- paste0(out_path, "/mcap_faamg.jpeg")
source_string <- "Source: YCharts, 2010-2019 (OfDollarsAndData.com)"
number_labels <- to_plot %>%
filter(date == max(to_plot$date) | date == min(to_plot$date)) %>%
mutate(label = paste0(round(100*pct_faamg, 1), "%"))
plot <- ggplot(to_plot, aes(x=date, y=pct_faamg)) +
geom_line() +
geom_text_repel(data = number_labels, aes(x=date, y = pct_faamg),
label = number_labels$label,
size = 3,
nudge_y = 0.005,
segment.color = "transparent") +
scale_y_continuous(label = percent_format(accuracy = 1)) +
of_dollars_and_data_theme +
ggtitle(paste0("FAAMG Stocks % Share of the S&P 500")) +
labs(x="Date", y="Percentage of S&P 500",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# Compute the share of total S&P 500 market cap held by the largest
# 2, 5, and 10 companies on every date, accumulating one pct_top_<t>
# column per tier into `mcap_by_year`.
# Fix vs. original: the deprecated (now defunct) underscore verb
# rename_(.dots = setNames(...)) is replaced with tidy-eval
# rename(!!new_name := old_name), which the modern dplyr loaded by this
# script supports.
tops <- c(2, 5, 10)
for(t in tops){
  # Largest `t` companies on each date (rank is computed per date).
  # Note: after the loop, `top_n` holds the top-10 subset, which the
  # bar-chart section below reuses.
  top_n <- raw %>%
    filter(rank <= t)
  top_n_sum <- top_n %>%
    group_by(date) %>%
    summarize(mcap_top_n = sum(mcap_millions)) %>%
    ungroup() %>%
    select(date, mcap_top_n)
  tmp <- raw %>%
    group_by(date) %>%
    summarize(mcap_year = sum(mcap_millions)) %>%
    ungroup() %>%
    left_join(top_n_sum) %>%
    mutate(pct_top_n = mcap_top_n/mcap_year) %>%
    rename(!!paste0("pct_top_", t) := pct_top_n) %>%
    select(date, contains("pct_"))
  if(t == min(tops)){
    mcap_by_year <- tmp
  } else{
    mcap_by_year <- mcap_by_year %>%
      left_join(tmp)
  }
}
to_plot <- mcap_by_year %>%
gather(-date, key=key, value=value)
file_path <- paste0(out_path, "/mcap_tiers.jpeg")
source_string <- "Source: YCharts, 2010-2019 (OfDollarsAndData.com)"
text_labels <- to_plot %>%
filter(date == "2017-12-02") %>%
mutate(label = case_when(
grepl("_2", key) ~ "Top 2",
grepl("_5", key) ~ "Top 5",
grepl("_10", key) ~ "Top 10",
TRUE ~ ""
))
number_labels <- to_plot %>%
filter(date == max(to_plot$date) | date == min(to_plot$date)) %>%
mutate(label = paste0(round(100*value, 1), "%"))
plot <- ggplot(to_plot, aes(x=date, y=value, col = key)) +
geom_line() +
geom_text_repel(data = text_labels, aes(x=date, y = value, col = key),
label = text_labels$label,
nudge_y = 0.02,
segment.color = "transparent") +
geom_text_repel(data = number_labels, aes(x=date, y = value, col = key),
label = number_labels$label,
size = 3,
nudge_y = 0.005,
segment.color = "transparent") +
scale_color_discrete(guide = FALSE) +
scale_y_continuous(label = percent_format(accuracy = 1)) +
of_dollars_and_data_theme +
theme(legend.position = "bottom",
legend.title = element_blank()) +
ggtitle(paste0("Share of Largest Companies in the S&P 500")) +
labs(x="Date", y="Percentage of S&P 500",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# Plot top 10 companies by date over time
file_path <- paste0(out_path, "/mcap_top_10.jpeg")
to_plot <- top_n %>%
mutate(month = month(date),
mcap_billions = mcap_millions/1000,
label = paste0("$", formatC(mcap_billions, digits = 0, format = "f", big.mark = ","))) %>%
filter(month == 12) %>%
mutate(tech = case_when(
name %in% c("Alphabet (Google)", "Apple", "Microsoft", "Amazon.com", "Facebook") ~ 1,
TRUE ~ 0
))
unique(to_plot$name)
plot <- ggplot(to_plot, aes(x=rank, y=mcap_billions, fill = as.factor(tech))) +
geom_bar(stat = "identity") +
geom_text(data=to_plot, aes(x=rank, y=mcap_billions, label = label, col = as.factor(tech), hjust=-0.1), size = 3) +
geom_text(aes(y = 0, label = paste(name, " "), col = as.factor(tech)), vjust = 0.2, hjust = 1, size = 3) +
scale_color_manual(guide = FALSE, values = c("black", "blue")) +
scale_fill_manual(guide = FALSE, values = c("black", "blue")) +
coord_flip(clip = "off", expand = FALSE) +
scale_y_continuous(label = dollar, limits = c(0, 1250)) +
scale_x_reverse(breaks = seq(1, 10)) +
of_dollars_and_data_theme +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks.x=element_blank(),
axis.ticks.y=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_blank(),
legend.position="none",
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank(),
plot.margin = margin(1, 2, 1, 3.5, "cm"),
plot.caption = element_text(hjust = 0, family = "my_font", size = 8)) +
ggtitle(paste0("Market Cap of 10 Largest Companies in S&P 500\n{closest_state}")) +
labs(x=" ", y="Market Cap (in billions)",
caption = paste0(source_string)) +
transition_states(year, transition_length = 4, state_length = 4)
animate <- 0
if(animate == 1){
anim <- animate(plot, fps = 7)
anim_save(filename = paste0("top_10_companies_2010_2019.gif"), animation = anim, path = out_path)
}
# ############################ End ################################## # | /analysis/0159_ycharts_mcap_sp500_stocks.R | no_license | justkp/of-dollars-and-data | R | false | false | 7,824 | r | cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of_dollars_and_data")
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(scales)
library(readxl)
library(lubridate)
library(ggrepel)
library(gganimate)
library(tidylog)
library(zoo)
library(tidyverse)
library(tidylog)
folder_name <- "0159_ycharts_mcap_sp500_stocks"
out_path <- paste0(exportdir, folder_name)
dir.create(file.path(paste0(out_path)), showWarnings = FALSE)
########################## Start Program Here ######################### #
raw <- read_excel(paste0(importdir, "0159_ycharts_mcap_sp500_stocks/Historical Market Cap Data.xlsx"), skip = 5) %>%
rename(symbol = Symbol,
name = Name) %>%
select(-Metric) %>%
gather(-symbol, -name, key=key, value=value) %>%
mutate(date = as.Date(as.numeric(key), origin = "1900-01-01"),
year = year(date),
month = month(date),
name = str_remove_all(str_remove_all(name, " Corp"), " Inc")) %>%
filter(symbol != "", !is.na(value), symbol != "GOOG") %>%
rename(mcap_millions = value) %>%
select(date, year, month, symbol, name, mcap_millions) %>%
arrange(symbol, date) %>%
filter(year(date) >= 2010) %>%
arrange(date, -mcap_millions) %>%
group_by(date) %>%
mutate(rank = row_number(),
name = case_when(name == "International Business Machines" ~ "IBM",
name == "Alphabet" ~ "Alphabet (Google)",
TRUE ~ name),
faamg = ifelse(name %in% c("Facebook", "Apple", "Amazon.com", "Microsoft", "Alphabet (Google)"), 1, 0)) %>%
ungroup()
# Plot FAAMG market share over time
all_mcap <- raw %>%
group_by(date) %>%
summarize(mcap_total = sum(mcap_millions)) %>%
ungroup()
to_plot <- raw %>%
filter(faamg == 1) %>%
group_by(date) %>%
summarize(mcap_faamg = sum(mcap_millions)) %>%
ungroup() %>%
left_join(all_mcap) %>%
mutate(pct_faamg = mcap_faamg/mcap_total)
file_path <- paste0(out_path, "/mcap_faamg.jpeg")
source_string <- "Source: YCharts, 2010-2019 (OfDollarsAndData.com)"
number_labels <- to_plot %>%
filter(date == max(to_plot$date) | date == min(to_plot$date)) %>%
mutate(label = paste0(round(100*pct_faamg, 1), "%"))
plot <- ggplot(to_plot, aes(x=date, y=pct_faamg)) +
geom_line() +
geom_text_repel(data = number_labels, aes(x=date, y = pct_faamg),
label = number_labels$label,
size = 3,
nudge_y = 0.005,
segment.color = "transparent") +
scale_y_continuous(label = percent_format(accuracy = 1)) +
of_dollars_and_data_theme +
ggtitle(paste0("FAAMG Stocks % Share of the S&P 500")) +
labs(x="Date", y="Percentage of S&P 500",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# Do plot by top 2, 5, and 10 companies in S&P 500
tops <- c(2, 5, 10)
for(t in tops){
top_n <- raw %>%
filter(rank <= t)
top_n_sum <- top_n %>%
group_by(date) %>%
summarize(mcap_top_n = sum(mcap_millions)) %>%
ungroup() %>%
select(date, mcap_top_n)
tmp <- raw %>%
group_by(date) %>%
summarize(mcap_year = sum(mcap_millions)) %>%
ungroup() %>%
left_join(top_n_sum) %>%
mutate(pct_top_n = mcap_top_n/mcap_year) %>%
rename_(.dots = setNames("pct_top_n", paste0("pct_top_", t))) %>%
select(date, contains("pct_"))
if(t == min(tops)){
mcap_by_year <- tmp
} else{
mcap_by_year <- mcap_by_year %>%
left_join(tmp)
}
}
to_plot <- mcap_by_year %>%
gather(-date, key=key, value=value)
file_path <- paste0(out_path, "/mcap_tiers.jpeg")
source_string <- "Source: YCharts, 2010-2019 (OfDollarsAndData.com)"
text_labels <- to_plot %>%
filter(date == "2017-12-02") %>%
mutate(label = case_when(
grepl("_2", key) ~ "Top 2",
grepl("_5", key) ~ "Top 5",
grepl("_10", key) ~ "Top 10",
TRUE ~ ""
))
number_labels <- to_plot %>%
filter(date == max(to_plot$date) | date == min(to_plot$date)) %>%
mutate(label = paste0(round(100*value, 1), "%"))
plot <- ggplot(to_plot, aes(x=date, y=value, col = key)) +
geom_line() +
geom_text_repel(data = text_labels, aes(x=date, y = value, col = key),
label = text_labels$label,
nudge_y = 0.02,
segment.color = "transparent") +
geom_text_repel(data = number_labels, aes(x=date, y = value, col = key),
label = number_labels$label,
size = 3,
nudge_y = 0.005,
segment.color = "transparent") +
scale_color_discrete(guide = FALSE) +
scale_y_continuous(label = percent_format(accuracy = 1)) +
of_dollars_and_data_theme +
theme(legend.position = "bottom",
legend.title = element_blank()) +
ggtitle(paste0("Share of Largest Companies in the S&P 500")) +
labs(x="Date", y="Percentage of S&P 500",
caption = paste0(source_string))
# Save the plot
ggsave(file_path, plot, width = 15, height = 12, units = "cm")
# Plot top 10 companies by date over time
file_path <- paste0(out_path, "/mcap_top_10.jpeg")
to_plot <- top_n %>%
mutate(month = month(date),
mcap_billions = mcap_millions/1000,
label = paste0("$", formatC(mcap_billions, digits = 0, format = "f", big.mark = ","))) %>%
filter(month == 12) %>%
mutate(tech = case_when(
name %in% c("Alphabet (Google)", "Apple", "Microsoft", "Amazon.com", "Facebook") ~ 1,
TRUE ~ 0
))
unique(to_plot$name)
plot <- ggplot(to_plot, aes(x=rank, y=mcap_billions, fill = as.factor(tech))) +
geom_bar(stat = "identity") +
geom_text(data=to_plot, aes(x=rank, y=mcap_billions, label = label, col = as.factor(tech), hjust=-0.1), size = 3) +
geom_text(aes(y = 0, label = paste(name, " "), col = as.factor(tech)), vjust = 0.2, hjust = 1, size = 3) +
scale_color_manual(guide = FALSE, values = c("black", "blue")) +
scale_fill_manual(guide = FALSE, values = c("black", "blue")) +
coord_flip(clip = "off", expand = FALSE) +
scale_y_continuous(label = dollar, limits = c(0, 1250)) +
scale_x_reverse(breaks = seq(1, 10)) +
of_dollars_and_data_theme +
theme(axis.line=element_blank(),
axis.text.x=element_blank(),
axis.text.y=element_blank(),
axis.ticks.x=element_blank(),
axis.ticks.y=element_blank(),
axis.title.x=element_blank(),
axis.title.y=element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_blank(),
legend.position="none",
panel.background=element_blank(),
panel.border=element_blank(),
panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
plot.background=element_blank(),
plot.margin = margin(1, 2, 1, 3.5, "cm"),
plot.caption = element_text(hjust = 0, family = "my_font", size = 8)) +
ggtitle(paste0("Market Cap of 10 Largest Companies in S&P 500\n{closest_state}")) +
labs(x=" ", y="Market Cap (in billions)",
caption = paste0(source_string)) +
transition_states(year, transition_length = 4, state_length = 4)
animate <- 0
if(animate == 1){
anim <- animate(plot, fps = 7)
anim_save(filename = paste0("top_10_companies_2010_2019.gif"), animation = anim, path = out_path)
}
# ############################ End ################################## # |
# Exports this set's total copy-number signals as CN *ratios* relative to a
# reference, writing one "<fullname>,<refTag>,<typeTags>.asb" file per input
# file under <rootPath>/<dataSet,tags>/<chipType>/ and returning the written
# files as a new AromaUnitTotalCnBinarySet (invisibly).
#
# Argument 'ref' selects the reference signals:
#  - AromaUnitTotalCnBinaryFile: that single file is reused for every sample;
#  - AromaUnitTotalCnBinarySet: paired file-by-file (chip type and unit count
#    must match this set);
#  - numeric vector: used directly as per-unit reference signals;
#  - "median"/"mean" (default "median"): per-unit average computed across
#    this set the first time it is needed.
# Argument 'logBase': base of the log transform applied to the ratios, or
# NULL to export plain (non-logged) ratios.
setMethodS3("exportTotalCnRatioSet", "AromaUnitTotalCnBinarySet", function(this, ref="median", ..., logBase=2, tags=NULL, overwrite=FALSE, rootPath="rawCnData", verbose=FALSE) {
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'this':
  nbrOfFiles <- length(this);
  if (nbrOfFiles == 0L) {
    throw("Cannot export. ", class(this)[1L], " is empty: ", getFullName(this));
  }
  # Argument 'ref':
  nbrOfUnits <- nbrOfUnits(getOneFile(this));
  chipType <- getChipType(this);
  if (is.null(ref)) {
    throw("Argument 'ref' must not be NULL.");
  }
  if (inherits(ref, "AromaUnitTotalCnBinaryFile")) {
    # Single reference file: replicate it into a set so it pairs one-to-one
    # with the files of this set, then fall through to the set branch below.
    refList <- rep(list(ref), nbrOfFiles);
    refSet <- AromaUnitTotalCnBinarySet(refList);
    # Not needed anymore
    refList <- NULL;
  }
  if (inherits(ref, "AromaUnitTotalCnBinarySet")) {
    if (getChipType(ref) != chipType) {
      throw("Chip type of argument 'ref' does not match the data set: ", getChipType(ref), " != ", chipType);
    }
    df <- getOneFile(ref);
    if (nbrOfUnits(df) != nbrOfUnits) {
      throw("Number of units in argument 'ref' does not match the data set: ", nbrOfUnits(ref), " != ", nbrOfUnits);
    }
    refSet <- ref;
    thetaR <- NULL;
  } else if (inherits(ref, "numeric")) {
    # NOTE(review): inherits(x, "numeric") is FALSE for integer vectors, so
    # reference signals must be passed as doubles -- confirm this is intended.
    thetaR <- Arguments$getNumeric(ref, length=nbrOfUnits);
    refSet <- NULL;
  } else if (is.character(ref)) {
    refMethod <- match.arg(ref, c("median", "mean"));
    refSet <- NULL;
    thetaR <- NULL;
  }
  # Argument 'logBase':
  if (!is.null(logBase)) {
    logBase <- Arguments$getDouble(logBase, range=c(1,Inf));
  }
  # Argument 'tags':
  tags <- Arguments$getTags(tags, collapse=",");
  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose);
  if (verbose) {
    pushState(verbose);
    on.exit(popState(verbose));
  }
  verbose && enter(verbose, "Calculating CN ratios");
  dataSet <- getFullName(this);
  verbose && cat(verbose, "Data set: ", dataSet);
  platform <- getPlatform(this);
  verbose && cat(verbose, "Platform: ", platform);
  chipType <- getChipType(this);
  verbose && cat(verbose, "Chip type: ", chipType);
  nbrOfFiles <- length(this);
  verbose && cat(verbose, "Number of files: ", nbrOfFiles);
  if (!is.null(refSet)) {
    verbose && cat(verbose, "Reference set:");
    verbose && print(verbose, refSet);
  } else {
    verbose && str(verbose, "theta[R]: ", thetaR);
  }
  dataSetOut <- paste(c(dataSet, tags), collapse=",");
  verbose && cat(verbose, "Output data set: ", dataSetOut);
  chipTypeS <- getChipType(this, fullname=FALSE);
  outPath <- file.path(rootPath, dataSetOut, chipTypeS);
  outPath <- Arguments$getWritablePath(outPath);
  verbose && cat(verbose, "Output path: ", outPath);
  if (is.null(logBase)) {
    ratioTag <- "ratio";
  } else {
    # NOTE(review): sprintf("%d", logBase) assumes a whole-valued log base
    # (logBase is a double here); a fractional base would make sprintf() fail.
    ratioTag <- sprintf("log%dratio", logBase);
  }
  typeTags <- paste(c(ratioTag, "total"), collapse=",");
  # Export each file of the set.
  for (kk in seq_along(this)) {
    ce <- this[[kk]];
    verbose && enter(verbose, sprintf("File %d ('%s') of %d", kk, getName(ce), nbrOfFiles));
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Setting up output filename
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if (!is.null(refSet)) {
      ceR <- refSet[[kk]];
      refName <- getFullName(ceR);
      # Strip signal-type tags so they do not end up duplicated in the name.
      refName <- gsub(",(total|log2ratio)", "", refName);
      refTag <- sprintf("ref=%s", refName);
    } else {
      ceR <- NULL;
      refTag <- NULL;
    }
    fullname <- getFullName(ce);
    fullname <- gsub(",(total|log2ratio)", "", fullname);
    fullname <- paste(c(fullname, refTag, typeTags), collapse=",");
    filename <- sprintf("%s.asb", fullname);
    pathname <- file.path(outPath, filename);
    verbose && cat(verbose, "Pathname: ", pathname);
    if (!overwrite && isFile(pathname)) {
      verbose && cat(verbose, "Nothing to do. File already exists.");
      verbose && exit(verbose);
      next;
    }
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Allocating
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    verbose && enter(verbose, "Allocating (temporary) output file");
    # Write to a temporary file first; it is renamed at the end so a crash
    # never leaves a half-written *.asb behind.
    pathnameT <- pushTemporaryFile(pathname, verbose=verbose);
    asb <- AromaUnitSignalBinaryFile$allocate(pathnameT, nbrOfRows=nbrOfUnits(ce), platform=platform, chipType=chipType);
    verbose && print(verbose, asb);
    verbose && exit(verbose);
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Calculating relative CNs
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    verbose && enter(verbose, "Reading data from total file");
    theta <- extractMatrix(ce, drop=TRUE, verbose=verbose);
    # Transform to intensity scale?
    if (hasTag(ce, "log2ratio")) {
      theta <- 2^theta;
      verbose && cat(verbose, "Transformed theta = 2^M");
    }
    # Sanity check
    verbose && str(verbose, theta);
    verbose && exit(verbose);
    verbose && enter(verbose, "Calculating ratios");
    if (!is.null(refSet)) {
      thetaR <- extractMatrix(ceR, drop=TRUE, verbose=verbose);
      # Transform to intensity scale?
      if (hasTag(ceR, "log2ratio")) {
        thetaR <- 2^thetaR;
        verbose && cat(verbose, "Transformed thetaR = 2^MR");
      }
      verbose && str(verbose, thetaR);
    } else if (is.null(thetaR)) {
      verbose && enter(verbose, "Calculating reference signals");
      verbose && cat(verbose, "Averaging method: ", refMethod);
      # Sanity check?
      # NOTE(review): this reassigns the loop variable 'ce'; the srcFile
      # footer below may then record getOneFile(this) rather than this[[kk]]
      # for the iteration where the reference is first computed -- the ratio
      # itself is unaffected (theta was extracted above). TODO confirm.
      ce <- getOneFile(this);
      if (hasTag(ce, "log2ratio")) {
        throw("Cannot estimate reference signals by calculating average across data set. Not implemented for CN ratio data sets.");
      }
      # Per-unit average across all files; computed once and reused for the
      # remaining samples, since thetaR persists across iterations.
      thetaR <- calculateAverageColumnAcrossFiles(this, method=refMethod,
                                                  verbose=less(verbose,5));
      verbose && str(verbose, thetaR);
      verbose && exit(verbose);
    }
    # Sanity check
    stopifnot(length(thetaR) == length(theta));
    verbose && cat(verbose, "Copy-number ratios:");
    C <- theta / thetaR;
    verbose && str(verbose, C);
    # Not needed anymore
    theta <- NULL;
    # Log ratios?
    if (!is.null(logBase)) {
      # log_base(C) via the change-of-base identity.
      C <- log(C) / log(logBase);
      verbose && cat(verbose, "Log copy-number ratios:");
      verbose && str(verbose, C);
    }
    verbose && exit(verbose);
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Storing relative CNs
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    verbose && enter(verbose, "Updating temporary output file");
    # Store data
    asb[,1] <- C;
    # Not needed anymore
    C <- NULL;
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    # Updating file footer
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    if (!is.null(ceR)) {
      refFile <- list(
        dataSet=dataSet,
        fullName=getFullName(ceR),
        filename=getFilename(ceR),
        checksum=getChecksum(ceR)
      );
    } else {
      # Numeric/averaged reference: record only its checksum for provenance.
      refFile <- list(thetaR=getChecksum(thetaR));
    }
    footer <- readFooter(asb);
    footer$srcFiles <- list(
      srcFile = list(
        dataSet=dataSet,
        fullName=getFullName(ce),
        filename=getFilename(ce),
        checksum=getChecksum(ce)
      ),
      refFile = refFile
    );
    writeFooter(asb, footer);
    # Not needed anymore
    footer <- refFile <- NULL;
    verbose && exit(verbose);
    # Renaming temporary file
    pathname <- popTemporaryFile(pathnameT, verbose=verbose);
    verbose && exit(verbose);
  } # for (kk ...)
  verbose && enter(verbose, "Setting up output data sets");
  # Collect the just-written ratio files into the returned set.
  pattern <- sprintf("%s.asb", typeTags);
  res <- AromaUnitTotalCnBinarySet$byPath(outPath, pattern=pattern);
  verbose && exit(verbose);
  verbose && exit(verbose);
  invisible(res);
}) # exportTotalCnRatioSet()
############################################################################
# HISTORY:
# 2012-08-26
# o Dropped an "rm(thetaR)", because it sometimes caused a warning on
# "In rm(thetaR) : object 'thetaR' not found".
# 2011-11-19
# o BUG FIX: exportTotalCnRatioSet() for AromaUnitTotalCnBinarySet tried
# to call cat(verbose, x) with length(x) > 1.
# 2009-09-24
# o Added more verbose output.
# 2009-06-13
# o BUG FIX: exportTotalCnRatioSet() would return a
# AromaUnitFracBCnBinarySet.
# 2009-05-17
# o BUG FIX: exportTotalCnRatioSet() would return any signal file.
# 2009-02-22
# o Updated exportTotalCnRatioSet() to take log2ratio files as well.
# 2009-02-18
# o Renamed from getTotalCnRatioSet() to exportTotalCnRatioSet().
# o Added support for more complex argument 'ref'.
# 2009-02-16
# o No longer multiplying by 2.
# 2009-02-13
# o TODO: Make use of getAverageFile(), which still does not exist.
# o Created.
############################################################################
| /aroma.core/R/AromaUnitTotalCnBinarySet.exportTotalCnRatioSet.R | no_license | ingted/R-Examples | R | false | false | 9,296 | r | setMethodS3("exportTotalCnRatioSet", "AromaUnitTotalCnBinarySet", function(this, ref="median", ..., logBase=2, tags=NULL, overwrite=FALSE, rootPath="rawCnData", verbose=FALSE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'this':
nbrOfFiles <- length(this);
if (nbrOfFiles == 0L) {
throw("Cannot export. ", class(this)[1L], " is empty: ", getFullName(this));
}
# Argument 'ref':
nbrOfUnits <- nbrOfUnits(getOneFile(this));
chipType <- getChipType(this);
if (is.null(ref)) {
throw("Argument 'ref' must not be NULL.");
}
if (inherits(ref, "AromaUnitTotalCnBinaryFile")) {
refList <- rep(list(ref), nbrOfFiles);
refSet <- AromaUnitTotalCnBinarySet(refList);
# Not needed anymore
refList <- NULL;
}
if (inherits(ref, "AromaUnitTotalCnBinarySet")) {
if (getChipType(ref) != chipType) {
throw("Chip type of argument 'ref' does not match the data set: ", getChipType(ref), " != ", chipType);
}
df <- getOneFile(ref);
if (nbrOfUnits(df) != nbrOfUnits) {
throw("Number of units in argument 'ref' does not match the data set: ", nbrOfUnits(ref), " != ", nbrOfUnits);
}
refSet <- ref;
thetaR <- NULL;
} else if (inherits(ref, "numeric")) {
thetaR <- Arguments$getNumeric(ref, length=nbrOfUnits);
refSet <- NULL;
} else if (is.character(ref)) {
refMethod <- match.arg(ref, c("median", "mean"));
refSet <- NULL;
thetaR <- NULL;
}
# Argument 'logBase':
if (!is.null(logBase)) {
logBase <- Arguments$getDouble(logBase, range=c(1,Inf));
}
# Argument 'tags':
tags <- Arguments$getTags(tags, collapse=",");
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
verbose && enter(verbose, "Calculating CN ratios");
dataSet <- getFullName(this);
verbose && cat(verbose, "Data set: ", dataSet);
platform <- getPlatform(this);
verbose && cat(verbose, "Platform: ", platform);
chipType <- getChipType(this);
verbose && cat(verbose, "Chip type: ", chipType);
nbrOfFiles <- length(this);
verbose && cat(verbose, "Number of files: ", nbrOfFiles);
if (!is.null(refSet)) {
verbose && cat(verbose, "Reference set:");
verbose && print(verbose, refSet);
} else {
verbose && str(verbose, "theta[R]: ", thetaR);
}
dataSetOut <- paste(c(dataSet, tags), collapse=",");
verbose && cat(verbose, "Output data set: ", dataSetOut);
chipTypeS <- getChipType(this, fullname=FALSE);
outPath <- file.path(rootPath, dataSetOut, chipTypeS);
outPath <- Arguments$getWritablePath(outPath);
verbose && cat(verbose, "Output path: ", outPath);
if (is.null(logBase)) {
ratioTag <- "ratio";
} else {
ratioTag <- sprintf("log%dratio", logBase);
}
typeTags <- paste(c(ratioTag, "total"), collapse=",");
for (kk in seq_along(this)) {
ce <- this[[kk]];
verbose && enter(verbose, sprintf("File %d ('%s') of %d", kk, getName(ce), nbrOfFiles));
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Setting up output filename
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (!is.null(refSet)) {
ceR <- refSet[[kk]];
refName <- getFullName(ceR);
refName <- gsub(",(total|log2ratio)", "", refName);
refTag <- sprintf("ref=%s", refName);
} else {
ceR <- NULL;
refTag <- NULL;
}
fullname <- getFullName(ce);
fullname <- gsub(",(total|log2ratio)", "", fullname);
fullname <- paste(c(fullname, refTag, typeTags), collapse=",");
filename <- sprintf("%s.asb", fullname);
pathname <- file.path(outPath, filename);
verbose && cat(verbose, "Pathname: ", pathname);
if (!overwrite && isFile(pathname)) {
verbose && cat(verbose, "Nothing to do. File already exists.");
verbose && exit(verbose);
next;
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Allocating
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, "Allocating (temporary) output file");
pathnameT <- pushTemporaryFile(pathname, verbose=verbose);
asb <- AromaUnitSignalBinaryFile$allocate(pathnameT, nbrOfRows=nbrOfUnits(ce), platform=platform, chipType=chipType);
verbose && print(verbose, asb);
verbose && exit(verbose);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Calculating relative CNs
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, "Reading data from total file");
theta <- extractMatrix(ce, drop=TRUE, verbose=verbose);
# Transform to intensity scale?
if (hasTag(ce, "log2ratio")) {
theta <- 2^theta;
verbose && cat(verbose, "Transformed theta = 2^M");
}
# Sanity check
verbose && str(verbose, theta);
verbose && exit(verbose);
verbose && enter(verbose, "Calculating ratios");
if (!is.null(refSet)) {
thetaR <- extractMatrix(ceR, drop=TRUE, verbose=verbose);
# Transform to intensity scale?
if (hasTag(ceR, "log2ratio")) {
thetaR <- 2^thetaR;
verbose && cat(verbose, "Transformed thetaR = 2^MR");
}
verbose && str(verbose, thetaR);
} else if (is.null(thetaR)) {
verbose && enter(verbose, "Calculating reference signals");
verbose && cat(verbose, "Averaging method: ", refMethod);
# Sanity check?
ce <- getOneFile(this);
if (hasTag(ce, "log2ratio")) {
throw("Cannot estimate reference signals by calculating average across data set. Not implemented for CN ratio data sets.");
}
thetaR <- calculateAverageColumnAcrossFiles(this, method=refMethod,
verbose=less(verbose,5));
verbose && str(verbose, thetaR);
verbose && exit(verbose);
}
# Sanity check
stopifnot(length(thetaR) == length(theta));
verbose && cat(verbose, "Copy-number ratios:");
C <- theta / thetaR;
verbose && str(verbose, C);
# Not needed anymore
theta <- NULL;
# Log ratios?
if (!is.null(logBase)) {
C <- log(C) / log(logBase);
verbose && cat(verbose, "Log copy-number ratios:");
verbose && str(verbose, C);
}
verbose && exit(verbose);
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Storing relative CNs
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
verbose && enter(verbose, "Updating temporary output file");
# Store data
asb[,1] <- C;
# Not needed anymore
C <- NULL;
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Updating file footer
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if (!is.null(ceR)) {
refFile <- list(
dataSet=dataSet,
fullName=getFullName(ceR),
filename=getFilename(ceR),
checksum=getChecksum(ceR)
);
} else {
refFile <- list(thetaR=getChecksum(thetaR));
}
footer <- readFooter(asb);
footer$srcFiles <- list(
srcFile = list(
dataSet=dataSet,
fullName=getFullName(ce),
filename=getFilename(ce),
checksum=getChecksum(ce)
),
refFile = refFile
);
writeFooter(asb, footer);
# Not needed anymore
footer <- refFile <- NULL;
verbose && exit(verbose);
# Renaming temporary file
pathname <- popTemporaryFile(pathnameT, verbose=verbose);
verbose && exit(verbose);
} # for (kk ...)
verbose && enter(verbose, "Setting up output data sets");
pattern <- sprintf("%s.asb", typeTags);
res <- AromaUnitTotalCnBinarySet$byPath(outPath, pattern=pattern);
verbose && exit(verbose);
verbose && exit(verbose);
invisible(res);
}) # exportTotalCnRatioSet()
############################################################################
# HISTORY:
# 2012-08-26
# o Dropped an "rm(thetaR)", because it sometimes caused a warning on
# "In rm(thetaR) : object 'thetaR' not found".
# 2011-11-19
# o BUG FIX: exportTotalCnRatioSet() for AromaUnitTotalCnBinarySet tried
# to call cat(verbose, x) with length(x) > 1.
# 2009-09-24
# o Added more verbose output.
# 2009-06-13
# o BUG FIX: exportTotalCnRatioSet() would return a
# AromaUnitFracBCnBinarySet.
# 2009-05-17
# o BUG FIX: exportTotalCnRatioSet() would return any signal file.
# 2009-02-22
# o Updated exportTotalCnRatioSet() to take log2ratio files as well.
# 2009-02-18
# o Renamed from getTotalCnRatioSet() to exportTotalCnRatioSet().
# o Added support for more complex argument 'ref'.
# 2009-02-16
# o No longer multiplying by 2.
# 2009-02-13
# o TODO: Make use of getAverageFile(), which still does not exist.
# o Created.
############################################################################
|
# Ad-hoc test app for the `focus_20200320_novara` golem module:
# detach and reload the development package, mount the module's UI and
# server under a shared id, and launch the app.
library(shiny)
golem::detach_all_attached()
golem::document_and_reload()
# Module UI and server must use the same id ("test_1") to be wired together.
ui <- mod_focus_20200320_novara_ui("test_1")
server <- function(input,output,session){
  mod_focus_20200320_novara_server("test_1")
}
shinyApp(ui, server)
| /dev/test_mod_focus_20200320_novara.R | permissive | fpirotti/covid19ita | R | false | false | 232 | r | library(shiny)
golem::detach_all_attached()
golem::document_and_reload()
ui <- mod_focus_20200320_novara_ui("test_1")
server <- function(input,output,session){
mod_focus_20200320_novara_server("test_1")
}
shinyApp(ui, server)
|
# Unicode superscript (modifier) letters used to render table-cell footnote
# markers; indexed 1-based via `sups + 1` in silkyFormatElement().
.SUPCHARS <- c("\u1D43", "\u1D47", "\u1D48", "\u1D49", "\u1DA0", "\u1D4D", "\u02B0", "\u2071",
               "\u02B2", "\u1D4F", "\u02E1", "\u1D50", "\u207F", "\u1D52", "\u1D56", "\u02B3", "\u02E2",
               "\u1D57", "\u1D58", "\u1D5B", "\u02B7", "\u02E3", "\u02B8", "\u1DBB")
# Warns that the supplied arguments are ignored.
#
# Collects `...` and, when at least one argument was passed, emits a single
# warning naming each ignored argument (one per line). With no arguments it
# is silent.
#
# BUG FIX: `args` was never assigned locally, so the lookup found the base
# function args() (length 1) and the condition was always TRUE -- a spurious
# "Ignoring argument ''" warning fired even for ignore() with no arguments.
ignore <- function(...) {
  args <- list(...)
  if (length(args) > 0)
    warning(paste(paste0("Ignoring argument '", names(args), "'"), collapse='\n'))
}
# Builds and signals an error condition from a vector/list of message
# formats: the first element (format()-ed with `...`) becomes the main
# message, and every *named* element is format()-ed and attached to the
# condition under its name; `code` is stored as error$code. Never returns.
# NOTE(review): formats[1] uses single-bracket indexing, so format()
# receives a (possibly named) length-1 vector -- presumably intentional;
# verify against the package-local format() below.
reject <- function(formats, code=NULL, ...) {
  message <- format(formats[1], ...)
  error <- simpleError(message)
  for (name in names(formats)) {
    if (name != "") {
      message <- format(formats[[name]], ...)
      error[[name]] <- message
    }
  }
  error$code <- code
  stop(error)
}
# Package-level caches mapping "package::analysis" keys to parsed YAML
# definitions, so each *.a.yaml / *.r.yaml file is read at most once.
.analysisInfoCache <- new.env()
.resultsInfoCache <- new.env()
# Returns the analysis definition for `analysisName` in `packageName`,
# parsed from the package's silky/<name>.a.yaml file and memoised in
# .analysisInfoCache under the key "<package>::<analysis>".
loadAnalysisInfo <- function(packageName, analysisName) {
  key <- paste0(packageName, "::", analysisName)
  if (key %in% names(.analysisInfoCache))
    return(.analysisInfoCache[[key]])
  path <- system.file("silky", paste0(tolower(analysisName), ".a.yaml"), package=packageName)
  parsed <- yaml::yaml.load_file(path)
  .analysisInfoCache[[key]] <- parsed
  parsed
}
# Returns the results definition for `analysisName` in `packageName`,
# parsed from the package's silky/<name>.r.yaml file and memoised in
# .resultsInfoCache under the key "<package>::<analysis>".
loadResultsInfo <- function(packageName, analysisName) {
  key <- paste0(packageName, "::", analysisName)
  if (key %in% names(.resultsInfoCache))
    return(.resultsInfoCache[[key]])
  path <- system.file("silky", paste0(tolower(analysisName), ".r.yaml"), package=packageName)
  parsed <- yaml::yaml.load_file(path)
  .resultsInfoCache[[key]] <- parsed
  parsed
}
# Maps an R column to its measurement type: ordered factors are "ordinal",
# plain factors "nominal", everything else (numeric, character, ...)
# "continuous".
columnType <- function(column) {
  if (inherits(column, "ordered"))
    return("ordinal")
  if (inherits(column, "factor"))
    return("nominal")
  "continuous"
}
# Human-readable description of a column's R type, for use in messages:
# "an ordered factor", "a factor", or "numeric" for everything else.
columnTypeRDescription <- function(column) {
  if (is.ordered(column))
    return("an ordered factor")
  if (is.factor(column))
    return("a factor")
  "numeric"
}
# Capitalises the first character of each string in `s` (vectorised);
# empty strings pass through unchanged.
cap1st <- function(s) {
  first <- toupper(substring(s, 1, 1))
  rest <- substring(s, 2)
  paste0(first, rest)
}
# Builds the conventional setter name for a property, e.g. "width" -> "setWidth".
setter <- function(s) {
  capitalized <- cap1st(s)
  paste0("set", capitalized)
}
# Substitutes "{name}" placeholders in `str` with values looked up in the
# environment (or list) `env`, rendered via stringify() under `context`.
# Placeholders whose name is absent from `env` are left untouched.
formatFromEnv <- function(str, env, context="normal") {
  # Positions and lengths of all alphabetic "{...}" placeholders.
  matches <- gregexpr("\\{[a-zA-Z]+\\}", str)[[1]]
  if (matches[1] > 0) {
    for (i in seq_along(matches)) {
      # Extract the placeholder name without the surrounding braces.
      name <- substring(str, matches[i] + 1, matches[i] + attr(matches, "match.length")[i]-2)
      if (name %in% names(env))
        str <- gsub(paste0("{", name, "}"), stringify(env[[name]], context), str, fixed=TRUE)
    }
  }
  str
}
# Lightweight string interpolation (shadows base::format within this
# package). Supports three placeholder styles:
#   "{}"        -- anonymous, consumed left-to-right from `...`
#   "{0}", ...  -- zero-based positional placeholders
#   "{name}"    -- named placeholders matched against names(...)
# Values are rendered with stringify() under the given `context`.
# Anonymous "{}" placeholders take precedence: if present, the indexed and
# named styles are not processed.
format <- function(str, ..., context="normal") {
  args <- list(...)
  if (grepl("{}", str, fixed=TRUE)) {
    for (token in args)
      str <- sub("{}", stringify(token, context), str, fixed=TRUE)
  } else {
    if (grepl("\\{[0-9]+\\}", str)) {
      i <- 0
      for (token in args) {
        str <- gsub(paste0("{", i, "}"), stringify(token, context), str, fixed=TRUE)
        i <- i + 1
      }
    }
    if (grepl("\\{[a-zA-Z]+\\}", str)) {
      for (name in names(args)) {
        if (name != "" && is.null(args[[name]]) == FALSE)
          str <- gsub(paste0("{", name, "}"), stringify(args[[name]], context), str, fixed=TRUE)
      }
    }
  }
  str
}
# Returns a string of `n` spaces; the empty string when n <= 0.
spaces <- function(n) {
  if (n <= 0)
    return('')
  paste(rep(' ', n), collapse='')
}
# Position of the decimal point relative to the leading digit of `x`,
# i.e. floor(log10(x)): 100 -> 2, 5 -> 0, 0.01 -> -2.
dotPos <- function(x) {
  exponent <- log10(x)
  floor(exponent)
}
# Scans a list of table-cell values (numbers, strings, NA/NULL, or Cell
# objects carrying superscript footnote indices) and derives the common
# formatting metrics -- decimal places, total column width, exponent width
# and superscript width -- so that silkyFormatElement() can render every
# cell of the column with a consistent layout.
#
# elems: list of cell values; sf: significant figures; scl/sch: thresholds
# below/above which a value's magnitude triggers scientific notation.
# Returns list(sf, dp, width, expwidth, supwidth).
silkyMeasureElements <- function(elems, sf=3, scl=1e-3, sch=1e7) {
  dp <- 0
  maxns <- 0 # max non-scientific value
  minns <- 0
  maxsexp <- 1 # max (abs) scientific exponent
  maxsexps <- '+' # max scientific exponent sign
  maxsms <- '+' # max scientific mantissa sign
  maxstr <- 4
  maxsupwidth <- 0
  for (elem in elems) {
    sups <- integer()
    if (inherits(elem, "Cell")) {
      # Unwrap Cell: its value plus the indices of its superscript markers.
      sups <- elem$sups
      elem <- elem$value
    }
    if (is.null(elem)) {
      maxns <- 4 # rendered as the 4-character literal "null"
    } else if (is.na(elem)) {
      # do nothing
    } else if (inherits(elem, "character")) {
      maxstr <- max(maxstr, nchar(elem))
    }
    else if ( ! is.numeric(elem)) {
      maxstr <- 2 + nchar(class(elem)[1]) # rendered as "[classname]"
    }
    else if (elem == 0) {
      dp <- max(dp, sf-1)
    }
    else if (abs(elem) > scl && abs(elem) < sch) {
      # non-scientific values
      dp <- max(dp, (sf - floor(log10(abs(elem))) - 1))
      if (elem > maxns)
        maxns <- elem
      if (elem < minns)
        minns <- elem
    } else {
      # scientific values
      exp <- floor(log10(abs(elem)))
      if (abs(exp) > maxsexp) {
        maxsexp <- abs(exp)
        maxsms <- ifelse(elem >= 0, '+', '-')
      }
    }
    if (length(sups) > 0)
      maxsupwidth <- max(maxsupwidth, 1 + length(sups))
  }
  # Width needed by the widest non-scientific number (integer part).
  if (maxns != 0 || minns != 0) {
    maxnsw <- max(1, floor(log10(maxns))+1)
    minnsw <- max(1, floor(log10(abs(minns)))+1)
    if (minns < 0)
      minnsw = minnsw + 1 # for minus sign
    nswidth <- max(maxnsw, minnsw)
  } else {
    nswidth <- 1
  }
  if (dp > 0)
    nswidth <- nswidth + 1 + dp # add a decimal point
  # Width needed by the widest scientific-notation number.
  swidth <- 0
  if (maxsexp != 1)
    swidth <- (sf+1) + (2 + floor(log10(maxsexp))+1) # +3 is for the '.', 'e' and the +/-
  if (maxsms == '-')
    swidth <- swidth + 1
  # Overall column width: widest of scientific, plain-numeric and string
  # renderings, plus room for superscript markers.
  width <- max(swidth, nswidth, maxstr)
  width <- width + maxsupwidth
  list(sf=sf, dp=dp, width=width, expwidth=(2 + floor(log10(maxsexp))+1), supwidth=maxsupwidth)
}
# Renders a single table-cell value as a fixed-width string using the
# metrics produced by silkyMeasureElements(): NULL -> "null", NA -> blank,
# strings padded right, non-numeric objects as "[classname]", numbers in
# plain or scientific notation depending on scl/sch, with Cell superscript
# markers appended from .SUPCHARS. Uses the sibling helpers spaces() and
# repstr().
#
# w: total cell width (NULL = unpadded); expw: exponent field width;
# supw: width reserved for superscripts; dp/sf: decimals / sig. figures;
# scl/sch: magnitude thresholds for scientific notation.
silkyFormatElement <- function(elem, w=NULL, expw=NULL, supw=0, dp=2, sf=3, scl=1e-3, sch=1e7) {
  sups <- integer()
  supspad <- ''
  if (inherits(elem, "Cell")) {
    sups <- elem$sups
    elem <- elem$value
    # Reserve the superscript column out of the total width.
    if (is.null(elem) == FALSE)
      w <- w - supw
    thissupw <- length(sups)
    if (thissupw > 0)
      thissupw <- thissupw + 1 # add 1 for the space
    supspad <- repstr(' ', supw - thissupw)
  }
  if (is.null(elem)) {
    width <- 4
    padstr <- spaces(max(w - 4, 0))
    str <- paste0("null", padstr)
  } else if (is.na(elem)) {
    if (is.null(w))
      str <- ''
    else
      str <- repstr(' ', w)
  } else if (inherits(elem, "character")) {
    width <- nchar(elem)
    padstr <- spaces(max(w - width, 0))
    str <- paste0(elem, padstr)
  } else if ( ! is.numeric(elem)) {
    str <- paste0("[", class(elem)[1], "]")
  } else if (elem == 0 || (abs(elem) > scl && abs(elem) < sch)) {
    # non-scientific values
    str <- sprintf(paste0("%", w, ".", dp, "f"), elem)
  } else {
    # scientific values: build mantissa and exponent parts separately so
    # the 'e+NN' field lines up across the column.
    exponent <- floor(log10(abs(elem)))
    sign <- ifelse(exponent >= 0, '+', '-')
    mantissa <- elem / (10^exponent)
    exponent <- abs(exponent)
    expstr <- base::format(exponent, scientific=FALSE)
    exppad <- ''
    if ( ! is.null(expw))
      exppad <- spaces(expw-nchar(expstr)-2) # 1 for the +/-, 1 for the e
    expstr <- paste0('e', exppad, sign, expstr)
    if ( ! is.null(w))
      manstr <- base::format(mantissa, width=w-nchar(expstr), nsmall=sf-1)
    else
      manstr <- base::format(mantissa, nsmall=sf-1)
    str <- paste0(manstr, expstr)
  }
  # Append superscript footnote markers (1-based lookup into .SUPCHARS).
  if (length(sups) > 0)
    str <- paste0(str, ' ', paste(.SUPCHARS[sups+1], collapse=''))
  str <- paste0(str, supspad)
  str
}
# Repeats `value` `n` times, separated by `join`; the empty string when n <= 0.
repstr <- function(value, n, join='') {
  if (n <= 0)
    return('')
  pieces <- rep(value, n)
  paste(pieces, collapse=join)
}
# Renders a value as text for message interpolation. In the "R" context,
# NULL becomes "NULL" and everything else is paste0()-ed; in the default
# context, NULL/TRUE/FALSE are rendered JavaScript-style as "null"/"true"/
# "false" and everything else is paste0()-ed.
stringify <- function(value, context="normal") {
  if (context == "R") {
    if (is.null(value))
      return("NULL")
    return(paste0(value))
  }
  if (is.null(value))
    return("null")
  if (identical(value, TRUE))
    return("true")
  if (identical(value, FALSE))
    return("false")
  paste0(value)
}
# Extracts the final message component of an error: the text after the last
# ":" of its character representation, with surrounding whitespace trimmed.
extractErrorMessage <- function(error) {
  pieces <- base::strsplit(as.character(error), ":")[[1]]
  base::trimws(pieces[[length(pieces)]])
}
# Re-raises `error` using only its trailing message component (as produced
# by extractErrorMessage), suppressing the call in the new error.
rethrow <- function(error) {
  stop(extractErrorMessage(error), call.=FALSE)
}
| /R/utils.R | no_license | dcaunce/silkyR-old | R | false | false | 9,324 | r |
.SUPCHARS <- c("\u1D43", "\u1D47", "\u1D48", "\u1D49", "\u1DA0", "\u1D4D", "\u02B0", "\u2071",
"\u02B2", "\u1D4F", "\u02E1", "\u1D50", "\u207F", "\u1D52", "\u1D56", "\u02B3", "\u02E2",
"\u1D57", "\u1D58", "\u1D5B", "\u02B7", "\u02E3", "\u02B8", "\u1DBB")
ignore <- function(...) {
if (length(args) > 0)
warning(paste(paste0("Ignoring argument '", names(list(...)),"'"), collapse='\n'))
}
reject <- function(formats, code=NULL, ...) {
message <- format(formats[1], ...)
error <- simpleError(message)
for (name in names(formats)) {
if (name != "") {
message <- format(formats[[name]], ...)
error[[name]] <- message
}
}
error$code <- code
stop(error)
}
.analysisInfoCache <- new.env()
.resultsInfoCache <- new.env()
loadAnalysisInfo <- function(packageName, analysisName) {
name <- paste0(packageName, "::", analysisName)
if (name %in% names(.analysisInfoCache)) {
info <- .analysisInfoCache[[name]]
} else {
location <- system.file("silky", paste0(tolower(analysisName), ".a.yaml"), package=packageName)
info <- yaml::yaml.load_file(location)
.analysisInfoCache[[name]] <- info
}
info
}
loadResultsInfo <- function(packageName, analysisName) {
name <- paste0(packageName, "::", analysisName)
if (name %in% names(.resultsInfoCache)) {
info <- .resultsInfoCache[[name]]
} else {
location <- system.file("silky", paste0(tolower(analysisName), ".r.yaml"), package=packageName)
info <- yaml::yaml.load_file(location)
.resultsInfoCache[[name]] <- info
}
info
}
columnType <- function(column) {
if (inherits(column, "ordered")) {
return("ordinal")
} else if (inherits(column, "factor")) {
return("nominal")
} else {
return("continuous")
}
}
columnTypeRDescription <- function(column) {
if (is.ordered(column)) {
return("an ordered factor")
} else if (is.factor(column)) {
return("a factor")
} else {
return("numeric")
}
}
cap1st <- function(s) {
paste0(toupper(substring(s,1,1)), substring(s, 2))
}
setter <- function(s) {
paste0("set", cap1st(s))
}
formatFromEnv <- function(str, env, context="normal") {
matches <- gregexpr("\\{[a-zA-Z]+\\}", str)[[1]]
if (matches[1] > 0) {
for (i in seq_along(matches)) {
name <- substring(str, matches[i] + 1, matches[i] + attr(matches, "match.length")[i]-2)
if (name %in% names(env))
str <- gsub(paste0("{", name, "}"), stringify(env[[name]], context), str, fixed=TRUE)
}
}
str
}
format <- function(str, ..., context="normal") {
args <- list(...)
if (grepl("{}", str, fixed=TRUE)) {
for (token in args)
str <- sub("{}", stringify(token, context), str, fixed=TRUE)
} else {
if (grepl("\\{[0-9]+\\}", str)) {
i <- 0
for (token in args) {
str <- gsub(paste0("{", i, "}"), stringify(token, context), str, fixed=TRUE)
i <- i + 1
}
}
if (grepl("\\{[a-zA-Z]+\\}", str)) {
for (name in names(args)) {
if (name != "" && is.null(args[[name]]) == FALSE)
str <- gsub(paste0("{", name, "}"), stringify(args[[name]], context), str, fixed=TRUE)
}
}
}
str
}
spaces <- function(n) {
s <- ''
if (n > 0)
s <- paste(rep(' ', n), collapse='')
s
}
dotPos <- function(x) {
floor(log10(x))
}
silkyMeasureElements <- function(elems, sf=3, scl=1e-3, sch=1e7) {
dp <- 0
maxns <- 0 # max non-scientific value
minns <- 0
maxsexp <- 1 # max (abs) scientific exponent
maxsexps <- '+' # max scientific exponent sign
maxsms <- '+' # max scientific mantissa sign
maxstr <- 4
maxsupwidth <- 0
for (elem in elems) {
sups <- integer()
if (inherits(elem, "Cell")) {
sups <- elem$sups
elem <- elem$value
}
if (is.null(elem)) {
maxns <- 4
} else if (is.na(elem)) {
# do nothing
} else if (inherits(elem, "character")) {
maxstr <- max(maxstr, nchar(elem))
}
else if ( ! is.numeric(elem)) {
maxstr <- 2 + nchar(class(elem)[1])
}
else if (elem == 0) {
dp <- max(dp, sf-1)
}
else if (abs(elem) > scl && abs(elem) < sch) {
# non-scientific values
dp <- max(dp, (sf - floor(log10(abs(elem))) - 1))
if (elem > maxns)
maxns <- elem
if (elem < minns)
minns <- elem
} else {
# scientific values
exp <- floor(log10(abs(elem)))
if (abs(exp) > maxsexp) {
maxsexp <- abs(exp)
maxsms <- ifelse(elem >= 0, '+', '-')
}
}
if (length(sups) > 0)
maxsupwidth <- max(maxsupwidth, 1 + length(sups))
}
if (maxns != 0 || minns != 0) {
maxnsw <- max(1, floor(log10(maxns))+1)
minnsw <- max(1, floor(log10(abs(minns)))+1)
if (minns < 0)
minnsw = minnsw + 1 # for minus sign
nswidth <- max(maxnsw, minnsw)
} else {
nswidth <- 1
}
if (dp > 0)
nswidth <- nswidth + 1 + dp # add a decimal point
swidth <- 0
if (maxsexp != 1)
swidth <- (sf+1) + (2 + floor(log10(maxsexp))+1) # +3 is for the '.', 'e' and the +/-
if (maxsms == '-')
swidth <- swidth + 1
width <- max(swidth, nswidth, maxstr)
width <- width + maxsupwidth
list(sf=sf, dp=dp, width=width, expwidth=(2 + floor(log10(maxsexp))+1), supwidth=maxsupwidth)
}
silkyFormatElement <- function(elem, w=NULL, expw=NULL, supw=0, dp=2, sf=3, scl=1e-3, sch=1e7) {
sups <- integer()
supspad <- ''
if (inherits(elem, "Cell")) {
sups <- elem$sups
elem <- elem$value
if (is.null(w) == FALSE)
w <- w - supw
thissupw <- length(sups)
if (thissupw > 0)
thissupw <- thissupw + 1 # add 1 for the space
supspad <- repstr(' ', supw - thissupw)
}
if (is.null(elem)) {
width <- 4
padstr <- spaces(max(w - 4, 0))
str <- paste0("null", padstr)
} else if (is.na(elem)) {
if (is.null(w))
str <- ''
else
str <- repstr(' ', w)
} else if (inherits(elem, "character")) {
width <- nchar(elem)
padstr <- spaces(max(w - width, 0))
str <- paste0(elem, padstr)
} else if ( ! is.numeric(elem)) {
str <- paste0("[", class(elem)[1], "]")
} else if (elem == 0 || (abs(elem) > scl && abs(elem) < sch)) {
# non-scientific values
str <- sprintf(paste0("%", w, ".", dp, "f"), elem)
} else {
# scientific values
exponent <- floor(log10(abs(elem)))
sign <- ifelse(exponent >= 0, '+', '-')
mantissa <- elem / (10^exponent)
exponent <- abs(exponent)
expstr <- base::format(exponent, scientific=FALSE)
exppad <- ''
if ( ! is.null(expw))
exppad <- spaces(expw-nchar(expstr)-2) # 1 for the +/-, 1 for the e
expstr <- paste0('e', exppad, sign, expstr)
if ( ! is.null(w))
manstr <- base::format(mantissa, width=w-nchar(expstr), nsmall=sf-1)
else
manstr <- base::format(mantissa, nsmall=sf-1)
str <- paste0(manstr, expstr)
}
if (length(sups) > 0)
str <- paste0(str, ' ', paste(.SUPCHARS[sups+1], collapse=''))
str <- paste0(str, supspad)
str
}
repstr <- function(value, n, join='') {
if (n > 0)
return(paste(rep(value, n), collapse=join))
else
return('')
}
stringify <- function(value, context="normal") {
if (context == "R") {
if (is.null(value))
return("NULL")
else
return(paste0(value))
} else {
if (is.null(value))
return("null")
else if (identical(value, TRUE))
return("true")
else if (identical(value, FALSE))
return("false")
else
return(paste0(value))
}
}
extractErrorMessage <- function(error) {
split <- base::strsplit(as.character(error), ":")[[1]]
last <- split[[length(split)]]
base::trimws(last)
}
rethrow <- function(error) {
message <- extractErrorMessage(error)
stop(message, call.=FALSE)
}
|
##
## Brody growth model
##
## Created by Daniel Rodríguez Pérez on 28/8/2013.
##
## Copyright (c) 2013 Daniel Rodríguez Pérez.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>
##
#' Brody growth model
#'
#' Computes the Brody growth curve, in which the gap between the upper
#' asymptote and the current size decays exponentially over time
#' \deqn{ y(t) = \alpha - (\alpha - w_0) exp(- k t) }{ y(t) = \alpha - (\alpha - w_0) * exp(- k * t) }
#'
#' @param t time
#' @param x size
#' @param alpha upper asymptote
#' @param w0 the value at t = 0
#' @param k growth rate
#'
#' @usage brody(t, alpha, w0, k)
#'
#' @examples
#' growth <- brody(0:10, 10, 5, 0.3)
#'
#' @references
#' M. M. Kaps, W. O. W. Herring, and W. R. W. Lamberson, "Genetic and
#' environmental parameters for traits derived from the Brody growth curve and
#' their relationships with weaning weight in Angus cattle.," Journal of
#' Animal Science, vol. 78, no. 6, pp. 1436-1442, May 2000.
#'
#' @author Daniel Rodriguez
#'
#' @rdname brody
#' @export brody
#' @aliases brody
brody <- function(t, alpha, w0, k) {
  # Remaining gap between the asymptote and the start value at time t.
  gap <- (alpha - w0) * exp(-k * t)
  alpha - gap
}
#' @examples
#' # Calculate inverse function
#' time <- brody.inverse(growth, 10, 5, 0.3)
#'
#' @rdname brody
#' @export brody.inverse
#' @aliases brody.inverse
brody.inverse <- function(x, alpha, w0, k) {
  ## Invert the Brody curve: the time at which the curve reaches size x.
  ratio <- (alpha - x) / (alpha - w0)
  -log(ratio) / k
}
| /R/brody.R | no_license | cran/growthmodels | R | false | false | 1,963 | r | ##
## Brody growth model
##
## Created by Daniel Rodríguez Pérez on 28/8/2013.
##
## Copyright (c) 2013 Daniel Rodríguez Pérez.
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>
##
#' Brody growth model
#'
#' Computes the Brody growth model and its inverse
#' \deqn{ y(t) = \alpha - (\alpha - w_0) exp(- k t) }{ y(t) = \alpha - (\alpha - w_0) * exp(- k * t) }
#'
#' @param t time
#' @param x size
#' @param alpha upper asymptote
#' @param w0 the value at t = 0
#' @param k growth rate
#'
#' @usage brody(t, alpha, w0, k)
#'
#' @examples
#' growth <- brody(0:10, 10, 5, 0.3)
#'
#' @references
#' M. M. Kaps, W. O. W. Herring, and W. R. W. Lamberson, "Genetic and
#' environmental parameters for traits derived from the Brody growth curve and
#' their relationships with weaning weight in Angus cattle.," Journal of
#' Animal Science, vol. 78, no. 6, pp. 1436-1442, May 2000.
#'
#' @author Daniel Rodriguez
#'
#' @rdname brody
#' @export brody
#' @aliases brody
brody <- function(t, alpha, w0, k) {
  ## Exponential approach from the starting size w0 toward the upper
  ## asymptote alpha, at growth rate k.
  decay <- exp(-k * t)
  alpha - (alpha - w0) * decay
}
#' @examples
#' # Calculate inverse function
#' time <- brody.inverse(growth, 10, 5, 0.3)
#'
#' @rdname brody
#' @export brody.inverse
#' @aliases brody.inverse
brody.inverse <- function(x, alpha, w0, k) {
  ## Invert the Brody curve: the time at which the curve reaches size x.
  ratio <- (alpha - x) / (alpha - w0)
  -log(ratio) / k
}
|
## double check whether direction of con_prob is correct
##########################
##########################
#' Create a semantic network based on the co-occurence of tokens in documents
#'
#' @description
#' This function calculates the co-occurrence of features and returns a network/graph in the igraph format, where nodes are tokens and edges represent the similarity/adjacency of tokens. Co-occurrence is calculated based on how often two tokens occurred within the same document (e.g., news article, chapter, paragraph, sentence). The semnet_window() function can be used to calculate co-occurrence of tokens within a given token distance.
#'
#' @param tc a tCorpus or a featureHits object (i.e. the result of search_features)
#' @param feature The name of the feature column
#' @param measure The similarity measure. Currently supports: "con_prob" (conditional probability), "con_prob_weighted", "cosine" similarity, "count_directed" (i.e number of cooccurrences) and "count_undirected" (same as count_directed, but returned as an undirected network, chi2 (chi-square score))
#' @param context_level Determine whether features need to co-occurr within "documents" or "sentences"
#' @param backbone If True, add an edge attribute for the backbone alpha
#' @param n.batches If a number, perform the calculation in batches
#'
#' @return an Igraph graph in which nodes are features and edges are similarity scores
#' @export
#' @examples
#' text = c('A B C', 'D E F. G H I', 'A D', 'GGG')
#' tc = create_tcorpus(text, doc_id = c('a','b','c','d'), split_sentences = TRUE)
#'
#' g = semnet(tc, 'token')
#' g
#' igraph::get.data.frame(g)
#' \donttest{plot_semnet(g)}
semnet <- function(tc, feature='token', measure=c('con_prob', 'con_prob_weighted', 'cosine', 'count_directed', 'count_undirected', 'chi2'), context_level=c('document','sentence'), backbone=F, n.batches=NA){
  alpha = 2  ## fixed value forwarded to create_semnet (not user-tunable here)
  measure = match.arg(measure)
  context_level = match.arg(context_level)
  if (!methods::is(tc, 'tCorpus') && !methods::is(tc, 'featureHits') && !methods::is(tc, 'contextHits')) stop('tc has to be a tCorpus, featureHits or contextHits object')

  ## If search results were given instead of a tCorpus, first rebuild a
  ## tCorpus in which the matched query codes act as the feature column.
  if (methods::is(tc, 'featureHits') || methods::is(tc, 'contextHits')) {
    hits = tc$hits
    if (methods::is(tc, 'contextHits')) {
      ## contextHits carry no token positions; fabricate unique ids, which
      ## is harmless for document/sentence level co-occurrence
      hits$hit_id = seq_len(nrow(hits))
      hits$token_id = seq_len(nrow(hits))
    }
    ## Count each code at most once per hit (and per sentence when working
    ## at sentence level) so multi-token hits do not inflate the counts.
    if (context_level == 'sentence') {
      hits = unique(hits, by=c('code','sentence','hit_id'))
    } else {
      hits = unique(hits, by=c('code', 'hit_id'))
    }
    tc = tokens_to_tcorpus(hits, doc_col = 'doc_id', sentence_col=NULL, token_id_col = 'token_id')
    feature = 'code'
  }
  is_tcorpus(tc)

  feature = match.arg(feature, tc$feature_names)
  g = create_semnet(tc, feature, measure=measure, matrix_mode='dtm', context_level=context_level, n.batches=n.batches, alpha=alpha)

  ## Optionally annotate edges with the backbone-extraction alpha score.
  if (backbone) igraph::E(g)$alpha = backbone_alpha(g)
  g$measure = measure
  class(g) = c('semnet', 'dtm_cooc', measure, class(g))
  g
}
# A sliding window approach to calculate the co-occurence of tokens
#' Create a semantic network based on the co-occurence of tokens in token windows
#'
#' @description
#' This function calculates the co-occurence of features and returns a network/graph
#' in the igraph format, where nodes are tokens and edges represent the similarity/adjacency of tokens.
#' Co-occurence is calcuated based on how often two tokens co-occurr within a given token distance.
#'
#' If a featureHits object is given as input, then for for query hits that have multiple positions (i.e. terms
#' connected with AND statements or word proximity) the raw count score is biased. For the count_* measures
#' therefore only the first position of the query hit is used.
#'
#' @param tc a tCorpus or a featureHits object (i.e. the result of search_features)
#' @param feature The name of the feature column
#' @param measure The similarity measure. Currently supports: "con_prob" (conditional probability),
#' "cosine" similarity, "count_directed" (i.e number of cooccurrences) and "count_undirected"
#' (same as count_directed, but returned as an undirected network, chi2 (chi-square score))
#' @param context_level Determine whether features need to co-occurr within "documents" or "sentences"
#' @param window.size The token distance within which features are considered to co-occurr
#' @param direction   Determine whether co-occurrence is asymmetrical ("<>") or takes the order of tokens
#' into account. If direction is '<', then the from/x feature needs to occur before the
#' to/y feature. If direction is '>', then after.
#' @param backbone If True, add an edge attribute for the backbone alpha
#' @param n.batches To limit memory use the calculation is divided into batches. This parameter controls
#' the number of batches.
#' @param matrix_mode There are two approaches for calculating window co-occurrence (see details). By
#' default we use positionXmatrix, but matrixXmatrix is optional because it might
#' be favourable for some uses, and might make more sense for cosine similarity.
#'
#' @details
#' There are two approaches for calculating window co-occurrence.
#' One is to measure how often a feature occurs within a given token window, which
#' can be calculating by calculating the inner product of a matrix that contains the
#' exact position of features and a matrix that contains the occurrence window.
#' We refer to this as the "positionXwindow" mode. Alternatively, we can measure how
#' much the windows of features overlap, for which take the inner product of two window
#' matrices, which we call the "windowXwindow" mode. The positionXwindow approach has the advantage
#' of being easy to interpret (e.g. how likely is feature "Y" to occurr within 10
#' tokens from feature "X"?). The windowXwindow mode, on the other hand, has the interesting
#' feature that similarity is stronger if tokens co-occurr more closely together
#' (since then their windows overlap more), but this only works well for similarity measures that
#' normalize the similarity (e.g., cosine). Currently, we only use the positionXwindow mode,
#' but windowXwindow could be interesting to use as well, and for cosine it might actually make more
#' sense.
#'
#' @return an Igraph graph in which nodes are features and edges are similarity scores
#' @export
#' @examples
#' text = c('A B C', 'D E F. G H I', 'A D', 'GGG')
#' tc = create_tcorpus(text, doc_id = c('a','b','c','d'), split_sentences = TRUE)
#'
#' g = semnet_window(tc, 'token', window.size = 1)
#' g
#' igraph::get.data.frame(g)
#' \donttest{plot_semnet(g)}
semnet_window <- function(tc, feature='token', measure=c('con_prob', 'cosine', 'count_directed', 'count_undirected', 'chi2'), context_level=c('document','sentence'), window.size=10, direction='<>', backbone=F, n.batches=5, matrix_mode=c('positionXwindow', 'windowXwindow')){
  measure = match.arg(measure)
  context_level = match.arg(context_level)
  matrix_mode = match.arg(matrix_mode)
  if (!methods::is(tc, 'tCorpus') && !methods::is(tc, 'featureHits')) stop('tc has to be a tCorpus or featureHits object')

  ## If search results were given, rebuild a tCorpus in which the matched
  ## query codes act as the feature column.
  if (methods::is(tc, 'featureHits')) {
    hits = tc$hits
    ## For raw counts, query hits with multiple token positions would bias
    ## the score, so keep only the first position of each hit (see docs).
    if (measure %in% c('count_directed','count_undirected')) hits = hits[!duplicated(hits[,c('code','hit_id')])]
    tc = tokens_to_tcorpus(hits, doc_col = 'doc_id', sentence_col=NULL, token_id_col = 'token_id')
    feature = 'code'
  }
  is_tcorpus(tc)

  feature = match.arg(feature, tc$feature_names)

  ## developer note: other measure/matrix_mode combinations may be worth
  ## supporting; for now the caller picks matrix_mode explicitly
  if (!direction == '<>'){
    if (measure %in% c('cosine','count_undirected')) stop('cannot use assymetrical window with undirected similarity measures')
    if (matrix_mode == 'windowXwindow') stop('cannot use assymetrical window with matrix_mode == windowXwindow')
  }

  ## BUG FIX: direction used to be hard-coded to '<>' in this call, which
  ## silently ignored the validated `direction` argument above; forward the
  ## actual value instead.
  g = create_semnet(tc, feature, measure=measure, matrix_mode=matrix_mode, context_level=context_level, window.size=window.size, direction=direction, n.batches=n.batches)

  if (backbone) igraph::E(g)$alpha = backbone_alpha(g)
  g$measure = measure
  class(g) = c('semnet', 'window_cooc', measure, class(g))
  g
}
## Internal workhorse: compute a feature-by-feature co-occurrence matrix via
## feature_cooccurrence() for the requested similarity measure and wrap it
## as an igraph graph (mode='directed' for asymmetric measures, mode='upper'
## for symmetric ones). Defaults for direction/window.size/n.batches/alpha
## were added so that document-level callers (matrix_mode='dtm') can omit
## the window-related arguments without forwarding missing symbols.
create_semnet <- function(tc, feature, measure, matrix_mode, context_level, direction='<>', window.size=NA, n.batches=NA, alpha=2){
  if (measure == 'cosine') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='normal', mat_stats=c('sum.x','count.x','magnitude.x','magnitude.y'), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    ## normalize each nonzero cell by the product of both feature magnitudes
    ml$mat@x = ml$mat@x / (ml$magnitude.x[ml$mat@i+1] * ml$magnitude.y[ml$mat@j+1])
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'upper', diag = F, weighted = T)
  }
  if (measure == 'con_prob') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='dicho', mat_stats=c('sum.x'), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    ## conditional probability: co-occurrence count / total occurrences of x
    ml$mat = ml$mat / ml$sum.x
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'directed', diag = F, weighted = T)
  }
  if (measure == 'con_prob_weighted') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='prob', mat_stats=c('sum.x'), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    ml$mat = ml$mat / ml$sum.x
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'directed', diag = F, weighted = T)
  }
  if (measure == 'count_directed') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='dicho', mat_stats=c(), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'directed', diag = F, weighted = T)
  }
  if (measure == 'count_undirected') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='dicho', mat_stats=c(), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'upper', diag = F, weighted = T)
  }
  if (measure == 'chi2'){
    ## add sign and/or ratio
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='dicho', mat_stats=c('sum.x','sum.y','nrow'), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    ## 2x2 contingency cells per feature pair
    xtab = data.frame(a = ml$mat@x,                       # x=1, y=1
                      b = ml$sum.x[ml$mat@i+1] - ml$mat@x,  # x=0, y=1
                      c = ml$sum.y[ml$mat@j+1] - ml$mat@x)  # x=1, y=0
    ## NOTE(review): for a standard 2x2 table d = N - a - b - c; this
    ## expression evaluates to N - b - c + a. Kept as-is to preserve the
    ## existing chi2 scores -- verify the intended formula.
    xtab$d = ml$nrow - ((xtab$b + xtab$c) - xtab$a)  # x=0, y=0
    ml$mat@x = calc_chi2(xtab$a, xtab$b, xtab$c, xtab$d, correct=T)  ## replace sparse matrix values with chi2
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'directed', diag = F, weighted = T)
  }

  ## attach token frequencies (and document frequencies when available)
  match_i = match(igraph::V(g)$name, names(ml$freq))
  igraph::V(g)$freq = ml$freq[match_i]
  ## BUG FIX: the membership test used `'doc_freq' %in% ml`, which matches
  ## against the list *values*; it must test the element names.
  if ('doc_freq' %in% names(ml)) igraph::V(g)$doc_freq = ml$doc_freq[match_i]
  g
}
## Rebuild a tm-style DocumentTermMatrix (simple triplet form with $i/$j/$v
## slots) as a Matrix-package sparse triplet matrix, keeping the document
## and term labels as dimnames.
DocumentTermMatrix_to_dgTMatrix <- function(dtm){
  out = spMatrix(nrow(dtm), ncol(dtm), dtm$i, dtm$j, dtm$v)
  dimnames(out) = list(rownames(dtm), colnames(dtm))
  methods::as(out, 'dgTMatrix')
}
| /R/semnet.r | no_license | fcfilho/corpustools | R | false | false | 12,204 | r | ## double check whether direction of con_prob is correct
##########################
##########################
#' Create a semantic network based on the co-occurence of tokens in documents
#'
#' @description
#' This function calculates the co-occurrence of features and returns a network/graph in the igraph format, where nodes are tokens and edges represent the similarity/adjacency of tokens. Co-occurrence is calculated based on how often two tokens occurred within the same document (e.g., news article, chapter, paragraph, sentence). The semnet_window() function can be used to calculate co-occurrence of tokens within a given token distance.
#'
#' @param tc a tCorpus or a featureHits object (i.e. the result of search_features)
#' @param feature The name of the feature column
#' @param measure The similarity measure. Currently supports: "con_prob" (conditional probability), "con_prob_weighted", "cosine" similarity, "count_directed" (i.e number of cooccurrences) and "count_undirected" (same as count_directed, but returned as an undirected network, chi2 (chi-square score))
#' @param context_level Determine whether features need to co-occurr within "documents" or "sentences"
#' @param backbone If True, add an edge attribute for the backbone alpha
#' @param n.batches If a number, perform the calculation in batches
#'
#' @return an Igraph graph in which nodes are features and edges are similarity scores
#' @export
#' @examples
#' text = c('A B C', 'D E F. G H I', 'A D', 'GGG')
#' tc = create_tcorpus(text, doc_id = c('a','b','c','d'), split_sentences = TRUE)
#'
#' g = semnet(tc, 'token')
#' g
#' igraph::get.data.frame(g)
#' \donttest{plot_semnet(g)}
semnet <- function(tc, feature='token', measure=c('con_prob', 'con_prob_weighted', 'cosine', 'count_directed', 'count_undirected', 'chi2'), context_level=c('document','sentence'), backbone=F, n.batches=NA){
  alpha = 2  ## fixed value forwarded to create_semnet (not user-tunable here)
  measure = match.arg(measure)
  context_level = match.arg(context_level)
  if (!methods::is(tc, 'tCorpus') && !methods::is(tc, 'featureHits') && !methods::is(tc, 'contextHits')) stop('tc has to be a tCorpus, featureHits or contextHits object')

  ## If search results were given instead of a tCorpus, first rebuild a
  ## tCorpus in which the matched query codes act as the feature column.
  if (methods::is(tc, 'featureHits') || methods::is(tc, 'contextHits')) {
    hits = tc$hits
    if (methods::is(tc, 'contextHits')) {
      ## contextHits carry no token positions; fabricate unique ids, which
      ## is harmless for document/sentence level co-occurrence
      hits$hit_id = seq_len(nrow(hits))
      hits$token_id = seq_len(nrow(hits))
    }
    ## Count each code at most once per hit (and per sentence when working
    ## at sentence level) so multi-token hits do not inflate the counts.
    if (context_level == 'sentence') {
      hits = unique(hits, by=c('code','sentence','hit_id'))
    } else {
      hits = unique(hits, by=c('code', 'hit_id'))
    }
    tc = tokens_to_tcorpus(hits, doc_col = 'doc_id', sentence_col=NULL, token_id_col = 'token_id')
    feature = 'code'
  }
  is_tcorpus(tc)

  feature = match.arg(feature, tc$feature_names)
  g = create_semnet(tc, feature, measure=measure, matrix_mode='dtm', context_level=context_level, n.batches=n.batches, alpha=alpha)

  ## Optionally annotate edges with the backbone-extraction alpha score.
  if (backbone) igraph::E(g)$alpha = backbone_alpha(g)
  g$measure = measure
  class(g) = c('semnet', 'dtm_cooc', measure, class(g))
  g
}
# A sliding window approach to calculate the co-occurence of tokens
#' Create a semantic network based on the co-occurence of tokens in token windows
#'
#' @description
#' This function calculates the co-occurence of features and returns a network/graph
#' in the igraph format, where nodes are tokens and edges represent the similarity/adjacency of tokens.
#' Co-occurence is calcuated based on how often two tokens co-occurr within a given token distance.
#'
#' If a featureHits object is given as input, then for for query hits that have multiple positions (i.e. terms
#' connected with AND statements or word proximity) the raw count score is biased. For the count_* measures
#' therefore only the first position of the query hit is used.
#'
#' @param tc a tCorpus or a featureHits object (i.e. the result of search_features)
#' @param feature The name of the feature column
#' @param measure The similarity measure. Currently supports: "con_prob" (conditional probability),
#' "cosine" similarity, "count_directed" (i.e number of cooccurrences) and "count_undirected"
#' (same as count_directed, but returned as an undirected network, chi2 (chi-square score))
#' @param context_level Determine whether features need to co-occurr within "documents" or "sentences"
#' @param window.size The token distance within which features are considered to co-occurr
#' @param direction   Determine whether co-occurrence is asymmetrical ("<>") or takes the order of tokens
#' into account. If direction is '<', then the from/x feature needs to occur before the
#' to/y feature. If direction is '>', then after.
#' @param backbone If True, add an edge attribute for the backbone alpha
#' @param n.batches To limit memory use the calculation is divided into batches. This parameter controls
#' the number of batches.
#' @param matrix_mode There are two approaches for calculating window co-occurrence (see details). By
#' default we use positionXmatrix, but matrixXmatrix is optional because it might
#' be favourable for some uses, and might make more sense for cosine similarity.
#'
#' @details
#' There are two approaches for calculating window co-occurrence.
#' One is to measure how often a feature occurs within a given token window, which
#' can be calculating by calculating the inner product of a matrix that contains the
#' exact position of features and a matrix that contains the occurrence window.
#' We refer to this as the "positionXwindow" mode. Alternatively, we can measure how
#' much the windows of features overlap, for which take the inner product of two window
#' matrices, which we call the "windowXwindow" mode. The positionXwindow approach has the advantage
#' of being easy to interpret (e.g. how likely is feature "Y" to occurr within 10
#' tokens from feature "X"?). The windowXwindow mode, on the other hand, has the interesting
#' feature that similarity is stronger if tokens co-occurr more closely together
#' (since then their windows overlap more), but this only works well for similarity measures that
#' normalize the similarity (e.g., cosine). Currently, we only use the positionXwindow mode,
#' but windowXwindow could be interesting to use as well, and for cosine it might actually make more
#' sense.
#'
#' @return an Igraph graph in which nodes are features and edges are similarity scores
#' @export
#' @examples
#' text = c('A B C', 'D E F. G H I', 'A D', 'GGG')
#' tc = create_tcorpus(text, doc_id = c('a','b','c','d'), split_sentences = TRUE)
#'
#' g = semnet_window(tc, 'token', window.size = 1)
#' g
#' igraph::get.data.frame(g)
#' \donttest{plot_semnet(g)}
semnet_window <- function(tc, feature='token', measure=c('con_prob', 'cosine', 'count_directed', 'count_undirected', 'chi2'), context_level=c('document','sentence'), window.size=10, direction='<>', backbone=F, n.batches=5, matrix_mode=c('positionXwindow', 'windowXwindow')){
  measure = match.arg(measure)
  context_level = match.arg(context_level)
  matrix_mode = match.arg(matrix_mode)
  if (!methods::is(tc, 'tCorpus') && !methods::is(tc, 'featureHits')) stop('tc has to be a tCorpus or featureHits object')

  ## If search results were given, rebuild a tCorpus in which the matched
  ## query codes act as the feature column.
  if (methods::is(tc, 'featureHits')) {
    hits = tc$hits
    ## For raw counts, query hits with multiple token positions would bias
    ## the score, so keep only the first position of each hit (see docs).
    if (measure %in% c('count_directed','count_undirected')) hits = hits[!duplicated(hits[,c('code','hit_id')])]
    tc = tokens_to_tcorpus(hits, doc_col = 'doc_id', sentence_col=NULL, token_id_col = 'token_id')
    feature = 'code'
  }
  is_tcorpus(tc)

  feature = match.arg(feature, tc$feature_names)

  ## developer note: other measure/matrix_mode combinations may be worth
  ## supporting; for now the caller picks matrix_mode explicitly
  if (!direction == '<>'){
    if (measure %in% c('cosine','count_undirected')) stop('cannot use assymetrical window with undirected similarity measures')
    if (matrix_mode == 'windowXwindow') stop('cannot use assymetrical window with matrix_mode == windowXwindow')
  }

  ## BUG FIX: direction used to be hard-coded to '<>' in this call, which
  ## silently ignored the validated `direction` argument above; forward the
  ## actual value instead.
  g = create_semnet(tc, feature, measure=measure, matrix_mode=matrix_mode, context_level=context_level, window.size=window.size, direction=direction, n.batches=n.batches)

  if (backbone) igraph::E(g)$alpha = backbone_alpha(g)
  g$measure = measure
  class(g) = c('semnet', 'window_cooc', measure, class(g))
  g
}
## Internal workhorse: compute a feature-by-feature co-occurrence matrix via
## feature_cooccurrence() for the requested similarity measure and wrap it
## as an igraph graph (mode='directed' for asymmetric measures, mode='upper'
## for symmetric ones). Defaults for direction/window.size/n.batches/alpha
## were added so that document-level callers (matrix_mode='dtm') can omit
## the window-related arguments without forwarding missing symbols.
create_semnet <- function(tc, feature, measure, matrix_mode, context_level, direction='<>', window.size=NA, n.batches=NA, alpha=2){
  if (measure == 'cosine') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='normal', mat_stats=c('sum.x','count.x','magnitude.x','magnitude.y'), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    ## normalize each nonzero cell by the product of both feature magnitudes
    ml$mat@x = ml$mat@x / (ml$magnitude.x[ml$mat@i+1] * ml$magnitude.y[ml$mat@j+1])
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'upper', diag = F, weighted = T)
  }
  if (measure == 'con_prob') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='dicho', mat_stats=c('sum.x'), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    ## conditional probability: co-occurrence count / total occurrences of x
    ml$mat = ml$mat / ml$sum.x
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'directed', diag = F, weighted = T)
  }
  if (measure == 'con_prob_weighted') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='prob', mat_stats=c('sum.x'), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    ml$mat = ml$mat / ml$sum.x
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'directed', diag = F, weighted = T)
  }
  if (measure == 'count_directed') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='dicho', mat_stats=c(), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'directed', diag = F, weighted = T)
  }
  if (measure == 'count_undirected') {
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='dicho', mat_stats=c(), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'upper', diag = F, weighted = T)
  }
  if (measure == 'chi2'){
    ## add sign and/or ratio
    ml = feature_cooccurrence(tc, feature, matrix_mode=matrix_mode, count_mode='dicho', mat_stats=c('sum.x','sum.y','nrow'), context_level=context_level, direction=direction, window.size=window.size, n.batches=n.batches, alpha=alpha)
    ## 2x2 contingency cells per feature pair
    xtab = data.frame(a = ml$mat@x,                       # x=1, y=1
                      b = ml$sum.x[ml$mat@i+1] - ml$mat@x,  # x=0, y=1
                      c = ml$sum.y[ml$mat@j+1] - ml$mat@x)  # x=1, y=0
    ## NOTE(review): for a standard 2x2 table d = N - a - b - c; this
    ## expression evaluates to N - b - c + a. Kept as-is to preserve the
    ## existing chi2 scores -- verify the intended formula.
    xtab$d = ml$nrow - ((xtab$b + xtab$c) - xtab$a)  # x=0, y=0
    ml$mat@x = calc_chi2(xtab$a, xtab$b, xtab$c, xtab$d, correct=T)  ## replace sparse matrix values with chi2
    g = igraph::graph.adjacency(squarify_matrix(ml$mat), mode = 'directed', diag = F, weighted = T)
  }

  ## attach token frequencies (and document frequencies when available)
  match_i = match(igraph::V(g)$name, names(ml$freq))
  igraph::V(g)$freq = ml$freq[match_i]
  ## BUG FIX: the membership test used `'doc_freq' %in% ml`, which matches
  ## against the list *values*; it must test the element names.
  if ('doc_freq' %in% names(ml)) igraph::V(g)$doc_freq = ml$doc_freq[match_i]
  g
}
## Rebuild a tm-style DocumentTermMatrix (simple triplet form with $i/$j/$v
## slots) as a Matrix-package sparse triplet matrix, keeping the document
## and term labels as dimnames.
DocumentTermMatrix_to_dgTMatrix <- function(dtm){
  out = spMatrix(nrow(dtm), ncol(dtm), dtm$i, dtm$j, dtm$v)
  dimnames(out) = list(rownames(dtm), colnames(dtm))
  methods::as(out, 'dgTMatrix')
}
|
\name{residuals.modgam}
\alias{residuals.modgam}
\title{
Residuals of the \code{\link{modgam}} Object}
\description{
This function provides residuals of a modgam object produced by \code{\link{modgam}}.
}
\usage{
\method{residuals}{modgam} (object, \dots)
}
\arguments{
\item{object}{
a modgam object.
}
\item{\dots}{
extra arguments for S3 generic, ignored by \code{residuals.modgam}.
}
}
\author{
Lu Bai
Send bug reports to \email{sbartell@uci.edu}.
}
\seealso{
\code{\link{modgam}},
\code{\link{gamcox}},
\code{\link{predict.gamcox}}.
}
\keyword{misc}
\keyword{smooth}
| /man/residuals.modgam.Rd | no_license | cran/MapGAM | R | false | false | 612 | rd | \name{residuals.modgam}
\alias{residuals.modgam}
\title{
Residuals of the \code{\link{modgam}} Object}
\description{
This function provides residuals of a modgam object produced by \code{\link{modgam}}.
}
\usage{
\method{residuals}{modgam} (object, \dots)
}
\arguments{
\item{object}{
a modgam object.
}
\item{\dots}{
extra arguments for S3 generic, ignored by \code{residuals.modgam}.
}
}
\author{
Lu Bai
Send bug reports to \email{sbartell@uci.edu}.
}
\seealso{
\code{\link{modgam}},
\code{\link{gamcox}},
\code{\link{predict.gamcox}}.
}
\keyword{misc}
\keyword{smooth}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PISecurityMappingLinks.r
\name{PISecurityMappingLinks}
\alias{PISecurityMappingLinks}
\title{Generate an instance of the PISecurityMappingLinks PI Web API class}
\usage{
PISecurityMappingLinks(self = NULL, assetServer = NULL,
securityIdentity = NULL, security = NULL, securityEntries = NULL)
}
\arguments{
\item{self}{(string)}
\item{assetServer}{(string)}
\item{securityIdentity}{(string)}
\item{security}{(string)}
\item{securityEntries}{(string)}
}
\value{
PISecurityMappingLinks
}
\description{
Generate an instance of the PISecurityMappingLinks PI Web API class
}
\examples{
securityMappingLinks <- PISecurityMappingLinks(self = "", assetServer = "",
securityIdentity = "", security = "", securityEntries = "")
}
| /man/PISecurityMappingLinks.Rd | permissive | eddyrene/PI-Web-API-Client-R | R | false | true | 864 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PISecurityMappingLinks.r
\name{PISecurityMappingLinks}
\alias{PISecurityMappingLinks}
\title{Generate an instance of the PISecurityMappingLinks PI Web API class}
\usage{
PISecurityMappingLinks(self = NULL, assetServer = NULL,
securityIdentity = NULL, security = NULL, securityEntries = NULL)
}
\arguments{
\item{self}{(string)}
\item{assetServer}{(string)}
\item{securityIdentity}{(string)}
\item{security}{(string)}
\item{securityEntries}{(string)}
}
\value{
PISecurityMappingLinks
}
\description{
Generate an instance of the PISecurityMappingLinks PI Web API class
}
\examples{
securityMappingLinks <- PISecurityMappingLinks(self = "", assetServer = "",
securityIdentity = "", security = "", securityEntries = "")
}
|
## Scratch plotting script for the SF-crime "fourtime vs PdDistrict" figures.
## Depends on objects created elsewhere in the session: fitfourtime and
## top10_fourtime (fitted models supporting predict(type="probs") -- presumably
## multinomial; TODO confirm), plus ggplot2, a melt() provider (reshape2?),
## and an already-populated MeltDD for the first plot.
setwd("~/")
getwd()
## NOTE(review): this device is opened and closed with no plot drawn in
## between, so the PNG written to the Desktop here is empty.
png("/Users/macintosh/Desktop/fourtime vs PdDistrict in each newCategory.png",width=1200,height=900)
# draw plot #
dev.off()
#-----#
setwd("~/")
getwd()
## Line plot of predicted probabilities per district across the four day
## parts, faceted by outcome variable (uses MeltDD from a previous run).
dw_fp=ggplot(MeltDD,aes(x=fourtime,y=Probability,color=PdDistrict,group=PdDistrict)) +
  geom_line() +
  geom_point() +
  facet_grid(variable~., scales="free") +
  labs(title="fourtime vs PdDistrict in each newCategory") +
  theme(plot.title = element_text(size=18, face="bold.italic"),
        axis.text = element_text(size=12, color="navy"),
        axis.title.x = element_text(size=16, face="bold"),
        axis.title.y = element_text(size=16, face="bold"),
        legend.title = element_text(size=12),
        legend.text = element_text(size=12))
dw_fp
## NOTE(review): png() immediately followed by dev.off() also saves nothing;
## the ggsave() calls below are what actually write the figures to disk.
png("fourtime vs PdDistrict in each newCategory.png")
dev.off()
#----#
## Build a prediction grid (4 day parts x 10 districts, replicated to
## 877,980 rows), predict class probabilities with fitfourtime, melt to
## long format, and order the day-part factor chronologically.
DDframe=data.frame(fourtime=rep(c("twilight","morning","afternoon","night"),each=219495),
                   PdDistrict=rep(c("BAYVIEW","INGLESIDE","CENTRAL","NORTHERN","MISSION",
                                    "RICHMOND","SOUTHERN","PARK","TENDERLOIN","TARAVAL"),87798))
PredDD=cbind(DDframe,predict(fitfourtime,newdata=DDframe,type="probs",se=TRUE))
MeltDD=melt(PredDD,id.vars=c("fourtime","PdDistrict"),value.name="Probability")
MeltDD$fourtime=factor(MeltDD$fourtime,levels=c("twilight","morning","afternoon","night"))
dw_fp=ggplot(MeltDD,aes(x=fourtime,y=Probability,color=PdDistrict,group=PdDistrict)) +
  geom_line() +
  geom_point() +
  facet_grid(variable~., scales="free") +
  labs(title="fourtime vs PdDistrict in each newCategory") +
  theme(plot.title = element_text(size=18, face="bold.italic"),
        axis.text = element_text(size=14, color="navy"),
        axis.title.x = element_text(size=16, face="bold"),
        axis.title.y = element_text(size=16, face="bold"),
        legend.title = element_text(size=12),
        legend.text = element_text(size=12))
ggsave("fourtime vs PdDistrict in each newCategory.png",dw_fp,path="/Users/macintosh/Desktop")
###
## Same pipeline, but using the top10_fourtime model (top-10 categories).
DDframe=data.frame(fourtime=rep(c("twilight","morning","afternoon","night"),each=219495),
                   PdDistrict=rep(c("BAYVIEW","INGLESIDE","CENTRAL","NORTHERN","MISSION","RICHMOND","SOUTHERN","PARK","TENDERLOIN","TARAVAL"),87798))
PredDD=cbind(DDframe,predict(top10_fourtime,newdata=DDframe,type="probs",se=TRUE))
MeltDD=melt(PredDD,id.vars=c("fourtime","PdDistrict"),value.name="Probability")
MeltDD$fourtime=factor(MeltDD$fourtime,levels=c("twilight","morning","afternoon","night"))
tpf=ggplot(MeltDD,aes(x=fourtime,y=Probability,color=PdDistrict,group=PdDistrict)) +
  geom_line() +
  geom_point() +
  facet_grid(variable~., scales="free") +
  labs(title="fourtime vs PdDistrict in each Category") +
  theme(plot.title = element_text(size=18, face="bold.italic"),
        axis.text = element_text(size=10, color="navy"),
        axis.title.x = element_text(size=16, face="bold"),
        axis.title.y = element_text(size=16, face="bold"),
        legend.title = element_text(size=12),
        legend.text = element_text(size=12))
ggsave("fourtime vs PdDistrict in each Category.png",tpf,path="/Users/macintosh/Desktop")
| /parallel set/plot.R | no_license | lulukuo530/SF-Crime | R | false | false | 3,124 | r | setwd("~/")
# Confirm the working directory before writing output files.
getwd()
# NOTE(review): this png()/dev.off() pair brackets no plotting calls, so it
# writes an empty file; the figure is actually saved via ggsave further down.
png("/Users/macintosh/Desktop/fourtime vs PdDistrict in each newCategory.png",width=1200,height=900)
# draw plot #
dev.off()
#-----#
setwd("~/")
getwd()
# Probability curves per district faceted by the recoded crime category.
# Relies on `MeltDD` already being in the session (it is (re)built further
# down in this script) -- running the file top-to-bottom cold would fail here.
dw_fp=ggplot(MeltDD,aes(x=fourtime,y=Probability,color=PdDistrict,group=PdDistrict)) +
geom_line() +
geom_point() +
facet_grid(variable~., scales="free") +
labs(title="fourtime vs PdDistrict in each newCategory") +
theme(plot.title = element_text(size=18, face="bold.italic"),
axis.text = element_text(size=12, color="navy"),
axis.title.x = element_text(size=16, face="bold"),
axis.title.y = element_text(size=16, face="bold"),
legend.title = element_text(size=12),
legend.text = element_text(size=12))
# Print to the interactive device.
dw_fp
# NOTE(review): the device is opened *after* the plot was printed, so this
# PNG is also blank; print dw_fp between png() and dev.off(), or use ggsave.
png("fourtime vs PdDistrict in each newCategory.png")
dev.off()
#----#
# Rebuild the prediction grid (same shape as elsewhere in this script:
# 4 day-parts crossed with the 10 police districts).
DDframe=data.frame(fourtime=rep(c("twilight","morning","afternoon","night"),each=219495),
PdDistrict=rep(c("BAYVIEW","INGLESIDE","CENTRAL","NORTHERN","MISSION",
"RICHMOND","SOUTHERN","PARK","TENDERLOIN","TARAVAL"),87798))
# Probabilities from `fitfourtime` -- presumably the full-category model
# fitted earlier in the session (vs. the top-10 model used elsewhere);
# confirm against the model-fitting script.
PredDD=cbind(DDframe,predict(fitfourtime,newdata=DDframe,type="probs",se=TRUE))
# Long format for faceting, with chronological day-part ordering.
MeltDD=melt(PredDD,id.vars=c("fourtime","PdDistrict"),value.name="Probability")
MeltDD$fourtime=factor(MeltDD$fourtime,levels=c("twilight","morning","afternoon","night"))
# One probability curve per district, faceted by category (free y scales).
dw_fp=ggplot(MeltDD,aes(x=fourtime,y=Probability,color=PdDistrict,group=PdDistrict)) +
geom_line() +
geom_point() +
facet_grid(variable~., scales="free") +
labs(title="fourtime vs PdDistrict in each newCategory") +
theme(plot.title = element_text(size=18, face="bold.italic"),
axis.text = element_text(size=14, color="navy"),
axis.title.x = element_text(size=16, face="bold"),
axis.title.y = element_text(size=16, face="bold"),
legend.title = element_text(size=12),
legend.text = element_text(size=12))
# ggsave renders and writes directly -- no open device needed.
ggsave("fourtime vs PdDistrict in each newCategory.png",dw_fp,path="/Users/macintosh/Desktop")
###
# Same prediction grid as above, rebuilt for the top-10-category model.
DDframe=data.frame(fourtime=rep(c("twilight","morning","afternoon","night"),each=219495),
PdDistrict=rep(c("BAYVIEW","INGLESIDE","CENTRAL","NORTHERN","MISSION","RICHMOND","SOUTHERN","PARK","TENDERLOIN","TARAVAL"),87798))
# Probabilities from `top10_fourtime` (model restricted to the ten most
# frequent crime categories; fitted earlier in the session).
PredDD=cbind(DDframe,predict(top10_fourtime,newdata=DDframe,type="probs",se=TRUE))
# Long format with chronological day-part ordering for plotting.
MeltDD=melt(PredDD,id.vars=c("fourtime","PdDistrict"),value.name="Probability")
MeltDD$fourtime=factor(MeltDD$fourtime,levels=c("twilight","morning","afternoon","night"))
tpf=ggplot(MeltDD,aes(x=fourtime,y=Probability,color=PdDistrict,group=PdDistrict)) +
geom_line() +
geom_point() +
facet_grid(variable~., scales="free") +
labs(title="fourtime vs PdDistrict in each Category") +
theme(plot.title = element_text(size=18, face="bold.italic"),
axis.text = element_text(size=10, color="navy"),
axis.title.x = element_text(size=16, face="bold"),
axis.title.y = element_text(size=16, face="bold"),
legend.title = element_text(size=12),
legend.text = element_text(size=12))
# Save the rendered figure to the desktop.
ggsave("fourtime vs PdDistrict in each Category.png",tpf,path="/Users/macintosh/Desktop")
|
# plot3.R -- reads the UCI household power consumption data, keeps the two
# target days (1-2 Feb 2007), and writes a 480x480 PNG of the three
# sub-metering series over time, with a legend.

# Read the full file: ";"-separated, "?" marks missing values.
plot3Data <- read.csv("./household_power_consumption.txt",
                      sep = ";", header = TRUE, quote = '\"',
                      na.strings = "?", nrows = 2075259,
                      check.names = FALSE, stringsAsFactors = FALSE)

# Restrict to the two days of interest (Date is still a d/m/Y string here).
plot3DataFilter <- subset(plot3Data, Date %in% c("1/2/2007", "2/2/2007"))

# Convert the date strings, then build a combined POSIXct date-time column
# to serve as the x axis.
plot3DataFilter$Date <- as.Date(plot3DataFilter$Date, format = "%d/%m/%Y")
datetime <- paste(as.Date(plot3DataFilter$Date), plot3DataFilter$Time)
plot3DataFilter$Datetime <- as.POSIXct(datetime)

# Render the three series straight to the PNG device.
png(filename = "plot3.png", width = 480, height = 480 )
plot(plot3DataFilter$Datetime, plot3DataFilter$Sub_metering_1,
     type = "l", xlab = "", ylab = "Energy sub metering")
lines(plot3DataFilter$Datetime, plot3DataFilter$Sub_metering_2, col = "Red")
lines(plot3DataFilter$Datetime, plot3DataFilter$Sub_metering_3, col = "Blue")
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /plot3.R | no_license | wtg5020/ExData_Plotting1 | R | false | false | 834 | r | plot3Data <- read.csv("./household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", nrows=2075259, check.names=FALSE, stringsAsFactors = FALSE, quote ='\"')
# Keep only the two analysis days; Date is still a d/m/Y string at this point.
plot3DataFilter <- subset(plot3Data, Date %in% c("1/2/2007","2/2/2007"))
plot3DataFilter$Date <- as.Date(plot3DataFilter$Date, format="%d/%m/%Y")
# Combine date and time into one POSIXct column for the x axis.
# (The inner as.Date() is a no-op -- Date was converted just above.)
datetime <- paste(as.Date(plot3DataFilter$Date), plot3DataFilter$Time)
plot3DataFilter$Datetime <- as.POSIXct(datetime)
# Render the three sub-metering series straight to a 480x480 PNG.
png(filename = "plot3.png", width = 480, height = 480 )
with(plot3DataFilter, {
plot(Sub_metering_1~Datetime, type="l", ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col="Red")
lines(Sub_metering_3~Datetime,col="Blue")
})
legend("topright", col=c("black","red","blue"), lty=1, lwd=2, legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
dev.off()
|
# Project packages: hatstats (survey summaries) and hatviz (presentation).
library(hatstats)
library(hatviz)
# Study configuration shipped inside the hatstats package installation.
config_file <- system.file('studies/NIRPC HTS.yaml', package = 'hatstats')
hts_data <- read_data(config_file)
# hts_data <- read_data(config = "study_config.yaml")
#================================================================================================================================================#
#### Tables ####
#================================================================================================================================================#
#=================================================================================#
# Table 1. Distribution of travel dates =====
# Table 1: household counts by travel date.
tbl1 <- summarize_data(
hts_data,
agg = 'household_count',
by = 'travdate'
)
# Drop the raw-count column (data.table in-place delete) before formatting.
tbl1[, N := NULL]
make_table(tbl1, confidence = 0.95)
# tbl1_html <- make_table(tbl1, confidence = 0.95)
# html_table_to_docx(tbl1_html, 'test.docx')
#=================================================================================#
# Table 2. Sample Distributions =====
#=================================================================================#
# Table 3. Data Collection Process Description =====
#=================================================================================#
# Table 4. Data Collection Process Description (Conitnued) =====
#=================================================================================#
# Table 5. Data Elements Collected =====
#=================================================================================#
# Table 6. Item Non-response for Recruitment Questions =====
#=================================================================================#
# Table 7. Item Non-response for Retrieval/Travel Characteristic Questions =====
#=================================================================================#
# Table 8. Stage 2 QA/QC Checks for Household and Person Characteristics =====
#=================================================================================#
# Table 9. Stage 2 QA/QC Checks for Place Characteristics =====
#=================================================================================#
# Table 10. Household Size by Number of Household Workers =====
# Table 10: household counts cross-tabulated by size and worker count.
tbl10 <- summarize_data(
data = hts_data,
agg = 'household_count',
by = c('hhsize_agg','hhworker_agg')
)
make_table(tbl10, row_vars = c('hhsize_agg','hhworker_agg'))
#=================================================================================#
# Table 11. Household Size by Number of Household Vehicles =====
#=================================================================================#
# Table 12. Number of Workers by Number of Household Vehicles =====
#=================================================================================#
# Table 13. Household Income; Surveyed and Weighted versus ACS =====
#=================================================================================#
# Table 14. Person Characteristics; Surveyed and Weighted versus ACS =====
#=================================================================================#
# Table 15. Household Characteristics; Surveyed and Weighted versus ACS =====
#=================================================================================#
# Table 16. Summary Survey Results =====
#=================================================================================#
# Table 17. Household Size =====
#=================================================================================#
# Table 18. Household Vehicles =====
#=================================================================================#
# Table 19. Household Workers =====
#=================================================================================#
# Table 20. Household Income =====
#=================================================================================#
# Table 21. Home Ownership =====
#=================================================================================#
# Table 22. Respondent Race =====
#=================================================================================#
# Table 23. Respondent Age =====
#=================================================================================#
# Table 24. Household Trip Rates =====
# Table 24: overall household trip rate (no grouping variables).
tbl24 <- summarize_data(
data = hts_data,
agg = 'household_trip_rate'
)
#=================================================================================#
# Table 25. Household Trip Rates by Household Size by Household Vehicles =====
#=================================================================================#
# Table 26. Household Trip Rates by Household Size =====
#=================================================================================#
# Table 27. Household Trip Rates by Household Vehicles =====
#=================================================================================#
# Table 28. Household Trip Rates by Household Workers =====
#=================================================================================#
# Table 29. Household Trip Rates by Household Income =====
#=================================================================================#
# Table 30. Household Trip Rates by Home Ownership =====
#=================================================================================#
# Table 31. Person Trip Rates =====
#=================================================================================#
# Table 32. Person Trip Rates by Gender =====
#=================================================================================#
# Table 33. Person Trip Rates by Age =====
#=================================================================================#
# Table 34. Person Trip Rates by Race =====
#=================================================================================#
# Table 35. Person Trip Rates by Driver’s License Status =====
#=================================================================================#
# Table 36. Person Trip Rates by Person Type =====
#=================================================================================#
# Table 37. Trip Type Definitions Used in Analysis =====
#=================================================================================#
# Table 38. Frequency, Trip Rate, Average Minutes, and Average Distances by Trip Types =====
#=================================================================================#
# Table 39. Mode Share =====
#=================================================================================#
# Table 40. Average Trip Duration (in minutes) by Mode =====
# Table 40: mean travel time (presumably minutes -- confirm against the
# study codebook) by travel mode.
tbl40 <- summarize_data(
data = hts_data,
agg = 'avg',
agg_var = 'travtime',
by = 'mode'
)
#=================================================================================#
# Table 41. Average Trip Distance (in miles) by Mode =====
#=================================================================================#
# Table 42. Actual versus Typical Work Mode =====
#=================================================================================#
# Table 43. Actual versus Typical School Mode =====
#=================================================================================#
#================================================================================================================================================#
#### Figures ####
#================================================================================================================================================#
# Figure 3. Study Area with Home Locations for Final Delivered Households =====
#=================================================================================#
# Figure 4. Study Area with Home Locations for Final Delivered Households =====
#=================================================================================#
# Figure 5. Representation of Captured GPS points from the Daily Travel Apps =====
#=================================================================================#
# Figure 6. Volume of Household Trips =====
#=================================================================================#
# Figure 7. Person Types =====
#=================================================================================#
# Figure 8. Departure Times by Time of Day =====
#=================================================================================#
# Figure 9. Departure Times – Raw Counts (weighted counts divided by 1000) =====
| /prep/report_shell.R | no_license | Westat-Transportation/surveysummarize | R | false | false | 8,537 | r | library(hatstats)
library(hatviz)
config_file <- system.file('studies/NIRPC HTS.yaml', package = 'hatstats')
hts_data <- read_data(config_file)
# hts_data <- read_data(config = "study_config.yaml")
#================================================================================================================================================#
#### Tables ####
#================================================================================================================================================#
#=================================================================================#
# Table 1. Distribution of travel dates =====
tbl1 <- summarize_data(
hts_data,
agg = 'household_count',
by = 'travdate'
)
tbl1[, N := NULL]
make_table(tbl1, confidence = 0.95)
# tbl1_html <- make_table(tbl1, confidence = 0.95)
# html_table_to_docx(tbl1_html, 'test.docx')
#=================================================================================#
# Table 2. Sample Distributions =====
#=================================================================================#
# Table 3. Data Collection Process Description =====
#=================================================================================#
# Table 4. Data Collection Process Description (Conitnued) =====
#=================================================================================#
# Table 5. Data Elements Collected =====
#=================================================================================#
# Table 6. Item Non-response for Recruitment Questions =====
#=================================================================================#
# Table 7. Item Non-response for Retrieval/Travel Characteristic Questions =====
#=================================================================================#
# Table 8. Stage 2 QA/QC Checks for Household and Person Characteristics =====
#=================================================================================#
# Table 9. Stage 2 QA/QC Checks for Place Characteristics =====
#=================================================================================#
# Table 10. Household Size by Number of Household Workers =====
tbl10 <- summarize_data(
data = hts_data,
agg = 'household_count',
by = c('hhsize_agg','hhworker_agg')
)
make_table(tbl10, row_vars = c('hhsize_agg','hhworker_agg'))
#=================================================================================#
# Table 11. Household Size by Number of Household Vehicles =====
#=================================================================================#
# Table 12. Number of Workers by Number of Household Vehicles =====
#=================================================================================#
# Table 13. Household Income; Surveyed and Weighted versus ACS =====
#=================================================================================#
# Table 14. Person Characteristics; Surveyed and Weighted versus ACS =====
#=================================================================================#
# Table 15. Household Characteristics; Surveyed and Weighted versus ACS =====
#=================================================================================#
# Table 16. Summary Survey Results =====
#=================================================================================#
# Table 17. Household Size =====
#=================================================================================#
# Table 18. Household Vehicles =====
#=================================================================================#
# Table 19. Household Workers =====
#=================================================================================#
# Table 20. Household Income =====
#=================================================================================#
# Table 21. Home Ownership =====
#=================================================================================#
# Table 22. Respondent Race =====
#=================================================================================#
# Table 23. Respondent Age =====
#=================================================================================#
# Table 24. Household Trip Rates =====
tbl24 <- summarize_data(
data = hts_data,
agg = 'household_trip_rate'
)
#=================================================================================#
# Table 25. Household Trip Rates by Household Size by Household Vehicles =====
#=================================================================================#
# Table 26. Household Trip Rates by Household Size =====
#=================================================================================#
# Table 27. Household Trip Rates by Household Vehicles =====
#=================================================================================#
# Table 28. Household Trip Rates by Household Workers =====
#=================================================================================#
# Table 29. Household Trip Rates by Household Income =====
#=================================================================================#
# Table 30. Household Trip Rates by Home Ownership =====
#=================================================================================#
# Table 31. Person Trip Rates =====
#=================================================================================#
# Table 32. Person Trip Rates by Gender =====
#=================================================================================#
# Table 33. Person Trip Rates by Age =====
#=================================================================================#
# Table 34. Person Trip Rates by Race =====
#=================================================================================#
# Table 35. Person Trip Rates by Driver’s License Status =====
#=================================================================================#
# Table 36. Person Trip Rates by Person Type =====
#=================================================================================#
# Table 37. Trip Type Definitions Used in Analysis =====
#=================================================================================#
# Table 38. Frequency, Trip Rate, Average Minutes, and Average Distances by Trip Types =====
#=================================================================================#
# Table 39. Mode Share =====
#=================================================================================#
# Table 40. Average Trip Duration (in minutes) by Mode =====
tbl40 <- summarize_data(
data = hts_data,
agg = 'avg',
agg_var = 'travtime',
by = 'mode'
)
#=================================================================================#
# Table 41. Average Trip Distance (in miles) by Mode =====
#=================================================================================#
# Table 42. Actual versus Typical Work Mode =====
#=================================================================================#
# Table 43. Actual versus Typical School Mode =====
#=================================================================================#
#================================================================================================================================================#
#### Figures ####
#================================================================================================================================================#
# Figure 3. Study Area with Home Locations for Final Delivered Households =====
#=================================================================================#
# Figure 4. Study Area with Home Locations for Final Delivered Households =====
#=================================================================================#
# Figure 5. Representation of Captured GPS points from the Daily Travel Apps =====
#=================================================================================#
# Figure 6. Volume of Household Trips =====
#=================================================================================#
# Figure 7. Person Types =====
#=================================================================================#
# Figure 8. Departure Times by Time of Day =====
#=================================================================================#
# Figure 9. Departure Times – Raw Counts (weighted counts divided by 1000) =====
|
# Plot a posterior sample as a histogram (or density curve) annotated with
# a central-tendency value, the HDI, and optional comparison value / ROPE,
# returning a one-row matrix of the corresponding numeric summaries.
#
# Arguments:
#   paramSampleVec : numeric vector (or coda mcmc.list) of posterior draws.
#   cenTend        : central tendency to annotate: "mode", "median", "mean".
#   compVal        : optional scalar comparison value to mark.
#   ROPE           : optional length-2 region of practical equivalence.
#   credMass       : mass of the highest density interval (default 0.95).
#   HDItextPlace   : horizontal placement of the HDI limit labels.
#   ...            : further arguments passed through to hist() / plot().
# Value: 1-row matrix with ESS, mean/median/mode, HDI limits, and the
#   compVal / ROPE tail probabilities (NA where not requested).
# Requires coda (effectiveSize) and HDIofMCMC() defined in this file.
plotPost = function( paramSampleVec , cenTend=c("mode","median","mean")[1] ,
                     compVal=NULL, ROPE=NULL, credMass=0.95, HDItextPlace=0.7,
                     xlab=NULL , xlim=NULL , yaxt=NULL , ylab=NULL ,
                     main=NULL , cex=NULL , cex.lab=NULL ,
                     col=NULL , border=NULL , showCurve=FALSE , breaks=NULL ,
                     ... ) {
  # Override defaults of hist function, if not specified by user:
  # (additional arguments "..." are passed to the hist function)
  if ( is.null(xlab) ) xlab="Param. Val."
  if ( is.null(cex.lab) ) cex.lab=1.5
  if ( is.null(cex) ) cex=1.4
  if ( is.null(xlim) ) xlim=range( c( compVal , ROPE , paramSampleVec ) )
  if ( is.null(main) ) main=""
  if ( is.null(yaxt) ) yaxt="n"
  if ( is.null(ylab) ) ylab=""
  if ( is.null(col) ) col="skyblue"
  if ( is.null(border) ) border="white"
  # Convert coda object to matrix.  inherits() is the robust test here;
  # class(x) == "mcmc.list" breaks for objects carrying multiple classes.
  if ( inherits(paramSampleVec, "mcmc.list") ) {
    paramSampleVec = as.matrix(paramSampleVec)
  }
  summaryColNames = c("ESS","mean","median","mode",
                      "hdiMass","hdiLow","hdiHigh",
                      "compVal","pGtCompVal",
                      "ROPElow","ROPEhigh","pLtROPE","pInROPE","pGtROPE")
  postSummary = matrix( NA , nrow=1 , ncol=length(summaryColNames) ,
                        dimnames=list( c( xlab ) , summaryColNames ) )
  # Central tendencies, computed once and reused for the annotations below
  # (the original recomputed mean/median/density a second time mid-plot).
  mn = mean(paramSampleVec)
  med = median(paramSampleVec)
  mcmcDensity = density(paramSampleVec)
  mo = mcmcDensity$x[which.max(mcmcDensity$y)]
  # require(coda) # for effectiveSize function
  postSummary[,"ESS"] = effectiveSize(paramSampleVec)
  postSummary[,"mean"] = mn
  postSummary[,"median"] = med
  postSummary[,"mode"] = mo
  HDI = HDIofMCMC( paramSampleVec , credMass )
  postSummary[,"hdiMass"]=credMass
  postSummary[,"hdiLow"]=HDI[1]
  postSummary[,"hdiHigh"]=HDI[2]
  # Plot histogram.
  cvCol = "darkgreen"   # color for the comparison-value annotation
  ropeCol = "darkred"   # color for the ROPE annotation
  if ( is.null(breaks) ) {
    if ( max(paramSampleVec) > min(paramSampleVec) ) {
      # Bin width tied to the HDI so the interval spans roughly 18 bars.
      breaks = c( seq( from=min(paramSampleVec) , to=max(paramSampleVec) ,
                       by=(HDI[2]-HDI[1])/18 ) , max(paramSampleVec) )
    } else {
      # Degenerate (constant) sample: draw a single sliver of a bar.
      breaks=c(min(paramSampleVec)-1.0E-6,max(paramSampleVec)+1.0E-6)
      border="skyblue"
    }
  }
  if ( !showCurve ) {
    par(xpd=NA)  # allow annotations to extend outside the plot region
    histinfo = hist( paramSampleVec , xlab=xlab , yaxt=yaxt , ylab=ylab ,
                     freq=F , border=border , col=col ,
                     xlim=xlim , main=main , cex=cex , cex.lab=cex.lab ,
                     breaks=breaks , ... )
  }
  if ( showCurve ) {
    par(xpd=NA)
    histinfo = hist( paramSampleVec , plot=F )  # only used for y-scaling below
    densCurve = density( paramSampleVec , adjust=2 )
    plot( densCurve$x , densCurve$y , type="l" , lwd=5 , col=col , bty="n" ,
          xlim=xlim , xlab=xlab , yaxt=yaxt , ylab=ylab ,
          main=main , cex=cex , cex.lab=cex.lab , ... )
  }
  # Vertical placement of the three annotation layers.
  cenTendHt = 0.9*max(histinfo$density)
  cvHt = 0.7*max(histinfo$density)
  ROPEtextHt = 0.55*max(histinfo$density)
  # Display central tendency (values computed once above).
  if ( cenTend=="mode" ){
    text( mo , cenTendHt ,
          bquote(mode==.(signif(mo,3))) , adj=c(.5,0) , cex=cex )
  }
  if ( cenTend=="median" ){
    text( med , cenTendHt ,
          bquote(median==.(signif(med,3))) , adj=c(.5,0) , cex=cex , col=cvCol )
  }
  if ( cenTend=="mean" ){
    text( mn , cenTendHt ,
          bquote(mean==.(signif(mn,3))) , adj=c(.5,0) , cex=cex )
  }
  # Display the comparison value with % of draws on either side.
  if ( !is.null( compVal ) ) {
    pGtCompVal = sum( paramSampleVec > compVal ) / length( paramSampleVec )
    pLtCompVal = 1 - pGtCompVal
    lines( c(compVal,compVal) , c(0.96*cvHt,0) ,
           lty="dotted" , lwd=2 , col=cvCol )
    text( compVal , cvHt ,
          bquote( .(round(100*pLtCompVal,1)) * "% < " *
                    .(signif(compVal,3)) * " < " *
                    .(round(100*pGtCompVal,1)) * "%" ) ,
          adj=c(pLtCompVal,0) , cex=0.8*cex , col=cvCol )
    postSummary[,"compVal"] = compVal
    postSummary[,"pGtCompVal"] = pGtCompVal
  }
  # Display the ROPE with % of draws below / inside / above it.
  if ( !is.null( ROPE ) ) {
    pInROPE = ( sum( paramSampleVec > ROPE[1] & paramSampleVec < ROPE[2] )
                / length( paramSampleVec ) )
    pGtROPE = ( sum( paramSampleVec >= ROPE[2] ) / length( paramSampleVec ) )
    pLtROPE = ( sum( paramSampleVec <= ROPE[1] ) / length( paramSampleVec ) )
    lines( c(ROPE[1],ROPE[1]) , c(0.96*ROPEtextHt,0) , lty="dotted" , lwd=2 ,
           col=ropeCol )
    lines( c(ROPE[2],ROPE[2]) , c(0.96*ROPEtextHt,0) , lty="dotted" , lwd=2 ,
           col=ropeCol)
    text( mean(ROPE) , ROPEtextHt ,
          bquote( .(round(100*pLtROPE,1)) * "% < " * .(ROPE[1]) * " < " *
                    .(round(100*pInROPE,1)) * "% < " * .(ROPE[2]) * " < " *
                    .(round(100*pGtROPE,1)) * "%" ) ,
          adj=c(pLtROPE+.5*pInROPE,0) , cex=1 , col=ropeCol )
    postSummary[,"ROPElow"]=ROPE[1]
    postSummary[,"ROPEhigh"]=ROPE[2]
    postSummary[,"pLtROPE"]=pLtROPE
    postSummary[,"pInROPE"]=pInROPE
    postSummary[,"pGtROPE"]=pGtROPE
  }
  # Display the HDI as a thick bar on the x axis with labeled limits.
  lines( HDI , c(0,0) , lwd=4 , lend=1 )
  text( mean(HDI) , 0 , bquote(.(100*credMass) * "% HDI" ) ,
        adj=c(.5,-1.7) , cex=cex )
  text( HDI[1] , 0 , bquote(.(signif(HDI[1],3))) ,
        adj=c(HDItextPlace,-0.5) , cex=cex )
  text( HDI[2] , 0 , bquote(.(signif(HDI[2],3))) ,
        adj=c(1.0-HDItextPlace,-0.5) , cex=cex )
  par(xpd=F)
  return( postSummary )
}
HDIofMCMC = function( sampleVec , credMass=0.95 ) {
  # Computes the highest density interval from a sample of representative
  # values, estimated as the shortest credible interval.
  # Arguments:
  #   sampleVec : vector of representative values from a probability
  #               distribution.
  #   credMass  : scalar in (0, 1], the mass the interval must contain.
  # Value:
  #   HDIlim, a length-2 vector c(lower, upper) bounding the HDI.
  sortedPts = sort( sampleVec )
  # Number of points every candidate interval must span.
  ciIdxInc = ceiling( credMass * length( sortedPts ) )
  nCIs = length( sortedPts ) - ciIdxInc
  # When credMass covers (essentially) the whole sample there is no
  # narrower window to search, so the HDI is the full sample range.
  # (The original `for (i in 1:nCIs)` mis-iterated over c(1, 0) here,
  # producing an NA width and an empty return value.)
  if ( nCIs < 1 ) {
    return( c( sortedPts[1] , sortedPts[length(sortedPts)] ) )
  }
  # Width of every window of ciIdxInc points in the sorted draws,
  # vectorized (equivalent to the original elementwise loop).
  ciWidth = sortedPts[ seq_len(nCIs) + ciIdxInc ] - sortedPts[ seq_len(nCIs) ]
  # The narrowest such window is the HDI.
  HDImin = sortedPts[ which.min( ciWidth ) ]
  HDImax = sortedPts[ which.min( ciWidth ) + ciIdxInc ]
  HDIlim = c( HDImin , HDImax )
  return( HDIlim )
}
| /Bayes_Code_Dylan.R | no_license | ecm5245/Advanced-Stats | R | false | false | 6,461 | r | plotPost = function( paramSampleVec , cenTend=c("mode","median","mean")[1] ,
compVal=NULL, ROPE=NULL, credMass=0.95, HDItextPlace=0.7,
xlab=NULL , xlim=NULL , yaxt=NULL , ylab=NULL ,
main=NULL , cex=NULL , cex.lab=NULL ,
col=NULL , border=NULL , showCurve=FALSE , breaks=NULL ,
... ) {
# Override defaults of hist function, if not specified by user:
# (additional arguments "..." are passed to the hist function)
if ( is.null(xlab) ) xlab="Param. Val."
if ( is.null(cex.lab) ) cex.lab=1.5
if ( is.null(cex) ) cex=1.4
if ( is.null(xlim) ) xlim=range( c( compVal , ROPE , paramSampleVec ) )
if ( is.null(main) ) main=""
if ( is.null(yaxt) ) yaxt="n"
if ( is.null(ylab) ) ylab=""
if ( is.null(col) ) col="skyblue"
if ( is.null(border) ) border="white"
# convert coda object to matrix:
if ( class(paramSampleVec) == "mcmc.list" ) {
paramSampleVec = as.matrix(paramSampleVec)
}
summaryColNames = c("ESS","mean","median","mode",
"hdiMass","hdiLow","hdiHigh",
"compVal","pGtCompVal",
"ROPElow","ROPEhigh","pLtROPE","pInROPE","pGtROPE")
postSummary = matrix( NA , nrow=1 , ncol=length(summaryColNames) ,
dimnames=list( c( xlab ) , summaryColNames ) )
# require(coda) # for effectiveSize function
postSummary[,"ESS"] = effectiveSize(paramSampleVec)
postSummary[,"mean"] = mean(paramSampleVec)
postSummary[,"median"] = median(paramSampleVec)
mcmcDensity = density(paramSampleVec)
postSummary[,"mode"] = mcmcDensity$x[which.max(mcmcDensity$y)]
HDI = HDIofMCMC( paramSampleVec , credMass )
postSummary[,"hdiMass"]=credMass
postSummary[,"hdiLow"]=HDI[1]
postSummary[,"hdiHigh"]=HDI[2]
# Plot histogram.
cvCol = "darkgreen"
ropeCol = "darkred"
if ( is.null(breaks) ) {
if ( max(paramSampleVec) > min(paramSampleVec) ) {
breaks = c( seq( from=min(paramSampleVec) , to=max(paramSampleVec) ,
by=(HDI[2]-HDI[1])/18 ) , max(paramSampleVec) )
} else {
breaks=c(min(paramSampleVec)-1.0E-6,max(paramSampleVec)+1.0E-6)
border="skyblue"
}
}
if ( !showCurve ) {
par(xpd=NA)
histinfo = hist( paramSampleVec , xlab=xlab , yaxt=yaxt , ylab=ylab ,
freq=F , border=border , col=col ,
xlim=xlim , main=main , cex=cex , cex.lab=cex.lab ,
breaks=breaks , ... )
}
if ( showCurve ) {
par(xpd=NA)
histinfo = hist( paramSampleVec , plot=F )
densCurve = density( paramSampleVec , adjust=2 )
plot( densCurve$x , densCurve$y , type="l" , lwd=5 , col=col , bty="n" ,
xlim=xlim , xlab=xlab , yaxt=yaxt , ylab=ylab ,
main=main , cex=cex , cex.lab=cex.lab , ... )
}
cenTendHt = 0.9*max(histinfo$density)
cvHt = 0.7*max(histinfo$density)
ROPEtextHt = 0.55*max(histinfo$density)
# Display central tendency:
mn = mean(paramSampleVec)
med = median(paramSampleVec)
mcmcDensity = density(paramSampleVec)
mo = mcmcDensity$x[which.max(mcmcDensity$y)]
if ( cenTend=="mode" ){
text( mo , cenTendHt ,
bquote(mode==.(signif(mo,3))) , adj=c(.5,0) , cex=cex )
}
if ( cenTend=="median" ){
text( med , cenTendHt ,
bquote(median==.(signif(med,3))) , adj=c(.5,0) , cex=cex , col=cvCol )
}
if ( cenTend=="mean" ){
text( mn , cenTendHt ,
bquote(mean==.(signif(mn,3))) , adj=c(.5,0) , cex=cex )
}
# Display the comparison value.
if ( !is.null( compVal ) ) {
pGtCompVal = sum( paramSampleVec > compVal ) / length( paramSampleVec )
pLtCompVal = 1 - pGtCompVal
lines( c(compVal,compVal) , c(0.96*cvHt,0) ,
lty="dotted" , lwd=2 , col=cvCol )
text( compVal , cvHt ,
bquote( .(round(100*pLtCompVal,1)) * "% < " *
.(signif(compVal,3)) * " < " *
.(round(100*pGtCompVal,1)) * "%" ) ,
adj=c(pLtCompVal,0) , cex=0.8*cex , col=cvCol )
postSummary[,"compVal"] = compVal
postSummary[,"pGtCompVal"] = pGtCompVal
}
# Display the ROPE.
if ( !is.null( ROPE ) ) {
pInROPE = ( sum( paramSampleVec > ROPE[1] & paramSampleVec < ROPE[2] )
/ length( paramSampleVec ) )
pGtROPE = ( sum( paramSampleVec >= ROPE[2] ) / length( paramSampleVec ) )
pLtROPE = ( sum( paramSampleVec <= ROPE[1] ) / length( paramSampleVec ) )
lines( c(ROPE[1],ROPE[1]) , c(0.96*ROPEtextHt,0) , lty="dotted" , lwd=2 ,
col=ropeCol )
lines( c(ROPE[2],ROPE[2]) , c(0.96*ROPEtextHt,0) , lty="dotted" , lwd=2 ,
col=ropeCol)
text( mean(ROPE) , ROPEtextHt ,
bquote( .(round(100*pLtROPE,1)) * "% < " * .(ROPE[1]) * " < " *
.(round(100*pInROPE,1)) * "% < " * .(ROPE[2]) * " < " *
.(round(100*pGtROPE,1)) * "%" ) ,
adj=c(pLtROPE+.5*pInROPE,0) , cex=1 , col=ropeCol )
postSummary[,"ROPElow"]=ROPE[1]
postSummary[,"ROPEhigh"]=ROPE[2]
postSummary[,"pLtROPE"]=pLtROPE
postSummary[,"pInROPE"]=pInROPE
postSummary[,"pGtROPE"]=pGtROPE
}
# Display the HDI.
lines( HDI , c(0,0) , lwd=4 , lend=1 )
text( mean(HDI) , 0 , bquote(.(100*credMass) * "% HDI" ) ,
adj=c(.5,-1.7) , cex=cex )
text( HDI[1] , 0 , bquote(.(signif(HDI[1],3))) ,
adj=c(HDItextPlace,-0.5) , cex=cex )
text( HDI[2] , 0 , bquote(.(signif(HDI[2],3))) ,
adj=c(1.0-HDItextPlace,-0.5) , cex=cex )
par(xpd=F)
#
return( postSummary )
}
HDIofMCMC = function( sampleVec , credMass=0.95 ) {
# Computes highest density interval from a sample of representative values,
# estimated as shortest credible interval.
# Arguments:
# sampleVec
# is a vector of representative values from a probability distribution.
# credMass
# is a scalar between 0 and 1, indicating the mass within the credible
# interval that is to be estimated.
# Value:
# HDIlim is a vector containing the limits of the HDI
sortedPts = sort( sampleVec )
# Number of sample points each candidate interval must span.
ciIdxInc = ceiling( credMass * length( sortedPts ) )
nCIs = length( sortedPts ) - ciIdxInc
ciWidth = rep( 0 , nCIs )
# Width of every window of ciIdxInc points in the sorted draws.
# NOTE(review): when nCIs == 0 (credMass covers the whole sample), 1:nCIs
# is c(1, 0); the loop then produces an NA width and the function returns
# an empty vector.  Use seq_len(nCIs), or return range(sampleVec) when
# nCIs < 1.
for ( i in 1:nCIs ) {
ciWidth[ i ] = sortedPts[ i + ciIdxInc ] - sortedPts[ i ]
}
# The narrowest such window is the HDI.
HDImin = sortedPts[ which.min( ciWidth ) ]
HDImax = sortedPts[ which.min( ciWidth ) + ciIdxInc ]
HDIlim = c( HDImin , HDImax )
return( HDIlim )
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.