blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3bfb66dcccabb30322e78aee4bd3a00a10d4c29d | f89ffc588173d602176706659bf0529e52f0ed8a | /Week5_Exercises.R | 70025943f3e6353bf6766261f3b1121ff4d1e377 | [] | no_license | Viveniac/Advanced-R | cc7980d1629fe24510ff11ef7cd6a8d566f7b1af | ba0148478500687b9f4e958ff274e9347b3a91e2 | refs/heads/main | 2023-06-08T02:54:56.085732 | 2021-06-23T14:18:15 | 2021-06-23T14:18:15 | 379,626,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,034 | r | Week5_Exercises.R | # Week 5 Exercises #
## EX 1 --------------------------------------------------------------
# Create the tibble adult3 composed of all the columns except final_weight,
# capital_gain, capital_loss
## EX 2 --------------------------------------------------------------
# Calculate the average and standard deviation of hours_per_week for every
# combination of sex and marital status and order the new tibble in decreasing
# order with respect to the average.
## EX 3 --------------------------------------------------------------
# Illustrate how the frequencies of class depend on age.
## EX 4 --------------------------------------------------------------
# Focus on rows where education is either "Masters" or "Bachelors", and
# focus on age between 30 and 40 and "Private" workclass.
# Add a new column called "class_binary" that is 1 if class is ">50K" or 0
# otherwise.
# Calculate the mean of class_binary for every combination of education and
# age.
# Show with a plot how this mean depends on age by education.
|
e3ceeec619615853294c17f20576fe039049ba0d | 76b866e7ef2e2477fad8330d547fa732547bd8ef | /1GridEnsemble.R | 455b747ec2056b7789d5da1d8e93543c9c1445e2 | [] | no_license | tsuresh83/Kaggle_FBCheckin | d5f51bd675a4c018d1f99de420dbf7236b4ab952 | 3f07b95bbe500bd0dbc76035b42b358169ed9af6 | refs/heads/master | 2020-03-21T17:15:38.408820 | 2018-06-27T03:04:35 | 2018-06-27T03:04:35 | 138,822,747 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,979 | r | 1GridEnsemble.R | rm(list=ls())
# Train an H2O stacked ensemble on a single spatial grid cell of the
# Facebook check-in training data and evaluate it on a held-out split.

# BUG FIX: library() loads exactly one package; in the original
# library(needs, h2o, h2oEnsemble) the extra arguments were silently
# matched to the `help`/`pos` parameters instead of loading the packages.
library(needs)
needs(data.table, RANN, caret, scales, h2o)
library(h2oEnsemble)

load("/media/3TB/kaggle/fb/data/train.rdata")  # provides `train`
load("/media/3TB/kaggle/fb/data/test.rdata")   # provides `test`

# Restrict to one grid cell (the cell that row 3 happens to fall in).
grid1 <- train[train$Grid == train[3, ]$Grid, ]

# Stratified 75/25 train/validation split on the target place_id.
trainPartition <- createDataPartition(grid1$place_id, p = 0.75)
trainData <- grid1[unlist(trainPartition), ]
validData <- grid1[-unlist(trainPartition), ]

# Winsorise `accuracy` at the training split's 95th percentile and apply
# the same threshold to the validation split (threshold learned on the
# training data only, so no leakage).
accuracyThreshold <- as.integer(quantile(trainData$accuracy, probs = c(0.95)))
trainData[trainData$accuracy >= accuracyThreshold, ]$accuracy <- accuracyThreshold
validData[validData$accuracy >= accuracyThreshold, ]$accuracy <- accuracyThreshold

featureNames <- c("x", "y", "accuracy", "quarter_period_of_day", "hour",
                  "dayOfWeek", "monthOfYear")

# Drop rare classes: keep only place_ids whose training count exceeds the
# 90th percentile of per-place counts.
cts <- trainData[, list(N = .N), by = place_id]
cutoff <- quantile(cts$N, 0.9)
pidsAfterCutOff <- cts[N > cutoff, "place_id", with = FALSE]
gridDataTrain <- trainData[place_id %in% pidsAfterCutOff$place_id]
gridDataTrain <- gridDataTrain[, c(featureNames, "place_id"), with = FALSE]
gridDataTrain$place_id <- as.factor(gridDataTrain$place_id)
gridDataValidation <- validData[, c(featureNames, "place_id"), with = FALSE]

# Spin up a local H2O cluster and push both splits to it.
local <- h2o.init(nthreads = -1, max_mem_size = "50G")
trainingH2O <- as.h2o(gridDataTrain)
validationH2O <- as.h2o(gridDataValidation)

# Base learners and the metalearner that stacks their predictions.
learner <- c("h2o.glm.wrapper", "h2o.randomForest.wrapper",
             "h2o.gbm.wrapper", "h2o.deeplearning.wrapper",
             "h2o.naivebayes.wrapper")
metalearner <- "h2o.deeplearning.wrapper"

# Train the ensemble using 2-fold CV to generate the level-one data.
# More CV folds take longer to train but should increase performance.
# NOTE(review): place_id is a multi-class factor while family = "binomial"
# requests a two-class model -- confirm this is intentional.
fit <- h2o.ensemble(x = featureNames, y = "place_id",
                    training_frame = trainingH2O,
                    family = "binomial",
                    learner = learner,
                    metalearner = metalearner,
                    cvControl = list(V = 2, shuffle = TRUE))

perf <- h2o.ensemble_performance(fit, newdata = validationH2O)
|
bc5583453e7086f66c40376f3ccb31837f7b0d28 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rerf/examples/StrCorr.Rd.R | e9e8b65b5bc1116df221aa81855fade675cd4562 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 409 | r | StrCorr.Rd.R | library(rerf)
### Name: StrCorr
### Title: Compute tree strength and correlation
### Aliases: StrCorr

### ** Examples

library(rerf)
# Use the first 40 rows of each iris species for training; hold out the rest.
trainIdx <- c(1:40, 51:90, 101:140)
# Feature matrix (four measurements) and the Species factor as the label.
X <- as.matrix(iris[, 1:4])
Y <- iris[[5]]
# Fit a Randomer Forest on the training rows (single core for the example).
forest <- RerF(X[trainIdx, ], Y[trainIdx], num.cores = 1L)
# aggregate.output = FALSE keeps one prediction per tree, which StrCorr needs.
predictions <- Predict(X[-trainIdx, ], forest, num.cores = 1L, aggregate.output = FALSE)
# Strength and correlation of the individual trees against the true labels.
scor <- StrCorr(predictions, Y[-trainIdx])
|
10c44af5c92b962c4e6bed68bc046e27ba91633d | bbf82d62199f1c4c274d2d84ffd092e51cd9f993 | /man/Hypertension_Models.Rd | 5288b13fdfc36a99879ff4acbce7c4570958b8ef | [
"MIT"
] | permissive | lawine90/dizzPredictoR | f732c514e627c05656bc776d962494281bb5309f | 2f012343b6e7fd603a0c9fbab7e26285cc08807f | refs/heads/master | 2020-07-17T09:23:09.237262 | 2019-10-23T08:14:48 | 2019-10-23T08:14:48 | 205,993,549 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 573 | rd | Hypertension_Models.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Hypertension_Models.R
\docType{data}
\name{Hypertension_Models}
\alias{Hypertension_Models}
\title{Pre-trained models for hypertension prediction.}
\format{An object of class \code{caretList} of length 6.}
\usage{
data("Hypertension_Models")
}
\description{
Using data from the 7th Korea National Health and Nutrition Examination
Survey (2016), pre-trained models for hypertension prediction were built.
Six models are used, and the average AUC is about 0.79.
}
\examples{
data("Hypertension_Models")
}
\keyword{datasets}
|
b755e0666bf640e6ca92ab4fc0a8dd835ffe30f9 | 3cc6265e82e373d377dae488831cfdb1caad1dfe | /ddR/spark/dfrows.R | 4845a6e7191d7698ea427e415be103a43b74fd87 | [] | no_license | clarkfitzg/phd_research | 439ecc0d650da23bfad1e1a212e490c2746a6656 | dfe46c49f6beba54389b0074e19f3c9b1ea04645 | refs/heads/master | 2020-04-15T14:02:03.890862 | 2019-09-20T02:33:07 | 2019-09-20T02:33:07 | 59,333,323 | 6 | 3 | null | null | null | null | UTF-8 | R | false | false | 2,713 | r | dfrows.R | # Fri Aug 19 10:57:07 KST 2016
#
# For the Spark patch to work I need to figure out how to split and
# recombine a dataframe with a column of raw bytes

# Toy frame: integer key plus a list column holding serialized raw vectors.
df <- data.frame(key = 1:5)
df$value <- lapply(df$key, serialize, NULL)
# list of dataframes (one single-row data.frame per original row)
rows <- split(df, seq_len(nrow(df)))
df2 <- do.call(rbind, rows)
# Does what I wanted to happen
class(df2$value[[1]])
# looks like this does exactly the same thing.
df3 <- do.call(rbind.data.frame, rows)
############################################################
# Same round trip, but going through plain lists instead of data.frames.
list_of_dfs <- rows
list_of_lists <- lapply(list_of_dfs, as.list)
do.call(rbind, list_of_lists)
# Fails
#df4 <- do.call(rbind.data.frame, list_of_lists)
# TODO: write a function that rbinds and deals with binary objects
# NOTE(review): this helper ignores its `...` arguments and instead reads
# the global `list_of_lists`; despite its name it only converts each element
# to a data.frame and never rbinds. Looks like an abandoned experiment --
# flagged rather than changed.
rbind_withlists <- function(...)
lapply(list_of_lists, as.data.frame)
############################################################
#Fri Aug 19 14:37:28 KST 2016
# Here's what may well be going on in Spark's dapplyCollect:
# Build rows as two-element lists: an integer key and a serialized payload.
keys = as.list(1:5)
values = lapply(keys, serialize, NULL)
list_of_lists <- mapply(list, keys, values, SIMPLIFY = FALSE,
USE.NAMES = FALSE)
# Then dapplyCollect is doing something like this, which fails because of
# the vectors which have a different length. So we need to make this work.
do.call(rbind.data.frame, list_of_lists)
# Sanity check that this works with appropriate rows
list_of_lists2 <- mapply(list, 1:5, letters[1:5], SIMPLIFY = FALSE,
USE.NAMES = FALSE)
out2 = do.call(rbind.data.frame, list_of_lists2)
# Yes, no problem
# We're only worried about raws and vectors of length greater than 1 I
# think. So one way to do it is to identify raw vectors and handle them
# Inspect one row and mark which of its elements are raw vectors.
row1 <- list_of_lists[[1]]
rawcolumns <- "raw" == sapply(row1, class)
# (Removed a stray `if(any(rawcolumns))` guard: it made this function
# definition itself conditional on a global computed above.)
#
# Convert one row (a plain list) into a one-row data.frame, preserving raw
# elements: data.frame() would expand a raw vector into one row per byte,
# so raw elements are masked with NA first and then re-inserted as
# length-one list columns.
#
# row        - list of scalar values and/or raw vectors
# rawcolumns - logical vector marking which elements of `row` are raw
row_to_df <- function(row, rawcolumns){
    cleanrow <- row
    # Placeholder so data.frame() only sees scalar values.
    cleanrow[rawcolumns] <- NA
    dframe <- data.frame(cleanrow, stringsAsFactors = FALSE)
    # Re-insert each raw vector as a length-one list column.
    dframe[rawcolumns] <- lapply(row[rawcolumns], list)
    rownames(dframe) <- NULL
    colnames(dframe) <- NULL
    dframe
}
# BUG FIX: `rawcolumns` is a required argument; the original call omitted
# it, which errors as soon as the body indexes with it.
row_to_df(row1, rawcolumns)
# Row-bind a list of rows (lists that may contain raw elements) into one
# data.frame via row_to_df(), so raw vectors survive as list columns.
# Removed dead code from the original: an unused `args` variable and a
# discarded rbind.data.frame call that did the binding work twice.
rbind_df_with_raw <- function(list_of_rows, rawcolumns){
    cleanrows <- lapply(list_of_rows, row_to_df, rawcolumns)
    do.call(rbind, cleanrows)
}
rbind_df_with_raw(list_of_lists, rawcolumns)
# This is heavily row based. And it's not working well!
# Can we do it in a more vectorized way?
# What if we just "fill in" a dataframe?
# This looks ____WAY____ better :)
# BUG FIX: `list_of_rows` only exists as a function parameter above; the
# object defined in the global scope is `list_of_lists`.
# Vectorised approach: rbind the lists into a list-matrix, coerce once,
# then unlist only the non-raw columns back to atomic vectors.
dframe <- as.data.frame(do.call(rbind, list_of_lists))
dframe[!rawcolumns] <- lapply(dframe[!rawcolumns], unlist)
|
f4c41626fd1a4d5ed1388ecd29952c3aaad66fc6 | 06f362f76b1542bbdea12c34c0f239c9a624c877 | /man/importFromTxt.Rd | c2187f8f6c835995e2f18be8834bcd62c07ace2b | [] | no_license | mi2-warsaw/PISAoccupations | 31412147943082c058d998618ac6c78c06c9caf7 | 0b817f09c5599b59390e58edab602453ac9b0fe4 | refs/heads/master | 2020-05-22T06:51:14.081893 | 2017-04-18T18:49:12 | 2017-04-18T18:49:12 | 63,240,717 | 2 | 1 | null | 2016-12-09T00:00:09 | 2016-07-13T11:32:33 | R | UTF-8 | R | false | true | 1,322 | rd | importFromTxt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importexport.R
\name{importFromTxt}
\alias{importFromTxt}
\title{Import data from .txt files from OECD website.}
\usage{
importFromTxt(inputFilePath, variablesStartPositions, variablesEndPositions,
groupingVariablesStartPositions, groupingVariablesEndPositions,
groupingVariablesNames, studyYear, NAcodes = character(0),
occupationNAcodes = character(0), outputFilePath = NULL)
}
\arguments{
\item{inputFilePath}{path to the file to import.}
\item{variablesStartPositions}{starting position of column containing
country code, school id, mother and father occupation
and plausible values and replicate weights.}
\item{variablesEndPositions}{ending positions of column given in previous argument.}
\item{groupingVariablesStartPositions}{starting positions of additional columns
with factor variables.}
\item{groupingVariablesEndPositions}{ending positions of variables from previous argument.}
\item{groupingVariablesNames}{names for variables given in previous two arguments.}
\item{studyYear}{year of study.}
\item{NAcodes}{codes for NA values in PV and weight variables.}

\item{occupationNAcodes}{codes for NA values in the occupation variables.}
\item{outputFilePath}{path to .rda file to which results will be saved.}
}
\value{
tibble
}
\description{
Import data from .txt files from OECD website.
}
|
d5b96e82f3388626baaeba2671a22e5430a704b1 | 4c57a41ddf35c564a9e783de4d17539ffad3c789 | /R_Scripts/2_keras_cnn_example.R | e3a95f9b46b63d0fab82fb737fedda737af56c93 | [] | no_license | dan-veltri/ace-intro-to-deep-learning | 123e34689b5029dff4ecb5d0892fe2317144ac80 | 2e9fbc414b5a391ef4c5b23d1f73705f8902a6dc | refs/heads/master | 2022-10-08T11:05:23.737067 | 2022-09-23T21:18:51 | 2022-09-23T21:18:51 | 243,871,699 | 0 | 9 | null | null | null | null | UTF-8 | R | false | false | 3,399 | r | 2_keras_cnn_example.R | #!/usr/bin/env Rscript
#
# keras_cnn_example.R
# By: Dan Veltri (dan.veltri@gmail.com)
# Date: 02.06.2018
# Code modified from: http://parneetk.github.io/blog/cnn-cifar10/
# R-code adjustments from: https://keras.rstudio.com
#
# Here we're going to use a convolutional neural network (CNN)- specifically an
# 2D CNN to try and predict if images from the CIFAR10 dataset belong to one of
# ten classes/categories. For more details on Keras' CNN implementation see:
# https://keras.io/layers/convolutional/
# The problems we need to solve to use our CNN are:
# 1) How do we 'massage' our image data and responses so that it fits into our network?
# - We'll have to do some reshaping first!
#
# 2) Parameters - How good of performance can you get?
# - Try adjusting the number of epochs, filters and kernal sizes
#
# The CIFAR10 Data: Keras comes with a pre-processed training/testing data set (cifar10) that
# includes 50,000 32x32 color (RGB) images labeled as one of ten classes. Load the data as follow:
# (x_train, y_train), (x_test, y_test) = cifar10.load_data()
# x_train and x_test are arrays containing RGB images (num_samples, 3, 32, 32)
# y_train and y_test contain arrays of corresponding category numbers (0-9)
#
# More dataset details available in: https://keras.io/datasets/
#
# Challenge: Can you add additional Conv and pooling layers to the model and improve the ACC?
#=============================================================================================================
library(keras)

# Model hyper-parameters
n_filters    <- 32      # number of convolution filters applied to the image
kernel_shape <- c(5, 5) # size of the filter kernel slid over the image
stride_shape <- c(1, 1) # how far the kernel moves at each step
pool_shape   <- c(2, 2) # dimensions of the max-pooling window
n_epochs     <- 5       # rounds of training
batch_sz     <- 32      # number of samples trained per batch

# Fetch CIFAR10: 32x32 RGB images labelled with one of ten classes.
print("Loading in data.")
cf10 <- dataset_cifar10()

# Scale pixel intensities into [0, 1] and one-hot encode the labels.
x_train <- cf10$train$x / 255
x_test  <- cf10$test$x / 255
y_train <- to_categorical(cf10$train$y, num_classes = 10)
y_test  <- to_categorical(cf10$test$y, num_classes = 10)

print(paste0("Loaded ", nrow(x_train), " training examples with ",
             length(y_train), " responses and ", nrow(x_test),
             " testing examples with ", length(y_test), " responses."))

# Build the network: one conv + max-pool stage, then a softmax classifier.
model <- keras_model_sequential()
model %>%
  layer_conv_2d(filter = n_filters,
                kernel_size = kernel_shape,
                strides = stride_shape,
                padding = "same",
                input_shape = c(32, 32, 3)) %>%
  layer_max_pooling_2d(pool_size = pool_shape) %>%
  layer_flatten() %>%
  layer_dense(10, activation = "softmax")

model %>% compile(loss = "categorical_crossentropy",
                  optimizer = "adam",
                  metrics = "accuracy")
summary(model)

print("Training now...")
fit_history <- model %>%
  fit(x_train, y_train,
      batch_size = batch_sz,
      epochs = n_epochs,
      shuffle = TRUE,
      validation_data = list(x_test, y_test))

# Visualise the loss/accuracy curves over the epochs.
plot(fit_history)

print("Testing prediction performance...")
scores <- model %>% evaluate(x_test, y_test)
print(paste0("Testing Accuracy: ", scores$acc * 100.0, "%"))

#END OF PROGRAM
|
77e84f4b015d0bdd6e7309c276bcee5ccff521ec | 8c076878c75a918a7dd45bb0468e626affe69e0c | /man/simpleboot_d.Rd | abb358ff879f195925d0c327c02f2514ab3c2f77 | [] | no_license | lillion/emittr | bc831d69b78869340ae032492c7e38102c884437 | d4628a1f949ce145ea1bb476c777a12149f938d2 | refs/heads/master | 2020-05-22T05:42:14.895491 | 2020-05-05T14:45:52 | 2020-05-05T14:45:52 | 19,569,876 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 592 | rd | simpleboot_d.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simplebootstrap_d.R
\name{simpleboot_d}
\alias{simpleboot_d}
\title{Einfacher univariater Bootstrap}
\usage{
simpleboot_d(x, stat, reps = 1000)
}
\arguments{
\item{x}{Vektor mit den Daten}
\item{stat}{Art der Statistik, z.B. 'mean', 'sd'}
\item{reps}{Anzahl der Wiederholung}
}
\value{
bootstrap
}
\description{
einfacher bootstrap für univariate Statistik
}
\examples{
mydata<-rchisq(25,df=3)
simpleboot_d(mydata, "mean", reps=10000)
simpleboot_d(mydata, sd, reps=5000)
}
\seealso{
boot
}
\keyword{bootstrap}
|
b17a19f7e41184dbca505a617de82287ccf4aa62 | 7d80e38b6831ceb9f9af96773f50242750c921a5 | /man/ord.Rd | 5cfcff9915c5896255dafe5ea7c56a1180f2e427 | [] | no_license | SamGG/made4 | 9c60dd0011c8dff29f7e6b9ed0f811016df9540f | 1120fd5fa3bceaf44a546bd3bab1d7a6c9e85179 | refs/heads/master | 2021-05-25T09:23:45.024936 | 2020-09-25T10:49:52 | 2020-09-25T10:49:52 | 126,978,240 | 1 | 0 | null | 2018-03-27T12:33:23 | 2018-03-27T11:46:33 | R | UTF-8 | R | false | false | 5,981 | rd | ord.Rd | \name{ord}
\alias{ord}
\alias{plot.ord}
\title{Ordination}
\description{Run principal component analysis, correspondence analysis or non-symmetric correspondence analysis
on gene expression data}
\usage{
ord(dataset, type="coa", classvec=NULL,ord.nf=NULL, trans=FALSE, \dots)
\method{plot}{ord}(x, axis1=1, axis2=2, arraycol=NULL, genecol="gray25", nlab=10, genelabels= NULL, arraylabels=NULL,classvec=NULL, \dots)
}
\arguments{
\item{dataset}{Training dataset. A \code{\link{matrix}}, \code{\link{data.frame}},
\code{\link[Biobase:ExpressionSet-class]{ExpressionSet}} or
\code{\link[marray:marrayRaw-class]{marrayRaw-class}}.
If the input is gene expression data in a \code{\link{matrix}} or \code{\link{data.frame}}. The
rows and columns are expected to contain the variables (genes) and cases (array samples)
respectively.
}
\item{classvec}{A \code{factor} or \code{vector} which describes the classes in the training dataset.}
\item{type}{Character, "coa", "pca" or "nsc" indicating which data
transformation is required. The default value is type="coa".}
\item{ord.nf}{Numeric. Indicating the number of eigenvector to be saved, by default, if NULL, all eigenvectors will be saved.}
\item{trans}{Logical indicating whether 'dataset' should be transposed before ordination. Used by BGA
Default is \code{FALSE}.}
\item{x}{An object of class \code{ord}. The output from \code{ord}. It contains the projection coordinates from \code{ord},
the \$co or \$li coordinates to be plotted.}
\item{arraycol, genecol}{Character, colour of points on plot. If arraycol is NULL,
arraycol will obtain a set of contrasting colours using \code{getcol}, for each classes
of cases (microarray samples) on the array (case) plot. genecol is the colour of the
points for each variable (genes) on gene plot.}
\item{nlab}{Numeric. An integer indicating the number of variables (genes) at the end of
axes to be labelled, on the gene plot.}
\item{axis1}{Integer, the column number for the x-axis. The default is 1.}
\item{axis2}{Integer, the column number for the y-axis, The default is 2.}
\item{genelabels}{A vector of variables labels, if \code{genelabels=NULL} the row.names
of input matrix \code{dataset} will be used.}
\item{arraylabels}{A vector of variables labels, if \code{arraylabels=NULL} the col.names
of input matrix \code{dataset} will be used.}
\item{\dots}{further arguments passed to or from other methods.}
}
\details{
\code{ord} calls either \code{\link[ade4:dudi.pca]{dudi.pca}}, \code{\link[ade4:dudi.coa]{dudi.coa}} or \code{\link[ade4:dudi.nsc]{dudi.nsc}}
on the input dataset. The input format of the dataset
is verified using \code{\link[made4:array2ade4]{array2ade4}}.
If the user defines microarray sample groupings, these are colours on plots produced by \code{plot.ord}.
\bold{Plotting and visualising bga results:}
\emph{2D plots:}
\code{\link[made4:plotarrays]{plotarrays}} to draw an xy plot of cases (\$ls).
\code{\link[made4:plotgenes]{plotgenes}}, is used to draw an xy plot of the variables (genes).
\emph{3D plots:}
3D graphs can be generated using \code{\link[made4:do3d]{do3D}} and \code{\link[made4:html3D]{html3D}}.
\code{\link[made4:html3D]{html3D}} produces a web page in which a 3D plot can be interactively rotated, zoomed,
and in which classes or groups of cases can be easily highlighted.
\emph{1D plots, show one axis only:}
1D graphs can be plotted using \code{\link[made4:graph1D]{graph1D}}. \code{\link[made4:graph1D]{graph1D}}
can be used to plot either cases (microarrays) or variables (genes) and only requires
a vector of coordinates (\$li, \$co)
\bold{Analysis of the distribution of variance among axes:}
The number of axes or principal components from a \code{ord} will equal \code{nrow} the number of rows, or the
\code{ncol}, number of columns of the dataset (whichever is less).
The distribution of variance among axes is described in the eigenvalues (\$eig) of the \code{ord} analysis.
These can be visualised using a scree plot, using \code{\link[ade4:scatter]{scatterutil.eigen}} as it done in \code{plot.ord}.
It is also useful to visualise the principal components from a using a \code{ord} or principal components analysis
\code{\link[ade4:dudi.pca]{dudi.pca}}, or correspondence analysis \code{\link[ade4:dudi.coa]{dudi.coa}} using a
heatmap. In MADE4 the function \code{\link[made4:heatplot]{heatplot}} will plot a heatmap with nicer default colours.
\bold{Extracting list of top variables (genes):}
Use \code{\link[made4:topgenes]{topgenes}} to get list of variables or cases at the ends of axes. It will return a list
of the top n variables (by default n=5) at the positive, negative or both ends of an axes.
\code{\link[made4:sumstats]{sumstats}} can be used to return the angle (slope) and distance from the origin of a list of
coordinates.
}
\value{
A list with a class \code{ord} containing:
\item{ord}{Results of initial ordination. A list of class "dudi" (see \code{\link[ade4:dudi]{dudi}})}
\item{fac}{The input classvec, the \code{factor} or \code{vector} which described the classes in the input dataset. Can be NULL.}
}
\references{ }
\author{Aedin Culhane}
\seealso{See Also \code{\link[ade4:dudi.pca]{dudi.pca}}, \code{\link[ade4:dudi.coa]{dudi.coa}} or \code{\link[ade4:dudi.nsc]{dudi.nsc}}, \code{\link[made4:bga]{bga}},
}
\examples{
data(khan)
if (require(ade4, quiet = TRUE)) {
khan.coa<-ord(khan$train, classvec=khan$train.classes, type="coa")
}
khan.coa
plot(khan.coa, genelabels=khan$annotation$Symbol)
plotarrays(khan.coa)
# Provide a view of the first 5 principal components (axes) of the correspondence analysis
heatplot(khan.coa$ord$co[,1:5], dend="none",dualScale=FALSE)
}
\keyword{manip}
\keyword{multivariate}
|
7d42aa1113ba2f49eddd5f4df4b29b4a9e692e95 | 1376cfb3b3c86f2d4f6960b8d248af26ed0f42dd | /man/ensemblToReactome.Rd | 8cc816b727ea16962e027d888aeb144c426be5a7 | [] | no_license | RamsinghLab/TxDbLite | fe4ddfc0b76e7cfcc8938a2e13d42c26f43e0a3d | 358f14c72b234b8afedd8ffcf4ff23f7c9dd5b40 | refs/heads/master | 2021-08-17T02:39:37.533642 | 2016-08-21T16:37:34 | 2016-08-21T16:37:34 | 40,850,238 | 1 | 3 | null | 2016-04-10T19:42:55 | 2015-08-17T00:24:40 | R | UTF-8 | R | false | true | 1,066 | rd | ensemblToReactome.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ensemblToReactome}
\alias{ensemblToReactome}
\title{Data frame of ENSEMBL transcript IDs with corresponding Reactome pathway IDs, Reactome pathway-browser URLs, Reactome pathway names, and organism for numerous species including Mus musculus, Homo sapiens and many more. This is used for enrichment activation analysis.}
\format{A data frame full of reactome data
\itemize{
\item code: names of ENSEMBL IDs of transcripts and genes
\item term: Reactome ID pathway
\item URL: Reactome Pathway URL
\item name: Pathway name
\item from: Pathway Origin
\item organism: organism of pathway
}}
\source{
\url{www.reactome.org}
}
\usage{
ensemblToReactome
}
\description{
Data frame of ENSEMBL transcript IDs and corresponding reactome pathway IDs, Reactome URL pathway-browser, reactome pathway name, and organism for numerous species including Mus musculus, Homo Sapiens and many more. This is used for enrichment activation analysis.
}
\keyword{datasets}
|
c60f1c2933052dc87c9529f754564fc1cf6fa1b5 | 1a40a037374327dbe0ae788253069c70d551ceae | /docs/components_page/components/nav/fill.R | ccc926eeacc3af90f5cd348a3f933ea79fd67bde | [
"Apache-2.0"
] | permissive | tcbegley/dash-bootstrap-components | dc85af514d1c89d8b67b3d74b24292abacc32967 | 4cea8518bc7d2af299749eecb9e18d2e6522ff90 | refs/heads/main | 2021-11-25T06:56:17.357244 | 2021-10-21T20:55:58 | 2021-10-21T20:55:58 | 241,688,271 | 1 | 0 | Apache-2.0 | 2020-02-19T18:04:35 | 2020-02-19T18:04:34 | null | UTF-8 | R | false | false | 453 | r | fill.R | library(dashBootstrapComponents)
library(dashHtmlComponents)

# Both example navs use the same pair of links; build them fresh for each
# nav so the two navs do not share component instances.
makeNavLinks <- function() {
  list(
    dbcNavItem(dbcNavLink("A link", href = "#")),
    dbcNavItem(dbcNavLink("Another link with a longer label", href = "#"))
  )
}

# fill = TRUE: items expand to occupy all available horizontal space.
nav1 <- dbcNav(makeNavLinks(), fill = TRUE)

# justified = TRUE: items additionally receive equal width.
nav2 <- dbcNav(makeNavLinks(), justified = TRUE)

navs <- htmlDiv(list(nav1, htmlHr(), nav2))
|
3eb9fdcdb9470a189760f9bd3837b11c0468ca75 | 3c8ab3be9090997d092d3418a2f6b5fd1c9412f7 | /report.R | 9666ec9cba18a440ff0ef73d7728124b530a559e | [] | no_license | ices-taf/2019_VMS_ICES-QC | 7eb42b7113d73d6f4399bc0911acd779f87a1691 | 7ea1032d5f5d6e4ba1474bd5564b24d6413aa5b1 | refs/heads/master | 2022-04-28T18:13:54.761247 | 2022-03-10T09:50:43 | 2022-03-10T09:50:43 | 184,386,961 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,040 | r | report.R | ## Prepare plots and tables for report
## Before:
## After:

# create report directory
mkdir("report")

# libraries
library(rmarkdown)
library(icesTAF)
library(jsonlite)
taf.library(vmstools)
library(plyr)
library(ggplot2)
library(RColorBrewer)
library(doBy)
library(reshape2)

# utilities (helper functions used below: makeQCRmd, copyReport, ...)
source("utilities.R")

# settings: country list comes from the bootstrap configuration file
config <- read_json("bootstrap/config/config.json", simplifyVector = TRUE)

# loop over countries: render one QC report per country, skipping any
# country whose Rmd fails to render
for (country in config$countries) {
  #country <- "EST"

  msg("Running QC for ... ", country)

  # fill in and write the country-specific Rmd from the template
  fname <- makeQCRmd(country, "bootstrap/data", template = "report_QC_template.Rmd")

  # render Rmd to LaTeX; on failure log and move on to the next country
  ret <- try(render(fname, clean = FALSE, output_format = latex_document()))
  if (inherits(ret, "try-error")) {
    msg("FAILED - ", country)
    next
  }

  # compile pdf
  # NOTE(review): shell() is Windows-only -- confirm this script never runs
  # on other platforms (system() would be the portable call).
  x <- shell(paste('pdflatex -halt-on-error', ret))
  if (x == 0) {
    # copy report and Rmd file into the report directory
    copyReport(fname, report_dir = "report", keeps = c("pdf", "knit.md", "Rmd"))
  }

  msg("Done ... ", country)
}
|
a88e71ff763bf76eb32b0bfaba06a9b09eaa7c2b | 7cd8e6ac8097d2ad5811eab2f3688ff22b0a0feb | /man/Metrics.Rd | 7836427cba59a9afda84d2f955ea86ea689ed22d | [] | no_license | noahhl/r-google-analytics | 400e492011fd096448f7db677f6adaf81094f9f6 | 5c396e1bded0ef00a84c15f000f6fde37d45040f | refs/heads/master | 2016-08-04T15:04:37.911940 | 2011-03-23T15:21:06 | 2011-03-23T15:21:06 | 1,411,707 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,768 | rd | Metrics.Rd | \name{Metrics}
\alias{Metrics}
\title{Sets the metrics of interest (clicks, pageviews, etc)...}
\usage{Metrics(metrics.param=NA)}
\description{Sets the metrics of interest (clicks, pageviews, etc)
Optional.
The aggregated statistics for user activity in a profile, such as
clicks or pageviews. When queried by alone, metrics provide aggregate
values for the requested date range, such as overall pageviews or
total bounces. However, when requested with dimensions, values are
segmented by the dimension. For example, ga:pageviews requested with
ga:country returns the total pageviews per country rather than the
total pageviews for the entire profile. When requesting metrics, keep
in mind:
Any request must supply at least one metric because a request cannot
consist only of dimensions.
You can supply a maximum of 10 metrics for any query.
Most combinations of metrics from multiple categories can be used
together, provided no dimensions are specified.
The exception to the above is the ga:visitors metric, which can only
be used in combination with a subset of metrics.
Any given metric can be used in combination with other dimensions or
metrics, but only where Valid Combinations apply for that metric.
Metric values are always reported as an aggregate because the Data
Export API does not provide calculated metrics. For a list of common
calculations based on aggregate metrics.
NOTE: We do check for valid metrics.}
\value{The metrics value if metrics.param is not set.}
\arguments{\item{metrics.param}{A vector of up to 10 dimensions, either as
a single string or a vector or strings. E.g.
"ga:visits" or c("ga:visits", "ga:bounces")
If NULL is used, the metrics parameter will be
unset. If no parameter is specified, the current
metrics value is returned.}}
|
1d2ed52033558e9391a25c17408ae31bcb76bea4 | a585fef179c1b9806937a5b975e3d2226faf806e | /prep-data.R | 9e63bf5ae074ea8dde98fa87410e4816e51626e1 | [] | no_license | chansonm/clustering | 19daffad44b8c8a1407c864cc1612e4019b9eeca | 54c570391079692215148a4aaf663fb3142fefb8 | refs/heads/master | 2021-09-03T08:59:06.268602 | 2018-01-07T21:05:28 | 2018-01-07T21:05:28 | 110,950,813 | 0 | 0 | null | 2017-11-20T09:34:54 | 2017-11-16T09:25:06 | R | UTF-8 | R | false | false | 570 | r | prep-data.R | .data.read <- function(filename){
# Read the file into a data.table, then take a defensive copy so the
# caller never shares memory with the object fread() returned.
csv <- fread(filename)
data <- copy(csv)
# REMOVE EXCLUDED COMPANIES
# Todo: We do not need to exclude anything so far
# data <- data[which(data$excluded == 0),]
return(data)
}
# Prepare a table for clustering by dropping the excluded columns.
#
# data            - table with a `name` column plus feature columns
# excludedColumns - logical vector marking which columns to drop
# Returns the data restricted to the non-excluded columns.
.data.prepAndTest <- function(data, excludedColumns){
  # NOTE(review): data.tables ignore rownames, so this assignment has no
  # effect if `data` is a data.table -- confirm whether it is still needed.
  rownames(data) <- data$name
  # The original also evaluated colnames(), the bare table, and per-column
  # mean/variance via apply() and discarded all the results; that dead work
  # has been removed without changing the return value.
  dataForClustering <- data[, !excludedColumns, with=FALSE]
  return(dataForClustering)
}
2fa6e360f636170e2331d9441eca10dfe4a37f56 | 56dfd044b7c883836b200491c3dd33e8ed7e175f | /plot3.R | 46049ba959efc51174a4eafc0d21d86bec809798 | [] | no_license | josivalmarques/ExData_Plotting1 | 8124070d971244699e329b56dd7b64b1e9221bb0 | 5b04db0eb827628b0b863fe4615cc0ee34ab681c | refs/heads/master | 2020-12-25T11:15:10.556979 | 2014-07-13T17:32:56 | 2014-07-13T17:32:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,164 | r | plot3.R | ##read in the data
josival <- read.table("household_power_consumption.txt", skip = 66637, nrow = 2880,
sep = ";", colClasses="character",
col.names = colnames(read.table("household_power_consumption.txt",
nrow = 1, header = TRUE, sep=";")))
##put NA
josival[josival == "?"] = NA
##put as numeric
josival$Global_active_power = as.numeric(as.character(josival$Global_active_power))
png(file="plot3.png",width=480,height=480)
##Combine DateTime
josival$DateTime = (paste(josival$Date, josival$Time))
josival$DateTime = strptime(josival$DateTime, format = "%d/%m/%Y %H:%M:%S")
##plot
par(mfrow = c(1,1))
plot(josival$DateTime, josival$Sub_metering_1, type = "n", ylab = "Energy sub metering", xlab = "")
##lines
lines(josival$DateTime, josival$Sub_metering_1)
lines(josival$DateTime, josival$Sub_metering_2, col= "red")
lines(josival$DateTime, josival$Sub_metering_3, col= "blue")
##legend
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty = c(1,1),
col = c("black","red", "blue"))
##close
dev.off()
|
255fdb77b2d33ef4cc2eb40514b42633f6cea2d2 | 7771cfa6266820b85a80bc0941879e9ce2025fd2 | /R/journals_pVals-data.R | 47bc84cd55ad638abcb713f96ec525d847d48d0f | [] | no_license | leekgroup/swfdr | 461c2aec5454cdd57d1d4532c987210c72e17507 | f9832f6eb0332f5e9de4637eb50a41492e19cb4f | refs/heads/master | 2023-01-24T19:31:27.656006 | 2020-12-10T19:38:00 | 2020-12-10T19:38:00 | 63,630,067 | 5 | 2 | null | 2020-08-25T01:07:31 | 2016-07-18T19:14:24 | R | UTF-8 | R | false | false | 775 | r | journals_pVals-data.R | #' P-values from abstracts from articles in 5 biomedical journals (American Journal of Epidemiology, BMJ, JAMA, Lancet, New England Journal of Medicine), over 11 years (2000-2010).
#'
#' A dataset containing 15,653 p-values.
#'
#' @docType data
#'
#' @usage journals_pVals
#'
#' @return Object of class tbl_df, tbl, data.frame.
#'
#' @format A tbl data frame with 15,653 rows and 5 variables:
#' \describe{
#' \item{pvalue}{P-value}
#' \item{pvalueTruncated}{Equal to 1 if the p-value is truncated, 0 otherwise}
#' \item{pubmedID}{Pubmed ID of the article}
#' \item{year}{Year of publication}
#' \item{journal}{Journal}
#' }
#'
#' @keywords datasets
#'
#' @source Code for extracting p-values at: \url{inst/script/getPvalues.R}
#'
#' @name journals_pVals
NULL
|
6e1adb8e1d2f30c52d18248dee7b77875a0d675f | 3042149579fe266eb1cd3da50594176756b24c08 | /tests/testthat/test-spec-version.R | bf06ae2ac1395f009b2b3a39ea61c9d4c8b98754 | [] | no_license | hadley/vegawidget | 19e614adacc84038eda203eac5d3567cb9827831 | d0dbd0cabd4f2f1daf6f2e0658a81c50a9850a9c | refs/heads/master | 2023-06-24T23:40:53.108854 | 2021-06-20T16:50:13 | 2021-06-20T16:50:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 888 | r | test-spec-version.R | schema_vega <- "https://vega.github.io/schema/vega/v5.json"
schema_vega_lite <- "https://vega.github.io/schema/vega-lite/v4.json"
vega <- list(library = "vega", version = "5")
vega_lite <- list(library = "vega_lite", version = "4")
test_that(".schema_type warns", {
empty <- list(library = "", version = "")
expect_warning(
expect_identical(.schema_type("NULL"), empty),
"NULL$"
)
expect_warning(
expect_identical(.schema_type("foo"), empty),
"foo$"
)
})
test_that(".schema_type works", {
expect_identical(.schema_type(schema_vega), vega)
expect_identical(.schema_type(schema_vega_lite), vega_lite)
})
test_that("vw_spec_version works", {
expect_identical(vw_spec_version(spec_mtcars), vega_lite)
})
test_that("vega_schema works", {
expect_identical(vega_schema(), schema_vega_lite)
expect_identical(vega_schema("vega"), schema_vega)
})
|
49f3a7adbc0643358dcfd4382e2292fde449bc19 | 02c37615762af39de855590a40efd5d29858c9fc | /R/transport_plan.R | e23222ea264b09bcd0b46181366b7d4c1b45d3ba | [] | no_license | ericdunipace/WpProj | d950d1f8e36094b1b93cd2bb62e99fc1b9ec3aef | 6039e5ce8c5d3386e776fc1e6784807411805889 | refs/heads/master | 2023-03-27T19:23:12.132980 | 2021-04-02T21:32:56 | 2021-04-02T21:32:56 | 229,637,281 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,276 | r | transport_plan.R | transport_plan_given_C <- function(mass_x, mass_y, p = 2,
cost=NULL, method = "exact", ...) {
method <- match.arg(method, c("exact","sinkhorn","greenkhorn",
"randkhorn", "gandkhorn", "sinkhorn2"))
dots <- list(...)
epsilon <- as.double(dots$epsilon)
niter <- as.integer(dots$niter)
stopifnot(all(is.finite(cost)))
if(length(epsilon) == 0) epsilon <- as.double(0.05)
if(length(niter) == 0) niter <- as.integer(100)
if (is.null(cost) ) stop("Cost matrix must be provided")
tplan <- if (method == "exact" | method == "greenkhorn" | method == "sinkhorn" |
method == "randkhorn" | method == "gandkhorn") {
n1 <- length(mass_x)
n2 <- length(mass_y)
if(n1 > 1 & n2 > 1) {
transport_C_(mass_a_ = mass_x, mass_b_ = mass_y, cost_matrix_ = cost^p,
method_ = method, epsilon_ = epsilon, niter_ = niter)
} else if (n2 == 1) {
list(from = 1:n1, to = rep(1,n1), mass = mass_x)
} else if (n1 == 1) {
list(from = rep(1,n2), to = 1:n2, mass = mass_y)
} else {
stop("Some error found in mass_x or mass_y length. Check mass input.")
}
} else if (method == "sinkhorn2") {
sinkhorn_transport(mass_x = mass_x, mass_y = mass_y, cost = cost^p,
eps = epsilon, niter = niter)
} else {
stop( paste0( "Transport method ", method, " not supported" ) )
}
return( tplan )
}
transport_plan <- function(X, Y, p = 2, ground_p = 2,
observation.orientation = c("colwise","rowwise"),
method = c("exact", "sinkhorn", "greenkhorn",
"randkhorn", "gandkhorn", "sinkhorn2",
"hilbert", "rank",
"univariate",
"univariate.approximation",
"univariate.approximation.pwr"),... ) {
obs <- match.arg(observation.orientation)
method <- match.arg(method)
if (!is.matrix(X)) {
X <- as.matrix(X)
if(dim(X)[2] == 1) X <- t(X)
}
if (!is.matrix(Y)) {
Y <- as.matrix(Y)
if(dim(Y)[2] == 1) Y <- t(Y)
}
p <- as.double(p)
ground_p <- as.double(ground_p)
if(obs == "rowwise"){
X <- t(X)
Y <- t(Y)
}
stopifnot(all(is.finite(X)))
stopifnot(all(is.finite(Y)))
cost <- tplan <- NULL
if (method == "univariate.approximation") {
tplan <- list(from = apply(X, 1, order), to = apply(Y,1,order), mass = rep(1/ncol(X), ncol(X)))
cost <- sapply(1:nrow(X), function(i)
sum((X[i, tplan$from[,i],drop=FALSE] -
Y[i, tplan$to[,i],drop = FALSE] )^ground_p * tplan$mass )^(1.0/ground_p))
} else if (method == "univariate.approximation.pwr") {
dots <- list(...)
if(is.null(dots$is.X.sorted)) dots$is.X.sorted <- FALSE
is.A.sorted <- as.logical(dots$is.X.sorted)
tplan <- transport_(A_ = X, B_ = Y, p = p, ground_p = ground_p,
method_ = method, a_sort = is.A.sorted)
cost <- sum((X[tplan$from] -
Y[tplan$to] )^p * tplan$mass*1/nrow(Y))
} else if (method == "exact" | method == "sinkhorn" | method == "greenkhorn" | method == "randkhorn" | method == "gandkhorn" | method == "sinkhorn2") {
# tplan <- transport_(X, Y, p, ground_p, "shortsimplex")
n1 <- ncol(X)
n2 <- ncol(Y)
mass_x <- as.double(rep(1/n1, n1))
mass_y <- as.double(rep(1/n2, n2))
cost <- cost_calc(X, Y, ground_p)
tplan <- transport_plan_given_C(mass_x, mass_y, p, cost, method, ...)
} else if (method == "univariate" | method == "hilbert" | method == "rank") {
dots <- list(...)
if(is.null(dots$is.X.sorted)) dots$is.X.sorted <- FALSE
is.A.sorted <- as.logical(dots$is.X.sorted)
tplan <- transport_(A_ = X, B_ = Y, p = p, ground_p = ground_p,
method_ = method, a_sort = is.A.sorted, epsilon = 0.0, niter = 0)
cost <- c((((colSums(abs(X[, tplan$from, drop=FALSE] - Y[, tplan$to, drop=FALSE])^ground_p))^(1/ground_p))^p %*% tplan$mass)^(1/p))
} else {
stop( paste0( "Transport method ", method, " not supported" ) )
}
return(list(tplan = tplan, cost = cost ))
}
|
2fb7d1fb76750d36695d718966b8bac8242d6439 | e56da52eb0eaccad038b8027c0a753d9eb2ff19e | /tests/testthat/test-mst.R | 304dee53e9062d4e20f9d6203c70ddab231f73eb | [] | no_license | ms609/TreeTools | fb1b656968aba57ab975ba1b88a3ddf465155235 | 3a2dfdef2e01d98bf1b58c8ee057350238a02b06 | refs/heads/master | 2023-08-31T10:02:01.031912 | 2023-08-18T12:21:10 | 2023-08-18T12:21:10 | 215,972,277 | 16 | 5 | null | 2023-08-16T16:04:19 | 2019-10-18T08:02:40 | R | UTF-8 | R | false | false | 1,565 | r | test-mst.R | test_that("MSTEdges() handles bad input", {
expect_error(MSTEdges(matrix(1:12, 6, 2)), "distance")
})
test_that("minimum_spanning_tree.cpp handles bad input", {
expect_equal(minimum_spanning_tree(numeric(0)), matrix(0, 0, 0))
expect_error(minimum_spanning_tree(c(1:-1)),
"`order` contains entries < 0")
expect_error(minimum_spanning_tree(c(3, 100, 1)),
"`order` contains entries > `length.order.`")
expect_error(minimum_spanning_tree(c(3, 1, NA_real_)),
"`order` contains NA")
expect_error(minimum_spanning_tree(0:13),
"`length.order.`.* not.* triangular")
})
test_that("MST edges calculated correctly", {
set.seed(0)
points <- matrix(c(0.1, 0, 1.9, 2, 1.1, 1,
0.1, 2, 0, 2, 1, 1.1,
0, 0, 0, 0, 1, -1), 6)
distances <- dist(points)
apeMst <- matrix(c(5, 6, 6, 5, 5, 1, 1:4), 5)
distMat <- as.matrix(distances)
expect_equal(MSTLength(distances, apeMst),
MSTLength(distances))
expect_equal(MSTLength(distances),
MSTLength(distMat))
MSTPlot <- function() {
plot(points, asp = 1, ann = FALSE)
expect_equal(MSTEdges(distances, FALSE),
MSTEdges(distances, TRUE, points[, 1], points[, 2]))
}
skip_if_not_installed("vdiffr", minimum_version = "1.0.0")
skip_if(packageVersion("graphics") < "4.1.0")
vdiffr::expect_doppelganger("MST plotting", MSTPlot)
})
test_that("MST handles large distance matrices", {
x <- dist(0:300)
expect_equal(c(300, 2), dim(MSTEdges(x)))
})
|
3f106046524e678e877270966227c1b3df629f78 | 396418ee3c753f146f45fe6669a0fb2b04afcdc1 | /dust.r | c2f5af65e5de9b4940fefb7a987a29d2f65cc48c | [] | no_license | YeaJi1984/R-TEST | d8f116f5fbd326404dd90ea57db02c2de3ece02c | 592deb4eb4157d0925aab84d56c0cd37dcc8e46f | refs/heads/main | 2023-03-26T05:46:47.354663 | 2021-04-02T08:48:59 | 2021-04-02T08:48:59 | 350,265,796 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,083 | r | dust.r | #서울시의 구 중에서 성북구와 중구의 미세먼지 비교 및 차이 검정..
library("readxl")
library("dplyr")
dustdata <- read_excel("dustdata.xlsx")
head(dustdata)
#성북구와 중구 데이터만 추출
dustdata_anal <- dustdata %>% filter(area %in% c("성북구", "중구"))
#데이터 현황 구체적인 파악
#데이터 날짜 확인 -> 2017년 9월 1일부터 12월 31일까지..
count(dustdata_anal, yyyymmdd) %>% arrange(desc(n)) #모든 데이터가 2개 확인
count(dustdata_anal, area) # 모든 데이터가 2개 확인.
#실행 결과를 보면 빠진 데이터가 없이 동일한 날짜는 2개씩 구에 따른 미세먼지 수치를 122
#성북구와 중구에 데이터를 각각 분리
dust_sb <- subset(dustdata_anal, area =="성북구")
dust_jg <- subset(dustdata_anal, area =="중구")
#sub(데이터, 조건)
#dust_sb <- dustdata_anal %>% filter(area=="성북구")
#dust_sb <- dustdata_anal %>% filter(area=="중구")
#분리한 두 개구의 데이터를 이용해서 기초 통계량 도출
|
3a71535e8af854a6e35fac7e7f14d24acee37211 | 88feba0d520bec949061fdc586b5a083dd9277ef | /plot3.R | b98debe215da953f9f91c2de23549af3bf650eb2 | [] | no_license | Faerydoc/ExData_Plotting1 | 4fb003f1bdf05998bc8210a98dc52816dabe2e7d | ac6ce67d53812f35bf2c6159db401c56753b6b26 | refs/heads/master | 2020-12-29T18:48:00.124624 | 2014-09-07T00:30:43 | 2014-09-07T00:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 881 | r | plot3.R | library(dplyr)
library(data.table)
library(lubridate)
# Read data into R
fh <- fread("household_power_consumption.txt", na.strings="?")
data <- filter(fh, grep("^[1,2]/2/2007", Date)) ## Just get the data I need
# Convert Date and Time to a POSIX DateTime field using lubridate
data$Timestamp <- dmy_hms(paste(data$Date, data$Time))
## Open PNG device; create 'plot3.png' in working directory
png(file = "plot3.png", width = 480, height = 480)
## Create plot and send to a file
plot(data$Timestamp, data$Sub_metering_1,type="l",
ylab="Energy sub metering", xlab="")
points(data$Timestamp, data$Sub_metering_2, type="l",col="red")
points(data$Timestamp, data$Sub_metering_3, type="l",col="blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() ## Close the PNG file device
|
e132c3eb94900b4201215002137d3368bbaa2b82 | 2e73e542d19a4780d24dec58d8e4a0540a4c66ef | /DummyRfile.R | c78f68172b0760b74a7eaa2c9537768e2505a32f | [] | no_license | jcmcdavid/Test-11-11 | 847b95f70ab1339201a3fe22807396afbe501646 | 856a46cdb33b03b7d6e1baeb6399e3a6311dad89 | refs/heads/master | 2021-01-10T08:55:19.325880 | 2015-11-11T15:42:27 | 2015-11-11T15:42:27 | 45,989,266 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27 | r | DummyRfile.R | #Test R file
fun <- 2 + 3
|
146eab891c3ffcb66f23a1fb88a22e73e1537868 | 154be347cf7dc2c3b2e8c448546119bf5a6078fe | /man/box_cox_shift.Rd | 578739c692dfd55c89bf039b81dc35e1f3f8ea61 | [] | no_license | cran/rrscale | 6163755ddb653f95a9ffe8c9605ac403fec2a15f | fa6868319495e268a160bfebfc33134f25f01088 | refs/heads/master | 2021-07-22T02:43:07.665602 | 2020-05-26T10:30:02 | 2020-05-26T10:30:02 | 175,442,575 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 647 | rd | box_cox_shift.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transformations.R
\docType{data}
\name{box_cox_shift}
\alias{box_cox_shift}
\title{Box-cox transformation of shifted variable}
\format{An object of class \code{list} of length 2.}
\usage{
box_cox_shift
}
\description{
\itemize{
\item{T} the transformation with arguments Y, the data, lambda the parameter, and boolean inverse to calculate inverse transformation. The parameter lambda has two real elements (1) the power and (2) the additive shift to the data.
\item{T_deriv} the transformation with arguments Y, the data, lambda the parameter.
}
}
\keyword{datasets}
|
eb95d997e6c18546d708ac3cfb201ff7bb4815f7 | c8674dc53aa778b3d8c0759f117b8872196d3009 | /R/GenerateCompleteYPheno.R | 5a617579cb60b43cf1d724a2acfb963f642ccd8c | [] | no_license | andrewhaoyu/TOP | d8acae9cd8668d70f424997cc6c91b21bf5b5dce | 0de8cd088754079b9b9a11ee785fc6a34f3bab29 | refs/heads/master | 2022-10-04T21:39:09.104998 | 2022-08-25T21:19:01 | 2022-08-25T21:19:01 | 148,667,732 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,240 | r | GenerateCompleteYPheno.R | ###Generate the complete y pheno dataframe based on the incomplete data file
#' Title
#'
#' @param y.pheno
#' @param missingTumorIndicator
#'
#' @return
#' @export
#'
#' @examples
GenerateMissingPosition <- function(y.pheno,missingTumorIndicator){
tumor.number <- ncol(y.pheno)-1
find.missing.position.text = "idx <- which("
for(i in 2:(tumor.number+1)){
if(i == (tumor.number+1)){
find.missing.position.text <- paste0(find.missing.position.text,"y.pheno[,",i,"]==missingTumorIndicator)")
}else{
find.missing.position.text <- paste0(find.missing.position.text,"y.pheno[,",i,"]==missingTumorIndicator|")
}
}
eval(parse(text=find.missing.position.text))
return(idx)
}
#' Title
#'
#' @param y.pheno
#' @param missingTumorIndicator
#'
#' @return
#' @export
#'
#' @examples
GenerateCompleteYPheno <- function(y.pheno,missingTumorIndicator){
tumor.number <- ncol(y.pheno)-1
find.missing.position.text = "idx <- which("
for(i in 2:(tumor.number+1)){
if(i == (tumor.number+1)){
find.missing.position.text <- paste0(find.missing.position.text,"y.pheno[,",i,"]==missingTumorIndicator)")
}else{
find.missing.position.text <- paste0(find.missing.position.text,"y.pheno[,",i,"]==missingTumorIndicator|")
}
}
eval(parse(text=find.missing.position.text))
if(length(idx)!=0){
y.pheno.complete = y.pheno[-idx,]
}else{
y.pheno.complete = y.pheno
}
return(y.pheno.complete)
}
#' Title
#'
#' @param y.pheno
#' @param x.all
#' @param missingTumorIndicator
#'
#' @return
#' @export
#'
#' @examples
GenerateCompleteXCovariates <- function(y.pheno,x.all,missingTumorIndicator){
tumor.number <- ncol(y.pheno)-1
find.missing.position.text = "idx <- which("
for(i in 2:(tumor.number+1)){
if(i == (tumor.number+1)){
find.missing.position.text <- paste0(find.missing.position.text,"y.pheno[,",i,"]==missingTumorIndicator)")
}else{
find.missing.position.text <- paste0(find.missing.position.text,"y.pheno[,",i,"]==missingTumorIndicator|")
}
}
eval(parse(text=find.missing.position.text))
if(length(idx)!=0){
x.all.complete <- x.all[-idx,]
}else{
x.all.complete <- x.all
}
return(x.all.complete)
}
|
c8130356de0814dda404fa6047d98bfa3c362707 | a500013b7a3733f72d747082e10801e98567097a | /figure_invasive_species_taxa_barplot.R | 2493cf94760f40f05ab32b965df3a9093b7d9d0c | [] | no_license | robcrystalornelas/impacts_systematic_review | 4ff913c79e3f7b14d6ba79f6cc4f9e612fe68c29 | 9ed0f457f72bad9fb7de420bb7a9744dd9ada667 | refs/heads/master | 2022-03-18T14:40:31.648205 | 2019-11-20T00:19:12 | 2019-11-20T00:19:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,162 | r | figure_invasive_species_taxa_barplot.R | ## READ IN DATA ####
source("~/Desktop/ch2_impacts_systematic_review/scripts/impacts_systematic_review/clean_raw_data.R")
## Load libraries #####
library(dplyr)
library(ggplot2)
library(cowplot)
taxa_for_barplot <- dplyr::select(raw_data, latinname, invasivespeciestaxapysek)
taxa_for_barplot
unique_taxa_for_barplot <- unique(taxa_for_barplot)
dim(unique_taxa_for_barplot)
unique_taxa_for_barplot
# Make the plot
gg_taxa <-
ggplot(unique_taxa_for_barplot, aes(x = reorder(invasivespeciestaxapysek, invasivespeciestaxapysek, function(x)
-
length(x))))
gg_taxa <- gg_taxa + geom_bar(stat = "count", fill = "#7e4e90ff") +
# coord_cartesian(ylim=c(0,1050), expand = FALSE) +
scale_y_continuous(expand = c(0, 0), limits = c(0, 325))
gg_taxa
gg_taxa <- gg_taxa + theme_cowplot()
gg_taxa <- gg_taxa + ylab("Frequency")
gg_taxa <- gg_taxa + xlab("")
gg_taxa <-
gg_taxa + theme(
axis.text = element_text(size = 23),
axis.text.x = element_text(
angle = 90,
hjust = 1,
vjust = 0.5,
size = 23
),
strip.text = element_text(size = 23),
axis.title = element_text(size=23)
) # Change axis title size
gg_taxa
|
621778eb8ac1338588f22f5ec952b44a2b87f7d6 | 682e8ed167f0c282c0f62499b062a2b7442787b9 | /cachematrix.R | 3fda4164a03e6f9d9af4eb8c332704c391a70ca6 | [] | no_license | skmgowda27/ProgrammingAssignment2 | ef5af1964d1a9fb3925dd94b1f34ee74cce63e70 | 94b47f2a3c76a624995e9ee59b3db316921f2f06 | refs/heads/master | 2021-01-15T16:28:14.558603 | 2015-07-24T14:58:46 | 2015-07-24T14:58:46 | 39,594,821 | 0 | 0 | null | 2015-07-23T21:45:10 | 2015-07-23T21:45:10 | null | UTF-8 | R | false | false | 3,824 | r | cachematrix.R | ## makeCacheMatrix function creates a matrix object that can be used cache its inverse.It however does not calculate the inverse
## cacheSolve function computes the inverse of the matrix returned by makeCacheMatrix function.
##If the inverse has already been calculated and also the matrix has not been changed,
##then the cachesolve retrieves the inverse from the cache and prints it out
## makeCacheMatrix is a function that stores a list of functions
##makeCacheMatrix contains 4 functions: set, get, setinverse, getinverse.
makeCacheMatrix <- function(x = matrix()) {
inver<- NULL
##set is a function that changes the matrix stored in the main function (makeCacheMatrix).
set <- function(y = matrix()){
x <<- y
inver<<- NULL
}
## get is a function that returns the matrix x stored in the main function(makeCacheMatrix). Doesn't require any input.
get <- function() x
##setinverse and getinverse don't calculate the inverse of the matrix,
##they simply store the value of the input in a variable inver into the main function makeCacheMatrix (setinverse) and return it (getinverse)
setinverse <- function(solve = matrix()) inver <<- solve
getinverse <- function() inver
##To store the 4 functions in the function makeCacheMatrix, we need the function list(),
##so that when we assign makeCacheMatrix to an object, the object has all the 4 functions.
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The first thing cacheSolve does is to verify the value inver, stored previously with getinverse, exists and is not NULL.
##If it exists in memory, it simply returns a message and the value inver, that is supposed to be the inverse of the matrix, but not necessarily.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
##If the inverse of the matrix is in the memory then, "return(m)" would have ended the function
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
##data gets the matrix stored with makeCacheMatrix, m calculates the inverse of the matrix
##x$setinverse(m) stores it in the object generated assigned with makeCacheMatrix.
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
############## Sample Output###########################
##> mat <- matrix(c(0,0,2,55,23,54,8,10,25,65,15,44,19,30,100,120),nrow = 4,ncol = 4)
##solve(mat)
##[,1] [,2] [,3] [,4]
##[1,] 0.16332107 -0.068137639 -0.033886580 0.0194140575
##[2,] 0.39645092 -0.145663698 -0.033069603 0.0012025310
##[3,] -0.33651033 0.140088665 0.022910075 -0.0008330937
##[4,] 0.01549405 -0.007997451 0.009886789 -0.0003595196
##matrixx <- makeCacheMatrix(mat)
##matrixx$get()
##[,1] [,2] [,3] [,4]
##[1,] 0 23 25 19
##[2,] 0 54 65 30
##[3,] 2 8 15 100
##[4,] 55 10 44 120
##> matrixx$getinverse()
##NULL
##> cacheSolve(matrixx)
##[,1] [,2] [,3] [,4]
##[1,] 0.16332107 -0.068137639 -0.033886580 0.0194140575
##[2,] 0.39645092 -0.145663698 -0.033069603 0.0012025310
##[3,] -0.33651033 0.140088665 0.022910075 -0.0008330937
##[4,] 0.01549405 -0.007997451 0.009886789 -0.0003595196
##> cacheSolve(matrixx)
##getting cached data
##[,1] [,2] [,3] [,4]
##[1,] 0.16332107 -0.068137639 -0.033886580 0.0194140575
##[2,] 0.39645092 -0.145663698 -0.033069603 0.0012025310
##[3,] -0.33651033 0.140088665 0.022910075 -0.0008330937
##[4,] 0.01549405 -0.007997451 0.009886789 -0.0003595196
|
00414a2f59cc48f8cabd23f96bb8da7b5ab59089 | baaf7d6c4636acce3b675be5384753afaf12cebc | /generateData/get_selection_diff_dist/pancreas/run_rmd.R | d1766d7ce7756a6ea7b4f77302d9c2883d1d4007 | [] | no_license | r3fang/am_geneBasis | ce44a77cc1f2efced8f52d82f8da4e11856a1155 | 362e2c54229ba04d28fd0e7025eaa37acfa0895c | refs/heads/main | 2023-08-25T10:33:46.356344 | 2021-10-18T14:20:07 | 2021-10-18T14:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 146 | r | run_rmd.R | library(rmarkdown)
render("/nfs/research1/marioni/alsu/geneBasis/am_geneBasis/generateData/get_selection_diff_dist/pancreas/gene_selection.Rmd")
|
9b0b9bd2859e4683a9d495b5d62b83b1bf4291bc | 1bee70411de2016d2e9dcfcd8ead3043b620bc1a | /man/predict.fastglm.Rd | d1cbf49bf67eff541b3dd26eb970e5520268197a | [] | no_license | jaredhuling/fastglm | df92e850a6b852388731035522fd9148ac3f122b | 9a04daa4a99761fee4fc87ecdb100a530f96b161 | refs/heads/master | 2022-08-23T09:01:09.698721 | 2022-07-23T15:12:48 | 2022-07-23T15:12:48 | 107,818,291 | 52 | 15 | null | null | null | null | UTF-8 | R | false | true | 1,546 | rd | predict.fastglm.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glm_methods.R
\name{predict.fastglm}
\alias{predict.fastglm}
\title{Obtains predictions and optionally estimates standard errors of those predictions from a fitted generalized linear model object.}
\usage{
\method{predict}{fastglm}(
object,
newdata = NULL,
type = c("link", "response"),
se.fit = FALSE,
dispersion = NULL,
...
)
}
\arguments{
\item{object}{a fitted object of class inheriting from "\code{fastglm}".}
\item{newdata}{a matrix to be used for prediction}
\item{type}{the type of prediction required. The default is on the scale of the linear predictors;
the alternative "\code{response}" is on the scale of the response variable. Thus for a default binomial
model the default predictions are of log-odds (probabilities on logit scale) and \code{type = "response"}
gives the predicted probabilities. The "\code{terms}" option returns a matrix giving the fitted values of each
term in the model formula on the linear predictor scale.
The value of this argument can be abbreviated.}
\item{se.fit}{logical switch indicating if standard errors are required.}
\item{dispersion}{the dispersion of the GLM fit to be assumed in computing the standard errors.
If omitted, that returned by \code{summary} applied to the object is used.}
\item{...}{further arguments passed to or from other methods.}
}
\description{
Obtains predictions and optionally estimates standard errors of those predictions from a fitted generalized linear model object.
}
|
204150008b0b7d6fbf28a8ca9bae78493a03ea90 | 79b935ef556d5b9748b69690275d929503a90cf6 | /man/Kmodel.dppm.Rd | 8251511b14daf13aa55b31cdda547e86e1f4d9be | [] | no_license | spatstat/spatstat.core | d0b94ed4f86a10fb0c9893b2d6d497183ece5708 | 6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70 | refs/heads/master | 2022-06-26T21:58:46.194519 | 2022-05-24T05:37:16 | 2022-05-24T05:37:16 | 77,811,657 | 6 | 10 | null | 2022-03-09T02:53:21 | 2017-01-02T04:54:22 | R | UTF-8 | R | false | false | 1,216 | rd | Kmodel.dppm.Rd | \name{Kmodel.dppm}
\alias{Kmodel.detpointprocfamily}
\alias{pcfmodel.detpointprocfamily}
\alias{Kmodel.dppm}
\alias{pcfmodel.dppm}
\title{
K-function or Pair Correlation Function of a
Determinantal Point Process Model
}
\description{Returns the theoretical \eqn{K}-function
or theoretical pair correlation function of a determinantal point
process model as a function of one argument \eqn{r}.
}
\usage{
\method{Kmodel}{dppm}(model, \dots)
\method{pcfmodel}{dppm}(model, \dots)
\method{Kmodel}{detpointprocfamily}(model, \dots)
\method{pcfmodel}{detpointprocfamily}(model, \dots)
}
\arguments{
\item{model}{Model of class \code{"detpointprocfamily"} or \code{"dppm"}.}
\item{\dots}{Ignored (not quite true -- there is some undocumented internal use)}
}
\value{
A function in the \R language, with one numeric argument \code{r},
that can be used to evaluate the theoretical \eqn{K}-function or
pair correlation function of the model at distances \code{r}.
}
\author{
\spatstatAuthors.
}
\examples{
model <- dppMatern(lambda=100, alpha=.01, nu=1, d=2)
KMatern <- Kmodel(model)
pcfMatern <- pcfmodel(model)
plot(KMatern, xlim = c(0,0.05))
plot(pcfMatern, xlim = c(0,0.05))
}
|
075bb9d3d1506ff4a08f46098d1e194ea0b69461 | 075808db3fb39c52a6c39cc5b71f8883346431c0 | /man/batch_tuneCP.Rd | 282a487f0f559230872356f227caec6f7721d7b8 | [
"MIT"
] | permissive | MaikeMM/OptimalTRees | 5d4fbeff5b45a0edb47124a5eaf871669129a396 | efb6127d35d337d01f251f26b60c4c0fc190e22b | refs/heads/master | 2020-07-04T04:07:00.854405 | 2019-07-26T07:17:00 | 2019-07-26T07:17:00 | 202,150,471 | 0 | 1 | null | 2019-08-13T13:27:11 | 2019-08-13T13:27:11 | null | UTF-8 | R | false | true | 992 | rd | batch_tuneCP.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/finalization.R
\name{batch_tuneCP}
\alias{batch_tuneCP}
\title{Find optimal parameter alpha for OCT}
\usage{
batch_tuneCP(trees, losses, trainingdata, validationdata, trainingweights,
validationweights, misclassification_weights)
}
\arguments{
\item{trees}{A list of dtree objects}
\item{losses}{A numeric array of corresponding values of loss functions of the dtree objects in trees,
found with alpha = 0}
\item{trainingdata}{Dataframe that will be used to train the OCT}
\item{validationdata}{Dataframe that will be used to validate the OCT}
}
\value{
A list of two, vbest is the best possible misclassification rate on the validation data and alphabest
is the best corresponding value for parameter alpha.
}
\description{
For a batch of trees, constructs a mean curve of validation error as a function of the complexity
parameter so that the optimal parameter alpha (smallest validation error) can be found.
}
|
ed9955d64d74f106e210deee1784e1fbb6c91b52 | 8aaa825e2f72cbe40ca7ac0b2e2f87c76dbc79be | /R/object_function.R | 2d8bb31ad9fe9e66b9c0794d3d64d763525adf93 | [] | no_license | douglascm/trafficr | 50bb1b03d5ffca411e3177436e091d45ff17b886 | 3418a5ebd5168a768b0c0c11cc02db7ee6c4ac03 | refs/heads/master | 2020-12-03T19:08:05.685991 | 2020-03-09T13:47:11 | 2020-03-09T13:47:11 | 231,444,558 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 705 | r | object_function.R | #' Objective function
#'
#' Objective function in the linear search step
#' of the optimization model of user equilibrium
#' traffic assignment problem, the only variable
#' is mixed_flow in this case.
#'
#' @param mixed_flow Vector with flows used by golden section search technique
#' @param graph Graph object created with configure_graph() function
#'
#' @return Vector with objective function calculation from flow
#'
#' @export
object_function <- function(mixed_flow,graph){ #mixed_flow<-(1 - leftX) * flow + leftX * auxiliary_flow
val = 0
for(i in 1:length(mixed_flow)){
val <- val + link_time_performance_integrated(mixed_flow[i], t0=graph$d[i], capacity=graph$cap[i])
}
val
}
|
ae8c7beed05d000c30f462f622ffa4ec0e2f4ff8 | 5e0088bbe018dab0fd409665c477005002afccea | /CSE5243-DataMining/hw2/testIRIS.r | 3b4e202bc7190b3246654d48a3b16ba17d4e17bf | [] | no_license | XuShulei/Course-Stuff | 68e6f6dc9d69385a21cd3d91408fb2020aa98115 | 247876184cb5910019cb5fbe95469ae19ac701f0 | refs/heads/master | 2022-11-27T22:39:35.082456 | 2020-08-05T17:04:15 | 2020-08-05T17:05:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,783 | r | testIRIS.r | #==============================
# Author: Ching-Hsiang Chu
# Email: chu.368@osu.edu
#==============================
normalize = function(x) {
num = x - min(x)
denom = max(x) - min(x)
return (num/denom)
}
# ---- k-NN classification of the Iris data with L1 and L2 distances ----
# Reads a training and a test CSV, precomputes all pairwise distances once,
# then sweeps every odd k and records error rates, TPR/FPR and run time.
start.time <- Sys.time()
iris_training_raw = read.csv(file="./Iris.csv", header = TRUE, sep = ",")
iris_testing_raw = read.csv(file="./Iris_Test.csv", header = TRUE, sep = ",")
#summary(iris_training)
# data.matrix() coerces every column (including the class column) to numeric.
iris_training = data.matrix(iris_training_raw)
iris_testing = data.matrix(iris_testing_raw)
length_traning = nrow(iris_training)
length_testing = nrow(iris_testing)
# df_l1 / df_l2: test-by-training distance matrices (Manhattan / Euclidean)
# computed over the four feature columns only.
df_l1 = matrix(, nrow = length_testing, ncol = length_traning)
df_l2 = matrix(, nrow = length_testing, ncol = length_traning)
for(i in 1:length_traning) {
for(j in 1:length_testing) {
df_l1[j,i] = sum(abs(iris_training[i, 1:4] - iris_testing[j, 1:4]))
df_l2[j,i] = sqrt(sum((iris_training[i, 1:4] - iris_testing[j, 1:4])^2))
}
}
end.time <- Sys.time()
# Distance computation is shared by every k, so its cost is added to each
# per-k timing below.
time_calc_dist = (end.time - start.time)
max_k=nrow(iris_training)
# One row per odd k in 3..max_k; row names are the k values.
exe_time = matrix(nrow = length(seq(3, max_k, 2)), ncol = 1, dimnames = list(seq(3, max_k, 2), "Exec. Time"))
error_rate_l1 = matrix(nrow = length(seq(3, max_k, 2)), ncol = 1, dimnames = list(seq(3, max_k, 2), "Accurary"))
error_rate_l2 = matrix(nrow = length(seq(3, max_k, 2)), ncol = 1, dimnames = list(seq(3, max_k, 2), "Accurary"))
TPR = matrix(nrow = length(seq(3, max_k, 2)), ncol = 1, dimnames = list(seq(3, max_k, 2), "TPR"))
FPR = matrix(nrow = length(seq(3, max_k, 2)), ncol = 1, dimnames = list(seq(3, max_k, 2), "FPR"))
for (k in seq(3, max_k, 2)) {
start.time <- Sys.time()
# Ordinal column labels ("1st", "2nd", ...) for the neighbour-rank matrices.
colLabel = c()
for (kk in 1:k) {
colLabel[kk] = switch(paste(kk), "1" = "1st", "2" = "2nd", "3" = "3rd", paste(kk,"th",sep=""))
}
# dist_rank_*: for each test row, the indices of its k nearest training rows.
dist_rank_l1 = matrix(nrow = length_testing, ncol = k, dimnames = list(NULL, colLabel))
dist_rank_l2 = matrix(nrow = length_testing, ncol = k, dimnames = list(NULL, colLabel))
for(i in 1:length_testing) {
# Sort the distances and find the top k closest samples from traning set
sortedRow = sort.list(df_l1[i,1:length_traning])
sortedRow2 = sort.list(df_l2[i,1:length_traning])
for (kk in 1:k) {
dist_rank_l1[i, kk] = sortedRow[kk]
dist_rank_l2[i, kk] = sortedRow2[kk]
}
}
class_level = unique(iris_training_raw$class)
n_class = length(class_level)
# Posterior probability of each class = its share of the k neighbours
# (column 5 of the raw training data holds the class label).
prob_matrix_l1 = matrix(nrow=length_testing, ncol=n_class, dimnames = list(NULL, class_level))
prob_matrix_l2 = matrix(nrow=length_testing, ncol=n_class, dimnames = list(NULL, class_level))
for (i in 1:length_testing) {
for (c in class_level) {
prob_matrix_l1[i, c] = length(which(iris_training_raw[unlist(dist_rank_l1[i,]), 5] == c)) / k
prob_matrix_l2[i, c] = length(which(iris_training_raw[unlist(dist_rank_l2[i,]), 5] == c)) / k
}
}
# Per-sample classification tables. NOTE(review): ncol = n_class but three
# column names are supplied -- assumes exactly three classes; confirm.
iris_classfication_l1 = matrix(,nrow = length_testing, ncol = n_class, dimnames = list(NULL, c("Actual Class", "Predicted Class", "Posterior Probability")))
iris_classfication_l2 = matrix(,nrow = length_testing, ncol = n_class, dimnames = list(NULL, c("Actual Class", "Predicted Class", "Posterior Probability")))
error_l1 = matrix(,nrow = length_testing, ncol = 1, dimnames = list(NULL,c("Correctness")))
error_l2 = matrix(,nrow = length_testing, ncol = 1, dimnames = list(NULL,c("Correctness")))
iris_classfication_l1[, "Actual Class"] = as.character(iris_testing_raw$class)
iris_classfication_l2[, "Actual Class"] = as.character(iris_testing_raw$class)
# Predict the class with the highest posterior probability and record
# whether the prediction matches the actual label.
for (i in 1:length_testing) {
# Based on L1 distance
iris_classfication_l1[i, "Predicted Class"] = as.character(class_level[which.max(prob_matrix_l1[i,])])
iris_classfication_l1[i, "Posterior Probability"] = max(prob_matrix_l1[i,])
error_l1[i,1] = (iris_classfication_l1[i, "Actual Class"] == iris_classfication_l1[i, "Predicted Class"] )
# Based on L2 distance
iris_classfication_l2[i, "Predicted Class"] = as.character(class_level[which.max(prob_matrix_l2[i,])])
iris_classfication_l2[i, "Posterior Probability"] = max(prob_matrix_l2[i,])
error_l2[i,1] = (iris_classfication_l2[i, "Actual Class"] == iris_classfication_l2[i, "Predicted Class"] )
}
# Confusion matrices: rows = actual class, columns = predicted class.
confusion_matrix_l1 = matrix(nrow = n_class, ncol = n_class, dimnames = list(class_level, class_level))
confusion_matrix_l2 = matrix(nrow = n_class, ncol = n_class, dimnames = list(class_level, class_level))
for (i in class_level) {
for (j in class_level) {
confusion_matrix_l1[i,j] = length(which((iris_classfication_l1[, "Actual Class"]==i)
& (iris_classfication_l1[, "Predicted Class"]==j)))
confusion_matrix_l2[i,j] = length(which((iris_classfication_l2[, "Actual Class"]==i)
& (iris_classfication_l2[, "Predicted Class"]==j)))
}
}
# TPR/FPR treat class 1 as "positive" and use only the first two rows and
# columns of the L1 confusion matrix.
TPR[as.character(k),1] = confusion_matrix_l1[1,1] / (confusion_matrix_l1[1,1] + confusion_matrix_l1[1,2])
FPR[as.character(k),1] = confusion_matrix_l1[2,1] / (confusion_matrix_l1[2,1] + confusion_matrix_l1[2,2])
error_rate_l1[as.character(k),1] = 1 - (length(which(error_l1 == TRUE)) / length_testing)
error_rate_l2[as.character(k),1] = 1- (length(which(error_l2 == TRUE)) / length_testing)
end.time <- Sys.time()
exe_time[as.character(k),1] = time_calc_dist+(end.time - start.time)
}
# Optional diagnostics: error-rate curves over k and a TPR/FPR plot.
#min(error_rate_l1)
#min(error_rate_l2)
#which.min(error_rate_l2)
#plot(row.names(error_rate_l1), error_rate_l1, xlab = "k", ylab = "Error Rate")
#plot(row.names(error_rate_l2), error_rate_l2, xlab = "k", ylab = "Error Rate")
#plot(FPR, TPR, type = "o", xlab = "FPR", ylab = "TPR", xlim=c(0, 1), ylim=c(0, 1))
# Persist the per-sample classification tables for both distance metrics.
write.csv(iris_classfication_l1,file="./iris_classification_l1.csv")
write.csv(iris_classfication_l2,file="./iris_classification_l2.csv") |
7a156fb8d05458e78597ddef706e2e110564eb95 | d9073843316803f58cc7512a1d8dd90951b860c6 | /man/print_meansFull.Rd | 444f47b83beaf0fcb88e9690ab24c07e1d12915d | [
"MIT"
] | permissive | medpsytuebingen/paprHelper | f62a84d24c7e202d26b275aab746823abb0ad3f4 | d0ceff6591270df5cf843d0c9de59c732d1f07b6 | refs/heads/master | 2021-04-15T12:37:56.241459 | 2018-03-25T18:21:18 | 2018-03-25T18:21:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 655 | rd | print_meansFull.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare_means.R
\name{compare_means}
\alias{compare_means}
\title{Combine two means in one sentence.}
\usage{
compare_means(means, group, compared_str = "vs.")
}
\arguments{
\item{means}{A numeric or character vector with means or means +- sem.}
\item{group}{A two-level factor identifying the means being compared.}
\item{compared_str}{String to be placed between the means. Default = "vs.".}
}
\value{
A character vector.
}
\description{
This will create a character vector containing the two means with a
separator string in between.
}
\examples{
\dontrun{
if(interactive()){
#EXAMPLE1
}
}
}
|
fc9c8fd13dd1715299a24bb4f1361afe88326072 | c31a43ade75c77f2d99c02faf798c3cc9f9cb9e3 | /scripts/project_true.r | f2deaca7efee0c0484234da7d0dce8863cd4fac1 | [] | no_license | kflagg/manuscript2 | 34c0edf1cef8102f34462552373b99e941d7ea0d | 795e7157a5c903d8fc3d90de50f19ed41b8693f6 | refs/heads/master | 2021-06-25T03:51:35.987808 | 2020-11-27T00:44:38 | 2020-11-27T00:44:38 | 165,315,151 | 1 | 0 | null | 2020-07-03T22:29:02 | 2019-01-11T21:51:44 | HTML | UTF-8 | R | false | false | 991 | r | project_true.r | # Load packages, create functions, create designs.
# functions.r is expected to supply the cluster `cl`, the analysis mesh
# rect_R_mesh and the required packages (dplyr, INLA, parallel) -- TODO confirm.
source('functions.r')
# Read the datasets.
rect_datasets <- readRDS('../data/rect_data.rds')
# Mesh nodes will be sorted by y.
lambda_grid <- as.data.frame(attr(rect_datasets$Data[[1]], 'Lambda')) %>%
arrange(y) %>%
as.matrix
# Lattice mesh whose nodes are exactly the unique grid coordinates of the
# true intensity surface (no refinement or boundary extension).
lambda_mesh <- inla.mesh.create(
lattice = inla.mesh.lattice(
unique(lambda_grid[,'x']), unique(lambda_grid[,'y'])
), refine = FALSE, extend = FALSE
)
# Projector from the lattice mesh onto the analysis-mesh node locations.
proj_gridtomesh <- inla.mesh.projector(lambda_mesh, rect_R_mesh$loc[,1:2])
invisible(clusterEvalQ(cl, library(dplyr)))
clusterExport(cl, c('rect_datasets', 'proj_gridtomesh'))
# For each dataset, project its true Lambda values (sorted by y to match the
# mesh-node ordering above) onto the analysis mesh nodes, in parallel.
lambda_at_nodes <- parSapply(cl, seq_len(nrow(rect_datasets)), function(r){return(
inla.mesh.project(proj_gridtomesh,
rect_datasets$Data[[r]] %>%
attr('Lambda') %>%
as.data.frame %>%
arrange(y) %>%
`$`('value')
)
)})
# Columns named by dataset ID; cache the node-level true intensities.
colnames(lambda_at_nodes) <- rect_datasets$DataID
saveRDS(lambda_at_nodes, '../data/lambda_at_nodes.rds')
stopCluster(cl)
|
500ab61957feda7db996f096119f1496b386fc73 | 078bf836f420c94805ea22214f952752dca611c1 | /xship/server/tab-vesseldetails.R | b5f37bbc5a45c7917df99dd46e300805e7e4d393 | [] | no_license | nikhadharman/shiny | 20e8cdc3e4e6b6d7b463c7cd494a5f301e945906 | e461d48b7d5f3a1e350298468b103067947ddc70 | refs/heads/master | 2021-09-09T23:59:30.755964 | 2018-03-20T09:26:29 | 2018-03-20T09:26:29 | 111,196,872 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,931 | r | tab-vesseldetails.R |
#VESSEL DETAIL....
# Vpq: rows of the global VESSELDETAILS table for the currently selected
# vessel (NULL until the table and input$Vessel are both available).
Vpq <- reactive({
y= VESSELDETAILS
if(is.null(y))
return(NULL)
validate(
need(try(input$Vessel),"Please Wait or Select the vessel")
)
ff=input$Vessel
y=suppressWarnings(subset(y,y$Vessel == ff))
})
# Fleet selector: only rendered in "Fleet Wise" browsing mode.
# Column 1 of the global DATA table is used as the fleet name.
output$vfleet <- renderUI({
r=DATA
s=input$selecttype
if(is.null(s))
return(NULL)
if(s=="Fleet Wise")
{
Fleet_List =suppressWarnings(unique(as.character(r[,1]), incomparables = FALSE))
selectInput("Fleet", label=h4(strong("Fleet")), choices = Fleet_List, selected = 1, multiple = FALSE, selectize = TRUE, width = "50%", size = NULL)
}else {return(NULL)}
})
# Vessel selector: restricted to the chosen fleet in "Fleet Wise" mode,
# otherwise lists every vessel. Column 3 of DATA holds the vessel name.
output$selectUI <- renderUI({
r=DATA
s=input$selecttype
if(is.null(s))
return(NULL)
if(s=="Fleet Wise"){
r=subset(r,Fleet == input$Fleet)
Vessel_List = suppressWarnings(unique(as.character(r[,3]), incomparables = FALSE))
selectInput("Vessel", label=h4(strong("Vessel")), choices = Vessel_List, selected = "Strategic Alliance", multiple = FALSE, selectize = TRUE, width = "50%", size = NULL)
}
else{
Vessel_List = suppressWarnings( unique(as.character(r[,3]), incomparables = FALSE))
selectInput("Vessel", label=h4(strong("Vessel")), choices = Vessel_List, selected = "Strategic Alliance", multiple = FALSE, selectize = TRUE, width = "50%", size = NULL)
}
})
# Vessel-particulars inputs: each renderUI reads one column (4-11) of the
# selected vessel's VESSELDETAILS row and exposes it as an editable
# numericInput, labelled below.
output$yob <- renderUI({
r=Vpq()
if(is.null(r))
return(NULL)
yob= suppressWarnings(unique(as.numeric(r[,8]), incomparables = FALSE))
numericInput("yob",label="YOB",value=yob)
})
output$loa <- renderUI({
r=Vpq()
if(is.null(r))
return(NULL)
loa=suppressWarnings(unique(as.numeric(r[,9]), incomparables = FALSE))
numericInput("loa",label="LOA (m)",value=loa)
})
output$b <- renderUI({
r=Vpq()
if(is.null(r))
return(NULL)
b=suppressWarnings(unique(as.numeric(r[,10]), incomparables = FALSE))
numericInput("b",label="Moulded Breadth (m)",value=b)
})
output$DIS <- renderUI({
r=Vpq()
if(is.null(r))
return(NULL)
DIS=suppressWarnings(unique(as.numeric(r[,11]), incomparables = FALSE))
numericInput("DIS",label="Displacement(Scantling/Design Draft) (T)",value=DIS)
})
output$Draft1UI <- renderUI({
r=Vpq()
if(is.null(r))
return(NULL)
draft1=suppressWarnings(unique(as.numeric(as.character(r[,4])), incomparables = FALSE))
numericInput("draft1",label="Ballast Draft (m)",value=draft1)
})
output$Draft2UI <- renderUI({
r=Vpq()
if(is.null(r))
return(NULL)
draft2=suppressWarnings(unique(as.numeric(r[,5]), incomparables = FALSE))
numericInput("draft2",label="Scantling/Design Draft (m)",value=draft2)
})
output$speed2UI <- renderUI({
r=Vpq()
if(is.null(r))
return(NULL)
speed2 = suppressWarnings(unique(as.numeric(r[,6]), incomparables = FALSE))
numericInput("speed2",label="Max Service Speed (Knots)",value=speed2)
})
output$MCRUI <- renderUI({
r=Vpq()
if(is.null(r))
return(NULL)
mcr = suppressWarnings(unique(as.numeric(r[,7]), incomparables = FALSE))
numericInput("MCR",label="Max Continuous Rating (kW)",value=mcr)
})
# image vessel ...
# Vessel photo: the image file name is the vessel name with spaces removed
# plus ".jpg". NOTE(review): need(try(filename)) is always truthy here, so
# the "NO IMAGE AVAILABLE" message never fires for a missing file -- confirm.
output$vesselimage=renderUI({
vessel=input$Vessel
if(is.null(vessel))
return(NULL)
vessel=str_replace_all(vessel, fixed(" "), "")
filename <- paste(vessel,".jpg",sep="")
validate(
need(try(filename),"NO IMAGE AVAILABLE .....")
)
s=tags$img(src = filename, width=1600, height=500)
})
#hydros data.......
# Hydros: bundled hydrostatics table filtered to the selected vessel.
Hydros<- reactive({
y=data.frame(read.csv("data/Hydros Data.csv"))
ff=input$Vessel
y=subset(y,y$Vessel == ff)
})
# Active hydrostatics source: the user-uploaded CSV (input$hydros) when one
# has been provided, otherwise the bundled per-vessel table from Hydros().
# Fix: the original returned its value only as the side-value of dangling
# assignments to two different, unused locals (`y` / `hydros`); both
# branches now return the data frame directly.
H <- reactive({
inhydros <- input$hydros
if (is.null(inhydros)) {
Hydros()
} else {
read.csv(inhydros$datapath)
}
})
# Draft vs wetted-surface-area curve from the active hydrostatics table.
# NOTE(review): c, f and l used in the layouts below look like plot-styling
# objects defined elsewhere in the app -- confirm they are in scope.
output$Hplot=renderPlotly({
if (is.null(H()))
return(NULL)
p= plot_ly(data = H(), x= ~Draft, y= ~WSA,name="WSA",type = 'scatter',mode = 'lines+markers',line=list(color= "#CD3131") ,marker=list(color= "#CD3131") )%>%
layout(title="WSA vs DRAFT ",titlefont=c,
xaxis = list(title = "Draft (meter)", titlefont =f, tickfont =f,gridcolor = "#FFFFFF"),
yaxis = list(title = "Wetted Surface Area (Sq.meter)", titlefont =f, tickfont = f,gridcolor = "#ABB2B9"),
plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF"
)
})
# Draft vs displacement curve. ("DIisplacement" below is a typo in a
# displayed axis label; left untouched here, flagged for a separate fix.)
output$Dplot=renderPlotly({
if (is.null(H()))
return(NULL)
plot_ly(data = H(), x= ~Draft, y= ~Displ,name="Displ",type = 'scatter',mode = 'lines+markers',line=list(color= "#74B49B") ,marker=list(color= "#74B49B") )%>%
layout(title="DISPLACEMENT Vs DRAFT",titlefont=c,
xaxis = list(title = "Draft (meter)", titlefont =f, tickfont =f,gridcolor = "#FFFFFF"),
yaxis = list(title = "DIisplacement (Tonne)", titlefont = f, tickfont = f,gridcolor = "#ABB2B9"),
plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF"
)
})
# Hydrostatics table with the identifying Vessel/Fleet columns dropped.
output$hydros= renderDataTable({
y=H()
y$Vessel = NULL
y$Fleet=NULL
datatable(y,class = 'cell-border stripe', rownames = FALSE,options = list(autoWidth = TRUE,searching = FALSE,paging = FALSE))%>%
formatStyle(names(y),color="#000")
})
# Draft-coefficient readout. NOTE(review): n1() is not defined in this
# file -- presumably a reactive from another server tab; confirm.
output$N1=renderText({
paste("Draft coefficient(Draft & WSA Relation) n1 :",n1())
})
#shoptrial data .....................
# shopdata: shop-trial rows for the selected vessel with identifying
# columns removed and display-friendly column names applied.
shopdata = reactive ({
y=Shoptrial
ff=input$Vessel
y=subset(y,y$VESSEL.NAME==ff)
y$VESSEL.NAME = NULL
y$FLEET=NULL
y$CLASS=NULL
colnames(y)=c("ENGINE LOAD %","POWER(kW)","SFOC Measured(g/kW-Hr)","SFOC Corrected(g/kW-Hr)")
y
})
output$shoptable <- DT::renderDataTable({
y=shopdata()
datatable(y,class = 'cell-border stripe',options = list(autoWidth = TRUE,searching = FALSE,paging = FALSE),rownames = FALSE)%>%
formatStyle(names(y),color="#000")
})
# testfit: fit quadratic curves SFOC = n1*load^2 + n2*load + k to both the
# measured (col 3) and corrected (col 4) SFOC, then evaluate each fit on a
# 30-point grid spanning the observed engine-load range.
testfit=reactive({
y=shopdata()
if (is.null(y))
return(NULL)
x1 = y[, 1]
y1 = y[, 3]
y2 = y[, 4]
fit2aa = lm(y1 ~ poly(x1, 2, raw = TRUE))
n1 = as.numeric(fit2aa$coefficients[3])
n2 = as.numeric(fit2aa$coefficients[2])
k = as.numeric(fit2aa$coefficients[1])
fit2bb = lm(y2 ~ poly(x1, 2, raw = TRUE))
n1b = as.numeric(fit2bb$coefficients[3])
n2b = as.numeric(fit2bb$coefficients[2])
kb = as.numeric(fit2bb$coefficients[1])
testx = seq(min(x1), max(x1), length.out = 30)
testy1 = testx ^ 2 * n1 + testx * n2 + k
testy2 = testx ^ 2 * n1b + testx * n2b + kb
test=data.frame(testx,testy1,testy2)
test
})
# Engine load vs SFOC: measured (green) and corrected (red) shop-trial
# points plus the fitted quadratic curves from testfit().
# NOTE(review): c, f and l in the layout are styling objects defined
# elsewhere in the app -- confirm they are in scope.
output$Shopplot=renderPlotly({
y = shopdata()
if (is.null(y))
return(NULL)
test = testfit()
p = plot_ly(
y,
x = ~y[, 1],
y = ~y[, 3],
name = "SFOC Measured", type = 'scatter', mode = 'markers',
marker = list(size = 8, color = "#74B49B"),
showlegend = TRUE) %>%
# Fix: this trace plots column 4 ("SFOC Corrected(g/kW-Hr)" per
# shopdata()), but was labelled "SFOC Measured" like the first trace,
# producing two identical legend entries.
add_trace(y,
x = ~y[, 1],
y = ~y[, 4],
name = "SFOC Corrected", type = 'scatter', mode = 'markers',
marker = list(size = 8, color = "#CD3131"),
showlegend = TRUE) %>%
# Fitted curves, drawn as splines with invisible markers.
add_trace(test, x = test$testx, y = test$testy1, type = 'scatter', mode = 'lines+markers',
marker = list(opacity = 0, color = "#74B49B"), line = list(shape = "spline", color = "#74B49B"), showlegend = FALSE) %>%
add_trace(test, x = test$testx, y = test$testy2, type = 'scatter', mode = 'lines+markers',
marker = list(opacity = 0, color = "#CD3131"), line = list(shape = "spline", color = "#CD3131"), showlegend = FALSE)
p = p %>% layout(title = "LOAD(%) Vs SFOC", titlefont = c,
xaxis = list(title = "Load(%)", titlefont = f, tickfont = f, gridcolor = "#FFFFFF"),
yaxis = list(title = "SFOC(g/kW-Hr)", titlefont = f, tickfont = f, gridcolor = "#ABB2B9"),
plot_bgcolor = "#FFFFFF",
paper_bgcolor = "#FFFFFF", legend = l
)
p
})
#Sea trial Data............................................................
# seatrialdb: sea-trial rows for the selected vessel, with the identifying
# Vessel/Fleet/Class columns dropped.
seatrialdb = reactive({
y=seatrialdata
y=subset(y,y$Vessel==input$Vessel)
y$Vessel=NULL
y$Fleet= NULL
y$Class =NULL
y
})
output$seatrialtable<- DT::renderDataTable({
y=seatrialdb()
colnames(y)=c("Speed (kn)","Power (kW)")
datatable(y,options = list(autoWidth = TRUE,searching = FALSE,paging = FALSE),rownames = FALSE)
})
# Sea-trial speed-power curve. NOTE(review): s and l in the layout are
# styling objects defined elsewhere in the app -- confirm they are in scope.
output$seatrialplot=renderPlotly({
plot_ly(data = seatrialdb(), x= ~Speed, y= ~Sea.Trial.Power,name="Ballast Draft",type = 'scatter',mode = 'lines+markers',line = list(shape = "spline") )%>%
layout(title="Sea Trial Curve",titlefont=s,
xaxis = list(title = "Speed (knots)", titlefont =s, tickfont =s,gridcolor = "white"),
yaxis = list(title = "Power (kW)", titlefont = s, tickfont = s,gridcolor="#ABB2B9"),
plot_bgcolor = "white",
paper_bgcolor = "white",legend = l
)
})
|
8a190a005ff78273c704dbcf086cb85b2111a37e | 75f69ae4eb0fc37bc2fde2d606a1cee493867b2d | /man/find_vignettes.Rd | d95c2f23fac19eb67a6f1c65b5511ca1a1c9ab2a | [] | no_license | cran/docreview | 8b025ce045ce58900949cb99c0b6e67cde5dbf00 | e8f986d62977c86cde0136b3f07b4f2ff3a244fb | refs/heads/master | 2023-07-08T16:50:53.256805 | 2021-08-17T06:20:11 | 2021-08-17T06:20:11 | 397,309,259 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 300 | rd | find_vignettes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/find.R
\name{find_vignettes}
\alias{find_vignettes}
\title{Find vignettes}
\usage{
find_vignettes(path = ".")
}
\arguments{
\item{path}{Path to package}
}
\description{
Find all vignettes in a package
}
\keyword{internal}
|
428f645a35cd3f3a7216a00429a00544ede4b3f5 | 0e398d85e9d2612e56eeac1c98759e4131482016 | /man/hbridge.Rd | f7a6e1f5fffeeacf533c76b6e608fb4be1ef92f7 | [] | no_license | cran/PCL | da79d63a76c0a082991f42cb5fe9f0c5ef77007a | 36f7daf23d3a2ea51eabdba9852cc5d87afd3462 | refs/heads/master | 2023-03-31T21:04:08.441908 | 2021-04-10T06:50:10 | 2021-04-10T06:50:10 | 356,625,596 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 462 | rd | hbridge.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proxicausal.R
\name{hbridge}
\alias{hbridge}
\title{This function is to compute estimating equation of outcome-inducing confounding bridge function}
\usage{
hbridge(para, Y, W, Z)
}
\value{
returns the sample-level estimating equations for the outcome-inducing confounding bridge function
}
\description{
This function is to compute estimating equation of outcome-inducing confounding bridge function
}
\keyword{internal}
|
59499fbab1898af9c21095d614a4d8504b89f443 | 3aba228c59ecaad560dfd8cc6bf4a711836b3fc1 | /man/plotTimeseries.Rd | c00ff3b766c312f3057f342b1b3ee597bbb2c7da | [] | no_license | ahonkela/tigre | bd4d063fe9be375f1c61b76682f601f8226c38de | ccbd773c4f0eb2d1673f36b1255475bb5c22e431 | refs/heads/master | 2021-08-28T12:52:05.965841 | 2021-08-04T08:19:30 | 2021-08-04T08:19:30 | 244,204,913 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 978 | rd | plotTimeseries.Rd | \name{plotTimeseries}
\Rdversion{1.0}
\alias{plotTimeseries}
\title{Plot ExpressionTimeSeries data}
\description{
Plots ExpressionTimeSeries data.
}
\usage{
plotTimeseries(data, nameMapping = NULL)
}
\arguments{
\item{data}{An ExpressionTimeSeries object.}
\item{nameMapping}{The annotation used for mapping the names of the
genes for the figures. By default, the SYMBOL annotation for the
array is used, if available.}
}
\details{
The function plots the expression levels from an ExpressionTimeSeries
object and the associated standard deviations. If the object
includes multiple time series, they will be plotted in the same
figure, but slightly shifted.
}
\author{Antti Honkela}
\seealso{
\code{\link{processData}}.
}
\examples{
# Load a mmgmos preprocessed fragment of the Drosophila developmental
# time series
data(drosophila_gpsim_fragment)
# Plot the first two genes
plotTimeseries(drosophila_gpsim_fragment[1:2,])
}
\keyword{model}
|
3da4c258b0825215c8411676c438a3c203984142 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/scriptuRs/examples/kjv_bible.Rd.R | a686a9630d079de953b775d3046c4423fffa260b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 257 | r | kjv_bible.Rd.R | library(scriptuRs)
### Name: kjv_bible
### Title: Tidy data frame of the King James Version of the Bible
### Aliases: kjv_bible
### ** Examples
library(dplyr)
# Count the verses in each book, grouped within volume.
kjv_bible() %>%
group_by(volume_title, book_title) %>%
summarise(total_verses = n())
|
b7cb5de2318b7fda5f47240329877a3db80a4d2f | 165e1fae27618c2000fb5e2620408fe3f37430f3 | /R/sparta_wrapper_functions.R | 9344a3bd5d489ac7d769d9d420f1442e6bfc853b | [] | no_license | syhof/sMon-Amphibia | 59eb94be728ffe2f4b9df3da69d8c977ebedf2f3 | 8396ef326b4523453a93882c273c6e99c5676c4d | refs/heads/master | 2021-05-10T08:28:06.729945 | 2018-01-24T13:17:20 | 2018-01-24T13:17:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,816 | r | sparta_wrapper_functions.R | # calculate the linear trends
#@ models - a list of all the sparta models for each species
calculateTrends<-function(models){
# Linear occupancy trends (first-to-last-year change) for each fitted
# sparta model.
#
# Args:
#   models: list of sparta occupancy model objects.
# Returns: data.frame with mean.trend, CI.lower and CI.upper, one row
#   per model.
#
# NOTE(review): relies on the globals `df` (records with a Year column)
# and `sp_mods` (model file names) being defined by the caller -- confirm.
#get trends for each model
trends <- lapply(models, occurrenceChange, firstYear=min(df$Year), lastYear=max(df$Year))
# Name each trend after its model file, stripping the .rdata extension.
# Fix: the original relied on partial argument matching (pa= / repl=)
# and an implicit positional x; arguments are now spelled out.
names(trends) <- gsub(pattern = "\\.rdata", replacement = "", x = sp_mods)
#convert into a data frame
outputs <- data.frame(
mean.trend = sapply(trends, function(x) x$mean),
CI.lower = sapply(trends, function(x) x$CIs[1]),
CI.upper = sapply(trends, function(x) x$CIs[2]))
#return it
return(outputs)
}
#get annual predictions for each species
#@ models - a list of all the sparta models for each species
# Returns a long data frame (one row per model-year) built from each
# model's summary() table, with the year taken from the summary row names,
# the species from x$SPP_NAME, and the Rhat convergence diagnostic for the
# BUGS parameters whose names contain "psi.fs".
annualPredictions <- function(models){
library(plyr)
ldply(models,function(x){
#get annual predictions
temp <- data.frame(summary(x))
temp$Year<-as.numeric(row.names(summary(x)))
temp$Species <- x$SPP_NAME
#get RHat values
bugsOutput <- x$BUGSoutput$summary
bugsOutput <- data.frame(bugsOutput[grepl("psi.fs",row.names(bugsOutput)),])
temp$Rhat <- as.numeric(bugsOutput$Rhat)
return(temp)
})
}
#plot these predictions (restrict to species with more than 50 observations)
#@ myAnnualPredictions - the annual predictions returned by the above function
#@ rawData - the original data file of species occurrence records
plotPredictions <- function(myAnnualPredictions,rawData){
require(ggplot2)
ggplot(data=subset(myAnnualPredictions,Species %in% names(table(rawData$Species))[table(rawData$Species)>50])) +
geom_line(aes(x = Year, mean))+
geom_point(aes(x = Year, mean,colour = factor(Rhat<1.1)))+
geom_ribbon(aes(x=Year, ymin = quant_025, ymax = quant_975), alpha=0.50)+
theme_bw() +
scale_x_continuous(labels=c(1990,1995,2000,2005,2010))+
facet_wrap( ~ Species) +
theme(legend.position = "none")
} |
062cc48dc7fc5a8b30f06414596e25ec239a114e | 4520e57b8718ff8815de7c3ecbd0a3536d89ebe8 | /PH125_9_movielens_capstone_script_mdt.R | 7326ee1f712a61f7cca2f3b9c7cac51f3259d389 | [] | no_license | mdt-ds/PH125_9x_MovieLens | 306d404daacaf6bec36cb3e3168573bccf403555 | cd288f15054a79cadc549c66e97591e1690cb6db | refs/heads/master | 2020-05-18T18:35:06.363292 | 2019-05-12T12:54:44 | 2019-05-12T12:54:44 | 184,590,454 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,165 | r | PH125_9_movielens_capstone_script_mdt.R | # Script Header ----
# File-Name: PH125_9_movielens_capstone_script_mdt.R
# Date: May 10, 2019
# Author: Mario De Toma <mdt.datascience@gmail.com>
# Purpose: R script for submission of PH125_9 movielens capstone project for
# HarvardX Data Science Professional Certificate
# Data used: MovieLens 10M dataset
# Packages used: dplyr, tidyr, ggplot2, softImpute
# This program is believed to be free of errors, but it comes with no guarantee!
# The user bears all responsibility for interpreting the results.
# All source code is copyright (c) 2019, under the Simplified BSD License.
# For more information on FreeBSD see: http://www.opensource.org/licenses/bsd-license.php
# All images and materials produced by this code are licensed under the Creative Commons
# Attribution-Share Alike 3.0 United States License: http://creativecommons.org/licenses/by-sa/3.0/us/
# All rights reserved.
#############################################################################################
# session init ----
# NOTE(review): rm(list=ls()) in a sourced script clears the caller's
# workspace -- kept as-is, but worth reconsidering.
rm(list=ls())
graphics.off()
#setwd("working directory path")
# load and partition data script provided by HarvardX ----
# Note: this process could take a couple of minutes
# Install-on-demand: require() returns FALSE instead of erroring when a
# package is missing, triggering the install.
if(!require(tidyverse)) {
install.packages("tidyverse", repos = "http://cran.us.r-project.org")
library(tidyverse)
}
if(!require(caret)) {
install.packages("caret", repos = "http://cran.us.r-project.org")
library(caret)
}
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat uses "::" as separator; rewrite to tabs before parsing.
ratings <- read.table(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# Convert the character matrix to typed columns before joining.
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1)
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
# exploratory data analysis ----
# rating overall distribution
edx %>% group_by(rating) %>% summarise(prop = n()/nrow(edx)) %>%
ggplot(aes(rating, prop)) + geom_col()
# rating by movie year
# (the release year is the 4-digit number in the trailing "(YYYY)" of each
# title, extracted with a regex)
edx %>%
mutate(movie_year = as.numeric(str_sub(str_extract(title, '[0-9]{4}\\)$'), 1, 4))) %>%
group_by(movie_year) %>%
summarise(mean_rating = mean(rating)) %>%
ggplot(mapping = aes(x = movie_year, y = mean_rating)) +
geom_point() + geom_line() +
theme(axis.text.x = element_text(angle = 90))
# rating by year_rated
library(lubridate)
edx %>%
mutate(year_rated = year(as_datetime(timestamp))) %>%
group_by(year_rated) %>%
summarise(mean_rating = mean(rating)) %>%
ggplot(mapping = aes(x = year_rated, y = mean_rating)) +
geom_point() + geom_line() +
theme(axis.text.x = element_text(angle = 90))
# x.5 rating by year rated
# (counts of half-star ratings per rating year)
edx %>% filter(rating %in% c(0.5, 1.5, 2.5, 3.5, 4.5)) %>%
mutate(year_rated = year(as_datetime(timestamp))) %>%
group_by(year_rated) %>%
summarise(half_rated = n()) %>%
arrange(half_rated)
# modeling ----
# main + group-level effect model; recommender quality is evaluated with
# the root-mean-squared error (RMSE) on the validation set.
# RMSE between observed and predicted ratings (numeric vectors of equal length).
RMSE <- function(true_ratings, predicted_ratings) {
  squared_errors <- (true_ratings - predicted_ratings)^2
  sqrt(mean(squared_errors))
}
# modeling effects ----
# Additive effects model: rating ~ mu + b_movie + b_user + b_year, where
# each effect is the mean residual after removing the previously estimated
# terms.
mu <- mean(edx$rating)
movie_effect <- edx %>%
group_by(movieId) %>%
summarize(b_movie = mean(rating - mu))
user_effect <- edx %>%
left_join(movie_effect, by = 'movieId') %>%
group_by(userId) %>%
summarize(b_user = mean(rating - mu - b_movie))
year_effect <- edx %>%
mutate(year_movie = as.numeric(str_sub(str_extract(title, '[0-9]{4}\\)$'), 1, 4))) %>%
left_join(movie_effect, by = 'movieId') %>%
left_join(user_effect, by = 'userId') %>%
group_by(year_movie) %>%
summarise(b_year = mean(rating - mu - b_movie - b_user))
# recommender prediction
# Join the fitted effects onto the validation rows and sum them.
predicted_real_ratings <- validation %>%
mutate(year_movie = as.numeric(str_sub(str_extract(title, '[0-9]{4}\\)$'), 1, 4))) %>%
left_join(movie_effect, by = 'movieId') %>%
left_join(user_effect, by = 'userId') %>%
left_join(year_effect, by = 'year_movie') %>%
mutate(predicted_real = mu + b_movie + b_user + b_year) %>%
.$predicted_real
# evaluating root mean squared error
rmse_0 <- RMSE(true_ratings = validation$rating, predicted_ratings = predicted_real_ratings)
rmse_results <- tibble(method = 'movie + user + movie_year effects', RMSE = rmse_0)
rmse_results %>% knitr::kable()
# regularization of main + group level model ----
gc()
# search for best lambda
# Grid search over the penalty lambda: each effect is shrunk by replacing
# the group mean with sum(residual) / (n + lambda); the lambda with the
# lowest validation RMSE is kept.
lambdas <- seq(0,10, 0.5)
lambda_df <- map_df(lambdas, function(l) {
mu <- mean(edx$rating)
movie_effect <- edx %>%
group_by(movieId) %>%
summarize(b_movie = sum(rating - mu)/(n() + l))
user_effect <- edx %>%
left_join(movie_effect, by = 'movieId') %>%
group_by(userId) %>%
summarize(b_user = sum(rating - mu - b_movie)/(n() + l))
year_effect <- edx %>%
mutate(year_movie = as.numeric(str_sub(str_extract(title, '[0-9]{4}\\)$'), 1, 4))) %>%
left_join(movie_effect, by = 'movieId') %>%
left_join(user_effect, by = 'userId') %>%
group_by(year_movie) %>%
summarise(b_year = sum(rating - mu - b_movie - b_user)/(n() + l))
predicted_real_ratings <- validation %>%
mutate(year_movie = as.numeric(str_sub(str_extract(title, '[0-9]{4}\\)$'), 1, 4))) %>%
left_join(movie_effect, by = 'movieId') %>%
left_join(user_effect, by = 'userId') %>%
left_join(year_effect, by = 'year_movie') %>%
mutate(predicted_real = mu + b_movie + b_user + b_year) %>%
.$predicted_real
rmse <- RMSE(true_ratings = validation$rating, predicted_ratings = predicted_real_ratings)
tibble(lambda =l, rmse = rmse)
})
lambda_best <- lambda_df$lambda[which.min(lambda_df$rmse)]
# best lambda regularized model prediction
# Refit the regularized effects once more at the selected lambda.
l <- lambda_best
mu <- mean(edx$rating)
movie_effect <- edx %>%
group_by(movieId) %>%
summarize(b_movie = sum(rating - mu)/(n() + l))
user_effect <- edx %>%
left_join(movie_effect, by = 'movieId') %>%
group_by(userId) %>%
summarize(b_user = sum(rating - mu - b_movie)/(n() + l))
year_effect <- edx %>%
mutate(year_movie = as.numeric(str_sub(str_extract(title, '[0-9]{4}\\)$'), 1, 4))) %>%
left_join(movie_effect, by = 'movieId') %>%
left_join(user_effect, by = 'userId') %>%
group_by(year_movie) %>%
summarise(b_year = sum(rating - mu - b_movie - b_user)/(n() + l))
predicted_real_ratings <- validation %>%
mutate(year_movie = as.numeric(str_sub(str_extract(title, '[0-9]{4}\\)$'), 1, 4))) %>%
left_join(movie_effect, by = 'movieId') %>%
left_join(user_effect, by = 'userId') %>%
left_join(year_effect, by = 'year_movie') %>%
mutate(predicted_real = mu + b_movie + b_user + b_year) %>%
.$predicted_real
# evaluating root mean squared error
rmse_1 <- RMSE(true_ratings = validation$rating, predicted_ratings = predicted_real_ratings)
rmse_results <- bind_rows(rmse_results,
tibble(method = 'movie + user + movie_year effects regularized', RMSE = rmse_1))
rmse_results %>% knitr::kable()
# #uncomment if you can compute this part
# #at least 32 GB RAM needed
#
# # adding latent factor contribution to the model ----
# gc()
# # user movie rating matrix
# user_movie <- edx %>% select(userId, movieId, rating) %>%
# spread(key = movieId, value = rating)
# userId_vec <- user_movie$userId
# user_movie <- user_movie %>% select(-userId)
# movieId_vec <- as.numeric(colnames(user_movie))
# um_matrix <- as.matrix(user_movie)
#
# # als algo for matrix factorization
# if(!require(softImpute)) {
# install.packages("softImpute", repos = "http://cran.us.r-project.org")
# library(softImpute)
# }
# um_matrix_sparse <- as(um_matrix, 'Incomplete')
# rm(um_matrix, user_movie); gc()
# um_matrix_sparse_centered <- biScale(um_matrix_sparse,
# col.scale=FALSE, row.scale=FALSE,
# maxit = 50, thresh = 1e-05, trace=TRUE)
# rating_fits <- softImpute(um_matrix_sparse_centered, type = "als",
# rank.max = 51, lambda = 96,
# trace=TRUE)
# rating_fits$d
#
# idx_user <- numeric(length = nrow(validation))
# idx_movie <- numeric(length = nrow(validation))
#
# for (it in 1:nrow(validation)) {
# idx_user[it] <- which(userId_vec == validation$userId[it])
# idx_movie[it] <- which(movieId_vec == validation$movieId[it])
# }
#
# latent_factor_effect <- numeric(length = length(idx_user))
# latent_factor_effect <- impute(object = rating_fits,
# i = idx_user , j = idx_movie,
# unscale = FALSE)
#
# # predict real ratings adding latent factor effect
# predicted_real_ratings_mf <- predicted_real_ratings + latent_factor_effect
#
#
# # evaluating root mean squared error
# rmse_2 <- RMSE(true_ratings = validation$rating, predicted_ratings = predicted_real_ratings_mf)
# rmse_results <- bind_rows(rmse_results, tibble(method = 'matrix factorization',
# RMSE = rmse_2))
#
# rmse_results %>% knitr::kable()
# end of script ######################################################################################## |
377b304d681d4b6d13f3d78f8303dfbcbc6bb13d | 7659496f0f1a5e8632dd853c10480874bab7dab9 | /R/CNVtest.R | 630bc70f76e16cbb892b7a0061edc26a5927ff21 | [] | no_license | isglobal-brge/CNVassoc | 77db8ae797b6e7b26d51aecee6ef1ae22294d44c | fc6806fb9bf583cf5321c3efd1ed6df92e290c89 | refs/heads/master | 2021-01-22T05:47:14.323741 | 2019-04-09T12:59:39 | 2019-04-09T12:59:39 | 92,494,611 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,557 | r | CNVtest.R | CNVtest <- function(x, type = "Wald") {
# Wald (type.test == 1) or likelihood-ratio (otherwise) test of the CNV
# effect in a fitted CNVassoc model `x`; `type` allows partial matching of
# "Wald"/"LRT". Returns an object of class "CNVtest" with the test type,
# statistic, degrees of freedom and p-value.
# (The unused `nCov` local from the original was removed; `F` was renamed
# to avoid shadowing the FALSE shorthand.)
# Variance-covariance matrix from the observed information (-hessian).
vcov.mat <- qr.solve(-x$hessian)
type.test <- charmatch(type, c("Wald", "LRT"))
cc <- NCOL(x$coefficients)
if (type.test == 1) {
if (attr(x, "model") == 1) {
# Multi-category model: Wald test that all cc coefficients are equal,
# via the (cc - 1) successive differences K %*% beta.
K <- diag(1, cc)[-cc, ] - diag(1, cc)[-1, ]
beta <- x$coefficients[1, ]
Var <- vcov.mat[seq_len(cc), seq_len(cc)]
stat <- as.double(t(K %*% beta) %*%
qr.solve(K %*% Var %*% t(K)) %*%
(K %*% beta))
df <- nrow(K)
} else {
# Single-coefficient Wald test (squared z statistic).
beta <- x$coefficients[2, 1]
se <- sqrt(vcov.mat[2, 2])
stat <- (beta/se)^2
df <- 1
}
} else {
# LRT: refit the model without the CNV term and compare log-likelihoods.
formula <- x$formula
# Drop the CNV variable from the formula. (Replaces the original
# eval(parse(text = paste("update(formula,.~.-", ...))) construction.)
formula.null <- update(formula, as.formula(paste(". ~ . -", x$CNVname)))
family <- attr(x, "family")
if (family != "weibull") {
model.null <- glm(formula.null, data = x$data, family = family)
} else {
model.null <- survreg(formula.null, data = x$data)
}
stat <- 2 * (logLik(x)[1] - logLik(model.null)[1])
df <- if (attr(x, "model") == 1) cc - 1 else 1
}
pvalue <- pchisq(stat, df, lower.tail = FALSE)
out <- list(type = type.test, stat = stat, df = df, pvalue = pvalue)
class(out) <- "CNVtest"
out
}
|
adc0973cfd6d14fb7226239d55b964f008ee2350 | 55f5ecbb7dba1647e295b1f4fb88fec2c152a93b | /man/importData.Rd | 46469091c1733858ed86f3a5fdc4f9610da08c1f | [
"MIT"
] | permissive | ardata-fr/shinytools | b0fa5f54296df731b0e40a9c011dc166c6a9f5d7 | b88457cd7e76ec8fb3c1565b2abe15619f9a70a6 | refs/heads/master | 2020-05-05T02:36:32.089501 | 2019-11-12T21:23:48 | 2019-11-12T21:23:48 | 179,644,269 | 16 | 4 | NOASSERTION | 2019-04-11T10:08:23 | 2019-04-05T08:22:55 | R | UTF-8 | R | false | true | 3,031 | rd | importData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importData.R
\name{importData}
\alias{importData}
\alias{importDataUI}
\alias{importDataServer}
\title{shiny UI to import data}
\usage{
importDataUI(id)
importDataServer(input, output, session,
forbidden_labels = reactive(NULL), default_tofact = FALSE,
ui_element = "actionLink", ui_label = "Import",
ui_icon = icon("upload"), labelize = FALSE)
}
\arguments{
\item{id}{namespace identifier for the module}
\item{input, output, session}{mandatory arguments for modules to be valid. These
should not to be defined as they will be handled by shiny.}
\item{forbidden_labels}{Optional, reactive value, forbidden labels as a character vector}
\item{default_tofact}{If default convert characters to factors. Default FALSE.}
\item{ui_element}{UI element to show, either "actionButton", or "actionLink". Default "actionLink".}
\item{ui_label}{Label of ui element. Default to "import".}
\item{ui_icon}{Icon of ui element. Default to icon("upload").}
\item{labelize}{if TRUE a label is required to import the data}
}
\description{
A module to enable data importation
in shiny applications, by clicking on a button or link action,
man can open a modal window to let import dataset in shiny application.
The module support CSV, Excel and SAS datasets.
}
\examples{
library(shinytools)
library(DT)
library(shiny)
if (interactive()) {
options(device.ask.default = FALSE)
ui <- fluidPage(
load_tingle(),
importDataUI(id = "id1"),
uiOutput("ui_SI_labels"),
DT::dataTableOutput(outputId = "id2")
)
server <- function(input, output) {
dataset <- callModule(
module = importDataServer,
id = "id1", ui_element = "actionButton",
labelize = FALSE)
output$id2 <- DT::renderDataTable({
req(dataset$trigger > 0)
dataset$object
})
}
print(shinyApp(ui, server))
}
if (interactive()) {
options(device.ask.default = FALSE)
ui <- fluidPage(
titlePanel("Import and visualize dataset"),
sidebarLayout(
sidebarPanel(
load_tingle(),
importDataUI(id = "id1"),
uiOutput("ui_SI_labels")
),
mainPanel(
DT::dataTableOutput(outputId = "id2")
)
)
)
server <- function(input, output) {
all_datasets <- reactiveValues()
datasets <- callModule(
module = importDataServer,
id = "id1", ui_element = "actionButton",
labelize = TRUE,
forbidden_labels = reactive(names(reactiveValuesToList(all_datasets))))
observeEvent(datasets$trigger, {
req(datasets$trigger > 0)
all_datasets[[datasets$name]] <- datasets$object
})
output$ui_SI_labels <- renderUI({
x <- reactiveValuesToList(all_datasets)
if (length(x) > 0) {
selectInput("SI_labels", label = "Choose dataset", choices = names(x))
}
})
output$id2 <- DT::renderDataTable({
req(input$SI_labels)
all_datasets[[input$SI_labels]]
})
}
print(shinyApp(ui, server))
}
}
|
c13d6abc4c727b427b4d041b34481370108c0f0e | 35e7853d8d521ce6d6b0561e395bc0ffb7c4a763 | /RCode/ShinyCode/Keywords/server.R | 522071afac420cd16e224b3ab864ed5bf3d53437 | [] | no_license | ingoldji/GroupProgrammingProject | de2907eca1e24684523594754f23cca5ec3a5f24 | 457bb4c09e4f550f8a8e1849628f66930757c189 | refs/heads/master | 2021-01-13T16:11:22.308367 | 2014-12-09T19:07:43 | 2014-12-09T19:07:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,956 | r | server.R | setwd("E:/R Shiny/KeyWords")
library(shiny)
library(ggplot2)
library(wordcloud)
library(RColorBrewer)
#Package: RColorBrewer is used to generate colors used in the plot
load("KeywordData.RData")
#server.r
shinyServer(
function(input, output) {
output$FinalPlot <- renderPlot({
Data <- switch(input$content,
"Software Skills"=KeywordData[[1]],
"Education Requirement"=KeywordData[[2]],
"Job Titles"=KeywordData[[3]])
data_to_plot <- Data[input$city,]
if (input$content=="Software Skills")
{counts <- data.frame(sort(apply(data_to_plot,2,sum)))
skills <- rownames(counts)
df <- data.frame(skills,counts)
colnames(df) <- c("skills","counts")
rownames(df) <- NULL
pal <- brewer.pal(9, "Set1")
wordcloud(df$skills,df$counts,scale=c(5,0.2),rot.per=.15,colors=pal,random.order=FALSE,max.words=Inf)
title(main = "Word Cloud of Software Skills Requirement",font.main= 1.2)}
else {
if (input$content=="Education Requirement")
{counts <- data.frame(apply(data_to_plot,2,sum))
education <- rownames(counts)
df <- data.frame(education,counts)
colnames(df) <- c("education","counts")
rownames(df) <- NULL
df['percent'] <- round(counts/sum(counts)*100,2)
for (i in 1:dim(df['percent'])[1])
{df['percent'][i,1] <- paste(df['percent'][i,1], "%", sep="")}
ggplot(df, aes(x="", y = counts, fill = education)) +
geom_bar(width = 1,stat="identity") +
coord_polar(theta = "y") +
geom_text(aes(x= rep(1.2,3),y =counts/3 + c(0, cumsum(counts)[-length(counts)]) ,label=percent),size=5,angle = 0) +
scale_fill_brewer() +
xlab(" ") +
ylab("Percent") +
ggtitle("Pie Chart of Education Requirement")}
else {
if (input$content=="Job Titles")
{counts <- data.frame(sort(apply(data_to_plot,2,sum)))
title <- factor(rownames(counts),levels=rownames(counts))
df <- data.frame(title,counts)
colnames(df) <- c("title","counts")
rownames(df) <- NULL
df['percent'] <- round(counts/sum(counts)*100,2)
for (i in 1:dim(df['percent'])[1])
{df['percent'][i,1] <- paste(df['percent'][i,1], "%", sep="") }
ggplot(df, aes( x= title, y = counts, fill=title)) +
geom_bar(stat = "identity") +
xlab("Job Title") +
ylab("Counts") +
ggtitle("Bar Plot of Job Titles")}
}}
})
})
|
222c723ef30d6a65c13d0ed94f2a3d2cd2613ccb | 5e88cabd66814e2edc394548f6c7d76c6511b41e | /tests/testthat/test-helper.R | 4d2ceb09d9e77d6347ffd815500dc1c817fdeb27 | [
"MIT"
] | permissive | EarthSystemDiagnostics/paleospec | ba7125c62946eba4302e1aaf20e1f7170262809d | bf2086b9d4adb5c657af3863d15745a730f9b146 | refs/heads/master | 2023-09-01T07:23:35.955702 | 2023-06-18T15:18:16 | 2023-06-18T15:18:16 | 223,199,924 | 0 | 0 | NOASSERTION | 2023-06-18T15:18:18 | 2019-11-21T15:02:33 | R | UTF-8 | R | false | false | 3,695 | r | test-helper.R | context("Helper functions")
# remove.lowestFreq()/remove.highestFreq() should drop the first/last
# iRemove entries from every component vector of the spectral object
# (including optional confidence-limit vectors), and be a no-op for
# iRemove = 0.
test_that("frequency removal works.", {
spec1 <- list(freq = 1 : 10, spec = rep(5, 10), dof = rep(1, 10))
# spec2 additionally carries upper/lower confidence limits.
spec2 <- list(freq = 1 : 10, spec = rep(5, 10), dof = rep(1, 10),
lim.1 = rep(6, 10), lim.2 = rep(4, 10))
actual1 <- remove.lowestFreq(spec1, iRemove = 0)
actual2 <- remove.lowestFreq(spec1, iRemove = 3)
actual3 <- remove.lowestFreq(spec2, iRemove = 3)
expect_equal(actual1, spec1)
# Expected: first three elements removed from every component.
expect_equal(actual2, lapply(spec1, function(x) {x[-(1 : 3)]}))
expect_equal(actual3, lapply(spec2, function(x) {x[-(1 : 3)]}))
actual1 <- remove.highestFreq(spec1, iRemove = 0)
actual2 <- remove.highestFreq(spec1, iRemove = 5)
actual3 <- remove.highestFreq(spec2, iRemove = 5)
expect_equal(actual1, spec1)
# Expected: only the first five elements kept in every component.
expect_equal(actual2, lapply(spec1, function(x) {x[1 : 5]}))
expect_equal(actual3, lapply(spec2, function(x) {x[1 : 5]}))
})
test_that("limit check works.", {
  # has.limits() should report TRUE only when BOTH confidence-limit
  # vectors (lim.1 and lim.2) are present on the spectral object.
  base_spec <- list(freq = 1, spec = 5, dof = 1)
  expect_false(has.limits(base_spec))
  one_limit <- c(base_spec, list(lim.1 = 6))
  expect_false(has.limits(one_limit))
  both_limits <- c(one_limit, list(lim.2 = 4))
  expect_true(has.limits(both_limits))
})
# is.spectrum() contract: with check.only = TRUE it returns FALSE for a
# malformed object; without it, it stops with a specific message. A valid
# object needs freq, spec and dof vectors of equal length (dof becomes
# optional when dof = FALSE is passed).
test_that("object check works.", {
spec <- "foo"
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Passed argument is not a spectral list object.")
spec <- list()
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Passed object has no frequency vector.")
spec <- list(freq = 1)
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Passed object has no spectral density vector.")
spec <- list(spec = 1)
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Passed object has no frequency vector.")
spec <- list(dof = 1)
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Passed object has no frequency vector.")
spec <- list(freq = 1, spec = 1)
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Passed object has no dof vector.")
# With dof = FALSE the dof vector is no longer required.
expect_true(is.spectrum(spec, check.only = TRUE, dof = FALSE))
expect_error(is.spectrum(spec, dof = FALSE), NA)
spec <- list(freq = 1, dof = 1)
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Passed object has no spectral density vector.")
spec <- list(spec = 1, dof = 1)
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Passed object has no frequency vector.")
spec <- list(freq = 1 : 2, spec = 1, dof = 1)
expect_false(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec),
"Frequency, PSD and DOF vectors have different lengths.")
# Fully valid object; expect_error(..., NA) asserts that NO error occurs.
spec <- list(freq = 1, spec = 1, dof = 1)
expect_true(is.spectrum(spec, check.only = TRUE))
expect_error(is.spectrum(spec), NA)
})
# Accessor helpers: frequency spacing, first/last frequency with a
# non-missing PSD value, length, and plain component getters.
test_that("small functions works.", {
f <- c(0.01, 0.05, 0.1, 0.2, 0.4, 0.5)
# NA entries exercise the *.existing() helpers, which must skip them.
s <- c(NA, 2, NA, 4, 5, NA)
d <- seq(10, -1, length.out = length(f))
# Mean frequency step (the grid above is deliberately non-uniform).
expect_equal(get.df(list(freq = f)), mean(diff(f)))
# Last / first frequency whose spec value is not NA.
expect_equal(get.fend.existing(list(freq = f, spec = s)), f[5])
expect_equal(get.fstart.existing(list(freq = f, spec = s)), f[2])
expect_equal(get.length(list(freq = f, spec = s)), length(f))
expect_equal(get.freq(list(freq = f, spec = s, dof = d)), f)
expect_equal(get.spec(list(freq = f, spec = s, dof = d)), s)
expect_equal(get.dofs(list(freq = f, spec = s, dof = d)), d)
})
|
d9eefe6ffc9c83302383b466b5518c31ef91fda3 | b1eeeee8330b8fac5f4b21217bfa1a7779cf8763 | /Analysis/AKI_7Day/Other plots/time_series_plot_HR.R | 90234a30bc30d13deedfda3c3301bb556395c7cb | [] | no_license | lasiadhi/Predictive-Models-for-Acute-Kidney-Injury | 4f1e38e7f2f52c4966c4488593859b707ac02c75 | f7ffd9a5b9263371e7d3ad15e680a2191e814c31 | refs/heads/master | 2021-06-25T21:26:44.220344 | 2020-11-11T03:02:55 | 2020-11-11T03:02:55 | 164,008,221 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,521 | r | time_series_plot_HR.R | # Lasith Adhikari
# Plotting time series data - HR
# Library
library(dygraphs)
library(xts)
library(mfp) ## Load mfp for automated fractional polynomials
library(Rmisc)
library(dplyr)
library(tseriesChaos)
library(scatterplot3d)
#######################################################
setwd("/run/user/2209058/gvfs/smb-share:server=ahcdfs.ahc.ufl.edu,share=files/dom/SHARE/2016_223 IDEALIST/ANALYTIC CORE/MySurgeryRisk PostOP V1.0/3 Users/Lasith")
# read data
time_series_data <- read.csv("Time_series/Clean_data/IntraOp_clean_data/HR_onlyIntraOp.csv",header = TRUE) # data
# filter and preprocess data
time_series_data$time_stamp <- as.POSIXct(time_series_data$time_stamp, format="%Y-%m-%d %H:%M:%S")
accts <- read.csv("Model/Data/aki7_drop_esrd1_patients/aki7Day_y_train.csv",header = TRUE) # accts
# filter accounts based on the outcome
accts_AKI <- accts[accts$aki7day==1,]
accts_noAKI <- accts[accts$aki7day==0,]
max_time <- 200
max_obser <- 200
# Draw one patient's heart-rate trace against elapsed time.
#
# time_in_mins : minutes since the patient's first observation
# data         : heart-rate observations (bpm)
# myflag       : TRUE for the first series, which must open a new plot
#                (matplot); later series are overlaid with matlines
# mycolor      : semi-transparent line colour (rgb with alpha)
#
# Relies on max_time / max_obser from the enclosing script for fixed axes.
# Bug fix: the original called matlines() unconditionally after matplot(),
# so the first series was drawn twice, doubling its apparent opacity
# relative to every other trace.
make_plot <- function(time_in_mins, data, myflag, mycolor){
  if (myflag) {
    matplot(time_in_mins, data, type = "l", lty = 1, col = mycolor,
            xlab = "Time (mins)", ylab = "Heart Rate (bpm)",
            xlim = c(0, max_time), ylim = c(0, max_obser))
  } else {
    matlines(time_in_mins, data, type = "l", lty = 1, col = mycolor,
             xlab = "Time (mins)", ylab = "Heart Rate (bpm)",
             xlim = c(0, max_time), ylim = c(0, max_obser))
  }
}
#svg('HR_ts_for100patients_AKI7_ci.svg')
############################ for no AKI patients
# dataframe to hold all data from no AKI patients
df_noAKI <- data.frame()
#df_noAKI['time_x'] <- as.numeric()
#df_noAKI['value_y'] <- as.numeric()
iter = 0
myflag = TRUE
for (acc_i in accts_noAKI$acc){
time_series_data_i <- time_series_data[which(time_series_data$acc == acc_i),]
if(dim(time_series_data_i)[1] > 400){
time_series_data_i$time_stamp <- (time_series_data_i$time_stamp - time_series_data_i$time_stamp[1])/60
mycolor = rgb(0,1,0,0.05)
make_plot(time_series_data_i$time_stamp, time_series_data_i$obser, myflag, mycolor)
df_noAKI <- rbind(df_noAKI, time_series_data_i)
myflag = FALSE
iter = iter + 1
}
if (iter == 100){
break
}
}
#################### for AKI patients
df_AKI <- data.frame()
iter = 0
for (acc_i in accts_AKI$acc){
time_series_data_i <- time_series_data[which(time_series_data$acc == acc_i),]
if(dim(time_series_data_i)[1] > 400){
time_series_data_i$time_stamp <- (time_series_data_i$time_stamp - time_series_data_i$time_stamp[1])/60
mycolor = rgb(1,0,0,0.03)
make_plot(time_series_data_i$time_stamp, time_series_data_i$obser, myflag, mycolor)
df_AKI <- rbind(df_AKI, time_series_data_i)
iter = iter + 1
}
if (iter == 100){
break
}
}
############## mean+CI for No AKI ####################
df_noAKI$time_stamp <- round(as.numeric(df_noAKI$time_stamp),1)
df_noAKI$obser <- as.numeric(as.character(df_noAKI$obser))
#x <- CI(as.numeric(as.character(df_noAKI$obser)), ci=0.95)
mean_data_noAKI <- group_by(df_noAKI, time_stamp)%>% summarise(mean = mean(obser, na.rm = TRUE), sd = sd(obser))
mytime <- seq(0,max_time, 0.01)
matlines(mean_data_noAKI$time_stamp, mean_data_noAKI$mean, type="l", lty=1, lwd=0.5, col=rgb(0,1,0,1), xlim = c(0, max_time), ylim = c(0,max_obser))
matlines(mean_data_noAKI$time_stamp[1:50 == 50], (mean_data_noAKI$mean + 1.96 * mean_data_noAKI$sd)[1:50 == 50], type="l", lty=2, lwd=1, col=rgb(0.156, 0.443, 0.243,1), xlim = c(0, max_time), ylim = c(0,max_obser))
matlines(mean_data_noAKI$time_stamp[1:50 == 50], (mean_data_noAKI$mean - 1.96 * mean_data_noAKI$sd)[1:50 == 50], type="l", lty=2, lwd=1, col=rgb(0.156, 0.443, 0.243,1), xlim = c(0, max_time), ylim = c(0,max_obser))
############## mean+CI for AKI ######################
df_AKI$time_stamp <- round(as.numeric(df_AKI$time_stamp),1)
df_AKI$obser <- as.numeric(as.character(df_AKI$obser))
mean_data_AKI <- group_by(df_AKI, time_stamp)%>% summarise(mean = mean(obser, na.rm = TRUE), sd = sd(obser))
matlines(mean_data_AKI$time_stamp, mean_data_AKI$mean, type="l", lty=1, lwd=0.5, col=rgb(1,0,0,1), xlim = c(0, max_time), ylim = c(0,max_obser))
matlines(mean_data_AKI$time_stamp[1:50 == 50], (mean_data_AKI$mean + 1.96 * mean_data_AKI$sd)[1:50 == 50], type="l", lty=2, lwd=1, col=rgb(1,0,0,1), xlim = c(0, max_time), ylim = c(0,max_obser))
matlines(mean_data_AKI$time_stamp[1:50 == 50], (mean_data_AKI$mean - 1.96 * mean_data_AKI$sd)[1:50 == 50], type="l", lty=2, lwd=1, col=rgb(1,0,0,1), xlim = c(0, max_time), ylim = c(0,max_obser))
legend("topright", c('Mean HR for AKI','Mean HR for No AKI', '95% CI for HR (AKI)', '95% CI for HR (No AKI)'), lty=c(1,1,2,2), lwd=c(1.5,1.5,1.5,1.5), col=c('red', rgb(0,1,0,1), rgb(1,0,0,1), rgb(0.156, 0.443, 0.243,1)))
################################################### Time delay embedding #########################################
## No AKI
gap <- 8
obs_ts <- ts(mean_data_noAKI$mean[1:gap == gap])
## AKI
obs_ts_AKI <- ts(mean_data_AKI$mean[1:gap == gap])
for (i in 1:20){
xyz <- embedd(obs_ts, m=3, d=10*i)
xyz_AKI <- embedd(obs_ts_AKI, m=3, d=10*i)
par(mfrow=c(2,1))
scatterplot3d(xyz, type="l", color= rgb(1,0,0,1), box=FALSE, main = 'NO AKI', angle = 30)
scatterplot3d(xyz_AKI, type="l", color= rgb(1,0,0,1), box=FALSE, main = 'AKI', angle = 30)
print(i*10)
Sys.sleep(4)
}
#dev.off()
# ################################# Automatically fit fractional polynomials for no AKI
# mfpOne <- mfp(formula = as.numeric(as.character(obser)) ~ fp(as.numeric(time_stamp), df = 4), data = df_noAKI)
# ## Check model for transformation
# #summary(mfpOne)
# #plot the model
# mytime <- seq(0,max_time, 0.01)
# y_obser <- predict(mfpOne, list(time_stamp = mytime),type="response")
# matlines(mytime, y_obser, type="l", lty=1, lwd=2, col=rgb(0,1,0,1), xlim = c(0, max_time), ylim = c(0,max_obser))
#
#
# ## Automatically fit fractional polynomials for AKI
# mfpOne <- mfp(formula = as.numeric(as.character(obser)) ~ fp(as.numeric(time_stamp), df = 4), data = df_AKI)
# ## Check model for transformation
# #summary(mfpOne)
# #plot the model
# #mytime <- seq(0,max_time, 0.01)
# y_obser <- predict(mfpOne, list(time_stamp = mytime),type="response")
# matlines(mytime, y_obser, type="l", lty=1, lwd=2, col=rgb(0,0,0,1), xlim = c(0, max_time), ylim = c(0,max_obser))
|
8f56bdfee3b6cc97ad1dad607c37b0e7d3b6b501 | 33fe14a5295a4c80672bed8000abb6869ec5d777 | /analysis/data.R | 476f4aa8637d2b7127655482c51051e12a2587a5 | [
"MIT",
"CC-BY-3.0"
] | permissive | mdscheuerell/AukeCoho | 82646357fdc793b25ac3456634e7e8085975ad92 | 9e1af77eb4617b221950841598d324afadf72ece | refs/heads/master | 2023-07-20T04:03:44.305818 | 2023-07-11T22:31:24 | 2023-07-11T22:31:24 | 120,044,965 | 3 | 0 | NOASSERTION | 2021-05-11T01:01:15 | 2018-02-03T00:12:44 | HTML | UTF-8 | R | false | false | 1,153 | r | data.R | hpc <- read.csv("hpc.csv")
# ---- Read raw inputs (hpc.csv was read just above) -------------------------
pdo <- read.csv("pdo.csv")      # PDO index records with Year and Value columns
ctemp <- read.csv("ctemp.csv")  # creek temperature log (date, temp)
# Annual totals of hatchery (HPC) releases.
colnames(hpc) <- c("year", "total")
hpc.sum.year <- with(hpc,aggregate(total, by=list(year), FUN = sum))
colnames(hpc.sum.year) <- c("Year", "Total HPC Release")
# Annual mean PDO; the last year is dropped (presumably incomplete, so the
# series aligns with the creek-temperature series below -- TODO confirm).
pdo.mean.year <- with(pdo, aggregate(Value, by=list(Year), FUN= mean))
colnames(pdo.mean.year) <- c("Year", "Mean PDO")
pdo.mean.year <- pdo.mean.year[-length(pdo.mean.year$Year),]
# Bug fix: `if (!require(pkg)) install.packages(pkg)` installs the package
# but never attaches it, so mdy()/year() below would still fail on a fresh
# machine; attach explicitly after installing.
if (!require(lubridate)) {
  install.packages('lubridate', repos = "http://cran.us.r-project.org")
  library(lubridate)
}
# Annual mean creek temperature, last (presumably incomplete) year dropped.
ctemp <- na.omit(ctemp)
colnames(ctemp) <- c("date", "temp")
ctemp$date <- mdy(ctemp$date)
ctemp$year <- year(ctemp$date)
ctemp.mean.year <- with(ctemp, aggregate(temp, by=list(year), FUN= mean))
colnames(ctemp.mean.year) <- c("Year", "Mean Creek Temp")
ctemp.mean.year <- ctemp.mean.year[-length(ctemp.mean.year$Year),]
# Assemble the analysis table and persist it in Excel and .rdata form.
dat <- cbind(hpc.sum.year, pdo.mean.year[,2], ctemp.mean.year[,2])
colnames(dat) <- c("Year","Total HPC Release", "Mean PDO", "Mean Creek Temp" )
# Same install-without-attach bug fixed for xlsx (needed by write.xlsx()).
if (!require(xlsx)) {
  install.packages('xlsx', repos = "http://cran.us.r-project.org")
  library(xlsx)
}
write.xlsx(dat, "CLEANDATA.xlsx")
save(dat, file = "CLEANDATA.rdata")
|
d8ae5238f9579663ecb90802cdbef381d0f3708b | f8ec609f34e9de8e36f984202124aeab04567cf6 | /man/qbic.Rd | 6d5dc273e2bc3e2bc108bd737d9dc15741578a04 | [] | no_license | jsliu/rqPen | 997e508499a3d2321f8f99dc26b65ebd74376fb8 | 727348ae1d087d588da67f9cea1f3d7633429bcc | refs/heads/master | 2020-05-31T20:05:55.157880 | 2019-05-01T20:20:03 | 2019-05-01T20:20:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 937 | rd | qbic.Rd | \name{qbic}
\alias{qbic}
\title{Quantile Regression BIC}
\usage{
qbic(model, largeP=FALSE)
}
\arguments{
\item{model}{Model of class "rqPen".}
\item{largeP}{Large P version using an additional penalty factor of log(s) where "s" is the total number of covariates considered.}
}
\value{
Numeric value representing BIC of selected model.
}
\description{
Quantile regression BIC with large p alternative as described in Lee, Noh and Park (2013).
}
\examples{
x <- matrix(rnorm(800),nrow=100)
y <- 1 + x[,1] - 3*x[,5] + rnorm(100)
l_model <- rq.lasso.fit(x,y, lambda=1)
nc_model <- rq.nc.fit(x,y, lambda=1)
qbic(l_model)
qbic(nc_model)
qbic(l_model, largeP=TRUE)
qbic(nc_model, largeP=TRUE)
}
\references{
[1] Lee, E., Noh, H. and Park, B. (2014). Model selection via Bayesian Information Criterion for quantile regression models., \emph{J. Am. Statist. Ass}, \bold{109}, 216--229.
}
\author{Ben Sherwood} |
18e1f60c86100b2b0920930e325bd356efb1e2c4 | b27f3ca9fb38ee017c82a3308ba774f20070cc6d | /code/VoxelWrapperModels/ResultNotebooks/CBF_Results.R | 06cfd04190ac88e605aaba13c89931849f89a344 | [] | no_license | PennLINC/isla | ef085f563827c50ed51a12f2085ca4589c71f75a | 24e0a252b0c47cca0a458a3a82aaedad16c65187 | refs/heads/master | 2022-04-19T19:40:47.046576 | 2020-04-21T19:29:44 | 2020-04-21T19:29:44 | 151,718,662 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,630 | r | CBF_Results.R | #' ---
#' title: "Multivariate Voxelwise `gam()` Results: CBF"
#' author: "Tinashe M. Tapera"
#' date: "2019-02-15"
#' ---
#+ setup
suppressPackageStartupMessages({
library(tidyr)
library(dplyr)
library(knitr)
library(ggplot2)
library(magrittr)
library(stringr)
library(oro.nifti)
library(purrr)
library(RColorBrewer)
})
print(paste("Last Run:", format(Sys.time(), '%Y-%m-%d')))
#' # Summarising Results of the ISLA Voxelwise Models
#'
#' Here we visualise the results of the isla voxelwise models. These models were of the form:
#'
#' `Y ~ s(age) + s(age,by=sex) + sex + pcaslRelMeanRMSMotion`
#'
#' We compare the output of the models where `Y` is:
#
#' 1. Raw CBF
#' 2. Smoothed CBF with a smoothing kernel of 3mm
#' 3. Smoothed CBF with a smoothing kernel of 4mm
#' 4. ISLA CBF with a kernel radius of 3mm
#' 5. ISLA CBF with a kernel radius of 4mm.
#+ gather data
results_dir <- "/data/jux/BBL/projects/isla/results/VoxelWrapperModels/imco1"
rawcbf_dir <- file.path(results_dir, "raw_cbf")
smoothed3_dir <- file.path(results_dir, "rawSmoothedCBF_3")
smoothed4_dir <- file.path(results_dir, "rawSmoothedCBF_4")
isla3_dir <- file.path(results_dir, "cbf3")
isla4_dir <- file.path(results_dir, "cbf4")
images_df <- c(
rawcbf_dir,
smoothed3_dir,
smoothed4_dir,
isla3_dir,
isla4_dir) %>%
tibble(path = .) %>%
group_by(path) %>%
mutate(
images_paths = map(
.x = path,
.f = list.files,
pattern = "fdr",
recursive = TRUE,
full.names = TRUE)
) %>%
unnest()
#'
#' Read in the Niftis and the mask
#'
#+ read in
images_df <- images_df %>%
mutate(
variable = str_extract(
string = images_paths,
pattern = "(?<=fdr_)(.*)(?=\\.nii)") %>%
str_replace(pattern = "sage", "s(age)") %>%
str_replace(pattern = "and", " by ") %>%
str_replace(pattern = "\\.L", "") %>%
str_replace(pattern = "2", ""),
nifti = map(images_paths, readNIfTI, reorient = FALSE),
Y = str_extract(string = path, pattern = "(?<=imco1/).*$")
)
mask <- "/data/jux/BBL/projects/isla/data/Masks/gm10perc_PcaslCoverageMask.nii.gz"
mask_img <- readNIfTI(mask)
maskdat <- img_data(mask_img)
#'
#' Below is a helper function to extract the data from a nifti and get the proportion of significant and non-significant voxels at $p < 0.05$. Then, we apply the function
#'
# Tabulate significant vs non-significant voxels for one FDR p-value map.
#
# nif      : a nifti object holding FDR-corrected p-values
# variable : label of the covariate this map belongs to
# mask     : voxel mask; only voxels where mask != 0 are counted
#            (defaults to the script-level `maskdat`)
#
# Returns a data.frame with columns Significant (FALSE/TRUE for p < 0.05),
# Freq (voxel count) and Covariate.
returnFDR <- function(nif, variable, mask = maskdat) {
  pvals <- img_data(nif)
  masked <- pvals[mask != 0]
  out <- data.frame(table(masked < 0.05))
  out <- rename(out, Significant = Var1)
  out$Covariate <- variable
  out
}
results <- images_df %>%
group_by(Y, variable) %>%
mutate(results = list(map2_df(nifti, variable, returnFDR))) %>%
ungroup() %>%
select(Y,results) %>%
unnest()
#'
#' Also, a helper function to help label the plot:
#'
y_names <- list(
"cbf3" = "Y: ISLA CBF 3",
"cbf4" = "Y: ISLA CBF 4",
"raw_cbf" = "Y: Raw CBF",
"rawSmoothedCBF_3" = "Y: Raw Smoothed CBF 3",
"rawSmoothedCBF_4" = "Y: Raw Smoothed CBF 4"
)
# Facet labeller: map each facet value (the Y directory name) to its
# human-readable title via the script-level `y_names` lookup list.
my_labels <- function(variable, value){
  y_names[value]
}
#'
#' Now, plot:
#'
results %>%
ggplot(aes(x = Covariate, y = Freq)) +
geom_bar(aes(fill = Significant), stat = "identity") +
theme_minimal() +
labs(
title = "Number of Significant Voxels Per Covariate",
y = "Frequency of Voxels",
subtitle = "FDR Corrected Voxelwise GAM",
caption = "Model: Y~s(age)+s(age,by=sex)+sex+pcaslRelMeanRMSMotion") +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
scale_fill_brewer(palette = "Set1") +
facet_wrap(~Y, labeller = my_labels)
|
c5836894fbc71fe52666f8b37a2c4f76f74f4168 | 24415cd8d6f4d92af14d07e905cb11f007c0ebfd | /R/shinyghap.R | 9c5b4ea3dc58798825e9afa79474886f08650a04 | [] | no_license | yonicd/shinyghap | 428d2edeccfbbee276d376d81dfed221fd76eded | e27cc1b42bb0d68ff0fe9f724151856ed17bcd58 | refs/heads/master | 2021-07-14T13:19:33.398017 | 2017-10-18T17:03:55 | 2017-10-18T17:03:55 | 107,418,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,341 | r | shinyghap.R | createDB <- function(MYDIR) {
# Body of createDB(MYDIR): (re)build the queryBuilder filter spec and the
# SQLite staging table from the `meta_ghap` data.frame.
# NOTE(review): load() below pulls filters.Rdata into the function
# environment, but `filters` is unconditionally recomputed and re-saved a
# few lines later, so the load looks like dead code -- confirm before
# removing.
if( file.exists(file.path(MYDIR,'filters.Rdata')) )
load(file.path(MYDIR,'filters.Rdata'))
# Reorder columns so STUDY_TYPE is listed first in the filter UI.
nm <- names(meta_ghap)
meta_ghap <- meta_ghap[,c('STUDY_TYPE',nm[nm!='STUDY_TYPE'])]
# Every character column gets a selectize dropdown in the query builder.
col_opts <- sapply(names(meta_ghap)[sapply(meta_ghap,class)=='character'],function(x) list(type='select',plugin='selectize'),simplify = FALSE)
filters <- queryBuildR::getFiltersFromTable(data = meta_ghap,column_opts = col_opts)
save(file=file.path(MYDIR,'filters.Rdata'),filters)
# Stage the metadata in SQLite so loadData() can run SQL filters over it.
datadb <- DBI::dbConnect(RSQLite::SQLite(), file.path(MYDIR,"data/data.db"))
DBI::dbWriteTable(datadb,"datatable",meta_ghap,row.names=F,overwrite=TRUE)
DBI::dbDisconnect(datadb)
}
# Query the staged metadata table, optionally restricted by a WHERE clause.
#
# sql   : filter expression produced by the queryBuilder widget, or "" for
#         no filtering. NOTE(review): the expression is pasted directly
#         into the SQL statement (no parameterization); it is assumed to
#         come only from the trusted queryBuilder UI -- never feed it
#         user-supplied free text.
# MYDIR : working directory holding data/data.db (see createDB()).
#
# Returns a data.frame of matching rows from the "datatable" table.
loadData <- function(sql,MYDIR) {
  if (sql != "") {
    sql <- paste0("where ", sql)
  }
  datadb <- DBI::dbConnect(RSQLite::SQLite(), file.path(MYDIR, "data/data.db"))
  # Guarantee the connection is released even if the query errors
  # (the original leaked the handle on failure).
  on.exit(DBI::dbDisconnect(datadb), add = TRUE)
  DBI::dbGetQuery(datadb, paste0("select * from datatable ", sql))
}
# Count distinct subjects per study for the rows of the current metadata
# query.
#
# current_query : data.frame with at least STUDY_TYPE, DOMAIN, STUDY_ID and
#                 STUDY_VARIABLE columns (output of loadData()).
#
# For each (STUDY_TYPE, DOMAIN) the study variables are folded into an
# "<var> IS NOT NULL AND ..." predicate and the study ids into an SQL IN
# list; per-study subject counts are then read from the longitudinal or
# cross-sectional SQLite store depending on STUDY_TYPE.
#
# Returns a data.frame of STUDY_ID / SUBJID_N rows per (STUDY_TYPE, DOMAIN).
#
# NOTE(review): the SQL is assembled by string pasting from metadata
# values; assumed safe because the values come from the packaged metadata
# table. Also note dbGetQuery below assumes the store matching each
# STUDY_TYPE exists (long_db/cross_db are only created conditionally).
get_study_n <- function(current_query){
  n_summ <- current_query %>%
    select_(STUDY_TYPE, DOMAIN, STUDY_ID, VARIABLE = STUDY_VARIABLE) %>%
    distinct %>%
    group_by(STUDY_TYPE, DOMAIN, STUDY_ID) %>%
    summarise_at(funs(paste0(sprintf('%s IS NOT NULL', .), collapse = ' AND ')), .vars = vars(VARIABLE)) %>%
    group_by(STUDY_TYPE, DOMAIN, VARIABLE) %>%
    summarise_at(funs(paste0(sprintf("'%s'", .), collapse = ',')), .vars = vars(STUDY_ID))
  if (file.exists('../data/ghap_longitudinal.sqlite3'))
    long_db <- DBI::dbConnect(RSQLite::SQLite(), "../data/ghap_longitudinal.sqlite3")
  if (file.exists('../data/ghap_cross_sectional.sqlite3'))
    cross_db <- DBI::dbConnect(RSQLite::SQLite(), "../data/ghap_cross_sectional.sqlite3")
  # Bug fix: the original read "get_n < -n_summ%>%...", i.e. a comparison
  # against the negated pipeline rather than an assignment, so `get_n` was
  # never created and the final select(-VARIABLE) failed with
  # "object 'get_n' not found".
  get_n <- n_summ %>% ddply(.(STUDY_TYPE, DOMAIN, VARIABLE), .fun = function(x){
    q <- sprintf("select STUDYID as STUDY_ID, count(DISTINCT SUBJID) as SUBJID_N from %s WHERE %s AND STUDY_ID IN (%s) GROUP BY STUDY_ID", x$DOMAIN, x$VARIABLE, x$STUDY_ID)
    if (x$STUDY_TYPE == 'Longitudinal') {
      DBI::dbGetQuery(conn = long_db, q)
    } else {
      DBI::dbGetQuery(conn = cross_db, q)
    }
  }, .progress = 'text')
  DBI::dbDisconnect(long_db)
  DBI::dbDisconnect(cross_db)
  get_n %>% select(-VARIABLE)
}
#' @title Shinyapp to navigate and maintain GHAP repositories
#' @description Shinyapp to navigate and maintain GHAP repositories run from console or launch from addin menu
#' @param viewer Where to open the application can be dialogViewer, browserViewer or paneViewer , Default: shiny::dialogViewer()
#' @return nothing
#' @details DETAILS
#' @examples
#' if(interactive()){
#' use_ghap()
#' }
#' @rdname use_ghap
#' @export
#' @import dplyr
#' @importFrom jsonlite fromJSON
#' @importFrom jsTree renderJsTree jsTree jsTreeOutput
#' @importFrom miniUI miniPage gadgetTitleBar miniTitleBarButton miniContentPanel
#' @importFrom reshape2 dcast
#' @import shiny
#' @importFrom vcs ls_remote diff_head
#' @importFrom ghap use_study get_git_base_path
use_ghap <- function(viewer = shiny::dialogViewer(dialogName = 'GHAP',width = 3000,height = 2000)){
MYDIR <- file.path(tempdir(),'mydir')
if( !dir.exists(MYDIR) ){
dir.create(MYDIR)
dir.create(file.path(MYDIR,'data'))
DBI::dbConnect(RSQLite::SQLite(), file.path(MYDIR,"data/data.db"))
}
createDB(MYDIR)
ui <- miniUI::miniPage(
miniUI::gadgetTitleBar('Search and Maintain GHAP Repositories',
left = miniUI::miniTitleBarButton(inputId = "qt","Quit",primary = TRUE),
right=NULL),
miniUI::miniContentPanel(
shiny::fluidPage(
shiny::sidebarLayout(
shiny::sidebarPanel(
shiny::h3('Define and apply filters'),
queryBuildR::queryBuildROutput('queryBuilderWidget',height='100%'),
shiny::actionButton('queryApply', label = 'Apply filters'),
shiny::tags$script("
function getSQLStatement() {
var sql = $('#queryBuilderWidget').queryBuilder('getSQL', false);
Shiny.onInputChange('queryBuilderSQL', sql);
};
document.getElementById('queryApply').onclick = function() {getSQLStatement()}
"),
shiny::tags$h4('Query string applied to Study Meta Information'),
shiny::textOutput('sqlQuery'),
shiny::span(shiny::uiOutput('zero_out'),style="color:red"),
shiny::hr(),
shiny::uiOutput('chk_n'),
shiny::uiOutput('chk_complete_rows'),
shiny::uiOutput('btn_copy'),
shiny::uiOutput('btn_md'),
shiny::uiOutput('chk_tree'),
shiny::conditionalPanel('input.chk_tree==true',
shiny::uiOutput('study_choose'),
shiny::uiOutput('btn_tree'),
shiny::uiOutput('tree_show')
),
width=4
),
shiny::mainPanel(
shiny::fluidRow(
shiny::uiOutput('table_tag'),
DT::dataTableOutput('study_select'),
shiny::h1(''),
shiny::tags$h3('Study Meta Information'),
shiny::tags$h4('Use this table to select columns and search terms for the query builder in the left side panel.'),
shiny::helpText('This table contains unique rows by (Study Type, Domain, Study ID, Study Variable).
Additionally there is meta-information for Study Variables, Studies and Repositories in the additional columns.'),
shiny::helpText('The table may be searched globally (search field above the table to the right) or by column (search fields above each row),
and it can be copied to the clipboard or exported to a csv file (application must be running in a web browser)'),
DT::dataTableOutput('table'),
shiny::tags$div(class='extraspace')
),
width=8
)
)
)
))
server <- function(input, output,session) {
sessionvalues <- reactiveValues()
network <- reactiveValues()
sessionvalues$data <- loadData(sql = '',MYDIR = MYDIR)
observe({
if ( length(input$queryBuilderSQL)>0 )
sessionvalues$data<-loadData(input$queryBuilderSQL,MYDIR = MYDIR)
})
output$sqlQuery <- renderText({
sql <- ''
if ( length(input$queryBuilderSQL)>0 ) {
if ( input$queryBuilderSQL!='' )
sql <- paste0('where ', input$queryBuilderSQL)
}
paste0('select * from datatable ',sql)
})
output$queryBuilderWidget <-queryBuildR::renderQueryBuildR({
data <- sessionvalues$data
load(file.path(MYDIR,'filters.Rdata'))
rules <- NULL
queryBuildR::queryBuildR(filters)
})
output$table <- DT::renderDataTable({
data <- sessionvalues$data
colnames(data) <- as.vector(sapply(colnames(data),function(x) gsub('[_.]',' ',x)))
action <- DT::dataTableAjax(session, data,rownames=F)
DT::datatable(data, rownames=F,
extensions = c('Buttons', 'Scroller', 'ColReorder', 'FixedColumns'),
filter = 'top',
options = list(
dom= c('Bfrtip'),
ajax = list(url = action),
deferRender = TRUE,
scrollX = TRUE,
pageLength = 50,
scrollY = pmin(100+500*(nrow(data)/50),500),
scroller = TRUE,
colReorder = TRUE,
fixedColumns = TRUE,
buttons = c('copy', 'csv', 'colvis')
)
)
}, server = TRUE)
observeEvent(input$btn_copy,{
y <- loadData(input$queryBuilderSQL,MYDIR = MYDIR)
y <- y%>%dplyr::count(STUDY_TYPE,STUDY_ID)
y_copy <- y$STUDY_ID[input$study_select_rows_all]
if( length(input$study_select_rows_selected)>0 )
y_copy <- y_copy[input$study_select_rows_selected]
writeClipboard(y_copy)
})
observeEvent(input$btn_md,{
out <- loadData(input$queryBuilderSQL,MYDIR = MYDIR) %>%
dplyr::mutate(COLS = sprintf("%s\n[%s]",STUDY_VARIABLE_DESCRIPTION,STUDY_VARIABLE),val=1) %>%
reshape2::dcast(STUDY_TYPE + STUDY_ID + DOMAIN ~ COLS,value.var='val')
if( input$complete )
out <- out %>%
dplyr::filter_(~complete.cases(.))
out <- out[input$study_select_rows_all,]
out_copy_md <- knitr::kable(out)
writeClipboard(out_copy_md)
})
observeEvent(input$queryBuilderSQL,{
y <- loadData(input$queryBuilderSQL,MYDIR = MYDIR)
if( nrow(y)==0 ){
output$zero_out<-shiny::renderText('Query matched zero rows')
}else{
output$zero_out<-shiny::renderText('')
if( nrow(y)<nrow(meta_ghap) ){
output$table_tag<-renderUI({
list(shiny::tags$h3('Studies Query Output'),
shiny::helpText('Table contains unique rows of (Study Type and Study ID) and
columns containing the STUDY VARIABLE DESCRIPTION [STUDY VARIABLE NAME] that are result of the query.
A value of 1 indicates that the study contains this column.'),
shiny::helpText('The table may be searched globally (search field above the table to the right) or by column (search fields above each row),
and it can be copied to the clipboard or exported to a csv file (application must be running in a web browser)')
)
})
output$btn_copy <- renderUI({
list(shiny::actionButton(inputId = 'btn_copy',label = 'Copy List of Studies to clipboard'),
shiny::helpText('Use this button to copy to the clipboard the list of studies seen on
the Studies Query Output table, if any rows are clicked/highlighted on
the table only the highlighted ones will be copied')
)
})
output$btn_md <- renderUI({
list(shiny::actionButton(inputId = 'btn_md',label = 'Copy table as markdown to clipboard'),
shiny::helpText('Use this button to copy to the clipboard the table seen on
the Studies Query Output table as a markdown table to paste into documents or emails.')
)
})
output$study_select <-
DT::renderDataTable({
out <- y %>%
dplyr::mutate(COLS = sprintf("%s\n[%s]",STUDY_VARIABLE_DESCRIPTION,STUDY_VARIABLE),val=1) %>%
reshape2::dcast(STUDY_TYPE + STUDY_ID + DOMAIN ~ COLS,value.var='val')
if( dir.exists('../data') ){
if( input$get_n ){
study_n <- get_study_n(y)
out <- out%>%left_join(study_n,by=c('STUDY_TYPE','DOMAIN','STUDY_ID'))
}}
if( input$complete )
out <- out %>%
dplyr::filter_(~complete.cases(.))
DT::datatable(out,
extensions = c('Buttons', 'Scroller', 'ColReorder', 'FixedColumns'),
filter = 'top',
options = list(
deferRender = TRUE,
scrollX = TRUE,
pageLength = 50,
scrollY = pmin(100+500*(nrow(out)/50),500),
scroller = TRUE,
dom = 'Bfrtip',
colReorder = TRUE,
fixedColumns = TRUE,
buttons = c('copy', 'csv', 'colvis')
))
})
}
}
})
# Checkbox: restrict the Studies Query Output table to complete cases.
output$chk_complete_rows <- renderUI({
  list(shiny::checkboxInput('complete','Show only complete cases'),
       shiny::helpText('Use this checkbox to filter the Studies Query Output table to show only studies that have all the columns'))
})
# Checkbox: add per-study subject counts. Only rendered when the local
# data directory exists (renderUI returns NULL otherwise).
output$chk_n <- renderUI({
  if( dir.exists('../data') )
    list(shiny::checkboxInput('get_n','Show number of subjects per study conditional on query result',value = FALSE),
         shiny::helpText('Use this checkbox to add an additional column to the Studies Query Output table that shows how many unique subjects are in the columns indicated for each study')
    )
})
# Checkbox: toggle the repository file-tree visualization. Only rendered
# when a local git base path is configured.
output$chk_tree <- renderUI({
  if( dir.exists(ghap::get_git_base_path()) )
    list(shiny::hr(),
         shiny::checkboxInput('chk_tree','Visualization of Study Repository Contents',value = FALSE)
    )
})
# Keep the server-side copy of the jsTree selection in sync with the
# client-side tree widget (selection arrives as a JSON string).
observeEvent(input$tree_update,{
  current_selection <- input$tree_update$.current_tree
  if( !is.null(current_selection) )
    network$tree <- jsonlite::fromJSON(current_selection)
})
# React to the study selection: if the study repo has not been cloned yet
# (no .git directory), show a "Fetch Study" button; otherwise show an
# "Update Study" button plus a browsable jsTree of the repository contents.
observeEvent(input$study_tree,{
  basepath <- normalizePath(get_git_base_path(),winslash = '/')
  dirOutput <- file.path(basepath,'HBGD',input$study_tree)
  dirGit <- file.path(dirOutput,'.git')
  if( !dir.exists(dirGit) ){
    output$btn_tree <- renderUI({
      list(shiny::actionButton('btn_tree','Fetch Study'),
           shiny::helpText('Press the Fetch Study button to retrieve
                           the file directory structure of the study repository
                           chosen in the field above.')
      )
    })
    # No tree to show until the repo is fetched.
    output$tree_show <- renderUI({
      shiny::p('')
    })
  }else{
    output$btn_tree <- renderUI({
      list(shiny::actionButton('btn_tree','Update Study'),
           shiny::helpText('Navigate the tree by clicking on folders to open them or using
                           the search field above the tree,
                           and choose which files to retrieve from the GHAP repository by
                           checking next to a folder or a file.
                           If there are already files in fetched from the repository
                           they will be prechecked for you, uncheck them to remove files.
                           Press on the Update Study button to invoke the update.'))
    })
    # Tooltips shown on the top-level repository folders.
    tips.folders=c(adam='Analysis-Ready data derived form SDTM data sets (USERS: ALL)',
                   docs='Study documentation (USERS: ALL)',
                   fmt='SAS format files (USERS: Data Management)',
                   import='Raw metadata submitted by Prinicipal Investigators (USERS: Data Management)',
                   jobs='SAS programs for creating SDTM datasets',
                   raw='Data submitted by Prinicipal Investigators (USERS: Data Management)',
                   sdtm='Data in HBGDki Standardized format (USERS: Data Scientists)'
    )
    basepath <- normalizePath(get_git_base_path(),winslash = '/')
    dirOutput <- file.path(basepath,'HBGD',input$study_tree)
    dirGit <- file.path(dirOutput,'.git')
    # Render the repository tree; files already checked out are pre-checked
    # via the diff against HEAD.
    output$tree <- jsTree::renderJsTree({
      path <- file.path(basepath,'HBGD',input$study_tree)
      if( dir.exists(path) ){
        obj <- vcs::ls_remote(path = path,vcs='git')
        jsTree::jsTree(obj = obj,
                       remote_repo = input$study_tree,vcs='git',
                       tooltips = tips.folders,
                       nodestate = vcs::diff_head(path,vcs='git',show = FALSE))
      }else{
        shiny::p('')
      }
    })
    output$tree_show<-renderUI({
      jsTree::jsTreeOutput(outputId = 'tree')
    })
  }
})
# Rebuild the study picker whenever the session data changes; the choices
# are repository names grouped by short study id.
observeEvent(sessionvalues$data,{
  output$study_choose <- renderUI({
    study <- sessionvalues$data%>%count(STUDY_ID_SHORT,STUDY_REPOSITORY_NAME)
    study_split <- split(study$STUDY_REPOSITORY_NAME,study$STUDY_ID_SHORT)
    shiny::selectInput(inputId = 'study_tree',label = 'Select Study to Preview',choices = study_split,selected = study_split[1])
  })
})
# Fetch/update the selected study via sparse checkout. When the repo
# already exists, the file queries are derived from the user's current
# jsTree selection; on first fetch only '*.txt' files are pulled.
observeEvent(input$btn_tree,{
  basepath <- normalizePath(get_git_base_path(),winslash = '/')
  dirOutput <- file.path(basepath,'HBGD',input$study_tree)
  dirGit <- file.path(dirOutput,'.git')
  study <- sessionvalues$data%>%count(STUDY_ID_SHORT,STUDY_REPOSITORY_NAME)
  study_name <- study$STUDY_ID_SHORT[which(study$STUDY_REPOSITORY_NAME==input$study_tree)]
  f2 <- '*.txt'
  if( length(f2)>0 ){
    if( dir.exists(dirGit) ){
      # Strip the "<repo>/master" prefix from the selected tree paths.
      f2 <- gsub(sprintf('%s/%s',input$study_tree,'master'),'',network$tree)
      ghap::use_study(study_name,
                      queries=f2,
                      create = !'sparse-checkout'%in%basename(dir(dirGit,recursive = TRUE)),
                      append = FALSE)
    }else{
      ghap::use_study(study_name,queries=f2)
    }
  }
})
# Quit: remove the temporary working directory and stop the gadget.
shiny::observeEvent(input$qt,{
  unlink(MYDIR,recursive = TRUE)
  shiny::stopApp()
})
}
shiny::runGadget(ui, server, viewer = viewer)
} |
5c6139bb6b3cb755e22c05f7d016751f04057502 | 89b6cdab64df3ceb04786d715a61d6bd29484fe7 | /R/digits_svm_hpc.R | 1bf38ca11ddb2cc6bf91fb3b593c03ce196767a2 | [] | no_license | UtrechtUniversity/Workshop-IRAS | a7b39ae6c6de68f0b55f9088c02001d3fd41a2ce | 84e4887bc53de14b2a93bc37aa9874757216224a | refs/heads/master | 2020-04-08T22:41:25.915375 | 2019-01-22T09:05:38 | 2019-01-22T09:05:38 | 159,797,157 | 0 | 0 | null | 2019-01-21T19:52:50 | 2018-11-30T09:06:22 | R | UTF-8 | R | false | false | 2,625 | r | digits_svm_hpc.R | # Recognize written digits with Support Vector Machines
#
# Call:
# Rscript ./R/digits_svm_hpc.R -C <value for 'cost'> -G <value for 'gamma'>
#
# Arguments:
# -C: Penalty parameter C of the error term.
# -G: Kernel coefficient.
#
# Authors: Roel Brouwer, Kees van Eijden, Jonathan de Bruin
#
# Dependencies:
# License: BSD-3-Clause
#
library("getopt")
library("e1071")
library("raster")
library("tidyverse")
# `getopt` parses the arguments given to an R script:
#   Rscript --[options] script.R [arguments]
# Example: Rscript digits_svm_shell.R -C 0.5 -G 0.01
# Each spec row is: long name, short flag, 1 = argument required, type.
opt <- getopt::getopt(
  matrix(
    c('gamma', 'G', 1, "numeric",
      'cost', 'C', 1, "numeric"
    ),
    byrow=TRUE, ncol=4))
# Default values for options not specified.
# Depending on the situation at hand you could/should also check if given values are allowed.
if (is.null(opt$gamma)) {opt$gamma = 2^-3}
if (is.null(opt$cost)) {opt$cost = 2^-1}
# The digits dataset (train dataset): columns 1-64 are pixel features,
# column 65 is the digit label.
train_set <- read.csv("data/digits_trainset.csv", header= FALSE)
train_images <- train_set[1:64]
train_targets <- as.factor(train_set[[65]])
# The digits dataset (test dataset), same layout as the training set.
test_set <- read.csv("data/digits_testset.csv", header= FALSE)
test_images <- test_set[1:64]
test_targets <- as.factor(test_set[[65]])
# Fit a Support Vector Classifier with the requested hyper-parameters.
# scale = FALSE: pixel features share a common scale already.
model <- e1071::svm(x = train_images,
                    y = train_targets,
                    gamma = opt$gamma,
                    cost = opt$cost,
                    scale = FALSE)
# Predict the value of the digit on the test dataset.
prediction <- predict(object = model,
                      newdata = test_images)
# Accuracy measure to evaluate the result: proportion of test digits
# predicted correctly.
# BUG FIX: the previous table()-based computation
#   agreement <- table(prediction == test_targets)
#   accuracy <- agreement[2]/(agreement[1] + agreement[2])
# returned NA whenever the table had a single cell (all predictions
# correct, or all wrong), because agreement[2] did not exist. Taking the
# mean of the logical comparison gives the same value in the normal case
# and stays well-defined at the boundaries.
accuracy <- mean(prediction == test_targets)
# Store results in a data frame (tibble) and
# write that data frame to a '.csv' file.
# NOTE(review): as_data_frame() is deprecated in newer tibble releases;
# tibble::tibble() is the modern equivalent.
trial <- as_data_frame (
  x = list(cost = opt$cost,
           gamma = opt$gamma,
           accuracy = accuracy))
# It's good practice to use the hyper-parameter values in the name of the
# output file and to store the output files in a dedicated subfolder.
#
if (!dir.exists("output")) { dir.create("output") }
output_file <- file.path("./output",
                         sprintf("digits_svm_C_%f_G_%f.csv",
                                 opt$cost,
                                 opt$gamma))
# Write the result of this trial to the file with the parameter setting.
#
write.csv(trial, output_file, row.names= FALSE)
# end of program
|
f451c6315be64a556411fe0105aa2a6e0dd0679a | 3d098f9116c36e0bf9ea330df3ae89e3c48f9d1d | /~code/Hartford/HF - 04 - prediction for Hartford (Open).R | 8438439c67dbe763e31985a66ded8357ffccab81 | [] | no_license | Louisville-Scooters/Practicum | bd131bba2f3ad93f376f978ab8c7d075c28f4e32 | 5b5e91a65728483d83a921884257c21212c41f19 | refs/heads/master | 2020-12-19T23:55:36.513038 | 2020-05-12T04:02:24 | 2020-05-12T04:02:24 | 235,890,049 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,460 | r | HF - 04 - prediction for Hartford (Open).R | HF_spatial_census_RDS <- file.path(data_directory, "~RData/Hartford/HF_spatial_census")
# Load the cached Hartford census/spatial features and LODES jobs data,
# then join them by census tract GEOID.
HF_spatial_census <- readRDS(HF_spatial_census_RDS)
HF_LODES_RDS <- file.path(data_directory, "~RData/Hartford/HF_LODES")
HF_LODES <- readRDS(HF_LODES_RDS)
HF_model <- merge(HF_spatial_census, HF_LODES, by.x = 'GEOID', by.y = 'geocode')
# Drop geometry and normalize names to upper case so column names line up
# with the training data used for the model below.
HF_model <- HF_model %>%
  st_set_geometry(NULL)
HF_model <- HF_model %>%
  rename_all(toupper)
HF_model <- HF_model %>% dplyr::select(-c(MEAN_COMMUTE_TIME, CENTROID_X, CENTROID_Y),-starts_with('DENSITY'), -starts_with('COUNT'), -ends_with('LENGTH'))
# HF_model_RDS <- file.path(data_directory, "~RData/Hartford/HF_model")
# saveRDS(HF_model,
#         file = HF_model_RDS)
# HF_model <- readRDS(HF_model_RDS)
# Train a random forest on the pooled training data and score Hartford.
# NOTE(review): `Model_clean` is not defined in this script — presumably
# created by an earlier script in the pipeline; confirm before running
# stand-alone.
library(randomForest)
model1 <- randomForest(ORIGINS_CNT ~ ., data = Model_clean %>% dplyr::select(-CITY, -race),
                       ntree = 1000,
                       mtry = 2, engine = 'ranger', importance = TRUE)
HF_model <- HF_model %>%
  mutate(Predicted.CNT = round(predict(model1, HF_model, type = "class"),0))
HF_result <- merge(HF_Census_geoinfo, HF_model %>% dplyr::select(GEOID, Predicted.CNT), on='GEOID')
###### race content ##########
# NOTE(review): pWhite is pulled from MD_Census here although the analysis
# is for Hartford — verify this is the intended census source.
HF_result <- merge(HF_result, as.data.frame(MD_Census) %>% dplyr::select(GEOID, pWhite), on='GEOID')
HF_result <- mutate(HF_result, race = ifelse(pWhite > .5, "Majority_White", "Majority_Non_White"))
# Mean predicted trip count by majority-race group (printed, not stored).
HF_result %>%
  na.omit() %>%
  group_by(race) %>%
  summarise(mean_trip_count=mean(Predicted.CNT))
HF_result <- HF_result %>% na.omit()
mean(HF_result$Predicted.CNT)
# Quintile choropleth of predicted trip counts.
library(viridis)
palette5 <- c('#f0f9e8','#bae4bc','#7bccc4','#43a2ca','#0868ac')
ggplot() +
  geom_sf(data = HF_result %>% na.omit(), aes(fill=q5(Predicted.CNT))) +
  scale_fill_manual(values = palette5,
                    labels = qBr(HF_result, "Predicted.CNT"),
                    name="Quintile\nBreaks") +
  labs(title = 'Prediced Trip Count in Hartford, CT', size=18) +
  mapTheme()
HF_result_RDS <- file.path(data_directory, "~RData/Hartford/HF_result")
# saveRDS(HF_result,
#         file = HF_result_RDS)
HF_result <- readRDS(HF_result_RDS)
# Continuous-scale map saved to disk.
# NOTE(review): `HF_trimmed_result` is not created in this script (only
# HF_result is) — presumably defined elsewhere; confirm it is not a typo
# for HF_result.
predict_HF <- ggplot()+
  geom_sf(data = HF_trimmed_result %>% na.omit(), aes(fill=Predicted.CNT)) +
  scale_fill_viridis()+
  labs(title = 'Predicted Trip Count for Hartford, CT') +
  mapTheme()
ggsave(file.path(plot_directory,
                 "5.3 predict_HF.png"),
       plot = predict_HF,
       width = 6,
       units = "in")
|
1e4bf4a3bbef3c01955565136acd38a558cd9d8a | cdf9d0bf0c57d446b1c95066d0a5d01fd97bdbfc | /server.R | 7900abc7f9dff84b7bf8845ef77a34fcd9086cd9 | [] | no_license | NahuelGrasso/Time-Series-Overview | 71030a8f18848114cc7d97e71e3e3d83759c9f03 | 5d3c16ec3c54c119f5aabb354af5b7ae704d9e73 | refs/heads/master | 2021-01-19T19:33:14.202549 | 2015-03-05T14:53:00 | 2015-03-05T14:53:00 | 31,299,384 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,589 | r | server.R | library(shiny)
library(datasets)
# Allow uploads up to 60 MB (default shiny limit is 5 MB).
options(shiny.maxRequestSize=60*1024^2)
# Give the JVM (used by XLConnect) a 1 GB heap.
options ( java.parameters = "-Xmx1024m" )
# Validate a pair of variable selections from the UI.
#
# Returns a user-facing message when both selections are the same variable,
# FALSE when the first selection is still empty (nothing chosen yet), and
# NULL when the pair is valid — the return convention used by
# shiny::validate()/need().
#
# BUG FIX: the body referenced `input`, which is not a parameter of this
# function (the parameters are `input1` and `input2`), so every call failed
# with "object 'input' not found".
distVariables <- function(input1, input2) {
  if (input1 == input2) {
    "Choose another variable to analyze time series"
  } else if (input1 == "") {
    FALSE
  } else {
    NULL
  }
}
# TRUE when `x` inherits from any of the common date/time classes,
# FALSE otherwise. inherits() with a vector of class names already
# returns TRUE on the first match, so no explicit any()/sapply() loop
# is needed.
is.timeBased <- function(x) {
  time_classes <- c("Date", "POSIXct", "POSIXt", "chron", "dates", "times",
                    "timeDate", "yearmon", "yearqtr", "xtime")
  inherits(x, time_classes)
}
shinyServer(
function(input, output, session) {
    # Packages for time-series handling, forecasting and Excel I/O.
    # NOTE(review): hard-coded lib.loc paths tie the app to one specific
    # Windows user library; plain library() calls would be more portable.
    library("zoo", lib.loc="~/R/win-library/3.1")
    library("xts", lib.loc="~/R/win-library/3.1")
    library("TTR", lib.loc="~/R/win-library/3.1")
    library("timeDate", lib.loc="~/R/win-library/3.1")
    library("forecast", lib.loc="~/R/win-library/3.1")
    library("tseries", lib.loc="~/R/win-library/3.1")
    library(XLConnect)
    # library(gdata)
    library(dygraphs)
    # Periods per year for each aggregation level, passed to ts().
    # Kept as strings because they are interpolated into parsed
    # expressions in dataProcessed() below.
    frequency.daily <- "365"
    frequency.weekly <- "52"
    frequency.monthly <- "12"
    frequency.quarterly <- "4"
    # Load the uploaded file. Tries CSV first; on failure falls back to
    # loading it as an Excel workbook via XLConnect. Returns NULL when no
    # file is selected or both attempts fail.
    workbook <- reactive({
      inFile <- input$fileSelected
      if (is.null(inFile))
        return(NULL)
      # CSV attempt: warnings/errors collapse to the sentinel string "error".
      result = tryCatch({
        Workbook <- read.csv(inFile$datapath, header = input$header,
                             sep = input$sep, quote = input$quote)
      }, warning = function(w) {
        "error"
      }, error = function(e) {
        "error"
      })
      # Excel fallback: only tried when the CSV attempt did not yield an object.
      if(!is.object(result)){
        result <- tryCatch({
          Workbook <- loadWorkbook(inFile$datapath)
          # Workbook <- read.xls(inFile$datapath)
        }, error = function(e) {
          paste("error",e)
        })
      }
      if(!is.object(result)){
        return(NULL)
      }
      Workbook
    })
    # Sheet names of the workbook; NULL for CSVs (getSheets errors) or when
    # no workbook is loaded.
    sheets <- reactive({
      if (is.null(workbook()))
        return(NULL)
      result = tryCatch({
        getSheets(workbook())
      }, warning = function(w) {
        "error"
      }, error = function(e) {
        "error"
      })
      if(length(result) == 1 && result=="error")
        return(NULL)
      return(result)
    })
    # Sheet picker — only rendered for Excel files (sheets() is NULL for CSVs).
    output$SheetSelector <- renderUI({
      if(is.null(sheets()))
        return(NULL)
      selectInput("Sheet", "Select Sheet:", as.list(c("",unique(sheets()))),selected = "")
    })
    # Jump to the variable-selection tab once a table with columns is loaded.
    observe({
      if (!is.null(variables())) {
        updateTabsetPanel(session, "TABSETPANEL1", selected = "Select Variables")
      }
      # updateSliderInput(session, "alphaSlider",
      #                   value = input$alphaText)
      # updateSliderInput(session, "betaSlider",
      #                   value = input$betaText)
      # updateSliderInput(session, "gammaSlider",
      #                   value = input$gammaText)
    })
    # Two-way synchronisation between the numeric text inputs and the
    # sliders for the Holt-Winters smoothing parameters.
    observeEvent(input$alphaText,
                 updateNumericInput(session, "alphaSlider", value = input$alphaText)
    )
    observeEvent(input$betaText,
                 updateNumericInput(session, "betaSlider", value = input$betaText)
    )
    observeEvent(input$gammaText,
                 updateNumericInput(session, "gammaSlider", value = input$gammaText)
    )
    observeEvent(input$alphaSlider,
                 updateNumericInput(session, "alphaText", value = input$alphaSlider)
    )
    observeEvent(input$betaSlider,
                 updateNumericInput(session, "betaText", value = input$betaSlider)
    )
    observeEvent(input$gammaSlider,
                 updateNumericInput(session, "gammaText", value = input$gammaSlider)
    )
    # The working data table: the selected worksheet for Excel files, or the
    # workbook itself when it is already a data frame (CSV case).
    table <- reactive({
      if(is.null(workbook()) || is.null(input$Sheet)|| ''==input$Sheet){
        return(NULL)
      }else{
        if(!is.null(workbook()) && is.null(sheets()))
          return(workbook())
        # w <- readWorksheet(workbook(), sheet=input$Sheet)
        # return(w[order(w$High),])
        return(readWorksheet(workbook(), sheet=input$Sheet))
      }
      # return(readWorksheet(workbook(), sheet=input$Sheet))
      # if(!is.null(workbook()) && is.null(sheets()))
      #   return(workbook())
    })
    # table <- reactive({
    #   if(!is.null(workbook()) && is.null(sheets()))
    #     return(workbook())
    # })
    # Preview of the imported table.
    output$ImportedTable <- renderDataTable(
      # if(is.null(table()))
      #   return(NULL)
      table(),options = list(pageLength = 10)
    )
    # Column names of the working table (NULL while nothing is loaded).
    variables <- reactive({
      if(is.null(table()))
        return(NULL)
      return(colnames(table()))
    })
    # Pickers for the date column and the value column to analyze.
    output$DateVariableSelector <- renderUI({
      selectInput("DateVariable", "Select Date variable", as.list(c("",unique(variables()))),selected = "")
    })
    # Year of the earliest date in the table, used to prefill startYear.
    # NOTE(review): min(table()$Date) hard-codes the column name "Date"
    # instead of using input$DateVariable (see the commented-out lines).
    suggestedYear <- reactive({
      if (!is.null(input$DateVariable) && input$DateVariable!= "")
        getYear(min(table()$Date))
      # exec <- paste("getYear(min(table()$",input$DateVariable,"))",sep="")
      # eval(parse(text=exec))
    })
    observeEvent(input$startYear,
                 if(input$startYear == ""){
                   updateNumericInput(session, "startYear", value = suggestedYear())
                 }
    )
    output$variableSelector <- renderUI({
      selectInput("variable", "Select variable", as.list(c("",unique(variables()))),selected = "")
    })
    # Aggregate the selected variable to the chosen period (daily/weekly/
    # monthly/quarterly mean via apply.<period>) and convert it to a ts
    # object with the matching frequency.
    # NOTE(review): built via eval(parse(...)) string templating; a direct
    # call with get()/[[ ]] would be safer. The ts start is hard-coded to
    # c(2005,1) even though a startYear input exists — confirm intent.
    dataProcessed <- reactive({
      if(!is.null(input$DateVariable) && !is.null(input$variable)){
        if((input$DateVariable!="") && (input$variable!="")){
          # if(!is.timeBased(input$DateVariable)){
          #   DateVariable <- as.Date(input$DateVariable)
          # }else{DateVariable <- input$DateVariable}
          exec <- paste("df.",input$period,".",input$variable," <- apply.",input$period,"(xts(table()$",input$variable,", order.by=table()$",input$DateVariable,"), FUN=mean)",sep="")
          eval(parse(text=exec))
          freq <- get(paste("frequency.",input$period,sep=""))
          #############I NEED START DATE###############
          exec <- paste("dfts.",input$period,".",input$variable," <- ts(df.",input$period,".",input$variable,",frequency = ",freq,",start=c(2005,1))",sep="")
          eval(parse(text=exec))
        }else {
          return(NULL)
        }
      }else {
        return(NULL)
      }
    })
    {
      # Classical decomposition (trend/seasonal/remainder) of the processed
      # series, rendered as the standard decompose() plot.
      output$decompositionPlot <- renderPlot({
        if(!is.null(dataProcessed())){
          #decompose
          exec <- paste("dfts.",input$period,".",input$variable,".components <-decompose(dataProcessed())",sep="")
          eval(parse(text=exec))
          decomposition <- get(paste("dfts.",input$period,".",input$variable,".components", sep=""))
          plot(decomposition)
        }else {
          return(NULL)
        }
      })
alphavalue <- reactive({
if(input$alphaSlider == 0){
return(FALSE)
}else{
input$alphaSlider
}
})
betavalue <- reactive({
input$alphaSlider
})
gammavalue <- reactive({
input$alphaSlider
})
      # Holt-Winters fit with the user-chosen smoothing parameters and
      # seasonal mode (additive/multiplicative).
      wintersModel <- reactive({
        if(!is.null(dataProcessed())){
          #winters
          # exec <- paste("dfts.",input$period,".",input$variable,".withtrend.withseasonal <- HoltWinters(dataProcessed(), alpha=",alphavalue(),",beta=",betavalue(),", gamma=",gammavalue(),")",sep="")
          exec <- paste("HoltWinters(dataProcessed(), alpha=",alphavalue(),",beta=",betavalue(),", gamma=",gammavalue(),",seasonal=\"",input$seasonalMode,"\")",sep="")
          eval(parse(text=exec))
        }else {
          return(NULL)
        }
      })
      # Point forecast (95% level) for the user-chosen horizon.
      wintersPrediction <- reactive({
        if(!is.null(dataProcessed())){
          #winters
          exec <- paste("predict(wintersModel(),n.ahead=",input$predictionPeriodText,",level=0.95)",sep="")
          eval(parse(text=exec))
          # dfts.monthly.High.withtrend.withoutseasonal.prediction <- predict(dfts.monthly.High.withtrend.withoutseasonal,n.ahead=4,level=0.95)
        }else {
          return(NULL)
        }
      })
      # Observed vs fitted plot of the Holt-Winters model.
      output$wintersPlot <- renderPlot({
        if(!is.null(wintersModel())){
          #winters
          plot(wintersModel())
        }else {
          return(NULL)
        }
      })
      # Same plot extended with the forecast horizon.
      output$wintersPredictionPlot <- renderPlot({
        if(!is.null(wintersPrediction())){
          #winters
          plot(wintersModel(), wintersPrediction())
        }else {
          return(NULL)
        }
      })
    }#PLOTS
    # Accuracy table for the manually tuned model: forecast::accuracy()
    # measures plus BIAS (sum of residuals) and tracking signal TR
    # (BIAS divided by the MAE, TABLE[1,3]).
    output$accuracy <- renderTable({
      if(!is.null(dataProcessed()) && !is.null(wintersModel())){
        RESIDUALS <- dataProcessed()-wintersModel()$fitted[,1]
        BIAS <- sum(RESIDUALS)
        TABLE <- accuracy(dataProcessed(),wintersModel()$fitted)
        MAD <- mean(abs(RESIDUALS))
        TR <- BIAS/TABLE[1,3]
        cbind(TABLE,BIAS,TR)
      }else {
        return(NULL)
      }
    }) #ACCURACY TABLE
    # Multi-start optimisation of (alpha, beta, gamma) for the selected
    # accuracy criterion: random uniform starting points, local search via
    # optim() with box constraints [~0,1]^3, keeping the best of
    # `numberIterationsOptimize` restarts. Returns a 1x4 matrix
    # (alpha, beta, gamma, objective value) or NULL when no data/iterations.
    optimalValue <- reactive({
      # if(!is.null(dataProcessed()) && strtrim(input$numberIterationsOptimize)!=""){
      if(!is.null(dataProcessed()) && input$numberIterationsOptimize!=""){
        # Objective: measurement index <= 6 picks a forecast::accuracy()
        # column; 8 minimises |tracking signal|; otherwise |BIAS|.
        optimizationFunction<- function(x) { ## Optimization function for ALPHA, BETA and GAMMA for a specific accuracy value.
          winterSolution <- HoltWinters(dataProcessed(), alpha=x[1], beta=x[2], gamma=x[3],seasonal = input$seasonalMode)#,seasonal = "additive")
          #accuracy(dataProcessed(),winterSolution$fitted)[3]#[input$measurement2Optimize]
          index <- as.integer(input$measurement2Optimize)
          if (index<=6){
            accuracy(dataProcessed(),winterSolution$fitted)[as.integer(input$measurement2Optimize)]
          }else if (index==8) {
            RESIDUALS <- dataProcessed()-winterSolution$fitted[,1]
            BIAS <- sum(RESIDUALS)
            MAD <- mean(abs(RESIDUALS))
            abs(BIAS/MAD)#MINIMIZE THE ABS VALUE OF THE TR
          }else {
            RESIDUALS <- dataProcessed()-winterSolution$fitted[,1]
            abs(sum(RESIDUALS))#MINIMIZE THE ABS VALUE OF THE BIAS-> CLOSER TO 0 BETTER
          }
        }
        optimal <-Inf
        optimal_alpha <-Inf
        optimal_beta <-Inf
        optimal_gamma <-Inf
        TOP <- as.integer(input$numberIterationsOptimize)
        for(i in 1:TOP){
          alpha <- runif(1, 0, 1)
          beta <- runif(1, 0, 1)
          gamma <- runif(1, 0, 1)
          # alpha's lower bound is kept strictly positive (HoltWinters
          # requires alpha > 0); warnings from infeasible trials are muted.
          optimalLocal <- suppressWarnings(optim(c(alpha,beta,gamma), optimizationFunction,lower = c(0.0000001,0,0), upper = c(1,1,1)))
          # optimalLocal <- optim(c(alpha,beta,gamma), optimizationFunction,lower = c(0.0000001,0,0), upper = c(1,1,1))
          if(optimalLocal$value<optimal){
            optimal_alpha <-optimalLocal$par[1]
            optimal_beta <-optimalLocal$par[2]
            optimal_gamma <-optimalLocal$par[3]
            optimal <- optimalLocal$value
          }
        }
        cbind(optimal_alpha,optimal_beta,optimal_gamma,optimal)
      }else {
        return(NULL)
      }
    })#optimalValue
    # Table of the optimised parameters and the achieved objective value.
    output$optimalValuesTable <- renderTable({
      if(!is.null(optimalValue())){
        colnames(optimalValue()) <- c("optimal alpha","optimal beta","optimal gamma","optimal")
        optimalValue()
      }else {
        return(NULL)
      }
    }) #OPTIMAL TABLE
    # Holt-Winters model refitted with the optimised parameters.
    wintersOptimalModel <- reactive({
      if(!is.null(optimalValue())){
        HoltWinters(dataProcessed(), alpha=optimalValue()[1], beta=optimalValue()[2], gamma=optimalValue()[3],seasonal = input$seasonalMode)
      }else {
        return(NULL)
      }
    })
    # Accuracy table for the optimised model (same measures as above).
    output$accuracyOptimal <- renderTable({
      if(!is.null(optimalValue()) && !is.null(dataProcessed())){
        winterSolution <- HoltWinters(dataProcessed(), alpha=optimalValue()[1], beta=optimalValue()[2], gamma=optimalValue()[3],seasonal = input$seasonalMode)
        RESIDUALS <- dataProcessed()-wintersOptimalModel()$fitted[,1]
        BIAS <- sum(RESIDUALS)
        TABLE <- accuracy(dataProcessed(),wintersOptimalModel()$fitted)
        MAD <- mean(abs(RESIDUALS))
        TR <- BIAS/TABLE[1,3]
        cbind(TABLE,BIAS,TR)
      }else {
        return(NULL)
      }
    }) #ACCURACY OPTIMAL TABLE
    # Observed vs fitted plot for the optimised model.
    output$wintersPlotOptimal <- renderPlot({
      if(!is.null(optimalValue())){
        #winters
        plot(wintersOptimalModel())
      }else {
        return(NULL)
      }
    })
    # Forecast (95% level) from the optimised model.
    wintersPredictionOptimal <- reactive({
      if(!is.null(wintersOptimalModel())){
        exec <- paste("predict(wintersOptimalModel(),n.ahead=",input$predictionPeriodTextOptimal,",level=0.95)",sep="")
        eval(parse(text=exec))
      }else {
        return(NULL)
      }
    })
    # Optimised model plot extended with its forecast horizon.
    output$wintersPredictionPlotOptimal <- renderPlot({
      if(!is.null(wintersPredictionOptimal())){
        plot(wintersOptimalModel(), wintersPredictionOptimal())
      }else {
        return(NULL)
      }
    })
})
|
209971da90c2da3d740dd45ce4508eff37beec28 | b118030d46abd023fd3a4888cdadb72093b3dcbb | /R/Analysis.R | 3e897f7e7c17907fcb0ea82276ef350ad901d2d3 | [] | no_license | LasseHjort/LossOfLifetimeEstimation | 1b5a2c81b464fe42fd9da0d113f848ddaea262ca | 1aa854960cfe124dd9a9af72d1ca5c06183b3218 | refs/heads/master | 2020-04-11T22:30:10.804676 | 2018-12-20T14:48:38 | 2018-12-20T14:48:38 | 162,138,269 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,689 | r | Analysis.R | ################# Lymphoma registry data ####################
# k equally spaced quantiles (probabilities 0..1) of the follow-up times
# among patients with an event (status == 1); used as spline knots.
get.knots <- function(data, k){
  event_times <- data$FU_years[data$status == 1]
  probs <- seq(0, 1, length.out = k)
  quantile(event_times, probs)
}
##Fit parametric relative survival models.
##For each disease, three models are fitted on the same data:
##  fit_*   - flexible mixture cure model (FMC, GenFlexCureModel), 5 knots
##  fit_*2  - non-cure relative survival spline (NRS, stpm2), 6 knots
##  fit_*3  - spline with an extra knot at 10 years and a cure-type basis
##            (ARS, cbc basis)
#DLBCL
knots <- log(get.knots(DLBCL, 5))
fit_DLBCL <- GenFlexCureModel(Surv(FU_years, status) ~ -1, data = DLBCL, bhazard = "exp_haz",
                              smooth.formula = ~cb(x = log(FU_years), knots = knots), ini.types = "cure")
knots <- log(get.knots(DLBCL, 6))
fit_DLBCL2 <- stpm2(Surv(FU_years, status) ~ -1, data = DLBCL, bhazard = DLBCL$exp_haz,
                    smooth.formula = ~cb(x = log(FU_years), knots = knots))
knots <- log(sort(c(get.knots(DLBCL, 6), 10)))
fit_DLBCL3 <- stpm2(Surv(FU_years, status) ~ -1, data = DLBCL, bhazard = DLBCL$exp_haz,
                    smooth.formula = ~cbc(x = log(FU_years), knots = knots))
#FL
knots <- log(get.knots(FL, 5))
fit_FL <- GenFlexCureModel(Surv(FU_years, status) ~ -1, data = FL, bhazard = "exp_haz",
                           smooth.formula = ~cb(x = log(FU_years), knots = knots), ini.types = "cure")
knots <- log(get.knots(FL, 6))
fit_FL2 <- stpm2(Surv(FU_years, status) ~ -1, data = FL, bhazard = FL$exp_haz,
                 smooth.formula = ~cb(x = log(FU_years), knots = knots))
knots <- log(sort(c(get.knots(FL, 6), 10)))
fit_FL3 <- stpm2(Surv(FU_years, status) ~ -1, data = FL, bhazard = FL$exp_haz,
                 smooth.formula = ~cbc(x = log(FU_years), knots = knots))
#ML
knots <- log(get.knots(ML, 5))
fit_ML <- GenFlexCureModel(Surv(FU_years, status) ~ -1, data = ML, bhazard = "exp_haz",
                           smooth.formula = ~cb(x = log(FU_years), knots = knots), ini.types = "cure")
knots <- log(get.knots(ML, 6))
fit_ML2 <- stpm2(Surv(FU_years, status) ~ -1, data = ML, bhazard = ML$exp_haz,
                 smooth.formula = ~cb(x = log(FU_years), knots = knots))
knots <- log(sort(c(get.knots(ML, 6), 10)))
fit_ML3 <- stpm2(Surv(FU_years, status) ~ -1, data = ML, bhazard = ML$exp_haz,
                 smooth.formula = ~cbc(x = log(FU_years), knots = knots))
# Old implementation of the relative survival models
# fit_ML <- FlexCureModel(Surv(FU_years, status) ~ 1, data = ML, bhazard = "exp_haz",
#                         n.knots = 5)
# fit_ML2 <- stpm2(Surv(FU_years, status) ~ 1, data = ML, bhazard = ML$exp_haz, df = 5)
# knots_ML <- log(sort(c(quantile(ML$FU_years[ML$status ==1], c(0, 1, 0.2, 0.4, 0.6, 0.8)), 10)))
# fit_ML3 <- stpm2(Surv(FU_years, status) ~ -1, data = ML, bhazard = ML$exp_haz,
#                  smooth.formula = ~basis_cure(knots = knots_ML, x = log(FU_years)))
#Assemble parametric models: one list per disease (FMC, NRS, ARS, data).
fits <- list(DLBCL = list(fit_DLBCL, fit_DLBCL2, fit_DLBCL3, data = DLBCL),
             FL = list(fit_FL, fit_FL2, fit_FL3, data = FL),
             ML = list(fit_ML, fit_ML2, fit_ML3, data = ML))
#Calculate non-parametric (Ederer II) and parametric relative survival
#at the Ederer II event times, for each disease. D holds the
#non-parametric curve with CIs; D_para stacks the three model predictions
#labelled FMC/NRS/ARS.
plot_data <- lapply(fits, function(fit){
  rsfit <- rs.surv(Surv(FU, status) ~ 1 + ratetable(age = age, sex = sex, year = diag_date),
                   data = fit$data, ratetable = survexp.dk, method = "ederer2")
  # Convert follow-up from days to years for plotting.
  rsfit$time <- rsfit$time / ayear
  D <- data.frame(RS = rsfit$surv, time = rsfit$time, ci.lower = rsfit$lower, ci.upper = rsfit$upper)
  pred1 <- predict(fit[[1]], time = D$time, var.type = "n")[[1]]
  pred2 <- data.frame(Estimate = predict(fit[[2]], newdata = data.frame(FU_years = D$time)))
  pred3 <- data.frame(Estimate = predict(fit[[3]], newdata = data.frame(FU_years = D$time)))
  D_para <- rbind(pred1, pred2, pred3)
  D_para$time <- rep(D$time, 3)
  D_para$model <- rep(c("FMC", "NRS", "ARS"), each = nrow(D))
  list(D = D, D_para = D_para)
})
#Plot relative survival: Ederer II step curve with CIs overlaid with the
#three parametric model fits, one panel per disease; saved as PDF and PNG.
para_plot_data <- do.call(rbind, lapply(plot_data, function(x) x$D_para))
para_plot_data$disease <- rep(c("DLBCL", "FL", "ML"), sapply(plot_data, function(x) nrow(x$D_para)))
npara_plot_data <- do.call(rbind, lapply(plot_data, function(x) x$D))
npara_plot_data$disease <- rep(c("DLBCL", "FL", "ML"), sapply(plot_data, function(x) nrow(x$D)))
colnames(npara_plot_data)[1] <- "Estimate"
npara_plot_data$model <- "Ederer II estimate"
p <- ggplot(data = npara_plot_data, aes(x = time, y = Estimate, group = model, colour = model)) + geom_step() +
  facet_grid(.~disease) + geom_step(data = npara_plot_data, aes(x = time, y = ci.lower), linetype = "dashed") +
  geom_step(data = npara_plot_data, aes(x = time, y = ci.upper), linetype = "dashed") +
  geom_line(data = para_plot_data, aes(x = time, y = Estimate), size = 1) + ylim(c(0, 1.002)) +
  scale_colour_manual(values = c("Ederer II estimate" = "black", "FMC" = "brown2",
                                 "NRS" = "darkolivegreen3", "ARS" = "deepskyblue3"),
                      breaks = c("Ederer II estimate", "NRS",
                                 "ARS", "FMC")) +
  theme_bw() + theme(legend.position = "bottom",
                     legend.title = element_blank(),
                     legend.text=element_text(size=15),
                     axis.title=element_text(size=17),
                     strip.text = element_text(size=15),
                     axis.text = element_text(size = 13)) +
  xlab("Follow-up time (years)") +
  ylab("Relative survival")
pdf(file.path(fig.out, "RSCombined.pdf"), width = 9.5, height = 5)
print(p)
dev.off()
png(file.path(fig.out, "RSCombined.png"), res = 200, width = 2300, height = 2300 * 5 / 11)
print(p)
dev.off()
#Function for calculating mean age
get_ages <- function(data){
bdr <- floor(range(data$age_years))
mean_age <- floor(median(data$age_years))
paste0(mean_age, "(", bdr[1], "-", bdr[2], ")")
}
#Create table with age, relative survival, and loss of lifetime estimates:
#row 1 = median age, rows 2-4 = 5-year RS per model, rows 5-7 = loss of
#lifetime per model; columns = row labels, model, and the three diseases.
M <- matrix(nrow = 7, ncol = 5)
M[,1] <- c("Median age (range)", "5-year RS (95% CI)", NA, NA, "Loss of lifetime (95% CI)", NA, NA)
M[,2] <- c(NA, rep(c("NRS", "ARS", "FMC"), 2))
M[1, 2:5] <- c(NA, get_ages(DLBCL), get_ages(FL), get_ages(ML))
# NOTE(review): the two predict() calls below discard their results —
# apparent leftovers from interactive checking; safe to remove.
predict(fit_DLBCL, time = 5)
predict(fit_DLBCL2, newdata = data.frame(FU_years = 5))
#Format a t-year relative survival estimate as "est(lower-upper)", rounded
#to 2 decimals. Handles both the cure models (class "cuRe", predicted via
#`time`) and the stpm2 fits (predicted via newdata on FU_years).
#BUG FIX / generalization: the `time` argument was accepted but ignored
#(5 was hard-coded in both branches); it is now honoured, with the same
#default of 5 years so every existing call is unchanged. Also uses
#inherits() instead of `%in% class()` for the class test.
get_rs <- function(fit, time = 5){
  if(inherits(fit, "cuRe")){
    pred <- round(predict(fit, time = time)[[1]], 2)
    paste0(pred$Estimate, "(", pred$lower, "-", pred$upper, ")")
  }else{
    pred <- round(predict(fit, newdata = data.frame(FU_years = time), se.fit = T), 2)
    paste0(pred$Estimate, "(", pred$lower, "-", pred$upper, ")")
  }
}
# Fill the 5-year RS rows: NRS (fit_*2), ARS (fit_*3), FMC (fit_*).
M[2, 3:5] <- c(get_rs(fit_DLBCL2), get_rs(fit_FL2), get_rs(fit_ML2))
M[3, 3:5] <- c(get_rs(fit_DLBCL3), get_rs(fit_FL3), get_rs(fit_ML3))
M[4, 3:5] <- c(get_rs(fit_DLBCL), get_rs(fit_FL), get_rs(fit_ML))
#Function for formatting the loss of lifetime estimate at time zero as
#"est(lower-upper)" with 2 decimals.
get_LL <- function(fit){
  LL_res <- calc.LL(fit, time = 0, rmap = list(year = diag_date), smooth.exp = FALSE)
  LL_res <- sprintf("%.2f", LL_res[[1]])
  paste0(LL_res[1], "(", LL_res[2], "-", LL_res[3], ")")
}
# Loss of lifetime rows, same model order as above.
M[5, 3:5] <- c(get_LL(fit_DLBCL2), get_LL(fit_FL2), get_LL(fit_ML2))
M[6, 3:5] <- c(get_LL(fit_DLBCL3), get_LL(fit_FL3), get_LL(fit_ML3))
M[7, 3:5] <- c(get_LL(fit_DLBCL), get_LL(fit_FL), get_LL(fit_ML))
M <- as.data.frame(M)
colnames(M) <- c("", "Model","DLBCL", "FL", "ML")
# Export as LaTeX; the thesis version is scaled down to fit the page.
if(thesis){
  print(xtable(M, caption = "Median age, 5-year relative survival (RS), and loss of lifetime estimates at time zero
               in Danish diffuse large B-cell lymphoma (DLBCL), follicular lymphoma (FL),
               and mantle cell lymphoma (ML) patients.",
               label = "tab:sum", align = "cllccc"), include.rownames = F,
        file = file.path(tab.out, "SummaryMeasureTable.tex"), scalebox = 0.85)
}else{
  print(xtable(M, caption = "Median age, 5-year relative survival (RS), and loss of lifetime estimates at time zero
               in Danish diffuse large B-cell lymphoma (DLBCL), follicular lymphoma (FL),
               and mantle cell lymphoma (ML) patients.",
               label = "tab:sum", align = "cllccc"), include.rownames = F,
        file = file.path(tab.out, "SummaryMeasureTable.tex"))
}
#Choose time points for the loss of lifetime estimates.
times <- seq(0, 10, length.out = 50)
#Calculate loss of lifetime over follow-up for the three diseases using
#the three models (FMC = fit_*, NRS = fit_*2, ARS = fit_*3).
#DLBCL
res_DLBCL <- calc.LL(fit_DLBCL, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
res_DLBCL2 <- calc.LL(fit_DLBCL2, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
res_DLBCL3 <- calc.LL(fit_DLBCL3, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
#FL
res_FL <- calc.LL(fit_FL, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
res_FL2 <- calc.LL(fit_FL2, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
res_FL3 <- calc.LL(fit_FL3, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
#ML
res_ML <- calc.LL(fit_ML, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
res_ML2 <- calc.LL(fit_ML2, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
res_ML3 <- calc.LL(fit_ML3, time = times, rmap = list(year = diag_date), smooth.exp = F)[[1]]
#Combine results into a single data frame for plotting. Note the rbind
#order (all FMC fits first, then NRS, then ARS) must match the `model`
#labels assigned below.
res_all <- rbind(res_DLBCL, res_FL, res_ML, res_DLBCL2, res_FL2, res_ML2, res_DLBCL3, res_FL3, res_ML3)
res_all$disease <- rep(rep(c("DLBCL", "FL", "ML"), each = length(times)), 3)
res_all$Time <- rep(times, 9)
levs <- c("FMC", "NRS", "ARS")
res_all$model <- factor(rep(levs, each = length(times) * 3), levels = levs[c(2, 3, 1)])
#Plot the loss of lifetime curves, one panel per disease.
pdf(file.path(fig.out, "LOLLymphoma.pdf"), width = 10, height = 5.3)
ggplot(res_all, aes(x = Time, y = Estimate, group = model, linetype = model)) + geom_line(size = 1) +
  facet_grid(.~disease) + xlab("Follow-up time (years)") + ylab("Loss of lifetime (years)") +
  scale_x_continuous(breaks = seq(0, 12, by = 3)) +
  geom_hline(yintercept = 0, linetype = "dashed") + theme_bw() +
  theme(legend.position = "bottom", legend.title = element_blank(),
        legend.key=element_rect(fill=NA),
        legend.text=element_text(size=15),
        axis.title=element_text(size=17),
        strip.text = element_text(size=15),
        axis.text = element_text(size = 14),
        legend.key.size = unit(2,"line"))# +
#scale_colour_manual(values = c("FMC" = "brown2",
#                               "NRS" = "darkolivegreen3",
#                               "ARS" = "deepskyblue3"))
dev.off()
################# Cancer registry data ####################
#The analyses are restricted to mean residual lifetime since the expected
#survival is the same regardless of length of follow-up
#Compute the mean residual lifetime estimates using KM
# 100-point Gauss-Legendre quadrature nodes/weights, reused by calc_LOLKM.
gaussxw <- statmod::gauss.quad(100)
#The function allows for a flexible parametric version also and computes the
#integral using Gauss-Legendre quadrature (the commented-out alternative
#below used the rectangle rule).
# Arguments:
#   sfit     - survfit object (type = "KM") or a spline fit with $knots and
#              $coefficients (type != "KM")
#   expected - survexp object for the matched general population
#   time     - vector of time points at which to evaluate the loss
#   tau      - upper integration limit (end of reliable follow-up)
# Returns the loss of lifetime at each `time`: the integral over [time, tau]
# of S_exp(u)/S_exp(t) - S_pop(u)/S_pop(t).
calc_LOLKM <- function(sfit, expected, time, tau, type = "KM"){
  if(type == "KM"){
    surv_fun <- function(t){
      s <- summary(sfit, t)
      names(s$surv) <- s$time
      a <- s$surv[as.character(t)]
      names(a) <- NULL
      a
    }
  }else{
    surv_fun <- function(t) exp(-exp(basis(knots = sfit$knots, x = log(t)) %*% sfit$coefficients))
  }
  # Expected (general population) survival looked up at arbitrary t.
  exp_fun <- function(t){
    s <- summary(expected, t)
    names(s$surv) <- s$time
    a <- s$surv[as.character(t)]
    names(a) <- NULL
    a
  }
  # Map the Gauss-Legendre nodes from [-1, 1] onto each [time_i, tau].
  scale <- (tau - time) / 2
  scale2 <- (tau + time) / 2
  eval_gen_t <- exp_fun(time)
  eval_pop_t <- surv_fun(time)
  eval <- rep(NA, length(time))
  for(i in 1:length(time)){
    points <- scale[i] * gaussxw$nodes + scale2[i]
    eval_gen <- exp_fun(points)
    eval_pop <- surv_fun(points)
    # Difference of conditional survival curves: expected minus observed.
    inner_int <- eval_gen / eval_gen_t[i] - eval_pop / eval_pop_t[i]
    eval[i] <- sum(gaussxw$weights * inner_int)
  }
  scale * eval
  # t_new <- sort(unique(c(time, seq(0, tau, length.out = 5000))), decreasing = T)
  # df_time <- -diff(t_new)
  # mid_points <- t_new[-length(t_new)] + diff(t_new) / 2
  # vals_pop <- c(0, cumsum(surv_fun(mid_points) * df_time))
  # vals_pop <- rev(vals_pop[t_new %in% time])
  # vals_exp <- c(0, cumsum(exp_fun(mid_points) * df_time))
  # vals_exp <- rev(vals_exp[t_new %in% time])
  # vals_exp / exp_fun(time) - vals_pop / surv_fun(time)
}
#Set clinical subgroups and relevant time points
diseases <- levels(CR_tumor$disease)
ages <- levels(CR_tumor$age_group)
time <- seq(0, 15, length.out = 100)
#Compute the loss of lifetime function
L <- lapply(diseases, function(disease){
LOLs <- lapply(ages, function(age_group){
data_new <- CR_tumor[CR_tumor$disease == disease & CR_tumor$age_group == age_group, ]
sfit <- survfit(Surv(FU, status) ~ 1, data = data_new)
#sfit <- flexsurvspline(Surv(FU, status) ~ 1, data = data_new, k = 6)
expected <- survexp( ~ 1, rmap = list(age = age, sex = sex, year = diag_date),
ratetable = survexp.dk, scale = ayear, data = data_new,
time = seq(0, 70, length.out = 2000) * ayear)
tau <- max(data_new$FU)
if(tau > 40) tau <- 40
#print(c(tau, summary(sfit, tau)$surv))
calc_LOLKM(sfit, expected, time = time, tau = tau)
})
names(LOLs) <- ages
LOLs
})
names(L) <- diseases
#Create new dataset with a limited follow-up period.
#Patients still under observation at the cutoff date are administratively
#censored there, i.e. they get a new follow-up time and status variable.
cutoff <- as.Date("1976-01-01")
CR_tumor2 <- CR_tumor
CR_tumor2$last_followup <- CR_tumor2$D_STATDATO
#Single logical mask reused for both the follow-up truncation and the
#status recode, so the two can never drift apart.
after_cutoff <- CR_tumor2$D_STATDATO > cutoff
CR_tumor2$last_followup[after_cutoff] <- cutoff
CR_tumor2$FU_days <- as.numeric(CR_tumor2$last_followup - CR_tumor2$diag_date)
CR_tumor2$FU_years <- CR_tumor2$FU_days / ayear
#Events occurring after the cutoff are unobserved in the restricted data -> censored
CR_tumor2$status[after_cutoff] <- 0
#Expected (population) hazard at each patient's end of follow-up
CR_tumor2$exp_haz <- general.haz(time = "FU_days", age = "age", sex = "sex", year = "diag_date",
                                 data = CR_tumor2, ratetable = survexp.dk)
#Fit models for all diseases and age groups and compute mean residual lifetime
#on the restricted (cutoff-censored) data CR_tumor2.
#Five models per stratum (labelled A-E downstream):
#  A: Nelson et al. 2007     - flexible relative survival model (stpm2, 6 knots)
#  B: Andersson et al. 2011  - cure-type spline model, extra knot at 10 years
#  C: Andersson et al. 2011  - as B, but the last knot moved out to 80 years
#  D: flexible mixture cure model, quantile-based knots (5 knots)
#  E: flexible mixture cure model, hand-picked knots at first event/0.5/1/2/5 years
L_fit <- lapply(diseases, function(disease){
  cat(disease, "\n")
  LOLs <- lapply(ages, function(age_group){
    cat(age_group, "\n")
    data_new <- CR_tumor2[CR_tumor2$disease == disease & CR_tumor2$age_group == age_group, ]
    #Fit model by Nelson et al. 2007
    knots <- log(get.knots(data_new, 6))
    fit_nelson <- stpm2(Surv(FU_years, status) ~ -1, data = data_new, bhazard = data_new$exp_haz,
                        smooth.formula = ~cb(x = log(FU_years), knots = knots))
    #Fit models by Andersson et al. 2011.
    #NOTE: `knots` is mutated in place below; the order of these statements matters.
    add.knot <- 10
    knots <- sort(c(knots, log(add.knot)))
    #knots_andersson1 <- log(sort(c(quantile(data_new$FU_years[data_new$status ==1],
    #                    c(0, 0.2, 0.4, 0.6, 0.8, 1)), add.knot)))
    fit_andersson1 <- stpm2(Surv(FU_years, status) ~ -1, data = data_new, bhazard = data_new$exp_haz,
                            smooth.formula = ~cbc(x = log(FU_years), knots = knots))
    #Replace the largest knot by 80 years for the third model
    last.knot <- 80
    knots[length(knots)] <- log(last.knot)
    #knots_andersson2 <- log(sort(c(quantile(data_new$FU_years[data_new$status ==1],
    #                    c(0, 0.2, 0.4, 0.6, 0.8)), add.knots)))
    fit_andersson2 <- stpm2(Surv(FU_years, status) ~ -1, data = data_new, bhazard = data_new$exp_haz,
                            smooth.formula = ~cbc(x = log(FU_years), knots = knots))
    #Fit flexible mixture cure models (legacy FlexCureModel calls kept for reference)
    # fit_flex_mix1 <- FlexCureModel(Surv(FU_years, status) ~ 1, data = data_new,
    #                                bhazard = "exp_haz", n.knots = 5,
    #                                covariance = F, verbose = F)
    # fit_flex_mix2 <- FlexCureModel(Surv(FU_years, status) ~ 1, data = data_new,
    #                                bhazard = "exp_haz",
    #                                knots = log(c(min(data_new$FU_years), 0.5, 1, 2, 5)),
    #                                covariance = F, verbose = F)
    knots <- log(get.knots(data_new, 5))
    fit_flex_mix1 <- GenFlexCureModel(Surv(FU_years, status) ~ -1, data = data_new, bhazard = "exp_haz",
                                      smooth.formula = ~cb(x = log(FU_years), knots = knots),
                                      verbose = F, covariance = F, ini.types = "cure")
    #First knot at the earliest observed event time
    min.time <- min(data_new$FU_years[data_new$status == 1])
    knots <- log(c(min.time, 0.5, 1, 2, 5))
    fit_flex_mix2 <- GenFlexCureModel(Surv(FU_years, status) ~ -1, data = data_new, bhazard = "exp_haz",
                                      smooth.formula = ~cb(x = log(FU_years), knots = knots),
                                      verbose = F, covariance = F, ini.types = "cure")
    #Plot the models (interactive diagnostics, kept for reference)
    # plot(fit_nelson, newdata = data.frame(age = 50), ylim = c(0, 1), ci = F, rug = F)
    # plot(fit_andersson1, newdata = data.frame(age = 50), ylim = c(0, 1), ci = F, rug = F, add = T, line.col = 2)
    # plot(fit_andersson2, newdata = data.frame(age = 50), ylim = c(0, 1), ci = F, rug = F, add = T, line.col = 3)
    # plot(fit_flex_mix1, time = seq(0, 15, length.out = 100), add = T, col = 4, ci = F)
    # plot(fit_flex_mix2, time = seq(0, 15, length.out = 100), add = T, col = 5, ci = F)
    tau <- 40
    #Calculate loss of lifetime estimates; one column per model (A-E order)
    LOL1 <- calc.LL(fit_nelson, time = time, var.type = "n", tau = tau, rmap = list(year = diag_date), smooth.exp = F)[[1]]
    LOL2 <- calc.LL(fit_andersson1, time = time, var.type = "n", tau = tau, rmap = list(year = diag_date), smooth.exp = F)[[1]]
    LOL3 <- calc.LL(fit_andersson2, time = time, var.type = "n", tau = tau, rmap = list(year = diag_date), smooth.exp = F)[[1]]
    LOL4 <- calc.LL(fit_flex_mix1, time = time, var.type = "n", tau = tau, rmap = list(year = diag_date), smooth.exp = F)[[1]]
    LOL5 <- calc.LL(fit_flex_mix2, time = time, var.type = "n", tau = tau, rmap = list(year = diag_date), smooth.exp = F)[[1]]
    cbind(LOL1, LOL2, LOL3, LOL4, LOL5)
  })
  names(LOLs) <- ages
  LOLs
})
names(L_fit) <- diseases
#Assemble the fitted-vs-true loss-of-lifetime differences (biases) into one
#long data frame for plotting: one row per (time point, model, age group, disease).
models <- LETTERS[1:5]
plot_data <- do.call(rbind, lapply(diseases, function(dis) {
  per_age <- lapply(ages, function(ag) {
    delta <- L_fit[[dis]][[ag]] - L[[dis]][[ag]]
    data.frame(bias = unlist(delta),
               time = rep(time, length(models)),
               Model = rep(models, each = nrow(delta)),
               age_group = ag)
  })
  out <- do.call(rbind, per_age)
  out$disease <- dis
  out
}))
#Plot the loss-of-lifetime bias curves: one panel per age group x disease,
#one coloured line per model, with a dashed zero reference line.
p <- ggplot(plot_data, aes(x = time, y = bias, colour = Model, group = Model))
p <- p +
  geom_line() +
  geom_hline(yintercept = 0, linetype = "dashed") +
  facet_grid(age_group ~ disease) +
  xlab("Years survived") +
  ylab("Loss of lifetime bias, D(t)") +
  coord_cartesian(ylim = c(-3, 2)) +
  scale_color_manual(values = cbPalette) +
  theme_bw() +
  theme(legend.position = "bottom",
        legend.text = element_text(size = 15),
        legend.title = element_text(size = 15),
        axis.title = element_text(size = 17),
        strip.text = element_text(size = 15),
        axis.text = element_text(size = 13))
#Write the figure to PDF
pdf(file.path(fig.out, "LOLBiasOldData.pdf"), width = 10.5, height = 12.5)
print(p)
dev.off()
#Create plots for PhD-defence: one single-row bias figure per age group.
#The output directory is loop-invariant, so it is defined once up front
#(originally reassigned on every iteration).
file.out <- "C:/Users/sw1y/Dropbox/PhDDefence/"
for(age in ages){
  p <- ggplot(plot_data[plot_data$age_group == age,], aes(x = time, y = bias, colour = Model, group = Model)) +
    geom_line() +
    facet_grid(age_group ~ disease) + geom_hline(yintercept = 0, linetype = "dashed") +
    ylab("Loss of lifetime bias, D(t)") + xlab("Years survived") + theme_bw() +
    coord_cartesian(ylim = c(-3, 2)) +
    scale_color_manual(values = c(cbPalette[1:4], "black")) +
    theme(legend.position = "bottom",
          legend.text=element_text(size=15),
          legend.title=element_text(size=15),
          axis.title=element_text(size=17),
          strip.text = element_text(size=15),
          axis.text = element_text(size = 13))
  png(file.path(file.out, paste0("LOLBiasOldData_", age, ".png")), res = 200, width = 2800, height = 1000)
  print(p)
  dev.off()
}
#Save the current plot margins.
#NOTE(review): old_pars is never restored in this section (and par() settings
#die with the png device anyway) -- confirm it is used further down the script.
old_pars <- par("mai")
png(file.path(file.out, "SurvOldData.png"), res = 200, width = 1600, height = 1300)
#2x2 grid of full-follow-up Kaplan-Meier curves, one panel per disease
par(mfrow = c(2,2), mai = c(0.7, 0.7, 0.3, 0.1))
for(disease in diseases){
  sfit <- survfit(Surv(FU, status) ~ 1, data = CR_tumor[CR_tumor$disease == disease,])
  #NOTE(review): file.out is loop-invariant and already set above -- this
  #reassignment inside the loop is redundant.
  file.out <- "C:/Users/sw1y/Dropbox/PhDDefence/"
  plot(sfit, xlab = "Years since diagnosis", ylab = "Survival probability")
  title(main = disease, line = 0.5)
}
dev.off()
#Collect the fitted loss-of-lifetime estimates (models A-E) together with the
#Kaplan-Meier based "true" LOL into one long data frame for plotting.
models <- LETTERS[1:5]
plot_data <- do.call(rbind, lapply(diseases, function(dis) {
  per_age <- lapply(ages, function(ag) {
    est <- cbind(L_fit[[dis]][[ag]], L[[dis]][[ag]])
    data.frame(LOL = unlist(est),
               time = rep(time, length(models) + 1),
               Model = rep(c(models, "True LOL"), each = nrow(est)),
               age_group = ag)
  })
  combined <- do.call(rbind, per_age)
  combined$disease <- dis
  combined
}))
#Plot LL estimates.
#Fix: the ggplot object must be print()ed explicitly -- a bare top-level
#ggplot expression is only auto-printed in interactive use/Rscript, not when
#the file is source()d, which would silently leave the PDF empty.
pdf(file.path(fig.out, "LOLOldData.pdf"), width = 12, height = 12)
p <- ggplot(plot_data, aes(x = time, y = LOL, colour = Model, group = Model)) + geom_line() +
  facet_grid(age_group ~ disease) + geom_hline(yintercept = 0, linetype = "dashed") +
  ylab("Bias") + xlab("Follow-up time (years)") + theme_bw() +
  scale_color_manual(values = cbPalette) +
  theme(legend.position = "bottom",
        legend.text=element_text(size=15),
        axis.title=element_text(size=17),
        strip.text = element_text(size=15),
        axis.text = element_text(size = 13), legend.title = element_blank())
print(p)
dev.off()
#Calculate predicted survival curves for all models in each age group of each
#disease.  For every stratum the five restricted-data models (same
#specifications as in the L_fit section above) are refitted, their predicted
#net survival is multiplied by the expected (population) survival, and the
#result is overlaid on the full-follow-up Kaplan-Meier curve.
for(disease in diseases){
  cat(disease, "\n")
  plot_data <- lapply(ages, function(age_group){
    cat(age_group, "\n")
    #Full follow-up Kaplan-Meier
    data_new <- CR_tumor[CR_tumor$disease == disease & CR_tumor$age_group == age_group, ]
    sfit <- survfit(Surv(FU, status)~ 1, data = data_new)
    #Time points (this `time` shadows the global prediction grid inside this function)
    time <- seq(0, max(data_new$FU), length.out = 1000)
    #Restricted follow-up dataset
    data_new <- CR_tumor2[CR_tumor2$disease == disease & CR_tumor2$age_group == age_group, ]
    #Fit model by Nelson et al. 2007
    knots <- log(get.knots(data_new, 6))
    fit_nelson <- stpm2(Surv(FU_years, status) ~ -1, data = data_new, bhazard = data_new$exp_haz,
                        smooth.formula = ~cb(x = log(FU_years), knots = knots))
    #Fit models by Andersson et al. 2011
    #NOTE: `knots` is mutated in place below; statement order matters.
    add.knot <- 10
    knots <- sort(c(knots, log(add.knot)))
    fit_andersson1 <- stpm2(Surv(FU_years, status) ~ -1, data = data_new, bhazard = data_new$exp_haz,
                            smooth.formula = ~cbc(x = log(FU_years), knots = knots))
    last.knot <- 80
    knots[length(knots)] <- log(last.knot)
    fit_andersson2 <- stpm2(Surv(FU_years, status) ~ -1, data = data_new, bhazard = data_new$exp_haz,
                            smooth.formula = ~cbc(x = log(FU_years), knots = knots))
    #Fit flexible mixture cure models
    knots <- log(get.knots(data_new, 5))
    fit_flex_mix1 <- GenFlexCureModel(Surv(FU_years, status) ~ -1, data = data_new, bhazard = "exp_haz",
                                      smooth.formula = ~cb(x = log(FU_years), knots = knots),
                                      verbose = F, covariance = F, ini.types = "cure")
    min.time <- min(data_new$FU_years[data_new$status == 1])
    knots <- log(c(min.time, 0.5, 1, 2, 5))
    fit_flex_mix2 <- GenFlexCureModel(Surv(FU_years, status) ~ -1, data = data_new, bhazard = "exp_haz",
                                      smooth.formula = ~cb(x = log(FU_years), knots = knots),
                                      verbose = F, covariance = F, ini.types = "cure")
    #Expected survival (population rate table; ages/times in days, hence * ayear)
    expected <- survexp( ~ 1, rmap = list(age = age, sex = sex, year = diag_date),
                         ratetable = survexp.dk, scale = ayear, data = data_new,
                         times = seq(0, 70, length.out = 2000) * ayear)
    #Predict survival probabilities; entry 1 corresponds to t = 0 where S(0) = 1
    model1 <- model2 <- model3 <- model4 <- model5 <- rep(1, length(time))
    model1[-1] <- predict(fit_nelson, newdata = data.frame(FU_years = time[time != 0]))
    model2[-1] <- predict(fit_andersson1, newdata = data.frame(FU_years = time[time != 0]))
    model3[-1] <- predict(fit_andersson2, newdata = data.frame(FU_years = time[time != 0]))
    model4[-1] <- predict(fit_flex_mix1, time = time[time != 0], var.type = "n")[[1]]$Estimate
    model5[-1] <- predict(fit_flex_mix2, time = time[time != 0], var.type = "n")[[1]]$Estimate
    #Merge to data frame: all-cause survival = net survival * expected survival
    models <- c(model1, model2, model3, model4, model5) * rep(summary(expected, time)$surv, 5)
    D <- data.frame(Est = models, time = rep(time, 5), Model = rep(LETTERS[1:5], each = length(time)))
    D <- rbind(D, data.frame(Est = sfit$surv, time = sfit$time, Model = "Full KM"))
    #D$Est[D$time == 0] <- 1
    D$age_group <- age_group
    D
  })
  #Assemble data and plot survival curves (PDF and PNG versions of each figure)
  plot_data <- do.call(rbind, plot_data)
  p <- ggplot(plot_data, aes(x = time, y = Est, colour = Model, group = Model)) + geom_line() +
    facet_wrap( ~ age_group, ncol = 2) + ylab("Survival probability") +
    xlab("Follow-up time (years)") + ggtitle(disease) + theme_bw() +
    scale_color_manual(values = cbPalette) + scale_x_continuous(breaks = seq(0, 50, by = 10)) +
    theme(legend.position = "bottom", plot.title = element_text(hjust = 0.5, size = 18),
          legend.text=element_text(size=15),
          legend.title=element_text(size=15),
          axis.title=element_text(size=17),
          strip.text = element_text(size=15),
          axis.text = element_text(size = 13))
  pdf(file.path(fig.out, paste0("SC_", gsub(" ", "_", disease), ".pdf")), width = 10, height = 7)
  print(p)
  dev.off()
  png(file.path(fig.out, paste0("SC_", gsub(" ", "_", disease), ".png")),
      res = 200, width = 2000, height = 2000 * 6 / 9)
  print(p)
  dev.off()
}
################# Lymphoma registry revisited ####################
#Flexible mixture cure model with age at diagnosis as a spline covariate.
#Quantile positions for the age-spline knots:
probs <- c(0, 1/3, 2/3, 1)
#Legacy cuRe:::FlexCureModel formulation, kept for reference:
# knots_DLBCL <- quantile(DLBCL$age_years, probs = probs)
# bs_DLBCL <- cuRe:::cb(x = DLBCL$age_years, knots = knots_DLBCL, intercept = FALSE, ortho = T)
# colnames(bs_DLBCL) <- paste0("bs", 1:(length(knots_DLBCL) - 1))
# DLBCL2 <- cbind(DLBCL, bs_DLBCL)
#
# fit_DLBCL_time <- cuRe:::FlexCureModel(Surv(FU_years, status) ~ bs1 + bs2 + bs3, data = DLBCL2,
#                                 smooth.formula = ~1,
#                                 bhazard = "exp_haz",
#                                 n.knots = 5, n.knots.time = list(bs1 = 2, bs2 = 2, bs3 = 2))
#Baseline (5 knots), time-varying (2 knots) and age (quantile) knots for DLBCL
knots.DLBCL <- log(get.knots(DLBCL, k = 5))
knots.time.DLBCL <- log(get.knots(DLBCL, k = 2))
knots.age.DLBCL <- quantile(DLBCL$age_years, probs = probs)
#Mixture cure model: age-dependent cure fraction (cr.formula) and an
#age x time interaction in the disease-specific survival (tvc.formula)
fit_DLBCL_time <- GenFlexCureModel(Surv(FU_years, status) ~ -1,
                                   data = DLBCL, bhazard = "exp_haz",
                                   smooth.formula = ~ cb(x = log(FU_years), knots = knots.DLBCL),
                                   cr.formula = ~cb(x = age_years, knots = knots.age.DLBCL, intercept = F),
                                   tvc.formula = ~cb(log(FU_years), knots = knots.time.DLBCL):
                                     cb(x = age_years, knots = knots.age.DLBCL, intercept = F),
                                   ini.types = "cure")
#Stash the age knots on the fit object for later basis reconstruction
fit_DLBCL_time$knots_age <- knots.age.DLBCL
#Legacy FlexCureModel formulation for FL, kept for reference:
# knots_FL <- quantile(FL$age_years, probs = probs)
# #knots_FL <- quantile(FL$age_years, probs = c(0, 0.333, 0.666, 1))
# bs_FL <- cuRe:::basis(x = FL$age_years, knots = knots_FL, intercept = FALSE, ortho = T)
# colnames(bs_FL) <- paste0("bs", 1:(length(knots_FL) - 1))
# FL2 <- cbind(FL, bs_FL)
#
# fit_FL_time <- FlexCureModel(Surv(FU_years, status) ~ bs1 + bs2 + bs3, data = FL2,
#                              smooth.formula = ~1,
#                              bhazard = "exp_haz",
#                              n.knots = 5, n.knots.time = list(bs1 = 2, bs2 = 2, bs3 = 2))
#Same knot setup as for DLBCL
knots.FL <- log(get.knots(FL, k = 5))
knots.time.FL <- log(get.knots(FL, k = 2))
knots.age.FL <- quantile(FL$age_years, probs = probs)
#Same model structure as the DLBCL fit above.
#NOTE(review): only the FL fit uses ortho = F in its spline bases -- confirm
#this deviation from the DLBCL/ML fits is intentional.
fit_FL_time <- GenFlexCureModel(Surv(FU_years, status) ~ -1,
                                data = FL, bhazard = "exp_haz",
                                smooth.formula = ~ cb(x = log(FU_years), knots = knots.FL, ortho = F),
                                cr.formula = ~cb(x = age_years, knots = knots.age.FL, intercept = F, ortho = F),
                                tvc.formula = ~cb(log(FU_years), knots = knots.time.FL, ortho = F):
                                  cb(x = age_years, knots = knots.age.FL, intercept = F, ortho = F),
                                ini.types = "cure")
#Leftover debugging inspection (auto-prints only in interactive use)
fit_FL_time$covariance
fit_FL_time$knots_age <- knots.age.FL
#Legacy FlexCureModel formulation for ML, kept for reference:
# knots_ML <- quantile(ML$age_years, probs = probs)
# bs_ML <- cuRe:::basis(x = ML$age_years, knots = knots_ML, intercept = FALSE, ortho = T)
# colnames(bs_ML) <- paste0("bs", 1:(length(knots_ML) - 1))
# ML2 <- cbind(ML, bs_ML)
# fit_ML_time <- FlexCureModel(Surv(FU_years, status) ~ bs1 + bs2 + bs3, data = ML2,
#                              smooth.formula = ~1,
#                              bhazard = "exp_haz",
#                              n.knots = 5,
#                              n.knots.time = list(bs1 = 2, bs2 = 2, bs3 = 2))
#Same knot setup and model structure as the DLBCL fit above
knots.ML <- log(get.knots(ML, k = 5))
knots.time.ML <- log(get.knots(ML, k = 2))
knots.age.ML <- quantile(ML$age_years, probs = probs)
fit_ML_time <- GenFlexCureModel(Surv(FU_years, status) ~ -1,
                                data = ML, bhazard = "exp_haz",
                                smooth.formula = ~ cb(x = log(FU_years), knots = knots.ML),
                                cr.formula = ~cb(x = age_years, knots = knots.age.ML, intercept = F),
                                tvc.formula = ~cb(log(FU_years), knots = knots.time.ML):
                                  cb(x = age_years, knots = knots.age.ML, intercept = F),
                                ini.types = "cure")
#Leftover debugging inspection (auto-prints only in interactive use)
fit_ML_time$covariance
fit_ML_time$knots_age <- knots.age.ML
#Relative survival models (Nelson-type stpm2), one per disease, with an
#age x time interaction but no cure fraction.
#NOTE: these reassign knots.DLBCL/knots.FL/knots.ML (now k = 6) and the
#time-varying knots (now k = 3), overwriting the cure-model knot vectors above;
#the age knots (knots.age.*) are reused from the cure-model section.
knots.DLBCL <- log(get.knots(DLBCL, k = 6))
knots.time.DLBCL <- log(get.knots(DLBCL, k = 3))
fit_DLBCL_time2 <- stpm2(Surv(FU_years, status) ~ -1,
                         data = DLBCL, bhazard = DLBCL$exp_haz,
                         smooth.formula = ~ cb(x = log(FU_years), knots = knots.DLBCL),
                         tvc.formula = ~cb(x = age_years, knots = knots.age.DLBCL, intercept = F):
                           cb(x = log(FU_years), knots = knots.time.DLBCL))
knots.FL <- log(get.knots(FL, k = 6))
knots.time.FL <- log(get.knots(FL, k = 3))
fit_FL_time2 <- stpm2(Surv(FU_years, status) ~ -1,
                      data = FL, bhazard = FL$exp_haz,
                      smooth.formula = ~ cb(x = log(FU_years), knots = knots.FL),
                      tvc.formula = ~ cb(x = age_years, knots = knots.age.FL, intercept = F) :
                        cb(x = log(FU_years), knots = knots.time.FL))
knots.ML <- log(get.knots(ML, k = 6))
knots.time.ML <- log(get.knots(ML, k = 3))
fit_ML_time2 <- stpm2(Surv(FU_years, status) ~ -1,
                      data = ML, bhazard = ML$exp_haz,
                      smooth.formula = ~ cb(x = log(FU_years), knots = knots.ML),
                      tvc.formula = ~ cb(x = age_years, knots = knots.age.ML, intercept = F) :
                        cb(x = log(FU_years), knots = knots.time.ML))
#Alternative natural-spline formulation, kept for reference:
# fit_ML_time2 <- stpm2(Surv(FU_years, status) ~ 1 + ns(age_years, df = 3), data = ML,
#                       bhazard = ML$exp_haz, df = 5,
#                       tvc.formula = ~ns(age_years, df = 3):ns(log(FU_years), df = 2))
#Evaluation grid: loss of lifetime at 0, 2 and 5 years post diagnosis,
#for ages at diagnosis from 50 to 80 in 2-year steps.
times <- c(0, 2, 5)
ages <- seq(50, 80, by = 2)
#Loss of lifetime from the flexible mixture cure models, one list per disease,
#one element per age at diagnosis.
res_time <- lapply(list(fit_DLBCL_time, fit_FL_time, fit_ML_time), function(fit) {
  lapply(ages, function(a) {
    newdat <- data.frame(age = a * ayear, age_years = a,
                         sex = "female", year = as.Date("2010-01-01"))
    calc.LL(fit, time = times, var.type = "n",
            newdata = newdat, smooth.exp = F)[[1]]
  })
})
#Same quantities from the relative-survival (Nelson-type) models
res_time_nelson <- lapply(list(fit_DLBCL_time2, fit_FL_time2, fit_ML_time2), function(fit) {
  lapply(ages, function(a) {
    newdat <- data.frame(age_years = a, age = a * ayear,
                         sex = "female", year = as.Date("2010-01-01"))
    calc.LL(fit, time = times, var.type = "n",
            newdata = newdat, smooth.exp = F)[[1]]
  })
})
#Stack the per-disease/per-age LL tables into one long data frame, tagging
#each row with age at diagnosis, evaluation time and disease.  The same
#flattening is applied to both model families, so it lives in a helper.
flatten_ll <- function(res) {
  stacked <- lapply(res, function(x) {
    D <- do.call(rbind, x)
    D$age <- rep(ages, each = length(times))
    D$Time <- rep(times, length(ages))
    D
  })
  stacked <- do.call(rbind, stacked)
  stacked$disease <- rep(c("DLBCL", "FL", "ML"), each = length(ages) * length(times))
  stacked$Time <- factor(stacked$Time)
  stacked
}
res_time2 <- flatten_ll(res_time)
res_time_nelson2 <- flatten_ll(res_time_nelson)
#Combine both model families; Model records which family each row came from
res_all <- rbind(res_time2, res_time_nelson2)
models <- c("FMC", "NRS")
res_all$Model <- factor(rep(models, c(nrow(res_time2), nrow(res_time_nelson2))), models)
#Final figure: loss of lifetime as a function of age at diagnosis, by disease
#(panels), follow-up time (linetype) and model family (colour).
p <- ggplot(res_all, aes(x = age, y = Estimate, linetype = Time, colour = Model))
p <- p +
  geom_line() +
  facet_grid(. ~ disease) +
  xlab("Age at diagnosis (years)") +
  ylab("Loss of lifetime (years)") +
  theme_bw() +
  theme(legend.position = "bottom",
        legend.text = element_text(size = 15),
        legend.title = element_text(size = 15),
        axis.title = element_text(size = 17),
        strip.text = element_text(size = 15),
        axis.text = element_text(size = 14),
        legend.key.size = unit(2, "line")) +
  scale_color_manual(name = "", values = c("black", "grey"))
pdf(file.path(fig.out, "Time_varyingLOL2.pdf"), width = 10, height = 5.3)
print(p)
dev.off()
|
8d329aea92a99441c61bb9363d45e6798b40b12e | 5e78f5a44207e1fa7e1d3b1cfaf8aa43d2458687 | /Fig. 6 code copy.R | 92e242f46049fa6f52c540bc70ed783653c8c3e1 | [
"MIT"
] | permissive | jiaojiaojing84/Ecology_2477 | bd27282946e7e584b269fda75d9b4eaf88d5d19c | 01469bb2146e234c8e96add46adbdc93719b44bb | refs/heads/master | 2020-03-27T03:58:48.385206 | 2018-08-23T20:27:01 | 2018-08-23T20:27:01 | 145,903,083 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,438 | r | Fig. 6 code copy.R | ##take off exponential term +1 and interference competition
#NOTE(review): hard-coded absolute setwd() makes this script non-portable;
#prefer running from the project directory or using relative paths.
setwd("C:\\Users\\jiaojin1\\Downloads\\PhD work")
library(deSolve)   # ode() solver used below
library(reshape)
library(ggplot2)
library(scales)
library(pheatmap)
################test
#####L is fishing ground while T is MPAs, total area is S
#Right-hand side of the spatial fishery ODE model, in the form expected by
#deSolve::ode().  The S = L + T cells form a ring: cells 1..L are fished
#ground, cells L+1..L+T are marine protected areas (MPAs).
#Parameters (supplied via `parameters`): R = recruitment, mu = natural
#mortality, F = fishing mortality (fished cells only), D1/D2 = emigration
#rates out of fished/MPA cells; half of each outflow goes to each neighbour.
#NOTE: T, F and L here shadow base R's TRUE/FALSE shortcuts and length() --
#but only inside this with() scope.
JAP08<-function(t, inits,parameters) {
  with(as.list(c(inits, parameters)),{
    x<-inits[1:L]   # fished-cell densities
    y<-inits[(L+1):(L+T)]   # MPA-cell densities
    A<-array(NA,dim=c(1,(L+T)))   # derivatives, same ordering as inits
    #Boundary fished cells: lose mu + F + emigration D1, receive half of the
    #neighbours' outflow; the ring wraps around via y[T] and x[1]/x[L].
    if(L==1)
    {
      A[1]<-R-(mu+F)*x[1]-D1*x[1]+D2/2*y[1]+D2/2*y[T]
    }
    else
    {
      A[1]<-R-(mu+F)*x[1]-D1*x[1]+D1/2*x[2]+D2/2*y[1]
      A[L]<-R-(mu+F)*x[L]-D1*x[L]+D1/2*x[L-1]+D2/2*y[T]
    }
    #Interior fished cells exchange only with their two fished neighbours
    if(L-1>=2)
    {
      for(i in 2:(L-1))
      {
        A[i]<-R-(mu+F)*x[i]-D1*x[i]+D1/2*(x[i-1]+x[i+1])
      }
    }
    #Boundary MPA cells: no fishing mortality, emigration rate D2
    if(T==1)
    {
      A[(L+1)]<-R-mu*y[1]-D2*y[1]+D1/2*x[L]+D1/2*x[1]
    }
    else
    {
      A[(L+1)]<-R-mu*y[1]-D2*y[1]+D2/2*y[2]+D1/2*x[1]
      A[(T+L)]<-R-mu*y[T]-D2*y[T]+D2/2*y[T-1]+D1/2*x[L]
    }
    #Interior MPA cells
    if(T-1>=2)
    {
      for(i in (L+2):(T+L-1))
      {
        A[i]<-R-mu*y[i-L]-D2*y[i-L]+D2/2*(y[i-L-1]+y[i-L+1])
      }
    }
    #ode() expects a list whose first element is the derivative vector
    list(c(A))
  })
}
#################
#Simulation setup
Timesteps=500   # number of time steps to integrate (equilibrium assumed reached)
times <- seq(0, Timesteps, by = 1)
S=10   # total number of cells (fished ground + MPA)
inits <- rep(1,S)   # start every cell at density 1
r<-c(1,1.5,2)   # ratio of fished-ground to MPA mobility (D1 = r * D2)
m<-seq(0,10,0.5)   # baseline mobility values (used as D2)
#NOTE(review): this masks base R's F (FALSE shortcut) in the global workspace
F=0.25   # fishing mortality
##h is for MPA size, r is for differential movement
#Equilibrium densities per (r, m, cell) for MPA sizes T = 1, 5, 9, plus the
#no-movement "before MPA" baseline scenario
eqn1<-array(NA,dim=c(length(r),length(m),S))
eqn5<-array(NA,dim=c(length(r),length(m),S))
eqn9<-array(NA,dim=c(length(r),length(m),S))
eqn_before<-array(NA,dim=c(length(r),length(m),S))
#Integrate the metapopulation model to equilibrium for every combination of
#relative mobility r (D1 = r * D2) and baseline mobility m, under three
#reserve configurations (T = 1, 5, 9 MPA cells), and record the steady-state
#density of every cell.
#
#Fixes vs. the original:
# * BUG: eqn_before was integrated with `parms = parameters` (the T=9
#   scenario's parameters) instead of `parms = parameters_before`, so the
#   stated "before MPA" baseline was never actually used.
# * Each scenario was re-solved S times (once per cell index z) although the
#   ODE solution does not depend on z; each scenario is now solved once per
#   (i, j) and all S cells are read from that single run.
#The baseline (D1 = D2 = 0) does not depend on i or j either, so it is
#solved a single time up front and copied into every (i, j) slice.
parameters_before <- c(T = 5, L = 5, R = 2, mu = 0.5, D1 = 0, D2 = 0, F = 0.25)
out_before <- ode(y = inits, times = times, func = JAP08, parms = parameters_before)
before_state <- out_before[Timesteps + 1, 1 + seq_len(S)]  # column 1 is time
for (i in seq_along(r)) {
  for (j in seq_along(m)) {
    parameters <- c(T = 1, L = 9, R = 2, mu = 0.5, D1 = m[j] * r[i], D2 = m[j], F = 0.25)
    out <- ode(y = inits, times = times, func = JAP08, parms = parameters)
    eqn1[i, j, ] <- out[Timesteps + 1, 1 + seq_len(S)]
    parameters <- c(T = 5, L = 5, R = 2, mu = 0.5, D1 = m[j] * r[i], D2 = m[j], F = 0.25)
    out <- ode(y = inits, times = times, func = JAP08, parms = parameters)
    eqn5[i, j, ] <- out[Timesteps + 1, 1 + seq_len(S)]
    parameters <- c(T = 9, L = 1, R = 2, mu = 0.5, D1 = m[j] * r[i], D2 = m[j], F = 0.25)
    out <- ode(y = inits, times = times, func = JAP08, parms = parameters)
    eqn9[i, j, ] <- out[Timesteps + 1, 1 + seq_len(S)]
    eqn_before[i, j, ] <- before_state
  }
}
###Baseline ("before MPA") summary quantities from the zero-movement scenario
###Equilibrium cell density (taken from cell 1 of the baseline run)
den_bef<-eqn_before[1,1,1]
###local effect: = 1 by definition, since there is no MPA contrast before
loc_before=1
###regional abundance: baseline density times the 10 cells
###(comment from original run: eqnmean_ba[1,1,1] = 2.666667, which matches
###R/(mu+F) = 2/0.75 for R=2, mu=0.5, F=0.25)
reg_before<-eqn_before[1,1,1]*10
###fishing yield = fishing mortality times regional abundance
fis_before<-F*reg_before
#Relative densities (equilibrium / pre-MPA density) for the half-reserve
#scenario (T = 5) at mobility index 10 (m[10] = 4.5), one row per r value;
#used only to set a common y-axis range for the three panels.
bind1<-rbind(eqn5[1,10,]/den_bef,eqn5[2,10,]/den_bef,eqn5[3,10,]/den_bef)
bind<-bind1
tiff("Open_system_Fig.6.tiff", width=5,height=5, units='in',res=600)
par(mfrow=c(1,3))
par(mar=c(12,2,12,2))
###at large MPA size SM=9
#plot(seq(1,10,1),eqn1[1,10,],lwd=2,xlab="",ylab="",ylim=c(min(bind),max(bind)))
#abline(v=9.5,lwd=2,col="red")
#text(3.5,4.5,paste("LE=", round(mean(eqn1[1,10,10])/mean(eqn1[1,10,1:9]),digits=2)))
#Panel 1: r = 1.  All cells plotted in teal, fished cells 1-5 overplotted in
#red; dashed vertical line marks the reserve boundary; LE = ratio of mean MPA
#density (cells 6-10) to mean fished density (cells 1-5).
yvalue<-eqn5[1,10,]/den_bef
plot(seq(1,10,1),eqn5[1,10,]/den_bef,lwd=2,type="p",lty=2,ylim=c(min(bind)-0.03,max(bind)+0.03),xlab="",ylab="",pch=16,col="#31A9B8")
points(seq(1,10,1),eqn5[1,10,]/den_bef,lwd=1,type="p",col="black")
points(1:5,yvalue[1:5],col="red",pch=16,type="p")
abline(v=5.5,lwd=2,lty=5)
text(3.3,1.4,paste("LE=",round(mean(eqn5[1,10,6:10])/mean(eqn5[1,10,1:5]),digits=2)))
#Panel 2: r = 1.5 (same layout as panel 1)
yvalue1<-eqn5[2,10,]/den_bef
plot(seq(1,10,1),eqn5[2,10,]/den_bef,lwd=2,type="p",lty=2,ylim=c(min(bind)-0.03,max(bind)+0.03),xlab="",ylab="",pch=16,col="#31A9B8")
points(seq(1,10,1),eqn5[2,10,]/den_bef,lwd=1,type="p",col="black")
points(1:5,yvalue1[1:5],col="red",pch=16,type="p")
abline(v=5.5,lwd=2,lty=5)
text(3.3,1.4,paste("LE=",round(mean(eqn5[2,10,6:10])/mean(eqn5[2,10,1:5]),digits=2)))
#Panel 3: r = 2 (same layout as panel 1)
yvalue2<-eqn5[3,10,]/den_bef
plot(seq(1,10,1),eqn5[3,10,]/den_bef,lwd=2,type="p",lty=2,ylim=c(min(bind)-0.03,max(bind)+0.03),xlab="",ylab="",pch=16,col="#31A9B8")
points(seq(1,10,1),eqn5[3,10,]/den_bef,lwd=1,type="p",col="black")
points(1:5,yvalue2[1:5],col="red",pch=16,type="p")
abline(v=5.5,lwd=2,lty=5)
text(3.3,1.4,paste("LE=",round(mean(eqn5[3,10,6:10])/mean(eqn5[3,10,1:5]),digits=2)))
dev.off()
|
f47e4b6763b20aa52252d15f10a1498ce5d8faf4 | dd8f1e4f866bb08a99a378324844b2e8bd66984a | /lecture_3/lecture3-script-comments-only.R | 3202e472957a9c5711d01a124a0560c97a8f272e | [] | no_license | kovacskokokornel/Rcoding_CEU | fd40ba7bdae17e61fdba39c2925e2030b24378a9 | 7cb4ee79f9ba6e75fd5f3d971e95e1a1f32e6025 | refs/heads/master | 2020-07-28T04:48:56.438283 | 2019-12-06T17:48:17 | 2019-12-06T17:48:17 | 209,314,244 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,183 | r | lecture3-script-comments-only.R | # Lecture 3 Script
# First choose a new team for next week
# Follows Grolemund and Wickham, chapter 5
# Install the dataset if you don't have it
# install.packages("nycflights13")
library(nycflights13)
flights
View(flights)
# Today, we'll cover
# - filter()
# - arrange()
# - select()
# Next week, we'll cover
# - mutate()
# - summarise()
# - group_by(), which tells the other verbs to use the data by groups
# All take as first argument a data frame (or tibble) and return a data frame (or tibble).
# Together they form the verbs of the tidyverse.
# Class Exercise: For 2 minutes, think about why it is a nice property (and a conscious design choice)
# that all verbs take as a first argument a data frame and return a data frame. Talk with your
# neighbour about this.
# Filtering (choosing) rows with filter()
# Each call returns a new (filtered) tibble; nothing is saved yet.
filter(flights, month==1)
# Multiple conditions separated by commas are combined with AND
filter(flights, day == 1, month == 2)
# dplyr functions don't change the data frame that you give it. They return a new one.
# Save the filtered data
feb1 <- filter(flights, day == 1, month == 2)
# Assign and print in one step by wrapping the assignment in parentheses.
# (Fix: the original line omitted the parentheses that its own comment,
# "use (varname <- ...)", describes, so nothing was printed.)
(feb1 <- filter(flights, day == 1, month == 2))
# Check it really assigned
feb1
# Some notes on comparisons: floating-point arithmetic is not exact, so ==
# can surprise you (the first comparison below is FALSE, the second TRUE).
sqrt(2)^2 == 2
sqrt(4)^2 == 4
# In short, you can't rely on "It works because it works for what I tried".
# For floating point comparisons, use near() to compare numbers
near(sqrt(2)^2, 2)
# Exercise: What counts as near? Find out. Can you change it?
# Multiple constraints: | is OR, ! is NOT; wrapping in ( ) also prints the result
(jan_feb <- filter(flights, month == 1 | month == 2))
(not_jan <- filter(flights, !(month == 1)))
# Class exercise: How do we know these actually worked?
filter(not_jan, month == 1)
unique(not_jan$month)
(jan_to_jun <- filter(flights, month <= 6 ))
(jan_to_jun2 <- filter(flights, month %in% c(1,2,3,4,5,6)))
nrow(jan_to_jun2) == nrow(jan_to_jun)
# Class Exercise: What does this do?
# (The two filters below are equivalent by De Morgan's law.)
mystery_filter <- filter(flights, !(arr_delay > 120 | dep_delay > 120))
mystery_filter_1 <- filter(flights, arr_delay <= 120, dep_delay <= 120)
# Vote:
# 1. All flights that started and landed 120 minutes late
# 2. All flights that started 120 minutes late or landed 120 minutes late
# 3. All flights that started less than 120 minutes late or landed less than 120 minutes late
# 4. All flights that started and landed less than 120 minutes late
# Class Exercise: Get the filter command for number 3 above
number_3 <- filter(flights, arr_delay < 120 | dep_delay < 120)
# Class Exercise: get all flights that departed with less than 120 minutes delay,
# but arrived with more than 120 minutes delay.
# (Fix: the original had the two conditions swapped -- arr_delay < 120,
# dep_delay > 120 -- i.e. the opposite of both the exercise text and the
# variable name "dep_ok_arr_not".)
dep_ok_arr_not <- filter(flights, dep_delay < 120, arr_delay > 120)
ggplot(data = dep_ok_arr_not,
       mapping = aes(x = dep_delay)) +
  geom_histogram()
# Let's look at the data to see what the departure was for planes that arrived
# late but didn't start quite as late
ggplot(data = flights,
       mapping = aes(x = dep_delay)) +
  geom_histogram()
# Filter flights by those that had dep_delay <= 120, then plot histogram
dep_ok <- filter(flights, dep_delay <= 120)
ggplot(data = dep_ok,
       mapping = aes(x = dep_delay)) +
  geom_histogram()
# NA: Not available.  Any comparison with NA yields NA; logical operators
# only return a definite answer when the result doesn't depend on the NA.
NA > 5
10 == NA
NA == NA
FALSE | NA
FALSE & NA
NA & FALSE
# Nice example from G&W
# Let x be Mary's age. We don't know how old she is.
x <- NA
# Let y be John's age. We don't know how old he is.
y <- NA
# Are John and Mary the same age?
#> [1] NA
x == y
# We don't know!
# (NA ^ 0 is 1 because the answer is 1 for *any* base.)
NA ^ 0
0 * NA
is.na(x)
# A small tibble for exploring how filter() treats NA.
# (Fix: the column must be *named* x -- tibble(x = ...), as in G&W.  With the
# original unnamed column, `filter(df, x > 1)` silently picked up the global
# x (NA) defined above and returned zero rows instead of filtering the column.)
df <- tibble(x = c(1, NA, 3))
df
filter(df, x > 1)
filter(df, x > 1 | is.na(x))
## arrange() -- reorders rows; NAs are always sorted to the end
flights
arrange(flights, year, month, day)
arrange(flights, dep_delay)
arrange(flights, desc(dep_delay))
arrange(df, x)
arrange(df, desc(x))
# Class exercise: How can we get the missing values at the top?
# Fastest flight
colnames(flights)
arrange(flights, air_time)
# Better ways of getting some special columns
# select() -- picks columns by name
select(flights, year, month,day)
select(arrange(flights, air_time), air_time, origin, dest)
# That's tedious to write. Hence the pipe.
flights %>%
  arrange(air_time) %>%
  select(air_time, origin, dest)
# Notice that the data doesn't have to be mentioned,
# and the first argument should not have to be provided
# Some helper functions
select(flights, year:day)
# Dropping cols
select (flights, -(year:day))
## some helper functions
select (flights, starts_with("arr"))
select (flights, -starts_with("arr"))
select (flights, ends_with("hour"))
select (flights, contains("time"))
?select
# Function for renaming columns
rename(flights, destination = dest)
# Hard to see if it worked, so...
flights %>% rename (destination = dest) %>% select (year:day, destination)
# Moving some columns to the start
select(flights, origin, dest, everything())
# Class Exercise: What happens if you include a variable multiple times?
## Assignment 4
# ## Resources
#
# - If you have no experience coding, this may be helpful: https://rstudio-education.github.io/hopr/
#
# ## Assignment 4
#
# 1. Read Chapter 5 of Grolemund and Wickham parts 1 through 3 (until select) of Grolemund and Wickham for anything we did not cover. We will cover the remaining parts next week.
# 2. Turn the script (.R file) from class into a markdown file which displays the graphs and tables. Add any comments that might benefit you later on, such as reminders of things you found confusing, etc.
# Make sure that you comment the graphs where appropriate, either through captions or in the accompanying text.
# 3. Repeat the steps from chapter 5 in parts 1 through 3, but using hotels data instead of the nycflights data. Since the two datasets don't have the same columns, either pick some variable you'd like to filter on and see results on, or use the following suggested mapping:
# - When filtering (etc) on month for flights, use stars in the hotels data
# - Instead of flight duration, use hotel price
# - For travel times, use distance (you can reuse distance for different types of time)
#
# Example: Instead of doing
# filter(flights, month == 1)
# you should do
# filter(hotels, stars == <some-number-you-like>)
# Create similar output to Grolemund and Wickham, i.e. show what the output is of various commands.
de826217d7ae757c10f87d87f0e1aa2665360940 | 3984db1e1c46a241e2305220112eee6c0c5d6c62 | /E-System.R | 834afcd855dd5cebcef8e0ec2d72e6ad39a0c5a8 | [
"MIT"
] | permissive | abotalebmostafa11/ET-System | 2a99af994bb85622b528d5b5f359493fcdd32022 | 412de391adbc36bb6bb576db3ee720b499b4cdd4 | refs/heads/main | 2023-07-17T05:01:33.126279 | 2021-08-31T16:04:43 | 2021-08-31T16:04:43 | 394,823,240 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 38,103 | r | E-System.R | #Import library
# install.packages("Formattable")#install package for make tables
#install.packages("xlsx")
#install.packages("writexl")
#install.packages("stargazer") # Install & load gridExtra
# Load all required packages.
# (Cleanup: the original loaded several packages two or three times and used
# require() for loading; each distinct package is now loaded exactly once
# with library(), which errors loudly if a package is missing.  No package
# has been removed from the list.)
library("stargazer")
library(xlsx)
library(writexl)
library(readxl)
library("openxlsx")
library(fpp2)
library(forecast) # forecasting models (nnetar, forecast, accuracy)
library(ggplot2)
library(moments)  # kurtosis(), skewness()
library(tseries)  # stationarity tests for time series
library(markovchain)
library(data.table)
library(Hmisc)
library(ascii)    # for making tables
library(pander)
library(gridExtra)
##Global variables: user-editable configuration for the forecasting run##
Full_original_data <- read_excel("data.xlsx") # path of your data (time series data)
original_data<-Full_original_data$Cases # column of the series to model
y_lab <- "(Daily Covid 19 Infection cases in Russia)" # descriptive name used in labels
Actual_date_interval <- c("2020/01/03","2021/08/15") # actual date range of your data
Forecast_date_interval <- c("2021/08/16","2021/08/31") # forecasting date range
validation_data_days <-7 # number of trailing observations held out for testing
Number_Neural<-50# number of hidden-layer nodes for the NNAR model
NNAR_Model<- FALSE #fit a new NNAR model (TRUE) or load the saved one (FALSE)
#NOTE(review): this masks stats::frequency() in the global workspace
frequency<-"days" # sampling interval of the data (days/weeks/months/years)
country.name <- "Russia" # name of area, country or case series
# Data Preparation & calculation of some summary statistics
summary(original_data) # five-number summary of the series (auto-prints interactively)
# coefficient of kurtosis
# coefficient of skewness
# standard deviation
data.frame(kurtosis=kurtosis(original_data),skewness=skewness(original_data),Standard.deviation =sd(original_data))
#Split the series: everything except the last validation_data_days is training
rows <- NROW(original_data) # number of observations in the series (number of days)
training_data<-original_data[1:(rows-validation_data_days)] # training data
testing_data<-original_data[(rows-validation_data_days+1):rows] #held-out testing data
#Date sequences matching the actual and forecast periods
AD<-fulldate<-seq(as.Date(Actual_date_interval[1]),as.Date(Actual_date_interval[2]), frequency) #actual dates
FD<-seq(as.Date(Forecast_date_interval[1]),as.Date(Forecast_date_interval[2]), frequency) #forecast dates
N_forecasting_days<-nrow(data.frame(FD)) #number of days to forecast
validation_dates<-tail(AD,validation_data_days) # dates of the held-out observations
validation_data_by_name<-weekdays(validation_dates) # weekday names of validation dates
forecasting_data_by_name<-weekdays(FD) # weekday names of forecast dates
##############
# NNAR Model #
##############
# Fit a new neural-network autoregression (NNAR) model when NNAR_Model is TRUE,
# otherwise reload the previously saved model from "model_NNAR.RDS".
# Side effects: defines `data_series` and `model_NNAR`; when refitting, also
# writes "model_NNAR.RDS" and defines `my_model` (an RDS round-trip check).
data_series <- ts(training_data)
if (NNAR_Model == TRUE) {
  model_NNAR <- nnetar(data_series, size = Number_Neural)
  saveRDS(model_NNAR, file = "model_NNAR.RDS")
  my_model <- readRDS("model_NNAR.RDS") # sanity check: model survives an RDS round trip
} else {
  model_NNAR <- readRDS("model_NNAR.RDS")
}
# FIX: these two results were previously evaluated inside the if-braces, where
# intermediate expressions are never auto-printed, so nothing was shown.
print(accuracy(model_NNAR)) # in-sample (training) accuracy
print(model_NNAR)           # model parameters
# Testing-data evaluation: forecast far enough to cover both the held-out
# validation window and the future forecasting window.
forecasting_NNAR <- forecast(model_NNAR, h=N_forecasting_days+validation_data_days)
# The first `validation_data_days` forecast steps line up with `testing_data`.
validation_forecast<-head(forecasting_NNAR$mean,validation_data_days)
data.frame(validation_forecast)
# Per-day absolute percentage error on the validation window.
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using NNAR Model for ==> ",y_lab, sep=" ")
MAPE_Mean_All<-paste(round(mean(MAPE_Per_Day),3),"% MAPE",validation_data_days,frequency,y_lab,sep=" ")
MAPE_Mean_All_NNAR<-round(mean(MAPE_Per_Day),3) # overall NNAR MAPE, reused for model ranking later
MAPE_NNAR<-paste(round(MAPE_Per_Day,3),"%")
MAPE_NNAR_Model<-paste(MAPE_Per_Day ,"%")
paste ("MAPE that's Error of Forecasting for ",validation_data_days," days in NNAR Model for ==> ",y_lab, sep=" ")
paste(MAPE_Mean_All,"%")
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in NNAR Model for ==> ",y_lab, sep=" ")
# reStructuredText tables: validation (actual vs forecast + MAPE) and future forecasts.
print(ascii(data.frame(date_NNAR=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_NNAR=validation_forecast,MAPE_NNAR_Model)), type = "rest")
print(ascii(data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_NNAR=tail(forecasting_NNAR$mean,N_forecasting_days))), type = "rest")
# Export testing and forecasting tables to CSV files.
df1<-data.frame(Testing.Date=validation_dates,Day.Name=validation_data_by_name,Actual.Value=testing_data,forecasting.NNAR=head(forecasting_NNAR$mean,validation_data_days),MAPE=MAPE_NNAR_Model)
df11<-data.frame(Forecasting.Date=FD,forecating.date=forecasting_data_by_name,forecasting.NNAR=tail(forecasting_NNAR$mean,N_forecasting_days))
write.csv(df1, file = "testing NNAR Model.csv")
write.csv(df11, file = "forecasting NNAR Model.csv")
# Base-graphics plot of the forecast with the actual test values overlaid in red.
plot(forecasting_NNAR,xlab = paste ("Time in", frequency ,y_lab , sep=" "), ylab=y_lab)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2)
# ggplot version of the same forecast (p1 is written to PNG at the end of the script).
graph1<-autoplot(forecasting_NNAR,xlab = paste ("Time in", frequency ,y_lab , sep=" "), ylab=y_lab)
p1<-graph1+scale_y_continuous(labels = scales::comma)+
forecast::autolayer(forecasting_NNAR$mean, series="NNAR Model",size = 0.7) +
guides(colour=guide_legend(title="Forecasts"),fill = "black")+
theme(legend.position="bottom")+
theme(legend.background = element_rect(fill="white",
size=0.7, linetype="solid",
colour ="gray"))
p1
#################
## BATS model  ##
#################
# Fit a BATS model on the training window, evaluate it on the held-out testing
# window, and export the same tables/plots as the other model sections.
data_series <- ts(training_data) # training window as a time series
autoplot(data_series, xlab = paste("Time in", frequency, sep = " "), ylab = y_lab, main = paste("Actual Data :", y_lab, sep = " "))
model_bats <- bats(data_series)
accuracy(model_bats) # in-sample (training) accuracy
# Model parameters
model_bats
# Plot the fitted BATS components
plot(model_bats, xlab = paste("Time in", frequency, y_lab, sep = " "))
# Testing-data evaluation: forecast over the test window plus the future window.
forecasting_bats <- predict(model_bats, h = N_forecasting_days + validation_data_days)
validation_forecast <- head(forecasting_bats$mean, validation_data_days)
MAPE_Per_Day <- round(abs(((testing_data - validation_forecast) / testing_data) * 100), 3)
paste("MAPE % For ", validation_data_days, frequency, "by using BATS Model for ==> ", y_lab, sep = " ")
MAPE_Mean_All.bats_Model <- round(mean(MAPE_Per_Day), 3) # overall BATS MAPE, reused for ranking
MAPE_Mean_All.bats <- paste(round(mean(MAPE_Per_Day), 3), "% MAPE ", validation_data_days, frequency, y_lab, sep = " ")
MAPE_bats <- paste(round(MAPE_Per_Day, 3), "%")
MAPE_bats_Model <- paste(MAPE_Per_Day, "%")
paste("MAPE that's Error of Forecasting for ", validation_data_days, " days in BATS Model for ==> ", y_lab, sep = " ")
paste(MAPE_Mean_All.bats, "%")
paste("MAPE that's Error of Forecasting day by day for ", validation_data_days, " days in BATS Model for ==> ", y_lab, sep = " ")
print(ascii(data.frame(date_bats = validation_dates, validation_data_by_name, actual_data = testing_data, forecasting_bats = validation_forecast, MAPE_bats_Model)), type = "rest")
# FIX: the Upper column previously repeated forecasting_bats$lower; it now
# reports the upper prediction bound.
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_bats = tail(forecasting_bats$mean, N_forecasting_days), lower = tail(forecasting_bats$lower, N_forecasting_days), Upper = tail(forecasting_bats$upper, N_forecasting_days))), type = "rest")
# Export testing and forecasting tables to CSV (same $lower/$upper fix in df21).
df2 <- data.frame(Testing.Date = validation_dates, Day.Name = validation_data_by_name, Actual.Value = testing_data, Forecasting.BATS = head(forecasting_bats$mean, validation_data_days), MAPE = MAPE_bats_Model)
df21 <- data.frame(Forecasting.Date = FD, forecating.Date = forecasting_data_by_name, Forecasting.BATS = tail(forecasting_bats$mean, N_forecasting_days), lower = tail(forecasting_bats$lower, N_forecasting_days), Upper = tail(forecasting_bats$upper, N_forecasting_days))
write.csv(df2, file = "testing BATS Model.csv")
write.csv(df21, file = "forecasting BATS Model.csv")
plot(forecasting_bats)
x1_test <- ts(testing_data, start = (rows - validation_data_days + 1))
lines(x1_test, col = 'red', lwd = 2) # actual test values in red
graph2 <- autoplot(forecasting_bats, xlab = paste("Time in", frequency, y_lab, sep = " "), ylab = y_lab)
p2 <- graph2 + scale_y_continuous(labels = scales::comma) +
  forecast::autolayer(forecasting_bats$mean, series = "BATS Model", size = 0.7) +
  guides(colour = guide_legend(title = "Forecasts"), fill = "black") +
  theme(legend.position = "bottom") +
  theme(legend.background = element_rect(fill = "white",
                                         size = 0.7, linetype = "solid",
                                         colour = "gray"))
p2
###############
## TBATS Model#
###############
# Fit a TBATS model with a fixed specification (no Box-Cox, damped trend off,
# linear trend on, one seasonal period of 6 with 2 Fourier harmonics).
# NOTE(review): forecast:::fitSpecificTBATS is an unexported internal function;
# its interface may change between forecast-package versions.
data_series<-ts(training_data)
model_TBATS<-forecast:::fitSpecificTBATS(data_series,use.box.cox=FALSE, use.beta=TRUE, seasonal.periods=c(6),use.damping=FALSE,k.vector=c(2))
accuracy(model_TBATS) # in-sample (training) accuracy
# Print model parameters
model_TBATS
plot(model_TBATS,xlab = paste ("Time in", frequency ,y_lab , sep=" "), ylab=y_lab)
# Testing-data evaluation: forecast over the test window plus the future window.
forecasting_tbats <- predict(model_TBATS, h=N_forecasting_days+validation_data_days)
validation_forecast<-head(forecasting_tbats$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using TBATS Model for ==> ",y_lab, sep=" ")
MAPE_Mean_All.TBATS_Model<-round(mean(MAPE_Per_Day),3) # overall TBATS MAPE, reused for ranking
MAPE_Mean_All.TBATS<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_TBATS<-paste(round(MAPE_Per_Day,3),"%")
MAPE_TBATS_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in TBATS Model for ==> ",y_lab, sep=" ")
paste(MAPE_Mean_All.TBATS,"%")
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in TBATS Model for ==> ",y_lab, sep=" ")
print(ascii(data.frame(date_TBATS=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_TBATS=validation_forecast,MAPE_TBATS_Model)), type = "rest")
print(ascii(data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_TBATS=tail(forecasting_tbats$mean,N_forecasting_days),Lower=tail(forecasting_tbats$lower,N_forecasting_days),Upper=tail(forecasting_tbats$upper,N_forecasting_days))), type = "rest")
# Export testing and forecasting tables to CSV files.
df3<-data.frame(Testing.Date=validation_dates,Day.Name=validation_data_by_name,Actual.Value=testing_data,forecasting.TBATS=head(forecasting_tbats$mean,validation_data_days),MAPE=MAPE_TBATS_Model)
df31<-data.frame(Forecasting.Date=FD,forecating.Date=forecasting_data_by_name,forecasting.TBATS=tail(forecasting_tbats$mean,N_forecasting_days),Lower=tail(forecasting_tbats$lower,N_forecasting_days),Upper=tail(forecasting_tbats$upper,N_forecasting_days))
write.csv(df3, file = "testing TBATS Model.csv")
write.csv(df31, file = "forecasting TBATS Model.csv")
plot(forecasting_tbats)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2) # actual test values in red
graph3<-autoplot(forecasting_tbats,xlab = paste ("Time in", frequency ,y_lab , sep=" "), ylab=y_lab)
p3<-graph3+scale_y_continuous(labels = scales::comma)+
forecast::autolayer(forecasting_tbats$mean, series="TBATS Model",size = 0.7) +
guides(colour=guide_legend(title="Forecasts"),fill = "black")+
theme(legend.position="bottom")+
theme(legend.background = element_rect(fill="white",
size=0.7, linetype="solid",
colour ="gray"))
p3
#######################
## Holt's linear trend#
#######################
# Holt's linear trend method with Box-Cox lambda chosen automatically.
data_series<-ts(training_data)
model_holt<-holt(data_series,h=N_forecasting_days+validation_data_days,lambda = "auto")
accuracy(model_holt) # in-sample (training) accuracy
# Print model parameters
summary(model_holt$model)
# Testing-data evaluation: forecast over the test window plus the future window.
forecasting_holt <- predict(model_holt, h=N_forecasting_days+validation_data_days,lambda = "auto")
validation_forecast<-head(forecasting_holt$mean,validation_data_days)
MAPE_Per_Day<-round( abs(((testing_data-validation_forecast)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using holt's Linear trend Model for ==> ",y_lab, sep=" ")
MAPE_Mean_All.Holt_Model<-round(mean(MAPE_Per_Day),3) # overall Holt MAPE, reused for ranking
MAPE_Mean_All.Holt<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_holt<-paste(round(MAPE_Per_Day,3),"%")
MAPE_holt_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in holt's Linear trend Model for ==> ",y_lab, sep=" ")
paste(MAPE_Mean_All.Holt,"%")
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in holt's Linear trend Model for ==> ",y_lab, sep=" ")
print(ascii(data.frame(date_holt=validation_dates,validation_data_by_name,actual_data=testing_data,forecasting_holt=validation_forecast,MAPE_holt_Model)), type = "rest")
print(ascii(data.frame(FD,forecating_date=forecasting_data_by_name,forecasting_by_holt=tail(forecasting_holt$mean,N_forecasting_days),Lower=tail(forecasting_holt$lower,N_forecasting_days),Upper=tail(forecasting_holt$upper,N_forecasting_days))), type = "rest")
# Export testing and forecasting tables to CSV files.
df4<-data.frame(Testing.Date=validation_dates,Day.Name=validation_data_by_name,Actual.Value=testing_data,forecasting.Holt=head(forecasting_holt$mean,validation_data_days),MAPE=MAPE_holt_Model)
df41<-data.frame(Forecasting.Date=FD,forecating.Date=forecasting_data_by_name,forecasting.Holt=tail(forecasting_holt$mean,N_forecasting_days),Lower=tail(forecasting_holt$lower,N_forecasting_days),Upper=tail(forecasting_holt$upper,N_forecasting_days))
write.csv(df4, file = "testing Holt Model.csv")
write.csv(df41, file = "forecasting Holt Model.csv")
plot(forecasting_holt)
x1_test <- ts(testing_data, start =(rows-validation_data_days+1) )
lines(x1_test, col='red',lwd=2) # actual test values in red
graph4<-autoplot(forecasting_holt,xlab = paste ("Time in", frequency ,y_lab , sep=" "), ylab=y_lab)
p4<-graph4+scale_y_continuous(labels = scales::comma)+
forecast::autolayer(forecasting_holt$mean, series="Holt's Linear Trend Model",size = 0.7) +
guides(colour=guide_legend(title="Forecasts"),fill = "black")+
theme(legend.position="bottom")+
theme(legend.background = element_rect(fill="white",
size=0.7, linetype="solid",
colour ="gray"))
p4
##################
#Auto arima model#
##################
# Stationarity diagnostics on the raw training series (KPSS, Phillips-Perron, ADF).
paste ("tests For Check Stationarity in series ==> ",y_lab, sep=" ")
kpss.test(data_series) # apply KPSS test
pp.test(data_series) # apply Phillips-Perron test
adf.test(data_series) # apply Augmented Dickey-Fuller test
ndiffs(data_series) # estimated number of differences needed for stationarity
# Take the first difference.
diff1_x1<-diff(data_series)
autoplot(diff1_x1, xlab = paste ("Time in", frequency ,y_lab , sep=" "), ylab=y_lab,main = "1nd differenced series")
# Test stationarity of the first-differenced series.
paste ("tests For Check Stationarity in series after taking first differences in ==> ",y_lab, sep=" ")
kpss.test(diff1_x1) # apply KPSS test after taking first differences
pp.test(diff1_x1) # apply PP test after taking first differences
adf.test(diff1_x1) # apply ADF test after taking first differences
# Take the second difference.
diff2_x1=diff(diff1_x1)
autoplot(diff2_x1, xlab = paste ("Time in", frequency ,y_lab , sep=" "), ylab=y_lab ,main = "2nd differenced series")
# Test stationarity of the second-differenced series.
paste ("tests For Check Stationarity in series after taking Second differences in",y_lab, sep=" ")
kpss.test(diff2_x1) # apply KPSS test after taking Second differences
pp.test(diff2_x1) # apply PP test after taking Second differences
adf.test(diff2_x1) # apply ADF test after taking Second differences
#### Fitting an ARIMA Model
# 1. auto.arima with the exhaustive search (stepwise and approximation disabled).
model1 <- auto.arima(data_series,stepwise=FALSE, approximation=FALSE, trace=T, test = c("kpss", "adf", "pp")) # apply auto.arima
model1 # show the result of auto.arima
#Make changes in the source of auto arima to run the best model
# Local copy of the forecast package's internal arima.string(): renders a
# fitted ARIMA model's specification as a display string such as
# "ARIMA(2,1,3)(1,0,1)[7] with drift".
# NOTE(review): later code substring-parses this output to recover the order,
# so the exact spacing of the string literals below must not be changed.
#
# object  - a fitted ARIMA model (e.g. from auto.arima()/Arima())
# padding - if TRUE, pad the result with trailing spaces for aligned printing
# returns - a character scalar describing the model
arima.string <- function (object, padding = FALSE)
{
# arma holds (p, q, P, Q, m, d, D); reorder to (p, d, q, P, D, Q, m).
order <- object$arma[c(1, 6, 2, 3, 7, 4, 5)]
m <- order[7]
result <- paste("ARIMA(", order[1], ",", order[2], ",",
order[3], ")", sep = "")
# Append the seasonal part only when a seasonal component is present.
if (m > 1 && sum(order[4:6]) > 0) {
result <- paste(result, "(", order[4], ",", order[5],
",", order[6], ")[", m, "]", sep = "")
}
# Padding branch: seasonal period but no seasonal terms -> pad for alignment.
if (padding && m > 1 && sum(order[4:6]) == 0) {
result <- paste(result, " ", sep = "")
if (m <= 9) {
result <- paste(result, " ", sep = "")
}
else if (m <= 99) {
result <- paste(result, " ", sep = "")
}
else {
result <- paste(result, " ", sep = "")
}
}
# Annotate regressors / drift / mean terms.
if (!is.null(object$xreg)) {
if (NCOL(object$xreg) == 1 && is.element("drift", names(object$coef))) {
result <- paste(result, "with drift ")
}
else {
result <- paste("Regression with", result, "errors")
}
}
else {
if (is.element("constant", names(object$coef)) || is.element("intercept",
names(object$coef))) {
result <- paste(result, "with non-zero mean")
}
else if (order[2] == 0 && order[5] == 0) {
result <- paste(result, "with zero mean ")
}
else {
result <- paste(result, " ")
}
}
# Without padding, strip any trailing spaces accumulated above.
if (!padding) {
result <- gsub("[ ]*$", "", result)
}
return(result)
}
# Extract the (p, d, q) order of the auto.arima fit directly from the model's
# `arma` component: arma = (p, q, P, Q, m, d, D), so indices c(1, 6, 2) give
# (p, d, q).
# FIX: this replaces the earlier substring(…, 7, 11) parsing of the
# arima.string() output, which silently returned a truncated/garbled order for
# any component with more than one digit.
bestmodel <- unname(model1$arma[c(1, 6, 2)])
bestmodel            # selected (p, d, q)
strtoi(bestmodel[3]) # q component, kept for parity with the original output
# 2. ACF and PACF of the twice-differenced series.
#par(mfrow=c(1,2)) # Code for making two plots in one graph
# FIX: `lag.max = 20` was previously passed inside paste() (where it was
# silently pasted into the plot title) instead of to acf()/pacf(); it now
# reaches the plotting functions as intended.
acf(diff2_x1, lag.max = 20, xlab = paste("Time in", frequency, y_lab, sep = " "),
    ylab = y_lab, main = paste("ACF-2nd differenced series ", y_lab, sep = " "))
pacf(diff2_x1, lag.max = 20, xlab = paste("Time in", frequency, y_lab, sep = " "),
     ylab = y_lab, main = paste("PACF-2nd differenced series ", y_lab, sep = " "))
# Refit the order selected by auto.arima so we can forecast from it.
x1_model1 <- arima(data_series, order = c(bestmodel)) # best auto.arima order
x1_model1 # show the fitted ARIMA model
paste("accuracy of autoarima Model For ==> ", y_lab, sep = " ")
accuracy(x1_model1) # in-sample accuracy of the refitted model
x1_model1$x # the series the model was fitted to
checkresiduals(x1_model1, xlab = paste("Time in", frequency, y_lab, sep = " "), ylab = y_lab) # residual diagnostics
paste("Box-Ljung test , Ljung-Box test For Modelling for ==> ", y_lab, sep = " ")
Box.test(x1_model1$residuals^2, lag = 20, type = "Ljung-Box") # Ljung-Box on squared residuals
jarque.bera.test(x1_model1$residuals) # Jarque-Bera normality test on residuals
# Actual vs fitted values.
plot(data_series, col = 'red', lwd = 2, main = "Actual vs Fitted Plot", xlab = 'Time in (days)', ylab = y_lab)
lines(fitted(x1_model1), col = 'black')
# Testing-data evaluation: forecast over the test window plus the future window.
x1_test <- ts(testing_data, start = (rows - validation_data_days + 1)) # testing data as a time series
forecasting_auto_arima <- forecast(x1_model1, h = N_forecasting_days + validation_data_days)
validation_forecast <- head(forecasting_auto_arima$mean, validation_data_days)
MAPE_Per_Day <- round(abs(((testing_data - validation_forecast) / testing_data) * 100), 3)
# FIX: the three status messages below wrongly said "bats Model"; they now
# name the ARIMA model they actually report on.
paste("MAPE % For ", validation_data_days, frequency, "by using ARIMA Model for ==> ", y_lab, sep = " ")
MAPE_Mean_All.ARIMA_Model <- round(mean(MAPE_Per_Day), 3) # overall ARIMA MAPE, reused for ranking
MAPE_Mean_All.ARIMA <- paste(round(mean(MAPE_Per_Day), 3), "% MAPE ", validation_data_days, frequency, y_lab, sep = " ")
MAPE_auto_arima <- paste(round(MAPE_Per_Day, 3), "%")
MAPE_auto.arima_Model <- paste(MAPE_Per_Day, "%")
paste(" MAPE that's Error of Forecasting for ", validation_data_days, " days in ARIMA Model for ==> ", y_lab, sep = " ")
paste(MAPE_Mean_All.ARIMA, "%")
paste("MAPE that's Error of Forecasting day by day for ", validation_data_days, " days in ARIMA Model for ==> ", y_lab, sep = " ")
print(ascii(data.frame(date_auto.arima = validation_dates, validation_data_by_name, actual_data = testing_data, forecasting_auto.arima = validation_forecast, MAPE_auto.arima_Model)), type = "rest")
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_auto.arima = tail(forecasting_auto_arima$mean, N_forecasting_days), Lower = tail(forecasting_auto_arima$lower, N_forecasting_days), Upper = tail(forecasting_auto_arima$upper, N_forecasting_days))), type = "rest")
# Export testing and forecasting tables to CSV files.
df5 <- data.frame(Testing.Date = validation_dates, Day.Name = validation_data_by_name, Actual.Value = testing_data, forecasting.autoarima = head(forecasting_auto_arima$mean, validation_data_days), MAPE = MAPE_auto.arima_Model)
df51 <- data.frame(Forecasting.Date = FD, forecating.Date = forecasting_data_by_name, forecasting.autoarima = tail(forecasting_auto_arima$mean, N_forecasting_days), Lower = tail(forecasting_auto_arima$lower, N_forecasting_days), Upper = tail(forecasting_auto_arima$upper, N_forecasting_days))
write.csv(df5, file = "testing autoarima Model.csv")
write.csv(df51, file = "forecasting autoarima Model.csv")
plot(forecasting_auto_arima)
lines(x1_test, col = 'red', lwd = 2) # actual test values in red
graph5 <- autoplot(forecasting_auto_arima, xlab = paste("Time in", frequency, y_lab, sep = " "), ylab = y_lab)
p5 <- graph5 + scale_y_continuous(labels = scales::comma) +
  forecast::autolayer(forecasting_auto_arima$mean, series = "auto.arima Model", size = 0.7) +
  guides(colour = guide_legend(title = "Forecasts"), fill = "black") +
  theme(legend.position = "bottom") +
  theme(legend.background = element_rect(fill = "white",
                                         size = 0.7, linetype = "solid",
                                         colour = "gray"))
p5
#########################################################################################
# Cubic smoothing splines: local linear forecasts and prediction intervals.             #
# Testing-data evaluation.                                                              #
#########################################################################################
# FIX: the spline model was previously fitted on `original_data` (the FULL
# series, including the held-out test window), unlike every other model; its
# "validation" forecasts were therefore future values, not aligned with
# `testing_data`, making its MAPE incomparable and misaligning its ensemble
# contribution. It is now fitted on the training window only, like the rest.
forecasting_splinef <- splinef(ts(training_data), h = N_forecasting_days + validation_data_days)
summary(forecasting_splinef)
validation_forecast <- head(forecasting_splinef$mean, validation_data_days)
MAPE_Per_Day <- round(abs(((testing_data - validation_forecast) / testing_data) * 100), 3)
paste("MAPE % For ", validation_data_days, frequency, "by using cubic smoothing splines Model for ==> ", y_lab, sep = " ")
MAPE_Mean_All.splinef_Model <- round(mean(MAPE_Per_Day), 3) # overall spline MAPE, reused for ranking
MAPE_Mean_All.splinef <- paste(round(mean(MAPE_Per_Day), 3), "% MAPE ", validation_data_days, frequency, y_lab, sep = " ")
MAPE_splinef <- paste(round(MAPE_Per_Day, 3), "%")
MAPE_splinef_Model <- paste(MAPE_Per_Day, "%")
paste(" MAPE that's Error of Forecasting for ", validation_data_days, " days in cubic smoothing splines Model for ==> ", y_lab, sep = " ")
paste(MAPE_Mean_All.splinef, "%")
paste("MAPE that's Error of Forecasting day by day for ", validation_data_days, " days in cubic smoothing splines Model for ==> ", y_lab, sep = " ")
print(ascii(data.frame(date_splinef = validation_dates, validation_data_by_name, actual_data = testing_data, forecasting_splinef = validation_forecast, MAPE_splinef_Model)), type = "rest")
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_splinef = tail(forecasting_splinef$mean, N_forecasting_days), Lower = tail(forecasting_splinef$lower, N_forecasting_days), Upper = tail(forecasting_splinef$upper, N_forecasting_days))), type = "rest")
# Export testing and forecasting tables to CSV files.
df6 <- data.frame(Testing.Date = validation_dates, Day.Name = validation_data_by_name, Actual.Value = testing_data, forecasting.cubic.smoothing.splines = head(forecasting_splinef$mean, validation_data_days), MAPE = MAPE_splinef_Model)
df61 <- data.frame(Forecasting.Date = FD, forecating.Date = forecasting_data_by_name, forecasting.cubic.smoothing.splines = tail(forecasting_splinef$mean, N_forecasting_days), Lower = tail(forecasting_splinef$lower, N_forecasting_days), Upper = tail(forecasting_splinef$upper, N_forecasting_days))
write.csv(df6, file = "testing cubic smoothing splines Model.csv")
write.csv(df61, file = "forecasting cubic smoothing splines Model.csv")
plot(forecasting_splinef)
x1_test <- ts(testing_data, start = (rows - validation_data_days + 1))
lines(x1_test, col = 'red', lwd = 2) # actual test values in red
graph6 <- autoplot(forecasting_splinef, xlab = paste("Time in", frequency, y_lab, sep = " "), ylab = y_lab)
p6 <- graph6 + scale_y_continuous(labels = scales::comma) +
  forecast::autolayer(forecasting_splinef$mean, series = "cubic smoothing splines Model", size = 0.7) +
  guides(colour = guide_legend(title = "Forecasts"), fill = "black") +
  theme(legend.position = "bottom") +
  theme(legend.background = element_rect(fill = "white",
                                         size = 0.7, linetype = "solid",
                                         colour = "gray"))
p6
######################
#Ensembling (Average)#
######################
# Simple ensemble: unweighted mean of the six model forecasts, row by row.
re_NNAR<-forecasting_NNAR$mean
re_BATS<-forecasting_bats$mean
re_TBATS<-forecasting_tbats$mean
re_holt<-forecasting_holt$mean
re_autoarima<-forecasting_auto_arima$mean
splinef_model<-data.frame(forecasting_splinef)
splinef<-splinef_model$Point.Forecast # spline point forecasts as a plain numeric vector
result_df<-data.frame(re_NNAR,re_BATS,re_TBATS,re_holt,re_autoarima,splinef)
average_models<-rowMeans(result_df) # ensemble forecast = per-period mean across models
# Testing-data evaluation on the held-out window.
Ensembling_average1<-head(average_models,validation_data_days)
MAPE_Per_Day<-round(abs(((testing_data-Ensembling_average1)/testing_data)*100) ,3)
paste ("MAPE % For ",validation_data_days,frequency,"by using Ensembling (Average) for ==> ",y_lab, sep=" ")
MAPE_Mean_EnsemblingAverage<-round(mean(MAPE_Per_Day),3) # overall ensemble MAPE, reused for ranking
MAPE_Mean_Ensembling<-paste(round(mean(MAPE_Per_Day),3),"% MAPE ",validation_data_days,frequency,y_lab,sep=" ")
MAPE_Ensembling<-paste(round(MAPE_Per_Day,3),"%")
MAPE_Ensembling_Model<-paste(MAPE_Per_Day ,"%")
paste (" MAPE that's Error of Forecasting for ",validation_data_days," days in Ensembling Model for ==> ",y_lab, sep=" ")
paste(MAPE_Mean_EnsemblingAverage,"%")
paste ("MAPE that's Error of Forecasting day by day for ",validation_data_days," days in Ensembling (Average) for ==> ",y_lab, sep=" ")
print(ascii(data.frame(date_Ensembling=validation_dates,validation_data_by_name,actual_data=testing_data,Ensembling=head(average_models,validation_data_days),MAPE_Ensembling)), type = "rest")
print(ascii(data.frame(FD,forecating_date=forecasting_data_by_name,Ensembling_Average=tail(average_models,N_forecasting_days))), type = "rest")
# Export testing and forecasting tables to CSV files.
df7<-data.frame(Testing.Date=validation_dates,Day.Name=validation_data_by_name,Actual.Value=testing_data,Ensembling.Average=head(average_models,validation_data_days),MAPE=MAPE_Ensembling)
df71<-data.frame(Forecasting.Date=FD,forecating.Date=forecasting_data_by_name,Forecasting.Ensembling.Average=tail(average_models,N_forecasting_days))
write.csv(df7, file = "testing Ensembling Average.csv")
write.csv(df71, file = "forecasting Ensembling Average.csv")
#############################
#Ensembling (weight average)#
#############################
# Weighted-average ensemble: the model with the lowest validation MAPE gets
# weight `weight.model` (0.90); the remaining five models share the leftover
# (1 - weight.model) equally, so the six weights sum to 1.
# NOTE(review): if two models tie for the minimum MAPE, both receive the full
# best-model weight and the weights sum to more than 1 — TODO confirm whether
# ties should split the weight.
weight.model <- 0.90 # weight given to the best (lowest-MAPE) model
re_NNAR <- forecasting_NNAR$mean
re_BATS <- forecasting_bats$mean
re_TBATS <- forecasting_tbats$mean
re_holt <- forecasting_holt$mean
re_autoarima <- forecasting_auto_arima$mean
re_splinef <- c(forecasting_splinef$mean)
# Lowest validation MAPE among the six candidate models.
re_bestmodel <- min(MAPE_Mean_All_NNAR, MAPE_Mean_All.bats_Model, MAPE_Mean_All.TBATS_Model, MAPE_Mean_All.Holt_Model, MAPE_Mean_All.ARIMA_Model, MAPE_Mean_All.splinef_Model)
# Weight for a model given its MAPE: full weight if it is the best model,
# otherwise an equal share of the remainder.
model_weight <- function(model_mape) {
  if (re_bestmodel >= model_mape) weight.model else (1 - weight.model) / 5
}
y1 <- re_BATS * model_weight(MAPE_Mean_All.bats_Model)
y2 <- re_TBATS * model_weight(MAPE_Mean_All.TBATS_Model)
y3 <- re_holt * model_weight(MAPE_Mean_All.Holt_Model)
y4 <- re_autoarima * model_weight(MAPE_Mean_All.ARIMA_Model)
y5 <- re_NNAR * model_weight(MAPE_Mean_All_NNAR)
# FIX: the original non-best branch multiplied `splinef` (the data.frame
# column extracted in the averaging section) instead of `re_splinef`; the
# values are identical, but `re_splinef` is now used consistently so this
# block no longer depends on a variable defined in another section.
y6 <- re_splinef * model_weight(MAPE_Mean_All.splinef_Model)
Ensembling.weight <- (y1 + y2 + y3 + y4 + y5 + y6)
# Testing-data evaluation on the held-out window.
validation_forecast2 <- head(Ensembling.weight, validation_data_days)
MAPE_Per_Day <- round(abs(((testing_data - validation_forecast2) / testing_data) * 100), 3)
paste("MAPE % For ", validation_data_days, frequency, "by using Ensembling (weight average) for ==> ", y_lab, sep = " ")
MAPE_Mean_EnsemblingAverage1 <- round(mean(MAPE_Per_Day), 3) # overall weighted-ensemble MAPE, reused for ranking
MAPE_Mean_Ensembling <- paste(round(mean(MAPE_Per_Day), 3), "% MAPE ", validation_data_days, frequency, y_lab, sep = " ")
MAPE_Ensembling <- paste(round(MAPE_Per_Day, 3), "%")
MAPE_Ensembling_Model <- paste(MAPE_Per_Day, "%")
paste(" MAPE that's Error of Forecasting for ", validation_data_days, " days in Ensembling weight average for ==> ", y_lab, sep = " ")
paste(MAPE_Mean_EnsemblingAverage1, "%")
paste("MAPE that's Error of Forecasting day by day for ", validation_data_days, " days in Ensembling weight average for ==> ", y_lab, sep = " ")
print(ascii(data.frame(date_Ensembling = validation_dates, validation_data_by_name, actual_data = testing_data, Ensembling = validation_forecast2, MAPE_Ensembling)), type = "rest")
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_Ensembling = tail(Ensembling.weight, N_forecasting_days))), type = "rest")
# Export testing and forecasting tables to CSV files.
df8 <- data.frame(Testing.Date = validation_dates, Day.Name = validation_data_by_name, Actual.Value = testing_data, Ensembling.weight.average = head(Ensembling.weight, validation_data_days), MAPE = MAPE_Ensembling)
df81 <- data.frame(Forecasting.Date = FD, forecating.Date = forecasting_data_by_name, Forecasting.Ensembling.weight.Average = tail(Ensembling.weight, N_forecasting_days))
write.csv(df8, file = "testing Ensembling weight average.csv")
write.csv(df81, file = "forecasting weight average.csv")
graph8 <- autoplot(Ensembling.weight, xlab = paste("Time in", frequency, y_lab, "by using Ensembling weight average", sep = " "), ylab = y_lab)
p8 <- graph8 + scale_y_continuous(labels = scales::comma) +
  forecast::autolayer(Ensembling.weight, series = "Ensembling weight average", size = 0.7) +
  guides(colour = guide_legend(title = "Forecasts"), fill = "black") +
  theme(legend.position = "bottom") +
  theme(legend.background = element_rect(fill = "white",
                                         size = 0.7, linetype = "solid",
                                         colour = "gray"))
p8
# Summary: MAPE per model for the selected country/series, plus the best model.
best_recommended_model <- min(MAPE_Mean_All_NNAR, MAPE_Mean_All.bats_Model, MAPE_Mean_All.TBATS_Model, MAPE_Mean_All.Holt_Model, MAPE_Mean_All.ARIMA_Model, MAPE_Mean_All.splinef_Model, MAPE_Mean_EnsemblingAverage, MAPE_Mean_EnsemblingAverage1)
paste("System Choose Least Error ==> ( MAPE %) of Forecasting by using NNAR model, BATS Model, TBATS Model, Holt's Linear Model , autoarima Model, cubic smoothing splines Model, Ensembling (Average), and Ensembling weight average , for ==> ", y_lab, sep = " ")
best_recommended_model
# Each xN is the model's name when it attains the minimum MAPE, NULL otherwise;
# c() below drops the NULLs, leaving only the best model(s).
x1 <- if (best_recommended_model >= MAPE_Mean_All.bats_Model) {paste("BATS Model")}
x2 <- if (best_recommended_model >= MAPE_Mean_All.TBATS_Model) {paste("TBATS Model")}
x3 <- if (best_recommended_model >= MAPE_Mean_All.Holt_Model) {paste("Holt Model")}
x4 <- if (best_recommended_model >= MAPE_Mean_All.ARIMA_Model) {paste("ARIMA Model")}
x5 <- if (best_recommended_model >= MAPE_Mean_All_NNAR) {paste("NNAR Model")}
x6 <- if (best_recommended_model >= MAPE_Mean_All.splinef_Model) {paste("cubic smoothing splines")}
x7 <- if (best_recommended_model >= MAPE_Mean_EnsemblingAverage) {paste("Ensembling (Average)")}
x8 <- if (best_recommended_model >= MAPE_Mean_EnsemblingAverage1) {paste("Ensembling weight average")}
panderOptions('table.split.table', Inf)
# Print the forecast table of every model.
paste("Forecasting by using NNAR Model ==> ", y_lab, sep = " ")
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_NNAR = tail(forecasting_NNAR$mean, N_forecasting_days))), type = "rest")
paste("Forecasting by using BATS Model ==> ", y_lab, sep = " ")
# FIX: the BATS table's Upper column previously repeated the LOWER bound.
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_bats = tail(forecasting_bats$mean, N_forecasting_days), lower = tail(forecasting_bats$lower, N_forecasting_days), Upper = tail(forecasting_bats$upper, N_forecasting_days))), type = "rest")
paste("Forecasting by using TBATS Model ==> ", y_lab, sep = " ")
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_TBATS = tail(forecasting_tbats$mean, N_forecasting_days), Lower = tail(forecasting_tbats$lower, N_forecasting_days), Upper = tail(forecasting_tbats$upper, N_forecasting_days))), type = "rest")
paste("Forecasting by using Holt's Linear Trend Model ==> ", y_lab, sep = " ")
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_holt = tail(forecasting_holt$mean, N_forecasting_days), Lower = tail(forecasting_holt$lower, N_forecasting_days), Upper = tail(forecasting_holt$upper, N_forecasting_days))), type = "rest")
paste("Forecasting by using ARIMA Model ==> ", y_lab, sep = " ")
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_by_auto.arima = tail(forecasting_auto_arima$mean, N_forecasting_days), Lower = tail(forecasting_auto_arima$lower, N_forecasting_days), Upper = tail(forecasting_auto_arima$upper, N_forecasting_days))), type = "rest")
paste("Forecasting by using cubic smoothing splines Model ==> ", y_lab, sep = " ")
print(ascii(data.frame(FD, forecating_date = forecasting_data_by_name, forecasting_splinef = tail(forecasting_splinef$mean, N_forecasting_days), Lower = tail(forecasting_splinef$lower, N_forecasting_days), Upper = tail(forecasting_splinef$upper, N_forecasting_days))), type = "rest")
# FIX: removed a stray duplicate of the splines table that wrongly reported
# the Holt model's lower/upper bounds as the spline prediction interval.
result <- c(x1, x2, x3, x4, x5, x6, x7, x8)
table.error <- data.frame(country.name, NNAR.model = MAPE_Mean_All_NNAR, BATS.Model = MAPE_Mean_All.bats_Model, TBATS.Model = MAPE_Mean_All.TBATS_Model, Holt.Model = MAPE_Mean_All.Holt_Model, ARIMA.Model = MAPE_Mean_All.ARIMA_Model, cubic_smoothing.splines = MAPE_Mean_All.splinef_Model, Ensembling_Average = MAPE_Mean_EnsemblingAverage, Ensembling_weight = MAPE_Mean_EnsemblingAverage1, Best.Model = result)
knitr::kable(table.error, caption = paste("Accuracy MAPE % daily Covid-19 infection cases for testing data last", validation_data_days, frequency, y_lab, sep = " "))
MAPE.Value <- c(MAPE_Mean_All_NNAR, MAPE_Mean_All.bats_Model, MAPE_Mean_All.TBATS_Model, MAPE_Mean_All.Holt_Model, MAPE_Mean_All.ARIMA_Model, MAPE_Mean_All.splinef_Model, MAPE_Mean_EnsemblingAverage, MAPE_Mean_EnsemblingAverage1)
Model <- c("NNAR model", "BATS Model", "TBATS Model", "Holt Model", "ARIMA Model", "cubic smoothing splines", "Ensembling (Average)", "Ensembling weight")
channel_data <- data.frame(Model, MAPE.Value)
# Accuracy comparison bar chart across all models.
p0 <- ggplot(channel_data, aes(x = Model, y = MAPE.Value)) +
  geom_bar(stat = "identity") +
  geom_text(aes(label = MAPE.Value)) + # x and y inherited; only "label" needed
  coord_flip() +
  scale_y_continuous(expand = c(0, 0))
p0
# Opening the graphical device
png("Forecast NNAR Model.png")
p1
dev.off()
png("Forecast BATS Model.png")
p2
dev.off()
png("Forecast TBATS Model.png")
p3
dev.off()
png("Forecast Holt Model.png")
p4
dev.off()
png("Forecast autoarima Model.png")
p5
dev.off()
png("Forecast cubic smoothing splines Model.png")
p6
dev.off()
png("Ensembling Average weight.png")
p8
dev.off()
png("accuracy of testing data.png")
p0
# Closing the graphical device
dev.off()
#Export All Models Results
stargazer(df1,df11, type = "text",out = "NNAR Model.txt",summary = FALSE)
stargazer(df2,df21, type = "text",out = "BATS Model.txt",summary = FALSE)
stargazer(df3,df31, type = "text",out = "TBATS Model.txt",summary = FALSE)
stargazer(df4,df41, type = "text",out = "Holt Model.txt",summary = FALSE)
stargazer(df5,df51, type = "text",out = "auto arima.txt",summary = FALSE)
stargazer(df6,df61, type = "text",out = "cubic smoothing splines.txt",summary = FALSE)
stargazer(df7,df71, type = "text",out = "Ensembling Average.txt",summary = FALSE)
stargazer(df8,df81, type = "text",out = "Ensembling Average weight.txt",summary = FALSE)
stargazer(channel_data, type = "text",out = "Accuracy of All models all testing data.txt",summary = FALSE)
message("System finished Modelling and Forecasting by using NNAR, BATS, TBATS, Holt's Linear Trend, ARIMA, cubic smoothing splines, Ensembling (Average), and Ensembling weight ==>",y_lab, sep=" ")
message(" Thank you for using our System For Modelling and Forecasting ==> ",y_lab, sep=" ")
|
88bf4a3fef4b015acb756aae2f910013782b7ae8 | d925a4a8ce949bcac1803561eac412941c84a6fa | /man/get_words.Rd | f757c1feea7b16d21a284881bc9afcc950dcc612 | [
"MIT"
] | permissive | bensoltoff/rspellingbee | 996ecc9ba0746eda42e6cba0b76b3182d7c5b0cf | 85f0dd308334a4e0f85defb973d5d5ea8c344c71 | refs/heads/master | 2021-01-19T03:40:50.973523 | 2016-07-20T15:15:18 | 2016-07-20T15:15:18 | 60,034,807 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 331 | rd | get_words.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_dict.R
\name{get_words}
\alias{get_words}
\title{Get list of competition words}
\usage{
get_words(results)
}
\arguments{
\item{results}{Results from previous competitions, generated by [get_seasons]}
}
\description{
Get list of competition words
}
|
47aafd32f6ab2a03f3e199b6938643d9bf266b44 | 00d3d127a3a8da7384cc8a28eb19e220d234e97f | /code/functions/baggingWperm.R | 101e4a4c3131c893ec9fb17158f2de70c2537aff | [] | no_license | renikaul/YF_Brazil | ae193cb37f434d9902193b42cac1658d8d65d110 | f991360d75b5c4cd6d0fcca27cd1d1aad4d0601c | refs/heads/master | 2021-03-24T10:00:24.309600 | 2018-10-19T09:47:46 | 2018-10-19T09:47:46 | 94,576,273 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 10,915 | r | baggingWperm.R | # Single Bagged Model ----
bagging <- function(form.x.y, training, new.data, n.pres = 10, n.abs = 100){
  # Fit a single bagged logistic regression on a random subset of the training
  # data and predict on `new.data`.  (Modified from JP's bagging function,
  # 12/1/17 RK; generalized so the bag sizes are no longer hard-coded.)
  #
  # form.x.y  formula for the logistic regression model
  # training  data.frame with a 0/1 `case` column (presence / absence rows)
  # new.data  data.frame to predict onto
  # n.pres    number of presence rows drawn per bag (default 10, as before)
  # n.abs     number of absence rows drawn per bag (default 100, as before)
  #
  # Returns a numeric vector of predicted probabilities for `new.data`.
  #
  #1. Split into presence / absence and draw a fixed-size random subset of
  #   each (base subsetting; the former dplyr::filter dependency was not needed).
  training.pres <- training[training$case == 1, , drop = FALSE]
  training.abs <- training[training$case == 0, , drop = FALSE]
  sub.pres <- training.pres[sample(nrow(training.pres), size = n.pres), , drop = FALSE]
  sub.abs <- training.abs[sample(nrow(training.abs), size = n.abs), , drop = FALSE]
  #2. Build logistic regression model with the subset of data.
  glm_fit <- glm(form.x.y, data = rbind(sub.pres, sub.abs), family = binomial(logit))
  #3. Predict probabilities on the new data.
  predict(glm_fit, newdata = new.data, type = "response")
}
# Bagging with predictions
BaggedModel = function(form.x.y, training, new.data, no.iterations= 100, bag.fnc=baggingTryCatch){
  # Fit `no.iterations` bagged logistic regressions and average their
  # predictions on `new.data`.
  #
  # form.x.y      model formula
  # training      training data with a 0/1 `case` column
  # new.data      data to predict onto (must also carry muni.no / month.no / case)
  # no.iterations number of bagged models to fit
  # bag.fnc       bagging function called as bag.fnc(form, training, new.data,
  #               keep.model = TRUE); expected to return a fitted glm (or, for
  #               baggingTryCatch, a vector of NAs when fitting repeatedly failed)
  #
  # Returns list(list.of.models, preds) where preds is a data.frame with one
  # averaged prediction per row of `new.data`.
  list.of.models <- replicate(n = no.iterations, expr = bag.fnc(form.x.y, training, new.data, keep.model = TRUE), simplify = FALSE)
  matrix.of.predictions <- matrix(NA_real_, ncol = no.iterations, nrow = nrow(new.data))
  for (i in seq_len(no.iterations)) {
    tmp <- list.of.models[[i]]
    # bag.fnc may return NAs instead of a model after repeated fitting
    # failures; skip those entries (their column stays NA and is dropped
    # from the mean below).  The former debug print(i) has been removed.
    if (inherits(tmp, "glm")) {
      matrix.of.predictions[, i] <- predict(tmp, newdata = new.data, type = "response")
    }
  }
  # mean prediction across the successfully fitted models
  output.preds <- rowMeans(matrix.of.predictions, na.rm = TRUE)
  # add identifiers to predictions
  preds <- data.frame(muni.no = new.data$muni.no, month.no = new.data$month.no, case = new.data$case, prediction = output.preds)
  return(list(list.of.models, preds))
}
# Making predictions from a bagged models (Bagged Model[[2]] output) ----
baggedPredictions = function(list.of.models, new.data){
  # Average the predictions of a list of fitted glm models on `new.data` and
  # score the averaged prediction against new.data$case with ROCR.
  #
  # list.of.models list of fitted glm objects (BaggedModel(...)[[1]])
  # new.data       data.frame with the predictors plus muni.no / month.no / case
  #
  # Returns list(auc, data.frame(muni.no, month.no, case, prediction)).
  require(ROCR)
  matrix.of.predictions <- matrix(NA_real_, ncol = length(list.of.models), nrow = nrow(new.data))
  for (i in seq_along(list.of.models)) {   # seq_along() is safe for empty lists
    matrix.of.predictions[, i] <- predict(list.of.models[[i]], newdata = new.data, type = "response")
  }
  # mean prediction for each row across the bagged models
  output.preds <- rowMeans(matrix.of.predictions)
  # model AUC against the observed cases
  preds <- ROCR::prediction(output.preds, new.data$case) #other projects have used dismo::evaluate instead. Not sure if is makes a difference.
  auc <- unlist(ROCR::performance(preds, "auc")@y.values)
  # add identifiers to predictions
  preds <- as.data.frame(cbind(muni.no = new.data$muni.no, month.no = new.data$month.no, case = new.data$case, prediction = output.preds))
  return(list(auc, preds))
}
# Single Bagged Model with tryCatch----
baggingTryCatch<-function(form.x.y,training,new.data, keep.model=FALSE){
# modified JP's bagging function 12/1/17 RK
# Bagged logistic regression that retries on glm() warnings (e.g. perfect
# separation): resamples and refits until a warning-free fit is obtained,
# giving up after 50 attempts.
# form.x.y the formula for model to use
# training dataframe containing training data (presence and abs)
# new.data new data for logreg model to predict onto
# keep.model if TRUE, return the fitted glm object instead of predictions
perfectSeparation <- function(w) {
# Warning handler passed to tryCatch() below.  It always evaluates to NULL
# (the if() has an empty body and no else branch), so ANY glm() warning --
# not only the perfect-separation message tested here -- makes the
# tryCatch() call return NULL and triggers another resampling attempt.
if(grepl("fitted probabilities numerically 0 or 1 occurred", #text to match
as.character(w))) {} #output warning message, counter NA
}
# returns predictions based on logreg model for new data, the fitted model
# (keep.model = TRUE), or a vector of NAs when no warning-free fit was found
#0. load packages
require(dplyr)
#1. Create subset of data with fixed number of pres and abs
training.pres <- dplyr::filter(training, case==1) #pull out just present points
training.abs <- dplyr::filter(training, case==0) #pull out just absence points
attempt <- 0 #attempt counter
repeat {
attempt <- attempt +1 #count attempt
training_positions.p <- sample(nrow(training.pres),size=10) #randomly choose 10 present point rows
training_positions.b <- sample(nrow(training.abs),size=100) #randomly choose 100 absence point rows
train_pos.p<-1:nrow(training.pres) %in% training_positions.p #presence
train_pos.b<-1:nrow(training.abs) %in% training_positions.b #background
#2. Build logreg model with subset of data
glm_fit<-tryCatch(glm(form.x.y,data=rbind(training.pres[train_pos.p,],training.abs[train_pos.b,]),family=binomial(logit)), warning=perfectSeparation) # when glm() warns, glm_fit is NULL (see handler above) and we resample
#2b. test to if perfect sep
if(is.list(glm_fit)==TRUE){
break
}
#escape hatch: stop retrying after 50 attempts
if(attempt > 50){
break
}
}
#4. Use model to predict (0,1) on whole training data
if(is.list(glm_fit)==TRUE){
if(keep.model==TRUE){ #3. Return the fitted model when keep.model is TRUE
return(glm_fit)
} else {
predictions <- predict(glm_fit,newdata=new.data,type="response")
return(predictions)
}
}
#If no warning-free model was found within 50 attempts, return just NAs
if(attempt>50){
predictions <- rep(NA, dim(new.data)[1])
return(predictions)
}
}
# Permutation Specific functions ----
# Permute Variable based on loop iteration of PermOneVar
permutedata = function(formula = glm.formula, trainingdata, i){
  # Return a copy of `trainingdata` with the i-th "model variant" permuted:
  #   i == 1                           -> no permutation (full model)
  #   i == length(predictors) + 2      -> every predictor column permuted
  #   otherwise                        -> the single predictor selected by i
  #
  # formula      model formula; its right-hand-side terms (split on "+")
  #              define the permutable columns
  # trainingdata training data with the predictors as columns
  # i            index into c("full model", <predictors>, "all permutated")
  #
  # NOTE(review): the default `formula = glm.formula` refers to an object
  # expected in the calling environment; callers should pass it explicitly.
  variables <- trimws(unlist(strsplit(as.character(formula)[3], "+", fixed = TRUE)), which = "both")
  variablesName <- c("full model", variables, "all permutated")
  permuted.data <- trainingdata
  if (i == length(variablesName)) {
    # permute every predictor column independently
    for (vari in variables) {
      permuted.data[, vari] <- sample(permuted.data[, vari], nrow(permuted.data), FALSE)
    }
  } else if (i > 1) {
    # permute the single predictor named variablesName[i]
    vari <- variablesName[i]
    permuted.data[, vari] <- sample(permuted.data[, vari], nrow(permuted.data), FALSE)
  }
  # i == 1 falls through: full model, data returned unchanged
  return(permuted.data)
}
# PermOneVar to write after each permutation ----
PermOneVar=function(VarToPerm, formula = glm.formula, bag.fnc=baggingTryCatch, permute.fnc = permutedata, traindata = training,
cores=2, no.iterations= 100, perm=10){
# Runs `perm` permutations of the variant selected by VarToPerm: each time the
# data are permuted, `no.iterations` bagged models are fitted on the permuted
# data, the mean prediction on the original data is computed, and its AUC is
# recorded.  Returns the vector of `perm` AUC values (NA for a permutation
# whose predictions contained NAs).
#
# VarToPerm: number from 1 to length(variableNames)+2
# glm.formula: full formula for the model to use
# traindata : training data with pres and abs
# cores : number of cores to use for parallel; default to 2
# no.iterations : number of low bias models to make; default to 100
# bag.fnc : bagging(form.x.y,training,new.data); bagging function
# permute.fnc : permutedata(formula = glm.formula,trainingdata, i); function to permute single variable
#
# NOTE(review): the defaults `formula = glm.formula` and `traindata = training`
# refer to objects looked up in the calling environment; pass them explicitly.
require(dplyr)
require(doParallel)
require(ROCR)
# f() builds the .combine function handed to foreach(): a closure that
# advances a text progress bar as results arrive and concatenates them
# with c(...).
f <- function(){
pb <- txtProgressBar(min=1, max=perm-1,style=3)
count <- 0
function(...) {
count <<- count + length(list(...)) - 1
setTxtProgressBar(pb,count)
Sys.sleep(0.01)
flush.console()
c(...)
}
}
cl <- makeCluster(cores)
registerDoParallel(cl)
results <- foreach(i = icount(perm), .combine = f()) %dopar% {
#permute data
permuted.data <- permute.fnc(formula = formula, trainingdata = traindata, i = VarToPerm)
#create model and prediction no.iterations times
matrix_of_predictions <- replicate(n = no.iterations, expr = bag.fnc(form.x.y = formula, training = permuted.data, new.data = traindata))
#calculate mean prediction
output.preds<- apply(matrix_of_predictions, 1, function(x) mean(x, na.rm=TRUE))
#prediction errors out if NA for output.preds so need to add alternative route for NA
if(anyNA(output.preds)==TRUE){
perm.auc <- NA
}else{
preds <- ROCR::prediction(output.preds, traindata$case) #other projects have used dismo::evaluate instead. Not sure if is makes a difference.
#AUC to return; the value of this if/else is what the foreach body yields
perm.auc <- unlist(ROCR::performance(preds, "auc")@y.values)
}
}
stopCluster(cl)
#vector of AUC values to return
return(unlist(results))
}
SumPermOneVar = function(perm.auc, permutations, viz = TRUE, title = "",
                         variables = get("variables", envir = parent.frame()),
                         variablesName = get("variablesName", envir = parent.frame())){
  # Summarise the permutation AUCs produced by PermOneVar.
  #
  # perm.auc      matrix of AUCs: `permutations` rows, one column per model
  #               variant (full model, one per single permuted variable,
  #               all variables permuted)
  # permutations  number of permutations attempted per variant
  # viz           if TRUE, draw a barplot of the relative importances
  # title         title for the barplot
  # variables     predictor names, one per single-variable column of perm.auc.
  #               Previously this was read silently from the global
  #               environment; it is now an explicit argument whose default
  #               reproduces the old lookup for backward compatibility.
  # variablesName column labels for perm.auc, i.e.
  #               c("full model", variables, "all permutated"); same
  #               backward-compatible default as `variables`.
  #
  # Returns list(relative importance table, mean/sd AUC table, labelled
  # perm.auc matrix).

  # count the permutations that actually produced an AUC for each variant
  no.failed <- apply(perm.auc, 2, function(x) sum(is.na(x)))
  no.suc.perm <- permutations - no.failed
  # mean and sd of the AUC per variant ----
  perm.auc.mean <- apply(perm.auc, 2, function(x) mean(x, na.rm = TRUE))
  perm.auc.sd <- apply(perm.auc, 2, function(x) sd(x, na.rm = TRUE))
  # drop in AUC relative to the full model when a single variable is permuted
  # (first and last columns -- full model and all-permuted -- are excluded)
  delta.auc <- perm.auc.mean[1] - perm.auc.mean[-c(1, length(perm.auc.mean))]
  # normalise so the most important variable has relative importance 1
  rel.import <- delta.auc/max(delta.auc, na.rm = TRUE)
  relative.import <- as.data.frame(cbind(Variable = variables, varImp = rel.import))
  if (viz == TRUE) {barplot(rel.import, names.arg = variables, main = title)}
  # mean/sd AUC and number of successful permutations for every variant
  mean.auc <- as.data.frame(cbind(Model = variablesName, meanAUC = perm.auc.mean, sdAUC = perm.auc.sd, perms = no.suc.perm))
  colnames(perm.auc) <- variablesName
  return(list(relative.import, mean.auc, perm.auc))
}
# Min working script ----
#training.data <- readRDS("../../data_clean/TrainingData.rds") #load data
#define function for model
#glm.formula <- as.formula("case~ NDVI+NDVIScale+popLog10")
#Create 10 permuted datasets for each variable, fit model bagged 100 times, predict on full dataset, save AUC
#PermTestModel <- permOneVar(formula = glm.formula,traindata = training.data, cores=2, no.iterations = 5, perm = 3)
|
80570d3551306f360ec561aa67c53b940433724a | 51abd15fcf14ab1e9862f9e10f8c8862a6297ef1 | /plot1.r | f881a865cb9ba2763083236df5a2435177404cf5 | [] | no_license | zli00/ExData_Plotting1 | c59d03747e1f220f987cd3c1a1216c0e22400165 | c5ce894b0422fbfc34e05bc67c8ba303e442413d | refs/heads/master | 2021-01-21T05:20:43.774959 | 2015-03-08T02:45:09 | 2015-03-08T02:45:09 | 29,107,872 | 0 | 0 | null | 2015-01-11T22:05:30 | 2015-01-11T22:05:30 | null | UTF-8 | R | false | false | 322 | r | plot1.r | dat<-read.table("household_power_consumption.txt", sep=";", header=T)
# Keep only the two days of interest (1 and 2 February 2007)
newDat <- subset(dat, dat[, 1] == "1/2/2007" | dat[, 1] == "2/2/2007")
#plot1: histogram of Global Active Power for those two days.
# Convert via as.character() first: the power column may have been read as a
# factor (this dataset codes missing values as "?"), and as.numeric() applied
# directly to a factor returns the internal level codes, not the measured
# values.  as.character() makes the conversion safe in both cases.
hist(as.numeric(as.character(newDat$Global_active_power)), col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.copy(png, file = "plot1.png")
dev.off()
|
d8981619414b7720731bb2f9400f3b89a7b24e26 | 7d5968837bec87fcc42bab82f82db8bfa169e7c7 | /man/scatterplot.CI.Rd | 13869b4555a6562b4873d12419b5bc37ab15799c | [] | no_license | liuguofang/figsci | ddadb01fae7c208b4ac3505eed5dc831d7de0743 | 076f7dd70711836f32f9c2118ad0db21ce182ea2 | refs/heads/master | 2021-06-04T19:23:34.065124 | 2020-02-12T04:22:11 | 2020-02-12T04:22:11 | 107,945,277 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,024 | rd | scatterplot.CI.Rd |
\name{scatterplot.CI}
\alias{scatterplot.CI}
\title{Draw a scatterplot with fitted line and/or confidence line. }
\usage{
scatterplot.CI(mydata, x, y, line.col = "black", confidence.line.col = "red", confidence.line = FALSE, ...)
}
\description{
Draw a scatterplot with fitted line and/or confidence line.
}
\arguments{
\item{mydata}{a data.frame.}
\item{x}{a string giving the name of the x variable.}
\item{y}{a string giving the name of the y variable.}
\item{line.col}{colour of the fitted line. The default is black.}
\item{confidence.line.col}{colour of the confidence lines. The default is red. Ignored when \code{confidence.line} is FALSE.}
\item{confidence.line}{logical; if TRUE, confidence lines are added to the plot. The default is FALSE.}
\item{...}{further arguments passed to the function \code{\link{plot}}.}
}
\examples{
data(leaflife, package = "smatr")
scatterplot.CI(mydata = leaflife, x = "lma", y = "longev")
scatterplot.CI(mydata = subset(leaflife, soilp == "high"), x = "lma", y = "longev")
scatterplot.CI(mydata = subset(leaflife, soilp == "high"), x = "lma", y = "longev", confidence.line = T)
}
|
6b126b4ed2c1006773469c34fd1e9fc6e6dac02f | 6c321997b2237e3432ebc89866e47c5636e8ccde | /R/auc.R | 2812176b54ff3b28fd44252f18e94b6ac2456bc0 | [] | no_license | cran/coca | e37d4a524d58e47400158ac4cfea0ea10570038e | 2baeffda08df37be4aa3b0638f99e00869a49a37 | refs/heads/master | 2021-05-16T23:21:41.927083 | 2020-07-06T16:00:09 | 2020-07-06T16:00:09 | 250,513,558 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,107 | r | auc.R | #' Compute Area Under the Curve (AUC)
#'
#' This function computes the area under the empirical cumulative
#' distribution function (ECDF) of the entries of a consensus matrix, as
#' described in Monti et al. (2003), Section 3.3.1.
#'
#' @param consensusMatrix Consensus matrix, output of the
#' "coca::consensusCluster" function.
#' @return The area under the ECDF of the consensus matrix entries.
#' @author Alessandra Cabassi \email{alessandra.cabassi@mrc-bsu.cam.ac.uk}
#' @keywords internal
#'
computeAUC <- function(consensusMatrix) {
    # Sort all matrix entries; the AUC is the sum of the rectangles
    # (x[i+1] - x[i]) * ECDF(x[i+1]) over consecutive sorted values.
    x <- sort(as.vector(consensusMatrix))
    empirical_cdf <- stats::ecdf(x)
    dx <- diff(x)               # gaps between consecutive sorted values
    sum(dx * empirical_cdf(x[-1]))
}
#' Plot area under the curve
#'
#' Plot the relative change in the area under the curve for different values
#' of K.
#' @param deltaAUC Vector of the difference between the area under the curve
#' between each value K of the number of clusters and K-1. For the smallest
#' value considered (usually two) this is simply the area under the curve for
#' that value of cluster number.
#' @param chosenK Chosen number of clusters. If specified, a vertical line is
#' plotted in correspondence of the indicated value. Default is NULL.
#' @param fileName Name of the png file. The ".png" extension is appended
#' only when it is not already present (the previous implementation always
#' appended it, so the default produced "deltaAUC.png.png").
#' @author Alessandra Cabassi \email{alessandra.cabassi@mrc-bsu.cam.ac.uk}
#' @keywords internal
#'
plotDeltaAUC <- function(deltaAUC, chosenK = NULL, fileName = "deltaAUC.png") {
    # x axis runs over K = 2, ..., maxK
    maxK <- length(deltaAUC) + 1
    # Append the extension only when missing, avoiding "file.png.png"
    if (!grepl("\\.png$", fileName))
        fileName <- paste0(fileName, ".png")
    grDevices::png(fileName, width = 400, height = 400)
    graphics::plot(2:maxK,
        deltaAUC,
        xlab = "Number of clusters",
        ylab = "Relative change in area under the curve",
        type = "o")
    # Mark the selected number of clusters, when given
    if (!is.null(chosenK))
        graphics::abline(v = chosenK)
    grDevices::dev.off()
}
#' Choose number of clusters based on AUC
#'
#' This function allows to choose the number of clusters in a dataset
#' based on the area under the curve of the empirical distribution
#' function of a consensus matrix, calculated for different (consecutive)
#' cluster numbers, as explained in the article by Monti et al. (2003),
#' Section 3.3.1.
#'
#' @param areaUnderTheCurve Vector of length maxK-1 containing the area
#' under the curve of the empirical distribution function of the
#' consensus matrices obtained with K varying from 2 to maxK.
#' @param savePNG Boolean. If TRUE, a plot of the relative change in the area
#' under the curve for each value of K is saved as a png file. The file is
#' saved in a subdirectory of the working directory, called "delta-auc".
#' Default is FALSE.
#' @param fileName If \code{savePNG} is TRUE, this is the name of the png file.
#' Can be used to specify the folder path too. Default is "deltaAUC". The
#' ".png" extension is automatically added to this string.
#' @return This function returns a list containing:
#' \item{deltaAUC}{a vector of
#' length maxK-1 where element i is the relative increase in the area under
#' the curve when moving from K = i to K = i+1 (for K = 2 this is simply
#' the area under the curve)}
#' \item{K}{the selected number of clusters: the largest K whose relative
#' increase in the area under the curve exceeds 2.5\%, or 2 if no value
#' passes the threshold.}
#' @author Alessandra Cabassi \email{alessandra.cabassi@mrc-bsu.cam.ac.uk}
#' @examples
#' # Assuming that we want to choose among any value of K (number of clusters)
#' # between 2 and 10 and that the area under the curve is as follows:
#' areaUnderTheCurve <- c(0.05, 0.15, 0.4, 0.5, 0.55, 0.56, 0.57, 0.58, 0.59)
#'
#' # The optimal value of K can be chosen with:
#' K <- chooseKusingAUC(areaUnderTheCurve)$K
#' @references Monti, S., Tamayo, P., Mesirov, J. and Golub, T., 2003. Consensus
#' clustering: a resampling-based method for class discovery and visualization
#' of gene expression microarray data. Machine learning, 52(1-2), pp.91-118.
#' @export
#'
chooseKusingAUC <-
    function(areaUnderTheCurve,
             savePNG = FALSE,
             fileName = "deltaAUC") {
    # Maximum number of clusters considered (AUC is given for K = 2, ..., maxK)
    maxK <- length(areaUnderTheCurve) + 1
    # deltaAUC[i] holds the relative increase in AUC at K = i + 1
    # (Equation 7 in Monti et al., 2003)
    deltaAUC <- rep(NA_real_, maxK - 1)
    # For K = 2 there is no previous value, so use the AUC itself
    deltaAUC[1] <- areaUnderTheCurve[1]
    # The AUC values are not always monotonically increasing, so keep the
    # maximum value encountered so far
    maxAUC <- areaUnderTheCurve[1]
    # seq_len(...)[-1] yields 2, ..., maxK-1 and, unlike 2:(maxK-1), is empty
    # when only a single AUC value is supplied (2:1 would loop backwards)
    for (i in seq_len(maxK - 1)[-1]) {
        deltaAUC[i] <- (areaUnderTheCurve[i] - maxAUC)/maxAUC
        maxAUC <- max(areaUnderTheCurve[i], maxAUC)
    }
    # Select the largest K whose relative AUC increase exceeds 2.5%; fall back
    # to K = 2 when no value passes the threshold (the previous
    # max(which(...)) + 1 returned -Inf with a warning in that case)
    aboveThreshold <- which(deltaAUC > 0.025)
    K <- if (length(aboveThreshold) > 0) max(aboveThreshold) + 1 else 2L
    if (savePNG)
        plotDeltaAUC(deltaAUC, K, fileName)
    output <- list(deltaAUC = deltaAUC, K = K)
    return(output)
}
|
b098eac1ebf0c8c2ee939c4cddd628f4e54b4fdd | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/compute.es/examples/r_to_es.Rd.R | a8d765f2f23798ca7a9f7f3c155ab54612e0f5ff | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 230 | r | r_to_es.Rd.R | library(compute.es)
### Name: res
### Title: Correlation coefficient (r) to Effect Size
### Aliases: res
### Keywords: arith
### ** Examples
# CALCULATE SEVERAL EFFECT SIZES BASED ON CORRELATION STATISTIC:
# res() converts an observed correlation of r = 0.3 on n = 30 cases into the
# package's set of derived effect-size estimates.
res(.3, n=30)
|
6b76c3e1d45827d95412af39618ca2f224babf40 | 043d5872c9be9e65b738f264b0fd5186061ec50c | /man/Chua.norm-methods.Rd | 828b098689cffa464efaea4eb9f47677482013a4 | [] | no_license | cran/NetPreProc | 19c7d9606236dc96c5481061b29978cf3c4177ce | 32b12f3fae585025664ef6cbd501e3d7fa5678df | refs/heads/master | 2022-09-23T08:26:18.988099 | 2022-09-19T10:06:10 | 2022-09-19T10:06:10 | 17,713,777 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,771 | rd | Chua.norm-methods.Rd | \name{Chua.norm-methods}
\docType{methods}
%\alias{Chua.norm-methods}
\alias{Chua.norm,graph-method}
\alias{Chua.norm,matrix-method}
\alias{Chua.norm}
\title{ Chua normalization }
\description{
Normalization of graphs according to Chua et al., 2007.
The normalized weights between nodes are computed by taking into account their neighborhoods.
This normalization is meaningful in particular with interaction data.
More precisely, the normalized weight \eqn{W_{ij}} between nodes \eqn{i} and \eqn{j} is computed by taking into account their neighborhoods \eqn{N_i} and \eqn{N_j} :
\deqn{W_{ij} = \frac{2|N_i \cap N_j|}{|N_i \setminus N_j| + 2|N_i \cap N_j| + 1}\times \frac{2|N_i \cap N_j|}{|N_j \setminus N_i| + 2|N_i \cap N_j| + 1}}
where \eqn{N_k} is the set of the neighbors of gene \eqn{k} (\eqn{k} is included).
}
\usage{
Chua.norm(W)
%%\S4method{Binary.matrix.by.thresh}{"matrix"}(W, thresh=0.5)
}
\arguments{
\item{W}{
an object representing the graph to be normalized
}
}
\value{
The normalized adjacency matrix of the network
}
\section{Methods}{
\describe{
\item{\code{signature(W = "graph")}}{
an object of the virtual class graph (hence including objects of class \code{\link[graph:graphAM-class]{graphAM}} and \code{\link[graph:graphNEL-class]{graphNEL}} from the package \pkg{graph})
}
\item{\code{signature(W = "matrix")}}{
a matrix representing the adjacency matrix of the graph
}
}}
\examples{
\donttest{library(bionetdata);
data(Yeast.Biogrid.data);
W <- Chua.norm(Yeast.Biogrid.data);}
}
\references{
Chua, H., Sung, W., & Wong, L. An efficient strategy for extensive
integration of diverse biological data for protein function prediction. Bioinformatics, 23 , 3364--3373, 2007.
}
\keyword{methods}
\keyword{graph normalization}
|
88c89167a47efcd0d4a3f4e49347570d2902f902 | cbb8c2f78b16577cf7262c1e41525086d2d6facf | /Analysis_Mouse_Rebuttal.R | 268f3b92f8ab24d5d8eca5a6dddfa9cc4bc2ae40 | [] | no_license | gosianow/microarrays_edwin | f026386166e01f36120723f07982aed15f85eb24 | 72440bfb79c10e9251ab1345a9fb61df9fc81ad0 | refs/heads/master | 2021-01-20T11:48:22.611909 | 2015-12-10T17:48:48 | 2015-12-10T17:48:48 | 31,887,351 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 82,661 | r | Analysis_Mouse_Rebuttal.R | ###########################################################################
# Created 16 Oct 2015
# BioC 3.0
# DE analysis of Affymetrix Mouse Gene 2.0 ST arrays (pd.mogene.2.0.st)
# Additional replicates
# Update 27 Oct 2015
# Add pre versus after treatment analysis
###########################################################################
# NOTE(review): setwd() to an absolute path ties this script to one machine;
# consider running it from the project root instead.
setwd("/home/Shared/data/array/microarrays_edwin")
path_plots <- "Analysis_Mouse_Rebuttal/Plots/"
path_results <- "Analysis_Mouse_Rebuttal/"
dir.create(path_plots, showWarnings = FALSE, recursive = TRUE)
dir.create(path_results, showWarnings = FALSE, recursive = TRUE)
###########################################################################
# create targets table with information about samples from Micro_array_sample_list.txt
###########################################################################
library(limma)
metadata <- read.table("metadata/Micro_array_sample_list.txt", sep = "\t")
# One row per array: sample annotation joined with the CEL file paths.
# NOTE(review): this assumes list.files() returns the CEL files in the same
# order as the rows of the sample list -- TODO confirm.
targets <- data.frame(metadata , FileName = list.files("CEL/", pattern="IA201502" ,full.names = TRUE))
colnames(targets) <- c("Experiment", "SampleNr", "CellType", "FileName")
# Collapse the experiment annotation to short group labels
targets$ExperimentShort <- targets$Experiment
levels(targets$ExperimentShort) <- c("Bone marrow control"="control", "Kit control"="control", "Leukemia"="leukemia", "Leukemia after treatment" = "afterTreatment", "T cell control" = "control", "Thymocyte control" = "control")
# Collapse the cell-type annotation (replicate numbers dropped) to short labels
targets$CellTypeShort <- targets$CellType
levels(targets$CellTypeShort) <- c("907" = "907", "907 - Post Dex" = "907", "B2M10" = "B2M10", "B2M2" = "B2M2", "B2M3" = "B2M3", "B2M3 Post dex" = "B2M3", "B3M3" = "B3M3", "B3M30" = "B3M30", "CD4 T cells spleen 1" = "CD4", "CD4 T cells spleen 2" = "CD4", "CD4 T cells spleen 3" = "CD4", "CD4+8+ DP Thymocytes 1" = "CD4+8+", "CD4+8+ DP Thymocytes 2" = "CD4+8+", "CD4+8+ DP Thymocytes 3" = "CD4+8+", "CD8 T cells spleen 1" = "CD8", "CD8 T cells spleen 2" = "CD8", "CD8 T cells spleen 3" = "CD8", "HeLa control" = "HeLa", "Whole bone marrow 1" = "wholeBoneMarrow", "Whole bone marrow 2" = "wholeBoneMarrow", "Whole bone marrow 3" = "wholeBoneMarrow")
targets$labels <- factor(paste(targets$ExperimentShort, targets$CellTypeShort, sep="_" ))
# groups = labels with all leukemia / after-treatment cell types merged
targets$groups <- targets$labels
levels(targets$groups)[grep(pattern = "leukemia", levels(targets$groups))] <- "leukemia"
levels(targets$groups)[grep(pattern = "afterTreatment", levels(targets$groups))] <- "afterTreatment"
# replicate index: blank for the first 12 arrays, then 1-3 in blocks of 4
targets$ctrlRep <- c(rep("", 12), rep(1:3, rep(4, 3)))
nlevels(targets$groups)
levels(targets$groups)
# one colour per group level (colour-blind friendly palette)
cbPalette <- c("#D55E00", "#F0E442","#56B4E9", "#009E73", "#0072B2","#CC79A7", "#999999")
pdf("Colors.pdf", width = 15)
barplot(rep(1, 7), col = cbPalette, names.arg = levels(targets$groups))
dev.off()
targets$colors <- cbPalette[targets$groups]
write.table(targets, file = file.path("metadata", "targets.xls"), quote = FALSE, sep = "\t", row.names = FALSE)
###########################################################################
# add samples from Micro_array_sample_list_rebuttal.xls
###########################################################################
targets_batch1 <- read.table(file.path("metadata", "targets.xls"), header = TRUE, sep = "\t", comment.char = "", as.is = TRUE)
targets_batch1$batch <- 1
targets_batch2 <- read.table(file.path("metadata", "Micro_array_sample_list_rebuttal.xls"), header = TRUE, sep = "\t", comment.char = "", as.is = TRUE)
# first 5 rebuttal arrays get batch 2; the last one is assigned to batch 1
# (presumably it was hybridised with the first batch -- TODO confirm)
targets_batch2$batch <- c(rep(2, 5), 1)
# carry over the group colours defined for batch 1
colors <- unique(targets_batch1[, c("groups", "colors")])
targets_batch2$colors <- colors$colors[match(targets_batch2$groups, colors$groups)]
targets <- rbind(targets_batch1, targets_batch2)
write.table(targets, file = file.path("metadata", "targets_all.xls"), quote = FALSE, sep = "\t", row.names = FALSE)
###########################################################################
# read in all targets
###########################################################################
targets_org <- targets <- read.table(file.path("metadata", "targets_all.xls"), header = TRUE, sep = "\t", comment.char = "", as.is = TRUE)
###########################################################################
#### import cel files
###########################################################################
# source("http://bioconductor.org/biocLite.R")
# biocLite("pd.mogene.2.0.st")
library(oligo)
library(pd.mogene.2.0.st)
ff <- as.character(targets$FileName)
x <- oligo::read.celfiles(filenames = ff) ## GeneFeatureSet
# Diagnostics on the raw (unnormalised) intensities
pdf(paste0(path_plots, "boxplot.pdf"))
par(mar = c(12, 4, 4, 2) + 0.1) # c(bottom, left, top, right), default = c(5, 4, 4, 2) + 0.1
# NOTE(review): `las = 2` is passed twice in this call
boxplot(x, las = 2, col = targets$colors, names = targets$labels, las=2)
dev.off()
pdf(paste0(path_plots, "hist.pdf"))
par(mar = c(5, 4, 4, 2) + 0.1)
hist(x, col = targets$colors, lwd = 2)
legend("topright", legend = targets$labels, col = targets$colors, lty = 1, lwd = 2, cex = 0.8)
dev.off()
###########################################################################
### PLM normalization; create images of chips, NUSE and RLE plots
###########################################################################
fitplm <- oligo::fitProbeLevelModel(x)
pdf(paste0(path_plots,"NUSE_fitplm.pdf"))
par(mar = c(12, 4, 4, 2) + 0.1) # c(bottom, left, top, right), default = c(5, 4, 4, 2) + 0.1
oligo::NUSE(fitplm, col = targets$colors, names = targets$labels, las=2)
dev.off()
pdf(paste0(path_plots, "RLE_fitplm.pdf"))
par(mar = c(12, 4, 4, 2) + 0.1) # c(bottom, left, top, right), default = c(5, 4, 4, 2) + 0.1
oligo::RLE(fitplm, col = targets$colors, names = targets$labels, las=2)
dev.off()
###########################################################################
### Normalization with RMA
###########################################################################
eset_org <- eset <- oligo::rma(x) ## Is the expression in log2 scale? ## ExpressionSet
# Diagnostics on the RMA-normalised expression values
pdf(paste0(path_plots, "boxplot_norm.pdf"))
par(mar = c(12, 4, 4, 2) + 0.1) # c(bottom, left, top, right), default = c(5, 4, 4, 2) + 0.1
boxplot(eset, las = 2, col = targets$colors, names = targets$labels)
dev.off()
pdf(paste0(path_plots, "hist_norm.pdf"))
par(mar = c(5, 4, 4, 2) + 0.1)
hist(eset, col = targets$colors, lwd = 2)
legend("topright", legend = targets$labels, col = targets$colors, lty = 1, lwd = 2, cex = 0.8)
dev.off()
###########################################################################
### MDS plots
###########################################################################
library(limma)
########## All samples
eset <- eset_org
targets <- targets_org
labels <- targets$groups
pdf(paste0(path_plots, "MDS_all.pdf"), width = 5, height = 5)
mds <- plotMDS(eset, top=1000, col = targets$colors, labels = labels, cex = 1.2)
dev.off()
legend <- unique(targets[, c("groups", "colors")])
min <- min(mds$x, mds$y)
max <- max(mds$x, mds$y)
pdf(paste0(path_plots, "MDS_all_points.pdf"), width = 5, height = 5)
plot(mds$x, mds$y, pch = targets$batch, col = targets$colors, las = 1, cex.axis = 1, cex.lab = 1, xlab = "Leading logFC dim 1", ylab = "Leading logFC dim 2", cex = 1, xlim = c(min, max), ylim = c(min, max))
# text(mds$x, mds$y, labels = targets$CellTypeShort, pos = 3, offset = 0.3, cex = 0.3)
legend("topleft", legend = c(legend$groups, "batch1", "batch2"), pch = c(rep(16, nrow(legend)), 1, 2), col = c(legend$colors, 1, 1), cex = 0.8, bty = "n")
dev.off()
########## All samples with no hela
# Repeat the MDS after excluding the HeLa sample, which may dominate dim 1.
keep_samps <- targets_org$CellTypeShort != "HeLa"
eset <- eset_org[, keep_samps]
targets <- targets_org[keep_samps, ]
labels <- targets$groups
pdf(paste0(path_plots, "MDS_all_noHela.pdf"), width = 5, height = 5)
mds <- plotMDS(eset, top=1000, col = targets$colors, labels = labels, cex = 1.2)
dev.off()
legend <- unique(targets[, c("groups", "colors")])
min <- min(mds$x, mds$y)
max <- max(mds$x, mds$y)
# Point version: pch = batch, color = group (same convention as above).
pdf(paste0(path_plots, "MDS_all_noHela_points.pdf"), width = 5, height = 5)
plot(mds$x, mds$y, pch = targets$batch, col = targets$colors, las = 1, cex.axis = 1, cex.lab = 1, xlab = "Leading logFC dim 1", ylab = "Leading logFC dim 2", cex = 1, xlim = c(min, max), ylim = c(min, max))
# text(mds$x, mds$y, labels = targets$CellTypeShort, pos = 3, offset = 0.3, cex = 0.3)
legend("bottomleft", legend = c(legend$groups, "batch1", "batch2"), pch = c(rep(16, nrow(legend)), 1, 2), col = c(legend$colors, 1, 1), cex = 0.8, bty = "n")
dev.off()
### zoom on leukemia and treatment samples
# Zoom: reuse the coordinates from the previous plotMDS fit (not recomputed)
# and subset them to the non-control samples; `targets` is subset in step so
# pch/col/labels stay aligned with the subset coordinates.
keep_samps <- targets$ExperimentShort != "control"
eset <- eset[, keep_samps]
targets <- targets[keep_samps, ]
legend <- unique(targets[, c("groups", "colors")])
min <- min(mds$x[keep_samps], mds$y[keep_samps])
max <- max(mds$x[keep_samps], mds$y[keep_samps])
pdf(paste0(path_plots, "MDS_all_noHela_points_zoom.pdf"), width = 5, height = 5)
plot(mds$x[keep_samps], mds$y[keep_samps], pch = targets$batch, col = targets$colors, las = 1, cex.axis = 1, cex.lab = 1, xlab = "Leading logFC dim 1", ylab = "Leading logFC dim 2", cex = 1, xlim = c(min, max), ylim = c(min, max))
text(mds$x[keep_samps], mds$y[keep_samps], labels = targets$CellTypeShort, pos = 3, offset = 0.4, cex = 0.5, col = targets$colors)
legend("bottomleft", legend = c(legend$groups, "batch1", "batch2"), pch = c(rep(16, nrow(legend)), 1, 2), col = c(legend$colors, 1, 1), cex = 0.8, bty = "n")
dev.off()
########## Only controls
# MDS restricted to control samples (HeLa excluded) to check that control
# cell types separate cleanly from each other.
keep_samps <- targets_org$CellTypeShort != "HeLa" & targets_org$ExperimentShort == "control"
eset <- eset_org[, keep_samps]
targets <- targets_org[keep_samps, ]
labels <- targets$groups
pdf(paste0(path_plots, "MDS_controls.pdf"), width = 5, height = 5)
mds <- plotMDS(eset, top=1000, col = targets$colors, labels = labels, cex = 1.2)
dev.off()
legend <- unique(targets[, c("groups", "colors")])
min <- min(mds$x, mds$y)
max <- max(mds$x, mds$y)
pdf(paste0(path_plots, "MDS_controls_points.pdf"), width = 5, height = 5)
plot(mds$x, mds$y, pch = targets$batch, col = targets$colors, las = 1, cex.axis = 1, cex.lab = 1, xlab = "Leading logFC dim 1", ylab = "Leading logFC dim 2", cex = 1, xlim = c(min, max), ylim = c(min, max))
# text(mds$x, mds$y, labels = targets$CellTypeShort, pos = 3, offset = 0.3, cex = 0.3)
# Controls only, so no batch symbols are added to the legend here.
legend("topright", legend = c(legend$groups), pch = c(rep(16, nrow(legend))), col = c(legend$colors), cex = 0.8, bty = "n")
dev.off()
########## Pre and after treatment
# MDS restricted to the non-control (leukemia / treatment) samples,
# again excluding HeLa.
keep_samps <- targets_org$CellTypeShort != "HeLa" & targets_org$ExperimentShort != "control"
eset <- eset_org[, keep_samps]
targets <- targets_org[keep_samps, ]
labels <- targets$groups
pdf(paste0(path_plots, "MDS_treatment.pdf"), width = 5, height = 5)
mds <- plotMDS(eset, top=1000, col = targets$colors, labels = labels, cex = 1.2)
dev.off()
legend <- unique(targets[, c("groups", "colors")])
min <- min(mds$x, mds$y)
max <- max(mds$x, mds$y)
pdf(paste0(path_plots, "MDS_treatment_points.pdf"), width = 5, height = 5)
plot(mds$x, mds$y, pch = targets$batch, col = targets$colors, las = 1, cex.axis = 1, cex.lab = 1, xlab = "Leading logFC dim 1", ylab = "Leading logFC dim 2", cex = 1, xlim = c(min, max), ylim = c(min, max))
# Cell-type labels are drawn here (unlike the overview plots above).
text(mds$x, mds$y, labels = targets$CellTypeShort, pos = 3, offset = 0.4, cex = 0.5, col = targets$colors)
legend("bottomleft", legend = c(legend$groups, "batch1", "batch2"), pch = c(rep(16, nrow(legend)), 1, 2), col = c(legend$colors, 1, 1), cex = 0.8, bty = "n")
dev.off()
###########################################################################
####### Do NOT keep the HeLa sample for the rest of the analysis
###########################################################################
# Permanently remove the HeLa control from BOTH the working and the _org
# copies: everything downstream sees the HeLa-free data.
keepSAMPS <- targets_org$labels != "control_HeLa"
eset_org <- eset <- eset_org[, keepSAMPS]
targets_org <- targets <- targets_org[keepSAMPS, ]
# Persist the filtered sample sheet for downstream scripts.
save(targets_org, file = paste0(path_results, "targets_org.Rdata"))
###########################################################################
####### NetAffx Annotation with getNetAffx()
###########################################################################
# Transcript-level NetAffx annotation shipped with the pd.* package;
# pData() turns the AnnotatedDataFrame into a plain data.frame with one
# row per transcript cluster (probe set).
infoNetAffx <- pData(getNetAffx(eset, "transcript"))
head(infoNetAffx)
# Exploratory sanity checks kept for reference (not run):
# apply(infoNetAffx, 2, function(cat){sum(is.na(cat))})
#
# all(infoNetAffx$transcriptclusterid == infoNetAffx$probesetid)
#
# sum(infoNetAffx$totalprobes)
#
# ### check how many probesets have no annotation in fData and in infoNetAffx
# table(is.na(fData(eset)[,"ENTREZID"]))
#
# table(is.na(fData(eset)[,"ENTREZID"]) & is.na(infoNetAffx$geneassignment))
###########################################################################
####### DO NOT RUN! Get annotation from NetAffx files from Affy website
### http://www.affymetrix.com/estore/browse/level_three_category_and_children.jsp?category=35868&categoryIdClicked=35868&expand=true&parent=35617
###########################################################################
# NOTE(review): despite the "DO NOT RUN" header, the statements below are
# live code (not commented out) and will execute when the script is sourced,
# including a network download. Consider commenting this section out or
# guarding it with an if (FALSE) block.
# source("http://bioconductor.org/biocLite.R")
# biocLite("AffyCompatible")
### Download the data
library(AffyCompatible)
# password <- AffyCompatible:::acpassword
# NOTE(review): hard-coded account credentials committed to source; move the
# user/password to environment variables or a local config outside VCS.
rsrc <- NetAffxResource(user = "gosia.nowicka@uzh.ch", password = "mockIP27", directory = "NetAffx")
availableArrays <- names(rsrc)
head(availableArrays)
availableArrays[grep("Mo", availableArrays)]
affxDescription(rsrc[["MoGene-2_0-st-v1"]])
annos <- rsrc[["MoGene-2_0-st-v1"]]
annos
# Fourth annotation entry -- presumably the transcript CSV; TODO confirm the
# index is stable across NetAffx releases.
anno <- affxAnnotation(annos)[[4]]
anno
fl <- readAnnotation(rsrc, annotation=anno, content=FALSE)
### Check what is in there
# Read the transcript CSV straight out of the downloaded zip archive.
fl <- "NetAffx/MoGene-2_0-st-v1.na34.mm10.transcript.csv.zip"
conn <- unz(fl, "MoGene-2_0-st-v1.na34.mm10.transcript.csv")
# readLines(conn, n=20)
infoNetAffx2 <- read.table(conn, header=TRUE, sep=",", as.is = TRUE)
rownames(infoNetAffx2) <- infoNetAffx2$transcript_cluster_id
dim(infoNetAffx2)
# "---" is NetAffx's missing-value marker; count missing entries per column.
apply(infoNetAffx2, 2, function(cat){sum(cat == "---")})
#### compare infoNetAffx2 with infoNetAffx
# all(infoNetAffx2$transcript_cluster_id == infoNetAffx2$probeset_id)
# colnames(infoNetAffx) <- colnames(infoNetAffx2)
#
# probesetID <- "17457722" ## probe set with no ENTREZID
# infoNetAffx2[probesetID,]
# infoNetAffx[probesetID,]
#
# infoNetAffx2[probesetID,] == infoNetAffx[probesetID,]
#
# infoNetAffx2[probesetID, "mrna_assignment"]
# infoNetAffx[probesetID, "mrna_assignment"]
# probesetID <- "17457722" ## probe set with no ENTREZID
# infoNetAffx2[probesetID, "gene_assignment"]
# geneAssi <- strsplit(infoNetAffx2$gene_assignment, " /// ")
###########################################################################
####### remove control probes == keep main probes
###########################################################################
# source("http://bioconductor.org/biocLite.R")
# biocLite("affycoretools")
# library(affycoretools)
# eset_main <- affycoretools::getMainProbes(eset) ### gives different results
table(infoNetAffx$category, useNA = "always")
# Guard: the annotation rows must line up 1:1 with the eset features,
# otherwise the logical filter below would be misaligned.
all(featureNames(eset) == rownames(infoNetAffx))
# Keep only "main" (experimental) probe sets; drops control/background probes.
keepMAIN <- infoNetAffx$category == "main"
eset_main <- eset[keepMAIN, ]
###########################################################################
####### Keep probes from chr1-chr19, Y, X
###########################################################################
table(infoNetAffx$seqname, useNA = "always")
# Restrict to the standard mouse chromosomes (autosomes 1-19 plus X, Y);
# removes probe sets on unplaced scaffolds / missing seqnames.
keepCHR <- featureNames(eset_main) %in% rownames(infoNetAffx)[which(infoNetAffx$seqname %in% paste0("chr", c(1:19, "Y", "X")), useNames = TRUE)]
table(keepCHR)
eset_main <- eset_main[keepCHR, ]
###########################################################################
####### DO NOT USE THIS ONE. Annotation from mogene20sttranscriptcluster - has many entrez IDs missing
###########################################################################
# source("http://bioconductor.org/biocLite.R")
# biocLite("mogene20sttranscriptcluster.db")
expr <- data.frame(exprs(eset))
library(mogene20sttranscriptcluster.db)
### Display all mappings
mogene20sttranscriptcluster()
# I way
# Collapse multi-gene mappings into a single " /// "-separated string per
# probe set (matches the NetAffx separator convention).
annot <- data.frame(SYMBOL=sapply(contents(mogene20sttranscriptclusterSYMBOL), paste, collapse=" /// "), ENTREZID=sapply(contents(mogene20sttranscriptclusterENTREZID), paste, collapse=" /// "), stringsAsFactors = FALSE)
colnames(annot) <- c("GeneSymbol_mogene20", "EntrezGeneID_mogene20")
# Normalize literal "NA" strings to the "---" missing marker used elsewhere.
annot[annot == "NA"] <- "---"
annot_mergeogene20 <- annot
# Reorder/subset the annotation to match the filtered eset features.
annot_mergeogene20 <- annot_mergeogene20[featureNames(eset_main), ]
# # II way
# probes.ALL=row.names(expr)
# SYMBOL = unlist(mget(probes.ALL, mogene20sttranscriptclusterSYMBOL))
# ENTREZID = unlist(mget(probes.ALL, mogene20sttranscriptclusterENTREZID))
#
#
# ### check if it returns always one values - YES
# mg <- mget(probes.ALL, mogene20sttranscriptclusterENTREZID)
# table(sapply(mg, length))
# # IV way
# probes.ALL=row.names(expr)
# SYMBOLb = sapply(mget(probes.ALL, mogene20sttranscriptclusterSYMBOL), paste, collapse=", ")
# ENTREZIDb = sapply(mget(probes.ALL, mogene20sttranscriptclusterENTREZID), paste, collapse=", ")
#
# all(SYMBOL == SYMBOLb)
# # III way
# library(annotate)
# probes.ALL <- featureNames(eset)
# SYMBOL <- getSYMBOL(probes.ALL,"mogene20sttranscriptcluster.db")
# annot <- data.frame(SYMBOL = SYMBOL, ENTREZID = ENTREZID , stringsAsFactors = FALSE)
#
# fData(eset) <- annot
#
#
# table(is.na(annot$ENTREZID))
# table(is.na(annot$SYMBOL))
#
#
# eset_org <- eset
###########################################################################
####### Get annotation from formated files from Affy website (NetAffx Analysis Center)
### http://www.affymetrix.com/analysis/index.affx
###########################################################################
library(limma)
###### files that are formated for easy load
annof <- list.files("NetAffx", pattern = ".tsv", full.names = TRUE)
annof
### does not work
# anno_list <- read.table(annof[2], header = TRUE, sep = "\t", as.is = TRUE)
############## use public_database_references
# allLines <- readLines(annof[3], n=-1)
#
# pdr <- data.frame(strsplit2(allLines, "\t"), stringsAsFactors = FALSE)
# colnames(pdr) <- gsub(pattern = " ", replacement = "" ,pdr[1,])
# pdr <- pdr[-1,]
# rownames(pdr) <- pdr$TranscriptClusterID
#
# head(pdr)
# dim(pdr)
#
# colnames(pdr)
#
# table(pdr$EntrezGeneID == "---")
# table(pdr$GeneSymbol == "---")
# table(pdr$TranscriptID == "---")
# table(pdr$GOID == "---")
#
# table(pdr[ featureNames(eset_main) ,"GOID"]== "---")
# table(pdr[ featureNames(eset_main) ,"TranscriptID"]== "---")
# table(pdr[ featureNames(eset_main) ,"GeneSymbol"] == "---")
#
# probesetID <- "17299972" ## probe set with no ENTREZID
#
# pdr[probesetID,]
#
# pdr[probesetID, "GeneSymbol"]
#
# infoNetAffx2[probesetID,]
############## use gene_list
# Parse the NetAffx "gene_list" TSV manually: read raw lines, split on tabs
# (limma::strsplit2 returns a character matrix), then promote the first row
# to (whitespace-stripped) column names.
allLines <- readLines(annof[grepl("gene_list", annof)], n=-1)
gl <- data.frame(strsplit2(allLines, "\t"), stringsAsFactors = FALSE)
colnames(gl) <- gsub(pattern = " ", replacement = "" ,gl[1,])
gl <- gl[-1,]
rownames(gl) <- gl$TranscriptClusterID
colnames(gl)
# ### check for how many probe sets there is GO
# head(gl$GODescription)
# table(gl$GODescription == "---")
#
#
# # dim(gl)
#
# table(gl$EntrezGeneID == "---")
# table(gl$GeneSymbol == "---")
# table(gl$GeneTitle == "---")
#
#
# table(gl[ featureNames(eset_main) ,"GeneSymbol"] == "---")
# table(gl[ featureNames(eset_main) ,"GeneTitle"] == "---")
#
#
# ### list of probe sets with no annotation in the end
# noAnnot <- featureNames(eset_main)[gl[ featureNames(eset_main) ,"GeneSymbol"] == "---"]
#
# # probesetID <- "17422859" ## probe with ENTREZID: Tnfrsf4 22163
# # infoNetAffx2[probesetID, 1:9]
# # gl[probesetID,]
# Chosen annotation source: the NetAffx gene_list columns, row-matched to the
# filtered eset features.
annot <- gl[ featureNames(eset_main) ,c("GeneSymbol", "EntrezGeneID", "GeneTitle")]
# ### compare annot with annot_mergeogene20 - weird thing for some probe sets the info is different... But what is in annot agrees with infoNetAffx2.
#
# table(annot_mergeogene20$GeneSymbol_mogene20 == "---")
# table(annot$GeneSymbol == "---")
#
# table(annot_mergeogene20$EntrezGeneID_mogene20 == "---")
# table(annot$EntrezGeneID == "---")
#
# head(annot_mergeogene20)
# head(annot)
#
# infoNetAffx2["17210883", "gene_assignment"]
# infoNetAffx["17210883", "geneassignment"]
#
#
# infoNetAffx2["17210869", "gene_assignment"]
# infoNetAffx["17210869", "geneassignment"]
#
#
# infoNetAffx2["17210883", "mrna_assignment"]
# infoNetAffx["17210883", "mrnaassignment"]
#
#
# probeID <- "17532811" # Foxp3
#
# annot_mergeogene20[probeID, ]
# annot[probeID, ]
# annot_merge[probeID, ]
###########################################################################
####### Get ENSEMBL annotation using biomaRt
###########################################################################
library(biomaRt)
mart <- useMart("ensembl")
listDatasets(mart)
mart <- useMart("ensembl", dataset="mmusculus_gene_ensembl")
attr <- listAttributes(mart)
attr[grep("affy", attr$name),]
# listFilters(mart)
# NOTE(review): the biomaRt attribute is "affy_mogene_2_1_st_v1" while the
# array in this script is MoGene-2_0-st-v1 -- presumably Ensembl only exposes
# the 2.1 attribute and the probe-set IDs are shared; TODO confirm.
genes <- getBM(attributes = c("ensembl_gene_id","external_gene_name", "description","affy_mogene_2_1_st_v1"), filters="affy_mogene_2_1_st_v1", values=featureNames(eset_main), mart=mart)
dim(genes)
head(genes)
### clean the description
# Strip the trailing " [Source: ...]" suffix from Ensembl descriptions.
genes$description <- strsplit2(genes$description, " \\[Source")[, 1]
# ### Do some checks
# ### some features have multiple ensembl annotation
# length(unique(genes$affy_mogene_2_1_st_v1))
#
# probesetID <- "17457722" ## probe set with no ENTREZID
# genes[genes$affy_mogene_2_1_st_v1 == probesetID, ]
# gl[probesetID,]
#
# probesetID <- "17422859" ## probe with ENTREZID: Tnfrsf4 22163
# genes[genes$affy_mogene_2_1_st_v1 == probesetID, ]
# gl[probesetID,]
#
#
# ### check what are the extra annotations that I get with Ensembl
# noAnnotMart <- genes[genes$affy_mogene_2_1_st_v1 %in% noAnnot, ]
# head(noAnnotMart)
# ## most of them are the predicted genes
# table(grepl("predicted", noAnnotMart$description))
# head(noAnnotMart[!grepl("predicted", noAnnotMart$description), ])
# ## for predicted genes the gene symbol starts with "Gm"
# noAnnotMart[grepl("predicted", noAnnotMart$description), "external_gene_name" ]
### Merge the info about multiple genes into one string
# Collapse the (possibly many-to-one) Ensembl annotations to one row per
# probe set, joining multiple genes with " /// " (NetAffx convention).
library(plyr)
genes_merge <- plyr::ddply(genes, "affy_mogene_2_1_st_v1", summarize, GeneSymbol_Ensembl = paste0(external_gene_name, collapse = " /// "), GeneTitle_Ensembl = paste0(description, collapse = " /// "), EnsemblGeneID = paste0(ensembl_gene_id, collapse = " /// "))
# BUG FIX: was `h(genes_merge)` -- `h` is not a function in base R or any
# package loaded here, so sourcing the script aborted at this line. The
# intent was clearly a preview, as done elsewhere in this script.
head(genes_merge)
# Left-join the NetAffx annotation (rownames = probe-set IDs, hence by.x = 0)
# with the collapsed Ensembl annotation; keep the original row order.
annot_merge <- merge(annot, genes_merge, by.x = 0, by.y = "affy_mogene_2_1_st_v1", all.x = TRUE, sort = FALSE)
colnames(annot_merge)[1] <- "ProbesetID"
rownames(annot_merge) <- annot_merge[,"ProbesetID"]
# Probe sets without an Ensembl match get the "---" missing marker so all
# annotation columns use a single missing-value convention.
annot_merge[is.na(annot_merge)] <- "---"
# ### some checks
# table(annot_merge$GeneSymbol == annot_merge$GeneSymbol_Ensembl)
#
# table(annot_merge$GeneSymbol == "---", !annot_merge$GeneSymbol_Ensembl == "---")
#
# head(annot_merge[annot_merge$GeneSymbol == "---" & !annot_merge$GeneSymbol_Ensembl == "---", ])
#
# extraAnnot <- !grepl("Gm",annot_merge[, "GeneSymbol_Ensembl"]) & annot_merge$GeneSymbol == "---" & !annot_merge$GeneSymbol_Ensembl == "---"
#
# table(extraAnnot)
#
# annot_merge[extraAnnot, c("GeneSymbol_Ensembl", "GeneTitle_Ensembl" )]
# Sanity check (printed, not asserted): merged annotation rows should align
# 1:1 with the eset features before attaching them as feature data.
all(annot_merge$ProbesetID == featureNames(eset_main))
fData(eset_main) <- annot_merge[featureNames(eset_main), ]
###########################################################################
####### Get probe info - probe 2 transcript cluster match
###########################################################################
### get probe 2 transcript match
# `x` is assumed to be the raw oligo feature set read earlier in the file
# (outside this chunk) -- TODO confirm. One row per PM probe, with its
# parent transcript cluster ID.
probeInfo <- oligo::getProbeInfo(x, field = c('fid', 'fsetid', 'level', 'type', 'transcript_cluster_id'), probeType = "pm", target='core')
head(probeInfo)
table(probeInfo$type, useNA = "always")
# Every eset feature should appear among the probe-level transcript clusters.
setequal(featureNames(eset), unique(probeInfo$transcript_cluster_id))
###########################################################################
### Get GC content per probe
###########################################################################
# probe with higher GC content will have higher background
# pms <- oligo::pm(x, target='core')
pmSeq <- oligo::pmSequence(x, target='core')
library(Biostrings)
# Count C+G per 25-mer probe sequence (letterFrequency returns a 1-col matrix).
gcCont <- letterFrequency(pmSeq, letters='CG')[,1]
table(gcCont)
probeInfo$gcCont <- gcCont
###########################################################################
####### Filtering probes with low expression
###########################################################################
#### using background information from antigenomic probesets
# library(genefilter)
#
# tblNA <- table(infoNetAffx$category, useNA = "always")
#
# antigm <- infoNetAffx[infoNetAffx$category == "control->bgp->antigenomic", "probesetid"]
#
# bgExpr <- exprs(eset)[as.character(antigm), ]
#
#
# # bkg <- apply(bgExpr, 2, quantile, probs=0.5)
# # minval <- max(bkg)
# # minval
#
#
# bkg <- apply(bgExpr, 2, mean)
# # bkg <- rowMeans( bgExpr )
#
# minval <- mean(bkg)
# minval
#
# keep <- genefilter(eset_main, filterfun(kOverA(3, minval)))
# table(keep)
#
# eset_main <- eset_main[keep,]
#################### based on GC content
### Get the background expression levels for different GC ammount
# Antigenomic background probes (no genomic target) estimate non-specific
# signal; their intensity depends on probe GC content, so we build a
# GC-stratified background threshold instead of a single global cutoff.
antigm <- infoNetAffx[infoNetAffx$category == "control->bgp->antigenomic", "probesetid"]
bgExpr <- exprs(eset)[as.character(antigm), ]
bgExpr
bgProbeInfo <- subset(probeInfo, probeInfo$type == "control->bgp->antigenomic")
head(bgProbeInfo)
### see how many probes are for each GC content
table(bgProbeInfo$gcCont)
library(plyr)
library(ggplot2)
library(reshape2)
# Mean GC content per background probe set (probes within a BGP probe set
# share GC content, so the mean is just that value).
bgTransInfo <- ddply(bgProbeInfo, "transcript_cluster_id", summarize, gcCont=mean(gcCont))
bgdf <- data.frame(bgTransInfo, bgExpr)
bgdf.m <- melt(bgdf, id.vars = c("transcript_cluster_id", "gcCont"), variable.name = "Samples", value.name = "Expression")
head(bgdf.m)
bgdf.m$gcCont <- factor(bgdf.m$gcCont)
# Background expression as a function of GC content (one box per GC value).
ggp.bg <- ggplot(data = bgdf.m, aes(x = gcCont, y = Expression)) +
geom_boxplot(colour = "lightcoral") +
theme_bw()
pdf(paste0(path_plots, "gc_boxplot.pdf"))
print(ggp.bg)
dev.off()
expr <- exprs(eset_main)
### Get the GC content for all the probe sets
# Average (then rounded) GC content across the probes of each main probe set.
transInfo <- ddply(probeInfo, "transcript_cluster_id", summarize, gcCont=mean(gcCont))
rownames(transInfo) <- transInfo$transcript_cluster_id
transInfo <- transInfo[rownames(expr), ]
transInfo$gcCont <- round(transInfo$gcCont)
### see what is the average GC content for main probe sets
table(transInfo$gcCont)
df <- data.frame(transInfo, expr)
df.m <- melt(df, id.vars = c("transcript_cluster_id", "gcCont"), variable.name = "Samples", value.name = "Expression")
head(df.m)
df.m$Type <- "Main"
bgdf.m$Type <- "BGP"
# Overlay main vs background probe sets per GC bin in one boxplot.
df.all <- rbind(df.m, bgdf.m)
df.all$gcCont <- factor(df.all$gcCont, levels = 3:25)
ggp <- ggplot(data = df.all, aes(x = gcCont, y = Expression, fill = Type)) +
geom_boxplot() +
theme_bw() +
theme(legend.position="top")
pdf(paste0(path_plots, "gc_boxplot_main_and_bgp.pdf"))
print(ggp)
dev.off()
################### set the threshold for each probe set
library(matrixStats)
# ls("package:matrixStats")
# Threshold per GC bin = 95th percentile of background expression across
# samples for the BGP probe set with that GC content.
bgTransInfo$Q095Expr <- rowQuantiles(bgExpr, probs = 0.95)
bgTransInfo
# pdf(paste0(path_plots, "gc.pdf"))
# plot(bgTransInfo$gcCont, bgTransInfo$MedianExpr, type = "p", xlab = "GC content", ylab = "Median log2 expression", pch = 16, col = "lightcoral", cex = 2)
# dev.off()
# Map each main probe set's GC content to its background threshold via a
# factor-level relabelling trick (levels = GC bins -> Q095 thresholds).
# NOTE(review): GC values outside bgTransInfo$gcCont become NA -- presumably
# none occur after rounding; TODO confirm.
transInfo$minExpr <- factor(transInfo$gcCont, levels = bgTransInfo$gcCont)
levels(transInfo$minExpr) <- bgTransInfo$Q095Expr
transInfo$minExpr <- as.numeric(as.character(transInfo$minExpr))
head(transInfo)
save(transInfo, file = paste0(path_results, "transInfo.Rdata"))
#### Filtering itself
# Keep a probe set if it exceeds its GC-specific background threshold in at
# least 3 samples. Alignment of thresholds to rows is checked first.
all(rownames(expr) == transInfo$transcript_cluster_id)
# Vectorized version of the original per-row sapply loop: comparing the
# genes x samples matrix against a vector of length nrow(expr) recycles the
# vector down each column, i.e. expr[i, j] is compared to minExpr[i] --
# exactly the per-row threshold. unname() keeps the result an unnamed
# logical vector, matching the original sapply output.
keepEXPR <- unname(rowSums(expr > transInfo$minExpr) >= 3)
table(keepEXPR)
eset_main <- eset_main[keepEXPR, ]
eset_main_org <- eset_main
save(eset_main_org, file = paste0(path_results, "eset_main_org.Rdata"))
###########################################################################
##### Multiple plot function
###########################################################################
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # Arrange several ggplot objects on one page using a grid layout.
  # Plots come in via `...` and/or `plotlist`; `cols` sets the number of
  # grid columns unless an explicit `layout` matrix is given, in which case
  # plot k is drawn in every cell of `layout` containing the value k.
  # (`file` is accepted for backwards compatibility but unused.)
  require(grid)

  # Gather every plot into a single list, whichever way it was passed.
  plot_objs <- c(list(...), plotlist)
  n_plots <- length(plot_objs)

  # Default layout: fill a column-major grid with `cols` columns and as many
  # rows as needed to hold all plots.
  if (is.null(layout)) {
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq_len(cols * n_rows), ncol = cols, nrow = n_rows)
  }

  if (n_plots == 1) {
    # A single plot needs no viewport machinery.
    print(plot_objs[[1]])
  } else {
    # Fresh page with a viewport grid matching the layout matrix.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    for (k in seq_len(n_plots)) {
      # Cells of `layout` equal to k give the row/col span for plot k.
      cell <- as.data.frame(which(layout == k, arr.ind = TRUE))
      print(plot_objs[[k]],
            vp = viewport(layout.pos.row = cell$row,
                          layout.pos.col = cell$col))
    }
  }
}
###########################################################################
#### Comparison 1: leukemia VS. controls
#### fitting model for all data
###########################################################################
library(oligo)
library(pd.mogene.2.0.st)
library(limma)
# Reload the filtered objects saved above so this section can be run
# stand-alone after a restart.
load(paste0(path_results, "eset_main_org.Rdata"))
load(paste0(path_results, "targets_org.Rdata"))
targets <- targets_org
eset_main <- eset_main_org
### keep only leukemia and control CD4+, CD4+CD8+ and CD8+ and bone marrow samples
samples2keep <- grepl("leukemia|control", targets$labels)
targets <- targets[samples2keep,]
eset_main <- eset_main[, samples2keep]
### sort samples by groups
ord <- order(targets$groups)
targets <- targets[ord, ]
eset_main <- eset_main[ ,ord]
# all(sampleNames(eset_main) == strsplit2(targets$FileName, "//")[,2])
#### design & analysis
# One-way layout, no intercept: one design column per group level.
treatments <- data.frame(Treatment = as.character(targets$groups))
treatments
design <- model.matrix(~ 0 + Treatment, data=treatments)
rownames(design) <- targets$labels
design
fit <- lmFit(eset_main, design)
# Each contrast = leukemia minus one control group. NOTE(review): this
# hard-codes the design column order (4 control groups then leukemia as the
# 5th, alphabetical level order) -- the printed `design` above should be
# checked against these coefficients; TODO confirm.
contrasts <- cbind(CtrlCD4 = c(-1, 0, 0, 0, 1), CtrlCD4CD8 = c(0, -1, 0, 0, 1), CtrlCD8 = c(0, 0, -1, 0, 1), CtrlBM = c(0, 0, 0, -1, 1)) # treatment - control
contrasts
fit2 <- contrasts.fit(fit, contrasts)
# trend = TRUE: limma-trend, lets the prior variance depend on average
# expression; the SA plot below visualizes that trend.
fit2 <- eBayes(fit2, trend = TRUE)
pdf(paste0(path_plots, "Comp1_plotSA_trend.pdf"))
plotSA(fit2)
dev.off()
## with the FC cutoff
# DE calls at BH-adjusted p < 0.05 and |logFC| > 1, per contrast.
results <- decideTests(fit2, method="separate", adjust.method="BH", p.value=0.05, lfc=1)
summary(results)
colours <- unique(targets[targets$groups != "leukemia", "colors"])
pdf(paste0(path_plots, "Comp1_vennDiagram.pdf"))
vennDiagram(results,include=c("up", "down"), circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="both", circle.col=colours, counts.col=colours)
# vennDiagram(results,include="up", circle.col=colours, counts.col=colours)
# vennDiagram(results,include="down", circle.col=colours, counts.col=colours)
dev.off()
### save all results with nice order
# Assemble one wide results table: per-contrast statistics (coef, t, p,
# adjusted p, DE call) interleaved in `colOrder`, followed by the (rounded)
# per-sample expression values from the full unsubset eset.
coefs <- c("CtrlCD4", "CtrlCD4CD8", "CtrlCD8", "CtrlBM")
# resExpr <- round(exprs(eset_main), 2)
# colnames(resExpr) <- paste0(treatments$Treatment, "_", colnames(resExpr))
resExpr <- round(exprs(eset_main_org), 2)
colnames(resExpr) <- paste0(targets_org$labels, "_", colnames(resExpr))
resExpr <- resExpr[, order(colnames(resExpr))]
resCoeff <- fit2$coefficients
colnames(resCoeff) <- paste0(colnames(resCoeff), "_coeffs")
resT <- fit2$t
colnames(resT) <- paste0(colnames(resT), "_t")
resPValue <- fit2$p.value
colnames(resPValue) <- paste0(colnames(resPValue), "_PValues")
resPValueAdj <- apply(fit2$p.value, 2, p.adjust, method = "BH")
colnames(resPValueAdj) <- paste0(colnames(resPValueAdj), "_AdjPValues")
resGenes <- fit2$genes
resRes <- results
colnames(resRes) <- paste0(colnames(resRes), "_Results")
stats <- c("coeffs", "t", "PValues", "AdjPValues", "Results")
colOrder <- paste(rep(coefs, each = length(stats)), rep(stats, length(coefs)), sep = "_")
resDE <- data.frame(resCoeff, resT, resPValue, resPValueAdj, resRes)[, colOrder]
resAll <- cbind(resGenes, resDE, resExpr)
# Tab-separated text with an .xls extension so it opens directly in Excel.
write.table(resAll, file = paste0(path_results, "Comp1_DE_results_All.xls"), quote = FALSE, sep = "\t", row.names = FALSE)
### plot MA
# One MA plot per contrast: down-regulated red, up-regulated green,
# non-significant black (status from decideTests above).
pdf(paste0(path_plots, "Comp1_plotMA.pdf"))
for(i in 1:length(coefs)){
coef <- coefs[i]
limma::plotMA(fit2, coef = coef, status = results[, coef], values = c(-1, 0, 1), col = c("red", "black", "green"), cex = c(0.7, 0.3, 0.7), main = coef)
abline(0,0,col="blue")
}
dev.off()
### volcano plots
# Volcano plot per contrast; points passing both the adjusted-p and |logFC|
# cutoffs are colored via the `threshold` factor. All four panels are
# combined on one page with multiplot().
library(ggplot2)
coefs <- c("CtrlCD4", "CtrlCD4CD8", "CtrlCD8", "CtrlBM")
gg1 <- list()
for(i in 1:length(coefs)){
coef <- coefs[i]
table <- topTable(fit2, coef=coef, n=Inf)
table$threshold = as.factor(table$adj.P.Val < 0.05 & abs(table$logFC) > 1)
gg1[[i]] <- ggplot(data=table, aes(x=logFC, y=-log10(P.Value), colour=threshold)) +
geom_point(alpha=0.4, size=1.75) + theme_bw() +ggtitle(coef) +
theme(legend.position = "none") +
xlab("log2 fold change") + ylab("-log10 p-value")
}
pdf(paste0(path_plots, "Comp1_volcanoplot.pdf"))
print(multiplot(plotlist = gg1, cols=2))
dev.off()
### histograms of p-values and adjusted p-values
# Raw p-value histograms per contrast; a healthy comparison shows a spike
# near 0 over a uniform background. NOTE(review): `table` shadows
# base::table here -- harmless for these calls, but worth renaming.
colours <- unique(targets[targets$groups != "leukemia", "colors"])
pdf(paste0(path_plots, "Comp1_hist_pvs.pdf"))
for(i in 1:length(coefs)){
coef <- coefs[i]
table <- topTable(fit2, coef=coef, n=Inf)
hist(table$P.Value, breaks = 100, main = coef, xlab = "P-values", col = colours[i])
#hist(table$adj.P.Val, breaks = 100, main = coef, xlab = "Adjusted p-values")
}
dev.off()
# ### plot expression of top sign. genes/probesets
# library(ggplot2)
# library(reshape2)
#
# topn <- 20
# expr <- exprs(eset_main)
# xs <- 1:ncol(expr)
#
# for(i in 1:length(coefs)){
# # i = 1
# coef <- coefs[i]
# print(coef)
#
# tt <- topTable(fit2, coef=coef, n=Inf, p.value=0.05, lfc=1)
# # write.table(tt, paste0("Comp1_topTable_",coef,".xls"), quote = FALSE, sep = "\t", row.names = FALSE)
#
# ### in the report display only first gene symbol
# GeneSymbol <- strsplit2(head(tt[,"GeneSymbol"], topn), " /// ")[,1]
# GeneTitle <- paste0(substr(strsplit2(head(tt[,"GeneTitle"], topn), " /// ")[,1], 1, 30))
#
# print(data.frame(GeneSymbol = GeneSymbol, GeneTitle = GeneTitle , head(tt[, c("logFC", "AveExpr", "P.Value", "adj.P.Val")], topn)))
#
# topp <- rownames(tt)[1:topn]
#
# df <- data.frame(Gene = topp, expr[topp,])
# df.m <- reshape2::melt(df, id.vars = "Gene", value.name = "Expression", variable.name = "Sample")
# ### keep order of genes as in tt
# df.m$Gene <- factor(df.m$Gene, levels = topp)
# ### add Entrez ID to the facet labels
# lab.fct <- paste0(topp, "\n", strsplit2(tt[topp, "GeneSymbol"], " /// ")[,1])
# levels(df.m$Gene) <- lab.fct
#
# ggp <- ggplot(df.m, aes(x = Sample, y = Expression)) +
# theme_bw() +
# theme(axis.text.x = element_text(angle = 80, hjust = 1, size = 10), plot.title = element_text(size = 16), strip.text.x = element_text(size = 10)) +
# scale_x_discrete(labels=targets$groups) +
# labs(title = coef, y = "Log2 expression") +
# geom_bar(stat = "identity", colour = targets$colors, fill = targets$colors) +
# facet_wrap(~ Gene, scales="free_y", ncol=4)
#
# pdf(paste0(path_plots, "Comp1_topExpressionBarPlot_",coef,".pdf"), 11, 11)
# print(ggp)
# dev.off()
#
# }
###########################################################################
#### Gene set enrichment analysis with C5 - GO genes sets
###########################################################################
# gene sets from MSigDB with ENTREZ IDs (mouse homologs)
load("MSigDB_v4_0/mouse_c5_v4.rdata")
mysets <- Mm.c5
length(mysets)
### keep the sets of interest
### Normalise the user-provided names to the MSigDB convention:
### hyphens and spaces -> underscores, upper case.
intrset <- read.table("Gene_Sets/Interesting_gene_sets_C5.txt", header = FALSE, sep = ",", as.is = TRUE)[, 1]
intrset
intrset <- gsub("-", " ", intrset)
intrset <- gsub(" ", "_", intrset)
intrset <- toupper(intrset)
length(intrset)
sum(names(mysets) %in% intrset)
### FIX: subset via intersect() so names absent from mysets cannot create
### NULL/<NA> list entries (plain mysets[intrset] would).
mysets <- mysets[intersect(intrset, names(mysets))]
# table(sapply(mysets, length))
### Create an Index for camera
### Index[[set]] is a logical vector over probesets: TRUE when any of the
### probeset's " /// "-separated Entrez IDs belongs to the set.
annot <- fData(eset_main)
# table(annot$EntrezGeneID == "---")
### Too slow
# EntrezGeneID <- strsplit(annot$EntrezGeneID, " /// ")
# Index <- lapply(mysets, function(ms){sapply(EntrezGeneID, function(eg){any(eg %in% ms)})})
EntrezGeneID <- strsplit2(annot$EntrezGeneID, " /// ")
### FIX: do not shadow base::nrow / base::ncol with same-named variables
nr <- nrow(EntrezGeneID)
nc <- ncol(EntrezGeneID)
Index <- lapply(mysets, function(ms){
  eg <- matrix(EntrezGeneID %in% ms, nrow = nr, ncol = nc, byrow = FALSE)
  rowSums(eg) > 0
})
### Append a 0/1 membership column per gene set to the DE result table.
IndexMx <- do.call(cbind, Index)
class(IndexMx) <- "numeric"
colnames(IndexMx) <- names(mysets)
IndexMx <- data.frame(ProbesetID = annot$ProbesetID, IndexMx)
resAll <- merge(resAll, IndexMx, by = "ProbesetID", sort = FALSE)
write.table(resAll, file = "Comp1_DE_results_AllPlus.xls", quote = FALSE, sep = "\t", row.names = FALSE)
#### design & analysis
### No-intercept design (one column per group); each contrast below is
### treatment - control for one cell type.
treatments <- data.frame(Treatment = as.character(targets$groups))
design <- model.matrix(~ 0 + Treatment, data=treatments)
rownames(design) <- targets$labels
design
contrasts <- cbind(CtrlCD4 = c(-1, 0, 0, 0, 1), CtrlCD4CD8 = c(0, -1, 0, 0, 1), CtrlCD8 = c(0, 0, -1, 0, 1), CtrlBM = c(0, 0, 0, -1, 1)) # treatment - control
contrasts
### run CAMERA
gsea <- list()
coef <- "CtrlCD4"
### NOTE(review): trend.var=FALSE here but TRUE for the other contrasts below -- confirm intended.
gsea.tmp <- gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=FALSE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("NGenes","Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), NGenes = gsea[[coef]][,1], gsea[[coef]][,-1])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
### using information from eBayes fitting: fit2
### Barcode plots for the top `topgs` gene sets of this contrast.
### FIX: file name now carries the collection (C5) -- the C7 and Hallmark
### sections use the same coef, so the plain name was silently overwritten.
pdf(paste0("PLOTS/GS_barcodeplot_C5_",coef,".pdf"))
topgs <- 1
gsn <-rownames(gsea[[coef]])[1:topgs]
gss <- gsea.tmp[gsn, , drop = FALSE]
### FIX: iterate over the topgs selected sets; 1:length(topgs) was always 1:1
### regardless of topgs (it only worked because topgs happened to be 1).
for(i in seq_len(topgs)){
  barcodeplot(statistics = as.numeric((fit2$t[,coef])), index = Index[[gsn[i]]], index2 = NULL, gene.weights = as.numeric((fit2$coefficients[, coef]))[Index[[gsn[i]]]], weights.label = "logFC", labels = c("Up","Down"), quantiles = c(-1,1), col.bars = NULL, worm = TRUE, span.worm=0.45, main = paste0(gsn[i], "\n", gss[i, "Direction"], ", FDR = ", sprintf("%.02e",gss[i, "FDR"])))
  barcodeplot(statistics = as.numeric((fit2$p.value[, coef])), index = Index[[gsn[i]]], index2 = NULL, gene.weights = as.numeric((fit2$coefficients[, coef]))[Index[[gsn[i]]]], weights.label = "logFC", labels = c("Not significant","Significant"), quantiles = c(0.05,1), col.bars = NULL, worm = TRUE, span.worm=0.45, main = paste0(gsn[i], "\n", gss[i, "Direction"], ", FDR = ", sprintf("%.02e",gss[i, "FDR"])))
}
dev.off()
### CAMERA for the remaining contrasts (note trend.var=TRUE here, unlike CtrlCD4 above).
### Each block keeps Direction/PValue/FDR and prefixes columns with the contrast name.
coef <- "CtrlCD4CD8"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
coef <- "CtrlCD8"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
coef <- "CtrlBM"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
### merge all results into one table
gseaAll <- merge(gsea[["CtrlCD4"]], gsea[["CtrlCD4CD8"]], by = "GeneSet", all = TRUE)
gseaAll <- merge(gseaAll, gsea[["CtrlCD8"]], by = "GeneSet", all = TRUE)
gseaAll <- merge(gseaAll, gsea[["CtrlBM"]], by = "GeneSet", all = TRUE)
write.table(gseaAll, paste("Comp1_GSEA_C5_All.xls", sep=""), sep="\t", row.names=F, quote = FALSE)
# http://www.broadinstitute.org/gsea/doc/GSEAUserGuideTEXT.htm
###########################################################################
#### Gene set enrichment analysis with C7 Immunologic genes sets
###########################################################################
# gene sets from MSigDB with ENTREZ IDs (mouse homologs); all C7 sets are used
load("MSigDB_v4_0/mouse_c7_v4.rdata")
# Mm.c7[1]
mysets <- Mm.c7
length(mysets)
# table(sapply(mysets, length))
### Create an Index for camera
### Index[[set]]: TRUE for probesets whose " /// "-separated Entrez IDs
### overlap the set (same scheme as the C5 section).
annot <- fData(eset_main)
# table(annot$EntrezGeneID == "---")
### Too slow
# EntrezGeneID <- strsplit(annot$EntrezGeneID, " /// ")
# Index <- lapply(mysets, function(ms){sapply(EntrezGeneID, function(eg){any(eg %in% ms)})})
EntrezGeneID <- strsplit2(annot$EntrezGeneID, " /// ")
### FIX: do not shadow base::nrow / base::ncol with same-named variables
nr <- nrow(EntrezGeneID)
nc <- ncol(EntrezGeneID)
Index <- lapply(mysets, function(ms){
  eg <- matrix(EntrezGeneID %in% ms, nrow = nr, ncol = nc, byrow = FALSE)
  rowSums(eg) > 0
})
# ms <- mysets[[4]]
# eg <- matrix(EntrezGeneID %in% ms, nrow = nrow(EntrezGeneID), ncol = ncol(EntrezGeneID), byrow = FALSE)
# apply(eg, 2, sum)
# table(rowSums(eg) > 0)
#
# table(Index[[4]])
# ms <- c(1, 2, 3)
# eg <- c(2, 4)
# any(eg %in% ms)
#### design & analysis
### Same design/contrasts as the C5 section (treatment - control per cell type).
treatments <- data.frame(Treatment = as.character(targets$groups))
design <- model.matrix(~ 0 + Treatment, data=treatments)
rownames(design) <- targets$labels
design
contrasts <- cbind(CtrlCD4 = c(-1, 0, 0, 0, 1), CtrlCD4CD8 = c(0, -1, 0, 0, 1), CtrlCD8 = c(0, 0, -1, 0, 1), CtrlBM = c(0, 0, 0, -1, 1)) # treatment - control
contrasts
### run CAMERA
gsea <- list()
coef <- "CtrlCD4"
### NOTE(review): trend.var=FALSE here but TRUE for the other contrasts below -- confirm intended.
gsea.tmp <- gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=FALSE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("NGenes","Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), NGenes = gsea[[coef]][,1], gsea[[coef]][,-1])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
### using information from eBayes fitting: fit2
### FIX: include the collection (C7) in the file name -- the C5 and Hallmark
### sections use the same coef, so the plain name was silently overwritten.
pdf(paste0("PLOTS/GS_barcodeplot_C7_",coef,".pdf"))
topgs <- 1
gsn <-rownames(gsea[[coef]])[1:topgs]
gss <- gsea.tmp[gsn, , drop = FALSE]
### FIX: iterate over the topgs selected sets; 1:length(topgs) was always 1:1
### regardless of topgs (it only worked because topgs happened to be 1).
for(i in seq_len(topgs)){
  barcodeplot(statistics = as.numeric((fit2$t[,coef])), index = Index[[gsn[i]]], index2 = NULL, gene.weights = as.numeric((fit2$coefficients[, coef]))[Index[[gsn[i]]]], weights.label = "logFC", labels = c("Up","Down"), quantiles = c(-1,1), col.bars = NULL, worm = TRUE, span.worm=0.45, main = paste0(gsn[i], "\n", gss[i, "Direction"], ", FDR = ", sprintf("%.02e",gss[i, "FDR"])))
  barcodeplot(statistics = as.numeric((fit2$p.value[, coef])), index = Index[[gsn[i]]], index2 = NULL, gene.weights = as.numeric((fit2$coefficients[, coef]))[Index[[gsn[i]]]], weights.label = "logFC", labels = c("Not significant","Significant"), quantiles = c(0.05,1), col.bars = NULL, worm = TRUE, span.worm=0.45, main = paste0(gsn[i], "\n", gss[i, "Direction"], ", FDR = ", sprintf("%.02e",gss[i, "FDR"])))
}
dev.off()
### CAMERA for the remaining contrasts against the C7 sets (trend.var=TRUE here).
coef <- "CtrlCD4CD8"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
coef <- "CtrlCD8"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
coef <- "CtrlBM"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
### merge all results into one table
gseaAll <- merge(gsea[["CtrlCD4"]], gsea[["CtrlCD4CD8"]], by = "GeneSet", all = TRUE)
gseaAll <- merge(gseaAll, gsea[["CtrlCD8"]], by = "GeneSet", all = TRUE)
gseaAll <- merge(gseaAll, gsea[["CtrlBM"]], by = "GeneSet", all = TRUE)
write.table(gseaAll, paste("Comp1_GSEA_C7_All.xls", sep=""), sep="\t", row.names=F, quote = FALSE)
# http://www.broadinstitute.org/gsea/doc/GSEAUserGuideTEXT.htm
###########################################################################
#### Gene set enrichment analysis with Hallmark genes sets
###########################################################################
############### Create mouse_hallmark_v5.rdata object like on WEHI web
### Parse the human Hallmark .gmt file: one set per line, tab-separated
### fields = set name, URL, then the Entrez gene IDs.
allLines <- readLines("MSigDB_v4_0/h.all.v5.0.entrez.gmt", n = -1)
gmtFields <- strsplit(allLines, "\t", fixed = TRUE)
### FIX: the original apply(humanSets, 1, ...) returns a list only when the
### per-row lengths differ; with equal-sized gene sets it would silently
### return a matrix and break the downstream lapply(). Build the list explicitly.
Hu.hallmark <- lapply(gmtFields, function(fields){
  ids <- fields[-c(1, 2)]
  as.numeric(ids[ids != ""])
})
names(Hu.hallmark) <- vapply(gmtFields, `[[`, character(1), 1)
### get the mouse human homology (MGI homology table keyed by HomoloGene ID)
hom <- read.table("MSigDB_v4_0/HOM_MouseHumanSequence.txt", header = TRUE, sep = "\t")
homM <- hom[hom$Common.Organism.Name == "mouse, laboratory", c("HomoloGene.ID", "EntrezGene.ID")]
homH <- hom[hom$Common.Organism.Name == "human", c("HomoloGene.ID", "EntrezGene.ID")]
homMatch <- merge(homH, homM, by = "HomoloGene.ID", sort = FALSE, all = TRUE)
### keep only pairs with both a human (col 2) and a mouse (col 3) Entrez ID
homMatch <- homMatch[!is.na(homMatch[, 2]) & !is.na(homMatch[, 3]), ]
### map each human set to the unique mouse homolog Entrez IDs
Mm.hallmark <- lapply(Hu.hallmark, function(gs){
  unique(homMatch[homMatch[, 2] %in% gs, 3])
})
save(Mm.hallmark, file = "MSigDB_v4_0/mouse_hallmark_v5.rdata")
############### Create mouse_hallmark_v5.rdata object like on WEHI web
# gene sets from MSigDB with ENTREZ IDs (mouse homologs built above)
load("MSigDB_v4_0/mouse_hallmark_v5.rdata")
mysets <- Mm.hallmark
length(mysets)
### keep the sets of interest
intrset <- read.table("Gene_Sets/Interesting_gene_sets_Hallmark.txt", header = FALSE, sep = ",", as.is = TRUE)[, 1]
intrset
intrset <- gsub("-", " ", intrset)
intrset <- gsub(" ", "_", intrset)
intrset <- paste0("HALLMARK_",toupper(intrset))
length(intrset)
sum(names(mysets) %in% intrset)
# intrset[!intrset %in% names(mysets)]
### FIX: subset via intersect() so names absent from mysets cannot create
### NULL/<NA> list entries (plain mysets[intrset] would).
mysets <- mysets[intersect(intrset, names(mysets))]
# table(sapply(mysets, length))
### Create an Index for camera (same scheme as the C5/C7 sections)
annot <- fData(eset_main)
# table(annot$EntrezGeneID == "---")
### Too slow
# EntrezGeneID <- strsplit(annot$EntrezGeneID, " /// ")
# Index <- lapply(mysets, function(ms){sapply(EntrezGeneID, function(eg){any(eg %in% ms)})})
EntrezGeneID <- strsplit2(annot$EntrezGeneID, " /// ")
### FIX: do not shadow base::nrow / base::ncol with same-named variables
nr <- nrow(EntrezGeneID)
nc <- ncol(EntrezGeneID)
Index <- lapply(mysets, function(ms){
  eg <- matrix(EntrezGeneID %in% ms, nrow = nr, ncol = nc, byrow = FALSE)
  rowSums(eg) > 0
})
### Append 0/1 membership columns; note this overwrites the file written in
### the C5 section with a version that also carries the Hallmark columns.
IndexMx <- do.call(cbind, Index)
class(IndexMx) <- "numeric"
colnames(IndexMx) <- names(mysets)
IndexMx <- data.frame(ProbesetID = annot$ProbesetID, IndexMx)
resAll <- merge(resAll, IndexMx, by = "ProbesetID", sort = FALSE)
write.table(resAll, file = "Comp1_DE_results_AllPlus.xls", quote = FALSE, sep = "\t", row.names = FALSE)
#### design & analysis
### Same design/contrasts as the C5/C7 sections (treatment - control per cell type).
treatments <- data.frame(Treatment = as.character(targets$groups))
design <- model.matrix(~ 0 + Treatment, data=treatments)
rownames(design) <- targets$labels
design
contrasts <- cbind(CtrlCD4 = c(-1, 0, 0, 0, 1), CtrlCD4CD8 = c(0, -1, 0, 0, 1), CtrlCD8 = c(0, 0, -1, 0, 1), CtrlBM = c(0, 0, 0, -1, 1)) # treatment - control
contrasts
### run CAMERA
gsea <- list()
coef <- "CtrlCD4"
### NOTE(review): trend.var=FALSE here but TRUE for the other contrasts below -- confirm intended.
gsea.tmp <- gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=FALSE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("NGenes","Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), NGenes = gsea[[coef]][,1], gsea[[coef]][,-1])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
### using information from eBayes fitting: fit2
### FIX: include the collection (Hallmark) in the file name -- the C5 and C7
### sections use the same coef, so the plain name was silently overwritten.
pdf(paste0("PLOTS/GS_barcodeplot_Hallmark_",coef,".pdf"))
topgs <- 1
gsn <-rownames(gsea[[coef]])[1:topgs]
gss <- gsea.tmp[gsn, , drop = FALSE]
### FIX: iterate over the topgs selected sets; 1:length(topgs) was always 1:1
### regardless of topgs (it only worked because topgs happened to be 1).
for(i in seq_len(topgs)){
  barcodeplot(statistics = as.numeric((fit2$t[,coef])), index = Index[[gsn[i]]], index2 = NULL, gene.weights = as.numeric((fit2$coefficients[, coef]))[Index[[gsn[i]]]], weights.label = "logFC", labels = c("Up","Down"), quantiles = c(-1,1), col.bars = NULL, worm = TRUE, span.worm=0.45, main = paste0(gsn[i], "\n", gss[i, "Direction"], ", FDR = ", sprintf("%.02e",gss[i, "FDR"])))
  barcodeplot(statistics = as.numeric((fit2$p.value[, coef])), index = Index[[gsn[i]]], index2 = NULL, gene.weights = as.numeric((fit2$coefficients[, coef]))[Index[[gsn[i]]]], weights.label = "logFC", labels = c("Not significant","Significant"), quantiles = c(0.05,1), col.bars = NULL, worm = TRUE, span.worm=0.45, main = paste0(gsn[i], "\n", gss[i, "Direction"], ", FDR = ", sprintf("%.02e",gss[i, "FDR"])))
}
dev.off()
### CAMERA for the remaining contrasts against the Hallmark sets (trend.var=TRUE here).
coef <- "CtrlCD4CD8"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
coef <- "CtrlCD8"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
coef <- "CtrlBM"
gsea[[coef]] <- camera(y = eset_main, index=Index, design=design, contrast=contrasts[,coef], trend.var=TRUE)
head(gsea[[coef]], 10)
table(gsea[[coef]]$FDR < 0.05)
gsea[[coef]] <- gsea[[coef]][, c("Direction", "PValue", "FDR")]
colnames(gsea[[coef]]) <- paste0(coef, "_", colnames(gsea[[coef]]))
gsea[[coef]] <- data.frame(GeneSet = rownames(gsea[[coef]]), gsea[[coef]])
# write.table(gsea[[coef]], paste("Comp1_GSEA_",coef ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
### merge all results into one table
gseaAll <- merge(gsea[["CtrlCD4"]], gsea[["CtrlCD4CD8"]], by = "GeneSet", all = TRUE)
gseaAll <- merge(gseaAll, gsea[["CtrlCD8"]], by = "GeneSet", all = TRUE)
gseaAll <- merge(gseaAll, gsea[["CtrlBM"]], by = "GeneSet", all = TRUE)
write.table(gseaAll, paste("Comp1_GSEA_Hallmark_All.xls", sep=""), sep="\t", row.names=F, quote = FALSE)
# http://www.broadinstitute.org/gsea/doc/GSEAUserGuideTEXT.htm
###########################################################################
### Clustering for all genes based on DE results (-1, 0, 1)
###########################################################################
### Restore the full sample set, then keep leukemia + control CD4/CD4CD8/CD8.
targets <- targets_org
eset_main <- eset_main_org
### keep only leukemia and control CD4+, CD4+CD8+ and CD8+ samples
samples2keep <- targets_org$ExperimentShort != "afterTreatment" & targets_org$labels != "control_HeLa" & targets_org$labels != "control_wholeBoneMarrow"
targets <- targets[samples2keep,]
eset_main <- eset_main[, samples2keep]
### sort samples by groups
ord <- order(targets$groups)
targets <- targets[ord, ]
eset_main <- eset_main[ ,ord]
expr <- exprs(eset_main)
### normalize expression per gene (row-wise z-scores across samples)
exprNorm <- t(scale(t(expr), center = TRUE, scale = TRUE))
####### load the DE results
## does not work
# resAll <- read.table("Comp1_DE_results_All.xls", header = TRUE, sep = "\t", stringsAsFactors = FALSE)
library(limma)
### re-read the saved DE table manually; note every column comes back as character
allLines <- readLines("Comp1_DE_results_All.xls", n = -1)[-1]
resAll <- data.frame(strsplit2(allLines, "\t"), stringsAsFactors = FALSE)
colnames(resAll) <- strsplit2(readLines("Comp1_DE_results_All.xls", n = 1), "\t")
resAll <- resAll[, !grepl(pattern = "CEL", colnames(resAll))]
### FIX: the p-value columns are character after the manual re-read, so a plain
### order() sorted them lexicographically (e.g. "0.5" before "1e-10");
### convert to numeric before ordering.
resAllSort <- resAll[order(as.numeric(resAll$CtrlCD4_PValues), as.numeric(resAll$CtrlCD4CD8_PValues), as.numeric(resAll$CtrlCD8_PValues), decreasing = FALSE), ]
### cluster label = concatenated -1/0/1 DE calls of the three T-cell contrasts, e.g. "1,0,-1"
resAllSort$clusters <- apply(resAllSort[, c("CtrlCD4_Results", "CtrlCD4CD8_Results", "CtrlCD8_Results")], MARGIN = 1, paste, collapse = ",")
resAllSort <- resAllSort[resAllSort$clusters != "0,0,0", ]
library(gtools)
### all 3^3 possible labels (minus the all-zero one) fix the cluster ordering
clusters <- apply(permutations(n=3, r=3, v = c(-1, 0, 1), repeats.allowed=TRUE), MARGIN = 1, paste, collapse = ",")
clusters <- clusters[clusters != "0,0,0"]
resAllSort$clusters <- factor(resAllSort$clusters, levels = clusters)
resAllSort <- resAllSort[order(resAllSort$clusters), ]
# unique(resAllSort$clusters)
### number of genes in clusters
table(resAllSort$clusters)
##### Create a heat map with all the clusters
### Rows = DE probesets grouped by cluster, columns = samples grouped by group.
intrProbes <- as.character(resAllSort$ProbesetID)
# dataHeat <- expr[intrProbes, ]
dataHeat <- exprNorm[intrProbes, ]
annotation_col <- targets[, "groups", drop = FALSE]
rownames(annotation_col) <- colnames(dataHeat)
cols <- unique(targets$colors)
### FIX: was targets$group -- worked only via `$` partial matching; the column is `groups`
names(cols) <- unique(targets$groups)
annotation_colors <- list(groups = cols)
labels_row <- strsplit2(resAllSort$GeneSymbol, " /// ")[, 1]
library(pheatmap)
pdf("PLOTS/heatmap_clusters.pdf", width = 7, height = 10)
pheatmap(dataHeat, color = colorRamps::matlab.like(100), cluster_cols = FALSE, cluster_rows = FALSE, annotation_col = annotation_col, annotation_colors = annotation_colors, labels_col = targets$groups, labels_row = rep("", nrow(dataHeat)), annotation_legend = FALSE, fontsize_row = 8, gaps_col = cumsum(table(targets$groups)), gaps_row = cumsum(table(resAllSort$clusters)),breaks = seq(from = -4, to = 4, length.out = 101), legend_breaks = seq(from = -4, to = 4, by = 2))
dev.off()
write.table(resAllSort, file = "Comp1_DEclusters.xls", quote = FALSE, sep = "\t", row.names = FALSE)
###########################################################################
#### GO analysis per cluster
###########################################################################
# source("http://bioconductor.org/biocLite.R")
# biocLite("topGO")
# source("http://bioconductor.org/biocLite.R")
# biocLite("Rgraphviz")
library(topGO)
library(Rgraphviz)
### annotation package mapping this array's probesets to GO terms (for annFUN.db)
affyLib <- "mogene20sttranscriptcluster.db"
### Function used to create new topGOdata object
### Gene-selection predicate for topGOdata: a gene is "interesting" when its
### score is non-zero (scores are 0/1 membership flags here).
### FIX: replaced return(geneList <- ifelse(geneList==0, FALSE, TRUE)) with the
### direct comparison -- identical result (names and NA preserved), no
### pointless assignment inside return().
fun.gene.sel <- function(geneList) {
  geneList != 0
}
### GO enrichment (elim Fisher) per DE cluster with >= 50 genes.
### keep the clusters with at least 50 genes
cls <- levels(resAllSort$clusters)[table(resAllSort$clusters) > 50]
allResList <- list()
for(cl in cls){
# cl <- cls[1]
### gene universe = all probesets; score 1 marks membership in this cluster
geneList <- rep(0, nrow(resAll))
names(geneList) <- resAll$ProbesetID
geneList[resAllSort[resAllSort$clusters == cl, "ProbesetID"]] <- 1
table(geneList)
for(go in c("BP","MF","CC")){
# go = "BP"
cat("Cluster:", cl, "go:", go, "\n")
sampleGOdata <- new("topGOdata", description = paste0("Simple session for ", cl), ontology = go, allGenes = geneList, geneSel = fun.gene.sel , nodeSize = 10, annot = annFUN.db, affyLib = affyLib)
# print(sampleGOdata)
result <- runTest(sampleGOdata, algorithm = "elim", statistic = "fisher")
pValues <- score(result)
topNodes <- length(pValues)
allRes <- GenTable(sampleGOdata, elimFisher = result, orderBy = "elimFisher", topNodes = topNodes)
### column 6 is the elimFisher p-value column produced by GenTable
colnames(allRes)[6] <- "PValues"
allRes$GO <- go
# pdf(paste("PLOTS/GO_",cl, "_" ,go, ".pdf", sep=""))
# showSigOfNodes(sampleGOdata, score(result), firstSigNodes = 5, useInfo = 'all')
# dev.off()
allRes$AdjPValues <- p.adjust(allRes$PValues, method = "BH")
# cat("#########################################################################################", fill = TRUE)
# print(table(allRes$AdjPValues < 0.05))
# print(head(allRes, 20))
# cat("#########################################################################################", fill = TRUE)
# write.table(allRes, paste("Comp1_GO_Fisher_elim_",cl, "_", go ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
allResList[[paste0(cl, "_", go)]] <- allRes
}
}
save(allResList, file = "Comp1_GO_Clusters_Fisher_elim.rdata")
#### save results
### One file per ontology: shared GO columns plus per-cluster statistics.
for(go in c("BP","MF","CC")){
cl <- cls[1]
allR <- allResList[[paste0(cl, "_", go)]]
allAll <- allR[, c("GO.ID", "GO", "Term", "Annotated")]
for(cl in cls){
# cl = cls[1]
allR <- allResList[[paste0(cl, "_", go)]][, c("GO.ID", "Significant", "Expected", "PValues", "AdjPValues")]
### add cluster names to columns
colnames(allR) <- paste0(c("", rep(paste0("CL(",cl, ")_"), 4)), colnames(allR))
### merge all results into one table
allAll <- merge(allAll, allR, by = "GO.ID", sort = FALSE)
}
write.table(allAll, paste0("Comp1_GO_Clusters_Fisher_elim_", go ,".xls"), sep="\t", row.names=F, quote = FALSE)
}
###########################################################################
#### GO analysis per control - up or down regulation
###########################################################################
library(topGO)
library(Rgraphviz)
### annotation package mapping this array's probesets to GO terms (for annFUN.db)
affyLib <- "mogene20sttranscriptcluster.db"
### Function used to create new topGOdata object
### Gene-selection predicate for topGOdata: a gene is "interesting" when its
### score is non-zero (scores are 0/1 membership flags here).
### FIX: same simplification as the earlier copy of this helper -- the direct
### comparison is identical to ifelse(geneList==0, FALSE, TRUE) (names and NA
### preserved) without the assignment-inside-return.
fun.gene.sel <- function(geneList) {
  geneList != 0
}
### GO enrichment per contrast split by direction: cls pairs each contrast
### name with -1 (down) and 1 (up).
cls <- rep(c(-1, 1), times = 4)
names(cls) <- rep(c("CtrlCD4", "CtrlCD4CD8", "CtrlCD8", "CtrlBM"), each = 2)
allResList <- list()
for(cl in 1:length(cls)){
# cl <- cls[1]
### NOTE(review): the loop index `cl` is immediately reused to hold the named
### element cls[cl]; works, but watch the double meaning when editing.
cl <- cls[cl]
print(cl)
geneList <- rep(0, nrow(resAll))
names(geneList) <- resAll$ProbesetID
### mark probesets whose decideTests call for this contrast equals -1 or 1
geneList[resAll[resAll[, paste0(names(cl), "_Results")] == cl, "ProbesetID"]] <- 1
table(geneList)
### list key, e.g. "CtrlCD4.-1" or "CtrlCD4.1"
cl <- paste0(names(cl),".", cl)
for(go in c("BP","MF","CC")){
# go = "BP"
cat("Cluster:", cl, "go:", go, "\n")
sampleGOdata <- new("topGOdata", description = paste0("Simple session for ", cl), ontology = go, allGenes = geneList, geneSel = fun.gene.sel , nodeSize = 10, annot = annFUN.db, affyLib = affyLib)
# print(sampleGOdata)
result <- runTest(sampleGOdata, algorithm = "elim", statistic = "fisher")
pValues <- score(result)
topNodes <- length(pValues)
allRes <- GenTable(sampleGOdata, elimFisher = result, orderBy = "elimFisher", topNodes = topNodes)
colnames(allRes)[6] <- "PValues"
allRes$GO <- go
# pdf(paste("PLOTS/GO_",cl, "_" ,go, ".pdf", sep=""))
# showSigOfNodes(sampleGOdata, score(result), firstSigNodes = 5, useInfo = 'all')
# dev.off()
allRes$AdjPValues <- p.adjust(allRes$PValues, method = "BH")
# cat("#########################################################################################", fill = TRUE)
# print(table(allRes$AdjPValues < 0.05))
# print(head(allRes, 20))
# cat("#########################################################################################", fill = TRUE)
# write.table(allRes, paste("Comp1_GO_Fisher_elim_",cl, "_", go ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
allResList[[paste0(cl, "_", go)]] <- allRes
}
}
save(allResList, file = "Comp1_GO_UpDown_Fisher_elim.rdata")
#### save results
### rebuild the list keys ("name.direction") used above
cls <- paste0(names(cls), "." ,cls)
for(go in c("BP","MF","CC")){
cl <- cls[1]
allR <- allResList[[paste0(cl, "_", go)]]
allAll <- allR[, c("GO.ID", "GO", "Term", "Annotated")]
for(cl in cls){
# cl = cls[1]
allR <- allResList[[paste0(cl, "_", go)]][, c("GO.ID", "Significant", "Expected", "PValues", "AdjPValues")]
### add cluster names to columns
colnames(allR) <- paste0(c("", rep(paste0("CL(",cl, ")_"), 4)), colnames(allR))
### merge all results into one table
allAll <- merge(allAll, allR, by = "GO.ID", sort = FALSE)
}
write.table(allAll, paste0("Comp1_GO_UpDown_Fisher_elim_", go ,".xls"), sep="\t", row.names=F, quote = FALSE)
}
###########################################################################
#### GO analysis per control
###########################################################################
# source("http://bioconductor.org/biocLite.R")
# biocLite("topGO")
# source("http://bioconductor.org/biocLite.R")
# biocLite("Rgraphviz")
library(topGO)
library(Rgraphviz)
### annotation package mapping this array's probesets to GO terms (for annFUN.db)
affyLib <- "mogene20sttranscriptcluster.db"
### Function used to create new topGOdata object
### Gene-selection predicate for topGOdata: flags genes with a non-zero score
### as "interesting" (scores here are 0/1 membership indicators).
fun.gene.sel <- function(geneList) {
  is.selected <- !(geneList == 0)
  return(is.selected)
}
### GO enrichment per contrast: significant = adj.P < 0.05 and |logFC| > 1.
coefs <- c("CtrlCD4", "CtrlCD4CD8", "CtrlCD8", "CtrlBM")
allResList <- list()
for(coef in coefs){
# coef <- coefs[1]
tt <- topTable(fit2, coef=coef, n=Inf)
geneList <- rep(0, nrow(tt))
names(geneList) <- rownames(tt)
geneList[tt$adj.P.Val < 0.05 & abs(tt$logFC) > 1] <- 1
print(table(geneList))
for(go in c("BP","MF","CC")){
# go = "BP"
print(coef)
print(go)
sampleGOdata <- new("topGOdata", description = paste0("Simple session for ", coef), ontology = go, allGenes = geneList, geneSel = fun.gene.sel , nodeSize = 10, annot = annFUN.db, affyLib = affyLib)
print(sampleGOdata)
result <- runTest(sampleGOdata, algorithm = "elim", statistic = "fisher")
pValues <- score(result)
topNodes <- length(pValues)
allRes <- GenTable(sampleGOdata, elimFisher = result, orderBy = "elimFisher", topNodes = topNodes)
### column 6 is the elimFisher p-value column produced by GenTable
colnames(allRes)[6] <- "PValues"
### GO-graph snapshot of the 5 most significant nodes for this contrast/ontology
pdf(paste("PLOTS/GO_",coef, "_" ,go, ".pdf", sep=""))
showSigOfNodes(sampleGOdata, score(result), firstSigNodes = 5, useInfo = 'all')
dev.off()
allRes$AdjPValues <- p.adjust(allRes$PValues, method = "BH")
print(table(allRes$AdjPValues < 0.05))
cat("#########################################################################################", fill = TRUE)
print(head(allRes, 20))
cat("#########################################################################################", fill = TRUE)
# write.table(allRes, paste("Comp1_GO_Fisher_elim_",coef, "_", go ,".xls", sep=""), sep="\t", row.names=F, quote = FALSE)
allResList[[paste0(go, "_", coef)]] <- allRes
}
}
#### save results
### One file per ontology; the first contrast keeps the shared GO/Term columns,
### the others drop them (columns 2 and 3) before the merge.
for(go in c("BP","MF","CC")){
coef <- "CtrlCD4"
allR <- allResList[[paste0(go, "_", coef)]]
colnames(allR) <- paste0(c(rep("", 3), rep(paste0(coef, "_"), 4)), colnames(allR))
allResList[[paste0(go, "_", coef)]] <- allR
coef <- "CtrlCD4CD8"
allR <- allResList[[paste0(go, "_", coef)]][, -c(2, 3)]
colnames(allR) <- paste0(c("", rep(paste0(coef, "_"), 4)), colnames(allR))
allResList[[paste0(go, "_", coef)]] <- allR
coef <- "CtrlCD8"
allR <- allResList[[paste0(go, "_", coef)]][, -c(2, 3)]
colnames(allR) <- paste0(c("", rep(paste0(coef, "_"), 4)), colnames(allR))
allResList[[paste0(go, "_", coef)]] <- allR
coef <- "CtrlBM"
allR <- allResList[[paste0(go, "_", coef)]][, -c(2, 3)]
colnames(allR) <- paste0(c("", rep(paste0(coef, "_"), 4)), colnames(allR))
allResList[[paste0(go, "_", coef)]] <- allR
### merge all results into one table
allAll <- merge(allResList[[paste0(go, "_", "CtrlCD4")]], allResList[[paste0(go, "_", "CtrlCD4CD8")]], by = "GO.ID", all = TRUE)
allAll <- merge(allAll, allResList[[paste0(go, "_", "CtrlCD8")]], by = "GO.ID", all = TRUE)
allAll <- merge(allAll, allResList[[paste0(go, "_", "CtrlBM")]], by = "GO.ID", all = TRUE)
write.table(allAll, paste0("Comp1_GO_Fisher_elim_", go ,".xls"), sep="\t", row.names=F, quote = FALSE)
}
###########################################################################
#### Comparison 2: ALL pre VS ALL after treatment
###########################################################################
targets <- targets_org
eset_main <- eset_main_org
### keep only leukemia and afterTreatment samples
samples2keep <- grepl("leukemia|afterTreatment", targets$labels)
targets <- targets[samples2keep,]
eset_main <- eset_main[, samples2keep]
### sort samples by groups
ord <- order(targets$groups)
targets <- targets[ord, ]
eset_main <- eset_main[ ,ord]
# all(sampleNames(eset_main) == strsplit2(targets$FileName, "//")[,2])
#### design & analysis
### Intercept design with leukemia as reference, so the single coefficient
### "TreatmentafterTreatment" is afterTreatment - leukemia.
treatments <- data.frame(Treatment = as.character(targets$groups))
### FIX: relevel() requires a factor; since R 4.0 data.frame() keeps character
### columns as character, so wrap in factor() before releveling.
treatments$Treatment <- relevel(factor(treatments$Treatment), ref = "leukemia")
treatments
design <- model.matrix(~Treatment, data=treatments)
rownames(design) <- targets$labels
design
fit <- lmFit(eset_main, design)
fit2 <- eBayes(fit[, "TreatmentafterTreatment"], trend = TRUE)
pdf(paste0(path_plots, "Comp2_plotSA_trend.pdf"))
plotSA(fit2)
dev.off()
## with the FC cutoff
### DE calls for the single afterTreatment-vs-leukemia coefficient.
results <- decideTests(fit2, method="separate", adjust.method="BH", p.value=0.05, lfc=1)
summary(results)
colours <- unique(targets[targets$groups == "afterTreatment", "colors"])
pdf(paste0(path_plots, "Comp2_vennDiagram.pdf"))
vennDiagram(results,include=c("up", "down"), circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="both", circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="up", circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="down", circle.col=colours, counts.col=c("gold", "darkblue"))
dev.off()
table <- topTable(fit2, coef = 1, n = Inf)
### save all results with nice order
### Single-contrast version of the Comp1 result-table assembly;
### expression columns come from ALL original samples (eset_main_org).
resCoeff <- fit2$coefficients
resT <- fit2$t
resPValue <- fit2$p.value
resPValueAdj <- apply(fit2$p.value, 2, p.adjust, method = "BH")
resRes <- results[, 1]
resDE <- data.frame(resCoeff, resT, resPValue, resPValueAdj, resRes)
colnames(resDE) <- paste0("afterTreatment_all_", c("coeffs", "t", "PValues", "AdjPValues", "Results"))
resGenes <- fit2$genes
resExpr <- round(exprs(eset_main_org), 2)
colnames(resExpr) <- paste0(targets_org$labels, "_", colnames(resExpr))
resExpr <- resExpr[, order(colnames(resExpr))]
resAll <- cbind(resGenes, resDE, resExpr)
write.table(resAll, file = paste0(path_results, "Comp2_DE_results_All.xls"), quote = FALSE, sep = "\t", row.names = FALSE)
### plot MA
pdf(paste0(path_plots, "Comp2_plotMA.pdf"))
limma::plotMA(fit2, coef = 1, status = results, values = c(-1, 0, 1), col = c("red", "black", "green"), cex = c(0.7, 0.3, 0.7))
abline(0,0,col="blue")
dev.off()
### volcano plots
library(ggplot2)
table <- topTable(fit2, coef = 1, n=Inf)
table$threshold = as.factor(table$adj.P.Val < 0.05 & abs(table$logFC) > 1)
gg2 <- ggplot(data=table, aes(x=logFC, y=-log10(P.Value), colour=threshold)) + geom_point(alpha=0.4, size=1.75) + theme_bw() + theme(legend.position = "none") + xlab("log2 fold change") + ylab("-log10 p-value") + ggtitle("after Treatment")
pdf(paste0(path_plots, "Comp2_volcanoplot.pdf"))
print(gg2)
dev.off()
### histograms of p-values and adjusted p-values
colours <- unique(targets[targets$groups != "leukemia", "colors"])
pdf(paste0(path_plots, "Comp2_hist_pvs.pdf"))
table <- topTable(fit2, coef = 1, n=Inf)
hist(table$P.Value, breaks = 100, main = "afterTreatment", xlab = "P-values", col = colours)
dev.off()
###########################################################################
#### Comparison 3a: pre VS after treatment with matched samples pooled
###########################################################################
## Same pre-vs-after contrast as Comparison 2, but restricted to cell types
## that actually have afterTreatment samples ("matched"), still pooled over
## cell type (the design has Treatment only; 3b adds CellType).
library(oligo)
library(pd.mogene.2.0.st)
library(limma)
load(paste0(path_results, "eset_main_org.Rdata"))
load(paste0(path_results, "targets_org.Rdata"))
targets <- targets_org
eset_main <- eset_main_org
# cell types represented among afterTreatment samples (Freq > 0)
tt <- table(targets$CellTypeShort, targets$groups)
tt <- data.frame(tt, stringsAsFactors = FALSE)
cell_types <- as.character(tt[tt$Var2 == "afterTreatment" & tt$Freq > 0, "Var1"])
### keep only leukemia and afterTreatment samples that have matched cell type
samples2keep <- grepl("leukemia|afterTreatment", targets$labels) & targets$CellTypeShort %in% cell_types
targets <- targets[samples2keep,]
eset_main <- eset_main[, samples2keep]
### sort samples by groups
ord <- order(targets$groups)
targets <- targets[ord, ]
eset_main <- eset_main[ ,ord]
# all(sampleNames(eset_main) == strsplit2(targets$FileName, "//")[,2])
#### design & analysis
treatments <- data.frame(Treatment = as.character(targets$groups), CellType = targets$CellTypeShort)
# NOTE(review): relevel() needs a factor -- see the note in Comparison 2.
treatments$Treatment <- relevel(treatments$Treatment, ref = "leukemia")
treatments
design <- model.matrix(~ Treatment, data = treatments)
rownames(design) <- targets$labels
design
fit <- lmFit(eset_main, design)
fit2 <- eBayes(fit[, "TreatmentafterTreatment"], trend = TRUE)
pdf(paste0(path_plots, "Comp3a_plotSA_trend.pdf"))
plotSA(fit2)
dev.off()
## with the FC cutoff
results <- decideTests(fit2, method="separate", adjust.method="BH", p.value=0.05, lfc=1)
summary(results)
colours <- unique(targets[targets$groups == "afterTreatment", "colors"])
pdf(paste0(path_plots, "Comp3a_vennDiagram.pdf"))
vennDiagram(results,include=c("up", "down"), circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="both", circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="up", circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="down", circle.col=colours, counts.col=c("gold", "darkblue"))
dev.off()
table <- topTable(fit2, coef = 1, n = Inf)
### save all results with nice order
resCoeff <- fit2$coefficients
resT <- fit2$t
resPValue <- fit2$p.value
resPValueAdj <- apply(fit2$p.value, 2, p.adjust, method = "BH")
resRes <- results[, 1]
resDE <- data.frame(resCoeff, resT, resPValue, resPValueAdj, resRes)
colnames(resDE) <- paste0("afterTreatment_matched_pooled_", c("coeffs", "t", "PValues", "AdjPValues", "Results"))
resGenes <- fit2$genes
# expression columns come from ALL original samples, not only this subset
resExpr <- round(exprs(eset_main_org), 2)
colnames(resExpr) <- paste0(targets_org$labels, "_", colnames(resExpr))
resExpr <- resExpr[, order(colnames(resExpr))]
resAll <- cbind(resGenes, resDE, resExpr)
write.table(resAll, file = paste0(path_results, "Comp3a_DE_results_All.xls"), quote = FALSE, sep = "\t", row.names = FALSE)
### plot MA
pdf(paste0(path_plots, "Comp3a_plotMA.pdf"))
limma::plotMA(fit2, coef = 1, status = results, values = c(-1, 0, 1), col = c("red", "black", "green"), cex = c(0.7, 0.3, 0.7))
abline(0,0,col="blue")
dev.off()
### volcano plots
library(ggplot2)
table <- topTable(fit2, coef = 1, n=Inf)
table$threshold = as.factor(table$adj.P.Val < 0.05 & abs(table$logFC) > 1)
gg2 <- ggplot(data=table, aes(x=logFC, y=-log10(P.Value), colour=threshold)) + geom_point(alpha=0.4, size=1.75) + theme_bw() + theme(legend.position = "none") + xlab("log2 fold change") + ylab("-log10 p-value") + ggtitle("after Treatment")
pdf(paste0(path_plots, "Comp3a_volcanoplot.pdf"))
print(gg2)
dev.off()
### histograms of p-values and adjusted p-values
colours <- unique(targets[targets$groups != "leukemia", "colors"])
pdf(paste0(path_plots, "Comp3a_hist_pvs.pdf"))
table <- topTable(fit2, coef = 1, n=Inf)
hist(table$P.Value, breaks = 100, main = "afterTreatment", xlab = "P-values", col = colours)
dev.off()
### plot expression of top sign. genes/probesets
library(ggplot2)
library(reshape2)
expr <- exprs(eset_main)
topn <- 20
rownames(targets) <- strsplit2(targets$FileName, split = "//")[, 2]
# significant probesets only (adj. p < 0.05 and |logFC| > 1)
tt <- topTable(fit2, coef = 1, n=Inf, p.value=0.05, lfc=1)
### in the report display only first gene symbol
GeneSymbol <- strsplit2(head(tt[,"GeneSymbol"], topn), " /// ")[,1]
GeneTitle <- paste0(substr(strsplit2(head(tt[,"GeneTitle"], topn), " /// ")[,1], 1, 30))
# print(data.frame(GeneSymbol = GeneSymbol, GeneTitle = GeneTitle , head(tt[, c("logFC", "AveExpr", "P.Value", "adj.P.Val")], topn)))
# NOTE(review): if fewer than topn probesets are significant, 1:topn
# produces NA rows -- confirm tt always has >= 20 rows here.
topp <- rownames(tt)[1:topn]
df <- data.frame(Gene = topp, expr[topp,])
df.m <- reshape2::melt(df, id.vars = "Gene", value.name = "Expression", variable.name = "Sample")
### keep order of genes as in tt
df.m$Gene <- factor(df.m$Gene, levels = topp)
### add Entrez ID to the facet labels
lab.fct <- paste0(topp, "\n", strsplit2(tt[topp, "GeneSymbol"], " /// ")[,1])
levels(df.m$Gene) <- lab.fct
df.m$groups <- targets[df.m$Sample ,"groups"]
# one fill colour per sample group, ordered by group name
fill_colors <- unique(targets[, c("groups", "colors")])
fill_colors <- fill_colors[order(fill_colors$groups), "colors"]
ggp <- ggplot(df.m, aes(x = Sample, y = Expression, fill = groups)) +
theme_bw() +
theme(axis.text.x = element_text(angle = 80, hjust = 1, size = 10), plot.title = element_text(size = 16), strip.text.x = element_text(size = 10)) +
scale_x_discrete(labels=targets$CellTypeShort) +
labs(y = "Log2 expression") +
geom_bar(stat = "identity") +
facet_wrap(~ Gene, scales="free_y", ncol=4) +
scale_fill_manual(values = fill_colors)
pdf(paste0(path_plots, "Comp3a_topExpressionBarPlot.pdf"), 11, 11)
print(ggp)
dev.off()
###########################################################################
#### Comparison 3b: pre VS after treatment with matched samples + cell type
###########################################################################
## Same matched-sample contrast as Comparison 3a, but the design additionally
## adjusts for cell type (~ 0 + CellType + Treatment), i.e. a cell-type-
## blocked ("paired") version of the pre-vs-after comparison.
library(oligo)
library(pd.mogene.2.0.st)
library(limma)
load(paste0(path_results, "eset_main_org.Rdata"))
load(paste0(path_results, "targets_org.Rdata"))
targets <- targets_org
eset_main <- eset_main_org
# cell types represented among afterTreatment samples (Freq > 0)
tt <- table(targets$CellTypeShort, targets$groups)
tt <- data.frame(tt, stringsAsFactors = FALSE)
cell_types <- as.character(tt[tt$Var2 == "afterTreatment" & tt$Freq > 0, "Var1"])
### keep only leukemia and afterTreatment samples that have matched cell type
samples2keep <- grepl("leukemia|afterTreatment", targets$labels) & targets$CellTypeShort %in% cell_types
targets <- targets[samples2keep,]
eset_main <- eset_main[, samples2keep]
### sort samples by groups
ord <- order(targets$groups)
targets <- targets[ord, ]
eset_main <- eset_main[ ,ord]
# all(sampleNames(eset_main) == strsplit2(targets$FileName, "//")[,2])
#### design & analysis
treatments <- data.frame(Treatment = as.character(targets$groups), CellType = targets$CellTypeShort)
# NOTE(review): relevel() needs a factor -- see the note in Comparison 2.
treatments$Treatment <- relevel(treatments$Treatment, ref = "leukemia")
treatments
# cell-type means (no intercept) plus the treatment effect of interest
design <- model.matrix(~ 0 + CellType + Treatment, data = treatments)
rownames(design) <- targets$labels
design
fit <- lmFit(eset_main, design)
fit2 <- eBayes(fit[, "TreatmentafterTreatment"], trend = TRUE)
pdf(paste0(path_plots, "Comp3b_plotSA_trend.pdf"))
plotSA(fit2)
dev.off()
## with the FC cutoff
results <- decideTests(fit2, method="separate", adjust.method="BH", p.value=0.05, lfc=1)
summary(results)
colours <- unique(targets[targets$groups == "afterTreatment", "colors"])
pdf(paste0(path_plots, "Comp3b_vennDiagram.pdf"))
vennDiagram(results,include=c("up", "down"), circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="both", circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="up", circle.col=colours, counts.col=c("gold", "darkblue"))
# vennDiagram(results,include="down", circle.col=colours, counts.col=c("gold", "darkblue"))
dev.off()
table <- topTable(fit2, coef = 1, n = Inf)
### save all results with nice order
resCoeff <- fit2$coefficients
resT <- fit2$t
resPValue <- fit2$p.value
resPValueAdj <- apply(fit2$p.value, 2, p.adjust, method = "BH")
resRes <- results[, 1]
resDE <- data.frame(resCoeff, resT, resPValue, resPValueAdj, resRes)
colnames(resDE) <- paste0("afterTreatment_matched_paired_", c("coeffs", "t", "PValues", "AdjPValues", "Results"))
resGenes <- fit2$genes
# expression columns come from ALL original samples, not only this subset
resExpr <- round(exprs(eset_main_org), 2)
colnames(resExpr) <- paste0(targets_org$labels, "_", colnames(resExpr))
resExpr <- resExpr[, order(colnames(resExpr))]
resAll <- cbind(resGenes, resDE, resExpr)
write.table(resAll, file = paste0(path_results, "Comp3b_DE_results_All.xls"), quote = FALSE, sep = "\t", row.names = FALSE)
### plot MA
pdf(paste0(path_plots, "Comp3b_plotMA.pdf"))
limma::plotMA(fit2, coef = 1, status = results, values = c(-1, 0, 1), col = c("red", "black", "green"), cex = c(0.7, 0.3, 0.7))
abline(0,0,col="blue")
dev.off()
### volcano plots
library(ggplot2)
table <- topTable(fit2, coef = 1, n=Inf)
table$threshold = as.factor(table$adj.P.Val < 0.05 & abs(table$logFC) > 1)
gg2 <- ggplot(data=table, aes(x=logFC, y=-log10(P.Value), colour=threshold)) + geom_point(alpha=0.4, size=1.75) + theme_bw() + theme(legend.position = "none") + xlab("log2 fold change") + ylab("-log10 p-value") + ggtitle("after Treatment")
pdf(paste0(path_plots, "Comp3b_volcanoplot.pdf"))
print(gg2)
dev.off()
### histograms of p-values and adjusted p-values
colours <- unique(targets[targets$groups != "leukemia", "colors"])
pdf(paste0(path_plots, "Comp3b_hist_pvs.pdf"))
table <- topTable(fit2, coef = 1, n=Inf)
hist(table$P.Value, breaks = 100, main = "afterTreatment", xlab = "P-values", col = colours)
dev.off()
### plot expression of top sign. genes/probesets
library(ggplot2)
library(reshape2)
expr <- exprs(eset_main)
topn <- 20
rownames(targets) <- strsplit2(targets$FileName, split = "//")[, 2]
# significant probesets only (adj. p < 0.05 and |logFC| > 1)
tt <- topTable(fit2, coef = 1, n=Inf, p.value=0.05, lfc=1)
### in the report display only first gene symbol
GeneSymbol <- strsplit2(head(tt[,"GeneSymbol"], topn), " /// ")[,1]
GeneTitle <- paste0(substr(strsplit2(head(tt[,"GeneTitle"], topn), " /// ")[,1], 1, 30))
# print(data.frame(GeneSymbol = GeneSymbol, GeneTitle = GeneTitle , head(tt[, c("logFC", "AveExpr", "P.Value", "adj.P.Val")], topn)))
# NOTE(review): if fewer than topn probesets are significant, 1:topn
# produces NA rows -- confirm tt always has >= 20 rows here.
topp <- rownames(tt)[1:topn]
df <- data.frame(Gene = topp, expr[topp,])
df.m <- reshape2::melt(df, id.vars = "Gene", value.name = "Expression", variable.name = "Sample")
### keep order of genes as in tt
df.m$Gene <- factor(df.m$Gene, levels = topp)
### add Entrez ID to the facet labels
lab.fct <- paste0(topp, "\n", strsplit2(tt[topp, "GeneSymbol"], " /// ")[,1])
levels(df.m$Gene) <- lab.fct
df.m$groups <- targets[df.m$Sample ,"groups"]
# one fill colour per sample group, ordered by group name
fill_colors <- unique(targets[, c("groups", "colors")])
fill_colors <- fill_colors[order(fill_colors$groups), "colors"]
ggp <- ggplot(df.m, aes(x = Sample, y = Expression, fill = groups)) +
theme_bw() +
theme(axis.text.x = element_text(angle = 80, hjust = 1, size = 10), plot.title = element_text(size = 16), strip.text.x = element_text(size = 10)) +
scale_x_discrete(labels=targets$CellTypeShort) +
labs(y = "Log2 expression") +
geom_bar(stat = "identity") +
facet_wrap(~ Gene, scales="free_y", ncol=4) +
scale_fill_manual(values = fill_colors)
pdf(paste0(path_plots, "Comp3b_topExpressionBarPlot.pdf"), 11, 11)
print(ggp)
dev.off()
###########################################################################
#### Merge all results
###########################################################################
## Combine the per-comparison DE result tables into one master table keyed
## on ProbesetID: gene-annotation ("Gene*") columns are kept only from the
## first file and per-sample expression ("*CEL*") columns only from the
## last file, so each appears exactly once in the merged output.
## Fixes: seq_along() instead of 1:length() (robust to an empty vector),
## and each file is now read once instead of twice.
res_files <- c("Comp1_DE_results_All.xls", "Comp2_DE_results_All.xls", "Comp3a_DE_results_All.xls", "Comp3b_DE_results_All.xls")
res_all <- lapply(seq_along(res_files), function(ff){
  # single read per file: first line is the header, the rest are data rows
  allLines <- readLines(paste0(path_results, res_files[ff]), n = -1)
  resComp <- data.frame(strsplit2(allLines[-1], "\t"), stringsAsFactors = FALSE)
  colnames(resComp) <- strsplit2(allLines[1], "\t")
  if(ff == 1){
    # first file: drop only expression columns, keep gene annotation
    return(resComp[, !grepl(pattern = "CEL", x = colnames(resComp))])
  }else if(ff == length(res_files)){
    # last file: keep expression columns, drop gene annotation
    return(resComp[, !grepl(pattern = "Gene", x = colnames(resComp))])
  }else{
    # middle files: keep only the DE statistics columns
    return(resComp[, !grepl(pattern = "Gene", x = colnames(resComp)) & !grepl(pattern = "CEL", x = colnames(resComp))])
  }
})
lapply(res_all, colnames)
# full outer join of all tables on the probeset identifier
res_all <- Reduce(function(...) merge(..., by = "ProbesetID", all=TRUE, sort = FALSE), res_all)
colnames(res_all)
write.table(res_all, file = paste0(path_results, "CompALL_DE_results_All.xls"), quote = FALSE, sep = "\t", row.names = FALSE)
### Venn diagram of the afterTreatment decideTests calls across comparisons
results <- res_all[, grepl("afterTreatment.*Results", colnames(res_all))]
colnames(results) <- gsub("_Results", "", gsub(pattern = "afterTreatment_", "", colnames(results)))
pdf(paste0(path_plots, "CompAll_afterTreatment_vennDiagram.pdf"))
vennDiagram(results, include = c("up", "down"), counts.col = c("gold", "darkblue"))
dev.off()
|
102f5664962ca7ff9d68010070649807d70abe75 | cc983684925e96e70ecf33862cdb2fdd97b9a318 | /man/all_resources.Rd | 23f957a81d78968be25c3c23ce45acfaf6886ad2 | [] | no_license | muschellij2/clusterRundown | 71bd0d4c4979900893eee36872d0fc249d9612fa | f944724f0c381fd30549c5f9c022c6785cd20b56 | refs/heads/master | 2021-06-07T16:39:01.041205 | 2021-03-24T18:23:39 | 2021-03-24T18:23:39 | 19,076,191 | 1 | 2 | null | 2017-04-01T04:31:05 | 2014-04-23T16:04:52 | R | UTF-8 | R | false | true | 322 | rd | all_resources.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_resources.R
\name{all_resources}
\alias{all_resources}
\title{Get Full Cluster Rundown}
\usage{
all_resources()
}
\value{
\code{data.frame} of values for each person
}
\description{
Get output of resources and slots being used by the cluster
}
|
27e6728fdd103b2863f6aff81746599a26301e00 | ef69755977ff0ac21c306e7d3bd1b80c119e6820 | /plot2.R | a31efee3d0fcaf96adfb69191feaab6abaf3c873 | [] | no_license | kawe74/ExData_Plotting1 | 7975a474c230618b99094ac3ee4d56d0fd6f0e0a | af700a3c377dffcac542ea60dfb6cb13a943d801 | refs/heads/master | 2020-03-30T15:00:51.647106 | 2016-08-20T16:45:09 | 2016-08-20T16:45:09 | 66,142,798 | 0 | 0 | null | 2016-08-20T11:14:07 | 2016-08-20T11:14:06 | null | UTF-8 | R | false | false | 600 | r | plot2.R | # Author: Ahmad Kamil Abdul Hamid
# Submission date: 2016-08-20
# Part 3 of 5: To plot plot2.png
# Ensure the raw data archive is present; if not, download and preprocess it.
# downloaddatafile.R presumably also prepares the `df` data frame used by
# plot2() below -- TODO confirm (the else-branch assumes `df` already exists
# in the session).
if(!file.exists("data.zip")) {
  print ("Source file not available.. need to download and preprocess..")
  source("downloaddatafile.R")
} else {
  print ("source file already available..")
}
# Draw Global Active Power over time as a line chart on the active device,
# then mirror it into a 480x480 PNG named "plot2.png" in the working
# directory. Uses the `df` data frame prepared earlier in the script.
plot2 <- function() {
  # line plot on the current (screen) device
  plot(df$timestamp, df$Global_active_power,
       type = "l",
       xlab = "",
       ylab = "Global Active Power (kilowatts)")
  # copy the screen device into a PNG file device, then close it
  dev.copy(png, file = "plot2.png", width = 480, height = 480)
  dev.off()
  cat("plot2.png has been saved in", getwd())
}
plot2()
|
330b51eaed3b1d8db9b318a6f060aef00c6ccdb9 | 6a217fe66e311fe1b3120130db04d53517be1caa | /lectures/convert-times.R | 6d189786adefc096bb8b17ef884d55b9acdbd5d5 | [] | no_license | acthomasca/EDFDataScience | 775f1c90dd83fb177bacb2c212268e032ae62ee6 | 9c9e402f920e94d03306cefc3deef3fe06b1465b | refs/heads/master | 2020-05-30T14:07:18.707330 | 2019-06-02T00:37:34 | 2019-06-02T00:37:34 | 189,780,957 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 491 | r | convert-times.R |
## Quick: Convert dates from the earlier Twitter format into YYYY-MM-DD
dates <- c("Oct 13", "Sep 01", "Nov 09", "Nov 13", "Dec 12")
## Way 1: gsub and dplyr
dates2 <- gsub ("([A-Z][a-z]{2}) ([0-9]{2})",
"2015-\\1-\\2",
dates)
library(dplyr)
dates3 <- gsub ("Sep", "09", dates2) %>% gsub ("Oct", "10", .) %>% gsub ("Nov", "11", .) %>% gsub ("Dec", "12", .)
## Way 2: as.Date
as.Date (dates, format = "%b %d")
as.Date (dates, format = "%b %d") - 2
|
594a9504ee59e9171ea2a7ebd9a39ba03285519f | 0a86db2a0ad6f8aee0b3f59b1fb5e93da9c3dd78 | /man/allocate_consumption.Rd | 4a008e03fd6ec4aec666e1f02900250a07e1bc5b | [] | no_license | bjornkallerud/waterr | a4f16fb0c6f497c4ea6a9cfce229afb52cf683f5 | e80f456a0a5dd5753461bf1e63916f7660048dc6 | refs/heads/master | 2021-08-03T19:47:04.607199 | 2021-07-29T12:50:13 | 2021-07-29T12:50:13 | 211,401,852 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 974 | rd | allocate_consumption.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allocate_consumption.R
\name{allocate_consumption}
\alias{allocate_consumption}
\title{Allocate Tiered Water Consumption}
\usage{
allocate_consumption(df, suffix = NULL, use.prime = FALSE)
}
\arguments{
\item{df}{dataframe that contains customer usage and tier widths. usage column can be titled either "use" or "usage". width columns titles must take the form of "tX_width".}
\item{suffix}{width column suffix. we often define widths in terms of current/proposed rates (ie tX_width_current), so defining the suffix enables use of this function for current and proposed widths.}
\item{use.prime}{boolean - set to TRUE if you want to use `use_prime` usage column instead of standard use column.}
}
\value{
This function returns a \code{dataframe} of numeric columns for tiered usage
}
\description{
Distributes a customers total water consumption over various tiers.
}
\author{
Bjorn Kallerud
}
|
6c1e10e3a5a34a225d20e09966200f07e24f0cbf | 441e689481b0ec3f4dfbbd0a67f1dbf3bb40c006 | /datafiles/exit_profile.R | 23f527172a92766bfcf17ab5cb7a8225375b9ee6 | [] | no_license | cameronbracken/nsfem | d665822bab6d43671977b3c92baa187eb3c010f5 | 6a8ac137b62813fea2839737185aa531b3b36514 | refs/heads/master | 2021-01-22T04:57:05.562830 | 2009-03-18T05:14:44 | 2009-03-18T05:14:44 | 153,282 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 806 | r | exit_profile.R | parms = scan('../domain',nlines=1,what='character')
## Post-process FEM solver output: compare the velocity-magnitude profile
## along the inlet (y == 0) against the outlet (y == up).
up = as.numeric(substr(parms,1,1))   # first character of the domain token -- assumes a single-digit outlet height; TODO confirm
sol = as.matrix(read.table('solution.out'))
u = sol[,1]   # x-velocity per node
v = sol[,2]   # y-velocity per node
mag = sqrt(u^2+v^2)   # velocity magnitude per node
xy = as.matrix(read.table('nodes.out'))   # node coordinates (x, y)
outmag = mag[xy[,2]==up]   # magnitudes on the outlet boundary
print(outmag)
inmag = mag[xy[,2]==0]     # magnitudes on the inlet boundary
xy = xy[xy[,2]==up,]
x = xy[,1]                 # x-coordinates along the outlet boundary
## write the inlet-vs-outlet comparison to PDF (curves sorted by x)...
pdf('../plots/vel_profile.pdf',family='serif',pointsize=13)
plot(sort(x),inmag[order(x)],type='b',col='steelblue',xlab='X',ylab='Velocity Magnitude')
lines(sort(x),outmag[order(x)],type='b')
legend('topright',c('Inlet','Outlet'),lty='solid',col=c('steelblue','black'))
dev.off()
## ...and repeat the same plot on the interactive device
plot(sort(x),inmag[order(x)],type='b',col='steelblue',xlab='X',ylab='Velocity Magnitude')
lines(sort(x),outmag[order(x)],type='b')
legend('topright',c('Inlet','Outlet'),lty='solid',col=c('steelblue','black')) |
455b5bcc0ffd7f0b460c36d189e408d77003460f | c5c882dae3557ee44791f441a95561e65c63651d | /man/drop_tagons.Rd | ddfafc023e674d7f6a100ae4af1e9b16a7814154 | [] | no_license | ytse17/clpr | 12583a5073f655e31628a8c9221a210653971169 | 78d09142f06f1c1f408516fa81f0181ff533c192 | refs/heads/master | 2020-03-25T18:36:47.231589 | 2018-08-23T22:47:35 | 2018-08-23T22:47:35 | 144,040,324 | 0 | 0 | null | 2018-08-16T18:51:43 | 2018-08-08T16:29:32 | R | UTF-8 | R | false | true | 454 | rd | drop_tagons.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/complete_trip.R
\name{drop_tagons}
\alias{drop_tagons}
\title{Drop rows with tag on subtypes after recording the relevant tag on information in the tag off transactions}
\usage{
drop_tagons(tr_df)
}
\arguments{
\item{tr_df}{dataframe of transactions}
}
\description{
Drop rows with tag on subtypes after recording the relevant tag on information in the tag off transactions
}
|
bd32906c6a36c65189ca25f8f6b1104c3d69c5e1 | d3239b792d5abbbb020f403de0676902eb59f66c | /EMuDataHandling/GUIDcheck.R | 9d203736293b26b8d9c6e5b3695d8e92e3f5e9a2 | [] | no_license | fieldmuseum/Collections-Scripts | 319d0991c30c4343d703240b6eacec6e07656856 | 5cd74cf25eef673c8588bc6642871fc266988f87 | refs/heads/master | 2023-02-03T11:23:10.835246 | 2023-02-03T03:39:48 | 2023-02-03T03:39:48 | 125,874,637 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,793 | r | GUIDcheck.R | # EMu GUID Uniqueness Checker Script
# -- To check for duplicate GUIDs across records in a CSV
#
# Setup:
# 1. In EMu, set up a CSV UTF-8 report with either:
# Option 1: a group of these four columns:
# - irn
# - AdmGUIDIsPreferred_tab
# - AdmGUIDType_tab
# - AdmGUIDValue_tab
#
# Option 2: Two of these columns, as appropriate:
# - irn
# - DarGlobalUniqueId (if checking ecatalogue)
# - AudIdentifier (if checking emultimedia)
#
# ...See example in "EMuDataHandling/sample_data/GUIDcheck/"
#
# 2. Run the report for the records in need of a GUID-check
#
# 3. Name the output CSV "Group1.csv"
#
# 4. Move it here in the Collections-Scripts repo:
# "EMuDataHandling/real_data_in/GUIDcheck/Group1.csv"
# install.packages(c("readr","tidyr","dplyr","progress"))
library("readr")
library("tidyr")
library("dplyr")
library("progress")
#### Input - point to your csv file ####
input_file <- "EMuDataHandling/real_data_in/GUIDcheck/Group1.csv"
records <- read_csv(file=input_file,
                    progress = TRUE)
#### Check input GUID field ####
## Normalise whichever GUID column the EMu report contains to a single
## "GUID" column. The original nested if/else only print()ed on failure,
## so the script kept running and crashed later with a confusing error;
## stop() now halts immediately, since every step below needs records$GUID.
guid_col <- intersect(c("AdmGUIDValue", "DarGlobalUniqueIdentifier", "AudIdentifier"),
                      colnames(records))
if (length(guid_col) == 0) {
  stop("Error -- Cannot find 'DarGlobalUniqueIdentifier', 'AudIdentifier', or grouped 'AdmGUIDValue' column in input CSV")
}
# rename the first matching column (AdmGUIDValue takes priority, as before)
colnames(records)[colnames(records) == guid_col[1]] <- "GUID"
#### Count GUIDs ####
# warn the user on large inputs, since the counting below can be slow
if (NROW(records) > 1000) {
  print(paste("Counting duplicates in", NROW(records),
              "rows -- May take a minute..."))
}
# tally each GUID and keep only values appearing more than once
guids <- dplyr::count(records, GUID)
guids_dups <- guids[guids$n > 1,]
# pull the full records for every duplicated GUID (all.y keeps each dup)
record_dups <- merge(records, guids_dups,
                     by="GUID",
                     all.y = TRUE)
record_dups <- unique(record_dups[,c("irn","GUID","n")])
#### Check output ####
# irn's may be duplicated in reports that take a long time to run...
# (specifically, irn's that were edited while the report was running.)
# Re-count after de-duplicating irn rows and keep only GUIDs still shared
# by more than one distinct irn.
re_check <- dplyr::count(record_dups, GUID)
re_check <- re_check[re_check$n > 1,]
record_dups <- record_dups[record_dups$GUID %in% re_check$GUID,]
#### Output ####
## Write the duplicated rows to CSV when any were found; otherwise just
## report a clean result on the console.
if (NROW(record_dups) > 0) {
  output_filename <- "EMuDataHandling/real_data_in/GUIDcheck/guid_dups.csv"
  # prints a two-element character vector: summary message + output path
  print(c(paste("Outputting",NROW(guids_dups), "duplicate GUIDs in",
                NROW(record_dups),"records to: "),
          output_filename))
  write_csv(record_dups,
            output_filename)
} else {
  print(paste("No duplicate GUIDS found in input CSV", input_file))
}
|
5eeda38052780237322ea6d7e179c29e4a56d233 | 9276963e9a3da697dbbfd9ffe5e25f625f45941b | /R/Initial_Analysis.R | a28a25958b4aae33bcc3dd25a70ecec43d1bf8eb | [] | no_license | tylerjrichards/UF_SG | ab18a514ee9b3b7c35890e51dd3d32a80d2205ed | 5e5bde8efe9dc24e4675621c814dbd7f94e0bdcf | refs/heads/master | 2021-09-13T02:06:05.712120 | 2018-04-23T19:41:41 | 2018-04-23T19:41:41 | 114,598,836 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,310 | r | Initial_Analysis.R | library(here)
library(dplyr)
library(tidyr)
library(ggplot2)
## Load the raw Student Government election results (paths via here())
Spring_elections <- read.csv(here("Spring_total.csv"))
Fall_elections <- read.csv(here("Fall_total_v2.csv"))
#Let's convert the votes and years column to numeric values for Spring and Fall
# (factor -> character -> numeric avoids factor-level-to-integer coercion)
Spring_elections$Votes <- as.numeric(as.character(Spring_elections$Votes))
Spring_elections$Year <- as.numeric(as.character(Spring_elections$Year))
Fall_elections$Votes <- as.numeric(as.character(Fall_elections$Votes))
Fall_elections$Year <- as.numeric(as.character(Fall_elections$Year))
#Now we need to group by the fall elections and edit some party names
# NOTE(review): funs() is deprecated in dplyr >= 0.8 (use list()/lambdas);
# it still works here but emits a warning.
Fall_elections <- Fall_elections %>%
  replace_na(list(Won = FALSE)) %>%
  mutate_all(funs(toupper)) %>%
  mutate(Party = ifelse(Party == "SWAMP PARTY", "SWAMP", Party)) %>%
  mutate(Party = ifelse(Party == "THE STUDENTS PARTY", "STUDENTS PARTY", Party)) %>%
  mutate(Won = as.logical(Won)) %>%
  mutate(Election_date = "FALL")
#Now for Spring
Spring_elections <- Spring_elections %>%
  replace_na(list(Won = FALSE)) %>%
  mutate(Party = as.character(Party)) %>%
  mutate(Party = ifelse( Party == "The_Students", "Students Party", Party)) %>%
  mutate(Party = ifelse(Party == "FSP", "Florida Students Party", Party)) %>%
  mutate(Party = ifelse(Party == "Vision_2000" | Party == "Vision_2001", "Vision", Party)) %>%
  mutate_all(funs(toupper)) %>%
  mutate(Election_date = "SPRING") %>%
  mutate(Won = as.logical(Won)) %>%
  # NOTE(review): this filter indexes the ORIGINAL global Spring_elections;
  # it only stays aligned because no earlier pipe step drops/reorders rows --
  # confirm if steps are added. filter(!is.na(Votes)) would be safer.
  filter(!is.na(Spring_elections$Votes))
#note that Student Party is different from Students Party, which appeared a few years later.
#let's get establishment vs independent
# A party winning more than one "DISTRICT A" (Fall) / "BUSINESS" (Spring)
# seat in a year is labelled SYSTEM, otherwise INDEPENDENT.
Est_Fall <- Fall_elections %>%
  filter(Seat == "DISTRICT A") %>%
  group_by(Party, Year, Seat) %>%
  summarise(Seats_won = sum(Won), Candidates = n()) %>%
  mutate(Est = ifelse(Seats_won > 1, "SYSTEM", "INDEPENDENT")) %>%
  select(Party, Year, Est)
Est_Spring <- Spring_elections %>%
  filter(Seat == "BUSINESS") %>%
  group_by(Party, Year, Seat) %>%
  summarise(Seats_won = sum(Won), Candidates = n()) %>%
  mutate(Est = ifelse(Seats_won > 1, "SYSTEM", "INDEPENDENT")) %>%
  select(Party, Year, Est)
Establishment_total <- rbind(Est_Spring, Est_Fall)
# year_eval.csv joins on Party/Year/Election_date (presumably party-age
# info given the variable name -- TODO confirm); Year is coerced to
# character to match the upper-cased election tables
Party_age <- read.csv('year_eval.csv')
Party_age$Year <- as.character(Party_age$Year)
Election_total <- Fall_elections %>%
  bind_rows(Spring_elections) %>%
  left_join(Establishment_total, by = c("Party", "Year")) %>%
  distinct(Seat, Year, Party, First_Name, Last_Name, Votes, .keep_all = TRUE) %>%
  mutate(Est = ifelse(is.na(Est), "INDEPENDENT", Est)) %>%
  left_join(Party_age, by = c("Party", "Year", "Election_date")) %>%
  mutate(X = NULL)
# Sanity check: candidate counts per party/year/season by Est label
Check_candidate_totals <- Election_total %>%
  group_by(Party, Year, Election_date) %>%
  count(Est)
# Senate-only success rates (executive races excluded)
Party_success_senate <- Election_total %>%
  filter(Seat != "STUDENT BODY PRESIDENT" & Seat != "TREASURER") %>%
  group_by(Party, Year, Election_date, Est) %>%
  summarise(Seats_won = sum(Won), Candidates = n()) %>%
  mutate(Percent_success = 100 * (Seats_won / Candidates))
#At this point, we need to look though the party success file as well as the check candidate totals to make sure everything is correct
ggplot(Party_success_senate, aes(x=Year, y=Seats_won)) + geom_point() + geom_text(label = Party_success_senate$Party)
#Spring vis
ggplot(Party_success_senate[Party_success_senate$Election_date == "SPRING",], aes(x=Year, y=Seats_won, color = Est, size = 1.5)) + geom_point() + ylab("Number of Seats Won") + theme(legend.title=element_blank()) + guides(size=FALSE)
Seat_breakdown <- Fall_elections %>%
  left_join(Est_Fall, by = c("Party", "Year")) %>%
  group_by(Seat, Est)
#Let's ensure that all of the candidates are present in the data
Seatswon_Year <- Party_success_senate %>%
  group_by(Year) %>%
  summarize(Won = sum(Seats_won))
#This will give us a breakdown of how many seats were won each year; we can cross-reference this with the seats allotted each year
#This will not match up for all years because there were no candidates who ran in certain elections
#Now that the data is all checked we're good to go with analysis!
#Let's sort by who won
# NOTE(review): Won is logical after cleaning (mutate(Won = as.logical(Won)));
# comparing to the string "TRUE" works via coercion but filter(Won) is safer.
Spring_success <- Spring_elections %>%
  filter(Won == "TRUE")
Fall_success <- Fall_elections %>%
  filter(Won == "TRUE")
write.csv(Election_total, "Cleaned_SG_Election.csv")
|
d5a793772ed82e5f0b668093f6cc9845a9e1d7bf | f3b26d8821c9cfb4d339b2d0436579a7a52352a6 | /cachematrix.R | eedbda9e95bbe6109cd4c8b62bdc106dfb556513 | [] | no_license | KDThinh/ProgrammingAssignment2 | c93ebd2ba8c0bf98f1dd90308b50b75a248a7070 | 2402568d4e85259df8c8f7a988d1397bb8f8a04b | refs/heads/master | 2021-01-21T07:54:04.937667 | 2014-11-19T14:14:49 | 2014-11-19T14:14:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,411 | r | cachematrix.R | ## This R script is to create a square matrix according to the user's input
## and calculate and store (or cache) its inverse matrix so that when the
## cacheSolve function is called again, it won't re-calculate the inverse
## matrix but just return the cached value.
## If a new matrix is created, the old inverse matrix, which is cached, will
## be removed and turn to NULL, waiting for the inverse matrix to be stored
## when the inverse calculation is called.
## This function is to create a square matrix for inverse calculation and caching
## the result. Four sub-functions are created:
## + The set function is to change the old matrix to a new one. Inside, the
## inv variable which assigned as the value of inversed matrix will be cleared
## and turned to NULL, waiting for a new inverse matrix to be calculated
## and stored. The matrix x will be assigned globally to the new matrix y.
## + The get function is to return the current x matrix.
## + The setinv is to be used to assign the inversed matrix calculated from
## cacheSolve function to the variable inv.
## + The getinv function is to return the value of inv
## Wrap a matrix in an object that can cache its inverse.
## Returns a list of four closures sharing the enclosed state:
##   set(y)          -- replace the stored matrix and invalidate the cache
##   get()           -- return the stored matrix
##   setinv(inverse) -- store a computed inverse in the cache
##   getinv()        -- return the cached inverse (NULL when not computed)
makeCacheMatrix <- function(x = matrix()) {
    cached_inverse <- NULL
    set <- function(y) {
        # replacing the matrix invalidates any previously cached inverse
        x <<- y
        cached_inverse <<- NULL
    }
    get <- function() {
        x
    }
    setinv <- function(inverse) {
        cached_inverse <<- inverse
    }
    getinv <- function() {
        cached_inverse
    }
    list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function is to calculate the inverse matrix of x. If the x matrix is the
## same and inv has been calculated before, it will just return the cached value.
## The work flow of this function is first it assigns the value of inv with the value
## of x$getinv() (which is the inv value of makeCacheMatrix, and is NULL as stated in
## the first line if cacheSolve hasn't run. If cacheSolve was run, the inv
## in makeCacheMatrix would be assigned as the inv value calculated from cacheSolve)
## If inv is NULL, the function will calculate the inverse function using the
## matrix from x$get(), and assigns its result to inv in x$setinv(inv).
## If inv is not NULL, it contains a value calculated from before. The function
## will stop and return the cached inv value.
## Return the inverse of the special "matrix" built by makeCacheMatrix.
## On a cache miss the inverse is computed with solve(), stored via
## x$setinv(), and returned; on a hit the cached value is returned
## directly (after announcing it with a message).
cacheSolve <- function(x, ...) {
    cached <- x$getinv()
    if (is.null(cached)) {
        # cache miss: compute the inverse, remember it, and return it
        source_matrix <- x$get()
        cached <- solve(source_matrix, ...)
        x$setinv(cached)
        return(cached)
    }
    message("getting cached data")
    cached
}
|
0d17b9b6acaee2fe54fdb995e0f5f069c02fd338 | d0b8f818a830ba41e13a8420ee55dc1485877693 | /R/qc_dems_function.R | 4661a76250e1040812e4856dd4739b2fcfcf8ed9 | [] | no_license | zrussek/eatestpackage | ea5c3e30f3e86ede57bf9a17db53ccde723cc4e6 | ad858c501d65d24a14384902c1f0c67cddecf7bd | refs/heads/master | 2021-01-18T15:07:33.564933 | 2016-04-04T16:27:40 | 2016-04-04T16:27:40 | 52,910,920 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,442 | r | qc_dems_function.R | ################################################################
# Notes: #
# - purpose: this is an outline for the ea_dems_qc_funtion #
# #
# - keywords: #brule #check #remove #
################################################################
###################
# define function #
###################
# Render the packaged demographics-QC R Markdown report to an HTML file.
#
# Arguments:
#   formatted_test_name  -- data passed to the Rmd as the `file_name` param
#   outlier_decimal      -- outlier threshold forwarded as `outlier_parm`
#   missing_percentage   -- missing-data threshold (`missing_parm`)
#   duplicate_percentage -- duplicate threshold (`duplicate_parm`)
#   html_name_location   -- path/name of the HTML file to write
qc_dems_html <- function(formatted_test_name = NULL,
                         outlier_decimal = .25,
                         missing_percentage = 5,
                         duplicate_percentage = 5,
                         html_name_location = NULL)
{
  # parameterised Rmd template shipped inside the package
  rmd_template <- system.file("rmd/qc_dems_markdown.Rmd", package = "eatestpackage")
  # parameters handed to the Rmd document
  report_params <- list(file_name = formatted_test_name,
                        outlier_parm = outlier_decimal,
                        missing_parm = missing_percentage,
                        duplicate_parm = duplicate_percentage)
  # knit/render the report to the requested HTML location
  rmarkdown::render(input = rmd_template,
                    params = report_params,
                    clean = TRUE,
                    output_file = html_name_location)
}
# # example
# qc_dems_html(
#
# formatted_test_name = formatted_test_review,
# html_name_location = "N:/general/sullivan/quality_control/dems_example.html"
#
# )
|
b9f9aa5aa77a440072b429c630dca8ebe7608fe1 | dd31f1c810abfd5c1f729dffd31aa14a5d848bab | /DataProducts/ScatterPlot/server.R | 336afb57332829ef9357c89b50b8080428bfb0be | [] | no_license | hamelsmu/datasciencecoursera | 99267d8c597a265452accdaa863e194f02f1eb4b | 881e57115e5c3b5b7d5ad2c136ee4771e2435ece | refs/heads/master | 2021-01-17T14:35:53.339829 | 2015-02-23T14:43:19 | 2015-02-23T14:43:19 | 18,737,422 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,177 | r | server.R | # server.R
require(rCharts)   # NOTE(review): require() returns FALSE instead of erroring if missing; rCharts is not referenced below -- confirm it is needed
library(ggplot2)
library(ggthemes)
data(mtcars)
# Convert the discrete mtcars columns to factors so ggplot treats them as
# categorical (this modifies a session-local copy, not the package data).
mtcars$cyl = as.factor(mtcars$cyl)
mtcars$vs = as.factor(mtcars$vs)
mtcars$am = as.factor(mtcars$am)
mtcars$gear = as.factor(mtcars$gear)
mtcars$carb = as.factor(mtcars$carb)
x = c("")   # NOTE(review): appears unused in this file -- confirm before removing
## Shiny server: correlation text output plus a themed scatter plot of the
## user-selected mtcars columns (input$X, input$Y, colour input$C, theme
## input$T).
shinyServer(function(input, output) {
  # Reactive accessors for the selected X / Y columns of mtcars.
  # (Kept from the original, where the reactive wrappers were noted as an
  # exercise requirement rather than a necessity.)
  Xvar = reactive(mtcars[, c(input$X)])
  Yvar = reactive(mtcars[, c(input$Y)])
  # Pearson correlation between the selected columns.
  output$text1 <- renderText({
    cor(Xvar(), Yvar())
  })
  # Scatter plot of Y vs X with a smoother and the chosen ggthemes theme.
  output$plot1 <- renderPlot({
    p1 = ggplot(mtcars, aes_string(x=input$X, y=input$Y)) +
      geom_point(aes_string(color=input$C), size = 6, alpha = .75) +
      stat_smooth(alpha = .25, lty = 2) +
      labs(title = paste('ScatterPlot:',input$X, "vs.", input$Y),
           x = input$X,
           y = input$Y)
    # Look up the theme function by name instead of building R source text
    # and eval(parse())-ing it: eval(parse()) is fragile and unsafe with
    # user-controlled input$T, and this fails with a clear error when the
    # requested theme does not exist.
    p2 = get(paste0("theme_", input$T), mode = "function")()
    print(p1+p2)
  })
}
)
a625f33520b3d2731850db894f38b58659f00fea | 72d9009d19e92b721d5cc0e8f8045e1145921130 | /sppmix/man/plot_convdiags.Rd | f0b7bb480bf70ff22dcd9cb3b4b07b196e092e36 | [] | no_license | akhikolla/TestedPackages-NoIssues | be46c49c0836b3f0cf60e247087089868adf7a62 | eb8d498cc132def615c090941bc172e17fdce267 | refs/heads/master | 2023-03-01T09:10:17.227119 | 2021-01-25T19:44:44 | 2021-01-25T19:44:44 | 332,027,727 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,503 | rd | plot_convdiags.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mcmc_plots.R
\name{plot_convdiags}
\alias{plot_convdiags}
\title{Checking convergence visually}
\usage{
plot_convdiags(fit, burnin = floor(fit$L/10), open_new_window = FALSE,
maxlag = 100)
}
\arguments{
\item{fit}{Object of class \code{damcmc_res} or \code{bdmcmc_res}.}
\item{burnin}{Number of initial realizations to discard. By default, it is 1/10 of the total number of iterations.}
\item{open_new_window}{Open a new window for the plot.}
\item{maxlag}{The maximum lag value to consider. Default is 100.}
}
\description{
Based on a `damcmc_res` object, this function will produce
many graphs to help assess convergence visually,
including running mean plots and
autocorrelation plots for all the parameters. This function calls
\code{\link{plot_runmean}} and \code{\link{plot_autocorr}} for all parameters
so we do not have to it individually.
For examples see
\url{http://faculty.missouri.edu/~micheasa/sppmix/sppmix_all_examples.html
#plot_convdiags}
}
\examples{
\donttest{
truemix_surf <- rmixsurf(m = 3, lambda=100, xlim = c(-3,3), ylim = c(-3,3))
plot(truemix_surf)
genPPP=rsppmix(intsurf = truemix_surf, truncate = FALSE)
fit = est_mix_damcmc(pp = genPPP, m = 3)
plot_convdiags(fit)}
}
\seealso{
\code{\link{est_mix_damcmc}},
\code{\link{rmixsurf}},
\code{\link{plot_runmean}},
\code{\link{plot_autocorr}},
\code{\link{rsppmix}}
}
\author{
Sakis Micheas
}
|
82228327a29f5a34e3238491bb73d0948f6a221b | 18b022bcf2011d6b5588d73355a34c481f2d50e3 | /plot1.R | d2bfb6ce81a48b054026521d74e982c3ddc49017 | [] | no_license | j-ros/ExData_Plotting1 | a2aa81851e3c0353fa12ddeca420a63cf5389ebd | 92c01a5ccd8032cf6a3039cf31e732fc32a1a52f | refs/heads/master | 2021-01-19T14:36:23.306153 | 2017-04-13T16:24:00 | 2017-04-13T16:24:00 | 88,173,242 | 0 | 0 | null | 2017-04-13T14:23:13 | 2017-04-13T14:23:12 | null | UTF-8 | R | false | false | 1,140 | r | plot1.R | #Set the working directory to the correct folder
setwd("~/DataScience/Git/ExData_Plotting1")
#Download files
url<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
dir<-"~/DataScience/Git/ExData_Plotting1/data.zip"
download.file(url,dir)
dateDownloaded<-date() "Thu Apr 13 16:29:04 2017"
#Open a file connection to the zip file to read
unzip(dir,exdir="data")
data <- read.table("data/household_power_consumption.txt",na.strings="?",sep=";",header=T)
#Convert dates and times and drop old ones
datetime<-paste(data$Date,data$Time)
data$datetime<-strptime(datetime,format="%d/%m/%Y %H:%M:%S")
drop <- c("Date","Time")
data<-data[,!(names(data) %in% drop)]
datesubset<-subset(data,as.Date(datetime)>=as.Date("2007-02-01")
& as.Date(datetime)<=as.Date("2007-02-02"))
#Write the subsetted file to use in all parts of the assignement
write.table(datesubset,file="datesubset.txt",sep=";")
#Plot1
png("plot1.png")
hist(datesubset$Global_active_power,main="Global Active Power",
xlab="Global Active Power (kilowatts)",col="red")
axis(2,at=c(200,400,600,800,1000,1200))
dev.off()
|
2ecd839edcb9d55cbb70927fa1d5444fb4e3130d | 5c50a77d0737f53c2aa913a0cdf481a95996674c | /HeatMap_WasteDisposal_090220.R | b113e5de75688ac18cbec0cd8e75f6f024c66734 | [] | no_license | bbeacosta/Upgrade_scripts | a400cf616e096c374065561a94a543cf94180d8d | 6ad144c9ad67373e8139630c5021f41dde67c81e | refs/heads/master | 2022-11-06T16:16:11.869124 | 2020-06-22T14:06:37 | 2020-06-22T14:06:37 | 274,151,637 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,389 | r | HeatMap_WasteDisposal_090220.R | ### Load libraries
library(ggplot2)
library(tidyverse)
library(readxl)
library(dplyr)
library(stringr)
### Set workin directory and load files ###
setwd("C:/Users/skgtbco/OneDrive - University College London/PhD folder/PPI Networks/WPPINA/December2019/GenePrioritisation/Targets/")
getwd()
df <- read_excel(path = "Targets_HeatMap.xlsx", col_names = T)
# theme and style
theme_bc <- theme_bw(base_family = "Helvetica") +
theme(panel.grid.major.x = element_blank(),
legend.position = "right",
strip.text = element_text(size = 7),
axis.text.x = element_text(size = 7, angle = 90, hjust = 1, vjust = 0.5),
axis.text.y = element_text(size = 7),
axis.title.y = element_text(vjust = 0.6),
axis.title = element_text(size = 10),
panel.spacing = unit(0.1, "lines"))
# Data wrangling and plotting
df %>%
dplyr::group_by(Analysis.Module, Semantic.classes) %>%
dplyr::summarise(n = n()) %>%
ggplot(aes(x = Semantic.classes, y = Analysis.Module)) +
geom_tile(aes(fill = n), colour = "black") +
# facet_grid(rows = vars(Semantic.classes), scales = "free_y", space = "free_y") +
scale_fill_viridis_c(na.value = "grey") +
labs(x = "Biological Process", y = "Module") +
theme_bc +
theme(panel.grid = element_blank(),
strip.text.y = element_text(size = 8, angle = 0))
|
ec9b4ea77b02f56e0197142e6bd9085418917ddd | c6c0881ca260a793a70f5814ab6993c61dc2401c | /imputed/scripts/pheno_pred_UKB.R | 14d4b7bae0bfd9e6959a0debf399a7c21c5d4ed1 | [] | no_license | luyin-z/PRS_Height_Admixed_Populations | 5fe1c1bef372b3c64bfd143397709c7529a2705a | bf04ba884fd16e5e8c0685ccfbc86ed72d02c7f2 | refs/heads/master | 2023-03-16T17:05:56.658896 | 2020-09-18T16:58:04 | 2020-09-18T16:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,026 | r | pheno_pred_UKB.R | #!/usr/bin/env Rscript
# Compute partial R^2 of polygenic scores (PGS) for height in UK Biobank
# African-ancestry (UKB_afr) and European-ancestry (UKB_eur) samples, per
# ancestry quantile, with bootstrap confidence intervals, and save a
# comparison table. NOTE(review): `args` is read but never used below.
args = commandArgs(trailingOnly=TRUE)
#**************************************
#*	CALCULATE PARTIAL R2     *********
#**************************************
library("optparse")
library(data.table)
library(dplyr)
library(biomaRt)
library(parallel)
options(scipen=10)
options(digits=10)
library("optparse")
library(ggplot2);
library(reshape2); library(wesanderson)
library(rlist)
library(asbio)
library(GGally)
library(tidyr)
library(hexbin)
library(psychometric)
library(boot)
#read in PGS scores: one list entry per (window size, p-value threshold)
#combination, named e.g. "5000_0.0005".
PGS_UKB_afr<-vector('list', 35)
nam<-paste(rep(c(5000,10000,25000,50000,75000,100000,500000),5), c(0.0005,0.00005, 0.000005,0.0000005,0.00000005), sep="_")
names(PGS_UKB_afr)<-nam
for(N in nam){
	readRDS(paste0('~/height_prediction/imputed/output/PGS2', N, '_UKB.Rds'))[[1]]-> PGS_UKB_afr[[N]]
}
#read in phenotype data
fread('~/height_prediction/input/ukb_afr/UKB_AFR_pheno.txt', fill=T)[,ANC.PC:=NULL]-> Pheno_UKB_afr
#a partial R2 function (defines partial.R2/rsq.R2 used below)
source('~/height_prediction/strat_prs/scripts/Rsq_R2.R')
#add PGS to Pheno table in order to be able to make multiple analyses
#Pheno_UKB_afr[,ID:=paste0(ID, "_", ID)]
# Strip the leading "<number>_" prefix from each PGS sample name so they
# match the phenotype table IDs.
for (N in nam){
	for(j in 1:length(PGS_UKB_afr[[N]])){
	gsub("[0-9]+_","",names(PGS_UKB_afr[[N]])[[j]])-> names(PGS_UKB_afr[[N]])[[j]]
	}
}
as.character(Pheno_UKB_afr$ID)-> Pheno_UKB_afr$ID
setkey(Pheno_UKB_afr, ID)
#add ancestry: per-individual African/European ancestry fractions averaged
#over the 22 per-chromosome rfmix files.
ancestry<-do.call(rbind, lapply(1:22, function(X) fread(paste0('~/height_prediction/input/ukb_afr/rfmix_anc_chr', X, '.txt'))))
anc_UKB_afr<-ancestry %>% group_by(SUBJID) %>% summarise(AFR_ANC=mean(AFR_ANC), EUR_ANC=1-mean(AFR_ANC)) %>% as.data.table #mean across chromosomes for each individual
anc_UKB_afr[,ID:=SUBJID][,SUBJID:=NULL]
as.character(anc_UKB_afr$ID)-> anc_UKB_afr$ID
setkey(anc_UKB_afr, ID)
# Join PGS + phenotype + ancestry per parameter combination; keep only
# individuals with >=5% African ancestry and a non-missing height.
PGS2_UKB_afr<-vector('list', length(PGS_UKB_afr))
names(PGS2_UKB_afr)<-nam
for(N in nam){
	data.table(ID=names(PGS_UKB_afr[[N]]), PGS=unlist(PGS_UKB_afr[[N]]))-> PGS2_UKB_afr[[N]]
	setkey(PGS2_UKB_afr[[N]], ID)
	PGS2_UKB_afr[[N]][Pheno_UKB_afr, nomatch=0]-> PGS2_UKB_afr[[N]]
	PGS2_UKB_afr[[N]][anc_UKB_afr, nomatch=0]-> PGS2_UKB_afr[[N]]
	PGS2_UKB_afr[[N]][,AGE2:=Age^2]
	PGS2_UKB_afr[[N]][AFR_ANC>=0.05]-> PGS2_UKB_afr[[N]]
	PGS2_UKB_afr[[N]]$Sex<-as.factor(PGS2_UKB_afr[[N]]$Sex)
	PGS2_UKB_afr[[N]][which(!is.na(PGS2_UKB_afr[[N]][,Height])),]-> PGS2_UKB_afr[[N]]
}
# Fit a ladder of linear models; only lm7 (covariates) vs lm8 (covariates +
# PGS) is used for the partial R^2 below, the rest are exploratory.
lapply(PGS2_UKB_afr, function(X) lm(Height~Sex, X))-> lm0_UKB_afr
lapply(PGS2_UKB_afr, function(X) lm(Height~PGS, X))-> lm1_UKB_afr
lapply(PGS2_UKB_afr, function(X) lm(Height~Age, X))-> lm2_UKB_afr
lapply(PGS2_UKB_afr, function(X) lm(Height~AGE2, X))-> lm3_UKB_afr
lapply(PGS2_UKB_afr, function(X) lm(Height~EUR_ANC, X))-> lm4_UKB_afr
lapply(PGS2_UKB_afr, function(X) lm(Height~PGS+Age, X))-> lm5_UKB_afr
lapply(PGS2_UKB_afr, function(X) lm(Height~PGS+AGE2,X))-> lm6_UKB_afr
lapply(PGS2_UKB_afr, function(X) lm(Height~Sex+Age+AGE2+EUR_ANC, X))-> lm7_UKB_afr
lapply(PGS2_UKB_afr, function(X) lm(Height~Sex+Age+AGE2+EUR_ANC+PGS, X))-> lm8_UKB_afr
partial_R2<-lapply(nam, function(X) partial.R2(lm7_UKB_afr[[X]], lm8_UKB_afr[[X]]))
names(partial_R2)<-nam
# Bin individuals into quartiles of European ancestry (adds Quantile by
# reference; PGS3_* therefore aliases the same data.tables as PGS2_*).
lapply(PGS2_UKB_afr, function(X) X[, Quantile:= cut(EUR_ANC,
	breaks=quantile(EUR_ANC, probs=seq(0,1, by=0.25), na.rm=TRUE),
	include.lowest=TRUE)])-> PGS3_UKB_afr
names(PGS3_UKB_afr)<-names(PGS2_UKB_afr)
lapply(1:length(PGS3_UKB_afr), function(X) PGS3_UKB_afr[[X]][,Med_Eur_Anc:=median(EUR_ANC),by=Quantile])
# a1 reorders the quartile labels into ascending ancestry order.
# NOTE(review): the reorder is a hard-coded index permutation — verify it
# matches the factor-level order produced by cut() for these data.
lapply(1:length(PGS3_UKB_afr), function(X) as.character(unique((PGS3_UKB_afr[[X]]$Quantile))))-> a
lapply(a, function(X) c(X[2],X[4], X[3], X[1]))-> a1 #check
names(a1)<-names(PGS3_UKB_afr)
# Partial R^2 of the PGS within each ancestry quartile.
r2_UKB_afr<-vector('list', length(PGS3_UKB_afr))
names(r2_UKB_afr)<-names(PGS3_UKB_afr)
for(I in names(r2_UKB_afr)){
	r2_UKB_afr[[I]]<-vector('list', length(a1[[I]]))
	names(r2_UKB_afr[[I]])<-a1[[I]]
	for(i in a1[[I]]){
		r2_UKB_afr[[I]][[i]]<-partial.R2(lm(Height~Sex+Age+AGE2+EUR_ANC, PGS3_UKB_afr[[I]][Quantile==i]),lm(Height~Sex+Age+AGE2+EUR_ANC+PGS, PGS3_UKB_afr[[I]][Quantile==i]))
	}
}
# Summary table per parameter combination: R^2 per quartile plus overall,
# with analytic confidence limits from psychometric::CI.Rsq.
B_UKB_afr<-vector('list', length(r2_UKB_afr))
names(B_UKB_afr)<-names(r2_UKB_afr)
for (I in names(r2_UKB_afr)){
	B_UKB_afr[[I]]<-data.table(Quant=c(a1[[I]], "total"),
	R_sq=c(unlist(r2_UKB_afr[[I]]), partial_R2[[I]]),
	Med_Eur_Anc=c(unique(PGS3_UKB_afr[[I]][Quantile==a1[[I]][1]][,Med_Eur_Anc]), unique(PGS3_UKB_afr[[I]][Quantile==a1[[I]][2]][,Med_Eur_Anc]),unique(PGS3_UKB_afr[[I]][Quantile==a1[[I]][3]][,Med_Eur_Anc]),unique(PGS3_UKB_afr[[I]][Quantile==a1[[I]][4]][,Med_Eur_Anc]), median(PGS3_UKB_afr[[I]][, EUR_ANC])))
	B_UKB_afr[[I]][,N:=c(nrow(PGS3_UKB_afr[[I]][Quantile==a1[[I]][1]]), nrow(PGS3_UKB_afr[[I]][Quantile==a1[[I]][2]]), nrow(PGS3_UKB_afr[[I]][Quantile==a1[[I]][3]]), nrow(PGS3_UKB_afr[[I]][Quantile==a1[[I]][4]]),nrow(PGS3_UKB_afr[[I]]))]
	B_UKB_afr[[I]][,K:=1] #number of predictors. Need to check later if this is correct.
	B_UKB_afr[[I]][, LCL:=CI.Rsq(R_sq, k=K, n=N)[3]]
	B_UKB_afr[[I]][, UCL:=CI.Rsq(R_sq, k=K, n=N)[4]]
}
### add confidence intervals calculated with bootstrap: https://www.statmethods.net/advstats/bootstrapping.html
# 999 bootstrap replicates per quartile; rsq.R2 comes from the sourced
# Rsq_R2.R script.
results.UKB_afr<-vector('list', length(PGS3_UKB_afr))
names(results.UKB_afr)<-names(PGS3_UKB_afr)
for (I in names(PGS3_UKB_afr)){
	results.UKB_afr[[I]]<-vector('list', length(a1[[I]])+1)
	lapply(a1[[I]], function(i) boot(data=PGS3_UKB_afr[[I]][Quantile==i], statistic=rsq.R2,
	R=999, formula1=Height~Sex+Age+AGE2+EUR_ANC, formula2=Height~Sex+Age+AGE2+EUR_ANC+PGS))-> results.UKB_afr[[I]]
	cat(I)
	cat(' done\n')
}
# Append the whole-sample bootstrap as a fifth ("total") element.
for (I in names(PGS3_UKB_afr)){
	tp <- boot(data=PGS2_UKB_afr[[I]], statistic=rsq.R2, R=999, formula1=Height~Sex+Age+AGE2+EUR_ANC, formula2=Height~Sex+Age+AGE2+EUR_ANC+PGS)
	list.append(results.UKB_afr[[I]], tp)-> results.UKB_afr[[I]]
	names(results.UKB_afr[[I]])<-c(a1[[I]], "total")
	cat(' done\n')
}
saveRDS(PGS3_UKB_afr, file='~/height_prediction/imputed/output/PGS3_UKB_afr.Rds')
saveRDS(results.UKB_afr, file='~/height_prediction/imputed/output/results.UKB_afr.Rds')
#confidence intervals (normal, basic and percentile bootstrap CIs)
boots.ci.UKB_afr<-lapply(results.UKB_afr, function(Y) lapply(Y, function(X) boot.ci(X, type = c("norm", 'basic', "perc"))))
names(boots.ci.UKB_afr)<-names(results.UKB_afr)
# Parse the quartile interval labels (e.g. "(0.1,0.2]") into numeric
# lower/upper ancestry bounds (HVB_L/HVB_U) and attach the bootstrap CIs.
for (I in names(PGS3_UKB_afr)){
	B_UKB_afr[[I]][1:4,]-> a
	B_UKB_afr[[I]][5,]-> b
	a[,HVB_L:=sapply(a$Quant, function(X) as.numeric(gsub("\\]","",gsub("\\(","",gsub("\\[","",strsplit(X,",")[[1]])))))[1,]]
	a[,HVB_U:=sapply(a$Quant, function(X) as.numeric(gsub("\\]","",gsub("\\(","",gsub("\\[","",strsplit(X,",")[[1]])))))[2,]]
	b[,HVB_L:=1]
	b[,HVB_U:=1]
	rbind(a,b)->B_UKB_afr[[I]]
	B_UKB_afr[[I]][, Dataset:='UKB_AFR']
	B_UKB_afr[[I]][, boots_norm_L:=sapply(1:5, function(X) boots.ci.UKB_afr[[I]][[X]]$normal[2])]
	B_UKB_afr[[I]][, boots_norm_U:=sapply(1:5, function(X) boots.ci.UKB_afr[[I]][[X]]$normal[3])]
	B_UKB_afr[[I]][, boots_perc_L:=sapply(1:5, function(X) boots.ci.UKB_afr[[I]][[X]]$perc[4])]
	B_UKB_afr[[I]][, boots_perc_U:=sapply(1:5, function(X) boots.ci.UKB_afr[[I]][[X]]$perc[5])]
	B_UKB_afr[[I]][, boots_basic_L:=sapply(1:5, function(X) boots.ci.UKB_afr[[I]][[X]]$basic[4])]
	B_UKB_afr[[I]][, boots_basic_U:=sapply(1:5, function(X) boots.ci.UKB_afr[[I]][[X]]$basic[5])]
}
saveRDS(B_UKB_afr, file="~/height_prediction/imputed/output/B_UKB_afr.Rds")
###################################
##now UKB_eur: same pipeline for the European-ancestry panel (no ancestry
##quantiles; EUR_ANC is fixed at 1).
PGS_UKB_eur<-vector('list', 35)
names(PGS_UKB_eur)<-nam
for(N in nam){
	readRDS(paste0('~/height_prediction/imputed/output/PGS2', N, '_UKB.Rds'))[[2]]-> PGS_UKB_eur[[N]]
}
#read in phenotype data
fread('~/height_prediction/input/ukb_eur/UKB_EUR_pheno.txt')-> Pheno_UKB_eur
###########
##############
#add PGS to Pheno table in order to be able to make multiple analyses
Pheno_UKB_eur[,ID:=paste0(ID, "_", ID)]
setkey(Pheno_UKB_eur, ID)
PGS2_UKB_eur<-vector('list', 35)
names(PGS2_UKB_eur)<-nam
for(N in nam){
	data.table(ID=names(PGS_UKB_eur[[N]]), PGS=unlist(PGS_UKB_eur[[N]]))-> PGS2_UKB_eur[[N]]
	setkey(PGS2_UKB_eur[[N]], ID)
	PGS2_UKB_eur[[N]][Pheno_UKB_eur, nomatch=0]-> PGS2_UKB_eur[[N]]
	PGS2_UKB_eur[[N]][,EUR_ANC:=1]
	PGS2_UKB_eur[[N]][,AGE2:=Age^2]
	PGS2_UKB_eur[[N]][,Height:=Height*100]
	PGS2_UKB_eur[[N]]$Sex<-as.factor(PGS2_UKB_eur[[N]]$Sex)
}
lapply(PGS2_UKB_eur, function(X) lm(Height~Sex, X))-> lm1_UKB_eur
lapply(PGS2_UKB_eur, function(X) lm(Height~PGS, X))-> lm2_UKB_eur
lapply(PGS2_UKB_eur, function(X) lm(Height~Age, X))-> lm3_UKB_eur
lapply(PGS2_UKB_eur, function(X) lm(Height~AGE2,X))-> lm4_UKB_eur
lapply(PGS2_UKB_eur, function(X) lm(Height~PGS+Age, X))-> lm5_UKB_eur
lapply(PGS2_UKB_eur, function(X) lm(Height~PGS+AGE2,X))-> lm6_UKB_eur
lapply(PGS2_UKB_eur, function(X) lm(Height~Sex+Age+AGE2, X))-> lm7_UKB_eur
lapply(PGS2_UKB_eur, function(X) lm(Height~Sex+Age+AGE2+PGS, X))-> lm8_UKB_eur
partial_R2_eur<-lapply(nam, function(X) partial.R2(lm7_UKB_eur[[X]],lm8_UKB_eur[[X]])) #
names(partial_R2_eur)<-nam
#combine all in a table comparing imputed vs non-imputed partial R^2 and
#SNP counts for both panels.
readRDS('~/height_prediction/gwas/ukb_afr/output/Nr_SNPs_UKB_afr.Rds')[Name %in% paste0('phys_', nam)]-> ukb_afr
readRDS('/project/mathilab/bbita/gwas_admix/new_height/Nr_SNPs_UKB.Rds')[Name %in% paste0('phys_', nam)]-> ukb_eur #need to update this path
setkey(ukb_afr, Name)
setkey(ukb_eur, Name)
dt<-data.table(Name=paste0('phys_',nam), UKB_afr_imp=unlist(partial_R2), UKB_eur_imp=unlist(partial_R2_eur), Nr_imp=unlist(lapply(nam, function(X) nrow(do.call(rbind,readRDS(paste0('~/height_prediction/imputed/output/UKB_vec_all_', X,'.Rds')))))))
setkey(dt, Name)
setkey(dt, Name)[ukb_afr][ukb_eur]-> dt2
dt2[, UKB_afr:=Part_R2]
dt2[, UKB_eur:=i.Part_R2]
dt2[,i.Nr:=NULL][,Part_R2:=NULL][,i.Part_R2:=NULL]
dt2[,eur_diff:=UKB_eur_imp-UKB_eur]
dt2[,afr_diff:=UKB_afr_imp-UKB_afr]
dt2[,Nr_diff:=Nr_imp-Nr]
saveRDS(dt2,'~/height_prediction/imputed/output/comparison_ukb.Rds')
|
af60efdfa1e646e6dcc29a94719bc61cbaa7b5a5 | 38686032b524267b71c6522db054312c9c9cd43b | /R/DE_timepoints.R | 12720fd8319ece3bc9842133a5c4d7ee7818ba8d | [
"BSD-3-Clause"
] | permissive | NelleV/moanin | 538b8737554d12942f9e5aff57ebda14ed928775 | accf9276a1ac675c41e578a30763fbf9ccc74a46 | refs/heads/master | 2021-08-02T23:13:40.009688 | 2021-07-28T07:11:56 | 2021-07-28T07:11:56 | 198,623,822 | 5 | 1 | NOASSERTION | 2021-02-15T23:37:28 | 2019-07-24T11:39:47 | R | UTF-8 | R | false | false | 18,493 | r | DE_timepoints.R | setGeneric("DE_timepoints",
function(object,...) { standardGeneric("DE_timepoints")})
setGeneric("create_timepoints_contrasts",
function(object,...) { standardGeneric("create_timepoints_contrasts")})
setGeneric("create_diff_contrasts",
function(object,...) { standardGeneric("create_diff_contrasts")})
#' Fit weekly differential expression analysis
#'
#' @inheritParams DE_timecourse
#' @param add_factors A character vector of additional variables to add to the
#' design. See details.
#' @return A \code{data.frame} with four columns for each of the contrasts
#' given in \code{contrasts}, corresponding to the raw p-value of the contrast
#' for that gene (\code{_pval}), the adjusted p-value (\code{_qval}),
#' the t-statistic of the contrast (\code{_stat), and the
#' estimate of log-fold-change (\code{_lfc}). The adjusted p-values are
#' FDR-adjusted based on the Benjamini-Hochberg method, as implemented in
#' \code{\link[stats]{p.adjust}}. The adjustment is done across all p-values
#' for all contrasts calculated.
#' @aliases create_timepoints_contrasts DE_timepoints,Moanin-method
#' @aliases create_timepoints_contrasts,Moanin-method
#' @name DE_timepoints
#' @importFrom edgeR DGEList calcNormFactors
#' @importFrom limma voom lmFit contrasts.fit eBayes
#' @details By default the formula fitted for each gene is
#' \preformatted{
#' ~ Group*Timepoint +0
#' }
#' If the user gives values to \code{add_factors}, then the vector of character
#' values given in \code{add_factors} will be \emph{added} to the default formula.
#' So that \code{add_factors="Replicate"} will change the formula to
#' \preformatted{
#' ~ Group*Timepoint +0 + Replicate
#' }
#' This allows for a small amount of additional complexity to control
#' for other variables. Users should work directly with limma for
#' more complex models.
#' @details If \code{use_voom_weights=TRUE}, the data is given directly to limma
#' via \code{assay(object)}. The specific series of
#' calls is:
#' \preformatted{
#' y <- edgeR::DGEList(counts=assay(object))
#' y <- edgeR::calcNormFactors(y, method="upperquartile")
#' v <- limma::voom(y, design, plot=FALSE)
#' v <- limma::lmFit(v)
#' }
#' @details If the user set \code{log_transform=TRUE} in the creation of the
#' \code{Moanin} object, this will not have an impact in the analysis if
#' \code{use_voom_weights=TRUE}. Only if \code{use_voom_weights=FALSE} will
#' this matter, in which case the log of the input data will be given to a
#' regular call to \code{limma}:
#' \preformatted{
#' y<-get_log_data(object)
#' v <- limma::lmFit(y, design)
#' }
#' @examples
#' data(exampleData)
#' moanin <- create_moanin_model(data=testData, meta=testMeta)
#' # compare groups within each timepoint
#' contrasts <- create_timepoints_contrasts(moanin,"C", "K",
#' type="per_timepoint_group_diff")
#' head(contrasts)
#' deTimepoints=DE_timepoints(moanin,
#' contrasts=contrasts, use_voom_weights=FALSE)
#' head(deTimepoints)
#' # Control for replicate variable:
#' deTimepoints=DE_timepoints(moanin,
#' contrasts=contrasts, add_factors="Replicate",
#' use_voom_weights=FALSE)
#' head(deTimepoints)
#'
#' # compare adjacent timepoints within each group
#' contrastsDiff <- create_timepoints_contrasts(moanin,"C",
#' type="per_group_timepoint_diff")
#' deDiffTimepoints=DE_timepoints(moanin,
#' contrasts=contrastsDiff,
#' use_voom_weights=FALSE)
#' # provide the sets of timepoints to compare:
#' contrastsDiff2<-create_timepoints_contrasts(moanin,"C",
#' timepoints_before=c(72,120),timepoints_after=c(168,168),
#' type="per_group_timepoint_diff")
#' deDiffTimepoints2=DE_timepoints(moanin,
#' contrasts=contrastsDiff2,
#' use_voom_weights=FALSE)
#'
#' # Compare selected timepoints across groups.
#' # This time we also return format="data.frame" which helps us keep track of
#' # the meaning of each contrast.
#' contrastsGroupDiff<-create_timepoints_contrasts(moanin,"C", "K",
#' timepoints_before=c(72,120),timepoints_after=c(168,168),
#' type="group_and_timepoint_diff",format="data.frame")
#' head(contrastsGroupDiff)
#' deGroupDiffTimepoints=DE_timepoints(moanin,
#' contrasts=contrastsGroupDiff$contrasts,
#' use_voom_weights=FALSE)
#' @export
setMethod("DE_timepoints","Moanin",
          function(object,
                   contrasts,add_factors=NULL,
                   use_voom_weights=TRUE){
              # Build the per-gene design ~WeeklyGroup + 0, optionally adding
              # user-supplied covariates (e.g. "Replicate") to the formula.
              designText<-"~WeeklyGroup + 0"
              if(!is.null(add_factors)){
                  designText<-paste(designText,"+",
                                    paste(add_factors,collapse="+"))
              }
              designFormula<-stats::as.formula(designText)
              design <- try(stats::model.matrix(designFormula, data=colData(object)), silent=TRUE)
              if( inherits(design, "try-error")){
                  stop("Error in creating the design matrix. Error:\n",design)
              }
              # Strip the "WeeklyGroup" prefix so design columns match the
              # level names used in the user-provided contrast strings.
              cleaned_colnames <- gsub("WeeklyGroup", "", colnames(design))
              colnames(design) <- cleaned_colnames
              allcontrasts <- limma::makeContrasts(
                  contrasts=contrasts,
                  levels=design)
              # Fit either via voom (counts normalized with edgeR upper-quartile
              # factors, precision weights) or a plain limma fit on log data.
              if(use_voom_weights){
                  y <- edgeR::DGEList(counts=assay(object))
                  y <- edgeR::calcNormFactors(y, method="upperquartile")
                  v <- limma::voom(y, design, plot=FALSE)
                  v <- limma::lmFit(v)
              }else{
                  y<-get_log_data(object)
                  v <- limma::lmFit(y, design)
              }
              fit <- limma::contrasts.fit(v, allcontrasts)
              fit <- limma::eBayes(fit)
              # BH adjustment is applied across ALL p-values of ALL contrasts
              # at once; the matrix shape/colnames are then restored.
              contrast_names <- colnames(fit$p.value)
              fit$adj.p.value <- stats::p.adjust(fit$p.value, method="BH")
              dim(fit$adj.p.value) <- dim(fit$p.value)
              colnames(fit$adj.p.value) <- contrast_names
              # For contrast ii, assemble the four output columns
              # (<contrast>_pval/_qval/_stat/_lfc) documented in the roxygen.
              combine_results <- function(ii, fit2){
                  contrast_formula <- contrasts[ii]
                  de_analysis <- data.frame(row.names=row.names(object))
                  base_colname <- gsub(" ", "", contrast_formula, fixed=TRUE)
                  colname_pval <- paste(base_colname, "_pval", sep="")
                  colname_qval <- paste(base_colname, "_qval", sep="")
                  colname_lfc <- paste(base_colname, "_lfc", sep="")
                  colname_stat <- paste(base_colname, "_stat", sep="")
                  de_analysis[colname_pval] <- fit2$p.value[, contrast_formula]
                  de_analysis[colname_qval] <- fit2$adj.p.value[, contrast_formula]
                  # topTable with sort.by="none"/adjust="none" keeps gene order
                  # and recovers the moderated t and logFC for this contrast.
                  tt <- limma::topTable(
                      fit2, coef=ii, number=length(rownames(fit2$coef)),
                      p.value=1, adjust.method="none", sort.by="none",
                      genelist=rownames(fit2$coef))
                  de_analysis[colname_stat] <- tt$t
                  de_analysis[colname_lfc] <- tt$logFC
                  return(de_analysis)
              }
              all_results <- do.call("cbind",
                                     lapply(seq_along(contrast_names),
                                            combine_results, fit2=fit))
              return(all_results)
          }
)
#' Creates pairwise contrasts for all timepoints
#'
#' @param group1 First group to consider in making contrasts, character value
#' that must match a value of the grouping variable contained in
#' \code{moanin_model}.
#' @param group2 Second group to consider in making contrasts, character value
#' that must match a value of the grouping variable contained in
#' \code{moanin_model}, unless type=="per_group_timepoint_diff", in which case
#' should be NULL (only \code{group1} is used in comparison)
#' @param timepoints vector of timepoints to compare. Must be contained in the
#' \code{time_variable} of the \code{moanin} object.
#' @param timepoints_after for \code{type} equal to
#' \code{"per_group_timepoint_diff"} or, \code{"group_and_timepoint_diff"},
#' the set of timepoints to compare, see details. By default, taken from the
#' \code{timepoints} variable.
#' @param timepoints_before for \code{type} equal to
#' \code{"per_group_timepoint_diff"} or, \code{"group_and_timepoint_diff"},
#' the set of timepoints to compare, see details. By default, taken from the
#' \code{timepoints} variable.
#' @param format the choice of "vector" (the default) for
#' \code{create_timepoints_contrasts} returns just the character vector of
#' contrasts. If instead \code{format="data.frame"} then a data.frame is
#' return that identifies the timepoint and group comparisons involved in each
#' contrast. If this is the desired output, then the input to
#' \code{DE_timepoints} should be the column corresponding to the contrast.
#' See examples.
#' @param type the type of contrasts that should be created. See details.
#' @details \code{create_timepoints_contrasts} creates the needed contrasts for
#' comparing groups or timepoints in the format needed for
#' \code{DE_timepoints} (i.e. \code{\link[limma]{makeContrasts}}), to which the
#' contrasts are ultimately passed. The time points and groups are determined
#' by the levels of the \code{grouping_variable} and the values of
#' \code{time_variable} in the \code{moanin_object} provided by the user.
#' @details Three different types of contrasts are created:
#' \itemize{
#' \item{"per_timepoint_group_diff"}{Contrasts that compare the groups within a
#' timepoint}
#' \item{"per_group_timepoint_diff"}{Contrasts that compare two timepoints
#' within a group}
#' \item{"group_and_timepoint_diff"}{Contrasts that compare the
#' difference between two timepoints between two levels of the
#' \code{group_variable} of the \code{Moanin} object. These are contrasts in
#' the form (TP i - TP (i-1))[Group1] - (TP i - TP (i-1))[Group2].}
#' }
#' @export
#' @return \code{create_timepoints_contrasts}: a character vector with each
#' element of the vector corresponding to a contrast to be compared.
#' @seealso \code{\link[limma]{makeContrasts}}
#' @rdname DE_timepoints
#' @importFrom utils head tail
#' @export
setMethod("create_timepoints_contrasts","Moanin",
    function(object, group1, group2=NULL,
        type=c("per_timepoint_group_diff","per_group_timepoint_diff",
            "group_and_timepoint_diff"),
        timepoints=sort(unique(time_variable(object))),
        timepoints_before=head(sort(timepoints),-1),
        timepoints_after=tail(sort(timepoints),-1),
        format=c("vector","data.frame")
        ){
        type <- match.arg(type)
        format <- match.arg(format)
        observed_times <- time_variable(object)
        # Validate the inputs required by the requested contrast type, then
        # delegate to the matching helper. Each helper returns a data.frame
        # with columns contrasts/timepoints/group.
        result <- switch(type,
            per_timepoint_group_diff = {
                if(is.null(group2))
                    stop("cannot choose type='per_timepoint_group_diff' and give a NULL value for argument `group2`")
                if(!all(timepoints %in% observed_times))
                    stop("timepoints must consist only of timepoints in the time_variable of Moanin object")
                pertimepoint_contrast(object=object, group1=group1,
                    group2=group2, timepoints=timepoints)
            },
            group_and_timepoint_diff = {
                if(is.null(group2))
                    stop("cannot choose type='group_and_timepoint_diff' and give a NULL value for argument `group2`")
                if(!all(timepoints_before %in% observed_times))
                    stop("timepoints_before must consist only of timepoints in the time_variable of Moanin object")
                if(!all(timepoints_after %in% observed_times))
                    stop("timepoints_after must consist only of timepoints in the time_variable of Moanin object")
                timepointdiff_contrasts(object=object, group1=group1,
                    group2=group2, timepoints_before=timepoints_before,
                    timepoints_after=timepoints_after)
            },
            per_group_timepoint_diff = {
                if(!is.null(group2))
                    stop("cannot choose type='per_group_timepoint_diff' and give a value for argument `group2`")
                timepointdiff_contrasts(object=object, group1=group1,
                    group2=NULL, timepoints_before=timepoints_before,
                    timepoints_after=timepoints_after)
            })
        # "vector" returns only the contrast strings; "data.frame" keeps the
        # timepoint/group bookkeeping columns as well.
        if(format == "data.frame") result else result$contrasts
    })
# Internal helper: build one "group1.t-group2.t" contrast per requested
# timepoint, skipping (with a warning) timepoints where one of the two
# groups has no samples. Returns a data.frame with columns
# contrasts/timepoints/group.
pertimepoint_contrast<-function(object, group1, group2,
        timepoints){
    # Restrict to the two groups of interest; timepoints not observed in
    # this subset are dropped by .which_timepoints (warning or error).
    object <- object[,group_variable(object) %in% c(group1, group2)]
    all_timepoints <- sort(unique(time_variable(object)))
    timepoints<-timepoints[.which_timepoints(timepoints,all_timepoints,argname="timepoints")]
    contrasts <- rep(NA, length(timepoints))
    msg<-""
    foundMissing<-FALSE
    for(i in seq_along(timepoints)){
        # First, check that the two conditions have been sampled for this
        # timepoint
        timepoint <- timepoints[i]
        submeta <- object[,time_variable(object) == timepoint]
        if(length(unique(time_by_group_variable(submeta))) == 2){
            # Both groups present: emit the group1-minus-group2 contrast.
            contrasts[i] <- paste0(group1, ".", timepoint, "-", group2, ".",
                timepoint)
        }else if(length(unique(time_by_group_variable(submeta))) == 1){
            # Only one group sampled here: record which one is missing and
            # accumulate the message for a single warning at the end.
            if(unique(group_variable(submeta))[1] == group1){
                missing_condition <- group2
            }else{
                missing_condition <- group1
            }
            msg <- paste0(msg,paste("timepoint",
                timepoint, "is missing in condition",
                missing_condition,"\n"))
            foundMissing<-TRUE
        }
    }
    if(foundMissing) warning(msg)
    # Drop timepoints for which no contrast could be formed (left NA above).
    timepoints<-timepoints[!is.na(contrasts)]
    contrasts<-contrasts[!is.na(contrasts)]
    return(data.frame("contrasts"=contrasts,"timepoints"=as.character(timepoints),"group"=paste0(group1,"-",group2)))
}
# Internal helper: indices of `timepoints` that occur in `possibles`.
# Warns (naming `argname`) when some requested timepoints are absent, and
# errors when none of them are present.
.which_timepoints <- function(timepoints, possibles, argname) {
    is_found <- timepoints %in% possibles
    keep_idx <- which(is_found)
    if (any(!is_found)) {
        if (length(keep_idx) == 0) {
            stop("None of the requested timepoints measured for these groups")
        }
        warning("removing timepoints in ", argname,
                " not measured for these groups\n")
    }
    keep_idx
}
# Internal helper: build contrasts comparing paired timepoints
# (timepoints_after[i] vs timepoints_before[i]). With group2=NULL the
# contrast is the within-group difference; otherwise it is the
# difference-of-differences between group1 and group2. Returns a
# data.frame with columns contrasts/timepoints/group.
timepointdiff_contrasts<-function(object, group1, group2,
    timepoints_before=NULL,timepoints_after=NULL){
    object <- object[,group_variable(object) %in% c(group1, group2)]
    all_timepoints <- sort(unique(time_variable(object)))
    ### Checks for timepoints: both vectors must be given, or neither
    ### (in which case adjacent observed timepoints are paired).
    if((is.null(timepoints_before) & !is.null(timepoints_after)) ||
        (!is.null(timepoints_before) & is.null(timepoints_after))){
        stop("either timepoints_before and timepoints_after must be given, or both must be NULL")
    }
    if(is.null(timepoints_before)){
        timepoints_before<-head(all_timepoints,-1)
        timepoints_after<-tail(all_timepoints,-1)
    }
    # Keep only pairs whose BOTH members are observed; intersect() keeps
    # indices valid in both vectors (may warn or error inside helper).
    wh_before<-.which_timepoints(timepoints_before,all_timepoints,"timepoints_before")
    wh_after<-.which_timepoints(timepoints_after,all_timepoints,"timepoints_after")
    wh<-intersect(wh_before,wh_after)
    timepoints_before<-timepoints_before[wh]
    timepoints_after<-timepoints_after[wh]
    contrasts <- rep(NA, length(timepoints_before))
    # Will give a tally of timepoint pairs can't do
    msg<-""
    foundMissing<-FALSE
    for(i in seq_along(timepoints_before)){
        tpbefore <- timepoints_before[i]
        tpafter<-timepoints_after[i]
        # First, check that the two conditions have been sampled
        # for both timepoints
        # Could do all at once, but not worth effort
        combos<-expand.grid(tp=c(tpbefore,tpafter),
            groups=c(group1,group2))
        # Sample count for each (timepoint, group) combination; any zero
        # means this pair cannot be contrasted.
        npercombo<-sapply(1:nrow(combos),function(i){
            tp<-combos[i,1]
            gp<-as.character(combos[i,2])
            sum(time_variable(object)==tp & group_variable(object)==gp)
        })
        if(any(npercombo==0)){
            msg <- paste(msg,"Cannot compare",tpbefore,"and",tpafter,
                "because one of the timepoints is missing in one of the conditions.\n")
            foundMissing<-TRUE
        }else{
            # (after - before)[group1] - (after - before)[group2], or the
            # plain within-group difference when group2 is NULL.
            if(!is.null(group2))
                contrasts[i] <- paste0(group1, ".", tpafter, "-", group1, ".", tpbefore,"-",group2,".",tpafter,"+",group2,".",tpbefore)
            else
                contrasts[i] <- paste0(group1, ".", tpafter, "-", group1, ".", tpbefore)
        }
    }
    if(foundMissing) warning(msg)
    # Drop pairs for which no contrast could be formed (left NA above).
    timepoints_before<-timepoints_before[!is.na(contrasts)]
    timepoints_after<-timepoints_after[!is.na(contrasts)]
    if(!is.null(group2)) group<-paste0(group1,"-",group2) else group<-group1
    contrasts<-contrasts[!is.na(contrasts)]
    timepoints<-paste0(timepoints_after,"-",timepoints_before)
    return(data.frame("contrasts"=contrasts,"timepoints"=as.character(timepoints),"group"=group))
}
#' Creates barplot of results of per-timepoint comparison
#'
#' @param de_results results from \code{\link{DE_timepoints}}
#' @param type type of p-value to count ("qval" or "pval")
#' @param labels labels to give each bar
#' @param threshold cutoff for counting gene as DE
#' @param xlab x-axis label
#' @param ylab y-axis label
#' @param main title of plot
#' @param ... arguments passed to \code{\link{barplot}}
#' @details \code{create_timepoints_contrasts} creates the needed contrasts for
#' comparing two groups for every timepoint in the format needed for
#' \code{DE_timepoints} (i.e. \code{\link[limma]{makeContrasts}}, to which the
#' contrasts are ultimately passed). The time points are determined by the
#' meta data in the \code{moanin_object} provided by the user.
#' @return This is a plotting function, and returns (invisibly) the results of
#' \code{\link{barplot}}
#' @aliases perWeek_barplot
#' @examples
#' data(exampleData)
#' moanin <- create_moanin_model(data=testData, meta=testMeta)
#' contrasts <- create_timepoints_contrasts(moanin, "C", "K")
#' deTimepoints <- DE_timepoints(moanin,
#' contrasts=contrasts, use_voom_weights=FALSE)
#' perWeek_barplot(deTimepoints)
#' @export
perWeek_barplot <- function(de_results, type=c("qval","pval"),
                            labels=NULL, threshold=0.05,
                            xlab="Timepoint", ylab="Number of DE genes", main="",
                            ...){
    type <- match.arg(type)
    # Pick out the columns of the requested p-value type.
    all_cols <- colnames(de_results)
    pval_cols <- all_cols[grepl(type, all_cols)]
    # Default bar labels: the third dot-separated field of each column name
    # (the timepoint), after stripping the "_qval"/"_pval" suffix.
    if (is.null(labels)) {
        suffix <- paste0("_", type)
        trimmed_cols <- gsub(suffix, "", pval_cols)
        labels <- vapply(strsplit(trimmed_cols, "\\."),
                         FUN=function(parts) parts[[3]],
                         character(1))
    }
    # Count genes below the threshold in each contrast and draw the bars.
    de_counts <- matrixStats::colSums2(de_results[, pval_cols] < threshold)
    graphics::barplot(de_counts,
                      names.arg=labels, xlab=xlab, ylab=ylab,
                      main=main, ...)
}
|
605faff7850147966c0445bc6e7c58486dfa43f3 | 2b8d6d163000406d88b0f3dc0db64a9c740e4151 | /projectver3.R | 86682e8de26d41eb891a10304b59fafa3a4f97df | [] | no_license | ridhideo14/titanic_545 | 2c2f1c77ae6efc33e91ae599204ca3ee1b03f8f8 | b22625f1480da141a33e41a1561d7004680602ef | refs/heads/master | 2021-08-29T12:30:13.618999 | 2017-12-14T01:10:49 | 2017-12-14T01:10:49 | 108,551,637 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,130 | r | projectver3.R | mydata = read.csv("train.csv")
mydata
# Passenger names are in column 4 ("Name") of the training data.
names<-mydata[,4]
#names_characterized<-as.character(names[800])
#splitted_names<-strsplit(names_characterized,",")
# Extract every passenger's surname: the text before the first comma.
splitted_names<-c()
for(i in 1:length(names)){
  names_characterized<-as.character(names[i])
  #print(names_characterized)
  splitted_name<-strsplit(names_characterized,",")
  print(splitted_name)
  list5<-splitted_name[[1]]
  list6<-list5[1]
  splitted_names[i]<-list6
  print(list6)
  splitted_names[i]
}
# Same extraction, repeated by hand for passenger 40.
names_characterized<-as.character(names[40])
splitted_name<-strsplit(names_characterized,",")
list5<-splitted_name[[1]]
list6<-list5[1]
splitted_names[40]<-list6
splitted_names[40]
str(mydata)
library('ggplot2') # visualization
library('tidyverse')
dim(mydata)
str(mydata)
summary(mydata)
# Number of passengers with an unknown (empty) cabin.
sum(mydata$Cabin=="")
#people with same ticket are from same group
# grp: shared-ticket group id; 0 means the passenger travels alone.
mydata<-mutate(mydata, grp=rep(0,nrow(mydata)))#grp number, 0 =>single
uniq_ticket<-table(mydata$Ticket)
not_single<-uniq_ticket[uniq_ticket>1]
# Assign one group number per ticket that occurs more than once.
for(ii in 1:length(not_single)){
  this_ticket<-names(not_single)[ii]
  tickt_index<-which(mydata$Ticket==this_ticket)
  mydata$grp[tickt_index]<-ii
}
# Survival rate per ticket group.
mydata %>% group_by(grp) %>% summarise(sur=mean(Survived))->grp_stats
#extract title
# Pull the honorific out of "Surname, Title. Given names": delete
# everything up to ", " and everything from the first "." onward.
mydata$Title <- gsub('(.*, )|(\\..*)', '', mydata$Name)
table(mydata$Title)
# Collapse rare titles and French variants into five categories.
rare_title <- c('Dona', 'Lady', 'the Countess','Capt', 'Col', 'Don',
                'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer')
mydata$Title[mydata$Title == 'Mlle'] <- 'Miss'
mydata$Title[mydata$Title == 'Ms'] <- 'Miss'
mydata$Title[mydata$Title == 'Mme'] <- 'Mrs'
mydata$Title[mydata$Title %in% rare_title] <- 'Rare Title'
# Row indices for each title group.
Title_Miss<-which(mydata$Title=='Miss')
Title_Mr<-which(mydata$Title=='Mr')
Title_Mrs<-which(mydata$Title=='Mrs')
Title_Master<-which(mydata$Title=='Master')
Title_Rare<-which(mydata$Title=='Rare Title')
# Mean age per title group (NA ages excluded), used below for imputation.
Age_miss<-mydata$Age[Title_Miss]
mean_Age_miss<-mean(Age_miss, na.rm = TRUE)
Age_mr<-mydata$Age[Title_Mr]
mean_Age_mr<-mean(Age_mr, na.rm = TRUE)
Age_mrs<-mydata$Age[Title_Mrs]
mean_Age_mrs<-mean(Age_mrs, na.rm = TRUE)
Age_master<-mydata$Age[Title_Master]
mean_Age_master<-mean(Age_master, na.rm = TRUE)
Age_rare<-mydata$Age[Title_Rare]
mean_Age_rare<-mean(Age_rare, na.rm = TRUE)
# Impute each missing age with the mean age of the passenger's title group.
Age_missing<-which(is.na(mydata$Age))
mydata$Title[Age_missing[1]]
for(i in 1:length(Age_missing)){
  Title1<-mydata$Title[Age_missing[i]]
  if(Title1=='Mr'){
    mydata$Age[Age_missing[i]]<-mean_Age_mr
  }
  if(Title1=='Miss'){
    mydata$Age[Age_missing[i]]<-mean_Age_miss
  }
  if(Title1=='Mrs'){
    mydata$Age[Age_missing[i]]<-mean_Age_mrs
  }
  if(Title1=='Rare Title'){
    mydata$Age[Age_missing[i]]<-mean_Age_rare
  }
  if(Title1=='Master'){
    mydata$Age[Age_missing[i]]<-mean_Age_master
  }
}
library(ggplot2)
#plot(mydata$Age, mydata$Survived, data = mydata)
# Survival counts by sex and by passenger class.
ggplot(mydata, aes(Sex,fill = factor(Survived))) +
  geom_histogram(stat = "count")
ggplot(mydata, aes(Pclass,fill = factor(Survived))) +
  geom_histogram(stat = "count")
## Random Forest
# Candidate predictor names and their column positions in mydata.
# NOTE(review): the as.matrix() line below is immediately overwritten by
# the character vector on the next line, so it has no effect.
Attributes<-as.matrix(c(mydata$Pclass,mydata$Sex, mydata$Age, mydata$SibSp,mydata$Parch, mydata$Fare, mydata$Embarked))
Attributes<-c("Pclass","Sex", "Age", "SibSp","Parch","Fare", "Embarked")
Attribute_index<-c(3,5,6,7,8,10,12)
names(Attribute_index)<-Attributes
# NOTE(review): Attributes is now a plain character vector, so this
# two-dimensional index errors ("incorrect number of dimensions").
Attributes[,1]
# Pick two candidate features at random (toy random-forest split step).
random_number<-as.vector(sample(1:7,size=2))
random_number
feature1<-c()
for(i in 1:length(random_number)){
  rand<-random_number[i]
  feature<-Attributes[rand]
  feature1<-c(feature1,feature)
  # NOTE(review): entropy() is not defined or loaded anywhere in this
  # script -- these calls fail unless it comes from the workspace.
  entropy_value1<-entropy(mydata[,Attribute_index[random_number[1]]])
  entropy_value2<-entropy(mydata[,Attribute_index[random_number[2]]])
  if(entropy_value1>entropy_value2){
    best_node<-mydata[,Attribute_index[random_number[1]]]
  }
  else
  {
    best_node<-mydata[,Attribute_index[random_number[2]]]
  }
}
# NOTE(review): entropy_list and entropy_value are never initialised or
# assigned before this loop, so it cannot run as written.
for(i in 1:length(Attributes)){
  entropy_list<-c(entropy_list, entropy_value)
}
# for(i in 1:100){
#   random_number<-sample(1:7,size=2)
#   Attributes[,random_number[1]]
# }
|
8e7c34e859bb697c37486f215ebe02ece2e5e7c8 | a8def44ca5bb9d5a1435d3d853df90b28ac9c44b | /R/Training/dataframe/subset.R | fbf4bbd1ed6831e842c37e59fc78740bc57f4158 | [] | no_license | edyscom/dataviz | 706a15d001bb2da0de9f236cd99df5bcd147ddfe | 63acf2f045c01b057bf6ba698100138360b3c04f | refs/heads/main | 2023-08-23T01:57:28.191324 | 2021-10-26T06:45:28 | 2021-10-26T06:45:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 144 | r | subset.R |
# Read a tab-delimited table (with header) and keep only the rows where
# the values in column colA equal those in column colB.
mat <- read.table("input.tab", header = TRUE, sep = "\t")
mat

col1 <- "colA"
col2 <- "colB"
# Extract the columns as vectors with [[ ]]: the original compared
# one-column data frames (mat[col1] == mat[col2]), which yields a logical
# matrix that subset() only tolerates by accident.
sub <- subset(mat, subset = mat[[col1]] == mat[[col2]])
sub
|
621e20360c576567c2a0caeafe873ae813b22522 | 99c940c76b96b7a3577aac3dc70226aadf6f9935 | /step8-funcs.R | 85858d0e1d614c763d2b6486bfa387c700a2b08e | [] | no_license | tanpuekai/res-per-platform-on-toshiba | e22e036ef9967f45630e98a8ccc4f27149c72ab8 | 6baf27b0afcd64b9529a6185c19856240ba77670 | refs/heads/master | 2016-09-06T18:14:07.748981 | 2015-07-02T03:06:21 | 2015-07-02T03:06:21 | 38,408,037 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,060 | r | step8-funcs.R | library(ROCR)
library(randomForest)
############################
## Compute (and optionally plot) the ROC AUC of a probabilistic classifier.
##
## model:     fitted model supporting predict(..., type = "prob")
## dattest:   test set; column 1 holds the true labels, the rest features
## flag.plot: if TRUE, draw the ROC curve with a diagonal reference line
## Returns the AUC (also printed as a side effect).
rocauc <- function(model, dattest, flag.plot = F) {
  class_probs <- predict(model, type = "prob",
                         newdata = as.matrix(dattest[, -1, drop = F]))
  ## Probability of the positive class (second column of the prob matrix).
  pos_prob <- class_probs[, 2]
  roc_pred <- prediction(pos_prob, dattest[, 1])
  roc_curve <- performance(roc_pred, "tpr", "fpr")
  if (flag.plot == T) {
    ## NOTE(review): `k` and `i` are not arguments -- the plot title
    ## relies on globals set by the calling loop; confirm they exist.
    plot(roc_curve, main = c(k, i), col = 2, lwd = 2)
    abline(a = 0, b = 1, lwd = 2, lty = 2, col = "gray")
  }
  auc_perf <- performance(roc_pred, "auc")
  auc <- unlist(slot(auc_perf, "y.values"))
  print(auc)
  return(auc)
}
############################
## Build a training data.frame from an .RData file that contains the
## objects uniqGene, DegMat, MeanMat and VarMat (brought into this
## function's environment by load()). Rows with missing values in any of
## the three matrices for at least half of the samples are dropped, and
## the first column is renamed "genes".
generate.set <- function(file.str) {
  print(file.exists(file.str))
  load(file.str)  # loads uniqGene, DegMat, MeanMat, VarMat
  ## Per-row counts of missing entries in each matrix.
  na.deg <- apply(is.na(DegMat), 1, sum)
  na.mean <- apply(is.na(MeanMat), 1, sum)
  na.var <- apply(is.na(VarMat), 1, sum)
  ## Keep rows observed in more than half of the samples everywhere.
  keep <- which(na.deg < 1/2 * ncol(MeanMat) &
                  na.mean < 1/2 * ncol(MeanMat) &
                  na.var < 1/2 * ncol(MeanMat))
  full.set <- data.frame(uniqGene, DegMat, MeanMat, VarMat)
  trainset <- full.set[keep, ]
  names(trainset)[1] <- "genes"
  return(trainset)
}
############################
|
a5b4a3b6d4c25aeafe1537545891488a3e600638 | 4a284872a3105b96d51bd724925a473cc6c11f4d | /man/pileupMT.Rd | dadc30e6ca16d21cd9313152fb26bfa4818bd426 | [] | no_license | heoly32/MTseeker | 601872f0f9963cea7ba14fc8401724ffa3e0aa16 | b41db73742a240931c86ae8bcb614b8f44fc81a2 | refs/heads/master | 2023-02-23T01:13:46.723190 | 2021-03-04T22:33:00 | 2021-03-04T22:33:00 | 172,066,324 | 0 | 0 | null | 2019-02-22T12:55:40 | 2019-02-22T12:55:40 | null | UTF-8 | R | false | true | 1,428 | rd | pileupMT.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pileupMT.R
\name{pileupMT}
\alias{pileupMT}
\title{Pileup the mitochondrial reads in a BAM, for variant calling or CN purposes.}
\usage{
pileupMT(
bam,
sbp = NULL,
pup = NULL,
callIndels = TRUE,
ref = c("rCRS", "GRCh37", "GRCh38", "hg38", "GRCm38", "C57BL/6J", "NC_005089",
"mm10"),
...
)
}
\arguments{
\item{bam}{BAM (must be indexed!) file name or object with @bam slot}
\item{sbp}{optional ScanBamParam object (autogenerated if missing)}
\item{pup}{optional PileupParam object (autogenerated if missing)}
\item{callIndels}{call indels? (This can reduce compute and RAM if FALSE.)}
\item{ref}{aligned reference mitogenome (default is rCRS/GRCh37+)}
\item{...}{additional args to pass on to the pileup() function}
}
\value{
\preformatted{ an MVRanges object, constructed from pileup results
}
}
\description{
If a BAM filename is given, but no ScanBamParam, scanMT(bam) will be called.
Human mitochondrial genomes (GRCh37+ and hg38+) are fully supported, mouse
mitochondrial genomes (C57BL/6J aka NC_005089) are only somewhat supported.
}
\examples{
library(MTseekerData)
BAMdir <- system.file("extdata", "BAMs", package="MTseekerData")
patientBAMs <- list.files(BAMdir, pattern="^pt.*.bam$")
(bam <- file.path(BAMdir, patientBAMs[1]))
(sbp <- scanMT(bam))
(mvr <- pileupMT(bam, sbp=sbp, callIndels=TRUE, ref="rCRS"))
}
|
2d6bbf8d11973a0bfdb05be183f734e828254572 | 1fbb41fd4b995739d0151d7bd7c30766ab62e7d1 | /plot1.R | 2dfe8495e83a6d46cf599cb1b385caf109f857d7 | [] | no_license | cyrussafaie/ExData_Plotting1 | 8681b303ea08e743c163937c5f5b634230d26974 | e1e7687996a1cc6afcead39fa9b170aadf9ce827 | refs/heads/master | 2021-01-24T00:54:51.464257 | 2016-06-19T04:06:13 | 2016-06-19T04:06:13 | 61,257,617 | 0 | 0 | null | 2016-06-16T03:00:34 | 2016-06-16T03:00:33 | null | UTF-8 | R | false | false | 492 | r | plot1.R | hh_inc=read.table("household_power_consumption.txt", sep = ";",header = T,dec = ".",stringsAsFactors = F)
str(hh_inc)
# Keep only the two target days (dates are stored as d/m/Y strings).
hh_inc_sub <- hh_inc[hh_inc$Date %in% c("1/2/2007","2/2/2007") ,]
# Combine date and time into a single timestamp.
datetime <- strptime(paste(hh_inc_sub$Date, hh_inc_sub$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
hh_inc <- cbind(datetime, hh_inc_sub)
# The column arrives as character (stringsAsFactors = F above); convert it
# once here instead of inside the hist() call so any coercion warning is
# visible at the point of conversion.
# NOTE(review): non-numeric entries (the dataset reportedly uses "?" for
# missing readings) become NA -- consider na.strings = "?" in read.table.
global_active_power <- as.numeric(hh_inc$Global_active_power)
png("plot1.png", width=480, height=480)
hist(global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
ca94a0be71c3a714a1c0e6cd085fd987e096852a | 02637adf9f44155963e4ede2ca0ac54caaac732e | /man/cohens.d.Rd | 5d9d84bef21bc3ed44a864e05302311c33004c67 | [] | no_license | rikhuijzer/codex | c40dac077935955036cffad3c675a9ce8438e1aa | c4e12f2bf450506cc4886426aad94ca1bad9f319 | refs/heads/master | 2022-09-27T14:52:02.226295 | 2020-05-31T16:59:42 | 2020-05-31T16:59:42 | 266,567,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 456 | rd | cohens.d.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/math.R
\name{cohens.d}
\alias{cohens.d}
\title{Returns Cohen's d}
\usage{
cohens.d(n1, n2, m1, m2, s1, s2)
}
\arguments{
\item{n1}{Sample size for group 1.}
\item{n2}{Sample size for group 2.}
\item{m1}{Mean for group 1.}
\item{m2}{Mean for group 2.}
\item{s1}{Standard deviation for group 1.}
\item{s2}{Standard deviation for group 2.}
}
\description{
Returns Cohen's d
}
|
1752889c791fd53490882896d7f4c9729ca03482 | 1689e9c39fb03adc170c41baff027560de8a343e | /man/GEOtop_CheckHydroBudget.Rd | 7a0e9b6382519078049847934321ecd0c217f249 | [] | no_license | ecor/AnalyseGEOtop | 00d91a7892d361c6ff3691643f735375db4ca89c | c69b3a91389b88c40529a1102fe1e8010f58191e | refs/heads/master | 2021-01-16T18:51:08.273744 | 2016-06-06T22:35:35 | 2016-06-06T22:35:35 | 59,219,388 | 1 | 0 | null | 2016-05-19T15:31:37 | 2016-05-19T15:31:37 | null | UTF-8 | R | false | true | 1,828 | rd | GEOtop_CheckHydroBudget.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GEOtop_CheckHydroBudget.R
\name{GEOtop_CheckHydroBudget}
\alias{GEOtop_CheckHydroBudget}
\title{Check the hydrological budget of a GEOtop 3d simulation}
\usage{
GEOtop_CheckHydroBudget(wpath, Q_obs, Q_obs_data, soil_files,
list_file = "listpoints.txt")
}
\arguments{
\item{wpath}{working path, pointing into simulation folder}
\item{Q_obs}{character describing if and in which time scale observed discharge data is provided. "hour": hourly data; "day": daily data;"n": no observed discharge data prvided}
\item{Q_obs_data}{zoo object, observed discharge data in m^3/s}
\item{soil_files}{boolean, TRUE: soil files are provided as GEOtop input. FALSE: soil is parameterized in the geotop.inpts file}
\item{list_file}{character, name of the listpoint file defining GEOtop output points, if not available: list_file = NULL, information is read from geotop.inpts}
}
\value{
PDF files containing specific analyse plots:
\item{Ppartitioning.pdf}{Areal precipitation amounts and partitioning in the components snow and rain, first glew on discharge}
\item{QsimVSQobs.pdf}{Simulated versus observed discharge, hourly - daily - monhly aggregations and GOFs}
\item{WaterBudget.pdf}{Analytic plot on the water budget of the simulation}
}
\description{
Comparison of simulated and observed runoff (if provided). Checking the hydrological budget of the simulation (dS/dt = P - Q - ET).
}
\examples{
### TO DO
}
\author{
Johannes Brenner
}
\references{
Endrizzi, S., Gruber, S., Amico, M. D., & Rigon, R. (2014). \strong{GEOtop 2.0 : simulating the combined energy and water balance at and below the land surface accounting for soil freezing , snow cover and terrain effects.} \emph{Geosci. Model Dev., 7, 2831-2857}. doi:10.5194/gmd-7-2831-2014
}
|
461375fe3b002c1043add6cf94341091705c0401 | cb54fbf79c8ddb2c1d2a4fa2404d2e95faa61db3 | /Solution_3.4.R | 0096678a92b64f5f248871f14dd033fd40966bef | [] | no_license | abhay30495/CASE-STUDY-Healthcare-Org | 139cdc4714fc752cd779c5ee986e811ecbabe4a9 | 1838d9e1ad3d11000e357ad3fe1b8108097270c4 | refs/heads/master | 2020-05-19T03:36:55.563598 | 2019-05-03T20:03:14 | 2019-05-03T20:03:14 | 184,806,122 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,110 | r | Solution_3.4.R | ## Question 3
> View(diabetes)
> diabetes_1=diabetes[,c(9,1,2,10,11,12,13,14)] ##Making different table for the below operation
> plot(diabetes_1,main="pairwise scatter plot") ##Pair wise scatter plot
> round(cor(diabetes_1),3)
NDD AGE SEX Height Weight BMI HC WHR
NDD 1.000 0.208 0.120 NA NA NA NA NA ##Age is strongly co-related
AGE 0.208 1.000 0.126 NA NA NA NA NA
SEX 0.120 0.126 1.000 NA NA NA NA NA
Height NA NA NA 1 NA NA NA NA
Weight NA NA NA NA 1 NA NA NA
BMI NA NA NA NA NA 1 NA NA
HC NA NA NA NA NA NA 1 NA
WHR NA NA NA NA NA NA NA 1
> model=lm(NDD~.,data = diabetes_1) ##forming linear model
> model
Call:
lm(formula = NDD ~ ., data = diabetes_1)
Coefficients:
(Intercept) AGE SEX Height Weight BMI HC
-2.086323 0.007247 0.018474 0.012324 -0.008321 0.007888 0.002353
WHR
0.307130
> summary(model)
Call:
lm(formula = NDD ~ ., data = diabetes_1)
Residuals:
Min 1Q Median 3Q Max
-0.7600 -0.4008 -0.2485 0.5295 0.9486
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) -2.086323 1.872833 -1.114 0.266
AGE 0.007247 0.001375 5.270 1.72e-07 *** ##significant
SEX 0.018474 0.036968 0.500 0.617
Height 0.012324 0.011893 1.036 0.300
Weight -0.008321 0.013606 -0.612 0.541
BMI 0.007888 0.034727 0.227 0.820
HC 0.002353 0.001877 1.254 0.210
WHR 0.307130 0.214785 1.430 0.153
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residual standard error: 0.4768 on 871 degrees of freedom
(42 observations deleted due to missingness)
Multiple R-squared: 0.06537, Adjusted R-squared: 0.05786
F-statistic: 8.703 on 7 and 871 DF, p-value: 2.332e-10
## from summary we see age is strongly related to NDD, and no other variables are significant
#############################################################################################
##Question 4
> View(diabetes)
> View(diabetes_1)
> diabetes_1=diabetes[c(1:133),c(3,8,9)]
> View(diabetes_1)
> library(caTools)
> data=sample.split(diabetes_1,SplitRatio = 0.8)
> train=subset(diabetes_1,data=="TRUE")
##Warning message:
##Length of logical index must be 1 or 133, not 3
> test=subset(diabetes_1,data=="FALSE")
##Warning message:
##Length of logical index must be 1 or 133, not 3
> View(train)
> View(test)
##############################################################################################
##Checking consistency for both the variable, FBS & PPBS1
> library(caret)
> model=glm(NDD~.,train, family="binomial")
> model
Call: glm(formula = NDD ~ ., family = "binomial", data = train)
Coefficients:
(Intercept) FBS PPBS1
-24.7263 0.1157 0.0700
Degrees of Freedom: 87 Total (i.e. Null); 85 Residual
(1 observation deleted due to missingness)
Null Deviance: 106.8
Residual Deviance: 16.56 AIC: 22.56
> prediction=predict(model, test, type="response")
> prediction
> table(test$NDD,prediction>0.5)
FALSE TRUE
0 12 4
1 0 28
> (12+28)/(12+28+4)
[1] 0.9090909
##############################################################################################
##Checking consistency for FBS variable
> View(train)
> View(test)
> model_1=glm(NDD~FBS,train, family = "binomial")
Warning message:
glm.fit: fitted probabilities numerically 0 or 1 occurred
> model_1
Call: glm(formula = NDD ~ FBS, family = "binomial", data = train)
Coefficients:
(Intercept) FBS
-12.7891 0.1195
Degrees of Freedom: 88 Total (i.e. Null); 87 Residual
Null Deviance: 107.5
Residual Deviance: 46.36 AIC: 50.36
> prediction=predict(model_1, test, type="response")
> prediction
> table(test$NDD,prediction>0.5)
FALSE TRUE
0 8 8
1 2 26
> (8+26)/(8+26+2+8)
[1] 0.7727273 ## lower accuracy from the previous case
##############################################################################################
##Checking consistency for PPBS1 Variable
> model_2=glm(NDD~PPBS1,train, family = "binomial")
##Warning message:
##glm.fit: fitted probabilities numerically 0 or 1 occurred
> model_2
Call: glm(formula = NDD ~ PPBS1, family = "binomial", data = train)
Coefficients:
(Intercept) PPBS1
-11.40871 0.06929
Degrees of Freedom: 87 Total (i.e. Null); 86 Residual
(1 observation deleted due to missingness)
Null Deviance: 106.8
Residual Deviance: 28.57 AIC: 32.57
> prediction=predict(model_2, test, type="response")
> table(test$NDD,prediction>0.5)
FALSE TRUE
0 13 3
1 1 27
> (13+27)/(13+3+1+27)
[1] 0.9090909
## PPBS1 has the highest accuracy of all the approaches above.
## Hence, PPBS1 alone is sufficient to predict diabetes.
|
aa1cddb0142d2a84e5190873765fcae44490134e | 29585dff702209dd446c0ab52ceea046c58e384e | /QuantumClone/R/plots.R | 074fc89e49472dfb32a16a0f5a72aadadf094af9 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,576 | r | plots.R | #'Plot with margin densities
#'
#'Adapted from http://stackoverflow.com/questions/11883844/inserting-a-table-under-the-legend-in-a-ggplot2-histogram
#'Uses gridExtra package
#' @param QClone_Output Output from QuantumClone algorithm
#' @keywords Plot Densities
#' @export plot_with_margins_densities
#' @examples
#' require(ggplot2)
#' require(gridExtra)
#' message("Using preclustered data:")
#' QC_out<-QuantumClone::QC_output
#' plot_with_margins_densities(QC_out)
#' @importFrom gridExtra grid.arrange
#'
#'
plot_with_margins_densities<-function(QClone_Output){
  ## Scatter plot of per-mutation cellularity in sample 1 vs sample 2,
  ## framed by the marginal density of each sample, with the colour
  ## legend moved into its own panel of a 2 x 2 layout.
  if(length(QClone_Output$filtered.data)!=2){
    stop("This function can only take 2 samples at a time.")
  }
  dat1<-QClone_Output$filtered.data[[1]]
  dat2<-QClone_Output$filtered.data[[2]]
  ## Number of legend columns: smallest square grid holding all clusters.
  legend_cols<-floor(sqrt(max(QClone_Output$cluster)))+1
  central<-ggplot2::qplot(x=dat1$Cellularity,y=dat2$Cellularity,color=as.character(QClone_Output$cluster),
                          xlab="Cellularity diag",ylab="Cellulariy relapse",xlim=c(0,1),ylim=c(0,1))+
    ggplot2::theme_bw()+
    ggplot2::scale_color_discrete(guide = ggplot2::guide_legend(title="Cluster",ncol=legend_cols))
  margin_top<-ggplot2::ggplot(dat1,
                              ggplot2::aes_string("Cellularity"))+
    ggplot2::geom_density(alpha=.5)+
    ggplot2::theme_bw()+
    ggplot2::theme(legend.position="none",
                   axis.title.x=ggplot2::element_blank())
  margin_right<-ggplot2::ggplot(dat2,
                                ggplot2::aes_string("Cellularity"))+
    ggplot2::geom_density(alpha=.5)+
    ggplot2::coord_flip()+
    ggplot2::theme_bw()+
    ggplot2::theme(legend.position="none",
                   axis.title.y=ggplot2::element_blank())
  ## Render the central plot and pull its legend grob out so the legend
  ## can occupy its own cell of the arranged grid.
  built <- ggplot2::ggplot_gtable(ggplot2::ggplot_build(central))
  legend_idx <- which(sapply(built$grobs, function(x) x$name) == "guide-box")
  legend_grob <- built$grobs[[legend_idx]]
  return(gridExtra::grid.arrange(margin_top,
                                 legend_grob,
                                 central+ggplot2::theme(legend.position="none"),
                                 margin_right,
                                 ncol=2, nrow=2, widths=c(4, 1), heights=c(1, 4)))
}
#' Plot QC_output
#'
#' This function was implemented to re-plot easily the diagrams of clonality for changes/enhancement.
#' Returns a ggplot object
#' Uses ggplot2 package
#' @param QClone_Output Output from QuantumClone algorithm
#' @param simulated Was the data generated by QuantumCat?
#' @param sample_selected : number of the sample to be considered for plot (can be 1 or 2 samples)
#' @param Sample_names : character vector of the names of each sample (in the same order as the data)
#' @keywords Plot
#' @export plot_QC_out
#' @examples
#' require(ggplot2)
#' message("Using preclustered data:")
#' QC_out<-QuantumClone::QC_output
#' plot_QC_out(QC_out)
#'
plot_QC_out<-function(QClone_Output,Sample_names=NULL, simulated = FALSE,sample_selected = 1:2){
  ## Plot cellular prevalence of mutations for one or two samples,
  ## coloured by the cluster assigned by QuantumClone. With two samples a
  ## scatter plot is drawn; with one, a jittered one-dimensional strip.
  ## When `simulated` is TRUE the point shape shows the true clone that
  ## QuantumCat stored in the Chr column.
  ##
  ## QClone_Output:   output of the QuantumClone algorithm
  ## Sample_names:    labels for the samples; defaults to the first cell
  ##                  of each filtered data frame
  ## simulated:       was the data generated by QuantumCat?
  ## sample_selected: indices of the 1 or 2 samples to draw
  ## Returns a ggplot object.
  Cell <- QClone_Output$filtered.data
  cluster<-factor(QClone_Output$cluster)
  if(is.null(Sample_names)){
    Sample_names<-unlist(lapply(X = QClone_Output$filtered.data,FUN = function(df){
      df[1,1]
    }))
  }
  if(length(sample_selected)==2){
    if(!simulated){
      result<-ggplot2::qplot(x=Cell[[sample_selected[1]]]$Cellularity,y=Cell[[sample_selected[2]]]$Cellularity, asp = 1,main=paste('Cellular prevalence',Sample_names[sample_selected[1]],Sample_names[sample_selected[2]]),
                             xlab=paste('Cellular prevalence',Sample_names[sample_selected[1]]),ylab=paste('Cellular prevalence',Sample_names[sample_selected[2]]),
                             colour = cluster)+ggplot2::scale_colour_discrete(name='Clone')+ggplot2::coord_cartesian(xlim=c(0,1),ylim=c(0,1))+ggplot2::theme_bw()
    }
    else{
      result<-ggplot2::qplot(x=Cell[[sample_selected[1]]]$Cellularity,y=Cell[[sample_selected[2]]]$Cellularity, asp = 1,main=paste('Cellular prevalence plot',Sample_names[sample_selected[1]],Sample_names[sample_selected[2]]),
                             xlab=paste('Cellular prevalence',Sample_names[sample_selected[1]]),ylab=paste('Cellular prevalence',Sample_names[sample_selected[2]]),
                             colour = cluster,
                             shape=factor(Cell[[sample_selected[1]]]$Chr))+
        ggplot2::scale_shape_discrete(factor(1:max(Cell[[sample_selected[1]]][,'Chr'])),
                                      name='Clone \n(simulated)')+
        ggplot2::scale_colour_discrete(name='Cluster')+
        ggplot2::coord_cartesian(xlim=c(0,1),ylim=c(0,1))+
        ggplot2::theme_bw()
    }
  }
  else if(length(sample_selected)==1){
    ## One-dimensional view: cellularity on x, random jitter on y so that
    ## overlapping mutations remain visible.
    jitter_y<-jitter(rep(0.5,times=length(Cell[[sample_selected[1]]]$Cellularity)),factor = 5)
    if(!simulated){
      result<-ggplot2::qplot(x=Cell[[sample_selected[1]]]$Cellularity, y=jitter_y , asp = 1,main=paste('Cellular prevalence',Sample_names[sample_selected[1]]),
                             xlab=paste('cellularity',Sample_names[sample_selected[1]]),ylab='',
                             colour = cluster)+ggplot2::scale_colour_discrete(name='Clone')+ggplot2::coord_cartesian(xlim=c(0,1),ylim=c(0,1))+ggplot2::theme_bw()+ggplot2::theme(axis.line.y=ggplot2::element_blank(),
                                                                                                                                                                                axis.ticks.y=ggplot2::element_blank(),
                                                                                                                                                                                panel.background = ggplot2::element_blank(),
                                                                                                                                                                                axis.text.y = ggplot2::element_blank())
    }
    else{
      ## BUGFIX: the original passed the whole data frame as x
      ## (Cell[[sample_selected[1]]] without $Cellularity), never mapped
      ## the shape aesthetic its scale_shape_discrete() refers to, and
      ## hard-coded Cell[[1]] instead of the selected sample.
      result<-ggplot2::qplot(x=Cell[[sample_selected[1]]]$Cellularity,y=jitter_y, asp = 1,main=paste('Cellular prevalence',Sample_names[sample_selected[1]]),
                             xlab=paste('Cellular prevalence',Sample_names[sample_selected[1]]),ylab='',
                             colour = cluster,
                             shape=factor(Cell[[sample_selected[1]]]$Chr))+
        ggplot2::scale_colour_discrete(name='Cluster')+
        ggplot2::coord_cartesian(xlim=c(0,1),ylim=c(0,1))+
        ggplot2::scale_shape_discrete(factor(1:max(Cell[[sample_selected[1]]][,'Chr'])),
                                      name='Clone \n(simulated)')+
        ggplot2::theme_bw()+
        ggplot2::theme(axis.line.y=ggplot2::element_blank(),
                       axis.ticks.y=ggplot2::element_blank(),
                       panel.background = ggplot2::element_blank(),
                       axis.text.y = ggplot2::element_blank())
    }
  }
  else{
    stop("Number of samples can only be 1 or 2 for this function.Use sample_selected parameter.")
  }
  return(result)
}
#' Evolution plot
#'
#' Plots evolution in time of clones
#' @param QC_out : Output from One_step_clustering
#' @param Sample_names : character vector of the names of each sample (in the same order as the data)
#' @export evolution_plot
#' @examples
#' require(ggplot2)
evolution_plot<-function(QC_out,Sample_names=NULL){
  ## Line plot of clone cellularity across samples: one line per clone,
  ## with line width proportional to the fraction of mutations assigned
  ## to that clone.
  n_samples<-length(QC_out$EM.output$centers)
  if(is.null(Sample_names)){
    warning(paste("Samples_names is empty, will use 1 to",n_samples))
    Sample_names<-1:n_samples
  }
  ## One clone id per centre, repeated for every sample.
  clone_id<-as.factor(rep(1:length(unique(QC_out$cluster)),times = n_samples))
  ## Fraction of mutations belonging to each plotted point's clone.
  clone_fraction<-sapply(clone_id,FUN = function(z){
    sum(as.factor(QC_out$cluster)==z)/length(QC_out$cluster)
  })
  ## Flatten the per-sample centres, tagging each value with its sample.
  sample_lab<-character()
  cellularity<-numeric()
  for(i in 1:n_samples){
    cellularity<-c(cellularity,QC_out$EM.output$centers[[i]])
    sample_lab<-c(sample_lab,rep(Sample_names[i],times = length(QC_out$EM.output$centers[[i]])))
  }
  df<-data.frame(row.names = 1:length(sample_lab))
  df$x<-sample_lab
  df$y<-cellularity
  df$col<-clone_id
  df$width<-clone_fraction
  plt<-ggplot2::ggplot(df,ggplot2::aes_string(x ="x",y="y",
                                              group ="col",
                                              color = "col",
                                              size = "width"),
                       xlab = "Sample",
                       ylab = "Cellularity")+
    ggplot2::geom_line()+
    ggplot2::scale_color_discrete("Clone")+
    ggplot2::scale_size("Fraction of mutations",range = c(0.5,3))+
    ggplot2::xlab("Sample")+
    ggplot2::ylab("Cellularity")+
    ggplot2::theme_bw()
  return(plt)
}
#' Plots multiple trees
#'
#' Plots all trees created by the function Tree_generation. The red line means that mutations occured.
#' @param result_list List of lists (tree generated and the probability associated with each tree)
#' @param d Number of clusters found by QuantumClone
#' @param cex Coefficient of expansion for the texts in phylogenetic tree plots. Default is 0.8
#' @export
#' @keywords Clonal inference phylogeny
#' @examples multiplot_trees(QuantumClone::Tree, d= 4)
multiplot_trees<-function(result_list,d,cex=0.8){
  ## Plot every phylogenetic tree in `result_list` on one device, laid
  ## out in a 2 x ceiling(n/2) grid.
  ##
  ## result_list: list of (tree, probability) pairs from Tree_generation
  ## d:           number of clusters found by QuantumClone
  ## cex:         text expansion forwarded to manual_plot_trees
  n_trees<-length(result_list)
  ## Number of grid columns: half the trees per row, rounded up
  ## (same value as the original even/odd branch).
  n_cols<-ceiling(n_trees/2)
  if(n_cols>1){
    ## BUGFIX: the original saved the previous par() settings in `op`
    ## but never restored them, silently clobbering the caller's
    ## graphics state. Restore on exit.
    op<-par(mfrow = c(2,n_cols),mar = rep(2, 4))
    on.exit(par(op), add = TRUE)
  }
  for(i in seq_along(result_list)){
    manual_plot_trees(result_list[[i]][[1]],d,cex,result_list[[i]][[2]])
  }
}
#' Plot tree
#'
#' Creates a visual output for the phylogeny created by Tree_generation()
#' @param connexion_list Data frame of the concatenation of the interaction matrix and the cellularity of each clone at different time points.
#' @param d Number of clusters found by QuantumClone
#' @param cex Coefficient of expansion for the texts in phylogenetic tree plots. Default is 0.8
#' @param p Probability of a tree
#' @export
#' @examples # Extract one tree out of the 3 available trees:
#' Example_tree<-QuantumClone::Tree[[1]]
#' manual_plot_trees(Example_tree[[1]], d= 4,p = Example_tree[[2]])
#' @keywords Clonal inference phylogeny
manual_plot_trees<-function(connexion_list,d,cex=0.8,p){
  ## Draw one phylogeny produced by Tree_generation(). Nodes are placed
  ## by depth (longueur) and horizontal position (find_x_position); the
  ## red segment marks the branch along which mutations occurred.
  ##
  ## connexion_list: adjacency matrix (first 2d-1 columns) concatenated
  ##                 with per-sample clone cellularities (columns 2d...)
  ## d:              number of clusters found by QuantumClone
  ## cex:            text expansion for node labels
  ## p:              probability of the tree, shown in the title
  n_nodes<-2*d-1
  ## Square adjacency block describing parent/child links between nodes.
  adj<-connexion_list[1:n_nodes,1:n_nodes]
  V<-numeric(n_nodes)  # depth of every node
  X<-numeric(n_nodes)  # horizontal position of every node
  for(i in 1:n_nodes){
    V[i]<-longueur(adj,i)
    X[i]<-find_x_position(adj,i,d)
  }
  max_depth<-max(V)
  Y<-1-V/max_depth
  ## Empty canvas: cex = 0 hides the points, only segments/labels show.
  plot(x=X,y=Y,xlim=c(-1,1),ylim=c(min(Y),1),cex=0, axes = F,xlab='',ylab='',
       main = paste('p = ',round(p,digits=5)))
  ## Internal nodes have exactly two children (row sum == 2): draw a
  ## vertical drop, then the two horizontal branches.
  step<-1/max_depth
  for(i in which(apply(X = adj,MARGIN = 1,FUN = sum)==2)){
    children<-which(connexion_list[i,]==1)
    segments(x0=X[i],x1=X[i],y0=Y[i],y1=Y[i]-step)
    segments(x0=X[children[1]],x1=X[i],y0=Y[i]-step,y1=Y[i]-step,col='red')
    segments(x0=X[i],x1=X[children[2]],y0=Y[i]-step,y1=Y[i]-step)
  }
  ## Node labels: clone cellularities (columns 2d onwards), one value per
  ## line when several samples are present.
  if(2*d<dim(connexion_list)[2]){
    LABELS<-apply(X = apply(X = connexion_list[1:n_nodes,(2*d):(dim(connexion_list)[2])],2,FUN = round,digits=3),1,paste,collapse='\n')
  }
  else{
    LABELS<-sapply(X = connexion_list[1:n_nodes,(2*d)],FUN = round,digits=3)
  }
  text(x=X,y=Y,labels = LABELS,pos = 3,cex = cex)
}
cf3073459d179636a67ddafb19aa5c9b6fe2a7dd | 0d66571223dd7e689b6713f62b482c92ffa4a12b | /Census Data/import_quick_facts.R | 2aab17666213709d34670af5a4dca789ded868eb | [] | no_license | rpghub/exit_polls_2016 | e5f0db2924ae36e35b97f90e2d0e06a467083727 | 73f49a2569b836c38e2444a60867c27a0b9824c8 | refs/heads/master | 2021-01-25T05:51:17.435393 | 2017-02-25T02:49:55 | 2017-02-25T02:49:55 | 80,698,724 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,792 | r | import_quick_facts.R | ### download census data
library(curl)
library(data.table)

# pars
save_loc <- 'c:/projects/post election/census data'
template_loc <- 'c:/projects/post election/census data'
fips_loc <- 'C:/Projects/Post Election/County Results/Results 2016'

# template
# Field/label template describing which QuickFacts rows are kept.
setwd(template_loc)
template <- fread('quick_facts_template.csv')

# County-level 2016 election results; used as the list of FIPS codes.
setwd(fips_loc)
fips <- fread('county_election_results_2016.csv')
# Left-pad 4-digit FIPS codes to the standard 5 digits.
fips[, fips := ifelse(nchar(fips) == 4, paste('0', fips, sep = '')
                      , fips)]
# One row per county, splitting the FIPS code into state (2 digits)
# and county (3 digits) parts.
fips <- fips[!is.na(fips), .N
             , .(state = abbr_state, county
                 , state_fips = substr(fips, 1, 2)
                 , county_fips = substr(fips, 3, 5))]
# Counties missing from the results file, added by hand.
fips_add <-
  data.table(state = c('nc', 'ak')
             , county = c('vance', 'kusilvak census area')
             , state_fips = c('37', '02')
             , county_fips = c('181', '158'))
fips <- rbind(fips, fips_add)
# function to read and conv
f_quick_facts <- function(fips_row) {
if(exists('dat')) rm(dat)
state <- fips[fips_row]
sfips <- state$state_fips
cfips <- state$county_fips
file <- paste('https://www.census.gov/quickfacts/download.php?fips='
, sfips, cfips, '&type=csv', sep = '')
tryCatch(
dat <- read.csv(text=paste0(head(readLines(file), - 33), collapse="\n"))
, error = function(e) {
print(paste('Cannot download: ', sfips, cfips
, sep = ''))})
if (exists('dat')) {
fields <- c(2, 3, 9, 10, 11, 12, 13, 14, 30, 31, 18, 19, 20, 21, 22, 23, 24
, 25, 26, 27, 28, 29, 50, 51, 53, 56, 57, 67, 84)
vals <- as.character(dat[[2]][fields])
vals <- gsub('(1)', '', vals, fixed = TRUE)
data.table(state_fips = sfips, county_fips = cfips
, state = state$state, county = state$county
, census_date = template$census_date
, field = template$field
, field_resp = template$field_resp
, value = as.numeric(vals))
}
}
system.time(
census <- do.call('rbind', lapply(seq_len(nrow(fips)), f_quick_facts)))
# 46113 changed to 46102, use new one for old code
fix <- census[state_fips == '46'& county_fips == '102']
fix[, county_fips := '113']
fix[, county := 'shannon']
census <- rbind(census, fix)
# 51515 changed to 51019 use new one for old code
fix <- census[state_fips == '51'& county_fips == '019']
fix[, county_fips := '515']
fix[, county := 'bedford city']
census <- rbind(census, fix)
# 02270 changed to 02158 use new one for old code
fix <- census[state_fips == '02'& county_fips == '158']
fix[, county_fips := '270']
fix[, county := 'wade hampton census area']
census <- rbind(census, fix)
setwd(save_loc)
write.csv(census, 'county_quickfacts.csv', row.names = FALSE) |
58bb66c78860c1ff740dff61e1c730046f1bf75b | 538669bde404d12ef6908fc0ee436b68413b39ff | /readme.rd | 3a1639c2f1470c4dae15d80b3657d938092f8cfc | [] | no_license | Lokeshwarrobo/geet | 335afef814032f0185888b1c68b213893b2e4c61 | 36337ef7dc045bd4c811b762438b09cb0b6342d0 | refs/heads/master | 2023-02-22T07:32:55.983973 | 2021-01-24T06:01:20 | 2021-01-24T06:01:20 | 332,379,109 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23 | rd | readme.rd | # Hello
Beggining
|
149c67344ab2d4763b07809269dd68fbb8f221ee | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.management/man/cloudwatchlogs_put_subscription_filter.Rd | 4a1c02d0c4dd88d445d9df4d6599ef308340a974 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 2,951 | rd | cloudwatchlogs_put_subscription_filter.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudwatchlogs_operations.R
\name{cloudwatchlogs_put_subscription_filter}
\alias{cloudwatchlogs_put_subscription_filter}
\title{Creates or updates a subscription filter and associates it with the
specified log group}
\usage{
cloudwatchlogs_put_subscription_filter(
logGroupName,
filterName,
filterPattern,
destinationArn,
roleArn = NULL,
distribution = NULL
)
}
\arguments{
\item{logGroupName}{[required] The name of the log group.}
\item{filterName}{[required] A name for the subscription filter. If you are updating an existing
filter, you must specify the correct name in \code{filterName}. To find the
name of the filter currently associated with a log group, use
\code{\link[=cloudwatchlogs_describe_subscription_filters]{describe_subscription_filters}}.}
\item{filterPattern}{[required] A filter pattern for subscribing to a filtered stream of log events.}
\item{destinationArn}{[required] The ARN of the destination to deliver matching log events to. Currently,
the supported destinations are:
\itemize{
\item An Amazon Kinesis stream belonging to the same account as the
subscription filter, for same-account delivery.
\item A logical destination (specified using an ARN) belonging to a
different account, for cross-account delivery.
If you're setting up a cross-account subscription, the destination
must have an IAM policy associated with it. The IAM policy must
allow the sender to send logs to the destination. For more
information, see
\code{\link[=cloudwatchlogs_put_destination_policy]{put_destination_policy}}.
\item A Kinesis Data Firehose delivery stream belonging to the same
account as the subscription filter, for same-account delivery.
\item A Lambda function belonging to the same account as the subscription
filter, for same-account delivery.
}}
\item{roleArn}{The ARN of an IAM role that grants CloudWatch Logs permissions to
deliver ingested log events to the destination stream. You don't need to
provide the ARN when you are working with a logical destination for
cross-account delivery.}
\item{distribution}{The method used to distribute log data to the destination. By default,
log data is grouped by log stream, but the grouping can be set to random
for a more even distribution. This property is only applicable when the
destination is an Amazon Kinesis data stream.}
}
\description{
Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through \code{\link[=cloudwatchlogs_put_log_events]{put_log_events}} and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.
See \url{https://www.paws-r-sdk.com/docs/cloudwatchlogs_put_subscription_filter/} for full documentation.
}
\keyword{internal}
|
737553c3c382672def8ad52b90c602a2fdc50387 | 705a330cc00320907b5b8c328f53259e5b033c88 | /data functions/standardize-gee.r | 5080261e33e41e64d61e6935820da8e15ce6444a | [
"MIT"
] | permissive | matthewgthomas/mosuo-kinship | 696a0fe21a6e1d4098a65198a5ce07fd2a00acd9 | 14996bc45585d46300dfb30b176dcce4775d4747 | refs/heads/master | 2021-09-07T06:13:09.270141 | 2018-02-18T16:55:36 | 2018-02-18T16:55:36 | 111,209,612 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,834 | r | standardize-gee.r | ##
## Analysis code for Thomas et al. (2015): Saami reindeer herders cooperate with social group members and genetic kin
##
## This modifies the `standardize` function in the package `arm` to work with GEE models.
##
## Author: Matthew Gwynfryn Thomas
##
## {------- email --------}
## {-- twitter --}
## mgt@matthewgthomas.co.uk
## {------ web -------}
##
##
## Copyright (c) 2015 Matthew Gwynfryn Thomas
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License along
## with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##
# Standardize the input variables of a model call and re-fit the model.
#
# `call` is a model call (e.g. model$call). For each variable in the formula:
#   - binary variables (2 distinct non-NA values) are transformed per
#     `binary.inputs` (e.g. "center"), getting a "c." name prefix;
#   - numeric variables with >2 values are fully rescaled ("z." prefix);
#   - variables listed in `unchanged` (and, unless standardize.y, the
#     response) are left alone.
# The rescaled variables are created via rescale() -- per the file header,
# this mirrors the standardize() machinery of the 'arm' package -- the
# formula is rewritten by symbol substitution, and the call is re-evaluated.
#
# NOTE(review): this function reads/creates objects in the calling context
# with get()/assign() and rebuilds the formula by string surgery, so the
# exact statement order matters; code left byte-identical.
standardize.default <- function(call, unchanged=NULL,
                        standardize.y=FALSE, binary.inputs="center"){
  form <- call$formula
  varnames <- all.vars (form)      # response first, then predictors
  n.vars <- length (varnames)
#
# Decide which variables will be unchanged
#
  transform <- rep ("leave.alone", n.vars)
  if (standardize.y) {
    transform[1] <- "full"         # position 1 is the response
  }
  for (i in 2:n.vars){
    v <- varnames[i]
    # fetch the variable either from the calling context or from call$data
    if (is.null(call$data)) {
      thedata <- get(v)
    }
    else {
      thedata <- get(as.character(call$data))[[v]]
    }
    if (is.na(match(v,unchanged))){
      num.categories <- length (unique(thedata[!is.na(thedata)]))
      if (num.categories==2){
        # binary input: treatment chosen by `binary.inputs`
        transform[i] <- binary.inputs
      }
      # NOTE(review): elementwise & in a scalar condition; && would be safer
      else if (num.categories>2 & is.numeric(thedata)){
        transform[i] <- "full"
      }
    }
  }
#
# New variable names:
# prefix with "c." if centered or "z." if centered and scaled
#
  varnames.new <- ifelse (transform=="leave.alone", varnames,
    ifelse (transform=="full", paste ("z", varnames, sep="."),
      paste ("c", varnames, sep=".")))
  transformed.variables <- (1:n.vars)[transform!="leave.alone"]
  # Define the new (rescaled) variables, either in the calling context
  # or as extra columns appended to call$data
  if (is.null(call$data)) {
    for (i in transformed.variables) {
      assign(varnames.new[i], rescale(get(varnames[i]), binary.inputs))
    }
  }
  else {
    newvars <- NULL
    for (i in transformed.variables) {
      assign(varnames.new[i], rescale(get(as.character(call$data))[[varnames[i]]],
        binary.inputs))
      newvars <- cbind(newvars, get(varnames.new[i]))
    }
    assign(as.character(call$data), cbind(get(as.character(call$data)), newvars))
  }
  # Now call the regression with the new variables:
  # substitute the new names into the formula, re-assemble it as text
  # (undoing the prefix inside factor() terms), and re-evaluate the call
  call.new <- call
  L <- sapply (as.list (varnames.new), as.name)
  names(L) <- varnames
  call.new$formula <- do.call (substitute, list (form, L))
  formula <- as.character (call.new$formula)
  if (length(formula)!=3) stop ("formula does not have three components")
  formula <- paste (formula[2],formula[1],formula[3])
  formula <- gsub ("factor(z.", "factor(", formula, fixed=TRUE)
  formula <- gsub ("factor(c.", "factor(", formula, fixed=TRUE)
  call.new$formula <- as.formula (formula)
  return (eval (call.new))
}
# Standardize the inputs of a fitted GEE model.
#
# Thin wrapper: extracts the original model call from the fitted object and
# delegates all of the rescaling/refitting work to standardize.default().
standardize.gee <- function(object, unchanged = NULL,
                            standardize.y = FALSE, binary.inputs = "center") {
  standardize.default(call = object$call,
                      unchanged = unchanged,
                      standardize.y = standardize.y,
                      binary.inputs = binary.inputs)
}
|
ed3c35d63ec85394f8e399b8216be3d3dcbf4a09 | 6eb63376f407b9265a7a0359442b297e1d7ce184 | /.gitignore/MJ50.R | ba4a60af9f728b50dbea1e48bdcd67cba151ef4a | [] | no_license | Kedaj/Cmp108 | 51af3849178ee53b91c5fae96a7a69a7b424e6aa | bc660caa0381ba3eb40bb5264535b816900903c8 | refs/heads/master | 2021-07-13T04:33:41.181443 | 2017-10-16T17:33:23 | 2017-10-16T17:33:23 | 107,156,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 59 | r | MJ50.R | #Makeda Joseph
04/26/2017
for (i in 1:20) print (1:i)
|
3abd4b7f22ad6f9c5c96e9e5a189a58faec4e11e | 7bb21189354bf72b2e8aeeb9f0e4340e69ed2913 | /man/runif.std.tetra.Rd | 1e8555dacf20d8635343b207518aa59cba8c033f | [] | no_license | elvanceyhan/pcds | 16371849188f98138933afd2e68a46167f674923 | 00331843a0670e7cd9a62b7bca70df06d4629212 | refs/heads/master | 2023-07-02T10:03:48.702073 | 2023-06-16T15:50:46 | 2023-06-16T15:50:46 | 218,353,699 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,876 | rd | runif.std.tetra.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PatternGen.R
\name{runif.std.tetra}
\alias{runif.std.tetra}
\title{Generation of Uniform Points
in the Standard Regular Tetrahedron \eqn{T_h}}
\usage{
runif.std.tetra(n)
}
\arguments{
\item{n}{A positive integer
representing the number of uniform points to be generated in the
standard regular tetrahedron \eqn{T_h}.}
}
\value{
A \code{list} with the elements
\item{type}{The type of the pattern
from which points are to be generated}
\item{mtitle}{The \code{"main"} title for the plot of the point pattern}
\item{tess.points}{The vertices of the support region
of the uniformly generated points, it is the
standard regular tetrahedron \eqn{T_h} for this function}
\item{gen.points}{The output set of generated points uniformly
in the standard regular tetrahedron \eqn{T_h}.}
\item{out.region}{The outer region which contains the support region,
\code{NULL} for this function.}
\item{desc.pat}{Description of the point pattern
from which points are to be generated}
\item{num.points}{The \code{vector} of two numbers,
which are the number of generated points and the number
of vertices of the support points (here it is 4).}
\item{txt4pnts}{Description of the two numbers in \code{num.points}}
\item{xlimit,ylimit,zlimit}{The ranges of the \eqn{x}-, \eqn{y}-,
and \eqn{z}-coordinates of the support, \eqn{T_h}}
}
\description{
An object of class \code{"Uniform"}.
Generates \code{n} points uniformly in the standard regular tetrahedron
\eqn{T_h=T((0,0,0),(1,0,0),(1/2,\sqrt{3}/2,0),(1/2,\sqrt{3}/6,\sqrt{6}/3))}.
}
\examples{
\dontrun{
A<-c(0,0,0); B<-c(1,0,0); C<-c(1/2,sqrt(3)/2,0); D<-c(1/2,sqrt(3)/6,sqrt(6)/3)
tetra<-rbind(A,B,C,D)
n<-100
set.seed(1)
Xdt<-runif.std.tetra(n)
Xdt
summary(Xdt)
plot(Xdt)
Xp<-runif.std.tetra(n)$g
Xlim<-range(tetra[,1])
Ylim<-range(tetra[,2])
Zlim<-range(tetra[,3])
xd<-Xlim[2]-Xlim[1]
yd<-Ylim[2]-Ylim[1]
zd<-Zlim[2]-Zlim[1]
plot3D::scatter3D(Xp[,1],Xp[,2],Xp[,3],
phi =20,theta=15, bty = "g", pch = 20, cex = 1,
ticktype = "detailed",
xlim=Xlim+xd*c(-.05,.05),ylim=Ylim+yd*c(-.05,.05),
zlim=Zlim+zd*c(-.05,.05))
#add the vertices of the tetrahedron
plot3D::points3D(tetra[,1],tetra[,2],tetra[,3], add=TRUE)
L<-rbind(A,A,A,B,B,C); R<-rbind(B,C,D,C,D,D)
plot3D::segments3D(L[,1], L[,2], L[,3], R[,1], R[,2],R[,3],
add=TRUE,lwd=2)
plot3D::text3D(tetra[,1]+c(.05,0,0,0),tetra[,2],tetra[,3],
labels=c("A","B","C","D"), add=TRUE)
}
\dontrun{
#need to install scatterplot3d package and call "library(scatterplot3d)"
s3d<-scatterplot3d(Xp, highlight.3d=TRUE,xlab="x",
ylab="y",zlab="z",
col.axis="blue", col.grid="lightblue",
main="3D Scatterplot of the data", pch=20)
s3d$points3d(tetra,pch=20,col="blue")
}
}
\seealso{
\code{\link{runif.tetra}}, \code{\link{runif.tri}},
and \code{\link{runif.multi.tri}}
}
\author{
Elvan Ceyhan
}
|
f25b7054f5210b0c8c4fba6e9c10aabdc203c5aa | a5364a735b6b0b1923324c704fd8ce1003e0ea59 | /R/opt_create_graphs.R | 19b69c882a502241704d2190d370ddf6e93fdc16 | [
"MIT"
] | permissive | vzhomeexperiments/lazytrade | 5d0bf66836058b4855ba9bc7f030e27c297aa607 | 1165ad36caf4900ebb22f503e285505a91334c5e | refs/heads/master | 2022-10-25T15:52:19.208902 | 2021-12-18T08:40:36 | 2021-12-18T08:40:36 | 189,709,132 | 19 | 23 | NOASSERTION | 2021-12-18T08:40:37 | 2019-06-01T08:29:59 | R | UTF-8 | R | false | false | 1,851 | r | opt_create_graphs.R | #' Function to create summary graphs of the trading results
#'
#' @description Create graphs and store them into pdf file
#'
#' `r lifecycle::badge('stable')`
#'
#' @details bar graph and time series optionally written to the pdf file.
#' File is named with a date of analysis to the location specified by the user
#'
#' @param x - dataframe with aggregated trading results
#' @param outp_path - path to the folder where to write file
#' @param graph_type - character, one of the options c('ts', 'bars', 'pdf')
#'
#' @return graphic output
#'
#' @export
#'
#' @examples
#'
#' library(lazytrade)
#' library(readr)
#' library(dplyr)
#' library(magrittr)
#' library(lubridate)
#' library(ggplot2)
#' data(DFR)
#' dir <- normalizePath(tempdir(),winslash = "/")
#' # create pdf file with two graphs
#' opt_create_graphs(x = DFR, outp_path = dir)
#'
#' # only show time series plot
#' opt_create_graphs(x = DFR, graph_type = 'ts')
#'
#'
opt_create_graphs <- function(x, outp_path, graph_type = "pdf"){
  # x          : dataframe with aggregated trading results; must contain the
  #              columns Symbol, Profit, OrderCloseTime and CUMSUM_PNL
  # outp_path  : folder where the pdf is written (used when graph_type == "pdf")
  # graph_type : one of "ts", "bars", "pdf"
  requireNamespace("ggplot2", quietly = TRUE)

  # bar chart: total profit per symbol
  # (aes() is namespaced: ggplot2 is only loaded above, not attached)
  bars <- x %>%
    dplyr::mutate_if(is.character, as.factor) %>%
    dplyr::group_by(Symbol) %>%
    dplyr::summarise(PairGain = sum(Profit)) %>%
    ggplot2::ggplot(ggplot2::aes(x = Symbol, y = PairGain)) +
    ggplot2::geom_bar(stat = "identity")

  # time-series chart: cumulative profit/loss over order close time.
  # BUG FIX: currency pairs were extracted from the global example dataset
  # `DFR` instead of the function argument `x` (only worked after data(DFR))
  pairs_used <- unique(x$Symbol) %>% paste(collapse = " ")
  ts <- x %>% ggplot2::ggplot(ggplot2::aes(x = OrderCloseTime, y = CUMSUM_PNL)) +
    ggplot2::geom_line() + ggplot2::ggtitle(paste("Using pairs: ", pairs_used))

  if (graph_type == "ts") { print(ts) }
  if (graph_type == "bars") { print(bars) }
  if (graph_type == "pdf") {
    # one pdf named by the analysis date, containing both charts
    grDevices::pdf(file = file.path(outp_path, paste0(Sys.Date(), ".pdf")))
    print(ts)
    print(bars)
    grDevices::dev.off()
  }
}
|
6fbeb6dc3e021c79b798fc4847e7c51f460b6c62 | 83d35a0c687e56de320bbe025fe876df41ea3bf6 | /inst/unitTests/saveas_test.R | 99c31e947482e6b41c684789d1ceaad039eb661f | [] | no_license | smgogarten/GWASTools | 797f4cc0d90299195fea29ee1fc24c492267541a | 720bfc6bede713dfcfbff1dd506f4c9f338caa9d | refs/heads/devel | 2023-06-26T13:37:21.371466 | 2023-06-22T12:37:41 | 2023-06-22T12:37:41 | 100,623,140 | 11 | 8 | null | 2023-06-22T12:34:02 | 2017-08-17T16:18:11 | R | UTF-8 | R | false | false | 484 | r | saveas_test.R | test_saveas <- function() {
x <- 1:10
path <- tempdir()
saveas(x, "myx", path)
newfile <- file.path(path, "myx.RData")
checkTrue(file.exists(newfile))
load(newfile)
checkTrue("myx" %in% objects())
unlink(newfile)
saveas(x, "myx.Rdata", path)
newfile <- file.path(path, "myx.Rdata")
checkTrue(file.exists(newfile))
unlink(newfile)
saveas(x, "myx.rda", path)
newfile <- file.path(path, "myx.rda")
checkTrue(file.exists(newfile))
unlink(newfile)
}
|
e14c05dbcc094cdd82430ff7e36ac167577cad81 | 2f092ad846b4a326ba09252bc3b8bab89bf27977 | /Q7/AA2/lab/lab2/AA2-L2.R | 5c7542b18c943e9b97629285e92495d8878a97b0 | [] | no_license | Atellas23/apunts | 11b4dc1e94e2ca11001ece111335ca772f11de29 | 8e0c8017358349064d53e4e11f4df9cf429a3a8b | refs/heads/master | 2021-12-08T12:34:55.529110 | 2021-10-19T07:30:48 | 2021-10-19T07:30:48 | 151,959,998 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,004 | r | AA2-L2.R | ####################################################################
# AA2 - GCED
# Lluís A. Belanche
# LAB 2: modelling 2D classification data
# version of September 2021
####################################################################
## the SVM is located in two different packages: one of them is 'e1071'
library(e1071)
## First we create a simple two-class data set:
N <- 200   # points per class (the data set built below has 2*N rows)
#' Generate the classic "two interleaved sinusoids" two-class toy data set.
#'
#' Class +1: m points on a sine wave over (0, pi]; class -1: m points on a
#' cosine over (pi/2, 3*pi/2]. Gaussian noise is added to the vertical axis.
#' The noise is drawn in the same order as the original loop version, so
#' results are identical for a given RNG seed.
#'
#' BUG FIX: the original pre-allocated with c(1:2*m), which -- because `:`
#' binds tighter than `*` -- is the length-2 vector c(m, 2*m); the vectors
#' only reached length 2*m through silent growth inside the loops.
#'
#' @param m     number of points per class (the result has 2*m rows)
#' @param noise standard deviation of the additive Gaussian noise on x2
#' @return data.frame with columns x1, x2 and target (+1 / -1)
make.sinusoidals <- function(m, noise=0.2)
{
  # class +1: half sine wave
  x1.pos <- (1:m) / m * pi
  x2.pos <- sin(x1.pos) + rnorm(m, 0, noise)

  # class -1: cosine, shifted half a period to interleave with class +1
  x1.neg <- ((1:m) / m + 1/2) * pi
  x2.neg <- cos(x1.neg) + rnorm(m, 0, noise)

  data.frame(x1 = c(x1.pos, x1.neg),
             x2 = c(x2.pos, x2.neg),
             target = c(rep(+1, m), rep(-1, m)))
}
## let's generate the data
dataset <- make.sinusoidals (N)    # 2*N rows, columns x1, x2, target
## and have a look at it
summary(dataset)
plot(dataset$x1,dataset$x2,col=as.factor(dataset$target))
## Now we define a utility function for performing k-fold CV
## the learning data is split into k equal sized parts
## every time, one part goes for validation and k-1 go for building the model (training)
## the final error is the mean prediction error in the validation parts
## Note k=N corresponds to LOOCV
## a typical choice is k=10
k <- 10
# NOTE(review): `folds` has length N while `dataset` has 2*N rows, so the
# masks folds==i / folds!=i used below are recycled over the rows; row r and
# row r+N always fall in the same fold -- confirm this is intended.
folds <- sample(rep(1:k, length=N), N, replace=FALSE)
valid.error <- rep(0,k)            # per-fold validation errors (global)
C <- 1                             # SVM cost (regularization) parameter
## This function is not intended to be useful for general training purposes but it is useful for illustration
## In particular, it does not optimize the value of C (it requires it as parameter)
## Arguments:
##   which.kernel : one of "linear", "poly.2", "poly.3", "RBF"
##   mycost       : the SVM cost (C) parameter, passed straight to svm()
## Returns the 0/1 validation error averaged over the k folds.
## NOTE(review): reads the globals `k`, `folds`, `dataset` and `valid.error`
## defined above; `valid.error[i] <- ...` modifies a local copy of the global.
train.svm.kCV <- function (which.kernel, mycost)
{
  for (i in 1:k)
  {
    train <- dataset[folds!=i,] # for building the model (training)
    valid <- dataset[folds==i,] # for prediction (validation)
    x_train <- train[,1:2]
    t_train <- train[,3]
    # fit the requested kernel; unknown names fall through to stop()
    switch(which.kernel,
           linear={model <- svm(x_train, t_train, type="C-classification", cost=mycost, kernel="linear", scale = FALSE)},
           poly.2={model <- svm(x_train, t_train, type="C-classification", cost=mycost, kernel="polynomial", degree=2, coef0=1, scale = FALSE)},
           poly.3={model <- svm(x_train, t_train, type="C-classification", cost=mycost, kernel="polynomial", degree=3, coef0=1, scale = FALSE)},
           RBF={model <- svm(x_train, t_train, type="C-classification", cost=mycost, kernel="radial", scale = FALSE)},
           stop("Enter one of 'linear', 'poly.2', 'poly.3', 'radial'"))
    x_valid <- valid[,1:2]
    pred <- predict(model,x_valid)
    t_true <- valid[,3]
    # compute validation error (misclassification rate) for part 'i'
    valid.error[i] <- sum(pred != t_true)/length(t_true)
  }
  # return average validation error
  sum(valid.error)/length(valid.error)
}
# Fit an SVM with linear kernel (prints the 10-fold CV error)
(VA.error.linear <- train.svm.kCV ("linear", C))
## We should choose the model with the lowest CV error and refit it to the whole learning data
## then use it to predict the test set; we will do this at the end
## As for now we wish to visualize the models
# so first we refit the model on the full data set:
model <- svm(dataset[,1:2],dataset[,3], type="C-classification", cost=C, kernel="linear", scale = FALSE)
## Now we are going to visualize what we have done; since we have artificial data, instead of creating
## a random test set, we can create a grid of points as test

## Draws the decision function of the global `model` over a regular grid
## spanning the range of the global `dataset`, then overlays the separation,
## the margins, the data points and the support vectors.
##   model.name : label shown on the x axis
##   resol      : grid resolution per axis
## NOTE(review): reads the globals `dataset` and `model`; the local counter
## `k` below shadows the global fold count `k` (harmless, but confusing).
plot.prediction <- function (model.name, resol=200)
# the grid has a (resol x resol) resolution
{
  x <- cbind(dataset$x1,dataset$x2)
  rng <- apply(x,2,range);
  tx <- seq(rng[1,1],rng[2,1],length=resol);
  ty <- seq(rng[1,2],rng[2,2],length=resol);
  # build the grid points row by row (k is the running row index)
  pnts <- matrix(nrow=length(tx)*length(ty),ncol=2);
  k <- 1
  for(j in 1:length(ty))
  {
    for(i in 1:length(tx))
    {
      pnts[k,] <- c(tx[i],ty[j])
      k <- k+1
    }
  }
  # we calculate the SVM decision values on the grid
  pred <- predict(model, pnts, decision.values = TRUE)
  z <- matrix(attr(pred,"decision.values"),nrow=length(tx),ncol=length(ty))
  # and plot them as a colored image
  image(tx,ty,z,xlab=model.name,ylab="",axes=FALSE,
        xlim=c(rng[1,1],rng[2,1]),ylim=c(rng[1,2],rng[2,2]),
        col = cm.colors(64))
  # col = rainbow(200, start=0.9, end=0.1))
  # then we draw the optimal separation (level 0) and its margins (levels +/-1)
  contour(tx,ty,z,add=TRUE, drawlabels=TRUE, level=0, lwd=3)
  contour(tx,ty,z,add=TRUE, drawlabels=TRUE, level=1, lty=1, lwd=1, col="grey")
  contour(tx,ty,z,add=TRUE, drawlabels=TRUE, level=-1, lty=1, lwd=1, col="grey")
  # then we plot the input data from the two classes
  points(dataset[dataset$target==1,1:2],pch=21,col=1,cex=1)
  points(dataset[dataset$target==-1,1:2],pch=19,col=4,cex=1)
  # finally we add the SVs (indices stored in model$index), one class at a time
  sv <- dataset[c(model$index),];
  sv1 <- sv[sv$target==1,];
  sv2 <- sv[sv$target==-1,];
  points(sv1[,1:2],pch=13,col=1,cex=2)
  points(sv2[,1:2],pch=13,col=4,cex=2)
}
## plot the predictions, the separation, the support vectors, everything
plot.prediction ("linear")
## right, now a quadratic SVM model
(VA.error.poly.2 <- train.svm.kCV ("poly.2", C))
model <- svm(dataset[,1:2],dataset[,3], type="C-classification", cost=C, kernel="polynomial", degree=2, coef0=1, scale = FALSE)
plot.prediction ("poly.2")
## right, now a cubic SVM model
(VA.error.poly.3 <- train.svm.kCV ("poly.3", C))
model <- svm(dataset[,1:2],dataset[,3], type="C-classification", cost=C, kernel="polynomial", degree=3, coef0=1, scale = FALSE)
plot.prediction ("poly.3")
## and finally an RBF Gaussian SVM model
(VA.error.RBF <- train.svm.kCV ("RBF", C))
model <- svm(dataset[,1:2],dataset[,3], type="C-classification", cost=C, kernel="radial", scale = FALSE)
plot.prediction ("RBF")
## Now in a real scenario we should choose the model with the lowest CV error
## which in this case is the RBF
## In a real setting we should optimize the value of C, again with CV; this can be done
## very conveniently using tune() in this package to do automatic grid-search
## another, more general, possibility is to use the train() method in the {caret} package
## Just for illustration, let's see the effect of altering C (significantly):
# repeat the linear and RBF fits with a (much) larger C
C <- 50
(VA.error.linear <- train.svm.kCV ("linear", C))
model <- svm(dataset[,1:2],dataset[,3], type="C-classification", cost=C, kernel="linear", scale = FALSE)
plot.prediction ("linear")
(VA.error.RBF <- train.svm.kCV ("RBF", C))
model <- svm(dataset[,1:2],dataset[,3], type="C-classification", cost=C, kernel="radial", scale = FALSE)
plot.prediction ("RBF")
# ... and with a (much) smaller C
C <- 0.05
(VA.error.linear <- train.svm.kCV ("linear", C))
model <- svm(dataset[,1:2],dataset[,3], type="C-classification", cost=C, kernel="linear", scale = FALSE)
plot.prediction ("linear")
(VA.error.RBF <- train.svm.kCV ("RBF", C))
model <- svm(dataset[,1:2],dataset[,3], type="C-classification", cost=C, kernel="radial", scale = FALSE)
plot.prediction ("RBF")
92b6431fcd31ee98b6b363e30b8363e0b1b8e87f | 431719d48e8567140216bdfdcd27c76cc335a490 | /man/ApplicationArgumentDetails.Rd | 07bb82d647e563a438d7de9053d5df4507960bfd | [
"BSD-3-Clause"
] | permissive | agaveplatform/r-sdk | 4f32526da4889b4c6d72905e188ccdbb3452b840 | b09f33d150103e7ef25945e742b8d0e8e9bb640d | refs/heads/master | 2018-10-15T08:34:11.607171 | 2018-09-21T23:40:19 | 2018-09-21T23:40:19 | 118,783,778 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 898 | rd | ApplicationArgumentDetails.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ApplicationArgumentDetails.r
\docType{data}
\name{ApplicationArgumentDetails}
\alias{ApplicationArgumentDetails}
\title{ApplicationArgumentDetails Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
ApplicationArgumentDetails
}
\description{
ApplicationArgumentDetails Class
}
\section{Fields}{
\describe{
\item{\code{description}}{Description of this input.}
\item{\code{label}}{The label for this input}
\item{\code{argument}}{The command line value of this input (ex -n, --name, -name, etc)}
\item{\code{showArgument}}{Whether the argument value should be passed into the wrapper at run time}
\item{\code{repeatArgument}}{Whether the argument value should be repeated in front of each user-supplied input before injection into the wrapper template at runtime}
}}
\keyword{datasets}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.