content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rbeta2.R
\name{rbeta2}
\alias{rbeta2}
\title{Generate random number(s) from a beta distribution with specified mean and standard deviation}
\usage{
rbeta2(n, mean, sdev)
}
\arguments{
\item{n}{number of random deviates desired}
\item{mean}{mean of the distribution (must be in (0,1))}
\item{sdev}{standard deviation of the distribution. Limits depend on the value of \code{mean}}
}
\value{
a vector of length \code{n} with the random numbers
}
\description{
\code{rbeta2} converts the mean and stdev to appropriate shape parameters and calls \code{rbeta}.
It has similar functionality to \code{betaval} but is faster.
}
\details{
All arguments must be scalars.
}
\seealso{
\code{\link{rbeta}} and \code{\link[popbio]{betaval}}
}
|
/man/rbeta2.Rd
|
no_license
|
juvelas/PVA
|
R
| false
| true
| 809
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rbeta2.R
\name{rbeta2}
\alias{rbeta2}
\title{Generate random number(s) from a beta distribution with specified mean and standard deviation}
\usage{
rbeta2(n, mean, sdev)
}
\arguments{
\item{n}{number of random deviates desired}
\item{mean}{mean of the distribution (must be in (0,1))}
\item{sdev}{standard deviation of the distribution. Limits depend on the value of \code{mean}}
}
\value{
a vector of length \code{n} with the random numbers
}
\description{
\code{rbeta2} converts the mean and stdev to appropriate shape parameters and calls \code{rbeta}.
It has similar functionality to \code{betaval} but is faster.
}
\details{
All arguments must be scalars.
}
\seealso{
\code{\link{rbeta}} and \code{\link[popbio]{betaval}}
}
|
########################################################################
# dailyhourlybarplot/createDataList.R
#
# Create a list of data needed to generate the plot.
#
# Author: Tate Brasel, Spencer Pease, Jonathan Callahan
########################################################################
createDataList <- function(infoList = NULL, dataDir = NULL) {

  # Create a list of data needed to generate the daily/hourly barplot.
  #
  # Args:
  #   infoList -- list of request parameters; must contain `monitorids`,
  #               `startdate` and `enddate`.
  #   dataDir  -- directory containing local monitoring data files.
  #
  # Returns a list with:
  #   ws_monitor -- ws_monitor object loaded via PWFSLSmoke::monitor_load()
  #   tableData  -- data frame of site metadata for tabular presentation

  logger.debug("----- createDataList() -----")

  # ----- Validate parameters ------------------------------------------------

  MazamaCoreUtils::stopIfNull(infoList)
  MazamaCoreUtils::stopIfNull(dataDir)

  # stopIfNull() above guarantees dataDir is non-NULL at this point, so the
  # original `if ( !is.null(dataDir) )` wrapper was redundant and is removed.
  if ( !dir.exists(dataDir) ) {
    err_msg <- sprintf("dataDir = '%s' doesn't exist", dataDir)
    logger.error(err_msg)
    stop(err_msg)
  }

  # ----- Get infoList parameters --------------------------------------------

  monitorids <- infoList$monitorids
  startdate <- infoList$startdate
  enddate <- infoList$enddate

  logger.trace("Getting parameters from infoList:")
  logger.trace("startdate = '%s'", printUTC(startdate))
  logger.trace("enddate = '%s'", printUTC(enddate))
  logger.trace("monitorids = '%s'", paste0(monitorids, collapse = ","))

  # ----- Load ws_monitor data -----------------------------------------------

  ## NOTE:
  #  Host data directories are mounted at dataDir as specified in the
  #  docker-compose file

  if ( startdate > lubridate::now(tzone = "UTC") - lubridate::days(10) ) {

    # * Recent data uses local files -----------------------------------------

    result <- try({
      logger.trace("loading latest monitoring data from %s", dataDir)
      ws_monitor <- PWFSLSmoke::monitor_load(
        startdate = startdate,
        enddate = enddate,
        monitorIDs = monitorids,
        dataDir = dataDir
      )
    }, silent = TRUE)

  } else {

    # * Archival data loads as needed ----------------------------------------

    result <- try({
      logger.trace("Loading archival data with `monitor_load()`")
      ws_monitor <- PWFSLSmoke::monitor_load(
        startdate = startdate,
        enddate = enddate,
        monitorIDs = monitorids
      )
    }, silent = TRUE)

  }

  # Use inherits() rather than inspecting class() directly -- idiomatic and
  # robust when objects carry multiple classes.
  if ( inherits(result, "try-error") ) {
    err_msg <- geterrmessage()
    stop(paste0("Error loading data: ", err_msg))
  }

  # # ----- Load and subset data ---------------------------------------------
  # # Load latest monitoring data (most recent 45 days)
  # dailyData <- loadDaily()
  # latestData <- loadLatest()
  # ws_monitor <- monitor_join(dailyData, latestData, monitorIDs)

  # ----- Validate data ------------------------------------------------------

  # Check for bad monitorIDs
  badMonitorIDs <- setdiff(monitorids, ws_monitor$meta$monitorID)
  goodMonitorIDs <- intersect(monitorids, ws_monitor$meta$monitorID)

  if ( length(badMonitorIDs) > 0 ) {
    logger.trace(
      "The following monitors are not found in the most recent 45 days of data: %s",
      paste0(badMonitorIDs, collapse = ", "))
  }

  if ( length(goodMonitorIDs) == 0 ) {
    stop("No data available for the selected monitors", call. = FALSE)
  }

  # NOTE: siteName is used in the table and for facet_wrap() in the ggplot code.
  # NOTE: Bad things happen if siteName == NA. Here we replace missing values
  # NOTE: with monitorID.
  badSiteMask <- is.na(ws_monitor$meta$siteName)
  ws_monitor$meta$siteName[badSiteMask] <- ws_monitor$meta$monitorID[badSiteMask]

  # ----- Create data structures ---------------------------------------------

  # Create a dataframe for tabular presentation
  tableData <- ws_monitor$meta[, c("siteName", "countyName", "stateCode", "agencyName")]
  tableData$countyName <- stringr::str_to_title(tableData$countyName)
  names(tableData) <- c("Site", "County", "State", "Agency")

  # Create dataList
  dataList <- list(
    ws_monitor = ws_monitor,
    tableData = tableData
  )

  return(dataList)

}
|
/monitor-custom/R/dailyhourlybarplot/createDataList.R
|
no_license
|
MazamaScience/monitoring-custom-service
|
R
| false
| false
| 3,902
|
r
|
########################################################################
# dailyhourlybarplot/createDataList.R
#
# Create a list of data needed to generate the plot.
#
# Author: Tate Brasel, Spencer Pease, Jonathan Callahan
########################################################################
createDataList <- function(infoList = NULL, dataDir = NULL) {

  # Create a list of data needed to generate the daily/hourly barplot.
  #
  # Args:
  #   infoList -- list of request parameters; must contain `monitorids`,
  #               `startdate` and `enddate`.
  #   dataDir  -- directory containing local monitoring data files.
  #
  # Returns a list with:
  #   ws_monitor -- ws_monitor object loaded via PWFSLSmoke::monitor_load()
  #   tableData  -- data frame of site metadata for tabular presentation

  logger.debug("----- createDataList() -----")

  # ----- Validate parameters ------------------------------------------------

  MazamaCoreUtils::stopIfNull(infoList)
  MazamaCoreUtils::stopIfNull(dataDir)

  # stopIfNull() above guarantees dataDir is non-NULL at this point, so the
  # original `if ( !is.null(dataDir) )` wrapper was redundant and is removed.
  if ( !dir.exists(dataDir) ) {
    err_msg <- sprintf("dataDir = '%s' doesn't exist", dataDir)
    logger.error(err_msg)
    stop(err_msg)
  }

  # ----- Get infoList parameters --------------------------------------------

  monitorids <- infoList$monitorids
  startdate <- infoList$startdate
  enddate <- infoList$enddate

  logger.trace("Getting parameters from infoList:")
  logger.trace("startdate = '%s'", printUTC(startdate))
  logger.trace("enddate = '%s'", printUTC(enddate))
  logger.trace("monitorids = '%s'", paste0(monitorids, collapse = ","))

  # ----- Load ws_monitor data -----------------------------------------------

  ## NOTE:
  #  Host data directories are mounted at dataDir as specified in the
  #  docker-compose file

  if ( startdate > lubridate::now(tzone = "UTC") - lubridate::days(10) ) {

    # * Recent data uses local files -----------------------------------------

    result <- try({
      logger.trace("loading latest monitoring data from %s", dataDir)
      ws_monitor <- PWFSLSmoke::monitor_load(
        startdate = startdate,
        enddate = enddate,
        monitorIDs = monitorids,
        dataDir = dataDir
      )
    }, silent = TRUE)

  } else {

    # * Archival data loads as needed ----------------------------------------

    result <- try({
      logger.trace("Loading archival data with `monitor_load()`")
      ws_monitor <- PWFSLSmoke::monitor_load(
        startdate = startdate,
        enddate = enddate,
        monitorIDs = monitorids
      )
    }, silent = TRUE)

  }

  # Use inherits() rather than inspecting class() directly -- idiomatic and
  # robust when objects carry multiple classes.
  if ( inherits(result, "try-error") ) {
    err_msg <- geterrmessage()
    stop(paste0("Error loading data: ", err_msg))
  }

  # # ----- Load and subset data ---------------------------------------------
  # # Load latest monitoring data (most recent 45 days)
  # dailyData <- loadDaily()
  # latestData <- loadLatest()
  # ws_monitor <- monitor_join(dailyData, latestData, monitorIDs)

  # ----- Validate data ------------------------------------------------------

  # Check for bad monitorIDs
  badMonitorIDs <- setdiff(monitorids, ws_monitor$meta$monitorID)
  goodMonitorIDs <- intersect(monitorids, ws_monitor$meta$monitorID)

  if ( length(badMonitorIDs) > 0 ) {
    logger.trace(
      "The following monitors are not found in the most recent 45 days of data: %s",
      paste0(badMonitorIDs, collapse = ", "))
  }

  if ( length(goodMonitorIDs) == 0 ) {
    stop("No data available for the selected monitors", call. = FALSE)
  }

  # NOTE: siteName is used in the table and for facet_wrap() in the ggplot code.
  # NOTE: Bad things happen if siteName == NA. Here we replace missing values
  # NOTE: with monitorID.
  badSiteMask <- is.na(ws_monitor$meta$siteName)
  ws_monitor$meta$siteName[badSiteMask] <- ws_monitor$meta$monitorID[badSiteMask]

  # ----- Create data structures ---------------------------------------------

  # Create a dataframe for tabular presentation
  tableData <- ws_monitor$meta[, c("siteName", "countyName", "stateCode", "agencyName")]
  tableData$countyName <- stringr::str_to_title(tableData$countyName)
  names(tableData) <- c("Site", "County", "State", "Agency")

  # Create dataList
  dataList <- list(
    ws_monitor = ws_monitor,
    tableData = tableData
  )

  return(dataList)

}
|
# Exercise 4-1: explore the ggplot2 `mpg` dataset
mpg
str(mpg)
?mpg
# Answers:
# 1. 11 (variables)
# 2. 234 (observations)
# 3. "engine displacement" is the engine capacity; "miles per gallon" is the
#    US counterpart of litres per 100 km
# 4. Variable glossary:
# manufacturer - manufacturer name
# model - model name
# displ - engine displacement, in litres
# year - year of manufacture
# cyl - number of cylinders
# trans - type of transmission
# drv - f = front-wheel drive, r = rear wheel drive, 4 = 4wd
# cty - city miles per gallon
# hwy - highway miles per gallon
# fl - fuel type
# class - "type" of car
# Exercise 4-2: scatterplot of engine displacement vs. city mileage
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = cty))
# Exercise 4-3: bar chart of car counts per class (Polish-language labels)
ggplot(data = mpg) +
geom_bar(mapping = aes(x = class, fill = class)) +
labs(title = "Ilość samochodów w poszczególnych klasach", x = "Klasa", y = "Ilość")
# Exercise 4-4: histogram of engine displacements
ggplot(data = mpg) +
geom_histogram(mapping = aes(displ), bins = 35) +
labs(title = "Pojemności silnika", x = "Pojemność", y = "Ilość")
# Exercise 4-5: boxplots of displacement by class, axes flipped
ggplot(data = mpg, aes(x = class, y = displ)) +
geom_boxplot() +
coord_flip() +
labs(title = "Zależność pojemności silnika od klasy", x = "Pojemność", y = "Klasa")
|
/zadania4.R
|
no_license
|
misiolek/tipn_zad_misiolek
|
R
| false
| false
| 1,297
|
r
|
# Exercise 4-1: explore the ggplot2 `mpg` dataset
mpg
str(mpg)
?mpg
# Answers:
# 1. 11 (variables)
# 2. 234 (observations)
# 3. "engine displacement" is the engine capacity; "miles per gallon" is the
#    US counterpart of litres per 100 km
# 4. Variable glossary:
# manufacturer - manufacturer name
# model - model name
# displ - engine displacement, in litres
# year - year of manufacture
# cyl - number of cylinders
# trans - type of transmission
# drv - f = front-wheel drive, r = rear wheel drive, 4 = 4wd
# cty - city miles per gallon
# hwy - highway miles per gallon
# fl - fuel type
# class - "type" of car
# Exercise 4-2: scatterplot of engine displacement vs. city mileage
ggplot(data = mpg) +
geom_point(mapping = aes(x = displ, y = cty))
# Exercise 4-3: bar chart of car counts per class (Polish-language labels)
ggplot(data = mpg) +
geom_bar(mapping = aes(x = class, fill = class)) +
labs(title = "Ilość samochodów w poszczególnych klasach", x = "Klasa", y = "Ilość")
# Exercise 4-4: histogram of engine displacements
ggplot(data = mpg) +
geom_histogram(mapping = aes(displ), bins = 35) +
labs(title = "Pojemności silnika", x = "Pojemność", y = "Ilość")
# Exercise 4-5: boxplots of displacement by class, axes flipped
ggplot(data = mpg, aes(x = class, y = displ)) +
geom_boxplot() +
coord_flip() +
labs(title = "Zależność pojemności silnika od klasy", x = "Pojemność", y = "Klasa")
|
# Extracted example from the gethr package documentation for shh_getSymKey().
library(gethr)
### Name: shh_getSymKey
### Title: Symmetric key given a symmetric key ID.
### Aliases: shh_getSymKey
### ** Examples
## No test:
# NOTE(review): presumably requires a running Ethereum node for the call to
# succeed -- not executable in isolation; verify before running.
shh_getSymKey('8d7b2dff569d14308a8e74ca1475dd93ba8dd42a9a74e97638796d5d6c8751ac')
## End(No test)
|
/data/genthat_extracted_code/gethr/examples/shh_getSymKey.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 252
|
r
|
# Extracted example from the gethr package documentation for shh_getSymKey().
library(gethr)
### Name: shh_getSymKey
### Title: Symmetric key given a symmetric key ID.
### Aliases: shh_getSymKey
### ** Examples
## No test:
# NOTE(review): presumably requires a running Ethereum node for the call to
# succeed -- not executable in isolation; verify before running.
shh_getSymKey('8d7b2dff569d14308a8e74ca1475dd93ba8dd42a9a74e97638796d5d6c8751ac')
## End(No test)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.backup_operations.R
\name{describe_protected_resource}
\alias{describe_protected_resource}
\title{Returns information about a saved resource, including the last time it was backed-up, its Amazon Resource Name (ARN), and the AWS service type of the saved resource}
\usage{
describe_protected_resource(ResourceArn)
}
\arguments{
\item{ResourceArn}{[required] An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the resource type.}
}
\description{
Returns information about a saved resource, including the last time it was backed-up, its Amazon Resource Name (ARN), and the AWS service type of the saved resource.
}
\section{Accepted Parameters}{
\preformatted{describe_protected_resource(
ResourceArn = "string"
)
}
}
|
/service/paws.backup/man/describe_protected_resource.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 850
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.backup_operations.R
\name{describe_protected_resource}
\alias{describe_protected_resource}
\title{Returns information about a saved resource, including the last time it was backed-up, its Amazon Resource Name (ARN), and the AWS service type of the saved resource}
\usage{
describe_protected_resource(ResourceArn)
}
\arguments{
\item{ResourceArn}{[required] An Amazon Resource Name (ARN) that uniquely identifies a resource. The format of the ARN depends on the resource type.}
}
\description{
Returns information about a saved resource, including the last time it was backed-up, its Amazon Resource Name (ARN), and the AWS service type of the saved resource.
}
\section{Accepted Parameters}{
\preformatted{describe_protected_resource(
ResourceArn = "string"
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Tuning.R
\name{MakeLowessPred}
\alias{MakeLowessPred}
\title{Predict RFLOWESS Tuning}
\usage{
MakeLowessPred(OOBWeights, PredWeights, TRAINY, TEST, tol = tol, ind, parvec,
method = "Tukey")
}
\arguments{
\item{OOBWeights}{matrix of training cases with response in last column}
\item{PredWeights}{Vector of zeros and ones indicating whether training cases came from contaminating distribution}
\item{TRAINY}{number of folds to perform in cross validation}
\item{TEST}{Test data}
\item{tol}{maximal change per iteration for LOWESSRF weights in cross validation}
\item{ind}{index of parameter vector to use in tuning}
\item{parvec}{vector of candidate values for tuning parameter alpha}
\item{method}{should Tukey or Huber weighting function be used?}
\item{ndsize}{nodesize random forest tuning parameter for cross validation}
}
\value{
Returns:
LPREDERR: A length 2 vector containing MSPE and MAPE on test data using alpha parameter specified
LWeights: A ntest by ntrain matrix containing LOWESSRF weights for test cases
LIter: Number of iterations until convergence of LOWESSRF algorithm
}
\description{
This is a function to be called inside TuneMultifoldCV. It makes the RFLOWESS predictions for a specific alpha
and records them in a matrix LPREDERR.
}
|
/man/MakeLowessPred.Rd
|
no_license
|
AndrewjSage/RFLOWESS
|
R
| false
| true
| 1,368
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Tuning.R
\name{MakeLowessPred}
\alias{MakeLowessPred}
\title{Predict RFLOWESS Tuning}
\usage{
MakeLowessPred(OOBWeights, PredWeights, TRAINY, TEST, tol = tol, ind, parvec,
method = "Tukey")
}
\arguments{
\item{OOBWeights}{matrix of training cases with response in last column}
\item{PredWeights}{Vector of zeros and ones indicating whether training cases came from contaminating distribution}
\item{TRAINY}{number of folds to perform in cross validation}
\item{TEST}{Test data}
\item{tol}{maximal change per iteration for LOWESSRF weights in cross validation}
\item{ind}{index of parameter vector to use in tuning}
\item{parvec}{vector of candidate values for tuning parameter alpha}
\item{method}{should Tukey or Huber weighting function be used?}
\item{ndsize}{nodesize random forest tuning parameter for cross validation}
}
\value{
Returns:
LPREDERR: A length 2 vector containing MSPE and MAPE on test data using alpha parameter specified
LWeights: A ntest by ntrain matrix containing LOWESSRF weights for test cases
LIter: Number of iterations until convergence of LOWESSRF algorithm
}
\description{
This is a function to be called inside TuneMultifoldCV. It makes the RFLOWESS predictions for a specific alpha
and records them in a matrix LPREDERR.
}
|
# qsub -v script=pQTL_Mapk3_mediation Rsubmit_args.sh
# R/3.4.1
#
# Mediation scan: for each protein with a trans pQTL on chromosome 7,
# compare the interactive-age LOD score before vs. after conditioning on
# Mapk3 mRNA.  A drop in LOD suggests mediation through Mapk3.

setwd("/projects/korstanje-lab/ytakemon/JAC_DO_Kidney/")
load("./RNAseq_data/DO188b_kidney_noprobs.RData")

library(ggplot2)
library(dplyr)
library(scales)

# Get list of genes with trans pQTL on the target chromosome
list <- read.csv("./QTLscan/output/Threshold6_pQTL_intAge_pbatch.csv", header = TRUE, stringsAsFactors = FALSE)
chr <- "7"
list <- list[list$IntAgeChr == chr, ]
gene <- "Mapk3"

# parameters
addscan.dir <- "./QTLscan/addscan_prot_Mapk3/"
intscan.dir.Age <- "./QTLscan/intscan_prot_Mapk3/"
output.file1 <- "./QTLscan/output/pQTLBestperGene_Mapk3_thr6_chr7.csv"

# Restrict protein annotation to the trans-pQTL genes and add empty columns
# that will hold the best additive / interactive-age peaks.
annot.protein <- annot.protein[annot.protein$id %in% list$id, ]
output <- annot.protein[, c(1:6, 10)]
output$AdditiveLOD <- output$AdditivePos <- output$AdditiveChr <- NA
output$IntAgeLODFull <- output$IntAgeLODDiff <- output$IntAgePos <- output$IntAgeChr <- NA

# Scan result files are named "<protein id>_<gene symbol>.rds"
file.name <- function(i) paste0(output$id[i], "_", output$symbol[i], ".rds")

# Collect, for each protein, the best additive and interactive-age LOD on chr 7
for (i in seq_len(nrow(output))) {
  if (i %% 10 == 0) print(i)  # progress indicator

  # Skip proteins for which either scan file is missing
  if (file.exists(paste0(addscan.dir, file.name(i))) && file.exists(paste0(intscan.dir.Age, file.name(i)))) {
    fit0 <- readRDS(paste0(addscan.dir, file.name(i)))
    fitAge <- readRDS(paste0(intscan.dir.Age, file.name(i)))
  } else {
    next
  }

  # additive scan: rownames encode marker position as "<chr>_<pos>"
  dt <- data.frame(AdditiveChr = sapply(strsplit(rownames(fit0), "_"), "[", 1),
                   AdditivePos = as.numeric(sapply(strsplit(rownames(fit0), "_"), "[", 2)),
                   AdditiveLOD = fit0[, 1], stringsAsFactors = FALSE)
  # find max lod score of whole genome (retired)
  # dt2 <- dt %>% group_by(AdditiveChr) %>%
  #   summarize(AdditivePos = AdditivePos[which.max(AdditiveLOD)[1]],
  #             AdditiveLOD = max(AdditiveLOD)) %>%
  #   arrange(-AdditiveLOD)
  # output[i, c("AdditiveChr", "AdditivePos", "AdditiveLOD")] <- dt2[1,]

  # find max lod score of chr 7 ONLY!
  # (BUGFIX: the original pre-subset `dt2 <- dt[dt$AdditiveChr == chr,]` was
  # immediately overwritten by the pipeline below and has been removed.)
  dt2 <- dt %>% group_by(AdditiveChr) %>%
    summarize(AdditivePos = AdditivePos[which.max(AdditiveLOD)[1]],
              AdditiveLOD = max(AdditiveLOD)) %>%
    arrange(-AdditiveLOD)
  output[i, c("AdditiveChr", "AdditivePos", "AdditiveLOD")] <- dt2[dt2$AdditiveChr == chr, ]

  # int. scan - Age: LODDiff isolates the age-interaction component
  stopifnot(rownames(fit0) == rownames(fitAge))
  dt <- data.frame(IntAgeChr = sapply(strsplit(rownames(fit0), "_"), "[", 1),
                   IntAgePos = as.numeric(sapply(strsplit(rownames(fit0), "_"), "[", 2)),
                   IntAgeLODDiff = fitAge[, 1] - fit0[, 1],
                   IntAgeLODFull = fitAge[, 1],
                   stringsAsFactors = FALSE)
  # find max lod score of whole genome (retired)
  # dt2 <- dt %>% group_by(IntAgeChr) %>%
  #   summarize(IntAgePos = IntAgePos[which.max(IntAgeLODFull)[1]],
  #             IntAgeLODDiff = IntAgeLODDiff[which.max(IntAgeLODFull)[1]],
  #             IntAgeLODFull = max(IntAgeLODFull)) %>%
  #   arrange(-IntAgeLODDiff)
  # output[i, c("IntAgeChr", "IntAgePos", "IntAgeLODDiff", "IntAgeLODFull")] <- dt2[1,]

  # find max lod score of chr7 ONLY!
  dt2 <- dt %>% group_by(IntAgeChr) %>%
    summarize(IntAgePos = IntAgePos[which.max(IntAgeLODFull)[1]],
              IntAgeLODDiff = IntAgeLODDiff[which.max(IntAgeLODFull)[1]],
              IntAgeLODFull = max(IntAgeLODFull)) %>%
    arrange(-IntAgeLODDiff)
  output[i, c("IntAgeChr", "IntAgePos", "IntAgeLODDiff", "IntAgeLODFull")] <- dt2[dt2$IntAgeChr == chr, ]
}

# collect rows into one data frame
write.csv(output, file = output.file1, row.names = FALSE)

# compare & plot -------------------------------------------------------------
list <- read.csv("./QTLscan/output/Threshold6_pQTL_intAge_pbatch.csv", header = TRUE, stringsAsFactors = FALSE)
list <- list[list$IntAgeChr == chr, ]
list <- arrange(list, id)

# output.file1 <- "./QTLscan/output/pQTLBestperGene_Mapk3_thr8_chr7.csv"
# (FIX: `paste(output.file1)` was a no-op wrapper; pass the path directly.)
list_add <- read.csv(file = output.file1, header = TRUE, stringsAsFactors = FALSE)
list_add <- arrange(list_add, id)

if (identical(list$id, list_add$id)) {
  compare <- list[, colnames(list) %in% c("id", "gene_id", "symbol", "IntAgeChr", "IntAgePos", "IntAgeLODDiff")]
  compare$addIntAgeChr <- list_add$IntAgeChr
  compare$addIntAgePos <- list_add$IntAgePos
  compare$addIntAgeLODDiff <- list_add$IntAgeLODDiff
  compare <- compare[complete.cases(compare$addIntAgeChr), ]
  write.csv(compare, file = "./QTLscan/output/pQTLint_Mapk3_chr7_thr6.csv", row.names = FALSE)
} else {
  print("Lists do not match")
}

# Plot chr7 LOD scores: original vs. Mapk3-conditioned
# (FIX: comment previously said "Chr15"; the plot is for chr7.
#  BUGFIX: `heigh = 9` relied on partial argument matching -- spelled out.)
pdf("./QTLscan/output/plots/pQTL_Mapk3_Mediation_chr7_thr6.pdf", width = 9, height = 9)
ggplot(compare, aes(x = IntAgeLODDiff, y = addIntAgeLODDiff)) +
  geom_point(alpha = 0.5) +
  geom_abline(intercept = 0, slope = 1, color = "red") +
  geom_abline(intercept = -2, slope = 1, color = "blue") +
  scale_x_continuous(name = "LOD score Interactive age pQTL-diff",
                     breaks = seq(0, 15, by = 1),
                     labels = seq(0, 15, by = 1)) +
  scale_y_continuous(name = "LOD score (X | Mapk3 mRNA)",
                     breaks = seq(0, 12, by = 1),
                     labels = seq(0, 12, by = 1)) +
  theme_bw() +
  labs(title = paste0("pQTL Chr", chr, " Genes ", gene, " Mediation"),
       subtitle = paste0("Chr ", chr, " total: ", nrow(compare), " genes, threshold > 6 "))
dev.off()

# Plot LOD score *retired*
# pdf("./QTLscan/output/plots/pQTL_Akt1Mediation_thr6.pdf", width = 9, height = 9)
# ggplot(compare, aes(x=IntAgeLODDiff, y=addIntAgeLODDiff, colour = change)) +
#   geom_point(alpha=0.5) +
#   geom_abline(intercept = 0, slope = 1, color="red") +
#   guides(colour=guide_legend(title = "Mediation")) +
#   xlab("LOD score Interactive age pQTL-diff") +
#   ylab("LOD score (X | Akt1)") +
#   theme_bw() +
#   labs(title="Akt1 pQTL Chr12 Genes Mediation",
#        subtitle = paste0("Chr 12 total: ", nrow(compare), " genes, mediated: ", table(compare$change)[[2]], " genes, threshold > 6 "))
# dev.off()
|
/QTLmapping/scan_addtrans/pQTL_Mapk3_mediation.R
|
permissive
|
ytakemon/JAC_DO_Kidney
|
R
| false
| false
| 5,915
|
r
|
# qsub -v script=pQTL_Mapk3_mediation Rsubmit_args.sh
# R/3.4.1
#
# Mediation scan: for each protein with a trans pQTL on chromosome 7,
# compare the interactive-age LOD score before vs. after conditioning on
# Mapk3 mRNA.  A drop in LOD suggests mediation through Mapk3.

setwd("/projects/korstanje-lab/ytakemon/JAC_DO_Kidney/")
load("./RNAseq_data/DO188b_kidney_noprobs.RData")

library(ggplot2)
library(dplyr)
library(scales)

# Get list of genes with trans pQTL on the target chromosome
list <- read.csv("./QTLscan/output/Threshold6_pQTL_intAge_pbatch.csv", header = TRUE, stringsAsFactors = FALSE)
chr <- "7"
list <- list[list$IntAgeChr == chr, ]
gene <- "Mapk3"

# parameters
addscan.dir <- "./QTLscan/addscan_prot_Mapk3/"
intscan.dir.Age <- "./QTLscan/intscan_prot_Mapk3/"
output.file1 <- "./QTLscan/output/pQTLBestperGene_Mapk3_thr6_chr7.csv"

# Restrict protein annotation to the trans-pQTL genes and add empty columns
# that will hold the best additive / interactive-age peaks.
annot.protein <- annot.protein[annot.protein$id %in% list$id, ]
output <- annot.protein[, c(1:6, 10)]
output$AdditiveLOD <- output$AdditivePos <- output$AdditiveChr <- NA
output$IntAgeLODFull <- output$IntAgeLODDiff <- output$IntAgePos <- output$IntAgeChr <- NA

# Scan result files are named "<protein id>_<gene symbol>.rds"
file.name <- function(i) paste0(output$id[i], "_", output$symbol[i], ".rds")

# Collect, for each protein, the best additive and interactive-age LOD on chr 7
for (i in seq_len(nrow(output))) {
  if (i %% 10 == 0) print(i)  # progress indicator

  # Skip proteins for which either scan file is missing
  if (file.exists(paste0(addscan.dir, file.name(i))) && file.exists(paste0(intscan.dir.Age, file.name(i)))) {
    fit0 <- readRDS(paste0(addscan.dir, file.name(i)))
    fitAge <- readRDS(paste0(intscan.dir.Age, file.name(i)))
  } else {
    next
  }

  # additive scan: rownames encode marker position as "<chr>_<pos>"
  dt <- data.frame(AdditiveChr = sapply(strsplit(rownames(fit0), "_"), "[", 1),
                   AdditivePos = as.numeric(sapply(strsplit(rownames(fit0), "_"), "[", 2)),
                   AdditiveLOD = fit0[, 1], stringsAsFactors = FALSE)
  # find max lod score of whole genome (retired)
  # dt2 <- dt %>% group_by(AdditiveChr) %>%
  #   summarize(AdditivePos = AdditivePos[which.max(AdditiveLOD)[1]],
  #             AdditiveLOD = max(AdditiveLOD)) %>%
  #   arrange(-AdditiveLOD)
  # output[i, c("AdditiveChr", "AdditivePos", "AdditiveLOD")] <- dt2[1,]

  # find max lod score of chr 7 ONLY!
  # (BUGFIX: the original pre-subset `dt2 <- dt[dt$AdditiveChr == chr,]` was
  # immediately overwritten by the pipeline below and has been removed.)
  dt2 <- dt %>% group_by(AdditiveChr) %>%
    summarize(AdditivePos = AdditivePos[which.max(AdditiveLOD)[1]],
              AdditiveLOD = max(AdditiveLOD)) %>%
    arrange(-AdditiveLOD)
  output[i, c("AdditiveChr", "AdditivePos", "AdditiveLOD")] <- dt2[dt2$AdditiveChr == chr, ]

  # int. scan - Age: LODDiff isolates the age-interaction component
  stopifnot(rownames(fit0) == rownames(fitAge))
  dt <- data.frame(IntAgeChr = sapply(strsplit(rownames(fit0), "_"), "[", 1),
                   IntAgePos = as.numeric(sapply(strsplit(rownames(fit0), "_"), "[", 2)),
                   IntAgeLODDiff = fitAge[, 1] - fit0[, 1],
                   IntAgeLODFull = fitAge[, 1],
                   stringsAsFactors = FALSE)
  # find max lod score of whole genome (retired)
  # dt2 <- dt %>% group_by(IntAgeChr) %>%
  #   summarize(IntAgePos = IntAgePos[which.max(IntAgeLODFull)[1]],
  #             IntAgeLODDiff = IntAgeLODDiff[which.max(IntAgeLODFull)[1]],
  #             IntAgeLODFull = max(IntAgeLODFull)) %>%
  #   arrange(-IntAgeLODDiff)
  # output[i, c("IntAgeChr", "IntAgePos", "IntAgeLODDiff", "IntAgeLODFull")] <- dt2[1,]

  # find max lod score of chr7 ONLY!
  dt2 <- dt %>% group_by(IntAgeChr) %>%
    summarize(IntAgePos = IntAgePos[which.max(IntAgeLODFull)[1]],
              IntAgeLODDiff = IntAgeLODDiff[which.max(IntAgeLODFull)[1]],
              IntAgeLODFull = max(IntAgeLODFull)) %>%
    arrange(-IntAgeLODDiff)
  output[i, c("IntAgeChr", "IntAgePos", "IntAgeLODDiff", "IntAgeLODFull")] <- dt2[dt2$IntAgeChr == chr, ]
}

# collect rows into one data frame
write.csv(output, file = output.file1, row.names = FALSE)

# compare & plot -------------------------------------------------------------
list <- read.csv("./QTLscan/output/Threshold6_pQTL_intAge_pbatch.csv", header = TRUE, stringsAsFactors = FALSE)
list <- list[list$IntAgeChr == chr, ]
list <- arrange(list, id)

# output.file1 <- "./QTLscan/output/pQTLBestperGene_Mapk3_thr8_chr7.csv"
# (FIX: `paste(output.file1)` was a no-op wrapper; pass the path directly.)
list_add <- read.csv(file = output.file1, header = TRUE, stringsAsFactors = FALSE)
list_add <- arrange(list_add, id)

if (identical(list$id, list_add$id)) {
  compare <- list[, colnames(list) %in% c("id", "gene_id", "symbol", "IntAgeChr", "IntAgePos", "IntAgeLODDiff")]
  compare$addIntAgeChr <- list_add$IntAgeChr
  compare$addIntAgePos <- list_add$IntAgePos
  compare$addIntAgeLODDiff <- list_add$IntAgeLODDiff
  compare <- compare[complete.cases(compare$addIntAgeChr), ]
  write.csv(compare, file = "./QTLscan/output/pQTLint_Mapk3_chr7_thr6.csv", row.names = FALSE)
} else {
  print("Lists do not match")
}

# Plot chr7 LOD scores: original vs. Mapk3-conditioned
# (FIX: comment previously said "Chr15"; the plot is for chr7.
#  BUGFIX: `heigh = 9` relied on partial argument matching -- spelled out.)
pdf("./QTLscan/output/plots/pQTL_Mapk3_Mediation_chr7_thr6.pdf", width = 9, height = 9)
ggplot(compare, aes(x = IntAgeLODDiff, y = addIntAgeLODDiff)) +
  geom_point(alpha = 0.5) +
  geom_abline(intercept = 0, slope = 1, color = "red") +
  geom_abline(intercept = -2, slope = 1, color = "blue") +
  scale_x_continuous(name = "LOD score Interactive age pQTL-diff",
                     breaks = seq(0, 15, by = 1),
                     labels = seq(0, 15, by = 1)) +
  scale_y_continuous(name = "LOD score (X | Mapk3 mRNA)",
                     breaks = seq(0, 12, by = 1),
                     labels = seq(0, 12, by = 1)) +
  theme_bw() +
  labs(title = paste0("pQTL Chr", chr, " Genes ", gene, " Mediation"),
       subtitle = paste0("Chr ", chr, " total: ", nrow(compare), " genes, threshold > 6 "))
dev.off()

# Plot LOD score *retired*
# pdf("./QTLscan/output/plots/pQTL_Akt1Mediation_thr6.pdf", width = 9, height = 9)
# ggplot(compare, aes(x=IntAgeLODDiff, y=addIntAgeLODDiff, colour = change)) +
#   geom_point(alpha=0.5) +
#   geom_abline(intercept = 0, slope = 1, color="red") +
#   guides(colour=guide_legend(title = "Mediation")) +
#   xlab("LOD score Interactive age pQTL-diff") +
#   ylab("LOD score (X | Akt1)") +
#   theme_bw() +
#   labs(title="Akt1 pQTL Chr12 Genes Mediation",
#        subtitle = paste0("Chr 12 total: ", nrow(compare), " genes, mediated: ", table(compare$change)[[2]], " genes, threshold > 6 "))
# dev.off()
|
#' @title Sub-community plot under each sub-environmental space K
#' @aliases subplot
#' @description The function to represent the community subniche position under each subenvironment K with their respective marginality from Gk.
#' @param ... further arguments passed to or from other methods.
#' @param subnic an object of class \code{subniche}.
#' @param sig_thres value for minimum significance, default 0.05
#' @param sig a factor defining the significance species, default NULL.
#' @param main a main title for the plot, see \link[graphics]{title} for more details.
#' @param xlab a label for the x axis, defaults to a description of x, see \link[graphics]{title} for more details.
#' @param ylab a label for the y axis, defaults to a description of y, see \link[graphics]{title} for more details.
#' @param col.axis axis color, see \link[graphics]{par} for more details.
#' @param lty.axis axis line type, see \link[graphics]{par} for more details.
#' @param lwd.axis axis width, see \link[graphics]{par} for more details.
#'
#' @param pch.SR.pos type of the point representing SR position, see \link[graphics]{points} for more details.
#' @param cex.SR.pos size of the point representing SR position, see \link[graphics]{points} for more details.
#' @param col.SR.pt point color contour if pch=21:25.
#' @param col.SR.pos color of the point representing SR position, see \link[graphics]{points} for more details.
#' @param col.SR.lab color of the species labels, see \link[graphics]{text} for more details.
#' @param cex.SR.lab size of the species labels, defaults to NA for no labels, see \link[graphics]{text} for more details.
#' @param fac.SR.lab factor for moving the SR labels away from their original coordinates for clarity; by default they are multiplied by 1.2.
#' @param border.E color border of E polygon, see \link[graphics]{polygon} for more details.
#' @param col.E inside color of E polygon, see \link[graphics]{polygon} for more details.
#' @param lty.E line type for the E border, see \link[graphics]{polygon} for more details.
#' @param lwd.E line width for the E border, see \link[graphics]{polygon} for more details.
#'
#' @param border.K color border of K polygon, see \link[graphics]{polygon} for more details.
#' @param col.K inside color of K polygon, see \link[graphics]{polygon} for more details.
#' @param lty.K line type for the K border, see \link[graphics]{polygon} for more details.
#' @param lwd.K line width for the K border, see \link[graphics]{polygon} for more details.
#'
#' @param col.Gk.pos color of the point representing Gk, see \link[graphics]{points} for more details.
#' @param col.Gk.pt point color contour if pch=21:25.
#' @param cex.Gk.pos size of the point representing Gk, see \link[graphics]{points} for more details.
#' @param pch.Gk.pos type of the point representing Gk, see \link[graphics]{points} for more details.
#'
#' @param col.su color of the points representing the sampling units (SU), see \link[graphics]{points} for more details.
#' @param pt.su point color contour if pch=21:25.
#' @param cex.su size of the points representing the sampling units (SU), see \link[graphics]{points} for more details.
#' @param pch.su type of the points representing the sampling units (SU), see \link[graphics]{points} for more details.
#'
#' @param leg a logical option for legend to be plotted or not, default leg=T.
#' @param font.sp font of the species labels, see \link[graphics]{text} for more details.
#' @param posi.leg legend location in the graph, see \link[graphics]{legend} for more details.
#' @param col.arrow arrow color, see \link[graphics]{arrows} for more details.
#' @param angle.arrow arrow angle head, see \link[graphics]{arrows} for more details.
#' @param lwd.arrow arrow width, see \link[graphics]{arrows} for more details.
#' @param length.arrow arrow head length, see \link[graphics]{arrows} for more details.
#' @param bty.leg the type of box to be drawn around the legends. The allowed values are "o" (the default) and "n". See \link[graphics]{legend} for more details
#' @examples
#' library(subniche)
#' data(doubs)
#' dudi1 <- dudi.pca(doubs$env, scale = TRUE, scan = FALSE, nf = 3)
#' nic1 <- niche(dudi1, doubs$fish, scann = FALSE)
#' # number of sites
#' N <- dim(nic1$ls)[1]
#' #Create a factor which defines the subsets
#' fact <- factor(c(rep(1,N/2),rep(2,N/2)))
#' # nic1 will be used as reference and fact will be used to define the subniches environment
#' subnic1 <- subniche(nic1, fact)
#' #Two graphs are drawn one after the other
#' siggk <- rtestsubor(subnic1,10)
#' sig = c(siggk$`1`$witomigktest$subni.pvalue[-28],siggk$`2`$witomigktest$subni.pvalue[-28])
#' subplot(subnic1, sig = sig, sig_thres= 0.1)
#'
#' @rdname subplot
#' @export subplot
#' @importFrom graphics par layout arrows points legend polygon abline
#' @importFrom wordcloud textplot
#' @importFrom SIBER siberConvexhull
subplot <- function(subnic, main=NULL,sig=NULL, sig_thres=0.05, xlab=NULL, ylab=NULL, col.axis="azure3", lty.axis=2, lwd.axis=2,
                    pch.SR.pos=21,
                    cex.SR.pos=1,
                    col.SR.pt="black",
                    col.SR.pos="#ffa600",
                    col.SR.lab="black",
                    cex.SR.lab= NA,
                    fac.SR.lab=1.2,
                    border.E="black",
                    col.E="#92c5de",
                    lty.E=1,
                    lwd.E=1,
                    border.K ="black",
                    col.K ="#2c7fb8",
                    lty.K=1,
                    lwd.K=1,
                    col.arrow="black",
                    angle.arrow=20,
                    lwd.arrow=2,
                    length.arrow=0.1,
                    col.Gk.pos= "red",
                    col.Gk.pt= "black",
                    cex.Gk.pos=1,
                    pch.Gk.pos=21,
                    col.su="#b35806",
                    pt.su="black",
                    cex.su=0.7,
                    pch.su=1,
                    font.sp=2,
                    leg=TRUE,
                    posi.leg="topleft",
                    bty.leg="n", ...){
  # Draws one plot per level of the subniche factor: the overall environmental
  # space E (convex hull of all sampling units), the sub-environmental space K
  # (hull of the level's sampling units), the sub-origin Gk, the species
  # subniche positions (SR) and arrows from Gk to each SR (marginality).
  fac <- subnic$factor
  lev <- levels(fac)
  # Percentage of total inertia carried by the first two OMI axes, used for
  # the default axis labels.
  eig <- round(subnic$eig/sum(subnic$eig)*100,2)[1:2]
  if(is.null(xlab)){
    xlab=paste(paste("OMI1",eig[1], sep=" "),"%",sep="")}
  if(is.null(ylab)){
    ylab=paste(paste("OMI2",eig[2], sep=" "),"%",sep="")}
  N <- length(lev)
  subsp <- subnic$sub
  # For plain symbols (pch outside 21:25 there is no separate fill) the
  # contour colour falls back to the fill colour so points and legend match.
  if(pch.SR.pos<21|pch.SR.pos>25){
    col.SR.pt <- col.SR.pos
  }
  if(pch.Gk.pos<21|pch.Gk.pos>25){
    col.Gk.pt <- col.Gk.pos
  }
  if(pch.su<21|pch.su>25){
    pt.su <- col.su
  }
  for (i in 1:N){
    subnici <- subnic$ls[which(fac==lev[i]),]
    G_k <- subnic$G_k[grep(lev[i],rownames(subnic$G_k)),]
    if(is.null(sig)){
      subspk <- subsp[grep(lev[i],rownames(subsp)),]
      subspk <- subspk[!is.na(subspk[,1]),]
    }else{
      # Keep only the species whose subniche test is significant at sig_thres.
      subspk <- subsp[which(round(sig,2)<=sig_thres),]
      subspk <- subspk[grep(lev[i],rownames(subspk)),]
      subspk <- subspk[!is.na(subspk[,1]),]
    }
    # Species labels: strip the level prefix from the row names.
    sp <- sub(lev[i],"",rownames(subspk))
    m <- dim(subspk)[1]
    plot(subnic$ls, main=main, xlab=xlab, ylab=ylab, type="n",...)
    # E polygon: convex hull of the whole environmental space.
    E <- siberConvexhull(subnic$ls[,1], subnic$ls[,2])
    polygon(E$xcoords,E$ycoords, border=border.E, col=col.E, lty=lty.E, lwd=lwd.E)
    # K polygon: convex hull of the current sub-environment.
    K <- siberConvexhull(subnici[,1], subnici[,2])
    polygon(K$xcoords,K$ycoords, border=border.K, col=col.K, lty=lty.K, lwd=lwd.K)
    abline(h=0, lty=lty.axis, lwd=lwd.axis, col=col.axis)
    abline(v=0, lty=lty.axis, lwd=lwd.axis, col=col.axis)
    # Marginality arrows from the sub-origin Gk to each species position.
    arrows(rep(G_k[,1],m),rep( G_k[,2],m),subspk[,1], subspk[,2], angle=angle.arrow,
           col=col.arrow,lwd=lwd.arrow, length=length.arrow)
    points(subnici,cex=cex.su, col=pt.su, bg=col.su, pch=pch.su)
    points(G_k[,1], G_k[,2], col=col.Gk.pt, bg=col.Gk.pos, pch=pch.Gk.pos, cex= cex.Gk.pos)
    points(subspk[,1], subspk[,2], col=col.SR.pt, bg=col.SR.pos, pch=pch.SR.pos, cex= cex.SR.pos)
    if(!is.na(cex.SR.lab)){
      text(subspk[,1]*fac.SR.lab, subspk[,2]*fac.SR.lab, sp, col=col.SR.lab, font=font.sp, cex=cex.SR.lab)
    }
    if(isTRUE(leg)){
      # Legend entries, in order: 1=E, 2=K, 3=Gk, 4=SR, 5=SU.
      filli <- c(col.E, col.K, NA, NA, NA)
      borderi <- c(border.E, border.K, NA, NA, NA)
      col.leg <- c(NA,NA, col.Gk.pt, col.SR.pt, pt.su)
      col.bg <- c(NA,NA, col.Gk.pos,col.SR.pos,col.su)
      pch.leg <- c(NA,NA,pch.Gk.pos,pch.SR.pos,pch.su)
      tex.leg <- c("E",paste("K", lev[i], sep=""),paste("GK", lev[i], sep=""),"SR","SU")
      lty.leg <- c(0,0,0,0,0)
      lwd.leg <- c(0,0,0,0,0)
      posi.cex <-c(NA,NA,1,1,1)
      # Drop legend entries whose graphical element the caller disabled.
      if(is.na(col.E)){
        filli[1] <- NA
        borderi[1] <- NA
        tex.leg[1] <- NA
      }
      if(is.na(col.K)){
        filli[2] <- NA
        borderi[2] <- NA
        tex.leg[2] <- NA
      }
      if(anyNA(cex.Gk.pos)){
        posi.cex[3] <- NA
        tex.leg[3] <- NA
      }
      # BUGFIX: the next two branches previously overwrote entry 3 (Gk)
      # instead of their own legend entries (4 = SR, 5 = SU).
      if(anyNA(cex.SR.pos)){
        posi.cex[4] <- NA
        tex.leg[4] <- NA
      }
      if(anyNA(cex.su)){
        posi.cex[5] <- NA
        tex.leg[5] <- NA
      }
      if(lty.E>1){
        pch.leg[1] <- NA
        lty.leg[1] <- lty.E
        lwd.leg[1] <- lwd.E
      }
      if(lty.K>1){
        pch.leg[2] <- NA
        # BUGFIX: the K legend line must use K's line type/width, not E's.
        lty.leg[2] <- lty.K
        lwd.leg[2] <- lwd.K
      }
      legend(posi.leg, legend=tex.leg,fill =filli, border=borderi, pch=pch.leg, col=col.leg, pt.cex = posi.cex,
             pt.bg=col.bg,lty=lty.leg,pt.lwd=c(NA,NA,1,1,1), lwd=lwd.leg, bty=bty.leg,...)
    }
  }
}
|
/R/subplot.R
|
no_license
|
cran/subniche
|
R
| false
| false
| 9,437
|
r
|
#' @title Sub-community plot under each sub-environmental space K
#' @aliases subplot
#' @description The function to represent the community subniche position under each subenvironment K with their respective marginality from Gk.
#' @param ... further arguments passed to or from other methods.
#' @param subnic an object of class \code{subniche}.
#' @param sig_thres value for minimum significance, default 0.05
#' @param sig a factor defining the significance species, default NULL.
#' @param main a main title for the plot, see \link[graphics]{title} for more details.
#' @param xlab a label for the x axis, defaults to a description of x, see \link[graphics]{title} for more details.
#' @param ylab a label for the y axis, defaults to a description of y, see \link[graphics]{title} for more details.
#' @param col.axis axis color, see \link[graphics]{par} for more details.
#' @param lty.axis axis line type, see \link[graphics]{par} for more details.
#' @param lwd.axis axis width, see \link[graphics]{par} for more details.
#'
#' @param pch.SR.pos type of the point representing SR position, see \link[graphics]{points} for more details.
#' @param cex.SR.pos size of the point representing SR position, see \link[graphics]{points} for more details.
#' @param col.SR.pt point color contour if pch=21:25.
#' @param col.SR.pos color of the point representing SR position, see \link[graphics]{points} for more details.
#' @param col.SR.lab color of the species labels, see \link[graphics]{text} for more details.
#' @param cex.SR.lab size of the species labels, defaults to NA for no labels, see \link[graphics]{text} for more details.
#' @param fac.SR.lab factor for moving the SR labels from its original coordinates for clarity, by defaults they are multiply 1.2
#' @param border.E color border of E polygon, see \link[graphics]{polygon} for more details.
#' @param col.E inside color of E polygon, see \link[graphics]{polygon} for more details.
#' @param lty.E line type for the E border, see \link[graphics]{polygon} for more details.
#' @param lwd.E line width for the E border, see \link[graphics]{polygon} for more details.
#'
#' @param border.K color border of K polygon, see \link[graphics]{polygon} for more details.
#' @param col.K inside color of K polygon, see \link[graphics]{polygon} for more details.
#' @param lty.K line type for the K border, see \link[graphics]{polygon} for more details.
#' @param lwd.K line width for the K border, see \link[graphics]{polygon} for more details.
#'
#' @param col.Gk.pos color of the point representing Gk, see \link[graphics]{points} for more details.
#' @param col.Gk.pt point color contour if pch=21:25.
#' @param cex.Gk.pos size of the point representing Gk, see \link[graphics]{points} for more details.
#' @param pch.Gk.pos type of the point representing Gk, see \link[graphics]{points} for more details.
#'
#' @param col.su color of the points representing the sampling units (SU), see \link[graphics]{points} for more details.
#' @param pt.su point color contour if pch=21:25.
#' @param cex.su size of the points representing the sampling units (SU), see \link[graphics]{points} for more details.
#' @param pch.su type of the points representing the sampling units (SU), see \link[graphics]{points} for more details.
#'
#' @param leg a logical option for legend to be plotted or not, default leg=T.
#' @param font.sp font of the species labels, see \link[graphics]{text} for more details.
#' @param posi.leg legend location in the graph, see \link[graphics]{legend} for more details.
#' @param col.arrow arrow color, see \link[graphics]{arrows} for more details.
#' @param angle.arrow arrow angle head, see \link[graphics]{arrows} for more details.
#' @param lwd.arrow arrow width, see \link[graphics]{arrows} for more details.
#' @param length.arrow arrow head length, see \link[graphics]{arrows} for more details.
#' @param bty.leg the type of box to be drawn around the legends. The allowed values are "o" (the default) and "n". See \link[graphics]{legend} for more details
#' @examples
#' library(subniche)
#' data(doubs)
#' dudi1 <- dudi.pca(doubs$env, scale = TRUE, scan = FALSE, nf = 3)
#' nic1 <- niche(dudi1, doubs$fish, scann = FALSE)
#' # number of sites
#' N <- dim(nic1$ls)[1]
#' #Create a factor which defines the subsets
#' fact <- factor(c(rep(1,N/2),rep(2,N/2)))
#' # nic1 will be used as reference and fact will be used to define the subniches environment
#' subnic1 <- subniche(nic1, fact)
#' #Two graphs are drawn one after the other
#' siggk <- rtestsubor(subnic1,10)
#' sig = c(siggk$`1`$witomigktest$subni.pvalue[-28],siggk$`2`$witomigktest$subni.pvalue[-28])
#' subplot(subnic1, sig = sig, sig_thres= 0.1)
#'
#' @rdname subplot
#' @export subplot
#' @importFrom graphics par layout arrows points legend polygon abline
#' @importFrom wordcloud textplot
#' @importFrom SIBER siberConvexhull
subplot <- function(subnic, main=NULL,sig=NULL, sig_thres=0.05, xlab=NULL, ylab=NULL, col.axis="azure3", lty.axis=2, lwd.axis=2,
                    pch.SR.pos=21,
                    cex.SR.pos=1,
                    col.SR.pt="black",
                    col.SR.pos="#ffa600",
                    col.SR.lab="black",
                    cex.SR.lab= NA,
                    fac.SR.lab=1.2,
                    border.E="black",
                    col.E="#92c5de",
                    lty.E=1,
                    lwd.E=1,
                    border.K ="black",
                    col.K ="#2c7fb8",
                    lty.K=1,
                    lwd.K=1,
                    col.arrow="black",
                    angle.arrow=20,
                    lwd.arrow=2,
                    length.arrow=0.1,
                    col.Gk.pos= "red",
                    col.Gk.pt= "black",
                    cex.Gk.pos=1,
                    pch.Gk.pos=21,
                    col.su="#b35806",
                    pt.su="black",
                    cex.su=0.7,
                    pch.su=1,
                    font.sp=2,
                    leg=TRUE,
                    posi.leg="topleft",
                    bty.leg="n", ...){
  # Draws one plot per level of the subniche factor: the overall environmental
  # space E (convex hull of all sampling units), the sub-environmental space K
  # (hull of the level's sampling units), the sub-origin Gk, the species
  # subniche positions (SR) and arrows from Gk to each SR (marginality).
  fac <- subnic$factor
  lev <- levels(fac)
  # Percentage of total inertia carried by the first two OMI axes, used for
  # the default axis labels.
  eig <- round(subnic$eig/sum(subnic$eig)*100,2)[1:2]
  if(is.null(xlab)){
    xlab=paste(paste("OMI1",eig[1], sep=" "),"%",sep="")}
  if(is.null(ylab)){
    ylab=paste(paste("OMI2",eig[2], sep=" "),"%",sep="")}
  N <- length(lev)
  subsp <- subnic$sub
  # For plain symbols (pch outside 21:25 there is no separate fill) the
  # contour colour falls back to the fill colour so points and legend match.
  if(pch.SR.pos<21|pch.SR.pos>25){
    col.SR.pt <- col.SR.pos
  }
  if(pch.Gk.pos<21|pch.Gk.pos>25){
    col.Gk.pt <- col.Gk.pos
  }
  if(pch.su<21|pch.su>25){
    pt.su <- col.su
  }
  for (i in 1:N){
    subnici <- subnic$ls[which(fac==lev[i]),]
    G_k <- subnic$G_k[grep(lev[i],rownames(subnic$G_k)),]
    if(is.null(sig)){
      subspk <- subsp[grep(lev[i],rownames(subsp)),]
      subspk <- subspk[!is.na(subspk[,1]),]
    }else{
      # Keep only the species whose subniche test is significant at sig_thres.
      subspk <- subsp[which(round(sig,2)<=sig_thres),]
      subspk <- subspk[grep(lev[i],rownames(subspk)),]
      subspk <- subspk[!is.na(subspk[,1]),]
    }
    # Species labels: strip the level prefix from the row names.
    sp <- sub(lev[i],"",rownames(subspk))
    m <- dim(subspk)[1]
    plot(subnic$ls, main=main, xlab=xlab, ylab=ylab, type="n",...)
    # E polygon: convex hull of the whole environmental space.
    E <- siberConvexhull(subnic$ls[,1], subnic$ls[,2])
    polygon(E$xcoords,E$ycoords, border=border.E, col=col.E, lty=lty.E, lwd=lwd.E)
    # K polygon: convex hull of the current sub-environment.
    K <- siberConvexhull(subnici[,1], subnici[,2])
    polygon(K$xcoords,K$ycoords, border=border.K, col=col.K, lty=lty.K, lwd=lwd.K)
    abline(h=0, lty=lty.axis, lwd=lwd.axis, col=col.axis)
    abline(v=0, lty=lty.axis, lwd=lwd.axis, col=col.axis)
    # Marginality arrows from the sub-origin Gk to each species position.
    arrows(rep(G_k[,1],m),rep( G_k[,2],m),subspk[,1], subspk[,2], angle=angle.arrow,
           col=col.arrow,lwd=lwd.arrow, length=length.arrow)
    points(subnici,cex=cex.su, col=pt.su, bg=col.su, pch=pch.su)
    points(G_k[,1], G_k[,2], col=col.Gk.pt, bg=col.Gk.pos, pch=pch.Gk.pos, cex= cex.Gk.pos)
    points(subspk[,1], subspk[,2], col=col.SR.pt, bg=col.SR.pos, pch=pch.SR.pos, cex= cex.SR.pos)
    if(!is.na(cex.SR.lab)){
      text(subspk[,1]*fac.SR.lab, subspk[,2]*fac.SR.lab, sp, col=col.SR.lab, font=font.sp, cex=cex.SR.lab)
    }
    if(isTRUE(leg)){
      # Legend entries, in order: 1=E, 2=K, 3=Gk, 4=SR, 5=SU.
      filli <- c(col.E, col.K, NA, NA, NA)
      borderi <- c(border.E, border.K, NA, NA, NA)
      col.leg <- c(NA,NA, col.Gk.pt, col.SR.pt, pt.su)
      col.bg <- c(NA,NA, col.Gk.pos,col.SR.pos,col.su)
      pch.leg <- c(NA,NA,pch.Gk.pos,pch.SR.pos,pch.su)
      tex.leg <- c("E",paste("K", lev[i], sep=""),paste("GK", lev[i], sep=""),"SR","SU")
      lty.leg <- c(0,0,0,0,0)
      lwd.leg <- c(0,0,0,0,0)
      posi.cex <-c(NA,NA,1,1,1)
      # Drop legend entries whose graphical element the caller disabled.
      if(is.na(col.E)){
        filli[1] <- NA
        borderi[1] <- NA
        tex.leg[1] <- NA
      }
      if(is.na(col.K)){
        filli[2] <- NA
        borderi[2] <- NA
        tex.leg[2] <- NA
      }
      if(anyNA(cex.Gk.pos)){
        posi.cex[3] <- NA
        tex.leg[3] <- NA
      }
      # BUGFIX: the next two branches previously overwrote entry 3 (Gk)
      # instead of their own legend entries (4 = SR, 5 = SU).
      if(anyNA(cex.SR.pos)){
        posi.cex[4] <- NA
        tex.leg[4] <- NA
      }
      if(anyNA(cex.su)){
        posi.cex[5] <- NA
        tex.leg[5] <- NA
      }
      if(lty.E>1){
        pch.leg[1] <- NA
        lty.leg[1] <- lty.E
        lwd.leg[1] <- lwd.E
      }
      if(lty.K>1){
        pch.leg[2] <- NA
        # BUGFIX: the K legend line must use K's line type/width, not E's.
        lty.leg[2] <- lty.K
        lwd.leg[2] <- lwd.K
      }
      legend(posi.leg, legend=tex.leg,fill =filli, border=borderi, pch=pch.leg, col=col.leg, pt.cex = posi.cex,
             pt.bg=col.bg,lty=lty.leg,pt.lwd=c(NA,NA,1,1,1), lwd=lwd.leg, bty=bty.leg,...)
    }
  }
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(plotly)
# Define UI for application that draws a histogram
# Single-page Shiny UI: a navbar with three tabs.
#   1. "About Me"            -- static author bio.
#   2. "Your Analysis"       -- dataset picker; conditionalPanel shows the
#                               output(s) matching input$file.
#   3. "Airline Crash Story" -- narrative with plots/text rendered by server.R.
# Output ids referenced here (snowRidge, tempRidge, co2Anim, ...) must be
# defined in the matching server function.
shinyUI(
    fluidPage(
      # Navbar
      navbarPage(
        "CMalla",
        # Tab 1: static bio text only, no reactive outputs.
        tabPanel(
          "About Me",
           mainPanel(
             tags$h1("Chad Malla"),
             tags$p("I am a computing science major with a focus in AI and Data Science.
                    Also have an interest in theoretical computing science. I have worked as a full-stack
                    developer at Plexia and going to work as a Data Analyst starting in May. I took
                    this assignment as an opportunity to explore animations and more advanced
                    data visualizations with Plotly and GGPlot2. Through the process I got more
                    experience using dplyr library for data wrangling."))
        ),
        #---------------------------------------------------------------------
        # Tab 2: one conditionalPanel per dataset choice; the JS conditions
        # test the client-side value of input.file.
        tabPanel(
          "Your Analysis",
          sidebarPanel(
            selectInput(inputId = "file",
                        label = "Select File for Distribution Plot",
                        choices = c("NYCarCrashes",
                                    "C02Worldwide",
                                    "CanadianAvgSnow",
                                    "CanadianMeanTemp"))
          ),
          mainPanel(
            conditionalPanel(
              condition = "input.file == 'CanadianAvgSnow'",
              plotOutput("snowRidge")
            ),
            conditionalPanel(
              condition = "input.file == 'CanadianMeanTemp'",
              plotOutput("tempRidge")
            ),
            conditionalPanel(
              condition = "input.file == 'C02Worldwide'",
              plotlyOutput("co2Anim"),
              plotlyOutput("co2Acc")
            ),
            conditionalPanel(
              condition = "input.file == 'NYCarCrashes'",
              plotlyOutput("carAgeFreq")
            ))
        ),
        # Tab 3: data-story narrative interleaving plots and text summaries.
        # NOTE(review): user-facing text below says "an KLM" and "handling
        # full control" -- likely typos ("a KLM", "handing"); fixing them
        # changes rendered output, so it is left to a deliberate edit.
        tabPanel(
          "Airline Crash Story",
          mainPanel(
            tags$h1("The Airline Crash Story"),
            tags$br(),
            plotlyOutput("animation"),
            tags$p("The data points are green if the survival rate for that year is greater than
                   40% else red."),
            tags$br(),
            tags$h3("Top 60 Operators involved in crashes"),
            plotOutput("cirPlot"),
            tags$br(),
            plotOutput("opTop"),
            tags$br(),
            verbatimTextOutput("air_selec_sum"),
            tags$br(),
            verbatimTextOutput("LargestAFCrash"),
            tags$br(),
            tags$h3("Story behind largest Air France Crash"),
            tags$p("The largest Air France crash in terms of the number of fatalities
                   happened on June 1, 2009 where all 228 that boarded the plane had
                   been killed. According to Wikipedia only parts of the plane and
                   51 bodies have been found in the days following the crash. The probable
                   cause was reported, by the BEA, as the aircraft's pilot tubes icing over leading
                   the autopilot to disconnect and handling full control to pilots. Pilots
                   were confused by the all warnings, pulled up the nose of the aircraft to
                   the point that the aircraft stalled. By the time the pilots realized
                   the aircraft stalled, it was too late."),
            tags$h3("But the biggest crash involved two planes"),
            tags$p("The Pan American World Airways plane crashed with an KLM on the runway
                   as one was still on the runway after landing and the other taking off."),
            verbatimTextOutput("LargestCrash")
          )
        )
      )
    )
  )
|
/ui.R
|
no_license
|
cmalla94/FirstDataStory
|
R
| false
| false
| 3,872
|
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(plotly)
# Define UI for application that draws a histogram
# Single-page Shiny UI: a navbar with three tabs.
#   1. "About Me"            -- static author bio.
#   2. "Your Analysis"       -- dataset picker; conditionalPanel shows the
#                               output(s) matching input$file.
#   3. "Airline Crash Story" -- narrative with plots/text rendered by server.R.
# Output ids referenced here (snowRidge, tempRidge, co2Anim, ...) must be
# defined in the matching server function.
shinyUI(
    fluidPage(
      # Navbar
      navbarPage(
        "CMalla",
        # Tab 1: static bio text only, no reactive outputs.
        tabPanel(
          "About Me",
           mainPanel(
             tags$h1("Chad Malla"),
             tags$p("I am a computing science major with a focus in AI and Data Science.
                    Also have an interest in theoretical computing science. I have worked as a full-stack
                    developer at Plexia and going to work as a Data Analyst starting in May. I took
                    this assignment as an opportunity to explore animations and more advanced
                    data visualizations with Plotly and GGPlot2. Through the process I got more
                    experience using dplyr library for data wrangling."))
        ),
        #---------------------------------------------------------------------
        # Tab 2: one conditionalPanel per dataset choice; the JS conditions
        # test the client-side value of input.file.
        tabPanel(
          "Your Analysis",
          sidebarPanel(
            selectInput(inputId = "file",
                        label = "Select File for Distribution Plot",
                        choices = c("NYCarCrashes",
                                    "C02Worldwide",
                                    "CanadianAvgSnow",
                                    "CanadianMeanTemp"))
          ),
          mainPanel(
            conditionalPanel(
              condition = "input.file == 'CanadianAvgSnow'",
              plotOutput("snowRidge")
            ),
            conditionalPanel(
              condition = "input.file == 'CanadianMeanTemp'",
              plotOutput("tempRidge")
            ),
            conditionalPanel(
              condition = "input.file == 'C02Worldwide'",
              plotlyOutput("co2Anim"),
              plotlyOutput("co2Acc")
            ),
            conditionalPanel(
              condition = "input.file == 'NYCarCrashes'",
              plotlyOutput("carAgeFreq")
            ))
        ),
        # Tab 3: data-story narrative interleaving plots and text summaries.
        # NOTE(review): user-facing text below says "an KLM" and "handling
        # full control" -- likely typos ("a KLM", "handing"); fixing them
        # changes rendered output, so it is left to a deliberate edit.
        tabPanel(
          "Airline Crash Story",
          mainPanel(
            tags$h1("The Airline Crash Story"),
            tags$br(),
            plotlyOutput("animation"),
            tags$p("The data points are green if the survival rate for that year is greater than
                   40% else red."),
            tags$br(),
            tags$h3("Top 60 Operators involved in crashes"),
            plotOutput("cirPlot"),
            tags$br(),
            plotOutput("opTop"),
            tags$br(),
            verbatimTextOutput("air_selec_sum"),
            tags$br(),
            verbatimTextOutput("LargestAFCrash"),
            tags$br(),
            tags$h3("Story behind largest Air France Crash"),
            tags$p("The largest Air France crash in terms of the number of fatalities
                   happened on June 1, 2009 where all 228 that boarded the plane had
                   been killed. According to Wikipedia only parts of the plane and
                   51 bodies have been found in the days following the crash. The probable
                   cause was reported, by the BEA, as the aircraft's pilot tubes icing over leading
                   the autopilot to disconnect and handling full control to pilots. Pilots
                   were confused by the all warnings, pulled up the nose of the aircraft to
                   the point that the aircraft stalled. By the time the pilots realized
                   the aircraft stalled, it was too late."),
            tags$h3("But the biggest crash involved two planes"),
            tags$p("The Pan American World Airways plane crashed with an KLM on the runway
                   as one was still on the runway after landing and the other taking off."),
            verbatimTextOutput("LargestCrash")
          )
        )
      )
    )
  )
|
run_analysis <- function(featuresfile = "features.txt", activitylabelfile = "activity_labels.txt",
                         traindatafile = "./train/X_train.txt", trainlabelfile = "./train/y_train.txt",
                         testdatafile = "./test/X_test.txt", testlabelfile = "./test/y_test.txt" ){
  ## Builds a tidy summary ("./tidy_data.txt") from the UCI HAR files:
  ## 1. Merges the training and the test sets into one data set.
  ## 2. Extracts only the mean() and std() measurements.
  ## 3. Uses descriptive activity names from the activity label file.
  ## 4. Labels the columns with descriptive variable names from the features file.
  ## 5. Writes the average of each retained variable for each activity and
  ##    each subject.
  ##
  ## NOTE(review): "subject" here records the train/test origin of each row,
  ## not the volunteer id from subject_train.txt/subject_test.txt -- confirm
  ## this is the intended grouping.
  features <- read.table(featuresfile)
  activitylabel <- read.table(activitylabelfile)
  traindata <-read.table(traindatafile)
  trainlabel <- read.table(trainlabelfile)
  testdata <- read.table(testdatafile)
  testlabel <- read.table(testlabelfile)
  ## Tag each labelled row with its data-set origin, then bind labels to data.
  trainlabel <- cbind("subject" = "train", trainlabel)
  traindata <- cbind.data.frame(trainlabel, traindata)
  testlabel <- cbind("subject" = "test", testlabel)
  testdata <- cbind.data.frame(testlabel, testdata)
  ## Combine train and test data together.
  allsubjectdata <- rbind.data.frame(traindata, testdata)
  ## Rename columns to the descriptive names provided in the features file.
  featureslabel <- as.vector(features$V2)
  names(allsubjectdata) <- c("subject", "activityid", featureslabel)
  ## Attach human-readable activity names (merge joins on "activityid").
  names(activitylabel) <- c("activityid", "activity")
  allsubjectdata <- merge(activitylabel, allsubjectdata)
  ## Keep the two id columns plus every mean()/std() measurement, then strip
  ## the "()" from the retained column names.
  subsetdata <- allsubjectdata[, grepl("subject|activity$|mean\\(\\)|std\\(\\)", names(allsubjectdata))]
  names(subsetdata) <- gsub("\\(\\)","",names(subsetdata))
  ## Average every measurement column for each subject/activity pair.
  ## -(1:2) (everything after the two id columns) replaces the fragile
  ## hard-coded 3:68, so the function stays correct for any feature count.
  averagedata <- aggregate(subsetdata[, -(1:2), drop = FALSE],
                           list("subject" = subsetdata$subject, "activity" = subsetdata$activity), mean)
  write.table(averagedata, file="./tidy_data.txt", row.names = FALSE)
}
|
/run_analysis.r
|
no_license
|
JayeBlurb/tidydataassign
|
R
| false
| false
| 2,670
|
r
|
run_analysis <- function(featuresfile = "features.txt", activitylabelfile = "activity_labels.txt",
                         traindatafile = "./train/X_train.txt", trainlabelfile = "./train/y_train.txt",
                         testdatafile = "./test/X_test.txt", testlabelfile = "./test/y_test.txt" ){
  ## Builds a tidy summary ("./tidy_data.txt") from the UCI HAR files:
  ## 1. Merges the training and the test sets into one data set.
  ## 2. Extracts only the mean() and std() measurements.
  ## 3. Uses descriptive activity names from the activity label file.
  ## 4. Labels the columns with descriptive variable names from the features file.
  ## 5. Writes the average of each retained variable for each activity and
  ##    each subject.
  ##
  ## NOTE(review): "subject" here records the train/test origin of each row,
  ## not the volunteer id from subject_train.txt/subject_test.txt -- confirm
  ## this is the intended grouping.
  features <- read.table(featuresfile)
  activitylabel <- read.table(activitylabelfile)
  traindata <-read.table(traindatafile)
  trainlabel <- read.table(trainlabelfile)
  testdata <- read.table(testdatafile)
  testlabel <- read.table(testlabelfile)
  ## Tag each labelled row with its data-set origin, then bind labels to data.
  trainlabel <- cbind("subject" = "train", trainlabel)
  traindata <- cbind.data.frame(trainlabel, traindata)
  testlabel <- cbind("subject" = "test", testlabel)
  testdata <- cbind.data.frame(testlabel, testdata)
  ## Combine train and test data together.
  allsubjectdata <- rbind.data.frame(traindata, testdata)
  ## Rename columns to the descriptive names provided in the features file.
  featureslabel <- as.vector(features$V2)
  names(allsubjectdata) <- c("subject", "activityid", featureslabel)
  ## Attach human-readable activity names (merge joins on "activityid").
  names(activitylabel) <- c("activityid", "activity")
  allsubjectdata <- merge(activitylabel, allsubjectdata)
  ## Keep the two id columns plus every mean()/std() measurement, then strip
  ## the "()" from the retained column names.
  subsetdata <- allsubjectdata[, grepl("subject|activity$|mean\\(\\)|std\\(\\)", names(allsubjectdata))]
  names(subsetdata) <- gsub("\\(\\)","",names(subsetdata))
  ## Average every measurement column for each subject/activity pair.
  ## -(1:2) (everything after the two id columns) replaces the fragile
  ## hard-coded 3:68, so the function stays correct for any feature count.
  averagedata <- aggregate(subsetdata[, -(1:2), drop = FALSE],
                           list("subject" = subsetdata$subject, "activity" = subsetdata$activity), mean)
  write.table(averagedata, file="./tidy_data.txt", row.names = FALSE)
}
|
#! /usr/bin/env Rscript
#############################################
############## FUNCTIONAL HUNTER ############
#############################################
# Renders one per-cluster correlation-profile HTML report from a DEgenes
# Hunter differential-expression output folder.  The Rmd template is rendered
# in this script's environment, so the globals created below (DEGH_results,
# scaled_counts_table, cl_eigvalues, wgcna_* , clgenes, ...) are the report's
# inputs -- do not rename them without updating the template.
#
# Locate this script's own directory so lib/ and templates/ can be found.
# this is wrapped in a tryCatch. The first expression works when source executes, the
# second expression works when R CMD does it.
full.fpath <- tryCatch(normalizePath(parent.frame(2)$ofile), # works when using source
              error=function(e) # works when using R CMD
              normalizePath(unlist(strsplit(commandArgs()[grep('^--file=', commandArgs())], '='))[2]))
main_path_script <- dirname(full.fpath)
# Load libraries quietly: optparse for CLI parsing, knitr for rendering.
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.
suppressPackageStartupMessages(require(optparse))
suppressPackageStartupMessages(require(knitr))
#############################################
### MAIN
#############################################
# Parse command line
#------------------------------------------------
option_list <- list(
  make_option(c("-i", "--input_hunter_folder"), type="character",
    help="DEgenes Hunter's differential expression analysis output folder"),
  make_option(c("-o", "--output_files"), type="character", default="results",
    help="Output path. Default=%default")
)
opt <- parse_args(OptionParser(option_list=option_list))
############ CREATE FOLDERS ############
paths <- list()
dir.create(opt$output_files)
paths$root <-opt$output_files
source(file.path(main_path_script, 'lib', 'functional_analysis_library.R'))
source(file.path(main_path_script, 'lib', 'plotting_functions.R'))
#############################################
### LOAD AND PARSE
#############################################
DEGH_results <- read.table(file.path(opt$input_hunter_folder, "Common_results", "hunter_results_table.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE)
# Drop genes discarded upstream by the expression filter.
aux <- which(DEGH_results$genes_tag == "FILTERED_OUT")
if(length(aux) > 0){
	DEGH_results <- DEGH_results[-aux,]
}
#############################################
### PREPARE AND TRANSFORM DATA
#############################################
####
# LOAD NORMALIZED COUNTS (DESeq2), then scale them and reshape to a long
# Gene/Sample/Count table for plotting.
####
norm_counts <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_DESeq2", "Normalized_counts_DESeq2.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
scaled_counts <- scale_data_matrix(data_matrix = norm_counts, transpose = TRUE)
scaled_counts_table <- as.data.frame(as.table(scaled_counts))
colnames(scaled_counts_table) <- c("Gene","Sample","Count")
####
# LOAD WGCNA clusters representative profiles with samples (module
# eigengene values), reshaped to long format; *_gnorm rescales the
# eigengene values from [-1, 1] to [0, 1].
cl_eigvalues <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_WGCNA", "eigen_values_per_samples.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
cl_eigvalues <- as.data.frame(as.table(cl_eigvalues),stringsAsFactors = FALSE)
colnames(cl_eigvalues) <- c("Sample","Cluster_ID","Count")
cl_eigvalues_gnorm <- cl_eigvalues
cl_eigvalues_gnorm$Count <- (cl_eigvalues_gnorm$Count + 1) / 2
####
# LOAD WGCNA - PVal and correlation matrices (Cluster - Trait)
wgcna_pval_cl_trait <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_WGCNA", "module_trait_p_val.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
wgcna_corr_cl_trait <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_WGCNA", "module_trait.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
####
# LOAD WGCNA - Correlation (Sample - Trait), scaled for plotting
wgcna_count_sample_trait <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_WGCNA", "sample_trait.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
wgcna_count_sample_trait <- scale_data_matrix(wgcna_count_sample_trait)
# Obtain clusters, excluding 0/"grey" (WGCNA's tag for unassigned genes).
cls <- unique(DEGH_results$Cluster_ID)
if(any(c(0,"grey") %in% cls)){
	cls <- cls[!cls %in% c(0,"grey")]
}else{
	warning("Cluster Zero/Grey not found")
}
# Genes belonging to each cluster, named by cluster id.
clgenes <- lapply(cls,function(cl){unique(rownames(DEGH_results[which(DEGH_results$Cluster_ID == cl),]))})
names(clgenes) <- cls
############################################################
##                    GENERATE REPORT                     ##
############################################################
results_path <- normalizePath(paths$root)
invisible(lapply(cls,function(cl){
	# Output name: one HTML file per cluster.
	aux <- paste0("cl_func_",cl,".html")
	outf_cls_i <- file.path(results_path, aux)
	# Render the template in this environment (it reads the globals above).
	rmarkdown::render(file.path(main_path_script, 'templates', 'corrprofiles_report.Rmd'), output_file = outf_cls_i, intermediates_dir = results_path)
}))
|
/render_corr_report.R
|
no_license
|
MariaSuero/DEgenesHunter
|
R
| false
| false
| 4,533
|
r
|
#! /usr/bin/env Rscript
#############################################
############## FUNCTIONAL HUNTER ###########
#############################################
# Renders per-cluster correlation-profile HTML reports from a DEgenes Hunter
# differential-expression + WGCNA run.
# this is wrapped in a tryCatch. The first expression works when source executes, the
# second expression works when R CMD does it.
full.fpath <- tryCatch(normalizePath(parent.frame(2)$ofile), # works when using source
             error=function(e) # works when using R CMD
              normalizePath(unlist(strsplit(commandArgs()[grep('^--file=', commandArgs())], '='))[2]))
main_path_script <- dirname(full.fpath)
#Loading libraries
suppressPackageStartupMessages(require(optparse))
suppressPackageStartupMessages(require(knitr))
#############################################
### MAIN
#############################################
# Parse command line
#------------------------------------------------
option_list <- list(
  make_option(c("-i", "--input_hunter_folder"), type="character",
    help="DEgenes Hunter's differential expression analysis output folder"),
  make_option(c("-o", "--output_files"), type="character", default="results",
    help="Output path. Default=%default")
)
opt <- parse_args(OptionParser(option_list=option_list))
############ CREATE FOLDERS ############
paths <- list()
dir.create(opt$output_files)
paths$root <-opt$output_files
# Project helper libraries (provide scale_data_matrix(), plotting helpers, ...)
source(file.path(main_path_script, 'lib', 'functional_analysis_library.R'))
source(file.path(main_path_script, 'lib', 'plotting_functions.R'))
#############################################
### LOAD AND PARSE
#############################################
DEGH_results <- read.table(file.path(opt$input_hunter_folder, "Common_results", "hunter_results_table.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE)
# Discard genes filtered out by the expression analysis
aux <- which(DEGH_results$genes_tag == "FILTERED_OUT")
if(length(aux) > 0){
	DEGH_results <- DEGH_results[-aux,]
}
#############################################
### PREPARE AND TRANSFORM DATA
#############################################
####
# LOAD NORMALIZED COUNTS (DESeq2), scaled per gene for plotting
####
norm_counts <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_DESeq2", "Normalized_counts_DESeq2.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
scaled_counts <- scale_data_matrix(data_matrix = norm_counts, transpose = TRUE)
# Long format: one row per (Gene, Sample) pair
scaled_counts_table <- as.data.frame(as.table(scaled_counts))
colnames(scaled_counts_table) <- c("Gene","Sample","Count")
####
# LOAD WGCNA clusters representative profiles with samples (module eigengenes)
cl_eigvalues <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_WGCNA", "eigen_values_per_samples.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
cl_eigvalues <- as.data.frame(as.table(cl_eigvalues),stringsAsFactors = FALSE)
colnames(cl_eigvalues) <- c("Sample","Cluster_ID","Count")
# Rescale eigengene values from [-1, 1] to [0, 1] for heatmap display
cl_eigvalues_gnorm <- cl_eigvalues
cl_eigvalues_gnorm$Count <- (cl_eigvalues_gnorm$Count + 1) / 2
####
# LOAD WGCNA - PVal (Cluster - Trait)
wgcna_pval_cl_trait <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_WGCNA", "module_trait_p_val.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
wgcna_corr_cl_trait <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_WGCNA", "module_trait.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
####
# LOAD WGCNA - Correlation (Sample - Trait)
wgcna_count_sample_trait <- as.matrix(read.table(file.path(opt$input_hunter_folder, "Results_WGCNA", "sample_trait.txt"), header=TRUE, row.names=1, sep="\t", stringsAsFactors = FALSE))
wgcna_count_sample_trait <- scale_data_matrix(wgcna_count_sample_trait)
# Obtain clusters; drop the "unassigned" cluster (0 / "grey")
cls <- unique(DEGH_results$Cluster_ID)
if(any(c(0,"grey") %in% cls)){
	cls <- cls[!cls %in% c(0,"grey")]
}else{
	warning("Cluster Zero/Grey not found")
}
# Gene IDs per cluster, named by cluster ID
clgenes <- lapply(cls,function(cl){unique(rownames(DEGH_results[which(DEGH_results$Cluster_ID == cl),]))}) # Find
names(clgenes) <- cls
############################################################
## GENERATE REPORT ##
############################################################
results_path <- normalizePath(paths$root)
# One HTML report per cluster, rendered from the shared Rmd template
invisible(lapply(cls,function(cl){
	# Take output name
	aux <- paste0("cl_func_",cl,".html")
	outf_cls_i <- file.path(results_path, aux)
	# Generate report
	rmarkdown::render(file.path(main_path_script, 'templates', 'corrprofiles_report.Rmd'), output_file = outf_cls_i, intermediates_dir = results_path)
}))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcox.R
\name{rcox}
\alias{rcox}
\title{Simulate Cox process in 2- or 3-dimensional box}
\usage{
rcox(lambda, n, bbox, W, iter = 10000, verb = FALSE)
}
\arguments{
\item{lambda}{object from 'lambda'-function}
\item{n}{points to simulate if fixed count wanted}
\item{bbox}{bounding box, column matrix giving ranges}
\item{W}{owin-object rectangular, can be given instead of bbox}
\item{iter}{iterations of MH algorithm if n given}
\item{verb}{Print some runtime output}
}
\description{
Simulate the spatial Cox point process in 2- or 3-dimensional box
(rectangular cuboid). Three algorithms are available:
}
|
/man/rcox.Rd
|
no_license
|
antiphon/rcox
|
R
| false
| true
| 690
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcox.R
\name{rcox}
\alias{rcox}
\title{Simulate Cox process in 2- or 3-dimensional box}
\usage{
rcox(lambda, n, bbox, W, iter = 10000, verb = FALSE)
}
\arguments{
\item{lambda}{object from 'lambda'-function}
\item{n}{points to simulate if fixed count wanted}
\item{bbox}{bounding box, column matrix giving ranges}
\item{W}{owin-object rectangular, can be given instead of bbox}
\item{iter}{iterations of MH algorithm if n given}
\item{verb}{Print some runtime output}
}
\description{
Simulate the spatial Cox point process in 2- or 3-dimensional box
(rectangular cuboid). Three algorithms are available:
}
|
# Choose an optimal number of k-means clusters for the iris measurements via the
# elbow method: fit k = 1..5 and compare total within-cluster sum of squares.
#Optimal Number of Clusters in data
#Reduce total within ss
data = iris[-5] # drop the Species column; keep the 4 numeric measurements
head(data)
# NOTE(review): kmeans() uses random starts and no seed is set, so the
# tot.withinss values below vary between runs.
km1= kmeans(data,centers=1)
km1$tot.withinss
km2= kmeans(data,centers=2)
km2$tot.withinss
km2$withinss # per-cluster within-SS for k = 2
km3= kmeans(data,centers=3)
km3$tot.withinss
km4= kmeans(data,centers=4)
km4$tot.withinss
km5= kmeans(data,centers=5)
km5$tot.withinss
#Selecting the number of clusters with NbClust's index ensemble
library(NbClust)
nc = NbClust(data, distance="euclidean",min.nc=2, max.nc=15, method="average")
nc = NbClust(mtcars, distance="euclidean",min.nc=2, max.nc=15, method="average")
det(as.matrix(mtcars)) # presumably checking near-singularity of mtcars (relevant to NbClust index failures) -- confirm
?na.action # NOTE(review): interactive help call left in the script; a no-op in batch mode
km3= kmeans(data,centers=3)
km3$tot.withinss
# Elbow comparison of total within-SS for k = 1..5
cbind(km1$tot.withinss, km2$tot.withinss, km3$tot.withinss, km4$tot.withinss,km5$tot.withinss)
#we select no clusters at elbow point
#adding more clusters does not significantly reduce total withinss
|
/optimal no.R
|
no_license
|
Vishakha2992/Analytics1
|
R
| false
| false
| 827
|
r
|
# Choose an optimal number of k-means clusters for the iris measurements via the
# elbow method: fit k = 1..5 and compare total within-cluster sum of squares.
#Optimal Number of Clusters in data
#Reduce total within ss
data = iris[-5] # drop the Species column; keep the 4 numeric measurements
head(data)
# NOTE(review): kmeans() uses random starts and no seed is set, so the
# tot.withinss values below vary between runs.
km1= kmeans(data,centers=1)
km1$tot.withinss
km2= kmeans(data,centers=2)
km2$tot.withinss
km2$withinss # per-cluster within-SS for k = 2
km3= kmeans(data,centers=3)
km3$tot.withinss
km4= kmeans(data,centers=4)
km4$tot.withinss
km5= kmeans(data,centers=5)
km5$tot.withinss
#Selecting the number of clusters with NbClust's index ensemble
library(NbClust)
nc = NbClust(data, distance="euclidean",min.nc=2, max.nc=15, method="average")
nc = NbClust(mtcars, distance="euclidean",min.nc=2, max.nc=15, method="average")
det(as.matrix(mtcars)) # presumably checking near-singularity of mtcars (relevant to NbClust index failures) -- confirm
?na.action # NOTE(review): interactive help call left in the script; a no-op in batch mode
km3= kmeans(data,centers=3)
km3$tot.withinss
# Elbow comparison of total within-SS for k = 1..5
cbind(km1$tot.withinss, km2$tot.withinss, km3$tot.withinss, km4$tot.withinss,km5$tot.withinss)
#we select no clusters at elbow point
#adding more clusters does not significantly reduce total withinss
|
context("shed")

test_that("shed works as expected", {
  # A raw sheet exactly as shed produces it: the first data row repeats the
  # header and every cell is still a character string.
  raw_sheet <- data.frame(
    Sepal.Length = c("Sepal.Length", "blah", "4.9", "4.7"),
    Sepal.Width = c("Sepal.Width", "3.5", "3.0", "3.2"),
    Petal.Length = c("Petal.Length", "1.4", "1.4", "1.3"),
    Petal.Width = c("Petal.Width", "0.2", "0.2", "0.2"),
    Species = c("Species", "setosa", "setosa", "setosa"),
    stringsAsFactors = FALSE
  )

  parsed <- parse_output_df(raw_sheet)

  # The embedded header row supplies the column names ...
  expect_identical(
    names(parsed),
    c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species")
  )
  # ... and column types are inferred from the remaining cells:
  # "blah" blocks numeric conversion in the first column.
  expect_true(is.character(parsed[[1]]))
  expect_true(is.numeric(parsed[[2]]))
  expect_true(is.numeric(parsed[[3]]))
  expect_true(is.numeric(parsed[[4]]))
  expect_true(is.character(parsed[[5]]))

  # Degenerate sheet: a single column whose only data cell is empty.
  lone_column <- data.frame(
    X1 = c("X1", ""),
    stringsAsFactors = FALSE
  )

  parsed <- parse_output_df(lone_column)

  # The empty cell is parsed as a missing value.
  expect_equal(
    parsed,
    data.frame(
      X1 = NA,
      stringsAsFactors = FALSE
    )
  )
})
|
/tests/testthat/test_shed.R
|
permissive
|
s-fleck/shed
|
R
| false
| false
| 964
|
r
|
context("shed")

test_that("shed works as expected", {
  # A raw sheet exactly as shed produces it: the first data row repeats the
  # header and every cell is still a character string.
  sheet_df <- data.frame(
    Sepal.Length = c("Sepal.Length", "blah", "4.9", "4.7"),
    Sepal.Width = c("Sepal.Width", "3.5", "3.0", "3.2"),
    Petal.Length = c("Petal.Length", "1.4", "1.4", "1.3"),
    Petal.Width = c("Petal.Width", "0.2", "0.2", "0.2"),
    Species = c("Species", "setosa", "setosa", "setosa"),
    stringsAsFactors = FALSE
  )

  result <- parse_output_df(sheet_df)

  # The embedded header row supplies the column names ...
  expect_identical(
    names(result),
    c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width", "Species")
  )
  # ... and column types are inferred from the remaining cells:
  # "blah" blocks numeric conversion in the first column.
  expect_true(is.character(result[[1]]))
  expect_true(is.numeric(result[[2]]))
  expect_true(is.numeric(result[[3]]))
  expect_true(is.numeric(result[[4]]))
  expect_true(is.character(result[[5]]))

  # Degenerate sheet: a single column whose only data cell is empty.
  single_col <- data.frame(
    X1 = c("X1", ""),
    stringsAsFactors = FALSE
  )

  result <- parse_output_df(single_col)

  # The empty cell is parsed as a missing value.
  expect_equal(
    result,
    data.frame(
      X1 = NA,
      stringsAsFactors = FALSE
    )
  )
})
|
# NOTE(review): rm(list=ls()) wipes the interactive workspace -- an anti-pattern
# in scripts; run the file in a fresh session instead.
rm(list=ls())
####### R version 3.6.3 (2020-02-29) -- "Holding the Windsock"
# Packages for ROC computation/plotting, expression-set handling and CV folds
library(rocc)
library(ggplot2)
library(Biobase)
library(edgeR)
library(limma)
library(biomaRt)
library(dplyr)
library(cancerclass)
library(cowplot)
library(caret)
library(RColorBrewer)
library(FactoMineR)
library(factoextra)
########################
#calculate roc and draw a roc curve from Xiong et al. (https://github.com/donghaixiong/Immune_cells_analysis)
# Produce x/y co-ordinates for a ROC curve plus summary statistics.
# Arguments: grp  - labels classifying subject status (coerced to a 2-level
#                   factor; the SECOND level is treated as the positive class)
#            pred - numeric prediction score per observation (higher = more
#                   likely positive)
# Output: list with 2 components:
#   roc   = data.frame with x (false positive rate) and y (true positive rate)
#   stats = data.frame with AUC, two-sided p-value (H0: AUC = 0.5) and the
#           95% confidence interval bounds (Hanley & McNeil SE formula)
# NOTE: observations whose score equals a cut-point are counted as neither
# positive nor negative at that cut-point (original tie behaviour, preserved).
rocdata <- function(grp, pred){
  grp <- as.factor(grp)
  if (length(pred) != length(grp)) {
    stop("The number of classifiers must match the number of data points")
  }
  if (length(levels(grp)) != 2) {
    stop("There must only be 2 values for the classifier")
  }
  # One candidate threshold per distinct score.
  cut <- unique(pred)
  tp <- sapply(cut, function(x) length(which(pred > x & grp == levels(grp)[2])))
  fn <- sapply(cut, function(x) length(which(pred < x & grp == levels(grp)[2])))
  fp <- sapply(cut, function(x) length(which(pred > x & grp == levels(grp)[1])))
  tn <- sapply(cut, function(x) length(which(pred < x & grp == levels(grp)[1])))
  tpr <- tp / (tp + fn)
  fpr <- fp / (fp + tn)
  roc = data.frame(x = fpr, y = tpr)
  roc <- roc[order(roc$x, roc$y),]
  # Trapezoidal area under the ordered ROC points.
  i <- 2:nrow(roc)
  auc <- (roc$x[i] - roc$x[i - 1]) %*% (roc$y[i] + roc$y[i - 1])/2
  pos <- pred[grp == levels(grp)[2]]
  neg <- pred[grp == levels(grp)[1]]
  # Hanley & McNeil (1982) standard error of the AUC.
  q1 <- auc/(2-auc)
  q2 <- (2*auc^2)/(1+auc)
  se.auc <- sqrt(((auc * (1 - auc)) + ((length(pos) -1)*(q1 - auc^2)) + ((length(neg) -1)*(q2 - auc^2)))/(length(pos)*length(neg)))
  # BUGFIX: a 95% normal confidence interval uses z = 1.96 (qnorm(0.975));
  # the multiplier was previously mistyped as 0.96, shrinking the interval.
  ci.upper <- auc + (se.auc * 1.96)
  ci.lower <- auc - (se.auc * 1.96)
  # Two-sided p-value for AUC != 0.5 under the null-hypothesis SE.
  se.auc.null <- sqrt((1 + length(pos) + length(neg))/(12*length(pos)*length(neg)))
  z <- (auc - 0.5)/se.auc.null
  p <- 2*pnorm(-abs(z))
  stats <- data.frame (auc = auc,
                       p.value = p,
                       ci.upper = ci.upper,
                       ci.lower = ci.lower
                       )
  return (list(roc = roc, stats = stats))
}
# single ROC plot
# Draw a single ROC curve with ggplot2, annotated with the AUC.
# grp/pred - passed straight to rocdata(); see that function for semantics
# title    - plot title
# p.value  - if TRUE annotate with the AUC p-value, otherwise with the 95% CI
# Returns the ggplot object (not printed).
rocplot.single.V2 <- function(grp, pred, title = "ROC Plot", p.value = FALSE){
  require(ggplot2)
  plotdata <- rocdata(grp, pred)
  # NOTE(review): inside with() the name p.value resolves to the stats column,
  # shadowing the function argument of the same name -- intended here, but fragile.
  if (p.value == TRUE){
    annotation <- with(plotdata$stats, paste("AUC=",signif(auc, 2), " (P=", signif(p.value, 2), ")", sep=""))
  } else {
    annotation <- with(plotdata$stats, paste("AUC = ",signif(auc, 2), " (95% CI: ", signif(ci.lower, 2), "-", signif(ci.upper, 2), ")", sep=""))
  }
  # Black ROC line plus the chance diagonal; the annotation is injected as the
  # legend label of a dummy colour scale.
  p <- ggplot(plotdata$roc, aes(x = x, y = y)) +
    geom_line(aes(colour = "")) +
    geom_abline (intercept = 0, slope = 1) +
    theme_bw() +
    scale_x_continuous("False Positive Rate (1-Specificity)") +
    scale_y_continuous("True Positive Rate (Sensitivity)") +
    scale_colour_manual(labels = annotation, values = "#000000") +
    theme(
      plot.title = element_text(size=14, hjust = 0.5),
      axis.text.x = element_text(face="bold", size=14),
      axis.text.y = element_text(face="bold", size=14),
      axis.title.x = element_text(face="bold", size=14),
      axis.title.y = element_text(face="bold", size=14, angle=90),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      legend.justification=c(1,0),
      legend.position=c(0.95,0.15),
      legend.text = element_text(size = 14),
      legend.title=element_blank(),
      legend.key = element_blank()
    )+
    labs(title=title)
  return(p)
}
# NOTE(review): hard-coded absolute Windows path; the script is not portable.
setwd('C:\\Donghai_Desktop2\\NCpaper_argument_discuss\\Redo_analysis')
# Gene list of the signature evaluated as "our" signature below.
ImSig.genes <- readRDS('ImSig.rds')
# GSE78220 28 samples
GSE78220_AltAnalyze <- readRDS('GSE78220_expressionMatrix.rds')
GSE78220_PhenoInfo2 <- readRDS('GSE78220_PhenoInfo2.rds')
# Keep the Symbol column plus annotated samples, in phenotype order.
GSE78220_AltAnalyze <- GSE78220_AltAnalyze[,c('Symbol',GSE78220_PhenoInfo2$sample)]
GSE78220_AltAnalyze <- GSE78220_AltAnalyze[rowSums(GSE78220_AltAnalyze[,-1]) > 1, ] # same operation with the original paper
head(GSE78220_AltAnalyze)
range(GSE78220_AltAnalyze[,-1])
# GSE91061 = BMS038 51 samples
BMS038.Pre.CountTable.normalized.log <- as.data.frame(readRDS('BMS038.Pre.CountTable.normalized.log.rds'))
BMS038_phenoData <- readRDS('BMS038_phenoData.rds')
BMS038.Pre.CountTable.normalized.log <- BMS038.Pre.CountTable.normalized.log[rowSums(BMS038.Pre.CountTable.normalized.log) > 10,] # same operation with the original paper, otherwise function fit() does not work
# For this cohort the Symbol column is appended LAST (genes came as rownames).
BMS038.Pre.CountTable.normalized.log$Symbol <- rownames(BMS038.Pre.CountTable.normalized.log)
BMS038_PhenoInfo <- BMS038_phenoData@data
head(BMS038.Pre.CountTable.normalized.log)
range(BMS038.Pre.CountTable.normalized.log[,-ncol(BMS038.Pre.CountTable.normalized.log)])
# CC_73samples from PRJEB23709 73 samples
CC_73samples_GE <- read.table('DATASET-PRJEB23709_Pre_73samples.txt',sep="\t",header=T)
CC_73samples_pData <- readRDS('PRJEB23709_Pre_73samples_phenoData.rds')
# Drop rows lacking a gene symbol before any signature matching.
CC_73samples_GE <- CC_73samples_GE[CC_73samples_GE$Symbol != '',]
CC_73samples_GE_matrix <- CC_73samples_GE[,c('Symbol',CC_73samples_pData$sample)]
head(CC_73samples_GE_matrix)
range(CC_73samples_GE_matrix[,-1])
#### MGSP project: 103 samples
NatMed_103samples_GE_matrix <- readRDS('NatMed_103samples_GE_matrix.rds')
NatMed_103samples_pData <- readRDS('NatMed_103samples_pData.rds')
head(NatMed_103samples_GE_matrix)
range(NatMed_103samples_GE_matrix[,-1])
################# use the same dataset for train and test with 5-fold cross-validation strategy:
# Class balances per cohort (negative:positive)
table(GSE78220_PhenoInfo2$class) # 15:13
table(CC_73samples_pData$class) # 27:46
table(BMS038_PhenoInfo$class) # 25:26 -> GSE91061
table(NatMed_103samples_pData$class) # 56:47 -> MGSP
# Build a Biobase ExpressionSet restricted to the given signature genes.
# exp         - data.frame with a "Symbol" column plus one column per sample
#               (note: the parameter shadows base::exp; name kept for callers)
# phenoInfo   - data.frame with columns 'class' and 'sample'
# features    - character vector of gene symbols to keep
# printngenes - if TRUE, print the dimensions of the filtered expression matrix
# Response labels are unified across datasets so the positive class is always
# "Responder" and the negative class "NonResponder".
# Returns an ExpressionSet whose assayData rows are the signature genes found
# in `exp` and whose phenoData carries the unified class labels.
processData <- function(exp, phenoInfo, features, printngenes=FALSE){
  ### unify response name for cross dataset test
  phenoInfo$class <- as.character(phenoInfo$class)
  phenoInfo$class <- ifelse(phenoInfo$class == 'nonPD','Responder',phenoInfo$class)
  phenoInfo$class <- ifelse(phenoInfo$class == 'PD','NonResponder',phenoInfo$class)
  phenoInfo$class <- ifelse(phenoInfo$class == 'Nonresponder','NonResponder',phenoInfo$class)
  phenoInfo$class <- ifelse(phenoInfo$class == 'Progressor','NonResponder',phenoInfo$class)
  pData <- data.frame(class=phenoInfo$class, sample=phenoInfo$sample,
                      row.names=phenoInfo$sample)
  phenoData <- new("AnnotatedDataFrame",data=pData)
  # Samples in phenotype order, Symbol column first.
  expdata_col_rearranged <- exp[,c("Symbol",phenoInfo$sample)]
  # Keep only signature genes, remembering which symbols survived IN ROW ORDER.
  keep <- expdata_col_rearranged$Symbol %in% features
  expdata.sig <- expdata_col_rearranged[keep,]
  kept_symbols <- expdata.sig$Symbol
  expdata.sig <- as.matrix(expdata.sig[,-1])
  # BUGFIX: label rows with the symbols actually kept, in the order they occur
  # in the data. The previous `rownames(expdata.sig) <- features` silently
  # mislabelled rows whenever the gene order in `exp` differed from the order
  # of `features`, and errored when some features were absent from `exp`.
  rownames(expdata.sig) <- kept_symbols
  if (printngenes){
    print(dim(expdata.sig))
  }
  ExpSet_V5 <- ExpressionSet(assayData=expdata.sig,phenoData=phenoData)
  return(ExpSet_V5)
}
# Train a cancerclass predictor on one cohort and score another.
# traindata/testdata       - data.frames with a "Symbol" column plus one column per sample
# features                 - candidate gene signature (symbols)
# phenoInfo_train/_test    - data.frames with 'class' and 'sample' columns
# Returns a data.frame with columns 'grp' (1 = negative class, 2 = positive
# class) and 'res' (continuous prediction score z), suitable for rocdata().
trainANDtestModel <- function(traindata, testdata, features, phenoInfo_train, phenoInfo_test){
  "
  expdata: col - sample, row - gene
  features: gene list
  phenoInfo: dataframe: class, sample
  "
  # Restrict the signature to genes measured in BOTH cohorts.
  common <- intersect(traindata$Symbol,testdata$Symbol)
  features <- intersect(common, features)
  ExpSet_train <- processData(exp = traindata, phenoInfo = phenoInfo_train, features = features)
  ExpSet_test <- processData(exp = testdata, phenoInfo = phenoInfo_test, features = features)
  # Welch-test based gene ranking / predictor from the cancerclass package.
  predictor_V5 <- fit(ExpSet_train, method = "welch.test")
  # NOTE(review): positive/negative classes are taken from the ORDER of
  # unique(pData(...)$class) -- this assumes a stable class ordering in the
  # phenotype table; confirm for every dataset fed through here.
  positive.class <- unique(pData(ExpSet_test)$class)[2]
  negative.class <- unique(pData(ExpSet_test)$class)[1]
  #print(table(pData(ExpSet_test)$class))
  #print(length(features))
  prediction_V5 <- predict(predictor_V5, ExpSet_test, as.character(positive.class), ngenes=length(features), dist = "cor")
  # NOTE(review): labels are rebuilt from class counts, assuming predict()
  # returns samples grouped negative-first in this order -- verify against the
  # cancerclass prediction slot documentation.
  out_V5 <- as.factor(rep(c(1,2),c(table(pData(ExpSet_test)[["class"]])[[negative.class]],table(pData(ExpSet_test)[["class"]])[[positive.class]])))
  z_V5 <- as.numeric(prediction_V5@prediction[,'z'])
  Test_V5 <- cbind(out_V5,z_V5)
  colnames(Test_V5) <- c('grp','res')
  Test_V5 <- as.data.frame(Test_V5)
  return(Test_V5)
}
# Run num-fold cross-validation of the cancerclass predictor on one dataset.
# expdata   - expression data.frame with a "Symbol" column plus one column per sample
# phenoInfo - data.frame with columns 'class' and 'sample'
# features  - gene signature (character vector of symbols)
# num       - number of CV folds (default 5)
# Returns a list:
#   auc    - numeric vector with one AUC per fold
#   result - data.frame ('label', 'prediction') pooled over all folds
testCV <- function(expdata, phenoInfo, features, num=5){
  set.seed(17)  # fixed seed so fold assignment is reproducible
  # NOTE(review): folds are built from the sample IDs, not stratified by class,
  # so per-fold class balance is not guaranteed.
  folds <- createFolds(y=phenoInfo$sample,k=num)
  auc_value <- as.numeric()
  fold_results <- vector("list", num)
  # BUGFIX: iterate over all `num` folds; the loop was hard-coded to 1:5 and
  # ignored the `num` argument for any other fold count.
  for (i in seq_len(num)){
    fold_test <- phenoInfo[folds[[i]],]    # held-out samples for this fold
    fold_train <- phenoInfo[-folds[[i]],]  # remaining samples used for training
    print(table(fold_train$class))
    print(table(fold_test$class))
    test0 <- trainANDtestModel(traindata = expdata, testdata = expdata,
                               features = features, phenoInfo_train = fold_train, phenoInfo_test = fold_test)
    colnames(test0) <- c('label','prediction')
    auc_value <- append(auc_value, as.numeric(rocdata(test0$label, test0$prediction)$stats$auc))
    fold_results[[i]] <- test0
  }
  # Pool per-fold predictions into one data.frame (replaces the previous
  # grow-by-rbind pattern, which also special-cased the first iteration).
  test <- do.call(rbind, fold_results)
  return(list(auc=auc_value, result=test))
}
# ImmuneCells.Sig: 5-fold CV ROC on each of the four melanoma ICB cohorts,
# combined into one 2x2 figure.
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = ImSig.genes)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = ImSig.genes)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = ImSig.genes)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = ImSig.genes)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("OwnSig_four_sets_ImmuneCells.Sig.tiff", width = 11, height = 11, units = "in", res = 800)
#plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
plot_grid(p1, p2, p3 ,p4, ncol = 2)
dev.off()
###################################################################################################################################
###################################################################################################################################
###################################################################################################################################
### testing other 12 signatures: each section defines a published signature,
### runs testCV() on the four cohorts and writes a 2x2 ROC figure.
###################################################################################################################################
# Other_1 IFNG.Sig (interferon-gamma 6-gene signature)
IFNG.Sig <- c('IFNG', 'STAT1', 'IDO1', 'CXCL10', 'CXCL9', 'HLA-DRA')
IFNG.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = IFNG.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = IFNG.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = IFNG.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = IFNG.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others1_four_sets_IFNG.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_2 CD8.Sig (CD8 T-cell marker genes)
CD8.Sig <- c("CD8A", "CD8B", "CD3D", "CD3E", "CD3G")
CD8.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = CD8.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = CD8.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = CD8.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
# NOTE(review): this p4 is overwritten below after excluding all-zero patients.
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = CD8.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
criteria.zero <- colSums(NatMed_103samples_GE_matrix[NatMed_103samples_GE_matrix$Symbol %in% CD8.Sig,-1])==0 ### exclude patients with 0 values for all CD8.Sig genes in MGSP dataset
zero.patients <- colnames(NatMed_103samples_GE_matrix[,-1])[criteria.zero]
NatMed_103samples_GE_matrix_v2 <- NatMed_103samples_GE_matrix[,!colnames(NatMed_103samples_GE_matrix) %in% zero.patients]
NatMed_103samples_pData_v2 <- NatMed_103samples_pData[!NatMed_103samples_pData$sample %in% zero.patients,]
test0 <- testCV(expdata = NatMed_103samples_GE_matrix_v2, phenoInfo = NatMed_103samples_pData_v2, features = CD8.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others2_four_sets_CD8.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_3 PDL1.Sig (PD-L1 axis genes; 'PDL1' kept alongside the official 'CD274' symbol)
PDL1.Sig <- c('PDL1','CD274','PDCD1LG2','PDCD1')
PDL1.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = PDL1.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = PDL1.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = PDL1.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
# NOTE(review): this p4 is overwritten below after excluding all-zero patients.
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = PDL1.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
criteria.zero <- colSums(NatMed_103samples_GE_matrix[NatMed_103samples_GE_matrix$Symbol %in% PDL1.Sig,-1])==0 ### exclude patients with 0 values for all PDL1.Sig genes in MGSP dataset
zero.patients <- colnames(NatMed_103samples_GE_matrix[,-1])[criteria.zero]
NatMed_103samples_GE_matrix_v2 <- NatMed_103samples_GE_matrix[,!colnames(NatMed_103samples_GE_matrix) %in% zero.patients]
NatMed_103samples_pData_v2 <- NatMed_103samples_pData[!NatMed_103samples_pData$sample %in% zero.patients,]
test0 <- testCV(expdata = NatMed_103samples_GE_matrix_v2, phenoInfo = NatMed_103samples_pData_v2, features = PDL1.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others3_four_sets_PDL1.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_4 CRMA.Sig (MAGE-A cancer-testis antigen signature)
CRMA.Sig <- c('CT1.2', 'MAGEA2', 'MAGEA2A', 'MAGEA2B', 'MAGEA3', 'MAGEA6', 'MAGEA12')
CRMA.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = CRMA.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
criteria.zero <- colSums(BMS038.Pre.CountTable.normalized.log[BMS038.Pre.CountTable.normalized.log$Symbol %in% CRMA.Sig,-ncol(BMS038.Pre.CountTable.normalized.log)])==0 ### exclude patients with 0 values for all CRMA.Sig genes in BMS dataset
zero.patients <- colnames(BMS038.Pre.CountTable.normalized.log[,-ncol(BMS038.Pre.CountTable.normalized.log)])[criteria.zero]
BMS038.Pre.CountTable.normalized.log_v2 <- BMS038.Pre.CountTable.normalized.log[,!colnames(BMS038.Pre.CountTable.normalized.log) %in% zero.patients]
BMS038_PhenoInfo_v2 <- BMS038_PhenoInfo[!BMS038_PhenoInfo$sample %in% zero.patients,]
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log_v2, phenoInfo = BMS038_PhenoInfo_v2, features = CRMA.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
# Inspect CRMA gene expression before choosing the low-expression threshold below
CC_73samples_GE_matrix[CC_73samples_GE_matrix$Symbol %in% CRMA.Sig,]
criteria.zero <- colSums(CC_73samples_GE_matrix[CC_73samples_GE_matrix$Symbol %in% CRMA.Sig,-1])< 10 ### exclude patients with little expression for all CRMA.Sig genes in PRJEB23709 dataset
zero.patients <- colnames(CC_73samples_GE_matrix[,-1])[criteria.zero]
CC_73samples_GE_matrix_v2 <- CC_73samples_GE_matrix[,!colnames(CC_73samples_GE_matrix) %in% zero.patients]
CC_73samples_pData_v2 <- CC_73samples_pData[!CC_73samples_pData$sample %in% zero.patients,]
test0 <- testCV(expdata = CC_73samples_GE_matrix_v2, phenoInfo = CC_73samples_pData_v2, features = CRMA.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
criteria.zero <- colSums(NatMed_103samples_GE_matrix[NatMed_103samples_GE_matrix$Symbol %in% CRMA.Sig,-1])==0 ### exclude patients with 0 values for all CRMA.Sig genes in MGSP dataset
zero.patients <- colnames(NatMed_103samples_GE_matrix[,-1])[criteria.zero]
NatMed_103samples_GE_matrix_v2 <- NatMed_103samples_GE_matrix[,!colnames(NatMed_103samples_GE_matrix) %in% zero.patients]
NatMed_103samples_pData_v2 <- NatMed_103samples_pData[!NatMed_103samples_pData$sample %in% zero.patients,]
test0 <- testCV(expdata = NatMed_103samples_GE_matrix_v2, phenoInfo = NatMed_103samples_pData_v2, features = CRMA.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others4_four_sets_CRMA.Sig.tiff", width = 14, height = 14, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_5 IMPRES.Sig (immune checkpoint gene panel used by the IMPRES predictor)
IMPRES.Sig <- c("BTLA", "CD200", "CD200R1", "CD27", "CD276", "CD28", "CD40", "CD80", "CD86", "CEACAM1", "CTLA4", "IDO1",
                "IL2RB", "LAG3", "PVR", "PVRL2", "TIGIT", "TNFRSF18", "TNFRSF4", "TNFRSF9", "PDL1", "HAVCR2", "PDCD1", "PDCD1LG2", "TNFRSF14", "TNFSF4", "TNFSF9", "C10orf54")
IMPRES.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = IMPRES.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = IMPRES.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = IMPRES.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = IMPRES.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others5_four_sets_IMPRES.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_6 IRG.Sig
IRG.Sig <- c('LEPR','PRLHR','NR2F2','PRL','NRP1','ANGPTL5','IGF1','TNFRSF10B','TNFRSF10A','PLAU','IFI30') # Alias for 'PRLHR' are:'GR3','GPR10','PrRPR'
IRG.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = IRG.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = IRG.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = IRG.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = IRG.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others6_four_sets_IRG.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_7 LRRC15.CAF.Sig
LRRC15.CAF.Sig <- c('MMP11','COL11A1','C1QTNF3','CTHRC1','COL12A1','COL10A1','COL5A2','GJB2','THBS2','AEBP1','MFAP2','LRRC15','PLAU','ITGA11') # Alias for 'PRLHR' are:'GR3','GPR10','PrRPR'
LRRC15.CAF.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = LRRC15.CAF.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = LRRC15.CAF.Sig)[['result']]
# Remaining LRRC15.CAF.Sig evaluations: one 5-fold CV per dataset, each
# summarised as a pooled ROC curve (test0 is a throwaway global reused
# between datasets).
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = LRRC15.CAF.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = LRRC15.CAF.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
# Write the 2x2 panel of ROC curves to a high-resolution TIFF.
tiff("Others7_four_sets_LRRC15.CAF.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_8_T.cell.inflamed.Sig
# T-cell-inflamed gene expression signature (18 genes).
T.cell.inflamed.Sig <- c('CD3D','IDO1','CIITA','CD3E','CCL5','GZMK','CD2','HLA-DRA','CXCL13','IL2RG','NKG7','HLA-E','CXCR6','LAG3','TAGAP','CXCL10','STAT1','GZMB')
T.cell.inflamed.Sig
# Cross-validated ROC per dataset for the T-cell-inflamed signature.
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = T.cell.inflamed.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = T.cell.inflamed.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = T.cell.inflamed.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = T.cell.inflamed.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others8_four_sets_T.cell.inflamed.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_9 IPRES.Sig
# Innate anti-PD-1 resistance (IPRES) signature, 16 genes.
# BUG FIX: 'IL10' was mistyped as '1L10' (leading digit one), so it could
# never match a gene symbol in any expression matrix.
# NOTE(review): 'RORS' also looks suspect (the published IPRES list has
# ROR2) -- confirm against the source signature before changing it.
IPRES.Sig <- c('ANGPT2','AXL','CCL13','CCL2','CCL7','CDH1','FAP','FLT1','IL10','LOXL2','RORS','TAGLN','TWIST2','VEGFA','VEGFC','WNT5A')
# Echo the signature, then run 5-fold CV ROC analysis on each dataset.
IPRES.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = IPRES.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = IPRES.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = IPRES.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = IPRES.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
# Assemble the four ROC panels into one figure.
tiff("Others9_four_sets_IPRES.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_10 Inflammatory.Sig
# Inflammatory signature, 27 genes.
# BUG FIX: 'HKA.DRB1' was a garbled 'HLA-DRB1' (other entries in this file
# use the dash form, e.g. 'HLA-DRA', 'HLA-E'), so it never matched a symbol.
# NOTE(review): 'PDL1' is not an official HGNC symbol (CD274 is); left
# unchanged in case the expression matrices use it -- confirm.
Inflammatory.Sig <- c('CCL5','CCR5','PDL1','CD3D','CD3E','CD8A','CIITA','CTLA4','CXCL10','CXCL11','CXCL13','CXCL9','GZMA','GZMB','HLA-DRA','HLA-DRB1','HLA-E',
                      'IDO1','IL2RG','ITGAL','LAG3','NKG7','PDCD1','PRF1','PTPRC','STAT1','TAGAP')
# Echo the signature, then run 5-fold CV ROC analysis on each dataset.
Inflammatory.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = Inflammatory.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = Inflammatory.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = Inflammatory.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = Inflammatory.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others10_four_sets_Inflammatory.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_11 EMT.Sig
# Epithelial-mesenchymal transition signature (12 genes).
EMT.Sig <- c('CDH1','CDH3','CLDN4','EPCAM','ST14','MAL2','VIM','SNAI2','ZEB2','FN1','MMP2','AGER')
EMT.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = EMT.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = EMT.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = EMT.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = EMT.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others11_four_sets_EMT.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_12 Blood.Sig
# Blood-based 15-gene signature.
Blood.Sig <- c('ADAM17', 'CDK2', 'CDKN2A', 'DPP4', 'ERBB2', 'HLA-DRA', 'ICOS', 'ITGA4', 'LARGE', 'MYC', 'NAB2', 'NRAS', 'RHOC', 'TGFB1', 'TIMP1')
Blood.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = Blood.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = Blood.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = Blood.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = Blood.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others12_four_sets_Blood.Sig.tiff", width = 13.5, height = 13.5, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
############################## PCA analysis:
# Genes shared by all four expression matrices, further restricted to the
# ImSig signature genes, used below for a cross-dataset PCA.
common_genes <- Reduce(intersect, list(GSE78220_AltAnalyze$Symbol, CC_73samples_GE_matrix$Symbol, BMS038.Pre.CountTable.normalized.log$Symbol,
                                       NatMed_103samples_GE_matrix$Symbol))
common_genes <- intersect(common_genes, ImSig.genes)
getCommon <- function(df, common){
  # Subset an expression data.frame to a shared gene set: keep only rows
  # whose Symbol is in `common`, move Symbol into the row names, and drop
  # the Symbol column.
  #
  # df     - data.frame with a 'Symbol' column plus one column per sample
  # common - character vector of gene symbols to retain
  # Returns the subsetted data.frame (genes x samples).
  #
  # BUG FIX: filter on the `common` argument rather than the global
  # `common_genes`, so the function honours whatever gene set it is given
  # (the original only worked by accident via the global).
  df <- df[df$Symbol %in% common, ]
  rownames(df) <- df$Symbol
  df <- df[, -which(colnames(df) == 'Symbol')]
  df
}
# Build one merged matrix (genes x samples) across the four datasets and a
# matching dataset label for each sample column.
GSE78220 <- getCommon(GSE78220_AltAnalyze, common_genes)
PRJEB23709 <- getCommon(CC_73samples_GE_matrix, common_genes)
GSE91061 <- getCommon(BMS038.Pre.CountTable.normalized.log, common_genes)
MGSP <- getCommon(NatMed_103samples_GE_matrix, common_genes)
merged <- cbind(GSE78220, PRJEB23709, GSE91061, MGSP)
group_list <- c(rep('GSE78220',dim(GSE78220)[2]), rep('PRJEB23709', dim(PRJEB23709)[2]),
                rep('GSE91061',dim(GSE91061)[2]), rep('MGSP', dim(MGSP)[2]))
# PCA on samples (transpose so rows = samples), variables scaled to unit
# variance; points coloured by dataset with concentration ellipses.
# NOTE(review): `repel =T` uses the T shorthand; prefer TRUE.
dat.pca <- PCA(t(merged), graph = FALSE, scale.unit = TRUE)
g<-fviz_pca_ind(dat.pca,repel =T,
                geom.ind = "point", # show points only (but not "text")
                col.ind = group_list, # color by groups
                # palette = c("#00AFBB", "#E7B800"),
                addEllipses = TRUE, # Concentration ellipses
                legend.title = "Dataset",
)
tiff("PCA_four_RNAseq_datasets.tiff", width = 8, height = 6, units = "in", res = 800)
print(g)
dev.off()
|
/immuneSig_CV_test_mod.R
|
no_license
|
donghaixiong/ReplyToMattersArising
|
R
| false
| false
| 31,403
|
r
|
rm(list=ls()) # NOTE(review): wiping the global environment inside a script is discouraged -- prefer running in a fresh R session
####### R version 3.6.3 (2020-02-29) -- "Holding the Windsock"
# Dependencies: ROC tooling (rocc), plotting (ggplot2/cowplot/RColorBrewer),
# Bioconductor expression containers and DE tools (Biobase/edgeR/limma/
# biomaRt), the cancerclass predictor used by trainANDtestModel, caret for
# CV fold creation, and FactoMineR/factoextra for the PCA section.
library(rocc)
library(ggplot2)
library(Biobase)
library(edgeR)
library(limma)
library(biomaRt)
library(dplyr)
library(cancerclass)
library(cowplot)
library(caret)
library(RColorBrewer)
library(FactoMineR)
library(factoextra)
########################
#calculate roc and draw a roc curve from Xiong et al. (https://github.com/donghaixiong/Immune_cells_analysis)
rocdata <- function(grp, pred){
  # Produces x and y co-ordinates for an ROC curve plot.
  # Arguments: grp  - labels classifying subject status (exactly 2 levels)
  #            pred - numeric prediction score for each observation
  # Output: list with 2 components:
  #   roc   = data.frame with x (FPR) and y (TPR) co-ordinates of the plot
  #   stats = data.frame containing: area under the ROC curve, p value, and
  #           upper/lower 95% confidence interval (Hanley & McNeil 1982 SE)
  grp <- as.factor(grp)

  if (length(pred) != length(grp)) {
    stop("The number of classifiers must match the number of data points")
  }

  if (length(levels(grp)) != 2) {
    stop("There must only be 2 values for the classifier")
  }

  cut <- unique(pred)
  # Strict inequalities: observations exactly equal to a cutoff fall in
  # neither tally (behaviour kept from the original implementation).
  tp <- vapply(cut, function(x) length(which(pred > x & grp == levels(grp)[2])), integer(1))
  fn <- vapply(cut, function(x) length(which(pred < x & grp == levels(grp)[2])), integer(1))
  fp <- vapply(cut, function(x) length(which(pred > x & grp == levels(grp)[1])), integer(1))
  tn <- vapply(cut, function(x) length(which(pred < x & grp == levels(grp)[1])), integer(1))

  tpr <- tp / (tp + fn)
  fpr <- fp / (fp + tn)
  roc <- data.frame(x = fpr, y = tpr)
  roc <- roc[order(roc$x, roc$y), ]

  # AUC via the trapezoidal rule over the sorted ROC points; as.numeric()
  # drops the 1x1 matrix that %*% returns so downstream values are scalars.
  i <- 2:nrow(roc)
  auc <- as.numeric((roc$x[i] - roc$x[i - 1]) %*% (roc$y[i] + roc$y[i - 1]) / 2)

  # Hanley & McNeil (1982) standard error of the AUC.
  pos <- pred[grp == levels(grp)[2]]
  neg <- pred[grp == levels(grp)[1]]
  q1 <- auc / (2 - auc)
  q2 <- (2 * auc^2) / (1 + auc)
  se.auc <- sqrt(((auc * (1 - auc)) + ((length(pos) - 1) * (q1 - auc^2)) + ((length(neg) - 1) * (q2 - auc^2))) / (length(pos) * length(neg)))
  # BUG FIX: a 95% confidence interval uses the normal quantile 1.96;
  # the original multiplied the SE by 0.96.
  ci.upper <- auc + (se.auc * 1.96)
  ci.lower <- auc - (se.auc * 1.96)

  # Two-sided p value for H0: AUC = 0.5, using the null-hypothesis SE.
  se.auc.null <- sqrt((1 + length(pos) + length(neg)) / (12 * length(pos) * length(neg)))
  z <- (auc - 0.5) / se.auc.null
  p <- 2 * pnorm(-abs(z))

  stats <- data.frame(auc = auc,
                      p.value = p,
                      ci.upper = ci.upper,
                      ci.lower = ci.lower
                      )
  list(roc = roc, stats = stats)
}
# Render a single ROC curve with the AUC summary shown in the legend.
rocplot.single.V2 <- function(grp, pred, title = "ROC Plot", p.value = FALSE){
  # grp     - two-level labels of subject status
  # pred    - numeric prediction score per observation
  # title   - title printed above the plot
  # p.value - TRUE: annotate the AUC with its p value; FALSE: with its
  #           95% confidence interval
  # Returns a ggplot object (print it to render).
  require(ggplot2)

  roc_info <- rocdata(grp, pred)

  # Compose the legend label from the ROC summary statistics.
  legend_label <- if (p.value == TRUE) {
    with(roc_info$stats, paste("AUC=", signif(auc, 2), " (P=", signif(p.value, 2), ")", sep = ""))
  } else {
    with(roc_info$stats, paste("AUC = ", signif(auc, 2), " (95% CI: ", signif(ci.lower, 2), "-", signif(ci.upper, 2), ")", sep = ""))
  }

  # Curve plus the diagonal reference line of a no-skill classifier.
  curve_plot <- ggplot(roc_info$roc, aes(x = x, y = y)) +
    geom_line(aes(colour = "")) +
    geom_abline(intercept = 0, slope = 1) +
    theme_bw() +
    scale_x_continuous("False Positive Rate (1-Specificity)") +
    scale_y_continuous("True Positive Rate (Sensitivity)") +
    scale_colour_manual(labels = legend_label, values = "#000000")

  # Enlarge fonts, strip the grid and tuck the legend inside the panel.
  curve_plot <- curve_plot +
    theme(
      plot.title = element_text(size = 14, hjust = 0.5),
      axis.text.x = element_text(face = "bold", size = 14),
      axis.text.y = element_text(face = "bold", size = 14),
      axis.title.x = element_text(face = "bold", size = 14),
      axis.title.y = element_text(face = "bold", size = 14, angle = 90),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      legend.justification = c(1, 0),
      legend.position = c(0.95, 0.15),
      legend.text = element_text(size = 14),
      legend.title = element_blank(),
      legend.key = element_blank()
    ) +
    labs(title = title)

  curve_plot
}
# NOTE(review): absolute Windows path -- breaks on any other machine;
# consider a relative path or a project-root helper.
setwd('C:\\Donghai_Desktop2\\NCpaper_argument_discuss\\Redo_analysis')
ImSig.genes <- readRDS('ImSig.rds') # the immune-cell signature gene list tested first below
# GSE78220 28 samples
GSE78220_AltAnalyze <- readRDS('GSE78220_expressionMatrix.rds')
GSE78220_PhenoInfo2 <- readRDS('GSE78220_PhenoInfo2.rds')
# Keep the Symbol column plus the phenotyped samples, in pheno order.
GSE78220_AltAnalyze <- GSE78220_AltAnalyze[,c('Symbol',GSE78220_PhenoInfo2$sample)]
GSE78220_AltAnalyze <- GSE78220_AltAnalyze[rowSums(GSE78220_AltAnalyze[,-1]) > 1, ] # same operation with the original paper
head(GSE78220_AltAnalyze)
range(GSE78220_AltAnalyze[,-1])
# GSE91061 = BMS038 51 samples
BMS038.Pre.CountTable.normalized.log <- as.data.frame(readRDS('BMS038.Pre.CountTable.normalized.log.rds'))
BMS038_phenoData <- readRDS('BMS038_phenoData.rds')
BMS038.Pre.CountTable.normalized.log <- BMS038.Pre.CountTable.normalized.log[rowSums(BMS038.Pre.CountTable.normalized.log) > 10,] # same operation with the original paper, otherwise function fit() does not work
# Symbols are in the row names here; copy them into a Symbol column so the
# layout matches the other expression matrices.
BMS038.Pre.CountTable.normalized.log$Symbol <- rownames(BMS038.Pre.CountTable.normalized.log)
BMS038_PhenoInfo <- BMS038_phenoData@data
head(BMS038.Pre.CountTable.normalized.log)
range(BMS038.Pre.CountTable.normalized.log[,-ncol(BMS038.Pre.CountTable.normalized.log)])
# CC_73samples from PRJEB23709 73 samples
CC_73samples_GE <- read.table('DATASET-PRJEB23709_Pre_73samples.txt',sep="\t",header=T)
CC_73samples_pData <- readRDS('PRJEB23709_Pre_73samples_phenoData.rds')
CC_73samples_GE <- CC_73samples_GE[CC_73samples_GE$Symbol != '',] # drop rows without a gene symbol
CC_73samples_GE_matrix <- CC_73samples_GE[,c('Symbol',CC_73samples_pData$sample)]
head(CC_73samples_GE_matrix)
range(CC_73samples_GE_matrix[,-1])
#### MGSP project: 103 samples
NatMed_103samples_GE_matrix <- readRDS('NatMed_103samples_GE_matrix.rds')
NatMed_103samples_pData <- readRDS('NatMed_103samples_pData.rds')
head(NatMed_103samples_GE_matrix)
range(NatMed_103samples_GE_matrix[,-1])
################# use the same dataset for train and test with 5-fold cross-validation strategy:
# Class counts per dataset (ratios noted in the original comments).
table(GSE78220_PhenoInfo2$class) # 15:13
table(CC_73samples_pData$class) # 27:46
table(BMS038_PhenoInfo$class) # 25:26 -> GSE91061
table(NatMed_103samples_pData$class) # 56:47 -> MGSP
processData <- function(exp, phenoInfo, features, printngenes=FALSE){
  # Build a Biobase ExpressionSet restricted to the signature genes.
  #
  # exp         - expression data.frame with a 'Symbol' column plus one
  #               column per sample
  # phenoInfo   - data.frame with 'class' and 'sample' columns
  # features    - character vector of gene symbols to keep
  # printngenes - if TRUE, print the dimensions of the signature matrix
  # Returns an ExpressionSet whose rows are the signature genes found in
  # `exp` and whose columns follow phenoInfo$sample order.

  ### unify response name for cross dataset test
  phenoInfo$class <- as.character(phenoInfo$class)
  phenoInfo$class <- ifelse(phenoInfo$class == 'nonPD','Responder',phenoInfo$class)
  phenoInfo$class <- ifelse(phenoInfo$class == 'PD','NonResponder',phenoInfo$class)
  phenoInfo$class <- ifelse(phenoInfo$class == 'Nonresponder','NonResponder',phenoInfo$class)
  phenoInfo$class <- ifelse(phenoInfo$class == 'Progressor','NonResponder',phenoInfo$class)

  pData = data.frame(class=phenoInfo$class, sample=phenoInfo$sample,
                     row.names=phenoInfo$sample)
  phenoData <- new("AnnotatedDataFrame",data=pData)

  # Reorder sample columns to pheno order, then keep only signature genes.
  expdata_col_rearranged <- exp[,c("Symbol",phenoInfo$sample)]
  expdata.sig <- expdata_col_rearranged[expdata_col_rearranged$Symbol %in% features,]
  # BUG FIX: take the row names from the rows actually selected rather than
  # assigning `features` directly.  `%in%` keeps rows in the order of `exp`
  # (not of `features`), so the original mislabelled genes whenever the two
  # orders differed, and errored when a feature was absent or duplicated.
  sig.symbols <- expdata.sig$Symbol
  expdata.sig <- expdata.sig[,-1]
  expdata.sig <- as.matrix(expdata.sig)
  rownames(expdata.sig) <- sig.symbols
  if (printngenes){
    print(dim(expdata.sig))
  }
  ExpSet_V5 <- ExpressionSet(assayData=as.matrix(expdata.sig),phenoData=phenoData)
  return(ExpSet_V5)
}
trainANDtestModel <- function(traindata, testdata, features, phenoInfo_train, phenoInfo_test){
  "
  expdata: col - sample, row - gene
  features: gene list
  phenoInfo: dataframe: class, sample
  "
  # Train a cancerclass predictor on `traindata` and score `testdata`,
  # restricted to the signature genes present in both datasets.
  # Returns a data.frame with columns grp (1 = negative class, 2 = positive
  # class) and res (the continuous prediction score z).
  common <- intersect(traindata$Symbol,testdata$Symbol)
  features <- intersect(common, features)
  ExpSet_train <- processData(exp = traindata, phenoInfo = phenoInfo_train, features = features)
  ExpSet_test <- processData(exp = testdata, phenoInfo = phenoInfo_test, features = features)
  # Fit the two-group classifier (Welch t-test gene ranking, cancerclass).
  predictor_V5 <- fit(ExpSet_train, method = "welch.test")
  # NOTE(review): positive/negative classes are taken from the order of
  # first appearance in the test set's pData -- this assumes both classes
  # are present and that the order matches the intended direction; confirm.
  positive.class <- unique(pData(ExpSet_test)$class)[2]
  negative.class <- unique(pData(ExpSet_test)$class)[1]
  #print(table(pData(ExpSet_test)$class))
  #print(length(features))
  prediction_V5 <- predict(predictor_V5, ExpSet_test, as.character(positive.class), ngenes=length(features), dist = "cor")
  # NOTE(review): labels are rebuilt as a run of 1s then 2s from the class
  # counts -- this assumes the prediction rows come back grouped with the
  # negative class first; verify against cancerclass::predict row ordering.
  out_V5 <- as.factor(rep(c(1,2),c(table(pData(ExpSet_test)[["class"]])[[negative.class]],table(pData(ExpSet_test)[["class"]])[[positive.class]])))
  z_V5 <- as.numeric(prediction_V5@prediction[,'z'])
  Test_V5 <- cbind(out_V5,z_V5)
  colnames(Test_V5) <- c('grp','res')
  Test_V5 <- as.data.frame(Test_V5)
  return(Test_V5)
}
testCV <- function(expdata, phenoInfo, features, num=5){
  # num-fold cross-validation of a gene signature on a single dataset.
  #
  # expdata   - expression data.frame with 'Symbol' plus sample columns
  # phenoInfo - data.frame with 'class' and 'sample' columns
  # features  - character vector of signature gene symbols
  # num       - number of CV folds (default 5)
  # Returns a list: auc = per-fold AUC values, result = pooled data.frame
  # of held-out labels and prediction scores across all folds.
  set.seed(17)  # fixed seed so the fold assignment is reproducible
  folds <- createFolds(y=phenoInfo$sample,k=num)
  auc_value <- as.numeric()
  test <- NULL
  # BUG FIX: iterate over all `num` folds; the original hard-coded 1:5 and
  # would error (or skip folds) whenever num != 5.
  for (i in seq_len(num)){
    fold_test <- phenoInfo[folds[[i]],] # fold i is held out for testing
    fold_train <- phenoInfo[-folds[[i]],] # remaining samples are used for training
    print(table(fold_train$class))
    print(table(fold_test$class))
    test0 <- trainANDtestModel(traindata = expdata, testdata = expdata,
                               features = features, phenoInfo_train = fold_train, phenoInfo_test = fold_test)
    colnames(test0) <- c('label','prediction')
    auc_value <- append(auc_value, as.numeric(rocdata(test0$label, test0$prediction)$stats$auc))
    # Accumulate the held-out predictions across folds.
    if (is.null(test)){
      test <- test0
    }else{
      test <- rbind(test, test0)
    }
  }
  return(list(auc=auc_value, result=test))
}
# 5-fold CV of the authors' own ImmuneCells signature on each of the four
# datasets; one pooled ROC per dataset, assembled into a 2x2 panel.
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = ImSig.genes)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = ImSig.genes)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = ImSig.genes)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = ImSig.genes)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("OwnSig_four_sets_ImmuneCells.Sig.tiff", width = 11, height = 11, units = "in", res = 800)
#plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
plot_grid(p1, p2, p3 ,p4, ncol = 2)
dev.off()
###################################################################################################################################
###################################################################################################################################
###################################################################################################################################
### testing other 12 signatures
###################################################################################################################################
# Other_1 IFNG.Sig
# Interferon-gamma 6-gene signature.
IFNG.Sig <- c('IFNG', 'STAT1', 'IDO1', 'CXCL10', 'CXCL9', 'HLA-DRA')
IFNG.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = IFNG.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = IFNG.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = IFNG.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = IFNG.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others1_four_sets_IFNG.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_2 CD8.Sig
# CD8 T-cell 5-gene signature.
CD8.Sig <- c("CD8A", "CD8B", "CD3D", "CD3E", "CD3G")
CD8.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = CD8.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = CD8.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = CD8.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = CD8.Sig)[['result']]
# NOTE(review): this p4 is immediately discarded -- it is recomputed below
# after removing MGSP samples whose CD8.Sig genes are all zero.
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
criteria.zero <- colSums(NatMed_103samples_GE_matrix[NatMed_103samples_GE_matrix$Symbol %in% CD8.Sig,-1])==0 ### exclude patients with 0 values for all CD8.Sig genes in MGSP dataset
zero.patients <- colnames(NatMed_103samples_GE_matrix[,-1])[criteria.zero]
NatMed_103samples_GE_matrix_v2 <- NatMed_103samples_GE_matrix[,!colnames(NatMed_103samples_GE_matrix) %in% zero.patients]
NatMed_103samples_pData_v2 <- NatMed_103samples_pData[!NatMed_103samples_pData$sample %in% zero.patients,]
test0 <- testCV(expdata = NatMed_103samples_GE_matrix_v2, phenoInfo = NatMed_103samples_pData_v2, features = CD8.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others2_four_sets_CD8.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_3 PDL1.Sig
# PDL1 i.e., PDL1
# PD-1/PD-L1 axis genes.
PDL1.Sig <- c('PDL1','CD274','PDCD1LG2','PDCD1')
PDL1.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = PDL1.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = PDL1.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = PDL1.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = PDL1.Sig)[['result']]
# NOTE(review): as above, this p4 is discarded and recomputed after
# removing MGSP samples with all-zero PDL1.Sig expression.
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
criteria.zero <- colSums(NatMed_103samples_GE_matrix[NatMed_103samples_GE_matrix$Symbol %in% PDL1.Sig,-1])==0 ### exclude patients with 0 values for all PDL1.Sig genes in MGSP dataset
zero.patients <- colnames(NatMed_103samples_GE_matrix[,-1])[criteria.zero]
NatMed_103samples_GE_matrix_v2 <- NatMed_103samples_GE_matrix[,!colnames(NatMed_103samples_GE_matrix) %in% zero.patients]
NatMed_103samples_pData_v2 <- NatMed_103samples_pData[!NatMed_103samples_pData$sample %in% zero.patients,]
test0 <- testCV(expdata = NatMed_103samples_GE_matrix_v2, phenoInfo = NatMed_103samples_pData_v2, features = PDL1.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others3_four_sets_PDL1.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_4 CRMA.Sig
# MAGE-A cancer-testis antigen signature.
# NOTE(review): 'CT1.2', 'MAGEA2A' and 'MAGEA2B' do not look like standard
# HGNC symbols -- confirm against the source CRMA gene list.
CRMA.Sig <- c('CT1.2', 'MAGEA2', 'MAGEA2A', 'MAGEA2B', 'MAGEA3', 'MAGEA6', 'MAGEA12')
CRMA.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = CRMA.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
# GSE91061: drop samples whose CRMA.Sig genes are all zero before CV.
criteria.zero <- colSums(BMS038.Pre.CountTable.normalized.log[BMS038.Pre.CountTable.normalized.log$Symbol %in% CRMA.Sig,-ncol(BMS038.Pre.CountTable.normalized.log)])==0 ### exclude patients with 0 values for all CRMA.Sig genes in BMS dataset
zero.patients <- colnames(BMS038.Pre.CountTable.normalized.log[,-ncol(BMS038.Pre.CountTable.normalized.log)])[criteria.zero]
BMS038.Pre.CountTable.normalized.log_v2 <- BMS038.Pre.CountTable.normalized.log[,!colnames(BMS038.Pre.CountTable.normalized.log) %in% zero.patients]
BMS038_PhenoInfo_v2 <- BMS038_PhenoInfo[!BMS038_PhenoInfo$sample %in% zero.patients,]
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log_v2, phenoInfo = BMS038_PhenoInfo_v2, features = CRMA.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
# PRJEB23709: inspect the signature rows, then drop near-zero samples
# (total signature expression < 10) before CV.
CC_73samples_GE_matrix[CC_73samples_GE_matrix$Symbol %in% CRMA.Sig,]
criteria.zero <- colSums(CC_73samples_GE_matrix[CC_73samples_GE_matrix$Symbol %in% CRMA.Sig,-1])< 10 ### exclude patients with little expression for all CRMA.Sig genes in PRJEB23709 dataset
zero.patients <- colnames(CC_73samples_GE_matrix[,-1])[criteria.zero]
CC_73samples_GE_matrix_v2 <- CC_73samples_GE_matrix[,!colnames(CC_73samples_GE_matrix) %in% zero.patients]
CC_73samples_pData_v2 <- CC_73samples_pData[!CC_73samples_pData$sample %in% zero.patients,]
test0 <- testCV(expdata = CC_73samples_GE_matrix_v2, phenoInfo = CC_73samples_pData_v2, features = CRMA.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
# MGSP: drop all-zero samples before CV.
criteria.zero <- colSums(NatMed_103samples_GE_matrix[NatMed_103samples_GE_matrix$Symbol %in% CRMA.Sig,-1])==0 ### exclude patients with 0 values for all CRMA.Sig genes in MGSP dataset
zero.patients <- colnames(NatMed_103samples_GE_matrix[,-1])[criteria.zero]
NatMed_103samples_GE_matrix_v2 <- NatMed_103samples_GE_matrix[,!colnames(NatMed_103samples_GE_matrix) %in% zero.patients]
NatMed_103samples_pData_v2 <- NatMed_103samples_pData[!NatMed_103samples_pData$sample %in% zero.patients,]
test0 <- testCV(expdata = NatMed_103samples_GE_matrix_v2, phenoInfo = NatMed_103samples_pData_v2, features = CRMA.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others4_four_sets_CRMA.Sig.tiff", width = 14, height = 14, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_5 IMPRES.Sig
# IMPRES immune checkpoint gene set (28 genes).
# NOTE(review): 'PDL1' is not an official HGNC symbol (CD274 is) -- confirm
# which identifier the expression matrices use.
IMPRES.Sig <- c("BTLA", "CD200", "CD200R1", "CD27", "CD276", "CD28", "CD40", "CD80", "CD86", "CEACAM1", "CTLA4", "IDO1",
                "IL2RB", "LAG3", "PVR", "PVRL2", "TIGIT", "TNFRSF18", "TNFRSF4", "TNFRSF9", "PDL1", "HAVCR2", "PDCD1", "PDCD1LG2", "TNFRSF14", "TNFSF4", "TNFSF9", "C10orf54")
IMPRES.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = IMPRES.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = IMPRES.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = IMPRES.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = IMPRES.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others5_four_sets_IMPRES.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_6 IRG.Sig
# Immune-related gene signature (11 genes).
IRG.Sig <- c('LEPR','PRLHR','NR2F2','PRL','NRP1','ANGPTL5','IGF1','TNFRSF10B','TNFRSF10A','PLAU','IFI30') # Alias for 'PRLHR' are:'GR3','GPR10','PrRPR'
IRG.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = IRG.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = IRG.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = IRG.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = IRG.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others6_four_sets_IRG.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_7 LRRC15.CAF.Sig
# LRRC15+ cancer-associated fibroblast signature (14 genes).
LRRC15.CAF.Sig <- c('MMP11','COL11A1','C1QTNF3','CTHRC1','COL12A1','COL10A1','COL5A2','GJB2','THBS2','AEBP1','MFAP2','LRRC15','PLAU','ITGA11') # (a stale PRLHR-alias comment copied from IRG.Sig was removed here)
LRRC15.CAF.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = LRRC15.CAF.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = LRRC15.CAF.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = LRRC15.CAF.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = LRRC15.CAF.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others7_four_sets_LRRC15.CAF.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_8_T.cell.inflamed.Sig
# T-cell-inflamed gene expression signature (18 genes).
T.cell.inflamed.Sig <- c('CD3D','IDO1','CIITA','CD3E','CCL5','GZMK','CD2','HLA-DRA','CXCL13','IL2RG','NKG7','HLA-E','CXCR6','LAG3','TAGAP','CXCL10','STAT1','GZMB')
T.cell.inflamed.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = T.cell.inflamed.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = T.cell.inflamed.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = T.cell.inflamed.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = T.cell.inflamed.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others8_four_sets_T.cell.inflamed.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
# Other_9 IPRES.Sig
# Innate anti-PD-1 resistance (IPRES) signature, 16 genes.
# BUG FIX: 'IL10' was mistyped as '1L10' (leading digit one), so it could
# never match a gene symbol in any expression matrix.
# NOTE(review): 'RORS' also looks suspect (the published IPRES list has
# ROR2) -- confirm against the source signature before changing it.
IPRES.Sig <- c('ANGPT2','AXL','CCL13','CCL2','CCL7','CDH1','FAP','FLT1','IL10','LOXL2','RORS','TAGLN','TWIST2','VEGFA','VEGFC','WNT5A')
# Echo the signature, then run 5-fold CV ROC analysis on each dataset.
IPRES.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = IPRES.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = IPRES.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = IPRES.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = IPRES.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
# Assemble the four ROC panels into one figure.
tiff("Others9_four_sets_IPRES.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_10 Inflammatory.Sig
# Inflammatory signature, 27 genes.
# BUG FIX: 'HKA.DRB1' was a garbled 'HLA-DRB1' (other entries in this file
# use the dash form, e.g. 'HLA-DRA', 'HLA-E'), so it never matched a symbol.
# NOTE(review): 'PDL1' is not an official HGNC symbol (CD274 is); left
# unchanged in case the expression matrices use it -- confirm.
Inflammatory.Sig <- c('CCL5','CCR5','PDL1','CD3D','CD3E','CD8A','CIITA','CTLA4','CXCL10','CXCL11','CXCL13','CXCL9','GZMA','GZMB','HLA-DRA','HLA-DRB1','HLA-E',
                      'IDO1','IL2RG','ITGAL','LAG3','NKG7','PDCD1','PRF1','PTPRC','STAT1','TAGAP')
Inflammatory.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = Inflammatory.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = Inflammatory.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = Inflammatory.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = Inflammatory.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
tiff("Others10_four_sets_Inflammatory.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_11 EMT.Sig
# Epithelial-mesenchymal transition (EMT) gene signature, evaluated on the same
# four cohorts with the shared testCV()/rocplot.single.V2() helpers.
EMT.Sig <- c('CDH1','CDH3','CLDN4','EPCAM','ST14','MAL2','VIM','SNAI2','ZEB2','FN1','MMP2','AGER')
EMT.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = EMT.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = EMT.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = EMT.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = EMT.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
# Save the four ROC curves as a 2x2 panel.
tiff("Others11_four_sets_EMT.Sig.tiff", width = 13, height = 13, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
###################################################################################################################################
# Other_12 Blood.Sig
# Blood-based gene signature, evaluated on the same four cohorts.
Blood.Sig <- c('ADAM17', 'CDK2', 'CDKN2A', 'DPP4', 'ERBB2', 'HLA-DRA', 'ICOS', 'ITGA4', 'LARGE', 'MYC', 'NAB2', 'NRAS', 'RHOC', 'TGFB1', 'TIMP1')
Blood.Sig
test0 <- testCV(expdata = GSE78220_AltAnalyze, phenoInfo = GSE78220_PhenoInfo2, features = Blood.Sig)[['result']]
p1 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE78220 data")
test0 <- testCV(expdata = BMS038.Pre.CountTable.normalized.log, phenoInfo = BMS038_PhenoInfo, features = Blood.Sig)[['result']]
p2 <- rocplot.single.V2(test0$label, test0$prediction, title = "GSE91061 data")
test0 <- testCV(expdata = CC_73samples_GE_matrix, phenoInfo = CC_73samples_pData, features = Blood.Sig)[['result']]
p3 <- rocplot.single.V2(test0$label, test0$prediction, title = "PRJEB23709 data")
test0 <- testCV(expdata = NatMed_103samples_GE_matrix, phenoInfo = NatMed_103samples_pData, features = Blood.Sig)[['result']]
p4 <- rocplot.single.V2(test0$label, test0$prediction, title = "MGSP data")
# Save the four ROC curves as a 2x2 panel (slightly larger canvas than the others).
tiff("Others12_four_sets_Blood.Sig.tiff", width = 13.5, height = 13.5, units = "in", res = 800)
plot_grid(p1, p2, p3 ,p4, labels = c("a", "b", "c", "d"),ncol = 2)
dev.off()
############################## PCA analysis:
# Genes present in all four expression matrices AND in the ImSig gene list
# (ImSig.genes is defined elsewhere in this file).
symbol_sets <- list(
  GSE78220_AltAnalyze$Symbol,
  CC_73samples_GE_matrix$Symbol,
  BMS038.Pre.CountTable.normalized.log$Symbol,
  NatMed_103samples_GE_matrix$Symbol
)
common_genes <- Reduce(intersect, symbol_sets)
common_genes <- intersect(common_genes, ImSig.genes)
# Subset an expression data.frame to a set of shared gene symbols.
#
# df:     data.frame with a 'Symbol' column plus one column per sample.
# common: character vector of gene symbols to keep.
#
# Returns df restricted to rows whose Symbol is in `common`, with the symbols
# moved to the rownames and the 'Symbol' column dropped.
getCommon <- function(df, common){
  # Fix: the original filtered on the global `common_genes` and ignored the
  # `common` parameter entirely; use the argument that was passed in.
  df <- df[df$Symbol %in% common, ]
  rownames(df) <- df$Symbol
  # drop = FALSE keeps a data.frame even if only one sample column remains.
  df[, colnames(df) != 'Symbol', drop = FALSE]
}
# PCA of the four immunotherapy RNA-seq cohorts on the shared gene set,
# with samples colored by dataset of origin.
GSE78220 <- getCommon(GSE78220_AltAnalyze, common_genes)
PRJEB23709 <- getCommon(CC_73samples_GE_matrix, common_genes)
GSE91061 <- getCommon(BMS038.Pre.CountTable.normalized.log, common_genes)
MGSP <- getCommon(NatMed_103samples_GE_matrix, common_genes)
merged <- cbind(GSE78220, PRJEB23709, GSE91061, MGSP)
# One dataset label per sample (column), in the same order as `merged`.
group_list <- c(
  rep('GSE78220', ncol(GSE78220)),
  rep('PRJEB23709', ncol(PRJEB23709)),
  rep('GSE91061', ncol(GSE91061)),
  rep('MGSP', ncol(MGSP))
)
# Samples as rows for PCA; variables scaled to unit variance.
dat.pca <- PCA(t(merged), graph = FALSE, scale.unit = TRUE)
g <- fviz_pca_ind(dat.pca,
                  repel = TRUE,
                  geom.ind = "point", # show points only (nbut not "text")
                  col.ind = group_list, # color by groups
                  # palette = c("#00AFBB", "#E7B800"),
                  addEllipses = TRUE, # Concentration ellipses
                  legend.title = "Dataset")
tiff("PCA_four_RNAseq_datasets.tiff", width = 8, height = 6, units = "in", res = 800)
print(g)
dev.off()
|
library(shiny)
# Unit-conversion constants. Fix: at top level `<<-` behaves like `<-` but
# signals (misleading) global mutation; use plain local assignment.
poundsToKilos <- 2.204   # 1 kg = 2.204 pounds
inchesToMeters <- 0.0254 # 1 inch = 0.0254 m
# Metric BMI formula where weight is in kg, and height is in meters
calculateBMI <- function(w,h) w/(h^2)
# Check the measurement units and convert to metric if it is imperial system,
# then return the BMI. Fix: the original used `<<-`, leaking wConv/hConv into
# the global environment as a side effect; plain local assignment suffices.
#
# wUnit:  'pounds' for imperial weight, anything else for metric.
# hUnit:  'f' for feet+inches, anything else for metric.
# hFeet, hInch: imperial height components (used only when hUnit == 'f').
# wPound: imperial weight (used only when wUnit == 'pounds').
# h, w:   metric height (m) and weight (kg).
calculate <- function(wUnit, hUnit, hFeet, hInch, wPound, h, w){
  wConv <- w
  hConv <- h
  if(wUnit == 'pounds'){
    wConv <- wPound/poundsToKilos
  }
  if(hUnit == 'f'){
    hConv <- (hFeet*12 + hInch)*inchesToMeters
  }
  calculateBMI(wConv, hConv)
}
# Shiny server: reactively computes BMI from the UI inputs and classifies it.
shinyServer(
  function(input, output){
    # Reactive BMI derived from the current unit selections and measurements.
    bmiVal <- reactive({
      calculate(input$weightUnit, input$heightUnit,
                as.numeric(input$heightFeet), as.numeric(input$heightInch),
                input$weightPounds, input$height, input$weight)
    })
    output$bmi <- renderText({ bmiVal() })
    # Assign BMI category according to calculation result
    observe({
      value <- bmiVal()
      category <- if (value >= 30) {
        'Obese'
      } else if (value >= 25) {
        'Overweight'
      } else if (value >= 18.5) {
        'Normal weight'
      } else {
        'Underweight'
      }
      output$result <- renderText(category)
    })
  }
)
|
/server.R
|
no_license
|
naiavu/DDP_Coursera
|
R
| false
| false
| 1,348
|
r
|
library(shiny)
# Unit-conversion constants. Fix: at top level `<<-` behaves like `<-` but
# signals (misleading) global mutation; use plain local assignment.
poundsToKilos <- 2.204   # 1 kg = 2.204 pounds
inchesToMeters <- 0.0254 # 1 inch = 0.0254 m
# Metric BMI formula where weight is in kg, and height is in meters
calculateBMI <- function(w,h) w/(h^2)
# Check the measurement units and convert to metric if it is imperial system,
# then return the BMI. Fix: the original used `<<-`, leaking wConv/hConv into
# the global environment as a side effect; plain local assignment suffices.
#
# wUnit:  'pounds' for imperial weight, anything else for metric.
# hUnit:  'f' for feet+inches, anything else for metric.
# hFeet, hInch: imperial height components (used only when hUnit == 'f').
# wPound: imperial weight (used only when wUnit == 'pounds').
# h, w:   metric height (m) and weight (kg).
calculate <- function(wUnit, hUnit, hFeet, hInch, wPound, h, w){
  wConv <- w
  hConv <- h
  if(wUnit == 'pounds'){
    wConv <- wPound/poundsToKilos
  }
  if(hUnit == 'f'){
    hConv <- (hFeet*12 + hInch)*inchesToMeters
  }
  calculateBMI(wConv, hConv)
}
# Shiny server: reactively computes BMI from the UI inputs and classifies it.
shinyServer(
  function(input, output){
    # Reactive BMI derived from the current unit selections and measurements.
    bmiVal <- reactive({
      calculate(input$weightUnit, input$heightUnit,
                as.numeric(input$heightFeet), as.numeric(input$heightInch),
                input$weightPounds, input$height, input$weight)
    })
    output$bmi <- renderText({ bmiVal() })
    # Assign BMI category according to calculation result
    observe({
      value <- bmiVal()
      category <- if (value >= 30) {
        'Obese'
      } else if (value >= 25) {
        'Overweight'
      } else if (value >= 18.5) {
        'Normal weight'
      } else {
        'Underweight'
      }
      output$result <- renderText(category)
    })
  }
)
|
# Exploratory plot: total US PM2.5 emissions per year from the NEI summary data.
NEI <- readRDS("data/exdata_data_NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("data/exdata_data_NEI_data/Source_Classification_Code.rds")
png(filename = "datasciencecoursera/Exploratory Data Analysis/figure/plot1.png",
    width = 480, height = 380, units = "px", pointsize = 12, bg = "white")
# Sum emissions within each year; aggregate() yields (year, total) pairs.
yearlyTotals <- aggregate(NEI$Emissions, list(NEI$year), FUN = "sum")
plot(yearlyTotals,
     type = "l",
     xlab = "Year",
     ylab = 'Total PM2.5 Emission',
     main = 'Total Emissions from 1999 to 2008 (United States)')
dev.off()
|
/Exploratory Data Analysis/Assignment2.R
|
no_license
|
siddharth012/datasciencecoursera-1
|
R
| false
| false
| 525
|
r
|
# Exploratory plot: total US PM2.5 emissions per year from the NEI summary data.
NEI <- readRDS("data/exdata_data_NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("data/exdata_data_NEI_data/Source_Classification_Code.rds")
png(filename = "datasciencecoursera/Exploratory Data Analysis/figure/plot1.png",
    width = 480, height = 380, units = "px", pointsize = 12, bg = "white")
# Sum emissions within each year; aggregate() yields (year, total) pairs.
yearlyTotals <- aggregate(NEI$Emissions, list(NEI$year), FUN = "sum")
plot(yearlyTotals,
     type = "l",
     xlab = "Year",
     ylab = 'Total PM2.5 Emission',
     main = 'Total Emissions from 1999 to 2008 (United States)')
dev.off()
|
# Getting & Cleaning Data project: merge the UCI HAR train/test splits, keep
# mean/std features, attach readable activity labels, and write tidy outputs.
library(plyr)
library(greport)
rm(list=ls())  # NOTE(review): clearing the workspace in a script is an anti-pattern; kept for compatibility.
#Loading Data
trainD <- read.table("X_train.txt")
trainL <- read.table("y_train.txt")
trainSub <- read.table("subject_train.txt")
testD <- read.table("X_test.txt")
testL <- read.table("y_test.txt")
testSub <- read.table("subject_test.txt")
#Preparations
feat <- read.table("features.txt",col.names=c("featID", "fLabel"))
# Indices of the features that are mean() or std() measurements.
mean_stdev <- grep("-mean\\(\\)|-std\\(\\)", feat$fLabel)
acts <- read.table("activity_labels.txt", col.names=c("actID", "aLabel"))
acts$aLabel <- gsub("_", "", as.character(acts$aLabel))
# Merging datasets
comSub <- rbind(testSub, trainSub)
names(comSub) <- "Id"
comD <- rbind(testD, trainD)
comD <- comD[, mean_stdev]
names(comD) <- gsub("\\(|\\)", "", feat$fLabel[mean_stdev])
comL <- rbind(testL, trainL)
names(comL) <- "actID"
# Fix: merge() sorts the result by the key column, which silently misaligned
# activity labels with the measurement rows; match() preserves row order.
act <- acts$aLabel[match(comL$actID, acts$actID)]
# Merging data frames
# Fix: name the label column "activity" so the data.table grouping below can
# find it; the original cbind(..., act) produced a column named "act" and the
# by = c("Id", "activity") grouping would fail.
tab <- cbind(comSub, comD, activity = act)
write.table(tab, "tidy_data.txt")
# create a dataset grouped by subject and activity after applying standard deviation and average calculations
library(data.table)
DT <- data.table(tab)
cDT <- DT[, lapply(.SD, mean), by=c("Id", "activity")]
write.table(cDT, "mean.txt")
|
/run_analysis.R
|
no_license
|
Belphegorus/Getting-Cleaning-Data
|
R
| false
| false
| 1,309
|
r
|
# Getting & Cleaning Data project: merge the UCI HAR train/test splits, keep
# mean/std features, attach readable activity labels, and write tidy outputs.
library(plyr)
library(greport)
rm(list=ls())  # NOTE(review): clearing the workspace in a script is an anti-pattern; kept for compatibility.
#Loading Data
trainD <- read.table("X_train.txt")
trainL <- read.table("y_train.txt")
trainSub <- read.table("subject_train.txt")
testD <- read.table("X_test.txt")
testL <- read.table("y_test.txt")
testSub <- read.table("subject_test.txt")
#Preparations
feat <- read.table("features.txt",col.names=c("featID", "fLabel"))
# Indices of the features that are mean() or std() measurements.
mean_stdev <- grep("-mean\\(\\)|-std\\(\\)", feat$fLabel)
acts <- read.table("activity_labels.txt", col.names=c("actID", "aLabel"))
acts$aLabel <- gsub("_", "", as.character(acts$aLabel))
# Merging datasets
comSub <- rbind(testSub, trainSub)
names(comSub) <- "Id"
comD <- rbind(testD, trainD)
comD <- comD[, mean_stdev]
names(comD) <- gsub("\\(|\\)", "", feat$fLabel[mean_stdev])
comL <- rbind(testL, trainL)
names(comL) <- "actID"
# Fix: merge() sorts the result by the key column, which silently misaligned
# activity labels with the measurement rows; match() preserves row order.
act <- acts$aLabel[match(comL$actID, acts$actID)]
# Merging data frames
# Fix: name the label column "activity" so the data.table grouping below can
# find it; the original cbind(..., act) produced a column named "act" and the
# by = c("Id", "activity") grouping would fail.
tab <- cbind(comSub, comD, activity = act)
write.table(tab, "tidy_data.txt")
# create a dataset grouped by subject and activity after applying standard deviation and average calculations
library(data.table)
DT <- data.table(tab)
cDT <- DT[, lapply(.SD, mean), by=c("Id", "activity")]
write.table(cDT, "mean.txt")
|
# Purled R code from an "Introduction to R" teaching vignette. The `## ----`
# markers are knitr chunk separators, so this file appears to be knitr::purl
# output — prefer editing the .Rmd source rather than this file.
## ---- include = FALSE----------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ----setup, include=FALSE------------------------
knitr::opts_chunk$set(echo = TRUE)
## ------------------------------------------------
X <- 4
## ------------------------------------------------
library(palmerpenguins)
data(penguins)
attributes(penguins)
## ------------------------------------------------
str(penguins)
names(penguins) #ls(penguins) provides this as well
## ------------------------------------------------
X
## ------------------------------------------------
penguins$species
## ------------------------------------------------
# Vector construction demos: colon, seq(), c(), rep().
A <- 1:20
A
B <- seq(from = 1, to = 20, by = 1)
B
C <- c("cheese", "is", "great")
C
D <- rep(1, times = 30)
D
## ------------------------------------------------
class(A)
class(C)
class(penguins)
class(penguins$species)
## ------------------------------------------------
dim(penguins) #rows, columns
length(penguins)
length(penguins$species)
## ------------------------------------------------
output <- lm(flipper_length_mm ~ bill_length_mm, data = penguins)
str(output)
output$coefficients
## ------------------------------------------------
myMatrix <- matrix(data = 1:10,
                   nrow = 5,
                   ncol = 2)
myMatrix
## ------------------------------------------------
penguins[1, 2:3]
penguins$sex[4:25] #why no comma?
## ------------------------------------------------
X <- 1:5
Y <- 6:10
# I can use either because they are the same size
cbind(X,Y)
rbind(X,Y)
## ------------------------------------------------
ls()
ls(penguins)
## ------------------------------------------------
newDF <- as.data.frame(cbind(X,Y))
str(newDF)
# Deliberate demo: "one"/"two" cannot be parsed, so this warns and yields NAs.
as.numeric(c("one", "two", "3"))
## ------------------------------------------------
penguins[1:2,] #just the first two rows
penguins[penguins$bill_length_mm > 54 , ] #how does this work?
penguins$bill_length_mm > 54
## ------------------------------------------------
#you can create complex rules
penguins[penguins$bill_length_mm > 54 & penguins$bill_depth_mm > 17, ]
#you can do all BUT
penguins[ , -1]
#grab a few columns by name
vars <- c("bill_length_mm", "sex")
penguins[ , vars]
## ------------------------------------------------
#another function
#notice any differences?
subset(penguins, bill_length_mm > 54)
#other functions include filter() in tidyverse
## ------------------------------------------------
head(complete.cases(penguins)) #creates logical
head(na.omit(penguins)) #creates actual rows
head(is.na(penguins$body_mass_g)) #for individual vectors
## ------------------------------------------------
getwd()
## ----eval = F------------------------------------
# setwd("/Users/buchanan/OneDrive - Harrisburg University/Teaching/ANLY 580/updated/1 Introduction R")
## ------------------------------------------------
library(rio)
myDF <- import("data/example_introR.csv")
head(myDF)
## ----eval = F------------------------------------
# install.packages("car")
## ------------------------------------------------
library(car)
## ----eval = F------------------------------------
# ?lm
# help(lm)
## ----eval = F------------------------------------
# args(lm)
# example(lm)
## ------------------------------------------------
pizza <- function(x){ x^2 }
pizza(3)
## ------------------------------------------------
table(penguins$species)
summary(penguins$bill_length_mm)
## ------------------------------------------------
# Deliberate demo: NA in the column propagates unless na.rm = TRUE is given.
mean(penguins$bill_length_mm) #returns NA
mean(penguins$bill_length_mm, na.rm = TRUE)
cor(penguins[ , c("bill_length_mm", "bill_depth_mm", "flipper_length_mm")])
cor(penguins[ , c("bill_length_mm", "bill_depth_mm", "flipper_length_mm")],
    use = "pairwise.complete.obs")
|
/inst/doc/Introduction-to-R.R
|
no_license
|
Hamrita/StatMath_R
|
R
| false
| false
| 3,782
|
r
|
# Purled R code from an "Introduction to R" teaching vignette. The `## ----`
# markers are knitr chunk separators, so this file appears to be knitr::purl
# output — prefer editing the .Rmd source rather than this file.
## ---- include = FALSE----------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ----setup, include=FALSE------------------------
knitr::opts_chunk$set(echo = TRUE)
## ------------------------------------------------
X <- 4
## ------------------------------------------------
library(palmerpenguins)
data(penguins)
attributes(penguins)
## ------------------------------------------------
str(penguins)
names(penguins) #ls(penguins) provides this as well
## ------------------------------------------------
X
## ------------------------------------------------
penguins$species
## ------------------------------------------------
# Vector construction demos: colon, seq(), c(), rep().
A <- 1:20
A
B <- seq(from = 1, to = 20, by = 1)
B
C <- c("cheese", "is", "great")
C
D <- rep(1, times = 30)
D
## ------------------------------------------------
class(A)
class(C)
class(penguins)
class(penguins$species)
## ------------------------------------------------
dim(penguins) #rows, columns
length(penguins)
length(penguins$species)
## ------------------------------------------------
output <- lm(flipper_length_mm ~ bill_length_mm, data = penguins)
str(output)
output$coefficients
## ------------------------------------------------
myMatrix <- matrix(data = 1:10,
                   nrow = 5,
                   ncol = 2)
myMatrix
## ------------------------------------------------
penguins[1, 2:3]
penguins$sex[4:25] #why no comma?
## ------------------------------------------------
X <- 1:5
Y <- 6:10
# I can use either because they are the same size
cbind(X,Y)
rbind(X,Y)
## ------------------------------------------------
ls()
ls(penguins)
## ------------------------------------------------
newDF <- as.data.frame(cbind(X,Y))
str(newDF)
# Deliberate demo: "one"/"two" cannot be parsed, so this warns and yields NAs.
as.numeric(c("one", "two", "3"))
## ------------------------------------------------
penguins[1:2,] #just the first two rows
penguins[penguins$bill_length_mm > 54 , ] #how does this work?
penguins$bill_length_mm > 54
## ------------------------------------------------
#you can create complex rules
penguins[penguins$bill_length_mm > 54 & penguins$bill_depth_mm > 17, ]
#you can do all BUT
penguins[ , -1]
#grab a few columns by name
vars <- c("bill_length_mm", "sex")
penguins[ , vars]
## ------------------------------------------------
#another function
#notice any differences?
subset(penguins, bill_length_mm > 54)
#other functions include filter() in tidyverse
## ------------------------------------------------
head(complete.cases(penguins)) #creates logical
head(na.omit(penguins)) #creates actual rows
head(is.na(penguins$body_mass_g)) #for individual vectors
## ------------------------------------------------
getwd()
## ----eval = F------------------------------------
# setwd("/Users/buchanan/OneDrive - Harrisburg University/Teaching/ANLY 580/updated/1 Introduction R")
## ------------------------------------------------
library(rio)
myDF <- import("data/example_introR.csv")
head(myDF)
## ----eval = F------------------------------------
# install.packages("car")
## ------------------------------------------------
library(car)
## ----eval = F------------------------------------
# ?lm
# help(lm)
## ----eval = F------------------------------------
# args(lm)
# example(lm)
## ------------------------------------------------
pizza <- function(x){ x^2 }
pizza(3)
## ------------------------------------------------
table(penguins$species)
summary(penguins$bill_length_mm)
## ------------------------------------------------
# Deliberate demo: NA in the column propagates unless na.rm = TRUE is given.
mean(penguins$bill_length_mm) #returns NA
mean(penguins$bill_length_mm, na.rm = TRUE)
cor(penguins[ , c("bill_length_mm", "bill_depth_mm", "flipper_length_mm")])
cor(penguins[ , c("bill_length_mm", "bill_depth_mm", "flipper_length_mm")],
    use = "pairwise.complete.obs")
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{symb2I}
\alias{symb2I}
\title{Convert names of pathogen/combinations into 0/1 coding}
\usage{
symb2I(pathogen_name, pathogen_list)
}
\arguments{
\item{pathogen_name}{The allowed pathogen name (can be a combination of pathogens in "pathogen_list")}
\item{pathogen_list}{The complete list of pathogen names}
}
\value{
A length(pathogen_name) by length(pathogen_list) matrix of binary code (usually for pathogen presence/absence)
}
\description{
Convert names of pathogen/combinations into 0/1 coding
}
\examples{
symb2I("A",c("A","B","C"))
symb2I("A+B",c("A","B","C"))
symb2I("NoA",c("A","B","C"))
symb2I(c("A","B+C"),c("A","B","C")) # gives a 2 by 3 matrix.
}
|
/man/symb2I.Rd
|
permissive
|
swihart/nplcm
|
R
| false
| false
| 742
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{symb2I}
\alias{symb2I}
\title{Convert names of pathogen/combinations into 0/1 coding}
\usage{
symb2I(pathogen_name, pathogen_list)
}
\arguments{
\item{pathogen_name}{The allowed pathogen name (can be a combination of pathogens in "pathogen_list")}
\item{pathogen_list}{The complete list of pathogen names}
}
\value{
A length(pathogen_name) by length(pathogen_list) matrix of binary code (usually for pathogen presence/absence)
}
\description{
Convert names of pathogen/combinations into 0/1 coding
}
\examples{
symb2I("A",c("A","B","C"))
symb2I("A+B",c("A","B","C"))
symb2I("NoA",c("A","B","C"))
symb2I(c("A","B+C"),c("A","B","C")) # gives a 2 by 3 matrix.
}
|
# Iowa
# Iowa's 2023 NCAA women's tournament games, fetched one box score at a time
# via the project-level ncaag() helper (defined elsewhere in the repo).
ncaag('https://www.sports-reference.com/cbb/boxscores/2023-03-17-01-iowa_w.html')
ncaag('https://www.sports-reference.com/cbb/boxscores/2023-03-19-01-iowa_w.html')
ncaag('https://www.sports-reference.com/cbb/boxscores/2023-03-24-03-iowa_w.html')
ncaag('https://www.sports-reference.com/cbb/boxscores/2023-03-26-03-iowa_w.html')
# NOTE(review): the empty-string calls below look like unfilled placeholders for
# later rounds; ncaag('') will presumably fail — fill in the URLs or remove them.
ncaag('')
ncaag('')
|
/scripts/tournament2023w.r
|
no_license
|
sportModel/basketball
|
R
| false
| false
| 355
|
r
|
# Iowa
# Iowa's 2023 NCAA women's tournament games, fetched one box score at a time
# via the project-level ncaag() helper (defined elsewhere in the repo).
ncaag('https://www.sports-reference.com/cbb/boxscores/2023-03-17-01-iowa_w.html')
ncaag('https://www.sports-reference.com/cbb/boxscores/2023-03-19-01-iowa_w.html')
ncaag('https://www.sports-reference.com/cbb/boxscores/2023-03-24-03-iowa_w.html')
ncaag('https://www.sports-reference.com/cbb/boxscores/2023-03-26-03-iowa_w.html')
# NOTE(review): the empty-string calls below look like unfilled placeholders for
# later rounds; ncaag('') will presumably fail — fill in the URLs or remove them.
ncaag('')
ncaag('')
|
# Abstract base class for fit/transform-style components: subclasses must
# override fit() and transform(); fit_transform() chains them by default.
Transformer <- R6::R6Class(
  classname = "Transformer",
  public = list(
    # Learn parameters from X (and optionally y). Must be overridden.
    fit = function(X, y = NULL, ...) stop('Undefined'),
    fit_transform = function(X, y = NULL, ...) {
      # Override method if "transform" can be efficiently implemented immediately after fit.
      self$fit(X, y)
      self$transform(X)
    },
    # Apply the learned transformation to X. Must be overridden.
    transform = function(X, y = NULL, ...) stop('Undefined')
  )
)
# Tokenizer configuration base class: validates and stores term filters, alias
# maps, casing, n-gram size, minimum term length and stemming language.
# Subclasses react to configuration changes via private$config_updated().
Tokenizer <- R6::R6Class(
  classname = 'Tokenizer',
  inherit = Transformer,
  public = list(
    ignored_terms = as.character(c()),
    casing_transformation = NA,
    case_sensitive_aliases = as.character(c()),
    case_insensitive_aliases = as.character(c()),
    ngrams_size = 1,
    min_term_length = 1,
    stemming_language = NA,
    initialize = function(ignored_terms = as.character(c()),
                          case_sensitive_aliases = as.character(c()),
                          case_insensitive_aliases = as.character(c()),
                          casing_transformation = NA,
                          ngrams_size = 1L,
                          min_term_length = 1L,
                          stemming_language = NA) {
      if(!is.character(ignored_terms)) stop('ignored_terms should be of type character')
      self$ignored_terms <- ignored_terms
      if(!is.character(case_sensitive_aliases)) stop('case_sensitive_aliases should be of type character')
      if(length(case_sensitive_aliases) > 0) {
        # Fix: the original compared unique(names(x)) against unique(names(x)),
        # which is always equal, so missing or duplicated alias names were never
        # detected. Check the names vector directly instead.
        nms <- names(case_sensitive_aliases)
        if(is.null(nms) || any(nms == '') || anyDuplicated(nms) > 0) stop('case_sensitive_aliases should be a named character array with unique names')
        self$case_sensitive_aliases <- case_sensitive_aliases
      }
      if(!is.character(case_insensitive_aliases)) stop('case_insensitive_aliases should be of type character')
      if(length(case_insensitive_aliases) > 0) {
        # Same fix as above for the case-insensitive alias map.
        nms <- names(case_insensitive_aliases)
        if(is.null(nms) || any(nms == '') || anyDuplicated(nms) > 0) stop('case_insensitive_aliases should be a named character array with unique names')
        self$case_insensitive_aliases <- case_insensitive_aliases
      }
      if(anyNA(casing_transformation) || is.null(casing_transformation)) casing_transformation <- ''
      if(!casing_transformation %in% c('lower', '')) stop('Supported casing_transformation values are: lower or NA')
      self$casing_transformation <- casing_transformation
      if(anyNA(ngrams_size) || is.null(ngrams_size)) ngrams_size <- 1
      if(is.numeric(ngrams_size)) ngrams_size <- as.integer(ngrams_size)
      if(!is.integer(ngrams_size) || ngrams_size[1] < 1) stop('ngrams_size should be a integer >= 1')
      self$ngrams_size <- ngrams_size[1]
      # Fix: min_term_length was stored without any validation, unlike
      # ngrams_size; apply the same coercion and bounds check.
      if(anyNA(min_term_length) || is.null(min_term_length)) min_term_length <- 1
      if(is.numeric(min_term_length)) min_term_length <- as.integer(min_term_length)
      if(!is.integer(min_term_length) || min_term_length[1] < 1) stop('min_term_length should be a integer >= 1')
      self$min_term_length <- min_term_length[1]
      if(anyNA(stemming_language) || is.null(stemming_language)) stemming_language <- ''
      if(!is.character(stemming_language)) stop('stemming_language should be of type character')
      if(!stemming_language[1] %in% c('english', 'spanish', '')) stop(sprintf('Unsupported stemming language "%s"', stemming_language))
      self$stemming_language <- stemming_language[1]
      # Let the concrete tokenizer rebuild any state derived from the config.
      private$config_updated()
    }
  ),
  private = list(
    config_updated = function() stop('Undefined')
  )
)
# Base class for vocabulary-backed vectorizers. Extends Tokenizer with a term
# vocabulary and a tokenize() hook that concrete subclasses must implement.
Vectorizer <- R6::R6Class(
  classname = "Vectorizer",
  inherit = Tokenizer,
  public = list(
    vocabulary = NULL,
    initialize = function(vocabulary = as.character(c()), ...) {
      if(!is.character(vocabulary)) stop('vocabulary should be of type character')
      self$vocabulary <- vocabulary
      # Remaining arguments are validated by Tokenizer$initialize.
      super$initialize(...)
    },
    # Split raw documents into tokens. Must be overridden by subclasses.
    tokenize = function(X) stop('Undefined'),
    set_vocabulary = function(vocabulary) {
      if(!is.character(vocabulary)) stop('vocabulary should be of type character')
      self$vocabulary <- vocabulary
      # Notify the implementation so vocabulary-derived caches are rebuilt.
      private$config_updated()
    }
  )
)
|
/R/base.R
|
no_license
|
schmidtfederico/txtlib
|
R
| false
| false
| 4,387
|
r
|
# Abstract base class for fit/transform-style components: subclasses must
# override fit() and transform(); fit_transform() chains them by default.
Transformer <- R6::R6Class(
  classname = "Transformer",
  public = list(
    # Learn parameters from X (and optionally y). Must be overridden.
    fit = function(X, y = NULL, ...) stop('Undefined'),
    fit_transform = function(X, y = NULL, ...) {
      # Override method if "transform" can be efficiently implemented immediately after fit.
      self$fit(X, y)
      self$transform(X)
    },
    # Apply the learned transformation to X. Must be overridden.
    transform = function(X, y = NULL, ...) stop('Undefined')
  )
)
# Tokenizer configuration base class: validates and stores term filters, alias
# maps, casing, n-gram size, minimum term length and stemming language.
# Subclasses react to configuration changes via private$config_updated().
Tokenizer <- R6::R6Class(
  classname = 'Tokenizer',
  inherit = Transformer,
  public = list(
    ignored_terms = as.character(c()),
    casing_transformation = NA,
    case_sensitive_aliases = as.character(c()),
    case_insensitive_aliases = as.character(c()),
    ngrams_size = 1,
    min_term_length = 1,
    stemming_language = NA,
    initialize = function(ignored_terms = as.character(c()),
                          case_sensitive_aliases = as.character(c()),
                          case_insensitive_aliases = as.character(c()),
                          casing_transformation = NA,
                          ngrams_size = 1L,
                          min_term_length = 1L,
                          stemming_language = NA) {
      if(!is.character(ignored_terms)) stop('ignored_terms should be of type character')
      self$ignored_terms <- ignored_terms
      if(!is.character(case_sensitive_aliases)) stop('case_sensitive_aliases should be of type character')
      if(length(case_sensitive_aliases) > 0) {
        # Fix: the original compared unique(names(x)) against unique(names(x)),
        # which is always equal, so missing or duplicated alias names were never
        # detected. Check the names vector directly instead.
        nms <- names(case_sensitive_aliases)
        if(is.null(nms) || any(nms == '') || anyDuplicated(nms) > 0) stop('case_sensitive_aliases should be a named character array with unique names')
        self$case_sensitive_aliases <- case_sensitive_aliases
      }
      if(!is.character(case_insensitive_aliases)) stop('case_insensitive_aliases should be of type character')
      if(length(case_insensitive_aliases) > 0) {
        # Same fix as above for the case-insensitive alias map.
        nms <- names(case_insensitive_aliases)
        if(is.null(nms) || any(nms == '') || anyDuplicated(nms) > 0) stop('case_insensitive_aliases should be a named character array with unique names')
        self$case_insensitive_aliases <- case_insensitive_aliases
      }
      if(anyNA(casing_transformation) || is.null(casing_transformation)) casing_transformation <- ''
      if(!casing_transformation %in% c('lower', '')) stop('Supported casing_transformation values are: lower or NA')
      self$casing_transformation <- casing_transformation
      if(anyNA(ngrams_size) || is.null(ngrams_size)) ngrams_size <- 1
      if(is.numeric(ngrams_size)) ngrams_size <- as.integer(ngrams_size)
      if(!is.integer(ngrams_size) || ngrams_size[1] < 1) stop('ngrams_size should be a integer >= 1')
      self$ngrams_size <- ngrams_size[1]
      # Fix: min_term_length was stored without any validation, unlike
      # ngrams_size; apply the same coercion and bounds check.
      if(anyNA(min_term_length) || is.null(min_term_length)) min_term_length <- 1
      if(is.numeric(min_term_length)) min_term_length <- as.integer(min_term_length)
      if(!is.integer(min_term_length) || min_term_length[1] < 1) stop('min_term_length should be a integer >= 1')
      self$min_term_length <- min_term_length[1]
      if(anyNA(stemming_language) || is.null(stemming_language)) stemming_language <- ''
      if(!is.character(stemming_language)) stop('stemming_language should be of type character')
      if(!stemming_language[1] %in% c('english', 'spanish', '')) stop(sprintf('Unsupported stemming language "%s"', stemming_language))
      self$stemming_language <- stemming_language[1]
      # Let the concrete tokenizer rebuild any state derived from the config.
      private$config_updated()
    }
  ),
  private = list(
    config_updated = function() stop('Undefined')
  )
)
# Base class for vocabulary-backed vectorizers. Extends Tokenizer with a term
# vocabulary and a tokenize() hook that concrete subclasses must implement.
Vectorizer <- R6::R6Class(
  classname = "Vectorizer",
  inherit = Tokenizer,
  public = list(
    vocabulary = NULL,
    initialize = function(vocabulary = as.character(c()), ...) {
      if(!is.character(vocabulary)) stop('vocabulary should be of type character')
      self$vocabulary <- vocabulary
      # Remaining arguments are validated by Tokenizer$initialize.
      super$initialize(...)
    },
    # Split raw documents into tokens. Must be overridden by subclasses.
    tokenize = function(X) stop('Undefined'),
    set_vocabulary = function(vocabulary) {
      if(!is.character(vocabulary)) stop('vocabulary should be of type character')
      self$vocabulary <- vocabulary
      # Notify the implementation so vocabulary-derived caches are rebuilt.
      private$config_updated()
    }
  )
)
|
# Unit tests for the package's user-facing functions (merge_ibaq,
# get_stoichiometry, plot_stoichiometry, plot_ibaq, iBAQ, run_app).
# Fixture objects (test_data, test_pep, test_dep, test_ibaq, test_stoi,
# test_result) are assumed to be loaded by the testthat helper files.
# expect_is() is deprecated in testthat 3e and has been replaced with
# expect_s3_class() throughout; "rigth" typos in test names were fixed.
context("Functions")

## merge_ibaq ------------------------------------------------------------
test_that("merge_ibaq throws error without valid input", {
  expect_error(merge_ibaq("test_data", test_pep))
  expect_error(merge_ibaq(test_data, "test_pep"))
  expect_error(merge_ibaq(test_data[,-(30)], test_pep))
  expect_error(merge_ibaq(test_data[,-(31)], test_pep))
  expect_error(merge_ibaq(test_data[,-(15:20)], test_pep))
  expect_error(merge_ibaq(test_data, test_pep[,-(14)]))
  expect_error(merge_ibaq(test_data, test_pep[,-(6)]))
})
test_that("merge_ibaq returns a data.frame", {
  expect_s3_class(merge_ibaq(test_data, test_pep), "data.frame")
  expect_s3_class(merge_ibaq(tibble::as_tibble(test_data), test_pep), "data.frame")
  expect_s3_class(merge_ibaq(test_data, tibble::as_tibble(test_pep)), "data.frame")
})
test_that("merge_ibaq returns an object with the right dimensions and columns", {
  result <- merge_ibaq(test_data, test_pep)
  expect_equal(grep("iBAQ", colnames(result)), 4:9)
  expect_equal(dim(result), c(359,10))
})

## get_stoichiometry -----------------------------------------------------
test_that("get_stoichiometry throws error without valid input", {
  expect_error(get_stoichiometry("test_dep", test_ibaq, "GFP_vs_WT", "Rbbp4", 1))
  expect_error(get_stoichiometry(test_dep, "test_ibaq", "GFP_vs_WT", "Rbbp4", 1))
  expect_error(get_stoichiometry(test_dep, test_ibaq, GFP_vs_WT, "Rbbp4", 1))
  expect_error(get_stoichiometry(test_dep, test_ibaq, "GFP_vs_WT", Rbbp4, 1))
  expect_error(get_stoichiometry(test_dep, test_ibaq, "GFP_vs_WT", "Rbbp4", "1"))
  expect_error(get_stoichiometry(test_dep, test_ibaq[,-(2:3)], "GFP_vs_WT", "Rbbp4", 1))
  expect_error(get_stoichiometry(test_dep, test_ibaq[,-(4:9)], "GFP_vs_WT", "Rbbp4", 1))
  # Malformed rowData variants must also be rejected.
  test_ibaq_sign_error1 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error1) <- SummarizedExperiment::rowData(test_ibaq_sign_error1)[,-(1)]
  expect_error(get_stoichiometry(test_ibaq_sign_error1, test_ibaq, "GFP_vs_WT", "Rbbp4", 1))
  test_ibaq_sign_error2 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error2) <- SummarizedExperiment::rowData(test_ibaq_sign_error2)[,-c(31,32)]
  expect_error(get_stoichiometry(test_ibaq_sign_error2, test_ibaq, "GFP_vs_WT", "Rbbp4", 1))
  test_ibaq_sign_error3 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error3) <- SummarizedExperiment::rowData(test_ibaq_sign_error3)[,-(35)]
  expect_error(get_stoichiometry(test_ibaq_sign_error3, test_ibaq, "GFP_vs_WT", "Rbbp4", 1))
})
test_that("get_stoichiometry returns a data.frame", {
  expect_s3_class(get_stoichiometry(test_dep, test_ibaq, "GFP_vs_WT", "Rbbp4", 1), "data.frame")
})
test_that("get_stoichiometry returns an object with the right dimensions and columns", {
  result <- get_stoichiometry(test_dep, test_ibaq, "GFP_vs_WT", "Rbbp4", 1)
  # The bait protein's stoichiometry is normalised to 1.
  expect_equal(result$stoichiometry[result$name == "Rbbp4"], 1)
  expect_equal(dim(result), c(5,4))
})

## plot_stoichiometry ----------------------------------------------------
test_that("plot_stoichiometry throws error without valid input", {
  expect_error(plot_stoichiometry("test_stoi", 0.001, NULL))
  expect_error(plot_stoichiometry(test_stoi, "0.001", NULL))
  expect_error(plot_stoichiometry(test_stoi, 0.001, "0.05"))
  expect_error(plot_stoichiometry(test_stoi[,-(1)], 0.001, NULL))
  expect_error(plot_stoichiometry(test_stoi[,-(2)], 0.001, NULL))
  expect_error(plot_stoichiometry(test_stoi[,-(3)], 0.001, NULL))
  expect_error(plot_stoichiometry(test_stoi[,-(4)], 0.001, NULL))
})
test_that("plot_stoichiometry returns a ggplot object", {
  expect_s3_class(plot_stoichiometry(test_stoi, 0.001), "ggplot")
  expect_s3_class(plot_stoichiometry(test_stoi, 0.001, 0.5), "ggplot")
})

## plot_ibaq -------------------------------------------------------------
test_that("plot_ibaq throws error without valid input", {
  expect_error(plot_ibaq("test_dep", "GFP_vs_WT", 3))
  expect_error(plot_ibaq(test_dep, GFP_vs_WT, 3))
  expect_error(plot_ibaq(test_dep, "GFP_vs_WT", "3"))
  expect_error(plot_ibaq(test_dep, "test", 3))
  test_ibaq_sign_error1 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error1) <- SummarizedExperiment::rowData(test_ibaq_sign_error1)[,-(1)]
  expect_error(plot_ibaq(test_ibaq_sign_error1, "GFP_vs_WT", 3))
  test_ibaq_sign_error2 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error2) <- SummarizedExperiment::rowData(test_ibaq_sign_error2)[,-c(31,32)]
  expect_error(plot_ibaq(test_ibaq_sign_error2, "GFP_vs_WT", 3))
  test_ibaq_sign_error3 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error3) <- SummarizedExperiment::rowData(test_ibaq_sign_error3)[,-(16:21)]
  expect_error(plot_ibaq(test_ibaq_sign_error3, "GFP_vs_WT", 3))
  test_ibaq_sign_error4 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error4) <- SummarizedExperiment::rowData(test_ibaq_sign_error4)[,-(35)]
  expect_error(plot_ibaq(test_ibaq_sign_error4, "GFP_vs_WT", 3))
})
test_that("plot_ibaq returns a ggplot object", {
  expect_s3_class(plot_ibaq(test_dep, "GFP_vs_WT", 3), "ggplot")
})

## iBAQ ------------------------------------------------------------------
test_that("iBAQ throws error without valid input", {
  expect_error(iBAQ("test_result", test_pep, "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, "test_pep", "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep, GFP_vs_WT, "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep, "GFP_vs_WT", Rbbp4))
  expect_error(iBAQ(test_result[-(1)], test_pep, "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep[,-(6)], "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep[,-(14)], "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep, "test", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep, "GFP_vs_WT", "test"))
  result_error <- test_result
  result_error$data <- result_error$data[,-(16:21)]
  expect_error(iBAQ(result_error, test_pep, "GFP_vs_WT", "Rbbp4"))
  result_error2 <- test_result
  SummarizedExperiment::rowData(result_error2$dep) <- SummarizedExperiment::rowData(result_error2$dep)[,-(1)]
  expect_error(iBAQ(result_error2, test_pep, "GFP_vs_WT", "Rbbp4"))
  result_error3 <- test_result
  SummarizedExperiment::rowData(result_error3$dep) <- SummarizedExperiment::rowData(result_error3$dep)[,-c(31,35)]
  expect_error(iBAQ(result_error3, test_pep, "GFP_vs_WT", "Rbbp4"))
})
test_that("iBAQ returns a data.frame", {
  expect_s3_class(iBAQ(test_result, test_pep, "GFP_vs_WT", "Rbbp4", level = 1), "data.frame")
  expect_s3_class(iBAQ(test_result, test_pep, "GFP_vs_WT", "Rbbp4", level = 1L), "data.frame")
  expect_s3_class(iBAQ(test_result, tibble::as_tibble(test_pep), "GFP_vs_WT", "Rbbp4", level = 1), "data.frame")
})

## run_app ---------------------------------------------------------------
test_that("run_app throws error without valid input", {
  expect_error(run_app("test"))
})
|
/tests/testthat/test.R
|
no_license
|
algom/DEPstoi
|
R
| false
| false
| 6,410
|
r
|
# Unit tests for the package's user-facing functions (merge_ibaq,
# get_stoichiometry, plot_stoichiometry, plot_ibaq, iBAQ, run_app).
# Fixture objects (test_data, test_pep, test_dep, test_ibaq, test_stoi,
# test_result) are assumed to be loaded by the testthat helper files.
# NOTE(review): expect_is() is deprecated in testthat 3e (prefer
# expect_s3_class), and two test names contain the typo "rigth".
context("Functions")
# merge_ibaq: input validation, return class, and output shape.
test_that("merge_ibaq throws error without valid input", {
  expect_error(merge_ibaq("test_data", test_pep))
  expect_error(merge_ibaq(test_data, "test_pep"))
  expect_error(merge_ibaq(test_data[,-(30)], test_pep))
  expect_error(merge_ibaq(test_data[,-(31)], test_pep))
  expect_error(merge_ibaq(test_data[,-(15:20)], test_pep))
  expect_error(merge_ibaq(test_data, test_pep[,-(14)]))
  expect_error(merge_ibaq(test_data, test_pep[,-(6)]))
})
test_that("merge_ibaq returns a data.frame", {
  expect_is(merge_ibaq(test_data, test_pep), "data.frame")
  expect_is(merge_ibaq(tibble::as_tibble(test_data), test_pep), "data.frame")
  expect_is(merge_ibaq(test_data, tibble::as_tibble(test_pep)), "data.frame")
})
test_that("merge_ibaq returns an object with the rigth dimensions and columns", {
  result <- merge_ibaq(test_data, test_pep)
  expect_equal(grep("iBAQ", colnames(result)), 4:9)
  expect_equal(dim(result), c(359,10))
})
# get_stoichiometry: input validation (including malformed rowData),
# return class, and bait-normalised output.
test_that("get_stoichiometry throws error without valid input", {
  expect_error(get_stoichiometry("test_dep", test_ibaq, "GFP_vs_WT", "Rbbp4", 1))
  expect_error(get_stoichiometry(test_dep, "test_ibaq", "GFP_vs_WT", "Rbbp4", 1))
  expect_error(get_stoichiometry(test_dep, test_ibaq, GFP_vs_WT, "Rbbp4", 1))
  expect_error(get_stoichiometry(test_dep, test_ibaq, "GFP_vs_WT", Rbbp4, 1))
  expect_error(get_stoichiometry(test_dep, test_ibaq, "GFP_vs_WT", "Rbbp4", "1"))
  expect_error(get_stoichiometry(test_dep, test_ibaq[,-(2:3)], "GFP_vs_WT", "Rbbp4", 1))
  expect_error(get_stoichiometry(test_dep, test_ibaq[,-(4:9)], "GFP_vs_WT", "Rbbp4", 1))
  test_ibaq_sign_error1 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error1) <- SummarizedExperiment::rowData(test_ibaq_sign_error1)[,-(1)]
  expect_error(get_stoichiometry(test_ibaq_sign_error1, test_ibaq, "GFP_vs_WT", "Rbbp4", 1))
  test_ibaq_sign_error2 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error2) <- SummarizedExperiment::rowData(test_ibaq_sign_error2)[,-c(31,32)]
  expect_error(get_stoichiometry(test_ibaq_sign_error2, test_ibaq, "GFP_vs_WT", "Rbbp4", 1))
  test_ibaq_sign_error3 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error3) <- SummarizedExperiment::rowData(test_ibaq_sign_error3)[,-(35)]
  expect_error(get_stoichiometry(test_ibaq_sign_error3, test_ibaq, "GFP_vs_WT", "Rbbp4", 1))
})
test_that("get_stoichiometry returns a data.frame", {
  expect_is(get_stoichiometry(test_dep, test_ibaq, "GFP_vs_WT", "Rbbp4", 1), "data.frame")
})
test_that("get_stoichiometry returns an object with the rigth dimensions and columns", {
  result <- get_stoichiometry(test_dep, test_ibaq, "GFP_vs_WT", "Rbbp4", 1)
  expect_equal(result$stoichiometry[result$name == "Rbbp4"], 1)
  expect_equal(dim(result), c(5,4))
})
# plot_stoichiometry: input validation and ggplot return.
test_that("plot_stoichiometry throws error without valid input", {
  expect_error(plot_stoichiometry("test_stoi", 0.001, NULL))
  expect_error(plot_stoichiometry(test_stoi, "0.001", NULL))
  expect_error(plot_stoichiometry(test_stoi, 0.001, "0.05"))
  expect_error(plot_stoichiometry(test_stoi[,-(1)], 0.001, NULL))
  expect_error(plot_stoichiometry(test_stoi[,-(2)], 0.001, NULL))
  expect_error(plot_stoichiometry(test_stoi[,-(3)], 0.001, NULL))
  expect_error(plot_stoichiometry(test_stoi[,-(4)], 0.001, NULL))
})
test_that("plot_stoichiometry returns a ggplot object", {
  expect_is(plot_stoichiometry(test_stoi, 0.001), "ggplot")
  expect_is(plot_stoichiometry(test_stoi, 0.001, 0.5), "ggplot")
})
# plot_ibaq: input validation (including malformed rowData) and ggplot return.
test_that("plot_ibaq throws error without valid input", {
  expect_error(plot_ibaq("test_dep", "GFP_vs_WT", 3))
  expect_error(plot_ibaq(test_dep, GFP_vs_WT, 3))
  expect_error(plot_ibaq(test_dep, "GFP_vs_WT", "3"))
  expect_error(plot_ibaq(test_dep, "test", 3))
  test_ibaq_sign_error1 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error1) <- SummarizedExperiment::rowData(test_ibaq_sign_error1)[,-(1)]
  expect_error(plot_ibaq(test_ibaq_sign_error1, "GFP_vs_WT", 3))
  test_ibaq_sign_error2 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error2) <- SummarizedExperiment::rowData(test_ibaq_sign_error2)[,-c(31,32)]
  expect_error(plot_ibaq(test_ibaq_sign_error2, "GFP_vs_WT", 3))
  test_ibaq_sign_error3 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error3) <- SummarizedExperiment::rowData(test_ibaq_sign_error3)[,-(16:21)]
  expect_error(plot_ibaq(test_ibaq_sign_error3, "GFP_vs_WT", 3))
  test_ibaq_sign_error4 <- test_dep
  SummarizedExperiment::rowData(test_ibaq_sign_error4) <- SummarizedExperiment::rowData(test_ibaq_sign_error4)[,-(35)]
  expect_error(plot_ibaq(test_ibaq_sign_error4, "GFP_vs_WT", 3))
})
test_that("plot_ibaq returns a ggplot object", {
  expect_is(plot_ibaq(test_dep, "GFP_vs_WT", 3), "ggplot")
})
# iBAQ: input validation and return class.
test_that("iBAQ throws error without valid input", {
  expect_error(iBAQ("test_result", test_pep, "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, "test_pep", "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep, GFP_vs_WT, "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep, "GFP_vs_WT", Rbbp4))
  expect_error(iBAQ(test_result[-(1)], test_pep, "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep[,-(6)], "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep[,-(14)], "GFP_vs_WT", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep, "test", "Rbbp4"))
  expect_error(iBAQ(test_result, test_pep, "GFP_vs_WT", "test"))
  result_error <- test_result
  result_error$data <- result_error$data[,-(16:21)]
  expect_error(iBAQ(result_error, test_pep, "GFP_vs_WT", "Rbbp4"))
  result_error2 <- test_result
  SummarizedExperiment::rowData(result_error2$dep) <- SummarizedExperiment::rowData(result_error2$dep)[,-(1)]
  expect_error(iBAQ(result_error2, test_pep, "GFP_vs_WT", "Rbbp4"))
  result_error3 <- test_result
  SummarizedExperiment::rowData(result_error3$dep) <- SummarizedExperiment::rowData(result_error3$dep)[,-c(31,35)]
  expect_error(iBAQ(result_error3, test_pep, "GFP_vs_WT", "Rbbp4"))
})
test_that("iBAQ returns a data.frame", {
  expect_is(iBAQ(test_result, test_pep, "GFP_vs_WT", "Rbbp4", level = 1), "data.frame")
  expect_is(iBAQ(test_result, test_pep, "GFP_vs_WT", "Rbbp4", level = 1L), "data.frame")
  expect_is(iBAQ(test_result, tibble::as_tibble(test_pep), "GFP_vs_WT", "Rbbp4", level = 1), "data.frame")
})
# run_app: rejects non-empty/invalid arguments.
test_that("run_app throws error without valid input", {
  expect_error(run_app("test"))
})
|
# Bike-sharing demand: linear regression baseline.
# Reads Kaggle-style train.csv, engineers day/hour factors, fits stepwise
# linear models on a 60/20/20 train/validation/test split, and assembles
# test-set count predictions.
setwd("C:/Users/SINDHU/Desktop/data science")
train <- read.csv("train.csv")
str(train)
# Drop columns 10 and 11 (presumably the 'casual'/'registered' count
# components that leak the target -- confirm against the data file).
train <- train[, -c(10, 11)]
# Converting integer variables to factor variables
train$season <- as.factor(train$season)
train$holiday <- as.factor(train$holiday)
train$workingday <- as.factor(train$workingday)
train$weather <- as.factor(train$weather)
train$datetime <- as.POSIXct(train$datetime, format = "%Y-%m-%d %H:%M:%S")
# Extract weekday (1-7, '%u') from datetime value
train$day <- as.factor(strftime(train$datetime, '%u'))
# Extract hour-of-day (characters 12-13 of the timestamp string)
train$hour <- as.factor(substring(train$datetime, 12, 13))
# Removing raw datetime field now that day/hour are extracted
train <- train[, -1]

library(caTools)
set.seed(123)
# 60% training; remaining 40% split evenly into validation and test.
split <- sample.split(train$count, SplitRatio = 0.60)
training <- subset(train, split == TRUE)
validation <- subset(train, split == FALSE)
split <- sample.split(validation$count, SplitRatio = 0.50)
valid <- subset(validation, split == TRUE)
test <- subset(validation, split == FALSE)
str(training)
str(valid)
str(test)

# Applying Linear Regression model (the explicit-formula fit below
# intentionally overwrites the count ~ . fit, as in the original analysis).
lmBikeRent <- lm(count ~ ., data = training)
lmBikeRent <- lm(count ~ season + holiday + workingday + weather + day + hour, data = training)
summary(lmBikeRent)
library(MASS)
lmBikeRentAIC <- stepAIC(lmBikeRent, direction = "both")
summary(lmBikeRentAIC)

lm_predict_validation <- predict(lmBikeRentAIC, newdata = valid)
library(Metrics)
validation_rmse <- rmse(valid$count, lm_predict_validation)
print("root-mean-square error between actual and predicted")
print(validation_rmse)
# Let's check the summary of predicted count values
cat("\n")
print("summary of predicted count values")
summary(lm_predict_validation)
# summary of actual count values
print("summary of actual count values")
summary(valid$count)
# The linear model can predict negative counts, which are impossible for a
# bike-rental forecast. Replace all non-positive predictions with 1.
Output2Mod <- lm_predict_validation
Output2Mod[lm_predict_validation <= 0] <- 1
# Check again the summary of predicted count values
print("summary of predicted count values after replaced the negative values")
summary(Output2Mod)
# Clamping the negative values reduces the RMSE.
print("root-mean-square error value after replaced the negative values")
print(rmse(valid$count, Output2Mod))
validation_rmse <- rmse(valid$count, Output2Mod)
print(validation_rmse)

# Since the raw model produced negative predictions, refit on log(count)
# so back-transformed predictions are positive by construction.
lmBikeRentLog <- lm(log(count) ~ ., data = training)
lmBikeRentLogAIC <- stepAIC(lmBikeRentLog, direction = "both")
lm_predict_validation_log <- predict(lmBikeRentLogAIC, newdata = valid)
# Predictions are on the log scale; exp() back-transforms to counts.
lm_predict_validation_nonlog <- exp(lm_predict_validation_log)
print("summary of predicted count values after log transformation")
summary(lm_predict_validation_nonlog)
validation_nonlog_rmse <- rmse(valid$count, lm_predict_validation_nonlog)
# FIX: this is a plain RMSE of back-transformed counts, not an RMSLE;
# the original message claimed "root-mean-square-log error".
print("root-mean-square error value after log transformation")
print(validation_nonlog_rmse)

# Score the test split with the log model (test needs a count column so the
# model frame has the same shape as training).
test$count <- 0
str(test$count)
lm_predict_test_log <- predict(lmBikeRentLogAIC, newdata = test)
str(test)
str(valid)
str(training)
lm_predict_test_nonlog <- exp(lm_predict_test_log)
# FIX: the original bound the *log-scale* predictions (lm_predict_test_log)
# into the output while labelling the column "count"; use the
# back-transformed counts computed on the previous line.
final_df <- cbind(as.data.frame(lm_predict_test_nonlog), test$hour)
# NOTE(review): the second column actually holds the hour factor, not a
# timestamp; the "datetime" label is kept for compatibility -- confirm intent.
colnames(final_df) <- c("count", "datetime")
final_df
|
/Linear_Regression.R
|
no_license
|
sindhukrovvidi/Bike-sharing-demand
|
R
| false
| false
| 3,782
|
r
|
# Bike-sharing demand: linear regression baseline.
# Reads train.csv, engineers day/hour factors, fits stepwise linear models on
# a 60/20/20 train/validation/test split, and assembles test predictions.
setwd("C:/Users/SINDHU/Desktop/data science")
train=read.csv("train.csv")
str(train)
# Drop columns 10 and 11 (presumably the 'casual'/'registered' components
# that leak the target 'count' -- TODO confirm against the data file).
train <- train[,-c(10,11)]
#Converting integer variables to factor variables
train$season <- as.factor(train$season)
train$holiday <- as.factor(train$holiday)
train$workingday <- as.factor(train$workingday)
train$weather <- as.factor(train$weather)
train$datetime <-as.POSIXct(train$datetime, format="%Y-%m-%d %H:%M:%S")
# Extract day from datetime value ('%u' = weekday number 1-7)
train$day <- strftime(train$datetime, '%u')
train$day <- as.factor(train$day)
# Extract hour from datetime value (characters 12-13 of the timestamp)
train$hour <- substring(train$datetime, 12,13)
train$hour <- as.factor(train$hour)
# Removing datetime field
train <- train[,-1]
library(caTools)
set.seed(123)
# 60% training; the remaining 40% is split evenly into validation and test.
split <- sample.split(train$count, SplitRatio = 0.60)
training <- subset(train, split == TRUE)
validation <- subset(train, split == FALSE)
split <- sample.split(validation$count, SplitRatio = 0.50)
valid<- subset(validation, split == TRUE)
test <- subset(validation, split == FALSE)
str(training)
str(valid)
str(test)
# Applying Linear Regression model
# (the explicit-formula fit below overwrites the count ~ . fit)
lmBikeRent <- lm(count~., data = training)
lmBikeRent <- lm(count~season+holiday+workingday+weather+day+hour,data=training)
summary(lmBikeRent)
library(MASS)
lmBikeRentAIC<-stepAIC(lmBikeRent, direction="both")
summary(lmBikeRentAIC)
lm_predict_validation <- predict(lmBikeRentAIC, newdata = valid)
library(Metrics)
validaion_rmse<-rmse(valid$count,lm_predict_validation)
print("root-mean-square error between actual and predicted")
print(validaion_rmse)
# Let's check the summary of predicted count values
cat("\n")
print("summary of predicted count values")
summary(lm_predict_validation)
# summary of actual count values
print("summary of actual count values")
summary(valid$count)
# From above summary we saw negative values of predicted count.
# We don't want negative values as forecast for bike count. Replace all negative numbers with 1
Output2Mod <- lm_predict_validation
Output2Mod[lm_predict_validation<=0] <-1
# Check again the summary of predicted count values
print("summary of predicted count values after replaced the negative values")
summary(Output2Mod)
# As we replaced the negative values, the rmse value got reduced
print("root-mean-square error value after replaced the negative values")
print(rmse(valid$count,Output2Mod))
validaion_rmse<-rmse(valid$count,Output2Mod)
print(validaion_rmse)
# Since we got negative predicted values, let's do log transformation and run regression model again
lmBikeRentLog <- lm(log(count)~., data = training)
# Now performs stepwise model selection on log model
lmBikeRentLogAIC <- stepAIC(lmBikeRentLog, direction="both")
lm_predict_validation_log <- predict(lmBikeRentLogAIC,newdata=valid)
# As the predicted values are in log format, use exponential(exp) to convert from log to non-log values
lm_predict_validation_nonlog <- exp(lm_predict_validation_log)
# Let's check the summary of predicted count values, it shows there are no negative values
print("summary of predicted count values after log transformation")
summary(lm_predict_validation_nonlog)
validaion_nonlog_rmse<-rmse(valid$count,lm_predict_validation_nonlog)
# NOTE(review): this is a plain RMSE of back-transformed counts, not an
# RMSLE, despite the "root-mean-square-log" wording of the message below.
print("root-mean-square-log error value after log transformation")
print(validaion_nonlog_rmse)
# test needs a count column so predict() sees the training column layout.
test$count=0
str(test$count)
# Run model on test data
#lm_predict_validation_log <- predict(lmBikeRentLogAIC,newdata=valid)
lm_predict_test_log <- predict(lmBikeRentLogAIC,newdata=test)
str(test)
str(valid)
str(training)
lm_predict_test_nonlog <-exp(lm_predict_test_log)
# NOTE(review): lm_predict_test_nonlog is computed above but the next line
# binds the log-scale predictions while labelling them "count" -- likely a
# bug; confirm which scale is intended. Also, the second column holds
# test$hour, not a timestamp, despite the "datetime" label.
final_df <- cbind(as.data.frame(lm_predict_test_log), test$hour)
colnames(final_df) <- c("count", "datetime")
final_df
|
# Auto-generated fuzz/valgrind regression fixture: calls the internal
# multivariance:::match_rows with a 5x7 matrix A containing extreme double
# values (subnormal, huge, tiny) plus zeros, and a 1x1 zero matrix B, then
# prints the structure of the result.
testlist <- list(A = structure(c(2.32784082958487e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613107166-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 344
|
r
|
# Auto-generated fuzz/valgrind regression fixture (duplicate copy): feeds a
# 5x7 matrix of extreme double values and a 1x1 zero matrix to the internal
# multivariance:::match_rows and prints the structure of the result.
testlist <- list(A = structure(c(2.32784082958487e-308, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
MaxentProject <-
  function(Species, VariableNamesIn, PredictorsIn, OutGridID,
           SubsetVariableNumber, VariableSubset, TrainSWD, TrainPresAbsID,
           Threshold, BetaMult, OutDirectIn, Output, CatVarsPrefix,
           MaxentArgsIn) {
    # Fits a Maxent model on all presence data (no evaluation statistics),
    # projects it onto the predictor rasters, writes the continuous and the
    # threshold-calibrated grids as GeoTIFFs under OutDirectIn, and returns
    # the calibrated (0/1) raster.
    #
    # FIX: the original signature declared the parameter `OutDirectIn...`
    # while the body referenced `OutDirectIn`, which could never resolve.
    # The parameter is renamed here; R's partial matching of named arguments
    # keeps any existing `OutDirectIn=` callers working, and positional
    # callers are unaffected.
    #
    # Args:
    #   Species: species name used to build output file names.
    #   VariableNamesIn, Output: retained for interface compatibility
    #     (unused in this function, as in the original).
    #   PredictorsIn: raster stack of environmental predictors.
    #   OutGridID: optional tag inserted into output file names ("" if missing).
    #   SubsetVariableNumber: variable-count tag for output file names.
    #   VariableSubset: variable names encoded as "VARA-VARB-...".
    #   TrainSWD: samples-with-data table of training environmental values.
    #   TrainPresAbsID: presence(1)/background(0) vector for maxent().
    #   Threshold: calibration threshold on the 0-1 Maxent score.
    #   BetaMult: Maxent beta-multiplier regularization.
    #   OutDirectIn: output directory for Maxent files and GeoTIFFs.
    #   CatVarsPrefix: optional prefix flagging categorical layers.
    #   MaxentArgsIn: optional replacement for the default Maxent arguments.
    library(dismo)
    library(maptools)
    library(raster)
    library(rgdal)
    library(sp)
    # Default Maxent arguments unless the caller supplies their own.
    MaxentBaseArgs <- c(paste0("betamultiplier=", BetaMult),
                        "writebackgroundpredictions=false")
    if (missing(MaxentArgsIn)) {
      MaxentArgs1 <- MaxentBaseArgs
    } else {
      MaxentArgs1 <- MaxentArgsIn
    }
    AnyCategoricalVars <- !missing(CatVarsPrefix)
    if (missing(OutGridID)) OutGridID <- ""
    # Variable subsets are encoded as "VARA-VARB-..."; split into names.
    VarNames <- unlist(strsplit(c(unlist(VariableSubset)), "-"))
    # Keep only the selected variables from the training SWD table.
    MaxentTrainDataK <- as.data.frame(TrainSWD[, VarNames])
    colnames(MaxentTrainDataK) <- VarNames
    # Normalize raster layer names so they match the variable names.
    names(PredictorsIn) <- toupper(names(PredictorsIn))
    names(PredictorsIn) <- gsub("_NS", "", names(PredictorsIn))
    predictors <- subset(PredictorsIn, VarNames)
    # Flag categorical layers for Maxent, if a prefix was supplied.
    if (AnyCategoricalVars) {
      MaxentArgs <- c(MaxentArgs1, paste0("togglelayertype=", CatVarsPrefix))
    } else {
      MaxentArgs <- MaxentArgs1
    }
    # FIX: the original passed the undefined object `OutDirectSub` as the
    # Maxent output path; `OutDirectIn` is assumed to be the intended
    # directory -- confirm against the original workflow.
    MaxentOut <- maxent(MaxentTrainDataK, TrainPresAbsID, args = MaxentArgs,
                        path = OutDirectIn)
    # Project the model and store scores as integers scaled by 1000.
    maxent.score1 <- predict(MaxentOut, predictors)
    maxent.score <- calc(maxent.score1, function(x) as.integer(x * 1000))
    # Save the continuous grid for Arc as a GeoTIFF.
    setwd(OutDirectIn)
    writeRaster(maxent.score,
                paste0(Species, "Maxent", OutGridID, SubsetVariableNumber,
                       "Vars_Beta", BetaMult),
                format = "GTiff", overwrite = TRUE)
    # Calibrate: binarize at the threshold (scaled by 1000 to match scores).
    ThresholdK <- Threshold * 1000
    maxent.scorecal <- calc(maxent.score,
                            function(x) ifelse(x < ThresholdK, 0, 1))
    writeRaster(maxent.scorecal,
                paste0(Species, "Maxent", OutGridID, SubsetVariableNumber,
                       "Vars_Beta", BetaMult, "Cal"),
                format = "GTiff", overwrite = TRUE)
    return(maxent.scorecal)
  }
|
/MaxEnt/RCode/Functions/MaxentProject_Function.R
|
no_license
|
jamesltracy/RSFSA_R
|
R
| false
| false
| 4,455
|
r
|
MaxentProject <-
  function(Species, VariableNamesIn, PredictorsIn, OutGridID, SubsetVariableNumber, VariableSubset, TrainSWD, TrainPresAbsID, Threshold, BetaMult, OutDirectIn..., Output, CatVarsPrefix, MaxentArgsIn) {
  # NOTE(review): the parameter is declared as `OutDirectIn...` but the body
  # references `OutDirectIn` (setwd below) and an undefined `OutDirectSub`
  # (maxent path) -- neither name resolves inside this function as written;
  # confirm the intended output-directory argument.
  ##############################################################################
  # This R function outputs the Maxent model built upon all
  # presence data with no evaluation statistics
  ##############################################################################
  #
  ##############################################################################
  # The model associated presence environmental data are processed using the R program
  # "SpeciesPresenceAbsencePointProcessingSTB18k.R" together with the ArcPython program
  # "SpeciesPresenceAbsencePointProcessingSTB18k.py"
  ##############################################################################
  #
  ##############################################################################
  # This section loads libraries, sets working directory and reads environmental rasters
  ##############################################################################
  #BetaMult=1
  # Record start time of program
  starttime <- Sys.time()
  # load needed packages of raster, rgdal, dismo, rjava, and maptools (printouts not shown)
  # This sections reads in the lat/long data and formats it
  library(dismo)
  library(maptools)
  library(raster)
  library(rgdal)
  library(sp)
  #
  # Set default values for LongOutput, if optional values left out of function call
  MaxentBaseArgs <- c(paste0("betamultiplier=", BetaMult), "writebackgroundpredictions=false")
  if(missing(MaxentArgsIn)) {
  MaxentArgs1 <- MaxentBaseArgs
  } else {
  MaxentArgs1 <- MaxentArgsIn
  }
  if(missing(CatVarsPrefix)) {
  AnyCategoricalVars=FALSE
  } else {
  AnyCategoricalVars=TRUE
  }
  #
  if(missing(OutGridID)) { OutGridID="" }
  # Create definition for a geographical projection
  crs.geo <- CRS("+proj=longlat +ellps=WGS84 +datum=WGS84")  # geographical, datum WGS84
  #
  VariableNamesSel <- c(unlist(VariableSubset))
  # Split VarNames of VariableSubsets into separate variables
  # (subsets are encoded as "VARA-VARB-...")
  VarNames <- unlist(strsplit(VariableNamesSel, "-"))
  SubsetVarNum <- length(VarNames)
  ## Keep only data for selected variables
  head(TrainSWD)
  MaxentTrainDataK <- as.data.frame(TrainSWD[,VarNames])
  colnames(MaxentTrainDataK) <- VarNames
  head(MaxentTrainDataK)
  nrow(MaxentTrainDataK)
  # Assemble environmental rasters (normalize layer names to match VarNames)
  names(PredictorsIn) <- toupper(names(PredictorsIn))
  names(PredictorsIn) <- gsub("_NS", "", names(PredictorsIn))
  #plot(PredictorsIn[[1]])
  # Subset raster stack of predictors by VarNames
  predictors <- subset(PredictorsIn, VarNames)
  ##
  #plot(predictors[[41]])
  # Develop maxent model from specified presence and background data
  #
  ## Account for categorical variables specified by CarVarsPrefix, if any
  if(AnyCategoricalVars==TRUE) {
  MaxentCatArg <- paste0("togglelayertype=", CatVarsPrefix)
  MaxentArgs <- c(MaxentArgs1, MaxentCatArg)
  } else {
  MaxentArgs <- MaxentArgs1
  }
  #
  # NOTE(review): `OutDirectSub` is not defined anywhere in this function.
  MaxentOut <- maxent(MaxentTrainDataK, TrainPresAbsID, args=MaxentArgs, path=OutDirectSub)
  ######
  # Project maxent model on native range
  #maxent.scoreraw1 <- maxent.scoreraw
  maxent.score1 <- predict(MaxentOut, predictors)
  # Multiply raw grid by 1000 and convert to integer
  maxent.score <- calc(maxent.score1, function(x) as.integer(x * 1000) )
  # Project maxent model on introduced range (if any)
  # Save for Arc as a geoTIFF grid
  setwd(OutDirectIn)
  writeRaster(maxent.score, paste0(Species, "Maxent", OutGridID, SubsetVariableNumber, "Vars_Beta", BetaMult), format = "GTiff", overwrite=TRUE)
  #
  ################################################################################
  ### Calibrate model using using mean threshold of mean model value (e.g., envelope.score)
  ### at maximum TSS from evaluation runs of k-fold data
  ## Multiply threshold by 1000 (to match the integer-scaled score grid)
  ThresholdK <- Threshold * 1000
  maxent.scorecal <- calc(maxent.score, function(x) ifelse(x < ThresholdK, 0, 1) )
  #plot(maxent.scorecal)
  # SubsetVariableNumber <- 19
  #OutGridID <- "Full"
  writeRaster(maxent.scorecal, paste0(Species, "Maxent", OutGridID, SubsetVariableNumber, "Vars_Beta", BetaMult, "Cal"), format = "GTiff", overwrite=TRUE)
  #
  return(maxent.scorecal)
  }
|
# Build a null distribution for the BWR statistic by permutation.
# Each row of X is centered on its group mean (removing the group effect),
# the rows are shuffled, and BWR() -- defined elsewhere in the package --
# is recomputed N times on the shuffled, centered data.
#
# Args:
#   X: numeric matrix, samples in rows and variables in columns.
#   groupLabel: factor of group memberships, one entry per row of X.
#   N: number of permutations.
#   verbose: if TRUE, print progress for each permutation.
# Returns: an N x ncol(X) matrix; row i holds the BWR statistic of the
#   i-th permutation.
createNullSampling <- function(X, groupLabel, N = 100,
                               verbose = TRUE) {
  lvls <- levels(groupLabel)
  # Column means per group; drop = FALSE keeps single-row groups as
  # matrices so colMeans() does not fail (robustness fix).
  groupMean <- lapply(lvls, function(lev) {
    colMeans(X[groupLabel == lev, , drop = FALSE])
  })
  names(groupMean) <- lvls
  # Center every sample on its own group mean.
  samplePool <- X
  for (i in seq_len(nrow(samplePool))) {
    samplePool[i, ] <- X[i, ] - groupMean[[as.character(groupLabel[i])]]
  }
  L <- nrow(X)
  H0 <- matrix(data = 0, nrow = N, ncol = ncol(X))
  for (i in seq_len(N)) {
    if (verbose) cat("\n Permutation th", i)
    H0[i, ] <- BWR(samplePool[sample(L), ], groupLabel)
  }
  return(H0)
}
|
/R/createNullSampling.R
|
permissive
|
nghiavtr/speaq
|
R
| false
| false
| 662
|
r
|
# Build a null distribution for the BWR statistic by permutation:
# rows of X are centered on their group means, shuffled, and BWR()
# (defined elsewhere in the package) is recomputed N times.
# Returns an N x ncol(X) matrix of null BWR statistics.
createNullSampling <-function(X, groupLabel, N=100,
                   verbose=TRUE){
  groupNum=length(levels(groupLabel));
  samplePool=X;
  # Column means of each group, in the order of levels(groupLabel).
  groupMean=list();
  for (i in 1:groupNum){
    groupLabeli=which(groupLabel==levels(groupLabel)[i]);
    Xi=X[groupLabeli,]
    mi=colMeans(Xi);
    groupMean[[i]]=mi;
  }
  # Center every sample on its own group mean (removes the group effect).
  for (i in 1:nrow(samplePool)){
    samplePool[i,]=
      X[i,]-groupMean[[which(levels(groupLabel)==groupLabel[i])]];
  }
  L=nrow(X);
  H0=matrix(data=0,nrow=N,ncol=ncol(X));
  # Recompute BWR on N row-permutations of the centered pool.
  for(i in 1 : N){
    if (verbose) cat("\n Permutation th",i);
    index=sample(L);
    H0[i,]=BWR(samplePool[index,],groupLabel);
  }
  return(H0);
}
|
# This script defines custom functions to be sourced in the
# `rna-expression-validation.R` script of this module.
#
# Chante Bethell for CCDL 2019
#
# # #### USAGE
# This script is intended to be sourced in the
# 'analyses/focal-cn-file-preparation/rna-expression-validation.R' script as
# follows:
#
# source(file.path("util", "rna-expression-functions.R"))
calculate_z_score <- function (expression_data, rnaseq_ind) {
  # Given an expression data matrix, filter for independent samples and
  # calculate the log2 expression values and per-gene z-scores.
  #
  # Args:
  #   expression_data: data.frame with RNA expression data (wide format:
  #     a `gene_id` column plus one column per biospecimen)
  #   rnaseq_ind: character vector of independent biospecimen ids used to
  #     filter the samples
  #
  # Return:
  #   long_expression: expression data.frame filtered for independent samples
  #                    and now in long format, and with the added columns:
  #                    `log2_exp`, `z_score`, `expression_class`
  # Change expression data.frame to long tidy format
  long_expression <- expression_data %>%
    tidyr::gather(biospecimen_id, expression_value, -gene_id) %>%
    dplyr::distinct() %>%
    dplyr::filter(biospecimen_id %in% rnaseq_ind) %>%
    dplyr::mutate(gene_id = gsub(".*\\_", "", gene_id)) # Trim the gene ids to
  # include only the gene symbol
  long_expression <- long_expression %>%
    dplyr::group_by(gene_id) %>%
    dplyr::mutate(
      log2_exp = log2(expression_value + 1),
      # FIX: the original read `(log2_exp - mean(log2_exp) / sd(log2_exp))`,
      # which computes x - (mean/sd) due to operator precedence; the standard
      # z-score is (x - mean) / sd.
      z_score = (log2_exp - mean(log2_exp)) / sd(log2_exp),
      # Bin z-scores into ordered categories for plotting.
      expression_class = dplyr::case_when(
        z_score < -2 ~ "z < -2",
        z_score < -1 ~ "-2 ≤ z < -1",
        z_score < -0.5 ~ "-1 ≤ z < -0.5",
        z_score < 0 ~ "-0.5 ≤ z < 0",
        z_score < 0.5 ~ "0 ≤ z < 0.5",
        z_score < 1 ~ "0.5 ≤ z < 1",
        z_score < 2 ~ "1 ≤ z < 2",
        !(is.na(z_score)) ~ "z ≥ 2"
      ) %>%
        ordered(
          levels = c(
            "z < -2",
            "-2 ≤ z < -1",
            "-1 ≤ z < -0.5",
            "-0.5 ≤ z < 0",
            "0 ≤ z < 0.5",
            "0.5 ≤ z < 1",
            "1 ≤ z < 2",
            "z ≥ 2"
          )
        )
    )
}
merge_expression <-
  function (copy_number_df,
            expression_df,
            metadata,
            filename) {
    # Given the focal copy number data.frame already annotated with the
    # metadata, the RNA-seq expression data.frame, and the metadata, combine
    # the data into one data.frame and save data.frame as tsv file.
    #
    # Args:
    #   copy_number_df: focal copy number data.frame
    #   expression_df: RNA-seq expression data.frame
    #   metadata: the relevant metadata data.frame
    #   filename: filename of the output tsv file
    #
    # Returns:
    #   combined_df: data.frame with information from the focal CN, the
    #                RNA-seq expression, and metadata data.frames
    #
    # NOTE(review): the output path uses `results_dir`, a free variable
    # resolved from the calling environment -- confirm it is defined before
    # this function is called.
    # Annotate expression data with metadata
    expression_metadata <- expression_df %>%
      dplyr::inner_join(metadata,
                        by = c("biospecimen_id" = "Kids_First_Biospecimen_ID")) %>%
      dplyr::select(
        gene_id,
        Kids_First_Participant_ID,
        sample_id,
        biospecimen_id,
        expression_value,
        log2_exp,
        z_score,
        expression_class,
        tumor_descriptor
      ) %>%
      dplyr::distinct()
    # Annotate focal CN data with metadata
    cn_metadata <- copy_number_df %>%
      dplyr::inner_join(metadata,
                        by = c("biospecimen_id" = "Kids_First_Biospecimen_ID")) %>%
      dplyr::select(
        gene_symbol,
        Kids_First_Participant_ID,
        sample_id,
        biospecimen_id,
        status,
        copy_number,
        tumor_ploidy,
        tumor_descriptor
      ) %>%
      dplyr::distinct()
    # Merge Focal CN data.frame with RNA expression data.frame
    # (left join keeps every expression row even without a CN call).
    combined_df <- expression_metadata %>%
      dplyr::left_join(
        cn_metadata,
        by = c("sample_id",
               "gene_id" = "gene_symbol",
               "tumor_descriptor"),
        suffix = c("_cn", "_expression")
      )
    # Save results
    readr::write_tsv(combined_df, file.path(results_dir, filename))
    return(combined_df)
  }
plot_stacked_expression <- function (cn_expression_loss_df,
                                     cn_expression_neutral_df,
                                     cn_expression_zero_df,
                                     all_stacked_plot_name) {
  # Given a data.frame with annotated CN and RNA expression data, produce a
  # stacked barplot for loss calls, neutral calls, and instances where
  # `copy_number` == 0.
  #
  # Args:
  #   cn_expression_loss_df: data.frame with annotated CN and RNA expression
  #                          data produced using `merge_expression` custom
  #                          function and filtered for loss calls
  #   cn_expression_neutral_df: data.frame with annotated CN and RNA expression
  #                             data produced using `merge_expression` custom
  #                             function and filtered for neutral calls
  #   cn_expression_zero_df: data.frame with annotated CN and RNA expression
  #                          data produced using `merge_expression` custom
  #                          function and filtered for `copy_number` = 0
  #   all_stacked_plot_name: name to save the combined stacked barplot as
  #
  # Returns:
  #   Saves stacked barplot (into the global `plots_dir` defined by the
  #   sourcing script).
  # Bind input data.frame rows; `.id` labels each row with its source list
  # name, so `status_name` (loss / zero / neutral) can be the x axis below.
  cn_expression_df <- dplyr::bind_rows(
    loss = cn_expression_loss_df,
    zero = cn_expression_zero_df,
    neutral = cn_expression_neutral_df,
    .id = "status_name"
  )
  # Plot and save. position_fill() normalizes each bar to proportions;
  # reverse = TRUE flips the stacking order of the fill classes.
  cn_expression_plot <- ggplot2::ggplot(cn_expression_df,
                                        ggplot2::aes(x = status_name,
                                                     fill = expression_class)) +
    ggplot2::geom_bar(position = ggplot2::position_fill(reverse = TRUE)) +
    ggplot2::ylab("Proportion of called genes") +
    # NOTE(review): gsub(".png", ...) treats "." as a regex wildcard; fine
    # for these names, but fixed = TRUE would be safer.
    ggplot2::labs(title = toupper(gsub(".png", "", all_stacked_plot_name)))
  ggplot2::ggsave(file.path(plots_dir, all_stacked_plot_name),
                  cn_expression_plot)
}
plot_mean_expression <- function (cn_expression_loss_df,
                                  cn_expression_neutral_df,
                                  cn_expression_zero_df,
                                  loss_cor_plot_name,
                                  zero_cor_plot_name) {
  # Given a data.frame with expression values for all CN calls and a
  # data.frame with expression values for loss calls, produce and save
  # a scatterplot displaying the correlation of loss and neutral calls across
  # genes.
  #
  # Args:
  #   cn_expression_loss_df: data.frame with annotated CN and RNA expression
  #                          data produced using `merge_expression` custom
  #                          function and filtered for loss calls
  #   cn_expression_neutral_df: data.frame with annotated CN and RNA expression
  #                             data produced using `merge_expression` custom
  #                             function and filtered for neutral calls
  #   cn_expression_zero_df: data.frame with annotated CN and RNA expression
  #                          data produced using `merge_expression` custom
  #                          function and filtered for `copy_number` = 0
  #   loss_cor_plot_name: name to save the output neutral/loss correlation plot
  #   zero_cor_plot_name: name to save the output neutral/zero correlation plot
  #
  # NOTE(review): each input must also carry an `is_driver_gene` column (used
  # below for grouping and point color); `merge_expression` does not add it,
  # so it must be annotated upstream -- confirm against the caller.
  #
  # Returns:
  #   The above named plots are saved in `plots_dir` (a global defined by the
  #   sourcing script).
  # Calculate the mean of log2 expression values per gene (and driver flag)
  mean_loss <- cn_expression_loss_df %>%
    dplyr::group_by(gene_id, is_driver_gene) %>%
    dplyr::summarise(mean_loss_log_expression = mean(log2_exp))
  mean_neutral <- cn_expression_neutral_df %>%
    dplyr::group_by(gene_id, is_driver_gene) %>%
    dplyr::summarise(mean_neutral_log_expression = mean(log2_exp))
  mean_zero <- cn_expression_zero_df %>%
    dplyr::group_by(gene_id, is_driver_gene) %>%
    dplyr::summarise(mean_zero_log_expression = mean(log2_exp))
  # Combine the mean values to be plotted; inner_join keeps only genes
  # present in both call sets.
  mean_combined_loss_neutral <- mean_loss %>%
    dplyr::inner_join(mean_neutral, by = c("gene_id", "is_driver_gene"))
  mean_combined_zero_neutral <- mean_zero %>%
    dplyr::inner_join(mean_neutral, by = c("gene_id", "is_driver_gene"))
  # Plot neutral/loss mean values; the y = x line marks equal expression.
  mean_combined_plot_loss <-
    ggplot2::ggplot(
      mean_combined_loss_neutral,
      ggplot2::aes(x = mean_neutral_log_expression,
                   y = mean_loss_log_expression,
                   col = is_driver_gene)
    ) +
    ggplot2::geom_point(alpha = 0.2) +
    ggplot2::geom_abline() +
    ggplot2::labs(title = toupper(gsub(".png", "", loss_cor_plot_name)))
  ggplot2::ggsave(file.path(plots_dir, loss_cor_plot_name),
                  mean_combined_plot_loss)
  # Plot neutral/zero mean values
  mean_combined_plot_zero <-
    ggplot2::ggplot(
      mean_combined_zero_neutral,
      ggplot2::aes(x = mean_neutral_log_expression,
                   y = mean_zero_log_expression,
                   col = is_driver_gene)
    ) +
    ggplot2::geom_point(alpha = 0.2) +
    ggplot2::geom_abline() +
    ggplot2::labs(title = toupper(gsub(".png", "", zero_cor_plot_name)))
  ggplot2::ggsave(file.path(plots_dir, zero_cor_plot_name),
                  mean_combined_plot_zero)
}
|
/analyses/focal-cn-file-preparation/util/rna-expression-functions.R
|
permissive
|
7716870223/OpenPBTA-analysis
|
R
| false
| false
| 9,409
|
r
|
# This script defines custom functions to be sourced in the
# `rna-expression-validation.R` script of this module.
#
# Chante Bethell for CCDL 2019
#
# # #### USAGE
# This script is intended to be sourced in the
# 'analyses/focal-cn-file-preparation/rna-expression-validation.R' script as
# follows:
#
# source(file.path("util", "rna-expression-functions.R"))
calculate_z_score <- function (expression_data, rnaseq_ind) {
  # Given an expression data matrix, filter for independent samples and
  # calculate the log2 expression values and z-scores.
  #
  # Args:
  #   expression_data: data.frame with RNA expression data; one `gene_id`
  #                    column, remaining columns are per-biospecimen values
  #   rnaseq_ind: vector of independent RNA-seq biospecimen IDs to retain
  #
  # Return:
  #   long_expression: expression data.frame filtered for independent samples
  #                    and now in long format, and with the added columns:
  #                    `log2_exp`, `z_score`, `expression_class`
  # Change expression data.frame to long tidy format
  long_expression <- expression_data %>%
    tidyr::gather(biospecimen_id, expression_value, -gene_id) %>%
    dplyr::distinct() %>%
    dplyr::filter(biospecimen_id %in% rnaseq_ind) %>%
    dplyr::mutate(gene_id = gsub(".*\\_", "", gene_id)) # Trim the gene ids to
  # include only the gene symbol
  long_expression <- long_expression %>%
    dplyr::group_by(gene_id) %>%
    dplyr::mutate(
      log2_exp = log2(expression_value + 1),
      # BUGFIX: a z-score centers *then* scales, i.e. (x - mean(x)) / sd(x).
      # The previous parenthesization, (x - mean(x) / sd(x)), divided only
      # the mean by the standard deviation before subtracting.
      z_score = (log2_exp - mean(log2_exp)) / sd(log2_exp),
      expression_class = dplyr::case_when(
        z_score < -2 ~ "z < -2",
        z_score < -1 ~ "-2 ≤ z < -1",
        z_score < -0.5 ~ "-1 ≤ z < -0.5",
        z_score < 0 ~ "-0.5 ≤ z < 0",
        z_score < 0.5 ~ "0 ≤ z < 0.5",
        z_score < 1 ~ "0.5 ≤ z < 1",
        z_score < 2 ~ "1 ≤ z < 2",
        # Any remaining non-NA z-score is >= 2; NA z-scores stay NA.
        !(is.na(z_score)) ~ "z ≥ 2"
      ) %>%
        ordered(
          levels = c(
            "z < -2",
            "-2 ≤ z < -1",
            "-1 ≤ z < -0.5",
            "-0.5 ≤ z < 0",
            "0 ≤ z < 0.5",
            "0.5 ≤ z < 1",
            "1 ≤ z < 2",
            "z ≥ 2"
          )
        )
    )
}
merge_expression <-
  function (copy_number_df,
           expression_df,
           metadata,
           filename) {
    # Given the focal copy number data.frame already annotated with the
    # metadata, the RNA-seq expression data.frame, and the metadata, combine
    # the data into one data.frame and save data.frame as tsv file.
    #
    # Args:
    #   copy_number_df: focal copy number data.frame; must carry the columns
    #                   `biospecimen_id`, `gene_symbol`, `status`,
    #                   `copy_number`, `tumor_ploidy`
    #   expression_df: RNA-seq expression data.frame in long format (e.g.
    #                  the output of `calculate_z_score`)
    #   metadata: the relevant metadata data.frame, keyed by
    #             `Kids_First_Biospecimen_ID`
    #   filename: filename of the output tsv file; the file is written into
    #             the global `results_dir` defined by the sourcing script
    #
    # Returns:
    #   combined_df: data.frame with information from the focal CN, the
    #                RNA-seq expression, and metadata data.frames
    # Annotate expression data with metadata
    expression_metadata <- expression_df %>%
      dplyr::inner_join(metadata,
                        by = c("biospecimen_id" = "Kids_First_Biospecimen_ID")) %>%
      dplyr::select(
        gene_id,
        Kids_First_Participant_ID,
        sample_id,
        biospecimen_id,
        expression_value,
        log2_exp,
        z_score,
        expression_class,
        tumor_descriptor
      ) %>%
      dplyr::distinct()
    # Annotate focal CN data with metadata
    cn_metadata <- copy_number_df %>%
      dplyr::inner_join(metadata,
                        by = c("biospecimen_id" = "Kids_First_Biospecimen_ID")) %>%
      dplyr::select(
        gene_symbol,
        Kids_First_Participant_ID,
        sample_id,
        biospecimen_id,
        status,
        copy_number,
        tumor_ploidy,
        tumor_descriptor
      ) %>%
      dplyr::distinct()
    # Merge Focal CN data.frame with RNA expression data.frame.
    # left_join keeps every expression row; the CN columns are NA for
    # sample/gene pairs without a focal CN call.
    combined_df <- expression_metadata %>%
      dplyr::left_join(
        cn_metadata,
        by = c("sample_id",
               "gene_id" = "gene_symbol",
               "tumor_descriptor"),
        suffix = c("_cn", "_expression")
      )
    # Save results
    readr::write_tsv(combined_df, file.path(results_dir, filename))
    return(combined_df)
  }
plot_stacked_expression <- function (cn_expression_loss_df,
                                     cn_expression_neutral_df,
                                     cn_expression_zero_df,
                                     all_stacked_plot_name) {
  # Given a data.frame with annotated CN and RNA expression data, produce a
  # stacked barplot for loss calls, neutral calls, and instances where
  # `copy_number` == 0.
  #
  # Args:
  #   cn_expression_loss_df: data.frame with annotated CN and RNA expression
  #                          data produced using `merge_expression` custom
  #                          function and filtered for loss calls
  #   cn_expression_neutral_df: data.frame with annotated CN and RNA expression
  #                             data produced using `merge_expression` custom
  #                             function and filtered for neutral calls
  #   cn_expression_zero_df: data.frame with annotated CN and RNA expression
  #                          data produced using `merge_expression` custom
  #                          function and filtered for `copy_number` = 0
  #   all_stacked_plot_name: name to save the combined stacked barplot as
  #
  # Returns:
  #   Saves stacked barplot (into the global `plots_dir` defined by the
  #   sourcing script).
  # Bind input data.frame rows; `.id` labels each row with its source list
  # name, so `status_name` (loss / zero / neutral) can be the x axis below.
  cn_expression_df <- dplyr::bind_rows(
    loss = cn_expression_loss_df,
    zero = cn_expression_zero_df,
    neutral = cn_expression_neutral_df,
    .id = "status_name"
  )
  # Plot and save. position_fill() normalizes each bar to proportions;
  # reverse = TRUE flips the stacking order of the fill classes.
  cn_expression_plot <- ggplot2::ggplot(cn_expression_df,
                                        ggplot2::aes(x = status_name,
                                                     fill = expression_class)) +
    ggplot2::geom_bar(position = ggplot2::position_fill(reverse = TRUE)) +
    ggplot2::ylab("Proportion of called genes") +
    # NOTE(review): gsub(".png", ...) treats "." as a regex wildcard; fine
    # for these names, but fixed = TRUE would be safer.
    ggplot2::labs(title = toupper(gsub(".png", "", all_stacked_plot_name)))
  ggplot2::ggsave(file.path(plots_dir, all_stacked_plot_name),
                  cn_expression_plot)
}
plot_mean_expression <- function (cn_expression_loss_df,
                                  cn_expression_neutral_df,
                                  cn_expression_zero_df,
                                  loss_cor_plot_name,
                                  zero_cor_plot_name) {
  # Given a data.frame with expression values for all CN calls and a
  # data.frame with expression values for loss calls, produce and save
  # a scatterplot displaying the correlation of loss and neutral calls across
  # genes.
  #
  # Args:
  #   cn_expression_loss_df: data.frame with annotated CN and RNA expression
  #                          data produced using `merge_expression` custom
  #                          function and filtered for loss calls
  #   cn_expression_neutral_df: data.frame with annotated CN and RNA expression
  #                             data produced using `merge_expression` custom
  #                             function and filtered for neutral calls
  #   cn_expression_zero_df: data.frame with annotated CN and RNA expression
  #                          data produced using `merge_expression` custom
  #                          function and filtered for `copy_number` = 0
  #   loss_cor_plot_name: name to save the output neutral/loss correlation plot
  #   zero_cor_plot_name: name to save the output neutral/zero correlation plot
  #
  # NOTE(review): each input must also carry an `is_driver_gene` column (used
  # below for grouping and point color); `merge_expression` does not add it,
  # so it must be annotated upstream -- confirm against the caller.
  #
  # Returns:
  #   The above named plots are saved in `plots_dir` (a global defined by the
  #   sourcing script).
  # Calculate the mean of log2 expression values per gene (and driver flag)
  mean_loss <- cn_expression_loss_df %>%
    dplyr::group_by(gene_id, is_driver_gene) %>%
    dplyr::summarise(mean_loss_log_expression = mean(log2_exp))
  mean_neutral <- cn_expression_neutral_df %>%
    dplyr::group_by(gene_id, is_driver_gene) %>%
    dplyr::summarise(mean_neutral_log_expression = mean(log2_exp))
  mean_zero <- cn_expression_zero_df %>%
    dplyr::group_by(gene_id, is_driver_gene) %>%
    dplyr::summarise(mean_zero_log_expression = mean(log2_exp))
  # Combine the mean values to be plotted; inner_join keeps only genes
  # present in both call sets.
  mean_combined_loss_neutral <- mean_loss %>%
    dplyr::inner_join(mean_neutral, by = c("gene_id", "is_driver_gene"))
  mean_combined_zero_neutral <- mean_zero %>%
    dplyr::inner_join(mean_neutral, by = c("gene_id", "is_driver_gene"))
  # Plot neutral/loss mean values; the y = x line marks equal expression.
  mean_combined_plot_loss <-
    ggplot2::ggplot(
      mean_combined_loss_neutral,
      ggplot2::aes(x = mean_neutral_log_expression,
                   y = mean_loss_log_expression,
                   col = is_driver_gene)
    ) +
    ggplot2::geom_point(alpha = 0.2) +
    ggplot2::geom_abline() +
    ggplot2::labs(title = toupper(gsub(".png", "", loss_cor_plot_name)))
  ggplot2::ggsave(file.path(plots_dir, loss_cor_plot_name),
                  mean_combined_plot_loss)
  # Plot neutral/zero mean values
  mean_combined_plot_zero <-
    ggplot2::ggplot(
      mean_combined_zero_neutral,
      ggplot2::aes(x = mean_neutral_log_expression,
                   y = mean_zero_log_expression,
                   col = is_driver_gene)
    ) +
    ggplot2::geom_point(alpha = 0.2) +
    ggplot2::geom_abline() +
    ggplot2::labs(title = toupper(gsub(".png", "", zero_cor_plot_name)))
  ggplot2::ggsave(file.path(plots_dir, zero_cor_plot_name),
                  mean_combined_plot_zero)
}
|
#' @templateVar class anova
#' @template title_desc_tidy
#'
#' @param x An `anova` objects, such as those created by [stats::anova()] or
#' [car::Anova()].
#' @template param_unused_dots
#'
#' @evalRd return_tidy(
#' "term",
#' "df",
#' "sumsq",
#' "meansq",
#' "statistic",
#' "p.value"
#' )
#'
#' @details The `term` column of an ANOVA table can come with leading or
#' trailing whitespace, which this tidying method trims.
#'
#' @examples
#'
#' a <- lm(mpg ~ wt + qsec + disp, mtcars)
#' b <- lm(mpg ~ wt + qsec, mtcars)
#' tidy(anova(a, b))
#'
#' @export
#' @family anova tidiers
#' @seealso [tidy()], [stats::anova()], [car::Anova()]
tidy.anova <- function(x, ...) {
  # Tidy an anova table into one row per term, normalizing the many
  # column-name variants produced by stats::anova(), car::Anova(), merMod
  # comparisons, etc. into broom's standard names.
  # there are many possible column names that need to be transformed
  renamers <- c(
    "AIC" = "AIC", # merMod
    "BIC" = "BIC", # merMod
    "deviance" = "deviance", # merMod
    "logLik" = "logLik", # merMod
    "Df" = "df",
    "Chi.Df" = "df",
    "Sum Sq" = "sumsq",
    "Mean Sq" = "meansq",
    "F value" = "statistic",
    "Pr(>F)" = "p.value",
    "Res.Df" = "res.df",
    "RSS" = "rss",
    "Sum of Sq" = "sumsq",
    "F" = "statistic",
    "Chisq" = "statistic",
    "P(>|Chi|)" = "p.value",
    "Pr(>Chi)" = "p.value",
    "Pr..Chisq." = "p.value",
    "Pr..Chi." = "p.value",
    "p.value" = "p.value",
    "Chi.sq" = "statistic",
    "LR.Chisq" = "statistic",
    "LR Chisq" = "statistic",
    "edf" = "edf",
    "Ref.df" = "ref.df"
  )
  # Run the lookup keys through make.names(), presumably so they match the
  # syntactically-cleaned column names coming out of fix_data_frame().
  names(renamers) <- make.names(names(renamers))
  ret <- fix_data_frame(x)
  # Warn (rather than error) on columns we do not recognize, so new anova
  # variants still pass through with their original names.
  unknown_cols <- setdiff(colnames(ret), c("term", names(renamers)))
  if (length(unknown_cols) > 0) {
    warning(
      "The following column names in ANOVA output were not ",
      "recognized or transformed: ",
      paste(unknown_cols, collapse = ", ")
    )
  }
  colnames(ret) <- dplyr::recode(colnames(ret), !!!renamers)
  if("term" %in% names(ret)){
    # if rows had names, strip whitespace in them
    ret <- mutate(ret, term = stringr::str_trim(term))
  }
  as_tibble(ret)
}
#' @templateVar class aov
#' @template title_desc_tidy
#'
#' @param x An `aov` object, such as those created by [stats::aov()].
#' @template param_unused_dots
#'
#' @inherit tidy.anova return details
#'
#' @examples
#'
#' a <- aov(mpg ~ wt + qsec + disp, mtcars)
#' tidy(a)
#'
#' @export
#' @family anova tidiers
#' @seealso [tidy()], [stats::aov()]
tidy.aov <- function(x, ...) {
  # Tidy a single-stratum aov fit: the first element of summary(x) holds the
  # anova table, whose row names become the `term` column.
  aov_table <- summary(x)[[1]]
  tidied <- tibble::as_tibble(aov_table, rownames = "term")
  # Row names of aov tables carry padding whitespace; strip it.
  tidied <- dplyr::mutate(tidied, "term" = stringr::str_trim(term))
  # rename2() is a broom-internal helper mapping base R names to tidy names.
  rename2(tidied,
          "df" = "Df",
          "sumsq" = "Sum Sq",
          "meansq" = "Mean Sq",
          "statistic" = "F value",
          "p.value" = "Pr(>F)")
}
#' @templateVar class lm
#' @template title_desc_glance
#'
#' @inherit tidy.aov params examples
#'
#' @note
#' From `0.7.0`, `broom` has changed the return summary and the new model
#' summary dataframe contains only the following information- `logLik`, `IC`,
#' `BIC`, `deviance`, `nobs`. Note that `tidy.aov` contains the numerator and
#' denominator degrees of freedom, which were previously included in the glance
#' summary.
#'
#' @evalRd return_glance(
#' "logLik",
#' "AIC",
#' "BIC",
#' "deviance",
#' "nobs"
#' )
#'
#' @export
#' @seealso [glance()]
#' @family anova tidiers
glance.aov <- function(x, ...) {
  # One-row model summary for an aov fit.
  # NOTE(review): the with(summary(x), ...) wrapper looks vestigial -- every
  # column below is computed directly from `x`, not from the summary object.
  # Confirm before simplifying, since it changes the evaluation environment.
  with(
    summary(x),
    tibble(
      logLik = as.numeric(stats::logLik(x)),
      AIC = stats::AIC(x),
      BIC = stats::BIC(x),
      deviance = stats::deviance(x),
      nobs = stats::nobs(x)
    )
  )
}
#' @templateVar class aovlist
#' @template title_desc_tidy
#'
#' @param x An `aovlist` objects, such as those created by [stats::aov()].
#' @template param_unused_dots
#'
#' @evalRd return_tidy(
#' "term",
#' "df",
#' "sumsq",
#' "meansq",
#' "statistic",
#' "p.value",
#' "stratum"
#' )
#'
#' @inherit tidy.anova details
#'
#' @examples
#'
#' a <- aov(mpg ~ wt + qsec + Error(disp / am), mtcars)
#' tidy(a)
#'
#' @export
#' @family anova tidiers
#' @seealso [tidy()], [stats::aov()]
tidy.aovlist <- function(x, ...) {
  # must filter out Intercept stratum since it has no dimensions
  if (names(x)[1L] == "(Intercept)") {
    x <- x[-1L]
  }
  # Tidy every remaining stratum and record which stratum each row came from.
  tidied <- map_df(x, tidy, .id = "stratum")
  # aov row/element names carry padding whitespace; strip it from both the
  # term and stratum columns.
  tidied <- mutate(
    tidied,
    term = stringr::str_trim(term),
    stratum = stringr::str_trim(stratum)
  )
  as_tibble(tidied)
}
#' @templateVar class manova
#' @template title_desc_tidy
#'
#' @param x A `manova` object return from [stats::manova()].
#' @param test One of "Pillai" (Pillai's trace), "Wilks" (Wilk's lambda),
#' "Hotelling-Lawley" (Hotelling-Lawley trace) or "Roy" (Roy's greatest root)
#' indicating which test statistic should be used. Defaults to "Pillai".
#' @inheritDotParams stats::summary.manova
#'
#' @evalRd return_tidy(
#' "term",
#' "num.df",
#' "den.df",
#' "statistic",
#' "p.value",
#' pillai = "Pillai's trace.",
#' wilks = "Wilk's lambda.",
#' hl = "Hotelling-Lawley trace.",
#' roy = "Roy's greatest root."
#' )
#'
#'
#' @details Depending on which test statistic is specified only one of `pillai`,
#' `wilks`, `hl` or `roy` is included.
#'
#' @examples
#'
#' npk2 <- within(npk, foo <- rnorm(24))
#' m <- manova(cbind(yield, foo) ~ block + N * P * K, npk2)
#' tidy(m)
#'
#' @export
#' @seealso [tidy()], [stats::summary.manova()]
#' @family anova tidiers
tidy.manova <- function(x, test = "Pillai", ...) {
  # Resolve the (possibly abbreviated) test name to broom's column name for
  # that statistic; pmatch() mirrors summary.manova()'s partial matching.
  available_tests <- c("Pillai", "Wilks", "Hotelling-Lawley", "Roy")
  tidy_names <- c("pillai", "wilks", "hl", "roy")
  stat_col <- tidy_names[pmatch(test, available_tests)]
  # Output columns in the order summary.manova() reports them.
  out_names <- c("df", stat_col, "statistic", "num.df", "den.df", "p.value")
  stats_table <- summary(x, test = test, ...)$stats
  fix_data_frame(stats_table, out_names)
}
#' @templateVar class summary.manova
#' @template title_desc_tidy
#'
#' @param x A `summary.manova` object return from [stats::summary.manova()].
#' @template param_unused_dots
#'
#' @evalRd return_tidy(
#' "term",
#' "num.df",
#' "den.df",
#' "statistic",
#' "p.value",
#' pillai = "Pillai's trace.",
#' wilks = "Wilk's lambda.",
#' hl = "Hotelling-Lawley trace.",
#' roy = "Roy's greatest root."
#' )
#'
#'
#' @details Depending on which test statistic was calculated when the object
#' was created, only one of `pillai`, `wilks`, `hl` or `roy` is included.
#'
#' @examples
#'
#' npk2 <- within(npk, foo <- rnorm(24))
#'
#' m <- summary(
#' manova(cbind(yield, foo) ~ block + N * P * K, npk2),
#' test = "Wilks"
#' )
#'
#' tidy(m)
#'
#' @export
#' @seealso [tidy()], [stats::summary.manova()]
#' @family anova tidiers
tidy.summary.manova <- function(x, ...) {
  # Map from the column name summary.manova() uses for each test statistic
  # to broom's tidy column name.
  manova_tests <- c(
    "Pillai" = "pillai",
    "Wilks" = "wilks",
    "Hotelling-Lawley" = "hl",
    "Roy" = "roy"
  )
  # The summary object contains the statistic column for whichever test was
  # requested when it was created; take the first matching column's name.
  test.name <- manova_tests[[intersect(colnames(x$stats), names(manova_tests))[[1]]]]
  nn <- c("df", test.name, "statistic", "num.df", "den.df", "p.value")
  fix_data_frame(x$stats, nn)
}
#' @templateVar class TukeyHSD
#' @template title_desc_tidy
#'
#' @param x A `TukeyHSD` object return from [stats::TukeyHSD()].
#' @template param_unused_dots
#'
#' @evalRd return_tidy(
#' "tidy",
#' "comparison",
#' "estimate",
#' "conf.low",
#' "conf.high",
#' "adj.p.value"
#' )
#'
#' @examples
#'
#' fm1 <- aov(breaks ~ wool + tension, data = warpbreaks)
#' thsd <- TukeyHSD(fm1, "tension", ordered = TRUE)
#' tidy(thsd)
#'
#' # may include comparisons on multiple terms
#' fm2 <- aov(mpg ~ as.factor(gear) * as.factor(cyl), data = mtcars)
#' tidy(TukeyHSD(fm2))
#'
#' @export
#' @seealso [tidy()], [stats::TukeyHSD()]
#' @family anova tidiers
tidy.TukeyHSD <- function(x, ...) {
  # A TukeyHSD object is a list of per-term comparison matrices; tidy each
  # matrix and stack them, recording the source term via `.id`.
  tidy_one_term <- function(comparisons) {
    col_names <- c("estimate", "conf.low", "conf.high", "adj.p.value")
    # Row names hold the "a-b" comparison labels.
    fix_data_frame(comparisons, col_names, "comparison")
  }
  purrr::map_df(x, tidy_one_term, .id = "term")
}
|
/R/stats-anova-tidiers.R
|
no_license
|
kyusque/broom
|
R
| false
| false
| 7,865
|
r
|
#' @templateVar class anova
#' @template title_desc_tidy
#'
#' @param x An `anova` objects, such as those created by [stats::anova()] or
#' [car::Anova()].
#' @template param_unused_dots
#'
#' @evalRd return_tidy(
#' "term",
#' "df",
#' "sumsq",
#' "meansq",
#' "statistic",
#' "p.value"
#' )
#'
#' @details The `term` column of an ANOVA table can come with leading or
#' trailing whitespace, which this tidying method trims.
#'
#' @examples
#'
#' a <- lm(mpg ~ wt + qsec + disp, mtcars)
#' b <- lm(mpg ~ wt + qsec, mtcars)
#' tidy(anova(a, b))
#'
#' @export
#' @family anova tidiers
#' @seealso [tidy()], [stats::anova()], [car::Anova()]
tidy.anova <- function(x, ...) {
  # Tidy an anova table into one row per term, normalizing the many
  # column-name variants produced by stats::anova(), car::Anova(), merMod
  # comparisons, etc. into broom's standard names.
  # there are many possible column names that need to be transformed
  renamers <- c(
    "AIC" = "AIC", # merMod
    "BIC" = "BIC", # merMod
    "deviance" = "deviance", # merMod
    "logLik" = "logLik", # merMod
    "Df" = "df",
    "Chi.Df" = "df",
    "Sum Sq" = "sumsq",
    "Mean Sq" = "meansq",
    "F value" = "statistic",
    "Pr(>F)" = "p.value",
    "Res.Df" = "res.df",
    "RSS" = "rss",
    "Sum of Sq" = "sumsq",
    "F" = "statistic",
    "Chisq" = "statistic",
    "P(>|Chi|)" = "p.value",
    "Pr(>Chi)" = "p.value",
    "Pr..Chisq." = "p.value",
    "Pr..Chi." = "p.value",
    "p.value" = "p.value",
    "Chi.sq" = "statistic",
    "LR.Chisq" = "statistic",
    "LR Chisq" = "statistic",
    "edf" = "edf",
    "Ref.df" = "ref.df"
  )
  # Run the lookup keys through make.names(), presumably so they match the
  # syntactically-cleaned column names coming out of fix_data_frame().
  names(renamers) <- make.names(names(renamers))
  ret <- fix_data_frame(x)
  # Warn (rather than error) on columns we do not recognize, so new anova
  # variants still pass through with their original names.
  unknown_cols <- setdiff(colnames(ret), c("term", names(renamers)))
  if (length(unknown_cols) > 0) {
    warning(
      "The following column names in ANOVA output were not ",
      "recognized or transformed: ",
      paste(unknown_cols, collapse = ", ")
    )
  }
  colnames(ret) <- dplyr::recode(colnames(ret), !!!renamers)
  if("term" %in% names(ret)){
    # if rows had names, strip whitespace in them
    ret <- mutate(ret, term = stringr::str_trim(term))
  }
  as_tibble(ret)
}
#' @templateVar class aov
#' @template title_desc_tidy
#'
#' @param x An `aov` object, such as those created by [stats::aov()].
#' @template param_unused_dots
#'
#' @inherit tidy.anova return details
#'
#' @examples
#'
#' a <- aov(mpg ~ wt + qsec + disp, mtcars)
#' tidy(a)
#'
#' @export
#' @family anova tidiers
#' @seealso [tidy()], [stats::aov()]
tidy.aov <- function(x, ...) {
  # Tidy a single-stratum aov fit: the first element of summary(x) holds the
  # anova table, whose row names become the `term` column.
  aov_table <- summary(x)[[1]]
  tidied <- tibble::as_tibble(aov_table, rownames = "term")
  # Row names of aov tables carry padding whitespace; strip it.
  tidied <- dplyr::mutate(tidied, "term" = stringr::str_trim(term))
  # rename2() is a broom-internal helper mapping base R names to tidy names.
  rename2(tidied,
          "df" = "Df",
          "sumsq" = "Sum Sq",
          "meansq" = "Mean Sq",
          "statistic" = "F value",
          "p.value" = "Pr(>F)")
}
#' @templateVar class lm
#' @template title_desc_glance
#'
#' @inherit tidy.aov params examples
#'
#' @note
#' From `0.7.0`, `broom` has changed the return summary and the new model
#' summary dataframe contains only the following information- `logLik`, `IC`,
#' `BIC`, `deviance`, `nobs`. Note that `tidy.aov` contains the numerator and
#' denominator degrees of freedom, which were previously included in the glance
#' summary.
#'
#' @evalRd return_glance(
#' "logLik",
#' "AIC",
#' "BIC",
#' "deviance",
#' "nobs"
#' )
#'
#' @export
#' @seealso [glance()]
#' @family anova tidiers
glance.aov <- function(x, ...) {
  # One-row model summary for an aov fit.
  # NOTE(review): the with(summary(x), ...) wrapper looks vestigial -- every
  # column below is computed directly from `x`, not from the summary object.
  # Confirm before simplifying, since it changes the evaluation environment.
  with(
    summary(x),
    tibble(
      logLik = as.numeric(stats::logLik(x)),
      AIC = stats::AIC(x),
      BIC = stats::BIC(x),
      deviance = stats::deviance(x),
      nobs = stats::nobs(x)
    )
  )
}
#' @templateVar class aovlist
#' @template title_desc_tidy
#'
#' @param x An `aovlist` objects, such as those created by [stats::aov()].
#' @template param_unused_dots
#'
#' @evalRd return_tidy(
#' "term",
#' "df",
#' "sumsq",
#' "meansq",
#' "statistic",
#' "p.value",
#' "stratum"
#' )
#'
#' @inherit tidy.anova details
#'
#' @examples
#'
#' a <- aov(mpg ~ wt + qsec + Error(disp / am), mtcars)
#' tidy(a)
#'
#' @export
#' @family anova tidiers
#' @seealso [tidy()], [stats::aov()]
tidy.aovlist <- function(x, ...) {
  # must filter out Intercept stratum since it has no dimensions
  if (names(x)[1L] == "(Intercept)") {
    x <- x[-1L]
  }
  # Tidy every remaining stratum and record which stratum each row came from.
  tidied <- map_df(x, tidy, .id = "stratum")
  # aov row/element names carry padding whitespace; strip it from both the
  # term and stratum columns.
  tidied <- mutate(
    tidied,
    term = stringr::str_trim(term),
    stratum = stringr::str_trim(stratum)
  )
  as_tibble(tidied)
}
#' @templateVar class manova
#' @template title_desc_tidy
#'
#' @param x A `manova` object return from [stats::manova()].
#' @param test One of "Pillai" (Pillai's trace), "Wilks" (Wilk's lambda),
#' "Hotelling-Lawley" (Hotelling-Lawley trace) or "Roy" (Roy's greatest root)
#' indicating which test statistic should be used. Defaults to "Pillai".
#' @inheritDotParams stats::summary.manova
#'
#' @evalRd return_tidy(
#' "term",
#' "num.df",
#' "den.df",
#' "statistic",
#' "p.value",
#' pillai = "Pillai's trace.",
#' wilks = "Wilk's lambda.",
#' hl = "Hotelling-Lawley trace.",
#' roy = "Roy's greatest root."
#' )
#'
#'
#' @details Depending on which test statistic is specified only one of `pillai`,
#' `wilks`, `hl` or `roy` is included.
#'
#' @examples
#'
#' npk2 <- within(npk, foo <- rnorm(24))
#' m <- manova(cbind(yield, foo) ~ block + N * P * K, npk2)
#' tidy(m)
#'
#' @export
#' @seealso [tidy()], [stats::summary.manova()]
#' @family anova tidiers
tidy.manova <- function(x, test = "Pillai", ...) {
  # Resolve the (possibly abbreviated) test name to broom's column name for
  # that statistic; pmatch() mirrors summary.manova()'s partial matching.
  available_tests <- c("Pillai", "Wilks", "Hotelling-Lawley", "Roy")
  tidy_names <- c("pillai", "wilks", "hl", "roy")
  stat_col <- tidy_names[pmatch(test, available_tests)]
  # Output columns in the order summary.manova() reports them.
  out_names <- c("df", stat_col, "statistic", "num.df", "den.df", "p.value")
  stats_table <- summary(x, test = test, ...)$stats
  fix_data_frame(stats_table, out_names)
}
#' @templateVar class summary.manova
#' @template title_desc_tidy
#'
#' @param x A `summary.manova` object return from [stats::summary.manova()].
#' @template param_unused_dots
#'
#' @evalRd return_tidy(
#' "term",
#' "num.df",
#' "den.df",
#' "statistic",
#' "p.value",
#' pillai = "Pillai's trace.",
#' wilks = "Wilk's lambda.",
#' hl = "Hotelling-Lawley trace.",
#' roy = "Roy's greatest root."
#' )
#'
#'
#' @details Depending on which test statistic was calculated when the object
#' was created, only one of `pillai`, `wilks`, `hl` or `roy` is included.
#'
#' @examples
#'
#' npk2 <- within(npk, foo <- rnorm(24))
#'
#' m <- summary(
#' manova(cbind(yield, foo) ~ block + N * P * K, npk2),
#' test = "Wilks"
#' )
#'
#' tidy(m)
#'
#' @export
#' @seealso [tidy()], [stats::summary.manova()]
#' @family anova tidiers
tidy.summary.manova <- function(x, ...) {
  # Map from the column name summary.manova() uses for each test statistic
  # to broom's tidy column name.
  manova_tests <- c(
    "Pillai" = "pillai",
    "Wilks" = "wilks",
    "Hotelling-Lawley" = "hl",
    "Roy" = "roy"
  )
  # The summary object contains the statistic column for whichever test was
  # requested when it was created; take the first matching column's name.
  test.name <- manova_tests[[intersect(colnames(x$stats), names(manova_tests))[[1]]]]
  nn <- c("df", test.name, "statistic", "num.df", "den.df", "p.value")
  fix_data_frame(x$stats, nn)
}
#' @templateVar class TukeyHSD
#' @template title_desc_tidy
#'
#' @param x A `TukeyHSD` object return from [stats::TukeyHSD()].
#' @template param_unused_dots
#'
#' @evalRd return_tidy(
#' "tidy",
#' "comparison",
#' "estimate",
#' "conf.low",
#' "conf.high",
#' "adj.p.value"
#' )
#'
#' @examples
#'
#' fm1 <- aov(breaks ~ wool + tension, data = warpbreaks)
#' thsd <- TukeyHSD(fm1, "tension", ordered = TRUE)
#' tidy(thsd)
#'
#' # may include comparisons on multiple terms
#' fm2 <- aov(mpg ~ as.factor(gear) * as.factor(cyl), data = mtcars)
#' tidy(TukeyHSD(fm2))
#'
#' @export
#' @seealso [tidy()], [stats::TukeyHSD()]
#' @family anova tidiers
tidy.TukeyHSD <- function(x, ...) {
  # A TukeyHSD object is a list of per-term comparison matrices; tidy each
  # matrix and stack them, recording the source term via `.id`.
  tidy_one_term <- function(comparisons) {
    col_names <- c("estimate", "conf.low", "conf.high", "adj.p.value")
    # Row names hold the "a-b" comparison labels.
    fix_data_frame(comparisons, col_names, "comparison")
  }
  purrr::map_df(x, tidy_one_term, .id = "term")
}
|
# 09.08.2016 Alberto.Rovellini@vuw.ac.nz
# this is a script to read the opwall monitoring data
# 11.08.2016 Script is a bit better now. Next thing to do is to order the factors of the location
# for the plotting, but the renaming of the empty benthic types is working now and the code is
# better commented
# rename in a way that all object in the project have unique names please
require(XLConnect)
require(abind)
require(plyr)
require(ggplot2)
# NOTE(review): hard-coded working directory -- the script only runs on this
# machine as written.
setwd("/home/somros/Documents/Data/Hoga/MonitoringProgram")
# set flags (later)
# Every "Benthic" workbook found anywhere under the monitoring directory.
listOfSheets <- list.files("/home/somros/Documents/Data/Hoga/MonitoringProgram",
                           pattern = "Benthic", recursive = T)
# Site / depth / replicate codes; combined below into sheet names like "B3.F1".
setSiteName <- c("B3", "S1")
setDepthName <- c("F", "C", "S")
setReplicate <- 1:3
namesOfSheets <- as.vector(sapply(setSiteName,
                    function(x) {c(paste(x, as.vector(sapply(setDepthName,
                      function(x) {c(paste(".", x, setReplicate, sep = ""))})),
                                         sep = ""))}))
# Location = sheet name minus the trailing replicate digit, kept in
# first-appearance order via explicit factor levels.
namesOfLocations <- substr(namesOfSheets, 1, nchar(namesOfSheets)-1)
namesOfLocations <- levels(factor(namesOfLocations, levels = unique(namesOfLocations)))
# Pre-allocated here (18 slots) but used *inside* monitBentReader below via
# its global binding -- see the note on that function.
summaryData <- vector(mode = "list", length = length(namesOfSheets))
monitBentReader <- function(spreadsheet) {
  # Read every sheet of one "Benthic" workbook into a list of data frames.
  # NOTE(review): `summaryData` is captured from the global environment, so
  # the returned list begins with the 18 pre-allocated (empty) global slots
  # and the sheets read here are *appended after them* by name. This is the
  # "18 empty entries" noted at the call site, and why the caller keeps only
  # elements 19:27 -- confirm before refactoring, as fixing it here would
  # break that downstream compensation.
  book <- loadWorkbook(spreadsheet)
  bookSheets <- getSheets(book)
  for (i in bookSheets) {
    # Each sheet: first 4 columns of rows 1-202 (header + 201 data rows).
    summaryData[[i]] <- readWorksheet(book, i, startRow = 1, endRow = 202,
                                  startCol = 1, endCol = 4)
  }
  return(summaryData)
}
listOfBenthicTransectsTmp <- lapply(listOfSheets, monitBentReader)
# for some reason the function read 18 empty entries, need to investigate
# (explained: the 18 empties are the pre-allocated global `summaryData` slots
# that monitBentReader returns in front of the 9 sheets it actually reads)
listOfBenthicTransects <- lapply(listOfBenthicTransectsTmp, function(x) x[19:27]) # list of 2 lists of 9 dataframe each. each level 1 list is one location, each level 2 a transect
# lump all lists into one single list
benthicTransects <- listOfBenthicTransects[[1]]
for (i in 2:length(listOfBenthicTransects)) {
  benthicTransects <- append(benthicTransects, listOfBenthicTransects[[i]])
}
##########################################
# section to correct all the misspelled entries, which are a lot. need to come up with a function, won't be
# easy. Limit to the Benthic.Type for now. This requires some analysis of the typos
# Collect every distinct Benthic.Type spelling across all transects for review.
benthicTypeLevels <- levels(factor(unlist(lapply(benthicTransects, function(x) levels(factor(x$Benthic.Type))))))
benthicTypeLevels
# correction routine, manual specification of the faulty entries
# (note: x[x=="..."] replaces matches in *any* column, not just Benthic.Type)
benthicTransectsCorrect <- lapply(benthicTransects, function(x) {
  x[x=="algae"] <- "Algae"
  x[x=="rubble"] <- "Rubble"
  x[x=="sand"] <- "Sand"
  x[x=="silt"] <- "Silt"
  x[x=="sponge"] <- "Sponge"
  x[x=="Sponge "] <- "Sponge"
  return(x)
})
newLevels <- levels(factor(unlist(lapply(benthicTransectsCorrect, function(x) levels(factor(x$Benthic.Type))))))
#************************************************************************************************#
# routine to substitute the missing entries in Morphology with Benthic.Type and
# of Further.Info with Morphology. Aim is to have entries for all the levels of details.
# for reasons of the function it's necessary to get rid of the NAs, turn them to 0
# NOTE: NAs become the *string* "0" so they can be matched by value in
# groupsRewriter below (logical indexing with NA would misbehave).
benthicTransectsCorrect <- lapply(benthicTransectsCorrect, function(x) {
  x[is.na(x)] <- "0"
  return(x)}
)
groupsRewriter <- function(frameTransect) {
  # Fill blanks downward through the detail hierarchy:
  # Benthic.Type -> Morphology -> Further.Info. A "0" entry (the placeholder
  # substituted for NA earlier in the script) inherits the value one level
  # up. Morphology is completed first so Further.Info can inherit the
  # freshly filled Morphology values.
  blankMorph <- frameTransect$Morphology == "0"
  frameTransect$Morphology[blankMorph] <- frameTransect$Benthic.Type[blankMorph]
  blankInfo <- frameTransect$Further.Info == "0"
  frameTransect$Further.Info[blankInfo] <- frameTransect$Morphology[blankInfo]
  return(frameTransect)
}
benthicTransectsComplete <- lapply(benthicTransectsCorrect, groupsRewriter) # yep
################################################################################################
# routine to add column with coarse benthic type
# (interactive check: inspect the cleaned Benthic.Type levels of one transect)
levels(factor(benthicTransectsComplete[[1]]$Benthic.Type))
# function: take the benthic type out as vector, replace with coarse entry with similar function as above,
# add column to the data frame with the new benthic type. applied to all frames in the list
# recode the rest with a dynamic column identifier instead of benthic type to pick the desired aggregation
# method
# routine to calculate the percentage cover from the tape points per category. Column IDs should be
# specified outside the function, at the beginning of the script as flags. Flags will have to become
# function arguments if the set of scripts has to be turned into a package at some point, which it should
# Convert one transect frame of tape points into counts and percentage cover
# per benthic category. Cross-tabulates the first two columns (tape point x
# benthic type) and totals over the first dimension, so only rows complete in
# both columns contribute to the counts.
percentCoverCalc <- function(frameReplicate) {
  pointTotals <- colSums(table(frameReplicate[, 1:2]))
  data.frame(
    Type = names(pointTotals),
    Points = as.vector(pointTotals),
    Cover = as.vector(pointTotals) * 100 / sum(pointTotals),
    stringsAsFactors = FALSE,
    row.names = NULL
  )
}
# Per-transect counts and % cover for the categories actually observed.
pointsAndCover <- lapply(benthicTransectsComplete, percentCoverCalc)
# All frames must share the same category levels (the union of all observed
# levels). Build a dummy frame with every level and 0 points / 0 cover ...
dummyFrame <- as.data.frame(cbind(newLevels, rep(0, length(newLevels)), rep(0, length(newLevels))))
colnames(dummyFrame) <- names(pointsAndCover[[1]]) # rename the columns for consistency
# ... then append the dummy frame to each transect frame and re-aggregate so
# missing categories appear with zeros. Reclassing is needed because rbind of
# the character dummy frame coerces the numeric columns.
pointsAndCoverComplete <- lapply(pointsAndCover, function (x) {
  completeLevels <- as.data.frame(rbind(x, dummyFrame)) # append dummy frame
  colnames(dummyFrame) <- names(dummyFrame) # NOTE(review): no-op — assigns dummyFrame's own names back to itself; presumably meant colnames(completeLevels) <- names(dummyFrame). Confirm intent before fixing.
  completeLevels$Points <- as.numeric(as.character(completeLevels$Points)) # redefine class to numeric
  completeLevels$Cover <- as.numeric(as.character(completeLevels$Cover)) # redefine class to numeric
  # split-apply-combine: one group per benthic type, summing observed + dummy
  # rows so every type is represented exactly once.
  oneEntryList <- split(completeLevels, completeLevels$Type) # splits on the benthic type
  oneEntryListAgg <- lapply(oneEntryList, function(y) {
    z <- c(levels(factor(y[,1])), colSums(y[,2:3])) # sums points and cover per benthic type
    return(z)}
  )
  oneEntry <- as.data.frame(abind(oneEntryListAgg, along = 0)) # recombines all of it into one frame
  # polishing: restore column names and numeric classes (abind yields character)
  colnames(oneEntry) <- names(dummyFrame)
  oneEntry$Points <- as.numeric(as.character(oneEntry$Points))
  oneEntry$Cover <- as.numeric(as.character(oneEntry$Cover))
  return(oneEntry)
})
# Tag every per-transect frame with its replicate name (list element name) and
# its location (replicate name minus the trailing replicate digit).
# NOTE(review): prefer seq_along(pointsAndCoverComplete) over 1:length(...) —
# 1:length() misbehaves on empty lists.
for (i in 1:length(pointsAndCoverComplete)) {
  pointsAndCoverComplete[[i]]$Replicate <- rep(names(pointsAndCoverComplete[i]),
                                               nrow(pointsAndCoverComplete[[i]]))
  pointsAndCoverComplete[[i]]$Location <- substr(pointsAndCoverComplete[[i]]$Replicate,
                                                 1, nchar(pointsAndCoverComplete[[i]]$Replicate)-1)
}
# Transect frames are now named and % cover is calculated. Stack all frames
# row-wise into one long frame; abind returns character, so reclass and reset
# the row names afterwards.
benthicData <- as.data.frame(abind(pointsAndCoverComplete, along = 1))
benthicData$Points <- as.numeric(as.character(benthicData$Points))
benthicData$Cover <- as.numeric(as.character(benthicData$Cover))
rownames(benthicData) <- 1:nrow(benthicData)
# Summary statistics across replicates (mean and sd of % cover), via a
# recursive split-apply-combine on benthic type and then location.
typeSplit <- split(benthicData, benthicData$Type)
locationSplit <- lapply(typeSplit, function(x) split(x, x$Location))
meanAndSdList <- lapply(locationSplit, function(x) {
  lapply(x, function(y) {
    means <- mean(y$Cover)
    stdev <- sd(y$Cover)
    meanSd <- data.frame(levels(factor(y$Type)), means, stdev, levels(factor(y$Location)))
    return(meanSd)
  })
})
# Re-merge the nested lists: first over locations within each type ...
meanAndSdTmp <- lapply(meanAndSdList, function(x) as.data.frame(abind(x, along = 1)))
# ... then over types. Usual renaming and reclassing follows (abind yields
# character matrices).
meanAndSd <- as.data.frame(abind(meanAndSdTmp, along = 1))
rownames(meanAndSd) <- 1:nrow(meanAndSd)
colnames(meanAndSd) <- c("Type", "Mean", "Sd", "Location")
meanAndSd$Mean <- as.numeric(as.character(meanAndSd$Mean))
meanAndSd$Sd <- as.numeric(as.character(meanAndSd$Sd))
# Order stations for plotting, in two steps:
# 1) reorder rows to follow the index order of namesOfLocations ...
meanAndSd <- meanAndSd[order(match(meanAndSd$Location, namesOfLocations)),] # easy peasy
# 2) ... then freeze that order into the factor levels so ggplot keeps it.
meanAndSd$Location <- factor(meanAndSd$Location, levels = unique(meanAndSd$Location))
# Coarse benthic categories, one entry per level of meanAndSd$Type IN LEVEL
# ORDER. NOTE(review): this mapping is position-dependent — if the set or
# order of Type levels changes, the labels silently shift. A named lookup
# vector would be safer; confirm against levels(meanAndSd$Type) above.
levels(meanAndSd$Type)
coarse <- c("Abiotic", "Algae", "Ascidian", "Abiotic", "Abiotic", "Hard coral",
            "Other", "Other", "Abiotic", "Abiotic", "Abiotic", "Abiotic", "Soft coral",
            "Sponge", "Seagrass", "Unknown", "Abiotic")
meanAndSd$Coarse <- as.factor(rep(coarse, length(levels(meanAndSd$Location))))
# TODO: the coarse-level means still need to be aggregated (or computed
# earlier); as noted originally, "not working this way" yet.
# Plot: stacked bars of average % cover per location, filled by coarse type.
library(RColorBrewer)
par(mar = c(0, 4, 0, 0))
#display.brewer.all()
# One colour per coarse category, interpolated from the BrBG brewer palette.
nOfColors <- length(levels(meanAndSd$Coarse))
getPalette <- colorRampPalette(brewer.pal(9, "BrBG"))
#myPalette <- doublePalette[seq(3,length(doublePalette),1)]
benthicMonitoring <- ggplot(meanAndSd, aes(x=Location, y=Mean, fill=Coarse))+
  geom_bar(stat = "identity", width = .7)+
  # geom_errorbar(data = buoy3Sampela,
  #               aes(ymax = Mean + StdErr, ymin = Mean - StdErr),
  #               width = .7)+
  #scale_fill_grey(start = 0, end = 0.95)+
  scale_fill_manual(values = getPalette(nOfColors))+
  labs(y = "Average % cover")+
  theme_bw()+
  theme(panel.grid.minor = element_blank(),
        panel.grid.major = element_blank())+
  theme(plot.title = element_text(size=14, vjust=2))+
  theme(axis.title.x = element_text(size=10,vjust=-0.5),
        axis.title.y = element_text(size=10,vjust=0.5))+
  theme(axis.text.x=element_text(size=10, angle = 45,
                                 hjust = 1, vjust = .9))+
  theme(axis.text.y=element_text(size=10))
benthicMonitoring
# NOTE(review): prefer useDingbats=TRUE over T (T is reassignable); the
# absolute output path ties the script to one machine.
ggsave("/home/somros/Documents/R/exploratoryHoga/output/pics/benthicMonitoring.pdf", benthicMonitoring,
       width=10, height=4, useDingbats=T)
|
/trunk/monitoringBenthic.R
|
no_license
|
somros/exploratoryHoga
|
R
| false
| false
| 10,965
|
r
|
# 09.08.2016 Alberto.Rovellini@vuw.ac.nz
# Script to read the Opwall monitoring data (benthic transect spreadsheets).
# 11.08.2016 Script is a bit better now. Next: order the location factors for
# plotting; renaming of empty benthic types works and the code is commented.
# TODO: rename so all objects in the project have unique names.
# NOTE(review): require() is for optional deps (returns FALSE on failure);
# library() would fail loudly here, which is preferable. setwd() + absolute
# paths tie the script to one machine.
require(XLConnect)
require(abind)
require(plyr)
require(ggplot2)
setwd("/home/somros/Documents/Data/Hoga/MonitoringProgram")
# Build the expected sheet names: site x depth x replicate, e.g. "B3.F1".
listOfSheets <- list.files("/home/somros/Documents/Data/Hoga/MonitoringProgram",
                           pattern = "Benthic", recursive = T)
setSiteName <- c("B3", "S1")
setDepthName <- c("F", "C", "S")
setReplicate <- 1:3
namesOfSheets <- as.vector(sapply(setSiteName,
                                  function(x) {c(paste(x, as.vector(sapply(setDepthName,
                                                                           function(x) {c(paste(".", x, setReplicate, sep = ""))})),
                                                 sep = ""))}))
# Location = sheet name minus the trailing replicate digit, kept in file order.
namesOfLocations <- substr(namesOfSheets, 1, nchar(namesOfSheets)-1)
namesOfLocations <- levels(factor(namesOfLocations, levels = unique(namesOfLocations)))
# Pre-allocated GLOBAL list filled by monitBentReader below (see note there).
summaryData <- vector(mode = "list", length = length(namesOfSheets))
# Read every sheet of one workbook into a list of data frames (rows 1-201,
# first 4 columns).
# NOTE(review): this function captures `summaryData`, the list pre-allocated
# in the GLOBAL environment above, and indexes it by sheet NAME. Assigning by
# a new name appends to the END of the list, so the pre-allocated unnamed
# slots stay NULL — that is why the caller sees empty leading entries and has
# to subset with x[19:27] below. A local list (sheetData <- list()) would fix
# this, but the downstream [19:27] subsetting depends on the current
# behaviour — confirm and fix both together.
monitBentReader <- function(spreadsheet) {
  book <- loadWorkbook(spreadsheet)
  bookSheets <- getSheets(book)
  for (i in bookSheets) {
    summaryData[[i]] <- readWorksheet(book, i, startRow = 1, endRow = 202,
                                     startCol = 1, endCol = 4)
  }
  return(summaryData)
}
# One list per workbook; each contains NULL padding followed by the real
# sheets (see the note on monitBentReader for why the empty entries exist).
listOfBenthicTransectsTmp <- lapply(listOfSheets, monitBentReader)
# Drop the 18 empty leading entries: keep only elements 19-27 (the 9 sheets).
listOfBenthicTransects <- lapply(listOfBenthicTransectsTmp, function(x) x[19:27]) # list of 2 lists of 9 dataframe each. each level 1 list is one location, each level 2 a transect
# Flatten: lump all per-workbook lists into one single list of transects.
benthicTransects <- listOfBenthicTransects[[1]]
for (i in 2:length(listOfBenthicTransects)) {
  benthicTransects <- append(benthicTransects, listOfBenthicTransects[[i]])
}
##########################################
# Cleaning section: collapse misspelled / miscased Benthic.Type entries.
# Inspect the union of Benthic.Type levels across all transects to spot typos.
benthicTypeLevels <- levels(factor(unlist(lapply(benthicTransects, function(x) levels(factor(x$Benthic.Type))))))
benthicTypeLevels
# Correction routine: manual map of faulty spellings to canonical forms.
# NOTE(review): x[x=="algae"] matches cells in ANY column, not only
# Benthic.Type — presumably intended, but confirm.
benthicTransectsCorrect <- lapply(benthicTransects, function(x) {
  x[x=="algae"] <- "Algae"
  x[x=="rubble"] <- "Rubble"
  x[x=="sand"] <- "Sand"
  x[x=="silt"] <- "Silt"
  x[x=="sponge"] <- "Sponge"
  x[x=="Sponge "] <- "Sponge"
  return(x)
})
# Cleaned union of benthic-type levels; reused below to pad every replicate.
newLevels <- levels(factor(unlist(lapply(benthicTransectsCorrect, function(x) levels(factor(x$Benthic.Type))))))
#************************************************************************************************#
# Fill missing detail levels: empty Morphology inherits Benthic.Type, empty
# Further.Info inherits Morphology. NAs are first recoded to "0" so the
# rewriter below can match them by value.
benthicTransectsCorrect <- lapply(benthicTransectsCorrect, function(x) {
  x[is.na(x)] <- "0"
  return(x)}
)
# Fill hierarchical gaps in one transect frame: a Morphology recorded as "0"
# (a recoded NA) falls back to its Benthic.Type, and a Further.Info recorded
# as "0" falls back to the (already back-filled) Morphology.
groupsRewriter <- function(frameTransect) {
  filled <- frameTransect
  morphMissing <- filled$Morphology == "0"
  filled$Morphology[morphMissing] <- filled$Benthic.Type[morphMissing]
  infoMissing <- filled$Further.Info == "0"
  filled$Further.Info[infoMissing] <- filled$Morphology[infoMissing]
  return(filled)
}
# Apply the gap-filling rewriter to every cleaned transect frame.
benthicTransectsComplete <- lapply(benthicTransectsCorrect, groupsRewriter) # yep
################################################################################################
# Interactive sanity check: cleaned benthic types of the first transect.
levels(factor(benthicTransectsComplete[[1]]$Benthic.Type))
# TODO: add a per-frame column with a coarse benthic type, driven by a dynamic
# column identifier so the desired aggregation level can be picked.
# Next step computes percentage cover from the tape points per category.
# Column IDs should become flags at the top of the script (and function
# arguments if this set of scripts is ever turned into a package, which it
# should be).
# Convert one transect frame of tape points into counts and percentage cover
# per benthic category. Cross-tabulates the first two columns (tape point x
# benthic type) and totals over the first dimension, so only rows complete in
# both columns contribute to the counts.
percentCoverCalc <- function(frameReplicate) {
  pointTotals <- colSums(table(frameReplicate[, 1:2]))
  data.frame(
    Type = names(pointTotals),
    Points = as.vector(pointTotals),
    Cover = as.vector(pointTotals) * 100 / sum(pointTotals),
    stringsAsFactors = FALSE,
    row.names = NULL
  )
}
pointsAndCover <- lapply(benthicTransectsComplete, percentCoverCalc)
# need to have all frames with the same levels. they have to be the sum of all the available levels
# first build a dummy frame with all the levels and 0 as entries for cover and points
dummyFrame <- as.data.frame(cbind(newLevels, rep(0, length(newLevels)), rep(0, length(newLevels))))
colnames(dummyFrame) <- names(pointsAndCover[[1]]) # rename the columns for consistency
# then append the dummy data frame at the end of each transect data frame. A bit of reorganization,
# column renaming and class manipulation is in the routine too to keep things as smooth as possible
pointsAndCoverComplete <- lapply(pointsAndCover, function (x) {
completeLevels <- as.data.frame(rbind(x, dummyFrame)) # append dummy frame
colnames(dummyFrame) <- names(dummyFrame) # rename columns as step above changes the names
completeLevels$Points <- as.numeric(as.character(completeLevels$Points)) # redefine class to numeric
completeLevels$Cover <- as.numeric(as.character(completeLevels$Cover)) # redefine class to numeric
# split-apply-combine routine follows
oneEntryList <- split(completeLevels, completeLevels$Type) # splits on the benthic type
oneEntryListAgg <- lapply(oneEntryList, function(y) {
z <- c(levels(factor(y[,1])), colSums(y[,2:3])) # sums points and cover per benthic type
return(z)}
)
oneEntry <- as.data.frame(abind(oneEntryListAgg, along = 0)) # recombines all of it into one frame
# polishing follows
colnames(oneEntry) <- names(dummyFrame)
oneEntry$Points <- as.numeric(as.character(oneEntry$Points))
oneEntry$Cover <- as.numeric(as.character(oneEntry$Cover))
return(oneEntry)
})
# adds a column with the corresponding replicate name to each dataframe
for (i in 1:length(pointsAndCoverComplete)) {
pointsAndCoverComplete[[i]]$Replicate <- rep(names(pointsAndCoverComplete[i]),
nrow(pointsAndCoverComplete[[i]]))
pointsAndCoverComplete[[i]]$Location <- substr(pointsAndCoverComplete[[i]]$Replicate,
1, nchar(pointsAndCoverComplete[[i]]$Replicate)-1)
}
# now the transect frames are correctly named, organized and the %cover is calculated.
# all the dataframes are merged together by row, renaming and reclassing follows
benthicData <- as.data.frame(abind(pointsAndCoverComplete, along = 1))
benthicData$Points <- as.numeric(as.character(benthicData$Points))
benthicData$Cover <- as.numeric(as.character(benthicData$Cover))
rownames(benthicData) <- 1:nrow(benthicData)
# now can do summary statistics over the replicate, mean and sd or whatever else.
# recursive split-apply-combine on the benthic type and the location
typeSplit <- split(benthicData, benthicData$Type)
locationSplit <- lapply(typeSplit, function(x) split(x, x$Location))
meanAndSdList <- lapply(locationSplit, function(x) {
lapply(x, function(y) {
means <- mean(y$Cover)
stdev <- sd(y$Cover)
meanSd <- data.frame(levels(factor(y$Type)), means, stdev, levels(factor(y$Location)))
return(meanSd)
})
})
# nested lists on the second dimension are remerged first on the benthic type factor...
meanAndSdTmp <- lapply(meanAndSdList, function(x) as.data.frame(abind(x, along = 1)))
# ... and then on the location factor. Usual renaming and reclassing follows
meanAndSd <- as.data.frame(abind(meanAndSdTmp, along = 1))
rownames(meanAndSd) <- 1:nrow(meanAndSd)
colnames(meanAndSd) <- c("Type", "Mean", "Sd", "Location")
meanAndSd$Mean <- as.numeric(as.character(meanAndSd$Mean))
meanAndSd$Sd <- as.numeric(as.character(meanAndSd$Sd))
# order factors of stations for plot: two steps required:
# first order the data frame according to the indices of the namesOfLocations object
meanAndSd <- meanAndSd[order(match(meanAndSd$Location, namesOfLocations)),] # easy peasy
# then assign unique values to the levels of the Location column to keep the order for the plot
meanAndSd$Location <- factor(meanAndSd$Location, levels = unique(meanAndSd$Location))
# abiotic types can be lumped into one single type. However, to do that I'd wait to see other datasets
# create column for coarse benthic type
levels(meanAndSd$Type)
coarse <- c("Abiotic", "Algae", "Ascidian", "Abiotic", "Abiotic", "Hard coral",
"Other", "Other", "Abiotic", "Abiotic", "Abiotic", "Abiotic", "Soft coral",
"Sponge", "Seagrass", "Unknown", "Abiotic")
meanAndSd$Coarse <- as.factor(rep(coarse, length(levels(meanAndSd$Location))))
# need to add the means or do this early on, not working this way
# plot
library(RColorBrewer)
par(mar = c(0, 4, 0, 0))
#display.brewer.all()
nOfColors <- length(levels(meanAndSd$Coarse))
getPalette <- colorRampPalette(brewer.pal(9, "BrBG"))
#myPalette <- doublePalette[seq(3,length(doublePalette),1)]
benthicMonitoring <- ggplot(meanAndSd, aes(x=Location, y=Mean, fill=Coarse))+
geom_bar(stat = "identity", width = .7)+
# geom_errorbar(data = buoy3Sampela,
# aes(ymax = Mean + StdErr, ymin = Mean - StdErr),
# width = .7)+
#scale_fill_grey(start = 0, end = 0.95)+
scale_fill_manual(values = getPalette(nOfColors))+
labs(y = "Average % cover")+
theme_bw()+
theme(panel.grid.minor = element_blank(),
panel.grid.major = element_blank())+
theme(plot.title = element_text(size=14, vjust=2))+
theme(axis.title.x = element_text(size=10,vjust=-0.5),
axis.title.y = element_text(size=10,vjust=0.5))+
theme(axis.text.x=element_text(size=10, angle = 45,
hjust = 1, vjust = .9))+
theme(axis.text.y=element_text(size=10))
benthicMonitoring
ggsave("/home/somros/Documents/R/exploratoryHoga/output/pics/benthicMonitoring.pdf", benthicMonitoring,
width=10, height=4, useDingbats=T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prune.phylo.rank.R
\name{prune.phylo.rank}
\alias{prune.phylo.rank}
\title{Prune Phylogenies to Higher Ranks}
\usage{
prune.phylo.rank(phy, tax, rank = "genus")
}
\arguments{
\item{phy}{An object of class \code{\link[ape]{phylo}}.}
\item{tax}{A data frame containing taxonomic information for the tip labels
in \code{phy}.}
\item{rank}{A character string giving the name of a column (= taxonomic rank)
in \code{tax} to which \code{phy} will be pruned.}
}
\value{
An object of class \code{\link[ape]{phylo}}.
}
\description{
Prune tips of a certain taxonomic rank from an object of class
\code{\link[ape]{phylo}} and obtain a new \code{phylo} object whose tips
are of a higher rank.
}
\seealso{
\code{\link{addTips}} and \code{\link{addSingleTip}} to add terminal nodes to a phylogeny.
}
|
/man/prune.phylo.rank.Rd
|
no_license
|
heibl/megaptera
|
R
| false
| true
| 871
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prune.phylo.rank.R
\name{prune.phylo.rank}
\alias{prune.phylo.rank}
\title{Prune Phylogenies to Higher Ranks}
\usage{
prune.phylo.rank(phy, tax, rank = "genus")
}
\arguments{
\item{phy}{An object of class \code{\link[ape]{phylo}}.}
\item{tax}{A data frame containing taxonomic information for the tip labels
in \code{phy}.}
\item{rank}{A character string giving the name of a column (= taxonomic rank)
in \code{tax} to which \code{phy} will be pruned.}
}
\value{
An object of class \code{\link[ape]{phylo}}.
}
\description{
Prune tips of a certain taxonomic rank from an object of class
\code{\link[ape]{phylo}} and obtain a new \code{phylo} object whose tips
are of a higher rank.
}
\seealso{
\code{\link{addTips}} and \code{\link{addSingleTip}} to add terminal nodes to a phylogeny.
}
|
.DS_Store
visualisations
*.xlsx
|
/.gitignore.R
|
no_license
|
charleaj/bti-tech-lab2
|
R
| false
| false
| 32
|
r
|
.DS_Store
visualisations
*.xlsx
|
# Generate a figure/animation showing how ROC curves originate from two
# overlapping test-statistic distributions (null vs. test).
# NOTE(review): rm(list=ls()) wipes the caller's workspace — acceptable only
# for a standalone demo script; hostile if sourced from a live session.
rm(list=ls())
require(warningsignals)
null <- rnorm(1000, 5.5, 1) # Consider a -1, 1 range?
test <- rnorm(1000, 6.5, 1)
# Build a minimal "pow" object from two sampled distributions.
# Layout mirrors the original: two unnamed NULL slots (from the original
# pre-allocation) followed by dummy "gauss" model objects for the test and
# null slots and the raw null/test distributions.
init.pow <- function(null, test){
  gaussStub <- list(loglik = 0, k = 0)
  class(gaussStub) <- "gauss"
  pow <- c(vector("list", length = 2),
           list(test = gaussStub, null = gaussStub,
                null_dist = null, test_dist = test))
  class(pow) <- "pow"
  pow
}
pow <- init.pow(null,test)
## ROC film: sweep a decision threshold across the distributions and record
## the (false positive, true positive) point it induces, one frame per value.
M <- 10 # frames
# NOTE(review): `t` shadows base::t() for the rest of the script.
t <- seq(4,9,length=M) # sequence of thresholds
# Shell cleanup/encoding via system() is unix-only and assumes ffmpeg on PATH.
system("rm roc*.png roc.mp4")
roc_pts <- matrix(NA, nrow=2, ncol=M)
for(i in 1:M){
  png(paste("roc_", i, ".png", sep=""),
      width=400, height=800)
  par(mfrow=c(2,1), mar=c(4,6,4,2))
  # Top panel: the two distributions with the current threshold line;
  # roc_fig returns the (FP, TP) pair for that threshold.
  roc_pts[,i] <- roc_fig(null, test, thresh=t[i],
                         xlab="Test Statistic", ylim=c(0,.54),
                         main="", legend=F, cex=2, cex.axis=2.,
                         cex.lab=3, color.line="black", lwd=6,
                         numeric_legend=T, cex.legend=2)
  ## Bottom panel: accumulated points on the ROC curve so far.
  plot(t(roc_pts), pch=19, cex=3, xlim=c(0,1),
       ylim=c(0,1), ylab="True Positive",
       xlab="False Positive", cex.lab=3, cex.axis=2.)
  abline(v=roc_pts[1,i], col="blue", lwd=4, lty=2)
  abline(h=roc_pts[2,i], col="red", lwd=4, lty=2)
  ## Overlay the full ROC curve on the final frame only.
  if(i==M) roc_curve(pow, add=TRUE, lwd=3)
  dev.off()
}
system("ffmpeg -qscale 2 -r 2 -b 9600 -i roc_%d.png roc.mp4")
## Second film: ROC curve as the test distribution moves away from the null.
tests <- lapply(1:M, function(i) rnorm(1000, i*5/M+5.2, 1))
system("rm pow*.png pow.mp4")
for(i in 1:M){
  png(paste("pow_", i, ".png", sep=""), width=400, height=800)
  par(mfrow=c(2,1), mar=c(4,6,4,2))
  pow <- init.pow(null,tests[[i]])
  plot(pow, show_text=FALSE, cex.lab=3, cex.axis=2,
       xlab="Test Statistic", xlim=c(2,12))
  roc_curve(pow, cex=2, cex.lab=3, cex.axis=2, lwd=3)
  dev.off()
}
system("ffmpeg -qscale 2 -r 2 -b 9600 -i pow_%d.png pow.mp4")
### Social report: commit this script and tag the run (socialR side effects).
require(socialR)
script <- "roc_figure.R"
gitaddr <- gitcommit(script) # ok to do last since quick-run script
tags="warningsignals, stochpop"
### Social report
require(socialR)
script <- "roc_figure.R"
gitaddr <- gitcommit(script) # ok to do last since quick-run script
tags="warningsignals, stochpop"
|
/demo/esa/roc_figure.R
|
no_license
|
cboettig/warningsignals
|
R
| false
| false
| 2,220
|
r
|
# Generate a figure to show how ROC curves originate from the distributions
rm(list=ls())
require(warningsignals)
null <- rnorm(1000, 5.5, 1) # Consider a -1, 1 range?
test <- rnorm(1000, 6.5, 1)
# Build a minimal "pow" object from two sampled distributions.
# Layout mirrors the original: two unnamed NULL slots (from the original
# pre-allocation) followed by dummy "gauss" model objects for the test and
# null slots and the raw null/test distributions.
init.pow <- function(null, test){
  gaussStub <- list(loglik = 0, k = 0)
  class(gaussStub) <- "gauss"
  pow <- c(vector("list", length = 2),
           list(test = gaussStub, null = gaussStub,
                null_dist = null, test_dist = test))
  class(pow) <- "pow"
  pow
}
pow <- init.pow(null,test)
## ROC film
M <- 10 # frames
t <- seq(4,9,length=M) # sequence of thresholds
system("rm roc*.png roc.mp4")
roc_pts <- matrix(NA, nrow=2, ncol=M)
for(i in 1:M){
png(paste("roc_", i, ".png", sep=""),
width=400, height=800)
par(mfrow=c(2,1), mar=c(4,6,4,2))
roc_pts[,i] <- roc_fig(null, test, thresh=t[i],
xlab="Test Statistic", ylim=c(0,.54),
main="", legend=F, cex=2, cex.axis=2.,
cex.lab=3, color.line="black", lwd=6,
numeric_legend=T, cex.legend=2)
## Show points on the ROC Curve
plot(t(roc_pts), pch=19, cex=3, xlim=c(0,1),
ylim=c(0,1), ylab="True Positive",
xlab="False Positive", cex.lab=3, cex.axis=2.)
abline(v=roc_pts[1,i], col="blue", lwd=4, lty=2)
abline(h=roc_pts[2,i], col="red", lwd=4, lty=2)
## Show Curve
if(i==M) roc_curve(pow, add=TRUE, lwd=3)
dev.off()
}
system("ffmpeg -qscale 2 -r 2 -b 9600 -i roc_%d.png roc.mp4")
## Film showing ROC curve as distributions move apart
tests <- lapply(1:M, function(i) rnorm(1000, i*5/M+5.2, 1))
system("rm pow*.png pow.mp4")
for(i in 1:M){
png(paste("pow_", i, ".png", sep=""), width=400, height=800)
par(mfrow=c(2,1), mar=c(4,6,4,2))
pow <- init.pow(null,tests[[i]])
plot(pow, show_text=FALSE, cex.lab=3, cex.axis=2,
xlab="Test Statistic", xlim=c(2,12))
roc_curve(pow, cex=2, cex.lab=3, cex.axis=2, lwd=3)
dev.off()
}
system("ffmpeg -qscale 2 -r 2 -b 9600 -i pow_%d.png pow.mp4")
### Social report
require(socialR)
script <- "roc_figure.R"
gitaddr <- gitcommit(script) # ok to do last since quick-run script
tags="warningsignals, stochpop"
|
library( ReporteRs )
library( ggplot2)
# get data
source("data_pre_processing.R")
data <- data_pre_processing()
#################### visualisation of data analysis ############################
# Creation of mydoc, a mydocx object
html = bsdoc( title = 'Data_visualisation' )
html = addTitle(html, value = "Report", 1, par.properties = parCenter())
# add a title to the section
html = addTitle( html, value = "Relations between variables:", 2 )
# add a title for the following matrix
html = addParagraph( html, value = "Correlation matrix", par.properties = parCenter() )
# corrrelation matrix
matrix.cor <- cor(data[,.(date = as.numeric(date), orders, drivers_available)])
# add a correlation matrix into html
html = addFlexTable( html, vanilla.table(matrix.cor, add.rownames = TRUE), par.properties = parCenter() )
# add a paragraphe
html = addParagraph( html, value = "The table above shows that there is a relatively high correlation between 'date' and 'orders' and the
correlation between 'orders' and 'drivers_available' is not low. We assume that there might be a relationship
between 'date' and 'orders', 'drivers_available' and 'orders'. In order to make it more persuadable, we
display how they evoluate along each other as follows.",
par.properties = parCenter() )
# plot how orders evolute along time
p <- ggplot( data, aes(date, orders)) +
geom_line(colour = "blue") +
ggtitle("Orders evolution along time")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
# add a paragraph
html = addParagraph( html, value = "With the plot above, we can roughly estimate that a polynomial regression model can fit this dataset.
The curve is increasing slowly at the first beginning. After the July 2013, it starts growing significantly.
This way of growth looks like polynomial regression.
",
par.properties = parCenter() )
# plot how drivers_available evolute along time
p <- ggplot( data, aes(date, drivers_available)) +
geom_line(colour = "blue") +
ggtitle("Available drivers evolution along time")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
html = addParagraph( html, value = "Accoding to the plot above, we find out that 'drivers_available' keeps increasing along 'date'. If we
compare it with the plot representing the relation between 'orders' and 'date', we can approximately observe
that their variations are almost simultaneous. the variation of 'drivers_available' is smaller than the one
of 'orders'.
",
par.properties = parCenter() )
# orders evolution along available drivers
avg.dri <- data[, .(qty = mean(orders)), by = .(drivers_available)]
avg.dri <- avg.dri[order(drivers_available, decreasing = FALSE )]
p <- ggplot(avg.dri, aes(drivers_available, qty)) +
geom_point(colour = "blue") +
ggtitle("Orders evolution along available drivers") +
ylab("orders")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 9
)
html = addParagraph( html, value = "This graph demonstrates the relation between 'orders' and 'drivers_available'. From this graph, it seems that
the relationship can be explained by a linear regression model or a rapidly growing polynomial regression.
",
par.properties = parCenter() )
########################### Tuning model ############################
# add a title to the section
html = addTitle( html, value = "Construction of prediction model:", 2 )
html = addParagraph( html, value = "As what we have mentioned in the previous chapter, it seems that a polynomial regression can fit well.
So we start with a polynomial regression using 'date' and 'drivers_available' as independant varialbe,
and 'orders' as dependant variable.
",
par.properties = parCenter() )
# Fit a polynomial regression of orders on date (cubic) and drivers_available
# (quintic); in-sample predictions are compared against actual orders.
# NOTE(review): data[,...] syntax implies `data` is a data.table (created in
# data_pre_processing.R) — confirm data.table is loaded there.
lr <- lm(orders ~ as.numeric(date) + I((as.numeric(date))^2) +
           I((as.numeric(date))^3) + drivers_available +
           I(drivers_available^2) + I(drivers_available^3) +
           I(drivers_available^4) + I(drivers_available^5),
         data)
# store primary predictions in data frame
data$pred <- lr$fitted.values
# NOTE(review): this is the root-mean-square error (RMSE), not a "mean cost";
# the label in the report is misleading.
cost <- sqrt(mean((data$pred - data$orders)^2))
p <- ggplot(data, aes(x=date)) +
  geom_line(aes(y=orders, colour="Actual orders")) +
  geom_line(aes(y=pred, colour="Prediction")) +
  scale_colour_discrete("") +
  ggtitle("Actual orders VS Forecast orders") +
  ylab("orders")
# add a plot into html
html = addPlot( html,
                function() print(p),
                width = 9, height = 7
)
html = addParagraph(html, value = paste("Mean cost:", cost), par.properties = parCenter())
# add a paragraph
html = addParagraph( html, value = "According to the graph above, we can observe that our predictions fit roughly the actual orders.
But there are still some peaks of falls in red (actual orders). Due to this matter, we assume that these peaks
might be related to circumustances (the only varialble left). Therefore, a calculation of prediction errors is
done as follows: ",
par.properties = parCenter() )
# calculate average error by circumstance
err.circ <- data[, .(error_average = mean((orders - pred)/orders)), by=.(circumstance) ]
# add an error matrix into html
html = addFlexTable( html, vanilla.table(err.circ), par.properties = parCenter() )
# add a paragraph
html = addParagraph( html, value = "From this table, we find that the errors for dry and rainy circumstance are not huge but they are both
negative. In comparison, the one of very_rainy weather is positive and much larger (6 times) than them
and the error of strike circumstance is even larger (10 times) than them. Depending to this fact,
we assume that different circumstance should be processed differently.",
par.properties = parCenter() )
circs <- unique(data[, circumstance])
# Report a correlation matrix per circumstance level (dry/rainy/etc.).
for (i in 1:length(circs)) {
  circ <- circs[i]
  matrix.cor.tmp <- cor(data[circumstance==circ, .( date = as.numeric(date), orders, drivers_available)])
  # add an error matrix into html
  html = addParagraph( html, paste("Correlation matrix -", circ), par.properties = parCenter() )
  html = addFlexTable( html, vanilla.table(matrix.cor.tmp, add.rownames = TRUE), par.properties = parCenter() )
}
# One polynomial model per circumstance; per-model RMSE collected in
# matrix.cost and the fitted lm objects kept in list.lr for later use.
matrix.cost <- matrix(nrow=1, ncol=4, dimnames = list(c("cost"), circs))
list.lr <- list()
# NOTE(review): ncol=4 hard-codes the number of circumstance levels — use
# length(circs) to stay robust if the data changes.
for (i in 1:length(circs)) {
  circ <- circs[i]
  lr <- lm(orders ~ as.numeric(date) + I((as.numeric(date))^2) +
             I((as.numeric(date))^3) + drivers_available +
             I(drivers_available^2) + I(drivers_available^3) +
             I(drivers_available^4) + I(drivers_available^5),
           data[circumstance==circ])
  list.lr[[circ]] <- lr
  # store primary predictions in data frame (data.table in-place assignment)
  data[circumstance==circ, pred1 := lr$fitted.values]
  # per-circumstance RMSE (labelled "mean cost" in the report)
  cost <- sqrt(mean((data[circumstance==circ]$pred1 - data[circumstance==circ]$orders)^2))
  matrix.cost["cost", circ] <- cost
}
# calculate mean cost
cost <- sqrt(mean((data$pred1 - data$orders)^2))
# plot actual orders and predictions
p <- ggplot(data, aes(x=date)) +
geom_line(aes(y=orders, colour="Actual orders")) +
geom_line(aes(y=pred1, colour="Prediction")) +
scale_colour_discrete("") +
ggtitle("Actual orders VS Forecast orders") +
ylab("orders")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
# plot errors of predictions
p <- ggplot( data, aes(date, (orders-pred1)^2)) +
geom_line(colour = "red") +
ylab("error") +
ggtitle("Errors evolution along time")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
html = addParagraph(html, value = "Mean cost per circumnstance", par.properties = parCenter())
html = addFlexTable( html, vanilla.table(matrix.cost), par.properties = parCenter() )
html = addParagraph(html, value = paste("Mean cost:", cost), par.properties = parCenter())
html = addParagraph(html,
value = "With constructing different models for different circumnstances, the cost is decreased. This means the performance
gets better. Therefore, we are constructing one model per circumstance and each model is based on polynomial
regression with two varialbles (date & drivers_avaiable). This model can be used for forecasting the orders of
future dates.",
par.properties = parCenter())
# write the doc
writeDoc( html, file = "examples/htmloutput/data_visualisation.html" )
|
/data_analysing.R
|
no_license
|
jinxing1/ravelin_challenge
|
R
| false
| false
| 9,748
|
r
|
library( ReporteRs )
library( ggplot2)
# get data
source("data_pre_processing.R")
data <- data_pre_processing()
#################### visualisation of data analysis ############################
# Creation of mydoc, a mydocx object
html = bsdoc( title = 'Data_visualisation' )
html = addTitle(html, value = "Report", 1, par.properties = parCenter())
# add a title to the section
html = addTitle( html, value = "Relations between variables:", 2 )
# add a title for the following matrix
html = addParagraph( html, value = "Correlation matrix", par.properties = parCenter() )
# corrrelation matrix
matrix.cor <- cor(data[,.(date = as.numeric(date), orders, drivers_available)])
# add a correlation matrix into html
html = addFlexTable( html, vanilla.table(matrix.cor, add.rownames = TRUE), par.properties = parCenter() )
# add a paragraphe
html = addParagraph( html, value = "The table above shows that there is a relatively high correlation between 'date' and 'orders' and the
correlation between 'orders' and 'drivers_available' is not low. We assume that there might be a relationship
between 'date' and 'orders', 'drivers_available' and 'orders'. In order to make it more persuadable, we
display how they evoluate along each other as follows.",
par.properties = parCenter() )
# plot how orders evolute along time
p <- ggplot( data, aes(date, orders)) +
geom_line(colour = "blue") +
ggtitle("Orders evolution along time")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
# add a paragraph
html = addParagraph( html, value = "With the plot above, we can roughly estimate that a polynomial regression model can fit this dataset.
The curve is increasing slowly at the first beginning. After the July 2013, it starts growing significantly.
This way of growth looks like polynomial regression.
",
par.properties = parCenter() )
# plot how drivers_available evolute along time
p <- ggplot( data, aes(date, drivers_available)) +
geom_line(colour = "blue") +
ggtitle("Available drivers evolution along time")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
html = addParagraph( html, value = "Accoding to the plot above, we find out that 'drivers_available' keeps increasing along 'date'. If we
compare it with the plot representing the relation between 'orders' and 'date', we can approximately observe
that their variations are almost simultaneous. the variation of 'drivers_available' is smaller than the one
of 'orders'.
",
par.properties = parCenter() )
# orders evolution along available drivers
avg.dri <- data[, .(qty = mean(orders)), by = .(drivers_available)]
avg.dri <- avg.dri[order(drivers_available, decreasing = FALSE )]
p <- ggplot(avg.dri, aes(drivers_available, qty)) +
geom_point(colour = "blue") +
ggtitle("Orders evolution along available drivers") +
ylab("orders")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 9
)
html = addParagraph( html, value = "This graph demonstrates the relation between 'orders' and 'drivers_available'. From this graph, it seems that
the relationship can be explained by a linear regression model or a rapidly growing polynomial regression.
",
par.properties = parCenter() )
########################### Tuning model ############################
# add a title to the section
html = addTitle( html, value = "Construction of prediction model:", 2 )
html = addParagraph( html, value = "As what we have mentioned in the previous chapter, it seems that a polynomial regression can fit well.
So we start with a polynomial regression using 'date' and 'drivers_available' as independant varialbe,
and 'orders' as dependant variable.
",
par.properties = parCenter() )
# predict with polynomial regression eand plot
lr <- lm(orders ~ as.numeric(date) + I((as.numeric(date))^2) +
I((as.numeric(date))^3) + drivers_available +
I(drivers_available^2) + I(drivers_available^3) +
I(drivers_available^4) + I(drivers_available^5),
data)
# store primary predictions in data frame
data$pred <- lr$fitted.values
# calculate mean cost
cost <- sqrt(mean((data$pred - data$orders)^2))
p <- ggplot(data, aes(x=date)) +
geom_line(aes(y=orders, colour="Actual orders")) +
geom_line(aes(y=pred, colour="Prediction")) +
scale_colour_discrete("") +
ggtitle("Actual orders VS Forecast orders") +
ylab("orders")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
html = addParagraph(html, value = paste("Mean cost:", cost), par.properties = parCenter())
# add a paragraph
html = addParagraph( html, value = "According to the graph above, we can observe that our predictions fit roughly the actual orders.
But there are still some peaks of falls in red (actual orders). Due to this matter, we assume that these peaks
might be related to circumustances (the only varialble left). Therefore, a calculation of prediction errors is
done as follows: ",
par.properties = parCenter() )
# calculate average error by circumstance
err.circ <- data[, .(error_average = mean((orders - pred)/orders)), by=.(circumstance) ]
# add an error matrix into html
html = addFlexTable( html, vanilla.table(err.circ), par.properties = parCenter() )
# add a paragraph
html = addParagraph( html, value = "From this table, we find that the errors for dry and rainy circumstance are not huge but they are both
negative. In comparison, the one of very_rainy weather is positive and much larger (6 times) than them
and the error of strike circumstance is even larger (10 times) than them. Depending to this fact,
we assume that different circumstance should be processed differently.",
par.properties = parCenter() )
circs <- unique(data[, circumstance])
# add correlation matrix of each circumstance into report
for (i in 1:length(circs)) {
circ <- circs[i]
matrix.cor.tmp <- cor(data[circumstance==circ, .( date = as.numeric(date), orders, drivers_available)])
# add an error matrix into html
html = addParagraph( html, paste("Correlation matrix -", circ), par.properties = parCenter() )
html = addFlexTable( html, vanilla.table(matrix.cor.tmp, add.rownames = TRUE), par.properties = parCenter() )
}
# Initialise a cost matrix with one column per circumstance.
# FIX: sized from length(circs) instead of a hard-coded 4, so matrix()
# does not error (dimnames mismatch) if the number of distinct
# circumstances ever changes.
matrix.cost <- matrix(nrow = 1, ncol = length(circs),
                      dimnames = list(c("cost"), circs))
list.lr <- list()
# Fit one polynomial regression per circumstance (cubic in date, quintic
# in drivers_available), store its fitted values back into `data` and
# record the per-circumstance RMSE ("cost").
for (i in seq_along(circs)) {
  circ <- circs[i]
  lr <- lm(orders ~ as.numeric(date) + I((as.numeric(date))^2) +
             I((as.numeric(date))^3) + drivers_available +
             I(drivers_available^2) + I(drivers_available^3) +
             I(drivers_available^4) + I(drivers_available^5),
           data[circumstance == circ])
  list.lr[[circ]] <- lr
  # store primary predictions in the main data.table (by reference)
  data[circumstance == circ, pred1 := lr$fitted.values]
  # RMSE for this circumstance
  cost <- sqrt(mean((data[circumstance == circ]$pred1 - data[circumstance == circ]$orders)^2))
  matrix.cost["cost", circ] <- cost
}
# Overall RMSE across all circumstances
cost <- sqrt(mean((data$pred1 - data$orders)^2))
# plot actual orders and predictions
p <- ggplot(data, aes(x=date)) +
geom_line(aes(y=orders, colour="Actual orders")) +
geom_line(aes(y=pred1, colour="Prediction")) +
scale_colour_discrete("") +
ggtitle("Actual orders VS Forecast orders") +
ylab("orders")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
# plot errors of predictions
p <- ggplot( data, aes(date, (orders-pred1)^2)) +
geom_line(colour = "red") +
ylab("error") +
ggtitle("Errors evolution along time")
# add a plot into html
html = addPlot( html,
function() print(p),
width = 9, height = 7
)
html = addParagraph(html, value = "Mean cost per circumnstance", par.properties = parCenter())
html = addFlexTable( html, vanilla.table(matrix.cost), par.properties = parCenter() )
html = addParagraph(html, value = paste("Mean cost:", cost), par.properties = parCenter())
html = addParagraph(html,
value = "With constructing different models for different circumnstances, the cost is decreased. This means the performance
gets better. Therefore, we are constructing one model per circumstance and each model is based on polynomial
regression with two varialbles (date & drivers_avaiable). This model can be used for forecasting the orders of
future dates.",
par.properties = parCenter())
# write the doc
writeDoc( html, file = "examples/htmloutput/data_visualisation.html" )
|
test_that("Type of elements dataset:", {
datasetToTest <- resTspToHour(analyse_crps_news(dl_data_from(as.numeric(as.POSIXct("2018-12-11 1:00:00 EST"))), c("BTC", "ETH"))
, c("BTC", "ETH"))
expect_is(datasetToTest$time, "POSIXct")
expect_is(datasetToTest$BTC, "numeric")
expect_is(datasetToTest$ETH, "numeric")
})
|
/CryptoShiny/tests/testthat/test-resTspToHour.R
|
permissive
|
souhailelaissaoui/Crypto-Trading-Tools
|
R
| false
| false
| 321
|
r
|
# Unit test for resTspToHour(): the resampled hourly data set should have a
# POSIXct `time` column and numeric per-symbol columns (BTC, ETH).
# NOTE(review): depends on the project functions dl_data_from(),
# analyse_crps_news() and resTspToHour(), and presumably downloads live
# data starting 2018-12-11 — confirm it is network-gated before CI use.
test_that("Type of elements dataset:", {
  datasetToTest <- resTspToHour(analyse_crps_news(dl_data_from(as.numeric(as.POSIXct("2018-12-11 1:00:00 EST"))), c("BTC", "ETH"))
                                , c("BTC", "ETH"))
  expect_is(datasetToTest$time, "POSIXct")
  expect_is(datasetToTest$BTC, "numeric")
  expect_is(datasetToTest$ETH, "numeric")
})
|
# 4. faza: Analiza podatkov
require(ggdendro)
#Napoved prihodkov od prodaje za panogo z največjimi prihodki za naslenjih 6 let
najboljsa <- melt(prva, Panoge.zap., id.vars =c("Panoga", "Leto"),
measure.vars = c("Prihodki_od_prodaje_v_tisoc_EUR", "Bruto_poslovni_presezek_v_tisoc_EUR", "Stevilo_zaposlenih"),
variable.name = "Spremenljivka", value.name = "Stevilo")
data1 <- subset(najboljsa, Spremenljivka == "Prihodki_od_prodaje_v_tisoc_EUR")
fit <- lm(data = data1, Stevilo ~ Leto)
l <- data.frame(Leto=seq(2005, 2023))
predict(fit, l)
napoved <- l %>% mutate(Stevilo=predict(fit, .))
lin1 <- ggplot(data1, aes(x=Leto,y=Stevilo/1e6)) +
geom_line() +
geom_point(data=napoved, aes(x=Leto, y=Stevilo/1e6), color="blue", size=3) +
geom_smooth(method = 'lm', se = FALSE) +
xlab("Leto") + ylab("Prihodki od prodaje v milijardah EUR")
#za shiny
Napoved <- function(data){
fit <- lm(data = data, Stevilo ~ Leto)
l <- data.frame(Leto=seq(2005, 2023))
predict(fit, l)
napoved <- l %>% mutate(Stevilo=predict(fit, .))
return(napoved)
}
#razvrscanje
podatki <- Regije1 %>% filter(Leto==2016) %>% select(Regija, Stevilo_oseb)
regija <- podatki$Regija
row.names(podatki) <- podatki$Regija
podatki$Regija <- NULL
#prilagoditev
lestvica <- scale(podatki)
#matrika različnosti (class: dist)
matrika <- dist(lestvica)
#hierarhično razvrščanje v 3 skupine
n <- 3
skupina <- hclust(matrika)
#izris
ggdendrogram(skupina, rotate = FALSE, size = 2)
#dolocimo razvrstitev skupina
skupina <- hclust(matrika) %>% cutree(n)
skupine <- data.frame(regija, skupina)
skupine <- remove_rownames(skupine)
#katere regije so v skupini 2
skupine %>% filter(skupina==2) %>% select(regija)
zemljevid3 <- ggplot(left_join(zemljevid, skupine, by = c("NAME_1"='regija'))) +
geom_polygon(aes(x = long, y = lat, group = group, fill = skupina)) +
labs(fill="Lestvica") +
ggtitle("Razvrščanje glede na število oseb, ki delajo")
|
/analiza/analiza.r
|
permissive
|
melamalej/APPR-2018-19
|
R
| false
| false
| 1,976
|
r
|
# Phase 4: data analysis.
require(ggdendro)
# Forecast sales revenue for the top-revenue industry: reshape the source
# table to long format, keep the revenue variable, fit a linear trend on
# year, and extrapolate through 2023.
najboljsa <- melt(prva, Panoge.zap., id.vars =c("Panoga", "Leto"),
                  measure.vars = c("Prihodki_od_prodaje_v_tisoc_EUR", "Bruto_poslovni_presezek_v_tisoc_EUR", "Stevilo_zaposlenih"),
                  variable.name = "Spremenljivka", value.name = "Stevilo")
data1 <- subset(najboljsa, Spremenljivka == "Prihodki_od_prodaje_v_tisoc_EUR")
fit <- lm(data = data1, Stevilo ~ Leto)
l <- data.frame(Leto=seq(2005, 2023))
# NOTE(review): this bare predict() call discards its result; the value
# actually used is recomputed inside mutate() on the next line.
predict(fit, l)
napoved <- l %>% mutate(Stevilo=predict(fit, .))
# Revenue plot (billions EUR): observed line, forecast points, linear trend.
lin1 <- ggplot(data1, aes(x=Leto,y=Stevilo/1e6)) +
  geom_line() +
  geom_point(data=napoved, aes(x=Leto, y=Stevilo/1e6), color="blue", size=3) +
  geom_smooth(method = 'lm', se = FALSE) +
  xlab("Leto") + ylab("Prihodki od prodaje v milijardah EUR")
# Helper for the Shiny app: fit a linear trend of Stevilo on Leto and
# return predictions for 2005-2023 as a data frame (Leto, Stevilo).
Napoved <- function(data){
  fit <- lm(data = data, Stevilo ~ Leto)
  l <- data.frame(Leto=seq(2005, 2023))
  # NOTE(review): no-op call, result discarded (same as above).
  predict(fit, l)
  napoved <- l %>% mutate(Stevilo=predict(fit, .))
  return(napoved)
}
# Clustering: regions by number of working persons in 2016.
podatki <- Regije1 %>% filter(Leto==2016) %>% select(Regija, Stevilo_oseb)
regija <- podatki$Regija
row.names(podatki) <- podatki$Regija
podatki$Regija <- NULL
# Standardise before computing distances.
lestvica <- scale(podatki)
# Dissimilarity matrix (class: dist).
matrika <- dist(lestvica)
# Hierarchical clustering, later cut into 3 groups.
n <- 3
skupina <- hclust(matrika)
# Dendrogram of the full hierarchy.
ggdendrogram(skupina, rotate = FALSE, size = 2)
# Assign each region to one of the n clusters.
skupina <- hclust(matrika) %>% cutree(n)
skupine <- data.frame(regija, skupina)
skupine <- remove_rownames(skupine)
# Which regions fall into cluster 2?
skupine %>% filter(skupina==2) %>% select(regija)
# Choropleth map coloured by cluster membership.
zemljevid3 <- ggplot(left_join(zemljevid, skupine, by = c("NAME_1"='regija'))) +
  geom_polygon(aes(x = long, y = lat, group = group, fill = skupina)) +
  labs(fill="Lestvica") +
  ggtitle("Razvrščanje glede na število oseb, ki delajo")
|
library(tidyverse)
library(data.table)
library(randomForest)
library(mice)
library(mlr)
source("na_replace.R")
# Custom Function ---------------------------------------------------------
# Encode every factor/character column of `data` as numeric level codes so
# the frame can be fed to numeric-only models.
# Args:    data - a data.frame / data.table.
# Returns: `data` with factor/character columns replaced by numeric codes.
dummify <- function(data){
  features <- colnames(data)
  for (f in features) {
    if ((class(data[[f]]) == "factor") || (class(data[[f]]) == "character")) {
      levels <- unique(data[[f]])
      data[[f]] <- as.numeric(factor(data[[f]], levels = levels))
    }
  }
  return(data)
}
# Loading Data ------------------------------------------------------------
application_train <- fread("data/application_train.csv") %>%
  mutate(SK_ID_CURR = as.character(SK_ID_CURR)) %>%
  mutate(TARGET = as.factor(TARGET)) %>%
  na.omit() %>%
  data.table()
application_test <- fread("data/application_test.csv") %>%
  mutate(SK_ID_CURR = as.character(SK_ID_CURR)) %>%
  na.omit() %>%
  data.table()
prediction <- rep(0, nrow(application_test))
n_ensemble <- 250      # number of bagged models
prop_features <- 0.4   # fraction of feature columns sampled per model
prop_rows <- 0.2       # fraction of training rows sampled per model
# Feature Selection -----------------------------
faceless_train <- application_train %>%
  select(-TARGET, -SK_ID_CURR) %>%
  dummify() %>%
  data.table()
faceless_test <- application_test %>%
  select(-SK_ID_CURR) %>%
  dummify() %>%
  data.table()
face_test <- application_test %>%
  select(SK_ID_CURR)
face_train <- application_train %>%
  select(TARGET)
# Bagged logistic-regression ensemble: each iteration fits a GLM on a
# random subset of features and rows; predictions are averaged.
# FIXES vs. the original:
#   * `for (i in n_ensemble)` looped exactly once (over the value 250);
#     it now runs n_ensemble times.
#   * `faceless_train[features_num]` selected data.table ROWS; column
#     selection requires `with = FALSE`.
#   * duplicated `fit <- fit <- glm(...)` collapsed; unused `formula`
#     variable removed; fractional sample sizes floored explicitly.
for (i in seq_len(n_ensemble)) {
  features_num <- sample(seq_len(ncol(faceless_train)),
                         floor(ncol(faceless_train) * prop_features),
                         replace = FALSE)
  to_train <- faceless_train[, features_num, with = FALSE] %>%
    cbind(face_train) %>%
    sample_n(floor(nrow(faceless_train) * prop_rows)) %>%
    data.table()
  to_test <- faceless_test[, features_num, with = FALSE] %>%
    cbind(face_test) %>%
    data.table()
  fit <- glm(formula = TARGET ~ .,
             data = to_train,
             na.action = na.omit,
             family = binomial)
  # extra columns in to_test (SK_ID_CURR) are ignored by predict()
  partial_prediction <- as.numeric(predict(fit, to_test, type = "response"))
  prediction <- prediction + partial_prediction
}
solution <- data.frame(id = face_test$SK_ID_CURR, prediction = prediction / n_ensemble)
|
/auto_predict.R
|
no_license
|
aljrico/kaggle_hcdr
|
R
| false
| false
| 2,053
|
r
|
library(tidyverse)
library(data.table)
library(randomForest)
library(mice)
library(mlr)
source("na_replace.R")
# Custom Function ---------------------------------------------------------
# Encode every factor/character column of `data` as numeric level codes so
# the frame can be fed to numeric-only models.
# Args:    data - a data.frame / data.table.
# Returns: `data` with factor/character columns replaced by numeric codes.
dummify <- function(data){
  features <- colnames(data)
  for (f in features) {
    if ((class(data[[f]]) == "factor") || (class(data[[f]]) == "character")) {
      levels <- unique(data[[f]])
      data[[f]] <- as.numeric(factor(data[[f]], levels = levels))
    }
  }
  return(data)
}
# Loading Data ------------------------------------------------------------
application_train <- fread("data/application_train.csv") %>%
  mutate(SK_ID_CURR = as.character(SK_ID_CURR)) %>%
  mutate(TARGET = as.factor(TARGET)) %>%
  na.omit() %>%
  data.table()
application_test <- fread("data/application_test.csv") %>%
  mutate(SK_ID_CURR = as.character(SK_ID_CURR)) %>%
  na.omit() %>%
  data.table()
prediction <- rep(0, nrow(application_test))
n_ensemble <- 250      # number of bagged models
prop_features <- 0.4   # fraction of feature columns sampled per model
prop_rows <- 0.2       # fraction of training rows sampled per model
# Feature Selection -----------------------------
faceless_train <- application_train %>%
  select(-TARGET, -SK_ID_CURR) %>%
  dummify() %>%
  data.table()
faceless_test <- application_test %>%
  select(-SK_ID_CURR) %>%
  dummify() %>%
  data.table()
face_test <- application_test %>%
  select(SK_ID_CURR)
face_train <- application_train %>%
  select(TARGET)
# Bagged logistic-regression ensemble: each iteration fits a GLM on a
# random subset of features and rows; predictions are averaged.
# FIXES vs. the original:
#   * `for (i in n_ensemble)` looped exactly once (over the value 250);
#     it now runs n_ensemble times.
#   * `faceless_train[features_num]` selected data.table ROWS; column
#     selection requires `with = FALSE`.
#   * duplicated `fit <- fit <- glm(...)` collapsed; unused `formula`
#     variable removed; fractional sample sizes floored explicitly.
for (i in seq_len(n_ensemble)) {
  features_num <- sample(seq_len(ncol(faceless_train)),
                         floor(ncol(faceless_train) * prop_features),
                         replace = FALSE)
  to_train <- faceless_train[, features_num, with = FALSE] %>%
    cbind(face_train) %>%
    sample_n(floor(nrow(faceless_train) * prop_rows)) %>%
    data.table()
  to_test <- faceless_test[, features_num, with = FALSE] %>%
    cbind(face_test) %>%
    data.table()
  fit <- glm(formula = TARGET ~ .,
             data = to_train,
             na.action = na.omit,
             family = binomial)
  # extra columns in to_test (SK_ID_CURR) are ignored by predict()
  partial_prediction <- as.numeric(predict(fit, to_test, type = "response"))
  prediction <- prediction + partial_prediction
}
solution <- data.frame(id = face_test$SK_ID_CURR, prediction = prediction / n_ensemble)
|
testlist <- list(a = 0L, b = 0L, x = c(-121423L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67371009L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131256-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 670
|
r
|
# Auto-generated fuzzer (libFuzzer/valgrind) reproduction case for
# grattan:::anyOutside(): replays a recorded crashing/interesting input
# and prints the structure of whatever the routine returns.
testlist <- list(a = 0L, b = 0L, x = c(-121423L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67372037L, -67371009L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
#Library for RNAseq analysis
#05-15
#Christian Meyer
#Contact christian.t.meyer@vanderbilt.edu
#Functions and libraries to load initially
#location of Data:
dataLoc = "~/Documents/Lab/WGCNA_analysis/GSE58135_RAW/FPKM"
#location of MRSR report
MRSRreport = ""
#location of
##Initial library loading and workspace configuration
# Please adapt the following path. Note that we use / instead of \ in the path.
setwd("~/Documents/Lab/WGCNA_analysis/GSE58135_RAW/FPKM")
# read in the R libraries
library(MASS) # standard, no need to install
library(class) # standard, no need to install
library(cluster)
library(impute)# install it for imputing missing value
library(Hmisc) # install it for the C-index calculations
library(survival)
library(dendextend)
library(WGCNA)
library(MultiRankSeq)
library(R.utils)
library(matrixStats)
library(pracma)
library(extremevalues)
allowWGCNAThreads()
options(stringsAsFactors=F)
##Functions for getting Filenames and reading in Data
#Read in data fileName is the name of each file type
fileName <- function(file_names) {
toKeep <- gregexpr("_", file_names) #Just keep the GSE... part of file name as identifier
name <- substr(file_names,1,toKeep[[1]][2]-1)
return(name)
}
#Read in files in directory between init and fin
getData <- function(init,fin,file_names,sample_names) {
list_dataset = list(FPKM=NULL, CI=NULL)
for(i in init:fin) {
if (i == init) {
temp_dataset <- read.delim(file_names[i])
#Change depending on what kind of name you want for the rows
#rnames <- paste0(temp_dataset[[4]],'_',temp_dataset[[5]])
rnames <- temp_dataset[[4]]
list_dataset[[1]] <- cbind(temp_dataset[[10]])
list_dataset[[2]] <- cbind(temp_dataset[[12]] - temp_dataset[[11]])
rownames(list_dataset[[1]]) <- rnames
rownames(list_dataset[[2]]) <- rnames
}
if (i != init){
temp_dataset <- read.delim(file_names[i])
list_dataset[[1]] <- cbind(list_dataset[[1]],temp_dataset[[10]])
list_dataset[[2]] <- cbind(list_dataset[[2]],temp_dataset[[12]] - temp_dataset[[11]])
}
}
colnames(list_dataset[[1]]) <- sample_names[init:fin]
colnames(list_dataset[[2]]) <- sample_names[init:fin]
return(list_dataset)
}
#Get the gene short names from the dataset.
geneShort <- function(file_names){
temp_dataset <- read.delim(file_names[3])
gene_shortName = temp_dataset[[5]]
return(gene_shortName)
}
#Functions to measure time in R similary to matlab execution
tic <- 1
class(tic) <- "tic"
toc <- 1
class(toc) <- "toc"
print.tic <- function(x,...) {
if (!exists("proc.time"))
stop("cannot measure time")
gc(FALSE)
assign(".temp.tictime", proc.time(), envir = .GlobalEnv)
}
print.toc <- function(x,...) {
if (!exists(".temp.tictime", envir = .GlobalEnv))
stop("Did you tic?")
time <- get(".temp.tictime", envir = .GlobalEnv)
rm(".temp.tictime", envir = .GlobalEnv)
print(res <- structure(proc.time() - time,
class = "proc_time"), ...)
invisible(res)
}
|
/Lib_RNAseq.R
|
no_license
|
meyerct1/LabCode
|
R
| false
| false
| 3,021
|
r
|
#Library for RNAseq analysis
#05-15
#Christian Meyer
#Contact christian.t.meyer@vanderbilt.edu
#Functions and libraries to load initially
#location of Data:
dataLoc = "~/Documents/Lab/WGCNA_analysis/GSE58135_RAW/FPKM"
#location of MRSR report
MRSRreport = ""
#location of
##Initial library loading and workspace configuration
# Please adapt the following path. Note that we use / instead of \ in the path.
setwd("~/Documents/Lab/WGCNA_analysis/GSE58135_RAW/FPKM")
# read in the R libraries
library(MASS) # standard, no need to install
library(class) # standard, no need to install
library(cluster)
library(impute)# install it for imputing missing value
library(Hmisc) # install it for the C-index calculations
library(survival)
library(dendextend)
library(WGCNA)
library(MultiRankSeq)
library(R.utils)
library(matrixStats)
library(pracma)
library(extremevalues)
allowWGCNAThreads()
options(stringsAsFactors=F)
##Functions for getting Filenames and reading in Data
#Read in data fileName is the name of each file type
# Extract the sample identifier from a raw file name.
#
# Keeps everything up to (but not including) the second underscore, e.g.
# "GSM1401694_A1_Cancer.txt" -> "GSM1401694_A1".
#
# Args:
#   file_names: a file name containing at least two underscores.
# Returns: the identifier prefix of `file_names`.
fileName <- function(file_names) {
  underscore_pos <- gregexpr("_", file_names)[[1]]
  substr(file_names, 1L, underscore_pos[2] - 1L)
}
#Read in files in directory between init and fin
# Read FPKM files `init`..`fin` from `file_names` and assemble two matrices.
#
# Column-index assumptions (Cufflinks-style tracking layout — TODO confirm
# against the actual GSE58135 files): column 4 is the gene/tracking id,
# column 10 is FPKM, columns 11/12 are the lower/upper confidence bounds.
#
# Args:
#   init, fin:    first and last file index to read (inclusive).
#   file_names:   character vector of file paths.
#   sample_names: sample labels used as column names, indexed like file_names.
# Returns: list(FPKM = genes-x-samples matrix of FPKM values,
#               CI   = matching matrix of confidence-interval widths, hi - lo).
getData <- function(init,fin,file_names,sample_names) {
  list_dataset = list(FPKM=NULL, CI=NULL)
  for(i in init:fin) {
    if (i == init) {
      # The first file also establishes the row (gene) names.
      temp_dataset <- read.delim(file_names[i])
      #Change depending on what kind of name you want for the rows
      #rnames <- paste0(temp_dataset[[4]],'_',temp_dataset[[5]])
      rnames <- temp_dataset[[4]]
      list_dataset[[1]] <- cbind(temp_dataset[[10]])
      list_dataset[[2]] <- cbind(temp_dataset[[12]] - temp_dataset[[11]])
      rownames(list_dataset[[1]]) <- rnames
      rownames(list_dataset[[2]]) <- rnames
    }
    if (i != init){
      # Subsequent files are appended as columns; rows are assumed to be in
      # the same gene order as the first file (not re-checked here).
      temp_dataset <- read.delim(file_names[i])
      list_dataset[[1]] <- cbind(list_dataset[[1]],temp_dataset[[10]])
      list_dataset[[2]] <- cbind(list_dataset[[2]],temp_dataset[[12]] - temp_dataset[[11]])
    }
  }
  colnames(list_dataset[[1]]) <- sample_names[init:fin]
  colnames(list_dataset[[2]]) <- sample_names[init:fin]
  return(list_dataset)
}
#Get the gene short names from the dataset.
# Read the gene short names (5th column) from one of the data files.
#
# Args:
#   file_names: character vector of file paths.
#   index:      which file to read the names from. Defaults to 3, matching
#               the original hard-coded `file_names[3]`; generalized to a
#               parameter since any file with the shared gene order works.
# Returns: the 5th column of the chosen file (the gene short names).
geneShort <- function(file_names, index = 3){
  temp_dataset <- read.delim(file_names[index])
  gene_shortName = temp_dataset[[5]]
  return(gene_shortName)
}
#Functions to measure time in R, similarly to MATLAB's tic/toc.
# Typing `tic` or `toc` at the console auto-prints the sentinel object,
# which dispatches to the custom print() S3 methods below.
tic <- 1
class(tic) <- "tic"
toc <- 1
class(toc) <- "toc"
# print method for class "tic": start the timer.
# Side effect: stores the current proc.time() as `.temp.tictime` in the
# global environment (gc() first so collection noise is excluded).
print.tic <- function(x,...) {
  if (!exists("proc.time"))
    stop("cannot measure time")
  gc(FALSE)
  assign(".temp.tictime", proc.time(), envir = .GlobalEnv)
}
# print method for class "toc": report elapsed time since the matching tic.
# Removes the stored timestamp and returns the proc_time invisibly so the
# result can be captured without re-printing.
print.toc <- function(x,...) {
  if (!exists(".temp.tictime", envir = .GlobalEnv))
    stop("Did you tic?")
  time <- get(".temp.tictime", envir = .GlobalEnv)
  rm(".temp.tictime", envir = .GlobalEnv)
  print(res <- structure(proc.time() - time,
                         class = "proc_time"), ...)
  invisible(res)
}
|
library(gamlss)
### Name: gamlss
### Title: Generalized Additive Models for Location Scale and Shape
### Aliases: gamlss is.gamlss gamlssNews
### Keywords: regression
### ** Examples
data(abdom)
mod<-gamlss(y~pb(x),sigma.fo=~pb(x),family=BCT, data=abdom, method=mixed(1,20))
plot(mod)
rm(mod)
|
/data/genthat_extracted_code/gamlss/examples/gamlss.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 301
|
r
|
# Extracted package example: fit a GAMLSS with a Box-Cox t (BCT) response
# to the abdominal-circumference data, smoothing both mu and sigma with
# P-splines, then show the diagnostic plots.
library(gamlss)
### Name: gamlss
### Title: Generalized Additive Models for Location Scale and Shape
### Aliases: gamlss is.gamlss gamlssNews
### Keywords: regression
### ** Examples
data(abdom)
# mixed(1,20): 1 RS iteration followed by up to 20 CG iterations.
mod<-gamlss(y~pb(x),sigma.fo=~pb(x),family=BCT, data=abdom, method=mixed(1,20))
plot(mod)
rm(mod)
|
library(raster)
library(rgdal)
landsat<- stack("Landsat7.tif")
landsat
plotRGB(landsat, r = 1, g = 2, b = 3, alpha=80,stretch="hist")
library(shiny)
ui <- fluidPage(
sliderInput(inputId = "num",
label = "Transparency",
value = 100, min = 1, max = 255),
sliderInput(inputId = "red",
label = "Red",
value = 1, min = 1, max = 3),
sliderInput(inputId = "green",
label = "Green",
value = 2, min = 1, max = 3),
sliderInput(inputId = "blue",
label = "Blue",
value = 3, min = 1, max = 3),
selectInput('stretch', 'Stretch', c("hist","lin")),
plotOutput("plot1")
)
server <- function(input, output) {
output$plot1 <- renderPlot({landsat <- stack("Landsat7.tif")
plotRGB(landsat, r = input$red, g = input$green, b = input$blue,alpha=input$num,stretch=input$stretch)
})
}
shinyApp(ui = ui, server = server)
|
/Assignment Spatial R/Shiny.R
|
no_license
|
QiZhiqi/262
|
R
| false
| false
| 1,005
|
r
|
library(raster)
library(rgdal)
# Shiny app for interactively exploring a Landsat 7 scene: the user picks
# which band is mapped to each RGB channel, the overlay transparency, and
# the contrast-stretch method.
# Static preview of the default band combination (bands 1/2/3).
# NOTE(review): the raster is re-loaded inside renderPlot on every redraw;
# the object created here could be reused instead — confirm before changing.
landsat<- stack("Landsat7.tif")
landsat
plotRGB(landsat, r = 1, g = 2, b = 3, alpha=80,stretch="hist")
library(shiny)
# UI: one transparency slider, one band-index slider per RGB channel,
# and a selector for the stretch algorithm ("hist" or "lin").
ui <- fluidPage(
  sliderInput(inputId = "num",
              label = "Transparency",
              value = 100, min = 1, max = 255),
  sliderInput(inputId = "red",
              label = "Red",
              value = 1, min = 1, max = 3),
  sliderInput(inputId = "green",
              label = "Green",
              value = 2, min = 1, max = 3),
  sliderInput(inputId = "blue",
              label = "Blue",
              value = 3, min = 1, max = 3),
  selectInput('stretch', 'Stretch', c("hist","lin")),
  plotOutput("plot1")
)
# Server: re-render the RGB composite whenever any input changes.
server <- function(input, output) {
  output$plot1 <- renderPlot({landsat <- stack("Landsat7.tif")
    plotRGB(landsat, r = input$red, g = input$green, b = input$blue,alpha=input$num,stretch=input$stretch)
  })
}
shinyApp(ui = ui, server = server)
|
#!/usr/bin/env Rscript
# Verilog sin ROM
n <- 32
m <- 8-1
cat(paste("reg [", m,":0" ,"] sin [0:",n-1,"];\n", sep=""))
cat("initial begin\n")
a <- floor((1+sin((0:n)*2*pi/n))*(2^m-1))
if (T) { # Hex
for (i in 1:(length(a)-1)){
cat(paste(" sin[",i-1 ,"] = ", 2, "'h",
format(as.hexmode(a[i+1]),upper.case=T, width=2, flag="0"),
sep=''), ';\n',sep='')
}
} else { # Binary
for (i in 1:(length(a)-1)){
cat(paste(" sin[",i-1 ,"] = ", 8, "'b",
substr(paste(rev(as.integer(intToBits(a[i+1]))), collapse=''),
n-8+1,n), ";\n", sep=""))
}
}
if (F){ # Aggregate requires SystemVerilog
cat(paste(" sin = {", sep=""))
for (i in 1:(length(a)-1)){
if (((i-1) %% 8 == 0) && (i<(length(a)-1)) ){
cat("\n ")
}
cat(paste(2, "'h",
format(as.hexmode(a[i+1]),upper.case=T, width=2, flag="0"),
sep=''),sep='')
if (i<(length(a)-1)){
cat(", ")
}
}
cat("};\n")
}
cat("end\n")
|
/sin.r
|
no_license
|
humdeum/spinv
|
R
| false
| false
| 903
|
r
|
#!/usr/bin/env Rscript
# Verilog sin ROM
#
# Emits a Verilog `initial` block that fills a small ROM with one period
# of a sine wave, offset to be non-negative and scaled to fit m+1 bits.
# Idiom fixes vs. the original: T/F -> TRUE/FALSE, 1:(length(a)-1) ->
# seq_len(length(a)-1). Emitted Verilog text is unchanged.
n <- 32    # number of ROM entries (samples per period)
m <- 8-1   # highest bit index, i.e. (m+1)-bit samples
cat(paste("reg [", m,":0" ,"] sin [0:",n-1,"];\n", sep=""))
cat("initial begin\n")
# n+1 samples of 1+sin over [0, 2*pi], scaled to [0, 2*(2^m - 1)].
# Entry i uses a[i+1]: the table starts one sample into the period
# (a[1] at phase 0 is skipped; a[n+1] ~ a[1] closes the loop).
a <- floor((1+sin((0:n)*2*pi/n))*(2^m-1))
if (TRUE) { # Hex output
  for (i in seq_len(length(a)-1)){
    cat(paste(" sin[",i-1 ,"] = ", 2, "'h",
              format(as.hexmode(a[i+1]),upper.case=TRUE, width=2, flag="0"),
              sep=''), ';\n',sep='')
  }
} else { # Binary output: take the low 8 bits of the 32-bit representation
  for (i in seq_len(length(a)-1)){
    cat(paste(" sin[",i-1 ,"] = ", 8, "'b",
              substr(paste(rev(as.integer(intToBits(a[i+1]))), collapse=''),
                     n-8+1,n), ";\n", sep=""))
  }
}
if (FALSE){ # Aggregate initialiser; requires SystemVerilog
  cat(paste(" sin = {", sep=""))
  for (i in seq_len(length(a)-1)){
    # line break every 8 entries, except before the closing brace
    if (((i-1) %% 8 == 0) && (i<(length(a)-1)) ){
      cat("\n ")
    }
    cat(paste(2, "'h",
              format(as.hexmode(a[i+1]),upper.case=TRUE, width=2, flag="0"),
              sep=''),sep='')
    if (i<(length(a)-1)){
      cat(", ")
    }
  }
  cat("};\n")
}
cat("end\n")
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{griffun}
\alias{griffun}
\alias{griffun-package}
\title{My library of functions}
\description{
My library of functions
}
|
/man/griffun.Rd
|
no_license
|
SathishN/griffun
|
R
| false
| false
| 199
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{griffun}
\alias{griffun}
\alias{griffun-package}
\title{My library of functions}
\description{
My library of functions
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_rcards.R
\name{get_rcards}
\alias{get_rcards}
\title{get_rcards()}
\usage{
get_rcards(file, format_type = "format1")
}
\arguments{
\item{file}{The name of the file which the data are to be read from (including the .pdf extension). The function assumes that the file to be read from is in the working directory.}
\item{format_type}{character: Identify the report card format that is used as input}
}
\description{
This function takes a PDF of a student's report card as input, and returns the information in a table format.
}
\examples{
get_rcards('my_pdf_file.pdf')
}
\keyword{file}
|
/man/get_rcards.Rd
|
no_license
|
thelayc/laycReportCards
|
R
| false
| false
| 663
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_rcards.R
\name{get_rcards}
\alias{get_rcards}
\title{get_rcards()}
\usage{
get_rcards(file, format_type = "format1")
}
\arguments{
\item{file}{The name of the file which the data are to be read from (including the .pdf extension). The function assumes that the file to be read from is in the working directory.}
\item{format_type}{character: Identify the report card format that is used as input}
}
\description{
This function takes a PDF of a student's report card as input, and returns the information in a table format.
}
\examples{
get_rcards('my_pdf_file.pdf')
}
\keyword{file}
|
library(here)
library(prophet)
library(tidyverse)
# load(here("data", "tidy_data", "BBDD_Germany_20180619.RData"))
# BBDD_Germany <- BBDD_Germany %>% as.tibble()
#
# tb_data <- BBDD_Germany %>%
# transmute(ds = fecha,
# y = Reservas_Ocio_Berlin)
# Load the pre-built base tables and keep only the daily one; the weekly
# and xts variants are dropped immediately to free memory.
load(here("data", "tidy_data", "all_V3.RData"))
rm(xts_daily_base, tb_weekly_base, xts_weekly_base); gc()
# Build the modelling frame. prophet requires the date column to be named
# `ds` and the target series `y`; the remaining selected columns are kept
# as candidate extra regressors (sales-period flags, weather, price,
# macro indicators, store count, Black Friday dummies).
tb_data <- tb_daily_base %>%
  select(Fecha, trafico_web_sesiones_totales,
         contains("sales"),
         goog_trends, temp_mn, otros_precio_medio,
         eco_sent, unemp_rate,
         tiendas_numero_de_tiendas,
         winter_sales, summer_sales,
         w1_winter_sales, w1_summer_sales,
         black_friday_2016, black_friday_2017) %>%
  rename(ds = Fecha,
         y = trafico_web_sesiones_totales)
# QUICK -------------------------------------------------------------------
# Fit the model
m <- prophet(tb_data)
# CV
# tb_data_cv <- cross_validation(m, initial = 53, period = 4, horizon = 52, units = 'weeks')
# tb_data_cv <- cross_validation(m, horizon = 30, units = 'days')
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
# Make predictions
future <- make_future_dataframe(m, periods = 365)
forecast <- predict(m, future)
# Plots
plot(m, forecast)
prophet_plot_components(m, forecast)
dyplot.prophet(m, forecast)
# TREND CHANGEPOINTS ------------------------------------------------------
plot(m, forecast) + add_changepoints_to_plot(m)
# MORE flexibility
m <- prophet(tb_data, changepoint.prior.scale = 0.5)
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
forecast <- predict(m, future)
plot(m, forecast) + add_changepoints_to_plot(m)
# SEASONALITIES -----------------------------------------------------------
# m <- prophet(weekly.seasonality=FALSE)
m <- prophet()
m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
m <- fit.prophet(m, tb_data)
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
forecast <- predict(m, future)
plot(m, forecast)
prophet_plot_components(m, forecast)
# REGRESSORS --------------------------------------------------------------
m <- prophet()
# m <- add_seasonality(m, name='monthly_1', period=365, fourier.order=10)
# m <- add_seasonality(m, name='monthly_2', period=30.5, fourier.order=10)
m <- add_regressor(m, "goog_trends")
m <- add_regressor(m, "temp_mn")
m <- add_regressor(m, "otros_precio_medio")
m <- add_regressor(m, "unemp_rate")
m <- add_regressor(m, "tiendas_numero_de_tiendas")
m <- add_regressor(m, "w1_winter_sales")
m <- add_regressor(m, "w1_summer_sales")
# m <- add_regressor(m, "winter_sales")
# m <- add_regressor(m, "summer_sales")
m <- add_regressor(m, "black_friday_2016")
m <- add_regressor(m, "black_friday_2017")
m <- fit.prophet(m, tb_data)
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
forecast <- predict(m, tb_data)
plot(m, forecast)
prophet_plot_components(m, forecast)
dyplot.prophet(m, forecast)
# Join in-sample predictions with the observed series to evaluate the fit.
# Fix: the join key is now explicit (by = "ds") -- the original relied on a
# natural join, which prints `Joining, by = "ds"` and can silently pick up
# extra shared columns if either input gains a column later.
aux <- forecast %>%
  select(ds, yhat) %>%
  left_join(tb_data %>%
              select(ds, y) %>%
              mutate(ds = as.POSIXct(ds)),
            by = "ds") %>%
  mutate(residuals = y - yhat)
# Mean absolute percentage error of the in-sample fit
Metrics::mape(aux$y, aux$yhat)
# In-sample R-squared: 1 - SS_residual / SS_total
r2 <- (1 - crossprod(aux$residuals) / crossprod(aux$y - mean(aux$y))) %>%
  as.numeric()
r2
# ALL ---------------------------------------------------------------------
m <- prophet(changepoint.prior.scale = 0.5)
m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
m <- add_regressor(m, "goog_trends")
m <- add_regressor(m, "temp_mn")
m <- add_regressor(m, "otros_precio_medio")
m <- add_regressor(m, "unemp_rate")
m <- add_regressor(m, "tiendas_numero_de_tiendas")
m <- add_regressor(m, "w1_winter_sales")
m <- add_regressor(m, "w1_summer_sales")
m <- add_regressor(m, "black_friday_2016")
m <- add_regressor(m, "black_friday_2017")
m <- fit.prophet(m, tb_data)
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
forecast <- predict(m, tb_data)
plot(m, forecast)
prophet_plot_components(m, forecast)
dyplot.prophet(m, forecast)
aux <- forecast %>% select(ds, yhat) %>%
left_join(tb_data %>% select(ds, y) %>%
mutate(ds = as.POSIXct(ds))) %>%
mutate(residuals = y - yhat)
Metrics::mape(aux$y, aux$yhat)
r2 <- (1 - crossprod(aux$residuals) / crossprod(aux$y - mean(aux$y))) %>%
as.numeric()
r2
# CONTRIBUTIONS -----------------------------------------------------------
# Keep only the per-component contribution columns of the forecast (drop the
# confidence-interval bounds, the aggregate *_terms columns, the regressor
# aggregates and yhat itself) before computing/plotting contributions.
# Fix: as.tibble() is deprecated in the tibble package -- use as_tibble().
m_aportes <- forecast %>%
  as_tibble() %>%
  select(-ends_with("_lower"), -ends_with("_upper"), -ends_with("_terms"),
         -starts_with("extra_regressors_"), -yhat)
Conento::aportes_ag(m_aportes)
Conento::dibuja_areas(m_aportes %>% mutate(ds = as.Date(ds)))
# CORES -------------------------------------------------------------------
library(tidyverse)
library(lubridate)
library(prophet)
library(readxl)
URL_CORES <- "https://www.cores.es/sites/default/files/archivos/estadisticas/consumos-pp.xlsx"
get_cores_data <- function(sheet = "Gasolinas") {
#
# Downloads from the CORES website the Excel workbook with the monthly
# national consumption of petroleum products.
#
# PARAMETERS
# - sheet: char("Gasolinas", "Gasoleos") - sheet of the Excel workbook whose
# data is to be retrieved
#
# RETURN
# - Tibble with <fecha> (adjusted to the last day of the month) and one
# column per product with the monthly consumption (in tonnes)
destfile <- paste0(tempfile(), ".xlsx")
curl::curl_download(URL_CORES, destfile)
# Read the sheet (skipping the 5 header rows), normalize column names, keep
# only real monthly rows, and build a parseable year-month-day date string
# (day fixed to 1; it is moved to month-end below).
consumos_pp <- read_excel(destfile, sheet = sheet, skip = 5) %>%
janitor::clean_names() %>%
filter(!is.na(mes)) %>%
filter(mes != "total") %>%
mutate(dia = 1) %>%
unite(fecha, c("ano", "mes", "dia"), sep = "-") %>%
mutate(fecha = ymd(fecha))
unlink(destfile)
# Shift every date to the last day of its month
day(consumos_pp$fecha) <- days_in_month(consumos_pp$fecha)
consumos_pp
}
tb_gna95 <- get_cores_data() %>%
select(fecha, gasolina_95) %>%
rename(ds = fecha,
y = gasolina_95)
tb_goa <- get_cores_data("Gasoleos") %>%
select(fecha, gasoleo_a) %>%
rename(ds = fecha,
y = gasoleo_a)
# Fit the model
m <- prophet(tb_gna95, holidays = generated_holidays %>%
filter(country == "Spain"))
# m <- prophet(daily.seasonality = FALSE,
# weekly.seasonality = FALSE)
# m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
# m <- fit.prophet(m, tb_goa)
# Make predictions
future <- make_future_dataframe(m, periods = 24, freq = 'month')
tail(future)
forecast <- predict(m, future)
tail(forecast[c('ds', 'yhat', 'yhat_lower', 'yhat_upper')])
# Plots
plot(m, forecast)
prophet_plot_components(m, forecast)
dyplot.prophet(m, forecast)
### PERF
# tb_data_cv <- cross_validation(m,
# initial = 365.25*4,
# period = 365.25,
# horizon = 30, #365.25*2,
# units = 'days')
tb_data_cv <- cross_validation(m,
horizon = 365.25 / 12 * 2,
units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>%
group_by(horizon) %>%
summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
|
/code/raw_code/my_example.R
|
no_license
|
miguel-conde/prophet_primer
|
R
| false
| false
| 10,822
|
r
|
library(here)
library(prophet)
library(tidyverse)
# load(here("data", "tidy_data", "BBDD_Germany_20180619.RData"))
# BBDD_Germany <- BBDD_Germany %>% as.tibble()
#
# tb_data <- BBDD_Germany %>%
# transmute(ds = fecha,
# y = Reservas_Ocio_Berlin)
# Load the daily base table; drop the unused weekly/xts objects to free memory
load(here("data", "tidy_data", "all_V3.RData"))
rm(xts_daily_base, tb_weekly_base, xts_weekly_base); gc()
# Build the prophet input: ds = date, y = total web sessions, plus the
# candidate extra regressors (sales flags, Google Trends, temperature,
# prices, unemployment, store count, Black Friday dummies).
tb_data <- tb_daily_base %>%
select(Fecha, trafico_web_sesiones_totales,
contains("sales"),
goog_trends, temp_mn, otros_precio_medio,
eco_sent, unemp_rate,
tiendas_numero_de_tiendas,
winter_sales, summer_sales,
w1_winter_sales, w1_summer_sales,
black_friday_2016, black_friday_2017) %>%
rename(ds = Fecha,
y = trafico_web_sesiones_totales)
# QUICK -------------------------------------------------------------------
# Fit the model
m <- prophet(tb_data)
# CV
# tb_data_cv <- cross_validation(m, initial = 53, period = 4, horizon = 52, units = 'weeks')
# tb_data_cv <- cross_validation(m, horizon = 30, units = 'days')
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
# Make predictions
future <- make_future_dataframe(m, periods = 365)
forecast <- predict(m, future)
# Plots
plot(m, forecast)
prophet_plot_components(m, forecast)
dyplot.prophet(m, forecast)
# TREND CHANGEPOINTS ------------------------------------------------------
plot(m, forecast) + add_changepoints_to_plot(m)
# MORE flexibility
m <- prophet(tb_data, changepoint.prior.scale = 0.5)
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
forecast <- predict(m, future)
plot(m, forecast) + add_changepoints_to_plot(m)
# SEASONALITIES -----------------------------------------------------------
# m <- prophet(weekly.seasonality=FALSE)
m <- prophet()
m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
m <- fit.prophet(m, tb_data)
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
forecast <- predict(m, future)
plot(m, forecast)
prophet_plot_components(m, forecast)
# REGRESSORS --------------------------------------------------------------
m <- prophet()
# m <- add_seasonality(m, name='monthly_1', period=365, fourier.order=10)
# m <- add_seasonality(m, name='monthly_2', period=30.5, fourier.order=10)
m <- add_regressor(m, "goog_trends")
m <- add_regressor(m, "temp_mn")
m <- add_regressor(m, "otros_precio_medio")
m <- add_regressor(m, "unemp_rate")
m <- add_regressor(m, "tiendas_numero_de_tiendas")
m <- add_regressor(m, "w1_winter_sales")
m <- add_regressor(m, "w1_summer_sales")
# m <- add_regressor(m, "winter_sales")
# m <- add_regressor(m, "summer_sales")
m <- add_regressor(m, "black_friday_2016")
m <- add_regressor(m, "black_friday_2017")
m <- fit.prophet(m, tb_data)
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
forecast <- predict(m, tb_data)
plot(m, forecast)
prophet_plot_components(m, forecast)
dyplot.prophet(m, forecast)
aux <- forecast %>% select(ds, yhat) %>%
left_join(tb_data %>% select(ds, y) %>%
mutate(ds = as.POSIXct(ds))) %>%
mutate(residuals = y - yhat)
Metrics::mape(aux$y, aux$yhat)
r2 <- (1 - crossprod(aux$residuals) / crossprod(aux$y - mean(aux$y))) %>%
as.numeric()
r2
# ALL ---------------------------------------------------------------------
m <- prophet(changepoint.prior.scale = 0.5)
m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
m <- add_regressor(m, "goog_trends")
m <- add_regressor(m, "temp_mn")
m <- add_regressor(m, "otros_precio_medio")
m <- add_regressor(m, "unemp_rate")
m <- add_regressor(m, "tiendas_numero_de_tiendas")
m <- add_regressor(m, "w1_winter_sales")
m <- add_regressor(m, "w1_summer_sales")
m <- add_regressor(m, "black_friday_2016")
m <- add_regressor(m, "black_friday_2017")
m <- fit.prophet(m, tb_data)
tb_data_cv <- cross_validation(m, initial = 365, horizon = 30, units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>% group_by(horizon) %>% summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
forecast <- predict(m, tb_data)
plot(m, forecast)
prophet_plot_components(m, forecast)
dyplot.prophet(m, forecast)
# Join in-sample predictions with the observed series to evaluate the fit.
# Fix: the join key is now explicit (by = "ds") -- the original relied on a
# natural join, which prints `Joining, by = "ds"` and can silently pick up
# extra shared columns if either input gains a column later.
aux <- forecast %>%
  select(ds, yhat) %>%
  left_join(tb_data %>%
              select(ds, y) %>%
              mutate(ds = as.POSIXct(ds)),
            by = "ds") %>%
  mutate(residuals = y - yhat)
# Mean absolute percentage error of the in-sample fit
Metrics::mape(aux$y, aux$yhat)
# In-sample R-squared: 1 - SS_residual / SS_total
r2 <- (1 - crossprod(aux$residuals) / crossprod(aux$y - mean(aux$y))) %>%
  as.numeric()
r2
# CONTRIBUTIONS -----------------------------------------------------------
# Keep only the per-component contribution columns of the forecast (drop the
# confidence-interval bounds, the aggregate *_terms columns, the regressor
# aggregates and yhat itself) before computing/plotting contributions.
# Fix: as.tibble() is deprecated in the tibble package -- use as_tibble().
m_aportes <- forecast %>%
  as_tibble() %>%
  select(-ends_with("_lower"), -ends_with("_upper"), -ends_with("_terms"),
         -starts_with("extra_regressors_"), -yhat)
Conento::aportes_ag(m_aportes)
Conento::dibuja_areas(m_aportes %>% mutate(ds = as.Date(ds)))
# CORES -------------------------------------------------------------------
library(tidyverse)
library(lubridate)
library(prophet)
library(readxl)
URL_CORES <- "https://www.cores.es/sites/default/files/archivos/estadisticas/consumos-pp.xlsx"
get_cores_data <- function(sheet = "Gasolinas") {
  # Fetch the CORES monthly national petroleum-product consumption workbook
  # and return the requested sheet as a tidy tibble.
  #
  # PARAMETERS
  # - sheet: char("Gasolinas", "Gasoleos") - workbook sheet to read
  #
  # RETURN
  # - Tibble with <fecha> (moved to the last day of each month) and one
  #   column per product holding the monthly consumption (in tonnes)
  xlsx_path <- paste0(tempfile(), ".xlsx")
  curl::curl_download(URL_CORES, xlsx_path)
  # Read past the 5 header rows and normalize the column names
  raw <- read_excel(xlsx_path, sheet = sheet, skip = 5)
  raw <- janitor::clean_names(raw)
  # Keep only real monthly rows (drop blanks and the yearly "total" row)
  raw <- filter(raw, !is.na(mes), mes != "total")
  # Build a parseable date from year/month; day 1 is a placeholder
  raw <- mutate(raw, dia = 1)
  raw <- unite(raw, fecha, c("ano", "mes", "dia"), sep = "-")
  consumos_pp <- mutate(raw, fecha = ymd(fecha))
  unlink(xlsx_path)
  # Shift every date to the last day of its month
  day(consumos_pp$fecha) <- days_in_month(consumos_pp$fecha)
  consumos_pp
}
# Gasoline 95 monthly consumption as a prophet-ready series (ds = date, y)
tb_gna95 <- get_cores_data() %>%
select(fecha, gasolina_95) %>%
rename(ds = fecha,
y = gasolina_95)
# Diesel A monthly consumption as a prophet-ready series (ds = date, y)
tb_goa <- get_cores_data("Gasoleos") %>%
select(fecha, gasoleo_a) %>%
rename(ds = fecha,
y = gasoleo_a)
# Fit the model
m <- prophet(tb_gna95, holidays = generated_holidays %>%
filter(country == "Spain"))
# m <- prophet(daily.seasonality = FALSE,
# weekly.seasonality = FALSE)
# m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
# m <- fit.prophet(m, tb_goa)
# Make predictions
future <- make_future_dataframe(m, periods = 24, freq = 'month')
tail(future)
forecast <- predict(m, future)
tail(forecast[c('ds', 'yhat', 'yhat_lower', 'yhat_upper')])
# Plots
plot(m, forecast)
prophet_plot_components(m, forecast)
dyplot.prophet(m, forecast)
### PERF
# tb_data_cv <- cross_validation(m,
# initial = 365.25*4,
# period = 365.25,
# horizon = 30, #365.25*2,
# units = 'days')
tb_data_cv <- cross_validation(m,
horizon = 365.25 / 12 * 2,
units = 'days')
head(tb_data_cv)
tb_data_cv %>% select(cutoff) %>% unique
tb_data_cv %>% group_by(cutoff) %>% summarise(n = n())
tb_data_p <- performance_metrics(tb_data_cv)
head(tb_data_p)
tb_data_p_by_h <- tb_data_p %>%
group_by(horizon) %>%
summarise_all(mean)
with(tb_data_p_by_h,
plot(horizon, mse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, rmse, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mae, type = "l"))
with(tb_data_p_by_h,
plot(horizon, mape, type = "l"))
with(tb_data_p_by_h,
plot(horizon, coverage, type = "l"))
plot_cross_validation_metric(tb_data_cv, metric = 'mape')
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dplyr.r
\docType{package}
\name{dplyr}
\alias{dplyr}
\alias{dplyr-package}
\title{dplyr: a grammar of data manipulation}
\description{
dplyr provides a flexible grammar of data manipulation. It's the next
iteration of plyr, focussed on tools for working with data frames (hence the
\emph{d} in the name).
}
\details{
It has three main goals:
\itemize{
\item Identify the most important data manipulation verbs and make them
easy to use from R.
\item Provide blazing fast performance for in-memory data by writing key
pieces in C++ (using Rcpp)
\item Use the same interface to work with data no matter where it's stored,
whether in a data frame, a data table or database.
}
To learn more about dplyr, start with the vignettes:
\code{browseVignettes(package = "dplyr")}
}
|
/man/dplyr.Rd
|
no_license
|
PriyankaSGA/dplyr
|
R
| false
| false
| 865
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dplyr.r
\docType{package}
\name{dplyr}
\alias{dplyr}
\alias{dplyr-package}
\title{dplyr: a grammar of data manipulation}
\description{
dplyr provides a flexible grammar of data manipulation. It's the next
iteration of plyr, focussed on tools for working with data frames (hence the
\emph{d} in the name).
}
\details{
It has three main goals:
\itemize{
\item Identify the most important data manipulation verbs and make them
easy to use from R.
\item Provide blazing fast performance for in-memory data by writing key
pieces in C++ (using Rcpp)
\item Use the same interface to work with data no matter where it's stored,
whether in a data frame, a data table or database.
}
To learn more about dplyr, start with the vignettes:
\code{browseVignettes(package = "dplyr")}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dplyr.R
\name{mutate.PKNCAresults}
\alias{mutate.PKNCAresults}
\alias{mutate.PKNCAconc}
\alias{mutate.PKNCAdose}
\title{dplyr mutate-based modification for PKNCA}
\usage{
\method{mutate}{PKNCAresults}(.data, ...)
\method{mutate}{PKNCAconc}(.data, ...)
\method{mutate}{PKNCAdose}(.data, ...)
}
\arguments{
\item{.data}{A data frame, data frame extension (e.g. a tibble), or a
lazy data frame (e.g. from dbplyr or dtplyr). See \emph{Methods}, below, for
more details.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Name-value pairs.
The name gives the name of the column in the output.
The value can be:
\itemize{
\item A vector of length 1, which will be recycled to the correct length.
\item A vector the same length as the current group (or the whole data frame
if ungrouped).
\item \code{NULL}, to remove the column.
\item A data frame or tibble, to create multiple columns in the output.
}}
}
\description{
dplyr mutate-based modification for PKNCA
}
\seealso{
Other dplyr verbs:
\code{\link{filter.PKNCAresults}()},
\code{\link{group_by.PKNCAresults}()},
\code{\link{inner_join.PKNCAresults}()}
}
\concept{dplyr verbs}
|
/man/mutate.PKNCAresults.Rd
|
no_license
|
cran/PKNCA
|
R
| false
| true
| 1,267
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dplyr.R
\name{mutate.PKNCAresults}
\alias{mutate.PKNCAresults}
\alias{mutate.PKNCAconc}
\alias{mutate.PKNCAdose}
\title{dplyr mutate-based modification for PKNCA}
\usage{
\method{mutate}{PKNCAresults}(.data, ...)
\method{mutate}{PKNCAconc}(.data, ...)
\method{mutate}{PKNCAdose}(.data, ...)
}
\arguments{
\item{.data}{A data frame, data frame extension (e.g. a tibble), or a
lazy data frame (e.g. from dbplyr or dtplyr). See \emph{Methods}, below, for
more details.}
\item{...}{<\code{\link[dplyr:dplyr_data_masking]{data-masking}}> Name-value pairs.
The name gives the name of the column in the output.
The value can be:
\itemize{
\item A vector of length 1, which will be recycled to the correct length.
\item A vector the same length as the current group (or the whole data frame
if ungrouped).
\item \code{NULL}, to remove the column.
\item A data frame or tibble, to create multiple columns in the output.
}}
}
\description{
dplyr mutate-based modification for PKNCA
}
\seealso{
Other dplyr verbs:
\code{\link{filter.PKNCAresults}()},
\code{\link{group_by.PKNCAresults}()},
\code{\link{inner_join.PKNCAresults}()}
}
\concept{dplyr verbs}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heavy_model.R
\name{HEAVYmodel}
\alias{HEAVYmodel}
\title{HEAVY Model estimation}
\usage{
HEAVYmodel(
data,
p = matrix(c(0, 0, 1, 1), ncol = 2),
q = matrix(c(1, 0, 0, 1), ncol = 2),
startingValues = NULL,
LB = NULL,
UB = NULL,
backCast = NULL,
compConst = FALSE
)
}
\arguments{
\item{data}{a (T x K) matrix containing the data, with T the number of days. For the traditional HEAVY model: K = 2, the first column contains the squared daily demeaned returns, the second column contains the realized measures.}
\item{p}{a (K x K) matrix containing the lag length for the model innovations. Position (i, j) in the matrix indicates the number of lags in equation i of the model for the innovations in data column j. For the traditional heavy model p is given by matrix(c(0,0,1,1), ncol = 2) (default).}
\item{q}{a (K x K) matrix containing the lag length for the conditional variances. Position (i, j) in the matrix indicates the number of lags in equation i of the model for conditional variances corresponding to series j. For the traditional heavy model introduced above q is given by matrix( c(1,0,0,1),ncol=2 ) (default).}
\item{startingValues}{a vector containing the starting values to be used in the optimization to find the optimal parameters estimates.}
\item{LB}{a vector of length K indicating the lower bounds to be used in the estimation. If NULL it is set to a vector of zeros by default.}
\item{UB}{a vector of length K indicating the upper bounds to be used in the estimation. If NULL it is set to a vector of Inf by default.}
\item{backCast}{a vector of length K used to initialize the estimation. If NULL the unconditional estimates are taken.}
\item{compConst}{a boolean variable. In case TRUE, the omega values are estimated in the optimization. In case FALSE, volatility targeting is done and omega is just 1 minus the sum of all relevant alpha's and beta's multiplied by the unconditional variance.}
}
\value{
A list with the following values:
(i) loglikelihood: the log likelihood evaluated at the parameter estimates.
(ii) likelihoods: an xts object of length T containing the log likelihoods per day.
(iii) condvar: a (T x K) xts object containing the conditional variances
(iv) estparams: a vector with the parameter estimates. The order in which the
parameters are reported is as follows: First the estimates for omega then the
estimates for the non-zero alpha's with the most recent lags first in case max(p) > 1,
then the estimates for the non-zero beta's with the most recent lag first in case
max(q) > 1.
(v) convergence: an integer code indicating the successfulness of the optimization. See \verb{optim} for more information.
}
\description{
This function calculates the High frEquency bAsed VolatilitY (HEAVY) model proposed in Shephard and Sheppard (2010). This function is used as a predictive volatility model built to exploit highfrequency data.
}
\details{
Assume there are \eqn{T} daily returns and realized measures in the period \eqn{t}. Let \eqn{r_i} and \eqn{RM_i} be the \eqn{i^{th}} daily return and daily realized measure respectively (with \eqn{i=1, \ldots,T}).
The most basic heavy model is the one with lag matrices p of \eqn{\left( \begin{array}{ccc} 0 & 1 \\ 0 & 1 \end{array} \right)} and q of \eqn{\left( \begin{array}{ccc} 1 & 0 \\ 0 & 1 \end{array} \right)}. This can be represented by the following equations:
\deqn{
\mbox{var}{\left(r_t \right)} = h_t = w + \alpha RM_{t-1} + \beta h_{t-1}; w,\alpha \geq 0, \beta \in [0,1]
}
\deqn{
\mbox{E}{\left(RM_t \right)} = \mu_t = w_R + \alpha_R RM_{t-1} + \beta_R \mu_{t-1}; w_R,\alpha_R, \beta_R \geq 0, \alpha_R+\beta_R \in [0,1]
}
Equivalently, they can be presented in terms of matrix notation as below:
\deqn{
\left( \begin{array}{ccc} h_t \\ \mu_t \end{array} \right) = \left( \begin{array}{ccc} w \\ w_R \end{array} \right) + \left( \begin{array}{ccc} 0 & \alpha \\ 0 & \alpha_R \end{array} \right) \left( \begin{array}{ccc} r^2_{t-1} \\ RM_{t-1} \end{array} \right) + \left( \begin{array}{ccc} \beta & 0 \\ 0 & \beta_R \end{array} \right) \left( \begin{array}{ccc} h_{t-1} \\ \mu_{t-1} \end{array} \right)
}
In this version, the parameters vector to be estimated is \eqn{\left( w, w_R,\alpha, \alpha_R, \beta, \beta_R \right) }.
In terms of startingValues, Shephard and Sheppard recommend for this version of the Heavy model to set \eqn{\beta} be around 0.6 and sum of \eqn{\alpha}+\eqn{\beta} to be close to but slightly less than one.
In general, the lag length for the model innovation and the conditional covariance can be greater than 1. Consider, for example, matrix p is \eqn{\left( \begin{array}{ccc} 0 & 2 \\ 0 & 1 \end{array} \right)} and matrix q is the same as above. Matrix notation will be as below:
\deqn{
\left( \begin{array}{ccc} h_t \\ \mu_t \end{array} \right) = \left( \begin{array}{ccc} w \\ w_R \end{array} \right) + \left( \begin{array}{ccc} 0 & \alpha_1 \\ 0 & \alpha_R \end{array} \right) \left( \begin{array}{ccc} r^2_{t-1} \\ RM_{t-1} \end{array} \right) +\left( \begin{array}{ccc} 0 & \alpha_2 \\ 0 & 0 \end{array} \right) \left( \begin{array}{ccc} r^2_{t-2} \\ RM_{t-2} \end{array} \right) + \left( \begin{array}{ccc} \beta & 0 \\ 0 & \beta_R \end{array} \right) \left( \begin{array}{ccc} h_{t-1} \\ \mu_{t-1} \end{array} \right)}
In this version, the parameters vector to be estimated is \eqn{\left( w, w_R,\alpha_1, \alpha_R, \alpha_2, \beta, \beta_R \right) }.
}
\examples{
# Implementation of the heavy model on DJI:
returns <- realizedLibrary$open_to_close
bv <- realizedLibrary$bv
returns <- returns[!is.na(bv)]
bv <- bv[!is.na(bv)] # Remove NA's
data <- cbind( returns^2, bv) # Make data matrix with returns and realized measures
backCast <- matrix(c(var(returns), mean(bv)), ncol = 1)
#For traditional (default) version:
startvalues <- c(0.004,0.02,0.44,0.41,0.74,0.56) # Initial values
output <- HEAVYmodel(data = as.matrix(data,ncol=2), compConst=FALSE,
startingValues = startvalues, backCast=backCast)
#For general version:
startvalues <- c(0.004, 0.02, 0.44, 0.4, 0.41, 0.74, 0.56) # Initial values;
p <- matrix(c(2, 0, 0, 1), ncol = 2)
q <- matrix(c(1, 0, 0, 1), ncol = 2)
heavy_model <- HEAVYmodel(data = as.matrix(data, ncol = 2), p = p, q = q, compConst = FALSE,
startingValues = startvalues, backCast = backCast)
}
\references{
Shephard, N. and K. Sheppard (2010). Realising the future: forecasting with high frequency based volatility (heavy) models. Journal of Applied Econometrics 25, 197-231.
}
\author{
Giang Nguyen, Jonathan Cornelissen, Kris Boudt and Onno Kleen.
}
|
/highfrequency/man/HEAVYmodel.Rd
|
no_license
|
akhikolla/InformationHouse
|
R
| false
| true
| 6,692
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/heavy_model.R
\name{HEAVYmodel}
\alias{HEAVYmodel}
\title{HEAVY Model estimation}
\usage{
HEAVYmodel(
data,
p = matrix(c(0, 0, 1, 1), ncol = 2),
q = matrix(c(1, 0, 0, 1), ncol = 2),
startingValues = NULL,
LB = NULL,
UB = NULL,
backCast = NULL,
compConst = FALSE
)
}
\arguments{
\item{data}{a (T x K) matrix containing the data, with T the number of days. For the traditional HEAVY model: K = 2, the first column contains the squared daily demeaned returns, the second column contains the realized measures.}
\item{p}{a (K x K) matrix containing the lag length for the model innovations. Position (i, j) in the matrix indicates the number of lags in equation i of the model for the innovations in data column j. For the traditional heavy model p is given by matrix(c(0,0,1,1), ncol = 2) (default).}
\item{q}{a (K x K) matrix containing the lag length for the conditional variances. Position (i, j) in the matrix indicates the number of lags in equation i of the model for conditional variances corresponding to series j. For the traditional heavy model introduced above q is given by matrix( c(1,0,0,1),ncol=2 ) (default).}
\item{startingValues}{a vector containing the starting values to be used in the optimization to find the optimal parameters estimates.}
\item{LB}{a vector of length K indicating the lower bounds to be used in the estimation. If NULL it is set to a vector of zeros by default.}
\item{UB}{a vector of length K indicating the upper bounds to be used in the estimation. If NULL it is set to a vector of Inf by default.}
\item{backCast}{a vector of length K used to initialize the estimation. If NULL the unconditional estimates are taken.}
\item{compConst}{a boolean variable. In case TRUE, the omega values are estimated in the optimization. In case FALSE, volatility targeting is done and omega is just 1 minus the sum of all relevant alpha's and beta's multiplied by the unconditional variance.}
}
\value{
A list with the following values:
(i) loglikelihood: the log likelihood evaluated at the parameter estimates.
(ii) likelihoods: an xts object of length T containing the log likelihoods per day.
(iii) condvar: a (T x K) xts object containing the conditional variances
(iv) estparams: a vector with the parameter estimates. The order in which the
parameters are reported is as follows: First the estimates for omega then the
estimates for the non-zero alpha's with the most recent lags first in case max(p) > 1,
then the estimates for the non-zero beta's with the most recent lag first in case
max(q) > 1.
(v) convergence: an integer code indicating the successfulness of the optimization. See \verb{optim} for more information.
}
\description{
This function calculates the High frEquency bAsed VolatilitY (HEAVY) model proposed in Shephard and Sheppard (2010). This function is used as a predictive volatility model built to exploit highfrequency data.
}
\details{
Assume there are \eqn{T} daily returns and realized measures in the period \eqn{t}. Let \eqn{r_i} and \eqn{RM_i} be the \eqn{i^{th}} daily return and daily realized measure respectively (with \eqn{i=1, \ldots,T}).
The most basic heavy model is the one with lag matrices p of \eqn{\left( \begin{array}{ccc} 0 & 1 \\ 0 & 1 \end{array} \right)} and q of \eqn{\left( \begin{array}{ccc} 1 & 0 \\ 0 & 1 \end{array} \right)}. This can be represented by the following equations:
\deqn{
\mbox{var}{\left(r_t \right)} = h_t = w + \alpha RM_{t-1} + \beta h_{t-1}; w,\alpha \geq 0, \beta \in [0,1]
}
\deqn{
\mbox{E}{\left(RM_t \right)} = \mu_t = w_R + \alpha_R RM_{t-1} + \beta_R \mu_{t-1}; w_R,\alpha_R, \beta_R \geq 0, \alpha_R+\beta_R \in [0,1]
}
Equivalently, they can be presented in terms of matrix notation as below:
\deqn{
\left( \begin{array}{ccc} h_t \\ \mu_t \end{array} \right) = \left( \begin{array}{ccc} w \\ w_R \end{array} \right) + \left( \begin{array}{ccc} 0 & \alpha \\ 0 & \alpha_R \end{array} \right) \left( \begin{array}{ccc} r^2_{t-1} \\ RM_{t-1} \end{array} \right) + \left( \begin{array}{ccc} \beta & 0 \\ 0 & \beta_R \end{array} \right) \left( \begin{array}{ccc} h_{t-1} \\ \mu_{t-1} \end{array} \right)
}
In this version, the parameters vector to be estimated is \eqn{\left( w, w_R,\alpha, \alpha_R, \beta, \beta_R \right) }.
In terms of startingValues, Shephard and Sheppard recommend for this version of the Heavy model to set \eqn{\beta} be around 0.6 and sum of \eqn{\alpha}+\eqn{\beta} to be close to but slightly less than one.
In general, the lag length for the model innovation and the conditional covariance can be greater than 1. Consider, for example, matrix p is \eqn{\left( \begin{array}{ccc} 0 & 2 \\ 0 & 1 \end{array} \right)} and matrix q is the same as above. Matrix notation will be as below:
\deqn{
\left( \begin{array}{ccc} h_t \\ \mu_t \end{array} \right) = \left( \begin{array}{ccc} w \\ w_R \end{array} \right) + \left( \begin{array}{ccc} 0 & \alpha_1 \\ 0 & \alpha_R \end{array} \right) \left( \begin{array}{ccc} r^2_{t-1} \\ RM_{t-1} \end{array} \right) +\left( \begin{array}{ccc} 0 & \alpha_2 \\ 0 & 0 \end{array} \right) \left( \begin{array}{ccc} r^2_{t-2} \\ RM_{t-2} \end{array} \right) + \left( \begin{array}{ccc} \beta & 0 \\ 0 & \beta_R \end{array} \right) \left( \begin{array}{ccc} h_{t-1} \\ \mu_{t-1} \end{array} \right)}
In this version, the parameters vector to be estimated is \eqn{\left( w, w_R,\alpha_1, \alpha_R, \alpha_2, \beta, \beta_R \right) }.
}
\examples{
# Implementation of the heavy model on DJI:
returns <- realizedLibrary$open_to_close
bv <- realizedLibrary$bv
returns <- returns[!is.na(bv)]
bv <- bv[!is.na(bv)] # Remove NA's
data <- cbind( returns^2, bv) # Make data matrix with returns and realized measures
backCast <- matrix(c(var(returns), mean(bv)), ncol = 1)
#For traditional (default) version:
startvalues <- c(0.004,0.02,0.44,0.41,0.74,0.56) # Initial values
output <- HEAVYmodel(data = as.matrix(data,ncol=2), compConst=FALSE,
startingValues = startvalues, backCast=backCast)
#For general version:
startvalues <- c(0.004, 0.02, 0.44, 0.4, 0.41, 0.74, 0.56) # Initial values;
p <- matrix(c(2, 0, 0, 1), ncol = 2)
q <- matrix(c(1, 0, 0, 1), ncol = 2)
heavy_model <- HEAVYmodel(data = as.matrix(data, ncol = 2), p = p, q = q, compConst = FALSE,
startingValues = startvalues, backCast = backCast)
}
\references{
Shephard, N. and K. Sheppard (2010). Realising the future: forecasting with high frequency based volatility (heavy) models. Journal of Applied Econometrics 25, 197-231.
}
\author{
Giang Nguyen, Jonathan Cornelissen, Kris Boudt and Onno Kleen.
}
|
## Repeated-seed evaluation of an RBF-kernel SVM on the CAD dataset.
## For each seed: tune cost/gamma by cross-validation, fit a
## class-weighted C-classification SVM on the training split, and record
## test-set metrics (accuracy, sensitivity, specificity, MCC, F1,
## ROC AUC) into the `sapdata` summary table, which is written back out.
rm(list = ls())

# Load packages once, outside the loop (originally re-attached per iteration).
library(e1071)  # svm(), tune()
library(pROC)   # roc(), auc()

training <- read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/RFE-RF/CAD Dataset Train70p.csv")
testing  <- read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/RFE-RF/CAD Dataset Test30p.csv")
sapdata  <- read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/RFE-RF/Sap_SVM_Selected.csv")

# Seeds for the repeated runs; alternative seed sets kept for reference.
seedarray <- c(131, 124, 451, 689, 320, 420, 987, 150, 489, 143,
               323, 550, 740, 430, 303, 264, 456, 970, 950, 880)
# seedarray <- c(131,124,451,689,320,420,987,150,489,143,640,550,740,250,870,264,456,970,950,880)
# seedarray <- 970

# Preallocate per-run metric accumulators.
n_runs <- length(seedarray)
accarray    <- numeric(n_runs)
senarray    <- numeric(n_runs)
spearray    <- numeric(n_runs)
mccarray    <- numeric(n_runs)
f1array     <- numeric(n_runs)
rocaucarray <- numeric(n_runs)

for (i in seq_along(seedarray)) {
  set.seed(seedarray[i])

  # Inverse-frequency class weights to counter class imbalance in `Cath`.
  wts <- 100 / table(training$Cath)

  # Grid-search cost/gamma over 2^(-8:8) with k-fold cross-validation;
  # system.time() just reports how long the tuning took.
  system.time(svm_tune <- tune(
    svm,
    train.x = training[, 1:21],
    train.y = as.factor(training[, 22]),
    kernel = "radial",
    class.weights = wts,
    tunecontrol = tune.control(sampling = "cross"),
    ranges = list(cost = 2^(-8:8), gamma = 2^(-8:8))
  ))

  # Refit on the full training set with the tuned hyper-parameters.
  classifier <- svm(
    formula = factor(Cath) ~ .,
    data = training,
    scale = TRUE,
    type = "C-classification",
    kernel = "radial",
    cost = svm_tune$best.parameters$cost,
    gamma = svm_tune$best.parameters$gamma
  )
  print(classifier)

  # Predict on the held-out split (column 22 is the label, so drop it).
  y_pred <- predict(classifier, newdata = testing[-22])
  cm <- table(y_pred, testing[, 22], dnn = c("Prediction", "Actual"))
  miscl <- 1 - sum(diag(cm)) / sum(cm)
  print(i)
  print("Accuracy:")
  acc <- 1 - miscl
  print(acc)
  accarray[i] <- acc

  # Confusion-matrix cells (rows = prediction, columns = actual).
  tp <- cm[2, 2]
  tn <- cm[1, 1]
  fn <- cm[1, 2]
  fp <- cm[2, 1]
  sen <- tp / (tp + fn)
  spe <- tn / (tn + fp)
  mcc <- ((tp * tn) - (fp * fn)) /
    sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
  f1  <- 2 * tp / ((2 * tp) + fp + fn)
  senarray[i] <- sen
  spearray[i] <- spe
  mccarray[i] <- mcc
  f1array[i]  <- f1

  # ROC AUC from the predicted class labels (not probabilities).
  roc_obj <- roc(testing[, 22], as.numeric(y_pred))
  rocauc  <- auc(roc_obj)
  rocaucarray[i] <- rocauc

  # Persist per-run settings and metrics into the summary table.
  sapdata[i, 1] <- seedarray[i]
  sapdata[i, 2] <- svm_tune$best.parameters$cost
  sapdata[i, 3] <- svm_tune$best.parameters$gamma
  sapdata[i, 4] <- acc
  sapdata[i, 5] <- sen
  sapdata[i, 6] <- spe
  sapdata[i, 7] <- mcc
  sapdata[i, 8] <- f1
  sapdata[i, 9] <- rocauc
}

write.csv(sapdata, "F:/Thesis/DataMing+MachieLeaning/CAD/RFE-RF/Sap_SVM_Selected.csv")

# Summary statistics across all seeds ("Stanard" typos in output fixed).
print("Mean Accuracy")
print(mean(accarray))
print("Standard Deviation")
print(sd(accarray))
print("Mean Sensitivity")
print(mean(senarray))
print("Standard Deviation")
print(sd(senarray))
print("Mean Specificity")
print(mean(spearray))
print("Standard Deviation")
print(sd(spearray))
print("Mean mcc")
print(mean(mccarray))
print("Standard Deviation")
print(sd(mccarray))
print("Mean F1")
print(mean(f1array))
print("Standard Deviation")
print(sd(f1array))
print("Mean Auc")
print(mean(rocaucarray))
print("Standard Deviation")
print(sd(rocaucarray))
|
/RFE-RF/SVM.R
|
permissive
|
UtshaDas/CAD-Classification
|
R
| false
| false
| 3,027
|
r
|
## Repeated-seed evaluation of an RBF-kernel SVM on the CAD dataset:
## for each seed, tune cost/gamma by cross-validation, fit a
## class-weighted SVM, and record test-set metrics into `sapdata`.
rm(list=ls())
training<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/RFE-RF/CAD Dataset Train70p.csv")
testing<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/RFE-RF/CAD Dataset Test30p.csv")
sapdata<-read.csv("F:/Thesis/DataMing+MachieLeaning/CAD/RFE-RF/Sap_SVM_Selected.csv")
# Seeds for the repeated runs; alternative seed sets kept commented out.
seedarray <- c(131,124,451,689,320,420,987,150,489,143,323,550,740,430,303,264,456,970,950,880)
#seedarray <- c(131,124,451,689,320,420,987,150,489,143,640,550,740,250,870,264,456,970,950,880)
# seedarray<-970
# Per-run metric accumulators (one slot per seed).
accarray<-rep(0,length(seedarray))
senarray<-rep(0,length(seedarray))
spearray<-rep(0,length(seedarray))
mccarray<-rep(0,length(seedarray))
f1array<-rep(0,length(seedarray))
rocaucarray<-rep(0,length(seedarray))
for(i in 1:length(seedarray)){
  # NOTE(review): package attachment could be hoisted out of the loop.
  library(e1071)
  library(pROC)
  set.seed(seedarray[i])
  # Inverse-frequency class weights to counter class imbalance in `Cath`.
  wts <- 100 / table(training$Cath)
  # Grid-search cost/gamma over 2^(-8:8) with cross-validation.
  system.time(svm_tune <- tune(svm, train.x=training[,1:21], train.y=as.factor(training[,22]),
  kernel="radial",class.weights = wts,tunecontrol = tune.control(sampling = "cross"), ranges=list(cost=2^(-8:8), gamma=c(2^(-8:8)))))
  # Refit on the full training set with the tuned hyper-parameters.
  classifier=svm(formula=factor(Cath) ~ .,
  data=training,
  scale=TRUE,
  type='C-classification',
  kernel='radial',
  cost=svm_tune$best.parameters$cost,
  gamma=svm_tune$best.parameters$gamma)
  print(classifier)
  # Predict on the held-out split (column 22 is the label, so drop it).
  y_pred=predict(classifier,newdata=testing[-22])
  #y_pred<-round(y_pred)
  cm=table(y_pred,testing[,22],dnn=c("Prediction","Actual"))
  miscl<-1-sum(diag(cm))/sum(cm)
  print(i)
  print('Accuracy:')
  acc=(1-miscl)
  print(acc)
  accarray[i]<-acc
  # Confusion-matrix cells (rows = prediction, columns = actual).
  tp<-cm[2,2]
  tn<-cm[1,1]
  fn<-cm[1,2]
  fp<-cm[2,1]
  sen=tp/(tp+fn)
  spe=tn/(tn+fp)
  mcc=((tp*tn) - (fp*fn))/(sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn)))
  f1=2*tp/((2*tp)+fp+fn)
  senarray[i]<-sen
  spearray[i]<-spe
  mccarray[i]<-mcc
  f1array[i]<-f1
  # ROC AUC computed from the predicted class labels (not probabilities).
  roc_obj<-roc(testing[,22],as.numeric(y_pred))
  rocauc<-auc(roc_obj)
  rocaucarray[i]<-rocauc
  # Persist per-run settings and metrics into the summary table.
  sapdata[i,1]=seedarray[i]
  sapdata[i,2]=svm_tune$best.parameters$cost
  sapdata[i,3]=svm_tune$best.parameters$gamma
  sapdata[i,4]=acc
  sapdata[i,5]=sen
  sapdata[i,6]=spe
  sapdata[i,7]=mcc
  sapdata[i,8]=f1
  sapdata[i,9]=rocauc
}
write.csv(sapdata,"F:/Thesis/DataMing+MachieLeaning/CAD/RFE-RF/Sap_SVM_Selected.csv")
# Summary statistics across all seeds.
print('Mean Accuracy')
print(mean(accarray))
print('Standard Deviation')
print(sd(accarray))
print('Mean Sensitivity')
print(mean(senarray))
print('Standard Deviation')
print(sd(senarray))
print('Mean Specificity')
print(mean(spearray))
print('Stanard Deviation')
print(sd(spearray))
print('Mean mcc')
print(mean(mccarray))
print('Stanard Deviation')
print(sd(mccarray))
print('Mean F1')
print(mean(f1array))
print('Stanard Deviation')
print(sd(f1array))
print('Mean Auc')
print(mean(rocaucarray))
print('Stanard Deviation')
print(sd(rocaucarray))
|
# testthat suite for sparklyr's dplyr backend: verifies that dplyr verbs
# (mutate, filter, joins, sampling, broadcast hints, compute) executed
# against a Spark connection agree with the same operations on local
# data frames. Relies on project test helpers (testthat_spark_connection,
# testthat_tbl, test_requires, test_requires_version) defined elsewhere.
context("dplyr")
# Shared fixtures: a Spark connection plus local/remote copies of iris
# and two small joinable frames.
sc <- testthat_tbl <- NULL  # NOTE(review): placeholder removed; see below
|
/tests/testthat/test-dplyr.R
|
permissive
|
COUBANAO/sparklyr
|
R
| false
| false
| 7,649
|
r
|
# testthat suite for sparklyr's dplyr backend: verifies that dplyr verbs
# (mutate, filter, joins, sampling, broadcast hints, compute) executed
# against a Spark connection agree with the same operations on local
# data frames. Relies on project test helpers (testthat_spark_connection,
# testthat_tbl, test_requires, test_requires_version) defined elsewhere.
context("dplyr")
sc <- testthat_spark_connection()
iris_tbl <- testthat_tbl("iris")
test_requires("dplyr")
# Two small frames sharing key column `b`, used by the join tests.
df1 <- tibble(a = 1:3, b = letters[1:3])
df2 <- tibble(b = letters[1:3], c = letters[24:26])
df1_tbl <- testthat_tbl("df1")
df2_tbl <- testthat_tbl("df2")
test_that("the implementation of 'mutate' functions as expected", {
  test_requires("dplyr")
  expect_equivalent(
    iris %>% mutate(x = Species) %>% tbl_vars() %>% length(),
    iris_tbl %>% mutate(x = Species) %>% collect() %>% tbl_vars() %>% length()
  )
})
test_that("the implementation of 'filter' functions as expected", {
  test_requires("dplyr")
  # Spark column names use underscores; rename local iris to match.
  expect_equivalent(
    iris_tbl %>%
      filter(Sepal_Length == 5.1) %>%
      filter(Sepal_Width == 3.5) %>%
      filter(Petal_Length == 1.4) %>%
      filter(Petal_Width == 0.2) %>%
      select(Species) %>%
      collect(),
    iris %>%
      transmute(
        Sepal_Length = `Sepal.Length`,
        Sepal_Width = `Sepal.Width`,
        Petal_Length = `Petal.Length`,
        Petal_Width = `Petal.Width`,
        Species = Species
      ) %>%
      filter(Sepal_Length == 5.1) %>%
      filter(Sepal_Width == 3.5) %>%
      filter(Petal_Length == 1.4) %>%
      filter(Petal_Width == 0.2) %>%
      transmute(Species = as.character(Species))
  )
})
test_that("if_else works as expected", {
  # Three-argument ifelse leaves NA; the fourth argument supplies a
  # replacement for missing values.
  sdf <- copy_to(sc, tibble::tibble(x = c(0.9, NA_real_, 1.1)))
  expect_equal(
    sdf %>% dplyr::mutate(x = ifelse(x > 1, "good", "bad")) %>% dplyr::pull(x),
    c("bad", NA, "good")
  )
  expect_equal(
    sdf %>% dplyr::mutate(x = ifelse(x > 1, "good", "bad", "unknown")) %>%
      dplyr::pull(x),
    c("bad", "unknown", "good")
  )
})
test_that("grepl works as expected", {
  test_requires("dplyr")
  regexes <- c(
    "a|c", ".", "b", "x|z", "", "y", "e", "^", "$", "^$", "[0-9]", "[a-z]", "[b-z]"
  )
  verify_equivalent <- function(actual, expected) {
    # handle an edge case for arrow-enabled Spark connection
    for (col in colnames(df2)) {
      expect_equivalent(
        as.character(actual[[col]]),
        as.character(expected[[col]])
      )
    }
  }
  # Each regex must filter identically on the local and Spark frames.
  for (regex in regexes) {
    verify_equivalent(
      df2 %>% dplyr::filter(grepl(regex, b)),
      df2_tbl %>% dplyr::filter(grepl(regex, b)) %>% collect()
    )
    verify_equivalent(
      df2 %>% dplyr::filter(grepl(regex, c)),
      df2_tbl %>% dplyr::filter(grepl(regex, c)) %>% collect()
    )
  }
})
test_that("weighted.mean works as expected", {
  # The expected value is the NA-aware weighted mean computed by hand:
  # sum(w*x) / sum(w over non-missing x).
  df <- tibble::tibble(
    x = c(NA_real_, 3.1, 2.2, NA_real_, 3.3, 4),
    w = c(NA_real_, 1, 0.5, 1, 0.75, NA_real_)
  )
  sdf <- copy_to(sc, df, overwrite = TRUE)
  expect_equal(
    sdf %>% dplyr::summarize(wm = weighted.mean(x, w)) %>% dplyr::pull(wm),
    df %>%
      dplyr::summarize(
        wm = sum(w * x, na.rm = TRUE) /
          sum(w * as.numeric(!is.na(x)), na.rm = TRUE)
      ) %>%
      dplyr::pull(wm)
  )
  df <- tibble::tibble(
    x = rep(c(NA_real_, 3.1, 2.2, NA_real_, 3.3, 4), 3L),
    w = rep(c(NA_real_, 1, 0.5, 1, 0.75, NA_real_), 3L),
    grp = c(rep(1L, 6L), rep(2L, 6L), rep(3L, 6L))
  )
  sdf <- copy_to(sc, df, overwrite = TRUE)
  expect_equal(
    sdf %>% dplyr::summarize(wm = weighted.mean(x, w)) %>% dplyr::pull(wm),
    df %>%
      dplyr::summarize(
        wm = sum(w * x, na.rm = TRUE) /
          sum(w * as.numeric(!is.na(x)), na.rm = TRUE)
      ) %>%
      dplyr::pull(wm)
  )
  # Weighted mean of expressions (x^3 weighted by w^2), not just columns.
  expect_equal(
    sdf %>% dplyr::summarize(wm = weighted.mean(x ^ 3, w ^ 2)) %>% dplyr::pull(wm),
    df %>%
      dplyr::summarize(
        wm = sum(w ^ 2 * x ^ 3, na.rm = TRUE) /
          sum(w ^ 2 * as.numeric(!is.na(x)), na.rm = TRUE)
      ) %>%
      dplyr::pull(wm)
  )
})
test_that("'head' uses 'limit' clause", {
  test_requires("dplyr")
  test_requires("dbplyr")
  expect_true(
    grepl(
      "LIMIT",
      sql_render(head(iris_tbl))
    )
  )
})
test_that("'left_join' does not use 'using' clause", {
  test_requires("dplyr")
  test_requires("dbplyr")
  # USING is only expected for Spark >= 2.0 paired with old dplyr.
  expect_equal(
    spark_version(sc) >= "2.0.0" && packageVersion("dplyr") < "0.5.0.90",
    grepl(
      "USING",
      sql_render(left_join(df1_tbl, df2_tbl))
    )
  )
})
test_that("the implementation of 'left_join' functions as expected", {
  test_requires("dplyr")
  expect_equivalent(
    left_join(df1, df2) %>% dplyr::arrange(b),
    left_join(df1_tbl, df2_tbl) %>% dplyr::arrange(b) %>% collect()
  )
})
test_that("'sample_n' works as expected", {
  test_requires_version("2.0.0")
  test_requires("dplyr")
  # Exercise all four combinations of weighted/unweighted x with/without
  # replacement; only column set and row count are asserted.
  for (weight in list(NULL, rlang::sym("Petal_Length"))) {
    for (replace in list(FALSE, TRUE)) {
      sample_sdf <- iris_tbl %>%
        sample_n(10, weight = !!weight, replace = replace)
      expect_equal(colnames(sample_sdf), colnames(iris_tbl))
      expect_equal(sample_sdf %>% collect() %>% nrow(), 10)
      sample_sdf <- iris_tbl %>%
        select(Petal_Length) %>%
        sample_n(10, weight = !!weight, replace = replace)
      expect_equal(colnames(sample_sdf), "Petal_Length")
      expect_equal(sample_sdf %>% collect() %>% nrow(), 10)
    }
  }
})
test_that("'sample_frac' works as expected", {
  test_requires_version("2.0.0")
  test_requires("dplyr")
  for (weight in list(NULL, rlang::sym("Petal_Length"))) {
    for (replace in list(FALSE, TRUE)) {
      sample_sdf <- iris_tbl %>%
        sample_frac(0.2, weight = !!weight, replace = replace)
      expect_equal(colnames(sample_sdf), colnames(iris_tbl))
      expect_equal(sample_sdf %>% collect() %>% nrow(), round(0.2 * nrow(iris)))
      sample_sdf <- iris_tbl %>%
        select(Petal_Length) %>%
        sample_frac(0.2, weight = !!weight, replace = replace)
      expect_equal(colnames(sample_sdf), "Petal_Length")
      expect_equal(sample_sdf %>% collect() %>% nrow(), round(0.2 * nrow(iris)))
    }
  }
})
test_that("weighted sampling works as expected with integer weight columns", {
  test_requires_version("2.0.0")
  test_requires("dplyr")
  sdf <- copy_to(sc, tibble::tibble(id = seq(100), weight = seq(100)))
  for (replace in list(FALSE, TRUE)) {
    sample_sdf <- sdf %>%
      sample_n(20, weight = weight, replace = replace)
    expect_equal(colnames(sample_sdf), colnames(sdf))
    expect_equal(sample_sdf %>% collect() %>% nrow(), 20)
    sample_sdf <- sdf %>%
      sample_frac(0.2, weight = weight, replace = replace)
    expect_equal(colnames(sample_sdf), colnames(sdf))
    expect_equal(sample_sdf %>% collect() %>% nrow(), 20)
  }
})
test_that("set.seed makes sampling outcomes deterministic", {
  test_requires_version("2.0.0")
  test_requires("dplyr")
  sdf <- copy_to(sc, tibble::tibble(id = seq(1000), weight = rep(seq(5), 200)))
  # Two draws under the same seed must produce identical samples.
  for (weight in list(NULL, rlang::sym("weight"))) {
    for (replace in list(FALSE, TRUE)) {
      outcomes <- lapply(
        seq(2),
        function(i) {
          set.seed(142857L)
          sdf %>% sample_n(200, weight = weight, replace = replace) %>% collect()
        }
      )
      expect_equivalent(outcomes[[1]], outcomes[[2]])
      outcomes <- lapply(
        seq(2),
        function(i) {
          set.seed(142857L)
          sdf %>% sample_frac(0.2, weight = weight, replace = replace) %>% collect()
        }
      )
      expect_equivalent(outcomes[[1]], outcomes[[2]])
    }
  }
})
test_that("'sdf_broadcast' forces broadcast hash join", {
  # Inspect the analyzed logical plan for a broadcast hint.
  query_plan <- df1_tbl %>%
    sdf_broadcast() %>%
    left_join(df2_tbl, by = "b") %>%
    spark_dataframe() %>%
    invoke("queryExecution") %>%
    invoke("analyzed") %>%
    invoke("toString")
  expect_match(query_plan, "B|broadcast")
})
test_that("can compute() over tables", {
  test_requires("dplyr")
  iris_tbl %>% compute()
  succeed()
})
|
### Coauthors network; i.e., authors as vertices and two vertices
### joined by an edge if the two authors have written a joint paper.
### Demo script: depends on the ISIPTA package data and on plyr/igraph
### (loaded by the "regular-contributors" demo sourced below).
library("ISIPTA")
demo("regular-contributors", package = "ISIPTA",
     verbose = FALSE, echo = FALSE, ask = FALSE)
data("papers_authors", package = "ISIPTA")
# For every multi-author paper, enumerate all unordered author pairs.
coauthors_pairs <- ddply(papers_authors, .(id),
                         function(x) {
                           if ( nrow(x) > 1 ) {
                             authors <- sort(as.character(x$author))
                             pairs <- combn(authors, 2)
                             data.frame(author1 =
                                        factor(pairs[1, ],
                                               levels = levels(x$author)),
                                        author2 =
                                        factor(pairs[2, ],
                                               levels = levels(x$author)),
                                        year = x$year[1],
                                        id = x$id[1])
                           }
                         })
coauthors_pairs <- within(coauthors_pairs, {
  year <- ordered(year)
  id <- factor(id)
})
## Reduce to the number of each pair:
coauthors_npairs <- ddply(coauthors_pairs, .(author1, author2),
                          function(x) {
                            c(npairs = nrow(x))
                          })
### Overall collaboration graph: #####################################
## Edgelist; width of the edge is the number of joint papers:
edgelist <- within(coauthors_npairs, {
  width <- npairs
  npairs <- NULL
})
## Vertices:
vertices <- data.frame(name = levels(edgelist$author1))
## Graph:
graph <- graph.data.frame(edgelist,
                          directed = FALSE,
                          vertices = vertices)
summary(graph)
### Visualization of the graph: ######################################
set.seed(1234)
plot(graph,
     vertex.size = 5,
     vertex.color = "gray90",
     vertex.frame.color = "gray90",
     vertex.label.color = "black",
     edge.color = "SkyBlue2",
     layout = layout.fruchterman.reingold)
legend("topleft",
       legend = sort(unique(edgelist$width)),
       lwd = sort(unique(edgelist$width)),
       col = "SkyBlue2",
       bty = "n")
### Average path length, i.e., the degrees of separation: ############
average.path.length(graph)
### The longest shortest path, i.e., the diameter: ###################
diameter(graph)
V(graph)[get.diameter(graph)]
### Distance distributions: ##########################################
distances <- shortest.paths(graph)
dimnames(distances) <- list(V(graph)$name, V(graph)$name)
### Personal distributions of the "regular contributors":
## (authors who contributed to every conference)
regulars <- subset(authors_ncontributions,
                   ncontribs == nconferences)$author
regulars_distances <-
  distances[, match(regulars, colnames(distances)), drop = FALSE]
ggplot(melt(regulars_distances), aes(value)) +
  geom_density(aes(y = ..count..), fill = "SkyBlue2") +
  facet_grid(X2 ~ .)
### Evolution of the network over time: ##############################
## Vertices, i.e., coauthors, by years:
coauthors_years <- ddply(coauthors_pairs, .(author1, author2),
                         function(x) {
                           as.data.frame(t(as.matrix(table(x$year))))
                         })
colnames(coauthors_years) <- c("author1", "author2",
                               sprintf("ISIPTA%s",
                                       levels(coauthors_pairs$year)))
# Cumulative counts so each year's graph includes all earlier links.
coauthors_years <- cbind(coauthors_years[, 1:2],
                         t(apply(coauthors_years[, -c(1:2)], 1, cumsum)))
## Edges, i.e., authors, by years:
authors_years <- cbind(conferences_contributors[, 1, drop = FALSE],
                       t(apply(conferences_contributors[, -c(1)], 1, cumsum)))
### Graphs over time:
years <- levels(coauthors_pairs$year)
years <- sapply(years, grep,
                colnames(coauthors_years), value = TRUE)
op <- par(mfrow = c(1, length(years)))
for ( i in years ) {
  # White edges/vertices hide elements not yet present in year i while
  # keeping the fixed layout (same seed) comparable across panels.
  ewidth <- coauthors_years[[i]]
  ecolor <- ifelse(coauthors_years[[i]] > 0, "SkyBlue2", "white")
  vcolor <- ifelse(authors_years[[i]] > 0, "black", "white")
  fcolor <- ifelse(authors_years[[i]] > 0, "black", "white")
  op1 <- par(mar = c(1, 0, 0, 0))
  set.seed(1234)
  plot(graph,
       vertex.size = 3,
       vertex.label = NA,
       vertex.color = vcolor,
       vertex.frame.color = fcolor,
       edge.color = ecolor,
       edge.width = ewidth,
       layout = layout.fruchterman.reingold)
  mtext(i, side = 1, line = 0)
  par(op1)
}
par(op)
|
/demo/coauthors-network.R
|
no_license
|
mjaeugster/ISIPTA
|
R
| false
| false
| 4,807
|
r
|
### Coauthors network; i.e., authors as vertices and two vertices
### joined by an edge if the two authors have written a joint paper.
### Demo script: depends on the ISIPTA package data and on plyr/igraph
### (loaded by the "regular-contributors" demo sourced below).
library("ISIPTA")
demo("regular-contributors", package = "ISIPTA",
     verbose = FALSE, echo = FALSE, ask = FALSE)
data("papers_authors", package = "ISIPTA")
# For every multi-author paper, enumerate all unordered author pairs.
coauthors_pairs <- ddply(papers_authors, .(id),
                         function(x) {
                           if ( nrow(x) > 1 ) {
                             authors <- sort(as.character(x$author))
                             pairs <- combn(authors, 2)
                             data.frame(author1 =
                                        factor(pairs[1, ],
                                               levels = levels(x$author)),
                                        author2 =
                                        factor(pairs[2, ],
                                               levels = levels(x$author)),
                                        year = x$year[1],
                                        id = x$id[1])
                           }
                         })
coauthors_pairs <- within(coauthors_pairs, {
  year <- ordered(year)
  id <- factor(id)
})
## Reduce to the number of each pair:
coauthors_npairs <- ddply(coauthors_pairs, .(author1, author2),
                          function(x) {
                            c(npairs = nrow(x))
                          })
### Overall collaboration graph: #####################################
## Edgelist; width of the edge is the number of joint papers:
edgelist <- within(coauthors_npairs, {
  width <- npairs
  npairs <- NULL
})
## Vertices:
vertices <- data.frame(name = levels(edgelist$author1))
## Graph:
graph <- graph.data.frame(edgelist,
                          directed = FALSE,
                          vertices = vertices)
summary(graph)
### Visualization of the graph: ######################################
set.seed(1234)
plot(graph,
     vertex.size = 5,
     vertex.color = "gray90",
     vertex.frame.color = "gray90",
     vertex.label.color = "black",
     edge.color = "SkyBlue2",
     layout = layout.fruchterman.reingold)
legend("topleft",
       legend = sort(unique(edgelist$width)),
       lwd = sort(unique(edgelist$width)),
       col = "SkyBlue2",
       bty = "n")
### Average path length, i.e., the degrees of separation: ############
average.path.length(graph)
### The longest shortest path, i.e., the diameter: ###################
diameter(graph)
V(graph)[get.diameter(graph)]
### Distance distributions: ##########################################
distances <- shortest.paths(graph)
dimnames(distances) <- list(V(graph)$name, V(graph)$name)
### Personal distributions of the "regular contributors":
## (authors who contributed to every conference)
regulars <- subset(authors_ncontributions,
                   ncontribs == nconferences)$author
regulars_distances <-
  distances[, match(regulars, colnames(distances)), drop = FALSE]
ggplot(melt(regulars_distances), aes(value)) +
  geom_density(aes(y = ..count..), fill = "SkyBlue2") +
  facet_grid(X2 ~ .)
### Evolution of the network over time: ##############################
## Vertices, i.e., coauthors, by years:
coauthors_years <- ddply(coauthors_pairs, .(author1, author2),
                         function(x) {
                           as.data.frame(t(as.matrix(table(x$year))))
                         })
colnames(coauthors_years) <- c("author1", "author2",
                               sprintf("ISIPTA%s",
                                       levels(coauthors_pairs$year)))
# Cumulative counts so each year's graph includes all earlier links.
coauthors_years <- cbind(coauthors_years[, 1:2],
                         t(apply(coauthors_years[, -c(1:2)], 1, cumsum)))
## Edges, i.e., authors, by years:
authors_years <- cbind(conferences_contributors[, 1, drop = FALSE],
                       t(apply(conferences_contributors[, -c(1)], 1, cumsum)))
### Graphs over time:
years <- levels(coauthors_pairs$year)
years <- sapply(years, grep,
                colnames(coauthors_years), value = TRUE)
op <- par(mfrow = c(1, length(years)))
for ( i in years ) {
  # White edges/vertices hide elements not yet present in year i while
  # keeping the fixed layout (same seed) comparable across panels.
  ewidth <- coauthors_years[[i]]
  ecolor <- ifelse(coauthors_years[[i]] > 0, "SkyBlue2", "white")
  vcolor <- ifelse(authors_years[[i]] > 0, "black", "white")
  fcolor <- ifelse(authors_years[[i]] > 0, "black", "white")
  op1 <- par(mar = c(1, 0, 0, 0))
  set.seed(1234)
  plot(graph,
       vertex.size = 3,
       vertex.label = NA,
       vertex.color = vcolor,
       vertex.frame.color = fcolor,
       edge.color = ecolor,
       edge.width = ewidth,
       layout = layout.fruchterman.reingold)
  mtext(i, side = 1, line = 0)
  par(op1)
}
par(op)
|
\name{print_relimp}
\alias{print_relimp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Print tables of estimates}
\description{
Presents various estimates for measures of educational differentials, the relative importance of primary and secondary effects and corresponding standard errors and confidence intervals.
}
\usage{
print_relimp(dataset)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{A data frame with 4 columns only, in the following order:
1: student's ID, 2: class, 3: transition (0 if not, 1 if yes) and 4: performance score.}
}
%\details{
% ~~ If necessary, more details than the description above ~~
%}
\value{
Returns a more nicely presented version of the results given by \code{relative.importance}.
% ~Describe the value returned
% If it is a LIST, use
% \item{comp1 }{Description of 'comp1'}
% \item{comp2 }{Description of 'comp2'}
% ...
}
\references{
Kartsonaki, C., Jackson, M. and Cox, D. R. (2013). Primary and secondary effects: Some methodological issues, in Jackson, M. (ed.) \emph{Determined to succeed?}, Stanford: Stanford University Press.
Erikson, R., Goldthorpe, J. H., Jackson, M., Yaish, M. and Cox, D. R. (2005) On Class Differentials in Educational Attainment. \emph{Proceedings of the National Academy of Sciences}, \bold{102}: 9730--9733
Jackson, M., Erikson, R., Goldthorpe, J. H. and Yaish, M. (2007) Primary and secondary effects in class differentials in educational attainment: The transition to A-level courses in England and Wales. \emph{Acta Sociologica}, \bold{50} (3): 211--229
}
\author{Christiana Kartsonaki}
%\note{ %~~further notes~~
% ~Make other sections like Warning with \section{Warning }{....} ~
%}
\seealso{ \code{\link{relative.importance}} }
\examples{
# generate a dataset
set.seed(1)
data <- data.frame(seq(1:10), rep(c(1, 2, 3), length.out = 10),
rbinom(1, n = 10, p = 0.7), c(rnorm(8, 0, 1), NA, NA))
# run function
print_relimp(data)
}
|
/man/print_relimp.Rd
|
no_license
|
cran/DECIDE
|
R
| false
| false
| 2,036
|
rd
|
\name{print_relimp}
\alias{print_relimp}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Print tables of estimates}
\description{
Presents various estimates for measures of educational differentials, the relative importance of primary and secondary effects and corresponding standard errors and confidence intervals.
}
\usage{
print_relimp(dataset)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{A data frame with 4 columns only, in the following order:
1: student's ID, 2: class, 3: transition (0 if not, 1 if yes) and 4: performance score.}
}
%\details{
% ~~ If necessary, more details than the description above ~~
%}
\value{
Returns a more nicely presented version of the results given by \code{relative.importance}.
% ~Describe the value returned
% If it is a LIST, use
% \item{comp1 }{Description of 'comp1'}
% \item{comp2 }{Description of 'comp2'}
% ...
}
\references{
Kartsonaki, C., Jackson, M. and Cox, D. R. (2013). Primary and secondary effects: Some methodological issues, in Jackson, M. (ed.) \emph{Determined to succeed?}, Stanford: Stanford University Press.
Erikson, R., Goldthorpe, J. H., Jackson, M., Yaish, M. and Cox, D. R. (2005) On Class Differentials in Educational Attainment. \emph{Proceedings of the National Academy of Sciences}, \bold{102}: 9730--9733
Jackson, M., Erikson, R., Goldthorpe, J. H. and Yaish, M. (2007) Primary and secondary effects in class differentials in educational attainment: The transition to A-level courses in England and Wales. \emph{Acta Sociologica}, \bold{50} (3): 211--229
}
\author{Christiana Kartsonaki}
%\note{ %~~further notes~~
% ~Make other sections like Warning with \section{Warning }{....} ~
%}
\seealso{ \code{\link{relative.importance}} }
\examples{
# generate a dataset
set.seed(1)
data <- data.frame(seq(1:10), rep(c(1, 2, 3), length.out = 10),
rbinom(1, n = 10, p = 0.7), c(rnorm(8, 0, 1), NA, NA))
# run function
print_relimp(data)
}
|
## Appears to be knitr-extracted (purl) code from the cdata
## "general_transform" vignette: demonstrates block-record-to-block-record
## transforms via layout_specification(), its inverse, and execution
## against an in-memory SQLite database through rquery.
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## -----------------------------------------------------------------------------
library("cdata")
# Example data: two records (record_id 1 and 2), each a 3x3 block of cells.
data <- wrapr::build_frame(
   "record_id"  , "row" , "col1", "col2", "col3" |
     1          , "row1", 1     , 2     , 3      |
     1          , "row2", 4     , 5     , 6      |
     1          , "row3", 7     , 8     , 9      |
     2          , "row1", 11    , 12    , 13     |
     2          , "row2", 14    , 15    , 16     |
     2          , "row3", 17    , 18    , 19     )
knitr::kable(data)
## -----------------------------------------------------------------------------
recordKeys = 'record_id'
# Symbolic description of how one incoming record is laid out (v11..v33
# name the cells).
incoming_shape <- wrapr::qchar_frame(
  "row"  , "col1", "col2", "col3" |
  "row1" , v11   , v12   , v13    |
  "row2" , v21   , v22   , v23    |
  "row3" , v31   , v32   , v33    )
## -----------------------------------------------------------------------------
# Desired outgoing layout: the same cells, transposed into new columns.
outgoing_shape <- wrapr::qchar_frame(
  "column_label" , "c_row1", "c_row2", "c_row3" |
  "rec_col1"     , v11     , v21     , v31      |
  "rec_col2"     , v12     , v22     , v32      |
  "rec_col3"     , v13     , v23     , v33      )
## -----------------------------------------------------------------------------
layout <- layout_specification(
  incoming_shape = incoming_shape,
  outgoing_shape = outgoing_shape,
  recordKeys = recordKeys)
print(layout)
## -----------------------------------------------------------------------------
# Apply the layout via wrapr dot-pipe.
data %.>%
  layout %.>%
  knitr::kable(.)
## -----------------------------------------------------------------------------
# Equivalent function-call forms of the same transform.
lr <- layout_by(layout, data)
knitr::kable(lr)
cr <- convert_records(
  data,
  keyColumns = recordKeys,
  incoming_shape = incoming_shape,
  outgoing_shape = outgoing_shape)
knitr::kable(cr)
## -----------------------------------------------------------------------------
# t() inverts a layout; applying layout then its inverse round-trips data.
inv_layout <- t(layout)
print(inv_layout)
data %.>%
  layout %.>%
  inv_layout %.>%
  knitr::kable(.)
## -----------------------------------------------------------------------------
# Same transform compiled to rquery operators and run against SQLite.
# (sic: "table_desciption" typo preserved from the generated vignette.)
table_desciption <- rquery::local_td(data)
ops <- table_desciption %.>%
  layout
cat(format(ops))
rquery::column_names(ops)
if(requireNamespace("DBI", quietly = TRUE) &&
   requireNamespace("RSQLite", quietly = TRUE)) {
  raw_connection <- DBI::dbConnect(RSQLite::SQLite(),
                                   ":memory:")
  RSQLite::initExtension(raw_connection)
  db <- rquery::rquery_db_info(
    connection = raw_connection,
    is_dbi = TRUE,
    connection_options = rquery::rq_connection_tests(raw_connection))
  db_td <- rquery::rq_copy_to(db, "data", data)
  ops %.>%
    db %.>%
    knitr::kable(.) %.>%
    print(.)
  DBI::dbDisconnect(raw_connection)
}
|
/inst/doc/general_transform.R
|
no_license
|
cran/cdata
|
R
| false
| false
| 2,847
|
r
|
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## -----------------------------------------------------------------------------
library("cdata")
data <- wrapr::build_frame(
"record_id" , "row" , "col1", "col2", "col3" |
1 , "row1", 1 , 2 , 3 |
1 , "row2", 4 , 5 , 6 |
1 , "row3", 7 , 8 , 9 |
2 , "row1", 11 , 12 , 13 |
2 , "row2", 14 , 15 , 16 |
2 , "row3", 17 , 18 , 19 )
knitr::kable(data)
## -----------------------------------------------------------------------------
recordKeys = 'record_id'
incoming_shape <- wrapr::qchar_frame(
"row" , "col1", "col2", "col3" |
"row1", v11 , v12 , v13 |
"row2", v21 , v22 , v23 |
"row3", v31 , v32 , v33 )
## -----------------------------------------------------------------------------
outgoing_shape <- wrapr::qchar_frame(
"column_label" , "c_row1", "c_row2", "c_row3" |
"rec_col1" , v11 , v21 , v31 |
"rec_col2" , v12 , v22 , v32 |
"rec_col3" , v13 , v23 , v33 )
## -----------------------------------------------------------------------------
layout <- layout_specification(
incoming_shape = incoming_shape,
outgoing_shape = outgoing_shape,
recordKeys = recordKeys)
print(layout)
## -----------------------------------------------------------------------------
data %.>%
layout %.>%
knitr::kable(.)
## -----------------------------------------------------------------------------
lr <- layout_by(layout, data)
knitr::kable(lr)
cr <- convert_records(
data,
keyColumns = recordKeys,
incoming_shape = incoming_shape,
outgoing_shape = outgoing_shape)
knitr::kable(cr)
## -----------------------------------------------------------------------------
inv_layout <- t(layout)
print(inv_layout)
data %.>%
layout %.>%
inv_layout %.>%
knitr::kable(.)
## -----------------------------------------------------------------------------
table_desciption <- rquery::local_td(data)
ops <- table_desciption %.>%
layout
cat(format(ops))
rquery::column_names(ops)
if(requireNamespace("DBI", quietly = TRUE) &&
requireNamespace("RSQLite", quietly = TRUE)) {
raw_connection <- DBI::dbConnect(RSQLite::SQLite(),
":memory:")
RSQLite::initExtension(raw_connection)
db <- rquery::rquery_db_info(
connection = raw_connection,
is_dbi = TRUE,
connection_options = rquery::rq_connection_tests(raw_connection))
db_td <- rquery::rq_copy_to(db, "data", data)
ops %.>%
db %.>%
knitr::kable(.) %.>%
print(.)
DBI::dbDisconnect(raw_connection)
}
|
# Tidy Tuesday 2020-04-28: highest-grossing Broadway shows, inflation adjusted.
# fix: package names are case-sensitive -- "Rcolorbrewer" cannot be loaded;
# the package is "RColorBrewer".
pacman::p_load(tidyverse, ggrepel, lubridate, ggridges, ggalt, RColorBrewer)

# Dark custom ggplot theme used for the final chart.
the_dark_knight <- theme(panel.background = element_rect(fill = "#404040"),
                         plot.background = element_rect(fill = "#404040"),
                         panel.grid.major.y = element_blank(),
                         panel.grid.minor.y = element_blank(),
                         panel.grid.major.x = element_line(color = "#7d7c7b"),
                         panel.grid.minor.x = element_line(color = "#7d7c7b"),
                         axis.text = element_text(color = "white"),
                         axis.title = element_text(color = "white"),
                         title = element_text(color = "white"),
                         legend.background = element_rect(fill = "#404040"),
                         legend.key = element_rect(fill = "#404040"),
                         legend.text = element_text(color = "white"))

# fix: load `cpi` *before* the `grosses` pipeline that joins against it
# (in the original script cpi was read after being used).
cpi <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-28/cpi.csv')
synopses <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-28/synopses.csv')
pre_1985_starts <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-28/pre-1985-starts.csv')

# Weekly grosses, joined to CPI by calendar month.
grosses <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-28/grosses.csv') %>%
  mutate(year_month = ymd(floor_date(ymd(week_ending), unit = "month"))) %>%
  left_join(mutate(cpi, year_month = ymd(year_month)), by = "year_month")

# Top 30 shows by total gross in constant dollars (266.795 is the reference CPI).
dat <- grosses %>%
  group_by(show) %>%
  summarize(total = sum((weekly_gross / cpi) * 266.795)) %>%
  mutate(ranking = rank(desc(total))) %>%
  filter(ranking <= 30)

# First/last week per show, plus whether it was still running into 2020.
dat1 <- grosses %>%
  group_by(show) %>%
  summarise(ending = max(week_ending), Starting = min(week_ending), total = sum((weekly_gross / cpi) * 266.795)) %>%
  mutate(current = case_when(
    ending > ymd("2020-01-01") ~ "Yes",
    TRUE ~ "No"
  ))

# Exploratory scatter of inflation-adjusted weekly grosses for the top-30 shows.
filter(grosses, str_detect(show, paste(dat$show, collapse = "|"))) %>%
  ggplot(aes(x = ymd(week_ending), y = (weekly_gross / cpi) * 266.795)) +
  geom_point() +
  theme_minimal() +
  labs(x = "Week of Show", y = "Amount Grossed Adjusted for Inflation")

# Ridge plot of weekly grosses over time, one ridge per show.
filter(grosses, str_detect(show, paste(dat$show, collapse = "|"))) %>%
  ggplot() +
  geom_density_ridges_gradient(stat = "identity", aes(y = show, x = week_ending, height = (weekly_gross / cpi) * 266.795, fill = week_ending), scale = 2.5)

# Final dumbbell chart: run dates for the 30 highest-grossing shows.
filter(dat1, str_detect(show, paste(dat$show, collapse = "|"))) %>%
  ggplot() +
  geom_dumbbell(aes(x = Starting, xend = ending, y = reorder(show, total), color = current), size_x = 3, size_xend = 3, dot_guide_size = 2) +
  labs(x = "Dates the Show was Open on Broadway", y = "Show Title", title = "Thirty Highest-Grossing Broadway Shows since 1985", subtitle = "Descending from Highest Grossing to Least - Adjusted For Inflation", color = "Currently Playing? \n(Until COVID-19)") +
  the_dark_knight +
  theme(axis.text.x = element_text(angle = 20)) +
  guides(colour = guide_legend(override.aes = list(size = 3))) +
  scale_x_date(date_breaks = "5 years", date_labels = "%Y") +
  scale_color_brewer(palette = "Spectral")
#scale_x_datetime(breaks = c(as.Date("1985-01-01"), as.Date("1990-01-01"), as.Date("1995-01-01"), as.Date("2000-01-01"), as.Date("2005-01-01"), as.Date("2010-01-01"), as.Date("2015-01-01"), as.Date("2020-01-01")))
#scale_x_datetime(breaks = c(as.Date("1985-01-01"), as.Date("1990-01-01"), as.Date("1995-01-01"), as.Date("2000-01-01"), as.Date("2005-01-01"),as.Date("2010-01-01"),as.Date("2015-01-01"),as.Date("2020-01-01")))
|
/Full_Projects/Tidy_Tues_complete/Broadway Musicals/Musicals.r
|
no_license
|
jashonnew/DSci-Actua-Rjunkie
|
R
| false
| false
| 3,511
|
r
|
pacman::p_load(tidyverse,ggrepel, lubridate, ggridges, ggalt, Rcolorbrewer)
the_dark_knight <- theme(panel.background = element_rect(fill = "#404040"), # This is my theme I will use
plot.background = element_rect(fill = "#404040"),
panel.grid.major.y = element_blank(),
panel.grid.minor.y = element_blank(),
panel.grid.major.x = element_line(color = "#7d7c7b"),
panel.grid.minor.x = element_line(color = "#7d7c7b"),
axis.text = element_text(color = "white"),
axis.title = element_text(color = "white"),
title = element_text(color = "white"),
legend.background = element_rect(fill = "#404040"),
legend.key = element_rect(fill = "#404040"),
legend.text = element_text(color = "white"))
grosses <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-28/grosses.csv') %>%
mutate(year_month = ymd(floor_date(ymd(week_ending), unit = "month"))) %>%
left_join(mutate(cpi, year_month = ymd(year_month)), by = "year_month")
dat <- grosses %>%
group_by(show) %>%
summarize(total = sum((weekly_gross/cpi)*266.795)) %>%
mutate(ranking = rank(desc(total))) %>%
filter(ranking <= 30)
dat1 <- grosses %>%
group_by(show) %>%
summarise(ending = max(week_ending), Starting = min(week_ending),total = sum((weekly_gross/cpi)*266.795)) %>%
mutate(current = case_when(
ending > ymd("2020-01-01") ~ "Yes",
TRUE ~ "No"
))
synopses <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-28/synopses.csv')
cpi <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-28/cpi.csv')
pre_1985_starts <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-04-28/pre-1985-starts.csv')
filter(grosses, str_detect(show, paste(dat$show, collapse = "|"))) %>%
ggplot(aes(x = ymd(week_ending), y = (weekly_gross/cpi)*266.795)) +
geom_point() +
theme_minimal() +
labs(x = "Week of Show", y = "Amount Grossed Adjusted for Inlation")
filter(grosses, str_detect(show, paste(dat$show, collapse = "|"))) %>%
ggplot() +
geom_density_ridges_gradient(stat = "identity", aes(y = show, x = week_ending, height = (weekly_gross/cpi)*266.795, fill = week_ending), scale = 2.5)
filter(dat1, str_detect(show, paste(dat$show, collapse = "|"))) %>%
ggplot() +
geom_dumbbell(aes(x = Starting, xend = ending, y = reorder(show,total), color = current), size_x = 3, size_xend = 3, dot_guide_size = 2) +
labs(x = "Dates the Show was Open on Broadway", y = "Show Title", title = "Thirty Highest-Grossing Broadway Shows since 1985", subtitle = "Descending from Highest Grossing to Least - Adjusted For Inflation", color = "Currently Playing? \n(Until COVID-19)") +
the_dark_knight +
theme(axis.text.x = element_text(angle = 20)) +
guides(colour = guide_legend(override.aes = list(size=3))) +
scale_x_date(date_breaks = "5 years", date_labels = "%Y") +
scale_color_brewer(palette = "Spectral")
#scale_x_datetime(breaks = c(as.Date("1985-01-01"), as.Date("1990-01-01"), as.Date("1995-01-01"), as.Date("2000-01-01"), as.Date("2005-01-01"),as.Date("2010-01-01"),as.Date("2015-01-01"),as.Date("2020-01-01")))
|
# Smoke tests for the AWS Network Manager client.
# expect_error(expr, NA) asserts that *no* error is raised by the call.
svc <- paws::networkmanager()

test_that("describe_global_networks", {
  expect_error(svc$describe_global_networks(), NA)
})

# fix: give the second test a distinct description so failures are
# attributable (both tests previously shared the same name).
test_that("describe_global_networks with MaxResults", {
  expect_error(svc$describe_global_networks(MaxResults = 20), NA)
})
|
/paws/tests/testthat/test_networkmanager.R
|
permissive
|
TWarczak/paws
|
R
| false
| false
| 235
|
r
|
# Smoke tests for the AWS Network Manager client.
# expect_error(expr, NA) asserts that *no* error is raised by the call.
svc <- paws::networkmanager()

test_that("describe_global_networks", {
  expect_error(svc$describe_global_networks(), NA)
})

# fix: give the second test a distinct description so failures are
# attributable (both tests previously shared the same name).
test_that("describe_global_networks with MaxResults", {
  expect_error(svc$describe_global_networks(MaxResults = 20), NA)
})
|
suppressMessages(library(dplyr))
suppressMessages(library(glmnet))
suppressMessages((library(reshape2)))
suppressMessages(library(methods))
suppressMessages(library(doMC))
suppressMessages(library(doRNG))
suppressMessages(library(tidyr))
suppressMessages(library(tibble))
"%&%" <- function(a,b) paste(a,b, sep = "")
#Shortened notation for concatenating strings
## Couldn't use these line in pipeline due to confidentiality issues
# get_gene_expression <- function(gene_expression_file_name, gene_annot) { #row are obs, columns are features
# expr_df <- as.data.frame(t(read.table(gene_expression_file_name, header = T, stringsAsFactors = F, row.names = NULL)))
# expr_df <- expr_df %>% select(one_of(intersect(gene_annot$gene_id, colnames(expr_df)))) #%>% mutate(id=gsub("\\.[0-9]+","",id))
# expr_df
# }
get_filtered_snp_annot <- function(snp_annot_file_name) {
  # Read the SNP annotation table and drop strand-ambiguous (palindromic)
  # variants (A/T, T/A, C/G, G/C) as well as rows without an rsid, keeping
  # one row per varID (first occurrence wins).
  annot <- read.table(snp_annot_file_name, header = T, stringsAsFactors = F)
  ambiguous <- (annot$refAllele == 'A' & annot$effectAllele == 'T') |
    (annot$refAllele == 'T' & annot$effectAllele == 'A') |
    (annot$refAllele == 'C' & annot$effectAllele == 'G') |
    (annot$refAllele == 'G' & annot$effectAllele == 'C')
  annot %>%
    filter(!ambiguous & !is.na(rsid)) %>%
    distinct(varID, .keep_all = TRUE)
}
get_maf_filtered_genotype <- function(genotype_file_name) {
  # Read the genotype table, keep the first row for each snp_ID, and move
  # snp_ID into the rownames so the remaining columns are pure genotypes.
  genotypes <- read.table(genotype_file_name, header = T, stringsAsFactors = F)
  genotypes <- distinct(genotypes, snp_ID, .keep_all = T)
  column_to_rownames(genotypes, var = "snp_ID")
}
get_eQTL_snps <- function(bim_file) {
  # A plink .bim file has no header; label the six standard columns so the
  # rsid column can be pulled out by name.
  bim_cols <- c("chr", "rsid", "pos", "start", "allele1", "allele2")
  bim <- read.table(bim_file, header = F, stringsAsFactors = F, col.names = bim_cols)
  rsids <- bim$rsid
  names(rsids) <- NULL
  # print() returns its argument, so the rsid vector is also the return value.
  print(rsids)
}
get_gene_annotation <- function(gene_annot_file_name, chrom, gene_types=c('protein_coding',"aptamer", 'pseudogene', 'lincRNA',"aptamer","VALUE")){
  # Load the gene annotation table and keep genes on the requested chromosome
  # whose gene_type is one of the accepted biotypes.
  read.table(gene_annot_file_name, header = TRUE, stringsAsFactors = FALSE) %>%
    filter(chr == chrom) %>%
    filter(gene_type %in% gene_types)
}
## Couldn't use these line in pipeline due to confidentiality issues
# get_gene_type <- function(gene_annot, gene) {
# filter(gene_annot, gene_id == gene)$gene_type
# }
get_gene_coords <- function(gene_annot, gene) {
  # Look up `gene` in the annotation table and return c(start, end).
  idx <- which(gene_annot$gene_id == gene)
  match_row <- gene_annot[idx, ]
  c(match_row$start, match_row$end)
}
# Return the annotated SNPs lying within `cis_window` bp of the gene whose
# (start, end) coordinates are given in `coords`, as a one-column data frame
# of composite "rsid:refAllele:effectAllele" ids.
#
# NOTE(review): despite its name and first argument, this function never
# reads `gt_df` -- only the annotation table is used; confirm this is
# intentional (see the confidentiality comments elsewhere in this file).
# Returns NA when no SNP falls in the window.  The final print() both
# displays the result and returns it to the caller.
get_cis_genotype <- function(gt_df, snp_annot, coords, cis_window) {
  # Keep SNPs with an rsid whose position lies in [start - window, end + window].
  snp_info <- snp_annot %>% filter((pos >= (coords[1] - cis_window) & !is.na(rsid)) & (pos <= (coords[2] + cis_window)))
  print(c('number of snps: ', nrow(snp_info)))
  if (nrow(snp_info) == 0)
    return(NA)
  # Build a composite id so the alleles travel with the rsid.
  snp_info$SNP <- paste(snp_info$rsid, snp_info$refAllele, snp_info$effectAllele, sep = ':')
  cis_gt <- snp_info %>% select(SNP)
  print(cis_gt)
}
get_overlapping_snps <- function(TOPMed_snps, thousand_genomes_snps) {
  # Keep only the SNP ids present in both panels, strip any names, and
  # print the result (print() also returns it to the caller).
  shared <- filter(TOPMed_snps, SNP %in% thousand_genomes_snps)
  ids <- shared$SNP
  names(ids) <- NULL
  print(ids)
}
# Driver: for every gene on chromosome `chrom`, write the list of cis-window
# SNPs that are also present in the eQTL reference panel (one output file per
# gene, under <out_dir>/LD_matrix/<pop>/).
#
# NOTE(review): several arguments (expression_file, covariates_file, maf,
# n_folds, n_train_test_folds, seed, alpha, null_testing) are never read in
# this version -- presumably kept for interface compatibility with the
# training pipeline; confirm before removing.  Also note that `chrom` and
# `pop` have no defaults but follow defaulted arguments, so callers must
# pass them by name.
main <- function(snp_annot_file, gene_annot_file, genotype_file, expression_file, eQTL_bim_file, out_dir,
                 covariates_file=NULL, chrom, pop, maf=0.01, n_folds=10, n_train_test_folds=5,
                 seed=NA, cis_window=1000000, alpha=0.5, null_testing=FALSE) {
  gene_annot <- get_gene_annotation(gene_annot_file, chrom)
  ## Couldn't use these line in pipeline due to confidentiality issues
  #expr_df <- get_gene_expression(expression_file, gene_annot)
  #genes <- colnames(expr_df)
  #samples <- rownames(expr_df)
  # Gene ids come from the annotation instead of the (unavailable) expression data.
  genes <- as.vector(gene_annot$gene_id)
  n_genes <- length(genes)
  snp_annot <- get_filtered_snp_annot(snp_annot_file)
  gt_df <- get_maf_filtered_genotype(genotype_file)
  eQTL_snps <- get_eQTL_snps(eQTL_bim_file)
  for (i in 1:n_genes) {
    # Progress marker: "<i> / <total>".
    cat(i, "/", n_genes, "\n")
    gene <- unlist(genes[i])
    print(gene)
    gene_name <- gene_annot$gene_name[gene_annot$gene_id == gene]
    ## Couldn't use these line in pipeline due to confidentiality issues
    #gene_type <- get_gene_type(gene_annot, gene)
    coords <- get_gene_coords(gene_annot, gene)
    # SNPs within the cis window around this gene, as "rsid:ref:effect" ids.
    cis_gt <- get_cis_genotype(gt_df, snp_annot, coords, cis_window)
    str(gene)
    ## Couldn't use these line in pipeline due to confidentiality issues
    #str(gene_type)
    str(coords)
    str(cis_gt)
    # Intersect the cis SNPs with the eQTL reference-panel rsids.
    overlap_cis_gt <- get_overlapping_snps(cis_gt,eQTL_snps)
    print(overlap_cis_gt)
    # Only write a file when at least 3 overlapping SNPs were found.
    if(length(overlap_cis_gt) > 2){
      write.table(as.data.frame(overlap_cis_gt, stringsAsFactors=FALSE), file=paste(out_dir,'/LD_matrix/',pop,'/', pop, '_chr_', chrom, '_', gene, '_1Mb_of_gene.txt', sep = ''), quote = F, row.names=F, col.names=F)
    }
  }
}
|
/01_pull_snps_driving.R
|
no_license
|
annie-novak9/Coloc
|
R
| false
| false
| 4,732
|
r
|
suppressMessages(library(dplyr))
suppressMessages(library(glmnet))
suppressMessages((library(reshape2)))
suppressMessages(library(methods))
suppressMessages(library(doMC))
suppressMessages(library(doRNG))
suppressMessages(library(tidyr))
suppressMessages(library(tibble))
"%&%" <- function(a,b) paste(a,b, sep = "")
#Shortened notation for concatenating strings
## Couldn't use these line in pipeline due to confidentiality issues
# get_gene_expression <- function(gene_expression_file_name, gene_annot) { #row are obs, columns are features
# expr_df <- as.data.frame(t(read.table(gene_expression_file_name, header = T, stringsAsFactors = F, row.names = NULL)))
# expr_df <- expr_df %>% select(one_of(intersect(gene_annot$gene_id, colnames(expr_df)))) #%>% mutate(id=gsub("\\.[0-9]+","",id))
# expr_df
# }
get_filtered_snp_annot <- function(snp_annot_file_name) {
snp_annot <- read.table(snp_annot_file_name, header = T, stringsAsFactors = F) %>%
filter(!((refAllele == 'A' & effectAllele == 'T') |
(refAllele == 'T' & effectAllele == 'A') |
(refAllele == 'C' & effectAllele == 'G') |
(refAllele == 'G' & effectAllele == 'C')) &
!(is.na(rsid))) %>%
distinct(varID, .keep_all = TRUE)
snp_annot
}
get_maf_filtered_genotype <- function(genotype_file_name) {
gt_df <- read.table(genotype_file_name, header = T, stringsAsFactors = F) %>% distinct(snp_ID,.keep_all=T) %>% column_to_rownames(var="snp_ID")
gt_df
}
get_eQTL_snps <- function(bim_file) {
eQTL_snps <- read.table(bim_file, header = F, stringsAsFactors = F, col.names=c("chr","rsid","pos","start","allele1","allele2")) %>%
select(rsid)
snp_list <- eQTL_snps$rsid
names(snp_list) <- NULL
print(snp_list)
}
get_gene_annotation <- function(gene_annot_file_name, chrom, gene_types=c('protein_coding',"aptamer", 'pseudogene', 'lincRNA',"aptamer","VALUE")){
gene_df <- read.table(gene_annot_file_name, header = TRUE, stringsAsFactors = FALSE) %>%
filter((chr == chrom) & gene_type %in% gene_types)
gene_df
}
## Couldn't use these line in pipeline due to confidentiality issues
# get_gene_type <- function(gene_annot, gene) {
# filter(gene_annot, gene_id == gene)$gene_type
# }
get_gene_coords <- function(gene_annot, gene) {
row <- gene_annot[which(gene_annot$gene_id == gene),]
c(row$start, row$end)
}
get_cis_genotype <- function(gt_df, snp_annot, coords, cis_window) {
snp_info <- snp_annot %>% filter((pos >= (coords[1] - cis_window) & !is.na(rsid)) & (pos <= (coords[2] + cis_window)))
print(c('number of snps: ', nrow(snp_info)))
if (nrow(snp_info) == 0)
return(NA)
snp_info$SNP <- paste(snp_info$rsid, snp_info$refAllele, snp_info$effectAllele, sep = ':')
cis_gt <- snp_info %>% select(SNP)
print(cis_gt)
}
get_overlapping_snps <- function(TOPMed_snps, thousand_genomes_snps) {
overlapping_snps <- TOPMed_snps %>% filter(SNP %in% thousand_genomes_snps)
filtered_snps <- overlapping_snps$SNP
names(filtered_snps) <- NULL
print(filtered_snps)
}
main <- function(snp_annot_file, gene_annot_file, genotype_file, expression_file, eQTL_bim_file, out_dir,
covariates_file=NULL, chrom, pop, maf=0.01, n_folds=10, n_train_test_folds=5,
seed=NA, cis_window=1000000, alpha=0.5, null_testing=FALSE) {
gene_annot <- get_gene_annotation(gene_annot_file, chrom)
## Couldn't use these line in pipeline due to confidentiality issues
#expr_df <- get_gene_expression(expression_file, gene_annot)
#genes <- colnames(expr_df)
#samples <- rownames(expr_df)
genes <- as.vector(gene_annot$gene_id)
n_genes <- length(genes)
snp_annot <- get_filtered_snp_annot(snp_annot_file)
gt_df <- get_maf_filtered_genotype(genotype_file)
eQTL_snps <- get_eQTL_snps(eQTL_bim_file)
for (i in 1:n_genes) {
cat(i, "/", n_genes, "\n")
gene <- unlist(genes[i])
print(gene)
gene_name <- gene_annot$gene_name[gene_annot$gene_id == gene]
## Couldn't use these line in pipeline due to confidentiality issues
#gene_type <- get_gene_type(gene_annot, gene)
coords <- get_gene_coords(gene_annot, gene)
cis_gt <- get_cis_genotype(gt_df, snp_annot, coords, cis_window)
str(gene)
## Couldn't use these line in pipeline due to confidentiality issues
#str(gene_type)
str(coords)
str(cis_gt)
overlap_cis_gt <- get_overlapping_snps(cis_gt,eQTL_snps)
print(overlap_cis_gt)
if(length(overlap_cis_gt) > 2){
write.table(as.data.frame(overlap_cis_gt, stringsAsFactors=FALSE), file=paste(out_dir,'/LD_matrix/',pop,'/', pop, '_chr_', chrom, '_', gene, '_1Mb_of_gene.txt', sep = ''), quote = F, row.names=F, col.names=F)
}
}
}
|
# Mardia's (1970) tests of multivariate normality.
#
# Given an n x p data set, computes the multivariate skewness (b1) and
# kurtosis (b2) statistics, the test statistics B1 (asymptotically
# chi-squared with p(p+1)(p+2)/6 df) and B2 (asymptotically standard
# normal), and their p-values.
#
# Fixes relative to the original version:
#  * mean() on a data.frame is defunct in modern R -> colMeans().
#  * The covariance matrix was hard-coded as 2x2 -> now correct for any p.
#  * The O(n^2) double loop is replaced by an equivalent matrix computation
#    (D[i,j] = (x_i - xbar)' S^-1 (x_j - xbar)).
mardia <- function(x){
  x <- as.matrix(data.frame(x))
  n <- nrow(x)
  p <- ncol(x)
  bar <- colMeans(x)
  S <- var(x) * (n - 1) / n        # ML (biased) covariance estimate, p x p
  xc <- sweep(x, 2, bar)           # column-centered data
  D <- xc %*% solve(S) %*% t(xc)   # n x n Mahalanobis cross-products
  b1 <- mean(D^3)                  # multivariate skewness
  b2 <- mean(diag(D)^2)            # multivariate kurtosis
  B1 <- n * b1 / 6
  B2 <- (b2 - p * (p + 2)) / sqrt(8 * p * (p + 2) / n)
  v <- p * (p + 1) * (p + 2) / 6
  p.val1 <- pchisq(B1, v, lower.tail = FALSE)
  p.val2 <- 2 * pnorm(abs(B2), lower.tail = FALSE)
  list("B1" = B1, "pval1" = p.val1, "B2" = B2, "pval2" = p.val2)
}

# Example data: paired measurements before ("ante") and after ("desp").
ante <- c(230,245,220,250, 260,250,220,300,310,290,260,240,210,220,
          250,245,274,230,285,275)
desp <- c(210,230,215,220,240,220,210,260,280,270,230,235,200,200,
          210,230,250,210,260,230)
x <- data.frame(cbind(ante, desp))
mardia(x)
|
/Z Old Tex Files/program R/Apéndice/Mardia.r
|
no_license
|
cualquiercosa327/2ed-team
|
R
| false
| false
| 783
|
r
|
# Mardia's (1970) tests of multivariate normality.
#
# Given an n x p data set, computes the multivariate skewness (b1) and
# kurtosis (b2) statistics, the test statistics B1 (asymptotically
# chi-squared with p(p+1)(p+2)/6 df) and B2 (asymptotically standard
# normal), and their p-values.
#
# Fixes relative to the original version:
#  * mean() on a data.frame is defunct in modern R -> colMeans().
#  * The covariance matrix was hard-coded as 2x2 -> now correct for any p.
#  * The O(n^2) double loop is replaced by an equivalent matrix computation
#    (D[i,j] = (x_i - xbar)' S^-1 (x_j - xbar)).
mardia <- function(x){
  x <- as.matrix(data.frame(x))
  n <- nrow(x)
  p <- ncol(x)
  bar <- colMeans(x)
  S <- var(x) * (n - 1) / n        # ML (biased) covariance estimate, p x p
  xc <- sweep(x, 2, bar)           # column-centered data
  D <- xc %*% solve(S) %*% t(xc)   # n x n Mahalanobis cross-products
  b1 <- mean(D^3)                  # multivariate skewness
  b2 <- mean(diag(D)^2)            # multivariate kurtosis
  B1 <- n * b1 / 6
  B2 <- (b2 - p * (p + 2)) / sqrt(8 * p * (p + 2) / n)
  v <- p * (p + 1) * (p + 2) / 6
  p.val1 <- pchisq(B1, v, lower.tail = FALSE)
  p.val2 <- 2 * pnorm(abs(B2), lower.tail = FALSE)
  list("B1" = B1, "pval1" = p.val1, "B2" = B2, "pval2" = p.val2)
}

# Example data: paired measurements before ("ante") and after ("desp").
ante <- c(230,245,220,250, 260,250,220,300,310,290,260,240,210,220,
          250,245,274,230,285,275)
desp <- c(210,230,215,220,240,220,210,260,280,270,230,235,200,200,
          210,230,250,210,260,230)
x <- data.frame(cbind(ante, desp))
mardia(x)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network_utils.R
\name{connect_gap_modules}
\alias{connect_gap_modules}
\title{Loop through each of the modules and connect adjacent ones that are at most allow.gap apart from each other}
\usage{
connect_gap_modules(final_summary, final_genelist, allow.gap = 1)
}
\arguments{
\item{final_summary}{summary table}
\item{final_genelist}{summary gene list}
\item{allow.gap}{how many gaps are allowed between adjacent sub-modules}
}
\description{
Loop through each of the modules and connect the adjacent ones which is only allow.gap away from each other
}
\examples{
connect_gap_modules(final_summary, final_genelist, allow.gap=1)
}
\keyword{co-expression,}
\keyword{connectivity}
\keyword{network,}
|
/man/connect_gap_modules.Rd
|
no_license
|
naikai/sake
|
R
| false
| true
| 764
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network_utils.R
\name{connect_gap_modules}
\alias{connect_gap_modules}
\title{Loop through each of the modules and connect the adjacent ones which is only allow.gap away from each other}
\usage{
connect_gap_modules(final_summary, final_genelist, allow.gap = 1)
}
\arguments{
\item{final_summary}{summary table}
\item{final_genelist}{summary gene list}
\item{allow.gap}{how many gaps are allowed between adjacent sub-modules}
}
\description{
Loop through each of the modules and connect the adjacent ones which is only allow.gap away from each other
}
\examples{
connect_gap_modules(final_summary, final_genelist, allow.gap=1)
}
\keyword{co-expression,}
\keyword{connectivity}
\keyword{network,}
|
# Auto-extracted example code for Thermimage::qrad (generated from the Rd file).
library(Thermimage)
### Name: qrad
### Title: Estimates the area specific heat transfer by radiation (W/m2)
### Aliases: qrad
### ** Examples
## The function is currently defined as
# qrad = absorbed radiation (qabs(...)) minus emitted radiation
# (E * StephBoltz() * Ts_in_Kelvin^4).
# NOTE(review): qabs() and StephBoltz() come from the Thermimage package;
# this anonymous function is evaluated and immediately discarded -- it only
# documents qrad's implementation.
function (Ts = 30, Ta = 25, Tg = NULL, RH = 0.5, E = 0.96, rho = 0.1,
cloud = 0, SE = 0)
{
qrad <- qabs(Ta = Ta, Tg = Tg, RH = RH, E = E, rho = rho,
cloud = cloud, SE = SE) - E * StephBoltz() * (Ts + 273.15)^4
qrad
}
# Example: surface at 30 C, air at 25 C, ground at 28 C, 50% RH,
# emissivity 0.96, reflectance 0.1, clear sky, 100 W/m2 solar input.
Ts<-30
Ta<-25
Tg<-28
RH<-0.5
E<-0.96
rho<-0.1
cloud<-0
SE<-100
# qrad should result in a positive gain of heat:
qrad(Ts, Ta, Tg, RH, E, rho, cloud, SE)
# if rho is elevated (i.e. doubles reflectance of solar energy), heat exchange by
# radiation is reduced
rho<-0.2
qrad(Ts, Ta, Tg, RH, E, rho, cloud, SE)
# But if solar energy = 0, under similar conditions, qrad is negative:
SE<-0
qrad(Ts, Ta, Tg, RH, E, rho, cloud, SE)
# For detailed examples and explanations, see:
# https://github.com/gtatters/Thermimage/blob/master/HeatTransferCalculations.md
|
/data/genthat_extracted_code/Thermimage/examples/qrad.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,004
|
r
|
library(Thermimage)
### Name: qrad
### Title: Estimates the area specific heat transfer by radiation (W/m2)
### Aliases: qrad
### ** Examples
## The function is currently defined as
function (Ts = 30, Ta = 25, Tg = NULL, RH = 0.5, E = 0.96, rho = 0.1,
cloud = 0, SE = 0)
{
qrad <- qabs(Ta = Ta, Tg = Tg, RH = RH, E = E, rho = rho,
cloud = cloud, SE = SE) - E * StephBoltz() * (Ts + 273.15)^4
qrad
}
# Example:
Ts<-30
Ta<-25
Tg<-28
RH<-0.5
E<-0.96
rho<-0.1
cloud<-0
SE<-100
# qrad should result in a positive gain of heat:
qrad(Ts, Ta, Tg, RH, E, rho, cloud, SE)
# if rho is elevated (i.e. doubles reflectance of solar energy), heat exchange by
# radiation is reduced
rho<-0.2
qrad(Ts, Ta, Tg, RH, E, rho, cloud, SE)
# But if solar energy = 0, under similar conditions, qrad is negative:
SE<-0
qrad(Ts, Ta, Tg, RH, E, rho, cloud, SE)
# For detailed examples and explanations, see:
# https://github.com/gtatters/Thermimage/blob/master/HeatTransferCalculations.md
|
#------------------------------------------------#
# Home Assignment Geoffrey 3
#------------------------------------------------#
# Power simulation for a one-way ANOVA (4 groups, means -0.5, 0, 0, 0.5,
# sd = 1): compares simulated rejection counts per group size with the
# analytic power from the noncentral F distribution.
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged, but
# they are kept to preserve the original assignment script's behavior.
rm(list=ls())
graphics.off()
setwd("~/Documents/Master/Statistics /Exams")
library(ggplot2)
library(dplyr)  # fix: the pipelines below use %>%, group_by(), summarize(), mutate()
#write function for simulating data with a given n_group and output only the p-value
aov_simulation <- function(n_group){
  group1 <- rnorm(n = n_group, mean = -.5, sd= 1)
  group2 <- rnorm(n = n_group, mean = 0, sd= 1)
  group3 <- rnorm(n = n_group, mean = 0, sd= 1)
  group4 <- rnorm(n = n_group, mean = .5, sd= 1)
  values <- c(group1, group2, group3, group4)
  subject <- rep(seq(1,n_group),4)
  subject <- factor(subject)
  group <- c(rep("G1", n_group), rep("G2", n_group), rep("G3", n_group), rep("G4",n_group))
  df <- data.frame(subject, group, values)
  # extract the omnibus p-value from the ANOVA table
  return(summary(aov(values ~ group, data=df))[[1]][["Pr(>F)"]][[1]])
}
n_group <- rep(10:50, each = 100) #vector with each group size 100 times
p_value <- lapply(n_group, aov_simulation) #use lapply to do the simulation for each element in vector (therefore each group size 100 times)
p_vector <- unlist(p_value, use.names=F) #transform output list into vector
results <- data.frame(n_group, p_vector) #get the group size and p-values together
#To see lowest and highest p for each group size
lowest_highest_p <- results %>%
  group_by(n_group) %>%
  summarize(low = min(p_vector), high = max(p_vector))
#Calculate noncentral f-distribution's value for each sample size
output <- results %>%
  group_by(n_group) %>% #group by sample size for each group
  summarize(sign = sum(p_vector < .05)) %>% #how many p<.05 for each sample size
  mutate(lambda = (n_group * 4)*(0.35^2)) %>% #lambda: N*f^2
  mutate(df_num = 3) %>% #in numerator: always df=3
  mutate(df_den = n_group*4 - 4) %>% #in denominator: N-k
  mutate(f_crit = qf(p = .95, df1 = df_num, df2 = df_den)) %>% #get critical F-value for each group size
  mutate(power = 1-(pf(q = f_crit, df1=df_num, df2=df_den, ncp=lambda))) #get power through non-central F-distribution
output
#Making the plot
gg_successes <- ggplot(output, aes(x=n_group, y=sign)) +
  geom_bar(stat="identity", fill="dodgerblue3", alpha=.9) +
  theme_bw() +
  labs(x="Participants per Group", y="Count of Significant Results &\nPower*100") +
  geom_line(aes(x=n_group, y = power*100), size=1.1) +
  scale_y_continuous(breaks=c(0,20,40,60,80,100))
gg_successes
ggsave("GP3_graph.png", path= "~/Documents/Master/Statistics /Assignment Writing",
       plot = gg_successes, width = 25, height = 16, units = "cm")
|
/Geoffrey_Assignment_3.R
|
no_license
|
boeltzig/PSYP13_Assignments_Marius
|
R
| false
| false
| 2,795
|
r
|
#------------------------------------------------#
# Home Assignment Geoffrey 3
#------------------------------------------------#
rm(list=ls())
graphics.off()
setwd("~/Documents/Master/Statistics /Exams")
library(ggplot2)
#write function for simulating data with a given n_group and output only the p-value
aov_simulation <- function(n_group){
group1 <- rnorm(n = n_group, mean = -.5, sd= 1)
group2 <- rnorm(n = n_group, mean = 0, sd= 1)
group3 <- rnorm(n = n_group, mean = 0, sd= 1)
group4 <- rnorm(n = n_group, mean = .5, sd= 1)
values <- c(group1, group2, group3, group4)
subject <- rep(seq(1,n_group),4)
subject <- factor(subject)
group <- c(rep("G1", n_group), rep("G2", n_group), rep("G3", n_group), rep("G4",n_group))
df <- data.frame(subject, group, values)
return(summary(aov(values ~ group, data=df))[[1]][["Pr(>F)"]][[1]])
}
# Simulation grid: every group size from 10 to 50, each run 100 times.
n_group <- rep(10:50, each = 100) #vector with each group size 100 times
p_value <- lapply(n_group, aov_simulation) #use lapply to do the simulation for each element in vector (therefore each group size 100 times)
p_vector <- unlist(p_value, use.names = FALSE) #fix: spell out FALSE; bare F is a reassignable binding
results <- data.frame(n_group, p_vector) #get the group size and p-values together
# NOTE(review): the pipelines below need dplyr attached (%>%, group_by,
# summarize, mutate) -- confirm library(dplyr) is loaded at the top.
#To see lowest and highest p for each group size
lowest_highest_p <- results %>%
group_by(n_group) %>%
summarize(low = min(p_vector), high = max(p_vector))
#Calculate noncentral f-distribution's value for each sample size
output <- results %>%
group_by(n_group) %>% #group by sample size for each group
summarize(sign = sum(p_vector < .05)) %>% #how many p<.05 for each sample size
mutate(lambda = (n_group * 4)*(0.35^2)) %>% #lambda: N*f^2 (Cohen's f = 0.35, matching the simulated means)
mutate(df_num = 3) %>% #in numerator: always df=3 (k - 1 = 3 for four groups)
mutate(df_den = n_group*4 - 4) %>% #in denominator: N-k
mutate(f_crit = qf(p = .95, df1 = df_num, df2 = df_den)) %>% #get critical F-value for each group size
mutate(power = 1-(pf(q = f_crit, df1=df_num, df2=df_den, ncp=lambda))) #get power through non-central F-distribution
output
# Plot: observed count of significant results per group size (bars) overlaid
# with theoretical power * 100 (line), so both share one axis.
gg_successes <- ggplot(output, aes(x=n_group, y=sign)) +
geom_bar(stat="identity", fill="dodgerblue3", alpha=.9) +
theme_bw() +
labs(x="Participants per Group", y="Count of Significant Results &\nPower*100") +
geom_line(aes(x=n_group, y = power*100), size=1.1) +
scale_y_continuous(breaks=c(0,20,40,60,80,100))
gg_successes
ggsave("GP3_graph.png", path= "~/Documents/Master/Statistics /Assignment Writing",
plot = gg_successes, width = 25, height = 16, units = "cm")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmc_glmm_logistic.R
\name{g_glmm_bin_posterior}
\alias{g_glmm_bin_posterior}
\title{Gradient of a Logistic Mixed Effects model log posterior}
\usage{
g_glmm_bin_posterior(
theta,
y,
X,
Z,
m,
q = 1,
A = 10000,
nulambda = 1,
Alambda = 25,
B = 10000
)
}
\arguments{
\item{theta}{vector of parameters. Stored as a single vector in order fixed effect, random effect, log-transformed diagonal \eqn{\lambda}, and off-diagonal of \code{G} vector \code{a}}
\item{y}{numeric vector for the dependent variable}
\item{X}{numeric design matrix of fixed effect parameters}
\item{Z}{numeric design matrix of random effect parameters}
\item{m}{number of random effect linear parameters}
\item{q}{number of random effects covariance parameters}
\item{A}{hyperprior numeric vector for the random effects off-diagonal \code{a}}
\item{nulambda}{hyperprior for the half-t prior of the random effects diagonal \eqn{\lambda}}
\item{Alambda}{hyperprior for the half-t prior of the random effects diagonal \eqn{A_\lambda}}
\item{B}{prior for linear predictors is multivariate Normal with mean 0 with diagonal covariance B^-1}
}
\value{
numeric vector for the gradient of the log posterior
}
\description{
Compute the gradient of the log posterior of a logistic mixed effects regression model.
Priors are multivariate Normal for the fixed effects
}
\details{
The likelihood function for logistic mixed effect regression
\deqn{p(y | X, Z, \beta, u) = \prod_{i=1}^n\prod_{j=1}^m \left(\frac{1}{1 + e^{-X_{i}\beta - Z_{ij}u_i}}\right)^{y_{ij}} \left(\frac{e^{-X_i\beta - Z_{ij}u_i}}{1 + e^{-X_{i}\beta - Z_{ij}u_i}}\right)^{1-y_{ij}} }
with priors \eqn{\beta \sim N(0, BI)}, \eqn{\sigma_\epsilon \sim half-t(A_\epsilon, nu_\epsilon)}, \eqn{\lambda \sim half-t(A_\lambda, nu_\lambda )}.
The vector \eqn{\lambda} is the diagonal of the covariance \code{G} hyperprior where \eqn{u \sim N(0, G)}. The off-diagonal hyperpriors are stored in a vector \eqn{a \sim N(0, A)}. See Chan, Jeliazkov (2009) for details.
The input parameter vector \code{theta} is of length \code{k}. The first \code{k-1} parameters are for \eqn{\beta}, and the last parameter is \eqn{\gamma}
}
\references{
Gelman, A. (2006). \emph{Prior distributions for variance parameters in hierarchical models (comment on article by Browne and Draper)}. Bayesian analysis, 1(3), 515-534.
Chan, J. C. C., & Jeliazkov, I. (2009). \emph{MCMC estimation of restricted covariance matrices}. Journal of Computational and Graphical Statistics, 18(2), 457-480.
Betancourt, M., & Girolami, M. (2015). \emph{Hamiltonian Monte Carlo for hierarchical models}. Current trends in Bayesian methodology with applications, 79, 30.
}
|
/man/g_glmm_bin_posterior.Rd
|
no_license
|
be-green/hmclearn
|
R
| false
| true
| 2,761
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmc_glmm_logistic.R
\name{g_glmm_bin_posterior}
\alias{g_glmm_bin_posterior}
\title{Gradient of a Logistic Mixed Effects model log posterior}
\usage{
g_glmm_bin_posterior(
theta,
y,
X,
Z,
m,
q = 1,
A = 10000,
nulambda = 1,
Alambda = 25,
B = 10000
)
}
\arguments{
\item{theta}{vector of parameters. Stored as a single vector in order fixed effect, random effect, log-transformed diagonal \eqn{\lambda}, and off-diagonal of \code{G} vector \code{a}}
\item{y}{numeric vector for the dependent variable}
\item{X}{numeric design matrix of fixed effect parameters}
\item{Z}{numeric design matrix of random effect parameters}
\item{m}{number of random effect linear parameters}
\item{q}{number of random effects covariance parameters}
\item{A}{hyperprior numeric vector for the random effects off-diagonal \code{a}}
\item{nulambda}{hyperprior for the half-t prior of the random effects diagonal \eqn{\lambda}}
\item{Alambda}{hyperprior for the half-t prior of the random effects diagonal \eqn{A_\lambda}}
\item{B}{prior for linear predictors is multivariate Normal with mean 0 with diagonal covariance B^-1}
}
\value{
numeric vector for the gradient of the log posterior
}
\description{
Compute the gradient of the log posterior of a logistic mixed effects regression model.
Priors are multivariate Normal for the fixed effects
}
\details{
The likelihood function for logistic mixed effect regression
\deqn{p(y | X, Z, \beta, u) = \prod_{i=1}^n\prod_{j=1}^m \left(\frac{1}{1 + e^{-X_{i}\beta - Z_{ij}u_i}}\right)^{y_{ij}} \left(\frac{e^{-X_i\beta - Z_{ij}u_i}}{1 + e^{-X_{i}\beta - Z_{ij}u_i}}\right)^{1-y_{ij}} }
with priors \eqn{\beta \sim N(0, BI)}, \eqn{\sigma_\epsilon \sim half-t(A_\epsilon, nu_\epsilon)}, \eqn{\lambda \sim half-t(A_\lambda, nu_\lambda )}.
The vector \eqn{\lambda} is the diagonal of the covariance \code{G} hyperprior where \eqn{u \sim N(0, G)}. The off-diagonal hyperpriors are stored in a vector \eqn{a \sim N(0, A)}. See Chan, Jeliazkov (2009) for details.
The input parameter vector \code{theta} is of length \code{k}. The first \code{k-1} parameters are for \eqn{\beta}, and the last parameter is \eqn{\gamma}
}
\references{
Gelman, A. (2006). \emph{Prior distributions for variance parameters in hierarchical models (comment on article by Browne and Draper)}. Bayesian analysis, 1(3), 515-534.
Chan, J. C. C., & Jeliazkov, I. (2009). \emph{MCMC estimation of restricted covariance matrices}. Journal of Computational and Graphical Statistics, 18(2), 457-480.
Betancourt, M., & Girolami, M. (2015). \emph{Hamiltonian Monte Carlo for hierarchical models}. Current trends in Bayesian methodology with applications, 79, 30.
}
|
library(synapseClient)
synapseLogin()
# Curation pipeline: RMA-style preprocessing of GSK Affymetrix CEL files,
# probe-to-gene-symbol summarization, replicate averaging, and upload of
# the result to Synapse as an R ExpressionSet object.
# Dataset <- add Layer
# project id <- add Dataset, etc
###############################################################################
# specify your working directory where raw data exist
# (directory must contain the raw .CEL files and the GSK_RNA.sdrf annotation)
workDirectory <- "/gluster/external-data/DAT_023__GSKCellLines/2010-12/Expression/Raw"
setwd(workDirectory)
library(affy)
library(preprocessCore)
# depending on CDF of given microarray, we have to use corresponding CDF info.
# Raw data importing procedure: ReadAffy() picks up all CEL files in the cwd.
Data <- ReadAffy()
# you can easily check the CDF by just typing
Data
# okay, GSK Expr microarray is hgu133plus2, so load its annotation package
library(hgu133plus2.db)
library(gdata)
probeName<-probeNames(Data)
probeSetName <- geneNames(Data)
## background adjustment : RMA convolution
# you might want the MAS5.0 background correction instead; then use: method = "mas"
DataBgRma <- bg.correct(Data, method = 'rma')
## Warning !!! hereafter, data should be log2 transformed
# log2 transformation of the perfect-match (PM) probe intensities
pmDataBgRma<-log2(probes(DataBgRma,"pm"))
## normalization : Quantile Normalization (strips dimnames, so restore them below)
normPmDataBgRma<-normalize.quantiles(pmDataBgRma)
rownames(normPmDataBgRma) <-rownames(pmDataBgRma)
colnames(normPmDataBgRma) <-colnames(pmDataBgRma)
# Symbol and probeset mapping ###########################
x <- hgu133plus2SYMBOL
# Get the probe identifiers that are mapped to a gene symbol
mapped_probes <- mappedkeys(x)
# Convert to a list
xx <- as.list(x[mapped_probes])
# Build a two-column (probeset, symbol) map.
# NOTE(review): growing probeGeneMap with rbind() in a loop is O(n^2);
# kept as-is to preserve behavior, but preallocation would be much faster.
probeGeneMap <-c()
for(i in 1:length(xx)){
probeGeneMap <- rbind(probeGeneMap,cbind(names(xx[i]),xx[[i]]))
}
#####################################
geneSymbolName<-unique(probeGeneMap[,2])
## Summarization : median polish by Tukey
# For each gene symbol: collect all PM-probe rows of its probesets, then
# summarize them into one expression row via median polish (fitted values
# = data minus residuals, averaged per array).
sumExp <-c()
for (i in 1:length(geneSymbolName)){
probesPerGene<-probeGeneMap[which(!is.na(match(probeGeneMap[,2],geneSymbolName[i]))),1]
set<-c()
for(j in 1:length(probesPerGene)){
set<-union(set,which(!is.na(match(probeName,probesPerGene[j]))))
}
exp<-normPmDataBgRma[set,]
sumExp <- rbind(sumExp,apply(t(exp)-medpolish(t(exp),trace.iter = FALSE)$residuals,1,mean))
}
rownames(sumExp) <- geneSymbolName
#########################################
## Read Sample Annotation for processing replicates
## Replace CEL file name to sampleName given annotation file
#########################################
# The first 8 lines of the .sdrf file are header boilerplate, hence skip = 8.
AnnotData <- read.delim2("GSK_RNA.sdrf",header = TRUE, skip =8)
# Strip spaces and hyphens from the upper-cased cell line names.
# (Corrected comment: the characters are REMOVED, not replaced with "_".
# sub() only replaces the first occurrence per call, hence the while loops;
# a single gsub() call would achieve the same result.)
annotSampleName <-toupper(AnnotData$Characteristics.Cell.Line.Name.)
while(length(grep(" ",annotSampleName)) !=0){
annotSampleName<-sub(" ","",annotSampleName)
}
while(length(grep("-",annotSampleName)) !=0){
annotSampleName<-sub("-","",annotSampleName)
}
# define function : geometric mean, not arithmetic mean : because of log2 transformed data
# Geometric mean of a numeric vector: (x1 * x2 * ... * xk)^(1/k).
# Used to average log2-scale replicate columns.
# NOTE(review): prod() can overflow/underflow for long vectors; kept as-is
# to preserve the original behavior exactly.
gm.mean <- function(dataVec) {
  prod(dataVec)^(1 / length(dataVec))
}
# to preserve non replicated samples: columns whose cell line name occurs
# once are carried over as-is; replicated columns are collapsed with the
# geometric mean (gm.mean) row-wise.
uniqueSampleName <-unique(annotSampleName)
finalData<-c()
# NOTE(review): growing finalData with cbind() in a loop is O(n^2); fine for
# a few hundred samples but preallocation would scale better.
for (i in 1:length(uniqueSampleName)){
a<-which(uniqueSampleName[i]==annotSampleName)
if (length(a)==1){
finalData<-cbind(finalData,sumExp[,a])
}
else{
finalData<-cbind(finalData,apply(sumExp[,a],1,gm.mean))
}
}
colnames(finalData)<-uniqueSampleName
rownames(finalData)<-rownames(sumExp)
# Wrap the gene-by-sample matrix in an ExpressionSet and upload it to the
# Synapse project as a new expression ("E") layer.
eSet <- new("ExpressionSet",exprs = finalData, annotation = annotation(Data))
probeIDLayer <- Layer(list(name = "R_Expression_GSK_RMA_GeneSymbol", type = "E", parentId = "114505", status="db"))
testLayer <- addObject(probeIDLayer, eSet)
testLayer <- storeEntity(testLayer)
|
/curation_code/curation_GSK_exprs.R
|
no_license
|
insockjang/DrugResponse
|
R
| false
| false
| 3,721
|
r
|
library(synapseClient)
synapseLogin()
# This code is implemented for curating data which has a features with gene symbol and non replicated samples with averaged replicates
# uploaded in Synapse with RObject
# Dataset <- add Layer
# project id <- add Dataset, etc
###############################################################################
# specify your working directory where raw data exist
# Here I selected the Sanger dataset for curating
workDirectory <- "/gluster/external-data/DAT_023__GSKCellLines/2010-12/Expression/Raw"
setwd(workDirectory)
library(affy)
library(preprocessCore)
# depending on CDF of given microarray, we have to use corresponding CDF info.
# Raw data importing procedure
Data <- ReadAffy()
# you can easily check the CDF by just typing
Data
# okay, GSK Expr microarray is hgu133plus2
library(hgu133plus2.db)
library(gdata)
probeName<-probeNames(Data)
probeSetName <- geneNames(Data)
## backgound adjustment : RMA convolution
# you might want to MAS5.0 background.Then replace method parameter to : method = "mas"
DataBgRma <- bg.correct(Data, method = 'rma')
## Warning !!! hereafter, data should be log2 transformed
# log2 transformation
pmDataBgRma<-log2(probes(DataBgRma,"pm"))
## normalization : Quantile Normalization
normPmDataBgRma<-normalize.quantiles(pmDataBgRma)
rownames(normPmDataBgRma) <-rownames(pmDataBgRma)
colnames(normPmDataBgRma) <-colnames(pmDataBgRma)
# Symbol and probeset mapping ###########################
x <- hgu133plus2SYMBOL
# Get the probe identifiers that are mapped to a gene symbol
mapped_probes <- mappedkeys(x)
# Convert to a list
xx <- as.list(x[mapped_probes])
probeGeneMap <-c()
for(i in 1:length(xx)){
probeGeneMap <- rbind(probeGeneMap,cbind(names(xx[i]),xx[[i]]))
}
#####################################
geneSymbolName<-unique(probeGeneMap[,2])
## Summarization : median polish by Tukey
sumExp <-c()
for (i in 1:length(geneSymbolName)){
probesPerGene<-probeGeneMap[which(!is.na(match(probeGeneMap[,2],geneSymbolName[i]))),1]
set<-c()
for(j in 1:length(probesPerGene)){
set<-union(set,which(!is.na(match(probeName,probesPerGene[j]))))
}
exp<-normPmDataBgRma[set,]
sumExp <- rbind(sumExp,apply(t(exp)-medpolish(t(exp),trace.iter = FALSE)$residuals,1,mean))
}
rownames(sumExp) <- geneSymbolName
#########################################
## Read Sample Annotation for processing replicates
## Replace CEL file name to sampleName given annotation file
#########################################
# The first 8 lines of the .sdrf file are header boilerplate, hence skip = 8.
AnnotData <- read.delim2("GSK_RNA.sdrf",header = TRUE, skip =8)
# Strip spaces and hyphens from the upper-cased cell line names.
# (Corrected comment: the characters are REMOVED, not replaced with "_".
# sub() only replaces the first occurrence per call, hence the while loops;
# a single gsub() call would achieve the same result.)
annotSampleName <-toupper(AnnotData$Characteristics.Cell.Line.Name.)
while(length(grep(" ",annotSampleName)) !=0){
annotSampleName<-sub(" ","",annotSampleName)
}
while(length(grep("-",annotSampleName)) !=0){
annotSampleName<-sub("-","",annotSampleName)
}
# define function : geometric mean, not arithmetic mean : because of log2 transformed data
# Geometric mean of a numeric vector: (x1 * x2 * ... * xk)^(1/k).
# Used to average log2-scale replicate columns.
# NOTE(review): prod() can overflow/underflow for long vectors; kept as-is
# to preserve the original behavior exactly.
gm.mean <- function(dataVec) {
  prod(dataVec)^(1 / length(dataVec))
}
# to preserve non replicated samples
uniqueSampleName <-unique(annotSampleName)
finalData<-c()
for (i in 1:length(uniqueSampleName)){
a<-which(uniqueSampleName[i]==annotSampleName)
if (length(a)==1){
finalData<-cbind(finalData,sumExp[,a])
}
else{
finalData<-cbind(finalData,apply(sumExp[,a],1,gm.mean))
}
}
colnames(finalData)<-uniqueSampleName
rownames(finalData)<-rownames(sumExp)
eSet <- new("ExpressionSet",exprs = finalData, annotation = annotation(Data))
probeIDLayer <- Layer(list(name = "R_Expression_GSK_RMA_GeneSymbol", type = "E", parentId = "114505", status="db"))
testLayer <- addObject(probeIDLayer, eSet)
testLayer <- storeEntity(testLayer)
|
library(shiny)
library(dplyr)
library(plotly)
library(shinythemes)

# UI for the "Law, Happiness, & Gun Violence" app: an Introduction tab,
# two analysis tabs (Happiness, Legislation), and a References tab.
# Fixes in this revision:
#  - href strings must be single-line: the originals were wrapped across
#    lines, embedding newlines/spaces into the URLs and breaking the links.
#  - the "DemographicData.org" reference pointed at the WalletHub URL
#    (copy-paste error); now points at demographicdata.org.
#  - typos corrected in user-facing copy (vicitim/accroding/innoncent/etc.).
ui <- navbarPage(
  # returns a theme
  theme = shinytheme("slate"),
  "Law, Happiness, & Gun Violence",
  # Introduction tab
  tabPanel(
    "Introduction",
    includeCSS("www/style.css"),
    div(
      class = "center",
      h1("Why It Matters"),
      em("The United States has been victim to 101 mass
         shootings this calendar year,"),
      em("according to the following article by"),
      a("Business Insider",
        href = "http://www.businessinsider.com/how-many-mass-shootings-in-america-this-year-2018-2",
        "."
      ),
      em("More worrisome, however, is the continuing lack of
         action from our elected officials in Washington D.C."),
      em("to get anything done in order to address this ongoing
         issue that has claimed so many innocent lives."),
      h2("Our Purpose"),
      em("Our mission behind this project is not to take sides
         or shift the blame from one side to another!"),
      em("Instead, our goal is to simply prompt awareness to an
         issue that has almost become something of a"),
      em("normality nowadays, unfortunately. Furthermore,
         we want to make it very clear that this is in no way, shape, or form"),
      em(" meant to come off as an"), strong("attack"),
      em("or"), strong("challenge"), em("to gun owners and
         those who believe in and
         support the 2nd Amendment."),
      em("Instead, the goal of our work is to spur healthy dialogue
         amongst all spectrums so that we can all work"),
      em("together to find a solution. No kid should be afraid
         to attend school and no parent should have to fear"),
      em("for their child's safety."),
      h3("Meet The Team:")
    ),
    # Team photo grid (images live in www/)
    flowLayout(
      div(
        class = "caption",
        tags$img(
          src = "33895746_1417277445084602_4308802712805310464_n.jpg",
          height = 200, width = 200
        ),
        p(em("Varun Patel"))
      ),
      div(
        class = "caption",
        tags$img(
          src = "33850842_961835110646127_8613901815181737984_n.jpg",
          height = 200, width = 200
        ),
        p(em("Logan Selley"))
      ),
      div(
        class = "caption",
        tags$img(
          src = "33835367_2093832907496109_6306088030159503360_n.jpg",
          height = 200, width = 200
        ),
        p(em("Trevor Leung"))
      ),
      div(
        class = "caption",
        tags$img(
          src = "IMG_3992.jpg",
          height = 200, width = 200
        ),
        p(em("Jose Aguirre"))
      )
    )
  ),
  # Happiness tab
  tabPanel(
    "Happiness",
    sidebarLayout(
      sidebarPanel( # options for y axis
        radioButtons("yaxis", "View Happiness Score by Rate of
                     Gun Related Deaths per 100k People,
                     Percent of Gun Ownership, or the
                     Count of Gun Control Laws:",
          choiceNames = c(
            "Rate of Gun Related Deaths per 100k People",
            "Percent of Gun Ownership", "Count of Gun Control Laws"
          ),
          choiceValues = c("rate", "percown", "lawtotal")
        )
      ),
      mainPanel(
        div(
          class = "center",
          # Introduction of tab
          h1("Comparing State Happiness with Gun Related Data"),
          em("Are happier states "), strong("safer"),
          em("? We took data from WalletHub, where
             they took in multiple factors "),
          em("like depression rates and average income
             to calculate a happiness score "),
          em("for each state. We compared the happiness scores
             from this data with the "),
          a("percentages of gun ownership per state",
            href = "http://demographicdata.org/facts-and-figures/gun-ownership-statistics/"
          ),
          em(", "),
          a("rate of gun related deaths per 100k people",
            href = "https://www.cdc.gov/nchs/pressroom/sosmap/firearm_mortality/firearm.htm"
          ),
          em(" and the "),
          a("count of gun control laws",
            href = "https://www.statefirearmlaws.org/table.html"
          ),
          em(" to make insightful conclusions and correlations
             between these variables. "),
          em("In order to learn more about how happiness score
             was calculated, follow this "),
          a("link", href = "https://wallethub.com/edu/happiest-states/6959/#methodology"),
          em("."),
          h3("Interactive Scatter Plot"),
          br(),
          plotlyOutput("scatter_plot"),
          br(),
          # Adds correlation and what it means
          textOutput("cor"),
          textOutput("cor_message"),
          br(),
          # interactive map description and visualization
          p("The following map is color coded by happiness rank.
            Useful information is displayed, such as
            the happiness rank out of all the states, the gun industry rank,
            which ranks the states
            by how much a state relies on the firearm industry,
            and the total victims of mass shootings."),
          h3("Interactive Map")
        ),
        br(),
        plotlyOutput("interactive_map")
      )
    )
  ),
  # Legislation tab
  tabPanel(
    "Legislation",
    sidebarLayout(
      sidebarPanel( # Options for scatter
        radioButtons("legislation",
          label = h3("Legislation Comparisons"),
          choices = list(
            "Gun Industry" = 1, "Gun Ownership" = 2,
            "Violent Crime" = 3,
            "Murder & Manslaughter" = 4
          )
        )
      ),
      mainPanel(
        div(
          # intro
          class = "center",
          h1("Analyzing the effects of State Legislated Gun Control"),
          em("Due to a lack of federal action, many have looked to state
             legislatures to pass gun control laws"),
          em(" in order to stem the epidemic of gun violence in the country.
             But is this legislation effective"),
          em(" in keeping our communities safe? And what might be keeping
             state legislatures from implementing"),
          em(" this reform?"),
          br(),
          br(),
          # Data sources
          strong("DATA & METHODOLOGY:"),
          br(),
          a("Mother Jones mass shooting data",
            href = "https://www.motherjones.com/politics/2012/12/mass-shootings-mother-jones-full-data/"
          ),
          br(),
          a("WalletHub gun industry data",
            href = "https://wallethub.com/edu/states-most-dependent-on-the-gun-industry/18719/"
          ),
          br(),
          a("State Firearm Laws",
            href = "https://www.statefirearmlaws.org/table.html"
          ),
          br(),
          a("State gun crime/ownership",
            href = "http://demographicdata.org/facts-and-figures/gun-ownership-statistics/"
          ),
          br(),
          br(),
          # Interactive scatter
          plotlyOutput("legis_scatter"),
          br(),
          textOutput("legis_cor"),
          textOutput("legis_cor_message"),
          br(),
          br(),
          # Static bar
          plotlyOutput("legislation_bar")
        )
      )
    )
  ),
  # References
  tabPanel(
    "References",
    tags$div(tags$ul(
      tags$li(a("WalletHub",
        href = "https://wallethub.com/edu/happiest-states/6959/#methodology"
      )),
      br(),
      # fix: this entry previously pointed at the WalletHub URL
      tags$li(a("DemographicData.org",
        href = "http://demographicdata.org/facts-and-figures/gun-ownership-statistics/"
      )),
      br(),
      tags$li(a("Centers for Disease Control and Prevention",
        href = "https://www.cdc.gov/nchs/pressroom/sosmap/firearm_mortality/firearm.htm"
      )),
      br(),
      tags$li(a("State Firearm Laws",
        href = "https://www.statefirearmlaws.org/table.html"
      )),
      br(),
      tags$li(a("Mother Jones mass shooting data",
        href = "https://www.motherjones.com/politics/2012/12/mass-shootings-mother-jones-full-data/"
      )),
      br(),
      tags$li(a("WalletHub gun industry data",
        href = "https://wallethub.com/edu/states-most-dependent-on-the-gun-industry/18719/"
      )),
      br(),
      tags$li(a("State Firearm Laws",
        href = "https://www.statefirearmlaws.org/table.html"
      )),
      br(),
      tags$li(a("State gun crime/ownership",
        href = "http://demographicdata.org/facts-and-figures/gun-ownership-statistics/"
      ))
    ))
  )
)
shinyUI(ui)
|
/ui.R
|
no_license
|
tleung22/final-project
|
R
| false
| false
| 8,825
|
r
|
library(shiny)
library(dplyr)
library(plotly)
library(shinythemes)
ui <- navbarPage(
# returns a theme
theme = shinytheme("slate"),
"Law, Happiness, & Gun Violence",
# Introduction tab
tabPanel(
"Introduction",
includeCSS("www/style.css"),
div(
class = "center",
h1("Why It Matters"),
em("The United States has been vicitim to 101 mass
shootings this calendar,"),
em("accroding to the following article by"),
a("Business Insider",
href =
"http://www.businessinsider.com/how-many
-mass-shootings-in-america-this-year-2018-2", "."
),
em("More worrisome however, is the continious lack of
action from our elected officials in Washington D.C."),
em("to get anything done in order to address this ongoing
issue that has claimed so many innoncent lives."),
h2("Our Purpose"),
em("Our mission behind this project is not take sides
or shift the blame from one side to another!"),
em("Instead, our goal is to simply prompt awareness to an
issue that has almost become like something of a"),
em("normality nowadays, unfortunately. Furthermore,
we want to make it very clear that this no way, shape, or form"),
em(" mean't to come off as a"), strong("attack"),
em("or"), strong("challenge"), em("to gun owners and
those who believe and
support the 2nd Amendment."),
em("Instead, the goal of our work to spurr healthy dialogue
amongst all spectrums so that we can all work"),
em("together to find a solution. No kid should be afraid
to attend school and no parent should have to fear"),
em("for their child's safety."),
h3("Meet The Team:")
),
flowLayout(
div(
class = "caption",
tags$img(
src = "33895746_1417277445084602_4308802712805310464_n.jpg",
height = 200, width = 200
),
p(em("Varun Patel"))
),
div(
class = "caption",
tags$img(
src = "33850842_961835110646127_8613901815181737984_n.jpg",
height = 200, width = 200
),
p(em("Logan Selley"))
),
div(
class = "caption",
tags$img(
src = "33835367_2093832907496109_6306088030159503360_n.jpg",
height = 200, width = 200
),
p(em("Trevor Leung"))
),
div(
class = "caption",
tags$img(
src = "IMG_3992.jpg",
height = 200, width = 200
),
p(em("Jose Aguirre"))
)
)
),
# Happiness tab
tabPanel(
"Happiness",
sidebarLayout(
sidebarPanel( # options for y axis
radioButtons("yaxis", "View Happiness Score by Rate of
Gun Related Deaths per 100k People,
Percent of Gun Ownership, or the
Count of Gun Control Laws:",
choiceNames = c(
"Rate of Gun Related Deaths per 100k People",
"Percent of Gun Ownership", "Count of Gun Control Laws"
),
choiceValues = c("rate", "percown", "lawtotal")
)
),
mainPanel(
div(
class = "center",
# Intoduction of tab
h1("Comparing State Happiness with Gun Related Data"),
em("Are happier states "), strong("safer"),
em("? We took data from wallet hub where
they took in multiple factors "),
em("like depression rates and average income
to calculate a happiness score "),
em("for each state. We compared the happiness scores
from this data with the "),
a("percentages of gun ownership per state",
href = "http://demographicdata.org/facts-and
-figures/gun-ownership-statistics/"
),
em(", "),
a("rate of gun related deaths per 100k people",
href = "https://www.cdc.gov/nchs/pressroom/sosmap
/firearm_mortality/firearm.htm"
),
em(" and the "),
a("count of gun control laws",
href = "https://www.statefirearmlaws.org/table.html"
),
em(" to make insightful conclusions and correlations
between these variables. "),
em("In order to learn more about how happiness score
was calculated, follow this "),
a("link", href = "https://wallethub.com/edu/happiest
-states/6959/#methodology"),
em("."),
h3("Interactive Scatter Plot"),
br(),
plotlyOutput("scatter_plot"),
br(),
# Adds correlation and what it means
textOutput("cor"),
textOutput("cor_message"),
br(),
# interactive map description and visualization
p("The following map is color coded by happiness rank.
Useful information such as
the happiness rank out of all the states, gun industry rank,
which ranks the states
by how much a state relies on the firearm industry,
and total victims of mass shootings."),
h3("Interactive Map")
),
br(),
plotlyOutput("interactive_map")
)
)
),
# Legislation tab
tabPanel(
"Legislation",
sidebarLayout(
sidebarPanel( # Options for scatter
radioButtons("legislation",
label = h3("Legislation Comparisons"),
choices = list(
"Gun Industry" = 1, "Gun Ownership" = 2,
"Violent Crime" = 3,
"Murder & Manslaughter" = 4
)
)
),
mainPanel(
div(
#intro
class = "center",
h1("Analyzing the effects of State Legislated Gun Control"),
em("Due to a lack of federal action, many have looked to state
legislatures to pass gun control laws"),
em(" in order to stem the epidemic of gun violence in the country.
But is this legislation effective"),
em(" in keeping our communities safe? And what might be keeping
state legislatures from implementing"),
em(" this reform?"),
br(),
br(),
#Data
strong("DATA & METHODOLOGY:"),
br(),
a("Mother Jones mass shooting data",
href = "https://www.motherjones.com/politics/2012/12/mass
-shootings-mother-jones-full-data/"
),
br(),
a("WalletHub gun industry data",
href = "https://wallethub.com/edu/states-most-
dependent-on-the-gun-industry/18719/"
),
br(),
a("State Firearm Laws",
href = "https://www.statefirearmlaws.org/table.html"
),
br(),
a("State gun crime/ownership",
href = "http://demographicdata.org/facts-and
-figures/gun-ownership-statistics/"
),
br(),
br(),
#Interactive scatter
plotlyOutput("legis_scatter"),
br(),
textOutput("legis_cor"),
textOutput("legis_cor_message"),
br(),
br(),
#Static Bar
plotlyOutput("legislation_bar")
)
)
)
),
# References
tabPanel(
"References",
tags$div(tags$ul(
tags$li(a("WalletHub",
href = "https://wallethub.com/edu/happiest-states/6959/#methodology"
)),
br(),
tags$li(a("DemographicData.org",
href = "https://wallethub.com/edu/happiest-states/6959/#methodology"
)),
br(),
tags$li(a("Centers for Disease Control and Prevention",
href = "https://www.cdc.gov/nchs/pressroom
/sosmap/firearm_mortality/firearm.htm"
)),
br(),
tags$li(a("State Firearm Laws",
href = "https://www.statefirearmlaws.org/table.html"
)),
br(),
tags$li(a("Mother Jones mass shooting data",
href = "https://www.motherjones.com/
politics/2012/12/mass-shootings-mother-jones-full-data/"
)),
br(),
tags$li(a("WalletHub gun industry data",
href = "https://wallethub.com/edu/
states-most-dependent-on-the-gun-industry/18719/"
)),
br(),
tags$li(a("State Firearm Laws",
href = "https://www.statefirearmlaws.org/table.html"
)),
br(),
tags$li(a("State gun crime/ownership",
href = "http://demographicdata.org/
facts-and-figures/gun-ownership-statistics/"
))
))
)
)
shinyUI(ui)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/submitter.R
\name{wait_for_qsub}
\alias{wait_for_qsub}
\title{Wait for qsub job}
\usage{
wait_for_qsub(remote, job_id, quiet = TRUE)
}
\arguments{
\item{remote}{(\code{\link{remote_server}}) The remote server information.}
\item{job_id}{(\code{character} of length 1) The id of the job.}
\item{quiet}{(\code{logical} of length 1) Supress messeges.
Default: TRUE}
}
\description{
Wait for qsub job to complete or fail.
}
|
/man/wait_for_qsub.Rd
|
no_license
|
zachary-foster/qsubmitter
|
R
| false
| true
| 500
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/submitter.R
\name{wait_for_qsub}
\alias{wait_for_qsub}
\title{Wait for qsub job}
\usage{
wait_for_qsub(remote, job_id, quiet = TRUE)
}
\arguments{
\item{remote}{(\code{\link{remote_server}}) The remote server information.}
\item{job_id}{(\code{character} of length 1) The id of the job.}
\item{quiet}{(\code{logical} of length 1) Supress messeges.
Default: TRUE}
}
\description{
Wait for qsub job to complete or fail.
}
|
# initialise h2o cluster (local, all cores)
localH2O <- h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, nthreads = -1)
# get max / min for scaling
# NOTE(review): summary.dt and extra.test.data are assumed to be loaded by
# the surrounding project (not visible in this file) -- confirm upstream.
maxs <- apply(summary.dt %>% select(-device_id, -epoch_id, -steps_bin, -steps, -n), 2, max)
mins <- apply(summary.dt %>% select(-device_id, -epoch_id, -steps_bin, -steps, -n), 2, min)
# scale summary 0-1 (min-max scaling of the feature columns only)
scaled.summary <- cbind(device_id = summary.dt$device_id,
steps_bin = summary.dt$steps_bin,
as.data.frame(scale(summary.dt %>% select(-device_id, -epoch_id, -steps_bin, -steps, -n),
center = mins, scale = maxs - mins)))
# The held-out extra data is scaled with the TRAINING mins/maxs on purpose,
# so the two datasets share the same feature space (avoids leakage).
extra.test.data.scaled <- cbind(device_id = extra.test.data$device_id,
steps_bin = extra.test.data$steps_bin,
as.data.frame(scale(extra.test.data %>%
select(-device_id, -epoch_id, -steps_bin, -steps, -n),
center = mins, scale = maxs - mins)))
# partition: 60% train, then split train again 60/40 into train/validation
set.seed(789465)
s <- createDataPartition(summary.dt$device_id, p = 0.6, list = FALSE)
set.seed(654)
train.h2o <- scaled.summary[s, ]
s1 <- createDataPartition(train.h2o$device_id, p = 0.6, list = FALSE)
val.h2o <- train.h2o[-s1, ]
train.h2o <- train.h2o[s1, ]
test.h2o <- scaled.summary[-s, ]
# push the three partitions to the h2o cluster (steps_bin cast to factor)
train_hex <- as.h2o(x = train.h2o %>% mutate(steps_bin = as.factor(steps_bin)), destination_frame = "localH2O")
val_hex <- as.h2o(x = val.h2o %>% mutate(steps_bin = as.factor(steps_bin)), destination_frame = "localH2O")
test_hex <- as.h2o(x = test.h2o %>% mutate(steps_bin = as.factor(steps_bin)), destination_frame = "localH2O")
# Baseline classifiers: predict device_id (column 1) from the scaled
# features (columns 2:9), with early stopping against the validation frame.
# simple rf model
simple.h2o.rf.model <- h2o.randomForest(x = 2:9,
y = 1,
training_frame = train_hex,
validation_frame = val_hex,
ntrees = 50,
max_depth = 10,
stopping_rounds = 2,
stopping_metric = "AUTO",
stopping_tolerance = 0.001,
seed = 789)
# gradient boosting with the same features/target and stopping settings
simple.h2o.gbm.model <- h2o.gbm(x = 2:9,
y = 1,
training_frame = train_hex,
validation_frame = val_hex,
ntrees = 50,
max_depth = 10,
stopping_rounds = 2,
stopping_metric = "AUTO",
stopping_tolerance = 0.001,
seed = 789)
# three-hidden-layer (200x200x200) ReLU network
simple.h2o.dnn.model <- h2o.deeplearning(x = 2:9,
y = 1,
training_frame = train_hex,
validation_frame = val_hex,
hidden = c(200,200,200),
activation = "Rectifier",
epochs = 10,
stopping_rounds = 2,
stopping_metric = "AUTO",
stopping_tolerance = 0.001,
seed = 789)
# hold-out confusion matrices for each of the three baseline models
confusionMatrix(h2o.predict(simple.h2o.rf.model, test_hex)$predict %>% as.vector(),
test_hex$device_id %>% as.vector())
confusionMatrix(h2o.predict(simple.h2o.gbm.model, test_hex)$predict %>% as.vector(),
test_hex$device_id %>% as.vector())
confusionMatrix(h2o.predict(simple.h2o.dnn.model, test_hex)$predict %>% as.vector(),
test_hex$device_id %>% as.vector())
#### tune gbm: random hyperparameter search capped at 240 seconds,
#### with 5-fold (Modulo-assigned) cross-validation per candidate.
search_criteria <- list(strategy = "RandomDiscrete",
max_runtime_secs = 240)
nfolds <- 5
# GBM hyperparameter grid
learn_rate_opt <- c(0.01, 0.02, 0.03)
max_depth_opt <- c(3, 5, 9, 15)
sample_rate_opt <- c(0.7, 0.8, 0.9, 1.0)
col_sample_rate_opt <- c(0.2, 0.4, 0.6, 0.8)
ntrees <- c(10, 100)
hyper_params <- list(learn_rate = learn_rate_opt,
ntrees = ntrees,
max_depth = max_depth_opt,
sample_rate = sample_rate_opt,
col_sample_rate = col_sample_rate_opt)
gbm_grid <- h2o.grid("gbm", x = 2:9, y = 1,
training_frame = train_hex,
seed = 1,
nfolds = nfolds,
fold_assignment = "Modulo",
keep_cross_validation_predictions = TRUE,
hyper_params = hyper_params,
search_criteria = search_criteria)
# rank candidates by CV logloss, best first (fix: FALSE instead of bare F)
gbm_grid <- h2o.getGrid(gbm_grid@grid_id, sort_by="logloss", decreasing = FALSE)
h2o.performance(h2o.getModel(gbm_grid@model_ids[[1]]), test_hex)
# fix: the original predicted on train_hex but compared against the TEST
# labels, a predictions/reference mismatch; both must come from test_hex.
confusionMatrix(h2o.predict(h2o.getModel(gbm_grid@model_ids[[1]]), test_hex)$predict %>% as.vector,
test_hex$device_id %>% as.vector)
# materialize all grid candidates as model objects
gbm_models <- lapply(gbm_grid@model_ids, function(model_id) h2o.getModel(model_id))
# on new data
## mostly matches with device TAS1E31150028 which was me
table(h2o.predict(simple.h2o.rf.model,
as.h2o(extra.test.data.scaled, destination_frame = "localH2O"))$predict %>%
as.factor %>% as.vector(),
extra.test.data.scaled$device_id)
table(h2o.predict(simple.h2o.gbm.model,
as.h2o(extra.test.data.scaled, destination_frame = "localH2O"))$predict %>%
as.factor %>% as.vector(),
extra.test.data.scaled$device_id)
table(h2o.predict(simple.h2o.rf.model,
as.h2o(extra.test.data.scaled, destination_frame = "localH2O"))$predict %>%
as.factor %>% as.vector(),
h2o.predict(simple.h2o.gbm.model,
as.h2o(extra.test.data.scaled, destination_frame = "localH2O"))$predict %>%
as.factor %>% as.vector())
h2o.varimp(simple.h2o.rf.model)
h2o.varimp(simple.h2o.gbm.model)
## Import Data to H2O Cluster
# Fit one random forest per steps_bin level; collect the fitted models and the
# test-set predictions across all bins.
h2o.results <- data.frame()
h2o.models <- list()
for (i in unique(scaled.summary$steps_bin)){
print(i)
# filter to steps_bin in question, partition and load data to cluster
# NOTE(review): all three uploads reuse destination_frame = "localH2O"; that is
# the frame's unique key on the cluster, so each upload appears to replace the
# previous one -- confirm intended h2o behaviour here.
train_hex <- as.h2o(x = train.h2o %>% filter(steps_bin == i) %>% select(-steps_bin), destination_frame = "localH2O")
val_hex <- as.h2o(x = val.h2o %>% filter(steps_bin == i) %>% select(-steps_bin), destination_frame = "localH2O")
test_hex <- as.h2o(x = test.h2o %>% filter(steps_bin == i) %>% select(-steps_bin), destination_frame = "localH2O")
# train h2o random forest
## straight out of box
# x = 2:8 (not 2:9) because steps_bin was dropped above, leaving one fewer
# predictor column than the full-data models
set.seed(8)
h2o.rf.model <- h2o.randomForest(x = 2:8,
y = 1,
training_frame = train_hex,
validation_frame = val_hex,
ntrees = 50,
max_depth = 10,
stopping_rounds = 2,
stopping_metric = "AUTO",
stopping_tolerance = 0.001,
seed = 789)
# store model in list of models, indexed by this bin's position in the
# unique(steps_bin) ordering
h2o.models[[which(i == unique(scaled.summary$steps_bin))]] <- h2o.rf.model
# predict on test dataset
yhat_test <- h2o.predict(h2o.rf.model, test_hex)$predict %>% as.factor
# store results (rbind-in-loop is fine here: one small frame per bin)
h2o.results <- rbind(h2o.results,
data.frame(steps_bin = i,
actual_dev_id = test_hex$device_id %>% as.vector(),
pred_dev_id = yhat_test %>% as.vector()))
}
rm(train_hex)
rm(test_hex)
rm(i)
rm(s)
rm(yhat_test)
names(h2o.models) <- unique(unique(scaled.summary$steps_bin))
cache('h2o.models')
cache('h2o.results')
# results
confusionMatrix(h2o.results$pred_dev_id, h2o.results$actual_dev_id)
#### test on extra.test.data
## actually worse :(
h2o.results.extra <- data.frame()
for (i in unique(scaled.summary$steps_bin)) {
print(i)
extra.test_hex <- as.h2o(x = extra.test.data.scaled %>%
filter(steps_bin == i) %>%
select(-steps_bin),
destination_frame = "localH2O")
pred <- h2o.predict(h2o.models[i][[1]], extra.test_hex)$predict %>% as.factor
h2o.results.extra <- rbind(h2o.results.extra,
data.frame(steps_bin = i,
pred_dev_id = pred %>% as.vector()))
}
table(h2o.results.extra$pred_dev_id, extra.test.data.scaled$device_id)
# variable importance
lapply(h2o.models, function(x) h2o.varimp(x))
# shut down cluster
h2o.shutdown()
y
|
/src/03 - model.binned.h2o.R
|
no_license
|
irishlouis/actigraphy_device_id
|
R
| false
| false
| 8,898
|
r
|
# initialise h2o cluster
localH2O <- h2o.init(ip = "localhost", port = 54321, startH2O = TRUE, nthreads = -1)
# get max / min for scaling (computed on the modelling data only, over every
# numeric feature column; ids/labels/counts are excluded)
maxs <- apply(summary.dt %>% select(-device_id, -epoch_id, -steps_bin, -steps, -n), 2, max)
mins <- apply(summary.dt %>% select(-device_id, -epoch_id, -steps_bin, -steps, -n), 2, min)
# scale summary 0-1 (min-max scaling) and re-attach the id/bin columns
# NOTE(review): if any feature column is constant, maxs - mins is 0 and scale()
# will produce NaN/Inf for it -- worth guarding if that can occur.
scaled.summary <- cbind(device_id = summary.dt$device_id,
steps_bin = summary.dt$steps_bin,
as.data.frame(scale(summary.dt %>% select(-device_id, -epoch_id, -steps_bin, -steps, -n),
center = mins, scale = maxs - mins)))
# Scale the extra hold-out data with the SAME mins/maxs as the training data,
# so both live on an identical 0-1 scale
extra.test.data.scaled <- cbind(device_id = extra.test.data$device_id,
steps_bin = extra.test.data$steps_bin,
as.data.frame(scale(extra.test.data %>%
select(-device_id, -epoch_id, -steps_bin, -steps, -n),
center = mins, scale = maxs - mins)))
# partition: 60% train (further split 60/40 into train/validation), 40% test,
# stratified on device_id via caret::createDataPartition
set.seed(789465)
s <- createDataPartition(summary.dt$device_id, p = 0.6, list = FALSE)
set.seed(654)
train.h2o <- scaled.summary[s, ]
s1 <- createDataPartition(train.h2o$device_id, p = 0.6, list = FALSE)
val.h2o <- train.h2o[-s1, ]
train.h2o <- train.h2o[s1, ]
test.h2o <- scaled.summary[-s, ]
# altogether
# Load the full (non-binned) train/validation/test splits to the H2O cluster.
# BUG FIX: all three frames previously shared destination_frame = "localH2O";
# destination_frame is the frame's unique key on the cluster, so each upload
# replaced the previous one. Give each split its own key.
train_hex <- as.h2o(x = train.h2o %>% mutate(steps_bin = as.factor(steps_bin)), destination_frame = "train_hex")
val_hex <- as.h2o(x = val.h2o %>% mutate(steps_bin = as.factor(steps_bin)), destination_frame = "val_hex")
test_hex <- as.h2o(x = test.h2o %>% mutate(steps_bin = as.factor(steps_bin)), destination_frame = "test_hex")
# simple rf model
simple.h2o.rf.model <- h2o.randomForest(x = 2:9,
y = 1,
training_frame = train_hex,
validation_frame = val_hex,
ntrees = 50,
max_depth = 10,
stopping_rounds = 2,
stopping_metric = "AUTO",
stopping_tolerance = 0.001,
seed = 789)
simple.h2o.gbm.model <- h2o.gbm(x = 2:9,
y = 1,
training_frame = train_hex,
validation_frame = val_hex,
ntrees = 50,
max_depth = 10,
stopping_rounds = 2,
stopping_metric = "AUTO",
stopping_tolerance = 0.001,
seed = 789)
simple.h2o.dnn.model <- h2o.deeplearning(x = 2:9,
y = 1,
training_frame = train_hex,
validation_frame = val_hex,
hidden = c(200,200,200),
activation = "Rectifier",
epochs = 10,
stopping_rounds = 2,
stopping_metric = "AUTO",
stopping_tolerance = 0.001,
seed = 789)
confusionMatrix(h2o.predict(simple.h2o.rf.model, test_hex)$predict %>% as.vector(),
test_hex$device_id %>% as.vector())
confusionMatrix(h2o.predict(simple.h2o.gbm.model, test_hex)$predict %>% as.vector(),
test_hex$device_id %>% as.vector())
confusionMatrix(h2o.predict(simple.h2o.dnn.model, test_hex)$predict %>% as.vector(),
test_hex$device_id %>% as.vector())
#### tune gbm
search_criteria <- list(strategy = "RandomDiscrete",
max_runtime_secs = 240)
nfolds <- 5
# GBM Hyperparamters
learn_rate_opt <- c(0.01, 0.02, 0.03)
max_depth_opt <- c(3, 5, 9, 15)
sample_rate_opt <- c(0.7, 0.8, 0.9, 1.0)
col_sample_rate_opt <- c(0.2, 0.4, 0.6, 0.8)
ntrees <- c(10, 100)
hyper_params <- list(learn_rate = learn_rate_opt,
ntrees = ntrees,
max_depth = max_depth_opt,
sample_rate = sample_rate_opt,
col_sample_rate = col_sample_rate_opt)
gbm_grid <- h2o.grid("gbm", x = 2:9, y = 1,
training_frame = train_hex,
seed = 1,
nfolds = nfolds,
fold_assignment = "Modulo",
keep_cross_validation_predictions = TRUE,
hyper_params = hyper_params,
search_criteria = search_criteria)
# Rank all grid models by cross-validated logloss, best model first
gbm_grid <- h2o.getGrid(gbm_grid@grid_id, sort_by="logloss", decreasing = FALSE)
# Performance of the best grid model on the held-out test set
h2o.performance(h2o.getModel(gbm_grid@model_ids[[1]]), test_hex)
# BUG FIX: predictions were previously generated on train_hex while the
# reference labels came from test_hex, so the two vectors did not describe the
# same observations. Predict on test_hex so rows line up with the labels.
confusionMatrix(h2o.predict(h2o.getModel(gbm_grid@model_ids[[1]]), test_hex)$predict %>% as.vector(),
test_hex$device_id %>% as.vector())
gbm_models <- lapply(gbm_grid@model_ids, function(model_id) h2o.getModel(model_id))
# on new data
## mostly matches with device TAS1E31150028 which was me
table(h2o.predict(simple.h2o.rf.model,
as.h2o(extra.test.data.scaled, destination_frame = "localH2O"))$predict %>%
as.factor %>% as.vector(),
extra.test.data.scaled$device_id)
table(h2o.predict(simple.h2o.gbm.model,
as.h2o(extra.test.data.scaled, destination_frame = "localH2O"))$predict %>%
as.factor %>% as.vector(),
extra.test.data.scaled$device_id)
table(h2o.predict(simple.h2o.rf.model,
as.h2o(extra.test.data.scaled, destination_frame = "localH2O"))$predict %>%
as.factor %>% as.vector(),
h2o.predict(simple.h2o.gbm.model,
as.h2o(extra.test.data.scaled, destination_frame = "localH2O"))$predict %>%
as.factor %>% as.vector())
h2o.varimp(simple.h2o.rf.model)
h2o.varimp(simple.h2o.gbm.model)
## Import Data to H2O Cluster
h2o.results <- data.frame()
h2o.models <- list()
for (i in unique(scaled.summary$steps_bin)){
print(i)
# filter to steps_bin in question, partition and load data to cluster
train_hex <- as.h2o(x = train.h2o %>% filter(steps_bin == i) %>% select(-steps_bin), destination_frame = "localH2O")
val_hex <- as.h2o(x = val.h2o %>% filter(steps_bin == i) %>% select(-steps_bin), destination_frame = "localH2O")
test_hex <- as.h2o(x = test.h2o %>% filter(steps_bin == i) %>% select(-steps_bin), destination_frame = "localH2O")
# train h2o random forest
## straight out of box
set.seed(8)
h2o.rf.model <- h2o.randomForest(x = 2:8,
y = 1,
training_frame = train_hex,
validation_frame = val_hex,
ntrees = 50,
max_depth = 10,
stopping_rounds = 2,
stopping_metric = "AUTO",
stopping_tolerance = 0.001,
seed = 789)
# store model in list of models
h2o.models[[which(i == unique(scaled.summary$steps_bin))]] <- h2o.rf.model
# predict on test dataset
yhat_test <- h2o.predict(h2o.rf.model, test_hex)$predict %>% as.factor
# store results
h2o.results <- rbind(h2o.results,
data.frame(steps_bin = i,
actual_dev_id = test_hex$device_id %>% as.vector(),
pred_dev_id = yhat_test %>% as.vector()))
}
rm(train_hex)
rm(test_hex)
rm(i)
rm(s)
rm(yhat_test)
names(h2o.models) <- unique(unique(scaled.summary$steps_bin))
cache('h2o.models')
cache('h2o.results')
# results
confusionMatrix(h2o.results$pred_dev_id, h2o.results$actual_dev_id)
#### test on extra.test.data
## actually worse :(
h2o.results.extra <- data.frame()
for (i in unique(scaled.summary$steps_bin)) {
print(i)
extra.test_hex <- as.h2o(x = extra.test.data.scaled %>%
filter(steps_bin == i) %>%
select(-steps_bin),
destination_frame = "localH2O")
pred <- h2o.predict(h2o.models[i][[1]], extra.test_hex)$predict %>% as.factor
h2o.results.extra <- rbind(h2o.results.extra,
data.frame(steps_bin = i,
pred_dev_id = pred %>% as.vector()))
}
table(h2o.results.extra$pred_dev_id, extra.test.data.scaled$device_id)
# variable importance
lapply(h2o.models, function(x) h2o.varimp(x))
# shut down cluster
h2o.shutdown()
y
|
#####-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#####
#####-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#####
##### Allele/Genotype Analysis at different Focal Points #####
#####-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#####
#####-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#####
#Today is March 9th, 2019
.libPaths("/global/home/hpc4300/RPackages")
Phenotype_Results = data.frame(Method = character(),IBS = numeric(),
AM = numeric(), AS = numeric(), h1 = numeric(),
Skat = numeric(), stringsAsFactors = FALSE )
SKAT_indices = seq(1,300, by=3)
GTSM_indices = seq(2,300, by = 3)
MDMR_indices=seq(3, 300, by=3)
for (file_number in (1:100)){
setwd("/global/project/hpcg1578/Crohn/Kernel_Analysis/JobFiles")
directory_name = paste("Run_", file_number, sep="")
setwd(directory_name)
haplodat = read.delim("crohn5q31_haplo.dat", colClasses = "character", header=FALSE)
FirstLine=unlist(strsplit(haplodat[1,1],split=""))
segsites=length(FirstLine)
newhaplodat=matrix(as.numeric(unlist(strsplit(haplodat[,1],split=""))),
ncol=segsites,byrow=T)
#STEP 2: Make sure our data is in terms of the Minor Allele.
## Get allele frequencies
f1=colSums(newhaplodat)/nrow(newhaplodat)
## We want allele frequencies to be in terms of minor allele frequencues (MAF)
# So.... if MAF>0.5, we ned to reverse the coding:
tochange=which(f1>0.5)
if (length(tochange) !=0){
for (i in 1:length(tochange)){
index=tochange[i]
newhaplodat[,index]=1-newhaplodat[,index]
}
}
# Code for phenotypes:
# The binary phenotype alternates between 1 and 0 for consecutive rows.
# BUG FIX: the original odd-row branch indexed with length(status_phenotype)
# before status_phenotype existed, which errors on first use (and, if a value
# survived from a previous loop iteration, silently dropped the wrong element).
# Building ceiling(n/2) pairs and truncating with seq_len(n) is correct for
# both parities: for even n the vector already has exactly n elements, for odd
# n the single trailing extra element is dropped.
status_phenotype <- rep(c(1, 0), ceiling(nrow(newhaplodat) / 2))[seq_len(nrow(newhaplodat))]
#Source some scripts to help calculate Kernels and to calculate assoc. statistics
source("/global/project/hpcg1578/Crohn/Kernel_Analysis/Peter_R_Code/Crohn_Calculate_all_kernels.R")
source("/global/project/hpcg1578/Crohn/Kernel_Analysis/Peter_R_Code/Crohn_NEW_gtsm.R")
source("/global/project/hpcg1578/Crohn/Kernel_Analysis/Peter_R_Code/Crohn_MDMR_Code.R")
mykernels = get.kernels(G=newhaplodat, n=nrow(newhaplodat), K=ncol(newhaplodat), treename="blah")
#Phenotype_Results = data.frame(Method = character(),IBS = numeric(),
# AM = numeric(), AS = numeric(), h1 = numeric(),
# Skat = numeric(),Tree1 = numeric(), Tree2 = numeric(),
# Tree3 = numeric(), Tree4 = numeric(), Tree5 = numeric(), stringsAsFactors = FALSE )
#-----||-----||-----|| - SKAT Code - ||-----||-----||-----#
null.model = SKAT_Null_Model(status_phenotype ~ 1, out_type="D")
#Fit the null model
p.val.SKAT=vector("list", length(mykernels))
#Initialize list of SKAT p-values
for (j in 1:length(mykernels)){
#Fill in list of SKAT p-values
p.val.SKAT[[j]]= tryCatch(SKAT(Z=as.matrix(newhaplodat), obj=null.model, kernel = mykernels[[j]])$p.value,
error = function(e)
paste("NA"))
#-----||-----||-----|| - What is tryCatch()? - ||-----||-----||-----#
#--> tryCatch() is implemented because in some datasets, we get an error.
#--> Essentially, some product kernels seem to not produce any positive eigenvalues
# and we can't compute p-values. This only happens a small amount of times.
#--> With the tryCatch(), if this error occurs we assign an NA value and just move on without
#disrupting the code.
#-----||-----||-----|| - What is tryCatch()? - ||-----||-----||-----#
}
for (j in (1:(length(p.val.SKAT)))){
#Add SKAT p-values to our results table.
Phenotype_Results[SKAT_indices[file_number],j+1] = p.val.SKAT[[j]]
}
Phenotype_Results[SKAT_indices[file_number],1] = "SKAT"
#-----||-----||-----|| - Gene Trait Similarity Regression - ||-----||-----||-----#
p.val.gtsm = similarity.regression.pheno1(P1=status_phenotype,n=nrow(newhaplodat), kernel.list=mykernels)
#Obtain list of GTSR p-values
for (j in (1:length(p.val.gtsm))){
#Fill in results table with GTSR p-values
Phenotype_Results[GTSM_indices[file_number],j+1] = p.val.gtsm[j]
}
Phenotype_Results[GTSM_indices[file_number],1] = "GTSM"
#-----||-----||-----||-----|| - MDMR - ||-----||-----||-----||-----#
p.val.MDMR = pval_P1_MDMR_function(P1=status_phenotype, kernel.list=mykernels)
#Obtain list of MDMR p-values
for (j in (1:length(p.val.MDMR))){
#Fill in results table with MDMR p-values
Phenotype_Results[MDMR_indices[file_number],j+1] = p.val.MDMR[j]
}
Phenotype_Results[MDMR_indices[file_number],1] = "MDMR"
labels = c("SKAT", "GTSM", "MDMR")
# for (i in 1:length(labels)){
# Phenotype_Results[i,1] = labels[i]
# }
}
setwd("/global/project/hpcg1578/Crohn/Kernel_Analysis/Focal_Analysis")
write.table(Phenotype_Results,paste("New_Allele_Kernel_Data",".txt", sep=""),quote=F,row=F,col=F)
|
/Crohns Disease/Allele Kernel Analysis/Focal Point Analysis/Crohn_focal_point_analysis.R
|
no_license
|
petertea96/Genetic-Association-Methodology-Research
|
R
| false
| false
| 5,356
|
r
|
#####-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#####
#####-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#####
##### Allele/Genotype Analysis at different Focal Points #####
#####-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#####
#####-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#-----#####
#Today is March 9th, 2019
.libPaths("/global/home/hpc4300/RPackages")
Phenotype_Results = data.frame(Method = character(),IBS = numeric(),
AM = numeric(), AS = numeric(), h1 = numeric(),
Skat = numeric(), stringsAsFactors = FALSE )
SKAT_indices = seq(1,300, by=3)
GTSM_indices = seq(2,300, by = 3)
MDMR_indices=seq(3, 300, by=3)
for (file_number in (1:100)){
setwd("/global/project/hpcg1578/Crohn/Kernel_Analysis/JobFiles")
directory_name = paste("Run_", file_number, sep="")
setwd(directory_name)
haplodat = read.delim("crohn5q31_haplo.dat", colClasses = "character", header=FALSE)
FirstLine=unlist(strsplit(haplodat[1,1],split=""))
segsites=length(FirstLine)
newhaplodat=matrix(as.numeric(unlist(strsplit(haplodat[,1],split=""))),
ncol=segsites,byrow=T)
#STEP 2: Make sure our data is in terms of the Minor Allele.
## Get allele frequencies
f1=colSums(newhaplodat)/nrow(newhaplodat)
## We want allele frequencies to be in terms of minor allele frequencues (MAF)
# So.... if MAF>0.5, we ned to reverse the coding:
tochange=which(f1>0.5)
if (length(tochange) !=0){
for (i in 1:length(tochange)){
index=tochange[i]
newhaplodat[,index]=1-newhaplodat[,index]
}
}
# Code for phenotypes:
# The binary phenotype alternates between 1 and 0 for consecutive rows.
# BUG FIX: the original odd-row branch indexed with length(status_phenotype)
# before status_phenotype existed, which errors on first use (and, if a value
# survived from a previous loop iteration, silently dropped the wrong element).
# Building ceiling(n/2) pairs and truncating with seq_len(n) is correct for
# both parities: for even n the vector already has exactly n elements, for odd
# n the single trailing extra element is dropped.
status_phenotype <- rep(c(1, 0), ceiling(nrow(newhaplodat) / 2))[seq_len(nrow(newhaplodat))]
#Source some scripts to help calculate Kernels and to calculate assoc. statistics
source("/global/project/hpcg1578/Crohn/Kernel_Analysis/Peter_R_Code/Crohn_Calculate_all_kernels.R")
source("/global/project/hpcg1578/Crohn/Kernel_Analysis/Peter_R_Code/Crohn_NEW_gtsm.R")
source("/global/project/hpcg1578/Crohn/Kernel_Analysis/Peter_R_Code/Crohn_MDMR_Code.R")
mykernels = get.kernels(G=newhaplodat, n=nrow(newhaplodat), K=ncol(newhaplodat), treename="blah")
#Phenotype_Results = data.frame(Method = character(),IBS = numeric(),
# AM = numeric(), AS = numeric(), h1 = numeric(),
# Skat = numeric(),Tree1 = numeric(), Tree2 = numeric(),
# Tree3 = numeric(), Tree4 = numeric(), Tree5 = numeric(), stringsAsFactors = FALSE )
#-----||-----||-----|| - SKAT Code - ||-----||-----||-----#
null.model = SKAT_Null_Model(status_phenotype ~ 1, out_type="D")
#Fit the null model
p.val.SKAT=vector("list", length(mykernels))
#Initialize list of SKAT p-values
for (j in 1:length(mykernels)){
#Fill in list of SKAT p-values
p.val.SKAT[[j]]= tryCatch(SKAT(Z=as.matrix(newhaplodat), obj=null.model, kernel = mykernels[[j]])$p.value,
error = function(e)
paste("NA"))
#-----||-----||-----|| - What is tryCatch()? - ||-----||-----||-----#
#--> tryCatch() is implemented because in some datasets, we get an error.
#--> Essentially, some product kernels seem to not produce any positive eigenvalues
# and we can't compute p-values. This only happens a small amount of times.
#--> With the tryCatch(), if this error occurs we assign an NA value and just move on without
#disrupting the code.
#-----||-----||-----|| - What is tryCatch()? - ||-----||-----||-----#
}
for (j in (1:(length(p.val.SKAT)))){
#Add SKAT p-values to our results table.
Phenotype_Results[SKAT_indices[file_number],j+1] = p.val.SKAT[[j]]
}
Phenotype_Results[SKAT_indices[file_number],1] = "SKAT"
#-----||-----||-----|| - Gene Trait Similarity Regression - ||-----||-----||-----#
p.val.gtsm = similarity.regression.pheno1(P1=status_phenotype,n=nrow(newhaplodat), kernel.list=mykernels)
#Obtain list of GTSR p-values
for (j in (1:length(p.val.gtsm))){
#Fill in results table with GTSR p-values
Phenotype_Results[GTSM_indices[file_number],j+1] = p.val.gtsm[j]
}
Phenotype_Results[GTSM_indices[file_number],1] = "GTSM"
#-----||-----||-----||-----|| - MDMR - ||-----||-----||-----||-----#
p.val.MDMR = pval_P1_MDMR_function(P1=status_phenotype, kernel.list=mykernels)
#Obtain list of MDMR p-values
for (j in (1:length(p.val.MDMR))){
#Fill in results table with MDMR p-values
Phenotype_Results[MDMR_indices[file_number],j+1] = p.val.MDMR[j]
}
Phenotype_Results[MDMR_indices[file_number],1] = "MDMR"
labels = c("SKAT", "GTSM", "MDMR")
# for (i in 1:length(labels)){
# Phenotype_Results[i,1] = labels[i]
# }
}
setwd("/global/project/hpcg1578/Crohn/Kernel_Analysis/Focal_Analysis")
write.table(Phenotype_Results,paste("New_Allele_Kernel_Data",".txt", sep=""),quote=F,row=F,col=F)
|
/MP3_project.R
|
no_license
|
Dlaureano/mini-project-3
|
R
| false
| false
| 11,713
|
r
| ||
# Of the four types of sources indicated by the type (point, nonpoint,
# onroad, nonroad) variable, which of these four sources have
# seen decreases in emissions from 1999–2008 for Baltimore
# City? Which have seen increases in emissions from 1999–2008?
# Use the ggplot2 plotting system to make a plot answer this
# question.
# Note, we will be using NEI_BC data
# BUG FIX: library() attaches a single package -- in `library(ggplot2, dplyr)`
# the second argument was matched to library()'s `help` parameter, so dplyr
# (which provides group_by()/summarise() below) was never actually attached.
library(ggplot2)
library(dplyr)
# Mean emissions per year and source type for Baltimore City
data1 <- group_by(NEI_BC, year, type)
data2 <- summarise(data1, eyear = mean(Emissions))
#####################################################
# Create plot
#####################################################
png(filename = "plot3.png")
qplot(year, eyear, data=data2, color = type,
ylab = "Average Emissions (tons)",
geom=c("point", "line"),
main = "Average Emissions for Baltimore City, MD by Source")
dev.off()
|
/plot3.R
|
no_license
|
mbirgen/Ex_Data_An_Project2
|
R
| false
| false
| 835
|
r
|
# Of the four types of sources indicated by the type (point, nonpoint,
# onroad, nonroad) variable, which of these four sources have
# seen decreases in emissions from 1999–2008 for Baltimore
# City? Which have seen increases in emissions from 1999–2008?
# Use the ggplot2 plotting system to make a plot answer this
# question.
# Note, we will be using NEI_BC data
# BUG FIX: library() attaches a single package -- in `library(ggplot2, dplyr)`
# the second argument was matched to library()'s `help` parameter, so dplyr
# (which provides group_by()/summarise() below) was never actually attached.
library(ggplot2)
library(dplyr)
# Mean emissions per year and source type for Baltimore City
data1 <- group_by(NEI_BC, year, type)
data2 <- summarise(data1, eyear = mean(Emissions))
#####################################################
# Create plot
#####################################################
png(filename = "plot3.png")
qplot(year, eyear, data=data2, color = type,
ylab = "Average Emissions (tons)",
geom=c("point", "line"),
main = "Average Emissions for Baltimore City, MD by Source")
dev.off()
|
# dplyr practice exercises on the ggplot2::mpg and ggplot2::midwest datasets.
library(dplyr)
# BUG FIX: unconditionally calling install.packages() re-installs ggplot2 (and
# hits the network) on every run; only install when the package is missing.
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages("ggplot2")
str(ggplot2::mpg)
head(ggplot2::mpg)
# Problem 1 (original comment in Korean: "문제1")
mpg<-as.data.frame(ggplot2::mpg)
#1-1: dimensions of the data
# NOTE(review): select()/filter() with no arguments do nothing useful here;
# kept as written in the exercise sheet.
mpg %>% select()
mpg %>% filter()
nrow(mpg)
ncol(mpg)
#1-2: first 10 rows
mpg %>% head(10)
#1-3: last 10 rows
mpg %>% tail(10)
#1-4: open the data viewer
mpg %>% View()
#1-5: summary statistics per column
summary(mpg)
#1-6: structure of the data frame
str(mpg)
#2-1: rename cty -> city, hwy -> highway (rename() syntax: new = old)
mpg<-mpg %>% rename("city"="cty","highway"="hwy")
#2-2: confirm the renamed columns
mpg %>% head()
#3-1: load the midwest demographics data
midwest<-as.data.frame(ggplot2::midwest)
str(midwest)
#3-2: rename poptotal -> total, popasian -> asian
midwest<-midwest %>% rename("total"="poptotal","asian"="popasian")
head(midwest)
#3-3: percentage of Asian population per county
midwest<-midwest %>% mutate(tmp=asian/total*100)
#3-4: classify counties as above/below the mean Asian percentage
midwest %>% mutate(size=ifelse(tmp>mean(tmp),"large","small"))
#4-1: compare mean highway mpg for small (displ <= 4) vs large (displ >= 5) engines
mpg<-ggplot2::mpg
tmp<-mpg %>% filter(displ<=4)
mean(tmp$hwy)
tmp2<-mpg %>% filter(displ>=5)
mean(tmp2$hwy)
#4-2: compare mean city mpg for Audi vs Toyota
audi<-mpg %>% filter(manufacturer=="audi")
mean(audi$cty)
toyota<-mpg %>% filter(manufacturer=="toyota")
mean(toyota$cty)
#4-3: mean highway mpg for Chevrolet, Ford and Honda, separately and combined
chevrolet<-mpg %>% filter(manufacturer=="chevrolet")
mean(chevrolet$hwy)
ford<-mpg %>% filter(manufacturer=="ford")
mean(ford$hwy)
honda<-mpg %>% filter(manufacturer=="honda")
mean(honda$hwy)
allCar<-mpg %>% filter(manufacturer=="chevrolet"|manufacturer=="ford"|manufacturer=="honda")
mean(allCar$hwy)
mpg %>% filter(manufacturer=="chevrolet"|manufacturer=="ford"|manufacturer=="honda") %>% summarise(tmp=mean(hwy))
#5-1: keep only class and cty
mpg2<-mpg %>% select(class,cty)
str(mpg2)
#5-2: compare mean city mpg for suv vs compact
tmp<-mpg2 %>% filter(class=="suv")
mean(tmp$cty)
tmp2<-mpg2 %>% filter(class=="compact")
mean(tmp2$cty)
mpg2 %>% group_by(class)%>% summarise(tmp=mean(cty)) %>% filter(class=="suv"|class=="compact")
#6-1: Audi model with the highest mean highway mpg
mpg %>% filter(manufacturer=="audi")%>% group_by(model) %>% summarise(tmp=mean(hwy)) %>% arrange(desc(tmp)) %>% head(1)
#6-2: top-5 Audi cars by highway mpg
mpg %>% filter(manufacturer=="audi") %>% arrange(desc(hwy)) %>% head(5)
|
/dplyr_lab1.R
|
no_license
|
hsyun89/Rstudy
|
R
| false
| false
| 1,750
|
r
|
library(dplyr)
install.packages("ggplot2")
str(ggplot2::mpg)
head(ggplot2::mpg)
#문제1
mpg<-as.data.frame(ggplot2::mpg)
#1-1
mpg %>% select()
mpg %>% filter()
nrow(mpg)
ncol(mpg)
#1-2
mpg %>% head(10)
#1-3
mpg %>% tail(10)
#1-4
mpg %>% View()
#1-5
summary(mpg)
#1-6
str(mpg)
#2-1
mpg<-mpg %>% rename("city"="cty","highway"="hwy")
#2-2
mpg %>% head()
#3-1
midwest<-as.data.frame(ggplot2::midwest)
str(midwest)
#3-2
midwest<-midwest %>% rename("total"="poptotal","asian"="popasian")
head(midwest)
#3-3
midwest<-midwest %>% mutate(tmp=asian/total*100)
#3-4
midwest %>% mutate(size=ifelse(tmp>mean(tmp),"large","small"))
#4-1
mpg<-ggplot2::mpg
tmp<-mpg %>% filter(displ<=4)
mean(tmp$hwy)
tmp2<-mpg %>% filter(displ>=5)
mean(tmp2$hwy)
#4-2
audi<-mpg %>% filter(manufacturer=="audi")
mean(audi$cty)
toyota<-mpg %>% filter(manufacturer=="toyota")
mean(toyota$cty)
#4-3
chevrolet<-mpg %>% filter(manufacturer=="chevrolet")
mean(chevrolet$hwy)
ford<-mpg %>% filter(manufacturer=="ford")
mean(ford$hwy)
honda<-mpg %>% filter(manufacturer=="honda")
mean(honda$hwy)
allCar<-mpg %>% filter(manufacturer=="chevrolet"|manufacturer=="ford"|manufacturer=="honda")
mean(allCar$hwy)
mpg %>% filter(manufacturer=="chevrolet"|manufacturer=="ford"|manufacturer=="honda") %>% summarise(tmp=mean(hwy))
#5-1
mpg2<-mpg %>% select(class,cty)
str(mpg2)
#5-2
tmp<-mpg2 %>% filter(class=="suv")
mean(tmp$cty)
tmp2<-mpg2 %>% filter(class=="compact")
mean(tmp2$cty)
mpg2 %>% group_by(class)%>% summarise(tmp=mean(cty)) %>% filter(class=="suv"|class=="compact")
#6-1
mpg %>% filter(manufacturer=="audi")%>% group_by(model) %>% summarise(tmp=mean(hwy)) %>% arrange(desc(tmp)) %>% head(1)
#6-2
mpg %>% filter(manufacturer=="audi") %>% arrange(desc(hwy)) %>% head(5)
|
library(ProTrackR)
### Name: name
### Title: Obtain or replace the name of a PTModule or PTSample
### Aliases: name name,PTSample-method name<-
###   name<-,PTSample,character-method name,PTModule-method
###   name<-,PTModule,character-method
### ** Examples
data("mod.intro")
## get the name of mod.intro:
name(mod.intro)
## I don't like the name, let's change it:
name(mod.intro) <- "I like this name better"
## Note that the provided name was too long and is truncated:
name(mod.intro)
## print all sample names in the module:
## vapply over seq_len(31) replaces unlist(lapply(as.list(1:31), ...)):
## the as.list() wrapper was redundant (lapply accepts a vector) and vapply
## guarantees the result is a character vector of length 31.
vapply(seq_len(31), function(x) name(PTSample(mod.intro, x)), character(1))
|
/data/genthat_extracted_code/ProTrackR/examples/name.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 616
|
r
|
library(ProTrackR)
### Name: name
### Title: Obtain or replace the name of a PTModule or PTSample
### Aliases: name name,PTSample-method name<-
### name<-,PTSample,character-method name,PTModule-method
### name<-,PTModule,character-method
### ** Examples
data("mod.intro")
## get the name of mod.intro:
name(mod.intro)
## I don't like the name, let's change it:
name(mod.intro) <- "I like this name better"
## Note that the provided name was too long and is truncated:
name(mod.intro)
## print all sample names in the module:
unlist(lapply(as.list(1:31), function(x)
name(PTSample(mod.intro, x))))
|
# Auto-generated fuzzing harness for the internal C++ routine
# myTAI:::cpp_omitMatrix. testlist supplies the two arguments: an all-zero
# AgeVector of length 53 and a 5x7 ExpressionSet matrix filled with extreme
# double values (over/underflow-range magnitudes) to stress the native code.
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 3.85183323117031e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
# Invoke the unexported C++ entry point with the fuzzed arguments
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
# Print the structure of the result -- this harness only checks that the call
# completes; memory errors are detected externally (valgrind, per the path).
str(result)
|
/myTAI/inst/testfiles/cpp_omitMatrix/AFL_cpp_omitMatrix/cpp_omitMatrix_valgrind_files/1615845589-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 1,091
|
r
|
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 3.85183323117031e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394236936e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result)
|
## Part III: Fish Community Composition between Reefs
#Here again, we will remove all the object to clean the memory
knitr::opts_chunk$set(eval = FALSE)
remove(list = ls())
cran_packages <- c("knitr", "phyloseqGraphTest", "phyloseq", "shiny", "microbiome",
"tidyverse", "miniUI", "caret", "pls", "e1071", "ggplot2",
"randomForest","entropart", "vegan", "plyr", "dplyr", "here",
"ggrepel", "nlme", "R.utils", "gridExtra","grid", "googledrive",
"googlesheets", "phangorn", "devtools", "rmarkdown", "sys",
"reshape2", "devtools", "PMA","structSSI","ade4", "ape",
"Biostrings", "igraph", "ggnetwork", "intergraph", "ips",
"scales", "kableExtra", "pgirmess", "treemap", "knitr","kableExtra",
"rstudioapi" ,"data.table","DT","pander","formatR","grDevices","svgPanZoom",
"RCurl","plotly","pairwiseAdonis", "stringr")
github_packages <- c("jfukuyama/phyloseqGraphTest")
bioc_packages <- c("phyloseq", "genefilter", "impute", "dada2", "DECIPHER")
# Install CRAN packages (if not already installed)
#Some packages would be not availbale for your R version
inst <- cran_packages %in% installed.packages()
if (any(! inst)) {
install.packages(cran_packages[!inst], repos = "http://cran.rstudio.com/") }
#
inst <- github_packages %in% installed.packages()
if (any(! inst)) {
devtools::install_github(github_packages[!inst]) }
# Load libraries
sapply(c(cran_packages, bioc_packages), require, character.only = TRUE)
sessionInfo()
set.seed(1000)
# Set wd------
knitr::opts_knit$set(root.dir = getwd())
path = getwd()
# This will setwd to wherever the .Rmd file is opened.
dir_sample_selection <- paste0(path,"/analyses/01_select_samples/")
dir_seq_processing <- paste0(path,"/analyses/02_process_sequences/")
dir_taxa_assign <- paste0(path,"/analyses/03_assign_taxonomy/")
dir_data_cleaning <- paste0(path, "/analyses/04_data_cleaning/")
dir_primers <- paste0(path,"/dir_data_source/primers_sequences/")
dir_refdb <- paste0(path,"/dir_data_source/reference_databases/")
dir_fastq_source <- paste0(path,"/dir_data_source/sequences/")
#In this is part, we will focus on our sampling dataset and analyze the distribution of the hosts between the IRs and HRs.
envdata <- read.csv2(paste0(dir_refdb, "env.csv"))
colnames(envdata)[1] = "ID"
load(paste0(dir_taxa_assign, "sey_gut.RData"))
load(paste0(dir_taxa_assign, "sey_final.RData"))
#We will first write the table of the species and their corresponding diet.
diet_sp <- as.data.frame(table(envdata$diet4, envdata$tax1))[which(as.data.frame(table(envdata$diet4, envdata$tax1))[,3]> 0),][,c(1,2)]
sp_reef <- table(envdata$tax1, envdata$geomorpho)
sp_reef <- cbind(as.data.frame(sp_reef[,1]), as.data.frame(sp_reef[,2]))
table_diet_sp <- cbind(diet_sp[,2],diet_sp[,1],sp_reef[,c(1,2)])
table_to_print <- rbind(table_diet_sp[-c(26,37),], table_diet_sp[c(26,37),])
datatable(table_to_print, rownames = F, width = "100%",
colnames = c("Species", "Diet", "HR","IR"),
caption = htmltools::tags$caption(style = "caption-side:
bottom; text-align: left;",
"Table: ",
htmltools::em("Sampling table of the species and diet between reefs.")),
extensions = "Buttons",
options = list(columnDefs =
list(list(className = "dt-left", targets = 0)),
dom = "Blfrtip", pageLength = 5,
lengthMenu = c(5, 10, 25, 50),
buttons = c("csv", "copy"),
scrollX = TRUE, scrollCollapse = TRUE))
##### PCoA on fish community------
#The distribution of the sampling set is a first result showing the influence of the shift on the fish communities. How are they distributed?
# Restrict to gut samples and build a site x family abundance table
samp_data <- envdata[envdata$type=="gut",]
fam_tab <- table(samp_data$site,samp_data$family)
# Transform to log
fam.log <- log1p(fam_tab) # Equivalent: log(fam_tab + 1)
# Principal coordinate analysis and simple ordination plot
# Bray-Curtis dissimilarity between sites on the log-abundances
fam.D <- vegdist(fam.log, "bray")
res <- pcoa(fam.D)
#res$values
biplot(res, fam.log)
# Hard-coded eigenvalue percentages kept for reference; recompute via
# res$values$Relative_eig if the data change.
#round(res$values$Relative_eig[1]*100, 1) # 57.8 %
#round(res$values$Relative_eig[2]*100, 1) # 26 %
# Site metadata: four coral (C1-C4) and three macroalgal (M1-M3) reefs
site1 <- c("C1","C2","C3","C4", "M1","M2","M3")
site2 <- c(rep("coral", 4), rep("macroalgal",3))
site_data <- cbind(site1,site2)
colnames(site_data) <- c("site", "geomorpho")
site_data <- as.data.frame(site_data)
# PERMANOVA: does reef type explain community dissimilarity?
# NOTE(review): vegan::adonis is superseded by adonis2 in recent vegan
# releases -- consider migrating when upgrading the package.
adonis(fam.D ~ geomorpho, data = site_data)
# Homogeneity of multivariate dispersions between reef types (a significant
# permutest would mean the PERMANOVA result may reflect dispersion, not location)
beta_reef <- betadisper(fam.D, site_data$geomorpho)
permutest(beta_reef)
#Now, we will do the same on trophic srtucture with the diet
samp_data <- envdata[envdata$type=="gut",]
diet_tab <- table(samp_data$site,samp_data$diet4)
# Transform to log
diet.log <- log1p(diet_tab) # Equivalent: log(diet_tab + 1)
# Principal coordinate analysis and simple ordination plot
diet.D <- vegdist(diet.log, "bray")
res <- pcoa(diet.D)
#res$values
par(mfrow=c(1,2))
biplot(res, diet.log)
percent(res$values$Relative_eig[1])
percent(res$values$Relative_eig[2])
site1 <- c("C1","C2","C3","C4", "M1","M2","M3")
site2 <- c(rep("coral", 4), rep("macroalgal",3))
site_data <- cbind(site1,site2)
colnames(site_data) <- c("site", "geomorpho")
site_data <- as.data.frame(site_data)
adonis(diet.D ~ geomorpho, data = site_data)
beta_reef <- betadisper(diet.D, site_data$geomorpho)
permutest(beta_reef)
#Because we will focus our sutdy on herbivores and invertivores, we need to know if families are equally distributed.
samp_data_inv <- samp_data[samp_data$diet4 == "Mobile invertebrate",]
fam_tab <- table(samp_data_inv$site,samp_data_inv$family)
# Transform to log
fam.log <- log1p(fam_tab) # Equivalent: log(fam_tab + 1)
# Principal coordinate analysis and simple ordination plot
fam.D <- vegdist(fam.log, "bray")
res <- pcoa(fam.D)
#res$values
biplot(res, fam.log)
percent(res$values$Relative_eig[1])
percent(res$values$Relative_eig[2])
site1 <- c("C1","C2","C3","C4", "M1","M2","M3")
site2 <- c(rep("coral", 4), rep("macroalgal",3))
site_data <- cbind(site1,site2)
colnames(site_data) <- c("site", "geomorpho")
site_data <- as.data.frame(site_data)
adonis(fam.D ~ geomorpho, data = site_data)
beta_reef <- betadisper(fam.D, site_data$geomorpho)
permutest(beta_reef)
|
/03_host_community.R
|
no_license
|
mccheutin/Seychelles
|
R
| false
| false
| 6,427
|
r
|
## Part III: Fish Community Composition between Reefs
# Here again, we will remove all objects to clean the memory
knitr::opts_chunk$set(eval = FALSE)
remove(list = ls())
# Packages needed for this part (CRAN, GitHub and Bioconductor sources)
cran_packages <- c("knitr", "phyloseqGraphTest", "phyloseq", "shiny", "microbiome",
"tidyverse", "miniUI", "caret", "pls", "e1071", "ggplot2",
"randomForest","entropart", "vegan", "plyr", "dplyr", "here",
"ggrepel", "nlme", "R.utils", "gridExtra","grid", "googledrive",
"googlesheets", "phangorn", "devtools", "rmarkdown", "sys",
"reshape2", "devtools", "PMA","structSSI","ade4", "ape",
"Biostrings", "igraph", "ggnetwork", "intergraph", "ips",
"scales", "kableExtra", "pgirmess", "treemap", "knitr","kableExtra",
"rstudioapi" ,"data.table","DT","pander","formatR","grDevices","svgPanZoom",
"RCurl","plotly","pairwiseAdonis", "stringr")
github_packages <- c("jfukuyama/phyloseqGraphTest")
bioc_packages <- c("phyloseq", "genefilter", "impute", "dada2", "DECIPHER")
# Install CRAN packages (if not already installed)
# Some packages may not be available for your R version
inst <- cran_packages %in% installed.packages()
if (any(! inst)) {
install.packages(cran_packages[!inst], repos = "http://cran.rstudio.com/") }
# Install GitHub packages (if not already installed)
inst <- github_packages %in% installed.packages()
if (any(! inst)) {
devtools::install_github(github_packages[!inst]) }
# Load libraries
sapply(c(cran_packages, bioc_packages), require, character.only = TRUE)
sessionInfo()
set.seed(1000)
# Set wd------
knitr::opts_knit$set(root.dir = getwd())
path = getwd()
# This will setwd to wherever the .Rmd file is opened.
# Directory layout used throughout the pipeline
dir_sample_selection <- paste0(path,"/analyses/01_select_samples/")
dir_seq_processing <- paste0(path,"/analyses/02_process_sequences/")
dir_taxa_assign <- paste0(path,"/analyses/03_assign_taxonomy/")
dir_data_cleaning <- paste0(path, "/analyses/04_data_cleaning/")
dir_primers <- paste0(path,"/dir_data_source/primers_sequences/")
dir_refdb <- paste0(path,"/dir_data_source/reference_databases/")
dir_fastq_source <- paste0(path,"/dir_data_source/sequences/")
# In this part, we will focus on our sampling dataset and analyze the distribution of the hosts between the IRs and HRs.
envdata <- read.csv2(paste0(dir_refdb, "env.csv"))
colnames(envdata)[1] = "ID"
# Phyloseq objects produced by the taxonomy-assignment step
load(paste0(dir_taxa_assign, "sey_gut.RData"))
load(paste0(dir_taxa_assign, "sey_final.RData"))
# We will first write the table of the species and their corresponding diet.
diet_sp <- as.data.frame(table(envdata$diet4, envdata$tax1))[which(as.data.frame(table(envdata$diet4, envdata$tax1))[,3]> 0),][,c(1,2)]
# Per-species counts on each reef type (geomorphology)
sp_reef <- table(envdata$tax1, envdata$geomorpho)
sp_reef <- cbind(as.data.frame(sp_reef[,1]), as.data.frame(sp_reef[,2]))
table_diet_sp <- cbind(diet_sp[,2],diet_sp[,1],sp_reef[,c(1,2)])
# Move rows 26 and 37 to the bottom of the printed table
table_to_print <- rbind(table_diet_sp[-c(26,37),], table_diet_sp[c(26,37),])
# NOTE(review): rownames = F uses the reassignable shorthand; FALSE is safer.
datatable(table_to_print, rownames = F, width = "100%",
colnames = c("Species", "Diet", "HR","IR"),
caption = htmltools::tags$caption(style = "caption-side:
bottom; text-align: left;",
"Table: ",
htmltools::em("Sampling table of the species and diet between reefs.")),
extensions = "Buttons",
options = list(columnDefs =
list(list(className = "dt-left", targets = 0)),
dom = "Blfrtip", pageLength = 5,
lengthMenu = c(5, 10, 25, 50),
buttons = c("csv", "copy"),
scrollX = TRUE, scrollCollapse = TRUE))
##### PCoA on fish community------
# The distribution of the sampling set is a first result showing the influence of the shift on the fish communities. How are they distributed?
samp_data <- envdata[envdata$type=="gut",]
fam_tab <- table(samp_data$site,samp_data$family)
# Transform to log
fam.log <- log1p(fam_tab) # Equivalent: log(fam_tab + 1)
# Principal coordinate analysis and simple ordination plot
fam.D <- vegdist(fam.log, "bray")
res <- pcoa(fam.D)
#res$values
biplot(res, fam.log)
#round(res$values$Relative_eig[1]*100, 1) # 57.8 %
#round(res$values$Relative_eig[2]*100, 1) # 26 %
# Site metadata: C1-C4 are coral sites, M1-M3 macroalgal sites
site1 <- c("C1","C2","C3","C4", "M1","M2","M3")
site2 <- c(rep("coral", 4), rep("macroalgal",3))
site_data <- cbind(site1,site2)
colnames(site_data) <- c("site", "geomorpho")
site_data <- as.data.frame(site_data)
# PERMANOVA plus dispersion-homogeneity test between reef types
adonis(fam.D ~ geomorpho, data = site_data)
beta_reef <- betadisper(fam.D, site_data$geomorpho)
permutest(beta_reef)
# Now, we will do the same on trophic structure with the diet
samp_data <- envdata[envdata$type=="gut",]
diet_tab <- table(samp_data$site,samp_data$diet4)
# Transform to log
diet.log <- log1p(diet_tab) # Equivalent: log(diet_tab + 1)
# Principal coordinate analysis and simple ordination plot
diet.D <- vegdist(diet.log, "bray")
res <- pcoa(diet.D)
#res$values
par(mfrow=c(1,2))
biplot(res, diet.log)
percent(res$values$Relative_eig[1])
percent(res$values$Relative_eig[2])
site1 <- c("C1","C2","C3","C4", "M1","M2","M3")
site2 <- c(rep("coral", 4), rep("macroalgal",3))
site_data <- cbind(site1,site2)
colnames(site_data) <- c("site", "geomorpho")
site_data <- as.data.frame(site_data)
adonis(diet.D ~ geomorpho, data = site_data)
beta_reef <- betadisper(diet.D, site_data$geomorpho)
permutest(beta_reef)
# Because we will focus our study on herbivores and invertivores, we need to know if families are equally distributed.
samp_data_inv <- samp_data[samp_data$diet4 == "Mobile invertebrate",]
fam_tab <- table(samp_data_inv$site,samp_data_inv$family)
# Transform to log
fam.log <- log1p(fam_tab) # Equivalent: log(fam_tab + 1)
# Principal coordinate analysis and simple ordination plot
fam.D <- vegdist(fam.log, "bray")
res <- pcoa(fam.D)
#res$values
biplot(res, fam.log)
percent(res$values$Relative_eig[1])
percent(res$values$Relative_eig[2])
site1 <- c("C1","C2","C3","C4", "M1","M2","M3")
site2 <- c(rep("coral", 4), rep("macroalgal",3))
site_data <- cbind(site1,site2)
colnames(site_data) <- c("site", "geomorpho")
site_data <- as.data.frame(site_data)
adonis(fam.D ~ geomorpho, data = site_data)
beta_reef <- betadisper(fam.D, site_data$geomorpho)
permutest(beta_reef)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bulma-card.R
\name{bulmaCard}
\alias{bulmaCard}
\alias{bulmaCardHeader}
\alias{bulmaCardHeaderTitle}
\alias{bulmaCardContent}
\alias{bulmaCardFooter}
\alias{bulmaCardFooterItem}
\title{Add card}
\usage{
bulmaCard(...)
bulmaCardHeader(...)
bulmaCardHeaderTitle(...)
bulmaCardContent(...)
bulmaCardFooter(...)
bulmaCardFooterItem(..., tag = shiny::span)
}
\arguments{
\item{...}{any element.}
\item{tag}{html tag.}
}
\description{
Flexible card component.
}
\examples{
if(interactive()){
library(shiny)
shinyApp(
ui = bulmaPage(
bulmaSection(
bulmaContainer(
bulmaCard(
bulmaCardHeader(
bulmaCardHeaderTitle(
"Card title"
)
),
bulmaCardContent(
"Content of the card."
),
bulmaCardFooter(
bulmaCardFooterItem(
"Item 1"
),
bulmaCardFooterItem(
"Item 2"
)
)
)
)
)
),
server = function(input, output) {}
)
}
}
\author{
John Coene, \email{jcoenep@ymail.com}
}
|
/man/card.Rd
|
no_license
|
PaulC91/shinybulma
|
R
| false
| true
| 1,145
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bulma-card.R
\name{bulmaCard}
\alias{bulmaCard}
\alias{bulmaCardHeader}
\alias{bulmaCardHeaderTitle}
\alias{bulmaCardContent}
\alias{bulmaCardFooter}
\alias{bulmaCardFooterItem}
\title{Add card}
\usage{
bulmaCard(...)
bulmaCardHeader(...)
bulmaCardHeaderTitle(...)
bulmaCardContent(...)
bulmaCardFooter(...)
bulmaCardFooterItem(..., tag = shiny::span)
}
\arguments{
\item{...}{any element.}
\item{tag}{html tag.}
}
\description{
Flexible card component.
}
\examples{
if(interactive()){
library(shiny)
shinyApp(
ui = bulmaPage(
bulmaSection(
bulmaContainer(
bulmaCard(
bulmaCardHeader(
bulmaCardHeaderTitle(
"Card title"
)
),
bulmaCardContent(
"Content of the card."
),
bulmaCardFooter(
bulmaCardFooterItem(
"Item 1"
),
bulmaCardFooterItem(
"Item 2"
)
)
)
)
)
),
server = function(input, output) {}
)
}
}
\author{
John Coene, \email{jcoenep@ymail.com}
}
|
## ----------------------------------------------------------
## makeCacheMatrix -
##
## defines a function that takes a matrix as a parameter,
## and defines access methods to the matrix and a
## value of the matrix inverse, to be calculated and stored
# Build a "cache-aware" matrix wrapper: a list of closures sharing a
# matrix `x` and a memoised inverse. set() replaces the matrix and
# invalidates the cache; setinv()/getinv() store and retrieve the
# cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # new data invalidates the cached inverse
  }
  get <- function() x
  setinv <- function(inv) cached_inverse <<- inv
  getinv <- function() cached_inverse
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## ----------------------------------------------------------
## ----------------------------------------------------------
## cacheSolve -
##
## defines a function that takes an object defined by a
## makeCacheMatrix function. computes the inverse of the
## object's stored matrix value
##
## assumes the matrix is square and invertible
# Return the inverse of the matrix held in a makeCacheMatrix() object
# `x`, computing it with solve() on first use and serving the cached
# value on later calls. Extra arguments are forwarded to solve().
# Assumes the stored matrix is square and invertible.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)  # remember the result for the next call
  }
  cached
}
## ----------------------------------------------------------
|
/cachematrix.R
|
no_license
|
lastlift/ProgrammingAssignment2
|
R
| false
| false
| 1,413
|
r
|
## ----------------------------------------------------------
## makeCacheMatrix -
##
## defines a function that takes a matrix as a parameter,
## and defines access methods to the matrix and a
## value of the matrix inverse, to be calculated and stored
# Build a "cache-aware" matrix wrapper: a list of closures sharing a
# matrix `x` and a memoised inverse. set() replaces the matrix and
# invalidates the cache; setinv()/getinv() store and retrieve the
# cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # new data invalidates the cached inverse
  }
  get <- function() x
  setinv <- function(inv) cached_inverse <<- inv
  getinv <- function() cached_inverse
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## ----------------------------------------------------------
## ----------------------------------------------------------
## cacheSolve -
##
## defines a function that takes an object defined by a
## makeCacheMatrix function. computes the inverse of the
## object's stored matrix value
##
## assumes the matrix is square and invertible
# Return the inverse of the matrix held in a makeCacheMatrix() object
# `x`, computing it with solve() on first use and serving the cached
# value on later calls. Extra arguments are forwarded to solve().
# Assumes the stored matrix is square and invertible.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)  # remember the result for the next call
  }
  cached
}
## ----------------------------------------------------------
|
##############################################################################
## STAR for Testing on DAGs
##################################################################
############
source("generic_STAR.R")
#### Find_Candidate_Fun
## Find candidate rejection sets under the strong heredity criterion.
## covar: igraph DAG over all hypotheses; mask: logical vector of
## still-masked hypotheses; prop: unused here (kept for interface
## compatibility with the other find.*.fun callbacks); order: with
## order = 1 only the leaves of the masked subgraph are candidates,
## otherwise each node together with all its descendants is a set.
find.DAG.strong <- function(covar, mask, prop = NULL,
                            order = 1){
  DAG <- induced_subgraph(covar, mask)
  n <- sum(mask)
  # Leaves of the masked sub-DAG = nodes with no outgoing edges
  leaves <- V(DAG)$name[degree(DAG, mode = "out") < 1]
  if (order == 1){
    return(as.list(as.numeric(leaves)))
  }
  ## Bug fix: this branch previously returned `candids` without ever
  ## defining it (the computation below was commented out), so any call
  ## with order != 1 failed with "object 'candids' not found".
  ## order = n reaches every descendant, so each candidate set is a
  ## node plus its full subtree.
  candids <- neighborhood(DAG, order = n,
                          nodes = V(DAG), mode = "out")
  candids <- lapply(candids, function(candid){
    as.numeric(names(candid))
  })
  return(candids)
}
# Find candidate nodes under the weak heredity criterion.
# covar: igraph DAG over all hypotheses; mask: logical vector of
# still-masked hypotheses; prop: unused (interface compatibility).
# Returns a list of single node ids (numeric): every node except the
# sole parents of single-parent children.
find.DAG.weak <- function(covar, mask, prop = NULL){
DAG <- induced_subgraph(covar, mask)
n <- sum(mask)
# Number of parents of each node (in-neighbours, excluding itself)
child.degree <- neighborhood(DAG, order = 1, nodes = V(DAG),
mode = "in", mindist = 1)
child.degree <- sapply(child.degree, length)
# Children that have exactly one parent ...
invalid.child <- V(DAG)$name[child.degree == 1]
# ... make that parent ineligible as a candidate
invalid.node <- neighborhood(DAG, order = 1,
nodes = invalid.child,
mode = "in", mindist = 1)
invalid.node <- sapply(invalid.node, function(node){
names(node)
})
candids <- setdiff(V(DAG)$name, invalid.node)
candids <- as.list(as.numeric(candids))
## Older subtree-based implementation kept below for reference.
## candids <- lapply(V(DAG), function(vertex){
## subtree <- neighborhood(DAG, order = n,
## nodes = vertex, mode = "out")[[1]]
## if (length(subtree) == 1){
## return(subtree)
## } else {
## valid <- sapply(subtree[-1], function(node){
## parents <-
## neighborhood(DAG, order = 1,
## nodes = node, mode = "in")[[1]]
## all(parents %in% subtree)
## })
## }
## subtree[c(TRUE, valid)]
## })
## candids <- lapply(candids, function(candid){
## as.numeric(names(candid))
## })
return(candids)
}
#### Update_Mask_Fun
## Naive Update
# Reveal (unmask) the candidate sets with the highest average score.
# candid: list of index vectors; score: per-hypothesis score vector;
# mask: logical masking vector (TRUE = masked); prop: fraction of the
# masked hypotheses to reveal this step (clamped to [1, length(candid)]
# candidate sets). Ties at the threshold are all revealed.
DAG.mask.update <- function(candid, score, mask, prop){
  n_reveal <- ceiling(sum(mask) * prop)
  n_reveal <- min(max(n_reveal, 1), length(candid))
  set_means <- vapply(candid,
                      function(idx) mean(score[idx]),
                      numeric(1))
  cutoff <- sort(set_means, decreasing = TRUE)[n_reveal]
  chosen <- candid[set_means >= cutoff]
  mask[unique(unlist(chosen))] <- FALSE
  mask
}
#### Plot_Fun
# Draw a DAG with masked nodes in the foreground colour and revealed
# nodes in the background colour, using a layered Sugiyama layout.
# DAG: igraph object; mask: logical (TRUE = still masked); main: plot
# title; layers: optional layer assignment for layout_with_sugiyama().
plot.rej.DAG <- function(DAG, mask, main, layers = NULL,
col.bg = "#000000", col.fg = "#FFB6C1",
vertex.size = 7,
edge.arrow.size = 0.5,
...){
# Masked nodes -> col.fg, revealed nodes -> col.bg
color <- ifelse(mask, col.fg, col.bg)
layout <- layout_with_sugiyama(DAG, layers,
attributes="all")
plot(layout$extd_graph, main = main,
vertex.size = vertex.size,
edge.arrow.size = edge.arrow.size,
vertex.color = color, ...)
}
# Plot callback used by STAR.DAG()/generic.STAR(): optionally evaluates
# an extra plot expression (`add.plot`) side by side in a 1x2 layout,
# then draws the current rejection state via plot.rej.DAG().
# `score` is accepted for interface compatibility but not used here.
DAG.plot <- function(covar, mask, score, main,
add.plot = NULL,
cex.main = 2, ...){
par(cex.main = cex.main)
if (!is.null(add.plot)){
par(mfrow = c(1, 2))
eval(add.plot)
}
plot.rej.DAG(covar, mask, main = main, ...)
}
####
# Run the generic STAR procedure on a DAG of hypotheses.
# pvals: vector of p-values; DAG: igraph object over the hypotheses;
# fun.list/alpha.list: accumulation function and target FDR levels,
# passed through to generic.STAR(); type: "naive" (no side information)
# or "knockoff" (use the user-supplied `score`); criterion: "strong" or
# "weak" heredity, selecting the candidate-finding callback;
# prop.high/prop.low: reveal proportions per step.
STAR.DAG <- function(
pvals, DAG,
fun.list = create.fun("SeqStep", pstar = 0.5),
alpha.list = seq(0.05, 0.3, 0.01),
type = c("naive", "knockoff"),
score = NULL,
criterion = c("strong", "weak"),
plot.fun = DAG.plot,
update.mask.fun = DAG.mask.update,
prop.high = 0.05,
prop.low = 0.01,
...){
# Default to the first element of the choice vectors
type <- type[1]
criterion <- criterion[1]
if (criterion == "strong"){
find.candid.fun <- find.DAG.strong
} else if (criterion == "weak"){
find.candid.fun <- find.DAG.weak
# Under weak heredity, reveal proportion 1/n, i.e. one node per step
prop.high = 1 / length(pvals)
prop.low = 1 / length(pvals)
}
if (type == "naive"){
score.fun <- NULL
} else if (type == "knockoff"){
# Constant score function: ignores its arguments and returns the
# user-supplied `score` captured by closure
score.fun <- function(covar, pvals, mask,
fun.list, score0){
score
}
}
generic.STAR(pvals = pvals, covar = DAG, type = type,
fun.list = fun.list, alpha.list = alpha.list,
score.fun = score.fun,
find.candid.fun = find.candid.fun,
update.mask.fun = update.mask.fun,
plot.fun = plot.fun,
prop.high = prop.high,
prop.low = prop.low,
...)
}
|
/R/STAR_DAG.R
|
no_license
|
lihualei71/STAR
|
R
| false
| false
| 4,699
|
r
|
##############################################################################
## STAR for Testing on DAGs
##################################################################
############
source("generic_STAR.R")
#### Find_Candidate_Fun
## Find candidate rejection sets under the strong heredity criterion.
## covar: igraph DAG over all hypotheses; mask: logical vector of
## still-masked hypotheses; prop: unused here (kept for interface
## compatibility with the other find.*.fun callbacks); order: with
## order = 1 only the leaves of the masked subgraph are candidates,
## otherwise each node together with all its descendants is a set.
find.DAG.strong <- function(covar, mask, prop = NULL,
                            order = 1){
  DAG <- induced_subgraph(covar, mask)
  n <- sum(mask)
  # Leaves of the masked sub-DAG = nodes with no outgoing edges
  leaves <- V(DAG)$name[degree(DAG, mode = "out") < 1]
  if (order == 1){
    return(as.list(as.numeric(leaves)))
  }
  ## Bug fix: this branch previously returned `candids` without ever
  ## defining it (the computation below was commented out), so any call
  ## with order != 1 failed with "object 'candids' not found".
  ## order = n reaches every descendant, so each candidate set is a
  ## node plus its full subtree.
  candids <- neighborhood(DAG, order = n,
                          nodes = V(DAG), mode = "out")
  candids <- lapply(candids, function(candid){
    as.numeric(names(candid))
  })
  return(candids)
}
find.DAG.weak <- function(covar, mask, prop = NULL){
DAG <- induced_subgraph(covar, mask)
n <- sum(mask)
child.degree <- neighborhood(DAG, order = 1, nodes = V(DAG),
mode = "in", mindist = 1)
child.degree <- sapply(child.degree, length)
invalid.child <- V(DAG)$name[child.degree == 1]
invalid.node <- neighborhood(DAG, order = 1,
nodes = invalid.child,
mode = "in", mindist = 1)
invalid.node <- sapply(invalid.node, function(node){
names(node)
})
candids <- setdiff(V(DAG)$name, invalid.node)
candids <- as.list(as.numeric(candids))
## candids <- lapply(V(DAG), function(vertex){
## subtree <- neighborhood(DAG, order = n,
## nodes = vertex, mode = "out")[[1]]
## if (length(subtree) == 1){
## return(subtree)
## } else {
## valid <- sapply(subtree[-1], function(node){
## parents <-
## neighborhood(DAG, order = 1,
## nodes = node, mode = "in")[[1]]
## all(parents %in% subtree)
## })
## }
## subtree[c(TRUE, valid)]
## })
## candids <- lapply(candids, function(candid){
## as.numeric(names(candid))
## })
return(candids)
}
#### Update_Mask_Fun
## Naive Update
# Reveal (unmask) the candidate sets with the highest average score.
# candid: list of index vectors; score: per-hypothesis score vector;
# mask: logical masking vector (TRUE = masked); prop: fraction of the
# masked hypotheses to reveal this step (clamped to [1, length(candid)]
# candidate sets). Ties at the threshold are all revealed.
DAG.mask.update <- function(candid, score, mask, prop){
  n_reveal <- ceiling(sum(mask) * prop)
  n_reveal <- min(max(n_reveal, 1), length(candid))
  set_means <- vapply(candid,
                      function(idx) mean(score[idx]),
                      numeric(1))
  cutoff <- sort(set_means, decreasing = TRUE)[n_reveal]
  chosen <- candid[set_means >= cutoff]
  mask[unique(unlist(chosen))] <- FALSE
  mask
}
#### Plot_Fun
plot.rej.DAG <- function(DAG, mask, main, layers = NULL,
col.bg = "#000000", col.fg = "#FFB6C1",
vertex.size = 7,
edge.arrow.size = 0.5,
...){
color <- ifelse(mask, col.fg, col.bg)
layout <- layout_with_sugiyama(DAG, layers,
attributes="all")
plot(layout$extd_graph, main = main,
vertex.size = vertex.size,
edge.arrow.size = edge.arrow.size,
vertex.color = color, ...)
}
DAG.plot <- function(covar, mask, score, main,
add.plot = NULL,
cex.main = 2, ...){
par(cex.main = cex.main)
if (!is.null(add.plot)){
par(mfrow = c(1, 2))
eval(add.plot)
}
plot.rej.DAG(covar, mask, main = main, ...)
}
####
STAR.DAG <- function(
pvals, DAG,
fun.list = create.fun("SeqStep", pstar = 0.5),
alpha.list = seq(0.05, 0.3, 0.01),
type = c("naive", "knockoff"),
score = NULL,
criterion = c("strong", "weak"),
plot.fun = DAG.plot,
update.mask.fun = DAG.mask.update,
prop.high = 0.05,
prop.low = 0.01,
...){
type <- type[1]
criterion <- criterion[1]
if (criterion == "strong"){
find.candid.fun <- find.DAG.strong
} else if (criterion == "weak"){
find.candid.fun <- find.DAG.weak
prop.high = 1 / length(pvals)
prop.low = 1 / length(pvals)
}
if (type == "naive"){
score.fun <- NULL
} else if (type == "knockoff"){
score.fun <- function(covar, pvals, mask,
fun.list, score0){
score
}
}
generic.STAR(pvals = pvals, covar = DAG, type = type,
fun.list = fun.list, alpha.list = alpha.list,
score.fun = score.fun,
find.candid.fun = find.candid.fun,
update.mask.fun = update.mask.fun,
plot.fun = plot.fun,
prop.high = prop.high,
prop.low = prop.low,
...)
}
|
# min() method for rv objects (simulation-based random variables).
# If any argument is an rv (per the rv package's anyisrv()), the
# minimum is taken simulation-wise via simapply() over the
# column-bound draws; otherwise fall straight through to base::min().
min.rv <- function(..., na.rm=FALSE) {
if (anyisrv(...)) {
simapply(cbind.rv(...), base::min, na.rm=na.rm)
} else {
base::min(..., na.rm=na.rm)
}
}
# max() method for rv objects: mirror of min.rv(). The maximum is
# taken simulation-wise via simapply() when any argument is an rv,
# otherwise base::max() handles the plain-vector case.
max.rv <- function(..., na.rm=FALSE) {
if (anyisrv(...)) {
simapply(cbind.rv(...), base::max, na.rm=na.rm)
} else {
base::max(..., na.rm=na.rm)
}
}
# Parallel (element-wise) minimum for rv objects: extract the raw
# simulation array, apply base::pmin within each simulation draw, and
# wrap the result back into an rv with rvsims().
# NOTE(review): na.rm is not forwarded to base::pmin in the rv branch
# -- confirm whether that is intentional.
pmin.rv <- function(..., na.rm=FALSE) {
if (anyisrv(...)) {
a <- sims(cbind.rv(...), dimensions=TRUE)
rvsims(t(apply(a, 1, function (m) apply(m, 1, base::pmin))))
} else {
base::pmin(..., na.rm=na.rm)
}
}
# Parallel (element-wise) maximum for rv objects: mirror of pmin.rv().
# NOTE(review): na.rm is not forwarded to base::pmax in the rv branch
# -- confirm whether that is intentional.
pmax.rv <- function(..., na.rm=FALSE) {
if (anyisrv(...)) {
a <- sims(cbind.rv(...), dimensions=TRUE)
rvsims(t(apply(a, 1, function (m) apply(m, 1, base::pmax))))
} else {
base::pmax(..., na.rm=na.rm)
}
}
# rv objects are never treated as recursive, so this is.recursive()
# method reports FALSE unconditionally.
is.recursive.rv <- function(x) {
  FALSE
}
# rv objects always count as atomic, so this is.atomic() method
# reports TRUE unconditionally.
is.atomic.rv <- function(x) {
  TRUE
}
# TRUE only when x is an rv object and every one of its simulation
# vectors is of integer type (checked via rvsimapply()).
is.integer.rv <- function (x) {
return(is.rv(x) && all(rvsimapply(x, is.integer)))
}
# TRUE only when x is an rv object and every one of its simulation
# vectors is of logical type (checked via rvsimapply()).
is.logical.rv <- function (x) {
return(is.rv(x) && all(rvsimapply(x, is.logical)))
}
|
/rv/R/min_max_rv.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,054
|
r
|
min.rv <- function(..., na.rm=FALSE) {
if (anyisrv(...)) {
simapply(cbind.rv(...), base::min, na.rm=na.rm)
} else {
base::min(..., na.rm=na.rm)
}
}
max.rv <- function(..., na.rm=FALSE) {
if (anyisrv(...)) {
simapply(cbind.rv(...), base::max, na.rm=na.rm)
} else {
base::max(..., na.rm=na.rm)
}
}
pmin.rv <- function(..., na.rm=FALSE) {
if (anyisrv(...)) {
a <- sims(cbind.rv(...), dimensions=TRUE)
rvsims(t(apply(a, 1, function (m) apply(m, 1, base::pmin))))
} else {
base::pmin(..., na.rm=na.rm)
}
}
pmax.rv <- function(..., na.rm=FALSE) {
if (anyisrv(...)) {
a <- sims(cbind.rv(...), dimensions=TRUE)
rvsims(t(apply(a, 1, function (m) apply(m, 1, base::pmax))))
} else {
base::pmax(..., na.rm=na.rm)
}
}
is.recursive.rv <- function (x) {
return(FALSE)
}
is.atomic.rv <- function (x) {
return(TRUE)
}
is.integer.rv <- function (x) {
return(is.rv(x) && all(rvsimapply(x, is.integer)))
}
is.logical.rv <- function (x) {
return(is.rv(x) && all(rvsimapply(x, is.logical)))
}
|
# Coefficient of variation of `x`, expressed as a percentage:
# 100 * sd(x) / mean(x).
CV.calc <- function(x){
  (sd(x) / mean(x)) * 100
}
# Filter the rows (features) of `data`:
#  1. keep rows whose Euclidean (L2) row norm lies strictly between
#     the `low_pct` and `high_pct` quantiles of all row norms;
#  2. if cv != 0, additionally keep only rows whose coefficient of
#     variation (CV.calc) strictly exceeds the `cv` quantile among
#     the surviving rows;
#  3. always re-add the rows named in `exclude`.
# Returns the selected rows of `data`, indexed by row name.
filter.fun<-function(data,low_pct,high_pct,cv,exclude){
norm_data=apply(data,1,function(x) sqrt(sum(x^2)) )
data_sub=data[(norm_data < quantile(norm_data , high_pct) & norm_data > quantile(norm_data,low_pct) ),]
if(cv!=0) {
dd_cv=apply(data_sub,1,CV.calc)
data_out_sub=data_sub[(dd_cv>quantile(dd_cv,cv)),]
}else{
data_out_sub=data_sub
}
# NOTE(review): names in `exclude` that are absent from rownames(data)
# would produce NA rows here -- confirm callers only pass valid names.
data_out=data[union(rownames(data_out_sub),exclude),]
return(data_out)
}
# Filter genes (rows of `data_in`) by expression level and variability:
#  1. keep rows whose mean exceeds `mean.low`;
#  2. if cv != 0, additionally keep only rows whose coefficient of
#     variation (CV.calc) strictly exceeds the `cv` quantile among
#     the surviving rows;
#  3. always re-add the rows named in `exclude` (e.g. marker genes).
# Returns the selected rows of `data_in`, indexed by row name.
filter.ENSG.fun<-function(data_in,mean.low,cv,exclude){
#data_in=data.mix;cv=0.5;exclude=unlist(MKS.list.order);mean.low=2^7
mean_data=apply(data_in,1,mean )
data_sub=data_in[(mean_data > mean.low ),]
if(cv!=0) {
dd_cv=apply(data_sub,1,CV.calc)
data_out_sub=data_sub[(dd_cv>quantile(dd_cv,cv)),]
}else{
data_out_sub=data_sub
}
data_out=data_in[union(rownames(data_out_sub),exclude),]
return(data_out)
}
|
/FUNS_new/data_filter.R
|
no_license
|
dhard/semi-CAM
|
R
| false
| false
| 976
|
r
|
# Coefficient of variation of `x`, expressed as a percentage:
# 100 * sd(x) / mean(x).
CV.calc <- function(x){
  (sd(x) / mean(x)) * 100
}
filter.fun<-function(data,low_pct,high_pct,cv,exclude){
norm_data=apply(data,1,function(x) sqrt(sum(x^2)) )
data_sub=data[(norm_data < quantile(norm_data , high_pct) & norm_data > quantile(norm_data,low_pct) ),]
if(cv!=0) {
dd_cv=apply(data_sub,1,CV.calc)
data_out_sub=data_sub[(dd_cv>quantile(dd_cv,cv)),]
}else{
data_out_sub=data_sub
}
data_out=data[union(rownames(data_out_sub),exclude),]
return(data_out)
}
filter.ENSG.fun<-function(data_in,mean.low,cv,exclude){
#data_in=data.mix;cv=0.5;exclude=unlist(MKS.list.order);mean.low=2^7
mean_data=apply(data_in,1,mean )
data_sub=data_in[(mean_data > mean.low ),]
if(cv!=0) {
dd_cv=apply(data_sub,1,CV.calc)
data_out_sub=data_sub[(dd_cv>quantile(dd_cv,cv)),]
}else{
data_out_sub=data_sub
}
data_out=data_in[union(rownames(data_out_sub),exclude),]
return(data_out)
}
|
\name{tm.divisiveWords}
\alias{tm.divisiveWords}
\title{Divisive Words}
\usage{
tm.divisiveWords(tm,species)
}
\description{
Returns words that separate a species from all other species
}
\examples{
tm.divisiveWords(tm.ecology,3,"Cryptococcus Neoformans")
}
|
/man/tm.divisiveWords.Rd
|
no_license
|
molikd/MicrobeAssociator
|
R
| false
| false
| 261
|
rd
|
\name{tm.divisiveWords}
\alias{tm.divisiveWords}
\title{Divisive Words}
\usage{
tm.divisiveWords(tm,species)
}
\description{
Returns words that separate a species from all other species
}
\examples{
tm.divisiveWords(tm.ecology,3,"Cryptococcus Neoformans")
}
|
#!/usr/bin/Rscript
## Script to run oncodriveClust
## Separate because I need to run all the samples together
setwd("/mnt/lustre/users/k1469280/mourikisa/data/OAC")
cat("Sourcing relevant functions...")
source("functions.R")
# First (and only) command-line argument: the output directory
OUT = commandArgs(trailingOnly = TRUE)[1]
## Mutations
# mainDirs = c("~/data/OAC/71_OAC/strelka/",
# "~/data/OAC/87_OAC/66_ICGC/strelka/")
mainDirs = c("/mnt/lustre/users/k1469280/mourikisa/data/OAC/87_OAC/21_literature/strelka/")
message("Getting mutations...")
# Collect the per-sample annotated mutation tables into one list
all_muts = list()
count = 0
for(b in mainDirs){
samples = list.dirs(b, recursive = F)
for(s in samples){
cat(s, "\n")
muts_fn = paste0(s, "/parsing_and_annotation/annovar/muts_annovar_dbnsfp_19014.Rdata")
load(muts_fn)
sname = unlist(strsplit(s, "/"))
sname = sname[length(sname)]
all_muts[[sname]] = muts %>% mutate(sample=sname)
count = count +1
}
}
cat(paste0("Samples: ", count), "\n")
# Combine per-sample tables (rbind.fill pads missing columns with NA)
muts = do.call(rbind.fill, all_muts)
save(muts, file=paste0(OUT, "/Rdata/muts_129_66_71_OACs_annovar_dbnsfp_19014.Rdata"))
## Add also the 129 from before (uncomment the following two lines if you run the main cohort)
#message("Getting 129 previous samples...")
#load("~/data/OAC/129_OAC/Rdata/mutations_annotated_19014.Rdata")
## If you want one table (uncomment the following two lines if you run the main cohort)
#muts2 = do.call(rbind.fill, muts)
#muts2 = muts2 %>% select(-oncodriveClust)
## Fix the names of the samples (uncomment the following three lines if you run the main cohort)
#load("~/data/OAC/129_OAC/Rdata/mainCohort.Rdata")
#mainCohort = mainCohort %>% select(directory, Primary_tumour_solid_tissue) %>% rename(sample=Primary_tumour_solid_tissue)
#muts2 = muts2 %>% left_join(mainCohort) %>% select(-sample) %>% rename(sample=directory)
#m = rbind.fill(muts1, muts2)
m = muts
#rm(muts1)
#rm(muts2)
# Free memory before the clustering run
rm(muts)
rm(all_muts)
muts = runOncodriveClust(muts=m, save_dir=paste0(OUT, "/oncodriveClust"))
save(muts, file=paste0(OUT, "/Rdata/muts_129_66_71_OACs_annovar_dbnsfp_19014_oncodriveClust.Rdata"))
|
/raw_scripts/runOncodriveClust.R
|
no_license
|
ciccalab/sysSVM
|
R
| false
| false
| 2,096
|
r
|
#!/usr/bin/Rscript
## Script to run oncodriverClust
## Separate because I need to run all the samples together
setwd("/mnt/lustre/users/k1469280/mourikisa/data/OAC")
cat("Sourcing relevant functions...")
source("functions.R")
OUT = commandArgs(trailingOnly = TRUE)[1]
## Mutations
# mainDirs = c("~/data/OAC/71_OAC/strelka/",
# "~/data/OAC/87_OAC/66_ICGC/strelka/")
mainDirs = c("/mnt/lustre/users/k1469280/mourikisa/data/OAC/87_OAC/21_literature/strelka/")
message("Getting mutations...")
all_muts = list()
count = 0
for(b in mainDirs){
samples = list.dirs(b, recursive = F)
for(s in samples){
cat(s, "\n")
muts_fn = paste0(s, "/parsing_and_annotation/annovar/muts_annovar_dbnsfp_19014.Rdata")
load(muts_fn)
sname = unlist(strsplit(s, "/"))
sname = sname[length(sname)]
all_muts[[sname]] = muts %>% mutate(sample=sname)
count = count +1
}
}
cat(paste0("Samples: ", count), "\n")
muts = do.call(rbind.fill, all_muts)
save(muts, file=paste0(OUT, "/Rdata/muts_129_66_71_OACs_annovar_dbnsfp_19014.Rdata"))
## Add also the 129 from before (uncomment the following two lines if you run the main cohort)
#message("Getting 129 previous samples...")
#load("~/data/OAC/129_OAC/Rdata/mutations_annotated_19014.Rdata")
## If you want one table (uncomment the following two lines if you run the main cohort)
#muts2 = do.call(rbind.fill, muts)
#muts2 = muts2 %>% select(-oncodriveClust)
## Fix the names of the samples (uncomment the following three lines if you run the main cohort)
#load("~/data/OAC/129_OAC/Rdata/mainCohort.Rdata")
#mainCohort = mainCohort %>% select(directory, Primary_tumour_solid_tissue) %>% rename(sample=Primary_tumour_solid_tissue)
#muts2 = muts2 %>% left_join(mainCohort) %>% select(-sample) %>% rename(sample=directory)
#m = rbind.fill(muts1, muts2)
m = muts
#rm(muts1)
#rm(muts2)
rm(muts)
rm(all_muts)
muts = runOncodriveClust(muts=m, save_dir=paste0(OUT, "/oncodriveClust"))
save(muts, file=paste0(OUT, "/Rdata/muts_129_66_71_OACs_annovar_dbnsfp_19014_oncodriveClust.Rdata"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotJPEG.R
\name{plotJPEG}
\alias{plotJPEG}
\title{Plot JPEG file using base graphics}
\usage{
plotJPEG(input_jpeg, add = FALSE)
}
\arguments{
\item{input_jpeg}{string. Input filename of the JPEG picture.}

\item{add}{logical. If \code{TRUE}, the image is added to the current plot instead of creating a new one (default \code{FALSE}).}
}
\description{
Internal function to plot a JPEG file in R
}
|
/man/plotJPEG.Rd
|
no_license
|
yangxhcaf/GWRFC
|
R
| false
| true
| 340
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotJPEG.R
\name{plotJPEG}
\alias{plotJPEG}
\title{Plot JPEG file using base graphics}
\usage{
plotJPEG(input_jpeg, add = FALSE)
}
\arguments{
\item{input_jpeg}{string. Input filename of the JPEG picture.}

\item{add}{logical. If \code{TRUE}, the image is added to the current plot instead of creating a new one (default \code{FALSE}).}
}
\description{
Internal function to plot a JPEG file in R
}
|
# Auto-generated fuzz/regression input for DLMtool::LBSPRgen: extreme
# and denormal numeric values plus zero-length vectors to stress
# argument handling (run under valgrind as a crash check).
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 1.24944110113233e-310, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
# Inspect whatever comes back
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827406-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 487
|
r
|
# Auto-generated fuzz/regression input for DLMtool::LBSPRgen: extreme
# and denormal numeric values plus zero-length vectors to stress
# argument handling (run under valgrind as a crash check).
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 1.24944110113233e-310, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
# Inspect whatever comes back
str(result)
|
# Basic R commands and usage
# 30 January 2018
# LAP
library(ggplot2)
#Using the assignment operator
x <- 5 # preferred
print(x)
y = 4 # legal but not used except in function
y = y+1.1
y <- y+1
plantHeight <- 5.5
# Class Feb 1 2018
# the combine function
z<-c(3,7,7,10) # simple atomic vector
print(z)
typeof(z) # get the variable type
# double means numeric value
str(z) # structure function of the varaible (gives us numeric and range)
is.numeric(z) # logical test for variable type
is.character(z)
# c always "flattens" to an atomic vector
z<-c(c(3,4),c(5,6))
print(z)
# character stirngs with single or doubel quotes
z<-c("perc", "bass", "trout", "red snapper")
print(z)
# use both quoute types ofr an internal quote
z<-c("THis is only 'one' character strong", 'a second string')
str(z)
# logical TRUE FALSE
z<-c(TRUE, TRUE, FALSE)
is.numeric(z)
is.logical(z)
# Three properties of atomic vectors
# Type of atomic vector
z<- c(1.1,2.2,3.2)
typeof(z)
is.numeric(z)
# Length of vector
length(z)
# Name of vector elements (option)
z<-runif(5) #random unifor variable (0,1)
#add names after variable is created
names(z)<-c("chow","pug","beagle","greyhound","akita")
print(z)
# add names when variable is built
z2<-c(gold=3.3,silver=10,lead=2)
print(z2)
names(z2)
print(z2)
names(z2)<-c("copper","zinc")
print(z2)
# special data values
# NA for missing values
z<-c(3.22,3.3,NA)
length(z)
typeof(z[3])
#missing values can trip up basic functions
mean(z) # does not work
is.na(z) #checks for missing
!is.na(z) # ! is the NOT
mean(!is.na(z)) # WRONG! calc mean false setting
mean(z[!is.na(z)]) # do it this way
#-------------------------
# NaN, Inf, -Inf
#bad values/results that come from numeric calculations
z<-0/0
print(z)
z<-1/0
print(z)
z<- -1/0
print(z)
z<-1/0
typeof(z)
#------------------
#Null is an object that is nothing!
z<-NULL
typeof(z)
length(z)
# Three properties of atomic vectors
# Coercion
a<-c(2.1,2.2)
typeof(a)
b<-c("purple","green")
typeof(b)
d <- c(a,b)
print(d)
# hierarchy of conversion
# logical->integers->double->character
a <- runif(10)
print(a)
a > 0.5 # logical operation
temp <- a >0.5 #hold these logical values
sum(temp)
# what proportion of the values are >0.5
mean(a > 0.5)
# qualifying exam question: approximately proportion of oberscations from a normal (0,1) variable are > 2.0
mean(rnorm(1000000)>2.0)
#----------------- Vectorization
z<-c(10,20,30)
z + 1
y<-c(1,2,3)
z+y
short<-c(1,2)
z+short #what happens?
z^2
# creating vectors
# create an empty vector
z<-vector(mode="numeric",length=0)
print(z)
# add elements to empty vector
z<-c(z,5) # dont do this in your code
print(z)
# instead create a vector of pre-defined length
z<-rep(0,100)
z[1]<-3.3
z <- rep(NA,100)
head(z)
typeof(z)
z[c(1:20)]<-c("Washington",2.2)
typeof(z)
head(z)
z[1:30]
# generate a long list of names
myVector<-runif(100) # get 100 random uniform values
myNames<-paste("File",seq(1:length(myVector)),".txt",sep="")
head(myNames)
names(myVector) <- myNames
head(myVector)
# using rep to repeat elements and create vectors
#-------------------------------------End of Feb 1 2018
# using rep to repeat elements and create vectors
rep(0.5,6)
rep("mystring",3)
rep(x=0.5,times=6)          # naming the arguments...
rep(times=6,x=0.5)          # ...lets them appear in any order
# seq for creating sequences
seq(from=2,to=4)
myVec<-c(1,2,3)
rep(myVec,times=2)          # whole vector repeated end-to-end
rep(x=myVec,each=2)         # each element repeated in place
rep(x=myVec,times=myVec)    # per-element repeat counts
rep(x=1:5,times=1:5)
# seq for creating sequences
seq(from=2,to=4)
seq(from=2,to=4,by=0.5)
seq(from=2,to=4, length=7)
x<-seq(from=2,to=4, length=7)
1:length(x)
seq_along(x) # faster, better
seq_len(10)
x<-vector(mode="numeric",length=0)
str(x)
1:length(x)    # danger: gives c(1, 0) for a zero-length vector
seq_along(x)   # correctly gives integer(0)
#using random numbers
runif(1)
set.seed(100)  # fix the seed so the random draws are reproducible
runif(1)
runif(n=5,min=100,max=200)
library(ggplot2)
z<-runif(n=1000,min=30,max=300)  # fixed: was mi=30 (silent partial argument match); spell out min=
qplot(x=z)
# random normal distribution
z<-rnorm(1000)
qplot(x=z)
z<-rnorm(n=1000,mean=30,sd=20)
qplot(x=z)
# use the sample function to draw from an existing vector
longVec<-seq_len(10)
longVec
sample(x=longVec)                      # permutation of the whole vector
sample(x=longVec,size=3) #sample without replacement
sample(x=longVec,size=3,replace=TRUE)  # sampling with replacement
myWeights<-c(rep(20,5),rep(100,5))
myWeights
sample(x=longVec,replace=TRUE,prob=myWeights)   # weighted sampling
sample(x=longVec,replace=FALSE,prob=myWeights)
# subsetting of atomic vectors
z<-c(3.1,9.2,1.3,0.4,7.5)
# subsetting on positive index values
z[2]
z[c(2,3)]
# subset on negative index values
z[-c(2,3)]
# subset by creating a boolean vector to select elements that meet a condition
z<3
z[z<3]
which(z<3)   # which() converts the logical mask into integer positions
myCriteria<-z<3
z[myCriteria]
z[which(z<3)]
zx<-c(NA,z)
zx[zx<3] # missing values retained (an NA logical subscript yields NA)
zx[which(zx<3)] # missing values dropped (which() ignores NA)
# keep entire vector
z[]
z[-(length(z):(length(z)-2))]   # drop the last three elements
# subset on names of vector elements
z
names(z)<-letters[seq_along(z)]
print(z)
z[c("b","d","e")]
# arithmetic operations
10+3
10-3
10*3
10/3
10^3
log(10)    # natural log
log10(10)  # base-10 log
# modulus operator (remainder of division)
10%%3
# integer division
10%/%3
# generate the set of all numbers from 1 to 100 that are divisible by 9
q<-seq_len(100)
q[q%%9==0]
#---------------------------------End of Feb 6 2018
# Relational operators
# all return a boolean (TRUE or FALSE values)
3<4
3>5:7        # the scalar is recycled: compares 3 to each of 5, 6, 7
3>=3
3<=3
3<=4
3==4
3=4 # throws an error: = is assignment, == is comparison
3!=4
# set operators
# compare two atomic vectors and return one atomic vector
# always strip out duplicate elements
# before the comparison
i<-c(1,1:7)
print(i)
j<-3:10
print(j)
union(i,j) # all of the elements
intersect(i,j) # common elements
setdiff(i,j) # unique elements of i not in j
setdiff(j,i) # unique elements of j not in i
# set operators that return a single boolean
setequal(i,j)
setequal(i,i)
is.element(i,j) # compare elements in i to j (ORDER DOES NOT MATTER)
i %in% j #(does the same thing as the line above)
# logical operators
z<-10:20
z<15
z<20&z>17 # AND operator (element-wise)
z<20| z>17 # OR operator (element-wise)
###End of Atomic Vector - must be same type of elements, 1 dimension
### More basic coding tools for matrices and lists
## Feb 8 2018
library(ggplot2)
# create a matrix from an atomic vector
# matrix has 2 dimensions; it is an atomic vector with another way to refer to it
m<-matrix(data=1:12,nrow=4) #minimal info needed for matrix (ncol is inferred)
m
m<-matrix(data=1:12,nrow=4, byrow = TRUE) #matrix numbers entered by row not by column
m
dim(m)
dim(m)<-c(6,2) # alter dimension (same 12 elements, refilled column-wise)
m
dim(m)<-c(4,3)
m
nrow(m)
ncol(m)
length(m)   # total number of elements, not number of rows
# add names to rows, columns
rownames(m)<-c("a","b","c","d")
m
colnames(m)<-LETTERS[1:ncol(m)] # one capital letter per column
m
# subsetting matrix values
print(m[2,3])
print(m["b","C"])   # can also index by the row/column names
print(m[2, ])# must put space for blank to get all columns from second row
print(m[,2]) # although printed horizontally, these are all rows from the second column
print(m[,])
print(m[c(1,4),c(1,3)])
rownames(m)<-paste("Species",LETTERS[1:nrow(m)],sep="")
m
colnames(m)<-paste("Site",1:ncol(m),sep="")
m
# add names through the dimnames command with a list (rows, then columns)
dimnames(m)<-list(paste("Site",1:nrow(m),sep=""),paste("Species",ncol(m):1,sep = ""))
m
t(m) # simply switch rows and columns (transpose function)
# add a row to m with rbind
m2<-t(m)
m2
m2<-rbind(m2,c(10,20,30,40))
m2
rownames(m2)
rownames(m2)[4]<-"Species X"
m2
m2["Species X",c("Site3","Site4")]
# can always convert this back to an atomic vector
myVec<-as.vector(m)
# Lists are vectors but each element
# can hold things of different sizes and different types
myList<-list(1:10,matrix(1:8,nrow=4,byrow=TRUE),letters[1:3],pi)
myList
str(myList)
# lists dont behave as you think they should
myList[4]-3 #ERROR: single brackets return a one-element list, not the value
myList[4]
str(myList[4])
myList[[4]]  # double brackets extract the element itself
# combine single and double brackets to access items
myList[[2]]
myList[[2]][4,1]
# name list items as we create them
myList2<-list(Tester=FALSE,littleM=matrix(1:9,nrow=3))
myList2$littleM[2,3] # get row 2, column 3
myList2[["Tester"]]
m
m[1]   # without a comma, the matrix is indexed like the underlying vector
m[4]
##------------------End-----------
#Feb 13 2018
#Regular Expressions - are find and replace on steroids
# on bbedit & typora
|
/FirstProject/BasicCoding.R
|
no_license
|
lpett/Bio381_2018
|
R
| false
| false
| 7,907
|
r
|
# Basic R commands and usage
# 30 January 2018
# LAP
library(ggplot2)
#Using the assignment operator
x <- 5 # preferred
print(x)
y = 4 # legal, but by convention = is reserved for function arguments
y = y+1.1
y <- y+1
plantHeight <- 5.5
# Class Feb 1 2018
# the combine function
z<-c(3,7,7,10) # simple atomic vector
print(z)
typeof(z) # get the variable type
# double means numeric value
str(z) # structure function of the variable (gives us type and a preview of values)
is.numeric(z) # logical test for variable type
is.character(z)
# c always "flattens" to an atomic vector
z<-c(c(3,4),c(5,6))
print(z)
# character strings with single or double quotes
z<-c("perc", "bass", "trout", "red snapper")
print(z)
# use both quote types for an internal quote
z<-c("THis is only 'one' character strong", 'a second string')
str(z)
# logical TRUE FALSE
z<-c(TRUE, TRUE, FALSE)
is.numeric(z)
is.logical(z)
# Three properties of atomic vectors
# Type of atomic vector
z<- c(1.1,2.2,3.2)
typeof(z)
is.numeric(z)
# Length of vector
length(z)
# Name of vector elements (optional)
z<-runif(5) #random uniform variable on (0,1)
#add names after variable is created
names(z)<-c("chow","pug","beagle","greyhound","akita")
print(z)
# add names when variable is built
z2<-c(gold=3.3,silver=10,lead=2)
print(z2)
names(z2)
print(z2)
names(z2)<-c("copper","zinc") # only two names for three elements: the third name becomes NA
print(z2)
# special data values
# NA for missing values
z<-c(3.22,3.3,NA)
length(z)
typeof(z[3])
#missing values can trip up basic functions
mean(z) # does not work: returns NA because NA propagates
is.na(z) #checks for missing
!is.na(z) # ! is the NOT
mean(!is.na(z)) # WRONG! computes the mean of the TRUE/FALSE mask, not of z
mean(z[!is.na(z)]) # do it this way: drop the NAs first, then take the mean
#-------------------------
# NaN, Inf, -Inf
# bad values/results that come from numeric calculations
z<-0/0
print(z)   # NaN ("not a number")
z<-1/0
print(z)   # Inf
z<- -1/0
print(z)   # -Inf
z<-1/0
typeof(z)  # still "double": Inf is a valid double value
#------------------
# NULL is an object that is nothing!
z<-NULL
typeof(z)
length(z)  # NULL has length 0
# Three properties of atomic vectors
# Coercion
a<-c(2.1,2.2)
typeof(a)
b<-c("purple","green")
typeof(b)
d <- c(a,b)
print(d)   # mixing doubles with characters coerces everything to character
# hierarchy of conversion
# logical->integers->double->character
a <- runif(10)
print(a)
a > 0.5 # logical operation
temp <- a >0.5 #hold these logical values
sum(temp)  # TRUE/FALSE coerce to 1/0, so sum() counts the TRUEs
# what proportion of the values are >0.5
mean(a > 0.5)
# qualifying exam question: approximately what proportion of observations from a normal (0,1) variable are > 2.0?
mean(rnorm(1000000)>2.0)
#----------------- Vectorization
z<-c(10,20,30)
z + 1          # the scalar is recycled across the whole vector
y<-c(1,2,3)
z+y            # element-wise addition of equal-length vectors
short<-c(1,2)
z+short #what happens? the shorter vector is recycled, with a warning (lengths do not divide evenly)
z^2
# creating vectors
# create an empty vector
z<-vector(mode="numeric",length=0)
print(z)
# add elements to empty vector
z<-c(z,5) # dont do this in your code (growing a vector copies it on every append)
print(z)
# instead create a vector of pre-defined length
z<-rep(0,100)
z[1]<-3.3
z <- rep(NA,100)
head(z)
typeof(z)      # "logical": a bare NA is logical until another type forces coercion
z[c(1:20)]<-c("Washington",2.2)
typeof(z)      # now "character": assigning a string coerced the whole vector
head(z)
z[1:30]
# generate a long list of names
myVector<-runif(100) # get 100 random uniform values
myNames<-paste("File",seq_along(myVector),".txt",sep="")  # fixed: seq_along() replaces the redundant seq(1:length(...))
head(myNames)
names(myVector) <- myNames
head(myVector)
# using rep to repeat elements and create vectors
#-------------------------------------End of Feb 1 2018
# using rep to repeat elements and create vectors
rep(0.5,6)
rep("mystring",3)
rep(x=0.5,times=6)          # naming the arguments...
rep(times=6,x=0.5)          # ...lets them appear in any order
# seq for creating sequences
seq(from=2,to=4)
myVec<-c(1,2,3)
rep(myVec,times=2)          # whole vector repeated end-to-end
rep(x=myVec,each=2)         # each element repeated in place
rep(x=myVec,times=myVec)    # per-element repeat counts
rep(x=1:5,times=1:5)
# seq for creating sequences
seq(from=2,to=4)
seq(from=2,to=4,by=0.5)
seq(from=2,to=4, length=7)
x<-seq(from=2,to=4, length=7)
1:length(x)
seq_along(x) # faster, better
seq_len(10)
x<-vector(mode="numeric",length=0)
str(x)
1:length(x)    # danger: gives c(1, 0) for a zero-length vector
seq_along(x)   # correctly gives integer(0)
#using random numbers
runif(1)
set.seed(100)  # fix the seed so the random draws are reproducible
runif(1)
runif(n=5,min=100,max=200)
library(ggplot2)
z<-runif(n=1000,min=30,max=300)  # fixed: was mi=30 (silent partial argument match); spell out min=
qplot(x=z)
# random normal distribution
z<-rnorm(1000)
qplot(x=z)
z<-rnorm(n=1000,mean=30,sd=20)
qplot(x=z)
# use the sample function to draw from an existing vector
longVec<-seq_len(10)
longVec
sample(x=longVec)                      # permutation of the whole vector
sample(x=longVec,size=3) #sample without replacement
sample(x=longVec,size=3,replace=TRUE)  # sampling with replacement
myWeights<-c(rep(20,5),rep(100,5))
myWeights
sample(x=longVec,replace=TRUE,prob=myWeights)   # weighted sampling
sample(x=longVec,replace=FALSE,prob=myWeights)
# subsetting of atomic vectors
z<-c(3.1,9.2,1.3,0.4,7.5)
# subsetting on positive index values
z[2]
z[c(2,3)]
# subset on negative index values
z[-c(2,3)]
# subset by creating a boolean vector to select elements that meet a condition
z<3
z[z<3]
which(z<3)   # which() converts the logical mask into integer positions
myCriteria<-z<3
z[myCriteria]
z[which(z<3)]
zx<-c(NA,z)
zx[zx<3] # missing values retained (an NA logical subscript yields NA)
zx[which(zx<3)] # missing values dropped (which() ignores NA)
# keep entire vector
z[]
z[-(length(z):(length(z)-2))]   # drop the last three elements
# subset on names of vector elements
z
names(z)<-letters[seq_along(z)]
print(z)
z[c("b","d","e")]
# arithmetic operations
10+3
10-3
10*3
10/3
10^3
log(10)    # natural log
log10(10)  # base-10 log
# modulus operator (remainder of division)
10%%3
# integer division
10%/%3
# generate the set of all numbers from 1 to 100 that are divisible by 9
q<-seq_len(100)
q[q%%9==0]
#---------------------------------End of Feb 6 2018
# Relational operators
# all return a boolean (TRUE or FALSE values)
3<4
3>5:7        # the scalar is recycled: compares 3 to each of 5, 6, 7
3>=3
3<=3
3<=4
3==4
3=4 # throws an error: = is assignment, == is comparison
3!=4
# set operators
# compare two atomic vectors and return one atomic vector
# always strip out duplicate elements
# before the comparison
i<-c(1,1:7)
print(i)
j<-3:10
print(j)
union(i,j) # all of the elements
intersect(i,j) # common elements
setdiff(i,j) # unique elements of i not in j
setdiff(j,i) # unique elements of j not in i
# set operators that return a single boolean
setequal(i,j)
setequal(i,i)
is.element(i,j) # compare elements in i to j (ORDER DOES NOT MATTER)
i %in% j #(does the same thing as the line above)
# logical operators
z<-10:20
z<15
z<20&z>17 # AND operator (element-wise)
z<20| z>17 # OR operator (element-wise)
###End of Atomic Vector - must be same type of elements, 1 dimension
### More basic coding tools for matrices and lists
## Feb 8 2018
library(ggplot2)
# create a matrix from an atomic vector
# matrix has 2 dimensions; it is an atomic vector with another way to refer to it
m<-matrix(data=1:12,nrow=4) #minimal info needed for matrix (ncol is inferred)
m
m<-matrix(data=1:12,nrow=4, byrow = TRUE) #matrix numbers entered by row not by column
m
dim(m)
dim(m)<-c(6,2) # alter dimension (same 12 elements, refilled column-wise)
m
dim(m)<-c(4,3)
m
nrow(m)
ncol(m)
length(m)   # total number of elements, not number of rows
# add names to rows, columns
rownames(m)<-c("a","b","c","d")
m
colnames(m)<-LETTERS[1:ncol(m)] # one capital letter per column
m
# subsetting matrix values
print(m[2,3])
print(m["b","C"])   # can also index by the row/column names
print(m[2, ])# must put space for blank to get all columns from second row
print(m[,2]) # although printed horizontally, these are all rows from the second column
print(m[,])
print(m[c(1,4),c(1,3)])
rownames(m)<-paste("Species",LETTERS[1:nrow(m)],sep="")
m
colnames(m)<-paste("Site",1:ncol(m),sep="")
m
# add names through the dimnames command with a list (rows, then columns)
dimnames(m)<-list(paste("Site",1:nrow(m),sep=""),paste("Species",ncol(m):1,sep = ""))
m
t(m) # simply switch rows and columns (transpose function)
# add a row to m with rbind
m2<-t(m)
m2
m2<-rbind(m2,c(10,20,30,40))
m2
rownames(m2)
rownames(m2)[4]<-"Species X"
m2
m2["Species X",c("Site3","Site4")]
# can always convert this back to an atomic vector
myVec<-as.vector(m)
# Lists are vectors but each element
# can hold things of different sizes and different types
myList<-list(1:10,matrix(1:8,nrow=4,byrow=TRUE),letters[1:3],pi)
myList
str(myList)
# lists dont behave as you think they should
myList[4]-3 #ERROR: single brackets return a one-element list, not the value
myList[4]
str(myList[4])
myList[[4]]  # double brackets extract the element itself
# combine single and double brackets to access items
myList[[2]]
myList[[2]][4,1]
# name list items as we create them
myList2<-list(Tester=FALSE,littleM=matrix(1:9,nrow=3))
myList2$littleM[2,3] # get row 2, column 3
myList2[["Tester"]]
m
m[1]   # without a comma, the matrix is indexed like the underlying vector
m[4]
##------------------End-----------
#Feb 13 2018
#Regular Expressions - are find and replace on steroids
# on bbedit & typora
|
#server
# Raise the Shiny upload limit to 10000 MiB (~10 GB) so large observed/simulated
# VPC data files can be uploaded; the default limit is only 5 MB.
options(shiny.maxRequestSize=10000*1024^2)
server <- function(input, output, session) {
fileObs <- callModule(file_observed_server, "obs")
fileSim <- callModule(file_simulated_server, "sim")
observeEvent(fileObs(),{
updateSelectizeInput(session, inputId = "yvar", choices = names(fileObs()))
updateSelectizeInput(session, inputId = "xvar", choices = names(fileObs()))
updateSelectizeInput(session, inputId = "stratvar", choices = names(fileObs()))
updateSelectizeInput(session, inputId = "predvar", choices = names(fileObs()))
updateSelectizeInput(session, inputId = "censorvar", choices = names(fileObs()))
})
binlessInputs <- callModule(binless_inputs, "binlessInputs1")
userQuantiles <- callModule(quantiles_server, "qpred1")
confidenceInterval <- callModule(confidence_interval_server, "ci1")
stratlist <- reactive({
input$stratvar
})
stratdata <- reactive({
subset(fileObs(), select = stratlist())
})
output$stratTable <- renderTable({
lapply(stratdata(), unique)
})
stratnamelvl <- reactive({
l <- lapply(stratdata(), unique)
stratlevels <- as.character(unlist(l))
stratname <- names(unlist(l))
stratname <- gsub('[[:digit:]]+', '', stratname)
name <- sort(paste0(stratname, stratlevels))
return(name)
})
userStratNames <- reactive({
dt <- vector("list", length = length(stratnamelvl()))
for (i in seq_along(stratnamelvl())) {
dt[[i]] <- eval(parse(text = paste0("input$", stratnamelvl()[[i]])))
}
dt <- as.data.table(dt)
setnames(dt, stratnamelvl())
})
userStratLvl <- metaReactive({
as.data.table(..(lapply(userStratNames(), getnum)))
})
output$lamTable <- renderTable({
req(userStratNames())
setnames(userStratNames(), stratnamelvl())
})
t <- reactive({
req(input$stratvar)
l <- render_lam_strat(stratlist(), stratdata())
})
tt <- reactive({
req(input$stratvar)
ll <- render_strat_binning(stratlist(), stratdata())
})
output$stratpanels <- renderUI({
tagList(
tags$h5("Select Binning Method by Strata"),
tt())
})
output$stratLambdas <- renderUI({
tagList(
tags$h5("Select Lambda Smoothing by Strata"),
t(),
tags$h6("(Lower, Median, Upper)")
)
})
# userLamStrat <- metaReactive({
# data.table(
# group0 = c(..(input$lambdaStratLo0_1), ..(input$lambdaStratMed0_1), ..(input$lambdaStratHi0_1)),
# group1 = c(..(input$lambdaStratLo1_1), ..(input$lambdaStratMed1_1), ..(input$lambdaStratHi1_1))
# )
# })
stratdata <- reactive({
subset(fileObs(), select = input$stratvar)
})
namesstrat <- reactive({
names(stratdata())
})
stratlvl <- reactive({
l <- lapply(stratdata(), unique)
l <- lapply(l, sort)
})
vpc <- metaReactive2({
req(input$buttonPlot)
isolate({
if(length(input$stratvar > 1)) {
strata <- paste(input$stratvar, collapse = " + ")
form <- formula(paste0("~", strata))
} else if(length(input$stratvar == 1)) {
strata <- input$stratvar
form <- formula(paste0("~", strata))
} else {
form <- NULL
}
vpcUser <- metaExpr({
observed(..(fileObs()), x= !!rlang::sym(..(input$xvar)), y= !!rlang::sym(..(input$yvar))) %>%
simulated(..(fileSim()), y= !!rlang::sym(..(input$yvar)))
})
if(input$isCensoring) {
if (input$censorType == "Value") {
req(input$userLLOQ)
vpcUser <- metaExpr({
..(vpcUser) %>%
censoring(blq = !!rlang::sym(..(input$yvar)) < ..(input$userLLOQ), lloq = ..(input$userLLOQ))
})
} else {
req(input$censorvar)
vpcUser <- metaExpr({
..(vpcUser) %>%
censoring(blq = !!rlang::sym(..(input$yvar)) < !!rlang::sym(..(input$censorvar)), lloq = !!rlang::sym(..(input$censorvar)))
})
}
}
if(!is.null(form)) {
req(input$stratvar)
vpcUser <- metaExpr({
..(vpcUser) %>%
stratify(..(form))
})
}
if (input$typeVPC == "Binning" && !input$isBinStrat) {
if(input$typeBinning == "x-variable")
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(bin =!!rlang::sym(..(input$xvar)))
})
if (input$typeBinning == "centers") {
centers <- as.numeric(unlist(strsplit(input$centers, split = ",")))
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(bin = "centers", centers = ..(centers))
})
}
if(input$typeBinning == "breaks") {
breaks <- as.numeric(unlist(strsplit(input$breaks, split = ",")))
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(bin = "breaks", breaks = ..(breaks))
})
}
if(input$typeBinning == "breaks") {
breaks <- as.numeric(unlist(strsplit(input$breaks, split = ",")))
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(bin = "breaks", breaks = ..(breaks))
})
}
if(is_false(input$typeBinning %in% c("breaks","centers","x-variable"))) {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(bin = ..(input$typeBinning), nbins = ..(input$nbins))
})
}
}
#Different binning by strata
if (input$typeVPC == "Binning" && input$isBinStrat && !is.null(form)) { # && !input$isPred && input$isBinStrat && !is.null(form)) {
#1 Strat variable, 2 level
if (length(stratlvl()[[1]]) == 2) {
b1 <- input$typeBinning1
b2 <- input$typeBinning2
l1 <- list(stratlvl()[[1]][[1]])
l2 <- list(stratlvl()[[1]][[2]])
names(l1) <- namesstrat()
names(l2) <- namesstrat()
if(b1 == "centers") {
centers1 <- as.numeric(unlist(strsplit(input$centers1, split = ",")))
} else {
centers1 <- NULL
}
if(b1 == "breaks") {
breaks1 <- as.numeric(unlist(strsplit(input$breaks1, split = ",")))
} else {
breaks1 <- NULL
}
if(b2 == "centers") {
centers2 <- as.numeric(unlist(strsplit(input$centers2, split = ",")))
} else {
centers2 <- NULL
}
if(b2 == "breaks") {
breaks2 <- as.numeric(unlist(strsplit(input$breaks2, split = ",")))
} else {
breaks2 <- NULL
}
if(b1 == "x-variable" && b2!="x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T) %>%
binning(stratum = ..(l2), bin = ..(b2), xbin = ..(input$midPoint), nbins = ..(input$nbins2), centers = ..(centers2), breaks = ..(breaks2), by.strata = T)
})
} else if(b1 != "x-variable" && b2 =="x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = ..(b1), xbin = ..(input$midPoint), nbins = ..(input$nbins1), centers = ..(centers1), breaks = ..(breaks1), by.strata = T) %>%
binning(stratum = ..(l2), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else if(b1 == "x-variable" && b2 =="x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T) %>%
binning(stratum = ..(l2), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = ..(b1), xbin = ..(input$midPoint), nbins = ..(input$nbins1), centers = ..(centers1), breaks = ..(breaks1), by.strata = T) %>%
binning(stratum = ..(l2), bin = ..(b2), xbin = ..(input$midPoint), nbins = ..(input$nbins2), centers = ..(centers2), breaks = ..(breaks2), by.strata = T)
})
}
}
if (length(stratlvl()[[1]]) == 3) {
b1 <- input$typeBinning1
b2 <- input$typeBinning2
b3 <- input$typeBinning3
l1 <- list(stratlvl()[[1]][[1]])
l2 <- list(stratlvl()[[1]][[2]])
l3 <- list(stratlvl()[[1]][[3]])
names(l1) <- namesstrat()
names(l2) <- namesstrat()
names(l3) <- namesstrat()
if(b1 == "centers") {
centers1 <- as.numeric(unlist(strsplit(input$centers1, split = ",")))
} else {
centers1 <- NULL
}
if(b1 == "breaks") {
breaks1 <- as.numeric(unlist(strsplit(input$breaks1, split = ",")))
} else {
breaks1 <- NULL
}
if(b2 == "centers") {
centers2 <- as.numeric(unlist(strsplit(input$centers2, split = ",")))
} else {
centers2 <- NULL
}
if(b2 == "breaks") {
breaks2 <- as.numeric(unlist(strsplit(input$breaks2, split = ",")))
} else {
breaks2 <- NULL
}
if(b3 == "centers") {
centers3 <- as.numeric(unlist(strsplit(input$centers3, split = ",")))
} else {
centers3 <- NULL
}
if(b3 == "breaks") {
breaks3 <- as.numeric(unlist(strsplit(input$breaks3, split = ",")))
} else {
breaks3 <- NULL
}
if(b1 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = ..(b1), xbin = ..(input$midPoint), nbins = ..(input$nbins1), centers = ..(centers1), breaks = ..(breaks1), by.strata = T)
})
}
if(b2 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l2), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l2), bin = ..(b2), xbin = ..(input$midPoint), nbins = ..(input$nbins2), centers = ..(centers2), breaks = ..(breaks2), by.strata = T)
})
}
if(b3 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l3), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l3), bin = ..(b3), xbin = ..(input$midPoint), nbins = ..(input$nbins3), centers = ..(centers3), breaks = ..(breaks3), by.strata = T)
})
}
}
if (length(stratlvl()[[1]]) == 4) {
b1 <- input$typeBinning1
b2 <- input$typeBinning2
b3 <- input$typeBinning3
b4 <- input$typeBinning4
l1 <- list(stratlvl()[[1]][[1]])
l2 <- list(stratlvl()[[1]][[2]])
l3 <- list(stratlvl()[[1]][[3]])
l4 <- list(stratlvl()[[1]][[4]])
names(l1) <- namesstrat()
names(l2) <- namesstrat()
names(l3) <- namesstrat()
names(l4) <- namesstrat()
if(b1 == "centers") {
centers1 <- as.numeric(unlist(strsplit(input$centers1, split = ",")))
} else {
centers1 <- NULL
}
if(b1 == "breaks") {
breaks1 <- as.numeric(unlist(strsplit(input$breaks1, split = ",")))
} else {
breaks1 <- NULL
}
if(b2 == "centers") {
centers2 <- as.numeric(unlist(strsplit(input$centers2, split = ",")))
} else {
centers2 <- NULL
}
if(b2 == "breaks") {
breaks2 <- as.numeric(unlist(strsplit(input$breaks2, split = ",")))
} else {
breaks2 <- NULL
}
if(b3 == "centers") {
centers3 <- as.numeric(unlist(strsplit(input$centers3, split = ",")))
} else {
centers3 <- NULL
}
if(b3 == "breaks") {
breaks3 <- as.numeric(unlist(strsplit(input$breaks3, split = ",")))
} else {
breaks3 <- NULL
}
if(b4 == "centers") {
centers4 <- as.numeric(unlist(strsplit(input$centers4, split = ",")))
} else {
centers4 <- NULL
}
if(b4 == "breaks") {
breaks4 <- as.numeric(unlist(strsplit(input$breaks4, split = ",")))
} else {
breaks4 <- NULL
}
if(b1 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = ..(b1), xbin = ..(input$midPoint), nbins = ..(input$nbins1), centers = ..(centers1), breaks = ..(breaks1), by.strata = T)
})
}
if(b2 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l2), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l2), bin = ..(b2), xbin = ..(input$midPoint), nbins = ..(input$nbins2), centers = ..(centers2), breaks = ..(breaks2), by.strata = T)
})
}
if(b3 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l3), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l3), bin = ..(b3), xbin = ..(input$midPoint), nbins = ..(input$nbins3), centers = ..(centers3), breaks = ..(breaks3), by.strata = T)
})
}
if(b4 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l4), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l4), bin = ..(b4), xbin = ..(input$midPoint), nbins = ..(input$nbins4), centers = ..(centers4), breaks = ..(breaks4), by.strata = T)
})
}
}
if (length(stratlvl()[[1]]) == 5) {
b1 <- input$typeBinning1
b2 <- input$typeBinning2
b3 <- input$typeBinning3
b4 <- input$typeBinning4
b5 <- input$typeBinning5
l1 <- list(stratlvl()[[1]][[1]])
l2 <- list(stratlvl()[[1]][[2]])
l3 <- list(stratlvl()[[1]][[3]])
l4 <- list(stratlvl()[[1]][[4]])
l5 <- list(stratlvl()[[1]][[5]])
names(l1) <- namesstrat()
names(l2) <- namesstrat()
names(l3) <- namesstrat()
names(l4) <- namesstrat()
names(l5) <- namesstrat()
if(b1 == "centers") {
centers1 <- as.numeric(unlist(strsplit(input$centers1, split = ",")))
} else {
centers1 <- NULL
}
if(b1 == "breaks") {
breaks1 <- as.numeric(unlist(strsplit(input$breaks1, split = ",")))
} else {
breaks1 <- NULL
}
if(b2 == "centers") {
centers2 <- as.numeric(unlist(strsplit(input$centers2, split = ",")))
} else {
centers2 <- NULL
}
if(b2 == "breaks") {
breaks2 <- as.numeric(unlist(strsplit(input$breaks2, split = ",")))
} else {
breaks2 <- NULL
}
if(b3 == "centers") {
centers3 <- as.numeric(unlist(strsplit(input$centers3, split = ",")))
} else {
centers3 <- NULL
}
if(b3 == "breaks") {
breaks3 <- as.numeric(unlist(strsplit(input$breaks3, split = ",")))
} else {
breaks3 <- NULL
}
if(b4 == "centers") {
centers4 <- as.numeric(unlist(strsplit(input$centers4, split = ",")))
} else {
centers4 <- NULL
}
if(b4 == "breaks") {
breaks4 <- as.numeric(unlist(strsplit(input$breaks4, split = ",")))
} else {
breaks4 <- NULL
}
if(b5 == "centers") {
centers5 <- as.numeric(unlist(strsplit(input$centers5, split = ",")))
} else {
centers5 <- NULL
}
if(b5 == "breaks") {
breaks5 <- as.numeric(unlist(strsplit(input$breaks5, split = ",")))
} else {
breaks5 <- NULL
}
if(b1 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l1), bin = ..(b1), xbin = ..(input$midPoint), nbins = ..(input$nbins1), centers = ..(centers1), breaks = ..(breaks1), by.strata = T)
})
}
if(b2 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l2), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l2), bin = ..(b2), xbin = ..(input$midPoint), nbins = ..(input$nbins2), centers = ..(centers2), breaks = ..(breaks2), by.strata = T)
})
}
if(b3 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l3), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l3), bin = ..(b3), xbin = ..(input$midPoint), nbins = ..(input$nbins3), centers = ..(centers3), breaks = ..(breaks3), by.strata = T)
})
}
if(b4 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l4), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l4), bin = ..(b4), xbin = ..(input$midPoint), nbins = ..(input$nbins4), centers = ..(centers4), breaks = ..(breaks4), by.strata = T)
})
}
if(b5 == "x-variable") {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l5), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binning(stratum = ..(l5), bin = ..(b5), xbin = ..(input$midPoint), nbins = ..(input$nbins5), centers = ..(centers5), breaks = ..(breaks5), by.strata = T)
})
}
}
}
if (input$typeVPC == "Binning" && input$isPred && !input$log_dv) {
vpcUser <- metaExpr({
..(vpcUser) %>%
predcorrect(pred = !!rlang::sym(..(input$predvar)))
})
}
if (input$typeVPC == "Binning" && input$isPred && input$log_dv) {
vpcUser <- metaExpr({
..(vpcUser) %>%
predcorrect(pred = !!rlang::sym(..(input$predvar)), log = TRUE)
})
}
if (input$typeVPC == "Binning") {
vpcUser <- metaExpr({
..(vpcUser) %>%
vpcstats(qpred = ..(userQuantiles()), conf.level = ..(confidenceInterval()))
})
}
if(input$typeVPC == "Binless" && input$isPred) {
if(input$isAutoOptimize) {
# vpcUser <- metaExpr({
# ..(vpcUser) %>%
# predcorrect(pred = !!rlang::sym(..(input$predvar))) %>%
# binlessaugment(qpred = ..(userQuantiles()), interval = ..(binlessInputs()$intervalUser), loess.ypc = TRUE) %>%
# binlessfit(conf.level = ..(confidenceInterval())) %>%
# vpcstats()
vpcUser <- metaExpr({
..(vpcUser) %>%
predcorrect(pred = !!rlang::sym(..(input$predvar))) %>%
binless(qpred = ..(userQuantiles()), optimize = TRUE, optimization.interval = ..(binlessInputs()$intervalUser), conf.level = ..(confidenceInterval()), loess.ypc = TRUE) %>%
vpcstats()
})
} else {
if(!is.null(form)) {
vpcUser <- metaExpr({
..(vpcUser) %>%
predcorrect(pred = !!rlang::sym(..(input$predvar))) %>%
binless(qpred = ..(userQuantiles()), optimize = FALSE, conf.level = ..(confidenceInterval()), lambda = ..(userStratLvl()), span = NULL, loess.ypc = TRUE) %>%
vpcstats()
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
predcorrect(pred = !!rlang::sym(..(input$predvar))) %>%
binless(qpred = ..(userQuantiles()), optimize = FALSE, conf.level = ..(confidenceInterval()), lambda = ..(binlessInputs()$lamUser), span = ..(binlessInputs()$spanUser), loess.ypc = TRUE) %>%
vpcstats()
})
}
}
}
if(input$typeVPC == "Binless" && !input$isPred) {
if(input$isAutoOptimize) {
vpcUser <- metaExpr({
..(vpcUser) %>%
binless(qpred = ..(userQuantiles()), optimize = TRUE, optimization.interval = ..(binlessInputs()$intervalUser), conf.level = ..(confidenceInterval())) %>%
vpcstats()
})
} else {
if(!is.null(form)) {
vpcUser <- metaExpr({
..(vpcUser) %>%
binless(qpred = ..(userQuantiles()), optimize = FALSE, conf.level = ..(confidenceInterval()), lambda = ..(userStratLvl())) %>%
vpcstats()
})
} else {
vpcUser <- metaExpr({
..(vpcUser) %>%
binless(qpred = ..(userQuantiles()), optimize = FALSE, conf.level = ..(confidenceInterval()), lambda = ..(binlessInputs()$lamUser)) %>%
vpcstats()
})
}
}
}
})
vpcUser
})
plotAesthetics <- reactive({
list(
linetype = c(input$lineTypeLo, input$lineTypeMed, input$lineTypeHi),
color = c(input$colorTypeHi, input$colorTypeMed, input$colorTypeLo),
color.fill = input$colorFill,
#custom.theme = input$themeType,
show.points = input$showPoints,
#show.boundaries = input$showBoundaries,
#show.stats = input$showStats,
legend.position = input$legendPosition,
facet.scales = input$facetScales,
xlabel = input$xlabel,
ylabel = input$ylabel,
#qlabel = userQuantiles(),
facet.scales.type = input$facetScales,
conf.level = confidenceInterval()
)
})
vpcPlot <- metaReactive2({
req(vpc())
isolate({
if(length(input$stratvar > 1)) {
facet_formula <- paste0("~", paste(input$stratvar, collapse = " + "))
} else if (length(input$stratvar == 1)) {
facet_formula <- paste0("~", input$stratvar)
} else {
facet_formula <- ""
}
if(input$typeVPC == "Binless") {
g <- metaExpr({
ggplot(
..(vpc())$stats, aes(x=x))
})
} else {
g <- metaExpr({
ggplot(
..(vpc())$stats, aes(x=xbin))
})
}
})
if(facet_formula != "") {
if(input$facetQuantile) {
facet_formula <- as.formula(paste0("qname", facet_formula))
} else {
facet_formula <- as.formula(facet_formula)
}
if(input$facetScales == "free") {
g <- metaExpr({
..(g) +
facet_grid(..(facet_formula), scales = "free", as.table = FALSE)
})
} else {
g <- metaExpr({
..(g) +
facet_grid(..(facet_formula), scales = "fixed", as.table = FALSE)
})
}
}
if(facet_formula == "" && input$facetQuantile) {
facet_formula <- as.formula(paste0("~", "qname"))
if(input$facetScales == "free") {
g <- metaExpr({
..(g) +
facet_wrap(..(facet_formula), scales = "free", as.table = TRUE)
})
} else {
g <- metaExpr({
..(g) +
facet_wrap(..(facet_formula), scales = "fixed", as.table = TRUE)
})
}
}
g <- metaExpr({
..(g) +
geom_ribbon(aes(ymin=lo, ymax=hi, fill=qname, col=qname, group=qname), alpha=..(plotAesthetics()$color.fill), col=NA) +
geom_line(aes(y=md, col=qname, group=qname)) +
geom_line(aes(y=y, linetype=qname), size=1)
})
isolate({
if(input$isCensoring) {
if(input$censorType == "Variable")
if(!is.null(input$stratvar)) {
g <- metaExpr({
..(g) +
geom_hline(data=unique(..(vpc())$data[, .(LLOQ), by = eval(..(input$stratvar))]),
aes(yintercept = !!as.symbol(..(input$censorvar))), linetype="dotted", size=1) +
geom_text(data=unique(..(vpc())$data[, .(LLOQ), by = eval(..(input$stratvar))]),
aes(x=10, y=LLOQ, label=paste("LLOQ", LLOQ, sep="="),), vjust=-1)
})
} else {
g <- metaExpr({
..(g) +
geom_hline(data=unique(..(vpc())$data[, .(LLOQ)]),
aes(yintercept = !!as.symbol(..(input$censorvar))), linetype="dotted", size=1) +
geom_text(data=unique(..(vpc())$data[, .(LLOQ)]),
aes(x=10, y=LLOQ, label=paste("LLOQ", LLOQ, sep="="),), vjust=-1)
})
} else {
g <- metaExpr({
..(g) +
geom_hline(aes(yintercept = ..(input$userLLOQ)), linetype="dotted", size=1) +
geom_text(aes(x=10, y=..(input$userLLOQ), label=paste("LLOQ", ..(input$userLLOQ), sep="="),), vjust=-1)
})
}
}
})
if(input$isLogDV) {
if(min(vpc()$stats$lo) < 0) {
g <- metaExpr({
..(g) + scale_y_continuous(trans="log10", limits=c(0.1,max(..(vpc())$stats$hi)))
})
} else {
g <- metaExpr({
..(g) + scale_y_log10()
})
}
}
if(input$isLogX) {
g <- metaExpr({
..(g) + scale_x_log10()
})
}
g <- isolate({
metaExpr({
..(g) +
scale_colour_manual(
name=..(paste0("Simulated Percentiles\nMedian (lines) ", plotAesthetics()$conf.level * 100, "% CI (areas)")),
breaks=..(paste0("q", userQuantiles())) ,
values= ..(plotAesthetics()$color),
labels=..(paste0(userQuantiles() * 100, "%"))) +
scale_fill_manual(
name=..(paste0("Simulated Percentiles\nMedian (lines) ", plotAesthetics()$conf.level * 100, "% CI (areas)")),
breaks=..(paste0("q", userQuantiles())),
values=..(plotAesthetics()$color),
labels=..(paste0(userQuantiles() * 100, "%"))) +
scale_linetype_manual(
name=..(paste0("Observed Percentiles\nMedian (lines) ", plotAesthetics()$conf.level * 100, "% CI (areas)")),
breaks=..(paste0("q", userQuantiles())),
values= ..(plotAesthetics()$linetype),
labels=..(paste0(userQuantiles() * 100, "%"))) +
guides(
fill=guide_legend(order=2),
colour=guide_legend(order=2),
linetype=guide_legend(order=1)) +
theme(
legend.position=..(plotAesthetics()$legend.position),
legend.key.width=grid::unit(2, "cm")) +
labs(x= ..(plotAesthetics()$xlabel), y= ..(plotAesthetics()$ylabel))
})
})
if(!input$showStats && input$typeVPC == "Binning") {
if(facet_formula != "") {
if(input$facetScales == "free") {
g <- isolate(metaExpr({
ggplot2::ggplot(vpc()$strat) +
facet_wrap(..(facet_formula), scales = "free") +
theme(
legend.position=..(plotAesthetics()$legend.position),
legend.key.width=grid::unit(2, "cm")) +
labs(x= ..(plotAesthetics()$xlabel), y= ..(plotAesthetics()$ylabel))
}))
} else {
g <- isolate(metaExpr({
ggplot2::ggplot(vpc()$strat) +
facet_grid(..(facet_formula)) +
theme(
legend.position=..(plotAesthetics()$legend.position),
legend.key.width=grid::unit(2, "cm")) +
labs(x= ..(plotAesthetics()$xlabel), y= ..(plotAesthetics()$ylabel))
}))
}
} else {
g <- isolate(metaExpr({
ggplot2::ggplot(vpc()$strat) +
theme(
legend.position=..(plotAesthetics()$legend.position),
legend.key.width=grid::unit(2, "cm")) +
labs(x= ..(plotAesthetics()$xlabel), y= ..(plotAesthetics()$ylabel))
}))
}
}
if(input$showPoints) {
g <- isolate(metaExpr({
..(g) + ggplot2::geom_point(data=..(vpc())$obs, ggplot2::aes(x=x, y=y), size=1, alpha=0.4, show.legend=F)
}))
} else {
g
}
if (input$showBoundaries) {
if(is.null(vpc()$rqss.obs.fits)) {
if (!is.null(vpc()$strat)) {
boundaries <- isolate(metaExpr({
bininfo(..(vpc()))[, .(x=sort(unique(c(xleft, xright)))), by=names(..(vpc())$strat)]
}))
} else {
boundaries <- isolate(metaExpr({
bininfo(..(vpc()))[, .(x=sort(unique(c(xleft, xright))))]
}))
}
if (input$showBinning) {
g <- isolate(metaExpr({
..(g) + ggplot2::geom_vline(data=..(boundaries), ggplot2::aes(xintercept=x), size=rel(0.5), col="gray80") +
ggplot2::theme(panel.grid=ggplot2::element_blank())
}))
}
g <- isolate(metaExpr({
..(g) + ggplot2::geom_rug(data=..(boundaries), ggplot2::aes(x=x), sides="t", size=1)
}))
}
} else {
g
}
g
})
blqPlot <- metaReactive2({
req(vpc())
req(input$isCensoring)
isolate({
if(length(input$stratvar > 1)) {
strata <- paste(input$stratvar, collapse = " + ")
form <- formula(paste0("~", strata))
} else if(length(input$stratvar == 1)) {
strata <- input$stratvar
form <- formula(paste0("~", strata))
} else {
form <- NULL
}
})
if(input$isPlotBlq) {
if(input$typeVPC == "Binning") {
g <- metaExpr({
ggplot(
..(vpc())$pctblq) +
geom_ribbon(aes(x = xbin, ymin= lo, ymax = hi), fill = "red", alpha = .2) +
geom_line(aes(x = xbin, y = y)) +
labs(x= ..(plotAesthetics()$xlabel), y= "% BLQ")
})
}
if(input$typeVPC == "Binless") {
g <- metaExpr({
ggplot(
..(vpc())$pctblq) +
geom_ribbon(aes(x = x, ymin= lo, ymax = hi), fill = "red", alpha = .2) +
geom_line(aes(x = x, y = y), colour = "black") +
geom_line(aes(x = x, y = md), colour = "red") +
labs(x= ..(plotAesthetics()$xlabel), y= "% BLQ")
})
}
}
if(!is.null(form)) {
g <- metaExpr({
..(g) +
facet_wrap(..(form))
})
}
g
})
observeEvent(input$generateCode, {
code <- expandChain(
quote({
library(ggplot2)
library(tidyvpc)
}),
output$vpccode(),
output$plotVPC()
)
displayCodeModal(
code = code,
title = "Code to reproduce VPC"
)
})
output$plotVPC <- metaRender(renderPlot, {
..(vpcPlot())
}, height = function() {
session$clientData$output_plotVPC_width * .6
},
width = function() {
session$clientData$output_plotVPC_width
},
execOnResize = TRUE)
output$plotBlq <- metaRender(renderPlot, {
..(blqPlot())
})
output$vpccode <- metaRender(renderPrint,{
..(vpc())
})
output$tableObs <- renderDataTable({
datatable(vpc()$stats, rownames = FALSE, options = list(
initComplete = JS(
"function(settings, json) {",
"$(this.api().table().header()).css({'background-color': '#0a7bc1', 'color': '#FFFFFF'});",
"}")
)) %>%
formatRound(c("y", "lo", "md", "hi"), digits = 2)
})
# output$optLambda <- renderTable({
# l <- as.data.table(rev(vpc()$llam.qpred))
# setnames(l, "Lambda")
# })
#spanOut <-
# output$optSpan <- renderTable({
# s <- as.data.table(vpc()$span)
# setnames(s, "Span")
# })
#Notifications
observe({
if(!input$isCensoring && input$isPlotBlq) {
showNotification("Please select censoring value before plotting BLQ", type = "error")
}
})
observe({
if(is.null(input$stratvar) && input$isBinStrat) {
showNotification("Please select stratfication variable before binning by strat.", type = "error")
}
})
observe({
if(input$isBinStrat && length(input$stratvar) > 1) {
showNotification("Bin by strata limited to one stratification variable. Use one binning method for multiple stratification variables.", type = "error", duration = 5)
}
})
observe({
if(input$isBinlessStrata && length(input$stratvar) > 1) {
showNotification("Manual smoothing limited to one stratification variable. Use auto-smoothing for multiple stratification variables.", type = "error", duration = 5)
}
})
observe({
isolate({
logicBinless <- isFALSE(input$isBinlessStrata) && isFALSE(input$isAutoOptimize)
updateCheckboxInput(session, "isBinlessStrata", value = logicBinless)
})
})
observe({
isolate({
isStrat2 <- length(input$stratvar) >= 2 && isFALSE(input$isAutoOptimize)
updateSwitchInput(session, "isAutoOptimize", value = isStrat2)
})
})
}
|
/server.R
|
no_license
|
jameswcraig/shiny-vpc
|
R
| false
| false
| 34,931
|
r
|
#server
# Raise the upload limit to 10000 MiB (~10 GB) so large observed/simulated
# NONMEM-style datasets can be uploaded through the file inputs.
options(shiny.maxRequestSize=10000*1024^2)
server <- function(input, output, session) {
fileObs <- callModule(file_observed_server, "obs")   # reactive: uploaded observed dataset
fileSim <- callModule(file_simulated_server, "sim")  # reactive: uploaded simulated dataset
# When observed data arrives, populate all column-picker widgets with its column names.
observeEvent(fileObs(),{
  updateSelectizeInput(session, inputId = "yvar", choices = names(fileObs()))
  updateSelectizeInput(session, inputId = "xvar", choices = names(fileObs()))
  updateSelectizeInput(session, inputId = "stratvar", choices = names(fileObs()))
  updateSelectizeInput(session, inputId = "predvar", choices = names(fileObs()))
  updateSelectizeInput(session, inputId = "censorvar", choices = names(fileObs()))
})
# Sub-modules supplying binless tuning inputs, VPC quantiles, and the CI level.
binlessInputs <- callModule(binless_inputs, "binlessInputs1")
userQuantiles <- callModule(quantiles_server, "qpred1")
confidenceInterval <- callModule(confidence_interval_server, "ci1")
# Currently selected stratification variable name(s); NULL when none chosen.
stratlist <- reactive({
  input$stratvar
})
# Observed data restricted to the stratification columns.
# NOTE(review): `stratdata` is redefined further down in this file (selecting
# input$stratvar directly); that later definition wins. Equivalent behavior,
# but one of the two should be removed.
stratdata <- reactive({
  subset(fileObs(), select = stratlist())
})
# Preview table showing the unique levels of each stratification column.
output$stratTable <- renderTable({
  lapply(stratdata(), unique)
})
# Sorted "column name + level" identifiers (e.g. "STUDY1", "STUDY2") built
# from the unique levels of every selected stratification column. These ids
# serve as dynamic widget ids for the per-stratum lambda/binning inputs.
stratnamelvl <- reactive({
  uniq_levels <- lapply(stratdata(), unique)
  flat <- unlist(uniq_levels)
  # unlist() names the elements like "STUDY1", "STUDY2"; strip the numeric
  # suffix to recover the bare column name, then re-append the level value.
  bare_names <- gsub("[[:digit:]]+", "", names(flat))
  sort(paste0(bare_names, as.character(flat)))
})
# One-row data.table of the user-entered values of each per-stratum widget
# (widget ids come from stratnamelvl(), e.g. "STUDY1").
# Fix: dynamic inputs are now read with input[[id]] instead of
# eval(parse(text = paste0("input$", id))) -- the supported way to access a
# Shiny input by computed name, with identical results.
userStratNames <- reactive({
  ids <- stratnamelvl()
  dt <- vector("list", length = length(ids))
  for (i in seq_along(ids)) {
    dt[[i]] <- input[[ids[[i]]]]
  }
  dt <- as.data.table(dt)
  # setnames() renames by reference and returns the data.table (reactive value).
  setnames(dt, ids)
})
# Per-stratum smoothing values as a data.table, exposed as a shinymeta
# reactive so the values can be spliced into generated code.
# NOTE(review): `getnum` is defined elsewhere in the project; presumably it
# parses the widget strings into numerics -- confirm against its definition.
userStratLvl <- metaReactive({
  as.data.table(..(lapply(userStratNames(), getnum)))
})
# Table of the raw per-stratum inputs.
# NOTE(review): setnames() mutates the data.table returned by the
# userStratNames() reactive in place before rendering it.
output$lamTable <- renderTable({
  req(userStratNames())
  setnames(userStratNames(), stratnamelvl())
})
# Dynamically generated per-stratum lambda input widgets (one set per level).
# NOTE(review): the name `t` shadows base::t() inside this server function.
t <- reactive({
  req(input$stratvar)
  l <- render_lam_strat(stratlist(), stratdata())
})
# Dynamically generated per-stratum binning-method input widgets.
tt <- reactive({
  req(input$stratvar)
  ll <- render_strat_binning(stratlist(), stratdata())
})
# UI container for the per-stratum binning widgets.
output$stratpanels <- renderUI({
  tagList(
    tags$h5("Select Binning Method by Strata"),
    tt())
})
# UI container for the per-stratum lambda widgets.
output$stratLambdas <- renderUI({
  tagList(
    tags$h5("Select Lambda Smoothing by Strata"),
    t(),
    tags$h6("(Lower, Median, Upper)")
  )
})
# userLamStrat <- metaReactive({
# data.table(
# group0 = c(..(input$lambdaStratLo0_1), ..(input$lambdaStratMed0_1), ..(input$lambdaStratHi0_1)),
# group1 = c(..(input$lambdaStratLo1_1), ..(input$lambdaStratMed1_1), ..(input$lambdaStratHi1_1))
# )
# })
# NOTE(review): this silently overwrites the earlier `stratdata` reactive;
# both select the stratification columns, so behavior is equivalent, but one
# of the two definitions should be removed.
stratdata <- reactive({
  subset(fileObs(), select = input$stratvar)
})
# Names of the stratification columns.
namesstrat <- reactive({
  names(stratdata())
})
# Sorted unique levels of each stratification column (list, one entry per column).
stratlvl <- reactive({
  l <- lapply(stratdata(), unique)
  l <- lapply(l, sort)
})
# Core tidyvpc pipeline, built as a shinymeta expression so the exact code
# can be exported via "Generate Code". Steps, all read inside isolate() so
# only the Plot button triggers recomputation:
#   observed/simulated -> optional censoring -> optional stratify ->
#   binning (global or per-stratum) or binless -> vpcstats.
vpc <- metaReactive2({
  req(input$buttonPlot)
  isolate({
    # Stratification formula (e.g. ~STUDY + DOSE); NULL when nothing chosen.
    # Fix: conditions previously read length(input$stratvar > 1), i.e. the
    # length of a comparison vector, instead of comparing the length itself.
    if (length(input$stratvar) > 1) {
      strata <- paste(input$stratvar, collapse = " + ")
      form <- formula(paste0("~", strata))
    } else if (length(input$stratvar) == 1) {
      strata <- input$stratvar
      form <- formula(paste0("~", strata))
    } else {
      form <- NULL
    }
    # Base pipeline: observed + simulated data.
    vpcUser <- metaExpr({
      observed(..(fileObs()), x = !!rlang::sym(..(input$xvar)), y = !!rlang::sym(..(input$yvar))) %>%
        simulated(..(fileSim()), y = !!rlang::sym(..(input$yvar)))
    })
    # Optional BLQ censoring: fixed LLOQ value or per-row LLOQ column.
    if (input$isCensoring) {
      if (input$censorType == "Value") {
        req(input$userLLOQ)
        vpcUser <- metaExpr({
          ..(vpcUser) %>%
            censoring(blq = !!rlang::sym(..(input$yvar)) < ..(input$userLLOQ), lloq = ..(input$userLLOQ))
        })
      } else {
        req(input$censorvar)
        vpcUser <- metaExpr({
          ..(vpcUser) %>%
            censoring(blq = !!rlang::sym(..(input$yvar)) < !!rlang::sym(..(input$censorvar)), lloq = !!rlang::sym(..(input$censorvar)))
        })
      }
    }
    if (!is.null(form)) {
      req(input$stratvar)
      vpcUser <- metaExpr({
        ..(vpcUser) %>%
          stratify(..(form))
      })
    }
    # Binning applied uniformly (no per-stratum method).
    if (input$typeVPC == "Binning" && !input$isBinStrat) {
      if (input$typeBinning == "x-variable") {
        vpcUser <- metaExpr({
          ..(vpcUser) %>%
            binning(bin = !!rlang::sym(..(input$xvar)))
        })
      }
      if (input$typeBinning == "centers") {
        centers <- as.numeric(unlist(strsplit(input$centers, split = ",")))
        vpcUser <- metaExpr({
          ..(vpcUser) %>%
            binning(bin = "centers", centers = ..(centers))
        })
      }
      # Fix: this "breaks" branch previously appeared twice back to back;
      # the verbatim duplicate was removed.
      if (input$typeBinning == "breaks") {
        breaks <- as.numeric(unlist(strsplit(input$breaks, split = ",")))
        vpcUser <- metaExpr({
          ..(vpcUser) %>%
            binning(bin = "breaks", breaks = ..(breaks))
        })
      }
      # Any other choice is a named binning algorithm taking nbins.
      if (is_false(input$typeBinning %in% c("breaks","centers","x-variable"))) {
        vpcUser <- metaExpr({
          ..(vpcUser) %>%
            binning(bin = ..(input$typeBinning), nbins = ..(input$nbins))
        })
      }
    }
    # Different binning method per stratum (single stratification variable).
    # Generalization: the previous hard-coded branches for exactly 2, 3, 4 or
    # 5 levels are replaced by one loop over the levels; the generated chain
    # of binning() calls for 2-5 levels is unchanged, and any number of
    # levels is now supported.
    if (input$typeVPC == "Binning" && input$isBinStrat && !is.null(form)) {
      levs <- stratlvl()[[1]]
      for (i in seq_along(levs)) {
        # Dynamic widget ids follow the pattern typeBinning1, nbins1, centers1, ...
        bin_i <- input[[paste0("typeBinning", i)]]
        if (is.null(bin_i)) next  # widget for this level not rendered yet
        stratum_i <- list(levs[[i]])
        names(stratum_i) <- namesstrat()
        centers_i <- if (identical(bin_i, "centers")) {
          as.numeric(unlist(strsplit(input[[paste0("centers", i)]], split = ",")))
        } else {
          NULL
        }
        breaks_i <- if (identical(bin_i, "breaks")) {
          as.numeric(unlist(strsplit(input[[paste0("breaks", i)]], split = ",")))
        } else {
          NULL
        }
        if (identical(bin_i, "x-variable")) {
          vpcUser <- metaExpr({
            ..(vpcUser) %>%
              binning(stratum = ..(stratum_i), bin = !!rlang::sym(..(input$xvar)), xbin = ..(input$midPoint), by.strata = T)
          })
        } else {
          vpcUser <- metaExpr({
            ..(vpcUser) %>%
              binning(stratum = ..(stratum_i), bin = ..(bin_i), xbin = ..(input$midPoint), nbins = ..(input[[paste0("nbins", i)]]), centers = ..(centers_i), breaks = ..(breaks_i), by.strata = T)
          })
        }
      }
    }
    # Prediction correction for binned VPCs; the two previous branches
    # differed only in the log flag, so it is spliced from input$log_dv.
    if (input$typeVPC == "Binning" && input$isPred) {
      vpcUser <- metaExpr({
        ..(vpcUser) %>%
          predcorrect(pred = !!rlang::sym(..(input$predvar)), log = ..(input$log_dv))
      })
    }
    if (input$typeVPC == "Binning") {
      vpcUser <- metaExpr({
        ..(vpcUser) %>%
          vpcstats(qpred = ..(userQuantiles()), conf.level = ..(confidenceInterval()))
      })
    }
    # Binless VPC with prediction correction (loess.ypc).
    if (input$typeVPC == "Binless" && input$isPred) {
      if (input$isAutoOptimize) {
        vpcUser <- metaExpr({
          ..(vpcUser) %>%
            predcorrect(pred = !!rlang::sym(..(input$predvar))) %>%
            binless(qpred = ..(userQuantiles()), optimize = TRUE, optimization.interval = ..(binlessInputs()$intervalUser), conf.level = ..(confidenceInterval()), loess.ypc = TRUE) %>%
            vpcstats()
        })
      } else {
        if (!is.null(form)) {
          # Manual smoothing: per-stratum lambdas from the dynamic widgets.
          vpcUser <- metaExpr({
            ..(vpcUser) %>%
              predcorrect(pred = !!rlang::sym(..(input$predvar))) %>%
              binless(qpred = ..(userQuantiles()), optimize = FALSE, conf.level = ..(confidenceInterval()), lambda = ..(userStratLvl()), span = NULL, loess.ypc = TRUE) %>%
              vpcstats()
          })
        } else {
          vpcUser <- metaExpr({
            ..(vpcUser) %>%
              predcorrect(pred = !!rlang::sym(..(input$predvar))) %>%
              binless(qpred = ..(userQuantiles()), optimize = FALSE, conf.level = ..(confidenceInterval()), lambda = ..(binlessInputs()$lamUser), span = ..(binlessInputs()$spanUser), loess.ypc = TRUE) %>%
              vpcstats()
          })
        }
      }
    }
    # Binless VPC without prediction correction.
    if (input$typeVPC == "Binless" && !input$isPred) {
      if (input$isAutoOptimize) {
        vpcUser <- metaExpr({
          ..(vpcUser) %>%
            binless(qpred = ..(userQuantiles()), optimize = TRUE, optimization.interval = ..(binlessInputs()$intervalUser), conf.level = ..(confidenceInterval())) %>%
            vpcstats()
        })
      } else {
        if (!is.null(form)) {
          vpcUser <- metaExpr({
            ..(vpcUser) %>%
              binless(qpred = ..(userQuantiles()), optimize = FALSE, conf.level = ..(confidenceInterval()), lambda = ..(userStratLvl())) %>%
              vpcstats()
          })
        } else {
          vpcUser <- metaExpr({
            ..(vpcUser) %>%
              binless(qpred = ..(userQuantiles()), optimize = FALSE, conf.level = ..(confidenceInterval()), lambda = ..(binlessInputs()$lamUser)) %>%
              vpcstats()
          })
        }
      }
    }
  })
  vpcUser
})
# Collects all plot-styling inputs into one list consumed by vpcPlot/blqPlot.
# NOTE(review): facet.scales and facet.scales.type both mirror
# input$facetScales; one of the two entries is redundant.
plotAesthetics <- reactive({
  list(
    linetype = c(input$lineTypeLo, input$lineTypeMed, input$lineTypeHi),
    color = c(input$colorTypeHi, input$colorTypeMed, input$colorTypeLo),
    color.fill = input$colorFill,
    #custom.theme = input$themeType,
    show.points = input$showPoints,
    #show.boundaries = input$showBoundaries,
    #show.stats = input$showStats,
    legend.position = input$legendPosition,
    facet.scales = input$facetScales,
    xlabel = input$xlabel,
    ylabel = input$ylabel,
    #qlabel = userQuantiles(),
    facet.scales.type = input$facetScales,
    conf.level = confidenceInterval()
  )
})
# Assembles the VPC ggplot layer by layer from vpc() statistics and the
# aesthetic inputs. Built with shinymeta so the plot code can be exported.
vpcPlot <- metaReactive2({
  req(vpc())
  isolate({
    # Facet specification kept as a string until it is actually used.
    # Fix: conditions previously read length(input$stratvar > 1) / == 1,
    # i.e. the length of a comparison vector, not a length comparison.
    if (length(input$stratvar) > 1) {
      facet_str <- paste0("~", paste(input$stratvar, collapse = " + "))
    } else if (length(input$stratvar) == 1) {
      facet_str <- paste0("~", input$stratvar)
    } else {
      facet_str <- ""
    }
    # Binless stats are keyed by x; binned stats by the bin midpoint xbin.
    if (input$typeVPC == "Binless") {
      g <- metaExpr({
        ggplot(
          ..(vpc())$stats, aes(x=x))
      })
    } else {
      g <- metaExpr({
        ggplot(
          ..(vpc())$stats, aes(x=xbin))
      })
    }
  })
  # Fix: the string was previously overwritten with a formula object and then
  # compared against "" again further down, which errors for formulas. The
  # formula now lives in its own variable (NULL when no faceting).
  facet_formula <- NULL
  if (facet_str != "") {
    if (input$facetQuantile) {
      facet_formula <- as.formula(paste0("qname", facet_str))
    } else {
      facet_formula <- as.formula(facet_str)
    }
    if (input$facetScales == "free") {
      g <- metaExpr({
        ..(g) +
          facet_grid(..(facet_formula), scales = "free", as.table = FALSE)
      })
    } else {
      g <- metaExpr({
        ..(g) +
          facet_grid(..(facet_formula), scales = "fixed", as.table = FALSE)
      })
    }
  }
  # Facet by quantile alone when no stratification variable is selected.
  if (facet_str == "" && input$facetQuantile) {
    facet_formula <- as.formula(paste0("~", "qname"))
    if (input$facetScales == "free") {
      g <- metaExpr({
        ..(g) +
          facet_wrap(..(facet_formula), scales = "free", as.table = TRUE)
      })
    } else {
      g <- metaExpr({
        ..(g) +
          facet_wrap(..(facet_formula), scales = "fixed", as.table = TRUE)
      })
    }
  }
  # Simulated CI ribbons, simulated medians, observed percentile lines.
  g <- metaExpr({
    ..(g) +
      geom_ribbon(aes(ymin=lo, ymax=hi, fill=qname, col=qname, group=qname), alpha=..(plotAesthetics()$color.fill), col=NA) +
      geom_line(aes(y=md, col=qname, group=qname)) +
      geom_line(aes(y=y, linetype=qname), size=1)
  })
  # LLOQ reference line + label. Braces added: the previous dangling else
  # resolved to these same branches but was hard to read.
  # NOTE(review): the label x position is hard-coded to 10.
  isolate({
    if (input$isCensoring) {
      if (input$censorType == "Variable") {
        if (!is.null(input$stratvar)) {
          g <- metaExpr({
            ..(g) +
              geom_hline(data=unique(..(vpc())$data[, .(LLOQ), by = eval(..(input$stratvar))]),
                         aes(yintercept = !!as.symbol(..(input$censorvar))), linetype="dotted", size=1) +
              geom_text(data=unique(..(vpc())$data[, .(LLOQ), by = eval(..(input$stratvar))]),
                        aes(x=10, y=LLOQ, label=paste("LLOQ", LLOQ, sep="="),), vjust=-1)
          })
        } else {
          g <- metaExpr({
            ..(g) +
              geom_hline(data=unique(..(vpc())$data[, .(LLOQ)]),
                         aes(yintercept = !!as.symbol(..(input$censorvar))), linetype="dotted", size=1) +
              geom_text(data=unique(..(vpc())$data[, .(LLOQ)]),
                        aes(x=10, y=LLOQ, label=paste("LLOQ", LLOQ, sep="="),), vjust=-1)
          })
        }
      } else {
        g <- metaExpr({
          ..(g) +
            geom_hline(aes(yintercept = ..(input$userLLOQ)), linetype="dotted", size=1) +
            geom_text(aes(x=10, y=..(input$userLLOQ), label=paste("LLOQ", ..(input$userLLOQ), sep="="),), vjust=-1)
        })
      }
    }
  })
  # Log-scale axes; floor the y log scale at 0.1 when the lower CI would be
  # non-positive (log10 of <= 0 would drop the ribbon).
  if (input$isLogDV) {
    if (min(vpc()$stats$lo) < 0) {
      g <- metaExpr({
        ..(g) + scale_y_continuous(trans="log10", limits=c(0.1,max(..(vpc())$stats$hi)))
      })
    } else {
      g <- metaExpr({
        ..(g) + scale_y_log10()
      })
    }
  }
  if (input$isLogX) {
    g <- metaExpr({
      ..(g) + scale_x_log10()
    })
  }
  # Manual scales so simulated (colour/fill) and observed (linetype)
  # percentiles get separate, consistently labelled legends.
  g <- isolate({
    metaExpr({
      ..(g) +
        scale_colour_manual(
          name=..(paste0("Simulated Percentiles\nMedian (lines) ", plotAesthetics()$conf.level * 100, "% CI (areas)")),
          breaks=..(paste0("q", userQuantiles())) ,
          values= ..(plotAesthetics()$color),
          labels=..(paste0(userQuantiles() * 100, "%"))) +
        scale_fill_manual(
          name=..(paste0("Simulated Percentiles\nMedian (lines) ", plotAesthetics()$conf.level * 100, "% CI (areas)")),
          breaks=..(paste0("q", userQuantiles())),
          values=..(plotAesthetics()$color),
          labels=..(paste0(userQuantiles() * 100, "%"))) +
        scale_linetype_manual(
          name=..(paste0("Observed Percentiles\nMedian (lines) ", plotAesthetics()$conf.level * 100, "% CI (areas)")),
          breaks=..(paste0("q", userQuantiles())),
          values= ..(plotAesthetics()$linetype),
          labels=..(paste0(userQuantiles() * 100, "%"))) +
        guides(
          fill=guide_legend(order=2),
          colour=guide_legend(order=2),
          linetype=guide_legend(order=1)) +
        theme(
          legend.position=..(plotAesthetics()$legend.position),
          legend.key.width=grid::unit(2, "cm")) +
        labs(x= ..(plotAesthetics()$xlabel), y= ..(plotAesthetics()$ylabel))
    })
  })
  # "Hide statistics" (binning only): rebuild a bare plot so only the layers
  # added below (observed points, bin boundaries) remain visible.
  if (!input$showStats && input$typeVPC == "Binning") {
    if (!is.null(facet_formula)) {
      if (input$facetScales == "free") {
        g <- isolate(metaExpr({
          ggplot2::ggplot(vpc()$strat) +
            facet_wrap(..(facet_formula), scales = "free") +
            theme(
              legend.position=..(plotAesthetics()$legend.position),
              legend.key.width=grid::unit(2, "cm")) +
            labs(x= ..(plotAesthetics()$xlabel), y= ..(plotAesthetics()$ylabel))
        }))
      } else {
        g <- isolate(metaExpr({
          ggplot2::ggplot(vpc()$strat) +
            facet_grid(..(facet_formula)) +
            theme(
              legend.position=..(plotAesthetics()$legend.position),
              legend.key.width=grid::unit(2, "cm")) +
            labs(x= ..(plotAesthetics()$xlabel), y= ..(plotAesthetics()$ylabel))
        }))
      }
    } else {
      g <- isolate(metaExpr({
        ggplot2::ggplot(vpc()$strat) +
          theme(
            legend.position=..(plotAesthetics()$legend.position),
            legend.key.width=grid::unit(2, "cm")) +
          labs(x= ..(plotAesthetics()$xlabel), y= ..(plotAesthetics()$ylabel))
      }))
    }
  }
  # Observed data points overlay. (No-op `else { g }` branches removed.)
  if (input$showPoints) {
    g <- isolate(metaExpr({
      ..(g) + ggplot2::geom_point(data=..(vpc())$obs, ggplot2::aes(x=x, y=y), size=1, alpha=0.4, show.legend=F)
    }))
  }
  # Bin boundary rug/lines -- binned VPC only (rqss.obs.fits is non-NULL for binless).
  if (input$showBoundaries) {
    if (is.null(vpc()$rqss.obs.fits)) {
      if (!is.null(vpc()$strat)) {
        boundaries <- isolate(metaExpr({
          bininfo(..(vpc()))[, .(x=sort(unique(c(xleft, xright)))), by=names(..(vpc())$strat)]
        }))
      } else {
        boundaries <- isolate(metaExpr({
          bininfo(..(vpc()))[, .(x=sort(unique(c(xleft, xright))))]
        }))
      }
      if (input$showBinning) {
        g <- isolate(metaExpr({
          ..(g) + ggplot2::geom_vline(data=..(boundaries), ggplot2::aes(xintercept=x), size=rel(0.5), col="gray80") +
            ggplot2::theme(panel.grid=ggplot2::element_blank())
        }))
      }
      g <- isolate(metaExpr({
        ..(g) + ggplot2::geom_rug(data=..(boundaries), ggplot2::aes(x=x), sides="t", size=1)
      }))
    }
  }
  g
})
# %BLQ companion plot: observed percent below the limit of quantification per
# bin (binning) or per x (binless), with the simulated CI ribbon.
blqPlot <- metaReactive2({
  req(vpc())
  req(input$isCensoring)
  # Fix: `g` was never assigned when input$isPlotBlq was FALSE, so the final
  # `g` errored with "object 'g' not found"; require the toggle up front and
  # cancel cleanly instead.
  req(input$isPlotBlq)
  isolate({
    # Fix: conditions previously read length(input$stratvar > 1) / == 1.
    if (length(input$stratvar) > 1) {
      strata <- paste(input$stratvar, collapse = " + ")
      form <- formula(paste0("~", strata))
    } else if (length(input$stratvar) == 1) {
      strata <- input$stratvar
      form <- formula(paste0("~", strata))
    } else {
      form <- NULL
    }
  })
  if (input$typeVPC == "Binning") {
    g <- metaExpr({
      ggplot(
        ..(vpc())$pctblq) +
        geom_ribbon(aes(x = xbin, ymin= lo, ymax = hi), fill = "red", alpha = .2) +
        geom_line(aes(x = xbin, y = y)) +
        labs(x= ..(plotAesthetics()$xlabel), y= "% BLQ")
    })
  }
  if (input$typeVPC == "Binless") {
    g <- metaExpr({
      ggplot(
        ..(vpc())$pctblq) +
        geom_ribbon(aes(x = x, ymin= lo, ymax = hi), fill = "red", alpha = .2) +
        geom_line(aes(x = x, y = y), colour = "black") +
        geom_line(aes(x = x, y = md), colour = "red") +
        labs(x= ..(plotAesthetics()$xlabel), y= "% BLQ")
    })
  }
  # Facet by the stratification variable(s) when present.
  if (!is.null(form)) {
    g <- metaExpr({
      ..(g) +
        facet_wrap(..(form))
    })
  }
  g
})
# "Generate Code" button: expand the shinymeta dependency chain behind the
# VPC object and the plot into a standalone script and show it in a modal.
observeEvent(input$generateCode, {
  code <- expandChain(
    quote({
      library(ggplot2)
      library(tidyvpc)
    }),
    output$vpccode(),
    output$plotVPC()
  )
  displayCodeModal(
    code = code,
    title = "Code to reproduce VPC"
  )
})
# Main VPC plot; height is 60% of the rendered width (fixed 5:3 aspect).
output$plotVPC <- metaRender(renderPlot, {
  ..(vpcPlot())
}, height = function() {
  session$clientData$output_plotVPC_width * .6
},
width = function() {
  session$clientData$output_plotVPC_width
},
execOnResize = TRUE)
# %BLQ companion plot.
output$plotBlq <- metaRender(renderPlot, {
  ..(blqPlot())
})
# Printed tidyvpc object; also the root of the generated-code chain above.
output$vpccode <- metaRender(renderPrint,{
  ..(vpc())
})
# VPC statistics table with a styled header and 2-decimal rounding.
output$tableObs <- renderDataTable({
  datatable(vpc()$stats, rownames = FALSE, options = list(
    initComplete = JS(
      "function(settings, json) {",
      "$(this.api().table().header()).css({'background-color': '#0a7bc1', 'color': '#FFFFFF'});",
      "}")
  )) %>%
    formatRound(c("y", "lo", "md", "hi"), digits = 2)
})
# output$optLambda <- renderTable({
# l <- as.data.table(rev(vpc()$llam.qpred))
# setnames(l, "Lambda")
# })
#spanOut <-
# output$optSpan <- renderTable({
# s <- as.data.table(vpc()$span)
# setnames(s, "Span")
# })
  #Notifications
  # Warn when BLQ plotting is requested but no censoring value is selected.
  observe({
    if(!input$isCensoring && input$isPlotBlq) {
      showNotification("Please select censoring value before plotting BLQ", type = "error")
    }
  })
observe({
if(is.null(input$stratvar) && input$isBinStrat) {
showNotification("Please select stratfication variable before binning by strat.", type = "error")
}
})
  # Binning by strata supports exactly one stratification variable.
  observe({
    if(input$isBinStrat && length(input$stratvar) > 1) {
      showNotification("Bin by strata limited to one stratification variable. Use one binning method for multiple stratification variables.", type = "error", duration = 5)
    }
  })
  # Manual (per-stratum) smoothing likewise supports only one variable.
  observe({
    if(input$isBinlessStrata && length(input$stratvar) > 1) {
      showNotification("Manual smoothing limited to one stratification variable. Use auto-smoothing for multiple stratification variables.", type = "error", duration = 5)
    }
  })
  # Keep the manual-smoothing checkbox consistent with auto-optimize.
  # NOTE(review): the entire body is wrapped in isolate(), so this observer
  # has no reactive dependencies and fires only once at startup -- confirm
  # that is intentional.
  observe({
    isolate({
      logicBinless <- isFALSE(input$isBinlessStrata) && isFALSE(input$isAutoOptimize)
      updateCheckboxInput(session, "isBinlessStrata", value = logicBinless)
    })
  })
  # Force auto-optimize on when two or more stratification variables are
  # selected. Same caveat as above: fully isolated, so it runs only once.
  observe({
    isolate({
      isStrat2 <- length(input$stratvar) >= 2 && isFALSE(input$isAutoOptimize)
      updateSwitchInput(session, "isAutoOptimize", value = isStrat2)
    })
  })
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/storage_functions.R
\name{objects.get}
\alias{objects.get}
\title{Retrieves objects or their associated metadata.}
\usage{
objects.get(bucket, object, projection = NULL)
}
\arguments{
\item{bucket}{Name of the bucket in which the object resides}
\item{object}{Name of the object}
\item{projection}{Set of properties to return}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/devstorage.full_control
\item https://www.googleapis.com/auth/devstorage.read_only
\item https://www.googleapis.com/auth/devstorage.read_write
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/storage/docs/json_api/}{Google Documentation}
}
|
/googlestoragev1beta1.auto/man/objects.get.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 1,173
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/storage_functions.R
\name{objects.get}
\alias{objects.get}
\title{Retrieves objects or their associated metadata.}
\usage{
objects.get(bucket, object, projection = NULL)
}
\arguments{
\item{bucket}{Name of the bucket in which the object resides}
\item{object}{Name of the object}
\item{projection}{Set of properties to return}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/devstorage.full_control
\item https://www.googleapis.com/auth/devstorage.read_only
\item https://www.googleapis.com/auth/devstorage.read_write
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/devstorage.full_control", "https://www.googleapis.com/auth/devstorage.read_only", "https://www.googleapis.com/auth/devstorage.read_write"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://developers.google.com/storage/docs/json_api/}{Google Documentation}
}
|
library(MLmetrics)
library(StatMeasures)
library(dplyr)
library(tidyr)
library(corrgram)
library(MASS)
library(Hmisc)
library(ggplot2)
library(readxl)
library(glmnet)
trainRaw <- read.csv(file.choose(), header=TRUE, sep=",") #load the data into the HousingPrices dataframe
HousingPricesPrediction <- read.csv(file.choose(), header=TRUE, sep=",") #load the data into the HousingPrices dataframe
# The prediction file has no SalePrice; add a placeholder column so the
# two files can be stacked into one frame with identical columns.
HousingPricesPrediction <- mutate(HousingPricesPrediction, SalePrice = 0)
trainRaw <- rbind(trainRaw,HousingPricesPrediction)
# Hold out Ids 1101-1460 for testing; train on Ids <= 1100.
HousingPricesTesting<-subset(trainRaw, (Id>1100 & Id<=1460))
HousingPricesTraining<-subset(trainRaw, Id<=1100)
# regression with all variables didnt work so we do the following to check if ....
# ....there is any factors that we have to drop due to singular results
c <- sapply(HousingPricesTraining, function(x) is.numeric(x))
numCols <- HousingPricesTraining[, c]
l <- sapply(HousingPricesTraining, function(x) is.factor(x))
m <- HousingPricesTraining[, l]
# Flag single-level factors, which would make the model matrix singular.
ifelse(n <- sapply(m, function(x) length(levels(x))) == 1, "DROP", "NODROP")
# I created transforms for the Age and Remod variables to see if a relationship there exists
# Helper: append years-since-remodel (HouseRemod) and years-since-built
# (HouseAge), measured from `reference_year`. Replaces four copy-pasted
# cbind() blocks; column order (HouseRemod then HouseAge) matches the
# original cbind() calls.
add_age_columns <- function(df, reference_year = 2020) {
  df$HouseRemod <- reference_year - df$YearRemodAdd
  df$HouseAge <- reference_year - df$YearBuilt
  df
}
HousingPricesTesting <- add_age_columns(HousingPricesTesting)
HousingPricesTraining <- add_age_columns(HousingPricesTraining)
HousingPricesPrediction <- add_age_columns(HousingPricesPrediction)
trainRaw <- add_age_columns(trainRaw)
# check correlation
# Pairwise correlations among numeric columns, complete cases only.
correlationMatrix <- cor(numCols,use="complete.obs")
# NOTE(review): the empty `panel=` arguments pass *missing* values to
# corrgram, which then falls back to its defaults -- confirm this renders
# as intended rather than specifying the panels explicitly.
corrgram(correlationMatrix, order = TRUE, panel=, lower.panel=, upper.panel=, text.panel=, diag.panel=)
# Rank candidate predictors by their correlation with SalePrice.
SalePriceCor <- correlationMatrix[,"SalePrice"]
sort(SalePriceCor, decreasing = TRUE)
#check plots
pairs(numCols[c(38,2:6)])
pairs(HousingPricesTraining[c(81,2:13)])
pairs(cbind(m[c(31:43)],HousingPricesTraining[c(81)]))
# Data Cleanup
# Helper: replace values at or beyond the given quantile bounds with NA so
# extreme outliers do not dominate the regression fits. Both tails are
# NA'd, including values equal to the lower quantile, mirroring the
# original nested-ifelse logic. Replaces six copy-pasted blocks that also
# referenced HousingPricesTraining$col inside mutate() (an anti-pattern --
# bare column names are used instead).
quantile_trim_na <- function(x, probs = c(0, 0.99), na.rm = TRUE) {
  q <- quantile(x, probs = probs, na.rm = na.rm)
  ifelse(x <= q[1] | x >= q[2], NA, x)
}
HousingPricesTraining <- mutate(
  HousingPricesTraining,
  LotArea     = quantile_trim_na(LotArea, na.rm = FALSE),
  MasVnrArea  = quantile_trim_na(MasVnrArea),
  TotalBsmtSF = quantile_trim_na(TotalBsmtSF),
  X1stFlrSF   = quantile_trim_na(X1stFlrSF),
  GrLivArea   = quantile_trim_na(GrLivArea),
  HouseAge    = quantile_trim_na(HouseAge)
)
# HousingPricesTraining <- drop_na(HousingPricesTraining,SalePrice)
# Exploratory plots of (log) SalePrice against candidate predictors, used
# to choose variable transformations for the models below.
plot(SalePrice ~ TotRmsAbvGrd, data=HousingPricesTraining)
plot(log(SalePrice) ~ log(HouseAge), data=HousingPricesTraining)
plot(log(SalePrice) ~ log(HouseRemod), data=HousingPricesTraining)
# model 1 - HouseAge Transform - Use Log Log
# Baseline: simple log-log regression of price on house age.
fit <- lm(log(SalePrice) ~ log(HouseAge), data = HousingPricesTraining)
summary(fit)
plot(log(SalePrice) ~ OverallQual, data=HousingPricesTraining)
plot(log(SalePrice) ~ LotArea, data=HousingPricesTraining)
plot(log(SalePrice) ~ LotArea, xlim=c(0,20000),ylim=c(10,14), data=HousingPricesTraining)
plot(log(SalePrice) ~ (MasVnrArea), data=HousingPricesTraining)
plot(log(SalePrice) ~ log(MasVnrArea), data=HousingPricesTraining)
plot(log(SalePrice) ~ (TotalBsmtSF), data=HousingPricesTraining)
plot(log(SalePrice) ~ (GrLivArea), data=HousingPricesTraining)
plot(log(SalePrice) ~ (TotRmsAbvGrd), data=HousingPricesTraining)
plot(log(SalePrice) ~ FullBath, data=HousingPricesTraining)
# MAIN MODEL
# Stepwise selection (AIC, both directions) on a log-price model with
# interaction terms.
fit <- stepAIC(lm(log(SalePrice) ~ log(HouseAge) + TotRmsAbvGrd + log(HouseRemod) + OverallQual + GrLivArea
                  + GarageCars*GarageArea + TotalBsmtSF + log(X1stFlrSF) + LotArea + FullBath*BsmtFullBath
                  + FullBath + BsmtFullBath + TotRmsAbvGrd*BedroomAbvGr
                  + TotalBsmtSF * log(X1stFlrSF), data = HousingPricesTraining),direction="both")
summary(fit)
# Richer model adding location and quality categoricals.
fit2 <- stepAIC(lm(log(SalePrice) ~ log(HouseAge) + TotRmsAbvGrd + log(HouseRemod) + OverallQual + GrLivArea
                   + GarageCars*GarageArea + TotalBsmtSF + log(X1stFlrSF) + LotArea + FullBath*BsmtFullBath
                   + MSSubClass + MSZoning + Neighborhood
                   + FullBath + BsmtFullBath + TotRmsAbvGrd * BedroomAbvGr + TotalBsmtSF * log(X1stFlrSF)
                   + BldgType+ OverallCond + Fireplaces+ RoofStyle + KitchenQual+GarageCars+GarageArea, data = HousingPricesTraining),direction="both")
# BUG FIX: the original re-printed summary(fit) here; the model just
# fitted is fit2.
summary(fit2)
# Back-transform the log predictions to dollar prices.
predicted.prices <- exp(predict(fit, HousingPricesTesting))
predicted.prices2 <- exp(predict(fit, HousingPricesPrediction))
predicted.prices3 <- exp(predict(fit2, HousingPricesPrediction))
# RealPrices <- cbind(RealPrices,predicted.prices2)
# RealPrices <- na.omit(RealPrices)
# Mean absolute percentage error of the main model on the hold-out set.
percent.errors.log.i <- abs((HousingPricesTesting$SalePrice - predicted.prices)/predicted.prices)*100
mean(percent.errors.log.i)
hist(predicted.prices2)
mean(na.omit(predicted.prices2))
#write to excel
# NOTE(review): hard-coded absolute Windows path -- not portable.
write.csv(predicted.prices3,"C:\\Users\\daniy\\Desktop\\predict_LASTModel.csv")
################### LASSO MODEL #######################
# Response: log price on the training rows.
Y<-log(HousingPricesTraining$SalePrice)
# Design matrix built on the FULL stacked data (trainRaw) so factor levels
# are consistent across the train / test / prediction splits. Id serves
# only as a dummy response for model.matrix; [,-1] drops the intercept.
X<-model.matrix(Id ~ log(HouseAge) + TotRmsAbvGrd + log(HouseRemod) + OverallQual + GrLivArea
                + GarageCars*GarageArea + TotalBsmtSF + log(X1stFlrSF) + LotArea + FullBath*BsmtFullBath
                + MSSubClass + MSZoning + Neighborhood
                + FullBath + BsmtFullBath + TotRmsAbvGrd * BedroomAbvGr + TotalBsmtSF * log(X1stFlrSF)
                + BldgType+ OverallCond + Fireplaces+ RoofStyle + KitchenQual+GarageCars+GarageArea
                , trainRaw)[,-1]
# X<-model.matrix(Id ~ log(HouseAge) + TotRmsAbvGrd + log(HouseRemod) + OverallQual + GrLivArea
#                + GarageCars*GarageArea + TotalBsmtSF + log(X1stFlrSF) + LotArea + FullBath*BsmtFullBath
#                + FullBath + BsmtFullBath + TotRmsAbvGrd * BedroomAbvGr
#                + TotalBsmtSF * log(X1stFlrSF), trainRaw)[,-1]
# Prepend Id so the matrix can be split on the same Id ranges as the
# data frames above.
X<-cbind(trainRaw$Id,X)
X.training<-subset(X,X[,1]<=1100)
X.testing<-subset(X, (X[,1]>1100 & X[,1]<=1460))
X.prediction<-subset(X,X[,1]>1460)
#LASSO (alpha=1)
#selecting the best penalty lambda
lasso.fit<-glmnet(x = X.training, y = Y, alpha = 1)
plot(lasso.fit, xvar = "lambda")
#create cross-validation data
crossval <- cv.glmnet(x = X.training, y = Y, alpha = 1)
plot(crossval)
#determine optimal penalty parameter, lambda
penalty.lasso <- crossval$lambda.min
#see where it was on the graph
log(penalty.lasso)
# lets zoom-in
plot(crossval,xlim=c(),ylim=c())
#estimate the model with the optimal penalty
lasso.opt.fit <- glmnet(x = X.training, y = Y, alpha = 1, lambda = penalty.lasso)
#resultant model coefficients
coef(lasso.opt.fit)
# predicting the performance on the testing set
lasso.testing_final <- exp(predict(lasso.opt.fit, s = penalty.lasso, newx = X.testing))
#calculate and display MAPE
mean(abs(lasso.testing_final- HousingPricesTesting$SalePrice)/HousingPricesTesting$SalePrice*100)
#predict
predicted.prices.log.i.lasso <- exp(predict(lasso.opt.fit, s = penalty.lasso, newx =X.prediction))
#write to excel
# NOTE(review): hard-coded absolute Windows path -- not portable.
write.csv(predicted.prices.log.i.lasso,"C:\\Users\\daniy\\Desktop\\predict_Final2.csv")
##################################
# keep variable names handy to use
# SalePrice,
# X1stFlrSF,
# GrLivArea,
# TotRmsAbvGrd,
# LotArea,
# MSSubClass,
# MSZoning,
# Alley,
# Neighborhood,
# BldgType,
# OverallQual,
# OverallCond,
# HouseAge,
# HouseRemod,
# RoofStyle
# RoofMatl,
# BsmtCond,
# BsmtFinType1,
# TotalBsmtSF,
# HeatingQC,
# CentralAir,
# FullBath
# GarageFinish,
# BedroomAbvGr,
# KitchenQual,
# GarageCars
|
/HousingPrices_MMA867.R
|
no_license
|
daniyalfarooqi/DF-MMA867-Repo
|
R
| false
| false
| 10,007
|
r
|
library(MLmetrics)
library(StatMeasures)
library(dplyr)
library(tidyr)
library(corrgram)
library(MASS)
library(Hmisc)
library(ggplot2)
library(readxl)
library(glmnet)
trainRaw <- read.csv(file.choose(), header=TRUE, sep=",") #load the data into the HousingPrices dataframe
HousingPricesPrediction <- read.csv(file.choose(), header=TRUE, sep=",") #load the data into the HousingPrices dataframe
HousingPricesPrediction <- mutate(HousingPricesPrediction, SalePrice = 0)
trainRaw <- rbind(trainRaw,HousingPricesPrediction)
HousingPricesTesting<-subset(trainRaw, (Id>1100 & Id<=1460))
HousingPricesTraining<-subset(trainRaw, Id<=1100)
# regression with all variables didnt work so we do the following to check if ....
# ....there is any factors that we have to drop due to singular results
c <- sapply(HousingPricesTraining, function(x) is.numeric(x))
numCols <- HousingPricesTraining[, c]
l <- sapply(HousingPricesTraining, function(x) is.factor(x))
m <- HousingPricesTraining[, l]
ifelse(n <- sapply(m, function(x) length(levels(x))) == 1, "DROP", "NODROP")
# I created transforms for the Age and Remod variables to see if a relationship there exists
# Helper: append years-since-remodel (HouseRemod) and years-since-built
# (HouseAge), measured from `reference_year`. Replaces four copy-pasted
# cbind() blocks; column order (HouseRemod then HouseAge) matches the
# original cbind() calls.
add_age_columns <- function(df, reference_year = 2020) {
  df$HouseRemod <- reference_year - df$YearRemodAdd
  df$HouseAge <- reference_year - df$YearBuilt
  df
}
HousingPricesTesting <- add_age_columns(HousingPricesTesting)
HousingPricesTraining <- add_age_columns(HousingPricesTraining)
HousingPricesPrediction <- add_age_columns(HousingPricesPrediction)
trainRaw <- add_age_columns(trainRaw)
# check correlation
correlationMatrix <- cor(numCols,use="complete.obs")
corrgram(correlationMatrix, order = TRUE, panel=, lower.panel=, upper.panel=, text.panel=, diag.panel=)
SalePriceCor <- correlationMatrix[,"SalePrice"]
sort(SalePriceCor, decreasing = TRUE)
#check plots
pairs(numCols[c(38,2:6)])
pairs(HousingPricesTraining[c(81,2:13)])
pairs(cbind(m[c(31:43)],HousingPricesTraining[c(81)]))
# Data Cleanup
# Helper: replace values at or beyond the given quantile bounds with NA so
# extreme outliers do not dominate the regression fits. Both tails are
# NA'd, including values equal to the lower quantile, mirroring the
# original nested-ifelse logic. Replaces six copy-pasted blocks that also
# referenced HousingPricesTraining$col inside mutate() (an anti-pattern --
# bare column names are used instead).
quantile_trim_na <- function(x, probs = c(0, 0.99), na.rm = TRUE) {
  q <- quantile(x, probs = probs, na.rm = na.rm)
  ifelse(x <= q[1] | x >= q[2], NA, x)
}
HousingPricesTraining <- mutate(
  HousingPricesTraining,
  LotArea     = quantile_trim_na(LotArea, na.rm = FALSE),
  MasVnrArea  = quantile_trim_na(MasVnrArea),
  TotalBsmtSF = quantile_trim_na(TotalBsmtSF),
  X1stFlrSF   = quantile_trim_na(X1stFlrSF),
  GrLivArea   = quantile_trim_na(GrLivArea),
  HouseAge    = quantile_trim_na(HouseAge)
)
# HousingPricesTraining <- drop_na(HousingPricesTraining,SalePrice)
plot(SalePrice ~ TotRmsAbvGrd, data=HousingPricesTraining)
plot(log(SalePrice) ~ log(HouseAge), data=HousingPricesTraining)
plot(log(SalePrice) ~ log(HouseRemod), data=HousingPricesTraining)
# model 1 - HouseAge Transform - Use Log Log
fit <- lm(log(SalePrice) ~ log(HouseAge), data = HousingPricesTraining)
summary(fit)
plot(log(SalePrice) ~ OverallQual, data=HousingPricesTraining)
plot(log(SalePrice) ~ LotArea, data=HousingPricesTraining)
plot(log(SalePrice) ~ LotArea, xlim=c(0,20000),ylim=c(10,14), data=HousingPricesTraining)
plot(log(SalePrice) ~ (MasVnrArea), data=HousingPricesTraining)
plot(log(SalePrice) ~ log(MasVnrArea), data=HousingPricesTraining)
plot(log(SalePrice) ~ (TotalBsmtSF), data=HousingPricesTraining)
plot(log(SalePrice) ~ (GrLivArea), data=HousingPricesTraining)
plot(log(SalePrice) ~ (TotRmsAbvGrd), data=HousingPricesTraining)
plot(log(SalePrice) ~ FullBath, data=HousingPricesTraining)
# MAIN MODEL
# Stepwise selection (AIC, both directions) on a log-price model with
# interaction terms.
fit <- stepAIC(lm(log(SalePrice) ~ log(HouseAge) + TotRmsAbvGrd + log(HouseRemod) + OverallQual + GrLivArea
                  + GarageCars*GarageArea + TotalBsmtSF + log(X1stFlrSF) + LotArea + FullBath*BsmtFullBath
                  + FullBath + BsmtFullBath + TotRmsAbvGrd*BedroomAbvGr
                  + TotalBsmtSF * log(X1stFlrSF), data = HousingPricesTraining),direction="both")
summary(fit)
# Richer model adding location and quality categoricals.
fit2 <- stepAIC(lm(log(SalePrice) ~ log(HouseAge) + TotRmsAbvGrd + log(HouseRemod) + OverallQual + GrLivArea
                   + GarageCars*GarageArea + TotalBsmtSF + log(X1stFlrSF) + LotArea + FullBath*BsmtFullBath
                   + MSSubClass + MSZoning + Neighborhood
                   + FullBath + BsmtFullBath + TotRmsAbvGrd * BedroomAbvGr + TotalBsmtSF * log(X1stFlrSF)
                   + BldgType+ OverallCond + Fireplaces+ RoofStyle + KitchenQual+GarageCars+GarageArea, data = HousingPricesTraining),direction="both")
# BUG FIX: the original re-printed summary(fit) here; the model just
# fitted is fit2.
summary(fit2)
# Back-transform the log predictions to dollar prices.
predicted.prices <- exp(predict(fit, HousingPricesTesting))
predicted.prices2 <- exp(predict(fit, HousingPricesPrediction))
predicted.prices3 <- exp(predict(fit2, HousingPricesPrediction))
# RealPrices <- cbind(RealPrices,predicted.prices2)
# RealPrices <- na.omit(RealPrices)
percent.errors.log.i <- abs((HousingPricesTesting$SalePrice - predicted.prices)/predicted.prices)*100
mean(percent.errors.log.i)
hist(predicted.prices2)
mean(na.omit(predicted.prices2))
#write to excel
write.csv(predicted.prices3,"C:\\Users\\daniy\\Desktop\\predict_LASTModel.csv")
################### LASSO MODEL #######################
Y<-log(HousingPricesTraining$SalePrice)
X<-model.matrix(Id ~ log(HouseAge) + TotRmsAbvGrd + log(HouseRemod) + OverallQual + GrLivArea
+ GarageCars*GarageArea + TotalBsmtSF + log(X1stFlrSF) + LotArea + FullBath*BsmtFullBath
+ MSSubClass + MSZoning + Neighborhood
+ FullBath + BsmtFullBath + TotRmsAbvGrd * BedroomAbvGr + TotalBsmtSF * log(X1stFlrSF)
+ BldgType+ OverallCond + Fireplaces+ RoofStyle + KitchenQual+GarageCars+GarageArea
, trainRaw)[,-1]
# X<-model.matrix(Id ~ log(HouseAge) + TotRmsAbvGrd + log(HouseRemod) + OverallQual + GrLivArea
# + GarageCars*GarageArea + TotalBsmtSF + log(X1stFlrSF) + LotArea + FullBath*BsmtFullBath
# + FullBath + BsmtFullBath + TotRmsAbvGrd * BedroomAbvGr
# + TotalBsmtSF * log(X1stFlrSF), trainRaw)[,-1]
X<-cbind(trainRaw$Id,X)
X.training<-subset(X,X[,1]<=1100)
X.testing<-subset(X, (X[,1]>1100 & X[,1]<=1460))
X.prediction<-subset(X,X[,1]>1460)
#LASSO (alpha=1)
#selecting the best penalty lambda
lasso.fit<-glmnet(x = X.training, y = Y, alpha = 1)
plot(lasso.fit, xvar = "lambda")
#create cross-validation data
crossval <- cv.glmnet(x = X.training, y = Y, alpha = 1)
plot(crossval)
#determine optimal penalty parameter, lambda
penalty.lasso <- crossval$lambda.min
#see where it was on the graph
log(penalty.lasso)
# lets zoom-in
plot(crossval,xlim=c(),ylim=c())
#estimate the model with the optimal penalty
lasso.opt.fit <- glmnet(x = X.training, y = Y, alpha = 1, lambda = penalty.lasso)
#resultant model coefficients
coef(lasso.opt.fit)
# predicting the performance on the testing set
lasso.testing_final <- exp(predict(lasso.opt.fit, s = penalty.lasso, newx = X.testing))
#calculate and display MAPE
mean(abs(lasso.testing_final- HousingPricesTesting$SalePrice)/HousingPricesTesting$SalePrice*100)
#predict
predicted.prices.log.i.lasso <- exp(predict(lasso.opt.fit, s = penalty.lasso, newx =X.prediction))
#write to excel
write.csv(predicted.prices.log.i.lasso,"C:\\Users\\daniy\\Desktop\\predict_Final2.csv")
##################################
# keep variable names handy to use
# SalePrice,
# X1stFlrSF,
# GrLivArea,
# TotRmsAbvGrd,
# LotArea,
# MSSubClass,
# MSZoning,
# Alley,
# Neighborhood,
# BldgType,
# OverallQual,
# OverallCond,
# HouseAge,
# HouseRemod,
# RoofStyle
# RoofMatl,
# BsmtCond,
# BsmtFinType1,
# TotalBsmtSF,
# HeatingQC,
# CentralAir,
# FullBath
# GarageFinish,
# BedroomAbvGr,
# KitchenQual,
# GarageCars
|
# Linear model for price with a store:clarity interaction; the remaining
# predictors enter additively.
# NOTE(review): the training frame is named train.tmsalary -- confirm it
# actually holds the pricing training set this formula expects.
build.lm.model.inter <- lm(price ~ carat + color + clarity +
                    cut + channel + store + store:clarity,
                    data = train.tmsalary)
|
/build_lm_model_inter_script.R
|
no_license
|
DarrelDent/P412-Assignment-2
|
R
| false
| false
| 182
|
r
|
# Linear model for price with a store:clarity interaction; the remaining
# predictors enter additively.
# NOTE(review): the training frame is named train.tmsalary -- confirm it
# actually holds the pricing training set this formula expects.
build.lm.model.inter <- lm(price ~ carat + color + clarity +
                    cut + channel + store + store:clarity,
                    data = train.tmsalary)
|
#' PaleyIPrimePower
#'
#' @param n integer
#' @return
#' Hadamard matrix
#' @export
#' @details
#' Let q = n-1, where q = 3 (mod 4) and q is a prime power; then the Hadamard
#' matrix of order q+1 is obtained. If the input satisfies these conditions it returns a Hadamard
#' matrix; otherwise returns NULL.
#'
#' @references
#' Paley, R.E.A.C. (1933). On Orthogonal matrices. J. Combin. Theory, A 57(1), 86-108.
#' @examples
#' PaleyIPrimePower(28)
#' @examples
#' PaleyIPrimePower(28)
#' #NULL
PaleyIPrimePower <- function(n){
  # Paley construction I for prime-power orders: requires q = n - 1 to be
  # a prime power (checked via is.primepower); returns NULL otherwise.
  q <- n - 1
  if (is.null(is.primepower(q))) {
    return(NULL)
  }
  # Core quadratic-residue matrix of order q from the package helper.
  Q <- QPrimePower(q)
  # Border the core: all +1 across the first row (after column 1), all -1
  # down the first column, Q in the interior. Vectorized replacement for
  # the original element-wise loops; the unused p/r decomposition of the
  # prime power was dropped.
  S <- matrix(0, nrow = n, ncol = n)
  S[1, 2:n] <- 1
  S[2:n, 1] <- -1
  S[2:n, 2:n] <- Q
  # H = S + I is the Hadamard matrix of order n.
  S + diag(n)
}
|
/R/PaleyIPrimePower.R
|
no_license
|
cran/HadamardR
|
R
| false
| false
| 876
|
r
|
#' PaleyIPrimePower
#'
#' @param n integer
#' @return
#' Hadamard matrix
#' @export
#' @details
#' Let q = n-1, where q = 3 (mod 4) and q is a prime power; then the Hadamard
#' matrix of order q+1 is obtained. If the input satisfies these conditions it returns a Hadamard
#' matrix; otherwise returns NULL.
#'
#' @references
#' Paley, R.E.A.C. (1933). On Orthogonal matrices. J. Combin. Theory, A 57(1), 86-108.
#' @examples
#' PaleyIPrimePower(28)
#' @examples
#' PaleyIPrimePower(28)
#' #NULL
PaleyIPrimePower <- function(n){
  # Paley construction I for prime-power orders: requires q = n - 1 to be
  # a prime power (checked via is.primepower); returns NULL otherwise.
  q <- n - 1
  if (is.null(is.primepower(q))) {
    return(NULL)
  }
  # Core quadratic-residue matrix of order q from the package helper.
  Q <- QPrimePower(q)
  # Border the core: all +1 across the first row (after column 1), all -1
  # down the first column, Q in the interior. Vectorized replacement for
  # the original element-wise loops; the unused p/r decomposition of the
  # prime power was dropped.
  S <- matrix(0, nrow = n, ncol = n)
  S[1, 2:n] <- 1
  S[2:n, 1] <- -1
  S[2:n, 2:n] <- Q
  # H = S + I is the Hadamard matrix of order n.
  S + diag(n)
}
|
## install required packages and get the data
library(dplyr)
zipUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipFile <- "UCI HAR Dataset.zip"
# Download the archive only if it is not already present; mode = "wb"
# (binary) is required for zip files on Windows.
if (!file.exists(zipFile)) {
  download.file(zipUrl, zipFile, mode = "wb")
}
database <- "UCI HAR Dataset"
# Unzip once; the extracted folder's existence marks that this was done.
if (!file.exists(database)) {
  unzip(zipFile)
}
## read training and test data
trainingSubjects <- read.table(file.path(database, "train", "subject_train.txt"))
trainingValues <- read.table(file.path(database, "train", "X_train.txt"))
trainingActivity <- read.table(file.path(database, "train", "y_train.txt"))
testSubjects <- read.table(file.path(database, "test", "subject_test.txt"))
testValues <- read.table(file.path(database, "test", "X_test.txt"))
testActivity <- read.table(file.path(database, "test", "y_test.txt"))
## read features and activity labels
features <- read.table(file.path(database, "features.txt"), as.is = TRUE)
activities <- read.table(file.path(database, "activity_labels.txt"))
colnames(activities) <- c("activityId", "activityLabel")
## 1.Merges traning and test sets to create one data set.
# Stack train and test row-wise; within each, bind subject id, the feature
# values, and the activity id column-wise, then name all columns.
Activity <- rbind(
  cbind(trainingSubjects, trainingValues, trainingActivity),
  cbind(testSubjects, testValues, testActivity)
)
colnames(Activity) <- c("subject", features[, 2], "activity")
## 2.Extracts only the measurements on the mean and standard deviation for each measurement.
columnsToKeep <- grepl("subject|activity|mean|std", colnames(Activity))
Activity <- Activity[, columnsToKeep]
## 3.Uses descriptive activity names to name the activities in the data set
Activity$activity <- factor(Activity$activity,
                            levels = activities[, 1], labels = activities[, 2])
## 4.Appropriately labels the data set with descriptive variable names.
# Expand the terse feature-name abbreviations into readable column names.
ActivityColumns <- colnames(Activity)
ActivityColumns <- gsub("[\\(\\)-]", "", ActivityColumns )
ActivityColumns <- gsub("^f", "frequencyDomain", ActivityColumns)
ActivityColumns <- gsub("^t", "timeDomain", ActivityColumns)
ActivityColumns <- gsub("Acc", "Accelerometer", ActivityColumns)
ActivityColumns <- gsub("Gyro", "Gyroscope", ActivityColumns)
ActivityColumns <- gsub("Mag", "Magnitude", ActivityColumns)
ActivityColumns <- gsub("Freq", "Frequency", ActivityColumns)
ActivityColumns <- gsub("mean", "Mean", ActivityColumns)
ActivityColumns <- gsub("std", "StandardDeviation", ActivityColumns)
colnames(Activity) <- ActivityColumns
## 5.From the data set in step 4, creates a second, independent tidy data set with the
## average of each variable for each activity and each subject.
# MODERNIZED: summarise_each()/funs() are deprecated in dplyr; across()
# applied to everything() averages all non-grouping columns identically.
ActivityMeans <- Activity %>%
  group_by(subject, activity) %>%
  summarise(across(everything(), mean))
write.table(ActivityMeans, "tidy_data.txt", row.names = FALSE,
            quote = FALSE)
|
/run_analysis.R
|
no_license
|
Abhishek500/Getting-and-Cleaning-Data-Course-Project
|
R
| false
| false
| 2,846
|
r
|
## install required packages and get the data
library(dplyr)
zipUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipFile <- "UCI HAR Dataset.zip"
if (!file.exists(zipFile)) {
download.file(zipUrl, zipFile, mode = "wb")
}
database <- "UCI HAR Dataset"
if (!file.exists(database)) {
unzip(zipFile)
}
## read training and test data
trainingSubjects <- read.table(file.path(database, "train", "subject_train.txt"))
trainingValues <- read.table(file.path(database, "train", "X_train.txt"))
trainingActivity <- read.table(file.path(database, "train", "y_train.txt"))
testSubjects <- read.table(file.path(database, "test", "subject_test.txt"))
testValues <- read.table(file.path(database, "test", "X_test.txt"))
testActivity <- read.table(file.path(database, "test", "y_test.txt"))
## read features and activity labels
features <- read.table(file.path(database, "features.txt"), as.is = TRUE)
activities <- read.table(file.path(database, "activity_labels.txt"))
colnames(activities) <- c("activityId", "activityLabel")
## 1.Merges traning and test sets to create one data set.
Activity <- rbind(
cbind(trainingSubjects, trainingValues, trainingActivity),
cbind(testSubjects, testValues, testActivity)
)
colnames(Activity) <- c("subject", features[, 2], "activity")
## 2.Extracts only the measurements on the mean and standard deviation for each measurement.
columnsToKeep <- grepl("subject|activity|mean|std", colnames(Activity))
Activity <- Activity[, columnsToKeep]
## 3.Uses descriptive activity names to name the activities in the data set
Activity$activity <- factor(Activity$activity,
levels = activities[, 1], labels = activities[, 2])
## 4.Appropriately labels the data set with descriptive variable names.
ActivityColumns <- colnames(Activity)
ActivityColumns <- gsub("[\\(\\)-]", "", ActivityColumns )
ActivityColumns <- gsub("^f", "frequencyDomain", ActivityColumns)
ActivityColumns <- gsub("^t", "timeDomain", ActivityColumns)
ActivityColumns <- gsub("Acc", "Accelerometer", ActivityColumns)
ActivityColumns <- gsub("Gyro", "Gyroscope", ActivityColumns)
ActivityColumns <- gsub("Mag", "Magnitude", ActivityColumns)
ActivityColumns <- gsub("Freq", "Frequency", ActivityColumns)
ActivityColumns <- gsub("mean", "Mean", ActivityColumns)
ActivityColumns <- gsub("std", "StandardDeviation", ActivityColumns)
colnames(Activity) <- ActivityColumns
## 5.From the data set in step 4, creates a second, independent tidy data set with the
## average of each variable for each activity and each subject.
# MODERNIZED: summarise_each()/funs() are deprecated in dplyr; across()
# applied to everything() averages all non-grouping columns identically.
ActivityMeans <- Activity %>%
  group_by(subject, activity) %>%
  summarise(across(everything(), mean))
write.table(ActivityMeans, "tidy_data.txt", row.names = FALSE,
            quote = FALSE)
|
# Print each element of the vector together with its 1-based position,
# advancing an explicit index until the vector is exhausted.
some_vector <- c("a", "b", "c")
index <- 1
repeat {
  if (index > length(some_vector)) {
    break
  }
  cat("Element ", index, " contains ", some_vector[index], ".\n", sep = "")
  index <- index + 1
}
|
/src/while_loop.r
|
permissive
|
fvafrCU/programmieren_in_r
|
R
| false
| false
| 185
|
r
|
# Print each element of the vector together with its 1-based position,
# advancing an explicit index until the vector is exhausted.
some_vector <- c("a", "b", "c")
index <- 1
repeat {
  if (index > length(some_vector)) {
    break
  }
  cat("Element ", index, " contains ", some_vector[index], ".\n", sep = "")
  index <- index + 1
}
|
# MetaboAnalystR statistics workflow: load concentration data, normalize
# (log transform + Pareto scaling), then run PCA and sPLS-DA, exporting
# plots along the way.
# BUG FIX: the pipeline object returned by InitDataObjects() must be
# captured as mSet; the original discarded it and then referenced an
# undefined mSet on the very next line.
mSet<-InitDataObjects("conc", "stat", FALSE)
mSet<-Read.TextData(mSet, "Replacing_with_your_file_path", "rowu", "disc");
mSet<-SanityCheckData(mSet)
mSet<-ReplaceMin(mSet);
mSet<-Normalization(mSet, "NULL", "LogNorm", "ParetoNorm", "1", ratio=FALSE, ratioNum=20)
mSet<-PlotNormSummary(mSet, "norm_0_", "png", 72, width=NA)
mSet<-PlotSampleNormSummary(mSet, "snorm_0_", "png", 72, width=NA)
# Unsupervised overview: PCA with pair/scree/2D/3D/loading/biplot outputs.
mSet<-PCA.Anal(mSet)
mSet<-PlotPCAPairSummary(mSet, "pca_pair_0_", "png", 72, width=NA, 5)
mSet<-PlotPCAScree(mSet, "pca_scree_0_", "png", 72, width=NA, 5)
mSet<-PlotPCA2DScore(mSet, "pca_score2d_0_", "png", 72, width=NA, 1,2,0.95,1,0)
mSet<-PlotPCALoading(mSet, "pca_loading_0_", "png", 72, width=NA, 1,2,"scatter", 1);
mSet<-PlotPCABiplot(mSet, "pca_biplot_0_", "png", 72, width=NA, 1,2)
mSet<-PlotPCA3DScore(mSet, "pca_score3d_0_", "json", 1,2,3)
mSet<-PlotCmpdView(mSet, "Ce", "png", 72, width=NA)
mSet<-PlotCmpdView(mSet, "Ac", "png", 72, width=NA)
mSet<-PlotCmpdView(mSet, "LPC", "png", 72, width=NA)
mSet<-PlotCmpdView(mSet, "PE", "png", 72, width=NA)
mSet<-PlotCmpdView(mSet, "TG", "png", 72, width=NA)
# Supervised analysis: sPLS-DA with 10, then 15, variables per component.
mSet<-SPLSR.Anal(mSet, 5, 10, "same")
mSet<-PlotSPLSPairSummary(mSet, "spls_pair_0_", "png", 72, width=NA, 5)
mSet<-PlotSPLS2DScore(mSet, "spls_score2d_0_", "png", 72, width=NA, 1,2,0.95,1,0)
mSet<-PlotSPLS3DScore(mSet, "spls_score3d_0_", "json")
mSet<-PlotSPLSLoading(mSet, "spls_loading_0_", "png", 72, width=NA, 1,"overview");
mSet<-PlotSPLSDA.Classification(mSet, "spls_cv_0_", "Mfold", "png", 72, width=NA)
mSet<-SPLSR.Anal(mSet, 5, 15, "same")
mSet<-PlotSPLSPairSummary(mSet, "spls_pair_1_", "png", 72, width=NA, 5)
mSet<-PlotSPLS2DScore(mSet, "spls_score2d_1_", "png", 72, width=NA, 1,2,0.95,1,0)
mSet<-PlotSPLS3DScore(mSet, "spls_score3d_1_", "json")
mSet<-PlotSPLSLoading(mSet, "spls_loading_1_", "png", 72, width=NA, 1,"overview");
mSet<-PlotSPLSDA.Classification(mSet, "spls_cv_1_", "Mfold", "png", 72, width=NA)
# High-resolution (600 dpi) re-export of the final loading plot.
mSet<-PlotSPLSLoading(mSet, "spls_loading_1_", "png", 600, width=NA, 1,"overview");
|
/output/3Genotypes_sum_lipid_class_sPLSD_Loadings.R
|
no_license
|
rr-lab/Zea-Lip
|
R
| false
| false
| 2,003
|
r
|
# MetaboAnalystR statistics workflow: load concentration data, normalize
# (log transform + Pareto scaling), then run PCA and sPLS-DA, exporting
# plots along the way.
# BUG FIX: the pipeline object returned by InitDataObjects() must be
# captured as mSet; the original discarded it and then referenced an
# undefined mSet on the very next line.
mSet<-InitDataObjects("conc", "stat", FALSE)
mSet<-Read.TextData(mSet, "Replacing_with_your_file_path", "rowu", "disc");
mSet<-SanityCheckData(mSet)
mSet<-ReplaceMin(mSet);
mSet<-Normalization(mSet, "NULL", "LogNorm", "ParetoNorm", "1", ratio=FALSE, ratioNum=20)
mSet<-PlotNormSummary(mSet, "norm_0_", "png", 72, width=NA)
mSet<-PlotSampleNormSummary(mSet, "snorm_0_", "png", 72, width=NA)
# Unsupervised overview: PCA with pair/scree/2D/3D/loading/biplot outputs.
mSet<-PCA.Anal(mSet)
mSet<-PlotPCAPairSummary(mSet, "pca_pair_0_", "png", 72, width=NA, 5)
mSet<-PlotPCAScree(mSet, "pca_scree_0_", "png", 72, width=NA, 5)
mSet<-PlotPCA2DScore(mSet, "pca_score2d_0_", "png", 72, width=NA, 1,2,0.95,1,0)
mSet<-PlotPCALoading(mSet, "pca_loading_0_", "png", 72, width=NA, 1,2,"scatter", 1);
mSet<-PlotPCABiplot(mSet, "pca_biplot_0_", "png", 72, width=NA, 1,2)
mSet<-PlotPCA3DScore(mSet, "pca_score3d_0_", "json", 1,2,3)
mSet<-PlotCmpdView(mSet, "Ce", "png", 72, width=NA)
mSet<-PlotCmpdView(mSet, "Ac", "png", 72, width=NA)
mSet<-PlotCmpdView(mSet, "LPC", "png", 72, width=NA)
mSet<-PlotCmpdView(mSet, "PE", "png", 72, width=NA)
mSet<-PlotCmpdView(mSet, "TG", "png", 72, width=NA)
# Supervised analysis: sPLS-DA with 10, then 15, variables per component.
mSet<-SPLSR.Anal(mSet, 5, 10, "same")
mSet<-PlotSPLSPairSummary(mSet, "spls_pair_0_", "png", 72, width=NA, 5)
mSet<-PlotSPLS2DScore(mSet, "spls_score2d_0_", "png", 72, width=NA, 1,2,0.95,1,0)
mSet<-PlotSPLS3DScore(mSet, "spls_score3d_0_", "json")
mSet<-PlotSPLSLoading(mSet, "spls_loading_0_", "png", 72, width=NA, 1,"overview");
mSet<-PlotSPLSDA.Classification(mSet, "spls_cv_0_", "Mfold", "png", 72, width=NA)
mSet<-SPLSR.Anal(mSet, 5, 15, "same")
mSet<-PlotSPLSPairSummary(mSet, "spls_pair_1_", "png", 72, width=NA, 5)
mSet<-PlotSPLS2DScore(mSet, "spls_score2d_1_", "png", 72, width=NA, 1,2,0.95,1,0)
mSet<-PlotSPLS3DScore(mSet, "spls_score3d_1_", "json")
mSet<-PlotSPLSLoading(mSet, "spls_loading_1_", "png", 72, width=NA, 1,"overview");
mSet<-PlotSPLSDA.Classification(mSet, "spls_cv_1_", "Mfold", "png", 72, width=NA)
# High-resolution (600 dpi) re-export of the final loading plot.
mSet<-PlotSPLSLoading(mSet, "spls_loading_1_", "png", 600, width=NA, 1,"overview");
|
# NOTE(review): install.packages() calls inside a script re-install on
# every run; consider running these once interactively instead.
install.packages("tidyverse")
install.packages("leaflet")
install.packages("data.table")
library("data.table")
library("ggplot2")
library("dplyr")
library("tidyr")
library("lubridate")
library("tibble")
library("leaflet")
library("scales")
# Raw incident-level crime records and weather data for Detroit.
crime_raw <- read.csv("RMS_Crime_Incidents.csv")
weather_raw <- read.csv("Detroit_Weather.csv")
# Distinct offense categories present in the data.
# BUG FIX: `crime` is not created until later in the script; the distinct
# categories must be taken from the raw data frame.
unique_offenses <- unique(crime_raw[c("offense_category")])
#1711140010
# Offense categories treated as violent vs. property crime.
violent_crime <- c("ASSAULT","AGGRAVATED ASSAULT","SEXUAL ASSAULT","HOMICIDE","KIDNAPPING")
property_crime <- c("ROBBERY","LARCENY","ARSON","STOLEN VEHICLE","BURGLARY","DAMAGE TO PROPERTY","STOLEN PROPERTY")
#violent_crime <- scan("violent_crimes.txt", what ="character")
#property_crime <- scan("property_crimes.txt", what = "character")
#crime <- crime_raw %>%
#mutate(offense_description = replace(offense_description, offense_category == "AGGRAVATED ASSAULT", "AGGRAVATED / FELONIOUS ASSAULT"))
crime_raw %>%
group_by(year) %>%
summarize(count = n()) %>%
arrange(desc(year))
crime <- crime_raw %>%
rename(date = incident_timestamp) %>%
mutate(date = ymd_hms(date)) %>%
mutate(date = date(date)) %>%
filter(year > 2016 & year <= 2020) %>%
add_column(crime_type = NA) %>%
add_column(month = NA) %>%
mutate(month = month(date, TRUE))
`%notin%` <- Negate(`%in%`)
crime <- within(crime, {
crime_type[offense_category %notin% (violent_crime) || (property_crime)] = "other"
crime_type[offense_category %in% (violent_crime)] = "violent"
crime_type[offense_category %in% (property_crime)] = "property"
})
crime %>%
group_by(crime_type) %>%
summarize(count = n())
crime <- crime %>%
filter(crime_type != "other")
data <- crime %>% filter(year == 2020)
data$popup <- paste("<b>Incident #: </b>", data$crime_id, "<br>", "<b>Category: </b>", data$offense_category,
"<br>", "<b>Description: </b>", data$offense_description,
"<br>", "<b>Day of week: </b>", data$day_of_week,
"<br>", "<b>Date: </b>", data$date,
"<br>", "<b>Time: </b>", data$incident_time,
"<br>", "<b>Neighborhood: </b>", data$neighborhood,
"<br>", "<b>Longitude: </b>", data$longitude,
"<br>", "<b>Latitude: </b>", data$latitude)
leaflet(data, width = "100%") %>% addTiles() %>%
addTiles(group = "OSM (default)") %>%
addProviderTiles(provider = "Esri.WorldStreetMap",group = "World StreetMap") %>%
#addProviderTiles(provider = "Esri.WorldImagery",group = "World Imagery") %>%
#addProviderTiles(provider = "NASAGIBS.ViirsEarthAtNight2012",group = "Nighttime Imagery") %>%
addMarkers(lng = ~longitude, lat = ~latitude, popup = data$popup, clusterOptions = markerClusterOptions()) %>%
addLayersControl(
baseGroups = c("OSM (default)","World StreetMap"),
options = layersControlOptions(collapsed = FALSE)
)
crime_daily <- crime %>%
mutate(date = as.Date(date, "%m/%d/%Y")) %>%
group_by(date) %>%
summarize(count = n()) %>%
arrange(date)
crime_daily %>%
ggplot(aes(date, count))+
geom_line(color = "orange")+
geom_smooth(color = "black")
crime %>%
group_by(month) %>%
summarize(count= n()) %>%
arrange(month)
crime_month <- crime %>%
group_by(month, year) %>%
summarize(count= n()) %>%
arrange(month)
crime_month %>%
ggplot(aes(month, count, group = 1))+
geom_area(fill = "orange")+
scale_y_continuous(expand = c(0, 0), limits = c(0, NA))+
facet_wrap(~year)
crime_month <-crime %>%
mutate(days_in_month = days_in_month(date)) %>%
group_by(month, year, days_in_month) %>%
summarize(count= n()) %>%
mutate(daily_crime_rate = count/days_in_month)
crime_month %>%
ggplot(aes(month, daily_crime_rate, group = 1))+
geom_area(fill = "orange")+
scale_y_continuous(expand = c(0, 0), limits = c(0, NA))+
facet_wrap(~year)
weather <- weather_raw %>%
select(DATE, HourlyDryBulbTemperature) %>%
rename(temperature = HourlyDryBulbTemperature, date = DATE)
weather <- weather %>% mutate(date = ymd_hms(date))
weather$date <- round_date(weather$date, unit="15 minutes")
weather$hour_of_day <- hour(weather$date)
weather$date <- date(weather$date)
weather <- unique(weather[c("hour_of_day", "date", "temperature")])
weather <- weather[!(is.na(weather$temperature) | weather$temperature==""), ]
weather$temperature = as.integer(weather$temperature)
crime <- left_join(crime, weather, by = c("date", "hour_of_day")) %>%
distinct(arrest_charge, offense_category, date, crime_id, .keep_all = TRUE) %>%
select(-arrest_charge, -crime_id) %>%
fill(temperature)
crime_temp_month <- crime %>%
mutate(pop = case_when(
year == 2017 ~ 679865,
year == 2018 ~ 677155,
year == 2019 ~ 674841,
year == 2020 ~ 664139
)) %>%
group_by(month, year, pop) %>%
summarize(mean_temp = mean(temperature, na.rm =T), count = n()) %>%
mutate(crime_rate = count/pop*10000)
crime_temp_month %>%
ggplot(aes(mean_temp, crime_rate)) +
geom_point()+
geom_smooth(color = "orange", method = "lm")
crime_dow <- crime %>%
group_by(day_of_week, offense_category, hour_of_day) %>%
summarize(count = n())
hour_format <- c(paste(c(12,1:11),"AM"), paste(c(12,1:11),"PM"))
dow_format <- c("Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday")
crime_dow$day_of_week <- factor(crime_dow$day_of_week, labels = rev(dow_format), ordered = TRUE)
crime_dow$hour_of_day <- factor(crime_dow$hour_of_day, level = 0:23, label = hour_format)
ggplot(crime_dow, aes(x = hour_of_day, y = day_of_week, fill = count))+
geom_tile()+
theme(axis.text.x = element_text(angle = 90, vjust = 0.6), legend.title = element_blank(), legend.position="top", legend.direction="horizontal", legend.key.width=unit(2, "cm"), legend.key.height=unit(0.25, "cm")) +
labs(x = "Hour of Reported Crime", y = "Day of Week ", title = "Reported Property and Violent Crimes in Detroit (2016-2020)") +
scale_fill_gradient(low = "white", high = "#101ade")
# Normalize counts to a share of the total so facets with very different
# volumes remain visually comparable.
crime_dow <- crime_dow %>%
  mutate(norm = count/sum(count))
ggplot(crime_dow, aes(x = hour_of_day, y = day_of_week, fill = norm)) +
  geom_tile() +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.6, size = 4)) +
  # BUG FIX: the axis labels and title were left over from a San Francisco
  # arrests tutorial; this chart shows Detroit crime reports for 2017-2020.
  labs(x = "Hour of Reported Crime", y = "Day of Week",
       title = "Reported Property and Violent Crimes in Detroit (2017-2020), by Offense") +
  scale_fill_gradient(low = "white", high = "#2980B9") +
  facet_wrap(~ offense_category)
crime_dow_zipcode <- crime %>%
group_by(day_of_week, zip_code, hour_of_day) %>%
summarise(count = n())
crime_dow_zipcode$day_of_week <- factor(crime_dow_zipcode$day_of_week, labels = rev(dow_format), ordered = TRUE)
crime_dow_zipcode$hour_of_day <- factor(crime_dow_zipcode$hour_of_day, level = 0:23, label = hour_format)
ggplot(crime_dow_zipcode, aes(x = hour_of_day, y = day_of_week, fill = count)) +
geom_tile() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.6, size = 4)) +
labs(x = "Hour of Arrest (Local Time)", y = "Day of Week of Arrest", title = "Number of Police Arrests in San Francisco from 2007 – 2016, by Offense") +
scale_fill_gradient(low = "white", high = "#2980B9") +
facet_wrap(~ zip_code)
zip_code_database<- read.csv("detroit_zipcodes.csv")
filtered_zipcodes <-c(48203, 48212, 48236, 48239, 48243)
crime_zipcode <- crime %>%
filter(zip_code %notin% filtered_zipcodes) %>%
group_by(zip_code, crime_type) %>%
summarise(count = n())
crime_zipcode <- left_join(crime_zipcode, zip_code_database, by = "zip_code") %>%
drop_na() %>%
mutate(crime_rate = (count/Population)*1000)
crime_zipcode$zip_code <- factor(crime_zipcode$zip_code)
ggplot(crime_zipcode, aes(zip_code,crime_rate, fill = crime_type))+
geom_col() +
scale_fill_manual("crime_type", values = c("property"= "#2980B9", "violent" = "orange")) +
coord_flip()+
labs(title = "Crime Rate in Detroit by Zipcode (2016-2020)",x = "Zip Code",
y = "Crime Rate per 1000",
fill = "Crime Type")
downtown_zipcodes <- c(48243, 48226, 48201, 48202, 48216, 48207)
crime_zipcode %>%
filter(zip_code %notin% downtown_zipcodes) %>%
ggplot(aes(zip_code,crime_rate, fill = crime_type))+
geom_col() +
scale_fill_manual("crime_type", values = c("property"= "#2980B9", "violent" = "orange")) +
coord_flip()+
labs(title = "Crime Rate in Detroit by Zipcode* (2016-2020)",
caption = "* Downtown, Midtown and surrounding areas removed",
x = "Zip Code",
y = "Crime Rate per 1000",
fill = "Crime Type")
crime_dow_zipcode %>%
filter(zip_code == 48238) %>%
ggplot(aes(x = hour_of_day, y = day_of_week, fill = count)) +
geom_raster() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.6, size = 10)) +
labs(x = "Hour of Arrest (Local Time)", y = "Day of Week of Arrest", title = "Number of Police Arrests in San Francisco from 2007 – 2016, by Offense") +
scale_fill_gradient(low = "white", high = "#101ade")
crime_dow_zipcode %>%
filter(zip_code == 48205) %>%
ggplot(aes(x = hour_of_day, y = day_of_week, fill = count)) +
geom_raster() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.6, size = 10)) +
labs(x = "Hour of Arrest (Local Time)", y = "Day of Week of Arrest", title = "Number of Police Arrests in San Francisco from 2007 – 2016, by Offense") +
scale_fill_gradient(low = "white", high = "#101ade")
crime$day_of_week <- factor(crime$day_of_week, labels = rev(dow_format), ordered = TRUE)
crime$hour_of_day <- factor(crime$hour_of_day, level = 0:23, label = hour_format)
|
/DPDCrime/Crime_Data.R
|
no_license
|
Yellowmanic/DPDCRIME
|
R
| false
| false
| 9,829
|
r
|
install.packages("tidyverse")
install.packages("leaflet")
install.packages("data.table")
library("data.table")
library("ggplot2")
library("dplyr")
library("tidyr")
library("lubridate")
library("tibble")
library("leaflet")
library("scales")
# --- Load raw data ---------------------------------------------------------
crime_raw <- read.csv("RMS_Crime_Incidents.csv")
weather_raw <- read.csv("Detroit_Weather.csv")

# Distinct offense categories present in the raw data.
# BUG FIX: the original subscripted `crime`, which is not defined until
# further below; the lookup must run against `crime_raw`.
unique_offenses <- unique(crime_raw[c("offense_category")])
#1711140010
# Offense categories treated as violent vs. property crime.
violent_crime <- c("ASSAULT","AGGRAVATED ASSAULT","SEXUAL ASSAULT","HOMICIDE","KIDNAPPING")
property_crime <- c("ROBBERY","LARCENY","ARSON","STOLEN VEHICLE","BURGLARY","DAMAGE TO PROPERTY","STOLEN PROPERTY")
#violent_crime <- scan("violent_crimes.txt", what ="character")
#property_crime <- scan("property_crimes.txt", what = "character")
#crime <- crime_raw %>%
#mutate(offense_description = replace(offense_description, offense_category == "AGGRAVATED ASSAULT", "AGGRAVATED / FELONIOUS ASSAULT"))

# Quick sanity check: incident counts per year, most recent first.
crime_raw %>%
  group_by(year) %>%
  summarize(count = n()) %>%
  arrange(desc(year))

# Build the working data set: parse the incident timestamp down to a Date,
# keep only 2017-2020, and add placeholder columns that are filled in below.
crime <- crime_raw %>%
  rename(date = incident_timestamp) %>%
  mutate(date = ymd_hms(date)) %>%
  mutate(date = date(date)) %>%
  filter(year > 2016 & year <= 2020) %>%
  add_column(crime_type = NA) %>%
  add_column(month = NA) %>%
  mutate(month = month(date, TRUE))

`%notin%` <- Negate(`%in%`)

# Classify each incident as violent, property, or other.
# BUG FIX: the original condition was
#   offense_category %notin% (violent_crime) || (property_crime)
# which applies the scalar `||` operator to a character vector (an error /
# nonsense test). The intended check is membership in neither list.
crime <- within(crime, {
  crime_type[offense_category %notin% c(violent_crime, property_crime)] = "other"
  crime_type[offense_category %in% violent_crime] = "violent"
  crime_type[offense_category %in% property_crime] = "property"
})
crime %>%
group_by(crime_type) %>%
summarize(count = n())
crime <- crime %>%
filter(crime_type != "other")
data <- crime %>% filter(year == 2020)
data$popup <- paste("<b>Incident #: </b>", data$crime_id, "<br>", "<b>Category: </b>", data$offense_category,
"<br>", "<b>Description: </b>", data$offense_description,
"<br>", "<b>Day of week: </b>", data$day_of_week,
"<br>", "<b>Date: </b>", data$date,
"<br>", "<b>Time: </b>", data$incident_time,
"<br>", "<b>Neighborhood: </b>", data$neighborhood,
"<br>", "<b>Longitude: </b>", data$longitude,
"<br>", "<b>Latitude: </b>", data$latitude)
leaflet(data, width = "100%") %>% addTiles() %>%
addTiles(group = "OSM (default)") %>%
addProviderTiles(provider = "Esri.WorldStreetMap",group = "World StreetMap") %>%
#addProviderTiles(provider = "Esri.WorldImagery",group = "World Imagery") %>%
#addProviderTiles(provider = "NASAGIBS.ViirsEarthAtNight2012",group = "Nighttime Imagery") %>%
addMarkers(lng = ~longitude, lat = ~latitude, popup = data$popup, clusterOptions = markerClusterOptions()) %>%
addLayersControl(
baseGroups = c("OSM (default)","World StreetMap"),
options = layersControlOptions(collapsed = FALSE)
)
crime_daily <- crime %>%
mutate(date = as.Date(date, "%m/%d/%Y")) %>%
group_by(date) %>%
summarize(count = n()) %>%
arrange(date)
crime_daily %>%
ggplot(aes(date, count))+
geom_line(color = "orange")+
geom_smooth(color = "black")
crime %>%
group_by(month) %>%
summarize(count= n()) %>%
arrange(month)
crime_month <- crime %>%
group_by(month, year) %>%
summarize(count= n()) %>%
arrange(month)
crime_month %>%
ggplot(aes(month, count, group = 1))+
geom_area(fill = "orange")+
scale_y_continuous(expand = c(0, 0), limits = c(0, NA))+
facet_wrap(~year)
crime_month <-crime %>%
mutate(days_in_month = days_in_month(date)) %>%
group_by(month, year, days_in_month) %>%
summarize(count= n()) %>%
mutate(daily_crime_rate = count/days_in_month)
crime_month %>%
ggplot(aes(month, daily_crime_rate, group = 1))+
geom_area(fill = "orange")+
scale_y_continuous(expand = c(0, 0), limits = c(0, NA))+
facet_wrap(~year)
weather <- weather_raw %>%
select(DATE, HourlyDryBulbTemperature) %>%
rename(temperature = HourlyDryBulbTemperature, date = DATE)
weather <- weather %>% mutate(date = ymd_hms(date))
weather$date <- round_date(weather$date, unit="15 minutes")
weather$hour_of_day <- hour(weather$date)
weather$date <- date(weather$date)
weather <- unique(weather[c("hour_of_day", "date", "temperature")])
weather <- weather[!(is.na(weather$temperature) | weather$temperature==""), ]
weather$temperature = as.integer(weather$temperature)
crime <- left_join(crime, weather, by = c("date", "hour_of_day")) %>%
distinct(arrest_charge, offense_category, date, crime_id, .keep_all = TRUE) %>%
select(-arrest_charge, -crime_id) %>%
fill(temperature)
crime_temp_month <- crime %>%
mutate(pop = case_when(
year == 2017 ~ 679865,
year == 2018 ~ 677155,
year == 2019 ~ 674841,
year == 2020 ~ 664139
)) %>%
group_by(month, year, pop) %>%
summarize(mean_temp = mean(temperature, na.rm =T), count = n()) %>%
mutate(crime_rate = count/pop*10000)
crime_temp_month %>%
ggplot(aes(mean_temp, crime_rate)) +
geom_point()+
geom_smooth(color = "orange", method = "lm")
crime_dow <- crime %>%
group_by(day_of_week, offense_category, hour_of_day) %>%
summarize(count = n())
hour_format <- c(paste(c(12,1:11),"AM"), paste(c(12,1:11),"PM"))
dow_format <- c("Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday")
crime_dow$day_of_week <- factor(crime_dow$day_of_week, labels = rev(dow_format), ordered = TRUE)
crime_dow$hour_of_day <- factor(crime_dow$hour_of_day, level = 0:23, label = hour_format)
ggplot(crime_dow, aes(x = hour_of_day, y = day_of_week, fill = count))+
geom_tile()+
theme(axis.text.x = element_text(angle = 90, vjust = 0.6), legend.title = element_blank(), legend.position="top", legend.direction="horizontal", legend.key.width=unit(2, "cm"), legend.key.height=unit(0.25, "cm")) +
labs(x = "Hour of Reported Crime", y = "Day of Week ", title = "Reported Property and Violent Crimes in Detroit (2016-2020)") +
scale_fill_gradient(low = "white", high = "#101ade")
# Normalize counts to a share of the total so facets with very different
# volumes remain visually comparable.
crime_dow <- crime_dow %>%
  mutate(norm = count/sum(count))
ggplot(crime_dow, aes(x = hour_of_day, y = day_of_week, fill = norm)) +
  geom_tile() +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.6, size = 4)) +
  # BUG FIX: the axis labels and title were left over from a San Francisco
  # arrests tutorial; this chart shows Detroit crime reports for 2017-2020.
  labs(x = "Hour of Reported Crime", y = "Day of Week",
       title = "Reported Property and Violent Crimes in Detroit (2017-2020), by Offense") +
  scale_fill_gradient(low = "white", high = "#2980B9") +
  facet_wrap(~ offense_category)
crime_dow_zipcode <- crime %>%
group_by(day_of_week, zip_code, hour_of_day) %>%
summarise(count = n())
crime_dow_zipcode$day_of_week <- factor(crime_dow_zipcode$day_of_week, labels = rev(dow_format), ordered = TRUE)
crime_dow_zipcode$hour_of_day <- factor(crime_dow_zipcode$hour_of_day, level = 0:23, label = hour_format)
ggplot(crime_dow_zipcode, aes(x = hour_of_day, y = day_of_week, fill = count)) +
geom_tile() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.6, size = 4)) +
labs(x = "Hour of Arrest (Local Time)", y = "Day of Week of Arrest", title = "Number of Police Arrests in San Francisco from 2007 – 2016, by Offense") +
scale_fill_gradient(low = "white", high = "#2980B9") +
facet_wrap(~ zip_code)
zip_code_database<- read.csv("detroit_zipcodes.csv")
filtered_zipcodes <-c(48203, 48212, 48236, 48239, 48243)
crime_zipcode <- crime %>%
filter(zip_code %notin% filtered_zipcodes) %>%
group_by(zip_code, crime_type) %>%
summarise(count = n())
crime_zipcode <- left_join(crime_zipcode, zip_code_database, by = "zip_code") %>%
drop_na() %>%
mutate(crime_rate = (count/Population)*1000)
crime_zipcode$zip_code <- factor(crime_zipcode$zip_code)
ggplot(crime_zipcode, aes(zip_code,crime_rate, fill = crime_type))+
geom_col() +
scale_fill_manual("crime_type", values = c("property"= "#2980B9", "violent" = "orange")) +
coord_flip()+
labs(title = "Crime Rate in Detroit by Zipcode (2016-2020)",x = "Zip Code",
y = "Crime Rate per 1000",
fill = "Crime Type")
downtown_zipcodes <- c(48243, 48226, 48201, 48202, 48216, 48207)
crime_zipcode %>%
filter(zip_code %notin% downtown_zipcodes) %>%
ggplot(aes(zip_code,crime_rate, fill = crime_type))+
geom_col() +
scale_fill_manual("crime_type", values = c("property"= "#2980B9", "violent" = "orange")) +
coord_flip()+
labs(title = "Crime Rate in Detroit by Zipcode* (2016-2020)",
caption = "* Downtown, Midtown and surrounding areas removed",
x = "Zip Code",
y = "Crime Rate per 1000",
fill = "Crime Type")
crime_dow_zipcode %>%
filter(zip_code == 48238) %>%
ggplot(aes(x = hour_of_day, y = day_of_week, fill = count)) +
geom_raster() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.6, size = 10)) +
labs(x = "Hour of Arrest (Local Time)", y = "Day of Week of Arrest", title = "Number of Police Arrests in San Francisco from 2007 – 2016, by Offense") +
scale_fill_gradient(low = "white", high = "#101ade")
crime_dow_zipcode %>%
filter(zip_code == 48205) %>%
ggplot(aes(x = hour_of_day, y = day_of_week, fill = count)) +
geom_raster() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.6, size = 10)) +
labs(x = "Hour of Arrest (Local Time)", y = "Day of Week of Arrest", title = "Number of Police Arrests in San Francisco from 2007 – 2016, by Offense") +
scale_fill_gradient(low = "white", high = "#101ade")
crime$day_of_week <- factor(crime$day_of_week, labels = rev(dow_format), ordered = TRUE)
crime$hour_of_day <- factor(crime$hour_of_day, level = 0:23, label = hour_format)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_LN.R
\docType{class}
\name{LNoutput-class}
\alias{LNoutput-class}
\title{make S4 object}
\description{
make S4 object
}
|
/man/LNoutput-class.Rd
|
no_license
|
jtmancilla/LexisNexisTools
|
R
| false
| true
| 203
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_LN.R
\docType{class}
\name{LNoutput-class}
\alias{LNoutput-class}
\title{make S4 object}
\description{
make S4 object
}
|
# packages
library(ggplot2)
# BUG FIX: the script pipes with `%>%` and calls `count()`, both of which
# come from dplyr (pipe re-exported from magrittr); only ggplot2 was loaded,
# so the script failed unless dplyr happened to be attached already.
library(dplyr)

# A few descriptive bar charts of the survey data frame `x` (assumed to be
# built elsewhere with the columns referenced below -- TODO confirm).
# NOTE(review): `guides(fill = FALSE)` is deprecated in ggplot2 >= 3.3.4;
# the modern spelling is `guides(fill = "none")`. Kept as-is for
# compatibility with older ggplot2 versions.

# Education level distribution (codes 1-6 relabeled to degree names).
edulvl_ggpot <- ggplot(data = x, aes(edulvl, fill = 'indianred')) + geom_bar() + theme_minimal() +
  labs(y = '# of people', x = 'Education level') +
  scale_x_discrete(limits = 1:6, labels = c("no degree","GED/high school",
                                            "college no degree", "associate degree",
                                            'bachelor/professional', 'master/phd')) +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE)

# Self-assessed ability to overcome illness (Likert scale plus NA).
adover_ggplot <- ggplot(data = x %>% count(adover42), aes(adover42, weight = n, fill = 'indianred')) +
  geom_bar() + theme_minimal() +
  labs(y = '# of people', x = 'Can overcome illnes on his/her own') +
  scale_x_discrete(labels = c("disagree strongly", "disagree somewhat",
                              "uncertain", "agree somewhat",
                              'agree strongly', 'NA')) +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE)

# Respondents per US census region.
regions_ggplot <- ggplot(data = x %>% count(region12), aes(region12, weight = n, fill = 'indianred')) +
  geom_bar() + theme_minimal() +
  labs(y = '# of people', x = 'US region') +
  scale_x_discrete(labels = c("Northeast", "Midwest",
                              "South", "West")) +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE)

# Marital status distribution.
married_ggplot <- ggplot(data = x %>% count(marry12x), aes(marry12x, weight = n, fill = 'indianred')) +
  geom_bar() + theme_minimal() +
  labs(y = '# of people', x = 'Marriage status') +
  scale_x_discrete(labels = c("married", "widowed",
                              "divorced", "separated", 'never married')) +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE)

# Insurance coverage type.
insurance_ggplot <- ggplot(data = x %>% count(inscov12), aes(inscov12, weight = n, fill = 'indianred')) +
  geom_bar() + theme_minimal() +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE) +
  labs(y = '# of people', x = 'Type of insurance') +
  scale_x_discrete(labels = c("private", "public",
                              "uninsured"))
|
/ass1/ggplot.R
|
no_license
|
lafet1/microeconometrics
|
R
| false
| false
| 2,138
|
r
|
# packages
library(ggplot2)
# BUG FIX: the script pipes with `%>%` and calls `count()`, both of which
# come from dplyr (pipe re-exported from magrittr); only ggplot2 was loaded,
# so the script failed unless dplyr happened to be attached already.
library(dplyr)

# A few descriptive bar charts of the survey data frame `x` (assumed to be
# built elsewhere with the columns referenced below -- TODO confirm).
# NOTE(review): `guides(fill = FALSE)` is deprecated in ggplot2 >= 3.3.4;
# the modern spelling is `guides(fill = "none")`. Kept as-is for
# compatibility with older ggplot2 versions.

# Education level distribution (codes 1-6 relabeled to degree names).
edulvl_ggpot <- ggplot(data = x, aes(edulvl, fill = 'indianred')) + geom_bar() + theme_minimal() +
  labs(y = '# of people', x = 'Education level') +
  scale_x_discrete(limits = 1:6, labels = c("no degree","GED/high school",
                                            "college no degree", "associate degree",
                                            'bachelor/professional', 'master/phd')) +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE)

# Self-assessed ability to overcome illness (Likert scale plus NA).
adover_ggplot <- ggplot(data = x %>% count(adover42), aes(adover42, weight = n, fill = 'indianred')) +
  geom_bar() + theme_minimal() +
  labs(y = '# of people', x = 'Can overcome illnes on his/her own') +
  scale_x_discrete(labels = c("disagree strongly", "disagree somewhat",
                              "uncertain", "agree somewhat",
                              'agree strongly', 'NA')) +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE)

# Respondents per US census region.
regions_ggplot <- ggplot(data = x %>% count(region12), aes(region12, weight = n, fill = 'indianred')) +
  geom_bar() + theme_minimal() +
  labs(y = '# of people', x = 'US region') +
  scale_x_discrete(labels = c("Northeast", "Midwest",
                              "South", "West")) +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE)

# Marital status distribution.
married_ggplot <- ggplot(data = x %>% count(marry12x), aes(marry12x, weight = n, fill = 'indianred')) +
  geom_bar() + theme_minimal() +
  labs(y = '# of people', x = 'Marriage status') +
  scale_x_discrete(labels = c("married", "widowed",
                              "divorced", "separated", 'never married')) +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE)

# Insurance coverage type.
insurance_ggplot <- ggplot(data = x %>% count(inscov12), aes(inscov12, weight = n, fill = 'indianred')) +
  geom_bar() + theme_minimal() +
  theme(axis.text.x = element_text(angle = 15)) + guides(fill = FALSE) +
  labs(y = '# of people', x = 'Type of insurance') +
  scale_x_discrete(labels = c("private", "public",
                              "uninsured"))
|
#' Sort a BibEntry Object
#'
#' Sorts a \code{BibEntry} object by specified fields. The possible fields used for sorting and
#' the order they are used in correspond with the options available in BibLaTeX.
#'
#' @param x an object of class BibEntry
#' @param decreasing logical; should the sort be increasing or decreasing?
#' @param sorting sort method to use, see \bold{Details}.
#' @param .bibstyle bibliography style; used when \code{sort} is called by \code{\link{print.BibEntry}}
#' @param ... internal use only
#' @return the sorted BibEntry object
#' @method sort BibEntry
#' @export
#' @keywords manip methods
#' @details The possible values for argument \code{sorting} are
#' \itemize{
#' \item nty - sort by name, then by title, then by year
#' \item nyt - sort by name, then by year, then title
#' \item nyvt - sort by name, year, volume, title
#' \item anyt - sort by alphabetic label, name, year, title
#' \item anyvt - sort by alphabetic label, name, year, volume, title
#' \item ynt - sort by year, name, title
#' \item ydnt - sort by year (descending), name, title
#' \item debug - sort by keys
#' \item none - no sorting is performed
#' }
#'
#' All sorting methods first consider the field presort, if available. Entries with no presort field are assigned presort
#' value \dQuote{mm}. Next the sortkey field is used.
#'
#' When sorting by name, the sortname field is used first. If it is not present, the author field is used,
#' if that is not present editor is used, and if that is not present translator is used. All of these fields are affected
#' by the value of \code{max.names} in .BibOptions()$max.names.
#'
#' When sorting by title, first the field sorttitle is considered. Similarly, when sorting by year, the field sortyear is
#' first considered.
#'
#' When sorting by volume, if the field is present it is padded to four digits with leading zeros; otherwise,
#' the string \dQuote{0000} is used.
#'
#' When sorting by alphabetic label, the labels that would be generated with the \dQuote{alphabetic} bibstyle are used.
#' First the shorthand field is considered, then label, then shortauthor, shorteditor, author, editor, and translator.
#' Refer to the BibLaTeX manual Sections 3.1.2.1 and 3.5 and Appendix C.2 for more information.
#' @references Lehman, Philipp and Kime, Philip and Boruvka, Audrey and Wright, J. (2013). The biblatex Package. \url{http://mirrors.ctan.org/macros/latex/contrib/biblatex/doc/biblatex.pdf}.
#' @seealso \code{\link{BibEntry}}, \code{\link{print.BibEntry}}, \code{\link{order}}
#' @importFrom tools bibstyle getBibstyle
#' @examples
#' file.name <- system.file("Bib", "biblatexExamples.bib", package="RefManageR")
#' bib <- suppressMessages(ReadBib(file.name)[[70:73]])
#' BibOptions(sorting = "none")
#' bib
#' sort(bib, sorting = "nyt")
#' sort(bib, sorting = "ynt")
#' BibOptions(restore.defaults = TRUE)
sort.BibEntry <- function(x, decreasing = FALSE, sorting = BibOptions()$sorting,
                          .bibstyle = BibOptions()$bib.style, ...){
  # default sorting scheme is name-title-year
  if (is.null(sorting))
    sorting <- "nty"
  # "debug" sorting and the "draft" style simply order entries by their keys
  if (sorting == 'debug' || .bibstyle == 'draft')
    return(x[order(names(x))])
  if (sorting != "none" || .bibstyle == "alphabetic"){
    # PERF: the original reconstructed the BibLaTeX style environment with
    # MakeBibLaTeX() once per sort key; build it a single time and reuse it.
    biblatex <- MakeBibLaTeX()
    aut <- biblatex$sortKeys(x)    # name key (sortname/author/editor/translator)
    yr <- biblatex$sortKeysY(x)    # year key (sortyear considered first)
    ps <- biblatex$sortKeysPS(x)   # presort / sortkey
    ttl <- biblatex$sortKeysT(x)   # title key (sorttitle considered first)
    if (sorting %in% c('nyvt', 'anyvt'))
      vol <- biblatex$sortKeysV(x) # volume key
  }
  # Alphabetic labels are needed for the "alphabetic" style and for the
  # label-first sorting schemes. Each of these conditions implies the block
  # above has executed, so `biblatex` and `yr` are defined here.
  if (.bibstyle == 'alphabetic' || sorting == 'anyt' || sorting == 'anyvt')
    alabs <- biblatex$sortKeysLA(x, yr)
  if (sorting != "none"){
    ord <- switch(sorting, nyt = order(ps, aut, yr, ttl, decreasing = decreasing),
                  nyvt = order(ps, aut, yr, vol, ttl, decreasing = decreasing),
                  anyt = order(ps, alabs, aut, yr, ttl, decreasing = decreasing),
                  anyvt = order(ps, alabs, aut, yr, vol, ttl, decreasing = decreasing),
                  ynt = order(ps, yr, aut, ttl, decreasing = decreasing),
                  ydnt = order(ps, -as.numeric(yr), aut, ttl, decreasing = decreasing),
                  order(ps, aut, ttl, yr, decreasing = decreasing))  # DEFAULT = nty
    suppressWarnings(x <- x[ord])
    aut <- aut[ord]
    if (.bibstyle == "alphabetic"){
      # disambiguate identical labels by appending 'a', 'b', ... in label order
      lab.ord <- order(alabs)
      alabs[lab.ord] <- paste0(alabs[lab.ord], unlist(lapply(rle(alabs[lab.ord])$len,
                                                  function(x){
                                                    if (x == 1)
                                                      ''
                                                    else letters[seq_len(x)]
                                                  })))
      alabs <- alabs[ord]
    }
  }
  # create labels if needed (internal calls pass return.labs via ...)
  if (hasArg(return.labs) && !length(unlist(x$.index))){
    if (.bibstyle %in% c("authoryear", "authortitle")){
      if (sorting == "none")
        aut <- MakeBibLaTeX()$sortKeys(x)
      suppressWarnings({
        # entries with no name key fall back to their title; mark duplicates
        ind <- nchar(aut) == 0L & !x$bibtype %in% c("XData", "Set")
        aut[ind] <- x$title[ind]
        x$.duplicated <- duplicated(aut)
      })
      if (.bibstyle == "authoryear"){
        tmp <- MakeAuthorYear()$GetLastNames(x)
        # sortyear could mess things up, so can't reuse yr
        yr <- sapply(unclass(x), function(dat)
                     tryCatch(year(attr(dat, "dateobj")), error = function(e) ""))
        tmp <- paste0(tmp, yr)
        lab.ord <- order(tmp)
        alabs <- character(length(x))
        alabs[lab.ord] <- unlist(lapply(rle(tmp[lab.ord])$len,
                                        function(x){
                                          if (x == 1)
                                            ''
                                          else letters[seq_len(x)]
                                        }))
      }
    }
    suppressWarnings(x$.index <- switch(.bibstyle, numeric = {
      # sequential numbers, skipping Set and XData entries
      ind <- which(!unlist(x$bibtype) %in% c('Set', 'XData'))
      index <- numeric(length(x))
      index[ind] <- seq_along(ind)
      index
    }, alphabetic = alabs, authoryear = alabs, NULL))
  }
  x
}
|
/RefManageR/R/09sort.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 6,011
|
r
|
#' Sort a BibEntry Object
#'
#' Sorts a \code{BibEntry} object by specified fields. The possible fields used for sorting and
#' the order they are used in correspond with the options available in BibLaTeX.
#'
#' @param x an object of class BibEntry
#' @param decreasing logical; should the sort be increasing or decreasing?
#' @param sorting sort method to use, see \bold{Details}.
#' @param .bibstyle bibliography style; used when \code{sort} is called by \code{\link{print.BibEntry}}
#' @param ... internal use only
#' @return the sorted BibEntry object
#' @method sort BibEntry
#' @export
#' @keywords manip methods
#' @details The possible values for argument \code{sorting} are
#' \itemize{
#' \item nty - sort by name, then by title, then by year
#' \item nyt - sort by name, then by year, then title
#' \item nyvt - sort by name, year, volume, title
#' \item anyt - sort by alphabetic label, name, year, title
#' \item anyvt - sort by alphabetic label, name, year, volume, title
#' \item ynt - sort by year, name, title
#' \item ydnt - sort by year (descending), name, title
#' \item debug - sort by keys
#' \item none - no sorting is performed
#' }
#'
#' All sorting methods first consider the field presort, if available. Entries with no presort field are assigned presort
#' value \dQuote{mm}. Next the sortkey field is used.
#'
#' When sorting by name, the sortname field is used first. If it is not present, the author field is used,
#' if that is not present editor is used, and if that is not present translator is used. All of these fields are affected
#' by the value of \code{max.names} in .BibOptions()$max.names.
#'
#' When sorting by title, first the field sorttitle is considered. Similarly, when sorting by year, the field sortyear is
#' first considered.
#'
#' When sorting by volume, if the field is present it is padded to four digits with leading zeros; otherwise,
#' the string \dQuote{0000} is used.
#'
#' When sorting by alphabetic label, the labels that would be generated with the \dQuote{alphabetic} bibstyle are used.
#' First the shorthand field is considered, then label, then shortauthor, shorteditor, author, editor, and translator.
#' Refer to the BibLaTeX manual Sections 3.1.2.1 and 3.5 and Appendix C.2 for more information.
#' @references Lehman, Philipp and Kime, Philip and Boruvka, Audrey and Wright, J. (2013). The biblatex Package. \url{http://mirrors.ctan.org/macros/latex/contrib/biblatex/doc/biblatex.pdf}.
#' @seealso \code{\link{BibEntry}}, \code{\link{print.BibEntry}}, \code{\link{order}}
#' @importFrom tools bibstyle getBibstyle
#' @examples
#' file.name <- system.file("Bib", "biblatexExamples.bib", package="RefManageR")
#' bib <- suppressMessages(ReadBib(file.name)[[70:73]])
#' BibOptions(sorting = "none")
#' bib
#' sort(bib, sorting = "nyt")
#' sort(bib, sorting = "ynt")
#' BibOptions(restore.defaults = TRUE)
sort.BibEntry <- function(x, decreasing = FALSE, sorting = BibOptions()$sorting,
                          .bibstyle = BibOptions()$bib.style, ...){
  # default sorting scheme is name-title-year
  if (is.null(sorting))
    sorting <- "nty"
  # "debug" sorting and the "draft" style simply order entries by their keys
  if (sorting == 'debug' || .bibstyle == 'draft')
    return(x[order(names(x))])
  if (sorting != "none" || .bibstyle == "alphabetic"){
    # PERF: the original reconstructed the BibLaTeX style environment with
    # MakeBibLaTeX() once per sort key; build it a single time and reuse it.
    biblatex <- MakeBibLaTeX()
    aut <- biblatex$sortKeys(x)    # name key (sortname/author/editor/translator)
    yr <- biblatex$sortKeysY(x)    # year key (sortyear considered first)
    ps <- biblatex$sortKeysPS(x)   # presort / sortkey
    ttl <- biblatex$sortKeysT(x)   # title key (sorttitle considered first)
    if (sorting %in% c('nyvt', 'anyvt'))
      vol <- biblatex$sortKeysV(x) # volume key
  }
  # Alphabetic labels are needed for the "alphabetic" style and for the
  # label-first sorting schemes. Each of these conditions implies the block
  # above has executed, so `biblatex` and `yr` are defined here.
  if (.bibstyle == 'alphabetic' || sorting == 'anyt' || sorting == 'anyvt')
    alabs <- biblatex$sortKeysLA(x, yr)
  if (sorting != "none"){
    ord <- switch(sorting, nyt = order(ps, aut, yr, ttl, decreasing = decreasing),
                  nyvt = order(ps, aut, yr, vol, ttl, decreasing = decreasing),
                  anyt = order(ps, alabs, aut, yr, ttl, decreasing = decreasing),
                  anyvt = order(ps, alabs, aut, yr, vol, ttl, decreasing = decreasing),
                  ynt = order(ps, yr, aut, ttl, decreasing = decreasing),
                  ydnt = order(ps, -as.numeric(yr), aut, ttl, decreasing = decreasing),
                  order(ps, aut, ttl, yr, decreasing = decreasing))  # DEFAULT = nty
    suppressWarnings(x <- x[ord])
    aut <- aut[ord]
    if (.bibstyle == "alphabetic"){
      # disambiguate identical labels by appending 'a', 'b', ... in label order
      lab.ord <- order(alabs)
      alabs[lab.ord] <- paste0(alabs[lab.ord], unlist(lapply(rle(alabs[lab.ord])$len,
                                                  function(x){
                                                    if (x == 1)
                                                      ''
                                                    else letters[seq_len(x)]
                                                  })))
      alabs <- alabs[ord]
    }
  }
  # create labels if needed (internal calls pass return.labs via ...)
  if (hasArg(return.labs) && !length(unlist(x$.index))){
    if (.bibstyle %in% c("authoryear", "authortitle")){
      if (sorting == "none")
        aut <- MakeBibLaTeX()$sortKeys(x)
      suppressWarnings({
        # entries with no name key fall back to their title; mark duplicates
        ind <- nchar(aut) == 0L & !x$bibtype %in% c("XData", "Set")
        aut[ind] <- x$title[ind]
        x$.duplicated <- duplicated(aut)
      })
      if (.bibstyle == "authoryear"){
        tmp <- MakeAuthorYear()$GetLastNames(x)
        # sortyear could mess things up, so can't reuse yr
        yr <- sapply(unclass(x), function(dat)
                     tryCatch(year(attr(dat, "dateobj")), error = function(e) ""))
        tmp <- paste0(tmp, yr)
        lab.ord <- order(tmp)
        alabs <- character(length(x))
        alabs[lab.ord] <- unlist(lapply(rle(tmp[lab.ord])$len,
                                        function(x){
                                          if (x == 1)
                                            ''
                                          else letters[seq_len(x)]
                                        }))
      }
    }
    suppressWarnings(x$.index <- switch(.bibstyle, numeric = {
      # sequential numbers, skipping Set and XData entries
      ind <- which(!unlist(x$bibtype) %in% c('Set', 'XData'))
      index <- numeric(length(x))
      index[ind] <- seq_along(ind)
      index
    }, alphabetic = alabs, authoryear = alabs, NULL))
  }
  x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/break.R
\name{broken.default}
\alias{broken.default}
\title{Create the model agnostic broken object}
\usage{
\method{broken}{default}(model, new_observation, data, direction = "up", ...,
baseline = 0, predict.function = predict)
}
\arguments{
\item{model}{a predictive model (any model accepted by \code{predict.function})}
\item{new_observation}{a new observation with columns that correspond to variables used in the model}
\item{data}{the original data used for model fitting; should have the same columns as the 'new_observation'.}
\item{direction}{either 'up' or 'down'; determines the exploration strategy}
\item{...}{other parameters}
\item{baseline}{the origin/baseline for the breakDown plots, where the rectangles start. It may be a number or the character "Intercept". In the latter case the origin will be set to the model intercept.}
\item{predict.function}{function that will calculate predictions out of the model. It shall return a single numeric value per observation. For classification it may be a probability of the default class.}
}
\value{
an object of the broken class
}
\description{
Create the model agnostic broken object
}
\examples{
library(breakDown)
library(randomForest)
library(ggplot2)
set.seed(1313)
model <- randomForest(factor(left)~., data = HR_data, family = "binomial", maxnodes = 5)
predict.function <- function(model, new_observation)
predict(model, new_observation, type="prob")[,2]
predict.function(model, HR_data[11,-7])
explain_1 <- broken(model, HR_data[11,-7], data = HR_data[,-7],
predict.function = predict.function, direction = "down")
explain_1
plot(explain_1) + ggtitle("breakDown plot (direction=down) for randomForest model")
}
|
/man/broken.default.Rd
|
no_license
|
shan-huang/breakDown
|
R
| false
| true
| 1,709
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/break.R
\name{broken.default}
\alias{broken.default}
\title{Create the model agnostic broken object}
\usage{
\method{broken}{default}(model, new_observation, data, direction = "up", ...,
baseline = 0, predict.function = predict)
}
\arguments{
\item{model}{a predictive model (any model accepted by \code{predict.function})}
\item{new_observation}{a new observation with columns that correspond to variables used in the model}
\item{data}{the original data used for model fitting; should have the same columns as the 'new_observation'.}
\item{direction}{either 'up' or 'down'; determines the exploration strategy}
\item{...}{other parameters}
\item{baseline}{the origin/baseline for the breakDown plots, where the rectangles start. It may be a number or the character "Intercept". In the latter case the origin will be set to the model intercept.}
\item{predict.function}{function that will calculate predictions out of the model. It shall return a single numeric value per observation. For classification it may be a probability of the default class.}
}
\value{
an object of the broken class
}
\description{
Create the model agnostic broken object
}
\examples{
library(breakDown)
library(randomForest)
library(ggplot2)
set.seed(1313)
model <- randomForest(factor(left)~., data = HR_data, family = "binomial", maxnodes = 5)
predict.function <- function(model, new_observation)
predict(model, new_observation, type="prob")[,2]
predict.function(model, HR_data[11,-7])
explain_1 <- broken(model, HR_data[11,-7], data = HR_data[,-7],
predict.function = predict.function, direction = "down")
explain_1
plot(explain_1) + ggtitle("breakDown plot (direction=down) for randomForest model")
}
|
# Package-private environment holding Rfiglet run-time state.
.RfigletEnv <- new.env()
# Character vector of the FIGlet font names known to the package;
# presumably used to validate the `font` argument of the rendering
# functions (definition of those functions not visible here).
.__Rfiglet_fonts <- c(
  "3-d", "3x5", "4max", "5lineoblique", "acrobatic", "alligator2",
  "alligator", "alphabet", "arrows", "avatar", "B1FF", "banner3-D",
  "banner3", "banner4", "banner", "barbwire", "basic", "bell",
  "bigchief", "bigfig", "big", "binary", "block", "bolger", "bright",
  "broadway", "bubble", "bulbhead", "calgphy2", "caligraphy",
  "catwalk", "chunky", "coinstak", "colossal", "computer", "contessa",
  "contrast", "cosmic", "crawford", "cyberlarge", "cybermedium",
  "cybersmall", "diamond", "digital", "doh", "doom", "dotmatrix",
  "double", "drpepper", "eftifont", "eftipiti", "eftirobot",
  "eftitalic", "eftiwall", "epic", "fender", "fourtops", "fraktur",
  "fuzzy", "goofy", "gothic", "gradient", "graffiti", "hex",
  "hollywood", "invita", "isometric1", "isometric2", "isometric3",
  "isometric4", "italic", "ivrit", "jazmine", "jerusalem", "katakana",
  "kban", "keyboard", "larry3d", "lcd", "lean", "linux", "lockergnome",
  "madrid", "marquee", "mike", "mini", "mirror", "mnemonic", "morse",
  "moscow", "nancyj-fancy", "nancyj", "nancyj-improved",
  "nancyj-underline", "nipples", "ntgreek", "o8", "ogre", "oldbanner",
  "os2", "pawp", "peaks", "pebbles", "pepper", "poison", "puffy",
  "pyramid", "rectangles", "rev", "roman", "rot13", "rounded",
  "rowancap", "rozzo", "runic", "sblood", "script", "serifcap",
  "shadow", "shimrod", "short", "slant", "slide", "slscript", "small",
  "smisome1", "smkeyboard", "smpoison", "smscript", "smshadow",
  "smslant", "speed", "s-relief", "stacey", "stampatello", "standard",
  "starwars", "stellar", "stop", "straight", "tanja", "term", "thick",
  "thin", "threepoint", "ticks", "ticksslant", "tiles", "tinker-toy",
  "tombstone", "trek", "tubular", "twopoint", "univers", "usaflag",
  "weird")
|
/R/fonts.r
|
permissive
|
talgalili/Rfiglet
|
R
| false
| false
| 1,800
|
r
|
# Package-private environment holding Rfiglet run-time state.
.RfigletEnv <- new.env()
# Character vector of the FIGlet font names known to the package;
# presumably used to validate the `font` argument of the rendering
# functions (definition of those functions not visible here).
.__Rfiglet_fonts <- c(
  "3-d", "3x5", "4max", "5lineoblique", "acrobatic", "alligator2",
  "alligator", "alphabet", "arrows", "avatar", "B1FF", "banner3-D",
  "banner3", "banner4", "banner", "barbwire", "basic", "bell",
  "bigchief", "bigfig", "big", "binary", "block", "bolger", "bright",
  "broadway", "bubble", "bulbhead", "calgphy2", "caligraphy",
  "catwalk", "chunky", "coinstak", "colossal", "computer", "contessa",
  "contrast", "cosmic", "crawford", "cyberlarge", "cybermedium",
  "cybersmall", "diamond", "digital", "doh", "doom", "dotmatrix",
  "double", "drpepper", "eftifont", "eftipiti", "eftirobot",
  "eftitalic", "eftiwall", "epic", "fender", "fourtops", "fraktur",
  "fuzzy", "goofy", "gothic", "gradient", "graffiti", "hex",
  "hollywood", "invita", "isometric1", "isometric2", "isometric3",
  "isometric4", "italic", "ivrit", "jazmine", "jerusalem", "katakana",
  "kban", "keyboard", "larry3d", "lcd", "lean", "linux", "lockergnome",
  "madrid", "marquee", "mike", "mini", "mirror", "mnemonic", "morse",
  "moscow", "nancyj-fancy", "nancyj", "nancyj-improved",
  "nancyj-underline", "nipples", "ntgreek", "o8", "ogre", "oldbanner",
  "os2", "pawp", "peaks", "pebbles", "pepper", "poison", "puffy",
  "pyramid", "rectangles", "rev", "roman", "rot13", "rounded",
  "rowancap", "rozzo", "runic", "sblood", "script", "serifcap",
  "shadow", "shimrod", "short", "slant", "slide", "slscript", "small",
  "smisome1", "smkeyboard", "smpoison", "smscript", "smshadow",
  "smslant", "speed", "s-relief", "stacey", "stampatello", "standard",
  "starwars", "stellar", "stop", "straight", "tanja", "term", "thick",
  "thin", "threepoint", "ticks", "ticksslant", "tiles", "tinker-toy",
  "tombstone", "trek", "tubular", "twopoint", "univers", "usaflag",
  "weird")
|
# Analysis script for the Arabic adjective-ordering experiment
# (2-order-preference-expanded): load per-round Submiterator CSVs,
# apply participant exclusions, mirror each two-predicate trial so
# both predicates are represented, and plot preferred distance from
# the noun by adjective class.
library(ggplot2)
library(lme4)
library(hydroGOF)
library(dplyr)
source("../results/helpers.R")   # presumably defines bootsSummary() used below
setwd("~/git/arabic_adjectives/experiments/2-order-preference-expanded/Submiterator-master")
# Read and stack the 15 per-round result files; offset workerid by 9
# per round so worker ids stay unique across rounds.
num_round_dirs = 15
df1 = do.call(rbind, lapply(1:num_round_dirs, function(i) {
  return (read.csv(paste(
    'round', i, '/arabic-order-expanded.csv', sep=''),stringsAsFactors=FALSE) %>%
    mutate(workerid = (workerid + (i-1)*9)))}))
df1$workerid = paste("vi.",df1$workerid)
# Keep only the columns relevant to the analysis.
d1 = subset(df1, select=c("workerid","noun","gender","nounclass","slide_number", "predicate1", "predicate2", "class1","class2","response","language","comments","asses","gender.1","test1","test2","test3","dialect","lived","describe","years","proficiency"))
d <- d1
# --- Participant exclusions ---
# got all the test questions correct
d = d[d$test1=="correct"&d$test2=="correct"&d$test3=="correct",]
# lived more than 5 years both before and after age 8 in arabic country
d = d[d$lived=="both"&d$years=="5+",]
# describe as arabic-arabic
d = d[d$describe=="arabic-arabic",]
unique(d$language)
# Drop the multi-language self-report and empty language entries.
d = d[d$language != "البلوشية، العربية، الانجليزيه"&d$language!="",]
#d = d[d$asses=="Yes",]
length(unique(d$workerid)) #n=24
table(d$dialect)
t <- d
#####
## duplicate observations by first predicate
#####
library(tidyr)
o <- t
# Mirror each trial: swap the two predicates and flip the response so
# each predicate of the pair contributes one observation.
o$rightpredicate1 = o$predicate2
o$rightpredicate2 = o$predicate1
o$rightresponse = 1-o$response
agr = o %>%
  select(predicate1,rightpredicate1,response,rightresponse,workerid,noun,nounclass,class1,class2) %>%
  gather(predicateposition,predicate,predicate1:rightpredicate1,-workerid,-noun,-nounclass,-class1,-class2)
# For mirrored rows, substitute the flipped response and the other class.
agr$correctresponse = agr$response
agr[agr$predicateposition == "rightpredicate1",]$correctresponse = agr[agr$predicateposition == "rightpredicate1",]$rightresponse
agr$correctclass = agr$class1
agr[agr$predicateposition == "rightpredicate1",]$correctclass = agr[agr$predicateposition == "rightpredicate1",]$class2
head(agr[agr$predicateposition == "rightpredicate1",])
# Drop the now-redundant helper columns.
agr$response = NULL
agr$rightresponse = NULL
agr$class1 = NULL
agr$class2 = NULL
nrow(agr) #XXX
#write.csv(agr,"~/git/arabic_adjectives/experiments/2-order-preference-expanded/results/arabic-naturalness-duplicated.csv")
# Recode responses; NOTE(review): based on the y-axis label below this
# makes larger values mean farther from the noun -- confirm against the
# experiment's response coding.
agr$correctresponse = 1 - agr$correctresponse
agr = agr[!is.na(agr$correctresponse),]
# Mean response per predicate x class, and per class with bootstrap CIs.
adj_agr = aggregate(correctresponse~predicate*correctclass,FUN=mean,data=agr)
adj_agr
class_agr = aggregate(correctresponse~correctclass,FUN=mean,data=agr)
class_s = bootsSummary(data=agr, measurevar="correctresponse", groupvars=c("correctclass"))
#write.csv(class_s,"../results/tagalog_class_s.csv")
# Bar plot of preferred distance by adjective class (ordered by mean),
# with bootstrap confidence intervals and a 0.5 chance line.
ggplot(data=class_s,aes(x=reorder(correctclass,-correctresponse,mean),y=correctresponse))+
  geom_bar(stat="identity",fill="lightgray",color="black")+
  geom_errorbar(aes(ymin=bootsci_low, ymax=bootsci_high, x=reorder(correctclass,-correctresponse,mean), width=0.1))+
  geom_hline(yintercept=0.5,linetype="dashed") +
  xlab("\nadjective class")+
  ylab("preferred distance from noun\n")+
  ylim(0,1)+
  #labs("order\npreference")+
  theme_bw()#+
#theme(axis.text.x=element_text(angle=90,vjust=0.35,hjust=1))
#ggsave("../results/class_distance.pdf",height=3)
#ggsave("../results/LSA_class_distance.png",height=2,width=4.3)
#ggsave("../results/arabic-ordering.pdf",height=2.5,width=7)
#ggsave("../results/arabic-distance.pdf",height=2.5)
|
/experiments/2-order-preference-expanded/results/analysis.R
|
no_license
|
gscontras/arabic_adjectives
|
R
| false
| false
| 3,336
|
r
|
# Analysis script for the Arabic adjective-ordering experiment
# (2-order-preference-expanded): load per-round Submiterator CSVs,
# apply participant exclusions, mirror each two-predicate trial so
# both predicates are represented, and plot preferred distance from
# the noun by adjective class.
library(ggplot2)
library(lme4)
library(hydroGOF)
library(dplyr)
source("../results/helpers.R")   # presumably defines bootsSummary() used below
setwd("~/git/arabic_adjectives/experiments/2-order-preference-expanded/Submiterator-master")
# Read and stack the 15 per-round result files; offset workerid by 9
# per round so worker ids stay unique across rounds.
num_round_dirs = 15
df1 = do.call(rbind, lapply(1:num_round_dirs, function(i) {
  return (read.csv(paste(
    'round', i, '/arabic-order-expanded.csv', sep=''),stringsAsFactors=FALSE) %>%
    mutate(workerid = (workerid + (i-1)*9)))}))
df1$workerid = paste("vi.",df1$workerid)
# Keep only the columns relevant to the analysis.
d1 = subset(df1, select=c("workerid","noun","gender","nounclass","slide_number", "predicate1", "predicate2", "class1","class2","response","language","comments","asses","gender.1","test1","test2","test3","dialect","lived","describe","years","proficiency"))
d <- d1
# --- Participant exclusions ---
# got all the test questions correct
d = d[d$test1=="correct"&d$test2=="correct"&d$test3=="correct",]
# lived more than 5 years both before and after age 8 in arabic country
d = d[d$lived=="both"&d$years=="5+",]
# describe as arabic-arabic
d = d[d$describe=="arabic-arabic",]
unique(d$language)
# Drop the multi-language self-report and empty language entries.
d = d[d$language != "البلوشية، العربية، الانجليزيه"&d$language!="",]
#d = d[d$asses=="Yes",]
length(unique(d$workerid)) #n=24
table(d$dialect)
t <- d
#####
## duplicate observations by first predicate
#####
library(tidyr)
o <- t
# Mirror each trial: swap the two predicates and flip the response so
# each predicate of the pair contributes one observation.
o$rightpredicate1 = o$predicate2
o$rightpredicate2 = o$predicate1
o$rightresponse = 1-o$response
agr = o %>%
  select(predicate1,rightpredicate1,response,rightresponse,workerid,noun,nounclass,class1,class2) %>%
  gather(predicateposition,predicate,predicate1:rightpredicate1,-workerid,-noun,-nounclass,-class1,-class2)
# For mirrored rows, substitute the flipped response and the other class.
agr$correctresponse = agr$response
agr[agr$predicateposition == "rightpredicate1",]$correctresponse = agr[agr$predicateposition == "rightpredicate1",]$rightresponse
agr$correctclass = agr$class1
agr[agr$predicateposition == "rightpredicate1",]$correctclass = agr[agr$predicateposition == "rightpredicate1",]$class2
head(agr[agr$predicateposition == "rightpredicate1",])
# Drop the now-redundant helper columns.
agr$response = NULL
agr$rightresponse = NULL
agr$class1 = NULL
agr$class2 = NULL
nrow(agr) #XXX
#write.csv(agr,"~/git/arabic_adjectives/experiments/2-order-preference-expanded/results/arabic-naturalness-duplicated.csv")
# Recode responses; NOTE(review): based on the y-axis label below this
# makes larger values mean farther from the noun -- confirm against the
# experiment's response coding.
agr$correctresponse = 1 - agr$correctresponse
agr = agr[!is.na(agr$correctresponse),]
# Mean response per predicate x class, and per class with bootstrap CIs.
adj_agr = aggregate(correctresponse~predicate*correctclass,FUN=mean,data=agr)
adj_agr
class_agr = aggregate(correctresponse~correctclass,FUN=mean,data=agr)
class_s = bootsSummary(data=agr, measurevar="correctresponse", groupvars=c("correctclass"))
#write.csv(class_s,"../results/tagalog_class_s.csv")
# Bar plot of preferred distance by adjective class (ordered by mean),
# with bootstrap confidence intervals and a 0.5 chance line.
ggplot(data=class_s,aes(x=reorder(correctclass,-correctresponse,mean),y=correctresponse))+
  geom_bar(stat="identity",fill="lightgray",color="black")+
  geom_errorbar(aes(ymin=bootsci_low, ymax=bootsci_high, x=reorder(correctclass,-correctresponse,mean), width=0.1))+
  geom_hline(yintercept=0.5,linetype="dashed") +
  xlab("\nadjective class")+
  ylab("preferred distance from noun\n")+
  ylim(0,1)+
  #labs("order\npreference")+
  theme_bw()#+
#theme(axis.text.x=element_text(angle=90,vjust=0.35,hjust=1))
#ggsave("../results/class_distance.pdf",height=3)
#ggsave("../results/LSA_class_distance.png",height=2,width=4.3)
#ggsave("../results/arabic-ordering.pdf",height=2.5,width=7)
#ggsave("../results/arabic-distance.pdf",height=2.5)
|
# Tests for RSEC(), the wrapper that chains
# clusterMany -> combineMany -> makeDendrogram -> mergeClusters.
context("RSEC")
# RSEC should accept every supported input container (matrix,
# ClusterExperiment, SummarizedExperiment). These runs are expected to
# fail at the makeDendrogram step (see comments below), hence
# expect_message rather than expect_silent.
test_that("`RSEC` works with matrix, ClusterExperiment, summarizedExperiment",{
  ##these examples don't do dendrogram/merge because all -1 after combineMany
  ##only tests clusterMany, combineMany parts.
  ##so can't do expect_silent, because returns NOTE about that issue.
  expect_message(rsecOut1<-RSEC(x=mat, isCount=FALSE,reduceMethod="none",k0s=4:5,
      clusterFunction="tight", alphas=0.1,dendroReduce="none",
      subsampleArgs=list(resamp.num=5),random.seed=495
      ),"makeDendrogram encountered following error")
  expect_message(rsecOut2<-RSEC(x=cc, reduceMethod="none",k0s=4:5,
      clusterFunction="tight", alphas=0.1,dendroReduce="none",
      subsampleArgs=list(resamp.num=5),random.seed=495
      ),"makeDendrogram encountered following error")
  expect_message(rsecOut3<-RSEC(x=ccSE,
      reduceMethod="none",k0s=4:5,clusterFunction="tight",
      alphas=0.1,dendroReduce="none",
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "makeDendrogram encountered following error")
  expect_message(rsecOut4<-RSEC(x=se,isCount=FALSE,reduceMethod="none",
      k0s=4:5,clusterFunction="tight", alphas=0.1,dendroReduce="none",
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "makeDendrogram encountered following error")
  #test rerunClusterMany argument:
  expect_message(rsecOut5<-RSEC(rsecOut2,reduceMethod="none",
      k0s=4:5,clusterFunction="tight", alphas=0.1,dendroReduce="none",rerunClusterMany=TRUE,
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "makeDendrogram encountered following error")
  #makes dendrogram so important have here so has to catch defaults of RSEC...
  expect_message(rsecOut6<-RSEC(rsecOut2,
      reduceMethod="none",k0s=4:5,clusterFunction="tight",
      alphas=0.1,dendroReduce="none",rerunClusterMany=FALSE,
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "makeDendrogram encountered following error")
})
# End-to-end check: RSEC's single call must match the result of running
# each pipeline step manually with the same seed and parameters.
test_that("`RSEC` works through whole series of steps",{
  #bigger example where actually goes through all the steps, takes some time:
  expect_message(rsecOut<-RSEC(x=assay(seSimCount), isCount=TRUE,reduceMethod="none",
      k0s=4:5,clusterFunction="tight", alphas=0.1,
      betas=0.9,dendroReduce="none",minSizes=1,
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "Merging will be done on")
  expect_silent(ceOut<-clusterMany(x=assay(seSimCount),ks=4:5,clusterFunction="tight",alphas=0.1,betas=0.9,minSizes=1,
      isCount=TRUE, reduceMethod="none", transFun = NULL,
      sequential=TRUE,removeSil=FALSE,subsample=TRUE,silCutoff=0,distFunction=NA,
      nFilterDims=NA,nReducedDims=NA,
      mainClusterArgs=NULL,subsampleArgs=list(resamp.num=5),
      ncores=1,run=TRUE,seqArgs=list(verbose=FALSE),random.seed=495
  ))
  expect_equal(clusterMatrix(rsecOut,whichClusters="clusterMany"),clusterMatrix(ceOut))
  expect_message(combOut<-combineMany(ceOut, proportion = 0.7,minSize = 5),"no clusters specified to combine")
  expect_equal(clusterMatrix(rsecOut,whichClusters="combineMany"),clusterMatrix(combOut,whichClusters="combineMany"))
  expect_equal(coClustering(rsecOut),coClustering(combOut))
  expect_silent(dendOut<-makeDendrogram(combOut,reduceMethod="none",nDims=NA))
  expect_equal(dendOut@dendro_clusters,rsecOut@dendro_clusters)
  expect_equal(dendOut@dendro_outbranch,rsecOut@dendro_outbranch)
  #now should be the same, check all objects except dendro_samples because very big:
  mergeOut<-mergeClusters(dendOut,mergeMethod = "adjP", cutoff = 0.05,isCount=TRUE)
  expect_equal(dendroClusterIndex(mergeOut),dendroClusterIndex(rsecOut))
  expect_equal(mergeOut@dendro_clusters,rsecOut@dendro_clusters)
  expect_equal(mergeOut@dendro_outbranch,rsecOut@dendro_outbranch)
  expect_equal(coClustering(mergeOut),coClustering(rsecOut))
  expect_equal(clusterMatrix(rsecOut,whichClusters="mergeClusters"), clusterMatrix(mergeOut,whichClusters="mergeClusters"))
  expect_equal(clusterTypes(rsecOut),clusterTypes(mergeOut))
})
test_that("`RSEC` works with no merging",{
  #bigger example where actually goes through all the steps (above skips the merging, in particular, because no dendrogram); takes some time:
  # NOTE(review): this test contains no expect_* call; it only verifies
  # that RSEC runs without error. Consider asserting on rsecOut.
  rsecOut<-RSEC(x=assay(seSimCount), isCount=TRUE,reduceMethod="none",
      k0s=4:5,clusterFunction="tight", alphas=0.1,
      betas=0.9,dendroReduce="none",minSizes=1,
      subsampleArgs=list(resamp.num=5),random.seed=495,
      mergeMethod="none")
})
# Robustness: if a later pipeline step fails, RSEC should still return
# the results of the steps that succeeded.
test_that("`RSEC` returns clusterMany even when errors later",{
  #error in combineMany param
  expect_message(rsecOut1<-RSEC(x=mat, isCount=FALSE,k0s=4:5,
      clusterFunction="tight", alphas=0.1, nReducedDims=3,
      subsampleArgs=list(resamp.num=5),random.seed=495, combineProportion = -1, combineMinSize = 5),"Invalid value for the 'proportion' parameter"
  )
  expect_true("clusterMany" %in% clusterTypes(rsecOut1))
  #error in dendro param
  expect_message(rsecOut2<-RSEC(x=mat, isCount=FALSE,k0s=4:5,
      clusterFunction="tight", alphas=0.1, nReducedDims=3,
      subsampleArgs=list(resamp.num=5),random.seed=495,
      dendroReduce="myfakemethod"
  ),"does not contain the given 'reduceMethod' value")
  expect_true(all(c("clusterMany","combineMany") %in% clusterTypes(rsecOut2)))
  #error in merging -- have to get one where can make dendrogram... takes longer.
  expect_message(rsecOut3<-RSEC(x=assay(seSimCount[sample(size=50,x=1:nrow(seSimCount)),]), isCount=TRUE,reduceMethod="none",
      k0s=4:5,clusterFunction="pam", alphas=0.1,
      betas=0.9,dendroReduce="none",minSizes=1,
      subsampleArgs=list(resamp.num=5),random.seed=495,
      mergeMethod="fakeMerge"
  ),"mergeClusters encountered following error")
  expect_true(all(c("clusterMany","combineMany") %in% clusterTypes(rsecOut3)))
})
|
/tests/testthat/test_RSEC.R
|
no_license
|
kevinbenac/clusterExperiment
|
R
| false
| false
| 5,782
|
r
|
# Tests for RSEC(), the wrapper that chains
# clusterMany -> combineMany -> makeDendrogram -> mergeClusters.
context("RSEC")
# RSEC should accept every supported input container (matrix,
# ClusterExperiment, SummarizedExperiment). These runs are expected to
# fail at the makeDendrogram step (see comments below), hence
# expect_message rather than expect_silent.
test_that("`RSEC` works with matrix, ClusterExperiment, summarizedExperiment",{
  ##these examples don't do dendrogram/merge because all -1 after combineMany
  ##only tests clusterMany, combineMany parts.
  ##so can't do expect_silent, because returns NOTE about that issue.
  expect_message(rsecOut1<-RSEC(x=mat, isCount=FALSE,reduceMethod="none",k0s=4:5,
      clusterFunction="tight", alphas=0.1,dendroReduce="none",
      subsampleArgs=list(resamp.num=5),random.seed=495
      ),"makeDendrogram encountered following error")
  expect_message(rsecOut2<-RSEC(x=cc, reduceMethod="none",k0s=4:5,
      clusterFunction="tight", alphas=0.1,dendroReduce="none",
      subsampleArgs=list(resamp.num=5),random.seed=495
      ),"makeDendrogram encountered following error")
  expect_message(rsecOut3<-RSEC(x=ccSE,
      reduceMethod="none",k0s=4:5,clusterFunction="tight",
      alphas=0.1,dendroReduce="none",
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "makeDendrogram encountered following error")
  expect_message(rsecOut4<-RSEC(x=se,isCount=FALSE,reduceMethod="none",
      k0s=4:5,clusterFunction="tight", alphas=0.1,dendroReduce="none",
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "makeDendrogram encountered following error")
  #test rerunClusterMany argument:
  expect_message(rsecOut5<-RSEC(rsecOut2,reduceMethod="none",
      k0s=4:5,clusterFunction="tight", alphas=0.1,dendroReduce="none",rerunClusterMany=TRUE,
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "makeDendrogram encountered following error")
  #makes dendrogram so important have here so has to catch defaults of RSEC...
  expect_message(rsecOut6<-RSEC(rsecOut2,
      reduceMethod="none",k0s=4:5,clusterFunction="tight",
      alphas=0.1,dendroReduce="none",rerunClusterMany=FALSE,
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "makeDendrogram encountered following error")
})
# End-to-end check: RSEC's single call must match the result of running
# each pipeline step manually with the same seed and parameters.
test_that("`RSEC` works through whole series of steps",{
  #bigger example where actually goes through all the steps, takes some time:
  expect_message(rsecOut<-RSEC(x=assay(seSimCount), isCount=TRUE,reduceMethod="none",
      k0s=4:5,clusterFunction="tight", alphas=0.1,
      betas=0.9,dendroReduce="none",minSizes=1,
      subsampleArgs=list(resamp.num=5),random.seed=495),
      "Merging will be done on")
  expect_silent(ceOut<-clusterMany(x=assay(seSimCount),ks=4:5,clusterFunction="tight",alphas=0.1,betas=0.9,minSizes=1,
      isCount=TRUE, reduceMethod="none", transFun = NULL,
      sequential=TRUE,removeSil=FALSE,subsample=TRUE,silCutoff=0,distFunction=NA,
      nFilterDims=NA,nReducedDims=NA,
      mainClusterArgs=NULL,subsampleArgs=list(resamp.num=5),
      ncores=1,run=TRUE,seqArgs=list(verbose=FALSE),random.seed=495
  ))
  expect_equal(clusterMatrix(rsecOut,whichClusters="clusterMany"),clusterMatrix(ceOut))
  expect_message(combOut<-combineMany(ceOut, proportion = 0.7,minSize = 5),"no clusters specified to combine")
  expect_equal(clusterMatrix(rsecOut,whichClusters="combineMany"),clusterMatrix(combOut,whichClusters="combineMany"))
  expect_equal(coClustering(rsecOut),coClustering(combOut))
  expect_silent(dendOut<-makeDendrogram(combOut,reduceMethod="none",nDims=NA))
  expect_equal(dendOut@dendro_clusters,rsecOut@dendro_clusters)
  expect_equal(dendOut@dendro_outbranch,rsecOut@dendro_outbranch)
  #now should be the same, check all objects except dendro_samples because very big:
  mergeOut<-mergeClusters(dendOut,mergeMethod = "adjP", cutoff = 0.05,isCount=TRUE)
  expect_equal(dendroClusterIndex(mergeOut),dendroClusterIndex(rsecOut))
  expect_equal(mergeOut@dendro_clusters,rsecOut@dendro_clusters)
  expect_equal(mergeOut@dendro_outbranch,rsecOut@dendro_outbranch)
  expect_equal(coClustering(mergeOut),coClustering(rsecOut))
  expect_equal(clusterMatrix(rsecOut,whichClusters="mergeClusters"), clusterMatrix(mergeOut,whichClusters="mergeClusters"))
  expect_equal(clusterTypes(rsecOut),clusterTypes(mergeOut))
})
test_that("`RSEC` works with no merging",{
  #bigger example where actually goes through all the steps (above skips the merging, in particular, because no dendrogram); takes some time:
  # NOTE(review): this test contains no expect_* call; it only verifies
  # that RSEC runs without error. Consider asserting on rsecOut.
  rsecOut<-RSEC(x=assay(seSimCount), isCount=TRUE,reduceMethod="none",
      k0s=4:5,clusterFunction="tight", alphas=0.1,
      betas=0.9,dendroReduce="none",minSizes=1,
      subsampleArgs=list(resamp.num=5),random.seed=495,
      mergeMethod="none")
})
# Robustness: if a later pipeline step fails, RSEC should still return
# the results of the steps that succeeded.
test_that("`RSEC` returns clusterMany even when errors later",{
  #error in combineMany param
  expect_message(rsecOut1<-RSEC(x=mat, isCount=FALSE,k0s=4:5,
      clusterFunction="tight", alphas=0.1, nReducedDims=3,
      subsampleArgs=list(resamp.num=5),random.seed=495, combineProportion = -1, combineMinSize = 5),"Invalid value for the 'proportion' parameter"
  )
  expect_true("clusterMany" %in% clusterTypes(rsecOut1))
  #error in dendro param
  expect_message(rsecOut2<-RSEC(x=mat, isCount=FALSE,k0s=4:5,
      clusterFunction="tight", alphas=0.1, nReducedDims=3,
      subsampleArgs=list(resamp.num=5),random.seed=495,
      dendroReduce="myfakemethod"
  ),"does not contain the given 'reduceMethod' value")
  expect_true(all(c("clusterMany","combineMany") %in% clusterTypes(rsecOut2)))
  #error in merging -- have to get one where can make dendrogram... takes longer.
  expect_message(rsecOut3<-RSEC(x=assay(seSimCount[sample(size=50,x=1:nrow(seSimCount)),]), isCount=TRUE,reduceMethod="none",
      k0s=4:5,clusterFunction="pam", alphas=0.1,
      betas=0.9,dendroReduce="none",minSizes=1,
      subsampleArgs=list(resamp.num=5),random.seed=495,
      mergeMethod="fakeMerge"
  ),"mergeClusters encountered following error")
  expect_true(all(c("clusterMany","combineMany") %in% clusterTypes(rsecOut3)))
})
})
|
##################Answer
# A. Use R to create a scatter plot and add the regression line.
library(readxl)
# Import data from an Excel workbook (sheet 1 of COMNODE3.xlsx)
COMNODE <- read_excel("C:\\Users\\Zack\\Documents\\Spring 2021\\MATH 212\\Data Sets\\COMNODE3.xlsx", sheet=1)
# Open the book CD file and save as "xlsx" in your folder
# View Data
head(COMNODE)
# Scatter plot of cost (y) against number of ports (x)
plot(COMNODE$NUMPORTS, COMNODE$COST, pch = 16, cex = 1.3, col = "blue", main = "Cost against Number of ports", xlab = "Number of ports", ylab = "Cost($)")
# Fit the simple linear regression model ONCE and reuse it below
# (the original refit the identical model three separate times).
Model <- lm(COMNODE$COST ~ COMNODE$NUMPORTS)
Model                 # print the fitted coefficients
# Add regression line
abline(Model)
###########################################################
summary(Model)        # coefficients, standard errors, R-squared
# Finding SST and SSE
anova(Model)          # ANOVA table gives SSR and SSE (SST = SSR + SSE)
|
/MATH 212/R Files/Chapter 3.3 Comm Node Student.R
|
no_license
|
zackroder/Spring-2021
|
R
| false
| false
| 801
|
r
|
##################Answer
# A. Use R to create a scatter plot and add the regression line.
library(readxl)
# Import data from an Excel workbook (sheet 1 of COMNODE3.xlsx)
COMNODE <- read_excel("C:\\Users\\Zack\\Documents\\Spring 2021\\MATH 212\\Data Sets\\COMNODE3.xlsx", sheet=1)
# Open the book CD file and save as "xlsx" in your folder
# View Data
head(COMNODE)
# Scatter plot of cost (y) against number of ports (x)
plot(COMNODE$NUMPORTS, COMNODE$COST, pch = 16, cex = 1.3, col = "blue", main = "Cost against Number of ports", xlab = "Number of ports", ylab = "Cost($)")
# Fit the simple linear regression model ONCE and reuse it below
# (the original refit the identical model three separate times).
Model <- lm(COMNODE$COST ~ COMNODE$NUMPORTS)
Model                 # print the fitted coefficients
# Add regression line
abline(Model)
###########################################################
summary(Model)        # coefficients, standard errors, R-squared
# Finding SST and SSE
anova(Model)          # ANOVA table gives SSR and SSE (SST = SSR + SSE)
|
# Artificial Neural Network: predict customer churn ('Exited') with an
# h2o deep-learning model.
# Importing the dataset
dataset = read.csv('Churn_Modelling.csv')
dataset = dataset[4:14]   # drop id/name columns; keep predictors + target
# Encoding the categorical variables as factors (then numeric codes)
dataset$Geography = as.numeric(factor(dataset$Geography,
                                      levels = c('France', 'Spain', 'Germany'),
                                      labels = c(1, 2, 3)))
dataset$Gender = as.numeric(factor(dataset$Gender,
                                   levels = c('Female', 'Male'),
                                   labels = c(1, 2)))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Exited, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling (column 11 is the target 'Exited'; leave it unscaled)
training_set[-11] = scale(training_set[-11])
test_set[-11] = scale(test_set[-11])
# Fitting ANN to the Training set
# install.packages('h2o')  # install once manually; do NOT reinstall on every run
library(h2o)
h2o.init(nthreads = -1)    # -1 = use all available cores
# Activation 'Rectifier' is the rectified linear unit (ReLU):
#   f(x) = max(0, x)
# http://deeplearningbook.com.br/funcao-de-ativacao/
model = h2o.deeplearning(y = 'Exited',
                         training_frame = as.h2o(training_set),
                         activation = 'Rectifier',
                         hidden = c(5,5),   # two hidden layers, 5 units each
                         epochs = 100,
                         train_samples_per_iteration = -2)
# Predicting the Test set results
prob_pred = h2o.predict(model, newdata = as.h2o(test_set[-11]))
y_pred = (prob_pred>0.5)   # threshold predicted churn probability at 0.5
y_pred = as.vector(y_pred)
# Making the Confusion Matrix
cm = table(test_set[, 11], y_pred)
h2o.shutdown()
|
/Part 8 - Deep Learning/Section 39 - Artificial Neural Networks (ANN)/Artificial_Neural_Networks/ann.R
|
no_license
|
arnaldoljr/Curso-Machine-Learning
|
R
| false
| false
| 1,719
|
r
|
# Artificial Neural Network: predict customer churn ('Exited') with an
# h2o deep-learning model.
# Importing the dataset
dataset = read.csv('Churn_Modelling.csv')
dataset = dataset[4:14]   # drop id/name columns; keep predictors + target
# Encoding the categorical variables as factors (then numeric codes)
dataset$Geography = as.numeric(factor(dataset$Geography,
                                      levels = c('France', 'Spain', 'Germany'),
                                      labels = c(1, 2, 3)))
dataset$Gender = as.numeric(factor(dataset$Gender,
                                   levels = c('Female', 'Male'),
                                   labels = c(1, 2)))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Exited, SplitRatio = 0.8)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling (column 11 is the target 'Exited'; leave it unscaled)
training_set[-11] = scale(training_set[-11])
test_set[-11] = scale(test_set[-11])
# Fitting ANN to the Training set
# install.packages('h2o')  # install once manually; do NOT reinstall on every run
library(h2o)
h2o.init(nthreads = -1)    # -1 = use all available cores
# Activation 'Rectifier' is the rectified linear unit (ReLU):
#   f(x) = max(0, x)
# http://deeplearningbook.com.br/funcao-de-ativacao/
model = h2o.deeplearning(y = 'Exited',
                         training_frame = as.h2o(training_set),
                         activation = 'Rectifier',
                         hidden = c(5,5),   # two hidden layers, 5 units each
                         epochs = 100,
                         train_samples_per_iteration = -2)
# Predicting the Test set results
prob_pred = h2o.predict(model, newdata = as.h2o(test_set[-11]))
y_pred = (prob_pred>0.5)   # threshold predicted churn probability at 0.5
y_pred = as.vector(y_pred)
# Making the Confusion Matrix
cm = table(test_set[, 11], y_pred)
h2o.shutdown()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.