| content (large_string, lengths 0–6.46M) | path (large_string, lengths 3–331) | license_type (large_string, 2 classes) | repo_name (large_string, lengths 5–125) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4–6.46M) | extension (large_string, 75 classes) | text (string, lengths 0–6.46M) |
|---|---|---|---|---|---|---|---|---|---|
# Functions to generate datasets
# Requires: digest (for md5 UIDs), plyr (for ddply), psych (for sim.general)
library(digest)
library(plyr)
library(psych)
generateUIDs <- function(n){
  UID <- sapply(1:n, digest, algo="md5")  # deterministic md5 hashes of the integers 1..n
  return(UID)
}
### TEAM ASSIGN
assignTeam <- function(teams, data, p=rep(1/length(teams), length(teams))){
  # Draw one team per row with probabilities p; the original rbinom() call
  # produced binomial counts rather than categorical draws with probabilities p.
  data$team <- factor(sample(teams, nrow(data), replace = TRUE, prob = p), levels = teams)
  return(data)
}
### SUSScale DATA
monthlySUSScaleData <- function(id, time, m0 = 2.5, within.effect=.4){
# Create long form
data <- ddply(data.frame(id), .(id), function(x, t){
return(data.frame("ID"=rep(x$id,t), "Time"=c(1:t)))
}, t=time)[,2:3]
# Add scale data
data <- ddply(data, .(Time), function(x, m0, within){
x <- generatePsychometricData(x$ID, mean=(m0+x$Time*within), nvar=10, nfact=2, g=.3, r=.3, store=FALSE)
}, m0=m0, within=within.effect)
data <- data[order(data$ID,data$Time),]
# clean names:
data[c(1,2)] <- data[c(2,1)]
names(data) <- c("ID", "Time", paste("SUS", c(1:10), sep=""))
return(data)
}
generatePsychometricData <- function(uid, mean=3, nvar=9, nfact=3, g=.3, r=.3, store=FALSE, smin=1, smax=7, ...){
  n <- length(uid)
  raw.dat <- sim.general(nvar, nfact, g=g, r=r, n=n)
  scale.dat <- round(raw.dat + mean)
  # clamp to the scale bounds (previously hard-coded to 1 and 5 instead of smin/smax)
  scale.dat <- ifelse(scale.dat < smin, smin, scale.dat)
  scale.dat <- ifelse(scale.dat > smax, smax, scale.dat)
  ret.dat <- data.frame("ID"=uid, scale.dat)
  return(ret.dat)
}
## Average email response time
emailResponseTime <- function(data, t, m0=360, within=-20, between=-5, wb=-40, sigma=5){ # expects a 'Team' column with "Team A" as the baseline level (team hard-coded)
data <- ddply(data, .(), function(x, t){
return(data.frame(data[rep(seq_len(nrow(data)), each=t),], "Time"=c(1:t)))
}, t=t)
data <- data[-1]
data$team.n <- ifelse(data$Team=="Team A", 0, 1)
data <- ddply(data, .(team.n, Time), function(x, m0, within, between, wb, sigma){
x <- data.frame(x, responseTime=rnorm(length(x$ID), (m0+x$Time*within+x$team.n*between+x$Time*x$team.n*wb), sigma))
return(x)
}, m0=m0, within=within, between=between, wb=wb, sigma=sigma)
# No values smaller than 0
data$responseTime <- ifelse(data$responseTime < 0, 0, data$responseTime)
# Clean and order
data <- data[order(data$Team,data$Time),]
data <- data[c(1,2,3,5)]
return(data)
}
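# Minimal usage sketch (an illustration, not part of the original file; assumes the
# libraries loaded above and uses hypothetical sizes):
# participants <- data.frame(id = generateUIDs(20))
# participants <- assignTeam(c("Team A", "Team B"), participants)
# sus <- monthlySUSScaleData(participants$id, time = 6)
# head(sus)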
| /functions.r | no_license | Judyr/hcistats | R | false | false | 2,184 | r |
#' Coerce phyloseq object
#'
#' @param obj a \code{phyloseq} object
#'
#' @return An object of class MicrobiomeExperiment
#'
#' @importFrom S4Vectors SimpleList DataFrame
#' @importFrom SummarizedExperiment colData colData<-
#'
#' @export
#'
#' @examples
#' if (requireNamespace("phyloseq")) {
#' data(GlobalPatterns, package="phyloseq")
#' makeMicrobiomeExperimentFromphyloseq(GlobalPatterns)
#' data(enterotype, package="phyloseq")
#' makeMicrobiomeExperimentFromphyloseq(enterotype)
#' data(esophagus, package="phyloseq")
#' makeMicrobiomeExperimentFromphyloseq(esophagus)
#' }
makeMicrobiomeExperimentFromphyloseq <- function(obj) {
# input check
.require_package("phyloseq")
if(!is(obj,"phyloseq")){
stop("'obj' must be a 'phyloseq' object")
}
#
assays <- SimpleList(counts = obj@otu_table@.Data)
rowData <- S4Vectors:::make_zero_col_DataFrame(nrow(assays$counts))
colData <- S4Vectors:::make_zero_col_DataFrame(ncol(assays$counts))
if(!is.null(obj@tax_table@.Data)){
rowData <- DataFrame(data.frame(obj@tax_table@.Data))
}
if(!is.null(obj@sam_data)){
colData <- DataFrame(data.frame(obj@sam_data))
}
if(!is.null(obj@phy_tree)){
rowTree <- obj@phy_tree
} else {
rowTree <- NULL
}
if (!is.null(obj@refseq)) {
referenceSeq <- obj@refseq
} else {
referenceSeq <- NULL
}
    MicrobiomeExperiment(assays = assays,
                         rowData = rowData,
                         colData = colData,
                         rowTree = rowTree,
                         referenceSeq = referenceSeq)
}
| /R/makeMicrobiomeExperimentFromphyloseq.R | no_license | microbiome/MicrobiomeExperiment | R | false | false | 1,666 | r |
summarize_rarefaction <- function(dataset){
methods <- c("an", "fn", "nn", "agc", "dgc", "closed", "open", "swarm", "vagc", "vdgc")
output_file_name <- paste0("data/process/", dataset, ".rarefaction.summary")
write(x="method\tfraction\tsobs\tsobs_lci\tsobs_uci\trarefied\trare_lci\trare_uci\tp_value", file=output_file_name)
for(m in methods){
print(m)
file_name <- paste0("data/", dataset, "/", dataset, ".", m, ".rarefaction")
rarefy <- read.table(file=file_name, header=TRUE)
fractions <- unique(rarefy$fraction)
p <- numeric()
rare_mean <- numeric()
rare_lci <- numeric()
rare_uci <- numeric()
sobs_mean <- numeric()
sobs_lci <- numeric()
sobs_uci <- numeric()
for(f in fractions){
rarefy_sub <- rarefy[rarefy$fraction == f, ]  # explicit column name; $f relied on partial matching
sobs_mean[as.character(f)] <- mean(rarefy_sub$sobs)
sobs_lci[as.character(f)] <- quantile(rarefy_sub$sobs, prob=0.025)
sobs_uci[as.character(f)] <- quantile(rarefy_sub$sobs, prob=0.975)
rare_mean[as.character(f)] <- mean(rarefy_sub$rarefied)
rare_lci[as.character(f)] <- quantile(rarefy_sub$rarefied, prob=0.025)
rare_uci[as.character(f)] <- quantile(rarefy_sub$rarefied, prob=0.975)
if(f != 1 && m != "closed"){
p[as.character(f)] <- t.test(rarefy_sub$rarefied, rarefy_sub$sobs)$p.value
} else {
p[as.character(f)] <- NA
}
}
output <- cbind(rep(m, length(fractions)), fractions, sobs_mean, sobs_lci, sobs_uci, rare_mean, rare_lci, rare_uci, p)
write.table(file=output_file_name, output, col.names=F, row.names=F, append=T, quote=F, sep='\t')
}
}
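# Reading the summary back for inspection (a sketch; "mydataset" is a hypothetical name):
# rare_summary <- read.table("data/process/mydataset.rarefaction.summary", header = TRUE, sep = "\t")
# str(rare_summary)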
| /code/summarize_rarefaction.R | permissive | SchlossLab/Schloss_Cluster_PeerJ_2015 | R | false | false | 1,544 | r |
heading("BEGIN TEST")
conn <- new("h2o.client", ip=myIP, port=myPort)
heading("Uploading train data to H2O")
iris_train.hex <- h2o.importFile(conn, train)
heading("Creating DL model in H2O")
balance_classes <- if (exists("balance_classes")) balance_classes else FALSE
iris.dl.h2o <- h2o.deeplearning(x = x, y = y, data = iris_train.hex, hidden = hidden, balance_classes = balance_classes, classification = classification, activation = activation, epochs = epochs)
print(iris.dl.h2o)
heading("Downloading Java prediction model code from H2O")
model_key <- iris.dl.h2o@key
tmpdir_name <- sprintf("%s/results/tmp_model_%s", TEST_ROOT_DIR, as.character(Sys.getpid()))
cmd <- sprintf("rm -fr %s", tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("mkdir -p %s", tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("curl -o %s/%s.java http://%s:%d/2/DeepLearningModelView.java?_modelKey=%s", tmpdir_name, model_key, myIP, myPort, model_key)
safeSystem(cmd)
heading("Uploading test data to H2O")
iris_test.hex <- h2o.importFile(conn, test)
heading("Predicting in H2O")
iris.dl.pred <- h2o.predict(iris.dl.h2o, iris_test.hex)
summary(iris.dl.pred)
head(iris.dl.pred)
prediction1 <- as.data.frame(iris.dl.pred)
out_h2o_csv <- sprintf("%s/out_h2o.csv", tmpdir_name)
write.csv(prediction1, out_h2o_csv, quote=FALSE, row.names=FALSE)
heading("Setting up for Java POJO")
iris_test_with_response <- read.csv(test, header=T)
iris_test_without_response <- iris_test_with_response[,x]
write.csv(iris_test_without_response, file = sprintf("%s/in.csv", tmpdir_name), row.names=F, quote=F)
cmd <- sprintf("cp PredictCSV.java %s", tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("javac -cp %s/h2o-model.jar -J-Xmx2g -J-XX:MaxPermSize=128m %s/PredictCSV.java %s/%s.java", H2O_JAR_DIR, tmpdir_name, tmpdir_name, model_key)
safeSystem(cmd)
heading("Predicting with Java POJO")
cmd <- sprintf("java -ea -cp \"%s/h2o-model.jar%s%s\" -Xmx2g -XX:MaxPermSize=256m -XX:ReservedCodeCacheSize=256m PredictCSV --header --model %s --input %s/in.csv --output %s/out_pojo.csv", H2O_JAR_DIR, .Platform$path.sep, tmpdir_name, model_key, tmpdir_name, tmpdir_name)
safeSystem(cmd)
heading("Comparing predictions between H2O and Java POJO")
prediction2 <- read.csv(sprintf("%s/out_pojo.csv", tmpdir_name), header=T)
if (nrow(prediction1) != nrow(prediction2)) {
warning("Prediction mismatch")
print(paste("Rows from H2O", nrow(prediction1)))
print(paste("Rows from Java POJO", nrow(prediction2)))
stop("Number of rows mismatch")
}
predictions_match <- all(norm(as.matrix(prediction1[,-1] - prediction2[,-1]), type="M") < 1e-4)
if (! predictions_match) {
for (i in 1:nrow(prediction1)) {
rowmatches <- (norm(as.matrix(prediction1[i,-1] - prediction2[i,-1]), type="M") < 1e-4)
if (! rowmatches) {
print("----------------------------------------------------------------------")
print("")
print(paste("Prediction mismatch on data row", i, "of test file", test))
print("")
print( "(Note: That is the 1-based data row number, not the file line number.")
print( " If you have a header row, then the file line number is off by one.)")
print("")
print("----------------------------------------------------------------------")
print("")
print("Data from failing row")
print("")
print(iris_test_without_response[i,])
print("")
print("----------------------------------------------------------------------")
print("")
print("Prediction from H2O")
print("")
print(prediction1[i,])
print("")
print("----------------------------------------------------------------------")
print("")
print("Prediction from Java POJO")
print("")
print(prediction2[i,])
print("")
print("----------------------------------------------------------------------")
print("")
stop("Prediction mismatch")
}
}
}
heading("Cleaning up tmp files")
cmd <- sprintf("rm -fr %s", tmpdir_name)
safeSystem(cmd)
PASS_BANNER()
| /h2o-r/tests/Utils/shared_javapredict_DL.R | permissive | JMR-b/h2o-dev | R | false | false | 3,998 | r |
heading("BEGIN TEST")
conn <- new("h2o.client", ip=myIP, port=myPort)
heading("Uploading train data to H2O")
iris_train.hex <- h2o.importFile(conn, train)
heading("Creating DL model in H2O")
balance_classes <- if (exists("balance_classes")) balance_classes else FALSE
iris.dl.h2o <- h2o.deeplearning(x = x, y = y, data = iris_train.hex, hidden = hidden, balance_classes = balance_classes, classification = classification, activation = activation, epochs = epochs)
print(iris.dl.h2o)
heading("Downloading Java prediction model code from H2O")
model_key <- iris.dl.h2o@key
tmpdir_name <- sprintf("%s/results/tmp_model_%s", TEST_ROOT_DIR, as.character(Sys.getpid()))
cmd <- sprintf("rm -fr %s", tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("mkdir -p %s", tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("curl -o %s/%s.java http://%s:%d/2/DeepLearningModelView.java?_modelKey=%s", tmpdir_name, model_key, myIP, myPort, model_key)
safeSystem(cmd)
heading("Uploading test data to H2O")
iris_test.hex <- h2o.importFile(conn, test)
heading("Predicting in H2O")
iris.dl.pred <- h2o.predict(iris.dl.h2o, iris_test.hex)
summary(iris.dl.pred)
head(iris.dl.pred)
prediction1 <- as.data.frame(iris.dl.pred)
cmd <- sprintf( "%s/out_h2o.csv", tmpdir_name)
write.csv(prediction1, cmd, quote=FALSE, row.names=FALSE)
heading("Setting up for Java POJO")
iris_test_with_response <- read.csv(test, header=T)
iris_test_without_response <- iris_test_with_response[,x]
write.csv(iris_test_without_response, file = sprintf("%s/in.csv", tmpdir_name), row.names=F, quote=F)
cmd <- sprintf("cp PredictCSV.java %s", tmpdir_name)
safeSystem(cmd)
cmd <- sprintf("javac -cp %s/h2o-model.jar -J-Xmx2g -J-XX:MaxPermSize=128m %s/PredictCSV.java %s/%s.java", H2O_JAR_DIR, tmpdir_name, tmpdir_name, model_key)
safeSystem(cmd)
heading("Predicting with Java POJO")
cmd <- sprintf("java -ea -cp \"%s/h2o-model.jar%s%s\" -Xmx2g -XX:MaxPermSize=256m -XX:ReservedCodeCacheSize=256m PredictCSV --header --model %s --input %s/in.csv --output %s/out_pojo.csv", H2O_JAR_DIR, .Platform$path.sep, tmpdir_name, model_key, tmpdir_name, tmpdir_name)
safeSystem(cmd)
heading("Comparing predictions between H2O and Java POJO")
prediction2 <- read.csv(sprintf("%s/out_pojo.csv", tmpdir_name), header=T)
if (nrow(prediction1) != nrow(prediction2)) {
warning("Prediction mismatch")
print(paste("Rows from H2O", nrow(prediction1)))
print(paste("Rows from Java POJO", nrow(prediction2)))
stop("Number of rows mismatch")
}
match <- all(norm(as.matrix(prediction1[,-1] - prediction2[,-1]), type="M") < 1e-4)
if (! match) {
for (i in 1:nrow(prediction1)) {
rowmatches <- (norm(as.matrix(prediction1[i,-1] - prediction2[i,-1]), type="M") < 1e-4)
if (! rowmatches) {
print("----------------------------------------------------------------------")
print("")
print(paste("Prediction mismatch on data row", i, "of test file", test))
print("")
print( "(Note: That is the 1-based data row number, not the file line number.")
print( " If you have a header row, then the file line number is off by one.)")
print("")
print("----------------------------------------------------------------------")
print("")
print("Data from failing row")
print("")
print(iris_test_without_response[i,])
print("")
print("----------------------------------------------------------------------")
print("")
print("Prediction from H2O")
print("")
print(prediction1[i,])
print("")
print("----------------------------------------------------------------------")
print("")
print("Prediction from Java POJO")
print("")
print(prediction2[i,])
print("")
print("----------------------------------------------------------------------")
print("")
stop("Prediction mismatch")
}
}
}
heading("Cleaning up tmp files")
cmd <- sprintf("rm -fr %s", tmpdir_name)
safeSystem(cmd)
PASS_BANNER()
|
#' My regression package
#'
#' @name lreg-package
#' @aliases lreg
#' @docType package
#' @keywords package
#' @import methods
NULL
| /R/lreg-package.R | no_license | jefferis/lreg | R | false | false | 133 | r |
#Reading data from data sets.
xTest <- read.table("./UCI HAR Dataset/test/X_test.txt", header=FALSE)
yTest <- read.table("./UCI HAR Dataset/test/y_test.txt", header=FALSE)
xTrain <- read.table("./UCI HAR Dataset/train/X_train.txt", header=FALSE)
yTrain <- read.table("./UCI HAR Dataset/train/y_train.txt", header=FALSE)
subjectTrain <- read.table("./UCI HAR Dataset/train/subject_train.txt", header=FALSE)
subjectTest <- read.table("./UCI HAR Dataset/test/subject_test.txt", header=FALSE)
features <- read.table("./UCI HAR Dataset/features.txt", header=FALSE)
activityType <- read.table("./UCI HAR Dataset/activity_labels.txt", header=FALSE)
# Provide headers for the data.
colnames(xTrain) <- features[,2]
colnames(yTrain) <- "activityID"
colnames(subjectTrain) <- "subjectID"
colnames(activityType) <- c("activityID", "activityType")
colnames(xTest) <- features[,2]
colnames(yTest) <- "activityID"
colnames(subjectTest) <- "subjectID"
# Merge data to form the train and test data frames.
trainData <- cbind(yTrain, subjectTrain, xTrain)
testData <- cbind(yTest, subjectTest, xTest)
# Merged data
mergedData <- rbind(testData, trainData)
# Select Data associated with mean and SD.
colNames <- colnames(mergedData)
selectMeanSD <- (grepl("activity",colNames)|grepl("subject",colNames) |grepl("mean",colNames)|grepl("Mean",colNames)|grepl("std",colNames))
finalData <- mergedData[selectMeanSD]
# Merge data with the activity type
finalData <- merge(finalData, activityType, by='activityID', all.x=TRUE)
colNames <- colnames(finalData)
# Cleaning names of the variables
colNames <- gsub("\\()", "", colNames)
colNames <- gsub("-mean", "Mean", colNames)
colNames <- gsub("-std", "SD", colNames)
colNames <- gsub("BodyBody", "Body", colNames)
# Providing new variable names for finalData.
colnames(finalData) <- colNames
# Removing Activity type.
finalData$activityType <- NULL
# final clean data.
cleanData <- aggregate(finalData, by=list(activityID=finalData$activityID, subjectID=finalData$subjectID), mean)
# Activity ID and subject ID appear in the data frame twice, so the repeated columns are removed.
cleanData[,1] <- NULL
cleanData[,3] <- NULL
head(cleanData)
| /Getting and Cleaning Data/project/run_analysis.R | no_license | mlamichh/Getting-and-Cleaning-Data | R | false | false | 2,172 | r |
\name{asia}
\docType{data}
\alias{asia}
\title{Asia (synthetic) data set by Lauritzen and Spiegelhalter}
\description{
Small synthetic data set from Lauritzen and Spiegelhalter (1988) about lung
diseases (tuberculosis, lung cancer or bronchitis) and visits to Asia.
}
\usage{
data(asia)
}
\format{
The \code{asia} data set contains the following variables:
\itemize{
\item \code{D} (\emph{dyspnoea}), a two-level factor with levels \code{yes}
and \code{no}.
\item \code{T} (\emph{tuberculosis}), a two-level factor with levels
\code{yes} and \code{no}.
\item \code{L} (\emph{lung cancer}), a two-level factor with levels
\code{yes} and \code{no}.
\item \code{B} (\emph{bronchitis}), a two-level factor with levels
\code{yes} and \code{no}.
\item \code{A} (\emph{visit to Asia}), a two-level factor with levels
\code{yes} and \code{no}.
\item \code{S} (\emph{smoking}), a two-level factor with levels \code{yes}
and \code{no}.
\item \code{X} (\emph{chest X-ray}), a two-level factor with levels
\code{yes} and \code{no}.
\item \code{E} (\emph{tuberculosis versus lung cancer/bronchitis}), a
two-level factor with levels \code{yes} and \code{no}.
}
}
\note{
Lauritzen and Spiegelhalter (1988) motivate this example as follows:
\dQuote{Shortness-of-breath (dyspnoea) may be due to tuberculosis, lung
cancer or bronchitis, or none of them, or more than one of them. A recent
visit to Asia increases the chances of tuberculosis, while smoking is known
to be a risk factor for both lung cancer and bronchitis. The results of a
single chest X-ray do not discriminate between lung cancer and tuberculosis,
as neither does the presence or absence of dyspnoea.}
Standard learning algorithms are not able to recover the true structure of
the network because of the presence of a node (\code{E}) with conditional
probabilities equal to both 0 and 1. Monte Carlo tests seem to behave
better than their parametric counterparts.
The complete BN can be downloaded from \url{http://www.bnlearn.com/bnrepository}.
}
\source{
Lauritzen S, Spiegelhalter D (1988). "Local Computation with Probabilities
on Graphical Structures and their Application to Expert Systems (with
discussion)". \emph{Journal of the Royal Statistical Society: Series B
(Statistical Methodology)}, \strong{50}(2), 157-224.
}
\examples{
# load the data and build the correct network from the model string.
data(asia)
res = empty.graph(names(asia))
modelstring(res) = "[A][S][T|A][L|S][B|S][D|B:E][E|T:L][X|E]"
plot(res)
}
\keyword{datasets}
| /man/asia.Rd | no_license | areinert9/bnlearn | R | false | false | 2,641 | rd |
# SEI-SEIR model with temperature, humidity, and rainfall --------------------------------
# Note: temp, hum, and rain are daily forcing vectors indexed by t, and K_trh() is
# expected to be bound elsewhere to one of the carrying-capacity functions defined
# at the end of this file (its arguments match K_trh_briere).
seiseir_model_thr <- function(t, state, parameters) {
with(as.list(c(state,parameters)), {
dM1 <- (EFD(temp[t])*pEA(temp[t])*MDR(temp[t])*mu_th(temp[t], hum[t])^(-1))*(M1+M2+M3)*max((1-((M1+M2+M3)/K_trh(temp[t], hum[t], rain[t], Rmax, (S+E+I+R)))),0)-(a(temp[t])*pMI(temp[t])*I/(S+E+I+R)+mu_th(temp[t], hum[t])*M1)
dM2 <- (a(temp[t])*pMI(temp[t])*I/(S+E+I+R))*M1-(PDR(temp[t])+mu_th(temp[t], hum[t]))*M2
dM3 <- PDR(temp[t])*M2-mu_th(temp[t], hum[t])*M3
dS <- -a(temp[t])*b(temp[t])*(M3/(M1+M2+M3+0.001))*S + BR*(S/1000)/360 - DR*(S/1000)/360 + ie*(S+E+I+R) - ie*S
dE <- a(temp[t])*b(temp[t])*(M3/(M1+M2+M3+0.001))*S-(1.0/5.9)*E - DR*(E/1000)/360 - ie*E
dI <- (1.0/5.9)*E-(1.0/5.0)*I - DR*(I/1000)/360 - ie*I
dR <- (1.0/5.0)*I - DR*(R/1000)/360 - ie*R
list(c(dM1, dM2, dM3, dS, dE, dI, dR))
})
}
# This is the general function for the Briere fit.
briere <- function(x, c, T0, Tm){
if((x < T0) | (x > Tm))
0.0
else
c*x*(x-T0)*sqrt(Tm-x)
}
# This is the general function for the quadratic fit.
quadratic <- function(x, c, T0, Tm){
if((x < T0) | (x > Tm))
0.0
else
c*(x-T0)*(x-Tm)
}
# This is the general function for the inverted quadratic fit.
inverted_quadratic <- function(x, c, T0, Tm){
if((x < T0) | (x > Tm))
24.0
else
1.0/(c*(x-T0)*(x-Tm))
}
# eggs per female per day
EFD <- function(temp){
briere(temp, EFD_c, EFD_T0, EFD_Tm)
}
# probability egg to adult survival
pEA <- function(temp){
quadratic(temp, pEA_c, pEA_T0, pEA_Tm)
}
# mosquito development rate (1/larval development period)
MDR <- function(temp){
briere(temp, MDR_c, MDR_T0, MDR_Tm)
}
# biting rate
a <- function(temp){
briere(temp, a_c, a_T0, a_Tm)
}
# probability of mosquito infection per bite on an infectious host
pMI <- function(temp){
briere(temp, pMI_c, pMI_T0, pMI_Tm)
}
# adult mosquito mortality rate (1/adult lifespan)
mu_th <- function(temp, hum){
if (hum <= 1){
inverted_quadratic(temp, mu_th_c, mu_th_T0, mu_th_Tm)+(1-(0.01256 + 2.00893*hum))*0.01
} else {
inverted_quadratic(temp, mu_th_c, mu_th_T0, mu_th_Tm)+(1-(1.2248 + 0.2673*hum))*0.01
}
}
# parasite development rate
PDR <- function(temp){
briere(temp, PDR_c, PDR_T0, PDR_Tm)
}
# transmission competence: probability of human infection per bite by an infectious mosquito
b <- function(temp){
briere(temp, b_c, b_T0, b_Tm)
}
# carrying capacity for temperature only
carrying_capacity_th <- function(temp, T0, EA, N, h0){
kappa <- 8.617e-05  # Boltzmann constant (eV/K)
alpha <- (EFD(T0)*pEA(T0)*MDR(T0)*mu_th(T0, h0)^(-1)-mu_th(T0, h0))/(EFD(T0)*pEA(T0)*MDR(T0)*mu_th(T0, h0)^(-1))
(alpha*N*exp(-EA*((temp-T0)^2)/(kappa*(temp+273.0)*(T0+273.0))))
}
K_trh_briere <- function(temp, h0, rain, Rmax, N){
R0 <- 1
if((rain < R0) | (rain > Rmax)){
max(0.01*carrying_capacity_th(temp, 29.0, 0.05, N, h0), 1000)
}
else {
c <- 7.86e-05
max(carrying_capacity_th(temp,29.0,0.05, N, h0)*c*rain*(rain-R0)*sqrt(Rmax-rain)*0.3 + 0.001, 1000)
}
}
K_tr_quadratic <- function(temp, rain, Rmax, N){  # note: carrying_capacity_t() is defined elsewhere in the project
R0 <- 1
if((rain < R0) | (rain > Rmax)){
max(0.01*carrying_capacity_t(temp, 29.0, 0.05, N), 1000)
}
else {
c <- -5.99e-03
max(carrying_capacity_t(temp, 29.0, 0.05, N)*(c*(rain-R0)*(rain-Rmax))/2 + 0.001, 1000)
}
}
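# Minimal sketch of running the model with deSolve (not part of the original file;
# the forcing series, initial state, and parameter values below are hypothetical
# placeholders, and the trait-curve constants must already be in scope):
# library(deSolve)
# temp <- rep(25, 365); hum <- rep(1.5, 365); rain <- rep(5, 365)  # daily forcings
# K_trh <- K_trh_briere  # bind the carrying-capacity variant used inside the ODE
# state <- c(M1 = 1000, M2 = 0, M3 = 0, S = 10000, E = 0, I = 1, R = 0)
# parameters <- c(Rmax = 123, BR = 30, DR = 7, ie = 1e-5)
# # Euler steps evaluate the model only at integer t, matching the daily indexing:
# out <- ode(y = state, times = 1:365, func = seiseir_model_thr, parms = parameters, method = "euler")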
| /Codes/SEI-SEIR_model_with_trait_variation.R | no_license | jms5151/EVP | R | false | false | 3,385 | r |
library(polmineR)
### Name: chisquare
### Title: Perform chisquare-test.
### Aliases: chisquare chisquare,features-method chisquare,context-method
### chisquare,cooccurrences-method
### Keywords: textstatistics
### ** Examples
use("polmineR")
library(data.table)
m <- partition(
"GERMAPARLMINI", speaker = "Merkel", interjection = "speech",
regex = TRUE, p_attribute = "word"
)
f <- features(m, "GERMAPARLMINI", included = TRUE)
f_min <- subset(f, count_coi >= 5)
summary(f_min)
## Not run:
##D
##D # A sample do-it-yourself calculation for chisquare:
##D
##D # (a) prepare matrix with observed values
##D o <- matrix(data = rep(NA, 4), ncol = 2)
##D o[1,1] <- as.data.table(m)[word == "Weg"][["count"]]
##D o[1,2] <- count("GERMAPARLMINI", query = "Weg")[["count"]] - o[1,1]
##D o[2,1] <- size(f)[["coi"]] - o[1,1]
##D o[2,2] <- size(f)[["ref"]] - o[1,2]
##D
##D
##D # prepare matrix with expected values, calculate margin sums first
##D
##D r <- rowSums(o)
##D c <- colSums(o)
##D N <- sum(o)
##D
##D e <- matrix(data = rep(NA, 4), ncol = 2)
##D e[1,1] <- r[1] * (c[1] / N)
##D e[1,2] <- r[1] * (c[2] / N)
##D e[2,1] <- r[2] * (c[1] / N)
##D e[2,2] <- r[2] * (c[2] / N)
##D
##D
##D # compute chisquare statistic
##D
##D y <- matrix(rep(NA, 4), ncol = 2)
##D for (i in 1:2) for (j in 1:2) y[i,j] <- (o[i,j] - e[i,j])^2 / e[i,j]
##D chisquare_value <- sum(y)
##D
##D as(f, "data.table")[word == "Weg"][["chisquare"]]
## End(Not run)
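## A cross-check for the do-it-yourself block above (a sketch, not part of the
## original example): base R's chisq.test() without continuity correction
## reproduces the same statistic for the observed 2x2 matrix.
## chisq.test(o, correct = FALSE)$statistic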
| /data/genthat_extracted_code/polmineR/examples/chisquare-method.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,460 | r |
possibly <- function(.f, otherwise, quiet = TRUE) {
force(otherwise)
function(...) {
tryCatch(
.f(...),
error = function(e) {
if (!quiet)
message("Error: ", e$message)
otherwise
},
interrupt = function(e) {
stop("Terminated by user", call. = FALSE)
}
)
}
}
safe_GET <- possibly(GET, NULL, quiet = TRUE)
#' Download file from the Internet (cache-aware)
#'
#' This is an alternative to [utils::download.file()] and a convenience wrapper for
#' [GET()] + [httr::write_disk()] to perform file downloads.
#'
#' Since this function uses [GET()], callers can pass in `httr` configuration
#' options to customize the behaviour of the download process (e.g. specify a `User-Agent` via
#' [user_agent()], set proxy config via [use_proxy()], etc.).
#'
#' The function is also "cache-aware" in the sense that you deliberately have to specify
#' `overwrite = TRUE` to force a re-download. This has the potential to save bandwidth
#' of both the caller and the site hosting files for download.
#'
#' @note While this function supports specifying multiple URLs and download paths it
#' does not perform concurrent downloads.
#' @param url the url(s) of the file to retrieve. If multiple URLs are provided then the same
#' number of `path`s must also be provided.
#' @param path Path(s) to save content to. If more than one `path` is specified then the same
#' number of `url`s must also be provided. This parameter will be [path.expand()]ed.
#' @param overwrite Will only overwrite existing path if `TRUE`.
#' @param ... passed on to [GET()]
#' @return a data frame containing the `url`(s), `path`(s), cache status, and HTTP status code(s).
#' If there was an error downloading a file the path, status code, and HTTP status
#' columns will be `NA`. If the file was not re-downloaded the status code will be 399.
#' @seealso [GET()]; [write_disk()]
#' @export
#' @examples
#' tmp1 <- tempfile()
#' tmp2 <- tempfile()
#' tmp3 <- tempfile()
#'
#' download_file("https://google.com", tmp1) # downloads fine
#' download_file("https://google.com", tmp1) # doesn't re-download since it's cached
#' download_file("https://google.com", tmp1, overwrite = TRUE) # re-downloads (overwrites file)
#' download_file("https://google.com", tmp2) # re-downloads (new file)
#' download_file("badurl", tmp3) # handles major errors gracefully
#'
#' # multi-file example with no-caching
#' download_file(
#' c(rep("https://google.com", 2), "badurl"),
#' c(tmp1, tmp2, tmp3),
#' overwrite = TRUE
#' )
#'
#' # multi-file example with caching
#' download_file(
#' c(rep("https://google.com", 2), "badurl"),
#' c(tmp1, tmp2, tmp3),
#' overwrite = FALSE
#' )
download_file <- function(url, path, overwrite = FALSE, ...) {
url <- as.character(url)
path <- as.character(path)
if (length(url) != length(path)) {
stop("The lengths of the 'url' and 'path' parameters must be equal.", call.=FALSE)
}
path <- path.expand(path)
overwrite <- as.logical(overwrite)
stopifnot(length(overwrite) == 1)
out <- vector("list", length = length(url))
for (idx in seq_along(url)) {
u <- url[[idx]]
p <- path[[idx]]
if (file.exists(p)) {
if (overwrite) { # file exists but caller wants to re-download
res <- safe_GET(u, write_disk(p, overwrite = TRUE), ...)
if (is.null(res)) {
p <- NA_character_
cache_used <- FALSE
status <- NA_integer_
} else {
cache_used <- FALSE
status <- status_code(res)
}
} else { # file exists but caller does not want to re-download
if (is.null(parse_url(u)[["hostname"]])) { # quick non-network test for invalid URL
p <- NA_character_
cache_used <- FALSE
status <- NA_integer_
} else {
cache_used <- TRUE
status <- 399L
}
}
} else { # file does not exist, so do the thing
res <- safe_GET(u, write_disk(p, overwrite = overwrite), ...)
if (is.null(res)) {
p <- NA_character_
cache_used <- FALSE
status <- NA_integer_
} else {
status <- status_code(res)
cache_used <- FALSE
}
}
out[[idx]] <- data.frame(
url = u, path = p,
status_code = status,
cache_used = cache_used,
stringsAsFactors = FALSE
)
}
out <- do.call(rbind.data.frame, out)
class(out) <- c("tbl_df", "tbl", "data.frame")
invisible(out)
}
| /R/download-file.R | no_license | hrbrmstr/httr | R | false | false | 4,540 | r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helper_functions.R
\name{meta_s_norm}
\alias{meta_s_norm}
\title{Normalize 16S stools metadata}
\usage{
meta_s_norm(meta)
}
\arguments{
\item{meta}{The metadata}
}
\value{
The dataframe with the metadata corrected
}
\description{
Normalize 16S stools metadata
}
| /man/meta_s_norm.Rd | permissive | llrs/integration-helper | R | false | true | 340 | rd |
# The loadData.R script will download the file, unzip it, load 'NEI' and 'SCC'
# dataframes and then merge them into a dataframe named 'ALL'
source("./loadData.R")
# Q2. Have total emissions from PM2.5 decreased in the **Baltimore City**, Maryland
# (`fips == "24510"`) from 1999 to 2008? Use the **base** plotting system to make
# a plot answering this question.
# Subset data for Baltimore City, MD
BALTIMORE <- ALL[ALL$fips == "24510",]
# str(BALTIMORE)
# sum(is.na(BALTIMORE))
# Calculate yearly totals from all sources for each of the years 1999-2008.
DF <- aggregate(BALTIMORE$Emissions, by=list(BALTIMORE$year), FUN=sum)
# Using the base plotting system, we plot the total Emission for Baltimore City, MD
png("plot2.png")
plot(DF, type = "l", main = "Total Emissions for Baltimore City, MD", ylab = "Emissions (tons)", xlab = "Year")
dev.off()
# Answer: overall they have decreased, though not in a linear pattern. A spike
# in 2005 brought emissions almost back up to the original level, but by 2008 they
# had dropped sharply to well below the 1999 level.
| /04-ExploratoryDataAnalysis/EXData_Project2/plot2.R | no_license | anhnguyendepocen/data-science-specialization | R | false | false | 1,050 | r |
#' Access files in the current app
#'
#' @param ... Character vector specifying directory and or file to
#' point to inside the current package.
#'
#' @noRd
app_sys <- function(...){
system.file(..., package = "drakeClinModel")
}
#' Read App Config
#'
#' @param value Value to retrieve from the config file.
#' @param config R_CONFIG_ACTIVE value.
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @importFrom config get
#'
#' @noRd
get_golem_config <- function(
value,
config = Sys.getenv("R_CONFIG_ACTIVE", "default"),
use_parent = TRUE
){
config::get(
value = value,
config = config,
# Modify this if your config file is somewhere else:
file = app_sys("golem-config.yml"),
use_parent = use_parent
)
}
| /R/app_config.R | permissive | jixing475/drakeClinModel | R | false | false | 788 | r |
# Script to use LEA for Structure/Admixture analyses
# You will need to install these packages if it's the first time on a new computer:
#install.packages(c("fields","RColorBrewer","mapplots"))
#source("http://bioconductor.org/biocLite.R")
#biocLite("LEA")
library(fields)
library(LEA)
# Additional helper files needed for the structure-format conversion
source("Conversion.R")
source("POPSutilities.R")
# Import the input file. If this is coming directly from Stacks you will need to delete
# the first two rows of the file; you should be left with just the data, no other information.
struct2geno(file = "Shabrocahites_no_outgroups.recode.p.structure", TESS = FALSE, diploid = TRUE, FORMAT = 2,extra.row = 0, extra.col = 0, output = "Shab.geno")
#test for best K
obj.snmf = snmf("Shab.geno", K = 1:25, ploidy = 2, entropy = T, alpha = 100, project = "new")
pdf(file="delta_K.pdf")
plot(obj.snmf, col = "blue4", cex = 2.0, pch = 19,cex.axis=1.4,cex.lab=1.4)
dev.off()
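# Optional: pick the best of the snmf runs by cross-entropy before extracting Q
# (a sketch using LEA's cross.entropy() and the run argument of Q()):
# ce <- cross.entropy(obj.snmf, K = 4)
# best_run <- which.min(ce)
# qmatrix <- Q(obj.snmf, K = 4, run = best_run)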
##########
### K = 2
##########
obj.snmf = snmf("Shab.geno", K = 2, alpha = 100, project = "new", iterations=200)
qmatrix = Q(obj.snmf, K = 2)
palette4 <- c("#FF9326","#FFFB23")
names <- c("LA1625","LA0407","LA1223","LA2119","LA2128","LA1252","LA2861","LA2114","LA2105","LA2106","LA2103","LA2859","LA2860","LA2869","LA2855","LA2100","LA2098_1","LA2098_2","LA2650","LA2175","LA1737","LA1739","LA2541","LA2171","LA2204","LA2196","LA2156","LA2155","LA2324","LA1352","LA1354","LA2329","LA1986","LA2574","LA1362","LA1361","LA1778","LA2975","LA1777","LA2976","LA1772","LA0094","LA1298","LA1560","LA1753","LA2409","LA1691","LA1681","LA1731","LA1721","LA1928")
#plot classic structure diagram
pdf(file="K_equals_2.pdf",10,5)
barplot(t(qmatrix), col = c(palette4), border = NA, space = 0,names.arg=names,las=2,cex.names=0.7,
xlab = "Individuals", ylab = "Admixture coefficients")
dev.off()
#######
## K = 3
#######
obj.snmf = snmf("Shab.geno", K = 3, alpha = 100, project = "new", iterations=200)
qmatrix = Q(obj.snmf, K = 3)
palette4 <- c("#FFFB23","#FF9326","#DF0101")
names <- c("LA1625","LA0407","LA1223","LA2119","LA2128","LA1252","LA2861","LA2114","LA2105","LA2106","LA2103","LA2859","LA2860","LA2869","LA2855","LA2100","LA2098_1","LA2098_2","LA2650","LA2175","LA1737","LA1739","LA2541","LA2171","LA2204","LA2196","LA2156","LA2155","LA2324","LA1352","LA1354","LA2329","LA1986","LA2574","LA1362","LA1361","LA1778","LA2975","LA1777","LA2976","LA1772","LA0094","LA1298","LA1560","LA1753","LA2409","LA1691","LA1681","LA1731","LA1721","LA1928")
#plot classic structure diagram
pdf(file="K_equals_3.pdf",10,5)
barplot(t(qmatrix), col = c(palette4), border = NA, space = 0,names.arg=names,las=2,cex.names=0.7,
xlab = "Individuals", ylab = "Admixture coefficients")
dev.off()
original_palette4 <- c("#FFFB23","#FF9326","#DF0101","#A945FF")  # original 4-colour palette (kept for reference)
# Run with the best K of 4
obj.snmf = snmf("Shab.geno", K = 4, alpha = 100, project = "new", iterations=500)
qmatrix = Q(obj.snmf, K = 4)
palette4 <- c("#FF9326","#DF0101","#FFFB23","#A945FF")
names <- c("LA1625","LA0407","LA1223","LA2119","LA2128","LA1252","LA2861","LA2114","LA2105","LA2106","LA2103","LA2859","LA2860","LA2869","LA2855","LA2100","LA2098_1","LA2098_2","LA2650","LA2175","LA1737","LA1739","LA2541","LA2171","LA2204","LA2196","LA2156","LA2155","LA2324","LA1352","LA1354","LA2329","LA1986","LA2574","LA1362","LA1361","LA1778","LA2975","LA1777","LA2976","LA1772","LA0094","LA1298","LA1560","LA1753","LA2409","LA1691","LA1681","LA1731","LA1721","LA1928")
#plot classic structure diagram
pdf(file="K_equals_4.pdf",7,3)
barplot(t(qmatrix), col = c(palette4), border = NA, space = 0,names.arg=names,las=2,cex.names=0.7,
xlab = "Individuals", ylab = "Admixture coefficients")
dev.off()
#####
# K = 5
#####
obj.snmf = snmf("Shab.geno", K = 5, alpha = 100, project = "new", iterations=200)
qmatrix = Q(obj.snmf, K = 5)
palette5 <- c("#A945FF","#FFFB23","#FF9326","#DF0101","#9999FF")
names <- c("LA1625","LA0407","LA1223","LA2119","LA2128","LA1252","LA2861","LA2114","LA2105","LA2106","LA2103","LA2859","LA2860","LA2869","LA2855","LA2100","LA2098_1","LA2098_2","LA2650","LA2175","LA1737","LA1739","LA2541","LA2171","LA2204","LA2196","LA2156","LA2155","LA2324","LA1352","LA1354","LA2329","LA1986","LA2574","LA1362","LA1361","LA1778","LA2975","LA1777","LA2976","LA1772","LA0094","LA1298","LA1560","LA1753","LA2409","LA1691","LA1681","LA1731","LA1721","LA1928")
#plot classic structure diagram
pdf(file="K_equals_5.pdf",10,5)
barplot(t(qmatrix), col = c(palette5), border = NA, space = 0,names.arg=names,las=2,cex.names=0.7,
xlab = "Individuals", ylab = "Admixture coefficients")
dev.off()
#####
# K = 6
#####
obj.snmf = snmf("Shab.geno", K = 6, alpha = 100, project = "new", iterations=200)
qmatrix = Q(obj.snmf, K = 6)
palette6 <- c("#FF9326","#A945FF","#FFFB23","#9999FF","#04B404","#DF0101")
names <- c("LA1625","LA0407","LA1223","LA2119","LA2128","LA1252","LA2861","LA2114","LA2105","LA2106","LA2103","LA2859","LA2860","LA2869","LA2855","LA2100","LA2098_1","LA2098_2","LA2650","LA2175","LA1737","LA1739","LA2541","LA2171","LA2204","LA2196","LA2156","LA2155","LA2324","LA1352","LA1354","LA2329","LA1986","LA2574","LA1362","LA1361","LA1778","LA2975","LA1777","LA2976","LA1772","LA0094","LA1298","LA1560","LA1753","LA2409","LA1691","LA1681","LA1731","LA1721","LA1928")
#plot classic structure diagram
pdf(file="K_equals_6.pdf",10,5)
barplot(t(qmatrix), col = c(palette6), border = NA, space = 0,names.arg=names,las=2,cex.names=0.7,
xlab = "Individuals", ylab = "Admixture coefficients")
dev.off()
#####
# K = 7
#####
obj.snmf = snmf("Shab.geno", K = 7, alpha = 100, project = "new", iterations=200)
qmatrix = Q(obj.snmf, K = 7)
#original
#palette7 <- c("#FF9326", "#04B404", "#2121D9", "#9999FF", "#FFFB23", "#DF0101", "#A945FF")
palette7 <- c("#04B404","#9999FF","#A945FF","#2121D9","#FFFB23","#FF9326","#DF0101")
names <- c("LA1625","LA0407","LA1223","LA2119","LA2128","LA1252","LA2861","LA2114","LA2105","LA2106","LA2103","LA2859","LA2860","LA2869","LA2855","LA2100","LA2098_1","LA2098_2","LA2650","LA2175","LA1737","LA1739","LA2541","LA2171","LA2204","LA2196","LA2156","LA2155","LA2324","LA1352","LA1354","LA2329","LA1986","LA2574","LA1362","LA1361","LA1778","LA2975","LA1777","LA2976","LA1772","LA0094","LA1298","LA1560","LA1753","LA2409","LA1691","LA1681","LA1731","LA1721","LA1928")
#plot classic structure diagram
pdf(file="K_equals_7.pdf",10,5)
barplot(t(qmatrix), col = c(palette7), border = NA, space = 0,names.arg=names,las=2,cex.names=0.7,
xlab = "Individuals", ylab = "Admixture coefficients")
dev.off()
#####
# K = 8
#####
obj.snmf = snmf("Shab.geno", K = 8, alpha = 100, project = "new", iterations=200)
qmatrix = Q(obj.snmf, K = 8)
#original
#palette7 <- c("#FF9326", "#04B404", "#2121D9", "#9999FF", "#FFFB23", "#DF0101", "#A945FF")
palette8 <- c("#9999FF","#FFFB23","#A945FF","#2121D9","#FF9326","gray","#04B404","#DF0101")
names <- c("LA1625","LA0407","LA1223","LA2119","LA2128","LA1252","LA2861","LA2114","LA2105","LA2106","LA2103","LA2859","LA2860","LA2869","LA2855","LA2100","LA2098_1","LA2098_2","LA2650","LA2175","LA1737","LA1739","LA2541","LA2171","LA2204","LA2196","LA2156","LA2155","LA2324","LA1352","LA1354","LA2329","LA1986","LA2574","LA1362","LA1361","LA1778","LA2975","LA1777","LA2976","LA1772","LA0094","LA1298","LA1560","LA1753","LA2409","LA1691","LA1681","LA1731","LA1721","LA1928")
#plot classic structure diagram
pdf(file="K_equals_8.pdf",10,5)
barplot(t(qmatrix), col = c(palette8), border = NA, space = 0,names.arg=names,las=2,cex.names=0.7,
xlab = "Individuals", ylab = "Admixture coefficients")
dev.off()
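# Sketch (not run in the original): snmf() also supports multiple repetitions
# per K, in which case the best run can be picked by cross-entropy before
# extracting the Q matrix:
# obj.snmf <- snmf("Shab.geno", K = 8, repetitions = 10, entropy = TRUE, project = "new")
# best <- which.min(cross.entropy(obj.snmf, K = 8))
# qmatrix <- Q(obj.snmf, K = 8, run = best)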
|
/Structure_analyses/Structure.R
|
no_license
|
moghelab/Solanum_habrochaites_popgen
|
R
| false
| false
| 7,589
|
r
|
library(tinytest)
library(tiledb)
isOldWindows <- Sys.info()[["sysname"]] == "Windows" && grepl('Windows Server 2008', osVersion)
if (isOldWindows) exit_file("skip this file on old Windows releases")
isWindows <- Sys.info()[["sysname"]] == "Windows"
ctx <- tiledb_ctx(limitTileDBCores())
if (tiledb_version(TRUE) < "2.9.0") exit_file("Needs TileDB 2.9.* or later")
text_file <- tempfile()
writeLines(c("Simple text file.", "With two lines."), text_file)
expect_true(file.exists(text_file))
expect_true(file.info(text_file)$size > 0)
## check schema creation from no file or given file, and error from missing file
expect_true(inherits(tiledb_filestore_schema_create(), "tiledb_array_schema"))
expect_true(inherits(tiledb_filestore_schema_create(text_file), "tiledb_array_schema"))
expect_error(tiledb_filestore_schema_create("does_not_exist"))
if (isWindows) exit_file("Skip remainder as tests randomly fail")
tempuri <- tempfile()
res <- tiledb_filestore_schema_create(text_file) # schema from text_file
expect_silent( tiledb_array_create(tempuri, res) ) # create array
expect_true(tiledb_filestore_uri_import(tempuri, text_file)) # import text_file into array
newfile <- tempfile()
expect_true(tiledb_filestore_uri_export(newfile, tempuri))
oldcntnt <- readLines(text_file)
newcntnt <- readLines(newfile)
expect_equal(oldcntnt, newcntnt)
unlink(newfile)
unlink(tempuri, recursive=TRUE)
res <- tiledb_filestore_schema_create() # default schema
expect_silent( tiledb_array_create(tempuri, res) ) # create array
buf <- paste(newcntnt, collapse="\n")
expect_true(tiledb_filestore_buffer_import(tempuri, buf)) # import from variable into array
expect_silent(chkbuf <- tiledb_filestore_buffer_export(tempuri))
expect_equal(chkbuf, buf)
expect_equal(tiledb_filestore_size(tempuri), nchar(buf))
## also test reading from filestore-create array via tiledb_array
n <- tiledb_filestore_size(tempuri)
arr <- tiledb_array(tempuri, return_as="asis")
reftxt <- paste(oldcntnt, collapse="\n") # collapse input file into one string
chk <- arr[0:(n-1)]$contents
expect_equal(reftxt, rawToChar(chk))
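## Tidy-up sketch (not part of the original test): remove the second temporary
## array and the text file so repeated local runs start clean.
# unlink(tempuri, recursive = TRUE)
# unlink(text_file)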
|
/inst/tinytest/test_filestore.R
|
permissive
|
TileDB-Inc/TileDB-R
|
R
| false
| false
| 2,153
|
r
|
setwd("C:\\Users\\賴柏霖\\Desktop\\統計\\SME10e_EXCEL_DATA_SETS\\ch12 xlsx")
name="Xr12-35.xlsx"
df1=loadWorkbook(name)
df2=readWorksheet(df1,1)
df2<-df2[-c(123:144),]
df2<-df2[-c(49,77,94),]
sampl=df2[,2]
tcv=qt(1-0.025,118)
mean1=mean(sampl)
std=sd(sampl)
lcl=mean1-tcv*std/sqrt(119)
ucl=mean1+tcv*std/sqrt(119)
ans<-c(lcl,ucl) # 12.35 end
setwd("C:\\Users\\賴柏霖\\Desktop\\統計\\SME10e_EXCEL_DATA_SETS\\ch12 xlsx")
neme="Xr12-75.xlsx"
df1=loadWorkbook(neme)
df2=readWorksheet(df1,1)
sampl=df2$Demand
std=sd(sampl)
vars=std^2
qleft=qchisq(0.025,24)
qright=qchisq(1-0.025,24)
qwe=24*vars/250
if(qwe<=qleft|qwe>=qright) print("can reject null h0")
hist(sampl,col='red')
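# Added sketch: the explicit two-sided p-value for the chi-square variance test
# above, using the same statistic 'qwe' with df = 24.
# pval <- 2 * min(pchisq(qwe, 24), 1 - pchisq(qwe, 24))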
|
/statistics/12.35,12.75.r.r
|
no_license
|
lailaiforNTUIM/For-interview
|
R
| false
| false
| 725
|
r
|
setwd("C:\\Users\\賴柏霖\\Desktop\\統計\\SME10e_EXCEL_DATA_SETS\\ch12 xlsx")
name="Xr12-35.xlsx"
df1=loadWorkbook(name)
df2=readWorksheet(df1,1)
df2<-df2[-c(123:144),]
df2<-df2[-c(49,77,94),]
sampl=df2[,2]
tcv=qt(1-0.025,118)
mean1=mean(sampl)
std=sd(sampl)
lcl=mean1-tcv*std/sqrt(119)
ucl=mean1+tcv*std/sqrt(119)
ans<-c(lcl,ucl)//12.35end
setwd("C:\\Users\\賴柏霖\\Desktop\\統計\\SME10e_EXCEL_DATA_SETS\\ch12 xlsx")
neme="Xr12-75.xlsx"
df1=loadWorkbook(neme)
df2=readWorksheet(df1,1)
sampl=df2$Demand
std=sd(sampl)
vars=std^2
qleft=qchisq(0.025,24)
qright=qchisq(1-0.025,24)
qwe=24*vars/250
if(qwe<=qleft|qwe>=qright) print("can reject null h0")
hist(sampl,col='red')
|
shinyUI(
dashboardPage(
title = "r2d3 Gallary",
dashboardHeader(
title = "r2d3 Gallary", titleWidth = 250
),
dashboardSidebar(
collapsed = F,
width = 250,
sidebarMenu(
id = "menu",
menuItem("barchart", icon = icon("globe"), tabName = "tab_barchart"),
menuItem("calender", icon = icon("globe"), tabName = "tab_calender"),
sliderInput("bar_max", label = "Max:", min = 0.1, max = 1.0, value = 0.2, step = 0.1)
)
),
dashboardBody(
tabItems(
# ----------------------------------------------
# barchart
# ----------------------------------------------
tabItem(tabName = "tab_barchart",
fluidRow(
d3Output("d3_barchart")
)
),
# ----------------------------------------------
      # calendar
# ----------------------------------------------
tabItem(tabName = "tab_calender",
fluidRow(
d3Output("d3_calender")
)
)
)
)
)
)
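# Assumed companion (sketch, not part of this file): a server.R would render the
# two outputs with r2d3 and read input$bar_max; the script name is hypothetical.
# output$d3_barchart <- r2d3::renderD3(
#   r2d3::r2d3(data = runif(5, 0, input$bar_max), script = "barchart.js")
# )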
|
/shiny/ex_r2d3/ui.R
|
no_license
|
okiyuki99/HowToR
|
R
| false
| false
| 1,079
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimator_plans.R
\name{est_params}
\alias{est_params}
\alias{est_params.lm_est}
\alias{est_params.simple_est}
\alias{est_params.grid_rf}
\title{Estimate parameters}
\usage{
est_params(obj, y, d = NULL, X, sample = "est", ret_var = FALSE)
\method{est_params}{lm_est}(obj, y, d = NULL, X, sample = "est", ret_var = FALSE)
\method{est_params}{simple_est}(obj, y, d = NULL, X, sample = "est", ret_var = FALSE)
\method{est_params}{grid_rf}(obj, y, d = NULL, X, sample = "est", ret_var = FALSE)
}
\arguments{
\item{obj}{an EstimatorPlan object}
\item{y}{A N-vector}
\item{d}{A N-vector or NxM matrix (so that they can be estimated jointly)}
\item{X}{A NxK matrix or data.frame}
\item{sample}{One of: "trtr", "trcv", "est"}
\item{ret_var}{Return Variance in the return list}
}
\value{
\code{list(param_est=...)}; when \code{ret_var} is \code{TRUE}, the returned list additionally includes the estimated variance
}
\description{
Estimate parameters on the data.
}
\section{Methods (by class)}{
\itemize{
\item \code{lm_est}: lm_est
\item \code{simple_est}: simple_est
\item \code{grid_rf}: grid_rf
}}
|
/man/est_params.Rd
|
permissive
|
test-mass-forker-org-1/CausalGrid
|
R
| false
| true
| 1,131
|
rd
|
\name{pdat1}
\alias{pdat1}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot sequential sensitivity-test data from a gonogo list
}
\description{
Plots test levels against trial number for a list created by \code{gonogo},
marking responses and non-responses and overlaying Neyer or 3pod annotations.
}
\usage{
pdat1(dat)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{dat}{
a list created by \code{gonogo}; the function exits with a message otherwise
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (dat)
{
dt = dtt = dat$d0
about = dat$about
titl = dat$titl
unit = dat$unit
ln = dat$ln
pee = dat$p
neyer = dat$neyer
x = dt$X
y = dt$Y
id = dt$ID
nid = length(id)
if (is.null(about)) {
cat("This function only works for lists created by gonogo\n\n")
return()
}
if (is.null(neyer)) {
neyer = F
b = gsub("[0-9]+", "", id[1])
if (b == "B")
neyer = T
}
if (length(pee) == 0)
pee = 0
fini = 0
if (id[nid] == "III3")
fini = 1
if (fini == 1) {
dtt = dtt[-nid, ]
x = x[-nid]
y = y[-nid]
id = id[-nid]
nid = nid - 1
}
zee = x[1]
if (pee * (1 - pee) > 0 & fini == 1) {
yu = glmmle(dtt)
zee = yu$mu + qnorm(pee) * yu$sig
}
about1 = expression(paste("{", mu[lo], ",", mu[hi], ",",
sigma[g], "|", n[1], ",", n[2], ",", n[3], "|p,", lambda,
",res}", sep = ""))
w = pretty(x, n = 10)
ens = 1:nid
rd = which(y == 1)
gr = which(y == 0)
ylm = range(pretty(c(x, max(x, na.rm = T) + diff(range(x))/80),
n = 10))
lb = nid - 1
if (lb > 30)
lb = ceiling(lb/2)
if (nid == 1)
return()
if (nid > 1) {
par(mar = c(4, 4, 5, 2) + 0.1)
lnum = 2.3
if (!ln)
plot(c(ens, 1), c(x, zee), type = "n", xlab = "",
ylab = "", ylim = ylm, lab = c(lb, 5, 7))
else {
par(mar = c(4, 3, 5, 3) + 0.1)
plot(c(ens, 1), c(x, zee), type = "n", xlab = "",
ylab = "", ylim = ylm, yaxt = "n")
w7 = pretty(exp(x), n = 6)
axis(2, at = log(w7), lab = round(w7, 1), srt = 90,
tcl = -0.4, mgp = c(1, 0.5, 0))
w8 = pretty(x, n = 6)
axis(4, at = w8, lab = round(w8, 1), srt = 90, tcl = -0.4,
mgp = c(1, 0.5, 0))
mtext("Log Scale", side = 4, line = 1.6)
lnum = 1.8
}
mtext(paste("Test Level (", unit, ")", sep = ""), side = 2,
line = lnum)
mtext("Trial Number", side = 1, line = 2.2)
points(ens[rd], x[rd], pch = 25, cex = 0.7, bg = 4)
points(ens[gr], x[gr], pch = 24, cex = 0.7, bg = 3)
if (neyer)
tf = addneyr(dtt, ylm)
else tf = add3pod(dtt, ylm)
mtext(titl, side = 3, line = 3.4, cex = 1.2, col = 1)
mtext(about1, side = 3, line = 1.8, cex = 1.2)
if (tf[1] & neyer)
about = chabout(about, nrow(dtt), 4)
mtext(about, side = 3, line = 0.5, cex = 1.2)
if (fini == 1) {
axis(4, label = F, at = dt$RX[nid + 1], tcl = 0.25,
lwd = 2)
axis(4, label = F, at = zee, tcl = -0.25, lwd = 2)
}
reset()
}
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/pdat1.Rd
|
no_license
|
Auburngrads/3pod
|
R
| false
| false
| 4,102
|
rd
|
library(agridat)
library(psych)
library(sommer)
library(tidyverse)
# get example data --------------------------------------------------------
dat <- agridat::john.alpha
# fit model ---------------------------------------------------------------
# random genotype effect
g.ran <- mmer(fixed = yield ~ rep,
random = ~ gen + rep:block,
data = dat)
# handle model estimates --------------------------------------------------
vc_g <- g.ran %>% pluck("sigma") %>% pluck("gen") %>% as.numeric # genetic variance component
n_g <- g.ran %>% pluck("U") %>% pluck("gen") %>% pluck("yield") %>% length # number of genotypes
G_g <- diag(n_g)*vc_g # subset of G regarding genotypic effects = I * vc.g
C22_g <- g.ran %>% pluck("PevU") %>% pluck("gen") %>% pluck("yield") # Prediction error variance matrix for genotypic BLUPs
ED <- diag(n_g) - (solve(G_g) %*% C22_g) # [see p. 813 bottom left in Oakey (2006)]
eM <- ED %>% eigen # obtain eigenvalues
# H2 Oakey ----------------------------------------------------------------
# main method [see eq. (7) in Oakey (2006)]
H2Oakey <- sum(eM$values)/(n_g-1)
H2Oakey # 0.8091336
# approximate method [see p. 813 top right in Oakey (2006)]
H2Oakey_approx <- 1 - psych::tr( as.matrix(solve(G_g) %*% C22_g / n_g ) )
H2Oakey_approx # 0.7754197
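# Related sketch (not in the original): the Cullis (2006) heritability, based on
# the mean prediction error variance of a BLUP difference, can be computed from
# the same C22_g and vc_g objects.
# C22 <- as.matrix(C22_g)
# vdBLUP_mat  <- outer(diag(C22), diag(C22), "+") - 2 * C22  # PEV of each pairwise BLUP difference
# vdBLUP_mean <- mean(vdBLUP_mat[lower.tri(vdBLUP_mat)])
# H2Cullis    <- 1 - vdBLUP_mean / (2 * vc_g)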
|
/Alternative Heritability Measures/sommer/H2 Oakey.R
|
no_license
|
PaulSchmidtGit/Heritability
|
R
| false
| false
| 1,472
|
r
|
#-------------------------------------------------------------------
# Update Date: 07/21/2019
# Create Date: 04/30/2019
# Author: Yan (Dora) Zhang
#-------------------------------------------------------------------
rm(list=ls())
# setwd("~/Dropbox/2018_02_cancer/code_new/results_summary_clump//")
setwd("~/OneDrive - The University Of Hong Kong/AAA/2018_02_cancer/CancerEffectSize/code/d_summary_figure")
library(data.table)
# tr0 = fread("~/Dropbox/2018_02_cancer/data_samplesize/cancer_sample_size.csv")
tr0 = fread("../../data_samplesize//cancer_sample_size.csv")
inx1 = c(23,15,43,19,39)
inx2 = c(42,16,26,13,14,27)
inx3 = c(20,40,4)
inx = c(inx1,inx2,inx3)
tr = tr0[inx,]
M = 1070777
traitlist = unlist(tr[,1])
traitlistplot = unlist(tr[,7])
tem = matrix(0, length(inx),35);
inx2com = c(2,4)
#-------------------------------
for(iter in 1:nrow(tr)){
trait_name = traitlist[iter]
trait_name_plot = traitlistplot[iter]
output_path = paste0("../../genesis_result_new/",trait_name)
ncase = tr[iter,2][[1]]; ncontrol = tr[iter,3][[1]]
current.case = tr[iter, 5][[1]]; current.control = tr[iter,6][[1]]
if(iter %in% inx2com){
if(!file.exists(paste0(output_path,"/bestfit2_RemoveOutlier.RData"))){
load(paste0(output_path,"/bestfit2.RData"))
print(c(trait_name,"no"))
herit_OutlierIndep = 0;
n_OutlierIndep = 0;
}
if(file.exists(paste0(output_path,"/bestfit2_RemoveOutlier.RData"))){
load(paste0(output_path,"/bestfit2_RemoveOutlier.RData"))
load(paste0("../../data_new/",trait_name,"/herit_OutlierIndep.RData"))
}
result = fit
est = result$estimates$`Parameter (pic, sigmasq, a) estimates`
sd = result$estimates$`S.D. of parameter estimates`
llk = result$estimates$`Composite log-likelihood of fitted model`
n_SNP_ana = result$estimates$`Total number of SNPs in the GWAS study after quality control`
bic = result$estimates$`Model selection related`$BIC
nssnp = as.numeric(strsplit(result$estimates$`Number of sSNPs (sd)`, " ")[[1]][1])
sd_nssnp = as.numeric(gsub(".*\\((.*)\\).*", "\\1",strsplit(result$estimates$`Number of sSNPs (sd)`, " ")[[1]][2]))
nssnp = nssnp + n_OutlierIndep
nssnp1 = NA
sd_nssnp1 = NA
herit1 = NA
sd_herit1 = NA
herit2 = NA
sd_herit2 = NA
herit = as.numeric(strsplit(result$estimates$`Total heritability in log-odds-ratio scale (sd)`, " ")[[1]][1])
sd_herit = as.numeric(gsub(".*\\((.*)\\).*", "\\1",strsplit(result$estimates$`Total heritability in log-odds-ratio scale (sd)`, " ")[[1]][2]))
herit = herit+herit_OutlierIndep
pic = est[1]; sd_pic = sd[1];
p1 = NA; sd_p1 = NA
sig1 = est[2]; sd_sig1 = sd[2]
sig2 = NA; sd_sig2 = NA
a = est[3]; sd_a = sd[3]
effect_perSNP = sig1
sd_effect_perSNP = sd_sig1
auc<-pnorm(sqrt(herit/2))
sd_auc <- sd_herit * dnorm(sqrt(herit/2))/(sqrt(2)*2*sqrt(herit))
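    # (comment added) sd_auc is the delta-method standard error of
    # auc = pnorm(sqrt(h/2)): d/dh pnorm(sqrt(h/2)) = dnorm(sqrt(h/2)) / (2*sqrt(2*h)),
    # evaluated at h = herit and scaled by sd_herit.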
#-----
# number of effective associated SNPs
ebeta2 = sig1
ebeta4 = 3*(sig1^2)
kap = ebeta4/(ebeta2)^2
effect.causal = 3*(M*pic+n_OutlierIndep)/kap
}
#-----------
# 3com GENESIS
if(!iter %in% inx2com){
if(!file.exists(paste0(output_path,"/fit3_RemoveOutlier.RData"))){
load(paste0(output_path,"/fit3.RData"))
print(c(trait_name,"no"))
herit_OutlierIndep = 0;
n_OutlierIndep = 0;
}
if(file.exists(paste0(output_path,"/fit3_RemoveOutlier.RData"))){
load(paste0(output_path,"/fit3_RemoveOutlier.RData"))
load(paste0("../../data_new/",trait_name,"/herit_OutlierIndep.RData"))
}
result = fit
est = result$estimates$`Parameter (pic, p1, sigmasq1, sigmasq2, a) estimates`
sd = result$estimates$`S.D. of parameter estimates`
llk = result$estimates$`Composite log-likelihood of fitted model`
n_SNP_ana = result$estimates$`Total number of SNPs in the GWAS study after quality control`
bic = result$estimates$`Model selection related`$BIC
nssnp = as.numeric(strsplit(result$estimates$`Number of sSNPs (sd)`, " ")[[1]][1])
sd_nssnp = as.numeric(gsub(".*\\((.*)\\).*", "\\1",strsplit(result$estimates$`Number of sSNPs (sd)`, " ")[[1]][2]))
nssnp = nssnp + n_OutlierIndep
nssnp1 = as.numeric(strsplit(result$estimates$`Number of sSNPs in the cluster with larger variance component (sd)`, " ")[[1]][1])
sd_nssnp1 = as.numeric(gsub(".*\\((.*)\\).*", "\\1",strsplit(result$estimates$`Number of sSNPs in the cluster with larger variance component (sd)`, " ")[[1]][2]))
nssnp1 = nssnp1 + n_OutlierIndep
herit1 = as.numeric(strsplit(result$estimates$`Heritability explained by the cluster with larger variance component (sd)`, " ")[[1]][1])
sd_herit1 = as.numeric(gsub(".*\\((.*)\\).*", "\\1",strsplit(result$estimates$`Heritability explained by the cluster with larger variance component (sd)`, " ")[[1]][2]))
herit1 = herit1 + herit_OutlierIndep
herit2 = as.numeric(strsplit(result$estimates$`Heritability explained by the cluster with samller variance component`, " ")[[1]][1])
sd_herit2 = as.numeric(gsub(".*\\((.*)\\).*", "\\1",strsplit(result$estimates$`Heritability explained by the cluster with samller variance component`, " ")[[1]][2]))
herit = as.numeric(strsplit(result$estimates$`Total heritability in log-odds-ratio scale (sd)`, " ")[[1]][1])
sd_herit = as.numeric(gsub(".*\\((.*)\\).*", "\\1",strsplit(result$estimates$`Total heritability in log-odds-ratio scale (sd)`, " ")[[1]][2]))
herit = herit+herit_OutlierIndep
pic = est[1]; sd_pic = sd[1];
p1 = est[2]; sd_p1 = sd[2]
sig1 = est[3]; sd_sig1 = sd[3]
sig2 = est[4]; sd_sig2 = sd[4]
a = est[5]; sd_a = sd[5]
effect_perSNP = (p1*sig1+(1-p1)*sig2)
var_est = result$estimates$`Covariance matrix of parameter estimates`
temtem = matrix(c(0,
(est[3] - est[4]),
(est[2]),
(1-est[2]),
0), ncol=1)
sd_effect_perSNP = sqrt( t(temtem) %*% var_est %*% temtem) # standard error of effect per sSNP
auc<-pnorm(sqrt(herit/2))
sd_auc <- sd_herit * dnorm(sqrt(herit/2))/(sqrt(2)*2*sqrt(herit))
#-----
# number of effective associated SNPs
p2 = 1-p1
ebeta4 = 3*(p1*sig1^2+p2*sig2^2)
ebeta2 = p1*sig1 + p2*sig2
kap = ebeta4/(ebeta2)^2
effect.causal = 3*(M*pic+n_OutlierIndep)/kap
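    # (comment added) kap = E[beta^4]/(E[beta^2])^2 is the kurtosis of the
    # mixture effect-size distribution; a single normal component gives kap = 3,
    # in which case effect.causal equals the sSNP count, while heavier tails
    # (kap > 3) shrink the effective number of causal SNPs.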
}
tem[iter,] = c(trait_name_plot, n_SNP_ana,
nssnp,sd_nssnp, nssnp1,sd_nssnp1, herit1,sd_herit1,herit2,sd_herit2,
herit,sd_herit, bic, auc, sd_auc,
pic, sd_pic,
p1,sd_p1,sig1,sd_sig1,sig2,sd_sig2,a,sd_a,llk,
effect_perSNP, sd_effect_perSNP,
ncase, ncontrol, current.case, current.control,
herit_OutlierIndep,n_OutlierIndep, effect.causal)
}
tem = data.frame(tem)
colnames(tem) = c("Trait",
"Total number of SNPs in the GWAS study after quality control",
"Estimated # of susceptibility SNPs (contain outlier)", "sd_num_sSNP",
"Estimated # of susceptibility SNPs in cluster with larger effects (contain outlier)","sd_num_sSNP1",
"Heritability explained in cluster with larger effects (contain outlier)", "sd_herit1",
"Heritability explained in cluster with smaller effects","sd_herit2",
"Estimate of total observed scale heritability (contain outlier)", "sd_herit",
"BIC","AUC (contain outlier)", "sd_AUC",
"pic", "sd_pic",
"p1", "sd_p1",
"sig1", "sd_sig1", "sig2", "sd_sig2",
"a", "sd_a", "llk",
"Average heritability explained per sSNP (no outlier)", "sd_effect per sSNP",
"# of cases", "#of controls","current cases", "current controls",
"herit_OutlierIndep", "n_OutlierIndep", "# of effective causal SNPs")
output_path = paste0("../../result_png_csv_new_clump//")
write.csv(tem,file= paste0(output_path,"/table_est_all.csv"),row.names=F)
|
/code/d_summary_figure/table_est_all.R
|
no_license
|
yandorazhang/CancerEffectSize
|
R
| false
| false
| 8,218
|
r
|
\name{xmp11.06}
\alias{xmp11.06}
\non_function{}
\title{data from Example 11.6}
\description{
The \code{xmp11.06} data frame has 24 rows and 3 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{Resp}{
a numeric vector of the mean number of responses emitted by each
subject during single and compound stimuli presentations over a
4-day period.
}
\item{Stimulus}{
a numeric vector of stimulus levels. These levels correspond to
L1 (moderate intensity light), L2 (low intensity light), T (tone),
L1+L2, L1+T, and L2+T.
}
\item{Subject}{
a numeric vector identifying the subject (rat).
}
}
}
\source{
Devore, J. L. (2000) \emph{Probability and Statistics for Engineering
and the Sciences (5th ed)}, Duxbury
(1971), ``Compounding of discriminative stimuli from the same and
different sensory modalities'', \emph{J. Experimental Analysis and
Behavior}, 337-342
}
\examples{
data(xmp11.06)
plot(Resp ~ Stimulus, data = xmp11.06, col = "lightgray",
main = "Data from Example 11.6",
ylab = "Mean number of responses")
for (i in seq(along = levels(xmp11.06$Subject))) {
    lines(Resp ~ as.integer(Stimulus),
          data = xmp11.06[ xmp11.06$Subject == i, ],
          col = i+1, type = "b")  # subsetting via 'data =' avoids repeated attach() calls
  }
legend(0.8, 95, paste("Subject", levels(xmp11.06$Subject)),
col = 1 + seq(along = levels(xmp11.06$Subject)),
lty = 1)
fm1 <- lm(Resp ~ Stimulus + Subject, data = xmp11.06)
anova(fm1) # compare to Table 11.5, page 443
attach(xmp11.06)
means <- sort(tapply(Resp, Stimulus, mean))
means
diff(means) # successive differences
qtukey(0.95, nmeans = 6, df = 15) #for Tukey comparisons
detach()
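## Added sketch: TukeyHSD() on an aov() fit reports the Tukey pairwise
## comparisons directly, matching the qtukey() critical value above:
## TukeyHSD(aov(Resp ~ Stimulus + Subject, data = xmp11.06), "Stimulus")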
}
\keyword{datasets}
|
/man/xmp11.06.Rd
|
no_license
|
cran/Devore5
|
R
| false
| false
| 1,736
|
rd
|
##' Create a theme object for remap
##'
##' get_theme creates a theme list to control the colors
##' shown in the remap, including lineColor, backgroundColor,
##' titleColor, borderColor and regionColor.
##'
##' If you use the theme argument of the get_theme function,
##' you will get one of the default themes ("Dark", "Bright", "Sky").
##' If you don't like those colors, set theme = "none" and use the other
##' parameters to control the remap.\\
##' Colors can be given as "red", "#1b1b1b" or rgba(100,100,100,1)
##'
##'
##' @param theme a character object, one of ("Dark", "Bright", "Sky", "none")
##' @param lineColor Control the color of the line, "Random" for
##' random color
##' @param backgroundColor Control the color of the background
##' @param titleColor Control the color of the title
##' @param borderColor Control the color of the border
##' @param regionColor Control the color of the region
##' @param labelShow whether show the label of each element,
##' only support mapC.
##' @param pointShow whether show the center point of each element,
##' only support mapC.
##' @param pointColor color of the center point of each element,
##' only support mapC.
##' @return A list of color control, which can be used by remap
##' @author Chiffon <\url{http://lchiffon.github.io}>
##' @examples
##' ## default theme:"Dark"
##' set.seed(125)
##' out = remap(demoC,title = "REmap示例数据",subtitle = "theme:Bright",
##' theme = get_theme("Bright"))
##' plot(out)
##'
##' ## set Line color as 'orange'
##' set.seed(125)
##' out = remap(demoC,title = "REmap示例数据",subtitle = "theme:Bright",
##' theme = get_theme("None",
##' lineColor = "orange"))
##' plot(out)
##'
##' ## Set backgroundColor as 'red'(#FF0000)
##'
##' out = remap(demoC,title = "REmap示例数据",subtitle = "theme:Bright",
##' theme = get_theme("None",
##' lineColor = "orange",
##' backgroundColor = "#FF0000"))
##' plot(out)
##'
##' ## Set TitleColor
##' out = remap(demoC,title = "REmap示例数据",subtitle = "theme:Bright",
##' theme = get_theme("None",
##' lineColor = "orange",
##' backgroundColor = "#FFC1C1",
##' titleColor = "#1b1b1b"))
##' plot(out)
##'
##' ## Set Region Color
##' out = remap(demoC,title = "REmap示例数据",subtitle = "theme:Bright",
##' theme = get_theme("None",
##' lineColor = "orange",
##' backgroundColor = "#FFC1C1",
##' titleColor = "#1b1b1b",
##' regionColor = '#ADD8E6'))
##' plot(out)
get_theme = function(theme = "Dark",
lineColor = "Random",
backgroundColor = "#1b1b1b",
titleColor = "#fff",
borderColor = "rgba(100,149,237,1)",
regionColor = "#1b1b1b",
labelShow = T,
pointShow = F,
pointColor = 'gold'){
theme_data = list(
Dark = list(
lineColor = "Random",
backgroundColor = "#1b1b1b",
titleColor = "#fff",
borderColor = "rgba(100,149,237,1)",
regionColor = "#1b1b1b",
labelShow = 'true',
pointShow = 'false',
pointColor = 'gold'
),
Bright = list(
lineColor = "Random",
backgroundColor = "#D9D9D9",
titleColor = "#1b1b1b",
borderColor = "rgba(100,149,237,1)",
regionColor = "#fff",
labelShow = 'true',
pointShow = 'false',
pointColor = 'gold'
),
Sky = list(
lineColor = "Random",
backgroundColor = "#fff",
titleColor = "#1b1b1b",
borderColor = "rgba(100,149,237,1)",
regionColor = "#AEEEEE",
labelShow = 'true',
pointShow = 'false',
pointColor = 'gold'
)
)
if(labelShow){
labelShow = 'true'
}else{
labelShow = 'false'
}
if(pointShow){
pointShow = 'true'
}else{
pointShow = 'false'
}
if (theme %in% c("Dark","Bright","Sky")){
out_theme = theme_data[[theme]]
}else{
out_theme = list(
lineColor = lineColor,
backgroundColor = backgroundColor,
titleColor = titleColor,
borderColor = borderColor,
regionColor = regionColor,
labelShow = labelShow,
pointShow = pointShow,
pointColor = pointColor
)
}
if(out_theme$lineColor == "Random"){
out_theme$lineColor2 =
"['#ff3333', 'orange', 'yellow','lime','aqua']"
}else{
out_theme$lineColor2 =
paste0("['",out_theme$lineColor,
"', '",
out_theme$lineColor,"', '",
out_theme$lineColor,"', '",
out_theme$lineColor,"', '",
out_theme$lineColor,"']")
}
out_theme
}
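# Usage note (added): get_theme() returns a plain list, so individual entries
# can be tweaked after the fact, e.g.:
# th <- get_theme("Dark"); th$titleColor <- "gold"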
|
/REmap/R/get_theme.R
|
permissive
|
moisiet/R-Packge
|
R
| false
| false
| 5,205
|
r
|
library(ZeligDVN)
isLegalPkg <- function (pkg) {
grepl("^[a-zA-Z][a-zA-Z0-9\\.]*$", pkg)
}
model <- "logit"
pkg <- getModelPkg(model)
load.script <- ""
if (isLegalPkg(pkg))
load.script <- sprintf("library(%s)", pkg)
load.script
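# (comment added) The regex in isLegalPkg accepts names that start with a letter
# and then contain only letters, digits, or periods, e.g.:
# isLegalPkg("MASS")   # TRUE
# isLegalPkg("2fast")  # FALSE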
|
/demo/find.package.R
|
no_license
|
zeligdev/ZeligDVN
|
R
| false
| false
| 238
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bacon.lipd.R
\name{runBacon}
\alias{runBacon}
\title{Generate a Bayesian Reconstruction Age Model (Bacon) and add it into a LiPD object}
\usage{
runBacon(
L,
chron.num = NA,
meas.table.num = NA,
bacon.dir = NA,
site.name = L$dataSetName,
model.num = NA,
remove.rejected = TRUE,
overwrite = TRUE,
cc = NA,
max.ens = 1000,
use.marine = NULL,
lab.id.var = "labID",
age.14c.var = "age14C",
age.14c.uncertainty.var = "age14CUnc",
age.var = "age",
age.uncertainty.var = "ageUnc",
depth.var = "depth",
reservoir.age.14c.var = "reservoirAge",
reservoir.age.14c.uncertainty.var = "reservoirAge14C",
rejected.ages.var = "rejected",
ask.reservoir = TRUE,
bacon.thick = NA,
bacon.acc.mean = NA,
...
)
}
\arguments{
\item{L}{a LiPD object}
\item{chron.num}{the number of the chronData object that you'll be working in}
\item{meas.table.num}{an integer giving which measurementTable contains the variables you want}
\item{bacon.dir}{the directory where Bacon is installed on this computer.}
\item{site.name}{the name used for the bacon model (and directories)}
\item{model.num}{an integer giving which chronModel number to use}
\item{remove.rejected}{don't write out dates that are marked as rejected}
\item{overwrite}{overwrite files and directories}
\item{cc}{An integer, or a vector of integers (one per age), that specifies the calibration curve. You can specify it here (see below), or if it's NA the code will guess based on archiveType
\itemize{
\item cc=1 IntCal20
\item cc=2 MarineCal20
\item cc=3 SHCal20
}}
\item{max.ens}{the maximum number of ensembles to load in (default = 1000)}
\item{use.marine}{use the marine 13C curve? (yes or no, or NULL to choose)}
\item{lab.id.var}{Lab Id variable name}
\item{age.14c.var}{Radiocarbon age variable name}
\item{age.14c.uncertainty.var}{Radiocarbon age uncertainty variable name}
\item{age.var}{Calibrated age variable name}
\item{age.uncertainty.var}{Calibrated age uncertainty variable name}
\item{depth.var}{Depth variable name}
\item{reservoir.age.14c.var}{Reservoir age variable name}
\item{reservoir.age.14c.uncertainty.var}{Reservoir age uncertainty variable name}
\item{rejected.ages.var}{Rejected ages variable name}
\item{ask.reservoir}{ask about reservoir corrections}
\item{bacon.thick}{thickness parameter to pass to bacon (How thick is each chunk to model)}
\item{bacon.acc.mean}{prior mean accumulation rate estimate for bacon}
\item{...}{
Arguments passed on to \code{\link[rbacon:Bacon]{rbacon::Bacon}}
\describe{
\item{\code{core}}{Name of the core, given using quotes. Defaults to one of the cores provided with rbacon, \code{core="MSB2K"}.
An alternative core provided with this package is RLGH3 (Jones et al., 1989).
To run your own core, produce a .csv file with the dates as outlined in the manual, add a folder with the core's name to the default directory for cores (see \code{coredir}), and save the .csv file there. For example, the file's location and name could be \code{Bacon_runs/MyCore/MyCore.csv}. Then run Bacon as follows: \code{Bacon("MyCore")}}
\item{\code{thick}}{Bacon will divide the core into sections of equal thickness specified by thick (default \code{thick=5}).
Thinner (and thus more) sections will result in smoother age-models, but too many sections can cause `run-away' models.}
\item{\code{coredir}}{Folder where the core's files \code{core} are and/or will be located. This will be a folder with the core's name, within either the folder \code{coredir='Bacon_runs/'}, or the folder Cores/ if it already exists within R's working directory, or a custom-built folder. For example, use \code{coredir="."} to place the core's folder within the current working directory, or \code{coredir="F:"} if you want to put the core's folder and files on a USB drive loaded under F:.}
\item{\code{prob}}{Confidence interval to report. This should lie between 0 and 1, default 0.95 (95 \%).}
\item{\code{d.min}}{Minimum depth of age-depth model (use this to extrapolate to depths higher than the top dated depth).}
\item{\code{d.max}}{Maximum depth of age-depth model (use this to extrapolate to depths below the bottom dated depth).}
\item{\code{add.bottom}}{Add a model section at the bottom of the core, in order to ensure the bottommost date is taken into account. Default \code{add.bottom=TRUE}. This is a new option and can cause age-models to differ from previous versions. Please re-run the model if in doubt.}
\item{\code{d.by}}{Depth intervals at which ages are calculated. Defaults to \code{d.by=1}.}
\item{\code{seed}}{Seed used for C++ executions. If it is not assigned (\code{seed=NA}; default) then the seed is set by system.}
\item{\code{depths.file}}{By default, Bacon will calculate the ages for the depths \code{d.min} to \code{d.max} in steps of \code{d.by}.
If \code{depths.file=TRUE}, Bacon will read a file containing the depths for which you require ages.
This file, containing the depths in a single column without a header, should be stored within \code{coredir},
and its name should start with the core's name and end with `_depths.txt'. Then specify \code{depths.file=TRUE} (default \code{FALSE}). See also \code{depths}.}
\item{\code{depths}}{By default, Bacon will calculate the ages for the depths \code{d.min} to \code{d.max} in steps of \code{d.by}.
Alternative depths can be provided as, e.g., \code{depths=seq(0, 100, length=500)} or as a file, e.g., \code{depths=read.table("CoreDepths.txt"}. See also \code{depths.file}.}
\item{\code{depth.unit}}{Units of the depths. Defaults to \code{depth.unit="cm"}.}
\item{\code{age.unit}}{Units of the ages. Defaults to \code{age.unit="yr"}.}
\item{\code{unit}}{Deprecated and replaced by \code{depth.unit}.}
\item{\code{acc.shape}}{The prior for the accumulation rate consists of a gamma distribution with two parameters.
Its shape is set by acc.shape (default \code{acc.shape=1.5}; higher values result in more peaked shapes).}
\item{\code{acc.mean}}{The accumulation rate prior consists of a gamma distribution with two parameters. Its mean is set by acc.mean (default \code{acc.mean=20} yr/cm (or whatever age or depth units are chosen),
which can be changed to, e.g., 5, 10 or 50 for different kinds of deposits). Multiple values can be given in case of hiatuses or boundaries, e.g., Bacon(hiatus.depths=23, acc.mean=c(5,20))}
\item{\code{mem.strength}}{The prior for the memory (dependence of accumulation rate between neighbouring depths) is a beta distribution, which looks much like the gamma distribution,
but its values are always between 0 (no assumed memory) and 1 (100\% memory). Its default settings of \code{mem.strength=10}
(higher values result in more peaked shapes) allow for a large range of posterior memory values. Please note that the default memory prior has been updated from rbacon version 2.5.1 on, to repair a bug.}
\item{\code{mem.mean}}{The prior for the memory is a beta distribution, which looks much like the gamma distribution but
its values are always between 0 (no assumed memory) and 1 (100\% memory). Its default settings of \code{mem.mean=0.5}
allow for a large range of posterior memory values. Please note that the default memory prior has been updated from rbacon version 2.5.1 on, to repair a bug.}
\item{\code{boundary}}{The assumed depths of any boundary, which divides sections of different accumulation rate regimes (e.g., as indicated by major change in the stratigraphy). No hiatus is assumed between these sections, and memory is reset crossing the boundary. Different accumulation priors can be set for the sections above and below the boundary, e.g., \code{acc.mean=c(5, 20)}. See also \code{hiatus.depths}, \code{mem.mean}, \code{acc.mean} and \code{acc.shape}. Setting many boundaries might not work, and having more than one boundary per model section (see \code{'thick'}) might not work either.}
\item{\code{hiatus.depths}}{The assumed depths for any hiatus should be provided as, e.g.,
\code{hiatus.depths=20} for one at 20cm depth, and \code{hiatus.depths=c(20,40)} for two hiatuses at 20 and 40 cm depth.}
\item{\code{hiatus.max}}{The prior for the maximum length of the hiatus. Hiatus length is a uniform distribution, with equal probabilities between 0 and \code{hiatus.max} yr (or whatever other \code{age.unit} is chosen).}
\item{\code{add}}{Add a value to the maximum hiatus length if a boundary is chosen. Defaults to 100 yr (or whatever other age unit is chosen). Can be adapted if Bacon complains that the parameters are out of support.}
\item{\code{after}}{Sets a short section above and below hiatus.depths within which to calculate ages. For internal calculations - do not change.}
\item{\code{cc1}}{For northern hemisphere terrestrial 14C dates (IntCal20).}
\item{\code{cc2}}{For marine 14C dates (Marine20).}
\item{\code{cc3}}{For southern hemisphere 14C dates (SHCal20).}
\item{\code{cc4}}{Use an alternative curve (3 columns: cal BP, 14C age, error, separated by white spaces and saved as a plain-text file). See \code{ccdir}.}
\item{\code{ccdir}}{Directory where the calibration curves for C14 dates \code{cc} are located. By default \code{ccdir=""}.
For example, use \code{ccdir="."} to choose current working directory, or \code{ccdir="Curves/"} to choose sub-folder \code{Curves/}. Note that all calibration curves should reside in the same directory. If you want to add a custom-built curve, put it in the directory where the default calibration curves are (probably \code{list.files(paste0(.libPaths(), "/IntCal/extdata"))}).
Alternatively produce a new folder, and add your curve as well as the default calibration curves there (cc1, cc2 and cc3; e.g., \code{write.table(ccurve(1), "./3Col_intcal20.14C", sep="\t")}.)}
\item{\code{postbomb}}{Use a postbomb curve for negative (i.e. postbomb) 14C ages. \code{0 = none, 1 = NH1, 2 = NH2, 3 = NH3, 4 = SH1-2, 5 = SH3}}
\item{\code{delta.R}}{Mean of core-wide age offsets (e.g., regional marine offsets).}
\item{\code{delta.STD}}{Error of core-wide age offsets (e.g., regional marine offsets).}
\item{\code{t.a}}{The dates are treated using the student's t distribution by default (\code{normal=FALSE}).
The student's t-distribution has two parameters, t.a and t.b, set at 3 and 4 by default (see Christen and Perez, 2010).
If you want to assign narrower error distributions (more closely resembling the normal distribution), set t.a and t.b at for example 33 and 34 respectively (e.g., for specific dates in your .csv file).
For symmetry reasons, t.a must always be equal to t.b-1.}
\item{\code{t.b}}{The dates are treated using the student's t distribution by default (\code{normal=FALSE}).
The student's t-distribution has two parameters, t.a and t.b, set at 3 and 4 by default (see Christen and Perez, 2010).
If you want to assign narrower error distributions (more closely resembling the normal distribution), set t.a and t.b at for example 33 and 34 respectively (e.g., for specific dates in your .csv file).
For symmetry reasons, t.a must always be equal to t.b-1.}
\item{\code{normal}}{By default, Bacon uses the student's t-distribution to treat the dates. Use \code{normal=TRUE} to use the normal/Gaussian distribution. This will generally give higher weight to the dates.}
\item{\code{suggest}}{If initial analysis of the data indicates abnormally slow or fast accumulation rates, Bacon will suggest changing the prior.
Also, if the length of the core would cause too few or too many sections with the default settings, Bacon will suggest an alternative section thickness \code{thick}.
Accept these suggested alternative settings by typing "y" (or "yes please" if you prefer to be polite), or leave as is by typing "n" (or anything else, really). To get rid of these suggestions, use \code{suggest=FALSE}.}
\item{\code{accept.suggestions}}{Automatically accept the suggested values. Use with care. Default \code{accept.suggestions=FALSE}.}
\item{\code{reswarn}}{Bacon will warn you if the number of sections lies outside the safe range (default between 10 and 200 sections;
\code{reswarn=c(10,200)}). Too few sections could lead to an `elbowy' model while with too many sections the modelling process can get lost,
resulting in age-models far away from the dated depths.}
\item{\code{remember}}{Bacon will try to remember which settings you have applied to your cores (default \code{remember=TRUE}). If you run into inconsistencies or other problems,
try running your core again with \code{remember=FALSE}, or, start cleanly by typing \code{Bacon.cleanup()}.}
\item{\code{ask}}{By default Bacon will ask you to confirm that you want to run the core with the provided settings. Disable this using \code{ask=FALSE} (e.g., for batch runs).}
\item{\code{run}}{In order to load an existing Bacon run instead of producing a new one, you can use \code{run=FALSE}.}
\item{\code{defaults}}{Name of the file containing settings for the core. For internal use only - do not change.}
\item{\code{sep}}{Separator between the fields of the plain text file containing the dating information. Default \code{sep=","}.}
\item{\code{dec}}{Character for decimal points. Defaults to \code{dec="."}.}
\item{\code{runname}}{Text to add to the corename for specific runs, e.g., \code{runname="MyCore_Test1"}.}
\item{\code{slump}}{Upper and lower depths of any sections of assumed abrupt accumulation, that require excising before age-modelling (and adding after age-modelling). Requires pairs of depths, e.g., \code{slump=c(10,15,60,67)} for slumps at 67-60 and 15-10 cm core depth.}
\item{\code{remove}}{Whether or not to remove depths within slumps. Defaults to \code{remove=FALSE}.}
\item{\code{BCAD}}{The calendar scale of graphs and age output-files is in cal BP (calendar or calibrated years before the present, where the present is AD 1950) by default, but can be changed to BC/AD using \code{BCAD=TRUE}.}
\item{\code{ssize}}{The approximate amount of iterations to store at the end of the MCMC run. Default 2000; decrease for faster (but less reliable) runs or increase for cores where the MCMC mixing (panel at upper-left corner of age-model graph) appears problematic.}
\item{\code{th0}}{Starting years for the MCMC iterations.}
\item{\code{burnin}}{Amount of initial, likely sub-optimal MCMC iterations that will be removed.}
\item{\code{MinAge}}{Minimum age limit for Bacon runs, default at current year in cal BP. To set plot limits, use \code{yr.min} instead.}
\item{\code{MaxAge}}{Maximum age limit for Bacon runs, default at 1,000,000 cal BP. To set plot limits, use \code{yr.max} instead.}
\item{\code{MinYr}}{Deprecated - use MinAge instead.}
\item{\code{MaxYr}}{Deprecated - use MaxAge instead.}
\item{\code{cutoff}}{Avoid plotting very low probabilities of date distributions (default \code{cutoff=0.001}).}
\item{\code{plot.pdf}}{Produce a pdf file of the age-depth plot. Defaults to \code{plot.pdf=TRUE} after a Bacon run.}
\item{\code{dark}}{Darkness of the greyscale age-depth model. The darkest grey value is \code{dark=1} by default.
Lower values will result in lighter grey but values >1 are not allowed.}
\item{\code{date.res}}{Date distributions are plotted using \code{date.res=100} segments by default.}
\item{\code{age.res}}{Resolution or amount of greyscale pixels to cover the age scale of the age-model plot. Default \code{age.res=200}.}
\item{\code{yr.res}}{Deprecated - use age.res instead}
\item{\code{close.connections}}{Internal option to close connections after a run. Default \code{close.connections=TRUE}.}
\item{\code{verbose}}{Provide feedback on what is happening (default \code{verbose=TRUE}).}
}}
}
\value{
L The single LiPD object that was entered, with methods, ensembleTable, summaryTable and distributionTable added to the chronData model.
}
\description{
This is a high-level function that uses Bacon to simulate an age model, and stores this as an age-ensemble in a model in chronData. If needed input variables are not entered, and cannot be deduced, it will run in interactive mode. See Blaauw and Christen (2011) doi:10.1214/11-BA618 for details.
}
\section{Long-form example}{
\href{http://nickmckay.github.io/GeoChronR/articles/Introduction.html}{View a full-fledged example of how to use this function.}
}
\examples{
\dontrun{
#Run in interactive mode:
L = runBacon(L)
#Run in noninteractive mode, describing everything:
L = runBacon(L, chron.num = 1, meas.table.num = 1, model.num = 3, bacon.dir = "~/Bacon/", site.name = "MSB2K", cc = 1)
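#Hypothetical: also pass the accumulation priors directly
#(the bacon.thick and bacon.acc.mean values below are placeholders):
L = runBacon(L, chron.num = 1, bacon.thick = 5, bacon.acc.mean = 20)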
}
}
\seealso{
Other Bacon:
\code{\link{getBaconDir}()},
\code{\link{loadBaconOutput}()},
\code{\link{sampleBaconAges}()},
\code{\link{setBaconDir}()},
\code{\link{writeBacon}()}
}
\author{
Nick McKay
Maarten Blaauw (Bacon)
}
\concept{Bacon}
|
/man/runBacon.Rd
|
permissive
|
nickmckay/GeoChronR
|
R
| false
| true
| 17,012
|
rd
|
a <- summary(out1)
a <- a$summary
# get the column-names and row-names
colnames(a)
rownames(a)
parMean <- as.matrix(a[,1])
parMedn <- as.matrix(a[,6])
## calculating is a bit tricky
library(modeest)
#### extract mcmc ####
lr_mu <- extract (out1, "lr_mu")$lr_mu # returns an array
|
/extract_param.R
|
no_license
|
lei-zhang/sit_stan
|
R
| false
| false
| 299
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deg2rad.r
\name{deg2rad}
\alias{deg2rad}
\alias{rad2deg}
\title{Conversion between degrees and radians}
\usage{
deg2rad(x)
rad2deg(x)
}
\arguments{
\item{x}{a numeric vector}
}
\value{
a numeric vector the same length as \code{x}
}
\description{
\code{deg2rad} performs conversion from degrees to radians.
\code{rad2deg} performs conversion from radians to degrees.
}
\section{Details}{
Radians and degrees are both units used for measuring angles.
A degree is a measure of angle equal to 1/360th of a revolution, or circle.
A radian is the measurement of angle equal to the length of an arc divided by the radius of the circle or arc.
A circle comprises 2*pi radians, which is the equivalent of 360 degrees.
A common application in ecological studies is the conversion of measured exposition (in degrees) of plots into statistically meaningful measures, such as the north value or the east value.
For this, the cosine (for northness) or sine (for eastness) is applied to the radian of the exposition.
}
\examples{
## Convert the value pi to degrees
rad2deg(pi)
# Calculate north and east values based on exposition measured in degrees
north <- cos(deg2rad(schedenenv$exp))
east <- sin(deg2rad(schedenenv$exp))
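# A right angle, both ways (illustrative sanity check):
deg2rad(90)   # = pi/2
rad2deg(pi/2) # = 90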
}
\references{
BIPM (2019): The International System of Units (SI). Bureau international des poids et mesures, ninth edition. \url{https://www.bipm.org/en/publications/si-brochure}, ISBN 978-92-822-2272-0
}
|
/man/deg2rad.Rd
|
no_license
|
fvlampe/goeveg
|
R
| false
| true
| 1,510
|
rd
|
#
# EDA 1 Aux functions
#
#'
#' toascii - Removes any non ascii character from text
#'
toascii <- function(x, encoding="UTF-8") {
# removes non ascii characters from char vector
iconv(x, from=encoding, to="ASCII", sub="")
}
#'
#' map - Applies function to elements of a vector
#'
map <- function(x, fun, progress=T, ...) {
# Applies function to element
cat(">>> Applying function to a text with", length(x), "records. \n")
y <- rep(0, length(x))
for (i in 1:length(x)) {
y[i] <- fun(x[i], ...)
if (!progress)
next
if (i %% 1000 == 0)
cat(".")
if (i %% 50000 == 0) {
segm <- (i %/% 50000) * 50
cat(segm, "K", sep="")
}
}
if (progress)
cat("EoF \n")
y
}
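#'
#' Hypothetical usage of map (added for illustration; guarded so that
#' sourcing this file stays side-effect free). fun must return one
#' numeric value per element.
#'
if (FALSE) {
  map(c("one", "three", "thirteen"), nchar, progress=FALSE) # -> 3 5 8
}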
#'
#' step - applies function in steps
#'
step <- function(x, fun, steps=1, ...){
# executes a function in steps
steps <- as.integer(steps)
cat(">>> Processing object in", steps, "steps. \n")
recs <- length(x) %/% steps
lst_y <- list()
for (i in 1:steps){
cat(" Step ", i,". ", sep="")
init <- 1 + (i-1) * recs
if (i == steps)
recs = recs + length(x) %% steps
y <- fun(x[init:(init + recs - 1)], ...)
lst_y <- append(lst_y, list(y))
cat(recs, "records processed in this step. \n")
}
lst_y
}
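#'
#' Illustrative sketch of step (added for illustration, guarded):
#' sum a vector in two chunks.
#'
if (FALSE) {
  step(1:10, sum, steps=2) # -> list(15, 40)
}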
#'
#' ngram - Recursive n-gram generator
#'
ngram <- function(x, n=2, split=" ", sep="::",
startMark='<s>', stopMark='</s>'){
# Takes a vector of strings, size of n-grams to generate, split char and separator
# Returns a vector of n-grams
# recursive ngram generator
ngrm <- function(words, n, sep, ngrams){
if (length(words) < n) # no more n-grams, end of story
return(unlist(ngrams))
ngrams <- append(ngrams, list(paste(words[1:n], collapse=sep))) # append n-gram
return(ngrm(words[2:(length(words))], n, sep, ngrams)) # again, without first word
}
# wrapper function
if(split == " ") # if split by whitespaces
split <- "\\s" # use it as split char but
x <- gsub(paste0(split,"+"), " ", x) # make sure there's only one between words
x <- gsub("^\\s+", "", x) # and none as first character
words <- unlist(strsplit(x,split = split)) # create vector of words
if (n > 1) {
words <- append(startMark, words) # add start...
words <- append(words, stopMark) # and stop markers
}
ngrams <- list() # list of ngrams
if (n < 2) # just return input words
return(words) # or empty vector
return(ngrm(words, n, sep, ngrams)) # not a trivial case. call generator.
}
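#'
#' Illustrative sketch of ngram (added for illustration, guarded):
#' bigrams and higher-order ngrams get start/stop markers.
#'
if (FALSE) {
  ngram("the quick fox", n=2)
  # -> "<s>::the" "the::quick" "quick::fox" "fox::</s>"
}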
#'
#' ngramN - Recursive n-gram generator for encoded words
#'
ngramN <- function(x, n=2, sep="::", startMark=0, stopMark=0){
# Takes a vector of words encoded as numbers, size of n-grams to generate,
# split char and separator and returns a vector of encoded n-grams
# recursive ngram generator
ngrm <- function(words, n, sep, ngrams){
if (length(words) < n) # no more n-grams, end of story
return(ngrams) # list replaced by vector
ngrams <- append(ngrams, paste(words[1:n], collapse=sep)) # append n-gram
return(ngrm(words[2:(length(words))], n, sep, ngrams)) # again, without first word
}
# wrapper function
words <- x
if (n > 1) {
words <- append(startMark, x) # add start...
words <- append(words, stopMark) # and stop markers
}
ngrams <- c() # list replaced by vector
if (n < 2) # just return input words
return(words) # or empty vector
return(ngrm(words, n, sep, ngrams)) # not a trivial case. call generator.
}
#
#'
#' top - Returns the requested proportion of the most used words in a
#' term frequency vector.
#'
top <- function(x, prop){
x[order(x, decreasing=T)][1:(as.integer((prop * length(x))))]
}
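#'
#' Illustrative sketch of top (added for illustration, guarded):
#' keep the top half (rounded down) of a term frequency vector.
#'
if (FALSE) {
  top(c(the=50, of=30, and=20, cat=1), 0.5) # -> the=50 of=30
}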
#'
#' cdf - Computes a cumulative distribution function from x
#'
cdf <- function(x){
x <- x[order(x)]
y <- rep(0, length(x))
for (i in 1:length(x)){
y[i] <- sum(x[1:i])
}
return(y/sum(x))
}
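#'
#' Illustrative sketch of cdf (added for illustration, guarded).
#'
if (FALSE) {
  cdf(c(2, 1, 1)) # -> 0.25 0.50 1.00 (after sorting to 1, 1, 2)
}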
#'
#' qxdist - computes the quantile where cumulative probability > p
#' using x's empirical cumulative distribution
#'
qxdist <- function(x, p, lower.tail=TRUE){
# returns quantile of a distribution where P <= value
if (lower.tail) {
q <- max(which(cdf(x) <= p))
} else {
q <- min(which(cdf(x) >= p))
}
return(q / length(x))
}
#
# is.english - Check if words are english
# Note. Requires rJava and wordnet libraries
#
is.english <- function(words) {
  # Checks whether words are in wordnet
# lookup function
lookup <- function(word) {
# looks up a word in wordnet
pos <- c("ADJECTIVE", "ADVERB", "NOUN", "VERB")
term <- NULL
lemma <- NULL
idx <- 1
while (is.null(term)){
filter <- getTermFilter("ExactMatchFilter", word, TRUE)
term <- getIndexTerms(pos[idx], 25, filter)
idx <- idx + 1
if (idx > length(pos))
break
}
return(term)
}
# main
ans <- rep(FALSE, length(words))
for (i in 1:length(words)){
ans[i] <- !is.null(lookup(words[i]))
}
ans
}
#
# clock - Measures a function call elapsed time
#
clock <- function(f, ...){
# clock measures a function call elapsed time
# return time and function results in a list
start_time <- Sys.time()
result <- f(...)
elapsed <- Sys.time() - start_time
return(list(elapsed, result))
}
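#
# Illustrative sketch of clock (added for illustration, guarded):
# the first element of the result is the elapsed time, the second
# is the function's return value.
#
if (FALSE) {
  res <- clock(Sys.sleep, 0.2)
  res[[1]] # elapsed time (~0.2 secs)
}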
#
# getCorpusInfo - Prints basic info of a tm corpus
#
getCorpusInfo <- function(crp){
# Prints basic info of tm corpus
  print(summary(crp)) # print explicitly; auto-printing is off inside functions
for (i in 1:length(crp)) {
if (i == 1)
cat(">>> Corpus contents \n")
id <- ID(crp[[i]])
dts <- as.character(DateTimeStamp(crp[[i]]))
lang <- Language(crp[[i]])
cat('>>>', id, '-', dts, '-', lang, '-', length(crp[[i]]), 'lines. \n')
}
}
#'
#' dict - Creates a dictionary of elements and counts
#'
dict <- function(x){
x <- x[order(x)]
y <- rep(0, length(unique(x)))
j <- 1
names(y)[1] <- x[1]
for (i in 1:length(x)){
if (x[i] != names(y)[j]){
j = j + 1
names(y)[j] <- x[i]
}
y[j] <- y[j] + 1
}
y
}
#'
#' dict2 - Creates a dictionary of elements and counts using
#' package hash created by Christopher Brown.
#'
library(hash)
dict2 <- function(x, ht){
# Takes a character vector of words and optionally a dict (hash table)
# Returns a new or updated dict of term frequencies.
if(missing(ht)) {
ht <- hash(unique(x), 0) # init hash table
} else { # or identify new entries
newones <-!(has.key(unique(x), ht))
if (sum(newones) > 0)
ht[x[newones]] <- 0 # set new entries count to zero
}
for (i in 1:length(x)){
ht[x[i]] <- ht[[x[i]]] + 1 # increment counter
}
ht
}
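#'
#' Illustrative sketch of dict2 (added for illustration, guarded;
#' requires the hash package loaded above).
#'
if (FALSE) {
  d <- dict2(c("to", "be", "or", "not", "to", "be"))
  d[["to"]] # -> 2
}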
# dict.lkup - Retrieves the most frequently used matching terms in a dict
#
dict.lkup <- function(word, dict, n=1){
  # Retrieves the top proportion n (0 < n <= 1) of terms starting with word
  # if n = 0, returns only the most frequent term
if (!((n >= 0) & (n <= 1)))
n <- 1
key <- paste0("^", word, "::")
matches <- grep(key, names(dict))
if (length(matches) == 0)
return(NULL)
elems <- dict[matches]
elems <- elems[order(elems, decreasing=T)]
if (n == 0)
return(elems[1])
return(elems[1:as.integer(length(elems) * n)])
}
#
chop <- function(x, n){
# divides an object in n parts without separating elements
# of equal value (for vectors only)
# recursive chopping
chp <- function(x, n, y){
# chops a vector recursively
# cat(">>> x:", x, "n:", n, "y:", unlist(y), "\n")
if ((n == 1) | (length(x) <= n))
return (append(y, list(x)))
cutsize <- as.integer(length(x) / n)
x1 <- x[1:cutsize]
x2 <- x[cutsize + 1:(length(x) - cutsize)]
    if (!is.list(x)) {
x1 <- append(x1, x2[x2==x1[length(x1)]])
x2 <- x2[x2 != x1[length(x1)]]
y <- append(y, list(x1))
} else {
y <- append(y, list(x1))
}
if (length(x2) == 0)
return(y)
return(chp(x2, n-1, y))
}
# wrapper
# main
  if (!is.list(x))
x <- x[order(x)]
return(chp(x, n, list()))
}
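#
# Illustrative sketch of chop (added for illustration, guarded):
# equal values stay in the same part.
#
if (FALSE) {
  chop(c(3, 1, 2, 1), 2) # -> list(c(1, 1), c(2, 3))
}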
#
# ramStatus - Gets status of used ram by objects
#
ramStatus <- function(size=0, class='all'){
# returns a df listing objects' names and sizes in decreasing order
inmemory <- ls(envir= .GlobalEnv)
outlen <- length(inmemory)
objsizes <- rep(0, outlen)
objnames <- rep(NA, outlen)
objclasses <- rep('', outlen)
for (i in 1:length(inmemory)){
objnames[i] <- inmemory[i]
objsizes[i] <- round(as.numeric(object.size(get(inmemory[i])) / 1024 ^ 2), 2)
objclass <- class(get(inmemory[i]))
for (j in 1:length(objclass)){
objclasses[i] <- paste0(objclasses[i], paste(objclass[j], ' '))
}
}
ordr <- order(objsizes, decreasing=T)
return(data.frame('Name' = objnames[ordr],
'Class' = objclasses[ordr],
'MB' = objsizes[ordr],
row.names = 1:length(inmemory)))
}
#
# convTime - Return time difference in correct units
# Necessary when estimating time periods.
#
convTime <- function(x) {
  # takes an object of class difftime and returns
# elapsed time in correct units
if (attr(x, "units") == 'secs')
if (x >= 60) {
x <- round(x / 60, 2)
attr(x, "units") <- 'mins'
}
if (attr(x, "units") == 'mins')
if (x >= 60) {
x <- round(x / 60, 2)
attr(x, "units") <- 'hours'
}
return(round(x, 2))
}
#
# ngram.DF - Creates a data frame from a dict of term frequencies
# New version. Replace the one above.
#
ngram.DF <- function(x, encoded=TRUE) {
# Creates a data frame from a dict of frequencies
k <- as.double(names(x)) # ngram code as key
w <- sapply(k, dCodeNgram) # get a matrix with ngram words as rows
  if (!is.matrix(w)){                       # but check if it's a vector
n <- 1 # yep. it's an 1-gram
} else { # nope. bigram or greater ngram
n <- nrow(w) # number of words in ngram
}
df <- data.frame(Key=k, Count=x, # init data frame with key and frequencies
row.names=NULL,
stringsAsFactors = FALSE)
if (n == 1) { # for just words add a single column
df <- data.frame(df, w, stringsAsFactors = FALSE)
colnames(df)[3] <- 'W1' # column name
  } else {                                  # for higher-order ngrams we need a loop
for (i in 1:n){ # add columns for each word in the ngram
df <- data.frame(df, w[i,],
stringsAsFactors = FALSE)
colnames(df)[2+i] <- paste0('W',i) # add name to data frame column
}
}
return(df) # done, go home.
}
#
# ngram.DF - Creates a data frame from a dict of term frequencies
# New version. Replace the one above.
#
ngram.DF <- function(x, encoded=TRUE, sep='::') {
# Creates a data frame from a dict of frequencies
if (encoded) {
k <- as.double(names(x)) # ngram code as key
sk <- sapply(k, dCodeNgram, # get n-1 ngrams
subngram=T, # to use as search keys
decode = F)
w <- sapply(k, dCodeNgram) # get a matrix with ngram words as rows
    if (!is.matrix(w)){                     # but check if it's a vector
n <- 1 # yep. it's an 1-gram
} else { # nope. bigram or greater ngram
n <- nrow(w) # number of words in ngram
}
if (n == 1) { # for just words add a single column
df <- data.frame(Key=k, # and init data frame
Count=x, # with no sub key
row.names=NULL,
stringsAsFactors = FALSE)
df <- data.frame(df, w, stringsAsFactors = FALSE) # add words
colnames(df)[3] <- 'W1' # and column name
    } else {                                # for higher-order ngrams
df <- data.frame(Key=k, # init data frame with sub key
Count=x,
Skey=sk,
row.names=NULL,
stringsAsFactors = FALSE)
for (i in 1:n){ # add columns for each word in the ngram
df <- data.frame(df, w[i,],
stringsAsFactors = FALSE)
colnames(df)[3+i] <- paste0('W',i) # add name to data frame column
}
}
} else { # ngrams are not encoded
k <- names(x) # get keys
lk <- strsplit(k, sep) # split words
w <- matrix(data=NA, nrow=length(x), # create matrix
ncol=length(lk[[1]]))
for (i in 1:length(lk[[1]])){ # unlist columns into matrix
w[,i] <- unlist(lapply(lk, function(x){x[i]}))
}
count <- x # init data frame with keys and freq
df <- data.frame(Key=k, Count=count,
row.names=NULL,
stringsAsFactors = FALSE)
for (i in 1:ncol(w)){ # add columns to df
col <- w[,i]
if (encoded) # ensure word as numeric if encoded
col <- as.numeric(as.character(w[,i]))
df <- data.frame(df, col, # add column to df
stringsAsFactors = FALSE)
colnames(df)[2+i] <- paste0('W',i) # give column a name
}
}
return(df) # done. go home.
}
#
repUnk <- function(x, vocab=WRDS, unkMark='<UNK>'){
# converts a string of words to a character vector with
# words not in vocabulary replaced by the UNK mark
y <- ngram(x, n=1)
change <- !(y %in% vocab)
y[change] <- unkMark
return(y)
}
nCode <- function(x, vocab=WRDS, unkMark='<UNK>'){
# encodes words in a string of words using a vocab hash table.
# returns a numeric vector with words in the same order as input.
# Unknown words are replaced by the UNK mark
y <- repUnk(x, vocab, unkMark)
z <- c()
for (i in 1:length(y)){
z <- append(z, which(y[i] == vocab))
}
return(z)
}
dCode <- function(x, vocab=WRDS){
# decodes words in a numeric vector using a vocab hash table.
# Returns a character vector.
z <- c()
for (i in 1:length(x)){
if (x[i] > 0) {
xd <- vocab[x[i]]
} else {
xd <- '<M>' # Mark up symbol
}
z <- append(z, xd)
}
return(z)
}
#
# nCode2 - Encodes / decodes a word vector using hash package's functions.
# Replaces functions nCode and dCode.
#
nCode2 <- function(x, vocab=WRDS_H, unkMark='<UNK>',
decode=FALSE, vocab_dec=WRDS_H_INV){
#
# Encodes or decodes a vector of words. If decode = TRUE,
# an inverted hash table must be specified as vocab.
# Returns a vector of encoded / decoded words.
#
if(!decode) # if encoding...
x <- ngram(x, 1) # convert to word vector
if (decode) {
vocab <- vocab_dec # use inverted hash table and...
x <- as.character(x) # convert to char for decoding
}
not_found <-!(has.key(x, vocab)) # identify words without code and..,
x[not_found] <- unkMark # replace unknown words with special tag
res <- sapply(x, function(y){vocab[[y]]}) # get codes
names(res) <- NULL # remove names
return(res) # and go home.
}
# for compatibility with old versions
nCode <- function(x, vocab=WRDS, unkMark='<UNK>')
return(nCode2(x, vocab=WRDS_H, unkMark='<UNK>'))
# a shorthand version for decoding
dCode <- function(x) {nCode2(x, decode=T)}
# skip.ngram - Creates bigrams within a specified window size
#
skip.ngram <- function(x, n=2, window=3, split=" ", sep="::"){
# Takes a vector of strings, size of window, split char and separator
# Returns a vector of bigrams within the specified window size
# recursive ngram generator
ngrm <- function(words, n, window, sep, ngrams){
# processes all ngrams with first word and calls itself
# check exit condition
if (length(words) < n) # no more n-grams, end of story
return(unlist(ngrams))
# process ngrams and call
pairs <- min(window, length(words) - 1) # number of pairs of ngrams to add
for (i in 1:pairs){ # append n-grams
ngrams <- append(ngrams,
list(paste(c(words[1], words[i+1]), collapse=sep)))
}
return(ngrm(words[2:(length(words))], n, window, sep, ngrams)) # play it again, Sam
}
# wrapper function
if(split == " ") # if split by whitespaces
split <- "\\s" # use it as split char but
x <- gsub(paste0(split,"+"), " ", x) # make sure there's only one between words
x <- gsub("^\\s+", "", x) # and none as first character
words <- unlist(strsplit(x,split = split)) # create vector of words
ngrams <- list() # list of ngrams
if (n < 2) # just return input words
return(words) # or empty vector
return(ngrm(words, n, window, sep, ngrams)) # not a trivial case. call generator.
}
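#
# Illustrative sketch of skip.ngram (added for illustration, guarded):
# word pairs within a window of 2.
#
if (FALSE) {
  skip.ngram("a b c", window=2) # -> "a::b" "a::c" "b::c"
}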
#
# updt.DF - Updates ngram data frames
#
updt.DF <- function(x, y){
# Takes a term frequency data frame and a term freq vector.
# Returns a data frame with counts updated. Non existing keys are added.
# Update existing keys
for (i in 1:length(y)){
x$Count[x$Key == names(y)[i]] <- x$Count[x$Key == names(y)[i]] + y[i]
}
# Add non existing keys
toadd <- !(names(y) %in% x$Key)
totadded <- sum(toadd)
if (totadded > 0) {
x <- rbind(x, ngram.DF(y[toadd]))
cat('>>>', totadded, 'row(s) added to df. \n')
}
# Return updated data frame in order
return(x[order(x$Count, decreasing=T),])
}
#
# updt.DF - Updates ngram data frames - vectorized version -
#
updt.DF <- function(x, y, test=FALSE){
# Takes a ngram freq df of type nig_df (i=1,2,3...) and a term freq vector.
# Returns a data frame of the right type with counts updated and new keys added.
# to vectorize df update
  yinKeys <- names(y) %in% x$Key                      # y entries already in x
  xinKeys <- x$Key %in% names(y)                      # same from x point of view
  # Update existing keys (match by key so counts stay aligned even if orders differ)
  x$Count[xinKeys] <- x$Count[xinKeys] + y[match(x$Key[xinKeys], names(y))]
# Add non existing keys
tot2add <- sum(!yinKeys) # Nr of rows to add
if (tot2add > 0) { # rbind without rows result in error
x <- rbind(x, ngram.DF(y[!yinKeys]), row.names=NULL)
rownames(x) <- NULL
if (test)
cat('>>>', tot2add, 'row(s) added to df. \n') # inform nr of rows added
}
# Return updated data frame in order
return(list(added = tot2add,
updted = sum(yinKeys),
df = x[order(x$Key, decreasing=F),])) # return new df in key order
}
#
# updt.Vocab - Updates Vocabulary
#
updt.Vocab <- function(x, Vocab=WRDS, test=FALSE) {
#
  # Takes a character vector and adds words not already in Vocab
#
words <- ngram(x, 1)
new_words <- !(words %in% Vocab)
strt <- length(Vocab) + 1
end <- strt + sum(new_words) - 1
if (end >= strt)
Vocab[strt:end] <- words[new_words]
if(test)
cat('>>>', sum(new_words), 'words added. \n')
return(Vocab)
}
#
# updt.Vocab - Using hash package...
#
updt.Vocab <- function(x, Vocab=WRDS_H, Vocab_inv=WRDS_H_INV, test=FALSE) {
#
  # Takes a character vector of words and adds new ones
  # to Vocab and to the inverted Vocab hash table.
# Returns the number of words actually added.
#
new_words <- !(has.key(x, Vocab)) # identify words to add
strt <- max(values(Vocab)) + 1 # compute range of new values
end <- strt + sum(new_words) - 1
if (end >= strt) { # add new words
Vocab[x[new_words]] <- strt:end # to Vocab
Vocab_inv[strt:end] <- x[new_words] # and inverted Vocab
}
if (test) # useful for testing
cat('>>> Words added:', paste(x[new_words], collapse='-'), '\n')
return(sum(new_words)) # Nr of words added
}
|
/eda1_auxFunctions.R
|
no_license
|
amitkb3/Capstone
|
R
| false
| false
| 22,774
|
r
|
#
# EDA 1 Aux functions
#
#'
#' toascii - Removes any non ascii character from text
#'
toascii <- function(x, encoding="UTF-8") {
# removes non ascii characters from char vector
iconv(x, from=encoding, to="ASCII", sub="")
}
#'
#' map - Applies function to elements of a vector
#'
map <- function(x, fun, progress=T, ...) {
# Applies function to element
cat(">>> Applying function to a text with", length(x), "records. \n")
y <- rep(0, length(x))
for (i in 1:length(x)) {
y[i] <- fun(x[i], ...)
if (!progress)
next
if (i %% 1000 == 0)
cat(".")
if (i %% 50000 == 0) {
segm <- (i %/% 50000) * 50
cat(segm, "K", sep="")
}
}
if (progress)
cat("EoF \n")
y
}
#'
#' step - applies function in steps
#'
step <- function(x, fun, steps=1, ...){
# executes a function in steps
steps <- as.integer(steps)
cat(">>> Processing object in", steps, "steps. \n")
recs <- length(x) %/% steps
lst_y <- list()
for (i in 1:steps){
cat(" Step ", i,". ", sep="")
init <- 1 + (i-1) * recs
if (i == steps)
recs = recs + length(x) %% steps
y <- fun(x[init:(init + recs - 1)], ...)
lst_y <- append(lst_y, list(y))
cat(recs, "records processed in this step. \n")
}
lst_y
}
#'
#' ngram - Recursive n-gram generator
#'
ngram <- function(x, n=2, split=" ", sep="::",
startMark='<s>', stopMark='</s>'){
# Takes a vector of strings, size of n-grams to generate, split char and separator
# Returns a vector of n-grams
# recursive ngram generator
ngrm <- function(words, n, sep, ngrams){
if (length(words) < n) # no more n-grams, end of story
return(unlist(ngrams))
ngrams <- append(ngrams, list(paste(words[1:n], collapse=sep))) # append n-gram
return(ngrm(words[2:(length(words))], n, sep, ngrams)) # again, without first word
}
# wrapper function
if(split == " ") # if split by whitespaces
split <- "\\s" # use it as split char but
x <- gsub(paste0(split,"+"), " ", x) # make sure there's only one between words
x <- gsub("^\\s+", "", x) # and none as first character
words <- unlist(strsplit(x,split = split)) # create vector of words
if (n > 1) {
words <- append(startMark, words) # add start...
words <- append(words, stopMark) # and stop markers
}
ngrams <- list() # list of ngrams
if (n < 2) # just return input words
return(words) # or empty vector
return(ngrm(words, n, sep, ngrams)) # not a trivial case. call generator.
}
#'
#' ngramN - Recursive n-gram generator for encoded words
#'
ngramN <- function(x, n=2, sep="::", startMark=0, stopMark=0){
# Takes a vector of words encoded as numbers, size of n-grams to generate,
# split char and separator and returns a vector of encoded n-grams
# recursive ngram generator
ngrm <- function(words, n, sep, ngrams){
if (length(words) < n) # no more n-grams, end of story
return(ngrams) # list replaced by vector
ngrams <- append(ngrams, paste(words[1:n], collapse=sep)) # append n-gram
return(ngrm(words[2:(length(words))], n, sep, ngrams)) # again, without first word
}
# wrapper function
words <- x
if (n > 1) {
words <- append(startMark, x) # add start...
words <- append(words, stopMark) # and stop markers
}
ngrams <- c() # list replaced by vector
if (n < 2) # just return input words
return(words) # or empty vector
return(ngrm(words, n, sep, ngrams)) # not a trivial case. call generator.
}
#
#'
#' top - Returns the required percent of most used words in a
#' term frequency vector.
#'
top <- function(x, prop){
x[order(x, decreasing=T)][1:(as.integer((prop * length(x))))]
}
#'
#' cdf - Computes a cumulative distribution function from x
#'
cdf <- function(x){
x <- x[order(x)]
y <- rep(0, length(x))
for (i in 1:length(x)){
y[i] <- sum(x[1:i])
}
return(y/sum(x))
}
#'
#' qxdist - computes quintile where acum probability > p
#' using x's empirical cumulative distribution
#'
qxdist <- function(x, p, lower.tail=TRUE){
# returns quantile of a distribution where P <= value
if (lower.tail) {
q <- max(which(cdf(x) <= p))
} else {
q <- min(which(cdf(x) >= p))
}
return(q / length(x))
}
#
# is.english - Check if words are english
# Note. Requires rJava and wordnet libraries
#
is.english <- function(words) {
# Checks if words in wordnet
# lookup function
lookup <- function(word) {
# looks up a word in wordnet
pos <- c("ADJECTIVE", "ADVERB", "NOUN", "VERB")
term <- NULL
lemma <- NULL
idx <- 1
while (is.null(term)){
filter <- getTermFilter("ExactMatchFilter", word, TRUE)
term <- getIndexTerms(pos[idx], 25, filter)
idx <- idx + 1
if (idx > length(pos))
break
}
return(term)
}
# main
ans <- rep(FALSE, length(words))
for (i in 1:length(words)){
ans[i] <- !is.null(lookup(words[i]))
}
ans
}
#
# clock - Measures a function call elapsed time
#
clock <- function(f, ...){
# clock measures a function call elapsed time
# return time and function results in a list
start_time <- Sys.time()
result <- f(...)
elapsed <- Sys.time() - start_time
return(list(elapsed, result))
}
#
# getCorpusInfo - Prints basic info of a tm corpus
#
getCorpusInfo <- function(crp){
# Prints basic info of tm corpus
summary(crp)
for (i in 1:length(crp)) {
if (i == 1)
cat(">>> Corpus contents \n")
id <- ID(crp[[i]])
dts <- as.character(DateTimeStamp(crp[[i]]))
lang <- Language(crp[[i]])
cat('>>>', id, '-', dts, '-', lang, '-', length(crp[[i]]), 'lines. \n')
}
}
#'
#' dict - Creates a dictionary of elements and counts
#'
dict <- function(x){
x <- x[order(x)]
y <- rep(0, length(unique(x)))
j <- 1
names(y)[1] <- x[1]
for (i in 1:length(x)){
if (x[i] != names(y)[j]){
j = j + 1
names(y)[j] <- x[i]
}
y[j] <- y[j] + 1
}
y
}
#'
#' dict2 - Creates a dictionary of elements and counts using
#' package hash created by Christopher Brown.
#'
library(hash)
dict2 <- function(x, ht){
  # Takes a character vector of words and optionally a dict (hash table).
  # Returns a new or updated dict of term frequencies.
  ux <- unique(x)                             # each distinct word needs a key
  if(missing(ht)) {
    ht <- hash(ux, 0)                         # init hash table
  } else {                                    # or identify new entries
    newones <- !(has.key(ux, ht))             # (index the unique words, not x itself)
    if (sum(newones) > 0)
      ht[ux[newones]] <- 0                    # set new entries' counts to zero
  }
for (i in 1:length(x)){
ht[x[i]] <- ht[[x[i]]] + 1 # increment counter
}
ht
}
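# Example:
# ht <- dict2(c("a", "b", "a"))   # new table: a -> 2, b -> 1
# ht <- dict2(c("c", "a"), ht)    # updated:   a -> 3, b -> 1, c -> 1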
# dict.lkup - Retrieves the top proportion of matching terms in a dict
#
dict.lkup <- function(word, dict, n=1){
  # Retrieves the top proportion n (0 < n <= 1) of the most frequent terms
  # starting with `word`; if n = 0, returns the single most frequent term
  if (!((n >= 0) & (n <= 1)))
    n <- 1
key <- paste0("^", word, "::")
matches <- grep(key, names(dict))
if (length(matches) == 0)
return(NULL)
elems <- dict[matches]
elems <- elems[order(elems, decreasing=T)]
if (n == 0)
return(elems[1])
return(elems[1:as.integer(length(elems) * n)])
}
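# Example (bigram dict with "::"-separated keys):
# d <- c("the::cat" = 5, "the::dog" = 3, "a::cat" = 2)
# dict.lkup("the", d, n = 0)   #> the::cat = 5 (single most frequent match)
# dict.lkup("the", d)          # both "the::*" entries, most frequent first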
#
chop <- function(x, n){
  # divides an object into n parts without separating elements
# of equal value (for vectors only)
# recursive chopping
chp <- function(x, n, y){
# chops a vector recursively
# cat(">>> x:", x, "n:", n, "y:", unlist(y), "\n")
if ((n == 1) | (length(x) <= n))
return (append(y, list(x)))
cutsize <- as.integer(length(x) / n)
x1 <- x[1:cutsize]
x2 <- x[cutsize + 1:(length(x) - cutsize)]
    if (class(x) != 'list') {
x1 <- append(x1, x2[x2==x1[length(x1)]])
x2 <- x2[x2 != x1[length(x1)]]
y <- append(y, list(x1))
} else {
y <- append(y, list(x1))
}
if (length(x2) == 0)
return(y)
return(chp(x2, n-1, y))
}
# wrapper
# main
  if (class(x) != 'list')
x <- x[order(x)]
return(chp(x, n, list()))
}
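# Example: equal values are never split across chunks.
# chop(c(1, 2, 2, 3), 2)
# #> [[1]] 1 2 2
# #> [[2]] 3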
#
# ramStatus - Reports RAM used by objects in the global environment
#
ramStatus <- function(size=0, class='all'){
# returns a df listing objects' names and sizes in decreasing order
inmemory <- ls(envir= .GlobalEnv)
outlen <- length(inmemory)
objsizes <- rep(0, outlen)
objnames <- rep(NA, outlen)
objclasses <- rep('', outlen)
for (i in 1:length(inmemory)){
objnames[i] <- inmemory[i]
objsizes[i] <- round(as.numeric(object.size(get(inmemory[i])) / 1024 ^ 2), 2)
objclass <- class(get(inmemory[i]))
for (j in 1:length(objclass)){
objclasses[i] <- paste0(objclasses[i], paste(objclass[j], ' '))
}
}
ordr <- order(objsizes, decreasing=T)
return(data.frame('Name' = objnames[ordr],
'Class' = objclasses[ordr],
'MB' = objsizes[ordr],
row.names = 1:length(inmemory)))
}
#
# convTime - Return time difference in correct units
# Necessary when estimating time periods.
#
convTime <- function(x) {
  # takes an object of class difftime and returns
# elapsed time in correct units
if (attr(x, "units") == 'secs')
if (x >= 60) {
x <- round(x / 60, 2)
attr(x, "units") <- 'mins'
}
if (attr(x, "units") == 'mins')
if (x >= 60) {
x <- round(x / 60, 2)
attr(x, "units") <- 'hours'
}
return(round(x, 2))
}
#
# ngram.DF - Creates a data frame from a dict of term frequencies
#            (superseded by the sub-key version further below)
#
ngram.DF <- function(x, encoded=TRUE) {
# Creates a data frame from a dict of frequencies
k <- as.double(names(x)) # ngram code as key
w <- sapply(k, dCodeNgram) # get a matrix with ngram words as rows
if (class(w) != 'matrix'){ # but check if it's a vector
n <- 1 # yep. it's an 1-gram
} else { # nope. bigram or greater ngram
n <- nrow(w) # number of words in ngram
}
df <- data.frame(Key=k, Count=x, # init data frame with key and frequencies
row.names=NULL,
stringsAsFactors = FALSE)
if (n == 1) { # for just words add a single column
df <- data.frame(df, w, stringsAsFactors = FALSE)
colnames(df)[3] <- 'W1' # column name
  } else {                                # for higher-order ngrams we need a loop
for (i in 1:n){ # add columns for each word in the ngram
df <- data.frame(df, w[i,],
stringsAsFactors = FALSE)
colnames(df)[2+i] <- paste0('W',i) # add name to data frame column
}
}
return(df) # done, go home.
}
#
# ngram.DF - Creates a data frame from a dict of term frequencies
#            Newer version with sub-key support; supersedes the one above.
#
ngram.DF <- function(x, encoded=TRUE, sep='::') {
# Creates a data frame from a dict of frequencies
if (encoded) {
k <- as.double(names(x)) # ngram code as key
sk <- sapply(k, dCodeNgram, # get n-1 ngrams
subngram=T, # to use as search keys
decode = F)
w <- sapply(k, dCodeNgram) # get a matrix with ngram words as rows
if (class(w) != 'matrix'){ # but check if it's a vector
n <- 1 # yep. it's an 1-gram
} else { # nope. bigram or greater ngram
n <- nrow(w) # number of words in ngram
}
if (n == 1) { # for just words add a single column
df <- data.frame(Key=k, # and init data frame
Count=x, # with no sub key
row.names=NULL,
stringsAsFactors = FALSE)
df <- data.frame(df, w, stringsAsFactors = FALSE) # add words
colnames(df)[3] <- 'W1' # and column name
  } else {                                  # for higher-order ngrams
df <- data.frame(Key=k, # init data frame with sub key
Count=x,
Skey=sk,
row.names=NULL,
stringsAsFactors = FALSE)
for (i in 1:n){ # add columns for each word in the ngram
df <- data.frame(df, w[i,],
stringsAsFactors = FALSE)
colnames(df)[3+i] <- paste0('W',i) # add name to data frame column
}
}
} else { # ngrams are not encoded
k <- names(x) # get keys
lk <- strsplit(k, sep) # split words
w <- matrix(data=NA, nrow=length(x), # create matrix
ncol=length(lk[[1]]))
for (i in 1:length(lk[[1]])){ # unlist columns into matrix
w[,i] <- unlist(lapply(lk, function(x){x[i]}))
}
count <- x # init data frame with keys and freq
df <- data.frame(Key=k, Count=count,
row.names=NULL,
stringsAsFactors = FALSE)
for (i in 1:ncol(w)){ # add columns to df
col <- w[,i]
if (encoded) # ensure word as numeric if encoded
col <- as.numeric(as.character(w[,i]))
df <- data.frame(df, col, # add column to df
stringsAsFactors = FALSE)
colnames(df)[2+i] <- paste0('W',i) # give column a name
}
}
return(df) # done. go home.
}
#
repUnk <- function(x, vocab=WRDS, unkMark='<UNK>'){
# converts a string of words to a character vector with
# words not in vocabulary replaced by the UNK mark
y <- ngram(x, n=1)
change <- !(y %in% vocab)
y[change] <- unkMark
return(y)
}
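# Example (assumes a character vocabulary WRDS exists):
# WRDS <- c("the", "cat")
# repUnk("the big cat")   #> "the" "<UNK>" "cat"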
nCode <- function(x, vocab=WRDS, unkMark='<UNK>'){
  # encodes words in a string of words using a vocabulary vector.
# returns a numeric vector with words in the same order as input.
# Unknown words are replaced by the UNK mark
y <- repUnk(x, vocab, unkMark)
z <- c()
for (i in 1:length(y)){
z <- append(z, which(y[i] == vocab))
}
return(z)
}
dCode <- function(x, vocab=WRDS){
  # decodes words in a numeric vector using a vocabulary vector.
# Returns a character vector.
z <- c()
for (i in 1:length(x)){
if (x[i] > 0) {
xd <- vocab[x[i]]
} else {
xd <- '<M>' # Mark up symbol
}
z <- append(z, xd)
}
return(z)
}
#
# nCode2 - Encodes / decodes a word vector using hash package's functions.
# Replaces functions nCode and dCode.
#
nCode2 <- function(x, vocab=WRDS_H, unkMark='<UNK>',
decode=FALSE, vocab_dec=WRDS_H_INV){
#
# Encodes or decodes a vector of words. If decode = TRUE,
# an inverted hash table must be specified as vocab.
# Returns a vector of encoded / decoded words.
#
if(!decode) # if encoding...
x <- ngram(x, 1) # convert to word vector
if (decode) {
vocab <- vocab_dec # use inverted hash table and...
x <- as.character(x) # convert to char for decoding
}
not_found <-!(has.key(x, vocab)) # identify words without code and..,
x[not_found] <- unkMark # replace unknown words with special tag
res <- sapply(x, function(y){vocab[[y]]}) # get codes
names(res) <- NULL # remove names
return(res) # and go home.
}
# for compatibility with old versions (the arguments are ignored;
# the hash-based defaults are always used)
nCode <- function(x, vocab=WRDS, unkMark='<UNK>')
  return(nCode2(x, vocab=WRDS_H, unkMark='<UNK>'))
# a shorthand version for decoding
dCode <- function(x) {nCode2(x, decode=T)}
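# Example (assumes hash-based vocab tables built as below; the codes are illustrative):
# WRDS_H     <- hash(c("<UNK>", "the", "cat"), 1:3)
# WRDS_H_INV <- hash(as.character(values(WRDS_H)), keys(WRDS_H))
# nCode2("the big cat")   #> 2 1 3   ("big" is unknown and coded as <UNK>)
# dCode(c(2, 1, 3))       #> "the" "<UNK>" "cat"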
#
# skip.ngram - Creates bigrams within a specified window size
#
skip.ngram <- function(x, n=2, window=3, split=" ", sep="::"){
# Takes a vector of strings, size of window, split char and separator
# Returns a vector of bigrams within the specified window size
# recursive ngram generator
ngrm <- function(words, n, window, sep, ngrams){
# processes all ngrams with first word and calls itself
# check exit condition
if (length(words) < n) # no more n-grams, end of story
return(unlist(ngrams))
# process ngrams and call
pairs <- min(window, length(words) - 1) # number of pairs of ngrams to add
for (i in 1:pairs){ # append n-grams
ngrams <- append(ngrams,
list(paste(c(words[1], words[i+1]), collapse=sep)))
}
return(ngrm(words[2:(length(words))], n, window, sep, ngrams)) # play it again, Sam
}
# wrapper function
if(split == " ") # if split by whitespaces
split <- "\\s" # use it as split char but
x <- gsub(paste0(split,"+"), " ", x) # make sure there's only one between words
x <- gsub("^\\s+", "", x) # and none as first character
words <- unlist(strsplit(x,split = split)) # create vector of words
ngrams <- list() # list of ngrams
if (n < 2) # just return input words
return(words) # or empty vector
return(ngrm(words, n, window, sep, ngrams)) # not a trivial case. call generator.
}
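# Example (each word is paired with up to `window` following words):
# skip.ngram("a b c d", window = 2)
# #> "a::b" "a::c" "b::c" "b::d" "c::d"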
#
# updt.DF - Updates ngram data frames
#
updt.DF <- function(x, y){
# Takes a term frequency data frame and a term freq vector.
# Returns a data frame with counts updated. Non existing keys are added.
# Update existing keys
for (i in 1:length(y)){
x$Count[x$Key == names(y)[i]] <- x$Count[x$Key == names(y)[i]] + y[i]
}
# Add non existing keys
toadd <- !(names(y) %in% x$Key)
totadded <- sum(toadd)
if (totadded > 0) {
x <- rbind(x, ngram.DF(y[toadd]))
cat('>>>', totadded, 'row(s) added to df. \n')
}
# Return updated data frame in order
return(x[order(x$Count, decreasing=T),])
}
#
# updt.DF - Updates ngram data frames - vectorized version -
#
updt.DF <- function(x, y, test=FALSE){
  # Takes an ngram frequency df (as produced by ngram.DF) and a term freq vector.
  # Returns a data frame of the same type with counts updated and new keys added.
  # To vectorize the update, the shared keys must appear in the same relative
  # order in x and y (both are kept sorted by key).
  yinKeys <- names(y) %in% x$Key                    # y entries already in x
  xinKeys <- x$Key %in% names(y)                    # same from x's point of view
  # Update existing keys
  x$Count[xinKeys] <- x$Count[xinKeys] + y[yinKeys] # increment existing counters
# Add non existing keys
tot2add <- sum(!yinKeys) # Nr of rows to add
if (tot2add > 0) { # rbind without rows result in error
x <- rbind(x, ngram.DF(y[!yinKeys]), row.names=NULL)
rownames(x) <- NULL
if (test)
cat('>>>', tot2add, 'row(s) added to df. \n') # inform nr of rows added
}
# Return updated data frame in order
return(list(added = tot2add,
updted = sum(yinKeys),
df = x[order(x$Key, decreasing=F),])) # return new df in key order
}
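# Example (all keys of y already exist in x, so ngram.DF() is not invoked):
# df <- data.frame(Key = c(1, 2), Count = c(3, 5), W1 = c("a", "b"),
#                  stringsAsFactors = FALSE)
# y  <- c(`1` = 2)            # term-frequency vector named by key
# updt.DF(df, y)$df$Count     #> 5 5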
#
# updt.Vocab - Updates Vocabulary
#
updt.Vocab <- function(x, Vocab=WRDS, test=FALSE) {
#
  # Takes a character vector and adds words not yet in Vocab
#
words <- ngram(x, 1)
new_words <- !(words %in% Vocab)
strt <- length(Vocab) + 1
end <- strt + sum(new_words) - 1
if (end >= strt)
Vocab[strt:end] <- words[new_words]
if(test)
cat('>>>', sum(new_words), 'words added. \n')
return(Vocab)
}
#
# updt.Vocab - Using hash package...
#
updt.Vocab <- function(x, Vocab=WRDS_H, Vocab_inv=WRDS_H_INV, test=FALSE) {
#
  # Takes a character vector of words and adds missing ones
  # to Vocab and to the inverted Vocab hash table.
# Returns the number of words actually added.
#
new_words <- !(has.key(x, Vocab)) # identify words to add
strt <- max(values(Vocab)) + 1 # compute range of new values
end <- strt + sum(new_words) - 1
if (end >= strt) { # add new words
Vocab[x[new_words]] <- strt:end # to Vocab
Vocab_inv[strt:end] <- x[new_words] # and inverted Vocab
}
if (test) # useful for testing
cat('>>> Words added:', paste(x[new_words], collapse='-'), '\n')
return(sum(new_words)) # Nr of words added
}
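# Example (hash-based vocab; hashes are environments, so they are updated in place):
# WRDS_H     <- hash(c("<UNK>", "the"), 1:2)
# WRDS_H_INV <- hash(as.character(values(WRDS_H)), keys(WRDS_H))
# updt.Vocab(c("the", "fox"), WRDS_H, WRDS_H_INV)   #> 1   ("fox" gets code 3)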
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/get_bdate.R
\name{get_date}
\alias{get_date}
\title{Returns a given individual's date of birth or death}
\usage{
get_date(x = NULL, df_ind = NULL, df_fam = NULL, var_id = "id",
var_date = c("bdate", "ddate", "mdate")[1])
}
\arguments{
\item{x}{Individual ID}
\item{df_ind}{A dataframe containing IDs and dates}
\item{df_fam}{A dataframe containing family-level data (optional)}
\item{var_id}{Variable name for ID (default is 'id')}
\item{var_date}{Variable name for the date of interest: one of 'bdate', 'ddate' or 'mdate' (default is 'bdate')}
}
\description{
This function returns the requested date (birth date by default) of a given individual, if known.
}
\examples{
\dontrun{
df_ind <- get_exmpl_df()
df_ind$bdate <- sample(seq(as.Date("1774-12-31"), as.Date("1874-12-31"), 100), nrow(df_ind))
df_fam <- data.frame(idf = c(0,unique(df_ind$momid[df_ind$momid>0])), fall = "C")
evmat <- get_evmat(df_ind, df_fam)
my_id <- sample_kh(df_ind, df_fam)
get_age(my_id, paste(df_ind$bdate[df_ind$id == my_id]+21*365-25), evmat)
}
}
\keyword{kh}
\keyword{spouse}
|
/man/get_date.Rd
|
no_license
|
johow/kinlab
|
R
| false
| false
| 1,017
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/XGR.R
\name{XGR.iterate_overlap}
\alias{XGR.iterate_overlap}
\title{Check overlap with XGR annotations}
\usage{
XGR.iterate_overlap(
lib.selections = c("ENCODE_TFBS_ClusteredV3_CellTypes", "TFBS_Conserved",
"ReMap_PublicAndEncode_TFBS", "Uniform_TFBS"),
subset_DT,
save_path = F,
nCores = 4
)
}
\arguments{
\item{lib.selections}{Which XGR annotations to check overlap with.
For full list of libraries see:
\url{http://xgr.r-forge.r-project.org/#annotations-at-the-genomic-region-level}}
\item{subset_DT}{Data.frame with at least the following columns:
\describe{
\item{SNP}{SNP RSID}
\item{CHR}{chromosome}
\item{POS}{position}
}}
\item{save_path}{Save the results as a data.frame}
\item{nCores}{Multi-thread across libraries}
}
\description{
Automatically handles different file formats provided by XGR
(e.g. varying kinds of nested/unnested \code{GRanges}).
Then returns a \code{GRanges} object with only the XGR annotation ranges
that overlap with the SNPs in \code{subset_DT}.
The returned \code{GRanges} object is merged with the hits from \code{subset_DT}.
}
\examples{
\dontrun{
data("BST1")
finemap_DT <- BST1
gr.hits <- XGR.iterate_overlap(lib.selections=c("ENCODE_TFBS_ClusteredV3_CellTypes"), subset_DT=finemap_DT, nCores=1)
}
}
\seealso{
Other XGR:
\code{\link{DT_to_GRanges}()},
\code{\link{GRanges_to_BED}()},
\code{\link{XGR.download_and_standardize}()},
\code{\link{XGR.enrichment_bootstrap}()},
\code{\link{XGR.enrichment_plot}()},
\code{\link{XGR.enrichment}()},
\code{\link{XGR.filter_assays}()},
\code{\link{XGR.filter_sources}()},
\code{\link{XGR.import_annotations}()},
\code{\link{XGR.iterate_enrichment}()},
\code{\link{XGR.merge_and_process}()},
\code{\link{XGR.plot_enrichment}()},
\code{\link{XGR.plot_peaks}()},
\code{\link{XGR.prepare_foreground_background}()}
}
\concept{XGR}
\keyword{internal}
|
/man/XGR.iterate_overlap.Rd
|
permissive
|
UKDRI/echolocatoR
|
R
| false
| true
| 1,893
|
rd
|
# Class providing the Module object
#
# Here is a more detailed sentence
# about the Module class
#
# @docType class
# @keywords biomarker classification
# @return An object of class \code{Module}
# @format \code{\link{R6Class}} object
#
# @field label the name of the Module
# @field tasks a list of active Task objects
# @field inactive a list of inactive Task objects
#
# @section Methods:
# \describe{
#   \item{\code{addTask(label,method,parameters,libraries,control)}}{Add a Task to the Module}
#   \item{\code{deleteTask(label)}}{Remove a Task from the Module}
#   \item{\code{activateTask(label)}}{Activate a Task}
#   \item{\code{deactivateTask(label)}}{Deactivate a Task}
#   \item{\code{getActive()}}{Return the named list of task activity flags}
#   \item{\code{summary()}}{Print out a summary of the Tasks in the Module}
# }
#
# @keywords internal
# define the Module class
Module <- R6::R6Class("Module",
public = list(
#================#
# public members #
#================#
label = NA, # the name of the Module
tasks = list(), # a list of Task objects
inactive =list(), # a list of inactive Task objects
#================#
# public methods #
#================#
# global function to add a task
addTask = function(label=NULL,method=NULL,parameters=NULL,libraries=NULL,control=NULL){
# create a new Task object
# task <- Task$new(label,method,datatype,parameters,libraries,control)
task <- Task$new(label,method,parameters,libraries,control)
# validate the object's parameters
private$validate(task)
# if there are no errors, the above function will execute silently
self$tasks[[label]] <- task
# update the active task list
private$active[[label]] <- TRUE
# return self
invisible(self)
},
# global function to delete a task
deleteTask = function(label=NULL){
# check that label is not null
if(is.null(label)){
stop("argument 'label' cannot be NULL")
}
# check that label is a character string
if(!is.character(label)){
stop("argument 'label' must be of class character")
}
# check that all values provided to label are true labels of tasks
if(!all(label%in%c(names(self$tasks),names(self$inactive)))){
not.labels <- label[which(!(label%in%c(names(self$tasks),names(self$inactive))))]
if(length(not.labels)==1){
msg <- paste0(not.labels," is not a valid task label")
} else {
msg <- paste0(paste(not.labels,collapse=", ")," are not valid task labels")
}
stop(msg)
}
# remove the task from the module
self$tasks[label] <- NULL
self$inactive[label] <- NULL
# remove the task from the active list
private$active[label] <- NULL
# if there are no more tasks in the module, restore to default
if(length(self$tasks)==0){
self$tasks <- list()
}
# return self
invisible(self)
},
# global function to activate a task
activateTask = function(label){
# check that label is not null
if(is.null(label)){
stop("argument 'label' cannot be NULL")
}
# check that label is a character string
if(!is.character(label)){
stop("argument 'label' must be of class character")
}
# check that all values provided to label are true labels of tasks
if(!all(label%in%c(names(self$tasks),names(self$inactive)))){
not.labels <- label[which(!(label%in%c(names(self$tasks),names(self$inactive))))]
if(length(not.labels)==1){
msg <- paste0(not.labels," is not a valid task label")
} else {
msg <- paste0(paste(not.labels,collapse=", ")," are not valid task labels")
}
stop(msg)
}
      # check if any of the tasks are already active; if so, issue a warning
if(any(label%in%names(self$tasks))){
idx <- which(label%in%names(self$tasks))
already.active <- label[idx]
warning(paste0("the following tasks are already active: ",paste0("'",already.active,"'",collapse=", ")))
label <- label[-idx]
}
if(length(label)>0){
# activate the task(s)
private$active[label] <- TRUE
# move the task from the active task member to the inactive task member
for(i in 1:length(label)){
self$tasks[[label[i]]] <- self$inactive[[label[i]]]
self$inactive[[label[i]]] <- NULL
}
}
# return self
invisible(self)
},
# global function to deactivate a task
deactivateTask = function(label){
# check that label is not null
if(is.null(label)){
stop("argument 'label' cannot be NULL")
}
# check that label is a character string
if(!is.character(label)){
stop("argument 'label' must be of class character")
}
# check that all values provided to label are true labels of tasks
if(!all(label%in%c(names(self$tasks),names(self$inactive)))){
        not.labels <- label[which(!(label%in%c(names(self$tasks),names(self$inactive))))]
        if(length(not.labels)==1){
          msg <- paste0("'",not.labels,"' is not a valid task label")
        } else {
          msg <- paste0(paste0("'",not.labels,"'",collapse=", ")," are not valid task labels")
        }
stop(msg)
}
# check if any of the tasks are already inactive, if so, issue warning
if(any(label%in%names(self$inactive))){
idx <- which(label%in%names(self$inactive))
already.inactive <- label[idx]
warning(paste0("the following tasks are already inactive: ",paste0("'",already.inactive,"'",collapse=", ")))
label <- label[-idx]
}
if(length(label)>0){
# deactivate the task(s)
private$active[label] <- FALSE
# move the task from the active task member to the inactive task member
for(i in 1:length(label)){
self$inactive[[label[i]]] <- self$tasks[[label[i]]]
self$tasks[[label[i]]] <- NULL
}
}
# return self
invisible(self)
},
getActive = function(){
return(private$active)
}
),
private = list(
#=================#
# private members #
#=================#
class = NA, # the child class of the Module
active = list(), # a Boolean vector of length = number of Tasks
#=================#
# private methods #
#=================#
    # global placeholder for the validate function; overridden in subclasses
validate = function(task){
invisible(self)
}
),
active = list(
summary = function(){ # a function that will print out a summary of the Tasks in the Module
invisible(self)
},
getClass = function(){
return(private$class)
}
),
lock_class = FALSE
)
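#
# Usage sketch (assumes a Task R6 class with a matching
# $new(label, method, parameters, libraries, control) constructor is defined
# elsewhere, and that a subclass of Module implements private$validate()):
#
# mod <- Module$new()
# mod$addTask(label = "normalize", method = scale)
# mod$deactivateTask("normalize")   # moves the task to mod$inactive
# mod$getActive()                   # named list of activity flags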
|
/R/module-class.R
|
no_license
|
jperezrogers/rabbit
|
R
| false
| false
| 7,153
|
r
|
raldm <- function(sigmas, p) {
  #
  # Generates random samples from an asymmetric Laplace distribution (ALD),
  # one draw per entry of sigmas (identical sigmas are drawn in one batch)
  #
unique_sigmas <- unique(sigmas)
error <- vector(length = length(sigmas))
for (sigma in unique_sigmas) {
dim <- sum(sigmas == sigma)
simulations <- ralp(dim, sigma = sigma, p)
error[sigmas == sigma] <- simulations
}
return(error)
}
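# Example (assumes ralp(n, sigma, p), an ALD sampler, is defined elsewhere):
# raldm(sigmas = c(1, 1, 2), p = 0.5)   # three draws; equal sigmas drawn in one batch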
|
/Code/raldm.R
|
no_license
|
plataformapreventiva/MDI.Scripts
|
R
| false
| false
| 352
|
r
|
library(dplyr)
library(ggplot2)
library(ggpubr)
library(tidyr)
library(viridis)
base_dir = here::here()
#source(paste0(base_dir, "/R/bm-sim-data.R"))
#source(paste0(base_dir, "/R/helper.R"))
sysfonts::font_add("Gyre Bonum",
regular = "/usr/share/texmf-dist/fonts/opentype/public/tex-gyre/texgyrebonum-regular.otf",
bold = "/usr/share/texmf-dist/fonts/opentype/public/tex-gyre/texgyrebonum-bold.otf")
showtext::showtext_auto()
font_scale = 6
## Figure 1:
## =====================================
# Data:
# - `df_plt` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-1-agbm-restart-iters.Rda"))
agbm_iters = which.min(df_plt[df_plt$method == "ACWB", "Risk"])
iter = sum(df_plt$method == "ACWB")
# Plot
gg = ggplot(
df_plt %>% filter(type == "oob"),
aes(x = Iteration, y = Risk, color = method)) +
geom_vline(
xintercept = agbm_iters,
color = "dark red",
alpha = 0.5,
linetype = "dashed",
size = 1.3) +
geom_line(size = 1.6) +
xlim(0, iter) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_color_viridis(discrete = TRUE) +
xlab("Iterations") +
ylab("Empirical risk on\ntest data") +
labs(color = "Algorithm") +
annotate("text",
x = agbm_iters,
y = max(df_plt$Risk),
label = "Optimal stopping ACWB",
color = "dark red",
hjust = -0.1,
size = 8)
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-1-optim_emp_risk.pdf",
width = dinA4width * 2/3 * 0.5,
height = dinA4width * 2/3 * 0.7 * 0.5,
units = "mm")
## Figure 2:
## =====================================
# Data:
# - `ll_feats` list with
# $ .. `categorical` data.frame
# $ .. `numeric` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-2-features-viz.Rda"))
## NUMERIC
gg = ggplot(data = ll_feats$numeric, aes(x = x, y = y)) +
geom_line(size = 1.2) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
ylab(expression(eta[j])) +
facet_wrap(. ~ feat, scales = "free", ncol = 3)
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-2-fe-numeric.pdf",
width = dinA4width * 2/3 * 0.5,
height = dinA4width * 2/3 * 0.5,
units = "mm")
## CATEGORICAL
gg = ggplot(data = ll_feats$categorical, aes(x = param.cls, y = param.means)) +
geom_boxplot() +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
#axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text.x = element_blank(),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
ylab(expression(paste("Group means ", tau[j]))) +
xlab("Group") +
facet_wrap(. ~ feat, scales = "free", ncol = 3)
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-2-fe-cat.pdf",
width = dinA4width * 2/3 * 0.5,
height = dinA4width * 2/3 * 0.5,
units = "mm")
## Figure 3:
## =====================================
# Data:
# - `df_plt_mem` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-3-binning-memory.Rda"))
# - `df_plt_run` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-3-binning-runtime.Rda"))
## MEMORY
gg = ggplot(
data = df_plt_mem,
aes(x = nrows, y = rel_mem, color = ptotal, group = paste0(ncols, ncolsnoise))) +
geom_hline(
yintercept = 1,
color = "dark red",
lty = 2) +
#geom_line() +
geom_smooth(se = FALSE, alpha = 0.7) +
geom_point(size = 10, alpha = 0.7) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
scale_y_continuous(breaks = c(1, 2, 4, 6)) +
scale_color_viridis(discrete = TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab(expression(atop("Memory improvement\n", paste(Mem["No Binning"], "/", Mem["Binning"], sep = "")))) +
labs(color = "Number of\nFeatures") +
annotate("text",
x = max(df_plt_mem$nrows),
y = 1,
label = "Baseline (used memory is equal)",
color = "dark red",
vjust = 1.5,
hjust = 1,
size = 1.5 * font_scale)
dinA4width = 210 * font_scale
scale_fac = 1/2
ggsave(
plot = gg,
filename = "figures/fig-3-binning-memory.pdf",
width = dinA4width * scale_fac,
height = dinA4width * scale_fac * 0.7,
units = "mm")
## RUNTIME:
df_plt_run = df_plt_run %>%
filter(rel_time < 10, rel_time > 2, phase == "Fitting") %>%
group_by(nrows, ptotal) %>%
summarize(med = median(rel_time), min = min(rel_time), max = max(rel_time))
dodge_width = 0.25
gg = ggplot() +
geom_hline(
yintercept = 1,
lty = 2,
col = "dark red") +
geom_point(
data = df_plt_run,
aes(x = nrows, y = med, color = as.factor(ptotal)),
size = 10,
alpha = 0.7,
position = position_dodge(width = dodge_width)) +
geom_errorbar(
data = df_plt_run,
aes(x = nrows, ymax = max, ymin = min, color = as.factor(ptotal)),
na.rm = TRUE,
position = position_dodge(width = dodge_width),
size = 1.3) +
#geom_violin(
#data = df_plt_run ,
#aes(x = as.factor(nrows), y = rel_time, fill = as.factor(ptotal), color = as.factor(ptotal)),
#alpha = 0.2,
#show.legend = FALSE) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete=TRUE) +
scale_fill_viridis(discrete=TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab(expression(atop("Speedup\n", paste(Time["No Binning"], "/", Time["Binning"], sep = "")))) +
labs(color = "Number of\nFeatures", fill = "Number of\nFeatures") +
scale_y_continuous(breaks = c(1, 2, 4, 6)) +
#scale_x_continuous(trans = "log10")# +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
annotate("text",
x = max(df_plt_mem$nrows),
y = 1,
label = "Baseline (runtime is equal)",
color = "dark red",
vjust = 1.5,
hjust = 1,
size = 1.5 * font_scale)
#facet_grid(. ~ factor(phase, levels = c("Initialization", "Fitting", "Initialization + Fitting")), scales = "free_y")
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-3-binning-runtime.pdf",
width = dinA4width * scale_fac,
height = dinA4width * 0.7 * scale_fac,
units = "mm")
## Figure 4:
## =====================================
seed = 31415
n = 10000
p = 4
pnoise = 2
sn_ratio = 0.4
set.seed(seed)
dat = simData(n, p, pnoise) # simData() comes from the helper scripts (see the commented source() calls above)
dat_noise = dat$data
set.seed(seed)
dat_noise$y = rnorm(n = n, mean = dat_noise$y, sd = sd(dat_noise$y) / sn_ratio)
library(compboost)
set.seed(seed)
cboost = boostSplines(data = dat_noise, target = "y", iterations = 10000L, learning_rate = 0.01,
loss = LossQuadratic$new(), stop_args = list(eps_for_break = 0, patience = 3L), oob_fraction = 0.3,
df = 7)
set.seed(seed)
cboost_bin = boostSplines(data = dat_noise, target = "y", iterations = 10000L, learning_rate = 0.01,
loss = LossQuadratic$new(), stop_args = list(eps_for_break = 0, patience = 3L), oob_fraction = 0.3,
bin_root = 2, df = 7)
ndata = 1000L
dat_idx = as.integer(seq(1, n, len = ndata))
feat = colnames(dat$data)[grepl(pattern = "x", x = colnames(dat$data))]
bls = paste0(feat, "_spline")
coef_names = c("coef_binning", "coef_nobinning")
coefs = list(coef_binning = cboost_bin$getEstimatedCoef(), coef_nobinning = cboost$getEstimatedCoef())
out = list()
for(bl in bls) {
bl_nbr = as.numeric(gsub("\\D", "", bl))
x = dat$data[[paste0("x", bl_nbr)]][dat_idx]
y = dat$sim_poly[[bl_nbr]]$y[dat_idx]
df_temp = data.frame(x = x, truth = y)
knots = compboostSplines::createKnots(values = x, n_knots = 20, degree = 3)
basis = compboostSplines::createSplineBasis(values = x, degree = 3, knots = knots)
for (cn in coef_names) {
params = coefs[[cn]]
if (bl %in% names(params)) {
param = params[[bl]]
pred = basis %*% param
df_pred = data.frame(pred)
} else {
df_pred = data.frame(rep(0, ndata))
}
colnames(df_pred) = cn
df_temp = cbind(df_temp, df_pred, bl = bl)
}
out[[bl]] = df_temp
}
ll_fe = lapply(out, function (df) {
df %>%
pivot_longer(cols = all_of(c(coef_names, "truth")), names_to = "method", values_to = "effect") %>%
group_by(method) %>%
mutate(y = effect - mean(effect)) %>%
arrange(method, x)
})
df_fe = do.call(rbind, ll_fe)
feat_id = as.integer(gsub("\\D", "", df_fe$bl))
feat = paste0("Feature ", feat_id)
df_fe$feat = factor(feat, levels = paste0("Feature ", sort(unique(feat_id))))
df_fe$line = df_fe$method
df_fe$line[df_fe$line == "truth"] = "Truth"
df_fe$line[df_fe$line == "coef_binning"] = "Binning"
df_fe$line[df_fe$line == "coef_nobinning"] = "No Binning"
df_fe$line = factor(df_fe$line)
df_fe$line = ordered(df_fe$line, c("Truth", "No Binning", "Binning"))
df_fe$linetype = ifelse(df_fe$line == "Binning", "solid", "dashed")
df_area = cbind(
df_fe %>% select(x, bl, method, y, feat, line) %>% filter(line == "No Binning"),
df_fe %>% ungroup() %>% mutate(y_t = y, line_t = line) %>% select(y_t, line_t) %>% filter(line_t == "Truth"))
gg = ggplot() +
geom_ribbon(
data = df_area,
aes(ymin = y, ymax = y_t, x = x),
fill = "dark red",
alpha = 0.2) +
geom_line(
data = df_fe,
aes(x = x, y = y, color = line, linetype = linetype),
lwd = 2) +
#scale_color_viridis(discrete = TRUE) +
theme_minimal(base_family = "Gyre Bonum") +
scale_color_viridis(discrete = TRUE) +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()) +
xlab("x") +
ylab("Partial effect") +
labs(color = "", linetype = "") +
#scale_x_continuous(breaks = NULL) +
scale_linetype(guide = "none") +
facet_wrap(. ~ feat, scales = "free_x")#, scales = "free")
gg
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-4-binning-fe.pdf",
width = dinA4width * 1/2,
height = dinA4width * 0.7 * 1/2,
units = "mm")
## Figure 5:
## =====================================
# Data:
# - `df_imse` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-5-binning-imse.Rda"))
df_imse$method_n = as.factor(df_imse$method_n)
levels(df_imse$method_n) = c("Binning", "Binning 4", "No binning")
gg = ggplot(
data = df_imse %>% filter(method_n != "Binning 4"),
aes(x = as.factor(nrows), y = mimse, fill = method_n, color = method_n)) +
geom_boxplot(alpha = 0.2) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE) +
xlab("Number of rows\n(log10 scale)") +
ylab("MISE") +
labs(color = "", fill = "") +
facet_grid(paste0("SNR = ", sn_ratio) ~ ., scales = "free_y")
gg
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-5-binning-imse.pdf",
width = dinA4width * 1/2,
height = dinA4width * 1/2 * 0.7,
units = "mm")
## Figure 6:
## =====================================
# Data:
# - `df_cat_memory` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-6-cat-memory.Rda"))
# - `df_cat_runtime` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-6-cat-runtime.Rda"))
font_scale = 6
## MEMORY
df_plt_mem = df_cat_memory %>%
filter(method != "linear") %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
summarize(mem = median(mem)) %>%
mutate(ptotal = ncols + ncolsnoise) %>%
mutate(method = as.factor(method)) %>%
filter(ptotal %in% c(10, 40, 75, 100, 150, 300))
levels(df_plt_mem$method) = list(Binary = "binary", Ridge = "ridge")
df_plt_mem$ptotal = ordered(df_plt_mem$ptotal, levels = sort(unique(df_plt_mem$ptotal)))
df_plt_mem$nclasses = factor(paste0("# Classes: ", df_plt_mem$nclasses), levels = paste0("# Classes: ", c(5, 10, 20)))
gg = ggplot() +
geom_smooth(
data = df_plt_mem,
aes(x = nrows, y = mem, color = ptotal, group = paste0(ncols, ncolsnoise, nclasses)),
se = FALSE) +
geom_point(
data = df_plt_mem,
aes(x = nrows, y = mem, color = ptotal, group = paste0(ncols, ncolsnoise)),
size = 6,
alpha = 0.5) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = sort(unique(df_plt_mem$nrows)), trans = "log10") +
scale_color_viridis(discrete = TRUE) +
xlab("Number of rows\n(log10 Scale)") +
ylab("Used memory in MB") +
labs(color = "Number of\nreatures") +
coord_cartesian(clip = 'off') +
facet_grid(nclasses ~ method)#, scales= "free_y")
gg
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "../../paper-figures/figures/fig-6-cat-memory.pdf",
#filename = "figures/fig-6-cat-memory.pdf",
width = dinA4width * 1/2,
height = dinA4width * 1/2,
units = "mm")
## RUNTIME
df_plt_run = df_cat_runtime %>%
filter(method != "linear") %>%
#group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
#summarize(mem = median(time)) %>%
mutate(ptotal = ncols + ncolsnoise) %>%
mutate(method = as.factor(method)) %>%
group_by(nrows, ptotal, method, nclasses) %>%
summarize(med = median(time), min = min(time), max = max(time)) %>%
filter(ptotal %in% c(10, 40, 75, 100, 150, 300))
levels(df_plt_run$method) = list(Binary = "binary", Ridge = "ridge")
df_plt_run$ptotal = ordered(df_plt_run$ptotal, levels = sort(unique(df_plt_run$ptotal)))
df_plt_run$nclasses = factor(paste0("# Classes: ", df_plt_run$nclasses), levels = paste0("# Classes: ", c(5, 10, 20)))
dodge_width = 0.25
gg = ggplot() +
geom_point(
data = df_plt_run,
aes(x = nrows, y = med / 60, color = as.factor(ptotal)),
size = 6,
alpha = 0.5,
position = position_dodge(width = dodge_width)) +
geom_errorbar(
data = df_plt_run,
aes(x = nrows, ymax = max / 60, ymin = min / 60, color = as.factor(ptotal)),
na.rm = TRUE,
position = position_dodge(width = dodge_width),
size = 1.3) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete=TRUE) +
scale_fill_viridis(discrete=TRUE) +
xlab("Number of rows\n(log10 Scale)") +
ylab("Runtime in minutes") +
labs(color = "Number of\nfeatures", fill = "Number of\nfeatures") +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
facet_grid(nclasses ~ method)#, scales= "free_y")
dinA4width = 210 * font_scale
ggsave(
plot = gg,
#filename = "figures/fig-6-cat-runtime.pdf",
filename = "../../paper-figures/figures/fig-6-cat-runtime.pdf",
width = dinA4width * 1/2,
height = dinA4width * 1/2,
units = "mm")
## Figure 7:
## =====================================
# Data:
# - `ll_noise` list with
# $ .. `density` data.frame
# $ .. `cat_sel` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-7-cat-noise.Rda"))
gg = ggplot(
mapping = aes(
x = rel_nwrongnotselected,
y = rel_notselected,
shape = method,
color = method,
fill = method)) +
geom_polygon(
data = ll_noise$density,
alpha = 0.2,
size = 0.1) +
geom_point(data = ll_noise$cat_sel) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
xlab("FNR") +
ylab("TNR") +
labs(fill = "", color = "", shape = "") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
  # the continuous scales below replace any earlier xlim()/ylim() settings,
  # so the axis ranges are left to ggplot's defaults
  scale_x_continuous(breaks = seq(0, 1, 0.2)) +
  scale_y_continuous(breaks = seq(0, 1, 0.2)) +
facet_grid(sn_ratiof ~ .)#, scales = "free_y")
gg
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-7-cat-noise.pdf",
width = dinA4width * 2/3 * 0.7,
height = dinA4width * 2/3 * 0.5,
units = "mm")
## Figure 8:
## =====================================
# Data:
# - `df_cat_bp` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-8-cat-mse.Rda"))
font_scale = 6
gg = ggplot(df_cat_bp, aes(x = mse, y = value, fill = method, color = method)) +
geom_boxplot(alpha = 0.2) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
xlab("") +
ylab("MSE") +
labs(fill = "", color = "", shape = "") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
ylim(0, 40) +
facet_grid(sn_ratiof ~ .) #, scales = "free_y")
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-8-cat-mse.pdf",
width = dinA4width * 2/3 * 0.7,
height = dinA4width * 2/3 * 0.5,
units = "mm")
## Figure 9:
## =====================================
## MEMORY:
load("rda/fig-9-acwb-mem.Rda")
gg = ggplot(
data = df_plt_mem %>% filter(ptotal %in% c(10, 30, 75, 100, 150, 300)),
aes(x = nrows, y = rel_mem, color = as.factor(ptotal), group = paste0(ncols, ncolsnoise))) +
geom_hline(
yintercept = 1,
color = "dark red",
lty = 2) +
#geom_line() +
geom_smooth(se = FALSE, alpha = 0.7) +
geom_point(size = 10, alpha = 0.7) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
scale_color_viridis(discrete = TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab(expression(atop("Memory improvement\n", paste(Mem["CWB"], "/", Mem["ACWB"], sep = "")))) +
labs(color = "Number of\nFeatures") +
annotate("text",
x = max(df_plt_mem$nrows),
y = 1,
label = "Baseline (used memory is equal)",
color = "dark red",
vjust = 1.5,
hjust = 1,
size = 1.5 * font_scale)
dinA4width = 210 * font_scale
scale_fac = 1/2
ggsave(
plot = gg,
filename = "figures/fig-9-acwb-memory.pdf",
width = dinA4width * scale_fac,
height = dinA4width * scale_fac * 0.7,
units = "mm")
## RUNTIME:
load("rda/fig-9-acwb-run.Rda")
df_plt_run = df_plt_run %>%
filter(rel_time < 0.7) %>%
group_by(nrows, ptotal) %>%
summarize(med = median(rel_time), min = min(rel_time), max = max(rel_time)) %>%
filter(ptotal %in% c(10, 30, 75, 100, 150, 300))
dodge_width = 0.25
gg = ggplot() +
geom_hline(
yintercept = 1,
lty = 2,
col = "dark red") +
geom_point(
data = df_plt_run,
aes(x = nrows, y = med, color = as.factor(ptotal)),
size = 10,
alpha = 0.7,
position = position_dodge(width = dodge_width)) +
geom_errorbar(
data = df_plt_run,
aes(x = nrows, ymax = max, ymin = min, color = as.factor(ptotal)),
na.rm = TRUE,
position = position_dodge(width = dodge_width),
size = 1.3) +
#geom_violin(
#data = df_plt_run ,
#aes(x = as.factor(nrows), y = rel_time, fill = as.factor(ptotal), color = as.factor(ptotal)),
#alpha = 0.2,
#show.legend = FALSE) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete=TRUE) +
scale_fill_viridis(discrete=TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab(expression(atop("Speedup\n", paste(Time["No Binning"], "/", Time["Binning"], sep = "")))) +
labs(color = "Number of\nFeatures", fill = "Number of\nFeatures") +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
annotate("text",
x = max(df_plt_mem$nrows),
y = 1,
label = "Baseline (runtime is equal)",
vjust = 1.5,
color = "dark red",
hjust = 1,
size = 1.5 * font_scale) +
ylim(0.25, 1)
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-9-acwb-runtime.pdf",
width = dinA4width * scale_fac,
height = dinA4width * 0.7 * scale_fac,
units = "mm")
|
/paper-figures/create-figures.R
|
no_license
|
schalkdaniel/bm-CompAspCboost
|
R
| false
| false
| 24,965
|
r
|
library(dplyr)
library(ggplot2)
library(ggpubr)
library(tidyr)
library(viridis)
base_dir = here::here()
#source(paste0(base_dir, "/R/bm-sim-data.R"))
#source(paste0(base_dir, "/R/helper.R"))
sysfonts::font_add("Gyre Bonum",
regular = "/usr/share/texmf-dist/fonts/opentype/public/tex-gyre/texgyrebonum-regular.otf",
bold = "/usr/share/texmf-dist/fonts/opentype/public/tex-gyre/texgyrebonum-bold.otf")
showtext::showtext_auto()
font_scale = 6
## Figure 1:
## =====================================
# Data:
# - `df_plt` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-1-agbm-restart-iters.Rda"))
agbm_iters = which.min(df_plt[df_plt$method == "ACWB", "Risk"])
iter = sum(df_plt$method == "ACWB")
# Plot
gg = ggplot(
df_plt %>% filter(type == "oob"),
aes(x = Iteration, y = Risk, color = method)) +
geom_vline(
xintercept = agbm_iters,
color = "dark red",
alpha = 0.5,
linetype = "dashed",
size = 1.3) +
geom_line(size = 1.6) +
xlim(0, iter) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_color_viridis(discrete = TRUE) +
xlab("Iterations") +
ylab("Empirical risk on\ntest data") +
labs(color = "Algorithm") +
annotate("text",
x = agbm_iters,
y = max(df_plt$Risk),
label = "Optimal stopping ACWB",
color = "dark red",
hjust = -0.1,
size = 8)
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-1-optim_emp_risk.pdf",
width = dinA4width * 2/3 * 0.5,
height = dinA4width * 2/3 * 0.7 * 0.5,
units = "mm")
## Figure 2:
## =====================================
# Data:
# - `ll_feats` list with
# $ .. `categorical` data.frame
# $ .. `numeric` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-2-features-viz.Rda"))
## NUMERIC
gg = ggplot(data = ll_feats$numeric, aes(x = x, y = y)) +
geom_line(size = 1.2) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
ylab(expression(eta[j])) +
facet_wrap(. ~ feat, scales = "free", ncol = 3)
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-2-fe-numeric.pdf",
width = dinA4width * 2/3 * 0.5,
height = dinA4width * 2/3 * 0.5,
units = "mm")
## CATEGORICAL
gg = ggplot(data = ll_feats$categorical, aes(x = param.cls, y = param.means)) +
geom_boxplot() +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
#axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text.x = element_blank(),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
ylab(expression(paste("Group means ", tau[j]))) +
xlab("Group") +
facet_wrap(. ~ feat, scales = "free", ncol = 3)
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-2-fe-cat.pdf",
width = dinA4width * 2/3 * 0.5,
height = dinA4width * 2/3 * 0.5,
units = "mm")
## Figure 3:
## =====================================
# Data:
# - `df_plt_mem` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-3-binning-memory.Rda"))
# - `df_plt_run` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-3-binning-runtime.Rda"))
## MEMORY
gg = ggplot(
data = df_plt_mem,
aes(x = nrows, y = rel_mem, color = ptotal, group = paste0(ncols, ncolsnoise))) +
geom_hline(
yintercept = 1,
color = "dark red",
lty = 2) +
#geom_line() +
geom_smooth(se = FALSE, alpha = 0.7) +
geom_point(size = 10, alpha = 0.7) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
scale_y_continuous(breaks = c(1, 2, 4, 6)) +
scale_color_viridis(discrete = TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab(expression(atop("Memory improvement\n", paste(Mem["No Binning"], "/", Mem["Binning"], sep = "")))) +
labs(color = "Number of\nFeatures") +
annotate("text",
x = max(df_plt_mem$nrows),
y = 1,
label = "Baseline (used memory is equal)",
color = "dark red",
vjust = 1.5,
hjust = 1,
size = 1.5 * font_scale)
dinA4width = 210 * font_scale
scale_fac = 1/2
ggsave(
plot = gg,
filename = "figures/fig-3-binning-memory.pdf",
width = dinA4width * scale_fac,
height = dinA4width * scale_fac * 0.7,
units = "mm")
## RUNTIME:
df_plt_run = df_plt_run %>%
filter(rel_time < 10, rel_time > 2, phase == "Fitting") %>%
group_by(nrows, ptotal) %>%
summarize(med = median(rel_time), min = min(rel_time), max = max(rel_time))
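# med/min/max summarise the relative runtime across all replications within
# each (nrows, ptotal) cell; the error bars below span the full observed range.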
dodge_width = 0.25
gg = ggplot() +
geom_hline(
yintercept = 1,
lty = 2,
col = "dark red") +
geom_point(
data = df_plt_run,
aes(x = nrows, y = med, color = as.factor(ptotal)),
size = 10,
alpha = 0.7,
position = position_dodge(width = dodge_width)) +
geom_errorbar(
data = df_plt_run,
aes(x = nrows, ymax = max, ymin = min, color = as.factor(ptotal)),
na.rm = TRUE,
position = position_dodge(width = dodge_width),
size = 1.3) +
#geom_violin(
#data = df_plt_run ,
#aes(x = as.factor(nrows), y = rel_time, fill = as.factor(ptotal), color = as.factor(ptotal)),
#alpha = 0.2,
#show.legend = FALSE) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete=TRUE) +
scale_fill_viridis(discrete=TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab(expression(atop("Speedup\n", paste(Time["No Binning"], "/", Time["Binning"], sep = "")))) +
labs(color = "Number of\nFeatures", fill = "Number of\nFeatures") +
scale_y_continuous(breaks = c(1, 2, 4, 6)) +
#scale_x_continuous(trans = "log10")# +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
annotate("text",
x = max(df_plt_mem$nrows),
y = 1,
label = "Baseline (runtime is equal)",
color = "dark red",
vjust = 1.5,
hjust = 1,
size = 1.5 * font_scale)
#facet_grid(. ~ factor(phase, levels = c("Initialization", "Fitting", "Initialization + Fitting")), scales = "free_y")
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-3-binning-runtime.pdf",
width = dinA4width * scale_fac,
height = dinA4width * 0.7 * scale_fac,
units = "mm")
## Figure 4:
## =====================================
seed = 31415
n = 10000
p = 4
pnoise = 2
sn_ratio = 0.4
set.seed(seed)
dat = simData(n, p, pnoise)  # simData() is provided by the helper scripts referenced (commented out) at the top of this file
dat_noise = dat$data
set.seed(seed)
dat_noise$y = rnorm(n = n, mean = dat_noise$y, sd = sd(dat_noise$y) / sn_ratio)
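# With sn_ratio = 0.4 the injected noise has sd(y) / 0.4 = 2.5 * sd(y), i.e.
# this is a deliberately low signal-to-noise setting for the illustration.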
library(compboost)
set.seed(seed)
cboost = boostSplines(data = dat_noise, target = "y", iterations = 10000L, learning_rate = 0.01,
loss = LossQuadratic$new(), stop_args = list(eps_for_break = 0, patience = 3L), oob_fraction = 0.3,
df = 7)
set.seed(seed)
cboost_bin = boostSplines(data = dat_noise, target = "y", iterations = 10000L, learning_rate = 0.01,
loss = LossQuadratic$new(), stop_args = list(eps_for_break = 0, patience = 3L), oob_fraction = 0.3,
bin_root = 2, df = 7)
ndata = 1000L
dat_idx = as.integer(seq(1, n, len = ndata))
feat = colnames(dat$data)[grepl(pattern = "x", x = colnames(dat$data))]
bls = paste0(feat, "_spline")
coef_names = c("coef_binning", "coef_nobinning")
coefs = list(coef_binning = cboost_bin$getEstimatedCoef(), coef_nobinning = cboost$getEstimatedCoef())
out = list()
for(bl in bls) {
bl_nbr = as.numeric(gsub("\\D", "", bl))
x = dat$data[[paste0("x", bl_nbr)]][dat_idx]
y = dat$sim_poly[[bl_nbr]]$y[dat_idx]
df_temp = data.frame(x = x, truth = y)
knots = compboostSplines::createKnots(values = x, n_knots = 20, degree = 3)
basis = compboostSplines::createSplineBasis(values = x, degree = 3, knots = knots)
for (cn in coef_names) {
params = coefs[[cn]]
if (bl %in% names(params)) {
param = params[[bl]]
pred = basis %*% param
df_pred = data.frame(pred)
} else {
df_pred = data.frame(rep(0, ndata))
}
colnames(df_pred) = cn
df_temp = cbind(df_temp, df_pred, bl = bl)
}
out[[bl]] = df_temp
}
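# Shape check (small sketch, not required for the figure): basis is
# (ndata x p_basis) and param is (p_basis x 1), so pred = basis %*% param gives
# one partial-effect value per grid point, hence ndata rows per base learner.
stopifnot(all(vapply(out, nrow, integer(1)) == ndata))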
ll_fe = lapply(out, function (df) {
df %>%
pivot_longer(cols = all_of(c(coef_names, "truth")), names_to = "method", values_to = "effect") %>%
group_by(method) %>%
mutate(y = effect - mean(effect)) %>%
arrange(method, x)
})
df_fe = do.call(rbind, ll_fe)
feat_id = as.integer(gsub("\\D", "", df_fe$bl))
feat = paste0("Feature ", feat_id)
df_fe$feat = factor(feat, levels = paste0("Feature ", sort(unique(feat_id))))
df_fe$line = df_fe$method
df_fe$line[df_fe$line == "truth"] = "Truth"
df_fe$line[df_fe$line == "coef_binning"] = "Binning"
df_fe$line[df_fe$line == "coef_nobinning"] = "No Binning"
df_fe$line = factor(df_fe$line)
df_fe$line = ordered(df_fe$line, c("Truth", "No Binning", "Binning"))
df_fe$linetype = ifelse(df_fe$line == "Binning", "solid", "dashed")
df_area = cbind(
df_fe %>% select(x, bl, method, y, feat, line) %>% filter(line == "No Binning"),
df_fe %>% ungroup() %>% mutate(y_t = y, line_t = line) %>% select(y_t, line_t) %>% filter(line_t == "Truth"))
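# Note (assumption behind the cbind above): the "No Binning" and "Truth" subsets
# share the same row order -- both were arranged by method and x in ll_fe -- so
# the ribbon below shades the gap between the two curves at each x.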
gg = ggplot() +
geom_ribbon(
data = df_area,
aes(ymin = y, ymax = y_t, x = x),
fill = "dark red",
alpha = 0.2) +
geom_line(
data = df_fe,
aes(x = x, y = y, color = line, linetype = linetype),
lwd = 2) +
#scale_color_viridis(discrete = TRUE) +
theme_minimal(base_family = "Gyre Bonum") +
scale_color_viridis(discrete = TRUE) +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()) +
xlab("x") +
ylab("Partial effect") +
labs(color = "", linetype = "") +
#scale_x_continuous(breaks = NULL) +
scale_linetype(guide = "none") +
facet_wrap(. ~ feat, scales = "free_x")#, scales = "free")
gg
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-4-binning-fe.pdf",
width = dinA4width * 1/2,
height = dinA4width * 0.7 * 1/2,
units = "mm")
## Figure 5:
## =====================================
# Data:
# - `df_imse` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-5-binning-imse.Rda"))
df_imse$method_n = as.factor(df_imse$method_n)
levels(df_imse$method_n) = c("Binning", "Binning 4", "No binning")
gg = ggplot(
data = df_imse %>% filter(method_n != "Binning 4"),
aes(x = as.factor(nrows), y = mimse, fill = method_n, color = method_n)) +
geom_boxplot(alpha = 0.2) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete = TRUE) +
scale_fill_viridis(discrete = TRUE) +
xlab("Number of rows\n(log10 scale)") +
ylab("MISE") +
labs(color = "", fill = "") +
facet_grid(paste0("SNR = ", sn_ratio) ~ ., scales = "free_y")
gg
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-5-binning-imse.pdf",
width = dinA4width * 1/2,
height = dinA4width * 1/2 * 0.7,
units = "mm")
## Figure 6:
## =====================================
# Data:
# - `df_cat_memory` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-6-cat-memory.Rda"))
# - `df_cat_runtime` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-6-cat-runtime.Rda"))
font_scale = 6
## MEMORY
df_plt_mem = df_cat_memory %>%
filter(method != "linear") %>%
group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
summarize(mem = median(mem)) %>%
mutate(ptotal = ncols + ncolsnoise) %>%
mutate(method = as.factor(method)) %>%
filter(ptotal %in% c(10, 40, 75, 100, 150, 300))
levels(df_plt_mem$method) = list(Binary = "binary", Ridge = "ridge")
df_plt_mem$ptotal = ordered(df_plt_mem$ptotal, levels = sort(unique(df_plt_mem$ptotal)))
df_plt_mem$nclasses = factor(paste0("# Classes: ", df_plt_mem$nclasses), levels = paste0("# Classes: ", c(5, 10, 20)))
gg = ggplot() +
geom_smooth(
data = df_plt_mem,
aes(x = nrows, y = mem, color = ptotal, group = paste0(ncols, ncolsnoise, nclasses)),
se = FALSE) +
geom_point(
data = df_plt_mem,
aes(x = nrows, y = mem, color = ptotal, group = paste0(ncols, ncolsnoise)),
size = 6,
alpha = 0.5) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(breaks = sort(unique(df_plt_mem$nrows)), trans = "log10") +
scale_color_viridis(discrete = TRUE) +
xlab("Number of rows\n(log10 Scale)") +
ylab("Used memory in MB") +
  labs(color = "Number of\nfeatures") +
coord_cartesian(clip = 'off') +
facet_grid(nclasses ~ method)#, scales= "free_y")
gg
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "../../paper-figures/figures/fig-6-cat-memory.pdf",
#filename = "figures/fig-6-cat-memory.pdf",
width = dinA4width * 1/2,
height = dinA4width * 1/2,
units = "mm")
## RUNTIME
df_plt_run = df_cat_runtime %>%
filter(method != "linear") %>%
#group_by(nrows, ncols, ncolsnoise, method, nclasses) %>%
#summarize(mem = median(time)) %>%
mutate(ptotal = ncols + ncolsnoise) %>%
mutate(method = as.factor(method)) %>%
group_by(nrows, ptotal, method, nclasses) %>%
summarize(med = median(time), min = min(time), max = max(time)) %>%
filter(ptotal %in% c(10, 40, 75, 100, 150, 300))
levels(df_plt_run$method) = list(Binary = "binary", Ridge = "ridge")
df_plt_run$ptotal = ordered(df_plt_run$ptotal, levels = sort(unique(df_plt_run$ptotal)))
df_plt_run$nclasses = factor(paste0("# Classes: ", df_plt_run$nclasses), levels = paste0("# Classes: ", c(5, 10, 20)))
dodge_width = 0.25
gg = ggplot() +
geom_point(
data = df_plt_run,
aes(x = nrows, y = med / 60, color = as.factor(ptotal)),
size = 6,
alpha = 0.5,
position = position_dodge(width = dodge_width)) +
geom_errorbar(
data = df_plt_run,
aes(x = nrows, ymax = max / 60, ymin = min / 60, color = as.factor(ptotal)),
na.rm = TRUE,
position = position_dodge(width = dodge_width),
size = 1.3) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete=TRUE) +
scale_fill_viridis(discrete=TRUE) +
xlab("Number of rows\n(log10 Scale)") +
ylab("Runtime in minutes") +
labs(color = "Number of\nfeatures", fill = "Number of\nfeatures") +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
facet_grid(nclasses ~ method)#, scales= "free_y")
dinA4width = 210 * font_scale
ggsave(
plot = gg,
#filename = "figures/fig-6-cat-runtime.pdf",
filename = "../../paper-figures/figures/fig-6-cat-runtime.pdf",
width = dinA4width * 1/2,
height = dinA4width * 1/2,
units = "mm")
## Figure 7:
## =====================================
# Data:
# - `ll_noise` list with
# $ .. `density` data.frame
# $ .. `cat_sel` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-7-cat-noise.Rda"))
gg = ggplot(
mapping = aes(
x = rel_nwrongnotselected,
y = rel_notselected,
shape = method,
color = method,
fill = method)) +
geom_polygon(
data = ll_noise$density,
alpha = 0.2,
size = 0.1) +
geom_point(data = ll_noise$cat_sel) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
xlab("FNR") +
ylab("TNR") +
labs(fill = "", color = "", shape = "") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
  scale_x_continuous(breaks = seq(0, 1, 0.2),
                     limits = c(min(ll_noise$density$rel_nwrongnotselected),
                                max(ll_noise$density$rel_nwrongnotselected))) +
  scale_y_continuous(breaks = seq(0, 1, 0.2),
                     limits = c(min(ll_noise$density$rel_notselected),
                                max(ll_noise$density$rel_notselected))) +
facet_grid(sn_ratiof ~ .)#, scales = "free_y")
gg
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-7-cat-noise.pdf",
width = dinA4width * 2/3 * 0.7,
height = dinA4width * 2/3 * 0.5,
units = "mm")
## Figure 8:
## =====================================
# Data:
# - `df_cat_bp` data.frame
load(paste0(base_dir, "/paper-figures/rda/fig-8-cat-mse.Rda"))
font_scale = 6
gg = ggplot(df_cat_bp, aes(x = mse, y = value, fill = method, color = method)) +
geom_boxplot(alpha = 0.2) +
scale_fill_viridis(discrete = TRUE) +
scale_color_viridis(discrete = TRUE) +
xlab("") +
ylab("MSE") +
labs(fill = "", color = "", shape = "") +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
ylim(0, 40) +
facet_grid(sn_ratiof ~ .) #, scales = "free_y")
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-8-cat-mse.pdf",
width = dinA4width * 2/3 * 0.7,
height = dinA4width * 2/3 * 0.5,
units = "mm")
## Figure 9:
## =====================================
## MEMORY:
load("rda/fig-9-acwb-mem.Rda")
gg = ggplot(
data = df_plt_mem %>% filter(ptotal %in% c(10, 30, 75, 100, 150, 300)),
aes(x = nrows, y = rel_mem, color = as.factor(ptotal), group = paste0(ncols, ncolsnoise))) +
geom_hline(
yintercept = 1,
color = "dark red",
lty = 2) +
#geom_line() +
geom_smooth(se = FALSE, alpha = 0.7) +
geom_point(size = 10, alpha = 0.7) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale)
) +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
scale_color_viridis(discrete = TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
ylab(expression(atop("Memory improvement\n", paste(Mem["CWB"], "/", Mem["ACWB"], sep = "")))) +
labs(color = "Number of\nFeatures") +
annotate("text",
x = max(df_plt_mem$nrows),
y = 1,
label = "Baseline (used memory is equal)",
color = "dark red",
vjust = 1.5,
hjust = 1,
size = 1.5 * font_scale)
dinA4width = 210 * font_scale
scale_fac = 1/2
ggsave(
plot = gg,
filename = "figures/fig-9-acwb-memory.pdf",
width = dinA4width * scale_fac,
height = dinA4width * scale_fac * 0.7,
units = "mm")
## RUNTIME:
load("rda/fig-9-acwb-run.Rda")
df_plt_run = df_plt_run %>%
filter(rel_time < 0.7) %>%
group_by(nrows, ptotal) %>%
summarize(med = median(rel_time), min = min(rel_time), max = max(rel_time)) %>%
filter(ptotal %in% c(10, 30, 75, 100, 150, 300))
dodge_width = 0.25
gg = ggplot() +
geom_hline(
yintercept = 1,
lty = 2,
col = "dark red") +
geom_point(
data = df_plt_run,
aes(x = nrows, y = med, color = as.factor(ptotal)),
size = 10,
alpha = 0.7,
position = position_dodge(width = dodge_width)) +
geom_errorbar(
data = df_plt_run,
aes(x = nrows, ymax = max, ymin = min, color = as.factor(ptotal)),
na.rm = TRUE,
position = position_dodge(width = dodge_width),
size = 1.3) +
#geom_violin(
#data = df_plt_run ,
#aes(x = as.factor(nrows), y = rel_time, fill = as.factor(ptotal), color = as.factor(ptotal)),
#alpha = 0.2,
#show.legend = FALSE) +
theme_minimal(base_family = "Gyre Bonum") +
theme(
strip.background = element_rect(fill = rgb(47,79,79,maxColorValue = 255), color = "white"),
strip.text = element_text(color = "white", face = "bold", size = 8 * font_scale),
axis.text.x = element_text(angle = 45, vjust = 0.5),
axis.text = element_text(size = 8 * font_scale),
axis.title = element_text(size = 10 * font_scale),
legend.text = element_text(size = 6 * font_scale),
legend.title = element_text(size = 8 * font_scale),
panel.grid.major.x = element_blank()
) +
scale_color_viridis(discrete=TRUE) +
scale_fill_viridis(discrete=TRUE) +
xlab("Number of Rows\n(log10 Scale)") +
  ylab(expression(atop("Speedup\n", paste(Time["CWB"], "/", Time["ACWB"], sep = "")))) +
labs(color = "Number of\nFeatures", fill = "Number of\nFeatures") +
scale_x_continuous(
breaks = sort(unique(df_plt_mem$nrows)),
trans = "log10") +
annotate("text",
x = max(df_plt_mem$nrows),
y = 1,
label = "Baseline (runtime is equal)",
vjust = 1.5,
color = "dark red",
hjust = 1,
size = 1.5 * font_scale) +
ylim(0.25, 1)
dinA4width = 210 * font_scale
ggsave(
plot = gg,
filename = "figures/fig-9-acwb-runtime.pdf",
width = dinA4width * scale_fac,
height = dinA4width * 0.7 * scale_fac,
units = "mm")
|
library(shiny)
library(magrittr)
library(leaflet)
library(DT)
library(dplyr)
# Load City of Philadelphia DBHIDS data on treatment resources
programs <- read.csv("dbhids_geocoded.csv")
programs <- subset(programs, select=-c(X))
# Create UI
ui <- shinyUI(fluidPage(
titlePanel("MATchmaker - Shiny Version"),
sidebarLayout(
sidebarPanel(width = 3,
tags$b("Treatments Available"),
checkboxInput("bup", "Buprenorphine", value=F),
checkboxInput("meth", "Methadone", value=F),
checkboxInput("viv", "Vivitrol", value=F),
tags$b("Other Features"),
checkboxInput("same", "Same-Day Induction", value=F)
),
mainPanel(
leafletOutput("leafletmap", width = "350px"),
dataTableOutput("tbl")
)
),
hr(),
  "Source: Philadelphia Department of Behavioral Health and Intellectual disAbility Services"  # a bare string renders as text; print() is not needed in UI code
))
# Create underlying app server
server <- function(input, output) {
  # helper to restrict data to the current map bounds (defined for later use; not called below)
in_bounding_box <- function(data, lat, lon, bounds) {
data %>%
filter(
lat > bounds$south &
lat < bounds$north &
lon < bounds$east & lon > bounds$west)
}
# function to apply a filter if a box is checked. 'else TRUE' returns the full input dataset
checkfilter <- function(checkbox, filter){
if (checkbox) filter else TRUE
}
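  # Illustration (not app logic): with the box unchecked the condition collapses
  # to TRUE, which dplyr::filter() recycles across all rows, e.g.
  #   programs %>% filter(checkfilter(FALSE, Buprenorphine. == "Yes"))  # every row kept
  #   programs %>% filter(checkfilter(TRUE,  Buprenorphine. == "Yes"))  # only "Yes" rows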
# create a dynamic dataframe called 'map_data_react' that updates as filters toggle on/off.
map_data_react <- reactive({
programs %>%
filter(
checkfilter(input$bup == TRUE, Buprenorphine. == "Yes"),
checkfilter(input$meth == TRUE, Methadone. == "Yes"),
checkfilter(input$viv == TRUE, Vivitrol. == "Yes"),
checkfilter(input$same == TRUE, Same.day.induction.during.walk.in.hours. == "Yes")
)
})
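  # map_data_react() re-evaluates automatically whenever one of the referenced
  # input$... checkboxes changes; both the map and the table read from it below,
  # so the two views always stay in sync.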
# create a leaflet map of the filtered data
output$leafletmap <- renderLeaflet({
    program_data <- map_data_react()  # call the reactive to obtain the current data frame
program_data %>% leaflet() %>%
addProviderTiles("CartoDB.Voyager") %>%
addCircleMarkers(
data = map_data_react(),
~ lon ,
~ lat,
popup = ~ paste("<b>",ProgramName,"</b>","<br>",Address),
radius = 4 ,
stroke = FALSE,
fillOpacity = 0.8,
popupOptions = popupOptions(closeButton = FALSE)
)
})
# create a datatable of the filtered data
output$tbl <- DT::renderDataTable({
DT::datatable(
map_data_react(),
extensions = "Scroller",
style = "bootstrap",
class = "compact",
width = "100%",
options = list(
deferRender = TRUE,
scrollY = 300,
scrollX = TRUE,
scroller = TRUE,
dom = 'tp'
)
)
})
}
# run the app
shinyApp(ui = ui, server = server)
|
/analyses/team08/shiny_app/dbhids_app.R
|
no_license
|
zixi-liu/datahack2020
|
R
| false
| false
| 2,879
|
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)  # provides shinytheme(), used below
library(plotly)       # provides plotlyOutput(), used below
# NOTE: df.clean3 and df.final are expected to be loaded into the global
# environment (e.g. in global.R) before this UI is built.
# Define UI for an application that maps cities matching the user's preferences
shinyUI(fluidPage(
theme = shinytheme("Readable"),
# Application title
titlePanel("Where should I live?"),
  # Preference controls: language and climate selectors plus 0-5 importance sliders
fluidRow(
column(12,
plotlyOutput("map"),
textOutput("l1")),
column(3,
selectInput("lang",
"Language",
choices = levels(factor(df.clean3$Spoken_languages)),
selected = "English"),
#selectInput("pop",
# "Population Size",
# choices = c("very small", "small", "medium", "large")),
sliderInput("pop",
"Population",
min = 0,
max = 5,
value = 0),
sliderInput("health",
"Healthcare",
min = 0,
max = 5,
value = 0),
sliderInput("tc",
"Travel Connectivity",
min = 0,
max = 5,
value = 0),
checkboxGroupInput("weatype",
"Climate Type",
choices = c(levels(factor(df.final$`Weather type`))))
),
column(3,
sliderInput("ci",
"Culture Index",
min = 0,
max = 5,
value = 0),
sliderInput("weal",
"Wealth",
min = 0,
max = 5,
value = 0),
sliderInput("cor",
"Corruption",
min = 0,
max = 5,
value = 0)
),
column(3,
sliderInput("sft",
"Safety",
min = 0,
max = 5,
value = 0),
             #I'm thinking this could include air quality, water quality, etc.
sliderInput("env",
"Environment",
min = 0,
max = 5,
value = 0),
             #I decided to exclude GDP; people tend to look more at their own finances
             #when choosing a city
sliderInput("ntrnet",
"Internet Access",
min = 0,
max = 5,
value = 0)
),
column(3,
sliderInput("edu",
"Education",
min = 0,
max = 5,
value = 0),
sliderInput("col",
"Low Cost of Living",
min = 0,
max = 5,
value = 0),
sliderInput("tax",
"Taxes",
min = 0,
max = 5,
value = 0)
)
)
)
)
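# A purely hypothetical server-side fragment (this file only defines the UI):
# the 0-5 sliders could act as weights over per-city indicator columns, along
# the lines of
#   score <- reactive({
#     w <- c(input$pop, input$health, input$tc, input$ci, input$weal, input$cor,
#            input$sft, input$env, input$ntrnet, input$edu, input$col, input$tax)
#     as.matrix(df.final[, indicator_cols]) %*% w  # 'indicator_cols' is assumed
#   })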
|
/NeveRathome/ui.R
|
no_license
|
arthurwuhoo/whereshouldIlive
|
R
| false
| false
| 3,367
|
r
|
library(fungible)
### Name: smoothLG
### Title: Smooth NPD to Nearest PSD or PD Matrix
### Aliases: smoothLG
### Keywords: statistics
### ** Examples
data(BadRLG)
out<-smoothLG(R = BadRLG, Penalty = 50000)
cat("\nGradient at solution:", out$gr,"\n")
cat("\nNearest Correlation Matrix\n")
print( round(out$RLG,8) )
################################
## Rousseeuw Molenbergh example
data(BadRRM)
out <- smoothLG(R = BadRRM, PD=TRUE)
cat("\nGradient at solution:", out$gr,"\n")
cat("\nNearest Correlation Matrix\n")
print( round(out$RLG,8) )
## Weights for the weighted solution
W <- matrix(c(1, 1, .5,
1, 1, 1,
.5, 1, 1), nrow = 3, ncol = 3)
out <- smoothLG(R = BadRRM, PD = TRUE, eps = .001)  # pass W through smoothLG's weight argument for a weighted fit (see ?smoothLG)
cat("\nGradient at solution:", out$gr,"\n")
cat("\nNearest Correlation Matrix\n")
print( round(out$RLG,8) )
print( eigen(out$RLG)$val )
## Rousseeuw Molenbergh
## non-symmetric input matrix
T <- matrix(c(.8, -.9, -.9,
-1.2, 1.1, .3,
-.8, .4, .9), nrow = 3, ncol = 3,byrow=TRUE)
out <- smoothLG(R = T, PD = FALSE, eps=.001)
cat("\nGradient at solution:", out$gr,"\n")
cat("\nNearest Correlation Matrix\n")
print( round(out$RLG,8) )
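## Quick sanity sketch: after smoothing, the eigenvalues should be
## non-negative (up to the eps tolerance used above)
min(eigen(out$RLG, symmetric = TRUE, only.values = TRUE)$values)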
|
/data/genthat_extracted_code/fungible/examples/smoothLG.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,195
|
r
|
\name{PredictFRegressNormTest}
\alias{PredictFRegressNormTest}
\title{Functional Data Analysis Using the Concurrent Model}
\usage{
PredictFRegressNormTest(PredictorMat,ResponseMat,predVec, nBasis, Basis="Fourier", lambda = 10^-1, Plot = FALSE, NormalErrors = FALSE) }
\arguments{
\item{PredictorMat}{the matrix of predictor (X) values.}
\item{ResponseMat}{the complete matrix of response (Y) values.}
\item{predVec}{a vector of X values corresponding to the missing response vector of interest.}
\item{nBasis}{the number of basis functions used to smooth the data. The optimal number can be found using the bestEst function from this package.}
\item{Basis}{the basis type. The two options are Fourier and BSpline.}
\item{lambda}{the smoothing parameter, which can be found using the choosing_lambda function.}
\item{Plot}{if TRUE, produces plots of B0(t) and B1(t) along with their respective confidence intervals.}
\item{NormalErrors}{if TRUE, returns the p-values from the Shapiro-Wilk test of normality of the errors at each index.}
}
\description{
This function produces an estimate for a missing vector y(t) given a known corresponding vector x(t). The function uses the known data to estimate B0(t) and B1(t) for the equation Y(t) = B0(t) + X(t)*B1(t), where Y(t) = ResponseMat and X(t) = PredictorMat. This function also produces a 95 percent prediction interval.}
\examples{
# Using hip and knee data from the fda package.
library(fda)
library(MASS)
library(corpcor)
hip1<-fda::gait
hip<-hip1[,, 'Hip Angle']
knee<-hip1[,, 'Knee Angle']
pred39Knee <- PredictFRegressNormTest(hip[,-39], knee[, -39], predVec = hip[,39], lambda = 10^(-2), nBasis = 17)
par( mfrow = c(1,1) )
plot(x= rownames(hip1[, , 'Knee Angle']), y = knee[, 39], type = "l", main = "Knee Angle", xlab = "Knee Angle", ylab = "Time")
lines(x= rownames(hip1[, , 'Knee Angle']), y = pred39Knee$PredictedResponse, col = "red" )
lines(x= rownames(hip1[, , 'Knee Angle']), y = pred39Knee$Lower, col = "green" )
lines(x= rownames(hip1[, , 'Knee Angle']), y = pred39Knee$Upper, col = "green" )
legend("topleft", legend=c("True Knee Angle", "Predicted Knee Angle", "95\% PI"), col=c("black", "red", "green"), lty=1, cex=0.8)
}
|
/fdaconcur/man/PredictFRegressNormTest.Rd
|
no_license
|
rpittman188/fdaconcur
|
R
| false
| false
| 2,160
|
rd
|
# Set seed to enable replication of the results
set.seed(1203)
## Dependencies
#install.packages("mlr")
#install.packages("here")
#install.packages("glmnet")
library(mlr)
library(here)
## (1) Read and clean data
train <- read.csv(here("Data", "train.csv"))
test <- read.csv(here("Data", "test.csv"))
test$medv <- 0
testID <- test$ID
train$ID <- test$ID <- NULL
## (2) Preprocessing: encode 'rad' as a factor BEFORE creating the tasks,
## since mlr tasks take a snapshot of the data at construction time
train$rad <- as.factor(train$rad)
test$rad <- as.factor(test$rad)
## (3) Define the tasks and finish preprocessing on them
trainTask <- makeRegrTask(data = train, target = "medv")
testTask <- makeRegrTask(data = test, target = "medv")
trainTask <- createDummyFeatures(trainTask)
testTask <- createDummyFeatures(testTask)
trainTask <- normalizeFeatures(trainTask)
testTask <- normalizeFeatures(testTask)
## (4) Define the learner
lrn <- makeLearner("regr.glmnet")
## (5) Tune hyperparameter (s: lambda)
ps <- makeParamSet(
makeNumericParam("s", lower = -5, upper = 2, trafo = function(x) 10^x)
)
ctrl <- makeTuneControlRandom(maxit = 100L)
rdesc <- makeResampleDesc("CV", iters = 10L)
res <- tuneParams(lrn, trainTask, rdesc, measures = rmse, par.set = ps, control = ctrl)
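# Inspect the tuning result; mlr reports res$x on the transformed scale, so it
# already holds the lambda handed to glmnet rather than its log10
res$x  # tuned value of s
res$y  # cross-validated RMSE at the optimum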
## (6) Fit the model
lrn <- setHyperPars(makeLearner("regr.glmnet"), par.vals = res$x)
model <- train(lrn, trainTask)
## (7) Make predictions
pred <- predict(model, testTask)
preds <- pred$data$response
summary(preds)  # predictions are continuous, so summary() is more informative than table()
## (8) create submission file
submission <- data.frame(ID = testID)
submission$medv <- preds
write.csv(submission, "Submissions/LassoSubmission.csv", row.names = FALSE)
|
/LassoPrediction.R
|
no_license
|
dormeir999/BoIKaggle
|
R
| false
| false
| 1,552
|
r
|
testlist <- list(lims = structure(0, .Dim = c(1L, 1L)), points = structure(c(4.34970285608805e-114, 7.27917492813417e-95, 4.6343369826479e+252, 1.11574538688949e-310, 1.78005908680576e-307, 1.71583253459489e-314, 1.59493294177796e-304, 4.94065645841247e-324, 4.94065645841247e-324, 1.05274122851034e-314, 4.23720119539941e-310, 3.23158457631843e-319, 2.71615461243555e-312, 0, 0, 0, 0, 0, 0, 0), .Dim = c(10L, 2L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612988842-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 481
|
r
|
# Internal functions
prepdata <- function(knownpts, unknownpts, matdist, bypassctrl, longlat, mask,
resolution){
if(is(knownpts, "Spatial")){knownpts <- st_as_sf(knownpts)}
if (!missing(unknownpts)){
if(is(unknownpts, "Spatial")){unknownpts <- st_as_sf(unknownpts)}
if (!missing(matdist)){
matdist <- UseDistMatrix(matdist = matdist, knownpts = knownpts,
unknownpts = unknownpts)
}else{
matdist <- CreateDistMatrix(knownpts = knownpts, unknownpts = unknownpts,
bypassctrl = bypassctrl, longlat = longlat)
}
}else{
if(missing(mask)){
mask <- knownpts
} else {
      if(is(mask, "Spatial")){mask <- st_as_sf(mask)}
}
unknownpts <- CreateGrid(w = mask, resolution = resolution,
returnclass = "sf")
matdist <- CreateDistMatrix(knownpts = knownpts, unknownpts = unknownpts,
bypassctrl = bypassctrl, longlat = longlat)
}
return(list(knownpts=knownpts, unknownpts = unknownpts, matdist = matdist))
}
UseDistMatrix <- function(matdist, knownpts, unknownpts){
i <- factor(row.names(knownpts), levels = row.names(knownpts))
j <- factor(row.names(unknownpts), levels = row.names(unknownpts))
matdist <- matdist[levels(i), levels(j)]
return(round(matdist, digits = 8))
}
ComputeInteractDensity <- function(matdist, typefct, beta, span)
{
if(typefct == "pareto") {
alpha <- (2 ^ (1 / beta) - 1) / span
matDens <- (1 + alpha * matdist) ^ (-beta)
} else if(typefct == "exponential") {
alpha <- log(2) / span ^ beta
matDens <- exp(- alpha * matdist ^ beta)
} else {
stop("Please choose a valid interaction function argument (typefct)")
}
matDens <- round(matDens, digits = 8)
return(matDens)
}
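# Illustration (comment only, since this file is sourced with the package):
# both kernels are calibrated so the density equals 0.5 at distance == span,
# e.g. with beta = 2 and span = 500:
#   ComputeInteractDensity(matrix(500), "exponential", beta = 2, span = 500)  # 0.5
#   ComputeInteractDensity(matrix(500), "pareto",      beta = 2, span = 500)  # 0.5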
ComputeOpportunity <- function(knownpts, matdens, varname = varname)
{
matOpport <- knownpts[[varname]] * matdens
return(round(matOpport, digits = 8))
}
ComputePotentials <- function(unknownpts, matopport)
{
unknownpts$OUTPUT <- apply(matopport, 2, sum, na.rm = TRUE)
return(unknownpts)
}
ComputeReilly <- function(unknownpts, matopport)
{
unknownpts$OUTPUT <- row.names(matopport)[apply(matopport, 2, which.max)]
return(unknownpts)
}
ComputeHuff <- function(unknownpts, matopport)
{
sumCol <- colSums(x = matopport, na.rm = TRUE)
matOpportPct <- 100 * t(t(matopport) / sumCol)
matOpportPct[is.na(matOpportPct) | is.infinite(matOpportPct)] <- 0
unknownpts$OUTPUT <- apply(matOpportPct, 2, max, na.rm = TRUE)
return(unknownpts)
}
ComputeSmooth <- function(unknownpts, matopport, matdens)
{
unknownpts$OUTPUT <- apply(matopport, 2, sum, na.rm = TRUE) /
colSums(matdens, na.rm = TRUE)
return(unknownpts)
}
projError <- function(x, y){
  if(!identicalCRS(x, y)){
stop("Inputs do not use the same coordinate reference system.",
call. = FALSE)
}
}
|
/R/utils.R
|
no_license
|
riatelab/SpatialPosition
|
R
| false
| false
| 2,959
|
r
|
#' Script: De-Novo Clustering
#' Author: Ilias Lagkouvardos
#'
#' Cluster microbial community profiles de novo from a phylogenetic distance
#' matrix and assess the resulting partition with a permutational
#' multivariate analysis of variance (PERMANOVA)
#'
#' Input:
#' 1. Set the path to the directory where the files are stored
#' 2. Write the name of the normalized OTU table
#' 3. Write the name of the distance table
#' 4. Write the name of the mapping file that includes the sample groups
#' 5. Write the number of clusters
#'
#' Output:
#' The script generates one graphical output (pdf) and two text files
#' 1. MDS and NMDS plots showing the sample distribution for the chosen number of clusters
#' 2. Mapping file with an additional column for the assigned cluster groups
#' 3. Table containing the medoid profiles of the clusters
#'
#' Concept:
#' Samples are clustered based on the distance matrix using partitioning around medoids (PAM)
#' The number of clusters is based on the set parameter (default 3).
#' To determine similarities between samples, a multivariate analysis is applied
#' and sample distribution is illustrated by means of MDS and NMDS (non-metric) plots
##################################################################################
###### Set parameters in this section manually ######
##################################################################################
#' Please set the directory of the script as the working folder (e.g D:/studyname/NGS-Data/Rhea/beta-diversity/)
#' Note: the path is denoted by forward slash "/"
setwd("D:/path/to/Rhea/3.Beta-Diversity/") #<--- CHANGE ACCORDINGLY !!!
#' Please give the file name of the normalized OTU-table without taxonomic classification
input_otu = "OTUs_Table-norm.tab" #<--- CHANGE ACCORDINGLY !!!
#' Please give the name of the distance matrix (generated by beta-diversity)
input_distance = "distance-matrix-gunif.tab" #<--- CHANGE ACCORDINGLY !!!
#' Please give the name of the meta-file that contains individual sample information
input_meta = "mapping_file.tab" #<--- CHANGE ACCORDINGLY !!!
#' Please give the number of clusters
#' Default number of clusters is 3
cluster_number = 3 #<--- CHANGE ACCORDINGLY !!!
###### NO CHANGES ARE NEEDED BELOW THIS LINE ######
##################################################################################
###### Main Script ######
##################################################################################
################### Load all required libraries ########################
# Check if required packages are already installed, and install if missing
packages <-c("cluster","ade4","GUniFrac","phangorn","vegan")
# Function to check whether the package is installed
InsPack <- function(pack)
{
if ((pack %in% installed.packages()) == FALSE) {
install.packages(pack,repos ="http://cloud.r-project.org/")
}
}
# Applying the installation on the list of packages
lapply(packages, InsPack)
# Make the libraries
lib <- lapply(packages, require, character.only = TRUE)
# Check if it was possible to install all required libraries
flag <- all(as.logical(lib))
################### Read all required input files ####################
# Load the mapping file containing individual sample information (sample names in the first column)
meta_file <- read.table (file = input_meta, check.names = FALSE, header = TRUE, dec = ".", sep = "\t", row.names = 1, comment.char = "")
# Clean table from empty lines
meta_file <- meta_file[!apply(is.na(meta_file) | meta_file=="",1,all),,drop=FALSE]
#------------------------------------------------------------------------
# Load the tab-delimited file containing the values to be analyzed (samples names in the first column)
otu_file <- read.table (file = input_otu, check.names = FALSE, header = TRUE, dec = ".", sep = "\t", row.names = 1, comment.char = "")
# Clean table from empty lines
otu_file <- otu_file[!apply(is.na(otu_file) | otu_file =="",1,all),]
# keep only those rows that appear in the mapping file
otu_file <- otu_file[,rownames(meta_file)]
#------------------------------------------------------------------------
# Load the distance file containing individual sample information (sample names in the first column)
unifract_dist <- read.table(input_distance, header=T, row.names=1, dec=".", sep="\t")
# Create the directory where all output files are saved (is named after the number of clusters set above for comparisons)
folder_name <- paste(cluster_number,"De-Novo-Clustering",sep="_")
dir.create(folder_name)
#################### Calculate beta-diversity ###################
# Order the mapping file by sample names (ascending)
meta_file <- meta_file[order(row.names(meta_file)),,drop=FALSE]
# Generate vector with assigned group to each sample
pam <- pam(as.dist(unifract_dist), cluster_number, diss=TRUE)
data_cluster <- as.vector(pam$clustering)
# Store the medoids of the clusters
medoids <- data.frame(otu_file[,pam$medoids])
colnames(medoids) <- pam$medoids
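# Quick look at the partition: cluster sizes and the average silhouette width
# (helpful when trying alternative values of cluster_number)
print(table(data_cluster))
print(pam$silinfo$avg.width)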
# Add column to mapping file with information about cluster group
meta_file <- cbind(data_cluster,meta_file)
# Calculate the NMDS plot (Non-metric Multidimensional Scaling plot)
meta <- metaMDS(unifract_dist,k = 2)
# Calculate the significance of variance to compare multivariate sample means (including two or more dependent variables)
adonis<-adonis2(as.dist(unifract_dist) ~ data_cluster)
permdisp <- permutest(betadisper(as.dist(unifract_dist),as.factor(data_cluster),type="median"))
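# Interpretation note: adonis2 (PERMANOVA) tests for differences in group
# location, whereas permutest/betadisper (PERMDISP) tests for differences in
# group dispersion; a significant PERMANOVA alongside a non-significant
# PERMDISP supports genuinely separated clusters rather than unequal spread.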
# Generated MDS and NMDS plot are saved in one pdf file (named after number of set clusters)
file_name <- paste(cluster_number,"de-novo-clustering.pdf",sep="_")
pdf(paste(folder_name,"/",file_name,sep=""))
# Calculate and display MDS plot
# Groups are based on number of set clusters
s.class(cmdscale(as.dist(unifract_dist), k = 2), col = rainbow(cluster_number), cpoint = 2,
fac=as.factor(data_cluster), grid=T,
sub = paste("MDS plot of Microbial Profiles\nPERMDISP p=",permdisp[["tab"]][["Pr(>F)"]][1],"\n",
"PERMANOVA p=",adonis[1,5],sep="")
)
# Display NMDS plot
# Groups are based on number of set clusters
s.class(meta$points, col = rainbow(cluster_number), cpoint = 2,
fac=as.factor(data_cluster), grid=T,
sub = paste("metaNMDS plot of Microbial Profiles\nPERMDISP p=",permdisp[["tab"]][["Pr(>F)"]][1],"\n",
"PERMANOVA p=",adonis[1,5],sep="")
)
dev.off()
#################################################################################
###### Write Output Files ######
#################################################################################
# Write mapping file with additional cluster variable
write.table(meta_file,paste(folder_name,"/","mapping_file.tab",sep=""),sep = "\t",col.names = NA)
# Write the medoids of the clusters
write.table(medoids,paste(folder_name,"/","medoids.tab",sep=""),sep = "\t",col.names = NA)
# Write the modified mapping file and copy in directory Serial-Group-Comparisons if existing
suppressWarnings (try(write.table(meta_file, "../5.Serial-Group-Comparisons/mapping_file.tab", sep = "\t",col.names = NA, quote = FALSE), silent =TRUE))
if(!flag) { stop("
    It was not possible to install all required R libraries properly.
                 Please check the installation of all required libraries manually.\n
                 Required libraries: cluster, ade4, GUniFrac, phangorn, vegan")
}
#################################################################################
###### End of Script ######
#################################################################################
|
/3.Beta-Diversity/De-novo-Clustering.R
|
permissive
|
Lagkouvardos/Rhea
|
R
| false
| false
| 7,887
|
r
|
#' Script: De-Novo Clustering
#' Author: Ilias Lagkouvardos
#'
#' Calculate beta-diversity for microbial communities
#' based on permutational mulitvariate analysis of variances (PERMANOVA) using multiple distance matrices
#' computed from phylogenetic distances between observed organisms
#'
#' Input:
#' 1. Set the path to the directory where the file is stored
#' 2. Write the name of the mapping file that includes the samples groups
#' 4. Write the name of the distance table
#' 5. Write the number of clusters
#'
#' Output:
#' The script generates three graphical outputs (pdf) and one text file
#' 1. MDS and NMDS plots showing information about beta-diversity for number of clusters
#' 2. Add additional column to the mapping file for assigned cluster groups
#'
#' Concept:
#' Samples are clustered based on the distance matrix using the Ward's hierarchical clustering method
#' The number of clusters is based on the set parameter (default 3).
#' To determine similarities between samples, a multivariate analysis is applied
#' and sample distribution is illustrated by means of MDS and NMDS (non-metric) plots
##################################################################################
###### Set parameters in this section manually ######
##################################################################################
#' Please set the directory of the script as the working folder (e.g D:/studyname/NGS-Data/Rhea/beta-diversity/)
#' Note: the path is denoted by forward slash "/"
setwd("D:/path/to/Rhea/3.Beta-Diversity/") #<--- CHANGE ACCORDINGLY !!!
#' Please give the file name of the normalized OTU-table without taxonomic classification
input_otu = "OTUs_Table-norm.tab" #<--- CHANGE ACCORDINGLY !!!
#' Please give the name of the distance matrix (generated by beta-diversity)
input_distance = "distance-matrix-gunif.tab" #<--- CHANGE ACCORDINGLY !!!
#' Please give the name of the meta-file that contains individual sample information
input_meta = "mapping_file.tab" #<--- CHANGE ACCORDINGLY !!!
#' Please give the number of clusters
#' Default number of clusters is 3
cluster_number = 3 #<--- CHANGE ACCORDINGLY !!!
###### NO CHANGES ARE NEEDED BELOW THIS LINE ######
##################################################################################
###### Main Script ######
##################################################################################
################### Load all required libraries ########################
# Check if required packages are already installed, and install if missing
packages <-c("cluster","ade4","GUniFrac","phangorn","vegan")
# Function to check whether the package is installed
InsPack <- function(pack)
{
if ((pack %in% installed.packages()) == FALSE) {
install.packages(pack,repos ="http://cloud.r-project.org/")
}
}
# Applying the installation on the list of packages
lapply(packages, InsPack)
# Make the libraries
lib <- lapply(packages, require, character.only = TRUE)
# Check if it was possible to install all required libraries
flag <- all(as.logical(lib))
################### Read all required input files ####################
# Load the mapping file containing individual sample information (sample names in the first column)
meta_file <- read.table (file = input_meta, check.names = FALSE, header = TRUE, dec = ".", sep = "\t", row.names = 1, comment.char = "")
# Clean table from empty lines
meta_file <- meta_file[!apply(is.na(meta_file) | meta_file=="",1,all),,drop=FALSE]
#------------------------------------------------------------------------
# Load the tab-delimited file containing the values to be analyzed (samples names in the first column)
otu_file <- read.table (file = input_otu, check.names = FALSE, header = TRUE, dec = ".", sep = "\t", row.names = 1, comment.char = "")
# Clean table from empty lines
otu_file <- otu_file[!apply(is.na(otu_file) | otu_file =="",1,all),]
# keep only those rows that appear in the mapping file
otu_file <- otu_file[,rownames(meta_file)]
#------------------------------------------------------------------------
# Load the distance file containing individual sample information (sample names in the first column)
unifract_dist <- read.table(input_distance, header=T, row.names=1, dec=".", sep="\t")
# Create the directory where all output files are saved (is named after the number of clusters set above for comparisons)
folder_name <- paste(cluster_number,"De-Novo-Clustering",sep="_")
dir.create(folder_name)
#################### Calculate beta-diversity ###################
# Order the mapping file by sample names (ascending)
meta_file <- meta_file[order(row.names(meta_file)),,drop=FALSE]
# Generate vector with assigned group to each sample
pam <- pam(as.dist(unifract_dist), cluster_number, diss=TRUE)
data_cluster <- as.vector(pam$clustering)
# Store the medoids of the clusters
medoids <- data.frame(otu_file[,pam$medoids])
colnames(medoids) <- pam$medoids
# Add column to mapping file with information about cluster group
meta_file <- cbind(data_cluster,meta_file)
# Calculate the NMDS plot (Non-metric Multidimensional Scaling plot)
meta <- metaMDS(unifract_dist,k = 2)
# Calculate the significance of variance to compare multivariate sample means (including two or more dependent variables)
adonis<-adonis2(as.dist(unifract_dist) ~ data_cluster)
permdisp <- permutest(betadisper(as.dist(unifract_dist),as.factor(data_cluster),type="median"))
# Generated MDS and NMDS plot are saved in one pdf file (named after number of set clusters)
file_name <- paste(cluster_number,"de-novo-clustering.pdf",sep="_")
pdf(paste(folder_name,"/",file_name,sep=""))
# Calculate and display MDS plot
# Groups are based on number of set clusters
s.class(cmdscale(unifract_dist, k = 2), col = rainbow(cluster_number), cpoint = 2,
fac=as.factor(data_cluster), grid=T,
sub = paste("MDS plot of Microbial Profiles\nPERMDISP p=",permdisp[["tab"]][["Pr(>F)"]][1],"\n",
"PERMANOVA p=",adonis[1,5],sep="")
)
# Display NMDS plot
# Groups are based on number of set clusters
s.class(meta$points, col = rainbow(cluster_number), cpoint = 2,
fac=as.factor(data_cluster), grid=T,
sub = paste("metaNMDS plot of Microbial Profiles\nPERMDISP p=",permdisp[["tab"]][["Pr(>F)"]][1],"\n",
"PERMANOVA p=",adonis[1,5],sep="")
)
dev.off()
#################################################################################
###### Write Output Files ######
#################################################################################
# Write mapping file with additional cluster variable
write.table(meta_file,paste(folder_name,"/","mapping_file.tab",sep=""),sep = "\t",col.names = NA)
# Write the medoids of the clusters
write.table(medoids,paste(folder_name,"/","medoids.tab",sep=""),sep = "\t",col.names = NA)
# Write the modified mapping file and copy in directory Serial-Group-Comparisons if existing
suppressWarnings (try(write.table(meta_file, "../5.Serial-Group-Comparisons/mapping_file.tab", sep = "\t",col.names = NA, quote = FALSE), silent =TRUE))
if(!flag) { stop("
It was not possible to install all required R libraries properly.
Please check the installation of all required libraries manually.\n
Required libraries: cluster, ade4, GUniFrac, phangorn, vegan")
}
#################################################################################
###### End of Script ######
#################################################################################
|
# Read in the data
library(ggplot2)
dados <- read.table("/home/alan/Github/INE5649-StatisticalPredictionTechniques/carros.txt", # adjust the path
stringsAsFactors = T, # strings are read as factors
header=T) # the first line of the file holds the variable labels
head(dados) # print the first rows of the data frame
g2 = ggplot(data=dados,
aes(x=quilometragem,y=valor))+ # mileage on x, value on y, matching the model fitted below
geom_point()+
labs(x = 'KM', y = 'Valor') +
theme_minimal()
modelo2 = lm(valor ~ quilometragem, data = dados)
coef(modelo2)
g2 + geom_smooth(method='lm', se=F)
summary(modelo2)
# scatter plot of the standardized residuals
ggplot(data = modelo2) + # main call, using the fitted model as data
geom_point(aes(x=.fitted, # fitted values
y=.stdresid)) + # standardized residuals
geom_hline(yintercept = 0) + # horizontal line at y=0
labs(x = 'Valores preditos', # axis labels
y = 'Resíduos padronizados') +
theme_minimal()
# QQ plot
ggplot(data = modelo2, # using the fitted model as data
aes(sample = .stdresid)) + # map the standardized residuals to sample
stat_qq() + # draw the points of the plot
stat_qq_line() + # draw the reference line of the plot
labs(x = 'Valores esperados pela normal', # axis labels
y = 'Resíduos padronizados') +
theme_minimal()
# histogram of the standardized residuals
ggplot(data = modelo2) + # using the fitted model as data
geom_histogram(aes(x = .stdresid), # standardized residuals
bins = 5, # number of histogram bars
fill = 'lightgrey', # fill colour
colour = 'black') + # border colour
labs(x = 'Resíduos padronizados', # axis labels
y = 'Frequência')
# Shapiro-Wilk test
shapiro.test(rstandard(modelo2)) # rstandard() computes the standardized residuals
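# For reference -- a sketch of what rstandard() computes for an lm fit
# (internally studentized residuals); this equivalence is illustrative,
# not part of the original exercise:
# h <- hatvalues(modelo2)
# s <- summary(modelo2)$sigma
# all.equal(unname(residuals(modelo2)/(s*sqrt(1-h))), unname(rstandard(modelo2)))  # TRUE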
|
/residuos-valor-km.R
|
no_license
|
alanensina/INE5649-StatisticalPredictionTechniques
|
R
| false
| false
| 1,934
|
r
|
|
##*****************************************************************************************************
## For complete background and details, please refer to:
## Shankar J. et al. Looking beyond respiratory cultures: Microbiome-cytokine signatures of bacterial
## pneumonia and tracheobronchitis in lung transplant recipients. Am J Transplant. Wiley Online Library;
## 2015; Available from: http://dx.doi.org/10.1111/ajt.13676
##
##
## Modeling was performed under the following environment:
##
## > sessionInfo()
## R version 3.2.0 (2015-04-16)
## Platform: x86_64-apple-darwin13.4.0 (64-bit)
## Running under: OS X 10.10.3 (Yosemite)
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] parallel grid stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] BoomSpikeSlab_0.5.2 Boom_0.2 MASS_7.3-40 doMC_1.3.3 iterators_1.0.7
## [6] foreach_1.4.2 data.table_1.9.4 RColorBrewer_1.1-2 plyr_1.8.2 scales_0.2.4
## [11] reshape_0.8.5 ggplot2_1.0.1 rlecuyer_0.3-3
##
## loaded via a namespace (and not attached):
## [1] Rcpp_0.11.6 magrittr_1.5 munsell_0.4.2 colorspace_1.2-6 stringr_1.0.0 tools_3.2.0
## [7] gtable_0.1.2 digest_0.6.8 reshape2_1.4.1 codetools_0.2-11 stringi_0.5-5 chron_2.3-45
## [13] proto_0.3-10
##
## This script provides the code for estimating the BMA models.
## ******************************************************************************************************
##### 1. Load required libraries #####
## clone the GitHub directory and set it as the working directory
## setwd('/path_to_GitHub_clone')
## Create a directory 'output' for saving output from the analysis
dir.create("./output", showWarnings = T, recursive = T)
##### 2. Load algorithms and project data #####
source(file = "./lungbiome_utilities.R")
source(file = "./lungbiome_algorithms.R")
source(file = "./lungbiome_loadprojectdata.R")
##### 3. Run BMA MLR classification model for pneumonia, tracheobronchitis and colonization #####
runmlrbmamodel <- function(responsevar, bmaiterations, regdata) {
yvar <- regdata[, responsevar]
xvar <- regdata[, setdiff(colnames(regdata), responsevar)]
## eliminate variables that are zero throughout or have a constant value in all columns
zeroes <- which(colSums(xvar) == 0)
if (length(zeroes) > 0) {
xvar <- xvar[, -zeroes]
}
  ## set the expected model size to the lowest that works: 8 for a 3-level outcome (addtomodelsize = 5, plus the 3 levels of the outcome)
bmaresults <- runmlrbma(x = xvar, y = yvar, addtomodelsize = 5, iter = bmaiterations,
seed = 101)
## Save the model results
saveRDS(object = bmaresults, file = "./output/lungbiome_MLR_diagnoses.RDS")
}
runmlrbmamodel(responsevar = "diagnosis_simple_code",
bmaiterations = 80000,
regdata = regmatrix_diagnoses)
##### 4. Run BMA linear regression model for checking association of microbiome diversity with therapy #####
runlinearbma <- function(regmatrix, bmaiterations) {
## Eliminate variables that are zero throughout or have a constant value in all columns.
zeroes <- which(sapply(colnames(regmatrix), function(cname) {
out <- max(regmatrix[, cname]) - min(regmatrix[, cname]) == 0
return(out)
}))
if (length(zeroes) > 0) {
regmatrix <- regmatrix[, -zeroes]
}
yvector <- regmatrix[, "invsimpson"]
xvars <- setdiff(colnames(regmatrix), "invsimpson")
xmatrix <- regmatrix[, xvars]
bmaresults <- runbmac(x = xmatrix, y = yvector, defaultmode = "gaussian",
modelsizearray = c(1, 3, 5), iter = bmaiterations, seed = 101)
## Save the model results
saveRDS(object = bmaresults, file = "./output/lungbiome_linearreg_therapy.RDS")
}
runlinearbma(regmatrix = regmatrix_therapy, bmaiterations = 80000)
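## Illustrative only: the wrappers used above (runmlrbma, runbmac) are defined in
## lungbiome_algorithms.R and are not shown here. As a rough, hedged sketch of the
## kind of spike-and-slab fit BoomSpikeSlab provides (toy data, not project data,
## and not necessarily the authors' exact call):
## set.seed(101)
## x <- matrix(rnorm(100 * 10), 100, 10)
## y <- 2 * x[, 1] - 1.5 * x[, 3] + rnorm(100)
## toy <- data.frame(y = y, x)
## fit <- lm.spike(y ~ ., niter = 2000, data = toy)
## summary(fit)  # posterior inclusion probabilities should flag X1 and X3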
## At the end of this script, you should have the following files:
## ./output/lungbiome_MLR_diagnoses.RDS
## ./output/lungbiome_linearreg_therapy.RDS
##### Next scripts: Extract model findings and evaluate model #####
## Extract model findings: lungbiome_modelextraction.R
## Evaluate model performance: lungbiome_evaluation.R
|
/lungbiome_bmamodeling.R
|
permissive
|
openpencil/lungbiome
|
R
| false
| false
| 4,306
|
r
|
|
library(ggplot2)
library(plyr)
library(dplyr)
library(stringr)
# WAR to-do:
# Create line history CSV
# metric for aging curve
# metric for season performance versus regression
# bayesian look at things (stochastic interval)
# close look at context
TotalWAR <- read.csv("Total WAR.csv")
SeasonalWAR <- read.csv("Seasonal WARs.csv")
PlayerStats <- read.csv("Player Stats.csv")
LineHistory <- read.csv("Line History.csv")
summary(TotalWAR)
summary(SeasonalWAR)
#Running a projection for a sample team
Season1415 <- filter(SeasonalWAR, season == 20142015)
Player1415 <- filter(PlayerStats, season == 20142015)
EDMPlayer1415 <- filter(Player1415, Team == "EDM") %>% select(Name, pos, Team, season, TOI, TOI.)
gsub("\\.", "", EDMPlayer1415$Name, fixed=TRUE)
SampleTeam <- filter(Season1415, team == "EDM")
TeamProject <- merge(SampleTeam, EDMPlayer1415, by="Name")
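# A possible next step (hypothetical sketch, not from the original script):
# regress each player's WAR toward the league mean in proportion to ice time
# before summing to a team projection. Assumes TeamProject carries a WAR
# column from SampleTeam and that TOI is minutes played; the 500-minute
# ballast is arbitrary.
# league_mean <- mean(SeasonalWAR$WAR, na.rm = TRUE)
# TeamProject %>%
#   mutate(w = TOI / (TOI + 500),
#          projWAR = w * WAR + (1 - w) * league_mean) %>%
#   summarise(team_projection = sum(projWAR, na.rm = TRUE))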
|
/WAR.R
|
no_license
|
dbrait/NHL
|
R
| false
| false
| 863
|
r
|
|
#===============================================================================================
# Getting and Cleaning Data Class Assignment
#===============================================================================================
# The purpose of this project is to demonstrate your ability to collect, work with, and clean a data set.
# The goal is to prepare tidy data that can be used for later analysis. You will be graded by your peers
# on a series of yes/no questions related to the project. You will be required to submit:
# 1) a tidy data set as described below,
# 2) a link to a Github repository with your script for performing the analysis, and
# 3) a code book that describes the variables, the data, and any transformations or work that you performed
# to clean up the data called CodeBook.md. You should also include a README.md in the repo with your scripts.
# This repo explains how all of the scripts work and how they are connected.
# One of the most exciting areas in all of data science right now is wearable computing - see for example
# this article . Companies like Fitbit, Nike, and Jawbone Up are racing to develop the most advanced
# algorithms to attract new users. The data linked to from the course website represent data collected
# from the accelerometers from the Samsung Galaxy S smartphone. A full description is available at the
# site where the data was obtained:
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
# Here are the data for the project:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
# You should create one R script called run_analysis.R that does the following.
#===============================================================================================
# Extracts only the measurements on the mean and standard deviation for each measurement.
# Uses descriptive activity names to name the activities in the data set
# Appropriately labels the data set with descriptive activity names.
## Read in the Ydata
ytest<-read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/test/y_test.txt",header=F,col.names=c("ActivityID"))
ytr<-read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/train/y_train.txt",header=F,col.names=c("ActivityID"))
## Read in the Subject ID Data
SubTest<-read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/test/subject_test.txt",header=F,col.names=c("SubjectID"))
SubTr<-read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/train/subject_train.txt",header=F,col.names=c("SubjectID"))
## Read in the File Features with Column Names
det<-read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/features.txt", header=F, as.is=T, col.names=c("MeasureID", "MeasureName"))
## Read the X Data assigning column names from the features file
Xtest<-read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/test/X_test.txt",header=F, col.names=det$MeasureName)
Xtr<-read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/train/X_train.txt",header=F, col.names=det$MeasureName)
## Identify the indices of the mean() and std() features
sub_det<- grep(".*mean\\(\\)|.*std\\(\\)", det$MeasureName)
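## (For reference: on this dataset the regex above should select 66 feature names
## such as "tBodyAcc-mean()-X" and "tBodyAcc-std()-X"; inspect them with
## head(det$MeasureName[sub_det]) if in doubt.)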
## Subset the X data on the subset features
Xtest<-Xtest[,sub_det]
Xtr<-Xtr[,sub_det]
## Append the activity and Subject IDs
Xtest$ActivityID<-ytest$ActivityID
Xtest$SubjectID<-SubTest$SubjectID
Xtr$ActivityID<-ytr$ActivityID
Xtr$SubjectID<-SubTr$SubjectID
## Merge the update X files
data<-rbind(Xtest, Xtr)
cnames<-colnames(data)
cnames<-gsub("\\.+mean\\.+", cnames, replacement="Mean")
cnames<-gsub("\\.+std\\.+", cnames, replacement="Std")
colnames(data)<-cnames
## Add an activity names column
act<-read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/activity_labels.txt", header=F, as.is=T, col.names=c("ActivityID", "ActivityName"))
act$ActivityName<-as.factor(act$ActivityName)
lab_data<-merge(data,act)
#===============================================================================================
# Creates a second, independent tidy data set with the average of each variable for each
# activity and each subject.
library(reshape2)
## melt the dataset
id_vars=c("ActivityID", "ActivityName", "SubjectID")
measure_vars=setdiff(colnames(lab_data), id_vars)
melt_dat<-melt(lab_data, id=id_vars, measure.vars=measure_vars)
## recast
recas<-dcast(melt_dat, ActivityName + SubjectID ~ variable, mean)
## Create the tidy data set and save it on to the named file
write.table(recas,"/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/tidy_data.txt")
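## Optional sanity check (not part of the assignment): read the tidy set back and
## confirm its shape -- 180 rows (30 subjects x 6 activities) by 68 columns
## (ActivityName, SubjectID and 66 averaged features) if all steps ran as above.
## tidy <- read.table("/Users/Tyson/Desktop/DS3_Getting_and_Cleaning Data/UCI HAR Dataset/tidy_data.txt", header = TRUE)
## dim(tidy)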
#===============================================================================================
|
/run_analysis.R
|
no_license
|
tysond7414/run_analysis.R
|
R
| false
| false
| 4,869
|
r
|
|
#returns the reached p-value for the interval along with the positions (start, stop, p.value)
#if the third value is larger than the confidence level, it is not a valid region.
#616 not a valid peak
#617 the examined window was too small compared to the internal windowsize (minWindow)
#919 too few markers after removal of 0-sum markers and filtering; too few means fewer than ten, or that nearly all remaining markers are constant
#920 no interval reached the requested confidence level (default return code)
#chromosome, positions, background_count, foreground_count and error_count are vectors of the same length
#require(EMT)
ShoreMap.confint <- function(chromosome,positions, background_count, foreground_count, error_count, foreground_frequency=1, level=0.99, recurse=FALSE, forceInclude=TRUE, allowAdjustment=0.0, filterOutliers=200000, filterPValue=0.05, winSize=50000, winStep=10000, minMarker=10, minCoverage=0, maxCoverage=Inf, peakFinding=3, peakWinSize=50000, peakWinStep=10000) {
verbose=FALSE
if(verbose){
print(paste("foreground_frequency=", foreground_frequency), sep="")
print(paste("level=", level), sep="")
print(paste("recurse=", recurse), sep="")
print(paste("forceInclude=", forceInclude), sep="")
print(paste("allowAdjustment=", allowAdjustment), sep="")
print(paste("filterOutliers=", filterOutliers), sep="")
print(paste("filterPValue=", filterPValue), sep="")
print(paste("winSize=", winSize), sep="")
print(paste("winStep=", winStep), sep="")
print(paste("minMarker=", minMarker), sep="")
print(paste("minCoverage=", minCoverage), sep="")
print(paste("maxCoverage=", maxCoverage), sep="")
print(paste("peakFinding=", peakFinding), sep="") #3 is boost, 4 is R
print(paste("peakWinSize=", peakWinSize), sep="")
print(paste("peakWinStep=", peakWinStep), sep="")
}
minMarker<-max(1,minMarker)
foreground_frequency<-as.numeric(foreground_frequency)
internalData<- cbind(chromosome,positions,foreground_count,background_count,error_count)
internalData<- internalData[rowSums(internalData[,3:5])>minCoverage&rowSums(internalData[,3:5])<maxCoverage,]
print(paste("Analysing chr ",chromosome[1],", with ",length(chromosome)," (",length(internalData[,1]),") markers for equality to ",foreground_frequency,"(",typeof(foreground_frequency),")",sep=""))
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
#apply filtering here:
filtered=c();
if(filterOutliers>0){ #condition
#filterOutliers is the windowsize to use
f<-filterSampling(internalData,as.numeric(filterOutliers),as.numeric(filterPValue),FALSE)
print(paste("Removed: ",sum(!f)," markers as outliers"))
filtered<-internalData[!f,2]
internalData<- internalData[f,]
}
assign("dataset_shoremapmle",internalData,".GlobalEnv")
freqs<-internalData[,3]/rowSums(internalData[,3:5])
assign("i_shoremapmle",0,".GlobalEnv")
minWindow<-max(minMarker,2)
bestsize<-max(minMarker,2)
bestsize<-max(bestsize,minWindow)
# print(paste("Bestsize:",bestsize))
if(bestsize<length(dataset_shoremapmle[,1])){
avg_pos<-c(sapply(seq(0,winSize-1,winStep),function(shift){
windows<- floor((internalData[,2]+shift)/winSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
tapply(internalData[windowsToUse,2],windows[windowsToUse],mean)
}),recursive=TRUE)
avg_freq<-c(sapply(seq(0,winSize-1,winStep),function(shift){
windows<- floor((internalData[,2]+shift)/winSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
tapply(internalData[windowsToUse,3],windows[windowsToUse],sum)/tapply(rowSums(internalData[windowsToUse,3:5]),windows[windowsToUse],sum)
}),recursive=TRUE)
avg_R<-c(sapply(seq(0,winSize-1,winStep),function(shift){
windows<- floor((internalData[,2]+shift)/winSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
allele<-tapply(internalData[windowsToUse,3],windows[windowsToUse],sum)
ref<-tapply(internalData[windowsToUse,4],windows[windowsToUse],sum)
ret<-pmax(allele/ref,ref/allele)
ret[ret==1]<-0
rMax=max(ret[!is.infinite(ret)])
ret[is.infinite(ret)]<-rMax
ret
}),recursive=TRUE)
avg_boost<-abs(1/(1-max(foreground_frequency,1-foreground_frequency)/pmax(avg_freq,1-avg_freq)))
boostMax<-max(avg_boost[!is.infinite(avg_boost)])
avg_boost[is.infinite(avg_boost)]<-boostMax
avg_posFreq<-cbind(avg_pos,avg_freq,avg_boost,avg_R)
avg_posFreq<-t(sapply(sort(avg_posFreq[,1],index.return=T)$ix,function(x) avg_posFreq[x,]))
ci<-matrix(c(0,0,920,1,0),nrow=5)
if(level[1]<=1){
peak_pos<-c(sapply(seq(0,peakWinSize-1,peakWinStep),function(shift){
windows<- floor((internalData[,2]+shift)/peakWinSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
tapply(internalData[windowsToUse,2],windows[windowsToUse],mean)
}),recursive=TRUE)
peak_freq<-c(sapply(seq(0,peakWinSize-1,peakWinStep),function(shift){
windows<- floor((internalData[,2]+shift)/peakWinSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
tapply(internalData[windowsToUse,3],windows[windowsToUse],sum)/tapply(rowSums(internalData[windowsToUse,3:5]),windows[windowsToUse],sum)
}),recursive=TRUE)
peak_R<-c(sapply(seq(0,peakWinSize-1,peakWinStep),function(shift){
windows<- floor((internalData[,2]+shift)/peakWinSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
allele<-tapply(internalData[windowsToUse,3],windows[windowsToUse],sum)
ref<-tapply(internalData[windowsToUse,4],windows[windowsToUse],sum)
ret<-pmax(allele/ref,ref/allele)
ret[ret==1]<-0
rMax=max(ret[!is.infinite(ret)])
ret[is.infinite(ret)]<-rMax
ret
}),recursive=TRUE)
peak_boost<-abs(1/(1-max(foreground_frequency,1-foreground_frequency)/pmax(peak_freq,1-peak_freq)))
boostMax<-max(peak_boost[!is.infinite(peak_boost)])
peak_boost[is.infinite(peak_boost)]<-boostMax
peak_posFreq<-cbind(peak_pos,peak_freq,peak_boost,peak_R)
peak_posFreq<-t(sapply(sort(peak_posFreq[,1],index.return=T)$ix,function(x) peak_posFreq[x,]))
peak_minIndex<-which(peak_posFreq[,peakFinding]==max(peak_posFreq[,peakFinding]))
print(paste("Finding initial peak(s).. choosen method in a window of size ",peakWinSize," bp with step size of ", peakWinStep, " bp",sep=""))
for(index in peak_minIndex){
print(paste(" At (avg(pos) in window): ",round(peak_posFreq[index,1])," bp",sep=""))
}
#order confidence levels
level<-sort(level)
res<- identify_peaks(1,length(internalData[,2]),foreground_frequency,level,minWindow,peak_posFreq[,c(1,peakFinding)],bestsize,recurse,forceInclude, allowAdjustment)
res<-matrix(res[res[,3]<0,],ncol=4)
if(!is.null(dim(res))&&dim(res)[1]>0){
ci<-matrix(apply(res,1,function(x) t(c(start=ifelse(x[3]<0,internalData[max(x[1]-1,1),2]+1,0), stop=ifelse(x[3]<0,internalData[min(x[1]+x[2],length(internalData[,2])),2]-1,0),p.value=ifelse(x[3]<0,x[4]+x[3]+x[2],x[3]),level=x[4],nrOfMarkers= x[2]))),nrow=5)
print("Found interval:")
for(i in 1:length(ci[1,])){
print(paste(ci[1,i],"-",ci[2,i],"level:",ci[4,i]))
}
}
}
list(confidenceInterval=ci,excluded=filtered,averaged=avg_posFreq)
}else{
#too few markers
print("Too few markers")
list(confidenceInterval=matrix(c(0,0,919,1,0),nrow=5),excluded=filtered,averaged=matrix(c(-1,-1,-1,-1),ncol=4))
}
}
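#Illustrative call (hypothetical file and column names -- adjust to your own data);
#the marker table is assumed to hold one row per SNP with counts for the
#foreground allele, the background allele and sequencing errors:
# markers <- read.table("markers_chr1.txt", header = TRUE)
# ci <- ShoreMap.confint(chromosome = markers$chr, positions = markers$pos,
#                        background_count = markers$bg, foreground_count = markers$fg,
#                        error_count = markers$err,
#                        foreground_frequency = 1, level = c(0.95, 0.99))
# ci$confidenceInterval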
#version5
filterSampling<-function(internalData,fs_windowsize=200000,fs_limit=0.05,fs_exact=FALSE){
size<-length(internalData[,2])
chrStart<-min(internalData[,2])
chrEnd<-max(internalData[,2])
indices<-1:size
largeWindow<-fs_windowsize*4 #use four times the window size so each test window is fully enclosed in a frame
windows1<-floor((internalData[,2])/largeWindow)
uniqueWin1<-unique(windows1)
windows2<-floor((internalData[,2]+largeWindow/2)/largeWindow)
uniqueWin2<-unique(windows2)
freqs<-internalData[,3]/rowSums(internalData[,3:5])
diffDataRaw<-abs(diff(freqs))
diffDataRaw<-c(diffDataRaw[1],diffDataRaw)+c(diffDataRaw,diffDataRaw[size-1])
diffDataMod<-diffDataRaw
allPos<-internalData[,2]
#dividing by size corresponds to a Bonferroni adjustment of the p-values below for the number of tests
limit<-fs_limit/size
#use two frames and choose the frame closest to the current marker position
data1<-tapply(indices,windows1,function(x) data.frame(internalData[x,2] ,internalData[x,3] ,internalData[x,4] ,internalData[x,5] ,rep(FALSE,length(x))))
data2<-tapply(indices,windows2,function(x) data.frame(internalData[x,2] ,internalData[x,3] ,internalData[x,4] ,internalData[x,5] ,rep(FALSE,length(x))))
filtered<-c()
for(i in indices){
#get marker to test this iteration
curIndex<-which.max(diffDataMod)
curPos<-allPos[curIndex]
#start and end of window
start<-max(chrStart,curPos-fs_windowsize/2) #0
end<- start + fs_windowsize #0
if(end>chrEnd){ #0
start<-max(chrStart,end-fs_windowsize)
}
#decide which frame and which windows to use
curWin1<-curPos/largeWindow
curWin2<-curWin1+0.5
use1<-abs((curWin1%%1)-0.5)<abs((curWin2%%1)-0.5)
curWin1<-which(uniqueWin1==floor(curWin1))
curWin2<-which(uniqueWin2==floor(curWin2))
red<-c()
curSize<-0
if(use1){
#include markers within window
toUse<-data1[[curWin1]][,1]>=start &data1[[curWin1]][,1]<=end
#include the current marker and its two closest neighbours (indices must come from this frame, data1)
toUse<- toUse | 1:length(toUse) %in% sort(abs(data1[[curWin1]][,1]-curPos),index.return=TRUE)$ix[1:min(3,length(toUse))]
curSize<-sum(toUse)
red<-data1[[curWin1]][toUse,]
}else{
#include markers within window
toUse<-data2[[curWin2]][,1]>=start &data2[[curWin2]][,1]<=end
#include the current marker and its two closest neighbours
toUse<- toUse | 1:length(toUse) %in% sort(abs(data2[[curWin2]][,1]-curPos),index.return=TRUE)$ix[1:min(3,length(toUse))]
curSize<-sum(toUse)
red<-data2[[curWin2]][toUse,]
}
p<-if(curSize>3){
x<-c(red[red[,1]==curPos,2:4],recursive=TRUE)
red2<- red[red[,1]!=curPos & !red[,5],]
if(nrow(red2)>0){
p.win<-colSums(red2[,2:4])
p.win<-p.win+1/500
p.win<-p.win/sum(p.win) #0
fs_p1<-p.win[3] #0
fs_p2<-p.win[1]/sum(p.win[1:2]) #0
pbinom(x[3]+ifelse(x[3]<sum(x)*fs_p1,1,-1),size=sum(x),prob=fs_p1,lower.tail=x[3]<sum(x)*fs_p1)*pbinom(x[1]+ifelse(x[1]<sum(x[1:2])*fs_p2,1,-1),size=sum(x[1:2]),prob=fs_p2,lower.tail=x[1]<sum(x[1:2])*fs_p2) #0.001
}else{
1
}
}else{
1
}
#judgement
if(is.na(p)) { p=0 }
if(p<=limit){ #0.011
#mark outlier
if (length(data1[[curWin1]][data1[[curWin1]][,1]==curPos,5]) != 0) {
data1[[curWin1]][data1[[curWin1]][,1]==curPos,5]<-TRUE #0.001
}
if (length(data2[[curWin2]][data2[[curWin2]][,1]==curPos,5]) != 0) {
data2[[curWin2]][data2[[curWin2]][,1]==curPos,5]<-TRUE #0.001
}
diffDataMod[curIndex]<--2 #0.001
#recalculate diff values for neighboring markers
before<-curIndex-1 #0
while(sum(before==filtered)>0){ #~0 with 57 markers in filtered
before<-before-1
}
after<-curIndex+1 #0
while(sum(after==filtered)>0){ #0 with no markers in filtered
after<-after+1
}
if(before<1){ #0
#first marker
diffDataRaw[after]<-2*(diffDataRaw[after]-abs(freqs[after]-freqs[curIndex]))
if(diffDataMod[after]>=0){
diffDataMod[after]<-diffDataRaw[after]
}
}else if(after>size){ #0
#last marker
diffDataRaw[before]<-2*(diffDataRaw[before]-abs(freqs[before]-freqs[curIndex]))
if(diffDataMod[before]>=0){
diffDataMod[before]<-diffDataRaw[before]
}
}else{
flank<-abs(freqs[before]-freqs[after]) #0
diffDataRaw[before]<-diffDataRaw[before]-abs(freqs[before]-freqs[curIndex])+flank #0
diffDataRaw[after]<-diffDataRaw[after]-abs(freqs[after]-freqs[curIndex])+flank
if(diffDataMod[after]>=0){
diffDataMod[after]<-diffDataRaw[after]
}
if(diffDataMod[before]>=0){
diffDataMod[before]<-diffDataRaw[before]
}
}
#add the position to the filtered positions
filtered<-c(filtered,curIndex) #0
}else{
diffDataMod[curIndex]<--1
}
}
diffDataMod!=-2
}
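#In essence, the outlier test above multiplies two one-sided binomial tail
#probabilities: one asking whether the marker's error count is extreme given the
#window's error rate, and one asking the same for its foreground/background split.
#A toy version of one such tail (illustrative numbers only):
# pbinom(9, size = 100, prob = 0.02, lower.tail = FALSE)  # P(10 or more errors in 100 reads at a 2% rate)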
identify_peaks <- function(indexL,indexH,frequency,level,minWindow,avg_posFreq,bestsize,recurse,forceInclude=TRUE,allowAdjustment=0.05){
require(bbmle)
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
if(indexH-indexL>min(minWindow,bestsize)){ #too small window 617
cur_indices<-indexL:indexH
avg_toUse<-avg_posFreq[,1]>=min(dataset_shoremapmle[cur_indices,2]) & avg_posFreq[,1]<=max(dataset_shoremapmle[cur_indices,2])
if(sum(avg_toUse)>1){ #too few windowed markers 616
avg_pf<-avg_posFreq[avg_toUse,]
#try to find peaks
starts<-avg_pf[which(avg_pf[,2]==max(avg_pf[,2])),1]
while(length(starts)>0){
start<- starts[ceiling(length(starts)/2)]
beststarts<-which(min(abs(dataset_shoremapmle[,2]-start))==abs(dataset_shoremapmle[,2]-start))
beststart<-beststarts[ceiling(length(beststarts)/2)]
beststart<-min(length(dataset_shoremapmle[,1])-bestsize,beststart)
#see if the frequency needs adjustment
newFreq<-multll(beststart-floor(bestsize/2),bestsize)$coef[1]
if(abs(frequency-newFreq)<allowAdjustment){
print(paste("The goal frequency was adjusted from ",frequency," to ",newFreq,", which is the estimated frequency in the preliminary interval",sep=""))
frequency<-newFreq
}
res<-extend(beststart,bestsize,level[1],frequency,indexL,indexH,minWindow, forceInclude)
if(length(level)>1){
for(i in 2:length(level)){
res2<-c(1,1,618,level[i])
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
if(res[i-1,3]>0){
res2<-extend(beststart,bestsize,level[i],frequency,indexL,indexH,minWindow, forceInclude)
}else{
res2<-extend(res[i-1,1],res[i-1,2],level[i],frequency,indexL,indexH,minWindow, forceInclude)
}
res<-rbind(res,res2)
}
}
if(min(res[,3])>0){
if(length(starts)==1){
return(matrix(c(1,1,616,1),ncol=4))
break
}else{
starts<-starts[1:length(starts)!=ceiling(length(starts)/2)]
}
}else if(recurse){
if(min(res[,3])<0){
resL<- identify_peaks(indexL, min(res[res[,3]<0,1]), frequency, level[1], minWindow, avg_posFreq, bestsize, recurse, forceInclude,0.0)
if(length(level)>1){
for(i in 2:length(level)){
res2<-c(1,1,618,level[i])
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
if(res[i-1,3]>0){
res2<-extend(beststart,bestsize,level[i],frequency,indexL,min(res[res[,3]<0,1]),minWindow, forceInclude)
}else{
res2<-extend(resL[i-1,1],resL[i-1,2],level[i],frequency,indexL,min(res[res[,3]<0,1]),minWindow, forceInclude)
}
resL<-rbind(resL,res2)
}
}
resH<- identify_peaks(max(res[res[,3]<0,1]+res[res[,3]<0,2]), indexH, frequency, level[1], minWindow, avg_posFreq, bestsize, recurse, forceInclude,0.0)
if(length(level)>1){
for(i in 2:length(level)){
res2<-c(1,1,618,level[i])
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
if(res[i-1,3]>0){
res2<-extend(beststart,bestsize,level[i],frequency,max(res[res[,3]<0,1]+res[res[,3]<0,2]),indexH,minWindow, forceInclude)
}else{
res2<-extend(resH[i-1,1],resH[i-1,2],level[i],frequency,max(res[res[,3]<0,1]+res[res[,3]<0,2]),indexH,minWindow, forceInclude)
}
resH<-rbind(resH,res2)
}
}
if(min(resL[,3])<0){
if(min(resH[,3])<0){
#both good
return(rbind(resL,res,resH))
break
}else{
#low good
return(rbind(resL,res))
break
}
}else if(min(resH[,3])<0){
#high good
return(rbind(res,resH))
break
}else{
return(res)
break
}
}else{
return(res)
break
}
}else{
return(res)
break
}
}#end while loop
}else{
#Too few windowed markers
return(t(as.matrix(c(1,1,616,1))))
}
}else{
#Too small window
return(t(as.matrix(c(1,1,617,1))))
}
}
loglikelihood_mult <- function(llm_P1=0.5,llm_err=0.01,llm_index=0,llm_size=0){
#the negative loglikelihood function. Returns large penalty values (>100000) for NA or out-of-range parameters
if(is.na(llm_P1) || is.na(llm_err) || llm_size<0){
220000
} else if(llm_P1<0 || llm_P1>1 || llm_err<0 || llm_err>1) {
110000
}else{
llm_P1<-as.numeric(llm_P1)
llm_err<-as.numeric(llm_err)
llm_p1<- llm_P1*(1-4*llm_err/3)+llm_err/3
llm_pe<- 2*llm_err/3
llm_p2<- 1-llm_p1-llm_pe
llm_p.all <- c(llm_p1,llm_p2,llm_pe)
-sum(apply(dataset_shoremapmle[llm_index:(llm_index+llm_size-1),],1,function(llm_x){dmultinom(x=c(llm_x[3],llm_x[4],llm_x[5]),prob=llm_p.all,log=TRUE)}))
}
}
samplefreqs <- function(sf_startPos,sf_size) {
curIndices<- sf_startPos:(sf_startPos+sf_size-1)
colSums(dataset_shoremapmle[curIndices,3:5])/sum(dataset_shoremapmle[curIndices,3:5])
}
multll<- function(ml_x,ml_size=10) {
p.win<-samplefreqs(ml_x,ml_size)
ml_errEst<-3*p.win[3]/2
ml_P1est<-(p.win[1]-ml_errEst/3)/(1-4*ml_errEst/3)
#preventing out of bounds results for non-model cases like a homozygous marker with errors
ml_errEst<-min(1,max(0,ml_errEst))
ml_P1est<-min(1,max(0,ml_P1est))
#calculate likelihood
ml_min<-loglikelihood_mult(llm_P1=ml_P1est,llm_err=ml_errEst,llm_index=ml_x,llm_size=ml_size)
#return mle2-like results
list(coef=c(ml_P1est,ml_errEst),min=ml_min)
}
restrictedModel <- function(P1,x,size) {
rM_errEst<-0.0001
if(x>0&&x+size<length(dataset_shoremapmle[,1])){ #bound by the number of marker rows, not the total number of matrix cells
rM_curIndices<-x:(x+size-1)
rM_errEst<-3*sum(dataset_shoremapmle[rM_curIndices,5])/sum(dataset_shoremapmle[rM_curIndices,3:5])/2
}
mle2(loglikelihood_mult,optimizer="optimize",start=list(llm_err=rM_errEst),fixed=list(llm_P1=P1,llm_index=x,llm_size=size),lower=0, upper=1)
}
maxConf<-function(x,level=0.95,freq=0,indexL=0,indexH=Inf,minWindow=10,include=c(-1,-1)){
#function to minimize for the optimization of the interval
start<-floor(x[1])
size<-floor(x[2])
indexL<- max(1,indexL)
indexH<- min(length(dataset_shoremapmle[,2]),indexH)
if(size<minWindow){
140000-size+minWindow
}else if(start<indexL){
130000+indexL-start
}else if(start+size-1>indexH){
120000-indexH-1+start+size
}else if(sum(include)>0 && (start>include[1])){
#given region not included
110000+start-include[1]
}else if(sum(include)>0 && (start+size < sum(include))){
#given region not included
110000-start-size+sum(include)
}else{
#check storage
if(sum(storage_shoremapmle[,1]==start&storage_shoremapmle[,2]==size)==1){
res<-storage_shoremapmle[storage_shoremapmle[,1]==start&storage_shoremapmle[,2]==size,3]
res
}else{
#if not in storage, calculate
fit<-multll(start,size)
if(fit$min>100000){
res<-fit$min
}else{
restrictedFit<- restrictedModel(freq,start,size)
p<- pchisq(-2*(fit$min-restrictedFit@min),1)
if(p<=level){
res<- -size-(level-p)
}else{
res<- size+(level-p)
}
}
assign("storage_shoremapmle",rbind(storage_shoremapmle,c(start,size,res)),".GlobalEnv")
res
}
}
}
pInterval<-function(start,size,freq){
fit<-multll(start,size)
restrictedFit<- restrictedModel(freq,start,size)
pchisq(-2*(fit$min-restrictedFit@min),1)
}
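#The three functions above implement a likelihood-ratio test: multll() fits the
#free allele frequency, restrictedModel() fixes the frequency and re-optimizes
#only the error rate, and pchisq(-2*(free$min - restricted@min), 1) turns the
#statistic (both mins are negative log-likelihoods) into the confidence with
#which the tested frequency is rejected. Standalone toy illustration (made-up counts):
# counts <- c(90, 5, 5) # foreground, background, error
# llFree <- dmultinom(counts, prob = counts/sum(counts), log = TRUE)
# e <- 0.05; P1 <- 0.99
# p1 <- P1*(1-4*e/3)+e/3; pe <- 2*e/3
# llFixed <- dmultinom(counts, prob = c(p1, 1-p1-pe, pe), log = TRUE)
# pchisq(2*(llFree - llFixed), 1) # confidence that the true frequency differs from 0.99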
extend <- function(beststart,bestsize=10,level=0.99,freq=1,indexL=0,indexH=Inf,minWindow=10,forceInclude=TRUE){
#given a window it extends this as far as possible to the left and right without exceeding the confidence level
bestvalue<-Inf
indexL<- max(1,indexL)
indexH<- min(length(dataset_shoremapmle[,2]),indexH)
inclusion<-c(-1,-1)
if(forceInclude){
inclusion<-c(beststart,bestsize)
}
#a first optimization
nextTest<- optim(fn=maxConf,par=c(beststart,bestsize),control=list(ndeps=c(1,1)),level=level,freq=freq,indexL=max(indexL,beststart-10*minWindow),indexH=min(indexH,beststart+bestsize+10*minWindow),minWindow=minWindow,include=inclusion)
while(nextTest$value<bestvalue){
bestvalue<-nextTest$value
beststart<-floor(nextTest$par[1])
bestsize<-floor(nextTest$par[2])
#alternately extend right and left in steps of minWindow
i<-1
lastvalue<-bestvalue
curvalue<-maxConf(c(beststart-floor(i/2)*minWindow,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<1000&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart-floor(i/2)*minWindow,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
beststart<-beststart-floor(i/2)*minWindow
bestsize<-bestsize+i*minWindow
if(i%%2==0){
#if the extension breaks down on the right, continue on to expand left
i<-1
lastvalue<-maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart-i*minWindow,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<1000&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart-i*minWindow,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
beststart<-beststart-i*minWindow
bestsize<-bestsize+i*minWindow
}else{
#if the extension breaks down on the left, continue on to expand right
i<-1
lastvalue<-maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<1000&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
bestsize<-bestsize+i*minWindow
}
#alternately extend right and left, one marker at a time
i<-1
lastvalue<- maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart-floor(i/2),bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<minWindow*2&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart-floor(i/2),bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
beststart<-beststart-floor(i/2)
bestsize<-bestsize+i
if(i%%2==0 && i<2*minWindow){
#if the extension breaks down on the right, continue on to expand left
i<-1
lastvalue<-maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart-i,bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<minWindow&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart-i,bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
beststart<-beststart-i
bestsize<-bestsize+i
}else{
#if the extension breaks down on the left, continue on to expand right
i<-1
lastvalue<-maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart,bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<minWindow&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart,bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
bestsize<-bestsize+i
}
#optimize again before the next iteration
nextTest<- optim(fn=maxConf,method="Nelder-Mead",par=c(beststart,bestsize),control=list(ndeps=c(1,1),maxit=100),level=level,freq=freq,indexL=max(indexL,beststart-10*minWindow),indexH=min(indexH,beststart+bestsize+10*minWindow),minWindow=minWindow,include=inclusion)
}
matrix(as.numeric(c(beststart,bestsize,bestvalue,level)),ncol=4)
}
|
/SHOREmap_confInt.R
|
no_license
|
zzygyx9119/shoremap
|
R
| false
| false
| 24,068
|
r
|
#returns the reached p-value for the interval along with the positions (start, stop, p.value)
#if the third value is larger than the confidence level, it is not a valid region.
#616 not a valid peak
#617 the examined window was too small compared to the internal windowsize (minWindow)
#919 too few markers after removal of 0-sum-markers, filtering. Too few means that there are less than ten or that next too all are constant
#chromosome,positions, background_count,forground_count and error_count are vectors of the same length
#require(EMT)
ShoreMap.confint <- function(chromosome,positions, background_count, foreground_count, error_count, foreground_frequency=1, level=0.99, recurse=FALSE, forceInclude=TRUE, allowAdjustment=0.0, filterOutliers=200000, filterPValue=0.05, winSize=50000, winStep=10000, minMarker=10, minCoverage=0, maxCoverage=Inf, peakFinding=3, peakWinSize=50000, peakWinStep=10000) {
verbose=FALSE
if(verbose){
print(paste("foreground_frequency=", foreground_frequency), sep="")
print(paste("level=", level), sep="")
print(paste("recurse=", recurse), sep="")
print(paste("forceInclude=", forceInclude), sep="")
print(paste("allowAdjustment=", allowAdjustment), sep="")
print(paste("filterOutliers=", filterOutliers), sep="")
print(paste("filterPValue=", filterPValue), sep="")
print(paste("winSize=", winSize), sep="")
print(paste("winStep=", winStep), sep="")
print(paste("minMarker=", minMarker), sep="")
print(paste("minCoverage=", minCoverage), sep="")
print(paste("maxCoverage=", maxCoverage), sep="")
print(paste("peakFinding=", peakFinding), sep="") #3 is boost, 4 is R
print(paste("peakWinSize=", peakWinSize), sep="")
print(paste("peakWinStep=", peakWinStep), sep="")
}
minMarker<-max(1,minMarker)
foreground_frequency<-as.numeric(foreground_frequency)
internalData<- cbind(chromosome,positions,foreground_count,background_count,error_count)
internalData<- internalData[rowSums(internalData[,3:5])>minCoverage&rowSums(internalData[,3:5])<maxCoverage,]
print(paste("Analysing chr ",chromosome[1],", with ",length(chromosome)," (",length(internalData[,1]),") markers for equality to ",foreground_frequency,"(",typeof(foreground_frequency),")",sep=""))
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
#apply filtering here:
filtered=c();
if(filterOutliers>0){ #condition
#filterOutliers is the windowsize to use
f<-filterSampling(internalData,as.numeric(filterOutliers),as.numeric(filterPValue),FALSE)
print(paste("Removed: ",sum(!f)," markers as outliers"))
filtered<-internalData[!f,2]
internalData<- internalData[f,]
}
assign("dataset_shoremapmle",internalData,".GlobalEnv")
freqs<-internalData[,3]/rowSums(internalData[,3:5])
assign("i_shoremapmle",0,".GlobalEnv")
minWindow<-max(minMarker,2)
bestsize<-max(minMarker,2)
bestsize<-max(bestsize,minWindow)
# print(paste("Bestsize:",bestsize))
if(bestsize<length(dataset_shoremapmle[,1])){
avg_pos<-c(sapply(seq(0,winSize-1,winStep),function(shift){
windows<- floor((internalData[,2]+shift)/winSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
tapply(internalData[windowsToUse,2],windows[windowsToUse],mean)
}),recursive=TRUE)
avg_freq<-c(sapply(seq(0,winSize-1,winStep),function(shift){
windows<- floor((internalData[,2]+shift)/winSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
tapply(internalData[windowsToUse,3],windows[windowsToUse],sum)/tapply(rowSums(internalData[windowsToUse,3:5]),windows[windowsToUse],sum)
}),recursive=TRUE)
avg_R<-c(sapply(seq(0,winSize-1,winStep),function(shift){
windows<- floor((internalData[,2]+shift)/winSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
allele<-tapply(internalData[windowsToUse,3],windows[windowsToUse],sum)
ref<-tapply(internalData[windowsToUse,4],windows[windowsToUse],sum)
ret<-pmax(allele/ref,ref/allele)
ret[ret==1]<-0
rMax=max(ret[!is.infinite(ret)])
ret[is.infinite(ret)]<-rMax
ret
}),recursive=TRUE)
avg_boost<-abs(1/(1-max(foreground_frequency,1-foreground_frequency)/pmax(avg_freq,1-avg_freq)))
boostMax<-max(avg_boost[!is.infinite(avg_boost)])
avg_boost[is.infinite(avg_boost)]<-boostMax
avg_posFreq<-cbind(avg_pos,avg_freq,avg_boost,avg_R)
avg_posFreq<-t(sapply(sort(avg_posFreq[,1],index.return=T)$ix,function(x) avg_posFreq[x,]))
ci<-matrix(c(0,0,920,1,0),nrow=5)
if(level[1]<=1){
peak_pos<-c(sapply(seq(0,peakWinSize-1,peakWinStep),function(shift){
windows<- floor((internalData[,2]+shift)/peakWinSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
tapply(internalData[windowsToUse,2],windows[windowsToUse],mean)
}),recursive=TRUE)
peak_freq<-c(sapply(seq(0,peakWinSize-1,peakWinStep),function(shift){
windows<- floor((internalData[,2]+shift)/peakWinSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
tapply(internalData[windowsToUse,3],windows[windowsToUse],sum)/tapply(rowSums(internalData[windowsToUse,3:5]),windows[windowsToUse],sum)
}),recursive=TRUE)
peak_R<-c(sapply(seq(0,peakWinSize-1,peakWinStep),function(shift){
windows<- floor((internalData[,2]+shift)/peakWinSize)
windowsToUse<- windows %in% unique(windows)[table(windows)>minMarker]
allele<-tapply(internalData[windowsToUse,3],windows[windowsToUse],sum)
ref<-tapply(internalData[windowsToUse,4],windows[windowsToUse],sum)
ret<-pmax(allele/ref,ref/allele)
ret[ret==1]<-0
rMax=max(ret[!is.infinite(ret)])
ret[is.infinite(ret)]<-rMax
ret
}),recursive=TRUE)
peak_boost<-abs(1/(1-max(foreground_frequency,1-foreground_frequency)/pmax(peak_freq,1-peak_freq)))
boostMax<-max(peak_boost[!is.infinite(peak_boost)])
peak_boost[is.infinite(peak_boost)]<-boostMax
peak_posFreq<-cbind(peak_pos,peak_freq,peak_boost,peak_R)
peak_posFreq<-t(sapply(sort(peak_posFreq[,1],index.return=T)$ix,function(x) peak_posFreq[x,]))
peak_minIndex<-which(peak_posFreq[,peakFinding]==max(peak_posFreq[,peakFinding]))
print(paste("Finding initial peak(s).. choosen method in a window of size ",peakWinSize," bp with step size of ", peakWinStep, " bp",sep=""))
for(index in peak_minIndex){
print(paste(" At (avg(pos) in window): ",round(peak_posFreq[index,1])," bp",sep=""))
}
#order confidence levels
level<-sort(level)
res<- identify_peaks(1,length(internalData[,2]),foreground_frequency,level,minWindow,peak_posFreq[,c(1,peakFinding)],bestsize,recurse,forceInclude, allowAdjustment)
res<-matrix(res[res[,3]<0,],ncol=4)
if(!is.null(dim(res))&&dim(res)[1]>0){
ci<-matrix(apply(res,1,function(x) t(c(start=ifelse(x[3]<0,internalData[max(x[1]-1,1),2]+1,0), stop=ifelse(x[3]<0,internalData[min(x[1]+x[2],length(internalData[,2])),2]-1,0),p.value=ifelse(x[3]<0,x[4]+x[3]+x[2],x[3]),level=x[4],nrOfMarkers= x[2]))),nrow=5)
print("Found interval:")
for(i in 1:length(ci[1,])){
print(paste(ci[1,i],"-",ci[2,i],"level:",ci[4,i]))
}
}
}
list(confidenceInterval=ci,excluded=filtered,averaged=avg_posFreq)
}else{
#too few markers
print("Too few markers")
list(confidenceInterval=matrix(c(0,0,919,1,0),nrow=5),excluded=filtered,averaged=matrix(c(-1,-1,-1,-1),ncol=4))
}
}
#version5
filterSampling<-function(internalData,fs_windowsize=200000,fs_limit=0.05,fs_exact=FALSE){
size<-length(internalData[,2])
chrStart<-min(internalData[,2])
chrEnd<-max(internalData[,2])
indices<-1:size
largeWindow<-fs_windowsize*4 #use the double size to make sure the window is enclosed
windows1<-floor((internalData[,2])/largeWindow)
uniqueWin1<-unique(windows1)
windows2<-floor((internalData[,2]+largeWindow/2)/largeWindow)
uniqueWin2<-unique(windows2)
freqs<-internalData[,3]/rowSums(internalData[,3:5])
diffDataRaw<-abs(diff(freqs))
diffDataRaw<-c(diffDataRaw[1],diffDataRaw)+c(diffDataRaw,diffDataRaw[size-1])
diffDataMod<-diffDataRaw
allPos<-internalData[,2]
#corresponds to adjusting the p-values below with respect to size nr of tests
limit<-fs_limit/size
#use two frames and choose the frame closest to the current marker position
data1<-tapply(indices,windows1,function(x) data.frame(internalData[x,2] ,internalData[x,3] ,internalData[x,4] ,internalData[x,5] ,rep(FALSE,length(x))))
data2<-tapply(indices,windows2,function(x) data.frame(internalData[x,2] ,internalData[x,3] ,internalData[x,4] ,internalData[x,5] ,rep(FALSE,length(x))))
filtered<-c()
for(i in indices){
#get marker to test this iteration
curIndex<-which.max(diffDataMod)
curPos<-allPos[curIndex]
#start and end of window
start<-max(chrStart,curPos-fs_windowsize/2) #0
end<- start + fs_windowsize #0
if(end>chrEnd){ #0
start<-max(chrStart,end-fs_windowsize)
}
#decide which frame and which windows to use
curWin1<-curPos/largeWindow
curWin2<-curWin1+0.5
use1<-abs((curWin1%%1)-0.5)<abs((curWin2%%1)-0.5)
curWin1<-which(uniqueWin1==floor(curWin1))
curWin2<-which(uniqueWin2==floor(curWin2))
red<-c()
curSize<-0
if(use1){
#include markers within window
toUse<-data1[[curWin1]][,1]>=start &data1[[curWin1]][,1]<=end
#add the three closest markers (including the tested marker itself)
toUse<- toUse | 1:length(toUse) %in% sort(abs(data1[[curWin1]][,1]-curPos),index.return=TRUE)$ix[1:min(3,length(toUse))]
curSize<-sum(toUse)
red<-data1[[curWin1]][toUse,]
}else{
#include markers within window
toUse<-data2[[curWin2]][,1]>=start &data2[[curWin2]][,1]<=end
#add the three closest markers (including the tested marker itself)
toUse<- toUse | 1:length(toUse) %in% sort(abs(data2[[curWin2]][,1]-curPos),index.return=TRUE)$ix[1:min(3,length(toUse))]
curSize<-sum(toUse)
red<-data2[[curWin2]][toUse,]
}
p<-if(curSize>3){
x<-c(red[red[,1]==curPos,2:4],recursive=TRUE)
red2<- red[red[,1]!=curPos & !red[,5],]
if(nrow(red2)>0){
p.win<-colSums(red2[,2:4])
p.win<-p.win+1/500 #pseudo-count to avoid zero probabilities
p.win<-p.win/sum(p.win) #0
fs_p1<-p.win[3] #0
fs_p2<-p.win[1]/sum(p.win[1:2]) #0
pbinom(x[3]+ifelse(x[3]<sum(x)*fs_p1,1,-1),size=sum(x),prob=fs_p1,lower.tail=x[3]<sum(x)*fs_p1)*pbinom(x[1]+ifelse(x[1]<sum(x[1:2])*fs_p2,1,-1),size=sum(x[1:2]),prob=fs_p2,lower.tail=x[1]<sum(x[1:2])*fs_p2) #0.001
}else{
1
}
}else{
1
}
#judgement
if(is.na(p)) { p=0 }
if(p<=limit){ #0.011
#mark outlier
if (length(data1[[curWin1]][data1[[curWin1]][,1]==curPos,5]) != 0) {
data1[[curWin1]][data1[[curWin1]][,1]==curPos,5]<-TRUE #0.001
}
if (length(data2[[curWin2]][data2[[curWin2]][,1]==curPos,5]) != 0) {
data2[[curWin2]][data2[[curWin2]][,1]==curPos,5]<-TRUE #0.001
}
diffDataMod[curIndex]<--2 #0.001
#recalculate diff values for neighboring markers
before<-curIndex-1 #0
while(sum(before==filtered)>0){ #~0 with 57 markers in filtered
before<-before-1
}
after<-curIndex+1 #0
while(sum(after==filtered)>0){ #0 with no markers in filtered
after<-after+1
}
if(before<1){ #0
#first marker
diffDataRaw[after]<-2*(diffDataRaw[after]-abs(freqs[after]-freqs[curIndex]))
if(diffDataMod[after]>=0){
diffDataMod[after]<-diffDataRaw[after]
}
}else if(after>size){ #0
#last marker
diffDataRaw[before]<-2*(diffDataRaw[before]-abs(freqs[before]-freqs[curIndex]))
if(diffDataMod[before]>=0){
diffDataMod[before]<-diffDataRaw[before]
}
}else{
flank<-abs(freqs[before]-freqs[after]) #0
diffDataRaw[before]<-diffDataRaw[before]-abs(freqs[before]-freqs[curIndex])+flank #0
diffDataRaw[after]<-diffDataRaw[after]-abs(freqs[after]-freqs[curIndex])+flank
if(diffDataMod[after]>=0){
diffDataMod[after]<-diffDataRaw[after]
}
if(diffDataMod[before]>=0){
diffDataMod[before]<-diffDataRaw[before]
}
}
#add the position to the filtered positions
filtered<-c(filtered,curIndex) #0
}else{
diffDataMod[curIndex]<--1
}
}
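#return a logical keep-vector: TRUE for markers that were never flagged as outliers (-2)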
diffDataMod!=-2
}
identify_peaks <- function(indexL,indexH,frequency,level,minWindow,avg_posFreq,bestsize,recurse,forceInclude=TRUE,allowAdjustment=0.05){
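#operates on the global marker table dataset_shoremapmle; returns one row per tested interval:
#(start index, size, score, confidence level), where a negative score marks an interval that
#satisfies the confidence level and positive values are error codes (616/617/618)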
require(bbmle)
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
if(indexH-indexL>min(minWindow,bestsize)){ #too small window 617
cur_indices<-indexL:indexH
avg_toUse<-avg_posFreq[,1]>=min(dataset_shoremapmle[cur_indices,2]) & avg_posFreq[,1]<=max(dataset_shoremapmle[cur_indices,2])
if(sum(avg_toUse)>1){ #too few windowed markers 616
avg_pf<-avg_posFreq[avg_toUse,]
#try to find peaks
starts<-avg_pf[which(avg_pf[,2]==max(avg_pf[,2])),1]
while(length(starts)>0){
start<- starts[ceiling(length(starts)/2)]
beststarts<-which(min(abs(dataset_shoremapmle[,2]-start))==abs(dataset_shoremapmle[,2]-start))
beststart<-beststarts[ceiling(length(beststarts)/2)]
beststart<-min(length(dataset_shoremapmle[,1])-bestsize,beststart)
#see if the frequency needs adjustment
newFreq<-multll(beststart-floor(bestsize/2),bestsize)$coef[1]
if(abs(frequency-newFreq)<allowAdjustment){
print(paste("The goal frequency was adjusted from ",frequency," to ",newFreq,", which is the estimated frequency in the preliminary interval",sep=""))
frequency<-newFreq
}
res<-extend(beststart,bestsize,level[1],frequency,indexL,indexH,minWindow, forceInclude)
if(length(level)>1){
for(i in 2:length(level)){
res2<-c(1,1,618,level[i])
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
if(res[i-1,3]>0){
res2<-extend(beststart,bestsize,level[i],frequency,indexL,indexH,minWindow, forceInclude)
}else{
res2<-extend(res[i-1,1],res[i-1,2],level[i],frequency,indexL,indexH,minWindow, forceInclude)
}
res<-rbind(res,res2)
}
}
if(min(res[,3])>0){
if(length(starts)==1){
return(matrix(c(1,1,616,1),ncol=4))
}else{
starts<-starts[1:length(starts)!=ceiling(length(starts)/2)]
}
}else if(recurse){
if(min(res[,3])<0){
resL<- identify_peaks(indexL, min(res[res[,3]<0,1]), frequency, level[1], minWindow, avg_posFreq, bestsize, recurse, forceInclude,0.0)
if(length(level)>1){
for(i in 2:length(level)){
res2<-c(1,1,618,level[i])
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
if(res[i-1,3]>0){
res2<-extend(beststart,bestsize,level[i],frequency,indexL,min(res[res[,3]<0,1]),minWindow, forceInclude)
}else{
res2<-extend(resL[i-1,1],resL[i-1,2],level[i],frequency,indexL,min(res[res[,3]<0,1]),minWindow, forceInclude)
}
resL<-rbind(resL,res2)
}
}
resH<- identify_peaks(max(res[res[,3]<0,1]+res[res[,3]<0,2]), indexH, frequency, level[1], minWindow, avg_posFreq, bestsize, recurse, forceInclude,0.0)
if(length(level)>1){
for(i in 2:length(level)){
res2<-c(1,1,618,level[i])
assign("storage_shoremapmle",matrix(c(-1,-1,-1),nrow=1),".GlobalEnv")
if(res[i-1,3]>0){
res2<-extend(beststart,bestsize,level[i],frequency,max(res[res[,3]<0,1]+res[res[,3]<0,2]),indexH,minWindow, forceInclude)
}else{
res2<-extend(resH[i-1,1],resH[i-1,2],level[i],frequency,max(res[res[,3]<0,1]+res[res[,3]<0,2]),indexH,minWindow, forceInclude)
}
resH<-rbind(resH,res2)
}
}
if(min(resL[,3])<0){
  if(min(resH[,3])<0){
    #both good
    return(rbind(resL,res,resH))
  }else{
    #low good
    return(rbind(resL,res))
  }
}else if(min(resH[,3])<0){
  #high good
  return(rbind(res,resH))
}else{
  return(res)
}
}else{
  return(res)
}
}else{
  return(res)
}
}#end while loop
}else{
#Too few windowed markers
return(t(as.matrix(c(1,1,616,1))))
}
}else{
#Too small window
return(t(as.matrix(c(1,1,617,1))))
}
}
loglikelihood_mult <- function(llm_P1=0.5,llm_err=0.01,llm_index=0,llm_size=0){
#the negative log-likelihood; returns the sentinel values 220000/110000 for NA or out-of-range parameter values
if(is.na(llm_P1) || is.na(llm_err) || llm_size<0){
220000
} else if(llm_P1<0 || llm_P1>1 || llm_err<0 || llm_err>1) {
110000
}else{
llm_P1<-as.numeric(llm_P1)
llm_err<-as.numeric(llm_err)
llm_p1<- llm_P1*(1-4*llm_err/3)+llm_err/3
llm_pe<- 2*llm_err/3
llm_p2<- 1-llm_p1-llm_pe
llm_p.all <- c(llm_p1,llm_p2,llm_pe)
-sum(apply(dataset_shoremapmle[llm_index:(llm_index+llm_size-1),],1,function(llm_x){dmultinom(x=c(llm_x[3],llm_x[4],llm_x[5]),prob=llm_p.all,log=TRUE)}))
}
}
samplefreqs <- function(sf_startPos,sf_size) {
curIndices<- sf_startPos:(sf_startPos+sf_size-1)
colSums(dataset_shoremapmle[curIndices,3:5])/sum(dataset_shoremapmle[curIndices,3:5])
}
multll<- function(ml_x,ml_size=10) {
p.win<-samplefreqs(ml_x,ml_size)
ml_errEst<-3*p.win[3]/2
ml_P1est<-(p.win[1]-ml_errEst/3)/(1-4*ml_errEst/3)
#preventing out of bounds results for non-model cases like a homozygous marker with errors
ml_errEst<-min(1,max(0,ml_errEst))
ml_P1est<-min(1,max(0,ml_P1est))
#calculate likelihood
ml_min<-loglikelihood_mult(llm_P1=ml_P1est,llm_err=ml_errEst,llm_index=ml_x,llm_size=ml_size)
#return mle2-like results
list(coef=c(ml_P1est,ml_errEst),min=ml_min)
}
restrictedModel <- function(P1,x,size) {
rM_errEst<-0.0001
if(x>0&&x+size<length(dataset_shoremapmle[,2])){ #bound by the number of markers, not the number of columns
rM_curIndices<-x:(x+size-1)
rM_errEst<-3*sum(dataset_shoremapmle[rM_curIndices,5])/sum(dataset_shoremapmle[rM_curIndices,3:5])/2
}
mle2(loglikelihood_mult,optimizer="optimize",start=list(llm_err=rM_errEst),fixed=list(llm_P1=P1,llm_index=x,llm_size=size),lower=0, upper=1)
}
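#maxConf below scores an interval via a likelihood-ratio test with one degree of freedom:
#the free model (multll) against the model with P1 fixed at the goal frequency (restrictedModel)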
maxConf<-function(x,level=0.95,freq=0,indexL=0,indexH=Inf,minWindow=10,include=c(-1,-1)){
#function to minimize for the optimization of the interval
start<-floor(x[1])
size<-floor(x[2])
indexL<- max(1,indexL)
indexH<- min(length(dataset_shoremapmle[,2]),indexH)
if(size<minWindow){
140000-size+minWindow
}else if(start<indexL){
130000+indexL-start
}else if(start+size-1>indexH){
120000-indexH-1+start+size
}else if(sum(include)>0 && (start>include[1])){
#given region not included
110000+start-include[1]
}else if(sum(include)>0 && (start+size < sum(include))){
#given region not included
110000-start-size+sum(include)
}else{
#check storage
if(sum(storage_shoremapmle[,1]==start&storage_shoremapmle[,2]==size)==1){
res<-storage_shoremapmle[storage_shoremapmle[,1]==start&storage_shoremapmle[,2]==size,3]
res
}else{
#if not in storage, calculate
fit<-multll(start,size)
if(fit$min>100000){
res<-fit$min
}else{
restrictedFit<- restrictedModel(freq,start,size)
p<- pchisq(-2*(fit$min-restrictedFit@min),1)
if(p<=level){
res<- -size-(level-p)
}else{
res<- size+(level-p)
}
}
assign("storage_shoremapmle",rbind(storage_shoremapmle,c(start,size,res)),".GlobalEnv")
res
}
}
}
pInterval<-function(start,size,freq){
fit<-multll(start,size)
restrictedFit<- restrictedModel(freq,start,size)
pchisq(-2*(fit$min-restrictedFit@min),1)
}
extend <- function(beststart,bestsize=10,level=0.99,freq=1,indexL=0,indexH=Inf,minWindow=10,forceInclude=TRUE){
#given a window it extends this as far as possible to the left and right without exceeding the confidence level
bestvalue<-Inf
indexL<- max(1,indexL)
indexH<- min(length(dataset_shoremapmle[,2]),indexH)
inclusion<-c(-1,-1)
if(forceInclude){
inclusion<-c(beststart,bestsize)
}
#a first optimization
nextTest<- optim(fn=maxConf,par=c(beststart,bestsize),control=list(ndeps=c(1,1)),level=level,freq=freq,indexL=max(indexL,beststart-10*minWindow),indexH=min(indexH,beststart+bestsize+10*minWindow),minWindow=minWindow,include=inclusion)
while(nextTest$value<bestvalue){
bestvalue<-nextTest$value
beststart<-floor(nextTest$par[1])
bestsize<-floor(nextTest$par[2])
#alternately extend right and left, in steps of minWindow markers
i<-1
lastvalue<-bestvalue
curvalue<-maxConf(c(beststart-floor(i/2)*minWindow,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<1000&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart-floor(i/2)*minWindow,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
beststart<-beststart-floor(i/2)*minWindow
bestsize<-bestsize+i*minWindow
if(i%%2==0){
#if the extension breaks down on the right, continue on to expand left
i<-1
lastvalue<-maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart-i*minWindow,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<1000&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart-i*minWindow,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
beststart<-beststart-i*minWindow
bestsize<-bestsize+i*minWindow
}else{
#if the extension breaks down on the left, continue on to expand right
i<-1
lastvalue<-maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<1000&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart,bestsize+i*minWindow),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
bestsize<-bestsize+i
}
#alternately extend right and left, one marker at a time
i<-1
lastvalue<- maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart-floor(i/2),bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<minWindow*2&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart-floor(i/2),bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
beststart<-beststart-floor(i/2)
bestsize<-bestsize+i
if(i%%2==0 && i<2*minWindow){
#if the extension breaks down on the right, continue on to expand left
i<-1
lastvalue<-maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart-i,bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<minWindow&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart-i,bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
beststart<-beststart-i
bestsize<-bestsize+i
}else{
#if the extension breaks down on the left, continue on to expand right
i<-1
lastvalue<-maxConf(c(beststart,bestsize),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
curvalue<-maxConf(c(beststart,bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
while(i<minWindow&&curvalue<lastvalue){
lastvalue<-curvalue
i<-i+1
curvalue<-maxConf(c(beststart,bestsize+i),level=level,freq=freq,indexL=indexL,indexH=indexH,minWindow=minWindow,include=inclusion)
}
i<-i-1
bestsize<-bestsize+i
}
#optimize again before reiterating
nextTest<- optim(fn=maxConf,method="Nelder-Mead",par=c(beststart,bestsize),control=list(ndeps=c(1,1),maxit=100),level=level,freq=freq,indexL=max(indexL,beststart-10*minWindow),indexH=min(indexH,beststart+bestsize+10*minWindow),minWindow=minWindow,include=inclusion)
}
matrix(as.numeric(c(beststart,bestsize,bestvalue,level)),ncol=4)
}
|
library(car)
library(ggplot2)
library(glmnet)
library(leaps)
library(MASS)
library(tidyverse)
library(hrbrthemes)
setwd("D:/Documenti HDD/Uni/Magistrale/Statistical Data Analysis/Esercizi/Progetto")
set.seed(42)
########################################################################################################
### FUNCTIONS
open_dataset_past_cases <- function(iso_codes, dates=FALSE, iso=FALSE) {
df.data <- read.csv('covid-data.csv')
if(dates) {
columns <- colnames(df.data)[c(1,4,9,15,16,20,21,22,23,24,25,26,28,29,30,31)]
} else {
columns <- colnames(df.data)[c(1,9,15,16,20,21,22,23,24,25,26,28,29,30,31)]
}
df.data <- df.data[which(df.data$iso_code %in% iso_codes), columns]
df.data <- na.omit(df.data)
df.data$total_cases <- df.data$total_cases_per_million
df.data$total_cases_per_million <- NULL
df.data$new_tests <- df.data$new_tests_per_thousand
df.data$new_tests_per_thousand <- NULL
df.data$total_tests <- df.data$total_tests_per_thousand
df.data$total_tests_per_thousand <- NULL
actual_cases <- c()
  # lag total_cases by one day within each country (the first day gets 0)
  for(code in iso_codes) {
    tmp_actual <- df.data[df.data$iso_code == code,'total_cases']
    actual_cases <- c(actual_cases, 0, tmp_actual[1:(length(tmp_actual)-1)])
  }
df.data$actual_cases <- actual_cases
if(!iso) {
df.data$iso_code <- NULL
}
return(df.data)
}
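# Note: columns are selected by position, so this function is tied to the exact covid-data.csv layout;
# total_cases, new_tests and total_tests are replaced by their per-million / per-thousand variants.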
plot_best_predictors = function(summary,fit){
#dev.new()
par(mfrow=c(2,2))
plot(summary$rss ,xlab="Number of Variables ",ylab="RSS",type="l")
points(which.min(summary$rss),min(summary$rss), col="red",cex=2,pch=20)
plot(summary$adjr2 ,xlab="Number of Variables ",
ylab="Adjusted RSq",type="l")
points(which.max(summary$adjr2),max(summary$adjr2), col="red",cex=2,pch=20)
plot(summary$cp ,xlab="Number of Variables ",ylab="Cp", type="l")
points(which.min(summary$cp ),min(summary$cp),col="red",cex=2,pch=20)
plot(summary$bic ,xlab="Number of Variables ",ylab="BIC",type="l")
points(which.min(summary$bic),min(summary$bic),col="red",cex=2,pch=20)
dev.new()
plot(fit,scale="r2")
dev.new()
plot(fit,scale="adjr2")
dev.new()
plot(fit,scale="Cp")
dev.new()
plot(fit,scale="bic")
}
test_country <- function(df.test, iso, model) {
df.test <- df.test[df.test$iso_code == iso,]
to.plot <- data.frame(y_real = df.test$new_cases, date = df.test$date)
df.test$new_cases <- NULL
df.test$date <- NULL
to.plot$y_pred <- predict(model, newdata=df.test)
print(mean((to.plot$y_real-to.plot$y_pred)^2))
plot <- ggplot(to.plot, aes(x=date)) +
geom_line(aes(y=y_real, group=1)) +
geom_line(aes(y=y_pred, group=2), color='red')
print(plot)
return(mean((to.plot$y_real-to.plot$y_pred)^2))
}
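# Note: test_country expects new_cases and date columns and is not called anywhere in this script.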
predict.regsubsets = function(object,newdata,id,...){
form=as.formula(object$call[[2]])
mat=model.matrix(form, newdata)
coefi=coef(object, id=id)
xvars=names(coefi)
mat[,xvars]%*%coefi
}
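# regsubsets objects have no predict() method, so this S3 method rebuilds the model matrix
# from the original formula and multiplies it by the coefficients of the id-th best model.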
ridge_regression <- function(x, y) {
grid=10^seq(10,-2,length=100)
ridge.mod=glmnet(x,y,alpha=0,lambda=grid)
}
########################################################################################################
### DATA IMPORT
iso_codes_train <- c('ITA', 'GBR', 'IND', 'JPN', 'ISR', 'LUX', 'AUS', 'AUT', 'NZL', 'ARG', 'BEL', 'ZAF', 'PRT', 'CHE', 'ISL', 'RUS','TUR','DNK')
iso_codes_test <- c('USA','IRN','KOR','URY')
covid.data <- open_dataset_past_cases(iso_codes_train)
covid.data.test <- open_dataset_past_cases(iso_codes_test, dates = TRUE, iso = TRUE)
########################################################################################################
### CHECKING NONLINEAR RELATIONSHIP BETWEEN VARIABLES AND RESPONSE
dev.new()
pairs(~total_cases+stringency_index+population_density+median_age+population,covid.data)
dev.new()
pairs(~total_cases+aged_65_older+aged_70_older+gdp_per_capita,covid.data)
dev.new()
pairs(~total_cases+cvd_death_rate+diabetes_prevalence+female_smokers,covid.data)
dev.new()
pairs(~total_cases+new_tests+total_tests+actual_cases,covid.data)
########################################################################################################
### FITTING LINEAR MODEL WITH ALL PREDICTORS
fit <- lm(total_cases~., data=covid.data)
summary(fit)
# Diagnostic plots
dev.new()
par(mfrow=c(2,2))
plot(fit)
# Removing outliers
dev.new()
cooksd <- cooks.distance(fit)
sample_size <- nrow(covid.data)
plot(cooksd, pch="*", cex=2, main="Influential Points by Cooks distance")
abline(h = 4/sample_size, col="red")
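# rule of thumb: observations with Cook's distance above 4/n are treated as influential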
cook_th <- 4/sample_size
influential <- names(cooksd)[(cooksd > cook_th)]
covid.data <- covid.data[which(!(rownames(covid.data) %in% influential)),]
# Model re-fit
fit <- lm(total_cases~., data=covid.data)
vif(fit)
# Adjusting the model using p-values and VIF
adj.fit <- lm(total_cases~.-aged_70_older-aged_65_older-population, data=covid.data)
vif(adj.fit)
summary(adj.fit)
adj.fit <- lm(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate, data=covid.data)
summary(adj.fit)
# Diagnostic plots again, for the re-fitted full model (after outlier removal)
dev.new()
par(mfrow=c(2,2))
plot(fit)
# Residual plot
fit.res = resid(adj.fit)
dev.new()
plot(covid.data$total_cases, fit.res, ylab="Residuals", xlab="Samples", main="Residual Plot")
abline(0, 0)
########################################################################################################
### BEST SUBSET SELECTION USING K-FOLD
k <- 10
predictors <- 14
folds <- sample(1:k,nrow(covid.data),replace=TRUE)
cv.errors <- matrix(NA,k,predictors, dimnames=list(NULL, paste(1:predictors)))
# write a for loop that performs cross-validation
for(j in 1:k){
best.fit <- regsubsets(total_cases~., data=covid.data[folds!=j,], nvmax=predictors)
for(i in 1:predictors){
pred <- predict(best.fit, covid.data[folds==j,], id=i)
cv.errors[j,i] <- mean((covid.data[folds==j,]$total_cases-pred)^2)
}
}
# MSE for each model using K-fold
mean.cv.errors <- apply(cv.errors, 2, mean)
par(mfrow=c(1,1))
dev.new()
plot(mean.cv.errors, type="b")
best.subsets <- regsubsets(total_cases~., data = covid.data, nvmax = 14)
summary(best.subsets)
best.fit <- lm(total_cases~.-gdp_per_capita-diabetes_prevalence-new_tests-actual_cases, data=covid.data)
plot_best_predictors(summary(best.subsets), best.subsets)
### BEST SUBSET STARTING FROM ADJUSTED MODEL
predictors <- 9
folds <- sample(1:k,nrow(covid.data),replace=TRUE)
cv.errors <- matrix(NA,k,predictors, dimnames=list(NULL, paste(1:predictors)))
# write a for loop that performs cross-validation
for(j in 1:k){
best.fit.adj <- regsubsets(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate, data=covid.data[folds!=j,], nvmax=predictors)
for(i in 1:predictors){
pred <- predict(best.fit.adj, covid.data[folds==j,], id=i)
cv.errors[j,i] <- mean((covid.data[folds==j,]$total_cases-pred)^2)
}
}
# MSE for each model using K-fold
mean.cv.errors <- apply(cv.errors, 2, mean)
par(mfrow=c(1,1))
dev.new()
plot(mean.cv.errors, type="b", xlab="#Predictors", ylab="MSE")
best.subsets <- regsubsets(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate, data = covid.data, nvmax = 14)
summary(best.subsets)
best.fit.adj <- lm(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate-new_tests, data=covid.data)
plot_best_predictors(summary(best.subsets), best.subsets)
### BACKWARDS SELECTION
train <- sample(c(TRUE,FALSE), nrow(covid.data), rep=TRUE)
test <- (!train)
back.fit <- regsubsets(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate, covid.data[train,], method = "backward", nvmax=14)
back.summary <- summary(back.fit)
plot_best_predictors(back.summary, back.fit)
test.mat <- model.matrix(total_cases~.,data=covid.data[test,])
val.errors=rep(NA,predictors)
for(i in 1:predictors){
coefi <- coef(back.fit, id=i)
pred <- test.mat[,names(coefi)]%*%coefi
val.errors[i] = mean((covid.data[test,]$total_cases-pred)^2)
}
val.errors
which.min(val.errors)
back.fit = lm(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate-new_tests, data = covid.data)
### FORWARD SELECTION
train <- sample(c(TRUE,FALSE), nrow(covid.data), rep=TRUE)
test <- (!train)
for.fit <- regsubsets(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate, covid.data[train,], method = "forward", nvmax=14)
for.summary <- summary(for.fit)
plot_best_predictors(for.summary, for.fit)
test.mat <- model.matrix(total_cases~.,data=covid.data[test,])
val.errors=rep(NA,predictors)
for(i in 1:predictors){
coefi <- coef(for.fit, id=i)
pred <- test.mat[,names(coefi)]%*%coefi
val.errors[i] = mean((covid.data[test,]$total_cases-pred)^2)
}
val.errors
which.min(val.errors)
for.fit = lm(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate-new_tests, data = covid.data)
########################################################################################################
### RIDGE REGRESSION
train <- sample(c(TRUE,FALSE), nrow(covid.data),rep=TRUE)
test <- (!train)
x <- model.matrix(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate, covid.data)[,-1]
y <- covid.data$total_cases
grid <- 10^seq(10,-2,length=100)
cv.out <- cv.glmnet(x, y, alpha=0, lambda=grid)
dev.new()
plot(cv.out)
bestlam <- cv.out$lambda.min
cv.out$lambda.1se
ridge.mod <- glmnet(x[train,], y[train], alpha=0, lambda=bestlam, thresh=1e-12)
ridge.pred <- predict(ridge.mod, newx=x[test,])
mean((ridge.pred-y[test])^2)
out <- glmnet(x, y, alpha=0, lambda=grid)
predict(out, type="coefficients", s=bestlam)[1:9,]
dev.new()
plot(out,label = T, xvar = "lambda")
abline(v = log(bestlam), col="red", lwd=2, lty=2)
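# final ridge model, refit on all data at the CV-selected lambda (used for evaluation below)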
ridge.mod <- glmnet(x, y, alpha=0, lambda=bestlam, thresh=1e-12)
### LASSO REGRESSION
cv.out <- cv.glmnet(x, y, alpha=1, lambda=grid)
dev.new()
plot(cv.out)
bestlam <- cv.out$lambda.1se
out <- glmnet(x, y, alpha=1, lambda=grid)
predict(out, type="coefficients", s=bestlam)[1:9,]
dev.new()
plot(out,label = T, xvar = "lambda")
abline(v = log(bestlam), col="red", lwd=2, lty=2)
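# final lasso model at lambda.1se: the most regularized fit within one standard error of the CV minimum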
lasso.mod <- glmnet(x, y, alpha=1, lambda=bestlam, thresh=1e-12)
########################################################################################################
x.test <- model.matrix(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate, covid.data.test[,-c(1, 2)])[,-1]
x.train <- model.matrix(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate, covid.data)[,-1]
y.test <- covid.data.test$total_cases
y.train <- covid.data$total_cases
type <- rep(c("Test", "Train"), 6)
labels <- c("all_predictors", "all_predictors", "vif_selected", "vif_selected", "best_subset_all", "best_subset_all", "best_subset_9", "best_subset_9", "ridge", "ridge", "lasso", "lasso")
test.mse <- numeric(12)
df.plot <- covid.data.test
#df.plot$date <- substr(df.plot$date, 6, 10)
y.fit <- predict(fit, covid.data.test[,-c(1, 2)])
df.plot$all_predictors <- y.fit
test.mse[1] <- mean((y.test - y.fit)^2)
test.mse[2] <- mean((y.train - predict(fit, covid.data))^2)
y.adj.fit = predict(adj.fit, covid.data.test[,-c(1, 2)])
df.plot$vif_selected <- y.adj.fit
test.mse[3] <- mean((y.test - y.adj.fit)^2)
test.mse[4] <- mean((y.train - predict(adj.fit, covid.data))^2)
y.best.fit = predict(best.fit, covid.data.test[,-c(1, 2)])
df.plot$best_subset_all <- y.best.fit
test.mse[5] <- mean((y.test - y.best.fit)^2)
test.mse[6] <- mean((y.train - predict(best.fit, covid.data))^2)
y.best.fit.adj = predict(best.fit.adj, covid.data.test[,-c(1, 2)])
df.plot$best_subset_9 <- y.best.fit.adj
test.mse[7] <- mean((y.test - y.best.fit.adj)^2)
test.mse[8] <- mean((y.train - predict(best.fit.adj, covid.data))^2)
y.back.fit = predict(back.fit, covid.data.test[,-c(1, 2)])
mean((y.test - y.back.fit)^2)
y.for.fit = predict(for.fit, covid.data.test[,-c(1, 2)])
mean((y.test - y.for.fit)^2)
y.ridge <- predict(ridge.mod, newx=x.test)
df.plot$ridge <- y.ridge
test.mse[9] <- mean((y.test - y.ridge)^2)
test.mse[10] <- mean((y.train - predict(ridge.mod, newx=x.train))^2)
y.lasso <- predict(lasso.mod, newx=x.test)
df.plot$lasso <- y.lasso
test.mse[11] <- mean((y.test - y.lasso)^2)
test.mse[12] <- mean((y.train - predict(lasso.mod, newx=x.train))^2)
result.df <- data.frame(models = labels, mse = test.mse, type = type)
p <- ggplot(result.df, aes(x = models, y = mse, fill = type)) +
geom_bar(stat="identity", position=position_dodge()) +
#geom_text(aes(label=test.mse), vjust=1.6, color="white", position = position_dodge(0.9), size=3.5) +
scale_fill_brewer(palette="Paired") +
theme_minimal()
p
test_multiple_country <- function(df.test, iso) {
line.width <- 1.05
to.plot <- df.test[df.test$iso_code == iso,]
to.plot$date <- as.Date(to.plot$date)
plot <- ggplot(to.plot, aes(x=date)) +
geom_line(aes(y=total_cases, colour='black', group = 1), size=line.width) +
geom_line(aes(y=all_predictors, colour='red', group = 2), size=line.width) +
geom_line(aes(y=best_subset_9, colour='cyan', group = 3), size=line.width) +
geom_line(aes(y=best_subset_all, colour='blue', group = 4), size=line.width) +
geom_line(aes(y=vif_selected, colour='green', group = 5), size=line.width) +
geom_line(aes(y=ridge, colour='magenta', group = 6), size=line.width) +
geom_line(aes(y=lasso, colour='yellow', group = 7), size=line.width) +
scale_color_discrete(name = "Legend", labels = c("real_cases", "all_predictors", "vif_selected", "best_subset_all", "best_subset_9","ridge", "lasso")) +
scale_x_date("Days", breaks = "10 days") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
print(plot)
}
test_multiple_country(df.plot, 'KOR')
test_multiple_country(df.plot, 'USA')
test_multiple_country(df.plot, 'URY')
test_multiple_country(df.plot, 'IRN')
single_state_prediction <- function(country, split) {
s.train <- open_dataset_past_cases(c(country))[c(1:split),]
s.test <- open_dataset_past_cases(c(country), iso = TRUE, dates = TRUE)
plot.s <- data.frame(date=as.Date(s.test$date), total_cases=s.test$total_cases)
adj.fit.s <- lm(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate-population_density-gdp_per_capita-diabetes_prevalence-female_smokers-male_smokers-new_tests, data=s.train)
summary(adj.fit.s)
pred <- rep(0, length(s.test$total_cases))
pred[1:split] <- predict(adj.fit.s, s.test[c(1:split),-c(1, 2)])
# s.test$total_tests[(split+1):length(pred)] <- s.test$total_tests[split] + s.test$new_tests[split]*c(1:(length(pred)-split))
# s.test$stringency_index[(split+1):length(pred)] <- 0
for(i in (split+1):length(pred)) {
s.test[i,]$actual_cases <- pred[i-1]
pred[i] <- predict(adj.fit.s, s.test[i,-c(1, 2)])
}
plot.s$prediction <- pred
line.width <- 1.05
plot <- ggplot(plot.s, aes(x=date)) +
geom_line(aes(y=total_cases, colour='black', group = 1), size=line.width) +
geom_line(aes(y=prediction, colour='green', group = 2), size=line.width) +
scale_color_discrete(name = "Legend", labels = c("real_cases", "prediction")) +
geom_vline(xintercept = plot.s$date[split], linetype="dashed", color = "blue", size=line.width) +
scale_x_date("Days", breaks = "5 days") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
print(plot)
return(mean((pred[(split+1):length(pred)] - plot.s$total_cases[(split+1):length(pred)])^2))
}
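# Beyond the split the model is rolled forward recursively: each day's prediction is fed back
# in as the next day's actual_cases, so forecast errors can compound over the horizon.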
s.mse <- rep(0, 4)
s.mse[1] <- single_state_prediction('JPN', 50)
s.mse[2] <- single_state_prediction('USA', 50)
s.mse[3] <- single_state_prediction('URY', 50)
s.mse[4] <- single_state_prediction('IRN', 25)
single.state.mse <- mean(s.mse)
single_state_prediction.2 <- function(country, split, model) {
s.train <- open_dataset_past_cases(c(country))[c(1:split),]
s.test <- open_dataset_past_cases(c(country), iso = TRUE, dates = TRUE)
s.test.t <- open_dataset_past_cases(c(country), iso = TRUE, dates = TRUE)
plot.s <- data.frame(date=as.Date(s.test$date), total_cases=s.test$total_cases)
adj.fit.s <- lm(total_cases~.-aged_70_older-aged_65_older-population-median_age-cvd_death_rate-population_density-gdp_per_capita-diabetes_prevalence-female_smokers-male_smokers-new_tests, data=s.train)
summary(adj.fit.s)
pred <- rep(0, length(s.test$total_cases))
pred_t <- rep(0, length(s.test$total_cases))
pred[1:split] <- predict(adj.fit.s, s.test[c(1:split),-c(1, 2)])
pred_t[1:split] <- predict(model, s.test[c(1:split),-c(1, 2)])
# s.test$total_tests[(split+1):length(pred)] <- s.test$total_tests[split] + s.test$new_tests[split]*c(1:(length(pred)-split))
# s.test$stringency_index[(split+1):length(pred)] <- 0
for(i in (split+1):length(pred)) {
s.test[i,]$actual_cases <- pred[i-1]
pred[i] <- predict(adj.fit.s, s.test[i,-c(1, 2)])
s.test.t[i,]$actual_cases <- pred_t[i-1]
pred_t[i] <- predict(model, s.test.t[i, -c(1, 2)])
}
plot.s$prediction <- pred
plot.s$prediction_all <- pred_t
line.width <- 1.05
plot <- ggplot(plot.s, aes(x=date)) +
geom_line(aes(y=total_cases, colour='black', group = 1), size=line.width) +
geom_line(aes(y=prediction, colour='green', group = 2), size=line.width) +
geom_line(aes(y=prediction_all, colour='magenta', group = 3), size=line.width) +
scale_color_discrete(name = "Legend", labels = c("real_cases", "prediction", "prediction_all")) +
geom_vline(xintercept = plot.s$date[split], linetype="dashed", color = "blue", size=line.width) +
scale_x_date("Days", breaks = "5 days") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
print(plot)
return(mean((pred_t[(split+1):length(pred)] - plot.s$total_cases[(split+1):length(pred)])^2))
}
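# single_state_prediction.2 contrasts the per-country refit (pred) with the pooled multi-country
# model passed in as `model` (pred_t); only the pooled-model MSE is returned.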
t.mse <- rep(0, 4)
t.mse[1] <- single_state_prediction.2('KOR', 50, adj.fit)
t.mse[2] <- single_state_prediction.2('USA', 50, adj.fit)
t.mse[3] <- single_state_prediction.2('URY', 50, adj.fit)
t.mse[4] <- single_state_prediction.2('IRN', 25, adj.fit)
all.states.mse <- mean(t.mse)
mse.plot <- data.frame(model=c("single state", "multi state"), mse=c(single.state.mse, all.states.mse))
p <- ggplot(mse.plot, aes(x = model, y = mse)) +
geom_bar(stat="identity", position=position_dodge(), fill="lightblue") +
scale_fill_brewer(palette="Paired") +
theme_minimal()
p
single_state_prediction.3 <- function(country, split, model) {
s.train <- open_dataset_past_cases(c(country))[c(1:split),]
s.test <- open_dataset_past_cases(c(country), iso = TRUE, dates = TRUE)
s.test.t <- open_dataset_past_cases(c(country), iso = TRUE, dates = TRUE)
s.test.i <- open_dataset_past_cases(c(country), iso = TRUE, dates = TRUE)
plot.s <- data.frame(date=as.Date(s.test$date), total_cases=s.test$total_cases)
pred <- rep(0, length(s.test$total_cases))
pred_t <- rep(0, length(s.test$total_cases))
pred_i <- rep(0, length(s.test$total_cases))
pred[1:split] <- predict(model, s.test[c(1:split),-c(1, 2)])
pred_t[1:split] <- predict(model, s.test[c(1:split),-c(1, 2)])
pred_i[1:split] <- predict(model, s.test[c(1:split),-c(1, 2)])
# s.test$total_tests[(split+1):length(pred)] <- s.test$total_tests[split] # + s.test$new_tests[split]*c(1:(length(pred)-split))
# s.test.i$total_tests[(split+1):length(pred)] <- 2*s.test.t$total_tests[(split+1):length(pred)]
s.test$stringency_index[(split+1):length(pred)] <- seq(s.test$stringency_index[split], 0, length=length(pred)-split)
s.test.i$stringency_index[(split+1):length(pred)] <- seq(s.test$stringency_index[split], 100, length=length(pred)-split)
for(i in (split+1):length(pred)) {
s.test[i,]$actual_cases <- pred[i-1]
pred[i] <- predict(model, s.test[i,-c(1, 2)])
s.test.t[i,]$actual_cases <- pred_t[i-1]
pred_t[i] <- predict(model, s.test.t[i, -c(1, 2)])
s.test.i[i,]$actual_cases <- pred_i[i-1]
pred_i[i] <- predict(model, s.test.i[i, -c(1, 2)])
}
plot.s$prediction <- pred
plot.s$prediction_all <- pred_t
plot.s$prediction_inc <- pred_i
line.width <- 1.05
plot <- ggplot(plot.s, aes(x=date)) +
geom_line(aes(y=total_cases, colour='black', group = 1), size=line.width) +
geom_line(aes(y=prediction, colour='green', group = 2), size=line.width) +
geom_line(aes(y=prediction_all, colour='magenta', group = 3), size=line.width) +
geom_line(aes(y=prediction_inc, colour='pink', group = 4), size=line.width) +
scale_color_discrete(name = "Legend", labels = c("real_cases", "reduced stringency", "real stringency", "increased stringency")) +
geom_vline(xintercept = plot.s$date[split], linetype="dashed", color = "blue", size=line.width) +
scale_x_date("Days", breaks = "5 days") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
print(plot)
}
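# Scenario analysis: the three forecast trajectories differ only in the assumed stringency_index
# path after the split (linearly relaxed to 0, held as observed, or tightened towards 100).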
single_state_prediction.3('KOR', 50, adj.fit)
single_state_prediction.3('USA', 50, adj.fit)
single_state_prediction.3('URY', 50, adj.fit)
single_state_prediction.3('IRN', 25, adj.fit)
|
/Totale_regressione.r
|
no_license
|
TestaDiRapa/sda-project
|
R
| false
| false
| 20,863
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core.R
\name{generateDistn}
\alias{generateDistn}
\title{Generate theoretical GC content distributions}
\usage{
generateDistn(seqs, genome, nchrom, file = "fastqc_theoretical_gc.txt",
n = 1e+06, bp = 100, wts = 1, name = "")
}
\arguments{
\item{seqs}{a DNAStringSet of the sequences to simulate
reads from. E.g. for RNA-seq, the transcripts, which
can be generated with \code{extractTranscriptSeqs}
from the GenomicFeatures package.
See the example script located in \code{inst/script/human_mouse.R}}
\item{genome}{a BSgenome object.
See the example script located in \code{inst/script/human_mouse.R}}
\item{nchrom}{the number of chromosomes from the genome to simulate
reads from}
\item{file}{the path of the file to write out}
\item{n}{the number of reads to simulate}
\item{bp}{the length of the simulated reads in basepairs}
\item{wts}{optional weights to go along with the \code{seqs} or
the chromosomes in \code{genome}, e.g. to represent
more realistic expression of transcripts}
\item{name}{the name to be printed at the top of the file}
}
\value{
the name of the file which was written
}
\description{
This function generates random simulated reads from
either provided \code{seqs} (best for RNA-seq)
or from a genome (best for DNA-seq). The GC content
of these reads is then tabulated to produce a distribution
file which can be read by MultiQC to be displayed
on top of the FASTQC GC content module. Either \code{seqs}
or \code{genome} is required, and only one can be specified.
Specifying \code{genome} requires also specifying \code{nchrom}.
}
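% A minimal, untested usage sketch; the BSgenome package named below is an assumption
% and must be installed for the example to run.
\examples{
\dontrun{
library(BSgenome.Hsapiens.UCSC.hg38)
generateDistn(genome = BSgenome.Hsapiens.UCSC.hg38, nchrom = 1,
              file = "fastqc_theoretical_gc_hg38.txt",
              n = 1e5, bp = 100, name = "hg38 simulated")
}
}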
\references{
MultiQC:
http://multiqc.info/
FASTQC:
http://www.bioinformatics.babraham.ac.uk/projects/fastqc/
}
|
/man/generateDistn.Rd
|
permissive
|
jnpaulson/fastqcTheoreticalGC
|
R
| false
| true
| 1,737
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/dse23g_1.R
\docType{data}
\name{dse23g_1}
\alias{dse23g_1}
\title{Dataset for Exercise G-1, Chapter 23}
\format{A \code{data.frame} with 15 rows and 9 variables:
\describe{
\item{level}{}
\item{tube}{}
\item{x1}{}
\item{x2}{}
\item{x3}{}
\item{x4}{}
\item{x5}{}
\item{x6}{}
\item{y}{}
}}
\source{
Draper, N.R., Smith, H., (1998) Applied Regression Analysis, 3rd ed., New York: Wiley
}
\usage{
dse23g_1
}
\description{
Dataset for Exercise G-1, Chapter 23
}
\examples{
dse23g_1
}
\keyword{datasets}
|
/man/dse23g_1.Rd
|
no_license
|
danielgil1/aprean3
|
R
| false
| false
| 584
|
rd
|
## Load Libraries
list.of.packages <- c("data.table","plyr","dplyr","tibble", "lubridate", "knitr", "ggplot2", "stringr", "forcats", "ggpubr")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages, require, character.only = TRUE)
# Functions
sumnona <- function(x){if (all(is.na(x))) x[NA_integer_] else sum(x, na.rm = TRUE)}
meanona <- function(x){if (all(is.na(x))) x[NA_integer_] else mean(x, na.rm = TRUE)}
# DATA DOWNLOAD
# Create temporary file
temp <- tempfile()
# Download zip into temporary file
download.file(url = "https://github.com/nairarshad/RepData_PeerAssessment1/blob/master/activity.zip?raw=true",
destfile = temp, mode = 'wb')
# Unzip the zip file
data.unzip <- unzip(temp)
# Unlink the temporary file
unlink(temp)
# DATA READ
# Read the activity data
activity.data <- read.csv(data.unzip, stringsAsFactors = FALSE) %>%
tbl_df() %>%
mutate(date = as.Date(date))
# Total steps by date
total.steps <- activity.data %>%
group_by(date) %>%
summarize(total_steps = sumnona(steps),
total_intervals = n()) %>%
mutate(total_intervals = NULL)
# Histogram of the total number of steps taken each day
p1 <- ggplot(data = total.steps, aes(x = total_steps))
p1 <- p1 + geom_histogram(bins = 25)
p1 <- p1 + labs(x = "Total Step Count",
y = "Count",
title = "Histogram of the total number of steps taken each day")
p1 <- p1 + theme_bw()
# Mean and median total steps
total.steps.mean <- mean(total.steps$total_steps, na.rm = TRUE)
total.steps.medn <- median(total.steps$total_steps, na.rm = TRUE)
# Average step count for each 5-min interval
interval.5min <- activity.data %>%
group_by(interval) %>%
summarize(avg_steps = mean(steps, na.rm = TRUE))
# Time series plot of the average number of steps taken across all days at 5-minute intervals
p2 <- ggplot(data = interval.5min, aes(x = interval, y = avg_steps))
p2 <- p2 + geom_line()
p2 <- p2 + labs(x = "Interval (HHMM)",
y = "Average step count",
title = "Time series plot of the average number of steps taken across all days at 5-minute intervals")
p2 <- p2 + scale_x_continuous(labels = function(x) str_pad(x, width = 4, pad = "0"))
p2 <- p2 + theme_bw()
# Which interval corresponds to maximum average steps
maxintrvl <- interval.5min$interval[which.max(interval.5min$avg_steps)]
# How many NAs in the data
countna <- sum(is.na(activity.data$steps))
# Initialize df for manipulation: impute for NAs
activity.data.nona <- as.data.frame(activity.data)
# Row-wise identify NA and replace with interval average
for(i in 1:nrow(activity.data.nona)){
if(is.na(activity.data.nona[i,"steps"])){
intrvl <- activity.data.nona[i,"interval"]
activity.data.nona[i,"steps"] <-
interval.5min$avg_steps[interval.5min$interval==intrvl]
}
}
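# A vectorized alternative to the loop above, kept as a commented
# sketch (it should yield the same imputation via match()):
# na.idx <- is.na(activity.data.nona$steps)
# activity.data.nona$steps[na.idx] <-
#   interval.5min$avg_steps[match(activity.data.nona$interval[na.idx],
#                                 interval.5min$interval)]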
# Total steps from imputed data by date
total.steps.nona <- activity.data.nona %>%
group_by(date) %>%
summarize(total_steps = sumnona(steps),
total_intervals = n()) %>%
mutate(total_intervals = NULL)
# Histogram of the total number of steps taken each day (Imputed Data)
p3 <- ggplot(data = total.steps.nona, aes(x = total_steps))
p3 <- p3 + geom_histogram(bins = 25)
p3 <- p3 + labs(x = "Total Step Count",
y = "Count",
title = "Histogram of the total number of steps taken each day (Imputed Data)")
p3 <- p3 + theme_bw()
# Mean and median total steps after imputing
total.steps.nona.mean <- mean(total.steps.nona$total_steps, na.rm = TRUE)
total.steps.nona.medn <- median(total.steps.nona$total_steps, na.rm = TRUE)
# Backup the imputed data
activity.data.nona.bc <- activity.data.nona
activity.data.nona <- NULL
# Add factor column whether Weekday/Weekend
activity.data.nona <- activity.data.nona.bc %>%
mutate(day = fct_collapse(factor(weekdays(date, abbreviate = TRUE)),
Weekend = c("Sat","Sun"),
Weekday = c("Mon","Tue","Wed","Thu","Fri"))) %>%
mutate(day = factor(day, levels = c("Weekend","Weekday")))
# Average by wday and interval
wday.mean <- activity.data.nona %>%
tbl_df() %>%
group_by(day, interval) %>%
summarize(avg_steps = mean(steps, na.rm = TRUE))
# Time series plot of the average number of steps taken across all days at 5-minute intervals
p4 <- ggplot(data = wday.mean, aes(x = interval, y = avg_steps))
p4 <- p4 + geom_line()
p4 <- p4 + labs(x = "Interval (HHMM)",
y = "Average step count",
title = "Time series plot of the average number of steps taken across all days at 5-minute intervals")
p4 <- p4 + scale_x_continuous(labels = function(x) str_pad(x, width = 4, pad = "0"))
p4 <- p4 + facet_grid(day~.)
p4 <- p4 + theme_bw()
|
/RepResProject.R
|
no_license
|
nairarshad/RepData_PeerAssessment1
|
R
| false
| false
| 4,913
|
r
|
\name{mergeparameters}
\alias{mergeparameters}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{New parameters from merging two Gaussian mixture components}
\description{
Re-computes pointwise posterior probabilities, mean and covariance
matrix for a mixture component obtained by merging two mixture
components in a Gaussian mixture.
}
\usage{
mergeparameters(xdata, j1, j2, probs, muarray,Sigmaarray, z)
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{xdata}{data (something that can be coerced into a matrix).}
\item{j1}{integer. Number of first mixture component to be merged.}
\item{j2}{integer. Number of second mixture component to be merged.}
\item{probs}{vector of component proportions (for all components;
should sum up to one).}
\item{muarray}{matrix of component means (rows).}
\item{Sigmaarray}{array of component covariance matrices (third
dimension refers to component number).}
\item{z}{matrix of observation- (row-)wise posterior probabilities of
belonging to the components (columns).}
}
\value{
List with components
\item{probs}{see above; sum of probabilities for original components
\code{j1} and \code{j2} is now \code{probs[j1]}. Note that, here as
for the other returned components, values for the merged component are
stored in place \code{j1} while the values in place \code{j2} are left
unchanged. To keep only the information for the new mixture after
merging, the entries in place \code{j2} therefore need to be dropped.}
\item{muarray}{see above; weighted mean of means of component
\code{j1} and \code{j2} is now in place \code{j1}.}
\item{Sigmaarray}{see above; weighted covariance matrix handled as
above.}
\item{z}{see above; original entries for columns \code{j1} and
\code{j2} are summed up and now in column \code{j1}.}
}
\references{
Hennig, C. (2010) Methods for merging Gaussian mixture components,
\emph{Advances in Data Analysis and Classification}, 4, 3-34.
}
\author{Christian Hennig
\email{c.hennig@ucl.ac.uk}
\url{http://www.homepages.ucl.ac.uk/~ucakche/}
}
\examples{
options(digits=3)
set.seed(98765)
require(mclust)
iriss <- iris[sample(150,20),-5]
irisBIC <- mclustBIC(iriss)
siris <- summary(irisBIC,iriss)
probs <- siris$parameters$pro
muarray <- siris$parameters$mean
Sigmaarray <- siris$parameters$variance$sigma
z <- siris$z
mpi <- mergeparameters(iriss,1,2,probs,muarray,Sigmaarray,z)
mpi$probs
mpi$muarray
}
\keyword{multivariate}
\keyword{cluster}
|
/JCGS-R3/main_code/lib/fpc/man/mergeparameters.Rd
|
no_license
|
patperry/cvclust
|
R
| false
| false
| 2,562
|
rd
|
\name{betapriorGauss}
\alias{betapriorGauss}
\title{betapriorGauss function}
\usage{
betapriorGauss(mean, sd)
}
\arguments{
\item{mean}{the prior mean, a vector of length 1 or more.
1 implies a common mean.}
\item{sd}{the prior standard deviation, a vector of
length 1 or more. 1 implies a common standard deviation.}
}
\value{
an object of class "betapriorGauss"
}
\description{
A function to define Gaussian priors for beta.
}
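\examples{
# Minimal sketches based on the signature above: a common prior for all
# beta coefficients, then independent priors for two coefficients.
betapriorGauss(mean = 0, sd = 10)
betapriorGauss(mean = c(0, 1), sd = c(10, 5))
}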
|
/man/betapriorGauss.Rd
|
no_license
|
ssouyris/spatsurv
|
R
| false
| false
| 439
|
rd
|
# HEADER
# Display: Figure 7.2 Box plot - Change from Baseline by Analysis Timepoint, Visit and Treatment
# White paper: Central Tendency
# Specs: https://github.com/phuse-org/phuse-scripts/blob/master/whitepapers/specification/
# Output: https://github.com/phuse-org/phuse-scripts/blob/master/whitepapers/WPCT/outputs_r/
# Contributors: Jeno Pizarro, Kirsten Burdett
#TESTING and QUALIFICATION:
#DEVELOP STAGE
#10-JAN-2016, adapted from WPCT 7.01.R script
#needs ANCOVA p values in table...
### updated 21-June-2017, Kirsten - converted to an R package
# BDS_Box_Chg
# Kirsten Burdett
#' BDS_Box_Chg
#'
#' Create a boxplot for PhUSE
#'
#' @export
#' @param treatmentname Which treatment arm variable? e.g."TRTA" #TRTA, TRTP, etc
#' @param useshortnames Rename Treatment Arms? (if you wish to display shorter names).TRUE OR FALSE
#' @param oldnames Treatment Arms old names, e.g. "Xanomeline Low Dose","Xanomeline High Dose"
#' @param newnames Treatment Arms new names, e.g. "X-low", "X-High"
#' @param usepopflag subset on a population flag. TRUE OR FALSE
#' @param popflag value "SAFFL"
#' @param testname test or parameter to be analyzed e.g."DIABP"
#' @param yaxislabel labels for y axis
#' @param selectedvisits visit numbers to be analyzed e.g 0,2,4,6,8,12,16,20,24
#' @param perpage how many visits to display per page
#' @param dignum number of digits in table, standard deviation = dignum +1
#' @param inputdirectory set input file directory
#' @param outputdirectory set output file directory
#' @param testfilename accepts CSV or XPT files
#' @param filetype output file type - TIFF or JPEG or PNG
#' @param pixelwidth choose output file size: pixel width
#' @param pixelheight choose output file size: pixel height
#' @param outputfontsize choose output font size
#' @param charttitle Title for the chart
#' @return Figure 7.2 Box plot - Change from Baseline by Analysis Timepoint, Visit and Treatment
#'
#' @examples
#' BDS_Box_Chg(treatmentname = "TRTA", useshortnames = TRUE, oldnames = c("Xanomeline Low Dose","Xanomeline High Dose"), newnames = c("X-low", "X-High"), usepopflag = TRUE, popflag = "SAFFL", testname = "DIABP", yaxislabel = "Change in Diastolic Blood Pressure (mmHg)",selectedvisits = c(0,2,4,6,8,12,16,20,24), perpage = 6, dignum = 1, inputdirectory = "R:/StatOpB/CSV/9_GB_PhUSE/phuse-scripts/data/adam/cdisc",outputdirectory = "U:/github", testfilename = "advs.xpt",filetype = "PNG", pixelwidth = 1200, pixelheight = 1000, outputfontsize = 16, charttitle = "Box")
#'
#' @import ggplot2
#' @import tools
#' @import gridExtra
#' @import data.table
BDS_Box_Chg<-function(treatmentname,useshortnames = c(TRUE,FALSE),oldnames,newnames,
usepopflag = c(TRUE,FALSE),popflag,testname,yaxislabel,selectedvisits,
perpage,dignum,inputdirectory,outputdirectory,
testfilename,filetype = c("PNG","TIFF","JPEG"),pixelwidth,pixelheight,
outputfontsize,charttitle) {
#Read in DATASET
if (file_ext(testfilename) == "csv") {
testresultsread <- read.csv(file.path(inputdirectory,testfilename))
} else {
testresultsread <-
Hmisc::sasxport.get(file.path(inputdirectory,testfilename), lowernames = FALSE)
}
#buildtable function to be called later, summarize data to enable creation of accompanying datatable
buildtable <- function(avalue, dfname, by1, by2, dignum){
byvarslist <- c(by1,by2)
summary <- eval(dfname)[,list(
n = .N,
mean = round(mean(eval(avalue), na.rm = TRUE), digits=dignum),
sd = round(sd(eval(avalue), na.rm = TRUE), digits=dignum+1),
min = round(min(eval(avalue), na.rm = TRUE), digits=dignum),
q1 = round(quantile(eval(avalue), .25, na.rm = TRUE), digits=dignum),
      median = round(median(eval(avalue), na.rm = TRUE), digits=dignum),
q3 = round(quantile(eval(avalue), .75, na.rm = TRUE), digits = dignum),
max = round(max(eval(avalue), na.rm = TRUE), digits = dignum)
),
by = byvarslist]
return(summary)
}
#SELECT VARIABLES (examples in parenthesis): TREATMENT (TRTP, TRTA), PARAMCD (LBTESTCD)
#colnames(testresults)[names(testresults) == "OLD VARIABLE"] <- "NEW VARIABLE"
colnames(testresultsread)[names(testresultsread) == treatmentname] <- "TREATMENT"
colnames(testresultsread)[names(testresultsread) == popflag] <- "FLAG" #select population flag to subset on such as SAFFL or ITTFL
if (useshortnames == TRUE){
for(i in 1:length(oldnames)) {
testresultsread$TREATMENT <- ifelse(testresultsread$TREATMENT == oldnames[i], as.character(newnames[i]), as.character(testresultsread$TREATMENT))
}
}
#determine number of pages needed
initial <- 1
visitsplits <- ceiling((length(selectedvisits)/perpage))
#for each needed page, subset selected visits by number per page
for(i in 1:visitsplits) {
#subset on test, visits, population to be analyzed
if (usepopflag == TRUE){
testresults <- subset(testresultsread, PARAMCD == testname & AVISITN %in% selectedvisits[(initial):
(ifelse(perpage*i>length(selectedvisits),length(selectedvisits),perpage*i))]
& FLAG == "Y")
} else {
testresults <- subset(testresultsread, PARAMCD == testname & AVISITN %in% selectedvisits[(initial):(perpage*i)])
}
initial <- initial + perpage
testresults<- data.table(testresults)
#setkey for speed gains when summarizing
setkey(testresults, USUBJID, TREATMENT, AVISITN)
#specify plot
  p <- ggplot(testresults, aes(x = factor(AVISITN), y = CHG, fill = TREATMENT))
# add notch, axis labels, legend, text size
p1 <- p + geom_boxplot(notch = TRUE) + xlab("Visit Number") + ylab(yaxislabel) + theme(legend.position="bottom", legend.title=element_blank(),
text = element_text(size = outputfontsize),
axis.text.x = element_text(size=outputfontsize),
axis.text.y = element_text(size=outputfontsize)) +ggtitle(charttitle)
# add mean points
p2 <- p1 + stat_summary(fun.y=mean, colour="dark red", geom="point", position=position_dodge(width=0.75))
# horizontal line at 0
p3 <- p2 + geom_hline(yintercept = 0, colour = "red")
#call summary table function
summary <- buildtable(avalue = quote(CHG), dfname= quote(testresults), by1 = "AVISITN", by2 = "TREATMENT", dignum)[order(AVISITN, TREATMENT)]
table_summary <- data.frame(t(summary))
t1theme <- ttheme_default(core = list(fg_params = list (fontsize = outputfontsize)))
t1 <- tableGrob(table_summary, theme = t1theme, cols = NULL)
if (filetype == "TIFF"){
#Output to TIFF
tiff(file.path(outputdirectory,paste("plot",i,".TIFF",sep = "" )), width = pixelwidth, height = pixelheight, units = "px", pointsize = 12)
grid.arrange(p3, t1, ncol = 1)
dev.off()
}
if (filetype == "JPEG") {
# Optionally, use JPEG
jpeg(file.path(outputdirectory,paste("plot",i,".JPEG",sep = "" )), width = pixelwidth, height = pixelheight, units = "px", pointsize = 12)
grid.arrange(p3, t1, ncol = 1)
dev.off()
}
if (filetype == "PNG") {
# Optionally, use PNG
png(file.path(outputdirectory,paste("plot",i,".PNG",sep = "" )), width = pixelwidth, height = pixelheight, units = "px", pointsize = 12)
grid.arrange(p3, t1, ncol = 1)
dev.off()
}
}}
|
/BDS_Box_Chg.R
|
no_license
|
phuse-org/R-Packages
|
R
| false
| false
| 7,811
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R
\name{add_trace}
\alias{add_trace}
\alias{add_markers}
\alias{add_text}
\alias{add_paths}
\alias{add_lines}
\alias{add_segments}
\alias{add_polygons}
\alias{add_ribbons}
\alias{add_area}
\alias{add_pie}
\alias{add_bars}
\alias{add_histogram}
\alias{add_histogram2d}
\alias{add_histogram2dcontour}
\alias{add_heatmap}
\alias{add_contour}
\alias{add_boxplot}
\alias{add_surface}
\alias{add_mesh}
\alias{add_scattergeo}
\alias{add_choropleth}
\title{Add trace(s) to a plotly visualization}
\usage{
add_trace(p, ..., data = NULL, inherit = TRUE)
add_markers(p, x = NULL, y = NULL, z = NULL, ..., data = NULL,
inherit = TRUE)
add_text(p, x = NULL, y = NULL, z = NULL, text = NULL, ...,
data = NULL, inherit = TRUE)
add_paths(p, x = NULL, y = NULL, z = NULL, ..., data = NULL,
inherit = TRUE)
add_lines(p, x = NULL, y = NULL, z = NULL, ..., data = NULL,
inherit = TRUE)
add_segments(p, x = NULL, y = NULL, xend = NULL, yend = NULL, ...,
data = NULL, inherit = TRUE)
add_polygons(p, x = NULL, y = NULL, ..., data = NULL, inherit = TRUE)
add_ribbons(p, x = NULL, ymin = NULL, ymax = NULL, ..., data = NULL,
inherit = TRUE)
add_area(p, r = NULL, t = NULL, ..., data = NULL, inherit = TRUE)
add_pie(p, values = NULL, labels = NULL, ..., data = NULL,
inherit = TRUE)
add_bars(p, x = NULL, y = NULL, ..., data = NULL, inherit = TRUE)
add_histogram(p, x = NULL, y = NULL, ..., data = NULL, inherit = TRUE)
add_histogram2d(p, x = NULL, y = NULL, z = NULL, ..., data = NULL,
inherit = TRUE)
add_histogram2dcontour(p, x = NULL, y = NULL, z = NULL, ...,
data = NULL, inherit = TRUE)
add_heatmap(p, x = NULL, y = NULL, z = NULL, ..., data = NULL,
inherit = TRUE)
add_contour(p, z = NULL, ..., data = NULL, inherit = TRUE)
add_boxplot(p, x = NULL, y = NULL, ..., data = NULL, inherit = TRUE)
add_surface(p, z = NULL, ..., data = NULL, inherit = TRUE)
add_mesh(p, x = NULL, y = NULL, z = NULL, ..., data = NULL,
inherit = TRUE)
add_scattergeo(p, ...)
add_choropleth(p, z = NULL, ..., data = NULL, inherit = TRUE)
}
\arguments{
\item{p}{a plotly object}
\item{...}{These arguments are documented at \url{https://plot.ly/r/reference/}
Note that acceptable arguments depend on the value of \code{type}.}
\item{data}{A data frame (optional) or \link[crosstalk:SharedData]{crosstalk::SharedData} object.}
\item{inherit}{inherit attributes from \code{\link[=plot_ly]{plot_ly()}}?}
\item{x}{the x variable.}
\item{y}{the y variable.}
\item{z}{a numeric matrix}
\item{text}{textual labels.}
\item{xend}{"final" x position (in this context, x represents "start")}
\item{yend}{"final" y position (in this context, y represents "start")}
\item{ymin}{a variable used to define the lower boundary of a polygon.}
\item{ymax}{a variable used to define the upper boundary of a polygon.}
\item{r}{For polar chart only. Sets the radial coordinates.}
\item{t}{For polar chart only. Sets the angular coordinates.}
\item{values}{the value associated with each slice of the pie.}
\item{labels}{the labels (categories) corresponding to \code{values}.}
}
\description{
Add trace(s) to a plotly visualization
}
\examples{
p <- plot_ly(economics, x = ~date, y = ~uempmed)
p
p \%>\% add_markers()
p \%>\% add_lines()
p \%>\% add_text(text = ".")
# attributes declared in plot_ly() carry over to downstream traces,
# but can be overwritten
plot_ly(economics, x = ~date, y = ~uempmed, color = I("red")) \%>\%
add_lines() \%>\%
add_markers(color = ~pop) \%>\%
layout(showlegend = FALSE)
txhousing \%>\%
group_by(city) \%>\%
plot_ly(x = ~date, y = ~median) \%>\%
add_lines(fill = "black")
ggplot2::map_data("world", "canada") \%>\%
group_by(group) \%>\%
plot_ly(x = ~long, y = ~lat) \%>\%
add_polygons(hoverinfo = "none") \%>\%
add_markers(text = ~paste(name, "<br />", pop), hoverinfo = "text",
data = maps::canada.cities) \%>\%
layout(showlegend = FALSE)
plot_ly(economics, x = ~date) \%>\%
add_ribbons(ymin = ~pce - 1e3, ymax = ~pce + 1e3)
p <- plot_ly(plotly::wind, r = ~r, t = ~t) \%>\% add_area(color = ~nms)
layout(p, radialaxis = list(ticksuffix = "\%"), orientation = 270)
ds <- data.frame(
labels = c("A", "B", "C"),
values = c(10, 40, 60)
)
plot_ly(ds, labels = ~labels, values = ~values) \%>\%
add_pie() \%>\%
layout(title = "Basic Pie Chart using Plotly")
library(dplyr)
mtcars \%>\%
count(vs) \%>\%
plot_ly(x = ~vs, y = ~n) \%>\%
add_bars()
plot_ly(x = ~rnorm(100)) \%>\% add_histogram()
plot_ly(x = ~LETTERS, y = ~LETTERS) \%>\% add_histogram2d()
z <- as.matrix(table(LETTERS, LETTERS))
plot_ly(x = ~LETTERS, y = ~LETTERS, z = ~z) \%>\% add_histogram2d()
plot_ly(MASS::geyser, x = ~waiting, y = ~duration) \%>\%
add_histogram2dcontour()
plot_ly(z = ~volcano) \%>\% add_heatmap()
plot_ly(z = ~volcano) \%>\% add_contour()
plot_ly(mtcars, x = ~factor(vs), y = ~mpg) \%>\% add_boxplot()
plot_ly(z = ~volcano) \%>\% add_surface()
plot_ly(x = c(0, 0, 1), y = c(0, 1, 0), z = c(0, 0, 0)) \%>\% add_mesh()
}
\references{
\url{https://plot.ly/r/reference/}
}
\seealso{
\code{\link[=plot_ly]{plot_ly()}}
}
\author{
Carson Sievert
}
|
/man/add_trace.Rd
|
permissive
|
MhAmine/plotly
|
R
| false
| true
| 5,184
|
rd
|
#' Check the integrity of survival data.
#'
#' Check that exit occurs after enter, that spells from an individual do not
#' overlap, and that each individual experiences at most one event.
#'
#' Interval lengths must be strictly positive.
#'
#' @param enter Left truncation time.
#' @param exit Time of exit.
#' @param event Indicator of event. Zero means 'no event'.
#' @param id Identification of individuals.
#' @param eps The smallest allowed spell length or overlap.
#' @return A vector of id's for the insane individuals. Of zero length if no
#' errors.
#' @author Göran Broström
#' @seealso \code{\link{join.spells}}, \code{\link{coxreg}},
#' \code{\link{aftreg}}
#' @keywords manip survival
#' @examples
#'
#' xx <- data.frame(enter = c(0, 1), exit = c(1.5, 3), event = c(0, 1), id =
#' c(1,1))
#' check.surv(xx$enter, xx$exit, xx$event, xx$id)
#'
#' @export
check.surv <- function(enter, exit, event, id = NULL, eps = 1.e-8){
## The '.Fortran' version.
##########################
n <- length(enter)
    if (length(exit) != n) stop("Length mismatch (enter/exit)")
    if (length(event) != n) stop("Length mismatch (enter/event)")
    if (!is.null(id)) if (length(id) != n) stop("Length mismatch (enter/id)")
## If no id (or one record per id).
if (is.null(id) || (length(unique(id)) == n)) return(all(enter < exit))
## Now, id is set; let's sort data:
#id <- factor(id)
n.ind <- length(unique(id))
ord <- order(id, enter)
id <- id[ord]
enter <- enter[ord]
exit <- exit[ord]
event <- as.logical(event[ord])
id <- factor(id)
id.size <- table(id)
xx <- .Fortran("chek",
as.integer(n),
as.integer(n.ind),
as.integer(id.size), ## length = n.ind
as.double(enter), ## length = n
as.double(exit), ## length = n
as.integer(event), ## length = n
as.double(eps),
sane = integer(n.ind) ## boolean; TRUE: good individual
)
bad.id <- levels(id)[xx$sane == 0]
bad.id
}
|
/R/check.surv.R
|
no_license
|
cran/eha
|
R
| false
| false
| 2,137
|
r
|
coefficients_matrix <- function(x) {
q <- length(x$info$parameters)
cov_names <- c("(Intercept)", x$info$covariates)
k <- length(cov_names)
res <- do.call(rbind, lapply(x$regression, FUN = function(X) summary(X)$coefficients))
res <- as.data.frame(res)
res <- cbind(
Parameter = rep(x$info$parameters, each = k),
Covariate = rep(cov_names, times = q),
res
)
rownames(res) <- NULL
res
}
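# Usage sketch (hypothetical object names): `fit` is assumed to be an
# ipcr result carrying $info$parameters, $info$covariates and a list of
# per-parameter lm fits in $regression, as the code above expects.
# coefficients_matrix(fit)
# -> one row per (Parameter, Covariate) pair, with the usual lm()
#    summary columns (Estimate, Std. Error, t value, Pr(>|t|)).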
|
/R/coefficients_matrix.R
|
no_license
|
manuelarnold/ipcr
|
R
| false
| false
| 420
|
r
|
\alias{gtkTextIterGetCharsInLine}
\name{gtkTextIterGetCharsInLine}
\title{gtkTextIterGetCharsInLine}
\description{Returns the number of characters in the line containing \code{iter},
including the paragraph delimiters.}
\usage{gtkTextIterGetCharsInLine(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkTextIter}}] an iterator}}
\value{[integer] number of characters in the line}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/man/gtkTextIterGetCharsInLine.Rd
|
no_license
|
cran/RGtk2.10
|
R
| false
| false
| 460
|
rd
|
#' Fit the pre-defined Neural Network for Longitudinal Data
#'
#' @param model The model object produced by create_model().
#' @param ver ver=0 to show nothing, ver=1 to show an animated progress bar, ver=2 to print one line per epoch during training.
#' @param n_epoch The number of epochs to train the model.
#' @param bsize The batch size.
#' @param X1 Features as inputs of 1st LSTM.
#' @param X2 Features as inputs of 2nd LSTM.
#' @param X3 Features as inputs of 3rd LSTM.
#' @param X4 Features as inputs of 4th LSTM.
#' @param X5 Features as inputs of 5th LSTM.
#' @param X6 Features as inputs of 6th LSTM.
#' @param X7 Features as inputs of 7th LSTM.
#' @param X8 Features as inputs of 8th LSTM.
#' @param X9 Features as inputs of 9th LSTM.
#' @param X10 Features as inputs of 10th LSTM.
#' @param Xif The features to be concatenated with the outputs of the LSTMs.
#' @param y The target variable.
#' @return The fitted model.
#' @description Fit the created Neural Network model (Keras).
#' @examples
#' X1 <- matrix(runif(500*20), nrow=500, ncol=20)
#' X2 <- matrix(runif(500*24), nrow=500, ncol=24)
#' X3 <- matrix(runif(500*24), nrow=500, ncol=24)
#' X4 <- matrix(runif(500*24), nrow=500, ncol=24)
#' X5 <- matrix(runif(500*16), nrow=500, ncol=16)
#' X6 <- matrix(runif(500*16), nrow=500, ncol=16)
#' X7 <- matrix(runif(500*16), nrow=500, ncol=16)
#' X8 <- matrix(runif(500*16), nrow=500, ncol=16)
#' X9 <- matrix(runif(500*16), nrow=500, ncol=16)
#' X10 <- matrix(runif(500*15), nrow=500, ncol=15)
#' Xif <- matrix(runif(500*232), nrow=500, ncol=232)
#' y <- matrix(runif(500), nrow=500, ncol=1)
#' \dontrun{
#' fitted_model = fit_model(model,0,1,32,X1,X2,X3,X4,X5,X6,X7,X8,X9,X10,Xif,y)
#' }
#' # The functions require to have python installed
#' # As well as tensorflow, keras and reticulate package.
#' @import keras
#' @export fit_model
fit_model<-function(model, ver, n_epoch, bsize, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, Xif, y){
checkpoint_path <- "checkpoints/cp.ckpt"
# Create checkpoint callback
cp_callback <- callback_model_checkpoint(
monitor = "val_loss",
filepath = checkpoint_path,
save_weights_only = TRUE,
save_best_only = TRUE,
verbose = 0 # not printing
)
trained_model <- model %>% fit(
x = list(inp1 = X1,
inp2 = X2,
inp3 = X3,
inp4 = X4,
inp5 = X5,
inp6 = X6,
inp7 = X7,
inp8 = X8,
inp9 = X9,
inp10 = X10,
inpif = Xif), # sequence we're using for prediction
y = y, # sequence we're predicting
batch_size = bsize, # how many samples to pass to our model at a time
epochs = n_epoch, # how many times we'll look at the whole dataset
validation_split = 0.1, # how much data to hold out for testing as we go along
callbacks = list(cp_callback), # pass callback to training
verbose = ver) # printing during training
fitted = model %>% load_model_weights_tf(filepath = checkpoint_path)
return(fitted)
}
|
/R/fit_model.R
|
no_license
|
jinshuai886/LDNN
|
R
| false
| false
| 3,122
|
r
|
library(bcp)
library(RODBC) # provides odbcDriverConnect(), sqlQuery() and sqlSave() used below
generate_data_frame = function(nrow, ncol) {
col_types = list(
    `integer` = function() as.integer(runif(nrow, -.Machine$integer.max, .Machine$integer.max)),
`double` = function() rnorm(nrow),
`character` = function() paste("str", as.character(rnorm(nrow))),
`factor` = function() as.factor(paste("factor", as.character(runif(nrow, 1, 100)))),
`logical` = function() rnorm(nrow) > 0,
`Date` = function() as.Date("1970-01-01") + runif(nrow, 0, 25000)
)
col_data = lapply(seq_len(ncol), function(i) {
type_id = runif(1, 1, length(col_types))
col_types[[type_id]]()
})
names(col_data) = paste("col", seq_along(col_data), sep = "_")
res = data.frame(col_data)
return(res)
}
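# Quick sanity check of the generator (sizes are arbitrary):
# str(generate_data_frame(nrow = 10, ncol = 4))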
test_scenarios = list(
`1` = list(nrow = 100, ncol = 20),
`2` = list(nrow = 51000, ncol = 30),
`3` = list(nrow = 1000, ncol = 80),
`4` = list(nrow = 100000, ncol = 5),
`5` = list(nrow = 101001, ncol = 1)
)
conn = odbcDriverConnect('driver={SQL Server};server=DESKTOP-0U0OJS1\\SQLEXPRESS;database=test;trusted_connection=true')
for(i in seq_along(test_scenarios)) {
print(sprintf("scenario %d; nrow = %d; ncol = %d", i, test_scenarios[[i]]$nrow, test_scenarios[[i]]$ncol))
df = generate_data_frame(nrow = test_scenarios[[i]]$nrow, ncol = test_scenarios[[i]]$ncol)
table_name = sprintf("dbo.DF%d", i)
time = system.time({
bcp(conn, df, table_name, auto_create_table = TRUE, drop_if_exists = TRUE)
})
print(time)
}
df_perf = generate_data_frame(nrow = 40000, ncol = 60)
sqlQuery(conn, "IF OBJECT_ID('dbo.PerfSqlSaveTest') IS NOT NULL DROP TABLE dbo.PerfSqlSaveTest")
system.time({ bcp(conn, df_perf, "dbo.PerfBcpTest", auto_create_table = TRUE, drop_if_exists = TRUE) })
system.time({ sqlSave(conn, df_perf, "dbo.PerfSqlSaveTest") })
|
/inst/test_random_data_frames.R
|
no_license
|
marymoni/bcp
|
R
| false
| false
| 1,871
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/view_ui.R
\name{view_ui}
\alias{view_ui}
\title{Show UI output in viewer pane}
\usage{
view_ui(x, close_after = 5)
}
\arguments{
\item{x}{ui content (actionButton, selectInput, valueBox). If x is not provided, \code{view_ui()} will look for selected text in the source pane or the last output from running the UI code. In the latter case, it expects an object with class "shiny.tag" or "shiny.tag.list"}
\item{close_after}{number of seconds to display UI in Viewer panel. If NULL, app must be stopped manually before more code can be run.}
}
\description{
Show UI output in viewer pane
}
\examples{
if (interactive()) {
# run this line
shiny::selectInput(
"state",
"Choose a state:",
list(
`East Coast` = list("NY", "NJ", "CT"),
`West Coast` = list("WA", "OR", "CA"),
`Midwest` = list("MN", "WI", "IA")
)
)
# the output will automatically be used here
view_ui(close_after = 6)
}
}
|
/man/view_ui.Rd
|
no_license
|
cran/shinyobjects
|
R
| false
| true
| 1,014
|
rd
|
library("sqldf")
data <- read.csv.sql("household_power_consumption.txt",sep=";",sql = "select * from file where Date in ('1/2/2007','2/2/2007')")
dateTime <- as.POSIXlt(paste(as.Date(data$Date, format="%d/%m/%Y"), data$Time, sep=" "))
png(file="plot3.png")
plot(dateTime, data$Sub_metering_1,type="l",col="black",xlab="",ylab="Energy sub metering")
lines(dateTime, data$Sub_metering_2,type="l",col="red")
lines(dateTime, data$Sub_metering_3,type="l",col="blue")
legend('topright', names(data)[7:9] , lty=1, col=c('black','red', 'blue'))
dev.off()
|
/plot3.R
|
no_license
|
davidamsallem/ExData_Plotting1
|
R
| false
| false
| 621
|
r
|
library("sqldf")
data <- read.csv.sql("household_power_consumption.txt",sep=";",sql = "select * from file where Date in ('1/2/2007','2/2/2007')")
dateTime <- as.POSIXlt(paste(as.Date(data$Date, format="%d/%m/%Y"), data$Time, sep=" "))
png(file="plot3.png")
plot(dateTime, data$Sub_metering_1,type="l",col="black",xlab="",ylab="Energy sub metering")
lines(dateTime, data$Sub_metering_2,type="l",col="red",xlab="",ylab="Energy sub metering")
lines(dateTime, data$Sub_metering_3,type="l",col="blue",xlab="",ylab="Energy sub metering")
legend('topright', names(data)[7:9] , lty=1, col=c('black','red', 'blue'))
dev.off()
|
context("Field")
test_that("@fields creates a new section and lists fields", {
out <- roc_proc_text(rd_roclet(), "
#' Important class.
#'
#' @field a field a
#' @field b field b
setRefClass('test')
")[[1]]
expect_equal(get_tag(out, "field")$values, c(a = "field a", b = "field b"))
})
|
/tests/testthat/test-field.R
|
no_license
|
kevinushey/roxygen
|
R
| false
| false
| 314
|
r
|
context("Field")
test_that("@fields creates a new section and lists fields", {
out <- roc_proc_text(rd_roclet(), "
#' Important class.
#'
#' @field a field a
#' @field b field b
setRefClass('test')
")[[1]]
expect_equal(get_tag(out, "field")$values, c(a = "field a", b = "field b"))
})
|
########################
#
rm(list=ls())
require(LaplacesDemon)
N <- 10000
J <- 5
X <- matrix(1,N,J)
for (j in 2:J) {X[,j] <- rnorm(N,runif(1,-3,3),runif(1,0.1,1))}
beta <- runif(J,-3,3)
e <- rnorm(N,0,0.1)
y <- tcrossprod(X, t(beta)) + e
mon.names <- c("LP", "sigma")
parm.names <- as.parm.names(list(beta=rep(0,J), log.sigma=0))
PGF <- function(Data){c(rnormv(Data$J,0,10), log(rhalfcauchy(1,5)))}
MyData <- list(J=J, X=X, PGF=PGF,mon.names=mon.names, parm.names=parm.names, y=y)
#
#################################################################
#
Model <- function(parm, Data)
{
### Parameters
beta <- parm[1:Data$J]
sigma <- exp(parm[Data$J+1])
### Log(Prior Densities)
beta.prior <- sum(dnormv(beta, 0, 500, log=TRUE))
sigma.prior <- dgamma(sigma, 10, log=TRUE)
### Log-Likelihood
mu <- tcrossprod(Data$X, t(beta))
LL <- sum(dnorm(Data$y, mu, sigma, log=TRUE))
### Log-Posterior
LP <- LL + beta.prior + sigma.prior
Modelout <- list(LP=LP, Dev=-2*LL, Monitor=c(LP, sigma),
yhat=rnorm(length(mu), mu, sigma), parm=parm)
return(Modelout)
}
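# Illustrative sanity check (not part of the original script): evaluate the
# log-posterior once at the true generating values before sampling; a
# non-finite LP here would make every sampler below fail.
chk <- Model(c(beta, log(0.1)), MyData)
stopifnot(is.finite(chk$LP))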
############################ Initial Values #############################
Initial.Values <- GIV(Model, MyData, PGF=TRUE)
Initial.Values <- c(beta, log(0.1)) # must have length J+1: beta plus log.sigma
###########################################################################
# Examples of MCMC Algorithms #
###########################################################################
######################## Hit-And-Run Metropolis #########################
Fit0 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=1000, Status=100, Thinning=1,
Algorithm="HARM", Specs=NULL)
#Fit0
#beta
#plot(Fit0, BurnIn=50, MyData, PDF=F)
# Consort(Fit)
# plot(BMK.Diagnostic(Fit))
# PosteriorChecks(Fit)
# caterpillar.plot(Fit, Parms="beta")
# BurnIn <- Fit$Rec.BurnIn.Thinned
# plot(Fit, BurnIn, MyData, PDF=FALSE)
# Pred <- predict(Fit, Model, MyData)
# summary(Pred, Discrep="Chi-Square")
# plot(Pred, Style="Covariates", Data=MyData)
# plot(Pred, Style="Density", Rows=1:9)
# plot(Pred, Style="ECDF")
# plot(Pred, Style="Fitted")
# plot(Pred, Style="Jarque-Bera")
# plot(Pred, Style="Predictive Quantiles")
# plot(Pred, Style="Residual Density")
# plot(Pred, Style="Residuals")
# Levene.Test(Pred)
# Importance(Fit, Model, MyData, Discrep="Chi-Square")
################## Adaptive Hamiltonian Monte Carlo #####################
Fit1 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="AHMC", Specs=list(epsilon=rep(0.02, length(Initial.Values)),
L=2, Periodicity=10))
########################## Adaptive Metropolis ##########################
Fit2 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="AM", Specs=list(Adaptive=500, Periodicity=10))
################### Adaptive Metropolis-within-Gibbs ####################
Fit3 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="AMWG", Specs=list(Periodicity=50))
###################### Adaptive-Mixture Metropolis ######################
Fit4 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="AMM", Specs=list(Adaptive=500, B=NULL, Periodicity=10,
w=0.05))
################### Affine-Invariant Ensemble Sampler ###################
Fit5 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="AIES", Specs=list(Nc=2*length(Initial.Values), Z=NULL,
beta=2, CPUs=1, Packages=NULL, Dyn.lib=NULL))
################# Componentwise Hit-And-Run Metropolis ##################
Fit6 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="CHARM", Specs=NULL)
########### Componentwise Hit-And-Run (Adaptive) Metropolis #############
Fit7 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="CHARM", Specs=list(alpha.star=0.44))
################# Delayed Rejection Adaptive Metropolis #################
Fit8 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="DRAM", Specs=list(Adaptive=500, Periodicity=10))
##################### Delayed Rejection Metropolis ######################
Fit9 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="DRM", Specs=NULL)
################## Differential Evolution Markov Chain ##################
Fit10 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="DEMC", Specs=list(Nc=3, Z=NULL, gamma=NULL, w=0.1))
####################### Hamiltonian Monte Carlo #########################
Fit11 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="HMC", Specs=list(epsilon=rep(0.02, length(Initial.Values)),
L=2))
############# Hamiltonian Monte Carlo with Dual-Averaging ###############
Fit12 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="HMCDA", Specs=list(A=500, delta=0.65, epsilon=NULL,
Lmax=1000, lambda=0.1))
################## Hit-And-Run (Adaptive) Metropolis ####################
Fit13 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="HARM", Specs=list(alpha.star=0.234))
######################## Independence Metropolis ########################
### Note: requires an existing 'Fit' object (e.g., from a previous run or a
### Laplace Approximation), which supplies the mu and Covar arguments.
Fit14 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=Fit$Covar, Iterations=2000, Status=100, Thinning=1,
Algorithm="IM",
Specs=list(mu=Fit$Summary1[1:length(Initial.Values),1]))
######################### Interchain Adaptation #########################
Initial.Values <- rbind(Initial.Values, GIV(Model, MyData, PGF=TRUE))
Fit15 <- LaplacesDemon.hpc(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="INCA", Specs=list(Adaptive=500, Periodicity=10),
Chains=2, CPUs=2, Packages=NULL, Dyn.libs=NULL)
####################### Metropolis-within-Gibbs #########################
Fit16 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="MWG", Specs=NULL)
########################## No-U-Turn Sampler ############################
Fit17 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=10, Thinning=1,
Algorithm="NUTS", Specs=list(A=50, delta=0.6, epsilon=NULL))
###################### Robust Adaptive Metropolis #######################
Fit18 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="RAM", Specs=list(alpha.star=0.234, Dist="N", gamma=0.66,
Periodicity=10))
########################### Reversible-Jump #############################
bin.n <- J-1
bin.p <- 0.2
parm.p <- c(1, rep(1/(J-1),(J-1)), 1)
selectable <- c(0, rep(1,J-1), 0)
Fit19 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="RJ", Specs=list(bin.n=bin.n, bin.p=bin.p,
parm.p=parm.p, selectable=selectable,
selected=c(0,rep(1,J-1),0)))
######################## Random-Walk Metropolis #########################
Fit20 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="RWM", Specs=NULL)
############## Sequential Adaptive Metropolis-within-Gibbs ##############
#NOTE: The SAMWG algorithm is only for state-space models (SSMs)
Fit21 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="SAMWG", Specs=list(Dyn=Dyn, Periodicity=50))
################## Sequential Metropolis-within-Gibbs ###################
#NOTE: The SMWG algorithm is only for state-space models (SSMs)
Fit22 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="SMWG", Specs=list(Dyn=Dyn))
# ############################# Slice Sampler #############################
# m <- Inf; w <- 1
# Fit23 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
# Covar=NULL, Iterations=2000, Status=100, Thinning=1,
# Algorithm="Slice", Specs=list(m=m, w=w))
#
################### Tempered Hamiltonian Monte Carlo ####################
Fit24 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="THMC", Specs=list(epsilon=rep(0.05,length(Initial.Values)),
L=2, Temperature=2))
############################### t-walk #################################
Fit25 <- LaplacesDemon(Model, Data=MyData, Initial.Values,
Covar=NULL, Iterations=2000, Status=100, Thinning=1,
Algorithm="twalk", Specs=list(SIV=NULL, n1=4, at=6, aw=1.5))
#End
############################################################################
Fit0
Fit1
Fit2
Fit3
Fit4
Fit5
Fit6
Fit7
Fit8
Fit9
Fit10
Fit11
Fit12#!
Fit13
Fit14#!
Fit15#!
Fit16
Fit17#!
Fit18
Fit19
Fit20
Fit21#!
Fit22#!
Fit23#!
Fit24
####################
#
op <- par(no.readonly = TRUE)
Fit = Fit0
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
#
#
Fit = Fit1
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit2
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit3
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit4
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit5
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit6
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit7
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit8
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit9
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
# #
#
Fit = Fit13
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
#
Fit = Fit16
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
#
Fit = Fit18
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit19
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
##
#
Fit = Fit20
beta
par(op)
caterpillar.plot(Fit, Parms="beta")
#
points(beta,(5:1),col=2,pch=17)
plot(Fit, BurnIn=50, MyData, PDF=F)
#
#
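#
# Illustrative wrap-up (assumes the corresponding fits above completed):
# gather posterior means of beta from a few samplers and set them beside the
# true coefficients. Column 1 of Summary1 holds the posterior mean, the same
# convention used when building the IM Specs for Fit14 above.
fits <- list(HARM=Fit0, AM=Fit2, HARM.adaptive=Fit13, RWM=Fit20)
post_means <- sapply(fits, function(f) f$Summary1[1:J, 1])
print(cbind(truth=beta, post_means))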
|
/旧资料/LinearRegression.R
|
no_license
|
wangbinzjcc/DeadStandingTrees
|
R
| false
| false
| 11,930
|
r
|
#!/usr/bin/env Rscript
library(MutationalPatterns)
ref_genome <- 'BSgenome.Hsapiens.UCSC.hg38'
ref_transcriptome <- "TxDb.Hsapiens.UCSC.hg38.knownGene"
library(ref_genome, character.only = TRUE)
library(ref_transcriptome, character.only = TRUE)
library(NMF)
library(gridExtra)
library(ggplot2)
library(reshape)
#library(RColorBrewer)
#library(pheatmap)
cosmic <- paste("https://cancer.sanger.ac.uk/cancergenome/assets/","signatures_probabilities.txt", sep = "") # COSMIC v2 signature probabilities table
args <- commandArgs(trailingOnly = TRUE)
input <- args[1]
outputdir <- args[2]
palette <- args[3]
nsign <- as.numeric(args[4])
image <- args[5]
# infile is tsv vfc / samplename_bulk|vivo|vitro
vcfs <- read.table(input, sep="\t", header=FALSE, stringsAsFactors = FALSE)
vcf_files <- vcfs$V1
sample_names <- vcfs$V2
vcfs <- read_vcfs_as_granges(vcf_files, sample_names, ref_genome)
mut_mat <- mut_matrix(vcf_list = vcfs, ref_genome = ref_genome)
setwd(outputdir)
save.image(image)
nrun_estimate <- 50
nrun <- 150
seed <- 123456
mut_mat2 <- mut_mat + 0.0001
estimate <- nmf(mut_mat2, rank=2:5, method="brunet", nrun=nrun_estimate, seed=seed)
png('nmf_k.png')
plot(estimate)
graphics.off()
nmf_res <- extract_signatures(mut_mat, rank = nsign, nrun = nrun)
names_sign <- paste0("sign_", seq(1, nsign))
colnames(nmf_res$signatures) <- names_sign
rownames(nmf_res$contribution) <- names_sign
plot_96_profile(nmf_res$signatures, condensed = TRUE)
mypc <- function (contribution, signatures, index = c(), coord_flip = FALSE, mode = "relative", palette = c())
{
if (!(mode == "relative" | mode == "absolute"))
stop("mode parameter should be either 'relative' or 'absolute'")
    if (length(index) > 0) {
contribution = contribution[, index]
}
Sample = NULL
Contribution = NULL
Signature = NULL
if (mode == "relative") {
m_contribution = melt(contribution)
colnames(m_contribution) = c("Signature", "Sample", "Contribution")
plot = ggplot(m_contribution, aes(x = factor(Sample),
y = Contribution, fill = factor(Signature), order = Sample)) +
geom_bar(position = "fill", stat = "identity", colour = "black") +
labs(x = "", y = "Relative contribution") + theme_bw() +
theme(panel.grid.minor.x = element_blank(), panel.grid.major.x = element_blank()) +
theme(panel.grid.minor.y = element_blank(), panel.grid.major.y = element_blank())+
theme(text = element_text(size=15), axis.text.x = element_text(angle = 90, hjust = 1))
}
else {
if (missing(signatures))
stop(paste("For contribution plotting in mode 'absolute':",
"also provide signatures matrix"))
total_signatures = colSums(signatures)
abs_contribution = contribution * total_signatures
m_contribution = melt(abs_contribution)
colnames(m_contribution) = c("Signature", "Sample", "Contribution")
plot = ggplot(m_contribution, aes(x = factor(Sample),
y = Contribution, fill = factor(Signature), order = Sample)) +
geom_bar(stat = "identity", colour = "black") + labs(x = "",
y = "Absolute contribution \n (no. mutations)") +
theme_bw() + theme(panel.grid.minor.x = element_blank(),
panel.grid.major.x = element_blank()) + theme(panel.grid.minor.y = element_blank(),
panel.grid.major.y = element_blank())+
theme(text = element_text(size=15), axis.text.x = element_text(angle = 90, hjust = 1))
}
if (length(palette) > 0)
plot = plot + scale_fill_manual(name = "Signature", values = palette)
else plot = plot + scale_fill_discrete(name = "Signature")
if (coord_flip)
plot = plot + coord_flip() + xlim(rev(levels(factor(m_contribution$Sample))))
else plot = plot + xlim(levels(factor(m_contribution$Sample)))
return(plot)
}
png('contrib_barplot.png')
mypc(nmf_res$contribution, nmf_res$signatures, mode = "relative")
graphics.off()
png('heatmap.png')
plot_contribution_heatmap(nmf_res$contribution,sig_order = names_sign,cluster_samples=F)
graphics.off()
sp_url <- cosmic # already a complete URL; no paste needed
cancer_signatures = read.table(sp_url, sep = "\t", header = TRUE)
# Match the order of the mutation types to MutationalPatterns standard
new_order = match(row.names(mut_mat), cancer_signatures$Somatic.Mutation.Type)
# Reorder cancer signatures dataframe>
cancer_signatures = cancer_signatures[as.vector(new_order),]
# Add trinucletiode changes names as row.names>
row.names(cancer_signatures) = cancer_signatures$Somatic.Mutation.Type
# Keep only 96 contributions of the signatures in matrix
cancer_signatures = as.matrix(cancer_signatures[,4:33])
us <- nmf_res$signatures
colnames(us) <- names_sign
hclust_cosmic_us = cluster_signatures(cbind(us, cancer_signatures), method = "average") # store signatures in new order
png('hclust.png')
plot(hclust_cosmic_us)
graphics.off()
# put together vitro-vivo of each starting clone and redo (to get more power in reconstructing signatures?)
# 5 - 18 with k=2 and 1 sample for each starting clone - with k = 3 signature 8 pops out
# 5 - 18 - 8 with k=3 and 1 sample for each model (vivo/vitro separated)
save.image(image)
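# Optional extension (MutationalPatterns also exports cos_sim_matrix): put a
# number on the similarity between the de novo signatures and COSMIC instead
# of relying on the dendrogram alone. The output file name is illustrative.
cos_sim <- cos_sim_matrix(us, cancer_signatures)
write.csv(round(cos_sim, 3), "cosine_similarity_cosmic.csv")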
|
/local/src/mut_pat_signatures_denovo.R
|
no_license
|
vodkatad/AF_spectra
|
R
| false
| false
| 5,271
|
r
|
library(aimsir17)
new_obs <- select(observations,station,year,month,day,hour,temp)
select(observations,station:rain)
select(observations,-(station:rain))
select(observations,starts_with("w"))
select(observations,ends_with("p"))
select(observations,ends_with("p"),everything())
|
/code/05 dplyr 1/03 Select.R
|
permissive
|
JimDuggan/CT1100
|
R
| false
| false
| 283
|
r
|
# install.packages("usethis")
library(usethis)
edit_git_config()
create_github_token()
# install.packages("gitcreds")
library(gitcreds)
gitcreds_set()
# Make this project have a git repository locally
use_git()
# Connect this to a GitHub repository
?use_github()
use_github(organisation = "rin3-spring-2021", private = TRUE)
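# Optional: summarize the overall git/GitHub configuration (a real usethis helper)
git_sitrep()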
# This is a comment
|
/github-setup.R
|
no_license
|
rin3-spring-2021/github-setup-demo
|
R
| false
| false
| 353
|
r
|
# install.packages("usethis")
library(usethis)
edit_git_config()
create_github_token()
# install.packages("gitcreds")
library(gitcreds)
gitcreds_set()
# Make this project have a git repository locally
use_git()
# Connect this to a GitHub repository
?use_github()
use_github(organisation = "rin3-spring-2021", private = TRUE)
# This is a comment
|
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(8.5728629954997e-312, -4.74571492238721e-249, 5.18440867598767e+307, -1.22227637740241e-150, -4.15923997689852e-209, -1.10339037428038e-87, -6.95025412017465e+44, -1.18078903777812e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, -1.0254047066058e-199, -6.96017950870002e-145, -4.23245790176481e+95, -1.3199888952305e+101, -1.86834569065576e+236, 4.01115143374698e+166, 1.63329743414245e+86, 2.91667231363207e-269, 1.95236685739849e-214, 2.28917898403533e-310))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831038-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 634
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{Antilles}
\alias{Antilles}
\title{Antilles Bird Immigration Dates}
\format{A data frame with 37 observations of one variable. \describe{
\item{immigration.date}{approximate immigration date (in millions of
years)} }}
\source{
\emph{inferred from} Ricklefs, R.E. and E. Bermingham. 2001.
Nonequilibrium diversity dynamics of the Lesser Antillean avifauna.
\emph{Science} 294: 1522-1524.
}
\description{
Approximate dates of immigration for 37 species of birds in the Lesser
Antilles.
}
\examples{
histogram(~immigration.date, Antilles,n=15)
densityplot(~immigration.date, Antilles)
}
\references{
\url{http://www.sciencemag.org/cgi/content/abstract/sci;294/5546/1522}
}
\keyword{datasets}
|
/man/Antilles.Rd
|
no_license
|
mdlama/abd
|
R
| false
| true
| 801
|
rd
|
#<<BEGIN>>
unmc <- function(x, drop=TRUE)
#TITLE Unclasses the mc or the mcnode Object
#DESCRIPTION
#Unclasses the \samp{mc} object in a list of arrays
#or the \samp{mcnode} object in an array.
#KEYWORDS manip
#INPUTS
#{x}<<A \samp{mc} or a \samp{mcnode} object.>>
#[INPUTS]
#{drop}<<Should the dimensions of size 1 be dropped (see \code{\link{drop}}).>>
#VALUE
#if x is an \samp{mc} object: a list of arrays. If \samp{drop=TRUE}, a list of vectors, matrices and arrays.
#if x is an \samp{mcnode} object: an array. If \samp{drop=TRUE}, a vector, matrix or array.
#EXAMPLE
#data(total)
### A vector
#unmc(total$xV, drop=TRUE)
### An array
#unmc(total$xV, drop=FALSE)
#CREATED 07-08-01
#REVISED 07-08-01
#--------------------------------------------
{
unmcnode <- function(y){
attr(y,"type") <- NULL
attr(y,"outm") <- NULL
y <- unclass(y)
if(drop) y <- drop(y)
return(y)}
if(is.mc(x)){
x <- lapply(x,unmcnode)
x <- unclass(x)
return(x)}
return(unmcnode(x))
}
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
|
/R/unmc.R
|
no_license
|
cran/mc2d
|
R
| false
| false
| 1,096
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/excel_dates.R
\name{excel_numeric_to_date}
\alias{excel_numeric_to_date}
\title{Convert dates encoded as serial numbers to Date class.}
\usage{
excel_numeric_to_date(date_num, date_system = "modern")
}
\arguments{
\item{date_num}{numeric vector of serial numbers to convert.}
\item{date_system}{the date system, either \code{"modern"} or \code{"mac pre-2011"}.}
}
\value{
Returns a vector of class Date.
}
\description{
Converts numbers like \code{42370} into date values like \code{2016-01-01}.
Defaults to the modern Excel date encoding system. However, Excel for Mac 2008 and earlier Mac versions of Excel used a different date system. To determine what platform to specify: if the date 2016-01-01 is represented by the number 42370 in your spreadsheet, it's the modern system. If it's 40908, it's the old Mac system.
More on date encoding systems at http://support.office.com/en-us/article/Date-calculations-in-Excel-e7fe7167-48a9-4b96-bb53-5612a800b487.
}
\examples{
excel_numeric_to_date(40000)
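# the same serial number under the older Mac encoding (illustrative):
excel_numeric_to_date(40000, date_system = "mac pre-2011")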
}
|
/man/excel_numeric_to_date.Rd
|
permissive
|
khunreus/janitor
|
R
| false
| true
| 1,084
|
rd
|
#' Prepare data for SWD maxent calibration processes
#'
#' @description prepare_swd helps to create csv files containing occurrence
#' records (all, train, and test records) and background coordinates, together
#' with values of predictor variables, that later can be used to run model
#' calibration in Maxent using the SWD format.
#'
#' @param occ data.frame containing occurrence records of the species of interest.
#' Mandatory columns are: species, longitude, and latitude. Other columns will
#' be ignored.
#' @param species (character) name of column containing species name.
#' @param longitude (character) name of column containing longitude values.
#' @param latitude (character) name of column containing latitude values.
#' @param data.split.method (character) name of the method to split training and
#' testing records. Default and only option for now = "random".
#' @param train.proportion (numeric) proportion of records to be used for training
#' models. Default = 0.5
#' @param raster.layers RasterStack of predictor variables masked to the area
#' where the model will be calibrated.
#' @param sample.size (numeric) number of points to represent the background for
#' the model. Default = 10000
#' @param var.sets (character or list) if character the only option is "all_comb",
#' which will prepare the background to obtain all potential combinations of
#' variables considering the ones in \code{raster.layers}. The minimum number of
#' variables per set is defined by \code{min.number}. If a list, it must
#' contain character vectors with the variable names for each set. Names of
#' variables in sets must match names of layers in \code{raster.layers}.
#' The default (NULL) produces only one set of variables for the background.
#' @param min.number (numeric) minimum number of variables per set when option
#' "all_comb" is used in \code{var.sets}. Default = 2.
#' @param save (logical) whether or not to write csv files containing all, train,
#' and test occurrences, as well as the background. All files will contain
#' additional columns with the values of the variables for each coordinate.
#' Default = FALSE.
#' @param name.occ (character) name to be used for files with occurrence records.
#' Only one name is needed; a suffix will be added to represent all (_join),
#' _train, and _test records (e.g., "occurrences").
#' @param back.folder name for the csv file containing background coordinates
#' (e.g., "background").
#' @param set.seed seed to be used when sampling background and splitting records.
#' Default = 1
#'
#' @usage
#' prepare_swd(occ, species, longitude, latitude, data.split.method = "random",
#' train.proportion = 0.5, raster.layers, sample.size = 10000,
#' var.sets = NULL, min.number = 2, save = FALSE, name.occ,
#' back.folder, set.seed = 1)
#' @export
#'
#' @examples
#' # data
#' occ <- read.csv(list.files(system.file("extdata", package = "kuenm"),
#' pattern = "sp_joint.csv", full.names = TRUE))
#' occ <- data.frame(Species = "A_americanum", occ)
#'
#' mvars <- raster::stack(list.files(system.file("extdata", package = "kuenm"),
#' pattern = "Mbio_", full.names = TRUE))
#'
#' # preparing swd data one set of variables
#' prep <- prepare_swd(occ, species = "Species", longitude = "Longitude",
#' latitude = "Latitude", raster.layers = mvars,
#' sample.size = 5000)
#'
#' # various sets of variables
#' preps <- prepare_swd(occ, species = "Species", longitude = "Longitude",
#' latitude = "Latitude", raster.layers = mvars,
#' var.sets = "all_comb", min.number = 3,
#' sample.size = 5000)
prepare_swd <- function(occ, species, longitude, latitude,
data.split.method = "random", train.proportion = 0.5,
raster.layers, sample.size = 10000, var.sets = NULL,
min.number = 2, save = FALSE, name.occ, back.folder,
set.seed = 1) {
xy <- occ[, c(longitude, latitude)]
xyval <- raster::extract(raster.layers, xy, cellnumbers = TRUE)
xyras <- raster::xyFromCell(raster.layers, xyval[, 1])
occ <- data.frame(occ[, species], xyras, xyval[, -1])
colnames(occ)[1:3] <- c(species, longitude, latitude)
back <- raster::rasterToPoints(raster.layers)
set.seed(set.seed)
if (nrow(back) > sample.size) {back <- back[sample(nrow(back), sample.size), ]}
back <- data.frame(background = "background", back)
names(back)[1:3] <- c("background", longitude, latitude)
octi <- which(!paste(occ[, longitude], occ[, latitude]) %in%
paste(back[, longitude], back[, latitude]))
  bnames <- c("background", longitude, latitude) # needed below even when octi is empty
  if (length(octi) > 0) {
    octid <- occ[octi, ]
    names(octid)[1:3] <- bnames
    octid$background <- "background"
    back <- rbind(octid, back)
  }
back <- na.omit(back)
occ <- kuenm_occsplit(occ, train.proportion, data.split.method, save, name.occ)
if (save == TRUE) {dir.create(back.folder)}
if (!is.null(var.sets)) {
if (class(var.sets)[1] %in% c("character", "list")) {
if (class(var.sets)[1] == "character") {
if (var.sets == "all_comb") {
if (min.number == 1) {
message("Minimum number of variables in background sets is 1, do not use product features.")
}
var_names <- colnames(back)[-(1:3)]
var.sets <- all_var_comb(var_names, min.number)
} else {
warning("Argument 'var.sets' is not valid returning one set of background variables.")
}
} else {
ls <- sapply(var.sets, length)
if (any(ls == 1)) {
message("Minimum number of variables in background sets is 1, do not use product features.")
}
names(var.sets) <- paste0("Set_", 1:length(var.sets))
}
} else {
warning("Argument 'var.sets' is not valid returning one set of background variables.")
}
if (save == TRUE) {
nambs <- names(var.sets)
sv <- sapply(nambs, function(x) {
nms <- c(bnames, var.sets[[x]])
write.csv(back[, nms], file = paste0(back.folder, "/", x, ".csv"),
row.names = FALSE)
})
}
} else {
var.sets <- list(Set_1 = colnames(back)[-(1:3)])
if (save == TRUE) {
write.csv(back, file = paste0(back.folder, "/Set_1.csv"), row.names = FALSE)
}
}
occ$background <- back
occ$sets <- var.sets
return(occ)
}
#' Helper to create all variable combinations
#' @param var.names (character) vector of variable names
#' @param min.number (numeric) minimum number of variables per set.
#' @export
#' @return A list of vectors containing variable names per set.
all_var_comb <- function(var.names, min.number = 2) {
var_comb <- lapply(min.number:length(var.names), function(x) {
comb <- combn(var.names, m = x)
comb_vs <- lapply(1:dim(comb)[2], function(y) {comb[, y]})
})
var_combs <- do.call(c, var_comb)
names(var_combs) <- paste0("Set_", 1:length(var_combs))
return(var_combs)
}
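# Illustrative call on the helper alone (hypothetical variable names): three
# variables with min.number = 2 give the three pairs plus the full triple.
# all_var_comb(c("bio1", "bio12", "bio15"), min.number = 2)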
|
/R/prepare_swd.R
|
no_license
|
lucianolasala/kuenm
|
R
| false
| false
| 7,131
|
r
|
#' Prepare data for SWD maxent calibration processes
#'
#' @description prepare_swd helps to create csv files containing occurrence
#' records (all, train, and test records) and background coordinates, together
#' with values of predictor variables, that later can be used to run model
#' calibration in Maxent using the SWD format.
#'
#' @param occ data.frame containing occurrence records of the species of interest.
#' Mandatory columns are: species, longitude, and latitude. Other columns will
#' be ignored.
#' @param species (character) name of column containing species name.
#' @param longitude (character) name of column containing longitude values.
#' @param latitude (character) name of column containing latitude values.
#' @param data.split.method (character) name of the method to split training and
#' testing records. Default and only option for now = "random".
#' @param train.proportion (numeric) proportion of records to be used for training
#' models. Default = 0.5
#' @param raster.layers RasterStack of predictor variables masked to the area
#' where the model will be calibrated.
#' @param sample.size (numeric) number of points to represent the background for
#' the model. Default = 10000
#' @param var.sets (character or list) if character the only option is "all_comb",
#' which will prepare the background to obtain all potential combinations of
#' variables considering the ones in \code{raster.layers}. The minimum number of
#' variables per set is defied by \code{min.number}. If list, a list
#' of character vectors with the names of the variables per each set. Names of
#' variables in sets must match names of layers in \code{raster.layers}.
#' The default (NULL) produces only one set of variables for the background.
#' @param min.number (numeric) minimum number of variables per set when option
#' "all_comb" is used in \code{var.sets}. Default = 2.
#' @param save (logical) whether or not to write csv files containing all, train,
#' and test occurrences, as well as the background. All files will contain
#' additional columns with the values of the variables for each coordinate.
#' Default = FALSE.
#' @param name.occ (character) name to be used for files with occurrence records.
#' Only one name is needed, a sufix will be added to represent all (_join),
#' _train, and _test records (e.g., "occurrences").
#' @param back.folder name for the csv file containing background coordinates
#' (e.g., "background").
#' @param set.seed seed to be used when sampling background and splitting records.
#' Default = 1
#'
#' @usage
#' prepare_swd(occ, species, longitude, latitude, data.split.method = "random",
#' train.proportion = 0.5, raster.layers, sample.size = 10000,
#' var.sets = NULL, min.number = 2, save = FALSE, name.occ,
#' back.folder, set.seed = 1)
#' @export
#'
#' @examples
#' # data
#' occ <- read.csv(list.files(system.file("extdata", package = "kuenm"),
#' pattern = "sp_joint.csv", full.names = TRUE))
#' occ <- data.frame(Species = "A_americanum", occ)
#'
#' mvars <- raster::stack(list.files(system.file("extdata", package = "kuenm"),
#' pattern = "Mbio_", full.names = TRUE))
#'
#' # preparing swd data one set of variables
#' prep <- prepare_swd(occ, species = "Species", longitude = "Longitude",
#' latitude = "Latitude", raster.layers = mvars,
#' sample.size = 5000)
#'
#' # various sets of variables
#' preps <- prepare_swd(occ, species = "Species", longitude = "Longitude",
#' latitude = "Latitude", raster.layers = mvars,
#' var.sets = "all_comb", min.number = 3,
#' sample.size = 5000)
prepare_swd <- function(occ, species, longitude, latitude,
data.split.method = "random", train.proportion = 0.5,
raster.layers, sample.size = 10000, var.sets = NULL,
min.number = 2, save = FALSE, name.occ, back.folder,
set.seed = 1) {
xy <- occ[, c(longitude, latitude)]
xyval <- raster::extract(raster.layers, xy, cellnumbers = TRUE)
xyras <- raster::xyFromCell(raster.layers, xyval[, 1])
occ <- data.frame(occ[, species], xyras, xyval[, -1])
colnames(occ)[1:3] <- c(species, longitude, latitude)
back <- raster::rasterToPoints(raster.layers)
set.seed(set.seed)
if (nrow(back) > sample.size) {back <- back[sample(nrow(back), sample.size), ]}
back <- data.frame(background = "background", back)
names(back)[1:3] <- c("background", longitude, latitude)
octi <- which(!paste(occ[, longitude], occ[, latitude]) %in%
paste(back[, longitude], back[, latitude]))
if (length(octi) > 0) {
octid <- occ[octi, ]
bnames <- c("background", longitude, latitude)
names(octid)[1:3] <- bnames
octid$background <- "background"
back <- rbind(octid, back)
}
back <- na.omit(back)
occ <- kuenm_occsplit(occ, train.proportion, data.split.method, save, name.occ)
if (save == TRUE) {dir.create(back.folder)}
if (!is.null(var.sets)) {
if (class(var.sets)[1] %in% c("character", "list")) {
if (class(var.sets)[1] == "character") {
if (var.sets == "all_comb") {
if (min.number == 1) {
message("Minimum number of variables in background sets is 1, do not use product features.")
}
var_names <- colnames(back)[-(1:3)]
var.sets <- all_var_comb(var_names, min.number)
    } else {
      warning("Argument 'var.sets' is not valid; returning one set of background variables.")
      var.sets <- list(Set_1 = colnames(back)[-(1:3)])
    }
} else {
ls <- sapply(var.sets, length)
if (any(ls == 1)) {
message("Minimum number of variables in background sets is 1, do not use product features.")
}
names(var.sets) <- paste0("Set_", 1:length(var.sets))
}
  } else {
    warning("Argument 'var.sets' is not valid; returning one set of background variables.")
    var.sets <- list(Set_1 = colnames(back)[-(1:3)])
  }
if (save == TRUE) {
nambs <- names(var.sets)
sv <- sapply(nambs, function(x) {
nms <- c(bnames, var.sets[[x]])
write.csv(back[, nms], file = paste0(back.folder, "/", x, ".csv"),
row.names = FALSE)
})
}
} else {
var.sets <- list(Set_1 = colnames(back)[-(1:3)])
if (save == TRUE) {
write.csv(back, file = paste0(back.folder, "/Set_1.csv"), row.names = FALSE)
}
}
occ$background <- back
occ$sets <- var.sets
return(occ)
}
#' Helper to create all variable combinations
#' @param var.names (character) vector of variable names
#' @param min.number (numeric) minimum number of variables per set.
#' @export
#' @return A list of vectors containing variable names per set.
all_var_comb <- function(var.names, min.number = 2) {
  var_comb <- lapply(min.number:length(var.names), function(x) {
    comb <- combn(var.names, m = x)
    lapply(seq_len(ncol(comb)), function(y) comb[, y])
  })
var_combs <- do.call(c, var_comb)
names(var_combs) <- paste0("Set_", 1:length(var_combs))
return(var_combs)
}
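# Quick sketch of all_var_comb(); the variable names below are illustrative only.
example_sets <- all_var_comb(c("bio1", "bio12", "bio15"), min.number = 2)
# example_sets is a named list of four sets:
# Set_1 = c("bio1", "bio12"), Set_2 = c("bio1", "bio15"),
# Set_3 = c("bio12", "bio15"), Set_4 = c("bio1", "bio12", "bio15")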
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biomarkers.R
\name{filter_eitems_by_site}
\alias{filter_eitems_by_site}
\title{Function that filters clinical evidence items by tumor type/primary site}
\usage{
filter_eitems_by_site(eitems = NULL, ontology = NULL, primary_site = "")
}
\arguments{
\item{eitems}{data frame with clinical evidence items}
\item{ontology}{phenotype ontology data frame}
\item{primary_site}{primary tumor site}
}
\description{
Function that filters clinical evidence items by tumor type/primary site
}
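\examples{
\dontrun{
# Illustrative sketch only: 'my_eitems' and 'my_ontology' are hypothetical
# placeholders for real evidence-item and phenotype-ontology data frames.
breast_eitems <- filter_eitems_by_site(eitems = my_eitems,
                                       ontology = my_ontology,
                                       primary_site = "Breast")
}
}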
|
/pcgrr/man/filter_eitems_by_site.Rd
|
permissive
|
sigven/pcgr
|
R
| false
| true
| 561
|
rd
|
options(shiny.maxRequestSize = 500*1024^2)
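# note: 500 * 1024^2 bytes = 500 MB; this raises Shiny's default 5 MB upload limit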
|
/RegionalOverviews/overviews_shiny/server/general_settings.R
|
no_license
|
LauraDiernaes/RCGs
|
R
| false
| false
| 42
|
r
|
# Rscript top_scoring_ratioDown.R
# source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
# 3 => done in Rscript coexpr_DE_queryTAD.R
# check that withinCoexpr is the same thing as meanTADcorr !!! [combined pval still missing!!!]
script_name <- "top_scoring_ratioDown.R"
cat("> Start ", script_name, "\n")
startTime <- Sys.time()
require(foreach)
require(doMC)
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
registerDoMC(40)
plotType <- "png"
myHeight <- ifelse(plotType=="png", 400, 7)
myWidth <- myHeight
plotCex <- 1.4
myCexAxis <- 1.2
myCexLab <- 1.2
outFolder <- "TOP_SCORING_RATIODOWN"
dir.create(outFolder)
dataFolder <- "COEXPR_BETWEEN_WITHIN_ALL"
pipOutFolder <- file.path("PIPELINE", "OUTPUT_FOLDER")
all_pvalComb_files <- list.files(pipOutFolder, recursive = TRUE, pattern="emp_pval_combined.Rdata", full.names = FALSE)
stopifnot(length(all_pvalComb_files) > 0)
all_fc_files <- list.files(pipOutFolder, recursive = TRUE, pattern="all_meanLogFC_TAD.Rdata", full.names = FALSE)
stopifnot(length(all_fc_files) > 0)
all_meanCorr_files <- list.files(pipOutFolder, recursive = TRUE, pattern="all_meanCorr_TAD.Rdata", full.names = FALSE)
stopifnot(length(all_meanCorr_files) > 0)
#
# dataFile <- file.path(dataFolder, "allData_within_between_coexpr.Rdata")
# stopifnot(file.exists(dataFile))
# allData_within_between_coexpr <- eval(parse(text = load(dataFile)))
all_ratioDown_files <- list.files(pipOutFolder, recursive = TRUE, pattern="all_obs_ratioDown.Rdata", full.names = FALSE)
stopifnot(length(all_ratioDown_files) > 0)
### BUILD THE LOGFC TABLE
fc_file = all_fc_files[1]
fc_DT <- foreach(fc_file = all_fc_files, .combine = 'rbind') %dopar% {
curr_file <- file.path(pipOutFolder, fc_file)
stopifnot(file.exists(curr_file))
tad_fc <- eval(parse(text = load(curr_file)))
dataset <- dirname(dirname(fc_file))
data.frame(
dataset = dataset,
region = names(tad_fc),
meanFC = as.numeric(tad_fc),
stringsAsFactors = FALSE
)
}
### BUILD THE MEANCORR TABLE
meanCorr_file = all_meanCorr_files[1]
meanCorr_DT <- foreach(meanCorr_file = all_meanCorr_files, .combine = 'rbind') %dopar% {
curr_file <- file.path(pipOutFolder, meanCorr_file)
stopifnot(file.exists(curr_file))
tad_meanCorr <- eval(parse(text = load(curr_file)))
dataset <- dirname(dirname(meanCorr_file))
data.frame(
dataset = dataset,
region = names(tad_meanCorr),
meanCorr = as.numeric(tad_meanCorr),
stringsAsFactors = FALSE
)
}
### BUILD THE ratio down TABLE
rd_file = all_ratioDown_files[1]
rD_DT <- foreach(rd_file = all_ratioDown_files, .combine = 'rbind') %dopar% {
curr_file <- file.path(pipOutFolder,rd_file)
stopifnot(file.exists(curr_file))
tad_rd <- eval(parse(text = load(curr_file)))
dataset <- dirname(dirname(rd_file))
data.frame(
dataset = dataset,
region = names(tad_rd),
ratioDown = as.numeric(tad_rd),
stringsAsFactors = FALSE
)
}
### BUILD THE PVAL COMBINED TABLE
pvalcomb_file = all_pvalComb_files[1]
pvalComb_DT <- foreach(pvalcomb_file = all_pvalComb_files, .combine = 'rbind') %dopar% {
curr_file <- file.path(pipOutFolder,pvalcomb_file)
stopifnot(file.exists(curr_file))
tad_pvalComb <- eval(parse(text = load(curr_file)))
dataset <- dirname(dirname(pvalcomb_file))
adj_pvalComb <- p.adjust(tad_pvalComb, method="BH")
stopifnot(setequal(names(tad_pvalComb), names(adj_pvalComb)))
data.frame(
dataset = dataset,
region = names(tad_pvalComb),
pvalComb = as.numeric(tad_pvalComb),
adj_pvalComb = as.numeric(adj_pvalComb[names(tad_pvalComb)]),
stringsAsFactors = FALSE
)
}
all_DT <- merge(fc_DT, meanCorr_DT, by =c("dataset", "region"), all = TRUE)
stopifnot(nrow(fc_DT) == nrow(meanCorr_DT))
stopifnot(nrow(fc_DT) == nrow(all_DT))
stopifnot(!is.na(all_DT))
all_DT <- merge(all_DT, pvalComb_DT, by =c("dataset", "region"), all = TRUE)
stopifnot(nrow(all_DT) == nrow(pvalComb_DT))
stopifnot(!is.na(all_DT))
all_DT <- merge(all_DT, rD_DT, by =c("dataset", "region"), all = TRUE)
stopifnot(nrow(all_DT) == nrow(rD_DT))
stopifnot(!is.na(all_DT))
# sort the TADs by decreasing withinCoexpr
# plot level of coexpr within and between on the same plot
# tad_coexpr_DT <- data.frame(
# dataset = as.character(unlist(lapply(1:length(allData_within_between_coexpr), function(i) {
# ds_name <- names(allData_within_between_coexpr)[i]
# ds_name <- gsub("^CREATE_COEXPR_SORTNODUP/", "", ds_name)
# ds_name <- gsub("/pearson/coexprDT.Rdata$", "", ds_name)
# rep(ds_name, length(allData_within_between_coexpr[[i]]))
# }))),
# region = as.character(unlist(lapply(1:length(allData_within_between_coexpr), function(i) {
# names(allData_within_between_coexpr[[i]])
# }))),
#
# withinCoexpr = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["withinCoexpr"]])))),
# betweenAllCoexpr = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenAllCoexpr"]])))),
# betweenKbCoexpr = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenKbCoexpr"]])))),
# betweenNbrCoexpr = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenNbrCoexpr"]])))),
# withinCoexpr_cond1 = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["withinCoexpr_cond1"]])))),
# betweenAllCoexpr_cond1 = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenAllCoexpr_cond1"]])))),
# betweenKbCoexpr_cond1 = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenKbCoexpr_cond1"]])))),
# betweenNbrCoexpr_cond1 = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenNbrCoexpr_cond1"]])))),
# withinCoexpr_cond2 = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["withinCoexpr_cond2"]])))),
# betweenAllCoexpr_cond2 = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenAllCoexpr_cond2"]])))),
# betweenKbCoexpr_cond2 = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenKbCoexpr_cond2"]])))),
# betweenNbrCoexpr_cond2 = as.numeric(unlist(lapply(allData_within_between_coexpr,
# function(sublist) lapply(sublist, function(x) x[["betweenNbrCoexpr_cond2"]])))),
#
# stringsAsFactors = FALSE
# )
#
# stopifnot(nrow(tad_coexprDT) == nrow(all_DT))
# all_DT <- merge(all_DT, tad_coexpr_DT, by=c("dataset", "region"), all = TRUE)
# stopifnot(nrow(tad_coexprDT) == nrow(all_DT))
stopifnot(!is.na(all_DT))
outFile <- file.path(outFolder, "all_DT.Rdata")
save(all_DT, file = outFile)
cat("... written: ", outFile, "\n")
xvar <- "ratioDown"
all_DT$pvalComb_log10 <- -log10(all_DT$pvalComb)
all_DT$adj_pvalComb_log10 <- -log10(all_DT$adj_pvalComb)
all_DT$meanCorr_rank <- rank(- all_DT$meanCorr, ties = "min")
all_DT$meanFC_rank <- rank(- abs(all_DT$meanFC), ties = "min")
all_DT$avgRank <- 0.5*(all_DT$meanFC_rank + all_DT$meanCorr_rank)
all_DT <- do.call(rbind, by(all_DT, all_DT$dataset, function(subDT) {
subDT$meanCorr_dsRank <- rank(- subDT$meanCorr, ties = "min")
subDT$meanFC_dsRank <- rank(- subDT$meanFC, ties = "min")
subDT$avgRank_ds <- 0.5*(subDT$meanCorr_dsRank + subDT$meanFC_dsRank)
subDT
}))
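# Toy illustration of the ranking above (made-up values, not project data):
# with meanCorr = c(0.8, 0.2) and meanFC = c(1.5, -2.0), rank(-meanCorr)
# gives c(1, 2) and rank(-abs(meanFC)) gives c(2, 1), so both TADs end up
# with avgRank = 1.5; a lower avgRank means a stronger combined FC/corr signal.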
# all_y <- c("withinCoexpr", "meanFC", "meanCorr", "pvalComb", "adj_pvalComb")
all_y <- c("meanFC", "meanCorr",
"pvalComb", "adj_pvalComb",
"pvalComb_log10", "adj_pvalComb_log10",
"meanCorr_rank", "meanFC_rank", "avgRank",
"meanCorr_dsRank", "meanFC_dsRank", "avgRank_ds")
for(yvar in all_y) {
myx <- all_DT[, paste0(xvar)]
stopifnot(length(myx) > 0)
myy <- all_DT[, paste0(yvar)]
stopifnot(length(myy) > 0)
outFile <- file.path(outFolder, paste0(yvar, "_vs_", xvar, "_densplot.", plotType ))
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
densplot(
y = myy,
x = myx,
cex.lab=myCexLab,
cex.axis=myCexAxis,
ylab=paste0(yvar),
xlab = paste0(xvar),
cex = 0.5,
main = paste0(yvar, " vs. ", xvar)
)
addCorr(x = myx, y = myy, bty="n")
foo <- dev.off()
cat("... written: ", outFile, "\n")
}
# ######################################################################################
# ######################################################################################
# ######################################################################################
cat(paste0("*** DONE: ", script_name, "\n"))
cat(paste0(startTime, "\n", Sys.time(), "\n"))
|
/top_scoring_ratioDown.R
|
no_license
|
marzuf/CORRECT_Yuanlong_Cancer_HiC_data_TAD_DA
|
R
| false
| false
| 9,730
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudmonitoring_objects.R
\name{Timeseries}
\alias{Timeseries}
\title{Timeseries Object}
\usage{
Timeseries(points = NULL, timeseriesDesc = NULL)
}
\arguments{
\item{points}{The data points of this time series}
\item{timeseriesDesc}{The descriptor of this time series}
}
\value{
Timeseries object
}
\description{
Timeseries Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
The monitoring data is organized as metrics and stored as data points that are recorded over time. Each data point represents information like the CPU utilization of your virtual machine. A historical record of these data points is called a time series.
}
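\examples{
\dontrun{
# Minimal sketch; real point/descriptor structures are defined by the Cloud
# Monitoring v2beta2 API, so the empty lists below are placeholders only.
ts <- Timeseries(points = list(), timeseriesDesc = list())
}
}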
|
/googlecloudmonitoringv2beta2.auto/man/Timeseries.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 747
|
rd
|
setwd("C:/Users/Brad/Documents/coursera data science/Getting and Cleaning Data/Week_4")
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
# Unzip to ./data directory
unzip(zipfile="./data/Dataset.zip",exdir="./data")
# Read in tables:
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
features <- read.table("./data/UCI HAR Dataset/features.txt")
activity_labels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
#Assign Column Names
colnames(x_train) <- features[,2]
colnames(y_train) <- "activityId"
colnames(subject_train) <- "subjectId"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityId"
colnames(subject_test) <- "subjectId"
colnames(activity_labels) <-c("activityId", "activityType")
# Merging datasets
merge_train <- cbind(subject_train,x_train,y_train)
merge_test <- cbind(subject_test,x_test,y_test)
setAllInOne <- rbind(merge_train,merge_test)
# Create vector of column names for reference
colNames <- colnames(setAllInOne)
# add mean and stdev
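# note: the dots in "mean.." and "std.." are unescaped regex wildcards, so these
# patterns keep any column containing "mean"/"std" followed by two characters
# (e.g. "tBodyAcc-mean()-X" and "fBodyAcc-meanFreq()-X" both match)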
mean_and_std <- (grepl("activityId" , colNames) |
grepl("subjectId" , colNames) |
grepl("mean.." , colNames) |
grepl("std.." , colNames)
)
setForMeanAndStd <- setAllInOne[ , mean_and_std == TRUE]
# create dataset with descriptive activity names
setWithActivityNames <- merge(setForMeanAndStd, activity_labels,by = 'activityId',all.x=TRUE)
# create a second dataset with the average of each variable for each activity and each subject
secTidySet <- aggregate(. ~subjectId + activityId, setWithActivityNames,mean)
secTidySet <- secTidySet[order(secTidySet$subjectId, secTidySet$activityId),]
write.table(secTidySet, "secTidySet.txt", row.names=FALSE)
write.csv(secTidySet, "secTidySet.csv")
|
/gettingandcleaningdata/week_4/run_analysis.R
|
no_license
|
bjcarter77/datasciencecoursera
|
R
| false
| false
| 2,252
|
r
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2017 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Restoring objects from an Excel file (that was created via xlcDump)
#
# Author: Martin Studer, Mirai Solutions GmbH
#
#############################################################################
xlcRestore <- function(file = "dump.xlsx", pos = -1, overwrite = FALSE) {
wb = loadWorkbook(file, create = FALSE)
sheets = setdiff(getSheets(wb), getOption("XLConnect.Sheet"))
sapply(sheets, function(obj) {
if(exists(obj, where = pos) && !overwrite)
return(FALSE)
tryCatch({
data = readWorksheet(wb, sheet = obj, rownames = getOption("XLConnect.RownameCol"))
assign(obj, data, pos = pos)
TRUE
}, error = function(e) {
warning("Object '", obj, "' has not been restored. Reason: ", conditionMessage(e), call. = FALSE)
FALSE
})
})
}
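# Usage sketch (assumes an Excel dump previously written via xlcDump()):
# xlcRestore(file = "dump.xlsx", overwrite = TRUE)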
|
/R/xlcRestore.R
|
no_license
|
GSuvorov/xlconnect
|
R
| false
| false
| 1,740
|
r
|
# Created 2015-12-19
# Uses wdt.export.csv and wdt.anaes.csv
library(Hmisc)
library(ggplot2)
library(lubridate)
library(data.table)
library(gmodels)
library(plotly)
# Set-up plotly
Sys.setenv("plotly_username"="drstevok")
Sys.setenv("plotly_api_key"="9zzlo2rb2q")
rm(list=ls(all=TRUE))
load("../data/anaesthesia.RData")
str(tdt.a)
tdt.a[,anaesthetic.location := ifelse(indication.theatre,"Theatre","Labour ward")]
# Plot anaesthetic interventions over time
# - by location
d <- tdt.a[,.(N=.N),by=.(anaesthetic.location,
date=round_date(anaesthetic.date, unit="month"))]
g <- ggplot(data=d, aes(y=N,x=date))
g + geom_smooth() +
facet_grid(.~anaesthetic.location) +
labs(x="Year", title="Anaesthetics by location")
ggsave("../figs/anaesthesia.by.location.png")
# - by type
d <- tdt.a[anaesthetic!="Other",.(N=.N),by=.(anaesthetic,
date=round_date(anaesthetic.date, unit="month"))]
g <- ggplot(data=d, aes(y=N,x=date))
g + geom_smooth() +
facet_grid(.~anaesthetic) +
labs(x="Year", title="Anaesthetics by type")
ggsave("../figs/anaesthesia.by.type.png", width=8, height=4, scale=2)
# Now plot theatre workload stratified by time of day
load("../data/theatre.RData")
str(tdt.t)
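# bin theatre.hour into shifts: 8-16 = Day, 17-19 = Evening, all other hours (20-23, 0-7) = Night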
tdt.t[, shift:=
ifelse(theatre.hour %in% c(8:16), "Day",
ifelse(theatre.hour %in% c(17:19), "Evening",
"Night" ))]
describe(tdt.t$shift)
d <- tdt.t[,.(N=.N),by=.(shift,
date=round_date(theatre.date, unit="month"))]
g <- ggplot(data=d, aes(y=N,x=date))
g + geom_smooth() +
facet_grid(.~shift) +
labs(x="Year", y="Cases (per month)", title="Theatre work by shift")
ggsave("../figs/theatre.by.shift.png", width=8, height=6, scale=2)
# Now plot theatre workload stratified by time of day
# - additionally facet by day of the week
d <- tdt.t[,.(N=.N),by=.(dow=wday(theatre.date), shift,
date=round_date(theatre.date, unit="month"))]
str(d)
g <- ggplot(data=d, aes(y=N,x=date))
g + geom_smooth() +
facet_grid(shift~dow) +
labs(x="Year", y="Cases (per month)", title="Theatre work by shift")
ggsave("../figs/theatre.by.dow.shift.png", width=8, height=6, scale=2)
stop()
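# stop() halts execution here; everything below is exploratory scratch work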
# ==============
# = Quick play =
# ==============
# Plot c-section rate by time of day over time
# Plot mean age of primips over time
str(wdt)
require(ggplot2)
require(lubridate)
g <- ggplot(data=wdt[primip==1],
aes(y=age.mother, x=round_date(dob, "month")))
g + geom_smooth() +
labs(x="Year", y="Age", title="Primips (Mean age)") +
coord_cartesian(ylim=c(25,35))
quantile(wdt$bmi, c(0.4), na.rm=TRUE)
fivenum(wdt$bmi)
g <- ggplot(data=wdt,
aes(y=quantile(bmi,c(0.9),na.rm=TRUE), x=round_date(dob, "month")))
g + geom_smooth() +
labs(x="Year", y="BMI", title="BMI") +
coord_cartesian(ylim=c(20,40))
|
/labbooks/labbook_151218.R
|
no_license
|
docsteveharris/collab-obs
|
R
| false
| false
| 2,676
|
r
|
x<-c(1,2,3,4,5,6)
y<-c(9,8,7,6,5,4)
print(x==y)
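# element-wise comparison; prints FALSE FALSE FALSE FALSE TRUE FALSE (only 5 == 5)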
|
/q2.R
|
no_license
|
bishal145/AP_LAB6
|
R
| false
| false
| 51
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inheritance.R
\name{highlight_inheritance_100}
\alias{highlight_inheritance_100}
\title{Create a new column Inheritance that labels G4 Diachronic players
in the 100 labor minute experiment.}
\usage{
highlight_inheritance_100(frame)
}
\description{
Inheritance is "diachronic_inheritance" for the G4 Diachronic player,
and "no_inheritance" for all other session types.
}
|
/man/highlight_inheritance_100.Rd
|
no_license
|
pedmiston/totems-data
|
R
| false
| true
| 448
|
rd
|
library(data.table)
library(dplyr)
vcf_SNPs <- fread("sim_RFMix/80_20_6/admixed.snps", header = F)
full_gen_map <- fread("admixture-simulation/chr22.interpolated_genetic_map", header = F)
colnames(vcf_SNPs) <- "SNP"
colnames(full_gen_map) <- c("SNP", "BP", "cM")
vcf_gen_map <- left_join(vcf_SNPs, full_gen_map, by = "SNP")
vcf_gen_map$SNP <- NULL
vcf_gen_map$BP <- NULL
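# keep only the cM column; RFMix's .pos input appears to expect one genetic-map position (cM) per marker, one value per line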
vcf_gen_map[is.na(vcf_gen_map)] <- 0 # the first cM comes through as NA for some reason, so force NAs to 0
fwrite(vcf_gen_map, "sim_RFMix/admixed_chr22.pos", col.names = F)
|
/class_project_scripts/03b3_make_RFMix_genetic_map.R
|
no_license
|
WheelerLab/Local_Ancestry
|
R
| false
| false
| 534
|
r
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Main.R
\name{ks_stat}
\alias{ks_stat}
\title{ks_stat}
\usage{
ks_stat(actuals, predictedScores, returnKSTable = FALSE)
}
\arguments{
\item{actuals}{The actual binary flags for the response variable. It can take a numeric vector containing values of either 1 or 0, where 1 represents the 'Good' or 'Events' while 0 represents 'Bad' or 'Non-Events'.}
\item{predictedScores}{The prediction probability scores for each observation. If your classification model gives the 1/0 predictions, convert it to a numeric vector of 1's and 0's.}
\item{returnKSTable}{If set to TRUE, returns the KS table used to calculate the KS statistic instead. Defaults to FALSE.}
}
\value{
The KS statistic for a given actual values of a binary response variable and the respective prediction probability scores.
}
\description{
Compute the Kolmogorov-Smirnov statistic
}
\details{
Compute the KS statistic for a given actuals and predicted scores for a binary response variable. KS statistic is calculated as the maximum difference between the cumulative true positive and cumulative false positive rate.
Set returnKSTable to TRUE to see the calculations from ks_table.
}
\examples{
data('ActualsAndScores')
ks_stat(actuals=ActualsAndScores$Actuals, predictedScores=ActualsAndScores$PredictedScores)
}
\author{
Selva Prabhakaran \email{selva86@gmail.com}
}
|
/man/ks_stat.Rd
|
no_license
|
KillEdision/InformationValue
|
R
| false
| false
| 1,421
|
rd
|
testlist <- list(rates = numeric(0), thresholds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = NaN)
result <- do.call(grattan::IncomeTax,testlist)
str(result)
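# fuzzer-generated regression case for grattan::IncomeTax: empty 'rates', all-zero 'thresholds', NaN income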
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610125463-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 187
|
r
|
testlist <- list(rates = numeric(0), thresholds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = NaN)
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/renderer.R
\name{renderer_graphviz}
\alias{renderer_graphviz}
\title{Render as a plain graph}
\usage{
renderer_graphviz(
svg_fit = TRUE,
svg_contain = FALSE,
svg_resize_fit = TRUE,
zoom_controls = TRUE,
zoom_initial = NULL
)
}
\arguments{
\item{svg_fit}{Whether to scale the process map to fully fit in its container. If set to `TRUE` the process map will be scaled to be fully visible and may appear very small.}
\item{svg_contain}{Whether to scale the process map to use all available space (contain) from its container. If set to `TRUE`, this takes precedence over `svg_fit`.}
\item{svg_resize_fit}{Whether to (re)-fit the process map to its container upon resize.}
\item{zoom_controls}{Whether to show zoom controls.}
\item{zoom_initial}{The initial zoom level to use.}
}
\value{
A rendering function to be used with \code{\link{animate_process}}
}
\description{
This renderer uses viz.js to render the process map using the DOT layout.
}
\examples{
data(example_log)
# Animate the process with the default GraphViz DOT renderer
animate_process(example_log, renderer = renderer_graphviz())
}
\seealso{
animate_process
}
|
/man/renderer_graphviz.Rd
|
permissive
|
bupaverse/processanimateR
|
R
| false
| true
| 1,224
|
rd
|
library(testthat)
library(parsnip)
library(dplyr)
library(rlang)
# ------------------------------------------------------------------------------
context("prediciton with failed models")
# ------------------------------------------------------------------------------
iris_bad <-
iris %>%
mutate(big_num = Inf)
data("lending_club")
lending_club <-
lending_club %>%
dplyr::slice(1:200) %>%
mutate(big_num = Inf)
lvl <- levels(lending_club$Class)
# ------------------------------------------------------------------------------
ctrl <- control_parsnip(catch = TRUE)
# ------------------------------------------------------------------------------
test_that('numeric model', {
lm_mod <-
linear_reg() %>%
set_engine("lm") %>%
fit(Sepal.Length ~ ., data = iris_bad, control = ctrl)
expect_warning(num_res <- predict(lm_mod, iris_bad[1:11, -1]))
expect_equal(num_res, NULL)
expect_warning(ci_res <- predict(lm_mod, iris_bad[1:11, -1], type = "conf_int"))
expect_equal(ci_res, NULL)
expect_warning(pi_res <- predict(lm_mod, iris_bad[1:11, -1], type = "pred_int"))
expect_equal(pi_res, NULL)
})
# ------------------------------------------------------------------------------
test_that('classification model', {
log_reg <-
logistic_reg() %>%
set_engine("glm") %>%
fit(Class ~ log(funded_amnt) + int_rate + big_num, data = lending_club, control = ctrl)
expect_warning(
cls_res <-
predict(log_reg, lending_club %>% dplyr::slice(1:7) %>% dplyr::select(-Class))
)
expect_equal(cls_res, NULL)
expect_warning(
prb_res <-
predict(log_reg, lending_club %>% dplyr::slice(1:7) %>% dplyr::select(-Class), type = "prob")
)
expect_equal(prb_res, NULL)
expect_warning(
ci_res <-
predict(log_reg, lending_club %>% dplyr::slice(1:7) %>% dplyr::select(-Class), type = "conf_int")
)
expect_equal(ci_res, NULL)
})
|
/tests/testthat/test_failed_models.R
|
no_license
|
bradthiessen/parsnip
|
R
| false
| false
| 1,909
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getInp.R
\name{getInp}
\alias{getInp}
\title{Get prepared inp-object for use in TMB-call}
\usage{
getInp(
hydros,
toa,
E_dist,
n_ss,
pingType,
sdInits = 1,
rbi_min = 0,
rbi_max = 0,
ss_data_what = "est",
ss_data = 0,
biTable = NULL,
z_vec = NULL,
bbox = NULL
)
}
\arguments{
\item{hydros}{Dataframe from simHydros() or Dataframe with columns hx and hy containing positions of the receivers. Translate the coordinates to get the grid centre close to (0;0).}
\item{toa}{TOA-matrix: matrix with receivers in rows and detections in columns. Make sure that the receivers are in the same order as in hydros, and that the matrix is very regular: one ping per column (inlude empty columns if a ping is not detected).}
\item{E_dist}{Which distribution to use in the model - "Gaus" = Gaussian, "Mixture" = mixture of Gaussian and t or "t" = pure t-distribution}
\item{n_ss}{Number of soundspeed estimates: one estimate per hour is usually enough}
\item{pingType}{Type of transmitter to simulate - either stable burst interval ('sbi'), random burst interval ('rbi') or random burst interval where the random sequence is known a priori ('pbi')}
\item{sdInits}{If >0 initial values will be randomized around the normally fixed value using rnorm(length(inits), mean=inits, sd=sdInits)}
\item{rbi_min, rbi_max}{Minimum and maximum BI for random burst interval transmitters}
\item{ss_data_what}{What speed of sound (ss) data to be used. Default ss_data_what='est': ss is estimated by the model. Alternatively, if ss_data_what='data': ss_data must be provided and length(ss_data) == ncol(toa)}
\item{ss_data}{Vector of ss-data to be used if ss_data_what = 'est'. Otherwise ss_data <- 0 (default)}
\item{biTable}{Table of known burst intervals. Only used when pingType == "pbi". Default=NULL}
\item{z_vec}{Vector of known depth values (positive real). Default=NULL, in which case no 3D is assumed. If z_vec = "est" depth will be estimated.}
\item{bbox}{Spatial constraints in the form of a bounding box. See ?getBbox for details.}
}
\value{
List of input data ready for use in \code{runYaps()}
}
\description{
Wrapper-function to compile a list of input needed to run TMB
}
\examples{
\donttest{
library(yaps)
set.seed(42)
# # # Example using the ssu1 data included in package. See ?ssu1 for info.
# # # Set parameters to use in the sync model - these will differ per study
max_epo_diff <- 120
min_hydros <- 2
time_keeper_idx <- 5
fixed_hydros_idx <- c(2:3, 6, 8, 11, 13:17)
n_offset_day <- 2
n_ss_day <- 2
keep_rate <- 20
# # # Get input data ready for getSyncModel()
inp_sync <- getInpSync(sync_dat=ssu1, max_epo_diff, min_hydros, time_keeper_idx,
fixed_hydros_idx, n_offset_day, n_ss_day, keep_rate=keep_rate, silent_check=TRUE)
# # # Check that inp_sync is ok
checkInpSync(inp_sync, silent_check=FALSE)
# # # Also take a look at coverage of the sync data
getSyncCoverage(inp_sync, plot=TRUE)
# # # Fit the sync model
sync_model <- getSyncModel(inp_sync, silent=TRUE, max_iter=200, tmb_smartsearch = TRUE)
# # # On some systems it might work better if we disable the smartsearch feature in TMB
# # # To do so, set tmb_smartsearch = FALSE in getSyncModel()
# # # Visualize the resulting sync model
plotSyncModelResids(sync_model, by = "overall")
plotSyncModelResids(sync_model, by = "quantiles")
plotSyncModelResids(sync_model, by = "sync_tag")
plotSyncModelResids(sync_model, by = "hydro")
plotSyncModelResids(sync_model, by = "temporal_hydro")
plotSyncModelResids(sync_model, by = "temporal_sync_tag")
# # # If the above plots show outliers, sync_model can be fine tuned by excluding these.
# # # Use fineTuneSyncModel() for this.
# # # This should typically be done sequentially using eps_thresholds of e.g. 1E4, 1E3, 1E2, 1E2
sync_model <- fineTuneSyncModel(sync_model, eps_threshold=1E3, silent=TRUE)
sync_model <- fineTuneSyncModel(sync_model, eps_threshold=1E2, silent=TRUE)
# # # Apply the sync_model to detections data.
detections_synced <- applySync(toa=ssu1$detections, hydros=ssu1$hydros, sync_model)
# # # Prepare data for running yaps
hydros_yaps <- data.table::data.table(sync_model$pl$TRUE_H)
colnames(hydros_yaps) <- c('hx','hy','hz')
focal_tag <- 15266
rbi_min <- 20
rbi_max <- 40
synced_dat <- detections_synced[tag == focal_tag]
toa <- getToaYaps(synced_dat=synced_dat, hydros=hydros_yaps, pingType='rbi',
rbi_min=rbi_min, rbi_max=rbi_max)
bbox <- getBbox(hydros_yaps, buffer=50, pen=1e6)
inp <- getInp(hydros_yaps, toa, E_dist="Mixture", n_ss=5, pingType="rbi",
sdInits=1, rbi_min=rbi_min, rbi_max=rbi_max, ss_data_what="est", ss_data=0, bbox=bbox)
# # # Check that inp is ok
checkInp(inp)
# # # Run yaps on the prepared data to estimate track
yaps_out <- runYaps(inp, silent=TRUE, tmb_smartsearch=TRUE, maxIter=5000)
# # # Plot the results and compare to "the truth" obtained using gps
oldpar <- par(no.readonly = TRUE)
par(mfrow=c(2,2))
plot(hy~hx, data=hydros_yaps, asp=1, xlab="UTM X", ylab="UTM Y", pch=20, col="green")
lines(utm_y~utm_x, data=ssu1$gps, col="blue", lwd=2)
lines(y~x, data=yaps_out$track, col="red")
plot(utm_x~ts, data=ssu1$gps, col="blue", type="l", lwd=2)
points(x~top, data=yaps_out$track, col="red")
lines(x~top, data=yaps_out$track, col="red")
lines(x-2*x_sd~top, data=yaps_out$track, col="red", lty=2)
lines(x+2*x_sd~top, data=yaps_out$track, col="red", lty=2)
plot(utm_y~ts, data=ssu1$gps, col="blue", type="l", lwd=2)
points(y~top, data=yaps_out$track, col="red")
lines(y~top, data=yaps_out$track, col="red")
lines(y-2*y_sd~top, data=yaps_out$track, col="red", lty=2)
lines(y+2*y_sd~top, data=yaps_out$track, col="red", lty=2)
plot(nobs~top, data=yaps_out$track, type="p", main="#detecting hydros per ping")
lines(caTools::runmean(nobs, k=10)~top, data=yaps_out$track, col="orange", lwd=2)
par(oldpar)
}
}
|
/man/getInp.Rd
|
no_license
|
cran/yaps
|
R
| false
| true
| 6,025
|
rd
|
########################################################################################
## Register and launch an ST-GPR model from R to estimate OOP scalar
## Description: Sending ImFIn OOP models to ST-GPR
## Prepped dataset is sourced from the DOVE dataset on vaccine volumes
## Original Author: Matthew Schneider (adapted from Brandon Cummingham & Hayley Tymeson)
## Last edited: Emilie Maddison (ermadd@uw.edu), 24 June 2020
########################################################################################
rm(list = ls())
if (Sys.info()[1] == "Linux") {
j <- FILEPATH
h <- FILEPATH
}else if (Sys.info()[1] == "Windows") {
j <- FILEPATH
h <- FILEPATH
}
central_root <- FILEPATH
setwd(central_root)
source('FILEPATH/register.R')
source('FILEPATH/sendoff.R')
## To loop over all models - one for each of 9 antigens
runlist = list()
for (i in c(283:288)) {
# i = 1
####################################
# Set arguments
####################################
path_to_config <- "FILEPATH/oop_volume_config.csv"
# Arguments
me_name <- "imfin_oop_volume"
my_model_id <- i
project <- 'proj_fgh'
## number of parallelizations (more parallelizations --> faster). I usually do 50-100.
nparallel <- 50
## number of slots for each ST or GP job.
## You can profile by looking at a specific ST job through qacct.
slots <- 5
####################################
# Register an ST-GPR model
####################################
run <- register_stgpr_model(
path_to_config = path_to_config,
model_index_id = my_model_id
)
# Submit ST-GPR model for your newly-created run_id!
stgpr_sendoff(run, project, nparallel = nparallel) #,log_path = logs)
## Creating a list of run_ids to pull data later
run.dict <- data.frame(model_id = i, run_id = run)
runlist[[i]] <- run.dict
}
## Create dictionary list of run_ids and model_ids
run.dictionary <- do.call(rbind, runlist)
run.dictionary$date <- format(Sys.time(), "%Y%m%d") ##Today's date
run.dictionary$status <- ''
## Read in existing dictionary and append newest run_ids and model_ids
output.dir <- FILEPATH
complete.dictionary <- read.csv(paste0(output.dir,
"oop_volume_runid_modelid_dictionary.csv"))
complete.dictionary <- rbind(complete.dictionary, run.dictionary)
write.csv(complete.dictionary,
paste0(output.dir, "oop_volume_runid_modelid_dictionary.csv"),
row.names = FALSE)
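## Hedged sketch (not part of the original pipeline): look up the most recent
## run_id registered for a given model_id from the dictionary saved above.
## Assumes only the model_id / run_id / date columns created in this script.
latest_run_id <- function(dict, mid) {
  sub <- dict[dict$model_id == mid, ]
  sub$run_id[which.max(as.integer(sub$date))]
}
# Example: latest_run_id(complete.dictionary, 283)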
|
/Immunization Financing Project/code/out-of-pocket/03_stgpr/03b_2_run_stgpr_volume.R
|
no_license
|
hashimig/Resource_Tracking_Domestic_Health_Accounts
|
R
| false
| false
| 2,477
|
r
|
workDir <- 'C:/Users/smdevine/Desktop/post doc/soil health/publication/California Agriculture'
soilProps <- read.csv(file.path(workDir, 'soil_properties_SSURGO_by_SHR.csv'), stringsAsFactors = FALSE)
head(soilProps)
concat_func <- function(x,y) {paste0(x, " (", y, ")")} # format as "value (IQR)"
concat_func(soilProps$Sand, soilProps$Sand.IQR)
finalTable <- cbind(soilProps[1],
                    Sand = concat_func(soilProps$Sand, soilProps$Sand.IQR),
                    Silt = concat_func(soilProps$Silt, soilProps$Silt.IQR),
                    Clay = concat_func(soilProps$Clay, soilProps$Clay.IQR),
                    OM = concat_func(soilProps$OM, soilProps$OM.IQR),
                    CEC = concat_func(soilProps$CEC, soilProps$CEC.IQR),
                    pH = concat_func(soilProps$pH, soilProps$pH.IQR),
                    EC = concat_func(soilProps$EC, soilProps$EC.IQR),
                    LEP = concat_func(soilProps$LEP, soilProps$LEP.IQR),
                    Ksat = concat_func(soilProps$Ksat, soilProps$Ksat.IQR),
                    Storie = concat_func(soilProps$Storie, soilProps$Storie.IQR),
                    deparse.level = 1, stringsAsFactors = FALSE)
write.csv(finalTable, file = file.path(workDir, 'ssurgo_properties_final_bySHR.csv'), row.names = FALSE)
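# Hedged sketch (illustrative, not in the original script): the same table can
# be built programmatically for any set of property/IQR column pairs, assuming
# each property column "X" has a matching "X.IQR" column in soilProps.
props <- c("Sand", "Silt", "Clay", "OM", "CEC", "pH", "EC", "LEP", "Ksat", "Storie")
vals <- lapply(props, function(p) concat_func(soilProps[[p]], soilProps[[paste0(p, ".IQR")]]))
finalTable2 <- data.frame(soilProps[1], setNames(vals, props), stringsAsFactors = FALSE)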
|
/CalAg/SSURGO_properties_table.R
|
no_license
|
smdevine/SoilHealthRegions
|
R
| false
| false
| 1,024
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bakers_raw.R
\docType{data}
\name{bakers_raw}
\alias{bakers_raw}
\title{Bakers (raw)}
\format{
A data frame with 120 rows representing individual bakers and 8
variables:
\describe{
\item{series}{A factor denoting UK series (\code{1}-\code{10}).}
\item{baker_full}{A character string giving full name.}
\item{baker}{A character string with a given name or nickname.}
\item{age}{An integer denoting age in years at first episode appeared.}
\item{occupation}{A character string giving occupation.}
\item{hometown}{A character string giving hometown.}
\item{baker_last}{A character string giving family name.}
\item{baker_first}{A character string giving given name.}
}
}
\source{
See
\url{https://en.wikipedia.org/wiki/The_Great_British_Bake_Off_(series_1)#The_Bakers},
for example, for series 1 bakers.
}
\usage{
bakers_raw
}
\description{
Information about each baker who has appeared on the show.
}
\examples{
if (require('tibble')) {
bakers_raw
}
head(bakers_raw)
}
\keyword{datasets}
|
/man/bakers_raw.Rd
|
permissive
|
apreshill/bakeoff
|
R
| false
| true
| 1,067
|
rd
|
#' @author Divya Mistry
#' @description This file is used to read in GSE3431 expression CEL files, normalize the data
#' and then save the processed data so that it can be read in later to get
#' pairwise coexpression data
#'
require(affy)
require(microbenchmark)
#' I downloaded the raw CEL files from the author's site http://moment.utmb.edu/cgi-bin/dload.cgi
#' I read in CEL files so that I can RMA normalize
gse3431 <- ReadAffy(celfile.path = "data/GSE3431/")
rma.gse <- rma(gse3431)
#' expression matrix
expr.rma <- exprs(rma.gse)
#' sanity-check for normalized expressions
boxplot(expr.rma, las=2)
saveRDS(object = expr.rma, file = "data/Full_RMA_Processed_GSE3431_Expression_Matrix.RDS", compress = TRUE)
#' According to Li et al. (2014) and Tang et al. (2014) the data has 6777 "genes"
#' This is a result of noticing that multiple probesets seem to map to the same
#' transcribed region. A representative ID for this is obtained from reading
#' Affymetrix YG_S98 annotation file. The file has been downloaded in "data/GSE3431/"
#' In the following lines, I read the annotation file, and verify that there are
#' 6777 transcribed region, i.e. "Representative Public ID" as described at
#' http://www.affymetrix.com/support/technical/manual/taf_manual.affx#probe_design_information
#'
YG_S98Annot <- read.csv(file = "data/YG_S98.na34.annot.csv", header = T, skip=19, na.strings = "---")
length(levels(YG_S98Annot$Representative.Public.ID))
#' Let's make sure probeset names from annotation file and experiment data are same
#' Following test makes sure that not only are the probesets the same, but that
#' they are in the same order. This comes in handy when referencing things by index
#' between different data sources.
#'
which(YG_S98Annot$Probe.Set.ID != rownames(rma.gse))
#' Now we read the expression profiles of probesets that map to same transcribed region.
#' We keep the probeset representing the highest mean expression over all 36 conditions.
#' This is simply to avoid breaking apart expressions that probably belong to
#' one gene into multiple "genes". This is not a perfect solution, but it's good enough.
#'
transcribedRegions <- levels(YG_S98Annot$Representative.Public.ID)
keeperPS <- sapply(X = transcribedRegions, simplify = T, FUN = function(x){ # took around a minute and half to run
currResult <- YG_S98Annot[ YG_S98Annot$Representative.Public.ID == x, ]
if(dim(currResult)[1]>1) { # these are the probesets with multiple choices
# the probesets with highest total expression will end up having highest average
rSums <- rowSums(expr.rma[ currResult$Probe.Set.ID, ])
# we save the highest-contributing probeset (if several probesets tie, all tied ones are kept)
psKeeper <- which(rSums == max(rSums))
currResult$Probe.Set.ID[psKeeper]
} else {
# these are the unique choices, so we return them as-is
currResult$Probe.Set.ID
}
})
#' Save the list of unique transcribed regions separately for more use.
saveRDS(object = keeperPS, file = "data/gse3431_uniqueProbeSets_represented_by_NetAffx.RDS", ascii = T)
#' remove unnecessary objects
rm(expr.rma, rma.gse, gse3431, keeperPS, transcribedRegions, YG_S98Annot)
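#' Hedged sketch (not part of the original script): reload the saved objects and
#' subset the full expression matrix to the representative probesets, giving one
#' row per transcribed region for downstream coexpression analysis.
expr.rma <- readRDS("data/Full_RMA_Processed_GSE3431_Expression_Matrix.RDS")
keeperPS <- readRDS("data/gse3431_uniqueProbeSets_represented_by_NetAffx.RDS")
expr.unique <- expr.rma[keeperPS, ]
dim(expr.unique) # expected: 6777 probesets x 36 arrays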
|
/src/pre-prep/get_Tu_etal_data.R
|
permissive
|
divyamistry/diffslc
|
R
| false
| false
| 3,139
|
r
|
library(testthat)
library(rcdo)
test_check("rcdo")
|
/tests/testthat.R
|
permissive
|
robertjwilson/rcdo
|
R
| false
| false
| 52
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_list_instance_attributes}
\alias{connect_list_instance_attributes}
\title{This API is in preview release for Amazon Connect and is subject to
change}
\usage{
connect_list_instance_attributes(InstanceId, NextToken, MaxResults)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance.}
\item{NextToken}{The token for the next set of results. Use the value returned in the
previous response in the next request to retrieve the next set of
results.}
\item{MaxResults}{The maximum number of results to return per page.}
}
\description{
This API is in preview release for Amazon Connect and is subject to
change.
Returns a paginated list of all attribute types for the given instance.
}
\section{Request syntax}{
\preformatted{svc$list_instance_attributes(
InstanceId = "string",
NextToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
/cran/paws.customer.engagement/man/connect_list_instance_attributes.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 996
|
rd
|
context('test multi-word dictionaries')
txt <- c(d1 = "The United States is bordered by the Atlantic Ocean and the Pacific Ocean.",
d2 = "The Supreme Court of the United States is seldom in a united state.",
d3 = "It's Arsenal versus Manchester United, states the announcer.",
d4 = "We need Manchester Unity in the Federal Republic of Germany today.",
d5 = "United statehood is a good state.",
d6 = "luv the united states XXOO!")
toks <- tokenize(txt, removePunct = TRUE)
toks_hash <- tokens(txt, removePunct = TRUE)
test_that("multi-word dictionary keys are counted correctly", {
dict_mw_fixed <- dictionary(list(Countries = c("United States", "Federal Republic of Germany"),
oceans = c("Atlantic Ocean", "Pacific Ocean"),
Institutions = c("federal government", "Supreme Court"),
team = c("Manchester United", "Arsenal")))
tokens_case_asis <-
applyDictionary(toks, dict_mw_fixed, valuetype = "fixed", case_insensitive = FALSE)
dfm_case_asis <- dfm(tokens_case_asis)
expect_equal(as.vector(dfm_case_asis[, "Countries"]), c(1, 1, 0, 1, 0, 0))
expect_equal(as.vector(dfm_case_asis[, "team"]), c(0, 0, 2, 0, 0, 0))
expect_equal(as.vector(dfm_case_asis["d3", "team"]), 2)
# note the overlap of Manchester United states in d3
expect_equal(as.vector(dfm_case_asis["d3", "Countries"]), 0)
tokens_case_ignore <-
applyDictionary(toks, dict_mw_fixed, valuetype = "fixed", case_insensitive = TRUE)
dfm_case_ignore <- dfm(tokens_case_ignore)
expect_equal(as.vector(dfm_case_ignore[, "Countries"]), c(1, 1, 1, 1, 0, 1))
expect_equal(as.vector(dfm_case_ignore["d3", "team"]), 2)
# note the overlap of Manchester United states in d3
expect_equal(as.vector(dfm_case_ignore["d3", "Countries"]), 1)
tokens_case_asis_hash <-
applyDictionary(toks_hash, dict_mw_fixed, valuetype = "fixed", case_insensitive = FALSE, concatenator = ' ')
dfm_case_asis_hash <- dfm(tokens_case_asis_hash)
expect_equal(as.vector(dfm_case_asis_hash[, "Countries"]), c(1, 1, 0, 1, 0, 0))
expect_equal(as.vector(dfm_case_asis_hash[, "team"]), c(0, 0, 2, 0, 0, 0))
tokens_case_ignore_hash <-
applyDictionary(toks_hash, dict_mw_fixed, valuetype = "fixed", case_insensitive = TRUE, concatenator = ' ')
dfm_case_ignore_hash <- dfm(tokens_case_ignore_hash)
expect_equal(as.vector(dfm_case_ignore_hash[, "Countries"]), c(1, 1, 1, 1, 0, 1))
expect_equal(as.vector(dfm_case_ignore_hash["d3", "team"]), 2)
# note the overlap of Manchester United states in d3
expect_equal(as.vector(dfm_case_ignore_hash["d3", "Countries"]), 1)
})
test_that("entirely single-word dictionary keys are counted correctly", {
dict_sw_fixed <- dictionary(list(Countries = c("States", "Germany"),
oceans = c("Atlantic", "Pacific"),
Institutions = c("government", "Court"),
team = c("Manchester", "Arsenal")))
tokens_case_asis <-
applyDictionary(toks, dict_sw_fixed, valuetype = "fixed", case_insensitive = FALSE)
dfm_case_asis <- dfm(tokens_case_asis)
expect_equal(as.vector(dfm_case_asis[, "Countries"]), c(1, 1, 0, 1, 0, 0))
expect_equal(as.vector(dfm_case_asis[, "team"]), c(0, 0, 2, 1, 0, 0))
expect_equal(as.vector(dfm_case_asis["d3", "team"]), 2)
# note the overlap of Manchester United states in d3
expect_equal(as.vector(dfm_case_asis["d3", "Countries"]), 0)
tokens_case_ignore <-
applyDictionary(toks, dict_sw_fixed, valuetype = "fixed", case_insensitive = TRUE)
dfm_case_ignore <- dfm(tokens_case_ignore)
expect_equal(as.vector(dfm_case_ignore[, "Countries"]), c(1, 1, 1, 1, 0, 1))
expect_equal(as.vector(dfm_case_ignore["d3", "team"]), 2)
expect_equal(as.vector(dfm_case_ignore["d3", "Countries"]), 1)
})
test_that("selection of tokens from multi-word dictionaries works", {
dict_mw_fixed <- dictionary(list(Countries = c("United States", "Federal Republic of Germany"),
oceans = c("Atlantic Ocean", "Pacific Ocean"),
Institutions = c("federal government", "Supreme Court"),
team = c("Manchester United", "Arsenal")))
# does not work for multi-word dictionary keys
selectFeatures(toks, dict_mw_fixed, valuetype = "fixed", case_insensitive = FALSE)
selectFeatures(toks, dict_mw_fixed, valuetype = "fixed", case_insensitive = TRUE)
})
test_that("selection of tokens from single-word dictionaries works", {
dict_sw_fixed <- dictionary(list(Countries = c("States", "Germany"),
oceans = c("Atlantic", "Pacific"),
Institutions = c("government", "Court"),
team = c("Manchester", "Arsenal")))
# works ok for single word dictionary keys
selectFeatures(toks, dict_sw_fixed, valuetype = "fixed", case_insensitive = FALSE)
selectFeatures(toks, dict_sw_fixed, valuetype = "fixed", case_insensitive = TRUE)
})
test_that("multi-word dictionary behavior is not sensitive to the order of dictionary entries", {
txt <- c(d1 = "The United States is a country.",
d2 = "Arsenal v Manchester United, states the announcer.")
toks <- tokens(txt, removePunct = TRUE)
toks_old <- tokenize(txt, removePunct = TRUE)
dict1 <- dictionary(list(Countries = c("United States"),
team = c("Manchester United", "Arsenal")))
dict2 <- dictionary(list(team = c("Manchester United", "Arsenal"),
Countries = c("United States")))
expect_equal(
lapply(as.list(applyDictionary(toks, dictionary = dict1, valuetype = "fixed")), sort),
lapply(as.list(applyDictionary(toks, dictionary = dict2, valuetype = "fixed")), sort)
)
expect_equal(
lapply(as.list(applyDictionary(toks_old, dictionary = dict1, valuetype = "fixed",
case_insensitive = TRUE, concatenator = " ")), sort),
lapply(as.list(applyDictionary(toks_old, dictionary = dict2, valuetype = "fixed",
case_insensitive = TRUE, concatenator = " ")), sort)
)
})
test_that("tokenizedTexts and tokens behave the same", {
txt <- c(d1 = "The United States is bordered by the Atlantic Ocean and the Pacific Ocean.",
d2 = "The Supreme Court of the United States is seldom in a united state.",
d3 = "It's Arsenal versus Manchester United, states the announcer.",
d4 = "We need Manchester Unity in the Federal Republic of Germany today.",
d5 = "United statehood is a good state.",
d6 = "luv the united states XXOO!")
toks <- tokenize(txt, removePunct = TRUE)
toks_hashed <- tokens(txt, removePunct = TRUE)
dict_mw_fixed <- dictionary(list(Countries = c("United States", "Federal Republic of Germany"),
oceans = c("Atlantic Ocean", "Pacific Ocean"),
Institutions = c("federal government", "Supreme Court"),
team = c("Manchester United", "Arsenal")))
expect_equal(
as.tokenizedTexts(applyDictionary(toks_hashed, dict_mw_fixed,
valuetype = "fixed", case_insensitive = TRUE)),
applyDictionary(toks, dictionary = dict_mw_fixed, valuetype = "fixed",
case_insensitive = TRUE, concatenator = " ")
)
})
test_that("classic and hashed applyDictionary produce equivalent objects", {
expect_equal(
applyDictionary(tokens("The United States is big."),
dictionary = dictionary(list(COUNTRY = "United States")),
valuetype = "fixed"),
as.tokens(applyDictionary(tokens("The United States is big.", hash = FALSE),
dictionary = dictionary(list(COUNTRY = "United States")),
valuetype = "fixed"))
)
})
test_that("classic and hashed applyDictionary produce same results", {
# with inaugural texts
toks <- tokenize(inaugTexts)
toksh <- tokens(inaugTexts)
dict <- dictionary(list(institutions = c("Supreme Court", "federal government",
"House of Representatives"),
countries = c("United States", "Soviet Union"),
tax = c("income tax", "property tax", "sales tax")))
expect_equal(dfm(applyDictionary(toks, dict, valuetype = "fixed"), verbose = FALSE),
dfm(applyDictionary(toksh, dict, valuetype = "fixed"), verbose = FALSE))
# microbenchmark::microbenchmark(
# classic = applyDictionary(toks, dict, valuetype = "fixed"),
# hashed = applyDictionary(toksh, dict, valuetype = "fixed"),
# unit = "relative", times = 30
# )
})
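# Hedged sketch (not in the original file): a minimal extra check using the same
# pre-v1 quanteda API exercised above; the expected count mirrors the earlier
# "classic and hashed" tests.
test_that("a single multi-word key is counted once per document (sketch)", {
    toks <- tokens("The United States is big.", removePunct = TRUE)
    dict <- dictionary(list(Countries = "United States"))
    d <- dfm(applyDictionary(toks, dict, valuetype = "fixed"))
    expect_equal(as.vector(d[, "Countries"]), 1)
})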
|
/tests/testthat/test_dictionary_multiword.R
|
no_license
|
fc1315/quanteda
|
R
| false
| false
| 9,245
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crp_related_functions.R
\name{alpha_estimator}
\alias{alpha_estimator}
\title{alpha_estimator}
\usage{
alpha_estimator(crp, alpha_lim, nbre_alpha = 10000)
}
\arguments{
\item{crp}{a numerical vector, the output of \code{\link{rcrp}}.}
\item{alpha_lim}{a vector of length 2, the range of the potential values for the estimate.}
\item{nbre_alpha}{an integer, the number of the potential values for the estimate.}
}
\value{
A list containing
\describe{
\item{alpha_hat}{the ML estimate,}
\item{elln_alpha}{a useful function to derive the estimate.}
}
}
\description{
Compute the Maximum Likelihood estimate of \code{alpha} from a
\code{crp}.
}
\examples{
crp <- rcrp(5,15)
print(crp)
alpha_ML <- alpha_estimator(crp,c(0.1,10))
alpha_ML$alpha_hat
plot(seq(0.1,10,le=1e4),alpha_ML$elln_alpha,type="l",xlab="",ylab="")
abline(h=length(crp),col="gray")
abline(v=alpha_ML$alpha_hat,col="gray")
}
|
/man/alpha_estimator.Rd
|
no_license
|
pmgrollemund/phyloCRP
|
R
| false
| true
| 962
|
rd
|
# author: Alex Rayón
# date: October, 2020
# First of all, clear the workspace in case any dataset or other objects are loaded
rm(list = ls())
# Set the working directory
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
# Load the required libraries
library(dplyr)
library(caret)
library(funModeling)
# Read the hospital length-of-stay dataset
estancias <- read.csv("data/duracionEstancia.csv")
# A bit of data exploration
str(estancias)
#############################################################################################
# ESTANCIAS (HOSPITAL STAYS)
#############################################################################################
# Explanation of all the variables here:
# https://revolution-computing.typepad.com/.a/6a010534b1db25970b01b8d280d87b970c-pi
# More information here: https://blog.revolutionanalytics.com/2017/05/hospital-length-of-stay.html
#############################################################################################
# 1. Unique identifier of the hospital admission
colnames(estancias)[1]<-"idAdmision"
estancias$idAdmision<-as.factor(estancias$idAdmision)
# 2. Visit date
colnames(estancias)[2]<-"fechavisita"
estancias$fechavisita<-as.factor(estancias$fechavisita)
# 3. Number of readmissions in the last 180 days
colnames(estancias)[3]<-"numReadmisiones"
estancias$numReadmisiones<-as.numeric(estancias$numReadmisiones)
# 4. Gender
colnames(estancias)[4]<-"genero"
estancias$genero<-ifelse(estancias$genero=="M",0,1)
estancias$genero<-as.factor(estancias$genero)
# 5. Renal disease flag
colnames(estancias)[5]<-"renal"
estancias$renal<-as.factor(estancias$renal)
# 6. Asthma flag
colnames(estancias)[6]<-"asma"
estancias$asma<-as.factor(estancias$asma)
# 7. Iron deficiency flag
colnames(estancias)[7]<-"faltaHierro"
estancias$faltaHierro<-as.factor(estancias$faltaHierro)
# 8. Pneumonia flag
colnames(estancias)[8]<-"pneumonia"
estancias$pneumonia<-as.factor(estancias$pneumonia)
# 9. Substance dependence flag
colnames(estancias)[9]<-"dependenciaSustancias"
estancias$dependenciaSustancias<-as.factor(estancias$dependenciaSustancias)
# 10. Psychological disorder flag
colnames(estancias)[10]<-"desordenPsicologico"
estancias$desordenPsicologico<-as.factor(estancias$desordenPsicologico)
# 11. Depression flag
colnames(estancias)[11]<-"depresion"
estancias$depresion<-as.factor(estancias$depresion)
# 12. Other psychiatric disorders flag
colnames(estancias)[12]<-"otrosDesordenesPsiquicos"
estancias$otrosDesordenesPsiquicos<-as.factor(estancias$otrosDesordenesPsiquicos)
# 13. Fibrosis flag
colnames(estancias)[13]<-"fibrosis"
estancias$fibrosis<-as.factor(estancias$fibrosis)
# 14. Malnutrition flag
colnames(estancias)[14]<-"malnutricion"
estancias$malnutricion<-as.factor(estancias$malnutricion)
# 15. Blood disorders flag
colnames(estancias)[15]<-"desordenSangre"
estancias$desordenSangre<-as.factor(estancias$desordenSangre)
# 16. Hematocrit value (g/dL)
colnames(estancias)[16]<-"hematocrito"
estancias$hematocrito<-as.numeric(estancias$hematocrito)
# 17. Neutrophils value (cells/microL)
colnames(estancias)[17]<-"neutrofilos"
estancias$neutrofilos<-as.numeric(estancias$neutrofilos)
# 18. Sodium value (mmol/L)
colnames(estancias)[18]<-"sodio"
estancias$sodio<-as.numeric(estancias$sodio)
# 19. Glucose value (mmol/L)
colnames(estancias)[19]<-"glucosa"
estancias$glucosa<-as.numeric(estancias$glucosa)
# 20. Blood urea nitrogen value (mg/dL)
colnames(estancias)[20]<-"ureaSangre"
estancias$ureaSangre<-as.numeric(estancias$ureaSangre)
# 21. Creatinine value (mg/dL)
colnames(estancias)[21]<-"creatinina"
estancias$creatinina<-as.numeric(estancias$creatinina)
# 22. BMI value (kg/m2)
colnames(estancias)[22]<-"bmi"
estancias$bmi<-as.numeric(estancias$bmi)
# 23. Pulse value (beats/minute)
colnames(estancias)[23]<-"pulso"
estancias$pulso<-as.numeric(estancias$pulso)
# 24. Respiration value (breaths/minute)
colnames(estancias)[24]<-"respiracion"
estancias$respiracion<-as.numeric(estancias$respiracion)
# 25. Secondary diagnosis flag
colnames(estancias)[25]<-"diagnosticoSecundario"
estancias$diagnosticoSecundario<-as.factor(estancias$diagnosticoSecundario)
# 26. Discharge date
colnames(estancias)[26]<-"fechaAlta"
estancias$fechaAlta<-as.character(estancias$fechaAlta)
# 27. Hospital identifier
colnames(estancias)[27]<-"idHospital"
estancias$idHospital<-as.factor(estancias$idHospital)
# 28. Length of stay
colnames(estancias)[28]<-"duracionEstancia"
estancias$duracionEstancia<-as.numeric(estancias$duracionEstancia)
# Now let's take care of all the data quality aspects
# 1. Profile the structure of the dataset
df_status(estancias)
# Some of the metrics we obtain:
# q_zeros: number of zeros (p_zeros: as a percentage)
# q_inf: number of infinite values (p_inf: as a percentage)
# q_na: number of NAs (p_na: as a percentage)
# type: factor or numeric
# unique: number of unique values
# Why are these metrics important?
# - Zeros: Variables with many zeros will not be very useful for the models.
#   They can even end up strongly biasing the model.
# - NAs: Some models even exclude rows that contain NAs (RF, for example).
#   Because of this, the final models can carry biases from the missing rows.
# - Inf.: If we leave infinite values in, some R functions will not behave in a predictable way. It breaks consistency.
# - Type: Types must be checked carefully, because they do not always come in the right format, even if they look fine visually.
# - Unique: When the data values show a lot of variety, we can suffer overfitting.
# Profile the input data and get the status table
datos_status=df_status(estancias, print_results = F)
# Remove variables with more than 60% NA or infinite values
# Handling of unique values
# A constant variable (such as Currency) contributes nothing. Why?
vars_to_remove=filter(datos_status, p_na > 60 | unique==1 | p_inf > 60) %>% .$variable
vars_to_remove
# Keep all columns except those in the vector 'vars_to_remove' built from df_status
estancias=dplyr::select(estancias, -one_of(vars_to_remove))
# Let's handle outliers to clean the data: what decision do we take in this case?
estancias$outlier=FALSE
for (i in 1:(ncol(estancias)-1)){ # skip the last column ("outlier")
  columna = estancias[,i]
  if (is.numeric(columna)){
    media = mean(columna)
    desviacion = sd(columna)
    estancias$outlier = (estancias$outlier | columna>(media+3*desviacion) | columna<(media-3*desviacion))
  }
}
# Tabulate the TRUE and FALSE values
table(estancias$outlier)
# Split off the data frame with the outliers... I think they deserve a proper study
datosOutliers = estancias[estancias$outlier,]
# Mark the outliers in the plot, remove them and redraw
estancias=estancias[!estancias$outlier,]
# We no longer need the "outlier" column
estancias$outlier=NULL
#############################################################################################
#############################################################################################
# MODELLING
#############################################################################################
#############################################################################################
#############################################################################################
# 2. MODELLING - PREDICTIVE
#############################################################################################
# Let's remove some variables before training: why?
variables_a_quitar <- c("idAdmision", "idHospital", "fechaAlta","fechavisita")
estancias <- dplyr::select(estancias, -one_of(variables_a_quitar))
# To tackle this problem, we split the data into a training set and another set
# to evaluate the model.
# The R package "caret" helps us with that.
set.seed(998)
indice <- createDataPartition(estancias$duracionEstancia,
                              p = .75,
                              list = FALSE)
training <- estancias[indice,]
testing <- estancias[ - indice,]
# Build a data frame to evaluate how well my model has learned to predict
verificacionPredicciones <- subset(testing, select = c(duracionEstancia))
# 1. Train the first model
parametrosEntrenamiento <- trainControl(## 10-fold CV
  method = "repeatedcv",
  number = 10,
  repeats = 10)
# (1) Linear regression: LM
modelo_lm1 <- train(duracionEstancia ~ .,
                    data = training,
                    method = "lm",
                    trControl = parametrosEntrenamiento)
# Let's look at a summary of the model
summary(modelo_lm1)
# Let's look at its RMSE
modelo_lm1
# Plot it
verificacionPredicciones$modelo_lm1 <- predict(modelo_lm1,testing)
# From here on we may in some cases run into data type problems
#### CART (Classification And Regression Trees) ####
set.seed(400)
ctrl <- trainControl(method="repeatedcv",repeats = 3)
modelo_arbol <- train(duracionEstancia~ .,
                      data = training,
                      method = "rpart",
                      trControl = ctrl,
                      tuneLength = 20,
                      metric = "RMSE")
# Look at the results again
modelo_arbol
plot(modelo_arbol)
verificacionPredicciones$modelo_arbol <- predict(modelo_arbol,testing)
# Check the verification matrix
/SC1_hospital.R
|
no_license
|
SergioMateosSanz/Rexamples
|
R
| false
| false
| 9,653
|
r
|
# Generalized distance matrix: correlation-based dissimilarity for "spearman",
# "pearson" and "logpearson", otherwise stats::dist. Note: pcor() is assumed to
# be a Pearson-correlation helper defined elsewhere in this codebase.
dist.gen <- function(x,method="euclidean", ...){
  if ( method == "spearman" ) 1 - cor(t(x),method=method,...)
  else if ( method == "pearson" ) 1 - pcor(t(x))
  else if ( method == "logpearson" ) 1 - pcor(log2(t(x)))
  else as.matrix(dist(x,method=method,...))
}
# Horizontal error bars (x ± x.err) with end caps of half-height h
plot.err.bars.x <- function(x, y, x.err, col="black", lwd=1, lty=1, h=0.1){
  arrows(x-x.err,y,x+x.err,y,code=0, col=col, lwd=lwd, lty=lty)
  arrows(x-x.err,y-h,x-x.err,y+h,code=0, col=col, lwd=lwd, lty=lty)
  arrows(x+x.err,y-h,x+x.err,y+h,code=0, col=col, lwd=lwd, lty=lty)
}
# Vertical error bars (y ± y.err) with end caps of half-width h
plot.err.bars.y <- function(x, y, y.err, col="black", lwd=1, lty=1, h=0.1){
  arrows(x,y-y.err,x,y+y.err,code=0, col=col, lwd=lwd, lty=lty)
  arrows(x-h,y-y.err,x+h,y-y.err,code=0, col=col, lwd=lwd, lty=lty)
  arrows(x-h,y+y.err,x+h,y+y.err,code=0, col=col, lwd=lwd, lty=lty)
}
# clusGapExt: variant of the gap statistic (cf. cluster::clusGap) extended to
# support custom distance metrics via dist.gen(), precomputed dissimilarity
# matrices (diss=TRUE) and skipping the bootstrap reference step (random=FALSE).
clusGapExt <- function (x, FUNcluster, K.max, B = 100, verbose = interactive(), method="euclidean", random=TRUE, diss=FALSE,
...)
{
stopifnot(is.function(FUNcluster), length(dim(x)) == 2, K.max >=
2, (n <- nrow(x)) >= 1, (p <- ncol(x)) >= 1)
if (B != (B. <- as.integer(B)) || (B <- B.) <= 0)
stop("'B' has to be a positive integer")
if (is.data.frame(x))
x <- as.matrix(x)
ii <- seq_len(n)
W.k <- function(X, kk) {
clus <- if (kk > 1)
FUNcluster(X, kk, ...)$cluster
else rep.int(1L, nrow(X))
0.5 * sum(vapply(split(ii, clus), function(I) {
if ( diss ){
xs <- X[I,I, drop = FALSE]
sum(xs/nrow(xs))
}else{
xs <- X[I, , drop = FALSE]
d <- dist.gen(xs,method=method)
sum(d/nrow(xs))
}
}, 0))
}
logW <- E.logW <- SE.sim <- numeric(K.max)
if (verbose)
cat("Clustering k = 1,2,..., K.max (= ", K.max, "): .. \n",
sep = "")
for (k in 1:K.max){
if (verbose) cat("k =",k,"\r")
logW[k] <- log(W.k(x, k))
}
if (verbose){
cat("\n")
cat("done.\n")
}
if (random){
xs <- scale(x, center = TRUE, scale = FALSE)
m.x <- rep(attr(xs, "scaled:center"), each = n)
V.sx <- svd(xs)$v
rng.x1 <- apply(xs %*% V.sx, 2, range)
logWks <- matrix(0, B, K.max)
if (verbose)
cat("Bootstrapping, b = 1,2,..., B (= ", B, ") [one \".\" per sample]:\n",
sep = "")
for (b in 1:B) {
z1 <- apply(rng.x1, 2, function(M, nn) runif(nn, min = M[1],
max = M[2]), nn = n)
z <- tcrossprod(z1, V.sx) + m.x
##z <- apply(x,2,function(m) runif(length(m),min=min(m),max=max(m)))
##z <- apply(x,2,function(m) sample(m))
for (k in 1:K.max) {
logWks[b, k] <- log(W.k(z, k))
}
if (verbose)
cat(".", if (b%%50 == 0)
paste(b, "\n"))
}
if (verbose && (B%%50 != 0))
cat("", B, "\n")
E.logW <- colMeans(logWks)
SE.sim <- sqrt((1 + 1/B) * apply(logWks, 2, var))
}else{
E.logW <- rep(NA,K.max)
SE.sim <- rep(NA,K.max)
}
structure(class = "clusGap", list(Tab = cbind(logW, E.logW,
gap = E.logW - logW, SE.sim), n = n, B = B, FUNcluster = FUNcluster))
}
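## Usage sketch (toy data): within-cluster dispersion curve on a distance
## matrix, mirroring how clustfun() below calls clusGapExt (random=FALSE
## skips the gap-statistic reference sets).
# x  <- matrix(rnorm(200), nrow = 20)
# dM <- dist.gen(x, method = "euclidean")
# gp <- clusGapExt(dM, FUNcluster = function(x, k) cluster::pam(as.dist(x), k),
#                  K.max = 8, random = FALSE, diss = TRUE)
# plot(1:8, gp$Tab[, "logW"], type = "b", xlab = "k", ylab = "log W(k)")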
# Estimate the number of clusters from the saturation of log W(k), then assess
# cluster stability by bootstrapping (kmedoids, kmeans or hclust back-ends)
clustfun <- function(diM,clustnr=20,bootnr=50,samp=NULL,sat=TRUE,cln=NULL,rseed=17000,FUNcluster="kmedoids")
{
if ( clustnr < 2) stop("Choose clustnr > 1")
if ( is.null(cln) ) cln <- 0
if ( nrow(diM) - 1 < clustnr ) clustnr <- nrow(diM) - 1
if ( sat | cln > 0 ){
gpr <- NULL
f <- if ( cln == 0 ) TRUE else FALSE
if ( sat ){
set.seed(rseed)
if ( !is.null(samp) ) n <- sample(1:ncol(diM),min(samp,ncol(diM))) else n <- 1:ncol(diM)
if ( FUNcluster =="kmedoids" ) gpr <- clusGapExt(diM[n,n], FUNcluster = function(x,k) cluster::pam(as.dist(x),k), K.max = clustnr, random=FALSE, diss=TRUE)
if ( FUNcluster =="kmeans" ) gpr <- clusGapExt(diM[n,n], FUNcluster = kmeans, K.max = clustnr, random=FALSE, diss=TRUE, iter.max=100)
if ( FUNcluster =="hclust" ) gpr <- clusGapExt(diM[n,n], FUNcluster = function(x,k){ y <- fpc::disthclustCBI(as.dist(x),k,link="single",scaling=FALSE,method="ward.D2"); y$cluster <- y$partition; y }, K.max = clustnr, random=FALSE, diss=TRUE)
g <- gpr$Tab[,1]
y <- g[-length(g)] - g[-1]
mm <- numeric(length(y))
nn <- numeric(length(y))
for ( i in 1:length(y)){
mm[i] <- mean(y[i:length(y)])
nn[i] <- sqrt(var(y[i:length(y)]))
}
if ( f ) cln <- max(min(which( y - (mm + nn) < 0 )),1)
}
if ( cln <= 1 ) {
clb <- list(result=list(partition=rep(1,ncol(diM))),bootmean=1)
names(clb$result$partition) <- colnames(diM)
return(list(clb=clb,gpr=gpr))
}
if ( FUNcluster =="kmedoids" ){
if ( is.null(samp) ) clb <- fpc::clusterboot(diM,B=bootnr,bootmethod="boot",clustermethod=pamkdCBI,scaling=FALSE,distances=TRUE,k=cln,multipleboot=FALSE,bscompare=TRUE,seed=rseed)
if ( !is.null(samp) ) clb <- fpc::clusterboot(diM,B=bootnr,bootmethod="subset",subtuning=min(ncol(diM),samp),clustermethod=pamkdCBI,scaling=FALSE,distances=TRUE,k=cln,multipleboot=FALSE,bscompare=TRUE,seed=rseed)
}
if ( FUNcluster =="kmeans" ){
if ( is.null(samp) ) clb <- fpc::clusterboot(diM,B=bootnr,distances=TRUE,bootmethod="boot",clustermethod=fpc::kmeansCBI,krange=cln,scaling=FALSE,multipleboot=FALSE,bscompare=TRUE,seed=rseed)
if ( !is.null(samp) ) clb <- fpc::clusterboot(diM,B=bootnr,distances=TRUE,bootmethod="subset",subtuning=min(ncol(diM),samp),clustermethod=fpc::kmeansCBI,krange=cln,scaling=FALSE,multipleboot=FALSE,bscompare=TRUE,seed=rseed)
}
if ( FUNcluster =="hclust" ){
if ( is.null(samp) ) clb <- fpc::clusterboot(diM,B=bootnr,distances=TRUE,bootmethod="boot",clustermethod=fpc::disthclustCBI,method="ward.D2",k=cln,link="single",scaling=FALSE,multipleboot=FALSE,bscompare=TRUE,seed=rseed)
if ( !is.null(samp) ) clb <- fpc::clusterboot(diM,B=bootnr,distances=TRUE,bootmethod="subset",subtuning=min(ncol(diM),samp),clustermethod=fpc::disthclustCBI,method="ward.D2",k=cln,link="single",scaling=FALSE,multipleboot=FALSE,bscompare=TRUE,seed=rseed)
}
return(list(clb=clb,gpr=gpr))
}
}
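## Usage sketch (assumes 'dM' from the clusGapExt example above; needs the
## 'cluster' and 'fpc' packages):
# cl <- clustfun(dM, clustnr = 8, bootnr = 20, sat = TRUE,
#                rseed = 17000, FUNcluster = "kmedoids")
# table(cl$clb$result$partition)   # cluster sizes
# cl$clb$bootmean                  # per-cluster Jaccard stability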
# Fit a quadratic background model of log2(variance) on log2(mean) and return
# the genes whose variance exceeds the (upper-bound) background
fitbackground <- function(x,mthr=-1){
m <- apply(x,1,mean)
v <- apply(x,1,var )
ml <- log2(m)
vl <- log2(v)
f <- ml > -Inf & vl > -Inf
ml <- ml[f]
vl <- vl[f]
mm <- -8
repeat{
fit <- lm(vl ~ ml + I(ml^2))
if( coef(fit)[3] >= 0 | mm >= mthr){
break
}
mm <- mm + .5
f <- ml > mm
ml <- ml[f]
vl <- vl[f]
}
vln <- log2(v) - log2(sapply(m,FUN=uvar,fit=fit))
n <- names(vln)[vln>0]
return(list(fit=fit,n=n))
}
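## Usage sketch (toy counts; rownames are needed so selected genes are named):
# x <- matrix(rpois(5000, 5), nrow = 100,
#             dimnames = list(paste0("g", 1:100), NULL))
# bg <- fitbackground(x)
# length(bg$n)    # genes with above-background variance
# coef(bg$fit)    # quadratic fit of log2(var) on log2(mean)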
#fitbackground <- function(x){
#
# n <- apply(x,2,sum)
#
# ll <- quantile(n,.4)
# ul <- quantile(n,.6)
# f <- n > ll & n < ul
# m <- apply(x[,f],1,mean)
# v <- apply(x[,f],1,var)
# f <- m > 0
# lm <- log2(m)[f]
# lv <- log2(v)[f]
# fit <- lm(lv ~ lm + I(lm^2))
#
# vln <- log2(v) - log2(sapply(m,FUN=uvar,fit=fit))
# n <- names(vln)[f & vln>0]
# return(list(fit=fit,n=n))
#}
# Background (local) variance at mean x from the quadratic log-log fit
lvar <- function(x,fit) 2**(coef(fit)[1] + log2(x)*coef(fit)[2] + coef(fit)[3] * log2(x)**2)
# Negative-binomial size parameter implied by mean x and the background variance
lsize <- function(x,lvar,fit) x**2/(max(x + 1e-6,lvar(x,fit)) - x)
# Upper bound of the background variance (coefficients shifted up by one SE)
uvar <- function(x,fit){
  err <- coef(summary(fit))[, "Std. Error"]
  2**(coef(fit)[1] + err[1] + log2(x)*(coef(fit)[2] + err[2]) + (coef(fit)[3] + err[3]) * log2(x)**2)
}
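## Usage sketch (assumes 'bg' from the fitbackground example above):
# lvar(10, bg$fit)          # fitted background variance at mean 10
# lsize(10, lvar, bg$fit)   # implied negative-binomial size parameter
# uvar(10, bg$fit)          # background variance shifted up by one SE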
# Variant of fpc::pamk that always clusters a precomputed dissimilarity matrix
pamk <- function (data, krange = 2:10, criterion = "asw", usepam = TRUE,
scaling = FALSE, alpha = 0.001, diss = inherits(data, "dist"),
critout = FALSE, ns = 10, seed = NULL, ...)
{
ddata <- as.matrix(data)
if (!identical(scaling, FALSE))
sdata <- scale(ddata, scale = scaling)
else sdata <- ddata
cluster1 <- 1 %in% krange
critval <- numeric(max(krange))
pams <- list()
for (k in krange) {
if (usepam)
pams[[k]] <- cluster::pam(as.dist(sdata), k, diss = TRUE)
else pams[[k]] <- cluster::clara(as.dist(sdata), k, diss = TRUE)
if (k != 1)
critval[k] <- switch(criterion, asw = pams[[k]]$silinfo$avg.width,
multiasw = fpc::distcritmulti(sdata, pams[[k]]$clustering,
seed = seed, ns = ns)$crit.overall, ch = ifelse(diss,
fpc::cluster.stats(sdata, pams[[k]]$clustering)$ch,
fpc::calinhara(sdata, pams[[k]]$clustering)))
if (critout)
cat(k, " clusters ", critval[k], "\n")
}
k.best <- if ( length(krange) == 1 ) krange else (1:max(krange))[which.max(critval)]
if (cluster1) {
if (diss)
cluster1 <- FALSE
else {
cxx <- fpc::dudahart2(sdata, pams[[2]]$clustering, alpha = alpha)
critval[1] <- cxx$p.value
cluster1 <- cxx$cluster1
}
}
if (cluster1)
k.best <- 1
out <- list(pamobject = pams[[k.best]], nc = k.best, crit = critval)
out
}
# Clusterboot interface (CBI) wrapper around pamk, for use with fpc::clusterboot
pamkdCBI <- function (data, krange = 2:10, k = NULL, criterion = "asw", usepam = TRUE,
scaling = TRUE, diss = inherits(data, "dist"), ...)
{
if (!is.null(k))
krange <- k
c1 <- pamk(as.dist(data), krange = krange, criterion = criterion,
usepam = usepam, scaling = scaling, diss = diss, ...)
partition <- c1$pamobject$clustering
cl <- list()
nc <- c1$nc
for (i in 1:nc) cl[[i]] <- partition == i
out <- list(result = c1, nc = nc, clusterlist = cl, partition = partition,
clustermethod = "pam/estimated k", criterion = criterion)
out
}
# Constrained least squares via quadratic programming (quadprog::solve.QP);
# with norm=TRUE the weights are non-negative and sum to one
QP <- function(k,m,norm=TRUE){
Dmat <- t(m) %*% m
#Dmat <- 2 * t(m) %*% m
dvec <- t(k) %*% m
if ( norm ){
Amat <- cbind(rep(1,ncol(m)), diag(ncol(m)))
bvec <- c(1,rep(0,ncol(m)))
qp <- solve.QP(Dmat = Dmat, dvec = dvec, Amat = Amat, bvec = bvec, meq = 1)
}else{
Amat <- diag(ncol(m))
bvec <- rep(0,ncol(m))
qp <- solve.QP(Dmat = Dmat, dvec = dvec, Amat = Amat, bvec = bvec, meq = 0, factorized=FALSE)
}
return( list(w=qp$solution, fit=m %*% qp$solution, residual= sum((m %*% qp$solution - k)**2), qp=qp))
}
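## Usage sketch (requires the 'quadprog' package): recover mixing weights of
## a profile 'k' as a convex combination of the reference columns of 'm'.
# library(quadprog)
# m <- cbind(c(1, 0, 0), c(0, 1, 1))
# k <- 0.3 * m[, 1] + 0.7 * m[, 2]
# QP(k, m)$w    # approximately c(0.3, 0.7)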
# Geometric mean; zeros are dropped from the product but still counted in n
gm_mean = function(x, na.rm=TRUE){
  exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
}
# Row-wise z-score transformation
zscore <- function(x) ( x - apply(x,1,mean) )/sqrt(apply(x,1,var))
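## Usage sketch for the two helpers above:
# zscore(matrix(rpois(30, 5), nrow = 3))   # each row centred and scaled
# gm_mean(c(1, 4, 0, 16))                  # zeros excluded from the product, counted in n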
|
/R/RaceID_utils.R
|
no_license
|
JING-Bio/RaceID3_StemID2_package
|
R
| false
| false
| 10,361
|
r
|
|
# First analysis of Perception of Prosody I
# created by gratton on 3rd June 2017
setwd("/Users/Chantal/Documents/Doctoral/Courses/Spring 2017/LIN 245/perception/data/")
library(languageR)
library(lme4)
library(tidyverse)
library(ggplot2)
df = read.csv("complete_trials_reduced.csv")
head(df)
summary(df)
nrow(df)
summary(df$topic)
# Subset with only critical trials
crit_df <- df[df$filler == "False",]
head(crit_df)
summary(crit_df)
nrow(crit_df)
# Re-factor data to remove empty labels
unique(crit_df$topic)
crit_df$topic <- factor(crit_df$topic)
unique(crit_df$topic)
crit_df$prosody <- factor(crit_df$prosody)
unique(crit_df$prosody)
summary(crit_df$topic)
# Number of trials for each topic/prosody combination
table(crit_df$topic, crit_df$prosody)
# Number of trials for each prosody/adjective-polarity combination
table(crit_df$prosody, crit_df$adj_1_pole)
# ------------------ EXPLORING AGREEMENT ------------------
## Distribution
table(crit_df$agree)
summary(crit_df$agree)
table(crit_df$agree, crit_df$prosody)
mean(crit_df$agree[crit_df$prosody=="affirmative"])
mean(crit_df$agree[crit_df$prosody=="contrastive"])
ggplot(crit_df, aes(x=agree)) +
geom_histogram(binwidth=.01) +
xlab("Perceived agreement (%)") +
ylab("Overall responses")
## Visualising the data
ggplot(crit_df, aes(x=prosody,y=agree,color=adj_1_pole)) +
  geom_point(size=.5) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived agreement (%)", color = "Adjective polarity")
ggplot(crit_df, aes(x=prosody,y=agree)) +
  geom_violin() +
  geom_boxplot(alpha=.4,notch=T) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived agreement (%)", color = "Adjective polarity")
ggplot(crit_df, aes(x=prosody,y=agree,color=adj_1)) +
  geom_violin() +
  geom_boxplot(alpha=.4,notch=T) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived agreement (%)", color = "Adjective polarity")
#geom_point(data=agr,aes(y=MeanRT),color="orange",size=10)
ggsave(file="../graphs/agree_violin.png",width=5,height=5)
# agreement by prosody and worker
ggplot(crit_df, aes(x=as.factor(workerid),y=agree,color=adj_1_pole)) +
  geom_boxplot(alpha=.4) +
  labs(title = "XYZ", x = "Worker ID", y = "Perceived agreement (%)", color = "Adjective polarity")
## Agreement models
m1_agree = lm(agree ~ prosody, data=crit_df)  # lm() has no REML argument; dropped
summary(m1_agree) ## prosodycontrastive Est = 0.02
m2_agree = lm(agree ~ prosody + adj_1_pole, data=crit_df)
summary(m2_agree)
m3_agree = lm(agree ~ prosody*adj_1_pole, data=crit_df)
summary(m3_agree)
m4_agree = lm(agree ~ prosody:adj_1_pole, data=crit_df)
summary(m4_agree)
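# Sketch: compare the nested agreement models -- does the interaction help?
anova(m2_agree, m3_agree)
AIC(m1_agree, m2_agree, m3_agree)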
# ------------------ EXPLORING EVALUATION ------------------
## Distribution
table(crit_df$like)
summary(crit_df$like)
mean(crit_df$like[crit_df$prosody=="affirmative"])
mean(crit_df$like[crit_df$prosody=="contrastive"])
ggplot(crit_df, aes(x=like)) +
geom_histogram(binwidth=.01) +
xlab("Perceived evaluation (%)") +
ylab("Overall responses")
## Visualising the data
ggplot(crit_df, aes(x=prosody,y=like,color=adj_1_pole)) +
  geom_point(size=.5) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived evaluation (%)", color = "Adjective polarity")
ggplot(crit_df, aes(x=prosody,y=like)) +
  geom_violin() +
  geom_boxplot(alpha=.4,notch=T) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived evaluation (%)", color = "Adjective polarity")
ggplot(crit_df, aes(x=prosody,y=like,color=adj_1_pole)) +
  geom_violin() +
  geom_boxplot(alpha=.4,notch=T) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived evaluation (%)", color = "Adjective polarity")
#geom_point(data=agr,aes(y=MeanRT),color="orange",size=10)
## Simple model
m1_like = lm(like ~ prosody, data=crit_df)  # renamed from m2_like; lm() has no REML argument
summary(m1_like) ## prosodycontrastive Est = -0.08
m2_like = lm(like ~ prosody + adj_1_pole, data=crit_df)
summary(m2_like)
m3_like = lm(like ~ prosody*adj_1_pole, data=crit_df)
summary(m3_like)
# ------------------ EXPLORING FRIENDLINESS ------------------
## Distribution
table(crit_df$friendly)
summary(crit_df$friendly)
mean(crit_df$friendly[crit_df$prosody=="affirmative"])
mean(crit_df$friendly[crit_df$prosody=="contrastive"])
ggplot(crit_df, aes(x=friendly)) +
geom_histogram(binwidth=.01) +
xlab("Perceived friendliness (%)") +
ylab("Overall responses")
## Visualising the data
ggplot(crit_df, aes(x=prosody,y=friendly,color=adj_1_pole)) +
  geom_point(size=.5) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived friendliness (%)", color = "Adjective polarity")
ggplot(crit_df, aes(x=prosody,y=friendly,color=adj_1_pole)) +
  geom_violin() +
  geom_boxplot(alpha=.4,notch=T) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived friendliness (%)", color = "Adjective polarity")
#geom_point(data=agr,aes(y=MeanRT),color="orange",size=10)
## Simple model
m1_friendly = lm(friendly ~ prosody, data=crit_df)  # lm() has no REML argument; dropped
summary(m1_friendly) ## prosodycontrastive Est = -0.02
m2_friendly = lm(friendly ~ prosody + adj_1_pole, data=crit_df)
summary(m2_friendly)
m3_friendly = lm(friendly ~ prosody*adj_1_pole, data=crit_df)
summary(m3_friendly)
# ------------------ EXPLORING MOOD ------------------
## Distribution
table(crit_df$happy)
summary(crit_df$happy)
mean(crit_df$happy[crit_df$prosody=="affirmative"])
mean(crit_df$happy[crit_df$prosody=="contrastive"])
ggplot(crit_df, aes(x=happy)) +
geom_histogram(binwidth=.01) +
xlab("Perceived happiness (%)") +
ylab("Overall responses")
## Visualising the data
ggplot(crit_df, aes(x=prosody,y=happy,color=adj_1_pole)) +
  geom_point(size=.5) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived happiness (%)", color = "Adjective polarity")
ggplot(crit_df, aes(x=prosody,y=happy,color=adj_1_pole)) +
  geom_violin() +
  geom_boxplot(alpha=.4,notch=T) +
  labs(title = "XYZ", x = "Prosodic contour", y = "Perceived happiness (%)", color = "Adjective polarity")
#geom_point(data=agr,aes(y=MeanRT),color="orange",size=10)
## Simple model
m1_happy = lm(happy ~ prosody, data=crit_df)
summary(m1_happy) ## prosodycontrastive Est = -0.12
m2_happy = lm(happy ~ prosody + adj_1_pole, data=crit_df)
summary(m2_happy)
m3_happy = lm(happy ~ prosody*adj_1_pole, data=crit_df)
summary(m3_happy)
summary(lm(agree ~ prosody*adj_1_pole + like + friendly + happy + as.factor(adj_1), data=crit_df))
# 'sans22' is not defined above; it is assumed here to be the data without
# participant 22 (inferred from the name):
sans22 <- subset(crit_df, workerid != 22)
m1 = lm(agree ~ prosody + adj_1_pole + prosody:adj_1_pole + as.factor(workerid), data=sans22)
summary(m1)
m = lmer(agree ~ prosody*adj_1_pole + (1|workerid), data=crit_df, REML=F)
summary(m)
cor(fitted(m),crit_df$agree)^2   # pseudo-R^2: squared correlation of fitted and observed
# interaction of prosody and adj_pole, with worker fixed effects
summary(lm(agree~as.factor(prosody)*as.factor(adj_1_pole) + as.factor(workerid),data=crit_df))
summary(lm(happy~as.factor(prosody)*as.factor(adj_1_pole) + as.factor(workerid),data=sans22))
library(lmerTest)
summary(lmer(happy ~ prosody*adj_1_pole + (1|workerid) + (1|adj_1), data=crit_df))
#### TARA NOTES
|
/results/Rscripts/first_analysis.R
|
no_license
|
vrangasayee/perception
|
R
| false
| false
| 7,113
|
r
|
|
## ui.R ##
library(shinydashboard)
dashboardPage(
dashboardHeader(),
dashboardSidebar(),
dashboardBody(
column(6,
verbatimTextOutput("title")
)
)
)
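## Sketch (assumption): a minimal matching server.R, rendering the single
## "title" output used above.
# server <- function(input, output) {
#   output$title <- renderPrint("World Bank capstone")
# }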
|
/Project5-Capstone/kellymejiabreton/SHINY/ui.R
|
no_license
|
jeperez/bootcamp005_project
|
R
| false
| false
| 235
|
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getComment.R
\name{getAllComment}
\alias{getAllComment}
\title{Get All Comments}
\usage{
getAllComment(turl = url, ...)
}
\arguments{
\item{turl}{character. A news article URL on 'Naver', such as 'http://news.naver.com/main/read.nhn?mode=LSD&mid=shm&sid1=100&oid=056&aid=0010335895'. A URL outside the Naver.com domain will generate an error.}
\item{...}{further parameters passed on to the getComment function.}
}
\value{
a [tibble][tibble::tibble-package]
}
\description{
Get all comments from the provided news article URL on Naver
}
\details{
Works just like getComment, but this function keeps fetching until it has found and extracted all comments for the given url.
}
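\examples{
\dontrun{
# Assumed usage: fetch every comment for one Naver news article
comments <- getAllComment("http://news.naver.com/main/read.nhn?mode=LSD&mid=shm&sid1=100&oid=056&aid=0010335895")
head(comments)
}
}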
|
/man/getAllComment.Rd
|
permissive
|
mkhoin/N2H4
|
R
| false
| true
| 739
|
rd
|
|
dashboardPage(skin="yellow", title = "World Bank",
dashboardHeader(title = "World Bank"),
dashboardSidebar(
includeCSS("custom.css"),
sidebarMenu(
menuItem("Searchable", tabName = "searchable", icon = icon("search")),
inputPanel(
textInput("searchValue",
                             label="Enter a term of interest. The resulting table can be further searched.
                             Click on the row of the required indicator and after a few seconds a table of all the data,
                             a chart showing up to ten least-developed nations from the latest data, and a map of the countries covered by the indicator
                             will appear",
placeholder="Enter term e.g. Poverty")
),
actionButton("searchWB","Go"),
menuItem("Info", tabName = "info",icon = icon("info")),
menuItem("Code",icon = icon("code-fork"),
href = "https://github.com/pssguy/worldBank"),
tags$hr(),
menuItem(text="",href="https://mytinyshinys.shinyapps.io/dashboard",badgeLabel = "All Dashboards and Trelliscopes (14)"),
tags$hr(),
tags$body(
a(class="addpad",href="https://twitter.com/pssGuy", target="_blank",img(src="images/twitterImage25pc.jpg")),
a(class="addpad2",href="mailto:agcur@rogers.com", img(src="images/email25pc.jpg")),
a(class="addpad2",href="https://github.com/pssguy",target="_blank",img(src="images/GitHub-Mark30px.png")),
a(href="https://rpubs.com/pssguy",target="_blank",img(src="images/RPubs25px.png"))
)
)
),
dashboardBody(tabItems(
tabItem(
"searchable",
fluidRow(column(width=12,
box(width=12,collapsible=TRUE,solidHeader = TRUE,status = 'success',title="Click the required indicator for results. Collapse the box for easier viewing of outputs",
DT::dataTableOutput("tableChoice")
))),
fluidRow(column(width=12,
h2(textOutput("resultTitle"))
)),
fluidRow(column(width=2,
box(width=12,title="All Data",
DT::dataTableOutput("resultTable")
)),
column(width=5,
box(width=12,title="Extreme Countries - Add/Remove countries by clicking legend",
inputPanel(
radioButtons("extreme",label = "Extreme Countries",choices=c("Bottom","Top"),inline= TRUE),
radioButtons("incZero",label = "Include Zero values",choices=c("No","Yes"),inline= TRUE)
),
plotlyOutput("resultPlot")
)),
column(width=5,
box(width=12,title="Map of latest data. Pan/zoom and click for details",
leafletOutput("resultMap")
)))
),
tabItem("info",includeMarkdown("info.md"))
))
)
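## Note (assumption): this ui.R presumes a global.R that loads shinydashboard,
## DT, plotly and leaflet and defines the server-side outputs referenced above
## (tableChoice, resultTitle, resultTable, resultPlot, resultMap).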
|
/ui.R
|
no_license
|
rafwalas/worldBank
|
R
| false
| false
| 2,736
|
r
|
|