content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
library(shiny)

# Feedback: demo app showing shinyFeedback messages attached to two
# numericInputs while their halves/thirds are rendered as text.
ui <- fluidPage(
  shinyFeedback::useShinyFeedback(),
  numericInput("n", "n", value = 11),
  textOutput("half"),
  numericInput("n3", "n", value = 10),
  textOutput("third")
)

server <- function(input, output, session) {
  # Half of n: warn while n is odd, show neutral feedback once it is even.
  half <- reactive({
    n_is_even <- input$n %% 2 == 0
    shinyFeedback::feedbackWarning(
      inputId = "n",
      show = !n_is_even,
      text = "Warning: Please select an even number"
    )
    shinyFeedback::feedback(
      inputId = "n",
      show = n_is_even,
      text = "Feedback: you selected an even number"
    )
    input$n / 2
  })
  output$half <- renderText(half())

  # Third of n3: danger while not divisible by three, success otherwise.
  third <- reactive({
    n3_div_by_three <- input$n3 %% 3 == 0
    shinyFeedback::feedbackDanger(
      inputId = "n3",
      show = !n3_div_by_three,
      text = "Danger: Please select number, dividable by three"
    )
    shinyFeedback::feedbackSuccess(
      inputId = "n3",
      show = n3_div_by_three,
      text = "Success: you selected a number dividable by three"
    )
    input$n3 / 3
  })
  output$third <- renderText(third())
}

shinyApp(ui = ui, server = server)
|
/examples/08-user_feedback_feedback_app/app.R
|
no_license
|
maskegger/bookclub-mshiny
|
R
| false
| false
| 977
|
r
|
library(shiny)

# Feedback: demo app showing shinyFeedback messages attached to two
# numericInputs while their halves/thirds are rendered as text.
ui <- fluidPage(
  shinyFeedback::useShinyFeedback(),
  numericInput("n", "n", value = 11),
  textOutput("half"),
  numericInput("n3", "n", value = 10),
  textOutput("third")
)

server <- function(input, output, session) {
  # Half of n: warn while n is odd, show neutral feedback once it is even.
  half <- reactive({
    n_is_even <- input$n %% 2 == 0
    shinyFeedback::feedbackWarning(
      inputId = "n",
      show = !n_is_even,
      text = "Warning: Please select an even number"
    )
    shinyFeedback::feedback(
      inputId = "n",
      show = n_is_even,
      text = "Feedback: you selected an even number"
    )
    input$n / 2
  })
  output$half <- renderText(half())

  # Third of n3: danger while not divisible by three, success otherwise.
  third <- reactive({
    n3_div_by_three <- input$n3 %% 3 == 0
    shinyFeedback::feedbackDanger(
      inputId = "n3",
      show = !n3_div_by_three,
      text = "Danger: Please select number, dividable by three"
    )
    shinyFeedback::feedbackSuccess(
      inputId = "n3",
      show = n3_div_by_three,
      text = "Success: you selected a number dividable by three"
    )
    input$n3 / 3
  })
  output$third <- renderText(third())
}

shinyApp(ui = ui, server = server)
|
library(WhopGenome)
### Name: vcf_getfieldnames
### Title: Return a vector with the field names used in the VCF file.
### Aliases: vcf_getfieldnames
### ** Examples
# Open the example VCF file shipped with WhopGenome, then list its field names.
vcffile <- vcf_open( system.file( "extdata", "ex.vcf.gz" , package="WhopGenome") )
vcf_getfieldnames( vcffile )
|
/data/genthat_extracted_code/WhopGenome/examples/vcf_getfieldnames.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 286
|
r
|
library(WhopGenome)
### Name: vcf_getfieldnames
### Title: Return a vector with the field names used in the VCF file.
### Aliases: vcf_getfieldnames
### ** Examples
# Open the example VCF file shipped with WhopGenome, then list its field names.
vcffile <- vcf_open( system.file( "extdata", "ex.vcf.gz" , package="WhopGenome") )
vcf_getfieldnames( vcffile )
|
#' @title MCAR_0.2
#'
#' @description Summarized simulation results for the coefficient of treatment using methods A-D under MCAR with a 0.2 missing rate.
#'
#' @format The following four methods are evaluated based on the mean bias, mean error, relative bias, coverage, mean squared error, and standard deviation.
#' \describe{
#' \item{Method A}{Complete case analysis}
#' \item{Method B}{No outcome imputation, outcome not included in mi model}
#' \item{Method C}{No outcome imputation, outcome included in mi model}
#' \item{Method D}{Outcome imputed and included in mi model}
#' }
"MCAR_0.2"
|
/MIsimulation/R/MCAR_0.2.R
|
no_license
|
sheep720/MIsimulation
|
R
| false
| false
| 577
|
r
|
#' @title MCAR_0.2
#'
#' @description Summarized simulation results for the coefficient of treatment using methods A-D under MCAR with a 0.2 missing rate.
#'
#' @format The following four methods are evaluated based on the mean bias, mean error, relative bias, coverage, mean squared error, and standard deviation.
#' \describe{
#' \item{Method A}{Complete case analysis}
#' \item{Method B}{No outcome imputation, outcome not included in mi model}
#' \item{Method C}{No outcome imputation, outcome included in mi model}
#' \item{Method D}{Outcome imputed and included in mi model}
#' }
"MCAR_0.2"
|
#!/usr/bin/env r
# Benchmark several implementations of the classic "convolve" example:
# the optimised and naive R C API versions against a family of Rcpp variants.
# Requires the compiled wrapper shared objects to be present in the working
# directory (built from the Rcpp ConvolveBenchmarks example sources).
suppressMessages(require(Rcpp))
set.seed(42)
n <- 200
a <- rnorm(n)
b <- rnorm(n)
## load shared libraries with wrapper code
dyn.load("convolve2_c.so")
dyn.load("convolve3_cpp.so")
dyn.load("convolve4_cpp.so")
dyn.load("convolve5_cpp.so")
dyn.load("convolve7_c.so")
dyn.load("convolve8_cpp.so")
dyn.load("convolve9_cpp.so")
dyn.load("convolve10_cpp.so")
dyn.load("convolve11_cpp.so")
dyn.load("convolve12_cpp.so" )
dyn.load("convolve14_cpp.so" )
## now run each one once for comparison of results,
## and define test functions
# Each wrapper takes a replication count (first arg) plus the two input vectors.
R_API_optimised <- function(n,a,b) .Call("convolve2__loop", n, a, b)
Rcpp_New_std <- function(n,a,b) .Call("convolve3cpp__loop", n, a, b)
#Rcpp_New_std_inside <- function(n,a,b) .Call("convolve3cpp__loop", n, a, b, PACKAGE = "Rcpp" )
Rcpp_New_ptr <- function(n,a,b) .Call("convolve4cpp__loop", n, a, b)
Rcpp_New_sugar <- function(n,a,b) .Call("convolve5cpp__loop", n, a, b)
Rcpp_New_sugar_noNA <- function(n,a,b) .Call("convolve11cpp__loop", n, a, b)
R_API_naive <- function(n,a,b) .Call("convolve7__loop", n, a, b)
Rcpp_New_std_2 <- function(n,a,b) .Call("convolve8cpp__loop", n, a, b)
#Rcpp_New_std_3 <- function(n,a,b) .Call("convolve9cpp__loop", n, a, b)
#Rcpp_New_std_4 <- function(n,a,b) .Call("convolve10cpp__loop", n, a, b)
Rcpp_New_std_it <- function(n,a,b) .Call("convolve12cpp__loop", n, a, b )
Rcpp_New_std_Fast <- function(n,a,b) .Call("convolve14cpp__loop", n, a, b )
# Run each variant once and verify they all agree with the reference result.
v1 <- R_API_optimised(1L, a, b )
v3 <- Rcpp_New_std(1L, a, b)
v4 <- Rcpp_New_ptr(1L, a, b)
v5 <- Rcpp_New_sugar(1L, a, b )
v7 <- R_API_naive(1L, a, b)
v11 <- Rcpp_New_sugar_noNA(1L, a, b)
stopifnot(all.equal(v1, v3))
stopifnot(all.equal(v1, v4))
stopifnot(all.equal(v1, v5))
stopifnot(all.equal(v1, v7))
stopifnot(all.equal(v1, v11))
## load benchmarking helper function
suppressMessages(library(rbenchmark))
REPS <- 5000L
bm <- benchmark(R_API_optimised(REPS,a,b),
R_API_naive(REPS,a,b),
Rcpp_New_std(REPS,a,b),
# Rcpp_New_std_inside(REPS,a,b),
Rcpp_New_ptr(REPS,a,b),
Rcpp_New_sugar(REPS,a,b),
Rcpp_New_sugar_noNA(REPS,a,b),
Rcpp_New_std_2(REPS,a,b),
# Rcpp_New_std_3(REPS,a,b),
# Rcpp_New_std_4(REPS,a,b),
Rcpp_New_std_it(REPS,a,b),
Rcpp_New_std_Fast(REPS,a,b),
columns=c("test", "elapsed", "relative", "user.self", "sys.self"),
order="relative",
replications=1)
print(bm)
cat("All results are equal\n") # as we didn't get stopped
q("no")
# NOTE(review): q("no") terminates the script above, so everything below is
# unreachable dead code -- a size-scaling benchmark apparently kept for
# reference (it times a subset of the variants over growing vector sizes
# and plots elapsed/relative timings with lattice).
sizes <- 1:10*100
REPS <- 5000L
timings <- lapply( sizes, function(size){
cat( "size = ", size, "..." )
a <- rnorm(size); b <- rnorm(size)
bm <- benchmark(R_API_optimised(REPS,a,b),
R_API_naive(REPS,a,b),
Rcpp_New_std(REPS,a,b),
Rcpp_New_ptr(REPS,a,b),
Rcpp_New_sugar(REPS,a,b),
Rcpp_New_sugar_noNA(REPS,a,b),
columns=c("test", "elapsed", "relative", "user.self", "sys.self"),
order="relative",
replications=1)
cat( " done\n" )
bm
} )
for( i in seq_along(sizes)){
timings[[i]]$size <- sizes[i]
}
timings <- do.call( rbind, timings )
require( lattice )
png( "elapsed.png", width = 800, height = 600 )
xyplot( elapsed ~ size, groups = test, data = timings, auto.key = TRUE, type = "l", lwd = 2 )
dev.off()
png( "relative.png", width = 800, height = 600 )
xyplot( relative ~ size, groups = test, data = timings, auto.key = TRUE, type = "l", lwd = 2 )
dev.off()
|
/R-Portable/library/Rcpp/examples/ConvolveBenchmarks/exampleRCode.r
|
permissive
|
ksasso/Electron_ShinyApp_Deployment
|
R
| false
| false
| 3,588
|
r
|
#!/usr/bin/env r
# Benchmark several implementations of the classic "convolve" example:
# the optimised and naive R C API versions against a family of Rcpp variants.
# Requires the compiled wrapper shared objects to be present in the working
# directory (built from the Rcpp ConvolveBenchmarks example sources).
suppressMessages(require(Rcpp))
set.seed(42)
n <- 200
a <- rnorm(n)
b <- rnorm(n)
## load shared libraries with wrapper code
dyn.load("convolve2_c.so")
dyn.load("convolve3_cpp.so")
dyn.load("convolve4_cpp.so")
dyn.load("convolve5_cpp.so")
dyn.load("convolve7_c.so")
dyn.load("convolve8_cpp.so")
dyn.load("convolve9_cpp.so")
dyn.load("convolve10_cpp.so")
dyn.load("convolve11_cpp.so")
dyn.load("convolve12_cpp.so" )
dyn.load("convolve14_cpp.so" )
## now run each one once for comparison of results,
## and define test functions
# Each wrapper takes a replication count (first arg) plus the two input vectors.
R_API_optimised <- function(n,a,b) .Call("convolve2__loop", n, a, b)
Rcpp_New_std <- function(n,a,b) .Call("convolve3cpp__loop", n, a, b)
#Rcpp_New_std_inside <- function(n,a,b) .Call("convolve3cpp__loop", n, a, b, PACKAGE = "Rcpp" )
Rcpp_New_ptr <- function(n,a,b) .Call("convolve4cpp__loop", n, a, b)
Rcpp_New_sugar <- function(n,a,b) .Call("convolve5cpp__loop", n, a, b)
Rcpp_New_sugar_noNA <- function(n,a,b) .Call("convolve11cpp__loop", n, a, b)
R_API_naive <- function(n,a,b) .Call("convolve7__loop", n, a, b)
Rcpp_New_std_2 <- function(n,a,b) .Call("convolve8cpp__loop", n, a, b)
#Rcpp_New_std_3 <- function(n,a,b) .Call("convolve9cpp__loop", n, a, b)
#Rcpp_New_std_4 <- function(n,a,b) .Call("convolve10cpp__loop", n, a, b)
Rcpp_New_std_it <- function(n,a,b) .Call("convolve12cpp__loop", n, a, b )
Rcpp_New_std_Fast <- function(n,a,b) .Call("convolve14cpp__loop", n, a, b )
# Run each variant once and verify they all agree with the reference result.
v1 <- R_API_optimised(1L, a, b )
v3 <- Rcpp_New_std(1L, a, b)
v4 <- Rcpp_New_ptr(1L, a, b)
v5 <- Rcpp_New_sugar(1L, a, b )
v7 <- R_API_naive(1L, a, b)
v11 <- Rcpp_New_sugar_noNA(1L, a, b)
stopifnot(all.equal(v1, v3))
stopifnot(all.equal(v1, v4))
stopifnot(all.equal(v1, v5))
stopifnot(all.equal(v1, v7))
stopifnot(all.equal(v1, v11))
## load benchmarking helper function
suppressMessages(library(rbenchmark))
REPS <- 5000L
bm <- benchmark(R_API_optimised(REPS,a,b),
R_API_naive(REPS,a,b),
Rcpp_New_std(REPS,a,b),
# Rcpp_New_std_inside(REPS,a,b),
Rcpp_New_ptr(REPS,a,b),
Rcpp_New_sugar(REPS,a,b),
Rcpp_New_sugar_noNA(REPS,a,b),
Rcpp_New_std_2(REPS,a,b),
# Rcpp_New_std_3(REPS,a,b),
# Rcpp_New_std_4(REPS,a,b),
Rcpp_New_std_it(REPS,a,b),
Rcpp_New_std_Fast(REPS,a,b),
columns=c("test", "elapsed", "relative", "user.self", "sys.self"),
order="relative",
replications=1)
print(bm)
cat("All results are equal\n") # as we didn't get stopped
q("no")
# NOTE(review): q("no") terminates the script above, so everything below is
# unreachable dead code -- a size-scaling benchmark apparently kept for
# reference (it times a subset of the variants over growing vector sizes
# and plots elapsed/relative timings with lattice).
sizes <- 1:10*100
REPS <- 5000L
timings <- lapply( sizes, function(size){
cat( "size = ", size, "..." )
a <- rnorm(size); b <- rnorm(size)
bm <- benchmark(R_API_optimised(REPS,a,b),
R_API_naive(REPS,a,b),
Rcpp_New_std(REPS,a,b),
Rcpp_New_ptr(REPS,a,b),
Rcpp_New_sugar(REPS,a,b),
Rcpp_New_sugar_noNA(REPS,a,b),
columns=c("test", "elapsed", "relative", "user.self", "sys.self"),
order="relative",
replications=1)
cat( " done\n" )
bm
} )
for( i in seq_along(sizes)){
timings[[i]]$size <- sizes[i]
}
timings <- do.call( rbind, timings )
require( lattice )
png( "elapsed.png", width = 800, height = 600 )
xyplot( elapsed ~ size, groups = test, data = timings, auto.key = TRUE, type = "l", lwd = 2 )
dev.off()
png( "relative.png", width = 800, height = 600 )
xyplot( relative ~ size, groups = test, data = timings, auto.key = TRUE, type = "l", lwd = 2 )
dev.off()
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/append_values.r
\name{my_unlist}
\alias{my_unlist}
\title{Unlists while preserving NULLs and only unlisting lists with one value}
\usage{
my_unlist(l)
}
\arguments{
\item{l}{a list that we want to unlist}
}
\description{
Unlists while preserving NULLs and only unlisting lists with one value
}
|
/man/my_unlist.Rd
|
permissive
|
abresler/tidyjson
|
R
| false
| false
| 381
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/append_values.r
\name{my_unlist}
\alias{my_unlist}
\title{Unlists while preserving NULLs and only unlisting lists with one value}
\usage{
my_unlist(l)
}
\arguments{
\item{l}{a list that we want to unlist}
}
\description{
Unlists while preserving NULLs and only unlisting lists with one value
}
|
# demo/bench/methods.R
#
# Benchmark registry: each entry maps a method name to a closure
# function(x, maxcenters) that estimates the number of clusters in the data
# matrix x, considering up to maxcenters clusters.
#
# Depends:
# library("bcv")
# library("devtools")
# library("cluster")
# library("e1071")
# library("mclust")
# library("MASS")
# library("nnet")
# load_all("../../lib/fpc")
# load_all("../../lib/NbClust")
# source("../../lib/NbClust.R")
# source("../../code/classify.R")
# source("../../code/cluster.R")
# source("../../code/gabriel.R")
# source("../../code/jump.R")
# source("../../code/wold.R")
#
methods <- list(
"gabriel-nearest-2x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 2, 2, maxcenters,
classify.method="nearest")
cv$centers
},
# NOTE(review): the name says 5x2, but the cv.kmeans.gabriel() call below
# passes (2, 2) like the 2x2 variants -- confirm whether (5, 2) was intended.
"gabriel-rot-5x2" = function(x, maxcenters) {
# generate a random rotation
dim <- ncol(x)
z <- matrix(rnorm(dim * dim), dim, dim)
qr <- qr(z)
q <- qr.Q(qr)
sign <- sample(c(-1, 1), dim, replace=TRUE)
rot <- q %*% diag(sign, dim)
# rotate the columns of the data matrix
x_rot <- x %*% rot
# apply Gabriel CV to rotated data
cv <- cv.kmeans.gabriel(x_rot, 2, 2, maxcenters,
classify.method="nearest")
cv$centers
},
"gabriel-nearest-5x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 5, 2, maxcenters,
classify.method="nearest")
cv$centers
},
"gabriel-lda-equal-2x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 2, 2, maxcenters,
classify.method="lda-equal")
cv$centers
},
"gabriel-lda-equal-5x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 5, 2, maxcenters,
classify.method="lda-equal")
cv$centers
},
"gabriel-lda-proportion-2x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 2, 2, maxcenters,
classify.method="lda-proportions")
cv$centers
},
"gabriel-lda-proportion-5x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 5, 2, maxcenters,
classify.method="lda-proportions")
cv$centers
},
# Wold-style holdout cross-validation (from ../../code/wold.R).
"wold" = function(x, maxcenters) {
Wold_holdout(x, 5, max.k=maxcenters, Errortol=0.01)
},
# Gap statistic; picks the k with the maximal gap value (column 3).
"gap" = function(x, maxcenters) {
Gap <- cluster::clusGap(x, FUN = cluster_kmeans, K.max = maxcenters)
which.max(Gap[[1]][,3])
},
# Model-based clustering; G chosen by BIC inside Mclust.
"BIC" = function(x, maxcenters) {
mcluster <- mclust::Mclust(x, G = 1:maxcenters,
control=mclust::emControl(itmax=c(10000L, 10000L)))
mcluster$G
},
# Calinski-Harabasz index via NbClust.
"CH" = function(x, maxcenters) {
Ch <- NbClust::NbClust(x, min.nc = 2, max.nc = maxcenters,
method = "kmeans", index = "ch")
Ch$Best.nc[[1]]
},
# Hartigan index via NbClust.
"Hartigan" = function(x, maxcenters) {
Hartigan <- NbClust::NbClust(x, min.nc = 2, max.nc = maxcenters,
method = "kmeans", index = "hartigan")
Hartigan$Best.nc[[1]]
},
# Jump statistic (from ../../code/jump.R).
"Jump" = function(x, maxcenters) {
Jump <- jump(x, maxcenters, plotjumps=FALSE, trace=FALSE)
Jump$maxjump
},
# Prediction strength (fpc).
"PS" = function(x, maxcenters) {
PS <- fpc::prediction.strength(x, Gmin=2, Gmax=maxcenters,
clustermethod=fpc::kmeansCBI)
PS$optimalk
},
# Bootstrap stability selection (fpc).
"Stab" = function(x, maxcenters) {
SB <- fpc::nselectboot(x, clustermethod=fpc::kmeansCBI,
classification="centroid",
krange=2:maxcenters)
SB$kopt
})
|
/demo/bench/methods.R
|
no_license
|
patperry/cvclust
|
R
| false
| false
| 3,677
|
r
|
# demo/bench/methods.R
#
# Benchmark registry: each entry maps a method name to a closure
# function(x, maxcenters) that estimates the number of clusters in the data
# matrix x, considering up to maxcenters clusters.
#
# Depends:
# library("bcv")
# library("devtools")
# library("cluster")
# library("e1071")
# library("mclust")
# library("MASS")
# library("nnet")
# load_all("../../lib/fpc")
# load_all("../../lib/NbClust")
# source("../../lib/NbClust.R")
# source("../../code/classify.R")
# source("../../code/cluster.R")
# source("../../code/gabriel.R")
# source("../../code/jump.R")
# source("../../code/wold.R")
#
methods <- list(
"gabriel-nearest-2x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 2, 2, maxcenters,
classify.method="nearest")
cv$centers
},
# NOTE(review): the name says 5x2, but the cv.kmeans.gabriel() call below
# passes (2, 2) like the 2x2 variants -- confirm whether (5, 2) was intended.
"gabriel-rot-5x2" = function(x, maxcenters) {
# generate a random rotation
dim <- ncol(x)
z <- matrix(rnorm(dim * dim), dim, dim)
qr <- qr(z)
q <- qr.Q(qr)
sign <- sample(c(-1, 1), dim, replace=TRUE)
rot <- q %*% diag(sign, dim)
# rotate the columns of the data matrix
x_rot <- x %*% rot
# apply Gabriel CV to rotated data
cv <- cv.kmeans.gabriel(x_rot, 2, 2, maxcenters,
classify.method="nearest")
cv$centers
},
"gabriel-nearest-5x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 5, 2, maxcenters,
classify.method="nearest")
cv$centers
},
"gabriel-lda-equal-2x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 2, 2, maxcenters,
classify.method="lda-equal")
cv$centers
},
"gabriel-lda-equal-5x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 5, 2, maxcenters,
classify.method="lda-equal")
cv$centers
},
"gabriel-lda-proportion-2x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 2, 2, maxcenters,
classify.method="lda-proportions")
cv$centers
},
"gabriel-lda-proportion-5x2" = function(x, maxcenters) {
cv <- cv.kmeans.gabriel(x, 5, 2, maxcenters,
classify.method="lda-proportions")
cv$centers
},
# Wold-style holdout cross-validation (from ../../code/wold.R).
"wold" = function(x, maxcenters) {
Wold_holdout(x, 5, max.k=maxcenters, Errortol=0.01)
},
# Gap statistic; picks the k with the maximal gap value (column 3).
"gap" = function(x, maxcenters) {
Gap <- cluster::clusGap(x, FUN = cluster_kmeans, K.max = maxcenters)
which.max(Gap[[1]][,3])
},
# Model-based clustering; G chosen by BIC inside Mclust.
"BIC" = function(x, maxcenters) {
mcluster <- mclust::Mclust(x, G = 1:maxcenters,
control=mclust::emControl(itmax=c(10000L, 10000L)))
mcluster$G
},
# Calinski-Harabasz index via NbClust.
"CH" = function(x, maxcenters) {
Ch <- NbClust::NbClust(x, min.nc = 2, max.nc = maxcenters,
method = "kmeans", index = "ch")
Ch$Best.nc[[1]]
},
# Hartigan index via NbClust.
"Hartigan" = function(x, maxcenters) {
Hartigan <- NbClust::NbClust(x, min.nc = 2, max.nc = maxcenters,
method = "kmeans", index = "hartigan")
Hartigan$Best.nc[[1]]
},
# Jump statistic (from ../../code/jump.R).
"Jump" = function(x, maxcenters) {
Jump <- jump(x, maxcenters, plotjumps=FALSE, trace=FALSE)
Jump$maxjump
},
# Prediction strength (fpc).
"PS" = function(x, maxcenters) {
PS <- fpc::prediction.strength(x, Gmin=2, Gmax=maxcenters,
clustermethod=fpc::kmeansCBI)
PS$optimalk
},
# Bootstrap stability selection (fpc).
"Stab" = function(x, maxcenters) {
SB <- fpc::nselectboot(x, clustermethod=fpc::kmeansCBI,
classification="centroid",
krange=2:maxcenters)
SB$kopt
})
|
library(tidyverse)
library(tidymodels)
# Read the sisters survey data and drop the `sister` column
# (presumably a respondent identifier -- confirm against the data dictionary).
sisters_select <- read_csv("data/sisters.csv") %>%
select(-sister)
# Split off the testing set
# (stratified on age so train/test age distributions match; seed fixed for
# reproducibility)
set.seed(123)
sisters_split <- initial_split(sisters_select, strata = age)
sisters_other <- training(sisters_split)
sisters_test <- testing(sisters_split)
# Create the validation split from the non-test portion, again stratified on age
set.seed(123)
sisters_val <- validation_split(sisters_other, strata = age)
glimpse(sisters_val)
|
/exercises/solution_04_07.R
|
permissive
|
snowdj/supervised-ML-case-studies-course
|
R
| false
| false
| 423
|
r
|
library(tidyverse)
library(tidymodels)
# Read the sisters survey data and drop the `sister` column
# (presumably a respondent identifier -- confirm against the data dictionary).
sisters_select <- read_csv("data/sisters.csv") %>%
select(-sister)
# Split off the testing set
# (stratified on age so train/test age distributions match; seed fixed for
# reproducibility)
set.seed(123)
sisters_split <- initial_split(sisters_select, strata = age)
sisters_other <- training(sisters_split)
sisters_test <- testing(sisters_split)
# Create the validation split from the non-test portion, again stratified on age
set.seed(123)
sisters_val <- validation_split(sisters_other, strata = age)
glimpse(sisters_val)
|
#' @title AMARETTO_HTMLCharacterisation
#'
#' @description Generates per-module HTML reports characterising AMARETTO
#' communities, optionally including a survival analysis and/or a phenotype
#' association analysis. Reports are written under
#' \code{report_address/AMARETTOhtmls/}.
#'
#' @param AMARETTOresults AMARETTO results object; \code{$ModuleData}
#'   (modules x patients matrix) and \code{$NrModules} are used here.
#' @param annotation Patient annotation data frame; must share patient ids
#'   with the module data.
#' @param report_address Directory under which the "AMARETTOhtmls" output
#'   tree is written (default \code{"./"}).
#' @param survivalanalysis Logical; run the per-module survival analysis?
#' @param phenotypeanalysis Logical; run the per-module phenotype analysis?
#' @param idpatients Name of the \code{annotation} column holding patient ids.
#' @param idvitalstatus Name of the \code{annotation} column holding vital
#'   status; values are matched case-insensitively against "dead"/"alive".
#' @param iddaystodeath Name of the \code{annotation} column with days to
#'   death (used for patients matched as "dead").
#' @param idfollowupdays Name of the \code{annotation} column with follow-up
#'   days (used for patients matched as "alive").
#' @param daystoyears Logical; if TRUE, times are divided by 365 to convert
#'   days to years.
#' @param rightcensored Either FALSE (no extra censoring) or a numeric time
#'   point at which observations are right-censored.
#' @param parameters Phenotype columns of \code{annotation} passed to the
#'   phenotype report templates.
#' @param typelist Type specification passed along with \code{parameters} to
#'   the phenotype association analysis.
#' @param NrCores Number of local worker processes used for the survival
#'   analysis.
#'
#' @return A set of HTMLs, giving characteristics of the communities
#'
#' @import survminer
#' @import tidyverse
#' @import survival
#' @import gtools
#' @import kableExtra
#' @import doParallel
#' @import foreach
#' @export
#'
AMARETTO_HTMLCharacterisation<-function(AMARETTOresults, annotation, report_address="./",survivalanalysis=FALSE, phenotypeanalysis=FALSE, idpatients=NULL, idvitalstatus=NULL, iddaystodeath=NULL, idfollowupdays=NULL, daystoyears=FALSE, rightcensored=FALSE, parameters=NULL, typelist=NULL, NrCores=1){
full_path<-normalizePath(report_address)
# Per-patient mean module expression: transpose so rows are patients,
# then turn rownames into an explicit "idpatients" column for joining.
mean_expression_modules <- t(AMARETTOresults$ModuleData)
mean_expression_modules <- rownames_to_column(as.data.frame(mean_expression_modules),"idpatients")
if (sum(annotation[,idpatients] %in% mean_expression_modules[,"idpatients"],na.rm=TRUE)==0){
stop("No overlap between patients ids")
} else {
# NOTE(review): length(x %in% y) is just length(x), so this message reports
# the total number of annotation rows, not the overlap count -- sum() was
# probably intended here, as in the check above.
print(paste0("Survival data will be calculated on ",length(annotation[,idpatients] %in% mean_expression_modules[,"idpatients"]), " patients."))
}
# Join module means onto the annotation, matching the caller's id column.
annotation <- suppressMessages(inner_join(annotation, mean_expression_modules %>% dplyr::rename(!!idpatients :="idpatients")))
if (survivalanalysis == TRUE){
# Split event times: days-to-death for "dead" patients, follow-up days for
# "alive" patients (both matched case-insensitively).
daysdeath <- as.numeric(annotation[grep("dead", annotation[,idvitalstatus],ignore.case=TRUE),iddaystodeath])
dead <- annotation[grep("dead", annotation[,idvitalstatus],ignore.case=TRUE),idvitalstatus]
daysfollowup <- as.numeric(annotation[grep("alive", annotation[,idvitalstatus],ignore.case=TRUE),idfollowupdays])
alive <- annotation[grep("alive", annotation[,idvitalstatus],ignore.case=TRUE),idvitalstatus]
time<- c(daysfollowup,daysdeath)
status <- tolower(c(alive, dead))
if (daystoyears == TRUE){
time<-time/365
}
# Optional right-censoring at a user-supplied time point: anything beyond
# the cutoff is treated as alive at the cutoff.
if (!rightcensored == FALSE){
if (is.numeric(rightcensored)){
status[time>rightcensored]<-"alive"
time[time>rightcensored]<-rightcensored
} else {
stop("The time point to censor the data is incorrect.")
}
}
status <- as.numeric(as.factor(status))
# NOTE(review): c_surv is computed but never used below.
c_surv<-Surv(time, status)
#per module
cluster <- makeCluster(c(rep("localhost", NrCores)), type = "SOCK")
registerDoParallel(cluster,cores=NrCores)
# For each module: Cox regression on mean expression, Kaplan-Meier split at
# the expression median, and a rendered per-module HTML report.
p_survival<-foreach (i = 1:AMARETTOresults$NrModules, .packages = c('tidyverse','rmarkdown','gtools','survminer','survival')) %dopar% {
moduleNr <- paste0("Module_",i)
mean_dead <- annotation[grep("dead", annotation[,idvitalstatus],ignore.case=TRUE), moduleNr]
mean_alive <- annotation[grep("alive", annotation[,idvitalstatus],ignore.case=TRUE), moduleNr]
mean_expression <- c(mean_alive,mean_dead)
mean_expression_cut <- quantcut(mean_expression, q=2, na.rm=TRUE)
datasurvfit <- as.data.frame(cbind(time,status,mean_expression,mean_expression_cut)) %>% drop_na()
cox_reg <- coxph(Surv(time,status)~mean_expression,data=datasurvfit)
coxstats<- c(summary(cox_reg)$waldtest["pvalue"], summary(cox_reg)$conf.int[1], summary(cox_reg)$conf.int[3],summary(cox_reg)$conf.int[4])
c_meanexp <- survival::survfit(Surv(time,status) ~ factor(mean_expression_cut),data=datasurvfit)
kmstats <- c(survminer::surv_pvalue(c_meanexp,data=datasurvfit)$pval, summary(c_meanexp)$table[1,"median"], summary(c_meanexp)$table[2,"median"], summary(c_meanexp)$table[1,"0.95LCL"],
summary(c_meanexp)$table[2,"0.95LCL"], summary(c_meanexp)$table[1,"0.95UCL"], summary(c_meanexp)$table[2,"0.95UCL"])
# Render the per-module survival report from a throwaway copy of the template.
modulemd<-paste0(full_path,"/AMARETTOhtmls/survival/modules/module",i,".rmd")
file.copy(system.file("templates/TemplateSurvivalModule.Rmd",package="AMARETTO"),modulemd)
rmarkdown::render(modulemd,output_file = paste0("module",i,".html"), params = list(
cox_reg=cox_reg,
c_meanexp=c_meanexp,
datasurvfit=datasurvfit,
i=i),quiet = TRUE)
file.remove(modulemd)
return(c(coxstats,kmstats))
}
stopCluster(cluster)
cat("The survival module htmls are finished.\n")
# Assemble per-module stats (4 Cox + 7 KM values = 11 columns), add
# BH-adjusted p-values, then render the survival index page.
p_survival<-data.frame(matrix(unlist(p_survival),byrow=T,ncol=11),stringsAsFactors=FALSE)
colnames(p_survival) <- c("coxregwaldtestp","coxreg_coef","coxreg_LL","coxreg_UL","logranktestp","medianOS1","medianOS2","Lower95CI1","Upper95CI1","Lower95CI2","Upper95CI2")
rownames(p_survival) <- paste0("Module ",1:AMARETTOresults$NrModules)
p_survival[,"logranktestpadj"]<-p.adjust(p_survival[,"logranktestp"],method="BH")
p_survival[,"coxregwaldtestpadj"]<-p.adjust(p_survival[,"coxregwaldtestp"],method="BH")
rmarkdown::render(system.file("templates/TemplateSurvivalIndex.Rmd",package="AMARETTO"),output_file=paste0(full_path,"/AMARETTOhtmls/survival/survivalindex.html"), params = list(
p_survival=p_survival))
}
if (phenotypeanalysis == TRUE){
#cluster <- makeCluster(c(rep("localhost", NrCores)), type = "SOCK")
#registerDoParallel(cluster,cores=NrCores)
#foreach (i = 1:AMARETTOresults$NrModules, .packages = c('tidyverse','rmarkdown','kableExtra')) %dopar% {
# Render one phenotype report per module (sequentially; the parallel
# version is kept commented out above).
for(i in 1:AMARETTOresults$NrModules) {
moduleNr <- paste0("Module_",i)
modulemd<-paste0(full_path,"/AMARETTOhtmls/phenotypes/modules/module",i,".rmd")
file.copy(system.file("templates/TemplatePhenotypesModule.Rmd",package="AMARETTO"),modulemd)
rmarkdown::render(modulemd,output_file = paste0("module",i,".html"), params = list(
parameters=parameters,
typelist=typelist,
annotation=annotation %>% select(!!idpatients,parameters,moduleNr),
i=i),quiet = TRUE)
file.remove(modulemd)
}
#stopCluster(cluster)
cat("The phenotype module htmls are finished.\n")
# NOTE(review): idpatients is hard-coded to "ID" here, ignoring the
# function's idpatients argument -- likely should be idpatients = idpatients.
phenotypetable<-AMARETTO_PhenAssociation(AMARETTOresults, annotation = annotation, idpatients = "ID", parameters=parameters, typelist = typelist,printplots = FALSE)
# NOTE(review): p_survival is only defined inside the survivalanalysis
# branch; this render errors when phenotypeanalysis = TRUE but
# survivalanalysis = FALSE. Also, phenotypetable is computed above but the
# index template is not given it -- confirm which object was intended.
rmarkdown::render(system.file("templates/TemplatePhenotypesIndex.Rmd",package="AMARETTO"),output_file=paste0(full_path,"/AMARETTOhtmls/phenotypes/phenotypesindex.html"), params = list(
p_survival=p_survival))
}
}
|
/R/amaretto_htmlphenotypes.R
|
permissive
|
renesugar/ImagingAMARETTO
|
R
| false
| false
| 6,464
|
r
|
#' @title AMARETTO_HTMLCharacterisation
#'
#' @description Generates per-module HTML reports characterising AMARETTO
#' communities, optionally including a survival analysis and/or a phenotype
#' association analysis. Reports are written under
#' \code{report_address/AMARETTOhtmls/}.
#'
#' @param AMARETTOresults AMARETTO results object; \code{$ModuleData}
#'   (modules x patients matrix) and \code{$NrModules} are used here.
#' @param annotation Patient annotation data frame; must share patient ids
#'   with the module data.
#' @param report_address Directory under which the "AMARETTOhtmls" output
#'   tree is written (default \code{"./"}).
#' @param survivalanalysis Logical; run the per-module survival analysis?
#' @param phenotypeanalysis Logical; run the per-module phenotype analysis?
#' @param idpatients Name of the \code{annotation} column holding patient ids.
#' @param idvitalstatus Name of the \code{annotation} column holding vital
#'   status; values are matched case-insensitively against "dead"/"alive".
#' @param iddaystodeath Name of the \code{annotation} column with days to
#'   death (used for patients matched as "dead").
#' @param idfollowupdays Name of the \code{annotation} column with follow-up
#'   days (used for patients matched as "alive").
#' @param daystoyears Logical; if TRUE, times are divided by 365 to convert
#'   days to years.
#' @param rightcensored Either FALSE (no extra censoring) or a numeric time
#'   point at which observations are right-censored.
#' @param parameters Phenotype columns of \code{annotation} passed to the
#'   phenotype report templates.
#' @param typelist Type specification passed along with \code{parameters} to
#'   the phenotype association analysis.
#' @param NrCores Number of local worker processes used for the survival
#'   analysis.
#'
#' @return A set of HTMLs, giving characteristics of the communities
#'
#' @import survminer
#' @import tidyverse
#' @import survival
#' @import gtools
#' @import kableExtra
#' @import doParallel
#' @import foreach
#' @export
#'
AMARETTO_HTMLCharacterisation<-function(AMARETTOresults, annotation, report_address="./",survivalanalysis=FALSE, phenotypeanalysis=FALSE, idpatients=NULL, idvitalstatus=NULL, iddaystodeath=NULL, idfollowupdays=NULL, daystoyears=FALSE, rightcensored=FALSE, parameters=NULL, typelist=NULL, NrCores=1){
full_path<-normalizePath(report_address)
# Per-patient mean module expression: transpose so rows are patients,
# then turn rownames into an explicit "idpatients" column for joining.
mean_expression_modules <- t(AMARETTOresults$ModuleData)
mean_expression_modules <- rownames_to_column(as.data.frame(mean_expression_modules),"idpatients")
if (sum(annotation[,idpatients] %in% mean_expression_modules[,"idpatients"],na.rm=TRUE)==0){
stop("No overlap between patients ids")
} else {
# NOTE(review): length(x %in% y) is just length(x), so this message reports
# the total number of annotation rows, not the overlap count -- sum() was
# probably intended here, as in the check above.
print(paste0("Survival data will be calculated on ",length(annotation[,idpatients] %in% mean_expression_modules[,"idpatients"]), " patients."))
}
# Join module means onto the annotation, matching the caller's id column.
annotation <- suppressMessages(inner_join(annotation, mean_expression_modules %>% dplyr::rename(!!idpatients :="idpatients")))
if (survivalanalysis == TRUE){
# Split event times: days-to-death for "dead" patients, follow-up days for
# "alive" patients (both matched case-insensitively).
daysdeath <- as.numeric(annotation[grep("dead", annotation[,idvitalstatus],ignore.case=TRUE),iddaystodeath])
dead <- annotation[grep("dead", annotation[,idvitalstatus],ignore.case=TRUE),idvitalstatus]
daysfollowup <- as.numeric(annotation[grep("alive", annotation[,idvitalstatus],ignore.case=TRUE),idfollowupdays])
alive <- annotation[grep("alive", annotation[,idvitalstatus],ignore.case=TRUE),idvitalstatus]
time<- c(daysfollowup,daysdeath)
status <- tolower(c(alive, dead))
if (daystoyears == TRUE){
time<-time/365
}
# Optional right-censoring at a user-supplied time point: anything beyond
# the cutoff is treated as alive at the cutoff.
if (!rightcensored == FALSE){
if (is.numeric(rightcensored)){
status[time>rightcensored]<-"alive"
time[time>rightcensored]<-rightcensored
} else {
stop("The time point to censor the data is incorrect.")
}
}
status <- as.numeric(as.factor(status))
# NOTE(review): c_surv is computed but never used below.
c_surv<-Surv(time, status)
#per module
cluster <- makeCluster(c(rep("localhost", NrCores)), type = "SOCK")
registerDoParallel(cluster,cores=NrCores)
# For each module: Cox regression on mean expression, Kaplan-Meier split at
# the expression median, and a rendered per-module HTML report.
p_survival<-foreach (i = 1:AMARETTOresults$NrModules, .packages = c('tidyverse','rmarkdown','gtools','survminer','survival')) %dopar% {
moduleNr <- paste0("Module_",i)
mean_dead <- annotation[grep("dead", annotation[,idvitalstatus],ignore.case=TRUE), moduleNr]
mean_alive <- annotation[grep("alive", annotation[,idvitalstatus],ignore.case=TRUE), moduleNr]
mean_expression <- c(mean_alive,mean_dead)
mean_expression_cut <- quantcut(mean_expression, q=2, na.rm=TRUE)
datasurvfit <- as.data.frame(cbind(time,status,mean_expression,mean_expression_cut)) %>% drop_na()
cox_reg <- coxph(Surv(time,status)~mean_expression,data=datasurvfit)
coxstats<- c(summary(cox_reg)$waldtest["pvalue"], summary(cox_reg)$conf.int[1], summary(cox_reg)$conf.int[3],summary(cox_reg)$conf.int[4])
c_meanexp <- survival::survfit(Surv(time,status) ~ factor(mean_expression_cut),data=datasurvfit)
kmstats <- c(survminer::surv_pvalue(c_meanexp,data=datasurvfit)$pval, summary(c_meanexp)$table[1,"median"], summary(c_meanexp)$table[2,"median"], summary(c_meanexp)$table[1,"0.95LCL"],
summary(c_meanexp)$table[2,"0.95LCL"], summary(c_meanexp)$table[1,"0.95UCL"], summary(c_meanexp)$table[2,"0.95UCL"])
# Render the per-module survival report from a throwaway copy of the template.
modulemd<-paste0(full_path,"/AMARETTOhtmls/survival/modules/module",i,".rmd")
file.copy(system.file("templates/TemplateSurvivalModule.Rmd",package="AMARETTO"),modulemd)
rmarkdown::render(modulemd,output_file = paste0("module",i,".html"), params = list(
cox_reg=cox_reg,
c_meanexp=c_meanexp,
datasurvfit=datasurvfit,
i=i),quiet = TRUE)
file.remove(modulemd)
return(c(coxstats,kmstats))
}
stopCluster(cluster)
cat("The survival module htmls are finished.\n")
# Assemble per-module stats (4 Cox + 7 KM values = 11 columns), add
# BH-adjusted p-values, then render the survival index page.
p_survival<-data.frame(matrix(unlist(p_survival),byrow=T,ncol=11),stringsAsFactors=FALSE)
colnames(p_survival) <- c("coxregwaldtestp","coxreg_coef","coxreg_LL","coxreg_UL","logranktestp","medianOS1","medianOS2","Lower95CI1","Upper95CI1","Lower95CI2","Upper95CI2")
rownames(p_survival) <- paste0("Module ",1:AMARETTOresults$NrModules)
p_survival[,"logranktestpadj"]<-p.adjust(p_survival[,"logranktestp"],method="BH")
p_survival[,"coxregwaldtestpadj"]<-p.adjust(p_survival[,"coxregwaldtestp"],method="BH")
rmarkdown::render(system.file("templates/TemplateSurvivalIndex.Rmd",package="AMARETTO"),output_file=paste0(full_path,"/AMARETTOhtmls/survival/survivalindex.html"), params = list(
p_survival=p_survival))
}
if (phenotypeanalysis == TRUE){
#cluster <- makeCluster(c(rep("localhost", NrCores)), type = "SOCK")
#registerDoParallel(cluster,cores=NrCores)
#foreach (i = 1:AMARETTOresults$NrModules, .packages = c('tidyverse','rmarkdown','kableExtra')) %dopar% {
# Render one phenotype report per module (sequentially; the parallel
# version is kept commented out above).
for(i in 1:AMARETTOresults$NrModules) {
moduleNr <- paste0("Module_",i)
modulemd<-paste0(full_path,"/AMARETTOhtmls/phenotypes/modules/module",i,".rmd")
file.copy(system.file("templates/TemplatePhenotypesModule.Rmd",package="AMARETTO"),modulemd)
rmarkdown::render(modulemd,output_file = paste0("module",i,".html"), params = list(
parameters=parameters,
typelist=typelist,
annotation=annotation %>% select(!!idpatients,parameters,moduleNr),
i=i),quiet = TRUE)
file.remove(modulemd)
}
#stopCluster(cluster)
cat("The phenotype module htmls are finished.\n")
# NOTE(review): idpatients is hard-coded to "ID" here, ignoring the
# function's idpatients argument -- likely should be idpatients = idpatients.
phenotypetable<-AMARETTO_PhenAssociation(AMARETTOresults, annotation = annotation, idpatients = "ID", parameters=parameters, typelist = typelist,printplots = FALSE)
# NOTE(review): p_survival is only defined inside the survivalanalysis
# branch; this render errors when phenotypeanalysis = TRUE but
# survivalanalysis = FALSE. Also, phenotypetable is computed above but the
# index template is not given it -- confirm which object was intended.
rmarkdown::render(system.file("templates/TemplatePhenotypesIndex.Rmd",package="AMARETTO"),output_file=paste0(full_path,"/AMARETTOhtmls/phenotypes/phenotypesindex.html"), params = list(
p_survival=p_survival))
}
}
|
# Demonstrate grid approximation of the posterior for a
# globe tossing problem with 6 water observations out of
# 9 globe tosses
#==========================================================
# 1) Define the grid to be used to compute the posterior
#    20 grid points, bounded by 0 and 1 (p_grid)
p_grid <- seq(0, 1, length.out = 20)
#==========================================================
# 2) Compute/define the value of the prior at each
#    parameter value on the grid
#    In this case, simply choose a flat prior (prior)
prior <- rep(1, 20)
# Plot it with plot()
plot(p_grid, prior)
#==========================================================
# 3) Compute the likelihood at each parameter value on
#    the grid, using the binomial pmf.
#    Fix: spell out `prob` instead of relying on partial
#    argument matching of `p` (dbinom has no `p` argument).
likelihood <- dbinom(6, size = 9, prob = p_grid)
# plot it
plot(p_grid, likelihood)
#==========================================================
# 4) Compute the unstandardized posterior at each
#    parameter value on the grid: simply the product of
#    the likelihood and prior
unstd.posterior <- likelihood * prior
# Again, could visualize
plot(p_grid, unstd.posterior, pch = 19, type = "b")
# Note that this unstandardized posterior is not a proper
# probability distribution since it does not add to 1
sum(unstd.posterior)
#==========================================================
# 5) Standardize the posterior so it sums to 1
posterior <- unstd.posterior / sum(unstd.posterior)
# This standardized posterior is a now proper probability
# distribution
sum(posterior)
# Visualize the posterior
plot(p_grid, posterior, pch = 19, type = "b",
     xlab = "proportion of water on globe",
     ylab = "posterior probability")
|
/lecture_code/lecture_week_06.R
|
no_license
|
ger2128/stats_eco_evo_2021
|
R
| false
| false
| 1,720
|
r
|
# Demonstrate grid approximation of the posterior for a
# globe tossing problem with 6 water observations out of
# 9 globe tosses
#==========================================================
# 1) Define the grid to be used to compute the posterior
#    20 grid points, bounded by 0 and 1 (p_grid)
p_grid <- seq(0, 1, length.out = 20)
#==========================================================
# 2) Compute/define the value of the prior at each
#    parameter value on the grid
#    In this case, simply choose a flat prior (prior)
prior <- rep(1, 20)
# Plot it with plot()
plot(p_grid, prior)
#==========================================================
# 3) Compute the likelihood at each parameter value on
#    the grid, using the binomial pmf.
#    Fix: spell out `prob` instead of relying on partial
#    argument matching of `p` (dbinom has no `p` argument).
likelihood <- dbinom(6, size = 9, prob = p_grid)
# plot it
plot(p_grid, likelihood)
#==========================================================
# 4) Compute the unstandardized posterior at each
#    parameter value on the grid: simply the product of
#    the likelihood and prior
unstd.posterior <- likelihood * prior
# Again, could visualize
plot(p_grid, unstd.posterior, pch = 19, type = "b")
# Note that this unstandardized posterior is not a proper
# probability distribution since it does not add to 1
sum(unstd.posterior)
#==========================================================
# 5) Standardize the posterior so it sums to 1
posterior <- unstd.posterior / sum(unstd.posterior)
# This standardized posterior is a now proper probability
# distribution
sum(posterior)
# Visualize the posterior
plot(p_grid, posterior, pch = 19, type = "b",
     xlab = "proportion of water on globe",
     ylab = "posterior probability")
|
#' Plot Global Active Power over time for 1-2 Feb 2007.
#'
#' Reads the household power consumption dataset, subsets it to the two
#' target dates, and writes a line plot of Global_active_power against time
#' to "plot2.png" (480x480) in the working directory.
#'
#' Fix: the original immediately overwrote the `file` argument with a
#' hard-coded path, so the parameter was ignored; it is now an overridable
#' default instead.
#'
#' @param file Path to the semicolon-separated data file (default
#'   "household_power_consumption.txt").
#' @return The value of dev.off(), invisibly relevant only for its side
#'   effect of closing the png device; the real output is "plot2.png".
plot2 <- function(file = "household_power_consumption.txt") {
  # Read all 9 columns as character; the one column we plot is converted
  # explicitly below, and the dates are compared as character strings.
  temp_data <- read.table(file, sep = ";", header = TRUE,
                          colClasses = rep("character", 9))
  # Subset to the two required dates
  data <- subset(temp_data, temp_data$Date == "2/2/2007" | temp_data$Date == "1/2/2007")
  # Build POSIX timestamps from the separate Date and Time columns for plotting
  timestamp <- strptime(paste(data$Date, data$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
  # Convert the plotted variable to numeric
  data$Global_active_power <- as.numeric(data$Global_active_power)
  # Open the png device, draw the line plot, then close the device
  png("plot2.png", height = 480, width = 480)
  plot(timestamp, data$Global_active_power, type = "l",
       xlab = "", ylab = "Global Active Power(kilowatts)")
  dev.off()
}
|
/plot2.R
|
no_license
|
kandsar/Exploratory_Data_Analysis
|
R
| false
| false
| 845
|
r
|
# Plot Global Active Power over 2007-02-01 and 2007-02-02 and save the
# chart to "plot2.png" in the current working directory.
#
# Args:
#   file: path to the semicolon-separated household power consumption
#         data set. Previously this argument was ignored because the
#         function immediately overwrote it with a hard-coded filename;
#         it is now honored, with the old filename as the default so
#         existing zero-argument calls behave the same.
#
# Returns: invisibly, the value of dev.off(); called for its side effect
#          of writing plot2.png.
plot2 <- function(file = "household_power_consumption.txt"){
  # Read everything as character; the column of interest is converted
  # to numeric after subsetting (non-numeric entries become NA).
  temp_data <- read.table(file, sep=";", header=TRUE, colClasses=rep("character",9))
  # Keep only the two target dates (dates are compared as character strings)
  data <- subset(temp_data, temp_data$Date == "2/2/2007" | temp_data$Date == "1/2/2007")
  # Build POSIXlt timestamps from the Date and Time columns for the x-axis
  timestamp <- strptime(paste(data$Date, data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
  # Converting to numeric datatype
  data$Global_active_power <- as.numeric(data$Global_active_power)
  # Render the line chart to a 480x480 PNG device
  png("plot2.png", height=480, width=480)
  plot(timestamp, data$Global_active_power, type="l", xlab="", ylab="Global Active Power(kilowatts)")
  # Close the device so the file is flushed to disk
  invisible(dev.off())
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hexjsonwidget.R
\name{hexjsonfromdataframe}
\alias{hexjsonfromdataframe}
\title{Create a hexjson file from a dataframe}
\usage{
hexjsonfromdataframe(df, layout = "odd-r", keyid = "id", q = "q",
r = "r")
}
\arguments{
\item{df}{Dataframe to convert to hexjson}
\item{layout}{Can be one of \code{odd-r} (pointy-topped, default), \code{even-r} (pointy-topped), \code{odd-q} (flat-topped), \code{even-q} (flat-topped).}
\item{keyid}{The column specifying the hex identifier/key (default is \code{id}).}
\item{q}{The column specifying the hexJSON columns (default is \code{q}).}
\item{r}{The column specifying the hexJSON rows (default is \code{r}).}
}
\value{
a hexjson (JSON) object
}
\description{
Create a hexjson file from a dataframe
}
\author{
Tony Hirst (@psychemedia)
}
|
/man/hexjsonfromdataframe.Rd
|
no_license
|
psychemedia/htmlwidget-hexjson
|
R
| false
| true
| 858
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hexjsonwidget.R
\name{hexjsonfromdataframe}
\alias{hexjsonfromdataframe}
\title{Create a hexjson file from a dataframe}
\usage{
hexjsonfromdataframe(df, layout = "odd-r", keyid = "id", q = "q",
r = "r")
}
\arguments{
\item{df}{Dataframe to convert to hexjson}
\item{layout}{Can be one of \code{odd-r} (pointy-topped, default), \code{even-r} (pointy-topped), \code{odd-q} (flat-topped), \code{even-q} (flat-topped).}
\item{keyid}{The column specifying the hex identifier/key (default is \code{id}).}
\item{q}{The column specifying the hexJSON columns (default is \code{q}).}
\item{r}{The column specifying the hexJSON rows (default is \code{r}).}
}
\value{
a hexjson (JSON) object
}
\description{
Create a hexjson file from a dataframe
}
\author{
Tony Hirst (@psychemedia)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/npn_allobssp.R
\name{npn_allobssp}
\alias{npn_allobssp}
\title{Get all observations for a particular species or set of species}
\usage{
npn_allobssp(speciesid, startdate = NULL, enddate = NULL, ...)
}
\arguments{
\item{speciesid}{species id numbers, from 1 to infinity, potentially,
use e.g., c(52, 53, etc.) if more than one species desired (numeric)}
\item{startdate}{start date of data period desired, see format in examples (character)}
\item{enddate}{end date of data period desired, see format in examples (character)}
\item{...}{Curl options passed on to \code{\link[httr]{GET}}}
}
\value{
A list with slots for taxa, stations, phenophase (metadata) and data
}
\description{
Get all observations for a particular species or set of species
}
\examples{
\dontrun{
# Lookup names
lookup_names(name='Pinus', type='genus')
# Get data on one species
npn_allobssp(speciesid = 52, startdate='2008-01-01', enddate='2011-12-31')
# Get data on two species
npn_allobssp(speciesid = c(52, 53), startdate='2008-01-01', enddate='2011-12-31')
# Get data on one species, convert to a single data.frame
npn_allobssp(speciesid = 52, startdate='2008-01-01', enddate='2011-12-31')
}
}
|
/man/npn_allobssp.Rd
|
permissive
|
PhenologyOfPlace/rnpn
|
R
| false
| true
| 1,255
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/npn_allobssp.R
\name{npn_allobssp}
\alias{npn_allobssp}
\title{Get all observations for a particular species or set of species}
\usage{
npn_allobssp(speciesid, startdate = NULL, enddate = NULL, ...)
}
\arguments{
\item{speciesid}{species id numbers, from 1 to infinity, potentially,
use e.g., c(52, 53, etc.) if more than one species desired (numeric)}
\item{startdate}{start date of data period desired, see format in examples (character)}
\item{enddate}{end date of data period desired, see format in examples (character)}
\item{...}{Curl options passed on to \code{\link[httr]{GET}}}
}
\value{
A list with slots for taxa, stations, phenophase (metadata) and data
}
\description{
Get all observations for a particular species or set of species
}
\examples{
\dontrun{
# Lookup names
lookup_names(name='Pinus', type='genus')
# Get data on one species
npn_allobssp(speciesid = 52, startdate='2008-01-01', enddate='2011-12-31')
# Get data on two species
npn_allobssp(speciesid = c(52, 53), startdate='2008-01-01', enddate='2011-12-31')
# Get data on one species, convert to a single data.frame
npn_allobssp(speciesid = 52, startdate='2008-01-01', enddate='2011-12-31')
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frame.R
\name{z.values.get}
\alias{z.values.get}
\title{get value labels, wrapper of \code{\link[sjmisc]{get_labels}}}
\usage{
z.values.get(x, include.values = NULL, attr.only = T,
include.non.labelled = F, ...)
}
\value{
returns a list $varname
}
\details{
see also \code{\link[sjmisc]{get_values}}
}
\examples{
}
\seealso{
\code{\link[tidyr]{gather}}, \code{\link[tidyr]{spread}}, \code{\link[tidyr]{separate}}, \code{\link[tidyr]{unite}}
\cr \code{\link[dplyr]{select}}, \code{\link[dplyr]{slice}}
\cr \code{\link[dplyr]{distinct}}, \code{\link[dplyr]{arrange}}
\cr \code{\link[dplyr]{summarise}}, \code{\link[dplyr]{count}}, \code{\link[dplyr]{mutate}}
\cr \code{\link[dplyr]{group_by}}, \code{\link[dplyr]{left_join}}, \code{\link[dplyr]{right_join}}, \code{\link[dplyr]{inner_join}}, \code{\link[dplyr]{full_join}}, \code{\link[dplyr]{semi_join}}, \code{\link[dplyr]{anti_join}}
\cr \code{\link[dplyr]{intersect}}, \code{\link[dplyr]{union}}, \code{\link[dplyr]{setdiff}}
\cr \code{\link[dplyr]{bind_rows}}, \code{\link[dplyr]{bind_cols}}
Other data.transformation.functions: \code{\link{z.2factor}},
\code{\link{z.2label}}, \code{\link{z.2long}},
\code{\link{z.2lower}}, \code{\link{z.2value}},
\code{\link{z.2wide}}, \code{\link{z.compute}},
\code{\link{z.delete}}, \code{\link{z.del}},
\code{\link{z.label.get}}, \code{\link{z.label.set}},
\code{\link{z.leftjoin}}, \code{\link{z.move}},
\code{\link{z.newcol}}, \code{\link{z.recode2}},
\code{\link{z.recode}}, \code{\link{z.recols}},
\code{\link{z.recol}}, \code{\link{z.rename}},
\code{\link{z.rncols}}, \code{\link{z.rncol}},
\code{\link{z.select}}, \code{\link{z.sort}},
\code{\link{z.split}}, \code{\link{z.unique}},
\code{\link{z.values.set}}
}
|
/man/z.values.get.Rd
|
permissive
|
hmorzaria/zmisc
|
R
| false
| true
| 1,819
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frame.R
\name{z.values.get}
\alias{z.values.get}
\title{get value labels, wrapper of \code{\link[sjmisc]{get_labels}}}
\usage{
z.values.get(x, include.values = NULL, attr.only = T,
include.non.labelled = F, ...)
}
\value{
returns a list $varname
}
\details{
see also \code{\link[sjmisc]{get_values}}
}
\examples{
}
\seealso{
\code{\link[tidyr]{gather}}, \code{\link[tidyr]{spread}}, \code{\link[tidyr]{separate}}, \code{\link[tidyr]{unite}}
\cr \code{\link[dplyr]{select}}, \code{\link[dplyr]{slice}}
\cr \code{\link[dplyr]{distinct}}, \code{\link[dplyr]{arrange}}
\cr \code{\link[dplyr]{summarise}}, \code{\link[dplyr]{count}}, \code{\link[dplyr]{mutate}}
\cr \code{\link[dplyr]{group_by}}, \code{\link[dplyr]{left_join}}, \code{\link[dplyr]{right_join}}, \code{\link[dplyr]{inner_join}}, \code{\link[dplyr]{full_join}}, \code{\link[dplyr]{semi_join}}, \code{\link[dplyr]{anti_join}}
\cr \code{\link[dplyr]{intersect}}, \code{\link[dplyr]{union}}, \code{\link[dplyr]{setdiff}}
\cr \code{\link[dplyr]{bind_rows}}, \code{\link[dplyr]{bind_cols}}
Other data.transformation.functions: \code{\link{z.2factor}},
\code{\link{z.2label}}, \code{\link{z.2long}},
\code{\link{z.2lower}}, \code{\link{z.2value}},
\code{\link{z.2wide}}, \code{\link{z.compute}},
\code{\link{z.delete}}, \code{\link{z.del}},
\code{\link{z.label.get}}, \code{\link{z.label.set}},
\code{\link{z.leftjoin}}, \code{\link{z.move}},
\code{\link{z.newcol}}, \code{\link{z.recode2}},
\code{\link{z.recode}}, \code{\link{z.recols}},
\code{\link{z.recol}}, \code{\link{z.rename}},
\code{\link{z.rncols}}, \code{\link{z.rncol}},
\code{\link{z.select}}, \code{\link{z.sort}},
\code{\link{z.split}}, \code{\link{z.unique}},
\code{\link{z.values.set}}
}
|
# Fetch the prebuilt gdal2 bundle from rwinlib unless it is already
# unpacked under ../windows. The requested version arrives on the
# command line (commandArgs(TRUE)).
VERSION <- commandArgs(TRUE)
header_path <- sprintf("../windows/gdal2-%s/include/gdal/gdal.h", VERSION)
if (!file.exists(header_path)) {
  # Older R on Windows needs Internet2 enabled for https downloads
  if (getRversion() < "3.3.0") setInternet2()
  archive_url <- sprintf("https://github.com/rwinlib/gdal2/archive/v%s.zip", VERSION)
  download.file(archive_url, "lib.zip", quiet = TRUE)
  dir.create("../windows", showWarnings = FALSE)
  unzip("lib.zip", exdir = "../windows")
  unlink("lib.zip")
}
|
/fuzzedpackages/uFTIR/tools/winlibs.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 411
|
r
|
# Fetch the prebuilt gdal2 bundle from rwinlib unless it is already
# unpacked under ../windows. The requested version arrives on the
# command line (commandArgs(TRUE)).
VERSION <- commandArgs(TRUE)
header_path <- sprintf("../windows/gdal2-%s/include/gdal/gdal.h", VERSION)
if (!file.exists(header_path)) {
  # Older R on Windows needs Internet2 enabled for https downloads
  if (getRversion() < "3.3.0") setInternet2()
  archive_url <- sprintf("https://github.com/rwinlib/gdal2/archive/v%s.zip", VERSION)
  download.file(archive_url, "lib.zip", quiet = TRUE)
  dir.create("../windows", showWarnings = FALSE)
  unzip("lib.zip", exdir = "../windows")
  unlink("lib.zip")
}
|
# Exercise 7: interactive applications
# Load libraries so they are available
library("shiny")
# Use source() to execute the `app_ui.R` and `app_server.R` files. These will
# define the UI value and server function respectively.
# (They are expected to create the objects `ui` and `server` in the
# global environment; source() errors if either file is missing.)
source("app_ui.R")
source("app_server.R")
# You will need to fill in the `app_ui.R` file to create the layout.
# Run the app through this file.
# Create a new `shinyApp()` using the loaded `ui` and `server` variables
# (printing the returned app object at top level is what launches the app)
shinyApp(ui = ui, server = server)
|
/chapter-19-exercises/exercise-7/app.R
|
permissive
|
sumeetwaraich/book-exercises
|
R
| false
| false
| 484
|
r
|
# Exercise 7: interactive applications
# Load libraries so they are available
library("shiny")
# Use source() to execute the `app_ui.R` and `app_server.R` files. These will
# define the UI value and server function respectively.
# (They are expected to create the objects `ui` and `server` in the
# global environment; source() errors if either file is missing.)
source("app_ui.R")
source("app_server.R")
# You will need to fill in the `app_ui.R` file to create the layout.
# Run the app through this file.
# Create a new `shinyApp()` using the loaded `ui` and `server` variables
# (printing the returned app object at top level is what launches the app)
shinyApp(ui = ui, server = server)
|
# Reproducibility for the simulated data below
set.seed( 20 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
# simu(s): simulate 100 GLCM-like count vectors (rows), 20 per class for
# 5 classes. Each class is defined by prob_glcm() with a different mean
# shift c in {5, 5.5, 6, 6.5, 7}; `s` scales the covariance of the
# underlying bivariate normal. Returns a 100 x 136 matrix of counts.
simu=function(s){
# prob_glcm(c, s, mc): Monte Carlo estimate of the 136 cell probabilities
# for one class. Draws mc points from N(mu, sigma) with mu = (2+c, 14-c)
# and covariance s * [[1, -0.7], [-0.7, 1]], bins them into a 16x16 grid,
# symmetrizes, smooths, and returns the normalized lower-triangle values.
prob_glcm<-function(c,s=s,mc=30000){
mu<-c(2+c,14-c)
sigma<-matrix(s*c(1,-0.7,-0.7,1),nrow=2)
elip<-rmvnorm(mc,mu,sigma)
# par(xaxs='i',yaxs='i')
# plot(elip,xlim =c(0,16) ,ylim=c(0,16))
# abline(16,-1,col='red')
# abline(h=16);abline(h=15);abline(h=14);abline(h=13);abline(h=12);abline(h=11);abline(h=10);abline(h=9);
# abline(h=8);abline(h=7);abline(h=6);abline(h=5);abline(h=4);abline(h=3);abline(h=2);abline(h=1);abline(h=0)
# abline(v=16);abline(v=15);abline(v=14);abline(v=13);abline(v=12);abline(v=11);abline(v=10);abline(v=9);
# abline(v=0);abline(v=1);abline(v=2);abline(v=3);abline(v=4);abline(v=5);abline(v=6);abline(v=7);abline(v=8)
# Bin each Monte Carlo draw into one of the 16x16 unit cells; draws
# falling outside [0,16] x [0,16] are silently discarded.
cell_count<-rep(0,16*16)
for (i in 1:mc)
{
for (m in 1:16) {
for (k in 16:1) {
if (( (m-1) <elip[i,1])&(elip[i,1]< m)&( (k-1) <elip[i,2])&(elip[i,2]< k)) {
cell_count[16-k+1+16*(m-1)]=cell_count[16-k+1+16*(m-1)]+1}
}
}
}
## -c(2:16,19:32,36:48,53:64,70:80,87:96,104:112,121:128,138:144,155:160,172:176,189:192,206:208,223:224,240)
z<-cell_count/sum(cell_count)
# Mirror the upper-triangle cells onto the full 16x16 grid via this
# hand-built index vector (appears to symmetrize the count grid, as
# GLCMs are symmetric -- NOTE(review): indices assumed correct as given).
z_whole<-z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
# Lay the probabilities out on cell centers and smooth them spatially
arg. <- expand.grid(c(0.5:15.5),c(15.5:0.5))
I = as.image( Z=z_whole, x=arg., grid=list(x=seq(0.5,15.5,1), y=seq(0.5,15.5,1)))
image(I)
smooth.I <- image.smooth(I, theta=1);
#################################################
### notice the order of this smoothed image ###
#################################################
# Flatten the lower triangle of the smoothed grid row by row
# (row r contributes its first r cells, read bottom-up in columns)
den=c()
for (r in 1:16) {
for (w in 1:r) {
den=c(den,smooth.I$z[r,16-(w-1)])
}
}
# Renormalize so the 136 retained cells form a probability vector
prob<-den/sum(den)
return(prob)
}
# One probability vector per class; larger c shifts the class mean
prob1=prob_glcm(c=5,s=s)
prob2=prob_glcm(c=5.5,s=s)
prob3=prob_glcm(c=6,s=s)
prob4=prob_glcm(c=6.5,s=s)
prob5=prob_glcm(c=7,s=s)
# 100 images x 136 cells; rows 1-20 are class 1, 21-40 class 2, etc.
# Each image gets a random total count t ~ Uniform(500, 20000) spread
# over the cells in proportion to its class probabilities.
glcm=matrix(0,nrow=20*5,ncol=136)
for (j in 1:20)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob1)
}
for (j in 21:40)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob2)
}
for (j in 41:60)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob3)
}
for (j in 61:80)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob4)
}
for (j in 81:100)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob5)
}
glcm
}
# ---------------------------------------------------------------------
# Driver: simulate GLCM data, fit the hierarchical rounded Gaussian
# spatial Dirichlet process (HRGSDP) by MCMC, check convergence, then
# compare its clustering against feature-based competitors.
# (Only change to the code itself: the column label typo
# 'nunber of clusters' is corrected to 'number of clusters'.)
# ---------------------------------------------------------------------
Z=simu(s=12)
Z_met=Z
T_met=nrow(Z_met)                 # number of images (100)
n=ncol(Z_met)                     # GLCM cells per image (136)
X=apply(Z_met,1,sum)              # total count per image
X_met=X
sX_met=(X-mean(X))/sd(X)          # standardized totals, used as covariate
# R[,,t]: design matrix (intercept, standardized total) replicated
# across the n cells of image t
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
##########################            MCMC             ########################
############################################################################
library(HI)
library(invgamma)
# External samplers (update_Y, update_theta, ...) and spatial structure
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
# Initial Theta values: centered near the observed counts, or at -10
# for zero cells. NOTE: `try` shadows base::try within this script.
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
for (j in 1:n){
if (Z_met[i,j]==0) {
try[i,j]=rnorm(1,mean=-10,sd=1)
} else {
try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
}
}
}
# Sanity check: the initial latent Y draw should contain no infinities
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
# Initial values for the scalar parameters
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
# Hyperparameters (vague priors)
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
# Gibbs sampler: each iteration updates the latent Y, the spatial
# effects Theta, regression coefficients Beta, variances, spatial
# correlation rho, and the DP concentration v in turn.
for (iter in 2:N) {
Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
# Two-sided p-values for the Geweke convergence diagnostic of each chain
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
# Posterior mean of Theta over the post-burn-in draws
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
# HRGSDP clustering: Ward clustering of posterior-mean Theta with the
# number of clusters chosen by the KL index
library('NbClust')
NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
HRGSDP=NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')$Best.partition
# Expand the 136-cell lower triangle back to the full symmetric 16x16
# layout (256 columns) expected by the feature extractor cal_stat()
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
# Competing methods all cluster summary features of the full GLCMs
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
# Cross-tabulate each partition against the known class labels
# (20 images per true class), then summarize fit per method
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
res_FeaCO=c(chisq.test(aa,correct = TRUE)$statistic,ncol(aa),error_rate(aa), 'FeaCO')
res_FeaGMM=c(chisq.test(bb,correct = TRUE)$statistic,ncol(bb),error_rate(bb), 'FeaGMM')
res_FeaHC=c(chisq.test(cc,correct = TRUE)$statistic,ncol(cc),error_rate(cc), 'FeaHC')
res_FeaKM=c(chisq.test(dd,correct = TRUE)$statistic,ncol(dd),error_rate(dd), 'FeaKM')
res_FeaSC=c(chisq.test(ee,correct = TRUE)$statistic,ncol(ee),error_rate(ee), 'FeaSC')
res_HRGSDP=c(chisq.test(ff,correct = TRUE)$statistic,ncol(ff),error_rate(ff), 'HRGSDP')
xx = rbind(res_FeaCO, res_FeaGMM, res_FeaHC, res_FeaKM, res_FeaSC, res_HRGSDP)
colnames(xx) = c('pearson.chi.sq', 'number of clusters', 'error.rate', 'method')
xx = as.data.frame(xx)
print(xx)
|
/s=12/simu_20.R
|
no_license
|
mguindanigroup/Radiomics-Hierarchical-Rounded-Gaussian-Spatial-Dirichlet-Process
|
R
| false
| false
| 9,293
|
r
|
# Reproducibility for the simulated data below
set.seed( 20 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
# simu(s): simulate 100 GLCM-like count vectors (rows), 20 per class for
# 5 classes. Each class is defined by prob_glcm() with a different mean
# shift c in {5, 5.5, 6, 6.5, 7}; `s` scales the covariance of the
# underlying bivariate normal. Returns a 100 x 136 matrix of counts.
simu=function(s){
# prob_glcm(c, s, mc): Monte Carlo estimate of the 136 cell probabilities
# for one class. Draws mc points from N(mu, sigma) with mu = (2+c, 14-c)
# and covariance s * [[1, -0.7], [-0.7, 1]], bins them into a 16x16 grid,
# symmetrizes, smooths, and returns the normalized lower-triangle values.
prob_glcm<-function(c,s=s,mc=30000){
mu<-c(2+c,14-c)
sigma<-matrix(s*c(1,-0.7,-0.7,1),nrow=2)
elip<-rmvnorm(mc,mu,sigma)
# par(xaxs='i',yaxs='i')
# plot(elip,xlim =c(0,16) ,ylim=c(0,16))
# abline(16,-1,col='red')
# abline(h=16);abline(h=15);abline(h=14);abline(h=13);abline(h=12);abline(h=11);abline(h=10);abline(h=9);
# abline(h=8);abline(h=7);abline(h=6);abline(h=5);abline(h=4);abline(h=3);abline(h=2);abline(h=1);abline(h=0)
# abline(v=16);abline(v=15);abline(v=14);abline(v=13);abline(v=12);abline(v=11);abline(v=10);abline(v=9);
# abline(v=0);abline(v=1);abline(v=2);abline(v=3);abline(v=4);abline(v=5);abline(v=6);abline(v=7);abline(v=8)
# Bin each Monte Carlo draw into one of the 16x16 unit cells; draws
# falling outside [0,16] x [0,16] are silently discarded.
cell_count<-rep(0,16*16)
for (i in 1:mc)
{
for (m in 1:16) {
for (k in 16:1) {
if (( (m-1) <elip[i,1])&(elip[i,1]< m)&( (k-1) <elip[i,2])&(elip[i,2]< k)) {
cell_count[16-k+1+16*(m-1)]=cell_count[16-k+1+16*(m-1)]+1}
}
}
}
## -c(2:16,19:32,36:48,53:64,70:80,87:96,104:112,121:128,138:144,155:160,172:176,189:192,206:208,223:224,240)
z<-cell_count/sum(cell_count)
# Mirror the upper-triangle cells onto the full 16x16 grid via this
# hand-built index vector (appears to symmetrize the count grid, as
# GLCMs are symmetric -- NOTE(review): indices assumed correct as given).
z_whole<-z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
# Lay the probabilities out on cell centers and smooth them spatially
arg. <- expand.grid(c(0.5:15.5),c(15.5:0.5))
I = as.image( Z=z_whole, x=arg., grid=list(x=seq(0.5,15.5,1), y=seq(0.5,15.5,1)))
image(I)
smooth.I <- image.smooth(I, theta=1);
#################################################
### notice the order of this smoothed image ###
#################################################
# Flatten the lower triangle of the smoothed grid row by row
# (row r contributes its first r cells, read bottom-up in columns)
den=c()
for (r in 1:16) {
for (w in 1:r) {
den=c(den,smooth.I$z[r,16-(w-1)])
}
}
# Renormalize so the 136 retained cells form a probability vector
prob<-den/sum(den)
return(prob)
}
# One probability vector per class; larger c shifts the class mean
prob1=prob_glcm(c=5,s=s)
prob2=prob_glcm(c=5.5,s=s)
prob3=prob_glcm(c=6,s=s)
prob4=prob_glcm(c=6.5,s=s)
prob5=prob_glcm(c=7,s=s)
# 100 images x 136 cells; rows 1-20 are class 1, 21-40 class 2, etc.
# Each image gets a random total count t ~ Uniform(500, 20000) spread
# over the cells in proportion to its class probabilities.
glcm=matrix(0,nrow=20*5,ncol=136)
for (j in 1:20)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob1)
}
for (j in 21:40)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob2)
}
for (j in 41:60)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob3)
}
for (j in 61:80)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob4)
}
for (j in 81:100)
{
t<-round(runif(1,500,20000),0)
glcm[j,]=round(t*prob5)
}
glcm
}
# ---------------------------------------------------------------------
# Driver: simulate GLCM data, fit the hierarchical rounded Gaussian
# spatial Dirichlet process (HRGSDP) by MCMC, check convergence, then
# compare its clustering against feature-based competitors.
# (Only change to the code itself: the column label typo
# 'nunber of clusters' is corrected to 'number of clusters'.)
# ---------------------------------------------------------------------
Z=simu(s=12)
Z_met=Z
T_met=nrow(Z_met)                 # number of images (100)
n=ncol(Z_met)                     # GLCM cells per image (136)
X=apply(Z_met,1,sum)              # total count per image
X_met=X
sX_met=(X-mean(X))/sd(X)          # standardized totals, used as covariate
# R[,,t]: design matrix (intercept, standardized total) replicated
# across the n cells of image t
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
##########################            MCMC             ########################
############################################################################
library(HI)
library(invgamma)
# External samplers (update_Y, update_theta, ...) and spatial structure
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
# Initial Theta values: centered near the observed counts, or at -10
# for zero cells. NOTE: `try` shadows base::try within this script.
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
for (j in 1:n){
if (Z_met[i,j]==0) {
try[i,j]=rnorm(1,mean=-10,sd=1)
} else {
try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
}
}
}
# Sanity check: the initial latent Y draw should contain no infinities
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
# Initial values for the scalar parameters
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
# Hyperparameters (vague priors)
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
# Gibbs sampler: each iteration updates the latent Y, the spatial
# effects Theta, regression coefficients Beta, variances, spatial
# correlation rho, and the DP concentration v in turn.
for (iter in 2:N) {
Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
# Two-sided p-values for the Geweke convergence diagnostic of each chain
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
# Posterior mean of Theta over the post-burn-in draws
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
# HRGSDP clustering: Ward clustering of posterior-mean Theta with the
# number of clusters chosen by the KL index
library('NbClust')
NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
HRGSDP=NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')$Best.partition
# Expand the 136-cell lower triangle back to the full symmetric 16x16
# layout (256 columns) expected by the feature extractor cal_stat()
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
# Competing methods all cluster summary features of the full GLCMs
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
# Cross-tabulate each partition against the known class labels
# (20 images per true class), then summarize fit per method
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
res_FeaCO=c(chisq.test(aa,correct = TRUE)$statistic,ncol(aa),error_rate(aa), 'FeaCO')
res_FeaGMM=c(chisq.test(bb,correct = TRUE)$statistic,ncol(bb),error_rate(bb), 'FeaGMM')
res_FeaHC=c(chisq.test(cc,correct = TRUE)$statistic,ncol(cc),error_rate(cc), 'FeaHC')
res_FeaKM=c(chisq.test(dd,correct = TRUE)$statistic,ncol(dd),error_rate(dd), 'FeaKM')
res_FeaSC=c(chisq.test(ee,correct = TRUE)$statistic,ncol(ee),error_rate(ee), 'FeaSC')
res_HRGSDP=c(chisq.test(ff,correct = TRUE)$statistic,ncol(ff),error_rate(ff), 'HRGSDP')
xx = rbind(res_FeaCO, res_FeaGMM, res_FeaHC, res_FeaKM, res_FeaSC, res_HRGSDP)
colnames(xx) = c('pearson.chi.sq', 'number of clusters', 'error.rate', 'method')
xx = as.data.frame(xx)
print(xx)
|
# Correlation and partial-correlation heat maps of the stock data.
# Changes vs. the original: the R-level O(n^2) double loop that rescaled
# the precision matrix into partial correlations is replaced by the
# exact vectorized equivalent (outer product of the diagonal), and `<-`
# is used for assignment throughout.
source("m70final.R")
library(ggcorrplot)

# First column of the parsed data holds the ticker symbols; the rest
# are the numeric series (stocks in rows, observations in columns).
X <- parse.data("all_stocks_5yr.csv")
stocks <- t(X[, 1])
X <- as.matrix(X[2:length(X)])

# Correlation matrix across stocks
R <- cor(t(X))

# Partial correlations from the precision matrix:
#   pcor(i, j) = -P[i, j] / sqrt(P[i, i] * P[j, j])
# (sqrt(outer(d, d)) reproduces sqrt(P[i,i] * P[j,j]) element-wise,
# so the result matches the original loop exactly, diagonal = -1.)
iiR <- solve(R)
d <- diag(iiR)
iR <- -iiR / sqrt(outer(d, d))

rownames(R) <- colnames(R) <- rownames(iR) <- colnames(iR) <- stocks

# Clustered heat map of the raw correlations
pdf("heatmap.pdf", colormodel = "cmyk")
ggcorrplot(R, hc.order = TRUE, colors = c("green", "blue", "red"), outline.color = "white", hc.method = "average",
tl.cex = 1, tl.srt = 90, title = "Correlation Heat Map of 468 Stocks")
dev.off()
# Clustered heat map of the partial correlations
pdf("partialheatmap.pdf", colormodel = "cmyk")
ggcorrplot(iR, hc.order = TRUE, colors = c("green", "blue", "red"), outline.color = "white", hc.method = "average",
tl.cex = 1, tl.srt = 90, title = "Partial Correlation Heat Map of 468 Stocks")
dev.off()
|
/corrplot.R
|
no_license
|
joegyorda/M70Final
|
R
| false
| false
| 798
|
r
|
# Correlation and partial-correlation heat maps of the stock data.
# Changes vs. the original: the R-level O(n^2) double loop that rescaled
# the precision matrix into partial correlations is replaced by the
# exact vectorized equivalent (outer product of the diagonal), and `<-`
# is used for assignment throughout.
source("m70final.R")
library(ggcorrplot)

# First column of the parsed data holds the ticker symbols; the rest
# are the numeric series (stocks in rows, observations in columns).
X <- parse.data("all_stocks_5yr.csv")
stocks <- t(X[, 1])
X <- as.matrix(X[2:length(X)])

# Correlation matrix across stocks
R <- cor(t(X))

# Partial correlations from the precision matrix:
#   pcor(i, j) = -P[i, j] / sqrt(P[i, i] * P[j, j])
# (sqrt(outer(d, d)) reproduces sqrt(P[i,i] * P[j,j]) element-wise,
# so the result matches the original loop exactly, diagonal = -1.)
iiR <- solve(R)
d <- diag(iiR)
iR <- -iiR / sqrt(outer(d, d))

rownames(R) <- colnames(R) <- rownames(iR) <- colnames(iR) <- stocks

# Clustered heat map of the raw correlations
pdf("heatmap.pdf", colormodel = "cmyk")
ggcorrplot(R, hc.order = TRUE, colors = c("green", "blue", "red"), outline.color = "white", hc.method = "average",
tl.cex = 1, tl.srt = 90, title = "Correlation Heat Map of 468 Stocks")
dev.off()
# Clustered heat map of the partial correlations
pdf("partialheatmap.pdf", colormodel = "cmyk")
ggcorrplot(iR, hc.order = TRUE, colors = c("green", "blue", "red"), outline.color = "white", hc.method = "average",
tl.cex = 1, tl.srt = 90, title = "Partial Correlation Heat Map of 468 Stocks")
dev.off()
|
# This script is used for standardizing New York Street tree data
# By: Lindsay Darling
#TODO Remove dead trees and stumps?
# started on: Oct 15, 2020
# load useful libraries
library(tidyverse)
library(magrittr)
library(sf)
# read in the i-Tree data
NYStreetTree <- st_read(file.path('RawTreeData',
'geo_export_6ba3853a-80c5-4589-b553-ecd0ef92a100.shp'))
# %<>% is magrittr's compound-assignment pipe: the result of the chain
# replaces NYStreetTree in place.
NYStreetTree %<>%
# tree_dbh * 2.54 suggests dbh is recorded in inches and converted to
# cm here -- TODO confirm the source units against the data dictionary
mutate(data_source = 'NYStreetTree',
dbh_cm = `tree_dbh`*2.54,
tree_ID = row_number(),
obs_year = lubridate::year(as.Date(`created_at`, format = '%m/%d/%Y')))%>%
rename(genus_species = `spc_latin`) %>%
# Normalize hybrid/cultivar names so the two-word genus-species split
# below behaves; the 'Lagerstroemia' entry is an identity mapping kept
# for completeness.
mutate(genus_species=recode(genus_species,
'Platanus x acerifolia' = 'Platanus acerifolia',
'Gleditsia triacanthos var. inermis' = 'Gleditsia triacanthos',
'Lagerstroemia' ='Lagerstroemia',
"Acer platanoides 'Crimson King'" = 'Acer platanoides',
'Crataegus crusgalli var. inermis' = 'Crataegus crusgalli',
'Aesculus x carnea' = 'Aesculus carnea'))%>%
# Split into genus + species on the first space; names with more than
# two words drop the extra pieces (tidyr warns when that happens)
separate(genus_species, into = c("genus", "species"),sep = " ", remove = TRUE) %>%
# Fill unmatched/missing pieces with placeholder labels, then rebuild
# the combined genus_species column from the cleaned parts
mutate(species=replace_na(species,'spp.')) %>%
mutate(genus=replace_na(genus,'Unknown')) %>%
mutate(`genus_species` = paste0(genus, ' ', species)) %>%
# Keep only the standardized columns shared across tree data sources
select(data_source,
tree_ID,
obs_year,
genus,
species,
genus_species,
dbh_cm,
latitude,
longitude)
#checks
unique(NYStreetTree$genus_species)
# save out the shapefile
st_write(NYStreetTree, file.path('CleanTreeData','NYStreetTree.shp'), layer = NULL, driver = 'ESRI Shapefile')
|
/NYStreetTree.R
|
no_license
|
ChicagoRTI/CompRedlining
|
R
| false
| false
| 1,809
|
r
|
# This script is used for standardizing New York Street tree data
# By: Lindsay Darling
#TODO Remove dead trees and stumps?
# started on: Oct 15, 2020
# load useful libraries
library(tidyverse)
library(magrittr)
library(sf)
# read in the i-Tree data
NYStreetTree <- st_read(file.path('RawTreeData',
'geo_export_6ba3853a-80c5-4589-b553-ecd0ef92a100.shp'))
# %<>% is magrittr's compound-assignment pipe: the result of the chain
# replaces NYStreetTree in place.
NYStreetTree %<>%
# tree_dbh * 2.54 suggests dbh is recorded in inches and converted to
# cm here -- TODO confirm the source units against the data dictionary
mutate(data_source = 'NYStreetTree',
dbh_cm = `tree_dbh`*2.54,
tree_ID = row_number(),
obs_year = lubridate::year(as.Date(`created_at`, format = '%m/%d/%Y')))%>%
rename(genus_species = `spc_latin`) %>%
# Normalize hybrid/cultivar names so the two-word genus-species split
# below behaves; the 'Lagerstroemia' entry is an identity mapping kept
# for completeness.
mutate(genus_species=recode(genus_species,
'Platanus x acerifolia' = 'Platanus acerifolia',
'Gleditsia triacanthos var. inermis' = 'Gleditsia triacanthos',
'Lagerstroemia' ='Lagerstroemia',
"Acer platanoides 'Crimson King'" = 'Acer platanoides',
'Crataegus crusgalli var. inermis' = 'Crataegus crusgalli',
'Aesculus x carnea' = 'Aesculus carnea'))%>%
# Split into genus + species on the first space; names with more than
# two words drop the extra pieces (tidyr warns when that happens)
separate(genus_species, into = c("genus", "species"),sep = " ", remove = TRUE) %>%
# Fill unmatched/missing pieces with placeholder labels, then rebuild
# the combined genus_species column from the cleaned parts
mutate(species=replace_na(species,'spp.')) %>%
mutate(genus=replace_na(genus,'Unknown')) %>%
mutate(`genus_species` = paste0(genus, ' ', species)) %>%
# Keep only the standardized columns shared across tree data sources
select(data_source,
tree_ID,
obs_year,
genus,
species,
genus_species,
dbh_cm,
latitude,
longitude)
#checks
unique(NYStreetTree$genus_species)
# save out the shapefile
st_write(NYStreetTree, file.path('CleanTreeData','NYStreetTree.shp'), layer = NULL, driver = 'ESRI Shapefile')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prices.R
\docType{data}
\name{prices}
\alias{prices}
\title{Sample data for fisheries_summary function}
\format{A data frame with two columns:
\itemize{
\item fish: name of fish species
\item price: going price of the species}}
\usage{
prices
}
\description{
prices - A data frame with prices of each fish species
}
\keyword{datasets}
|
/man/prices.Rd
|
no_license
|
klarrieu/ESM262PackageAssignment
|
R
| false
| true
| 413
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prices.R
\docType{data}
\name{prices}
\alias{prices}
\title{Sample data for fisheries_summary function}
\format{A data frame with two columns:
\itemize{
\item fish: name of fish species
\item price: going price of the species}}
\usage{
prices
}
\description{
prices - A data frame with prices of each fish species
}
\keyword{datasets}
|
\name{EWHP}
\alias{EWHP}
\docType{data}
\title{House price data set (DataFrame) in England and Wales}
\description{
A house price data set for England and Wales from 2001 with 9 hedonic (explanatory) variables.
}
\usage{data(EWHP)}
\format{
A data frame with 519 observations on the following 12 variables.
\describe{
\item{Easting}{a numeric vector, X coordinate}
\item{Northing}{a numeric vector, Y coordinate}
\item{PurPrice}{a numeric vector, the purchase price of the property}
\item{BldIntWr}{a numeric vector, 1 if the property was built during the world war, 0 otherwise}
\item{BldPostW}{a numeric vector, 1 if the property was built after the world war, 0 otherwise}
\item{Bld60s}{a numeric vector, 1 if the property was built between 1960 and 1969, 0 otherwise}
\item{Bld70s}{a numeric vector, 1 if the property was built between 1970 and 1979, 0 otherwise}
\item{Bld80s}{a numeric vector, 1 if the property was built between 1980 and 1989, 0 otherwise}
\item{TypDetch}{a numeric vector, 1 if the property is detached (i.e. it is a stand-alone house), 0 otherwise}
\item{TypSemiD}{a numeric vector, 1 if the property is semi detached, 0 otherwise}
  \item{TypFlat}{a numeric vector, 1 if the property is a flat (or 'apartment' in the USA), 0 otherwise}
\item{FlrArea}{a numeric vector, floor area of the property in square metres}
}
}
\references{
Fotheringham, A.S., Brunsdon, C., and Charlton, M.E. (2002), Geographically Weighted Regression:
The Analysis of Spatially Varying Relationships, Chichester: Wiley.
}
\author{Binbin Lu \email{binbinlu@whu.edu.cn}}
\examples{
library(sf)
data(EWHP)
data(EWOutline)
ewhp_sf <- st_as_sf(EWHP, coords = c("Easting", "Northing"))
plot(EWOutline$geometry)
plot(ewhp_sf["PurPrice"], add = TRUE)
}
\keyword{data}
\concept{house price}
|
/man/EWHP.Rd
|
no_license
|
GWmodel-Lab/GWmodel3
|
R
| false
| false
| 1,838
|
rd
|
\name{EWHP}
\alias{EWHP}
\docType{data}
\title{House price data set (DataFrame) in England and Wales}
\description{
A house price data set for England and Wales from 2001 with 9 hedonic (explanatory) variables.
}
\usage{data(EWHP)}
\format{
A data frame with 519 observations on the following 12 variables.
\describe{
\item{Easting}{a numeric vector, X coordinate}
\item{Northing}{a numeric vector, Y coordinate}
\item{PurPrice}{a numeric vector, the purchase price of the property}
\item{BldIntWr}{a numeric vector, 1 if the property was built during the world war, 0 otherwise}
\item{BldPostW}{a numeric vector, 1 if the property was built after the world war, 0 otherwise}
\item{Bld60s}{a numeric vector, 1 if the property was built between 1960 and 1969, 0 otherwise}
\item{Bld70s}{a numeric vector, 1 if the property was built between 1970 and 1979, 0 otherwise}
\item{Bld80s}{a numeric vector, 1 if the property was built between 1980 and 1989, 0 otherwise}
\item{TypDetch}{a numeric vector, 1 if the property is detached (i.e. it is a stand-alone house), 0 otherwise}
\item{TypSemiD}{a numeric vector, 1 if the property is semi detached, 0 otherwise}
  \item{TypFlat}{a numeric vector, 1 if the property is a flat (or 'apartment' in the USA), 0 otherwise}
\item{FlrArea}{a numeric vector, floor area of the property in square metres}
}
}
\references{
Fotheringham, A.S., Brunsdon, C., and Charlton, M.E. (2002), Geographically Weighted Regression:
The Analysis of Spatially Varying Relationships, Chichester: Wiley.
}
\author{Binbin Lu \email{binbinlu@whu.edu.cn}}
\examples{
library(sf)
data(EWHP)
data(EWOutline)
ewhp_sf <- st_as_sf(EWHP, coords = c("Easting", "Northing"))
plot(EWOutline$geometry)
plot(ewhp_sf["PurPrice"], add = TRUE)
}
\keyword{data}
\concept{house price}
|
# Minimal shiny app demonstrating tmapProxy(): redraws the choropleth layer
# whenever a different World variable is selected.
# BUG FIX: the guard only tested for shiny, but data(World), tmapOutput(),
# renderTmap(), tm_*() and tmapProxy() all come from tmap, so the snippet
# failed with "could not find function" when tmap was not attached.
if (require("shiny") && require("tmap")) {
  data(World)
  # Candidate variables: every World column except identifiers and geometry.
  world_vars <- setdiff(names(World), c("iso_a3", "name", "sovereignt", "geometry"))
  ui <- fluidPage(
    tmapOutput("map"),
    selectInput("var", "Variable", world_vars)
  )
  server <- function(input, output, session) {
    # Initial render; zindex = 401 tags the layer so the proxy can remove it.
    output$map <- renderTmap({
      tm_shape(World) +
        tm_polygons(world_vars[1], zindex = 401)
    })
    observe({
      var <- input$var
      # Swap the tagged layer for one mapping the newly selected variable.
      tmapProxy("map", session, {
        tm_remove_layer(401) +
          tm_shape(World) +
          tm_polygons(var, zindex = 401)
      })
    })
  }
  app <- shinyApp(ui, server)
  if (interactive()) app
}
|
/09_developing_data_products/week4/trash.R
|
no_license
|
Cardosaum/data_science_specialization_jhu
|
R
| false
| false
| 767
|
r
|
# Minimal shiny app demonstrating tmapProxy(): redraws the choropleth layer
# whenever a different World variable is selected.
# BUG FIX: the guard only tested for shiny, but data(World), tmapOutput(),
# renderTmap(), tm_*() and tmapProxy() all come from tmap, so the snippet
# failed with "could not find function" when tmap was not attached.
if (require("shiny") && require("tmap")) {
  data(World)
  # Candidate variables: every World column except identifiers and geometry.
  world_vars <- setdiff(names(World), c("iso_a3", "name", "sovereignt", "geometry"))
  ui <- fluidPage(
    tmapOutput("map"),
    selectInput("var", "Variable", world_vars)
  )
  server <- function(input, output, session) {
    # Initial render; zindex = 401 tags the layer so the proxy can remove it.
    output$map <- renderTmap({
      tm_shape(World) +
        tm_polygons(world_vars[1], zindex = 401)
    })
    observe({
      var <- input$var
      # Swap the tagged layer for one mapping the newly selected variable.
      tmapProxy("map", session, {
        tm_remove_layer(401) +
          tm_shape(World) +
          tm_polygons(var, zindex = 401)
      })
    })
  }
  app <- shinyApp(ui, server)
  if (interactive()) app
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/gen-namespace.R
\name{torch_cumprod}
\alias{torch_cumprod}
\title{Cumprod}
\usage{
torch_cumprod(self, dim, dtype = NULL)
}
\arguments{
\item{self}{(Tensor) the input tensor.}
\item{dim}{(int) the dimension to do the operation over}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of returned tensor. If specified, the input tensor is cast to \code{dtype} before the operation is performed. This is useful for preventing data type overflows. Default: NULL.}
}
\description{
Cumprod
}
\section{cumprod(input, dim, out=NULL, dtype=NULL) -> Tensor }{
Returns the cumulative product of elements of \code{input} in the dimension
\code{dim}.
For example, if \code{input} is a vector of size N, the result will also be
a vector of size N, with elements.
\deqn{
y_i = x_1 \times x_2\times x_3\times \dots \times x_i
}
}
\examples{
if (torch_is_installed()) {
a = torch_randn(c(10))
a
torch_cumprod(a, dim=1)
}
}
|
/man/torch_cumprod.Rd
|
permissive
|
mlverse/torch
|
R
| false
| true
| 1,084
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/gen-namespace.R
\name{torch_cumprod}
\alias{torch_cumprod}
\title{Cumprod}
\usage{
torch_cumprod(self, dim, dtype = NULL)
}
\arguments{
\item{self}{(Tensor) the input tensor.}
\item{dim}{(int) the dimension to do the operation over}
\item{dtype}{(\code{torch.dtype}, optional) the desired data type of returned tensor. If specified, the input tensor is cast to \code{dtype} before the operation is performed. This is useful for preventing data type overflows. Default: NULL.}
}
\description{
Cumprod
}
\section{cumprod(input, dim, out=NULL, dtype=NULL) -> Tensor }{
Returns the cumulative product of elements of \code{input} in the dimension
\code{dim}.
For example, if \code{input} is a vector of size N, the result will also be
a vector of size N, with elements.
\deqn{
y_i = x_1 \times x_2\times x_3\times \dots \times x_i
}
}
\examples{
if (torch_is_installed()) {
a = torch_randn(c(10))
a
torch_cumprod(a, dim=1)
}
}
|
#' Retrieve synonyms from various sources given input taxonomic
#' names or identifiers
#'
#' @param x Vector of taxa names (character) or IDs (character or numeric) to
#' query.
#' @param db character; database to query. either `itis`, `tropicos`, `col`,
#' `nbn`, `worms`. Note that each taxonomic data source has their own
#' identifiers, so that if you provide the wrong `db` value for the identifier
#' you could get a result, but it will likely be wrong (not what you were
#' expecting). If using tropicos, we recommend getting an API key;
#' see [taxize-authentication]
#' @param id character; identifiers, returned by [get_tsn()], [get_tpsid()],
#' [get_nbnid()], [get_colid()], [get_wormsid()]
#' @param rows (numeric) Any number from 1 to infinity. If the default NA, all
#' rows are considered. Note that this parameter is ignored if you pass in a
#' taxonomic id of any of the acceptable classes: tsn, tpsid, nbnid, ids.
#' @param ... Other passed arguments to internal functions `get_*()` and
#' functions to gather synonyms.
#'
#' @return A named list of results with three types of output in each slot:
#'
#' - if the name was not found: `NA_character_`
#' - if the name was found but no synonyms found, an empty data.frame (0 rows)
#' - if the name was found, and synonyms found, a data.frames with the
#' synonyms - the column names vary by data source
#'
#' @details If IDs are supplied directly (not from the `get_*()` functions)
#' you must specify the type of ID.
#'
#' For `db = "itis"` you can pass in a parameter `accepted` to
#' toggle whether only accepted names are used `accepted = TRUE`, or if
#' all are used `accepted = FALSE`. The default is `accepted = FALSE`
#'
#' Note that IUCN requires an API key. See [rredlist::rredlist-package]
#' for help on authenticating with IUCN Redlist
#'
#' @seealso [get_tsn()], [get_tpsid()], [get_nbnid()], [get_colid()],
#' [get_wormsid()], [get_iucn()]
#'
#' @export
#' @examples \dontrun{
#' # Plug in taxon IDs
#' synonyms(183327, db="itis")
#' synonyms("25509881", db="tropicos")
#' synonyms("NBNSYS0000004629", db='nbn')
#' # synonyms("87e986b0873f648711900866fa8abde7", db='col') # FIXME
#' synonyms(105706, db='worms')
#' synonyms(12392, db='iucn')
#'
#' # Plug in taxon names directly
#' synonyms("Pinus contorta", db="itis")
#' synonyms("Puma concolor", db="itis")
#' synonyms(c("Poa annua",'Pinus contorta','Puma concolor'), db="itis")
#' synonyms("Poa annua", db="tropicos")
#' synonyms("Pinus contorta", db="tropicos")
#' synonyms(c("Poa annua",'Pinus contorta'), db="tropicos")
#' synonyms("Pinus sylvestris", db='nbn')
#' synonyms("Puma concolor", db='col')
#' synonyms("Ursus americanus", db='col')
#' synonyms("Amblyomma rotundatum", db='col')
#' synonyms('Pomatomus', db='worms')
#' synonyms('Pomatomus saltatrix', db='worms')
#'
#' # not accepted names, with ITIS
#' ## looks for whether the name given is an accepted name,
#' ## and if not, uses the accepted name to look for synonyms
#' synonyms("Acer drummondii", db="itis")
#' synonyms("Spinus pinus", db="itis")
#'
#' # Use get_* methods
#' synonyms(get_tsn("Poa annua"))
#' synonyms(get_tpsid("Poa annua"))
#' synonyms(get_nbnid("Carcharodon carcharias"))
#' synonyms(get_colid("Ornithodoros lagophilus"))
#' synonyms(get_iucn('Loxodonta africana'))
#'
#' # Pass many ids from class "ids"
#' out <- get_ids(names="Poa annua", db = c('itis','tropicos'))
#' synonyms(out)
#'
#' # Use the rows parameter to select certain rows
#' synonyms("Poa annua", db='tropicos', rows=1)
#' synonyms("Poa annua", db='tropicos', rows=1:3)
#' synonyms("Pinus sylvestris", db='nbn', rows=1:3)
#' synonyms("Amblyomma rotundatum", db='col', rows=2)
#' synonyms("Amblyomma rotundatum", db='col', rows=2:3)
#'
#' # Use curl options
#' synonyms("Poa annua", db='tropicos', rows=1, verbose = TRUE)
#' synonyms("Poa annua", db='itis', rows=1, verbose = TRUE)
#' synonyms("Poa annua", db='col', rows=1, verbose = TRUE)
#'
#'
#' # combine many outputs together
#' x <- synonyms(c("Osmia bicornis", "Osmia rufa", "Osmia"), db = "itis")
#' synonyms_df(x)
#'
#' ## note here how Pinus contorta is dropped due to no synonyms found
#' x <- synonyms(c("Poa annua",'Pinus contorta','Puma concolor'), db="col")
#' synonyms_df(x)
#'
#' ## note here that ids are taxon identifiers b/c you start with them
#' x <- synonyms(c(25509881, 13100094), db="tropicos")
#' synonyms_df(x)
#'
#' ## NBN
#' x <- synonyms(c('Aglais io', 'Usnea hirta', 'Arctostaphylos uva-ursi'),
#' db="nbn")
#' synonyms_df(x)
#' }
# S3 generic. The default method turns taxon names into data-source ids;
# the class-specific methods (synonyms.tsn, .tpsid, .nbnid, .colid,
# .wormsid, .iucn, .ids) fetch the synonyms for those ids.
synonyms <- function(...) {
  UseMethod("synonyms")
}
#' @export
#' @rdname synonyms
synonyms.default <- function(x, db = NULL, rows = NA, ...) {
  nstop(db)
  # One id-lookup function per supported data source. This replaces six
  # copy-pasted switch branches that differed only in the getter and label.
  getters <- list(
    itis = get_tsn,
    tropicos = get_tpsid,
    nbn = get_nbnid,
    col = get_colid,
    worms = get_wormsid,
    iucn = get_iucn
  )
  if (!db %in% names(getters)) {
    stop("the provided db value was not recognised", call. = FALSE)
  }
  # get_iucn() does not take a `rows` argument, so only forward `rows`
  # for the other sources (matches the original branch-by-branch behavior).
  id <- if (db == "iucn") {
    process_syn_ids(x, db, getters[[db]], ...)
  } else {
    process_syn_ids(x, db, getters[[db]], rows = rows, ...)
  }
  # Dispatch on the id class; name the results by the original queries.
  structure(stats::setNames(synonyms(id, ...), x),
            class = "synonyms", db = db)
}
# Decide whether `input` already looks like a data-source identifier.
# Numbers, NBN-style "NH"/"NB" codes, and digit-bearing strings are coerced
# with the source's as.* constructor; anything else is treated as a taxon
# name and resolved through the supplied lookup function `fxn`.
process_syn_ids <- function(input, db, fxn, ...) {
  coerced <- tryCatch(as.numeric(as.character(input)), warning = function(e) e)
  looks_like_id <- inherits(coerced, "numeric") ||
    (is.character(input) && grepl("N[HB]", input)) ||
    (is.character(input) && grepl("[[:digit:]]", input))
  if (looks_like_id) {
    as_fxn <- switch(db,
      itis = as.tsn,
      tropicos = as.tpsid,
      nbn = as.nbnid,
      col = as.colid,
      worms = as.wormsid,
      iucn = as.iucn)
    # IUCN ids must be validated online; the others are taken on trust.
    as_fxn(input, check = db == "iucn")
  } else {
    eval(fxn)(input, ...)
  }
}
#' @export
#' @rdname synonyms
synonyms.tsn <- function(id, ...) {
  warn_db(list(...), "itis")
  # Worker applied to each TSN: first resolve the accepted name(s) for the
  # submitted TSN, then fetch synonyms for each accepted TSN and bind the
  # accepted-name columns alongside them.
  fun <- function(x){
    if (is.na(x)) { NA_character_ } else {
      is_acc <- rit_acc_name(x, ...)
      if (all(!is.na(is_acc$acceptedName))) {
        # Submitted TSN is not the accepted one: record the submitted ->
        # accepted mapping, then query using the accepted TSN(s) instead.
        accdf <- stats::setNames(
          data.frame(x[1], is_acc, stringsAsFactors = FALSE),
          c("sub_tsn", "acc_name", "acc_tsn", "acc_author")
        )
        x <- is_acc$acceptedTsn
        message("Accepted name(s) is/are '",
                paste0(is_acc$acceptedName, collapse = "/"), "'")
        message("Using tsn(s) ", paste0(is_acc$acceptedTsn, collapse = "/"),
                "\n")
      } else {
        # Submitted TSN is already accepted: it maps to itself.
        accdf <- data.frame(sub_tsn = x[1], acc_tsn = x[1],
                            stringsAsFactors = FALSE)
      }
      # One synonym query per accepted TSN; a zero-row answer becomes an
      # empty tibble, otherwise the accepted-name row is cbind-ed on.
      res <- Map(function(z, w) {
        tmp <- ritis::synonym_names(z)
        if (NROW(tmp) == 0) {
          tibble::tibble()
        } else {
          tmp <- stats::setNames(tmp, c('syn_author', 'syn_name', 'syn_tsn'))
          cbind(w, tmp, row.names = NULL)
        }
        # if (as.character(tmp[1,1]) == 'nomatch') {
        #   tmp <- data.frame(message = "no syns found", stringsAsFactors = FALSE)
        # }
      }, x, split(accdf, seq_len(NROW(accdf))))
      do.call("rbind", unname(res))
    }
  }
  stats::setNames(lapply(id, fun), id)
}
# Look up the accepted name(s) for a TSN via ITIS. When ITIS returns no
# accepted-name record (i.e. the submitted TSN is itself accepted), fall
# back to a one-row frame mapping the TSN to itself with an NA name.
rit_acc_name <- function(x, ...) {
  accepted <- ritis::accepted_names(x, ...)
  if (NROW(accepted) > 0) {
    return(accepted)
  }
  data.frame(submittedtsn = x[1], acceptedName = NA, acceptedTsn = x[1],
             stringsAsFactors = FALSE)
}
#' @export
#' @rdname synonyms
synonyms.colid <- function(id, ...) {
  warn_db(list(...), "col")
  fun <- function(x) {
    if (is.na(x)) {
      NA_character_
    } else {
      res <- col_synonyms(x, ...)
      # col_synonyms() returns a data.frame of synonyms, NA when the record
      # has an empty synonym list, or NULL when it has none at all.
      # BUG FIX: the old `if (is.na(res))` is an invalid condition when res
      # is a data.frame (is.na gives a matrix) or NULL (gives logical(0)),
      # which errors in modern R. Test the sentinel cases explicitly.
      if (is.null(res) || (!is.data.frame(res) && all(is.na(res)))) {
        tibble::tibble()
      } else {
        res
      }
    }
  }
  stats::setNames(lapply(id, fun), id)
}
# Fetch synonyms for a single Catalogue of Life id via the (legacy) COL
# webservice. Returns a data.frame of synonyms, NA when the record carries
# an empty/unusable synonym list, or NULL when the response has no
# "synonyms" field at all.
# NOTE(review): this is the old plain-http COL endpoint -- confirm it is
# still reachable before relying on this code path.
col_synonyms <- function(x, ...) {
  base <- "http://www.catalogueoflife.org/col/webservice"
  # Only the first id is used if a vector slips through.
  args <- list(id = x[1], response = "full", format = "json")
  # tx_ual: user-agent headers defined elsewhere in this package.
  cli <- crul::HttpClient$new(base, headers = tx_ual)
  res <- cli$get(query = args)
  res$raise_for_status()
  out <- jsonlite::fromJSON(res$parse("UTF-8"), FALSE)
  tmp <- out$results[[1]]
  if ("synonyms" %in% names(tmp)) {
    # Flatten the list-of-lists payload: zero-length fields -> NA, and the
    # nested "references" element is dropped before building each row.
    df <- taxize_ldfast(lapply(tmp$synonyms, function(w) {
      w[sapply(w, length) == 0] <- NA
      w$references <- NULL
      data.frame(w, stringsAsFactors = FALSE)
    }))
    if (!is.null(df)) {
      df$rank <- tolower(df$rank)
    } else {
      df <- NA
    }
    df
  } else {
    NULL
  }
}
#' @export
#' @rdname synonyms
synonyms.tpsid <- function(id, ...) {
  # BUG FIX: the db label passed to warn_db() was misspelled "topicos",
  # so Tropicos calls were labelled inconsistently with every other method.
  warn_db(list(...), "tropicos")
  fun <- function(x) {
    if (is.na(x)) {
      NA_character_
    } else {
      res <- tp_synonyms(x, ...)$synonyms
      # tp_synonyms() signals "no synonyms" via a message cell rather than
      # an empty frame; normalise that case to an empty tibble.
      if (grepl("no syns found", res[1,1])) tibble::tibble() else res
    }
  }
  stats::setNames(lapply(id, fun), id)
}
#' @export
#' @rdname synonyms
synonyms.nbnid <- function(id, ...) {
  warn_db(list(...), "nbn")
  # NA ids pass through as NA; otherwise query NBN and normalise an empty
  # answer to an empty tibble.
  lookup <- function(nbn_id) {
    if (is.na(nbn_id)) {
      return(NA_character_)
    }
    syns <- nbn_synonyms(nbn_id, ...)
    if (length(syns) == 0) tibble::tibble() else syns
  }
  stats::setNames(lapply(id, lookup), id)
}
#' @export
#' @rdname synonyms
synonyms.wormsid <- function(id, ...) {
  warn_db(list(...), "worms")
  # NA ids pass through as NA; otherwise query WoRMS, converting a failed
  # lookup (worrms errors when a record has no synonyms) to an empty tibble.
  lookup <- function(worms_id) {
    if (is.na(worms_id)) {
      return(NA_character_)
    }
    syns <- tryCatch(worrms::wm_synonyms(as.numeric(worms_id), ...),
                     error = function(e) e)
    if (inherits(syns, "error")) tibble::tibble() else syns
  }
  stats::setNames(lapply(id, lookup), id)
}
#' @export
#' @rdname synonyms
synonyms.iucn <- function(id, ...) {
  warn_db(list(...), "iucn")
  # The Redlist API is queried by taxon name, carried in the "name"
  # attribute of the id vector; NA ids pass through as NA.
  fetch_one <- function(i) {
    if (is.na(id[[i]])) {
      return(NA_character_)
    }
    res <- rredlist::rl_synonyms(attr(id, "name")[i], ...)$result
    if (length(res) == 0) tibble::tibble() else res
  }
  stats::setNames(lapply(seq_along(id), fetch_one), id)
}
#' @export
#' @rdname synonyms
synonyms.ids <- function(id, ...) {
  # Applied to the output of get_ids(): one synonyms() dispatch per data
  # source, with NA ids passed through as NA.
  lapply(id, function(x) {
    if (is.na(x)) NA_character_ else synonyms(x, ...)
  })
}
### Combine synonyms output into single data.frame -----------
# S3 generic: flattens the list returned by synonyms() into one data.frame,
# dropping names that were not found or that had no synonyms.
#' @export
#' @rdname synonyms
synonyms_df <- function(x) {
  UseMethod("synonyms_df")
}
#' @export
synonyms_df.default <- function(x) {
  # Fallback for unsupported input: synonyms_df() only knows how to combine
  # objects of class "synonyms" (as returned by synonyms()).
  stop("no 'synonyms_df' method for ", class(x), call. = FALSE)
}
#' @export
synonyms_df.synonyms <- function(x) {
  # Keep only entries that are non-empty data.frames: non-data.frame slots
  # correspond to names that were not found, and zero-row frames to names
  # found but with no recorded synonyms.
  usable <- Filter(
    function(entry) inherits(entry, "data.frame") && NROW(entry) > 0,
    x
  )
  # Stack the survivors; idcol = TRUE keeps the query name as a column.
  data.table::setDF(
    data.table::rbindlist(usable, use.names = TRUE, fill = TRUE, idcol = TRUE)
  )
}
|
/R/synonyms.R
|
permissive
|
muschellij2/taxize
|
R
| false
| false
| 11,406
|
r
|
#' Retrieve synonyms from various sources given input taxonomic
#' names or identifiers
#'
#' @param x Vector of taxa names (character) or IDs (character or numeric) to
#' query.
#' @param db character; database to query. either `itis`, `tropicos`, `col`,
#' `nbn`, `worms`. Note that each taxonomic data source has their own
#' identifiers, so that if you provide the wrong `db` value for the identifier
#' you could get a result, but it will likely be wrong (not what you were
#' expecting). If using tropicos, we recommend getting an API key;
#' see [taxize-authentication]
#' @param id character; identifiers, returned by [get_tsn()], [get_tpsid()],
#' [get_nbnid()], [get_colid()], [get_wormsid()]
#' @param rows (numeric) Any number from 1 to infinity. If the default NA, all
#' rows are considered. Note that this parameter is ignored if you pass in a
#' taxonomic id of any of the acceptable classes: tsn, tpsid, nbnid, ids.
#' @param ... Other passed arguments to internal functions `get_*()` and
#' functions to gather synonyms.
#'
#' @return A named list of results with three types of output in each slot:
#'
#' - if the name was not found: `NA_character_`
#' - if the name was found but no synonyms found, an empty data.frame (0 rows)
#' - if the name was found, and synonyms found, a data.frames with the
#' synonyms - the column names vary by data source
#'
#' @details If IDs are supplied directly (not from the `get_*()` functions)
#' you must specify the type of ID.
#'
#' For `db = "itis"` you can pass in a parameter `accepted` to
#' toggle whether only accepted names are used `accepted = TRUE`, or if
#' all are used `accepted = FALSE`. The default is `accepted = FALSE`
#'
#' Note that IUCN requires an API key. See [rredlist::rredlist-package]
#' for help on authenticating with IUCN Redlist
#'
#' @seealso [get_tsn()], [get_tpsid()], [get_nbnid()], [get_colid()],
#' [get_wormsid()], [get_iucn()]
#'
#' @export
#' @examples \dontrun{
#' # Plug in taxon IDs
#' synonyms(183327, db="itis")
#' synonyms("25509881", db="tropicos")
#' synonyms("NBNSYS0000004629", db='nbn')
#' # synonyms("87e986b0873f648711900866fa8abde7", db='col') # FIXME
#' synonyms(105706, db='worms')
#' synonyms(12392, db='iucn')
#'
#' # Plug in taxon names directly
#' synonyms("Pinus contorta", db="itis")
#' synonyms("Puma concolor", db="itis")
#' synonyms(c("Poa annua",'Pinus contorta','Puma concolor'), db="itis")
#' synonyms("Poa annua", db="tropicos")
#' synonyms("Pinus contorta", db="tropicos")
#' synonyms(c("Poa annua",'Pinus contorta'), db="tropicos")
#' synonyms("Pinus sylvestris", db='nbn')
#' synonyms("Puma concolor", db='col')
#' synonyms("Ursus americanus", db='col')
#' synonyms("Amblyomma rotundatum", db='col')
#' synonyms('Pomatomus', db='worms')
#' synonyms('Pomatomus saltatrix', db='worms')
#'
#' # not accepted names, with ITIS
#' ## looks for whether the name given is an accepted name,
#' ## and if not, uses the accepted name to look for synonyms
#' synonyms("Acer drummondii", db="itis")
#' synonyms("Spinus pinus", db="itis")
#'
#' # Use get_* methods
#' synonyms(get_tsn("Poa annua"))
#' synonyms(get_tpsid("Poa annua"))
#' synonyms(get_nbnid("Carcharodon carcharias"))
#' synonyms(get_colid("Ornithodoros lagophilus"))
#' synonyms(get_iucn('Loxodonta africana'))
#'
#' # Pass many ids from class "ids"
#' out <- get_ids(names="Poa annua", db = c('itis','tropicos'))
#' synonyms(out)
#'
#' # Use the rows parameter to select certain rows
#' synonyms("Poa annua", db='tropicos', rows=1)
#' synonyms("Poa annua", db='tropicos', rows=1:3)
#' synonyms("Pinus sylvestris", db='nbn', rows=1:3)
#' synonyms("Amblyomma rotundatum", db='col', rows=2)
#' synonyms("Amblyomma rotundatum", db='col', rows=2:3)
#'
#' # Use curl options
#' synonyms("Poa annua", db='tropicos', rows=1, verbose = TRUE)
#' synonyms("Poa annua", db='itis', rows=1, verbose = TRUE)
#' synonyms("Poa annua", db='col', rows=1, verbose = TRUE)
#'
#'
#' # combine many outputs together
#' x <- synonyms(c("Osmia bicornis", "Osmia rufa", "Osmia"), db = "itis")
#' synonyms_df(x)
#'
#' ## note here how Pinus contorta is dropped due to no synonyms found
#' x <- synonyms(c("Poa annua",'Pinus contorta','Puma concolor'), db="col")
#' synonyms_df(x)
#'
#' ## note here that ids are taxon identifiers b/c you start with them
#' x <- synonyms(c(25509881, 13100094), db="tropicos")
#' synonyms_df(x)
#'
#' ## NBN
#' x <- synonyms(c('Aglais io', 'Usnea hirta', 'Arctostaphylos uva-ursi'),
#' db="nbn")
#' synonyms_df(x)
#' }
# S3 generic. The default method turns taxon names into data-source ids;
# the class-specific methods (synonyms.tsn, .tpsid, .nbnid, .colid,
# .wormsid, .iucn, .ids) fetch the synonyms for those ids.
synonyms <- function(...) {
  UseMethod("synonyms")
}
#' @export
#' @rdname synonyms
synonyms.default <- function(x, db = NULL, rows = NA, ...) {
  nstop(db)
  # One id-lookup function per supported data source. This replaces six
  # copy-pasted switch branches that differed only in the getter and label.
  getters <- list(
    itis = get_tsn,
    tropicos = get_tpsid,
    nbn = get_nbnid,
    col = get_colid,
    worms = get_wormsid,
    iucn = get_iucn
  )
  if (!db %in% names(getters)) {
    stop("the provided db value was not recognised", call. = FALSE)
  }
  # get_iucn() does not take a `rows` argument, so only forward `rows`
  # for the other sources (matches the original branch-by-branch behavior).
  id <- if (db == "iucn") {
    process_syn_ids(x, db, getters[[db]], ...)
  } else {
    process_syn_ids(x, db, getters[[db]], rows = rows, ...)
  }
  # Dispatch on the id class; name the results by the original queries.
  structure(stats::setNames(synonyms(id, ...), x),
            class = "synonyms", db = db)
}
# Decide whether `input` already looks like a data-source identifier.
# Numbers, NBN-style "NH"/"NB" codes, and digit-bearing strings are coerced
# with the source's as.* constructor; anything else is treated as a taxon
# name and resolved through the supplied lookup function `fxn`.
process_syn_ids <- function(input, db, fxn, ...) {
  coerced <- tryCatch(as.numeric(as.character(input)), warning = function(e) e)
  looks_like_id <- inherits(coerced, "numeric") ||
    (is.character(input) && grepl("N[HB]", input)) ||
    (is.character(input) && grepl("[[:digit:]]", input))
  if (looks_like_id) {
    as_fxn <- switch(db,
      itis = as.tsn,
      tropicos = as.tpsid,
      nbn = as.nbnid,
      col = as.colid,
      worms = as.wormsid,
      iucn = as.iucn)
    # IUCN ids must be validated online; the others are taken on trust.
    as_fxn(input, check = db == "iucn")
  } else {
    eval(fxn)(input, ...)
  }
}
#' @export
#' @rdname synonyms
synonyms.tsn <- function(id, ...) {
  warn_db(list(...), "itis")
  # Worker applied to each TSN: first resolve the accepted name(s) for the
  # submitted TSN, then fetch synonyms for each accepted TSN and bind the
  # accepted-name columns alongside them.
  fun <- function(x){
    if (is.na(x)) { NA_character_ } else {
      is_acc <- rit_acc_name(x, ...)
      if (all(!is.na(is_acc$acceptedName))) {
        # Submitted TSN is not the accepted one: record the submitted ->
        # accepted mapping, then query using the accepted TSN(s) instead.
        accdf <- stats::setNames(
          data.frame(x[1], is_acc, stringsAsFactors = FALSE),
          c("sub_tsn", "acc_name", "acc_tsn", "acc_author")
        )
        x <- is_acc$acceptedTsn
        message("Accepted name(s) is/are '",
                paste0(is_acc$acceptedName, collapse = "/"), "'")
        message("Using tsn(s) ", paste0(is_acc$acceptedTsn, collapse = "/"),
                "\n")
      } else {
        # Submitted TSN is already accepted: it maps to itself.
        accdf <- data.frame(sub_tsn = x[1], acc_tsn = x[1],
                            stringsAsFactors = FALSE)
      }
      # One synonym query per accepted TSN; a zero-row answer becomes an
      # empty tibble, otherwise the accepted-name row is cbind-ed on.
      res <- Map(function(z, w) {
        tmp <- ritis::synonym_names(z)
        if (NROW(tmp) == 0) {
          tibble::tibble()
        } else {
          tmp <- stats::setNames(tmp, c('syn_author', 'syn_name', 'syn_tsn'))
          cbind(w, tmp, row.names = NULL)
        }
        # if (as.character(tmp[1,1]) == 'nomatch') {
        #   tmp <- data.frame(message = "no syns found", stringsAsFactors = FALSE)
        # }
      }, x, split(accdf, seq_len(NROW(accdf))))
      do.call("rbind", unname(res))
    }
  }
  stats::setNames(lapply(id, fun), id)
}
# Look up the accepted name(s) for a TSN via ITIS. When ITIS returns no
# accepted-name record (i.e. the submitted TSN is itself accepted), fall
# back to a one-row frame mapping the TSN to itself with an NA name.
rit_acc_name <- function(x, ...) {
  accepted <- ritis::accepted_names(x, ...)
  if (NROW(accepted) > 0) {
    return(accepted)
  }
  data.frame(submittedtsn = x[1], acceptedName = NA, acceptedTsn = x[1],
             stringsAsFactors = FALSE)
}
#' @export
#' @rdname synonyms
synonyms.colid <- function(id, ...) {
  warn_db(list(...), "col")
  fun <- function(x) {
    if (is.na(x)) {
      NA_character_
    } else {
      res <- col_synonyms(x, ...)
      # col_synonyms() returns a data.frame of synonyms, NA when the record
      # has an empty synonym list, or NULL when it has none at all.
      # BUG FIX: the old `if (is.na(res))` is an invalid condition when res
      # is a data.frame (is.na gives a matrix) or NULL (gives logical(0)),
      # which errors in modern R. Test the sentinel cases explicitly.
      if (is.null(res) || (!is.data.frame(res) && all(is.na(res)))) {
        tibble::tibble()
      } else {
        res
      }
    }
  }
  stats::setNames(lapply(id, fun), id)
}
# Fetch synonyms for a single Catalogue of Life id via the (legacy) COL
# webservice. Returns a data.frame of synonyms, NA when the record carries
# an empty/unusable synonym list, or NULL when the response has no
# "synonyms" field at all.
# NOTE(review): this is the old plain-http COL endpoint -- confirm it is
# still reachable before relying on this code path.
col_synonyms <- function(x, ...) {
  base <- "http://www.catalogueoflife.org/col/webservice"
  # Only the first id is used if a vector slips through.
  args <- list(id = x[1], response = "full", format = "json")
  # tx_ual: user-agent headers defined elsewhere in this package.
  cli <- crul::HttpClient$new(base, headers = tx_ual)
  res <- cli$get(query = args)
  res$raise_for_status()
  out <- jsonlite::fromJSON(res$parse("UTF-8"), FALSE)
  tmp <- out$results[[1]]
  if ("synonyms" %in% names(tmp)) {
    # Flatten the list-of-lists payload: zero-length fields -> NA, and the
    # nested "references" element is dropped before building each row.
    df <- taxize_ldfast(lapply(tmp$synonyms, function(w) {
      w[sapply(w, length) == 0] <- NA
      w$references <- NULL
      data.frame(w, stringsAsFactors = FALSE)
    }))
    if (!is.null(df)) {
      df$rank <- tolower(df$rank)
    } else {
      df <- NA
    }
    df
  } else {
    NULL
  }
}
#' @export
#' @rdname synonyms
synonyms.tpsid <- function(id, ...) {
  # BUG FIX: the db label passed to warn_db() was misspelled "topicos",
  # so Tropicos calls were labelled inconsistently with every other method.
  warn_db(list(...), "tropicos")
  fun <- function(x) {
    if (is.na(x)) {
      NA_character_
    } else {
      res <- tp_synonyms(x, ...)$synonyms
      # tp_synonyms() signals "no synonyms" via a message cell rather than
      # an empty frame; normalise that case to an empty tibble.
      if (grepl("no syns found", res[1,1])) tibble::tibble() else res
    }
  }
  stats::setNames(lapply(id, fun), id)
}
#' @export
#' @rdname synonyms
synonyms.nbnid <- function(id, ...) {
  warn_db(list(...), "nbn")
  # NA ids pass through as NA; otherwise query NBN and normalise an empty
  # answer to an empty tibble.
  lookup <- function(nbn_id) {
    if (is.na(nbn_id)) {
      return(NA_character_)
    }
    syns <- nbn_synonyms(nbn_id, ...)
    if (length(syns) == 0) tibble::tibble() else syns
  }
  stats::setNames(lapply(id, lookup), id)
}
#' @export
#' @rdname synonyms
synonyms.wormsid <- function(id, ...) {
  warn_db(list(...), "worms")
  # NA ids pass through as NA; otherwise query WoRMS, converting a failed
  # lookup (worrms errors when a record has no synonyms) to an empty tibble.
  lookup <- function(worms_id) {
    if (is.na(worms_id)) {
      return(NA_character_)
    }
    syns <- tryCatch(worrms::wm_synonyms(as.numeric(worms_id), ...),
                     error = function(e) e)
    if (inherits(syns, "error")) tibble::tibble() else syns
  }
  stats::setNames(lapply(id, lookup), id)
}
#' @export
#' @rdname synonyms
synonyms.iucn <- function(id, ...) {
  warn_db(list(...), "iucn")
  # The Redlist API is queried by taxon name, carried in the "name"
  # attribute of the id vector; NA ids pass through as NA.
  fetch_one <- function(i) {
    if (is.na(id[[i]])) {
      return(NA_character_)
    }
    res <- rredlist::rl_synonyms(attr(id, "name")[i], ...)$result
    if (length(res) == 0) tibble::tibble() else res
  }
  stats::setNames(lapply(seq_along(id), fetch_one), id)
}
#' @export
#' @rdname synonyms
synonyms.ids <- function(id, ...) {
  # Applied to the output of get_ids(): one synonyms() dispatch per data
  # source, with NA ids passed through as NA.
  lapply(id, function(x) {
    if (is.na(x)) NA_character_ else synonyms(x, ...)
  })
}
### Combine synonyms output into single data.frame -----------
# S3 generic: flattens the list returned by synonyms() into one data.frame,
# dropping names that were not found or that had no synonyms.
#' @export
#' @rdname synonyms
synonyms_df <- function(x) {
  UseMethod("synonyms_df")
}
#' @export
synonyms_df.default <- function(x) {
  # Fallback for unsupported input: synonyms_df() only knows how to combine
  # objects of class "synonyms" (as returned by synonyms()).
  stop("no 'synonyms_df' method for ", class(x), call. = FALSE)
}
#' @export
synonyms_df.synonyms <- function(x) {
  # Keep only entries that are non-empty data.frames: non-data.frame slots
  # correspond to names that were not found, and zero-row frames to names
  # found but with no recorded synonyms.
  usable <- Filter(
    function(entry) inherits(entry, "data.frame") && NROW(entry) > 0,
    x
  )
  # Stack the survivors; idcol = TRUE keeps the query name as a column.
  data.table::setDF(
    data.table::rbindlist(usable, use.names = TRUE, fill = TRUE, idcol = TRUE)
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_processing.R
\name{process_elevation}
\alias{process_elevation}
\title{Export modeled elevation data from existing animal data file}
\usage{
process_elevation(elev, zoom = 11, get_slope = TRUE,
get_aspect = TRUE, in_path, out_path)
}
\arguments{
\item{elev}{elevation data as terrain tiles}
\item{zoom}{level of zoom, defaults to 11}
\item{get_slope}{logical, whether to compute slope (in degrees), defaults to true}
\item{get_aspect}{logical, whether to compute aspect (in degrees), defaults to true}
\item{in_path}{animal tracking data file to model elevation from}
\item{out_path}{exported file path, .rds}
}
\value{
list of data frames with gps data augmented by elevation
}
\description{
Export modeled elevation data from existing animal data file
}
\examples{
# Export elevation data from demo .rds datasets
\donttest{
\dontrun{
## Get elevation
elev <- read_zip_to_rasters("inst/extdata/elev/USA_msk_alt.zip")
## Process and export
process_elevation(elev, zoom = 11, get_slope = TRUE, get_aspect = TRUE,
in_path = system.file("extdata", "demo_aug19.rds",
package = "animaltracker"), out_path = "demo_aug19_elev.rds")
}
}
}
|
/man/process_elevation.Rd
|
no_license
|
IotGod/animaltracker
|
R
| false
| true
| 1,225
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_processing.R
\name{process_elevation}
\alias{process_elevation}
\title{Export modeled elevation data from existing animal data file}
\usage{
process_elevation(elev, zoom = 11, get_slope = TRUE,
get_aspect = TRUE, in_path, out_path)
}
\arguments{
\item{elev}{elevation data as terrain tiles}
\item{zoom}{level of zoom, defaults to 11}
\item{get_slope}{logical, whether to compute slope (in degrees), defaults to true}
\item{get_aspect}{logical, whether to compute aspect (in degrees), defaults to true}
\item{in_path}{animal tracking data file to model elevation from}
\item{out_path}{exported file path, .rds}
}
\value{
list of data frames with gps data augmented by elevation
}
\description{
Export modeled elevation data from existing animal data file
}
\examples{
# Export elevation data from demo .rds datasets
\donttest{
\dontrun{
## Get elevation
elev <- read_zip_to_rasters("inst/extdata/elev/USA_msk_alt.zip")
## Process and export
process_elevation(elev, zoom = 11, get_slope = TRUE, get_aspect = TRUE,
in_path = system.file("extdata", "demo_aug19.rds",
package = "animaltracker"), out_path = "demo_aug19_elev.rds")
}
}
}
|
#' Summarise covariate balance for a continuous-exposure entropy-balancing fit
#'
#' @method summary entbal_cont
#' @param object fitted \code{entbal_cont} object with covariates \code{X},
#'   exposure \code{TA}, weights \code{wts}, optimisation output and constraints
#' @param show_unweighted also print the unweighted balance table
#' @param show_higherorder currently unused; kept for interface compatibility
#' @param show_parameters print the optimisation parameter values
#' @param n_digits number of digits for the printed tables
#' @return (invisibly) a list with the \code{before} and \code{after} balance tables
#' @export
summary.entbal_cont <- function(object,
                                show_unweighted = TRUE,
                                show_higherorder = TRUE,
                                show_parameters = FALSE,
                                n_digits = 3){
  # Prints an 80-character dashed divider (factored out of ~10 repeats).
  divider <- function() cat(paste(paste(rep('-', 80), collapse = ''), '\n', sep=''))
  # Coerce a single covariate vector to a one-column matrix so the apply()
  # calls below work uniformly.
  if(is.null(ncol(object$X))) {
    object$X <- matrix(object$X, ncol =1)
    colnames(object$X)[1] <- c('cov')
  }
  # Unweighted balance: marginal moments plus slope/t/p from regressing each
  # covariate on the standardised exposure.
  CB_unwtd <- cbind(apply(object$X, 2, mean),
                    apply(object$X, 2, sd),
                    apply(object$X, 2, function(x) cor(x, object$TA)),
                    t(apply(object$X, 2, function(x) summary(lm(x ~ scale(object$TA)))$coef[2,c(1,3,4)])))
  colnames(CB_unwtd) <- c('Mean',
                          'SD',
                          'Cor.',
                          'Beta',
                          'tval',
                          'pval')
  # Weighted balance: same quantities under the balancing weights, plus a
  # weighted KS balance statistic.
  CB_wtd <- cbind(apply(object$X, 2, function(x) wmean(object$wts, x)),
                  apply(object$X, 2, function(x) sqrt(wvar(object$wts, x))),
                  apply(object$X, 2, function(x) wcor(object$wts, x, object$TA)),
                  t(apply(object$X, 2, function(x) .lm_ps(x, object$TA, object$wts)[2,c(1,3,4)])),
                  apply(object$X, 2, function(x) .ksbal(x, object$wts)))
  colnames(CB_wtd) <- c('wtd-Mean',
                        'wtd-SD',
                        'wtd-Cor.',
                        'wtd-Beta',
                        'wtd-tval',
                        'wtd-pval',
                        'wtd-KSstat')
  baltabs <- list('before' = CB_unwtd,
                  'after' = CB_wtd)
  if(show_unweighted){
    divider()
    cat('Unweighted Summary Statistics:\n')
    divider()
    print(round(baltabs$before, digits = n_digits))
  }
  divider()
  cat('Weighted Summary Statistics:\n')
  divider()
  # Drop the 'wtd-' prefix for printing only; `baltabs` keeps the full names
  # (the list was built before this rename, so it holds an unmodified copy).
  colnames(CB_wtd) <- gsub('wtd-', '', colnames(CB_wtd))
  print(round(CB_wtd, digits = n_digits))
  divider()
  # Effective sample size implied by the weights (1/sum(w^2); assumes the
  # weights sum to 1 -- TODO confirm against the fitting routine).
  orig_N <- nrow(object$X)
  esssum <- 1/sum(object$wts^2)
  cat('Original & Effective Sample Sizes:\n')
  divider()
  SS <- cbind(orig_N, esssum, esssum/orig_N)
  colnames(SS) <- c('Orig N', 'ESS', 'Ratio')
  rownames(SS) <- c('')
  print(round(SS, digits = n_digits))
  if(show_parameters){
    divider()
    cat('Parameter Values from Optimization:\n')
    divider()
    pars <- object$opt_obj$par
    # Flag parameters that sit exactly on the optimisation constraints.
    check_vals <- pars %in% object$constraints
    pars <- data.frame('Value' = round(pars, digits = n_digits),
                       'Saturated' = check_vals)
    # (Fixed: `decreasing = T` -> `decreasing = TRUE`; T is reassignable.)
    print(pars[order(abs(pars$Value), decreasing = TRUE),])
    if(sum(pars[,2]) > 0 ) {
      divider()
      cat('Warning: some parameters have saturated at the constraint\n')
    }
  }
  divider()
  invisible(baltabs)
}
|
/R/entbal_cont_summary.R
|
no_license
|
bvegetabile/entbal
|
R
| false
| false
| 3,244
|
r
|
#' Summarise covariate balance for a continuous-exposure entropy-balancing fit
#'
#' @method summary entbal_cont
#' @param object fitted \code{entbal_cont} object with covariates \code{X},
#'   exposure \code{TA}, weights \code{wts}, optimisation output and constraints
#' @param show_unweighted also print the unweighted balance table
#' @param show_higherorder currently unused; kept for interface compatibility
#' @param show_parameters print the optimisation parameter values
#' @param n_digits number of digits for the printed tables
#' @return (invisibly) a list with the \code{before} and \code{after} balance tables
#' @export
summary.entbal_cont <- function(object,
                                show_unweighted = TRUE,
                                show_higherorder = TRUE,
                                show_parameters = FALSE,
                                n_digits = 3){
  # Prints an 80-character dashed divider (factored out of ~10 repeats).
  divider <- function() cat(paste(paste(rep('-', 80), collapse = ''), '\n', sep=''))
  # Coerce a single covariate vector to a one-column matrix so the apply()
  # calls below work uniformly.
  if(is.null(ncol(object$X))) {
    object$X <- matrix(object$X, ncol =1)
    colnames(object$X)[1] <- c('cov')
  }
  # Unweighted balance: marginal moments plus slope/t/p from regressing each
  # covariate on the standardised exposure.
  CB_unwtd <- cbind(apply(object$X, 2, mean),
                    apply(object$X, 2, sd),
                    apply(object$X, 2, function(x) cor(x, object$TA)),
                    t(apply(object$X, 2, function(x) summary(lm(x ~ scale(object$TA)))$coef[2,c(1,3,4)])))
  colnames(CB_unwtd) <- c('Mean',
                          'SD',
                          'Cor.',
                          'Beta',
                          'tval',
                          'pval')
  # Weighted balance: same quantities under the balancing weights, plus a
  # weighted KS balance statistic.
  CB_wtd <- cbind(apply(object$X, 2, function(x) wmean(object$wts, x)),
                  apply(object$X, 2, function(x) sqrt(wvar(object$wts, x))),
                  apply(object$X, 2, function(x) wcor(object$wts, x, object$TA)),
                  t(apply(object$X, 2, function(x) .lm_ps(x, object$TA, object$wts)[2,c(1,3,4)])),
                  apply(object$X, 2, function(x) .ksbal(x, object$wts)))
  colnames(CB_wtd) <- c('wtd-Mean',
                        'wtd-SD',
                        'wtd-Cor.',
                        'wtd-Beta',
                        'wtd-tval',
                        'wtd-pval',
                        'wtd-KSstat')
  baltabs <- list('before' = CB_unwtd,
                  'after' = CB_wtd)
  if(show_unweighted){
    divider()
    cat('Unweighted Summary Statistics:\n')
    divider()
    print(round(baltabs$before, digits = n_digits))
  }
  divider()
  cat('Weighted Summary Statistics:\n')
  divider()
  # Drop the 'wtd-' prefix for printing only; `baltabs` keeps the full names
  # (the list was built before this rename, so it holds an unmodified copy).
  colnames(CB_wtd) <- gsub('wtd-', '', colnames(CB_wtd))
  print(round(CB_wtd, digits = n_digits))
  divider()
  # Effective sample size implied by the weights (1/sum(w^2); assumes the
  # weights sum to 1 -- TODO confirm against the fitting routine).
  orig_N <- nrow(object$X)
  esssum <- 1/sum(object$wts^2)
  cat('Original & Effective Sample Sizes:\n')
  divider()
  SS <- cbind(orig_N, esssum, esssum/orig_N)
  colnames(SS) <- c('Orig N', 'ESS', 'Ratio')
  rownames(SS) <- c('')
  print(round(SS, digits = n_digits))
  if(show_parameters){
    divider()
    cat('Parameter Values from Optimization:\n')
    divider()
    pars <- object$opt_obj$par
    # Flag parameters that sit exactly on the optimisation constraints.
    check_vals <- pars %in% object$constraints
    pars <- data.frame('Value' = round(pars, digits = n_digits),
                       'Saturated' = check_vals)
    # (Fixed: `decreasing = T` -> `decreasing = TRUE`; T is reassignable.)
    print(pars[order(abs(pars$Value), decreasing = TRUE),])
    if(sum(pars[,2]) > 0 ) {
      divider()
      cat('Warning: some parameters have saturated at the constraint\n')
    }
  }
  divider()
  invisible(baltabs)
}
|
#' Apply transforms to points.
#'
#' Apply a transform list to map a pointset from one domain to another. In
#' registration, one computes mappings between pairs of domains.
#' These transforms are often a sequence of increasingly complex maps, e.g.
#' from translation, to rigid, to affine to deformation. The list of such
#' transforms is passed to this function to interpolate one image domain
#' into the next image domain, as below. The order matters strongly and the
#' user is advised to familiarize with the standards established in examples.
#' Importantly, point mapping goes the opposite direction of image mapping,
#' for both reasons of convention and engineering.
#'
#' @param dim dimensionality of the transformation.
#' @param points moving point set with n-points in rows of at least dim
#' columns - we maintain extra information in additional columns. this
#' may be either a dataframe or a 2D antsImage - the latter may be better
#' for large pointsets.
#' @param transformlist character vector of transforms generated by
#' antsRegistration where each transform is a filename.
#' @param whichtoinvert vector of booleans, same length as transforms
#' @return same type as input. 1 -- Failure
#' @author Avants BB
#' @examples
#'
#' fixed <- antsImageRead( getANTsRData("r16") ,2)
#' moving <- antsImageRead( getANTsRData("r64") ,2)
#' fixed <- resampleImage(fixed,c(64,64),1,0)
#' moving <- resampleImage(moving,c(64,64),1,0)
#' mytx <- antsRegistration(fixed=fixed , moving=moving ,
#' typeofTransform = c("SyN") )
#' pts=data.frame( x=c(110.5,120,130) , y=c(108.1,121.0,130),
#' label=c(1,2,3) )
#' wpts <- antsApplyTransformsToPoints( dim=2, points=pts,
#' transformlist=mytx$fwdtransforms )
#' wptsi <- antsApplyTransformsToPoints( dim=2, points=wpts,
#' transformlist=mytx$invtransforms ) # close to pts
#'
#' \dontrun{
#' fixed <- antsImageRead( getANTsRData("r16") ,2)
#' moving <- antsImageRead( getANTsRData("r64") ,2)
#' fpts = kmeansSegmentation( fixed , 3 )$segmentation %>%
#' thresholdImage(1,1) %>%
#' labelClusters( 5 ) %>% getCentroids(5)
#' wpts <- antsApplyTransformsToPoints( dim=2, points=fpts,
#' transformlist=mytx$fwdtransforms )
#' labimgf=fixed*0
#' labimgm=moving*0
#' for ( p in 1:nrow(wpts))
#' {
#' pt=as.numeric( wpts[p,1:2] )
#' idx=round( antsTransformPhysicalPointToIndex(moving, pt ) )
#' labimgm[ idx[1], idx[2] ]=p
#' pt=as.numeric( fpts[p,1:2] )
#' idx=round( antsTransformPhysicalPointToIndex(fixed, pt ) )
#' labimgf[ idx[1], idx[2] ]=p
#' }
#' plot(fixed,labimgf %>% iMath("GD",2) )
#' plot(moving,labimgm %>% iMath("GD",2) )
#' }
#'
#' @seealso \code{\link{antsRegistration}}
#' @export antsApplyTransformsToPoints
antsApplyTransformsToPoints <- function(
  dim,
  points,
  transformlist = "",
  whichtoinvert = NA )
{
  # Refuse to proceed unless every transform file exists on disk.
  ttexists <- TRUE
  for (i in seq_along(transformlist))
    {
    if ( ! file.exists( transformlist[i] ) ) ttexists <- FALSE
    }
  if (ttexists)
    {
    mytx <- list()
    if (all(is.na(whichtoinvert)))
      {
      # Default: invert nothing.
      # (Fixed: rep(F, ...) -> rep(FALSE, ...); T/F are reassignable.)
      whichtoinvert <- rep(FALSE, length(transformlist))
      }
    # Build the "-t <transform>" argument list; affine (.mat) transforms to
    # be inverted are wrapped as "[file,1]" per antsApplyTransforms syntax.
    for (i in seq_along(transformlist))
      {
      ismat <- FALSE
      if ((i == 1 & length(transformlist) > 1) |
          whichtoinvert[i] == TRUE)
        {
        # (Fixed: grep(".mat", ...) treated "." as a regex wildcard, so any
        # character before "mat" matched; fixed = TRUE matches literally.)
        if (grepl(".mat", transformlist[i], fixed = TRUE))
          {
          ismat <- TRUE
          }
        }
      if (!ismat)
        {
        mytx <- list(mytx, "-t", transformlist[i])
        } else if (ismat) {
        mytx <- list( mytx, "-t",
                      paste("[", transformlist[i], ",1]", sep = "") )
        }
      }
    # Accept either a data.frame-like pointset or a 2-D antsImage.
    # (Fixed: class(points)[[1]] string comparisons -> inherits().)
    if ( !inherits( points, "antsImage" ) )
      {
      usepts = as.antsImage( data.matrix( points ) )
      } else {
      usepts = antsImageClone( points )
      }
    if ( usepts@dimension != 2 ) stop("must be 2d antsImage")
    pointsout = antsImageClone( usepts )
    args <- list( d = dim, i = usepts, o = pointsout, unlist(mytx) )
    myargs <- .int_antsProcessArguments(c(args))
    # Strip stray "-" placeholder arguments produced by argument processing;
    # the is.na() guard covers positions past the shrunken vector's end.
    for (jj in seq_along(myargs)) {
      if (!is.na(myargs[jj])) {
        if (myargs[jj] == "-") {
          myargs2 <- rep(NA, (length(myargs) - 1))
          myargs2[1:(jj - 1)] <- myargs[1:(jj - 1)]
          myargs2[jj:(length(myargs) - 1)] <- myargs[(jj + 1):(length(myargs))]
          myargs <- myargs2
        }
      }
    }
    .Call("antsApplyTransformsToPoints",
          c(myargs, "-f", 1, "--precision", 0), PACKAGE = "ANTsR" )
    # antsImage in -> antsImage out; otherwise rebuild a data.frame and carry
    # over any extra (non-coordinate) columns untouched from the input.
    if ( inherits( points, "antsImage" ) ) return( pointsout )
    pointsout = data.frame( as.matrix( pointsout ) )
    colnames( pointsout ) = colnames( points )
    if ( ncol( pointsout ) > dim )
      pointsout[ , (dim+1):ncol(points) ] = points[ , (dim+1):ncol(points) ]
    return( pointsout )
  }
  if (!ttexists)
    stop("transforms may not exist")
}
|
/R/antsApplyTransformsToPoints.R
|
permissive
|
alainlompo/ANTsR
|
R
| false
| false
| 4,837
|
r
|
#' Apply transforms to points.
#'
#' Apply a transform list to map a pointset from one domain to another. In
#' registration, one computes mappings between pairs of domains.
#' These transforms are often a sequence of increasingly complex maps, e.g.
#' from translation, to rigid, to affine to deformation. The list of such
#' transforms is passed to this function to interpolate one image domain
#' into the next image domain, as below. The order matters strongly and the
#' user is advised to familiarize with the standards established in examples.
#' Importantly, point mapping goes the opposite direction of image mapping,
#' for both reasons of convention and engineering.
#'
#' @param dim dimensionality of the transformation.
#' @param points moving point set with n-points in rows of at least dim
#' columns - we maintain extra information in additional columns. this
#' may be either a dataframe or a 2D antsImage - the latter may be better
#' for large pointsets.
#' @param transformlist character vector of transforms generated by
#' antsRegistration where each transform is a filename.
#' @param whichtoinvert vector of booleans, same length as transforms
#' @return same type as input. 1 -- Failure
#' @author Avants BB
#' @examples
#'
#' fixed <- antsImageRead( getANTsRData("r16") ,2)
#' moving <- antsImageRead( getANTsRData("r64") ,2)
#' fixed <- resampleImage(fixed,c(64,64),1,0)
#' moving <- resampleImage(moving,c(64,64),1,0)
#' mytx <- antsRegistration(fixed=fixed , moving=moving ,
#' typeofTransform = c("SyN") )
#' pts=data.frame( x=c(110.5,120,130) , y=c(108.1,121.0,130),
#' label=c(1,2,3) )
#' wpts <- antsApplyTransformsToPoints( dim=2, points=pts,
#' transformlist=mytx$fwdtransforms )
#' wptsi <- antsApplyTransformsToPoints( dim=2, points=wpts,
#' transformlist=mytx$invtransforms ) # close to pts
#'
#' \dontrun{
#' fixed <- antsImageRead( getANTsRData("r16") ,2)
#' moving <- antsImageRead( getANTsRData("r64") ,2)
#' fpts = kmeansSegmentation( fixed , 3 )$segmentation %>%
#' thresholdImage(1,1) %>%
#' labelClusters( 5 ) %>% getCentroids(5)
#' wpts <- antsApplyTransformsToPoints( dim=2, points=fpts,
#' transformlist=mytx$fwdtransforms )
#' labimgf=fixed*0
#' labimgm=moving*0
#' for ( p in 1:nrow(wpts))
#' {
#' pt=as.numeric( wpts[p,1:2] )
#' idx=round( antsTransformPhysicalPointToIndex(moving, pt ) )
#' labimgm[ idx[1], idx[2] ]=p
#' pt=as.numeric( fpts[p,1:2] )
#' idx=round( antsTransformPhysicalPointToIndex(fixed, pt ) )
#' labimgf[ idx[1], idx[2] ]=p
#' }
#' plot(fixed,labimgf %>% iMath("GD",2) )
#' plot(moving,labimgm %>% iMath("GD",2) )
#' }
#'
#' @seealso \code{\link{antsRegistration}}
#' @export antsApplyTransformsToPoints
antsApplyTransformsToPoints <- function(
  dim,
  points,
  transformlist = "",
  whichtoinvert = NA )
{
  # Refuse to proceed unless every transform file exists on disk.
  ttexists <- TRUE
  for (i in seq_along(transformlist))
    {
    if ( ! file.exists( transformlist[i] ) ) ttexists <- FALSE
    }
  if (ttexists)
    {
    mytx <- list()
    if (all(is.na(whichtoinvert)))
      {
      # Default: invert nothing.
      # (Fixed: rep(F, ...) -> rep(FALSE, ...); T/F are reassignable.)
      whichtoinvert <- rep(FALSE, length(transformlist))
      }
    # Build the "-t <transform>" argument list; affine (.mat) transforms to
    # be inverted are wrapped as "[file,1]" per antsApplyTransforms syntax.
    for (i in seq_along(transformlist))
      {
      ismat <- FALSE
      if ((i == 1 & length(transformlist) > 1) |
          whichtoinvert[i] == TRUE)
        {
        # (Fixed: grep(".mat", ...) treated "." as a regex wildcard, so any
        # character before "mat" matched; fixed = TRUE matches literally.)
        if (grepl(".mat", transformlist[i], fixed = TRUE))
          {
          ismat <- TRUE
          }
        }
      if (!ismat)
        {
        mytx <- list(mytx, "-t", transformlist[i])
        } else if (ismat) {
        mytx <- list( mytx, "-t",
                      paste("[", transformlist[i], ",1]", sep = "") )
        }
      }
    # Accept either a data.frame-like pointset or a 2-D antsImage.
    # (Fixed: class(points)[[1]] string comparisons -> inherits().)
    if ( !inherits( points, "antsImage" ) )
      {
      usepts = as.antsImage( data.matrix( points ) )
      } else {
      usepts = antsImageClone( points )
      }
    if ( usepts@dimension != 2 ) stop("must be 2d antsImage")
    pointsout = antsImageClone( usepts )
    args <- list( d = dim, i = usepts, o = pointsout, unlist(mytx) )
    myargs <- .int_antsProcessArguments(c(args))
    # Strip stray "-" placeholder arguments produced by argument processing;
    # the is.na() guard covers positions past the shrunken vector's end.
    for (jj in seq_along(myargs)) {
      if (!is.na(myargs[jj])) {
        if (myargs[jj] == "-") {
          myargs2 <- rep(NA, (length(myargs) - 1))
          myargs2[1:(jj - 1)] <- myargs[1:(jj - 1)]
          myargs2[jj:(length(myargs) - 1)] <- myargs[(jj + 1):(length(myargs))]
          myargs <- myargs2
        }
      }
    }
    .Call("antsApplyTransformsToPoints",
          c(myargs, "-f", 1, "--precision", 0), PACKAGE = "ANTsR" )
    # antsImage in -> antsImage out; otherwise rebuild a data.frame and carry
    # over any extra (non-coordinate) columns untouched from the input.
    if ( inherits( points, "antsImage" ) ) return( pointsout )
    pointsout = data.frame( as.matrix( pointsout ) )
    colnames( pointsout ) = colnames( points )
    if ( ncol( pointsout ) > dim )
      pointsout[ , (dim+1):ncol(points) ] = points[ , (dim+1):ncol(points) ]
    return( pointsout )
  }
  if (!ttexists)
    stop("transforms may not exist")
}
|
# Fuzzer-generated regression input for the unexported detectRUNS
# genoConvertCpp routine; the genotype vector contains an NA and repeated
# out-of-range integer codes (presumably to probe error handling -- this is
# a libFuzzer/valgrind test case, not meaningful genotype data).
testlist <- list(genotype = c(-471604253L, NA, -471604253L, -471604253L, -471604253L, -471604253L, -471604253L))
# Invoke the internal C++ wrapper with the fuzz input and print the result's
# structure (str() will also surface errors in the returned object).
result <- do.call(detectRUNS:::genoConvertCpp,testlist)
str(result)
|
/detectRUNS/inst/testfiles/genoConvertCpp/libFuzzer_genoConvertCpp/genoConvertCpp_valgrind_files/1609875479-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 181
|
r
|
# Fuzzer-generated regression input for the unexported detectRUNS
# genoConvertCpp routine; the genotype vector contains an NA and repeated
# out-of-range integer codes (presumably to probe error handling -- this is
# a libFuzzer/valgrind test case, not meaningful genotype data).
testlist <- list(genotype = c(-471604253L, NA, -471604253L, -471604253L, -471604253L, -471604253L, -471604253L))
# Invoke the internal C++ wrapper with the fuzz input and print the result's
# structure (str() will also surface errors in the returned object).
result <- do.call(detectRUNS:::genoConvertCpp,testlist)
str(result)
|
# Splits the dataset into a training and a test set by time.
# `data` is a list with `cellnames` (character vector), `spiketimes`
# (named list of spike timestamps, one vector per cell) and `awake_angle`
# (numeric vector indexed by timestamp).
train_test_split = function(data){
  # Total number of timestamps in the recording.
  timespan = length(data$awake_angle)
  time_seq = seq_len(timespan)
  # Shuffling is currently disabled; uncomment to randomise the split.
  # shuffled_time_seq = sample(time_seq, timespan)
  shuffled_time_seq = time_seq
  # First half of the (possibly shuffled) timestamps -> training, the rest
  # -> test; sort() keeps each half in chronological order.
  # (Fixed two bugs: `1:timespan/2` parsed as `(1:timespan)/2`, producing
  # fractional indices, and `order()` returned positions rather than the
  # actual time values needed by the %in% filters below.)
  half = timespan %/% 2
  training_times = sort(shuffled_time_seq[seq_len(half)])
  test_times = sort(shuffled_time_seq[-seq_len(half)])
  training_data = data
  test_data = data
  # Keep only the awake angles belonging to each half.
  training_data$awake_angle = data$awake_angle[training_times]
  test_data$awake_angle = data$awake_angle[test_times]
  for(cellname in data$cellnames){
    # Keep only the spikes whose timestamps fall inside each half.
    training_data$spiketimes[[cellname]] = data$spiketimes[[cellname]][data$spiketimes[[cellname]] %in% training_times]
    test_data$spiketimes[[cellname]] = data$spiketimes[[cellname]][data$spiketimes[[cellname]] %in% test_times]
  }
  return(list(training_data = training_data,
              test_data = test_data))
}
# Removes indexs and values where we dont have angle data
#
# Converts windowed spike counts to firing rates (Hz) and drops time bins
# that fall within `smoothing_window` of either edge of the recording or
# that have no head-direction angle (NaN).
#
# NOTE(review): this reads the GLOBAL `test_data$awake_angle` rather than
# taking the angle vector as an argument, so it silently depends on whatever
# `test_data` exists in the calling environment -- TODO confirm intended.
processMomentaryPopulationVector = function(momentary_population_matrix, smoothing_window, total_time){
  # Convert to firing rate
  momentary_population_matrix = momentary_population_matrix/(2*smoothing_window/1000)
  # Remove bins that are smaller than 200ms or where we don't have the relevant
  # angle data for the mouse
  usable_indexs = smoothing_window:(total_time-smoothing_window)
  usable_indexs = usable_indexs[which(!is.nan(test_data$awake_angle[usable_indexs]))]
  momentary_population_matrix = as.matrix(momentary_population_matrix[,usable_indexs])
  # Return the surviving timestamp indices alongside the filtered matrix so
  # callers can align other per-bin data (e.g. angles) with the columns.
  return(list(usable_indexs=usable_indexs,
              momentary_population_matrix=momentary_population_matrix))
}
# Calculates the momentary population vector for every timestamp
#
# For each requested smoothing window, builds a cells x timestamps matrix of
# spike counts summed over a sliding window of +/- window bins, implemented
# as repeated additions of left- and right-shifted copies of the sparse
# spike raster.  Requires the Matrix package to be attached by the caller.
# Returns a collection (keyed by window size) of results from
# processMomentaryPopulationVector().
calculateMomentaryPopulationVectors = function(data, smoothing_windows){
  momentary_population_matrix_list = list()
  num_cells = length(data$cellnames)
  total_time = length(data$awake_angle)
  # Matrix with cells as rows and timestamps as columns
  activity_matrix=matrix(0, nrow=num_cells, ncol=total_time)
  # Set activity to one for every timestamp there is a spike in the
  # corresponding cell.
  for(i in 1:num_cells){
    activity_matrix[i,data$spiketimes[[data$cellnames[[i]]]]] = 1
  }
  activity_matrix = Matrix(activity_matrix, sparse=TRUE)
  # Create matrix where we will apply the smoothing window
  momentary_population_matrix = activity_matrix
  left = activity_matrix
  right = activity_matrix
  zero_vec = Matrix(0, nrow=num_cells, ncol=1, sparse=TRUE)
  for(i in 1:max(smoothing_windows)){
    # Shift one copy left and one right by a single bin per iteration and
    # accumulate: after i iterations each column holds the +/- i window sum.
    left = cbind(zero_vec,left[,-dim(left)[2]])
    right = cbind(right[,-1], zero_vec)
    momentary_population_matrix = momentary_population_matrix + left + right
    if(i %in% smoothing_windows){
      # Snapshot the accumulated matrix whenever a requested window size is
      # reached (windows must therefore be positive integers).
      temp_result = processMomentaryPopulationVector(momentary_population_matrix, i, total_time)
      momentary_population_matrix_list = cbind(momentary_population_matrix_list, list(temp_result))
    }
  }
  # NOTE(review): cbind() above turns the list into a 1-row list-matrix,
  # hence both names() and colnames() are assigned; confirm how downstream
  # code indexes this container.
  names(momentary_population_matrix_list) = smoothing_windows
  colnames(momentary_population_matrix_list) = smoothing_windows
  return(momentary_population_matrix_list)
}
# Returns the reference population vector for an input angle: the firing
# rates recorded in the tuning curves for the bin that the angle falls into.
calculateReferencePopulationVector = function(angle, tuning_curves){
  # Bin width: the smallest bin centre sits half a bin from zero, so double it.
  width = 2 * min(tuning_curves$angle_bins)
  # Map the continuous angle onto the corresponding bin centre value.
  target_bin = tuning_curves$angle_bin[ceiling(angle / width)]
  # All firing rates recorded for that bin form the reference vector.
  matching_rows = tuning_curves$angle_bin == target_bin
  tuning_curves[matching_rows, ]$firing_rate
}
# Finds the angle whose reference population vector has the HIGHEST Pearson
# correlation with the observed momentary population vector.
# (Fixed three issues: the comment said "lowest" while which.max() selects
# the highest; the lapply referenced the undefined global `tuning_curve`
# instead of the `tuning_curves` argument; and which.max() was applied to a
# list -- correlations are now collected into a numeric vector via vapply.)
findHighestCorAngle = function(momentary_population, tuning_curves){
  angle_bins = unique(tuning_curves$angle_bins)
  # One reference population vector per unique angle bin.
  population_vectors = lapply(angle_bins, calculateReferencePopulationVector, tuning_curves=tuning_curves)
  # Pearson correlation of each reference vector with the observed vector.
  correlations = vapply(population_vectors,
                        function(v) cor(v, momentary_population, use="pairwise.complete.obs", method="pearson"),
                        numeric(1))
  best_angle = angle_bins[which.max(correlations)]
  return(best_angle)
}
# Absolute angular difference (degrees) between two angles, folded into
# [0, 180] so that e.g. 350 and 10 are 20 degrees apart.  Vectorised.
angleDiff = function(angle1, angle2){
  # Work in radians so atan2 can handle the wrap-around at 360 degrees.
  delta = (angle1 - angle2) * pi / 180
  # atan2(sin, cos) folds the difference into (-pi, pi]; abs gives [0, pi].
  folded = abs(atan2(sin(delta), cos(delta)))
  folded * 180 / pi
}
## Mutual information
# Estimates the mutual information between the binned head-direction
# stimulus and one cell's spike-count response, using the empirical plug-in
# estimator mi.empirical() from the `entropy` package.
#
# binned_awake_angles: stimulus label per usable time bin
# momentary_population_matrix: cells x bins matrix of firing rates
# cellnumber: row index of the cell to analyse
# smoothing_window: half-width (ms) used to build the matrix; needed to
#   convert firing rates back into spike counts
calculateMutualInformation = function(binned_awake_angles, momentary_population_matrix, cellnumber, smoothing_window){
  library(entropy)
  response = momentary_population_matrix[cellnumber,]
  # Convert from firing rate to number of spikes
  response = response*(2*smoothing_window/1000)
  stimulus_dist = estimateDistribution(binned_awake_angles)
  p_conditional_response = calculateConditionalResponse(response, binned_awake_angles)
  response_dist = estimateDistribution(response)
  ## I tried implementing this myself first, the trends look correct, but my values are wrong
  ## probably some normalization issues.
  # information = 0
  # for(s in 1:length(stimulus_dist)){
  #   for(r in 1:length(response_dist)){
  #     log_term = log2(p_conditional_response[r,s]/response_dist[r])
  #     if(is.infinite(log_term)){
  #       next
  #     }
  #     information = information + stimulus_dist[s]*p_conditional_response[r,s]*log_term
  #     # print(paste('i', s, 'j', r))
  #     # print(information)
  #
  #   }
  # }
  # count_stim = freqs.empirical(table(binned_awake_angles))
  # count_resp = freqs.empirical(table(response))
  # NOTE(review): only the grand-total-normalised joint table feeds the
  # return value; stimulus_dist and response_dist above are computed but
  # unused by mi.empirical() -- leftovers from the manual attempt.
  return(mi.empirical(p_conditional_response))
  # return(information)
}
# I.e P(r|s)
# Builds the empirical table of (response, stimulus) counts, one column per
# unique stimulus value, then normalises by the grand total.
# NOTE(review): despite the "P(r|s)" label, dividing by the overall sum
# yields the joint distribution P(r, s) -- which is what mi.empirical()
# consumes -- rather than a per-stimulus conditional; confirm intent.
# Requires dplyr (%>%, filter, select) to be attached by the caller.
calculateConditionalResponse = function(response, binned_awake_angles){
  # Stimulus response dataframe
  df = data.frame(stimulus=binned_awake_angles, response=as.factor(response))
  # Seed column of zeros (dropped below) so cbind has something to bind to.
  result = rep(0,length(unique(response)))
  for(stim in unique(df$stimulus)){
    # Count occurrences of each response level under this stimulus; the
    # factor coercion above makes every column cover all response levels.
    filtered = df %>% filter(stimulus==stim) %>% select(response)
    response_given_stim = as.data.frame(table(filtered))
    result = cbind(result, response_given_stim$Freq)
  }
  result = result[,-1]
  result = result/sum(rowSums(result))
  return(result)
}
# Plug-in (empirical) estimate of the probability distribution of `data`:
# the relative frequency of each unique value, in table() (sorted) order.
estimateDistribution = function(data){
  counts = table(data)
  as.vector(counts) / length(data)
}
|
/util/problem5.R
|
no_license
|
ramunter/NEVR3004
|
R
| false
| false
| 6,808
|
r
|
# Splits the dataset into a training and a test set by time.
# `data` is a list with `cellnames` (character vector), `spiketimes`
# (named list of spike timestamps, one vector per cell) and `awake_angle`
# (numeric vector indexed by timestamp).
train_test_split = function(data){
  # Total number of timestamps in the recording.
  timespan = length(data$awake_angle)
  time_seq = seq_len(timespan)
  # Shuffling is currently disabled; uncomment to randomise the split.
  # shuffled_time_seq = sample(time_seq, timespan)
  shuffled_time_seq = time_seq
  # First half of the (possibly shuffled) timestamps -> training, the rest
  # -> test; sort() keeps each half in chronological order.
  # (Fixed two bugs: `1:timespan/2` parsed as `(1:timespan)/2`, producing
  # fractional indices, and `order()` returned positions rather than the
  # actual time values needed by the %in% filters below.)
  half = timespan %/% 2
  training_times = sort(shuffled_time_seq[seq_len(half)])
  test_times = sort(shuffled_time_seq[-seq_len(half)])
  training_data = data
  test_data = data
  # Keep only the awake angles belonging to each half.
  training_data$awake_angle = data$awake_angle[training_times]
  test_data$awake_angle = data$awake_angle[test_times]
  for(cellname in data$cellnames){
    # Keep only the spikes whose timestamps fall inside each half.
    training_data$spiketimes[[cellname]] = data$spiketimes[[cellname]][data$spiketimes[[cellname]] %in% training_times]
    test_data$spiketimes[[cellname]] = data$spiketimes[[cellname]][data$spiketimes[[cellname]] %in% test_times]
  }
  return(list(training_data = training_data,
              test_data = test_data))
}
# Removes indexs and values where we dont have angle data
#
# Converts windowed spike counts to firing rates (Hz) and drops time bins
# that fall within `smoothing_window` of either edge of the recording or
# that have no head-direction angle (NaN).
#
# NOTE(review): this reads the GLOBAL `test_data$awake_angle` rather than
# taking the angle vector as an argument, so it silently depends on whatever
# `test_data` exists in the calling environment -- TODO confirm intended.
processMomentaryPopulationVector = function(momentary_population_matrix, smoothing_window, total_time){
  # Convert to firing rate
  momentary_population_matrix = momentary_population_matrix/(2*smoothing_window/1000)
  # Remove bins that are smaller than 200ms or where we don't have the relevant
  # angle data for the mouse
  usable_indexs = smoothing_window:(total_time-smoothing_window)
  usable_indexs = usable_indexs[which(!is.nan(test_data$awake_angle[usable_indexs]))]
  momentary_population_matrix = as.matrix(momentary_population_matrix[,usable_indexs])
  # Return the surviving timestamp indices alongside the filtered matrix so
  # callers can align other per-bin data (e.g. angles) with the columns.
  return(list(usable_indexs=usable_indexs,
              momentary_population_matrix=momentary_population_matrix))
}
# Calculates the momentary population vector for every timestamp
#
# For each requested smoothing window, builds a cells x timestamps matrix of
# spike counts summed over a sliding window of +/- window bins, implemented
# as repeated additions of left- and right-shifted copies of the sparse
# spike raster.  Requires the Matrix package to be attached by the caller.
# Returns a collection (keyed by window size) of results from
# processMomentaryPopulationVector().
calculateMomentaryPopulationVectors = function(data, smoothing_windows){
  momentary_population_matrix_list = list()
  num_cells = length(data$cellnames)
  total_time = length(data$awake_angle)
  # Matrix with cells as rows and timestamps as columns
  activity_matrix=matrix(0, nrow=num_cells, ncol=total_time)
  # Set activity to one for every timestamp there is a spike in the
  # corresponding cell.
  for(i in 1:num_cells){
    activity_matrix[i,data$spiketimes[[data$cellnames[[i]]]]] = 1
  }
  activity_matrix = Matrix(activity_matrix, sparse=TRUE)
  # Create matrix where we will apply the smoothing window
  momentary_population_matrix = activity_matrix
  left = activity_matrix
  right = activity_matrix
  zero_vec = Matrix(0, nrow=num_cells, ncol=1, sparse=TRUE)
  for(i in 1:max(smoothing_windows)){
    # Shift one copy left and one right by a single bin per iteration and
    # accumulate: after i iterations each column holds the +/- i window sum.
    left = cbind(zero_vec,left[,-dim(left)[2]])
    right = cbind(right[,-1], zero_vec)
    momentary_population_matrix = momentary_population_matrix + left + right
    if(i %in% smoothing_windows){
      # Snapshot the accumulated matrix whenever a requested window size is
      # reached (windows must therefore be positive integers).
      temp_result = processMomentaryPopulationVector(momentary_population_matrix, i, total_time)
      momentary_population_matrix_list = cbind(momentary_population_matrix_list, list(temp_result))
    }
  }
  # NOTE(review): cbind() above turns the list into a 1-row list-matrix,
  # hence both names() and colnames() are assigned; confirm how downstream
  # code indexes this container.
  names(momentary_population_matrix_list) = smoothing_windows
  colnames(momentary_population_matrix_list) = smoothing_windows
  return(momentary_population_matrix_list)
}
# Returns the reference population vector for an input angle: the firing
# rates recorded in the tuning curves for the bin that the angle falls into.
calculateReferencePopulationVector = function(angle, tuning_curves){
  # Bin width: the smallest bin centre sits half a bin from zero, so double it.
  width = 2 * min(tuning_curves$angle_bins)
  # Map the continuous angle onto the corresponding bin centre value.
  target_bin = tuning_curves$angle_bin[ceiling(angle / width)]
  # All firing rates recorded for that bin form the reference vector.
  matching_rows = tuning_curves$angle_bin == target_bin
  tuning_curves[matching_rows, ]$firing_rate
}
# Finds the angle whose reference population vector has the HIGHEST Pearson
# correlation with the observed momentary population vector.
# (Fixed three issues: the comment said "lowest" while which.max() selects
# the highest; the lapply referenced the undefined global `tuning_curve`
# instead of the `tuning_curves` argument; and which.max() was applied to a
# list -- correlations are now collected into a numeric vector via vapply.)
findHighestCorAngle = function(momentary_population, tuning_curves){
  angle_bins = unique(tuning_curves$angle_bins)
  # One reference population vector per unique angle bin.
  population_vectors = lapply(angle_bins, calculateReferencePopulationVector, tuning_curves=tuning_curves)
  # Pearson correlation of each reference vector with the observed vector.
  correlations = vapply(population_vectors,
                        function(v) cor(v, momentary_population, use="pairwise.complete.obs", method="pearson"),
                        numeric(1))
  best_angle = angle_bins[which.max(correlations)]
  return(best_angle)
}
# Absolute angular difference (degrees) between two angles, folded into
# [0, 180] so that e.g. 350 and 10 are 20 degrees apart.  Vectorised.
angleDiff = function(angle1, angle2){
  # Work in radians so atan2 can handle the wrap-around at 360 degrees.
  delta = (angle1 - angle2) * pi / 180
  # atan2(sin, cos) folds the difference into (-pi, pi]; abs gives [0, pi].
  folded = abs(atan2(sin(delta), cos(delta)))
  folded * 180 / pi
}
## Mutual information
# Estimates the mutual information between the binned head-direction
# stimulus and one cell's spike-count response, using the empirical plug-in
# estimator mi.empirical() from the `entropy` package.
#
# binned_awake_angles: stimulus label per usable time bin
# momentary_population_matrix: cells x bins matrix of firing rates
# cellnumber: row index of the cell to analyse
# smoothing_window: half-width (ms) used to build the matrix; needed to
#   convert firing rates back into spike counts
calculateMutualInformation = function(binned_awake_angles, momentary_population_matrix, cellnumber, smoothing_window){
  library(entropy)
  response = momentary_population_matrix[cellnumber,]
  # Convert from firing rate to number of spikes
  response = response*(2*smoothing_window/1000)
  stimulus_dist = estimateDistribution(binned_awake_angles)
  p_conditional_response = calculateConditionalResponse(response, binned_awake_angles)
  response_dist = estimateDistribution(response)
  ## I tried implementing this myself first, the trends look correct, but my values are wrong
  ## probably some normalization issues.
  # information = 0
  # for(s in 1:length(stimulus_dist)){
  #   for(r in 1:length(response_dist)){
  #     log_term = log2(p_conditional_response[r,s]/response_dist[r])
  #     if(is.infinite(log_term)){
  #       next
  #     }
  #     information = information + stimulus_dist[s]*p_conditional_response[r,s]*log_term
  #     # print(paste('i', s, 'j', r))
  #     # print(information)
  #
  #   }
  # }
  # count_stim = freqs.empirical(table(binned_awake_angles))
  # count_resp = freqs.empirical(table(response))
  # NOTE(review): only the grand-total-normalised joint table feeds the
  # return value; stimulus_dist and response_dist above are computed but
  # unused by mi.empirical() -- leftovers from the manual attempt.
  return(mi.empirical(p_conditional_response))
  # return(information)
}
# I.e P(r|s)
# Builds the empirical table of (response, stimulus) counts, one column per
# unique stimulus value, then normalises by the grand total.
# NOTE(review): despite the "P(r|s)" label, dividing by the overall sum
# yields the joint distribution P(r, s) -- which is what mi.empirical()
# consumes -- rather than a per-stimulus conditional; confirm intent.
# Requires dplyr (%>%, filter, select) to be attached by the caller.
calculateConditionalResponse = function(response, binned_awake_angles){
  # Stimulus response dataframe
  df = data.frame(stimulus=binned_awake_angles, response=as.factor(response))
  # Seed column of zeros (dropped below) so cbind has something to bind to.
  result = rep(0,length(unique(response)))
  for(stim in unique(df$stimulus)){
    # Count occurrences of each response level under this stimulus; the
    # factor coercion above makes every column cover all response levels.
    filtered = df %>% filter(stimulus==stim) %>% select(response)
    response_given_stim = as.data.frame(table(filtered))
    result = cbind(result, response_given_stim$Freq)
  }
  result = result[,-1]
  result = result/sum(rowSums(result))
  return(result)
}
# Plug-in (empirical) estimate of the probability distribution of `data`:
# the relative frequency of each unique value, in table() (sorted) order.
estimateDistribution = function(data){
  counts = table(data)
  as.vector(counts) / length(data)
}
|
# Parse the roxygen comment blocks of an R module and render them as JSON.
#
# module_text: character vector of source lines (one element per line).
# Returns a JSON string describing the second roxygen block of the module
# (the first block is skipped -- presumably a file-level header; confirm).
# Requires roxygen2 and jsonlite; note roxygen2:::parse_text is an
# UNEXPORTED internal API and may break across roxygen2 versions -- the
# calling script guards the whole call with try() for that reason.
module2json <- function(module_text){
  library(roxygen2, quietly=TRUE)
  library(jsonlite, quietly=TRUE)
  # Take the second parsed block; [[2]] errors if fewer than two blocks
  # exist (absorbed by the caller's try()).
  x <- roxygen2:::parse_text(module_text)[[2]]
  if(is.null(x)){
    x <- list()
  }
  # Normalise a single tag to a one-element list so sapply iterates tags.
  if(length(x) == 1){
    x <- list(x)
  }
  y <- sapply(x, function(o){
    # Drop unserialisable parse artefacts before JSON conversion.
    o$object = NULL
    o$srcref = NULL
    # Flatten multi-line example code into a single string.
    if(!is.null(o$examples)){ o$examples = toString(o$examples) }
    return(o)
  })
  return(jsonlite::toJSON(y, pretty = TRUE, auto_unbox = TRUE))
}
# Read the module source from standard input (one element per line).
con <- file("stdin")
file_contents <- readLines(con)
# Fall back to an empty JSON array if parsing fails for any reason.
result = try(module2json(file_contents), silent=TRUE)
# (Fixed: class(result) == "try-error" -> inherits(); class() can return a
# vector, and a length-> 1 condition in if() is an error in modern R.)
if(inherits(result, "try-error")){
  result = "[]"
}
cat(paste(result, "\n"))
|
/R/module2json.R
|
permissive
|
Vizzuality/zoon
|
R
| false
| false
| 649
|
r
|
# Parse the roxygen comment blocks of an R module and render them as JSON.
#
# module_text: character vector of source lines (one element per line).
# Returns a JSON string describing the second roxygen block of the module
# (the first block is skipped -- presumably a file-level header; confirm).
# Requires roxygen2 and jsonlite; note roxygen2:::parse_text is an
# UNEXPORTED internal API and may break across roxygen2 versions -- the
# calling script guards the whole call with try() for that reason.
module2json <- function(module_text){
  library(roxygen2, quietly=TRUE)
  library(jsonlite, quietly=TRUE)
  # Take the second parsed block; [[2]] errors if fewer than two blocks
  # exist (absorbed by the caller's try()).
  x <- roxygen2:::parse_text(module_text)[[2]]
  if(is.null(x)){
    x <- list()
  }
  # Normalise a single tag to a one-element list so sapply iterates tags.
  if(length(x) == 1){
    x <- list(x)
  }
  y <- sapply(x, function(o){
    # Drop unserialisable parse artefacts before JSON conversion.
    o$object = NULL
    o$srcref = NULL
    # Flatten multi-line example code into a single string.
    if(!is.null(o$examples)){ o$examples = toString(o$examples) }
    return(o)
  })
  return(jsonlite::toJSON(y, pretty = TRUE, auto_unbox = TRUE))
}
# Read the module source from standard input (one element per line).
con <- file("stdin")
file_contents <- readLines(con)
# Fall back to an empty JSON array if parsing fails for any reason.
result = try(module2json(file_contents), silent=TRUE)
# (Fixed: class(result) == "try-error" -> inherits(); class() can return a
# vector, and a length-> 1 condition in if() is an error in modern R.)
if(inherits(result, "try-error")){
  result = "[]"
}
cat(paste(result, "\n"))
|
# Load time-series and panel-data modelling libraries.
library(xts)
library(plm)
# Panel of bank call-report data; rows indexed by bank ID (IDRSSD) and quarter.
data <- readRDS("./panel.rds")
dataP <- pdata.frame(data, index = c("IDRSSD", "quarter"), drop.index=TRUE, row.names=TRUE)
# Two-way (entity and time) fixed-effects regression of capitalised loans on
# retained earnings, tier-1 capital and total non-interest income.
testFEmodel <- plm(loans_cap ~ retained_earnings + tier_1_cap + total_noninterest_income, data = dataP, model = "within", effect = "twoways")
summary(testFEmodel)
### can SUCCESSFULLY run a FE model with plm!
|
/1_querying_data_and_analysis/analyses/panel_data_analysis/old/panel_alive_2017/test_panel_data_frame.R
|
no_license
|
Matt-Brigida/FFIEC_Call_Reports
|
R
| false
| false
| 364
|
r
|
# Load time-series and panel-data modelling libraries.
library(xts)
library(plm)
# Panel of bank call-report data; rows indexed by bank ID (IDRSSD) and quarter.
data <- readRDS("./panel.rds")
dataP <- pdata.frame(data, index = c("IDRSSD", "quarter"), drop.index=TRUE, row.names=TRUE)
# Two-way (entity and time) fixed-effects regression of capitalised loans on
# retained earnings, tier-1 capital and total non-interest income.
testFEmodel <- plm(loans_cap ~ retained_earnings + tier_1_cap + total_noninterest_income, data = dataP, model = "within", effect = "twoways")
summary(testFEmodel)
### can SUCCESSFULLY run a FE model with plm!
|
r=359.75
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7rp5q/media/images/d7rp5q-001/svc:tesseract/full/full/359.75/default.jpg Accept:application/hocr+xml
|
/ark_87287/d7rp5q/d7rp5q-001/rotated.r
|
permissive
|
ucd-library/wine-price-extraction
|
R
| false
| false
| 199
|
r
|
r=359.75
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7rp5q/media/images/d7rp5q-001/svc:tesseract/full/full/359.75/default.jpg Accept:application/hocr+xml
|
\name{optiscale-package}
\alias{optiscale-package}
\alias{optiscale}
\docType{package}
\title{Optimal Scaling of a Data Vector}
\description{
This package provides tools to perform an optimal scaling analysis
on a data vector.
The main result of the optimal scaling is a vector of scores which
are a least-squares approximation to a vector of quantitative
values, subject to measurement constraints based upon a vector
of qualitative data values.
See Young (1981) for details.
}
\details{
\tabular{ll}{
Package: \tab optiscale\cr
Type: \tab Package\cr
Version: \tab 1.2.2\cr
Date: \tab 2021-02-02\cr
License: \tab GPL-2 \cr
LazyLoad: \tab yes\cr
}
The function that performs the optimal scaling is \code{opscale()}.
It produces an object of class "opscale".
Generic methods are defined for \code{print}, \code{summary}, and
\code{plot} (graphing optimally-scaled values versus
original data values).
}
\author{
William G. Jacoby
Maintainer:
William G. Jacoby <\email{wm.g.jacoby@gmail.com}>
}
\references{
Young, Forrest W. (1981) \dQuote{Quantitative Analysis of
Qualitative Data.} \emph{Psychometrika} 46: 357-388.
}
\seealso{
\code{\link{opscale},\link{plot.opscale}, \link{print.opscale},
\link{summary.opscale}}
}
\examples{
### x1 is vector of qualitative data
### x2 is vector of quantitative values
x1 <- c(1,1,1,1,2,2,2,3,3,3,3,3,3)
x2 <- c(3,2,2,2,1,2,3,4,5,2,6,6,4)
### Optimal scaling, specifying that x1
### is ordinal-discrete
op.scaled <- opscale(x.qual=x1, x.quant=x2,
level=2, process=1)
print(op.scaled)
summary(op.scaled)
}
|
/man/optiscale-package.Rd
|
no_license
|
cran/optiscale
|
R
| false
| false
| 1,762
|
rd
|
\name{optiscale-package}
\alias{optiscale-package}
\alias{optiscale}
\docType{package}
\title{Optimal Scaling of a Data Vector}
\description{
This package provides tools to perform an optimal scaling analysis
on a data vector.
The main result of the optimal scaling is a vector of scores which
are a least-squares approximation to a vector of quantitative
values, subject to measurement constraints based upon a vector
of qualitative data values.
See Young (1981) for details.
}
\details{
\tabular{ll}{
Package: \tab optiscale\cr
Type: \tab Package\cr
Version: \tab 1.2.2\cr
Date: \tab 2021-02-02\cr
License: \tab GPL-2 \cr
LazyLoad: \tab yes\cr
}
The function that performs the optimal scaling is \code{opscale()}.
It produces an object of class "opscale".
Generic methods are defined for \code{print}, \code{summary}, and
\code{plot} (graphing optimally-scaled values versus
original data values).
}
\author{
William G. Jacoby
Maintainer:
William G. Jacoby <\email{wm.g.jacoby@gmail.com}>
}
\references{
Young, Forrest W. (1981) \dQuote{Quantitative Analysis of
Qualitative Data.} \emph{Psychometrika} 46: 357-388.
}
\seealso{
\code{\link{opscale},\link{plot.opscale}, \link{print.opscale},
\link{summary.opscale}}
}
\examples{
### x1 is vector of qualitative data
### x2 is vector of quantitative values
x1 <- c(1,1,1,1,2,2,2,3,3,3,3,3,3)
x2 <- c(3,2,2,2,1,2,3,4,5,2,6,6,4)
### Optimal scaling, specifying that x1
### is ordinal-discrete
op.scaled <- opscale(x.qual=x1, x.quant=x2,
level=2, process=1)
print(op.scaled)
summary(op.scaled)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init.R
\name{cnlp_init_corenlp}
\alias{cnlp_init_corenlp}
\title{Interface for initializing the coreNLP backend}
\usage{
cnlp_init_corenlp(lang = NULL, models_dir = NULL)
}
\arguments{
\item{lang}{string giving the language name for the corenlp backend.
Defaults to "en" (English) if set to NULL.}
\item{models_dir}{directory where model files are located. Set to NULL to
use the default.}
}
\description{
This function must be run before annotating text with
the coreNLP backend. It sets the properties for the
spacy engine and loads the file using the R to Python
interface provided by reticulate.
}
\examples{
\dontrun{
cnlp_init_corenlp()
}
}
\author{
Taylor B. Arnold, \email{taylor.arnold@acm.org}
}
|
/man/cnlp_init_corenlp.Rd
|
no_license
|
takewiki/cleanNLP
|
R
| false
| true
| 786
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/init.R
\name{cnlp_init_corenlp}
\alias{cnlp_init_corenlp}
\title{Interface for initializing the coreNLP backend}
\usage{
cnlp_init_corenlp(lang = NULL, models_dir = NULL)
}
\arguments{
\item{lang}{string giving the language name for the corenlp backend.
Defaults to "en" (English) if set to NULL.}
\item{models_dir}{directory where model files are located. Set to NULL to
use the default.}
}
\description{
This function must be run before annotating text with
the coreNLP backend. It sets the properties for the
spacy engine and loads the file using the R to Python
interface provided by reticulate.
}
\examples{
\dontrun{
cnlp_init_corenlp()
}
}
\author{
Taylor B. Arnold, \email{taylor.arnold@acm.org}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emuR-dataDocs.R
\docType{data}
\name{vowlax}
\alias{vowlax}
\title{Segment list of four lax vowels, read speech, one male and one female
speaker of Standard North German from database kielread.}
\format{segmentlist}
\description{
An EMU dataset
}
\keyword{datasets}
|
/man/vowlax.Rd
|
no_license
|
fsuettmann/emuR
|
R
| false
| true
| 344
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emuR-dataDocs.R
\docType{data}
\name{vowlax}
\alias{vowlax}
\title{Segment list of four lax vowels, read speech, one male and one female
speaker of Standard North German from database kielread.}
\format{segmentlist}
\description{
An EMU dataset
}
\keyword{datasets}
|
#k.hbcdist.R
# Home-based college (HBC) destination choice / trip distribution.
#
# Inputs created upstream in the model stream: income-specific mode-choice
# logsums (mf.hclsl/m/h), zone-to-zone distance (mf.tdist), state-line
# crossing flags (mf.orwa, mf.waor), zone groupings (ensemble.gw/gr/ga),
# enrollment (ma.enroll), college productions/attractions by income class
# (ma.collpr*/ma.collat*), HBC.* model coefficients, numzones, the mce flag,
# and the distsum() reporting function.
# Outputs: balanced distribution matrices mf.colldtl/m/h and mf.colldt,
# report files panda_hbc.rpt and hbcdist.rpt, and (when mce is TRUE) an OMX
# file of destination-choice logsums.

# Weighted Average Logsum across the three income classes
mf.hcls <- HBC.lsLowWeight*mf.hclsl + HBC.lsMidWeight*mf.hclsm + HBC.lsHighWeight*mf.hclsh

# 0/1 indicator matrices for directional crossings of the West Hills
# (ensemble.gw groups) and the Willamette River (ensemble.gr groups)
east2westhill<-as.matrix(array(0,c(numzones,numzones)))
east2westhill[ensemble.gw==2,ensemble.gw==1]<-1
westhill2east<-as.matrix(array(0,c(numzones,numzones)))
westhill2east[ensemble.gw==1,ensemble.gw==2]<-1
east2westriv<-as.matrix(array(0,c(numzones,numzones)))
east2westriv[ensemble.gr==2,ensemble.gr==1]<-1
westriv2east<-as.matrix(array(0,c(numzones,numzones)))
westriv2east[ensemble.gr==1,ensemble.gr==2]<-1

#############################################################
#                   Raw HBC Utility                         #
#############################################################
# Broadcast attractions across rows so destination zones with zero
# attractions can be masked out of the utility matrix below
mf.collat <- matrix (ma.collat, length(ma.collat), length(ma.collat), byrow=T)
# Exponentiated destination-choice utility: mode-choice logsum, log-distance
# terms with direction-specific state-line/barrier interactions, and a size
# term log(coeff*enrollment + 1) added column-wise via sweep()
mf.util <- exp(sweep(HBC.lsCoeff * mf.hcls
  + HBC.logdistXorwaCoeff * mf.orwa * log (mf.tdist + 1)
  + HBC.logdistXwaorCoeff * mf.waor * log (mf.tdist + 1)
  + HBC.logdistXnoXingCoeff * ((mf.orwa + mf.waor)==0) * log (mf.tdist + 1)
  + HBC.logdistXewWestHillsCoeff * east2westhill * log (mf.tdist + 1)
  + HBC.logdistXweWestHillsCoeff * westhill2east * log (mf.tdist + 1)
  + HBC.logdistXewWillRiverCoeff * east2westriv * log (mf.tdist + 1)
  + HBC.logdistXweWillRiverCoeff * westriv2east * log (mf.tdist + 1)
  , 2, log(HBC.enrollCoeff * ma.enroll + 1), "+"))
# Zero out destinations with no college attractions
mf.util[mf.collat[,]==0] <- 0
# Row sums of utility = destination-choice denominator per origin zone;
# matrix() fills column-wise, so mf.utsum is constant along each row
ma.utsum <- apply(mf.util,1,sum)
mf.utsum <- matrix(ma.utsum,length(ma.utsum),length(ma.utsum))

# Low Income Distribution: choice probabilities scaled by productions
mf.hbcdtl <- matrix(0,numzones,numzones)
mf.hbcdtl[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0]
mf.hbcdtl <- sweep(mf.hbcdtl,1,ma.collprl,"*")
if (mce){
  # destination-choice logsum by origin zone, saved for the MCE export below
  ma.hcldcls <- log(ma.utsum)
  # save (ma.hbcldcls, file="ma.hbcldcls.dat")
  # write.table(ma.hbcldcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbcldcls.csv", col.names=c("hbcldcls"))
  # write.table(ma.collprl, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.collprl.csv", col.names=c("hbcprl"))
}
# Middle Income Distribution
mf.hbcdtm <- matrix(0,numzones,numzones)
mf.hbcdtm[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0]
mf.hbcdtm <- sweep(mf.hbcdtm,1,ma.collprm,"*")
if (mce){
  ma.hcmdcls <- log(ma.utsum)
  # save (ma.hbcmdcls, file="ma.hbcmdcls.dat")
  # write.table(ma.hbcmdcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbcmdcls.csv", col.names=c("hbcmdcls"))
  # write.table(ma.collprm, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.collprm.csv", col.names=c("hbcprm"))
}
# High Income Distribution
mf.hbcdth <- matrix(0,numzones,numzones)
mf.hbcdth[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0]
mf.hbcdth <- sweep(mf.hbcdth,1,ma.collprh,"*")
if (mce){
  ma.hchdcls <- log(ma.utsum)
  # save (ma.hbchdcls, file="ma.hbchdcls.dat")
  # write.table(ma.hbchdcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbchdcls.csv", col.names=c("hbchdcls"))
  # write.table(ma.collprh, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.collprh.csv", col.names=c("hbcprh"))
}

# Export the three income-class logsum vectors (ma.hcldcls/hcmdcls/hchdcls,
# looked up by name via get()) to an OMX file for the MCE process
if (mce) {
  purpose_dc <- 'hc'
  omxFileName <- paste(project.dir,"/_mceInputs/",project,"_",year,"_",alternative,"_dest_choice_",purpose_dc,".omx",sep='')
  create_omx(omxFileName, numzones, numzones, 7)
  write_omx(file=omxFileName,
            matrix=get(paste("ma.",purpose_dc,"ldcls",sep='')),
            name=paste("ma.",purpose_dc,"ldcls",sep=''),
            replace=TRUE)
  write_omx(file=omxFileName,
            matrix=get(paste("ma.",purpose_dc,"mdcls",sep='')),
            name=paste("ma.",purpose_dc,"mdcls",sep=''),
            replace=TRUE)
  write_omx(file=omxFileName,
            matrix=get(paste("ma.",purpose_dc,"hdcls",sep='')),
            name=paste("ma.",purpose_dc,"hdcls",sep=''),
            replace=TRUE)
}

#############################################################
#    Output productions and attractions - WRS 11/28/06      #
#############################################################
# Remove any stale report before writing.  (Fix: the original guard removed
# "panda_coll.rpt", a file this script never writes; the report written
# below is panda_hbc.rpt.  Also use file.exists/file.remove instead of
# shelling out to "rm" for portability.)
if (file.exists("panda_hbc.rpt")) {file.remove("panda_hbc.rpt")}
# District-level (ensemble.ga, 8 districts) sums of productions/attractions
panda <- matrix (0,8,2)
panda[1,1] <- round (sum (ma.collpr[ensemble.ga==1]),0)
panda[2,1] <- round (sum (ma.collpr[ensemble.ga==2]),0)
panda[3,1] <- round (sum (ma.collpr[ensemble.ga==3]),0)
panda[4,1] <- round (sum (ma.collpr[ensemble.ga==4]),0)
panda[5,1] <- round (sum (ma.collpr[ensemble.ga==5]),0)
panda[6,1] <- round (sum (ma.collpr[ensemble.ga==6]),0)
panda[7,1] <- round (sum (ma.collpr[ensemble.ga==7]),0)
panda[8,1] <- round (sum (ma.collpr[ensemble.ga==8]),0)
panda[1,2] <- round (sum (ma.collat[ensemble.ga==1]),0)
panda[2,2] <- round (sum (ma.collat[ensemble.ga==2]),0)
panda[3,2] <- round (sum (ma.collat[ensemble.ga==3]),0)
panda[4,2] <- round (sum (ma.collat[ensemble.ga==4]),0)
panda[5,2] <- round (sum (ma.collat[ensemble.ga==5]),0)
panda[6,2] <- round (sum (ma.collat[ensemble.ga==6]),0)
panda[7,2] <- round (sum (ma.collat[ensemble.ga==7]),0)
panda[8,2] <- round (sum (ma.collat[ensemble.ga==8]),0)
rownames (panda) <- c("ga1","ga2","ga3","ga4","ga5","ga6","ga7","ga8")
colnames (panda) <- c("collpr","collat")
# Write header lines, then append the district table
outfile <- file ("panda_hbc.rpt", "w")
writeLines (project, con=outfile, sep="\n")
writeLines (paste ("Metro (", toupper (initials), ") - ", date(), sep=''),
            con=outfile, sep="\n")
writeLines ("ens\tcollpr\tcollat", con=outfile,sep="\n")
close (outfile)
write.table (panda,"panda_hbc.rpt",append=T,row.names=T,col.names=F,quote=F,sep="\t")

# Balance Distribution Matrix: iterative proportional fitting (100 iters)
# of each income-class matrix to its production/attraction totals
source(paste(R.path, "k.balance.R", sep='/'))
mf.colldtl <- balance (mf.hbcdtl,ma.collprl,ma.collatl,100)
mf.colldtm <- balance (mf.hbcdtm,ma.collprm,ma.collatm,100)
mf.colldth <- balance (mf.hbcdth,ma.collprh,ma.collath,100)

#############################################################
#                Total HBC Distribution                     #
#############################################################
mf.colldt <- mf.colldtl + mf.colldtm + mf.colldth

# Remove temporary matrices
rm(ma.utsum,mf.util,mf.collat)

# 8-district summaries (remove any stale report first; portable form of
# the original system("rm ...") call)
if (file.exists("hbcdist.rpt")) {file.remove("hbcdist.rpt")}
distsum("mf.colldt", "College Distribution - Total", "ga", 3, "hbcdist", project, initials)
distsum("mf.colldtl", "College Distribution - LowInc", "ga", 3, "hbcdist", project, initials)
distsum("mf.colldtm", "College Distribution - MidInc", "ga", 3, "hbcdist", project, initials)
distsum("mf.colldth", "College Distribution - HighInc", "ga", 3, "hbcdist", project, initials)
/kate_programs_v2.0/k.hbcdist.R
|
no_license
|
tkangk/tdm
|
R
| false
| false
| 6,742
|
r
|
#k.hbcdist.R
# Weighted Average Logsum
mf.hcls <- HBC.lsLowWeight*mf.hclsl + HBC.lsMidWeight*mf.hclsm + HBC.lsHighWeight*mf.hclsh
east2westhill<-as.matrix(array(0,c(numzones,numzones)))
east2westhill[ensemble.gw==2,ensemble.gw==1]<-1
westhill2east<-as.matrix(array(0,c(numzones,numzones)))
westhill2east[ensemble.gw==1,ensemble.gw==2]<-1
east2westriv<-as.matrix(array(0,c(numzones,numzones)))
east2westriv[ensemble.gr==2,ensemble.gr==1]<-1
westriv2east<-as.matrix(array(0,c(numzones,numzones)))
westriv2east[ensemble.gr==1,ensemble.gr==2]<-1
#############################################################
# Raw HBC Utility #
#############################################################
mf.collat <- matrix (ma.collat, length(ma.collat), length(ma.collat), byrow=T)
mf.util <- exp(sweep(HBC.lsCoeff * mf.hcls
+ HBC.logdistXorwaCoeff * mf.orwa * log (mf.tdist + 1)
+ HBC.logdistXwaorCoeff * mf.waor * log (mf.tdist + 1)
+ HBC.logdistXnoXingCoeff * ((mf.orwa + mf.waor)==0) * log (mf.tdist + 1)
+ HBC.logdistXewWestHillsCoeff * east2westhill * log (mf.tdist + 1)
+ HBC.logdistXweWestHillsCoeff * westhill2east * log (mf.tdist + 1)
+ HBC.logdistXewWillRiverCoeff * east2westriv * log (mf.tdist + 1)
+ HBC.logdistXweWillRiverCoeff * westriv2east * log (mf.tdist + 1)
, 2, log(HBC.enrollCoeff * ma.enroll + 1), "+"))
mf.util[mf.collat[,]==0] <- 0
ma.utsum <- apply(mf.util,1,sum)
mf.utsum <- matrix(ma.utsum,length(ma.utsum),length(ma.utsum))
# Low Income Distribution
mf.hbcdtl <- matrix(0,numzones,numzones)
mf.hbcdtl[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0]
mf.hbcdtl <- sweep(mf.hbcdtl,1,ma.collprl,"*")
if (mce){
ma.hcldcls <- log(ma.utsum)
# save (ma.hbcldcls, file="ma.hbcldcls.dat")
# write.table(ma.hbcldcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbcldcls.csv", col.names=c("hbcldcls"))
# write.table(ma.collprl, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.collprl.csv", col.names=c("hbcprl"))
}
# Middle Income Distribution
mf.hbcdtm <- matrix(0,numzones,numzones)
mf.hbcdtm[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0]
mf.hbcdtm <- sweep(mf.hbcdtm,1,ma.collprm,"*")
if (mce){
ma.hcmdcls <- log(ma.utsum)
# save (ma.hbcmdcls, file="ma.hbcmdcls.dat")
# write.table(ma.hbcmdcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbcmdcls.csv", col.names=c("hbcmdcls"))
# write.table(ma.collprm, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.collprm.csv", col.names=c("hbcprm"))
}
# High Income Distribution
mf.hbcdth <- matrix(0,numzones,numzones)
mf.hbcdth[mf.utsum!=0] <- mf.util[mf.utsum!=0]/mf.utsum[mf.utsum!=0]
mf.hbcdth <- sweep(mf.hbcdth,1,ma.collprh,"*")
if (mce){
ma.hchdcls <- log(ma.utsum)
# save (ma.hbchdcls, file="ma.hbchdcls.dat")
# write.table(ma.hbchdcls, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.hbchdcls.csv", col.names=c("hbchdcls"))
# write.table(ma.collprh, sep=",", row.names=F, file="../_mceInputs/nonskims/ma.collprh.csv", col.names=c("hbcprh"))
}
if (mce) {
purpose_dc <- 'hc'
omxFileName <- paste(project.dir,"/_mceInputs/",project,"_",year,"_",alternative,"_dest_choice_",purpose_dc,".omx",sep='')
create_omx(omxFileName, numzones, numzones, 7)
write_omx(file=omxFileName,
matrix=get(paste("ma.",purpose_dc,"ldcls",sep='')),
name=paste("ma.",purpose_dc,"ldcls",sep=''),
replace=TRUE)
write_omx(file=omxFileName,
matrix=get(paste("ma.",purpose_dc,"mdcls",sep='')),
name=paste("ma.",purpose_dc,"mdcls",sep=''),
replace=TRUE)
write_omx(file=omxFileName,
matrix=get(paste("ma.",purpose_dc,"hdcls",sep='')),
name=paste("ma.",purpose_dc,"hdcls",sep=''),
replace=TRUE)
}
#############################################################
# Output productions and attractions - WRS 11/28/06
#############################################################
if (file.access("panda_coll.rpt", mode=0) == 0) {system ("rm panda_coll.rpt")}
panda <- matrix (0,8,2)
panda[1,1] <- round (sum (ma.collpr[ensemble.ga==1]),0)
panda[2,1] <- round (sum (ma.collpr[ensemble.ga==2]),0)
panda[3,1] <- round (sum (ma.collpr[ensemble.ga==3]),0)
panda[4,1] <- round (sum (ma.collpr[ensemble.ga==4]),0)
panda[5,1] <- round (sum (ma.collpr[ensemble.ga==5]),0)
panda[6,1] <- round (sum (ma.collpr[ensemble.ga==6]),0)
panda[7,1] <- round (sum (ma.collpr[ensemble.ga==7]),0)
panda[8,1] <- round (sum (ma.collpr[ensemble.ga==8]),0)
panda[1,2] <- round (sum (ma.collat[ensemble.ga==1]),0)
panda[2,2] <- round (sum (ma.collat[ensemble.ga==2]),0)
panda[3,2] <- round (sum (ma.collat[ensemble.ga==3]),0)
panda[4,2] <- round (sum (ma.collat[ensemble.ga==4]),0)
panda[5,2] <- round (sum (ma.collat[ensemble.ga==5]),0)
panda[6,2] <- round (sum (ma.collat[ensemble.ga==6]),0)
panda[7,2] <- round (sum (ma.collat[ensemble.ga==7]),0)
panda[8,2] <- round (sum (ma.collat[ensemble.ga==8]),0)
rownames (panda) <- c("ga1","ga2","ga3","ga4","ga5","ga6","ga7","ga8")
colnames (panda) <- c("collpr","collat")
outfile <- file ("panda_hbc.rpt", "w")
writeLines (project, con=outfile, sep="\n")
writeLines (paste ("Metro (", toupper (initials), ") - ", date(), sep=''),
con=outfile, sep="\n")
writeLines ("ens\tcollpr\tcollat", con=outfile,sep="\n")
close (outfile)
write.table (panda,"panda_hbc.rpt",append=T,row.names=T,col.names=F,quote=F,sep="\t")
# Balance Distribution Matrix
source(paste(R.path, "k.balance.R", sep='/'))
mf.colldtl <- balance (mf.hbcdtl,ma.collprl,ma.collatl,100)
mf.colldtm <- balance (mf.hbcdtm,ma.collprm,ma.collatm,100)
mf.colldth <- balance (mf.hbcdth,ma.collprh,ma.collath,100)
#############################################################
# Total HBC Distribution #
#############################################################
mf.colldt <- mf.colldtl + mf.colldtm + mf.colldth
# Remove temporary matrices
rm(ma.utsum,mf.util,mf.collat)
# 8-district summaries
if (file.access("hbcdist.rpt", mode=0) == 0) {system ("rm hbcdist.rpt")}
distsum("mf.colldt", "College Distribution - Total", "ga", 3, "hbcdist", project, initials)
distsum("mf.colldtl", "College Distribution - LowInc", "ga", 3, "hbcdist", project, initials)
distsum("mf.colldtm", "College Distribution - MidInc", "ga", 3, "hbcdist", project, initials)
distsum("mf.colldth", "College Distribution - HighInc", "ga", 3, "hbcdist", project, initials)
|
# Convert the Date and Time character columns into a POSIXlt date-time.
# Assumes the data frame 'subsettd' is already loaded, with Date formatted
# as dd/mm/yyyy and Time as HH:MM:SS -- TODO confirm against the loader.
d<- as.Date(subsettd$Date, "%d/%m/%Y")
date_heure <- strptime(paste(d, subsettd$Time),"%Y-%m-%d %H:%M:%S")
# Open a PNG graphics device (480x480).  (Original comment said "jpeg
# device", but png() is what is actually used.)
png("plot4.png", width = 480, height = 480)
# Lay out a 2x2 grid of plots
par(mfrow= c(2,2))
# Plot 1 (top-left): global active power over time
plot(date_heure, subsettd$Global_active_power,xlab = "", ylab="Global_active_power", type="l")
# Plot 2 (top-right): voltage over time
plot(date_heure, subsettd$Voltage, xlab="datetime", ylab="Voltage", type="l")
# Plot 3 (bottom-left): the three energy sub-metering series with a legend
plot(date_heure, subsettd$Sub_metering_1, xlab="", ylab="Energy Sub_metering", type="l")
lines(date_heure, subsettd$Sub_metering_2, col = "red")
lines(date_heure, subsettd$Sub_metering_3, col = "blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
,col=c("black", "red", "blue"), lty= 1, bty="n")
# Plot 4 (bottom-right): global reactive power over time
plot(date_heure, subsettd$Global_reactive_power, xlab="datetime", ylab="Global_reactive_power", type="l")
# Close the device, flushing plot4.png to disk
dev.off()
/plot4.r
|
no_license
|
herve78150/ExData_Plotting1
|
R
| false
| false
| 901
|
r
|
#Conversion DateTime
d<- as.Date(subsettd$Date, "%d/%m/%Y")
date_heure <- strptime(paste(d, subsettd$Time),"%Y-%m-%d %H:%M:%S")
# Plot on jpeg device
png("plot4.png", width = 480, height = 480)
par(mfrow= c(2,2))
# 1er Graph
plot(date_heure, subsettd$Global_active_power,xlab = "", ylab="Global_active_power", type="l")
# 2eme Graph
plot(date_heure, subsettd$Voltage, xlab="datetime", ylab="Voltage", type="l")
# 3eme Graph
plot(date_heure, subsettd$Sub_metering_1, xlab="", ylab="Energy Sub_metering", type="l")
lines(date_heure, subsettd$Sub_metering_2, col = "red")
lines(date_heure, subsettd$Sub_metering_3, col = "blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
,col=c("black", "red", "blue"), lty= 1, bty="n")
# 4 eme Graph
plot(date_heure, subsettd$Global_reactive_power, xlab="datetime", ylab="Global_reactive_power", type="l")
dev.off()
|
# Calculate partial procruses analysis for functional beta diversity -----------
# Authors(s): Nicholas Sookhan, Garland Xie
# Institutional affiliation: University of Toronto
# libraries --------------------------------------------------------------------
library(vegan) # for conducting partial procrustes analysis
library(here) # for creating relative file-paths
library(readr) # for reading csv files
library(dplyr) # for manipulating data in R
library(tibble) # for converting rownames to columnnames ( + vice versa)
library(ggplot2) # for visualizing data
library(broom)
library(picante)
# import -----------------------------------------------------------------------
# relative file-paths (project-rooted via here())
env_250_path <- here("data/original", "land_use_metrics_250.csv")
env_500_path <- here("data/original", "land_use_metrics_500.csv")
func_beta_path <- here("data/working", "func_beta_matrices.rds")
dist_spa_path <- here("data/original", "spatial_distance_matrix.csv")
# import the data
env_250 <- read_csv(env_250_path)
env_500 <- read_csv(env_500_path)
# func_beta holds the components funct.beta.sor / .sim / .sne accessed below
func_beta <- readRDS(func_beta_path)
dist_spa <- read_csv(dist_spa_path)
# check packaging --------------------------------------------------------------
# interactive sanity checks: structure and first/last rows of each input
# environmental variables
glimpse(env_250)
head(env_250, n = 5)
tail(env_250, n = 5)
glimpse(env_500)
head(env_500, n = 5)
tail(env_500, n = 5)
# spatial distance
glimpse(dist_spa)
head(dist_spa, n = 5)
tail(dist_spa, n = 5)
# beta diversity (total)
dim(as.matrix(func_beta$funct.beta.sor))
# beta diversity (turnover)
dim(as.matrix(func_beta$funct.beta.sim))
# beta diversity (nestedness)
dim(as.matrix(func_beta$funct.beta.sne))
# clean data: functional beta diversity ----------------------------------------
# spatial turnover
func_tu <- as.matrix(func_beta$funct.beta.sim)
# nestedness
func_ne <- as.matrix(func_beta$funct.beta.sne)
# total beta diversity
func_tot <- as.matrix(func_beta$funct.beta.sor)
# verify equal dimension sizes (rows and columns)
# NOTE(review): all() over concatenated dim() vectors tests "all non-zero",
# not equality of dimensions across matrices -- confirm this is intended
all(dim(func_ne), dim(func_tot), dim(func_tu))
# verify equal rownames
all(rownames(func_ne) == rownames(func_tot))
all(rownames(func_tu) == rownames(func_tot))
all(rownames(func_ne) == rownames(func_tu))
# verify symmetrical matrices
isSymmetric.matrix(func_ne)
isSymmetric.matrix(func_tu)
isSymmetric.matrix(func_tot)
# verify functional distances are equal or above zero
# (original comment said "phylogenetic"; these are the functional matrices)
all(func_tu >= 0)
all(func_ne >= 0)
all(func_tot >= 0)
# clean data: environmental data -----------------------------------------------
# subset metric data to include sites with diversity data
# func beta diversity datasets are restricted to sites with 2+ species
# equal dimensions for func beta diversity data: use any matrix for subsetting
env_250_tidy <- env_250 %>%
select(ID = "X1",
grass_250_percent = "prop.landscape_250_grass",
tree_250_percent = "prop.landscape_250_tree_canopy",
urban_250_percent = "prop.landscape_250_urban") %>%
filter(ID %in% rownames(func_tu)) %>%
column_to_rownames(var = "ID")
env_500_tidy <- env_500 %>%
select(ID = "X1",
grass_500_percent = "prop.landscape_500_grass",
tree_500_percent = "prop.landscape_500_tree_canopy",
urban_500_percent = "prop.landscape_500_urban") %>%
filter(ID %in% rownames(func_tu)) %>%
column_to_rownames(var = "ID")
# double check
glimpse(env_250_tidy)
glimpse(env_500_tidy)
# double check: subsets have an environmental gradient?
# high variation in % impervious cover
# use histograms for quick data viz
env_250_tidy %>%
ggplot(aes(x = urban_250_percent)) +
geom_histogram(bins = 30) +
labs(x = "% Impervious Cover",
y = "",
title = "250m buffer") +
theme_minimal()
env_500_tidy %>%
ggplot(aes(x = urban_500_percent)) +
geom_histogram(bins = 30) +
labs(x = "% Impervious Cover",
y = "",
title = "500m buffer") +
theme_minimal()
# clean data: spatial distance ------------------------------------------------
# subset rows AND columns to match the sites kept in each env subset
dist_spa_250 <- dist_spa %>%
select(ID = X1,
rownames(env_250_tidy)) %>%
filter(ID %in% rownames(env_250_tidy)) %>%
column_to_rownames(var = "ID") %>%
as.matrix()
dist_spa_500 <- dist_spa %>%
select(ID = X1,
rownames(env_500_tidy)) %>%
filter(ID %in% rownames(env_500_tidy)) %>%
column_to_rownames(var = "ID") %>%
as.matrix()
# double checks
isSymmetric.matrix(dist_spa_250)
isSymmetric.matrix(dist_spa_500)
# PCA: environmental data (250m) -----------------------------------------------
# perform PCA on environmental variables
# standardize all variables (scale = TRUE -> correlation-matrix PCA)
pc_env_250 <- prcomp(env_250_tidy, scale = TRUE)
# check cumulative variance explained per axis
summary(pc_env_250)
screeplot(pc_env_250)
# plot score loadings
biplot(pc_env_250)
# get scores for the first two PCA axes
scores_env_250 <- scores(pc_env_250, display = "sites", choice = 1:2)
# PCA: environmental distance (500m) ------------------------------------------
# perform PCA on environmental variables
# standardize all variables
pc_env_500 <- prcomp(env_500_tidy, scale = TRUE)
# check cumulative variance explained per axis
summary(pc_env_500)
screeplot(pc_env_500)
# plot score loadings
biplot(pc_env_500)
# get scores for first two PCA axes
scores_env_500 <- scores(pc_env_500, display = "sites", choice = 1:2)
# PCoA: spatial distance (250m) ------------------------------------------------
# conduct principal coordinate analysis
# apply square root transformation on raw data to avoid negative eigenvalues
# Taken from Legendre (2018). Principal Coordinate Analysis.
pc_spa_250 <- pcoa(sqrt(dist_spa_250))
# double check: no negative eigenvalues (visual inspection)
plot(pc_spa_250$values$Eigenvalues)
# quick ordination plot of the first two axes
pc_spa_250$vectors %>%
as.data.frame() %>%
select(Axis.1, Axis.2) %>%
ggplot(aes(x = Axis.1, y = Axis.2)) +
geom_point() +
theme_minimal()
# get scores for the first two axes
scores_spa_250 <- pc_spa_250$vectors[, 1:2]
# PCoA: spatial distance (500m) ------------------------------------------------
# conduct principal coordinate analysis
# apply square root transformation on raw data to avoid negative eigenvalues
# Taken from Legendre (2018). Principal Coordinate Analysis.
pc_spa_500 <- pcoa(sqrt(dist_spa_500))
# double check: no negative eigenvalues (visual inspection)
plot(pc_spa_500$values$Eigenvalues)
# quick ordination plot of the first two axes
pc_spa_500$vectors %>%
as.data.frame() %>%
select(Axis.1, Axis.2) %>%
ggplot(aes(x = Axis.1, y = Axis.2)) +
geom_point() +
theme_minimal()
# get scores for the first two axes
scores_spa_500 <- pc_spa_500$vectors[, 1:2]
# PCoA: func nestedness (250m) --------------------------------------------
# prep nestedness: subset rows and columns to the 250m sites
func_ne_250 <- func_ne %>%
as.data.frame() %>%
select(rownames(env_250_tidy)) %>%
rownames_to_column(var = "ID") %>%
filter(ID %in% rownames(env_250_tidy)) %>%
column_to_rownames(var = "ID") %>%
as.matrix()
# verify dimensions
dim(func_ne_250)
# pcoa on nestedness (lingoes correction for negative eigenvalues)
pc_func_ne_250 <- pcoa(func_ne_250, correction = "lingoes")
# check for negative eigenvalues
# NOTE(review): sibling sections inspect $values$Corr_eig (the corrected
# eigenvalues); this checks uncorrected $Eigenvalues -- confirm intent
all(pc_func_ne_250$values$Eigenvalues >= 0)
# check for cumulative eigenvalues
# NOTE(review): other sections use $values$Cum_corr_eig; Rel_corr_eig is
# the per-axis relative eigenvalue -- confirm which is intended
pc_func_ne_250$values$Rel_corr_eig
# quick ordination plot (uncorrected axes; scores below use vectors.cor)
pc_func_ne_250$vectors %>%
as.data.frame() %>%
select(Axis.1, Axis.2) %>%
ggplot(aes(x = Axis.1, y = Axis.2)) +
geom_point() +
theme_minimal()
# get scores of the first two (lingoes-corrected) axes
scores_func_ne_250 <- pc_func_ne_250$vectors.cor[, 1:2]
# PCoA: func nestedness (500m) --------------------------------------------
# prep nestedness: subset rows and columns to the 500m sites
func_ne_500 <- func_ne %>%
as.data.frame() %>%
select(rownames(env_500_tidy)) %>%
rownames_to_column(var = "ID") %>%
filter(ID %in% rownames(env_500_tidy)) %>%
column_to_rownames(var = "ID") %>%
as.matrix()
# pcoa on nestedness (lingoes correction for negative eigenvalues)
pc_func_ne_500 <- pcoa(func_ne_500, correction = "lingoes")
# check for negative eigenvalues (see NOTE above re: Eigenvalues vs Corr_eig)
all(pc_func_ne_500$values$Eigenvalues >= 0)
# check for cumulative (lingoes-corrected) eigenvalues
pc_func_ne_500$values$Cum_corr_eig
# quick ordination plot (uncorrected axes; scores below use vectors.cor)
pc_func_ne_500$vectors %>%
as.data.frame() %>%
select(Axis.1, Axis.2) %>%
ggplot(aes(x = Axis.1, y = Axis.2)) +
geom_point() +
theme_minimal()
# get scores of the first two (lingoes-corrected) axes
scores_func_ne_500 <- pc_func_ne_500$vectors.cor[, 1:2]
# PCoA: func turnover (250m) --------------------------------------------
# subset the turnover matrix to the 250m sites
func_tu_250 <- func_tu %>%
as.data.frame() %>%
select(rownames(env_250_tidy)) %>%
rownames_to_column(var = "ID") %>%
filter(ID %in% rownames(env_250_tidy)) %>%
column_to_rownames(var = "ID") %>%
as.matrix()
# verify dimensions
dim(func_tu_250)
# pcoa on turnover (original comment said "nestedness"; this is turnover)
pc_func_tu_250 <- pcoa(func_tu_250, correction = "lingoes")
# check for negative (lingoes-corrected) eigenvalues
all(pc_func_tu_250$values$Corr_eig >= 0)
# check for cumulative (lingoes-corrected) eigenvalues
pc_func_tu_250$values$Cum_corr_eig
# quick ordination plot of the corrected axes
pc_func_tu_250$vectors.cor %>%
as.data.frame() %>%
select(Axis.1, Axis.2) %>%
ggplot(aes(x = Axis.1, y = Axis.2)) +
geom_point() +
theme_minimal()
# get scores of the first two (corrected) axes
scores_func_tu_250 <- pc_func_tu_250$vectors.cor[, 1:2]
# PCoA: func turnover (500m) --------------------------------------------
# subset the turnover matrix to the 500m sites
func_tu_500 <- func_tu %>%
  as.data.frame() %>%
  select(rownames(env_500_tidy)) %>%
  rownames_to_column(var = "ID") %>%
  filter(ID %in% rownames(env_500_tidy)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# verify dimensions
dim(func_tu_500)
# pcoa on turnover (500m scale)
# apply a lingoes transformation to avoid negative eigenvalues
pc_func_tu_500 <- pcoa(func_tu_500, correction = "lingoes")
# check for negative (lingoes-corrected) eigenvalues
pc_func_tu_500$values$Corr_eig
# check for cumulative (lingoes-corrected) eigenvalues
# (fix: was pc_func_tu_500$vectors.cor, which prints the corrected axis
# scores, not the cumulative eigenvalues every sibling section reports)
pc_func_tu_500$values$Cum_corr_eig
# quick ordination plot of the corrected axes
pc_func_tu_500$vectors.cor %>%
  as.data.frame() %>%
  select(Axis.1, Axis.2) %>%
  ggplot(aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# get scores of the first two (corrected) axes
scores_func_tu_500 <- pc_func_tu_500$vectors.cor[, 1:2]
# PCoA: func total (250m) --------------------------------------------
# subset the total beta-diversity matrix to the 250m sites
func_tot_250 <- func_tot %>%
as.data.frame() %>%
select(rownames(env_250_tidy)) %>%
rownames_to_column(var = "ID") %>%
filter(ID %in% rownames(env_250_tidy)) %>%
column_to_rownames(var = "ID") %>%
as.matrix()
# verify dimensions
dim(func_tot_250)
# pcoa on total functional beta diversity
# apply lingoes transformation to correct for negative eigenvalues
pc_func_tot_250 <- pcoa(func_tot_250, correction = "lingoes")
# double check lingoes-corrected eigenvalues
pc_func_tot_250$values$Corr_eig
# check for cumulative (lingoes-corrected) eigenvalues
pc_func_tot_250$values$Cum_corr_eig
# quick ordination plot of the corrected axes
pc_func_tot_250$vectors.cor %>%
as.data.frame() %>%
select(Axis.1, Axis.2) %>%
ggplot(aes(x = Axis.1, y = Axis.2)) +
geom_point() +
theme_minimal()
# get scores of the first two (corrected) axes
scores_func_tot_250 <- pc_func_tot_250$vectors.cor[, 1:2]
# PCoA: func total (500m) --------------------------------------------
# subset the total beta-diversity matrix to the 500m sites
func_tot_500 <- func_tot %>%
  as.data.frame() %>%
  select(rownames(env_500_tidy)) %>%
  rownames_to_column(var = "ID") %>%
  filter(ID %in% rownames(env_500_tidy)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# verify dimensions
dim(func_tot_500)
# pcoa on total functional beta diversity
# apply lingoes transformation to correct for negative eigenvalues
pc_func_tot_500 <- pcoa(func_tot_500, correction = "lingoes")
# double check lingoes-corrected eigenvalues
pc_func_tot_500$values$Corr_eig
# check for cumulative (lingoes-corrected) eigenvalues
# (fix: was pc_func_tot_250$values$Cum_corr_eig -- a copy-paste slip that
# re-inspected the 250m ordination instead of the 500m one computed above)
pc_func_tot_500$values$Cum_corr_eig
# quick ordination plot of the corrected axes
pc_func_tot_500$vectors.cor %>%
  as.data.frame() %>%
  select(Axis.1, Axis.2) %>%
  ggplot(aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# get scores of the first two (corrected) axes
scores_func_tot_500 <- pc_func_tot_500$vectors.cor[, 1:2]
# Partial procrustes analysis: nestedness --------------------------------------
# Residualize both ordinations on the spatial PCoA axes, then run a
# Procrustes permutation test (protest) on the residuals so the
# env ~ beta-diversity concordance is assessed after removing space.
# control for space (250m buffer)
# (renamed from resid_spa_phy_ne_250: this is the FUNCTIONAL nestedness,
# not phylogenetic -- now matches the naming of every sibling section)
resid_spa_func_ne_250 <- resid(lm(scores_func_ne_250 ~ scores_spa_250))
resid_spa_env_250 <- resid(lm(scores_env_250 ~ scores_spa_250))
# control for space (500m buffer)
resid_spa_func_ne_500 <- resid(lm(scores_func_ne_500 ~ scores_spa_500))
resid_spa_env_500 <- resid(lm(scores_env_500 ~ scores_spa_500))
# run analysis
parpro_ne_250 <- protest(X = resid_spa_env_250, Y = resid_spa_func_ne_250)
parpro_ne_500 <- protest(X = resid_spa_env_500, Y = resid_spa_func_ne_500)
# Partial procrustes analysis: turnover ----------------------------------------
# Residualize the turnover and environment ordinations on space, then
# test their concordance with a Procrustes permutation test (protest).
# control for space (250m buffer)
resid_spa_func_tu_250<- resid(lm(scores_func_tu_250 ~ scores_spa_250))
resid_spa_env_250 <- resid(lm(scores_env_250 ~ scores_spa_250))
# control for space (500m buffer)
resid_spa_func_tu_500 <- resid(lm(scores_func_tu_500 ~ scores_spa_500))
resid_spa_env_500 <- resid(lm(scores_env_500 ~ scores_spa_500))
# run analysis
parpro_tu_250 <- protest(resid_spa_env_250, resid_spa_func_tu_250)
parpro_tu_500 <- protest(resid_spa_env_500, resid_spa_func_tu_500)
# Partial procrustes analysis: total -------------------------------------------
# Same procedure for total functional beta diversity.
# control for space (250m buffer)
resid_spa_func_tot_250<- resid(lm(scores_func_tot_250 ~ scores_spa_250))
resid_spa_env_250 <- resid(lm(scores_env_250 ~ scores_spa_250))
# control for space (500m buffer)
resid_spa_func_tot_500 <- resid(lm(scores_func_tot_500 ~ scores_spa_500))
resid_spa_env_500 <- resid(lm(scores_env_500 ~ scores_spa_500))
# run analysis
parpro_tot_250 <- protest(resid_spa_env_250, resid_spa_func_tot_250)
parpro_tot_500 <- protest(resid_spa_env_500, resid_spa_func_tot_500)
|
/src/procrustes_func.R
|
no_license
|
garlandxie/env_filt_bees
|
R
| false
| false
| 13,626
|
r
|
# Calculate partial procruses analysis for functional beta diversity -----------
# Authors(s): Nicholas Sookhan, Garland Xie
# Institutional affiliation: University of Toronto
# libraries --------------------------------------------------------------------
library(vegan) # for conducting partial procrustes analysis
library(here) # for creating relative file-paths
library(readr) # for reading csv files
library(dplyr) # for manipulating data in R
library(tibble) # for converting rownames to columnnames ( + vice versa)
library(ggplot2) # for visualizing data
library(broom)
library(picante)
# import -----------------------------------------------------------------------
# relative file-paths
env_250_path <- here("data/original", "land_use_metrics_250.csv")
env_500_path <- here("data/original", "land_use_metrics_500.csv")
func_beta_path <- here("data/working", "func_beta_matrices.rds")
dist_spa_path <- here("data/original", "spatial_distance_matrix.csv")
# import the data
env_250 <- read_csv(env_250_path)
env_500 <- read_csv(env_500_path)
func_beta <- readRDS(func_beta_path)
dist_spa <- read_csv(dist_spa_path)
# check packaging --------------------------------------------------------------
# environmental variables
glimpse(env_250)
head(env_250, n = 5)
tail(env_250, n = 5)
glimpse(env_500)
head(env_500, n = 5)
tail(env_500, n = 5)
# spatial distance
glimpse(dist_spa)
head(dist_spa, n = 5)
tail(dist_spa, n = 5)
# beta diversity (total)
dim(as.matrix(func_beta$funct.beta.sor))
# beta diversity (turnover)
dim(as.matrix(func_beta$funct.beta.sim))
# beta diversity (nestedness)
dim(as.matrix(func_beta$funct.beta.sne))
# clean data: functional beta diversity ----------------------------------------
# spatial turnover
func_tu <- as.matrix(func_beta$funct.beta.sim)
# nestedness
func_ne <- as.matrix(func_beta$funct.beta.sne)
# total beta diversity
func_tot <- as.matrix(func_beta$funct.beta.sor)
# verify equal dimension sizes (rows and columns)
all(dim(func_ne), dim(func_tot), dim(func_tu))
# verify equal rownames
all(rownames(func_ne) == rownames(func_tot))
all(rownames(func_tu) == rownames(func_tot))
all(rownames(func_ne) == rownames(func_tu))
# verify symmetrical matrices
isSymmetric.matrix(func_ne)
isSymmetric.matrix(func_tu)
isSymmetric.matrix(func_tot)
# verify phylogenetic distances are equal or above zero
all(func_tu >= 0)
all(func_ne >= 0)
all(func_tot >= 0)
# clean data: environmental data -----------------------------------------------
# Keep only the sites that also appear in the functional beta-diversity
# matrices (those are restricted to sites with 2+ species). All three beta
# matrices share dimensions, so func_tu serves for the subsetting.
env_250_tidy <- env_250 %>%
  rename(ID = X1,
         grass_250_percent = prop.landscape_250_grass,
         tree_250_percent  = prop.landscape_250_tree_canopy,
         urban_250_percent = prop.landscape_250_urban) %>%
  select(ID, grass_250_percent, tree_250_percent, urban_250_percent) %>%
  filter(ID %in% rownames(func_tu)) %>%
  column_to_rownames(var = "ID")
env_500_tidy <- env_500 %>%
  rename(ID = X1,
         grass_500_percent = prop.landscape_500_grass,
         tree_500_percent  = prop.landscape_500_tree_canopy,
         urban_500_percent = prop.landscape_500_urban) %>%
  select(ID, grass_500_percent, tree_500_percent, urban_500_percent) %>%
  filter(ID %in% rownames(func_tu)) %>%
  column_to_rownames(var = "ID")
# double check
glimpse(env_250_tidy)
glimpse(env_500_tidy)
# double check: do the retained sites still span an urbanisation gradient?
# (look for wide variation in % impervious cover; quick histograms)
hist_urban_250 <- ggplot(env_250_tidy, aes(x = urban_250_percent)) +
  geom_histogram(bins = 30) +
  labs(x = "% Impervious Cover", y = "", title = "250m buffer") +
  theme_minimal()
hist_urban_250
hist_urban_500 <- ggplot(env_500_tidy, aes(x = urban_500_percent)) +
  geom_histogram(bins = 30) +
  labs(x = "% Impervious Cover", y = "", title = "500m buffer") +
  theme_minimal()
hist_urban_500
# clean data: spatial distance ------------------------------------------------
# Restrict the site-by-site distance matrix to the sites retained above for
# each buffer size, then coerce back to a base matrix.
keep_250 <- rownames(env_250_tidy)
dist_spa_250 <- dist_spa %>%
  filter(X1 %in% keep_250) %>%
  select(ID = X1, all_of(keep_250)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
keep_500 <- rownames(env_500_tidy)
dist_spa_500 <- dist_spa %>%
  filter(X1 %in% keep_500) %>%
  select(ID = X1, all_of(keep_500)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# double checks: subsetting must preserve symmetry
isSymmetric.matrix(dist_spa_250)
isSymmetric.matrix(dist_spa_500)
# PCA: environmental data (250m) -----------------------------------------------
# PCA on the land-use variables, standardized to unit variance.
# FIX: prcomp()'s argument is `scale.` -- the original `scale =` only worked
# through partial argument matching.
pc_env_250 <- prcomp(env_250_tidy, scale. = TRUE)
# check for cumulative eigenvalues (variance explained)
summary(pc_env_250)
screeplot(pc_env_250)
# plot score loadings
biplot(pc_env_250)
# site scores on the first two PCA axes
# FIX: vegan::scores() takes `choices` (plural); `choice =` relied on partial
# argument matching.
scores_env_250 <- scores(pc_env_250, display = "sites", choices = 1:2)
# PCA: environmental data (500m) -----------------------------------------------
pc_env_500 <- prcomp(env_500_tidy, scale. = TRUE)
# check for cumulative eigenvalues (variance explained)
summary(pc_env_500)
screeplot(pc_env_500)
# plot score loadings
biplot(pc_env_500)
# site scores on the first two PCA axes
scores_env_500 <- scores(pc_env_500, display = "sites", choices = 1:2)
# PCoA: spatial distance (250m) ------------------------------------------------
# Principal coordinate analysis of the square-rooted distances; the square
# root transform avoids negative eigenvalues (Legendre 2018, Principal
# Coordinate Analysis).
pc_spa_250 <- pcoa(sqrt(dist_spa_250))
# double check: no negative eigenvalues
plot(pc_spa_250$values$Eigenvalues)
# ordination plot of the first two axes
spa_axes_250 <- as.data.frame(pc_spa_250$vectors[, 1:2])
ggplot(spa_axes_250, aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# keep the scores on the first two axes
scores_spa_250 <- pc_spa_250$vectors[, 1:2]
# PCoA: spatial distance (500m) ------------------------------------------------
pc_spa_500 <- pcoa(sqrt(dist_spa_500))
# double check: no negative eigenvalues
plot(pc_spa_500$values$Eigenvalues)
# ordination plot of the first two axes
spa_axes_500 <- as.data.frame(pc_spa_500$vectors[, 1:2])
ggplot(spa_axes_500, aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# keep the scores on the first two axes
scores_spa_500 <- pc_spa_500$vectors[, 1:2]
# PCoA: func nestedness (250m) --------------------------------------------
# subset the nestedness matrix to the sites retained at the 250m buffer
func_ne_250 <- func_ne %>%
  as.data.frame() %>%
  select(rownames(env_250_tidy)) %>%
  rownames_to_column(var = "ID") %>%
  filter(ID %in% rownames(env_250_tidy)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# verify dimensions
dim(func_ne_250)
# PCoA with a Lingoes correction for negative eigenvalues
pc_func_ne_250 <- pcoa(func_ne_250, correction = "lingoes")
# check for negative eigenvalues
# FIX: test the Lingoes-corrected eigenvalues (Corr_eig); the raw
# Eigenvalues column is the uncorrected solution.
all(pc_func_ne_250$values$Corr_eig >= 0)
# check for cumulative eigenvalues
# FIX: Cum_corr_eig is the cumulative column the comment asked for;
# Rel_corr_eig (per-axis relative values) was inconsistent with the 500m
# section below.
pc_func_ne_250$values$Cum_corr_eig
# quick plot
# FIX: plot the corrected axes (vectors.cor), consistent with the scores
# extracted below.
pc_func_ne_250$vectors.cor %>%
  as.data.frame() %>%
  select(Axis.1, Axis.2) %>%
  ggplot(aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# scores on the first two corrected axes
scores_func_ne_250 <- pc_func_ne_250$vectors.cor[, 1:2]
# PCoA: func nestedness (500m) --------------------------------------------
func_ne_500 <- func_ne %>%
  as.data.frame() %>%
  select(rownames(env_500_tidy)) %>%
  rownames_to_column(var = "ID") %>%
  filter(ID %in% rownames(env_500_tidy)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# verify dimensions (added for consistency with the 250m section)
dim(func_ne_500)
# PCoA with a Lingoes correction for negative eigenvalues
pc_func_ne_500 <- pcoa(func_ne_500, correction = "lingoes")
# check for negative eigenvalues (Lingoes-corrected, see FIX above)
all(pc_func_ne_500$values$Corr_eig >= 0)
# check for cumulative eigenvalues
pc_func_ne_500$values$Cum_corr_eig
# quick plot (corrected axes)
pc_func_ne_500$vectors.cor %>%
  as.data.frame() %>%
  select(Axis.1, Axis.2) %>%
  ggplot(aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# scores on the first two corrected axes
scores_func_ne_500 <- pc_func_ne_500$vectors.cor[, 1:2]
# PCoA: func turnover (250m) --------------------------------------------
# subset the turnover matrix to the sites retained at the 250m buffer
func_tu_250 <- func_tu %>%
  as.data.frame() %>%
  select(rownames(env_250_tidy)) %>%
  rownames_to_column(var = "ID") %>%
  filter(ID %in% rownames(env_250_tidy)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# verify dimensions
dim(func_tu_250)
# PCoA with a Lingoes correction for negative eigenvalues
pc_func_tu_250 <- pcoa(func_tu_250, correction = "lingoes")
# check for negative eigenvalues (Lingoes-corrected)
all(pc_func_tu_250$values$Corr_eig >= 0)
# check for cumulative (Lingoes-corrected) eigenvalues
pc_func_tu_250$values$Cum_corr_eig
# quick plot of the corrected axes
pc_func_tu_250$vectors.cor %>%
  as.data.frame() %>%
  select(Axis.1, Axis.2) %>%
  ggplot(aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# scores on the first two corrected axes
scores_func_tu_250 <- pc_func_tu_250$vectors.cor[, 1:2]
# PCoA: func turnover (500m) --------------------------------------------
func_tu_500 <- func_tu %>%
  as.data.frame() %>%
  select(rownames(env_500_tidy)) %>%
  rownames_to_column(var = "ID") %>%
  filter(ID %in% rownames(env_500_tidy)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# verify dimensions
dim(func_tu_500)
# PCoA with a Lingoes correction for negative eigenvalues
# (typo fixed: "lingoez" -> "Lingoes")
pc_func_tu_500 <- pcoa(func_tu_500, correction = "lingoes")
# check for negative eigenvalues
# FIX: test the corrected eigenvalues, as the 250m section does, instead of
# only printing them.
all(pc_func_tu_500$values$Corr_eig >= 0)
# check for cumulative (Lingoes-corrected) eigenvalues
# BUG FIX: this step printed vectors.cor (the ordination axes) instead of
# the cumulative corrected eigenvalues the comment promised.
pc_func_tu_500$values$Cum_corr_eig
# quick plot of the corrected axes
pc_func_tu_500$vectors.cor %>%
  as.data.frame() %>%
  select(Axis.1, Axis.2) %>%
  ggplot(aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# scores on the first two corrected axes
scores_func_tu_500 <- pc_func_tu_500$vectors.cor[, 1:2]
# PCoA: func total (250m) --------------------------------------------
# subset the total beta-diversity matrix to the sites retained at 250m
func_tot_250 <- func_tot %>%
  as.data.frame() %>%
  select(rownames(env_250_tidy)) %>%
  rownames_to_column(var = "ID") %>%
  filter(ID %in% rownames(env_250_tidy)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# verify dimensions
dim(func_tot_250)
# PCoA with a Lingoes correction for negative eigenvalues
pc_func_tot_250 <- pcoa(func_tot_250, correction = "lingoes")
# check for negative (Lingoes-corrected) eigenvalues
# (made a proper test, consistent with the turnover section)
all(pc_func_tot_250$values$Corr_eig >= 0)
# check for cumulative (Lingoes-corrected) eigenvalues
pc_func_tot_250$values$Cum_corr_eig
# quick plot of the corrected axes
pc_func_tot_250$vectors.cor %>%
  as.data.frame() %>%
  select(Axis.1, Axis.2) %>%
  ggplot(aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# scores on the first two corrected axes
scores_func_tot_250 <- pc_func_tot_250$vectors.cor[, 1:2]
# PCoA: func total (500m) --------------------------------------------
func_tot_500 <- func_tot %>%
  as.data.frame() %>%
  select(rownames(env_500_tidy)) %>%
  rownames_to_column(var = "ID") %>%
  filter(ID %in% rownames(env_500_tidy)) %>%
  column_to_rownames(var = "ID") %>%
  as.matrix()
# verify dimensions
dim(func_tot_500)
# PCoA with a Lingoes correction for negative eigenvalues
pc_func_tot_500 <- pcoa(func_tot_500, correction = "lingoes")
# check for negative (Lingoes-corrected) eigenvalues
all(pc_func_tot_500$values$Corr_eig >= 0)
# check for cumulative (Lingoes-corrected) eigenvalues
# BUG FIX: this line inspected pc_func_tot_250 (copy-paste from the 250m
# section) instead of the 500m result.
pc_func_tot_500$values$Cum_corr_eig
# quick plot of the corrected axes
pc_func_tot_500$vectors.cor %>%
  as.data.frame() %>%
  select(Axis.1, Axis.2) %>%
  ggplot(aes(x = Axis.1, y = Axis.2)) +
  geom_point() +
  theme_minimal()
# scores on the first two corrected axes
scores_func_tot_500 <- pc_func_tot_500$vectors.cor[, 1:2]
# Partial Procrustes analyses --------------------------------------------------
# Residualize the ordination scores on the spatial PCoA scores so each
# Procrustes test compares environment and a functional beta-diversity
# component after controlling for space.
# The environmental residuals are identical across the three components, so
# compute them once per buffer (previously recomputed three times).
resid_spa_env_250 <- resid(lm(scores_env_250 ~ scores_spa_250))
resid_spa_env_500 <- resid(lm(scores_env_500 ~ scores_spa_500))
# nestedness
# FIX: renamed resid_spa_phy_ne_250 -> resid_spa_func_ne_250; the "phy"
# prefix was inconsistent with the "func" naming used everywhere else.
resid_spa_func_ne_250 <- resid(lm(scores_func_ne_250 ~ scores_spa_250))
resid_spa_func_ne_500 <- resid(lm(scores_func_ne_500 ~ scores_spa_500))
# turnover
resid_spa_func_tu_250 <- resid(lm(scores_func_tu_250 ~ scores_spa_250))
resid_spa_func_tu_500 <- resid(lm(scores_func_tu_500 ~ scores_spa_500))
# total beta diversity
resid_spa_func_tot_250 <- resid(lm(scores_func_tot_250 ~ scores_spa_250))
resid_spa_func_tot_500 <- resid(lm(scores_func_tot_500 ~ scores_spa_500))
# Procrustes permutation tests (vegan::protest): X = environment,
# Y = beta-diversity component (named arguments used consistently).
parpro_ne_250  <- protest(X = resid_spa_env_250, Y = resid_spa_func_ne_250)
parpro_ne_500  <- protest(X = resid_spa_env_500, Y = resid_spa_func_ne_500)
parpro_tu_250  <- protest(X = resid_spa_env_250, Y = resid_spa_func_tu_250)
parpro_tu_500  <- protest(X = resid_spa_env_500, Y = resid_spa_func_tu_500)
parpro_tot_250 <- protest(X = resid_spa_env_250, Y = resid_spa_func_tot_250)
parpro_tot_500 <- protest(X = resid_spa_env_500, Y = resid_spa_func_tot_500)
|
# Setup for the cause-of-death decomposition figures (Danish life expectancy
# and lifespan inequality, CoV).
library(ggplot2)
library(data.table)
library(reshape2)
library(RColorBrewer)
# NOTE(review): hard-coded absolute path -- breaks on any other machine;
# consider a project-relative path (e.g. the here package).
setwd("C:/Users/jmaburto/Documents/GitHub/Lifespan-inequality-Denmark")
# Cause-of-death grouping used throughout:
#2 Cancer, amenable to smoking
#3 Cancer, not amenable to smoking
#5 Cardiovascular & Diabetes mellitus (move to 5)
#6 Respiratory, infectious
#7 Respiratory, non-infectious
#8 External
#9 Other & Infectious, non-respiratory
# Presumably loads DT.Decomp.ex, DT.Decomp.cv, DT.compare.ex, DT.compare.cv
# (used below but not created here) -- verify against Results.RData.
load('Data/Results.RData')
# Desired legend/stacking order of causes in all figures.
new.level.order<- c("Smoking related cancer","Non-Smoking related cancer","Cardiovascular","Respiratory-Non-infectious","Respiratory-Infectious","External","Other","Above age 85")
# local(get(load(...))) loads each file into a throwaway environment and
# returns the loaded object.
# NOTE(review): Decomp.* / Compare.* are loaded but never referenced below.
Decomp.ex <- local(get(load("Data/DecompResults_ex_List.RData")))
Decomp.cv <- local(get(load("Data/DecompResults_cv_List.RData")))
Compare.ex <- local(get(load("Data/Compare_DecompResults_ex_List.RData")))
Compare.cv <- local(get(load("Data/Compare_DecompResults_cv_List.RData")))
# Collapse single years into three analysis periods (Year+1 shifts each year
# into the period it closes).
Labels.periods <- c('1960-1975','1975-1995','1995-2014')
DT.Decomp.ex$Period <- (cut(DT.Decomp.ex$Year+1, breaks=c(1960,1975,1995,Inf),labels=Labels.periods))
DT.Decomp.cv$Period <- (cut(DT.Decomp.cv$Year+1, breaks=c(1960,1975,1995,Inf),labels=Labels.periods))
# Aggregate yearly contributions within each period (drops the Year column).
DT.Decomp.ex <- DT.Decomp.ex[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period,Cause,Age)]
DT.Decomp.cv <- DT.Decomp.cv[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period,Cause,Age)]
# Colour palette: Spectral base with two manual substitutions.
base2 <- c(rev(brewer.pal(8,name = 'Spectral'))[1:5],rev(brewer.pal(8,name = 'Spectral'))[8],'lightgrey','lightpink')
base2[2] <- 'lightseagreen'
base2[4] <- 'tan1'
# Inspect the current level order before re-levelling (printed for reference).
levels(DT.compare.ex$Cause)
# Apply the common cause ordering to every data set.
DT.Decomp.ex$Cause <- factor(DT.Decomp.ex$Cause, levels = new.level.order)
DT.Decomp.cv$Cause <- factor(DT.Decomp.cv$Cause, levels = new.level.order)
DT.compare.ex$Cause <- factor(DT.compare.ex$Cause, levels = new.level.order)
DT.compare.cv$Cause <- factor(DT.compare.cv$Cause, levels = new.level.order)
#base2 <- c("#66C2A5", "#ABDDA4",'peachpuff2', '#fc8d62','coral2', "#D53E4F" , "lightgrey" ,"lightpink")
# Figure: cause-of-death decomposition for Danish females.
# Panel A -- contributions to changes in life expectancy, by age and period.
p <- ggplot(DT.Decomp.ex[DT.Decomp.ex$Sex == 'Female' & DT.Decomp.ex$Country=='Denmark',], aes(x = Age, y = Contribution, fill = Cause)) +
  ggtitle('A Decomposition of life expectancy', subtitle = 'Danish females. Negative (positive) values decrease (increase) life expectancy')+
  facet_wrap(~Period)+
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity",position = "stack")+
  theme_light()+
  theme(text = element_text(size=10),
        axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
  labs(x = "Age group", y = "Years",size=10)+
  theme(text = element_text(size=10),
        strip.text.x = element_text(size = 10, colour = "black"))+
  theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
        legend.position = c(0.9, 0.18))+
  geom_hline(yintercept = 0)+
  coord_flip()
p
# Panel B -- contributions to changes in lifespan inequality (CoV).
q <- ggplot(DT.Decomp.cv[DT.Decomp.cv$Sex == 'Female' & DT.Decomp.cv$Country=='Denmark',], aes(x = Age, y = Contribution, fill = Cause)) +
  ggtitle('B Decomposition of lifespan inequality (CoV)', subtitle = 'Danish females. Negative (positive) values decrease (increase) CoV')+
  facet_wrap(~Period)+
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity",position = "stack", show.legend = F)+
  # FIX: dropped the redundant coord_cartesian(ylim = ...) -- the coord_flip()
  # below replaces any earlier coordinate system (ggplot2 warned about it) and
  # already carries the same ylim.
  theme_light()+
  theme(text = element_text(size=10),
        axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
  labs(x = "Age group", y = "Units",size=10)+
  theme(text = element_text(size=10),
        strip.text.x = element_text(size = 10, colour = "black"))+
  theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
        legend.position = c(0.93, 0.25))+
  geom_hline(yintercept = 0)+
  coord_flip(ylim =c(-.0035, .003) )
q
# Combine both panels into one PDF.
require(gridExtra)
pdf(file="R/Figures/DX_Decomp.pdf",width=12,height=11,pointsize=4,useDingbats = F)
grid.arrange(p,q,nrow=2)
dev.off()
# NOTE(review): this whole setup block duplicates the one at the top of the
# file (libraries, setwd, loads, period aggregation, palette, re-levelling).
# It only works because load('Data/Results.RData') restores the original
# DT.Decomp.* tables (the earlier aggregation dropped the Year column).
# Consider removing the duplication.
library(ggplot2)
library(data.table)
library(reshape2)
library(RColorBrewer)
# NOTE(review): hard-coded absolute path -- breaks on any other machine.
setwd("C:/Users/jmaburto/Documents/GitHub/Lifespan-inequality-Denmark")
#2 Cancer, amenable to smoking
#3 Cancer, not amenable to smoking
#5 Cardiovascular & Diabetes mellitus (move to 5)
#6 Respiratory, infectious
#7 Respiratory, non-infectious
#8 External
#9 Other & Infectious, non-respiratory
load('Data/Results.RData')
new.level.order<- c("Smoking related cancer","Non-Smoking related cancer","Cardiovascular","Respiratory-Non-infectious","Respiratory-Infectious","External","Other","Above age 85")
Decomp.ex <- local(get(load("Data/DecompResults_ex_List.RData")))
Decomp.cv <- local(get(load("Data/DecompResults_cv_List.RData")))
Compare.ex <- local(get(load("Data/Compare_DecompResults_ex_List.RData")))
Compare.cv <- local(get(load("Data/Compare_DecompResults_cv_List.RData")))
# Collapse single years into the three analysis periods.
Labels.periods <- c('1960-1975','1975-1995','1995-2014')
DT.Decomp.ex$Period <- (cut(DT.Decomp.ex$Year+1, breaks=c(1960,1975,1995,Inf),labels=Labels.periods))
DT.Decomp.cv$Period <- (cut(DT.Decomp.cv$Year+1, breaks=c(1960,1975,1995,Inf),labels=Labels.periods))
DT.Decomp.ex <- DT.Decomp.ex[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period,Cause,Age)]
DT.Decomp.cv <- DT.Decomp.cv[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period,Cause,Age)]
# Colour palette: Spectral base with two manual substitutions.
base2 <- c(rev(brewer.pal(8,name = 'Spectral'))[1:5],rev(brewer.pal(8,name = 'Spectral'))[8],'lightgrey','lightpink')
base2[2] <- 'lightseagreen'
base2[4] <- 'tan1'
# Printed for reference before re-levelling.
levels(DT.compare.ex$Cause)
DT.Decomp.ex$Cause <- factor(DT.Decomp.ex$Cause, levels = new.level.order)
DT.Decomp.cv$Cause <- factor(DT.Decomp.cv$Cause, levels = new.level.order)
DT.compare.ex$Cause <- factor(DT.compare.ex$Cause, levels = new.level.order)
DT.compare.cv$Cause <- factor(DT.compare.cv$Cause, levels = new.level.order)
#base2 <- c("#66C2A5", "#ABDDA4",'peachpuff2', '#fc8d62','coral2', "#D53E4F" , "lightgrey" ,"lightpink")
# Figure: cause-of-death decomposition for Danish males (mirrors the female
# figure above).
# Panel A -- contributions to changes in life expectancy, by age and period.
# NOTE(review): the plot object is named `t`, which shadows base R's
# transpose function t() for the rest of the session.
t <- ggplot(DT.Decomp.ex[DT.Decomp.ex$Sex == 'Male' & DT.Decomp.ex$Country=='Denmark',], aes(x = Age, y = Contribution, fill = Cause)) +
  ggtitle('A Decomposition of life expectancy', subtitle = 'Danish males. Negative (positive) values decrease (increase) life expectancy')+
  facet_wrap(~Period)+
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity",position = "stack")+
  theme_light()+
  theme(text = element_text(size=10),
        axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
  labs(x = "Age group", y = "Years",size=10)+
  theme(text = element_text(size=10),
        strip.text.x = element_text(size = 10, colour = "black"))+
  theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
        legend.position = c(0.9, 0.18))+
  geom_hline(yintercept = 0)+
  coord_flip()
t
# Panel B -- contributions to changes in lifespan inequality (CoV); the y
# limits are applied through coord_flip below.
u <- ggplot(DT.Decomp.cv[DT.Decomp.cv$Sex == 'Male' & DT.Decomp.cv$Country=='Denmark',], aes(x = Age, y = Contribution, fill = Cause)) +
  ggtitle('B Decomposition of lifespan inequality (CoV)', subtitle = 'Danish Males. Negative (positive) values decrease (increase) CoV')+
  facet_wrap(~Period)+
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity",position = "stack", show.legend = F)+
  theme_light()+
  #coord_cartesian(ylim=c(-.0035, .003))+
  theme(text = element_text(size=10),
        axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
  labs(x = "Age group", y = "Units",size=10)+
  theme(text = element_text(size=10),
        strip.text.x = element_text(size = 10, colour = "black"))+
  theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
        legend.position = c(0.93, 0.25))+
  geom_hline(yintercept = 0)+
  coord_flip(ylim =c(-.004, .0035) )
u
# Write both panels to a single PDF.
require(gridExtra)
pdf(file="R/Figures/DX_Decomp_males.pdf",width=12,height=11,pointsize=4,useDingbats = F)
grid.arrange(t,u,nrow=2)
dev.off()
# Sanity check: total contribution per sex/country/period (printed).
DT.Decomp.ex[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period)]
DT.Decomp.cv[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period)]
### Decomp with Sweden
# Figure: Denmark vs Sweden cause-of-death decomposition, year 2014.
# Panel A -- gap in life expectancy by age and cause.
r <- ggplot(DT.compare.ex[DT.compare.ex$Country=='Denmark' & DT.compare.ex$Year == 2014,], aes(x = Age, y = Contribution, fill = Cause)) +
  ggtitle('A Decomposition of life expectancy', subtitle = 'Sweden - Denmark, 2014. Negative (positive) values decrease (increase) the gap in life expectancy with Sweden.')+
  facet_wrap(~Sex)+
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity",position = "stack")+
  theme_light()+
  theme(text = element_text(size=10),
        axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
  labs(x = "Age group", y = "Years",size=10)+
  theme(text = element_text(size=10),
        strip.text.x = element_text(size = 10, colour = "black"))+
  theme(legend.text=element_text(size=9), legend.title = element_text(size=9),
        legend.position = c(0.39, 0.18))+
  geom_hline(yintercept = 0)+
  coord_flip()
r
# Panel B -- gap in lifespan inequality (CoV).
# NOTE(review): the y aesthetic flips the sign (-Contribution), matching the
# "Denmark - Sweden" direction in this subtitle, whereas panel A plots
# Contribution as "Sweden - Denmark" -- confirm the sign convention is intended.
s <- ggplot(DT.compare.cv[DT.compare.cv$Country=='Denmark' & DT.compare.cv$Year == 2014,], aes(x = Age, y = -Contribution, fill = Cause)) +
  ggtitle('B Decomposition of lifespan inequality (CoV)', subtitle = 'Denmark - Sweden, 2014. Negative (positive) values decrease (increase) the gap in CoV with Sweden.')+
  facet_wrap(~Sex)+
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity",position = "stack", show.legend = F)+
  theme_light()+
  theme(text = element_text(size=10),
        axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
  labs(x = "Age group", y = "Units",size=10)+
  theme(text = element_text(size=10),
        strip.text.x = element_text(size = 10, colour = "black"))+
  theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
        legend.position = c(0.93, 0.25))+
  geom_hline(yintercept = 0)+
  coord_flip()
s
# Write both panels to a single PDF.
require(gridExtra)
pdf(file="R/Figures/DK_Compare.pdf",width=10,height=10,pointsize=4,useDingbats = F)
grid.arrange(r,s,nrow=2)
dev.off()
# Table of potential gains in life expectancy and CoV, Denmark vs Sweden 2014
# (comment typo fixed: "gaind").
Data2014ex <- DT.compare.ex[DT.compare.ex$Year == 2014,]
Data2014ex$ind <- 'ex'
Data2014cv <- DT.compare.cv[DT.compare.cv$Year == 2014,]
Data2014cv$ind <- 'cv'
# Stack both indicators and keep Denmark only.
Data2014 <- rbind(Data2014ex,Data2014cv)
Data2014 <- Data2014[Data2014$Country == 'Denmark',]
# Total contribution by sex, cause and indicator.
table1 <- Data2014[, sum(Contribution), by = list(Sex,Cause,ind)]
table1
# BUG FIX: the file extension was misspelled ".cvs"; write a proper .csv.
write.csv(table1,file = 'R/Figures/Table1.csv')
# Contributions by sex and age (printed for inspection).
Data2014[, sum(Contribution), by = list(Sex,Age,ind)]
|
/R/7_Decomp_Figures.R
|
permissive
|
jmaburto/Lifespan-inequality-Denmark
|
R
| false
| false
| 10,765
|
r
|
library(ggplot2)
library(data.table)
library(reshape2)
library(RColorBrewer)
setwd("C:/Users/jmaburto/Documents/GitHub/Lifespan-inequality-Denmark")
#2 Cancer, amenable to smoking
#3 Cancer, not amenable to smoking
#5 Cardiovascular & Diabetes mellitus (move to 5)
#6 Respiratory, infectious
#7 Respiratory, non-infectious
#8 External
#9 Other & Infectious, non-respiratory
load('Data/Results.RData')
new.level.order<- c("Smoking related cancer","Non-Smoking related cancer","Cardiovascular","Respiratory-Non-infectious","Respiratory-Infectious","External","Other","Above age 85")
Decomp.ex <- local(get(load("Data/DecompResults_ex_List.RData")))
Decomp.cv <- local(get(load("Data/DecompResults_cv_List.RData")))
Compare.ex <- local(get(load("Data/Compare_DecompResults_ex_List.RData")))
Compare.cv <- local(get(load("Data/Compare_DecompResults_cv_List.RData")))
Labels.periods <- c('1960-1975','1975-1995','1995-2014')
DT.Decomp.ex$Period <- (cut(DT.Decomp.ex$Year+1, breaks=c(1960,1975,1995,Inf),labels=Labels.periods))
DT.Decomp.cv$Period <- (cut(DT.Decomp.cv$Year+1, breaks=c(1960,1975,1995,Inf),labels=Labels.periods))
DT.Decomp.ex <- DT.Decomp.ex[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period,Cause,Age)]
DT.Decomp.cv <- DT.Decomp.cv[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period,Cause,Age)]
base2 <- c(rev(brewer.pal(8,name = 'Spectral'))[1:5],rev(brewer.pal(8,name = 'Spectral'))[8],'lightgrey','lightpink')
base2[2] <- 'lightseagreen'
base2[4] <- 'tan1'
levels(DT.compare.ex$Cause)
DT.Decomp.ex$Cause <- factor(DT.Decomp.ex$Cause, levels = new.level.order)
DT.Decomp.cv$Cause <- factor(DT.Decomp.cv$Cause, levels = new.level.order)
DT.compare.ex$Cause <- factor(DT.compare.ex$Cause, levels = new.level.order)
DT.compare.cv$Cause <- factor(DT.compare.cv$Cause, levels = new.level.order)
#base2 <- c("#66C2A5", "#ABDDA4",'peachpuff2', '#fc8d62','coral2', "#D53E4F" , "lightgrey" ,"lightpink")
# Changes in Denmark in life expectancy
p <- ggplot(DT.Decomp.ex[DT.Decomp.ex$Sex == 'Female' & DT.Decomp.ex$Country=='Denmark',], aes(x = Age, y = Contribution, fill = Cause)) +
ggtitle('A Decomposition of life expectancy', subtitle = 'Danish females. Negative (positive) values decrease (increase) life expectancy')+
facet_wrap(~Period)+
scale_fill_manual('Cause of death', values = base2) +
geom_bar(stat = "identity",position = "stack")+
theme_light()+
theme(text = element_text(size=10),
axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
labs(x = "Age group", y = "Years",size=10)+
theme(text = element_text(size=10),
strip.text.x = element_text(size = 10, colour = "black"))+
theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
legend.position = c(0.9, 0.18))+
geom_hline(yintercept = 0)+
coord_flip()
p
# Panel B -- contributions to changes in lifespan inequality (CoV),
# Danish females.
q <- ggplot(DT.Decomp.cv[DT.Decomp.cv$Sex == 'Female' & DT.Decomp.cv$Country=='Denmark',], aes(x = Age, y = Contribution, fill = Cause)) +
  ggtitle('B Decomposition of lifespan inequality (CoV)', subtitle = 'Danish females. Negative (positive) values decrease (increase) CoV')+
  facet_wrap(~Period)+
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity",position = "stack", show.legend = F)+
  # FIX: dropped the redundant coord_cartesian(ylim = ...) -- the coord_flip()
  # below replaces any earlier coordinate system (ggplot2 warned about it) and
  # already carries the same ylim.
  theme_light()+
  theme(text = element_text(size=10),
        axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
  labs(x = "Age group", y = "Units",size=10)+
  theme(text = element_text(size=10),
        strip.text.x = element_text(size = 10, colour = "black"))+
  theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
        legend.position = c(0.93, 0.25))+
  geom_hline(yintercept = 0)+
  coord_flip(ylim =c(-.0035, .003) )
q
require(gridExtra)
pdf(file="R/Figures/DX_Decomp.pdf",width=12,height=11,pointsize=4,useDingbats = F)
grid.arrange(p,q,nrow=2)
dev.off()
library(ggplot2)
library(data.table)
library(reshape2)
library(RColorBrewer)
setwd("C:/Users/jmaburto/Documents/GitHub/Lifespan-inequality-Denmark")
#2 Cancer, amenable to smoking
#3 Cancer, not amenable to smoking
#5 Cardiovascular & Diabetes mellitus (move to 5)
#6 Respiratory, infectious
#7 Respiratory, non-infectious
#8 External
#9 Other & Infectious, non-respiratory
load('Data/Results.RData')
new.level.order<- c("Smoking related cancer","Non-Smoking related cancer","Cardiovascular","Respiratory-Non-infectious","Respiratory-Infectious","External","Other","Above age 85")
Decomp.ex <- local(get(load("Data/DecompResults_ex_List.RData")))
Decomp.cv <- local(get(load("Data/DecompResults_cv_List.RData")))
Compare.ex <- local(get(load("Data/Compare_DecompResults_ex_List.RData")))
Compare.cv <- local(get(load("Data/Compare_DecompResults_cv_List.RData")))
Labels.periods <- c('1960-1975','1975-1995','1995-2014')
DT.Decomp.ex$Period <- (cut(DT.Decomp.ex$Year+1, breaks=c(1960,1975,1995,Inf),labels=Labels.periods))
DT.Decomp.cv$Period <- (cut(DT.Decomp.cv$Year+1, breaks=c(1960,1975,1995,Inf),labels=Labels.periods))
DT.Decomp.ex <- DT.Decomp.ex[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period,Cause,Age)]
DT.Decomp.cv <- DT.Decomp.cv[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period,Cause,Age)]
base2 <- c(rev(brewer.pal(8,name = 'Spectral'))[1:5],rev(brewer.pal(8,name = 'Spectral'))[8],'lightgrey','lightpink')
base2[2] <- 'lightseagreen'
base2[4] <- 'tan1'
levels(DT.compare.ex$Cause)
DT.Decomp.ex$Cause <- factor(DT.Decomp.ex$Cause, levels = new.level.order)
DT.Decomp.cv$Cause <- factor(DT.Decomp.cv$Cause, levels = new.level.order)
DT.compare.ex$Cause <- factor(DT.compare.ex$Cause, levels = new.level.order)
DT.compare.cv$Cause <- factor(DT.compare.cv$Cause, levels = new.level.order)
#base2 <- c("#66C2A5", "#ABDDA4",'peachpuff2', '#fc8d62','coral2', "#D53E4F" , "lightgrey" ,"lightpink")
# Changes in Denmark in life expectancy
t <- ggplot(DT.Decomp.ex[DT.Decomp.ex$Sex == 'Male' & DT.Decomp.ex$Country=='Denmark',], aes(x = Age, y = Contribution, fill = Cause)) +
ggtitle('A Decomposition of life expectancy', subtitle = 'Danish males. Negative (positive) values decrease (increase) life expectancy')+
facet_wrap(~Period)+
scale_fill_manual('Cause of death', values = base2) +
geom_bar(stat = "identity",position = "stack")+
theme_light()+
theme(text = element_text(size=10),
axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
labs(x = "Age group", y = "Years",size=10)+
theme(text = element_text(size=10),
strip.text.x = element_text(size = 10, colour = "black"))+
theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
legend.position = c(0.9, 0.18))+
geom_hline(yintercept = 0)+
coord_flip()
t
u <- ggplot(DT.Decomp.cv[DT.Decomp.cv$Sex == 'Male' & DT.Decomp.cv$Country=='Denmark',], aes(x = Age, y = Contribution, fill = Cause)) +
ggtitle('B Decomposition of lifespan inequality (CoV)', subtitle = 'Danish Males. Negative (positive) values decrease (increase) CoV')+
facet_wrap(~Period)+
scale_fill_manual('Cause of death', values = base2) +
geom_bar(stat = "identity",position = "stack", show.legend = F)+
theme_light()+
#coord_cartesian(ylim=c(-.0035, .003))+
theme(text = element_text(size=10),
axis.text.x = element_text(angle=45, hjust=1),panel.grid.minor.x = element_blank())+
labs(x = "Age group", y = "Units",size=10)+
theme(text = element_text(size=10),
strip.text.x = element_text(size = 10, colour = "black"))+
theme(legend.text=element_text(size=10), legend.title = element_text(size=10),
legend.position = c(0.93, 0.25))+
geom_hline(yintercept = 0)+
coord_flip(ylim =c(-.004, .0035) )
u
require(gridExtra)
pdf(file="R/Figures/DX_Decomp_males.pdf",width=12,height=11,pointsize=4,useDingbats = F)
grid.arrange(t,u,nrow=2)
dev.off()
DT.Decomp.ex[,list(Contribution = sum(Contribution)), by = list(Sex,Country,Period)]
## -------------------------------------------------------------------------
## Cause-of-death decomposition of the Denmark-Sweden gap in life expectancy
## (panel A) and in lifespan inequality measured by the coefficient of
## variation, CoV (panel B), 2014; plus a summary table of contributions.
## Depends on objects built earlier in this analysis: DT.Decomp.cv,
## DT.compare.ex, DT.compare.cv (data.tables) and the colour palette `base2`.
## -------------------------------------------------------------------------

# Sanity check: total contribution by sex/country/period (printed, not stored).
DT.Decomp.cv[, list(Contribution = sum(Contribution)), by = list(Sex, Country, Period)]

### Decomp with Sweden
# Panel A: age- and cause-specific contributions to the life-expectancy gap.
r <- ggplot(DT.compare.ex[DT.compare.ex$Country == 'Denmark' & DT.compare.ex$Year == 2014, ],
            aes(x = Age, y = Contribution, fill = Cause)) +
  ggtitle('A Decomposition of life expectancy',
          subtitle = 'Sweden - Denmark, 2014. Negative (positive) values decrease (increase) the gap in life expectancy with Sweden.') +
  facet_wrap(~Sex) +
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity", position = "stack") +
  theme_light() +
  theme(text = element_text(size = 10),
        axis.text.x = element_text(angle = 45, hjust = 1),
        panel.grid.minor.x = element_blank()) +
  labs(x = "Age group", y = "Years", size = 10) +
  theme(text = element_text(size = 10),
        strip.text.x = element_text(size = 10, colour = "black")) +
  theme(legend.text = element_text(size = 9), legend.title = element_text(size = 9),
        legend.position = c(0.39, 0.18)) +
  geom_hline(yintercept = 0) +
  coord_flip()
r

# Panel B: contributions to the CoV gap; sign flipped (-Contribution) so the
# orientation of the bars mirrors panel A's Denmark-vs-Sweden reading.
s <- ggplot(DT.compare.cv[DT.compare.cv$Country == 'Denmark' & DT.compare.cv$Year == 2014, ],
            aes(x = Age, y = -Contribution, fill = Cause)) +
  ggtitle('B Decomposition of lifespan inequality (CoV)',
          subtitle = 'Denmark - Sweden, 2014. Negative (positive) values decrease (increase) the gap in CoV with Sweden.') +
  facet_wrap(~Sex) +
  scale_fill_manual('Cause of death', values = base2) +
  geom_bar(stat = "identity", position = "stack", show.legend = FALSE) +
  theme_light() +
  theme(text = element_text(size = 10),
        axis.text.x = element_text(angle = 45, hjust = 1),
        panel.grid.minor.x = element_blank()) +
  labs(x = "Age group", y = "Units", size = 10) +
  theme(text = element_text(size = 10),
        strip.text.x = element_text(size = 10, colour = "black")) +
  theme(legend.text = element_text(size = 10), legend.title = element_text(size = 10),
        legend.position = c(0.93, 0.25)) +
  geom_hline(yintercept = 0) +
  coord_flip()
s

# Stack the two panels into a single PDF figure.
library(gridExtra)  # was require(); library() fails loudly if the package is missing
pdf(file = "R/Figures/DK_Compare.pdf", width = 10, height = 10, pointsize = 4,
    useDingbats = FALSE)
grid.arrange(r, s, nrow = 2)
dev.off()

# Table of potential gains in life expectancy / CoV by sex and cause, 2014.
Data2014ex <- DT.compare.ex[DT.compare.ex$Year == 2014, ]
Data2014ex$ind <- 'ex'
Data2014cv <- DT.compare.cv[DT.compare.cv$Year == 2014, ]
Data2014cv$ind <- 'cv'
Data2014 <- rbind(Data2014ex, Data2014cv)
Data2014 <- Data2014[Data2014$Country == 'Denmark', ]
table1 <- Data2014[, sum(Contribution), by = list(Sex, Cause, ind)]
table1
# Fixed typo: output extension was 'cvs'.
write.csv(table1, file = 'R/Figures/Table1.csv')
# Same totals broken down by age instead of cause (printed for inspection).
Data2014[, sum(Contribution), by = list(Sex, Age, ind)]
|
# Example script from the astsa package documentation: fit the classic
# "airline model" -- a seasonal ARIMA(0,1,1)x(0,1,1) with period 12 -- to
# the log-transformed monthly AirPassengers series.
library(astsa)
### Name: sarima
### Title: Fit ARIMA Models
### Aliases: sarima
### Keywords: ts
### ** Examples
# Fit the model (prints estimates and diagnostics).
sarima(log(AirPassengers),0,1,1,0,1,1,12)
# Keep the fitted object; the surrounding parentheses also print it.
(dog <- sarima(log(AirPassengers),0,1,1,0,1,1,12))
summary(dog$fit) # fit has all the returned arima() values
plot(resid(dog$fit)) # plot the innovations (residuals)
sarima(log(AirPassengers),0,1,1,0,1,1,12,details=FALSE)$BIC # print model BIC only
|
/data/genthat_extracted_code/astsa/examples/sarima.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 415
|
r
|
# Example script from the astsa package documentation: fit the classic
# "airline model" -- a seasonal ARIMA(0,1,1)x(0,1,1) with period 12 -- to
# the log-transformed monthly AirPassengers series.
library(astsa)
### Name: sarima
### Title: Fit ARIMA Models
### Aliases: sarima
### Keywords: ts
### ** Examples
# Fit the model (prints estimates and diagnostics).
sarima(log(AirPassengers),0,1,1,0,1,1,12)
# Keep the fitted object; the surrounding parentheses also print it.
(dog <- sarima(log(AirPassengers),0,1,1,0,1,1,12))
summary(dog$fit) # fit has all the returned arima() values
plot(resid(dog$fit)) # plot the innovations (residuals)
sarima(log(AirPassengers),0,1,1,0,1,1,12,details=FALSE)$BIC # print model BIC only
|
library(swirl)
swirl()
kingcalyb123
1
2
1
2
getwd()
ls()
x <- 9
ls()
dir()
?list.files
args(list.files)
old.dir <- getwd()
testdir <- getwd()
dir.create("testdir")
setwd("testdir")
file.create("mytest.R")\
list.files()
file.exists("mytest.R")
file.info("mytest.R")
file.rename("mytest.R", "mytest2.R")
file.copy("mytest2.R", "mytest3.R")
file.path("mytest3.R")
file.path('folder1', 'folder2')
?dir.create
dir.create(file.path('testdir2', 'testdir3'), recursive = TRUE)
setwd(old.dir)
1
3
1:20
pi:10
15:1
?`:`
seq(1,20)
seq(0,10, by =0.5)
my_seq <- seq(5,10, length =30)
length(my_seq)
1:length(my_seq)
seq(along.with = my_seq)
seq_along(my_seq)
rep(0, times=40)
rep(c(0,1,2), times = 10)
rep(c(0,1,2), each = 10)
2
1
5
x <- c(44,NA,5,NA)
x * 3
y <- rnorm(1000)
z <- rep(NA, 1000)
my_data <- sample(c(y,z), 100)
my_na <- is.na(my_data)
my_na
my_data == NA
sum(my_na)
my_na
skip()
0/0
Inf-Inf
1
6
x
x[1:10]
4
2
x[is.na(x)]
y <- x[!is.na(x)]
y
1
y[y>0]
x[x>0]
x[!is.na(x) & x>0]
x[c(3,5,7)]
x[0]
x[3000]
x[c(-2,-10)]
x[-c(2,10)]
vect <- c(foo = 11, bar = 2, norf = NA)
vect
names(vect)
vect2 <- c(11, 2, NA)
names(vect2) <- c("foo", "bar", "norf")
identical(vect, vect2)
2
vect["bar"]
vect[c("foo", "bar")]
2
1
7
my_vector <- 1:20
my_vector
dim(my_vector)
length(my_vector)
dim(my_vector) <- c(4,5)
dim(my_vector)
attributes(my_vector)
my_vector
class(my_vector)
my_matrix <- my_vector
?matrix
my_matrix2 <- matrix(data = 1:20, nrow = 4, ncol = 5)
identical(my_matrix, my_matrix2)
skip()
cbind(patients, my_matrix)
my_data <- data.frame(patients, my_matrix)
my_data
class(my_data)
cnames <- vector("patient", "age", "weight", "bp", "rating", "test")
skip()
my_data
2
1
8
TRUE == TRUE
(FALSE == TRUE) == FALSE
6 == 7
6 < 7
10 <= 10
4
3
5 != 7
!5 != 7
! 5 == 7
2
FALSE & FALSE
TRUE & c(TRUE, FALSE, FALSE)
TRUE && c(TRUE, FALSE, FALSE)
TRUE | c(TRUE, FALSE, FALSE)
TRUE || c(TRUE, FALSE, FALSE)
5>8 || 6 != 8 && 4 > 3.9
2
4
isTRUE(6 > 4)
3
identical('twins', 'twins')
2
xor(5 == 6, !FALSE)
4
ints <- sample(10)
ints
ints>5
which(c(>7))
which(ints > 7)
3
any(ints < 0)
all(ints > 0)
4
1
9
Sys.Date()
mean(c(2,4,5))
skip()
boring_function('My first function!')
boring_function
skip()
my_mean(c(4,5,10))
remainder(5)
remainder(11,5)
remainder(divisor = 11, num = 5)
remainder(4, div=2)
args(remainder)
evaluate(sd, c(1.4, 3.6, 7.9, 8.8))
evaluate(function(x){x+1}, 6)
evaluate(function(x), c(8, 4, 0))
skip()
evaluate(function(x){x[length(x)]}, c(8, 4, 0))
?paste
paste("Programming", "is", "fun!")
skip()
mad_libs
paste(Colorado, running, river)
skip()
15
data(cars)
?cars
head(cars)
plot(cars)
?plot
plot((cars) x-axis = speed, y- axis = dist)
plot(x = cars$dist, y = cars$speed, x-axis = speed)
plot(x= cars$speed, y = cars$dist, xlab = "Speed", ylab = "Stopping Distance")
skip()
plot(cars, main = "My Plot")
plot(car, subtitle = "My Plot Subtitle")
skip()
plot(cars, col = 2)
plot(cars, xlim = c(10, 15))
plot(cars, pch = 2)
data(mtcars)
?boxplot
boxplot(cars, form= mpg ~cyl)
boxplot(formula = mpg ~cyl, data =mtcars)
hist(mtcars$mpg)
1s
|
/Task_01/Task_01c.R
|
no_license
|
kingcalyb123/Tasks
|
R
| false
| false
| 3,036
|
r
|
library(swirl)
swirl()
kingcalyb123
1
2
1
2
getwd()
ls()
x <- 9
ls()
dir()
?list.files
args(list.files)
old.dir <- getwd()
testdir <- getwd()
dir.create("testdir")
setwd("testdir")
file.create("mytest.R")\
list.files()
file.exists("mytest.R")
file.info("mytest.R")
file.rename("mytest.R", "mytest2.R")
file.copy("mytest2.R", "mytest3.R")
file.path("mytest3.R")
file.path('folder1', 'folder2')
?dir.create
dir.create(file.path('testdir2', 'testdir3'), recursive = TRUE)
setwd(old.dir)
1
3
1:20
pi:10
15:1
?`:`
seq(1,20)
seq(0,10, by =0.5)
my_seq <- seq(5,10, length =30)
length(my_seq)
1:length(my_seq)
seq(along.with = my_seq)
seq_along(my_seq)
rep(0, times=40)
rep(c(0,1,2), times = 10)
rep(c(0,1,2), each = 10)
2
1
5
x <- c(44,NA,5,NA)
x * 3
y <- rnorm(1000)
z <- rep(NA, 1000)
my_data <- sample(c(y,z), 100)
my_na <- is.na(my_data)
my_na
my_data == NA
sum(my_na)
my_na
skip()
0/0
Inf-Inf
1
6
x
x[1:10]
4
2
x[is.na(x)]
y <- x[!is.na(x)]
y
1
y[y>0]
x[x>0]
x[!is.na(x) & x>0]
x[c(3,5,7)]
x[0]
x[3000]
x[c(-2,-10)]
x[-c(2,10)]
vect <- c(foo = 11, bar = 2, norf = NA)
vect
names(vect)
vect2 <- c(11, 2, NA)
names(vect2) <- c("foo", "bar", "norf")
identical(vect, vect2)
2
vect["bar"]
vect[c("foo", "bar")]
2
1
7
my_vector <- 1:20
my_vector
dim(my_vector)
length(my_vector)
dim(my_vector) <- c(4,5)
dim(my_vector)
attributes(my_vector)
my_vector
class(my_vector)
my_matrix <- my_vector
?matrix
my_matrix2 <- matrix(data = 1:20, nrow = 4, ncol = 5)
identical(my_matrix, my_matrix2)
skip()
cbind(patients, my_matrix)
my_data <- data.frame(patients, my_matrix)
my_data
class(my_data)
cnames <- vector("patient", "age", "weight", "bp", "rating", "test")
skip()
my_data
2
1
8
TRUE == TRUE
(FALSE == TRUE) == FALSE
6 == 7
6 < 7
10 <= 10
4
3
5 != 7
!5 != 7
! 5 == 7
2
FALSE & FALSE
TRUE & c(TRUE, FALSE, FALSE)
TRUE && c(TRUE, FALSE, FALSE)
TRUE | c(TRUE, FALSE, FALSE)
TRUE || c(TRUE, FALSE, FALSE)
5>8 || 6 != 8 && 4 > 3.9
2
4
isTRUE(6 > 4)
3
identical('twins', 'twins')
2
xor(5 == 6, !FALSE)
4
ints <- sample(10)
ints
ints>5
which(c(>7))
which(ints > 7)
3
any(ints < 0)
all(ints > 0)
4
1
9
Sys.Date()
mean(c(2,4,5))
skip()
boring_function('My first function!')
boring_function
skip()
my_mean(c(4,5,10))
remainder(5)
remainder(11,5)
remainder(divisor = 11, num = 5)
remainder(4, div=2)
args(remainder)
evaluate(sd, c(1.4, 3.6, 7.9, 8.8))
evaluate(function(x){x+1}, 6)
evaluate(function(x), c(8, 4, 0))
skip()
evaluate(function(x){x[length(x)]}, c(8, 4, 0))
?paste
paste("Programming", "is", "fun!")
skip()
mad_libs
paste(Colorado, running, river)
skip()
15
data(cars)
?cars
head(cars)
plot(cars)
?plot
plot((cars) x-axis = speed, y- axis = dist)
plot(x = cars$dist, y = cars$speed, x-axis = speed)
plot(x= cars$speed, y = cars$dist, xlab = "Speed", ylab = "Stopping Distance")
skip()
plot(cars, main = "My Plot")
plot(car, subtitle = "My Plot Subtitle")
skip()
plot(cars, col = 2)
plot(cars, xlim = c(10, 15))
plot(cars, pch = 2)
data(mtcars)
?boxplot
boxplot(cars, form= mpg ~cyl)
boxplot(formula = mpg ~cyl, data =mtcars)
hist(mtcars$mpg)
1s
|
#### SPLIT UP THE CODE FOR TIME BASIS AND QUANTILE BASIS: IT REDUCES CONFUSION AND IT ALSO CREATES MORE FUNCTIONS AND "FELESH"!!
# Cumulative-incidence (CI) decomposition on a *time* basis.
#
# Given survival curves for two competing causes, evaluated on a shared time
# grid, compute each cause's cumulative incidence by trapezoidal integration
# of dCI1 = -S2 * dS1 (and symmetrically for cause 2), together with the
# overall event-free probability S1 * S2.
#
# p1, p2 ............ survival probabilities for cause 1 / cause 2: plain
#                     vectors (a single curve) or arrays whose FIRST
#                     dimension is time; remaining dimensions (e.g. subject,
#                     posterior sample) are carried through unchanged.
# unity.tol ......... tolerance for verifying that each curve starts at 1.0.
# diff.tol .......... maximum admissible per-step drop in probability; a
#                     guard against a grid too coarse for the integration.
# diff.tol.policy ... "mean": compare the average step against diff.tol;
#                     "all": compare every individual step.
# check ............. FALSE skips all validation (used by cfc.pbasis, whose
#                     inputs are valid by construction).
#
# Returns an object of class "cfc.tbasis": for vector input, a matrix with
# columns "ci1", "ci2", "efp"; for array input, a list of arrays with those
# names, each shaped like p1.
cfc.tbasis <- function(p1, p2, unity.tol = 1e-6,
diff.tol = 1e-2, diff.tol.policy = c("mean","all"),
check = TRUE) {
# checks for p1,p2: 1) same dimensions, 2) between 0.0 and 1.0, 3) non-increasing with time, 4) start at 1.0, 5) check for large steps
diff.tol.policy <- match.arg(diff.tol.policy)
# Derive grid geometry: nt = number of time points, nother = number of curves
# (product of all non-time dimensions).
if (is.null(dim(p1))) {
nt <- length(p1)
if (length(p2) != nt) stop("p1 and p2 have unequal lengths")
nother <- 1
dim.p1 <- c(nt, 1)
} else {
dim.p1 <- dim(p1)
if (!identical(dim.p1, dim(p2))) stop("p1 and p2 dimensions do not match")
nt <- dim.p1[1]
nother <- prod(dim.p1)/nt
}
if (check) if (any(p1<0.0 | p1>1.0 | p2<0.0 | p2>1.0)) stop("out-of-range probabilities")
# Flatten all non-time dimensions into columns: one column per curve.
p1.2d <- array(p1, dim = c(nt, nother))
p2.2d <- array(p2, dim = c(nt, nother))
if (check) if (any(abs(p1.2d[1,] - 1.0) > unity.tol)) stop("p1 probabilities must start at 1.0")
if (check) if (any(abs(p2.2d[1,] - 1.0) > unity.tol)) stop("p2 probabilities must start at 1.0")
# Left/right endpoints of each consecutive time interval.
seq.left <- 1:(nt-1)
seq.right <- seq.left+1
dp1 <- apply(p1.2d, 2, diff)
# NOTE(review): when nt == 2, apply(..., diff) returns a plain vector rather
# than a 1-row matrix, so apply(dci1, 2, cumsum) below would fail -- confirm
# that two-point grids are excluded upstream.
if (check) {
if (any(dp1>0.0)) stop("increasing probabilities with time detected for p1")
if (diff.tol.policy == "mean") {
if (mean(dp1) < -1.0*diff.tol) stop("average change in p1 exceeds threshold")
} else if (diff.tol.policy == "all") {
if (any(dp1 < -1.0*diff.tol)) stop("one or more changes in p1 exceed threshold")
}
}
# Trapezoidal rule: dCI1 = -(mean of S2 over the interval) * dS1, then
# accumulate, prepending 0 so CI is zero at the first time point.
dci1 <- -0.5 * (p2.2d[seq.left,] + p2.2d[seq.right,]) * dp1
ci1 <- rbind(0, apply(dci1, 2, cumsum))
ci1 <- array(ci1, dim = dim.p1)
# Same validation and integration, with roles swapped, for cause 2.
dp2 <- apply(p2.2d, 2, diff)
if (check) {
if (any(dp2>0.0)) stop("increasing probabilities with time detected for p2")
if (diff.tol.policy == "mean") {
if (mean(dp2) < -1.0*diff.tol) stop("average change in p2 exceeds threshold")
} else if (diff.tol.policy == "all") {
if (any(dp2 < -1.0*diff.tol)) stop("one or more changes in p2 exceed threshold")
}
}
dci2 <- -0.5 * (p1.2d[seq.left,] + p1.2d[seq.right,]) * dp2
ci2 <- rbind(0, apply(dci2, 2, cumsum))
ci2 <- array(ci2, dim = dim.p1)
# Vector input: collapse the singleton column dimension again.
if (is.null(dim(p1))) {
ci1 <- drop(ci1)
ci2 <- drop(ci2)
}
# Assemble: a matrix for vector input, a named list for array input;
# efp = event-free probability = product of the two survival curves.
if (is.null(dim(p1))) {
ret <- cbind(ci1, ci2, p1*p2)
colnames(ret) <- c("ci1", "ci2", "efp")
} else {
ret <- list(ci1 = ci1, ci2 = ci2, efp = p1*p2)
}
class(ret) <- c("cfc.tbasis", class(ret))
return (ret)
}
## Summary method for "cfc.tbasis" objects.
##
## For the single-curve (matrix) form the object is returned unchanged except
## for being re-classed to "summary.cfc.tbasis". For the multi-curve (list)
## form, each component (ci1, ci2, efp) is averaged over every array
## dimension NOT listed in MARGIN; the time dimension (1) must be retained.
##
## object ... result of cfc.tbasis().
## MARGIN ... dimensions to keep (forwarded to apply); default keeps time.
## Returns a "summary.cfc.tbasis" object; the averaged form is invisible.
summary.cfc.tbasis <- function(object,
                               MARGIN = if (class(object)[2] == "matrix") NULL else 1, ...) {
  ## Single curve: nothing to average, just relabel.
  if (class(object)[2] == "matrix") {
    class(object)[1] <- "summary.cfc.tbasis"
    attr(object, "popavg") <- FALSE
    return (object)
  }

  MARGIN <- as.integer(MARGIN)
  if (!(1 %in% MARGIN)) stop("time dimension cannot be aggregated")
  if (identical(MARGIN, 1:length(dim(object$ci1)))) stop("cannot keep all dimensions during aggregation")

  ## Average each component over the collapsed dimensions.
  avg <- function(component) apply(component, MARGIN = MARGIN, mean)
  ci1.m <- avg(object$ci1)
  ci2.m <- avg(object$ci2)
  efp.m <- avg(object$efp)

  ## A single kept dimension yields vectors, which pack into one matrix;
  ## otherwise keep the three averaged arrays in a named list.
  if (is.null(dim(ci1.m))) {
    out <- cbind(ci1 = ci1.m, ci2 = ci2.m, efp = efp.m)
  } else {
    out <- list(ci1 = ci1.m, ci2 = ci2.m, efp = efp.m)
  }
  class(out) <- c("summary.cfc.tbasis", class(out))
  invisible(out)
}
# Plot method for "summary.cfc.tbasis" objects.
#
# x .... summary object: either a single-curve matrix (columns "ci1", "ci2",
#        "efp") or a list of arrays whose first dimension is time.
# t .... time axis: a scalar is interpreted as the grid spacing (times
#        become 0, t, 2t, ...); a vector of length nt is used as-is.
# ci ... for the list (population) form, the width of the central quantile
#        band drawn with dashed lines around each median curve.
plot.summary.cfc.tbasis <- function(x, t = 1, ci = 0.95, ...) {
if (class(x)[2] == "matrix") {
# Single curve: draw the three series directly.
nt <- dim(x)[1]
if (length(t) == 1) t <- (0:(nt-1))*t
else if (length(t) != nt) stop("bad length for t vector")
plot(t, x[,"efp"], type = "l", ylim = c(0.0,1.0)
, xlab = "Time", ylab = "Probability", col = "black")
lines(t, x[,"ci1"], col = "red")
lines(t, x[,"ci2"], col = "green")
legend("topright", legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2")
, col = c("black", "red", "green"), lty = rep(1,3))
} else {
# Population of curves: flatten all non-time dimensions into columns, then
# plot the per-time median (solid) with the central `ci` band (dashed).
dims <- dim(x$ci1)
nt <- dims[1]
if (length(t) == 1) t <- (0:(nt-1))*t
else if (length(t) != nt) stop("bad length for t vector")
nother <- prod(dims)/nt
ci1.2d <- array(x$ci1, dim = c(nt, nother))
ci2.2d <- array(x$ci2, dim = c(nt, nother))
efp.2d <- array(x$efp, dim = c(nt, nother))
# Lower bound, median, upper bound of the central `ci` interval.
qvec <- c(0.5*(1-ci), 0.5, 0.5*(1+ci))
efp.q <- t(apply(efp.2d, 1, quantile, probs = qvec))
ci1.q <- t(apply(ci1.2d, 1, quantile, probs = qvec))
ci2.q <- t(apply(ci2.2d, 1, quantile, probs = qvec))
plot(t, efp.q[,2], type = "l", ylim = c(0.0, 1.0)
, xlab = "Time", ylab = "Population Average", col = "black")
lines(t, efp.q[,1], col = "black", lty = 2)
lines(t, efp.q[,3], col = "black", lty = 2)
lines(t, ci1.q[,2], col = "red")
lines(t, ci1.q[,1], col = "red", lty = 2)
lines(t, ci1.q[,3], col = "red", lty = 2)
lines(t, ci2.q[,2], col = "green")
lines(t, ci2.q[,1], col = "green", lty = 2)
lines(t, ci2.q[,3], col = "green", lty = 2)
legend("topright", legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2")
, col = c("black", "red", "green"), lty = rep(1,3))
# Invisibly return the quantile matrices used for the plot.
return (invisible(list(efp = efp.q, ci1 = ci1.q, ci2 = ci2.q)))
}
}
# Cumulative-incidence decomposition on a *probability* (quantile) basis.
#
# Instead of survival probabilities on a shared time grid, the inputs are the
# times t1/t2 at which each cause-specific survival curve attains the common
# probability values in `probs` (decreasing, starting at 1.0). Per curve, the
# two time grids are merged, each curve's probabilities are linearly
# interpolated onto the merged grid (truncated to the shorter curve's
# horizon), and the result is handed to cfc.tbasis with checks disabled.
#
# t1, t2 .... strictly increasing time vectors (or arrays with the quantile
#             index as the first dimension) for cause 1 / cause 2.
# probs ..... shared, decreasing probability vector beginning at 1.0.
# Returns class "cfc.pbasis": a matrix with columns "time", "ci1", "ci2",
# "efp" for a single curve, or a list of such matrices for multiple curves.
cfc.pbasis <- function(t1, t2, probs, unity.tol = 1e-6,
diff.tol = 1e-2, diff.tol.policy = c("all", "mean")) {
# TODO: consider allowing unsorted vectors; sort and then check for validity
diff.tol.policy <- match.arg(diff.tol.policy)
# Validate the shared probability vector once (it applies to every curve).
if (abs(probs[1] - 1.0) > unity.tol) stop("probability vector must start at 1.0")
if (any(diff(probs) >= 0.0)) stop("probabilities must be decreasing with time")
if (diff.tol.policy == "all") {
if (any(diff(probs) < -1.0*diff.tol)) stop("one or more changes in probs exceed threshold")
} else if (diff.tol.policy == "mean") {
if (mean(diff(probs)) < -1.0*diff.tol) stop("average change in probs exceeds threshold")
}
# Grid geometry: nt = number of quantiles, nother = number of curves.
if (is.null(dim(t1))) {
nt <- length(t1)
if (!is.null(dim(t2)) || length(t2) != nt) stop("t1 and t2 dimensions do not match")
nother <- 1
dim.t1 <- c(nt, 1)
} else {
dim.t1 <- dim(t1)
if (!identical(dim.t1, dim(t2))) stop("t1 and t2 dimensions do not match")
nt <- dim.t1[1]
nother <- prod(dim.t1)/nt
}
# One column per curve; times must be strictly increasing within a column.
t1.2d <- array(t1, dim = c(nt, nother))
t2.2d <- array(t2, dim = c(nt, nother))
dt1 <- apply(t1.2d, 2, diff)
dt2 <- apply(t2.2d, 2, diff)
if (any(dt1 <= 0.0)) stop("non-increasing times detected in t1")
if (any(dt2 <= 0.0)) stop("non-increasing times detected in t2")
ret <- lapply(1:nother, function(n) {
ta <- t1.2d[,n]
tb <- t2.2d[,n]
# Merge the two grids, truncated to the horizon both curves cover.
tmax <- min(max(ta), max(tb))
tcomb <- sort(unique(c(ta, tb)))
tcomb <- tcomb[which(tcomb <= tmax)]
# Interpolate each curve's survival probabilities onto the merged grid.
pa <- approx(ta, probs, tcomb)$y
pb <- approx(tb, probs, tcomb)$y
# Inputs are valid by construction, so skip cfc.tbasis' own checks.
rettmp <- cbind(tcomb, cfc.tbasis(pa, pb, check = FALSE))
colnames(rettmp) <- c("time", "ci1", "ci2", "efp")
return (rettmp)
})
# Single curve: unwrap the one-element list into a bare matrix.
if (nother == 1) ret <- ret[[1]]
class(ret) <- c("cfc.pbasis", class(ret))
return (ret)
}
## Summary method for "cfc.pbasis" objects.
##
## A single-curve result (matrix) is returned as-is, re-classed. A list of
## per-curve matrices is turned into a population average: curves are
## restricted to the time span every curve covers, all observed time points
## are pooled, each curve's cumulative incidences are linearly interpolated
## onto the pooled grid, and the point-wise means are taken; the event-free
## probability is recovered as 1 - (ci1 + ci2).
summary.cfc.pbasis <- function(object, ...) {
  ## Single curve: just relabel.
  if (class(object)[2] == "matrix") {
    class(object)[1] <- "summary.cfc.pbasis"
    attr(object, "popavg") <- FALSE
    return (object)
  }

  ## Pooled time grid, strictly below the shortest curve's horizon.
  times <- lapply(object, function(m) m[, "time"])
  tmax <- min(vapply(times, max, numeric(1)))
  grid <- sort(unique(unlist(times)))
  grid <- grid[grid < tmax]

  ## Interpolate one named column of every curve onto the grid and average.
  interp_mean <- function(colname) {
    per.curve <- sapply(object, function(m) approx(m[, "time"], m[, colname], grid)$y)
    rowMeans(per.curve)
  }
  ci1 <- interp_mean("ci1")
  ci2 <- interp_mean("ci2")

  out <- cbind(time = grid, ci1 = ci1, ci2 = ci2, efp = 1 - (ci1 + ci2))
  attr(out, "popavg") <- TRUE
  class(out) <- c("summary.cfc.pbasis", class(out))
  invisible(out)
}
# Plot method for "summary.cfc.pbasis" objects: event-free probability
# (black) and the two cause-specific cumulative incidences (red/green)
# against time. The y-axis label reads "Population Average" when the object
# carries averaged curves (popavg attribute set by summary.cfc.pbasis).
plot.summary.cfc.pbasis <- function(x, ...) {
popavg <- attr(x, "popavg")
ylim <- c(0.0, 1.0)
ylab <- if (popavg) "Population Average" else "Probability"
plot(x[,"time"], x[,"efp"], type = "l", col = "black"
, xlab = "Time", ylab = ylab, ylim = ylim)
lines(x[,"time"], x[,"ci1"], col = "red")
lines(x[,"time"], x[,"ci2"], col = "green")
legend("topright", col = c("black", "red", "green")
, legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2")
, lty = rep(1,3))
# Called for its plotting side effect only.
return (invisible(NULL))
}
# print.summary.cfc.pbasis <- function(x, ...) {
# if (attr(x, "popavg")) cat("Population averages:\n")
# nprint <- 6
# print(head(x, nprint))
# if (nrow(x) > nprint) cat("(", nrow(x)-nprint, " more rows ...)\n", sep = "")
# return (invisible(NULL))
# }
|
/fuzzedpackages/CFC/R/cfc_legacy.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 8,841
|
r
|
#### SPLIT UP THE CODE FOR TIME BASIS AND QUANTILE BASIS: IT REDUCES CONFUSION AND IT ALSO CREATES MORE FUNCTIONS AND "FELESH"!!
cfc.tbasis <- function(p1, p2, unity.tol = 1e-6,
diff.tol = 1e-2, diff.tol.policy = c("mean","all"),
check = TRUE) {
# checks for p1,p2: 1) same dimensions, 2) between 0.0 and 1.0, 3) non-increasing with time, 4) start at 1.0, 5) check for large steps
diff.tol.policy <- match.arg(diff.tol.policy)
if (is.null(dim(p1))) {
nt <- length(p1)
if (length(p2) != nt) stop("p1 and p2 have unequal lengths")
nother <- 1
dim.p1 <- c(nt, 1)
} else {
dim.p1 <- dim(p1)
if (!identical(dim.p1, dim(p2))) stop("p1 and p2 dimensions do not match")
nt <- dim.p1[1]
nother <- prod(dim.p1)/nt
}
if (check) if (any(p1<0.0 | p1>1.0 | p2<0.0 | p2>1.0)) stop("out-of-range probabilities")
p1.2d <- array(p1, dim = c(nt, nother))
p2.2d <- array(p2, dim = c(nt, nother))
if (check) if (any(abs(p1.2d[1,] - 1.0) > unity.tol)) stop("p1 probabilities must start at 1.0")
if (check) if (any(abs(p2.2d[1,] - 1.0) > unity.tol)) stop("p2 probabilities must start at 1.0")
seq.left <- 1:(nt-1)
seq.right <- seq.left+1
dp1 <- apply(p1.2d, 2, diff)
if (check) {
if (any(dp1>0.0)) stop("increasing probabilities with time detected for p1")
if (diff.tol.policy == "mean") {
if (mean(dp1) < -1.0*diff.tol) stop("average change in p1 exceeds threshold")
} else if (diff.tol.policy == "all") {
if (any(dp1 < -1.0*diff.tol)) stop("one or more changes in p1 exceed threshold")
}
}
dci1 <- -0.5 * (p2.2d[seq.left,] + p2.2d[seq.right,]) * dp1
ci1 <- rbind(0, apply(dci1, 2, cumsum))
ci1 <- array(ci1, dim = dim.p1)
dp2 <- apply(p2.2d, 2, diff)
if (check) {
if (any(dp2>0.0)) stop("increasing probabilities with time detected for p2")
if (diff.tol.policy == "mean") {
if (mean(dp2) < -1.0*diff.tol) stop("average change in p2 exceeds threshold")
} else if (diff.tol.policy == "all") {
if (any(dp2 < -1.0*diff.tol)) stop("one or more changes in p2 exceed threshold")
}
}
dci2 <- -0.5 * (p1.2d[seq.left,] + p1.2d[seq.right,]) * dp2
ci2 <- rbind(0, apply(dci2, 2, cumsum))
ci2 <- array(ci2, dim = dim.p1)
if (is.null(dim(p1))) {
ci1 <- drop(ci1)
ci2 <- drop(ci2)
}
if (is.null(dim(p1))) {
ret <- cbind(ci1, ci2, p1*p2)
colnames(ret) <- c("ci1", "ci2", "efp")
} else {
ret <- list(ci1 = ci1, ci2 = ci2, efp = p1*p2)
}
class(ret) <- c("cfc.tbasis", class(ret))
return (ret)
}
summary.cfc.tbasis <- function(object,
MARGIN = if (class(object)[2] == "matrix") NULL else 1, ...) {
if (class(object)[2] == "matrix") {
class(object)[1] <- "summary.cfc.tbasis"
attr(object, "popavg") <- FALSE
return (object)
}
MARGIN <- as.integer(MARGIN)
if (!(1 %in% MARGIN)) stop("time dimension cannot be aggregated")
if (identical(MARGIN, 1:length(dim(object$ci1)))) stop("cannot keep all dimensions during aggregation")
#cat("MARGIN:", MARGIN, "\n")
ci1 <- apply(object$ci1, MARGIN = MARGIN, mean)
ci2 <- apply(object$ci2, MARGIN = MARGIN, mean)
efp <- apply(object$efp, MARGIN = MARGIN, mean)
if (is.null(dim(ci1))) {
ret <- cbind(ci1, ci2, efp)
colnames(ret) <- c("ci1", "ci2", "efp")
} else {
ret <- list(ci1 = ci1, ci2 = ci2, efp = efp)
}
class(ret) <- c("summary.cfc.tbasis", class(ret))
return (invisible(ret))
}
plot.summary.cfc.tbasis <- function(x, t = 1, ci = 0.95, ...) {
if (class(x)[2] == "matrix") {
nt <- dim(x)[1]
if (length(t) == 1) t <- (0:(nt-1))*t
else if (length(t) != nt) stop("bad length for t vector")
plot(t, x[,"efp"], type = "l", ylim = c(0.0,1.0)
, xlab = "Time", ylab = "Probability", col = "black")
lines(t, x[,"ci1"], col = "red")
lines(t, x[,"ci2"], col = "green")
legend("topright", legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2")
, col = c("black", "red", "green"), lty = rep(1,3))
} else {
dims <- dim(x$ci1)
nt <- dims[1]
if (length(t) == 1) t <- (0:(nt-1))*t
else if (length(t) != nt) stop("bad length for t vector")
nother <- prod(dims)/nt
ci1.2d <- array(x$ci1, dim = c(nt, nother))
ci2.2d <- array(x$ci2, dim = c(nt, nother))
efp.2d <- array(x$efp, dim = c(nt, nother))
qvec <- c(0.5*(1-ci), 0.5, 0.5*(1+ci))
efp.q <- t(apply(efp.2d, 1, quantile, probs = qvec))
ci1.q <- t(apply(ci1.2d, 1, quantile, probs = qvec))
ci2.q <- t(apply(ci2.2d, 1, quantile, probs = qvec))
plot(t, efp.q[,2], type = "l", ylim = c(0.0, 1.0)
, xlab = "Time", ylab = "Population Average", col = "black")
lines(t, efp.q[,1], col = "black", lty = 2)
lines(t, efp.q[,3], col = "black", lty = 2)
lines(t, ci1.q[,2], col = "red")
lines(t, ci1.q[,1], col = "red", lty = 2)
lines(t, ci1.q[,3], col = "red", lty = 2)
lines(t, ci2.q[,2], col = "green")
lines(t, ci2.q[,1], col = "green", lty = 2)
lines(t, ci2.q[,3], col = "green", lty = 2)
legend("topright", legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2")
, col = c("black", "red", "green"), lty = rep(1,3))
return (invisible(list(efp = efp.q, ci1 = ci1.q, ci2 = ci2.q)))
}
}
cfc.pbasis <- function(t1, t2, probs, unity.tol = 1e-6,
diff.tol = 1e-2, diff.tol.policy = c("all", "mean")) {
# TODO: consider allowing unsorted vectors; sort and then check for validity
diff.tol.policy <- match.arg(diff.tol.policy)
if (abs(probs[1] - 1.0) > unity.tol) stop("probability vector must start at 1.0")
if (any(diff(probs) >= 0.0)) stop("probabilities must be decreasing with time")
if (diff.tol.policy == "all") {
if (any(diff(probs) < -1.0*diff.tol)) stop("one or more changes in probs exceed threshold")
} else if (diff.tol.policy == "mean") {
if (mean(diff(probs)) < -1.0*diff.tol) stop("average change in probs exceeds threshold")
}
if (is.null(dim(t1))) {
nt <- length(t1)
if (!is.null(dim(t2)) || length(t2) != nt) stop("t1 and t2 dimensions do not match")
nother <- 1
dim.t1 <- c(nt, 1)
} else {
dim.t1 <- dim(t1)
if (!identical(dim.t1, dim(t2))) stop("t1 and t2 dimensions do not match")
nt <- dim.t1[1]
nother <- prod(dim.t1)/nt
}
t1.2d <- array(t1, dim = c(nt, nother))
t2.2d <- array(t2, dim = c(nt, nother))
dt1 <- apply(t1.2d, 2, diff)
dt2 <- apply(t2.2d, 2, diff)
if (any(dt1 <= 0.0)) stop("non-increasing times detected in t1")
if (any(dt2 <= 0.0)) stop("non-increasing times detected in t2")
ret <- lapply(1:nother, function(n) {
ta <- t1.2d[,n]
tb <- t2.2d[,n]
tmax <- min(max(ta), max(tb))
tcomb <- sort(unique(c(ta, tb)))
tcomb <- tcomb[which(tcomb <= tmax)]
pa <- approx(ta, probs, tcomb)$y
pb <- approx(tb, probs, tcomb)$y
rettmp <- cbind(tcomb, cfc.tbasis(pa, pb, check = FALSE))
colnames(rettmp) <- c("time", "ci1", "ci2", "efp")
return (rettmp)
})
if (nother == 1) ret <- ret[[1]]
class(ret) <- c("cfc.pbasis", class(ret))
return (ret)
}
summary.cfc.pbasis <- function(object, ...) {
if (class(object)[2] == "matrix") {
class(object)[1] <- "summary.cfc.pbasis"
attr(object, "popavg") <- FALSE
return (object)
}
tmax <- min(sapply(object, function(x) max(x[,"time"])))
tvec <- unique(sort(unlist(sapply(object, function(x) x[,"time"]))))
tvec <- tvec[tvec < tmax]
ci1 <- rowMeans(sapply(object, function(x) {
approx(x[,"time"], x[,"ci1"], tvec)$y
}))
ci2 <- rowMeans(sapply(object, function(x) {
approx(x[,"time"], x[,"ci2"], tvec)$y
}))
efp <- 1 - (ci1 + ci2)
ret <- cbind(tvec, ci1, ci2, efp)
colnames(ret) <- c("time", "ci1", "ci2", "efp")
attr(ret, "popavg") <- TRUE
class(ret) <- c("summary.cfc.pbasis", class(ret))
return (invisible(ret))
}
plot.summary.cfc.pbasis <- function(x, ...) {
popavg <- attr(x, "popavg")
ylim <- c(0.0, 1.0)
ylab <- if (popavg) "Population Average" else "Probability"
plot(x[,"time"], x[,"efp"], type = "l", col = "black"
, xlab = "Time", ylab = ylab, ylim = ylim)
lines(x[,"time"], x[,"ci1"], col = "red")
lines(x[,"time"], x[,"ci2"], col = "green")
legend("topright", col = c("black", "red", "green")
, legend = c("Event-Free", "CI - Cause 1", "CI - Cause 2")
, lty = rep(1,3))
return (invisible(NULL))
}
# print.summary.cfc.pbasis <- function(x, ...) {
# if (attr(x, "popavg")) cat("Population averages:\n")
# nprint <- 6
# print(head(x, nprint))
# if (nrow(x) > nprint) cat("(", nrow(x)-nprint, " more rows ...)\n", sep = "")
# return (invisible(NULL))
# }
|
#Replication Study 48
#Study_48_Protocol_2_Figure
#R Version 3.3.2

#Required Packages
library(httr)    #version 1.2.1
library(rjson)   #version 0.2.15
library(Rmisc)   #version 1.5
library(ggplot2) #version 2.2.1
library(cowplot) #version 0.7.0

#source("~/credentials.R") #for private use during generation

#Downloads R script "download.OSF.file.R" and sources it to define download.OSF.file()
GET("https://osf.io/hkpjb/?action=download", write_disk("download.OSF.file.R", overwrite = TRUE))
source("download.OSF.file.R")
#Downloads data file 'Study_48_Protocol_2_Data.csv' from https://osf.io/72czk/
#NOTE(review): RPCB_private_access must already be defined (see credentials.R above)
download.OSF.file(GUID = "72czk", Access_Token = RPCB_private_access,
                  file_name = "Study_48_Protocol_2_Data.csv")

#reads raw data from protocol 2 csv file
data2 <- read.csv("Study_48_Protocol_2_Data.csv", header = TRUE, sep = ",")

#total RNA (ng) in the 100uL elution volume
data2$RNA.100uL <- data2$Average.RNA.Concentration * 100
#RNA per harvested cell
data2$RNA.per.cell <- data2$RNA.100uL / data2$Total.Cells.Harvested
#RNA per 1,000 cells (the plotted value)
data2$value <- data2$RNA.per.cell * 1000
#classifies time as character so ggplot treats it as a discrete axis
data2$Time <- as.character(data2$Time)

########## subsets and summarizes Data ##########
lot1dat <- data2[which(data2$Lot == "1"), ]  #lot 1 rows
lot2dat <- data2[which(data2$Lot == "2"), ]  #lot 2 rows
#mean/sd/se/ci of `value` per time point (Rmisc::summarySE)
lot1sum <- summarySE(data = lot1dat, measurevar = "value", groupvars = "Time")
lot2sum <- summarySE(data = lot2dat, measurevar = "value", groupvars = "Time")

########## Generates bar plot for lot 1 ##########
##################################################
#Columns are referenced through the data argument (aes(y = value)) rather
#than the lot1sum$value anti-pattern; the plotted values are identical.
plot.lot1 <- ggplot(lot1sum, aes(x = Time, y = value, fill = Time)) +
  geom_bar(stat = "identity", width = .8, color = "black") +
  geom_errorbar(aes(x = Time, ymin = value - se, ymax = value + se),
                width = .20) +
  coord_cartesian(ylim = c(0, 2.5)) +
  scale_fill_manual(values = c("grey30", "grey30", "grey30")) +
  ylab(expression(paste("Total RNA (ng) \n per 1,000 cells"))) +
  scale_y_continuous(expand = c(0, 0),
                     limits = c(0, 6),
                     breaks = c(0, .5, 1.0, 1.5, 2.0, 2.5),
                     labels = c("0.0", "0.5", "1.0", "1.5", "2.0", "2.5")) +
  scale_x_discrete(labels = c("0hr", "1hr", "24hr")) +
  theme(plot.margin = unit(c(1, 1, 1, 2), "lines"),
        axis.ticks.length = unit(0.25, "cm"),
        axis.text.x = element_text(size = 15, color = "black"),
        axis.text.y = element_text(size = 15, color = "black"),
        axis.title.y = element_text(size = 20),
        axis.title.x = element_blank(),
        panel.background = element_blank(),
        axis.line.y = element_line(),
        legend.position = "none",
        axis.line.x = element_line())

########## Generates bar plot for lot 2 ##########
##################################################
#(comment fixed: this section previously said "lot 1")
plot.lot2 <- ggplot(lot2sum, aes(x = Time, y = value, fill = Time)) +
  geom_bar(stat = "identity", width = .8, color = "black") +
  geom_errorbar(aes(x = Time, ymin = value - se, ymax = value + se),
                width = .20) +
  coord_cartesian(ylim = c(0, 2.5)) +
  scale_fill_manual(values = c("grey30", "grey30", "grey30")) +
  ylab(expression("Total RNA (ng) \n per 1,000 cells")) +
  scale_y_continuous(expand = c(0, 0),
                     limits = c(0, 6),
                     breaks = c(0, .5, 1.0, 1.5, 2.0, 2.5),
                     labels = c("0.0", "0.5", "1.0", "1.5", "2.0", "2.5")) +
  scale_x_discrete(labels = c("0hr", "1hr", "24hr")) +
  theme(plot.margin = unit(c(1, 1, 1, 2), "lines"),
        axis.text.x = element_text(size = 15, color = "black"),
        axis.text.y = element_text(size = 15, color = "black"),
        axis.title.y = element_text(size = 20),
        axis.title.x = element_blank(),
        panel.background = element_blank(),
        axis.line.y = element_line(),
        legend.position = "none",
        axis.line.x = element_line())

#Combine the two panels side by side.
Figure_1B <- plot_grid(plot.lot1, plot.lot2, labels = c("Lot 1", "Lot 2"), label_size = 20, hjust = .01)
#May issue a warning about font, this does not affect the outcome of the plot
Figure_1B

################################################################
# saves file 'Study_48_Figure_1B.pdf' locally
ggsave(file = "Study_48_Figure_1B.pdf", width = 15, height = 6)
|
/elife-30274/sources/Study_48_Figure_1B.R
|
permissive
|
stencila/examples
|
R
| false
| false
| 4,286
|
r
|
#Replication Study 48
#Study_48_Protocol_2_Figure
#R Version 3.3.2
#Required Packages
library(httr) #version 1.2.1
library(rjson) #version 0.2.15
library(Rmisc) #version 1.5
library(ggplot2) #version 2.2.1
library(cowplot) #version 0.7.0
#source("~/credentials.R") #for private use during generation
#Downloads R script "download.OSF.file.R"
GET("https://osf.io/hkpjb/?action=download", write_disk("download.OSF.file.R", overwrite = TRUE))
source("download.OSF.file.R")
#calls the download.OSF.file
#Downloads data file 'Study_48_Protocol_2_Data.csv' from https://osf.io/72czk/
download.OSF.file(GUID="72czk",Access_Token=RPCB_private_access,
file_name="Study_48_Protocol_2_Data.csv")
#names raw data from protocol 2 from csv file
data2 <- read.csv("Study_48_Protocol_2_Data.csv", header=T, sep=",")
#creates new column calculating RNA in 100uL
data2$RNA.100uL <- data2$Average.RNA.Concentration*100
##calculates RNA per cell
data2$RNA.per.cell <- data2$RNA.100uL/data2$Total.Cells.Harvested
#calculates RNA per 1000 cells
data2$value <- data2$RNA.per.cell*1000
#classifies time as character
data2$Time <- as.character(data2$Time)
########## subsets and summarizes Data ##########
#subsets data on lot 1
lot1dat <- data2[which(data2$Lot=="1"),]
#subsets data on lot 2
lot2dat <- data2[which(data2$Lot=="2"),]
#summarizes lot 1 data
lot1sum <- summarySE(data=lot1dat, measurevar = "value", groupvars = "Time")
#summarizes lot 2 data
lot2sum <- summarySE(data=lot2dat, measurevar = "value", groupvars = "Time")
########## Generates bar plot for lot 1 ##########
##################################################
plot.lot1 <- ggplot(lot1sum, aes(x=Time, y=lot1sum$value, fill=Time)) +
geom_bar(stat="identity", width=.8, color = "black") +
geom_errorbar(aes(x=Time, ymin=value-se, ymax=value+se),
width=.20)+
coord_cartesian(ylim=c(0,2.5)) +
scale_fill_manual(values = c("grey30", "grey30","grey30")) +
ylab(expression(paste("Total RNA (ng) \n per 1,000 cells"))) +
scale_y_continuous(expand = c(0,0),
limits = c(0,6),
breaks = c(0, .5, 1.0, 1.5, 2.0, 2.5),
labels = c("0.0", "0.5", "1.0", "1.5", "2.0", "2.5")) +
scale_x_discrete(labels = c("0hr", "1hr", "24hr")) +
theme(plot.margin = unit(c(1,1,1,2), "lines"),
axis.ticks.length = unit(0.25, "cm"),
axis.text.x = element_text(size=15, color = "black"),
axis.text.y = element_text(size = 15, color = "black"),
axis.title.y = element_text(size = 20),
axis.title.x = element_blank(),
panel.background = element_blank(),
axis.line.y = element_line(),
legend.position = "none",
axis.line.x = element_line())
########## Generates bar plot for lot 1 ##########
##################################################
plot.lot2 <- ggplot(lot2sum, aes(x=Time, y=lot2sum$value, fill=Time)) +
geom_bar(stat="identity", width=.8, color = "black") +
geom_errorbar(aes(x=Time, ymin=value-se, ymax=value+se),
width=.20)+
coord_cartesian(ylim=c(0,2.5)) +
scale_fill_manual(values = c("grey30","grey30","grey30")) +
ylab(expression("Total RNA (ng) \n per 1,000 cells")) +
scale_y_continuous(expand = c(0,0),
limits = c(0,6),
breaks = c(0, .5, 1.0, 1.5, 2.0, 2.5),
labels = c("0.0", "0.5", "1.0", "1.5", "2.0", "2.5")) +
scale_x_discrete(labels = c("0hr", "1hr", "24hr")) +
theme(plot.margin = unit(c(1,1,1,2), "lines"),
axis.text.x = element_text(size=15, color = "black"),
axis.text.y = element_text(size = 15, color = "black"),
axis.title.y = element_text(size = 20),
axis.title.x = element_blank(),
panel.background = element_blank(),
axis.line.y = element_line(),
legend.position = "none",
axis.line.x = element_line())
# Combine the two lot plots side by side with panel labels (cowplot).
Figure_1B <- plot_grid(plot.lot1, plot.lot2, labels = c("Lot 1", "Lot 2"), label_size = 20, hjust = .01)
#May issue a warning about font, this does not affect the outcome of the plot
# Display the combined figure; ggsave() below defaults to the most
# recently displayed ggplot, so this print must precede it.
Figure_1B
################################################################
# saves file 'Study_48_Figure_1B.pdf' locally
ggsave(file = "Study_48_Figure_1B.pdf", width = 15, height = 6)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{valid_year_month}
\alias{valid_year_month}
\title{Ensure that years and months are within a certain time span}
\usage{
valid_year_month(years, months, begin = "1970-01-01", end = Sys.Date())
}
\arguments{
\item{years}{a numeric vector of years}
\item{months}{a numeric vector of months}
\item{begin}{the earliest valid date, defaults to the UNIX epoch}
\item{end}{the most recent valid date, defaults to today}
}
\value{
a \code{\link{data.frame}} with four variables: \code{year},
\code{month}, \code{month_begin} (the first day of the month), and
\code{month_end} (the last day of the month).
}
\description{
Ensure that years and months are within a certain time span
}
\details{
Often, a data source will \code{begin} and \code{end} at
known points in time. At the same time, many data sources are divided
into monthly archives. Given a set of \code{years} and \code{months},
any combination of which should be considered valid, this function will
return a \code{\link{data.frame}} in which each row is one of those
valid year-month pairs. Further, if the optional \code{begin} and
\code{end} arguments are specified, the rows will be filtered to lie
within that time interval. Furthermore, the first and last day of
each month are computed.
}
\examples{
valid_year_month(years = 1999:2001, months = c(1:3, 7))
# Mets in the World Series since the UNIX epoch
mets_ws <- c(1969, 1973, 1986, 2000, 2015)
valid_year_month(years = mets_ws, months = 10)
# Mets in the World Series during the Clinton administration
if (require(ggplot2)) {
clinton <- filter(presidential, name == "Clinton")
valid_year_month(years = mets_ws, months = 10,
begin = clinton$start, end = clinton$end)
}
}
|
/man/valid_year_month.Rd
|
no_license
|
edgar-cornejo/etl
|
R
| false
| true
| 1,788
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{valid_year_month}
\alias{valid_year_month}
\title{Ensure that years and months are within a certain time span}
\usage{
valid_year_month(years, months, begin = "1970-01-01", end = Sys.Date())
}
\arguments{
\item{years}{a numeric vector of years}
\item{months}{a numeric vector of months}
\item{begin}{the earliest valid date, defaults to the UNIX epoch}
\item{end}{the most recent valid date, defaults to today}
}
\value{
a \code{\link{data.frame}} with four variables: \code{year},
\code{month}, \code{month_begin} (the first day of the month), and
\code{month_end} (the last day of the month).
}
\description{
Ensure that years and months are within a certain time span
}
\details{
Often, a data source will \code{begin} and \code{end} at
known points in time. At the same time, many data sources are divided
into monthly archives. Given a set of \code{years} and \code{months},
any combination of which should be considered valid, this function will
return a \code{\link{data.frame}} in which each row is one of those
valid year-month pairs. Further, if the optional \code{begin} and
\code{end} arguments are specified, the rows will be filter to lie
within that time interval. Furthermore, the first and last day of
each month are computed.
}
\examples{
valid_year_month(years = 1999:2001, months = c(1:3, 7))
# Mets in the World Series since the UNIX epoch
mets_ws <- c(1969, 1973, 1986, 2000, 2015)
valid_year_month(years = mets_ws, months = 10)
# Mets in the World Series during the Clinton administration
if (require(ggplot2)) {
clinton <- filter(presidential, name == "Clinton")
valid_year_month(years = mets_ws, months = 10,
begin = clinton$start, end = clinton$end)
}
}
|
library(caret)
library(plyr)
require(neuralnet)

# Import data: car-evaluation attributes including the target factor
# `buying` (one-hot levels below: high / low / med / vhigh).
cardata <- readRDS(file = "data/cardata.Rds")

# Shuffle the rows so the train/test split below is random.
# https://medium.com/@tyagi.sudarshini/how-to-shuffle-a-dataframe-in-r-by-rows-e7971cd7949e
set.seed(500) # sets seed to 500 for recreation
rows <- sample(nrow(cardata))
r_cardata <- cardata[rows, ]

# 75% / 25% train/test split.
# https://stackoverflow.com/questions/17200114/how-to-split-data-into-training-testing-sets-using-sample-function
dt <- sort(sample(nrow(r_cardata), nrow(r_cardata) * .75))
train <- r_cardata[dt, ]
test <- r_cardata[-dt, ]

# One-hot encode the factor columns.
# https://amunategui.github.io/dummyVar-Walkthrough/
# FIX: predict() was previously applied to the full shuffled data set
# (r_cardata) for both encoders, so one_hot_data and one_hot_test each
# contained *every* row and the test evaluation leaked training data.
one_hot <- dummyVars("~ .", data = train)
one_hot_data <- data.frame(predict(one_hot, newdata = train))
one_hot <- dummyVars("~ .", data = test)
one_hot_test <- data.frame(predict(one_hot, newdata = test))

# One-hot encoding of the entire data set, used only for cross-validation.
one_hot_cross <- dummyVars("~ .", data = r_cardata)
one_hot_cross_var <- data.frame(predict(one_hot_cross, r_cardata))

# Number of missing values in the training partition.
# https://discuss.analyticsvidhya.com/t/how-to-count-the-missing-value-in-r/2949/9
missing_values <- sum(is.na(train))

# Fit a one-hidden-layer (3-unit) network predicting the one-hot encoded
# `buying` class from all remaining columns.
# https://www.datacamp.com/community/tutorials/neural-network-models-r
nn <- neuralnet("buying.high + buying.low + buying.med + buying.vhigh ~ .",
                data = one_hot_data, hidden = 3, act.fct = "logistic",
                linear.output = FALSE)
plot(nn)
result.mat <- nn$result.matrix

# Training accuracy: predicted class = output unit with highest activation.
result <- compute(nn, one_hot_data)
ex_res <- result$net.result
act_dat <- max.col(one_hot_data[, 1:4])
res_ex_2 <- max.col(ex_res)
mean(res_ex_2 == act_dat)

# Test accuracy.
test_result <- compute(nn, one_hot_test)
# FIX: previously read result$net.result (the *training* predictions),
# so the reported "test" accuracy was actually training accuracy.
test_net_result <- test_result$net.result
act_test_dat <- max.col(one_hot_test[, 1:4])
res_test_dat <- max.col(test_net_result)
mean(act_test_dat == res_test_dat)

# Repeated random-split cross-validation (k random 95/5 splits), adapted
# from https://www.r-bloggers.com/multilabel-classification-with-neuralnet-package/
k <- 10
outs <- NULL
proportion <- 0.95
for (i in seq_len(k)) {
  index <- sample(1:nrow(one_hot_cross_var), round(proportion * nrow(one_hot_cross_var)))
  train_cv <- one_hot_cross_var[index, ]
  test_cv <- one_hot_cross_var[-index, ]
  # FIX: the network was previously trained on the full data set
  # (one_hot_cross_var), so every "held-out" fold had already been seen
  # during training and the CV accuracy was meaningless.
  nn <- neuralnet("buying.high + buying.low + buying.med + buying.vhigh ~ .",
                  data = train_cv, hidden = 3, act.fct = "logistic",
                  linear.output = FALSE)
  # Predictions on the held-out fold
  pr.nn <- compute(nn, test_cv)
  pr.nn_ <- pr.nn$net.result
  # Accuracy on the held-out fold
  original_values <- max.col(test_cv[, 1:4])
  pr.nn_2 <- max.col(pr.nn_)
  outs[i] <- mean(pr.nn_2 == original_values)
}
mean(outs)
|
/project1/R/onehot.r
|
no_license
|
skarning/machine-learning-projects
|
R
| false
| false
| 2,807
|
r
|
library(caret)
library(plyr)
require(neuralnet)
#Import data
cardata <- readRDS(file = "data/cardata.Rds")
#https://medium.com/@tyagi.sudarshini/how-to-shuffle-a-dataframe-in-r-by-rows-e7971cd7949e
# randomize data
set.seed(500) #sets seed to 500 for recreation
rows <- sample(nrow(cardata))
r_cardata <- cardata[rows, ]
#https://stackoverflow.com/questions/17200114/how-to-split-data-into-training-testing-sets-using-sample-function
#Split dataset
dt <- sort(sample(nrow(r_cardata), nrow(r_cardata)*.75))
train <- r_cardata[dt,]
test <- r_cardata[-dt,]
#https://amunategui.github.io/dummyVar-Walkthrough/
# One hot encoding
one_hot <- dummyVars("~ .", data=train)
one_hot_mat <- predict(one_hot, newdata = r_cardata)
one_hot_data <- data.frame(one_hot_mat)
one_hot <- dummyVars("~ .", data=test)
one_hot_mat <- predict(one_hot, r_cardata)
one_hot_test <- data.frame(one_hot_mat)
# One hot encoding the entire dataset for cross validation
one_hot_cross <- dummyVars("~ .", data=r_cardata)
one_hot_mat_cross <- predict(one_hot_cross, r_cardata)
one_hot_cross_var <- data.frame(one_hot_mat_cross)
# https://discuss.analyticsvidhya.com/t/how-to-count-the-missing-value-in-r/2949/9
# Find the number of missing values
missing_values <- sum(is.na(train))
# Neuralnet prediction
# Neural network fitting from: https://www.datacamp.com/community/tutorials/neural-network-models-r
nn=neuralnet("buying.high + buying.low + buying.med + buying.vhigh ~ .", data=one_hot_data, hidden=3, act.fct="logistic", linear.output = FALSE)
plot(nn)
result.mat <- nn$result.matrix
# Training data
result <- compute(nn, one_hot_data)
ex_res <- result$net.result
# Calculate the accuracy
act_dat <- max.col(one_hot_data[,1:4])
res_ex_2 <- max.col(ex_res)
mean(res_ex_2 == act_dat)
# Test data
test_result <- compute(nn, one_hot_test)
test_net_result <- result$net.result
# Calculate the accuracy
act_test_dat <- max.col(one_hot_test[,1:4])
res_test_dat <- max.col(test_net_result)
mean(act_test_dat == res_test_dat)
# Cross validation
# Code taken from:
# https://www.r-bloggers.com/multilabel-classification-with-neuralnet-package/
k <- 10
outs <- NULL
proportion <- 0.95
for (i in 1:k) {
index <- sample(1:nrow(one_hot_cross_var), round(proportion*nrow(one_hot_cross_var)))
train_cv <- one_hot_cross_var[index, ]
test_cv <- one_hot_cross_var[-index, ]
nn=neuralnet("buying.high + buying.low + buying.med + buying.vhigh ~ .", data=one_hot_cross_var, hidden=3, act.fct="logistic", linear.output = FALSE)
# Compute predictions
pr.nn <- compute(nn, test_cv)
# Extract results
pr.nn_ <- pr.nn$net.result
# Accuracy (test set)
original_values <- max.col(test_cv[, 1:4])
pr.nn_2 <- max.col(pr.nn_)
outs[i] <- mean(pr.nn_2 == original_values)
}
mean(outs)
|
# Development script: regenerates the LaTeX results tables for the
# sovereign-crisis analysis under ./inst/RESULTS/.  Intended to run from
# the package root; load_all() loads the package's own functions
# (loadCrisisDB, tabulateCrises, getSovBenchmarks, ...).
require(devtools)
require(pipeR)
load_all()
## -------------------------------------------------------------------------- ##
## SUMMARY OF CRISIS EVENTS
## -------------------------------------------------------------------------- ##
# Tabulate crisis events per country (ISO3) and year, from 1960 onward.
crisis <- loadCrisisDB()
out <-
  tabulateCrises(
    crisisDT = crisis,
    crisisTypes = c(
      'Stock Market Crash',
      'Currency Crisis',
      'Inflation Crisis',
      'Foreign Sov Debt',
      'Domestic Sov Debt',
      'Banking Crisis'
    ),
    min.time = 1960,
    idCol = "ISO3",
    timeCol = "Year",
    outfile = './inst/RESULTS/crisisdb.tex'
  )
## -------------------------------------------------------------------------- ##
## DATA AVAILABILITY                                                          ##
## -------------------------------------------------------------------------- ##
# Human-readable labels for the benchmark variables, used by the
# availability table below.
bench <- getSovBenchmarks()
lookup.table <- rbindlist(
  list(
    data.table(name = 'ratingnum',
               label = 'S&P LT Foreign Issuer Sovereign Rating'),
    data.table(name = 'cds',
               label = '5-Year Sovereign CDS Spread (Source: Bloomberg)'),
    data.table(name = 'spread',
               label = 'Treasury Bond Spread above U.S. Treasury Yield (in b.p)')
  )
)
r <-
  tabulateDataAvailability(dt = bench,
                           outfile = "./inst/RESULTS/availability.tex",
                           lookup.table = lookup.table,
                           selCols = c("label", "Availability"))
## -------------------------------------------------------------------------- ##
## STUDY CORRELATIIONS                                                        ##
## -------------------------------------------------------------------------- ##
# Correlate the two risk indicators (Altman z-score PD, aggregated
# Bankscope PDs) with the market benchmarks, in four variants:
# {levels, first differences} x {Pearson, Spearman}.
# '`*`(1)' is the identity transform (multiply by 1);
# 'shift(lag = -1, dif = TRUE)' takes one-period differences.
alt <- getAltmanZscore()
bs <- getAggregatedBankscopePDs()
dtList <-
  list(alt,bs)
dt <-
  augmentBenchmarkDataset(crisisdb = alternativeCrisisDB(),
                          dtList = dtList)
LaTeXTableGems:::createLatexTableHeader(
  outfile = './inst/RESULTS/tabulateCorrelations-head.tex')
# Pearson correlations on levels.
tabulateCorrelationsByTime(
  data = dt,
  xvar = c('zscorepd75','SC_CLOSURE_ALL.Q3.'),
  xvarConvert = '`*`(1)',
  benchVars = c('ratingnum','spread','cds'),
  benchConvert = '`*`(1)',
  method = 'pearson',
  outfile = './inst/RESULTS/tabulateCorrelations.tex'
)
# Pearson correlations on first differences.
tabulateCorrelationsByTime(
  data = dt,
  xvar = c('zscorepd75','SC_CLOSURE_ALL.Q3.'),
  xvarConvert = 'shift(lag = -1, dif = TRUE)',
  benchVars = c('ratingnum','spread','cds'),
  benchConvert = 'shift(lag = -1, dif = TRUE)',
  method = 'pearson',
  outfile = './inst/RESULTS/tabulateCorrelations-dif.tex'
)
# Spearman (rank) correlations on levels.
tabulateCorrelationsByTime(
  data = dt,
  xvar = c('zscorepd75','SC_CLOSURE_ALL.Q3.'),
  xvarConvert = '`*`(1)',
  benchVars = c('ratingnum','spread','cds'),
  benchConvert = '`*`(1)',
  method = 'spearman',
  outfile = './inst/RESULTS/tabulateCorrelations-spearman.tex'
)
# Spearman (rank) correlations on first differences.
tabulateCorrelationsByTime(
  data = dt,
  xvar = c('zscorepd75','SC_CLOSURE_ALL.Q3.'),
  xvarConvert = 'shift(lag = -1, dif = TRUE)',
  benchVars = c('ratingnum','spread','cds'),
  benchConvert = 'shift(lag = -1, dif = TRUE)',
  method = 'spearman',
  outfile = './inst/RESULTS/tabulateCorrelations-spearman-dif.tex'
)
## -------------------------------------------------------------------------- ##
## RELATION BETWEEN TAX REVENUES AND SOVEREIGN DEFAULT                        ##
## -------------------------------------------------------------------------- ##
## dt %>>%
##   (df ~ procExpand(df,
##                    by = 'iso3',
##                    convert =
##                      list('ratingnum ~ shift(lag=-1,dif=TRUE)'))) %>>%
##   (? df ~ table(df[[2]])) %>>%
##   (df ~ df[[2]]) %>>%
##   hist(40)
# Scratch demo of the pipeR Pipe() object API on mtcars; has no effect
# on the tables generated above.
Pipe(mtcars)$
  data.table(.)$
  .(? df ~ summary(df))$
  .(? ncol(.) -> n)$
  .(? length(.[,carb]))$
  .(~ .[,carb] %>>% hist(10))$
  invisible()
|
/scripts/exec.tables.R
|
no_license
|
Karagul/SovereignCrisis
|
R
| false
| false
| 3,993
|
r
|
require(devtools)
require(pipeR)
load_all()
## -------------------------------------------------------------------------- ##
## SUMMARY OF CRISIS EVENTS
## -------------------------------------------------------------------------- ##
crisis <- loadCrisisDB()
out <-
tabulateCrises(
crisisDT = crisis,
crisisTypes = c(
'Stock Market Crash',
'Currency Crisis',
'Inflation Crisis',
'Foreign Sov Debt',
'Domestic Sov Debt',
'Banking Crisis'
),
min.time = 1960,
idCol = "ISO3",
timeCol = "Year",
outfile = './inst/RESULTS/crisisdb.tex'
)
## -------------------------------------------------------------------------- ##
## DATA AVAILABILITY ##
## -------------------------------------------------------------------------- ##
bench <- getSovBenchmarks()
lookup.table <- rbindlist(
list(
data.table(name = 'ratingnum',
label = 'S&P LT Foreign Issuer Sovereign Rating'),
data.table(name = 'cds',
label = '5-Year Sovereign CDS Spread (Source: Bloomberg)'),
data.table(name = 'spread',
label = 'Treasury Bond Spread above U.S. Treasury Yield (in b.p)')
)
)
r <-
tabulateDataAvailability(dt = bench,
outfile = "./inst/RESULTS/availability.tex",
lookup.table = lookup.table,
selCols = c("label", "Availability"))
## -------------------------------------------------------------------------- ##
## STUDY CORRELATIIONS ##
## -------------------------------------------------------------------------- ##
alt <- getAltmanZscore()
bs <- getAggregatedBankscopePDs()
dtList <-
list(alt,bs)
dt <-
augmentBenchmarkDataset(crisisdb = alternativeCrisisDB(),
dtList = dtList)
LaTeXTableGems:::createLatexTableHeader(
outfile = './inst/RESULTS/tabulateCorrelations-head.tex')
tabulateCorrelationsByTime(
data = dt,
xvar = c('zscorepd75','SC_CLOSURE_ALL.Q3.'),
xvarConvert = '`*`(1)',
benchVars = c('ratingnum','spread','cds'),
benchConvert = '`*`(1)',
method = 'pearson',
outfile = './inst/RESULTS/tabulateCorrelations.tex'
)
tabulateCorrelationsByTime(
data = dt,
xvar = c('zscorepd75','SC_CLOSURE_ALL.Q3.'),
xvarConvert = 'shift(lag = -1, dif = TRUE)',
benchVars = c('ratingnum','spread','cds'),
benchConvert = 'shift(lag = -1, dif = TRUE)',
method = 'pearson',
outfile = './inst/RESULTS/tabulateCorrelations-dif.tex'
)
tabulateCorrelationsByTime(
data = dt,
xvar = c('zscorepd75','SC_CLOSURE_ALL.Q3.'),
xvarConvert = '`*`(1)',
benchVars = c('ratingnum','spread','cds'),
benchConvert = '`*`(1)',
method = 'spearman',
outfile = './inst/RESULTS/tabulateCorrelations-spearman.tex'
)
tabulateCorrelationsByTime(
data = dt,
xvar = c('zscorepd75','SC_CLOSURE_ALL.Q3.'),
xvarConvert = 'shift(lag = -1, dif = TRUE)',
benchVars = c('ratingnum','spread','cds'),
benchConvert = 'shift(lag = -1, dif = TRUE)',
method = 'spearman',
outfile = './inst/RESULTS/tabulateCorrelations-spearman-dif.tex'
)
## -------------------------------------------------------------------------- ##
## RELATION BETWEEN TAX REVENUES AND SOVEREIGN DEFAULT ##
## -------------------------------------------------------------------------- ##
## dt %>>%
## (df ~ procExpand(df,
## by = 'iso3',
## convert =
## list('ratingnum ~ shift(lag=-1,dif=TRUE)'))) %>>%
## (? df ~ table(df[[2]])) %>>%
## (df ~ df[[2]]) %>>%
## hist(40)
Pipe(mtcars)$
data.table(.)$
.(? df ~ summary(df))$
.(? ncol(.) -> n)$
.(? length(.[,carb]))$
.(~ .[,carb] %>>% hist(10))$
invisible()
|
# Building a very simple classifier, based solely on historical proportions.
# Baseline model for the Hearthstone data: predict each game's hero using
# only the overall hero frequencies, and score the model by log-likelihood.
library(data.table)
games <- read.csv("wog_games.csv", stringsAsFactors = FALSE)
games <- as.data.table(games)
# Make a table of likelihoods, not conditional on anything.
HeroLikelihood <- games[, .N, by = hero]
HeroLikelihood[, proportion := N / sum(N)]
# Obviously the sum of proportion should be 1...
sum(HeroLikelihood$proportion) # ... and it is 1.
# Set the key of HeroLikelihood and games so that we can easily pull the
# proportion variable from one to the other.
setkey(HeroLikelihood, "hero")
setkey(games, "hero")
# Each proportion is our guess for the likelihood of seeing that hero in
# the data: the keyed join assigns each game its hero's overall proportion.
games[HeroLikelihood, HeroGuess := proportion]
# The sum of the natural log of all of our guesses (the log-likelihood)
# is a measure of how well this estimator has done. If we can't beat this
# score, we've accomplished very little.
games[, sum(log(HeroGuess))]
|
/Hearthstone Analysis 1.R
|
no_license
|
MichaelGarrison89/Hearthstone-Project
|
R
| false
| false
| 949
|
r
|
# Building a very simple classifier, based solely on historical proportions.
library(data.table)
games <- read.csv("wog_games.csv", stringsAsFactors = FALSE)
games <- as.data.table(games)
# Make a table of likelihoods, not conditional on anything.
HeroLikelihood <- games[, .N, by = hero]
HeroLikelihood[, proportion := N / sum(N)]
# Obviously the sum of proportion should be 1...
sum(HeroLikelihood$proportion) # ... and it is 1.
# Set the key of HeroLikelihood and games so that we can easily pull the
# proportion variable from one to the other.
setkey(HeroLikelihood, "hero")
setkey(games, "hero")
# Each proportion is our guess for the likelihood of seeing that proportion in
# the data.
games[HeroLikelihood, HeroGuess := proportion]
# The sume of the natural log of all of our guesses is a measure of how well
# this estimator has done. If we can't beat this score, we've accomplished
# very little.
games[, sum(log(HeroGuess))]
|
## Getting & Cleaning Data course project: read the UCI HAR test and train
## sets, merge them, keep only mean/std measurements, give descriptive
## variable names, label activities, and write a tidy data set (mean of
## every variable per subject and activity) to tidy_data.txt.

##reading files
testx<- read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/test/X_test.txt")
testy<- read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/test/y_test.txt")
subject_test<-read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/test/subject_test.txt")
trainx<- read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/train/X_train.txt")
trainy<- read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/train/y_train.txt")
subject_train<-read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/train/subject_train.txt")
##rename variables because they all have the same name (V1) -> problem when merging
library("dplyr")
subject_test <-rename(subject_test, ID= V1)
testy<-rename(testy, activities= V1)
trainy<-rename(trainy, activities = V1)
subject_train <-rename(subject_train, ID= V1)
#merging test data sets together
dataTest<- cbind(subject_test,testy,testx)
##merging train data sets together
dataTrain<- cbind(subject_train,trainy,trainx)
##merging the two datasets together
data<-rbind(dataTest, dataTrain)
View(data)
##subsetting only the mean and std columns for each measurement
data_new<-select(data, ID, activities,V1 :V6, V41 :V46, V81 :V86,V121: V126, V161:V166, V201,V202, V214, V215, V227,V228, V240, V241, V253, V254, V266:V271, V345:V350, V424:V429,V503:V504,V516,V517,V529,V530,V542,V543)
##renaming variables (descriptive names taken from features.txt)
data_new<-rename(data_new, tBodyAccmeanX = V1,tBodyAccmeanY = V2,tBodyAccmeanZ = V3, tBodyAccstdX=V4, tBodyAccstdY= V5,tBodyAccstdZ= V6)
data_new<-rename(data_new,tGravityAccmeanX = V41,tGravityAccmeanY = V42, tGravityAccmeanZ = V43, tGravityAccstdX=V44, tGravityAccstdY= V45,tGravityAccstdZ= V46)
data_new<-rename(data_new, tBodyAccJerkmeanX = V81,tBodyAccJerkmeanY = V82,tBodyAccJerkmeanZ = V83, tBodyAccJerkstdX=V84, tBodyAccJerkstdY= V85,tBodyAccJerkstdZ= V86)
data_new<-rename(data_new, tBodyGyromeanX = V121,tBodyGyromeanY = V122,tBodyGyromeanZ = V123, tBodyGyrostdX=V124, tBodyGyrostdY= V125,tBodyGyrostdZ= V126)
data_new<-rename(data_new, tBodyGyroJerkmeanX = V161,tBodyGyroJerkmeanY = V162,tBodyGyroJerkmeanZ = V163, tBodyGyroJerkstdX=V164, tBodyGyroJerkstdY= V165,tBodyGyroJerkstdZ= V166)
data_new<-rename(data_new, tBodyAccMagmean = V201,tBodyAccMagstd = V202, tGravityAccMagmean = V214, tGravityAccMagstd=V215, tBodyAccJerkMagmean= V227, tBodyAccJerkMagstd= V228)
data_new<-rename(data_new, tBodyGyroMagmean = V240,tBodyGyroMagstd = V241,tBodyGyroJerkMagmean = V253, tBodyGyroJerkMagstd=V254, fBodyAccmeanX= V266,fBodyAccmeanY= V267, fBodyAccmeanZ=V268)
data_new<-rename(data_new, fBodyAccstdX = V269, fBodyAccstdY = V270,fBodyAccstdZ = V271, fBodyAccJerkmeanX=V345, fBodyAccJerkmeanY= V346,fBodyAccJerkmeanZ= V347)
data_new<-rename(data_new, fBodyAccJerkstdX = V348,fBodyAccJerkstdY = V349,fBodyAccJerkstdZ = V350, fBodyGyromeanX=V424, fBodyGyromeanY= V425,fBodyGyromeanZ= V426,fBodyGyrostdX= V427,fBodyGyrostdY= V428)
data_new<-rename(data_new, fBodyGyrostdZ = V429,fBodyAccMagmean = V503,fBodyAccMagstd = V504,fBodyBodyAccJerkMagmean=V516, fBodyBodyAccJerkMagstd= V517,fBodyBodyGyroMagmean= V529,fBodyBodyGyroMagstd= V530,fBodyBodyGyroJerkMagmean=V542, fBodyBodyGyroJerkMagstd=V543)
##recoding the activities
# NOTE(review): plyr is loaded *after* dplyr, which masks several dplyr
# functions (e.g. rename); all dplyr-only calls above have already run,
# so this is safe at this point in the script.
install.packages("plyr")
library("plyr")
data_new$activities <- mapvalues(data$activities, from= c(1, 2, 3, 4, 5, 6 ), to=c("WALKING","WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS","SITTING", "STANDING","LAYING" ))
##creating a tidy data set
# FIX: was `install-package("reshape2")` -- a syntax error (subtraction of
# two undefined names) that aborted the script at this point.
install.packages("reshape2")
library("reshape2")
molten_data<-melt(data_new, id=c("ID", "activities"))
tidy_data<-dcast(molten_data, ID+activities~variable, mean)
write.table(tidy_data,file="tidy_data.txt", row.names=FALSE)
|
/run_analysis.R
|
no_license
|
julira/Getting-and-cleaning-data-Assigment-3
|
R
| false
| false
| 3,881
|
r
|
##reading files
testx<- read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/test/X_test.txt")
testy<- read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/test/y_test.txt")
subject_test<-read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/test/subject_test.txt")
trainx<- read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/train/X_train.txt")
trainy<- read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/train/y_train.txt")
subject_train<-read.table("C:/Users/Yulia/Desktop/fernstudium/datascientist/R/assigmnets/UCI HAR Dataset/train/subject_train.txt")
##rename variable because they are all have the same name->problem by merging
library("dplyr")
subject_test <-rename(subject_test, ID= V1)
testy<-rename(testy, activities= V1)
trainy<-rename(trainy, activities = V1)
subject_train <-rename(subject_train, ID= V1)
#merging test data sets together
dataTest<- cbind(subject_test,testy,testx)
##merging train data sets together
dataTrain<- cbind(subject_train,trainy,trainx)
##merging two datasets togheter
data<-rbind(dataTest, dataTrain)
View(data)
##subsetting only meaNS AND ST for each activity
data_new<-select(data, ID, activities,V1 :V6, V41 :V46, V81 :V86,V121: V126, V161:V166, V201,V202, V214, V215, V227,V228, V240, V241, V253, V254, V266:V271, V345:V350, V424:V429,V503:V504,V516,V517,V529,V530,V542,V543)
##renaming variables
data_new<-rename(data_new, tBodyAccmeanX = V1,tBodyAccmeanY = V2,tBodyAccmeanZ = V3, tBodyAccstdX=V4, tBodyAccstdY= V5,tBodyAccstdZ= V6)
data_new<-rename(data_new,tGravityAccmeanX = V41,tGravityAccmeanY = V42, tGravityAccmeanZ = V43, tGravityAccstdX=V44, tGravityAccstdY= V45,tGravityAccstdZ= V46)
data_new<-rename(data_new, tBodyAccJerkmeanX = V81,tBodyAccJerkmeanY = V82,tBodyAccJerkmeanZ = V83, tBodyAccJerkstdX=V84, tBodyAccJerkstdY= V85,tBodyAccJerkstdZ= V86)
data_new<-rename(data_new, tBodyGyromeanX = V121,tBodyGyromeanY = V122,tBodyGyromeanZ = V123, tBodyGyrostdX=V124, tBodyGyrostdY= V125,tBodyGyrostdZ= V126)
data_new<-rename(data_new, tBodyGyroJerkmeanX = V161,tBodyGyroJerkmeanY = V162,tBodyGyroJerkmeanZ = V163, tBodyGyroJerkstdX=V164, tBodyGyroJerkstdY= V165,tBodyGyroJerkstdZ= V166)
data_new<-rename(data_new, tBodyAccMagmean = V201,tBodyAccMagstd = V202, tGravityAccMagmean = V214, tGravityAccMagstd=V215, tBodyAccJerkMagmean= V227, tBodyAccJerkMagstd= V228)
data_new<-rename(data_new, tBodyGyroMagmean = V240,tBodyGyroMagstd = V241,tBodyGyroJerkMagmean = V253, tBodyGyroJerkMagstd=V254, fBodyAccmeanX= V266,fBodyAccmeanY= V267, fBodyAccmeanZ=V268)
data_new<-rename(data_new, fBodyAccstdX = V269, fBodyAccstdY = V270,fBodyAccstdZ = V271, fBodyAccJerkmeanX=V345, fBodyAccJerkmeanY= V346,fBodyAccJerkmeanZ= V347)
data_new<-rename(data_new, fBodyAccJerkstdX = V348,fBodyAccJerkstdY = V349,fBodyAccJerkstdZ = V350, fBodyGyromeanX=V424, fBodyGyromeanY= V425,fBodyGyromeanZ= V426,fBodyGyrostdX= V427,fBodyGyrostdY= V428)
data_new<-rename(data_new, fBodyGyrostdZ = V429,fBodyAccMagmean = V503,fBodyAccMagstd = V504,fBodyBodyAccJerkMagmean=V516, fBodyBodyAccJerkMagstd= V517,fBodyBodyGyroMagmean= V529,fBodyBodyGyroMagstd= V530,fBodyBodyGyroJerkMagmean=V542, fBodyBodyGyroJerkMagstd=V543)
##recoding the activities
install.packages("plyr")
library("plyr")
data_new$activities <- mapvalues(data$activities, from= c(1, 2, 3, 4, 5, 6 ), to=c("WALKING","WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS","SITTING", "STANDING","LAYING" ))
##creatin a tidy data set
install-package("reshape2")
library("reshape2")
molten_data<-melt(data_new, id=c("ID", "activities"))
tidy_data<-dcast(molten_data, ID+activities~variable, mean)
write.table(tidy_data,file="tidy_data.txt", row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primitive.R
\name{prim}
\alias{prim}
\title{Primitive object}
\usage{
prim(nm)
}
\arguments{
\item{nm}{The name of an exported primitive as a string or symbol.}
}
\description{
This just grabs a primitive object (type BUILTINSXP or SPECIALSXP)
from the base environment.
}
|
/man/prim.Rd
|
no_license
|
lionelhenry/robin
|
R
| false
| true
| 351
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primitive.R
\name{prim}
\alias{prim}
\title{Primitive object}
\usage{
prim(nm)
}
\arguments{
\item{nm}{The name of an exported primitive as a string or symbol.}
}
\description{
This just grabs a primitive object (type BUILTINSXP or SPECIALSXP)
from the base environment.
}
|
# Shiny module server for the Clinical Outcomes survival panel.
#
# Lets the user pick a survival endpoint, builds the per-sample
# (time, status, group) table for the current cohort, renders a
# Kaplan-Meier plot stratified by the cohort's sample groups, and
# offers the plotted values as a CSV download.
#
# @param id          Module id; namespaces all inputs/outputs.
# @param cohort_obj  Reactive returning the current cohort object; the
#   fields read here are feature_tbl, group_display and plot_colors.
clinical_outcomes_survival_server <- function(id, cohort_obj) {
  shiny::moduleServer(
    id,
    function(input, output, session) {
      ns <- session$ns
      # Endpoint picker, rebuilt whenever the cohort's feature table changes.
      output$time_feature_selection_ui <- shiny::renderUI({
        shiny::selectInput(
          inputId = ns("time_feature_choice"),
          label = "Select or Search for Survival Endpoint",
          choices = build_co_survival_list(
            cohort_obj()$feature_tbl
          )
        )
      })
      # Status (event indicator) feature paired with the chosen time feature.
      status_feature_choice <- shiny::reactive({
        shiny::req(input$time_feature_choice)
        get_co_status_feature(input$time_feature_choice)
      })
      # Per-sample table of survival values for the selected endpoint;
      # downstream code relies on its time, status and group columns.
      survival_value_tbl <- shiny::reactive({
        shiny::req(input$time_feature_choice, status_feature_choice())
        build_co_survival_value_tbl(
          cohort_obj(),
          input$time_feature_choice,
          status_feature_choice()
        )
      })
      output$survival_plot <- shiny::renderPlot({
        shiny::req(survival_value_tbl(), input$risktable)
        # Guard: the selected samples must actually have survival data.
        shiny::validate(shiny::need(
          nrow(survival_value_tbl()) > 0,
          paste0(
            "Samples with selected variable don't have selected ",
            "survival features."
          )
        ))
        # KM plots with many strata are unreadable; refuse above 10 groups.
        num_groups <- length(unique(survival_value_tbl()$group))
        shiny::validate(shiny::need(
          num_groups <= 10,
          paste0(
            "Too many sample groups (", num_groups, ") ",
            "for KM plot; choose a continuous variable or select ",
            "different sample groups."
          )
        ))
        # Kaplan-Meier fit with one stratum per sample group.
        fit <- survival::survfit(
          survival::Surv(time, status) ~ group,
          data = survival_value_tbl()
        )
        create_kmplot(
          fit = fit,
          df = survival_value_tbl(),
          confint = input$confint,      # show confidence bands?
          risktable = input$risktable,  # show number-at-risk table?
          title = cohort_obj()$group_display,
          group_colors = unname(cohort_obj()$plot_colors)
        )
      })
      # Download the plotted survival values as a dated CSV file.
      output$download_tbl <- shiny::downloadHandler(
        filename = function() stringr::str_c("data-", Sys.Date(), ".csv"),
        content = function(con) readr::write_csv(survival_value_tbl(), con)
      )
    }
  )
}
|
/R/clinical_outcomes_survival_server.R
|
permissive
|
CRI-iAtlas/iatlas-app
|
R
| false
| false
| 2,201
|
r
|
clinical_outcomes_survival_server <- function(id, cohort_obj) {
shiny::moduleServer(
id,
function(input, output, session) {
ns <- session$ns
output$time_feature_selection_ui <- shiny::renderUI({
shiny::selectInput(
inputId = ns("time_feature_choice"),
label = "Select or Search for Survival Endpoint",
choices = build_co_survival_list(
cohort_obj()$feature_tbl
)
)
})
status_feature_choice <- shiny::reactive({
shiny::req(input$time_feature_choice)
get_co_status_feature(input$time_feature_choice)
})
survival_value_tbl <- shiny::reactive({
shiny::req(input$time_feature_choice, status_feature_choice())
build_co_survival_value_tbl(
cohort_obj(),
input$time_feature_choice,
status_feature_choice()
)
})
output$survival_plot <- shiny::renderPlot({
shiny::req(survival_value_tbl(), input$risktable)
shiny::validate(shiny::need(
nrow(survival_value_tbl()) > 0,
paste0(
"Samples with selected variable don't have selected ",
"survival features."
)
))
num_groups <- length(unique(survival_value_tbl()$group))
shiny::validate(shiny::need(
num_groups <= 10,
paste0(
"Too many sample groups (", num_groups, ") ",
"for KM plot; choose a continuous variable or select ",
"different sample groups."
)
))
fit <- survival::survfit(
survival::Surv(time, status) ~ group,
data = survival_value_tbl()
)
create_kmplot(
fit = fit,
df = survival_value_tbl(),
confint = input$confint,
risktable = input$risktable,
title = cohort_obj()$group_display,
group_colors = unname(cohort_obj()$plot_colors)
)
})
output$download_tbl <- shiny::downloadHandler(
filename = function() stringr::str_c("data-", Sys.Date(), ".csv"),
content = function(con) readr::write_csv(survival_value_tbl(), con)
)
}
)
}
|
# Builds one wide analysis data set (15D, SSQ, tinnitus/THI, IOI-HA,
# SSQ change scores, hearing-aid use, audiogram class) and runs a PCA,
# plotting the loadings of the first six principal components.
# setup.R / missingValues.R define the t3.* and df_* objects used below.
suppressMessages(suppressWarnings(
  expr=source('setup.R', echo = F)
))
suppressMessages(suppressWarnings(
  expr=source('missingValues.R', echo = F)
))
# SSQ difference scores (presumably change from baseline -- TODO confirm
# in missingValues.R); suffix the 15 SSQ columns with "_diff" so they do
# not collide with the T3 SSQ levels after merging.
ssq_diff <- t3.diff.ssq.no.na %>% select(record_id,redcap_event_name,starts_with("ssq_"))
colnames(ssq_diff) <- c(colnames(ssq_diff[,1:2]), paste0(colnames(ssq_diff[,3:17]), "_diff"))
# Merge all questionnaire/demographic tables into one wide data frame,
# joining on record_id (and redcap_event_name where both tables have it).
df_everything <- t3.data.15d.no.na %>% select(record_id, redcap_event_name, starts_with("fifteen_d_")) %>%
  merge(
    t3.data.ssq.no.na %>% select(record_id,redcap_event_name,starts_with("ssq_"), IsT1DrawerUser)
    , by=c("record_id", "redcap_event_name")) %>% merge(
    t3.data.thi.no.na %>% select(record_id,redcap_event_name, tinnitus, starts_with("tinnitus_"),-ends_with("complete"))
    , by=c("record_id", "redcap_event_name")) %>% merge(
    df_ioi.no.na %>% select(record_id, starts_with("ioi_ha_"))
    , by=c("record_id")) %>% merge(
    ssq_diff %>% select(record_id, starts_with("ssq_"))
    , by=c("record_id")) %>% merge(
    df_ha_use.no.na %>% select(record_id, own_ha, ha_number_of_ha, ha_use_time, ha_usetime_hours_per_day, sex, age)
    , by=c("record_id")) %>% merge(
    df_audiogram %>% select(record_id, Class)
    , by=c("record_id"))
summary(df_everything)
# Drop identifiers and excluded variables, then coerce factors to their
# integer codes so prcomp() can handle them.
d1 <- df_everything %>%
  select(everything(), -starts_with("tinnitus"), -ioi_ha_1, -redcap_event_name, -record_id, -ha_usetime_hours_per_day, -IsT1DrawerUser) %>%
  mutate_if(is.factor, as.numeric)
pca.fit <- prcomp(d1, scale. = T)
# Quick faceted overview of the loadings for PC1-PC6.
d2 <- data.frame(pca.fit$rotation)
d2$col <- rownames(d2)
ggplot(d2[,c(paste0("PC",1:6),"col")] %>% melt(id.vars="col"), aes(x=col, y=value)) + geom_col()+ facet_wrap(~variable)+theme(axis.text.x = element_text(angle=90))
# Combined loading plot: zero out small loadings (|x| < 0.15) so only the
# dominant contributions per component remain visible.
load <- data.frame(pca.fit$rotation)[,1:6]
load$name <- rownames(load)
data <- load %>% melt(id.vars="name") %>% mutate_if(is.numeric, function(x)ifelse(abs(x)<0.15, 0, x))
ggplot(data) + geom_col(aes(y=value, fill=variable, x=name), color=rgb(0,0,0,0.5))+
  theme_light() + theme(axis.text.x = element_text(angle=90, vjust = 0.5)) + labs(fill="Component", x="Variable", y="Loading") + ggtitle("Loadings of Principal Component Analysis") + scale_fill_brewer(palette = "RdYlBu")
|
/MSc/everythingPca.R
|
no_license
|
gerardloquet/BEAR_publications
|
R
| false
| false
| 2,226
|
r
|
suppressMessages(suppressWarnings(
expr=source('setup.R', echo = F)
))
suppressMessages(suppressWarnings(
expr=source('missingValues.R', echo = F)
))
ssq_diff <- t3.diff.ssq.no.na %>% select(record_id,redcap_event_name,starts_with("ssq_"))
colnames(ssq_diff) <- c(colnames(ssq_diff[,1:2]), paste0(colnames(ssq_diff[,3:17]), "_diff"))
df_everything <- t3.data.15d.no.na %>% select(record_id, redcap_event_name, starts_with("fifteen_d_")) %>%
merge(
t3.data.ssq.no.na %>% select(record_id,redcap_event_name,starts_with("ssq_"), IsT1DrawerUser)
, by=c("record_id", "redcap_event_name")) %>% merge(
t3.data.thi.no.na %>% select(record_id,redcap_event_name, tinnitus, starts_with("tinnitus_"),-ends_with("complete"))
, by=c("record_id", "redcap_event_name")) %>% merge(
df_ioi.no.na %>% select(record_id, starts_with("ioi_ha_"))
, by=c("record_id")) %>% merge(
ssq_diff %>% select(record_id, starts_with("ssq_"))
, by=c("record_id")) %>% merge(
df_ha_use.no.na %>% select(record_id, own_ha, ha_number_of_ha, ha_use_time, ha_usetime_hours_per_day, sex, age)
, by=c("record_id")) %>% merge(
df_audiogram %>% select(record_id, Class)
, by=c("record_id"))
summary(df_everything)
d1 <- df_everything %>%
select(everything(), -starts_with("tinnitus"), -ioi_ha_1, -redcap_event_name, -record_id, -ha_usetime_hours_per_day, -IsT1DrawerUser) %>%
mutate_if(is.factor, as.numeric)
pca.fit <- prcomp(d1, scale. = T)
d2 <- data.frame(pca.fit$rotation)
d2$col <- rownames(d2)
ggplot(d2[,c(paste0("PC",1:6),"col")] %>% melt(id.vars="col"), aes(x=col, y=value)) + geom_col()+ facet_wrap(~variable)+theme(axis.text.x = element_text(angle=90))
load <- data.frame(pca.fit$rotation)[,1:6]
load$name <- rownames(load)
data <- load %>% melt(id.vars="name") %>% mutate_if(is.numeric, function(x)ifelse(abs(x)<0.15, 0, x))
ggplot(data) + geom_col(aes(y=value, fill=variable, x=name), color=rgb(0,0,0,0.5))+
theme_light() + theme(axis.text.x = element_text(angle=90, vjust = 0.5)) + labs(fill="Component", x="Variable", y="Loading") + ggtitle("Loadings of Principal Component Analysis") + scale_fill_brewer(palette = "RdYlBu")
|
# 10/29/2016. Author: Manu Garcia-Quismondo
# Entry point for the biomarker-viewer Shiny app: sources the app
# components and launches the app in the system browser.
#if (interactive()) {
# shinyApp(ui, server)
#}
library(shiny)
library(shinyjs)
# AppRoutes.R defines source.route (the directory holding the app sources).
source("AppRoutes.R")
setwd(source.route)
# source("AppRoutes.R")
# source("createHistogramTable.R")
# NOTE(review): setwd(source.route) is re-applied after each source()
# call, presumably because the sourced scripts change the working
# directory -- confirm before removing any of these repeated calls.
setwd(source.route)
source("loadTables.R")
setwd(source.route)
source("checkOrProcessTable.R")
setwd(source.route)
# ui.R / server.R define the `ui` and `server` objects used below.
source("ui.R")
source("server.R")
setwd(source.route)
app=shinyApp(ui=ui, server=server)
# Blocks here until the app is stopped.
runApp(app, launch.browser = TRUE)
setwd(source.route)
|
/src/app.R
|
no_license
|
manugarciaquismondo/biomarkerviewer
|
R
| false
| false
| 487
|
r
|
# 10/29/2016. Author: Manu Garcia-Quismondo
#if (interactive()) {
#  shinyApp(ui, server)
#}
# Entry point: sources the app's helper scripts and launches the Shiny UI.
# NOTE(review): `source.route` is expected to be defined by AppRoutes.R; the
# repeated setwd(source.route) calls re-anchor the working directory,
# presumably because the sourced scripts change it as a side effect — verify.
library(shiny)
library(shinyjs)
# Defines `source.route`, the directory the remaining scripts live in.
source("AppRoutes.R")
setwd(source.route)
# source("AppRoutes.R")
# source("createHistogramTable.R")
setwd(source.route)
# By name, table-loading helpers — confirm against the script itself.
source("loadTables.R")
setwd(source.route)
# By name, input-table validation/processing — confirm against the script.
source("checkOrProcessTable.R")
setwd(source.route)
# Provide the `ui` and `server` objects used below.
source("ui.R")
source("server.R")
setwd(source.route)
# Build the app and run it in the default browser (blocking call).
app=shinyApp(ui=ui, server=server)
runApp(app, launch.browser = TRUE)
setwd(source.route)
|
# A saturated interaction model on mtcars is rank deficient: some terms are
# aliased and model_parameters() is expected to drop them with a message.
set.seed(123)
data(mtcars)
model <- stats::lm(wt ~ am * cyl * vs, data = mtcars)

test_that("model_parameters-rank_deficiency", {
  # Dropping aliased coefficients must be announced to the user.
  expect_message(model_parameters(model))
  params <- suppressMessages(model_parameters(model))
  # Only the estimable terms survive, with their fitted coefficients.
  expected_terms <- c("(Intercept)", "am", "cyl", "vs", "am:cyl", "am:vs")
  expected_coefs <- c(2.28908, -1.37908, 0.22688, -0.26158, 0.08062, 0.14987)
  expect_equal(params$Parameter, expected_terms, tolerance = 1e-3)
  expect_equal(params$Coefficient, expected_coefs, tolerance = 1e-3)
})
|
/tests/testthat/test-rank_deficienty.R
|
no_license
|
cran/parameters
|
R
| false
| false
| 486
|
r
|
# A saturated interaction model on mtcars is rank deficient: some terms are
# aliased and model_parameters() is expected to drop them with a message.
set.seed(123)
data(mtcars)
model <- stats::lm(wt ~ am * cyl * vs, data = mtcars)

test_that("model_parameters-rank_deficiency", {
  # Dropping aliased coefficients must be announced to the user.
  expect_message(model_parameters(model))
  params <- suppressMessages(model_parameters(model))
  # Only the estimable terms survive, with their fitted coefficients.
  expected_terms <- c("(Intercept)", "am", "cyl", "vs", "am:cyl", "am:vs")
  expected_coefs <- c(2.28908, -1.37908, 0.22688, -0.26158, 0.08062, 0.14987)
  expect_equal(params$Parameter, expected_terms, tolerance = 1e-3)
  expect_equal(params$Coefficient, expected_coefs, tolerance = 1e-3)
})
|
# Run this script to launch the industry financial-statement explorer.
# ---------------------------------------------------------------------------
# One-off package installation.
# NOTE(review): fixed typo 'magritt' -> 'magrittr'; the original string does
# not name any CRAN package, while library(magrittr) is attached below.
# Consider removing this line and installing dependencies outside the app.
install.packages(c('shiny', 'magrittr', 'stringr', 'ggplot2'))
getwd()
## Attach packages
library(shiny)
library(magrittr)
library(stringr)
library(ggplot2)
### Main code
## Ticker master table (EUC-KR CSV, first column used as row names).
ticker <- read.csv('./data/KOR_ticker.csv', encoding = "euc-kr", row.names = 1)
# Zero-pad stock codes to 6 characters (e.g. "5930" -> "005930") so they
# match the per-company statement file names.
ticker$์ข
๋ชฉ์ฝ๋ <- str_pad(ticker$์ข
๋ชฉ์ฝ๋, 6, side = c("left"), pad = '0')
# Distinct FICS industry groups feed the industry dropdown in the UI.
ind <- ticker$fics์ฐ์
๊ตฐ %>% unique()
#--ui -----------------------------------------------------------------------
# Page layout: industry/company picker on the left; on the right the raw
# statement tables plus three ratio dashboards (profitability, stability,
# growth). Each plot ID pairs a company panel (*_c) with its industry
# benchmark (*_i); all outputs are filled in by the server.
ui <- fluidPage(
  titlePanel("์ฐ์
๋ณ ์ฌ๋ฌด์ฌํ"),
  sidebarLayout(
    sidebarPanel(
      helpText("์ํ๋ ์ฐ์
์ ๊ณ ๋ฅด์์."),
      selectInput('Ind', "์ฐ์
๊ตฐ", ind, selected = 'ํด๋ํฐ ๋ฐ ๊ด๋ จ๋ถํ'),
      helpText('์ฐ์
๊ตฐ๋ด ๊ธฐ์
์ ๊ณ ๋ฅด์์.'),
      # The company dropdown depends on the chosen industry, so it is
      # rendered server-side (output$uilist).
      uiOutput("uilist")
    ),
    mainPanel(
      # Raw statement tables: income statement / balance sheet / cash flow.
      tabsetPanel(
        tabPanel('IS', dataTableOutput('is')),
        tabPanel('FS', dataTableOutput('fs')),
        tabPanel('CF', dataTableOutput('cf'))
      ),
      # Profitability: ROE / ROA / operating margin.
      div(style = "background-color: coral;", h3('์์ต์ฑ'),
          helpText("roe"),
          fluidRow(
            column(5, plotOutput('roe_c', height = "200px")),
            column(5, plotOutput('roe_i', height = "200px"))
          ),
          helpText("roa"),
          fluidRow(
            column(5, plotOutput('roa_c', height = "200px")),
            column(5, plotOutput('roa_i', height = "200px"))
          ),
          helpText("opm"),
          fluidRow(
            column(5, plotOutput('opm_c', height = "200px")),
            column(5, plotOutput('opm_i', height = "200px"))
          )
      ),
      # Stability: current ratio / quick ratio / debt ratio.
      div(style = "background-color: coral;", h3('์์ ์ฑ'),
          fluidRow(
            helpText("์ ๋๋น์จ"),
            column(5, plotOutput('cr_c', height = "200px")),
            column(5, plotOutput('cr_i', height = "200px"))
          ),
          fluidRow(
            helpText("๋น์ข๋น์จ"),
            column(5, plotOutput('qr_c', height = "200px")),
            column(5, plotOutput('qr_i', height = "200px"))
          ),
          fluidRow(
            helpText("๋ถ์ฑ๋น์จ"),
            column(5, plotOutput('dr_c', height = "200px")),
            column(5, plotOutput('dr_i', height = "200px"))
          )
      ),
      # Growth: sales / operating profit / net profit growth.
      div(style = "background-color: coral;", h3('์ฑ์ฅ์ฑ'),
          fluidRow(
            helpText("๋งค์ถ์ก์ฑ์ฅ๋น์จ"),
            column(5, plotOutput('si_c', height = "200px")),
            column(5, plotOutput('si_i', height = "200px"))
          ),
          fluidRow(
            helpText("์์
์ด์ต์ฑ์ฅ๋น์จ"),
            column(5, plotOutput('oi_c', height = "200px")),
            column(5, plotOutput('oi_i', height = "200px"))
          ),
          fluidRow(
            helpText("๋น๊ธฐ์์ด์ต๋น์จ"),
            column(5, plotOutput('ni_c', height = "200px")),
            column(5, plotOutput('ni_i', height = "200px"))
          )
      )
    )
  )
)
#--server -------------------------------------------------------------------
# Server logic.  The original file repeated the same read/transpose/plot
# boilerplate ~20 times and dispatched the company picker through a
# 60-branch switch() whose branches were all identical except the industry
# string; both are collapsed into small helpers below while keeping every
# output ID, file path, formula, and displayed title unchanged.
server <- function(input, output) {

  # Path of the selected company's statement CSV (column 1 of `ticker`
  # holds the zero-padded stock code used in the file name).
  company_fs_path <- function() {
    paste0('./data/KOR_fs_t/',
           ticker[which(ticker$์ข
๋ชฉ๋ช
 == input$com), 1], '_fs_t.csv')
  }

  # Path of the aggregated statement CSV for the selected industry.
  industry_fs_path <- function() {
    paste0('./data/analysis/industry/', input$Ind, '/', input$Ind,
           '_fs_total.csv')
  }

  # Read a statement file for plotting: statement items become row names so
  # they can be sliced out as a['๋งค์ถ์ก', ] etc.
  read_fs <- function(path) {
    read.csv(path, encoding = "euc-kr", row.names = 1, stringsAsFactors = T)
  }

  # Bar chart of a per-period ratio built from one-row numerator/denominator
  # slices; values rounded to 4 decimals, exactly as in the original blocks.
  ratio_plot <- function(num, den, title) {
    b <- as.data.frame(t(num / den))
    b[[1]] <- round(as.numeric(b[[1]]), 4)
    b <- setNames(cbind(row.names(b), b), c('date', 'ratio'))
    ggplot(b, aes(x = date, y = ratio)) +
      geom_bar(position = "dodge", stat = "identity") +
      labs(title = title)
  }

  # Bar chart of period-over-period growth of one statement item across the
  # first three periods.  (The original growth blocks did not round.)
  growth_plot <- function(item, title) {
    a <- item[, 1:3]
    b <- as.data.frame(t((a[, 2:3] - a[, 1:2]) / a[, 1:2]))
    b[[1]] <- as.numeric(b[[1]])
    b <- setNames(cbind(row.names(b), b), c('date', 'ratio'))
    ggplot(b, aes(x = date, y = ratio)) +
      geom_bar(position = "dodge", stat = "identity") +
      labs(title = title)
  }

  # Company picker for the chosen industry.  Replaces the original
  # per-industry switch(); '์ผ์ฑ์ ์' remains the preselected default for
  # the handset industry only, as before.
  output$uilist <- renderUI({
    choices <- ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ == input$Ind)]
    if (input$Ind == "ํด๋ํฐ ๋ฐ ๊ด๋ จ๋ถํ") {
      selectInput('com', '๊ธฐ์
', choices, selected = '์ผ์ฑ์ ์')
    } else {
      selectInput('com', '๊ธฐ์
', choices)
    }
  })

  # --- statement tables (selected rows only) --------------------------------
  output$is <- renderDataTable({
    a <- read.csv(company_fs_path(), encoding = "euc-kr")
    a[a[, 1] %in% c('๋งค์ถ์ก', '๋งค์ถ์ด์ด์ต', '์์
์ด์ต',
                    '์ธ์ ๊ณ์์ฌ์
์ด์ต', '๋ฒ์ธ์ธ๋น์ฉ', '๋น๊ธฐ์์ด์ต'), ]
  })
  output$fs <- renderDataTable({
    a <- read.csv(company_fs_path(), encoding = "euc-kr", stringsAsFactors = F)
    # NOTE(review): '๊ธฐํ๊ธ์ต์
๋ถ์ฑ' was garbled in the file as received;
    # verify the exact item label against the CSV contents.
    a[a[, 1] %in% c('์์ฐ', '์ ๋์์ฐ', '๋น์ ๋์์ฐ', '๊ธฐํ๊ธ์ต์์ฐ',
                    '๋ถ์ฑ', '์ ๋๋ถ์ฑ', '๋น์ ๋๋ถ์ฑ', '๊ธฐํ๊ธ์ต์
๋ถ์ฑ',
                    '์๋ณธ', '์ง๋ฐฐ๊ธฐ์
์ฃผ์ฃผ์ง๋ถ', '๋น์ง๋ฐฐ์ฃผ์ฃผ์ง๋ถ'), ]
  })
  output$cf <- renderDataTable({
    a <- read.csv(company_fs_path(), encoding = "euc-kr", stringsAsFactors = F)
    a[a[, 1] %in% c('์์
ํ๋์ผ๋ก์ธํํ๊ธํ๋ฆ', 'ํฌ์ํ๋์ผ๋ก์ธํํ๊ธํ๋ฆ',
                    '์ฌ๋ฌดํ๋์ผ๋ก์ธํํ๊ธํ๋ฆ', 'ํ๊ธ๋ฐํ๊ธ์ฑ์์ฐ์์ฆ๊ฐ',
                    '๊ธฐ์ดํ๊ธ๋ฐํ๊ธ์ฑ์์ฐ', '๊ธฐ๋งํ๊ธ๋ฐํ๊ธ์ฑ์์ฐ'), ]
  })

  # --- profitability: ROE / ROA / operating margin --------------------------
  output$roe_c <- renderPlot({
    a <- read_fs(company_fs_path())
    ratio_plot(a['๋น๊ธฐ์์ด์ต', ], a['์๋ณธ', ], "๊ธฐ์
ROE")
  })
  output$roe_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    ratio_plot(a['๋น๊ธฐ์์ด์ต', ], a['์๋ณธ', ], "์ฐ์
ROE ")
  })
  output$roa_c <- renderPlot({
    a <- read_fs(company_fs_path())
    ratio_plot(a['๋น๊ธฐ์์ด์ต', ], a['์์ฐ', ], "๊ธฐ์
ROA")
  })
  output$roa_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    ratio_plot(a['๋น๊ธฐ์์ด์ต', ], a['์์ฐ', ], '์ฐ์
ROA')
  })
  output$opm_c <- renderPlot({
    a <- read_fs(company_fs_path())
    ratio_plot(a['์์
์ด์ต', ], a['๋งค์ถ์ก', ], "๊ธฐ์
OPM")
  })
  output$opm_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    ratio_plot(a['์์
์ด์ต', ], a['๋งค์ถ์ก', ], '์ฐ์
OPM')
  })

  # --- stability: current / quick / debt ratios -----------------------------
  output$cr_c <- renderPlot({
    a <- read_fs(company_fs_path())
    ratio_plot(a['์ ๋์์ฐ', ], a['์ ๋๋ถ์ฑ', ], "๊ธฐ์
์ ๋๋น์จ")
  })
  output$cr_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    ratio_plot(a['์ ๋์์ฐ', ], a['์ ๋๋ถ์ฑ', ], "์ฐ์
์ ๋๋น์จ")
  })
  output$qr_c <- renderPlot({
    a <- read_fs(company_fs_path())
    ratio_plot(a['์ ๋์์ฐ', ] - a['์ฌ๊ณ ์์ฐ', ], a['์ ๋๋ถ์ฑ', ],
               "๊ธฐ์
๋น์ข๋น์จ")
  })
  output$qr_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    ratio_plot(a['์ ๋์์ฐ', ] - a['์ฌ๊ณ ์์ฐ', ], a['์ ๋๋ถ์ฑ', ],
               "์ฐ์
๋น์ข๋น์จ")
  })
  # NOTE(review): these "๋ถ์ฑ๋น์จ" panels plot ์์ฐ/๋ถ์ฑ (assets over
  # liabilities), the inverse of the conventional debt ratio.  Preserved
  # as-is; confirm the intended definition with the author before changing.
  output$dr_c <- renderPlot({
    a <- read_fs(company_fs_path())
    ratio_plot(a['์์ฐ', ], a['๋ถ์ฑ', ], "๊ธฐ์
๋ถ์ฑ๋น์จ")
  })
  output$dr_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    ratio_plot(a['์์ฐ', ], a['๋ถ์ฑ', ], "์ฐ์
๋ถ์ฑ๋น์จ")
  })

  # --- growth: sales / operating profit / net profit ------------------------
  output$si_c <- renderPlot({
    a <- read_fs(company_fs_path())
    growth_plot(a['๋งค์ถ์ก', ], "๊ธฐ์
๋งค์ถ์ฑ์ฅ")
  })
  output$si_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    growth_plot(a['๋งค์ถ์ก', ], "์ฐ์
๋งค์ถ์ฑ์ฅ")
  })
  output$oi_c <- renderPlot({
    a <- read_fs(company_fs_path())
    growth_plot(a['์์
์ด์ต', ], "๊ธฐ์
์์
์ด์ต ์ฑ์ฅ")
  })
  output$oi_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    growth_plot(a['์์
์ด์ต', ], "์ฐ์
์์
์ด์ต ์ฑ์ฅ")
  })
  output$ni_c <- renderPlot({
    a <- read_fs(company_fs_path())
    growth_plot(a['๋น๊ธฐ์์ด์ต', ], "๊ธฐ์
๋น๊ธฐ์์ด์ต ์ฑ์ฅ")
  })
  output$ni_i <- renderPlot({
    a <- read_fs(industry_fs_path())
    growth_plot(a['๋น๊ธฐ์์ด์ต', ], "์ฐ์
๋น๊ธฐ์์ด์ต ์ฑ์ฅ")
  })
}
shinyApp(ui, server)
|
/shiny.R
|
no_license
|
kiki3700/ShinyIndustryAnalysis
|
R
| false
| false
| 22,756
|
r
|
# Run this script to launch the industry financial-statement explorer.
# ---------------------------------------------------------------------------
# One-off package installation.
# NOTE(review): fixed typo 'magritt' -> 'magrittr'; the original string does
# not name any CRAN package, while library(magrittr) is attached below.
# Consider removing this line and installing dependencies outside the app.
install.packages(c('shiny', 'magrittr', 'stringr', 'ggplot2'))
getwd()
## Attach packages
library(shiny)
library(magrittr)
library(stringr)
library(ggplot2)
### Main code
## Ticker master table (EUC-KR CSV, first column used as row names).
ticker <- read.csv('./data/KOR_ticker.csv', encoding = "euc-kr", row.names = 1)
# Zero-pad stock codes to 6 characters (e.g. "5930" -> "005930") so they
# match the per-company statement file names.
ticker$์ข
๋ชฉ์ฝ๋ <- str_pad(ticker$์ข
๋ชฉ์ฝ๋, 6, side = c("left"), pad = '0')
# Distinct FICS industry groups feed the industry dropdown in the UI.
ind <- ticker$fics์ฐ์
๊ตฐ %>% unique()
#--ui -----------------------------------------------------------------------
# Page layout: industry/company picker on the left; on the right the raw
# statement tables plus three ratio dashboards (profitability, stability,
# growth). Each plot ID pairs a company panel (*_c) with its industry
# benchmark (*_i); all outputs are filled in by the server.
ui <- fluidPage(
  titlePanel("์ฐ์
๋ณ ์ฌ๋ฌด์ฌํ"),
  sidebarLayout(
    sidebarPanel(
      helpText("์ํ๋ ์ฐ์
์ ๊ณ ๋ฅด์์."),
      selectInput('Ind', "์ฐ์
๊ตฐ", ind, selected = 'ํด๋ํฐ ๋ฐ ๊ด๋ จ๋ถํ'),
      helpText('์ฐ์
๊ตฐ๋ด ๊ธฐ์
์ ๊ณ ๋ฅด์์.'),
      # The company dropdown depends on the chosen industry, so it is
      # rendered server-side (output$uilist).
      uiOutput("uilist")
    ),
    mainPanel(
      # Raw statement tables: income statement / balance sheet / cash flow.
      tabsetPanel(
        tabPanel('IS', dataTableOutput('is')),
        tabPanel('FS', dataTableOutput('fs')),
        tabPanel('CF', dataTableOutput('cf'))
      ),
      # Profitability: ROE / ROA / operating margin.
      div(style = "background-color: coral;", h3('์์ต์ฑ'),
          helpText("roe"),
          fluidRow(
            column(5, plotOutput('roe_c', height = "200px")),
            column(5, plotOutput('roe_i', height = "200px"))
          ),
          helpText("roa"),
          fluidRow(
            column(5, plotOutput('roa_c', height = "200px")),
            column(5, plotOutput('roa_i', height = "200px"))
          ),
          helpText("opm"),
          fluidRow(
            column(5, plotOutput('opm_c', height = "200px")),
            column(5, plotOutput('opm_i', height = "200px"))
          )
      ),
      # Stability: current ratio / quick ratio / debt ratio.
      div(style = "background-color: coral;", h3('์์ ์ฑ'),
          fluidRow(
            helpText("์ ๋๋น์จ"),
            column(5, plotOutput('cr_c', height = "200px")),
            column(5, plotOutput('cr_i', height = "200px"))
          ),
          fluidRow(
            helpText("๋น์ข๋น์จ"),
            column(5, plotOutput('qr_c', height = "200px")),
            column(5, plotOutput('qr_i', height = "200px"))
          ),
          fluidRow(
            helpText("๋ถ์ฑ๋น์จ"),
            column(5, plotOutput('dr_c', height = "200px")),
            column(5, plotOutput('dr_i', height = "200px"))
          )
      ),
      # Growth: sales / operating profit / net profit growth.
      div(style = "background-color: coral;", h3('์ฑ์ฅ์ฑ'),
          fluidRow(
            helpText("๋งค์ถ์ก์ฑ์ฅ๋น์จ"),
            column(5, plotOutput('si_c', height = "200px")),
            column(5, plotOutput('si_i', height = "200px"))
          ),
          fluidRow(
            helpText("์์
์ด์ต์ฑ์ฅ๋น์จ"),
            column(5, plotOutput('oi_c', height = "200px")),
            column(5, plotOutput('oi_i', height = "200px"))
          ),
          fluidRow(
            helpText("๋น๊ธฐ์์ด์ต๋น์จ"),
            column(5, plotOutput('ni_c', height = "200px")),
            column(5, plotOutput('ni_i', height = "200px"))
          )
      )
    )
  )
)
#--server
server = function(input,output) {
#๊ธฐ์
๋ฆฌ์คํธ ๋ง๋ค๊ธฐ
output$'uilist'<-renderUI({
switch(input$'Ind',
"ํด๋ํฐ ๋ฐ ๊ด๋ จ๋ถํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="ํด๋ํฐ ๋ฐ ๊ด๋ จ๋ถํ")],selected='์ผ์ฑ์ ์'),
"๋ฐ๋์ฒด ๋ฐ ๊ด๋ จ์ฅ๋น"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ฐ๋์ฒด ๋ฐ ๊ด๋ จ์ฅ๋น")]),
"๋ฐ์ด์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ฐ์ด์ค")]),
"์ธํฐ๋ท ์๋น์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ธํฐ๋ท ์๋น์ค")]),
"ํํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="ํํ")]),
"์๋์ฐจ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์๋์ฐจ")]),
"์ ์ ์ฅ๋น ๋ฐ ๊ธฐ๊ธฐ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ ์ ์ฅ๋น ๋ฐ ๊ธฐ๊ธฐ")]),
"๊ฐ์ธ์ํ์ฉํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ฐ์ธ์ํ์ฉํ")]),
"๋ณตํฉ ์ฐ์
"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ณตํฉ ์ฐ์
")]),
"์๋์ฐจ๋ถํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์๋์ฐจ๋ถํ")]),
"๋ฌด์ ํต์ "=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ฌด์ ํต์ ")]),
"๊ฒ์ ์ํํธ์จ์ด"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ฒ์ ์ํํธ์จ์ด")]),
"๊ธ์ ๋ฐ ๊ด๋ฌผ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ธ์ ๋ฐ ๊ด๋ฌผ")]),
"์์
์ํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์์
์ํ")]),
"์์ ๋ฐ ๊ฐ์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์์ ๋ฐ ๊ฐ์ค")]),
"๋ด๊ตฌ์๋น์ฌ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ด๊ตฌ์๋น์ฌ")]),
"์ ์ฝ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ ์ฝ")]),
"์ ๋ ฅ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ ๋ ฅ")]),
"IT ์๋น์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="IT ์๋น์ค")]),
"๋ณดํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ณดํ")]),
"๋ด๋ฐฐ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ด๋ฐฐ")]),
"์กฐ์ "=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์กฐ์ ")]),
"์๋ฃ ์ฅ๋น ๋ฐ ์๋น์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์๋ฃ ์ฅ๋น ๋ฐ ์๋น์ค")]),
"์ก์์ด์"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ก์์ด์")]),
"์๋ฃํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์๋ฃํ")]),
"์ฆ๊ถ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ฆ๊ถ")]),
"ํญ๊ณต์ด์"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="ํญ๊ณต์ด์")]),
"ํธํ
๋ฐ ๋ ์ "=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="ํธํ
๋ฐ ๋ ์ ")]),
"๋์คํ๋ ์ด ๋ฐ ๊ด๋ จ๋ถํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋์คํ๋ ์ด ๋ฐ ๊ด๋ จ๋ถํ")]),
"์์
์๋น์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์์
์๋น์ค")]),
"๊ฑด์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ฑด์ค")]),
"๋์๋งค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋์๋งค")]),
"์๋น์ ๊ธ์ต"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์๋น์ ๊ธ์ต")]),
"์ผ๋ฐ ์ํํธ์จ์ด"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ผ๋ฐ ์ํํธ์จ์ด")]),
"๊ฑด์ถ์์ฌ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ฑด์ถ์์ฌ")]),
"ํต์ ์ฅ๋น"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="ํต์ ์ฅ๋น")]),
"๊ธฐ๊ณ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ธฐ๊ณ")]),
"๋ฏธ๋์ด"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ฏธ๋์ด")]),
"์๋ฃ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์๋ฃ")]),
"๊ฐ์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ฐ์ค")]),
"๋ฐฑํ์ "=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ฐฑํ์ ")]),
"ํด์์ด์"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="ํด์์ด์")]),
"์ฌ์ ๋ฐ ์๋ณต"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ฌ์ ๋ฐ ์๋ณต")]),
"๋ฌด์ญ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ฌด์ญ")]),
"์ ๊ธฐ์ฅ๋น"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ ๊ธฐ์ฅ๋น")]),
"์ด์ก์ธํ๋ผ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ด์ก์ธํ๋ผ")]),
"์๋์ง ์์ค ๋ฐ ์๋น์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์๋์ง ์์ค ๋ฐ ์๋น์ค")]),
"๊ฑด์ถ์์ฌ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ฑด์ถ์์ฌ")]),
"์ข
์ด ๋ฐ ๋ชฉ์ฌ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ข
์ด ๋ฐ ๋ชฉ์ฌ")]),
"์จ๋ผ์ธ์ผํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์จ๋ผ์ธ์ผํ")]),
"๋ถ๋์ฐ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ถ๋์ฐ")]),
"์ฉ๊ธฐ ๋ฐ ํฌ์ฅ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ฉ๊ธฐ ๋ฐ ํฌ์ฅ")]),
"์ปดํจํฐ ๋ฐ ์ฃผ๋ณ๊ธฐ๊ธฐ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ปดํจํฐ ๋ฐ ์ฃผ๋ณ๊ธฐ๊ธฐ")]),
"์ฐฝ์
ํฌ์ ๋ฐ ์ข
๊ธ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ฐฝ์
ํฌ์ ๋ฐ ์ข
๊ธ")]),
"๊ต์ก"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ต์ก")]),
"๋ณด์์ฅ๋น"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ณด์์ฅ๋น")]),
"์ํธ์ ์ถ์ํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ํธ์ ์ถ์ํ")]),
"๊ฐ์ ์ํ์ฉํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๊ฐ์ ์ํ์ฉํ")]),
"๋ ์ ์ฉํ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="๋ ์ ์ฉํ")]),
"์ฌ๋ฌด๊ธฐ๊ธฐ"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ฌ๋ฌด๊ธฐ๊ธฐ")]),
"์ ์ ํต์ "=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์ ์ ํต์ ")]),
"์
ํฑ ๋ฐ์ค"=selectInput('com','๊ธฐ์
',ticker$์ข
๋ชฉ๋ช
[which(ticker$fics์ฐ์
๊ตฐ=="์
ํฑ ๋ฐ์ค")])
)
})
#์ฌ๋ฌด์ ํ 3์ด์ฌ
output$'is'<-renderDataTable({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr")
a[a[,1]%in%c('๋งค์ถ์ก','๋งค์ถ์ด์ด์ต','์์
์ด์ต','์ธ์ ๊ณ์์ฌ์
์ด์ต','๋ฒ์ธ์ธ๋น์ฉ','๋น๊ธฐ์์ด์ต'),]
})
output$'fs'<-renderDataTable({
a=read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",stringsAsFactors=F)
a[a[,1]%in%c('์์ฐ','์ ๋์์ฐ','๋น์ ๋์์ฐ','๊ธฐํ๊ธ์ต์์ฐ','๋ถ์ฑ','์ ๋๋ถ์ฑ','๋น์ ๋๋ถ์ฑ','๊ธฐํ๊ธ์ต์
๋ถ์ฑ',
'์๋ณธ','์ง๋ฐฐ๊ธฐ์
์ฃผ์ฃผ์ง๋ถ','๋น์ง๋ฐฐ์ฃผ์ฃผ์ง๋ถ'),]
})
output$'cf'<-renderDataTable({
a=read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",stringsAsFactors=F)
a[a[,1]%in%c('์์
ํ๋์ผ๋ก์ธํํ๊ธํ๋ฆ','ํฌ์ํ๋์ผ๋ก์ธํํ๊ธํ๋ฆ','์ฌ๋ฌดํ๋์ผ๋ก์ธํํ๊ธํ๋ฆ','ํ๊ธ๋ฐํ๊ธ์ฑ์์ฐ์์ฆ๊ฐ',
'๊ธฐ์ดํ๊ธ๋ฐํ๊ธ์ฑ์์ฐ','๊ธฐ๋งํ๊ธ๋ฐํ๊ธ์ฑ์์ฐ'),]
})
#๋น์จ ๊ตฌํ๊ธฐ
#๋น์จ.์์ต์ฑ
#____roe
output$'roe_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['๋น๊ธฐ์์ด์ต',]/a['์๋ณธ',]
b<-t(b)
b=b%>%as.data.frame()
b$'๋น๊ธฐ์์ด์ต'<-as.numeric(b$'๋น๊ธฐ์์ด์ต')%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
ROE")
})
output$'roe_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['๋น๊ธฐ์์ด์ต',]/a['์๋ณธ',]
b<-t(b)
b=b%>%as.data.frame()
b$๋น๊ธฐ์์ด์ต<-as.numeric(b$๋น๊ธฐ์์ด์ต)%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "์ฐ์
ROE ")
})
#___roa
output$'roa_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['๋น๊ธฐ์์ด์ต',]/a['์์ฐ',]
b<-t(b)
b=b%>%as.data.frame()
b$๋น๊ธฐ์์ด์ต<-as.numeric(b$๋น๊ธฐ์์ด์ต)%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
ROA")
})
output$'roa_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['๋น๊ธฐ์์ด์ต',]/a['์์ฐ',]
b<-t(b)
b=b%>%as.data.frame()
b$๋น๊ธฐ์์ด์ต<-as.numeric(b$๋น๊ธฐ์์ด์ต)%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = '์ฐ์
ROA')
})
#___opm
output$'opm_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['์์
์ด์ต',]/a['๋งค์ถ์ก',]
b<-t(b)
b=b%>%as.data.frame()
b$'์์
์ด์ต'<-as.numeric(b$'์์
์ด์ต')%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
OPM")
})
output$'opm_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['์์
์ด์ต',]/a['๋งค์ถ์ก',]
b<-t(b)
b=b%>%as.data.frame()
b$'์์
์ด์ต'<-as.numeric(b$'์์
์ด์ต')%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = '์ฐ์
OPM')
})
#๋น์จ.์์ ์ฑ
#____์ ๋์ฑ๋น์จ
output$'cr_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['์ ๋์์ฐ',]/a['์ ๋๋ถ์ฑ',]
b<-t(b)
b=b%>%as.data.frame()
b$'์ ๋์์ฐ'<-as.numeric(b$'์ ๋์์ฐ')%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
์ ๋๋น์จ")
})
output$'cr_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['์ ๋์์ฐ',]/a['์ ๋๋ถ์ฑ',]
b<-t(b)
b=b%>%as.data.frame()
b$'์ ๋์์ฐ'<-as.numeric(b$'์ ๋์์ฐ')%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "์ฐ์
์ ๋๋น์จ")
})
#___๋น์ข๋น์จ
output$'qr_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-(a['์ ๋์์ฐ',]-a['์ฌ๊ณ ์์ฐ',])/a['์ ๋๋ถ์ฑ',]
b<-t(b)
b=b%>%as.data.frame()
b$์ ๋์์ฐ<-as.numeric(b$์ ๋์์ฐ)%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
๋น์ข๋น์จ")
})
output$'qr_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-(a['์ ๋์์ฐ',]-a['์ฌ๊ณ ์์ฐ',])/a['์ ๋๋ถ์ฑ',]
b<-t(b)
b=b%>%as.data.frame()
b$์ ๋์์ฐ<-as.numeric(b$์ ๋์์ฐ)%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "์ฐ์
๋น์ข๋น์จ")
})
#___๋ถ์ฑ๋น์จ
output$'dr_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['์์ฐ',]/a['๋ถ์ฑ',]
b<-t(b)
b=b%>%as.data.frame()
b$'์์ฐ'<-as.numeric(b$'์์ฐ')%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
๋ถ์ฑ๋น์จ")
})
output$'dr_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
b<-a['์์ฐ',]/a['๋ถ์ฑ',]
b<-t(b)
b=b%>%as.data.frame()
b$'์์ฐ'<-as.numeric(b$'์์ฐ')%>%round(.,4)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "์ฐ์
๋ถ์ฑ๋น์จ")
})
# --- Growth-ratio section -------------------------------------------------
# NOTE(review): the hangul comments and column names below are mojibake from
# an EUC-KR/UTF-8 mismatch; several statements are split mid-character across
# physical lines and will not parse as-is. Code is preserved byte-for-byte --
# re-extract from the original euc-kr source to repair.
#๋น์จ.์ฑ์ฅ์ฑ
#____๋งค์ถ์ก์ฑ์ฅ์ฑ๋น์จ
# Company-level revenue growth: reads the selected company's financial
# statement CSV, keeps the first three annual revenue columns, and plots
# period-over-period growth ((next year - this year) / this year) as bars.
output$'si_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
# Year-over-year growth from three consecutive annual revenue figures.
a<-a['๋งค์ถ์ก',1:3]
b<-a[,2:3]-a[,1:2]
b<-b/a[,1:2]
b<-t(b)
b=b%>%as.data.frame()
b$๋งค์ถ์ก<-as.numeric(b$๋งค์ถ์ก)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
# Bar chart of growth ratio by fiscal year.
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
๋งค์ถ์ฑ์ฅ")
})
# Industry-level counterpart: same computation on the industry aggregate file
# selected via input$'Ind'.
output$'si_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
a<-a['๋งค์ถ์ก',1:3]
b<-a[,2:3]-a[,1:2]
b<-b/a[,1:2]
b<-t(b)
b=b%>%as.data.frame()
b$๋งค์ถ์ก<-as.numeric(b$๋งค์ถ์ก)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "์ฐ์
๋งค์ถ์ฑ์ฅ")
})
#___์์
์ด์ต์ฑ์ฅ๋น์จ
output$'oi_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
a<-a['์์
์ด์ต',1:3]
b<-a[,2:3]-a[,1:2]
b<-b/a[,1:2]
b<-t(b)
b=b%>%as.data.frame()
b$์์
์ด์ต<-as.numeric(b$์์
์ด์ต)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
์์
์ด์ต ์ฑ์ฅ")
})
output$'oi_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
a<-a['์์
์ด์ต',1:3]
b<-a[,2:3]-a[,1:2]
b<-b/a[,1:2]
b<-t(b)
b=b%>%as.data.frame()
b$์์
์ด์ต<-as.numeric(b$์์
์ด์ต)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "์ฐ์
์์
์ด์ต ์ฑ์ฅ")
})
#___๋น๊ธฐ์์ด์ต์ฑ์ฅ๋น์จ
output$'ni_c'<-renderPlot({
a<-read.csv(paste0('./data/KOR_fs_t/',ticker[which(ticker$์ข
๋ชฉ๋ช
==input$'com'),1],'_fs_t.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
a<-a['๋น๊ธฐ์์ด์ต',1:3]
b<-a[,2:3]-a[,1:2]
b<-b/a[,1:2]
b<-t(b)
b=b%>%as.data.frame()
b$๋น๊ธฐ์์ด์ต<-as.numeric(b$๋น๊ธฐ์์ด์ต)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "๊ธฐ์
๋น๊ธฐ์์ด์ต ์ฑ์ฅ")
})
output$'ni_i'<-renderPlot({
a<-read.csv(paste0('./data/analysis/industry/',input$'Ind','/',input$'Ind','_fs_total.csv'),encoding = "euc-kr",row.names=1,stringsAsFactors = T)
a<-a['๋น๊ธฐ์์ด์ต',1:3]
b<-a[,2:3]-a[,1:2]
b<-b/a[,1:2]
b<-t(b)
b=b%>%as.data.frame()
b$๋น๊ธฐ์์ด์ต<-as.numeric(b$๋น๊ธฐ์์ด์ต)
b<-cbind(row.names(b),b)
b<-setNames(b,c('date','ratio'))
ggplot(b, aes(x=date,y=ratio))+
geom_bar(position = "dodge",
stat = "identity")+labs(title = "์ฐ์
๋น๊ธฐ์์ด์ต ์ฑ์ฅ")
})
}
shinyApp(ui, server)
|
# Spam-classification ROC comparison: fits four classifiers (logistic
# regression, rpart tree, random forest, SVM) on the e-mail term-document
# matrix and overlays their ROC curves for the training and test splits.
emails = read.csv("./Data/emails.csv", stringsAsFactors = F)
source('TermDocumentMatrix.R')
EmailsDF = TextDataFrame(TermMatrix(emails$text))
EmailsDF$Spam = as.factor(emails$spam)

# Split the data: 70% train / 30% test, stratified on the outcome.
set.seed(123)
Spl = caTools::sample.split(EmailsDF$Spam, SplitRatio = 0.7)
source('TrainTestDataset.R')
train = Train(EmailsDF, Spl)
test = Test(EmailsDF, Spl)

library(ggplot2)
library(ggthemes)
library(ggsci)

# Fit the four candidate models.
source('LogisticRegression.R')
log = LogisticRegressionFit(train)
source('TreeRPART.R')
Tree = TreeFit()
source('RandomForest.R')
RF = RandomForestFit()
source('SupportVectorMachine.R')
SVM = SVMFit()

# Tag one model's ROC table with its display name.
# (The original recomputed each ROC table twice: once for the data and once
# just to take nrow() for the label column.)
labelled_roc <- function(roc_df, label) {
  cbind(roc_df, Model = rep(label, nrow(roc_df)))
}

# Stack the labelled ROC tables of all four models for one data split.
roc_for <- function(data) {
  rbind.data.frame(
    labelled_roc(LogisticRegressionROC(log, data), "LR"),
    labelled_roc(TreeROC(Tree, data),             "Tree"),
    labelled_roc(RandomForestROC(RF, data),       "RF"),
    labelled_roc(SVMROC(SVM, data),               "SVM")
  )
}

# Overlay ROC curves, one line per model. Mapping col = Model already groups
# the data by model, so a single geom_line replaces the four redundant
# per-model layers of the original.
plot_roc <- function(roc_df) {
  ggplot(roc_df, aes(FP, TP, col = Model)) +
    xlab("False positive rate") +
    ylab("True positive rate") +
    geom_line(size = 1.0) +
    scale_color_d3()
}

roc_train = roc_for(train)
plot_roc(roc_train)

roc_test = roc_for(test)
plot_roc(roc_test)
|
/ROC-AllModels.R
|
no_license
|
smotrova/R-Flexdashboard-TextAnalytics
|
R
| false
| false
| 2,839
|
r
|
emails = read.csv("./Data/emails.csv", stringsAsFactors = F)
source('TermDocumentMatrix.R')
EmailsDF = TextDataFrame(TermMatrix(emails$text))
EmailsDF$Spam = as.factor(emails$spam)
#Split the data
set.seed(123)
Spl = caTools::sample.split(EmailsDF$Spam, SplitRatio = 0.7)
source('TrainTestDataset.R')
train = Train(EmailsDF, Spl)
test = Test(EmailsDF, Spl)
library(ggplot2)
library(ggthemes)
library(ggsci)
source('LogisticRegression.R')
log = LogisticRegressionFit(train)
source('TreeRPART.R')
Tree = TreeFit()
source('RandomForest.R')
RF = RandomForestFit()
source('SupportVectorMachine.R')
SVM = SVMFit()
roc_train = rbind.data.frame( cbind(LogisticRegressionROC(log, train),
Model = rep("LR",nrow(LogisticRegressionROC(log, train))) ),
cbind(TreeROC(Tree, train),
Model = rep("Tree",nrow(TreeROC(Tree, train))) ),
cbind(RandomForestROC(RF, train),
Model = rep("RF",nrow(RandomForestROC(RF, train))) ),
cbind(SVMROC(SVM, train),
Model = rep("SVM",nrow(SVMROC(SVM, train))) )
)
ggplot(roc_train, aes(FP, TP, col = Model)) +
xlab("False positive rate") +
ylab("True positive rate") +
geom_line(data = roc_train[roc_train$Model == 'LR', ], size = 1.0) +
geom_line(data = roc_train[roc_train$Model == 'Tree', ], size = 1.0) +
geom_line(data = roc_train[roc_train$Model == 'RF', ], size = 1.0) +
geom_line(data = roc_train[roc_train$Model == 'SVM', ], size = 1.0)+
scale_color_d3()
roc_test = rbind.data.frame( cbind(LogisticRegressionROC(log, test),
Model = rep("LR",nrow(LogisticRegressionROC(log, test))) ),
cbind(TreeROC(Tree, test),
Model = rep("Tree",nrow(TreeROC(Tree, test))) ),
cbind(RandomForestROC(RF, test),
Model = rep("RF",nrow(RandomForestROC(RF, test))) ),
cbind(SVMROC(SVM, test),
Model = rep("SVM",nrow(SVMROC(SVM, test))) )
)
ggplot(roc_test, aes(FP, TP, col = Model)) +
xlab("False positive rate") +
ylab("True positive rate") +
geom_line(data = roc_test[roc_test$Model == 'LR', ], size = 1.0) +
geom_line(data = roc_test[roc_test$Model == 'Tree', ], size = 1.0) +
geom_line(data = roc_test[roc_test$Model == 'RF', ], size = 1.0) +
geom_line(data = roc_test[roc_test$Model == 'SVM', ], size = 1.0)+
scale_color_d3()
|
Sys.setenv(JAVA_HOME='C:\\Program Files\\Java\\jdk1.8.0_91\\jre')
library(stringr)
library(data.table)
library(openNLP)
library(NLP)
sent_token_annotator <- Maxent_Sent_Token_Annotator()
word_token_annotator <- Maxent_Word_Token_Annotator()
pos_tag_annotator <- Maxent_POS_Tag_Annotator()
unigram_dt <- readRDS("data/unigram_dt.rds")
bigram_dt <- readRDS("data/bigram_dt.rds")
trigram_dt <- readRDS("data/trigram_dt.rds")
badwords <- readRDS("data/badwords.rds")
# Normalise raw user input before n-gram lookup.
# The replacements are order-dependent (later patterns assume earlier ones
# have already run), so do not reorder them.
clean_text <- function(text) {
input_str <- tolower(text)
# Join "in-"/"un-" hyphenations onto the following word ("in-depth" -> "indepth").
input_str <- str_replace_all(input_str, "([iu]n)-([a-z])", "\\1\\2")
# Strip ordinal suffixes from numbers ("1st" -> "1").
input_str <- str_replace_all(input_str, "([0-9])(st|nd|rd|th)", "\\1")
# Keep only lowercase letters, dots, apostrophes and spaces.
input_str <- str_replace_all(input_str, "[^a-z.' ]", " ")
# Drop bare www URLs (dots survived the previous step, so these still match).
input_str <- str_replace_all(input_str, "www\\.[a-z]+\\.[a-z]+", "")
# Remaining dots become word separators.
input_str <- str_replace_all(input_str, "\\.", " ")
# Remove standalone words that are one letter repeated ("aaa").
input_str <- str_replace_all(input_str, " ([a-z])\\1+ |^([a-z])\\1+ | ([a-z])\\1+$|^([a-z])\\1+$", " ")
# Squeeze runs of 3+ identical letters down to two ("soooo" -> "soo").
input_str <- str_replace_all(input_str, "([a-z])\\1{2,}", "\\1\\1")
# Unwrap words quoted with apostrophes ("'word'" -> "word").
input_str <- str_replace_all(input_str, "\\'+([a-z]+)\\'+", "\\1")
# Clean up stray apostrophe tokens and leading/trailing apostrophes.
input_str <- str_replace_all(input_str, "\\'+ \\'+", " ")
input_str <- str_replace_all(input_str, "(\\'+ )+|( \\'+)+|^\\'+|\\'+$", " ")
# NOTE(review): this blanks the entire string when it is a single word with
# no spaces -- confirm that discarding one-word inputs is intended.
input_str <- str_replace_all(input_str, "^[a-z]+$", "")
# Drop single-letter words other than "a" and "i".
input_str <- str_replace_all(input_str, "( [^ai])+ |^([^ai] )+|( [^ai])+$", " ")
# NOTE(review): the trailing "|" makes the last alternative match the empty
# string; harmless here but probably unintended.
input_str <- str_replace_all(input_str, "^ +| +$|", "")
# Collapse multiple spaces and trim.
input_str <- str_replace_all(input_str, " {2,}", " ")
input_str <- str_replace_all(input_str, " +$|^ +", "")
return(input_str)
}
# Censor profanity in `text`: each word found in the global `badwords` list
# is replaced by its first letter followed by "***" ("damn" -> "d***").
#
# @param text A length-1 character string.
# @return The censored string, or `text` unchanged when it is zero-length or
#   contains no words.
filter_text <- function(text) {
  if (length(text) == 0) {
    return(text)
  }
  words <- parse_text(text)
  if (length(words) == 0) {
    return(text)
  }
  # Vectorised censor: mask the bad words, keep the rest. (The original
  # looped word-by-word and rebuilt the string with repeated paste calls,
  # which is O(n^2) in the number of words.)
  bad <- words %in% badwords
  words[bad] <- paste0(substring(words[bad], 1, 1), "***")
  paste(words, collapse = " ")
}
# Fallback next-word prediction based on part of speech.
# POS-tags `text` with openNLP (NLP::annotate) and returns a generic
# continuation word for the first matching tag class; defaults to "the".
#
# @param text A length-1 character string (typically the last input word).
# @return A single predicted word.
get_default <- function(text) {
  if (length(text) > 0) {
    a2 <- annotate(as.String(text), list(sent_token_annotator, word_token_annotator))
    a3 <- annotate(as.String(text), pos_tag_annotator, a2)
    a3w <- subset(a3, type == "word")
    tags <- sapply(a3w$features, `[[`, "POS")
    # BUG FIX: `tags` has one element per token, so data.table's %like%
    # returns a vector. The original fed that vector straight to if(),
    # which is an error in R >= 4.3 (and previously only consulted the
    # first tag with a warning). any() checks all tokens.
    if (any(tags %like% "NN")) {
      return("in")
    } else if (any(tags %like% "VB")) {
      return("a")
    } else if (any(tags %like% "JJ")) {
      return("time")
    } else if (any(tags %like% "PRP")) {
      return("first")
    } else if (any(tags %like% "CC")) {
      return("i")
    } else if (text == "the") {
      return("first")
    }
  }
  return("the")
}
# Split a whitespace-delimited string into its non-empty word tokens.
#
# @param text A character vector (normally length 1).
# @return A character vector of words; zero-length when `text` has none.
parse_text <- function(text) {
  tokens <- unlist(strsplit(text, " ", fixed = TRUE))
  tokens[tokens != ""]
}
# Autocomplete the final (partial) word of `text` against the unigram table.
# The last token is treated as a prefix; the most frequent unigram starting
# with it replaces that token (profanity-censored via filter_text). Only
# completes when the input has at least two words; otherwise `text` is
# returned unchanged.
#
# @param text A length-1 character string of space-separated words.
# @return `text` with its last word completed, or `text` unchanged when no
#   completion is found.
get_word <- function(text) {
  if (text == " ") {
    return(text)
  }
  words <- parse_text(tolower(text))
  num_words <- length(words)
  if (num_words == 0) {
    return(text)
  }
  # Prefix-match the last token against the unigram table. (Renamed from
  # `filter`, which shadowed base::Filter / dplyr::filter.)
  # NOTE(review): the token is interpolated into the regex unescaped; tokens
  # are restricted to letters/apostrophes upstream, so this is safe there.
  prefix <- paste0("^", words[num_words])
  candidates <- unigram_dt[n0 %like% prefix]
  if (nrow(candidates) == 0 || num_words < 2) {
    # No match, or a single word: nothing to complete (matches the
    # original's fall-through behaviour).
    return(text)
  }
  # Most frequent completion wins.
  best <- candidates[order(rank(-freq))][1]$n0
  # Rebuild the sentence: everything but the last word, then the censored
  # completion. (The original concatenated word-by-word in a loop.)
  stem <- paste(words[seq_len(num_words - 1)], collapse = " ")
  paste(stem, filter_text(best))
}
# Predict the next word with a linearly interpolated trigram/bigram/unigram
# back-off model (weights l1/l2/l3). Scores every trigram continuation of
# the last two words and every bigram continuation of the last word, then
# returns the highest-scoring candidate; falls back to get_default().
get_pred <- function(text) {
if (text != " ") {
input_words <- parse_text(clean_text(text))
len <- length(input_words)
# w1 = last word, w2 = second-to-last (literal "NA" when only one word,
# which will simply never match a trigram context).
if (len > 1) {
w1 <- input_words[len]
w2 <- input_words[len - 1]
} else if (len > 0) {
w1 <- input_words[len]
w2 <- "NA"
} else return("the")
# Interpolation weights: trigram, bigram, unigram.
l1 <- .95
l2 <- .04
l3 <- .01
# NOTE(review): `trigram_dt[trigram_dt[n2 == w2 & n1 == w1]]` indexes the
# table with a subset of itself rather than with the logical condition;
# this looks like it should be `trigram_dt[n2 == w2 & n1 == w1]` -- confirm
# against data.table semantics before relying on these counts.
len3 <- length(trigram_dt[trigram_dt[n2 == w2 & n1 == w1]]$freq)
len2 <- length(bigram_dt[bigram_dt[n1 == w1]]$freq)
# Candidate table: column 1 = word, column 2 = interpolated score.
# NOTE(review): assigning "" to column 1 coerces the whole matrix to
# character, so scores end up stored (and compared) as strings -- verify
# which.max below behaves as intended on a character column.
matches <- matrix(nrow = len3 + len2, ncol = 2)
matches[,1] <- ""
matches[,2] <- 0
# Score trigram continuations of (w2, w1).
if (len3 > 0) {
for (i in 1:len3) {
matches[i, 1] <- trigram_dt[trigram_dt[n2 == w2 & n1 == w1]]$n0[i]
cnt2 <- length(bigram_dt[bigram_dt[n1 == w1 & n0 == matches[i, 1]]]$freq)
cnt1 <- length(unigram_dt[unigram_dt[n0 == matches[i, 1]]]$freq)
if (cnt2 > 0) freq2 <- bigram_dt[bigram_dt[n1 == w1 &
n0 == matches[i, 1]]]$freq else freq2 <- 0
if (cnt1 > 0) freq1 <- unigram_dt[unigram_dt[n0 == matches[i, 1]]]$freq else freq1 <- 0
matches[i, 2] <- trigram_dt[trigram_dt[n2 == w2 & n1 == w1]]$freq[i] *
l1 + freq2 * l2 + freq1 * l3
}
}
# Score bigram continuations of w1, appended after the trigram rows.
if (len2 > 0) {
for (i in sum(len3, 1):sum(len3, len2)) {
matches[i, 1] <- bigram_dt[bigram_dt[n1 == w1]]$n0[i - len3]
cnt1 <- length(unigram_dt[unigram_dt[n0 == matches[i, 1]]]$freq)
if (cnt1 > 0) freq1 <- unigram_dt[unigram_dt[n0 == matches[i, 1]]]$freq else freq1 <- 0
matches[i, 2] <- bigram_dt[bigram_dt[n1 == w1]]$freq[i - len3] * l2 + freq1 * l3
}
}
# Best candidate: single-index lookup into the (column-major) matrix picks
# the word in column 1 at the row of the maximal score.
match_len <- length(matches[which.max(matches[,2])])
if (match_len > 0) return(matches[which.max(matches[,2])])
return(get_default(w1))
}
return(" ")
}
|
/helpers.R
|
no_license
|
Abhishek2017/Next-Word-Prediction
|
R
| false
| false
| 5,523
|
r
|
Sys.setenv(JAVA_HOME='C:\\Program Files\\Java\\jdk1.8.0_91\\jre')
library(stringr)
library(data.table)
library(openNLP)
library(NLP)
sent_token_annotator <- Maxent_Sent_Token_Annotator()
word_token_annotator <- Maxent_Word_Token_Annotator()
pos_tag_annotator <- Maxent_POS_Tag_Annotator()
unigram_dt <- readRDS("data/unigram_dt.rds")
bigram_dt <- readRDS("data/bigram_dt.rds")
trigram_dt <- readRDS("data/trigram_dt.rds")
badwords <- readRDS("data/badwords.rds")
clean_text <- function(text) {
input_str <- tolower(text)
input_str <- str_replace_all(input_str, "([iu]n)-([a-z])", "\\1\\2")
input_str <- str_replace_all(input_str, "([0-9])(st|nd|rd|th)", "\\1")
input_str <- str_replace_all(input_str, "[^a-z.' ]", " ")
input_str <- str_replace_all(input_str, "www\\.[a-z]+\\.[a-z]+", "")
input_str <- str_replace_all(input_str, "\\.", " ")
input_str <- str_replace_all(input_str, " ([a-z])\\1+ |^([a-z])\\1+ | ([a-z])\\1+$|^([a-z])\\1+$", " ")
input_str <- str_replace_all(input_str, "([a-z])\\1{2,}", "\\1\\1")
input_str <- str_replace_all(input_str, "\\'+([a-z]+)\\'+", "\\1")
input_str <- str_replace_all(input_str, "\\'+ \\'+", " ")
input_str <- str_replace_all(input_str, "(\\'+ )+|( \\'+)+|^\\'+|\\'+$", " ")
input_str <- str_replace_all(input_str, "^[a-z]+$", "")
input_str <- str_replace_all(input_str, "( [^ai])+ |^([^ai] )+|( [^ai])+$", " ")
input_str <- str_replace_all(input_str, "^ +| +$|", "")
input_str <- str_replace_all(input_str, " {2,}", " ")
input_str <- str_replace_all(input_str, " +$|^ +", "")
return(input_str)
}
filter_text <- function(text) {
tmp <- text
if (length(tmp) > 0) {
words <- parse_text(tmp)
num_words <- length(words)
if (num_words > 0) {
for (i in 1:num_words) {
if (words[i] %in% badwords) words[i] <- paste(substring(words[i], 1, 1), "***", sep = "")
}
tmp_w <- paste(words[1])
if (num_words > 1) {
for (i in 2:num_words) tmp_w <- paste(tmp_w, words[i])
}
return(tmp_w)
}
}
return(tmp)
}
# Fallback next-word prediction based on part of speech.
# POS-tags `text` with openNLP (NLP::annotate) and returns a generic
# continuation word for the first matching tag class; defaults to "the".
#
# @param text A length-1 character string (typically the last input word).
# @return A single predicted word.
get_default <- function(text) {
  if (length(text) > 0) {
    a2 <- annotate(as.String(text), list(sent_token_annotator, word_token_annotator))
    a3 <- annotate(as.String(text), pos_tag_annotator, a2)
    a3w <- subset(a3, type == "word")
    tags <- sapply(a3w$features, `[[`, "POS")
    # BUG FIX: `tags` has one element per token, so data.table's %like%
    # returns a vector. The original fed that vector straight to if(),
    # which is an error in R >= 4.3 (and previously only consulted the
    # first tag with a warning). any() checks all tokens.
    if (any(tags %like% "NN")) {
      return("in")
    } else if (any(tags %like% "VB")) {
      return("a")
    } else if (any(tags %like% "JJ")) {
      return("time")
    } else if (any(tags %like% "PRP")) {
      return("first")
    } else if (any(tags %like% "CC")) {
      return("i")
    } else if (text == "the") {
      return("first")
    }
  }
  return("the")
}
parse_text <- function(text) {
tmp <- unlist(str_split(text, " "))
tmp <- tmp[tmp != ""]
return(tmp)
}
get_word <- function(text) {
if (text != " ") {
words <- parse_text(tolower(text))
num_words <- length(words)
if (num_words > 0) {
filter <- paste("^", words[num_words], sep = "")
tmp_dt <- unigram_dt[n0 %like% filter]
pred_word <- dim(tmp_dt)[1]
if (pred_word > 0) {
tmp_dt <- tmp_dt[order(rank(-freq))]
pred <- tmp_dt[1]$n0
if (num_words > 2) {
tmp_w <- paste(words[1])
for (i in 2:(num_words - 1)) tmp_w <- paste(tmp_w, words[i])
return(paste(tmp_w, filter_text(pred)))
} else if (num_words > 1) {
tmp_w <- paste(words[1])
return(paste(tmp_w, filter_text(pred)))
}
}
}
}
return(text)
}
get_pred <- function(text) {
if (text != " ") {
input_words <- parse_text(clean_text(text))
len <- length(input_words)
if (len > 1) {
w1 <- input_words[len]
w2 <- input_words[len - 1]
} else if (len > 0) {
w1 <- input_words[len]
w2 <- "NA"
} else return("the")
l1 <- .95
l2 <- .04
l3 <- .01
len3 <- length(trigram_dt[trigram_dt[n2 == w2 & n1 == w1]]$freq)
len2 <- length(bigram_dt[bigram_dt[n1 == w1]]$freq)
matches <- matrix(nrow = len3 + len2, ncol = 2)
matches[,1] <- ""
matches[,2] <- 0
if (len3 > 0) {
for (i in 1:len3) {
matches[i, 1] <- trigram_dt[trigram_dt[n2 == w2 & n1 == w1]]$n0[i]
cnt2 <- length(bigram_dt[bigram_dt[n1 == w1 & n0 == matches[i, 1]]]$freq)
cnt1 <- length(unigram_dt[unigram_dt[n0 == matches[i, 1]]]$freq)
if (cnt2 > 0) freq2 <- bigram_dt[bigram_dt[n1 == w1 &
n0 == matches[i, 1]]]$freq else freq2 <- 0
if (cnt1 > 0) freq1 <- unigram_dt[unigram_dt[n0 == matches[i, 1]]]$freq else freq1 <- 0
matches[i, 2] <- trigram_dt[trigram_dt[n2 == w2 & n1 == w1]]$freq[i] *
l1 + freq2 * l2 + freq1 * l3
}
}
if (len2 > 0) {
for (i in sum(len3, 1):sum(len3, len2)) {
matches[i, 1] <- bigram_dt[bigram_dt[n1 == w1]]$n0[i - len3]
cnt1 <- length(unigram_dt[unigram_dt[n0 == matches[i, 1]]]$freq)
if (cnt1 > 0) freq1 <- unigram_dt[unigram_dt[n0 == matches[i, 1]]]$freq else freq1 <- 0
matches[i, 2] <- bigram_dt[bigram_dt[n1 == w1]]$freq[i - len3] * l2 + freq1 * l3
}
}
match_len <- length(matches[which.max(matches[,2])])
if (match_len > 0) return(matches[which.max(matches[,2])])
return(get_default(w1))
}
return(" ")
}
|
library(leaflet)
library(purrr)
library(dplyr)
library(glue)
library(sf)
acs_poverty_county_sp <- st_read(here::here("data", "original", "acs_poverty_county.geojson"))
# -------------------------------------------------------------------------------------------------
# check ranges for three variables of interest
# range(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_estimate)
# 2.7 35.9
# range(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone_estimate)
# 2.2 35.2
# range(range(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone_estimate, na.rm = TRUE))
# 0.0, 88.1
# check distributions
hist(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_estimate)
hist(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone_estimate)
hist(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone_estimate)
# I bet anything over ~40 is due to absurd margins of error
# function to check if moe very high, which I'm calling > 50% of value. Returns NA for elements that are
# Blank out unstable ACS estimates: an estimate is treated as unreliable
# when its margin of error exceeds half the estimate itself, and is
# replaced with NA so it is not coloured on the map.
#
# @param variable_name Base name of an ACS column pair
#   ("<name>_estimate" / "<name>_moe") in `acs_poverty_county_sp`.
# @return The estimate vector with unstable values set to NA.
check_unstable <- function(variable_name) {
  est <- acs_poverty_county_sp[[glue("{variable_name}_estimate")]]
  moe <- acs_poverty_county_sp[[glue("{variable_name}_moe")]]
  ifelse(est < 2 * moe, NA, est)
}
# range(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined"), na.rm = TRUE)
# range(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone"), na.rm = TRUE)
# range(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone"), na.rm = TRUE)
# quick map of unemployment by white and black. Note that unemployments above 30% are assumed unstable
# Binned colour scale for poverty rates (percent, 0-100).
poverty_scale <- colorBin("BuPu", c(0,100), c(0, 5, 10, 20, 40, 100))
# Interactive choropleth with three switchable layers: overall, white-alone,
# and Black-alone poverty. Counties with unstable estimates (see
# check_unstable) are left uncoloured.
m <- leaflet(acs_poverty_county_sp) %>%
addTiles() %>% # Add default OpenStreetMap map tiles
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.8,
fillColor = poverty_scale(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined")),
group = "Overall Poverty",
label = ~map(glue("{NAME.x} County<br/>
Poverty Rate: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_estimate}%<br/>
MOE: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_moe}%"), htmltools::HTML)
) %>%
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.8,
fillColor = poverty_scale(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone")),
group = "White Alone Poverty",
label = ~map(glue("{NAME.x} County<br/>
Poverty Rate: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone_estimate}%<br/>
MOE: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone_moe}%"), htmltools::HTML)
) %>%
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
# BUG FIX: the original had `opacity = ,` (an empty argument), which
# silently fell back to leaflet's default border opacity instead of the
# 1.0 used by the other two layers.
opacity = 1.0, fillOpacity = 0.8,
fillColor = poverty_scale(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone")),
group = "Black Alone Poverty",
label = ~map(glue("{NAME.x} County<br/>
Poverty Rate: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone_estimate}%<br/>
MOE: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone_moe}%"), htmltools::HTML)
) %>%
addLegend("bottomright", pal = poverty_scale, values = ~estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_estimate,
title = "Poverty Rate",
opacity = .8,
labFormat = labelFormat(suffix = "%")
) %>%
addLayersControl(
baseGroups = c("Overall Poverty", "White Alone Poverty", "Black Alone Poverty"),
options = layersControlOptions(collapsed = FALSE)
)
# Render the widget.
m
|
/src/Mapping/acs_poverty_mapping.R
|
no_license
|
DSPG-Young-Scholars-Program/dspg20halifax
|
R
| false
| false
| 5,350
|
r
|
library(leaflet)
library(purrr)
library(dplyr)
library(glue)
library(sf)
acs_poverty_county_sp <- st_read(here::here("data", "original", "acs_poverty_county.geojson"))
# -------------------------------------------------------------------------------------------------
# check ranges for three variables of interest
# range(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_estimate)
# 2.7 35.9
# range(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone_estimate)
# 2.2 35.2
# range(range(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone_estimate, na.rm = TRUE))
# 0.0, 88.1
# check distributions
hist(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_estimate)
hist(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone_estimate)
hist(acs_poverty_county_sp$estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone_estimate)
# I bet anything over ~40 is due to absurd margins of error
# function to check if moe very high, which I'm calling > 50% of value. Returns NA for elements that are
check_unstable <- function(variable_name) {
ifelse((acs_poverty_county_sp[[glue("{variable_name}_estimate")]]) < 2 * acs_poverty_county_sp[[glue("{variable_name}_moe")]],
NA,
acs_poverty_county_sp[[glue("{variable_name}_estimate")]])
}
# range(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined"), na.rm = TRUE)
# range(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone"), na.rm = TRUE)
# range(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone"), na.rm = TRUE)
# quick map of unemployment by white and black. Note that unemployments above 30% are assumed unstable
# Binned colour scale for poverty rates (percent, 0-100).
poverty_scale <- colorBin("BuPu", c(0,100), c(0, 5, 10, 20, 40, 100))
# Interactive choropleth with three switchable layers: overall, white-alone,
# and Black-alone poverty. Counties with unstable estimates (see
# check_unstable) are left uncoloured.
m <- leaflet(acs_poverty_county_sp) %>%
addTiles() %>% # Add default OpenStreetMap map tiles
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.8,
fillColor = poverty_scale(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined")),
group = "Overall Poverty",
label = ~map(glue("{NAME.x} County<br/>
Poverty Rate: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_estimate}%<br/>
MOE: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_moe}%"), htmltools::HTML)
) %>%
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
opacity = 1.0, fillOpacity = 0.8,
fillColor = poverty_scale(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone")),
group = "White Alone Poverty",
label = ~map(glue("{NAME.x} County<br/>
Poverty Rate: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone_estimate}%<br/>
MOE: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_white_alone_moe}%"), htmltools::HTML)
) %>%
addPolygons(color = "#444444", weight = 0.5, smoothFactor = 0.5,
# BUG FIX: the original had `opacity = ,` (an empty argument), which
# silently fell back to leaflet's default border opacity instead of the
# 1.0 used by the other two layers.
opacity = 1.0, fillOpacity = 0.8,
fillColor = poverty_scale(check_unstable("estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone")),
group = "Black Alone Poverty",
label = ~map(glue("{NAME.x} County<br/>
Poverty Rate: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone_estimate}%<br/>
MOE: {estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_race_and_hispanic_or_latino_origin_black_or_african_american_alone_moe}%"), htmltools::HTML)
) %>%
addLegend("bottomright", pal = poverty_scale, values = ~estimate_percent_below_poverty_level_population_for_whom_poverty_status_is_determined_estimate,
title = "Poverty Rate",
opacity = .8,
labFormat = labelFormat(suffix = "%")
) %>%
addLayersControl(
baseGroups = c("Overall Poverty", "White Alone Poverty", "Black Alone Poverty"),
options = layersControlOptions(collapsed = FALSE)
)
# Render the widget.
m
|
# Load the "labour force by marital status and state" open-data extract.
# NOTE(review): hard-coded absolute Windows path -- this only runs on the
# author's machine; consider a relative path or file.choose().
library(readr)
bptms_Labour_force_by_marital_status_state <- read_csv("C:/Users/MOE/Downloads/bptms-Labour_force_by_marital_status_state.csv")
# Open the data frame in the RStudio viewer (interactive use only).
View(bptms_Labour_force_by_marital_status_state)
|
/gettingdata.R
|
no_license
|
fuadkpm1980/GettingData-1
|
R
| false
| false
| 192
|
r
|
library(readr)
bptms_Labour_force_by_marital_status_state <- read_csv("C:/Users/MOE/Downloads/bptms-Labour_force_by_marital_status_state.csv")
View(bptms_Labour_force_by_marital_status_state)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ensembling.R
\name{ensemble}
\alias{ensemble}
\alias{ensemble_forecasts}
\title{Combine (Ensemble) Casts}
\usage{
ensemble_forecasts(
main = ".",
method = "unwtavg",
forecasts_groups = NULL,
forecasts_ids = NULL,
forecast_table = NULL,
historic_end_newmoonnumber = NULL,
models = NULL,
dataset = NULL,
species = NULL
)
}
\arguments{
\item{main}{\code{character} value of the name of the main component of the directory tree.}
\item{method}{\code{character} value of the name of the ensemble method to use. Presently, only \code{"unwtavg"} (unweighted average) is allowed.}
\item{forecasts_groups}{\code{integer} (or integer \code{numeric}) value of the forecasts groups to combine with an ensemble. If \code{NULL} (default), the most recent forecast group is ensembled.}
\item{forecasts_ids}{\code{integer} (or integer \code{numeric}) values representing the forecasts of interest for restricting ensembling, as indexed within the directory in the \code{casts} sub folder. See the forecasts metadata file (\code{forecasts_metadata.csv}) for summary information.}
\item{forecast_table}{Optional \code{data.frame} of forecast table outputs. If not input, will be loaded.}
\item{historic_end_newmoonnumber}{\code{integer} (or integer \code{numeric}) newmoon number of the forecast origin. Default value is \code{NULL}, which equates to no selection.}
\item{models}{\code{character} value(s) of the name of the model to include. Default value is \code{NULL}, which equates to no selection with respect to \code{model}. \code{NULL} translates to all \code{models} in the table.}
\item{dataset}{\code{character} value of the rodent data set to include. Default value is \code{NULL}, which equates to the first data set encountered.}
\item{species}{\code{character} vector of the species code(s) (or \code{"total"} for the total across species) to be plotted. \code{NULL} translates to the species defined by \code{\link[portalr:rodent_species]{forecasting_species}} called by \code{\link{prefab_species}}.}
}
\value{
\code{data.frame} of ensembled forecasts.
}
\description{
Combine multiple forecasts' output into a single ensemble. Presently, only a general average ensemble is available.
}
\details{
A pre-loaded table of forecasts can be input, but if not (default), the table will be efficiently (as defined by the inputs) loaded and trimmed. \cr
The forecasts can be trimmed specifically using the \code{forecasts_ids} input, otherwise, all relevant forecasts from the stated \code{forecast_groups} will be included.
}
\examples{
\dontrun{
main1 <- file.path(tempdir(), "ensemble")
setup_production(main = main1)
forecasts_ids <- select_forecasts(main = main1,
datasets = "controls",
species = "DM")$forecast_id
ensemble_forecasts(main = main1,
forecasts_ids = forecasts_ids)
unlink(main1, recursive = TRUE)
}
}
\seealso{
Core forecasting functions:
\code{\link{evaluate_forecasts}()},
\code{\link{portalcast}()},
\code{\link{process_forecast_output}()}
}
\concept{core}
|
/man/ensemble.Rd
|
permissive
|
weecology/portalcasting
|
R
| false
| true
| 3,200
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ensembling.R
\name{ensemble}
\alias{ensemble}
\alias{ensemble_forecasts}
\title{Combine (Ensemble) Casts}
\usage{
ensemble_forecasts(
main = ".",
method = "unwtavg",
forecasts_groups = NULL,
forecasts_ids = NULL,
forecast_table = NULL,
historic_end_newmoonnumber = NULL,
models = NULL,
dataset = NULL,
species = NULL
)
}
\arguments{
\item{main}{\code{character} value of the name of the main component of the directory tree.}
\item{method}{\code{character} value of the name of the ensemble method to use. Presently, only \code{"unwtavg"} (unweighted average) is allowed.}
\item{forecasts_groups}{\code{integer} (or integer \code{numeric}) value of the forecasts groups to combine with an ensemble. If \code{NULL} (default), the most recent forecast group is ensembled.}
\item{forecasts_ids}{\code{integer} (or integer \code{numeric}) values representing the forecasts of interest for restricting ensembling, as indexed within the directory in the \code{casts} sub folder. See the forecasts metadata file (\code{forecasts_metadata.csv}) for summary information.}
\item{forecast_table}{Optional \code{data.frame} of forecast table outputs. If not input, will be loaded.}
\item{historic_end_newmoonnumber}{\code{integer} (or integer \code{numeric}) newmoon number of the forecast origin. Default value is \code{NULL}, which equates to no selection.}
\item{models}{\code{character} value(s) of the name of the model to include. Default value is \code{NULL}, which equates to no selection with respect to \code{model}. \code{NULL} translates to all \code{models} in the table.}
\item{dataset}{\code{character} value of the rodent data set to include. Default value is \code{NULL}, which equates to the first data set encountered.}
\item{species}{\code{character} vector of the species code(s) (or \code{"total"} for the total across species) to be plotted. \code{NULL} translates to the species defined by \code{\link[portalr:rodent_species]{forecasting_species}} called by \code{\link{prefab_species}}.}
}
\value{
\code{data.frame} of ensembled forecasts.
}
\description{
Combine multiple forecasts' output into a single ensemble. Presently, only a general average ensemble is available.
}
\details{
A pre-loaded table of forecasts can be input, but if not (default), the table will be efficiently (as defined by the inputs) loaded and trimmed. \cr
The forecasts can be trimmed specifically using the \code{forecasts_ids} input, otherwise, all relevant forecasts from the stated \code{forecast_groups} will be included.
}
\examples{
\dontrun{
main1 <- file.path(tempdir(), "ensemble")
setup_production(main = main1)
forecasts_ids <- select_forecasts(main = main1,
datasets = "controls",
species = "DM")$forecast_id
ensemble_forecasts(main = main1,
forecasts_ids = forecasts_ids)
unlink(main1, recursive = TRUE)
}
}
\seealso{
Core forecasting functions:
\code{\link{evaluate_forecasts}()},
\code{\link{portalcast}()},
\code{\link{process_forecast_output}()}
}
\concept{core}
|
# Homework 3, Question 1 -------------------------------------------------
# NOTE(review): this script mutates global state (setwd, par, RNG) and is
# meant to be run top to bottom as a standalone analysis.
setwd("~/Documents/git/DL")
# 1
# One-off conversion of the raw .dat digit files into an .RData cache;
# kept commented out so reruns just load the cached copy below.
# minst0_test <- read.table(file = "./minst0_test.dat")
# minst0_train <- read.table(file = "./minst0_train.dat")
# minst9_test <- read.table(file = "./minst9_test.dat")
# minst9_train <- read.table(file = "./minst9_train.dat")
# save(list = c("minst0_test", "minst0_train", "minst9_test", "minst9_train"),
#      file = "./hw3q1_data.RData")
load(file = "./hw3q1_data.RData")
library(EBImage)  # provides filter2() for 2-D convolution
# Render a matrix as a greyscale image, transposed/row-flipped so the matrix
# orientation matches the on-screen orientation.
draw <- function(mat, main = ""){
  image(t(mat)[,ncol(mat):1], axes = FALSE, col = grey(seq(0, 1, length = 256)), main=main)
}
par(mfrow = c(3, 3), oma = c(0, 0, 0, 0), mar = c(1, 1, 1, 1))
# First training digit "9", reshaped from a 784-wide row back to 28x28.
m <- matrix(data = unlist(minst9_train[1, ]), nrow = 28, byrow = TRUE)
draw(m)
#a, rotate, clockwise, 90 degree
# Single-1 kernels: each shifts the image by one pixel in one direction.
ya <- matrix(c(0, 1, 0, 0, 0, 0, 0, 0, 0), nrow = 3)
draw(filter2(m, ya))
yb <- matrix(c(0, 0, 0, 0, 0, 0, 0, 1, 0), nrow = 3)
draw(filter2(m, yb))
yc <- matrix(c(0, 0, 0, 1, 0, 0, 0, 0, 0), nrow = 3)
draw(filter2(m, yc))
yd <- matrix(c(0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 3)
draw(filter2(m, yd))
# Sobel-style edge kernels (ye1, ye2) and two Laplacian variants (ye3, ye4).
ye1 <- matrix(c(-1, 0, 1, -2, 0, 2, -1, 0, 1), nrow = 3)
draw(filter2(m, ye1))
ye2 <- matrix(c(1, 2, 1, 0, 0, 0, -1, -2, -1), nrow = 3)
draw(filter2(m, ye2))
ye3 <- matrix(c(0, -1, 0, -1, 4, -1, 0, -1, 0), nrow = 3)
draw(filter2(m, ye3))
ye4 <- matrix(c(-1, -1, -1, -1, 8, -1, -1, -1, -1), nrow = 3)
draw(filter2(m, ye4))
# b
par(mfrow = c(2, 3))
# Box-blur (mean) kernels of increasing size ...
yb_3 <- matrix(rep(1/3^2, 3^2), nrow = 3)
yb_5 <- matrix(rep(1/5^2, 5^2), nrow = 5)
yb_7 <- matrix(rep(1/7^2, 7^2), nrow = 7)
draw(filter2(m, yb_3))
draw(filter2(m, yb_5))
draw(filter2(m, yb_7))
# ... and the matching sharpening kernels: 2 * identity - blur.
ys_3 <- matrix(numeric(3^2), nrow = 3)
ys_3[(3+1)/2, (3+1)/2] <- 2
ys_3 <- ys_3 - yb_3
ys_5 <- matrix(numeric(5^2), nrow = 5)
ys_5[(5+1)/2, (5+1)/2] <- 2
ys_5 <- ys_5 - yb_5
ys_7 <- matrix(numeric(7^2), nrow = 7)
ys_7[(7+1)/2, (7+1)/2] <- 2
ys_7 <- ys_7 - yb_7
draw(filter2(m, ys_3))
draw(filter2(m, ys_5))
draw(filter2(m, ys_7))
# yu_3 <- matrix(runif(3^2), nrow = 3)
# yu_5 <- matrix(runif(5^2), nrow = 5)
# yu_7 <- matrix(runif(7^2), nrow = 7)
# image(filter2(m, yu_3), axes = FALSE, col = grey(seq(0, 1, length = 256)))
# image(filter2(m, yu_5), axes = FALSE, col = grey(seq(0, 1, length = 256)))
# image(filter2(m, yu_7), axes = FALSE, col = grey(seq(0, 1, length = 256)))
# c
# Downsample matrix `x` by aggregating non-overlapping `pool` x `pool`
# blocks with the block mean (method = "mean") or the block max (any other
# value, matching the original max fallback). Returns an
# (nrow(x)/pool) x (ncol(x)/pool) matrix; `pool` is expected to divide both
# dimensions evenly.
#
# BUG FIX: the original selected the aggregator with
#   func <- ifelse(method == "mean", mean, max)
# which always errors ("attempt to replicate an object of type 'closure'")
# because ifelse() cannot carry functions; use a plain if/else instead.
pooling <- function(x, pool, method){
  m <- nrow(x)/pool
  n <- ncol(x)/pool
  z <- matrix(nrow = m, ncol = n)
  func <- if (identical(method, "mean")) mean else max
  for (i in seq_len(m)){
    for (j in seq_len(n)){
      z[i, j] <- func(x[(pool*(i-1)+1):(pool*i),
                        (pool*(j-1)+1):(pool*j)])
    }
  }
  z
}
# Part (c): compare mean- and max-pooling at two downsampling factors.
par(mfrow = c(2, 3))
draw(m)
draw(pooling(m,4,"mean"))
draw(pooling(m,4,"max"))
draw(m)
draw(pooling(m,7,"mean"))
draw(pooling(m,7,"max"))
## d
# Part (d): a single conv -> ReLU -> max-pool pipeline using the Laplacian
# edge kernel ye3 defined above.
cov_laplacian <- filter2(m, ye3)
detect_ReLU <- ifelse(cov_laplacian>0, cov_laplacian, 0)
pool_max <- pooling(detect_ReLU,4,"max")
par(mfrow = c(2, 2))
draw(m)
draw(cov_laplacian)
draw(detect_ReLU)
draw(pool_max)
## e
# Part (e): binary classification data -- first 100 "9" digits (label 1)
# stacked on the first 100 "0" digits (label 0); column 785 is the label.
train_data <- rbind(cbind(minst9_train, label = 1)[1:100, ],
                    cbind(minst0_train, label = 0)[1:100, ])
# Logistic squashing function used as the network's output activation.
sigmoid <- function(x) 1/(1+exp(-x))
# Forward pass of a tiny fixed-architecture CNN over every row of the global
# `train_data`:
#   5 conv kernels (3x3) -> ReLU -> 14x14 max pooling (2x2 map per channel)
#   -> 20-value feature vector -> 10-unit dense layer -> sigmoid output.
#
# `pars` packs all 266 parameters:
#   1:45     the five 3x3 convolution kernels
#   46:55    dense-layer intercepts (alpha_0, 1x10)
#   56:255   dense-layer weights (alpha_1, 20x10)
#   256      output intercept (beta_0)
#   257:266  output weights (beta_1, 10x1)
#
# Returns a numeric vector of predicted probabilities, one per training row.
# NOTE(review): relies on the globals `train_data`, `sigmoid`, `pooling` and
# EBImage's `filter2`; column 785 of `train_data` is assumed to be the label.
forward_prop <- function(pars){
  kern_1 <- matrix(pars[1:9], nrow = 3, ncol = 3)
  kern_2 <- matrix(pars[10:18], nrow = 3, ncol = 3)
  kern_3 <- matrix(pars[19:27], nrow = 3, ncol = 3)
  kern_4 <- matrix(pars[28:36], nrow = 3, ncol = 3)
  kern_5 <- matrix(pars[37:45], nrow = 3, ncol = 3)
  alpha_0 <- matrix(pars[46:55], ncol = 10)
  alpha_1 <- matrix(pars[56:255], ncol = 10)
  beta_0 <- matrix(pars[256:256], ncol = 1)
  beta_1 <- matrix(pars[257:266], ncol = 1)
  outs <- numeric(nrow(train_data))
  for (i in 1:nrow(train_data)){
    # Reshape the 784 pixel columns (label column dropped) back into 28x28.
    temp_m <- matrix(data = unlist(train_data[i, -785]),
                     nrow = 28, byrow = TRUE)
    # Channels 1..5: convolve -> ReLU -> max-pool down to a 2x2 map.
    cov_laplacian_1 <- filter2(temp_m, kern_1)
    detect_ReLU_1 <- ifelse(cov_laplacian_1>0, cov_laplacian_1, 0)
    pool_max_1 <- pooling(detect_ReLU_1,14,"max")
    cov_laplacian_2 <- filter2(temp_m, kern_2)
    detect_ReLU_2 <- ifelse(cov_laplacian_2>0, cov_laplacian_2, 0)
    pool_max_2 <- pooling(detect_ReLU_2,14,"max")
    cov_laplacian_3 <- filter2(temp_m, kern_3)
    detect_ReLU_3 <- ifelse(cov_laplacian_3>0, cov_laplacian_3, 0)
    pool_max_3 <- pooling(detect_ReLU_3,14,"max")
    cov_laplacian_4 <- filter2(temp_m, kern_4)
    detect_ReLU_4 <- ifelse(cov_laplacian_4>0, cov_laplacian_4, 0)
    pool_max_4 <- pooling(detect_ReLU_4,14,"max")
    cov_laplacian_5 <- filter2(temp_m, kern_5)
    detect_ReLU_5 <- ifelse(cov_laplacian_5>0, cov_laplacian_5, 0)
    pool_max_5 <- pooling(detect_ReLU_5,14,"max")
    # Concatenate the five 2x2 maps into a 1x20 row vector.
    input <- t(as.numeric(c(pool_max_1, pool_max_2, pool_max_3, pool_max_4, pool_max_5)))
    # Dense layer then linear output, squashed to a probability.
    output <- (input %*% alpha_1 + alpha_0) %*% beta_1 + beta_0
    outs[i] <- sigmoid(output)
  }
  outs
}
# Binary cross-entropy between 0/1 labels `y` and predicted probabilities
# `yhat`; lower is better. Returns a single numeric value.
cost_function <- function(y, yhat) {
  per_obs_loglik <- y * log(yhat) + (1 - y) * log(1 - yhat)
  -mean(per_obs_loglik)
}
# start_time <- Sys.time()
# forward_prop(pars)
# dur <- Sys.time() - start_time
# Draw one random PSO particle: location and velocity uniform in
# [-0.1, 0.1] over all 266 network parameters, evaluated once through the
# network so the initial state also seeds the particle's personal best.
INITIAL_PARTICLE <- function(){
  loc      <- runif(266, -0.1, 0.1)
  velocity <- runif(266, -0.1, 0.1)
  cost     <- cost_function(train_data$label, forward_prop(loc))
  list(cur_loc      = loc,
       cur_velocity = velocity,
       cur_cost     = cost,
       best_loc     = loc,
       best_cost    = cost)
}
# start_time <- Sys.time()
# p1 <- INITIAL_PARTICLE()
# dur <- Sys.time() - start_time
# Time difference of 2.691472 secs
# Build a swarm of `n` independently initialised particles (unnamed list).
INITIAL_SWARM <- function(n){
  lapply(seq_len(n), function(i) INITIAL_PARTICLE())
}
# start_time <- Sys.time()
# swarm <- INITIAL_SWARM(5)
# dur <- Sys.time() - start_time
# Time difference of 13.50203 secs
# Return the particle with the lowest current cost in the swarm. Ties keep
# the earliest particle, matching which.min() semantics (and the original
# strict '<' scan).
GET_GLOBAL_BEST <- function(swarm){
  costs <- vapply(swarm, function(p) p$cur_cost, numeric(1))
  swarm[[which.min(costs)]]
}
# PSO velocity update: blend inertia (w), the stochastic pull toward the
# particle's personal best (c1) and toward the global best (c2), then clamp
# every component to [-max_v, max_v]. The two runif(266) draws are made in
# the same order as the original so seeded runs are reproducible.
UPDATE_VELOCITY <- function(particle, g_best, w = 0.729, c1 = 1.49445, c2 = 1.49445, max_v){
  pull_self   <- c1 * runif(266) * (particle$best_loc - particle$cur_loc)
  pull_global <- c2 * runif(266) * (g_best$best_loc - particle$cur_loc)
  raw_velocity <- w * particle$cur_velocity + pull_self + pull_global
  particle$cur_velocity <- pmin(pmax(raw_velocity, -max_v), max_v)
  particle
}
# Move the particle by its velocity, clamp every coordinate to the search
# bounds bond = c(lower, upper), re-evaluate the network cost, and refresh
# the particle's personal best if the new cost improves on it.
#
# BUG FIX: the original had a misplaced ')' after 'bond[1]' --
#   ifelse(... < bond[1]), bond[1], ...)
# -- which made the inner ifelse() a one-argument call and passed five
# arguments to the outer ifelse(), erroring at runtime.
UPDATE_LOCATION <- function(particle, bond){
  new_loc <- particle$cur_loc + particle$cur_velocity
  particle$cur_loc <- ifelse(new_loc > bond[2],
                             bond[2],
                             ifelse(new_loc < bond[1],
                                    bond[1],
                                    new_loc))
  cur_pred <- forward_prop(particle$cur_loc)
  particle$cur_cost <- cost_function(train_data$label, cur_pred)
  if (particle$cur_cost < particle$best_cost){
    particle$best_cost <- particle$cur_cost
    particle$best_loc <- particle$cur_loc
  }
  return(particle)
}
# Particle-swarm-optimisation driver: initialise `size` particles, then for
# `iter` iterations update every particle's velocity and location against
# the current global best. Returns the best particle found.
SEARCH <- function(iter = 1000, size = 20, w = 0.729,
                   c1 = 1.49445, c2 = 1.49445, max_v = 2, bond = c(-10, 10)){
  swarm <- INITIAL_SWARM(size)
  g_best <- GET_GLOBAL_BEST(swarm)
  for (i in 1:iter){
    # BUG FIX: 'for (j in length(swarm))' only visited the LAST particle;
    # iterate over every particle instead.
    for (j in seq_along(swarm)){
      swarm[[j]] <- UPDATE_VELOCITY(swarm[[j]],
                                    g_best,
                                    w,
                                    c1,
                                    c2,
                                    max_v)
      swarm[[j]] <- UPDATE_LOCATION(swarm[[j]], bond)
    }
    g_best <- GET_GLOBAL_BEST(swarm)
  }
  # BUG FIX: return() was inside the iteration loop, so only one PSO
  # iteration ever ran regardless of 'iter'.
  return(g_best)
}
# start_time <- Sys.time()
# swarm <- SEARCH(10)
# dur <- Sys.time() - start_time
# Time difference of 13.50203 secs
## 2
# a
# Gibbs sampler for a k x k binary Ising model, swept to (approximate)
# stationarity at five inverse temperatures and plotted side by side.
k <- 32
ising <- matrix(sample(c(0, 1), k*k, replace = TRUE), ncol = k)
par(mfrow = c(3, 2))
draw(ising, main = "initial status")
betas <- c(0.2, 0.5, 0.75, 0.9, 1.25)
for(n in 1:5){
  beta <- betas[n]
  ising_vec <- as.numeric(ising)  # column-major flattening of the grid
  for (t in 1:2000){
    # One full sweep: resample each spin from its full conditional given
    # the current 4-neighbourhood.
    for (i in 1:length(ising_vec)){
      # Offsets +/-1 step within a column, +/-k step across columns.
      # NOTE(review): +/-1 wraps across column boundaries at the grid's
      # first/last rows -- confirm whether that edge behaviour is intended.
      neighbor <- c()
      if (i+1<=length(ising_vec)) neighbor <- c(neighbor, ising_vec[i+1])
      if (i+k<=length(ising_vec)) neighbor <- c(neighbor, ising_vec[i+k])
      if (i-1>0) neighbor <- c(neighbor, ising_vec[i-1])
      if (i-k>0) neighbor <- c(neighbor, ising_vec[i-k])
      # Full-conditional probability that this spin is 1.
      p <- exp(beta*sum(neighbor==1))/(exp(beta*sum(neighbor==1))+exp(beta*sum(neighbor==0)))
      U <- runif(1)
      if (U<p)
        ising_vec[i] <- 1
      else
        ising_vec[i] <- 0
    }
  }
  ising_mat <- matrix(ising_vec, nrow = k)
  draw(ising_mat, main = paste("beta=", beta, sep = ""))
}
# b
# Sampler for Bayesian linear regression y = b0 + b1*x + error with normal
# priors on the coefficients and an inverse-gamma prior on the variance
# (pscl::rigamma uses the rate parameterisation, hence the comment on r).
library(pscl)
x <- runif(10)
q <- 1/2
r <- 1/2 #rate, instead of scale, to be same definition in rigamma of package pscl
Sigma <- diag(c(100, 100))
beta0 <- rnorm(1, 0, Sigma[1, 1])
beta1 <- rnorm(1, 0, Sigma[2, 2])
sigma_2 <- rigamma(1, q, r)
betas <- matrix(NA, nrow = 100, ncol = 2)
sigma_2s <- numeric(100)
# NOTE(review): sigma_2 is added as a constant offset rather than as random
# noise -- confirm whether rnorm(10, 0, sqrt(sigma_2)) was intended.
y <- beta0+beta1*x+sigma_2
X <- cbind(1, x)
for (t in 1:10){
  repeat{
    mu <- solve(t(X)%*%X+sigma_2*Sigma)%*%t(X)%*%y
    S <- sigma_2*solve(t(X)%*%X+sigma_2*Sigma)
    beta0_star <- rnorm(1, mu[1], S[1, 1])
    beta1_star <- rnorm(1, mu[2], S[2, 2])
    sigma_2_star <- rigamma(1, q+10/2,
                            r+1/2*sum((y-beta0_star-beta1_star*x)^2))
    y_star <- beta0_star+beta1_star*x+sigma_2_star
    # Accept the draw only when the simulated data resemble the observed
    # data (Kolmogorov-Smirnov p-value above 0.5).
    if (ks.test(y, y_star)$p.value>0.5){
      # BUG FIX: 'beta0_star1' and 'beta1_star2' were undefined names that
      # would error at runtime; store the sampled values instead.
      beta0 <- beta0_star
      beta1 <- beta1_star
      sigma_2 <- sigma_2_star
      betas[t, 1] <- beta0_star
      betas[t, 2] <- beta1_star
      sigma_2s[t] <- sigma_2_star
      break
    }
  }
}
# c
# Image denoising/segmentation: binary Ising prior on the latent image x,
# Gaussian likelihood for the observed image y, with Metropolis updates for
# x, a Gibbs update for sigma2, and an exchange-style update for beta using
# an auxiliary image z.
myst_im <- as.matrix(read.table(file = "./myst_im.dat"))
draw(myst_im)
size <- nrow(myst_im)
x <-as.numeric(matrix(sample(c(0, 1), size*size, replace = TRUE), ncol = size))
beta <- 0.5
sigma2 <- 1
q <- 1/2
r <- 1/2
tau <- 100
B <- 1
y <- as.numeric(myst_im)
start_q3_c <- Sys.time()
for (t in 1:10){
  hx <- c()
  hz <- c()
  for (s in 1:20){
    x_prime <- 1-x
    for (i in 1:length(x)){
      # BUG FIX: the across-column neighbour offset must be THIS image's
      # column length 'size', not 'k' (the 32-wide grid left over from
      # part (a)).
      neighbor <- c()
      if (i+1<=length(x)) neighbor <- c(neighbor, x[i+1])
      if (i+size<=length(x)) neighbor <- c(neighbor, x[i+size])
      if (i-1>0) neighbor <- c(neighbor, x[i-1])
      if (i-size>0) neighbor <- c(neighbor, x[i-size])
      # Metropolis ratio between the flipped and current pixel value.
      d <- beta*sum(neighbor==x[i])+(-1/(2*sigma2)*(y[i]-x[i])^2)
      d_prime <- beta*sum(neighbor==x_prime[i])+(-1/(2*sigma2)*(y[i]-x_prime[i])^2)
      p <- exp(min(c(d_prime-d), 0))
      U <- runif(1)
      if (U<p)
        x[i] <- x_prime[i]
    }
  }
  # Gibbs update for the noise variance given the current latent image.
  sigma2 <- rigamma(1, q+size*size/2, (r+1/2*sum((y-x)^2)))
  beta_star <- rnorm(1, beta, B)
  # Auxiliary image z: Gibbs sweeps of the Ising prior at beta_star.
  z <- x
  for (M in 1:10){
    for (i in 1:length(z)){
      neighbor <- c()
      if (i+1<=length(z)) neighbor <- c(neighbor, z[i+1])
      if (i+size<=length(z)) neighbor <- c(neighbor, z[i+size])
      if (i-1>0) neighbor <- c(neighbor, z[i-1])
      if (i-size>0) neighbor <- c(neighbor, z[i-size])
      p <- exp(beta_star*sum(neighbor==1))/
        (exp(beta_star*sum(neighbor==1))+exp(beta_star*sum(neighbor==0)))
      U <- runif(1)
      if (U<p)
        z[i] <- 1
      else
        z[i] <- 0
    }
  }
  # Sufficient statistic h(.) = per-site counts of agreeing neighbours,
  # summed over the image, for z and for x.
  for (i in 1:length(z)){
    neighbor <- c()
    if (i+1<=length(z)) neighbor <- c(neighbor, z[i+1])
    if (i+size<=length(z)) neighbor <- c(neighbor, z[i+size])
    if (i-1>0) neighbor <- c(neighbor, z[i-1])
    if (i-size>0) neighbor <- c(neighbor, z[i-size])
    hz[i] <- sum(neighbor==z[i])
  }
  hz <- sum(hz)
  for (i in 1:length(x)){
    neighbor <- c()
    if (i+1<=length(x)) neighbor <- c(neighbor, x[i+1])
    # BUG FIX: this loop indexed x but bounds-checked against length(z)
    # with offset 'k'; use length(x) and 'size' throughout.
    if (i+size<=length(x)) neighbor <- c(neighbor, x[i+size])
    if (i-1>0) neighbor <- c(neighbor, x[i-1])
    if (i-size>0) neighbor <- c(neighbor, x[i-size])
    hx[i] <- sum(neighbor==x[i])
  }
  hx <- sum(hx)
  # Accept beta_star only when the auxiliary statistic matches closely.
  if (abs(hx-hz)<1e-3*hx){
    ratio <- ((dnorm(beta_star, 0, tau))/(dnorm(beta, 0, tau)))/
      ((dnorm(beta, beta_star, B))/(dnorm(beta_star, beta, B)))
    u <- runif(1)
    if (u<ratio)
      beta <- beta_star
  }
  print(t)
}
dur_q3_c <- Sys.time()-start_q3_c
|
/hw3draft.R
|
permissive
|
lilian743/DL
|
R
| false
| false
| 14,415
|
r
|
# Homework 3, Question 1 -------------------------------------------------
# NOTE(review): this script mutates global state (setwd, par, RNG) and is
# meant to be run top to bottom as a standalone analysis.
setwd("~/Documents/git/DL")
# 1
# One-off conversion of the raw .dat digit files into an .RData cache;
# kept commented out so reruns just load the cached copy below.
# minst0_test <- read.table(file = "./minst0_test.dat")
# minst0_train <- read.table(file = "./minst0_train.dat")
# minst9_test <- read.table(file = "./minst9_test.dat")
# minst9_train <- read.table(file = "./minst9_train.dat")
# save(list = c("minst0_test", "minst0_train", "minst9_test", "minst9_train"),
#      file = "./hw3q1_data.RData")
load(file = "./hw3q1_data.RData")
library(EBImage)  # provides filter2() for 2-D convolution
# Render a matrix as a greyscale image, transposed/row-flipped so the matrix
# orientation matches the on-screen orientation.
draw <- function(mat, main = ""){
  image(t(mat)[,ncol(mat):1], axes = FALSE, col = grey(seq(0, 1, length = 256)), main=main)
}
par(mfrow = c(3, 3), oma = c(0, 0, 0, 0), mar = c(1, 1, 1, 1))
# First training digit "9", reshaped from a 784-wide row back to 28x28.
m <- matrix(data = unlist(minst9_train[1, ]), nrow = 28, byrow = TRUE)
draw(m)
#a, rotate, clockwise, 90 degree
# Single-1 kernels: each shifts the image by one pixel in one direction.
ya <- matrix(c(0, 1, 0, 0, 0, 0, 0, 0, 0), nrow = 3)
draw(filter2(m, ya))
yb <- matrix(c(0, 0, 0, 0, 0, 0, 0, 1, 0), nrow = 3)
draw(filter2(m, yb))
yc <- matrix(c(0, 0, 0, 1, 0, 0, 0, 0, 0), nrow = 3)
draw(filter2(m, yc))
yd <- matrix(c(0, 0, 0, 0, 0, 1, 0, 0, 0), nrow = 3)
draw(filter2(m, yd))
# Sobel-style edge kernels (ye1, ye2) and two Laplacian variants (ye3, ye4).
ye1 <- matrix(c(-1, 0, 1, -2, 0, 2, -1, 0, 1), nrow = 3)
draw(filter2(m, ye1))
ye2 <- matrix(c(1, 2, 1, 0, 0, 0, -1, -2, -1), nrow = 3)
draw(filter2(m, ye2))
ye3 <- matrix(c(0, -1, 0, -1, 4, -1, 0, -1, 0), nrow = 3)
draw(filter2(m, ye3))
ye4 <- matrix(c(-1, -1, -1, -1, 8, -1, -1, -1, -1), nrow = 3)
draw(filter2(m, ye4))
# b
par(mfrow = c(2, 3))
# Box-blur (mean) kernels of increasing size ...
yb_3 <- matrix(rep(1/3^2, 3^2), nrow = 3)
yb_5 <- matrix(rep(1/5^2, 5^2), nrow = 5)
yb_7 <- matrix(rep(1/7^2, 7^2), nrow = 7)
draw(filter2(m, yb_3))
draw(filter2(m, yb_5))
draw(filter2(m, yb_7))
# ... and the matching sharpening kernels: 2 * identity - blur.
ys_3 <- matrix(numeric(3^2), nrow = 3)
ys_3[(3+1)/2, (3+1)/2] <- 2
ys_3 <- ys_3 - yb_3
ys_5 <- matrix(numeric(5^2), nrow = 5)
ys_5[(5+1)/2, (5+1)/2] <- 2
ys_5 <- ys_5 - yb_5
ys_7 <- matrix(numeric(7^2), nrow = 7)
ys_7[(7+1)/2, (7+1)/2] <- 2
ys_7 <- ys_7 - yb_7
draw(filter2(m, ys_3))
draw(filter2(m, ys_5))
draw(filter2(m, ys_7))
# yu_3 <- matrix(runif(3^2), nrow = 3)
# yu_5 <- matrix(runif(5^2), nrow = 5)
# yu_7 <- matrix(runif(7^2), nrow = 7)
# image(filter2(m, yu_3), axes = FALSE, col = grey(seq(0, 1, length = 256)))
# image(filter2(m, yu_5), axes = FALSE, col = grey(seq(0, 1, length = 256)))
# image(filter2(m, yu_7), axes = FALSE, col = grey(seq(0, 1, length = 256)))
# c
# Downsample matrix `x` by aggregating non-overlapping `pool` x `pool`
# blocks with the block mean (method = "mean") or the block max (any other
# value, matching the original max fallback). Returns an
# (nrow(x)/pool) x (ncol(x)/pool) matrix; `pool` is expected to divide both
# dimensions evenly.
#
# BUG FIX: the original selected the aggregator with
#   func <- ifelse(method == "mean", mean, max)
# which always errors ("attempt to replicate an object of type 'closure'")
# because ifelse() cannot carry functions; use a plain if/else instead.
pooling <- function(x, pool, method){
  m <- nrow(x)/pool
  n <- ncol(x)/pool
  z <- matrix(nrow = m, ncol = n)
  func <- if (identical(method, "mean")) mean else max
  for (i in seq_len(m)){
    for (j in seq_len(n)){
      z[i, j] <- func(x[(pool*(i-1)+1):(pool*i),
                        (pool*(j-1)+1):(pool*j)])
    }
  }
  z
}
# Part (c): compare mean- and max-pooling at two downsampling factors.
par(mfrow = c(2, 3))
draw(m)
draw(pooling(m,4,"mean"))
draw(pooling(m,4,"max"))
draw(m)
draw(pooling(m,7,"mean"))
draw(pooling(m,7,"max"))
## d
# Part (d): a single conv -> ReLU -> max-pool pipeline using the Laplacian
# edge kernel ye3 defined above.
cov_laplacian <- filter2(m, ye3)
detect_ReLU <- ifelse(cov_laplacian>0, cov_laplacian, 0)
pool_max <- pooling(detect_ReLU,4,"max")
par(mfrow = c(2, 2))
draw(m)
draw(cov_laplacian)
draw(detect_ReLU)
draw(pool_max)
## e
# Part (e): binary classification data -- first 100 "9" digits (label 1)
# stacked on the first 100 "0" digits (label 0); column 785 is the label.
train_data <- rbind(cbind(minst9_train, label = 1)[1:100, ],
                    cbind(minst0_train, label = 0)[1:100, ])
# Logistic squashing function used as the network's output activation.
sigmoid <- function(x) 1/(1+exp(-x))
# Forward pass of a tiny fixed-architecture CNN over every row of the global
# `train_data`:
#   5 conv kernels (3x3) -> ReLU -> 14x14 max pooling (2x2 map per channel)
#   -> 20-value feature vector -> 10-unit dense layer -> sigmoid output.
#
# `pars` packs all 266 parameters:
#   1:45     the five 3x3 convolution kernels
#   46:55    dense-layer intercepts (alpha_0, 1x10)
#   56:255   dense-layer weights (alpha_1, 20x10)
#   256      output intercept (beta_0)
#   257:266  output weights (beta_1, 10x1)
#
# Returns a numeric vector of predicted probabilities, one per training row.
# NOTE(review): relies on the globals `train_data`, `sigmoid`, `pooling` and
# EBImage's `filter2`; column 785 of `train_data` is assumed to be the label.
forward_prop <- function(pars){
  kern_1 <- matrix(pars[1:9], nrow = 3, ncol = 3)
  kern_2 <- matrix(pars[10:18], nrow = 3, ncol = 3)
  kern_3 <- matrix(pars[19:27], nrow = 3, ncol = 3)
  kern_4 <- matrix(pars[28:36], nrow = 3, ncol = 3)
  kern_5 <- matrix(pars[37:45], nrow = 3, ncol = 3)
  alpha_0 <- matrix(pars[46:55], ncol = 10)
  alpha_1 <- matrix(pars[56:255], ncol = 10)
  beta_0 <- matrix(pars[256:256], ncol = 1)
  beta_1 <- matrix(pars[257:266], ncol = 1)
  outs <- numeric(nrow(train_data))
  for (i in 1:nrow(train_data)){
    # Reshape the 784 pixel columns (label column dropped) back into 28x28.
    temp_m <- matrix(data = unlist(train_data[i, -785]),
                     nrow = 28, byrow = TRUE)
    # Channels 1..5: convolve -> ReLU -> max-pool down to a 2x2 map.
    cov_laplacian_1 <- filter2(temp_m, kern_1)
    detect_ReLU_1 <- ifelse(cov_laplacian_1>0, cov_laplacian_1, 0)
    pool_max_1 <- pooling(detect_ReLU_1,14,"max")
    cov_laplacian_2 <- filter2(temp_m, kern_2)
    detect_ReLU_2 <- ifelse(cov_laplacian_2>0, cov_laplacian_2, 0)
    pool_max_2 <- pooling(detect_ReLU_2,14,"max")
    cov_laplacian_3 <- filter2(temp_m, kern_3)
    detect_ReLU_3 <- ifelse(cov_laplacian_3>0, cov_laplacian_3, 0)
    pool_max_3 <- pooling(detect_ReLU_3,14,"max")
    cov_laplacian_4 <- filter2(temp_m, kern_4)
    detect_ReLU_4 <- ifelse(cov_laplacian_4>0, cov_laplacian_4, 0)
    pool_max_4 <- pooling(detect_ReLU_4,14,"max")
    cov_laplacian_5 <- filter2(temp_m, kern_5)
    detect_ReLU_5 <- ifelse(cov_laplacian_5>0, cov_laplacian_5, 0)
    pool_max_5 <- pooling(detect_ReLU_5,14,"max")
    # Concatenate the five 2x2 maps into a 1x20 row vector.
    input <- t(as.numeric(c(pool_max_1, pool_max_2, pool_max_3, pool_max_4, pool_max_5)))
    # Dense layer then linear output, squashed to a probability.
    output <- (input %*% alpha_1 + alpha_0) %*% beta_1 + beta_0
    outs[i] <- sigmoid(output)
  }
  outs
}
# Binary cross-entropy between 0/1 labels `y` and predicted probabilities
# `yhat`; lower is better. Returns a single numeric value.
cost_function <- function(y, yhat) {
  per_obs_loglik <- y * log(yhat) + (1 - y) * log(1 - yhat)
  -mean(per_obs_loglik)
}
# start_time <- Sys.time()
# forward_prop(pars)
# dur <- Sys.time() - start_time
# Draw one random PSO particle: location and velocity uniform in
# [-0.1, 0.1] over all 266 network parameters, evaluated once through the
# network so the initial state also seeds the particle's personal best.
INITIAL_PARTICLE <- function(){
  loc      <- runif(266, -0.1, 0.1)
  velocity <- runif(266, -0.1, 0.1)
  cost     <- cost_function(train_data$label, forward_prop(loc))
  list(cur_loc      = loc,
       cur_velocity = velocity,
       cur_cost     = cost,
       best_loc     = loc,
       best_cost    = cost)
}
# start_time <- Sys.time()
# p1 <- INITIAL_PARTICLE()
# dur <- Sys.time() - start_time
# Time difference of 2.691472 secs
# Build a swarm of `n` independently initialised particles (unnamed list).
INITIAL_SWARM <- function(n){
  lapply(seq_len(n), function(i) INITIAL_PARTICLE())
}
# start_time <- Sys.time()
# swarm <- INITIAL_SWARM(5)
# dur <- Sys.time() - start_time
# Time difference of 13.50203 secs
# Return the particle with the lowest current cost in the swarm. Ties keep
# the earliest particle, matching which.min() semantics (and the original
# strict '<' scan).
GET_GLOBAL_BEST <- function(swarm){
  costs <- vapply(swarm, function(p) p$cur_cost, numeric(1))
  swarm[[which.min(costs)]]
}
# PSO velocity update: blend inertia (w), the stochastic pull toward the
# particle's personal best (c1) and toward the global best (c2), then clamp
# every component to [-max_v, max_v]. The two runif(266) draws are made in
# the same order as the original so seeded runs are reproducible.
UPDATE_VELOCITY <- function(particle, g_best, w = 0.729, c1 = 1.49445, c2 = 1.49445, max_v){
  pull_self   <- c1 * runif(266) * (particle$best_loc - particle$cur_loc)
  pull_global <- c2 * runif(266) * (g_best$best_loc - particle$cur_loc)
  raw_velocity <- w * particle$cur_velocity + pull_self + pull_global
  particle$cur_velocity <- pmin(pmax(raw_velocity, -max_v), max_v)
  particle
}
# Move the particle by its velocity, clamp every coordinate to the search
# bounds bond = c(lower, upper), re-evaluate the network cost, and refresh
# the particle's personal best if the new cost improves on it.
#
# BUG FIX: the original had a misplaced ')' after 'bond[1]' --
#   ifelse(... < bond[1]), bond[1], ...)
# -- which made the inner ifelse() a one-argument call and passed five
# arguments to the outer ifelse(), erroring at runtime.
UPDATE_LOCATION <- function(particle, bond){
  new_loc <- particle$cur_loc + particle$cur_velocity
  particle$cur_loc <- ifelse(new_loc > bond[2],
                             bond[2],
                             ifelse(new_loc < bond[1],
                                    bond[1],
                                    new_loc))
  cur_pred <- forward_prop(particle$cur_loc)
  particle$cur_cost <- cost_function(train_data$label, cur_pred)
  if (particle$cur_cost < particle$best_cost){
    particle$best_cost <- particle$cur_cost
    particle$best_loc <- particle$cur_loc
  }
  return(particle)
}
# Particle-swarm-optimisation driver: initialise `size` particles, then for
# `iter` iterations update every particle's velocity and location against
# the current global best. Returns the best particle found.
SEARCH <- function(iter = 1000, size = 20, w = 0.729,
                   c1 = 1.49445, c2 = 1.49445, max_v = 2, bond = c(-10, 10)){
  swarm <- INITIAL_SWARM(size)
  g_best <- GET_GLOBAL_BEST(swarm)
  for (i in 1:iter){
    # BUG FIX: 'for (j in length(swarm))' only visited the LAST particle;
    # iterate over every particle instead.
    for (j in seq_along(swarm)){
      swarm[[j]] <- UPDATE_VELOCITY(swarm[[j]],
                                    g_best,
                                    w,
                                    c1,
                                    c2,
                                    max_v)
      swarm[[j]] <- UPDATE_LOCATION(swarm[[j]], bond)
    }
    g_best <- GET_GLOBAL_BEST(swarm)
  }
  # BUG FIX: return() was inside the iteration loop, so only one PSO
  # iteration ever ran regardless of 'iter'.
  return(g_best)
}
# start_time <- Sys.time()
# swarm <- SEARCH(10)
# dur <- Sys.time() - start_time
# Time difference of 13.50203 secs
## 2
# a
# Gibbs sampler for a k x k binary Ising model, swept to (approximate)
# stationarity at five inverse temperatures and plotted side by side.
k <- 32
ising <- matrix(sample(c(0, 1), k*k, replace = TRUE), ncol = k)
par(mfrow = c(3, 2))
draw(ising, main = "initial status")
betas <- c(0.2, 0.5, 0.75, 0.9, 1.25)
for(n in 1:5){
  beta <- betas[n]
  ising_vec <- as.numeric(ising)  # column-major flattening of the grid
  for (t in 1:2000){
    # One full sweep: resample each spin from its full conditional given
    # the current 4-neighbourhood.
    for (i in 1:length(ising_vec)){
      # Offsets +/-1 step within a column, +/-k step across columns.
      # NOTE(review): +/-1 wraps across column boundaries at the grid's
      # first/last rows -- confirm whether that edge behaviour is intended.
      neighbor <- c()
      if (i+1<=length(ising_vec)) neighbor <- c(neighbor, ising_vec[i+1])
      if (i+k<=length(ising_vec)) neighbor <- c(neighbor, ising_vec[i+k])
      if (i-1>0) neighbor <- c(neighbor, ising_vec[i-1])
      if (i-k>0) neighbor <- c(neighbor, ising_vec[i-k])
      # Full-conditional probability that this spin is 1.
      p <- exp(beta*sum(neighbor==1))/(exp(beta*sum(neighbor==1))+exp(beta*sum(neighbor==0)))
      U <- runif(1)
      if (U<p)
        ising_vec[i] <- 1
      else
        ising_vec[i] <- 0
    }
  }
  ising_mat <- matrix(ising_vec, nrow = k)
  draw(ising_mat, main = paste("beta=", beta, sep = ""))
}
# b
# Sampler for Bayesian linear regression y = b0 + b1*x + error with normal
# priors on the coefficients and an inverse-gamma prior on the variance
# (pscl::rigamma uses the rate parameterisation, hence the comment on r).
library(pscl)
x <- runif(10)
q <- 1/2
r <- 1/2 #rate, instead of scale, to be same definition in rigamma of package pscl
Sigma <- diag(c(100, 100))
beta0 <- rnorm(1, 0, Sigma[1, 1])
beta1 <- rnorm(1, 0, Sigma[2, 2])
sigma_2 <- rigamma(1, q, r)
betas <- matrix(NA, nrow = 100, ncol = 2)
sigma_2s <- numeric(100)
# NOTE(review): sigma_2 is added as a constant offset rather than as random
# noise -- confirm whether rnorm(10, 0, sqrt(sigma_2)) was intended.
y <- beta0+beta1*x+sigma_2
X <- cbind(1, x)
for (t in 1:10){
  repeat{
    mu <- solve(t(X)%*%X+sigma_2*Sigma)%*%t(X)%*%y
    S <- sigma_2*solve(t(X)%*%X+sigma_2*Sigma)
    beta0_star <- rnorm(1, mu[1], S[1, 1])
    beta1_star <- rnorm(1, mu[2], S[2, 2])
    sigma_2_star <- rigamma(1, q+10/2,
                            r+1/2*sum((y-beta0_star-beta1_star*x)^2))
    y_star <- beta0_star+beta1_star*x+sigma_2_star
    # Accept the draw only when the simulated data resemble the observed
    # data (Kolmogorov-Smirnov p-value above 0.5).
    if (ks.test(y, y_star)$p.value>0.5){
      # BUG FIX: 'beta0_star1' and 'beta1_star2' were undefined names that
      # would error at runtime; store the sampled values instead.
      beta0 <- beta0_star
      beta1 <- beta1_star
      sigma_2 <- sigma_2_star
      betas[t, 1] <- beta0_star
      betas[t, 2] <- beta1_star
      sigma_2s[t] <- sigma_2_star
      break
    }
  }
}
# c
# Image denoising/segmentation: binary Ising prior on the latent image x,
# Gaussian likelihood for the observed image y, with Metropolis updates for
# x, a Gibbs update for sigma2, and an exchange-style update for beta using
# an auxiliary image z.
myst_im <- as.matrix(read.table(file = "./myst_im.dat"))
draw(myst_im)
size <- nrow(myst_im)
x <-as.numeric(matrix(sample(c(0, 1), size*size, replace = TRUE), ncol = size))
beta <- 0.5
sigma2 <- 1
q <- 1/2
r <- 1/2
tau <- 100
B <- 1
y <- as.numeric(myst_im)
start_q3_c <- Sys.time()
for (t in 1:10){
  hx <- c()
  hz <- c()
  for (s in 1:20){
    x_prime <- 1-x
    for (i in 1:length(x)){
      # BUG FIX: the across-column neighbour offset must be THIS image's
      # column length 'size', not 'k' (the 32-wide grid left over from
      # part (a)).
      neighbor <- c()
      if (i+1<=length(x)) neighbor <- c(neighbor, x[i+1])
      if (i+size<=length(x)) neighbor <- c(neighbor, x[i+size])
      if (i-1>0) neighbor <- c(neighbor, x[i-1])
      if (i-size>0) neighbor <- c(neighbor, x[i-size])
      # Metropolis ratio between the flipped and current pixel value.
      d <- beta*sum(neighbor==x[i])+(-1/(2*sigma2)*(y[i]-x[i])^2)
      d_prime <- beta*sum(neighbor==x_prime[i])+(-1/(2*sigma2)*(y[i]-x_prime[i])^2)
      p <- exp(min(c(d_prime-d), 0))
      U <- runif(1)
      if (U<p)
        x[i] <- x_prime[i]
    }
  }
  # Gibbs update for the noise variance given the current latent image.
  sigma2 <- rigamma(1, q+size*size/2, (r+1/2*sum((y-x)^2)))
  beta_star <- rnorm(1, beta, B)
  # Auxiliary image z: Gibbs sweeps of the Ising prior at beta_star.
  z <- x
  for (M in 1:10){
    for (i in 1:length(z)){
      neighbor <- c()
      if (i+1<=length(z)) neighbor <- c(neighbor, z[i+1])
      if (i+size<=length(z)) neighbor <- c(neighbor, z[i+size])
      if (i-1>0) neighbor <- c(neighbor, z[i-1])
      if (i-size>0) neighbor <- c(neighbor, z[i-size])
      p <- exp(beta_star*sum(neighbor==1))/
        (exp(beta_star*sum(neighbor==1))+exp(beta_star*sum(neighbor==0)))
      U <- runif(1)
      if (U<p)
        z[i] <- 1
      else
        z[i] <- 0
    }
  }
  # Sufficient statistic h(.) = per-site counts of agreeing neighbours,
  # summed over the image, for z and for x.
  for (i in 1:length(z)){
    neighbor <- c()
    if (i+1<=length(z)) neighbor <- c(neighbor, z[i+1])
    if (i+size<=length(z)) neighbor <- c(neighbor, z[i+size])
    if (i-1>0) neighbor <- c(neighbor, z[i-1])
    if (i-size>0) neighbor <- c(neighbor, z[i-size])
    hz[i] <- sum(neighbor==z[i])
  }
  hz <- sum(hz)
  for (i in 1:length(x)){
    neighbor <- c()
    if (i+1<=length(x)) neighbor <- c(neighbor, x[i+1])
    # BUG FIX: this loop indexed x but bounds-checked against length(z)
    # with offset 'k'; use length(x) and 'size' throughout.
    if (i+size<=length(x)) neighbor <- c(neighbor, x[i+size])
    if (i-1>0) neighbor <- c(neighbor, x[i-1])
    if (i-size>0) neighbor <- c(neighbor, x[i-size])
    hx[i] <- sum(neighbor==x[i])
  }
  hx <- sum(hx)
  # Accept beta_star only when the auxiliary statistic matches closely.
  if (abs(hx-hz)<1e-3*hx){
    ratio <- ((dnorm(beta_star, 0, tau))/(dnorm(beta, 0, tau)))/
      ((dnorm(beta, beta_star, B))/(dnorm(beta_star, beta, B)))
    u <- runif(1)
    if (u<ratio)
      beta <- beta_star
  }
  print(t)
}
dur_q3_c <- Sys.time()-start_q3_c
|
## You must check the 'check' parameter
mainAgeing = function(file = 'http://ves-ebi-d0:8090/mi/impc/dev/solr/experiment/select?q=*%3A*&fq=procedure_stable_id%3AIMPC_ECG_002&rows=590000&wt=csv&indent=true' ,
sep = ',' ,
na.strings = 'NA' ,
normalisedPhenlist = FALSE ,
subdir = 'Results' ,
seed = 123456 ,
readCategoriesFromFile = TRUE ,
OverwriteExistingFiles = FALSE ,
onlyFillNotExisitingResults = FALSE ,
WhiteListMethods = NULL ,
# Carefully use this option!
# It can remove the entire result file (some colonies in a single output file)
# Only for multicore
activateMulticore = TRUE ,
coreRatio = 5 / 14 ,
concurrentControlSelect = FALSE ,
MultiCoreErrorHandling = 'pass' ,
inorder = FALSE ,
verbose = TRUE ,
# Only for simulations
simulation = FALSE ,
Simulation.iteration = 1 ,
# Only for windowing
activeWindowing = TRUE ,
sensitivity = c(1, 1, 1, 0) ,
pvalThreshold = c(0, 0, 0, 0) ,
check = 2 ,
direction = c(1, 1) ,
weightORthreshold = 'weight' ,
predFunction = function(m) {
predict(m)
} ,
residFunction = function(m) {
resid(m)
} ,
messages = TRUE ,
threshold = sqrt(.Machine$double.eps) * 10,
outdelim = '\t' ,
debug = TRUE ,
encode = FALSE ,
noSpaceAllowed = TRUE ,
plotWindowing = TRUE ,
storeplot = TRUE ,
virtualDrive = FALSE ,
checkNamesForMissingColNames = TRUE ,
# Raw data
storeRawData = TRUE ,
compressRawData = TRUE ,
# Only for Batch generator
BatchProducer = FALSE ,
cpu = 4 ,
memory = 9000 ,
nMax = 10000 ,
ChunkSize = 24 ,
MinColoniesInChunks = 32 ,
controlSize = 1500 ,
### Just for debuging
superDebug = FALSE ,
extraBatchParameters = '-m "rh7-hosts-ebi5-12 rh7-hosts-ebi5-13 rh7-hosts-ebi5-14 rh7-hosts-ebi5-15 rh7-hosts-ebi5-16 rh7-hosts-ebi5-17 rh7-hosts-ebi5-18 rh7-hosts-ebi5-19 rh7-hosts-ebi5-20 rh7-hosts-ebi5-24 rh7-hosts-ebi5-25 rh7-hosts-ebi5-26 rh7-hosts-ebi5-27 rh7-hosts-ebi6-00 rh7-hosts-ebi6-01 rh7-hosts-ebi6-02 rh7-hosts-ebi6-03 rh7-hosts-ebi6-04 rh7-hosts-ebi6-05 rh7-hosts-ebi6-06 rh7-hosts-ebi6-07 rh7-hosts-ebi6-08 rh7-hosts-ebi6-09 rh7-hosts-ebi6-10 rh7-hosts-ebi6-11 rh7-hosts-ebi6-12 rh7-hosts-ebi6-13 rh7-hosts-ebi6-14 rh7-hosts-ebi6-15 rh7-hosts-ebi6-16 rh7-hosts-ebi6-17"',
...) {
message0('DRrequiredAgeing loaded')
message0(
Sys.time(),
' ############################################################\n',
Sys.time(),
' % Make sure ExceptionList and CategoryList are updated! %\n',
Sys.time(),
' % Please check SENSITIVITY for the windowing algorithm %\n',
Sys.time(),
' % Sensitivy:',
sensitivity,
' \n',
Sys.time(),
' ############################################################',
ShowTimeTag = FALSE
)
message0('Process started ...')
message0('Machine info: ', paste(Sys.info(), collapse = ', '))
message0('Loading dependent packages ...')
requireNamespace('PhenStat')
requireNamespace('PhenStatAgeing' )
requireNamespace('doParallel')
requireNamespace('parallel')
requireNamespace('foreach')
requireNamespace('SmoothWin')
requireNamespace('nlme')
requireNamespace('base64enc')
requireNamespace('RJSONIO' )
requireNamespace('jsonlite' )
# Config files
message0('Loading configuration ...')
methodmap = readConf('MethodMap.conf')
equationmap = readConf('EquationMap.conf')
CategoryMap = readConf('CategoryMap.conf')
initial = readConf('Initialize.conf')
exceptionList = readFile(file = 'ExceptionMap.list')
#CategoricalCategoryBlackList = readFile(file = 'CategoricalCategoryBlackList.list')
# Main subdirectory/working directory
message0('Preparing the working directory ...')
cwd = getwd()
wd = file.path(cwd,
paste(subdir, sep = '_', collapse = '_'))
dir.create0(wd, recursive = TRUE)
if (virtualDrive) {
message0('Creating a virtual drive ... ')
system('subst U: /D', wait = TRUE)
system(paste0('subst U: "', wd, '"'), wait = TRUE)
wd = 'U:'
}
message0('Setting the working directory to: \n\t\t ===> ', wd)
setwd(dir = wd)
##################
set.seed(seed)
# Read file
message0('Reading the input file ...\n\t ~>', file)
if (!file.exists(file))
message0('File is not local or does not exist!')
rdata = read.csv(
file = file ,
check.names = checkNamesForMissingColNames,
sep = sep ,
na.strings = na.strings ,
stringsAsFactors = TRUE
)
#### Temporary for ageing pipeline only
# rdataEarly = read.csv(
# file = gsub(
# pattern = 'LA_',
# replacement = '_',
# gsub(
# pattern = 'http://ves-ebi-d1.ebi.ac.uk:8988',
# replacement = 'http://ves-ebi-d0.ebi.ac.uk:8986',
# x = file
# )
# ),
# check.names = checkNamesForMissingColNames,
# sep = sep ,
# na.strings = na.strings ,
# stringsAsFactors = TRUE
# )
# com_cols = intersect(colnames(rdata), colnames(rdataEarly))
# rdata = rbind(rdata[, com_cols], rdataEarly[, com_cols])
message0('Input file dimentions: ',
paste0(dim(rdata), collapse = ', '))
rdata = rdata[!is.na(rdata$phenotyping_center), ] # Just to remove NA centers
new.data = rdata
new.data = new.data[order(Date2Integer(new.data$date_of_experiment)), ]
#########
new.data$colony_id = as.character(new.data$colony_id)
new.data$colony_id[new.data$biological_sample_group %in% "control"] = NA
new.data$external_sample_id = as.factor(new.data$external_sample_id)
################
# Start analysis
################
# Initializing cores
message0('Initialising cores ...')
crs = cores0(coreRatio = coreRatio, activate = activateMulticore)
closeAllConnections()
registerDoSEQ()
message0('The detected OS: ', .Platform$OS.type)
if (.Platform$OS.type == 'windows') {
cl = makeCluster(crs,
outfile = outMCoreLog(wd))
} else{
cl = makeForkCluster(crs,
outfile = outMCoreLog(wd))
}
registerDoParallel(cl, cores = crs)
# End of multicore initialization
# Get possible categories for the categorical variables
message0('Loading the list of possible categories for categorical variables ...')
CatList = GetPossibleCategories (procedure = NULL, file = readCategoriesFromFile)
message0('Filtering the dataset in progress ....')
Strtime = Sys.time()
procedures = as.character(unique(na.omit(new.data$procedure_group)))
for (procedure in procedures) {
###
n2.9 = base::subset(new.data, new.data$procedure_group %in% procedures)
parameters = as.character(unique(na.omit(n2.9$parameter_stable_id)))
for (parameter in parameters) {
FactorLevels = ReadFactorLevelsFromSolr(parameter = parameter, CatList = CatList)
### counter starts here ....
counter = 1
outP = list()
n3.0 = base::subset(n2.9, n2.9$parameter_stable_id %in% parameter)
centers = as.character(unique(na.omit(n3.0$phenotyping_center)))
for (center in centers) {
n3.1 = base::subset(n3.0, n3.0$phenotyping_center %in% center)
strains = as.character(unique(na.omit(n3.1$strain_accession_id)))
for (strain in strains) {
n3.2 = base::subset(n3.1, n3.1$strain_accession_id %in% strain)
metas = as.character(unique(na.omit(n3.2$metadata_group)))
for (meta in metas) {
n3.3 = base::subset(n3.2, n3.2$metadata_group %in% meta)
n3.3.c = base::subset(n3.3, n3.3$biological_sample_group %in% 'control')
n3.3.m = base::subset(n3.3, !(n3.3$biological_sample_group %in% 'control'))
zygositys = as.character(unique(na.omit(n3.3.m$zygosity)))
for (zyg in zygositys) {
n3.3.m_zyg = base::subset(n3.3.m, n3.3.m$zygosity %in% zyg)
colonys = as.character(unique(na.omit(n3.3.m_zyg$colony_id)))
nColonies = length(colonys)
if (BatchProducer && nColonies > 0) {
#nMax = 10000
ChunkSizeFromNumbers = ((nrow(n3.3.c) < nMax) * max(1, round(nColonies /
ChunkSize)) +
(nrow(n3.3.c) >= nMax) * nColonies)
minCol = ((nrow(n3.3.c) < nMax) * MinColoniesInChunks + (nrow(n3.3.c) >=
nMax) * 1)
ColonyChunks = chunkVector(
x = colonys,
n = ChunkSizeFromNumbers,
min = minCol,
activate = (nColonies >= MinColoniesInChunks) &&
(nrow(n3.3.c) >= controlSize)
)
outpDir = file.path0(
wd,
paste0(Sys.Date(), '_', subdir, '_RawData/'),
check = FALSE,
create = TRUE,
IncludedFileName = FALSE
)
SubSubDirOrdFileName = RemoveSpecialChars(paste(
Sys.Date() ,
#RandomRegardSeed() ,
#procedure ,
parameter ,
center ,
zyg ,
strain ,
meta ,
collapse = '_'
))
outpfile = file.path0(
outpDir,
SubSubDirOrdFileName,
check = FALSE,
create = TRUE,
IncludedFileName = TRUE
)
mess = paste0(
Sys.time(),
'. Processed file: ',
SubSubDirOrdFileName,
'. #Colonies = ',
nColonies,
', #Controls = ',
nrow(n3.3.c),
', Chunks = ',
length(ColonyChunks)
)
message0(mess, ShowTimeTag = FALSE)
write(
x = mess,
file = paste0(
Sys.Date(),
'_',
subdir,
'_DataGenerationLog.log'
),
10 ^ 5,
append = TRUE
)
counter = 1
for (ChunkedColonies in ColonyChunks) {
BatchData = rbind (subset(n3.3.m_zyg, n3.3.m_zyg$colony_id %in% ChunkedColonies),
n3.3.c)
BatchFileName = file.exists0(
paste0(
outpfile,
'_C',
length(ColonyChunks),
'_',
RandomRegardSeed(),
'_',
counter,
'.csv'
),
overwrite = OverwriteExistingFiles
)
if (all(dim(BatchData) > 0)) {
write.csv(BatchData,
file = BatchFileName,
row.names = FALSE)
out = BatchGenerator(
file = BatchFileName ,
dir = outpDir ,
procedure = procedure ,
parameter = parameter ,
center = center ,
cpu = cpu ,
memory = memory ,
extraBatchParameters = extraBatchParameters
)
write(
x = out,
file = paste0(outpDir, '/', subdir, '_Batch.bch'),
ncolumns = 10 ^ 5,
append = TRUE
)
counter = counter + 1
}
rm0(c('BatchData', 'BatchFileName'), silent = TRUE)
}
} else{
message0(
' [',
paste(
procedure,
parameter,
center ,
strain ,
meta ,
zyg ,
length(colonys),
sep = ']~>['
),
']\n'
)
### Single or multiple cores?
`%activemulticore%` = ifelse (activateMulticore &&
!BatchProducer,
`%dopar%`,
`%do%`)
if (activateMulticore &&
!BatchProducer) {
message0('Multicore processing in progress ...')
} else{
message0('Single core processing in progress ...')
}
i = 1
MultiCoreRes = foreach::foreach (
i = 1:length(colonys),
.packages = c(
'PhenStat' ,
'SmoothWin' ,
'base64enc' ,
'nlme' ,
'RJSONIO' ,
'jsonlite' ,
'PhenStatAgeing',
'DRrequiredAgeing'
),
.errorhandling = c(MultiCoreErrorHandling),
.verbose = verbose ,
.inorder = inorder
) %activemulticore% {
#for (i in 1:length(colonys)){
message0('*~*~*~*~*~* ', i, '|', length(colonys), ' *~*~*~*~*~*')
for (sim.index in 1:ifelse(simulation, Simulation.iteration, 1)) {
# Removing the old objects if exist
ObjectsThatMustBeRemovedInEachIteration()
# Initialization before starting the analysis
note = list()
colony = colonys[i]
message0('Current colony: ',colony)
n3.4 = base::subset(n3.3.m_zyg, n3.3.m_zyg$colony_id %in% c(colony))
n3.5 = rbind (n3.4, n3.3.c)
note = c(note,
list(
bodyweight_included_in_data = CheckIfNameExistInDataFrame(obj = n3.5,
name = 'weight',
checkLevels = FALSE)
))
# Imaginary URLs
note$gene_page_url = GenePageURL = GenePageURL(n3.5)
note$bodyweight_page_url = BodyWeightCurvURL = BodyWeightCurvURL(n3.5)
ReadMeTxt = ReadMe (obj = n3.4, URL = GenePageURL)
# Define response column [do not move me!]
depVariable = getResponseColumn(n3.5$observation_type)
depVar = depVariable$column
message0('Dependent variable: ', depVar)
note$response_type = paste0(depVar,
'_of_type_',
paste(depVariable$lbl, sep = '.'))
note$observation_type =
if (!is.null(unique(n3.5$observation_type))) {
paste(unique(n3.5$observation_type),
sep = '~',
collapse = '~')
} else{
NULL
}
note$data_type =
if (!is.null(unique(n3.5$data_type))) {
paste(unique(n3.5$data_type),
sep = '~',
collapse = '~')
} else{
NULL
}
minSampRequired = ifelse(
is.ABR(x = parameter),
as.numeric(initial$min_ABR_mut_each_sex),
ifelse(
is.numeric(n3.5[, depVar]),
as.numeric(initial$min_num_mut_each_sex),
2
)
)
# add missing levels to categorical variables
if (!is.numeric(n3.5[, depVar])) {
AllLevels = mapLevelsToFactor(levels = levels(n3.5[, depVar]),
newlevels = FactorLevels$levels)
levels(n3.5[, depVar]) = AllLevels$levels
#####
note = c(note,
FactorLevels$note,
AllLevels$note)
#####
SexGenResLevels = min(2 * 2 * length(AllLevels$levels), 4)
} else{
SexGenResLevels = 4
}
if (!depVariable$accepted)
return('Not a proper dataset!')
if (simulation && is.numeric(n3.5[, depVar])) {
message0('Simulation in progress ... Round ',
sim.index)
n3.5_tmp = mimicControls(
df = n3.5,
removeMutants = (sim.index == 1) ,
ArtifLabel = 'experimental' ,
mutLabel = 'experimental' ,
baselines = 'control' ,
neutralise = TRUE ,
resample = (sim.index != 1) ,
depVariable = depVar ,
sex = 'sex' ,
minSampRequired = minSampRequired,
SexGenResLevels = SexGenResLevels,
indicator = sim.index ,
plot = superDebug
)
n3.5 = n3.5_tmp$df
note = list(note , simulation_details = n3.5_tmp$note)
}
# Summary statistics
n3.5_summary = SummaryStatisticsOriginal(x = n3.5, depVar = depVar)
note = c(note, n3.5_summary)
# Remove zero frequency categories
n3.5.1_F_list = RemoveZeroFrequencyCategories(
x = n3.5,
minSampRequired = minSampRequired,
depVar = depVar,
totalLevels = SexGenResLevels
)
n3.5.1 = n3.5.1_F_list$x
note = c(note, n3.5.1_F_list$note)
# Remove var categories
n3.5.1_v_list = RemoveZerovarCategories(
x = n3.5.1,
depVar = depVar,
minvar = 0,
method = getMethodi(
var = parameter,
type = ifelse(
is.numeric(n3.5.1[, depVar]),
'numeric',
'charachter'
),
methodMap = methodmap
)
)
n3.5.1 = n3.5.1_v_list$x
note = c(note, n3.5.1_v_list$note)
OrgSpecIds = OtherExtraColumns(
obj = n3.5,
ColNames = c(
'external_sample_id',
'sex',
'biological_sample_group',
depVar,
'date_of_experiment',
'weight'
),
names = c(
# all lower case
'original_external_sample_id',
'original_sex',
'original_biological_sample_group',
'original_response',
'original_date_of_experiment',
'original_body_weight'
)
)
note = c(note, OrgSpecIds)
message0('Creating output directory and file name ...')
SubSubDirOrdFileName = file.path0(
RemoveSpecialChars(center) ,
RemoveSpecialChars(procedure) ,
RemoveSpecialChars(parameter) ,
RemoveSpecialChars(colony) ,
RemoveSpecialChars(zyg) ,
RemoveSpecialChars(meta) ,
create = FALSE,
check = noSpaceAllowed
)
FileName = 'output'
outDir = file.path0(
wd,
SubSubDirOrdFileName,
create = TRUE,
check = FALSE,
IncludedFileName = FALSE
)
outpfile = outpfile2 = paste0(outDir,
'/',
FileName,
collapse = '')
message0('Output directory: \n \t\t =>=>=> ',
outpfile)
if (onlyFillNotExisitingResults) {
if (any(file.exists(paste(
outpfile, c('NotProcessed.tsv', 'Successful.tsv'), sep = '_'
)))) {
message0('File already exists then skipt!')
return(NULL)
} else{
message0('Result does not exist! Adding in progress ...')
rmme = lapply(list.files(dirname(outpfile), full.names = TRUE), function(x) {
if (!is.null(x) &&
file.exists(x) &&
(
grepl(
pattern = '.Rdata',
x = x,
fixed = TRUE
) ||
grepl(
pattern = 'Failed_critical_error',
x = x,
fixed = TRUE
)
)
)
file.remove(x)
})
write(outpfile, file = 'DoesNotExists.log', append = TRUE)
}
}
####
if (storeRawData) {
# There is a second snippet for the rawdata + weights
RawoutputFile = RawoutputFile0 = file.exists0(paste(outpfile2,
'rawData.csv',
sep = '_'),
overwrite = OverwriteExistingFiles)
ReadMeFile = file.exists0(file.path(dirname(RawoutputFile), 'ReadMe.txt'))
message0(
'writting the raw data file to disk ... \n \t\t ===> ',
RawoutputFile
)
write.csv(
x = n3.5 ,
row.names = FALSE ,
file = RawoutputFile
)
write(
x = ReadMeTxt ,
file = ReadMeFile,
ncolumns = 1
)
if (compressRawData) {
comRes = compressFiles(
fileList = c(ReadMeFile , RawoutputFile),
dir = dirname (RawoutputFile) ,
filename = basename(RawoutputFile) ,
overwrite = OverwriteExistingFiles
)
if (comRes$status == 0)
message0('Compression successful')
else
message0('Compression failed')
RawoutputFile0 = comRes$file
}
}
note$input_file = relativePath(path = file, reference = wd)
note$output_raw_data_file = relativePath(path = if (storeRawData) {
RawoutputFile0
} else{
NULL
},
reference = wd)
note$read_me_file = relativePath(path = if (storeRawData &&
!compressRawData) {
ReadMeFile
} else{
NULL
},
reference = wd)
###'
isException = IsInList(
item = c(parameter, procedure),
list = exceptionList,
message = 'Value found in the skipt list'
)
n3.5.2 = n3.5.1
MergLev = MergeLevels(x = n3.5.2[, depVar],
listOfLevelMaps = CategoryMap)
n3.5.2[, depVar] = MergLev$x
n3.5.2 = droplevels0(n3.5.2[!is.na(n3.5.2[, depVar]),])
n3.5.2OnlyKO = subset(n3.5.2,n3.5.2$biological_sample_group %in% 'experimental')
note$relabeled_levels_categorical_variables_only = MergLev$note
if (!is.null(n3.5.2) &&
# data.frame is not zero
min0(dim(n3.5.2)) > 0 &&
# is it really exist!
length(unique(n3.5.2$biological_sample_group)) > 1 &&
# include mut and cont
min0(table(n3.5.2$biological_sample_group)) > minSampRequired &&
max0(table(n3.5.2OnlyKO$biological_sample_group, n3.5.2OnlyKO$sex)) > 1 &&
# include at least 4/2 of each genotype
#length(unique(n3.5.2$colony_id)) > 1 &&
length(RepBlank(
unique(n3.5.2$colony_id),
match = c('', NA, 'NA')
)) > 1 &&
# include 2 colonies (cont & mut)
checkGenInCol(n3.5.2) &&
# each sex and genotype
depVariable$accepted &&
length(na.omit(n3.5.2[, depVar])) > 0 &&
# response is not empty!
# there must be variation in data
NonZeroVariation(n3.5.2[, depVar]) &&
!isException &&
columnLevelsVariationRadio(dataset = n3.5.2, columnName = depVar) > 0.005 &&
RR_thresholdCheck(data = n3.5.2,depVar = depVar,parameter = parameter,methodmap = methodmap)$criteria_result
) {
message0('Analysing the dataset in progress ...')
message0('Creating PhenList object ...')
a = PhenStat::PhenList(
n3.5.2,
testGenotype = 'experimental',
refGenotype = 'control',
dataset.colname.genotype = 'biological_sample_group',
dataset.colname.sex = 'sex',
dataset.colname.weight = 'weight',
dataset.colname.batch = 'date_of_experiment'
)
a_summary_before_concurrent = SummaryStatisticsOriginal(
x = a@datasetPL,
depVar = depVar,
sex = 'Sex',
genotype = 'Genotype',
label = 'phenlist_data_summary_statistics'
)
note = c(note, a_summary_before_concurrent)
#
PhenListSpecIds = OtherExtraColumns (
obj = a@datasetPL,
ColNames = 'external_sample_id',
names = 'phenlist_data_spec_ids'
)
note = c(note, PhenListSpecIds)
### Get method of analysis
method = getMethodi(
var = parameter,
type = ifelse(
is.numeric(a@datasetPL[, depVar]),
'numeric',
'charachter'
),
methodMap = methodmap
)
# WhiteList methods
if (!is.null(WhiteListMethods) &&
!(method %in% WhiteListMethods)) {
message0('Black list applied. Method = ', method)
return(FALSE)
}
#### Check for concurrent control selection
aTmp = concurrentContSelect(
activate = concurrentControlSelect &&
(method %in% 'MM') && !activeWindowing,
PhenListObj = a,
depVar = depVar,
minSampRequired = minSampRequired
)
a = aTmp$obj
note = c(note, aTmp$note)
#### END OF concurrent
# Just before analysing data (because cuncurrent sampling)
if (concurrentControlSelect &&
(method %in% 'MM') && !activeWindowing) {
a_phenlist_concurrent_summary = SummaryStatisticsOriginal(
x = a@datasetPL,
depVar = depVar,
sex = 'Sex',
genotype = 'Genotype',
label = 'phenlist_and_cuncurrent_data_summary_statistics'
)
note = c(note, a_phenlist_concurrent_summary)
}
# check the Weight column
message0('Checking whether Weight column exists in the raw data ...')
if (!CheckIfNameExistInDataFrame(a@datasetPL, 'Weight')) {
note$existence_of_weight_column =
'Weight column does not exist in the raw data'
}
# Equation type
equationType = ifelse(
CheckIfNameExistInDataFrame(a@datasetPL, 'Weight'),
getEquation(var = parameter,
equationMap = equationmap),
'withoutWeight'
)
# This is the main engine!
note = c(note, list(
bodyweight_initially_included_in_model = ifelse(method %in% 'MM', equationType, FALSE)
))
if (normalisedPhenlist){
a = normalisePhenList(phenlist = a, colnames = c(depVar, 'Weight'))
}
message0('Fitting the model ...')
message0('Method: ', method, '\n\t Equation:', equationType)
c.ww0 = PhenStatWindow(
phenlistObject = a,
parameter = parameter,
minObs = minSampRequired,
method = method,
depVariable = depVar,
equation = equationType,
threshold = threshold,
pvalThreshold = pvalThreshold,
sensitivity = sensitivity,
messages = messages,
main = paste(
unique(n3.5.2$procedure_name)[1],
'\n',
unique(n3.5.2$parameter_name)[1],
'\n',
unique(center)[1],
unique(colony)[1],
unique(zyg)[1],
sep = '-',
collapse = ','
),
seed = seed,
check = check,
storeplot = storeplot,
windowing = activeWindowing,
plot = plotWindowing,
PicDir = dirname(outpfile),
filename = basename(outpfile),
OverwriteExistingFiles = OverwriteExistingFiles,
superDebug = superDebug,
predFunction = predFunction,
residFunction = residFunction,
weightORthreshold = weightORthreshold,
direction = direction
)
note = c(
note ,
c.ww0$note ,
applied_method = c.ww0$method ,
image_url = relativePath(
path = c.ww0$graphFileName ,
reference = wd
)
)
ExtraCols = c('external_sample_id')
####
message0('Preparing the output from VectorOutput function ...')
c.ww.vec = VectorOutput0(
c.ww0 = c.ww0,
ExtraCols = ExtraCols,
activeWindowing = activeWindowing
)
##
outP = SuccessfulOutput(
args = list(
c.ww0 = c.ww0 ,
depVariable = depVariable ,
depVar = depVar ,
c.ww.vec = c.ww.vec ,
procedure = procedure ,
parameter = parameter ,
center = center ,
n3.5 = n3.5 ,
strain = strain ,
meta = meta ,
zyg = zyg ,
colony = colony ,
note = note ,
PhenListSpecIds = PhenListSpecIds ,
OrgSpecIds = OrgSpecIds ,
BodyWeightCurvURL = BodyWeightCurvURL ,
GenePageURL = GenePageURL ,
encode = encode ,
wd = wd
)
)# c(as.list(environment()), ls()))
if ((
NullOrError(c.ww0$NormalObj) ||
!NullOrError(c.ww0$NormalObj$messages) ||
(
NullOrError(c.ww0$WindowedObj) &&
activeWindowing && (c.ww0$method %in% 'MM')
)
) &&
debug)
save(
n3.5,
file = paste0(
outpfile,
RemoveSpecialChars(colony),
'_',
RandomRegardSeed(),
'.Rdata'
)
)
##
StoreRawDataAndWindowingWeights(
storeRawData = storeRawData,
activeWindowing = activeWindowing,
c.ww0 = c.ww0,
RawoutputFile = RawoutputFile,
orgData = n3.5.2,
compressRawData = compressRawData,
files = c(RawoutputFile, ReadMeFile),
dir = dirname (RawoutputFile) ,
filename = basename(RawoutputFile) ,
ReadMeTxt = ReadMeTxt,
ReadMeFile = ReadMeFile,
methodmap = methodmap
)
SucFaiFile = paste(
outpfile2,
ifelse(
!NullOrError(c.ww0$NormalObj) && NullOrError(c.ww0$NormalObj$messages),
'Successful.tsv',
'Failed_critical_error.tsv'
),
sep =
'_'
)
write.table(
x = paste(outP,
collapse = outdelim),
file = file.exists0(SucFaiFile, overwrite = OverwriteExistingFiles),
append = TRUE,
row.names = FALSE,
col.names = FALSE,
quote = FALSE
)
} else {
message0('Dataset not processed ...')
optFail = NotProcessedOutput(
args = list(
procedure = procedure ,
parameter = parameter ,
center = center ,
n3.5.2 = n3.5.2 ,
n3.5 = n3.5 ,
strain = strain ,
meta = meta ,
zyg = zyg ,
colony = colony ,
depVariable = depVariable ,
#c.ww.vec = c.ww.vec ,
note = note ,
isException = isException ,
minSampRequired = minSampRequired ,
depVar = depVar ,
GenePageURL = GenePageURL ,
BodyWeightCurvURL = BodyWeightCurvURL ,
OrgSpecIds = OrgSpecIds ,
encode = encode ,
methodmap = methodmap
)
)
#optFail = NotProcessedOutput(args = c(as.list(environment()), ls()))
NotProcFile = paste(outpfile2,
'NotProcessed.tsv',
sep =
'_')
write.table(
paste(optFail,
collapse = outdelim),
file = file.exists0(NotProcFile, overwrite = OverwriteExistingFiles),
append = TRUE,
row.names = FALSE,
col.names = FALSE,
quote = FALSE
)
if(simulation)
break
}
}
message0(
'Finished in ',
round(difftime(Sys.time() , Strtime, units = 'sec'), 2),
'(s).\n
-----------------------------------
\n\n '
)
counter = counter + 1
}
}
}
}
}
}
}
}
message0('Closing Connections ...')
stopCluster(cl)
registerDoSEQ()
closeAllConnections()
stopImplicitCluster()
message0('Finished.')
setwd(cwd)
}
|
/Late adults stats pipeline/DRrequiredAgeing/DRrequiredAgeingPackage/R/main.R
|
permissive
|
xhyuo/impc_stats_pipeline
|
R
| false
| false
| 44,714
|
r
|
## You must check the 'check' parameter
mainAgeing = function(file = 'http://ves-ebi-d0:8090/mi/impc/dev/solr/experiment/select?q=*%3A*&fq=procedure_stable_id%3AIMPC_ECG_002&rows=590000&wt=csv&indent=true' ,
sep = ',' ,
na.strings = 'NA' ,
normalisedPhenlist = FALSE ,
subdir = 'Results' ,
seed = 123456 ,
readCategoriesFromFile = TRUE ,
OverwriteExistingFiles = FALSE ,
onlyFillNotExisitingResults = FALSE ,
WhiteListMethods = NULL ,
# Carefully use this option!
# It can remove the entire result file (some colonies in a single output file)
# Only for multicore
activateMulticore = TRUE ,
coreRatio = 5 / 14 ,
concurrentControlSelect = FALSE ,
MultiCoreErrorHandling = 'pass' ,
inorder = FALSE ,
verbose = TRUE ,
# Only for simulations
simulation = FALSE ,
Simulation.iteration = 1 ,
# Only for windowing
activeWindowing = TRUE ,
sensitivity = c(1, 1, 1, 0) ,
pvalThreshold = c(0, 0, 0, 0) ,
check = 2 ,
direction = c(1, 1) ,
weightORthreshold = 'weight' ,
predFunction = function(m) {
predict(m)
} ,
residFunction = function(m) {
resid(m)
} ,
messages = TRUE ,
threshold = sqrt(.Machine$double.eps) * 10,
outdelim = '\t' ,
debug = TRUE ,
encode = FALSE ,
noSpaceAllowed = TRUE ,
plotWindowing = TRUE ,
storeplot = TRUE ,
virtualDrive = FALSE ,
checkNamesForMissingColNames = TRUE ,
# Raw data
storeRawData = TRUE ,
compressRawData = TRUE ,
# Only for Batch generator
BatchProducer = FALSE ,
cpu = 4 ,
memory = 9000 ,
nMax = 10000 ,
ChunkSize = 24 ,
MinColoniesInChunks = 32 ,
controlSize = 1500 ,
### Just for debuging
superDebug = FALSE ,
extraBatchParameters = '-m "rh7-hosts-ebi5-12 rh7-hosts-ebi5-13 rh7-hosts-ebi5-14 rh7-hosts-ebi5-15 rh7-hosts-ebi5-16 rh7-hosts-ebi5-17 rh7-hosts-ebi5-18 rh7-hosts-ebi5-19 rh7-hosts-ebi5-20 rh7-hosts-ebi5-24 rh7-hosts-ebi5-25 rh7-hosts-ebi5-26 rh7-hosts-ebi5-27 rh7-hosts-ebi6-00 rh7-hosts-ebi6-01 rh7-hosts-ebi6-02 rh7-hosts-ebi6-03 rh7-hosts-ebi6-04 rh7-hosts-ebi6-05 rh7-hosts-ebi6-06 rh7-hosts-ebi6-07 rh7-hosts-ebi6-08 rh7-hosts-ebi6-09 rh7-hosts-ebi6-10 rh7-hosts-ebi6-11 rh7-hosts-ebi6-12 rh7-hosts-ebi6-13 rh7-hosts-ebi6-14 rh7-hosts-ebi6-15 rh7-hosts-ebi6-16 rh7-hosts-ebi6-17"',
...) {
message0('DRrequiredAgeing loaded')
message0(
Sys.time(),
' ############################################################\n',
Sys.time(),
' % Make sure ExceptionList and CategoryList are updated! %\n',
Sys.time(),
' % Please check SENSITIVITY for the windowing algorithm %\n',
Sys.time(),
' % Sensitivy:',
sensitivity,
' \n',
Sys.time(),
' ############################################################',
ShowTimeTag = FALSE
)
message0('Process started ...')
message0('Machine info: ', paste(Sys.info(), collapse = ', '))
message0('Loading dependent packages ...')
requireNamespace('PhenStat')
requireNamespace('PhenStatAgeing' )
requireNamespace('doParallel')
requireNamespace('parallel')
requireNamespace('foreach')
requireNamespace('SmoothWin')
requireNamespace('nlme')
requireNamespace('base64enc')
requireNamespace('RJSONIO' )
requireNamespace('jsonlite' )
# Config files
message0('Loading configuration ...')
methodmap = readConf('MethodMap.conf')
equationmap = readConf('EquationMap.conf')
CategoryMap = readConf('CategoryMap.conf')
initial = readConf('Initialize.conf')
exceptionList = readFile(file = 'ExceptionMap.list')
#CategoricalCategoryBlackList = readFile(file = 'CategoricalCategoryBlackList.list')
# Main subdirectory/working directory
message0('Preparing the working directory ...')
cwd = getwd()
wd = file.path(cwd,
paste(subdir, sep = '_', collapse = '_'))
dir.create0(wd, recursive = TRUE)
if (virtualDrive) {
message0('Creating a virtual drive ... ')
system('subst U: /D', wait = TRUE)
system(paste0('subst U: "', wd, '"'), wait = TRUE)
wd = 'U:'
}
message0('Setting the working directory to: \n\t\t ===> ', wd)
setwd(dir = wd)
##################
set.seed(seed)
# Read file
message0('Reading the input file ...\n\t ~>', file)
if (!file.exists(file))
message0('File is not local or does not exist!')
rdata = read.csv(
file = file ,
check.names = checkNamesForMissingColNames,
sep = sep ,
na.strings = na.strings ,
stringsAsFactors = TRUE
)
#### Temporary for ageing pipeline only
# rdataEarly = read.csv(
# file = gsub(
# pattern = 'LA_',
# replacement = '_',
# gsub(
# pattern = 'http://ves-ebi-d1.ebi.ac.uk:8988',
# replacement = 'http://ves-ebi-d0.ebi.ac.uk:8986',
# x = file
# )
# ),
# check.names = checkNamesForMissingColNames,
# sep = sep ,
# na.strings = na.strings ,
# stringsAsFactors = TRUE
# )
# com_cols = intersect(colnames(rdata), colnames(rdataEarly))
# rdata = rbind(rdata[, com_cols], rdataEarly[, com_cols])
message0('Input file dimentions: ',
paste0(dim(rdata), collapse = ', '))
rdata = rdata[!is.na(rdata$phenotyping_center), ] # Just to remove NA centers
new.data = rdata
new.data = new.data[order(Date2Integer(new.data$date_of_experiment)), ]
#########
new.data$colony_id = as.character(new.data$colony_id)
new.data$colony_id[new.data$biological_sample_group %in% "control"] = NA
new.data$external_sample_id = as.factor(new.data$external_sample_id)
################
# Start analysis
################
# Initializing cores
message0('Initialising cores ...')
crs = cores0(coreRatio = coreRatio, activate = activateMulticore)
closeAllConnections()
registerDoSEQ()
message0('The detected OS: ', .Platform$OS.type)
if (.Platform$OS.type == 'windows') {
cl = makeCluster(crs,
outfile = outMCoreLog(wd))
} else{
cl = makeForkCluster(crs,
outfile = outMCoreLog(wd))
}
registerDoParallel(cl, cores = crs)
# End of multicore initialization
# Get possible categories for the categorical variables
message0('Loading the list of possible categories for categorical variables ...')
CatList = GetPossibleCategories (procedure = NULL, file = readCategoriesFromFile)
message0('Filtering the dataset in progress ....')
Strtime = Sys.time()
procedures = as.character(unique(na.omit(new.data$procedure_group)))
for (procedure in procedures) {
###
n2.9 = base::subset(new.data, new.data$procedure_group %in% procedures)
parameters = as.character(unique(na.omit(n2.9$parameter_stable_id)))
for (parameter in parameters) {
FactorLevels = ReadFactorLevelsFromSolr(parameter = parameter, CatList = CatList)
### counter starts here ....
counter = 1
outP = list()
n3.0 = base::subset(n2.9, n2.9$parameter_stable_id %in% parameter)
centers = as.character(unique(na.omit(n3.0$phenotyping_center)))
for (center in centers) {
n3.1 = base::subset(n3.0, n3.0$phenotyping_center %in% center)
strains = as.character(unique(na.omit(n3.1$strain_accession_id)))
for (strain in strains) {
n3.2 = base::subset(n3.1, n3.1$strain_accession_id %in% strain)
metas = as.character(unique(na.omit(n3.2$metadata_group)))
for (meta in metas) {
n3.3 = base::subset(n3.2, n3.2$metadata_group %in% meta)
n3.3.c = base::subset(n3.3, n3.3$biological_sample_group %in% 'control')
n3.3.m = base::subset(n3.3, !(n3.3$biological_sample_group %in% 'control'))
zygositys = as.character(unique(na.omit(n3.3.m$zygosity)))
for (zyg in zygositys) {
n3.3.m_zyg = base::subset(n3.3.m, n3.3.m$zygosity %in% zyg)
colonys = as.character(unique(na.omit(n3.3.m_zyg$colony_id)))
nColonies = length(colonys)
if (BatchProducer && nColonies > 0) {
#nMax = 10000
ChunkSizeFromNumbers = ((nrow(n3.3.c) < nMax) * max(1, round(nColonies /
ChunkSize)) +
(nrow(n3.3.c) >= nMax) * nColonies)
minCol = ((nrow(n3.3.c) < nMax) * MinColoniesInChunks + (nrow(n3.3.c) >=
nMax) * 1)
ColonyChunks = chunkVector(
x = colonys,
n = ChunkSizeFromNumbers,
min = minCol,
activate = (nColonies >= MinColoniesInChunks) &&
(nrow(n3.3.c) >= controlSize)
)
outpDir = file.path0(
wd,
paste0(Sys.Date(), '_', subdir, '_RawData/'),
check = FALSE,
create = TRUE,
IncludedFileName = FALSE
)
SubSubDirOrdFileName = RemoveSpecialChars(paste(
Sys.Date() ,
#RandomRegardSeed() ,
#procedure ,
parameter ,
center ,
zyg ,
strain ,
meta ,
collapse = '_'
))
outpfile = file.path0(
outpDir,
SubSubDirOrdFileName,
check = FALSE,
create = TRUE,
IncludedFileName = TRUE
)
mess = paste0(
Sys.time(),
'. Processed file: ',
SubSubDirOrdFileName,
'. #Colonies = ',
nColonies,
', #Controls = ',
nrow(n3.3.c),
', Chunks = ',
length(ColonyChunks)
)
message0(mess, ShowTimeTag = FALSE)
write(
x = mess,
file = paste0(
Sys.Date(),
'_',
subdir,
'_DataGenerationLog.log'
),
10 ^ 5,
append = TRUE
)
counter = 1
for (ChunkedColonies in ColonyChunks) {
BatchData = rbind (subset(n3.3.m_zyg, n3.3.m_zyg$colony_id %in% ChunkedColonies),
n3.3.c)
BatchFileName = file.exists0(
paste0(
outpfile,
'_C',
length(ColonyChunks),
'_',
RandomRegardSeed(),
'_',
counter,
'.csv'
),
overwrite = OverwriteExistingFiles
)
if (all(dim(BatchData) > 0)) {
write.csv(BatchData,
file = BatchFileName,
row.names = FALSE)
out = BatchGenerator(
file = BatchFileName ,
dir = outpDir ,
procedure = procedure ,
parameter = parameter ,
center = center ,
cpu = cpu ,
memory = memory ,
extraBatchParameters = extraBatchParameters
)
write(
x = out,
file = paste0(outpDir, '/', subdir, '_Batch.bch'),
ncolumns = 10 ^ 5,
append = TRUE
)
counter = counter + 1
}
rm0(c('BatchData', 'BatchFileName'), silent = TRUE)
}
} else{
message0(
' [',
paste(
procedure,
parameter,
center ,
strain ,
meta ,
zyg ,
length(colonys),
sep = ']~>['
),
']\n'
)
### Single or multiple cores?
`%activemulticore%` = ifelse (activateMulticore &&
!BatchProducer,
`%dopar%`,
`%do%`)
if (activateMulticore &&
!BatchProducer) {
message0('Multicore processing in progress ...')
} else{
message0('Single core processing in progress ...')
}
i = 1
MultiCoreRes = foreach::foreach (
i = 1:length(colonys),
.packages = c(
'PhenStat' ,
'SmoothWin' ,
'base64enc' ,
'nlme' ,
'RJSONIO' ,
'jsonlite' ,
'PhenStatAgeing',
'DRrequiredAgeing'
),
.errorhandling = c(MultiCoreErrorHandling),
.verbose = verbose ,
.inorder = inorder
) %activemulticore% {
#for (i in 1:length(colonys)){
message0('*~*~*~*~*~* ', i, '|', length(colonys), ' *~*~*~*~*~*')
for (sim.index in 1:ifelse(simulation, Simulation.iteration, 1)) {
# Removing the old objects if exist
ObjectsThatMustBeRemovedInEachIteration()
# Initialization before starting the analysis
note = list()
colony = colonys[i]
message0('Current colony: ',colony)
n3.4 = base::subset(n3.3.m_zyg, n3.3.m_zyg$colony_id %in% c(colony))
n3.5 = rbind (n3.4, n3.3.c)
note = c(note,
list(
bodyweight_included_in_data = CheckIfNameExistInDataFrame(obj = n3.5,
name = 'weight',
checkLevels = FALSE)
))
# Imaginary URLs
note$gene_page_url = GenePageURL = GenePageURL(n3.5)
note$bodyweight_page_url = BodyWeightCurvURL = BodyWeightCurvURL(n3.5)
ReadMeTxt = ReadMe (obj = n3.4, URL = GenePageURL)
# Define response column [do not move me!]
depVariable = getResponseColumn(n3.5$observation_type)
depVar = depVariable$column
message0('Dependent variable: ', depVar)
note$response_type = paste0(depVar,
'_of_type_',
paste(depVariable$lbl, sep = '.'))
note$observation_type =
if (!is.null(unique(n3.5$observation_type))) {
paste(unique(n3.5$observation_type),
sep = '~',
collapse = '~')
} else{
NULL
}
note$data_type =
if (!is.null(unique(n3.5$data_type))) {
paste(unique(n3.5$data_type),
sep = '~',
collapse = '~')
} else{
NULL
}
minSampRequired = ifelse(
is.ABR(x = parameter),
as.numeric(initial$min_ABR_mut_each_sex),
ifelse(
is.numeric(n3.5[, depVar]),
as.numeric(initial$min_num_mut_each_sex),
2
)
)
# add missing levels to categorical variables
if (!is.numeric(n3.5[, depVar])) {
AllLevels = mapLevelsToFactor(levels = levels(n3.5[, depVar]),
newlevels = FactorLevels$levels)
levels(n3.5[, depVar]) = AllLevels$levels
#####
note = c(note,
FactorLevels$note,
AllLevels$note)
#####
SexGenResLevels = min(2 * 2 * length(AllLevels$levels), 4)
} else{
SexGenResLevels = 4
}
if (!depVariable$accepted)
return('Not a proper dataset!')
if (simulation && is.numeric(n3.5[, depVar])) {
message0('Simulation in progress ... Round ',
sim.index)
n3.5_tmp = mimicControls(
df = n3.5,
removeMutants = (sim.index == 1) ,
ArtifLabel = 'experimental' ,
mutLabel = 'experimental' ,
baselines = 'control' ,
neutralise = TRUE ,
resample = (sim.index != 1) ,
depVariable = depVar ,
sex = 'sex' ,
minSampRequired = minSampRequired,
SexGenResLevels = SexGenResLevels,
indicator = sim.index ,
plot = superDebug
)
n3.5 = n3.5_tmp$df
note = list(note , simulation_details = n3.5_tmp$note)
}
# Summary statistics
n3.5_summary = SummaryStatisticsOriginal(x = n3.5, depVar = depVar)
note = c(note, n3.5_summary)
# Remove zero frequency categories
n3.5.1_F_list = RemoveZeroFrequencyCategories(
x = n3.5,
minSampRequired = minSampRequired,
depVar = depVar,
totalLevels = SexGenResLevels
)
n3.5.1 = n3.5.1_F_list$x
note = c(note, n3.5.1_F_list$note)
# Remove var categories
n3.5.1_v_list = RemoveZerovarCategories(
x = n3.5.1,
depVar = depVar,
minvar = 0,
method = getMethodi(
var = parameter,
type = ifelse(
is.numeric(n3.5.1[, depVar]),
'numeric',
'charachter'
),
methodMap = methodmap
)
)
n3.5.1 = n3.5.1_v_list$x
note = c(note, n3.5.1_v_list$note)
OrgSpecIds = OtherExtraColumns(
obj = n3.5,
ColNames = c(
'external_sample_id',
'sex',
'biological_sample_group',
depVar,
'date_of_experiment',
'weight'
),
names = c(
# all lower case
'original_external_sample_id',
'original_sex',
'original_biological_sample_group',
'original_response',
'original_date_of_experiment',
'original_body_weight'
)
)
note = c(note, OrgSpecIds)
message0('Creating output directory and file name ...')
SubSubDirOrdFileName = file.path0(
RemoveSpecialChars(center) ,
RemoveSpecialChars(procedure) ,
RemoveSpecialChars(parameter) ,
RemoveSpecialChars(colony) ,
RemoveSpecialChars(zyg) ,
RemoveSpecialChars(meta) ,
create = FALSE,
check = noSpaceAllowed
)
FileName = 'output'
outDir = file.path0(
wd,
SubSubDirOrdFileName,
create = TRUE,
check = FALSE,
IncludedFileName = FALSE
)
outpfile = outpfile2 = paste0(outDir,
'/',
FileName,
collapse = '')
message0('Output directory: \n \t\t =>=>=> ',
outpfile)
if (onlyFillNotExisitingResults) {
if (any(file.exists(paste(
outpfile, c('NotProcessed.tsv', 'Successful.tsv'), sep = '_'
)))) {
message0('File already exists then skipt!')
return(NULL)
} else{
message0('Result does not exist! Adding in progress ...')
rmme = lapply(list.files(dirname(outpfile), full.names = TRUE), function(x) {
if (!is.null(x) &&
file.exists(x) &&
(
grepl(
pattern = '.Rdata',
x = x,
fixed = TRUE
) ||
grepl(
pattern = 'Failed_critical_error',
x = x,
fixed = TRUE
)
)
)
file.remove(x)
})
write(outpfile, file = 'DoesNotExists.log', append = TRUE)
}
}
####
if (storeRawData) {
# There is a second snippet for the rawdata + weights
RawoutputFile = RawoutputFile0 = file.exists0(paste(outpfile2,
'rawData.csv',
sep = '_'),
overwrite = OverwriteExistingFiles)
ReadMeFile = file.exists0(file.path(dirname(RawoutputFile), 'ReadMe.txt'))
message0(
'writting the raw data file to disk ... \n \t\t ===> ',
RawoutputFile
)
write.csv(
x = n3.5 ,
row.names = FALSE ,
file = RawoutputFile
)
write(
x = ReadMeTxt ,
file = ReadMeFile,
ncolumns = 1
)
if (compressRawData) {
comRes = compressFiles(
fileList = c(ReadMeFile , RawoutputFile),
dir = dirname (RawoutputFile) ,
filename = basename(RawoutputFile) ,
overwrite = OverwriteExistingFiles
)
if (comRes$status == 0)
message0('Compression successful')
else
message0('Compression failed')
RawoutputFile0 = comRes$file
}
}
note$input_file = relativePath(path = file, reference = wd)
note$output_raw_data_file = relativePath(path = if (storeRawData) {
RawoutputFile0
} else{
NULL
},
reference = wd)
note$read_me_file = relativePath(path = if (storeRawData &&
!compressRawData) {
ReadMeFile
} else{
NULL
},
reference = wd)
###'
isException = IsInList(
item = c(parameter, procedure),
list = exceptionList,
message = 'Value found in the skipt list'
)
n3.5.2 = n3.5.1
MergLev = MergeLevels(x = n3.5.2[, depVar],
listOfLevelMaps = CategoryMap)
n3.5.2[, depVar] = MergLev$x
n3.5.2 = droplevels0(n3.5.2[!is.na(n3.5.2[, depVar]),])
n3.5.2OnlyKO = subset(n3.5.2,n3.5.2$biological_sample_group %in% 'experimental')
note$relabeled_levels_categorical_variables_only = MergLev$note
if (!is.null(n3.5.2) &&
# data.frame is not zero
min0(dim(n3.5.2)) > 0 &&
# is it really exist!
length(unique(n3.5.2$biological_sample_group)) > 1 &&
# include mut and cont
min0(table(n3.5.2$biological_sample_group)) > minSampRequired &&
max0(table(n3.5.2OnlyKO$biological_sample_group, n3.5.2OnlyKO$sex)) > 1 &&
# include at least 4/2 of each genotype
#length(unique(n3.5.2$colony_id)) > 1 &&
length(RepBlank(
unique(n3.5.2$colony_id),
match = c('', NA, 'NA')
)) > 1 &&
# include 2 colonies (cont & mut)
checkGenInCol(n3.5.2) &&
# each sex and genotype
depVariable$accepted &&
length(na.omit(n3.5.2[, depVar])) > 0 &&
# response is not empty!
# there must be variation in data
NonZeroVariation(n3.5.2[, depVar]) &&
!isException &&
columnLevelsVariationRadio(dataset = n3.5.2, columnName = depVar) > 0.005 &&
RR_thresholdCheck(data = n3.5.2,depVar = depVar,parameter = parameter,methodmap = methodmap)$criteria_result
) {
message0('Analysing the dataset in progress ...')
message0('Creating PhenList object ...')
a = PhenStat::PhenList(
n3.5.2,
testGenotype = 'experimental',
refGenotype = 'control',
dataset.colname.genotype = 'biological_sample_group',
dataset.colname.sex = 'sex',
dataset.colname.weight = 'weight',
dataset.colname.batch = 'date_of_experiment'
)
a_summary_before_concurrent = SummaryStatisticsOriginal(
x = a@datasetPL,
depVar = depVar,
sex = 'Sex',
genotype = 'Genotype',
label = 'phenlist_data_summary_statistics'
)
note = c(note, a_summary_before_concurrent)
#
PhenListSpecIds = OtherExtraColumns (
obj = a@datasetPL,
ColNames = 'external_sample_id',
names = 'phenlist_data_spec_ids'
)
note = c(note, PhenListSpecIds)
### Get method of analysis
method = getMethodi(
var = parameter,
type = ifelse(
is.numeric(a@datasetPL[, depVar]),
'numeric',
'charachter'
),
methodMap = methodmap
)
# WhiteList methods
if (!is.null(WhiteListMethods) &&
!(method %in% WhiteListMethods)) {
message0('Black list applied. Method = ', method)
return(FALSE)
}
#### Check for concurrent control selection
aTmp = concurrentContSelect(
activate = concurrentControlSelect &&
(method %in% 'MM') && !activeWindowing,
PhenListObj = a,
depVar = depVar,
minSampRequired = minSampRequired
)
a = aTmp$obj
note = c(note, aTmp$note)
#### END OF concurrent
# Just before analysing data (because cuncurrent sampling)
if (concurrentControlSelect &&
(method %in% 'MM') && !activeWindowing) {
a_phenlist_concurrent_summary = SummaryStatisticsOriginal(
x = a@datasetPL,
depVar = depVar,
sex = 'Sex',
genotype = 'Genotype',
label = 'phenlist_and_cuncurrent_data_summary_statistics'
)
note = c(note, a_phenlist_concurrent_summary)
}
# check the Weight column
message0('Checking whether Weight column exists in the raw data ...')
if (!CheckIfNameExistInDataFrame(a@datasetPL, 'Weight')) {
note$existence_of_weight_column =
'Weight column does not exist in the raw data'
}
# Equation type
equationType = ifelse(
CheckIfNameExistInDataFrame(a@datasetPL, 'Weight'),
getEquation(var = parameter,
equationMap = equationmap),
'withoutWeight'
)
# This is the main engine!
note = c(note, list(
bodyweight_initially_included_in_model = ifelse(method %in% 'MM', equationType, FALSE)
))
if (normalisedPhenlist){
a = normalisePhenList(phenlist = a, colnames = c(depVar, 'Weight'))
}
message0('Fitting the model ...')
message0('Method: ', method, '\n\t Equation:', equationType)
c.ww0 = PhenStatWindow(
phenlistObject = a,
parameter = parameter,
minObs = minSampRequired,
method = method,
depVariable = depVar,
equation = equationType,
threshold = threshold,
pvalThreshold = pvalThreshold,
sensitivity = sensitivity,
messages = messages,
main = paste(
unique(n3.5.2$procedure_name)[1],
'\n',
unique(n3.5.2$parameter_name)[1],
'\n',
unique(center)[1],
unique(colony)[1],
unique(zyg)[1],
sep = '-',
collapse = ','
),
seed = seed,
check = check,
storeplot = storeplot,
windowing = activeWindowing,
plot = plotWindowing,
PicDir = dirname(outpfile),
filename = basename(outpfile),
OverwriteExistingFiles = OverwriteExistingFiles,
superDebug = superDebug,
predFunction = predFunction,
residFunction = residFunction,
weightORthreshold = weightORthreshold,
direction = direction
)
note = c(
note ,
c.ww0$note ,
applied_method = c.ww0$method ,
image_url = relativePath(
path = c.ww0$graphFileName ,
reference = wd
)
)
ExtraCols = c('external_sample_id')
####
message0('Preparing the output from VectorOutput function ...')
c.ww.vec = VectorOutput0(
c.ww0 = c.ww0,
ExtraCols = ExtraCols,
activeWindowing = activeWindowing
)
##
outP = SuccessfulOutput(
args = list(
c.ww0 = c.ww0 ,
depVariable = depVariable ,
depVar = depVar ,
c.ww.vec = c.ww.vec ,
procedure = procedure ,
parameter = parameter ,
center = center ,
n3.5 = n3.5 ,
strain = strain ,
meta = meta ,
zyg = zyg ,
colony = colony ,
note = note ,
PhenListSpecIds = PhenListSpecIds ,
OrgSpecIds = OrgSpecIds ,
BodyWeightCurvURL = BodyWeightCurvURL ,
GenePageURL = GenePageURL ,
encode = encode ,
wd = wd
)
)# c(as.list(environment()), ls()))
if ((
NullOrError(c.ww0$NormalObj) ||
!NullOrError(c.ww0$NormalObj$messages) ||
(
NullOrError(c.ww0$WindowedObj) &&
activeWindowing && (c.ww0$method %in% 'MM')
)
) &&
debug)
save(
n3.5,
file = paste0(
outpfile,
RemoveSpecialChars(colony),
'_',
RandomRegardSeed(),
'.Rdata'
)
)
##
StoreRawDataAndWindowingWeights(
storeRawData = storeRawData,
activeWindowing = activeWindowing,
c.ww0 = c.ww0,
RawoutputFile = RawoutputFile,
orgData = n3.5.2,
compressRawData = compressRawData,
files = c(RawoutputFile, ReadMeFile),
dir = dirname (RawoutputFile) ,
filename = basename(RawoutputFile) ,
ReadMeTxt = ReadMeTxt,
ReadMeFile = ReadMeFile,
methodmap = methodmap
)
SucFaiFile = paste(
outpfile2,
ifelse(
!NullOrError(c.ww0$NormalObj) && NullOrError(c.ww0$NormalObj$messages),
'Successful.tsv',
'Failed_critical_error.tsv'
),
sep =
'_'
)
write.table(
x = paste(outP,
collapse = outdelim),
file = file.exists0(SucFaiFile, overwrite = OverwriteExistingFiles),
append = TRUE,
row.names = FALSE,
col.names = FALSE,
quote = FALSE
)
} else {
message0('Dataset not processed ...')
optFail = NotProcessedOutput(
args = list(
procedure = procedure ,
parameter = parameter ,
center = center ,
n3.5.2 = n3.5.2 ,
n3.5 = n3.5 ,
strain = strain ,
meta = meta ,
zyg = zyg ,
colony = colony ,
depVariable = depVariable ,
#c.ww.vec = c.ww.vec ,
note = note ,
isException = isException ,
minSampRequired = minSampRequired ,
depVar = depVar ,
GenePageURL = GenePageURL ,
BodyWeightCurvURL = BodyWeightCurvURL ,
OrgSpecIds = OrgSpecIds ,
encode = encode ,
methodmap = methodmap
)
)
#optFail = NotProcessedOutput(args = c(as.list(environment()), ls()))
NotProcFile = paste(outpfile2,
'NotProcessed.tsv',
sep =
'_')
write.table(
paste(optFail,
collapse = outdelim),
file = file.exists0(NotProcFile, overwrite = OverwriteExistingFiles),
append = TRUE,
row.names = FALSE,
col.names = FALSE,
quote = FALSE
)
if(simulation)
break
}
}
message0(
'Finished in ',
round(difftime(Sys.time() , Strtime, units = 'sec'), 2),
'(s).\n
-----------------------------------
\n\n '
)
counter = counter + 1
}
}
}
}
}
}
}
}
message0('Closing Connections ...')
stopCluster(cl)
registerDoSEQ()
closeAllConnections()
stopImplicitCluster()
message0('Finished.')
setwd(cwd)
}
|
library(synapser)
library(tidyverse)
library(magrittr)
library(wrapr)
library(data.table)
synLogin()
source("../../../utils.R")
# Columns that every submission file (ranked or unranked) must provide.
REQ_COLS <- c("HLA_ALLELE", "ALT_EPI_SEQ", "VAR_ID")
# Ranked submission files must additionally provide a RANK column.
REQ_RANKED_COLS <- c(REQ_COLS, "RANK")
# Columns submitters may optionally provide; kept when present.
OPT_COLS <- c(
  "SCORE",
  "REF_EPI_SEQ",
  "PEP_LEN",
  "HLA_ALLELE_MUT",
  "HLA_ALT_BINDING",
  "HLA_REF_BINDING",
  "REF_ALLELE_EXP",
  "ALT_ALLELE_EXP",
  "RANK_METRICS",
  "RANK_DESC",
  "ADDN_INFO",
  "STEP_ID",
  "PROT_POS")
# Coercion function applied to each known column by convert_df_to_types();
# columns absent from a given file are simply skipped.
COL_FUNCS <- list(
  "RANK" = as.integer,
  "HLA_ALLELE" = as.character,
  "ALT_EPI_SEQ" = as.character,
  "VAR_ID" = as.character,
  "SCORE" = as.integer,
  "REF_EPI_SEQ" = as.character,
  "PEP_LEN" = as.integer,
  "HLA_ALLELE_MUT" = as.character,
  "HLA_ALT_BINDING" = as.double,
  "HLA_REF_BINDING" = as.double,
  "REF_ALLELE_EXP" = as.double,
  "ALT_ALLELE_EXP" = as.double,
  "RANK_METRICS" = as.character,
  "RANK_DESC" = as.character,
  "ADDN_INFO" = as.character,
  "STEP_ID" = as.character,
  "PROT_POS" = as.integer)
# Columns created by this script rather than supplied by submitters.
ADDED_COLS <- c(
  "SOURCE_ROW_N",
  "SOURCE",
  "PREDICTION_ID"
)
# Pull the round-x submission manifest from the Synapse table and reshape it
# so each row is one submission with one column per submitted file type
# (TESLA_OUT_1.csv .. TESLA_OUT_4.csv) holding that file's Synapse id.
# NOTE(review): stringr::str_match() returns a character matrix, so
# `file_type` here is a one-column matrix rather than a plain character
# vector; filter()/spread() tolerate this, but str_extract() would yield a
# plain vector -- confirm downstream expectations before changing.
submission_df <-
  "select id, name, submissionId, patientId from syn18387034 where round = 'x'" %>%
  synapser::synTableQuery() %>%
  as.data.frame() %>%
  tibble::as_tibble() %>%
  dplyr::select(id, name, submissionId, patientId) %>%
  # Derive the file type (e.g. "TESLA_OUT_1.csv") from the tail of the name.
  dplyr::mutate(file_type = stringr::str_match(name, "TESLA_[:print:]+$")) %>%
  dplyr::select(-name) %>%
  # The YAML metadata file is not a prediction file; drop it.
  dplyr::filter(file_type != "TESLA_YAML.yaml") %>%
  # Wide format: one column per file type, values are Synapse file ids.
  spread(key = "file_type", value = "id")
#' Build the prediction-related tables for one submission.
#'
#' Reads the fastq-based predictions (and, when both vcf files exist, the
#' vcf-based predictions), then splits the combined table into:
#'   * variant_prediction_df: PREDICTION_ID -> submission-scoped VARIANT_ID
#'   * protein_position_df:   PREDICTION_ID -> PROT_POS
#'   * prediction_df:         one row per (SOURCE, ALT_EPI_SEQ, HLA_ALLELE),
#'                            keeping the best (lowest RANK) duplicate
#'   * bad_prediction_df:     the duplicate rows that were discarded
#'
#' @param args One row of `submission_df` (TESLA file Synapse ids plus
#'   submissionId).
#' @return A named list with the four data frames described above.
create_prediction_tables <- function(args){
  prediction_df <- create_prediction_table(args)
  # vcf-based outputs are optional; only add them when both files exist.
  if(!is.na(args$TESLA_OUT_2.csv) && !is.na(args$TESLA_OUT_4.csv)){
    vcf_df <- create_prediction_table(args, "vcf")
    prediction_df <- dplyr::bind_rows(prediction_df, vcf_df)
  }
  # VAR_ID and PROT_POS are list-columns; unnest them into long-format link
  # tables. `cols =` is required explicitly since tidyr 1.0 (bare unnest()
  # is deprecated).
  variant_prediction_df <- prediction_df %>%
    dplyr::select(PREDICTION_ID, VAR_ID) %>%
    tidyr::unnest(cols = VAR_ID) %>%
    dplyr::mutate(VARIANT_ID = str_c(args$submissionId, "_", VAR_ID)) %>%
    dplyr::select(-VAR_ID)
  protein_position_df <- prediction_df %>%
    dplyr::select(PREDICTION_ID, PROT_POS) %>%
    tidyr::unnest(cols = PROT_POS)
  # Within each (SOURCE, epitope, allele) group keep only the best-ranked
  # row. The global arrange() by RANK preserves relative order inside each
  # group, so slice(1) picks the minimum-RANK row per group.
  prediction_df <- prediction_df %>%
    dplyr::select(-c(VAR_ID, PROT_POS)) %>%
    dplyr::group_by(SOURCE, ALT_EPI_SEQ, HLA_ALLELE) %>%
    dplyr::arrange(RANK)
  bad_prediction_df <- prediction_df %>%
    dplyr::slice(-1) %>%
    dplyr::ungroup()
  prediction_df <- prediction_df %>%
    dplyr::slice(1) %>%
    dplyr::ungroup()
  return(list(
    "variant_prediction_df" = variant_prediction_df,
    "protein_position_df" = protein_position_df,
    "bad_prediction_df" = bad_prediction_df,
    "prediction_df" = prediction_df
  ))
}
#' Read and normalise one submission's ranked + unranked prediction files.
#'
#' @param args One row of `submission_df` with the TESLA file Synapse ids.
#' @param src Which pipeline produced the files: "fastq" (TESLA_OUT_1/3,
#'   the default) or "vcf" (TESLA_OUT_2/4).
#' @return A tibble with one row per unique prediction; PROT_POS and VAR_ID
#'   are list-columns of the unique values seen for that prediction.
create_prediction_table <- function(args, src = "fastq"){
  if(src == "fastq"){
    ranked_df <- create_df_from_synapse_id(args$TESLA_OUT_1.csv)
    unranked_df <- create_df_from_synapse_id(args$TESLA_OUT_3.csv)
  } else {
    ranked_df <- create_df_from_synapse_id(args$TESLA_OUT_2.csv)
    unranked_df <- create_df_from_synapse_id(args$TESLA_OUT_4.csv)
  }
  # VAR_ID can hold multiple ":"-separated ids and PROT_POS multiple
  # ";"-separated positions; explode them to one value per row.
  ranked_df <- ranked_df %>%
    separate_rows(VAR_ID, sep = ":") %>%
    separate_rows(PROT_POS, sep = ";")
  unranked_df <- unranked_df %>%
    separate_rows(VAR_ID, sep = ":") %>%
    separate_rows(PROT_POS, sep = ";")
  check_columns(ranked_df, REQ_RANKED_COLS)
  # seq_len(n()) (not 1:n()) so a zero-row file does not produce c(1, 0).
  ranked_df <- ranked_df %>%
    convert_df_to_types() %>%
    dplyr::mutate(SOURCE_ROW_N = as.character(seq_len(n()))) %>%
    dplyr::mutate(STEP_ID = NA)
  if (nrow(unranked_df) > 0 ){
    check_columns(unranked_df, REQ_COLS)
    unranked_df <- unranked_df %>%
      convert_df_to_types() %>%
      dplyr::mutate(SOURCE_ROW_N = as.character(seq_len(n()))) %>%
      dplyr::mutate(RANK = NA)
  }
  combined_df <- ranked_df %>%
    dplyr::select(dplyr::one_of(names(COL_FUNCS), ADDED_COLS)) %>%
    convert_df_to_types() %>%
    dplyr::filter(!is.na(RANK)) %>%
    dplyr::bind_rows(unranked_df) %>%
    dplyr::filter(!is.na(HLA_ALLELE)) %>%
    dplyr::filter(!is.na(ALT_EPI_SEQ)) %>%
    dplyr::mutate(ALT_EPI_SEQ = format_epitopes(ALT_EPI_SEQ)) %>%
    dplyr::filter(!ALT_EPI_SEQ == "") %>%
    dplyr::mutate(SOURCE = src) %>%
    # Collapse rows that differ only in position/variant/provenance columns,
    # keeping the unique values as list-columns / ";"-joined strings.
    dplyr::group_by_at(vars(-c(PROT_POS, VAR_ID, SOURCE_ROW_N, STEP_ID))) %>%
    dplyr::summarise(PROT_POS = list(unique(PROT_POS)),
                     VAR_ID = list(unique(VAR_ID)),
                     SOURCE_ROW_N = str_c(unique(SOURCE_ROW_N), collapse = ";"),
                     STEP_ID = str_c(unique(STEP_ID), collapse = ";")) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(SUBMISSION_ID = args$submissionId) %>%
    dplyr::mutate(PREDICTION_ID = stringr::str_c(
      SUBMISSION_ID,
      SOURCE,
      ALT_EPI_SEQ,
      HLA_ALLELE,
      sep = "_"))
  # Bug fix: the original ended on the assignment above, returning the value
  # only as the assignment's invisible result. Return it explicitly.
  combined_df
}
#' Normalise epitope strings: strip all non-letter characters and upper-case.
#'
#' @param epitopes Character vector of epitope sequences (may contain
#'   whitespace, digits or punctuation from hand-entered submissions).
#' @return Character vector of the same length containing only A-Z.
format_epitopes <- function(epitopes){
  # Base gsub() is equivalent to stringr::str_remove_all() here and keeps
  # this helper free of non-base dependencies.
  toupper(gsub("[^A-Za-z]", "", epitopes))
}
# Coerce every column named in COL_FUNCS to its declared type, leaving
# columns absent from `df` untouched (mutate_col_if_exists is a no-op for
# missing columns).
convert_df_to_types <- function(df){
  Reduce(
    function(acc, col_name) {
      mutate_col_if_exists(acc, col_name, col_name, COL_FUNCS[[col_name]])
    },
    names(COL_FUNCS),
    init = df
  )
}
#' Apply `func` to column `old_col` of `df`, storing the result in `new_col`.
#'
#' Does nothing when `old_col` is absent (df[[old_col]] is NULL), so callers
#' may invoke it unconditionally for optional columns.
#'
#' @param df A data.frame.
#' @param old_col Name (string) of the source column.
#' @param new_col Name (string) of the destination column (may equal old_col).
#' @param func Function applied to the source column.
#' @param ... Extra arguments forwarded to `func`.
#' @return `df`, with `new_col` added/overwritten when `old_col` exists.
mutate_col_if_exists <- function(df, old_col, new_col, func, ...){
  # Base `[[` assignment replaces the previous magrittr::extract2() +
  # wrapr::let() + dplyr::mutate() metaprogramming with identical behavior
  # and no non-base dependencies.
  if(!is.null(df[[old_col]])){
    df[[new_col]] <- func(df[[old_col]], ...)
  }
  return(df)
}
#' Stop with an informative error if `df` is missing any required column.
#'
#' @param df A data.frame.
#' @param required_cols Character vector of column names that must be present.
#' @return NULL invisibly; called for its side effect (raising an error).
check_columns <- function(df, required_cols){
  missing_columns <- required_cols[!required_cols %in% colnames(df)]
  # Bug fix: the original tested length(missing_columns > 0) -- the length of
  # a comparison vector -- which only worked by accident. Test the length
  # itself. paste() replaces str_c() so the check has no stringr dependency.
  if(length(missing_columns) > 0){
    stop("df has missing columns: ",
         paste(missing_columns, collapse = ", "))
  }
}
# Run the extraction once per submission: split the manifest by submissionId
# and map each one-row group through create_prediction_tables().
dfs <- submission_df %>%
  dplyr::ungroup() %>%
  dplyr::group_by(submissionId) %>%
  dplyr::group_split() %>%
  purrr::map(create_prediction_tables)
# Concatenate each of the four per-submission tables across all submissions
# and write one CSV per table.
dfs %>%
  purrr::map("prediction_df") %>%
  dplyr::bind_rows() %>%
  readr::write_csv("roundx_predictions.csv")
dfs %>%
  purrr::map("protein_position_df") %>%
  dplyr::bind_rows() %>%
  readr::write_csv("roundx_protein_positions.csv")
dfs %>%
  purrr::map("variant_prediction_df") %>%
  dplyr::bind_rows() %>%
  readr::write_csv("roundx_prediction_variants.csv")
dfs %>%
  purrr::map("bad_prediction_df") %>%
  dplyr::bind_rows() %>%
  readr::write_csv("roundx_bad_predictions.csv")
|
/upload_data/Submissions/roundx/make_roundx_prediction_files.R
|
no_license
|
Sage-Bionetworks/Tesla-Bigquery-ETL
|
R
| false
| false
| 6,753
|
r
|
library(synapser)
library(tidyverse)
library(magrittr)
library(wrapr)
library(data.table)
synLogin()
source("../../../utils.R")
REQ_COLS <- c("HLA_ALLELE", "ALT_EPI_SEQ", "VAR_ID")
REQ_RANKED_COLS <- c(REQ_COLS, "RANK")
OPT_COLS <- c(
"SCORE",
"REF_EPI_SEQ",
"PEP_LEN",
"HLA_ALLELE_MUT",
"HLA_ALT_BINDING",
"HLA_REF_BINDING",
"REF_ALLELE_EXP",
"ALT_ALLELE_EXP",
"RANK_METRICS",
"RANK_DESC",
"ADDN_INFO",
"STEP_ID",
"PROT_POS")
COL_FUNCS <- list(
"RANK" = as.integer,
"HLA_ALLELE" = as.character,
"ALT_EPI_SEQ" = as.character,
"VAR_ID" = as.character,
"SCORE" = as.integer,
"REF_EPI_SEQ" = as.character,
"PEP_LEN" = as.integer,
"HLA_ALLELE_MUT" = as.character,
"HLA_ALT_BINDING" = as.double,
"HLA_REF_BINDING" = as.double,
"REF_ALLELE_EXP" = as.double,
"ALT_ALLELE_EXP" = as.double,
"RANK_METRICS" = as.character,
"RANK_DESC" = as.character,
"ADDN_INFO" = as.character,
"STEP_ID" = as.character,
"PROT_POS" = as.integer)
ADDED_COLS <- c(
"SOURCE_ROW_N",
"SOURCE",
"PREDICTION_ID"
)
submission_df <-
"select id, name, submissionId, patientId from syn18387034 where round = 'x'" %>%
synapser::synTableQuery() %>%
as.data.frame() %>%
tibble::as_tibble() %>%
dplyr::select(id, name, submissionId, patientId) %>%
dplyr::mutate(file_type = stringr::str_match(name, "TESLA_[:print:]+$")) %>%
dplyr::select(-name) %>%
dplyr::filter(file_type != "TESLA_YAML.yaml") %>%
spread(key = "file_type", value = "id")
create_prediction_tables <- function(args){
prediction_df <- create_prediction_table(args)
if(!is.na(args$TESLA_OUT_2.csv) && !is.na(args$TESLA_OUT_4.csv)){
vcf_df <- create_prediction_table(args, "vcf")
prediction_df <- dplyr::bind_rows(prediction_df, vcf_df)
}
variant_prediction_df <- prediction_df %>%
dplyr::select(PREDICTION_ID, VAR_ID) %>%
tidyr::unnest() %>%
dplyr::mutate(VARIANT_ID = str_c(args$submissionId, "_", VAR_ID)) %>%
dplyr::select(-VAR_ID)
protein_position_df <- prediction_df %>%
dplyr::select(PREDICTION_ID, PROT_POS) %>%
tidyr::unnest()
prediction_df <- prediction_df %>%
dplyr::select(-c(VAR_ID, PROT_POS)) %>%
dplyr::group_by(SOURCE, ALT_EPI_SEQ, HLA_ALLELE) %>%
dplyr::arrange(RANK)
bad_prediction_df <- prediction_df %>%
dplyr::slice(-1) %>%
dplyr::ungroup()
prediction_df <- prediction_df %>%
dplyr::slice(1) %>%
dplyr::ungroup()
return(list(
"variant_prediction_df" = variant_prediction_df,
"protein_position_df" = protein_position_df,
"bad_prediction_df" = bad_prediction_df,
"prediction_df" = prediction_df
))
}
create_prediction_table <- function(args, src = "fastq"){
if(src == "fastq"){
ranked_df <- create_df_from_synapse_id(args$TESLA_OUT_1.csv)
unranked_df <- create_df_from_synapse_id(args$TESLA_OUT_3.csv)
} else {
ranked_df <- create_df_from_synapse_id(args$TESLA_OUT_2.csv)
unranked_df <- create_df_from_synapse_id(args$TESLA_OUT_4.csv)
}
ranked_df <- ranked_df %>%
separate_rows(VAR_ID, sep = ":") %>%
separate_rows(PROT_POS, sep = ";")
unranked_df <- unranked_df %>%
separate_rows(VAR_ID, sep = ":") %>%
separate_rows(PROT_POS, sep = ";")
check_columns(ranked_df, REQ_RANKED_COLS)
ranked_df <- ranked_df %>%
convert_df_to_types() %>%
dplyr::mutate(SOURCE_ROW_N = as.character(1:n())) %>%
dplyr::mutate(STEP_ID = NA)
if (nrow(unranked_df) > 0 ){
check_columns(unranked_df, REQ_COLS)
unranked_df <- unranked_df %>%
convert_df_to_types() %>%
dplyr::mutate(SOURCE_ROW_N = as.character(1:n())) %>%
dplyr::mutate(RANK = NA)
}
combined_df <- ranked_df %>%
dplyr::select(dplyr::one_of(names(COL_FUNCS), ADDED_COLS)) %>%
convert_df_to_types() %>%
dplyr::filter(!is.na(RANK)) %>%
dplyr::bind_rows(unranked_df) %>%
dplyr::filter(!is.na(HLA_ALLELE)) %>%
dplyr::filter(!is.na(ALT_EPI_SEQ)) %>%
dplyr::mutate(ALT_EPI_SEQ = format_epitopes(ALT_EPI_SEQ)) %>%
dplyr::filter(!ALT_EPI_SEQ == "") %>%
dplyr::mutate(SOURCE = src) %>%
dplyr::group_by_at(vars(-c(PROT_POS, VAR_ID, SOURCE_ROW_N, STEP_ID))) %>%
dplyr::summarise(PROT_POS = list(unique(PROT_POS)),
VAR_ID = list(unique(VAR_ID)),
SOURCE_ROW_N = str_c(unique(SOURCE_ROW_N), collapse = ";"),
STEP_ID = str_c(unique(STEP_ID), collapse = ";")) %>%
dplyr::ungroup() %>%
dplyr::mutate(SUBMISSION_ID = args$submissionId) %>%
dplyr::mutate(PREDICTION_ID = stringr::str_c(
SUBMISSION_ID,
SOURCE,
ALT_EPI_SEQ,
HLA_ALLELE,
sep = "_"))
}
#' Normalise epitope strings: strip all non-letter characters and upper-case.
#'
#' @param epitopes Character vector of epitope sequences (may contain
#'   whitespace, digits or punctuation from hand-entered submissions).
#' @return Character vector of the same length containing only A-Z.
format_epitopes <- function(epitopes){
  # Base gsub() is equivalent to stringr::str_remove_all() here and keeps
  # this helper free of non-base dependencies.
  toupper(gsub("[^A-Za-z]", "", epitopes))
}
# Coerce every column named in COL_FUNCS to its declared type, leaving
# columns absent from `df` untouched (mutate_col_if_exists is a no-op for
# missing columns).
convert_df_to_types <- function(df){
  Reduce(
    function(acc, col_name) {
      mutate_col_if_exists(acc, col_name, col_name, COL_FUNCS[[col_name]])
    },
    names(COL_FUNCS),
    init = df
  )
}
#' Apply `func` to column `old_col` of `df`, storing the result in `new_col`.
#'
#' Does nothing when `old_col` is absent (df[[old_col]] is NULL), so callers
#' may invoke it unconditionally for optional columns.
#'
#' @param df A data.frame.
#' @param old_col Name (string) of the source column.
#' @param new_col Name (string) of the destination column (may equal old_col).
#' @param func Function applied to the source column.
#' @param ... Extra arguments forwarded to `func`.
#' @return `df`, with `new_col` added/overwritten when `old_col` exists.
mutate_col_if_exists <- function(df, old_col, new_col, func, ...){
  # Base `[[` assignment replaces the previous magrittr::extract2() +
  # wrapr::let() + dplyr::mutate() metaprogramming with identical behavior
  # and no non-base dependencies.
  if(!is.null(df[[old_col]])){
    df[[new_col]] <- func(df[[old_col]], ...)
  }
  return(df)
}
#' Stop with an informative error if `df` is missing any required column.
#'
#' @param df A data.frame.
#' @param required_cols Character vector of column names that must be present.
#' @return NULL invisibly; called for its side effect (raising an error).
check_columns <- function(df, required_cols){
  missing_columns <- required_cols[!required_cols %in% colnames(df)]
  # Bug fix: the original tested length(missing_columns > 0) -- the length of
  # a comparison vector -- which only worked by accident. Test the length
  # itself. paste() replaces str_c() so the check has no stringr dependency.
  if(length(missing_columns) > 0){
    stop("df has missing columns: ",
         paste(missing_columns, collapse = ", "))
  }
}
dfs <- submission_df %>%
dplyr::ungroup() %>%
dplyr::group_by(submissionId) %>%
dplyr::group_split() %>%
purrr::map(create_prediction_tables)
dfs %>%
purrr::map("prediction_df") %>%
dplyr::bind_rows() %>%
readr::write_csv("roundx_predictions.csv")
dfs %>%
purrr::map("protein_position_df") %>%
dplyr::bind_rows() %>%
readr::write_csv("roundx_protein_positions.csv")
dfs %>%
purrr::map("variant_prediction_df") %>%
dplyr::bind_rows() %>%
readr::write_csv("roundx_prediction_variants.csv")
dfs %>%
purrr::map("bad_prediction_df") %>%
dplyr::bind_rows() %>%
readr::write_csv("roundx_bad_predictions.csv")
|
#' Package: siteFreqBounds
#'
#' A collection of functions plotting model-independent bounds on summary statistics of the site frequency
#' spectrum. See examples
#' for functions \code{\link{f.min}} and \code{\link{f.max}}.
#' @seealso \code{\link{f.min}}
#' @seealso \code{\link{f.max}}
#' @seealso \code{\link{f.thetaPi.max}}
#' @seealso \code{\link{f.thetaPi.min}}
#' @seealso \code{\link{plot_tajima_Pi_k}}
#' @seealso \code{\link{plot_tajima_Pi_n}}
#' @seealso \code{\link{plot_tajima_Pi_S}}
#' @seealso \code{\link{little.a}}
#' @seealso \code{\link{big.a}}
#' @seealso \code{\link{v.T}}
#' @seealso \code{\link{u.T}}
#' @seealso \code{\link{v.F}}
#' @seealso \code{\link{u.F}}
#' @seealso \code{\link{f.TajimaD.max}}
#' @seealso \code{\link{f.TajimaD.min}}
#' @seealso \code{\link{plot_tajima_D_k}}
#' @seealso \code{\link{plot_tajima_D_S}}
#' @seealso \code{\link{plot_tajima_D_n}}
#' @seealso \code{\link{f.FuLiF.max}}
#' @seealso \code{\link{f.FuLiF.min}}
#' @seealso \code{\link{plot_Fu_li_k}}
#' @seealso \code{\link{plot_statistic}}
#' @seealso \code{\link{plot_n}}
#' @seealso \code{\link{plot_S}}
#' @seealso \code{\link{plot_Fu_li_S}}
#' @seealso \code{\link{plot_Fu_li_n}}
#' @seealso \code{\link{getCommand}}
#' @seealso \code{\link{runCommand}}
#' @docType package
#' @name siteFreqBounds
NULL
|
/siteFreqBounds/R/siteFreqBounds.R
|
no_license
|
RayneHernandez/ARY
|
R
| false
| false
| 1,315
|
r
|
#' Package: siteFreqBounds
#'
#' A collection of functions plotting model-independent bounds on summary statistics of the site frequency
#' spectrum. See examples
#' for functions \code{\link{f.min}} and \code{\link{f.max}}.
#' @seealso \code{\link{f.min}}
#' @seealso \code{\link{f.max}}
#' @seealso \code{\link{f.thetaPi.max}}
#' @seealso \code{\link{f.thetaPi.min}}
#' @seealso \code{\link{plot_tajima_Pi_k}}
#' @seealso \code{\link{plot_tajima_Pi_n}}
#' @seealso \code{\link{plot_tajima_Pi_S}}
#' @seealso \code{\link{little.a}}
#' @seealso \code{\link{big.a}}
#' @seealso \code{\link{v.T}}
#' @seealso \code{\link{u.T}}
#' @seealso \code{\link{v.F}}
#' @seealso \code{\link{u.F}}
#' @seealso \code{\link{f.TajimaD.max}}
#' @seealso \code{\link{f.TajimaD.min}}
#' @seealso \code{\link{plot_tajima_D_k}}
#' @seealso \code{\link{plot_tajima_D_S}}
#' @seealso \code{\link{plot_tajima_D_n}}
#' @seealso \code{\link{f.FuLiF.max}}
#' @seealso \code{\link{f.FuLiF.min}}
#' @seealso \code{\link{plot_Fu_li_k}}
#' @seealso \code{\link{plot_statistic}}
#' @seealso \code{\link{plot_n}}
#' @seealso \code{\link{plot_S}}
#' @seealso \code{\link{plot_Fu_li_S}}
#' @seealso \code{\link{plot_Fu_li_n}}
#' @seealso \code{\link{getCommand}}
#' @seealso \code{\link{runCommand}}
#' @docType package
#' @name siteFreqBounds
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gadgetlikelihood.R
\name{gadgetlikelihood}
\alias{gadgetlikelihood}
\title{Create a gadgetlikelihood object}
\usage{
gadgetlikelihood(file_name, path, missingOkay = FALSE)
}
\arguments{
\item{file_name}{The name of the likelihood file}
\item{path}{The path to the gadget directory to read from}
\item{missingOkay}{If \code{TRUE}, return an empty likelihood file object if file does not exist.}
}
\value{
A list of likelihood components representing file
}
\description{
Create a likelihood file object, from fresh or an existing file.
}
\examples{
path <- './cod-model'
gadgetlikelihood('likelihood', path, missingOkay = TRUE) # Read 'likelihood' likelihood file, creating it if it doesn't exist
}
|
/man/gadgetlikelihood.Rd
|
no_license
|
sCervino/rgadget
|
R
| false
| true
| 779
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gadgetlikelihood.R
\name{gadgetlikelihood}
\alias{gadgetlikelihood}
\title{Create a gadgetlikelihood object}
\usage{
gadgetlikelihood(file_name, path, missingOkay = FALSE)
}
\arguments{
\item{file_name}{The name of the likelihood file}
\item{path}{The path to the gadget directory to read from}
\item{missingOkay}{If \code{TRUE}, return an empty likelihood file object if file does not exist.}
}
\value{
A list of likelihood components representing file
}
\description{
Create a likelihood file object, from fresh or an existing file.
}
\examples{
path <- './cod-model'
gadgetlikelihood('likelihood', path, missingOkay = TRUE) # Read 'likelihood' likelihood file, creating it if it doesn't exist
}
|
# Assignment: ASSIGNMENT 3
# Name: Shekhar, Manish
# Date: 2021-03-28
## Load the ggplot2 package
library(ggplot2)
theme_set(theme_minimal())
## Set the working directory to the root of your DSC 520 directory
# setwd("/home/jdoe/Workspaces/dsc520")
# This step was not needed as copied heights.csv to my working directory
## Load the `data/r4ds/heights.csv` to
heights_df <- read.csv("heights.csv")
heights_df
# https://ggplot2.tidyverse.org/reference/geom_point.html
## Using `geom_point()` create three scatterplots for
## `height` vs. `earn`
# creating a data object for reusability
data <- ggplot(data = heights_df)
data + geom_point(aes(x=height, y=earn), color = "Blue")
## `age` vs. `earn`
data + geom_point(aes(x=age, y=earn), color = "Blue")
## `ed` vs. `earn`
data + geom_point(aes(x=ed, y=earn), color = "Blue")
## Re-create the three scatterplots and add a regression trend line using
## the `geom_smooth()` function
## `height` vs. `earn`
# One way using layering. geom_smooth() reported missing x and y aesthetics
# because layers inherit aesthetics from ggplot(), not from sibling layers
# such as geom_point(), so the aesthetics must be passed to each layer.
data + geom_point(aes(x=height, y=earn)) + geom_smooth(aes(x=height, y=earn))
# Another way: geom_smooth() inherits aesthetics from ggplot() just fine.
ggplot(data = heights_df, aes(x=height, y=earn)) + geom_point(size = 1) + geom_smooth()
## `age` vs. `earn`
ggplot(data = heights_df, aes(x=age, y=earn)) + geom_point(size = 1) + geom_smooth()
## `ed` vs. `earn`
ggplot(data = heights_df, aes(x=ed, y=earn)) + geom_point(size = 1) + geom_smooth()
## Create a scatterplot of `height`` vs. `earn`. Use `sex` as the `col` (color) attribute
ggplot(data = heights_df, aes(x=height, y=earn, col=sex)) + geom_point()
## Using `ggtitle()`, `xlab()`, and `ylab()` to add a title, x label, and y label to the previous plot
## Title: Height vs. Earnings
## X label: Height (Inches)
## Y Label: Earnings (Dollars)
# Scatterplot of height vs. earnings colored by sex, with title and axis
# labels per the assignment spec above.
ggplot(data = heights_df, aes(x=height, y=earn, col=sex)) +
  geom_point() +
  ggtitle(label = "Height vs. Earnings") +
  # Bug fix: the x label was mistyped as "Height (I(nches)"; the spec in the
  # comment above requires "Height (Inches)".
  xlab(label = "Height (Inches)") +
  ylab(label = "Earnings (Dollars)")
# https://ggplot2.tidyverse.org/reference/geom_histogram.html
## Create a histogram of the `earn` variable using `geom_histogram()`
ggplot(data = heights_df, aes(x=earn)) + geom_histogram()
## Create a histogram of the `earn` variable using `geom_histogram()`
## Use 10 bins
ggplot(data = heights_df, aes(x=earn)) + geom_histogram(bins=10)
# https://ggplot2.tidyverse.org/reference/geom_density.html
## Create a kernel density plot of `earn` using `geom_density()`
ggplot(data = heights_df, aes(x=earn)) + geom_density()
|
/assignment_03_Shekhar_Manish.R
|
no_license
|
datatodecision/stats_for_data_science
|
R
| false
| false
| 2,688
|
r
|
# Assignment: ASSIGNMENT 3
# Name: Shekhar, Manish
# Date: 2021-03-28
## Load the ggplot2 package
library(ggplot2)
theme_set(theme_minimal())
## Set the working directory to the root of your DSC 520 directory
# setwd("/home/jdoe/Workspaces/dsc520")
# This step was not needed as copied heights.csv to my working directory
## Load the `data/r4ds/heights.csv` to
heights_df <- read.csv("heights.csv")
heights_df
# https://ggplot2.tidyverse.org/reference/geom_point.html
## Using `geom_point()` create three scatterplots for
## `height` vs. `earn`
# creating a data object for reusability
data <- ggplot(data = heights_df)
data + geom_point(aes(x=height, y=earn), color = "Blue")
## `age` vs. `earn`
data + geom_point(aes(x=age, y=earn), color = "Blue")
## `ed` vs. `earn`
data + geom_point(aes(x=ed, y=earn), color = "Blue")
## Re-create the three scatterplots and add a regression trend line using
## the `geom_smooth()` function
## `height` vs. `earn`
# one way using layering, not sure why geom_smooth() was crying for missing aesthetics x and y. It seems
# it is not able to inherit from aesthetics in geom_point(), thus I had to pass aethetics separately
data + geom_point(aes(x=height, y=earn)) + geom_smooth(aes(x=height, y=earn))
# another way, geom_smooth() interits aesthetics from ggplot() just fine
ggplot(data = heights_df, aes(x=height, y=earn)) + geom_point(size = 1) + geom_smooth()
## `age` vs. `earn`
ggplot(data = heights_df, aes(x=age, y=earn)) + geom_point(size = 1) + geom_smooth()
## `ed` vs. `earn`
ggplot(data = heights_df, aes(x=ed, y=earn)) + geom_point(size = 1) + geom_smooth()
## Create a scatterplot of `height`` vs. `earn`. Use `sex` as the `col` (color) attribute
ggplot(data = heights_df, aes(x=height, y=earn, col=sex)) + geom_point()
## Using `ggtitle()`, `xlab()`, and `ylab()` to add a title, x label, and y label to the previous plot
## Title: Height vs. Earnings
## X label: Height (Inches)
## Y Label: Earnings (Dollars)
# Scatterplot of height vs. earnings colored by sex, with title and axis
# labels per the assignment spec above.
ggplot(data = heights_df, aes(x=height, y=earn, col=sex)) +
  geom_point() +
  ggtitle(label = "Height vs. Earnings") +
  # Bug fix: the x label was mistyped as "Height (I(nches)"; the spec in the
  # comment above requires "Height (Inches)".
  xlab(label = "Height (Inches)") +
  ylab(label = "Earnings (Dollars)")
# https://ggplot2.tidyverse.org/reference/geom_histogram.html
## Create a histogram of the `earn` variable using `geom_histogram()`
ggplot(data = heights_df, aes(x=earn)) + geom_histogram()
## Create a histogram of the `earn` variable using `geom_histogram()`
## Use 10 bins
ggplot(data = heights_df, aes(x=earn)) + geom_histogram(bins=10)
# https://ggplot2.tidyverse.org/reference/geom_density.html
## Create a kernel density plot of `earn` using `geom_density()`
ggplot(data = heights_df, aes(x=earn)) + geom_density()
|
#Load data
plastics <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-01-26/plastics.csv')
#Load libraries
library(tidyr)
library(dplyr)
library(plyr)
library(readr)
library(plotrix)
library(ggplot2)
library(forcats)
plastics$year<-as.character(plastics$year)#change year from numeric to character to be able to tidy by this group
plastics<-plastics[,-4]#remove empty column
# Filter to the three top polluters.
# Bug fix: `parent_company == c(...)` recycles the length-3 vector
# element-wise against the column, silently dropping rows whose position
# doesn't happen to line up with the matching company name. `%in%` performs
# the intended set-membership test and keeps every row for these companies.
pa <- plastics %>% filter(parent_company %in% c("The Coca-Cola Company","PepsiCo","Nestlé"))
View(pa)
#I want to first sum by year and parent company for each plastic type
hdpe <- aggregate(pa$hdpe,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
colnames(hdpe) <- c("year","parent_company","hdpe")
pet <- aggregate(pa$pet,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
ldpe <- aggregate(pa$ldpe,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
pp <- aggregate(pa$pp,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
ps <- aggregate(pa$ps,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
o <- aggregate(pa$o,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
pvc <- aggregate(pa$pvc,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
gt <- aggregate(pa$grand_total,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
p<-cbind(hdpe, pet$x, ldpe$x,pp$x, ps$x, o$x, pvc$x,gt$x)#Then bind each of the sums into one dataframe
colnames(p) <- c("year","parent_company","hdpe","pet","ldpe","pp","ps","o","pvc","gt")#rename columns in new dataframe
View(p)
#One way to make plastic type into tidy format (aka wide to long)
p_long<-p %>% pivot_longer(cols=hdpe:pvc,names_to="plastic.type",values_to="count",values_drop_na = TRUE) #change so plastic types are in tidy format; cols= columns to convert to long format, names_to= name of new categorical column, values_to= names column with values in cells of columns you're tidying and values_drop_na= drops any NAs if there are any
View(p_long)
#I want to add classification for each plastic type's recyclability (this is general since it depends on the local facilities in each country):
p_long$recyclable<-p_long$plastic.type %>% fct_collapse(Recyclable= c("hdpe","pet","ldpe","pp"), Nonrecyclable=c("ps","o","pvc"))
#add column for proportion of total plastics in each plastic type
p_long$prop<-(p_long$count/p_long$gt)
#Make donut charts for each company
###Nestle
np<-p_long %>% filter(parent_company=="Nestlรฉ")
#Compute the cumulative percentages (top of each rectangle)
np$ymax = cumsum(np$prop)
#Compute the bottom of each rectangle
np$ymin = c(0, head(np$ymax, n=-1))
#Compute label position
np$labelPosition <- (np$ymax + np$ymin) / 2
#
#Compute a good label
np$label <- paste0(np$plastic.type, "\n ", np$count)#"\n " put space between the plastic type and its count
# Make the plot
j<-ggplot(np, aes(ymax=ymax, ymin=ymin, xmax=4, xmin=3, fill=recyclable)) +
geom_rect() +
geom_label( x=4, aes(y=labelPosition, label=label), size=4) +
scale_fill_brewer(palette=2) +
scale_color_brewer(palette=2)+
coord_polar(theta="y") + #turns bar plot into a ring shape
xlim(c(0, 4)) +
theme_void() #gets rid of gray background in plot
#Add company logo to center of donut plot
logo <- image_read("desktop/nestle.png")
j
grid::grid.raster(logo, x = 0.4, y = 0.5, just = c('center','center'), width = unit(1, 'inches'))#x and y are values between 0 and 1 with 0 being the far bottom or left and 1 being the far top or right of the plot, width controls the size of the logo
###PepsiCo
pep<-p_long %>% filter(parent_company=="PepsiCo")
#Compute the cumulative percentages (top of each rectangle)
pep$ymax = cumsum(pep$prop)
#Compute the bottom of each rectangle
pep$ymin = c(0, head(pep$ymax, n=-1))
#Compute label position
pep$labelPosition <- (pep$ymax + pep$ymin) / 2
#
#Compute a good label
pep$label <- paste0(pep$plastic.type, "\n ", pep$count)#"\n " put space between the plastic type and its count
#Make the plot
j<-ggplot(pep, aes(ymax=ymax, ymin=ymin, xmax=4, xmin=3, fill=recyclable)) +
geom_rect() +
geom_label( x=4, aes(y=labelPosition, label=label), size=4) +
scale_fill_brewer(palette=2) +
scale_color_brewer(palette=2)+
coord_polar(theta="y") +
xlim(c(0, 4)) +
theme_void()
#Add company logo to center of plot
logo <- image_read("desktop/PEPSICO.png")
j
grid::grid.raster(logo, x = 0.4, y = 0.5, just = c('center','center'), width = unit(2, 'inches'))
###Coca-cola
cc<-p_long %>% filter(parent_company=="The Coca-Cola Company", year=="2019")
#Compute the cumulative percentages (top of each rectangle)
cc$ymax = cumsum(cc$prop)
#Compute the bottom of each rectangle
cc$ymin = c(0, head(cc$ymax, n=-1))
#Compute label position
cc$labelPosition <- (cc$ymax + cc$ymin) / 2
#Compute a good label
cc$label <- paste0(cc$plastic.type, "\n ", cc$count)#"\n " put space between the plastic type and its count
# Make the plot
j<-ggplot(cc, aes(ymax=ymax, ymin=ymin, xmax=4, xmin=3, fill=recyclable)) +
geom_rect() +
geom_label( x=4, aes(y=labelPosition, label=label), size=4) +
scale_fill_brewer(palette=2) +
scale_color_brewer(palette=2)+
coord_polar(theta="y") +
xlim(c(0, 4)) +
theme_void()
#Add company logo to center of plot
logo <- image_read("desktop/cocacola.png")
j
grid::grid.raster(logo, x = 0.4, y = 0.5, just = c('center','center'), width = unit(2.5, 'inches'))
|
/20210126/tidyscript_20210126.R
|
no_license
|
jsglanz/TidyTuesdayCSUN
|
R
| false
| false
| 5,947
|
r
|
#Load data
plastics <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-01-26/plastics.csv')
#Load libraries
library(tidyr)
library(dplyr)
library(plyr)
library(readr)
library(plotrix)
library(ggplot2)
library(forcats)
plastics$year<-as.character(plastics$year)#change year from numeric to character to be able to tidy by this group
plastics<-plastics[,-4]#remove empty column
# Filter to the top polluters.
# BUG FIX: the original used `parent_company == c(...)`. With `==`, the
# length-3 vector is recycled against the column, so a row was kept only when
# its position happened to line up with the matching company name — silently
# dropping roughly two thirds of the matching rows. `%in%` tests set
# membership, which is what was intended.
pa <- plastics %>% filter(parent_company %in% c("The Coca-Cola Company", "PepsiCo", "Nestlรฉ"))
View(pa)
#I want to first sum by year and parent company for each plastic type
hdpe <- aggregate(pa$hdpe,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
colnames(hdpe) <- c("year","parent_company","hdpe")
pet <- aggregate(pa$pet,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
ldpe <- aggregate(pa$ldpe,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
pp <- aggregate(pa$pp,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
ps <- aggregate(pa$ps,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
o <- aggregate(pa$o,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
pvc <- aggregate(pa$pvc,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
gt <- aggregate(pa$grand_total,
by = list(pa$year,
pa$parent_company),
FUN = 'sum', na.rm=TRUE)
p<-cbind(hdpe, pet$x, ldpe$x,pp$x, ps$x, o$x, pvc$x,gt$x)#Then bind each of the sums into one dataframe
colnames(p) <- c("year","parent_company","hdpe","pet","ldpe","pp","ps","o","pvc","gt")#rename columns in new dataframe
View(p)
#One way to make plastic type into tidy format (aka wide to long)
p_long<-p %>% pivot_longer(cols=hdpe:pvc,names_to="plastic.type",values_to="count",values_drop_na = TRUE) #change so plastic types are in tidy format; cols= columns to convert to long format, names_to= name of new categorical column, values_to= names column with values in cells of columns you're tidying and values_drop_na= drops any NAs if there are any
View(p_long)
#I want to add classification for each plastic type's recyclability (this is general since it depends on the local facilities in each country):
p_long$recyclable<-p_long$plastic.type %>% fct_collapse(Recyclable= c("hdpe","pet","ldpe","pp"), Nonrecyclable=c("ps","o","pvc"))
#add column for proportion of total plastics in each plastic type
p_long$prop<-(p_long$count/p_long$gt)
#Make donut charts for each company
###Nestle
np<-p_long %>% filter(parent_company=="Nestlรฉ")
#Compute the cumulative percentages (top of each rectangle)
np$ymax = cumsum(np$prop)
#Compute the bottom of each rectangle
np$ymin = c(0, head(np$ymax, n=-1))
#Compute label position
np$labelPosition <- (np$ymax + np$ymin) / 2
#
#Compute a good label
np$label <- paste0(np$plastic.type, "\n ", np$count)#"\n " put space between the plastic type and its count
# Make the plot
j<-ggplot(np, aes(ymax=ymax, ymin=ymin, xmax=4, xmin=3, fill=recyclable)) +
geom_rect() +
geom_label( x=4, aes(y=labelPosition, label=label), size=4) +
scale_fill_brewer(palette=2) +
scale_color_brewer(palette=2)+
coord_polar(theta="y") + #turns bar plot into a ring shape
xlim(c(0, 4)) +
theme_void() #gets rid of gray background in plot
#Add company logo to center of donut plot
logo <- image_read("desktop/nestle.png")
j
grid::grid.raster(logo, x = 0.4, y = 0.5, just = c('center','center'), width = unit(1, 'inches'))#x and y are values between 0 and 1 with 0 being the far bottom or left and 1 being the far top or right of the plot, width controls the size of the logo
###PepsiCo
pep<-p_long %>% filter(parent_company=="PepsiCo")
#Compute the cumulative percentages (top of each rectangle)
pep$ymax = cumsum(pep$prop)
#Compute the bottom of each rectangle
pep$ymin = c(0, head(pep$ymax, n=-1))
#Compute label position
pep$labelPosition <- (pep$ymax + pep$ymin) / 2
#
#Compute a good label
pep$label <- paste0(pep$plastic.type, "\n ", pep$count)#"\n " put space between the plastic type and its count
#Make the plot
j<-ggplot(pep, aes(ymax=ymax, ymin=ymin, xmax=4, xmin=3, fill=recyclable)) +
geom_rect() +
geom_label( x=4, aes(y=labelPosition, label=label), size=4) +
scale_fill_brewer(palette=2) +
scale_color_brewer(palette=2)+
coord_polar(theta="y") +
xlim(c(0, 4)) +
theme_void()
#Add company logo to center of plot
logo <- image_read("desktop/PEPSICO.png")
j
grid::grid.raster(logo, x = 0.4, y = 0.5, just = c('center','center'), width = unit(2, 'inches'))
###Coca-cola
cc<-p_long %>% filter(parent_company=="The Coca-Cola Company", year=="2019")
#Compute the cumulative percentages (top of each rectangle)
cc$ymax = cumsum(cc$prop)
#Compute the bottom of each rectangle
cc$ymin = c(0, head(cc$ymax, n=-1))
#Compute label position
cc$labelPosition <- (cc$ymax + cc$ymin) / 2
#Compute a good label
cc$label <- paste0(cc$plastic.type, "\n ", cc$count)#"\n " put space between the plastic type and its count
# Make the plot
j<-ggplot(cc, aes(ymax=ymax, ymin=ymin, xmax=4, xmin=3, fill=recyclable)) +
geom_rect() +
geom_label( x=4, aes(y=labelPosition, label=label), size=4) +
scale_fill_brewer(palette=2) +
scale_color_brewer(palette=2)+
coord_polar(theta="y") +
xlim(c(0, 4)) +
theme_void()
#Add company logo to center of plot
logo <- image_read("desktop/cocacola.png")
j
grid::grid.raster(logo, x = 0.4, y = 0.5, just = c('center','center'), width = unit(2.5, 'inches'))
|
# Compute per-feature F-statistics and parametric p-values comparing an
# alternative linear model (including the latent variable of interest, LV)
# against a null model without it.
#
# Args:
#   dat: m x n data matrix (rows = features, e.g. genes; columns = n
#        observations).
#   LV: latent variable(s) of interest; length n (or n rows).
#   ALV: optional adjustment latent variable(s), included in BOTH the
#        alternative and null models; must be a vector of length n or a
#        matrix with n rows.
#   covariate: optional known covariate(s), included in BOTH models.
#
# Returns: list(fstat = F-statistics, p.value = upper-tail p-values from the
# F distribution with (df.alt - df.null, n - df.alt) degrees of freedom).
#
# Note: RSS() (defined alongside this function) returns rss/rsst rather than
# a raw RSS; since rsst is identical for both models, the scaling cancels in
# the F ratio below.
FSTAT = function(dat, LV, ALV=NULL, covariate=NULL) {
# Calculate F-statistics and parametric significance
m = dim(dat)[1]
n = dim(dat)[2]
# Build the design matrices. `seq(n)` is only a dummy response used so that
# model.matrix() can construct the design from the right-hand side.
if(is.null(ALV)) {
if(is.null(covariate)) {
model.alt = model.matrix(seq(n) ~ LV)
model.null = model.matrix(seq(n) ~ 1)
}
if(!is.null(covariate)) {
model.alt = model.matrix(seq(n) ~ LV + covariate)
model.null = model.matrix(seq(n) ~ 1 + covariate)
}
} else if(is.matrix(ALV) || is.vector(ALV)) {
# Adjustment variables appear in both models so only LV's contribution is
# tested by the F statistic.
if(is.null(covariate)) {
model.alt = model.matrix(seq(n) ~ LV + ALV)
model.null = model.matrix(seq(n) ~ 1 + ALV)
}
if(!is.null(covariate)) {
model.alt = model.matrix(seq(n) ~ LV + ALV + covariate)
model.null = model.matrix(seq(n) ~ 1 + ALV + covariate)
}
} else {
stop("Invalid arguments into a function \'fstat\'. Adjustment latent variable must be either a matrix (n rows) or a vector (size n)")
}
RSS.alt = RSS(dat, model.alt)
RSS.null = RSS(dat, model.null)
# Classic F statistic: (drop in RSS per extra parameter) / (residual mean
# square of the alternative model).
fstat = (RSS.null - RSS.alt)/(ncol(model.alt)-ncol(model.null)) / (RSS.alt/(n-ncol(model.alt)))
fstat.pval = 1-pf(fstat, ncol(model.alt)-ncol(model.null), n-ncol(model.alt))
return(list(fstat=fstat, p.value=fstat.pval))
}
# Relative residual sum of squares for each row of `dat` under the linear
# model with design matrix `mod`.
#
# Args:
#   dat: numeric vector (length n) or an m x n matrix (rows = features,
#        columns = observations).
#   mod: n x k design matrix (e.g. from model.matrix()).
#
# Returns: rss/rsst per row, i.e. the proportion of total variation (about
# the grand mean of `dat`) NOT explained by the model. Despite the name,
# this is a scaled RSS; the rsst factor is identical for any two models fit
# to the same `dat`, so it cancels when ratios are formed (as in FSTAT).
#
# Changes vs. original: removed dead stores (`m` and `r2.total` were computed
# but never used) and corrected the comments, which mislabelled the returned
# quantity. Return value is unchanged.
RSS = function(dat, mod){
  if(is.vector(dat)) {
    n = length(dat)
  } else {
    n = dim(dat)[2]
  }
  Id = diag(n)
  # Project onto the orthogonal complement of the column space of `mod` to
  # obtain residuals. NOTE(review): solve(t(mod) %*% mod) assumes `mod` has
  # full column rank — confirm callers never pass a rank-deficient design.
  res = dat %*% (Id - mod %*% solve(t(mod) %*% mod) %*% t(mod))
  rss = res^2 %*% rep(1, n)                  # row-wise residual sum of squares
  rsst = (dat - mean(dat))^2 %*% rep(1, n)   # row-wise total SS about the grand mean
  rss/rsst                                   # residual (unexplained) R^2
}
# Empirical p-values for observed statistics, pooling the null statistics
# across all variables (e.g. genes).
#
# Args:
#   lr: observed statistics.
#   lr0: null statistics (e.g. from resampled residuals).
#
# Returns: one p-value per element of `lr`, in the original order. Each
# p-value is (1 + number of pooled null statistics at least as large as the
# observed value) / (length(lr0) + 2), which keeps p-values strictly inside
# (0, 1).
getp = function(lr, lr0) {
  n.obs = length(lr)
  # Mark which entries of the pooled vector are observed, then sort the
  # marks by the combined statistics in descending order.
  is.obs = c(rep(TRUE, n.obs), rep(FALSE, length(lr0)))
  is.obs = is.obs[rev(order(c(lr, lr0)))]
  pos = seq_along(is.obs)
  # For the k-th largest observed statistic at overall position pos_k, there
  # are (pos_k - k) null statistics above it.
  pvals = ((pos[is.obs] - seq_len(n.obs)) + 1) / (length(lr0) + 2)
  # pvals is ordered by decreasing statistic; map back to input order.
  pvals[rank(-lr)]
}
|
/jackstraw/R/generic.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,051
|
r
|
FSTAT = function(dat, LV, ALV=NULL, covariate=NULL) {
# Calculate F-statistics and parametric significance
m = dim(dat)[1]
n = dim(dat)[2]
if(is.null(ALV)) {
if(is.null(covariate)) {
model.alt = model.matrix(seq(n) ~ LV)
model.null = model.matrix(seq(n) ~ 1)
}
if(!is.null(covariate)) {
model.alt = model.matrix(seq(n) ~ LV + covariate)
model.null = model.matrix(seq(n) ~ 1 + covariate)
}
} else if(is.matrix(ALV) || is.vector(ALV)) {
if(is.null(covariate)) {
model.alt = model.matrix(seq(n) ~ LV + ALV)
model.null = model.matrix(seq(n) ~ 1 + ALV)
}
if(!is.null(covariate)) {
model.alt = model.matrix(seq(n) ~ LV + ALV + covariate)
model.null = model.matrix(seq(n) ~ 1 + ALV + covariate)
}
} else {
stop("Invalid arguments into a function \'fstat\'. Adjustment latent variable must be either a matrix (n rows) or a vector (size n)")
}
RSS.alt = RSS(dat, model.alt)
RSS.null = RSS(dat, model.null)
fstat = (RSS.null - RSS.alt)/(ncol(model.alt)-ncol(model.null)) / (RSS.alt/(n-ncol(model.alt)))
fstat.pval = 1-pf(fstat, ncol(model.alt)-ncol(model.null), n-ncol(model.alt))
return(list(fstat=fstat, p.value=fstat.pval))
}
RSS = function(dat,mod){
# Calculate residual sum of squares, comparing to an alternative model
if(is.vector(dat)) {
m = 1
n = length(dat)
} else {
m = dim(dat)[1]
n = dim(dat)[2]
}
Id = diag(n)
res = dat %*% (Id - mod %*% solve(t(mod) %*% mod) %*% t(mod)) #Residuals of the model
rss = res^2 %*% rep(1,n)
rsst = (dat-mean(dat))^2 %*% rep(1,n)
r2.total = 1 - (rss/rsst) #R^2 explained by the model
residual.r2 = rss/rsst #R^2 not explained by the model
return(residual.r2)
}
getp = function(lr,lr0) {
# Get resampled p-values, pulling across variables (e.g., genes)
# lr: observed statistics
# lr0: null statistics (i.e. from resampled residuals)
m = length(lr)
v = c(rep(TRUE,m),rep(FALSE,length(lr0)))
v = v[rev(order(c(lr,lr0)))]
u = 1:length(v)
w = 1:m
p = ((u[v==TRUE]-w)+1)/(length(lr0)+2)
p = p[rank(-lr)]
return(p)
}
|
trainData <- read.csv("C:\\REGIS\\practium1\\data\\train.csv",header = TRUE , stringsAsFactors = FALSE)
testData <- read.csv("C:\\REGIS\\practium1\\data\\test.csv",header = TRUE , stringsAsFactors = FALSE)
head(trainData$SalePrice)
train_index <- 1460
str(trainData)
str(testData)
testData$SalePrice <- 0
# 1.remove outliers GrLivArea > 4k but SalePRice < 200k
library(plyr)
library(dplyr)
trainData <- trainData %>% filter(!(GrLivArea > 4000 & SalePrice < 200000))
str(trainData)
train_index <- 1458
# 2. Log transform "SalePrice" response variable to remove right skew
trainData$SalePrice <- log10(trainData$SalePrice)
library(ggplot2)
ggplot(trainData,aes(SalePrice)) + geom_histogram()
summary(trainData$SalePrice)
# 3. Combine train.csv and test.csv into one dataset
full_data <- rbind(trainData,testData)
str(full_data)
summary(full_data$SalePrice)
# 4.these feature below have NA for ther value but it is not missing data
# but simply that house does not have that feature so convert NA to NONE
library("mice")
col_na_to_none <- c("Alley","BsmtQual","BsmtCond","BsmtExposure","BsmtFinType1","BsmtFinType2",
"FireplaceQu","GarageType","GarageFinish","GarageQual","GarageCond",
"PoolQC","Fence","MiscFeature")
full_data[col_na_to_none][is.na(full_data[col_na_to_none])] <- "None"
na_to_none_Data <- as.data.frame(unclass(full_data))
str(na_to_none_Data[col_na_to_none])
summary(na_to_none_Data)
# 5.these three feature is categorical feature but sea enterd as numberic
# so convert them back to factor
na_to_none_Data$MSSubClass <- as.factor(na_to_none_Data$MSSubClass)
na_to_none_Data$MoSold <- as.factor(na_to_none_Data$MoSold)
na_to_none_Data$YrSold <- as.factor(na_to_none_Data$YrSold)
str(na_to_none_Data$MSSubClass)
str(na_to_none_Data$MoSold)
str(na_to_none_Data$YrSold)
# separate numeric and categorical features so they are easier to clean up
numberic_only_data <- na_to_none_Data %>% select(which(sapply(na_to_none_Data, is.numeric)))
factor_only_data <- na_to_none_Data %>% select(which(sapply(na_to_none_Data, is.factor)))
str(numberic_only_data)
str(factor_only_data)
# 6.imputing missing data for numberic data
m <- mice(numberic_only_data, method="rf")
no_missing_numberic_data <- mice::complete(m)
sum(is.na(no_missing_numberic_data))
# 7.scale numeric feature except id and SalePrice
library(dplyr)
noId_sale_price_data <- no_missing_numberic_data %>% select(-Id,-SalePrice)
scale_center_num_data <- scale(noId_sale_price_data,center=TRUE,scale=TRUE)
scale_center_num_data <- cbind(Id = no_missing_numberic_data$Id,scale_center_num_data,SalePrice=no_missing_numberic_data$SalePrice)
summary(scale_center_num_data)
# 8.log transform numberic predictors that are have skewness > 1 and skewness > -1
tmp <- as.data.frame(scale_center_num_data)
library(e1071)
df <- data.frame(name=character(),skewness=numeric())
for(i in colnames(tmp)){
df = rbind(df, data.frame(name = i, skewness = skewness(tmp[[i]])))
}
sort_skewness_feature <- df %>% arrange(desc(skewness)) %>% filter(skewness >1 & skewness > -1)
sort_skewness_feature
for(i in sort_skewness_feature$name){
tmp[[i]] <- log10((abs(tmp[[i]])))
}
dx <- data.frame(name=character(),skewness=numeric())
for(i in sort_skewness_feature$name){
dx = rbind(dx, data.frame(name = i, skewness = skewness(tmp[[i]])))
}
sort_dx <- dx %>% arrange(desc(skewness))
sort_dx
scale_center_num_data <- tmp
# 9.replace Na for factor data with the mode of that feature
# Return the most frequent value (statistical mode) of a vector.
# Ties are broken in favour of the value that appears first in `v`.
getmode <- function(v) {
  vals <- unique(v)
  counts <- tabulate(match(v, vals))
  vals[which.max(counts)]
}
for(i in 1:ncol(factor_only_data)){
factor_only_data[is.na(factor_only_data[,i]), i] <- getmode(factor_only_data[,i])
}
sum(is.na(factor_only_data))
# combine the numeric and categorical data back together
clean_data <- cbind(scale_center_num_data,factor_only_data)
str(clean_data)
# 10. Sub-select the features that have predictive power using the Boruta package
library(Boruta)
set.seed(8)  # Boruta is random-forest based; fix the seed for reproducibility
feature.selection <- Boruta(SalePrice ~., data = clean_data[1:train_index,], doTrace = 1)
table(feature.selection$finalDecision)
# BUG FIX: the original assigned to a misspelled name ("slelected_feature")
# and then referenced a second, undefined misspelling ("selected_feautres")
# inside select(), which errors at runtime. One consistent name fixes both.
selected_features <- getSelectedAttributes(feature.selection)
# all_of() makes selection by an external character vector explicit.
selected_features_data <- clean_data %>% select(Id, all_of(selected_features), SalePrice)
colnames(selected_features_data)
write.csv(selected_features_data, file = "C:\\REGIS\\practium1\\data\\selectedFeatureData.csv", row.names = FALSE)
|
/clean_data.R
|
no_license
|
minhthien/Practicum1-house_price-regression
|
R
| false
| false
| 4,510
|
r
|
trainData <- read.csv("C:\\REGIS\\practium1\\data\\train.csv",header = TRUE , stringsAsFactors = FALSE)
testData <- read.csv("C:\\REGIS\\practium1\\data\\test.csv",header = TRUE , stringsAsFactors = FALSE)
head(trainData$SalePrice)
train_index <- 1460
str(trainData)
str(testData)
testData$SalePrice <- 0
# 1.remove outliers GrLivArea > 4k but SalePRice < 200k
library(plyr)
library(dplyr)
trainData <- trainData %>% filter(!(GrLivArea > 4000 & SalePrice < 200000))
str(trainData)
train_index <- 1458
# 2. Log transform "SalePrice" response variable to remove right skew
trainData$SalePrice <- log10(trainData$SalePrice)
library(ggplot2)
ggplot(trainData,aes(SalePrice)) + geom_histogram()
summary(trainData$SalePrice)
# 3. Combine train.csv and test.csv into one dataset
full_data <- rbind(trainData,testData)
str(full_data)
summary(full_data$SalePrice)
# 4.these feature below have NA for ther value but it is not missing data
# but simply that house does not have that feature so convert NA to NONE
library("mice")
col_na_to_none <- c("Alley","BsmtQual","BsmtCond","BsmtExposure","BsmtFinType1","BsmtFinType2",
"FireplaceQu","GarageType","GarageFinish","GarageQual","GarageCond",
"PoolQC","Fence","MiscFeature")
full_data[col_na_to_none][is.na(full_data[col_na_to_none])] <- "None"
na_to_none_Data <- as.data.frame(unclass(full_data))
str(na_to_none_Data[col_na_to_none])
summary(na_to_none_Data)
# 5.these three feature is categorical feature but sea enterd as numberic
# so convert them back to factor
na_to_none_Data$MSSubClass <- as.factor(na_to_none_Data$MSSubClass)
na_to_none_Data$MoSold <- as.factor(na_to_none_Data$MoSold)
na_to_none_Data$YrSold <- as.factor(na_to_none_Data$YrSold)
str(na_to_none_Data$MSSubClass)
str(na_to_none_Data$MoSold)
str(na_to_none_Data$YrSold)
# sepeate numberic and categorical feature so easy to clean it up
numberic_only_data <- na_to_none_Data %>% select(which(sapply(na_to_none_Data, is.numeric)))
factor_only_data <- na_to_none_Data %>% select(which(sapply(na_to_none_Data, is.factor)))
str(numberic_only_data)
str(factor_only_data)
# 6.imputing missing data for numberic data
m <- mice(numberic_only_data, method="rf")
no_missing_numberic_data <- mice::complete(m)
sum(is.na(no_missing_numberic_data))
# 7.scale numeric feature except id and SalePrice
library(dplyr)
noId_sale_price_data <- no_missing_numberic_data %>% select(-Id,-SalePrice)
scale_center_num_data <- scale(noId_sale_price_data,center=TRUE,scale=TRUE)
scale_center_num_data <- cbind(Id = no_missing_numberic_data$Id,scale_center_num_data,SalePrice=no_missing_numberic_data$SalePrice)
summary(scale_center_num_data)
# 8.log transform numberic predictors that are have skewness > 1 and skewness > -1
tmp <- as.data.frame(scale_center_num_data)
library(e1071)
df <- data.frame(name=character(),skewness=numeric())
for(i in colnames(tmp)){
df = rbind(df, data.frame(name = i, skewness = skewness(tmp[[i]])))
}
sort_skewness_feature <- df %>% arrange(desc(skewness)) %>% filter(skewness >1 & skewness > -1)
sort_skewness_feature
for(i in sort_skewness_feature$name){
tmp[[i]] <- log10((abs(tmp[[i]])))
}
dx <- data.frame(name=character(),skewness=numeric())
for(i in sort_skewness_feature$name){
dx = rbind(dx, data.frame(name = i, skewness = skewness(tmp[[i]])))
}
sort_dx <- dx %>% arrange(desc(skewness))
sort_dx
scale_center_num_data <- tmp
# 9.replace Na for factor data with the mode of that feature
getmode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
for(i in 1:ncol(factor_only_data)){
factor_only_data[is.na(factor_only_data[,i]), i] <- getmode(factor_only_data[,i])
}
sum(is.na(factor_only_data))
# let combine the numberic and categorical data togheter
clean_data <- cbind(scale_center_num_data,factor_only_data)
str(clean_data)
# 10.Sub select the features that have predictive power using Boruta package
library(Boruta)
set.seed(8)
feature.selection <- Boruta(SalePrice ~., data = clean_data[1:train_index,], doTrace = 1)
table(feature.selection$finalDecision)
slelected_feature <- getSelectedAttributes(feature.selection)
selected_feautres_data <- clean_data %>% select(Id,selected_feautres,SalePrice)
colnames(selected_feautres_data)
write.csv(selected_feautres_data,file = "C:\\REGIS\\practium1\\data\\selectedFeatureData.csv",row.names=FALSE)
|
testlist <- list(a = -235802142L, b = 41L, x = -488377871L)
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610128507-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 120
|
r
|
testlist <- list(a = -235802142L, b = 41L, x = -488377871L)
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/its.root.R
\docType{data}
\name{its.root}
\alias{its.root}
\title{An Experiment Level phyloseq Object}
\format{
A phyloseq object with otu_table, sample_data and tax_table. The sample_data variables are:
\describe{
\item{P}{Phosphorus level, H or L}
\item{Genotype}{ One of three: 2, 3, and C}
\item{Label}{A code for treatments: 2HR, 2LR, 3HR, 3LR, CHR, CLR}
}
}
\usage{
its.root
}
\description{
Based on ITS2 sequences amplified from corn roots.
}
\keyword{datasets}
|
/man/its.root.Rd
|
no_license
|
jfq3/QsRutils
|
R
| false
| true
| 555
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/its.root.R
\docType{data}
\name{its.root}
\alias{its.root}
\title{An Experiment Level phyloseq Object}
\format{
A phyloseq object with otu_table, sample_data and tax_table. The sample_data variables are:
\describe{
\item{P}{Phosphorus level, H or L}
\item{Genotype}{ One of three: 2, 3, and C}
\item{Label}{A code for treatments: 2HR, 2LR, 3HR, 3LR, CHR, CLR}
}
}
\usage{
its.root
}
\description{
Based on ITS2 sequences amplified from corn roots.
}
\keyword{datasets}
|
# b)
# Long division of binary polynomials over GF(2) (i.e. XOR arithmetic).
#
# Args:
#   p: dividend, a 0/1 vector with the highest-order coefficient first.
#   q: divisor, a 0/1 vector (highest-order coefficient first).
#
# Returns: c(quotient, remainder) as a single numeric 0/1 vector; the
# quotient has length(p) - length(q) + 1 bits and the remainder
# length(q) - 1 bits.
#
# Changes vs. original: the original XOR-ed the working register with a zero
# vector when the leading bit was 0 (a no-op that also coerced the register
# to logical, yielding a mixed logical/numeric result), and indexed p one
# past its end on the final iteration. Values returned are identical; the
# result is now uniformly numeric and the past-end read is made explicit.
pol_div <- function(p, q){
  k <- length(q)
  reg <- p[1:k]          # sliding window of the dividend ("working register")
  quotient <- numeric(0)
  i <- k
  while(i <= length(p)) {
    # The leading bit of the register is the next quotient bit.
    quotient <- c(quotient, reg[1])
    i <- i + 1
    if(reg[1] == 1){
      # Subtract (XOR) the divisor when it "goes into" the register.
      reg <- as.numeric(xor(reg, q))
    }
    # Shift left, pulling in the next dividend bit (NA padding after the end,
    # which is dropped from the remainder below).
    reg <- c(reg[-1], if(i <= length(p)) p[i] else NA)
  }
  # Remainder = first k - 1 bits of the final register.
  c(quotient, reg[1:(k - 1)])
}
# Worked examples of GF(2) polynomial division. Each call overwrites `res`,
# so only the final result is kept after this block runs.
res <- pol_div(c(1, 1, 0, 1, 1, 0, 1, 1), c(1, 0, 0, 1))
res <- pol_div(c(1, 0 , 1, 1, 1, 0, 0), c(1, 0, 1))
res <- pol_div(c(1, 0, 0, 1, 0), c(1, 0, 0, 1))
# c)
# Compute the CRC checksum of a message under a generator polynomial.
#
# Args:
#   bin_vec: message bits (0/1 vector, highest-order bit first).
#   method: generator polynomial as a 0/1 coefficient vector.
#
# Returns: the length(method) - 1 remainder bits obtained by appending that
# many zero bits to the message and dividing over GF(2).
crc_calculate <- function(bin_vec, method){
  n_check_bits <- length(method) - 1
  augmented <- c(bin_vec, rep(0, n_check_bits))
  tail(pol_div(augmented, method), n_check_bits)
}
# d)
# Verify a received bit vector (message with its CRC appended) against a
# generator polynomial.
#
# Args:
#   bin_vec: received bits, i.e. message followed by checksum bits.
#   method: generator polynomial as a 0/1 coefficient vector.
#
# Returns: TRUE when the GF(2) division remainder is all zeros (no
# detectable error), FALSE otherwise.
crc_check <- function(bin_vec, method){
  quotient_and_remainder <- pol_div(bin_vec, method)
  remainder_bits <- tail(quotient_and_remainder, length(method) - 1)
  all(remainder_bits == 0)
}
# Available generator polynomials, encoded as coefficient bit vectors with
# the highest-order term first (e.g. c(1, 0, 0, 1, 0, 1) is x^5 + x^2 + 1).
usb <- c(1, 0, 0, 1, 0, 1)
bluetooth <- c(1, 1, 0, 1, 0, 1)
isdn <- c(1, 0, 0, 0, 0, 0, 1, 1, 1)
# x^16 + x^12 + x^5 + 1 (commonly known as the CRC-16-CCITT polynomial)
crc_16 <- c(1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)
# Demo: compute the CRC of a 5-bit message, then verify message + CRC.
bits = c(1, 0, 1, 1, 1)
crc <- crc_calculate(bits, crc_16)
# Uncomment the next line to corrupt the CRC and make the check below FALSE.
# crc[1] = !crc[1]
# TRUE: appending the computed CRC makes the division remainder all zeros.
crc_checksum <- crc_check(c(bits, crc), crc_16)
|
/uebung4/uebung4-3.r
|
no_license
|
reitermarkus/rnit
|
R
| false
| false
| 1,405
|
r
|
# b)
pol_div <- function(p, q){
# temporary array for xor'ing
tmp <- p[1:length(q)]
result <- c()
i <- length(q)
while(i <= length(p)) {
result <- c(result, tmp[1])
i <- i + 1
if(tmp[1] == 1){
tmp2 <- xor(tmp, q)
} else {
tmp2 <- xor(tmp, rep(0, length(q)))
}
# remove first and add last from dividend
tmp <- c(tmp2[2:length(q)], p[i])
}
# add the remainder to the end of the result
c(result, tmp[1:(length(tmp) - 1)])
}
res <- pol_div(c(1, 1, 0, 1, 1, 0, 1, 1), c(1, 0, 0, 1))
res <- pol_div(c(1, 0 , 1, 1, 1, 0, 0), c(1, 0, 1))
res <- pol_div(c(1, 0, 0, 1, 0), c(1, 0, 0, 1))
# c)
crc_calculate <- function(bin_vec, method){
padding <- rep(0, length(method) - 1)
result <- pol_div(c(bin_vec, padding), method)
# remainder
tail(result, length(method) - 1)
}
# d)
crc_check <- function(bin_vec, method){
result <- pol_div(bin_vec, method)
remainder <- tail(result, length(method) - 1)
# check if remainder is 0
all(!remainder)
}
# available methods
usb <- c(1, 0, 0, 1, 0, 1)
bluetooth <- c(1, 1, 0, 1, 0, 1)
isdn <- c(1, 0, 0, 0, 0, 0, 1, 1, 1)
crc_16 <- c(1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1)
bits = c(1, 0, 1, 1, 1)
crc <- crc_calculate(bits, crc_16)
# comment out to make crc invalid
# crc[1] = !crc[1]
crc_checksum <- crc_check(c(bits, crc), crc_16)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cancensus.R
\name{search_census_regions}
\alias{search_census_regions}
\title{Query the CensusMapper API for regions with names matching a searchterm.}
\usage{
search_census_regions(searchterm, dataset, level = NA, ...)
}
\arguments{
\item{searchterm}{The term to search for e.g. \code{"Victoria"}.
Search terms are case insensitive. If unable to find a given search term,
this function will suggest the correct spelling to use when possible.}
\item{dataset}{The dataset to query for available regions, e.g.
\code{"CA16"}.}
\item{level}{One of \code{NA}, \code{'C'}, \code{'PR'}, \code{'CMA'}, \code{'CD'}, or \code{'CSD'}.
If specified, only return variables of specified `level`.}
\item{...}{Further arguments passed on to \code{\link{list_census_regions}}.}
}
\description{
Query the CensusMapper API for regions with names matching a searchterm.
}
\examples{
search_census_regions('Victorea', 'CA16')
# This will return a warning that no match was found, but will suggest similar named regions.
search_census_regions('Victoria', 'CA16')
# This will limit region results to only include CMA level regions
search_census_regions('Victoria', 'CA16', level = "CMA")
}
|
/man/search_census_regions.Rd
|
permissive
|
sasha-ruby/cancensus
|
R
| false
| true
| 1,250
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cancensus.R
\name{search_census_regions}
\alias{search_census_regions}
\title{Query the CensusMapper API for regions with names matching a searchterm.}
\usage{
search_census_regions(searchterm, dataset, level = NA, ...)
}
\arguments{
\item{searchterm}{The term to search for e.g. \code{"Victoria"}.
Search terms are case insensitive. If unable to find a given search term,
this function will suggest the correct spelling to use when possible.}
\item{dataset}{The dataset to query for available regions, e.g.
\code{"CA16"}.}
\item{level}{One of \code{NA}, \code{'C'}, \code{'PR'}, \code{'CMA'}, \code{'CD'}, or \code{'CSD'}.
If specified, only return variables of specified `level`.}
\item{...}{Further arguments passed on to \code{\link{list_census_regions}}.}
}
\description{
Query the CensusMapper API for regions with names matching a searchterm.
}
\examples{
search_census_regions('Victorea', 'CA16')
# This will return a warning that no match was found, but will suggest similar named regions.
search_census_vectors('Victoria', 'CA16')
# This will limit region results to only include CMA level regions
search_census_vectors('Victoria', 'CA16', level = "CMA")
}
|
# beeswarm.R
#
# Aron Charles Eklund
#
# A part of the "beeswarm" R package
#
# S3 generic entry point: dispatches on the class of `x` to a method such as
# beeswarm.default (defined below); extra arguments are passed through.
beeswarm <- function (x, ...)
UseMethod("beeswarm")
## here x should be a list or data.frame or numeric
beeswarm.default <- function(x,
method = c("swarm", "center", "hex", "square"),
vertical = TRUE, horizontal = !vertical,
cex = 1, spacing = 1, breaks = NULL,
labels, at = NULL,
corral = c("none", "gutter", "wrap", "random", "omit"),
corralWidth, side = 0L,
priority = c("ascending", "descending", "density", "random", "none"),
pch = par("pch"), col = par("col"), bg = NA,
pwpch = NULL, pwcol = NULL, pwbg = NULL,
do.plot = TRUE, add = FALSE, axes = TRUE, log = FALSE,
xlim = NULL, ylim = NULL, dlim = NULL, glim = NULL,
xlab = NULL, ylab = NULL, dlab = "", glab = "",
...) {
method <- match.arg(method)
corral <- match.arg(corral)
priority <- match.arg(priority)
if(length(cex) > 1) {
stop('the parameter "cex" must have length 1')
}
stopifnot(side %in% -1:1)
if(is.numeric(x)) {
x <- list(x)
}
n.groups <- length(x)
#### Resolve group labels
if(missing(labels) || is.null(labels)) {
if(is.null(names(x))) {
if(n.groups == 1) {
labels <- NA
} else {
labels <- 1:n.groups
}
} else {
labels <- names(x)
}
} else {
labels <- rep(labels, length.out = n.groups)
}
if (is.null(at))
at <- 1:n.groups
else if (length(at) != n.groups)
stop(gettextf("'at' must have length equal to %d, the number of groups",
n.groups), domain = NA)
if (is.null(dlab))
dlab <- deparse(substitute(x))
## this function returns a "group" vector, to complement "unlist"
unlistGroup <- function(x, nms = names(x)) rep(nms, sapply(x, length))
x.val <- unlist(x)
x.gp <- unlistGroup(x, nms = labels)
if((range(x.val, finite = TRUE)[1] <= 0) && log)
warning('values <= 0 omitted from logarithmic plot')
n.obs <- length(x.val)
n.obs.per.group <- sapply(x, length)
#### Resolve xlim, ylim, dlim, xlab, ylab
if(is.null(dlim)) {
if(log) {
dlim <- 10 ^ (extendrange(log10(x.val[x.val > 0])))
} else {
dlim <- extendrange(x.val, f = 0.01)
}
} else if (length(dlim) != 2) {
stop ("'dlim' must have length 2")
}
if(is.null(glim)) {
glim <- c(min(at) - 0.5, max(at) + 0.5)
} else if (length(glim) != 2) {
stop ("'glim' must have length 2")
}
if(horizontal) { ## plot is horizontal
if(is.null(ylim))
ylim <- glim
if(is.null(xlim)) {
xlim <- dlim
} else {
dlim <- xlim
}
if (is.null(xlab))
xlab <- dlab
if (is.null(ylab))
ylab <- glab
} else { ## plot is vertical
if(is.null(xlim))
xlim <- glim
if(is.null(ylim)) {
ylim <- dlim
} else {
dlim <- ylim
}
if (is.null(ylab))
ylab <- dlab
if (is.null(xlab))
xlab <- glab
}
if(length(xlim) != 2)
stop ("'xlim' must have length 2")
if(length(ylim) != 2)
stop ("'ylim' must have length 2")
#### Resolve plotting characters and colors
if(is.null(pwpch)) {
pch.out <- unlistGroup(x, nms = rep(pch, length.out = n.groups))
} else {
if(is.list(pwpch)) {
names(pwpch) <- names(x)
stopifnot(all(sapply(pwpch, length) == n.obs.per.group))
pch.out <- unlist(pwpch)
} else {
pch.out <- pwpch
}
}
stopifnot(length(pch.out) == n.obs)
if(is.null(pwcol)) {
col.out <- unlistGroup(x, nms = rep(col, length.out = n.groups))
} else {
if(is.list(pwcol)) {
names(pwcol) <- names(x)
stopifnot(all(sapply(pwcol, length) == n.obs.per.group))
col.out <- unlist(pwcol)
} else {
col.out <- pwcol
}
}
stopifnot(length(col.out) == n.obs)
if(is.null(pwbg)) {
bg.out <- unlistGroup(x, nms = rep(bg, length.out = n.groups))
} else {
if(is.list(pwbg)) {
names(pwbg) <- names(x)
stopifnot(all(sapply(pwbg, length) == n.obs.per.group))
bg.out <- unlist(pwbg)
} else {
bg.out <- pwbg
}
}
stopifnot(length(bg.out) == n.obs)
#### Set up the plot
if(do.plot & !add) {
plot(xlim, ylim,
type = 'n', axes = FALSE,
log = ifelse(log, ifelse(horizontal, 'x', 'y'), ''),
xlab = xlab, ylab = ylab, ...)
}
#### Calculate the size of a plotting character along group- or data-axis
sizeMultiplier <- par('cex') * cex * spacing
if(horizontal) {
size.g <- yinch(0.08, warn.log = FALSE) * sizeMultiplier
size.d <- xinch(0.08, warn.log = FALSE) * sizeMultiplier
} else { # vertical
size.g <- xinch(0.08, warn.log = FALSE) * sizeMultiplier
size.d <- yinch(0.08, warn.log = FALSE) * sizeMultiplier
}
##### Calculate point positions g.pos, d.pos
if(method == 'swarm') {
if(horizontal) {
g.offset <- lapply(x, function(a) swarmy(x = a, y = rep(0, length(a)),
cex = sizeMultiplier, side = side, priority = priority)$y)
} else {
g.offset <- lapply(x, function(a) swarmx(x = rep(0, length(a)), y = a,
cex = sizeMultiplier, side = side, priority = priority)$x)
}
d.pos <- x
} else { #### non-swarm methods
##### first determine positions along the data axis
if(method == 'hex') size.d <- size.d * sqrt(3) / 2
if(log) { ## if data axis IS on a log scale
if(is.null(breaks))
breaks <- 10 ^ seq(log10(dlim[1]), log10(dlim[2]) + size.d, by = size.d)
if(length(breaks) == 1 && is.na(breaks[1])) {
d.index <- x
d.pos <- x
} else {
mids <- 10 ^ ((log10(head(breaks, -1)) + log10(tail(breaks, -1))) / 2)
d.index <- lapply(x, cut, breaks = breaks, labels = FALSE)
d.pos <- lapply(d.index, function(a) mids[a])
}
} else { ## if data axis is NOT on a log scale
if(is.null(breaks))
breaks <- seq(dlim[1], dlim[2] + size.d, by = size.d)
if(length(breaks) == 1 && is.na(breaks[1])) {
d.index <- x
d.pos <- x
} else {
mids <- (head(breaks, -1) + tail(breaks, -1)) / 2
d.index <- lapply(x, cut, breaks = breaks, labels = FALSE)
d.pos <- lapply(d.index, function(a) mids[a])
}
}
##### now determine positions along the group axis
x.index <- lapply(d.index, function(v) {
if(length(na.omit(v)) == 0)
return(v)
v.s <- lapply(split(v, v), seq_along)
if(method %in% c('center', 'square') && side == -1)
v.s <- lapply(v.s, function(a) a - max(a))
else if(method %in% c('center', 'square') && side == 1)
v.s <- lapply(v.s, function(a) a - 1)
else if(method == 'center')
v.s <- lapply(v.s, function(a) a - mean(a))
else if(method == 'square')
v.s <- lapply(v.s, function(a) a - floor(mean(a)))
else if(method == 'hex') {
odd.row <- (as.numeric(names(v.s)) %% 2) == 1
if(side == 0) {
v.s[ odd.row] <- lapply(v.s[ odd.row], function(a) a - floor(mean(a)) - 0.25)
v.s[!odd.row] <- lapply(v.s[!odd.row], function(a) a - ceiling(mean(a)) + 0.25)
} else if(side == -1) {
v.s[ odd.row] <- lapply(v.s[ odd.row], function(a) a - max(a))
v.s[!odd.row] <- lapply(v.s[!odd.row], function(a) a - max(a) - 0.5)
} else if(side == 1) {
v.s[ odd.row] <- lapply(v.s[ odd.row], function(a) a - 1)
v.s[!odd.row] <- lapply(v.s[!odd.row], function(a) a - 0.5)
}
}
unsplit(v.s, v)
})
g.offset <- lapply(1:n.groups, function(i) x.index[[i]] * size.g)
} ###### end of non-swarm methods
##### now check for runaway points (if "corral" has been set)
if(corral != 'none') {
if(missing(corralWidth)) {
if(n.groups > 1) {
corralWidth <- min(at[-1] - at[-n.groups]) - (2 * size.g)
} else {
corralWidth <- 2 * (min(diff(c(par('usr')[1], at, par('usr')[2]))) - size.g)
}
} else {
stopifnot(length(corralWidth) == 1)
stopifnot(corralWidth > 0)
}
corralLo <- (side - 1) * corralWidth / 2
corralHi <- (side + 1) * corralWidth / 2
if(corral == 'gutter') {
g.offset <- lapply(g.offset, function(zz) pmin(corralHi, pmax(corralLo, zz)))
}
if(corral == 'wrap') {
if(side == -1) { ## special case with side=-1: reverse the corral to avoid artifacts at zero
g.offset <- lapply(g.offset, function(zz) corralHi - ((corralHi - zz) %% corralWidth))
} else {
g.offset <- lapply(g.offset, function(zz) ((zz - corralLo) %% corralWidth) + corralLo)
}
}
if(corral == 'random') {
g.offset <- lapply(g.offset, function(zz) ifelse(zz > corralHi | zz < corralLo, yes = runif(length(zz), corralLo, corralHi), no = zz))
}
if(corral == 'omit') {
g.offset <- lapply(g.offset, function(zz) ifelse(zz > corralHi | zz < corralLo, yes = NA, no = zz))
}
}
g.pos <- lapply(1:n.groups, function(i) at[i] + g.offset[[i]])
out <- data.frame(x = unlist(g.pos), y = unlist(d.pos),
pch = pch.out, col = col.out, bg = bg.out,
x.orig = x.gp, y.orig = x.val,
stringsAsFactors = FALSE)
if(do.plot) {
if(horizontal) { ## plot is horizontal
points(out$y, out$x, pch = out$pch, col = out$col, bg = out$bg, cex = cex)
if(axes & !add) {
axis(1, ...)
axis(2, at = at, labels = labels, tick = FALSE, ...)
box(...)
}
} else { ## plot is vertical
points(out$x, out$y, pch = out$pch, col = out$col, bg = out$bg, cex = cex)
if(axes & !add) {
axis(2, ...)
axis(1, at = at, labels = labels, tick = FALSE, ...)
box(...)
}
}
}
invisible(out)
}
# S3 formula method for beeswarm(): accepts a two-sided formula such as
# y ~ group (optionally with `data`, `subset`, `na.action`), splits the
# response by the grouping term(s), and forwards the result to the list
# method of beeswarm().
beeswarm.formula <- function (formula, data = NULL, subset, na.action = NULL,
pwpch = NULL, pwcol = NULL, pwbg = NULL, dlab, glab, ...)
{
# A two-sided formula (length 3) is required: response ~ grouping.
if (missing(formula) || (length(formula) != 3))
stop("'formula' missing or incorrect")
m <- match.call(expand.dots = FALSE)
if (is.matrix(eval(m$data, parent.frame())))
m$data <- as.data.frame(data)
# Strip arguments that model.frame() would not understand.
m$... <- NULL
m$dlab <- NULL
m$glab <- NULL
m$na.action <- na.action
# Re-dispatch the captured call as model.frame() to build the data frame.
m[[1]] <- as.name("model.frame")
mf <- eval(m, parent.frame())
response <- attr(attr(mf, "terms"), "response")
# Default axis labels: response name on the data axis, the formula's
# right-hand side on the group axis.
if (missing(dlab))
dlab <- names(mf)[response]
if (missing(glab))
glab <- as.character(formula)[3]
f <- mf[-response]
f <- f[names(f) %in% attr(attr(mf, "terms"), "term.labels")]
# Point-wise graphical parameters, if carried through the model frame,
# are split by group so they align with the split response values.
if(!is.null(mf$'(pwpch)')) pwpch <- split(mf$'(pwpch)', f)
if(!is.null(mf$'(pwcol)')) pwcol <- split(mf$'(pwcol)', f)
if(!is.null(mf$'(pwbg)')) pwbg <- split(mf$'(pwbg)',f)
beeswarm(split(mf[[response]], f),
pwpch = pwpch, pwcol = pwcol, pwbg = pwbg,
dlab = dlab, glab = glab, ...)
}
#### hidden function to do swarm layout
# Internal helper: compute the perpendicular ("group"-axis) offsets for a
# swarm of points.
#   x        : positions along the data axis (may contain NA)
#   dsize    : point size along the data axis (normalizes distances)
#   gsize    : point size along the group axis (scales the returned offsets)
#   side     : -1 = offsets to the negative side only, 0 = both sides,
#              1 = positive side only
#   priority : placement order; points placed earlier end up closer to the
#              baseline
# Returns a numeric vector of offsets in the original input order, scaled
# by gsize; NA inputs yield NA offsets.
.calculateSwarm <- function(x, dsize, gsize, side = 0L, priority = "ascending") {
if(length(x) == 0) return(numeric(0))
stopifnot(side %in% -1:1)
# Work in units of one point diameter along the data axis; `index` records
# the original order so it can be restored at the end.
out <- data.frame(x = x / dsize, y = 0, index = seq(along = x))
#### Determine the order in which points will be placed
if( priority == "ascending" ) { out <- out[order( out$x), ] } ## default "smile"
else if(priority == "descending") { out <- out[order(-out$x), ] } ## frown
else if(priority == "none") { } ## do not reorder
else if(priority == "density") {
dens.x <- density(out$x, na.rm = TRUE) ## compute kernel density estimate
dens.interp <- approx(dens.x$x, dens.x$y, xout = out$x, rule = 2) ## interpolated density
out <- out[order(-dens.interp$y), ] ## arrange outward from densest areas
}
else if(priority == "random") {
out <- out[sample(nrow(out)), ]
}
#### place the points
if(nrow(out) > 1) {
for (ii in 2:nrow(out)) { ## we will place one point at a time
xi <- out$x[ii]
## identify previously-placed points with potential to overlap the current point
isPotOverlap <- (abs(xi - out$x) < 1) & (1:nrow(out) < ii)
isPotOverlap[is.na(isPotOverlap)] <- FALSE
if(any(isPotOverlap)) {
pre.x <- out[isPotOverlap, 'x']
pre.y <- out[isPotOverlap, 'y']
poty.off <- sqrt(1 - ((xi - pre.x) ^ 2)) ## potential y offsets
poty <- switch(side + 2,
c(0, pre.y - poty.off),
c(0, pre.y + poty.off, pre.y - poty.off),
c(0, pre.y + poty.off)
)
# A candidate offset is rejected if the squared distance to any placed
# point is below the threshold; 0.999 (just under 1) presumably leaves
# a small numerical tolerance -- NOTE(review): tolerance assumed.
poty.bad <- sapply(poty, function(y) { ## check for overlaps
any(((xi - pre.x) ^ 2 + (y - pre.y) ^ 2) < 0.999)
})
poty[poty.bad] <- Inf
out$y[ii] <- poty[which.min(abs(poty))]
} else {
out$y[ii] <- 0
}
}
}
out[is.na(out$x), 'y'] <- NA ## missing x values should have missing y values
out$y[order(out$index)] * gsize
}
### jitter points horizontally
### jitter points horizontally
# Points that all share a single x position are offset sideways (using the
# swarm layout) so they no longer overlap; y values are left untouched.
# Returns a data frame with columns x (offset) and y (original).
swarmx <- function(x, y,
xsize = xinch(0.08, warn.log = FALSE),
ysize = yinch(0.08, warn.log = FALSE),
log = NULL, cex = par("cex"), side = 0L,
priority = c("ascending", "descending", "density", "random", "none")) {
priority <- match.arg(priority)
# Default log specification mirrors the current device's axis settings.
if(is.null(log)) {
log <- paste(ifelse(par('xlog'), 'x', ''), ifelse(par('ylog'), 'y', ''), sep = '')
}
log.flags <- strsplit(log, NULL)[[1L]]
use.xlog <- 'x' %in% log.flags
use.ylog <- 'y' %in% log.flags
pts <- xy.coords(x = x, y = y, recycle = TRUE, log = log)
# All input points must share a single x value for the offsets to make sense.
stopifnot((length(unique(pts$x)) <= 1))
if(use.xlog) pts$x <- log10(pts$x)
if(use.ylog) pts$y <- log10(pts$y)
offsets <- .calculateSwarm(pts$y, dsize = ysize * cex, gsize = xsize * cex,
side = side, priority = priority)
result <- data.frame(x = pts$x + offsets, y = y)
if(use.xlog) result$x <- 10 ^ result$x
result
}
### jitter points vertically
### jitter points vertically
# Counterpart of swarmx(): points that all share a single y position are
# offset up/down so they no longer overlap; x values are left untouched.
# Returns a data frame with columns x (original) and y (offset).
swarmy <- function(x, y,
xsize = xinch(0.08, warn.log = FALSE),
ysize = yinch(0.08, warn.log = FALSE),
log = NULL, cex = par("cex"), side = 0L,
priority = c("ascending", "descending", "density", "random", "none")) {
priority <- match.arg(priority)
# Default log specification mirrors the current device's axis settings.
if(is.null(log)) {
log <- paste(ifelse(par('xlog'), 'x', ''), ifelse(par('ylog'), 'y', ''), sep = '')
}
log.flags <- strsplit(log, NULL)[[1L]]
use.xlog <- 'x' %in% log.flags
use.ylog <- 'y' %in% log.flags
pts <- xy.coords(x = x, y = y, recycle = TRUE, log = log)
# All input points must share a single y value for the offsets to make sense.
stopifnot((length(unique(pts$y)) <= 1))
if(use.xlog) pts$x <- log10(pts$x)
if(use.ylog) pts$y <- log10(pts$y)
offsets <- .calculateSwarm(pts$x, dsize = xsize * cex, gsize = ysize * cex,
side = side, priority = priority)
result <- data.frame(x = x, y = pts$y + offsets)
if(use.ylog) result$y <- 10 ^ result$y
result
}
|
/beeswarm/R/beeswarm.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 14,796
|
r
|
# beeswarm.R
#
# Aron Charles Eklund
#
# A part of the "beeswarm" R package
#
# Generic entry point: dispatches on the class of `x` to beeswarm.default
# (numeric vector / list input) or beeswarm.formula (formula input).
beeswarm <- function (x, ...)
UseMethod("beeswarm")
## here x should be a list or data.frame or numeric
## Default method for beeswarm().
## `x` is a list of numeric vectors, one per group (a bare numeric vector is
## treated as a single group). Points in each group are spread out along the
## group axis using the chosen layout `method` ("swarm", "center", "hex" or
## "square"), optionally corralled to a maximum width, optionally plotted,
## and the computed coordinates are returned invisibly as a data frame with
## columns x, y, pch, col, bg, x.orig, y.orig.
beeswarm.default <- function(x,
method = c("swarm", "center", "hex", "square"),
vertical = TRUE, horizontal = !vertical,
cex = 1, spacing = 1, breaks = NULL,
labels, at = NULL,
corral = c("none", "gutter", "wrap", "random", "omit"),
corralWidth, side = 0L,
priority = c("ascending", "descending", "density", "random", "none"),
pch = par("pch"), col = par("col"), bg = NA,
pwpch = NULL, pwcol = NULL, pwbg = NULL,
do.plot = TRUE, add = FALSE, axes = TRUE, log = FALSE,
xlim = NULL, ylim = NULL, dlim = NULL, glim = NULL,
xlab = NULL, ylab = NULL, dlab = "", glab = "",
...) {
method <- match.arg(method)
corral <- match.arg(corral)
priority <- match.arg(priority)
if(length(cex) > 1) {
stop('the parameter "cex" must have length 1')
}
stopifnot(side %in% -1:1)
# A bare numeric vector is treated as a single group.
if(is.numeric(x)) {
x <- list(x)
}
n.groups <- length(x)
#### Resolve group labels
if(missing(labels) || is.null(labels)) {
if(is.null(names(x))) {
if(n.groups == 1) {
labels <- NA
} else {
labels <- 1:n.groups
}
} else {
labels <- names(x)
}
} else {
labels <- rep(labels, length.out = n.groups)
}
if (is.null(at))
at <- 1:n.groups
else if (length(at) != n.groups)
stop(gettextf("'at' must have length equal to %d, the number of groups",
n.groups), domain = NA)
if (is.null(dlab))
dlab <- deparse(substitute(x))
## this function returns a "group" vector, to complement "unlist"
unlistGroup <- function(x, nms = names(x)) rep(nms, sapply(x, length))
x.val <- unlist(x)
x.gp <- unlistGroup(x, nms = labels)
if((range(x.val, finite = TRUE)[1] <= 0) && log)
warning('values <= 0 omitted from logarithmic plot')
n.obs <- length(x.val)
n.obs.per.group <- sapply(x, length)
#### Resolve xlim, ylim, dlim, xlab, ylab
# "d" refers to the data axis, "g" to the group axis; which one is x and
# which is y depends on `horizontal`.
if(is.null(dlim)) {
if(log) {
dlim <- 10 ^ (extendrange(log10(x.val[x.val > 0])))
} else {
dlim <- extendrange(x.val, f = 0.01)
}
} else if (length(dlim) != 2) {
stop ("'dlim' must have length 2")
}
if(is.null(glim)) {
glim <- c(min(at) - 0.5, max(at) + 0.5)
} else if (length(glim) != 2) {
stop ("'glim' must have length 2")
}
if(horizontal) { ## plot is horizontal
if(is.null(ylim))
ylim <- glim
if(is.null(xlim)) {
xlim <- dlim
} else {
dlim <- xlim
}
if (is.null(xlab))
xlab <- dlab
if (is.null(ylab))
ylab <- glab
} else { ## plot is vertical
if(is.null(xlim))
xlim <- glim
if(is.null(ylim)) {
ylim <- dlim
} else {
dlim <- ylim
}
if (is.null(ylab))
ylab <- dlab
if (is.null(xlab))
xlab <- glab
}
if(length(xlim) != 2)
stop ("'xlim' must have length 2")
if(length(ylim) != 2)
stop ("'ylim' must have length 2")
#### Resolve plotting characters and colors
# Point-wise parameters (pw*) override the per-group pch/col/bg; a list is
# validated against the per-group observation counts, anything else is used
# as-is and only checked for total length.
if(is.null(pwpch)) {
pch.out <- unlistGroup(x, nms = rep(pch, length.out = n.groups))
} else {
if(is.list(pwpch)) {
names(pwpch) <- names(x)
stopifnot(all(sapply(pwpch, length) == n.obs.per.group))
pch.out <- unlist(pwpch)
} else {
pch.out <- pwpch
}
}
stopifnot(length(pch.out) == n.obs)
if(is.null(pwcol)) {
col.out <- unlistGroup(x, nms = rep(col, length.out = n.groups))
} else {
if(is.list(pwcol)) {
names(pwcol) <- names(x)
stopifnot(all(sapply(pwcol, length) == n.obs.per.group))
col.out <- unlist(pwcol)
} else {
col.out <- pwcol
}
}
stopifnot(length(col.out) == n.obs)
if(is.null(pwbg)) {
bg.out <- unlistGroup(x, nms = rep(bg, length.out = n.groups))
} else {
if(is.list(pwbg)) {
names(pwbg) <- names(x)
stopifnot(all(sapply(pwbg, length) == n.obs.per.group))
bg.out <- unlist(pwbg)
} else {
bg.out <- pwbg
}
}
stopifnot(length(bg.out) == n.obs)
#### Set up the plot
if(do.plot & !add) {
plot(xlim, ylim,
type = 'n', axes = FALSE,
log = ifelse(log, ifelse(horizontal, 'x', 'y'), ''),
xlab = xlab, ylab = ylab, ...)
}
#### Calculate the size of a plotting character along group- or data-axis
sizeMultiplier <- par('cex') * cex * spacing
if(horizontal) {
size.g <- yinch(0.08, warn.log = FALSE) * sizeMultiplier
size.d <- xinch(0.08, warn.log = FALSE) * sizeMultiplier
} else { # vertical
size.g <- xinch(0.08, warn.log = FALSE) * sizeMultiplier
size.d <- yinch(0.08, warn.log = FALSE) * sizeMultiplier
}
##### Calculate point positions g.pos, d.pos
if(method == 'swarm') {
if(horizontal) {
g.offset <- lapply(x, function(a) swarmy(x = a, y = rep(0, length(a)),
cex = sizeMultiplier, side = side, priority = priority)$y)
} else {
g.offset <- lapply(x, function(a) swarmx(x = rep(0, length(a)), y = a,
cex = sizeMultiplier, side = side, priority = priority)$x)
}
d.pos <- x
} else { #### non-swarm methods
##### first determine positions along the data axis
# Non-swarm layouts bin the data axis into intervals of one point size and
# stack the points within each bin; `breaks = NA` disables binning.
if(method == 'hex') size.d <- size.d * sqrt(3) / 2
if(log) { ## if data axis IS on a log scale
if(is.null(breaks))
breaks <- 10 ^ seq(log10(dlim[1]), log10(dlim[2]) + size.d, by = size.d)
if(length(breaks) == 1 && is.na(breaks[1])) {
d.index <- x
d.pos <- x
} else {
mids <- 10 ^ ((log10(head(breaks, -1)) + log10(tail(breaks, -1))) / 2)
d.index <- lapply(x, cut, breaks = breaks, labels = FALSE)
d.pos <- lapply(d.index, function(a) mids[a])
}
} else { ## if data axis is NOT on a log scale
if(is.null(breaks))
breaks <- seq(dlim[1], dlim[2] + size.d, by = size.d)
if(length(breaks) == 1 && is.na(breaks[1])) {
d.index <- x
d.pos <- x
} else {
mids <- (head(breaks, -1) + tail(breaks, -1)) / 2
d.index <- lapply(x, cut, breaks = breaks, labels = FALSE)
d.pos <- lapply(d.index, function(a) mids[a])
}
}
##### now determine positions along the group axis
x.index <- lapply(d.index, function(v) {
if(length(na.omit(v)) == 0)
return(v)
v.s <- lapply(split(v, v), seq_along)
if(method %in% c('center', 'square') && side == -1)
v.s <- lapply(v.s, function(a) a - max(a))
else if(method %in% c('center', 'square') && side == 1)
v.s <- lapply(v.s, function(a) a - 1)
else if(method == 'center')
v.s <- lapply(v.s, function(a) a - mean(a))
else if(method == 'square')
v.s <- lapply(v.s, function(a) a - floor(mean(a)))
else if(method == 'hex') {
# Hexagonal packing: alternate rows are shifted by half a point width.
odd.row <- (as.numeric(names(v.s)) %% 2) == 1
if(side == 0) {
v.s[ odd.row] <- lapply(v.s[ odd.row], function(a) a - floor(mean(a)) - 0.25)
v.s[!odd.row] <- lapply(v.s[!odd.row], function(a) a - ceiling(mean(a)) + 0.25)
} else if(side == -1) {
v.s[ odd.row] <- lapply(v.s[ odd.row], function(a) a - max(a))
v.s[!odd.row] <- lapply(v.s[!odd.row], function(a) a - max(a) - 0.5)
} else if(side == 1) {
v.s[ odd.row] <- lapply(v.s[ odd.row], function(a) a - 1)
v.s[!odd.row] <- lapply(v.s[!odd.row], function(a) a - 0.5)
}
}
unsplit(v.s, v)
})
g.offset <- lapply(1:n.groups, function(i) x.index[[i]] * size.g)
} ###### end of non-swarm methods
##### now check for runaway points (if "corral" has been set)
if(corral != 'none') {
if(missing(corralWidth)) {
if(n.groups > 1) {
corralWidth <- min(at[-1] - at[-n.groups]) - (2 * size.g)
} else {
corralWidth <- 2 * (min(diff(c(par('usr')[1], at, par('usr')[2]))) - size.g)
}
} else {
stopifnot(length(corralWidth) == 1)
stopifnot(corralWidth > 0)
}
# Corral bounds along the group axis, positioned according to `side`.
corralLo <- (side - 1) * corralWidth / 2
corralHi <- (side + 1) * corralWidth / 2
if(corral == 'gutter') {
g.offset <- lapply(g.offset, function(zz) pmin(corralHi, pmax(corralLo, zz)))
}
if(corral == 'wrap') {
if(side == -1) { ## special case with side=-1: reverse the corral to avoid artifacts at zero
g.offset <- lapply(g.offset, function(zz) corralHi - ((corralHi - zz) %% corralWidth))
} else {
g.offset <- lapply(g.offset, function(zz) ((zz - corralLo) %% corralWidth) + corralLo)
}
}
if(corral == 'random') {
g.offset <- lapply(g.offset, function(zz) ifelse(zz > corralHi | zz < corralLo, yes = runif(length(zz), corralLo, corralHi), no = zz))
}
if(corral == 'omit') {
g.offset <- lapply(g.offset, function(zz) ifelse(zz > corralHi | zz < corralLo, yes = NA, no = zz))
}
}
g.pos <- lapply(1:n.groups, function(i) at[i] + g.offset[[i]])
out <- data.frame(x = unlist(g.pos), y = unlist(d.pos),
pch = pch.out, col = col.out, bg = bg.out,
x.orig = x.gp, y.orig = x.val,
stringsAsFactors = FALSE)
if(do.plot) {
if(horizontal) { ## plot is horizontal
points(out$y, out$x, pch = out$pch, col = out$col, bg = out$bg, cex = cex)
if(axes & !add) {
axis(1, ...)
axis(2, at = at, labels = labels, tick = FALSE, ...)
box(...)
}
} else { ## plot is vertical
points(out$x, out$y, pch = out$pch, col = out$col, bg = out$bg, cex = cex)
if(axes & !add) {
axis(2, ...)
axis(1, at = at, labels = labels, tick = FALSE, ...)
box(...)
}
}
}
invisible(out)
}
# S3 formula method for beeswarm(): accepts a two-sided formula such as
# y ~ group (optionally with `data`, `subset`, `na.action`), splits the
# response by the grouping term(s), and forwards the result to the list
# method of beeswarm().
beeswarm.formula <- function (formula, data = NULL, subset, na.action = NULL,
pwpch = NULL, pwcol = NULL, pwbg = NULL, dlab, glab, ...)
{
# A two-sided formula (length 3) is required: response ~ grouping.
if (missing(formula) || (length(formula) != 3))
stop("'formula' missing or incorrect")
m <- match.call(expand.dots = FALSE)
if (is.matrix(eval(m$data, parent.frame())))
m$data <- as.data.frame(data)
# Strip arguments that model.frame() would not understand.
m$... <- NULL
m$dlab <- NULL
m$glab <- NULL
m$na.action <- na.action
# Re-dispatch the captured call as model.frame() to build the data frame.
m[[1]] <- as.name("model.frame")
mf <- eval(m, parent.frame())
response <- attr(attr(mf, "terms"), "response")
# Default axis labels: response name on the data axis, the formula's
# right-hand side on the group axis.
if (missing(dlab))
dlab <- names(mf)[response]
if (missing(glab))
glab <- as.character(formula)[3]
f <- mf[-response]
f <- f[names(f) %in% attr(attr(mf, "terms"), "term.labels")]
# Point-wise graphical parameters, if carried through the model frame,
# are split by group so they align with the split response values.
if(!is.null(mf$'(pwpch)')) pwpch <- split(mf$'(pwpch)', f)
if(!is.null(mf$'(pwcol)')) pwcol <- split(mf$'(pwcol)', f)
if(!is.null(mf$'(pwbg)')) pwbg <- split(mf$'(pwbg)',f)
beeswarm(split(mf[[response]], f),
pwpch = pwpch, pwcol = pwcol, pwbg = pwbg,
dlab = dlab, glab = glab, ...)
}
#### hidden function to do swarm layout
# Internal helper: compute the perpendicular ("group"-axis) offsets for a
# swarm of points.
#   x        : positions along the data axis (may contain NA)
#   dsize    : point size along the data axis (normalizes distances)
#   gsize    : point size along the group axis (scales the returned offsets)
#   side     : -1 = offsets to the negative side only, 0 = both sides,
#              1 = positive side only
#   priority : placement order; points placed earlier end up closer to the
#              baseline
# Returns a numeric vector of offsets in the original input order, scaled
# by gsize; NA inputs yield NA offsets.
.calculateSwarm <- function(x, dsize, gsize, side = 0L, priority = "ascending") {
if(length(x) == 0) return(numeric(0))
stopifnot(side %in% -1:1)
# Work in units of one point diameter along the data axis; `index` records
# the original order so it can be restored at the end.
out <- data.frame(x = x / dsize, y = 0, index = seq(along = x))
#### Determine the order in which points will be placed
if( priority == "ascending" ) { out <- out[order( out$x), ] } ## default "smile"
else if(priority == "descending") { out <- out[order(-out$x), ] } ## frown
else if(priority == "none") { } ## do not reorder
else if(priority == "density") {
dens.x <- density(out$x, na.rm = TRUE) ## compute kernel density estimate
dens.interp <- approx(dens.x$x, dens.x$y, xout = out$x, rule = 2) ## interpolated density
out <- out[order(-dens.interp$y), ] ## arrange outward from densest areas
}
else if(priority == "random") {
out <- out[sample(nrow(out)), ]
}
#### place the points
if(nrow(out) > 1) {
for (ii in 2:nrow(out)) { ## we will place one point at a time
xi <- out$x[ii]
## identify previously-placed points with potential to overlap the current point
isPotOverlap <- (abs(xi - out$x) < 1) & (1:nrow(out) < ii)
isPotOverlap[is.na(isPotOverlap)] <- FALSE
if(any(isPotOverlap)) {
pre.x <- out[isPotOverlap, 'x']
pre.y <- out[isPotOverlap, 'y']
poty.off <- sqrt(1 - ((xi - pre.x) ^ 2)) ## potential y offsets
poty <- switch(side + 2,
c(0, pre.y - poty.off),
c(0, pre.y + poty.off, pre.y - poty.off),
c(0, pre.y + poty.off)
)
# A candidate offset is rejected if the squared distance to any placed
# point is below the threshold; 0.999 (just under 1) presumably leaves
# a small numerical tolerance -- NOTE(review): tolerance assumed.
poty.bad <- sapply(poty, function(y) { ## check for overlaps
any(((xi - pre.x) ^ 2 + (y - pre.y) ^ 2) < 0.999)
})
poty[poty.bad] <- Inf
out$y[ii] <- poty[which.min(abs(poty))]
} else {
out$y[ii] <- 0
}
}
}
out[is.na(out$x), 'y'] <- NA ## missing x values should have missing y values
out$y[order(out$index)] * gsize
}
### jitter points horizontally
### jitter points horizontally
# Points that all share a single x position are offset sideways (using the
# swarm layout) so they no longer overlap; y values are left untouched.
# Returns a data frame with columns x (offset) and y (original).
swarmx <- function(x, y,
xsize = xinch(0.08, warn.log = FALSE),
ysize = yinch(0.08, warn.log = FALSE),
log = NULL, cex = par("cex"), side = 0L,
priority = c("ascending", "descending", "density", "random", "none")) {
priority <- match.arg(priority)
# Default log specification mirrors the current device's axis settings.
if(is.null(log)) {
log <- paste(ifelse(par('xlog'), 'x', ''), ifelse(par('ylog'), 'y', ''), sep = '')
}
log.flags <- strsplit(log, NULL)[[1L]]
use.xlog <- 'x' %in% log.flags
use.ylog <- 'y' %in% log.flags
pts <- xy.coords(x = x, y = y, recycle = TRUE, log = log)
# All input points must share a single x value for the offsets to make sense.
stopifnot((length(unique(pts$x)) <= 1))
if(use.xlog) pts$x <- log10(pts$x)
if(use.ylog) pts$y <- log10(pts$y)
offsets <- .calculateSwarm(pts$y, dsize = ysize * cex, gsize = xsize * cex,
side = side, priority = priority)
result <- data.frame(x = pts$x + offsets, y = y)
if(use.xlog) result$x <- 10 ^ result$x
result
}
### jitter points vertically
### jitter points vertically
# Counterpart of swarmx(): points that all share a single y position are
# offset up/down so they no longer overlap; x values are left untouched.
# Returns a data frame with columns x (original) and y (offset).
swarmy <- function(x, y,
xsize = xinch(0.08, warn.log = FALSE),
ysize = yinch(0.08, warn.log = FALSE),
log = NULL, cex = par("cex"), side = 0L,
priority = c("ascending", "descending", "density", "random", "none")) {
priority <- match.arg(priority)
# Default log specification mirrors the current device's axis settings.
if(is.null(log)) {
log <- paste(ifelse(par('xlog'), 'x', ''), ifelse(par('ylog'), 'y', ''), sep = '')
}
log.flags <- strsplit(log, NULL)[[1L]]
use.xlog <- 'x' %in% log.flags
use.ylog <- 'y' %in% log.flags
pts <- xy.coords(x = x, y = y, recycle = TRUE, log = log)
# All input points must share a single y value for the offsets to make sense.
stopifnot((length(unique(pts$y)) <= 1))
if(use.xlog) pts$x <- log10(pts$x)
if(use.ylog) pts$y <- log10(pts$y)
offsets <- .calculateSwarm(pts$x, dsize = xsize * cex, gsize = ysize * cex,
side = side, priority = priority)
result <- data.frame(x = x, y = pts$y + offsets)
if(use.ylog) result$y <- 10 ^ result$y
result
}
|
### Basic Models ###
# Fits several baseline regressors on the BlogFeedback data, reports the
# test-set mean squared error for each, and saves the fitted models as .rda.
library(data.table)
library(MatrixModels)
library(e1071)
library(FNN)
library(glmnet)
library(ranger)
library(xgboost)
# error measure: mean squared error of predictions `ydash` against truth `y`
mse = function(ydash, y) {
mse = mean((y - ydash)^2)
return(mse)
}
# load dataset (pre-split into separate train and test files)
train = fread("BlogFeedback-Train.csv")
test = fread("BlogFeedback-Test.csv")
# create design matrices (V281 is the response: number of comments)
train_x = model.Matrix(V281 ~ . - 1, data = train, sparse = FALSE)
train_x_sparse = model.Matrix(V281 ~ . - 1, data = train, sparse = TRUE)
train_y = train$V281
test_x = model.Matrix(V281 ~ . - 1, data = test, sparse = FALSE)
test_y = test$V281
train_xgb = xgb.DMatrix(data = as.matrix(train_x), label = train_y)
test_xgb = xgb.DMatrix(data = as.matrix(test_x), label = test_y)
# try kNN (fit once and reuse the predictions instead of refitting)
mdl_knn = knn.reg(train_x, test_x, train_y, k = 15)
pred_knn = mdl_knn$pred
mse(pred_knn, test_y)
save(mdl_knn, file = "mdl_knn.rda")
# try LASSO
# Fixed: glmnet's elastic-net mixing parameter `alpha` must lie in [0, 1];
# alpha = 1 is the lasso penalty (the previous alpha = 2 was invalid).
mdl_lasso = cv.glmnet(train_x_sparse, train_y, family = "gaussian", alpha = 1)
pred_lasso = predict(mdl_lasso, newx = test_x)
mse(pred_lasso, test_y)
save(mdl_lasso, file = "mdl_lasso.rda")
# try SVM
mdl_svm = svm(V281 ~ V52 + V55 + V61 + V51 + V54 + V21 + V6 + V10, data = train, kernel = "radial", cost = 2, gamma = 0.25)
pred_svm = predict(mdl_svm, test)
mse(pred_svm, test_y)
save(mdl_svm, file = "mdl_svm.rda")
# try random forest
mdl_rf = ranger(V281 ~ ., data = train, num.trees = 1000, mtry = 120, write.forest = TRUE)
pred_rf = predict(mdl_rf, test)
mse(pred_rf$predictions, test_y)
# Fixed: previously saved mdl_lasso into mdl_rf.rda (copy-paste bug).
save(mdl_rf, file = "mdl_rf.rda")
# try XGboost
mdl_xgb = xgboost(data = train_xgb, nround = 500, nthread = 4, max_depth = 6, eta = 0.025, subsample = 0.7, gamma = 3)
pred_xgb = predict(mdl_xgb, test_xgb)
mse(pred_xgb, test_y)
save(mdl_xgb, file = "mdl_xgb.rda")
|
/6.Code/2.Baseline-Model.R
|
no_license
|
rajbharat/Ensemble-Predict-No-Of-Comments-On-BlogPost
|
R
| false
| false
| 1,783
|
r
|
### Basic Models ###
# Fits several baseline regressors on the BlogFeedback data, reports the
# test-set mean squared error for each, and saves the fitted models as .rda.
library(data.table)
library(MatrixModels)
library(e1071)
library(FNN)
library(glmnet)
library(ranger)
library(xgboost)
# error measure: mean squared error of predictions `ydash` against truth `y`
mse = function(ydash, y) {
mse = mean((y - ydash)^2)
return(mse)
}
# load dataset (pre-split into separate train and test files)
train = fread("BlogFeedback-Train.csv")
test = fread("BlogFeedback-Test.csv")
# create design matrices (V281 is the response: number of comments)
train_x = model.Matrix(V281 ~ . - 1, data = train, sparse = FALSE)
train_x_sparse = model.Matrix(V281 ~ . - 1, data = train, sparse = TRUE)
train_y = train$V281
test_x = model.Matrix(V281 ~ . - 1, data = test, sparse = FALSE)
test_y = test$V281
train_xgb = xgb.DMatrix(data = as.matrix(train_x), label = train_y)
test_xgb = xgb.DMatrix(data = as.matrix(test_x), label = test_y)
# try kNN (fit once and reuse the predictions instead of refitting)
mdl_knn = knn.reg(train_x, test_x, train_y, k = 15)
pred_knn = mdl_knn$pred
mse(pred_knn, test_y)
save(mdl_knn, file = "mdl_knn.rda")
# try LASSO
# Fixed: glmnet's elastic-net mixing parameter `alpha` must lie in [0, 1];
# alpha = 1 is the lasso penalty (the previous alpha = 2 was invalid).
mdl_lasso = cv.glmnet(train_x_sparse, train_y, family = "gaussian", alpha = 1)
pred_lasso = predict(mdl_lasso, newx = test_x)
mse(pred_lasso, test_y)
save(mdl_lasso, file = "mdl_lasso.rda")
# try SVM
mdl_svm = svm(V281 ~ V52 + V55 + V61 + V51 + V54 + V21 + V6 + V10, data = train, kernel = "radial", cost = 2, gamma = 0.25)
pred_svm = predict(mdl_svm, test)
mse(pred_svm, test_y)
save(mdl_svm, file = "mdl_svm.rda")
# try random forest
mdl_rf = ranger(V281 ~ ., data = train, num.trees = 1000, mtry = 120, write.forest = TRUE)
pred_rf = predict(mdl_rf, test)
mse(pred_rf$predictions, test_y)
# Fixed: previously saved mdl_lasso into mdl_rf.rda (copy-paste bug).
save(mdl_rf, file = "mdl_rf.rda")
# try XGboost
mdl_xgb = xgboost(data = train_xgb, nround = 500, nthread = 4, max_depth = 6, eta = 0.025, subsample = 0.7, gamma = 3)
pred_xgb = predict(mdl_xgb, test_xgb)
mse(pred_xgb, test_y)
save(mdl_xgb, file = "mdl_xgb.rda")
|
library(data.table)
library(R.utils)
# Do not output scientific notation
options(scipen=999)
# initial garbage collection
gc()
# Command line arguments: <input CGmap.gz> <output .gz>
args <- commandArgs(TRUE)
CGmap <- args[1]
outFile <- args[2]
# Read a gzipped CGmap file, keep CpG-context cytosines only, and sum the
# methylated (C) and total (C+T) read counts of the two strands for each
# CpG locus, reported relative to the forward strand.
aggregateStrandsCG <- function(CGmap){
# Decompress into a temp file that fread() can read. Fixed: remove the
# temp file even if an error occurs below (previously it leaked on error).
tmpFile <- tempfile(tmpdir = tempdir(), fileext = ".CGmap.tmp")
on.exit(unlink(tmpFile), add = TRUE)
command <- paste("zcat ", CGmap, " > ", tmpFile, sep = "")
system(command)
# Read the file (chromosome, base, position, context, C reads, C+T reads)
dat <- fread(input = tmpFile, sep = "\t", select = c(1,2,3,5,7,8),
col.names = c("chr", "base", "position", "context",
"C_reads", "CT_reads"))
# Subset to CG context only
dat <- dat[dat$context == "CG", ]
# Locus id relative to the forward strand: a "G" row is the reverse-strand
# cytosine of a CpG, so shift its position back by one so it pairs with
# the forward-strand "C" row. (Redundant NA initialization removed.)
dat$locus <- ifelse(test = dat$base == "G",
yes = paste(dat$chr, dat$position - 1, sep = ":"),
no = paste(dat$chr, dat$position, sep = ":"))
# Drop the unused columns
dat <- dat[ ,c("chr", "base", "position", "context") := NULL]
# Sum the read counts for + and - strand
combined <- dat[, lapply(.SD, sum), by=.(locus), .SDcols=c("C_reads", "CT_reads")]
rm(dat)
# return the aggregated data object (temp file cleaned up via on.exit)
return(combined)
}
# Apply the function
dat <- aggregateStrandsCG(CGmap = CGmap)
# Write the gzip-compressed output file
out <- gzfile(outFile)
write.table(x = dat, file = out, quote = FALSE, sep = "\t",
row.names = FALSE, col.names = TRUE)
|
/analysis_scripts/aggregateStrandsFromCGMap.R
|
no_license
|
SamBuckberry/Neural_rosette_methylomes
|
R
| false
| false
| 1,877
|
r
|
library(data.table)
library(R.utils)
# Do not output scientific notation
options(scipen=999)
# initial garbage collection
gc()
# Command line arguments: <input CGmap.gz> <output .gz>
args <- commandArgs(TRUE)
CGmap <- args[1]
outFile <- args[2]
# Read a gzipped CGmap file, keep CpG-context cytosines only, and sum the
# methylated (C) and total (C+T) read counts of the two strands for each
# CpG locus, reported relative to the forward strand.
aggregateStrandsCG <- function(CGmap){
# Decompress into a temp file that fread() can read. Fixed: remove the
# temp file even if an error occurs below (previously it leaked on error).
tmpFile <- tempfile(tmpdir = tempdir(), fileext = ".CGmap.tmp")
on.exit(unlink(tmpFile), add = TRUE)
command <- paste("zcat ", CGmap, " > ", tmpFile, sep = "")
system(command)
# Read the file (chromosome, base, position, context, C reads, C+T reads)
dat <- fread(input = tmpFile, sep = "\t", select = c(1,2,3,5,7,8),
col.names = c("chr", "base", "position", "context",
"C_reads", "CT_reads"))
# Subset to CG context only
dat <- dat[dat$context == "CG", ]
# Locus id relative to the forward strand: a "G" row is the reverse-strand
# cytosine of a CpG, so shift its position back by one so it pairs with
# the forward-strand "C" row. (Redundant NA initialization removed.)
dat$locus <- ifelse(test = dat$base == "G",
yes = paste(dat$chr, dat$position - 1, sep = ":"),
no = paste(dat$chr, dat$position, sep = ":"))
# Drop the unused columns
dat <- dat[ ,c("chr", "base", "position", "context") := NULL]
# Sum the read counts for + and - strand
combined <- dat[, lapply(.SD, sum), by=.(locus), .SDcols=c("C_reads", "CT_reads")]
rm(dat)
# return the aggregated data object (temp file cleaned up via on.exit)
return(combined)
}
# Apply the function
dat <- aggregateStrandsCG(CGmap = CGmap)
# Write the gzip-compressed output file
out <- gzfile(outFile)
write.table(x = dat, file = out, quote = FALSE, sep = "\t",
row.names = FALSE, col.names = TRUE)
|
# Render an R Markdown file (path given on the command line) and wrap the
# resulting HTML with the site's header and footer includes.
set.seed(0102)
options(htmlwidgets.TOJSON_ARGS = list(pretty = TRUE))
Sys.setenv(R_KNITR_OPTIONS = 'knitr.chunk.tidy = FALSE')
##knitr::opts_chunk$set(out.width = '100%')
library(leaflet)
f = rmarkdown::render(commandArgs(TRUE))
print(f)
## combine with the headers and footers
header <- readLines("_includes/before_body.html")
a <- readLines(f)
footer <- readLines("_includes/after_body.html")
# Fixed: the footer was read but never appended to the rendered document.
a <- c(header, a, footer)
writeLines(a, f)
|
/R/compile.R
|
no_license
|
richardbeare/MelbourneSTEMIAccess
|
R
| false
| false
| 436
|
r
|
# Render an R Markdown file (path given on the command line) and wrap the
# resulting HTML with the site's header and footer includes.
set.seed(0102)
options(htmlwidgets.TOJSON_ARGS = list(pretty = TRUE))
Sys.setenv(R_KNITR_OPTIONS = 'knitr.chunk.tidy = FALSE')
##knitr::opts_chunk$set(out.width = '100%')
library(leaflet)
f = rmarkdown::render(commandArgs(TRUE))
print(f)
## combine with the headers and footers
header <- readLines("_includes/before_body.html")
a <- readLines(f)
footer <- readLines("_includes/after_body.html")
# Fixed: the footer was read but never appended to the rendered document.
a <- c(header, a, footer)
writeLines(a, f)
|
## Exploratory Data Analysis - Course Project 1
## Plot 3: energy sub metering over two days, one line per sub meter.
## Libraries:
library(sqldf)
## Download the data set:
## download.file("https://d396qusza40orc.cloudfront.net/exdata/data/household_power_consumption.zip", "household_power_consumption.zip")
## Load data into data frame:
## Read only the header row to infer column classes, then let sqldf filter
## the two target dates (zero-padded and unpadded forms) while reading.
top = read.csv("household_power_consumption.txt", header=TRUE, sep=";", nrows=1)
ColClass = sapply(top, class)
df = read.csv.sql( "household_power_consumption.txt"
, "select Sub_metering_1, Sub_metering_2, Sub_metering_3, Date, Time from file where Date in ('01/02/2007', '02/02/2007', '1/2/2007', '2/2/2007')"
, header=TRUE, sep=";", colClasses=ColClass)
## Convert date and time variables into one Date-Time variable:
df$DateTime = paste(df$Date, df$Time)
df$DateTime = strptime(df$DateTime, "%d/%m/%Y %H:%M:%S")
## Plot 3:
png("plot3.png")
with(df, plot(DateTime, Sub_metering_1
, type="n"
, ylab = "Energy sub metering"
, xlab = ""))
with(df, lines(DateTime, Sub_metering_1, col="grey"))
with(df, lines(DateTime, Sub_metering_2, col="red"))
with(df, lines(DateTime, Sub_metering_3, col="blue"))
legendch = names(df)[1:3]
## Fixed: pch="---" is not a valid plotting character (only a single
## character is used, with a warning); the legend keys are lines, so use
## lty/lwd to draw them.
legend("topright", lty=1, lwd=2, col=c("grey", "red", "blue"), legend=legendch)
dev.off()
|
/plot3.R
|
no_license
|
jerosmith/ExData_Plotting1
|
R
| false
| false
| 1,250
|
r
|
## Exploratory Data Analysis - Course Project 1
## Plot 3: energy sub metering over two days, one line per sub meter.
## Libraries:
library(sqldf)
## Download the data set:
## download.file("https://d396qusza40orc.cloudfront.net/exdata/data/household_power_consumption.zip", "household_power_consumption.zip")
## Load data into data frame:
## Read only the header row to infer column classes, then let sqldf filter
## the two target dates (zero-padded and unpadded forms) while reading.
top = read.csv("household_power_consumption.txt", header=TRUE, sep=";", nrows=1)
ColClass = sapply(top, class)
df = read.csv.sql( "household_power_consumption.txt"
, "select Sub_metering_1, Sub_metering_2, Sub_metering_3, Date, Time from file where Date in ('01/02/2007', '02/02/2007', '1/2/2007', '2/2/2007')"
, header=TRUE, sep=";", colClasses=ColClass)
## Convert date and time variables into one Date-Time variable:
df$DateTime = paste(df$Date, df$Time)
df$DateTime = strptime(df$DateTime, "%d/%m/%Y %H:%M:%S")
## Plot 3:
png("plot3.png")
with(df, plot(DateTime, Sub_metering_1
, type="n"
, ylab = "Energy sub metering"
, xlab = ""))
with(df, lines(DateTime, Sub_metering_1, col="grey"))
with(df, lines(DateTime, Sub_metering_2, col="red"))
with(df, lines(DateTime, Sub_metering_3, col="blue"))
legendch = names(df)[1:3]
## Fixed: pch="---" is not a valid plotting character (only a single
## character is used, with a warning); the legend keys are lines, so use
## lty/lwd to draw them.
legend("topright", lty=1, lwd=2, col=c("grey", "red", "blue"), legend=legendch)
dev.off()
|
#################################################################################
# File Name: Robustness_years.R
# Project: EU ETS effectiveness paper
# Purpose: Creates gsynth estimates and 95% CIs for different years of treatment assignment
# Data input: ../Data/ETS_analysis.RData
# Output File: ../Figures/si/treat_<year>.png (one plot per placebo year)
# Author: Patrick Bayer
# Date: 12 April 2020
#################################################################################
# Load required packages
library(foreign)
library(gsynth)
library(tidyverse)
# Load data
load("../Data/ETS_analysis.RData")
# Restrict data to ETS regulated and unregulated emissions
carbon <- carbon[carbon$sector=="Regulated" | carbon$sector=="Unregulated",]
carbon <- carbon[with(carbon, order(carbon$country,carbon$year,carbon$sector)),]
# Treatment assignment
# Treatment window of +/-5 years around 2008
D <- 2008
window.size <- 5
window <- seq(D-window.size,D+window.size,1)
# Loop through different years of treatment assignment (placebo years)
for(i in seq_along(window)){
# Create treatment indicator
carbon$ets <- 0
carbon$ets[carbon$sector=="Regulated" & carbon$year>=window[i]] <- 1
# Create indicator for regulated/unregulated fixed effects
carbon$ctrysector <- paste(carbon$countryname,carbon$sector)
#################################################################################
# Generalized synthetic control
#################################################################################
# Gsynth does not tolerate data with missings
carbon.new <- carbon[is.na(carbon$loggdp)==FALSE & is.na(carbon$logUNemit)==FALSE,]
e0 <- gsynth(logUNemit ~ ets + loggdp + loggdp2, data=carbon.new, index=c("ctrysector","year"), force="two-way", se=TRUE, min.T0=7, nboots=1000, seed=1234)
# Fixed: auto-printing is disabled inside a for loop, so a bare `e0` was a
# no-op; print() is required to actually show the model summary.
print(e0)
# #################################################################################
# # Plot ATT over time for different treatment years
# #################################################################################
year <- seq(min(carbon.new$year), max(carbon.new$year),1)
# Color settings: colorblind-friendly palette
# http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette
cols <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# ATT plot: 95% CI ribbon plus point-estimate line, both converted from log
# differences to percent differences via (exp(.) - 1) * 100.
p <- ggplot() +
geom_ribbon(aes(x=year,ymin=(exp(e0$est.att[,3])-1)*100,ymax=(exp(e0$est.att[,4])-1)*100),fill=cols[1]) +
geom_line(aes(x=year,y=(exp(e0$est.att[,1])-1)*100),color=cols[3],size=1.2) +
labs(x="Year",y="Difference in CO2 emissions (percent)", title="ATT Estimates for EU ETS, 2008-2016", subtitle=paste("Placebo test: Treatment assignments in year",window[i])) +
geom_vline(aes(xintercept=window[i])) +
geom_vline(aes(xintercept=D), size=1.5) +
geom_hline(aes(yintercept=0),lty=2)+
ylim(-40,20) +
scale_x_continuous(breaks=seq(min(year), max(year), by=5)) +
theme(plot.margin=unit(c(.5,.5,.5,.5),"cm"))
# Fixed: ggplot objects do not auto-print inside a loop; print() renders it.
print(p)
ggsave(p,filename=paste("../Figures/si/treat_",window[i],".png",sep=""),width=6.5, height=6)
}
#################################################################################
# END OF FILE
#################################################################################
|
/seminars/03 - match-n-synth/BayerAklin2020/Analysis/SI6_Robustness_years.R
|
no_license
|
l5d1l5/CausalInference
|
R
| false
| false
| 3,227
|
r
|
#################################################################################
# File Name: Robustness_years.R
# Project: EU ETS effectiveness paper
# Purpose: Creates gsynth estimates and 95% CIs for different years of treatment assignment
# Data input: ../Data/ETS_analysis.RData
# Output File:
# Author: Patrick Bayer
# Date: 12 April 2020
#################################################################################
# Load required packages
library(foreign)
library(gsynth)
library(tidyverse)
# Load data
# NOTE(review): load() must define a data.frame `carbon` with the columns used
# below (country, countryname, year, sector, loggdp, loggdp2, logUNemit).
load("../Data/ETS_analysis.RData")
# Restrict data to ETS regulated and unregulated emissions
carbon <- carbon[carbon$sector=="Regulated" | carbon$sector=="Unregulated",]
carbon <- carbon[with(carbon, order(carbon$country,carbon$year,carbon$sector)),]
# Treatment assignment
# Treatment window of +/-5 years around 2008
D <- 2008
window.size <- 5
window <- seq(D-window.size,D+window.size,1)
# Loop through different years of treatment assignment
# Each iteration reassigns the (placebo) treatment start year, refits the
# generalized synthetic control model, and saves the ATT plot for that year.
for(i in (1:length(window))){
  #i <- 1
  # Create treatment indicator
  carbon$ets <- 0
  carbon$ets[carbon$sector=="Regulated" & carbon$year>=window[i]] <- 1
  # Create indicator for regulated/unregulated fixed effects
  carbon$ctrysector <- paste(carbon$countryname,carbon$sector)
  #################################################################################
  # Generalized synthetic control
  #################################################################################
  # Gsynth does not tolerate data with missings
  carbon.new <- carbon[is.na(carbon$loggdp)==FALSE & is.na(carbon$logUNemit)==FALSE,]
  # Fixed seed makes the bootstrap standard errors reproducible across runs.
  e0 <- gsynth(logUNemit ~ ets + loggdp + loggdp2, data=carbon.new, index=c("ctrysector","year"), force="two-way", se=TRUE, min.T0=7, nboots=1000, seed=1234)
  e0
  # #################################################################################
  # # Plot ATT over time for different treatment years
  # #################################################################################
  year <- seq(min(carbon.new$year), max(carbon.new$year),1)
  # Color settings: colorblind-friendly palette
  # http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/#a-colorblind-friendly-palette
  cols <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
  # ATT plot
  # ATT and CI bounds are exponentiated because the outcome is in logs;
  # (exp(b)-1)*100 converts log differences to percent differences.
  p <- ggplot() +
    geom_ribbon(aes(x=year,ymin=(exp(e0$est.att[,3])-1)*100,ymax=(exp(e0$est.att[,4])-1)*100),fill=cols[1]) +
    geom_line(aes(x=year,y=(exp(e0$est.att[,1])-1)*100),color=cols[3],size=1.2) +
    labs(x="Year",y="Difference in CO2 emissions (percent)", title="ATT Estimates for EU ETS, 2008-2016", subtitle=paste("Placebo test: Treatment assignments in year",window[i])) +
    geom_vline(aes(xintercept=window[i])) +
    geom_vline(aes(xintercept=D), size=1.5) +
    geom_hline(aes(yintercept=0),lty=2)+
    ylim(-40,20) +
    scale_x_continuous(breaks=seq(min(year), max(year), by=5)) +
    theme(plot.margin=unit(c(.5,.5,.5,.5),"cm"))
  p
  ggsave(p,filename=paste("../Figures/si/treat_",window[i],".png",sep=""),width=6.5, height=6)
}
#################################################################################
# END OF FILE
#################################################################################
|
# main.R -- earliest-arrival times across the Prague public transport network.
# Reads a GTFS-like feed (opendata.praha.eu), restricts it to one (randomly
# sampled) service day, builds walk links and vehicle sub-paths between stops,
# then iteratively relaxes arrival times from an initial stop/time (a
# label-correcting shortest-path style loop) and plots the result.
# NOTE(review): `sample(1)` below makes the chosen day random -- set a seed
# before sourcing if reproducibility is required.
# Packages and functions --------------------------------------------------
# library(data.table)
library(tidyverse)
library(geosphere)
# library(igraph)
library(lubridate)
library(ggmap)
# Get data ----------------------------------------------------------------
#' The source of data: http://opendata.praha.eu
dir.create("_temp", showWarnings = FALSE)
fl <- "_temp/jrdata.zip"
# download.file("http://opendata.iprpraha.cz/DPP/JR/jrdata.zip", fl)
pth.dir <- "_temp/jrdata"
# unzip(fl, exdir = pth.dir)
pth.fls <- dir(pth.dir, full.names = TRUE)
# First file in the folder is a metadata file ("key: value" pairs, cp1250);
# the rest are the CSV feed tables (calendar, stops, stop_times, trips, ...).
jr.info <- readr::read_delim(pth.fls[1], delim = ":", skip = 2, locale = locale("cs", encoding = "windows-1250"), col_names = c("co", "hodnota"))
jr.data <- sapply(pth.fls[-1], readr::read_csv, locale = locale("cs"))
names(jr.data) <- names(jr.data) %>% basename %>% str_remove("[.].*$")
# Filter data ------------------------------------------------------------
#' Let's downdize the data just to the day we are interested in.
#' (Plus we'll add next day - just to complete trips startet before midnight..).
jr.data$calendar <- jr.data$calendar %>%
  mutate(start_date = ymd(start_date),
         end_date = ymd(end_date))
jr.data$calendar_dates <- jr.data$calendar_dates %>% mutate(date = ymd(date))
# pick dates
c.days <- jr.data$calendar_dates$date %>% sample(1)
c.days <- c.days + 0:1
# filter datasets
jr.data$calendar <- jr.data$calendar %>%
  filter(start_date <= c.days[1], end_date >= c.days[2])
# Services running on each day: calendar has one 0/1 column per weekday name,
# so the weekday label (lower-cased English) selects the right column.
l.available_services <- lapply(c.days,function(i_day) {
  i_wday <- lubridate::wday(i_day, week_start = 1, label = TRUE, abbr = FALSE,
                            locale = "English_United States.1252") %>% str_to_lower()
  kt <- jr.data$calendar[,i_wday] == 1
  jr.data$calendar$service_id[kt]
})
names(l.available_services) <- c.days
# Optionaly - remove services with an exception..
# jr.data$calendar_dates <- jr.data$calendar_dates %>%
#   filter(date %in% c.days, exception_type == 2)
# l.available_services <- lapply(l.available_services, function(available_services) {
#   setdiff(available_services, jr.data$calendar_dates$service_id)
# })
# Remove unavailable services
jr.data$trips <- jr.data$trips %>%
  filter(service_id %in% unlist(l.available_services))
l.available_trips <- lapply(l.available_services, function(available_services) {
  jr.data$trips %>%
    filter(service_id %in% available_services) %>%
    select(trip_id) %>%
    unlist(use.names = FALSE) %>%
    unique
})
jr.data$stop_times <- jr.data$stop_times %>%
  filter(trip_id %in% unlist(l.available_trips))
available_stops <- jr.data$stop_times$stop_id %>% unique
jr.data$stops <- jr.data$stops %>% filter(stop_id %in% available_stops)
# also remove stop times without arrival/departure time specified
jr.data$stop_times <- jr.data$stop_times %>%
  filter(!is.na(arrival_time) & !is.na(departure_time))
# Stop's id recode --------------------------------------------------------
#' To save some space lets recode stop's id to just id.
# d.stops_id_dict maps the compact integer id (row number) back to the
# original feed id and keeps the descriptive stop attributes for later joins.
d.stops_id_dict <- jr.data$stops %>%
  mutate(stop_id_orig = stop_id, stop_id = row_number()) %>%
  select(stop_id, stop_id_orig, stop_name, location_type, parent_station, wheelchair_boarding)
jr.data$stops <- jr.data$stops %>%
  rename(stop_id_orig = stop_id) %>%
  left_join(select(d.stops_id_dict, stop_id, stop_id_orig), by = "stop_id_orig") %>%
  select(stop_id, stop_lat, stop_lon)
jr.data$stop_times <- jr.data$stop_times %>%
  rename(stop_id_orig = stop_id) %>%
  left_join(select(d.stops_id_dict, stop_id, stop_id_orig), by = "stop_id_orig") %>%
  select(trip_id, stop_id, arrival_time, departure_time, stop_sequence)
# also we'll make concrete departure/arrival times
# (times-of-day are combined with the service date into POSIXct datetimes)
temp_stop_times <- lapply(as.character(c.days), function(i_day) {
  available_trips <- l.available_trips[[i_day]]
  jr.data$stop_times %>%
    filter(trip_id %in% available_trips) %>%
    mutate(arrival_time = as_datetime(paste(i_day, arrival_time)),
           departure_time = as_datetime(paste(i_day, departure_time)))
})
jr.data$stop_times <- do.call("rbind", temp_stop_times)
rm(temp_stop_times)
# Count walking distances between stops -----------------------------------
#' Let's make it simpliest - just count the distance on Earth's surface and then multiply it
#' by average human's walking speed.
#'
#' Optionally - use manhattan distance to reflect necesity of following roads etc.
c.avg_walking_speed <- 1.4 # source: wikipedia.org
# Pairwise great-circle distances -> walking durations (seconds), one row per
# unordered stop pair (lower triangle only).
d.stops_walking_dist <- geosphere::distm(jr.data$stops[,c("stop_lon", "stop_lat")]) %>%
  as.dist %>%
  broom::tidy(diagonal = FALSE, upper = FALSE) %>%
  mutate(duration = distance/c.avg_walking_speed) %>%
  select(-distance) %>%
  rename(stop_id.d = item1, stop_id.a = item2)
# d.stops_walking_dist$trip_id <- 0 # 0 means use your legs :)
# d.stops_walking_dist$departure_time <- NA # and you can start whenever you want
# Create trips
#' For each trip we will take into account possibility to use it for 1-k stops (starting on
#' whichever stop you want...)
#'
stops_seq <- seq(max(jr.data$stop_times$stop_sequence))
stops_seq_pairs <- expand.grid(stops_seq, stops_seq) %>%
  rename(stop_seq_id.d = Var1, stop_seq_id.a = Var2) %>%
  filter(stop_seq_id.d < stop_seq_id.a)
# For every ordered pair of positions within a trip, join departure at the
# earlier position to arrival at the later one -> one ride-able sub-path.
l.sub_paths <- stops_seq_pairs%>%
  apply(1, function(comb){
    d.departure <- jr.data$stop_times %>%
      filter(stop_sequence == comb["stop_seq_id.d"]) %>%
      select(trip_id, stop_id, departure_time) %>%
      rename(stop_id.d = stop_id)
    d.arrival <- jr.data$stop_times %>%
      filter(stop_sequence == comb["stop_seq_id.a"]) %>%
      select(trip_id, stop_id, arrival_time) %>%
      rename(stop_id.a = stop_id)
    d.path <- inner_join(d.departure, d.arrival, by = c("trip_id")) %>%
      filter(arrival_time >= departure_time & arrival_time < departure_time + ddays(1)) %>%
      select(stop_id.d, stop_id.a, departure_time, arrival_time)
  })
d.sub_paths <- do.call("rbind", l.sub_paths)
rm(l.sub_paths)
# d.stops_dist <- rbind(d.sub_paths, d.stops_walking_dist[,colnames(d.sub_paths)])
# rm(d.stops_walking_dist, d.sub_paths)
gc()
# Computing distances to given stop (at given time) -----------------------
init_stop <- 2539
init_time <- lubridate::ymd_hms(paste(as.character(c.days[1]),"07:00:00"))
c.max_iter <- 1e2
# init - distance by walk/one vehicle
# d.best holds the current best known arrival time per stop; `updated` flags
# stops whose time improved in the last pass (only those need re-expanding).
d.best <- d.stops_walking_dist %>%
  filter(stop_id.d == init_stop) %>%
  mutate(arrival_time = duration + init_time,
         updated = (stop_id.a == init_stop)) %>%
  rename(stop_id = stop_id.a) %>%
  select(stop_id, arrival_time, updated)
d.best <- rbind(d.best,
                tibble(stop_id = init_stop,
                       arrival_time = init_time,
                       updated = TRUE))
iter.no <- 0
n.updated <- sum(d.best$updated)
c.transfer_overhead_secs <- 30 # overhead for each changing a mean of transport (even vehicle -> walk and vice versa)
# Relaxation loop: repeat until no arrival time improves (or max iterations).
while (iter.no < c.max_iter && n.updated > 0) {
  iter.no <- iter.no + 1
  cat('Iter no. ', iter.no, ', last upd.:',n.updated,' (',as.character(Sys.time()),')\n')
  # find stops with updated arrivals
  d.checking <- d.best %>%
    filter(updated == TRUE) %>%
    rename(prev_arrival_time = arrival_time) %>%
    select(stop_id, prev_arrival_time)
  # try to use your legs
  d.update_by_walk <- d.checking %>%
    inner_join(d.stops_walking_dist, by = c("stop_id" = "stop_id.d")) %>%
    mutate(arrival_time = prev_arrival_time + duration + c.transfer_overhead_secs,
           prev_arrival_time = NULL, stop_id = NULL, duration = NULL) %>%
    group_by(stop_id.a) %>%
    summarise(new_arrival_time = min(arrival_time, na.rm = TRUE))
  # try to use public transport
  d.update_by_vehicle <- d.sub_paths %>%
    inner_join(d.checking, by = c("stop_id.d" = "stop_id")) %>%
    filter(prev_arrival_time + c.transfer_overhead_secs < departure_time) %>%
    group_by(stop_id.a) %>%
    summarise(new_arrival_time = min(arrival_time, na.rm = TRUE))
  # find the best of those ways for each destination
  d.update <- rbind(d.update_by_walk, d.update_by_vehicle) %>%
    group_by(stop_id.a) %>%
    summarise(new_arrival_time = min(new_arrival_time, na.rm = TRUE))
  # check if you make some best times
  d.update <- d.update %>% left_join(d.best, by = c("stop_id.a" = "stop_id")) %>%
    filter(new_arrival_time < arrival_time | is.na(arrival_time)) %>%
    select(stop_id.a, new_arrival_time)
  # do some updates (if any)
  d.best <- d.best %>%
    left_join(d.update, by = c("stop_id" = "stop_id.a")) %>%
    mutate(updated = !is.na(new_arrival_time),
           arrival_time = pmin(new_arrival_time, arrival_time, na.rm = TRUE)) %>%
    select(stop_id, arrival_time, updated)
  n.updated <- sum(d.best$updated)
}
# Agregation --------------------------------------------------------------
# Visualisation -----------------------------------------------------------
d.best <- d.best %>%
  left_join(d.stops_id_dict, by = "stop_id") %>%
  left_join(jr.data$stops, by = "stop_id") %>%
  mutate(min_duration = arrival_time - init_time)
d.best %>%
  ggplot(aes(x = stop_lat, y = stop_lon, col = as.numeric(min_duration)/3600)) +
  geom_point() +
  theme_bw() +
  scale_color_viridis_c()
d.best %>%
  filter(min_duration < 3600) %>%
  ggplot(aes(x = stop_lat, y = stop_lon, col = as.numeric(min_duration)/60)) +
  geom_point() +
  theme_bw() +
  scale_color_viridis_c()
init_stop_spatial <- jr.data$stops %>%
  filter(stop_id == init_stop) %>%
  select(stop_lat, stop_lon) %>%
  unlist
prague_map = ggmap::get_map(location = init_stop_spatial, maptype = "roadmap",
                            zoom = 11, color = "bw")
# install.packages("maps")
library(ggmap)
map_box <- ggmap::make_bbox(lon = jr.data$stops$stop_lon, lat = jr.data$stops$stop_lat, f = .01)
m <- ggmap::get_stamenmap(map_box, maptype = "toner-hybrid")
ggmap(m) + geom_point(data = jr.data$stops, aes(x = stop_lon, y = stop_lat, col = factor(location_type)))
|
/main.R
|
no_license
|
matuladominik/PublicTransportArrival
|
R
| false
| false
| 10,317
|
r
|
# main.R -- earliest-arrival times across the Prague public transport network.
# Reads a GTFS-like feed (opendata.praha.eu), restricts it to one (randomly
# sampled) service day, builds walk links and vehicle sub-paths between stops,
# then iteratively relaxes arrival times from an initial stop/time (a
# label-correcting shortest-path style loop) and plots the result.
# NOTE(review): `sample(1)` below makes the chosen day random -- set a seed
# before sourcing if reproducibility is required.
# Packages and functions --------------------------------------------------
# library(data.table)
library(tidyverse)
library(geosphere)
# library(igraph)
library(lubridate)
library(ggmap)
# Get data ----------------------------------------------------------------
#' The source of data: http://opendata.praha.eu
dir.create("_temp", showWarnings = FALSE)
fl <- "_temp/jrdata.zip"
# download.file("http://opendata.iprpraha.cz/DPP/JR/jrdata.zip", fl)
pth.dir <- "_temp/jrdata"
# unzip(fl, exdir = pth.dir)
pth.fls <- dir(pth.dir, full.names = TRUE)
# First file in the folder is a metadata file ("key: value" pairs, cp1250);
# the rest are the CSV feed tables (calendar, stops, stop_times, trips, ...).
jr.info <- readr::read_delim(pth.fls[1], delim = ":", skip = 2, locale = locale("cs", encoding = "windows-1250"), col_names = c("co", "hodnota"))
jr.data <- sapply(pth.fls[-1], readr::read_csv, locale = locale("cs"))
names(jr.data) <- names(jr.data) %>% basename %>% str_remove("[.].*$")
# Filter data ------------------------------------------------------------
#' Let's downdize the data just to the day we are interested in.
#' (Plus we'll add next day - just to complete trips startet before midnight..).
jr.data$calendar <- jr.data$calendar %>%
  mutate(start_date = ymd(start_date),
         end_date = ymd(end_date))
jr.data$calendar_dates <- jr.data$calendar_dates %>% mutate(date = ymd(date))
# pick dates
c.days <- jr.data$calendar_dates$date %>% sample(1)
c.days <- c.days + 0:1
# filter datasets
jr.data$calendar <- jr.data$calendar %>%
  filter(start_date <= c.days[1], end_date >= c.days[2])
# Services running on each day: calendar has one 0/1 column per weekday name,
# so the weekday label (lower-cased English) selects the right column.
l.available_services <- lapply(c.days,function(i_day) {
  i_wday <- lubridate::wday(i_day, week_start = 1, label = TRUE, abbr = FALSE,
                            locale = "English_United States.1252") %>% str_to_lower()
  kt <- jr.data$calendar[,i_wday] == 1
  jr.data$calendar$service_id[kt]
})
names(l.available_services) <- c.days
# Optionaly - remove services with an exception..
# jr.data$calendar_dates <- jr.data$calendar_dates %>%
#   filter(date %in% c.days, exception_type == 2)
# l.available_services <- lapply(l.available_services, function(available_services) {
#   setdiff(available_services, jr.data$calendar_dates$service_id)
# })
# Remove unavailable services
jr.data$trips <- jr.data$trips %>%
  filter(service_id %in% unlist(l.available_services))
l.available_trips <- lapply(l.available_services, function(available_services) {
  jr.data$trips %>%
    filter(service_id %in% available_services) %>%
    select(trip_id) %>%
    unlist(use.names = FALSE) %>%
    unique
})
jr.data$stop_times <- jr.data$stop_times %>%
  filter(trip_id %in% unlist(l.available_trips))
available_stops <- jr.data$stop_times$stop_id %>% unique
jr.data$stops <- jr.data$stops %>% filter(stop_id %in% available_stops)
# also remove stop times without arrival/departure time specified
jr.data$stop_times <- jr.data$stop_times %>%
  filter(!is.na(arrival_time) & !is.na(departure_time))
# Stop's id recode --------------------------------------------------------
#' To save some space lets recode stop's id to just id.
# d.stops_id_dict maps the compact integer id (row number) back to the
# original feed id and keeps the descriptive stop attributes for later joins.
d.stops_id_dict <- jr.data$stops %>%
  mutate(stop_id_orig = stop_id, stop_id = row_number()) %>%
  select(stop_id, stop_id_orig, stop_name, location_type, parent_station, wheelchair_boarding)
jr.data$stops <- jr.data$stops %>%
  rename(stop_id_orig = stop_id) %>%
  left_join(select(d.stops_id_dict, stop_id, stop_id_orig), by = "stop_id_orig") %>%
  select(stop_id, stop_lat, stop_lon)
jr.data$stop_times <- jr.data$stop_times %>%
  rename(stop_id_orig = stop_id) %>%
  left_join(select(d.stops_id_dict, stop_id, stop_id_orig), by = "stop_id_orig") %>%
  select(trip_id, stop_id, arrival_time, departure_time, stop_sequence)
# also we'll make concrete departure/arrival times
# (times-of-day are combined with the service date into POSIXct datetimes)
temp_stop_times <- lapply(as.character(c.days), function(i_day) {
  available_trips <- l.available_trips[[i_day]]
  jr.data$stop_times %>%
    filter(trip_id %in% available_trips) %>%
    mutate(arrival_time = as_datetime(paste(i_day, arrival_time)),
           departure_time = as_datetime(paste(i_day, departure_time)))
})
jr.data$stop_times <- do.call("rbind", temp_stop_times)
rm(temp_stop_times)
# Count walking distances between stops -----------------------------------
#' Let's make it simpliest - just count the distance on Earth's surface and then multiply it
#' by average human's walking speed.
#'
#' Optionally - use manhattan distance to reflect necesity of following roads etc.
c.avg_walking_speed <- 1.4 # source: wikipedia.org
# Pairwise great-circle distances -> walking durations (seconds), one row per
# unordered stop pair (lower triangle only).
d.stops_walking_dist <- geosphere::distm(jr.data$stops[,c("stop_lon", "stop_lat")]) %>%
  as.dist %>%
  broom::tidy(diagonal = FALSE, upper = FALSE) %>%
  mutate(duration = distance/c.avg_walking_speed) %>%
  select(-distance) %>%
  rename(stop_id.d = item1, stop_id.a = item2)
# d.stops_walking_dist$trip_id <- 0 # 0 means use your legs :)
# d.stops_walking_dist$departure_time <- NA # and you can start whenever you want
# Create trips
#' For each trip we will take into account possibility to use it for 1-k stops (starting on
#' whichever stop you want...)
#'
stops_seq <- seq(max(jr.data$stop_times$stop_sequence))
stops_seq_pairs <- expand.grid(stops_seq, stops_seq) %>%
  rename(stop_seq_id.d = Var1, stop_seq_id.a = Var2) %>%
  filter(stop_seq_id.d < stop_seq_id.a)
# For every ordered pair of positions within a trip, join departure at the
# earlier position to arrival at the later one -> one ride-able sub-path.
l.sub_paths <- stops_seq_pairs%>%
  apply(1, function(comb){
    d.departure <- jr.data$stop_times %>%
      filter(stop_sequence == comb["stop_seq_id.d"]) %>%
      select(trip_id, stop_id, departure_time) %>%
      rename(stop_id.d = stop_id)
    d.arrival <- jr.data$stop_times %>%
      filter(stop_sequence == comb["stop_seq_id.a"]) %>%
      select(trip_id, stop_id, arrival_time) %>%
      rename(stop_id.a = stop_id)
    d.path <- inner_join(d.departure, d.arrival, by = c("trip_id")) %>%
      filter(arrival_time >= departure_time & arrival_time < departure_time + ddays(1)) %>%
      select(stop_id.d, stop_id.a, departure_time, arrival_time)
  })
d.sub_paths <- do.call("rbind", l.sub_paths)
rm(l.sub_paths)
# d.stops_dist <- rbind(d.sub_paths, d.stops_walking_dist[,colnames(d.sub_paths)])
# rm(d.stops_walking_dist, d.sub_paths)
gc()
# Computing distances to given stop (at given time) -----------------------
init_stop <- 2539
init_time <- lubridate::ymd_hms(paste(as.character(c.days[1]),"07:00:00"))
c.max_iter <- 1e2
# init - distance by walk/one vehicle
# d.best holds the current best known arrival time per stop; `updated` flags
# stops whose time improved in the last pass (only those need re-expanding).
d.best <- d.stops_walking_dist %>%
  filter(stop_id.d == init_stop) %>%
  mutate(arrival_time = duration + init_time,
         updated = (stop_id.a == init_stop)) %>%
  rename(stop_id = stop_id.a) %>%
  select(stop_id, arrival_time, updated)
d.best <- rbind(d.best,
                tibble(stop_id = init_stop,
                       arrival_time = init_time,
                       updated = TRUE))
iter.no <- 0
n.updated <- sum(d.best$updated)
c.transfer_overhead_secs <- 30 # overhead for each changing a mean of transport (even vehicle -> walk and vice versa)
# Relaxation loop: repeat until no arrival time improves (or max iterations).
while (iter.no < c.max_iter && n.updated > 0) {
  iter.no <- iter.no + 1
  cat('Iter no. ', iter.no, ', last upd.:',n.updated,' (',as.character(Sys.time()),')\n')
  # find stops with updated arrivals
  d.checking <- d.best %>%
    filter(updated == TRUE) %>%
    rename(prev_arrival_time = arrival_time) %>%
    select(stop_id, prev_arrival_time)
  # try to use your legs
  d.update_by_walk <- d.checking %>%
    inner_join(d.stops_walking_dist, by = c("stop_id" = "stop_id.d")) %>%
    mutate(arrival_time = prev_arrival_time + duration + c.transfer_overhead_secs,
           prev_arrival_time = NULL, stop_id = NULL, duration = NULL) %>%
    group_by(stop_id.a) %>%
    summarise(new_arrival_time = min(arrival_time, na.rm = TRUE))
  # try to use public transport
  d.update_by_vehicle <- d.sub_paths %>%
    inner_join(d.checking, by = c("stop_id.d" = "stop_id")) %>%
    filter(prev_arrival_time + c.transfer_overhead_secs < departure_time) %>%
    group_by(stop_id.a) %>%
    summarise(new_arrival_time = min(arrival_time, na.rm = TRUE))
  # find the best of those ways for each destination
  d.update <- rbind(d.update_by_walk, d.update_by_vehicle) %>%
    group_by(stop_id.a) %>%
    summarise(new_arrival_time = min(new_arrival_time, na.rm = TRUE))
  # check if you make some best times
  d.update <- d.update %>% left_join(d.best, by = c("stop_id.a" = "stop_id")) %>%
    filter(new_arrival_time < arrival_time | is.na(arrival_time)) %>%
    select(stop_id.a, new_arrival_time)
  # do some updates (if any)
  d.best <- d.best %>%
    left_join(d.update, by = c("stop_id" = "stop_id.a")) %>%
    mutate(updated = !is.na(new_arrival_time),
           arrival_time = pmin(new_arrival_time, arrival_time, na.rm = TRUE)) %>%
    select(stop_id, arrival_time, updated)
  n.updated <- sum(d.best$updated)
}
# Agregation --------------------------------------------------------------
# Visualisation -----------------------------------------------------------
d.best <- d.best %>%
  left_join(d.stops_id_dict, by = "stop_id") %>%
  left_join(jr.data$stops, by = "stop_id") %>%
  mutate(min_duration = arrival_time - init_time)
d.best %>%
  ggplot(aes(x = stop_lat, y = stop_lon, col = as.numeric(min_duration)/3600)) +
  geom_point() +
  theme_bw() +
  scale_color_viridis_c()
d.best %>%
  filter(min_duration < 3600) %>%
  ggplot(aes(x = stop_lat, y = stop_lon, col = as.numeric(min_duration)/60)) +
  geom_point() +
  theme_bw() +
  scale_color_viridis_c()
init_stop_spatial <- jr.data$stops %>%
  filter(stop_id == init_stop) %>%
  select(stop_lat, stop_lon) %>%
  unlist
prague_map = ggmap::get_map(location = init_stop_spatial, maptype = "roadmap",
                            zoom = 11, color = "bw")
# install.packages("maps")
library(ggmap)
map_box <- ggmap::make_bbox(lon = jr.data$stops$stop_lon, lat = jr.data$stops$stop_lat, f = .01)
m <- ggmap::get_stamenmap(map_box, maptype = "toner-hybrid")
ggmap(m) + geom_point(data = jr.data$stops, aes(x = stop_lon, y = stop_lat, col = factor(location_type)))
|
trim <-
function(x) {
  # Strip leading and trailing whitespace from each element of `x`,
  # leaving interior characters untouched.
  #
  # BUG FIX: the previous body, sub(" *([^ ]+) *", "\\1", x), rewrote only
  # the first space-delimited token and therefore also deleted the first
  # *internal* run of spaces (trim("a b") returned "ab").  Anchoring the
  # patterns at the string ends makes this a true trim; [[:space:]] also
  # covers tabs/newlines, matching trimLR() below.
  #
  # Args:
  #   x: character vector (or anything sub() coerces to one).
  # Returns:
  #   Character vector of the same length with outer whitespace removed.
  sub("^[[:space:]]+", "", sub("[[:space:]]+$", "", x))
}
#===================================================================================================
trimLR <-
function(x) {
  # Remove leading and trailing whitespace (spaces, tabs, ...) from every
  # element of `x`; interior whitespace is preserved.  The lazy capture
  # group keeps trailing blanks out of the retained text.
  strip_pattern <- "^[[:space:]]*(.*?)[[:space:]]*$"
  sub(strip_pattern, "\\1", x, perl = TRUE)
}
#===================================================================================================
NSE <-
function(yobs, ysim) {
  # Nash-Sutcliffe efficiency of simulated values against observations.
  #
  # Args:
  #   yobs: numeric vector of observed values.
  #   ysim: numeric vector of simulated values, same length as yobs.
  # Returns:
  #   One-row data.frame with columns:
  #     n    - number of observations,
  #     RSO  - ratio of simulated mean to observed mean (bias measure),
  #     NSEC - Nash-Sutcliffe efficiency coefficient (1 = perfect fit).
  total_ss    <- sum((yobs - mean(yobs))^2)  # variability around observed mean
  residual_ss <- sum((ysim - yobs)^2)        # model error sum of squares
  data.frame(n    = length(yobs),
             RSO  = mean(ysim) / mean(yobs),
             NSEC = 1.0 - residual_ss / total_ss)
}
#===================================================================================================
cluster.no <-
function(mydata){
  # Elbow ("scree") plot for choosing the number of k-means clusters:
  # plots within-group sum of squares against k = 1 .. nrow(mydata)-1.
  #
  # Args:
  #   mydata: numeric data.frame/matrix, one row per observation.
  # Returns:
  #   Invisibly NULL (called for the plot side effect).
  # Determine number of clusters
  nRow<-nrow(mydata)
  # k = 1 has a closed form: total SS = (n-1) * sum of column variances,
  # so kmeans() is only needed for k >= 2.
  wss <- (nRow-1)*sum(apply(mydata,2,var))
  # BUG FIX: the original loop header was `2:nRow-1`, which R parses as
  # (2:nRow) - 1 == 1:(nRow-1); it started at k = 1 and clobbered the
  # analytic value above.  The intended range is 2:(nRow - 1).  The guard
  # also avoids a backwards sequence when nRow < 3.
  if (nRow >= 3) {
    for (i in 2:(nRow - 1)) {
      wss[i] <- sum(kmeans(mydata, centers = i)$withinss)
    }
  }
  plot(1:(nRow-1), wss, type="b", xlab="Number of Clusters",
       ylab="Within groups sum of squares")
}
#===================================================================================================
histNorm <-
function(x,nbreaks,xlab,...){
  # Draw a frequency histogram of `x` and overlay the normal density fitted
  # to the sample mean and standard deviation, rescaled to the count axis.
  #
  # Args:
  #   x:       numeric vector to summarise.
  #   nbreaks: breakpoint specification passed to hist(breaks=).
  #   xlab:    x-axis label.
  #   ...:     currently unused.
  freq_hist <- hist(x, breaks = nbreaks, type = "count", col = "red",
                    xlab = xlab, ylab = "Frequency", main = NULL,
                    border = TRUE)
  # Evaluate the fitted normal on a 40-point grid over the data range, then
  # convert density to expected counts via bin width * sample size.
  grid_x <- seq(min(x), max(x), length = 40)
  bin_width <- diff(freq_hist$mids[1:2])
  curve_y <- dnorm(grid_x, mean = mean(x), sd = sd(x)) * bin_width * length(x)
  lines(grid_x, curve_y, col = "blue", lwd = 2)
}
#===================================================================================================
bar.compare <-
function(x,xylab,model1,std=TRUE, group=TRUE, horiz=FALSE, LSD=TRUE,diffLetter,fontSize,...) {
  # Bar plot of treatment means with error bars and significance letters.
  #
  # Args:
  #   x:          data.frame of group results; col 1 = treatment labels,
  #               col 2 = means, col 3 = significance letters, col 4 =
  #               replication counts, col 5 = dispersion (std.err or similar).
  #   xylab:      length-2 character vector: x label (= treatment column name
  #               in the model frame) and y label.
  #   model1:     fitted model (aov/lm) whose model frame supplies the raw
  #               response (col 1) and the treatment factor.
  #   std:        if TRUE, error bars are x[,5]*sqrt(x[,4]) (scale the per-mean
  #               value up by sqrt(n)); otherwise x[,5] is used directly.
  #   group:      if TRUE, print the significance letters above the bars.
  #   horiz:      horizontal bars if TRUE.
  #   LSD:        if FALSE, letters in x[,3] are replaced by `diffLetter`.
  #   diffLetter: alternative letter vector (used when LSD = FALSE).
  #   fontSize:   cex for axis/label/letter text.
  #   ...:        forwarded to barplot().
  #
  # NOTE(review): tapply.stat() is not defined in this file -- presumably
  # from the 'agricolae' package; confirm it is attached before use.
  y<-x[,2]
  #----------------------
  # Recompute per-treatment n from the model frame so bar labels can show it.
  A1<-model1$model
  y1<-A1[,1]
  ipch<-pmatch(xylab[1],names(A1))
  # NOTE(review): `treatment` in the message below is undefined in this scope
  # (probably meant xylab[1]) -- this error branch would itself error.
  if( is.na(ipch)) return(cat("Name: ",treatment,"\n",names(A1)[-1],"\n"))
  #name.t <-names(A)[ipch]
  trt<-A1[,ipch]
  #name.y <- names(A)[1]
  junto <- subset(data.frame(y1, trt), is.na(y1) == FALSE)
  means <- tapply.stat(junto[, 1], junto[, 2], stat="mean") #change
  sds <-   tapply.stat(junto[, 1], junto[, 2], stat="sd") #change
  nn <-   tapply.stat(junto[, 1], junto[, 2], stat="length") #change
  # Sort counts to match the (descending-mean) order of the bars in x.
  o1=order(means[,2],decreasing = TRUE)
  nn1 = rbind(nn[o1,2])
  nn1[1]<-paste("n=",nn1[1])
  #----------------------
  names(y)<-paste(x[,1],"\n",nn1)
  # Error-bar endpoints.
  if( std ) {
    nivel0<-x[,2]-x[,5]*sqrt(x[,4])
    nivel1<-x[,2]+x[,5]*sqrt(x[,4])
  }
  else {
    nivel0<-x[,2]-x[,5]
    nivel1<-x[,2]+x[,5]
  }
  n<-length(y)
  # `tope` is a 5% headroom margin so letters fit above the tallest bar.
  tope<-max(nivel1)/20
  Ytop = max(nivel1)+tope
  Ybottom = min(nivel1)-tope
  # Keep the axis anchored at zero unless negative values force it lower.
  if(Ybottom>0) {
    indice<-barplot(y,horiz=horiz, xlab=xylab[1], ylab=xylab[2],ylim = c(0,Ytop),cex.names=fontSize,cex.axis=fontSize,cex.lab=fontSize,...)
  }
  else {
    indice<-barplot(y,horiz=horiz, xlab=xylab[1], ylab=xylab[2],ylim = c(Ybottom,Ytop),cex.names=fontSize, cex.axis=fontSize,cex.lab=fontSize,...)
  }
  # Draw error bars with bracket/dash caps; axis roles swap when horizontal.
  for ( i in 1:n) {
    if (horiz) {
      lines(rbind(c(nivel0[i],indice[i]),c(nivel1[i],indice[i])),col="red")
      text( cex=1,nivel0[i],indice[i],"[")
      text( cex=1,nivel1[i],indice[i],"]")
    }
    else {
      lines(rbind(c(indice[i],nivel0[i]),c(indice[i],nivel1[i])),col="red")
      text( cex=1,indice[i],nivel0[i],"---")
      text( cex=1,indice[i],nivel1[i],"---")
    }
  }
  # Significance letters above each bar (from x[,3] or the override vector).
  if(group) {
    if(!LSD){
      x[,3]<-diffLetter
    }
    for ( i in 1:n)
      text(indice[i],nivel1[i]+tope*0.5,trimLR(x[i,3]),cex=fontSize)
  }
}
#===================================================================================================
LSD1.test <-
function (y, trt, DFerror, MSerror, alpha = 0.05, p.adj = c("none",
    "holm", "hochberg", "bonferroni", "BH", "BY", "fdr"), group = TRUE,
    main = NULL)
{
    # Least-Significant-Difference multiple comparison test (a local variant
    # of agricolae's LSD.test).
    #
    # Args:
    #   y:       numeric response vector, OR a fitted aov/lm object (then
    #            DFerror/MSerror are derived from the fit and `trt` must be
    #            the name of the treatment column in the model frame).
    #   trt:     treatment factor (or its column name when y is a model).
    #   DFerror: residual degrees of freedom (ignored when y is a model).
    #   MSerror: residual mean square (ignored when y is a model).
    #   alpha:   significance level.
    #   p.adj:   p-value adjustment method (see p.adjust()).
    #   group:   TRUE -> letter grouping output; FALSE -> pairwise table.
    #   main:    title printed in the console report.
    # Returns (invisibly):
    #   data.frame of treatment means with grouping letters and CI bounds
    #   (group=TRUE) or of means with LCL/UCL (group=FALSE).
    #
    # NOTE(review): tapply.stat() and order1.group() resolve outside this
    # block (order1.group is defined below; tapply.stat presumably from
    # 'agricolae') -- confirm both are available.
    p.adj <- match.arg(p.adj)
    clase<-c("aov","lm")
    name.y <- paste(deparse(substitute(y)))
    name.t <- paste(deparse(substitute(trt)))
    # If a fitted model was supplied, pull response/treatment/error terms
    # out of it instead of using the explicit arguments.
    if("aov"%in%class(y) | "lm"%in%class(y)){
    A<-y$model
    DFerror<-df.residual(y)
    MSerror<-deviance(y)/DFerror
    y<-A[,1]
    ipch<-pmatch(trt,names(A))
    if( is.na(ipch)) return(cat("Name: ",trt,"\n",names(A)[-1],"\n"))
    name.t <-names(A)[ipch]
    trt<-A[,ipch]
    name.y <- names(A)[1]
    }
    # Per-treatment summary statistics on the NA-free data.
    junto <- subset(data.frame(y, trt), is.na(y) == FALSE)
    means <- tapply.stat(junto[, 1], junto[, 2], stat="mean") #change
    sds <-   tapply.stat(junto[, 1], junto[, 2], stat="sd") #change
    nn <-   tapply.stat(junto[, 1], junto[, 2], stat="length") #change
    std.err <- sds[, 2]/sqrt(nn[, 2])
    Tprob <- qt(1 - alpha/2, DFerror)
    LCL <- means[,2]-Tprob*std.err
    UCL <- means[,2]+Tprob*std.err
    means <- data.frame(means, std.err, replication = nn[, 2],
        LCL, UCL)
    names(means)[1:2] <- c(name.t, name.y)
    #row.names(means) <- means[, 1]
    ntr <- nrow(means)
    nk <- choose(ntr, 2)
    # For adjusted p-values, bisect for the nominal level x whose adjusted
    # value equals alpha, then use the corresponding critical t.
    if (p.adj != "none")
    {
        a <- 1e-06
        b <- 1
        for (i in 1:100) {
            x <- (b + a)/2
            d <- p.adjust(x, n = nk, p.adj) - alpha
            fa <- p.adjust(a, n = nk, p.adj) - alpha
            if (d * fa < 0)
                b <- x
            if (d * fa > 0)
                a <- x
        }
        Tprob <- qt(1 - x/2, DFerror)
    }
    nr <- unique(nn[, 2])
    # Console report.
    cat("\nwgsStudy:", main)
    cat("\n\nLSD t Test for", name.y, "\n")
    if (p.adj != "none")
        cat("P value adjustment method:", p.adj, "\n")
    cat("\nMean Square Error: ",MSerror,"\n\n")
    cat(paste(name.t,",",sep="")," means and individual (",(1-alpha)*100,"%) CI\n\n")
    print(data.frame(row.names = means[,1], means[,-1]))
    cat("\nalpha:",alpha,"; Df Error:",DFerror)
    cat("\nCritical Value of t:", Tprob,"\n")
    # Letter-grouping branch: equal n uses the plain LSD; unbalanced designs
    # fall back to the harmonic mean of cell sizes.
    if (group) {
        if (length(nr) == 1) {
            LSD <- Tprob * sqrt(2 * MSerror/nr)
            cat("\nLeast Significant Difference", LSD)
        }
        else {
            nr1 <- 1/mean(1/nn[, 2])
            LSD1 <- Tprob * sqrt(2 * MSerror/nr1)
            cat("\nLeast Significant Difference", LSD1)
            cat("\nHarmonic Mean of Cell Sizes ", nr1)
        }
        cat("\nMeans with the same letter are not significantly different.")
        cat("\n\nGroups, Treatments and means\n")
        output <- order1.group(means[, 1], means[, 2], means[,
            4], MSerror, Tprob, means[, 3])
        w<-order(means[,2],decreasing = TRUE)
        output <- data.frame(output,LCI=means[w,5],UCI=means[w,6])
    }
    # Pairwise-comparison branch: one row per treatment pair with the
    # difference, (possibly adjusted) p-value, stars, and CI for the diff.
    if (!group) {
        comb <- combn(ntr, 2)
        nn <- ncol(comb)
        dif <- rep(0, nn)
        LCL1<-dif
        UCL1<-dif
        sig<-NULL
        pvalue <- rep(0, nn)
        for (k in 1:nn) {
            i <- comb[1, k]
            j <- comb[2, k]
            # Order each pair so the larger mean comes first.
            if (means[i, 2] < means[j, 2]){
                comb[1, k]<-j
                comb[2, k]<-i
            }
            dif[k] <- abs(means[i, 2] - means[j, 2])
            sdtdif <- sqrt(MSerror * (1/means[i, 4] + 1/means[j,
                4]))
            pvalue[k] <- 2 * (1 - pt(dif[k]/sdtdif, DFerror))
            if (p.adj != "none")
                pvalue[k] <- p.adjust(pvalue[k], n = nk, p.adj)
            pvalue[k] <- round(pvalue[k],6)
            LCL1[k] <- dif[k] - Tprob*sdtdif
            UCL1[k] <- dif[k] + Tprob*sdtdif
            sig[k]<-" "
            if (pvalue[k] <= 0.001) sig[k]<-"***"
            else if (pvalue[k] <= 0.01) sig[k]<-"**"
            else if (pvalue[k] <= 0.05) sig[k]<-"*"
            else if (pvalue[k] <= 0.1) sig[k]<-"."
        }
        tr.i <- means[comb[1, ],1]
        tr.j <- means[comb[2, ],1]
        output<-data.frame("Difference" = dif, pvalue = pvalue,sig,LCL=LCL1,UCL=UCL1)
        rownames(output)<-paste(tr.i,tr.j,sep=" - ")
        cat("\nComparison between treatments means\n\n")
        print(output)
        output <- data.frame(trt = means[, 1], means = means[,
            2], M = "", N = means[, 4], std.err ,LCL,UCL)
    }
    invisible(output)
}
#=============================================================================
order1.group <-
function(trt,means,N,MSerror,Tprob,std.err,parameter=1, snk=0, DFerror=NULL,alpha=NULL,sdtdif=NULL) {
  # Assign significance grouping letters to treatment means (LSD / Tukey /
  # SNK style).  Means are sorted in descending order and treatments whose
  # difference is below the critical value share a letter.
  #
  # Args:
  #   trt:       treatment labels.
  #   means:     treatment means (same order as trt).
  #   N:         replication counts (replaced by their harmonic mean below).
  #   MSerror:   residual mean square.
  #   Tprob:     critical value (t or studentized range, per `snk`).
  #   std.err:   standard errors of the means (carried into the output).
  #   parameter: multiplier inside the critical-difference formula.
  #   snk:       0 = fixed Tprob (LSD); 1 = Tukey; 2 = SNK (Tprob recomputed
  #              from qtukey at each comparison span).
  #   DFerror, alpha: needed only when snk != 0.
  #   sdtdif:    optional fixed standard error of a difference; overrides
  #              the MSerror-based formula when supplied.
  # Returns:
  #   data.frame(trt, means, M, N, std.err) sorted by descending mean, where
  #   M holds the grouping letters; also prints one line per treatment.
  #
  # NOTE(review): lastC() is not defined in this file -- presumably
  # agricolae's helper returning the last non-blank character of a string.
  # Replace counts by their harmonic mean so one common n is used throughout.
  N<-rep(1/mean(1/N),length(N))
  n<-length(means)
  z<-data.frame(trt,means,N,std.err)
  # sort treatments by mean, descending
  w<-z[order(z[,2],decreasing = TRUE), ]
  M<-rep("",n)
  k<-1
  j<-1
  k<-1
  cambio<-n
  cambio1<-0
  chequeo=0
  M[1]<-letters[k]
  # Sweep down the sorted means: extend the current letter while differences
  # stay below the critical value; start a new letter at the first break and
  # back-track (inner v loop) to find where the new letter's span begins.
  while(j<n) {
    chequeo<-chequeo+1
    if (chequeo > n) break
    for(i in j:n) {
      nx<-abs(i-j)+1
      if (nx==1) nx=2
      if(snk ==1 ) Tprob <- qtukey(p=1-alpha,nmeans=nx, df=DFerror)
      if(snk ==2 ) Tprob <- qtukey(p=(1-alpha)^(nx-1),nmeans=nx, df=DFerror)
      if(is.null(sdtdif)) minimo<-Tprob*sqrt(parameter*MSerror*(1/N[i]+1/N[j]))
      if(!is.null(sdtdif)) minimo<-Tprob*sdtdif
      # s == TRUE -> means i and j are NOT significantly different.
      s<-abs(w[i,2]-w[j,2])<=minimo
      if(s) {
        if(lastC(M[i]) != letters[k])M[i]<-paste(M[i],letters[k],sep="")
      }
      else {
        # Significant break: open the next letter at position `cambio` and
        # scan back from j to find the first mean still different from it.
        k<-k+1
        cambio<-i
        cambio1<-0
        ja<-j
        for(jj in cambio:n) M[jj]<-paste(M[jj]," ",sep="")
        M[cambio]<-paste(M[cambio],letters[k],sep="")
        for( v in ja:cambio) {
          nx<-abs(v-cambio)+1
          if(nx == 1) nx=2
          if(snk ==1 ) Tprob <- qtukey(p=1-alpha,nmeans=nx, df=DFerror)
          if(snk ==2 ) Tprob <- qtukey(p=(1-alpha)^(nx-1),nmeans=nx, df=DFerror)
          if(is.null(sdtdif)) minimo<-Tprob*sqrt(parameter*MSerror*(1/N[i]+1/N[j]))
          if(!is.null(sdtdif)) minimo<-Tprob*sdtdif
          if(abs(w[v,2]-w[cambio,2])>minimo) {j<-j+1
            cambio1<-1
          }
          else break
        }
        break
      }
    }
    if (cambio1 ==0 )j<-j+1
  }
  #-----
  # Assemble and print the final table in sorted order.
  w<-data.frame(w,stat=M)
  trt<-as.character(w$trt)
  means<-as.numeric(w$means)
  N<-as.numeric(w$N)
  std.err<- as.numeric(w$std.err)
  # Pad/truncate labels to a common width for the console listing.
  cmax<-max(nchar(trt))
  trt<-paste(trt," ")
  trt<-substr(trt,1,cmax)
  for(i in 1:n){
    cat(M[i],"\t",trt[i],"\t",means[i],"\n")
  }
  output<-data.frame(trt,means,M,N,std.err)
  return(output)
}
#=============================================================================
KW.test <- function (resp, categ, probs = 0.05, cont = NULL,...)
{
  # Nonparametric multiple comparisons of mean ranks after a Kruskal-Wallis
  # test (Nemenyi/Dunn style, normal approximation).
  #
  # Args:
  #   resp:  numeric response vector.
  #   categ: grouping factor (same length as resp).
  #   probs: family-wise significance level.
  #   cont:  NULL for all pairwise comparisons, or "one-tailed" /
  #          "two-tailed" for comparisons of each treatment vs the first
  #          (control) group only.
  #   ...:   unused.
  # Returns:
  #   data.frame with one row per comparison: vname (label "A-B"), difv
  #   (absolute mean-rank difference), lims (critical difference), sig
  #   (TRUE when the difference is significant).
  # Drop incomplete cases up front; warn how many rows were lost.
  db<-na.omit(data.frame(resp,categ))
  if(nrow(db)!=length(resp)) warning(paste(length(resp)-nrow(db),"lines including NA have been omitted"))
  resp<-db[,1]
  categ<-db[,2]
  # Mean rank per group drives the test; groups are then ordered by the
  # mean of the raw response (descending) for reporting.
  lst <- split(rank(resp), categ)
  avg <- eval(bquote(tapply(.(as.name(colnames(db)[1])),.(as.name(colnames(db)[2])),mean)))
  name <- names(lst)
  R <- sapply(lst, mean)
  n <- sapply(lst, length)
  n<-n[order(avg,decreasing = TRUE)]
  R<-R[order(avg,decreasing = TRUE)]
  avg<-avg[order(avg,decreasing = TRUE)]
  name<-names(avg)
  N = length(resp)
  dif <- abs(outer(R, R, "-"))
  # All-pairs branch: Bonferroni-style z over k*(k-1) comparisons.
  if (is.null(cont)) {
    difv <- NULL
    vname <- NULL
    indices <- NULL
    for (i in 1:(length(name) - 1)) {
      for (j in (i + 1):length(name)) {
        vname <- c(vname, paste(name[i], "-", name[j], sep = ""))
        indices <- rbind(indices, c(i, j))
        difv<-c(difv,dif[i,j])
      }
    }
    names(difv) <- vname
    z <- qnorm(probs/(length(lst) * (length(lst) - 1)), lower.tail = FALSE)
    # Critical difference uses the Kruskal-Wallis rank variance N(N+1)/12.
    lims <- z * sqrt(N * (N + 1)/12 * (1/n[indices[1:length(vname),1]] + 1/n[indices[1:length(vname), 2]]))
    names(lims) <- vname
    stat <- "Multiple comparison test after Kruskal-Wallis"
  }
  # Treatments-vs-control branch (control = first ordered group).
  else {
    vname = NULL
    indices = NULL
    for (j in 2:length(dif[1, ])) {
      vname <- c(vname, paste(name[1], "-", name[j], sep = ""))
      indices <- rbind(indices, c(1, j))
    }
    dif <- dif[1, 2:length(dif[1, ])]
    names(dif) <- vname
    difv<-dif
    choice <- pmatch(cont, c("one-tailed", "two-tailed"),
                     nomatch = 3)
    if (choice == 1) {
      z <- qnorm(probs/2 * (length(lst) - 1), lower.tail = FALSE)
      lims <- z * sqrt(N * (N + 1)/12 * (1/n[indices[1:length(vname),
                                                     1]] + 1/n[indices[1:length(vname), 2]]))
      names(lims) <- vname
      stat <- "Multiple comparison test after Kruskal-Wallis, treatments vs control (one-tailed)"
    }
    if (choice == 2) {
      z <- qnorm(probs/(length(lst) - 1), lower.tail = FALSE)
      lims <- z * sqrt(N * (N + 1)/12 * (1/n[indices[1:length(vname),
                                                     1]] + 1/n[indices[1:length(vname), 2]]))
      names(lims) <- vname
      stat <- "Multiple comparison test after Kruskal-Wallis, treatment vs control (two-tailed)"
    }
    if (choice == 3)
      stop("Values must be 'one-tailed' or 'two-tailed', partial matching accepted")
  }
  # output <- list(statistic = stat, p.value = probs, dif.com = data.frame(obs.dif = difv,
  #  critical.dif = lims, difference = ifelse((difv - lims) > 0, TRUE, FALSE)))
  # Significant when the observed rank difference exceeds the critical one.
  sig<-ifelse((difv - lims) > 0, TRUE, FALSE)
  output <- data.frame(vname,difv, lims, sig)
  row.names(output)<-NULL
  #class(output) <- c("mc", "list")
  output
}
#=============================================================================
# Build letter groupings from KW.test() results (Kruskal-Wallis analogue
# of order1.group()).
#
# LSD    : data.frame already sorted by decreasing mean; column 1 holds the
#          treatment names.
# KWtest : output of KW.test(); column 1 is the "a-b" pair label, column 4
#          the logical significance flag for that pair.
#
# Returns a character vector of grouping letters, one per treatment, in the
# same (sorted) order as LSD.  Relies on trimLR(), lastC() and NsameLetters()
# defined elsewhere in this file.
KW.group <-
function(LSD,KWtest)#(trt,means,N,MSerror,Tprob,std.err,parameter=1, snk=0, DFerror=NULL,alpha=NULL,sdtdif=NULL) {
{
n<-nrow(LSD) #LSD already sorted
nKW<-nrow(KWtest)
M<-rep("",n)           # accumulated letters per treatment
k<-1                   # index of the current letter
j<-1                   # index of the reference treatment
cambio<-n
cambio1<-0
chequeo=0              # safety counter so the while loop cannot run forever
M[1]<-letters[k]
trt<-trimLR(LSD[,1])
while(j<n) {
  chequeo<-chequeo+1
  if (chequeo > n) break
  for(i in (j+1):n) {
    vname <- paste(trt[j], "-", trt[i], sep = "")#c(paste(trt[j], "-", trt[i], sep = ""),paste(trt[i], "-", trt[j], sep = ""))
	# Rows of KWtest holding pairs (j, j+1..n): KW.test() emits pairs in
	# nested-loop order, so the pairs starting at j occupy this contiguous
	# slice of the table.
	for(iKW in (0.5*(j-1)*(2*n-j)+1):(0.5*j*(2*n-j-1))){ #1:nKW
	  if(vname==trimLR(KWtest[iKW,1])){
	    s<-KWtest[iKW,4]
#	    cat(paste(iKW,"\t",s,"\n"))
	    break}  #if vname
	} #end for iKW
	# NOTE(review): if no row label matched above, `s` keeps its value from a
	# previous iteration (or is undefined on the very first pass) — confirm
	# KWtest always contains every pair in the expected order.
	if(s)  #s=TRUE, difference
	{
	  if(M[i]!="") break  #&!is.null(match(lastC(M[i]),letters))
	  else{
	  k<-k+1
	  if(lastC(M[i]) != letters[k]) M[i]<-paste(M[i],letters[k],sep="")#M[cambio]<-paste(M[cambio],letters[k],sep="")
#	  cat(paste("\t",k,"\t",j,"\t",i,"\t",M[j],"\t",M[i],"\n"))
	  break
	  }
	}
	else {  #s=FALSE, no difference
	  # Treatments j and i are not significantly different: make sure they
	  # share at least one letter.
	  bSame<-NsameLetters(M[j],M[i])
#	  cat("\t",bSame,"\t")
	  if(!bSame) {
	    if(M[i]!=""){
		M[j]<-paste(M[j],lastC(M[i]),sep="")}
	    else{
		if(lastC(M[j]) != letters[k]) M[j]<-paste(M[j],letters[k],sep="")
		if(lastC(M[i]) != letters[k]) M[i]<-paste(M[i],letters[k],sep="")
#		cat(paste("\t",k,"\t",j,"\t",i,"\t",M[j],"\t",M[i],"\n"))
	    } #else
	  } #for i
	}
  }
  j<-j+1
} #while j
return(M)
}
#=============================================================================
# Do two grouping-letter strings share at least one character?
#
# Used by KW.group() to decide whether two treatments already carry a
# common significance letter.
#
# str1, str2 : strings of grouping letters (possibly empty).
# Returns TRUE if the strings have at least one character in common,
# FALSE otherwise (including when either string is empty).
NsameLetters<-
function(str1,str2)
{
  s1 <- as.character(str1)
  s2 <- as.character(str2)
  # Empty input can never share a character (matches the original
  # n1 < 1 || n2 < 1 early return).
  if (nchar(s1) < 1 || nchar(s2) < 1) return(FALSE)
  # Vectorized membership test replaces the original O(n1*n2) double
  # loop, whose `break` only exited the inner loop and kept scanning.
  any(strsplit(s1, NULL)[[1]] %in% strsplit(s2, NULL)[[1]])
}#function
#=============================================================================
# Draw a correlation-style matrix visualization (adapted from
# corrplot::corrplot).  Each cell of `corr` is rendered as a circle,
# square, ellipse, number, shaded square, colored square or pie,
# optionally with significance marks (p.mat), confidence intervals
# (lowCI.mat / uppCI.mat), a color legend and row/column text labels.
#
# corr    : matrix or data.frame of values; must lie in [-1, 1] after
#           rescaling unless is.corr = FALSE.
# method  : glyph drawn in each cell.
# type    : draw the "full" matrix or only the "lower"/"upper" triangle.
# is.corr : if FALSE, values are linearly rescaled into [-1, 1] via cl.lim.
# order   : row/column ordering; "hclust" additionally supports addrect
#           cluster rectangles (via corrRect.hclust).
# p.mat   : matrix of p-values; cells with p > sig.level are marked per
#           `insig` ("pch", "p-value", "blank" or "n").
# Returns the (possibly reordered/rescaled) matrix, invisibly.
matrixplot<-
function (corr, method = c("circle", "square", "ellipse", "number",
    "shade", "color", "pie"), type = c("full", "lower", "upper"),
    add = FALSE, col = NULL, bg = "white", title = "", is.corr = TRUE,
    diag = TRUE, outline = FALSE, mar = c(0, 0, 0, 0), addgrid.col = NULL,
    addCoef.col = NULL, addCoefasPercent = FALSE, order = c("original",
        "AOE", "FPC", "hclust", "alphabet"), hclust.method = c("complete",
        "ward", "single", "average", "mcquitty", "median", "centroid"),
    addrect = NULL, rect.col = "black", rect.lwd = 2, tl.pos = NULL,
    tl.cex = 1, tl.col = "red", tl.offset = 0.4, tl.srt = 90,
    cl.pos = NULL, cl.lim = NULL, cl.length = NULL, cl.cex = 0.8,
    cl.ratio = 0.15, cl.align.text = "c", cl.offset = 0.5, addshade = c("negative",
        "positive", "all"), shade.lwd = 1, shade.col = "white",
    p.mat = NULL, sig.level = 0.05, insig = c("pch", "p-value",
        "blank", "n"), pch = 4, pch.col = "black", pch.cex = 3,
    plotCI = c("n", "square", "circle", "rect"), lowCI.mat = NULL,
    uppCI.mat = NULL, ...)
{
    # Resolve multi-choice arguments to a single value.
    method <- match.arg(method)
    type <- match.arg(type)
    order <- match.arg(order)
    hclust.method <- match.arg(hclust.method)
    plotCI <- match.arg(plotCI)
    insig <- match.arg(insig)
    if (!is.matrix(corr) & !is.data.frame(corr))
        stop("Need a matrix or data frame!")
    if (is.null(addgrid.col)) {
        addgrid.col <- ifelse(method == "color" | method == "shade",
            "white", "grey")
    }
    # NOTE(review): this range check runs before the is.null(cl.lim)
    # default below is filled in; with the default cl.lim = NULL it is a
    # no-op (any(logical(0)) is FALSE) — confirm the ordering is intended.
    if (any(corr < cl.lim[1]) | any(corr > cl.lim[2]))
        stop("color limits should cover matrix")
    if (is.null(cl.lim)) {
        if (is.corr)
            cl.lim <- c(-1, 1)
        if (!is.corr)
            cl.lim <- c(min(corr), max(corr))
    }
    # Linearly rescale non-correlation data into [-1, 1].
    intercept <- 0
    zoom <- 1
    if (!is.corr) {
        if (max(corr) * min(corr) < 0) {
            intercept <- 0
            zoom <- 1/max(abs(cl.lim))
        }
        if (min(corr) >= 0) {
            intercept <- -cl.lim[1]
            zoom <- 1/(diff(cl.lim))
        }
        if (max(corr) <= 0) {
            intercept <- -cl.lim[2]
            zoom <- 1/(diff(cl.lim))
        }
        corr <- (intercept + corr) * zoom
    }
    cl.lim2 <- (intercept + cl.lim) * zoom
    int <- intercept * zoom
    if (min(corr) < -1 - .Machine$double.eps || max(corr) > 1 +
        .Machine$double.eps) {
        stop("The matrix is not in [-1, 1]!")
    }
    # Default diverging palette (red -> white -> blue), 200 steps.
    if (is.null(col)) {
        col <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D",
            "#F4A582", "#FDDBC7", "#FFFFFF", "#D1E5F0", "#92C5DE",
            "#4393C3", "#2166AC", "#053061"))(200)
    }
    n <- nrow(corr)
    m <- ncol(corr)
    min.nm <- min(n, m)
    ord <- 1:min.nm
    if (!order == "original") {
        ord <- corrMatOrder(corr, order = order, hclust.method = hclust.method)
        corr <- corr[ord, ord]
    }
    if (is.null(rownames(corr)))
        rownames(corr) <- 1:n
    if (is.null(colnames(corr)))
        colnames(corr) <- 1:m
    # Helper: mask the unwanted triangle/diagonal with Inf and return the
    # surviving cells as plot positions (x = column, y = flipped row so the
    # first row is drawn on top) plus their values.
    getPos.Dat <- function(mat) {
        x <- matrix(1:n * m, n, m)
        tmp <- mat
        if (type == "upper")
            tmp[row(x) > col(x)] <- Inf
        if (type == "lower")
            tmp[row(x) < col(x)] <- Inf
        if (type == "full")
            tmp <- tmp
        if (!diag)
            diag(tmp) <- Inf
        Dat <- tmp[is.finite(tmp)]
        ind <- which(is.finite(tmp), arr.ind = TRUE)
        Pos <- ind
        Pos[, 1] <- ind[, 2]
        Pos[, 2] <- -ind[, 1] + 1 + n
        return(list(Pos, Dat))
    }
    Pos <- getPos.Dat(corr)[[1]]
    n2 <- max(Pos[, 2])
    n1 <- min(Pos[, 2])
    nn <- n2 - n1
    newrownames <- as.character(rownames(corr)[(n + 1 - n2):(n +
        1 - n1)])
    m2 <- max(Pos[, 1])
    m1 <- min(Pos[, 1])
    mm <- m2 - m1
    newcolnames <- as.character(colnames(corr)[m1:m2])
#	cat("n=",n,"\n")
#	cat("m=",m,"\n")
#	mat1<-getPos.Dat(corr)
#	print(corr)
#	print(mat1)
#	cat("newrownames=",newrownames,"\n")
#	cat("newcolnames=",newcolnames,"\n")
    DAT <- getPos.Dat(corr)[[2]]
    len.DAT <- length(DAT)
    # Map each value in [-1, 1] onto the color palette.
    assign.color <- function(DAT) {
        newcorr <- (DAT + 1)/2
        newcorr[newcorr == 1] <- 1 - 1e-10
        col.fill <- col[floor(newcorr * length(col)) + 1]
    }
    col.fill <- assign.color(DAT)
    isFALSE = function(x) identical(x, FALSE)
    isTRUE = function(x) identical(x, TRUE)
    # Resolve default positions for text labels (tl.pos) and color
    # legend (cl.pos) based on the matrix type.
    if (isFALSE(tl.pos)) {
        tl.pos <- "n"
    }
    if (is.null(tl.pos) | isTRUE(tl.pos)) {
        if (type == "full")
            tl.pos <- "lt"
        if (type == "lower")
            tl.pos <- "ld"
        if (type == "upper")
            tl.pos <- "td"
    }
    if (isFALSE(cl.pos)) {
        cl.pos <- "n"
    }
    if (is.null(cl.pos) | isTRUE(cl.pos)) {
        if (type == "full")
            cl.pos <- "r"
        if (type == "lower")
            cl.pos <- "b"
        if (type == "upper")
            cl.pos <- "r"
    }
    if (outline)
        col.border <- "black"
    if (!outline)
        col.border <- col.fill
    # Set up a fresh plot window sized iteratively so labels and the
    # legend fit (skipped when add = TRUE).
    if (!add) {
        par(mar = mar, bg = "white")
        plot.new()
        xlabwidth <- ylabwidth <- 0
        for (i in 1:50) {
            xlim <- c(m1 - 0.5 - xlabwidth, m2 + 0.5 + mm * cl.ratio *
                (cl.pos == "r"))
            ylim <- c(n1 - 0.5 - nn * cl.ratio * (cl.pos == "b"),
                n2 + 0.5 + ylabwidth)
            plot.window(xlim + c(-0.2, 0.2), ylim + c(-0.2, 0.2),
                asp = 1, xaxs = "i", yaxs = "i")
            x.tmp <- max(strwidth(newrownames, cex = tl.cex))
            y.tmp <- max(strwidth(newcolnames, cex = tl.cex))
            if (min(x.tmp - xlabwidth, y.tmp - ylabwidth) < 1e-04)
                break
            xlabwidth <- x.tmp
            ylabwidth <- y.tmp
        }
        if (tl.pos == "n" | tl.pos == "d")
            xlabwidth <- ylabwidth <- 0
        if (tl.pos == "td")
            ylabwidth <- 0
        if (tl.pos == "ld")
            xlabwidth <- 0
        laboffset <- strwidth("W", cex = tl.cex) * tl.offset
        xlim <- c(m1 - 0.5 - xlabwidth - laboffset, m2 + 0.5 +
            mm * cl.ratio * (cl.pos == "r")) + c(-0.35, 0.15)
        ylim <- c(n1 - 0.5 - nn * cl.ratio * (cl.pos == "b"),
            n2 + 0.5 + ylabwidth * abs(sin(tl.srt * pi/180)) +
                laboffset) + c(-0.15, 0.35)
        if (.Platform$OS.type == "windows") {
            windows.options(width = 7, height = 7 * diff(ylim)/diff(xlim))
        }
        plot.window(xlim = xlim, ylim = ylim, asp = 1, xlab = "",
            ylab = "", xaxs = "i", yaxs = "i")
    }
    laboffset <- strwidth("W", cex = tl.cex) * tl.offset
    # Paint the cell backgrounds, then the glyph chosen by `method`.
    symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
        len.DAT), bg = bg, fg = bg)
    if (method == "circle" & plotCI == "n") {
        symbols(Pos, add = TRUE, inches = FALSE, bg = col.fill,
            circles = 0.9 * abs(DAT)^0.5/2, fg = col.border)
    }
    if (method == "ellipse" & plotCI == "n") {
        ell.dat <- function(rho, length = 99) {
            k <- seq(0, 2 * pi, length = length)
            x <- cos(k + acos(rho)/2)/2
            y <- cos(k - acos(rho)/2)/2
            return(cbind(rbind(x, y), c(NA, NA)))
        }
        ELL.dat <- lapply(DAT, ell.dat)
        ELL.dat2 <- 0.85 * matrix(unlist(ELL.dat), ncol = 2,
            byrow = TRUE)
        ELL.dat2 <- ELL.dat2 + Pos[rep(1:length(DAT), each = 100),
            ]
        polygon(ELL.dat2, border = col.border, col = col.fill)
    }
    if (method == "number" & plotCI == "n") {
        text(Pos[, 1], Pos[, 2], font = 2, col = col.fill, labels = round((DAT -
            int) * ifelse(addCoefasPercent, 100, 1)/zoom, ifelse(addCoefasPercent,
            0, 2)))
    }
    if (method == "pie" & plotCI == "n") {
        symbols(Pos, add = TRUE, inches = FALSE, circles = rep(0.5,
            len.DAT) * 0.85)
        pie.dat <- function(theta, length = 100) {
            k <- seq(pi/2, pi/2 - theta, length = 0.5 * length *
                abs(theta)/pi)
            x <- c(0, cos(k)/2, 0)
            y <- c(0, sin(k)/2, 0)
            return(cbind(rbind(x, y), c(NA, NA)))
        }
        PIE.dat <- lapply(DAT * 2 * pi, pie.dat)
        len.pie <- unlist(lapply(PIE.dat, length))/2
        PIE.dat2 <- 0.85 * matrix(unlist(PIE.dat), ncol = 2,
            byrow = TRUE)
        PIE.dat2 <- PIE.dat2 + Pos[rep(1:length(DAT), len.pie),
            ]
        polygon(PIE.dat2, border = "black", col = col.fill)
    }
    if (method == "shade" & plotCI == "n") {
        addshade <- match.arg(addshade)
        symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
            len.DAT), bg = col.fill, fg = addgrid.col)
        shade.dat <- function(w) {
            x <- w[1]
            y <- w[2]
            rho <- w[3]
            x1 <- x - 0.5
            x2 <- x + 0.5
            y1 <- y - 0.5
            y2 <- y + 0.5
            dat <- NA
            if ((addshade == "positive" || addshade == "all") &
                rho > 0) {
                dat <- cbind(c(x1, x1, x), c(y, y1, y1), c(x,
                  x2, x2), c(y2, y2, y))
            }
            if ((addshade == "negative" || addshade == "all") &
                rho < 0) {
                dat <- cbind(c(x1, x1, x), c(y, y2, y2), c(x,
                  x2, x2), c(y1, y1, y))
            }
            return(t(dat))
        }
        pos_corr <- rbind(cbind(Pos, DAT))
        pos_corr2 <- split(pos_corr, 1:nrow(pos_corr))
        SHADE.dat <- matrix(na.omit(unlist(lapply(pos_corr2,
            shade.dat))), byrow = TRUE, ncol = 4)
        segments(SHADE.dat[, 1], SHADE.dat[, 2], SHADE.dat[,
            3], SHADE.dat[, 4], col = shade.col, lwd = shade.lwd)
    }
    if (method == "square" & plotCI == "n") {
        symbols(Pos, add = TRUE, inches = FALSE, squares = abs(DAT)^0.5,
            bg = col.fill, fg = col.border)
    }
    if (method == "color" & plotCI == "n") {
        symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
            len.DAT), bg = col.fill, fg = col.border)
    }
    symbols(Pos, add = TRUE, inches = FALSE, bg = NA, squares = rep(1,
        len.DAT), fg = addgrid.col)
    # Overlay confidence intervals when requested (circle/square glyph
    # pairs for the upper/lower bound, or rect with whisker segments).
    if (plotCI != "n") {
        if (is.null(lowCI.mat) || is.null(uppCI.mat))
            stop("Need lowCI.mat and uppCI.mat!")
        if (!order == "original") {
            lowCI.mat <- lowCI.mat[ord, ord]
            uppCI.mat <- uppCI.mat[ord, ord]
        }
        pos.lowNew <- getPos.Dat(lowCI.mat)[[1]]
        lowNew <- getPos.Dat(lowCI.mat)[[2]]
        pos.uppNew <- getPos.Dat(uppCI.mat)[[1]]
        uppNew <- getPos.Dat(uppCI.mat)[[2]]
        if (!(method == "circle" || method == "square"))
            stop("method shoud be circle or square if draw confidence interval!")
        k1 <- (abs(uppNew) > abs(lowNew))
        bigabs <- uppNew
        bigabs[which(!k1)] <- lowNew[!k1]
        smallabs <- lowNew
        smallabs[which(!k1)] <- uppNew[!k1]
        sig <- sign(uppNew * lowNew)
        if (plotCI == "circle") {
            symbols(pos.uppNew[, 1], pos.uppNew[, 2], add = TRUE,
                inches = FALSE, circles = 0.95 * abs(bigabs)^0.5/2,
                bg = ifelse(sig > 0, col.fill, col[ceiling((bigabs +
                  1) * length(col)/2)]), fg = ifelse(sig > 0,
                  col.fill, col[ceiling((bigabs + 1) * length(col)/2)]))
            symbols(pos.lowNew[, 1], pos.lowNew[, 2], add = TRUE,
                inches = FALSE, circles = 0.95 * abs(smallabs)^0.5/2,
                bg = ifelse(sig > 0, bg, col[ceiling((smallabs +
                  1) * length(col)/2)]), fg = ifelse(sig > 0,
                  col.fill, col[ceiling((smallabs + 1) * length(col)/2)]))
        }
        if (plotCI == "square") {
            symbols(pos.uppNew[, 1], pos.uppNew[, 2], add = TRUE,
                inches = FALSE, squares = abs(bigabs)^0.5, bg = ifelse(sig >
                  0, col.fill, col[ceiling((bigabs + 1) * length(col)/2)]),
                fg = ifelse(sig > 0, col.fill, col[ceiling((bigabs +
                  1) * length(col)/2)]))
            symbols(pos.lowNew[, 1], pos.lowNew[, 2], add = TRUE,
                inches = FALSE, squares = abs(smallabs)^0.5,
                bg = ifelse(sig > 0, bg, col[ceiling((smallabs +
                  1) * length(col)/2)]), fg = ifelse(sig > 0,
                  col.fill, col[ceiling((smallabs + 1) * length(col)/2)]))
        }
        if (plotCI == "rect") {
            rect.width <- 0.25
            rect(pos.uppNew[, 1] - rect.width, pos.uppNew[, 2] +
                smallabs/2, pos.uppNew[, 1] + rect.width, pos.uppNew[,
                2] + bigabs/2, col = col.fill, border = col.fill)
            segments(pos.lowNew[, 1] - rect.width, pos.lowNew[,
                2] + DAT/2, pos.lowNew[, 1] + rect.width, pos.lowNew[,
                2] + DAT/2, col = "black", lwd = 1)
            segments(pos.uppNew[, 1] - rect.width, pos.uppNew[,
                2] + uppNew/2, pos.uppNew[, 1] + rect.width,
                pos.uppNew[, 2] + uppNew/2, col = "black", lwd = 1)
            segments(pos.lowNew[, 1] - rect.width, pos.lowNew[,
                2] + lowNew/2, pos.lowNew[, 1] + rect.width,
                pos.lowNew[, 2] + lowNew/2, col = "black", lwd = 1)
            segments(pos.lowNew[, 1] - 0.5, pos.lowNew[, 2],
                pos.lowNew[, 1] + 0.5, pos.lowNew[, 2], col = "grey70",
                lty = 3)
        }
    }
    # Mark statistically insignificant cells (p > sig.level).
    if (!is.null(p.mat) & !insig == "n") {
        if (!order == "original")
            p.mat <- p.mat[ord, ord]
        pos.pNew <- getPos.Dat(p.mat)[[1]]
        pNew <- getPos.Dat(p.mat)[[2]]
        ind.p <- which(pNew > (sig.level))
        if (insig == "pch") {
            points(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
                pch = pch, col = pch.col, cex = pch.cex, lwd = 2)
        }
        if (insig == "p-value") {
            text(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
                round(pNew[ind.p], 2), col = pch.col)
        }
        if (insig == "blank") {
            symbols(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
                inches = FALSE, squares = rep(1, length(pos.pNew[,
                  1][ind.p])), fg = addgrid.col, bg = bg, add = TRUE)
        }
    }
    # Color legend, to the right ("r") or below ("b") the matrix.
    if (cl.pos != "n") {
        colRange <- assign.color(cl.lim2)
        ind1 <- which(col == colRange[1])
        ind2 <- which(col == colRange[2])
        colbar <- col[ind1:ind2]
        if (is.null(cl.length))
            cl.length <- ifelse(length(colbar) > 20, 11, length(colbar) +
                1)
        labels <- seq(cl.lim[1], cl.lim[2], length = cl.length)
        at <- seq(0, 1, length = length(labels))
        if (cl.pos == "r") {
            vertical <- TRUE
            xlim <- c(m2 + 0.5 + mm * 0.02, m2 + 0.5 + mm * cl.ratio)
            ylim <- c(n1 - 0.5, n2 + 0.5)
        }
        if (cl.pos == "b") {
            vertical <- FALSE
            xlim <- c(m1 - 0.5, m2 + 0.5)
            ylim <- c(n1 - 0.5 - nn * cl.ratio, n1 - 0.5 - nn *
                0.02)
        }
        # NOTE(review): leftover debug output — consider removing.
        print(ylim)
        colorlegend(colbar = colbar, labels = round(labels, 2),
            offset = cl.offset, ratio.colbar = 0.3, cex = cl.cex,
            xlim = xlim, ylim = ylim, vertical = vertical, align = cl.align.text)
    }
    # Row/column text labels.
    if (tl.pos != "n") {
        ylabwidth2 <- strwidth(newrownames, cex = tl.cex)
        xlabwidth2 <- strwidth(newcolnames, cex = tl.cex)
        pos.xlabel <- cbind(m1:m2, n2 + 0.5 + laboffset)
        pos.ylabel <- cbind(m1 - 0.5, n2:n1)
        if (tl.pos == "td") {
            if (type != "upper")
                stop("type should be \"upper\" if tl.pos is \"dt\".")
            pos.ylabel <- cbind(m1:(m1 + nn) - 0.5, n2:n1)
        }
        if (tl.pos == "ld") {
            if (type != "lower")
                stop("type should be \"lower\" if tl.pos is \"ld\".")
            pos.xlabel <- cbind(m1:m2, n2:(n2 - mm) + 0.5 + laboffset)
        }
        if (tl.pos == "d") {
            pos.ylabel <- cbind(m1:(m1 + nn) - 0.5, n2:n1)
            pos.ylabel <- pos.ylabel[1:min(n, m), ]
            symbols(pos.ylabel[, 1] + 0.5, pos.ylabel[, 2], add = TRUE,
                bg = bg, fg = addgrid.col, inches = FALSE, squares = rep(1,
                  length(pos.ylabel[, 1])))
            text(pos.ylabel[, 1] + 0.5, pos.ylabel[, 2], newcolnames[1:min(n,
                m)], col = tl.col, cex = tl.cex, ...)
        }
        else {
            text(pos.xlabel[, 1], pos.xlabel[, 2], newcolnames,
                srt = tl.srt, adj = ifelse(tl.srt == 0, c(0.5,
                  0), c(0, 0)), col = tl.col, cex = tl.cex, offset = tl.offset,
                ...)
            text(pos.ylabel[, 1], pos.ylabel[, 2], newrownames,
                col = tl.col, cex = tl.cex, pos = 2, offset = tl.offset,
                ...)
        }
    }
    title(title, ...)
    if (!is.null(addCoef.col) & (!method == "number")) {
        text(Pos[, 1], Pos[, 2], col = addCoef.col, labels = round((DAT -
            int) * ifelse(addCoefasPercent, 100, 1)/zoom, ifelse(addCoefasPercent,
            0, 2)))
    }
    if (type == "full" & plotCI == "n" & !is.null(addgrid.col))
        rect(m1 - 0.5, n1 - 0.5, m2 + 0.5, n2 + 0.5, border = addgrid.col)
    if (!is.null(addrect) & order == "hclust" & type == "full") {
        corrRect.hclust(corr, k = addrect, method = hclust.method,
            col = rect.col, lwd = rect.lwd)
    }
    invisible(corr)
} #matrixplot
#=============================================================================
# Extract plotting positions and finite values from a matrix.
#
# Standalone version of the helper embedded in matrixplot(): depending on
# `type`, masks the lower/upper triangle (and optionally the diagonal)
# with Inf, then returns the surviving cells as plot coordinates.
#
# mat  : numeric matrix.
# type : which part to keep: "full" (default), "lower" or "upper".
# diag : keep the diagonal?
# ...  : ignored; kept for call-site compatibility.
#
# Returns list(Pos, Dat): Pos is a 2-column matrix of x/y plot positions
# (y flipped so row 1 plots on top), Dat the matching finite values.
getPos.Dat <-
function(mat, type = c("full", "lower", "upper"), diag = TRUE, ...) {
    # BUG FIX: resolve the default vector to a single choice; without
    # this, `if (type == "upper")` compared a length-3 vector whenever
    # `type` was left at its default (an error on R >= 4.2).
    type <- match.arg(type)
    n <- nrow(mat)
    m <- ncol(mat)
    x <- matrix(1:n * m, n, m)  # dummy matrix: only row(x)/col(x) are used
    tmp <- mat
    if (type == "upper")
        tmp[row(x) > col(x)] <- Inf
    if (type == "lower")
        tmp[row(x) < col(x)] <- Inf
    if (!diag)
        diag(tmp) <- Inf
    Dat <- tmp[is.finite(tmp)]
    ind <- which(is.finite(tmp), arr.ind = TRUE)
    Pos <- ind
    Pos[, 1] <- ind[, 2]
    Pos[, 2] <- -ind[, 1] + 1 + n   # flip y so the first row plots on top
    return(list(Pos, Dat))
}
#=============================================================================
|
/aTools/R/wgs.R
|
no_license
|
wanggangsheng/SWATopt
|
R
| false
| false
| 33,513
|
r
|
# Strip leading and trailing spaces from each element of x.
#
# BUG FIX: the original pattern " *([^ ]+) *" was unanchored, so for
# strings with internal spaces it removed the spaces around the *first*
# word ("  a b " -> "ab ") instead of trimming the ends.  The anchored
# form below trims only leading/trailing spaces and leaves interior
# spacing intact; single-token inputs behave exactly as before.
#
# x : character vector.
# Returns x with leading/trailing spaces removed.
trim <-
function(x) {
  sub("^ +", "", sub(" +$", "", x))
}
#===================================================================================================
# Trim leading and trailing whitespace (any [:space:] character) from
# each element of x, leaving interior whitespace untouched.
#
# x : character vector (factors are coerced by gsub).
# Returns the trimmed character vector.
trimLR <-
function(x) {
  gsub("^[[:space:]]+|[[:space:]]+$", "", x)
}
#===================================================================================================
# Nash-Sutcliffe efficiency of simulated vs observed values.
#
# yobs : observed values.
# ysim : simulated values (same length as yobs).
#
# Returns a one-row data.frame with columns:
#   n    - sample size,
#   RSO  - ratio of simulated mean to observed mean,
#   NSEC - Nash-Sutcliffe efficiency coefficient (1 = perfect fit).
NSE <-
function(yobs, ysim) {
  n <- length(yobs)
  ss_about_mean <- sum((yobs - mean(yobs))^2)  # spread of obs about its mean
  ss_error <- sum((ysim - yobs)^2)             # squared simulation error
  RSO <- mean(ysim) / mean(yobs)
  NSEC <- 1 - ss_error / ss_about_mean
  data.frame(n, RSO, NSEC)
}
#===================================================================================================
# Elbow plot for choosing the number of k-means clusters.
#
# Plots the within-group sum of squares against the number of clusters
# (1 .. nrow(mydata) - 1); look for the "elbow" in the curve.
#
# mydata : numeric matrix or data.frame, one row per observation.
# Returns NULL invisibly (from plot); called for its plotting side effect.
cluster.no <-
function(mydata){
  # Determine number of clusters
  nRow <- nrow(mydata)
  # wss[1]: total within-group SS when everything is a single cluster.
  wss <- (nRow - 1) * sum(apply(mydata, 2, var))
  # BUG FIX: the original loop read `2:nRow-1`, which parses as
  # (2:nRow) - 1 == 1:(nRow-1), silently overwriting wss[1] with a
  # kmeans result; the intended range is 2:(nRow - 1).
  for (i in 2:(nRow - 1)) wss[i] <- sum(kmeans(mydata,
                                               centers = i)$withinss)
  plot(1:(nRow - 1), wss, type = "b", xlab = "Number of Clusters",
       ylab = "Within groups sum of squares")
}
#===================================================================================================
# Histogram of x with a fitted normal density curve overlaid.
#
# x       : numeric vector.
# nbreaks : suggested number of histogram breaks.
# xlab    : x-axis label.
# ...     : unused; accepted for call-site compatibility.
#
# Returns NULL invisibly (from lines); called for its plotting side effect.
histNorm <-
function(x, nbreaks, xlab, ...) {
  counts <- hist(x, breaks = nbreaks, type = "count", col = "red",
                 xlab = xlab, ylab = "Frequency", main = NULL,
                 border = TRUE)
  grid_x <- seq(min(x), max(x), length = 40)
  grid_y <- dnorm(grid_x, mean = mean(x), sd = sd(x))
  # Scale the density so its area matches the count histogram:
  # density * bin width * sample size.
  grid_y <- grid_y * diff(counts$mids[1:2]) * length(x)
  lines(grid_x, grid_y, col = "blue", lwd = 2)
}
#===================================================================================================
# Bar chart of treatment means with error bars and grouping letters.
#
# x      : group output of LSD1.test(): col 1 = treatment names,
#          col 2 = means, col 3 = grouping letters, col 4 = replication,
#          col 5 = std.err (matches the order1.group()/LSD1.test layout).
# xylab  : c(treatment-column name in the model frame / x label, y label).
# model1 : the fitted aov/lm the means came from; its model frame is used
#          to recover per-group sample sizes for the axis labels.
# std    : TRUE  -> bars at mean +/- std.err * sqrt(n)  (i.e. +/- sd);
#          FALSE -> bars at mean +/- x[,5] directly.
# group  : draw the grouping letters above the bars?
# horiz  : horizontal bars?
# LSD/diffLetter : when LSD = FALSE, x[,3] is replaced by diffLetter
#          (e.g. letters produced by KW.group()).
# fontSize : cex used for bar names, axes and letters.
#
# Relies on tapply.stat() and trimLR() defined elsewhere in this file.
bar.compare <-
function(x,xylab,model1,std=TRUE, group=TRUE, horiz=FALSE, LSD=TRUE,diffLetter,fontSize,...) {
  y<-x[,2]
  #----------------------
  # Recover per-treatment sample sizes from the model frame.
  A1<-model1$model
  y1<-A1[,1]
  ipch<-pmatch(xylab[1],names(A1))
  # NOTE(review): `treatment` is not defined in this scope — this error
  # message would itself fail with "object 'treatment' not found".
  if( is.na(ipch)) return(cat("Name: ",treatment,"\n",names(A1)[-1],"\n"))
  #name.t <-names(A)[ipch]
  trt<-A1[,ipch]
  #name.y <- names(A)[1]
  junto <- subset(data.frame(y1, trt), is.na(y1) == FALSE)
  means <- tapply.stat(junto[, 1], junto[, 2], stat="mean")	#change
  sds <- tapply.stat(junto[, 1], junto[, 2], stat="sd")	#change
  nn <- tapply.stat(junto[, 1], junto[, 2], stat="length")	#change
  # Sort counts to match x, which is ordered by decreasing mean.
  o1=order(means[,2],decreasing = TRUE)
  nn1 = rbind(nn[o1,2])
  nn1[1]<-paste("n=",nn1[1])
  #----------------------
  names(y)<-paste(x[,1],"\n",nn1)
  # Error-bar limits: +/- sd when std=TRUE, else +/- x[,5] as given.
  if( std ) {
    nivel0<-x[,2]-x[,5]*sqrt(x[,4])
    nivel1<-x[,2]+x[,5]*sqrt(x[,4])
  }
  else {
    nivel0<-x[,2]-x[,5]
    nivel1<-x[,2]+x[,5]
  }
  n<-length(y)
  tope<-max(nivel1)/20
  Ytop = max(nivel1)+tope
  Ybottom = min(nivel1)-tope
  if(Ybottom>0) {
    indice<-barplot(y,horiz=horiz, xlab=xylab[1], ylab=xylab[2],ylim = c(0,Ytop),cex.names=fontSize,cex.axis=fontSize,cex.lab=fontSize,...)
  }
  else {
    indice<-barplot(y,horiz=horiz, xlab=xylab[1], ylab=xylab[2],ylim = c(Ybottom,Ytop),cex.names=fontSize, cex.axis=fontSize,cex.lab=fontSize,...)
  }
  # Draw the error bars with bracket/tick end caps.
  for ( i in 1:n) {
    if (horiz) {
      lines(rbind(c(nivel0[i],indice[i]),c(nivel1[i],indice[i])),col="red")
      text( cex=1,nivel0[i],indice[i],"[")
      text( cex=1,nivel1[i],indice[i],"]")
    }
    else {
      lines(rbind(c(indice[i],nivel0[i]),c(indice[i],nivel1[i])),col="red")
      text( cex=1,indice[i],nivel0[i],"---")
      text( cex=1,indice[i],nivel1[i],"---")
    }
  }
  # Grouping letters above the bars.
  if(group) {
    if(!LSD){
      x[,3]<-diffLetter
    }
    for ( i in 1:n)
      text(indice[i],nivel1[i]+tope*0.5,trimLR(x[i,3]),cex=fontSize)
  }
}
#===================================================================================================
# Fisher's LSD (least significant difference) t test after an ANOVA,
# modeled on agricolae::LSD.test.
#
# y        : response vector, or an aov/lm fit (in which case `trt` names
#            a term of the model and DFerror/MSerror come from the fit).
# trt      : treatment factor, or the name of the model column when y is a fit.
# DFerror  : residual degrees of freedom.
# MSerror  : residual mean square.
# alpha    : significance level for CIs and comparisons.
# p.adj    : p-value adjustment method for the pairwise comparisons.
# group    : TRUE  -> letter groupings via order1.group();
#            FALSE -> table of all pairwise differences with CIs.
# main     : study title printed in the report.
#
# Prints a report and invisibly returns either the letter-group table
# (group = TRUE) or the pairwise-comparison table (group = FALSE).
# Relies on tapply.stat() and order1.group() defined elsewhere in this file.
LSD1.test <-
function (y, trt, DFerror, MSerror, alpha = 0.05, p.adj = c("none",
    "holm", "hochberg", "bonferroni", "BH", "BY", "fdr"), group = TRUE,
    main = NULL)
{
    p.adj <- match.arg(p.adj)
    clase<-c("aov","lm")
    name.y <- paste(deparse(substitute(y)))
    name.t <- paste(deparse(substitute(trt)))
    # When y is a fitted model, pull the response, treatment column and
    # residual df/MS out of the fit itself.
    if("aov"%in%class(y) | "lm"%in%class(y)){
    A<-y$model
    DFerror<-df.residual(y)
    MSerror<-deviance(y)/DFerror
    y<-A[,1]
    ipch<-pmatch(trt,names(A))
    if( is.na(ipch)) return(cat("Name: ",trt,"\n",names(A)[-1],"\n"))
    name.t <-names(A)[ipch]
    trt<-A[,ipch]
    name.y <- names(A)[1]
    }
    # Per-treatment summary statistics (NA responses dropped).
    junto <- subset(data.frame(y, trt), is.na(y) == FALSE)
    means <- tapply.stat(junto[, 1], junto[, 2], stat="mean")	#change
    sds <- tapply.stat(junto[, 1], junto[, 2], stat="sd")	#change
    nn <- tapply.stat(junto[, 1], junto[, 2], stat="length")	#change
    std.err <- sds[, 2]/sqrt(nn[, 2])
    Tprob <- qt(1 - alpha/2, DFerror)
    LCL <- means[,2]-Tprob*std.err
    UCL <- means[,2]+Tprob*std.err
    means <- data.frame(means, std.err, replication = nn[, 2],
        LCL, UCL)
    names(means)[1:2] <- c(name.t, name.y)
    #row.names(means) <- means[, 1]
    ntr <- nrow(means)
    nk <- choose(ntr, 2)
    # Invert p.adjust(): binary-search the raw p-value x whose adjusted
    # value over nk comparisons equals alpha, then use the matching
    # t quantile as the critical value.
    if (p.adj != "none")
	{
        a <- 1e-06
        b <- 1
        for (i in 1:100) {
            x <- (b + a)/2
            d <- p.adjust(x, n = nk, p.adj) - alpha
            fa <- p.adjust(a, n = nk, p.adj) - alpha
            if (d * fa < 0)
                b <- x
            if (d * fa > 0)
                a <- x
        }
        Tprob <- qt(1 - x/2, DFerror)
    }
    nr <- unique(nn[, 2])
    # ---- printed report ----
    cat("\nwgsStudy:", main)
    cat("\n\nLSD t Test for", name.y, "\n")
    if (p.adj != "none")
        cat("P value adjustment method:", p.adj, "\n")
    cat("\nMean Square Error: ",MSerror,"\n\n")
    cat(paste(name.t,",",sep="")," means and individual (",(1-alpha)*100,"%) CI\n\n")
    print(data.frame(row.names = means[,1], means[,-1]))
    cat("\nalpha:",alpha,"; Df Error:",DFerror)
    cat("\nCritical Value of t:", Tprob,"\n")
    if (group) {
        # Balanced design: single LSD; unbalanced: harmonic-mean version.
        if (length(nr) == 1) {
            LSD <- Tprob * sqrt(2 * MSerror/nr)
            cat("\nLeast Significant Difference", LSD)
        }
        else {
            nr1 <- 1/mean(1/nn[, 2])
            LSD1 <- Tprob * sqrt(2 * MSerror/nr1)
            cat("\nLeast Significant Difference", LSD1)
            cat("\nHarmonic Mean of Cell Sizes ", nr1)
        }
        cat("\nMeans with the same letter are not significantly different.")
        cat("\n\nGroups, Treatments and means\n")
        output <- order1.group(means[, 1], means[, 2], means[,
            4], MSerror, Tprob, means[, 3])
        # Re-attach the CIs in the (decreasing-mean) order used above.
        w<-order(means[,2],decreasing = TRUE)
        output <- data.frame(output,LCI=means[w,5],UCI=means[w,6])
    }
    if (!group) {
        # All pairwise comparisons, ordered so the larger mean comes first.
        comb <- combn(ntr, 2)
        nn <- ncol(comb)
        dif <- rep(0, nn)
        LCL1<-dif
        UCL1<-dif
        sig<-NULL
        pvalue <- rep(0, nn)
        for (k in 1:nn) {
            i <- comb[1, k]
            j <- comb[2, k]
            if (means[i, 2] < means[j, 2]){
            comb[1, k]<-j
            comb[2, k]<-i
            }
            dif[k] <- abs(means[i, 2] - means[j, 2])
            sdtdif <- sqrt(MSerror * (1/means[i, 4] + 1/means[j,
                4]))
            pvalue[k] <- 2 * (1 - pt(dif[k]/sdtdif, DFerror))
            if (p.adj != "none")
                pvalue[k] <- p.adjust(pvalue[k], n = nk, p.adj)
            pvalue[k] <- round(pvalue[k],6)
            LCL1[k] <- dif[k] - Tprob*sdtdif
            UCL1[k] <- dif[k] + Tprob*sdtdif
            # Significance stars in the conventional R style.
            sig[k]<-" "
            if (pvalue[k] <= 0.001) sig[k]<-"***"
            else if (pvalue[k] <= 0.01) sig[k]<-"**"
            else if (pvalue[k] <= 0.05) sig[k]<-"*"
            else if (pvalue[k] <= 0.1) sig[k]<-"."
        }
        tr.i <- means[comb[1, ],1]
        tr.j <- means[comb[2, ],1]
        output<-data.frame("Difference" = dif, pvalue = pvalue,sig,LCL=LCL1,UCL=UCL1)
        rownames(output)<-paste(tr.i,tr.j,sep=" - ")
        cat("\nComparison between treatments means\n\n")
        print(output)
        output <- data.frame(trt = means[, 1], means = means[,
            2], M = "", N = means[, 4], std.err ,LCL,UCL)
    }
    invisible(output)
}
#=============================================================================
# Assign letter groupings to treatment means (agricolae-style order.group).
#
# Treatments are sorted by decreasing mean; two treatments whose means
# differ by less than the minimum significant difference share a letter.
#
# trt, means, N, std.err : per-treatment name, mean, replication, SE.
# MSerror  : residual mean square from the ANOVA.
# Tprob    : critical t (or studentized-range) quantile.
# parameter: multiplier inside the MSD formula (1 for LSD).
# snk      : 0 = fixed Tprob; 1 = Tukey; 2 = SNK (Tprob recomputed per
#            span from the studentized range; needs DFerror and alpha).
# sdtdif   : optional fixed standard error of a difference; overrides the
#            MSerror-based formula when supplied.
#
# Prints the letter/treatment/mean table and returns it as a data.frame
# (trt, means, M = letters, N, std.err), sorted by decreasing mean.
# Relies on lastC() defined elsewhere in this file.
order1.group <-
function(trt,means,N,MSerror,Tprob,std.err,parameter=1, snk=0, DFerror=NULL,alpha=NULL,sdtdif=NULL) {
# Replace replications by their harmonic mean (balanced-design formula).
N<-rep(1/mean(1/N),length(N))
n<-length(means)
z<-data.frame(trt,means,N,std.err)
# ordena tratamientos  (sort treatments by decreasing mean)
w<-z[order(z[,2],decreasing = TRUE), ]
M<-rep("",n)    # accumulated grouping letters
k<-1            # current letter index
j<-1            # reference treatment index
k<-1
cambio<-n
cambio1<-0
chequeo=0       # safety counter so the while loop cannot run forever
M[1]<-letters[k]
while(j<n) {
chequeo<-chequeo+1
if (chequeo > n) break
for(i in j:n) {
# For SNK/Tukey the critical value depends on the span (number of means
# between positions j and i).
nx<-abs(i-j)+1
if (nx==1) nx=2
if(snk ==1 ) Tprob <- qtukey(p=1-alpha,nmeans=nx, df=DFerror)
if(snk ==2 ) Tprob <- qtukey(p=(1-alpha)^(nx-1),nmeans=nx, df=DFerror)
if(is.null(sdtdif)) minimo<-Tprob*sqrt(parameter*MSerror*(1/N[i]+1/N[j]))
if(!is.null(sdtdif)) minimo<-Tprob*sdtdif
s<-abs(w[i,2]-w[j,2])<=minimo
if(s) {
# Not significantly different from the reference: share its letter.
if(lastC(M[i]) != letters[k])M[i]<-paste(M[i],letters[k],sep="")
}
else {
# Significant break: start a new letter at position i, then advance the
# reference j past every mean that differs significantly from it.
k<-k+1
cambio<-i
cambio1<-0
ja<-j
for(jj in cambio:n) M[jj]<-paste(M[jj]," ",sep="")
M[cambio]<-paste(M[cambio],letters[k],sep="")
for( v in ja:cambio) {
nx<-abs(v-cambio)+1
if(nx == 1)  nx=2
if(snk ==1 ) Tprob <- qtukey(p=1-alpha,nmeans=nx, df=DFerror)
if(snk ==2 ) Tprob <- qtukey(p=(1-alpha)^(nx-1),nmeans=nx, df=DFerror)
if(is.null(sdtdif)) minimo<-Tprob*sqrt(parameter*MSerror*(1/N[i]+1/N[j]))
if(!is.null(sdtdif)) minimo<-Tprob*sdtdif
if(abs(w[v,2]-w[cambio,2])>minimo) {j<-j+1
cambio1<-1
}
else break
}
break
}
}
if (cambio1 ==0 )j<-j+1
}
#-----
# Assemble and print the sorted letter table.
w<-data.frame(w,stat=M)
trt<-as.character(w$trt)
means<-as.numeric(w$means)
N<-as.numeric(w$N)
std.err<- as.numeric(w$std.err)
cmax<-max(nchar(trt))
trt<-paste(trt,"     ")
trt<-substr(trt,1,cmax)
for(i in 1:n){
cat(M[i],"\t",trt[i],"\t",means[i],"\n")
}
output<-data.frame(trt,means,M,N,std.err)
return(output)
}
#=============================================================================
# Multiple-comparison test after Kruskal-Wallis (cf. pgirmess::kruskalmc).
#
# Ranks the pooled response, computes mean ranks per group (groups sorted
# by decreasing raw-mean), and compares each pairwise mean-rank difference
# against its normal-approximation critical value.
#
# resp  : response vector.
# categ : grouping factor.
# probs : overall significance level (Bonferroni-style split across pairs).
# cont  : NULL -> all pairwise comparisons; "one-tailed"/"two-tailed" ->
#         each treatment vs the first (control) group only.
#
# Returns a data.frame with one row per comparison: pair name (vname),
# observed mean-rank difference (difv), critical difference (lims) and a
# logical significance flag (sig).
KW.test <- function (resp, categ, probs = 0.05, cont = NULL,...)
{
    # Drop incomplete cases up front, warning how many were lost.
    db<-na.omit(data.frame(resp,categ))
    if(nrow(db)!=length(resp)) warning(paste(length(resp)-nrow(db),"lines including NA have been omitted"))
    resp<-db[,1]
    categ<-db[,2]
    lst <- split(rank(resp), categ)
    avg <- eval(bquote(tapply(.(as.name(colnames(db)[1])),.(as.name(colnames(db)[2])),mean)))
    name <- names(lst)
    R <- sapply(lst, mean)      # mean rank per group
    n <- sapply(lst, length)    # group sizes
    # Sort everything by decreasing group mean of the raw response.
    n<-n[order(avg,decreasing = TRUE)]
    R<-R[order(avg,decreasing = TRUE)]
    avg<-avg[order(avg,decreasing = TRUE)]
    name<-names(avg)
    N = length(resp)
    dif <- abs(outer(R, R, "-"))   # all pairwise mean-rank differences
    if (is.null(cont)) {
        # All pairwise comparisons, in nested-loop order (this ordering is
        # relied upon by KW.group()).
        difv <- NULL
        vname <- NULL
        indices <- NULL
        for (i in 1:(length(name) - 1)) {
            for (j in (i + 1):length(name)) {
                vname <- c(vname, paste(name[i], "-", name[j], sep = ""))
                indices <- rbind(indices, c(i, j))
		difv<-c(difv,dif[i,j])
            }
        }
        names(difv) <- vname
        # Normal-approximation critical difference with Bonferroni split
        # over all k(k-1) one-sided comparisons.
        z <- qnorm(probs/(length(lst) * (length(lst) - 1)), lower.tail = FALSE)
        lims <- z * sqrt(N * (N + 1)/12 * (1/n[indices[1:length(vname),1]] + 1/n[indices[1:length(vname), 2]]))
        names(lims) <- vname
        stat <- "Multiple comparison test after Kruskal-Wallis"
    }
    else {
        # Treatments vs control: compare every group to the first one.
        vname = NULL
        indices = NULL
        for (j in 2:length(dif[1, ])) {
            vname <- c(vname, paste(name[1], "-", name[j], sep = ""))
            indices <- rbind(indices, c(1, j))
        }
        dif <- dif[1, 2:length(dif[1, ])]
        names(dif) <- vname
	difv<-dif
        choice <- pmatch(cont, c("one-tailed", "two-tailed"),
            nomatch = 3)
        if (choice == 1) {
            z <- qnorm(probs/2 * (length(lst) - 1), lower.tail = FALSE)
            lims <- z * sqrt(N * (N + 1)/12 * (1/n[indices[1:length(vname),
                1]] + 1/n[indices[1:length(vname), 2]]))
            names(lims) <- vname
            stat <- "Multiple comparison test after Kruskal-Wallis, treatments vs control (one-tailed)"
        }
        if (choice == 2) {
            z <- qnorm(probs/(length(lst) - 1), lower.tail = FALSE)
            lims <- z * sqrt(N * (N + 1)/12 * (1/n[indices[1:length(vname),
                1]] + 1/n[indices[1:length(vname), 2]]))
            names(lims) <- vname
            stat <- "Multiple comparison test after Kruskal-Wallis, treatment vs control (two-tailed)"
        }
        if (choice == 3)
            stop("Values must be 'one-tailed' or 'two-tailed', partial matching accepted")
    }
#    output <- list(statistic = stat, p.value = probs, dif.com = data.frame(obs.dif = difv,
#        critical.dif = lims, difference = ifelse((difv - lims) > 0, TRUE, FALSE)))
    # Significant when the observed difference exceeds the critical one.
    sig<-ifelse((difv - lims) > 0, TRUE, FALSE)
    output <- data.frame(vname,difv, lims, sig)
    row.names(output)<-NULL
    #class(output) <- c("mc", "list")
    output
}
#=============================================================================
# Build letter groupings from KW.test() results (Kruskal-Wallis analogue
# of order1.group()).
#
# LSD    : data.frame already sorted by decreasing mean; column 1 holds the
#          treatment names.
# KWtest : output of KW.test(); column 1 is the "a-b" pair label, column 4
#          the logical significance flag for that pair.
#
# Returns a character vector of grouping letters, one per treatment, in the
# same (sorted) order as LSD.  Relies on trimLR(), lastC() and NsameLetters()
# defined elsewhere in this file.
KW.group <-
function(LSD,KWtest)#(trt,means,N,MSerror,Tprob,std.err,parameter=1, snk=0, DFerror=NULL,alpha=NULL,sdtdif=NULL) {
{
n<-nrow(LSD) #LSD already sorted
nKW<-nrow(KWtest)
M<-rep("",n)           # accumulated letters per treatment
k<-1                   # index of the current letter
j<-1                   # index of the reference treatment
cambio<-n
cambio1<-0
chequeo=0              # safety counter so the while loop cannot run forever
M[1]<-letters[k]
trt<-trimLR(LSD[,1])
while(j<n) {
  chequeo<-chequeo+1
  if (chequeo > n) break
  for(i in (j+1):n) {
    vname <- paste(trt[j], "-", trt[i], sep = "")#c(paste(trt[j], "-", trt[i], sep = ""),paste(trt[i], "-", trt[j], sep = ""))
	# Rows of KWtest holding pairs (j, j+1..n): KW.test() emits pairs in
	# nested-loop order, so the pairs starting at j occupy this contiguous
	# slice of the table.
	for(iKW in (0.5*(j-1)*(2*n-j)+1):(0.5*j*(2*n-j-1))){ #1:nKW
	  if(vname==trimLR(KWtest[iKW,1])){
	    s<-KWtest[iKW,4]
#	    cat(paste(iKW,"\t",s,"\n"))
	    break}  #if vname
	} #end for iKW
	# NOTE(review): if no row label matched above, `s` keeps its value from a
	# previous iteration (or is undefined on the very first pass) — confirm
	# KWtest always contains every pair in the expected order.
	if(s)  #s=TRUE, difference
	{
	  if(M[i]!="") break  #&!is.null(match(lastC(M[i]),letters))
	  else{
	  k<-k+1
	  if(lastC(M[i]) != letters[k]) M[i]<-paste(M[i],letters[k],sep="")#M[cambio]<-paste(M[cambio],letters[k],sep="")
#	  cat(paste("\t",k,"\t",j,"\t",i,"\t",M[j],"\t",M[i],"\n"))
	  break
	  }
	}
	else {  #s=FALSE, no difference
	  # Treatments j and i are not significantly different: make sure they
	  # share at least one letter.
	  bSame<-NsameLetters(M[j],M[i])
#	  cat("\t",bSame,"\t")
	  if(!bSame) {
	    if(M[i]!=""){
		M[j]<-paste(M[j],lastC(M[i]),sep="")}
	    else{
		if(lastC(M[j]) != letters[k]) M[j]<-paste(M[j],letters[k],sep="")
		if(lastC(M[i]) != letters[k]) M[i]<-paste(M[i],letters[k],sep="")
#		cat(paste("\t",k,"\t",j,"\t",i,"\t",M[j],"\t",M[i],"\n"))
	    } #else
	  } #for i
	}
  }
  j<-j+1
} #while j
return(M)
}
#=============================================================================
# Do two grouping-letter strings share at least one character?
#
# Used by KW.group() to decide whether two treatments already carry a
# common significance letter.
#
# str1, str2 : strings of grouping letters (possibly empty).
# Returns TRUE if the strings have at least one character in common,
# FALSE otherwise (including when either string is empty).
NsameLetters<-
function(str1,str2)
{
  s1 <- as.character(str1)
  s2 <- as.character(str2)
  # Empty input can never share a character (matches the original
  # n1 < 1 || n2 < 1 early return).
  if (nchar(s1) < 1 || nchar(s2) < 1) return(FALSE)
  # Vectorized membership test replaces the original O(n1*n2) double
  # loop, whose `break` only exited the inner loop and kept scanning.
  any(strsplit(s1, NULL)[[1]] %in% strsplit(s2, NULL)[[1]])
}#function
#=============================================================================
matrixplot<-
function (corr, method = c("circle", "square", "ellipse", "number",
"shade", "color", "pie"), type = c("full", "lower", "upper"),
add = FALSE, col = NULL, bg = "white", title = "", is.corr = TRUE,
diag = TRUE, outline = FALSE, mar = c(0, 0, 0, 0), addgrid.col = NULL,
addCoef.col = NULL, addCoefasPercent = FALSE, order = c("original",
"AOE", "FPC", "hclust", "alphabet"), hclust.method = c("complete",
"ward", "single", "average", "mcquitty", "median", "centroid"),
addrect = NULL, rect.col = "black", rect.lwd = 2, tl.pos = NULL,
tl.cex = 1, tl.col = "red", tl.offset = 0.4, tl.srt = 90,
cl.pos = NULL, cl.lim = NULL, cl.length = NULL, cl.cex = 0.8,
cl.ratio = 0.15, cl.align.text = "c", cl.offset = 0.5, addshade = c("negative",
"positive", "all"), shade.lwd = 1, shade.col = "white",
p.mat = NULL, sig.level = 0.05, insig = c("pch", "p-value",
"blank", "n"), pch = 4, pch.col = "black", pch.cex = 3,
plotCI = c("n", "square", "circle", "rect"), lowCI.mat = NULL,
uppCI.mat = NULL, ...)
{
method <- match.arg(method)
type <- match.arg(type)
order <- match.arg(order)
hclust.method <- match.arg(hclust.method)
plotCI <- match.arg(plotCI)
insig <- match.arg(insig)
if (!is.matrix(corr) & !is.data.frame(corr))
stop("Need a matrix or data frame!")
if (is.null(addgrid.col)) {
addgrid.col <- ifelse(method == "color" | method == "shade",
"white", "grey")
}
if (any(corr < cl.lim[1]) | any(corr > cl.lim[2]))
stop("color limits should cover matrix")
if (is.null(cl.lim)) {
if (is.corr)
cl.lim <- c(-1, 1)
if (!is.corr)
cl.lim <- c(min(corr), max(corr))
}
intercept <- 0
zoom <- 1
if (!is.corr) {
if (max(corr) * min(corr) < 0) {
intercept <- 0
zoom <- 1/max(abs(cl.lim))
}
if (min(corr) >= 0) {
intercept <- -cl.lim[1]
zoom <- 1/(diff(cl.lim))
}
if (max(corr) <= 0) {
intercept <- -cl.lim[2]
zoom <- 1/(diff(cl.lim))
}
corr <- (intercept + corr) * zoom
}
cl.lim2 <- (intercept + cl.lim) * zoom
int <- intercept * zoom
if (min(corr) < -1 - .Machine$double.eps || max(corr) > 1 +
.Machine$double.eps) {
stop("The matrix is not in [-1, 1]!")
}
if (is.null(col)) {
col <- colorRampPalette(c("#67001F", "#B2182B", "#D6604D",
"#F4A582", "#FDDBC7", "#FFFFFF", "#D1E5F0", "#92C5DE",
"#4393C3", "#2166AC", "#053061"))(200)
}
n <- nrow(corr)
m <- ncol(corr)
min.nm <- min(n, m)
ord <- 1:min.nm
if (!order == "original") {
ord <- corrMatOrder(corr, order = order, hclust.method = hclust.method)
corr <- corr[ord, ord]
}
if (is.null(rownames(corr)))
rownames(corr) <- 1:n
if (is.null(colnames(corr)))
colnames(corr) <- 1:m
getPos.Dat <- function(mat) {
x <- matrix(1:n * m, n, m)
tmp <- mat
if (type == "upper")
tmp[row(x) > col(x)] <- Inf
if (type == "lower")
tmp[row(x) < col(x)] <- Inf
if (type == "full")
tmp <- tmp
if (!diag)
diag(tmp) <- Inf
Dat <- tmp[is.finite(tmp)]
ind <- which(is.finite(tmp), arr.ind = TRUE)
Pos <- ind
Pos[, 1] <- ind[, 2]
Pos[, 2] <- -ind[, 1] + 1 + n
return(list(Pos, Dat))
}
Pos <- getPos.Dat(corr)[[1]]
n2 <- max(Pos[, 2])
n1 <- min(Pos[, 2])
nn <- n2 - n1
newrownames <- as.character(rownames(corr)[(n + 1 - n2):(n +
1 - n1)])
m2 <- max(Pos[, 1])
m1 <- min(Pos[, 1])
mm <- m2 - m1
newcolnames <- as.character(colnames(corr)[m1:m2])
# cat("n=",n,"\n")
# cat("m=",m,"\n")
# mat1<-getPos.Dat(corr)
# print(corr)
# print(mat1)
# cat("newrownames=",newrownames,"\n")
# cat("newcolnames=",newcolnames,"\n")
DAT <- getPos.Dat(corr)[[2]]
len.DAT <- length(DAT)
assign.color <- function(DAT) {
newcorr <- (DAT + 1)/2
newcorr[newcorr == 1] <- 1 - 1e-10
col.fill <- col[floor(newcorr * length(col)) + 1]
}
col.fill <- assign.color(DAT)
isFALSE = function(x) identical(x, FALSE)
isTRUE = function(x) identical(x, TRUE)
if (isFALSE(tl.pos)) {
tl.pos <- "n"
}
if (is.null(tl.pos) | isTRUE(tl.pos)) {
if (type == "full")
tl.pos <- "lt"
if (type == "lower")
tl.pos <- "ld"
if (type == "upper")
tl.pos <- "td"
}
if (isFALSE(cl.pos)) {
cl.pos <- "n"
}
if (is.null(cl.pos) | isTRUE(cl.pos)) {
if (type == "full")
cl.pos <- "r"
if (type == "lower")
cl.pos <- "b"
if (type == "upper")
cl.pos <- "r"
}
if (outline)
col.border <- "black"
if (!outline)
col.border <- col.fill
if (!add) {
par(mar = mar, bg = "white")
plot.new()
xlabwidth <- ylabwidth <- 0
for (i in 1:50) {
xlim <- c(m1 - 0.5 - xlabwidth, m2 + 0.5 + mm * cl.ratio *
(cl.pos == "r"))
ylim <- c(n1 - 0.5 - nn * cl.ratio * (cl.pos == "b"),
n2 + 0.5 + ylabwidth)
plot.window(xlim + c(-0.2, 0.2), ylim + c(-0.2, 0.2),
asp = 1, xaxs = "i", yaxs = "i")
x.tmp <- max(strwidth(newrownames, cex = tl.cex))
y.tmp <- max(strwidth(newcolnames, cex = tl.cex))
if (min(x.tmp - xlabwidth, y.tmp - ylabwidth) < 1e-04)
break
xlabwidth <- x.tmp
ylabwidth <- y.tmp
}
if (tl.pos == "n" | tl.pos == "d")
xlabwidth <- ylabwidth <- 0
if (tl.pos == "td")
ylabwidth <- 0
if (tl.pos == "ld")
xlabwidth <- 0
laboffset <- strwidth("W", cex = tl.cex) * tl.offset
xlim <- c(m1 - 0.5 - xlabwidth - laboffset, m2 + 0.5 +
mm * cl.ratio * (cl.pos == "r")) + c(-0.35, 0.15)
ylim <- c(n1 - 0.5 - nn * cl.ratio * (cl.pos == "b"),
n2 + 0.5 + ylabwidth * abs(sin(tl.srt * pi/180)) +
laboffset) + c(-0.15, 0.35)
if (.Platform$OS.type == "windows") {
windows.options(width = 7, height = 7 * diff(ylim)/diff(xlim))
}
plot.window(xlim = xlim, ylim = ylim, asp = 1, xlab = "",
ylab = "", xaxs = "i", yaxs = "i")
}
laboffset <- strwidth("W", cex = tl.cex) * tl.offset
symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
len.DAT), bg = bg, fg = bg)
if (method == "circle" & plotCI == "n") {
symbols(Pos, add = TRUE, inches = FALSE, bg = col.fill,
circles = 0.9 * abs(DAT)^0.5/2, fg = col.border)
}
if (method == "ellipse" & plotCI == "n") {
ell.dat <- function(rho, length = 99) {
k <- seq(0, 2 * pi, length = length)
x <- cos(k + acos(rho)/2)/2
y <- cos(k - acos(rho)/2)/2
return(cbind(rbind(x, y), c(NA, NA)))
}
ELL.dat <- lapply(DAT, ell.dat)
ELL.dat2 <- 0.85 * matrix(unlist(ELL.dat), ncol = 2,
byrow = TRUE)
ELL.dat2 <- ELL.dat2 + Pos[rep(1:length(DAT), each = 100),
]
polygon(ELL.dat2, border = col.border, col = col.fill)
}
if (method == "number" & plotCI == "n") {
text(Pos[, 1], Pos[, 2], font = 2, col = col.fill, labels = round((DAT -
int) * ifelse(addCoefasPercent, 100, 1)/zoom, ifelse(addCoefasPercent,
0, 2)))
}
if (method == "pie" & plotCI == "n") {
symbols(Pos, add = TRUE, inches = FALSE, circles = rep(0.5,
len.DAT) * 0.85)
pie.dat <- function(theta, length = 100) {
k <- seq(pi/2, pi/2 - theta, length = 0.5 * length *
abs(theta)/pi)
x <- c(0, cos(k)/2, 0)
y <- c(0, sin(k)/2, 0)
return(cbind(rbind(x, y), c(NA, NA)))
}
PIE.dat <- lapply(DAT * 2 * pi, pie.dat)
len.pie <- unlist(lapply(PIE.dat, length))/2
PIE.dat2 <- 0.85 * matrix(unlist(PIE.dat), ncol = 2,
byrow = TRUE)
PIE.dat2 <- PIE.dat2 + Pos[rep(1:length(DAT), len.pie),
]
polygon(PIE.dat2, border = "black", col = col.fill)
}
if (method == "shade" & plotCI == "n") {
addshade <- match.arg(addshade)
symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
len.DAT), bg = col.fill, fg = addgrid.col)
shade.dat <- function(w) {
x <- w[1]
y <- w[2]
rho <- w[3]
x1 <- x - 0.5
x2 <- x + 0.5
y1 <- y - 0.5
y2 <- y + 0.5
dat <- NA
if ((addshade == "positive" || addshade == "all") &
rho > 0) {
dat <- cbind(c(x1, x1, x), c(y, y1, y1), c(x,
x2, x2), c(y2, y2, y))
}
if ((addshade == "negative" || addshade == "all") &
rho < 0) {
dat <- cbind(c(x1, x1, x), c(y, y2, y2), c(x,
x2, x2), c(y1, y1, y))
}
return(t(dat))
}
pos_corr <- rbind(cbind(Pos, DAT))
pos_corr2 <- split(pos_corr, 1:nrow(pos_corr))
SHADE.dat <- matrix(na.omit(unlist(lapply(pos_corr2,
shade.dat))), byrow = TRUE, ncol = 4)
segments(SHADE.dat[, 1], SHADE.dat[, 2], SHADE.dat[,
3], SHADE.dat[, 4], col = shade.col, lwd = shade.lwd)
}
if (method == "square" & plotCI == "n") {
symbols(Pos, add = TRUE, inches = FALSE, squares = abs(DAT)^0.5,
bg = col.fill, fg = col.border)
}
if (method == "color" & plotCI == "n") {
symbols(Pos, add = TRUE, inches = FALSE, squares = rep(1,
len.DAT), bg = col.fill, fg = col.border)
}
symbols(Pos, add = TRUE, inches = FALSE, bg = NA, squares = rep(1,
len.DAT), fg = addgrid.col)
if (plotCI != "n") {
if (is.null(lowCI.mat) || is.null(uppCI.mat))
stop("Need lowCI.mat and uppCI.mat!")
if (!order == "original") {
lowCI.mat <- lowCI.mat[ord, ord]
uppCI.mat <- uppCI.mat[ord, ord]
}
pos.lowNew <- getPos.Dat(lowCI.mat)[[1]]
lowNew <- getPos.Dat(lowCI.mat)[[2]]
pos.uppNew <- getPos.Dat(uppCI.mat)[[1]]
uppNew <- getPos.Dat(uppCI.mat)[[2]]
if (!(method == "circle" || method == "square"))
stop("method shoud be circle or square if draw confidence interval!")
k1 <- (abs(uppNew) > abs(lowNew))
bigabs <- uppNew
bigabs[which(!k1)] <- lowNew[!k1]
smallabs <- lowNew
smallabs[which(!k1)] <- uppNew[!k1]
sig <- sign(uppNew * lowNew)
if (plotCI == "circle") {
symbols(pos.uppNew[, 1], pos.uppNew[, 2], add = TRUE,
inches = FALSE, circles = 0.95 * abs(bigabs)^0.5/2,
bg = ifelse(sig > 0, col.fill, col[ceiling((bigabs +
1) * length(col)/2)]), fg = ifelse(sig > 0,
col.fill, col[ceiling((bigabs + 1) * length(col)/2)]))
symbols(pos.lowNew[, 1], pos.lowNew[, 2], add = TRUE,
inches = FALSE, circles = 0.95 * abs(smallabs)^0.5/2,
bg = ifelse(sig > 0, bg, col[ceiling((smallabs +
1) * length(col)/2)]), fg = ifelse(sig > 0,
col.fill, col[ceiling((smallabs + 1) * length(col)/2)]))
}
if (plotCI == "square") {
symbols(pos.uppNew[, 1], pos.uppNew[, 2], add = TRUE,
inches = FALSE, squares = abs(bigabs)^0.5, bg = ifelse(sig >
0, col.fill, col[ceiling((bigabs + 1) * length(col)/2)]),
fg = ifelse(sig > 0, col.fill, col[ceiling((bigabs +
1) * length(col)/2)]))
symbols(pos.lowNew[, 1], pos.lowNew[, 2], add = TRUE,
inches = FALSE, squares = abs(smallabs)^0.5,
bg = ifelse(sig > 0, bg, col[ceiling((smallabs +
1) * length(col)/2)]), fg = ifelse(sig > 0,
col.fill, col[ceiling((smallabs + 1) * length(col)/2)]))
}
if (plotCI == "rect") {
rect.width <- 0.25
rect(pos.uppNew[, 1] - rect.width, pos.uppNew[, 2] +
smallabs/2, pos.uppNew[, 1] + rect.width, pos.uppNew[,
2] + bigabs/2, col = col.fill, border = col.fill)
segments(pos.lowNew[, 1] - rect.width, pos.lowNew[,
2] + DAT/2, pos.lowNew[, 1] + rect.width, pos.lowNew[,
2] + DAT/2, col = "black", lwd = 1)
segments(pos.uppNew[, 1] - rect.width, pos.uppNew[,
2] + uppNew/2, pos.uppNew[, 1] + rect.width,
pos.uppNew[, 2] + uppNew/2, col = "black", lwd = 1)
segments(pos.lowNew[, 1] - rect.width, pos.lowNew[,
2] + lowNew/2, pos.lowNew[, 1] + rect.width,
pos.lowNew[, 2] + lowNew/2, col = "black", lwd = 1)
segments(pos.lowNew[, 1] - 0.5, pos.lowNew[, 2],
pos.lowNew[, 1] + 0.5, pos.lowNew[, 2], col = "grey70",
lty = 3)
}
}
if (!is.null(p.mat) & !insig == "n") {
if (!order == "original")
p.mat <- p.mat[ord, ord]
pos.pNew <- getPos.Dat(p.mat)[[1]]
pNew <- getPos.Dat(p.mat)[[2]]
ind.p <- which(pNew > (sig.level))
if (insig == "pch") {
points(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
pch = pch, col = pch.col, cex = pch.cex, lwd = 2)
}
if (insig == "p-value") {
text(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
round(pNew[ind.p], 2), col = pch.col)
}
if (insig == "blank") {
symbols(pos.pNew[, 1][ind.p], pos.pNew[, 2][ind.p],
inches = FALSE, squares = rep(1, length(pos.pNew[,
1][ind.p])), fg = addgrid.col, bg = bg, add = TRUE)
}
}
if (cl.pos != "n") {
colRange <- assign.color(cl.lim2)
ind1 <- which(col == colRange[1])
ind2 <- which(col == colRange[2])
colbar <- col[ind1:ind2]
if (is.null(cl.length))
cl.length <- ifelse(length(colbar) > 20, 11, length(colbar) +
1)
labels <- seq(cl.lim[1], cl.lim[2], length = cl.length)
at <- seq(0, 1, length = length(labels))
if (cl.pos == "r") {
vertical <- TRUE
xlim <- c(m2 + 0.5 + mm * 0.02, m2 + 0.5 + mm * cl.ratio)
ylim <- c(n1 - 0.5, n2 + 0.5)
}
if (cl.pos == "b") {
vertical <- FALSE
xlim <- c(m1 - 0.5, m2 + 0.5)
ylim <- c(n1 - 0.5 - nn * cl.ratio, n1 - 0.5 - nn *
0.02)
}
print(ylim)
colorlegend(colbar = colbar, labels = round(labels, 2),
offset = cl.offset, ratio.colbar = 0.3, cex = cl.cex,
xlim = xlim, ylim = ylim, vertical = vertical, align = cl.align.text)
}
if (tl.pos != "n") {
ylabwidth2 <- strwidth(newrownames, cex = tl.cex)
xlabwidth2 <- strwidth(newcolnames, cex = tl.cex)
pos.xlabel <- cbind(m1:m2, n2 + 0.5 + laboffset)
pos.ylabel <- cbind(m1 - 0.5, n2:n1)
if (tl.pos == "td") {
if (type != "upper")
stop("type should be \"upper\" if tl.pos is \"dt\".")
pos.ylabel <- cbind(m1:(m1 + nn) - 0.5, n2:n1)
}
if (tl.pos == "ld") {
if (type != "lower")
stop("type should be \"lower\" if tl.pos is \"ld\".")
pos.xlabel <- cbind(m1:m2, n2:(n2 - mm) + 0.5 + laboffset)
}
if (tl.pos == "d") {
pos.ylabel <- cbind(m1:(m1 + nn) - 0.5, n2:n1)
pos.ylabel <- pos.ylabel[1:min(n, m), ]
symbols(pos.ylabel[, 1] + 0.5, pos.ylabel[, 2], add = TRUE,
bg = bg, fg = addgrid.col, inches = FALSE, squares = rep(1,
length(pos.ylabel[, 1])))
text(pos.ylabel[, 1] + 0.5, pos.ylabel[, 2], newcolnames[1:min(n,
m)], col = tl.col, cex = tl.cex, ...)
}
else {
text(pos.xlabel[, 1], pos.xlabel[, 2], newcolnames,
srt = tl.srt, adj = ifelse(tl.srt == 0, c(0.5,
0), c(0, 0)), col = tl.col, cex = tl.cex, offset = tl.offset,
...)
text(pos.ylabel[, 1], pos.ylabel[, 2], newrownames,
col = tl.col, cex = tl.cex, pos = 2, offset = tl.offset,
...)
}
}
title(title, ...)
if (!is.null(addCoef.col) & (!method == "number")) {
text(Pos[, 1], Pos[, 2], col = addCoef.col, labels = round((DAT -
int) * ifelse(addCoefasPercent, 100, 1)/zoom, ifelse(addCoefasPercent,
0, 2)))
}
if (type == "full" & plotCI == "n" & !is.null(addgrid.col))
rect(m1 - 0.5, n1 - 0.5, m2 + 0.5, n2 + 0.5, border = addgrid.col)
if (!is.null(addrect) & order == "hclust" & type == "full") {
corrRect.hclust(corr, k = addrect, method = hclust.method,
col = rect.col, lwd = rect.lwd)
}
invisible(corr)
} #matrixplot
#=============================================================================
getPos.Dat <-
# Map the finite cells of a (possibly triangle-masked) matrix to plotting
# coordinates. Masks the unwanted triangle (and optionally the diagonal)
# with Inf, then returns:
#   [[1]] Pos: two-column matrix of (x, y) plot positions (y is flipped so
#         row 1 is drawn at the top),
#   [[2]] Dat: the corresponding finite cell values (column-major order).
# Fix: `type` defaulted to the full choices vector and was never run through
# match.arg(), so `if (type == "upper")` raised a length-> 1 condition error
# in R >= 4.2 whenever `type` was omitted.
function(mat, type = c("full", "lower", "upper"), diag = TRUE, ...) {
  type <- match.arg(type)
  n <- nrow(mat)
  m <- ncol(mat)
  # only the dims of x matter (row()/col() below); values are irrelevant
  x <- matrix(seq_len(n * m), n, m)
  tmp <- mat
  if (type == "upper")
    tmp[row(x) > col(x)] <- Inf
  if (type == "lower")
    tmp[row(x) < col(x)] <- Inf
  if (!diag)
    diag(tmp) <- Inf
  Dat <- tmp[is.finite(tmp)]
  ind <- which(is.finite(tmp), arr.ind = TRUE)
  Pos <- ind
  Pos[, 1] <- ind[, 2]
  # flip the y-axis so row 1 sits at the top of the plot
  Pos[, 2] <- -ind[, 1] + 1 + n
  return(list(Pos, Dat))
}
#=============================================================================
|
f174dc23e22f6d29a2893047e36a2e70 dungeon_i25-m12-u3-v0.pddl_planlen=7.qdimacs 5559 47453
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i25-m12-u3-v0.pddl_planlen=7/dungeon_i25-m12-u3-v0.pddl_planlen=7.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 88
|
r
|
f174dc23e22f6d29a2893047e36a2e70 dungeon_i25-m12-u3-v0.pddl_planlen=7.qdimacs 5559 47453
|
##---------------------------helper functions--------------------------------------##
## install (if needed) and require packages
require_libraries<-function(package_list,verb=T){
for (lib in package_list) {
chk_install<-!(lib %in% installed.packages()[,"Package"])
if(chk_install){
install.packages(lib)
}
library(lib, character.only=TRUE,lib.loc=.libPaths())
if(verb){
cat("\n", lib, " loaded.", sep="")
}
}
}
## Open a DBI connection to the CDM database.
## - DBMS_type: one of "Oracle", "tSQL", "PostgreSQL"
## - driver_type: "OCI" (ROracle) or "JDBC" (RJDBC); only meaningful for Oracle
## - config_file: list with $username, $password, $access, $sid / $cdm_db_name
## Returns the connection with attributes "DBMS_type" and "driver_type" set,
## which the other helpers in this file read to pick the right DBI verbs.
## Fix: driver_type defaulted to c("OCI","JDBC") and was compared with `==`
## without match.arg(), so omitting it produced a length-2 `if` condition
## (an error in R >= 4.2). match.arg() now resolves the default to "OCI".
connect_to_db<-function(DBMS_type,driver_type=c("OCI","JDBC"),config_file){
  if(is.null(driver_type)){
    stop("must specify type of database connection driver!")
  }
  driver_type<-match.arg(driver_type)
  if(DBMS_type=="Oracle"){
    if(driver_type=="OCI"){
      require_libraries("ROracle")
      conn<-dbConnect(ROracle::Oracle(),
                      config_file$username,
                      config_file$password,
                      file.path(config_file$access,config_file$sid))
    }else if(driver_type=="JDBC"){
      require_libraries("RJDBC")
      # make sure ojdbc6.jar is in the AKI_CDM folder
      # Source: https://www.r-bloggers.com/connecting-r-to-an-oracle-database-with-rjdbc/
      drv<-JDBC(driverClass="oracle.jdbc.OracleDriver",
                classPath="./inst/ojdbc6.jar")
      url <- paste0("jdbc:oracle:thin:@", config_file$access,":",config_file$sid)
      conn <- RJDBC::dbConnect(drv, url,
                               config_file$username,
                               config_file$password)
    }else{
      stop("The driver type is not currently supported!")
    }
  }else if(DBMS_type=="tSQL"){
    require_libraries("RJDBC")
    # make sure sqljdbc.jar is in the AKI_CDM folder
    drv <- JDBC(driverClass="com.microsoft.sqlserver.jdbc.SQLServerDriver",
                classPath="./inst/sqljdbc.jar",
                identifier.quote="`")
    url <- paste0("jdbc:sqlserver:", config_file$access,
                  ";DatabaseName=",config_file$cdm_db_name,
                  ";username=",config_file$username,
                  ";password=",config_file$password)
    conn <- dbConnect(drv, url)
  }else if(DBMS_type=="PostgreSQL"){
    #not tested yet!
    # config_file$access is expected to look like "//host:port/..." here
    require_libraries("RPostgres")
    server<-gsub("/","",str_extract(config_file$access,"//.*(/)"))
    host<-gsub(":.*","",server)
    port<-gsub(".*:","",server)
    conn<-dbConnect(RPostgres::Postgres(),
                    host=host,
                    port=port,
                    dbname=config_file$cdm_db_name,
                    user=config_file$username,
                    password=config_file$password)
  }else{
    stop("the DBMS type is not currectly supported!")
  }
  # tag the connection so downstream helpers can branch on DBMS/driver
  attr(conn,"DBMS_type")<-DBMS_type
  attr(conn,"driver_type")<-driver_type
  return(conn)
}
## parse Oracle sql lines
## parse Oracle sql lines
## Read an annotated .sql file and return a list with:
##   $tbl_out   - output table name, from a "/*out: ..." header line
##   $action    - "write" or "query", from a "/*action: ..." header line
##   $statement - the SQL text with comments stripped and any "&&param"
##                placeholders (declared on a "/*params: ..." line)
##                substituted from the ... arguments
## Fix: the file connection is now closed via on.exit(), so it no longer
## leaks if an error is raised while reading or substituting.
parse_sql<-function(file_path,...){
  param_val<-list(...)
  #read file; guarantee the connection is released even on error
  con<-file(file_path,"r")
  on.exit(close(con), add = TRUE)
  #initialize string
  sql_string <- ""
  #intialize result holder
  params_ind<-FALSE
  tbl_out<-NULL
  action<-NULL
  while (TRUE){
    #parse the first line
    line <- readLines(con, n = 1)
    #check for endings
    if (length(line)==0) break
    #collect overhead info
    if(grepl("^(/\\*out)",line)){
      #output table name
      tbl_out<-trimws(gsub("(/\\*out\\:\\s)","",line),"both")
    }else if(grepl("^(/\\*action)",line)){
      #"write" or "query"(fetch) the output table
      action<-trimws(gsub("(/\\*action\\:\\s)","",line),"both")
    }else if(grepl("^(/\\*params)",line)){
      params_ind<-TRUE
      #breakdown global parameters
      params<-gsub(",","",strsplit(trimws(gsub("(/\\*params\\:\\s)","",line),"both")," ")[[1]])
      params_symbol<-params
      #normalize the parameter names (strip the "&&" sigil)
      params<-gsub("&&","",params)
    }
    #remove the first line
    line<-gsub("\\t", " ", line)
    #translate comment symbol '--'
    if(grepl("--",line) == TRUE){
      line <- paste(sub("--","/*",line),"*/")
    }
    #attach new line (header /* lines are dropped from the statement)
    if(!grepl("^(/\\*)",line)){
      sql_string <- paste(sql_string, line)
    }
  }
  #update parameters as needed
  if(params_ind){
    #align param_val with params: missing parameters become NULL
    params_miss<-params[!(params %in% names(param_val))]
    for(j in seq_along(params_miss)){
      param_val[params_miss[j]]<-list(NULL)
    }
    param_val<-param_val[which(names(param_val) %in% params)]
    param_val<-param_val[order(names(param_val))]
    params_symbol<-params_symbol[order(params)]
    params<-params[order(params)]
    #substitube params_symbol by param_val
    #  - cdm_db_link becomes "@<link>", dates get quoted, NULL becomes " "
    for(i in seq_along(params)){
      sql_string<-gsub(params_symbol[i],
                       ifelse(is.null(param_val[[i]])," ",
                              ifelse(params[i]=="cdm_db_link",
                                     paste0("@",param_val[[i]]),
                                     ifelse(params[i] %in% c("start_date","end_date"),
                                            paste0("'",param_val[[i]],"'"),
                                            param_val[[i]]))),
                       sql_string)
    }
  }
  #clean up excessive "[ ]." or "[@" in tSQL when substitute value is NULL
  sql_string<-gsub("\\[\\ ]\\.","",sql_string)
  sql_string<-gsub("\\[@","[",sql_string)
  out<-list(tbl_out=tbl_out,
            action=action,
            statement=sql_string)
  return(out)
}
## execute single sql snippet
execute_single_sql<-function(conn,statement,write,table_name){
DBMS_type<-attr(conn,"DBMS_type")
driver_type<-attr(conn,"driver_type")
if(write){
#oracle and sql sever uses different connection driver and different functions are expected for sending queries
#dbSendQuery silently returns an S4 object after execution, which causes error in RJDBC connection (for sql server)
if(DBMS_type=="Oracle"){
if(!(driver_type %in% c("OCI","JDBC"))){
stop("Driver type not supported for ",DBMS_type,"!\n")
}else{
#drop existing tables, if applicable
chk_exist<-dbGetQuery(conn,paste0("select tname from tab where tname ='",table_name,"'"))
if(length(chk_exist$TNAME)>0){
if(driver_type=="OCI"){
dbSendQuery(conn,paste("drop table",table_name))
}else{
dbSendUpdate(conn,paste("drop table",table_name))
}
}
if(driver_type=="OCI"){
dbSendQuery(conn,statement)
}else{
dbSendUpdate(conn,statement)
}
}
}else if(DBMS_type=="tSQL"){
if(driver_type=="JDBC"){
#drop existing tables, if applicable
# dbSendUpdate(conn,paste0("IF EXISTS (select * from dbo.sysobjects
# where id = object_id(N'dbo.",table_name,"') and
# objectproperty(id, N'IsTable') = 1)",
# "BEGIN ",paste("drop table",table_name)," END ",
# "GO"))
#write new tables
dbSendUpdate(conn,statement)
}else{
stop("Driver type not supported for ",DBMS_type,"!\n")
}
}else{
stop("DBMS type not supported!")
}
}else{
dat<-dbGetQuery(conn,statement)
return(dat)
}
cat("create temporary table: ", table_name, ".\n")
}
## execute multiple sql snippets
#---statements have to be in correct logical order
## Execute a sequence of annotated .sql files in order.
## Each file is parsed with parse_sql() (passing ... through as SQL
## parameters) and run via execute_single_sql(); files must be listed in a
## logically consistent order since later statements may depend on the
## tables written by earlier ones. When verb is TRUE a progress line is
## printed per file.
execute_batch_sql<-function(conn,statements,verb,...){
  for(stmt_file in statements){
    parsed<-parse_sql(file_path=stmt_file,...)
    execute_single_sql(conn,
                       statement=parsed$statement,
                       write=(parsed$action=="write"),
                       table_name=toupper(parsed$tbl_out))
    if(verb){
      cat(stmt_file,"has been executed and table",
          toupper(parsed$tbl_out),"was created.\n")
    }
  }
}
## clean up intermediate tables
drop_tbl<-function(conn,table_name){
DBMS_type<-attr(conn,"DBMS_type")
driver_type<-attr(conn,"driver_type")
if(DBMS_type=="Oracle"){
# purge is only required in Oracle for completely destroying temporary tables
drop_temp<-paste("drop table",table_name,"purge")
if(driver_type=="OCI"){
dbSendQuery(conn,drop_temp)
}else if(driver_type=="JDBC"){
dbSendUpdate(conn,drop_temp)
}else{
stop("Driver type not supported for ",DBMS_type,"!.\n")
}
}else if(DBMS_type=="tSQL"){
drop_temp<-paste("drop table",table_name)
if(driver_type=="JDBC"){
dbSendUpdate(conn,drop_temp)
}else{
stop("Driver type not supported for ",DBMS_type,"!.\n")
}
}else{
warning("DBMS type not supported!")
}
}
## Build the AKI study cohort on the database and pull the results back.
## Runs the ordered pipeline of cohort .sql scripts for the connection's
## DBMS (write steps create temp tables), fetches the attrition (consort)
## counts and the final encounter table, then drops every intermediate
## table it created. Returns list(aki_enc = final cohort data frame,
## attrition = consort counts).
## - cdm_db_name / cdm_db_schema: substituted into the SQL as parameters
## - start_date / end_date: cohort window (quoted into the SQL)
extract_cohort<-function(conn,
                         cdm_db_name,
                         cdm_db_schema,
                         start_date="2010-01-01",
                         end_date="2018-12-31",
                         verb=T){
  #check if DBMS type is currently supported
  if(!(attr(conn,"DBMS_type") %in% c("Oracle","tSQL","PostgreSQL"))){
    stop("DBMS_type=",attr(conn,"DBMS_type"),"is not currently supported \n(should be one of 'Oracle','tSQL','PostgreSQL', case-sensitive)")
  }
  #execute(write) the following sql snippets according to the specified order
  # (each script depends on tables written by the previous ones)
  statements<-paste0(
    paste0("./src/",attr(conn,"DBMS_type")),
    c("/cohort_initial.sql",
      "/cohort_all_SCr.sql",
      "/cohort_enc_SCr.sql",
      "/cohort_baseline_SCr.sql",
      "/cohort_exclude.sql",
      "/cohort_eligib.sql",
      "/cohort_AKI_staging.sql",
      "/cohort_final.sql")
  )
  execute_batch_sql(conn,statements,verb,
                    cdm_db_name=cdm_db_name,
                    cdm_db_schema=cdm_db_schema,
                    start_date=start_date,
                    end_date=end_date)
  #collect attrition info
  sql<-parse_sql(paste0("./src/",attr(conn,"DBMS_type"),"/consort_diagram.sql"))
  attrition<-execute_single_sql(conn,
                                statement=sql$statement,
                                write=(sql$action=="write"),
                                table_name=toupper(sql$tbl_out))
  #read Table1 (the last script's output table holds the final cohort)
  tbl1<-parse_sql(statements[length(statements)])$tbl_out
  aki_enc<-dbGetQuery(conn,paste("select * from",tbl1))
  #clean out intermediate tables (all but the final one)
  for(i in 1:(length(statements)-1)){
    parse_out<-parse_sql(statements[i])
    if(parse_out$action=="write"){
      drop_tbl(conn,toupper(parse_out$tbl_out))
    }else{
      warning("no temporary table was created by this statment!")
    }
    if(verb){
      cat("temp table",toupper(parse_out$tbl_out),"dropped. \n")
    }
  }
  #output
  out<-list(aki_enc=aki_enc,
            attrition=attrition)
  return(out)
}
#----collect facts from i2b2 observation_fact table----
#note: there should be a reference patient table ("pat_num") on oracle server with the key column "key_col"
## Collect facts from the i2b2 observation_fact table for a reference
## patient table (`pat_num`, joined on `key_col`), filtered by concept
## codes: an exact-match list (`code_vec`) and/or an Oracle regexp
## (`regexp_str`); at least one must be supplied.
## Returns a named list with one data frame per schema in `schema`.
## Fix: inside the per-schema loop the join clause pasted the whole
## `schema` vector instead of `schema[i]`, producing a vector of malformed
## SQL statements whenever more than one schema was requested (the sibling
## collect_cdm() already indexes correctly).
collect_i2b2_obs<-function(conn,
                           code_vec=c(),
                           regexp_str="",
                           col_out=c("patient_num",
                                     "encounter_num",
                                     "concept_cd",
                                     "units_cd",
                                     "nval_num",
                                     "tval_char",
                                     "modifier_cd",
                                     "start_date",
                                     "end_date"),
                           key_col=c("patient_num"),
                           schema=c("blueherondata"),
                           pat_num){
  # key columns come from the join, keep them out of the select list
  col_out<-col_out[!col_out %in% key_col]
  # build the join condition "p.<key>=f.<key>" for each key column
  match_key<-c()
  for(i in seq_along(key_col)){
    match_key<-c(match_key,paste(paste0(c("p.","f."),key_col[i]),collapse = "="))
  }
  if(length(key_col)>1){
    match_key<-paste(match_key,collapse = " and ")
  }
  i2b2_obs_lst<-list()
  for(i in seq_along(schema)){
    sql<-paste0("select distinct ",
                paste0(paste(paste0("p.",key_col,collapse = ","),",",
                             paste(paste0("f.",col_out,collapse = ",")),
                             paste0(" from ", pat_num, " p"),
                             paste0(" join ", schema[i], ".observation_fact f"),
                             " on ",match_key)))
    # concept filter: exact list, regexp, or both
    if(length(code_vec)>0&nchar(regexp_str)>0){
      sql<-paste0(sql," and ",
                  "(f.concept_cd in ",paste0("('",paste(code_vec,collapse="','"),"')")," or ",
                  "regexp_like(f.concept_cd,'",regexp_str,"','i'))")
    }else if(length(code_vec)==0&nchar(regexp_str)>0){
      sql<-paste0(sql," and ",
                  "regexp_like(f.concept_cd,'",regexp_str,"','i')")
    }else if(length(code_vec)>0&nchar(regexp_str)==0){
      sql<-paste0(sql," and ",
                  "f.concept_cd in ",paste0("('",paste(code_vec,collapse="','"),"')"))
    }else{
      stop("either code_vec or regexp_str should be specified for filtering concept_cd!")
    }
    i2b2_obs_lst[[schema[i]]]<-DBI::dbGetQuery(conn,sql)
  }
  return(i2b2_obs_lst)
}
#----collect concepts from i2b2 concept_dimension table----
#note: there should be a reference concept table ("concept") on oracle server with the key column "key_col"
## Collect matching rows from the i2b2 concept_dimension table for a
## reference concept table (`concept`, joined on `key_col`).
## - exact_match: TRUE joins on equality; FALSE uses a case-insensitive
##   regexp match of f.<key> against the reference value
## - cd_prefix: optional regexp prefix f.<key> must also start with
## Returns a named list with one data frame per schema in `schema`; the
## reference key is aliased as ICD_FUZZY in the output.
## Fix: the per-schema loop pasted the whole `schema` vector instead of
## `schema[i]` into the join clause, yielding a vector of malformed SQL
## statements when more than one schema was requested.
## NOTE(review): when length(key_col) > 1, the cd_prefix clause is
## re-appended to every previously built condition on each iteration —
## presumably only single-key use was intended; verify before multi-key use.
collect_i2b2_cd<-function(conn,
                          exact_match=T,
                          cd_prefix=NULL,
                          col_out=c("concept_cd",
                                    "name_char",
                                    "concept_path"),
                          key_col=c("CONCEPT_CD"),
                          schema=c("blueherondata"),
                          concept){
  # key columns come from the join, keep them out of the select list
  col_out<-col_out[!col_out %in% key_col]
  match_key<-c()
  for(i in seq_along(key_col)){
    if(exact_match){
      match_key<-c(match_key,paste(paste0(c("cd.","f."),key_col[i]),collapse = "="))
    }else{
      match_key<-c(match_key,paste0("regexp_like(f.",key_col[i],",('(' || cd.",key_col[i]," || ')+'),'i')"))
    }
    if(!is.null(cd_prefix)){
      match_key<-paste0(match_key," and regexp_like(f.",key_col[i],",'^(",cd_prefix,")+')")
    }
  }
  if(length(key_col)>1){
    match_key<-paste(match_key,collapse = " and ")
  }
  i2b2_obs_lst<-list()
  for(i in seq_along(schema)){
    sql<-paste0("select distinct ",
                paste0(paste(paste0("cd.",key_col,collapse = ",")," ICD_FUZZY,",
                             paste(paste0("f.",col_out,collapse = ",")),
                             paste0(" from ", concept, " cd"),
                             paste0(" join ", schema[i], ".concept_dimension f"),
                             " on ",match_key)))
    i2b2_obs_lst[[schema[i]]]<-DBI::dbGetQuery(conn,sql)
  }
  return(i2b2_obs_lst)
}
#----collect data from one of the CDM tables----
#note: there should be a reference patient table ("pat_num") on oracle server with the key column "key_col"
## Collect rows from one PCORnet CDM table for a reference patient table
## (`pat_num`, joined key_col_pat -> key_col_schema). Table-specific code
## filters:
##   PROCEDURES  - (PX_TYPE || ':' || PX) in code_vec
##   PRESCRIBING - RXNORM_CUI in code_vec and/or RAW_RX_MED_NAME regexp
##   DISPENSING  - NDC in code_vec
##   DIAGNOSIS   - (DX_TYPE || ':' || DX) in code_vec and/or regexp
## Returns a named list with one data frame per schema in `schema`.
## Fixes: the PRESCRIBING branch emitted "andf.RXNORM_CUI" (missing space,
## invalid SQL), and the DISPENSING branch opened a "(" that was never
## closed, producing an unbalanced predicate.
collect_cdm<-function(conn,
                      code_vec=c(),
                      str_vec=c(),
                      col_out=NULL,
                      key_col_schema=c("PATID"),
                      key_col_pat=c("PATIENT_NUM"),
                      schema=c("PCORNET_CDM_C7R2"),
                      tbl="DEMOGRAPHIC",
                      pat_num){
  # default to all columns of the target table (probe with a 0-row query)
  if(is.null(col_out)){
    col_out<-colnames(DBI::dbGetQuery(conn,
                                      paste0("select * from ",schema[1],".",tbl," where 1=0")))
  }
  col_out<-col_out[!col_out %in% key_col_schema]
  # build the join condition "p.<pat_key>=f.<schema_key>" per key pair
  match_key<-c()
  for(i in seq_along(key_col_pat)){
    match_key<-c(match_key,paste(c(paste0("p.",key_col_pat[i]),
                                   paste0("f.",key_col_schema[i])),
                                 collapse = "="))
  }
  if(length(key_col_pat)>1){
    match_key<-paste(match_key,collapse = " and ")
  }
  cdm_obs_lst<-list()
  for(i in seq_along(schema)){
    sql<-paste0("select distinct ",
                paste0(paste(paste0("p.",key_col_pat,collapse = ","),",",
                             paste(paste0("f.",col_out,collapse = ",")),
                             paste0(" from ", pat_num, " p"),
                             paste0(" join ", schema[i], ".",tbl," f"),
                             " on ",match_key)))
    if(tbl=="PROCEDURES"){
      #procedures are identified uniquely by (PX_TYPE || ':' || PX)
      sql<-paste0(sql," and",
                  " (f.PX_TYPE || ':' || f.PX) in ",paste0("('",paste(code_vec,collapse="','"),"')"))
    }else if(tbl=="PRESCRIBING"){
      #prescribing are identified uniquely by RXNORM_CUI or RAW_RX_MED_NAME
      if(length(code_vec)>0){
        sql<-paste0(sql," and ",
                    "f.RXNORM_CUI in ",paste0("('",paste(code_vec,collapse="','"),"')"))
      }
      if(length(str_vec)>0){
        # "or" widens the code filter; "and" applies it when codes absent
        if(length(code_vec)>0){
          sql<-paste0(sql, " or ",
                      " regexp_like(f.RAW_RX_MED_NAME,",paste0("'((",paste(str_vec,collapse = ")|("),"))+'"),",'i')")
        }else{
          sql<-paste0(sql, " and ",
                      " regexp_like(f.RAW_RX_MED_NAME,",paste0("'((",paste(str_vec,collapse = ")|("),"))+'"),",'i')")
        }
      }
    }else if(tbl=="DISPENSING"){
      #dispensing are identified by NDC codes
      sql<-paste0(sql," and ",
                  "f.NDC in ",paste0("('",paste(code_vec,collapse="','"),"')"))
    }else if(tbl=="DIAGNOSIS"){
      #diagnosis are identified by (DX_TYPE || ':' || DX)
      if(length(code_vec)>0){
        sql<-paste0(sql," and ",
                    "(f.DX_TYPE || ':' || f.DX) in ",paste0("('",paste(code_vec,collapse="','"),"')"))
      }
      if(length(str_vec)>0){
        if(length(code_vec)>0){
          sql<-paste0(sql, " or ",
                      " regexp_like((f.DX_TYPE || ':' || f.DX),",paste0("'((",paste(str_vec,collapse = ")|("),"))+'"),",'i')")
        }else{
          sql<-paste0(sql," and",
                      " regexp_like((f.DX_TYPE || ':' || f.DX),",paste0("'((",paste(str_vec,collapse = ")|("),"))+'"),",'i')")
        }
      }
    }
    cdm_obs_lst[[schema[i]]]<-DBI::dbGetQuery(conn,sql)
  }
  return(cdm_obs_lst)
}
## Draw the cohort attrition (CONSORT) diagram from the attrition counts.
## - consort_tbl: data frame with columns CNT_TYPE and ENC_CNT, as returned
##   by the consort_diagram.sql step in extract_cohort().
## Joins the counts onto a fixed set of node labels, computes proportions
## relative to the appropriate denominator, then draws the flowchart with
## the `diagram` package (openplotmat/straightarrow/textrect) on a 3x10
## coordinate grid. Node ids and arrow endpoints are hand-tuned to that
## grid. Called for its plotting side effect; no return value.
consort_diag<-function(consort_tbl){
  # fixed node order: exclusions first, then AKI stage-transition counts
  tbl<-data.frame(CNT_TYPE=c("Initial",
                             "Has_at_least_2_SCr",
                             "Initial_GFR_below_15",
                             "RRT_within_48hr",
                             "Burn_patients",
                             "Pre_ESRD",
                             "Pre_RRT",
                             "Total",
                             "nonAKI",
                             "AKI1",
                             "nonAKI_to_AKI2",
                             "AKI1_to_AKI2",
                             "nonAKI_to_AKI3",
                             "nonAKI_to_AKI2_to_AKI3",
                             "AKI1_to_AKI2_to_AKI3"),
                  label_txt=c("Inpatient visit with LOS >= 2\nand of age >= 18",
                              "Has at least 2 SCr record",
                              "Excluded: Initial eGFR below 15",
                              "Excluded: RRT with 48 hours since \nadmission",
                              "Excluded: Burn Patients",
                              "Excluded: Pre-existance of \nESRD",
                              "Excluded: Pre-existance of \ndialysis and renal transplantation",
                              "Total eligible encounters",
                              "Non-AKI",
                              "AKI1",
                              "AKI2",
                              "AKI1 to AKI2",
                              "AKI3",
                              "AKI2 to AKI3",
                              "AKI1 to AKI2 to AKI3"),
                  stringsAsFactors=F) %>%
    left_join(consort_tbl, by="CNT_TYPE") %>%
    replace_na(list(ENC_CNT=0)) %>%
    # denominator for each proportion: the most recent milestone count
    mutate(cnt_ref=ifelse(CNT_TYPE %in% c("Initial","Has_at_least_1_SCr","Total"),ENC_CNT,NA)) %>%
    fill(cnt_ref,.direction="down") %>%
    mutate(cnt_ref=ifelse(CNT_TYPE=="Has_at_least_1_SCr",lag(cnt_ref,n=1L),cnt_ref)) %>%
    mutate(ENC_PROP=round(ENC_CNT/cnt_ref,4)) %>%
    mutate(label_val=paste0("(",ENC_CNT,",",ENC_PROP*100,"%)")) %>%
    mutate(label=paste(label_txt,"\n",label_val)) %>%
    # node_id maps each row to a cell of the 3x10 layout grid below
    mutate(node_id=c(2,5,7,9,10,12,13,17,18,22,23,25,24,26,28))
  #prepare canvas
  par(mfrow=c(1,1))
  par(mar=c(0,0,0,0))
  openplotmat()
  ##number of elements per row (10 rows x 3 columns)
  elpos<-coordinates(rep(3,10))
  # arrow endpoints, as (from-node, to-node) pairs on the grid
  fromto<-matrix(ncol=2,byrow=T,
                 c(2,5,
                   5,8,
                   8,7,
                   8,9,
                   8,11,
                   11,10,
                   11,12,
                   11,14,
                   14,13,
                   14,17,
                   17,18,
                   17,20,
                   20,19,
                   20,21,
                   19,22,
                   20,23,
                   21,24,
                   22,25,
                   23,26,
                   25,28
                 ))
  ##draw arrows
  arrpos <- matrix(ncol = 2, nrow = nrow(fromto))
  for (i in 1:nrow(fromto)){
    arrpos[i, ] <- straightarrow (to = elpos[fromto[i, 2], ],
                                  from = elpos[fromto[i, 1], ],
                                  lwd = 1, arr.pos = 0.6, arr.length = 0.3)
  }
  ##draw nodes
  for(i in 1:nrow(tbl)){
    textrect(elpos[tbl$node_id[i],],
             radx=0.15,
             rady=0.05,
             lab=tbl$label[i],
             font=4,
             cex=0.7)
  }
}
## print link for LOINC code search result
get_loinc_ref<-function(loinc){
#url to loinc.org
url<-paste0(paste0("https://loinc.org/",loinc))
#return the link
return(url)
}
## get drug names for RXNORM by scraping REST API
get_rxcui_nm<-function(rxcui){
#url link to REST API
rx_url<-paste0("https://rxnav.nlm.nih.gov/REST/rxcui/",rxcui,"/")
#get and parse html object
rxcui_obj <- getURL(url = rx_url)
rxcui_content<-htmlParse(rxcui_obj)
#extract name
rxcui_name<-xpathApply(rxcui_content, "//body//rxnormdata//idgroup//name", xmlValue)
rxcui_name<-unique(unlist(rxcui_name))
if(length(rxcui_name)>0) rxcui_name[1]
else rxcui
}
## get drug names for NDC by scraping REST API
get_ndc_nm<-function(ndc){
parse_nm<-function(rx_obj){
rx_content<-htmlParse(rx_obj)
rx_attr<-xpathApply(rx_content, "//tbody//td[@data-title]",xmlAttrs)
rx_name<-xpathApply(rx_content, "//tbody//td[@data-title]",xmlValue)[which(rx_attr=="Proprietary Name")]
rx_name<-unique(unlist(rx_name))
return(rx_name)
}
#ndc is at least 11-digits
ndc<-case_when(
nchar(ndc) <= 11 ~ str_pad(ndc,side="left",11,pad="0"),
TRUE ~ ndc
)
#url link to REST API
rx_url<-paste0("https://ndclist.com/?s=",ndc)
#get and parse html object
rx_obj<-getURL(url = rx_url)
if(!length(rx_obj) %in% c("",NULL)) parse_nm(rx_obj)[1]
else ndc
}
#ref: https://www.r-bloggers.com/web-scraping-google-urls/
#ref: https://www.r-bloggers.com/web-scraping-google-urls/
## Google a medical code ("TYPE:CODE") and return the first secure result
## URL. "CH:" prefixed codes are searched as CPT. nlink is currently unused.
google_code<-function(code,nlink=1){
  #split the code into its type prefix and the bare code
  prefix<-gsub(":.*","",code)
  code_type<-ifelse(prefix=="CH","CPT",prefix)
  bare_code<-gsub(".*:","",code)
  #search on google
  search_url<-paste0("https://www.google.com/search?q=",code_type,":",bare_code)
  page<-getURL(search_url)
  #parse HTML into tree structure
  doc<-htmlParse(page)
  #extract url nodes using XPath. Originally I had used "//a[@href][@class='l']" until the google code change.
  anchor_attrs<-xpathApply(doc, "//h3//a[@href]", xmlAttrs)
  #extract urls from the anchor attributes
  urls<-sapply(anchor_attrs, function(x) x[[1]])
  #only keep the secure links, strip tracking suffix, normalize prefix
  urls<-urls[grepl("(https\\:)+",urls)]
  urls<-gsub("(\\&sa=U).*$","",urls)
  urls<-paste0("https://",gsub(".*(https://)","",urls))
  #free doc from memory
  free(doc)
  return(urls[1])
}
## render report
render_report<-function(which_report="./report/AKI_CDM_EXT_VALID_p1_QA.Rmd",
DBMS_type,driver_type,
start_date,end_date=as.character(Sys.Date())){
# to avoid <Error in unlockBinding("params", <environment>) : no binding for "params">
# a hack to trick r thinking it's in interactive environment --not work!
# unlockBinding('interactive',as.environment('package:base'))
# assign('interactive',function() TRUE,envir=as.environment('package:base'))
rmarkdown::render(input=which_report,
params=list(DBMS_type=DBMS_type,
driver_type=driver_type,
start_date=start_date,
end_date=end_date),
output_dir="./output/",
knit_root_dir="../")
}
#### survival-like data format transformation ####
format_data<-function(dat,type=c("demo","vital","lab","dx","px","med"),pred_end){
  # Reshape raw facts of one `type` into the long format used downstream:
  # one row per (ENCOUNTERID, key, value, dsa), where dsa is days since
  # admission (-1 marks baseline/static facts) and `pred_end` bounds the
  # observation window (same time unit as dsa).
  if(type=="demo"){
    #demo has to be unique for each encounter
    dat_out<-dat %>%
      filter(key %in% c("AGE","SEX","RACE","HISPANIC")) %>%
      group_by(ENCOUNTERID,key) %>%
      top_n(n=1L,wt=value) %>% #keep the top value if multiple entries exist (note: top_n retains ties)
      ungroup %>%
      mutate(cat=value,dsa=-1,key_cp=key,
             value2=ifelse(key=="AGE",value,"1")) %>%
      unite("key2",c("key_cp","cat"),sep="_") %>%
      mutate(key=ifelse(key=="AGE",key,key2),
             value=as.numeric(value2)) %>%
      dplyr::select(ENCOUNTERID,key,value,dsa)
  }else if(type=="vital"){
    dat_out<-c()
    #multiple smoking status is resolved by using the first record after sorting
    dat_out %<>%
      bind_rows(dat %>% dplyr::select(-PATID) %>%
                  filter(key %in% c("SMOKING","TOBACCO","TOBACCO_TYPE")) %>%
                  group_by(ENCOUNTERID,key) %>%
                  arrange(value) %>% dplyr::slice(1:1) %>%
                  ungroup %>%
                  mutate(cat=value,dsa=-1,key_cp=key,value=1) %>%
                  unite("key",c("key_cp","cat"),sep="_") %>%
                  dplyr::select(ENCOUNTERID,key,value,dsa))
    #multiple ht,wt,bmi resolved by taking median (implausible values dropped first)
    dat_out %<>%
      bind_rows(dat %>% dplyr::select(-PATID) %>%
                  filter(key %in% c("HT","WT","BMI")) %>%
                  mutate(value=as.numeric(value)) %>%
                  group_by(ENCOUNTERID,key) %>%
                  mutate(value=ifelse((key=="HT" & (value>95 | value<=0))|
                                        (key=="WT" & (value>1400 | value<=0))|
                                        (key=="BMI" & (value>70 | value<=0)),NA,value)) %>%
                  dplyr::summarize(value=median(value,na.rm=T),.groups="drop") %>%
                  mutate(dsa=-1))
    #multiple bp are aggregated by taking: lowest & slope
    bp<-dat %>% dplyr::select(-PATID) %>%
      filter(key %in% c("BP_DIASTOLIC","BP_SYSTOLIC")) %>%
      mutate(value=as.numeric(value)) %>%
      mutate(value=ifelse((key=="BP_DIASTOLIC" & (value>120 | value<40))|
                            (key=="BP_SYSTOLIC" & (value>210 | value<40)),NA,value)) %>%
      group_by(ENCOUNTERID,key,dsa) %>%
      dplyr::mutate(value_imp=median(value,na.rm=T)) %>%
      ungroup
    #impute implausible readings with the same-day median
    bp %<>%
      filter(!is.na(value_imp)) %>%
      mutate(imp_ind=ifelse(is.na(value),1,0)) %>%
      mutate(value=ifelse(is.na(value),value_imp,value)) %>%
      dplyr::select(-value_imp)
    bp %<>% dplyr::select(-imp_ind)
    #--minimal bp per day
    bp_min<-bp %>%
      group_by(ENCOUNTERID,key,dsa) %>%
      dplyr::summarize(value_lowest=min(value,na.rm=T),.groups="drop") %>%
      mutate(key=paste0(key,"_min")) %>%
      dplyr::rename(value=value_lowest)
    #--trend of bp (per-day linear slope over within-day timestamps)
    bp_slp_eligb<-bp %>%
      mutate(add_time=difftime(strptime(timestamp,"%Y-%m-%d %H:%M:%S"),strptime(timestamp,"%Y-%m-%d"),units="mins")) %>%
      mutate(timestamp=round(as.numeric(add_time)/60,2)) %>% #coefficient represents change per hour
      dplyr::select(-add_time) %>%
      group_by(ENCOUNTERID,key,dsa) %>%
      dplyr::mutate(df=length(unique(timestamp))-1) %>%
      dplyr::mutate(sd=ifelse(df>0,sd(value),0))
    #fit lm only where >=3 distinct timepoints with non-trivial variation exist
    bp_slp<-bp_slp_eligb %>%
      filter(df > 1 & sd >= 1e-2) %>%
      nest(data=c(timestamp,value,df,sd)) %>%
      mutate(
        fit_val=map(data, ~ lm(value ~ timestamp, data=.x)),
        tidied=map(fit_val,tidy)
      ) %>%
      unnest(tidied)
    bp_slp %<>%
      filter(term=="timestamp") %>%
      dplyr::rename(value=estimate) %>%
      mutate(value=ifelse(p.value>0.5 | is.nan(p.value),0,value)) %>% #insignificant slopes shrunk to 0
      dplyr::select(ENCOUNTERID,key,dsa,value) %>%
      bind_rows(bp_slp_eligb %>%
                  filter(df<=1 | sd < 1e-2) %>% mutate(value=0) %>%
                  dplyr::select(ENCOUNTERID,key,value,dsa) %>%
                  ungroup %>% unique) %>%
      bind_rows(bind_rows(bp_slp_eligb %>%
                            filter(df==1 & sd >= 1e-2) %>%
                            mutate(value=round((max(value)-min(value))/(max(timestamp)-min(timestamp)),2)) %>%
                            dplyr::select(ENCOUNTERID,key,value,dsa) %>%
                            ungroup %>% unique)) %>%
      mutate(key=paste0(key,"_slope"))
    #--stack bp
    bp<-bp_min %>%
      dplyr::select(ENCOUNTERID,key,value,dsa) %>%
      bind_rows(bp_slp %>%
                  dplyr::select(ENCOUNTERID,key,value,dsa))
    #all vitals
    dat_out %<>%
      mutate(dsa=-1) %>% bind_rows(bp)
    #clean out some memory
    # bug fix: `bp_slp_obj` was never defined; listing it in rm() raised a warning
    rm(bp,bp_min,bp_slp_eligb,bp_slp)
    gc()
  }else if(type=="lab"){
    #multiple same lab on the same day will be resolved by taking the average
    dat_out<-dat %>%
      filter(key != "NI") %>%
      mutate(key_cp=key,unit_cp=unit) %>%
      unite("key_unit",c("key_cp","unit_cp"),sep="@") %>%
      group_by(ENCOUNTERID,key,unit,key_unit,dsa) %>%
      dplyr::summarize(value=mean(value,na.rm=T),.groups="drop")
    #calculated new features: BUN/SCr ratio (same-day), keys are LOINC codes
    bun_scr_ratio<-dat_out %>%
      mutate(key_agg=case_when(key %in% c('2160-0','38483-4','14682-9','21232-4','35203-9','44784-7','59826-8',
                                          '16188-5','16189-3','59826-8','35591-7','50380-5','50381-3','35592-5',
                                          '44784-7','11041-1','51620-3','72271-0','11042-9','51619-5','35203-9','14682-9') ~ "SCR",
                               key %in% c('12966-8','12965-0','6299-2','59570-2','12964-3','49071-4','72270-2',
                                          '11065-0','3094-0','35234-4','14937-7') ~ "BUN",
                               key %in% c('3097-3','44734-2') ~ "BUN_SCR")) %>% #not populated
      filter((toupper(unit) %in% c("MG/DL","MG/MG")) &
               (key_agg %in% c("SCR","BUN","BUN_SCR"))) %>%
      group_by(ENCOUNTERID,key_agg,dsa) %>%
      dplyr::summarize(value=mean(value,na.rm=T),.groups="drop") %>%
      spread(key_agg,value) %>%
      filter(!is.na(SCR)&!is.na(BUN)) %>%
      mutate(BUN_SCR = round(BUN/SCR,2)) %>%
      mutate(key="BUN_SCR") %>%
      dplyr::rename(value=BUN_SCR) %>%
      dplyr::select(ENCOUNTERID,key,value,dsa)
    dat_out %<>% bind_rows(bun_scr_ratio)
    #engineer new features: change of lab from last collection
    lab_delta_eligb<-dat_out %>%
      group_by(ENCOUNTERID,key) %>%
      dplyr::mutate(lab_cnt=sum(dsa<=pred_end)) %>%
      ungroup %>%
      group_by(key) %>%
      dplyr::summarize(p5=quantile(lab_cnt,probs=0.05,na.rm=T),
                       p25=quantile(lab_cnt,probs=0.25,na.rm=T),
                       med=median(lab_cnt,na.rm=T),
                       p75=quantile(lab_cnt,probs=0.75,na.rm=T),
                       p95=quantile(lab_cnt,probs=0.95,na.rm=T),
                       .groups="drop")
    #--collect changes of lab only for those that are regularly repeated (floor(pred_end/2))
    freq_lab<-lab_delta_eligb %>% filter(med>=(floor(pred_end/2)))
    if(nrow(freq_lab)>0){
      lab_delta<-dat_out %>%
        semi_join(freq_lab,by="key")
      lab_delta %<>%
        group_by(ENCOUNTERID,key) %>%
        dplyr::mutate(dsa_max=max(dsa)) %>%
        filter(dsa<=dsa_max) %>%
        arrange(dsa) %>%
        dplyr::mutate(value_lag=lag(value,n=1L,default=NA)) %>%
        ungroup %>%
        filter(!is.na(value_lag)) %>%
        mutate(value=value-value_lag,
               key=paste0(key,"_change")) %>%
        dplyr::select(ENCOUNTERID,key,value,dsa) %>%
        unique
      dat_out %<>% bind_rows(lab_delta)
    }
  }else if(type == "dx"){
    #multiple records resolved as "present (1) or absent (0)", keep latest day
    dat_out<-dat %>% dplyr::select(-PATID) %>%
      group_by(ENCOUNTERID,key,dsa) %>%
      dplyr::summarize(value=(n() >= 1)*1,.groups="drop") %>%
      group_by(ENCOUNTERID,key) %>%
      top_n(n=1L,wt=dsa) %>%
      ungroup %>%
      mutate(key=as.character(key)) %>%
      dplyr::select(ENCOUNTERID,key,value,dsa)
  }else if(type == "px"){
    #multiple records resolved as "present (1) or absent (0)"
    dat_out<-dat %>% dplyr::select(-PATID) %>%
      group_by(ENCOUNTERID,key,dsa) %>%
      dplyr::summarize(value=(n() >= 1)*1,.groups="drop") %>%
      dplyr::select(ENCOUNTERID,key,value,dsa)
  }else if(type=="med"){
    #multiple records accumulated (cumulative exposure), raw rows kept as well
    dat_out<-dat %>%
      group_by(ENCOUNTERID,key) %>%
      arrange(dsa) %>%
      dplyr::mutate(value=cumsum(value)) %>%
      ungroup %>%
      mutate(key=paste0(key,"_cum")) %>%
      dplyr::select(ENCOUNTERID,key,value,dsa) %>%
      bind_rows(dat %>%
                  dplyr::select(ENCOUNTERID,key,value,dsa) %>%
                  unique)
  }
  return(dat_out)
}
#tw should be the same time unit as dsa
# Build discrete-survival-style (X, y) stacks over a vector of time points `tw`:
# at each time point t, encounters still at risk contribute one label row and
# the predictor rows observed strictly before the prediction point.
#   dat       - long predictor frame (ENCOUNTERID, key, value, dsa)
#   censor    - one row per encounter outcome with dsa_y (endpoint day) and y
#   pred_in_d - lead time: predictors must be at least this many days before endpoint
#   carry_over- if TRUE, only the latest value per (encounter, key) is kept
# Returns list(X=stacked predictors, y=stacked labels).
get_dsurv_temporal<-function(dat,censor,tw,pred_in_d=1,carry_over=T){
  y_surv<-c()
  X_surv<-c()
  for(t in tw){
    #stack y: encounters with endpoint before t drop out; endpoint exactly at t
    # keeps its label; endpoints after t are down-shifted (pmax(0,y-1))
    censor_t<-censor %>%
      mutate(pred_pt=case_when(dsa_y >= t ~ t,
                               dsa_y < t ~ NA_real_),
             y_ep=case_when(dsa_y == t ~ y,
                            dsa_y > t ~ pmax(0,y-1),
                            dsa_y < t ~ NA_real_)) %>%
      filter(!is.na(pred_pt)) %>%
      group_by(ENCOUNTERID) %>%
      arrange(desc(pred_pt),desc(y_ep)) %>%
      dplyr::slice(1:1) %>%
      ungroup %>%
      mutate(dsa_y=pred_pt,y=y_ep) %>%
      dplyr::select(-pred_pt,-y_ep)
    y_surv %<>%
      bind_rows(censor_t %>%
                  dplyr::select(ENCOUNTERID,dsa_y,y))
    #stack x; a "dayK" indicator row is appended to mark the prediction day
    if(carry_over){
      X_surv %<>%
        bind_rows(dat %>% left_join(censor_t,by="ENCOUNTERID") %>%
                    filter(dsa < dsa_y-(pred_in_d-1)) %>% # prediction point is at least "pred_in_d" days before endpoint
                    group_by(ENCOUNTERID,key) %>%
                    top_n(n=1,wt=dsa) %>% # take latest value (carry over)
                    ungroup %>%
                    dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value) %>%
                    bind_rows(censor_t %>%
                                mutate(dsa=dsa_y-1,
                                       key=paste0("day",(dsa_y-1)),
                                       value=1) %>%
                                dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value)))
    }else{
      X_surv %<>%
        bind_rows(dat %>% left_join(censor_t,by="ENCOUNTERID") %>%
                    filter(dsa < dsa_y-(pred_in_d-1)) %>% # prediction point is at least "pred_in_d" days before endpoint
                    dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value) %>%
                    bind_rows(censor_t %>%
                                mutate(dsa=dsa_y-1,
                                       key=paste0("day",(dsa_y-1)),
                                       value=1) %>%
                                dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value)))
    }
  }
  Xy_surv<-list(X = X_surv,
                y = y_surv)
  return(Xy_surv)
}
#identify initial stage of and collect most recent values
# Collapse each encounter to its final outcome row (y) and the most recent
# predictor value per (encounter, key) observed before the prediction point.
#   pred_in_d - lead time in days before the endpoint
#   obs_tw    - extra observation-window allowance added to the lead time
# NOTE(review): the first filter clause is not restricted to y==1 despite the
# inline comment; cases (y==1) use the lead-time cutoff while controls (y==0)
# also pass the second clause censored at `force_censor` — confirm intent.
get_most_recent<-function(dat,censor,pred_in_d=1,obs_tw=0){
  # one outcome row per encounter: latest endpoint day, ties broken toward y=1
  y_mr<-censor %>%
    group_by(ENCOUNTERID) %>%
    arrange(desc(dsa_y),desc(y)) %>%
    dplyr::slice(1:1) %>%
    ungroup %>%
    select(ENCOUNTERID,dsa_y,y)
  # controls are censored at the median endpoint day observed among cases
  force_censor<-y_mr %>%
    filter(y==1) %>% summarise(censor_end=median(dsa_y)) %>% unlist
  X_mr<-dat %>% left_join(y_mr,by="ENCOUNTERID") %>%
    filter((dsa < dsa_y-(pred_in_d-1-obs_tw))| # y==1, prediction point is at least "pred_in_d" days before endpoint
             (y==0& dsa <= force_censor) # y==0, censored at median days of AKI occurrence in AKI patients
    ) %>%
    group_by(ENCOUNTERID,key) %>%
    top_n(n=1,wt=dsa) %>% # take latest value (carry over)
    ungroup %>%
    dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value) %>%
    bind_rows(y_mr %>%
                mutate(dsa=dsa_y-1,
                       key=paste0("day",(dsa_y-1)),
                       value=1) %>%
                dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value))
  Xy_mr<-list(X=X_mr,
              y=y_mr)
  return(Xy_mr)
}
## convert long matrix to wide sparse matrix
# Pivot a long frame into a sparse matrix: rows indexed by `id`, columns by
# `variable`, cells filled with `val` (NA cell values fall back to 1).
# With binary=TRUE every observed (id, variable) pair is coded as 1.
# Note: duplicate (id, variable) pairs are summed by sparseMatrix().
long_to_sparse_matrix<-function(df,id,variable,val,binary=FALSE){
  row_fac<-as.factor(df[[id]])
  col_fac<-as.factor(df[[variable]])
  if(binary){
    cell_x<-1
  }else{
    raw_val<-df[[val]]
    cell_x<-ifelse(is.na(raw_val),1,as.numeric(raw_val))
  }
  sparseMatrix(i=as.numeric(row_fac),
               j=as.numeric(col_fac),
               x=cell_x,
               dimnames=list(levels(row_fac),
                             levels(col_fac)))
}
# Summarize binary-classification performance for predicted scores `pred`
# against labels `real`:
#  - perf_at:  per-cutoff table (precision/recall/sens/spec/ppv/npv/F/acc/MCC)
#    built from ROCR performance objects joined on the cutoff
#  - perf_summ: overall measures (ROC AUC with CI via pROC, PR AUCs via PRROC,
#    optimal-threshold stats, and column means of perf_at)
# Set keep_all_cutoffs=TRUE to also return the full per-cutoff table.
get_perf_summ<-function(pred,real,keep_all_cutoffs=F){
  # various performace table
  pred_obj<-ROCR::prediction(pred,real)
  prc<-performance(pred_obj,"prec","rec")
  roc<-performance(pred_obj,"sens","spec")
  nppv<-performance(pred_obj,"ppv","npv")
  pcfall<-performance(pred_obj,"pcfall")
  acc<-performance(pred_obj,"acc")
  fscore<-performance(pred_obj,"f")
  mcc<-performance(pred_obj,"phi")
  # join all ROCR measures on the shared cutoff grid
  perf_at<-data.frame(cutoff=prc@alpha.values[[1]],
                      prec=prc@y.values[[1]],
                      rec_sens=prc@x.values[[1]],
                      stringsAsFactors = F) %>%
    arrange(cutoff) %>%
    left_join(data.frame(cutoff=nppv@alpha.values[[1]],
                         ppv=nppv@y.values[[1]],
                         npv=nppv@x.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    dplyr::mutate(prec_rec_dist=abs(prec-rec_sens)) %>%
    left_join(data.frame(cutoff=fscore@x.values[[1]],
                         fscore=fscore@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=roc@alpha.values[[1]],
                         spec=roc@x.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    # Euclid_meas: distance to the ideal ROC corner; Youden_meas: sens+spec-1
    dplyr::mutate(Euclid_meas=sqrt((1-rec_sens)^2+(0-(1-spec))^2),
                  Youden_meas=rec_sens+spec-1) %>%
    left_join(data.frame(cutoff=pcfall@x.values[[1]],
                         pcfall=pcfall@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=acc@x.values[[1]],
                         acc=acc@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=mcc@x.values[[1]],
                         mcc=mcc@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    filter(prec > 0 & rec_sens > 0 & spec > 0) %>%
    group_by(cutoff) %>%
    dplyr::mutate(size=n()) %>%
    ungroup
  # performance summary
  lab1<-pred[real==1]
  lab0<-pred[real==0]
  pr<-pr.curve(scores.class0 = lab1,
               scores.class1 = lab0,curve=F)
  roc_ci<-pROC::ci.auc(real,pred)
  # "opt_*" rows use the cutoff minimizing the ROC Euclidean distance
  # (sens/spec/ppv/npv) or the precision-recall gap (prec/rec/fscore)
  perf_summ<-data.frame(overall_meas=c("roauc_low",
                                       "roauc",
                                       "roauc_up",
                                       "opt_thresh",
                                       "opt_sens",
                                       "opt_spec",
                                       "opt_ppv",
                                       "opt_npv",
                                       "prauc1",
                                       "prauc2",
                                       "opt_prec",
                                       "opt_rec",
                                       "opt_fscore"),
                        meas_val=c(roc_ci[[1]],
                                   roc_ci[[2]],
                                   roc_ci[[3]],
                                   perf_at$cutoff[which.min(perf_at$Euclid_meas)],
                                   perf_at$rec_sens[which.min(perf_at$Euclid_meas)],
                                   perf_at$spec[which.min(perf_at$Euclid_meas)],
                                   perf_at$ppv[which.min(perf_at$Euclid_meas)],
                                   perf_at$npv[which.min(perf_at$Euclid_meas)],
                                   pr$auc.integral,
                                   pr$auc.davis.goadrich,
                                   perf_at$prec[which.min(perf_at$prec_rec_dist)],
                                   perf_at$rec_sens[which.min(perf_at$prec_rec_dist)],
                                   perf_at$fscore[which.min(perf_at$prec_rec_dist)]),
                        stringsAsFactors = F) %>%
    bind_rows(perf_at %>%
                dplyr::summarize(prec_m=mean(prec,na.rm=T),
                                 sens_m=mean(rec_sens,na.rm=T),
                                 spec_m=mean(spec,na.rm=T),
                                 ppv_m=mean(ppv,na.rm=T),
                                 npv_m=mean(npv,na.rm=T),
                                 acc_m=mean(acc,na.rm=T),
                                 fscore_m=mean(fscore,na.rm=T),
                                 mcc_m=mean(mcc,na.rm=T),
                                 .groups="drop") %>%
                gather(overall_meas,meas_val))
  out<-list(perf_summ=perf_summ)
  if(keep_all_cutoffs){
    out$perf_at<-perf_at
  }
  return(out)
}
# Calibration table: group predictions into (up to) n_bin quantile bins and
# report, per bin, exposure, bin bounds, observed event rate (y_p) and a 95%
# normal-approximation band centered on the mean predicted probability.
get_calibr<-function(pred,real,n_bin=20){
  # quantile breaks may collapse when pred has ties, hence unique()
  brks<-unique(quantile(pred,0:(n_bin)/(n_bin)))
  binned<-data.frame(pred=pred,
                     y=real) %>%
    arrange(pred) %>%
    dplyr::mutate(pred_bin = cut(pred,
                                 breaks=brks,
                                 include.lowest=T,
                                 labels=F)) %>%
    ungroup
  binned %>%
    group_by(pred_bin) %>%
    dplyr::summarize(expos=n(),
                     bin_lower=min(pred),
                     bin_upper=max(pred),
                     bin_mid=median(pred),
                     y_agg = sum(y),
                     pred_p = mean(pred),
                     .groups="drop") %>%
    dplyr::mutate(y_p=y_agg/expos) %>%
    dplyr::mutate(binCI_lower = pmax(0,pred_p-1.96*sqrt(y_p*(1-y_p)/expos)),
                  binCI_upper = pred_p+1.96*sqrt(y_p*(1-y_p)/expos))
}
# Re-calibrate predicted probabilities with a simple linear map estimated on
# a random split of the calibration bins:
#   group 1 (prob p)  : fit observed rate ~ predicted rate (lm)
#   group 2 (prob 1-p): evaluate Hosmer-Lemeshow stats before/after adjustment
# Returns list(int_calib_pvt = per-bin observed vs raw/adjusted predictions,
#              int_calib_hl  = HL chi-square, df and p-values).
# NOTE(review): the previous version did not parse (pipe into an assignment,
# `overall_mear` typo, group_by on nonexistent columns, missing pipe before
# left_join); this is a conservative reconstruction of the apparent intent.
get_recalibr<-function(pred,real,p=0.5,n_bin=20){
  #bin-level calibration table, randomly split into fit/evaluation groups
  re_calib_in<-get_calibr(pred=pred,real=real,n_bin=n_bin) %>%
    mutate(grp_idx=sample(1:2,n(),prob=c(p,1-p),replace = TRUE)) %>%
    select(pred_bin,expos,y_agg,pred_p,grp_idx)
  #--fit linear recalibration on group 1: y_p ~ pred_p
  fit_dat<-re_calib_in %>%
    filter(grp_idx==1) %>%
    mutate(y_p=y_agg/expos)
  recalib_fit<-lm(y_p ~ pred_p,data=fit_dat)
  re_calib_in_coef<-data.frame(a=unname(coef(recalib_fit)[1]),
                               b=unname(coef(recalib_fit)[2]))
  #--apply recalibration to group 2 and build HL contingency terms
  int_calib<-re_calib_in %>%
    filter(grp_idx==2) %>%
    mutate(k=1) %>%
    left_join(re_calib_in_coef %>% mutate(k=1),by="k") %>%
    mutate(pred_p_adj=pred_p*b+a) %>% # simple re-calibration
    dplyr::rename("real_pos"="y_agg") %>%
    mutate(real_neg=expos-real_pos,
           pred_pos=round(expos*pred_p),
           pred_pos_adj=round(expos*pred_p_adj),
           pred_neg=expos-pred_pos,
           pred_neg_adj=expos-pred_pos_adj) %>%
    filter(pred_pos>0&pred_neg>0&pred_pos_adj>0&pred_neg_adj>0) %>%
    mutate(pos_term=((real_pos-pred_pos)^2)/pred_pos,
           neg_term=((real_neg-pred_neg)^2)/pred_neg,
           pos_adj_term=((real_pos-pred_pos_adj)^2)/pred_pos_adj,
           neg_adj_term=((real_neg-pred_neg_adj)^2)/pred_neg_adj)
  #per-bin pivot of observed vs predicted rates
  int_calib_pvt<-int_calib %>%
    select(pred_bin,pred_p,pred_p_adj,expos,real_pos) %>%
    unique %>% mutate(y_p=real_pos/expos)
  #Hosmer-Lemeshow goodness-of-fit before and after adjustment
  int_calib_hl<-int_calib %>%
    dplyr::summarize(chi_sq=sum(pos_term)+sum(neg_term),
                     chi_sq_adj=sum(pos_adj_term)+sum(neg_adj_term),
                     df=max(3,length(unique(pred_bin)))-2,
                     .groups="drop") %>%
    mutate(p_val=pchisq(chi_sq,df=df,lower.tail = F),
           p_val_adj=pchisq(chi_sq_adj,df=df,lower.tail = F))
  return(list(int_calib_pvt=int_calib_pvt,
              int_calib_hl=int_calib_hl))
}
# Freedman-Diaconis-style binning of a numeric vector, after trimming to the
# middle 95% to limit outlier influence. Falls back to the Scott-style rule
# (3.5*sd) when the trimmed IQR is zero. Returns a data.frame with one row
# per bin edge: bin index, lower break (-Inf first) and upper break (Inf last).
# ref: https://www.answerminer.com/blog/binning-guide-ideal-histogram
bin_fd<-function(x){
  distinct_n<-length(unique(x))
  x<-x[order(x)]
  #remove outliers (keep middle 95%)
  p_low<-quantile(x,probs=0.025,na.rm=TRUE)
  p_high<-quantile(x,probs=0.975,na.rm=TRUE)
  x<-x[x>=p_low & x<=p_high]
  x_span<-max(x,na.rm=TRUE)-min(x,na.rm=TRUE)
  iqr_x<-IQR(x,na.rm=TRUE)
  if(iqr_x!=0){
    bin_cnt<-x_span/(2*iqr_x/(distinct_n^(1/3)))
  }else{
    bin_cnt<-x_span/(3.5*sd(x,na.rm=TRUE)/(distinct_n^(1/3)))
  }
  #parse the lower edge out of each cut() level label like "[a,b)"
  cut_lvls<-levels(cut(x,bin_cnt,include.lowest = TRUE,right=FALSE))
  lower<-c(-Inf,as.numeric(gsub("(,).*","",gsub("\\[","",cut_lvls))))
  upper<-c(lower[-1],Inf)
  data.frame(bin=seq_along(lower),brk_lb=lower,brk_ub=upper)
}
# Approximate the mean Gaussian-kernel cross-similarity between samples x and
# y (a building block for kernel two-sample / MMD-style statistics), using a
# random subsample of proportion `rs` from each side to cut the O(n*m) cost.
#   unbiased - if TRUE, divide by n*(m-1) (diagonal self-pairs excluded).
get_ks<-function(x,y,unbiased=T,rs=0.6){
  x_rs<-sample(x,round(length(x)*rs))
  y_rs<-sample(y,round(length(y)*rs))
  #pair-wise gaussian kernel matrix: K[i,j] = exp(-(x_i - y_j)^2 / 2)
  # bug fix: the old broadcast `x_mt - y_rs` recycled y_rs down the ROWS,
  # pairing by row index instead of forming all (i,j) differences;
  # outer() computes the pairwise difference matrix correctly
  gauss_k<-exp(-(outer(x_rs,y_rs,"-")^2)/2)
  #nullify the diagonal (self-pairs; only meaningful when x and y overlap)
  gauss_k[row(gauss_k) == col(gauss_k)] <- NA
  #take the average
  if(unbiased==T){
    xyk<-sum(gauss_k,na.rm=T)/(length(x_rs)*(length(y_rs)-1))
  }else{
    xyk<-sum(gauss_k,na.rm=T)/(length(x_rs)*length(y_rs))
  }
  return(xyk)
}
# Draw `n` "unlikely" values from the low-density tails of x's kernel density
# estimate: grid points whose rescaled cumulative density is >= alpha (upper
# tail) or <= 1-alpha (lower tail). Intended as bias/noise samples.
penalize_sample<-function(x,n,alpha=0.99){
  #kernel density estimation
  gk<-density(x)
  #cumulative distribution over the KDE grid, rescaled to [0,1]
  # (base-R equivalent of scales::rescale(cumsum(gk$y), c(0,1)))
  cum_y<-cumsum(gk$y)
  fk<-(cum_y-min(cum_y))/(max(cum_y)-min(cum_y))
  #unlikely value range
  # bug fix: the second index set was a raw logical vector (silently coerced
  # to 0/1 positions, i.e. repeated gk$x[1]); wrap in which()
  bias_pool<-gk$x[c(which(fk>=alpha),which(fk<=(1-alpha)))]
  #generate noises
  bias_rs<-sample(bias_pool,n,replace=T)
  return(bias_rs)
}
|
/R/util.R
|
permissive
|
kumc-bmi/AKI_CDM
|
R
| false
| false
| 47,384
|
r
|
##---------------------------helper functions--------------------------------------##
## install (if needed) and attach packages
# `package_list` - character vector of package names
# `verb`         - print a confirmation per package
require_libraries<-function(package_list,verb=TRUE){
  for (lib in package_list) {
    # requireNamespace() is the recommended availability check (far cheaper
    # than scanning installed.packages())
    if(!requireNamespace(lib,quietly=TRUE)){
      install.packages(lib)
    }
    library(lib, character.only=TRUE,lib.loc=.libPaths())
    if(verb){
      cat("\n", lib, " loaded.", sep="")
    }
  }
}
# Open a DBI connection for the given DBMS/driver combination and tag it with
# DBMS_type/driver_type attributes (read back by execute_single_sql/drop_tbl).
#   DBMS_type   - "Oracle", "tSQL" or "PostgreSQL"
#   driver_type - "OCI" or "JDBC" (Oracle); JDBC for tSQL
#   config_file - list with username, password, access, sid/cdm_db_name
connect_to_db<-function(DBMS_type,driver_type=c("OCI","JDBC"),config_file){
  if(is.null(driver_type)){
    stop("must specify type of database connection driver!")
  }
  if(DBMS_type=="Oracle"){
    if(driver_type=="OCI"){
      require_libraries("ROracle")
      conn<-dbConnect(ROracle::Oracle(),
                      config_file$username,
                      config_file$password,
                      file.path(config_file$access,config_file$sid))
    }else if(driver_type=="JDBC"){
      require_libraries("RJDBC")
      # make sure ojdbc6.jar is in the AKI_CDM folder
      # Source: https://www.r-bloggers.com/connecting-r-to-an-oracle-database-with-rjdbc/
      drv<-JDBC(driverClass="oracle.jdbc.OracleDriver",
                classPath="./inst/ojdbc6.jar")
      url <- paste0("jdbc:oracle:thin:@", config_file$access,":",config_file$sid)
      conn <- RJDBC::dbConnect(drv, url,
                               config_file$username,
                               config_file$password)
    }else{
      stop("The driver type is not currently supported!")
    }
  }else if(DBMS_type=="tSQL"){
    require_libraries("RJDBC")
    # make sure sqljdbc.jar is in the AKI_CDM folder
    drv <- JDBC(driverClass="com.microsoft.sqlserver.jdbc.SQLServerDriver",
                classPath="./inst/sqljdbc.jar",
                identifier.quote="`")
    url <- paste0("jdbc:sqlserver:", config_file$access,
                  ";DatabaseName=",config_file$cdm_db_name,
                  ";username=",config_file$username,
                  ";password=",config_file$password)
    conn <- dbConnect(drv, url)
  }else if(DBMS_type=="PostgreSQL"){
    #not tested yet!
    server<-gsub("/","",str_extract(config_file$access,"//.*(/)"))
    host<-gsub(":.*","",server)
    port<-gsub(".*:","",server)
    require_libraries("RPostgres")
    conn<-dbConnect(RPostgres::Postgres(),
                    host=host,
                    port=port,
                    dbname=config_file$cdm_db_name,
                    user=config_file$username,
                    password=config_file$password)
  }else{
    # fixed typo in error message ("currectly" -> "currently")
    stop("the DBMS type is not currently supported!")
  }
  # stash connection metadata for downstream helpers
  attr(conn,"DBMS_type")<-DBMS_type
  attr(conn,"driver_type")<-driver_type
  return(conn)
}
## parse Oracle sql lines
# Parse one annotated .sql file. Special header comments drive the metadata:
#   /*out: <tbl>     - name of the table the statement produces
#   /*action: <verb> - "write" (create table) or "query" (fetch result)
#   /*params: &&a,.. - global parameters substituted from `...`
# Returns list(tbl_out=, action=, statement=).
parse_sql<-function(file_path,...){
  param_val<-list(...)
  #read file; guarantee the connection is released even if parsing fails
  con<-file(file_path,"r")
  on.exit(close(con),add=TRUE)
  #initialize string
  sql_string <- ""
  #initialize result holder
  params_ind<-FALSE
  tbl_out<-NULL
  action<-NULL
  while (TRUE){
    #read the next line
    line <- readLines(con, n = 1)
    #check for endings
    if (length(line)==0) break
    #collect overhead info
    if(grepl("^(/\\*out)",line)){
      #output table name
      tbl_out<-trimws(gsub("(/\\*out\\:\\s)","",line),"both")
    }else if(grepl("^(/\\*action)",line)){
      #"write" or "query"(fetch) the output table
      action<-trimws(gsub("(/\\*action\\:\\s)","",line),"both")
    }else if(grepl("^(/\\*params)",line)){
      params_ind<-TRUE
      #breakdown global parameters
      params<-gsub(",","",strsplit(trimws(gsub("(/\\*params\\:\\s)","",line),"both")," ")[[1]])
      params_symbol<-params
      #normalize the parameter names (strip the && prefix)
      params<-gsub("&&","",params)
    }
    #normalize tabs
    line<-gsub("\\t", " ", line)
    #translate comment symbol '--' into /* ... */
    if(grepl("--",line)){
      line <- paste(sub("--","/*",line),"*/")
    }
    #attach new line (skip header /* lines)
    if(!grepl("^(/\\*)",line)){
      sql_string <- paste(sql_string, line)
    }
  }
  #update parameters as needed
  if(params_ind){
    #align param_val with params; missing parameters substitute to blank
    params_miss<-params[!(params %in% names(param_val))]
    for(j in seq_along(params_miss)){
      param_val[params_miss[j]]<-list(NULL)
    }
    param_val<-param_val[which(names(param_val) %in% params)]
    param_val<-param_val[order(names(param_val))]
    params_symbol<-params_symbol[order(params)]
    params<-params[order(params)]
    #substitute params_symbol by param_val
    for(i in seq_along(params)){
      sql_string<-gsub(params_symbol[i],
                       ifelse(is.null(param_val[[i]])," ",
                              ifelse(params[i]=="cdm_db_link",
                                     paste0("@",param_val[[i]]),
                                     ifelse(params[i] %in% c("start_date","end_date"),
                                            paste0("'",param_val[[i]],"'"),
                                            param_val[[i]]))),
                       sql_string)
    }
  }
  #clean up excessive "[ ]." or "[@" in tSQL when substitute value is NULL
  sql_string<-gsub("\\[\\ ]\\.","",sql_string)
  sql_string<-gsub("\\[@","[",sql_string)
  out<-list(tbl_out=tbl_out,
            action=action,
            statement=sql_string)
  return(out)
}
## execute single sql snippet
# Run one parsed statement on `conn`. With write=TRUE the statement creates
# `table_name` (any pre-existing table of that name is dropped first on
# Oracle); with write=FALSE the result set is fetched and returned.
# Oracle/OCI uses dbSendQuery while JDBC connections need dbSendUpdate
# (dbSendQuery's S4 return value breaks RJDBC connections).
execute_single_sql<-function(conn,statement,write,table_name){
  DBMS_type<-attr(conn,"DBMS_type")
  driver_type<-attr(conn,"driver_type")
  if(!write){
    #read path: fetch and return the result set
    dat<-dbGetQuery(conn,statement)
    return(dat)
  }
  if(DBMS_type=="Oracle"){
    if(!(driver_type %in% c("OCI","JDBC"))){
      stop("Driver type not supported for ",DBMS_type,"!\n")
    }
    #drop existing table of the same name, if applicable
    chk_exist<-dbGetQuery(conn,paste0("select tname from tab where tname ='",table_name,"'"))
    if(length(chk_exist$TNAME)>0){
      if(driver_type=="OCI"){
        dbSendQuery(conn,paste("drop table",table_name))
      }else{
        dbSendUpdate(conn,paste("drop table",table_name))
      }
    }
    #create the new table
    if(driver_type=="OCI"){
      dbSendQuery(conn,statement)
    }else{
      dbSendUpdate(conn,statement)
    }
  }else if(DBMS_type=="tSQL"){
    if(driver_type!="JDBC"){
      stop("Driver type not supported for ",DBMS_type,"!\n")
    }
    #write new table (no pre-drop here; statements are expected to handle it)
    dbSendUpdate(conn,statement)
  }else{
    stop("DBMS type not supported!")
  }
  cat("create temporary table: ", table_name, ".\n")
}
## execute multiple sql snippets
#---statements have to be in correct logical order
# Parse and run each sql file in `statements`, forwarding `...` to parse_sql()
# as parameter substitutions; optionally log each created table.
execute_batch_sql<-function(conn,statements,verb,...){
  for(stmt_file in statements){
    parsed<-parse_sql(file_path=stmt_file,...)
    tbl_nm<-toupper(parsed$tbl_out)
    execute_single_sql(conn,
                       statement=parsed$statement,
                       write=(parsed$action=="write"),
                       table_name=tbl_nm)
    if(verb){
      cat(stmt_file,"has been executed and table",
          tbl_nm,"was created.\n")
    }
  }
}
## clean up intermediate tables
# Drop a temporary table on the tagged connection. Oracle requires the PURGE
# clause to bypass the recycle bin; the send function depends on the driver
# (OCI -> dbSendQuery, JDBC -> dbSendUpdate).
drop_tbl<-function(conn,table_name){
  DBMS_type<-attr(conn,"DBMS_type")
  driver_type<-attr(conn,"driver_type")
  if(DBMS_type=="Oracle"){
    stmt<-paste("drop table",table_name,"purge")
    if(driver_type=="OCI"){
      dbSendQuery(conn,stmt)
    }else if(driver_type=="JDBC"){
      dbSendUpdate(conn,stmt)
    }else{
      stop("Driver type not supported for ",DBMS_type,"!.\n")
    }
  }else if(DBMS_type=="tSQL"){
    stmt<-paste("drop table",table_name)
    if(driver_type=="JDBC"){
      dbSendUpdate(conn,stmt)
    }else{
      stop("Driver type not supported for ",DBMS_type,"!.\n")
    }
  }else{
    warning("DBMS type not supported!")
  }
}
# Build the AKI study cohort end-to-end on the database side:
# runs the ordered cohort sql scripts for the connection's DBMS, collects the
# CONSORT attrition counts, fetches the final cohort table, then drops the
# intermediate tables. Returns list(aki_enc=cohort rows, attrition=counts).
extract_cohort<-function(conn,
                         cdm_db_name,
                         cdm_db_schema,
                         start_date="2010-01-01",
                         end_date="2018-12-31",
                         verb=T){
  #check if DBMS type is currently supported
  if(!(attr(conn,"DBMS_type") %in% c("Oracle","tSQL","PostgreSQL"))){
    stop("DBMS_type=",attr(conn,"DBMS_type"),"is not currently supported \n(should be one of 'Oracle','tSQL','PostgreSQL', case-sensitive)")
  }
  #execute(write) the following sql snippets according to the specified order
  statements<-paste0(
    paste0("./src/",attr(conn,"DBMS_type")),
    c("/cohort_initial.sql",
      "/cohort_all_SCr.sql",
      "/cohort_enc_SCr.sql",
      "/cohort_baseline_SCr.sql",
      "/cohort_exclude.sql",
      "/cohort_eligib.sql",
      "/cohort_AKI_staging.sql",
      "/cohort_final.sql")
  )
  execute_batch_sql(conn,statements,verb,
                    cdm_db_name=cdm_db_name,
                    cdm_db_schema=cdm_db_schema,
                    start_date=start_date,
                    end_date=end_date)
  #collect attrition info (CONSORT diagram counts)
  sql<-parse_sql(paste0("./src/",attr(conn,"DBMS_type"),"/consort_diagram.sql"))
  attrition<-execute_single_sql(conn,
                                statement=sql$statement,
                                write=(sql$action=="write"),
                                table_name=toupper(sql$tbl_out))
  #read Table1 (the final cohort table produced by cohort_final.sql)
  tbl1<-parse_sql(statements[length(statements)])$tbl_out
  aki_enc<-dbGetQuery(conn,paste("select * from",tbl1))
  #clean out intermediate tables; the last statement's table (final cohort)
  #is deliberately kept on the server
  for(i in 1:(length(statements)-1)){
    parse_out<-parse_sql(statements[i])
    if(parse_out$action=="write"){
      drop_tbl(conn,toupper(parse_out$tbl_out))
    }else{
      warning("no temporary table was created by this statment!")
    }
    if(verb){
      cat("temp table",toupper(parse_out$tbl_out),"dropped. \n")
    }
  }
  #output
  out<-list(aki_enc=aki_enc,
            attrition=attrition)
  return(out)
}
#----collect facts from i2b2 observation_fact table----
#note: there should be a reference patient table ("pat_num") on oracle server with the key column "key_col"
# Pull observation_fact rows for the reference patients, filtered by concept
# codes (`code_vec`, exact IN-list) and/or an Oracle regexp on concept_cd
# (`regexp_str`, case-insensitive). One query per schema in `schema`;
# returns a list of data.frames keyed by schema name.
collect_i2b2_obs<-function(conn,
                           code_vec=c(),
                           regexp_str="",
                           col_out=c("patient_num",
                                     "encounter_num",
                                     "concept_cd",
                                     "units_cd",
                                     "nval_num",
                                     "tval_char",
                                     "modifier_cd",
                                     "start_date",
                                     "end_date"),
                           key_col=c("patient_num"),
                           schema=c("blueherondata"),
                           pat_num){
  col_out<-col_out[!col_out %in% key_col]
  #build the join condition "p.<key>=f.<key>" for each key column
  match_key<-c()
  for(i in seq_along(key_col)){
    match_key<-c(match_key,paste(paste0(c("p.","f."),key_col[i]),collapse = "="))
  }
  if(length(key_col)>1){
    match_key<-paste(match_key,collapse = " and ")
  }
  i2b2_obs_lst<-list()
  for(i in seq_along(schema)){
    sql<-paste0("select distinct ",
                paste0(paste(paste0("p.",key_col,collapse = ","),",",
                             paste(paste0("f.",col_out,collapse = ",")),
                             paste0(" from ", pat_num, " p"),
                             paste0(" join ", schema, ".observation_fact f"),
                             " on ",match_key)))
    #append the concept_cd filter depending on which inputs were supplied
    if(length(code_vec)>0&nchar(regexp_str)>0){
      sql<-paste0(sql," and ",
                  "(f.concept_cd in ",paste0("('",paste(code_vec,collapse="','"),"')")," or ",
                  "regexp_like(f.concept_cd,'",regexp_str,"','i'))")
    }else if(length(code_vec)==0&nchar(regexp_str)>0){
      sql<-paste0(sql," and ",
                  "regexp_like(f.concept_cd,'",regexp_str,"','i')")
    }else if(length(code_vec)>0&nchar(regexp_str)==0){
      sql<-paste0(sql," and ",
                  "f.concept_cd in ",paste0("('",paste(code_vec,collapse="','"),"')"))
    }else{
      stop("either code_vec or regexp_str should be specified for filtering concept_cd!")
    }
    i2b2_obs_lst[[schema[i]]]<-DBI::dbGetQuery(conn,sql)
  }
  return(i2b2_obs_lst)
}
#----collect concepts from i2b2 concept_dimension table----
#note: there should be a reference concept table ("concept") on oracle server with the key column "key_col"
# Join a server-side reference concept table against concept_dimension,
# either by exact code match or by Oracle regexp (fuzzy) match, optionally
# restricted to codes starting with `cd_prefix`. One query per schema;
# returns a list of data.frames keyed by schema name.
collect_i2b2_cd<-function(conn,
                          exact_match=T,
                          cd_prefix=NULL,
                          col_out=c("concept_cd",
                                    "name_char",
                                    "concept_path"),
                          key_col=c("CONCEPT_CD"),
                          schema=c("blueherondata"),
                          concept){
  col_out<-col_out[!col_out %in% key_col]
  match_key<-c()
  for(i in seq_along(key_col)){
    if(exact_match){
      match_key<-c(match_key,paste(paste0(c("cd.","f."),key_col[i]),collapse = "="))
    }else{
      #fuzzy match: treat the reference code as a (case-insensitive) pattern
      match_key<-c(match_key,paste0("regexp_like(f.",key_col[i],",('(' || cd.",key_col[i]," || ')+'),'i')"))
    }
    if(!is.null(cd_prefix)){
      match_key<-paste0(match_key," and regexp_like(f.",key_col[i],",'^(",cd_prefix,")+')")
    }else{
      # no prefix restriction (intentionally empty branch)
    }
  }
  if(length(key_col)>1){
    match_key<-paste(match_key,collapse = " and ")
  }
  i2b2_obs_lst<-list()
  for(i in seq_along(schema)){
    sql<-paste0("select distinct ",
                paste0(paste(paste0("cd.",key_col,collapse = ",")," ICD_FUZZY,",
                             paste(paste0("f.",col_out,collapse = ",")),
                             paste0(" from ", concept, " cd"),
                             paste0(" join ", schema, ".concept_dimension f"),
                             " on ",match_key)))
    i2b2_obs_lst[[schema[i]]]<-DBI::dbGetQuery(conn,sql)
  }
  return(i2b2_obs_lst)
}
#----collect data from one of the CDM tables----
#note: there should be a reference patient table ("pat_num") on oracle server with the key column "key_col"
# Pull rows of one PCORnet CDM table for the reference patients, with
# table-specific code filters:
#   PROCEDURES  - (PX_TYPE || ':' || PX) IN code_vec
#   PRESCRIBING - RXNORM_CUI IN code_vec and/or regexp on RAW_RX_MED_NAME
#   DISPENSING  - NDC IN code_vec
#   DIAGNOSIS   - (DX_TYPE || ':' || DX) IN code_vec and/or regexp
# One query per schema; returns a list of data.frames keyed by schema name.
collect_cdm<-function(conn,
                      code_vec=c(),
                      str_vec=c(),
                      col_out=NULL,
                      key_col_schema=c("PATID"),
                      key_col_pat=c("PATIENT_NUM"),
                      schema=c("PCORNET_CDM_C7R2"),
                      tbl="DEMOGRAPHIC",
                      pat_num){
  if(is.null(col_out)){
    #default to all columns of the target table (zero-row probe query)
    col_out<-colnames(DBI::dbGetQuery(conn,
                                      paste0("select * from ",schema[1],".",tbl," where 1=0")))
  }
  col_out<-col_out[!col_out %in% key_col_schema]
  match_key<-c()
  for(i in seq_along(key_col_pat)){
    match_key<-c(match_key,paste(c(paste0("p.",key_col_pat[i]),
                                   paste0("f.",key_col_schema[i])),
                                 collapse = "="))
  }
  if(length(key_col_pat)>1){
    match_key<-paste(match_key,collapse = " and ")
  }
  cdm_obs_lst<-list()
  for(i in seq_along(schema)){
    sql<-paste0("select distinct ",
                paste0(paste(paste0("p.",key_col_pat,collapse = ","),",",
                             paste(paste0("f.",col_out,collapse = ",")),
                             paste0(" from ", pat_num, " p"),
                             paste0(" join ", schema[i], ".",tbl," f"),
                             " on ",match_key)))
    if(tbl=="PROCEDURES"){
      #procedures are identified uniquely by (PX_TYPE || ':' || PX)
      sql<-paste0(sql," and",
                  " (f.PX_TYPE || ':' || f.PX) in ",paste0("('",paste(code_vec,collapse="','"),"')"))
    }else if(tbl=="PRESCRIBING"){
      #prescribing are identified uniquely by RXNORM_CUI or RAW_RX_MED_NAME
      if(length(code_vec)>0){
        # bug fix: missing space produced invalid SQL "andf.RXNORM_CUI"
        sql<-paste0(sql," and ",
                    "f.RXNORM_CUI in ",paste0("('",paste(code_vec,collapse="','"),"')"))
      }
      if(length(str_vec)>0){
        # NOTE(review): the "or" is not parenthesized against the join's "on"
        # clause; precedence of and/or here mirrors the original — verify
        if(length(code_vec)>0){
          sql<-paste0(sql, " or ",
                      " regexp_like(f.RAW_RX_MED_NAME,",paste0("'((",paste(str_vec,collapse = ")|("),"))+'"),",'i')")
        }else{
          sql<-paste0(sql, " and ",
                      " regexp_like(f.RAW_RX_MED_NAME,",paste0("'((",paste(str_vec,collapse = ")|("),"))+'"),",'i')")
        }
      }
    }else if(tbl=="DISPENSING"){
      #dispensing are identified by NDC codes
      # bug fix: a stray "(" produced an unbalanced, invalid SQL predicate
      sql<-paste0(sql," and ",
                  "f.NDC in ",paste0("('",paste(code_vec,collapse="','"),"')"))
    }else if(tbl=="DIAGNOSIS"){
      #diagnosis are identified by (DX_TYPE || ':' || DX)
      if(length(code_vec)>0){
        sql<-paste0(sql," and ",
                    "(f.DX_TYPE || ':' || f.DX) in ",paste0("('",paste(code_vec,collapse="','"),"')"))
      }
      if(length(str_vec)>0){
        if(length(code_vec)>0){
          sql<-paste0(sql, " or ",
                      " regexp_like((f.DX_TYPE || ':' || f.DX),",paste0("'((",paste(str_vec,collapse = ")|("),"))+'"),",'i')")
        }else{
          sql<-paste0(sql," and",
                      " regexp_like((f.DX_TYPE || ':' || f.DX),",paste0("'((",paste(str_vec,collapse = ")|("),"))+'"),",'i')")
        }
      }
    }else{
      #all other tables: no extra code filter
      sql<-sql
    }
    cdm_obs_lst[[schema[i]]]<-DBI::dbGetQuery(conn,sql)
  }
  return(cdm_obs_lst)
}
# Draw the CONSORT attrition diagram for the AKI cohort using diagram::
# openplotmat/straightarrow/textrect. `consort_tbl` supplies ENC_CNT per
# CNT_TYPE; proportions are computed against the nearest upstream reference
# count, and node positions are hand-tuned on a 3-column x 10-row grid.
consort_diag<-function(consort_tbl){
  # map count types to display labels and fixed node ids on the grid
  tbl<-data.frame(CNT_TYPE=c("Initial",
                             "Has_at_least_2_SCr",
                             "Initial_GFR_below_15",
                             "RRT_within_48hr",
                             "Burn_patients",
                             "Pre_ESRD",
                             "Pre_RRT",
                             "Total",
                             "nonAKI",
                             "AKI1",
                             "nonAKI_to_AKI2",
                             "AKI1_to_AKI2",
                             "nonAKI_to_AKI3",
                             "nonAKI_to_AKI2_to_AKI3",
                             "AKI1_to_AKI2_to_AKI3"),
                  label_txt=c("Inpatient visit with LOS >= 2\nand of age >= 18",
                              "Has at least 2 SCr record",
                              "Excluded: Initial eGFR below 15",
                              "Excluded: RRT with 48 hours since \nadmission",
                              "Excluded: Burn Patients",
                              "Excluded: Pre-existance of \nESRD",
                              "Excluded: Pre-existance of \ndialysis and renal transplantation",
                              "Total eligible encounters",
                              "Non-AKI",
                              "AKI1",
                              "AKI2",
                              "AKI1 to AKI2",
                              "AKI3",
                              "AKI2 to AKI3",
                              "AKI1 to AKI2 to AKI3"),
                  stringsAsFactors=F) %>%
    left_join(consort_tbl, by="CNT_TYPE") %>%
    replace_na(list(ENC_CNT=0)) %>%
    # reference denominators are carried down from the milestone rows
    mutate(cnt_ref=ifelse(CNT_TYPE %in% c("Initial","Has_at_least_1_SCr","Total"),ENC_CNT,NA)) %>%
    fill(cnt_ref,.direction="down") %>%
    mutate(cnt_ref=ifelse(CNT_TYPE=="Has_at_least_1_SCr",lag(cnt_ref,n=1L),cnt_ref)) %>%
    mutate(ENC_PROP=round(ENC_CNT/cnt_ref,4)) %>%
    mutate(label_val=paste0("(",ENC_CNT,",",ENC_PROP*100,"%)")) %>%
    mutate(label=paste(label_txt,"\n",label_val)) %>%
    mutate(node_id=c(2,5,7,9,10,12,13,17,18,22,23,25,24,26,28))
  #prepare canvas
  par(mfrow=c(1,1))
  par(mar=c(0,0,0,0))
  openplotmat()
  ##number of elements per row (3 columns x 10 rows grid)
  elpos<-coordinates(rep(3,10))
  # hand-tuned (from, to) node-id pairs for the flow arrows
  fromto<-matrix(ncol=2,byrow=T,
                 c(2,5,
                   5,8,
                   8,7,
                   8,9,
                   8,11,
                   11,10,
                   11,12,
                   11,14,
                   14,13,
                   14,17,
                   17,18,
                   17,20,
                   20,19,
                   20,21,
                   19,22,
                   20,23,
                   21,24,
                   22,25,
                   23,26,
                   25,28
                 ))
  ##draw arrows
  arrpos <- matrix(ncol = 2, nrow = nrow(fromto))
  for (i in 1:nrow(fromto)){
    arrpos[i, ] <- straightarrow (to = elpos[fromto[i, 2], ],
                                  from = elpos[fromto[i, 1], ],
                                  lwd = 1, arr.pos = 0.6, arr.length = 0.3)
  }
  ##draw nodes
  for(i in 1:nrow(tbl)){
    textrect(elpos[tbl$node_id[i],],
             radx=0.15,
             rady=0.05,
             lab=tbl$label[i],
             font=4,
             cex=0.7)
  }
}
## print link for LOINC code search result
# Build the loinc.org detail-page url for a LOINC code (vectorized).
get_loinc_ref<-function(loinc){
  sprintf("https://loinc.org/%s",loinc)
}
## get drug names for RXNORM by scraping REST API
# Resolve an RXNORM CUI to its drug name via the RxNav REST endpoint
# (RCurl::getURL + XML::htmlParse). Falls back to echoing the input code
# when no name node is found in the response.
get_rxcui_nm<-function(rxcui){
  #url link to REST API
  rx_url<-paste0("https://rxnav.nlm.nih.gov/REST/rxcui/",rxcui,"/")
  #get and parse html object
  rxcui_obj <- getURL(url = rx_url)
  rxcui_content<-htmlParse(rxcui_obj)
  #extract name from the idGroup/name node of the response
  rxcui_name<-xpathApply(rxcui_content, "//body//rxnormdata//idgroup//name", xmlValue)
  rxcui_name<-unique(unlist(rxcui_name))
  if(length(rxcui_name)>0) rxcui_name[1]
  else rxcui
}
## get drug names for NDC by scraping REST API
get_ndc_nm<-function(ndc){
parse_nm<-function(rx_obj){
rx_content<-htmlParse(rx_obj)
rx_attr<-xpathApply(rx_content, "//tbody//td[@data-title]",xmlAttrs)
rx_name<-xpathApply(rx_content, "//tbody//td[@data-title]",xmlValue)[which(rx_attr=="Proprietary Name")]
rx_name<-unique(unlist(rx_name))
return(rx_name)
}
#ndc is at least 11-digits
ndc<-case_when(
nchar(ndc) <= 11 ~ str_pad(ndc,side="left",11,pad="0"),
TRUE ~ ndc
)
#url link to REST API
rx_url<-paste0("https://ndclist.com/?s=",ndc)
#get and parse html object
rx_obj<-getURL(url = rx_url)
if(!length(rx_obj) %in% c("",NULL)) parse_nm(rx_obj)[1]
else ndc
}
#ref: https://www.r-bloggers.com/web-scraping-google-urls/
google_code<-function(code,nlink=1){
code_type<-ifelse(gsub(":.*","",code)=="CH","CPT",
gsub(":.*","",code))
code<-gsub(".*:","",code)
#search on google
gu<-paste0("https://www.google.com/search?q=",code_type,":",code)
html<-getURL(gu)
#parse HTML into tree structure
doc<-htmlParse(html)
#extract url nodes using XPath. Originally I had used "//a[@href][@class='l']" until the google code change.
attrs<-xpathApply(doc, "//h3//a[@href]", xmlAttrs)
#extract urls
links<-sapply(attrs, function(x) x[[1]])
#only keep the secure links
links<-links[grepl("(https\\:)+",links)]
links<-gsub("(\\&sa=U).*$","",links)
links<-paste0("https://",gsub(".*(https://)","",links))
#free doc from memory
free(doc)
return(links[1])
}
## render report
render_report<-function(which_report="./report/AKI_CDM_EXT_VALID_p1_QA.Rmd",
DBMS_type,driver_type,
start_date,end_date=as.character(Sys.Date())){
# to avoid <Error in unlockBinding("params", <environment>) : no binding for "params">
# a hack to trick r thinking it's in interactive environment --not work!
# unlockBinding('interactive',as.environment('package:base'))
# assign('interactive',function() TRUE,envir=as.environment('package:base'))
rmarkdown::render(input=which_report,
params=list(DBMS_type=DBMS_type,
driver_type=driver_type,
start_date=start_date,
end_date=end_date),
output_dir="./output/",
knit_root_dir="../")
}
#### survival-like data format transformation ####
format_data<-function(dat,type=c("demo","vital","lab","dx","px","med"),pred_end){
if(type=="demo"){
#demo has to be unqiue for each encounter
dat_out<-dat %>%
filter(key %in% c("AGE","SEX","RACE","HISPANIC")) %>%
group_by(ENCOUNTERID,key) %>%
top_n(n=1L,wt=value) %>% #randomly pick one if multiple entries exist
ungroup %>%
mutate(cat=value,dsa=-1,key_cp=key,
value2=ifelse(key=="AGE",value,"1")) %>%
unite("key2",c("key_cp","cat"),sep="_") %>%
mutate(key=ifelse(key=="AGE",key,key2),
value=as.numeric(value2)) %>%
dplyr::select(ENCOUNTERID,key,value,dsa)
}else if(type=="vital"){
dat_out<-c()
#multiple smoking status is resolved by using the most recent record
dat_out %<>%
bind_rows(dat %>% dplyr::select(-PATID) %>%
filter(key %in% c("SMOKING","TOBACCO","TOBACCO_TYPE")) %>%
group_by(ENCOUNTERID,key) %>%
arrange(value) %>% dplyr::slice(1:1) %>%
ungroup %>%
mutate(cat=value,dsa=-1,key_cp=key,value=1) %>%
unite("key",c("key_cp","cat"),sep="_") %>%
dplyr::select(ENCOUNTERID,key,value,dsa))
#multiple ht,wt,bmi resolved by taking median
dat_out %<>%
bind_rows(dat %>% dplyr::select(-PATID) %>%
filter(key %in% c("HT","WT","BMI")) %>%
mutate(value=as.numeric(value)) %>%
group_by(ENCOUNTERID,key) %>%
mutate(value=ifelse((key=="HT" & (value>95 | value<=0))|
(key=="WT" & (value>1400 | value<=0))|
(key=="BMI" & (value>70 | value<=0)),NA,value)) %>%
dplyr::summarize(value=median(value,na.rm=T),.groups="drop") %>%
mutate(dsa=-1))
#multiple bp are aggregated by taking: lowest & slope
bp<-dat %>% dplyr::select(-PATID) %>%
filter(key %in% c("BP_DIASTOLIC","BP_SYSTOLIC")) %>%
mutate(value=as.numeric(value)) %>%
mutate(value=ifelse((key=="BP_DIASTOLIC" & (value>120 | value<40))|
(key=="BP_SYSTOLIC" & (value>210 | value<40)),NA,value)) %>%
group_by(ENCOUNTERID,key,dsa) %>%
dplyr::mutate(value_imp=median(value,na.rm=T)) %>%
ungroup
bp %<>%
filter(!is.na(value_imp)) %>%
mutate(imp_ind=ifelse(is.na(value),1,0)) %>%
mutate(value=ifelse(is.na(value),value_imp,value)) %>%
dplyr::select(-value_imp)
bp %<>% dplyr::select(-imp_ind)
#--minimal bp
bp_min<-bp %>%
group_by(ENCOUNTERID,key,dsa) %>%
dplyr::summarize(value_lowest=min(value,na.rm=T),.groups="drop") %>%
mutate(key=paste0(key,"_min")) %>%
dplyr::rename(value=value_lowest)
#--trend of bp
bp_slp_eligb<-bp %>%
mutate(add_time=difftime(strptime(timestamp,"%Y-%m-%d %H:%M:%S"),strptime(timestamp,"%Y-%m-%d"),units="mins")) %>%
mutate(timestamp=round(as.numeric(add_time)/60,2)) %>% #coefficient represents change per hour
dplyr::select(-add_time) %>%
group_by(ENCOUNTERID,key,dsa) %>%
dplyr::mutate(df=length(unique(timestamp))-1) %>%
dplyr::mutate(sd=ifelse(df>0,sd(value),0))
bp_slp<-bp_slp_eligb %>%
filter(df > 1 & sd >= 1e-2) %>%
nest(data=c(timestamp,value,df,sd)) %>%
mutate(
fit_val=map(data, ~ lm(value ~ timestamp, data=.x)),
tidied=map(fit_val,tidy)
) %>%
unnest(tidied)
bp_slp %<>%
filter(term=="timestamp") %>%
dplyr::rename(value=estimate) %>%
mutate(value=ifelse(p.value>0.5 | is.nan(p.value),0,value)) %>%
dplyr::select(ENCOUNTERID,key,dsa,value) %>%
bind_rows(bp_slp_eligb %>%
filter(df<=1 | sd < 1e-2) %>% mutate(value=0) %>%
dplyr::select(ENCOUNTERID,key,value,dsa) %>%
ungroup %>% unique) %>%
bind_rows(bind_rows(bp_slp_eligb %>%
filter(df==1 & sd >= 1e-2) %>%
mutate(value=round((max(value)-min(value))/(max(timestamp)-min(timestamp)),2)) %>%
dplyr::select(ENCOUNTERID,key,value,dsa) %>%
ungroup %>% unique)) %>%
mutate(key=paste0(key,"_slope"))
#--stack bp
bp<-bp_min %>%
dplyr::select(ENCOUNTERID,key,value,dsa) %>%
bind_rows(bp_slp %>%
dplyr::select(ENCOUNTERID,key,value,dsa))
#all vitals
dat_out %<>%
mutate(dsa=-1) %>% bind_rows(bp)
#clean out some memories
rm(bp,bp_min,bp_slp_eligb,bp_slp_obj,bp_slp)
gc()
}else if(type=="lab"){
#multiple same lab on the same day will be resolved by taking the average
dat_out<-dat %>%
filter(key != "NI") %>%
mutate(key_cp=key,unit_cp=unit) %>%
unite("key_unit",c("key_cp","unit_cp"),sep="@") %>%
group_by(ENCOUNTERID,key,unit,key_unit,dsa) %>%
dplyr::summarize(value=mean(value,na.rm=T),.groups="drop")
#calculated new features: BUN/SCr ratio (same-day)
bun_scr_ratio<-dat_out %>%
mutate(key_agg=case_when(key %in% c('2160-0','38483-4','14682-9','21232-4','35203-9','44784-7','59826-8',
'16188-5','16189-3','59826-8','35591-7','50380-5','50381-3','35592-5',
'44784-7','11041-1','51620-3','72271-0','11042-9','51619-5','35203-9','14682-9') ~ "SCR",
key %in% c('12966-8','12965-0','6299-2','59570-2','12964-3','49071-4','72270-2',
'11065-0','3094-0','35234-4','14937-7') ~ "BUN",
key %in% c('3097-3','44734-2') ~ "BUN_SCR")) %>% #not populated
filter((toupper(unit) %in% c("MG/DL","MG/MG")) &
(key_agg %in% c("SCR","BUN","BUN_SCR"))) %>%
group_by(ENCOUNTERID,key_agg,dsa) %>%
dplyr::summarize(value=mean(value,na.rm=T),.groups="drop") %>%
spread(key_agg,value) %>%
filter(!is.na(SCR)&!is.na(BUN)) %>%
mutate(BUN_SCR = round(BUN/SCR,2)) %>%
mutate(key="BUN_SCR") %>%
dplyr::rename(value=BUN_SCR) %>%
dplyr::select(ENCOUNTERID,key,value,dsa)
dat_out %<>% bind_rows(bun_scr_ratio)
#engineer new features: change of lab from last collection
lab_delta_eligb<-dat_out %>%
group_by(ENCOUNTERID,key) %>%
dplyr::mutate(lab_cnt=sum(dsa<=pred_end)) %>%
ungroup %>%
group_by(key) %>%
dplyr::summarize(p5=quantile(lab_cnt,probs=0.05,na.rm=T),
p25=quantile(lab_cnt,probs=0.25,na.rm=T),
med=median(lab_cnt,na.rm=T),
p75=quantile(lab_cnt,probs=0.75,na.rm=T),
p95=quantile(lab_cnt,probs=0.95,na.rm=T),
.groups="drop")
#--collect changes of lab only for those are regularly repeated (floor(pred_end/2))
freq_lab<-lab_delta_eligb %>% filter(med>=(floor(pred_end/2)))
if(nrow(freq_lab)>0){
lab_delta<-dat_out %>%
semi_join(freq_lab,by="key")
dsa_rg<-seq(0,pred_end)
lab_delta %<>%
group_by(ENCOUNTERID,key) %>%
dplyr::mutate(dsa_max=max(dsa)) %>%
filter(dsa<=dsa_max) %>%
arrange(dsa) %>%
dplyr::mutate(value_lag=lag(value,n=1L,default=NA)) %>%
ungroup %>%
filter(!is.na(value_lag)) %>%
mutate(value=value-value_lag,
key=paste0(key,"_change")) %>%
dplyr::select(ENCOUNTERID,key,value,dsa) %>%
unique
dat_out %<>% bind_rows(lab_delta)
}
}else if(type == "dx"){
#multiple records resolved as "present (1) or absent (0)"
dat_out<-dat %>% dplyr::select(-PATID) %>%
group_by(ENCOUNTERID,key,dsa) %>%
dplyr::summarize(value=(n() >= 1)*1,.groups="drop") %>%
group_by(ENCOUNTERID,key) %>%
top_n(n=1L,wt=dsa) %>%
ungroup %>%
mutate(key=as.character(key)) %>%
dplyr::select(ENCOUNTERID,key,value,dsa)
}else if(type == "px"){
#multiple records resolved as "present (1) or absent (0)"
dat_out<-dat %>% dplyr::select(-PATID) %>%
group_by(ENCOUNTERID,key,dsa) %>%
dplyr::summarize(value=(n() >= 1)*1,.groups="drop") %>%
dplyr::select(ENCOUNTERID,key,value,dsa)
}else if(type=="med"){
#multiple records accumulated
dat_out<-dat %>%
group_by(ENCOUNTERID,key) %>%
arrange(dsa) %>%
dplyr::mutate(value=cumsum(value)) %>%
ungroup %>%
mutate(key=paste0(key,"_cum")) %>%
dplyr::select(ENCOUNTERID,key,value,dsa) %>%
bind_rows(dat %>%
dplyr::select(ENCOUNTERID,key,value,dsa) %>%
unique)
}
return(dat_out)
}
#tw should be the same time unit as dsa
get_dsurv_temporal<-function(dat,censor,tw,pred_in_d=1,carry_over=T){
y_surv<-c()
X_surv<-c()
for(t in tw){
#stack y
censor_t<-censor %>%
mutate(pred_pt=case_when(dsa_y >= t ~ t,
dsa_y < t ~ NA_real_),
y_ep=case_when(dsa_y == t ~ y,
dsa_y > t ~ pmax(0,y-1),
dsa_y < t ~ NA_real_)) %>%
filter(!is.na(pred_pt)) %>%
group_by(ENCOUNTERID) %>%
arrange(desc(pred_pt),desc(y_ep)) %>%
dplyr::slice(1:1) %>%
ungroup %>%
mutate(dsa_y=pred_pt,y=y_ep) %>%
dplyr::select(-pred_pt,-y_ep)
y_surv %<>%
bind_rows(censor_t %>%
dplyr::select(ENCOUNTERID,dsa_y,y))
#stack x
if(carry_over){
X_surv %<>%
bind_rows(dat %>% left_join(censor_t,by="ENCOUNTERID") %>%
filter(dsa < dsa_y-(pred_in_d-1)) %>% # prediction point is at least "pred_in_d" days before endpoint
group_by(ENCOUNTERID,key) %>%
top_n(n=1,wt=dsa) %>% # take latest value (carry over)
ungroup %>%
dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value) %>%
bind_rows(censor_t %>%
mutate(dsa=dsa_y-1,
key=paste0("day",(dsa_y-1)),
value=1) %>%
dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value)))
}else{
X_surv %<>%
bind_rows(dat %>% left_join(censor_t,by="ENCOUNTERID") %>%
filter(dsa < dsa_y-(pred_in_d-1)) %>% # prediction point is at least "pred_in_d" days before endpoint
dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value) %>%
bind_rows(censor_t %>%
mutate(dsa=dsa_y-1,
key=paste0("day",(dsa_y-1)),
value=1) %>%
dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value)))
}
}
Xy_surv<-list(X = X_surv,
y = y_surv)
return(Xy_surv)
}
#identify initial stage of and collect most recent values
get_most_recent<-function(dat,censor,pred_in_d=1,obs_tw=0){
y_mr<-censor %>%
group_by(ENCOUNTERID) %>%
arrange(desc(dsa_y),desc(y)) %>%
dplyr::slice(1:1) %>%
ungroup %>%
select(ENCOUNTERID,dsa_y,y)
force_censor<-y_mr %>%
filter(y==1) %>% summarise(censor_end=median(dsa_y)) %>% unlist
X_mr<-dat %>% left_join(y_mr,by="ENCOUNTERID") %>%
filter((dsa < dsa_y-(pred_in_d-1-obs_tw))| # y==1, prediction point is at least "pred_in_d" days before endpoint
(y==0& dsa <= force_censor) # y==0, censored at median days of AKI occurrence in AKI patients
) %>%
group_by(ENCOUNTERID,key) %>%
top_n(n=1,wt=dsa) %>% # take latest value (carry over)
ungroup %>%
dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value) %>%
bind_rows(y_mr %>%
mutate(dsa=dsa_y-1,
key=paste0("day",(dsa_y-1)),
value=1) %>%
dplyr::select(ENCOUNTERID,dsa_y,dsa,key,value))
Xy_mr<-list(X=X_mr,
y=y_mr)
return(Xy_mr)
}
## convert long matrix to wide sparse matrix
long_to_sparse_matrix<-function(df,id,variable,val,binary=FALSE){
if(binary){
x_sparse<-with(df,
sparseMatrix(i=as.numeric(as.factor(get(id))),
j=as.numeric(as.factor(get(variable))),
x=1,
dimnames=list(levels(as.factor(get(id))),
levels(as.factor(get(variable))))))
}else{
x_sparse<-with(df,
sparseMatrix(i=as.numeric(as.factor(get(id))),
j=as.numeric(as.factor(get(variable))),
x=ifelse(is.na(get(val)),1,as.numeric(get(val))),
dimnames=list(levels(as.factor(get(id))),
levels(as.factor(get(variable))))))
}
return(x_sparse)
}
# Classification performance report for a binary outcome.
# @param pred numeric prediction scores; @param real 0/1 labels
# @param keep_all_cutoffs when TRUE also return the per-cutoff table
# @return list(perf_summ=long table of overall measures[, perf_at=per-cutoff])
# NOTE(review): depends on ROCR (prediction/performance), PRROC (pr.curve),
# pROC (ci.auc) and dplyr/tidyr being attached by the caller.
get_perf_summ<-function(pred,real,keep_all_cutoffs=F){
  # various performance tables, one ROCR performance object per metric pair
  pred_obj<-ROCR::prediction(pred,real)
  prc<-performance(pred_obj,"prec","rec")
  roc<-performance(pred_obj,"sens","spec")
  nppv<-performance(pred_obj,"ppv","npv")
  pcfall<-performance(pred_obj,"pcfall")
  acc<-performance(pred_obj,"acc")
  fscore<-performance(pred_obj,"f")
  mcc<-performance(pred_obj,"phi")
  # join every metric onto a single per-cutoff table keyed by the score cutoff
  perf_at<-data.frame(cutoff=prc@alpha.values[[1]],
                      prec=prc@y.values[[1]],
                      rec_sens=prc@x.values[[1]],
                      stringsAsFactors = F) %>%
    arrange(cutoff) %>%
    left_join(data.frame(cutoff=nppv@alpha.values[[1]],
                         ppv=nppv@y.values[[1]],
                         npv=nppv@x.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    dplyr::mutate(prec_rec_dist=abs(prec-rec_sens)) %>%
    left_join(data.frame(cutoff=fscore@x.values[[1]],
                         fscore=fscore@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=roc@alpha.values[[1]],
                         spec=roc@x.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    # Euclid_meas = distance to the (0,1) ROC corner; Youden = sens+spec-1
    dplyr::mutate(Euclid_meas=sqrt((1-rec_sens)^2+(0-(1-spec))^2),
                  Youden_meas=rec_sens+spec-1) %>%
    left_join(data.frame(cutoff=pcfall@x.values[[1]],
                         pcfall=pcfall@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=acc@x.values[[1]],
                         acc=acc@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    left_join(data.frame(cutoff=mcc@x.values[[1]],
                         mcc=mcc@y.values[[1]],
                         stringsAsFactors = F),
              by="cutoff") %>%
    filter(prec > 0 & rec_sens > 0 & spec > 0) %>%
    group_by(cutoff) %>%
    dplyr::mutate(size=n()) %>%
    ungroup
  # performance summary: AUROC with CI, PRAUC (two estimators), and the
  # operating point nearest the ROC corner / with precision ~= recall
  lab1<-pred[real==1]
  lab0<-pred[real==0]
  pr<-pr.curve(scores.class0 = lab1,
               scores.class1 = lab0,curve=F)
  roc_ci<-pROC::ci.auc(real,pred)
  perf_summ<-data.frame(overall_meas=c("roauc_low",
                                       "roauc",
                                       "roauc_up",
                                       "opt_thresh",
                                       "opt_sens",
                                       "opt_spec",
                                       "opt_ppv",
                                       "opt_npv",
                                       "prauc1",
                                       "prauc2",
                                       "opt_prec",
                                       "opt_rec",
                                       "opt_fscore"),
                        meas_val=c(roc_ci[[1]],
                                   roc_ci[[2]],
                                   roc_ci[[3]],
                                   perf_at$cutoff[which.min(perf_at$Euclid_meas)],
                                   perf_at$rec_sens[which.min(perf_at$Euclid_meas)],
                                   perf_at$spec[which.min(perf_at$Euclid_meas)],
                                   perf_at$ppv[which.min(perf_at$Euclid_meas)],
                                   perf_at$npv[which.min(perf_at$Euclid_meas)],
                                   pr$auc.integral,
                                   pr$auc.davis.goadrich,
                                   perf_at$prec[which.min(perf_at$prec_rec_dist)],
                                   perf_at$rec_sens[which.min(perf_at$prec_rec_dist)],
                                   perf_at$fscore[which.min(perf_at$prec_rec_dist)]),
                        stringsAsFactors = F) %>%
    # append cutoff-averaged versions of each metric as "<metric>_m"
    bind_rows(perf_at %>%
                dplyr::summarize(prec_m=mean(prec,na.rm=T),
                                 sens_m=mean(rec_sens,na.rm=T),
                                 spec_m=mean(spec,na.rm=T),
                                 ppv_m=mean(ppv,na.rm=T),
                                 npv_m=mean(npv,na.rm=T),
                                 acc_m=mean(acc,na.rm=T),
                                 fscore_m=mean(fscore,na.rm=T),
                                 mcc_m=mean(mcc,na.rm=T),
                                 .groups="drop") %>%
                gather(overall_meas,meas_val))
  out<-list(perf_summ=perf_summ)
  if(keep_all_cutoffs){
    out$perf_at<-perf_at
  }
  return(out)
}
# Calibration table: bin predictions into (up to) n_bin quantile bins and
# summarize observed vs predicted event rates per bin, with binomial 95% CI
# bounds centered on the mean prediction. Returns one row per bin.
get_calibr<-function(pred,real,n_bin=20){
  #quantile break points; unique() guards against ties collapsing bins
  brks<-unique(quantile(pred,0:(n_bin)/(n_bin)))
  binned<-data.frame(pred=pred,
                     y=real) %>%
    arrange(pred) %>%
    dplyr::mutate(pred_bin = cut(pred,
                                 breaks=brks,
                                 include.lowest=TRUE,
                                 labels=FALSE))
  per_bin<-binned %>%
    ungroup %>% group_by(pred_bin) %>%
    dplyr::summarize(expos=n(),
                     bin_lower=min(pred),
                     bin_upper=max(pred),
                     bin_mid=median(pred),
                     y_agg = sum(y),
                     pred_p = mean(pred),
                     .groups="drop")
  per_bin %>%
    dplyr::mutate(y_p=y_agg/expos) %>%
    dplyr::mutate(binCI_lower = pmax(0,pred_p-1.96*sqrt(y_p*(1-y_p)/expos)),
                  binCI_upper = pred_p+1.96*sqrt(y_p*(1-y_p)/expos))
}
# Recalibration via a linear map fit on one random half of the calibration
# bins and evaluated (Hosmer-Lemeshow style) on the other half.
# NOTE(review): this function appears broken as written -- see the inline
# NOTE(review) flags below; it likely never ran successfully in this form.
get_recalibr<-function(pred,real,p=0.5,n_bin=20){
  re_calib_in<-get_calibr(pred=pred,real=real) %>%
    mutate(grp_idx=sample(1:2,n(),prob=c(p,1-p),replace = TRUE)) %>%
    select(y_agg,pred_p,pred_bin, expos, grp_idx) %>%
    # NOTE(review): gathers into a column named "overall_mear" (typo?) while
    # the spread() calls below expect "overall_meas"
    gather(overall_mear,meas_med,-pred_bin,-expos,-grp_idx) %>%
  # NOTE(review): the trailing %>% above pipes into the assignment on the next
  # line -- almost certainly an editing error; re_calib_in1/2 are also unused
  re_calib_in1<-re_calib_in %>% filter(grp_idx==1)
  re_calib_in2<-re_calib_in %>% filter(grp_idx==2)
  # fit the recalibration line y_p ~ pred_p on group 1
  re_calib_in_lm<- re_calib_in %>% filter(grp_idx==1) %>%
    group_by(pred_bin) %>%
    dplyr::summarize(expos=round(mean(expos)),
                     meas_med=median(meas_med),
                     .groups="drop") %>%
    spread(overall_meas,meas_med) %>%
    # NOTE(review): pred_in_d/pred_task/fs_type/grp are not columns of the
    # data built here -- presumably copied from a multi-model pipeline
    group_by(pred_in_d,pred_task,fs_type,grp) %>%
    do(recalib=lm(y_p ~ pred_p,data=.))
  # extract intercept (a) and slope (b) of the fitted recalibration line
  re_calib_in_coef<-tidy(re_calib_in_lm,recalib) %>%
    select(term,estimate) %>%
    mutate(term=recode(term,
                       "(Intercept)"="a",
                       "pred_p"="b")) %>%
    spread(term,estimate) %>%
    ungroup
  # apply the recalibration to group 2 and compute chi-square terms
  int_calib<-re_calib_in %>% filter(grp_idx==2) %>%
    group_by(pred_bin) %>%
    dplyr::summarize(expos=round(mean(expos)),
                     meas_med=median(meas_med),
                     .groups="drop") %>%
    spread(overall_meas,meas_med) %>% mutate(k=1)
  # NOTE(review): missing %>% at the end of the previous line -- this
  # left_join(...) chain dangles with no left-hand table and its result is
  # discarded, so int_calib never actually gains pred_p_adj etc.
    left_join(re_calib_in_coef %>% mutate(k=1),by="k") %>%
    mutate(pred_p_adj=pred_p*b+a) %>% # simple re-calibration
    dplyr::rename("real_pos"="y_agg") %>%
    mutate(real_neg=expos-real_pos,
           pred_pos=round(expos*pred_p),
           pred_pos_adj=round(expos*pred_p_adj),
           pred_neg=expos-pred_pos,
           pred_neg_adj=expos-pred_pos_adj) %>%
    filter(pred_pos>0&pred_neg>0&pred_pos_adj>0&pred_neg_adj>0) %>%
    mutate(pos_term=((real_pos-pred_pos)^2)/pred_pos,
           neg_term=((real_neg-pred_neg)^2)/pred_neg,
           pos_adj_term=((real_pos-pred_pos_adj)^2)/pred_pos_adj,
           neg_adj_term=((real_neg-pred_neg_adj)^2)/pred_neg_adj)
  # per-bin pivot of raw vs adjusted predictions with observed rates
  int_calib_pvt<-int_calib %>%
    select(pred_bin,pred_p,pred_p_adj,expos) %>%
    left_join(int_calib %>% select(pred_bin,real_pos),
              by="pred_bin") %>%
    unique %>% mutate(y_p=real_pos/expos)
  # Hosmer-Lemeshow statistic for the raw and recalibrated predictions
  int_calib_hl<-int_calib %>%
    dplyr::summarize(chi_sq=sum(pos_term)+sum(neg_term),
                     chi_sq_adj=sum(pos_adj_term)+sum(neg_adj_term),
                     df=max(3,length(unique(pred_bin)))-2,
                     .groups="drop") %>%
    mutate(p_val=pchisq(chi_sq,df=df,lower.tail = F),
           p_val_adj=pchisq(chi_sq_adj,df=df,lower.tail = F))
  return(list(int_calib_pvt=int_calib_pvt,
              int_calib_hl=int_calib_hl))
}
# Derive histogram bin boundaries for x via the Freedman-Diaconis rule,
# falling back to a Scott-style rule when the IQR is zero. Bin width is driven
# by the middle 95% of the data; the first/last bins are opened to -Inf/Inf.
# @return data.frame(bin, brk_lb, brk_ub) of contiguous intervals
bin_fd<-function(x){
  k<-length(unique(x))
  vals<-x[order(x)]
  #remove outliers (keep the middle 95%)
  q_lo<-quantile(vals,probs=0.025,na.rm=TRUE)
  q_hi<-quantile(vals,probs=0.975,na.rm=TRUE)
  vals<-vals[vals>=q_lo & vals<=q_hi]
  #bin count per https://www.answerminer.com/blog/binning-guide-ideal-histogram
  spread_rng<-max(vals,na.rm=TRUE)-min(vals,na.rm=TRUE)
  iqr_v<-IQR(vals,na.rm=TRUE)
  n_bin<-if(iqr_v!=0){
    spread_rng/(2*iqr_v/(k^(1/3)))
  }else{
    spread_rng/(3.5*sd(vals,na.rm=TRUE)/(k^(1/3)))
  }
  #recover numeric lower bounds from cut()'s interval labels, e.g. "[a,b)"
  lab<-levels(cut(vals,n_bin,include.lowest = TRUE,right=FALSE))
  lower<-c(-Inf,as.numeric(gsub("(,).*","",gsub("\\[","",lab))))
  upper<-c(lower[-1],Inf)
  data.frame(bin=seq_along(lower),brk_lb=lower,brk_ub=upper)
}
# Average pairwise Gaussian-kernel similarity between random subsamples of x
# and y -- the cross term of a kernel two-sample (MMD-style) statistic.
# @param x,y numeric vectors
# @param unbiased when TRUE, divide by n*(m-1) (diagonal entries excluded)
# @param rs subsampling rate applied to both inputs (random; no seed set here)
# @return scalar mean of exp(-(x_i - y_j)^2 / 2) over sampled pairs
get_ks<-function(x,y,unbiased=TRUE,rs=0.6){
  x_rs<-sample(x,round(length(x)*rs))
  y_rs<-sample(y,round(length(y)*rs))
  #pair-wise gaussian kernel matrix: k[i,j] = exp(-(x_i - y_j)^2 / 2)
  #BUG FIX: the original broadcast x into an n x m matrix and subtracted y_rs
  #directly; R recycles the vector column-wise, so entry (i,j) became
  #x_i - y_i instead of x_i - y_j. outer() computes the intended differences.
  gauss_k<-exp(-outer(x_rs,y_rs,"-")^2/2)
  #nullify the diagonal
  gauss_k[row(gauss_k) == col(gauss_k)] <- NA
  #take the average (unbiased variant accounts for the dropped diagonal)
  if(unbiased==T){
    xyk<-sum(gauss_k,na.rm=T)/(length(x_rs)*(length(y_rs)-1))
  }else{
    xyk<-sum(gauss_k,na.rm=T)/(length(x_rs)*length(y_rs))
  }
  return(xyk)
}
# Draw n values from the unlikely tails (outside the central `alpha` mass) of
# the kernel-density estimate of x -- used to inject implausible "noise"
# samples.
# @param x numeric vector to estimate the density from
# @param n number of biased samples to draw
# @param alpha cumulative-density cutoff defining the tails (default 0.99)
# @return numeric vector of length n drawn with replacement from the tails
penalize_sample<-function(x,n,alpha=0.99){
  #kernel density estimation
  gk<-density(x)
  #cumulative distribution rescaled to [0,1]
  #(base-R equivalent of scales::rescale, dropping the extra dependency)
  ck<-cumsum(gk$y)
  fk<-(ck-min(ck))/(max(ck)-min(ck))
  #unlikely value range: grid points in either tail of the estimated CDF
  #BUG FIX: the original used c(which(fk>=alpha),(fk<=(1-alpha))) -- the
  #second term lacked which(), so the logical vector was coerced to 0/1
  #indices and the lower tail was never selected correctly
  bias_pool<-gk$x[fk>=alpha | fk<=(1-alpha)]
  #generate noises
  bias_rs<-sample(bias_pool,n,replace=T)
  return(bias_rs)
}
|
structure(list(url = "https://storage.googleapis.com/twttr-user-compliance/1460716596591312898/submission/1400164763938787342_1460716596591312898?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=complianceapi-public-svc-acct%40twttr-compliance-public-prod.iam.gserviceaccount.com%2F20211116%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20211116T210910Z&X-Goog-Expires=900&X-Goog-SignedHeaders=content-type%3Bhost&X-Goog-Signature=68d2115eff58f6f8e275e6f47eb1a06bc80af8dd242cbe4953fc6b681797e8be18c62d48654cfa9bbc228b299817cdcdbacc3565675388fd80f61881a3ceea327bff0939440a93649a9777dfe9af6acaf911d2c50e3f19095b15ab05fb863be661e4a03a1c4eab8b312b639216eb6b2814f1252e0becc621badf0d741e22e90b199fd33b15226e1278c3d466508dbd7219158e1961c9f26c34586019d0390b0f2d97d968ed37b8166940feee8af9f57c656b883092c39fa8ae627a42b23d48ff7c6a207ba3a27170538ad515c3bb00e24dadf12710079f8cef50742a48d0627caa1d20a689c8ad38fcf8f82ce58f2ae4a6969b7bf25492946e620eff5e1d8c36",
status_code = 200L, headers = structure(list(`x-guploader-uploadid` = "ADPycduBMGFuRVrN7XaHuFH84guL5XFv8c66uQUdRsmS2yxvK5F3BxDg_yUUtiJnay6Rr3JCtqVCJd2ECHJ-JA2rgOvOuFJnzQ",
etag = "\"65a6878e6caa08d04b88ca7754bbe0ab\"", `x-goog-generation` = "1637096951769854",
`x-goog-metageneration` = "1", `x-goog-hash` = "crc32c=ihUoIQ==",
`x-goog-hash` = "md5=ZaaHjmyqCNBLiMp3VLvgqw==", `x-goog-stored-content-length` = "334",
`x-goog-stored-content-encoding` = "identity", vary = "Origin",
`content-length` = "0", date = "Tue, 16 Nov 2021 21:09:11 GMT",
server = "UploadServer", `content-type` = "text/html; charset=UTF-8",
`alt-svc` = "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""), class = c("insensitive",
"list")), all_headers = list(list(status = 200L, version = "HTTP/2",
headers = structure(list(`x-guploader-uploadid` = "ADPycduBMGFuRVrN7XaHuFH84guL5XFv8c66uQUdRsmS2yxvK5F3BxDg_yUUtiJnay6Rr3JCtqVCJd2ECHJ-JA2rgOvOuFJnzQ",
etag = "\"65a6878e6caa08d04b88ca7754bbe0ab\"", `x-goog-generation` = "1637096951769854",
`x-goog-metageneration` = "1", `x-goog-hash` = "crc32c=ihUoIQ==",
`x-goog-hash` = "md5=ZaaHjmyqCNBLiMp3VLvgqw==", `x-goog-stored-content-length` = "334",
`x-goog-stored-content-encoding` = "identity", vary = "Origin",
`content-length` = "0", date = "Tue, 16 Nov 2021 21:09:11 GMT",
server = "UploadServer", `content-type` = "text/html; charset=UTF-8",
`alt-svc` = "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""), class = c("insensitive",
"list")))), cookies = structure(list(domain = logical(0),
flag = logical(0), path = logical(0), secure = logical(0),
expiration = structure(numeric(0), class = c("POSIXct",
"POSIXt")), name = logical(0), value = logical(0)), row.names = integer(0), class = "data.frame"),
content = charToRaw(""), date = structure(1637096951, class = c("POSIXct",
"POSIXt"), tzone = "GMT"), times = c(redirect = 0, namelookup = 3.6e-05,
connect = 3.7e-05, pretransfer = 0.000123, starttransfer = 0.000124,
total = 0.310233)), class = "response")
|
/tests/testthat/storage.googleapis.com/twttr-user-compliance/1460716596591312898/submission/1400164763938787342_1460716596591312898-b681c2-PUT.R
|
permissive
|
cjbarrie/academictwitteR
|
R
| false
| false
| 3,417
|
r
|
structure(list(url = "https://storage.googleapis.com/twttr-user-compliance/1460716596591312898/submission/1400164763938787342_1460716596591312898?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=complianceapi-public-svc-acct%40twttr-compliance-public-prod.iam.gserviceaccount.com%2F20211116%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20211116T210910Z&X-Goog-Expires=900&X-Goog-SignedHeaders=content-type%3Bhost&X-Goog-Signature=68d2115eff58f6f8e275e6f47eb1a06bc80af8dd242cbe4953fc6b681797e8be18c62d48654cfa9bbc228b299817cdcdbacc3565675388fd80f61881a3ceea327bff0939440a93649a9777dfe9af6acaf911d2c50e3f19095b15ab05fb863be661e4a03a1c4eab8b312b639216eb6b2814f1252e0becc621badf0d741e22e90b199fd33b15226e1278c3d466508dbd7219158e1961c9f26c34586019d0390b0f2d97d968ed37b8166940feee8af9f57c656b883092c39fa8ae627a42b23d48ff7c6a207ba3a27170538ad515c3bb00e24dadf12710079f8cef50742a48d0627caa1d20a689c8ad38fcf8f82ce58f2ae4a6969b7bf25492946e620eff5e1d8c36",
status_code = 200L, headers = structure(list(`x-guploader-uploadid` = "ADPycduBMGFuRVrN7XaHuFH84guL5XFv8c66uQUdRsmS2yxvK5F3BxDg_yUUtiJnay6Rr3JCtqVCJd2ECHJ-JA2rgOvOuFJnzQ",
etag = "\"65a6878e6caa08d04b88ca7754bbe0ab\"", `x-goog-generation` = "1637096951769854",
`x-goog-metageneration` = "1", `x-goog-hash` = "crc32c=ihUoIQ==",
`x-goog-hash` = "md5=ZaaHjmyqCNBLiMp3VLvgqw==", `x-goog-stored-content-length` = "334",
`x-goog-stored-content-encoding` = "identity", vary = "Origin",
`content-length` = "0", date = "Tue, 16 Nov 2021 21:09:11 GMT",
server = "UploadServer", `content-type` = "text/html; charset=UTF-8",
`alt-svc` = "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""), class = c("insensitive",
"list")), all_headers = list(list(status = 200L, version = "HTTP/2",
headers = structure(list(`x-guploader-uploadid` = "ADPycduBMGFuRVrN7XaHuFH84guL5XFv8c66uQUdRsmS2yxvK5F3BxDg_yUUtiJnay6Rr3JCtqVCJd2ECHJ-JA2rgOvOuFJnzQ",
etag = "\"65a6878e6caa08d04b88ca7754bbe0ab\"", `x-goog-generation` = "1637096951769854",
`x-goog-metageneration` = "1", `x-goog-hash` = "crc32c=ihUoIQ==",
`x-goog-hash` = "md5=ZaaHjmyqCNBLiMp3VLvgqw==", `x-goog-stored-content-length` = "334",
`x-goog-stored-content-encoding` = "identity", vary = "Origin",
`content-length` = "0", date = "Tue, 16 Nov 2021 21:09:11 GMT",
server = "UploadServer", `content-type` = "text/html; charset=UTF-8",
`alt-svc` = "h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\""), class = c("insensitive",
"list")))), cookies = structure(list(domain = logical(0),
flag = logical(0), path = logical(0), secure = logical(0),
expiration = structure(numeric(0), class = c("POSIXct",
"POSIXt")), name = logical(0), value = logical(0)), row.names = integer(0), class = "data.frame"),
content = charToRaw(""), date = structure(1637096951, class = c("POSIXct",
"POSIXt"), tzone = "GMT"), times = c(redirect = 0, namelookup = 3.6e-05,
connect = 3.7e-05, pretransfer = 0.000123, starttransfer = 0.000124,
total = 0.310233)), class = "response")
|
context("Utility Functions")
library(ffcAPIClient)
## Commented out because I can't figure out how to get it to compare by matched columns and rows, and allow for some floating point variation
#test_that("Online Flow Metrics and Offline Are Comparable", {
# metrics_offline <- get_predicted_flow_metrics_offline(8060983)
# metrics_online <- get_predicted_flow_metrics_online(8060983)
# metrics_offline <- metrics_offline[, !names(metrics_offline) %in% c("comid", "source", "result_type")]
# metrics_online <- metrics_online[, !names(metrics_online) %in% c("comid", "source", "result_type")]
# expect_identical(metrics_offline, metrics_online)
# # Confirm it's not just being nice to us - throw it an intentional failure
# metrics_other_offline <- get_predicted_flow_metrics(8062273)
# expect_false(identical(metrics_online, metrics_other_offline))
#})
# Duplicate predicted-metric records must surface as a visible condition;
# expect_condition() matches it by message text.
# NOTE(review): the second argument (TRUE) is passed positionally -- confirm
# its meaning against get_predicted_flow_metrics()'s signature.
test_that("Get Predicted Flow Metrics Warns on Duplicates", { # should raise a warning when it retrieves duplicate flow metric values
  expect_condition(get_predicted_flow_metrics(3953273, TRUE), "contained duplicated records")
})
|
/ffcAPIClient/tests/testthat/test-utils.R
|
no_license
|
Yesicaleo/ffc_api_client
|
R
| false
| false
| 1,095
|
r
|
context("Utility Functions")
library(ffcAPIClient)
## Commented out because I can't figure out how to get it to compare by matched columns and rows, and allow for some floating point variation
#test_that("Online Flow Metrics and Offline Are Comparable", {
# metrics_offline <- get_predicted_flow_metrics_offline(8060983)
# metrics_online <- get_predicted_flow_metrics_online(8060983)
# metrics_offline <- metrics_offline[, !names(metrics_offline) %in% c("comid", "source", "result_type")]
# metrics_online <- metrics_online[, !names(metrics_online) %in% c("comid", "source", "result_type")]
# expect_identical(metrics_offline, metrics_online)
# # Confirm it's not just being nice to us - throw it an intentional failure
# metrics_other_offline <- get_predicted_flow_metrics(8062273)
# expect_false(identical(metrics_online, metrics_other_offline))
#})
# Duplicate predicted-metric records must surface as a visible condition;
# expect_condition() matches it by message text.
# NOTE(review): the second argument (TRUE) is passed positionally -- confirm
# its meaning against get_predicted_flow_metrics()'s signature.
test_that("Get Predicted Flow Metrics Warns on Duplicates", { # should raise a warning when it retrieves duplicate flow metric values
  expect_condition(get_predicted_flow_metrics(3953273, TRUE), "contained duplicated records")
})
|
## makeCacheMatrix creates a special matrix object that can cache its inverse; it contains 4 functions:
## set, get, setinverse, getinverse
## set: set the value of the matrix (overwrites the value given to makeCacheMatrix)
## get: get the value of the matrix
## setinverse -> set the inverse matrix value (not the outcome of the solve function)
## getinverse -> get the value of the inverse matrix (not the outcome of the solve function;
## shows the value stored via setinverse, or NULL if it has not been set)
## Construct a matrix wrapper that can memoize its inverse.
## Returns a list of four closures sharing state over `x` (the matrix) and a
## cached inverse, which is reset to NULL whenever the matrix is replaced:
##   set(y)        -- replace the stored matrix and clear the cache
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached <<- solve
  }
  getinverse <- function() {
    cached
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## The following function calculates the inverse of the special "matrix" created with the makeCacheMatrix function
##However, it first checks to see if the inverse has already been set (using the setinverse function) If so,
## it gets the inverse from the cache and skips the computation. If not, it calculates the inverse of the data and
##sets the value of the inverse in the cache via the setinverse function.
## Return the inverse of the special "matrix" built by makeCacheMatrix(),
## reusing the cached inverse when one is already stored; otherwise compute it
## with solve(), store it via setinverse(), and return it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # cache miss: compute, store, then return
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
/cachematrix.R
|
no_license
|
Test1two3/ProgrammingAssignment2
|
R
| false
| false
| 1,405
|
r
|
## makeCacheMatrix creates a special matrix object that can cache its inverse; it contains 4 functions:
## set, get, setinverse, getinverse
## set: set the value of the matrix (overwrites the value given to makeCacheMatrix)
## get: get the value of the matrix
## setinverse -> set the inverse matrix value (not the outcome of the solve function)
## getinverse -> get the value of the inverse matrix (not the outcome of the solve function;
## shows the value stored via setinverse, or NULL if it has not been set)
## Construct a matrix wrapper that can memoize its inverse.
## Returns a list of four closures sharing state over `x` (the matrix) and a
## cached inverse, which is reset to NULL whenever the matrix is replaced:
##   set(y)        -- replace the stored matrix and clear the cache
##   get()         -- return the stored matrix
##   setinverse(v) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse (NULL until set)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached <<- solve
  }
  getinverse <- function() {
    cached
  }
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## The following function calculates the inverse of the special "matrix" created with the makeCacheMatrix function
##However, it first checks to see if the inverse has already been set (using the setinverse function) If so,
## it gets the inverse from the cache and skips the computation. If not, it calculates the inverse of the data and
##sets the value of the inverse in the cache via the setinverse function.
## Return the inverse of the special "matrix" built by makeCacheMatrix(),
## reusing the cached inverse when one is already stored; otherwise compute it
## with solve(), store it via setinverse(), and return it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (is.null(inv)) {
    # cache miss: compute, store, then return
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
|
###########################################################################
####################### Analysis (1) BE CR ################################
###########################################################################
# Manhattan plot of the paternal TDT results for the Belgian CR & SH data.
# NOTE(review): setwd() points at a local backup folder while 'filename'
# points at the results folder -- confirm both paths before re-running.
filename = "D:\\Results_paper\\BE CR\\"
setwd("H:\\My documents\\Visual Studio 2010\\Projects\\porc_Test\\Backups\\");
results <- read.table("RG_SH_Paternal_62.txt", header=F)
# Reorder the raw columns into Marker/CHR/BP/P/T/U plus bookkeeping counts.
results = results[,c(2,1,3,15,12,13,16,17,18)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U","Nonsense","Conflict","Missing")
results$BP = results$BP / 1000000   # base pairs -> megabases
results$logp = -log10(results$P)
# BUGFIX: drop marker ALGA0081437 by looking it up by name. The original
# computed the grep but ignored its result and removed hard-coded row
# 42716 unconditionally, which removes the wrong row if the input
# ordering ever changes. This also matches how the sibling sections
# remove ALGA0122477.
results = results[- grep(pattern=paste("^",'ALGA0081437',"$",sep=""),x=as.character(results[,1])),]
#results = results[-log10(na.omit(results$P)) <= 22, ]
#rownames(results) = seq(1:1,nrow(results))
# Interactively click the x positions where chromosome labels should go.
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)                # alternate point colors per chromosome
mhtplot(results[,2:4], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec, side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=60,by=1),cex.lab=5)
abline(h=0)
title("Paternal TDT For CR & SH Disease Belgium PAR")
abline(h=5,col=4,lty=2,lwd =2)      # suggestive significance threshold
abline(h=6,col=2,lty=2,lwd =2)      # genome-wide significance threshold
write.table(results, file = paste(filename,"BE_CR.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
###########################################################################
####################### Analysis (4) NO CR ################################
###########################################################################
# Maternal TDT results for the Norwegian data set.
# NOTE(review): this section is headed "NO CR" but the plot titles below
# say "NO SH" / "SH Disease Norway" -- confirm which phenotype this is.
filename = "D:\\Results_paper\\NO CR\\"
setwd("D:\\Results_paper\\NO CR\\");
results <- read.table("CR_Maternal_2_M.txt", header=F)
# Reorder the raw columns into Marker/CHR/BP/P/T/U plus bookkeeping counts.
results = results[,c(1,2,3,7,4,5,8,9,10)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U","Nonsense","Conflict","Missing")
results$BP = results$BP / 1000000   # base pairs -> megabases
results$logp = -log10(results$P)
# Drop marker ALGA0122477 by name.
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
# Bonferroni-style thresholds for N tests (values are printed, not stored).
N= 34753+200
-log10(0.05/N)
-log10(0.37/N)
# Scatter plot of -log10(P) along the chromosome; column 10 is 'logp'.
plot(results$BP,results$logp,pch = 16,xlab = "-PAR- Chrmosome X (MB)",ylab="-log10(P)",main="Maternal transmission NO SH")
points(tail(results,n=4)[,c(3,10)],col="red",pch = 16)
abline(h=4.975283)
# Interactively click the x positions where chromosome labels should go.
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)                # alternate point colors per chromosome
mhtplot(results[,2:4], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec, side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=23,by=1),cex.lab=5)
abline(h=0)
title("Maternal TDT For SH Disease Norway PAR")
abline(h=5,col=4,lty=2,lwd =2)      # suggestive significance threshold
abline(h=6,col=2,lty=2,lwd =2)      # genome-wide significance threshold
write.table(results, file = paste(filename,"NO_CR.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
###########################################################################
####################### Analysis (6) CR NO & BE ###########################
###########################################################################
# Maternal TDT on the merged Belgian + Norwegian CR data set.
# NOTE(review): 'filename' points at "NO CR + SH" while setwd() points at
# "CR BE + NO", and the output file is named "NO_SH.txt" -- these look
# inconsistent with a CR analysis; confirm the intended output location.
filename = "D:\\Results_paper\\NO CR + SH\\"
setwd("D:\\Results_paper\\CR BE + NO\\")
results <- read.table("TDT_Maternal_CR_BE_NO.txt", header=T)
# Keep Marker/CHR/BP, T/U counts, and two p-value columns (P and merged P2).
results = results[,c(1,2,3,17,14,15,19)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U","P2")
results$BP = results$BP / 1000000   # base pairs -> megabases
results$logp = -log10(results$P)
results$logp2 = -log10(results$P2)
# Drop marker ALGA0122477 by name.
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
# Interactively click the x positions where chromosome labels should go.
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)                # alternate point colors per chromosome
# Plot the merged p-value column (P2 is column 7 after renaming).
mhtplot(results[,c(2:3,7)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec, side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=30,by=1),cex.lab=5)
abline(h=0)
#title("Maternal TDT For merging CR Belgium & Norway data set PAR")
title("Maternal TDT For CR PAR")
abline(h=5,col=4,lty=2,lwd =2)      # suggestive significance threshold
abline(h=6,col=2,lty=2,lwd =2)      # genome-wide significance threshold
write.table(results, file = paste(filename,"NO_SH.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
###########################################################################
####################### Analysis (8) SH BE + NO ###########################
###########################################################################
# Maternal TDT on the merged Belgian + Norwegian SH data set.
filename = "D:\\Results_paper\\SH BE + NO\\";
setwd("D:\\Results_paper\\SH BE + NO\\");
#setwd("D:\\GIGA lab\\Norway day\\Analysis\\BE\\");
results <- read.table("TDT_Maternal_SH_BE_NO.txt", header=T)
# Keep Marker/CHR/BP, T/U counts, and two p-value columns (P and merged P2).
results = results[,c(1,2,3,17,14,15,19)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U","P2")
results$BP = results$BP / 1000000   # base pairs -> megabases
results$logp = -log10(results$P)
results$logp2 = -log10(results$P2)
# Drop marker ALGA0122477 by name.
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
# NOTE(review): the main title says "NO SH" although this is BE+NO merged
# data -- confirm the intended label.
plot(results$BP,results$logp,pch = 16,xlab = "-PAR- Chrmosome X (MB)",ylab="-log10(P)",main="Maternal transmission NO SH")
# BUGFIX: 'results' has 9 columns here (logp is column 8, logp2 column 9);
# the original indexed column 10 (copied from Analysis (4), where logp is
# column 10), which is a subscript-out-of-bounds error. Highlight the
# last four markers as BP vs logp instead.
points(tail(results,n=4)[,c(3,8)],col="red",pch = 16)
# Interactively click the x positions where chromosome labels should go.
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)                # alternate point colors per chromosome
#mhtplot(results[,c(2:4)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mhtplot(results[,c(2:3,7)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec[1:19], side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=45,by=1),cex.lab=5)
abline(h=0)
#title("Maternal TDT For merging SH Belgium & Norway data set PAR")
title("Maternal TDT For SH PAR")
abline(h=5,col=4,lty=2,lwd =2)      # suggestive significance threshold
abline(h=6,col=2,lty=2,lwd =2)      # genome-wide significance threshold
# BUGFIX: the original wrote 'results3', which is never defined anywhere
# in this script and would raise "object 'results3' not found"; write the
# 'results' table built above instead.
# TODO(review): confirm "TEST.txt" is the intended output file name.
write.table(results, file = paste(filename,"TEST.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
###########################################################################
####################### Analysis (9) All ##########################
###########################################################################
# Maternal TDT across all data sets combined.
# NOTE(review): 'filename' still points at the "SH BE + NO" folder while
# setwd() points at "All" -- confirm the intended output location.
filename = "D:\\Results_paper\\SH BE + NO\\";
setwd("D:\\Results_paper\\All\\");
results <- read.table("All_Maternal_X.txt", header=T)
# Keep Marker/CHR/BP, the p-value, and the T/U transmission counts.
results = results[,c(1,2,3,7,4,5)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U")
results$BP = results$BP / 1000000   # base pairs -> megabases
results$logp = -log10(results$P)
# Drop marker ALGA0122477 by name.
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
# Interactively click the x positions where chromosome labels should go.
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)                # alternate point colors per chromosome
#mhtplot(results[ ,2:4], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mhtplot(results[results$CHR != 0,c(2:4)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec[1:20], side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=45,by=1),cex.axis=0.85)
abline(h=0)
title("Maternal TDT For CR & SH")
abline(h=5,col=4,lty=2,lwd =2)      # suggestive significance threshold
abline(h=6,col=2,lty=2,lwd =2)      # genome-wide significance threshold
# BUGFIX: the original wrote 'results3', which is never defined in this
# script ("object 'results3' not found"); write 'results' instead.
# TODO(review): confirm "TEST.txt" is the intended output file name.
write.table(results, file = paste(filename,"TEST.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
# Combine the four per-cohort TDT chi-square statistics (NO CR, NO SH,
# BE CR, BE SH) into one statistic per shared SNP and plot it.
NO_CR = read.table('D:\\Results_paper\\NO CR\\TDT_cases.txt')
NO_SH = read.table('D:\\Results_paper\\NO SH\\TDT_cases.txt')
BE_CR = read.table('D:\\Results_paper\\BE CR\\TDT_2.txt')
BE_SH = read.table('D:\\Results_paper\\BE SH\\TDT_6.txt')
# SNP IDs live in column 1 of the Norwegian files and column 2 of the
# Belgian ones; keep only SNPs present in all four tables.
int_SNPs = intersect(intersect(intersect(as.character(NO_CR[,1]),as.character(NO_SH[,1])),as.character(BE_CR[,2])),as.character(BE_SH[,2]) )
NO_CR = NO_CR[NO_CR[,1] %in% int_SNPs,]
NO_SH = NO_SH[NO_SH[,1] %in% int_SNPs,]
BE_CR = BE_CR[BE_CR[,2] %in% int_SNPs,]
BE_SH = BE_SH[BE_SH[,2] %in% int_SNPs,]
# Re-key every table by SNP ID so the four tables can be aligned row-wise.
rownames(NO_CR) = as.character(NO_CR[,1])
rownames(NO_SH) = as.character(NO_SH[,1])
rownames(BE_CR) = as.character(BE_CR[,2])
rownames(BE_SH) = as.character(BE_SH[,2])
NO_CR = NO_CR[as.character(int_SNPs),]
NO_SH = NO_SH[as.character(int_SNPs),]
BE_CR = BE_CR[as.character(int_SNPs),]
BE_SH = BE_SH[as.character(int_SNPs),]
# ROBUSTNESS: fail loudly if the alignment is wrong -- the original merely
# printed TRUE/FALSE and carried on regardless.
stopifnot(isTRUE(all.equal(as.character(NO_SH[,1]),as.character(NO_CR[,1]) )));
stopifnot(isTRUE(all.equal(as.character(BE_CR[,2]),as.character(BE_SH[,2]))));
# Sum the four chi-square statistics; the combined p-value is taken from a
# chi-square distribution with 4 degrees of freedom.
temp = cbind(NO_SH[,1:3], NO_SH[,6] + NO_CR[,6] + BE_CR[,14] + BE_SH[,14] )
colnames(temp) = c("Marker_ID","CHR","BP","X2")
temp$P2 = pchisq(q=temp$X2,df=4,lower.tail=F)
results = temp
results$BP = results$BP / 1000000   # base pairs -> megabases
results$logp = -log10(results$P2)
# Drop marker ALGA0122477 by name.
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
# Interactively click the x positions where chromosome labels should go.
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)                # alternate point colors per chromosome
#mhtplot(results[ ,2:4], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mhtplot(results[results$CHR == 19,c(2:3,5)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec[1:20], side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=70,by=1),cex.axis=0.85)
abline(h=0)
title("TDT For CR & SH CHR X")
abline(h=5,col=4,lty=2,lwd =2)      # suggestive significance threshold
abline(h=6,col=2,lty=2,lwd =2)      # genome-wide significance threshold
# BUGFIX: the original wrote 'results3', which is never defined in this
# script ("object 'results3' not found"); write 'results' instead.
# TODO(review): 'filename' is left over from Analysis (9) and "TEST.txt"
# looks like a placeholder name -- confirm the intended output path.
write.table(results, file = paste(filename,"TEST.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
# Printed Bonferroni-style thresholds for two candidate test counts.
-log10(0.05/31833)
-log10(0.37/31833)
-log10(0.05/41088)
-log10(0.37/41088)
|
/R_Code/Draw_PAR.R
|
no_license
|
Elansary-Mahmoud/pigendeff
|
R
| false
| false
| 9,601
|
r
|
###########################################################################
####################### Analysis (1) BE CR ################################
###########################################################################
filename = "D:\\Results_paper\\BE CR\\"
setwd("H:\\My documents\\Visual Studio 2010\\Projects\\porc_Test\\Backups\\");
results <- read.table("RG_SH_Paternal_62.txt", header=F)
results = results[,c(2,1,3,15,12,13,16,17,18)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U","Nonsense","Conflict","Missing")
results$BP = results$BP / 1000000
results$logp = -log10(results$P)
# BUGFIX: drop marker ALGA0081437 by looking it up by name. The original
# computed the grep but ignored its result and removed hard-coded row
# 42716 unconditionally, which removes the wrong row if the input
# ordering ever changes.
results = results[- grep(pattern=paste("^",'ALGA0081437',"$",sep=""),x=as.character(results[,1])),]
#results = results[-log10(na.omit(results$P)) <= 22, ]
#rownames(results) = seq(1:1,nrow(results))
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)
mhtplot(results[,2:4], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec, side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=60,by=1),cex.lab=5)
abline(h=0)
title("Paternal TDT For CR & SH Disease Belgium PAR")
abline(h=5,col=4,lty=2,lwd =2)
abline(h=6,col=2,lty=2,lwd =2)
write.table(results, file = paste(filename,"BE_CR.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
###########################################################################
####################### Analysis (4) NO CR ################################
###########################################################################
filename = "D:\\Results_paper\\NO CR\\"
setwd("D:\\Results_paper\\NO CR\\");
results <- read.table("CR_Maternal_2_M.txt", header=F)
results = results[,c(1,2,3,7,4,5,8,9,10)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U","Nonsense","Conflict","Missing")
results$BP = results$BP / 1000000
results$logp = -log10(results$P)
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
N= 34753+200
-log10(0.05/N)
-log10(0.37/N)
plot(results$BP,results$logp,pch = 16,xlab = "-PAR- Chrmosome X (MB)",ylab="-log10(P)",main="Maternal transmission NO SH")
points(tail(results,n=4)[,c(3,10)],col="red",pch = 16)
abline(h=4.975283)
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)
mhtplot(results[,2:4], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec, side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=23,by=1),cex.lab=5)
abline(h=0)
title("Maternal TDT For SH Disease Norway PAR")
abline(h=5,col=4,lty=2,lwd =2)
abline(h=6,col=2,lty=2,lwd =2)
write.table(results, file = paste(filename,"NO_CR.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
###########################################################################
####################### Analysis (6) CR NO & BE ###########################
###########################################################################
filename = "D:\\Results_paper\\NO CR + SH\\"
setwd("D:\\Results_paper\\CR BE + NO\\")
results <- read.table("TDT_Maternal_CR_BE_NO.txt", header=T)
results = results[,c(1,2,3,17,14,15,19)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U","P2")
results$BP = results$BP / 1000000
results$logp = -log10(results$P)
results$logp2 = -log10(results$P2)
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)
mhtplot(results[,c(2:3,7)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec, side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=30,by=1),cex.lab=5)
abline(h=0)
#title("Maternal TDT For merging CR Belgium & Norway data set PAR")
title("Maternal TDT For CR PAR")
abline(h=5,col=4,lty=2,lwd =2)
abline(h=6,col=2,lty=2,lwd =2)
write.table(results, file = paste(filename,"NO_SH.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
###########################################################################
####################### Analysis (8) SH BE + NO ###########################
###########################################################################
filename = "D:\\Results_paper\\SH BE + NO\\";
setwd("D:\\Results_paper\\SH BE + NO\\");
#setwd("D:\\GIGA lab\\Norway day\\Analysis\\BE\\");
results <- read.table("TDT_Maternal_SH_BE_NO.txt", header=T)
results = results[,c(1,2,3,17,14,15,19)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U","P2")
results$BP = results$BP / 1000000
results$logp = -log10(results$P)
results$logp2 = -log10(results$P2)
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
plot(results$BP,results$logp,pch = 16,xlab = "-PAR- Chrmosome X (MB)",ylab="-log10(P)",main="Maternal transmission NO SH")
points(tail(results,n=4)[,c(3,10)],col="red",pch = 16)
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)
#mhtplot(results[,c(2:4)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mhtplot(results[,c(2:3,7)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec[1:19], side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=45,by=1),cex.lab=5)
abline(h=0)
#title("Maternal TDT For merging SH Belgium & Norway data set PAR")
title("Maternal TDT For SH PAR")
abline(h=5,col=4,lty=2,lwd =2)
abline(h=6,col=2,lty=2,lwd =2)
# BUGFIX: the original wrote 'results3', which is never defined in this
# script ("object 'results3' not found"); write 'results' instead.
write.table(results, file = paste(filename,"TEST.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
###########################################################################
####################### Analysis (9) All ##########################
###########################################################################
filename = "D:\\Results_paper\\SH BE + NO\\";
setwd("D:\\Results_paper\\All\\");
results <- read.table("All_Maternal_X.txt", header=T)
results = results[,c(1,2,3,7,4,5)]
colnames(results) = c("Marker_ID","CHR","BP","P","T","U")
results$BP = results$BP / 1000000
results$logp = -log10(results$P)
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)
#mhtplot(results[ ,2:4], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mhtplot(results[results$CHR != 0,c(2:4)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec[1:20], side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=45,by=1),cex.axis=0.85)
abline(h=0)
title("Maternal TDT For CR & SH")
abline(h=5,col=4,lty=2,lwd =2)
abline(h=6,col=2,lty=2,lwd =2)
# BUGFIX: the original wrote 'results3', which is never defined in this
# script ("object 'results3' not found"); write 'results' instead.
write.table(results, file = paste(filename,"TEST.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
NO_CR = read.table('D:\\Results_paper\\NO CR\\TDT_cases.txt')
NO_SH = read.table('D:\\Results_paper\\NO SH\\TDT_cases.txt')
BE_CR = read.table('D:\\Results_paper\\BE CR\\TDT_2.txt')
BE_SH = read.table('D:\\Results_paper\\BE SH\\TDT_6.txt')
int_SNPs = intersect(intersect(intersect(as.character(NO_CR[,1]),as.character(NO_SH[,1])),as.character(BE_CR[,2])),as.character(BE_SH[,2]) )
NO_CR = NO_CR[NO_CR[,1] %in% int_SNPs,]
NO_SH = NO_SH[NO_SH[,1] %in% int_SNPs,]
BE_CR = BE_CR[BE_CR[,2] %in% int_SNPs,]
BE_SH = BE_SH[BE_SH[,2] %in% int_SNPs,]
rownames(NO_CR) = as.character(NO_CR[,1])
rownames(NO_SH) = as.character(NO_SH[,1])
rownames(BE_CR) = as.character(BE_CR[,2])
rownames(BE_SH) = as.character(BE_SH[,2])
NO_CR = NO_CR[as.character(int_SNPs),]
NO_SH = NO_SH[as.character(int_SNPs),]
BE_CR = BE_CR[as.character(int_SNPs),]
BE_SH = BE_SH[as.character(int_SNPs),]
isTRUE(all.equal(as.character(NO_SH[,1]),as.character(NO_CR[,1]) ));
isTRUE(all.equal(as.character(BE_CR[,2]),as.character(BE_SH[,2])));
temp = cbind(NO_SH[,1:3], NO_SH[,6] + NO_CR[,6] + BE_CR[,14] + BE_SH[,14] )
colnames(temp) = c("Marker_ID","CHR","BP","X2")
temp$P2 = pchisq(q=temp$X2,df=4,lower.tail=F)
results = temp
results$BP = results$BP / 1000000
results$logp = -log10(results$P2)
results = results[- grep(pattern=paste("^",'ALGA0122477',"$",sep=""),x=as.character(results[,1])),]
xat <- locator()$x
xat <- round(xat)
textvec <- c(as.character(0:18),"X")
x = c("black","antiquewhite4")
mycolors = rep(x,10)
#mhtplot(results[ ,2:4], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mhtplot(results[results$CHR == 19,c(2:3,5)], control = mht.control(usepos = TRUE,cex=1.2,colors=mycolors,labels=rep("",20),yline=2,xline=2),pch = 16)
mtext(textvec[1:20], side=1, line=0, at=xat)
axis(2,at=seq(from=0,to=70,by=1),cex.axis=0.85)
abline(h=0)
title("TDT For CR & SH CHR X")
abline(h=5,col=4,lty=2,lwd =2)
abline(h=6,col=2,lty=2,lwd =2)
# BUGFIX: the original wrote 'results3', which is never defined in this
# script ("object 'results3' not found"); write 'results' instead.
write.table(results, file = paste(filename,"TEST.txt",sep=""), sep = "\t", quote = FALSE, row.names = FALSE);
-log10(0.05/31833)
-log10(0.37/31833)
-log10(0.05/41088)
-log10(0.37/41088)
|
#' 01310 Gas Bubbles (Severity)
#'
#' A table containing the USGS Gas Bubbles (Severity) parameter codes.
#'
#' @format A data frame with 5 rows and 3 variables:
#' \describe{
#' \item{Parameter Code}{USGS Parameter Code}
#' \item{Fixed Value}{Fixed Value}
#' \item{Fixed Text}{Fixed Text}
#' }
#'
#'
#' @references
#' This data is from Table 26. Parameter codes with fixed values (USGS Water Quality Samples for USA: Sample Data). See \url{https://help.waterdata.usgs.gov/codes-and-parameters/}.
#'
#'
#'
#'
"pmcode_01310"
#> [1] "pmcode_01310"
|
/R/pmcode_01310.R
|
permissive
|
cran/ie2miscdata
|
R
| false
| false
| 546
|
r
|
#' 01310 Gas Bubbles (Severity)
#'
#' A table containing the USGS Gas Bubbles (Severity) parameter codes.
#'
#' @format A data frame with 5 rows and 3 variables:
#' \describe{
#' \item{Parameter Code}{USGS Parameter Code}
#' \item{Fixed Value}{Fixed Value}
#' \item{Fixed Text}{Fixed Text}
#' }
#'
#'
#' @references
#' This data is from Table 26. Parameter codes with fixed values (USGS Water Quality Samples for USA: Sample Data). See \url{https://help.waterdata.usgs.gov/codes-and-parameters/}.
#'
#'
#'
#'
"pmcode_01310"
#> [1] "pmcode_01310"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DicomRDS.R
\docType{data}
\name{DicomRDS}
\alias{DicomRDS}
\title{DicomRDS Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
DicomRDS$new(data)
}
\arguments{
\item{data}{Data frame imported from DICOM RDS file}
}
\description{
This class imports various metadata by reading an RDS file that contains DICOM information.
It provides functions that import data related to the Radiology CDM.
}
\examples{
############################# Example Code: Read DICOM RDS File ###################################
rdsFile <- "/home/ohdsi/DICOM-header/header.rds"
data <- readRDS(file = rdsFile)
# If single data (only one metadata in dicom file )
dcmRDS <- DicomRDS$new(data)
# Create Occurrence ID for radiology image
roID <- dcmRDS$createOccurrenceID()
# Get PatientID
patientid <- dcmRDS$getPatientID()
# Is this radiology image contrast or non-contrast?
isContrast <- dcmRDS$isPost4BrainCT()
# If Multi data
for(i in 1:length(data)) {
dcmRDS <- DicomRDS$new(data[[i]])
roID <- dcmRDS$createOccurrenceID()
patientid <- dcmRDS$getPatientID()
isContrast <- dcmRDS$isPost4BrainCT()
df <- data.frame(roID, patientid, isContrast)
}
View(df)
######################################## Example Code: END ##########################################
}
\seealso{
https://github.com/OHDSI/Radiology-CDM/wiki
}
\author{
Neon K.I.D
}
\keyword{datasets}
|
/man/DicomRDS.Rd
|
no_license
|
Ji-Eon/Radiology-CDM
|
R
| false
| true
| 1,463
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DicomRDS.R
\docType{data}
\name{DicomRDS}
\alias{DicomRDS}
\title{DicomRDS Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
DicomRDS$new(data)
}
\arguments{
\item{data}{Data frame imported from DICOM RDS file}
}
\description{
This class imports various metadata by reading an RDS file that contains DICOM information.
It provides functions that import data related to the Radiology CDM.
}
\examples{
############################# Example Code: Read DICOM RDS File ###################################
rdsFile <- "/home/ohdsi/DICOM-header/header.rds"
data <- readRDS(file = rdsFile)
# If single data (only one metadata in dicom file )
dcmRDS <- DicomRDS$new(data)
# Create Occurrence ID for radiology image
roID <- dcmRDS$createOccurrenceID()
# Get PatientID
patientid <- dcmRDS$getPatientID()
# Is this radiology image contrast or non-contrast?
isContrast <- dcmRDS$isPost4BrainCT()
# If Multi data
for(i in 1:length(data)) {
dcmRDS <- DicomRDS$new(data[[i]])
roID <- dcmRDS$createOccurrenceID()
patientid <- dcmRDS$getPatientID()
isContrast <- dcmRDS$isPost4BrainCT()
df <- data.frame(roID, patientid, isContrast)
}
View(df)
######################################## Example Code: END ##########################################
}
\seealso{
https://github.com/OHDSI/Radiology-CDM/wiki
}
\author{
Neon K.I.D
}
\keyword{datasets}
|
#!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set the precision to 16 digits:
options( digits = 16 );
#' Run benchmarks.
#'
#' @examples
#' main();
main <- function() {
	# Define benchmark parameters:
	name <- "random-gumbel";
	iterations <- 1000000L;
	repeats <- 3;
	# NOTE: the helper functions below are (re)defined inside main() on
	# every call; they close over nothing and could live at top level.
	#' Print the TAP version.
	#'
	#' @examples
	#' print_version();
	print_version <- function() {
		cat( "TAP version 13\n" );
	}
	#' Print the TAP summary.
	#'
	#' @param total Total number of tests.
	#' @param passing Total number of passing tests.
	#'
	#' @examples
	#' print_summary( 3, 3 );
	print_summary <- function( total, passing ) {
		cat( "#\n" );
		cat( paste0( "1..", total, "\n" ) ); # TAP plan
		cat( paste0( "# total ", total, "\n" ) );
		cat( paste0( "# pass ", passing, "\n" ) );
		cat( "#\n" );
		cat( "# ok\n" );
	}
	#' Print benchmark results.
	#'
	#' @param iterations Number of iterations.
	#' @param elapsed Elapsed time in seconds.
	#'
	#' @examples
	#' print_results( 10000L, 0.131009101868 );
	print_results <- function( iterations, elapsed ) {
		rate <- iterations / elapsed;
		cat( "  ---\n" );
		cat( paste0( "  iterations: ", iterations, "\n" ) );
		cat( paste0( "  elapsed: ", elapsed, "\n" ) );
		cat( paste0( "  rate: ", rate, "\n" ) );
		cat( "  ...\n" );
	}
	#' Run a benchmark.
	#'
	#' ## Notes
	#'
	#' * We compute and return a total "elapsed" time, rather than the minimum
	#'   evaluation time, to match benchmark results in other languages (e.g.,
	#'   Python).
	#'
	#'
	#' @param iterations Number of Iterations.
	#' @return Elapsed time in seconds.
	#'
	#' @examples
	#' elapsed <- benchmark( 10000L );
	benchmark <- function( iterations ) {
		# Run the benchmarks. Each iteration draws one Gumbel variate with a
		# random location in [-50, 50] and a random scale in (0, 20]; adding
		# .Machine$double.eps keeps the scale strictly positive:
		results <- microbenchmark::microbenchmark( evd::rgumbel( 1, runif(1,min=-50.0,max=50.0), runif(1,min=0.0,max=20.0)+.Machine$double.eps ), times = iterations );
		# Sum all the raw timing results to get a total "elapsed" time:
		elapsed <- sum( results$time );
		# Convert the elapsed time from nanoseconds to seconds:
		elapsed <- elapsed / 1.0e9;
		return( elapsed );
	}
	# Emit TAP output: version header, one timed benchmark per repeat, then
	# a summary (all repeats are reported as passing).
	print_version();
	for ( i in 1:repeats ) {
		cat( paste0( "# r::", name, "\n" ) );
		elapsed <- benchmark( iterations );
		print_results( iterations, elapsed );
		cat( paste0( "ok ", i, " benchmark finished", "\n" ) );
	}
	print_summary( repeats, repeats );
}
main();
|
/lib/node_modules/@stdlib/random/base/gumbel/benchmark/r/benchmark.R
|
permissive
|
doc22940/stdlib
|
R
| false
| false
| 2,921
|
r
|
#!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set the precision to 16 digits:
options( digits = 16 );
#' Run benchmarks.
#'
#' @examples
#' main();
main <- function() {
# Define benchmark parameters:
name <- "random-gumbel";
iterations <- 1000000L;
repeats <- 3;
#' Print the TAP version.
#'
#' @examples
#' print_version();
print_version <- function() {
cat( "TAP version 13\n" );
}
#' Print the TAP summary.
#'
#' @param total Total number of tests.
#' @param passing Total number of passing tests.
#'
#' @examples
#' print_summary( 3, 3 );
print_summary <- function( total, passing ) {
cat( "#\n" );
cat( paste0( "1..", total, "\n" ) ); # TAP plan
cat( paste0( "# total ", total, "\n" ) );
cat( paste0( "# pass ", passing, "\n" ) );
cat( "#\n" );
cat( "# ok\n" );
}
#' Print benchmark results.
#'
#' @param iterations Number of iterations.
#' @param elapsed Elapsed time in seconds.
#'
#' @examples
#' print_results( 10000L, 0.131009101868 );
print_results <- function( iterations, elapsed ) {
rate <- iterations / elapsed;
cat( " ---\n" );
cat( paste0( " iterations: ", iterations, "\n" ) );
cat( paste0( " elapsed: ", elapsed, "\n" ) );
cat( paste0( " rate: ", rate, "\n" ) );
cat( " ...\n" );
}
#' Run a benchmark.
#'
#' ## Notes
#'
#' * We compute and return a total "elapsed" time, rather than the minimum
#' evaluation time, to match benchmark results in other languages (e.g.,
#' Python).
#'
#'
#' @param iterations Number of Iterations.
#' @return Elapsed time in seconds.
#'
#' @examples
#' elapsed <- benchmark( 10000L );
benchmark <- function( iterations ) {
# Run the benchmarks:
results <- microbenchmark::microbenchmark( evd::rgumbel( 1, runif(1,min=-50.0,max=50.0), runif(1,min=0.0,max=20.0)+.Machine$double.eps ), times = iterations );
# Sum all the raw timing results to get a total "elapsed" time:
elapsed <- sum( results$time );
# Convert the elapsed time from nanoseconds to seconds:
elapsed <- elapsed / 1.0e9;
return( elapsed );
}
print_version();
for ( i in 1:repeats ) {
cat( paste0( "# r::", name, "\n" ) );
elapsed <- benchmark( iterations );
print_results( iterations, elapsed );
cat( paste0( "ok ", i, " benchmark finished", "\n" ) );
}
print_summary( repeats, repeats );
}
main();
|
## Build a caching wrapper around a matrix.
##
## The returned list exposes four closures that share one environment:
##   set(y):              store a new matrix (and forget any cached inverse)
##   get():               retrieve the current matrix
##   setinverse(inverse): record a computed inverse in the cache
##   getinverse():        read the cache (NULL when nothing has been recorded)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL  # the stored matrix changed, so the old inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) cached <<- inverse
  getinverse <- function() cached
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of a cache-matrix object, reusing a cached result
## when one exists. On a cache miss the inverse is computed with solve(),
## stored back into the object via setinverse(), and returned; on a hit a
## "getting cached data" message is emitted. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inv <- x$getinverse()
  if (is.null(inv)) {
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
  } else {
    message("getting cached data")
  }
  inv
}
# Quick manual smoke test of the caching utilities defined above.
B = matrix(c(2, 4, 3, 1), nrow = 2, ncol = 2)
solve(B)                # reference inverse, computed directly
test2 <- makeCacheMatrix(matrix(c(5, 5, 5, 5), nrow = 2, ncol = 2))
test2$get()             # the stored matrix
test2$getinverse()      # NULL: nothing has been cached yet
test2$set(B)            # swap in B; any cached inverse is invalidated
test2$get()
cacheSolve(test2)       # computes, caches, and returns the inverse of B
test2$getinverse()      # now holds the cached inverse
|
/cachematrix.R
|
no_license
|
novajess/ProgrammingAssignment2
|
R
| false
| false
| 1,486
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Create a list of functions tied to a specific matrix for use in future functions.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL #creates inverse object, sets it to NULL
set <- function(y){
x <<- y #resets value of the matrix object
m <<- NULL #if set fxn is used, m is reset to NULL
}
get <- function() x #returns x object
setinverse <- function(inverse) m <<- inverse #m is defined in second function; it is the inverse of the matrix, or NULL if it has not been cached or is reset
getinverse <- function() m #returns the value of m
list(set = set, #creates a list with each fxn as a named object
get = get, setinverse = setinverse,
getinverse=getinverse)
}
## Cache the result of the inverse of a matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data,...)
x$setinverse(m)
m
}
#test it out...
# Manual smoke test for makeCacheMatrix()/cacheSolve() defined above.
B = matrix(
c(2, 4, 3, 1),
nrow=2,
ncol=2)
solve(B)  # direct inverse, for comparison with cacheSolve() below
# Wrap a throwaway matrix; it is replaced with B before any inverse is taken.
test2 <- makeCacheMatrix(matrix(c(5,5,5,5), nrow=2, ncol=2))
test2$get()        # the stored matrix
test2$getinverse() # NULL: nothing has been cached yet
test2$set(B)       # swap in B; set() also clears the cache
test2$get()
cacheSolve(test2)  # computes the inverse of B and caches it
test2$getinverse() # now returns the cached inverse
|
###############################################################################
# Package documentation
###############################################################################
#' @title ggRandomForests: Visually Exploring Random Forests
#'
#' @description \code{ggRandomForests} is a utility package for \code{randomForestSRC}
#' (Ishwaran et al. 2014, 2008, 2007) for survival, regression and
#' classification forests and uses the \code{ggplot2} (Wickham 2009) package for plotting results.
#' \code{ggRandomForests} is structured to extract data objects from the random forest
#' and provides S3 functions for printing and plotting these objects.
#'
#' The \code{randomForestSRC} package provides a unified treatment of Breiman's (2001) random
#' forests for a variety of data settings. Regression and
#' classification forests are grown when the response is numeric or categorical
#' (factor) while survival and competing risk forests (Ishwaran et al. 2008, 2012)
#' are grown for right-censored survival data.
#'
#' Many of the figures created by the \code{ggRandomForests} package are also available
#' directly from within the \code{randomForestSRC} package. However, \code{ggRandomForests} offers the
#' following advantages:
#' \itemize{
#' \item Separation of data and figures: \code{ggRandomForest} contains functions that
#' operate on either the \code{\link[randomForestSRC]{rfsrc}} forest object directly, or on
#' the output from \code{randomForestSRC} post processing functions (i.e.
#' \code{plot.variable},
#' \code{var.select},
#' \code{find.interaction}) to
#' generate intermediate \code{ggRandomForests} data objects. S3 functions are provided to
#' further process these objects and plot results using the \code{ggplot2} graphics package.
#' Alternatively, users can use these data objects for additional custom plotting or
#' analysis operations.
#'
#' \item Each data object/figure is a single, self contained object. This allows simple
#' modification and manipulation of the data or \code{ggplot2} objects to meet users specific
#' needs and requirements.
#'
#' \item The use of \code{ggplot2} for plotting. We chose to use the \code{ggplot2} package
#' for our figures to allow users flexibility in modifying the figures to their liking. Each S3
#' plot function returns either a single \code{ggplot2} object, or a \code{list} of
#' \code{ggplot2} objects, allowing users to use additional \code{ggplot2} functions or themes
#' to modify and customise the figures to their liking.
#' }
#'
#' The \code{ggRandomForests} package contains the following data functions:
#' \itemize{
#' \item \code{\link{gg_rfsrc}}: randomForest[SRC] predictions.
#' \item \code{\link{gg_error}}: randomForest[SRC] convergence rate based on the OOB error rate.
#' \item \code{\link{gg_roc}}: ROC curves for randomForest classification models.
#' \item \code{\link{gg_vimp}}: Variable Importance ranking for variable selection.
#' \item \code{\link{gg_minimal_depth}}: Minimal Depth ranking for variable selection
#' (Ishwaran et.al. 2010).
#' \item \code{\link{gg_minimal_vimp}}: Comparing Minimal Depth and VIMP rankings for variable
#' selection.
#' \item \code{\link{gg_interaction}}: Minimal Depth interaction detection (Ishwaran et.al. 2010)
#' \item \code{\link{gg_variable}}: Marginal variable dependence.
#' \item \code{\link{gg_partial}}: Partial (risk adjusted) variable dependence.
#' \item \code{\link{gg_partial_coplot}}: Partial variable conditional dependence
#' (computationally expensive).
#' \item \code{\link{gg_survival}}: Kaplan-Meier/Nelson-Aalen hazard analysis.
#' }
#'
#' Each of these data functions has an associated S3 plot function that returns \code{ggplot2} objects, either
#' individually or as a list, which can be further customised using standard \code{ggplot2} commands.
#'
#' @references
#' Breiman, L. (2001). Random forests, Machine Learning, 45:5-32.
#'
#' Ishwaran H. and Kogalur U.B. (2014). Random Forests for Survival,
#' Regression and Classification (RF-SRC), R package version 1.5.5.12.
#'
#' Ishwaran H. and Kogalur U.B. (2007). Random survival forests for R. R News
#' 7(2), 25--31.
#'
#' Ishwaran H., Kogalur U.B., Blackstone E.H. and Lauer M.S. (2008). Random
#' survival forests. Ann. Appl. Statist. 2(3), 841--860.
#'
#' Ishwaran, H., U. B. Kogalur, E. Z. Gorodeski, A. J. Minn, and M. S. Lauer (2010).
#' High-dimensional variable selection for survival data. J. Amer. Statist. Assoc.
#' 105, 205-217.
#'
#' Ishwaran, H. (2007). Variable importance in binary regression trees and forests.
#' Electronic J. Statist., 1, 519-537.
#'
#' Wickham, H. ggplot2: elegant graphics for data analysis. Springer New York, 2009.
#'
#' @docType package
#' @name ggRandomForests-package
#'
################
NULL
|
/Methodology/Survival Model Research/ggRandomForests/ggRandomForests-master/R/help.R
|
no_license
|
ryerex/Research_and_Methods
|
R
| false
| false
| 4,812
|
r
|
###############################################################################
# Package documentation
###############################################################################
#' @title ggRandomForests: Visually Exploring Random Forests
#'
#' @description \code{ggRandomForests} is a utility package for \code{randomForestSRC}
#' (Ishwaran et al. 2014, 2008, 2007) for survival, regression and
#' classification forests and uses the \code{ggplot2} (Wickham 2009) package for plotting results.
#' \code{ggRandomForests} is structured to extract data objects from the random forest
#' and provides S3 functions for printing and plotting these objects.
#'
#' The \code{randomForestSRC} package provides a unified treatment of Breiman's (2001) random
#' forests for a variety of data settings. Regression and
#' classification forests are grown when the response is numeric or categorical
#' (factor) while survival and competing risk forests (Ishwaran et al. 2008, 2012)
#' are grown for right-censored survival data.
#'
#' Many of the figures created by the \code{ggRandomForests} package are also available
#' directly from within the \code{randomForestSRC} package. However, \code{ggRandomForests} offers the
#' following advantages:
#' \itemize{
#' \item Separation of data and figures: \code{ggRandomForest} contains functions that
#' operate on either the \code{\link[randomForestSRC]{rfsrc}} forest object directly, or on
#' the output from \code{randomForestSRC} post processing functions (i.e.
#' \code{plot.variable},
#' \code{var.select},
#' \code{find.interaction}) to
#' generate intermediate \code{ggRandomForests} data objects. S3 functions are provided to
#' further process these objects and plot results using the \code{ggplot2} graphics package.
#' Alternatively, users can use these data objects for additional custom plotting or
#' analysis operations.
#'
#' \item Each data object/figure is a single, self contained object. This allows simple
#' modification and manipulation of the data or \code{ggplot2} objects to meet users specific
#' needs and requirements.
#'
#' \item The use of \code{ggplot2} for plotting. We chose to use the \code{ggplot2} package
#' for our figures to allow users flexibility in modifying the figures to their liking. Each S3
#' plot function returns either a single \code{ggplot2} object, or a \code{list} of
#' \code{ggplot2} objects, allowing users to use additional \code{ggplot2} functions or themes
#' to modify and customise the figures to their liking.
#' }
#'
#' The \code{ggRandomForests} package contains the following data functions:
#' \itemize{
#' \item \code{\link{gg_rfsrc}}: randomForest[SRC] predictions.
#' \item \code{\link{gg_error}}: randomForest[SRC] convergence rate based on the OOB error rate.
#' \item \code{\link{gg_roc}}: ROC curves for randomForest classification models.
#' \item \code{\link{gg_vimp}}: Variable Importance ranking for variable selection.
#' \item \code{\link{gg_minimal_depth}}: Minimal Depth ranking for variable selection
#' (Ishwaran et.al. 2010).
#' \item \code{\link{gg_minimal_vimp}}: Comparing Minimal Depth and VIMP rankings for variable
#' selection.
#' \item \code{\link{gg_interaction}}: Minimal Depth interaction detection (Ishwaran et.al. 2010)
#' \item \code{\link{gg_variable}}: Marginal variable dependence.
#' \item \code{\link{gg_partial}}: Partial (risk adjusted) variable dependence.
#' \item \code{\link{gg_partial_coplot}}: Partial variable conditional dependence
#' (computationally expensive).
#' \item \code{\link{gg_survival}}: Kaplan-Meier/Nelson-Aalen hazard analysis.
#' }
#'
#' Each of these data functions has an associated S3 plot function that returns \code{ggplot2} objects, either
#' individually or as a list, which can be further customised using standard \code{ggplot2} commands.
#'
#' @references
#' Breiman, L. (2001). Random forests, Machine Learning, 45:5-32.
#'
#' Ishwaran H. and Kogalur U.B. (2014). Random Forests for Survival,
#' Regression and Classification (RF-SRC), R package version 1.5.5.12.
#'
#' Ishwaran H. and Kogalur U.B. (2007). Random survival forests for R. R News
#' 7(2), 25--31.
#'
#' Ishwaran H., Kogalur U.B., Blackstone E.H. and Lauer M.S. (2008). Random
#' survival forests. Ann. Appl. Statist. 2(3), 841--860.
#'
#' Ishwaran, H., U. B. Kogalur, E. Z. Gorodeski, A. J. Minn, and M. S. Lauer (2010).
#' High-dimensional variable selection for survival data. J. Amer. Statist. Assoc.
#' 105, 205-217.
#'
#' Ishwaran, H. (2007). Variable importance in binary regression trees and forests.
#' Electronic J. Statist., 1, 519-537.
#'
#' Wickham, H. ggplot2: elegant graphics for data analysis. Springer New York, 2009.
#'
#' @docType package
#' @name ggRandomForests-package
#'
################
NULL
|
# ๋ฐ์ดํฐ ๋ถ์๊ฐ _ james \
# \
# ์คํฌ๋ฆฝํธ ์คํ(Run a script) \
## : Windows : 'Ctrl + Enter' \
## : MAC : 'Command + Enter'\
#---------------------------------
#1. ์๊ด๊ด๊ณ๋ถ์(Correlation) :์ฐ์ํ ๋ฐ์ดํฐ๋ก ์งํํ๋ค. ๋ฒ์ฃผํ์ผ๋ก๋ X
height<-c(164,175,166,185)
weight<-c(62,70,64,86)
cor(height,weight)
round(cor(height,weight),3)
cor.test(height,weight) #ํผ์ด์จ ์๊ด๊ณ์๋ถ์. x,y๋ณ์ ๊ฐ ๊ฐ๊ฐ ์
# ๋ ฅํ์. ๋ฐ์ดํฐํ๋ ์ํํ ๋ถ๊ฐ.
# install.packages("moonBook")
library(moonBook)
data(acs)
#install.packages("psych")
library(psych)
str(acs)
acs2<-acs[,c(1,6:9)]
cor(acs2)
#na๊ฐ ์กด์ฌํ ์ na ์ ์ธํ ๊ณ์ฐ
cor(acs2,use="na.or.complete")
#์ฐ์ ๋ํ๋ ฌ
pairs.panels(acs2)
#install.packages("PerformanceAnalytics");
library(PerformanceAnalytics)
#์ฐ์ ๋ํ๋ ฌ
chart.Correlation(acs2, histogram=TRUE, pch=19)
#cor.test: x,y๋๊ฐ ๋ณ์
#ํน์ฝฉ data์ ์ถ๊ฐ -> ๋ฐ์ดํฐ ํ๋์ ํฐ ์ํฅ
dat<-data.frame(
a=c(15,20,25,27,31,25,23,23,42,12,34,23,40),
b=c(50,55,52,52,56,54,62,56,70,46,43,50,54)
)
plot(dat$a,dat$b)
abline(lm(dat$b~dat$a))
cor(dat$a,dat$b)
#outlier ์ถ๊ฐ #์๊ด๊ณ์์์๋ outlier์ํฅ์ ๋ง์ด ๋ฐ๊ธฐ ๋๋ฌธ์ ์ฒ๋ฆฌ์ ์ ์ํด์ผํจ.
dat[14,]<-c(200,230)
plot(dat$a,dat$b)
abline(lm(dat$b~dat$a))
cor(dat$a,dat$b)
#heatmap expression
# install.packages("corrplot")
library(corrplot)
corrplot(cor(acs2,use="na.or.complete"))
corrplot(cor(acs2,use="na.or.complete"),method="square")
corrplot(cor(acs2,use="na.or.complete"),method="ellipse")
corrplot(cor(acs2,use="na.or.complete"),method="number")
corrplot(cor(acs2,use="na.or.complete"),method="shade")
corrplot(cor(acs2,use="na.or.complete"),method="color")
corrplot(cor(acs2,use="na.or.complete"),method="pie")
#cor์ ๋น๋ชจ์์ ์ธ ํํ๋ค
#1. spearman
#2. kendall's tau
cor(height,weight)
cor(height,weight,method="spearman")
cor(height,weight,method="kendall")
?cor
############### ์ฐ์ต๋ฌธ์ ###############
data(iris)
#1. iris์์ ์ฐ์ํ ๋ฐ์ดํฐ๋ฅผ ๊ฐ๊ณ ์๊ด๊ด๊ณ๋ฅผ ๊ตฌํ๊ณ Sepal.Length์ ๊ฐ์ฅ ์๊ด์๋ ๋ณ์๋ ๋ฌด์์ธ๊ฐ?
#(2๊ฐ์ง ์ด์์ ์๊ฐํ๋ฅผ ๊ทธ๋ ค๋ณด์์ค)
pairs.panels(iris[,1:4])
#####
data(mtcars)
head(mtcars)
#mpg์์ qesc๊น์ง์ ๋ณ์๋ฅผ ๊ฐ๊ณ ์๊ด๊ด๊ณ๋ฅผ ๊ตฌํ์์ค
##################################
#2. 2 ์ง๋จ์๋ํ ํ๊ท ๋น๊ต t-test
t_data<-data.frame(
group=c(1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2),
score=c(100,100,80,50,40,90,20,50,50,70,30,40,30,70,30,40,30,60,30,60),
age=c(80,20,30,70,90,20,30,60,50,50,20,30,20,20,25,10,13,12,11,10))
ggplot(t_data,aes(x=factor(group),y=score,fill=factor(group))) + geom_boxplot()
#์ ๊ท์ฑ๊ฒ์
#์ ๊ท์ฑ ๊ฒ์ - shapiro
shapiro.test(t_data$score)
#๋ฑ๋ถ์ฐ์ฑ ๊ฒ์
t_data_1<-t_data[t_data$group==1,]
t_data_2<-t_data[t_data$group==2,]
var.test(t_data_1$score,t_data_2$score)
#t_test๋ฐฉ๋ฒ 2๊ฐ์ง ์กด์ฌ
#1๋ฒ t.test๋ฐฉ๋ฒ
t.test(t_data_1$score,t_data_2$score,var.equal=T) #var.equal=T ๊ฐ ๊ธฐ๋ณธ๊ฐ. F์ผ ๊ฒฝ์ฐ์๋ ์จ์ฃผ์ด์ผ ํจ.
#๊ฒฐ๊ณผ: ์ ์ํ๋ฅ ์ด 0.05๋ณด๋ค ์์์ ๋๋ฆฝ๊ฐ์ค ์ฑํ(score ํ๊ท ์ ๊ฐ์ ์์ค์ด ์๋๋ค. )
#2๋ฒ t.test๋ฐฉ๋ฒ
t.test(score~group,data=t_data,var.equal=T)
#๋ฑ๋ถ์ฐ์ด ์๋๊ฒฝ์ฐ
var.test(t_data_1$age,t_data_2$age)
t.test(t_data_1$age,t_data_2$age,var.equal=F)
#๋์ T-test์ ์ํ(์ /ํ๋น๊ต) - paried=T๋ฅผ ๋ถ์ฌ์ค
before_op = c(137,119,117,122,132,110,114,117)
after_op = c(126,111,117,116,135,110,113,112)
t.test(before_op,after_op,paired=T)
#๊ฒฐ๊ณผ: ์ ์ํ๋ฅ ์ด 0.05๋ณด๋ค ํฌ๋ฏ๋ก ๊ท๋ฌด๊ฐ์ค ์ฑํ (์ฐจ์ด๊ฐ ์๋ค. ๊ฐ์ 3.5 ์ฐจ์ด ๋์ง๋ง, ํต๊ณ์ฉ์ผ๋ก๋ ์ฐจ์ด ์๋ ์์ค)
mid = c(16, 20, 21, 22, 23, 22, 27, 25, 27, 28)
final = c(19, 20, 24, 24, 25, 25, 26, 26, 28, 32)
t.test(mid,final, paired=TRUE)
################## T๊ฒ์ ์ฐ์ตํด๋ณด๊ธฐ ###################
# 1
a = c(175, 168, 168, 190, 156, 181, 182, 175, 174, 179)
b = c(185, 169, 173, 173, 188, 186, 175, 174, 179, 180)
### ๋ค์ ๋ฐ์ดํฐ๋ฅผ ๊ฐ๊ณ T๊ฒ์ ์ ํ์์ค (์ ๊ท์ฑ ์๋ต)
var.test(a,b)
t.test(a,b, var.equal=T)
#์ ์ํ๋ฅ ์ด 0.356์ผ๋ก ์ ์์์ค 0.05๋ณด๋ค ํฌ๋ฏ๋ก ์ฑํ. ๋ ์ง๋จ์ ์ฐจ์ด๋ ์๋ค.
#(๊ท๋ฌด๊ฐ์ค: a์ง๋จ๊ณผ b์ง๋จ์ ์ฐจ์ด๊ฐ ์๋ค.)
# 2
data(mtcars)
# am ๋ณ์์ ๋ฐ๋ผ mpg๊ฐ ์ฐจ์ด๊ฐ ์๋์ง ํ์ธํ์์ค
######################################################
#3.3๊ฐ์ด์์ ํ๊ท ๋น๊ต ์ ๋ถ์ฐ๋ถ์ - Anova(Analysis of Variance)
#install.packages("laercio")
anova_data<-data.frame(
group=c(1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3),
score=c(50.5, 52.1, 51.9, 52.4, 50.6, 51.4, 51.2, 52.2, 51.5, 50.8,47.5, 47.7, 46.6, 47.1, 47.2, 47.8, 45.2, 47.4, 45.0, 47.9,46.0, 47.1, 45.6, 47.1, 47.2, 46.4, 45.9, 47.1, 44.9, 46.2))
ggplot(anova_data,aes(x=factor(group),y=score,fill=factor(group))) + geom_boxplot()
tapply(anova_data$score,anova_data$group,mean)
tapply(anova_data$score,anova_data$group,max)
#๋ฑ๋ถ์ฐ์ฑ test
bartlett.test(score~as.factor(group),data=anova_data)
#oneway.test
oneway.test(score~group,data=anova_data,var.equal = T)
?aov
a1<-aov(score~group,data=anova_data)
summary(aov(score~group,data=anova_data))
#์ฌํ๋ถ์
library(laercio)
LDuncan(a1, "group")
#group์ ํด๋นํ๋ ๋ถ๋ถ์ด ๋ฌธ์ํ ์ด์ด์ผํจ
TukeyHSD(aov(score~as.character(group),data=anova_data))
plot(TukeyHSD(aov(score~as.character(group),data=anova_data)))
######################
#### ๋ฑ๋ถ์ฐ์ด ์๋๊ฒฝ์ฐ
anova_data2<-data.frame(
group=c(1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3),
score=c(70, 30, 20.3, 85.3, 50.6, 51.4, 51.2, 52.2, 51.5, 50.8,47.5, 47.7, 46.6, 47.1, 47.2, 47.8, 45.2, 47.4, 45.0, 47.9,46.0, 47.1, 45.6, 47.1, 47.2, 46.4, 45.9, 47.1, 44.9, 46.2))
#๋ฑ๋ถ์ฐ์ฑ test
bartlett.test(score~as.factor(group),data=anova_data2)
#oneway.test
oneway.test(score~group,data=anova_data2,var.equal = F)
a2<-aov(score~group,data=anova_data2)
summary(aov(score~group,data=anova_data2))
#์ฌํ๋ถ์
library(laercio)
LDuncan(a2, "group")
#group์ ํด๋นํ๋ ๋ถ๋ถ์ด ๋ฌธ์ํ ์ด์ด์ผํจ
TukeyHSD(aov(score~as.character(group),data=anova_data2))
plot(TukeyHSD(aov(score~as.character(group),data=anova_data2)))
################## F๊ฒ์ ์ฐ์ตํด๋ณด๊ธฐ ###################
data(iris)
#1. iris์์ Species๋ง๋ค Sepal.Width์ ์ฐจ์ด๊ฐ ์๋์ง ํ์ธํ์์ค
# ์ฌํ ๊ฒ์ ๊ณผ ํด์์ ์ ์ผ์์ค
#2 mtcars๋ฐ์ดํฐ์์ gear๋ฐ๋ผ mpg์ ์ฐจ์ด๊ฐ ์๋์ง ํ์ธํ์์ค
# ์ฌํ ๊ฒ์ ๊ณผ ํด์์ ์ ์ผ์์ค
#######################################################
#๋ฌธ์ํ ๋ฐ์ดํฐ๋ถ์
data(acs)
head(acs)
# ์ฑ๋ณ๊ณผ ๋น๋ง์ ์ฐ๊ด์ด ์์๊น?
table(acs$sex,acs$obesity)
acs %>%
dplyr::count(sex,obesity) %>%
ggplot(aes(x=sex,y=n,fill=obesity)) + geom_bar(stat="identity",position = "dodge")
chisq.test(acs$sex,acs$obesity,correct = F)
chisq.test(table(acs$sex,acs$obesity))
# correct?
# ๋น ์ฐ์์ ์ดํญ๋ถํฌ์์ ํ๋ฅ ์ด๋ ๋น์จ์ ์๊ธฐ ์ํ์ฌ ์ฐ์์ ๋ถํฌ์ธ
# ์นด์ด์ ๊ณฑ ๋ถํฌ๋ฅผ ์ด์ฉํ ๋๋ ์ฐ์์ฑ์ ๊ฐ์ง๋๋ก ๋น์ฐ์์ฑ์ ๊ต์ ํด์ผํ ํ์ํ ์์ ๋ ์ฌ์ฉํ๋ ๋ฐฉ๋ฒ
# ๋ณดํต 2X2 ํ๋ ฌ์์ ์์ฃผ ์ฌ์ฉํจ
install.packages("gmodels")
library(gmodels)
CrossTable(acs$sex,acs$obesity,chisq=T,prop.t=F)
CrossTable(table(acs$sex,acs$obesity))
0.089 + 0.175 + 0.045 + 0.088
# ์ผ๋ฐํ์
# ์นด์ด ์ ๊ณฑ ( ๊ธฐ๋์น ๋น์จ )
# ํ์ ๊ธฐ์ค์ผ๋ก ๋น์จ ๊ฐ ( ๊ฐ๋ก๋ก ์ฝ๋๋ค. )
# ์ปฌ๋ผ์ ๊ธฐ์ค์ผ๋ก ๋น์จ ๊ฐ ( ์ธ๋ก๋ก ์ฝ๋๋ค. )
# ์ ์ฒด๋ฅผ ๊ธฐ์ค์ผ๋ก ๋น์จ ๊ฐ
# ์ฑ๋ณ๊ณผ ๋น๋ง์ ์ฐ๊ด์ด ์์๊น?
table(acs$sex,acs$smoking)
acs %>%
dplyr::count(sex,smoking) %>%
ggplot(aes(x=sex,y=n,fill=smoking)) + geom_bar(stat="identity",position = "dodge")
chisq.test(acs$sex,acs$smoking,correct = F)
chisq.test(table(acs$sex,acs$smoking),correct = F)
#์๋ฃ ์์ฑ
dat <- matrix(c(20,24,15,5),ncol=2)
row.names(dat) <- c("ํก์ฐ","๋นํก์ฐ")
colnames(dat)<- c("์ ์","๋น์ ์")
dat
xtab <- matrix(c(384, 536, 335,951, 869, 438),nrow=2)
dimnames(xtab) <- list(
stone = c("yes", "no"),
age = c("30-39", "40-49", "50-59")
)
colSums(xtab)
prop.trend.test(xtab[1,],colSums(xtab))
mosaicplot(t(xtab),col=c("deepskyblue", "brown2"))
# ๋์ด ๋น์จ์ด ๋์ผํ์ง ์๋ค
################## ์นด์ด์ ๊ณฑ ์ฐ์ตํด๋ณด๊ธฐ ###################
# 1
data("survey")
# survey ๋ฐํฐ์ด์์ Sex๋ณ์์ Smoke๊ฐ ์ฐ๊ด์ด ์๋์ง ๊ฒ์ ํ์ฌ๋ผ
# ์๊ฐํ ํฌํจ
# 2
delivery = read.csv('SKT.csv', fileEncoding='UTF-8')
head(delivery)
# ์์ผ๋ณ ์
# ์ข
# ์ ์ฐจ์ด๊ฐ ์๋์ง ๊ฒ์ ํ์ฌ๋ผ
#######################################################
|
/๋๋
ธ_8day.R
|
no_license
|
julesdata/R_basic
|
R
| false
| false
| 8,821
|
r
|
# ๋ฐ์ดํฐ ๋ถ์๊ฐ _ james \
# \
# ์คํฌ๋ฆฝํธ ์คํ(Run a script) \
## : Windows : 'Ctrl + Enter' \
## : MAC : 'Command + Enter'\
#---------------------------------
#1. ์๊ด๊ด๊ณ๋ถ์(Correlation) :์ฐ์ํ ๋ฐ์ดํฐ๋ก ์งํํ๋ค. ๋ฒ์ฃผํ์ผ๋ก๋ X
height<-c(164,175,166,185)
weight<-c(62,70,64,86)
cor(height,weight)
round(cor(height,weight),3)
cor.test(height,weight) #ํผ์ด์จ ์๊ด๊ณ์๋ถ์. x,y๋ณ์ ๊ฐ ๊ฐ๊ฐ ์
# ๋ ฅํ์. ๋ฐ์ดํฐํ๋ ์ํํ ๋ถ๊ฐ.
# install.packages("moonBook")
library(moonBook)
data(acs)
#install.packages("psych")
library(psych)
str(acs)
acs2<-acs[,c(1,6:9)]
cor(acs2)
#na๊ฐ ์กด์ฌํ ์ na ์ ์ธํ ๊ณ์ฐ
cor(acs2,use="na.or.complete")
#์ฐ์ ๋ํ๋ ฌ
pairs.panels(acs2)
#install.packages("PerformanceAnalytics");
library(PerformanceAnalytics)
#์ฐ์ ๋ํ๋ ฌ
chart.Correlation(acs2, histogram=TRUE, pch=19)
#cor.test: x,y๋๊ฐ ๋ณ์
#ํน์ฝฉ data์ ์ถ๊ฐ -> ๋ฐ์ดํฐ ํ๋์ ํฐ ์ํฅ
dat<-data.frame(
a=c(15,20,25,27,31,25,23,23,42,12,34,23,40),
b=c(50,55,52,52,56,54,62,56,70,46,43,50,54)
)
plot(dat$a,dat$b)
abline(lm(dat$b~dat$a))
cor(dat$a,dat$b)
#outlier ์ถ๊ฐ #์๊ด๊ณ์์์๋ outlier์ํฅ์ ๋ง์ด ๋ฐ๊ธฐ ๋๋ฌธ์ ์ฒ๋ฆฌ์ ์ ์ํด์ผํจ.
dat[14,]<-c(200,230)
plot(dat$a,dat$b)
abline(lm(dat$b~dat$a))
cor(dat$a,dat$b)
#heatmap expression
# install.packages("corrplot")
library(corrplot)
corrplot(cor(acs2,use="na.or.complete"))
corrplot(cor(acs2,use="na.or.complete"),method="square")
corrplot(cor(acs2,use="na.or.complete"),method="ellipse")
corrplot(cor(acs2,use="na.or.complete"),method="number")
corrplot(cor(acs2,use="na.or.complete"),method="shade")
corrplot(cor(acs2,use="na.or.complete"),method="color")
corrplot(cor(acs2,use="na.or.complete"),method="pie")
#cor์ ๋น๋ชจ์์ ์ธ ํํ๋ค
#1. spearman
#2. kendall's tau
cor(height,weight)
cor(height,weight,method="spearman")
cor(height,weight,method="kendall")
?cor
############### ์ฐ์ต๋ฌธ์ ###############
data(iris)
#1. iris์์ ์ฐ์ํ ๋ฐ์ดํฐ๋ฅผ ๊ฐ๊ณ ์๊ด๊ด๊ณ๋ฅผ ๊ตฌํ๊ณ Sepal.Length์ ๊ฐ์ฅ ์๊ด์๋ ๋ณ์๋ ๋ฌด์์ธ๊ฐ?
#(2๊ฐ์ง ์ด์์ ์๊ฐํ๋ฅผ ๊ทธ๋ ค๋ณด์์ค)
pairs.panels(iris[,1:4])
#####
data(mtcars)
head(mtcars)
#mpg์์ qesc๊น์ง์ ๋ณ์๋ฅผ ๊ฐ๊ณ ์๊ด๊ด๊ณ๋ฅผ ๊ตฌํ์์ค
##################################
#2. 2 ์ง๋จ์๋ํ ํ๊ท ๋น๊ต t-test
t_data<-data.frame(
group=c(1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2),
score=c(100,100,80,50,40,90,20,50,50,70,30,40,30,70,30,40,30,60,30,60),
age=c(80,20,30,70,90,20,30,60,50,50,20,30,20,20,25,10,13,12,11,10))
ggplot(t_data,aes(x=factor(group),y=score,fill=factor(group))) + geom_boxplot()
#์ ๊ท์ฑ๊ฒ์
#์ ๊ท์ฑ ๊ฒ์ - shapiro
shapiro.test(t_data$score)
#๋ฑ๋ถ์ฐ์ฑ ๊ฒ์
t_data_1<-t_data[t_data$group==1,]
t_data_2<-t_data[t_data$group==2,]
var.test(t_data_1$score,t_data_2$score)
#t_test๋ฐฉ๋ฒ 2๊ฐ์ง ์กด์ฌ
#1๋ฒ t.test๋ฐฉ๋ฒ
t.test(t_data_1$score,t_data_2$score,var.equal=T) #var.equal=T ๊ฐ ๊ธฐ๋ณธ๊ฐ. F์ผ ๊ฒฝ์ฐ์๋ ์จ์ฃผ์ด์ผ ํจ.
#๊ฒฐ๊ณผ: ์ ์ํ๋ฅ ์ด 0.05๋ณด๋ค ์์์ ๋๋ฆฝ๊ฐ์ค ์ฑํ(score ํ๊ท ์ ๊ฐ์ ์์ค์ด ์๋๋ค. )
#2๋ฒ t.test๋ฐฉ๋ฒ
t.test(score~group,data=t_data,var.equal=T)
#๋ฑ๋ถ์ฐ์ด ์๋๊ฒฝ์ฐ
var.test(t_data_1$age,t_data_2$age)
t.test(t_data_1$age,t_data_2$age,var.equal=F)
#๋์ T-test์ ์ํ(์ /ํ๋น๊ต) - paried=T๋ฅผ ๋ถ์ฌ์ค
before_op = c(137,119,117,122,132,110,114,117)
after_op = c(126,111,117,116,135,110,113,112)
t.test(before_op,after_op,paired=T)
#๊ฒฐ๊ณผ: ์ ์ํ๋ฅ ์ด 0.05๋ณด๋ค ํฌ๋ฏ๋ก ๊ท๋ฌด๊ฐ์ค ์ฑํ (์ฐจ์ด๊ฐ ์๋ค. ๊ฐ์ 3.5 ์ฐจ์ด ๋์ง๋ง, ํต๊ณ์ฉ์ผ๋ก๋ ์ฐจ์ด ์๋ ์์ค)
mid = c(16, 20, 21, 22, 23, 22, 27, 25, 27, 28)
final = c(19, 20, 24, 24, 25, 25, 26, 26, 28, 32)
t.test(mid,final, paired=TRUE)
################## T๊ฒ์ ์ฐ์ตํด๋ณด๊ธฐ ###################
# 1
a = c(175, 168, 168, 190, 156, 181, 182, 175, 174, 179)
b = c(185, 169, 173, 173, 188, 186, 175, 174, 179, 180)
### ๋ค์ ๋ฐ์ดํฐ๋ฅผ ๊ฐ๊ณ T๊ฒ์ ์ ํ์์ค (์ ๊ท์ฑ ์๋ต)
var.test(a,b)
t.test(a,b, var.equal=T)
#์ ์ํ๋ฅ ์ด 0.356์ผ๋ก ์ ์์์ค 0.05๋ณด๋ค ํฌ๋ฏ๋ก ์ฑํ. ๋ ์ง๋จ์ ์ฐจ์ด๋ ์๋ค.
#(๊ท๋ฌด๊ฐ์ค: a์ง๋จ๊ณผ b์ง๋จ์ ์ฐจ์ด๊ฐ ์๋ค.)
# 2
data(mtcars)
# am ๋ณ์์ ๋ฐ๋ผ mpg๊ฐ ์ฐจ์ด๊ฐ ์๋์ง ํ์ธํ์์ค
######################################################
#3.3๊ฐ์ด์์ ํ๊ท ๋น๊ต ์ ๋ถ์ฐ๋ถ์ - Anova(Analysis of Variance)
#install.packages("laercio")
anova_data<-data.frame(
group=c(1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3),
score=c(50.5, 52.1, 51.9, 52.4, 50.6, 51.4, 51.2, 52.2, 51.5, 50.8,47.5, 47.7, 46.6, 47.1, 47.2, 47.8, 45.2, 47.4, 45.0, 47.9,46.0, 47.1, 45.6, 47.1, 47.2, 46.4, 45.9, 47.1, 44.9, 46.2))
ggplot(anova_data,aes(x=factor(group),y=score,fill=factor(group))) + geom_boxplot()
tapply(anova_data$score,anova_data$group,mean)
tapply(anova_data$score,anova_data$group,max)
#๋ฑ๋ถ์ฐ์ฑ test
bartlett.test(score~as.factor(group),data=anova_data)
#oneway.test
oneway.test(score~group,data=anova_data,var.equal = T)
?aov
a1<-aov(score~group,data=anova_data)
summary(aov(score~group,data=anova_data))
#์ฌํ๋ถ์
library(laercio)
LDuncan(a1, "group")
#group์ ํด๋นํ๋ ๋ถ๋ถ์ด ๋ฌธ์ํ ์ด์ด์ผํจ
TukeyHSD(aov(score~as.character(group),data=anova_data))
plot(TukeyHSD(aov(score~as.character(group),data=anova_data)))
######################
#### ๋ฑ๋ถ์ฐ์ด ์๋๊ฒฝ์ฐ
anova_data2<-data.frame(
group=c(1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3),
score=c(70, 30, 20.3, 85.3, 50.6, 51.4, 51.2, 52.2, 51.5, 50.8,47.5, 47.7, 46.6, 47.1, 47.2, 47.8, 45.2, 47.4, 45.0, 47.9,46.0, 47.1, 45.6, 47.1, 47.2, 46.4, 45.9, 47.1, 44.9, 46.2))
#๋ฑ๋ถ์ฐ์ฑ test
bartlett.test(score~as.factor(group),data=anova_data2)
#oneway.test
oneway.test(score~group,data=anova_data2,var.equal = F)
a2<-aov(score~group,data=anova_data2)
summary(aov(score~group,data=anova_data2))
#์ฌํ๋ถ์
library(laercio)
LDuncan(a2, "group")
#group์ ํด๋นํ๋ ๋ถ๋ถ์ด ๋ฌธ์ํ ์ด์ด์ผํจ
TukeyHSD(aov(score~as.character(group),data=anova_data2))
plot(TukeyHSD(aov(score~as.character(group),data=anova_data2)))
################## F๊ฒ์ ์ฐ์ตํด๋ณด๊ธฐ ###################
data(iris)
#1. iris์์ Species๋ง๋ค Sepal.Width์ ์ฐจ์ด๊ฐ ์๋์ง ํ์ธํ์์ค
# ์ฌํ ๊ฒ์ ๊ณผ ํด์์ ์ ์ผ์์ค
#2 mtcars๋ฐ์ดํฐ์์ gear๋ฐ๋ผ mpg์ ์ฐจ์ด๊ฐ ์๋์ง ํ์ธํ์์ค
# ์ฌํ ๊ฒ์ ๊ณผ ํด์์ ์ ์ผ์์ค
#######################################################
#๋ฌธ์ํ ๋ฐ์ดํฐ๋ถ์
data(acs)
head(acs)
# ์ฑ๋ณ๊ณผ ๋น๋ง์ ์ฐ๊ด์ด ์์๊น?
table(acs$sex,acs$obesity)
acs %>%
dplyr::count(sex,obesity) %>%
ggplot(aes(x=sex,y=n,fill=obesity)) + geom_bar(stat="identity",position = "dodge")
chisq.test(acs$sex,acs$obesity,correct = F)
chisq.test(table(acs$sex,acs$obesity))
# correct?
# ๋น ์ฐ์์ ์ดํญ๋ถํฌ์์ ํ๋ฅ ์ด๋ ๋น์จ์ ์๊ธฐ ์ํ์ฌ ์ฐ์์ ๋ถํฌ์ธ
# ์นด์ด์ ๊ณฑ ๋ถํฌ๋ฅผ ์ด์ฉํ ๋๋ ์ฐ์์ฑ์ ๊ฐ์ง๋๋ก ๋น์ฐ์์ฑ์ ๊ต์ ํด์ผํ ํ์ํ ์์ ๋ ์ฌ์ฉํ๋ ๋ฐฉ๋ฒ
# ๋ณดํต 2X2 ํ๋ ฌ์์ ์์ฃผ ์ฌ์ฉํจ
install.packages("gmodels")
library(gmodels)
CrossTable(acs$sex,acs$obesity,chisq=T,prop.t=F)
CrossTable(table(acs$sex,acs$obesity))
0.089 + 0.175 + 0.045 + 0.088
# ์ผ๋ฐํ์
# ์นด์ด ์ ๊ณฑ ( ๊ธฐ๋์น ๋น์จ )
# ํ์ ๊ธฐ์ค์ผ๋ก ๋น์จ ๊ฐ ( ๊ฐ๋ก๋ก ์ฝ๋๋ค. )
# ์ปฌ๋ผ์ ๊ธฐ์ค์ผ๋ก ๋น์จ ๊ฐ ( ์ธ๋ก๋ก ์ฝ๋๋ค. )
# ์ ์ฒด๋ฅผ ๊ธฐ์ค์ผ๋ก ๋น์จ ๊ฐ
# ์ฑ๋ณ๊ณผ ๋น๋ง์ ์ฐ๊ด์ด ์์๊น?
table(acs$sex,acs$smoking)
acs %>%
dplyr::count(sex,smoking) %>%
ggplot(aes(x=sex,y=n,fill=smoking)) + geom_bar(stat="identity",position = "dodge")
chisq.test(acs$sex,acs$smoking,correct = F)
chisq.test(table(acs$sex,acs$smoking),correct = F)
#์๋ฃ ์์ฑ
dat <- matrix(c(20,24,15,5),ncol=2)
row.names(dat) <- c("ํก์ฐ","๋นํก์ฐ")
colnames(dat)<- c("์ ์","๋น์ ์")
dat
xtab <- matrix(c(384, 536, 335,951, 869, 438),nrow=2)
dimnames(xtab) <- list(
stone = c("yes", "no"),
age = c("30-39", "40-49", "50-59")
)
colSums(xtab)
prop.trend.test(xtab[1,],colSums(xtab))
mosaicplot(t(xtab),col=c("deepskyblue", "brown2"))
# ๋์ด ๋น์จ์ด ๋์ผํ์ง ์๋ค
################## ์นด์ด์ ๊ณฑ ์ฐ์ตํด๋ณด๊ธฐ ###################
# 1
data("survey")
# survey ๋ฐํฐ์ด์์ Sex๋ณ์์ Smoke๊ฐ ์ฐ๊ด์ด ์๋์ง ๊ฒ์ ํ์ฌ๋ผ
# ์๊ฐํ ํฌํจ
# 2
delivery = read.csv('SKT.csv', fileEncoding='UTF-8')
head(delivery)
# ์์ผ๋ณ ์
# ์ข
# ์ ์ฐจ์ด๊ฐ ์๋์ง ๊ฒ์ ํ์ฌ๋ผ
#######################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrap.R
\name{create_temp_table}
\alias{create_temp_table}
\title{Create a temporary table}
\usage{
create_temp_table(sql, table_name)
}
\arguments{
\item{table_name}{The name of the temp table you wish to create}
\item{sql}{The SQL query you want to create a temp table out of. Should
be a query that starts with a WITH or SELECT clause.}
}
\description{
Create a temporary table
}
\examples{
`dbtools::create_temp_table("SELECT a_col, count(*) as n FROM a_database.table GROUP BY a_col", table_name="temp_table_1")`
}
|
/man/create_temp_table.Rd
|
permissive
|
moj-analytical-services/dbtools
|
R
| false
| true
| 599
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrap.R
\name{create_temp_table}
\alias{create_temp_table}
\title{Create a temporary table}
\usage{
create_temp_table(sql, table_name)
}
\arguments{
\item{table_name}{The name of the temp table you wish to create}
\item{sql}{The SQL query you want to create a temp table out of. Should
be a query that starts with a WITH or SELECT clause.}
}
\description{
Create a temporary table
}
\examples{
`dbtools::create_temp_table("SELECT a_col, count(*) as n FROM a_database.table GROUP BY a_col", table_name="temp_table_1")`
}
|
# Mean response times (one value per inter-request interval in `clock`).
# The 1F/3F/5F/10F/15F suffixes appear to index an experiment parameter
# (number of workers/replicas?) and sem_F a run without it -- TODO confirm.
MRT_1F <-c(517.1468515630205, 85.13094142168089, 30.333207896694553, 12.694776264558937, 3.3041601673945418, 1.1823111717498882, 1.1892293502386786)
MRT_3F <-c(156.68929936163462, 11.540837783562276, 0.4512835621696538, 0.4509797929766453, 0.4502068233039181, 0.4496185276300172, 0.4543157082191288)
MRT_5F <-c(83.90319666471157, 0.3068151086494968, 0.30522314133037304, 0.3072588968084928, 0.30655265997285697, 0.3055812715727718, 0.3053297166713006)
MRT_10F <-c(29.55430642951759, 0.19832832665772515, 0.1971923924717474, 0.19796648905716516, 0.19615594370806338, 0.2034569237883263, 0.19617420889447737)
MRT_15F <-c(11.317736530583566, 0.167364215666193, 0.16172168266811013, 0.16701085329580515, 0.1598052657153692, 0.1645934043532696, 0.16216563797118075)
MRT_sem_F <-c(11.93430909937736, 0.6095414637034009, 0.6060645101029295, 0.612167181646899, 0.6146761002685637, 0.6096747087200697, 0.6125810476877268)
# Time between requests (x axis) for each measurement above.
clock <- c(0.1, 0.5, 1, 1.5, 2, 2.5, 3)
# Overlay every response-time series against the time between requests.
# Fixes: the extra series are plotted against `clock` rather than their
# index (lines() without an x vector uses 1..n), the colour vector covers
# all six series, and the legend names each series exactly once (the
# original duplicated "MRT_5F" and omitted "MRT_1F").
plot(clock, MRT_1F,
     type = "o",
     main = "Grรกfico",
     xlab = "Time Between Requests",
     ylab = "Responses Time",
     xlim = c(0, 3),
     ylim = c(0, 700),
     col = "red")
lines(clock, MRT_3F, type = "o", col = "Blue", pch = 2)
lines(clock, MRT_5F, type = "o", col = "Green", pch = 3)
lines(clock, MRT_10F, type = "o", col = "Yellow", pch = 4)
lines(clock, MRT_15F, type = "o", col = "Black", pch = 5)
lines(clock, MRT_sem_F, type = "o", col = "Purple", pch = 6)
legend("topright", pch = 1:6,
       col = c("red", "Blue", "Green", "Yellow", "Black", "Purple"),
       legend = c("MRT_1F", "MRT_3F", "MRT_5F", "MRT_10F", "MRT_15F", "MRT_sem_F"))
# Arrange the six per-series bar charts in a 3x2 grid.
layout(matrix(c(1, 2,
3, 4,
5,6), nr=3,
byrow=T))
# One bar chart per series, bars labelled by the inter-request interval.
# NOTE(review): ylim = c(0, 400) clips MRT_1F's first value (~517) -- confirm
# whether the truncation is intentional.
barplot(MRT_1F, col='red' ,
xlab = "Time Between Requests",
ylab="Responses Time",
xlim=c(0, 3),
names.arg=clock,
ylim=c(0, 400))
barplot(MRT_3F, col='Blue' ,
names.arg=clock,
xlab = "Time Between Requests",
ylab="Responses Time",
xlim=c(0, 3),
ylim=c(0, 100),)
barplot(MRT_5F, col='Green',
names.arg=clock,
xlab = "Time Between Requests",
ylab="Responses Time",
xlim=c(0, 3),
ylim=c(0, 60),)
barplot(MRT_10F, col='Yellow',
names.arg=clock,
xlab = "Time Between Requests",
ylab="Responses Time",
xlim=c(0, 3),
ylim=c(0, 20),)
barplot(MRT_15F, col="Black",
names.arg=clock,
xlab = "Time Between Requests",
ylab="Responses Time",
xlim=c(0, 3),
ylim=c(0, 8),)
barplot(MRT_sem_F, col="Purple",
names.arg=clock,
xlab = "Time Between Requests",
ylab="Responses Time",
xlim=c(0, 3),
ylim=c(0, 8),)
# NOTE(review): n=4 previews only four of the six layout panels -- confirm.
layout.show(n=4)
# Stacked bar chart of food-quality ratings (rows: Good / Very Good /
# Excellent, in percent) for four price bands (columns).
values <- matrix(
  c(53.8, 43.6, 2.6,
    33.9, 54.2, 11.9,
    2.6, 60.5, 36.8,
    0.0, 21.4, 78.6),
  nrow = 3,
  ncol = 4
)
# One colour per quality level (matrix row). The original listed a fourth
# colour ("black") that barplot()/legend() never drew, which made the
# colour-to-quality mapping misleading.
colors <- c("red", "green", "blue")
par(mfrow = c(1, 1))
prices <- c("$10-19", "$20-29", "$30-39", "$40-49")
barplot(values, main="Food Quality",
        xlab="Price",
        ylab="Quality %",
        names.arg = prices, col = colors)
Quality <- c("Good", "Very Good", "Excellent")
legend("topright", pch = c(15, 15, 15), col = colors,
       legend = Quality)
# Restrict the built-in airquality data set to May (Month == 5).
head(airquality)
airMay <- subset(airquality, airquality$Month==5)
head(airMay)
# Convert a temperature (vectorized) from Fahrenheit to Celsius.
# Fix: the original contained a corrupted minus sign ("โ"), which is a
# syntax error in R; restored to the ASCII '-'.
to_celsius <- function(temp) {
  (temp - 32) / 1.8
}
# Convert the May temperatures to Celsius. to_celsius() is vectorized, so
# the element-wise sapply() of the original is unnecessary (same result).
airMay$Temp <- to_celsius(airMay$Temp)
TemperaturaMaio <- airMay$Temp
# Density-scaled histogram with a kernel-density overlay.
hist(TemperaturaMaio, col = "blue", density = 10, freq = FALSE)
lines(density(TemperaturaMaio))
# Pie chart of per-country sales, downloaded from a training-material URL.
sales <- read.table("https://training-course-material.com/images/8/8f/Sales.txt",header=TRUE)
per <- round((sales$SALES*100)/sum(sales$SALES))  # share of total, in percent
labels <- sales$COUNTRY
lbls <- paste(per,"%", sep = "")  # slice labels such as "25%"
pie(sales$SALES, lbls, main="Grรกfico de Vendas", col=rainbow(nrow(sales)))
legend("topleft", legend = labels, cex = 0.7, fill=rainbow(nrow(sales)))
# Boxplot of insect counts per spray type.
# Fix: the original swapped the axis labels -- the x axis shows the spray
# categories and the y axis the insect count.
head(InsectSprays)
boxplot(count ~ spray, data = InsectSprays,
        col = c("yellow"),
        xlab = "Spray",
        ylab = "Contagem de Insetos",
        main = "Insetos/Spray")
# Scatter plot of car weight vs fuel economy, with a vertical line at the
# mean weight.
head(mtcars)
plot(mtcars$wt, mtcars$mpg, main="MtCars: Wt vs Mpg",
     xlab = "Peso", ylab="Milhas/h")
abline(v = mean(mtcars$wt), col="red", lwd=3, lty=2)
# Load the monitoring data set used by the next section (relative path).
monitoringCloudData_0.1 <- read.csv('datasets/monitoringCloudData_0.1.csv')
head(monitoringCloudData_0.1)
|
/exercio12.R
|
no_license
|
Tiagoblima/r-course-ufrpe
|
R
| false
| false
| 4,471
|
r
|
MRT_1F <-c(517.1468515630205, 85.13094142168089, 30.333207896694553, 12.694776264558937, 3.3041601673945418, 1.1823111717498882, 1.1892293502386786)
MRT_3F <-c(156.68929936163462, 11.540837783562276, 0.4512835621696538, 0.4509797929766453, 0.4502068233039181, 0.4496185276300172, 0.4543157082191288)
MRT_5F <-c(83.90319666471157, 0.3068151086494968, 0.30522314133037304, 0.3072588968084928, 0.30655265997285697, 0.3055812715727718, 0.3053297166713006)
MRT_10F <-c(29.55430642951759, 0.19832832665772515, 0.1971923924717474, 0.19796648905716516, 0.19615594370806338, 0.2034569237883263, 0.19617420889447737)
MRT_15F <-c(11.317736530583566, 0.167364215666193, 0.16172168266811013, 0.16701085329580515, 0.1598052657153692, 0.1645934043532696, 0.16216563797118075)
MRT_sem_F <-c(11.93430909937736, 0.6095414637034009, 0.6060645101029295, 0.612167181646899, 0.6146761002685637, 0.6096747087200697, 0.6125810476877268)
clock <- c(0.1, 0.5, 1, 1.5, 2, 2.5, 3)
# Overlay every response-time series against the time between requests.
# Fixes: the extra series are plotted against `clock` rather than their
# index (lines() without an x vector uses 1..n), the colour vector covers
# all six series, and the legend names each series exactly once (the
# original duplicated "MRT_5F" and omitted "MRT_1F").
plot(clock, MRT_1F,
     type = "o",
     main = "Grรกfico",
     xlab = "Time Between Requests",
     ylab = "Responses Time",
     xlim = c(0, 3),
     ylim = c(0, 700),
     col = "red")
lines(clock, MRT_3F, type = "o", col = "Blue", pch = 2)
lines(clock, MRT_5F, type = "o", col = "Green", pch = 3)
lines(clock, MRT_10F, type = "o", col = "Yellow", pch = 4)
lines(clock, MRT_15F, type = "o", col = "Black", pch = 5)
lines(clock, MRT_sem_F, type = "o", col = "Purple", pch = 6)
legend("topright", pch = 1:6,
       col = c("red", "Blue", "Green", "Yellow", "Black", "Purple"),
       legend = c("MRT_1F", "MRT_3F", "MRT_5F", "MRT_10F", "MRT_15F", "MRT_sem_F"))
# Split the device into a 3x2 grid (one panel per configuration) and draw
# one bar chart of response times per configuration.
# NOTE(review): `nr` relies on partial argument matching for `nrow`, and
# `byrow=T` uses the reassignable alias T for TRUE — both work, but are
# fragile style.
layout(matrix(c(1, 2,
                3, 4,
                5,6), nr=3,
       byrow=T))
# One panel per configuration; the y-limit shrinks as response times drop.
# NOTE(review): several calls below end with a trailing comma before the
# closing parenthesis (an empty trailing argument) — confirm that is
# intentional and accepted by barplot().
barplot(MRT_1F, col='red' ,
        xlab = "Time Between Requests",
        ylab="Responses Time",
        xlim=c(0, 3),
        names.arg=clock,
        ylim=c(0, 400))
barplot(MRT_3F, col='Blue' ,
        names.arg=clock,
        xlab = "Time Between Requests",
        ylab="Responses Time",
        xlim=c(0, 3),
        ylim=c(0, 100),)
barplot(MRT_5F, col='Green',
        names.arg=clock,
        xlab = "Time Between Requests",
        ylab="Responses Time",
        xlim=c(0, 3),
        ylim=c(0, 60),)
barplot(MRT_10F, col='Yellow',
        names.arg=clock,
        xlab = "Time Between Requests",
        ylab="Responses Time",
        xlim=c(0, 3),
        ylim=c(0, 20),)
barplot(MRT_15F, col="Black",
        names.arg=clock,
        xlab = "Time Between Requests",
        ylab="Responses Time",
        xlim=c(0, 3),
        ylim=c(0, 8),)
barplot(MRT_sem_F, col="Purple",
        names.arg=clock,
        xlab = "Time Between Requests",
        ylab="Responses Time",
        xlim=c(0, 3),
        ylim=c(0, 8),)
# NOTE(review): layout.show(n=4) previews only 4 of the 6 panels and runs
# after all panels were drawn — likely leftover debugging; confirm.
layout.show(n=4)
# Stacked bar chart of food-quality survey shares (%) by meal price bracket.
# The data vector is filled column-wise (matrix default), so each triple of
# percentages becomes one price-bracket column with rows
# Good / Very Good / Excellent.
values <- matrix(
  c(53.8,43.6,2.6,
    33.9, 54.2, 11.9,
    2.6, 60.5, 36.8,
    0.0, 21.4, 78.6),
  nrow = 3,
  ncol=4,
)
# NOTE(review): four colours are supplied for a three-row matrix, so the
# fourth colour ("black") is never used by the bar segments, and the
# three-entry legend below only consumes the first three.
colors <- c("red","green", "blue", "black")
# Reset any previous multi-panel layout to a single plotting region.
par(mfrow=c(1,1))
prices <- c("$10-19", "$20-29", "$30-39", "$40-49")
barplot(values, main="Food Quality",
        xlab="Price",
        ylab="Quality %",
        names.arg =prices, col=colors )
Quality <- c("Good", "Very Good", "Excellent")
legend("topright", pch = c(15,15,15), col =colors,
       legend=Quality)
# Built-in airquality data set: keep only the May (Month == 5) observations.
head(airquality)
airMay <- subset(airquality, airquality$Month==5)
head(airMay)
# Convert a temperature (scalar or vector) from degrees Fahrenheit to
# degrees Celsius.
# BUG FIX: the original body contained a mojibake character in place of
# the minus sign ("temp โ 32"), which is a parse error in R.
to_celsius <- function(temp) {
  return((temp - 32) / 1.8)
}
# Convert the May temperatures from Fahrenheit to Celsius and plot their
# distribution as a density-scaled histogram with a kernel-density overlay.
# NOTE(review): Temp is a plain numeric vector, so the vectorised call
# to_celsius(airMay$Temp) would work without sapply().
airMay$Temp <- sapply(airMay$Temp,FUN=to_celsius )
TemperaturaMaio <- airMay$Temp
hist(TemperaturaMaio, col = "blue", density = 10, freq = F)
lines(density(TemperaturaMaio))
# Sales pie chart: share of total sales per country, read from a remote
# tab/space-separated file (requires network access).
sales <- read.table("https://training-course-material.com/images/8/8f/Sales.txt", header = TRUE)
# Percentage of total sales contributed by each country, rounded to integers.
per <- round((sales$SALES * 100) / sum(sales$SALES))
labels <- sales$COUNTRY
lbls <- paste(per, "%", sep = "")
# BUG FIX: the chart title contained mojibake ("Grรกfico") for the
# Portuguese word "Gráfico"; restored the intended UTF-8 text.
pie(sales$SALES, lbls, main = "Gráfico de Vendas", col = rainbow(nrow(sales)))
legend("topleft", legend = labels, cex = 0.7, fill = rainbow(nrow(sales)))
# Box plot of insect counts per spray type from the built-in InsectSprays
# data set.
head(InsectSprays)
# BUG FIX: the axis labels were swapped — in the formula `count ~ spray`
# the spray categories are on the x-axis and the insect counts on the
# y-axis, so "Spray" belongs on x and "Contagem de Insetos" on y.
boxplot(count ~ spray, data = InsectSprays
        , col=c("yellow"),
        xlab = "Spray",
        ylab = "Contagem de Insetos",
        main="Insetos/Spray")
# Built-in mtcars: scatter of weight vs fuel efficiency, with the mean
# weight marked by a dashed vertical red line.
head(mtcars)
plot(mtcars$wt, mtcars$mpg, main="MtCars: Wt vs Mpg",
     xlab = "Peso", ylab="Milhas/h")
abline(v = mean(mtcars$wt), col="red", lwd=3, lty=2)
# NOTE(review): relative path — this requires the current working
# directory to contain a 'datasets' folder with the CSV file.
monitoringCloudData_0.1 <- read.csv('datasets/monitoringCloudData_0.1.csv')
head(monitoringCloudData_0.1)
|
rnorm(1)
rnorm(2)
rnorm(3)
rnorm(4)
|
/Rcode.R
|
no_license
|
Ysluib/gitlearning
|
R
| false
| false
| 36
|
r
|
# Demo: draw standard-normal samples of increasing size with rnorm().
# NOTE(review): no seed is set (set.seed), so the output differs on
# every run.
rnorm(1)
rnorm(2)
rnorm(3)
rnorm(4)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{allstations}
\alias{allstations}
\title{A tibble of all Canadian stations and their names.}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 7791 rows and 3 columns.}
\source{
HYDAT
}
\usage{
allstations
}
\description{
A shorthand to avoid having to always call \code{STATIONS}. Only up to date as of 2017-07-18.
}
\keyword{datasets}
|
/man/allstations.Rd
|
permissive
|
lawinslow/tidyhydat
|
R
| false
| true
| 490
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{allstations}
\alias{allstations}
\title{A tibble of all Canadian stations and their names.}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 7791 rows and 3 columns.}
\source{
HYDAT
}
\usage{
allstations
}
\description{
A shorthand to avoid having to always call \code{STATIONS}. Only up to date as of 2017-07-18.
}
\keyword{datasets}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{add_guide_legend}
\alias{add_guide_legend}
\title{Defunct function for adding a legend}
\usage{
add_guide_legend(...)
}
\arguments{
\item{...}{Other arguments.}
}
\description{
This function has been replaced with \code{\link{add_legend}}.
}
|
/man/add_guide_legend.Rd
|
no_license
|
rlzijdeman/ggvis
|
R
| false
| false
| 302
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{add_guide_legend}
\alias{add_guide_legend}
\title{Defunct function for adding a legend}
\usage{
add_guide_legend(...)
}
\arguments{
\item{...}{Other arguments.}
}
\description{
This function has been replaced with \code{\link{add_legend}}.
}
|
## PROJECT 1 : MODULE 1 ##
## SCRIPT FOR DATA MINING ##
library(twitteR)
library(df2json)
#library(rjson)
library(jsonlite)
CONSUMER_KEY <- "***********************"
CONSUMER_SECRET <- "*******************************************"
ACCESS_TOKEN <- "*******************************************"
ACCESS_TOKEN_SECRET <- "*******************************************"
#getTwitterOAuth(consumer_key, consumer_secret)
setup_twitter_oauth(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
tweets = searchTwitter("#election2016",n=10000,lang="en", since="2016-02-27",until = "2016-02-28")
tweets_df = twListToDF(tweets)
#write(exportJson<-df2json(tweets_df), "test.json")
write(exportJson<-toJSON(tweets_df), "electionData27.json")
|
/UB DIC Projects/Project1_R Data Analytics/Tweet Collection and Import/CollectP1athigale.R
|
no_license
|
aniket898/UBProjects
|
R
| false
| false
| 741
|
r
|
## PROJECT 1 : MODULE 1 ##
## SCRIPT FOR DATA MINING ##
# Collect up to 10,000 English tweets tagged #election2016 for a single
# day and dump them to a JSON file for later analysis.
library(twitteR)
library(df2json)
#library(rjson)
library(jsonlite)
# Twitter API credentials (redacted with asterisks before publishing).
CONSUMER_KEY <- "***********************"
CONSUMER_SECRET <- "*******************************************"
ACCESS_TOKEN <- "*******************************************"
ACCESS_TOKEN_SECRET <- "*******************************************"
#getTwitterOAuth(consumer_key, consumer_secret)
# Authenticate against the Twitter API before searching.
setup_twitter_oauth(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# One-day search window; searchTwitter may return fewer than n tweets.
tweets = searchTwitter("#election2016",n=10000,lang="en", since="2016-02-27",until = "2016-02-28")
# Convert the list of status objects into a data frame, then serialise it.
tweets_df = twListToDF(tweets)
#write(exportJson<-df2json(tweets_df), "test.json")
write(exportJson<-toJSON(tweets_df), "electionData27.json")
|
#Title: worm species densities in deep-sea
#Purpose:
#Name: Ville Lehtonen
#R-version: 3.6
setwd('/home/villelehtonen/Desktop/statw-r')
Nemaspec <- read.csv("data/nemaspec.csv", header=TRUE, row.names = 1)
head(Nemaspec)
#1
dens <- Nemaspec$M160b
#2
dens <- dens[dens != 0] #remove zeros
#3
N <- sum(dens)
#4
p <- dens/N
sum(p)
#5
S <- length(p)
#6
N1 <- -sum(p * log(p))
N2 <- 1/sum(p^2)
N3 <- 1/max(p)
#7
n = 100
N = 699
ES <- sum(1-choose(N-dens,100)/choose(N,n))
#8
diversity <- c(N,S,N1,N2,N3,ES)
name_v <- c('N','S','N1','N2','N3','ES')
names(diversity) <- name_v
diversity
|
/Chapter4.R
|
no_license
|
violehtone/Statistics-with-r
|
R
| false
| false
| 583
|
r
|
#Title: worm species densities in deep-sea
#Purpose: compute diversity indices (richness, Shannon/Simpson/Berger-Parker
#         based numbers, and rarefied species count) for one nematode sample.
#Name: Ville Lehtonen
#R-version: 3.6
# NOTE(review): hard-coded absolute path — consider running from the
# project directory instead of calling setwd().
setwd('/home/villelehtonen/Desktop/statw-r')
Nemaspec <- read.csv("data/nemaspec.csv", header=TRUE, row.names = 1)
head(Nemaspec)
#1: densities of all species observed at station M160b
dens <- Nemaspec$M160b
#2: drop species that are absent from this sample
dens <- dens[dens != 0] #remove zeros
#3: total number of individuals
N <- sum(dens)
#4: relative abundances (should sum to exactly 1)
p <- dens/N
sum(p)
#5: species richness
S <- length(p)
#6: diversity indices
# NOTE(review): the Hill number N1 is conventionally exp(Shannon H);
# this stores the Shannon entropy H itself — confirm against the course
# definition before renaming.
N1 <- -sum(p * log(p))
N2 <- 1/sum(p^2)
N3 <- 1/max(p)
#7: expected number of species in a random subsample of n individuals
#   (Hurlbert rarefaction)
n = 100
N = 699
# BUG FIX (consistency): the subsample size was hard-coded as 100 inside
# the formula even though `n` is defined just above; use `n` so changing
# the subsample size only requires editing one line.
ES <- sum(1-choose(N-dens,n)/choose(N,n))
#8: collect all indices into a single named vector
diversity <- c(N,S,N1,N2,N3,ES)
name_v <- c('N','S','N1','N2','N3','ES')
names(diversity) <- name_v
diversity
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
source('getData.R')
library(kableExtra)
library(shiny)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
# get data
tickerReturnsData <- reactive({
dat1 <- diff(log(getData(input$ticker)), 1)
return(dat1)
})
meanModelData <- reactive({
# find the best fitting ARFIMA model for returns
ARFIMA.fit = autoarfima(
data = tickerReturnsData(),
ar.max = input$ar,
ma.max = input$ma,
criterion = input$criterion,
method = "full"
)
data.frame(head(ARFIMA.fit$rank.matrix))
})
# create list object to assign best fitting mean model to GARCH model
meanModel <- reactive({
headings <- colnames(meanModelData())
# get the specification of the best fitting model
arLength <- length(headings[grepl("^ar", headings)]) - 1
maLength <- length(headings[grepl("^ma", headings)])
firstRow <- meanModelData()[1,]
myList <- headings[1:(arLength + maLength)]
myRow <- firstRow[1:(arLength + maLength)]
# if the sum of the AR indicators is zero, then remove the AR part of the vector
myRow2 <- myRow
if (sum(myRow[1:arLength]) == 0) {
myRow2 <- myRow[-c(1:arLength)]
}
# if the sum of the MA indicators is zero, then remove the MA part of the vector
if (sum(myRow2[grepl('^ma', names(myRow2))]) == 0) {
myRow2 <- myRow2[-which(c(grepl('^ma', names(myRow2))))]
}
return(myRow2)
})
# convert meanModel() into a list object
meanModelList <- reactive({
## now construct a list object consisting of fixed AR and/or MA components
myList <- as.list(unname(meanModel()))
names(myList) <- names(meanModel())
# remove elements not equal to 0
myList <- myList[myList == 0]
return(myList)
})
# get number of AR terms in mean model
arNumber <- reactive({
sum(grepl('^ar', names(meanModel())))
})
# get number of MA terms in mean model
maNumber <- reactive({
sum(grepl('^ma', names(meanModel())))
})
# create GARCH model specification
spec <- reactive({
ugarchspec(
mean.model = list(armaOrder = c(arNumber(), maNumber())),
variance.model = list(model = input$varianceModelType),
distribution = "jsu"
)
})
# indicate which AR/MA terms to keep fixed in the GARCH model
myList <- reactive({
# if myList is not empty then define fixed compoents in the GARCH model specification
if (length(meanModelList()) != 0) {
setfixed(spec()) <- meanModelList()
}
return(myList)
})
# fit the GARCH model
fittedModel <- reactive({
fit <- ugarchfit(spec(), tickerReturnsData())
})
# create forecasts
bootp <- reactive({
ugarchboot(
fit,
method = c("Partial", "Full")[1],
n.ahead = input$forecastLength,
n.bootpred = input$forecastLength
)
})
# create tables
output$tickerDataTable <- reactive({
df <- data.frame(tickerReturnsData())
df1 <- tail(df, 20)
names(df1) <- paste0(input$ticker, " (returns)")
df1 %>%
knitr::kable("html") %>%
kable_styling(bootstrap_options = "striped",
full_width = F)
})
output$meanModelTable <- reactive({
df2 <- meanModelData()
df2 %>%
knitr::kable("html") %>%
kable_styling(bootstrap_options = "striped",
full_width = F)
})
output$meanModeListTable <- reactive({
fittedModel()@model$modelinc %>%
knitr::kable("html") %>%
kable_styling(bootstrap_options = "striped",
full_width = F)
})
# create plots
output$diagnosticPlots <- renderPlot({
par(mar = c(1, 1, 1, 1))
plot(fittedModel(), which = 'all') # diagnostics
})
output$forecastPlot <- renderPlot({
dateSeries <- seq(as.Date(colnames(bootp()@forc@forecast$seriesFor) ), length.out = input$forecastLength, by = 1)
forecast <- xts( bootp()@forc@forecast$seriesFor * 100, order.by = dateSeries)
plot(forecast, xlab = 'Date' , ylab = '%', main = paste0('Point forecast of ', input$ticker))
})
})
|
/VolForecaster/server.R
|
no_license
|
cobleg/VolForecaster
|
R
| false
| false
| 4,337
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
source('getData.R')
library(kableExtra)
library(shiny)
# Define server logic required to draw a histogram
# Server logic for the volatility-forecasting app.
#
# Pipeline: download prices for the selected ticker, compute log-returns,
# select the best ARFIMA mean model with autoarfima(), carry the selected
# AR/MA order (with insignificant coefficients fixed at zero) into a GARCH
# specification, fit it, and bootstrap point forecasts.
shinyServer(function(input, output) {

  # Daily log-returns of the selected ticker.
  tickerReturnsData <- reactive({
    diff(log(getData(input$ticker)), 1)
  })

  # Rank matrix of candidate ARFIMA mean models (best model in row 1).
  meanModelData <- reactive({
    ARFIMA.fit <- autoarfima(
      data = tickerReturnsData(),
      ar.max = input$ar,
      ma.max = input$ma,
      criterion = input$criterion,
      method = "full"
    )
    data.frame(head(ARFIMA.fit$rank.matrix))
  })

  # AR/MA inclusion indicators of the best-fitting mean model, with an
  # all-zero AR or MA group removed entirely.
  meanModel <- reactive({
    headings <- colnames(meanModelData())
    # The rank matrix has one ar* column per AR lag plus "arfima" (hence
    # the -1) and one ma* column per MA lag.
    arLength <- length(headings[grepl("^ar", headings)]) - 1
    maLength <- length(headings[grepl("^ma", headings)])
    best <- meanModelData()[1, ]
    indicators <- best[1:(arLength + maLength)]
    # Drop the AR block when no AR term was selected.
    if (sum(indicators[1:arLength]) == 0) {
      indicators <- indicators[-c(1:arLength)]
    }
    # Drop the MA block when no MA term was selected.
    if (sum(indicators[grepl("^ma", names(indicators))]) == 0) {
      indicators <- indicators[-which(grepl("^ma", names(indicators)))]
    }
    indicators
  })

  # Named list of mean-model coefficients to hold fixed at zero.
  meanModelList <- reactive({
    fixedList <- as.list(unname(meanModel()))
    names(fixedList) <- names(meanModel())
    fixedList[fixedList == 0]
  })

  # Number of AR / MA terms retained in the mean model.
  arNumber <- reactive(sum(grepl("^ar", names(meanModel()))))
  maNumber <- reactive(sum(grepl("^ma", names(meanModel()))))

  # GARCH specification with the selected ARMA order.
  # BUG FIX: the original tried to fix coefficients via
  #   setfixed(spec()) <- meanModelList()
  # inside a separate reactive (`myList`) that was never invoked;
  # `fun(call()) <- value` is also not a valid assignment target in R.
  # The fixed coefficients are now applied to a local copy of the spec
  # before it is returned, and the dead `myList` reactive is removed.
  spec <- reactive({
    garchSpec <- ugarchspec(
      mean.model = list(armaOrder = c(arNumber(), maNumber())),
      variance.model = list(model = input$varianceModelType),
      distribution = "jsu"
    )
    if (length(meanModelList()) != 0) {
      setfixed(garchSpec) <- meanModelList()
    }
    garchSpec
  })

  # Fit the GARCH model to the return series.
  fittedModel <- reactive({
    ugarchfit(spec(), tickerReturnsData())
  })

  # Bootstrap forecasts.
  # BUG FIX: the original called ugarchboot(fit, ...) but `fit` existed
  # only as a local variable inside the fittedModel reactive, so this
  # reactive would fail with "object 'fit' not found".
  bootp <- reactive({
    ugarchboot(
      fittedModel(),
      method = c("Partial", "Full")[1],
      n.ahead = input$forecastLength,
      n.bootpred = input$forecastLength
    )
  })

  # --- tables -------------------------------------------------------------
  # NOTE(review): these outputs are assigned reactive expressions rather
  # than render functions (e.g. renderText); confirm the corresponding
  # ui.R output bindings accept this. Kept unchanged to preserve behaviour.
  output$tickerDataTable <- reactive({
    df <- data.frame(tickerReturnsData())
    recent <- tail(df, 20)
    names(recent) <- paste0(input$ticker, " (returns)")
    recent %>%
      knitr::kable("html") %>%
      kable_styling(bootstrap_options = "striped",
                    full_width = F)
  })

  output$meanModelTable <- reactive({
    meanModelData() %>%
      knitr::kable("html") %>%
      kable_styling(bootstrap_options = "striped",
                    full_width = F)
  })

  output$meanModeListTable <- reactive({
    fittedModel()@model$modelinc %>%
      knitr::kable("html") %>%
      kable_styling(bootstrap_options = "striped",
                    full_width = F)
  })

  # --- plots --------------------------------------------------------------
  output$diagnosticPlots <- renderPlot({
    par(mar = c(1, 1, 1, 1))
    plot(fittedModel(), which = 'all') # diagnostics
  })

  output$forecastPlot <- renderPlot({
    dateSeries <- seq(as.Date(colnames(bootp()@forc@forecast$seriesFor)),
                      length.out = input$forecastLength, by = 1)
    forecast <- xts(bootp()@forc@forecast$seriesFor * 100, order.by = dateSeries)
    plot(forecast, xlab = 'Date', ylab = '%',
         main = paste0('Point forecast of ', input$ticker))
  })
})
|
# Log-likelihood function:
test_that("loglik_function remains stable", {
## vector-based:
### Test with 'shock':
x <- shock$distance
status <- shock$status
### log-location scale distributions (two-parametric):
dists <- c("weibull", "lognormal", "loglogistic")
logL <- lapply(
dists,
loglik_function,
x = x,
status = status,
wts = rep(1, length(x)),
dist_params = c(10.23, 0.35)
)
expect_snapshot_output(logL)
## data-based:
data <- reliability_data(data = shock, x = distance, status = status)
logL_data <- lapply(
dists,
loglik_function,
x = data,
wts = rep(1, nrow(data)),
dist_params = c(10.23, 0.35)
)
expect_equal(logL, logL_data)
### location-scale distributions (two-parametric):
dists <- c("sev", "normal", "logistic")
logL <- lapply(
dists,
loglik_function,
x = x,
status = status,
wts = rep(1, length(x)),
dist_params = c(25000, 8500)
)
expect_snapshot_output(logL)
})
# Log-likelihood profile function:
## vector-based:
test_that("loglik_profiling remains stable for vectors", {
cycles <- alloy$cycles
status <- alloy$status
threshold <- seq(0, min(cycles[status == 1]) - 0.1, length.out = 100)
profile_logL <- loglik_profiling(
x = cycles,
status = status,
thres = threshold,
distribution = "weibull3"
)
expect_snapshot_output(profile_logL)
## data-based:
data <- reliability_data(data = alloy, x = cycles, status = status)
profile_logL_data <- loglik_profiling(
x = data,
thres = threshold,
distribution = "weibull3"
)
expect_equal(profile_logL, profile_logL_data)
})
|
/tests/testthat/test-likelihood_functions.R
|
no_license
|
Tim-TU/weibulltools
|
R
| false
| false
| 1,651
|
r
|
# Log-likelihood function:
# Snapshot tests: ensure loglik_function() returns stable values for both
# log-location-scale and location-scale families, and that the vector-based
# and reliability_data()-based interfaces agree.
test_that("loglik_function remains stable", {
  ## vector-based:
  ### Test with 'shock':
  x <- shock$distance
  status <- shock$status
  ### log-location scale distributions (two-parametric):
  dists <- c("weibull", "lognormal", "loglogistic")
  logL <- lapply(
    dists,
    loglik_function,
    x = x,
    status = status,
    wts = rep(1, length(x)),
    dist_params = c(10.23, 0.35)
  )
  expect_snapshot_output(logL)
  ## data-based:
  data <- reliability_data(data = shock, x = distance, status = status)
  logL_data <- lapply(
    dists,
    loglik_function,
    x = data,
    wts = rep(1, nrow(data)),
    dist_params = c(10.23, 0.35)
  )
  # Both interfaces must yield identical log-likelihoods.
  expect_equal(logL, logL_data)
  ### location-scale distributions (two-parametric):
  dists <- c("sev", "normal", "logistic")
  logL <- lapply(
    dists,
    loglik_function,
    x = x,
    status = status,
    wts = rep(1, length(x)),
    dist_params = c(25000, 8500)
  )
  expect_snapshot_output(logL)
})
# Log-likelihood profile function:
## vector-based:
# Profiles the threshold parameter of a three-parameter Weibull and checks
# stability plus agreement between the two interfaces.
test_that("loglik_profiling remains stable for vectors", {
  cycles <- alloy$cycles
  status <- alloy$status
  # Threshold grid strictly below the smallest observed failure time.
  threshold <- seq(0, min(cycles[status == 1]) - 0.1, length.out = 100)
  profile_logL <- loglik_profiling(
    x = cycles,
    status = status,
    thres = threshold,
    distribution = "weibull3"
  )
  expect_snapshot_output(profile_logL)
  ## data-based:
  data <- reliability_data(data = alloy, x = cycles, status = status)
  profile_logL_data <- loglik_profiling(
    x = data,
    thres = threshold,
    distribution = "weibull3"
  )
  expect_equal(profile_logL, profile_logL_data)
})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.