blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
198f65fcc165b9f8f92326f648b0f0d1f929e9da
|
564f5fcfa159f5f3fde03bea679e3f17a28857b0
|
/R/post_debt_correction.R
|
7ae805115611b18c32c88dd01a002414b3eee346
|
[
"MIT"
] |
permissive
|
signaux-faibles/rsignauxfaibles
|
44aa11c5a5defa0dd1dd62969ce51a08c4caf6c9
|
9fc81fcc17f51ff436851d893f8c38b12ff9aa78
|
refs/heads/master
| 2021-06-30T04:59:41.785386
| 2020-11-30T16:50:05
| 2020-11-30T16:50:05
| 198,392,876
| 1
| 0
|
MIT
| 2020-11-30T16:50:06
| 2019-07-23T09:01:10
|
R
|
UTF-8
|
R
| false
| false
| 1,166
|
r
|
post_debt_correction.R
|
#' Compute a correction for indebted companies
#'
#' The correction lives in log-likelihood space (i.e. it is meant to be
#' added after applying a logit to the probability predictions).
#'
#' @inheritParams generic_task
#'
#' @return `data.frame` with columns "siret" and "correction_debt"
#' @export
compute_debt_correction <- function(task) {
# Debt correction
assertthat::assert_that("new_data" %in% names(task))
new_data <- task$new_data
correction <- new_data %>%
group_by(code_ape_niveau3) %>%
# NOTE(review): montant_part_patronale is added to ITSELF (i.e. 2x the
# employer share). This looks like a typo for
# "montant_part_patronale + montant_part_salariale" -- TODO confirm against
# the intended debt definition before changing.
mutate(ratio_dette = (montant_part_patronale + montant_part_patronale) / effectif) %>%
arrange(code_ape_niveau3, desc(ratio_dette)) %>%
# Within each sector, split into "indebted" (ratio > 1) and "not indebted".
group_by(code_ape_niveau3, ratio_dette > 1) %>%
mutate(
# position == 1 for max debt, then decreases down to 0 for no debt
position = case_when(
ratio_dette > 1 ~ seq(1, 0, length.out = n()),
TRUE ~ 0 # All other cases: no debt => no penalty
),
correction_debt = dbeta(position, 3, 1) / 3
# Increasing curve, from f(0) = 0 to f(1) = 1
) %>%
ungroup() %>%
select(siret, correction_debt)
return(correction)
}
|
2dcc8d347cbf8f455ce659db58fd1ea50a7a2e75
|
fbaef343b4882ed40f7a9e74dd7a1e5708d73a68
|
/Simulation_study/Post_simulation_data_separation_check/Script/fitting_models_wo_xgb.R
|
0346c08fc41f0646befab5d62f9108739e491005
|
[
"CC-BY-4.0"
] |
permissive
|
Goorbergh/resampling_techniquesCPM
|
8c968b9c67df190ddcd19cefdd3624b21f2ba5dd
|
02a9e4900feec4bf8f01c5144fcc9084a52baf38
|
refs/heads/main
| 2023-04-27T07:19:47.345461
| 2021-05-14T14:12:52
| 2021-05-14T14:12:52
| 365,748,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,974
|
r
|
fitting_models_wo_xgb.R
|
################################################
########## Model fitting functions #############
################################################
# In this script the functions to fit all models are defined. In the ML logistic
# regression function a test for data separation is integrated.
################################################
########## Re-calibration model ################
################################################
# Re-estimate the intercept of a fitted model to re-calibrate it.
#
# probs   : vector of predicted probabilities in [0, 1]
# outcome : binary outcome vector, same length as probs
# returns : an intercept-only binomial glm fitted with the original linear
#           predictor (logit of probs) as a fixed offset
recal_function <- function(probs, outcome) {
  # logit(0) and logit(1) are infinite, so nudge boundary probabilities
  # halfway towards the nearest interior value to keep the offset finite.
  if (sum(probs == 1) != 0) {
    probs[probs == 1] <- 1 - ((1 - max(probs[probs != 1])) / 2)
  }
  if (sum(probs == 0) != 0) {
    probs[probs == 0] <- min(probs[probs != 0]) / 2
  }
  recal_mod <- glm(outcome ~ 1, offset = log(probs / (1 - probs)), family = "binomial")
  # Explicit return: the original relied on the (invisible) value of the
  # assignment being the function's result.
  recal_mod
}
################################################
########## ML logistic regression #############
################################################
# Fits a maximum-likelihood logistic regression model. When the model was fit
# on an adjusted data set (detected by a row-count mismatch with unad_data),
# a second, recalibrated model is fit on the unadjusted data. Returns a list
# of list(model, elapsed-seconds) pairs: the ML model always, plus the
# recalibration model when applicable.
ML_reg <- function(data, unad_data){
  elapsed <- system.time(mod_ML <- glm(formula = y~., family = 'binomial', data = data))
  # Apparent AUC of exactly 1 indicates (quasi-)complete data separation.
  if (fastAUC(p = mod_ML$fitted.values, y = data[,1]) == 1){
    warning("Apperant AUC = 1, seperation is assumed")
  }
  if (nrow(data) == nrow(unad_data)){
    # Fit on the original data: no recalibration needed.
    return(list(list(mod_ML, elapsed[3])))
  }
  # Fit on adjusted data: recalibrate against the unadjusted data.
  probs <- predict(mod_ML, unad_data, type = 'response')
  elapsed_recal <- system.time(recal_mod <- recal_function(probs = probs, outcome = unad_data[,1]))
  list(list(mod_ML, elapsed[3]), list(recal_mod, elapsed_recal[3]))
}
################################################
######## ridge logistic regression ############
################################################
# The RID_reg() function below fits a ridge logistic regression model. If the
# model is fit on an adjusted data set, a second, recalibrated model is fit.
# Its output is a list containing a list with the ridge model and the system
# time and, if appropriate, a list with the recalibrated model and its
# system time.
#
# lseq() builds a logarithmically spaced sequence; `lambdas` is the candidate
# penalty grid: 0 (unpenalized) plus 251 log-spaced values in [0.001, 64].
lseq <- function(from=0.001, to=64, length.out=251) {
  # Equal spacing on the log scale, mapped back with exp().
  log_points <- seq(from = log(from), to = log(to), length.out = length.out)
  exp(log_points)
}
lambdas <- c(0, lseq())
# RID_reg(): ridge (alpha = 0) logistic regression with the penalty chosen by
# cross-validation over `lambdas`.
#   data      : (possibly adjusted) training data; outcome is column 1, named y
#   unad_data : unadjusted data, used for recalibration when data was adjusted
#   lambdas   : candidate penalty grid
# Returns a list of list(model, elapsed-seconds) pairs: the ridge model
# always, plus the recalibration model when the data was adjusted.
RID_reg <- function(data, unad_data, lambdas){
x <- model.matrix(y ~., data)[,-1]
y <- data[,1]
# Get hyper parameter
nfolds <- ifelse(any(table(data$y)<8),nrow(data),10) # If data is near degenerate nfolds for LOOCV
cv_out <- cv.glmnet(x = x , y = y, alpha = 0, lambda = lambdas,
family = 'binomial', nfolds = nfolds)
# Fit model
sys_time <- system.time(mod_RID <- glmnet(x = x, y = y, alpha = 0,
family = 'binomial',
lambda = cv_out$lambda.min))
# Fit recalibration model
if (nrow(data) != nrow(unad_data)){
# NOTE(review): prediction uses as.matrix(unad_data)[,-1] while training
# used model.matrix(); these only coincide when all predictors are numeric
# and no factor expansion occurred -- TODO confirm.
probs <- predict(mod_RID, as.matrix(unad_data)[,-1], type = 'response')
sys_time_recal <- system.time(recal_mod <- recal_function(probs = probs, outcome = unad_data[,1]))
return(list(list(mod_RID, sys_time[3]), list(recal_mod, sys_time_recal[3])))
} else {
return(list(list(mod_RID,sys_time[3])))
}
}
################################################
############### Random forest ##################
################################################
# Fits a random forest classifier. When the model was fit on an adjusted
# data set (row count differs from unad_data), a recalibration model is also
# fit on the unadjusted data. Returns a list of list(model, elapsed-seconds)
# pairs.
RF <- function(data, unad_data){
  # randomForest needs a factor outcome to do classification.
  data[,1] <- as.factor(data[,1])
  elapsed <- system.time(mod_RF <- randomForest(formula = y~., data = data))
  if (nrow(data) == nrow(unad_data)){
    # Fit on the original data: no recalibration required.
    return(list(list(mod_RF, elapsed[3])))
  }
  # Probability of the second class on the unadjusted data.
  probs <- predict(mod_RF, unad_data, type = 'prob')[,2]
  elapsed_recal <- system.time(recal_mod <- recal_function(probs = probs, outcome = unad_data[,1]))
  list(list(mod_RF, elapsed[3]), list(recal_mod, elapsed_recal[3]))
}
################################################
################## XGboost #####################
################################################
# This function fits a gradient boosting model using the xgboost algorithm. If the model is fit on an
# adjusted data set, a second, recalibrated model is fit. The output of this function
# consists of a list containing a list with the XGB model and the system time and,
# if appropriate, a list with the recalibrated model and its system time.
# XGB <- function(data, unad_data){
# train <- sparse.model.matrix(y ~., data = data)[,-1]
# output_vector <- data[,1]
# dtrain <- xgb.DMatrix(data = train, label = output_vector) # create xgb.DMatrix object to train model
#
# sys_time <- system.time(mod_XGB <- xgboost(data = dtrain, nrounds = 5, objective = "binary:logistic"))
#
# # Recalibration
# if (nrow(data) != nrow(unad_data)){ # Check if dealing with an adjusted data set
#
# cal <- sparse.model.matrix(y ~., data = unad_data)[,-1]
# output_vector <- unad_data[,1]
# dcal <- xgb.DMatrix(data = cal, label = output_vector) # create xgb.DMatrix object
#
# probs <- predict(mod_XGB, dcal)
# sys_time_recal <- system.time(recal_mod <- recal_function(probs = probs, outcome = unad_data[,1]))
#
# return(list(list(mod_XGB, sys_time[3]), list(recal_mod, sys_time_recal[3])))
# } else {
# return(list(list(mod_XGB, sys_time[3])))
# }
# }
################################################
######## Function to fit all models ############
################################################
# Fits every model on one development data set (data) using unad_data as the
# recalibration reference (these may be the same). Each fit is wrapped in the
# custom tryCatch.W.E() so an error in one model does not stop the run and
# warnings are captured. Returns a named list of the wrapped results.
fit_mod <- function(data, unad_data, lambdas){
  models <- list(
    lr_mod  = tryCatch.W.E(ML_reg(data, unad_data)),
    rid_mod = tryCatch.W.E(RID_reg(data, unad_data, lambdas)),
    rf_mod  = tryCatch.W.E(RF(data, unad_data))
    # , xg_mod = tryCatch.W.E(XGB(data, unad_data))
  )
  models
}
############################################################
######## Function to fit all models on all data ############
############################################################
# Fits all models (via fit_mod) on every development data set in ALL_data.
# The FIRST element of ALL_data is taken as the unadjusted data set and used
# as the recalibration reference for every fit. Returns a named list, one
# entry per adjustment method (order in the original study: unadjusted, ROS,
# RUS, SMOTE), each entry being the corresponding fit_mod() result.
#
# NOTE: `lambdas` is read from the enclosing (global) environment, as in the
# original script (it is defined next to lseq()).
fit_mod_ALL <- function(ALL_data){
  # Preallocate; seq_along() generalizes the original hard-coded 1:4 and is
  # safe for any number of data sets (including zero).
  models <- vector("list", length(ALL_data))
  for (i in seq_along(ALL_data)){
    # [[i]] <- instead of the original `models[i] <- list(...)` wrapping.
    models[[i]] <- fit_mod(ALL_data[[i]], ALL_data[[1]], lambdas)
  }
  names(models) <- names(ALL_data)
  return(models)
}
#########################################
############# Test script ###############
#########################################
#ALL_models <- fit_mod_ALL(ALL_data)
|
f4d8b69a9559197c9e3ab627c7c6e317646ff789
|
ed1920915c1f7070c7cec39de8ca82672be18cc5
|
/source/util/get.prm.os.R
|
0f4df63f381bd487220bfa8404639e82febffec4
|
[] |
no_license
|
sthallor/miscRscripts
|
d28f7a9cdbc53fc7c7994c6000d1753b3679236d
|
c3a5a206c35cdbbb15f07a4ea9250ff861b2e7f1
|
refs/heads/master
| 2022-11-06T03:39:03.951539
| 2020-06-21T23:21:47
| 2020-06-21T23:21:47
| 273,998,540
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,653
|
r
|
get.prm.os.R
|
#######################################################################################
# get.prm.os.R - read and decode ASCII parameters file for mainOffset.R
# Ensign Energy Services Inc. retains all rights to this software
# FHS, Feb 15, 2017
#######################################################################################
#
# Program Offset V3.0 Program Parameters 2/15/2017
#
# Title [Historian to EDR time offset 2/15/2017]
#
# Historian .csv Filename [/Users/Fred.Seymour/Historian_Data/160216_Datasets/BlockHeight_Rig774_WellID48543.csv]
# EDR .csv Filename [/Users/Fred.Seymour/Historian_Data/160216_Datasets/input_EDR_file_output.csv]
# Output log .txt Filename [/Users/Fred.Seymour/Historian_Data/160216_Datasets/histEDR_correlation.txt]
# Plot correlation .png Filename [/Users/Fred.Seymour/Historian_Data/160216_Datasets/histEDR_correlation.png]
#
# Historian Block Height Tag Name [BLOCK_HEIGHT]
# EDR Block Height Tag Name [EDR_BlockHeight]
# Offset interval start (negative count from 0) [-100]
# Offset interval stop (positive count from 0) [100]
#
# get.prm.os(prmFilename) reads a parameter file with the layout shown above
# and returns a list `prm.os` with elements: text (the file lines verbatim),
# version, title, histName, EDRName, outputName, plotName, histKeyword,
# EDRKeyword, offsetMin, offsetMax. Keywords 1-8 are mandatory (the function
# stops with an error when missing); keywords 9-10 are optional with
# defaults -60 / +60.
get.prm.os <- function(prmFilename) {
# Read and decode text parameters file
# sep="\\" is a single backslash character -- presumably chosen because it
# never occurs in the file, so each line is read as one field. TODO confirm.
text <- read.table(prmFilename,sep="\\",
stringsAsFactors=FALSE,
blank.lines.skip=FALSE)
# prm.decode() is defined elsewhere; it appears to extract the "[...]"
# bracket fields into text$v1 and to count bracket pairs per line in
# text$count -- verify against its definition.
text <- prm.decode(text)
keywords <- c('Program Offset V3.0 Program Parameters 2/15/2017',
'Title',
'Historian .csv Filename',
'EDR .csv Filename',
'Output log .txt Filename',
'Plot correlation .png Filename',
'Historian Block Height Tag Name',
'EDR Block Height Tag Name',
'Offset interval start',
'Offset interval stop')
# Initialize parameters list
prm.os <- list()
# copies text parameters file verbatim
prm.os$text <- text$text
# Keyword 1: version stamp; the file is rejected unless a line prefix-matches
# it exactly (pmatch). The comment below looks like a leftover from another
# program's parameter reader.
# Program Data Clean V1.0 2/18/2016
kwi <- 1 # Key Word Index
r <- pmatch(keywords[kwi],text$text)
if (is.na(r)) stop(sprintf('ERROR WITH PARAMETER FILE...
WRONG PROGRAM, VERSION, OR DATE...
MUST EXACTLY MATCH WITH "%s"\n',keywords[kwi]))
prm.os$version <- keywords[kwi]
# Title
kwi <- 2 # Key Word Index
r <- pmatch(keywords[kwi],text$text)
if (is.na(r)) stop(sprintf('ERROR WITH PARAMETER FILE...
MUST HAVE EXACTLY ONE LINE STARTING WITH "%s"\n',keywords[kwi]))
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
prm.os$title <- text$v1[r]
# Historian .csv Filename (mandatory; must end in ".csv")
kwi <- 3 # Key Word Index
r <- pmatch(keywords[kwi],text$text)
if (is.na(r)) stop(sprintf('ERROR WITH PARAMETER FILE...
MUST HAVE EXACTLY ONE LINE STARTING WITH "%s"\n',keywords[kwi]))
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
prm.os$histName <- text$v1[r]
if(!(substr(prm.os$histName,(nchar(prm.os$histName)-3),nchar(prm.os$histName))==".csv")) {
stop('ERROR WITH PARAMETER FILE ... historian file must be of type .csv')
}
# EDR .csv Filename (mandatory; must end in ".csv")
kwi <- 4 # Key Word Index
r <- pmatch(keywords[kwi],text$text)
if (is.na(r)) stop(sprintf('ERROR WITH PARAMETER FILE...
MUST HAVE EXACTLY ONE LINE STARTING WITH "%s"\n',keywords[kwi]))
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
prm.os$EDRName <- text$v1[r]
if(!(substr(prm.os$EDRName,(nchar(prm.os$EDRName)-3),nchar(prm.os$EDRName))==".csv")) {
stop('ERROR WITH PARAMETER FILE ... EDR file must be of type .csv')
}
# Output log .txt Filename (line mandatory, value optional: an empty bracket
# means log output goes to the default device)
kwi <- 5 # Key Word Index
r <- pmatch(keywords[kwi],text$text)
if (is.na(r)) stop(sprintf('ERROR WITH PARAMETER FILE...
MUST HAVE EXACTLY ONE LINE STARTING WITH "%s"\n',keywords[kwi]))
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
if (nchar(text$v1[r])>0) {
if(!(substr(text$v1[r],(nchar(text$v1[r])-3),nchar(text$v1[r]))==".txt"))
stop('ERROR WITH PARAMETER FILE ... offset output log file name must be of type .txt')
prm.os$outputName <- text$v1[r]
} else { # if blank then output log filename is blank and output is sent to default device
prm.os$outputName <- ''
}
# Plot correlation .png Filename (line mandatory, value optional, as above)
kwi <- 6 # Key Word Index
r <- pmatch(keywords[kwi],text$text)
if (is.na(r)) stop(sprintf('ERROR WITH PARAMETER FILE...
MUST HAVE EXACTLY ONE LINE STARTING WITH "%s"\n',keywords[kwi]))
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
if (nchar(text$v1[r])>0) {
if(!(substr(text$v1[r],(nchar(text$v1[r])-3),nchar(text$v1[r]))==".png"))
stop('ERROR WITH PARAMETER FILE ... offset plot filename must must be of type .png')
prm.os$plotName <- text$v1[r]
} else { # if blank then output plot filename is blank and output is sent to default device
prm.os$plotName <- ''
}
# Historian Block Height Tag Name (mandatory)
kwi <- 7 # Key Word Index
r <- pmatch(keywords[kwi],text$text)
if (is.na(r)) stop(sprintf('ERROR WITH PARAMETER FILE...
MUST HAVE EXACTLY ONE LINE STARTING WITH "%s"\n',keywords[kwi]))
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
prm.os$histKeyword <- text$v1[r]
# EDR Block Height Tag Name (mandatory)
kwi <- 8 # Key Word Index
r <- pmatch(keywords[kwi],text$text)
if (is.na(r)) stop(sprintf('ERROR WITH PARAMETER FILE...
MUST HAVE EXACTLY ONE LINE STARTING WITH "%s"\n',keywords[kwi]))
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
prm.os$EDRKeyword <- text$v1[r]
# Offset interval start (optional; default -60; non-numeric values are
# silently ignored and leave the default in place)
kwi <- 9
r <- pmatch(keywords[kwi],text$text)
prm.os$offsetMin <- (-60) # default is no number conversion to NA
if (!is.na(r)) {
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
if (suppressWarnings(!(is.na(as.integer(text$v1[r]))))) {
prm.os$offsetMin <- as.integer(text$v1[r])
}
}
# Offset interval stop (optional; default +60; same silent-fallback behavior)
kwi <- 10
r <- pmatch(keywords[kwi],text$text)
prm.os$offsetMax <- 60 # default is no number conversion to NA
if (!is.na(r)) {
if (text$count[r] != 1) stop(sprintf('ERROR WITH PARAMETER FILE ... need exactly 1 bracket field (i.e. bracket pair []) on line\n%s\n',
text$text[r]))
if (suppressWarnings(!(is.na(as.integer(text$v1[r]))))) {
prm.os$offsetMax <- as.integer(text$v1[r])
}
}
return(prm.os)
}
|
a3e3d1f457c401747f23eea5d97ea8e22bb2719b
|
84415effc813af58e57141e03fde482c8853afd7
|
/Scripts/RMD4/remove_low_copy_RNAs.R
|
387e860d26773a6ffb5932029eebf024255c857b
|
[] |
no_license
|
brianpenghe/bteefy_piwi_transposon
|
9943d9f980165d33cc6c3ea8e3924764707dd217
|
1e6d0a354b8544d96ef59b67b0b653de7debe274
|
refs/heads/master
| 2023-03-19T11:54:49.067882
| 2020-07-07T23:02:50
| 2020-07-07T23:02:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,720
|
r
|
remove_low_copy_RNAs.R
|
# Remove low-copy sequences from piRNA read tables.
# For each sense (S) and antisense (AS) table per tissue (Ect/End/Int):
# keep rows with more than 3 copies (column V3) and write the surviving
# sequences (column V4) to a *_low_copy_filtered.txt file, one per line.
setwd("/group/julianolab/bteefy/piwi_revisions/small_RNA")
# Kept for compatibility with the original script (it was assigned but never
# used here; other scripts may read it from the global environment).
data.out <- "/group/julianolab/bteefy/piwi_revisions/small_RNA"

# Filter one read table and write the retained sequences.
#   infile     : whitespace-separated table, no header; copy count in V3,
#                sequence in V4
#   outfile    : destination for the sequences, no header/quotes/row names
#   min_copies : retain rows with V3 strictly greater than this (default 3)
filter_low_copy <- function(infile, outfile, min_copies = 3) {
  reads <- read.table(infile, header = FALSE)
  # subset() (like the original) drops rows where the condition is NA.
  kept <- subset(reads, V3 > min_copies)
  write.table(as.data.frame(kept$V4), file = outfile,
              col.names = FALSE, row.names = FALSE, quote = FALSE)
}

# Input/output pairs, in the original script's order:
# sense tables first, then the antisense tables.
io_pairs <- list(
  c("Ecto_S_Reads.rep_pirna.txt",  "Ect_S_low_copy_filtered.txt"),
  c("Endo_S_Reads.rep_pirna.txt",  "End_S_low_copy_filtered.txt"),
  c("Int_S_Reads.rep_pirna.txt",   "Int_S_low_copy_filtered.txt"),
  c("Ecto_AS_Reads.rep_pirna.txt", "Ect_AS_low_copy_filtered.txt"),
  c("Endo_AS_Reads.rep_pirna.txt", "End_AS_low_copy_filtered.txt"),
  c("Int_AS_Reads.rep_pirna.txt",  "Int_AS_low_copy_filtered.txt")
)
for (pair in io_pairs) {
  filter_low_copy(pair[1], pair[2])
}
|
4aefffb4725cc3670b5b005e86f0de434fdca92a
|
ce94e221e5fd686cfb1218b0a9625decb77ac0c7
|
/man/mdra.Rd
|
fa0cd91627b588e1f565da0ff5aeff0575d9053f
|
[] |
no_license
|
daniel-gerhard/medrc
|
bb95f91a63e150dd4a114fbfa409dcddc89195ea
|
232b2f3887510add1851e6eae21f6ac529b6bf33
|
refs/heads/master
| 2020-12-24T08:24:02.948797
| 2017-12-27T03:39:07
| 2017-12-27T03:39:07
| 10,939,171
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,726
|
rd
|
mdra.Rd
|
\name{mdra}
\alias{mdra}
\docType{data}
\title{3T3 mouse fibroblasts and NRU assay}
\description{
The toxicity of sodium valproate was tested, using the 3T3 mouse fibroblasts and neutral red uptake (NRU) assay. 22 different experiments were performed independently in six laboratories, using eight concentration levels, each with six replicates on a 96-well plate. In addition, twelve measurements were taken for the solvent control.
}
\usage{data("mdra")}
\format{
A data frame with 1320 observations on the following 4 variables.
\describe{
\item{\code{LabID}}{a factor with levels \code{A} \code{B} \code{C} \code{D} \code{E} \code{F}}
\item{\code{ExperimentID}}{a factor with levels \code{1} \code{2} \code{3} \code{4} \code{5} \code{6} \code{7} \code{8} \code{9} \code{10} \code{11} \code{12} \code{13} \code{14} \code{15} \code{16} \code{17} \code{18} \code{19} \code{20} \code{21} \code{22}}
\item{\code{Concentration}}{a numeric vector}
\item{\code{Response}}{a numeric vector}
}
}
\source{
http://biostatistics.dkfz.de/download/mdra/MDRA_ExampleData.csv
}
\references{
Clothier, R., Gomez-Lechon, M. J., Kinsner-Ovaskainen, A., Kopp-Schneider, A., O'Connor, J. E., Prieto, P., and Stanzel, S. (2013). Comparative analysis of eight cytotoxicity assays evaluated within the ACuteTox Project. Toxicology in vitro, 27(4):1347--1356.
}
\examples{
data(mdra)
# fit a 3-parameter log-logistic mixed model
m <- medrm(Response ~ Concentration, data=mdra, fct=LL.3(),
random=d + e ~ 1|LabID/ExperimentID,
weights=varExp(form=~Concentration),
start=c(2.13, 0.95, 0.02))
plot(m, logx=TRUE, ndose=250, ranef=TRUE) +
theme_classic()
}
\keyword{datasets}
|
bfaf8abc4bace0bbba356155631b03dc1794debb
|
1afa5017b24a0964f80ec49e772bc260daadfe7d
|
/man/get_o3.Rd
|
29a2e1c06dd432012f38a7966d7cedd89c2de591
|
[
"MIT"
] |
permissive
|
healthinnovation/innovar
|
76b313f6c295f4001ab60d10ccf94c1856fb61c7
|
45a2b3cd36bac06fedb93b2a9b7e1432c66021ba
|
refs/heads/master
| 2023-09-04T04:56:15.083794
| 2023-08-28T15:44:34
| 2023-08-28T15:44:34
| 296,215,442
| 5
| 5
|
NOASSERTION
| 2022-11-07T21:54:17
| 2020-09-17T04:04:55
|
R
|
UTF-8
|
R
| false
| true
| 2,373
|
rd
|
get_o3.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_o3.R
\name{get_o3}
\alias{get_o3}
\title{Extract Ozone data of Sentinel5}
\usage{
get_o3(from, to, band, region, fun = "max", scale = 1000)
}
\arguments{
\item{to, from}{character strings giving the starting and final dates.}
\item{band}{name of band.}
\item{region}{is a feature or feature collection.}
\item{fun}{function used to extract zonal statistics (count, kurtosis, max, mean, median, min, mode, percentile, std, sum, variance, first).}
\item{scale}{A nominal scale in meters of the projection to work in.}
}
\value{
a tibble object with the new variables.
}
\description{
A function that extracts a time series of ozone (2018-07-10T11:02:44Z - 2022-05-15T00:00:00).
}
\details{
Name of some bands.
\itemize{
\item \bold{O3_column_number_density (mol/m²):} Total atmospheric column of O3 between the surface and the top of atmosphere, calculated with the DOAS algorithm.
\item \bold{O3_column_number_density_amf (mol/m²):} Weighted mean of cloudy and clear air mass factor (amf) weighted by intensity-weighted cloud fraction.
\item \bold{O3_slant_column_number_density (mol/m²):} O3 ring corrected slant column number density.
\item \bold{O3_effective_temperature (K):} Ozone cross section effective temperature.
\item \bold{cloud_fraction:} Effective cloud fraction. See the Sentinel 5P L2 Input/Output Data Definition Spec, p.220.
\item \bold{sensor_azimuth_angle (degrees):} Azimuth angle of the satellite at the ground pixel location (WGS84); angle measured East-of-North.
\item \bold{sensor_zenith_angle (degrees):} Zenith angle of the satellite at the ground pixel location (WGS84); angle measured away from the vertical.
\item \bold{solar_azimuth_angle (degrees):} Azimuth angle of the Sun at the ground pixel location (WGS84); angle measured East-of-North.
\item \bold{solar_zenith_angle (degrees):} Zenith angle of the satellite at the ground pixel location (WGS84); angle measured away from the vertical.
}
}
\examples{
\dontrun{
library(tidyverse)
library(rgee)
library(innovar)
library(sf)
ee_Initialize()
# 1. Reading a sf object
region <- Peru
region_ee <- pol_as_ee(region , id = 'distr' , simplify = 1000)
# 2. Extracting climate information
data <- region_ee \%>\% get_o3(
from = "2019-02-01", to = "2019-12-31",
band = "CO_column_number_density", fun = "max")
}
}
|
0afb9adb0b1cff8e3eecdaa53a43c232428f8bd0
|
dbaeb60398f6cc9420d2dbd3ade57bce56aca2d1
|
/man/plot_TL.plateau.Rd
|
671a874e8c91a2bc86a140295b25275b2a1b3105
|
[] |
no_license
|
dstreble/TLdating
|
6125a9323a8b7c814323454dad4c436b1b189797
|
ff7cbf39a67db240808f9b59d4135325744a42c7
|
refs/heads/master
| 2020-05-22T04:39:03.900638
| 2017-09-06T14:37:28
| 2017-09-06T14:37:28
| 52,873,093
| 5
| 1
| null | 2016-03-17T10:58:42
| 2016-03-01T11:51:17
|
R
|
UTF-8
|
R
| false
| true
| 1,955
|
rd
|
plot_TL.plateau.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_TL.plateau.R
\name{plot_TL.plateau}
\alias{plot_TL.plateau}
\title{plot plateau test result}
\usage{
plot_TL.plateau(sample.name, temperatures, names, doses, Lx, Lx.a, Lx.plateau,
LxTx, LxTx.a, LxTx.plateau, plotting.parameters = list(plateau.Tmin = 0,
plateau.Tmax = NA, plot.Tmin = 0, plot.Tmax = NA))
}
\arguments{
\item{sample.name}{\link{character} (\bold{required}): Sample name.}
\item{temperatures}{\link{numeric} (\bold{required}): temperature vector}
\item{names}{\link{character} (\bold{required}): Name vector for the additive curves.}
\item{doses}{\link{numeric} (\bold{required}): Dose vector for the additive curves.}
\item{Lx}{\link{numeric} (\bold{required}): Lx matrix for the additive curves.}
\item{Lx.a}{\link{numeric} (\bold{required}): Lx matrix for the average additive curves.}
\item{Lx.plateau}{\link{numeric} (\bold{required}): Ln/Lx matrix for the additive curves.}
\item{LxTx}{\link{numeric} (\bold{required}): Lx/Tx matrix for the additive curves.}
\item{LxTx.a}{\link{numeric} (\bold{required}): Lx/Tx matrix for the average additive curves.}
\item{LxTx.plateau}{\link{numeric} (\bold{required}): (Ln/Tn)/(Lx/Tx) matrix for the additive curves.}
\item{plotting.parameters}{\link{list} (with default): list containing the plotting parameters. See details.}
}
\description{
This function plots the results for \link{analyse_TL.plateau}.
}
\details{
\bold{Plotting parameters} \cr
The plotting parameters are: \cr
\describe{
\item{\code{plot.Tmin}}{
\link{numeric}: Lowest temperature plotted.}
\item{\code{plot.Tmax}}{
\link{numeric}: Highest temperature plotted.}
\item{\code{no.plot}}{
\link{logical}: If \code{TRUE}, the results will not be plotted.}
}
See also \link{analyse_TL.MAAD}. \cr
}
\seealso{
\link{analyse_TL.plateau},
\link{calc_TL.MAAD.fit.Q},
\link{calc_TL.MAAD.fit.I}.
}
\author{
David Strebler
}
|
8e06b02276dd0988c51b2e5b70787362af3f8c1d
|
15ba08494f3ce8731aff56873d16e58e062d05a5
|
/Lab05.R
|
f5485d9b6176ea79adfd99cebde2f7b1b509e732
|
[] |
no_license
|
zhaoleist/advStats
|
98aea043c4ea5aec7a25e464592f4f8d9d9529cb
|
92b7fdd7061897d128bf76a84cec1132e2654017
|
refs/heads/master
| 2021-05-09T19:22:07.546232
| 2018-04-26T03:07:12
| 2018-04-26T03:07:12
| 118,638,166
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,667
|
r
|
Lab05.R
|
# lab 5
# Compares gene expression counts between two replicate samples using
# Fisher's exact and Poisson tests.
rm(list=ls()) # NOTE(review): clearing the workspace inside a script is discouraged
# (1)
# Count table: rows = scaffolds/genes (IDs from column 1), columns = samples.
myT <- read.table("nc101_scaff_dataCounts.txt", header=TRUE, row.names = 1)
# (2)
# 3x2 plotting grid so all of the lab's figures share one device.
par(mfrow=c(3,2))
# Pseudocount of 1 avoids log10(0) for genes with zero counts.
myT_log10 <- log10(myT+1)
plot(myT_log10[,1], myT_log10[,2], main="log scale counts for the two samples")
# y = x reference line: points on it have equal counts in both replicates.
lines(c(0,4), c(0,4), col="red")
# ANSWER:
# The biological replicates do not seem to have similar patterns of gene expression.
# While the red line indicates the case that the two replicates have same patterns, most actual points are to the right of the line, which means most genes
# have more expression in replicate 1.
# (3) (4)
# For each gene, build the 2x2 contingency table (this gene's counts vs. the
# rest of the library, in each of the two samples) and run Fisher's exact test.
sumCol <- apply(myT, 2, sum)
pValues <- vector(length=nrow(myT))
for (i in seq_len(nrow(myT))){
  m <- matrix(c(myT[i,1], myT[i, 2], sumCol[1]-myT[i,1], sumCol[2]-myT[i,2]), nrow=2, byrow=TRUE)
  pValues[i] <- fisher.test(m)$p.value
}
hist(pValues, breaks=20, main="p-values for fisher.test")
# The p-value for question #(3) is 1.670017e-11.
# Remove low-abundance genes (combined count across both samples <= 50).
myT_rmvLowAbund <- myT[myT$D2_01 + myT$D2_02 > 50,]
sumCol_2 <- apply(myT_rmvLowAbund, 2, sum)
pValues_2 <- vector(length=nrow(myT_rmvLowAbund))
for (i in seq_len(nrow(myT_rmvLowAbund))){
  # BUG FIX: the original subtracted myT[i,1] / myT[i,2] here. After
  # filtering, row i of myT is a DIFFERENT gene than row i of
  # myT_rmvLowAbund, so the "all other genes" cells were computed from the
  # wrong row. Use the filtered table consistently.
  m <- matrix(c(myT_rmvLowAbund[i,1], myT_rmvLowAbund[i, 2],
                sumCol_2[1]-myT_rmvLowAbund[i,1],
                sumCol_2[2]-myT_rmvLowAbund[i,2]), nrow=2, byrow=TRUE)
  pValues_2[i] <- fisher.test(m)$p.value
}
hist(pValues_2, breaks=20, main="p-values for fisher.test without low abundance counts")
# Answer for #(4):
# They are not uniformly distributed and I don't expect them to be. If we say p <- 0.05 is "significant",
# the ratio of significant p-values is: 29.1% ( hist(pValues, breaks=20, main="p-values for fisher.test")$counts[1]/length(pValues) ),
# so we say they are less significant. After removing low abundance expression, 1) the histogram lost the right-most bar, which was caused
# by the raw zero count in the dataset; 2) the ratio of significant p-values went up to
# 43.9% (hist(pValues_2, breaks=20, main="p-values for fisher.test")$counts[1]/length(pValues_2)).
# NOTE(review): the 43.9% figure came from the pre-fix code; rerun to refresh.
# (5) (6)
# Pseudocount of 1 on every cell so no rate is exactly zero.
# NOTE: this mutates myT for the remainder of the script.
myT <- myT + 1
# Expected frequency of gene 1, estimated from sample 1.
freq_expt <- myT[1,1]/sum(myT[,1])
# Is gene 1's count in sample 2 consistent with that frequency?
poisson.test(myT[1,2], sum(myT[,2]), freq_expt)$p.value
# The p-value for question #(5) is: 1.139341e-13
sumCol_plusOne <- apply(myT, 2, sum)
pValues_3 <- vector(length=nrow(myT))
for (i in 1:nrow(myT)){
# Sample-1 frequency of gene i, used as the Poisson rate for sample 2.
freq_background <- myT[i,1]/sumCol_plusOne[1]
pValues_3[i] <- poisson.test(myT[i,2], sumCol_plusOne[2], freq_background)$p.value
}
hist(pValues_3, breaks=20, main="p-values for poisson")
plot(pValues, pValues_3, main="p-values from fisher.test vs. p-values from poisson.test")
lines(c(0,1), c(0,1), col="red")
# Answer for #(6):
# The histograms look similar, but from the last graph, they don't agree.
|
7d4b0aa8e6c0973d568d2d7d300a7700b05e5a62
|
d5e77a217b54f36d5d187b14ecd1e537dc5d958b
|
/R/LogisticRegression.R
|
7e8bad5ab6641a5a93b8712bd88cef0b16980f93
|
[] |
no_license
|
zzxxyui/metadarclean
|
6328c7a51d04834795d3c39e148b61e9921f8635
|
e548d007ba386ca26f62ab5004547fbf558e925c
|
refs/heads/master
| 2020-09-03T22:21:55.737593
| 2019-11-04T20:16:41
| 2019-11-04T20:16:41
| 219,588,157
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,318
|
r
|
LogisticRegression.R
|
# Logistic-regression classifier built on the project's Classifier reference
# class. Relies on fields and helpers inherited from Classifier: x / y
# (training features-by-samples matrix and outcome), x.test / y.test, model,
# predicted.value, predicted.class, optimalCutoff, and the compute*() methods
# used in the statistics helpers below.
LogisticRegression <- setRefClass("LogisticRegression", contains="Classifier",
  methods = list(
    # Construct and immediately fit on the supplied training data.
    initialize = function(...) {
      callSuper(...)
      buildClassifier()
      .self
    },
    # Fit a binomial GLM of the outcome on all features. x is stored
    # features-by-samples, hence the transpose; maxit=50 gives IRLS extra
    # iterations on difficult data.
    buildClassifier = function() {
      .self$model <- glm(y~., family=binomial,
        data=data.frame("y"=factor(.self$y), t(.self$x), check.names=FALSE),
        maxit=50)
    },
    # Dispatch to one of the variable-selection strategies below.
    subselect = function(method="stepAIC") {
      switch(method,
             "stepAIC"=reduce.stepaic(),
             "anneal"=reduce.anneal(),
             "lasso"=reduce.lasso())
    },
    # Backward stepwise selection by AIC (MASS::stepAIC).
    reduce.stepaic = function() {
      .self$model <- stepAIC(.self$model, trace=0, direction="backward")
    },
    # Simulated-annealing subset selection on the glm H matrix.
    reduce.anneal = function() {
      matr <- glmHmat(.self$model)
      .self$model <- anneal(matr$mat, kmin=3, H=matr$H, r=matr$r)
    },
    # L1-penalized refit (glmnet, alpha=1).
    reduce.lasso = function() {
      .self$model <- glmnet(t(.self$x), .self$y, family="binomial", alpha=1)
    },
    # NOTE(review): `mcl` is not defined anywhere in this file; this method
    # looks unfinished and will fail if called -- confirm before use.
    reduce.mclust.stepaic = function() {
      mcl$new()
    },
    # Predict on the training data; label the second level when the
    # probability exceeds optimalCutoff (or 0.5 when no cutoff is set).
    trainingPrediction = function() {
      .self$predicted.value <- predict.glm(.self$model, newdata=data.frame(t(.self$x),check.names=FALSE), type="response")
      levels(.self$predicted.class) <- levels(.self$y)
      if(!is.null(.self$optimalCutoff)) {
        .self$predicted.class[.self$predicted.value > .self$optimalCutoff] <- levels(.self$y)[2]
      } else {
        .self$predicted.class[.self$predicted.value > 0.5] <- levels(.self$y)[2]
      }
    },
    # Predict on the held-out test data; same thresholding rule as above.
    testPrediction = function() {
      .self$predicted.value <- predict.glm(.self$model, newdata=data.frame(t(.self$x.test),check.names=FALSE), type="response")
      levels(.self$predicted.class) <- levels(.self$y.test)
      if(!is.null(.self$optimalCutoff)) {
        .self$predicted.class[.self$predicted.value > .self$optimalCutoff] <- levels(.self$y.test)[2]
      } else {
        # BUG FIX: this branch used levels(.self$y)[2]; use the test labels
        # for consistency with the cutoff branch above.
        .self$predicted.class[.self$predicted.value > 0.5] <- levels(.self$y.test)[2]
      }
    },
    # Predict on the training data, then compute ROC, AUC, optimal cutoff,
    # sensitivity and specificity.
    trainingStatistics = function() {
      .self$trainingPrediction()
      .self$computeROC()
      .self$computeAUC()
      .self$computeoptimalCutoff()
      .self$computeSensitivity()
      .self$computeSpecificity()
    },
    # Same statistics pipeline, on the test predictions.
    testStatistics = function() {
      .self$testPrediction()
      .self$computeROC()
      .self$computeAUC()
      .self$computeoptimalCutoff()
      .self$computeSensitivity()
      .self$computeSpecificity()
    }
  ))
|
45d059bfb18e99ede9538b7821587127bde6e756
|
e88cbf37f6e9536d2467974017e5abed1ac30987
|
/R/03b_create_getplotsfunction.R
|
f6a49e30eb31581d1646def8735d53b4e7b56c6f
|
[] |
no_license
|
hkalvin/INFO550Project
|
f88699410ef6ef03f2c76cbe30d6c1e2cf1952cd
|
4fc3a0cd40cc1b01789f2c490fef9dfea402c3c7
|
refs/heads/master
| 2023-02-01T10:21:47.347870
| 2020-11-19T04:00:52
| 2020-11-19T04:00:52
| 299,963,017
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
r
|
03b_create_getplotsfunction.R
|
#! /usr/local/bin/Rscript
#create function for figure generation
# Build a faceted bar chart of age-adjusted breast cancer mortality rates by
# state and race for one region.
#   r   : region name, used both to subset the data and in the figure title
#   n   : number of facet columns (facets are by the `a` column)
#   c   : figure number used in the caption text
#   dat : source data frame; defaults to the global ds2_fig (as the original
#         did implicitly), so existing calls are unchanged. Must contain
#         columns region, Race, Age_Adjusted_Rate and a.
# Returns the ggplot object.
getplots<-function(r,n,c,dat=ds2_fig){
  regional <- subset(dat, region == r)
  plottxt <- paste("Figure ", c, ". Age adjusted breast cancer mortality rate by \nstate and race (", r, ")", sep = "")
  f <- ggplot(regional, aes(x = Race, y = Age_Adjusted_Rate)) +
    geom_col(aes(fill = Race)) +
    ggtitle(plottxt) +
    facet_wrap(~a, ncol = n) +
    ylab("Age adjusted breast cancer mortality rate") +
    theme(legend.position = "bottom", panel.spacing = unit(0.4, "lines"))
  return(f)
}
|
2d5fede7eebbfc80babbcc6fd95b57e92427e06d
|
cfa9a6c3519a17bcded7cb5091be11c02739434d
|
/R/utilities.eval.R
|
e87ecdfe0bd72066f6d0d750379c236ad2d591dc
|
[] |
no_license
|
cran/ggloop
|
5f3529804c9c94c35788689d17dd733615b6672f
|
e3aaa56cbd19c4c9d6f1d2598a48eb3571d16a75
|
refs/heads/master
| 2021-01-11T03:49:08.312424
| 2016-10-20T01:58:31
| 2016-10-20T01:58:31
| 71,409,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,076
|
r
|
utilities.eval.R
|
#' @include utilities.eval2.R
# ops ---------------------------------------------------------------------
#
#' Regex patterns for the arithmetic operators to search for; the regex
#' metacharacters (+, *, ^) are escaped.
ops <- c("/", "\\+", "-", "\\*", "\\^")
# is.op() ---------------------------------------------------------------
#
#' @title
#' Determine if an input uses an arithmetical operator (\code{/}, \code{+},
#' \code{-}, \code{*}, \code{^}).
#'
#' @description
#' Greps each pattern in \code{ops} against \code{lst} and returns the
#' unique positions of the elements containing at least one operator.
#'
#' @param lst A list object to be tested.
#' @return An integer vector of unique positions (length zero if none match).
is.op <- function(lst) {
  # lapply instead of sapply: grep() returns vectors of varying length, and
  # sapply()'s auto-simplification also attaches the patterns as names,
  # which previously had to be stripped by hand afterwards.
  matches <- lapply(ops, function(pattern) grep(pattern, lst))
  unique(unname(unlist(matches)))
}
# fun.par -----------------------------------------------------------------
#
#' Regular expression pattern for detecting a possible function call: an
#' alphabetic identifier immediately followed by \code{"("} ... \code{")"}
#' with at least one character between the parentheses.
fun.par <- c("[A-Za-z]+\\(.+\\)")
# is.fun() ----------------------------------------------------------------
#
#' @title
#' Is it a function?
#'
#' @description
#' Attempts to decipher if a function other than \code{c()} has been
#' supplied as input. Returns the unique positions of the possible
#' non-\code{c} function calls in \code{lst}.
#'
#' @param lst A list of inputs wrapped in \code{substitute()} and coerced to
#'   a list using \code{as.list()}.
#' @return An integer vector of unique positions (length zero if none match).
is.fun <- function(lst) {
  # Flatten first, then deduplicate: the previous sapply() version called
  # unique() on a possibly-nested list before unlist(), so duplicate
  # positions could survive in the flattened result; it also needed a
  # manual names(...) <- NULL to undo sapply's USE.NAMES.
  matches <- lapply(fun.par, function(pattern) grep(pattern, lst))
  unique(unname(unlist(matches)))
}
# is.c() ------------------------------------------------------------------
#
#' @title
#' Determine if the first element of a parse tree is identical to the
#' \code{c} function.
#'
#' @description
#' Quick check for whether an \code{x}/\code{y} vector was written with a
#' \code{c()} wrapping, which matters for later subsetting (unwrapped
#' vectors get a \code{list()} wrapping instead). Bare symbols are handled
#' up front because they have no first element to inspect.
#'
#' @param expr A parse tree generated by \code{substitute()}.
is.c <- function(expr) {
  if (is.symbol(expr)) {
    return(FALSE)
  }
  identical(expr[[1L]], quote(c))
}
# rm.gg2() ----------------------------------------------------------------
#
#' @title
#' Locate \code{ggplot2}-style expression aesthetics.
#'
#' @description
#' Expression aesthetics (variables wrapped in functions or using
#' prefix/infix operators) need different handling than stand-alone
#' variable aesthetics (i.e. \code{mpg}) or dplyr-like calls
#' (i.e. \code{mpg:hp}); this returns the positions of the former.
#'
#' @param expr A parse tree generated by \code{substitute()}. If the tree is
#'   not wrapped by \code{c()} then it is advised to wrap \code{expr} with
#'   \code{list()}.
#'
#' @details
#' The \code{list} wrapping is advised because of how \code{expr} is
#' indexed: a \code{c()} wrapping is assumed, so list input is inspected
#' via its first element.
rm.gg2 <- function(expr) {
  op_positions <- if (is.list(expr)) is.op(expr[[1L]]) else is.op(expr)
  fun_positions <- is.fun(expr)
  c(op_positions, fun_positions)
}
# messy_eval --------------------------------------------------------------
#
#' Evaluate a selection expression against the available variable names.
#'
#' @param expr Lazy dots.
#' @param vars Variable names.
#' @param names_list List of names built from \code{vars}.
#'
#' @details
#' The bulk of this code was taken from the \code{dplyr} package. The lazy
#' expression is evaluated in a data context exposing the name/position
#' pairs plus the \code{select_helpers} (\code{starts_with()} etc., defined
#' elsewhere in this package), and the resulting index vector is used to
#' subset \code{vars}.
messy_eval <- function(expr, vars, names_list){
  eval.index <- lazyeval::lazy_dots(eval(expr)) %>%
    lazyeval::as.lazy_dots() %>%
    lazyeval::lazy_eval(c(names_list, select_helpers))
  # lazy_eval returns a list; only the first (single) result is wanted.
  eval.index <- eval.index[[1]]
  vars[eval.index]
}
|
6802464f2bd54d97b26ddefa79b1d018a1a61a3c
|
368c9704acf4feddc6de68336cb57d0406d58b68
|
/Final.r
|
f191657b65d41a07b8ea76425152715477585b64
|
[] |
no_license
|
alex-selby/MKTG-562
|
0b7148fc96f04e8dcf701b639c18fbf8ce8af39f
|
2ea3029fcdd7cd6a7360893a06886b653c8bbc49
|
refs/heads/master
| 2021-05-06T23:03:07.116478
| 2018-09-24T03:25:06
| 2018-09-24T03:25:06
| 112,897,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,513
|
r
|
Final.r
|
# 1) Clear all variables and the console.
# NOTE(review): rm(list = ls()) in a script is generally discouraged, but it
# is kept because this teaching workflow relies on a clean environment.
rm(list=ls())
cat("\014")
# 2) Point the script at the data directory (the folder containing this file).
library(rstudioapi)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# 3) Read in the data.
WebData <- read.csv("webdata.csv")
# 4) Explore the data.
str(WebData)
summary(WebData)
###############################################################################################################
# HISTOGRAM
# "Create a histogram showing the distribution of customer ages."
hist(WebData$transactionRevenue, main = paste("Histogram of Transaction Revenue"), xlab = "transactionRevenue")
###############################################################################################################
# HISTOGRAMS BY TRAFFIC SOURCE
# "How do customers coming from Direct vs. Search compare in terms of total number of transaction revenue?"
# Split by traffic source (source_num: 0 = Direct, 1 = Search).
WebData.Direct <- subset(WebData, source_num == 0)
WebData.Search <- subset(WebData, source_num == 1)
hist(WebData.Direct$transactionRevenue, main = paste("Histogram of Transaction Revenue for Direct"), xlab = "Transaction Revenue")
# FIX: the "Search" plots below previously reused the Direct subset
# (copy-paste bug), so Direct data was plotted under a Search title.
hist(WebData.Search$transactionRevenue, main = paste("Histogram of Transaction Revenue for Search"), xlab = "Transaction Revenue")
hist(WebData.Direct$transactions, main = paste("Histogram of # of Transactions for Direct"), xlab = "# of Transactions")
hist(WebData.Search$transactions, main = paste("Histogram of # of Transactions for Search"), xlab = "# of Transactions")
|
87d302ec77c2802ce14defa7491f25d5d0807980
|
3c0359eb76bc599da2dfac34e6cd1831715e52b9
|
/regionalism_qca20/Rcode.R
|
8a52abb877860661fd188900b106a94333b27f47
|
[
"MIT"
] |
permissive
|
yello-data/publish
|
ccd10b9ff1eb258d9e1eb82623582f9d321d3435
|
3865169bfe4e61ae356e0c163092e93a51d79f0b
|
refs/heads/master
| 2020-09-28T01:58:05.064164
| 2020-02-24T13:55:52
| 2020-02-24T13:55:52
| 226,662,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,169
|
r
|
Rcode.R
|
# QCA analysis of regionalism: builds the condition variables (democratic
# preference, trade preference, power concentration) per regional
# organization and dichotomizes them at the mean for QCA.
# Requires "the_database.xlsx" and "igo_dyads.xlsx" in the working directory.
# Libraries
library(readxl)
library(dplyr)
library(tidyr)
library(countrycode)
# Load sheets (see files "the_database.xlsx" and "igo_dyads.xlsx")
pol <- read_xlsx("the_database.xlsx", sheet = 2)
eco <- read_xlsx("the_database.xlsx", sheet = 3)
pow <- read_xlsx("the_database.xlsx", sheet = 5)
base <- read_xlsx("igo_dyads.xlsx", sheet = 2)
full <- read_xlsx("igo_dyads.xlsx", sheet = 4)
# Rebuild the political indicator: collect each country's score whether it
# appears in the country/pol1 or the country2/pol2 columns, then union them.
# NOTE(review): `pol1 != is.na(pol1)` looks like it was meant to be
# `!is.na(pol1)` -- confirm the intended filter.
pol1 <- pol %>%
  select(3:11) %>%
  group_by(country, pol1) %>%
  summarize() %>%
  filter(pol1 != is.na(pol1)) %>%
  select(country, pol = pol1)
pol2 <- pol %>%
  select(3:11) %>%
  group_by(country2, pol2) %>%
  summarize() %>%
  filter(pol2 != is.na(pol2)) %>%
  select(country = country2, pol = pol2)
polt <- pol1 %>%
  union(pol2)
# Drop duplicated countries, keeping only the row with the expected score
correct <- which(polt$country == "Mali" & polt$pol != 7 | polt$country == "Dominican Republic" & polt$pol != 8 |
                   polt$country == "Trinidad and Tobago" & polt$pol != 10 | polt$country == "Seychelles" & polt$pol != 8 |
                   polt$country == "Paraguay" & polt$pol != 9 | polt$country == "Nigeria" & polt$pol != 4 |
                   polt$country == "Guinea" & polt$pol != -1 | polt$country == "Ghana" & polt$pol != 8 |
                   polt$country == "Togo" & polt$pol != -4)
polt <- polt[-correct,]
# Join membership data with power covariates and political scores
base1 <- base %>%
  select(2:4) %>%
  left_join(pow) %>%
  left_join(polt) %>%
  select(1:3, gdp, poliv = pol, open = Trade, pop = Pop, land = terr)
# Missing political scores are treated as 0
base1$poliv[which(is.na(base1$poliv))] <- 0
# Remove duplicated rows and join with the economic sheet
base1 <- base1 %>%
  group_by(region, year, country, gdp, pop, open, land, poliv) %>%
  summarize() %>%
  left_join(select(eco, -gdp))
# Identify ESA rows duplicated across the interim/full memberships,
# keeping only the row whose export figure matches the expected value
correct1 <- which(base1$region == "ESA interim" & base1$country == "Madagascar" & base1$exports != 1.9 |
                    base1$region == "ESA interim" & base1$country == "Seychelles" & base1$exports != 6.35 |
                    base1$region == "ESA interim" & base1$country == "Mauritius" & base1$exports != 10.7 |
                    base1$region == "ESA interim" & base1$country == "Zimbabwe" & base1$exports != 0.17 |
                    base1$region == "ESA full" & base1$country == "Madagascar" & base1$exports != 2.56 |
                    base1$region == "ESA full" & base1$country == "Seychelles" & base1$exports != 6.76 |
                    base1$region == "ESA full" & base1$country == "Mauritius" & base1$exports != 12.17 |
                    base1$region == "ESA full" & base1$country == "Zimbabwe" & base1$exports != 4.01 )
# Remove repeated ESA countries
base1 <- base1[-correct1,]
# Fix variable types and create the average of exports and imports
base2 <- base1 %>%
  ungroup() %>%
  select(region, country, year, gdp, pop, open, land, poliv, exports, imports) %>%
  mutate(gdp = as.numeric(gdp),
         pop = as.numeric(pop),
         open = as.numeric(open),
         exports = as.numeric(exports),
         imports = as.numeric(imports),
         exp_imp = (exports + imports)/2)
# Attach country codes (ISO3, Polity IV numeric, V-Dem)
base2$iso3c <- countrycode(base2$country, origin = 'country.name', destination = 'iso3c')
base2$p4n <- countrycode(base2$iso3c, origin = 'iso3c', destination = 'p4n')
base2$vdem <- countrycode(base2$iso3c, origin = 'iso3c', destination = 'vdem')
# Create country dyads within each region (unordered pairs)
dyad_base2 <- base2 %>%
  group_by(region) %>%
  expand(country.a = country, country.b = country) %>%
  filter(country.a < country.b) %>%
  left_join(., base2, by=c("country.a"="country", "region")) %>%
  left_join(., base2, by=c("country.b"="country", "region")) %>%
  select(region, country.a, country.b, gdp.a = gdp.x, gdp.b = gdp.y,
         poliv.a = poliv.x, poliv.b = poliv.y)
# Political condition: GDP-weighted mean of rescaled (0-1) political scores
full_dataset <- base2 %>%
  group_by(region) %>%
  summarize(pol = sum(((poliv+10)/20) * gdp) / sum(gdp)) %>%
  right_join(full) %>%
  select(region, year, agreement, pref_dem = pol, institutions)
# Alternative political variable (dyadic)
# NOTE(review): the block below looks like leftover scratch code -- the pipe
# feeds the summarize() result into sum(), and `mercosur` is never defined
# in this script, so it errors if run as-is.
dyad_base2 %>%
  group_by(region) %>%
  summarize(pol = 1- sum(abs((poliv.a+10)/20 - (poliv.b+10)/20) * (gdp.a + gdp.b) / (length(unique(gdp.a)))) / sum(unique(c(gdp.a, gdp.b)))) %>%
  sum(abs((mercosur$poliv.a+10)/20 - (mercosur$poliv.b+10)/20) * (mercosur$gdp.a + mercosur$gdp.b) / (length(unique(mercosur$gdp.a))))
sum(unique(c(mercosur$gdp.a, mercosur$gdp.b)))
# Economic condition: GDP-weighted mean of intra-regional trade shares
full_dataset <- base2 %>%
  group_by(region) %>%
  summarize(eco = round((sum(exp_imp * gdp) / sum(gdp))/ 100, 3)) %>%
  right_join(full_dataset) %>%
  select(region, year, agreement, pref_trade = eco, pref_dem, institutions)
# Power condition: a country counts as "powerful" if it has the region's
# largest GDP, or if it is a small, very open economy
base2 <- base2 %>%
  group_by(region) %>%
  mutate(pow = gdp == max(gdp) | (open >=100 & pop < 10000 & land < 200000)) %>%
  ungroup()
full_dataset <- base2 %>%
  group_by(region) %>%
  summarize(pow = sum(pow * gdp) / sum(gdp)) %>%
  right_join(full_dataset) %>%
  select(region, year, agreement, pref_trade, pref_dem, institutions, power = pow)
# Dichotomize each condition at its mean for QCA
results_qca <- full_dataset %>%
  mutate_at(4:7, ~if_else(. > mean(.), 1, 0)) %>%
  arrange(desc(agreement))
# Export results
write.csv(results_qca, "results_qca1.csv")
write.csv2(full_dataset, "results_qca2.csv")
|
d0311e449614a1c630e1d53c0b0e88792f2829c8
|
92194ff532e7ec52f7d6733b4610795508bfdce5
|
/Models/DataExtrapolation/structure_test.R
|
01f19368b56710a63e6a6414e4fe5050510952cb
|
[] |
no_license
|
JusteRaimbault/RealEstate
|
9e567e4d28e490b7954077a5304902f20e724f26
|
0fd6634386947f9b3878809215e486d3c4e57d7b
|
refs/heads/master
| 2021-01-10T15:57:46.849552
| 2020-02-19T09:29:07
| 2020-02-19T09:29:07
| 47,781,427
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 867
|
r
|
structure_test.R
|
# Sanity check for the commune-level extrapolation: recompute CSP shares
# (CAD, OUV) directly from the raw structure tables for 2001 and compare
# them with the extrapolated shares.
setwd(paste0(Sys.getenv('CS_HOME'),'/RealEstate/Models/DataExtrapolation/'))
# getIncome()/getStructure() (and the joins' dependencies) come from here
source('inverseKernels.R')
years = c('01','02','03','04','05')
csp = c("EMP","OUV","INT","ART","CAD")
idcol = 'COM'
extrapolated <- read.csv(file='res/extrapolate_allyears_communes.csv',sep = ';')
# Loop over all years was disabled; only 2001 is checked
#for(year in years){
year='01'
income <- getIncome(year,idcol=idcol,dispo='')
structure <-getStructure(year,idcol=idcol)
# Share of executives (CAD) among the five CSP categories, per commune
shareCAD = structure$CAD/rowSums(structure[,csp])
diff = left_join(extrapolated[extrapolated$year==2001,c('idcom','share_CAD')],data.frame(idcom=as.numeric(structure$COM),share=shareCAD))
# Absolute deviation between extrapolated and recomputed shares
summary(abs(diff$share_CAD - diff$share))
# Same check for blue-collar workers (OUV)
shareOUV = structure$OUV/rowSums(structure[,csp])
diff = left_join(extrapolated[extrapolated$year==2001,c('idcom','share_OUV')],data.frame(idcom=as.numeric(structure$COM),share=shareOUV))
summary(abs(diff$share_OUV - diff$share))
#}
|
098ee0cec7403955e4def8859b6550b0054c04de
|
d26a96ee3fbbe01e7eca74e1c0eea795677365ef
|
/R/bq_reader.R
|
2f14d1bcc0ad7fea7106eb0a1a2056bdb7d244df
|
[] |
no_license
|
mcdelaney/mmkit
|
0a353007950e941a7aada347dba037edca12f4bc
|
45205acd675e6fe600c75bc59b41f8d506689582
|
refs/heads/master
| 2023-06-17T09:35:31.891343
| 2021-07-16T17:10:26
| 2021-07-16T17:10:26
| 129,094,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 918
|
r
|
bq_reader.R
|
#' BQReader
#' @docType class
#' @description BigQuery database interface: wraps the python `etllib`
#'   package (loaded through reticulate) and returns query results as
#'   data frames.
#' @param py_version_path Path to the python installation.
#' @import DBI
#' @importFrom R6 R6Class
#' @import reticulate
#' @import arrow
#' @name BQReader
#' @export
#' @return Object of \code{\link{R6Class}} with methods for query execution.
#' @format \code{\link{R6Class}} object.
BQReader <- R6::R6Class("BQReader", list(
  bq = NA,
  initialize = function(py_version_path = NA) {
    # Resolve the python interpreter: explicit argument wins, then the
    # PY_PATH environment variable, then a bare "python3".
    if (is.na(py_version_path)) {
      python_path = Sys.getenv("PY_PATH")
      if (python_path == "") {
        python_path = "python3"
      }
    } else {
      python_path = py_version_path
    }
    message("Using python path: ", python_path)
    reticulate::use_python(python_path, required = TRUE)
    self$bq <- reticulate::import("etllib")$Big()
  },
  query = function(sql, ...) {
    # Interpolate `...` into the SQL template via mmkit::str_form, run the
    # query (arrow transport), and return the result as a data.frame.
    raw_result <- self$bq$query(mmkit::str_form(sql, ...), arrow = TRUE)
    return(data.frame(raw_result))
  }))
|
a274a62bffe1fb2b11d97f30a308f4dc3c998ce5
|
21c5a7da2cfd702eaec886182d3a847230fc739c
|
/Script-COVID-Floripa.R
|
b9ffb691f0a055f4d6e3cae27f21db94d8ab1ffc
|
[] |
no_license
|
danielgonlopes/COVID-Floripa
|
1b23d4fd3eeeed181ad2555cc90a2b827f6d83df
|
8ae8351965e8b6c6385e5b78adab97df206d6068
|
refs/heads/main
| 2023-04-01T10:14:49.669695
| 2021-03-28T19:47:13
| 2021-03-28T19:47:13
| 350,546,043
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,314
|
r
|
Script-COVID-Floripa.R
|
# COVID-19 Florianópolis: case/death profiling and mortality models.
# NOTE(review): rm(list=ls()) in a script is discouraged; kept as authored.
rm(list=ls())
#### PACKAGES ####
library(tidyverse)
library(ISOweek) # to group days into ISO weeks
library(DescTools) # PseudoR2 for the logit model
library(caret) # decision tree training
library(rattle) # decision tree plotting
library(randomForest) # random forest model
#### READING AND CLEANING THE DATA ####
df <- read.csv("Dados/covid_florianopolis.csv", encoding="UTF-8", stringsAsFactors = T)
dados <- df
# Convert notification date to Date
dados$data_notificacao <- as.Date(dados$data_notificacao)
# Create ISO weeks
dados$week <- ISOweek::ISOweek(dados$data_notificacao)
# Drop notification dates with typos (before the pandemic start)
dados <- dados %>%
  filter(data_notificacao >= "2020-03-01")
# Keep only confirmed cases (lab or clinical-epidemiological confirmation)
dados_confirmados <- dados %>%
  filter(classificacao_final == "CONFIRMAÇÃO LABORATORIAL" |
           classificacao_final == "CONFIRMAÇÃO CLÍNICO EPIDEMIOLÓGICO" )
# Check the number of deaths
table(dados_confirmados$obito)
#### CASE PROFILE ####
#### PERCENTAGE OF CASES BY CONDITION ####
#### DEATH PROFILE ####
dados_obitos <- dados_confirmados %>%
  filter(obito == "SIM")
# Deaths by sex, age bracket and race
table(dados_obitos$sexo)
table(dados_obitos$faixa_idade)
table(dados_obitos$raca)
# Bar chart of sex faceted by age bracket (youngest bracket excluded)
dados_obitos %>%
  filter(faixa_idade != "ATÉ 9 ANOS") %>%
  ggplot(aes(x = sexo)) +
  geom_bar() +
  facet_grid(. ~ faixa_idade) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
# Bar chart of age bracket faceted by race
dados_obitos %>%
  filter(faixa_idade != "ATÉ 9 ANOS") %>%
  ggplot(aes(x = faixa_idade)) +
  geom_bar() +
  facet_grid(. ~ raca) +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
# Number of deaths per neighborhood (top 10 shown)
obitos_bairro <- data.frame(table(dados_obitos$bairro))
obitos_bairro_arranged <- obitos_bairro %>%
  arrange(desc(Freq))
head(obitos_bairro_arranged, 10)
#### PERCENTAGE OF DEATHS BY CONDITION ####
# Death-to-survivor ratio (%) among all notified cases
t_infectado <- table(dados$obito)
round(t_infectado[2]/t_infectado[1]*100, 2)
# Death-to-survivor ratio (as a percentage) among cases that have a given
# comorbidity. `a` is the death indicator, `b` the condition indicator;
# they are cross-tabulated and cell [4] (died, has condition) is divided
# by cell [3] (survived, has condition), rounded to 2 decimals.
porcentagem_obitos_por_condicao <- function(a, b) {
  cross <- table(a, b)
  round(cross[4] / cross[3] * 100, 2)
}
# Death/survivor percentage for cases with diabetes
porcentagem_obitos_por_condicao(dados$obito, dados$diabetes)
# ... with chronic heart disease
porcentagem_obitos_por_condicao(dados$obito, dados$doenca_card_cronica)
# ... with decompensated respiratory disease
porcentagem_obitos_por_condicao(dados$obito, dados$doenca_resp_descompensada)
# ... with advanced kidney disease
porcentagem_obitos_por_condicao(dados$obito, dados$doencas_renais_avancado)
# Death percentage by age bracket: even linear indices of the 2 x 10 table
# are the "died" cells; divide each by the preceding "survived" cell
t_faixa_idade <- table(dados$obito, dados$faixa_idade)
list_idade <- list()
for (i in c(2,4,6,8,10,12,14,16,18,20)){
  list_idade[[i]] <- list(round(t_faixa_idade[i]/t_faixa_idade[i-1]*100, 2))
}
morte_idade <- data.frame(list_idade[2],list_idade[4],list_idade[6],list_idade[8],
                          list_idade[10],list_idade[12],list_idade[14],list_idade[16],
                          list_idade[18],list_idade[20])
colnames(morte_idade) <- levels(dados$faixa_idade)
morte_idade
#### TEMPORAL ANALYSIS ####
#### SET THE ANALYSIS PERIOD ####
data_de_corte <- "2021-01-01" # insert the cut-off date here
#### CASES PER DAY ####
# Cases after the cut-off date, elderly brackets only (60+);
# 80+ brackets flagged as vaccinated
dados_casos_dia <- dados_confirmados %>%
  filter(data_notificacao >= data_de_corte) %>%
  group_by(data_notificacao, faixa_idade) %>%
  summarise(quantidade = n()) %>%
  filter(faixa_idade == "90 ANOS OU MAIS" | faixa_idade == "80 A 89 ANOS" | faixa_idade == "70 A 79 ANOS" | faixa_idade == "60 A 69 ANOS") %>%
  mutate(vacinado = faixa_idade == "90 ANOS OU MAIS" | faixa_idade == "80 A 89 ANOS")
# Line chart
ggplot(dados_casos_dia, aes(x = data_notificacao, y = quantidade, color = faixa_idade)) +
  geom_line()
# Stacked (100%) bar chart
ggplot(dados_casos_dia, aes(x = data_notificacao, y = quantidade, fill = faixa_idade)) +
  geom_bar(stat = "identity", position = "fill")
#### CASES PER WEEK ####
# Same filtering as above, aggregated by ISO week
dados_casos_semana <- dados_confirmados %>%
  filter(data_notificacao >= data_de_corte) %>%
  group_by(week, faixa_idade) %>%
  summarise(quantidade = n()) %>%
  filter(faixa_idade == "90 ANOS OU MAIS" | faixa_idade == "80 A 89 ANOS" | faixa_idade == "70 A 79 ANOS" | faixa_idade == "60 A 69 ANOS") %>%
  mutate(vacinado = faixa_idade == "90 ANOS OU MAIS" | faixa_idade == "80 A 89 ANOS")
# Stacked (100%) bar chart
ggplot(dados_casos_semana, aes(x = week, y = quantidade, fill = faixa_idade)) +
  geom_bar(stat = "identity", position = "fill")
# Side-by-side bar chart
ggplot(dados_casos_semana, aes(x = week, y = quantidade, fill = faixa_idade)) +
  geom_bar(stat = "identity", position = "dodge")
#### DEATHS PER DAY ####
# Deaths only, after the cut-off date, with a non-missing death date,
# elderly brackets only (70+)
dados_obitos_dia <- dados_confirmados %>%
  filter(obito == "SIM" & data_notificacao >= data_de_corte) %>%
  filter(data_obito != "") %>%
  group_by(data_obito, faixa_idade) %>%
  summarise(quantidade = n()) %>%
  filter(faixa_idade == "90 ANOS OU MAIS" | faixa_idade == "80 A 89 ANOS" | faixa_idade == "70 A 79 ANOS")
# Stacked (100%) bar chart
ggplot(dados_obitos_dia, aes(x = data_obito, y = quantidade, fill = faixa_idade)) +
  geom_bar(stat = "identity", position="fill") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
# Side-by-side bar chart
ggplot(dados_obitos_dia, aes(x = data_obito, y = quantidade, fill = faixa_idade)) +
  geom_bar(stat = "identity", position = "dodge") +
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
#### DEATHS PER WEEK ####
# Same filtering as above, aggregated by ISO week
dados_obitos_semana <- dados_confirmados %>%
  filter(obito == "SIM" & data_notificacao >= data_de_corte) %>%
  filter(data_obito != "") %>%
  group_by(week, faixa_idade) %>%
  summarise(quantidade = n()) %>%
  filter(faixa_idade == "90 ANOS OU MAIS" | faixa_idade == "80 A 89 ANOS" | faixa_idade == "70 A 79 ANOS")
# Stacked (100%) bar chart
ggplot(dados_obitos_semana, aes(x = week, y = quantidade, fill = faixa_idade)) +
  geom_bar(stat = "identity", position="fill")
# Side-by-side bar chart
ggplot(dados_obitos_semana, aes(x = week, y = quantidade, fill = faixa_idade)) +
  geom_bar(stat = "identity", position = "dodge")
#### LOGIT MODEL ####
# Logistic regression of death on demographics and comorbidities
# NOTE(review): the stray "+ +" before diabetes is a typo (parsed as a
# harmless unary plus; should be a single "+").
logit_model <- glm(obito ~ sexo + faixa_idade + raca + + diabetes + doenca_resp_descompensada
                   + doencas_renais_avancado + doenca_card_cronica + gestante_alto_risco
                   + portador_doenca_cromossomica + imunossupressao, data = dados_confirmados,
                   family = binomial)
summary(logit_model)
# Pseudo-R2 of the logit model
DescTools::PseudoR2(logit_model)
# Predicted probability for each case
logit_model_probs <- predict(logit_model,type = "response")
# Classify each probability at the 0.5 threshold
logit_model_preds <- ifelse(logit_model_probs > 0.5, "Pred Obito", "Pred Vivo")
# Confusion matrix of the logit model
logit_matriz_confusao <- table(dados_confirmados$obito, logit_model_preds)
logit_matriz_confusao
# Accuracy: true positives (cell [2]) + true negatives (cell [3]) over all cases
logit_acertos <- logit_matriz_confusao[2] + logit_matriz_confusao[3]
logit_porcentagem_acertos <- logit_acertos / length(dados_confirmados$obito)
logit_porcentagem_acertos
#### DECISION TREE ####
# Set seed for reproducibility
set.seed(123)
# 70/30 train/test split, stratified on the outcome
inTrain <- caret::createDataPartition(y = dados_confirmados$obito, p = 0.7, list = FALSE)
treino <- dados_confirmados[inTrain, ]
teste <- dados_confirmados[-inTrain, ]
# Train the decision tree on the training set
# NOTE(review): column positions 7:9, 16:27, 33 select predictors and the
# outcome -- confirm these indices against the current CSV layout.
Tree_Model <- caret::train(obito ~ ., method = "rpart", data = treino[,c(7:9,16:27,33)])
# Plot the decision tree
rattle::fancyRpartPlot(Tree_Model$finalModel)
# Predict on the test set
Tree_Preds <- predict(Tree_Model, newdata = teste[,c(7:9,16:27)])
# Confusion matrix of the decision tree
confusionMatrix(Tree_Preds, teste$obito)
#### RANDOM FOREST ####
# Train the random forest on the training set
forest_model <- randomForest::randomForest(obito ~ ., data = treino[,c(7:9,16:27,33)])
# Predict on the test set
forest_model_preds <- predict(forest_model, newdata = teste[,c(7:9,16:27)])
# Confusion matrix of the random forest
confusionMatrix(forest_model_preds, teste$obito)
|
2e2adc6e4408f30a788e8bd238627ca61975d1f5
|
6c0bd9d42918ef3ff3804c4169ef573c5bf6458f
|
/_old_versions/_versions(RStudio)/build_and_send_Query_with_cellbaseR_17-10.R
|
374c34adf6c09e64a49ff93f183eba45053a55ba
|
[] |
no_license
|
IsaFG/vcf_pk
|
277ffe6885cbd7123f8ce0aab05710c0421f5884
|
950cfe487160d87516d4efe78878dcc63f370ada
|
refs/heads/master
| 2021-01-22T18:07:10.428420
| 2018-02-27T15:55:08
| 2018-02-27T15:55:08
| 100,740,025
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,246
|
r
|
build_and_send_Query_with_cellbaseR_17-10.R
|
############## [INFO] SCRIPT 3 General information ############
# NOTE : UNFINISHED WORK
# IMPORTANT : this script uses the library "cellbaseR"
# Bioconductor link for library "cellbaseR":
# https://bioconductor.org/packages/release/bioc/html/cellbaseR.html
############## [INFO] Input and Output ############################
# Inputs :
# 1) A variant table (variants_table.txt) coming from a VCF file (script VCF_get_variant)
# 2) A filter parameters table (param_mtrx.txt) coming from script Swagger_get_parameters
# NOT DONE : the user should be able to choose filter parameters so that the QUERY can be built
# Output 1 : built QUERY
# DONE : the built query is sent to CellBase
# Output 2 : annotation table
############# [FACULTATIVE] Set Working directory ############
setwd("C:\\Users\\FollonIn\\Documents\\GitHub\\vcf_pk")
############# [FACULTATIVE] Install libraries ###################
install.packages("RCurl")
install.packages("jsonlite")
############# [FACULTATIVE] Install library cellbaseR ##########
# From Bioconductor :
source("https://bioconductor.org/biocLite.R")
## try http:// if https:// URLs are not supported
biocLite("cellbaseR")
# From the developer's github (Mohammed Elsiddieg <melsiddieg@gmail.com>) :
install.packages("devtools") # devtools is needed to install libraries from Github
library(devtools)
devtools::install_github("melsiddieg/cellbaseR")
############# [CODE] Load libraries ###################
library(RCurl)
library(jsonlite)
library(cellbaseR)
############# BUILDING QUERY: Set the different parts of the GET URL ############
# Ideally category and subcategory would be chosen by the user
# NOTE : the common URL may change across CellBase releases
common_URL <- "http://bioinfo.hpc.cam.ac.uk/cellbase/webservices/rest/v4/hsapiens/"
category_URL <- "genomic/variant/"
subcategory_URL <- "/annotation"
# param_start_URL <- "?" # probably unnecessary
# param_separator_URL <- "&" # probably unnecessary
############# BUILDING QUERY: Get the parameters table ############
parameters_table <- read.table("test_files\\param_mtrx.txt", header=TRUE)
param_number <- nrow(parameters_table)
############# BUILDING QUERY: Parameters by default ###############
parameters_URL <- "limit=-1&skip=-1&skipCount=false&count=false&Output%20format=json&normalize=false&phased=false&useCache=false&imprecise=true&svExtraPadding=0&cnvExtraPadding=0"
############# [UNFINISHED] BUILDING QUERY: Parameters chosen by user (TIBCO?) ###############
## NOT IN USE YET
## TIBCO will probably be used for this
# param_coded_table <- parameters_table
############# [Only for GET VARIANT QUERIES] Get the variants table ############
variant_table <- read.table("test_files\\variants_table.txt", header=TRUE)
var_number <- nrow(variant_table)
############# [cellbaseR] Build the GET URL and query CellBase ############
query_vector <-character()
URL_vector <- character()
results_list <- list()
cb <- CellBaseR()
# NOTE(review): the loop grows getVariant_table with rbind() and URL_vector
# with c() -- O(n^2) for many variants; preallocate if this becomes slow.
for (i in 1:var_number) {
  # Columns 1-4: chromosome, position, reference allele, alternate allele
  var_chrom <- variant_table[i,1]
  var_range <- variant_table[i,2]
  var_refAl <- variant_table[i,3]
  var_altAl <- variant_table[i,4]
  # Fetch the variant annotation with the cellbaseR package
  variant <- paste(var_chrom, ":", var_range, ":", var_refAl, ":", var_altAl, sep = "")
  res2 <- getVariant(object=cb, ids=variant, resource="annotation")
  res2table <- res2[c("chromosome", "start", "reference", "alternate", "id", "displayConsequenceType")]
  if (i==1) {
    getVariant_table <- res2table
  } else {
    getVariant_table <- (rbind(as.matrix(getVariant_table), as.matrix(res2table)))
  }
  # Build the equivalent Swagger/REST link (":" URL-encoded as %3A)
  variant_URL <- paste(var_chrom,"%3A",var_range,"%3A",var_refAl,"%3A",var_altAl,sep = "")
  URL_base <- paste(common_URL,category_URL,variant_URL,subcategory_URL,"?",parameters_URL,sep = "")
  URL_vector <- c(URL_vector, URL_base)
  # query_v <- getURL(URL_base)
  # query_vector <- c(query_vector, query_v)
  print (paste("Processing variant number:", i)) # this line is for testing
}
########### Print the results in the console ############
res2
getVariant_table
########### Write the table to a txt file ###########
try(write.table(getVariant_table,"test_files\\CB_variants_table.txt", append = TRUE, sep="\t",row.names=FALSE))
|
fda93f38f1203b191732115e51c4f9c304ee2178
|
0c90b72ff5b57481b7edb74620333a7c3e3c603f
|
/R_Youtube/airim/op_gg_popular.R
|
dcf7310e1978f9c80618527b4f500e446a0095fc
|
[] |
no_license
|
diligejy/R
|
41abd1cc6ef3731366f3196d3d428fd5541215e5
|
1b54334094c54e041b81d45f87c5c07336d62ff9
|
refs/heads/master
| 2022-09-11T18:13:10.614301
| 2022-08-21T14:57:21
| 2022-08-21T14:57:21
| 210,329,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 664
|
r
|
op_gg_popular.R
|
# Practice: crawl the "popular posts" list from op.gg talk
library(rvest)
library(RSelenium)
library(httr)

remD <- remoteDriver(remoteServerAddr = 'localhost',
                     port = 4445L, # Selenium server port
                     browserName = "chrome")
remD$open() # connect to the server
remD$navigate("https://www.op.gg/") # go to the home page

html <- remD$getPageSource()[[1]]
html <- read_html(html) # parse the page source

# FIX: the original interpolated the literal text "deparse(i)" into the CSS
# selector (matching nothing) and overwrote the result on every iteration;
# build the selector with paste0() and accumulate the matched <a> nodes.
popular_nodes <- list()
for (i in 1:10) {
  css_sel <- paste0("div.community-best__content-left > ul > li:nth-child(", i, ") a")
  popular_nodes[[i]] <- html_nodes(html, css = css_sel)
}
# FIX: the original read hrefs from an undefined object `link` and then
# printed a misspelled name (`op_popular` instead of `on_popular`).
on_popular <- unique(html_attr(do.call(c, popular_nodes), 'href'))
on_popular
on_popular[1:10]
|
d80cf1a2a54c7fd60c7601d4988eb8ccc772f79d
|
f2a1cebe0c88195da10e3ad8f3184e6738bab6a8
|
/Functional_Depth_ARIMA.R
|
eadbc36e94df04b73e2e7c03737b238bc508e1e0
|
[] |
no_license
|
als23/identifying_and_responding_to_outlier_demand_in_revenue_management
|
229ac85e0e723d0c2b352d0c186eeda0b16fde4a
|
bba9b8197a432bee65ea8fcf5fbc9d78d9b36726
|
refs/heads/main
| 2023-08-15T08:14:40.903841
| 2021-09-24T22:45:46
| 2021-09-24T22:45:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 324
|
r
|
Functional_Depth_ARIMA.R
|
# Extrapolate each row of `df` out to 30 columns: keep the first k observed
# values and append a forecast::auto.arima forecast for the remaining
# 30 - k points, rounded up with ceiling().
# NOTE(review): assumes each row has at least k numeric entries and that the
# horizon is fixed at 30 -- confirm against callers.
arima_extrap <- function(df,k){
  return(t(apply(df,1, function (x) c(x[1:k],ceiling(as.vector(forecast(auto.arima(as.numeric(x[1:k])),h=30-k)$mean))))))
}
# Functional-depth outlier detection on ARIMA-extrapolated curves.
# Delegates to func_depth_outlier() (defined elsewhere in this project),
# forwarding the max-iteration and bootstrap-sample counts.
func_depth_arima_outlier <- function(df, k, maxiter=50, B=1000){
  d <- arima_extrap(df,k)
  output <- func_depth_outlier(d, maxiter=maxiter, B=B)
  return(output)
}
|
936b9523b6cd396dc294e1e017b4146af3c5bc27
|
6220c165c0acc1c21f4f48cffab834cf343e94b8
|
/scripts/intergenicRegionAnalysis.R
|
d97f893a43a79502209d60b6bf9fce365583d8e8
|
[] |
no_license
|
lkov0/bladderwort-analysis
|
29d8e6886b56e61d98302e496229d88ea2c7a433
|
259b90210f9c2d881e9359d58b8ef2d198d7a078
|
refs/heads/master
| 2023-08-12T04:07:47.332127
| 2021-10-11T01:10:19
| 2021-10-11T01:10:19
| 166,107,557
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,128
|
r
|
intergenicRegionAnalysis.R
|
# Intergenic region analysis
# R code used to further interrogate expression data around intergenic
# regions of interest.
# First, check the placement of the candidate pairs in the PacBio genome.
library(ggplot2)
# BED-like table of gene hits in the PacBio assembly
# (V1 = chrom, V2 = start, V3 = end, V4 = gene id)
geneHits <- read.table("~/Xfer/jwlab/Bladderwort_3pri/1_Assembly/scaffolds_lk_anno_coordinates_in_PacBio.bed", stringsAsFactors = F)
head(geneHits)
# Candidate intergenic regions, one table per putative regulatory type
uni_cand <- read.table("~/Xfer/jwlab/Bladderwort_3pri/4_CandidateMining/uni_cand.3pri.summary.tsv", header = T, stringsAsFactors = F)
bid_cand <- read.table("~/Xfer/jwlab/Bladderwort_3pri/4_CandidateMining/bid_cand.3pri.summary.tsv", header = T, stringsAsFactors = F)
ter_cand <- read.table("~/Xfer/jwlab/Bladderwort_3pri/4_CandidateMining/term_cand.3pri.summary.tsv", header = T, stringsAsFactors = F)
ins_cand <- read.table("~/Xfer/jwlab/Bladderwort_3pri/4_CandidateMining/ins_cand.3pri.summary.tsv", header = T, stringsAsFactors = F)
# For each candidate gene pair, determine whether the two genes map to the
# same chromosome/scaffold in the PacBio assembly and, if so, the
# intergenic distance there.
#
# bedfile:  BED-like data.frame of gene hits (V1 = chrom, V2 = start,
#           V3 = end, V4 = gene id)
# candfile: candidate pairs with columns geneId1, geneId2, distance,
#           candidate_name
# Returns a data.frame with one row per pair: gene1/gene2, onSameChrom,
# distInPacBio (NA when a gene is missing or the pair spans chromosomes),
# distInMyGenome, pairId.
isOnSameChrom <- function(bedfile, candfile) {
  n <- nrow(candfile)
  output <- data.frame(gene1 = character(n), gene2 = character(n),
                       onSameChrom = logical(n), distInPacBio = numeric(n),
                       distInMyGenome = numeric(n), pairId = character(n),
                       stringsAsFactors = FALSE)
  for (i in seq_len(n)) {
    g1 <- as.character(candfile[i, "geneId1"])
    g2 <- as.character(candfile[i, "geneId2"])
    output$gene1[i] <- g1
    output$gene2[i] <- g2
    output$distInMyGenome[i] <- candfile[i, "distance"]
    output$pairId[i] <- as.character(candfile[i, "candidate_name"])
    # match() finds the row once instead of re-scanning bedfile$V4 with `==`
    # for every access; it also tolerates duplicated ids (first hit wins),
    # where the original length->1 comparisons would break inside if().
    idx1 <- match(g1, bedfile$V4)
    idx2 <- match(g2, bedfile$V4)
    if (is.na(idx1) || is.na(idx2) || bedfile$V1[idx1] != bedfile$V1[idx2]) {
      # Gene missing from the BED table, or pair spans chromosomes.
      output$onSameChrom[i] <- FALSE
      output$distInPacBio[i] <- NA
    } else {
      output$onSameChrom[i] <- TRUE
      # Distance = start of the downstream gene minus end of the upstream one.
      if (bedfile$V2[idx1] < bedfile$V2[idx2]) {
        output$distInPacBio[i] <- bedfile$V2[idx2] - bedfile$V3[idx1]
      } else {
        output$distInPacBio[i] <- bedfile$V2[idx1] - bedfile$V3[idx2]
      }
    }
  }
  output
}
uni_info <- isOnSameChrom(geneHits, uni_cand)
bid_info <- isOnSameChrom(geneHits, bid_cand)
ter_info <- isOnSameChrom(geneHits, ter_cand)
ins_info <- isOnSameChrom(geneHits, ins_cand)
# Label each candidate set with its putative element type and stack them
ter_info$type <- "terminator"
ins_info$type <- "insulator"
bid_info$type <- "bidirectional promoter"
uni_info$type <- "unidirectional promoter"
all_info <- rbind(uni_info, bid_info, ter_info, ins_info)
# Scatter: intergenic distance in the PacBio assembly vs. this assembly
ggplot(all_info, aes(x = distInPacBio, y = distInMyGenome, col = type)) +
  geom_point(size = 2) +
  theme_bw() +
  xlab("PacBio genome distance (bp)") +
  ylab("My genome distance (bp)") +
  ggtitle("Intergenic region length comparison\nMy genome vs. published PacBio") +
  theme(axis.text = element_text(size = 14, color = "black"), axis.title = element_text(size = 14), plot.title = element_text(size = 16), legend.title = element_blank(), legend.text = element_text(size = 12)) +
  scale_color_manual(values = c("red", "blue", "green", "black"))
# Boxplots: per-type difference in length vs. the PacBio genome
# NOTE(review): `differenceInLength` is never computed above (all_info has
# no such column) -- this plot fails unless it is added upstream.
ggplot(all_info, aes(x = type, y = differenceInLength, col = type)) +
  geom_boxplot() +
  theme_bw() +
  xlab("Element type") +
  ylab("PacBio genome difference in length (bp)") +
  ggtitle("Intergenic sequence length difference vs. PacBio genome") +
  theme(axis.text = element_text(size = 14, color = "black"), axis.title = element_text(size = 14), plot.title = element_text(size = 16), axis.text.x = element_blank(), axis.ticks.x = element_blank()) +
  scale_color_manual(values = c("red", "blue", "green", "black"))
ggsave("~/Dropbox/Bladderwort/0_Plots/intergenic_length_comparison_boxplots.png")
# NOTE(review): `genePairs` is undefined at this point -- the head() call
# below runs before the genePairs tables are read in.
head(genePairs)
# Compare amount and length of each type of sequence between genomes
genePairs_myGenome <- read.table("~/Xfer-work/genomes/utricularia/scaffolds.ugibba_lk.ALL.includingPacBio.genePairs.txt", header=T, sep="\t")
genePairs_pbio <- read.table("~/Xfer-work/genomes/utricularia/u.gibba_NEW.genePairs.txt", header=T, sep="\t")
genePairs_pbio$genome = "PacBio"
genePairs_myGenome$genome = "myGenome"
genePairs_all <- rbind(genePairs_myGenome, genePairs_pbio)
# Keep only positive intergenic distances below 10 kb
genePairs_all_lt10000 <- subset(genePairs_all, distance < 10000)
goodDists_lt10000 <- subset(genePairs_all_lt10000, distance > 0)
ggplot(goodDists_lt10000, aes(x = orientation, y = distance)) + geom_boxplot() + facet_grid(. ~ genome) +
  theme_bw() +
  xlab("Element type") +
  ylab("Intergenic length (bp)") +
  ggtitle("Intergenic sequence length by orientation") +
  theme(axis.text = element_text(size = 14, color = "black"), axis.title = element_text(size = 14), plot.title = element_text(size = 16))
ggsave("~/Dropbox/Bladderwort/0_Plots/intergenic_length_comparison_byOrientation.png")
# Expression fold-change summaries by pair orientation
allExpr <- read.table("~/Xfer/jwlab/Bladderwort_3pri/3_Quantification/genePairs.3primeFoldChange.tsv", header=T, sep="\t")
all_expr_distGt0 <- subset(allExpr, distance > 0)
ggplot(all_expr_distGt0, aes(x = orientation, y = log(all_tissues_foldChange))) +
  geom_boxplot() +
  xlab("Element type") +
  ylab("Avg fold change (log transformed)") +
  ggtitle("Orientation by log(foldChange) - my genome") +
  theme(axis.text = element_text(size = 14, color = "black"), axis.title = element_text(size = 14), plot.title = element_text(size = 16))
ggsave("~/Dropbox/Bladderwort/0_Plots/orientation_by_fold_change.png")
# Expression correlation summaries by pair orientation
ggplot(all_expr_distGt0, aes(x = orientation, y = exprCorr)) +
  geom_violin() +
  geom_jitter(position=position_jitter(width=.1, height = 0)) +
  xlab("Element type") +
  ylab("Expression correlation") +
  ggtitle("Orientation by expression correlation - my genome") +
  theme(axis.text = element_text(size = 14, color = "black"), axis.title = element_text(size = 14), plot.title = element_text(size = 16))
ggsave("~/Dropbox/Bladderwort/0_Plots/orientation_by_correlation.png")
|
c1e23a5bacc3bb0090dcdaa3522e499113d5885b
|
e86adb54dbc48cfa175a3fabe5d1790b0a15553e
|
/Código fuente de Hidro.R
|
ccb2dbf0fbb6164faeb0f90e3857470e33d356be
|
[] |
no_license
|
RG-andrey/Trabajo-de-Hidro
|
5d2d4b9b95fafeeb8b108bd48880b4380234b279
|
82432c8a32292a304d4e285a070b46b37c4540d7
|
refs/heads/master
| 2023-04-30T01:27:30.737015
| 2021-05-23T20:06:42
| 2021-05-23T20:06:42
| 370,145,680
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,870
|
r
|
Código fuente de Hidro.R
|
# Exploratory analysis of hydrological data (daily discharge series).
inp <- read.csv("FDC.csv")
head(inp)
dim(inp)
# Show rows containing missing values (exploratory check only).
inp[!complete.cases(inp),]
#newinp <- na.omit(inp)
# Column 2 is the Estrella river and column 3 the Banano river; plot the two
# daily discharge time series together.
plot(
inp[,2],
type= "l", col = "#FF6600",
xlab = ("Fecha"),
ylab = ("Caudal milimetros por día"),
main = ("Grafico de series de visualización de las dos series de tiempo de caudal.
")
)
lines(inp[ ,3], col = "#66FFFF")
# Summary statistics (including the mean) of the daily discharges.
summary(inp[,2:3])
# Histograms show how the data are distributed from minimum to maximum.
hist(inp[,2],
main =("histograma del río Estrella"),
xlab = ("Agua por día"),
ylab = ("Caudal"),
col = "#99CCFF"
)
hist(inp[,3],
main =("histograma del río Banano"),
xlab = ("Agua por día"),
ylab = ("Caudal"),
col = "blue"
)
names(inp) <- c("fecha", "Estrella", "Banano")
# NOTE(review): attach() is discouraged; columns are still accessed by index
# below, so the attach is effectively unused.
attach(inp)
plot(inp[,2])
# Parse the date column (day/month/year) for time-based aggregation.
Tempdate <- strptime(inp[,1], format = "%d/%m/%Y")
# Aggregate the daily series into annual (MAQ) and monthly (MMQ) totals.
MAQ_Estrella <- tapply(inp[,2], format(Tempdate, format="%Y"), FUN=sum)
MAQ_Banano <- tapply(inp[,3], format(Tempdate, format="%Y"), FUN=sum)
write.csv(rbind(MAQ_Estrella,MAQ_Banano), file="MAQ.csv")
plot(MAQ_Banano, ylim=c(100,3000), main = "caudal por año (y) y mes (m)",
ylab = "años", xlab ="meses" )
lines(MAQ_Estrella, col=2)
MMQ_Estrella <- tapply(inp[,2], format(Tempdate, format="%m"), FUN=sum)
MMQ_Banano <- tapply(inp[,3], format(Tempdate, format="%m"), FUN=sum)
# Correlation analysis between the two rivers (Spearman: rank-based, robust
# to outliers), then a simple linear regression and its diagnostic plots.
corinp <- cor(inp[,2:3], method= "spearman")
plot(inp[,2], inp[,3],main = "Relación de caudales",ylab = "Río Banano",
xlab = "Río Estrella")
inp.lm <- lm(inp[,2] ~ inp[,3], data=inp)
summary(inp.lm)
plot(inp.lm)
|
32dd621b8fcb23a8126475832078f28c134260a9
|
3f51298820aed88e7d0cc0569f49ab49a9fd1723
|
/run_analysis.R
|
1e7016f0ce16d65e8744cdde4d13d5d281790f2d
|
[] |
no_license
|
franciscoalvaro/gettingandcleaningdata
|
f2a70c2460b831a2e8cd0c9a86f001458a58f986
|
3c928b00d8f228ca48699d8a1269349ae4020d7f
|
refs/heads/master
| 2021-01-10T21:11:47.680462
| 2015-01-25T23:44:17
| 2015-01-25T23:44:17
| 29,836,593
| 0
| 0
| null | null | null | null |
ISO-8859-10
|
R
| false
| false
| 15,678
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: load and merge the UCI HAR data.
# All input files are expected in the working directory (flattened out of the
# original "UCI HAR Dataset" folder layout).
library(plyr)
# 561-feature vectors (X) and activity codes (Y) for each split.
dataX_train <- read.table("X_train.txt")
dataY_train <- read.table("Y_train.txt")
dataX_test <- read.table("X_test.txt")
dataY_test <- read.table("Y_test.txt")
# Lookup tables: activity code -> label, and the 561 feature names.
dataactivity_labels <- read.table("activity_labels.txt")
datafeatures <- read.table("features.txt")
# Subject identifier (1-30) for every window of each split.
datasubject_train <- read.table("subject_train.txt")
datasubject_test <- read.table("subject_test.txt")
# Raw inertial signals: 128 readings per window, one file per signal/axis/split.
databody_acc_x_test <- read.table("body_acc_x_test.txt")
databody_acc_y_test <- read.table("body_acc_y_test.txt")
databody_acc_z_test <- read.table("body_acc_z_test.txt")
databody_gyro_x_test <- read.table("body_gyro_x_test.txt")
databody_gyro_y_test <- read.table("body_gyro_y_test.txt")
databody_gyro_z_test <- read.table("body_gyro_z_test.txt")
datatotal_acc_x_test <- read.table("total_acc_x_test.txt")
datatotal_acc_y_test <- read.table("total_acc_y_test.txt")
datatotal_acc_z_test <- read.table("total_acc_z_test.txt")
databody_acc_x_train <- read.table("body_acc_x_train.txt")
databody_acc_y_train <- read.table("body_acc_y_train.txt")
databody_acc_z_train <- read.table("body_acc_z_train.txt")
databody_gyro_x_train <- read.table("body_gyro_x_train.txt")
databody_gyro_y_train <- read.table("body_gyro_y_train.txt")
databody_gyro_z_train <- read.table("body_gyro_z_train.txt")
datatotal_acc_x_train <- read.table("total_acc_x_train.txt")
datatotal_acc_y_train <- read.table("total_acc_y_train.txt")
datatotal_acc_z_train <- read.table("total_acc_z_train.txt")
# Assemble "ACTIVITY | SUBJECT | 561 features" frames for each split:
# dataTemp holds the training split, dataTemp1 the test split.
names(datasubject_train)[1] <- "SUBJECT"
names(dataY_train)[1] <- "ACTIVITY"
dataTemp <- data.frame(dataY_train,datasubject_train)
dataTemp <- data.frame(dataTemp,dataX_train)
names(dataY_test)[1] <- "ACTIVITY"
names(datasubject_test)[1] <- "SUBJECT"
dataTemp1 <- data.frame(dataY_test,datasubject_test)
dataTemp1 <- data.frame(dataTemp1,dataX_test)
# Label the 561 feature columns of the training set (columns 3:563) with the
# descriptive names from features.txt.
# Fixes: the original used a C-style `//` comment, which is a syntax error in
# R, and a manual counter loop; this is the vectorised equivalent (columns
# 1-2 are ACTIVITY and SUBJECT).
names(dataTemp)[3:563] <- as.character(datafeatures[, 2])
# (4) Append the nine raw inertial signal matrices (128 readings per window)
# to the training set, labelling each new column "<signal>.<reading>"
# (e.g. bodyAccX.1).
# Fixes relative to the original:
#   * C-style `//` comments were a syntax error in R;
#   * nine near-identical copy-pasted stanzas are collapsed into one loop;
#   * columns were named "bodyAccX 1" (with a space) and relied on later
#     data.frame() calls to repair them to the "bodyAccX.1" form accessed by
#     the rest of the script -- the dotted names are now set directly.
train_signals <- list(
  bodyAccX  = databody_acc_x_train,
  bodyAccY  = databody_acc_y_train,
  bodyAccZ  = databody_acc_z_train,
  bodyGyroX = databody_gyro_x_train,
  bodyGyroY = databody_gyro_y_train,
  bodyGyroZ = databody_gyro_z_train,
  totalAccX = datatotal_acc_x_train,
  totalAccY = datatotal_acc_y_train,
  totalAccZ = datatotal_acc_z_train
)
for (signal_name in names(train_signals)) {
  signal <- train_signals[[signal_name]]
  first_new <- ncol(dataTemp) + 1
  dataTemp <- data.frame(dataTemp, signal)
  names(dataTemp)[first_new:ncol(dataTemp)] <-
    paste0(signal_name, ".", seq_len(ncol(signal)))
}
# Label the 561 feature columns of the test set (columns 3:563) with the
# descriptive names from features.txt. (Vectorised replacement for the
# original counter loop, whose `//` comment was a syntax error in R.)
names(dataTemp1)[3:563] <- as.character(datafeatures[, 2])
# Append the nine raw inertial signal matrices (128 readings per window) to
# the test set, labelling each new column "<signal>.<reading>".
# Fixes relative to the original: invalid C-style `//` comments removed, the
# nine copy-pasted stanzas collapsed into one loop, and space-separated
# column names ("bodyAccX 1") replaced by the dotted form ("bodyAccX.1") that
# downstream code accesses.
test_signals <- list(
  bodyAccX  = databody_acc_x_test,
  bodyAccY  = databody_acc_y_test,
  bodyAccZ  = databody_acc_z_test,
  bodyGyroX = databody_gyro_x_test,
  bodyGyroY = databody_gyro_y_test,
  bodyGyroZ = databody_gyro_z_test,
  totalAccX = datatotal_acc_x_test,
  totalAccY = datatotal_acc_y_test,
  totalAccZ = datatotal_acc_z_test
)
for (signal_name in names(test_signals)) {
  signal <- test_signals[[signal_name]]
  first_new <- ncol(dataTemp1) + 1
  dataTemp1 <- data.frame(dataTemp1, signal)
  names(dataTemp1)[first_new:ncol(dataTemp1)] <-
    paste0(signal_name, ".", seq_len(ncol(signal)))
}
# (2) Extract the mean and standard deviation for each raw measurement.
# Preallocate one accumulator per signal; 10299 = 7352 train + 2947 test rows.
# (The original `//` comment was a syntax error in R, and c(1:10299) was an
# odd way to preallocate -- the values are overwritten below either way.)
meanBodyAccX <- numeric(10299)
meanBodyAccY <- numeric(10299)
meanBodyAccZ <- numeric(10299)
meanBodyGyroX <- numeric(10299)
meanBodyGyroY <- numeric(10299)
meanBodyGyroZ <- numeric(10299)
meanTotalAccX <- numeric(10299)
meanTotalAccY <- numeric(10299)
meanTotalAccZ <- numeric(10299)
# Stack the training rows on top of the test rows into one combined data set.
dataTempTotal <- rbind(dataTemp, dataTemp1)
# Per-window mean of each raw signal.
# Two fixes relative to the original:
#   * mean(first:last) computed the mean of a generated sequence running from
#     the 1st to the 128th sample value, not the mean of the 128 samples;
#     rowMeans over the 128 sample columns is used instead;
#   * the original relied on a throwaway data.frame(dataTempTotal, meanBodyAccX)
#     call to repair space-separated column names ("totalAccZ 1") into
#     syntactic dotted names; that repair is now done explicitly (a no-op when
#     the names are already dotted).
names(dataTempTotal) <- make.names(names(dataTempTotal))
mean_signal_cols <- function(prefix) paste0(prefix, ".", 1:128)
meanBodyAccX <- rowMeans(dataTempTotal[, mean_signal_cols("bodyAccX")])
meanBodyAccY <- rowMeans(dataTempTotal[, mean_signal_cols("bodyAccY")])
meanBodyAccZ <- rowMeans(dataTempTotal[, mean_signal_cols("bodyAccZ")])
meanBodyGyroX <- rowMeans(dataTempTotal[, mean_signal_cols("bodyGyroX")])
meanBodyGyroY <- rowMeans(dataTempTotal[, mean_signal_cols("bodyGyroY")])
meanBodyGyroZ <- rowMeans(dataTempTotal[, mean_signal_cols("bodyGyroZ")])
meanTotalAccX <- rowMeans(dataTempTotal[, mean_signal_cols("totalAccX")])
meanTotalAccY <- rowMeans(dataTempTotal[, mean_signal_cols("totalAccY")])
meanTotalAccZ <- rowMeans(dataTempTotal[, mean_signal_cols("totalAccZ")])
# Drop the helper column the original script temporarily cbind-ed while
# computing the means (assigning NULL is a no-op if the column is absent).
dataTempTotal$meanBodyAccX <- NULL
# Per-window standard deviation of each raw signal. As with the means, the
# original sd(first:last) operated on a generated sequence rather than on the
# 128 samples; apply sd row-wise across the sample columns instead. Column
# names are normalised first so the dotted names resolve regardless of how
# the signals were appended.
names(dataTempTotal) <- make.names(names(dataTempTotal))
sd_signal_cols <- function(prefix) paste0(prefix, ".", 1:128)
sdBodyAccX <- apply(dataTempTotal[, sd_signal_cols("bodyAccX")], 1, sd)
sdBodyAccY <- apply(dataTempTotal[, sd_signal_cols("bodyAccY")], 1, sd)
sdBodyAccZ <- apply(dataTempTotal[, sd_signal_cols("bodyAccZ")], 1, sd)
sdBodyGyroX <- apply(dataTempTotal[, sd_signal_cols("bodyGyroX")], 1, sd)
sdBodyGyroY <- apply(dataTempTotal[, sd_signal_cols("bodyGyroY")], 1, sd)
sdBodyGyroZ <- apply(dataTempTotal[, sd_signal_cols("bodyGyroZ")], 1, sd)
sdTotalAccX <- apply(dataTempTotal[, sd_signal_cols("totalAccX")], 1, sd)
sdTotalAccY <- apply(dataTempTotal[, sd_signal_cols("totalAccY")], 1, sd)
sdTotalAccZ <- apply(dataTempTotal[, sd_signal_cols("totalAccZ")], 1, sd)
# (3) Use descriptive activity names: map the numeric codes 1..6 onto their
# labels (same order as activity_labels.txt). Replaces the original row-by-row
# if-chain (whose `//` comment was a syntax error in R) with a single
# vectorised lookup.
activity_names <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
dataTempTotal$ACTIVITY <- activity_names[as.numeric(dataTempTotal$ACTIVITY)]
# (5) From the data set in step 4, create a second, independent tidy data set
# with the average of each variable for each activity and each subject.
# (The line above was bare prose in the original -- a syntax error in R.)
# Keep ACTIVITY, SUBJECT and the 561 feature columns; drop the raw signal
# columns appended earlier (columns 564:1715).
newDataTemp <- dataTempTotal
newDataTemp[564:1715] <- list(NULL)
# The original also built `matrix_aux` (30 subjects x 6 activities) but
# crashed on an uninitialised `count` variable (it initialised `counter`
# instead) and computed meaningless values even when patched; the object is
# never used below. It is rebuilt here, corrected, as the grand mean of all
# feature values per subject/activity, purely for reference.
activity_names <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
matrix_aux <- matrix(0, nrow = 30, ncol = 6)
for (subject in 1:30) {
  for (a in seq_along(activity_names)) {
    sel <- newDataTemp$SUBJECT == subject &
      newDataTemp$ACTIVITY == activity_names[a]
    if (any(sel)) {
      matrix_aux[subject, a] <- mean(as.matrix(newDataTemp[sel, 3:563]))
    }
  }
}
# Build the tidy data set: one row per (activity, subject) pair holding the
# average of each of the 561 feature variables, in the original output order
# (all subjects for WALKING, then WALKING_UPSTAIRS, etc.).
# Fixes relative to the original:
#   * the `average` accumulator was never reset between subjects, so every
#     row after the first silently included the previous subjects'
#     already-divided values;
#   * six near-identical copy-pasted loops are collapsed into one;
#   * rows are collected in a preallocated list instead of growing `aux`
#     with rbind() on every iteration;
#   * the final write.table() referenced an undefined `dataset` object --
#     it now writes the assembled table.
activity_names <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS",
                    "SITTING", "STANDING", "LAYING")
tidy_rows <- vector("list", length(activity_names) * 30)
k <- 0
for (activity in activity_names) {
  for (subject in 1:30) {
    data_aux <- subset(newDataTemp,
                       newDataTemp$ACTIVITY == activity &
                         newDataTemp$SUBJECT == subject)
    # Mean of each feature column (columns 3:563) for this subject/activity.
    feature_means <- matrix(colMeans(data_aux[, 3:563]), nrow = 1)
    k <- k + 1
    tidy_rows[[k]] <- cbind(subject, activity, feature_means)
  }
}
aux <- do.call(rbind, tidy_rows)
write.table(aux, "dataset.txt", sep = "\t")
|
1f9f2caba13638f39591cda656e9648e69010df3
|
1ff75d75ca6f6a1d9accb8cfb20d42095cb91792
|
/R/naive_fill_NA.R
|
cf330bbfdf0bb0a642554a101638dda925280088
|
[] |
no_license
|
minghao2016/miceFast
|
c7f33c4a17332e5e67b3a4fd56f4ade38f7c3614
|
35d8901cc814a3e1e43cf54477ecb36c232b7447
|
refs/heads/master
| 2022-11-23T11:25:59.522759
| 2020-07-11T20:36:56
| 2020-07-11T20:36:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,344
|
r
|
naive_fill_NA.R
|
#' \code{naive_fill_NA} function for the imputations purpose.
#'
#' @description
#' Regular imputations to fill the missing data.
#' Non missing independent variables are used to approximate a missing observations for a dependent variable.
#' For numeric columns with any missing data a simple Bayesian mean will be used.
#' Next all numeric variables will be utilized to impute character/factor variables by Linear Discriminant Analysis.
#'
#' @param x a numeric matrix or data.frame/data.table (factor/character/numeric/logical) - variables
#'
#' @return load imputations in a similar to the input type
#'
#' @note this is a very simple and fast solution but not recommended, for more complex solutions check the vignette
#'
#' @seealso \code{\link{fill_NA}} \code{\link{fill_NA_N}} \code{\link{VIF}}
#'
#' @examples
#' \dontrun{
#' library(miceFast)
#' data(air_miss)
#' naive_fill_NA(air_miss)
#' }
#'
#' @name naive_fill_NA
#'
#' @export
naive_fill_NA <- function(x) {
# Generic with an explicit type gate: only rectangular inputs are dispatched.
# data.table inherits from data.frame, so it takes the data.frame method.
if (inherits(x, "data.frame") || inherits(x, "matrix")) {
UseMethod("naive_fill_NA")
} else {
stop("wrong data type - it should be data.frame, matrix or data.table")
}
}
#' @describeIn naive_fill_NA S3 method for data.frame
naive_fill_NA.data.frame <- function(x) {
  # Column types drive the strategy: numeric columns are imputed first with a
  # simple intercept-only "lm_bayes" model; factor/character/logical columns
  # (with 2-15 distinct values) are then imputed via LDA on the first
  # principal component of the numeric columns.
  # Changes vs. the original: `T` -> `TRUE`, dead assignments removed
  # (`pp <- as.matrix(rep(1, ...))` and the unused `yy_class` in the numeric
  # loop), and commented-out scratch code dropped. Logic is unchanged.
  types <- vapply(x, class, FUN.VALUE = character(1))
  types_n <- types %in% c("numeric", "integer")
  types_fcl <- types %in% c("factor", "character", "logical")
  # Share of missing values per column; only columns with any NA are touched.
  na_col_p <- vapply(x, function(i) mean(is.na(i)), FUN.VALUE = numeric(1))
  col_names <- colnames(x)
  # Optional case-insensitive weights column (weights/wei/weis/w); it is used
  # as regression weights only when it is itself complete.
  mm <- match(c("weights", "wei", "weis", "w"), tolower(colnames(x)))
  ww <- if (all(is.na(mm))) vector() else x[[na.omit(mm)[1]]]
  ww <- if (any(is.na(ww))) vector() else ww
  nn <- col_names[types_n & (na_col_p > 0)]
  for (posit_y in nn) {
    yy <- x[[posit_y]]
    # Strictly non-negative, right-skewed columns (mean > median) are
    # modelled on the log scale; the small offset keeps log() defined at 0.
    all_pos_y <- FALSE
    if (is.numeric(yy)) {
      all_pos_y <- !any(yy < 0, na.rm = TRUE)
    }
    mean_over_median <- mean(yy, na.rm = TRUE) > median(yy, na.rm = TRUE)
    log_transform <- all_pos_y && mean_over_median
    if (log_transform) {
      yy <- log(yy + 1e-8)
      data_temp <- cbind(yy, 1)
      ff <- exp(fill_NA_(data_temp, "lm_bayes", 1, 2, ww))
    } else {
      data_temp <- cbind(yy, 1)
      ff <- fill_NA_(data_temp, "lm_bayes", 1, 2, ww)
    }
    x[[posit_y]] <- as.vector(ff)
  }
  if (any(types_n)) {
    for (posit_y in col_names[types_fcl & (na_col_p > 0)]) {
      yy <- x[[posit_y]]
      n_unique <- length(unique(yy))
      # LDA imputation is only attempted for variables with 2-15 levels.
      if ((n_unique < 2) || (n_unique > 15)) next
      yy_class <- class(yy)
      is_yy_factor <- is.factor(yy)
      yy <- if (is_yy_factor) yy else factor(yy)
      l <- levels(yy)
      yy <- as.numeric(yy)
      # First principal component of the (now fully imputed) numeric columns
      # serves as the single predictor.
      pp <- prcomp(Filter(is.numeric, x))$x[, 1]
      f <- round(fill_NA_(cbind(yy, pp), "lda", 1, 2, vector()))
      # Clamp imputed codes into the valid level range.
      f[f <= 0] <- 1
      f[f > length(l)] <- length(l)
      ff <- if (is_yy_factor) factor(l[f]) else l[f]
      class(ff) <- yy_class
      attr(ff, "dim") <- attributes(ff)$dim[1]
      x[[posit_y]] <- ff
    }
  }
  return(x)
}
#' @describeIn naive_fill_NA S3 method for matrix
naive_fill_NA.matrix <- function(x) {
  stopifnot(is.numeric(x))
  # The original iterated with vapply()/`[[`, which on a matrix visits
  # individual cells rather than columns and cannot address columns by name,
  # so the method failed on any matrix input. Work column-wise instead; using
  # column indices also makes unnamed matrices work.
  na_col_p <- apply(x, 2, function(col) mean(is.na(col)))
  # Optional case-insensitive weights column (weights/wei/weis/w); used as
  # regression weights only when it is itself complete.
  mm <- match(c("weights", "wei", "weis", "w"), tolower(colnames(x)))
  ww <- if (all(is.na(mm))) vector() else x[, na.omit(mm)[1]]
  ww <- if (any(is.na(ww))) vector() else ww
  for (posit_y in which(na_col_p > 0)) {
    yy <- x[, posit_y]
    # Strictly non-negative, right-skewed columns (mean > median) are
    # imputed on the log scale; the small offset keeps log() defined at 0.
    all_pos_y <- !any(yy < 0, na.rm = TRUE)
    mean_over_median <- mean(yy, na.rm = TRUE) > median(yy, na.rm = TRUE)
    log_transform <- all_pos_y && mean_over_median
    if (log_transform) {
      data_temp <- cbind(log(yy + 1e-8), 1)
      ff <- exp(fill_NA_(data_temp, "lm_bayes", 1, 2, ww))
    } else {
      data_temp <- cbind(yy, 1)
      ff <- fill_NA_(data_temp, "lm_bayes", 1, 2, ww)
    }
    x[, posit_y] <- as.vector(ff)
  }
  return(x)
}
|
4472030d895447b1527bec28fc3c42126ed624a6
|
da44ce19ae0c6d6573d2b1efc3e018339d1df9a5
|
/packages.R
|
3236d1ae1711c799aced494ab781b994c99f0ac6
|
[] |
no_license
|
jlehtoma/CDDA
|
b4c1a43b5f64c8fc59846367d38afa335e28509c
|
be5e68d49d51ca4a9c0a8b5a301f451aeac29707
|
refs/heads/master
| 2022-07-19T16:55:26.963197
| 2020-05-20T18:33:00
| 2020-05-20T18:33:00
| 265,581,637
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 241
|
r
|
packages.R
|
## library() calls go here
# Attach project dependencies quietly so package startup chatter does not
# clutter the console or pipeline logs.
suppressMessages({
library(conflicted)
library(curl)
library(dotenv)
library(dplyr)
library(drake)
library(fs)
library(httr)
library(R.utils)
library(readr)
})
# dplyr::filter and stats::filter collide; always resolve `filter` to dplyr's.
conflict_prefer("filter", "dplyr")
|
07fed6251e38e677f14db323535e62fc6548c0da
|
cb0401f0731240bd067a68563fac263ddbb0ac8e
|
/man/hypervolume_overlap_confidence.Rd
|
7b3b812f085cb19b87a689f45f2bcf38669a5810
|
[] |
no_license
|
bblonder/hypervolume
|
e4a92a430230b843b6e445233b079c93dbd7d248
|
f27eb9cc923c9937bb33f86879bed1e7d6b75cc4
|
refs/heads/master
| 2023-09-01T14:55:36.608891
| 2023-08-24T21:57:55
| 2023-08-24T21:57:55
| 22,171,728
| 20
| 8
| null | 2023-02-18T01:05:33
| 2014-07-23T19:24:49
|
R
|
UTF-8
|
R
| false
| false
| 2,583
|
rd
|
hypervolume_overlap_confidence.Rd
|
\name{hypervolume_overlap_confidence}
\alias{hypervolume_overlap_confidence}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Confidence intervals for overlap statistics
}
\description{
Generates confidence intervals of four different overlap statistics. To find the confidence interval for the overlap statistics of two hypervolumes, use \code{hypervolume_resample} twice to generate bootstraps. The function takes in paths to two sets of bootstrapped hypervolumes and computes overlap statistics for each possible pair. The confidence interval is calculated by taking quantiles of the generated overlap statistics.
}
\usage{
hypervolume_overlap_confidence(path1, path2, CI = .95, cores = 1)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{path1}{
A path to a directory of bootstrapped hypervolumes
}
\item{path2}{
A path to a directory of bootstrapped hypervolumes
}
\item{CI}{
Desired confidence interval proportion
}
\item{cores}{
Number of logical cores to use while generating overlap statistics. If parallel backend already registered to \code{doParallel}, function will use that backend and ignore the argument in cores.
}
}
\details{
The four overlap statistics are Sorensen, Jaccard, frac_unique_1, frac_unique_2. See \code{\link{hypervolume_overlap_statistics}}
Each hypervolume from path1 is overlapped with each hypervolume from path2 using \code{hypervolume_set}. The four overlap statistics are calculated for each overlap.
}
\value{
\item{jaccard}{
Confidence interval for jaccard similarity score
}
\item{sorensen}{
Confidence interval for sorensen similarity score
}
\item{frac_unique_1}{
Confidence interval for fraction of first hypervolume that is unique
}
\item{frac_unique_2}{
Confidence interval for fraction of second hypervolume that is unique
}
\item{distribution}{
a matrix of overlap statistics used to generate the confidence intervals
}
}
\seealso{
\code{\link{hypervolume_resample}}
}
\examples{
\dontrun{
# Let us overlap two hypervolumes generated from multivariate nomral
# distributions with different means and same covariance matrices.
sample1 = rmvnorm(150, mean = c(0, 0))
sample2 = rmvnorm(150, mean = c(0.5, 0.5))
hv1 = hypervolume(sample1)
hv2 = hypervolume(sample2)
# generates confidence intervals from quantiles of 20*20 overlaps
path1 = hypervolume_resample("mean_0_0", hv1, n = 20)
path2 = hypervolume_resample("mean_0.5_0.5", hv2, n = 20)
result = hypervolume_overlap_confidence(path1, path2)
# confidence interval for the Sorensen coefficient
print(result["sorensen"])
}
}
|
5340f01d45854220628b1a2e5a35d4d9a5688412
|
846eb90003c329750ca6078a7d4941cd87e578cc
|
/Section 8/4981_08_01_Code.R
|
7ad0a4f8f2a354629357e26108a3c3228615168d
|
[] |
no_license
|
PacktPublishing/Learning-Data-Analysis-with-R-Video-
|
62685d9a9f9116184afb0791e243f6f8443bbf82
|
151713640dcdc4887f8e867064f73745749c49fa
|
refs/heads/master
| 2021-06-27T06:03:25.744205
| 2021-01-19T13:09:03
| 2021-01-19T13:09:03
| 187,592,737
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,583
|
r
|
4981_08_01_Code.R
|
# Volume 2, Section 4, Video 1 -- plotting worldwide earthquake locations.
# Author: Dr. Fabio Veronesi
library(sp)
library(rgdal)
library(raster)

# Set the working directory (adjust to the local copy of the course data).
setwd("E:/OneDrive/Packt - Data Analysis/Data")

# Load the country boundary polygons from Natural Earth.
NatEarth <- shapefile("Shapefile/ne_110m_admin_0_countries.shp")

# USGS feed listing all earthquakes of the past day, in CSV form.
URL <- "http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/all_day.csv"

# Load the CSV file. The original passed `na.string=`, silently relying on
# partial argument matching; the argument is spelled `na.strings`.
USGS <- read.table(file = URL,
                   sep = ",",
                   header = TRUE,
                   na.strings = "")

# Promote the table to a SpatialPointsDataFrame and assign WGS84 lon/lat.
coordinates(USGS) <- ~longitude+latitude
projection(USGS) <- CRS("+init=epsg:4326")

# Simple plot
plot(USGS)

# Change the symbol.
# A list of all symbols for pch can be found here:
# http://www.endmemo.com/program/R/pchsymbols.php
plot(USGS, pch = 20)
# We can also provide a custom symbol
# by copying and pasting it from the characters map in windows
plot(USGS, pch = "+")

# Change color.
plot(USGS, pch = 20, col = "red")
# A full list of all the color names can be found here:
# http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf
plot(USGS, pch = 20, col = "slateblue4")

# Size of the marker.
plot(USGS, pch = 20, col = "red", cex = 2)

# Add country borders on top of the points.
plot(USGS, pch = 20, col = "red", cex = 1)
lines(NatEarth)

# Save the results in jpeg (see ?jpeg for the device options used below).
jpeg(filename = "Earthquake.jpg", width = 4000, height = 2000,
     units = "px", res = 300)
plot(USGS, pch = 20, col = "red", cex = 1)
lines(NatEarth)
dev.off()
|
ece05aeeaebaf82728f27c5a4aba384f60883a71
|
f67b15c1b265b30bd4fa8eb9a9ffd25e1ab5567a
|
/R/Euler_20_Sum_of_Factorials.R
|
1c21e1f05ad01575bcb35469bc959addfec71ccc
|
[
"MIT"
] |
permissive
|
DougieWougie/ProjectEuler
|
1e51984c0a8e15d68466a2cb7e772ef792b96f4a
|
9617b048d8748f4f0b0f6e5df128e5bb77ec1f44
|
refs/heads/master
| 2022-07-12T06:17:52.629002
| 2020-05-15T17:23:40
| 2020-05-15T17:23:40
| 257,003,466
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 165
|
r
|
Euler_20_Sum_of_Factorials.R
|
library(gmp)
# Sum the decimal digits of `number`.
# The value is first rendered as a character string (this also works for
# gmp bigz values such as factorialZ(100)), split into single characters,
# and each digit converted back to numeric before summing.
sum.digits <- function(number) {
  digit_chars <- strsplit(as.character(number), split = "")[[1]]
  sum(as.numeric(digit_chars))
}
# Project Euler 20: print the digit sum of 100!, computed exactly with
# gmp's arbitrary-precision factorialZ().
print((sum.digits(factorialZ(100))))
|
10959909c547c3bd56542729ef3118671f21273f
|
26648108b95b0b50e5cc6170ef103c8bfc463078
|
/man/plot_cpue_spaghetti.Rd
|
c27df91414d72e02e016ebc2d28facbe7ec223c5
|
[] |
no_license
|
pbs-assess/gfsynopsis
|
773a49e69735432a451adaabd87f39927c7f60b2
|
0ac1a42e96791a77b0a7f77c8914c83b3e814451
|
refs/heads/master
| 2023-08-17T08:21:02.676898
| 2023-07-27T22:30:04
| 2023-07-27T22:30:04
| 122,661,487
| 11
| 2
| null | 2023-07-17T21:49:45
| 2018-02-23T19:04:12
|
TeX
|
UTF-8
|
R
| false
| true
| 882
|
rd
|
plot_cpue_spaghetti.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cpue-plots.R
\name{plot_cpue_spaghetti}
\alias{plot_cpue_spaghetti}
\title{Plot locality specific estimates from a CPUE standardization model}
\usage{
plot_cpue_spaghetti(model, fleet, index_data, era = c("modern", "historical"))
}
\arguments{
\item{model}{Output from \code{\link[gfplot:fit_cpue_index_glmmtmb]{gfplot::fit_cpue_index_glmmtmb()}}.}
\item{fleet}{The original data object. Output from
\code{\link[gfplot:tidy_cpue_historical]{gfplot::tidy_cpue_historical()}} or \code{\link[gfplot:tidy_cpue_index]{gfplot::tidy_cpue_index()}}.}
\item{index_data}{Standardized index data. Output from
\code{\link[gfplot:fit_cpue_index_glmmtmb]{gfplot::predict_cpue_index_tweedie()}}.}
\item{era}{Modern or historical.}
}
\description{
Plot locality specific estimates from a CPUE standardization model
}
|
445d9fca1a5eeee4b6b0217763c17bbd0b9bbf5f
|
5ba24fb1d16e2056b1ee6fedf4023c878364b0ff
|
/run_analysis.R
|
e1bc38651776a23b2132f917b7f91480c0e0380f
|
[] |
no_license
|
pikerg/Getting-and-Cleaning-Data
|
7b868de20567e8ad274a53d1f2d2134e7e1b54a1
|
85d4b8ada63d31e376d1163a5058186d87ca35fe
|
refs/heads/master
| 2021-01-23T22:15:26.709302
| 2015-07-24T18:19:32
| 2015-07-24T18:19:32
| 39,515,889
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,525
|
r
|
run_analysis.R
|
####################################################################################################
## Assignment Objectives:
## Create one R script called run_analysis.R that does the following:
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each variable for each
##    activity and each subject.
##
## A detailed description of the code steps is provided in the CodeBook.md located in this
## repository
####################################################################################################

## BUG FIX: ddply() (used in step 5 below) comes from the 'plyr' package,
## which was never loaded — the script previously failed at the last step.
library(plyr)

## Read all of the required data
## a. Read the Training and Test data sets (activity labels y, features X,
##    and subject identifiers for each partition)
yTest <- read.table("UCI HAR Dataset/test/y_test.txt")
xTest <- read.table("UCI HAR Dataset/test/X_test.txt")
subjectTest <- read.table("UCI HAR Dataset/test/subject_test.txt")
yTrain <- read.table("UCI HAR Dataset/train/y_train.txt")
xTrain <- read.table("UCI HAR Dataset/train/X_train.txt")
subjectTrain <- read.table("UCI HAR Dataset/train/subject_train.txt")

## b. Read the names of the 561 features
features <- read.table("UCI HAR Dataset/features.txt")

## c. Read the activity labels (walking, walking upstairs, walking downstairs,
##    sitting, standing, laying)
actLabels <- read.table("UCI HAR Dataset/activity_labels.txt")

####################################################################################################
## Combine objectives 1, 3, and 4 in order to create a data set that is complete. This is intended
## to create one big data set that is useful for further analysis outside of the assignment
## objectives
## 1. Merge the training and the test sets to create one data set.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.

## Bind (train first, then test) and label the subject column
subject <- rbind(subjectTrain, subjectTest)
names(subject) <- "subject"

## Bind and label the feature columns with their descriptive names
x <- rbind(xTrain, xTest)
names(x) <- features[, 2]

## Bind the activity codes, translate each code to its descriptive activity
## name, and label the column
y <- rbind(yTrain, yTest)
y[, 1] <- actLabels[y[, 1], 2]
names(y) <- "activity"

## Create the complete data set: subject | activity | 561 features
completeData <- cbind(subject, y, x)

####################################################################################################
## 2. Extract only the measurements on the mean and standard deviation for each measurement by
##    first taking the subject and activity columns and then binding the columns whose names
##    contain "mean()" or "std()".
meanSubSet <- subset(completeData, select = c("subject", "activity"))
meanSubSet <- cbind(meanSubSet, completeData[, grep("mean\\(\\)|std\\(\\)", names(completeData))])

####################################################################################################
## 5. Create a second, independent tidy data set with the average of each variable for each
##    activity and each subject (columns 3:68 are the 66 mean/std measurements).
tidyData <- ddply(meanSubSet, .(subject, activity), function(x) colMeans(x[, 3:68]))

####################################################################################################
## Write the result of the assignment objectives to a text file
write.table(tidyData, "averages_data.txt", row.name = FALSE)
|
24fbd78a11a6f5a3a9d7d76c215f8f4b2513eb54
|
61b4adde63a7b434e028488d2158ef23014c4cfc
|
/tests/table.R
|
d4f38f2cce68a3850a44fcb63896651479214e77
|
[] |
no_license
|
SVA-SE/mill
|
a165deeae9612c1448d287caea73f5de6e87d02a
|
b5faa7738d6b475759c7f2f980e30628d7f15f35
|
refs/heads/master
| 2021-05-10T09:19:22.504502
| 2020-06-15T12:05:40
| 2020-06-15T12:05:40
| 103,141,739
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,660
|
r
|
table.R
|
## Regression tests for mill's LaTeX table helpers, written as plain
## stopifnot() assertions (no test framework). Requires the 'mill' package
## and, for the last test, the fixture "test-file-001.docx" in the working
## directory.
library(mill)

## merge_font_styles() should collapse adjacent identical font-style
## commands into one, preserving the text (including spaces) between them.
stopifnot(identical(mill:::merge_font_styles("\\textit{A }\\textit{B}"),
                    "\\textit{A B}"))
stopifnot(identical(mill:::merge_font_styles("\\textit{A} \\textit{B}"),
                    "\\textit{A B}"))
stopifnot(identical(mill:::merge_font_styles("\\textit{A. }\\textit{woodi}"),
                    "\\textit{A. woodi}"))
stopifnot(identical(mill:::merge_font_styles("\\textbf{1}\\textbf{06}"),
                    "\\textbf{106}"))

## Longer runs of \textit fragments must merge into a single command.
str <- paste0("\\textit{Mycoplasma }\\textit{gallisepticum}\\textit{ ",
              "/ Mycoplasma }\\textit{synoviae}")
stopifnot(identical(
  mill:::merge_font_styles(str),
  "\\textit{Mycoplasma gallisepticum / Mycoplasma synoviae}"))

## Merging must stop at the end of the styled run: trailing plain text
## stays outside the \textit command.
str <- paste0("\\textit{Mycoplasma }\\textit{gallisepticum}\\textit{/}",
              "\\textit{synoviae} some more text.")
stopifnot(identical(
  mill:::merge_font_styles(str),
  "\\textit{Mycoplasma gallisepticum/synoviae} some more text."))

## End-to-end check: a simple one-cell docx table renders to the expected
## threeparttable LaTeX, including caption, superscripted footnote marker,
## tablenotes block and label.
stopifnot(identical(
  format(docx_tables("test-file-001.docx")[[1]], output = "tex"),
  c("\\begin{table}[H]",
    "  \\begin{threeparttable}",
    "    \\caption{This is a simple table.}",
    "    \\begin{tabular}{",
    "        l",
    "        r}",
    "",
    "      \\toprule",
    "",
    "      \\textbf{Total} &",
    "      \\textbf{1}\\textsuperscript{\\textbf{A}} \\\\",
    "",
    "      \\bottomrule",
    "",
    "    \\end{tabular}",
    "    \\begin{tablenotes}",
    "      \\item \\textsuperscript{A}Footnote.",
    "    \\end{tablenotes}",
    "    \\label{tab:test}",
    "  \\end{threeparttable}",
    "\\end{table}")))
|
e225f5c018791bb951f399bc75c80078e7fdba97
|
de89fd4bfd470b4df26bac1f22ac7b594238c585
|
/man/update_senate_database.Rd
|
abc38e0651ff4a9f0bc06462255c10bac6e70cfd
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.flusshygiene.app
|
d25414c614d3d5ba39b6a5225fea0ceda85d2b65
|
6cb657f79c8cb7ea627806797965ba716778d312
|
refs/heads/master
| 2021-07-21T01:23:36.782339
| 2019-10-30T10:15:53
| 2019-10-30T10:15:53
| 186,563,434
| 0
| 0
|
MIT
| 2021-07-12T09:43:55
| 2019-05-14T06:58:09
|
R
|
UTF-8
|
R
| false
| true
| 1,132
|
rd
|
update_senate_database.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_senate_database.R
\name{update_senate_database}
\alias{update_senate_database}
\title{Download New Files and Update Local "Database" of Senate's Data}
\usage{
update_senate_database(root = get_root(), dbg = 1)
}
\arguments{
\item{root}{path to "root" folder below which to find subfolders "downloads"
and "database"}
\item{dbg}{debug level. The higher the value, the more verbose the output}
}
\description{
\enumerate{
\item If database/flows.fst exists, load a data frame from there
\item If database/flows.fst does not exist, read all files in
downloads/senate/ into a data frame and save this data frame in
database/flows.fst as well as in database/flows.csv
\item Download one new file into downloads/senate/
\item Read the downloaded file into a data frame containing data for both
sites, Sophienwerder and Tiefwerder
\item Row-bind the data frame read in 4) with the data frame loaded in 1)
or read in 2)
\item Save the data frame resulting from 5) in database/flows.fst as well
as in database/flows.csv
}
}
|
dde962321ddf0a59c2c0c129c9fd800e95a49a40
|
3792ceaa3060ef1c8b2aede1f621ecc8b1777f5f
|
/man/baixar_julgados_trf2.Rd
|
b9e5d54b51c8f74d4f537d3b5a85c7a0de9aa323
|
[
"MIT"
] |
permissive
|
jjesusfilho/trf2
|
8283a3587c9f4dcd05bfd4b30198d4211345b29c
|
af3631695421499000f2bc970c656ee998648791
|
refs/heads/master
| 2020-07-02T13:22:01.753908
| 2020-06-02T01:51:35
| 2020-06-02T01:51:35
| 201,534,788
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 668
|
rd
|
baixar_julgados_trf2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/baixar_julgados_trf2.R
\name{baixar_julgados_trf2}
\alias{baixar_julgados_trf2}
\title{Baixar inteiro teor do acórdão}
\usage{
baixar_julgados_trf2(urls, diretorio = ".")
}
\arguments{
\item{urls}{As urls podem ser obtidas da tibble lida
por meio da função ler_cjsg_trf2}
\item{diretorio}{atual se não informado}
}
\value{
baixa pdfs no diretorio indicado
}
\description{
Baixar inteiro teor do acórdão
}
\examples{
\dontrun{
url <- "http://jurisprudencia.trf2.jus.br/sm/download?name=apolo-inteiro-teor&id=2019,08,02,00084163220184020000_2257151.pdf"
baixar_julgados_trf2(url)
}
}
|
b97bfc82185757bdf895c94b276661d533f25b18
|
09f489b818406f56e28f544d566121e5a2c1be2c
|
/download_ahn_sheets.R
|
ff1b889748ff9bb045da82d9ae2e9b35c9d2146a
|
[] |
no_license
|
Martien1973/rAHNextract
|
e477e998509912c217061902548a3c0e9aadf332
|
696d66ec58d5e15eb076623ccd2bac7bd5d46285
|
refs/heads/master
| 2021-01-04T10:21:28.198351
| 2020-02-14T08:22:17
| 2020-02-14T08:22:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,179
|
r
|
download_ahn_sheets.R
|
## Download AHN (Actueel Hoogtebestand Nederland) height-map sheets that
## intersect an area of interest, crop each sheet to that area, merge the
## crops, and return the result as a raster stack with layers "raw" (DSM)
## and/or "terrain" (DTM).
##
## NOTE(review): the 'ahn' parameter is never used; the body reads a free
## variable 'AHN' ("AHN2"/"AHN3") from the calling environment instead —
## confirm whether 'AHN <- ahn' was intended at the top of the function.
## NOTE(review): the function also depends on several other free variables
## that must exist where it is called: 'name_trim', 'resolution_name',
## 'raw_ahn', 'terrain_ahn', 'delete_sheets', 'redownload', 'epsg_rd',
## 'redownload_files', and (for the redownload branch) 'df', 'addition',
## 'station_coords', 'X', 'Y', 'LONLAT', 'resolution', 'radius', 'AHN3'.
ahn_sheets <- function(name, surroundingBuffer, ahn = "AHN3"){
  ### BladIndex (sheet index) method ###
  ## Fetch the AHN "bladindex" so we know which sheets cover the buffer.
  my_EPSG <- "EPSG:28992"  # Dutch national CRS (RD New / Amersfoort)
  directory <- paste("data", AHN, sep="/")
  if (!dir.exists(directory)){
    dir.create(directory)
  }
  bladIndex_shape_filepath <- paste(directory , sep="/")
  bladIndex_shape_file <- paste(bladIndex_shape_filepath, "/", AHN, "_bladIndex", ".shp", sep="")
  print(bladIndex_shape_file)
  ## Download the sheet index via WFS once and cache it as a shapefile.
  if(!file.exists(bladIndex_shape_file)){
    print("Download ahn wfs blad Index")
    ahn_WFS_baseUrl <- paste0("https://geodata.nationaalgeoregister.nl/", tolower(AHN), "/wfs?SERVICE=WFS&VERSION=1.0.0&REQUEST=GetFeature&TYPENAME=", tolower(AHN), ":", tolower(AHN), "_bladindex")
    print(ahn_WFS_baseUrl)
    ahn_wfs <- paste("WFS:", ahn_WFS_baseUrl, "&SRSNAME=", my_EPSG, sep="")
    ogr2ogr(src_datasource_name = ahn_wfs , dst_datasource_name = bladIndex_shape_file, layer = paste0(tolower(AHN),":", tolower(AHN), "_bladindex"), overwrite = TRUE)
  }
  ## Load the cached sheet index and intersect it with the area of interest
  ## to obtain the sheet numbers ("bladnrs") that must be downloaded.
  bladIndex.shp <- readOGR(dsn = directory, layer = paste0(AHN, "_bladIndex"), stringsAsFactors=FALSE)
  bladIndex.shp <- spTransform(bladIndex.shp, epsg_rd)
  bladIndex.sf <- st_as_sf(bladIndex.shp)
  ## Mark attributes as spatially constant to silence st_intersection warnings.
  st_agr(bladIndex.sf) <- "constant"
  st_agr(surroundingBuffer) <- "constant"
  bladIndex_buffer_intsct.sf <- st_intersection(bladIndex.sf, surroundingBuffer)
  bladnrs <- bladIndex_buffer_intsct.sf$bladnr
  dir.create(paste(directory, name_trim, sep="/"), showWarnings = FALSE)
  aws_working_directory <- paste(directory, name_trim, sep="/")
  ahn_atomFeed_BaseUrl <- paste("https://geodata.nationaalgeoregister.nl/", tolower(AHN), "/extract/", tolower(AHN), "_", sep="")

  ## ---- Raw AHN (surface model; "dsm" in AHN3, "ruw" in AHN2) ----
  ## Download each intersecting sheet (cached on disk), crop it to the
  ## buffer, then merge the crops into a single raster.
  indiv_raw_rasters <- list()
  if(raw_ahn == TRUE){
    ahn_raw_directory <- paste(aws_working_directory, "raw", sep="/")
    ahn_raw_raster_filename <- paste(ahn_raw_directory, "/", name_trim, "_", AHN, "_raw_ahn", '.tif', sep="")
    if(!file.exists(ahn_raw_raster_filename)){
      dir.create(paste(aws_working_directory, "raw", sep="/"), showWarnings = FALSE)
      print(ahn_raw_directory)
      # rawXList <<- c()
      # rawYList <<- c()
      print(paste("Amount of sheets found:", length(bladnrs), sep=" "))
      ahn_raw_file_paths <- c()
      for(r in 1:length(bladnrs)){
        ## Download URL patterns differ between the two AHN versions:
        ## ahn2: https://geodata.nationaalgeoregister.nl/ahn2/extract/ahn2_05m_ruw/r32cn1.tif.zip
        ## ahn3: https://geodata.nationaalgeoregister.nl/ahn3/extract/ahn3_05m_dsm/R_32CN1.zip
        if(AHN == "AHN3"){
          tifZip = ".ZIP"
          ahn_raw_letter <- "R"
          ahn_raw_naming <- paste0("_dsm/", ahn_raw_letter,"_")
          ahn_raw_downloadLink <- paste(ahn_atomFeed_BaseUrl, resolution_name, ahn_raw_naming, toupper(bladnrs[[r]]), tifZip, sep="")
          ahn_raw_file_path <- paste(ahn_raw_directory, "/r_", tolower(bladnrs[[r]]), ".tif",sep="")
        } else {
          tifZip = ".tif.zip"
          ahn_raw_letter <- "r"
          ahn_raw_naming <- paste0("_ruw/", ahn_raw_letter)
          ahn_raw_downloadLink <- paste(ahn_atomFeed_BaseUrl, resolution_name, ahn_raw_naming, tolower(bladnrs[[r]]), tifZip, sep="")
          ahn_raw_file_path <- paste(ahn_raw_directory, "/", ahn_raw_letter, tolower(bladnrs[[r]]), ".tif",sep="")
        }
        print(ahn_raw_downloadLink)
        ahn_rawZip_file_path <- paste(ahn_raw_directory, "/", ahn_raw_letter, tolower(bladnrs[[r]]), ".zip", sep="")
        ## Only download if the unzipped GeoTIFF is not already cached.
        if(!file.exists(ahn_raw_file_path)){
          download.file(ahn_raw_downloadLink, destfile = ahn_rawZip_file_path)
          unzip(ahn_rawZip_file_path, overwrite = TRUE, exdir = ahn_raw_directory)
          file.remove(ahn_rawZip_file_path)
        }
        ahn_sheet_raw <-stack(ahn_raw_file_path)
        ahn_raw_file_paths <- cbind(ahn_raw_file_paths, ahn_raw_file_path)
        proj4string(ahn_sheet_raw)<-CRS("+init=epsg:28992")
        ahn_raw_crop <- raster::crop(ahn_sheet_raw,surroundingBuffer)
        #plot(ahn_raw_crop)
        # rXs <- c(xmin(ahn_raw_crop), xmax(ahn_raw_crop))
        # rYs <- c(ymin(ahn_raw_crop), ymax(ahn_raw_crop))
        # rawXList <- rbind(rawXList, rXs)
        # rawYList <- rbind(rawYList, rYs)
        indiv_raw_rasters[[r]] <- ahn_raw_crop
      }
      print(ahn_raw_raster_filename)
      ## 'filename'/'overwrite' list entries become arguments to
      ## raster::merge via do.call below.
      indiv_raw_rasters$filename <- paste(name_trim, "_", AHN ,"_raw_ahn", '.tif', sep="")
      if(length(bladnrs) > 1){
        indiv_raw_rasters$overwrite <- TRUE
        print("Merging all raw rasters...")
        print(ahn_raw_raster_filename)
        ahn_raw_raster <- do.call(merge, indiv_raw_rasters)
        writeRaster(ahn_raw_raster, filename = ahn_raw_raster_filename, overwrite = TRUE)
        file.remove(paste(name_trim, "_", AHN , "_raw_ahn.tif", sep=""))
        message("Download and merge of raw rasters complete.")
      } else if(length(bladnrs) == 1){
        ahn_raw_raster <- indiv_raw_rasters[[1]]
        writeRaster(ahn_raw_raster, ahn_raw_raster_filename, overwrite = TRUE)
        message("Download of raw rasters complete.")
      }
      ## Optionally delete the full downloaded sheets, keeping only the crop.
      if(delete_sheets == TRUE){
        for(fr in 1:length(ahn_raw_file_paths)){
          file.remove(ahn_raw_file_paths[fr])
        }
      }
    } else {
      ## Cropped raster already exists: either force a redownload or reuse it.
      if(redownload == TRUE){
        redownload_files <- TRUE
        file.remove(ahn_raw_raster_filename)
      } else {
        warning(paste("Cropped raw raster for", name, "already exists and was returned. Please remove it if you want to download it again or set redownload to TRUE.",sep =" "))
        ahn_raw_raster <- raster(paste(ahn_raw_directory, "/", name_trim, "_", AHN ,"_raw_ahn", '.tif', sep=""))
      }
    }
  }

  ## ---- Terrain AHN (ground model; "dtm" in AHN3, "int" in AHN2) ----
  ## Mirrors the raw branch above with terrain-specific file naming.
  indiv_terrain_rasters <- list()
  if(terrain_ahn == TRUE){
    ahn_terrain_directory <- paste(aws_working_directory, "terrain", sep="/")
    ahn_terrain_raster_filename <- paste(ahn_terrain_directory, "/", name_trim, "_", AHN ,"_terrain_ahn", '.tif', sep="")
    # terrainXList <<- c()
    # terrainYList <<- c()
    if(!file.exists(ahn_terrain_raster_filename)){
      dir.create(paste(aws_working_directory, "terrain", sep="/"), showWarnings = FALSE)
      print(ahn_terrain_directory)
      print(paste("Amount of sheets found:", length(bladnrs), sep=" "))
      ahn_terrain_file_paths <- c()
      for(t in 1:length(bladnrs)){
        ## AHN2: https://geodata.nationaalgeoregister.nl/ahn2/extract/ahn2_05m_int/i32cn1.tif.zip
        ## AHN3: https://geodata.nationaalgeoregister.nl/ahn3/extract/ahn3_05m_dtm/M_32CN1.ZIP
        if(AHN == "AHN3"){
          tifZip = ".ZIP"
          ahn_terrain_letter <- "M"
          ahn_terrain_naming <- paste0("_dtm/", ahn_terrain_letter,"_")
          ahn_terrain_downloadLink <- paste(ahn_atomFeed_BaseUrl, resolution_name, ahn_terrain_naming, toupper(bladnrs[[t]]), tifZip, sep="")
          ahn_terrain_file_path <- paste(ahn_terrain_directory, "/m_", tolower(bladnrs[[t]]), ".tif",sep="")
        } else {
          tifZip = ".tif.zip"
          ahn_terrain_letter <- "i"
          ahn_terrain_naming <- paste0("_int/", ahn_terrain_letter)
          ahn_terrain_downloadLink <- paste(ahn_atomFeed_BaseUrl, resolution_name, ahn_terrain_naming, tolower(bladnrs[[t]]), tifZip, sep="")
          ahn_terrain_file_path <- paste(ahn_terrain_directory, "/", ahn_terrain_letter, tolower(bladnrs[[t]]), ".tif",sep="")
        }
        print(ahn_terrain_downloadLink)
        ## NOTE(review): unlike the raw branch, the zip name here uses
        ## bladnrs[[t]] without tolower() — confirm this is intentional.
        ahn_terrainZip_file_path <- paste(ahn_terrain_directory, "/", ahn_terrain_letter, bladnrs[[t]], ".zip", sep="")
        if(!file.exists(ahn_terrain_file_path)){
          download.file(ahn_terrain_downloadLink, destfile = ahn_terrainZip_file_path)
          unzip(ahn_terrainZip_file_path, overwrite = TRUE, exdir = ahn_terrain_directory)
          file.remove(ahn_terrainZip_file_path)
        }
        ahn_terrain_file_paths <- cbind(ahn_terrain_file_paths, ahn_terrain_file_path)
        ahn_sheet_terrain <-stack(ahn_terrain_file_path)
        proj4string(ahn_sheet_terrain)<-CRS("+init=epsg:28992")
        ahn_terrain_crop<-raster::crop(ahn_sheet_terrain,surroundingBuffer)
        # tXs <- c(xmin(ahn_terrain_crop), xmax(ahn_terrain_crop))
        # tYs <- c(ymin(ahn_terrain_crop), ymax(ahn_terrain_crop))
        # terrainXList <- rbind(terrainXList, tXs)
        # terrainYList <- rbind(terrainYList, tYs)
        indiv_terrain_rasters[[t]] <- ahn_terrain_crop
      }
      indiv_terrain_rasters$filename <- paste(name_trim, "_", AHN , "_terrain_ahn", '.tif', sep="")
      if(length(bladnrs) > 1){
        indiv_terrain_rasters$overwrite <- TRUE
        print("Merging all terrain rasters...")
        print(ahn_terrain_raster_filename)
        ## NOTE(review): the empty 'envir = ' argument passes a missing value,
        ## so do.call falls back to its default — likely an editing leftover.
        ahn_terrain_raster <- do.call(merge, indiv_terrain_rasters, envir = )
        writeRaster(ahn_terrain_raster, filename = ahn_terrain_raster_filename, overwrite = TRUE)
        file.remove(paste(name_trim, "_", AHN , "_terrain_ahn.tif", sep=""))
        message("Download and merge of terrain rasters complete.")
      } else if(length(bladnrs) == 1){
        ahn_terrain_raster <- indiv_terrain_rasters[[1]]
        writeRaster(ahn_terrain_raster, ahn_terrain_raster_filename, overwrite = TRUE)
        message("Download of terrain rasters complete.")
      }
      if(delete_sheets == TRUE){
        for(ft in 1:length(ahn_terrain_file_paths)){
          file.remove(ahn_terrain_file_paths[ft])
        }
      }
    } else {
      if(redownload == TRUE){
        redownload_files <- TRUE
        file.remove(ahn_terrain_raster_filename)
      } else {
        warning(paste("Cropped terrain raster for", name, "already exists and was returned. Please remove it if you want to download it again or set redownload to TRUE.",sep =" "))
        ahn_terrain_raster <- raster(paste(ahn_terrain_directory, "/", name_trim, "_", AHN ,"_terrain_ahn", '.tif', sep=""))
      }
    }
  }

  ## If any cached raster was removed for redownload, re-enter via
  ## import_single_ahn with redownload = TRUE; otherwise stack the results.
  if(redownload_files == TRUE){
    warning("AHN file(s) already existed and were redownloaded")
    import_single_ahn(df = df, name = name, addition = addition, station_coords = station_coords, X = X, Y = Y, LONLAT = LONLAT, resolution = resolution, radius = radius, raw_ahn = raw_ahn, terrain_ahn = terrain_ahn, AHN3 = AHN3, delete_sheets = delete_sheets, redownload = TRUE)
  } else {
    if(raw_ahn == TRUE & terrain_ahn == TRUE){
      ahn_rasters <- stack(ahn_raw_raster, ahn_terrain_raster)
      names(ahn_rasters) <- c("raw", "terrain")
    } else if(raw_ahn == TRUE & terrain_ahn == FALSE){
      ahn_rasters <- stack(ahn_raw_raster)
      names(ahn_rasters) <- c("raw")
    } else if(raw_ahn == FALSE & terrain_ahn == TRUE){
      ahn_rasters <- stack(ahn_terrain_raster)
      names(ahn_rasters) <- c("terrain")
    } else {
      ahn_rasters <- NULL
    }
    return (ahn_rasters)
  }
  # if(delete_sheets == TRUE){
  #   file.remove(bladIndex_shape_file)
  #   file.remove(paste( bladIndex_shape_filepath, "/", AHN, "_bladIndex", ".shx", sep=""))
  #   file.remove(paste( bladIndex_shape_filepath, "/", AHN, "_bladIndex", ".dbf", sep=""))
  #   file.remove(paste( bladIndex_shape_filepath, "/", AHN, "_bladIndex", ".prj", sep=""))
  # }
}
|
6cd355ca877dea27b0dc7ef34bd8a0bdd6bb203e
|
b151f3472c1de0756675d41bc8f62598cc60df93
|
/learn_local_dbn.R
|
7714ca4019c871af3e8bb58d1e6463eb6d6fbfbf
|
[] |
no_license
|
sap01/TGS-2
|
d8450cbcf85d7b2619910fa0f4d685e2a866fe1a
|
9c84d3b8a478a0c27479f67c31e23563afa6a13d
|
refs/heads/master
| 2020-05-15T13:10:37.901192
| 2020-04-23T14:49:58
| 2020-04-23T14:49:58
| 182,055,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,848
|
r
|
learn_local_dbn.R
|
## Goal: Learn local Dynamic Bayesian Network (DBN)
##
#######################################################################################
## Goal: Learn local DBN using R package 'bnstruct'.
## This function allows nodes with less than two discrete levels.
#######################################################################################
## Learn the local DBN structure for a single target node by exhaustive
## search over all subsets of the shortlisted source nodes, scoring each
## subset with ScoreBn() (defined in score_bn.R) and returning the names of
## the sources in the best-scoring subset.
##
## The input data frame has one column per shortlisted source node followed
## by the target node in the LAST column. Subsets are enumerated as a binary
## counter: a list of logicals advanced by find.next.subset.str().
##
## NOTE(review): the 'init.path' parameter is never used in this function —
## confirm whether it can be dropped or was meant to be forwarded.
## This variant allows nodes with fewer than two discrete levels.
LearnLocalDbnBnstruct <- function(local.dbn.input.data,
                                  node.sizes,
                                  scoring.func,
                                  init.path) {
  ## Number of shortlisted source nodes.
  ## The last col is for the target node.
  num.sl.src.nodes <- (ncol(local.dbn.input.data) - 1)

  ########################################################################
  ## Begin: Find out the subset of source nodes with the best score
  ########################################################################
  ## Begin with the empty subset (all FALSE).
  # curr.subset <- c()
  curr.subset.str <- as.list(rep(FALSE, num.sl.src.nodes))
  # print(local.dbn.input.data)
  # print(str(local.dbn.input.data))

  ## Target node index (always the last column).
  tgt.node.idx <- ncol(local.dbn.input.data)

  ########################################################################
  ## Begin: Calc score of the empty subset
  ########################################################################
  ## Source node indices. Initialized with the empty subset.
  src.node.idx <- c()
  ## No. of source nodes in the current subset.
  num.src.nodes <- 0
  ## Initialize the current score.
  curr.score <- NULL
  if (scoring.func == 'BIC') {
    ## ScoreBn() lives in score_bn.R.
    ## Its 'scoring.func' code: 0 = BDeu, 1 = AIC, 2 = BIC.
    curr.score <- ScoreBn(local.dbn.input.data, node.sizes,
                          scoring.func = 2,
                          tgt.node.idx,
                          src.node.idx, num.src.nodes)
  } else {
    stop('Only supports BIC scoring function till now')
  }
  ########################################################################
  ## End: Calc score of the empty subset
  ########################################################################
  best.score <- curr.score
  best.subset.str <- curr.subset.str

  ########################################################################
  ## Begin: Calc scores of the non-empty subsets
  ########################################################################
  ## Iterate over all 2^n - 1 non-empty subsets (indices 2..2^n).
  num.subsets <- (2^num.sl.src.nodes)
  for (subset.idx in 2:num.subsets) {
    ## 'find.next.subset.str()' is defined in this script; it advances the
    ## logical list like a binary counter.
    curr.subset.str <- find.next.subset.str(curr.subset.str)
    ##> e.g. 'curr.subset.str' = list(TRUE, FALSE, TRUE)
    ## which() coerces the logical list to a vector to recover the indices
    ## of the selected source nodes.
    src.node.idx <- which(curr.subset.str == TRUE,
                          arr.ind = TRUE,
                          useNames = FALSE)
    num.src.nodes <- length(src.node.idx)
    if (scoring.func == 'BIC') {
      ## 'scoring.func': 0 = BDeu, 1 = AIC, 2 = BIC
      curr.score <- ScoreBn(local.dbn.input.data, node.sizes,
                            scoring.func = 2,
                            tgt.node.idx,
                            src.node.idx, num.src.nodes)
    } else {
      stop('Only supports BIC scoring function till now')
    }
    ## The higher the score, the fitter the model.
    ## Ref: Lines 38-42, defn of fbp() in 'src/smfast.c' of
    ## R package 'bnstruct' version 1.0.2.
    if (curr.score > best.score) {
      best.score <- curr.score
      best.subset.str <- curr.subset.str
    }
    # print(tgt.node.idx)
    # print(src.node.idx)
    # print(best.score)
    # print(curr.score)
  }
  ## Remove all large objects that are no more required.
  rm(subset.idx, num.subsets, curr.score, curr.subset.str, best.score,
     src.node.idx, tgt.node.idx, node.sizes)
  ########################################################################
  ## End: Calc scores of the non-empty subsets
  ########################################################################
  ########################################################################
  ## End: Find out the subset of source nodes with the best score
  ########################################################################

  ## Shortlisted source node names, e.g. "v1_t1" "v2_t1" "v3_t1".
  sl.src.node.names <- colnames(local.dbn.input.data)[1:num.sl.src.nodes]
  ## Keep only the names flagged TRUE in the best subset.
  pred.src.node.names <- sl.src.node.names[unlist(best.subset.str)]
  # print(pred.src.node.names)
  rm(sl.src.node.names, best.subset.str)
  pred.src.node.names <- as.list(pred.src.node.names)
  ## Return the list of predicted source node names.
  return(pred.src.node.names)
}
#######################################################################################
## Goal: Learn local DBN using R package 'bnlearn'.
## This function requires that each node has at least two discrete levels.
#######################################################################################
## Learn the local DBN structure for a single target node using the
## 'bnlearn' package: exhaustively score every subset of the shortlisted
## source nodes as the target's parent set and return the names of the
## sources in the best-scoring subset (as a list).
##
## The input data frame has one column per shortlisted source node followed
## by the target node in the LAST column. This variant requires every node
## to have at least two discrete levels (a bnlearn requirement).
##
## Args:
##   local.dbn.input.data: data frame/matrix of discretized node values.
##   scoring.func: scoring function name; only 'BIC' is supported.
## Returns:
##   A list of predicted source node names (possibly empty).
LearnLocalDbnBnlearn <- function(local.dbn.input.data,
                                 scoring.func) {
  ## Target node name, e.g. "v1_t2" (always the last column).
  tgt.node.name <- colnames(local.dbn.input.data)[ncol(local.dbn.input.data)]

  ## Number of shortlisted source nodes; the last col is the target.
  num.sl.src.nodes <- (ncol(local.dbn.input.data) - 1)

  ## Shortlisted source node names, e.g. "v1_t1" "v2_t1" "v3_t1".
  sl.src.node.names <- colnames(local.dbn.input.data)[seq_len(num.sl.src.nodes)]

  ## Current subset of sources, encoded as a list of logicals and advanced
  ## like a binary counter by find.next.subset.str(). Begin with the empty
  ## subset.
  curr.subset.str <- as.list(rep(FALSE, num.sl.src.nodes))

  ## Model string for the source nodes (each parentless),
  ## e.g. '[v1_t1][v2_t1][v3_t1]'.
  src.model.str <- c()
  for (node.name in sl.src.node.names) {
    src.model.str <- paste(src.model.str, '[', node.name, ']', sep = '')
  }
  rm(node.name)

  ## Model string for the target node with no parents, e.g. '[v1_t2]'.
  tgt.model.str <- paste('[', tgt.node.name, ']', sep = '')
  curr.model.str <- paste(src.model.str, tgt.model.str, sep = '')
  curr.net <- bnlearn::model2network(curr.model.str)

  ## bnlearn::score() needs a data frame whose columns are factors.
  local.dbn.input.data <- as.data.frame(local.dbn.input.data)
  for (col.idx in seq_len(ncol(local.dbn.input.data))) {
    local.dbn.input.data[, col.idx] <- as.factor(local.dbn.input.data[, col.idx])
  }
  rm(col.idx)

  ## Score of the empty parent set.
  if (scoring.func == 'BIC') {
    curr.score <- bnlearn::score(curr.net,
                                 local.dbn.input.data,
                                 type = 'bic')
  } else {
    stop('Only supports BIC scoring function till now')
  }
  best.score <- curr.score
  best.subset.str <- curr.subset.str

  ## Score every non-empty subset (indices 2..2^n of the enumeration).
  num.subsets <- (2^num.sl.src.nodes)
  for (subset.idx in 2:num.subsets) {
    ## 'find.next.subset.str()' is defined in this script.
    curr.subset.str <- find.next.subset.str(curr.subset.str)
    ##> e.g. 'curr.subset.str' = list(TRUE, FALSE, TRUE)

    ## Build '[target|parent1:parent2:...]' for the selected sources.
    tgt.model.str <- paste('[', tgt.node.name, '|', sep = '')
    is.first <- TRUE
    ## BUG FIX: iterate over ALL candidate positions with seq_along();
    ## the original 'for (node.idx in length(curr.subset.str))' visited
    ## only the last index, producing wrong (or invalid) parent sets.
    for (node.idx in seq_along(curr.subset.str)) {
      if (curr.subset.str[[node.idx]]) {
        if (is.first) {
          is.first <- FALSE
        } else {
          tgt.model.str <- paste(tgt.model.str, ':', sep = '')
        }
        tgt.model.str <- paste(tgt.model.str,
                               sl.src.node.names[node.idx],
                               sep = '')
      }
    }
    rm(node.idx)
    tgt.model.str <- paste(tgt.model.str, ']', sep = '')
    ##> e.g. tgt.model.str = '[v1_t2|v1_t1:v3_t1]'

    curr.model.str <- paste(src.model.str, tgt.model.str, sep = '')
    curr.net <- bnlearn::model2network(curr.model.str)
    if (scoring.func == 'BIC') {
      curr.score <- bnlearn::score(curr.net,
                                   local.dbn.input.data,
                                   type = 'bic')
    } else {
      stop('Only supports BIC scoring function till now')
    }
    ## The higher the score, the fitter the model.
    ## Ref: Section 'Note' of function 'score' in the manual of
    ## R package 'bnlearn' version 4.3.
    if (curr.score > best.score) {
      best.score <- curr.score
      best.subset.str <- curr.subset.str
    }
  }
  rm(subset.idx, num.subsets)

  ## Names of the sources in the best-scoring subset.
  pred.src.node.names <- sl.src.node.names[unlist(best.subset.str)]
  rm(sl.src.node.names)
  pred.src.node.names <- as.list(pred.src.node.names)
  ## Return the list of predicted source node names.
  return(pred.src.node.names)
}
#######################################################################################
## Goal: Find next subset string, given the previous subset string.
## Example 1:
## If 'prev.subset.str' = list(FALSE, FALSE)
## then it returns 'list(FALSE, TRUE)'.
##
## Example 2:
## If 'prev.subset.str' = list(FALSE, TRUE)
## then it returns 'list(TRUE, FALSE)'.
##
## So this function basically adds a TRUE to the last element of the
## list 'prev.subset.str'.
#######################################################################################
find.next.subset.str <- function(prev.subset.str) {
  # Increment the logical 'bit string' represented by 'prev.subset.str'
  # by one, treating the list as a binary number with the most
  # significant bit first. Wraps around to all-FALSE when given the
  # all-TRUE string.
  #
  # Args:
  #   prev.subset.str: list of logicals (TRUE/FALSE) for the previous subset.
  #
  # Returns:
  #   A list of logicals representing the next subset string (the input
  #   list with the increment applied; same length as the input).
  carry.bit <- TRUE
  # rev(seq_along()) walks bits from least to most significant, and is
  # safe for a zero-length list (unlike 'n:1', which would yield c(0, 1)).
  for (bit.idx in rev(seq_along(prev.subset.str))) {
    curr.bit <- prev.subset.str[[bit.idx]]
    # Half-adder: sum bit is XOR(curr, carry); next carry is AND(curr, carry).
    prev.subset.str[[bit.idx]] <- xor(curr.bit, carry.bit)
    carry.bit <- curr.bit && carry.bit
  }
  # In-place replacement: the modified input list is the next subset string.
  return(prev.subset.str)
}
|
e325bff2d201b1cde595a897c9ee777de65f5844
|
f86eae36eaa4487bc67718b81e293c85ff78a6fb
|
/man/mics-package.Rd
|
f37c7d9fa7659432fbf97494200e09757d518a8a
|
[] |
no_license
|
epix-project/mics
|
829b51e0cfadc467668f559e72a3836027e1aae9
|
fd13ab08910016baace45488ded801eac5e673d5
|
refs/heads/master
| 2020-03-28T05:58:10.235696
| 2018-10-09T09:54:39
| 2018-10-09T09:54:39
| 147,806,678
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 778
|
rd
|
mics-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mics.R
\docType{package}
\name{mics-package}
\alias{mics}
\alias{mics-package}
\title{mics: analysing multiple indicator cluster surveys}
\description{
mics provides a collection of functions to analyse multiple indicator cluster
surveys such as the UNICEF MICS or USAID DHS.
}
\details{
To learn more about mics, start with the vignettes:
`browseVignettes(package = "mics")`
}
\author{
\strong{Maintainer}: Marc Choisy \email{marc.choisy@ird.fr} (0000-0002-5187-6390) [copyright holder]
Authors:
\itemize{
\item Sonia Lewycka \email{slewycka@oucru.org} (0000-0002-5923-9468) [copyright holder]
\item Lucie Contamin \email{contamin.lucie@gmail.com} (0000-0001-5797-1279) [copyright holder]
}
}
|
501e40a93339f167ce25a63070a24dce9dd40207
|
28e5bcbacce8558e4198e2eab55b9c638c74bc8a
|
/implementation_2/NYCTaxi/R/RcppExports.R
|
814531796360e39fb86f72f9306f11788e111446
|
[] |
no_license
|
huragok/STA242HW5
|
57dbb6579a3a4fc0e2e47d6cd7b9c696de1d89cc
|
53a1b7303678bf2160375174a3524471e6c9fee9
|
refs/heads/master
| 2021-01-10T10:11:23.226890
| 2015-05-31T23:33:42
| 2015-05-31T23:33:42
| 36,768,675
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 868
|
r
|
RcppExports.R
|
# This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Function to update the sufficient statistics of linear regression based on a bulk of data
#'
#' The sufficient statistics of the linear regression is recorded as a p-by-(p+1) matrix which is the row concatenation of x^Hx and x^Hy where p is the number of predictors (including constant 1).
#' @param xx_xy a p-by-(p+1) matrix, [1:p, 1:p] represents the current value of x^Hx and [p+1, 1:p] represents the current value of x^Hy
#' @param y a n-by-1 vector, the bulk of observatons
#' @param x_less_ones a n-by-(p-1) matrix, the bulk of predictors excluding 1
#' @return the updated sufficient statistic xx_xy
#' @export
updateSuffStat <- function(xx_xy, y, x_less_ones) {
    # Thin R wrapper around the compiled routine registered by Rcpp.
    # NOTE: this file is generated by Rcpp::compileAttributes -- do not
    # edit by hand; regenerate from the C++ source instead.
    .Call('NYCTaxi_updateSuffStat', PACKAGE = 'NYCTaxi', xx_xy, y, x_less_ones)
}
|
8bf697dd10069a12c68ddb5f81ae9373a9020062
|
4498288a6df6d1cd6beedb6a59229c1784e70d29
|
/r_script/func_var_test.r
|
b0385e123d5dbd2a04d588946cc6e1acaa263c65
|
[] |
no_license
|
bioticinteractions/r_script
|
f92672a6d6fff5a3e8cff31fc10635b595e0495c
|
9deed66eca3ae6002bafcc48a9e2000d5a2db0b4
|
refs/heads/master
| 2021-10-01T18:07:28.340418
| 2018-11-28T03:25:22
| 2018-11-28T03:25:22
| 106,977,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,055
|
r
|
func_var_test.r
|
################################################################################
#function for calculating test statistics for potential variables for model use
################################################################################
var_test <- function(data, seg_col, formula){
  # Fit a binomial GLM per segment and collect model diagnostics and
  # predictions for each segment.
  #
  # Args:
  #   data:    data frame holding the modelling data; must contain the
  #            segmentation column named by 'seg_col'.
  #   seg_col: name (string) of the column identifying the segments.
  #   formula: model formula passed to glm(..., family = binomial).
  #
  # Returns:
  #   A list of length 2 * (number of segments): first the per-segment
  #   data frames of predictions merged onto the scoring data, then the
  #   per-segment tables of chi-square statistics, coefficients and VIFs.
  #
  # NOTE(review): predictions are made on the global 'scored_2015_m',
  # which must exist in the calling environment -- consider passing it
  # in as an explicit argument.
  if (!requireNamespace("car", quietly = TRUE)) {
    stop("Package 'car' is required for VIF calculation.", call. = FALSE)
  }
  df <- data
  segment_list <- as.character(unique(df[, seg_col]))
  list_of_df <- vector("list", length(segment_list))
  list_of_chi <- vector("list", length(segment_list))
  for (k in seq_along(segment_list)) {
    segment <- segment_list[k]
    # Bug fix: the original filtered on the hard-coded column 'seg',
    # silently ignoring the 'seg_col' argument.
    df_seg <- df[df[[seg_col]] == segment, ]
    glm_mod <- glm(formula, data = df_seg, family = binomial)
    glm_chi <- anova(glm_mod, test = "Chisq")
    # One extra leading column to carry the term names, labelled with
    # the segment value.
    chi_df <- as.data.frame(matrix(ncol = length(names(glm_chi)) + 1,
                                   nrow = length(row.names(glm_chi))))
    names(chi_df) <- c(segment, names(glm_chi))
    chi_df[, 1] <- row.names(glm_chi)
    for (j in seq_along(names(glm_chi))) {
      # NOTE(review): the original deliberately (?) skips column 2 of
      # the anova table; behaviour preserved here -- confirm intent.
      if (j != 2) {
        chi_df[, j + 1] <- glm_chi[j]
      }
    }
    glm_vif <- data.frame(car::vif(glm_mod))  # variance inflation factors
    colnames(glm_vif) <- "VIF"
    glm_coeff <- as.data.frame(glm_mod$coefficients)
    colnames(glm_coeff) <- "coeff"
    glm_vif_adj <- rbind(c(NA, NA), glm_vif)  # pad to align the intercept row
    glm_result <- cbind(chi_df, glm_coeff, glm_vif_adj)
    df_pred <- scored_2015_m[scored_2015_m[[seg_col]] == segment, ]
    predicted <- as.data.frame(predict(glm_mod, df_pred, type = "response"))
    names(predicted) <- "predicted"
    list_of_df[[k]] <- merge(predicted, df_pred, by = "row.names")
    list_of_chi[[k]] <- glm_result
  }
  return(c(list_of_df, list_of_chi))
}
|
6e9eed3e608df895415cbbce8f55e1597ba8e3c7
|
dc7169116a18420ba27791d1ae937519cd3b7028
|
/man/rlogitnorm.Rd
|
0eaa6ca12706bcfdb55cf3a2eabca5bff2ef3c99
|
[] |
no_license
|
bgctw/logitnorm
|
1ee55f1f36a4700f276b8c0ed3bde4b199e2215b
|
527a5cf52b8d8a17b48ad9b5a2cfb3042e6093f6
|
refs/heads/master
| 2022-01-01T21:00:59.761492
| 2022-01-01T11:57:05
| 2022-01-01T11:57:05
| 73,286,222
| 1
| 1
| null | 2018-07-30T12:01:11
| 2016-11-09T13:25:06
|
R
|
UTF-8
|
R
| false
| false
| 428
|
rd
|
rlogitnorm.Rd
|
\name{rlogitnorm}
\alias{rlogitnorm}
\title{rlogitnorm}
\description{Random number generation for logitnormal distribution}
\usage{rlogitnorm(n, mu = 0, sigma = 1, ...)}
\arguments{
\item{n}{number of observations}
\item{mu}{distribution parameter}
\item{sigma}{distribution parameter}
\item{\dots}{arguments to \code{\link{rnorm}}}
}
\author{Thomas Wutzler}
\seealso{\code{\link{logitnorm}}}
|
2d2cdfe3b193c825623ae200855199a9c3294c29
|
72d0d60685ff5e1b8f4b122bfc5cfc67234175ef
|
/man/send_invite_2_fun.Rd
|
8066ec41a7fd0e60b71afd58d176f3788e58a32f
|
[] |
no_license
|
oliverpurschke/smols
|
90d1476045e31ac03b5467f0380879d28e8f2b56
|
1dbf5f5bcfa080d836d55fbf65b91173af81843a
|
refs/heads/main
| 2023-06-21T08:52:27.823888
| 2021-07-27T10:35:30
| 2021-07-27T10:35:30
| 348,012,437
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 603
|
rd
|
send_invite_2_fun.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/send_invite_2_fun.R
\name{send_invite_2_fun}
\alias{send_invite_2_fun}
\title{Einladungsmails ab 2. Wellen versenden}
\usage{
send_invite_2_fun(dat, senden)
}
\arguments{
\item{dat}{Dataframe mit Adressinformationen der Teilnehmer die angeschrieben werden sollen}
\item{senden}{Sollen mail sofort gesendet werden oder erst in Outlook geöffnet werden (TRUE, FALSE)}
}
\description{
send_invite_2_fun() erlaubt das Versenden von Einladungsmails an die Teilnehmer ab der 2. Welle.
}
\examples{
send_invite_2_fun()
}
|
fe1bcc74dd74cd9d3b350cb71ae3d6ac0b66c844
|
364bb477d657913eef1ab7a5461ceb9e6aa95a32
|
/Airquality.R
|
e517910c5a42963fb4485ecc641021435ac610be
|
[] |
no_license
|
Eduardo0396/Programaci-n-Actuarial-III
|
2f76c6a30f36b26b1d6895fd38307088e4380627
|
f212d048a039778fd64b9c9c66cd10d0ba904054
|
refs/heads/master
| 2021-01-18T23:14:36.348796
| 2016-06-15T18:34:40
| 2016-06-15T18:34:40
| 50,951,116
| 1
| 1
| null | 2016-02-03T13:42:17
| 2016-02-02T20:53:21
| null |
UTF-8
|
R
| false
| false
| 186
|
r
|
Airquality.R
|
# Exploratory script around the built-in 'airquality' data set
# (available from the 'datasets' package that attaches by default).
y <- airquality
# NOTE(review): the target file name contains literal leading/trailing
# spaces -- probably unintended; confirm before relying on the output.
dput ( y , " airquality.R " )
hijo <- (airquality)
# Keep only the fully observed rows (no NA in any column).
datos <- airquality [complete.cases (airquality),]
datos
nrow(datos)
dim(datos)
# Count of complete (NA-free) rows in the original data.
sum(complete.cases ( airquality ))
|
eae216fb4fd08ba3a2e5a4bc3ba4809b0ada48d1
|
8560ce389e1cc0f6351bbe0daa17aba177ddc776
|
/HRR_tool_ensemble_OLDcountHRRwalk.R
|
458062a4701372da35e8545039818e0b51c29efd
|
[] |
no_license
|
alexandrekl/fema_r1
|
28d8577fccc51020817602e37b8e8de6931a7bbe
|
ada4a6d588ab9dbae949c9e4e1082c1df7aa8222
|
refs/heads/master
| 2023-04-14T22:41:56.759243
| 2021-04-27T19:15:55
| 2021-04-27T19:15:55
| 310,356,329
| 1
| 0
| null | 2021-04-27T19:15:56
| 2020-11-05T16:25:53
|
HTML
|
UTF-8
|
R
| false
| false
| 9,204
|
r
|
HRR_tool_ensemble_OLDcountHRRwalk.R
|
# Get CDC emsemble data for HRR Excel tool
library(dplyr)
library(openxlsx)
latest_forecast_date <- '2021-01-11'
NEstfips <- c('09','25','23','33','44','50', '36') # FIPS of states in New England + NY
# truth URL from the CDC ensemble
turl <- 'https://github.com/CSSEGISandData/COVID-19/raw/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv'
df <- read.csv( turl ) %>% tidyr::pivot_longer( cols = starts_with("X")
, names_to="date", names_prefix="X"
, values_to = "value", values_drop_na = TRUE ) %>%
mutate( date=as.Date(gsub("\\.","\\/",date), format="%m/%d/%y")
, location=sprintf("%05d", FIPS) ) %>%
filter( substr(location, start=1, stop=2) %in% NEstfips )
ens_hist <- df %>% filter( nchar(location)==5 ) # date <= "2021-01-11" location == '25' &
latest_history_date <- max(ens_hist$date)
tmp <- latest_history_date - min(ens_hist$date)
npastdays <- tmp[[1]] + 1
# read URL of CDC ensemble forecasts per county
eurl <- paste0('https://github.com/reichlab/covid19-forecast-hub/raw/master/data-processed/COVIDhub-ensemble/'
, latest_forecast_date, '-COVIDhub-ensemble.csv')
df <- read.table( eurl, sep = ',', header = T ) # fields are explained in https://github.com/reichlab/covid19-forecast-hub/blob/master/data-processed/README.md#quantile
# N wk ahead inc case
# This target is the incident (weekly) number of cases predicted by the model during the week that is N weeks after forecast_date.
# A week-ahead forecast should represent the total number of new cases reported during a given epiweek (from Sunday through Saturday, inclusive).
# Predictions for this target will be evaluated compared to the number of new reported cases, as recorded by JHU CSSE.
# location: FIPS (we get 5-digit that represent counties)
# type/quantile: we get median
ens_fcst <- df %>%
filter(substr(location, start=1, stop=2) %in% NEstfips # New England states only
& forecast_date==latest_forecast_date & grepl( '[1-4] wk ahead inc case', target )
& nchar(location)==5
& type=='quantile' & quantile==0.5)
nforecasts <- length(unique(ens_fcst$target))
# build county to HRR crosswalk data, to assign forecasts from counties to HRRs based on population
tmp <- tempfile()
download.file('https://atlasdata.dartmouth.edu/downloads/geography/ZipHsaHrr18.csv.zip',tmp)
ziphrr <- read.table( unz(tmp, 'ZipHsaHrr18.csv'), sep=',', quote="", header=T, colClasses='character' )
unlink(tmp)
# New england HRRs
ziphrrne <- ziphrr %>% filter( hrrnum %in% c(109,110,111,221,222,227,230,231,281,282,295,364,424))
# ZIPcode to County Code crosswalk from https://www.huduser.gov/portal/datasets/usps_crosswalk.html#data
tmp <- readxl::read_xlsx('/Users/aligo/Downloads/FEMA recovery data/ZIP_COUNTY_092020.xlsx', col_types=c('text')) %>%
mutate( RES_RATIO=as.numeric(RES_RATIO), BUS_RATIO=as.numeric(BUS_RATIO)
, OTH_RATIO=as.numeric(OTH_RATIO), TOT_RATIO=as.numeric(TOT_RATIO) ) # %>%
# group_by(ZIP) %>% mutate( ratio=max(TOT_RATIO) ) %>% ungroup()
zipcounty <- tmp #%>% filter( ratio == TOT_RATIO ) # keep county with biggest ratio of all addresses in the ZIP–County to the total number of all addresses in the entire ZIP.
#zip/county/hrr
zipctyhrr <- inner_join(ziphrrne, select(zipcounty,ZIP,COUNTY,TOT_RATIO), by=c('zipcode18'='ZIP'))
countylst <- unique(zipctyhrr$COUNTY)
#add zipcode population
zipop <- read.table( unz('/Users/aligo/Downloads/FEMA recovery data/ACSST5Y2019.S0101_2020-12-30T143736.zip'
, 'ACSST5Y2019.S0101_data_with_overlays_2020-12-30T143117.csv'), sep=',', skip=2 ) %>%
mutate( ZIP=substr(V2, 7, 12), ZIPOP=V3 )
zip_cty_hrr_pop <- left_join(zipctyhrr, select(zipop, ZIP, ZIPOP), by=c('zipcode18'='ZIP'))
zip_cty_hrr_pop$ZIPOP[is.na(zip_cty_hrr_pop$ZIPOP)] <- 0
# population by county,HRR
sumpopctyhrr <- zip_cty_hrr_pop %>% ungroup() %>%
group_by(hrrcity,COUNTY) %>% summarise( pop = sum(ZIPOP*TOT_RATIO) )
hrrcities <- unique( sumpopctyhrr$hrrcity )
# population by county
sumpopcty <- zip_cty_hrr_pop %>% ungroup() %>%
group_by(COUNTY) %>% summarise( pop = sum(ZIPOP*TOT_RATIO) )
CasesofCountythatareinHRR <- function(countyi, hrrcityi, ensemble){
  # Apportion county-level case counts to the part of county 'countyi'
  # lying within HRR 'hrrcityi', scaling by that part's population share.
  # 'ensemble' is either forecast data (has a 'forecast_date' column,
  # weekly rows, which get expanded to daily) or historical data (daily).
  # Depends on script-level globals: nforecasts, npastdays, sumpopctyhrr
  # and sumpopcty -- assumed to exist in the enclosing environment.
  # forecast of county countyi that are within HRR hrrcityi
  casescty <- ensemble %>% filter( location==countyi ) # county forecast
  if("forecast_date" %in% colnames(ensemble))
  { # forecast data: nforecasts rows
    stopifnot( nrow(casescty)==nforecasts )
  }
  else
  { # historical data: npastdays rows
    if (nrow(casescty)!=npastdays)
      stop( paste0(countyi, ", nrow(casescty): ", nrow(casescty)) )
  }
  # Population of the county that falls inside this HRR; zero rows means
  # no overlap, more than one row indicates a crosswalk inconsistency.
  tmp <- sumpopctyhrr %>% filter( hrrcity==hrrcityi & COUNTY==countyi ) # pop of county that are in HRR
  if (nrow(tmp)==1){ popctyhrr <- tmp$pop }
  else if (nrow(tmp)==0){ popctyhrr <- 0 }
  else{ stop(paste0('length popctyhrr',nrow(tmp))) }
  popcty <- sumpopcty %>% filter( COUNTY==countyi ) # pop of county
  stopifnot( nrow(popcty)==1 )
  # Scale the county counts by the HRR's share of the county population.
  casescty$casesctyhrr <- casescty$value * popctyhrr / popcty$pop
  if("forecast_date" %in% colnames(ensemble))
  { # forecast data (weekly)
    # change weekly to daily frequency (linearly)
    daily <- casescty[rep(seq_len(nrow(casescty)), each=7),]
    daily$date <- seq( from=as.Date(casescty$forecast_date[1]), by="days", length.out=nrow(daily) )
    daily$casesctyhrr <- daily$casesctyhrr / 7
    return( daily )
  }
  else
  { # historical data is already daily
    return( casescty )
  }
}
CasesofHRR <- function(hrrcityi, ensemble){
  # Aggregate daily case counts for HRR 'hrrcityi' by summing, per date,
  # the population-apportioned contributions of every county.
  # Depends on script-level globals: countylst, nforecasts, npastdays.
  # forecast of HRR hrrcityi
  tmp <- lapply( countylst, CasesofCountythatareinHRR, hrrcityi, ensemble )
  casesofctiesinhrr <- bind_rows(tmp)
  caseshrr <- casesofctiesinhrr %>% ungroup() %>% group_by( date ) %>%
    summarise( caseshrr = sum(casesctyhrr, na.rm=T) )
  # Sanity-check row counts: weekly forecasts were expanded to daily
  # (hence nforecasts * 7); history is already one row per past day.
  if("forecast_date" %in% colnames(ensemble))
    stopifnot( nrow(caseshrr)==nforecasts*7 ) # forecast data: nforecasts rows
  else
  { # historical data: npastdays rows
    if (nrow(caseshrr)!=npastdays)
      stop( paste0(hrrcityi, ", nrow(caseshrr): ", nrow(caseshrr)) )
  }
  caseshrr$hrrcity <- hrrcityi
  return( caseshrr )
}
colseq <- c("date","Bridgeport","Hartford","New Haven"
,"Boston","Springfield","Worcester","Bangor","Portland"
,"Lebanon","Manchester","Providence","Burlington","Albany")
# history - raw data is cumulative
tmp <- lapply( hrrcities, CasesofHRR, ens_hist )
cumulhrrs_history <- bind_rows(tmp) %>% tidyr::pivot_wider( names_from=hrrcity, values_from=caseshrr )
cumulhrrs_history <- cumulhrrs_history[colseq]
# new cases with smoothing through 7-day moving average
inchrrs_history <- cumulhrrs_history
for ( col in 2:length(colseq) )
{
inchrrs_history[col] <- zoo::rollmean( diff( c(0,cumulhrrs_history[[col]]) )
, k=7, fill=0, align="right" )
}
# forecast - raw data is NEW CASES
tmp <- lapply( hrrcities, CasesofHRR, ens_fcst )
inchrrs_forecast <- bind_rows(tmp) %>% tidyr::pivot_wider( names_from=hrrcity, values_from=caseshrr ) %>%
filter( date > latest_history_date )
inchrrs_forecast <- inchrrs_forecast[colseq]
# bind history and forecast
inchrrs <- bind_rows( inchrrs_history, inchrrs_forecast )
# transform new cases to cumulative
cumulhrrs <- inchrrs
for ( col in 2:length(colseq) )
cumulhrrs[,col] <- cumsum( inchrrs[,col] )
# for ( col in 2:length(colseq) )
# caseshrrs_forecast[,col] <- cumsum( caseshrrs_forecast[,col] )
# forecast - raw data is NEW CASES
# tmp <- lapply( hrrcities, CasesofHRR, ens_fcst )
# caseshrrs_forecast <- bind_rows(tmp) %>% tidyr::pivot_wider( names_from=hrrcity, values_from=caseshrr ) %>%
# filter( date >= latest_history_date )
#caseshrrs_forecast <- caseshrrs_forecast[colseq]
# transform new cases to cumulative
#caseshrrs_forecast[caseshrrs_forecast$date==latest_history_date,] = caseshrrs_history[caseshrrs_history$date==latest_history_date,]
# for ( col in 2:length(colseq) )
# caseshrrs_forecast[,col] <- cumsum( caseshrrs_forecast[,col] )
# caseshrrs_forecast <- caseshrrs_forecast %>% filter( date > latest_history_date) # delete first row
# bind history and forecast
# caseshrrs <- bind_rows( caseshrrs_history, caseshrrs_forecast )
fname <- paste0('/Users/aligo/Downloads/FEMA recovery data/HRR_Forecast_cumul_',latest_history_date,'.xlsx')
write.xlsx( cumulhrrs, fname )
# DEBUG - 14-day rolling average of new cases per 100K people,
# to benchmark with https://www.dartmouthatlas.org/covid-19/hrr-mapping/
sumpophrr <- sumpopctyhrr %>% ungroup() %>% group_by( hrrcity ) %>%
summarise( pop = sum(pop), .groups='drop_last' )
inchrrs100k <- inchrrs
for ( col in 2:length(colseq) ){
pop <- sumpophrr$pop[sumpophrr$hrrcity == colseq[col]]
inchrrs100k[,col] <- zoo::rollsum( inchrrs100k[[col]] / pop * 100e3
, k=14, fill=NA, align="right" )
}
fname <- paste0('/Users/aligo/Downloads/FEMA recovery data/HRR_Forecast100K14d_',latest_history_date,'.xlsx')
write.xlsx( inchrrs100k, fname )
|
f92edabd9737b8f534a604431d1f0a8c1f577d0d
|
5e4af78accb607c8bc66e674ec29e3e010baf2c9
|
/R/zzz.R
|
fd2102cecf4274ec058d6e2cb90f89a9ee86fad0
|
[] |
no_license
|
onebacha/connectir
|
fc472b8c08c88063ac787c819c9fdfe10a262e7d
|
baff25329326e8c1784cbe63a7c2efdf4034be9a
|
refs/heads/master
| 2023-01-13T03:58:45.523952
| 2020-11-15T04:39:04
| 2020-11-15T04:39:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 258
|
r
|
zzz.R
|
#' Load the package's compiled code when the namespace is loaded.
#' @noRd
.onLoad <- function(libname, pkgname) {
  # Fix: '@nord' is not a roxygen tag; the internal-documentation tag
  # is '@noRd'. Also dropped the unidiomatic trailing semicolon.
  library.dynam("connectir", pkgname, libname)
}
#.noGenerics <- TRUE # This was a problem, not used.
#' Unload the package's compiled code when the namespace is unloaded.
#' @noRd
.onUnload <- function(libpath) {
  # Fix: '@nord' is not a roxygen tag; the internal-documentation tag
  # is '@noRd'. Also dropped the unidiomatic trailing semicolon.
  library.dynam.unload("connectir", libpath)
}
|
ceacce9d5581458fd5a778596e8768b0efb2a592
|
c9840d47330f946ec12a87acff7cb1f30b90090d
|
/tests/testthat/test-package.R
|
d704a1ff3455d0e805346b3de6d8ff80754b6b7e
|
[
"MIT"
] |
permissive
|
kforner/debugme
|
f205edc66574ea954ef431a1182513a7f09a5f4a
|
400ac11971ce694f0a55309020ed6ce2acb28f9a
|
refs/heads/master
| 2021-01-16T18:08:48.117894
| 2017-08-14T08:45:24
| 2017-08-14T08:45:24
| 100,038,860
| 0
| 0
| null | 2017-08-11T14:15:24
| 2017-08-11T14:15:24
| null |
UTF-8
|
R
| false
| false
| 742
|
r
|
test-package.R
|
context("debugme")
# .onLoad() should split the comma-separated DEBUGME environment variable
# into individual package names before initialising colours.
test_that(".onLoad", {
  val <- NULL
  # Capture the packages passed to initialize_colors() instead of running it.
  mockery::stub(.onLoad, "initialize_colors", function(pkgs) val <<- pkgs)
  withr::with_envvar(
    c("DEBUGME" = c("foo,bar")),
    .onLoad()
  )
  expect_identical(val, c("foo", "bar"))
})
# debugme() should instrument only functions containing "!DEBUG" markers,
# leaving non-function objects in the environment untouched.
test_that("debugme", {
  env <- new.env()
  env$f1 <- function() { "nothing here" }
  env$f2 <- function() { "!DEBUG foobar" }
  env$notme <- "!DEBUG nonono"
  env$.hidden <- function() { "!DEBUG foobar2" }
  # First pass: expected to produce no output (instrumentation inactive
  # before the '%in%' check is stubbed below -- see next stub).
  expect_silent(debugme(env))
  # Force the "package is enabled" membership check to succeed.
  mockery::stub(debugme, "%in%", TRUE)
  debugme(env)
  expect_silent(env$f1())
  expect_output(env$f2(), "debugme foobar \\+[0-9]+ms")
  # Non-function objects must not be modified by instrumentation.
  expect_identical(env$notme, "!DEBUG nonono")
  # Dot-prefixed (hidden) functions are instrumented too.
  expect_output(env$.hidden(), "debugme foobar2 \\+[0-9]+ms")
})
|
65a02f242541223ee8224d159076aa586c30cbbf
|
8f536537be5bf214525ea11bb84c568c9fb82fe7
|
/R/mp_y0.R
|
af56e4bf8acad92a8a43bb87927820344181d582
|
[
"MIT"
] |
permissive
|
yuliasidi/bin2mi
|
5fa742f72d21034c7def62bb30d078e63c18d2ff
|
51ec9b77d0afb0498ca59fbb91fd71e80479dede
|
refs/heads/master
| 2021-06-22T15:01:14.716873
| 2021-02-20T18:53:31
| 2021-02-20T18:53:31
| 197,215,389
| 0
| 0
|
NOASSERTION
| 2021-02-20T18:53:32
| 2019-07-16T15:00:09
|
R
|
UTF-8
|
R
| false
| false
| 603
|
r
|
mp_y0.R
|
#' @title conditional missing probability for missing not at random
#' @description calculates probability of missing conditional on y=0
#' @param do_tar numeric, target dro-out rate
#' @param mp_y1 numeric, missing probability conditional on y=1
#' @param p_y1 numeric, probability of y=1
#' @return numeric
#' @details the aim of this function is to calculate probability of missing
#' conditional on y=0, in order to impose mnar missingness structure
#' @examples
#' mp_y0(0.1, 0.14, 0.65)
#' @rdname mp_y0
#' @export
mp_y0 <- function(do_tar, mp_y1, p_y1){
  # Missing probability conditional on y = 0, under MNAR missingness.
  # Solves do_tar = mp_y1 * p_y1 + mp_y0 * (1 - p_y1) for mp_y0, i.e.
  # finds the P(missing | y = 0) that yields the target drop-out rate.
  #
  # Args:
  #   do_tar: target overall drop-out rate.
  #   mp_y1:  missing probability conditional on y = 1.
  #   p_y1:   probability of y = 1.
  numerator <- do_tar - mp_y1 * p_y1
  denominator <- 1 - p_y1
  numerator / denominator
}
|
ba0433849cfc2798c726c1c0662a102a4240b9df
|
65f6febb549fe2b9a2d41074ebab5317a4489d1c
|
/R/SQRL.R
|
e453209d0e8d47a596b2833677a74ee071a11c89
|
[] |
no_license
|
cran/SQRL
|
4c0d86211617631c630c16939e8e863e1aa5fd64
|
ae6f80e41d9e4f9c80721c487523221b46b2cea8
|
refs/heads/master
| 2022-10-14T07:59:22.216921
| 2022-09-20T20:40:02
| 2022-09-20T20:40:02
| 110,434,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 265,030
|
r
|
SQRL.R
|
####################################################################### SQRL ###
# Wrapper about RODBC. On load, SQRL automatically generates a like-named user-
# interface function to each DSN it finds on the system. These functions enable
# immediate interaction with each data source, since channels and communication
# parameters are managed behind the scenes. The general philosophy is to require
# the least possible typing from the user, while allowing the greatest possible
# flexibility in how commands are entered. This approach emphasises the source
# and query over concatenation functions and control parameters. The interfaces
# accept multi-statement SQL scripts, allowing the use of scripts developed in
# other applications without modification or fragmentation. The script parser
# supports query parameterisation via embedded R expressions, the feedback of
# intermediate results, reusable procedures, conditional submission, loops, and
# early returns. Secondary features include the protection of connection handles
# from rm(), automatic recovery from lost connections, the promotion of remote
# ODBC exceptions to local R errors, optional automatic closure of connections
# between queries, and visual indication of which connections are open and of
# where and whether queries or fetches are in progress.
# Mike Lee, South Titirangi, 7 March 2020.
################################################################### CONTENTS ###
# srqlHaus Private. Environment. Stores data-source parameters.
# srqlHelp Private. Environment. Stores interface-help temp files.
# SqrlAll() Private. Broadcasts a command to every SQRL source.
# SqrlCache() Private. Interfaces with srqlHaus (only point of contact).
# SqrlClose() Private. Closes data source connection channels.
# SqrlConfig() Private. Sets SQRL/RODBC parameters from a config file.
# SqrlDefault() Private. Defines and returns default parameter values.
# SqrlDefile() Private. Extracts parameter values from container files.
# SqrlDelegate() Private. Delegates commands to appropriate functions.
# SqrlDSNs() Private. Registers existing DSN data sources with SQRL.
# SqrlFace() Private. Interfaces with the SQRL:Face environment.
# SqrlFile() Private. Sources SQL (and/or R) statements from a file.
# SqrlHelp() Private. Generates run-time help for interface functions.
# SqrlHelper() Private. Escapes strings for help-file compatibility.
# SqrlIndicator() Private. Toggles display of open-connection indicators.
# SqrlInterface() Private. Defines and/or deletes data source interfaces.
# SqrlIsOpen() Private. Tests whether or not source channels are open.
# SqrlOff() Private. Closes all channels, detaches and unloads SQRL.
# SqrlOpen() Private. Opens connection channels to data sources.
# SqrlParam() Private. Gets and sets data source SQRL/RODBC parameters.
# SqrlParams() Private. Defines and returns various parameter groupings.
# SqrlPath() Private. Checks if args are the path to an existing file.
# SqrlPing() Private. Sets and submits ping queries to data sources.
# SqrlPL() Private. Detects procedural (PL) blocks within scripts.
# SqrlProc() Private. Checks if a value names a stored procedure.
# SqrlShell() Private. Relays commands from interfaces to delegator.
# SqrlStatement() Private. Assembles SQL statements from listed components.
# SqrlSource() Private. Registers/defines new data sources with SQRL.
# SqrlSources() Private. Look for, and summarise, known data sources.
# SqrlSubmit() Private. Submits SQL, retrieves results, handles errors.
# SqrlSubScript() Private. Relays data between SqrlFile() and SqrlSubmit().
# SqrlTry() Private. Silent error catching, with warning suppression.
# SqrlValue() Private. Wrapper to SqrlParam(). Keeps secrets secret.
# sqrlAll() Public. Wrapper to SqrlAll(). See above.
# sqrlInterface() Public. Wrapper to SqrlInterface(). See above.
# sqrlOff() Public. Wrapper to SqrlOff(). See above.
# sqrlSource() Public. Wrapper to SqrlSource(). See above.
# sqrlSources() Public. Wrapper to SqrlSources(). See above.
# .onLoad() Private. Attaches SQRL:Face and finds sources, on load.
# .onUnload() Private. Detaches the SQRL:Face environment, on unload.
############################################################### ENVIRONMENTS ###
# Environment for caching data source parameters. Not exported. The user will
# not, without some effort, be able to view or modify objects within this.
srqlHaus <- new.env(parent = emptyenv())
# Environment for tracking temp files for the dynamic run-time help system. Not
# exported. The user will not be able to easily view or modify objects within.
srqlHelp <- new.env(parent = emptyenv())
# There will also exist a public environment, attached to the R search path as
# 'SQRL:Face', by the .onLoad() function, when the package is loaded.
########################################################## PRIVATE FUNCTIONS ###
SqrlAll <- function(argsl,
                    envir = parent.frame())
{
  # Broadcasts one command to every currently defined SQRL source.
  # Args:
  #   argsl : A list of arguments, passed unaltered to SqrlDelegate().
  #   envir : Environment beneath which embedded R script is evaluated.
  # Returns:
  #   A list, keyed by (unique) SQRL source name, of the per-source
  #   results of running the argsl command(s).
  # User:
  #   Has no direct access, but can supply (only) argsl via sqrlAll(),
  #   which guarantees it is a list; no further validation is needed.
  sources <- SqrlCache("*")
  if (length(sources) < 1L)
  {
    return(list())
  }
  # Deliberately not wrapped in try(): a fatal error on one source stops
  # the broadcast, which may well be the preferable behaviour.
  # lapply() retains NULL results as genuine NULL-valued list elements.
  results <- lapply(sources,
                    function(src) SqrlShell(src, envir, argsl))
  names(results) <- sources
  return(results)
}
SqrlCache <- function(datasource = "",
                      exists = NULL,
                      create = FALSE,
                      delete = FALSE)
{
  # Checks, creates, lists, deletes and gets data source cache environments.
  # This is the only point of contact with the srqlHaus store.
  # Args:
  #   datasource : The name of a data source, or '*' for all known data sources.
  #   exists     : If set to TRUE or FALSE, test if a cache exists or doesn't.
  #   create     : If set to TRUE, create a cache for the data source.
  #   delete     : If set to TRUE, delete an existing data source cache.
  # Returns:
  #   Either an environment handle, a logical (when performing an existence
  #   check), a character vector (when listing all known data sources), or
  #   invisible NULL (after removing a data source's cache).
  # SQRL Calls:
  #   SqrlClose(), SqrlInterface(), SqrlParam(), SqrlParams(), srqlHaus.
  # SQRL Callers:
  #   SqrlAll(), SqrlDefault(), SqrlDelegate(), SqrlDSNs(), SqrlOff(),
  #   SqrlParam(), SqrlSource(), SqrlSources(), sqrlInterface().
  # User:
  #   Has no direct access, but is able to supply (only) the datasource
  #   argument via SqrlSource(), which verifies existence of that source
  #   before passing the argument on. No further validity checks required.
  # The cache environment for source <datasource> is named '.!<datasource>';
  # the '.!' prefix keeps caches distinguishable within srqlHaus.
  cachename <- paste(".", datasource, sep = "!")
  # If the exists argument was specified, return whether or not the cache
  # exists (i.e., whether the data source is known to SQRL). The 'exists'
  # argument (variable) does not shadow base::exists(), the function.
  # When exists is TRUE (FALSE), return TRUE when the cache does (does
  # not) exist.
  if (!is.null(exists))
  {
    ex <- exists(cachename, srqlHaus, mode = "environment", inherits = FALSE)
    return(ex == exists)
  }
  # If the delete flag was set: close any open connection to the source,
  # delete its interface, wipe secret parameters, delete its cache (this
  # completes deregistration from SQRL), and return invisible NULL. The
  # garbage collector handles any remaining parameters within the cache.
  if (delete)
  {
    if (exists(cachename, srqlHaus, mode = "environment", inherits = FALSE))
    {
      SqrlClose(datasource)
      SqrlInterface(datasource, "remove")
      SqrlParam(datasource, "reset", SqrlParams("secret"))
      SqrlParam(datasource, "reset", SqrlParams("semi-secret"))
      remove(list = cachename, pos = srqlHaus, inherits = FALSE)
    }
    return(invisible(NULL))
  }
  # If datasource is '*', return a character vector of all data source
  # names for which a SQRL cache (environment) exists.
  if (datasource == "*")
  {
    cachenames <- objects(srqlHaus, all.names = TRUE, pattern = "^\\.!")
    if (length(cachenames) < 1L)
    {
      return(character(0L))
    }
    # Keep only names that are genuinely environments (defensive filter).
    is.cache <- sapply(as.list(cachenames),
      function(x) exists(x, srqlHaus, mode = "environment", inherits = FALSE))
    cachenames <- cachenames[is.cache]
    # Strip the '.!' prefix to recover the bare source names.
    return(substring(cachenames, nchar(".!") + 1L))
  }
  # If create was specified as TRUE, create a new cache for the specified
  # data source. Abort if the cache (or a like-named object) exists.
  if (create)
  {
    if (exists(cachename, srqlHaus, inherits = FALSE))
    {
      stop("Source cache already exists.")
    }
    cache <- new.env(parent = emptyenv())
    assign(cachename, cache, srqlHaus)
    SqrlParam(datasource, "name", datasource)
    return(cache)
  }
  # Default (getter) mode: abort if the cache does not exist.
  if (!exists(cachename, srqlHaus, mode = "environment", inherits = FALSE))
  {
    stop("Cache does not exist.")
  }
  # The cache exists; return a handle to it.
  return(get(cachename, srqlHaus, mode = "environment", inherits = FALSE))
}
SqrlClose <- function(datasource = "")
{
  # Closes the channel to the specified data source.
  # Args:
  #   datasource : The name of a data source whose channel is to be closed.
  # Returns:
  #   Invisible NULL, after closing the channel and removing the handle.
  # SQRL Calls:
  #   SqrlParam(), SqrlTry().
  # RODBC Calls:
  #   odbcClose().
  # SQRL Callers:
  #   SqrlCache(), SqrlDelegate(), SqrlFile(), SqrlIsOpen(), SqrlOff().
  # User:
  #   Has no direct access, unable to pass argument indirectly. No argument
  #   validity checks are required.
  # Fetch the stored connection handle (NULL when no channel exists).
  channel <- SqrlParam(datasource, "channel")
  # Only act when a handle is actually held; a NULL handle means the channel
  # is already closed, and there is nothing to do.
  if (!is.null(channel))
  {
    # Attempt the close. The channel may already be dead (server timeout,
    # network drop), so failures and warnings are deliberately suppressed.
    SqrlTry(RODBC::odbcClose(channel), warn = FALSE)
    # Nullify the stored handle unconditionally. Should the channel have
    # somehow survived the close attempt, this renders it unusable. The
    # SqrlParam() function will remove any visible connection indicators.
    SqrlParam(datasource, "channel", NULL)
  }
  # Return invisible NULL, whatever happened above.
  return(invisible(NULL))
}
SqrlConfig <- function(datasource = "",
                        config = "")
{
  # Assigns SQRL/RODBC parameter values, for a data source, from a file or list.
  # Args:
  #   datasource : The name of a known (to SQRL) data source.
  #   config     : The path of a configuration file, or a list of named values.
  # Returns:
  #   The imported configuration, as an invisible list of (name, value) pairs.
  #   When no configuration file is specified, this function acts as a getter,
  #   and returns a list of all SQRL/RODBC parameters and their current values.
  # SQRL Calls:
  #   SqrlDefile(), SqrlFile(), SqrlInterface(), SqrlParam(), SqrlParams(),
  #   SqrlPath(), SqrlValue().
  # SQRL Callers:
  #   SqrlDelegate(), SqrlHelp(), SqrlSource().
  # User:
  #   Has no direct access, but is able to pass the config argument (only) via
  #   SqrlDelegate(). That function vets the value, and ensures config is either
  #   a list (of named values) or a character string. In the latter case, the
  #   string ought to be the path of an actual (existing, readable) config file,
  #   but this is not guaranteed (and so is checked here).
  # Parameter values will be copied into this list, before setting or returning.
  conf <- list()
  # If no config was specified, return the data source's configuration as a list
  # of named SQRL/RODBC parameter values (with any secrets obliterated).
  if (identical(class(config), class(character()))
      && (nchar(config) < 1L))
  {
    params <- SqrlParams("all")
    params <- params[!(params %in% SqrlParams("omit-from-config"))]
    for (param in params)
    {
      # Single-bracket assignment with list() wrapping preserves NULL values
      # (plain [[<- would delete the element instead).
      conf[param] <- list(SqrlValue(datasource, param))
    }
    return(conf)
  }
  # If a list of named elements was supplied, extract parameter values from it.
  if (identical(class(config), class(list()))
      && !is.null(names(config))
      && all(grepl("[[:graph:]]", names(config))))
  {
    # When a name is replicated within the list, the final occurrence takes
    # precedence (each assignment overwrites any earlier one).
    for (i in seq_along(config))
    {
      conf[trimws(names(config)[i])] <- list(config[[i]])
    }
  # Otherwise, a file path should have been supplied as a character string.
  } else
  {
    # Abort if that file does not exist.
    filepath <- SqrlPath(config)
    if (is.null(filepath))
    {
      stop("File not found.")
    }
    # Slurp the file, and parse it as an R script. A parse error (invalid R)
    # propagates to the caller with R's own message.
    ftext <- readLines(filepath, warn = FALSE)
    flang <- parse(text = ftext, keep.source = FALSE)
    # An environment within which to evaluate the script. Inherits functions
    # from base, but not variables from the global environment.
    cfenv <- new.env(parent = baseenv())
    # Take each expression from the script, wrap it in a list, and evaluate.
    # Where this results in a named list, interpret the expression as a request
    # to set a value for the SQRL/RODBC parameter of that name, and add that
    # name-value pair to the config list. When a name appears within the script
    # multiple times, only the last value is used.
    for (expr in flang)
    {
      etext <- paste0(deparse(expr), collapse = "\n")
      # Wrapping in list(...) turns a 'name = value' expression into a
      # one-element named list, from which both name and value are recovered.
      lexp <- parse(text = paste0("list", "(", etext, ")"), keep.source = FALSE)
      lval <- eval(lexp, cfenv)
      if ((length(lval) == 1L)
          && !is.null(names(lval)))
      {
        conf[names(lval)] <- lval
      }
    }
  }
  # Ignore any request to set the channel.
  conf <- conf[names(conf) != "channel"]
  # If 'interface' is among the parameters to be set, then set it first (since
  # it's the one most likely to fail). If this does fail, then no further
  # parameter values will be set (SqrlInterface() will throw an exception).
  if ("interface" %in% names(conf))
  {
    value <- SqrlDefile("interface", conf[["interface"]])
    SqrlInterface(datasource, value)
    conf["interface"] <- list(SqrlValue(datasource, "interface"))
  }
  # Defining the library is another special case.
  if ("library" %in% names(conf))
  {
    value <- SqrlDefile("library", conf[["library"]])
    if (is.null(value))
    {
      # A NULL value requests a reset of the library to its default (empty).
      SqrlParam(datasource, "reset", "library")
    } else
    {
      # Otherwise, import the library definition via the script processor.
      SqrlFile(datasource, value, libmode = TRUE)
    }
    conf["library"] <- list(SqrlValue(datasource, "library"))
  }
  # Assign all other values found (besides those of 'interface' and 'library').
  # The driver parameter is set last, to override any default driver set as a
  # side effect in the course of setting dsn (should that have been set). By
  # the above construction, list-member (parameter) names are unique.
  params <- names(conf)
  params <- params[!(params %in% c("interface", "library"))]
  params <- c(params[params != "driver"], params[params == "driver"])
  for (parameter in params)
  {
    value <- SqrlDefile(parameter, conf[[parameter]])
    conf[parameter] <- list(SqrlValue(datasource, parameter, value))
  }
  # Return the (sorted, secrets-obscured) configuration, invisibly.
  return(invisible(conf[order(names(conf))]))
}
SqrlDefault <- function(datasource = "",
                        parameter = "")
{
  # Defines and returns default parameter values.
  # Args:
  #   datasource : The name of a known (to SQRL) data source.
  #   parameter  : The name of a single specific parameter.
  # Returns:
  #   The default value of the named parameter for the named data source.
  # SQRL Calls:
  #   SqrlCache(), SqrlParam().
  # SQRL Callers:
  #   SqrlParam().
  # User:
  #   Has no direct access, but is able to supply (only) parameter via
  #   SqrlDelegate(), which does the vetting. No further checks are required.
  # Obtain a handle to the data source's SQRL cache.
  cacheenvir <- SqrlCache(datasource)
  # Return the default value of the specified parameter. Note that switch()
  # evaluates only the one selected entry, so the SqrlParam() calls embedded
  # below run only when their own parameter is the one being defaulted.
  return(switch(parameter,
    # Parameters for RODBC::odbcConnect() and/or RODBC::odbcDriverConnect().
    "dsn"               = "",
    "uid"               = as.character(Sys.info()["user"]),
    "pwd"               = "",
    "connection"        = "",
    "case"              = "nochange",
    # Some drivers misreport row counts; SQLite is known for this, hence the
    # driver-dependent default.
    "believeNRows"      = !grepl("SQLite", SqrlParam(datasource, "driver"),
                                  ignore.case = TRUE),
    # MySQL quotes identifiers with backticks; the ANSI standard uses double
    # quotes.
    "colQuote"          = ifelse(grepl("MySQL",
                                        SqrlParam(datasource, "driver"),
                                        ignore.case = TRUE),
                                  "`", "\""),
    "tabQuote"          = SqrlParam(datasource, "colQuote"),
    "interpretDot"      = TRUE,
    "DBMSencoding"      = "",
    "rows_at_time"      = 100L,
    "readOnlyOptimize"  = FALSE,
    # Parameters for RODBC::sqlQuery().
    # Also uses believeNRows and rows_at_time, as above.
    "channel"           = NULL,
    "errors"            = TRUE,
    "as.is"             = FALSE,
    "max"               = 0L,
    "buffsize"          = 1000L,
    "nullstring"        = NA_character_,
    "na.strings"        = "NA",
    "dec"               = as.character(getOption("dec")),
    "stringsAsFactors"  = FALSE,
    # Parameters for SQRL.
    "*"                 = objects(cacheenvir, all.names = TRUE),
    "aCollapse"         = ",",
    "autoclose"         = FALSE,
    "driver"            = "",
    "interface"         = NULL,
    "lCollapse"         = "",
    "library"           = character(),
    "libstack"          = list(),
    "name"              = datasource,
    "ping"              = NULL,
    "prompt"            = substr(datasource, 1L, 1L),
    "pstack"            = list(cacheenvir),
    "result"            = NULL,
    "retry"             = TRUE,
    "scdo"              = TRUE,
    "verbose"           = FALSE,
    "visible"           = FALSE,
    "wintitle"          = paste0("(", datasource, ")"),
    # No other default parameter values are defined (abort and notify).
    stop("Unknown parameter.")))
}
SqrlDefile <- function(parameter = "",
                        value = "",
                        evaluate = FALSE)
{
  # Recursively substitutes file paths with contained parameter values.
  # Args:
  #   parameter : A single parameter name.
  #   value     : Either a final value or a file path (or components thereof).
  #   evaluate  : Whether or not to attempt to evaluate value as an expression.
  # Returns:
  #   A value for the parameter, either as supplied or as found within the
  #   supplied file (alternative).
  # SQRL Calls:
  #   SqrlDefile() (self), SqrlParams(), SqrlPath(), SqrlTry().
  # SQRL Callers:
  #   SqrlConfig(), SqrlDefile() (self), SqrlDelegate(), SqrlSource(),
  #   sqrlInterface().
  # User:
  #   Has no direct access, but is able to supply (only) parameter and value via
  #   SqrlParam() from SqrlDelegate() and/or SqrlConfig(). The parameter is
  #   guaranteed to be a string, and no further checks are required. The value
  #   may turn out to be unsuitable, but that is left for SqrlParam() to decide.
  # If the value is to represent a file path, it must be a non-empty, non-blank,
  # character vector (or list of character vectors). If the value is anything
  # besides these, return it unmodified. The reason for returning both blank and
  # empty character vectors here (before evaluation) is to take them as literal
  # values (rather than as R expressions; they would evaluate to NULL).
  if (!((identical(class(value), class(character()))
          && (length(value) > 0L)
          && any(nzchar(trimws(value))))
        || (identical(class(value), class(list()))
            && (length(value) > 0L)
            && all(rapply(rapply(value, class, how = "list"),
                    identical, classes = "ANY", deflt = NULL,
                    how = "unlist", class(character())))
            && any(nzchar(rapply(value, trimws))))))
  {
    return(value)
  }
  # See if the value corresponds to the path of a readable file. If so, this is
  # that path. If not, this is NULL.
  path <- SqrlPath(value)
  # If the value is not the path of a readable file, then return either the
  # unmodified value, or the (if possible and so requested) evaluated value.
  if (is.null(path))
  {
    # The value is not a path. If it is not to be evaluated, return it as is.
    if (!evaluate)
    {
      return(value)
    }
    # Otherwise, if the value doesn't evaluate, return it unmodified. The
    # evaluation environment inherits from base only, so the expression cannot
    # see (or alter) user variables in the global environment.
    evaluated <- SqrlTry(eval(parse(text = value, keep.source = FALSE),
                          new.env(parent = baseenv())))
    if (evaluated$error)
    {
      return(value)
    }
    # If the value evaluated to something odd (for example, 'ls' evaluates to a
    # function), return it unmodified. Only NULL, logical, character, numeric,
    # and integer results are accepted as parameter values.
    eclass <- class(evaluated$value)
    if (!(identical(eclass, class(NULL))
          || identical(eclass, class(logical()))
          || identical(eclass, class(character()))
          || identical(eclass, class(numeric()))
          || identical(eclass, class(integer()))))
    {
      return(value)
    }
    # The value could be evaluated; return the evaluated value.
    return(evaluated$value)
  }
  # Otherwise, the value specifies the path of a readable file. If the parameter
  # is path-valued, then return that path.
  if (parameter %in% SqrlParams("path-valued"))
  {
    return(path)
  }
  # Slurp the file.
  ftxt <- readLines(path, warn = FALSE)
  # If the file can't be parsed and evaluated, or if no 'parameter = value'
  # assignment is found, assume the file text is a literal parameter value.
  value <- trimws(ftxt)
  value <- value[nzchar(value)]
  # Attempt to parse the file as an R script.
  fexp <- SqrlTry(parse(text = ftxt, keep.source = FALSE))
  # If the file parsed, attempt to evaluate its R expressions (within an
  # environment that inherits functions from base, but not variables from the
  # global environment, or the evaluation environment of any parent file).
  if (!fexp$error)
  {
    fenv <- new.env(parent = baseenv())
    for (expr in fexp$value)
    {
      etxt <- paste0(deparse(expr), collapse = "\n")
      # As in SqrlConfig(); list-wrapping captures 'parameter = value'
      # expressions as one-element named lists.
      lexp <- parse(text = paste0("list", "(", etxt, ")"), keep.source = FALSE)
      lval <- SqrlTry(eval(lexp, fenv))
      # Should an error occur, revert to the default value (literal script).
      if (lval$error)
      {
        value <- trimws(ftxt)
        value <- value[nzchar(value)]
        break
      }
      # If the expression was of the form 'parameter = value', take that value.
      # When the file assigns the parameter more than once, the last wins.
      if (identical(names(lval$value), parameter))
      {
        value <- lval$value[[1L]]
      }
    }
  }
  # Put the extracted value back into this function, in case it is another
  # file path (recursive call, infinite loops are possible). Given that the
  # current value is a file path, we evaluate the next value (since it is to be
  # read from file as text), whether or not the current value was evaluated.
  return(SqrlDefile(parameter, value))
}
SqrlDelegate <- function(datasource = "",
                          envir = parent.frame(),
                          args.list)
{
  # Interpret the command, and forward to the appropriate handler.
  # Args:
  #   datasource : The name of a known data source.
  #   envir      : An R environment, from which variables are inherited.
  #   args.list  : A list of arguments, to be interpreted and actioned.
  # Returns:
  #   The result of the command (normally a data frame, sometimes a string).
  # SQRL Calls:
  #   SqrlCache(), SqrlClose(), SqrlConfig(), SqrlDefile(), SqrlFile(),
  #   SqrlHelp(), SqrlIndicator(), SqrlInterface(), SqrlIsOpen(), SqrlOpen(),
  #   SqrlParam(), SqrlParams(), SqrlPath(), SqrlProc(), SqrlSources(),
  #   SqrlStatement(), SqrlSubmit(), SqrlTry(), SqrlValue().
  # RODBC Calls:
  #   sqlColumns(), sqlTables(), sqlTypeInfo().
  # SQRL Callers:
  #   SqrlFile() (via sqrl()), SqrlShell().
  # User:
  #   User has no direct access, but is able to supply (only) the args.list
  #   argument from sqrlAll() and/or any data source interface (including
  #   intra-script sqrl() functions). Since args.list is unrestricted (it could
  #   be SQL), no argument validity checking is performed.
  # Count the number of supplied arguments.
  args.count <- length(args.list)
  # If no command was given, open a channel to the data source. If no channel
  # exists, a new channel is opened. If a channel exists, but wasn't open after
  # all (after besure = TRUE pings the data source to check), we replace the
  # dead channel with a new one. If a channel exists and is open, we do nothing
  # else. Returns the configuration invisibly, enabling interface()$parameter.
  if (args.count == 0L)
  {
    isopen <- SqrlIsOpen(datasource, besure = TRUE)
    if (!isopen)
    {
      SqrlOpen(datasource)
      isopen <- SqrlIsOpen(datasource)
    }
    config <- SqrlConfig(datasource)
    config[["source"]] <- SqrlValue(datasource, "source")
    config[["isopen"]] <- isopen
    return(invisible(config[order(names(config))]))
  }
  # Obtain the stated names of the supplied arguments. This may be NULL (no
  # names at all), or a character vector (with "" for any unnamed elements).
  # Names need not be unique. These names cannot be NAs.
  args.names <- names(args.list)
  # Expand lists of named arguments.
  if (is.null(args.names)
      || any(nchar(args.names) == 0L))
  {
    # Obtain the indices of any unnamed arguments.
    i <- seq(args.count)
    if (!is.null(args.names))
    {
      i <- i[nchar(args.names) == 0L]
    }
    # Unpack unnamed lists of (syntactically correctly) named members, except
    # where that would place a named argument before an unnamed argument.
    # Working backwards (from the last unnamed argument) preserves indices of
    # the as-yet-unprocessed arguments while the list is being expanded.
    j <- length(i)
    while ((j > 0L)
            && identical(class(args.list[[i[j]]]), class(list()))
            && (length(args.list[[i[j]]]) > 0L)
            && !is.null(names(args.list[[i[j]]]))
            && !any(is.na(names(args.list[[i[j]]])))
            && (all(names(args.list[[i[j]]]) ==
                    make.names(names(args.list[[i[j]]])))))
    {
      k <- seq_along(args.list)
      args.list <- c(args.list[k[k < i[j]]],
                      args.list[[i[j]]],
                      args.list[k[k > i[j]]])
      j <- j - 1L
    }
    # Update the number of (unpacked) arguments, and their names.
    args.count <- length(args.list)
    args.names <- names(args.list)
  }
  # When all arguments are named, treat them as either parameterised queries to
  # be submitted, or as SQRL parameter values to be (re)set.
  if (!is.null(args.names)
      && all(nchar(args.names) > 0L))
  {
    # When there is only one argument, and it is named 'verbatim', expect a
    # single character string to be submitted directly (unmodified, without
    # going through the SQRL concatenator, parser, or R-substitution process).
    if ((length(args.names) == 1L)
        && (args.names == "verbatim"))
    {
      if ((length(args.list) != 1L)
          || (class(args.list[[1L]]) != class(character()))
          || (length(args.list[[1L]]) != 1L))
      {
        stop("Verbatim query not a single character string.")
      }
      return(SqrlSubmit(datasource, args.list[[1L]]))
    }
    # Prohibit the use of more than one of the names 'file', 'proc', and
    # 'query', since it is unclear which refers to the script and which is a
    # (are) parameter(s) to that script.
    if (sum(c("file", "proc", "query") %in% args.names) > 1L)
    {
      stop("The file, proc, and query arguments are mutually exclusive.")
    }
    # If one of the names in 'proc', then submit the named procedure, and treat
    # any other (named) arguments as parameters to that procedure. It is this
    # function's responsibility to verify the existence of the procedure. When
    # multiple arguments are named 'proc', the first of them is taken as the
    # procedure while the others are treated as arguments to that procedure.
    if ("proc" %in% args.names)
    {
      index <- which(args.names == "proc")[1L]
      script <- SqrlProc(datasource, args.list[[index]])
      if (is.null(script))
      {
        stop("Procedure not defined.")
      }
      params <- args.list[seq_along(args.list) != index]
      # withVisible() captures whether the script's final value would
      # auto-print, so that visibility can be faithfully reproduced on return.
      result <- withVisible(
                  SqrlFile(datasource, script, envir, params, literal = TRUE))
      SqrlParam(datasource, "result", result$value, override = TRUE)
      if (!result$visible)
      {
        return(invisible(result$value))
      }
      return(result$value)
    }
    # If one of the names in 'file', then submit a query from the file, and
    # treat any other (named) arguments as parameters to that query. It is this
    # function's responsibility to verify the existence and readability of the
    # file before passing it to SqrlFile(). When multiple arguments are named
    # 'file', the first of them is taken as the query file while the others are
    # treated as arguments to that query.
    if ("file" %in% args.names)
    {
      index <- which(args.names == "file")[1L]
      file.path <- SqrlPath(args.list[[index]])
      if (is.null(file.path))
      {
        stop("File not found.")
      }
      params <- args.list[seq_along(args.list) != index]
      result <- withVisible(SqrlFile(datasource, file.path, envir, params))
      SqrlParam(datasource, "result", result$value, override = TRUE)
      if (!result$visible)
      {
        return(invisible(result$value))
      }
      return(result$value)
    }
    # If one of the names is 'query', then pass the query to SqrlFile() (as a
    # script, not as a file name), with any other arguments as named parameters.
    # When multiple arguments are named 'query', the first of them is taken as
    # the query, while the others are treated as parameters of that query.
    if ("query" %in% args.names)
    {
      index <- which(args.names == "query")[1L]
      script <- SqrlTry(SqrlStatement(datasource, list(args.list[[index]])))
      if (script$error)
      {
        stop(script$value)
      }
      script <- script$value
      params <- args.list[seq_along(args.list) != index]
      result <- withVisible(SqrlFile(datasource, script, envir, params, TRUE))
      SqrlParam(datasource, "result", result$value, override = TRUE)
      if (!result$visible)
      {
        return(invisible(result$value))
      }
      return(result$value)
    }
    # Otherwise, interpret each name as that of a parameter, and assign each
    # value accordingly. The name 'reset' is a special case (reset specified
    # parameters to their default values). The driver parameter is set last,
    # to override any default driver that may have been set as a side effect
    # in the course of setting the dsn parameter. Names need not be unique.
    if (any(args.names %in% SqrlParams("read-only")))
    {
      stop("Parameter is read-only.")
    }
    result <- list()
    indices <- seq_along(args.list)
    drivers <- args.names == "driver"
    indices <- c(indices[!drivers], indices[drivers])
    for (index in indices)
    {
      param <- args.names[index]
      if (param == "config")
      {
        # SqrlConfig() returns a list with unique names.
        conf <- SqrlConfig(datasource, args.list[[index]])
        for (cpar in names(conf))
        {
          result[cpar] <- list(conf[[cpar]])
        }
      } else
      {
        if (param == "interface")
        {
          value <- SqrlDefile(param, args.list[[index]])
          result[param] <- list(SqrlInterface(datasource, value))
        } else if (param == "library")
        {
          # A literal NULL value resets the library; anything else is either
          # a file to import or a literal library-definition script.
          if (is.null(args.list[[index]]))
          {
            SqrlParam(datasource, "reset", param)
          } else
          {
            path <- SqrlPath(args.list[[index]])
            if (is.null(path))
            {
              libdef <- SqrlTry(
                          SqrlStatement(datasource, list(args.list[[index]])))
              if (libdef$error)
              {
                stop(libdef$value)
              }
              SqrlFile(datasource, libdef$value, envir,
                        libmode = TRUE, literal = TRUE)
            } else
            {
              SqrlFile(datasource, path, envir, libmode = TRUE, literal = FALSE)
            }
          }
          result[param] <- list(SqrlValue(datasource, param))
        } else if (param == "reset")
        {
          # SqrlValue() returns a list of default values with unique names.
          values <- SqrlValue(datasource, param, args.list[[index]])
          result[names(values)] <- values
        } else
        {
          value <- SqrlDefile(param, args.list[[index]])
          result[param] <- list(SqrlValue(datasource, param, value))
        }
      }
    }
    if (!is.null(names(result)))
    {
      result <- result[order(names(result))]
    }
    return(invisible(result))
  }
  # When both named and unnamed arguments exist, and all named arguments trail
  # all unnamed arguments, then interpret the unnamed arguments as defining a
  # script (one way or another), and the named arguments as its parameters.
  # NOTE(review): if args.names were all empty strings (rather than NULL),
  # args.kindex would be NA and the subscript below would error; names() on a
  # list with no names appears to return NULL, so this looks unreachable from
  # here — confirm against the callers.
  args.kindex <- which(nchar(args.names) > 0L)[1L]
  if (!is.null(args.names)
      && all(nchar(args.names[args.kindex:args.count]) > 0L))
  {
    unnamed <- args.list[seq((args.kindex - 1L))]
    params <- args.list[seq(args.kindex, args.count)]
    # If the unnamed arguments name a stored procedure, use that.
    if (!is.null(script <- SqrlProc(datasource, unnamed)))
    {
      literal <- TRUE
    # If, instead, the unnamed arguments define a file path, read and use that.
    } else if (!is.null(script <- SqrlPath(unnamed)))
    {
      literal <- FALSE
    # If, instead, the single unnamed argument is 'config', set and return that.
    } else if ((args.kindex == 2L)
                && identical(trimws(unnamed), "config"))
    {
      return(SqrlConfig(datasource, params))
    # Otherwise, treat the unnamed arguments as a literal script.
    } else
    {
      script <- SqrlTry(SqrlStatement(datasource, unnamed))
      if (script$error)
      {
        stop(script$value)
      }
      script <- script$value
      literal <- TRUE
    }
    # Submit the script and its parameters. Retrieve and return the result.
    result <- withVisible(SqrlFile(datasource, script, envir, params, literal))
    SqrlParam(datasource, "result", result$value, override = TRUE)
    if (!result$visible)
    {
      return(invisible(result$value))
    }
    return(result$value)
  }
  # When even one unnamed argument trails at least one named argument, abort.
  if (!is.null(args.names)
      && any(nchar(args.names) > 0L))
  {
    stop("All unnamed arguments must precede all named arguments.")
  }
  # Otherwise (when none of the arguments are named), attempt to interpret them
  # as a list of subcommands, or as file-path components, or as a procedure
  # name, or as specific SQRL commands (consisting of a command word, or a
  # parameter name, and, optionally, a value to go with that).
  # If the entire command names a procedure, submit that stored procedure.
  procedure <- SqrlProc(datasource, args.list)
  if (!is.null(procedure))
  {
    result <- withVisible(
                SqrlFile(datasource, procedure, envir, literal = TRUE))
    SqrlParam(datasource, "result", result$value, override = TRUE)
    if (!result$visible)
    {
      return(invisible(result$value))
    }
    return(result$value)
  }
  # If the entire command specifies a path, try sourcing SQL from that file.
  file.path <- SqrlPath(args.list)
  if (!is.null(file.path))
  {
    result <- withVisible(SqrlFile(datasource, file.path, envir))
    SqrlParam(datasource, "result", result$value, override = TRUE)
    if (!result$visible)
    {
      return(invisible(result$value))
    }
    return(result$value)
  }
  # When the first argument is not a single character string, interpret all the
  # arguments as components of a query, and submit that. This is performed here,
  # because the grepl() logic below gets upset when args.list[[1L]] is a vector.
  if (!identical(class(args.list[[1L]]), class(character()))
      || (length(args.list[[1L]]) != 1L))
  {
    statement <- SqrlTry(SqrlStatement(datasource, args.list))
    if (statement$error)
    {
      stop(statement$value)
    }
    result <- withVisible(
                SqrlFile(datasource, statement$value, envir, literal = TRUE))
    SqrlParam(datasource, "result", result$value, override = TRUE)
    if (!result$visible)
    {
      return(invisible(result$value))
    }
    return(result$value)
  }
  # Extract the first word from the first supplied argument.
  first.word <- sub("^[^[:graph:]]*([[:graph:]]+).*$", "\\1",
                    args.list[[1L]])[1L]
  # If the first word looks like standard SQL, submit the unaltered command.
  if (tolower(first.word) %in% SqrlParams("sql-keywords"))
  {
    statement <- SqrlTry(SqrlStatement(datasource, args.list))
    if (statement$error)
    {
      stop(statement$value)
    }
    result <- withVisible(
                SqrlFile(datasource, statement$value, envir, literal = TRUE))
    SqrlParam(datasource, "result", result$value, override = TRUE)
    if (!result$visible)
    {
      return(invisible(result$value))
    }
    return(result$value)
  }
  # If the first supplied argument contains more than one word, the other words
  # consist of everything except the first word (pasted together).
  if (grepl("[[:graph:]]+[^[:graph:]]+[[:graph:]]+", args.list[[1L]]))
  {
    other.words <- trimws(sub(first.word, "", paste(args.list, collapse = ""),
                          fixed = TRUE))
    only.word <- ""
  # Otherwise (the first supplied argument is a single word), if only one
  # argument was supplied, then the (that) first word is the only word.
  } else if (args.count == 1L)
  {
    only.word <- first.word
    other.words <- ""
  # Otherwise, if precisely two arguments were supplied, then the other words
  # are the second argument verbatim (could be any object, not just a string).
  } else if (args.count == 2L)
  {
    only.word <- ""
    other.words <- args.list[[2L]]
  # Otherwise, the other words consist of all the supplied arguments besides the
  # first (paste these together).
  } else
  {
    only.word <- ""
    other.words <- paste(args.list[-1L], collapse = "")
  }
  # If the only word is 'close', close the data source channel.
  if ("close" == only.word)
  {
    return(SqrlClose(datasource))
  }
  # If the first word is 'columns', call RODBC::sqlColumns() on the remainder.
  if ("columns" == first.word)
  {
    if (first.word == only.word)
    {
      stop("Table not specified.")
    }
    SqrlOpen(datasource)
    # The indicator marks the interface as busy for the duration of the query.
    SqrlIndicator(datasource, "query")
    result <- SqrlTry(RODBC::sqlColumns(
                        channel = SqrlParam(datasource, "channel"),
                        sqtable = other.words,
                        errors = SqrlParam(datasource, "errors"),
                        as.is = TRUE))
    SqrlIndicator(datasource, "done")
    if (result$error
        && SqrlParam(datasource, "errors"))
    {
      stop(result$value)
    }
    return(result$value)
  }
  # If the first word is 'config', get or set the configuration.
  if ("config" == first.word)
  {
    return(SqrlConfig(datasource, other.words))
  }
  # If the first word is 'help', or some multiple of '?', and the other words
  # are 'text', 'html', or absent, then provide help. We test for those other
  # words here, because (for example) 'help volatile table' is valid Teradata,
  # and "help 'contents'" is valid MySQL. Neither allows just 'help' alone.
  if ((("help" == first.word)
        || grepl("^[?]+$", first.word))
      && ((first.word == only.word)
          || identical(tolower(other.words), "html")
          || identical(tolower(other.words), "text")))
  {
    return(SqrlHelp(datasource, other.words))
  }
  # If the only word is 'interface', return the interface function name.
  if ("interface" == only.word)
  {
    return(SqrlValue(datasource, only.word))
  }
  # If the first word is 'interface', change the interface function.
  if ("interface" == first.word)
  {
    value <- SqrlDefile(first.word, other.words, evaluate = TRUE)
    return(SqrlInterface(datasource, value))
  }
  # If the only word is 'isopen' (or if words one and two are 'is open'), return
  # the channel's open status (TRUE for open, FALSE otherwise). This calls with
  # besure = TRUE, to ping the source and make certain of the openness status.
  if (("isopen" == only.word)
      || (("is" == first.word)
          && ("open" == other.words)))
  {
    return(SqrlIsOpen(datasource, besure = TRUE))
  }
  # If the first word is 'library', treat the other words as a library file or
  # literal script (to be imported). If the other word is a literal NULL, then
  # this is an alias for reset. The getter case, wherein 'library' is the only
  # word, is handled below (with the other SQRL parameters).
  if (("library" == first.word)
      && (first.word != only.word))
  {
    if (is.null(other.words))
    {
      SqrlParam(datasource, "reset", first.word)
    } else
    {
      SqrlFile(datasource, other.words, envir, libmode = TRUE,
                literal = is.null(SqrlPath(other.words)))
    }
    return(invisible(SqrlValue(datasource, "library")))
  }
  # If the only word is 'Library', return the full library definition.
  if ("Library" == only.word)
  {
    return(SqrlParam(datasource, "library"))
  }
  # If the only word is 'open', open a channel to the specified data source.
  if ("open" == only.word)
  {
    return(SqrlOpen(datasource))
  }
  # If the first word is 'primarykeys', then call RODBC::sqlPrimaryKeys() on the
  # remaining words (which ought to be table or database.table).
  if ("primarykeys" == first.word)
  {
    if (first.word == only.word)
    {
      stop("Table not specified.")
    }
    SqrlOpen(datasource)
    SqrlIndicator(datasource, "query")
    result <- SqrlTry(RODBC::sqlPrimaryKeys(
                        channel = SqrlParam(datasource, "channel"),
                        sqtable = other.words,
                        errors = SqrlParam(datasource, "errors"),
                        as.is = TRUE))
    SqrlIndicator(datasource, "done")
    if (result$error
        && SqrlParam(datasource, "errors"))
    {
      stop(result$value)
    }
    return(result$value)
  }
  # If the only word is 'remove', then deregister the source from SQRL.
  if ("remove" == only.word)
  {
    return(SqrlCache(datasource, delete = TRUE))
  }
  # If the first word is 'reset', then reset the stated parameters.
  if ("reset" == first.word)
  {
    return(invisible(SqrlValue(datasource, first.word, other.words)))
  }
  # If the only word is 'settings', return that subset of the configuration.
  if ("settings" == only.word)
  {
    s <- SqrlConfig(datasource)
    return(s[!(names(s) %in% SqrlParams("omit-from-settings"))])
  }
  # If the only word is 'source', return the (placeholder substituted, secrets
  # obliterated) source definition (either a DSN or a connection string).
  if ("source" == only.word)
  {
    return(SqrlValue(datasource, "source"))
  }
  # If the command is 'sources', return the data source summary table.
  if ("sources" == only.word)
  {
    return(SqrlSources())
  }
  # If the first word is 'tables', call RODBC::sqlTables() on the data source.
  if ("tables" == first.word)
  {
    schema <- NULL
    if ("tables" != only.word)
    {
      schema <- other.words
    }
    SqrlOpen(datasource)
    SqrlIndicator(datasource, "query")
    result <- SqrlTry(RODBC::sqlTables(
                        channel = SqrlParam(datasource, "channel"),
                        errors = SqrlParam(datasource, "errors"),
                        as.is = TRUE,
                        schema = schema))
    SqrlIndicator(datasource, "done")
    if (result$error
        && SqrlParam(datasource, "errors"))
    {
      stop(result$value)
    }
    return(result$value)
  }
  # If the first word is 'typeinfo', call RODBC::sqlTypeInfo() on the others.
  if ("typeinfo" == first.word)
  {
    SqrlOpen(datasource)
    type <- ifelse(first.word == only.word, "all", other.words)
    SqrlIndicator(datasource, "query")
    info <- SqrlTry(RODBC::sqlTypeInfo(
                      channel = SqrlParam(datasource, "channel"),
                      type = type,
                      errors = SqrlParam(datasource, "errors"),
                      as.is = TRUE))
    SqrlIndicator(datasource, "done")
    if (info$error
        && SqrlParam(datasource, "errors"))
    {
      stop(info$value)
    }
    return(info$value)
  }
  # When the first word is an SQRL/RODBC parameter, get or set that parameter.
  if (first.word %in% SqrlParams("all"))
  {
    # When getting, return the parameter's value (except for secrets, such as
    # passwords, which are returned obliterated), visibly.
    if (first.word == only.word)
    {
      return(SqrlValue(datasource, first.word))
    }
    # Allow getting, but not setting, of the channel parameter from here.
    if (first.word %in% SqrlParams("read-only"))
    {
      stop("Parameter is read-only.")
    }
    # Set the parameter's value to the supplied other words, then return the
    # (secrets-obscured) value, invisibly.
    value <- SqrlDefile(first.word, other.words, evaluate = TRUE)
    return(invisible(SqrlValue(datasource, first.word, value)))
  }
  # Otherwise, submit the original unaltered command, via the parser.
  statement <- SqrlTry(SqrlStatement(datasource, args.list))
  if (statement$error)
  {
    stop(statement$value)
  }
  result <- withVisible(
              SqrlFile(datasource, statement$value, envir, literal = TRUE))
  SqrlParam(datasource, "result", result$value, override = TRUE)
  if (!result$visible)
  {
    return(invisible(result$value))
  }
  return(result$value)
}
SqrlDSNs <- function(import = "all")
{
  # Registers ODBC data source names (DSNs) with SQRL, creating interfaces.
  # Args:
  #   import : The RODBC::odbcDataSources() type; 'all', 'user', or 'system'.
  # Returns:
  #   Invisible NULL, after registering any newly discovered DSNs.
  # SQRL Calls:
  #   SqrlCache(), SqrlInterface(), SqrlParam(), SqrlParams().
  # RODBC Calls:
  #   odbcDataSources().
  # SQRL Callers:
  #   SqrlSources(), .onLoad().
  # User:
  #   Has no direct access. Is able to supply the argument from sqrlSources(),
  #   via SqrlSources(), but it is vetted there and no further validity checks
  #   are required.
  # Obtain the registered data sources, as a named character vector of driver
  # descriptions indexed by DSN.
  dsns <- RODBC::odbcDataSources(type = import)
  # A misconfigured unixODBC can yield empty names, for which dsns[""] is
  # NA_character_. Such entries would cause the SqrlParam() driver assignment
  # (below) to throw an error, preventing SQRL from loading (and, thereby,
  # from installing), so they are discarded here. The underlying
  # misconfiguration will still prevent SQRL (and RODBC) from connecting to
  # any data source.
  dsns <- dsns[nzchar(names(dsns))]
  # Discard Microsoft Access, dBASE, Excel, and other unwanted sources.
  unwanted <- paste(SqrlParams("unwanted-sources"), collapse = "|")
  dsns <- dsns[!grepl(unwanted, dsns, ignore.case = TRUE)]
  # Register each previously unseen source (one having no associated cache);
  # create its cache, record its name and driver, then attempt to construct
  # an interface for it (failure to do so is non-fatal). A pre-existing
  # user-defined source blocks the import of a DSN with the same name. DSN
  # names need not be unique (multiple same-named DSNs may appear within a
  # unixODBC .odbc.ini file), in which case only the first instance is
  # imported (matching what unixODBC uses when that DSN is referenced).
  for (dsn in names(dsns))
  {
    if (SqrlCache(dsn, exists = FALSE))
    {
      SqrlCache(dsn, create = TRUE)
      SqrlParam(dsn, "dsn", dsn)
      SqrlParam(dsn, "driver", dsns[dsn])
      SqrlInterface(dsn, dsn, vital = FALSE)
    }
  }
  # Return invisible NULL.
  return(invisible(NULL))
}
SqrlFace <- function(interface = "",
                     set = NULL,
                     exists = NULL,
                     clashes = NULL,
                     delete = FALSE)
{
  # Checks, sets, gets, and removes data source user-interface functions.
  # Args:
  #   interface : The name (a string) of a SQRL interface function, or NULL.
  #   set       : If supplied, this definition is assigned to the interface.
  #   exists    : If TRUE or FALSE, test whether or not the interface exists.
  #   clashes   : If Boolean, test for an object name conflict with interface.
  #   delete    : If TRUE, delete the interface.
  # Returns:
  #   Either the interface (function) definition, a logical existence or
  #   name-conflict indicator, or invisible NULL (when deleting).
  # SQRL Calls:
  #   SQRL:Face.
  # SQRL Callers:
  #   SqrlInterface().
  # User:
  #   Has no direct access, but is able to pass-in the interface argument
  #   (only) from SqrlInterface() or SqrlSource(). Both of these check that
  #   interface is a unique (non-clashing) and assignable name. No further
  #   checks are needed.
  # Setter mode: parse the definition, bind it within the attached
  # 'SQRL:Face' environment, and return it invisibly. The data source's
  # 'interface' parameter is not altered here.
  if (!is.null(set))
  {
    fun <- eval(parse(text = set, keep.source = FALSE))
    assign(interface, fun, "SQRL:Face")
    return(invisible(fun))
  }
  # Existence-testing mode: report whether the interface's existence (as a
  # function, directly within 'SQRL:Face') agrees with the expectation. When
  # exists is TRUE (FALSE), the return value is TRUE when the interface does
  # (does not) exist. A NULL interface never exists.
  if (!is.null(exists))
  {
    if (is.null(interface))
    {
      return(exists == FALSE)
    }
    found <- exists(interface, "SQRL:Face", mode = "function",
                    inherits = FALSE)
    return(found == exists)
  }
  # Clash-testing mode: report whether the name is already taken by some
  # other function in 'SQRL:Face', the global environment, or any of their
  # ancestral (parent, etc.) environments. When clashes is TRUE (FALSE), the
  # return value is TRUE when there is (not) a conflict.
  if (!is.null(clashes))
  {
    taken <- (exists(interface, "SQRL:Face", mode = "function",
                     inherits = TRUE)
              || exists(interface, globalenv(), mode = "function",
                        inherits = TRUE))
    if (taken)
    {
      return(clashes)
    }
    return(!clashes)
  }
  # Deletion mode: remove the interface function from 'SQRL:Face'. The data
  # source's 'interface' parameter is not altered here.
  if (delete)
  {
    suppressWarnings(
      remove(list = interface, pos = "SQRL:Face", inherits = FALSE))
    return(invisible(NULL))
  }
  # Getter mode (the default): return the interface function itself.
  return(get(interface, "SQRL:Face", mode = "function", inherits = FALSE))
}
SqrlFile <- function(datasource = "",
script = "",
envir = parent.frame(),
params = NULL,
literal = FALSE,
libmode = FALSE)
{
# Read a SQRL-script file and submit its content to a data source.
# Args:
# datasource : The name of a known data source.
# script : The path of a script file, or an actual script, as a string.
# envir : An R environment (script is executed in a child of this).
# params : A named list of R parameters for the script.
# literal : If set to TRUE, script is a literal script (not a file path).
# libmode : If TRUE, scripts are copied to the library parameter.
# Returns:
# Result of submitting the script.
# SQRL Calls:
# SqrlClose(), SqrlDelegate (via sqrl()), SqrlParam(), SqrlParams(),
# SqrlPL(), SqrlStatement(), SqrlSubScript(), SqrlTry().
# utils Calls:
# head() (only if utils is attached).
# SQRL Callers:
# SqrlConfig(), SqrlDelegate().
# User:
# Has no direct access, but is able to submit (only) the script argument
# (only) via SqrlDelegate(). When script is a file path, SqrlDelegate() will
# already have confirmed the file's existence and readability. When it's not,
# SqrlDelegate() will have set literal TRUE.
# Expand the temporary library stack by one layer, and ensure that layer is
# removed whenever, and however, this function exits (cleanly or otherwise).
SqrlParam(datasource, "libstack", "expand", override = TRUE)
on.exit(SqrlParam(datasource, "libstack", "contract", override = TRUE))
# Expand the temporary parameter stack by one layer, and ensure that layer is
# removed whenever, and however, this function exits (cleanly or otherwise).
SqrlParam(datasource, "pstack", "expand")
on.exit(SqrlParam(datasource, "pstack", "contract"), add = TRUE)
# When the script argument is a file path, slurp the entirety of that file.
# No ordinary script would be so large that this should be a problem.
if (!literal)
{
script <- paste(readLines(script, warn = FALSE, skipNul = TRUE),
collapse = "\n")
}
# Script delimiter definitions (regular expression patterns).
patterns <- c(
tag.r = "<r>",
tag.endr = "</r>",
tag.do = "<do>",
tag.stop = "<stop>",
tag.result = "<result[[:blank:]]*->[[:blank:]]*[^[:space:]>]+>",
tag.if = "<if[[:blank:]]*\\(",
tag.elseif = "<else[[:blank:]]*if[[:blank:]]*\\(",
tag.else = "<else>",
tag.endif = "</if>",
tag.while = "<while[[:blank:]]*\\(",
tag.endwhile = "</while>",
tag.return = "<return[[:blank:]]*\\(",
tag.close = "<close>",
tag.proc = "<proc",
tag.endproc = "</proc>",
tag.with = "<with>",
tag.endwith = "</with>",
end.expression = ")>",
comment.begin = "/\\*",
comment.end = "\\*/",
comment.line = "--",
comment.r = "#",
end.of.line = "\n",
quote.single = "'",
quote.double = "\"",
semi.colon = ";")
# Scan the script for delimiter positions (pos), types (pat), and character
# sequence lengths (len). For example, one delim might be pat = 'tag.result',
# starting at character pos = 145 of script, and len = 13 characters long
# The actual delimiter is then substring(script, pos, pos + len - 1), which in
# most cases (besides tag.result) is an invariant pattern. In our example, the
# delimiter might be '<result -> x>' (13 characters).
pos = NULL
pat = NULL
len = NULL
for (pattern in names(patterns))
{
matches <- gregexpr(patterns[pattern], script, ignore.case = TRUE)[[1L]]
positions <- as.integer(matches)
if ((length(positions) > 1L)
|| (positions > 0L))
{
pos <- c(pos, positions)
pat <- c(pat, rep(pattern, length(positions)))
len <- c(len, attr(matches, "match.length"))
}
}
# Sort the delimiters (if any exist) into ascending (script) positional order.
if (length(pos) > 1L)
{
ord <- order(pos)
pos <- pos[ord]
pat <- pat[ord]
len <- len[ord]
}
# The total number of delimiters (of all kinds) found in the script.
num.delims <- length(pos)
# The total number of characters (invisible or otherwise) within the script.
nchar.script <- nchar(script)
# Create a new environment as a child of the invoking environment.
# SqrlFile() evaluates R expressions (including the post-processing) within
# this environment (rather than the invoking environment) so as to avoid
# overwriting variables within the invoking environment.
sqrl.env <- new.env(parent = envir)
# When this is an initial (unnested) call, create an interface for making
# nested calls, and block the regular interface and public sqrl functions.
# Any nested calls will inherit these assignments.
if (length(SqrlParam(datasource, "libstack")) == 1L)
{
# Prevent calling the invoking interface from within the script. This is for
# correct autoclose behaviour, which is achieved by the tracking of call
# nesting within SqrlDelegate(). The stop() function prepends the offending
# function name to the error message (so we don't have to).
if (!is.null(SqrlParam(datasource, "interface")))
{
assign(SqrlParam(datasource, "interface"),
function(...) {stop("Calls from within scripts are blocked.")},
sqrl.env)
}
# Block calling any of the public sqrlXXX() functions from within a script.
# This is for autoclose behaviour (nesting tracking) and to prevent a call
# of one data source's interface from modifying the settings of any other.
# The stop() function prepends the offending function's name to the message.
for (fun in c("All", "Off", "Interface", "Source", "Sources"))
{
assign(paste0("sqrl", fun),
function(...) {stop("Calls from within scripts are blocked.")},
sqrl.env)
}
# Assign an interface to whichever datasource is running the script, into
# the working environment. This interface works even when the datasource has
# no devoted (regular) interface (in which case the script must have been
# passed from sqrlAll()). This function is an intra-script replacement of
# the regular interface (blocked above). It preserves nesting and makes
# scripts interface-name indifferent (i.e., improves portability). Whereas
# regular interfaces call SqrlShell(), this replacement goes directly to
# SqrlDelegate(), so that autoclosure only occurs upon exiting the initial
# (un-nested) user's command-line call.
assign("sqrl", eval(parse(text = paste0("function(...) {SqrlDelegate(\"",
datasource, "\", base::parent.frame(), base::list(...))}"),
keep.source = FALSE)), sqrl.env)
}
# Assign any supplied parameters to the processing environment. The supplied
# parameter names might not be unique, in which case the last value applies.
for (i in seq_along(params))
{
# When a parameter is called 'args', and is a non-empty list within which
# every member has a legitimate R-variable name, then individiually assign
# each of its members into the processing environment (rather than assigning
# the whole list, 'args', as a single object).
if ((names(params)[i] == "args")
&& identical(class(params[[i]]), class(list()))
&& (length(params[[i]]) > 0L)
&& (!is.null(names(params[[i]])))
&& (all(names(params[[i]]) == make.names(names(params[[i]])))))
{
for (j in seq_along(params[[i]]))
{
assign(names(params[[i]])[j], params[[i]][[j]], sqrl.env)
}
# Otherwise, assign the named object to the processing environment.
} else
{
assign(names(params)[i], params[[i]], sqrl.env)
}
}
# Default result. The result is a list of two components; value and visible,
# as per withVisible(). This function will return the last non-empty value.
result <- withVisible(invisible(character(0L)))
# The SQL statement in progress (the script may contain multiple statements).
statement <- list()
# A stack, upon which to store (while) loop return (start) points.
loop.points <- integer()
# A stack, upon which to store the results of nested conditionals.
cond.stack <- logical()
# Result of evaluating the last (innermost nested) condition.
cond.current <- TRUE
# A stack, upon which to store whether or not any of the previous alternative
# conditions within an if, else if, else structure have yet evaluated to TRUE.
else.stack <- logical()
# Initialise the procedural language extension tracker.
pl <- SqrlPL(NULL)
# Delimiter counter/index (to pos, pat, and len). Range is [1 : num.delims].
i <- 1L
# Character counter/index (to script). Range is [1 : nchar.script].
k <- 1L
# Parse the script, submit SQL, evaluate and substitute R.
while (i <= num.delims)
{
# Remove comments from SQL (both to-end-of-line and block).
# The main reason for this, is that some data sources are (have been) known
# to reject queries with more than one block of comments at the beginning.
# A second reason is that RODBC's error messages may include the submitted
# script, which is easier to read if we've cleaned it up. The flip side is
# that our parsing (rather than the source's) had better get things right.
if ((i <= num.delims)
&& (pat[i] %in% c("comment.line", "comment.begin")))
{
# Append any preceding fragment to the script, unless within the block of
# an untrue conditional expression.
if (cond.current)
{
# Isolate unappended (to the statement) script preceding this comment.
phrase <- substring(script, k, pos[i] - 1L)
# Remove trailing whitespace (including vertical) from the phrase.
# (Only before to-end-of-line comments.)
if (pat[i] == "comment.line")
{
phrase <- sub("[[:space:]]*$", "", phrase)
}
# Remove trailing whitespace from each internal line of the phrase.
phrase <- gsub("[[:blank:]]+\n", "\n", phrase)
# Remove vertical whitespace from within the phrase.
phrase <- gsub("\n+", "\n", phrase)
# Update the procedural-language state tracker.
pl <- SqrlPL(pl, phrase)
# Append the phrase to the statement (unless the phrase is empty).
# This is an error when the script is meant to define a library.
if (nchar(phrase) > 0L)
{
statement <- append(statement, phrase)
}
}
# Scan through the subsequent script delimiters, until the comment
# concludes with either an end-of-file, or appropriate delimiter.
end.marker <- switch(pat[i],
comment.line = "end.of.line",
comment.begin = "comment.end")
i <- i + 1L
while ((i <= num.delims)
&& (pat[i] != end.marker))
{
i <- i + 1L
}
# Reposition the start-of-phrase index immediately after the end of the
# comment. When the comment ends with a newline, the index is placed on
# that newline (so that the next phrase will begin with the newline).
k <- ifelse(i <= num.delims,
ifelse(end.marker == "end.of.line", pos[i], pos[i] + len[i]),
nchar.script + 1L)
# Advance to the next script delimiter.
i <- i + 1L
}
# Incorporate (single & double) quote-enclosed strings verbatim within SQL.
# That is; ignore anything that looks like a delimiter, but is in a string.
if ((i <= num.delims)
&& (pat[i] %in% c("quote.single", "quote.double")))
{
# Append any preceding fragment to the script, unless within the block of
# an untrue conditional expression.
if (cond.current)
{
# Isolate unappended (to the statement) script preceding this string.
phrase <- substring(script, k, pos[i] - 1L)
# Remove trailing whitespace from each internal line of the phrase.
phrase <- gsub("[[:blank:]]+\n", "\n", phrase)
# Remove vertical whitespace from within the phrase.
phrase <- gsub("\n+", "\n", phrase)
# Update the procedural-language state tracker.
pl <- SqrlPL(pl, phrase)
# Append the phrase to the statement (unless the phrase is empty).
if (nchar(phrase) > 0L)
{
statement <- append(statement, phrase)
}
}
# Reposition the start-of-phrase index on (including) the beginning quote.
k <- pos[i]
# Scan through the subsequent script delimiters, until the string
# concludes with either an end-of-file, or matching quote delimiter.
# We only test for \ escaped quotes here (once already in quote mode,
# which also guarantees i > 1). Some SQLs use doubled quotes within quoted
# strings to represent quote literals. This is supported here, via the
# following mechanism: 'x''' is read as two adjacent strings, 'x' and '',
# which are eventually collapsed together (with an empty string between),
# restoring the original 'x''' in the final SQL statement (string).
closing.quote <- pat[i]
i <- i + 1L
while ((i <= num.delims)
&& ((pat[i] != closing.quote)
|| ((attr(regexpr(
paste0("\\\\*", patterns[closing.quote], "$"),
substring(script, pos[i - 1L], pos[i])),
"match.length") %% 2L) == 0L)))
{
i <- i + 1L
}
# Append the quoted string to the statement. Verbatim, quotes included.
# Unless within the block of an untrue conditional expression.
if (cond.current)
{
statement <- append(statement, ifelse(i <= num.delims,
substring(script, k, pos[i]),
substring(script, k)))
}
# Position the start-of-phrase index immediately after the closing quote.
k <- ifelse(i <= num.delims, pos[i] + len[i], nchar.script + 1L)
# Advance to the next script delimiter.
i <- i + 1L
}
# Ignore remainder of script when encountering a 'stop' tag within SQL.
# The 'stop' tag is mainly used to run partial scripts while bug hunting.
if ((i <= num.delims)
&& (pat[i] == "tag.stop"))
{
if (cond.current)
{
# Isolate any unappended (to the statement) script preceding this stop.
phrase <- substring(script, k, pos[i] - 1L)
# Remove trailing whitespace (including vertical) from the phrase.
phrase <- sub("[[:space:]]*$", "", phrase)
# Remove trailing whitespace from each internal line of the phrase.
phrase <- gsub("[[:blank:]]+\n", "\n", phrase)
# Remove vertical whitespace from within the phrase.
phrase <- gsub("\n+", "\n", phrase)
# Update the procedural-language state tracker.
pl <- SqrlPL(pl, phrase)
# Append the phrase to the statement (unless the phrase is empty).
if (nchar(phrase) > 0L)
{
statement <- append(statement, phrase)
}
}
# Advance the delimiter and phrase indices beyond the end of the script.
# Break immediately (unnecessary). Statement will be submitted afterwards.
# Note that stop tags apply even inside untrue conditional blocks.
i <- num.delims + 1L
k <- nchar.script + 1L
break
}
# Transfer procedure definitions into either the permanent (source
# parameter) or temporary (working stack) library, without modification.
if ((i <= num.delims)
&& (pat[i] == "tag.proc"))
{
# Position of the character immediately before this potential proc tag. If
# it turns out to be an actual proc tag, then this position is needed to
# check for unsubmitted SQL.
k.prime <- pos[i] - 1L
# If this really is a proc tag, there must be nothing but horizontal
# whitespace between the matched pattern and a quotation mark (single or
# double). Since horizontal whitespace is not a matched pattern, that
# quote mark must be the next matched pattern. If it's not, then this is
# not a proc tag after all (is just SQL), and we continue with the next.
i <- i + 1L
if ((i > num.delims)
|| !(pat[i] %in% c("quote.single", "quote.double"))
|| !grepl("^[[:blank:]]*$",
substring(script, pos[i - 1L] + len[i - 1L], pos[i] - 1L)))
{
next
}
# Scan through the subsequent script delimiters, until the string
# concludes with a matching quote delimiter, or we reach the end of the
# file. We only test for \ escaped quotes on the inside of the string.
j <- i
i <- i + 1L
while ((i <= num.delims)
&& ((pat[i] != pat[j])
|| ((attr(regexpr(paste0("\\\\*", patterns[pat[j]], "$"),
substring(script, pos[i - 1L], pos[i])),
"match.length") %% 2L) == 0L)))
{
i <- i + 1L
}
# Stop if the end of the (procedure name) string was not found.
if ((i > num.delims)
|| (pat[i] != pat[j]))
{
stop("Unterminated procedure name.")
}
# Stop if the character immediately after the (name string) closing quote
# is not a (tag-closing) angle bracket ('>').
if (substring(script, pos[i] + 1L, pos[i] + 1L) != ">")
{
stop("Badly formatted proc tag (improperly terminated).")
}
# Stop if there's any unsubmitted SQL before the proc tag.
if (any(grepl("[[:graph:]]", unlist(statement)))
|| any(grepl("[[:graph:]]", substring(script, k, k.prime))))
{
if (libmode)
{
stop("Text outside of a procedure definition.")
}
stop("Unsubmitted SQL preceding a procedure definition.")
}
# Extract the name of the procedure.
proc.name <- substring(script, pos[j] + len[j], pos[i] - 1L)
# Ensure the proc name is not empty or blank, and does not contain any
# control characters (new line, carriage return, tab, vertical tab, etc.)
if (!grepl("[[:graph:]]", proc.name)
|| grepl("[[:cntrl:]]", proc.name))
{
stop("Invalid procedure name.")
}
# We have found one proc tag within a SQL section (not within an R block).
nproc <- 1L
rblock <- FALSE
# Reposition the start-of-phrase index immediately after the proc tag.
k <- pos[i] + 2L
# Scan the procedure definition (stepping by delimiter), to find its end.
while ((i <= num.delims)
&& (nproc > 0L))
{
# Advance to the next delimiter.
i <- i + 1L
# The end of the script concludes the definition, as does a stop tag.
if ((i > num.delims)
|| (pat[i] == "tag.stop"))
{
break
}
# Ignore delimiters within comments (advance to the end of the comment).
if (pat[i] %in% c("comment.line", "comment.begin"))
{
end.marker <- switch(pat[i],
comment.line = "end.of.line",
comment.begin = "comment.end")
i <- i + 1L
while ((i <= num.delims)
&& (pat[i] != end.marker))
{
i <- i + 1L
}
# Ignore delimiters within R comments (only when within an R section).
} else if (rblock
&& (pat[i] == "comment.r"))
{
i <- i + 1L
while ((i <= num.delims)
&& (pat[i] != "end.of.line"))
{
i <- i + 1L
}
# Ignore delimiters within quotes (advance to the end of the quote).
} else if (pat[i] %in% c("quote.single", "quote.double"))
{
closing.quote <- pat[i]
i <- i + 1L
while ((i <= num.delims)
&& ((pat[i] != closing.quote)
|| ((attr(regexpr(
paste0("\\\\*", patterns[closing.quote], "$"),
substring(script, pos[i - 1L], pos[i])),
"match.length") %% 2L) == 0L)))
{
i <- i + 1L
}
# R sections begin with either an <R> or <result> tag.
} else if ((i <= num.delims)
&& (pat[i] %in% c("tag.r", "tag.result")))
{
rblock <- TRUE
# R sections are terminated by </R> or <do> tags (revert to SQL).
} else if (rblock
&& (pat[i] %in% c("tag.endr", "tag.do")))
{
rblock <- FALSE
# R sections are also terminated by an extra semicolon (revert to SQL).
} else if (rblock
&& (pat[i] == "semi.colon")
&& (pat[i - 1L] %in% c("end.of.line", "semi.colon"))
&& !grepl("[[:graph:]]",
substring(script,
pos[i - 1L] + len[i - 1L],
pos[i] - 1L)))
{
rblock <- FALSE
# Upon meeting an end-of-procedure tag, decrement the nested-procedures
# counter. These tags are recognised both within SQL and R sections, and
# terminate the later (reverting to SQL).
} else if (pat[i] == "tag.endproc")
{
nproc <- nproc - 1L
rblock <- FALSE
# Upon meeting a start-of-procedure tag within a SQL section, increment
# the nested-procedure counter. These tags are not recognised within R,
# for consistency with the primary (extra-procedural) R-block parser
# (below). That parser does not recognise </proc> tags either (which we
# do here), but it is not applied within an open procedural definition.
} else if (!rblock
&& (pat[i] == "tag.proc")
&& (pat[i + 1L] %in% c("quote.single", "quote.double"))
&& grepl("^[[:blank:]]*$",
substring(script, pos[i] + len[i], pos[i + 1L] - 1L)))
{
closing.quote <- pat[i + 1L]
j <- i + 1L
i <- i + 2L
while ((i <= num.delims)
&& ((pat[i] != closing.quote)
|| ((attr(regexpr(
paste0("\\\\*", patterns[closing.quote], "$"),
substring(script, pos[i - 1L], pos[i])),
"match.length") %% 2L) == 0L)))
{
i <- i + 1L
}
if (i <= num.delims)
{
pname <- substring(script, pos[j] + len[j], pos[i] - 1L)
if (grepl("[[:graph:]]", pname)
&& !grepl("[[:cntrl:]]", pname)
&& substring(script, pos[i] + 1L, pos[i] + 1L) == ">")
{
nproc <- nproc + 1L
}
}
}
}
# If no closing tag was found, the procedure ends with the script. Extract
# the procedure, and move the start-of-phrase index beyond the end of the
# script (indicating there's no unprocessed script remaining).
if (i > num.delims)
{
proc.body <- substring(script, k, nchar.script)
k <- nchar.script + 1L
# Otherwise, the definition of the procedure ends immediately before the
# closing tag (that was found). Extract the procedure, up to the tag.
} else
{
proc.body <- substring(script, k, pos[i] - 1L)
# If the definition was terminated by a stop tag, then ignore the rest
# of the script (move the delimiter and phrase indices beyond its end).
if (pat[i] == "tag.stop")
{
i <- num.delims + 1L
k <- nchar.script + 1L
# Otherwise, the definition was terminated by an end-of-procedure tag.
# Advance the start-of-phrase index to the character after that tag.
} else
{
k <- pos[i] + len[i]
}
}
# Remove leading and trailing whitespace from the procedure. If it
# originally contained one or more trailing newlines, restore one.
tnl <- grepl("\\n[[:space:]]*$", proc.body)
proc.body <- trimws(proc.body)
if (tnl)
{
proc.body <- paste0(proc.body, "\n")
}
# It is possible, outside of library mode, that the procedure definition
# might appear within the block of an untrue conditional, in which case it
# should not be added to the stack.
if (cond.current)
{
# Apply the name to the procedure.
names(proc.body) <- proc.name
# Add the procedure to either the library or the stack. This operation
# requires the use of override = TRUE.
if (libmode)
{
SqrlParam(datasource, "library", proc.body, override = TRUE)
} else
{
SqrlParam(datasource, "libstack", proc.body, override = TRUE)
}
# If verbose, advise the user of the addition.
if (interactive()
&& SqrlParam(datasource, "verbose"))
{
cat("\n")
if (libmode)
{
cat(paste0("Added '", proc.name, "' to the library:\n"))
} else
{
cat(paste0("Defined procedure '", proc.name, "':\n"))
}
cat(proc.body)
cat("\n")
}
}
# Advance to the next script delimiter.
i <- i + 1L
}
# Submit the statement (and retrieve the result) on encountering a 'do' tag
# within SQL.
if ((i <= num.delims)
&& (pat[i] == "tag.do"))
{
# Prohibit query-submission in library mode.
if (libmode)
{
stop("Text outside of a procedure definition.")
}
# Submit the statement, plus any unappended fragment, unless it is within
# the block of an untrue conditional expression.
if (cond.current)
{
# Isolate any unappended (to the statement) script preceding the tag.
phrase <- substring(script, k, pos[i] - 1L)
# Submit the statement (with phrase) and pull the result.
dat <- withVisible(SqrlSubScript(datasource, statement, phrase))
# If there was a result (there was a query), replace the overall result.
if (!is.null(dat$value))
{
result <- dat
}
# Reset the statement (begin the next one afresh).
statement <- list()
# Reset the procedural-language state tracker.
pl <- SqrlPL(NULL)
}
# Reposition the start-of-phrase index immediately after the tag.
k <- pos[i] + len[i]
# Advance to the next script delimiter.
i <- i + 1L
}
# Act upon a semicolon, encountered within SQL. Dependent upon the current
# situation, either submit a query or do nothing.
if ((i <= num.delims)
&& (pat[i] == "semi.colon"))
{
# Prohibit query-submission in library mode.
if (libmode)
{
stop("Text outside of a procedure definition.")
}
      # Consider the possibility that this semicolon might terminate a
      # complete query, only when the scdo parameter is TRUE.
if (SqrlParam(datasource, "scdo"))
{
# Assess the nature of the semicolon, unless it is within the block of
# an untrue conditional expression
if (cond.current)
{
# Isolate any unappended (to the statement) script; up to, and
# including, the semicolon.
phrase <- substring(script, k, pos[i] + len[i] - 1L)
# Remove trailing whitespace from each internal line of the phrase.
phrase <- gsub("[[:blank:]]+\n", "\n", phrase)
# Remove vertical whitespace from within the phrase.
phrase <- gsub("\n+", "\n", phrase)
# Update the procedural-language state tracker.
pl <- SqrlPL(pl, phrase)
# Append the phrase here, to avoid running it through the state
# tracker a second time, if no query is submitted below. The phrase
# can't be empty, because it ends with the semicolon.
statement <- append(statement, phrase)
# The semicolon is considered to terminate a complete SQL statement,
# if we're not in a PL block, or if the PL block has ended. When such
# is the case, a query could be submitted (it might not be, just yet).
if (!pl$block
|| ((pl$begins > 0L)
&& (pl$ends >= pl$begins)))
{
# The terminal semicolon will be treated a do tag (the query will be
# submitted, unless there's nothing but whitespace between it and a
# subsequent do or result tag.
do <- TRUE
if (i < num.delims)
{
j <- which(pat %in% c("tag.do", "tag.result"))
if (any(j > i))
{
j <- min(j[j > i])
do <- !grepl("^[[:space:]]*$",
substring(script, pos[i] + len[i], pos[j] - 1L))
}
}
# When the semicolon is not followed by a do or result tag, submit
# query and retrieve the result.
if (do)
{
# Submit the statement, and pull the result. There ought to be
# something, because the query at least contains the semicolon.
result <- withVisible(SqrlSubScript(datasource, statement))
# Reset the statement (begin the next one afresh).
statement <- list()
# Reset the procedural-language state tracker.
pl <- SqrlPL(NULL)
}
}
}
# Reposition the start-of-phrase index immediately after the marker.
k <- pos[i] + len[i]
}
# Advance to the next script delimiter, whether or not scdo is TRUE, and
# whether or not any query was submitted.
i <- i + 1L
}
# Act upon condition end and else tags, encountered within SQL.
if ((i <= num.delims)
&& (pat[i] %in% c("tag.endif", "tag.endwhile", "tag.else")))
{
# Prohibit R-execution (potential query-submission) in library mode.
if (libmode)
{
stop("Text outside of a procedure definition.")
}
# Remember the type of tag we've encountered.
pat.type = pat[i]
# Throw an exception if we're ending a loop that was never started.
if ((pat.type == "tag.endwhile")
&& cond.current
&& (length(loop.points) < 1L))
{
stop("End without while.")
}
# Throw an exception if we're ending a block that was never started.
if ((pat.type == "tag.endif")
&& length(cond.stack) < 1L)
{
stop("End without if.")
}
# Throw an exception if we've met an else but not a previous if.
if ((pat.type == "tag.else")
&& (length(else.stack) < 1L))
{
stop("Else without if.")
}
# Append any preceding fragment to the statement, unless ending (within)
# the block of an untrue conditional expression.
if (cond.current)
{
# Isolate any unappended (to the statement) script preceding this end.
phrase <- substring(script, k, pos[i] - 1L)
# Remove trailing whitespace (including vertical) from the phrase.
phrase <- sub("[[:space:]]*$", "", phrase)
# Remove trailing whitespace from each internal line of the phrase.
phrase <- gsub("[[:blank:]]+\n", "\n", phrase)
# Remove vertical whitespace from within the phrase.
phrase <- gsub("\n+", "\n", phrase)
# Update the procedural-language state tracker.
pl <- SqrlPL(pl, phrase)
# Append the phrase to the statement (unless the phrase is empty).
if (nchar(phrase) > 0L)
{
statement <- append(statement, phrase)
}
}
# If we've reached the end of an active loop, pop the loop starting index
# from the loop stack, and return to that point of the script.
if ((pat.type == "tag.endwhile")
&& cond.current)
{
i <- loop.points[length(loop.points)]
loop.points <- loop.points[-length(loop.points)]
k <- pos[i]
# Otherwise, we've reached an else, the end of a (TRUE OR FALSE) if block,
# or the end of an inactive (FALSE while) loop. Continue past the tag.
} else
{
# Reposition the start-of-phrase index immediately after the end tag.
k <- pos[i] + len[i]
# Advance to the next script delimiter.
i <- i + 1L
}
# In the case of an else tag, the condition becomes TRUE if all encasing
# conditionals are TRUE (so the else lies within an active block) and all
# previous alternatives (the parent if, and any else ifs) have evaluated
# FALSE (so none of them have already applied).
if (pat.type == "tag.else")
{
# Locate (grab the index of) the current (last) else stack entry.
es <- length(else.stack)
# The current condition becomes TRUE if all encasing conditionals are
# TRUE (so the else belongs to an if-else structure that lies within an
# active outer block) and all previous alternatives (the parent if, and
# any else ifs) have evaluated FALSE (so none of those conditions has
# already applied).
new.cond <- (!else.stack[es]) && all(cond.stack)
cond.current <- new.cond
# The else condition becomes (or is) TRUE if the new condition is TRUE
# (for this alternative) or if any previous else condition (alternative)
# has already been TRUE.
else.stack[es] <- new.cond || else.stack[es]
# Otherwise, in the case of an end tag, pop (restore) the previous
# (encasing) condition from the stack.
} else
{
cond.current <- cond.stack[length(cond.stack)]
cond.stack <- cond.stack[-length(cond.stack)]
}
# In the case of ending an if, remove the current (last) entry from the
# else stack (all alternatives having been exhausted).
if (pat.type == "tag.endif")
{
else.stack <- else.stack[-length(else.stack)]
}
}
# Act upon an if, else-if, while or return tag, encountered within SQL.
if ((i <= num.delims)
&& (pat[i] %in% c("tag.if", "tag.elseif", "tag.while", "tag.return")))
{
# Prohibit R-execution (potential query-submission) in library mode.
if (libmode)
{
stop("Text outside of a procedure definition.")
}
# Remember which type of tag we've encountered, and where we found it.
tag.type <- pat[i]
tag.pos <- i
# Abort on else-if without prior if (avoids an uncontrolled error later).
if ((tag.type == "tag.elseif")
&& (length(else.stack) < 1L))
{
stop("Else-if without if.")
}
# Append any preceding fragment to the statement, unless within the block
# of an untrue conditional expression.
if (cond.current)
{
# Isolate any unappended (to the statement) script preceding this end.
phrase <- substring(script, k, pos[i] - 1L)
# Remove trailing whitespace from each internal line of the phrase.
phrase <- gsub("[[:blank:]]+\n", "\n", phrase)
# Remove vertical whitespace from within the phrase.
phrase <- gsub("\n+", "\n", phrase)
# Update the procedural-language state tracker.
pl <- SqrlPL(pl, phrase)
# Append the phrase to the statement (unless the phrase is empty).
if (nchar(phrase) > 0L)
{
statement <- append(statement, phrase)
}
}
# Advance the start-of-phrase index to the opening expression parenthesis
# (that being the last character of the tag).
k <- pos[i] + len[i] - 1L
# Counters for the number of left and right parentheses within the phrase.
# We don't check the ordering of the parentheses.
lpar <- 0L
rpar <- 0L
# Have not yet located the end (closing parenthesis) of the expression.
complete <- FALSE
# Remove any comments within the R expression (both R and SQL). Because
# this involves parsing (to find the end of the section), we need to work
# through this even when cond.current is FALSE.
rscript <- list()
i <- i + 1L
while (i <= num.delims)
{
# Remove comments from R (including SQL line and block comments).
# This is so we can use SQL comments within the R (looks better under
# SQL syntax highlighting rules within your text editor), and is also
# necessary to allow commenting out of quote markers, <do>, <stop>, and
# </R> tags with R comments (as well as SQL).
if ((i <= num.delims)
&& (pat[i] %in% c("comment.line", "comment.begin", "comment.r")))
{
# Isolate any unappended script preceding this comment, and append it
# to the R-script.
fragment <- substring(script, k, pos[i] - 1L)
rscript <- append(rscript, fragment)
# Count the number of left and right parentheses within the fragment.
lpar <- lpar + nchar(gsub("[^(]", "", fragment))
rpar <- rpar + nchar(gsub("[^)]", "", fragment))
# Scan through the subsequent script delimiters, until the comment
# concludes with either an end-of-file, or appropriate delimiter.
end.marker <- switch(pat[i],
comment.r = "end.of.line",
comment.line = "end.of.line",
comment.begin = "comment.end")
i <- i + 1L
while ((i <= num.delims)
&& (pat[i] != end.marker))
{
i <- i + 1L
}
# Reposition the start-of-phrase index immediately after the end of
# the comment. When the comment ends with a newline, the index is
# placed on that newline (the next phrase will begin with newline).
k <- ifelse(i <= num.delims,
ifelse(end.marker == "end.of.line",
pos[i], pos[i] + len[i]),
nchar.script + 1L)
}
# Skip over (single, double) quote-enclosed strings (include verbatim).
# (Ignore anything that looks like a delimiter, but is inside a string.)
if ((i <= num.delims)
&& (pat[i] %in% c("quote.single", "quote.double")))
{
# Isolate any unappended script preceding this string literal, and
# append it to the R-script.
fragment <- substring(script, k, pos[i] - 1L)
rscript <- append(rscript, fragment)
# Count the number of left and right parentheses within the fragment.
lpar <- lpar + nchar(gsub("[^(]", "", fragment))
rpar <- rpar + nchar(gsub("[^)]", "", fragment))
# Reposition the start-of-phrase index on top of the opening quote.
k <- pos[i]
# Scan through the script delimiters until reaching the end of the
# quote (ignore all other delimiters found in between). We only test
# for \ escaped quotes here (once already within quote mode, which
# also guarantees that i > 1).
closing.quote <- pat[i]
i <- i + 1L
while ((i <= num.delims)
&& ((pat[i] != closing.quote)
|| ((attr(regexpr(
paste0("\\\\*", patterns[closing.quote], "$"),
substring(script, pos[i - 1L], pos[i])),
"match.length") %% 2L) == 0L)))
{
i <- i + 1L
}
# Append the quoted string literal to the R-script, without counting
# any parentheses that may appear within it.
rscript <- append(rscript, ifelse(i <= num.delims,
substring(script, k, pos[i]),
substring(script, k)))
# Position the start-of-phrase index just after the closing quote.
k <- ifelse(i <= num.delims, pos[i] + len[i], nchar.script + 1L)
}
# Upon finding an end-of-expression marker, establish whether or not it
# terminates the expression. If not, keep looking. If so, evaluate it.
if ((i <= num.delims)
&& (pat[i] == "end.expression"))
{
# Extract any un-appended fragment preceding (and including) the right
# parenthesis (first character) of the end.expression sequence to the
# expression (R-script).
fragment <- substring(script, k, pos[i])
rscript <- append(rscript, fragment)
# Count the numbers of left and right parentheses within the fragment.
lpar <- lpar + nchar(gsub("[^(]", "", fragment))
rpar <- rpar + nchar(gsub("[^)]", "", fragment))
# Reposition the start-of-phrase index immediately after the right
# parenthesis (first character) of the end.expression sequence.
k <- pos[i] + 1L
# When we have equal numbers of left and right parentheses (both being
# at least one), we consider the expression to be complete and attempt
# to evaluate it. We don't check for correct parenthesis ordering.
if (lpar <= rpar)
{
# The complete expression has been identified and isolated.
complete <- TRUE
# In the case of a first conditional tag (not an else-if), push the
# current conditional mode (cond.current) to the condition stack.
if (!(tag.type %in% c("tag.elseif", "tag.return")))
{
cond.stack <- c(cond.stack, cond.current)
}
# The expression must be evaluated, to determine the condition of
# the upcoming nested block, if this is a leading (if, while, or
# return) conditional and the current condition is TRUE, or if this
# is an alternative condition (else-if), no previous alternative has
# been TRUE (i.e., the current else state is FALSE), and the entire
# conditional stack (which does not include the current condition)
# is TRUE (i.e., we are within an active block of script).
if ((cond.current && (tag.type != "tag.elseif"))
|| ((tag.type == "tag.elseif")
&& !else.stack[length(else.stack)]
&& all(cond.stack)))
{
# Collapse the expression (fragments) and remove the enclosing
# parentheses (in case it contains multiple statements).
cond <- paste0(rscript, collapse = "")
expr <- substring(cond, 2L, nchar(cond) - 1L)
# Evaluate the tag expression. On error, stop with exception.
tval <- SqrlTry(withVisible(eval(
parse(text = expr, keep.source = FALSE),
sqrl.env)))
if (tval$error)
{
stop(tval$value)
}
tval <- tval$value
# In the case of a return tag, return the evaluated expression
# (stop processing the SQRL script, and exit from this point).
# That exit is an exception, if any unsubmitted SQL exists.
if (tag.type == "tag.return")
{
if (any(grepl("[[:graph:]]", unlist(statement))))
{
stop("Unsubmitted SQL preceding a <return> tag.")
}
if (tval$visible)
{
return(tval$value)
}
return(invisible(tval$value))
}
# Otherwise, the expression should be a logical condition,
# replacing the previous condition.
cond.current <- tval$value
# If in verbose mode, output the expression and its evaluation.
if (interactive()
&& SqrlParam(datasource, "verbose"))
{
cat("\n")
cat(paste(trimws(cond), "is", cond.current))
cat("\n")
}
# We require expressions to evaluate to Boolean singletons.
if (!is.logical(cond.current)
|| (length(cond.current) != 1L)
|| is.na(cond.current))
{
stop("Condition neither TRUE nor FALSE.")
}
# If this was a while condition, and if that condition was TRUE,
# then the loop is active and we push its starting location to the
# loop (return point) stack.
if ((tag.type == "tag.while")
&& cond.current)
{
loop.points <- c(loop.points, tag.pos)
}
# If the tag is an else-if, update the current else condition. To
# have arrived at this point with an else-if tag, the current else
# condition must be FALSE. FALSE && cond.current is cond.current.
if ((tag.type == "tag.elseif")
&& cond.current)
{
else.stack[length(else.stack)] <- cond.current
}
# Otherwise, when an encasing conditional is untrue, do not evaluate
# the expression (variables may be undefined). Instead, continue in
# untrue mode until reaching the end of the encasing block. In the
# case of a return tag within an untrue block, perform no action.
} else if (tag.type != "tag.return")
{
cond.current <- FALSE
}
# In the case of an if, copy the current condition onto the else
# stack, for any alternative (else-if, else) blocks to reference.
if (tag.type == "tag.if")
{
else.stack <- c(else.stack, cond.current)
}
# Reposition the start-of-phrase index immediately after the
# terminating (expression closing) tag.
k <- pos[i] + len[i]
# Advance to the next script delimiter.
i <- i + 1L
# Having processed the conditional tag, continue with the script.
break
}
}
# Advance to the next script delimiter.
i <- i + 1L
}
# Throw an exception if we run out of script without ever finding the end
# of the intra-tag expression.
if (!complete)
{
stop("Unterminated intra-tag expression.")
}
}
# Evaluate embedded R (and insert into SQL or produce a result).
if ((i <= num.delims)
&& pat[i] %in% c("tag.r", "tag.result"))
{
# Prohibit R-execution (potential query-submission) in library mode.
if (libmode)
{
stop("Text outside of a procedure definition.")
}
# Remember the mode we're in (either embedded or post-processing).
r.type <- pat[i]
# Process any preceding fragment, unless within the block of an untrue
# conditional expression.
if (cond.current)
{
# Isolate any unappended (to the statement) script preceding this tag.
phrase <- substring(script, k, pos[i] - 1L)
# If this is R post-processing, submit any query beforehand.
if (r.type == "tag.result")
{
# Extract the name of the intermediate variable to which the SQL
# result is to be assigned within the R processing environment.
intermediate <- gsub("^<result\\s*->\\s*|>$", "",
substring(script, pos[i], pos[i] + len[i] - 1L))
# Submit the statement (with phrase) and pull the result.
dat <- withVisible(SqrlSubScript(datasource, statement, phrase,
intermediate, sqrl.env))
# If there was a result (was a query), use it as the overall result.
if (!is.null(dat$value))
{
result <- dat
}
# Reset the statement (begin the next one afresh).
statement <- list()
# Reset the procedural-language state tracker.
pl <- SqrlPL(NULL)
# Otherwise (this is an R substitution into SQL), clean the phrase (but
# do not remove pre-tag trailing whitespace) and then append it to the
# statement. The phrase will never contain a string literal.
} else
{
# Remove trailing whitespace from each internal line of the phrase.
phrase <- gsub("[[:blank:]]+\n", "\n", phrase)
# Remove vertical whitespace from within the phrase.
phrase <- gsub("\n+", "\n", phrase)
# Update the procedural-language state tracker.
pl <- SqrlPL(pl, phrase)
# Append the phrase to the statement (unless the phrase is empty).
if (nchar(phrase) > 0L)
{
statement <- append(statement, phrase)
}
}
}
# Reposition the start-of-phrase index immediately after this tag.
k <- pos[i] + len[i]
# Isolate the R section. Because this involves parsing (to find the end of
# the section), we need to work through this, even when cond.current is
# FALSE.
rscript <- list()
i <- i + 1L
while ((i <= num.delims)
&& !(pat[i] %in% c("tag.endr", "tag.do")))
{
# Remove comments from R (including SQL line and block comments).
# This is so we can use SQL comments within the R (looks better under
# SQL syntax highlighting rules within your text editor), and is also
# necessary to allow commenting-out of quote markers, <do>, <stop>,
# and </R> tags with R comments (as well as SQL).
if ((i <= num.delims)
&& (pat[i] %in% c("comment.line", "comment.begin", "comment.r")))
{
# Isolate any unappended script preceding this comment, and append
# it to the R-script.
rscript <- append(rscript, substring(script, k, pos[i] - 1L))
# Scan through the subsequent script delimiters, until the comment
# concludes with either an end-of-file, or appropriate delimiter.
end.marker <- switch(pat[i],
comment.r = "end.of.line",
comment.line = "end.of.line",
comment.begin = "comment.end")
i <- i + 1L
while ((i <= num.delims)
&& (pat[i] != end.marker))
{
i <- i + 1L
}
# Reposition the start-of-phrase index immediately after the end of
# the comment. When the comment ends with a newline, the index is
# placed on that newline (the next phrase will begin with newline).
k <- ifelse(i <= num.delims,
ifelse(end.marker == "end.of.line",
pos[i], pos[i] + len[i]),
nchar.script + 1L)
}
# Skip over any (single or double) quote-enclosed strings (include
# them verbatim). (Ignore anything that looks like a delimiter, but
# is inside a string.)
if ((i <= num.delims)
&& (pat[i] %in% c("quote.single", "quote.double")))
{
# Since we're not cleaning-up the R script, we merely scan through
# the script delimiters until reaching the end of the quote (ignore
# all other delimiters found in between). Appending to the R script
# will only occur later (at a comment, or an end-of-R delimiter). We
# only test for \ escaped quotes here (once already within quote mode,
# which also guarantees that i > 1).
closing.quote <- pat[i]
i <- i + 1L
while ((i <= num.delims)
&& ((pat[i] != closing.quote)
|| ((attr(regexpr(
paste0("\\\\*", patterns[closing.quote], "$"),
substring(script, pos[i - 1L], pos[i])),
"match.length") %% 2L) == 0L)))
{
i <- i + 1L
}
}
# Semicolons may delimit R statements, or mark the end of the R section.
if ((i <= num.delims)
&& (pat[i] == "semi.colon"))
{
# Provided scdo is TRUE, if there is nothing but whitespace between
# the semicolon and the start of its line (or the R tag, or the result
# tag, or the previous semicolon), then interpret the semicolon as a
# do tag (the R parser won't accept it, anyway), and stop looking for
# one. We need not append the whitespace to the R script.
if (grepl("^[[:space:]]*$", substring(script, k, pos[i] - 1L))
&& SqrlParam(datasource, "scdo"))
{
break
# Otherwise, the semicolon marks the end of an R statement. Append
# any unappended script, up to and including the semicolon, to the
# R-script. Advance the start-of-phrase index past the semicolon, and
# resume searching for the end of the R section.
} else
{
rscript <- append(rscript, substring(script, k, pos[i]))
k <- pos[i] + len[i]
}
}
# Ignore remainder of script when encountering a 'stop' tag within an
# R post-processing section.
if ((i <= num.delims)
&& (pat[i] == "tag.stop"))
{
# Append the last chunk of R (before the stop tag).
rscript <- append(rscript, substring(script, k, pos[i] - 1L))
# Ignore (skip over) everything else in the script.
i <- num.delims + 1L
k <- nchar.script + 1L
break
}
# We append each line, as we reach its end, to the R-script, only to
# simplify distinguishing R-section from R-statement semicolons.
if ((i <= num.delims)
&& (pat[i] == "end.of.line"))
{
# Append any preceding R (up to and including the line end).
rscript <- append(rscript, substring(script, k, pos[i]))
# Move the start of phrase index to the beginning of the next line.
k <- pos[i] + len[i]
}
# Advance to the next script delimiter.
i <- i + 1L
}
# Evaluate the R script, and process the result, unless within the block
# of an untrue conditional expression.
if (cond.current)
{
# Append the final chunk to the R-script.
phrase <- ifelse(i <= num.delims,
substring(script, k, pos[i] - 1L),
substring(script, k))
rscript <- append(rscript, phrase)
# Collapse the rscript (list) to a single string.
rscript <- trimws(paste(rscript, collapse = ""))
# In the case of embedded R, evaluate and append to the encasing SQL.
if ((r.type == "tag.r")
&& (i <= num.delims)
&& (pat[i] == "tag.endr"))
{
sqlisedvalue <- SqrlTry(SqrlStatement(datasource, list(eval(
parse(text = rscript, keep.source = FALSE), sqrl.env))))
if (sqlisedvalue$error)
{
stop(sqlisedvalue$value)
}
pl <- SqrlPL(pl, sqlisedvalue$value)
statement <- append(statement, sqlisedvalue$value)
# Otherwise (R post-processing), evaluate and retain the result.
} else
{
# Stop if there's any unsubmitted SQL before an <R> ... <do> section.
# (SQL is always submitted before a <result> ... <do> section.)
if (any(grepl("[[:graph:]]", unlist(statement))))
{
stop("Unsubmitted SQL preceding an <R> ... <do> section.")
}
# If in verbose mode, output the script (prior to evaluation).
if (interactive()
&& SqrlParam(datasource, "verbose"))
{
cat("\n")
cat(rscript)
cat("\n")
}
# Evaluate the script, and retain the result, only if the script is
# non-empty (in the sense of containing no uncommented statements).
parsed <- SqrlTry(parse(text = rscript, keep.source = FALSE))
if (parsed$error)
{
stop(parsed$value)
}
if (!identical(as.character(parsed$value), character(0L)))
{
# Evaluate the script, retain the result. As above, stop with an
# error should such occur.
result <- SqrlTry(withVisible(eval(parsed$value, sqrl.env)))
if (result$error)
{
stop(result$value)
}
result <- result$value
# If verbose, output (some of) the result. This could be any object,
# with no guarantee of either the head() or print() methods.
if (interactive()
&& SqrlParam(datasource, "verbose"))
{
printed <- FALSE
if ("package:utils" %in% search())
{
top <- SqrlTry(utils::head(result$value))
if (!top$error)
{
printed <- !SqrlTry(print(top$value))$error
if (printed
&& !identical(top$value, result$value))
{
cat("(output truncated)\n")
}
}
}
if (!printed)
{
cat(paste0("(object of class '",
paste0(class(result$value), collapse = " "),
"')\n"))
}
cat("\n")
}
}
}
}
# Reposition the start-of-phrase index immediately after this R section.
k <- ifelse(i <= num.delims, pos[i] + len[i], nchar.script + 1L)
# Advance to the next script delimiter.
i <- i + 1L
}
# Assign temporary parameter values.
if ((i <= num.delims)
&& (pat[i] == "tag.with"))
{
# Prohibit changing RODBC and/or SQRL parameters in library mode.
if (libmode)
{
stop("Text outside of a procedure definition.")
}
# Stop if there's any unsubmitted SQL before the tag, unless within the
# block of an untrue conditional expression.
if ((cond.current)
&& any(grepl("[[:graph:]]", unlist(statement)))
|| any(grepl("[[:graph:]]", substring(script, k, pos[i] - 1L))))
{
stop("Unsubmitted SQL preceding a <with> block.")
}
# Reposition the start-of-phrase index immediately after the tag.
k <- pos[i] + len[i]
# Isolate the with section. This involves parsing (to find the end of the
# section), even when cond.current is FALSE.
withs <- list()
i <- i + 1L
while ((i <= num.delims)
&& !(pat[i] %in% c("tag.endwith", "tag.stop")))
{
# Remove SQL comments from the with block's R expressions.
if ((i <= num.delims)
&& (pat[i] %in% c("comment.line", "comment.begin")))
{
# Isolate any unappended script preceding this comment, and append
# it to the withs-script.
withs <- append(withs, substring(script, k, pos[i] - 1L))
# Scan through the subsequent script delimiters, until the comment
# concludes with either an end-of-file, or appropriate delimiter.
end.marker <- switch(pat[i],
comment.line = "end.of.line",
comment.begin = "comment.end")
i <- i + 1L
while ((i <= num.delims)
&& (pat[i] != end.marker))
{
i <- i + 1L
}
# Reposition the start-of-phrase index immediately after the end of
# the comment. When the comment ends with a newline, the index is
# placed on that newline (the next phrase will begin with newline).
k <- ifelse(i <= num.delims,
ifelse(end.marker == "end.of.line", pos[i], pos[i] + len[i]),
nchar.script + 1L)
}
# Skip over any (single or double) quote-enclosed strings (include
# them verbatim). (Ignore anything that looks like a delimiter, but
# is inside a string.)
if ((i <= num.delims)
&& (pat[i] %in% c("quote.single", "quote.double")))
{
# We only test for \ escaped quotes here (once already within quote
# mode, which also guarantees that i > 1).
closing.quote <- pat[i]
i <- i + 1L
while ((i <= num.delims)
&& ((pat[i] != closing.quote)
|| ((attr(regexpr(
paste0("\\\\*", patterns[closing.quote], "$"),
substring(script, pos[i - 1L], pos[i])),
"match.length") %% 2L) == 0L)))
{
i <- i + 1L
}
}
# Advance to the next script delimiter.
i <- i + 1L
}
# Process the withs script, unless within the block of an untrue
# conditional expression.
if (cond.current)
{
# Append the final chunk to the withs-script.
phrase <- ifelse(i <= num.delims,
substring(script, k, pos[i] - 1L),
substring(script, k))
withs <- append(withs, phrase)
# Collapse the withs-script (list) to a single string.
withs <- paste(withs, collapse = "\n")
# Attempt to parse the entire withs-script to an R expression.
# Expressions remain unevaluated at this point.
withs <- SqrlTry(parse(text = withs, keep.source = FALSE))
if (withs$error)
{
stop("Failed to parse <with> block.")
}
withs <- withs$value
# Create a sub-environment of the main-script working environment
# (sqrl.env), within which to evaluate expressions of the withs-script.
w.env <- new.env(parent = sqrl.env)
# Consistently apply the verbose mode in effect prior to the <with>
# block (whether or not the verbose value is changed within the block).
verbose <- interactive() && SqrlParam(datasource, "verbose")
# In verbose mode, add vertical whitespace before showing the values.
if (verbose)
{
cat("\n")
}
# Evaluate each item of the withs-script expression in turn.
for (w.itm in withs)
{
# Deparse this item's expression to text, wrap it in a list, and parse
# back to an expression. This ought to succeed, since the expression
# has been parsed before (as a part of the whole block, just above).
w.txt <- paste0(deparse(w.itm), collapse = "\n")
w.exp <- parse(text = paste0("list", "(", w.txt, ")"),
keep.source = FALSE)
# Evaluate the expression within the with-block sub-environment.
# Errors here are fatal. Warnings are visible.
w.val <- eval(w.exp, w.env)
# Any successful result must be a list. When that list comprises a
# single named member, we interpret it as an intended SQRL/RODBC
# parameter value.
if ((length(w.val) == 1L)
&& !is.null(names(w.val)))
{
w.par <- names(w.val)
if ((w.par %in% SqrlParams("locked-while-open"))
|| (w.par %in% SqrlParams("no-temp-allowed")))
{
stop("Parameter does not accept temporary values.")
}
# Attempt to set the temporary parameter value.
SqrlParam(datasource, "pstack", w.val)
# If in verbose mode, print the temporary value.
if (verbose)
{
w.val <- deparse(SqrlParam(datasource, w.par))
cat("Using:", w.par, "=", w.val, "\n")
}
}
}
# In verbose mode, add vertical whitespace after showing the values.
if (verbose)
{
cat("\n")
}
}
# When the section ends with a stop tag, skip the rest of the script.
if ((i <= num.delims)
&& (pat[i] == "tag.stop"))
{
i <- num.delims + 1L
k <- nchar.script + 1L
break
}
# Reposition the start-of-phrase index immediately after this section.
k <- ifelse(i <= num.delims, pos[i] + len[i], nchar.script + 1L)
# Advance to the next script delimiter.
i <- i + 1L
}
# Process a close tag, encountered within SQL.
if ((i <= num.delims)
&& (pat[i] == "tag.close"))
{
# Prohibit changing the connection status in library mode.
if (libmode)
{
stop("Text outside of a procedure definition.")
}
# Perform the closure action, unless within an untrue conditional block.
if (cond.current)
{
# Stop if there's any unsubmitted SQL before the close directive.
if (any(grepl("[[:graph:]]", unlist(statement)))
|| any(grepl("[[:graph:]]", substring(script, k, pos[i] - 1L))))
{
stop("Unsubmitted SQL preceding a <close> tag.")
}
# Close the channel to the source.
SqrlClose(datasource)
# If verbose, notify of the closure.
if (interactive()
&& SqrlParam(datasource, "verbose"))
{
cat("\nConnection channel closed.\n")
}
}
# Reposition the start-of-phrase index immediately after this tag.
k <- ifelse(i <= num.delims, pos[i] + len[i], nchar.script + 1L)
# Advance to the next script delimiter.
i <- i + 1L
}
# Take no special action at any other delimiter: end-of-line, end-of-intra-
# tag-expression, intra-SQL R comment marker, SQL end-comment-block marker,
# end-of-procedure-definition, or end-of-intra-SQL-embedded-R. In all cases
# we assume this is legitimate SQL and proceed to the next delimiter. This
# assumption is highly probable for the first two, and highly unlikely for
# the last three. When the assumption is wrong, the ODBC driver will return
# an error. However, if we were to throw a potentially more helpful error
# here, we might be blocking a legitimate query without even trying it.
if ((i <= num.delims)
&& (pat[i] %in% c("end.of.line", "end.expression", "comment.r",
"comment.end", "tag.endproc", "tag.endr")))
{
i <- i + 1L
}
}
# Reaching this point means we are in a (possibly empty) SQL block, and there
# are no delimiters (patterns) between the start-of-phrase index, k, and the
# end of the script. It may even be that k is beyond the end of the script.
# Unless within the block of an untrue condition, append all (any) remaining
# (SQL) script to the current (SQL) statement.
if (cond.current)
{
statement <- append(statement, substring(script, k))
}
# Prohibit query-submission in library mode.
if (libmode
&& (any(grepl("[[:graph:]]", unlist(statement)))))
{
stop("Text outside of a procedure definition.")
}
# Submit the statement and pull the result. The statement might be blank or
# empty, in which case the result will be NULL.
dat <- withVisible(SqrlSubScript(datasource, statement))
# If there was a result (if there was a query), replace the overall result.
if (!is.null(dat$value))
{
result <- dat
}
# If the last result was invisible, return it invisibly.
if (!result$visible)
{
return(invisible(result$value))
}
# Otherwise, (visibly) return whatever the last result was.
return(result$value)
}
SqrlHelp <- function(datasource = "",
                     type = "",
                     clean = FALSE)
{
  # Generates run-time help for SQRL interface functions.
  # Args:
  #   datasource : The name of a known data source.
  #   type       : The requested help format ('text' or 'html').
  #   clean      : If set to TRUE, any old temp files are removed.
  # Returns:
  #   Invisible NULL, after displaying help.
  # SQRL Calls:
  #   SqrlConfig(), SqrlHelp() (self), SqrlHelper(), SqrlPath(), SqrlTry(),
  #   srqlHelp.
  # tools Calls:
  #   Rd2HTML(), Rd2txt() (only if the tools package is installed).
  # utils Calls:
  #   browseURL(), help() (only if utils is attached).
  # SQRL Callers:
  #   SqrlDelegate(), SqrlHelp() (self), .onLoad(), .onUnload().
  # User:
  #   Has no direct access, but is able to submit (only) the 'type' argument.
  #   That is coerced to an allowed value, and no further checks are required.
  # If the clean argument was set, prune any old temp files and return the temp
  # files list (after creating an empty list if the list does not yet exist).
  # Temp files deliberately persist between calls, so that an open pager or
  # browser can keep reading them; each is only deleted on a later request.
  if (clean)
  {
    if (!exists("temps", srqlHelp, inherits = FALSE))
    {
      return(assign("temps", character(0L), srqlHelp))
    }
    temps <- get("temps", srqlHelp, inherits = FALSE)
    temps <- temps[file.exists(temps)]
    temps <- temps[!suppressWarnings(file.remove(temps))]
    return(assign("temps", temps, srqlHelp))
  }
  # Unless a supported help type was supplied, use the default help type.
  # This may be NULL valued. Types are not case sensitive. (When the option is
  # unset, tolower(NULL) gives character(0), which safely fails the identical()
  # comparisons below, so execution falls through to the plain-text branch.)
  type <- tolower(type)
  if (!identical(type, "text")
      && !identical(type, "html"))
  {
    type <- tolower(getOption("help_type"))
  }
  # If the utils package (which provides the help() function) is not loaded,
  # print a link to the CRAN SQRL page (which has a PDF help file), and return.
  if (!("package:utils" %in% search()))
  {
    return(cat("https://CRAN.R-project.org/package=SQRL\n"))
  }
  # If the tools package (which converts Rd files) is unavailable, display the
  # pre-built (static) interface-usage help page, and return.
  if (length(find.package("tools", quiet = TRUE)) == 0L)
  {
    return(utils::help("sqrlUsage", help_type = type))
  }
  # The tools package is available. We shall dynamically generate tailored help
  # for the invoking (data source's) interface function. This involves temp
  # files. Remove any existing SQRL temp files (from any prior SqrlHelp() call).
  temps <- SqrlHelp(clean = TRUE)
  # Obtain the current source configuration (all parameter values).
  config <- SqrlConfig(datasource)
  # Extract the driver from the configuration, and escape any % characters
  # (these are the Rd comment symbol, even with \preformatted{} sections).
  if (grepl("[[:graph:]]", config[["driver"]]))
  {
    driver <- paste0("\\file{", SqrlHelper(config[["driver"]]), "}")
  } else
  {
    driver <- "unknown or undefined"
  }
  # Extract and escape the data source's (SQRL) name.
  if (identical(config[["name"]], config[["interface"]]))
  {
    dsrc <- "of the same name"
  } else
  {
    dsrc <- paste0("\\file{", SqrlHelper(config[["name"]]), "}")
  }
  # Establish the channel status, and choose the appropriate phrase.
  if (is.null(config[["channel"]]))
  {
    ochan <- "closed"
  } else
  {
    ochan <- "open"
  }
  # Construct example queries, appropriate to the source's driver.
  # (Oracle and DB2 require a from-clause; 'dual' is their dummy table.)
  if (grepl("oracle|db2", driver, ignore.case = TRUE))
  {
    query1 <- "select 1 from dual"
    query2 <- "\"select \", sample(6, 1), \" from dual\""
  } else
  {
    query1 <- "select 1"
    query2 <- "\"select \", sample(6, 1)"
  }
  # Extract and escape the interface function's name.
  iface <- SqrlHelper(config[["interface"]])
  # Escape and list all of the parameter values.
  csc <- character(0L)
  config <- SqrlHelper(config)
  for (name in names(config))
  {
    csc <- c(csc, paste(name, "=", config[[name]]))
  }
  # Construct the help text, in Rd (R man file) format. We don't gsub() on tags
  # in case the one of the parameter values happens to contain that sequence.
  helprd <- c(
    paste0("\\name{", iface, "}"),
    paste0("\\title{ODBC Interface Function \\sQuote{", iface, "}}"),
    "\\description{",
    paste0("The function \\code{", iface, "} is"),
    paste0("the interface to the data source ", dsrc,"."),
    paste0("The \\acronym{ODBC} driver is ", driver, "."),
    paste0("Communications are ", ochan, "."),
    "}",
    "\\section{Listing Sources}{\\preformatted{",
    "# View the associated source definition.",
    paste0(iface, "(\"source\")"),
    "",
    "# See all data sources and their interfaces.",
    paste0(iface, "(\"sources\")"),
    "}}",
    "\\section{Opening and Closing}{\\preformatted{",
    "# Open a connection to the data source.",
    paste0(iface, "()"),
    "",
    "# Check if the connection is open.",
    paste0(iface, "(\"isopen\")"),
    "",
    "# Close the connection.",
    paste0(iface, "(\"close\")"),
    "",
    "# Close the connection when not in use.",
    paste0(iface, "(autoclose = TRUE)"),
    "}}",
    "\\section{Submitting Queries}{\\preformatted{",
    "# Submit a query.",
    paste0(iface, "(\"", query1, "\")"),
    "",
    "# Submit a compound query.",
    paste0(iface, "(", query2, ")"),
    "",
    "# Submit a query from file.",
    paste0(iface, "(\"my/file.sql\")"),
    "",
    "# Submit a parameterised query from file.",
    paste0(iface, "(\"rhaphidophoridae.sqrl\", genus = \"gymnoplectron\")"),
    "",
    "# Force submission as a query.",
    paste0(iface, "(query = \"help\")"),
    "}}",
    "\\section{Communication Parameters}{\\preformatted{",
    "# Get a named parameter value.",
    paste0(iface, "(\"uid\")"),
    "",
    "# Set a named parameter value.",
    paste0(iface, "(visible = TRUE)"),
    "",
    "# Reset a parameter to its default value.",
    paste0(iface, "(reset = \"nullstring\")"),
    "",
    "# List all parameter values.",
    paste0(iface, "(\"config\")"),
    "",
    "# Set multiple parameter values from file.",
    paste0(iface, "(config = \"path/to/config/file\")"),
    "}}",
    "\\section{Further Assistance}{\\preformatted{",
    "# Additional usage examples.",
    "?sqrlUsage",
    "",
    "# Detailed parameter descriptions.",
    "?sqrlParams",
    "}}",
    "\\section{Current Settings}{\\preformatted{",
    csc,
    "}}")
  # Write the (Rd format) text to a temp file, registering it for later
  # removal before writing (so that a write failure cannot orphan the file).
  rdfile <- tempfile(fileext = ".Rd")
  temps <- assign("temps", c(temps, rdfile), srqlHelp)
  writeLines(helprd, rdfile)
  # Detect and handle RStudio, which does things a bit differently (different
  # viewer, different style, different file location rules). For RStudio, the
  # 'type' argument is ignored (help is only provided in HTML format).
  if (nzchar(Sys.getenv("RSTUDIO_USER_IDENTITY")))
  {
    # Rstudio's viewer seems to want a style file in the same directory as the
    # help (HTML) file (and only wants the file name of that CSS file, not its
    # full path). That compels us to copy a style file to this temp file.
    csstemp <- tempfile(fileext = ".css")
    temps <- assign("temps", c(temps, csstemp), srqlHelp)
    # Set a default cascading style sheet. This won't exist within the temp
    # directory, in which case the viewer will apply default styling when it
    # does not find the CSS file (no harm done, but not aesthetically ideal).
    cssfile <- "R.css"
    # These locations are where we think the RStudio and R style files will be.
    # The two styles are different, so the RStudio file is preferred.
    cssfiles <- c(file.path(Sys.getenv("RSTUDIO_PANDOC"),
                            "../../resources/R.css"),
                  file.path(R.home(), "library/base/html/R.css"))
    # If we find, and can copy, the RStudio file, use that. Otherwise, if we
    # find, and can copy, the base R file, use that. If we find neither, the
    # default style will apply.
    for (css in cssfiles)
    {
      if (!is.null(SqrlPath(css))
          && file.copy(css, csstemp))
      {
        cssfile <- csstemp
        break
      }
    }
    # Convert the Rd to HTML, write that to another temp file, open that in the
    # RStudio viewer, and return invisible NULL. Note the use of basename().
    # On conversion failure, fall back to the static (pre-built) help page.
    htmlfile <- tempfile(fileext = ".html")
    assign("temps", c(temps, htmlfile), srqlHelp)
    if (SqrlTry(tools::Rd2HTML(rdfile, htmlfile, package = "SQRL",
                               stylesheet = basename(cssfile)))$error)
    {
      return(utils::help("sqrlUsage"))
    }
    getOption("viewer")(htmlfile)
    return(invisible(NULL))
  }
  # If the help type is 'html', convert the Rd to HTML, write that to another
  # temp file, open that file in the default browser, and return NULL.
  if (identical(type, "html"))
  {
    htmlfile <- tempfile(fileext = ".html")
    assign("temps", c(temps, htmlfile), srqlHelp)
    cssfile <- paste0(R.home(), "/library/base/html/R.css")
    if (SqrlTry(tools::Rd2HTML(rdfile, htmlfile, package = "SQRL",
                               stylesheet = cssfile))$error)
    {
      return(utils::help("sqrlUsage", help_type = "html"))
    }
    utils::browseURL(htmlfile)
    return(invisible(NULL))
  }
  # Otherwise, convert the Rd to text, write that to another temp file, open
  # that file in the default text viewer (pager), and return invisible NULL.
  txtfile <- tempfile(fileext = ".txt")
  assign("temps", c(temps, txtfile), srqlHelp)
  if (SqrlTry(tools::Rd2txt(rdfile, txtfile, package = "SQRL"))$error)
  {
    return(utils::help("sqrlUsage", help_type = "text"))
  }
  file.show(txtfile)
  return(invisible(NULL))
}
SqrlHelper <- function(value = "")
{
  # Formats parameter values for inclusion in a (.Rd-format) help file, by
  # escaping % characters (the Rd comment symbol, even inside \preformatted{}).
  # Args:
  #   value : Either a list (source configuration) or a single character string.
  # Returns:
  #   The input, converted to strings with every % escaped as \%.
  # SQRL Callers:
  #   SqrlHelp().
  # Helper: escape Rd comment markers within a character vector.
  escape <- function(text)
  {
    return(gsub("%", "\\\\%", text))
  }
  # A plain character string is simply escaped and returned.
  if (!identical(class(value), class(list())))
  {
    return(escape(value))
  }
  # Otherwise, the value is a whole configuration (parameter-value) list.
  # Any RODBC channel is reduced to its integer label.
  if (!is.null(value[["channel"]]))
  {
    value[["channel"]] <- as.numeric(value[["channel"]])
  }
  # Any library is truncated to its first element (plus an ellipsis), since
  # the full search path would be too long to display comfortably.
  if ("library" %in% names(value))
  {
    lib <- value[["library"]]
    value[["library"]] <- if (!is.null(lib)
                              && (length(lib) > 1L))
    {
      paste0(deparse(lib[1L]), ", ...")
    } else
    {
      deparse(lib)
    }
    value[["library"]] <- escape(value[["library"]])
  }
  # Deparse and escape every remaining (non-library) parameter value.
  for (key in names(value)[names(value) != "library"])
  {
    value[[key]] <- escape(deparse(value[[key]]))
  }
  return(value)
}
SqrlIndicator <- function(datasource = "",
                          action = "",
                          marker = "all")
{
  # Alters the display-state of open-connection (channel) indicators.
  # Args:
  #   datasource : The name of a known (to SQRL) data source.
  #   action     : One of 'show', 'hide', 'query', 'fetch', 'ping', or 'done'.
  #   marker     : One of 'prompt', 'wintitle', or 'all' (the default).
  # Returns:
  #   Invisible NULL, after making the requested indicator changes.
  # SQRL Calls:
  #   SqrlParam().
  # utils Calls:
  #   getWindowTitle(), setWindowTitle() (only if the utils package is attached,
  #   and these two functions exist within it on the current OS/platform).
  # SQRL Callers:
  #   SqrlDelegate(), SqrlParam(), SqrlPing(), SqrlSubmit().
  # User:
  #   Has no direct access, and is unable to indirectly supply any of the
  #   arguments. Argument validity checks are not required.
  # Note: 'show' and 'hide' calls are expected to be balanced; indicators
  # stack, and each 'hide' removes one instance applied by a prior 'show'.
  # TRUE if the indicators are potentially visible (when the data source's
  # channel is open). No test of openness is made here; that should be performed
  # (where necessary) before calling this function.
  visible <- (interactive()
              && SqrlParam(datasource, "visible"))
  # TRUE if, and only if, the prompt is to be altered.
  do.prompt <- (visible
                && (marker %in% c("all", "prompt")))
  # TRUE if, and only if, the window title is to be altered.
  # The get/setWindowTitle() functions only exist on Windows versions of R,
  # and only work with Rgui, R Console, and Rterm (not with RStudio).
  # We test ("package:utils" %in% search()), rather than
  # requireNamespace("utils", quietly = TRUE), because, if utils is attached,
  # we then need to look inside it to see whether or not the get & set functions
  # exist. This doesn't work without attachment (having the namespace available
  # does not suffice). We could promote our utils reliance from suggests to
  # depends, in the package description file, but would rather not have this
  # strict requirement (this indicator feature is nice to have, but not
  # absolutely necessary). Utils is normally attached on start-up, anyhow.
  do.title <- (visible
               && (marker %in% c("all", "wintitle"))
               && ("package:utils" %in% search())
               && exists("getWindowTitle", where = "package:utils",
                         mode = "function", inherits = FALSE)
               && exists("setWindowTitle", where = "package:utils",
                         mode = "function", inherits = FALSE))
  # When the action is 'show', apply (append and/or prepend) the indicator(s).
  if (action == "show")
  {
    # Append window title-bar open-channel indicator (the source's 'wintitle'
    # string), provided that string contains at least one printable character.
    if (do.title)
    {
      indic <- SqrlParam(datasource, "wintitle")
      if (grepl("[[:graph:]]", indic))
      {
        utils::setWindowTitle(title = sub("\\s+$", "",
          paste(sub("\\s+$", "", utils::getWindowTitle()), indic)))
      }
    }
    # Prepend command-prompt open-channel indicator (the 'prompt' string).
    if (do.prompt)
    {
      indic <- SqrlParam(datasource, "prompt")
      options(prompt = paste0(indic, getOption("prompt")))
    }
    # Return invisible NULL.
    return(invisible(NULL))
  }
  # When the action is 'hide', remove the indicator(s). This will work (as in,
  # does nothing, quietly) if the indicators aren't actually on to begin with.
  # Where this can go wrong, is when the open indicators are defined as, say,
  # 'A', and 'AB'. Removal of 'A" from 'ABA' might leave 'BA'. So don't do that.
  if (action == "hide")
  {
    # Remove one open-channel indicator from the window title. Only the last
    # (right-most) occurrence is removed, since indicators stack rightward.
    if (do.title)
    {
      indic <- SqrlParam(datasource, "wintitle")
      if (grepl("[[:graph:]]", indic))
      {
        windowtitle <- utils::getWindowTitle()
        if (grepl(indic, windowtitle, fixed = TRUE))
        {
          position <- max(gregexpr(indic, windowtitle, fixed = TRUE)[[1L]])
          before <- sub("\\s+$", "", substring(windowtitle, 1L, position - 1L))
          after <- substring(windowtitle, position + nchar(indic))
          utils::setWindowTitle(title = sub("\\s+$", "", paste0(before, after)))
        }
      }
    }
    # Remove one open-channel indicator from the R prompt (first occurrence;
    # prompts stack leftward, because 'show' prepends).
    if (do.prompt)
    {
      indic <- SqrlParam(datasource, "prompt")
      if (nchar(indic) > 0L)
      {
        options(prompt = sub(indic, "", getOption("prompt"), fixed = TRUE))
      }
    }
    # Return invisible NULL.
    return(invisible(NULL))
  }
  # When the action is 'query', 'fetch', or 'ping', append a job-in-progress
  # marker ('*', '+', or '?', respectively) to the data source's window title
  # indicator, then return invisible NULL. This will work (as in, does nothing,
  # quietly) if the indicator isn't actually on.
  if (action %in% c("query", "fetch", "ping"))
  {
    glyph <- switch(action, "query" = "*", "fetch" = "+", "ping" = "?")
    if (do.title)
    {
      indic <- SqrlParam(datasource, "wintitle")
      glyphed <- paste0(indic, glyph)
      if (grepl("[[:graph:]]", indic))
      {
        windowtitle <- utils::getWindowTitle()
        # Replace whichever form of the indicator appears first; the trailing
        # "" alternative matches a bare indicator (no glyph, no space).
        for (unglyphed in paste0(indic, c(" ", "*", "+", "?", "")))
        {
          if (grepl(unglyphed, windowtitle, fixed = TRUE))
          {
            utils::setWindowTitle(title = sub("\\s+$", "",
              sub(unglyphed, glyphed, windowtitle, fixed = TRUE)))
            break
          }
        }
      }
    }
    return(invisible(NULL))
  }
  # When the action is 'done', remove a job-in-progress marker ('*', '+', '?')
  # from the data source's window title indicator, then return invisible NULL.
  # This will work (does nothing, quietly) if no marker is actually present.
  if (action == "done")
  {
    if (do.title)
    {
      indic <- SqrlParam(datasource, "wintitle")
      unglyphed <- paste0(indic, " ")
      if (grepl("[[:graph:]]", indic))
      {
        windowtitle <- utils::getWindowTitle()
        for (glyphed in paste0(indic, c("*", "+", "?")))
        {
          if (grepl(glyphed, windowtitle, fixed = TRUE))
          {
            utils::setWindowTitle(title = sub("\\s+$", "",
              sub(glyphed, unglyphed, windowtitle, fixed = TRUE)))
            break
          }
        }
      }
    }
    return(invisible(NULL))
  }
  # This should be unreachable, but if we were to arrive here, return NULL.
  return(invisible(NULL))
}
SqrlInterface <- function(datasource = "",
                          interface = "",
                          vital = TRUE)
{
  # Constructs a user-interface to a specified data source.
  # Args:
  #   datasource : The name of a known data source.
  #   interface  : The name to use for that data source's interface.
  #   vital      : When set to FALSE, name conflicts are non-fatal.
  # Returns:
  #   A function (named <interface>) for interacting with the data source.
  #   Any pre-existing interface to that data source will be deleted.
  #   If interface is not specified, the interface name defaults to the data
  #   source name (sans whitespace). When interface == "remove", no new
  #   interface is created, but any existing interface will be deleted. (There
  #   is no loss of generality, since "remove" is prohibited as an interface
  #   name due to its conflicting with the base::remove() function.)
  # SQRL Calls:
  #   SqrlFace(), SqrlInterface() (self), SqrlParam().
  # SQRL Callers:
  #   SqrlCache(), SqrlConfig(), SqrlDelegate(), SqrlDSNs(), SqrlOff(),
  #   SqrlParam(), SqrlSource(), sqrlInterface().
  # User:
  #   Has no direct access, but is able to indirectly supply the datasource
  #   argument via sqrlInterface(), and through SqrlSources() by editing the
  #   registered data source names (DSNs) prior to loading SQRL. The user can
  #   indirectly supply the interface argument via sqrlInterface(),
  #   SqrlDelegate(), and through SqrlSources() by editing the DSNs prior to
  #   loading SQRL. The user cannot indirectly supply the vital argument. In
  #   all cases, existence of the datasource is established before calling this
  #   function. The interface value could be anything, and is checked here.
  # This is the user-interface function-body definition for the data source.
  # Every generated interface shares this exact text (modulo the source name),
  # which is how SQRL later recognises its own interfaces before deleting them.
  uibody <- paste0("function(...) {SqrlShell(\"", datasource,
                   "\", base::parent.frame(), base::list(...))}")
  # Abort on invalid interface (name). Allowed values are NULL or a character
  # string. The requested name may, or may not, be available and assignable.
  if (!is.null(interface)
      && (!identical(class(interface), class(character()))
          || (length(interface) != 1L)
          || !nzchar(trimws(interface))))
  {
    if (!vital)
    {
      return(invisible(NULL))
    }
    stop("Invalid interface name.")
  }
  # Remove any name and leading or trailing whitespace from the interface
  # argument. Applying trimws() to NULL would produce character(0). The
  # as.character() function removes any name attribute the string may have.
  if (!is.null(interface))
  {
    interface <- trimws(as.character(interface))
  }
  # Isolate the previous interface (NULL when no interface was defined).
  preface <- SqrlParam(datasource, "interface")
  # On a request to delete the data source's interface, if we can confirm the
  # interface object retains its original SQRL definition, then we delete that
  # object. Either way, the interface is deregistered in the data source's
  # cache, and an invisible NULL is returned. (A user-redefined object of the
  # same name is deliberately left untouched.)
  if (is.null(interface)
      || identical(interface, "remove"))
  {
    if (!is.null(preface))
    {
      if (SqrlFace(preface, exists = TRUE))
      {
        fun <- paste(deparse(SqrlFace(preface)), collapse = "")
        if (gsub("[[:space:]]+", "", fun) == gsub("[[:space:]]+", "", uibody))
        {
          SqrlFace(preface, delete = TRUE)
        }
      }
      SqrlParam(datasource, "interface", NULL)
    }
    return(invisible(NULL))
  }
  # Check that the preface actually is a SQRL interface, and set NULL otherwise.
  # To be an interface, it must be registered within the source parameter cache,
  # exist as a function, and have the precise uibody definition (above).
  if (!is.null(preface)
      && (SqrlFace(preface, exists = FALSE)
          || (gsub("[[:space:]]+", "",
                   paste(deparse(SqrlFace(preface)), collapse = ""))
              != gsub("[[:space:]]+", "", uibody))))
  {
    preface <- NULL
    SqrlParam(datasource, "interface", NULL)
  }
  # If no interface was specified, use the data source name (sans whitespace).
  # NOTE(review): the validation above rejects blank interface strings, so this
  # fallback appears unreachable in practice — retained as a safety net.
  if (nchar(interface) < 1L)
  {
    interface <- gsub("[[:space:]]+", "", datasource)
  }
  # If the interface already exists (under the same name), return it (silently).
  # The above chack on preface guarantees existence within envir when not NULL.
  if (!is.null(preface)
      && (preface == interface))
  {
    return(invisible(interface))
  }
  # Ensure the interface name is assignable. Non-assignability is usually fatal,
  # but when vital == FALSE the function exists normally (SqrlSources() uses
  # this when auto-generating functions, since 'A<<B' is a valid DSN name).
  if (interface != make.names(interface))
  {
    if (!vital)
    {
      return(invisible(NULL))
    }
    stop("Unassignable interface name.")
  }
  # Abort if some other object already exists under the chosen name.
  # Usually, these conflicts are fatal, but when vital == FALSE the function
  # exits normally (SqrlSources() uses this when auto-generating interfaces).
  if (SqrlFace(interface, clashes = TRUE))
  {
    if (!vital)
    {
      return(invisible(NULL))
    }
    stop("Interface name conflict.")
  }
  # If the data source already has an interface (under some other name), then
  # delete that existing interface (before continuing).
  if (!is.null(preface)
      && (preface != interface))
  {
    SqrlInterface(datasource, "remove")
  }
  # Assign the interface function to the chosen name. Note that changing the
  # interface (name) does not change the wintitle or prompt strings. Those are
  # both based upon the (invariant) data source name.
  SqrlFace(interface, uibody)
  # Register that assignment within the data source's cache. Again, this does
  # not alter the (data source name based) wintitle or prompt strings.
  SqrlParam(datasource, "interface", interface)
  # Return the name of the new user-interface function (invisibly).
  return(invisible(interface))
}
SqrlIsOpen <- function(datasource = "",
                       besure = FALSE)
{
  # Tests whether or not an open ODBC channel exists to the data source.
  # Args:
  #   datasource : The name of a data source.
  #   besure     : Check thoroughly (ping the source) when this is set to TRUE.
  # Returns:
  #   TRUE when the data source exists and SQRL holds an open channel to it;
  #   FALSE otherwise.
  # SQRL Calls:
  #   SqrlClose(), SqrlParam(), SqrlPing(), SqrlTry().
  # RODBC Calls:
  #   odbcGetInfo().
  # SQRL Callers:
  #   SqrlDelegate(), SqrlOpen(), SqrlParam(), SqrlSource(), SqrlSources(),
  #   SqrlSubmit().
  # Fetch the cached channel. An error means the data source is unknown (and
  # so cannot be open); a NULL value means the channel is known to be closed.
  grab <- SqrlTry(SqrlParam(datasource, "channel"), warn = FALSE)
  if (grab$error
      || is.null(grab$value))
  {
    return(FALSE)
  }
  handle <- grab$value
  # A non-RODBC handle cannot be an open channel (we mistakenly thought the
  # channel open, since its value was non-null). Formally close and report.
  if (!identical(class(handle), "RODBC"))
  {
    SqrlClose(datasource)
    return(FALSE)
  }
  # Interrogating the channel fails when it has been closed from our end (or
  # is otherwise invalid), but succeeds when it was closed from the far end
  # without our knowledge. Failure here means the channel is not open.
  if (SqrlTry(RODBC::odbcGetInfo(handle), warn = FALSE)$error)
  {
    SqrlClose(datasource)
    return(FALSE)
  }
  # The channel appears open from this end. Unless thoroughness was requested,
  # that suffices. Otherwise, a successful ping proves genuine openness.
  if (!besure
      || SqrlPing(datasource))
  {
    return(TRUE)
  }
  # The ping failed; the connection has been dropped at the source's end.
  # Formally close it at this end before reporting it not open.
  SqrlClose(datasource)
  return(FALSE)
}
SqrlOff <- function()
{
  # Closes all SQRL channels and deactivates (detaches and unloads) SQRL.
  # Args:
  #   None.
  # Returns:
  #   Invisible NULL, after closing channels and detaching SQRL.
  # SQRL Calls:
  #   SqrlCache(), SqrlClose(), SqrlInterface(), SqrlTry().
  # SQRL Callers:
  #   sqrlOff().
  # Every SQRL data source corresponds to a child environment of srqlHaus.
  # For each one: close any open channel, delete any interface function, and
  # purge all cached data (which may include passwords and the like). The
  # garbage collector ought to reclaim the caches once srqlHaus is detached,
  # but immediate and certain removal is preferred. Each step is wrapped in a
  # silent try() so that one hiccup cannot block the remaining clean-up.
  for (src in SqrlCache("*"))
  {
    SqrlTry(SqrlClose(src), warn = FALSE)
    SqrlTry(SqrlInterface(src, "remove"), warn = FALSE)
    store <- SqrlCache(src)
    SqrlTry(remove(list = objects(pos = store, all.names = TRUE), pos = store),
            warn = FALSE)
  }
  # Detach the public SQRL:Face (interfaces) environment; the garbage
  # collector handles the rest. Again wrapped in try(), so a failure here has
  # no knock-on effects.
  SqrlTry(detach("SQRL:Face"), warn = FALSE)
  # Detach and unload the SQRL package itself. Its .onUnload() re-attempts the
  # SQRL:Face detachment, which is harmless.
  SqrlTry(detach("package:SQRL", unload = TRUE), warn = FALSE)
  return(invisible(NULL))
}
SqrlOpen <- function(datasource = "")
{
  # Opens a channel to a data source.
  # Args:
  #   datasource : The name of a data source.
  # Returns:
  #   Invisible NULL, after creating and caching the data source channel.
  #   Will throw a fatal exception should the connection attempt fail.
  # SQRL Calls:
  #   SqrlIsOpen(), SqrlParam(), SqrlParams(), SqrlPing(), SqrlTry().
  # RODBC Calls:
  #   odbcConnect(), odbcDriverConnect()
  # SQRL Callers:
  #   SqrlDelegate(), SqrlSubmit().
  # User:
  #   Has no direct access. Is unable to supply the only argument.
  #   Argument validity checks are not required.
  # If an open channel already exists, do not attempt to open another.
  # (besure = TRUE pings the source, so a silently-dropped connection is
  # detected and re-opened rather than trusted.)
  if (SqrlIsOpen(datasource, besure = TRUE))
  {
    return(invisible(NULL))
  }
  # RODBC will prompt the user (via dialog box) for missing information (uid,
  # pwd, etc.) only in Rgui. In Rterm, RStudio, etc., pwd must be contained in
  # the DSN or connection string, or the pwd parameter must be set, prior to
  # attempting to connect. Otherwise, a (connection failure) error will result.
  # If a connection string has been defined for this source, connect using that.
  connection <- as.character(SqrlParam(datasource, "connection"))
  if (nchar(connection) > 0L)
  {
    # Substitute current parameter values for any '<param>' placeholders
    # (e.g. '<uid>', '<pwd>') appearing within the connection string.
    for (param in SqrlParams("substitutable"))
    {
      connection <- gsub(paste0("<", param, ">"), SqrlParam(datasource, param),
                         connection, fixed = TRUE)
    }
    channel <- SqrlTry(
      RODBC::odbcDriverConnect(
        connection = connection,
        case = SqrlParam(datasource, "case"),
        believeNRows = SqrlParam(datasource, "believeNRows"),
        colQuote = SqrlParam(datasource, "colQuote"),
        tabQuote = SqrlParam(datasource, "tabQuote"),
        interpretDot = SqrlParam(datasource, "interpretDot"),
        DBMSencoding = SqrlParam(datasource, "DBMSencoding"),
        rows_at_time = SqrlParam(datasource, "rows_at_time"),
        readOnlyOptimize = SqrlParam(datasource, "readOnlyOptimize")))
  # Otherwise (no string), connect using the registered data source name (DSN).
  } else
  {
    # If a user-ID and/or password has been defined, use the defined value (this
    # overrides any corresponding value that may be defined within the DSN).
    # Otherwise, use '' (rather than the default values), which causes RODBC to
    # go with any values in the DSN (and to ask for missing values in Rgui).
    # We can't just send the default values, because these will override any
    # corresponding values on the DSN (which is unlikely to be the preferred
    # behaviour). We do still want a non-empty default user-id, since this is
    # useful when connecting via a string incorporating the <uid> placeholder.
    uid <- ""
    pwd <- ""
    if (SqrlParam(datasource, "uid", isdefined = TRUE))
    {
      uid <- SqrlParam(datasource, "uid")
    }
    if (SqrlParam(datasource, "pwd", isdefined = TRUE))
    {
      pwd <- SqrlParam(datasource, "pwd")
    }
    channel <- SqrlTry(
      RODBC::odbcConnect(
        dsn = SqrlParam(datasource, "dsn"),
        uid = uid,
        pwd = pwd,
        case = SqrlParam(datasource, "case"),
        believeNRows = SqrlParam(datasource, "believeNRows"),
        colQuote = SqrlParam(datasource, "colQuote"),
        tabQuote = SqrlParam(datasource, "tabQuote"),
        interpretDot = SqrlParam(datasource, "interpretDot"),
        DBMSencoding = SqrlParam(datasource, "DBMSencoding"),
        rows_at_time = SqrlParam(datasource, "rows_at_time"),
        readOnlyOptimize = SqrlParam(datasource, "readOnlyOptimize")))
  }
  # Halt and notify on failure to connect. Might just be an incorrect password,
  # but could also be a network or server outage, etc. Fatal error, regardless.
  # When RODBC::odbcConnect or RODBC::odbcDriverConnect encounter a failure to
  # connect, they do not stop with an error message, but instead return -1 and
  # throw warning messages with the details.
  if (channel$error
      || !identical(class(channel$value), "RODBC"))
  {
    stop("Connection attempt failed.")
  }
  # Looks like a valid connection channel was established. Record handle.
  # (The SqrlParam() setter returns the newly-assigned value, here the handle.)
  channel <- SqrlParam(datasource, "channel", channel$value)
  # Double-check. If the connection attempt was unsuccessful, halt and notify.
  if (!SqrlIsOpen(datasource))
  {
    stop("Connection attempt failed.")
  }
  # Scrape uid, dsn, and driver from the channel's connection attribute (in case
  # the user should have entered something new). Mis-scraping will not kill the
  # open channel, but it will produce an incorrect view in SqrlConfig(), and
  # will prevent network drop-out recovery in SqrlSubmit(). We blank the uid
  # parameter first, because if it does not appear in the channel's connection
  # string, then it could be anything (when contained within a DSN, perhaps).
  # (Assumes the attribute is a ';'-separated list of 'param=value' segments,
  # with optional brace-quoting of values — the usual ODBC form.)
  SqrlParam(datasource, "uid", "", override = TRUE)
  cstring <- attr(channel, "connection.string")
  cstrings <- unlist(strsplit(cstring, ';'))
  for (param in SqrlParams("scrapeable-channel"))
  {
    pattern <- paste0("^", param, "=")
    matches <- grepl(pattern, cstrings, ignore.case = TRUE)
    if (any(matches))
    {
      index <- which(matches)[1L]
      value <- trimws(sub(pattern, "", cstrings[index], ignore.case = TRUE))
      SqrlParam(datasource, param, trimws(gsub("^\\{|\\}$", "", value)),
                override = TRUE)
    }
  }
  # If no ping has been defined for this data source, attempt to find (set) one.
  if (is.null(SqrlParam(datasource, "ping")))
  {
    SqrlPing(datasource, set = TRUE)
  }
  # Return invisible NULL.
  return(invisible(NULL))
}
SqrlParam <- function(datasource = "",
parameter = "",
set,
override = FALSE,
isdefined = NULL)
{
# Gets and sets named SQRL/RODBC control parameters for a data source.
# Args:
# datasource : The name of a known (to SQRL) data source.
# parameter : The name of a SQRL or RODBC control parameter.
# set : The value to assign to that parameter (optional).
# override : If set to TRUE, open status does not block value changes.
# isdefined : If set to TRUE, return whether or not a value is defined.
# Returns:
# The value of the named parameter for the named data source. If the set
# argument is specified, then the new value is returned (invisibly) after
# its assignment to the parameter (new passwords are not returned).
# SQRL Calls:
# SqrlCache(), SqrlDefault(), SqrlIndicator(), SqrlInterface(),
# SqrlIsOpen(), SqrlParam() (self), SqrlParams().
# RODBC Calls:
# odbcDataSources().
# SQRL Callers:
# SqrlCache(), SqrlClose(), SqrlConfig(), SqrlDefault(), SqrlDelegate(),
# SqrlDSNs(), SqrlFile(), SqrlIndicator(), SqrlInterface(), SqrlIsOpen(),
# SqrlOpen(), SqrlParam() (self), SqrlPing(), SqrlProc(), SqrlShell(),
# SqrlStatement(), SqrlSource(), SqrlSubmit(), SqrlSubScript(),
# SqrlStatement(), SqrlValue(), sqrlInterface().
# User:
# Has no direct access, but is able to supply (only) parameter and set via
# SqrlDelegate() and/or SqrlConfig(), by way of SqrlValue(). SqrlDelegate()
# vets parameter while the SqrlConfig() does not (although it will restrict
# parameter to being a string, and is write-only). Neither vets set, and
# that must be performed here. (SqrlValue() merely passes-through.)
# Obtain a handle to the data source's SQRL cache.
cacheenvir <- SqrlCache(datasource)
# When the defined flag is either TRUE or FALSE, return only whether or not a
# (default-overriding) value has been set (exists) for the parameter.
if (!is.null(isdefined))
{
return(exists(parameter, cacheenvir, inherits = FALSE) == isdefined)
}
# When the parameter is 'reset', the set argument should be a vector of
# parameter names for which the default values are to be restored.
if (identical(parameter, "reset"))
{
# Coerce parameters to a character vector (sort() doesn't handle lists).
set <- as.character(unlist(set))
# Retain only those (unique) parameter names that are in the official list.
params <- sort(unique(set[set %in% SqrlParams("all")]))
# If we are left with no parameters to reset, return invisible NULL.
if (length(params) < 1L)
{
return(invisible(NULL))
}
# Construct a named list of the default values for those parameters.
news <- vector(mode(list()), length(params))
names(news) <- params
for (param in params)
{
news[param] <- list(SqrlDefault(datasource, param))
}
# Retain only those parameters for which a value has been set. Any others
# must necessarily already be at their defaults (resetting does nothing).
params <- params[params %in% SqrlParam(datasource, "*")]
# When all parameters are at their defaults, invisibly return them.
if (length(params) < 1L)
{
return(invisible(news))
}
# Abort if any of the supplied parameters are write-protected ('name')
# or read-only ('channel').
if (any(params %in% SqrlParams("write-protected"))
|| any(params %in% SqrlParams("read-only")))
{
stop("Cannot reset protected parameter.")
}
# If the connection is open, we cannot reset any locked-while-open
# parameters (abort if such a request has been made), and we must also
# change any visible indicators (if those parameters are to be reset).
# This is a bit of a kludge; here we set any visible indicators to values
# that are identical to their defaults, then (later, below) we remove those
# set values, leaving the actual defaults in place.
if (SqrlIsOpen(datasource))
{
if (!override
&& any(params %in% SqrlParams('locked-while-open')))
{
stop("Cannot reset parameter while connection is open.")
}
if ("visible" %in% params)
{
SqrlParam(datasource, "visible",
SqrlDefault(datasource, "visible"), override)
}
if (SqrlParam(datasource, "visible"))
{
if ("prompt" %in% params)
{
SqrlParam(datasource, "prompt",
SqrlDefault(datasource, "prompt"), override)
}
if ("wintitle" %in% params)
{
SqrlParam(datasource, "wintitle",
SqrlDefault(datasource, "wintitle"), override)
}
}
}
# Interface removal is a special case, handled by SqrlInterface().
# Failure to re-apply the original (default) interface is non-fatal.
if ("interface" %in% params)
{
SqrlInterface(datasource, "remove")
SqrlInterface(datasource, datasource, vital = FALSE)
news["interface"] <- list(SqrlParam(datasource, "interface"))
params <- params[params != "interface"]
if (length(params) < 1L)
{
return(invisible(news))
}
}
# Remove the parameter-value definitions (restores default values).
remove(list = params, pos = cacheenvir)
# Invisibly return the new parameter-values (i.e., their defaults). Default
# values are never secret or semi-secret, so these can go back to the user.
return(invisible(news))
}
# When no value is supplied for the set argument, act as a getter.
if (missing(set))
{
# Obtain the temporary parameter-values (environments) stack (a list).
pstack <- if (exists("pstack", cacheenvir, inherits = FALSE))
{
get("pstack", cacheenvir, inherits = FALSE)
} else
{
SqrlDefault(datasource, "pstack")
}
# If the stack itself was sought, return it.
if (parameter == "pstack")
{
return(pstack)
}
# Otherwise, extract the last environment from the temporary-values stack.
# This inherits from the previous environment (and so on, to the first).
pstack <- pstack[[length(pstack)]]
# When there is no set value (temporary or cached) for the parameter,
# return its default. Default values are never secret or semi-secret.
if (!exists(parameter, pstack, inherits = TRUE))
{
return(SqrlDefault(datasource, parameter))
}
# Take care with regard to whom we supply secret parameter values.
if (parameter %in% SqrlParams("secret"))
{
# If we don't see an internal call of this function (i.e., it appears to
# have been called from outside of the namespace), return the default.
calls <- gsub("\\(.*", "", .traceback(0L))
i <- which(calls == "SqrlParam")
if (length(i) < 1L)
{
return(SqrlDefault(datasource, parameter))
}
# Likewise, if we don't see who called this function, return the default.
i <- max(i) + 1L
if (i > length(calls))
{
return(SqrlDefault(datasource, parameter))
}
# If the caller is aware, return either the default value (if such has
# been set) or a dummy value (when some non-default value has been set).
if (calls[i] %in% SqrlParams("aware"))
{
value <- get(parameter, pstack, inherits = TRUE)
if (identical(value, SqrlDefault(datasource, parameter)))
{
return(value)
}
return("*")
}
# If the caller is neither aware nor informed, return the default value.
if (!(calls[i] %in% SqrlParams("informed")))
{
return(SqrlDefault(datasource, parameter))
}
# Take care with regard to whom we supply semi-secret parameter values.
} else if (parameter %in% SqrlParams("semi-secret"))
{
# If we don't see an internal call of this function (i.e., it appears to
# have been called from outside of the namespace), return the default.
calls <- gsub("\\(.*", "", .traceback(0L))
i <- which(calls == "SqrlParam")
if (length(i) < 1L)
{
return(SqrlDefault(datasource, parameter))
}
# Likewise, if we don't see who called this function, return the default.
i <- max(i) + 1L
if (i > length(calls))
{
return(SqrlDefault(datasource, parameter))
}
# If the caller is neither aware nor informed, return the default value.
if (!(calls[i] %in% c(SqrlParams("aware"), SqrlParams("informed"))))
{
return(SqrlDefault(datasource, parameter))
}
}
# The parameter is not secret, or the caller is allowed to know its value.
# Return the current (temporary or cached) value.
return(get(parameter, pstack, inherits = TRUE))
}
# The set argument has been supplied; act as a setter (cache and return).
# First, we coerce the raw value to the expected type for the parameter.
# Normal action is to set the permanent value of the named parameter.
istemp <- FALSE
targetenvir <- cacheenvir
# However, if the parameter is 'pstack' and the set value is named, then this
# is reinterpreted as a request to assign a temporary value (value-of-set) for
# the named parameter (name-of-set) into the most recent environment of the
# temporary values stack (pstack).
if ((parameter == "pstack")
&& !is.null(names(set)))
{
parameter <- names(set)
set <- set[[1L]]
istemp <- TRUE
pstack <- SqrlParam(datasource, "pstack")
targetenvir <- pstack[[length(pstack)]]
if ((length(parameter) != 1L)
|| !(parameter %in% SqrlParams("all")))
{
stop("Unrecognised parameter for temporary assignment.")
}
if (parameter %in% SqrlParams("no-temp-allowed"))
{
stop("Parameter does not support temporary values.")
}
}
# In the special case where the parameter is (still) 'pstack', we have a
# request to expand or contract the temporary-values environments stack.
if (parameter == "pstack")
{
pstack <- SqrlParam(datasource, parameter)
ptop <- length(pstack)
if (set == "expand")
{
pstack <- append(pstack, new.env(parent = pstack[[ptop]]))
} else if (length(pstack) >= 2L)
{
tpars <- objects(pstack[[ptop]])
verbose <- interactive() && SqrlParam(datasource, "verbose")
remove(list = tpars, pos = pstack[[ptop]])
pstack <- pstack[-ptop]
if ((length(tpars) > 0L)
&& verbose)
{
cat("\n")
for (tpar in tpars)
{
rval <- deparse(SqrlParam(datasource, tpar))
cat("Reverting:", tpar, "=", rval, "\n")
}
cat("\n")
}
}
assign(parameter, pstack, cacheenvir)
return(invisible(set))
}
# In the special case where the connection parameter has been specified as a
# character vector of named and/or unnamed elements, we collapse that vector
# to a single (connection) string. Where present, the vector element names
# become the connection-parameter names within the string.
if ((parameter == "connection")
&& identical(class(set), class(character()))
&& (length(set) > 0L)
&& !any(is.na(set)))
{
if (is.null(names(set))
|| !any(nzchar(names(set))))
{
set <- paste0(set, collapse = ";")
} else
{
set <- paste0(names(set), c("", "=")[nzchar(names(set)) + 1L], set,
collapse = ";")
}
}
# Nullable-string parameters are string-types which accept a set value of NULL
# as an alias for the empty string.
if ((parameter %in% SqrlParams("nullable-string"))
&& is.null(set))
{
set <- ""
}
# Coerce set to the appropriate data type for the specified parameter.
# Firstly, the channel parameter can be either NULL, or of RODBC class.
# This can be set frequently, when the autoclose parameter value is TRUE.
if (parameter %in% SqrlParams("rodbc/null-type"))
{
if (!is.null(set)
&& !identical(class(set), "RODBC"))
{
stop("New parameter value is not a connection handle.")
}
# Parameters that are (non-NA) character-strings. (These include all the
# scrapeable-channel parameters that may be set with each new channel.)
} else if (parameter %in% SqrlParams("string-type"))
{
set <- suppressWarnings(as.character(set))
if ((length(set) != 1L)
|| is.na(set))
{
stop("New parameter value is not a character string.")
}
# Parameters that are logically-valued.
} else if (parameter %in% SqrlParams("boolean-type"))
{
set <- suppressWarnings(as.logical(set))
if (!identical(set, TRUE)
&& !identical(set, FALSE))
{
stop("New parameter value not a logical singleton.")
}
# Parameters that are integer-valued.
} else if (parameter %in% SqrlParams("integer-type"))
{
set <- suppressWarnings(as.integer(set))
if ((length(set) != 1L)
|| is.na(set))
{
stop("New parameter value is not an integer.")
}
# The interface parameter can be character-valued or null-valued.
# Changing the parameter does not change the interface.
} else if (parameter %in% SqrlParams("string/null-type"))
{
if (!is.null(set))
{
set <- suppressWarnings(as.character(set))
if ((length(set) != 1L)
|| is.na(set))
{
stop("New parameter value is not a character string.")
}
}
# The na.strings parameter is a character vector of any length, including 0.
} else if (parameter %in% SqrlParams("character-type"))
{
set <- suppressWarnings(as.character(set))
# The as.is parameter can be a logical, numerical, or character vector.
} else if (parameter %in% SqrlParams("index-type"))
{
# This can be a logical (not NA), a natural number (integer or numeric
# form), a character string (valid name form), or a vector of the same.
# The integer and numeric classes are both of numeric mode.
if (!(is.logical(set)
|| is.numeric(set)
|| is.character(set))
|| any(is.na(set)))
{
stop("Parameter must be of logical, numeric, or character type.")
}
# The colQuote and tabQuote parameters can be either NULL, or character
# vectors of length 0, 1, or 2.
} else if (parameter %in% SqrlParams("quote-type"))
{
if (!is.null(set))
{
set <- suppressWarnings(as.character(set))
if ((length(set) > 2L)
|| any(is.na(set)))
{
stop("New parameter value is not a quotation specifier.")
}
}
# The nullstring parameter is a character string, possibly NA_character_.
} else if (parameter %in% SqrlParams("string/na-type"))
{
set <- suppressWarnings(as.character(set))
if (length(set) != 1L)
{
stop("New parameter value is not a character string.")
}
# Values of the result and library parameters cannot be directly set by the
# user, but NULL is taken to mean remove the current value, which is allowed.
} else if (parameter %in% SqrlParams("nullable-internal"))
{
if (!(override
|| is.null(set)))
{
stop("New parameter value is not NULL.")
}
# Prevent the user from assigning to any name that is not on SqrlParams()'s
# 'all' list. Internal functions may do so, provided the override flag is set.
} else if (!override)
{
stop("Unrecognised parameter.")
}
# We have an acceptable value of set; so now act as a setter (below).
# No further modification of the value occurs, other than whitespace trimming
# for the prompt and wintitle parameters.
# Prevent overwriting (changing) the channel while it is open, with the
# exception that a channel can be nullified (dropped) at any time.
if ((parameter == "channel")
&& exists(parameter, cacheenvir, inherits = FALSE)
&& !is.null(set)
&& SqrlIsOpen(datasource))
{
if (identical(set, SqrlParam(datasource, "channel")))
{
return(invisible(set))
}
stop("Channel cannot be changed while open.")
}
# Prevent changing write-protected parameter values.
if ((parameter %in% SqrlParams("write-protected"))
&& exists(parameter, cacheenvir, inherits = FALSE))
{
if (identical(set, SqrlParam(datasource, parameter)))
{
return(invisible(set))
}
stop("Parameter is write-protected.")
}
# Prevent changing RODBC::odbcConnect() parameters while connection is open.
# (Because those changes would only take effect on opening a new channel.)
# The _default_ values of these 'locked-while-open' parameters cannot be
# changed while the connection is open. Hence, it is permissible to replace
# a currently default value with an identical static value at any time.
# The override condition allows SqrlOpen() to alter some of these (to values
# the user may have entered) when the connection channel is first opened.
if (!override
&& (parameter %in% SqrlParams("locked-while-open"))
&& SqrlIsOpen(datasource))
{
# This shouldn't ever happen, but just in case.
if (istemp)
{
stop(paste0("Cannot set a temporary value for the '", parameter,
"' parameter."))
}
# Throw an error on an attempt to change the parameter value. Must also
# throw an error when attempting to set a secret or semi-secret parameter to
# the value it already has, or else the value could be discovered by trial
# and error.
if ((parameter %in% c(SqrlParams("secret"), SqrlParams("semi-secret")))
|| !identical(set, SqrlParam(datasource, parameter)))
{
stop("Parameter is locked while a connection is open.")
}
# Otherwise, if the current value is a default (when no static value is
# defined), set the (identical) new value as an equivalent static
# replacement (for the default).
if (!exists(parameter, cacheenvir, inherits = FALSE))
{
assign(parameter, set, targetenvir)
}
# Return the (unchanged) value.
return(invisible(set))
}
# The channel parameter is a special case, because we want to toggle the
# indicator state along with a change of channel existence (null/not).
if (parameter == "channel")
{
# This shouldn't ever happen, but just in case.
if (istemp)
{
stop("Cannot set a temporary value for the 'channel' parameter.")
}
# Current value of the channel parameter. NULL is no channel (closed),
# anything else is we think the channel is open (it may or may not be).
current <- SqrlParam(datasource, "channel")
# No channel to channel; show indicators (conditional on settings, mode).
if (is.null(current)
&& !is.null(set))
{
SqrlIndicator(datasource, "show")
# Channel to no channel; hide indicators (conditional on settings, mode)
} else if (!is.null(current)
&& is.null(set))
{
SqrlIndicator(datasource, "hide")
}
# Set the new value. Return it invisibly.
assign(parameter, set, targetenvir)
return(invisible(set))
}
# The connection parameter is a special case, since we want to extract further
# parameter values from it, if we can. This may fail if any of the parameter
# values contain = or ;, but none of the test systems allow these characters
# in DSNs, passwords, etc. Does any system? See related 'scrape' comments
# within SqrlOpen().
if (parameter == "connection")
{
# This shouldn't ever happen, but just in case.
if (istemp)
{
stop("Cannot set a temporary value for the 'connection' parameter.")
}
# Unless the connection string contains a DSN placeholder ('<dsn>'),
# delete any dsn definition.
if (!grepl("<dsn>", set))
{
SqrlParam(datasource, "reset", "dsn", override)
}
# RODBC::odbcConnect() likes to know the driver (from which it determines
# whether or not it's dealing with MySQL). While we're doing that, we may as
# well attempt to extract some other parameter values, first. We make sure
# the driver parameter is done last, because setting a value for dsn sets
# driver as a side effect, and we may want to override that.
spars <- SqrlParams("scrapeable-string")
for (param in c(spars[spars != "driver"], spars[spars == "driver"]))
{
if (grepl(paste0(param, "\\s*="), set, ignore.case = TRUE))
{
assignee <- paste0("^.*", param, "\\s*=")
value <- sub(assignee, "", set, ignore.case = TRUE)
value <- trimws(sub(";.*$", "", value))
# 'user' and 'username' are connection string aliases for 'uid'.
if (param %in% SqrlParams("uid-aliases"))
{
param <- "uid"
# 'password' is a connection string alias for 'pwd'.
} else if (param %in% SqrlParams("pwd-aliases"))
{
param <- "pwd"
}
# SQRL accepts <uid> (etc.) as connection string template place holders
# (to be replaced with current values at connection time). We don't want
# to override default or previous values with these.
if (value != paste0("<", param, ">"))
{
SqrlParam(datasource, param, value, override)
}
}
}
# Set the (unaltered) connection string, return invisibly.
assign(parameter, set, targetenvir)
return(invisible(set))
}
# Setting the dsn parameter is a special case, because we simultaneously
# reset the connection parameter unless the connection string contains a
# '<dsn>' placeholder. If the DSN is defined on the local system, then we
# also set the driver parameter to the DSN's value, as obtained from
# RODBC::odbcDataSources().
if (parameter == "dsn")
{
if (istemp)
{
stop("Cannot set a temporary value for the 'dsn' parameter.")
}
if (!grepl("<dsn>", SqrlParam(datasource, "connection")))
{
SqrlParam(datasource, "reset", "connection", override)
}
assign(parameter, set, targetenvir)
sources <- RODBC::odbcDataSources("all")
if ((nzchar(set))
&& (set %in% names(sources)))
{
SqrlParam(datasource, "driver", sources[set], override)
}
return(invisible(set))
}
# The prompt and wintitle parameters are special cases, because, if the old
# prompt or wintitle is currently visible, it must be removed before changing
# the parameter value, and then the new value must be applied.
if (parameter %in% c("prompt", "wintitle"))
{
if (istemp)
{
stop(paste0("Cannot set a temporary value for the '", parameter,
"' parameter."))
}
set <- trimws(set)
if (set != SqrlParam(datasource, parameter))
{
isopen <- SqrlIsOpen(datasource)
if (isopen)
{
SqrlIndicator(datasource, "hide", parameter)
}
assign(parameter, set, targetenvir)
if (isopen)
{
SqrlIndicator(datasource, "show", parameter)
}
}
return(invisible(set))
}
# The visible parameter is a special case, because, if the channel is open,
# both prompt and window title changes (addition or removal) must be made.
if (parameter == "visible")
{
if (istemp)
{
stop("Cannot set a temporary value for the 'visible' parameter.")
}
if (set != SqrlParam(datasource, "visible"))
{
isopen <- SqrlIsOpen(datasource)
if (isopen
&& !set)
{
SqrlIndicator(datasource, "hide")
}
assign(parameter, set, targetenvir)
if (isopen
&& set)
{
SqrlIndicator(datasource, "show")
}
}
return(invisible(set))
}
# The libstack parameter is a special case, because the set value is appended
# to the top layer of the existing stack (rather than replacing the stack).
if (parameter == "libstack")
{
# This shouldn't ever happen (unless the libstack storage mechanism is
# changed to having a library within each layer of the pstack stack), but
# we check here anyway, just in case.
if (istemp)
{
stop("Cannot alter procedures via <with>.")
}
# A NULL value is interpreted as a request to remove the stack.
if (is.null(set))
{
if (exists(parameter, cacheenvir, inherits = FALSE))
{
remove(list = parameter, pos = cacheenvir)
}
return(invisible())
}
# It is not possible to directly assign the lib[n][name] element within the
# cache environment, so we have to pull the stack pointer back here, modify
# the local copy, and then point the cache environment at this new copy.
lib <- SqrlParam(datasource, parameter)
# Unnamed strings are used as special stack-control values.
if (is.null(names(set)))
{
# When set is 'expand', add a new layer to the top of the stack (list).
if (set == "expand")
{
lib[[length(lib) + 1L]] <- character()
# Otherwise, set will be 'contract'; remove the top layer of the stack.
} else
{
lib[[length(lib)]] <- NULL
}
# Named strings are procedure definitions, to be added to the topmost layer
# of the stack.
} else
{
lib <- SqrlParam(datasource, parameter)
lib[[length(lib)]][names(set)] <- as.character(set)
}
assign(parameter, lib, cacheenvir)
return(invisible())
}
# The library parameter is a special case, because the set value is appended
# to the existing library (rather than replacing it).
if (parameter == "library")
{
# This shouldn't ever happen, but just in case.
if (istemp)
{
stop("Cannot alter library via <with>.")
}
# A NULL value is interpreted as a request to reset (empty) the library.
if (is.null(set))
{
return(SqrlParam(datasource, "reset", parameter))
}
# Otherwise, the value can only have come from SqrlFile(), and will be a
# named string (procedure definition). Add that definition to the library.
# It is not possible to directly assign the lib[name] element within the
# cache environment.
lib <- SqrlParam(datasource, parameter)
lib[names(set)] <- as.character(set)
lib <- lib[order(names(lib))]
assign(parameter, lib, cacheenvir)
return(invisible(set))
}
# For all other cases, set and (invisibly) return the new parameter value.
assign(parameter, set, targetenvir)
return(invisible(set))
}
SqrlParams <- function(group = "")
{
  # Returns any one of various useful parameter groupings.
  # Args:
  #   group : The (string) name (description) of a parameter group.
  # Returns:
  #   A character vector of the names of all parameters in the group.
  # SQRL Calls:
  #   None.
  # SQRL Callers:
  #   SqrlCache(), SqrlConfig(), SqrlDefile(), SqrlDelegate(), SqrlDSNs(),
  #   SqrlFile(), SqrlOpen(), SqrlParam(), SqrlSource(), SqrlSources(),
  #   SqrlValue(), sqrlAll().
  # User:
  #   Has no direct access, and is unable to supply the argument. Validity
  #   checks are not required.
  # Parameter-group definitions (find and return). The final (unnamed)
  # stop() element acts as the switch default, so an unmatched group name
  # raises an error rather than silently returning NULL.
  return(switch(group,
    # All public (user-visible) parameter names, whether RODBC or SQRL.
    "all" = c("aCollapse",
              "as.is",
              "autoclose",
              "believeNRows",
              "buffsize",
              "case",
              "channel",
              "colQuote",
              "connection",
              "DBMSencoding",
              "dec",
              "driver",
              "dsn",
              "errors",
              "interface",
              "interpretDot",
              "lCollapse",
              "library",
              "max",
              "na.strings",
              "name",
              "nullstring",
              "ping",
              "prompt",
              "pwd",
              "readOnlyOptimize",
              "result",
              "retry",
              "rows_at_time",
              "scdo",
              "stringsAsFactors",
              "tabQuote",
              "uid",
              "verbose",
              "visible",
              "wintitle"),
    # Functions (not parameters) allowed to know whether or not secrets exist.
    "aware" = c("SqrlValue"),
    # Parameters of Boolean-singleton type (TRUE/FALSE, not NA).
    "boolean-type" = c("autoclose",
                       "believeNRows",
                       "errors",
                       "interpretDot",
                       "readOnlyOptimize",
                       "retry",
                       "scdo",
                       "stringsAsFactors",
                       "verbose",
                       "visible"),
    # Parameters of character-vector type (any length, including zero).
    "character-type" = c("na.strings"),
    # Parameters not to copy when duplicating an existing SQRL data source.
    "don't-copy" = c("channel",
                     "interface",
                     "libstack",
                     "name",
                     "prompt",
                     "result",
                     "wintitle"),
    # Parameters of index type (logical, numerical, or character vectors).
    "index-type" = c("as.is"),
    # Functions (not parameters) allowed to know secrets.
    "informed" = c("SqrlOpen",
                   "SqrlSource"),
    # Parameters of integer-singleton type (not NA).
    "integer-type" = c("buffsize",
                       "max",
                       "rows_at_time"),
    # Parameters that cannot be changed while the connection channel is open.
    "locked-while-open" = c("believeNRows",
                            "case",
                            "colQuote",
                            "connection",
                            "DBMSencoding",
                            "driver",
                            "dsn",
                            "interpretDot",
                            "readOnlyOptimize",
                            "rows_at_time",
                            "tabQuote",
                            "uid"),
    # Parameters whose values are lists of named values.
    "named-values" = c("library"),
    # Parameters for which temporary working values cannot be assigned.
    "no-temp-allowed" = c("autoclose",
                          "channel",
                          "interface",
                          "library",
                          "name",
                          "prompt",
                          "result",
                          "visible",
                          "wintitle"),
    # Parameters the user can make NULL, but whose values are otherwise only
    # settable by private SQRL functions.
    "nullable-internal" = c("library",
                            "result"),
    # String-type parameters that accept NULL as an alias for the empty string.
    "nullable-string" = c("connection",
                          "DBMSencoding",
                          "driver",
                          "dsn",
                          "prompt",
                          "pwd",
                          "uid",
                          "wintitle"),
    # Parameters that are omitted from the SqrlConfig() configuration list.
    "omit-from-config" = c("result"),
    # Parameters to omit from the 'settings' subset of the configuration list.
    "omit-from-settings" = c("channel",
                             "connection",
                             "driver",
                             "dsn",
                             "library",
                             "name",
                             "ping",
                             "pwd",
                             "result",
                             "uid"),
    # Parameters that can be file-path valued (excluded from SqrlDefile()).
    "path-valued" = c("driver",
                      "dsn",
                      "library"),
    # Aliases for 'pwd' (within the 'scrapeable-string' parameter set).
    "pwd-aliases" = c("password"),
    # Parameters of quote type can be NULL, or character-vectors of length <= 2.
    "quote-type" = c("colQuote",
                     "tabQuote"),
    # Parameters that cannot be set (written) by the user.
    "read-only" = c("channel"),
    # Parameters that are of RODBC type (can be NULL valued).
    "rodbc/null-type" = c("channel"),
    # Parameters that can have their values scraped from an open channel object.
    "scrapeable-channel" = c("driver",
                             "dsn",
                             "uid"),
    # Parameters that can have their values scraped from a connection string.
    "scrapeable-string" = c("driver",
                            "dsn",
                            "password",
                            "pwd",
                            "uid",
                            "user",
                            "username"),
    # Parameters whose actual values are never returned to the user.
    "secret" = c("password",
                 "pwd"),
    # Parameters whose values may contain a secret component.
    "semi-secret" = c("connection"),
    # Parameters appearing in the data source summary table, in table column
    # order (not in alphabetical order).
    "source-table" = c("name",
                       "interface",
                       "open",
                       "driver"),
    # Keywords used for SQL script identification in SqrlDelegate().
    "sql-keywords" = c("select",
                       "create",
                       "drop",
                       "update",
                       "insert"),
    # Parameters that are of character-string (singleton) type (non-NA).
    "string-type" = c("aCollapse",
                      "case",
                      "connection",
                      "DBMSencoding",
                      "dec",
                      "driver",
                      "dsn",
                      "lCollapse",
                      "name",
                      "prompt",
                      "pwd",
                      "uid",
                      "wintitle"),
    # Parameters that are of character-string type, with NAs allowed.
    "string/na-type" = c("nullstring"),
    # Parameters that are of string type, or else can be NULL valued.
    "string/null-type" = c("interface",
                           "ping"),
    # Parameters that can take template-form within a connection string.
    "substitutable" = c("driver",
                        "dsn",
                        "pwd",
                        "uid"),
    # Aliases for 'uid' (within the 'scrapeable-string' parameter set).
    "uid-aliases" = c("user",
                      "username"),
    # Names to filter-out when obtaining DSNs.
    "unwanted-sources" = c("Access",
                           "dBASE",
                           "Excel"),
    # Parameters that are write-once (even by SQRL, not just the user).
    "write-protected" = c("name"),
    # Default branch: an unknown group name indicates an internal SQRL bug
    # (the user cannot supply the argument). This should never happen.
    stop("Unknown parameter group.")))
}
SqrlPath <- function(path)
{
  # Determines whether or not the argument is a path to an existing file.
  # Args:
  #   path : A possible file path, perhaps given as a list of components.
  # Returns:
  #   The normalised file path, when path appears to specify an existing
  #   readable (non-directory) file, or NULL, when it does not.
  # SQRL Calls:
  #   SqrlTry().
  # SQRL Callers:
  #   SqrlConfig(), SqrlDefile(), SqrlDelegate(), SqrlHelp(), SqrlSource().
  # User:
  #   Has no direct access, but is able to supply arguments(s) indirectly, via
  #   SqrlDelegate(). Unexpected input is silently caught.
  # Flatten the argument and collapse it into a single candidate string.
  attempt <- SqrlTry(paste0(unlist(path), collapse = ""), warn = FALSE)
  # Anything that cannot even be pasted cannot specify a file (return NULL).
  if (attempt$error)
  {
    return(NULL)
  }
  # A file path must be exactly one non-empty string (otherwise return NULL).
  candidate <- attempt$value
  if ((length(candidate) != 1L)
    || (nchar(candidate) < 1L))
  {
    return(NULL)
  }
  # Reject candidates that do not name an existing, read-accessible,
  # non-directory file (the 4L requests a read-permission test). Note that
  # files '.' and '..' exist as directories, and that file '"' exists but
  # is not read accessible.
  if (!file.exists(candidate)
    || (file.access(candidate, 4L) != 0L)
    || file.info(candidate)$isdir)
  {
    return(NULL)
  }
  # The candidate names a readable file; return its normalised path.
  return(normalizePath(candidate))
}
SqrlPing <- function(datasource,
                      set = FALSE)
{
  # Sets and submits 'ping' queries for testing source connection channels.
  # Args:
  #   datasource : The name of a known data source.
  #   set        : Whether to set a ping query, or to ping the source with one.
  # Returns:
  #   In set mode, the resulting ping query, as a character string. In ping
  #   mode, TRUE if the source responded (is connected to), FALSE otherwise.
  # SQRL Calls:
  #   SqrlIndicator(), SqrlParam(), SqrlPing() (self), SqrlTry().
  # RODBC Calls:
  #   odbcQuery(), sqlQuery().
  # SQRL Callers:
  #   SqrlIsOpen(), SqrlOpen(), SqrlPing() (self).
  # User:
  #   Has no direct access, and is unable to supply the arguments. No argument
  #   validity checking is required. The user can define the ping query itself.
  # Restriction:
  #   This function assumes the existence of an RODBC channel handle for the
  #   data source. That is, the value of the 'channel' parameter must be an
  #   RODBC channel handle, rather than NULL. In set mode, the channel should be
  #   open, in ping mode it need not be (the connection could have been dropped
  #   from the other end). This function should only be called immediately after
  #   the existence of such a handle has been established (as SqrlisOpen() and
  #   SqrlOpen() both do).
  # In set mode, attempt to find a 'ping' query that works with the data source.
  if (set)
  {
    # Here, we define some 'pings', being very simple SQL statements. These are
    # used to ping the data source; confirming we're still connected when we get
    # the expected result back (or telling us we've lost the connection when we
    # don't). These, alas, are vendor dependent, so we have to guess, trial, and
    # see what works. Some vendor-independent method would be vastly preferable.
    # Ping for MySQL, PostgreSQL, SQL Server, SQLite, Teradata.
    p1 <- "select 1"
    # Ping for Oracle, MySQL, DB2.
    p2 <- "select 1 from dual"
    # Ping for Oracle, DB2.
    p3 <- "begin null; end;"
    # Arrange the pings into best-guess-first order, according to the driver
    # (case-insensitive substring match on the driver parameter's value).
    pings <- c(p1, p2, p3)
    driver <- tolower(SqrlParam(datasource, "driver"))
    if (grepl("oracle", driver, fixed = TRUE)
      || grepl("db2", driver, fixed = TRUE))
    {
      pings <- pings[c(3L, 2L, 1L)]
    }
    # Try each ping, in (driver-dependent) order of decreasing preference, until
    # we find a ping that works (is valid SQL for the data source). These could
    # also fail if the connection has been unexpectedly closed.
    for (ping in pings)
    {
      SqrlParam(datasource, "ping", ping)
      if (SqrlPing(datasource))
      {
        return(SqrlParam(datasource, "ping"))
      }
    }
    # Did not find a ping that works. Set and return the empty string. This
    # causes the pinging system to submit a junk query and scan the response for
    # error terms that suggest a lost connection (not completely reliable).
    return(SqrlParam(datasource, "ping", ""))
  }
  # In normal operation (not set mode), submit this query and see what happens.
  ping <- SqrlParam(datasource, "ping")
  # When we have a ping query, submit it to the driver and look for an error.
  # (A NULL or empty ping falls through to the junk-query heuristic, below.)
  if (!is.null(ping)
    && nzchar(ping))
  {
    # If RODBC::odbcQuery() is available, that's our preferred method. According
    # to the RODBC manual, it returns 1L on success, and -1L on failure, but is
    # 'likely to be confined to the 'RODBC' namespace in the near future'.
    if ("odbcQuery" %in% getNamespaceExports("RODBC"))
    {
      # Append ping-in-progress marker to the window-title connection indicator.
      SqrlIndicator(datasource, "ping")
      # Submit the ping query, and retrieve the status code (which takes either
      # of two values: 1L for success, or -1L for failure). Warnings are
      # suppressed (warn = FALSE); only the error status matters here.
      s <- SqrlTry(RODBC::odbcQuery(
                      channel = SqrlParam(datasource, "channel"),
                      query = ping,
                      rows_at_time = SqrlParam(datasource, "rows_at_time")),
                    warn = FALSE)
      # Remove ping-in-progress marker from the window-title indicator.
      SqrlIndicator(datasource, "done")
      # An error suggests the connection is closed; return FALSE.
      # No error implies the connection is open; return TRUE.
      return(!(s$error || (s$value == -1L)))
    }
    # Otherwise, use RODBC::sqlQuery(), with errors = FALSE, and as.is = TRUE.
    # The as.is setting makes the result indifferent to most of the other
    # parameter values. With errors = FALSE, the function returns integer -1 on
    # failure, and something else otherwise (a character vector or data frame).
    SqrlIndicator(datasource, "ping")
    s <- SqrlTry(RODBC::sqlQuery(
                    channel = SqrlParam(datasource, "channel"),
                    query = ping,
                    errors = FALSE,
                    as.is = TRUE,
                    max = SqrlParam(datasource, "max"),
                    buffsize = SqrlParam(datasource, "buffsize"),
                    nullstring = SqrlParam(datasource, "nullstring"),
                    na.strings = SqrlParam(datasource, "na.strings"),
                    believeNRows = SqrlParam(datasource, "believeNRows"),
                    dec = SqrlParam(datasource, "dec"),
                    stringsAsFactors = FALSE,
                    rows_at_time = SqrlParam(datasource, "rows_at_time")),
                  warn = FALSE)
    SqrlIndicator(datasource, "done")
    return(!(s$error || identical(s$value, -1L)))
  }
  # In the absence of a ping query, submit a junk statement in an attempt to
  # cause the driver to generate an error message that indicates whether or not
  # that query was received by the source. This will not be completely reliable.
  SqrlIndicator(datasource, "ping")
  s <- SqrlTry(RODBC::sqlQuery(
                  channel = SqrlParam(datasource, "channel"),
                  query = "junk",
                  errors = TRUE,
                  as.is = TRUE,
                  max = SqrlParam(datasource, "max"),
                  buffsize = SqrlParam(datasource, "buffsize"),
                  nullstring = SqrlParam(datasource, "nullstring"),
                  na.strings = SqrlParam(datasource, "na.strings"),
                  believeNRows = SqrlParam(datasource, "believeNRows"),
                  dec = SqrlParam(datasource, "dec"),
                  stringsAsFactors = FALSE,
                  rows_at_time = SqrlParam(datasource, "rows_at_time")),
                warn = FALSE)
  SqrlIndicator(datasource, "done")
  # The error message should be a character vector. If we got something else,
  # take the connection to be closed (it probably is, but might not be).
  if (s$error
    || !identical(class(s$value), class(character()))
    || (length(s$value) == 0L))
  {
    return(FALSE)
  }
  # If the error message appears to indicate a socket error or closed
  # connection, assume that's the case (although we might be mistaken).
  # NOTE(review): this keyword scan is a heuristic; a driver error message
  # that merely mentions one of these words will be misread as a closure.
  for (word in c("sock", "libc", "connection", "reset", "open", "closed"))
  {
    if (any(grepl(word, s$value, fixed = TRUE)))
    {
      return(FALSE)
    }
  }
  # Otherwise, the error message appears to arise from the junk query arriving
  # at the source, in which case we take the connection to be open.
  return(TRUE)
}
SqrlPL <- function(state = NULL,
                   phrase = "")
{
  # Detects procedural (PL) script and tracks parser progress through the same.
  # Args:
  #   state  : A list of named procedural-language (PL) marker counts.
  #   phrase : A SQL fragment to scan for PL markers.
  # Returns:
  #   An updated state list (or a fresh state list, when state is NULL).
  # SQRL Calls:
  #   None.
  # SQRL Callers:
  #   SqrlFile().
  # User:
  #   Has no direct access. The user is able to supply phrase via their SQL
  #   script, but only by way of SqrlFile(), which will ensure that phrase is a
  #   single string that does not contain any SQL comment or quoted literal.
  #   The user is unable to supply the state argument. No argument validity
  #   checking is required.
  # This function is fallible. Procedural language extensions appear in Oracle,
  # DB2, Transact, Teradata, MySQL, Postgres, and many others. The nestable
  # 'begin ... end;' syntax is common, but each DBMS has its own optional
  # phrases beforehand. What's a keyword in one, may be a valid column or
  # variable name in another. Should PL parsing fail, the scdo parameter can
  # be set to FALSE (to submit only upon a <do> or <result> tag).
  # When called without a state, initialise and return a new one.
  if (is.null(state))
  {
    return(list(block = FALSE, begins = 0L, ends = 0L))
  }
  # Double every word-break character, so that gregexpr() (which only finds
  # disjoint matches) cannot miss keywords that share a delimiter. Without
  # this, 'end;end' would count as a single end (both matches need the ';').
  # Fortunately, gsub() does not re-double characters.
  padded <- gsub("([^[:alnum:]@#$_])", "\\1\\1", phrase)
  # Local helper; the number of (disjoint) matches of pattern within padded.
  matches <- function(pattern)
  {
    sum(gregexpr(pattern, padded, ignore.case = TRUE)[[1L]] > 0L)
  }
  # Tally instances of 'begin' (mandatory within most PL blocks).
  state$begins <- state$begins + matches(
    "(^|[^[:alnum:]@#$_])begin([^[:alnum:]@#$_]|$)")
  # Tally instances of 'end' followed by a semicolon. The semicolon
  # distinguishes block ends from 'end loop' and 'end if', although this
  # will fail if a comment sits between the end and the ';'.
  state$ends <- state$ends + matches("(^|[^[:alnum:]@#$_])end\\s*;")
  # Unless we already believe we're inside a PL block, decide whether we now
  # are; either because a 'begin' has been seen, or because the phrase holds
  # one of the optional PL key phrases that precede a (mandatory) begin.
  if (!state$block)
  {
    state$block <- (state$begins > 0L) || grepl(paste0(
      "(^|[^[:alnum:]@#$_])(declare|((create|replace)\\s+",
      "(function|package|procedure|trigger|type)))([^[:alnum:]@#$_]|$)"),
      phrase, ignore.case = TRUE)
  }
  # Return the updated state list.
  return(state)
}
SqrlProc <- function(datasource,
                     proc)
{
  # Retrieves a stored procedure by its name.
  # Args:
  #   datasource : The name of data source, as known to SQRL.
  #   proc       : A possible stored-procedure name, perhaps as components.
  # Returns:
  #   The definition of the named procedure, as a character string. When no
  #   procedure matches the supplied name, NULL is returned instead.
  # SQRL Calls:
  #   SqrlParam(), SqrlTry().
  # SQRL Callers:
  #   SqrlDelegate().
  # User:
  #   Has no direct access. Is able to supply (only) the proc argument, via
  #   SqrlDelegate(). Exceptions from unexpected input are silently caught.
  # The name may arrive in components; flatten it to a single string.
  flat <- SqrlTry(paste0(unlist(proc), collapse = ""), warn = FALSE)
  # A pasting failure means proc cannot name a procedure (return NULL).
  if (flat$error)
  {
    return(NULL)
  }
  proc <- flat$value
  # Anything other than a single non-empty string cannot name a procedure.
  if ((length(proc) != 1L)
      || (nchar(proc) < 1L))
  {
    return(NULL)
  }
  # Search the temporary stack (if any), from its top (most recent) downward,
  # returning the first procedure definition bearing a matching name.
  stack <- SqrlParam(datasource, "libstack")
  for (level in rev(seq_along(stack)))
  {
    if (proc %in% names(stack[[level]]))
    {
      return(stack[[level]][proc])
    }
  }
  # Not on the stack; fall back to the main library, and return the matching
  # definition should one exist there.
  mainlib <- SqrlParam(datasource, "library")
  if (proc %in% names(mainlib))
  {
    return(mainlib[proc])
  }
  # The name does not refer to any stored procedure. Return NULL.
  return(NULL)
}
SqrlShell <- function(datasource = "",
                      envir = parent.frame(),
                      args.list)
{
  # Relays commands from public interface functions to the private interpreter.
  # Args:
  #   datasource : The name of a known data source.
  #   envir      : An R environment, from which variables are inherited.
  #   args.list  : A list of arguments, to be interpreted and actioned.
  # Returns:
  #   The result of the command (frequently a data frame, string or list).
  # SQRL Calls:
  #   SqrlCache(), SqrlClose(), SqrlDelegate(), SqrlParam(), SqrlTry().
  # SQRL Callers:
  #   SqrlAll() (and data source interfaces).
  # User:
  #   User has no direct access, but is able to supply (only) the args.list
  #   argument from sqrlAll() and/or any data source interface functions).
  #   Since args.list is unrestricted (it could be SQL), no argument validity
  #   checking is performed here.
  # When autoclose is TRUE, always close any open connection upon exiting this
  # function in any manner (including when an error has been thrown somewhere).
  on.exit(
    if (SqrlCache(datasource, exists = TRUE)
        && SqrlParam(datasource, "autoclose"))
    {
      SqrlClose(datasource)
    })
  # Hand the arguments to SqrlDelegate() for interpretation and evaluation,
  # trapping any error, and capturing the result's intended visibility.
  outcome <- SqrlTry(withVisible(SqrlDelegate(datasource, envir, args.list)))
  # On error, re-throw with a concise call; the top-level interface function
  # when one is defined, otherwise the data source name. Either way, the user
  # sees that rather than the originating internal function and its arguments.
  if (outcome$error)
  {
    iface <- SqrlParam(datasource, "interface")
    feigned <- if (is.null(iface)) datasource else paste0(iface, "(...)")
    conjured <- parse(text = feigned, keep.source = FALSE)
    stop(simpleError(outcome$value, conjured[[1L]]))
  }
  # No error occurred. Return the result, with its intended visibility.
  result <- outcome$value
  if (result$visible)
  {
    return(result$value)
  }
  return(invisible(result$value))
}
SqrlStatement <- function(datasource,
                          parts)
{
  # Constructs a SQL statement from the components supplied.
  # Args:
  #   datasource : The name of a SQRL data source.
  #   parts      : A list of components, constituting a SQL statement.
  # Returns:
  #   The corresponding SQL statement. Differs from paste() in that lists are
  #   rewritten in comma-separated form and vectors in newline-separated form.
  # SQRL Calls:
  #   SqrlParam().
  # SQRL Callers:
  #   SqrlDelegate(), SqrlFile().
  # User:
  #   Has no direct access. Can supply the only argument, via SqrlDelegate().
  #   In the event that the argument contains an object that cannot be pasted,
  #   all calls of this function are wrapped in try().
  # As above, this function is only (directly) called from SqrlDelegate() and
  # SqrlFile(). Both (only) supply objects wrapped inside of a list.
  # The atomic (vector) collapse and list collapse characters, respectively.
  vec.sep <- SqrlParam(datasource, "aCollapse")
  list.sep <- SqrlParam(datasource, "lCollapse")
  # Recurse over the list, collapsing each ultimate (atomic) object to one
  # string, then collapse the resulting strings to a single statement.
  strings <- rapply(parts, paste0, how = "unlist", collapse = vec.sep)
  return(paste0(strings, collapse = list.sep))
}
SqrlSource <- function(def)
{
  # Defines (or re-defines) a data source and its interface.
  # Args:
  #   def : A source name and definition (string or file), in that order.
  # Returns:
  #   The source's resulting configuration (a list, from SqrlConfig()),
  #   invisibly, after creating, or re-defining, the source and its interface.
  # SQRL Calls:
  #   SqrlCache(), SqrlConfig(), SqrlDefile(), SqrlInterface(), SqrlIsOpen(),
  #   SqrlParam(), SqrlParams(), SqrlPath(), SqrlTry().
  # SQRL Callers:
  #   sqrlSource().
  # User:
  #   Has no direct access. Can supply the argument via sqrlSource() (only).
  #   That function guarantees the existence of at least either two terms or
  #   one named term. Additional checks (assignability, conflict, etc.) are
  #   performed here.
  # Separate the name from the definition component(s). When there is only one
  # term, we use it's name. When there is more than one term, we use the first
  # term as the name if that term is not itself named. If it is named, we look
  # instead for a unique term named 'name', and use that if it exists.
  if (length(def) == 1L)
  {
    # A single term; sqrlSource() guarantees it is named (the source name).
    name <- trimws(names(def))
    names(def) <- NULL
  } else if (is.null(names(def))
             || !nzchar(names(def)[1L]))
  {
    # Multiple terms with an unnamed first term; that term is the source name.
    name <- trimws(def[[1L]])
    def[[1L]] <- NULL
  } else if ("name" %in% names(def))
  {
    # Multiple named terms; a unique 'name' term supplies the source name.
    i <- which(names(def) == "name")
    if (length(i) > 1L)
    {
      stop("Multiple 'name' terms.")
    }
    name <- trimws(def[[i]])
    def[[i]] <- NULL
  } else
  {
    stop("Could not identify the intended source name.")
  }
  # Ensure either all terms are named, or that no term is named. When the terms
  # are named, ensure all names are different (unique).
  if (!is.null(names(def)))
  {
    isnamed <- nzchar(names(def))
    if (!any(isnamed))
    {
      names(def) <- NULL
    } else if (!all(isnamed))
    {
      stop("Mixture of named and unnamed arguments.")
    } else if (length(unique(names(def))) != length(names(def)))
    {
      stop("Duplicated argument names.")
    }
  }
  # Accept source = NULL as an alias for remove = source.
  if ((length(def) == 1L)
      && is.null(def[[1L]]))
  {
    def <- list(name)
    name <- "remove"
  }
  # If the name is 'remove', treat the definition as a list of names of sources
  # to be removed (deregistered from SQRL). Do that, then return invisible NULL.
  # Non-existent sources are quietly skipped (no error is thrown).
  if (name == "remove")
  {
    datasources <- unique(as.character(unlist(def)))
    datasources <- datasources[datasources %in% SqrlCache("*")]
    for (datasource in datasources)
    {
      SqrlCache(datasource, delete = TRUE)
    }
    return(invisible(NULL))
  }
  # Abort if the source name is unassignable (could not be an R object name).
  if (name != make.names(name))
  {
    stop("Unassignable data-source name.")
  }
  # Prohibit redefinition of open sources. Always remove a preexisting source,
  # so that SqrlSource() begins from (defines onto) a clean slate.
  if (SqrlCache(name, exists = TRUE))
  {
    if (SqrlIsOpen(name))
    {
      stop("Cannot redefine an open source.")
    }
    SqrlCache(name, delete = TRUE)
  }
  # When none of the terms are named, establish the implied name (and value),
  # according to a sequential hierarchy.
  if (is.null(names(def)))
  {
    # If the terms specify the path of a readable file, interpret them as a
    # request to define and configure a source from that file.
    def <- as.character(unlist(def))
    path <- SqrlPath(def)
    if (!is.null(path))
    {
      def <- list(config = path)
    # Otherwise, if there is only one term and it names an existing source,
    # interpret it as a request to make a copy of that source.
    } else if ((length(def) == 1L)
               && SqrlCache(def, exists = TRUE))
    {
      def <- list(copy = def)
    # Otherwise, if there are multiple terms, or if any term (string) contains
    # an equals sign, interpret them as components of a connection string.
    } else if ((length(def) > 1L)
               || any(grepl("=", def)))
    {
      # Strip any trailing semicolons before joining with semicolons.
      def <- sub(";$", "", def)
      def <- list(connection = paste0(def, collapse = ";"))
    # Otherwise, there is only one term (string), it does not contain an equals
    # sign, and does not name an existing source; interpret it as a DSN.
    } else
    {
      def <- list(dsn = def)
    }
  }
  # Abort if an original source (to be copied) has been specified, but that
  # source does not exist within the SQRL cache.
  if (("copy" %in% names(def))
      && !(def$copy %in% SqrlCache("*")))
  {
    stop("Copy source original not found.")
  }
  # Abort if a configuration file has been specified, but that file cannot be
  # read (including file does not exist). We will miss this here when the file
  # path has been specified in a list, but SqrlConfig() will pick that up later.
  if (("config" %in% names(def))
      && !identical(class(def["config"]), class(list()))
      && is.null(SqrlPath(def["config"])))
  {
    stop("Cannot read the config file.")
  }
  # When the defining terms do not include a 'copy', 'config', or 'connection',
  # there is no possibility of a single term specifying a connection string. If,
  # additionally, we do not have a 'dsn' term, or if one of the terms does not
  # correspond to a SQRL/RODBC parameter, then we interpret all of the terms as
  # connection-string components, and construct the string from them.
  if (!any(c("copy", "config", "connection") %in% names(def))
      && (!("dsn" %in% names(def))
          || !all(names(def) %in% SqrlParams("all"))))
  {
    def <- paste0(names(def), "=", sub(";$", "", def))
    def <- list(connection = paste0(def, collapse = ";"))
  }
  # Create a fresh cache for the new data source (if it previously existed, it
  # will have been deleted).
  SqrlCache(name, create = TRUE)
  # If we have a 'copy' term, perform the copy operation first (so that any
  # other terms will subsequently override copied values).
  if ("copy" %in% names(def))
  {
    # This only returns the set (non-default) parameters. Names are unique.
    params <- SqrlParam(def$copy, "*")
    # Don't copy parameters we shouldn't (name, interface, etc.).
    params <- params[!(params %in% SqrlParams("don't-copy"))]
    # If the original source has a library, copy it to the new source,
    # procedure by procedure (each under its own name).
    if ("library" %in% params)
    {
      SqrlParam(name, "reset", "library")
      lib <- SqrlParam(def$copy, "library")
      for (proc in names(lib))
      {
        script <- lib[[proc]]
        names(script) <- proc
        SqrlParam(name, "library", script, override = TRUE)
      }
      params <- params[params != "library"]
    }
    # Copy driver last, in case dsn was copied (and set a value for driver).
    # Secrets are copied without loss, because SqrlSource() is informed.
    params <- c(params[params != "driver"], params[params == "driver"])
    for (param in params)
    {
      SqrlParam(name, param, SqrlParam(def$copy, param))
    }
    # If the original driver wasn't set, ensure the copy's driver is also
    # undefined (in case dsn was defined and a copy driver has been set).
    if (!("driver" %in% params))
    {
      SqrlParam(name, "reset", "driver")
    }
  }
  # If we have a 'config' term, attempt to configure the source from the config
  # file. Values in the file override any vales that may already have been
  # copied from another source. The incomplete source is deleted on error. Note
  # that def$config might be a file path (potentially in component form), or a
  # list of (named) parameter = value pairs. SqrlConfig() will identify which
  # (or neither) is the case, and handle appropriately.
  if ("config" %in% names(def))
  {
    result <- SqrlTry(SqrlConfig(name, def$config))
    if (result$error)
    {
      SqrlCache(name, delete = TRUE)
      stop(result$value)
    }
  }
  # If we have an 'interface' term, attempt to apply the specified name. This
  # overrides any value that may have been set via config file. The incomplete
  # source is deleted upon error.
  if ("interface" %in% names(def))
  {
    result <- SqrlTry(
      SqrlInterface(name, SqrlDefile("interface", def$interface)))
    if (result$error)
    {
      SqrlCache(name, delete = TRUE)
      stop(result$value)
    }
  }
  # Iterate over all other terms (besides 'copy', 'config', and 'interface'),
  # treating each as a SQRL/RODBC parameter (driver last, as for 'copy' above).
  # The incomplete source is deleted upon any error. The uniqueness of names
  # has been asserted, above.
  params <- names(def)
  params <- params[!(params %in% c("copy", "config", "interface"))]
  params <- c(params[params != "driver"], params[params == "driver"])
  for (param in params)
  {
    result <- SqrlTry(SqrlParam(name, param, SqrlDefile(param, def[[param]])))
    if (result$error)
    {
      SqrlCache(name, delete = TRUE)
      stop(result$value)
    }
  }
  # If no interface has been defined, attempt to apply the source name. The
  # incomplete source will be deleted if this is not possible.
  if (SqrlParam(name, "interface", isdefined = FALSE))
  {
    result <- SqrlTry(SqrlInterface(name, name))
    if (result$error)
    {
      SqrlCache(name, delete = TRUE)
      stop(result$value)
    }
  }
  # Return the source's configuration, invisibly.
  return(invisible(SqrlConfig(name)))
}
SqrlSources <- function(import = "")
{
  # Returns a summary table of defined sources.
  # Args:
  #   import : Specifies the class of DSNs to import (default is do not
  #            import). The value 'remove' instead deregisters all sources.
  # Returns:
  #   A data frame summarising locally defined data sources. There is no
  #   guarantee that any of these sources are presently available, or even
  #   that they exist. The data frame may be empty (have zero rows).
  # SQRL Calls:
  #   SqrlAll(), SqrlCache(), SqrlDSNs(), SqrlIsOpen(), SqrlParams(),
  #   SqrlValue().
  # SQRL Callers:
  #   SqrlDelegate(), sqrlSources().
  # User:
  #   The user has no direct access, but is able to supply the argument via
  #   sqrlSources(), which vets it as being one of "", "all", "user", or
  #   "system". Further argument validity checking is not required.
  # A 'remove' argument deregisters (deletes) every source; nothing to report.
  if (import == "remove")
  {
    SqrlAll(list("remove"), envir = parent.frame())
    return(invisible(NULL))
  }
  # Any other non-empty argument names a class of DSNs; import those.
  if (nchar(import) > 0L)
  {
    SqrlDSNs(import)
  }
  # Gather one column per summary parameter. Each column is seeded with an
  # empty character vector, so that a later unlist() always yields character
  # data (even when every entry is NA).
  fields <- SqrlParams("source-table")
  columns <- list()
  for (field in fields)
  {
    columns[[field]] <- list(character(0L))
  }
  # Append each source's entry to every column.
  for (source in SqrlCache("*"))
  {
    for (field in fields)
    {
      if (field == "open")
      {
        # Open status ('Y'/'N'), from a quick (no-ping) connection check.
        entry <- c("N", "Y")[SqrlIsOpen(source, besure = FALSE) + 1L]
      } else
      {
        entry <- SqrlValue(source, field)
        if (is.null(entry))
        {
          entry <- NA
        }
      }
      columns[[field]] <- append(columns[[field]], entry)
    }
  }
  # Flatten each column, then bind the columns into a data frame, sorted on
  # the first column (source name), with clean row names.
  for (field in fields)
  {
    columns[[field]] <- unlist(columns[[field]])
  }
  overview <- as.data.frame(columns, stringsAsFactors = FALSE)
  overview <- overview[order(overview[, 1L]), ]
  rownames(overview) <- NULL
  return(overview)
}
SqrlSubmit <- function(datasource,
                       statement,
                       retry = TRUE)
{
  # Submit a SQL statement to a connected data source.
  # Args:
  #   datasource : The name of a known data source.
  #   statement  : A SQL statement (as a single character string).
  #   retry      : When set to FALSE, do not resubmit on failure. Passed as
  #                FALSE on recursive (second-attempt) calls, so that at most
  #                one resubmission is ever made (no infinite recursion).
  # Returns:
  #   Result of submitting the statement (typically a data frame).
  # SQRL Calls:
  #   SqrlIndicator(), SqrlIsOpen(), SqrlOpen(), SqrlParam(),
  #   SqrlSubmit() (self), SqrlTry().
  # RODBC Calls:
  #   odbcGetErrMsg, odbcQuery, sqlGetResults(), sqlQuery().
  # SQRL Callers:
  #   SqrlDelegate(), SqrlSubmit() (self), SqrlSubScript().
  # User:
  #   Has no direct access. Is able to supply (only) the statement argument (a
  #   string), via SqrlSubScript(). No further checks are required.
  # If the statement is empty (contains no visible characters), return NULL
  # (emulates no-query in any SQL).
  # Now that all queries go via the parser, this should never happen (everything
  # comes in from SqrlSubscript(), which already performs this operation).
  if (!grepl("[[:graph:]]", statement))
  {
    return(NULL)
  }
  # Abort, unless an open channel exists, or can be established, to the data
  # source. This is not a ping check, so the channel might still be closed.
  if (!SqrlIsOpen(datasource))
  {
    SqrlOpen(datasource)
    if (!SqrlIsOpen(datasource))
    {
      stop("Connection attempt failed.")
    }
  }
  # Our preferred method is to submit the statement via RODBC::odbcQuery(), and
  # then fetch the results via RODBC::sqlGetResults(). However, the RODBC manual
  # states that odbcQuery() is 'likely to be confined to the "RODBC" namespace
  # in the near future'. The same issue applies to RODBC::odbcGetErrMsg(), so
  # first we check these functions are available (odbcGetErrMsg() is needed
  # only when the 'errors' parameter is TRUE).
  rodbc <- getNamespaceExports("RODBC")
  if (("odbcQuery" %in% rodbc)
      && (!SqrlParam(datasource, "errors")
          || ("odbcGetErrMsg" %in% rodbc)))
  {
    # Append query-in-progress marker to the window-title connection indicator.
    SqrlIndicator(datasource, "query")
    # Submit the query, and retrieve the exit code (+1 = success, -1 = failure).
    status <- SqrlTry(RODBC::odbcQuery(
      channel = SqrlParam(datasource, "channel"),
      query = statement,
      rows_at_time = SqrlParam(datasource, "rows_at_time")))
    # Remove query-in-progress marker from the window-title indicator.
    SqrlIndicator(datasource, "done")
    # Two modes of failure exist; RODBC::odbcQuery() could throw an error, or
    # else it could cleanly return its failure code (-1L). Should one occur, we
    # either try again, throw the error, or return the error message.
    if (status$error
        || (status$value == -1L))
    {
      # If we might need the ODBC error message, we'd better retrieve it now,
      # because SqrlIsOpen() pings the source (below), destroying that message.
      # If RODBC::odbcGetErrMsg() should fail here, we get the error message
      # for that failure, instead of the original RODBC::odbcQuery() message.
      if (!status$error
          && SqrlParam(datasource, "errors"))
      {
        error <- SqrlTry(RODBC::odbcGetErrMsg(SqrlParam(datasource, "channel")))
        status$value <- paste0(error$value, collapse = "\n")
      }
      # If this was a first attempt (retry = TRUE), and second attempts are
      # enabled (the retry parameter is also TRUE), and a ping of the source
      # reveals the connection to have been dropped, then we infer that was the
      # cause of the error, and make one more attempt (only). That will involve
      # opening a new channel, which might prompt the user for authentication.
      # This mechanism provides a (very) limited ability to recover from network
      # drop-outs, but it cannot restore temporary tables.
      if (retry
          && SqrlParam(datasource, "retry")
          && !SqrlIsOpen(datasource, besure = TRUE))
      {
        return(SqrlSubmit(datasource, statement, retry = FALSE))
      }
      # Otherwise, we do not make another attempt. When RODBC::odbcQuery() threw
      # an error, or when the 'errors' parameter is TRUE, we throw the error. In
      # the latter case, this has the effect of promoting ODBC failure messages
      # to local R exceptions (unlike RODBC, which simply returns the messages
      # as character strings).
      if (status$error
          || SqrlParam(datasource, "errors"))
      {
        stop(status$value)
      }
      # Otherwise (the 'errors' parameter is FALSE), return the error message
      # (as a character string, without raising an exception).
      return(status$value)
    }
    # The query has succeeded, but we have not yet retrieved the result of it.
    # Append fetch-in-progress marker to the window-title connection indicator.
    SqrlIndicator(datasource, "fetch")
    # Retrieve the data. If a connection error occurs here, we cannot easily
    # recover without re-submitting the query, since pinging the source will
    # destroy the waiting rows.
    result <- SqrlTry(
      RODBC::sqlGetResults(channel = SqrlParam(datasource, "channel"),
                           as.is = SqrlParam(datasource, "as.is"),
                           errors = SqrlParam(datasource, "errors"),
                           max = SqrlParam(datasource, "max"),
                           buffsize = SqrlParam(datasource, "buffsize"),
                           nullstring = SqrlParam(datasource, "nullstring"),
                           na.strings = SqrlParam(datasource, "na.strings"),
                           believeNRows = SqrlParam(datasource, "believeNRows"),
                           dec = SqrlParam(datasource, "dec"),
                           stringsAsFactors = SqrlParam(datasource, "stringsAsFactors")))
    # Remove fetch-in-progress marker from the window-title indicator.
    SqrlIndicator(datasource, "done")
    # If RODBC::sqlGetResults() threw an error, or if it appears to have cleanly
    # returned an error message (character vector of length > 1) or an integer
    # error code, either try again, throw the exception, or return the result
    # (potentially an error message or code).
    if (result$error
        || identical(class(result$value), class(integer()))
        || (identical(class(result$value), class(character()))
            && (length(result$value) > 1L)))
    {
      # If the failure appears to have been caused by a lost connection, and
      # this is our first attempt, and the retry parameter is TRUE (enabled),
      # then make one more. Because SqrlIsOpen() may have destroyed any waiting
      # rows, the original query must be resubmitted.
      if (retry
          && SqrlParam(datasource, "retry")
          && !SqrlIsOpen(datasource, besure = TRUE))
      {
        return(SqrlSubmit(datasource, statement, retry = FALSE))
      }
      # If RODBC::sqlGetResults() threw an error, or if the 'errors' parameter
      # is TRUE, throw the error. In the latter case, this promotes the ODBC
      # error message to a local R exception, and throws it (RODBC doesn't).
      if (result$error
          || SqrlParam(datasource, "errors"))
      {
        stop(paste(result$value, collapse = "\n"))
      }
    }
    # Return the result. This could be a data frame, a character string, an
    # empty character vector, or an integer code (-1 = failure, -2 = no data).
    return(result$value)
  }
  # The block above is our preferred method, used so long as RODBC::odbcQuery()
  # remains publicly available. Should that not be the case, the script below
  # implements our fallback method, which uses RODBC::sqlQuery() instead.
  # Append query-in-progress indicator to the window-title connection indicator.
  SqrlIndicator(datasource, "query")
  # A valid connection exists. Submit the statement, and retrieve only the first
  # row (the least amount of data we can). Uses stringsAsFactors = FALSE, to
  # simplify merging with any additional rows (discussed below).
  result <- SqrlTry(RODBC::sqlQuery(channel = SqrlParam(datasource, "channel"),
                    query = statement,
                    errors = SqrlParam(datasource, "errors"),
                    as.is = SqrlParam(datasource, "as.is"),
                    max = 1L,
                    buffsize = SqrlParam(datasource, "buffsize"),
                    nullstring = SqrlParam(datasource, "nullstring"),
                    na.strings = SqrlParam(datasource, "na.strings"),
                    believeNRows = SqrlParam(datasource, "believeNRows"),
                    dec = SqrlParam(datasource, "dec"),
                    stringsAsFactors = FALSE,
                    rows_at_time = SqrlParam(datasource, "rows_at_time")))
  # Remove query-in-progress indicator from the window title.
  SqrlIndicator(datasource, "done")
  # On success, RODBC::sqlQuery() returns a data frame or character string (both
  # possibly empty). On an ODBC error, it returns either a character vector, or
  # an integer (either -1, failure, or -2, no data). Refer to the RODBC manual.
  # In the character vector error case, the length of the vector is usually at
  # least two (the ODBC driver error message, plus the RODBC error message), but
  # in some cases the driver can flag an error without generating a message to
  # go with it, in which case the result is a single character string, being the
  # RODBC message (only). All RODBC error messages begin with '[RODBC] ERROR:'.
  if (result$error
      || identical(class(result$value), class(integer()))
      || (identical(class(result$value), class(character()))
          && any(grepl("^\\[RODBC\\] ERROR:", result$value))))
  {
    # If the failure appears to have been caused by a lost connection, and this
    # is our first attempt, then make one more (unless the retry parameter has
    # been set to FALSE).
    if (retry
        && SqrlParam(datasource, "retry")
        && !SqrlIsOpen(datasource, besure = TRUE))
    {
      return(SqrlSubmit(datasource, statement, retry = FALSE))
    }
    # If RODBC::sqlQuery() threw an error, or when the 'errors' parameter is
    # TRUE, throw the error. In the latter case, this promotes the ODBC error
    # message or code to an R exception (RODBC doesn't).
    if (result$error
        || SqrlParam(datasource, "errors"))
    {
      stop(paste(result$value, collapse = "\n"))
    }
  }
  # No error occurred. Remove the error flag, retain only the (non-error) value.
  result <- result$value
  # When the result is not a data frame, there won't be any more rows to fetch.
  # This could be a character string or integer error code.
  if (!identical(class(result), class(data.frame())))
  {
    return(result)
  }
  # The result is a data frame of, at most, one row. If it has zero rows, then
  # there can be no more to fetch, so return it now. For consistency with RODBC,
  # strings are not converted to factors in this special case.
  if (nrow(result) == 0L)
  {
    return(result)
  }
  # The result is a data frame of precisely one row. There could be others left
  # to fetch, but if only one row is sought, we do not want them. In that case,
  # return the data frame (after converting strings to factors, if instructed).
  if (SqrlParam(datasource, "max") == 1L)
  {
    if (SqrlParam(datasource, "stringsAsFactors"))
    {
      for (i in seq_along(result))
      {
        if (identical(class(result[, i]), class(character())))
        {
          result[, i] <- as.factor(result[, i])
        }
      }
    }
    return(result)
  }
  # Otherwise, we need to fetch any remaining rows (up to the specified limit).
  # Append fetch-in-progress marker to the window-title connection indicator.
  SqrlIndicator(datasource, "fetch")
  # Retrieve all remaining rows (up to any specified maximum limit, less the
  # one row already obtained above).
  restof <- SqrlTry(
    RODBC::sqlGetResults(channel = SqrlParam(datasource, "channel"),
                         as.is = SqrlParam(datasource, "as.is"),
                         errors = SqrlParam(datasource, "errors"),
                         max = max(SqrlParam(datasource, "max") - 1L, 0L),
                         buffsize = SqrlParam(datasource, "buffsize"),
                         nullstring = SqrlParam(datasource, "nullstring"),
                         na.strings = SqrlParam(datasource, "na.strings"),
                         believeNRows = SqrlParam(datasource, "believeNRows"),
                         dec = SqrlParam(datasource, "dec"),
                         stringsAsFactors = FALSE))
  # Remove the fetch-in-progress marker from the window title.
  SqrlIndicator(datasource, "done")
  # With the initial call of RODBC::sqlQuery() having returned a non-empty data
  # frame (above), RODBC::sqlGetResults() should also have returned a data frame
  # (although, possibly one with zero rows). Anything else is an error.
  if (restof$error
      || !identical(class(restof$value), class(data.frame())))
  {
    # If the failure appears to have been caused by a lost connection, and this
    # is our first attempt, then make one more (unless the retry parameter has
    # been set to FALSE).
    if (retry
        && SqrlParam(datasource, "retry")
        && !SqrlIsOpen(datasource, besure = TRUE))
    {
      return(SqrlSubmit(datasource, statement, retry = FALSE))
    }
    # If RODBC::sqlGetResults() threw an error, or when the 'errors' parameter
    # is TRUE, throw the error. The latter case promotes ODBC error messages to
    # local R exceptions (RODBC doesn't).
    if (restof$error
        || SqrlParam(datasource, "errors"))
    {
      stop(paste(restof$value, collapse = "\n"))
    }
    # Otherwise (when 'errors' is FALSE), return the unexpected result.
    return(restof$value)
  }
  # Append the subsequent rows (from RODBC::sqlGetResults()) to the initial row
  # (from RODBC::sqlQuery()). It is this operation that requires pulling with
  # stringsAsFactors = FALSE (above), because the two frames need not contain
  # the same factor-level definitions.
  result <- rbind(result, restof$value)
  # Convert strings to factors, if so instructed.
  if (SqrlParam(datasource, "stringsAsFactors"))
  {
    for (i in seq_along(result))
    {
      if (identical(class(result[, i]), class(character())))
      {
        result[, i] <- as.factor(result[, i])
      }
    }
  }
  # Return the (non-empty) data frame.
  return(result)
}
SqrlSubScript <- function(datasource = "",
                          statement = "",
                          phrase = "",
                          intermediate = "null",
                          envir = NULL)
{
  # Submits a SQL statement to a data source, and retrieves the result.
  # Args:
  #   datasource   : The name of data source, as known to SQRL.
  #   statement    : A list of strings, forming a (partial) SQL statement.
  #   phrase       : A single string, completing the SQL statement.
  #   intermediate : The name (string) of a variable to assign the result to.
  #   envir        : An environment, within which the assignment is made.
  # Returns:
  #   The result of submitting the statement (or NULL when the statement is
  #   blank). When the environment and intermediate are both non-null, the
  #   result (or NULL) is assigned to the intermediate within the environment.
  # SQRL Calls:
  #   SqrlParam(), SqrlSubmit().
  # utils Calls:
  #   head() (only if utils is attached).
  # SQRL Callers:
  #   SqrlFile().
  # User:
  #   Has no direct access, but is able to supply (only) the statement, phrase,
  #   and intermediate arguments via a SQRL script. These arguments will have
  #   already been parsed and worked into the correct format, by SqrlFile() and
  #   SqrlStatement(), so no argument validity checks should be required here.
  # If the phrase is non-empty, append it to the statement.
  if (nchar(phrase) > 0L)
  {
    # Remove trailing whitespace (including vertical) from the phrase.
    # The phrase cannot (will never) contain quoted string literals.
    phrase <- sub("[[:space:]]*$", "", phrase)
    # Remove trailing whitespace from each internal line of the phrase.
    phrase <- gsub("[[:blank:]]+\n", "\n", phrase)
    # Remove vertical whitespace from within the phrase.
    phrase <- gsub("\n+", "\n", phrase)
    # Remove any whitespace preceding a terminal semi-colon.
    phrase <- sub("[[:space:]]*;$", ";", phrase)
    # Append the phrase to the statement (unless the phrase is empty).
    if (nchar(phrase) > 0L)
    {
      statement <- append(statement, phrase)
    }
  }
  # If the statement is non-empty, submit it and retrieve the result.
  if (length(statement) > 0L)
  {
    # Collapse the statement to a single string. Submit it if non-blank.
    statement <- trimws(paste(statement, collapse = ""))
    if (grepl("[[:graph:]]", statement))
    {
      # Boolean; whether or not to show verbose output. The value of the
      # verbose parameter cannot change while this function is executing.
      verbose <- interactive() && SqrlParam(datasource, "verbose")
      # If verbose, output the statement (prior to submission).
      if (verbose)
      {
        cat("\n\n\n")
        cat(statement)
        cat("\n")
      }
      # Submit the statement to the source, retrieve the result.
      result <- SqrlSubmit(datasource, statement)
      # If verbose, output (some of) the result. Coming from SqrlSubmit(), this
      # should be a data frame, a short character vector, an integer, or NULL.
      # Methods for head() and print() are defined on all of these.
      if (verbose)
      {
        if ("package:utils" %in% search())
        {
          top <- utils::head(result)
          print(top)
          if (!identical(top, result))
          {
            cat("(output truncated)\n")
          }
        } else
        {
          cat(paste0("(object of class '",
                      paste0(class(result), collapse = " "), "')\n"))
        }
        cat("\n")
      }
      # Assign the result to the intermediate variable (unless null).
      if (!is.null(envir)
          && (tolower(intermediate) != "null"))
      {
        assign(intermediate, result, envir)
      }
      # If the result was 'No Data' (generated by RODBC in response to
      # receiving SQL_NO_DATA from the driver, after executing, say, drop
      # table), or -2L (under the same conditions, but when the errors
      # parameter is FALSE), or a zero-length character vector (sometimes
      # produced by similar operations), then return it invisibly.
      if (identical(result, -2L)
          || identical(result, "No Data")
          || identical(result, character(0L)))
      {
        return(invisible(result))
      }
      # Otherwise, return the result visibly.
      return(result)
    }
  }
  # There was actually no query (and, therefore, no result). If the result was
  # to have been assigned to some name, assign it the value NULL.
  if (!is.null(envir)
      && (tolower(intermediate) != "null"))
  {
    assign(intermediate, NULL, envir)
  }
  # Return NULL, signifying an undefined result (because there was no query).
  # SqrlSubmit() does the same thing, if it receives a blank statement (which
  # it shouldn't). RODBC::sqlQuery() (to which SqrlSubmit() is a wrapper), is
  # incapable of returning NULL (or NA). It doesn't actually matter whether or
  # not this is visible, since NULL is a special value; signifying to
  # SqrlFile() that the current overall result should not be replaced by this
  # one.
  return(invisible(NULL))
}
SqrlTry <- function(expr,
                    warn = TRUE)
{
  # Evaluates an expression, trapping errors and (optionally) muting warnings.
  # Args:
  #   expr : An arbitrary R expression, to be evaluated.
  #   warn : When set to FALSE, warning messages are suppressed.
  # Returns:
  #   A vector of two named elements; 'error' and 'value'. When evaluation
  #   produces an error, 'error' will be TRUE and 'value' will be the error
  #   message. Otherwise (normal evaluation), 'error' will be FALSE and
  #   'value' will be the result of that evaluation.
  # SQRL Calls:
  #   None.
  # User:
  #   Has no direct access, but can supply the expression indirectly. Here,
  #   that expression is inherently wrapped in tryCatch(), so no other checks
  #   are required.
  # Handler; folds a caught error condition into the result-list format.
  catcher <- function(cond)
  {
    list(error = TRUE, value = conditionMessage(cond))
  }
  # The expr promise is forced here, inside the tryCatch's protection.
  attempt <- function()
  {
    tryCatch(list(error = FALSE, value = expr), error = catcher)
  }
  # On request, evaluate with warning messages suppressed.
  if (!warn)
  {
    return(suppressWarnings(attempt()))
  }
  # Otherwise, evaluate while letting any warnings propagate as usual.
  return(attempt())
}
SqrlValue <- function(datasource = "",
                      parameter = "",
                      set)
{
  # Output-safe (password obliterated) wrapper to SqrlParam().
  # Args:
  #   datasource : The name of data source, as known to SQRL.
  #   parameter  : The name of a SQRL or RODBC control parameter.
  #   set        : A value to assign to that parameter (optional).
  # Returns:
  #   The edited parameter value (with secrets kept secret).
  # SQRL Calls:
  #   SqrlParam(), SqrlParams(), SqrlValue() (self).
  # SQRL Callers:
  #   SqrlConfig(), SqrlDelegate(), SqrlSources(), SqrlValue() (self).
  # User:
  #   Has no direct access, but is able to supply (only) parameter and set via
  #   SqrlDelegate() and/or SqrlConfig(). The former vets parameter while the
  #   latter does not (although it will restrict parameter to being a string,
  #   and is write-only). Neither vets set. Both parameters are simply passed
  #   to SqrlParam(), and that function performs additional checking as
  #   required. All functions returning values to the user (outside of the
  #   SQRL namespace) should be sourcing their values from this, and not from
  #   SqrlParam().
  # This is the text with which secret information (pwd) is replaced.
  # Six asterisks have been chosen for consistency with RODBC.
  oblit <- "******"
  # A request for the (read-only) value of 'source' returns either the 'dsn'
  # parameter, or the 'connection' parameter, whichever defines the source,
  # with any placeholders substituted and any secrets obliterated. (The
  # recursive SqrlValue() calls, below, guarantee that any uid/pwd material
  # spliced into the returned string has already been masked.)
  if (identical(parameter, "source"))
  {
    connection <- as.character(SqrlValue(datasource, "connection"))
    if (nchar(connection) > 0L)
    {
      for (spar in SqrlParams("substitutable"))
      {
        connection <- gsub(paste0("<", spar, ">"), SqrlValue(datasource, spar),
                           connection)
      }
      return(connection)
    }
    dsn <- as.character(SqrlValue(datasource, "dsn"))
    # The conditional pasting below is as per RODBC::odbcConnect().
    dsn <- paste0("DSN=", dsn)
    if (SqrlParam(datasource, "uid", isdefined = TRUE)
        && (nchar(SqrlValue(datasource, "uid")) > 0L))
    {
      dsn <- paste0(dsn, ";UID=", SqrlValue(datasource, "uid"))
    }
    if (SqrlParam(datasource, "pwd", isdefined = TRUE)
        && (nchar(SqrlValue(datasource, "pwd")) > 0L))
    {
      dsn <- paste0(dsn, ";PWD=", SqrlValue(datasource, "pwd"))
    }
    return(dsn)
  }
  # Retrieve the parameter value, after setting it if so instructed.
  if (!missing(set))
  {
    if (nchar(datasource) < 1L)
    {
      value <- set
    } else
    {
      value <- SqrlParam(datasource, parameter, set)
    }
  } else
  {
    value <- SqrlParam(datasource, parameter)
  }
  # If the parameter is 'reset', then the value is a list of (uniquely) named
  # defaults. While there can be no secrets contained within those defaults,
  # some of them might be of named-value type, for which we return only the
  # names, rather than the named-values (for brevity, not security).
  if (parameter == "reset")
  {
    for (param in SqrlParams("named-values"))
    {
      if (param %in% names(value))
      {
        value[param] <- list(names(value[[param]]))
      }
    }
    return(value)
  }
  # Return only the names of any library entries, rather than their complete
  # definitions. This is for brevity, not for security.
  if (parameter %in% SqrlParams("named-values"))
  {
    return(names(value))
  }
  # If the parameter is semi-secret (connection), it may contain secret (pwd)
  # values as substrings. In this case, locate and obliterate any secrets.
  if (parameter %in% SqrlParams("semi-secret"))
  {
    for (spar in SqrlParams("secret"))
    {
      # Match e.g. 'pwd =' at a word boundary, case-insensitively.
      pattern <- paste0("\\b", spar, "\\s*=")
      if (grepl(pattern, value, ignore.case = TRUE))
      {
        # Construct regular expression patterns for each of the non-secret
        # values (blank, <pwd>, and so on). These are unique. Occurrences
        # matching one of these 'ignorables' are placeholders, not actual
        # secrets, and are retained verbatim.
        ignorables <- SqrlParams("substitutable")
        ignorables <- ignorables[ignorables %in% SqrlParams("secret")]
        if (length(ignorables) > 0L)
        {
          ignorables <- paste0("\\s*<", ignorables, ">\\s*$")
        }
        ignorables <- paste0(pattern, c("\\s*$", ignorables))
        # Positions (first-character indices) and lengths of the (potential)
        # secret-containing sub-strings of the parameter-value string.
        # (Prepended zeroes act as sentinels, simplifying the indexing of
        # each preceding non-secret run within the loop below.)
        ssubs <- gregexpr(paste0(pattern, "\\s*[^;]*"), value,
                          ignore.case = TRUE)[[1L]]
        slens <- c(0L, attr(ssubs, "match.length"))
        ssubs <- c(0L, ssubs)
        # Overwrite all non-ignorable (true) secrets with the replacement text.
        eds <- character(0L)
        for (i in seq(2L, length(ssubs)))
        {
          # Character positions (indices) within the value string; Start Of
          # Secret substring, End Of Secret substring, Start Of Previous
          # (non-secret) substring, End of Previous (non-secret) substring.
          sos <- ssubs[i]
          eos <- ssubs[i] + slens[i] - 1L
          sop <- ssubs[i - 1L] + slens[i - 1L]
          eop <- ssubs[i] - 1L
          # Isolate the potentially secret containing substring.
          ssub <- substring(value, sos, eos)
          # If the parameter value is apparently non-sensitive (ignorable),
          # then retain it unmodified (do not obliterate the value).
          ignore <- FALSE
          for (ignorable in ignorables)
          {
            if (grepl(ignorable, ssub, ignore.case = TRUE))
            {
              ignore <- TRUE
              break
            }
          }
          if (ignore)
          {
            eds <- c(eds, substring(value, sop, eos))
          # Otherwise, the sub-string contains potentially secret information.
          # Obliterate (replace) that information with the masking sequence.
          } else
          {
            pat <- paste0("(", spar, "\\s*=\\s*)[^;]+")
            eds <- c(eds, substring(value, sop, eop),
                     sub(pat, paste0("\\1", oblit), ssub, ignore.case = TRUE))
          }
        }
        # Append any final (trailing) non-secret sub-string.
        eds <- c(eds, substring(value,
                                ssubs[length(ssubs)] + slens[length(ssubs)]))
        value <- paste0(eds, collapse = "")
      }
    }
    return(value)
  }
  # If the parameter is secret, obliterate it entirely (unless it is empty).
  if (parameter %in% SqrlParams("secret"))
  {
    if (!nzchar(value))
    {
      return(value)
    }
    return(oblit)
  }
  # Otherwise (the parameter is non-secret), return the unmodified value.
  return(value)
}
########################################################### PUBLIC FUNCTIONS ###
sqrlAll <- function(...)
{
  # Sends the same command to each of the defined SQRL sources.
  # Args:
  #   ... : A sequence of strings, as per (to be supplied to) SqrlDelegate().
  # Returns:
  #   A (possibly invisible) list of the results of the command on each
  #   source. The list is visible when the command is a value request on a
  #   single named parameter (or on connection openness); invisible otherwise.
  # SQRL Calls:
  #   SqrlAll(), SqrlParams().
  # User:
  #   Exported function. User has direct access. However, the argument(s) are
  #   unrestricted, and no checking is required (beyond SqrlDelegate()'s).
  arglist <- list(...)
  # Decide whether the results should be returned visibly; this is the case
  # for a single unnamed non-empty string naming a parameter (or 'source'),
  # or matching an 'is open' status query.
  show <- FALSE
  if ((length(arglist) == 1L)
      && is.null(names(arglist))
      && identical(class(arglist[[1L]]), class(character()))
      && (nchar(arglist[[1L]]) > 0L))
  {
    show <- ((arglist[[1L]] %in% c(SqrlParams("all"), "source"))
             || grepl("^is\\s*open$", arglist[[1L]]))
  }
  # Apply the command to every source, collecting the per-source results.
  out <- SqrlAll(arglist, envir = parent.frame())
  if (show)
  {
    return(out)
  }
  return(invisible(out))
}
sqrlInterface <- function(...)
{
  # Constructs a user-interface to a specified data source.
  # Args:
  #   ... : A source name and, optionally, a new interface name, in that
  #         order. A single named argument (source = interface) also acts
  #         as the two-argument (setter) form.
  # Returns:
  #   The name of the interface function to the specified source. When only a
  #   source name is supplied in the arguments, the function acts as a getter
  #   and returns the current interface name (or NULL when there is none).
  #   When both source and interface names are supplied, the new interface
  #   name is set before being returned. When the interface name is given as
  #   'remove', no new interface is created, but any existing one is deleted.
  # SQRL Calls:
  #   SqrlCache(), SqrlDefile(), SqrlInterface(), SqrlParam(), SqrlTry().
  # User:
  #   Exported function. User has direct access. The datasource name is
  #   checked for validity, but it is left to SqrlInterface() to establish the
  #   validity and usability of the interface name.
  # Either one or two arguments are expected.
  arglist <- list(...)
  if ((length(arglist) < 1L)
      || (length(arglist) > 2L))
  {
    k <- parse(text = "sqrlInterface(...)", keep.source = FALSE)
    m <- "A source name and an interface name are expected."
    stop(simpleError(m, k[[1L]]))
  }
  # Identify the data-source name and also the interface name (if specified).
  getname <- FALSE
  if (length(arglist) == 1L)
  {
    if (!is.null(names(arglist)))
    {
      # One named argument: source = interface (setter form).
      datasource <- names(arglist)
      interface <- SqrlDefile("interface", arglist[[datasource]])
    } else
    {
      # One unnamed argument: just the source name (getter form).
      datasource <- arglist[[1L]]
      getname <- TRUE
    }
  } else
  {
    # Two arguments: source then interface (setter form; the interface
    # argument may be an expression, which SqrlDefile() evaluates).
    datasource <- arglist[[1L]]
    interface <- SqrlDefile("interface", arglist[[2L]], evaluate = TRUE)
  }
  # Abort on non-existence of the specified data source.
  if (!identical(class(datasource), class(character()))
      || (length(datasource) != 1L)
      || (nchar(datasource) < 1L)
      || SqrlCache(datasource, exists = FALSE))
  {
    k <- parse(text = "sqrlInterface(...)", keep.source = FALSE)
    m <- "Unrecognised data source."
    stop(simpleError(m, k[[1L]]))
  }
  # In the absence of a specified interface name, get and return the name of
  # the current interface to the data source (returns NULL if none exists).
  if (getname)
  {
    return(SqrlParam(datasource, "interface"))
  }
  # Relay the arguments to SqrlInterface() (returns the new name).
  f <- SqrlTry(withVisible(SqrlInterface(datasource, interface)))
  # In the event of an error, throw the message.
  if (f$error)
  {
    k <- parse(text = "sqrlInterface(...)", keep.source = FALSE)
    stop(simpleError(f$value, k[[1L]]))
  }
  # Return the new interface name, either visibly or invisibly, as
  # appropriate (f$value is the withVisible() result: a value/visible pair).
  f <- f$value
  if (!f$visible)
  {
    return(invisible(f$value))
  }
  return(f$value)
}
sqrlOff <- function()
{
  # Closes SQRL channels and deactivates SQRL.
  # Args:
  #   None.
  # Returns:
  #   Invisible NULL, after closing channels and detaching SQRL (this is
  #   the value of SqrlOff(), preserved by the tail call).
  # SQRL Calls:
  #   SqrlOff().
  # User:
  #   Exported function. User has direct access, but there are no arguments.
  # Hand straight over to the private worker.
  SqrlOff()
}
sqrlSource <- function(...)
{
  # Defines (or re-defines) a data source and its interface.
  # Args:
  #   ... : A source name and definition (string or file), in that order.
  # Returns:
  #   The interface name, invisibly, after creating, or re-defining, the
  #   source and its interface.
  # SQRL Calls:
  #   SqrlSource(), SqrlTry().
  # User:
  #   Exported function. User has direct access. Here, we ensure the existence
  #   of name and definition terms (in the form of multiple arguments, or at
  #   least one named argument). Additional checks are left to SqrlSource().
  # Unpack any list arguments (to their first-level elements). The scan runs
  # backwards (last argument first), so elements spliced in at position i and
  # beyond are never re-scanned; only one level of unpacking occurs.
  def <- list(...)
  i <- length(def)
  while (i > 0L)
  {
    if (identical(class(def[[i]]), class(list())))
    {
      j <- seq_along(def)
      if ((i == 1L)
          && !is.null(names(def))
          && nzchar(names(def)[1L]))
      {
        # A named list in first position (name = list(...)); promote the
        # name to a leading source-name term, ahead of the list's elements.
        def <- c(names(def)[1L], def[[1L]], def[j[j > 1L]])
      } else
      {
        # Otherwise, splice the list's elements in at its own position.
        def <- c(def[j[j < i]], def[[i]], def[j[j > i]])
      }
    }
    i <- i - 1L
  }
  # Abort unless we have at least a pair of terms (name, definition) or a
  # single named term (name = definition).
  if ((length(def) < 2L)
      && is.null(names(def)))
  {
    k <- parse(text = "sqrlSource(...)", keep.source = FALSE)
    m <- "A name and definition are expected."
    stop(simpleError(m, k[[1L]]))
  }
  # Pass the arguments to SqrlSource() (returns the interface name,
  # invisibly).
  s <- SqrlTry(SqrlSource(def))
  # In the event of an error, throw the message.
  if (s$error)
  {
    k <- parse(text = "sqrlSource(...)", keep.source = FALSE)
    stop(simpleError(s$value, k[[1L]]))
  }
  # Invisibly return the new interface name.
  return(invisible(s$value))
}
sqrlSources <- function(...)
{
  # Returns a summary table of defined data sources.
  # Args:
  #   ... : Argument to RODBC::odbcDataSources(), or empty (default).
  # Returns:
  #   A data frame summarising defined data sources. There is no guarantee
  #   that any of these are presently available, or even that they exist.
  # SQRL Calls:
  #   SqrlSources().
  # User:
  #   Exported function. User has direct access. Argument checking is
  #   required.
  # With no argument, simply return the existing SQRL data source definitions,
  # without (re)importing any local DSNs.
  import <- list(...)
  if (length(import) == 0L)
  {
    return(SqrlSources(""))
  }
  # Otherwise, precisely one of four string values is allowed. Each of 'all',
  # 'user', and 'system' causes RODBC::odbcDataSources() to (re)import the
  # corresponding set of local DSNs; 'remove' deletes definitions.
  recognised <- FALSE
  if (length(import) == 1L)
  {
    for (option in c("all", "user", "system", "remove"))
    {
      if (identical(import[[1L]], option))
      {
        recognised <- TRUE
        break
      }
    }
  }
  if (!recognised)
  {
    k <- parse(text = "sqrlSources(...)", keep.source = FALSE)
    m <- "Argument should be 'all', 'user', 'system', or 'remove'."
    stop(simpleError(m, k[[1L]]))
  }
  # Pass to SqrlSources(), return the summary.
  return(SqrlSources(import[[1L]]))
}
###################################################### PRIVATE LOAD / UNLOAD ###
.onLoad <- function(libname = "",
                    pkgname = "")
{
  # Create data source interfaces within a public environment, on SQRL load.
  # Args:
  #   libname : The name of the package's directory, within the R library.
  #   pkgname : The name of the package.
  # Returns:
  #   Invisible NULL.
  # SQRL Calls:
  #   SqrlDSNs(), SqrlHelp(), SQRL:Face.
  # Attach a public environment, SQRL:Face, for holding data source interfaces
  # where the user can see them (on the R search path). The user will be able
  # to assign and modify objects within this environment (we would prefer that
  # they didn't, but must allow for the possibility). It doesn't seem possible
  # to attach a SQRL environment (such as srqlHaus), only a copy of one (with
  # the name attribute added to it).
  if (!("SQRL:Face" %in% search()))
  {
    # letters[c(1, 20, 20, 1, 3, 8)] spells "attach"; the function is named
    # indirectly and invoked via eval(call(...)), presumably to keep a
    # literal attach() call out of static package checks -- confirm before
    # simplifying.
    a <- paste0(letters[c(1, 20, 20, 1, 3, 8)], collapse = "")
    eval(call(a, new.env(parent = emptyenv()), name = "SQRL:Face"))
  }
  # Look for data source names (DSNs). Create an interface for each.
  SqrlDSNs("all")
  # Initiate an empty temp-file vector within the help environment.
  SqrlHelp(clean = TRUE)
  # Return invisible NULL.
  return(invisible(NULL))
}
.onUnload <- function(libpath = "")
{
  # Detaches the SQRL:Face environment whenever the SQRL package is unloaded.
  # Args:
  #   libpath : The complete path to the package.
  # Returns:
  #   Invisible NULL.
  # SQRL Calls:
  #   SqrlHelp(), SqrlTry(), SQRL:Face.
  # Best-effort removal of any SQRL temp files from the R-session temp
  # directory (errors and warnings are deliberately discarded).
  SqrlTry(SqrlHelp(clean = TRUE), warn = FALSE)
  # Detach the public SQRL:Face environment, should it still be attached.
  if (is.element("SQRL:Face", search()))
  {
    SqrlTry(detach("SQRL:Face"), warn = FALSE)
  }
  # Return invisible NULL.
  return(invisible(NULL))
}
######################################################################## EOF ###
|
cff38d3e13446b77d8f30ebf51357c955811ca89
|
3f9db7481425c63a1fd9078c2583d096287df74f
|
/man/relabel_tree.Rd
|
13509f2c0c7a6d67f1545008dcee6eb9a8ce5c8e
|
[
"MIT"
] |
permissive
|
ethanmoyer/ICCE
|
2f8442a1afc3b66c0bb9c0bb8958c2bf5f3d0f02
|
0f23dc13b51e35b1a387f42a2e2ddc984ee991f9
|
refs/heads/master
| 2022-12-06T14:44:16.564361
| 2020-08-20T01:23:11
| 2020-08-20T01:23:11
| 278,681,090
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 511
|
rd
|
relabel_tree.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/editTree.R
\name{relabel_tree}
\alias{relabel_tree}
\title{Relabel node on tree}
\usage{
relabel_tree(icceTree, n_old, n_new)
}
\arguments{
\item{icceTree}{icceTree data structure}
\item{n_old}{node number}
\item{n_new}{node number}
}
\value{
relabeled tree in which node label n_old has been replaced
by node label n_new
}
\description{
Relabel node label n_old with node label n_new on tree.
}
\examples{
icceTree <- relabel_tree(icceTree, 7, 15)
}
|
09a4f08ceb32df2e1ff04f139a4752568a1aa303
|
0c131a3bee0e8659589add196303654f154e266b
|
/conf/install-reed.R
|
e3a06a94cd8a6af40cd86aaeb3a4d638a77bd37c
|
[
"MIT"
] |
permissive
|
mccahill/docker_rstudio_ibiem
|
7052b205339b619d8fd38b8a2cd91c7b0e31c5a6
|
e4d693fec6fea546ec36c975bcbd43ca45a031c8
|
refs/heads/master
| 2021-09-14T04:29:50.160430
| 2018-05-08T14:57:01
| 2018-05-08T14:57:01
| 106,874,026
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 258
|
r
|
install-reed.R
|
# Point this session's default CRAN mirror at cran.r-project.org.
r <- getOption("repos")
r["CRAN"] <- "http://cran.r-project.org"
options(repos=r)
# Install the Reed College lab/template packages from GitHub, plus the
# 'tufte' package from CRAN (requires devtools to be installed already).
devtools::install_github("ismayc/reedoilabs")
devtools::install_github("ismayc/reedtemplates")
utils::install.packages("tufte")
devtools::install_github("andrewpbray/oilabs")
|
eb4ae0833e2780c16b2dda24b4aae74ee9a467f7
|
91ff77a02ca88dd9bb173961f61aa34229472a13
|
/prep/labels.R
|
a15f8758cde76f4dff9e11880cc05c3966899789
|
[] |
no_license
|
krishnan-viswanathan/summarizeNHTS
|
996b6cd447fa5c3d3a7a7b98aaf4ba96082a550e
|
afaea3c3168ddd1d82fb808e7d2298690bedb419
|
refs/heads/master
| 2021-01-15T16:29:37.402304
| 2017-07-26T18:58:16
| 2017-07-26T18:58:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
r
|
labels.R
|
# See prep/variables.R to get variable lists
# Interactively select the 2001 and 2009 label CSV files (file.choose()
# opens a file-picker dialog, so this must be run in an interactive session).
file_labels_2001 <- file.choose()
file_labels_2009 <- file.choose()
# Read the label tables (fread() comes from data.table, assumed attached).
labels_2001 <- fread(file_labels_2001)
labels_2009 <- fread(file_labels_2009)
# Bundle each year's labels with its variable list (variables_2001 and
# variables_2009 must already exist -- see prep/variables.R above).
nhts_2001 <- list(
labels = labels_2001,
variables = variables_2001
)
nhts_2009 <- list(
labels = labels_2009,
variables = variables_2009
)
#devtools::use_data(nhts_2001, nhts_2009, overwrite = TRUE)
|
4a3d440b500d754a6968e90955b08df937a9db29
|
b5ba5c578810105c9148fecadc61f124ae68118c
|
/man/lg.Rd
|
827df2fa8ad1faf49f87a84fde09be4a1f97ba1f
|
[] |
no_license
|
dangulod/ECTools
|
cce57dfe0189ee324922d4d014cb7a72bd97817d
|
a927092249a92ced28c6c50fe7b26588049a07d0
|
refs/heads/master
| 2021-01-25T10:51:04.021720
| 2018-05-16T10:31:25
| 2018-05-16T10:31:25
| 93,886,888
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 290
|
rd
|
lg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/maxcorlag.R
\name{lg}
\alias{lg}
\title{Modified lag function}
\usage{
lg(x = x, lag = lag)
}
\arguments{
\item{x}{vector}
\item{lag}{if positive, retard; if negative, delay}
}
\description{
Modified lag function
}
|
a4d1a2a06fc7154e9d5955046a593113d25f1a44
|
efa677f6569ccaefaa7dab5965758c9d5c14bc36
|
/lpa.mi.src/man/extract_class_proportions.Rd
|
a75349562e5e557724b8f61078a3f6df30ca2d1d
|
[] |
no_license
|
marcus-waldman/lpa-mi-src
|
2704f45f894184a81c0f934bf1ea9992e7b23c50
|
eed7d99fbc2f3a0ce46a0e4f624c2ad40ecc3502
|
refs/heads/master
| 2021-06-04T03:54:58.475339
| 2020-01-02T01:20:34
| 2020-01-02T01:20:34
| 140,605,108
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 699
|
rd
|
extract_class_proportions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extract_class_proportions.R
\name{extract_class_proportions}
\alias{extract_class_proportions}
\title{Extract class proportions from Bayesian fitted LC model in Mplus .out file.}
\usage{
extract_class_proportions(file, path = getwd())
}
\arguments{
\item{file}{(character) The filename of the Mplus .out file where the starting values are located}
\item{path}{(character) The path where the file is located}
}
\value{
(character) vector with the class proportions extracted from the .out file.
}
\description{
Extract class proportions from Mplus .out file.
}
\examples{
extract_class_proportions(file = "example.out")
}
|
69da7e4f39906b15ff5c99eb34c80142cdc8551b
|
2a0d3a8812926e947c8b91ee8b49951b29dc5198
|
/scripts/checkplots_for_parallel_amarel/raref_for_test_3.R
|
7c3185c81834c70683ad4e5b0e5aa75a66020985
|
[] |
no_license
|
dushoff/diversity_metrics
|
27b6b883c816ba2af384a0458c73f8f7bb04b1ba
|
8f5f4ac07e56281511788be1d471aa5c7e8c93e1
|
refs/heads/master
| 2021-10-19T15:11:01.044247
| 2020-04-07T12:46:33
| 2020-04-07T12:46:33
| 45,498,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 871
|
r
|
raref_for_test_3.R
|
# load libraries
library(data.table)
library(tidyverse)
library(iNEXT)
library(furrr)
library(tictoc)
# we have some kind of results to read in
tic()
csamples<-fread("data/new_samples_for_rarefaction_2.csv")
# Logit / inverse-logit helpers. NOTE(review): logit() is defined but never
# used below -- confirm it can be dropped.
logit<-function(x){log(x/(1-x))}
invlogit<-function(x)(exp(x)/(1+exp(x)))
# Select the 4th coverage level from an inverse-logit-spaced grid.
clev<-invlogit(seq(0.5, 5, 0.25))[
4
]
toc()
print("read")
# NOTE(review): 'multiprocess' is a deprecated future strategy (it resolves
# to multisession or multicore depending on platform) -- consider
# 'multisession'; also confirm 24 workers suits the target machine.
plan(strategy=multiprocess, workers=24)
# Tag each sample row with its index, so results can be traced back.
csamples<-csamples %>% mutate(rowind=1:nrow(csamples))
tic()
# Coverage-based rarefaction (iNEXT::estimateD) of every sample row, in
# parallel; columns 1:200 are taken as the abundance counts for that row,
# and ell = 1 - order is appended to each per-row result.
one_level<-future_map_dfr(1:nrow(csamples), function(rown){
data.frame(estimateD(
as.numeric(csamples[rown, 1:200])
, base = "coverage"
, level = clev
, conf = 0.95)
%>% mutate(ell=1-order)
, rowind=rown
)
})
# Write the combined results, with the coverage level baked into the name.
fwrite(one_level, file=paste0("data/coverage_rarefaction_at_",clev, "_2.csv"))
print(paste0("wrote", clev))
toc()
|
8aaf9ffa92c3aade76b9df2bf72aa5a8fc557510
|
73552179a08604504e307cede5e12eba217eb8ad
|
/Weekly.R
|
4d2ebe49a6c6a16752b283bb95a52ac4d9a5b0d6
|
[] |
no_license
|
nikhilraj0025/Weekly-percentage-returns-for-the-S-P-500-stock-index-between-1990-and-2010.
|
94707f21bd9124c6c7a55af26a5a437dd8424842
|
22d93377b8b8439e942c49401a3ada1755d2d95e
|
refs/heads/master
| 2020-06-06T12:37:56.030453
| 2019-06-19T13:52:57
| 2019-06-19T13:52:57
| 192,742,045
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,550
|
r
|
Weekly.R
|
# Analysis of the ISLR 'Weekly' data set (weekly S&P 500 percentage returns,
# 1990-2010), followed by a linear-regression exercise on the 'Auto' data.
# NOTE(review): sample() is used without set.seed(), so the train/test splits
# (and hence the reported accuracies/RMSEs) differ between runs -- confirm.
library(ISLR)
data("Weekly")
View(Weekly)
summary(Weekly)
?Weekly
dim(Weekly)
Weekly1 <- Weekly
# Scatterplot matrix of the predictors (column 9, Direction, excluded).
plot(Weekly[,-9])
attach(Weekly1)
# Volume increases over time.
plot(Volume)
library(dplyr)
# Recode the response as numeric 0/1 for logistic regression.
Weekly1 <- mutate(Weekly1, Direction = ifelse(Direction == "Up", 1, 0))
# NOTE(review): this operates on the attached (pre-mutate) copy of Direction
# and creates a global factor that is never used again -- confirm droppable.
Direction <- factor(Direction)
# --- 75/25 train/test split -------------------------------------------------
W_sam <- sample(2, nrow(Weekly1), replace = TRUE, prob = c(.75, .25))
W_Train <- Weekly1[W_sam == 1, ]
W_Test <- Weekly1[W_sam == 2, ]
# --- Logistic regression: fit on training data, assess on test data ---------
W_mod1 <- glm(Direction ~ ., data = W_Train, family = binomial)
# The p-values are high, so there is little evidence of a real association
# between the predictors and the response.
summary(W_mod1)
W_pred1 <- predict(W_mod1, W_Test, type = "response")
W_df1 <- data.frame(W_pred1, W_Test$Direction)
W_df1 <- mutate(W_df1, W_pred1 = ifelse(W_pred1 > 0.5, 1, 0))
colnames(W_df1) <- c("predict", "actual")
t1 <- table(W_df1$predict, W_df1$actual)
t1
acc1 <- sum(diag(t1)) / sum(t1)
acc1
# --- Logistic regression fitted and assessed on the complete data set -------
W_mod2 <- glm(Direction ~ ., data = Weekly1, family = binomial)
summary(W_mod2)
W_pred2 <- predict(W_mod2, Weekly1, type = "response")
W_df2 <- data.frame(W_pred2, Weekly1$Direction)
W_df2 <- mutate(W_df2, W_pred2 = ifelse(W_pred2 > 0.5, 1, 0))
colnames(W_df2) <- c("predict", "actual")
t2 <- table(W_df2$predict, W_df2$actual)
t2
acc2 <- sum(diag(t2)) / sum(t2)
acc2
# --- Lag2-only model: train on 1990-2008, test on 2009-2010 -----------------
Train_new <- filter(Weekly1, Year <= 2008)
View(Train_new)
Test_new <- filter(Weekly1, Year >= 2009)
View(Test_new)
W_mod3 <- glm(Direction ~ Lag2, data = Train_new, family = binomial)
W_pred3 <- predict(W_mod3, Test_new, type = "response")
W_df3 <- data.frame(W_pred3, Test_new$Direction)
W_df3 <- mutate(W_df3, W_pred3 = ifelse(W_pred3 > 0.5, 1, 0))
colnames(W_df3) <- c("predict", "actual")
t3 <- table(W_df3$predict, W_df3$actual)
t3
acc3 <- sum(diag(t3)) / sum(t3)
acc3
# --- Auto data set: linear regression ---------------------------------------
data("Auto")
View(Auto)
dim(Auto)
# Scatterplot matrix.
plot(Auto)
# Correlation matrix of the numeric columns ('name', column 9, excluded).
Auto1 <- Auto[,-9]
cor(Auto1)
# 75/25 train/test split.
W_Au <- sample(2, nrow(Auto1), replace = TRUE, prob = c(.75, .25))
A_Train <- Auto1[W_Au == 1, ]
A_Test <- Auto1[W_Au == 2, ]
# Fit on the training data; compute the test RMSE.
aumodel_1 <- lm(mpg ~ ., data = A_Train)
summary(aumodel_1)
A_pred1 <- predict(aumodel_1, A_Test)
df_A <- data.frame(A_pred1, A_Test$mpg)
df_A1 <- mutate(df_A, error = (A_pred1 - A_Test$mpg)^2)
RMSE_A <- sqrt(mean(df_A1$error))
RMSE_A
# Fit and evaluate on the complete data set.
# BUG FIX: the original refitted on A_Train and then predicted with
# aumodel_1, contradicting the stated "on complete data" intent.
aumodel_2 <- lm(mpg ~ ., data = Auto1)
par(mfrow = c(2, 2))
plot(aumodel_2)
summary(aumodel_2)
A_pred2 <- predict(aumodel_2, Auto1)
df_A2 <- data.frame(A_pred2, Auto1$mpg)
df_A2 <- mutate(df_A2, error = (A_pred2 - Auto1$mpg)^2)
RMSE_2 <- sqrt(mean(df_A2$error))
RMSE_2
|
e17da90a081505be75bdd4fe92f4b18aa21af476
|
1f439c7cc390d6b1238990b5794f2156edc34929
|
/exemplo_dataframe.R
|
dc72b08c55e7686cb926c63117907c5237f2e6be
|
[] |
no_license
|
flavioti/aula_r
|
4be9b872b7ffb0d77b126877362e71104a94f165
|
9c5d365fbc2c55e64ad52a0ed5d840e8d7b02e80
|
refs/heads/master
| 2020-03-19T16:40:32.723949
| 2018-09-04T01:02:39
| 2018-09-04T01:02:39
| 136,724,646
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 360
|
r
|
exemplo_dataframe.R
|
# Example of data frame usage (comments translated from Portuguese)
nome = c("Edmar", "Pedro")
idade = c(30, 20)
salario = c(1000, 2000)
cadastro = data.frame(nome,idade,salario)
filhos = c(1, 2)
# Adding a column to the data frame
cadastro$filhos = filhos
# 6% raise applied on top of the current salary
aumento = c((salario * 0.06) + salario)
cadastro$aumento = aumento
cadastro
# write.csv2 uses ';' separators and ',' decimals (pt-BR locale convention)
write.csv2(cadastro, file = "teste.csv")
# setwd("~/aula_r")
|
a21f7156225b0354140cf19a6fcb4fe699f6c34e
|
ab5845d7d934a087ef2d708a1d2776129999015a
|
/R/my_geoDA.R
|
477c532ee622193662d6f801eca566f65b2aeddc
|
[] |
no_license
|
gastonstat/DiscriMiner
|
6b9dc6bbc9b4a6a301cf4adbb87353fad716570f
|
61cc95e58d801ae6adb8446dbc9cf79ad473277b
|
refs/heads/master
| 2021-06-01T13:55:55.807215
| 2021-02-26T15:22:50
| 2021-02-26T15:22:50
| 5,786,053
| 3
| 4
| null | 2017-02-28T17:45:21
| 2012-09-12T21:15:43
|
R
|
UTF-8
|
R
| false
| false
| 1,574
|
r
|
my_geoDA.R
|
my_geoDA <-
function(X, y, learn, test)
{
  # Performs a geometric predictive discriminant analysis.
  # Args:
  #   X     : matrix or data.frame with explanatory variables
  #   y     : vector or factor with group membership
  #   learn : vector of learning (training) observation indices
  #   test  : vector of testing observation indices
  # Returns:
  #   A list with the classification functions (FDF), the confusion matrix
  #   (conf), the discriminant scores of the test observations (Disc), and
  #   the predicted classes (pred_class).
  # how many testing observations
  ntest <- length(test)
  # how many groups, and their labels
  ng <- nlevels(y[learn])
  glevs <- levels(y[learn])
  # group means and within-class covariance matrix
  # (my_groupMeans() and my_withinCov() are package helpers defined elsewhere)
  GM <- my_groupMeans(X[learn, ], y[learn])
  W <- my_withinCov(X[learn, ], y[learn])
  # inverse of the within-class covariance matrix
  W_inv <- solve(W)
  # constant terms and coefficients of the classification functions
  alphas <- rep(0, ng)
  Betas <- matrix(0, nrow(W_inv), ng)
  for (k in seq_len(ng))
  {
    alphas[k] <- -(1 / 2) * GM[k, ] %*% W_inv %*% GM[k, ]
    Betas[, k] <- t(GM[k, ]) %*% W_inv
  }
  # Mahalanobis-Fisher classification rule
  FDF <- rbind(alphas, Betas)
  rownames(FDF) <- c("constant", colnames(X))
  colnames(FDF) <- glevs
  # matrix of constant terms, one (identical) row per test observation
  A <- matrix(rep(alphas, ntest), ntest, ng, byrow = TRUE)
  # apply the discriminant functions to the test observations
  Disc <- X[test, ] %*% Betas + A
  dimnames(Disc) <- list(rownames(X[test, ]), glevs)
  # predicted class: the group with the highest discriminant score.
  # BUG FIX: which.max() takes the first maximum, so tied scores can no
  # longer yield multiple indices per row (which would have broken the
  # factor() call below under the original which(u == max(u)) form).
  pred <- apply(Disc, 1, which.max)
  names(pred) <- NULL
  # assign class labels to the predicted group indices
  pred_class <- factor(pred, levels = seq_along(glevs), labels = glevs)
  # confusion matrix (original versus predicted, test observations only)
  conf <- table(original = y[test], predicted = pred_class)
  # results
  # BUG FIX: return the list explicitly (and visibly); the original ended on
  # an assignment, which returns its value invisibly.
  res <- list(FDF = FDF, conf = conf, Disc = Disc, pred_class = pred_class)
  return(res)
}
|
441393e695828ed9c64bc7f5541355bf12822f90
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed_and_cleaned/10233_0/rinput.R
|
d6d1c14341f6982c10024a286a5dfc7ab8a93da0
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
# Read a phylogenetic tree (Newick format) from 10233_0.txt, unroot it, and
# write the unrooted tree back out. Requires the 'ape' package.
library(ape)
testtree <- read.tree("10233_0.txt")
# unroot() collapses the root, leaving a basal multichotomy
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10233_0_unrooted.txt")
|
9d5deef40d98c191f4a645b202c10d4b27938251
|
e5ebddef173d10c4722c68f0ac090e5ecc626b8b
|
/IL2/bin/PLSR/plsr.R
|
6fa07e29848ddfb4fb702d95eb4fbf5983b56884
|
[] |
no_license
|
pontikos/PhD_Projects
|
1179d8f84c1d7a5e3c07943e61699eb3d91316ad
|
fe5cf169d4624cb18bdd09281efcf16ca2a0e397
|
refs/heads/master
| 2021-05-30T09:43:11.106394
| 2016-01-27T15:14:37
| 2016-01-27T15:14:37
| 31,047,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,263
|
r
|
plsr.R
|
library(pls)
library(iterators)
library(flowCore)
source('~nikolas/bin/FCS/fcs.R')
source('~nikolas/Projects/IL2/bin/common.R')
# partial least squares regression
# this individual looks ok
individual <- 'CB00086S'
day <- '2012-09-18'
BASE.DIR <- '~/dunwich/Projects/IL2/CD25-CD3-CD4-CD45RA-CD56-CD8-FOXP3-PSTAT5'
individual.date <- do.call('rbind',strsplit(gsub('.RData','',list.files(file.path(BASE.DIR,'RData','pstat5-join'))),'_'))
setwd(BASE.DIR)
#load('~/dunwich/Projects/IL2/transforms.RData')
#### All lymphocytes
print(f <- file.path(BASE.DIR,'RData','pstat5-join', sprintf('%s_%s.RData',individual,day)))
print(load(f))
fcs.data <- baseline.relative.pstat5(fcs.data)
#fcs.data <- applyTransforms(fcs.data,transforms)
print(load(file.path(BASE.DIR,'CLR','CB00086S_0U_2012-09-18.RData')))
print(dim(fcs.data <- fcs.data[as.logical(CLR[,'Single cells']),]))
MARKERS <- c('CD25','CD3','CD4','CD45RA','CD56','CD8','FOXP3')
#
fm <- as.formula(paste(sprintf('diff.PSTAT5.%d',4), paste(MARKERS, collapse='+'), sep='~'))
plsr(fm, data=as.data.frame(fcs.data),scale=FALSE)->p
Y <- p$scores[,1:2]
smoothPlot(Y, outliers=TRUE)
#g<-locator(type='l')
#g <- structure(list(x = c(-0.0103174435677124, -0.1599450850475, -0.339498254823246, -0.504088660451012, -0.548976952894949, -0.53401418874697, -0.474163132155055, -0.369423783119203, -0.264684434083352, -0.1599450850475, -0.115056792603564, -0.100094028455585), y = c(-1.62767441406014, -1.59984835807974, -1.73897863798175, -1.98941314180538, -2.2120215896486, -2.35115186955062, -2.35115186955062, -2.32332581357022, -2.2120215896486, -1.98941314180538, -1.85028286190336, -1.59984835807974)), .Names = c("x", "y"))
g <- structure(list(x = c(0.549386221184538, 0.299526136195612, 0.103689853366454, 0.0834309275565415, 0.21173745768599, 0.468350517944886, 0.542633245914567), y = c(-1.4566307230717, -1.58756893468617, -1.92800828488379, -2.16369706578983, -2.22916617159706, -1.92800828488379, -1.4566307230717)), .Names = c("x", "y"))
lines(g$x,g$y,col='lightblue')
j <- as.logical(point.in.polygon(Y[,1],Y[,2],g$x,g$y))
#g<-locator(type='l')
g <- structure(list(x = c(-0.444233868802116, -0.92602314405666, -1.55234920188757, -1.27933527924333, -0.861784574022721, -0.492412796327571), y = c(-0.163647369673267, 0.787426504230537, -0.269322244551468, -1.07949628528434, -1.1499462018698, -0.128422411380534)), .Names = c("x", "y"))
lines(g$x,g$y,col='pink')
k <- as.logical(point.in.polygon(Y[,1],Y[,2],g$x,g$y))
#g<-locator(type='l')
g <- structure(list(x = c(-0.701188148937873, -0.379995298768177, 0.439046469164547, 0.053615048960912, -0.701188148937873), y = c(0.857876420816004, 0.0829273383758669, 1.03400121227967, 1.70327541984161, 0.998776253986938)), .Names = c("x", "y"))
#lines(g$x,g$y,col='lightblue')
lines(g$x,g$y,col='purple')
l <- as.logical(point.in.polygon(Y[,1],Y[,2],g$x,g$y))
#identification of pink and blue high density clusters
print(dim(clr <- CLR[as.logical(CLR[,'Single cells']),]))
clr <- cbind(clr,lightblue=as.numeric(j))
clr <- cbind(clr,pink=as.numeric(k))
clr <- cbind(clr,purple=as.numeric(l))
pdf('~/Thesis/figures/plsr-lymphocytes.pdf',width=10,height=10)
figure.labels <- iter(paste(letters,')',sep=''))
par(mfrow=c(2,2),mai=c(.75,.75,.5,.1))
for (i in 2:4) {
fm <- as.formula(paste(sprintf('diff.PSTAT5.%d',i), paste(MARKERS, collapse='+'), sep='~'))
plsr(fm, data=as.data.frame(fcs.data),scale=FALSE)->p
# TODO: get the loadings and the variance explained
print(p)
Y <- p$scores[,1:2]
xlab <- 'Comp 1'
#xlab <- ''
ylab <- 'Comp 2'
#ylab <- ''
smoothPlot(Y,posteriors=clr[,c('Memory Eff','Memory Treg','Naive Eff','Naive Treg','purple','pink','lightblue')],chulls=FALSE,clusters.col=c('black','red','darkgreen','blue','purple','pink','lightblue'), outliers=TRUE,ellipse.lwd=3,ylab=ylab,xlab=xlab)
xlab <- paste(round(p$coefficients[,,1],2),'*',rownames(p$coefficients),collapse=' + ',sep='')
ylab <- paste(round(p$coefficients[,,2],2),'*',rownames(p$coefficients),collapse=' + ',sep='')
print(p$loadings)
print(p$coefficients[,,1])
print(p$coefficients[,,2])
#plot(NULL,xlim=range(p$loadings[,1]),ylim=range(p$loadings[,2]))
#for (m in MARKERS) {
#segments(0,0,p$loadings[m,1],p$loadings[m,2])
#text(p$loadings[m,1],p$loadings[m,2], label=m)
#}
#segments(0,0,p$loadings[m,1],p$loadings[m,2])
#segments(0,0,p$Yloadings[,1],p$Yloadings[,2])
#text(p$Yloadings[,1],p$Yloadings[,2],label=m)
title(paste(nextElem(figure.labels),DOSES[[i]], sep='\t'), adj=0)
}
cell.types <- c('Memory Eff','Memory Treg','Naive Eff','Naive Treg')
ylim <- range(sapply(c(cell.types,'purple','pink','lightblue'), function(cell.type) colMedians(fcs.data[as.logical(clr[,cell.type]),paste('diff.PSTAT5',1:4,sep='.')]) ))
plot(NULL, xlim=c(0,3), ylim=ylim, xaxt='n', xlab='dose', ylab='pSTAT5 MFI')
title(nextElem(figure.labels), adj=0)
axis(1, at=0:3, labels=DOSES)
i <- 1
cols <- c('black','red','darkgreen','blue','purple','pink','lightblue')
for (cell.type in c(cell.types,'purple','pink','lightblue')) {
mfi <- fcs.data[which(as.logical(clr[,cell.type])),paste('diff.PSTAT5',1:4,sep='.')]
lines(0:3, colMedians( mfi ), col=cols[[i]], lwd=3)
#polygon(
#c(0:3,3:0),
#c( colQuantile( mfi, prob=.75 ),
#rev(colQuantile( mfi, prob=.25)) ),
#col=do.call('rgb', c(as.list(t(col2rgb(i)/255)),alpha=.1)),
#border=i,
#lwd=.5)
i <- i+1
}
#a <- xx[,paste('diff.PSTAT5',1:4,sep='.')]
#lines(0:3, colMedians( a ), col='purple', lwd=2)
#polygon(
#c(0:3,3:0),
#c( colQuantile( a, prob=.75 ),
#rev(colQuantile( a, prob=.25)) ),
#col=do.call('rgb', c(as.list(t(col2rgb('purple')/255)),alpha=.1)),
#border='purple',
#lwd=.5)
dev.off()
#multivariate view
plotClusters(fcs.data[,MARKERS],posteriors=clr[,c('Memory Eff','Memory Treg','Naive Eff','Naive Treg','pink','lightblue')],chulls=FALSE,clusters.col=c('black','red','darkgreen','blue','pink','lightblue'), outliers=TRUE,ellipse.lwd=3)
#pink and purple do not respond so do not include them
pdf('~/Thesis/figures/plsr-lymphocytes-clusters.pdf',width=10,height=10)
#univariate view
par(mfrow=c(3,3))
for (marker in c(MARKERS,'diff.PSTAT5.3','diff.PSTAT5.4'))
smoothPlot1D(fcs.data[,marker],posteriors=clr[,c('Memory Eff','Memory Treg','Naive Eff','Naive Treg','lightblue')],chulls=FALSE,clusters.col=c('black','red','darkgreen','blue','lightblue'), outliers=TRUE,ellipse.lwd=3, main=marker)
dev.off()
#### Everything else
print(f <- file.path(BASE.DIR,'RData','pstat5-join', sprintf('%s_%s.RData',individual,day)))
print(load(f))
fcs.data <- baseline.relative.pstat5(fcs.data)
fcs.data <- transform.scatter(applyTransforms(fcs.data,transforms))
print(load(file.path(BASE.DIR,'CLR','CB00086S_0U_2012-09-18.RData')))
MARKERS <- c('FSCA','SSCA','CD25','CD3','CD4','CD45RA','CD56','CD8','FOXP3')
par(mfrow=c(1,1))
fm <- as.formula(paste(sprintf('diff.PSTAT5.%d',2), paste(MARKERS, collapse='+'), sep='~'))
plsr(fm, data=as.data.frame(fcs.data),scale=FALSE)->p
Y <- p$scores[,1:2]
smoothPlot(Y,outliers=T)
#g<-locator(type='l')
#dput(g)
g <- structure(list(x = c(-3.42781386640277, -2.83331075199373, -2.23880763758468, -2.29825794902559, -3.04138684203689, -4.02231698081181, -4.73572071810266, -4.94379680814583, -4.46819431661859, -3.54671448928458, -3.2197377763596), y = c(0.876407526174889, 0.876407526174889, 0.614897660608943, 0.291856061968656, 0.0303461964027103, 0.153409662551391, 0.322621928505827, 0.584131794071773, 0.799492859831964, 0.922556325980644, 0.861024592906304)), .Names = c("x", "y"))
lines(g$x,g$y,col='purple')
j <- as.logical(point.in.polygon(Y[,1],Y[,2],g$x,g$y))
#g<-locator(type='l')
#dput(g)
g <- structure(list(x = c(9.59180433915526, 8.46224842177809, 7.68939437304633, 7.89747046308949, 8.61087420038035, 9.35400309339165, 9.79988042919843, 10.3349332321666, 10.097131986403, 9.59180433915526), y = c(-0.000419670134459622, 0.445685394654507, 0.799492859831964, 1.07638565866649, 0.953322192517815, 0.707195260220453, 0.430302461385922, 0.153409662551391, -0.0773343364773852, -0.0158026034030448)), .Names = c("x", "y"))
lines(g$x,g$y,col='pink')
k <- as.logical(point.in.polygon(Y[,1],Y[,2],g$x,g$y))
#g<-locator(type='l')
#dput(g)
g <- structure(list(x = c(2.99281976921489, 4.62770333383976, 5.57890831689423, 6.35176236562598, 7.18406672579864, 7.30296734868045, 6.64901392283051, 5.81670956265785, 5.25193160396925, 4.5385278666784, 3.854849285108, 3.08199523637625, 2.33886634336495, 1.684912917515, 1.0012343359446, 1.0012343359446, 1.80381354039681, 2.30914118764449, 2.96309461349444), y = c(0.799492859831964, 0.99947099232357, 1.01485392559215, 0.968705125786399, 0.830258726369134, 0.368770728311582, 0.0611120629398807, -0.185014869357481, -0.384993001849086, -0.538822334534937, -0.508056467997767, -0.569588201072107, -0.554205267803522, -0.461907668192012, -0.384993001849086, -0.0465684699402148, 0.384153661580167, 0.676429393683283, 0.799492859831964)), .Names = c("x", "y"))
lines(g$x,g$y,col='lightblue')
l <- as.logical(point.in.polygon(Y[,1],Y[,2],g$x,g$y))
#g<-locator(type='l')
#dput(g)
g <- structure(list(x = c(-1.37677812169157, -0.782275007282528, -0.128321581432581, -0.068871269991677, -0.722824695841624, -1.43622843313247, -1.88210576893926, -1.94155608038016, -1.91183092465971, -1.46595358885293, -1.28760265453021), y = c(-0.323461268774746, -0.308078335506161, -0.523439401266352, -1.20028846508409, -1.38488366430712, -1.36950073103853, -1.01569326586107, -0.815715133369468, -0.492673534729182, -0.323461268774746, -0.338844202043331)), .Names = c("x", "y"))
lines(g$x,g$y,col='orange')
m <- as.logical(point.in.polygon(Y[,1],Y[,2],g$x,g$y))
par(mfrow=c(1,1))
fm <- as.formula(paste(sprintf('diff.PSTAT5.%d',4), paste(MARKERS, collapse='+'), sep='~'))
plsr(fm, data=as.data.frame(fcs.data),scale=FALSE)->p
Y <- p$scores[,1:2]
smoothPlot(Y,outliers=T)
#g<-locator(type='l')
g <- structure(list(x = c(3.90086813263438, 3.51311752200631, 3.34985410700502, 3.39066996075534, 3.63556508325728, 4.10494740138599, 4.65596142701535, 4.737593134516, 4.61514557326503, 4.43147423138858, 4.2069870357618, 3.92127605950954, 3.8396443520089, 3.6763809370076, 3.6763809370076), y = c(6.32942929401107, 5.61260526180101, 4.6316881650925, 3.80168139095453, 3.57531590709872, 4.06577445545298, 4.93350881023358, 5.72578800372891, 6.36715687465371, 6.63124993915215, 6.59352235850952, 6.32942929401107, 6.17851897144054, 5.87669832629946, 5.87669832629946)), .Names = c("x", "y"))
lines(g$x,g$y,col='yellow')
n <- as.logical(point.in.polygon(Y[,1],Y[,2],g$x,g$y))
#identification of pink and blue high density clusters
clr <- CLR
clr <- cbind(clr,purple=as.numeric(j))
clr <- cbind(clr,pink=as.numeric(k))
clr <- cbind(clr,lightblue=as.numeric(l))
clr <- cbind(clr,orange=as.numeric(m))
clr <- cbind(clr,yellow=as.numeric(n))
pdf('~/Thesis/figures/plsr-nonlymphocytes.pdf',width=10,height=10)
figure.labels <- iter(paste(letters,')',sep=''))
par(mfrow=c(2,2),mai=c(.75,.75,.5,.1))
for (i in 2:4) {
fm <- as.formula(paste(sprintf('diff.PSTAT5.%d',i), paste(MARKERS, collapse='+'), sep='~'))
plsr(fm, data=as.data.frame(fcs.data),scale=FALSE)->p
Y <- p$scores[,1:2]
smoothPlot(Y,posteriors=clr[,c('Lymphocytes','purple','pink','lightblue','orange','yellow')],clusters.col=c('black','purple','pink','lightblue','orange','yellow'),chulls=FALSE,outliers=TRUE,ellipse.lwd=3)
#smoothPlot(Y,classification=clr[,'Lymphocytes'],chulls=TRUE,outliers=TRUE,ellipse.lwd=3)
title(paste(nextElem(figure.labels),DOSES[[i]], sep='\t'), adj=0)
}
#dose response
ylim <- range(sapply(c('Lymphocytes','purple','pink','lightblue', 'orange', 'yellow'), function(cell.type) colMedians(fcs.data[as.logical(clr[,cell.type]),paste('diff.PSTAT5',1:4,sep='.')]) ))
plot(NULL, xlim=c(0,3), ylim=ylim, xaxt='n', xlab='dose', ylab='pSTAT5 MFI')
title(nextElem(figure.labels), adj=0)
axis(1, at=0:3, labels=DOSES)
i <- 1
cols <- c('black','purple','pink','lightblue','orange','yellow')
for (cell.type in c('Lymphocytes','purple','pink','lightblue','orange','yellow')) {
mfi <- fcs.data[which(as.logical(clr[,cell.type])),paste('diff.PSTAT5',1:4,sep='.')]
lines(0:3, colMedians( mfi ), col=cols[[i]], lwd=3)
i <- i+1
}
dev.off()
#MARKERS <- c('FSCA','SSCA','CD25','CD3','CD4','CD45RA','CD56','CD8','FOXP3')
#univariate view
#yellow and pink are interesting
pdf('~/Thesis/figures/plsr-nonlymphocytes-clusters.pdf',width=10,height=10)
#univariate view
par(mfrow=c(3,3))
for (marker in MARKERS)
#smoothPlot1D(fcs.data[,marker],posteriors=clr[,c('Lymphocytes','purple','pink','lightblue','orange','yellow')],chulls=FALSE,clusters.col=c('black','purple','pink','lightblue','orange','yellow'), outliers=TRUE,ellipse.lwd=3, main=marker)
smoothPlot1D(fcs.data[,marker],posteriors=clr[,c('Lymphocytes','pink','yellow')],chulls=FALSE,clusters.col=c('black','pink','yellow'), outliers=TRUE,ellipse.lwd=3, main=marker)
dev.off()
plotClusters(fcs.data[,c('SSCA','FSCA')],posteriors=clr[,c('Lymphocytes','purple','pink','lightblue','orange')],chulls=FALSE,clusters.col=c('black','purple','pink','lightblue','orange'), outliers=TRUE,ellipse.lwd=3)
|
8e90611b76811ac7cd994fd767f5dd015838fe8a
|
46f795095e1601f46e5c7aee941da98a8062f722
|
/man/geo.getTopTracks.Rd
|
e59d3b0f7e422834d870431b5b4c3fb640bd13ff
|
[] |
no_license
|
cran/RLastFM
|
e76ab8d9eee4cb337682835cbb7ce4f761934aca
|
8d6737c25922eb92631f6e67c623de0bf8266845
|
refs/heads/master
| 2021-01-21T09:59:32.612105
| 2009-08-24T00:00:00
| 2009-08-24T00:00:00
| 17,717,948
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 981
|
rd
|
geo.getTopTracks.Rd
|
\name{geo.getTopTracks}
\alias{geo.getTopTracks}
\title{API call to geo.getTopTracks} \description{API call to geo.getTopTracks}
\usage{
geo.getTopTracks(country, key = lastkey, parse = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{country}{Country name}
\item{key}{API key, defaults to global variable lastkey}
\item{parse}{Parse the returned XML into a list?}
}
\value{
If parse = TRUE,
\item{track}{Track name}
\item{playcount}{Reported playcount}
\item{artist}{Artist name}
\item{artistmbid}{Artist mbid}
\item{rank}{rank}
If parse = FALSE,
a object of type "XMLInternalDocument" and "XMLAbstractDocument"
}
\references{http://www.last.fm/api/show?service=298}
\author{Greg Hirson <ghirson@ucdavis.edu>}
\note{API is sensitive to spelling.}
\examples{
res = geo.getTopTracks("Albania")
do.call("cbind", res)[1,1:3]
# track playcount artist
# "Sex on Fire" "6" "Kings of Leon"
}
|
4519612c12c4088794d856d9cf4ca85c76bbcad5
|
22d53837167bb6fe1a6a962f9db9a066dfeddece
|
/R/benchmarking.R
|
d5dca5db3c860a868918573cc45521cb1ef660bd
|
[] |
no_license
|
kliegr/QCBA
|
afc2cdc9d4470e6b152cb6721428e2df7bc8db17
|
6cd46329a0f3f0c830c6e2ae9f01a916b983843e
|
refs/heads/master
| 2023-08-17T13:26:56.435887
| 2023-08-11T15:16:55
| 2023-08-11T15:16:55
| 91,793,994
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,565
|
r
|
benchmarking.R
|
library(arulesCBA)
library(qCBA)
set.seed(1)
#' @title Auto learn and evaluate QCBA postprocessing on multiple rule learners
#'
#' @description Learn multiple rule models using other rule induction algorithms and apply
#' QCBA to postprocess them.
#' @export
#' @param train data frame with training data
#' @param test data frame with testing data before postprocessing
#' @param classAtt the name of the class attribute
#' @param train_disc prediscretized training data
#' @param test_disc prediscretized tet data
#' @param cutPoints specification of cutpoints applied on the data
#' (ignored if train_disc is null)
#' @param algs vector with names of baseline rule learning algorithms.
#' Names must correspond to function names from the \pkg{arulesCBA} library
#' @param iterations number of excecutions over base learner, whihc is used for
#' obtaining a more precise estimate of build time
#' @param rounding_places statistics in the resulting dataframe will be rounded to
#' specified number of decimal places
#' @param return_models boolean indicating if also learnt rule lists should be
#' included in model output
#' @param debug_prints print debug information such as rule lists
#' @param ... parameters for base learners, the name of the argument is the base
#' learner (one of algs values) and value is a list of parameters to pass.
#' To specify paramaters for QCBA pass argument "QCBA".
#' @return Outputs a dataframe with evaluation metrics statistics.
#' Included metrics:
#' **accuracy**: percentage of correct predictions for the test set
#' **buildtime**: learning time for inference of the model. In case of QCBA, this
#' excludes time for the induction of the base learner
#' **rulecount**: number of rules in the rule list. Note that for QCBA the
#' count includes the default rule (rule with empty antecedent), while for
#' base learners this rule may not be included (depending on arules output)
#' **modelsize**: total number of conditions in the antecedents of all rules in
#' the model
#'
#' @examples
#'
benchmarkQCBA <- function(train,test, classAtt,train_disc=NULL, test_disc=NULL, cutPoints=NULL,
algs = c("CBA","CMAR","CPAR","PRM","FOIL2"), iterations=2, rounding_places=3, return_models = FALSE, debug_prints = FALSE, ...
){
algcombinations<-c(algs,paste0(algs,"_QCBA"))
df_stats <- data.frame(matrix(rep(0,length(algs)*2), ncol = length(algcombinations), nrow = 4), row.names = c("accuracy","rulecount","modelsize", "buildtime"))
returnList=list()
colnames(df_stats)<-algcombinations
if (is.null(train_disc))
{
message("Discretized data not passed (train_disc is NULL), performing
discretization and ignoring passed value of cutPoints and test_dic")
discrModel <- discrNumeric(trainFold, classAtt)
train_disc <- as.data.frame(lapply(discrModel$Disc.data, as.factor))
cutPoints <- discrModel$cutp
test_disc <- applyCuts(test, cutPoints, infinite_bounds=TRUE, labels=TRUE)
}
else{
message("Using passed prediscretized data")
if (sum(test_disc[[classAtt]] == test[[classAtt]]) != nrow(test_disc) | nrow(test_disc) != nrow(test))
{
Exception("Values of class attribute in test_disc and test must be the same and both must have the same length")
}
}
for (alg in algs)
{
algQCBA<-paste0(alg,"_QCBA")
message(paste0("** STARTED learning model with ", alg, " **"))
f <- match.fun(alg)
start.time <- Sys.time()
form <-as.formula(paste(classAtt, " ~ .",collapse = " "))
params <- list(formula = form, data = train_disc)
z <- list(...)
if (alg %in% names(z))
{
params <- append(params,z[[alg]])
}
for (i in 1:iterations) arulesBaseModel <- do.call(f, params)
averageExecTime<-as.numeric((Sys.time()- start.time)/iterations,units="secs")
message(paste0("** FINISHED learning model with ", alg, " **"))
#Important: use predict function from arules library
yhat <- predict(arulesBaseModel, test_disc) # Use rule list for prediction
if (return_models) returnList[[alg]] <- list(arulesBaseModel$rules)
baseModel_arc <- arulesCBA2arcCBAModel(arulesBaseModel, cutPoints, train, classAtt)
# Compute model statistics
df_stats["accuracy",alg] <- CBARuleModelAccuracy(yhat, test_disc[[classAtt]])
df_stats["buildtime",alg] <- averageExecTime
df_stats["rulecount",alg] <- length(arulesBaseModel$rules)
df_stats["modelsize",alg] <- sum(arulesBaseModel$rules@lhs@data)
message(paste0("** STARTED QCBA POSTPROCESSING OF ", alg, " **"))
params<-list(cbaRuleModel=baseModel_arc,datadf=train)
if ("QCBA" %in% names(z))
{
params <- append(params,z[["QCBA"]])
}
start.time <- Sys.time()
for (i in 1:iterations) qCBAmodel <- do.call(qcba,params)
if (alg %in% names(z))
{
params <- append(params,z[[alg]])
}
averageExecTime<-as.numeric((Sys.time() - start.time)/iterations,units="secs")
# wrapping in list is necessary when dataframe is added to a list
if (return_models) returnList[[ algQCBA ]] <- list(qCBAmodel@rules)
if (debug_prints) print(qCBAmodel@rules) #Rule list after postprocessing
yhat <- predict(qCBAmodel, test) # Use postprocessed rule list for prediction
# Compute model statistics
df_stats["accuracy",algQCBA] <- CBARuleModelAccuracy(yhat, test[[classAtt]])
df_stats["buildtime",algQCBA] <-averageExecTime
df_stats["rulecount",algQCBA] <-qCBAmodel@ruleCount
df_stats["modelsize",algQCBA] <- sum(qCBAmodel@rules$condition_count)
message(paste0("** FINISHED POSTPROCESSING ", alg, " model with QCBA **"))
}
rounded_df <- as.data.frame(lapply(df_stats, function(x) round(x, digits = rounding_places)))
rownames(rounded_df)=rownames(df_stats)
if (return_models)
{
returnList[["stats"]]=df_stats
return(returnList)
}
else
{
return(rounded_df)
}
}
# EXAMPLE 1 benchmarking only
#Define input dataset and target variable
df_all <-datasets::iris
classAtt <- "Species"
# Create train/test partition using built-in R functions
tot_rows<-nrow(df_all)
train_proportion<-2/3
df_all <- df_all[sample(tot_rows),]
trainFold <- df_all[1:(train_proportion*tot_rows),]
testFold <- df_all[(1+train_proportion*tot_rows):tot_rows,]
# learn with default metaparameter values
stats<-benchmarkQCBA(trainFold,testFold,classAtt)
print(stats)
# print relative change of QCBA results over baseline algorithms
print(stats[,6:10]/stats[,0:5]-1)
# EXAMPLE 2 external discretization
# Discretize numerical predictors using built-in discretization
# This performs supervised, entropy-based discretization (Fayyad and Irani, 1993)
# of all numerical predictor variables with 3 or more distinct numerical values
discrModel <- discrNumeric(trainFold, classAtt)
train_disc <- as.data.frame(lapply(discrModel$Disc.data, as.factor))
test_disc <- applyCuts(testFold, discrModel$cutp, infinite_bounds=TRUE, labels=TRUE)
stats<-benchmarkQCBA(trainFold,testFold,classAtt,train_disc,test_disc,discrModel$cutp)
print(stats)
# EXAMPLE 3 pass custom metaparameters for base learners,
# use only CBA as a base learner, return rule lists.
output<-benchmarkQCBA(trainFold,testFold,classAtt,train_disc,test_disc,discrModel$cutp,
CBA=list("support"=0.05,"confidence"=0.5),algs = c("CPAR"),
return_models=TRUE)
message("Evaluation statistics")
print(output$stats)
message("CPAR model")
inspect(output$CPAR[[1]])
message("QCBA model")
print(output$CPAR_QCBA[[1]])
|
0e6d993c64643794b0113d4e6c96b4637ad87c47
|
0ca8a44786ec4a0dc0a54dd6da20796d62478285
|
/plot1.R
|
63238c9e1e8b510ce280a925091917c40b407d1c
|
[] |
no_license
|
gianmarino/ExData_Plotting1
|
d2720e2563f3c643191b22c278900c7d83b56a18
|
e3edf64b60a69eaffeb736530dd161928062a6c1
|
refs/heads/master
| 2021-01-18T02:20:24.518203
| 2015-05-07T21:52:40
| 2015-05-07T21:52:40
| 35,221,385
| 0
| 0
| null | 2015-05-07T13:24:05
| 2015-05-07T13:24:02
| null |
UTF-8
|
R
| false
| false
| 752
|
r
|
plot1.R
|
plot1<-function(){
## Plot1 -> plots a Global Active Power histogram
## Extraction and preparation of data. Takes from fullData and dumps it, clean, on partData
fullData<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?",colClasses=c(rep("character",2),rep("numeric",7)))
fullData$Date<-as.Date(fullData[,"Date"],format="%d/%m/%Y")
#fullData$Time<-strptime(fullData[,"Time"],format="%X") ## doubt
filt1<-fullData$Date=="2007-02-01"|fullData$Date=="2007-02-02"
partData<-fullData[filt1,]
## Opens device (png) and begins graphic routine. Closes device.
png("plot1.png")
hist(partData$Global_active_power,main="Global Active Power",xlab="Global Active Power (kilowatts)",col="orange")
dev.off()
}
|
13474884a19d5f27634115825923700c8c0858ac
|
403f786c7c85fa551326d1e077bc895fea26e7c9
|
/tests/testthat/resources/venv-activate.R
|
1fd36dbce6955314812dfa1ddc1934bb59eebafc
|
[
"Apache-2.0"
] |
permissive
|
rstudio/reticulate
|
81528f898d3a8938433d2d6723cedc22bab06ecb
|
083552cefe51fe61441679870349b6c757d6ab48
|
refs/heads/main
| 2023-08-22T01:41:52.850907
| 2023-08-21T16:19:42
| 2023-08-21T16:19:42
| 81,120,794
| 1,672
| 399
|
Apache-2.0
| 2023-09-13T20:35:47
| 2017-02-06T18:59:46
|
R
|
UTF-8
|
R
| false
| false
| 223
|
r
|
venv-activate.R
|
args <- commandArgs(TRUE)
venv <- args[[1]]
Sys.unsetenv("RETICULATE_PYTHON")
Sys.unsetenv("RETICULATE_PYTHON_ENV")
reticulate::use_virtualenv(venv, required = TRUE)
sys <- reticulate::import("sys")
writeLines(sys$path)
|
cb6c06cadd72dbe43aebff4f18d572fca97b8224
|
18e34fdc32f1856ea62c518e9094cabfaf1b464f
|
/R/normalize_data.R
|
23989a15821e2f9c7091dc441e94bb06dc6cdee8
|
[] |
no_license
|
cran/dematel
|
200c784f9a1c314a9b1dcaedc02ba138270adb10
|
3f2335fdc1be6cdcf7645a6426518ced0203ad5d
|
refs/heads/master
| 2023-03-07T15:41:26.311469
| 2021-02-22T10:10:05
| 2021-02-22T10:10:05
| 341,248,652
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 675
|
r
|
normalize_data.R
|
#' Normalize Data
#'
#' Normalizes matrix format data
#'
#' @param x a matrix containing the values of direct relationship decision matrix.
#' @param data_control is a pre-defined logical parameter that whether data should checked.
#'
#' @return This function returns a \code{list} of data, and normalized matrix.
#'
#' @export
#'
normalize_data <- function(x , data_control = TRUE) {
if (data_control == TRUE) {
x <- check_data(x)
}
sum_of_rows <- apply(x, 1, sum)
sum_of_columns <- apply(x, 2, sum)
maximum_of_rows_and_column <- max(sum_of_rows, sum_of_columns)
return(normalized_matrix = round(x/maximum_of_rows_and_column, 5))
}
|
c7f83abaa4713686173b1e457aa89eea6560ee9f
|
750423288021c0d0bcd0d656d09351e4f86870de
|
/analysis/2016/shiny_apps/snake_draft/heavy_lifting.R
|
f570b3adda4d3f3db172e165321ef6dc4a525e05
|
[] |
no_license
|
johnckane/fantasy-football
|
b51ae061dc221ad9e17900d1915b95c231a454ad
|
2ccfdb62f0011738172d774f9f4e2ba72936de2b
|
refs/heads/master
| 2022-12-05T09:18:40.070217
| 2020-09-03T02:28:39
| 2020-09-03T02:28:39
| 106,360,628
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,535
|
r
|
heavy_lifting.R
|
library(dplyr)
library(tidyr)
# This is PPR data
data <- read.csv("/home/john/stats_corner/2016/shiny_apps/snake_draft/FFA-CustomRankings.csv",
stringsAsFactors = FALSE,
header = TRUE)
head(data)
str(data)
# we don't need all these variables
data <- select(data, 1,2,3,4,5,8,16,19,20,21)
str(data)
data <- data %>% filter(position %in% c("QB","RB","WR","TE","DST","K"))
data$ppg <- data$points/13
data$adp <- ifelse(data$adp == "null",999,as.numeric(data$adp))
table(data$adp)
# Let's create some players who have been drafted.
# Assume top 45 picks go as indicated
drafted_n0 <- filter(data, adp < 8)
drafted_n1 <- filter(data, adp < 17)
drafted_n2 <- filter(data, adp < 32)
available_n0 <- anti_join(data,drafted_n0, by = "playerId")
available_n1 <- anti_join(data,drafted_n1, by = "playerId")
available_n2 <- anti_join(data,drafted_n2, by = "playerId")
#split by position
n3 <- rbind(
available_n0 %>%
group_by(position) %>%
arrange(adp) %>%
slice(1),
available_n1 %>%
group_by(position) %>%
arrange(adp) %>%
slice(1),
available_n2 %>%
group_by(position) %>%
arrange(adp) %>%
slice(1)
) %>%
arrange(position,adp) %>%
mutate(pct_drop = round(100*(ppg - lag(ppg))/ppg,2),
raw_drop = (ppg-lag(ppg))) %>%
select (-adp, -playerId,-playerposition,-points,-upper,-lower,-risk) %>%
group_by(position) %>%
mutate(record = row_number(),
player_team = paste0(playername," - ",team))
# get the metrics
recs <-
n3 %>%
select(position,ppg,record) %>%
spread(key = record, value = ppg) %>%
# `colnames<-`(c("POS","BA","PPG Next Pick","PPG Two Picks")) %>%
# select(-BA) %>%
inner_join(
n3 %>%
select(position,pct_drop, record) %>%
spread(key = record, value = pct_drop), #%>%
# `colnames<-`(c("POS","BA","% Drop Next Pick","% Drop Two Picks")) %>%
# select(-BA),
by = "position") %>%
inner_join(
n3 %>%
select(position,raw_drop,record) %>%
spread(key = record, value = raw_drop), # %>%
# `colnames<-`(c("POS","BA","Raw Drop Next Pick","Raw Drop Two Picks")) %>%
# select(-BA),
by = "position") %>%
inner_join(
n3 %>%
select(position,player_team,record) %>%
spread(key = record, value = player_team), #%>%
# `colnames<-`(c("POS","BA","BA - 1 Pick","BA 2 Picks")) %>%
# select(-BA),
by = "position")
recs
recs_formatted <- recs %>%
select(1,11,2,12,3,6,9) %>%
`colnames<-`(c("POS","Best Available","PPG","Best Available Next Pick","PPG","% Drop","Drop"))
recs_formatted
|
0adad1f23e5ece3f6dcef1734625540bbf00c784
|
0a2ae3dc46bf6cc0af67fec2f716954bcc3beb5b
|
/man/get_custom_palette.Rd
|
13961175b9a5518f0184a7c0cac9f408f0e33158
|
[] |
no_license
|
borstell/flagrant
|
c18800d4f39830d3d811b2d2376585c8f3492eac
|
acf37e1b7b69ddccd60809082dd945901c99b76c
|
refs/heads/master
| 2023-07-03T21:30:46.976930
| 2023-06-12T15:15:19
| 2023-06-12T15:15:19
| 263,183,662
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 629
|
rd
|
get_custom_palette.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_custom_palette.R
\name{get_custom_palette}
\alias{get_custom_palette}
\title{Get custom palette}
\usage{
get_custom_palette(country = "Sweden", n = 3)
}
\arguments{
\item{country}{The name of a country, either official English name (e.g. "Sweden"), or ISO-2/3 (e.g. "SE"/"SWE")}
\item{n}{The number of colors required}
}
\value{
A color palette
}
\description{
This function inputs a country name and a required palette length
and returns a color palette based on the country's flag with an
expanded set of colors (if needed) and colors sorted
}
|
e7630a386e77c17ccd62a12068b9268af61f83e7
|
cdd5955cec0498b5c287c5f06efd641f363fa159
|
/1/2.R
|
2de845e0ddeaad9a660fe616dd39b8ea23c57284
|
[] |
no_license
|
amirhossein-alizad/EPS-using-R
|
0aee04fcb200a2bed82731779386674a9b448da7
|
3cc4add2ee9de49de49f0c123c09fe35b02688fd
|
refs/heads/main
| 2023-03-01T13:05:22.606658
| 2021-02-08T23:04:06
| 2021-02-08T23:04:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
2.R
|
times = 7000
x<-rep(0,500)
for(i in 1:7000)
{
num = floor(runif(1, min=1, max=500))
x[num] = x[num] + 1
hist(x, main = i, plot = T,col = "green", xlab = "guest's money", ylab = "guests count")
Sys.sleep(0.05)
}
|
2000d13c2e38c78739ef62b0ffd96cfb7b9b3a5e
|
92a0b69e95169c89ec0af530ed43a05af7134d45
|
/man/Make.dependency.graph.obj.Rd
|
97308bffd088c11d9e35f85fadcd725ad1a13c9c
|
[] |
no_license
|
gelfondjal/IT2
|
55185017b1b34849ac1010ea26afb6987471e62b
|
ee05e227403913e11bf16651658319c70c509481
|
refs/heads/master
| 2021-01-10T18:46:17.062432
| 2016-01-20T17:51:29
| 2016-01-20T17:51:29
| 21,449,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 486
|
rd
|
Make.dependency.graph.obj.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Make_dependency_graph.R
\name{Make.dependency.graph.obj}
\alias{Make.dependency.graph.obj}
\title{Creates an graph object from a dependency object}
\usage{
Make.dependency.graph.obj(dependency.out)
}
\arguments{
\item{dependency.out}{Dependency object(s) to make graph out of}
}
\value{
graph object of project/program dependencies
}
\description{
Creates an graph object from a dependency object
}
|
7f5b19a695ab523612f36af43740387b8deb11ce
|
8c5f0222a10ce128bcf20a0f62b03b8795ee4c3d
|
/R/spautocor.r
|
9bf42a311b0eab86fbf2c1ab0640d993803c5fcd
|
[] |
no_license
|
green-striped-gecko/PopGenReport
|
15a58e5184b877b65791a14b2271487c5d979b81
|
d6b970e91d2b90476704ff95586b0b6e40892111
|
refs/heads/master
| 2023-07-10T17:01:23.072377
| 2023-06-26T23:55:04
| 2023-06-26T23:55:04
| 33,985,286
| 6
| 5
| null | 2023-06-26T23:55:06
| 2015-04-15T09:33:23
|
R
|
UTF-8
|
R
| false
| false
| 4,391
|
r
|
spautocor.r
|
#' Spatial autocorrelation following Smouse and Pekall 1999
#'
#' Global spatial autocorrelation is a multivariate approach combining all loci
#' into a single analysis. The autocorrelation coefficient r is calculated for
#' each pairwise genetic distance pairs for all specified distance classes. For
#' more information see Smouse and Peakall 1999, Peakall et a. 2003 and Smouse
#' et al. 2008.
#'
#'
#' @param gen.m a matrix of individual pairwise genetic distances. Easiest to
#' use gd_smouse or gd_kosman to create such a matrix, but in priniciple any
#' other squared distance matrix can be used. see example
#' @param eucl.m A euclidean distance matrix, based on the coordinates of
#' individuals. see example
#' @param shuffle used internally for the permutation calculation
#' @param bins number of bins for the distance classes. Currently only even
#' bins are supported.
#' @return Returns a data frame with r values and number of distances within
#' each distance class.
#' @author Bernd Gruber, Bernd.Gruber@@canberra.edu.au
#' @seealso \code{\link{popgenreport}}
#' @references Smouse PE, Peakall R. 1999. Spatial autocorrelation analysis of
#' individual multiallele and multilocus genetic structure. Heredity 82:
#' 561-573.
#'
#' Double, MC, et al. 2005. Dispersal, philopatry and infidelity: dissecting
#' local genetic structure in superb fairy-wrens (Malurus cyaneus). Evolution
#' 59, 625-635.
#'
#' Peakall, R, et al. 2003. Spatial autocorrelation analysis offers new
#' insights into gene flow in the Australian bush rat, Rattus fuscipes.
#' Evolution 57, 1182-1195.
#'
#' Smouse, PE, et al. 2008. A heterogeneity test for fine-scale genetic
#' structure. Molecular Ecology 17, 3389-3400.
#'
#' Gonzales, E, et al. 2010. The impact of landscape disturbance on spatial
#' genetic structure in the Guanacaste tree, Enterolobium
#' cyclocarpum(Fabaceae). Journal of Heredity 101, 133-143.
#'
#' Beck, N, et al. 2008. Social constraint and an absence of sex-biased
#' dispersal drive fine-scale genetic structure in white-winged choughs.
#' Molecular Ecology 17, 4346-4358.
#' @examples
#'
#' \dontrun{
#' data(bilby)
#' popgenreport(bilby, mk.spautocor=TRUE, mk.pdf=FALSE)
#' #to get a pdf output you need to have a running Latex version installed on your system.
#' #popgenreport(bilby[1:50], mk.spautocor=TRUE, mk.pdf=TRUE)
#' }
#' @export
# Spatial autocorrelation analysis (Smouse & Peakall 1999, cited in the
# roxygen block above) from a pairwise genetic distance matrix (gen.m) and a
# matching Euclidean distance matrix (eucl.m).  With shuffle=TRUE the genetic
# distances are permuted once, which callers use to build a permutation
# (null) distribution of r.  Returns a data.frame with one row per distance
# class: bin (class upper bound), N and r when shuffle=FALSE, or just r when
# shuffle=TRUE.
spautocor <- function(gen.m,eucl.m, shuffle=FALSE, bins = 10)
{
gd <- gen.m
ed <- eucl.m
if (shuffle==TRUE)
{
# permute the off-diagonal genetic distances once
gdd <- as.dist(gd)
gdsample <- sample(1:length(gdd), length(gdd))
# NOTE(review): the same permuted vector is written into both triangles,
# but upper.tri() fills in a different order than the transpose of
# lower.tri(), so the shuffled matrix is generally NOT symmetric even
# though the original was — confirm this is intended (cdmat below reads
# both triangles).
gd[lower.tri(gd)] <- gdd[gdsample]
gd[upper.tri(gd)] <- gdd[gdsample]
diag(gd) <- 0
}
# cdmat: double-centre the distance matrix (subtract row/column means, add
# back the grand mean, scaled by 0.5) to obtain the covariance-like matrix
# used in the autocorrelation formula.
cdmat <- function(gd)
{
dimen <- nrow(gd)
sgd <- sum(gd, na.rm=TRUE)
cscd <- matrix(colSums(gd, na.rm=TRUE), dimen, dimen)
rscd <- matrix(rowSums(gd, na.rm=TRUE), dimen, dimen, byrow=TRUE)
cd <- 0.5*( -gd + 1/dimen*( cscd + rscd) - 1/dimen^2*(sgd ))
cd
}
cd <- cdmat(gd)
#remove upper triangel to speed things up....
# (each pair is then counted once, via the lower triangle only)
ed[upper.tri(ed)] <-NA
diag(ed) <- NA
# accumulators grow inside the loop; bins is small so this is acceptable
r<- NA
distance <- NA
N<- NA
# equal-width distance classes spanning the observed Euclidean range
steps <- signif(diff(range(ed, na.rm=TRUE))/bins,4)
for (d in 1:bins )
{
# row/col indices of pairs whose distance falls in ((d-1)*steps, d*steps]
index <- which(ed<=d*steps & ed >(d-1)*steps, arr.ind=TRUE)
cx <- sum(cd[index])
cxii<-sum(diag(cd)[index[,1]])
cxjj<-sum(diag(cd)[index[,2]])
# autocorrelation coefficient for this distance class
r[d] <- 2 * cx /(cxii+cxjj)
distance[d] <- steps*d
# NOTE(review): index is a two-column matrix, so length(index) is TWICE
# the number of pairs in the class; nrow(index) would count each pair
# once — confirm which count the report is meant to show.
N[d] <- length(index)
}
if (shuffle==FALSE) res <- data.frame(bin = distance, N=N, r =r)
else res <- data.frame(r=r)
res
}
#b<- redpossums[1:100]
#b@other$xy <- b@other$latlong
#
#xy <- read.csv("D:\\Bernd\\Projects\\aprasia\\apfinal\\apxy.csv")
#
#aprasia@other$xy <- xy
#
#gen.m<-as.matrix(gd_smouse(cats, verbose=FALSE))
#eucl.m <- as.matrix(dist(cats@other$xy))
#reps=1000
#bins=10
#
#splist<- spautocor(gen.m, eucl.m, bins=20)
#
#
#system.time(
#bssplist <- replicate(reps, spautocor(gen.m, eucl.m,shuffle=TRUE, bins=bins))
#)
#
#bs <-matrix(unlist(bssplist), nrow=reps, ncol=bins, byrow=TRUE)
#
#bs.l <- apply(bs,2, quantile, probs=0.025, na.rm=TRUE)
#bs.u <- apply(bs,2, quantile, probs=0.975, na.rm=TRUE)
#
#
#
#matplot(cbind(splist$r,bs.u, bs.l), type="l", lty=c(1,2,2), lwd=c(2,1,1), ylab="Spatial autocorrelation r", axes=FALSE, col=c(1,3,3), xlab="distance")
#axis(2)
#axis(1, at=1:nrow(splist), labels=signif(splist$bin,3))
#axis(1, at=1:nrow(splist), labels=splist$N, line=1, tick=FALSE)
#box()
#mtext("N=",1,line=2, at=0)
#mtext("Bins",1,line=1, at=0)
#
|
0fd1cf9d5142d999a3acdd5508b5f929c09af3b9
|
a1e3f742d80a225e9a2a35e8e88b3054f5408037
|
/R/test.maker.R
|
852c18802f5ec80355be1a12d65cac1c9aab0034
|
[] |
no_license
|
cran/MXM
|
7590471ea7ed05944f39bf542c41a07dc831d34f
|
46a61706172ba81272b80abf25b862c38d580d76
|
refs/heads/master
| 2022-09-12T12:14:29.564720
| 2022-08-25T07:52:40
| 2022-08-25T07:52:40
| 19,706,881
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,407
|
r
|
test.maker.R
|
# Resolve the character name of a conditional-independence test to the
# corresponding test function object.
#
# Unrecognised names are passed through unchanged, exactly like the original
# sequential if/else chain, which returned its input untouched when nothing
# matched.  switch() evaluates only the matched branch, so the test function
# objects are looked up lazily, just as the original `==` ladder did.
test.maker <- function(test) {
  switch(test,
         testIndReg = testIndReg,
         testIndFisher = testIndFisher,
         testIndSpearman = testIndSpearman,
         testIndMMFisher = testIndMMFisher,
         testIndBeta = testIndBeta,
         testIndRQ = testIndRQ,            ## quantile regression
         testIndIGreg = testIndIGreg,      ## inverse Gaussian regression
         testIndMMReg = testIndMMReg,
         testIndPois = testIndPois,        ## Poisson regression
         testIndNB = testIndNB,            ## negative binomial regression
         testIndGamma = testIndGamma,      ## Gamma regression
         testIndNormLog = testIndNormLog,  ## normal regression with log link
         testIndZIP = testIndZIP,          ## zero-inflated Poisson
         testIndTobit = testIndTobit,      ## Tobit regression
         censIndCR = censIndCR,
         censIndWR = censIndWR,
         censIndER = censIndER,
         censIndLLR = censIndLLR,
         testIndClogit = testIndClogit,
         testIndBinom = testIndBinom,
         testIndLogistic = testIndLogistic,
         testIndMultinom = testIndMultinom,
         testIndOrdinal = testIndOrdinal,
         testIndQBinom = testIndQBinom,
         testIndQPois = testIndQPois,
         gSquare = gSquare,
         testIndSPML = testIndSPML,
         ## mixed-model (GLMM) tests
         testIndGLMMReg = testIndGLMMReg,
         testIndLMM = testIndLMM,
         testIndGLMMPois = testIndGLMMPois,
         testIndGLMMLogistic = testIndGLMMLogistic,
         testIndGLMMGamma = testIndGLMMGamma,
         ## NOTE(review): key kept as "testIndGLMMNormlog" (lower-case "l")
         ## to match the original mapping exactly.
         testIndGLMMNormlog = testIndGLMMNormLog,
         testIndGLMMOrdinal = testIndGLMMOrdinal,
         testIndGLMMCR = testIndGLMMCR,
         ## GEE tests
         testIndGEEReg = testIndGEEReg,
         testIndGEELogistic = testIndGEELogistic,
         testIndGEEPois = testIndGEEPois,
         testIndGEEGamma = testIndGEEGamma,
         testIndGEENormLog = testIndGEENormLog,
         test)  # default: return the input unchanged
}
|
f49619db2168b43c5da264c8709589a3e7381c6b
|
47e9b28e603f83d4b28cffd42a3c548168300058
|
/20170530_bayes_gibbs_sampling_01/run.r
|
6febd52029d607da35cbc3e824e938e93e2e63ce
|
[] |
no_license
|
kazufusa/til
|
4fa4c2b201c1c566dd148074ea94d1de96cc62c6
|
321e6ef62d8510f20a4d56834c33e3f4518ecbce
|
refs/heads/main
| 2023-08-08T07:13:48.103002
| 2023-07-19T14:21:56
| 2023-07-19T14:21:56
| 60,586,645
| 5
| 0
| null | 2023-03-06T22:37:47
| 2016-06-07T06:06:11
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,415
|
r
|
run.r
|
# Simulate N observations from a two-component Gaussian mixture (proportion
# a_true from N(mean2, sd2), the rest from N(mean1, sd1)), write them to
# points.csv, then fit a deliberately mis-specified SINGLE-normal model with
# Stan and print the posterior summary for mu and s.
options(width=200)
set.seed(1)
N <- 100
a_true <- 0.4
mean1 <- 0
mean2 <- 3
sd1 <- 1
sd2 <- 1
# first (1-a_true)*N draws from component 1, remaining a_true*N from component 2
Y <- c(rnorm((1-a_true)*N, mean1, sd1), rnorm(a_true*N, mean2, sd2))
data <- list(N=N, Y=Y)
write.table(Y, file="points.csv", sep=",", row.names=F, col.names=F)
# Stan program: single normal likelihood; generated quantities store the
# pointwise log-likelihood (used by waic()/wbic() below) and a posterior
# predictive draw.
model2 <- "
data {
int<lower=1> N;
vector[N] Y;
}
parameters {
real mu;
real<lower=0> s;
}
model {
Y ~ normal(mu, s);
}
generated quantities {
vector[N] log_likelihood;
real y_pred;
for(n in 1:N)
log_likelihood[n] = normal_lpdf(Y[n] | mu, s);
y_pred = normal_rng(mu, s);
}
"
# silence rstan's compilation/sampling chatter
sink(file="/dev/null")
suppressMessages({
library(rstan)
fit2 <- stan(model_code=model2, data=data, iter=11000, warmup=1000, seed=123)
})
sink()
cat("# results of the single normal distribution model with Rstan\n")
print(summary(fit2)$summary[c("mu", "s"), ])
# Widely Applicable Information Criterion (WAIC, Watanabe 2010).
#
# Args:
#   log_likelihood: numeric matrix of pointwise log-likelihoods with one row
#     per posterior draw and one column per observation.
# Returns:
#   WAIC = training error + functional variance / N (per-observation scale,
#   matching the original implementation).
waic <- function(log_likelihood) {
  # log of the column-wise mean of exp(log_lik), computed with the
  # log-sum-exp trick: the original log(colMeans(exp(ll))) form overflows
  # to Inf / underflows to 0 for large-magnitude log-likelihoods.
  log_mean_exp <- function(v) {
    m <- max(v)
    m + log(mean(exp(v - m)))
  }
  training_error <- - mean(apply(log_likelihood, 2, log_mean_exp))
  # posterior variance of the pointwise log-likelihood, averaged over data
  functional_variance_div_N <- mean(colMeans(log_likelihood^2) - colMeans(log_likelihood)^2)
  training_error + functional_variance_div_N
}
# Widely applicable Bayesian Information Criterion (WBIC).
#
# Args:
#   log_likelihood: matrix of pointwise log-likelihoods, one row per
#     posterior draw, one column per observation.
# Returns:
#   Minus the average (over draws) of the total log-likelihood; identical to
#   -mean(rowSums(log_likelihood)) since mean(rowSums(m)) == sum(m)/nrow(m).
wbic <- function(log_likelihood) {
  -sum(log_likelihood) / nrow(log_likelihood)
}
# Assemble a 1x2 results table of information criteria for the single
# normal model, computed from the pointwise log-likelihood draws extracted
# from the fitted stanfit object.
ret <- matrix(0, 1, 2)
colnames(ret) <- c('WAIC', 'WBIC')
rownames(ret) <- c('single normal dist. model')
ret[1,1] <- waic(extract(fit2)$log_likelihood)
# NOTE(review): wbic() is applied to draws from the ordinary posterior here;
# WBIC is defined for sampling at inverse temperature 1/log(n) — confirm
# this simplification is intended.
ret[1,2] <- wbic(extract(fit2)$log_likelihood)
cat("\n")
ret
|
1677479608192bf6020d09272170eca68111eebc
|
42355df3e045bfa63450f7b0c5c2af16baf06b77
|
/surviving_phases_dataverse/MSM_Sim.R
|
03a34583a3e5454c902c9b13433b3fe2a50f77f9
|
[] |
no_license
|
judgelord/DOT
|
0f9ca40f392d3436904bacd2949bb616f5cd50b8
|
3d6a2c74b540e75759da6a290c5b3650da4b4174
|
refs/heads/master
| 2020-04-01T05:43:19.174053
| 2019-05-28T18:28:14
| 2019-05-28T18:28:14
| 152,916,995
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,465
|
r
|
MSM_Sim.R
|
# Replicates with R 3.3.1, given the following package versions.
# Monte Carlo study comparing Cox multi-state model specifications with
# separated vs. collapsed transitions (fitted inside the main loop below).
# NOTE(review): rm(list=ls()) and an absolute setwd() in a script are
# fragile for replication on other machines — flagged, left as-is.
rm(list=ls())
library(msm) #v1.6.1
library(survival) #v.2.39.5 (!! Important.)
library(mstate) #v0.2.9
setwd("C:/Users/Shawna/Desktop/PA replic")
set.seed(031415)
### Simulate 250 individuals with common observation times
# 250 subjects x 101 observation times (0..100); x is a subject-level
# covariate drawn once per subject in the loop below.
sim.df <- data.frame(subject = rep(1:250, rep(101,250)), time = rep(seq(0, 100, 1), 250), x=rep(NA,250))
# NOTE(review): this loop runs nrow(sim.df) (= 25250) times but there are
# only 250 subjects; iterations past 250 index unique(...) out of range
# (tmp becomes NA) and assign nothing because the length-1 replacement with
# an all-NA logical index is a no-op.  Looping over unique subjects would
# be equivalent and far cheaper — confirm before changing.
for(i in 1:nrow(sim.df)){
tmp <- unique(sim.df$subject)[i]
sim.df$x[sim.df$subject==tmp] <- rnorm(1)
}
#Set Baseline Hazards for Each Transition in the Model
# 3-state structure; state 3 is absorbing (death = 3 in the simmulti.msm
# call inside the main loop).
qmatrix <- rbind(c(0, 0.15, 0.05 ),
c(0.02, 0, 0.05 ),
c(0,0,0))
#Set Number of Simulations
loops <- 1000
#Define storage objects to store simulation results
# One row per Monte Carlo replicate; betas.* hold coefficients, std.* their
# standard errors, ph.* the proportional-hazards test p-values.
betas.sep <- matrix(NA, nrow=loops, ncol=4)
std.sep <- matrix(NA, nrow= loops, ncol=4)
betas.comb.1 <- matrix(NA, nrow=loops, ncol=5)
std.comb.1 <- matrix(NA, nrow= loops, ncol=5)
ph.1 <- matrix(NA, nrow= loops, ncol=1)
betas.comb.1a <- matrix(NA, nrow=loops, ncol=3)
std.comb.1a <- matrix(NA, nrow= loops, ncol=3)
betas.comb.2 <- matrix(NA, nrow=loops, ncol=5)
std.comb.2 <- matrix(NA, nrow= loops, ncol=5)
ph.2 <- matrix(NA, nrow= loops, ncol=1)
betas.comb.3 <- matrix(NA, nrow=loops, ncol=4)
std.comb.3 <- matrix(NA, nrow= loops, ncol=4)
ph.3 <- matrix(NA, nrow= loops, ncol=1)
# Main Monte Carlo loop: for each replicate, simulate panel data with msm,
# reshape it into multi-state (counting-process) format, then fit five Cox
# specifications and record coefficients / SEs / PH-test p-values.
# NOTE(review): the rbind()-in-loop reshaping below grows data frames row by
# row (quadratic); correct but slow — flagged only, statement order kept.
for(a in 1:loops){
#Simulate data, and set True coefficients
dat <- simmulti.msm(sim.df, qmatrix, death = 3, covariates = list(x = c(.5, 3, 1, -1)))
#Create empty data frame to store simulated data in multi-state format
dat.ms <- data.frame(id = numeric(0), entry = numeric(0), exit = numeric(0), from = numeric(0), to = numeric(0), status = numeric(0), x = numeric(0))
#Reformat simulated data into multi-state format
# NOTE(review): same over-long loop pattern as in the setup — 1:nrow(dat)
# iterations but only one pass per unique subject does work.
for (i in 1:nrow(dat)){
tmp <- unique(dat$subject)[i]
dat.tmp <- subset(dat, dat$subject == tmp)
dat.tmp$stop <- dat.tmp$time+1
startTemp <- dat.tmp$time[1]
for(k in 1:nrow(dat.tmp)){
if (k > 1){
# emit one row per observed state change, spanning the sojourn time
if (dat.tmp$state[k] == dat.tmp$state[k-1]){
}else{
endStop <- dat.tmp$time[k]
tempRow <- data.frame(id = dat.tmp$subject[k], entry = startTemp, exit = endStop, from = dat.tmp$state[k-1], to = dat.tmp$state[k], status = 1, x = dat.tmp$x[k])
startTemp <- dat.tmp$time[k]
dat.ms <- rbind(dat.ms, tempRow)
}
}
}
}
#Rename multi-state formatted dataset
dat <- dat.ms
dat <- dat[order(dat$id, dat$entry),]
tid <- unique(dat$id)
dat.msf <- data.frame(id = numeric(0), entry = numeric(0), exit = numeric(0), from = numeric(0), to = numeric(0), status = numeric(0), x1 = numeric(0), x2 = numeric(0))
# Expand each observed transition with its censored "competing" record
# (status = 0, destination state swapped) so every at-risk transition from
# the origin state is represented.
for (j in 1:length(tid)){
tmp <- dat[dat$id == tid[j],]
for (k in 1:nrow(tmp)){
tmp.ms <- tmp[k,]
tmp.ms$status <- 0
if(tmp[k,]$from==1 & tmp[k,]$to==2){tmp.ms$to <- 3}
if(tmp[k,]$from==1 & tmp[k,]$to==3){tmp.ms$to <- 2}
if(tmp[k,]$from==2 & tmp[k,]$to==1){tmp.ms$to <- 3}
if(tmp[k,]$from==2 & tmp[k,]$to==3){tmp.ms$to <- 1}
dat.msf <- rbind(dat.msf, tmp[k,], tmp.ms)
}
}
########Transitions Seperated###########
# Model 1: all four transitions kept as separate strata, one transition-
# specific coefficient each.
dat.msf.sep <- dat.msf
#Define transitions
dat.msf.sep$trans <- 1
dat.msf.sep$trans[dat.msf.sep$from==1 & dat.msf.sep$to==3] <- 2
dat.msf.sep$trans[dat.msf.sep$from==2 & dat.msf.sep$to==1] <- 3
dat.msf.sep$trans[dat.msf.sep$from==2 & dat.msf.sep$to==3] <- 4
#Define transitions-specific covariates
dat.msf.sep$x.1 <- dat.msf.sep$x
dat.msf.sep$x.1[dat.msf.sep$trans != 1] <- 0
dat.msf.sep$x.2 <- dat.msf.sep$x
dat.msf.sep$x.2[dat.msf.sep$trans != 2] <- 0
dat.msf.sep$x.3 <- dat.msf.sep$x
dat.msf.sep$x.3[dat.msf.sep$trans != 3] <- 0
dat.msf.sep$x.4 <- dat.msf.sep$x
dat.msf.sep$x.4[dat.msf.sep$trans != 4] <- 0
#Estimate model and store results
mod.sep <- coxph(Surv(entry, exit, status) ~ x.1 + x.2 + x.3 + x.4 + strata(trans), data = dat.msf.sep, method = "breslow")
summary(mod.sep)
betas.sep[a,] <- mod.sep$coef
std.sep[a,] <- sqrt(diag(mod.sep$var))
########Transitions 2 & 4 Collapsed, Unique Coefficients for X2 and X4###########
# Model 2: transitions into death share a baseline hazard (stratum) but keep
# separate covariate effects; `ill` marks death-from-illness records.
dat.msf.comb.2 <- dat.msf
#Define transitions
dat.msf.comb.2$trans <- 1
dat.msf.comb.2$trans[dat.msf.comb.2$from==1 & dat.msf.comb.2$to==3] <- 2
dat.msf.comb.2$trans[dat.msf.comb.2$from==2 & dat.msf.comb.2$to==1] <- 3
dat.msf.comb.2$trans[dat.msf.comb.2$from==2 & dat.msf.comb.2$to==3] <- 4
#Define transition-specific covariates
dat.msf.comb.2$x.1 <- dat.msf.comb.2$x
dat.msf.comb.2$x.1[dat.msf.comb.2$trans != 1] <- 0
dat.msf.comb.2$x.2 <- dat.msf.comb.2$x
dat.msf.comb.2$x.2[dat.msf.comb.2$trans != 2] <- 0
dat.msf.comb.2$x.3 <- dat.msf.comb.2$x
dat.msf.comb.2$x.3[dat.msf.comb.2$trans != 3] <- 0
dat.msf.comb.2$x.4 <- dat.msf.comb.2$x
dat.msf.comb.2$x.4[dat.msf.comb.2$trans != 4] <- 0
#Collapse transitions 2 and 4
dat.msf.comb.2$trans[dat.msf.comb.2$trans==4] <- 2
#Add Time Varying Illness Indicator
dat.msf.comb.2$ill <- 0
dat.msf.comb.2$ill[dat.msf.comb.2$from == 2 & dat.msf.comb.2$to == 3] <- 1
#Estimate model and store results
mod.comb.2 <- coxph(Surv(entry, exit, status) ~ x.1 + x.2 + x.3 + x.4 + ill + strata(trans), data = dat.msf.comb.2, method = "breslow")
summary(mod.comb.2)
betas.comb.2[a,] <- mod.comb.2$coef
std.comb.2[a,] <- sqrt(diag(mod.comb.2$var))
# store the PH-test p-value of the `ill` indicator (row 5 of the zph table)
sto <- cox.zph(mod.comb.2, transform="km", global=TRUE)
ph.2[a,] <- sto$table[5,3]
########Transitions 2 & 4 Collapsed, Collapsed Coefficient for X2 and X4###########
# Model 3: as model 2 but x.2 is shared across both death transitions.
dat.msf.comb.3 <- dat.msf
#Define transitions
dat.msf.comb.3$trans <- 1
dat.msf.comb.3$trans[dat.msf.comb.3$from==1 & dat.msf.comb.3$to==3] <- 2
dat.msf.comb.3$trans[dat.msf.comb.3$from==2 & dat.msf.comb.3$to==1] <- 3
dat.msf.comb.3$trans[dat.msf.comb.3$from==2 & dat.msf.comb.3$to==3] <- 4
#Define transition-specific covariates
dat.msf.comb.3$x.1 <- dat.msf.comb.3$x
dat.msf.comb.3$x.1[dat.msf.comb.3$trans != 1] <- 0
dat.msf.comb.3$x.2 <- dat.msf.comb.3$x
dat.msf.comb.3$x.2[dat.msf.comb.3$trans != 2 & dat.msf.comb.3$trans !=4] <- 0
dat.msf.comb.3$x.3 <- dat.msf.comb.3$x
dat.msf.comb.3$x.3[dat.msf.comb.3$trans != 3] <- 0
#Collapse transitions 2 and 4
dat.msf.comb.3$trans[dat.msf.comb.3$trans==4] <- 2
#Add Time Varying Illness Indicator
dat.msf.comb.3$ill <- 0
dat.msf.comb.3$ill[dat.msf.comb.3$from == 2 & dat.msf.comb.3$to == 3] <- 1
#Estimate model and store results
mod.comb.3 <- coxph(Surv(entry, exit, status) ~ x.1 + x.2 + x.3 + ill + strata(trans), data = dat.msf.comb.3, method = "breslow")
summary(mod.comb.3)
betas.comb.3[a,] <- mod.comb.3$coef
std.comb.3[a,] <- sqrt(diag(mod.comb.3$var))
sto <- cox.zph(mod.comb.3, transform="km", global=TRUE)
ph.3[a,] <- sto$table[4,3]
########Transitions 1 & 2 Collapsed - Unique Coefficients for X1 and X2###########
# Model 4: transitions OUT of state 1 share a stratum; `ill` here marks the
# 1 -> 3 (direct death) records.
dat.msf.comb.1 <- dat.msf
#Define transitions
dat.msf.comb.1$trans <- 1
dat.msf.comb.1$trans[dat.msf.comb.1$from==1 & dat.msf.comb.1$to==3] <- 2
dat.msf.comb.1$trans[dat.msf.comb.1$from==2 & dat.msf.comb.1$to==1] <- 3
dat.msf.comb.1$trans[dat.msf.comb.1$from==2 & dat.msf.comb.1$to==3] <- 4
#Define transition-specific covariates
dat.msf.comb.1$x.1 <- dat.msf.comb.1$x
dat.msf.comb.1$x.1[dat.msf.comb.1$trans != 1] <- 0
dat.msf.comb.1$x.2 <- dat.msf.comb.1$x
dat.msf.comb.1$x.2[dat.msf.comb.1$trans != 2] <- 0
dat.msf.comb.1$x.3 <- dat.msf.comb.1$x
dat.msf.comb.1$x.3[dat.msf.comb.1$trans != 3] <- 0
dat.msf.comb.1$x.4 <- dat.msf.comb.1$x
dat.msf.comb.1$x.4[dat.msf.comb.1$trans != 4] <- 0
#Collapse transitions 1 and 2
dat.msf.comb.1$trans[dat.msf.comb.1$trans==2] <- 1
#Add Transition Indicator
dat.msf.comb.1$ill <- 0
dat.msf.comb.1$ill[dat.msf.comb.1$from == 1 & dat.msf.comb.1$to == 3] <- 1
#Estimate model and store results
mod.comb.1 <- coxph(Surv(entry, exit, status) ~ x.1 + x.2 + x.3 + x.4 + ill + strata(trans), data = dat.msf.comb.1, method = "breslow")
summary(mod.comb.1)
betas.comb.1[a,] <- mod.comb.1$coef
std.comb.1[a,] <- sqrt(diag(mod.comb.1$var))
sto <- cox.zph(mod.comb.1, transform="km", global=TRUE)
ph.1[a,] <- sto$table[5,3]
########Transitions 1 & 2 - Collapsed Coefficients for X1 and X2###########
# Model 5: one coefficient (x.1) shared across transitions 1 and 2.
# NOTE(review): unlike models 2-4 the strata are NOT collapsed here (no
# trans==2 -> 1 reassignment) — confirm this is the intended specification.
dat.msf.comb.1a <- dat.msf
#Define transitions
dat.msf.comb.1a$trans <- 1
dat.msf.comb.1a$trans[dat.msf.comb.1a$from==1 & dat.msf.comb.1a$to==3] <- 2
dat.msf.comb.1a$trans[dat.msf.comb.1a$from==2 & dat.msf.comb.1a$to==1] <- 3
dat.msf.comb.1a$trans[dat.msf.comb.1a$from==2 & dat.msf.comb.1a$to==3] <- 4
#Define transition-specific covariates
dat.msf.comb.1a$x.1 <- dat.msf.comb.1a$x
dat.msf.comb.1a$x.1[dat.msf.comb.1a$trans != 1 & dat.msf.comb.1a$trans != 2] <- 0
dat.msf.comb.1a$x.3 <- dat.msf.comb.1a$x
dat.msf.comb.1a$x.3[dat.msf.comb.1a$trans != 3] <- 0
dat.msf.comb.1a$x.4 <- dat.msf.comb.1a$x
dat.msf.comb.1a$x.4[dat.msf.comb.1a$trans != 4] <- 0
#Estimate model and store results
mod.comb.1a <- coxph(Surv(entry, exit, status) ~ x.1 + x.3 + x.4 + strata(trans), data = dat.msf.comb.1a, method = "breslow")
summary(mod.comb.1a)
betas.comb.1a[a,] <- mod.comb.1a$coef
std.comb.1a[a,] <- sqrt(diag(mod.comb.1a$var))
# progress indicator
print(a)
}
# Persist the entire workspace (all betas/std/ph matrices) for analysis.
save.image("MonteCarlo_Simulations.RData")
|
530037eb5b4e3075934ee571df4cbe384ec7d201
|
f227db976d38b05a34245eb1a1c550cc51048499
|
/tests/testthat.R
|
8b7230396e31050d6e7c43d9df989101f7ab9bc6
|
[
"Apache-2.0"
] |
permissive
|
leeevans/Achilles
|
1d05f319adc6f9abd575881d1f140e3fd315fd7c
|
212211afaa77200ac9bbb809d85f440b99fbe9d6
|
refs/heads/master
| 2021-01-15T17:36:27.491108
| 2016-02-24T16:44:30
| 2016-02-24T16:44:30
| 52,451,809
| 0
| 0
| null | 2016-02-24T15:17:40
| 2016-02-24T15:17:40
| null |
UTF-8
|
R
| false
| false
| 88
|
r
|
testthat.R
|
# Package test entry point (tests/testthat.R), executed by R CMD check.
# Clearing R_TESTS works around child R processes failing to find the
# temporary startup file during checking.
Sys.setenv("R_TESTS" = "")
library(testthat)
library(Achilles)
# Run all test files under tests/testthat/ for the Achilles package.
test_check("Achilles")
|
a6736cf04645cde196eb83fa8135c9b473dc50f0
|
2a20ba73f6804363f0e4bcf17980bc4bb9d75592
|
/inst/unitTests/test_AnimalQTLDB.R
|
48d7b6ff28bd4e70098b61600c0cc139dee0d693
|
[] |
no_license
|
liuyufong/AnimalQTLDB
|
b03647428ae53798ce7892ca5a0154dd9bb03140
|
4ddf62d1c41041b2355d187aeacacc802758a826
|
refs/heads/master
| 2021-01-01T08:19:17.719641
| 2017-08-17T13:24:36
| 2017-08-17T13:24:36
| 96,750,432
| 0
| 0
| null | 2017-07-18T03:25:53
| 2017-07-10T07:48:05
| null |
UTF-8
|
R
| false
| false
| 169
|
r
|
test_AnimalQTLDB.R
|
# RUnit test for the AnimalQTLDB() accessor: expects a 7-row, 5-column
# table whose first cell is the literal string 'table'.
test_AnimalQTLDB <- function(){
checkEquals(NROW(AnimalQTLDB()), 7)
checkTrue(AnimalQTLDB()[1,1] == 'table')
checkEqualsNumeric(NCOL(AnimalQTLDB()), 5)
}
|
39fa04e49fd413d8c4f4dd3bd6dc29f934e720c5
|
2b5895474a98cca1d0d41e7f44a21b28ac07aee6
|
/ad_hoc_analysis/30_point_rule.R
|
49ccabbd4eccfa07382fd2563fa7364f622f15be
|
[] |
no_license
|
insightlane/score-progression
|
7666c36ebdeaf1ac346e1da4982208ab92371a7a
|
ecf24455dcdb46ff149a3bbccaf33806bfb02755
|
refs/heads/master
| 2023-05-07T20:19:04.581541
| 2021-06-06T10:37:16
| 2021-06-06T10:37:16
| 288,646,937
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,569
|
r
|
30_point_rule.R
|
# Ad-hoc comeback analysis on score_progression_worm: for several deficit
# thresholds (6..36 points), count games in which a team trailed by at least
# that margin and how often it still won.
library(dplyr)
library(tidyr)
# Per-season comeback counts and win percentages by deficit size.
comebacksyear <- score_progression_worm %>%
ungroup() %>%
mutate(Season = as.numeric(format(as.Date(Date.x, "%d-%b-%Y"), "%Y"))) %>%
#filter(Event != "PS" & Event != "S" & Event != "F") %>%
group_by(Season) %>%
summarise(count = n_distinct(GameID),
countwin6d = length(unique(GameID[ScorerMargin <= -6 & ScorerFinalMargin > 0])),
count6d = length(unique(GameID[ScorerMargin <= -6])),
percwin6d = countwin6d/count,
countwin12d = length(unique(GameID[ScorerMargin <= -12 & ScorerFinalMargin > 0])),
count12d = length(unique(GameID[ScorerMargin <= -12])),
percwin12d = countwin12d/count,
countwin18d = length(unique(GameID[ScorerMargin <= -18 & ScorerFinalMargin > 0])),
count18d = length(unique(GameID[ScorerMargin <= -18])),
percwin18d = countwin18d/count,
countwin24d = length(unique(GameID[ScorerMargin <= -24 & ScorerFinalMargin > 0])),
count24d = length(unique(GameID[ScorerMargin <= -24])),
percwin24d = countwin24d/count,
countwin30d = length(unique(GameID[ScorerMargin <= -30 & ScorerFinalMargin > 0])),
count30d = length(unique(GameID[ScorerMargin <= -30])),
percwin30d = countwin30d/count)
# Interactive check: distinct game/team pairs with a 24+ point comeback win,
# per season (result printed, not stored).
score_progression_worm %>%
ungroup() %>%
mutate(Season = as.numeric(format(as.Date(Date.x, "%d-%b-%Y"), "%Y"))) %>%
group_by(Season) %>%
filter(Team1Margin <= -24 & Team1FinalMargin > 0) %>%
summarise(n_distinct(GameID, Team1))
# Interactive check: 24-point-deficit counts per season from Team1's side.
score_progression_worm %>%
ungroup() %>%
mutate(Season = as.numeric(format(as.Date(Date.x, "%d-%b-%Y"), "%Y"))) %>%
group_by(Season) %>%
#filter(Team1Margin <= -24 & Team1FinalMargin > 0) %>%
summarise(count = n_distinct(GameID),
countwin24d = length(unique(GameID[Team1Margin <= -24 & Team1FinalMargin > 0])),
count24d = length(unique(GameID[Team1Margin <= -24])))
# NOTE(review): `dots` (a named list of one-sided formulas) is defined but
# never used below — presumably leftover from a summarise_(.dots = ...)
# draft; confirm before deleting.
dots <- setNames(list( ~ mean(value),
~ sum(value),
~ median(value),
~ sd(value)),
c("Right", "Wrong", "Unanswered", "Invalid"))
# NOTE(review): the next pipeline is BROKEN as written: the bare token
# `summar` after the closing summarise() is incomplete (runtime error:
# object 'summar' not found), and the following `gather(...) %>% spread()`
# statement has no data argument while spread() lacks its key/value
# arguments.  Left byte-identical here; repairing it needs the author's
# intent.
comebacksteam <- score_progression_worm %>%
ungroup() %>%
mutate(Season = as.numeric(format(as.Date(Date.x, "%d-%b-%Y"), "%Y"))) %>%
#filter(Event != "PS" & Event != "S" & Event != "F") %>%
group_by(Team1) %>%
summarise(#count = n_distinct(GameID),
#countwin6d = length(unique(GameID[Team1Margin <= -6 & Team1FinalMargin > 0])),
count6d = length(unique(GameID[Team1Margin <= -6])),
#percwin6d = countwin6d/count,
#countwin12d = length(unique(GameID[Team1Margin <= -12 & Team1FinalMargin > 0])),
count12d = length(unique(GameID[Team1Margin <= -12])),
#percwin12d = countwin12d/count,
#countwin18d = length(unique(GameID[Team1Margin <= -18 & Team1FinalMargin > 0])),
count18d = length(unique(GameID[Team1Margin <= -18])),
#percwin18d = countwin18d/count,
countwin24d = length(unique(GameID[Team1Margin <= -24 & Team1FinalMargin > 0])),
count24d = length(unique(GameID[Team1Margin <= -24])),
#percwin24d = countwin24d/count,
countwin30d = length(unique(GameID[Team1Margin <= -30 & Team1FinalMargin > 0])),
count30d = length(unique(GameID[Team1Margin <= -30])),
#percwin30d = countwin30d/count,
countwin36d = length(unique(GameID[Team1Margin <= -36 & Team1FinalMargin > 0])),
count36d = length(unique(GameID[Team1Margin <= -36]))
#percwin36d = countwin30d/count
) %>%
summar
gather(situation, number, countwin6d:count6d) %>%
spread()
### Teams behind in last quarter
score_progression_worm %>%
ungroup() %>%
mutate(Season = as.numeric(format(as.Date(Date.x, "%d-%b-%Y"), "%Y"))) %>%
filter(Event != "PS" & Season == 2017) %>%
group_by(Team1) %>%
summarise(count = n_distinct(GameID),
countq4def = length(unique(GameID[Team1Margin < 0
& Quarter == 4])))
|
fe314a86788d35619dda06e650fb2a305faa75f3
|
c66a649227a633cbce7c1cd2307a34332670a3d8
|
/singler_annotation.R
|
36c9e9ff18e421b6f6af4a61956db62b8191248f
|
[] |
no_license
|
chansigit/scSnippet
|
64036602d4913b105ac4107bcc8c42f894e97696
|
5be7fc3ee2619177ceff21edc84f0e02ada22dc6
|
refs/heads/master
| 2022-11-17T01:50:33.027809
| 2022-11-11T08:38:05
| 2022-11-11T08:38:05
| 192,212,166
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,967
|
r
|
singler_annotation.R
|
# Load pre-downloaded SingleR/celldex reference datasets from local .rda
# files, timing the load with tictoc's tic()/toc().
# NOTE(review): the .rda files are assumed to provide the session objects
# hpca, blueprint, dbimmexp, hemato and monaco that SingleR_Annotation()
# uses (it also references immgen and mmrna, whose load is not shown here)
# — confirm the object names inside the .rda files.
tic()
load("/data/hca/SingleRReference/HumanPrimaryCellAtlasData.rda")
load("/data/hca/SingleRReference/BlueprintEncodeData.rda")
load("/data/hca/SingleRReference/DatabaseImmuneCellExpressionData.rda")
load("/data/hca/SingleRReference/NovershternHematopoieticData.rda")
load("/data/hca/SingleRReference/MonacoImmuneData.rda")
toc()
#' Annotate a Seurat object with SingleR cell-type labels.
#'
#' Runs SingleR against the chosen reference atlas and stores the pruned
#' main- and fine-grained labels as metadata columns named
#' "<reference>_main" and "<reference>_fine".
#'
#' @param seu Seurat object; its "data" slot is used as the test matrix.
#' @param reference One of "HPCA", "BED", "DbImmExp", "Hemato", "Monaco",
#'   "ImmGen", "MouseRNA".  Any other value falls back to HPCA, exactly as
#'   in the original if/else ladder.
#' @param use_local If TRUE (default), use reference objects already loaded
#'   in the session; otherwise fetch them via the celldex accessor functions.
#' @return The input Seurat object with two new metadata columns.
SingleR_Annotation <- function(seu, reference="HPCA", use_local=T){
    if (use_local == FALSE) {
        # Download/fetch the reference through the celldex accessors;
        # switch() evaluates only the matched branch (lazy, like the
        # original ladder), with HPCA as the unnamed default.
        ref <- switch(reference,
                      HPCA = HumanPrimaryCellAtlasData(),
                      BED = BlueprintEncodeData(),
                      DbImmExp = DatabaseImmuneCellExpressionData(),
                      Hemato = NovershternHematopoieticData(),
                      Monaco = MonacoImmuneData(),
                      ImmGen = ImmGenData(),
                      MouseRNA = MouseRNAseqData(),
                      HumanPrimaryCellAtlasData())
    } else {
        # Use the pre-loaded session objects (see the load() calls above
        # this function in the file).
        ref <- switch(reference,
                      HPCA = hpca,
                      BED = blueprint,
                      DbImmExp = dbimmexp,
                      Hemato = hemato,
                      Monaco = monaco,
                      ImmGen = immgen,
                      MouseRNA = mmrna,
                      hpca)
    }
    expr <- GetAssayData(seu, slot="data")
    # Predict per-cell labels at both granularities (fine first, matching
    # the original call order), then attach the pruned labels as metadata.
    pred.fine <- SingleR(test = expr, ref = ref, labels = ref$label.fine)
    pred.main <- SingleR(test = expr, ref = ref, labels = ref$label.main)
    seu[[paste0(reference,"_main")]] <- pred.main$pruned.labels
    seu[[paste0(reference,"_fine")]] <- pred.fine$pruned.labels
    return(seu)
}
|
354a2eb7afbdd0b933579d64db38dfdafcfa6d54
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/8712_2/rinput.R
|
46035c45b6ee0378604fab28f3bd7f0de78ac759
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot the Newick-format phylogenetic tree in 8712_2.txt with ape and
# write the unrooted version alongside it (used as codeml/PAML input, per
# the containing directory name).
library(ape)
testtree <- read.tree("8712_2.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8712_2_unrooted.txt")
|
d4ed67b8444100607c4dd2719fc152287a3dcfdf
|
6a2d44c6012f0715f1b1a3ca7c83bf07cb380d64
|
/cachematrix.R
|
806f7be3c4813f21fd049834e35ab2416d93f7f3
|
[] |
no_license
|
jjsjamesj/ProgrammingAssignment2
|
c4ef17755fb8a1fa1ed9f03d88fd428bbccc22ec
|
21a0ca6fcf7b7e1fe7ade54d7ad7b38dea078125
|
refs/heads/master
| 2021-01-12T13:46:49.922621
| 2016-09-25T12:27:36
| 2016-09-25T12:27:36
| 69,127,779
| 0
| 0
| null | 2016-09-24T21:08:12
| 2016-09-24T21:08:11
| null |
UTF-8
|
R
| false
| false
| 1,183
|
r
|
cachematrix.R
|
## makeCacheMatrix takes as argument a square invertible matrix
## and returns a list of getter and setter methods for the matrix
## and its inverse.
## code of the form 'X<<-Y' assigns to X in the parent environment
# Build a caching wrapper around an invertible matrix.  Returns a list of
# four accessor closures (set/get/setxinverse/getxinverse) that share the
# matrix `x` and its cached inverse through the enclosing environment
# (hence the `<<-` super-assignments).
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL
  list(
    # replace the stored matrix and drop any stale cached inverse
    set = function(y) {
      x <<- y
      inv_cache <<- NULL
    },
    # return the stored matrix
    get = function() x,
    # record a freshly computed inverse
    setxinverse = function(inverse) inv_cache <<- inverse,
    # return the cached inverse, or NULL if none has been stored yet
    getxinverse = function() inv_cache
  )
}
## cacheSolve takes as argument a list, such as type returned by
## makeCacheMatrix. It then checks whether or not the inverse is already
## cached. If the inverse is already cached, it returns the cached inverse
## If the inverse is not already cached, then it calls solve()
## and the inverse setter method, and then returns the inverse.
# Return the inverse of the special "cache matrix" object produced by
# makeCacheMatrix.  On first use the inverse is computed with solve() and
# memoised through the object's setter; subsequent calls reuse the cached
# value and announce it with a message, exactly as the original did.
# `...` is forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getxinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setxinverse(inv)
  inv
}
|
a653d72abe7d7b6e6cde679e2e8b15a97875fbf2
|
a07ee789e553ab8f6e01f77e3d92b13995976550
|
/Untitled.R
|
f7dfd380760814be86eabb958aa43d94796e8bfa
|
[] |
no_license
|
hancampbell/PracticeCPSC292
|
b0f1c31d958faca87504cd54699f99a6ebcac26d
|
5bfcbda1f3993521a9c6c2bb3ace3929d5cbc733
|
refs/heads/main
| 2023-08-28T13:16:00.117860
| 2021-10-29T19:23:09
| 2021-10-29T19:23:09
| 422,687,827
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 169
|
r
|
Untitled.R
|
# Practice code

# Fixed: the original `print"Hello world"` was a syntax error (missing call
# parentheses).
print("Hello world")

library(usethis)
library(gitcreds)

usethis::create_github_token()
# SECURITY: the original file committed a live GitHub personal access token
# (ghp_...) on a bare line here.  It has been removed — the bare token was a
# runtime error anyway (unquoted symbol lookup) — and the leaked token must
# be revoked on GitHub.  Never commit credentials to source control.
gitcreds::gitcreds_set()
|
0e9ca382884e58b9158787531ffbc4ecfa6001b7
|
ee785dcfc8f3d826dd995602ee9e312d7d95bbb0
|
/inst/save.v3.obj.R
|
3f08dcbd6e3dec996994adf24cded8673db703a4
|
[
"MIT"
] |
permissive
|
morris-lab/CellTagViz-package
|
b54905f5b2d69c48aade115a058cb271da535d37
|
196169aca3a03a541482c73611b641b9f81d7a58
|
refs/heads/master
| 2020-04-20T23:10:54.723257
| 2019-08-27T19:30:30
| 2019-08-27T19:30:30
| 169,161,856
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 479
|
r
|
save.v3.obj.R
|
# One-off conversion script: load a Seurat-integrated workspace and a
# Monocle object from disk, then convert both to SingleCellExperiment form
# via the helpers sourced from dataTools.R.
# NOTE(review): `integrated.subset` is assumed to be provided by the
# .RData workspace loaded on the first line — confirm.
load("~/../Desktop/sham.sbr.integrated.RData")
saveRDS(object = integrated.subset, file = "~/../Desktop/integrated.subset.RDS")
path <- "~/../Desktop/integrated.subset.RDS"
dat <- readRDS(file = path)
monPath <- "~/../Desktop/unsupervised timeline all data.RDS"
mon <- readRDS(file = monPath)
#monSCE <- exportCDS(mon, export_to = "Scater")
library(S4Vectors)
source("~/GitHub/working.Viz/R/dataTools.R")
monSCE <- addMonocleDataNew(mon)
sce <- addSeuratv3(dat)
|
a3a50647b3e6fc5cc510a2dada21b8f875a82b9e
|
1f25974833ab7f542da6ead2c1a6857c5bafeb21
|
/computations/network_distances.R
|
f833e667b129725cb5b32fddfbe7a580fdcd667e
|
[] |
no_license
|
SugiharaLab/SIO276L
|
e08387018321e13953f607c15e7c7d23fdad0b7a
|
26b9c5b7a9be32e4f3b6f5a25843050ff7d6952c
|
refs/heads/master
| 2020-05-04T13:23:35.764642
| 2019-05-21T10:49:48
| 2019-05-21T10:49:48
| 179,158,404
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,983
|
r
|
network_distances.R
|
# Build climate-mode interaction networks: pairwise CCM / correlation
# weights between the columns of modes_df (provided by import_data.R),
# rendered with qgraph.  Which analysis runs is controlled by the flags
# below or by the first command-line argument.
source ("import_data.R")
library(qgraph)
library(rEDM)
library(ggplot2)
# one row per ordered pair of modes
num_pairs <- length(modes_df)*(length(modes_df)-1)
weight_pairs <- data.frame(to=character(num_pairs),
from=character(num_pairs),thickness=numeric(num_pairs),
stringsAsFactors=FALSE)
GEN_CCM_TEMPORAL_GRAPH <- FALSE
SLIDING_WINDOW_NETWORK <- FALSE
CALC_SYNCHRONIZATION <- FALSE
NUM_TIME_WINDOWS = 20
# NOTE(review): args[1] is NA when the script is run with no arguments, so
# the `args[1]=="..."` guards below evaluate to NA and error — confirm the
# script is always invoked with an argument.
args <- commandArgs(trailingOnly = TRUE)
#handpicked best E from simplex output
# NOTE(review): the trailing comma leaves an empty final argument in this
# switch() call, and an unmatched mode therefore has no usable default —
# confirm best_E is only ever called with the four listed modes.
best_E <- function(mode) {
switch( mode,
"ENSO" = 5,
"NAO" = 8,
"NPI" = 7,
"PDO" = 3,
)
}
#generate constant coordinate positions.
# fixed 4-node layout (x, y) pairs, one row per mode
coords <- c(0, 0,
1, 0,
0.5, 1,
0.3, 0.2)
coords <- matrix(coords, 4,2,byrow=T)
if ( GEN_CCM_TEMPORAL_GRAPH ) {
#break interval size to be in 11 month windows
interval_size <- ceiling( nrow( modes_df ) / NUM_TIME_WINDOWS )
#run for num interval windows generating weights for each window
for ( idx in 1:NUM_TIME_WINDOWS ) {
start_row <- 1 + (idx-1)*interval_size
end_row <- idx*interval_size
windowed_df <- modes_df[start_row:end_row,]
#where to write this plot to
png( paste("plots",idx,sep='/') )
#to keep track of which row to write to
curr_row <- 1
for ( idx in 1:ncol( windowed_df )) {
mode_name <- colnames(windowed_df)[idx]
mode_vec <- windowed_df[,idx]
#iterate every pair between this mode and other modes
other_modes <- subset( windowed_df, select = -idx )
for ( col_idx in 1:ncol( other_modes )) {
#get the weight of this connection
ccm_df <- ccm ( windowed_df, E = best_E(mode_name),
lib_column = colnames(other_modes)[col_idx],
target_column = mode_name,
lib_sizes = nrow(windowed_df), num_samples=1)
pair_weight <- ccm_df$rho
#set the pair data in the df
weight_pairs[curr_row,][1] <- mode_name
weight_pairs[curr_row,][2] <- colnames(other_modes)[col_idx]
weight_pairs[curr_row,][3] <- pair_weight
curr_row <- curr_row+1
}
}
qgraph( weight_pairs, layout=coords, esize=5, theme='gray')
}
#make the gif using shell script
}
#try the same but for sliding window instead of blocks
# Sliding-window CCM network: a 120-row window advanced by `delta` rows,
# one qgraph frame per position, assembled into a gif by make_gif.sh.
if ( SLIDING_WINDOW_NETWORK || args[1]=="slidingwindow") {
print("calculating syncrhonization")
window_size <- 120
delta=1 #space between windows
# NOTE(review): start_row begins at 0; modes_df[0:120,] silently drops the
# zero index, so the first window is rows 1..120 — confirm intended.
start_row <- 0
end_row <- window_size
plot_idx <- 0
# %d in the filename makes png() number successive frames automatically
png("plots/%d.png")
while ( end_row < nrow(modes_df) ) {
windowed_df <- modes_df[start_row:end_row,]
#to keep track of which row to write to
curr_row <- 1
#go thru every col to get pairwise ccm
for ( idx in 1:ncol( windowed_df )) {
mode_name <- colnames(windowed_df)[idx]
mode_vec <- windowed_df[,idx]
#iterate every pair between this mode and other modes
other_modes <- subset( windowed_df, select = -idx )
for ( col_idx in 1:ncol( other_modes )) {
#get the weight of this connection
ccm_df <- ccm ( windowed_df, E = best_E(mode_name),
lib_column = colnames(other_modes)[col_idx],
target_column = mode_name,
lib_sizes = nrow(windowed_df), num_samples=1)
pair_weight <- ccm_df$rho
#set the pair data in the df
weight_pairs[curr_row,][1] <- mode_name
weight_pairs[curr_row,][2] <- colnames(other_modes)[col_idx]
weight_pairs[curr_row,][3] <- pair_weight
curr_row <- curr_row+1
}
}
qgraph( weight_pairs, title=paste(start_row,end_row,sep=":"),
edge.labels=TRUE, layout=coords, esize=5, theme='gray')
#update bounds
start_row <- start_row + delta
end_row <- end_row + delta
plot_idx <- plot_idx+1
}
#make gif with shell script in dir
system( paste("./make_gif.sh ", plot_idx) )
}
# Sliding-window synchronization: replace CCM with pairwise Pearson
# correlation, convert each |r| to a correlation distance sqrt(2*(1-|r|)),
# and average over all pairs to get a per-window total network distance,
# saved as a series to synchronization_series.RData.
# NOTE(review): typos in identifiers and user-facing strings
# ("syncrhonization", "totalNetwokDist", "correation") are left untouched
# here because they are runtime strings / consistently-used names.
if ( CALC_SYNCHRONIZATION || args[1]=="synchronization") {
print("calculating syncrhonization")
total_network_distances <- list()
#iterate through every row to compute network distance
window_size <- 120
delta=1 #space between windows
start_row <- 0
end_row <- window_size
plot_idx <- 0
# NOTE(review): this png device is opened but plotting below is disabled
# by `if (FALSE)` and dev.off() is never called — the device stays open.
png("plots/%d.png")
while ( end_row < nrow(modes_df) ) {
windowed_df <- modes_df[start_row:end_row,]
#to keep track of which row to write to
curr_row <- 1
#holds the sum of current correlation distances sqrt
sumDists <- 0
#go thru every col to get pairwise ccm - actually forgot order doesn't matter
for ( idx in 1:ncol( windowed_df )) {
mode_name <- colnames(windowed_df)[idx]
mode_vec <- windowed_df[,idx]
#iterate every pair between this mode and other modes
other_modes <- subset( windowed_df, select = -idx )
for ( col_idx in 1:ncol( other_modes )) {
#get the weight of this connection
pair_weight <- cor( windowed_df[idx], other_modes[col_idx], method="pearson",
use="complete.obs")
# correlation distance; each unordered pair is visited twice, which the
# *2/(p*(p-1)) normalisation below accounts for
sqrt_cor <- sqrt( 2*(1-abs(pair_weight)) )
sumDists <- sumDists + sqrt_cor
#set the pair data in the df
weight_pairs[curr_row,][1] <- mode_name
weight_pairs[curr_row,][2] <- colnames(other_modes)[col_idx]
weight_pairs[curr_row,][3] <- pair_weight
curr_row <- curr_row+1
}
}
#calc total network synchronization at this point
totalNetwokDist <- sumDists*2/(ncol(modes_df)*(ncol(modes_df)-1))
# NOTE(review): format() turns the distance into a character string, so the
# saved series is a list of strings, not numerics — confirm downstream use.
totalNetwokDist <- format(round(totalNetwokDist, 2), nsmall = 2)
total_network_distances <- c(total_network_distances, totalNetwokDist)
networkDistStr <- paste( "total network dist is ",totalNetwokDist)
title <- paste("synchronization/correation graph ",start_row,":",end_row,"\n",networkDistStr)
if ( FALSE ) { #change to actually graph
qgraph( weight_pairs[1:3], title=title,
edge.labels=TRUE, directed=FALSE, layout=coords, esize=5, theme='gray')
}
#update bounds
start_row <- start_row + delta
end_row <- end_row + delta
plot_idx <- plot_idx+1
}
#make gif with shell script in dir
#system( paste("./make_gif.sh ", plot_idx) )
#pdf("synchronizationPlot.pdf")
#qplot(seq_along(total_network_distances), total_network_distances)
save(total_network_distances, file="synchronization_series.RData")
print("done calculating syncrhonization")
}
|
9881a111347e788f12cb81ff782160668b3f1ef6
|
35ae1abde4828b315a805ca5ed207bcf4d13722c
|
/reference.R
|
914416eacf483066264cad248d76cf9d183add68
|
[] |
no_license
|
DongboShi/chinese_author_disambiguation
|
a0f49b98123971d7ce7d32457ba83372f14e4ba4
|
0081699c386f73de598941ed1b7aa4a0ffbf90bb
|
refs/heads/master
| 2021-07-07T18:58:38.857284
| 2020-12-16T16:28:08
| 2020-12-16T16:28:08
| 215,990,534
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,925
|
r
|
reference.R
|
# Build reference-based similarity features for author-disambiguation paper
# pairs: for each dataset block CJ_<i>.json, compute per-pair counts of
# shared references, summed log-IDF of shared references, and direct
# citation indicators, then write one feature CSV per block.
library(dplyr)
library(rjson)
library(rhdf5)
library(stringr)
library(tidyr)
library(rlist)
library(parallel)
# Discover all CJ_<id>.json input blocks and process them in id order.
files <- list.files(path='/Users/zijiangred/changjiang/dataset/inputdata',pattern='CJ_')
id <- sort(as.numeric(str_extract(files,'[0-9]+')))
# First pass: collect every reference string across all blocks (needed to
# compute corpus-wide IDF weights below).
Reference <- c()
for (i in id){
data <- fromJSON(file=paste0("/Users/zijiangred/changjiang/dataset/inputdata/CJ_",i,".json"),simplify=T)
papers <- data$papers
reference <- c()
for(k in 1:length(papers)){
ref <- data[["papers"]][[k]]$Reference[[1]]
reference <- c(reference,ref)
}
Reference<-c(Reference,reference)
}
# Compute the log IDF of every reference: log(total refs / frequency).
part_ref3 <- as.data.frame(table(unlist(Reference)))
colnames(part_ref3) <- c('ref','freq')
part_ref3 <- mutate(part_ref3,part_idf_ref = log(sum(freq)/freq))
# Second pass: per block, join references onto the candidate paper pairs and
# derive the pairwise features.
for (i in id){
pairorder <- h5read(file=paste0("/Users/zijiangred/changjiang/dataset/pairorder/",i,"_pair.h5"),name="pair")
data <- fromJSON(file=paste0("/Users/zijiangred/changjiang/dataset/inputdata/CJ_",i,".json"),simplify=T)
papers <- data$papers
# One row per paper (UT id) holding its list of references.
ref_1 <- data.frame()
for(k in 1:length(papers)){
ut <- papers[[k]]$UT
ref <- data[["papers"]][[k]]$Reference[[1]]
result<-data.frame()
if(length(ref)>0){
result <- data.frame(ut,stringsAsFactors = F)
result$ref <- list(ref)
}
ref_1 <- rbind(ref_1,result)
}
Ref <- unnest(ref_1,ref)
# Count the references shared by each pair (feature ref2): the inner join
# keeps only references present on both sides of a pair.
pairorderA <- left_join(pairorder,Ref,by=c('paperA'='ut'))
pairorderB <- left_join(pairorder,Ref,by=c('paperB'='ut'))
pairorderAB <- inner_join(pairorderA,pairorderB)
pairorderAB <- select(left_join(pairorderAB,part_ref3),-freq)
pairorderAB <- group_by(pairorderAB,paperA,paperB) %>%
mutate(ref2=n())
# Sum of log IDF over shared references (feature ref3).
feature <- group_by(pairorderAB,paperA,paperB) %>%
mutate(ref3=sum(part_idf_ref)) %>%
select(-ref,-part_idf_ref) %>%
distinct()
Feature <- left_join(pairorder,feature)
Feature[is.na(Feature)] <- 0
# Direct-citation feature (ref1): 1 if paper A appears in B's references,
# plus 1 if B appears in A's references.
paperAinB <- mutate(pairorderB,AinB=ifelse(paperA==ref,1,0))
paperAinB[is.na(paperAinB)] <-0
paperAinB <-group_by(paperAinB,paperA,paperB) %>%
mutate(part_ref1=sum(AinB)) %>%
select(-ref,-AinB) %>%
distinct()
paperBinA <- mutate(pairorderA,BinA=ifelse(paperB==ref,1,0))
paperBinA[is.na(paperBinA)] <-0
paperBinA <-group_by(paperBinA,paperA,paperB) %>%
mutate(part_ref2=sum(BinA)) %>%
distinct()
|
8fa4eec4d3e7e17bd278f19cc4d10b2702dce5b2
|
e22fec1de80f57545bca4c379e2f3c5d56d83333
|
/man/geo_melbourne.Rd
|
b37f8b040c62c6bf55e3300a35805242eade0673
|
[
"MIT"
] |
permissive
|
SymbolixAU/geojsonsf
|
0ac38b9702921af23355a200ab4eca05f540d180
|
d1d8d3fefea8f5ee14297588552ae37dc17e22a6
|
refs/heads/master
| 2023-07-07T11:28:22.964093
| 2023-06-24T01:10:12
| 2023-06-24T01:10:12
| 127,064,889
| 69
| 7
|
NOASSERTION
| 2022-03-03T21:28:04
| 2018-03-28T01:14:20
|
R
|
UTF-8
|
R
| false
| true
| 366
|
rd
|
geo_melbourne.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geojsonsf-package.R
\docType{data}
\name{geo_melbourne}
\alias{geo_melbourne}
\title{geo_melbourne}
\format{
An object of class \code{geojson} (inherits from \code{json}) of length 1.
}
\usage{
geo_melbourne
}
\description{
GeoJSON data of Melbourne's Inner suburbs.
}
\keyword{datasets}
|
08f073ae36ff01253b59d1b29eab1431ec6bc486
|
39bd9f74565d4e24fb60546e8f61fc9340356296
|
/Chapter 1/NimTotals.R
|
56d4d93fae14ac8678ecb914058a05037551f6d8
|
[] |
no_license
|
afettouhi/GraphingDatawithR-R40
|
a7d13797bd45fafd9890722cd1c65b7740253288
|
e60f8d9222949603aefa259350eb5d9dddbcb8a4
|
refs/heads/master
| 2022-11-15T17:31:01.241036
| 2020-07-11T07:17:24
| 2020-07-11T07:17:24
| 275,732,725
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 781
|
r
|
NimTotals.R
|
# Cross-tabulate the Nimrod performance data (Medium x Level, counts only)
# and print a summary of performance times.
#
# Install the dependencies only if they are missing, instead of the original
# unconditional install.packages() calls (and the stray installed.packages()
# listing) that ran on every execution.
if (!requireNamespace("gmodels", quietly = TRUE)) install.packages("gmodels")
if (!requireNamespace("XLConnect", quietly = TRUE)) install.packages("XLConnect")
library(gmodels)   # CrossTable()
library(XLConnect) # readWorksheetFromFile()

Nimrod2 <- readWorksheetFromFile(
  "/home/af/Dokumenter/Programs/GraphingDatawithR-R40/data/Nimrod.xls",
  sheet = 1, header = TRUE
)

# Counts in each cell, with all percent and chi-square output suppressed.
# Columns are referenced explicitly rather than via attach()/detach(), which
# mutate the global search path.
CrossTable(Nimrod2$Medium, Nimrod2$Level,
           prop.r = FALSE,
           prop.c = FALSE,
           prop.t = FALSE,
           prop.chisq = FALSE)

perf_time <- summary(Nimrod2$Time)  # save summary output
title <- "Summary of performance times:"
cat(title, "\n", "\n")  # print title and 2 linefeeds
print(perf_time)        # print results of summary(Time)
|
6f4890b9600863f12986494497d7a9f1c9020ce2
|
51007a8928a04dfc0ca28e6a38a4ccdbcc42b9f2
|
/code/R/utilities.R
|
7bd8dae260c3dad56db8c4c5d1bc51ea8de8076c
|
[] |
no_license
|
Joker-Jerome/utmost_update
|
ac02216c4299bc357b2bf3eeac4b9aef4e47e793
|
97ce581531b0e5f739394752bc1f6b29668aa0e7
|
refs/heads/master
| 2021-06-23T11:20:03.869583
| 2021-04-26T15:00:03
| 2021-04-26T15:00:03
| 214,357,198
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 453
|
r
|
utilities.R
|
library(data.table)
library(dplyr)
library(ggplot2)
# Histogram of `vec` on the density scale, with an overlaid kernel density
# curve and a dashed blue vertical line at the mean of the finite values.
#
# vec : numeric vector; NA/Inf entries are excluded from the mean line only.
ghist <- function(vec) {
  plot_data <- data.frame(x = vec)
  # Mean over finite entries only, so NA/Inf do not distort the reference line.
  mean_val <- mean(vec[is.finite(vec)], na.rm = TRUE)
  ggplot(plot_data, aes(x = x)) +
    geom_histogram(aes(y = ..density..), colour = "black", fill = "white") +
    geom_density(alpha = .2, fill = "#FF6666") +
    geom_vline(aes(xintercept = mean_val),
               color = "blue", linetype = "dashed", size = 1)
}
|
e8183d88b39214e6e6c12b35925b809953fcce3d
|
ac0063d0365a6c8599069f8a1d00a7e90b763163
|
/R/h2m.R
|
f2142c6bd8afb88a6397d5b17725c55fb2977fa0
|
[] |
no_license
|
pwj6/tR
|
bf488073d4c7630df25536ec6310f0763dc536fe
|
c89d50f21396d2c2ca2298578e77313fb812539c
|
refs/heads/main
| 2023-04-22T22:07:27.843723
| 2021-05-13T09:50:25
| 2021-05-13T09:50:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 812
|
r
|
h2m.R
|
#' Map herbs to their molecules and molecule CIDs
#'
#' Looks up each herb in `x` (named in latin, pinyin, or chinese) via the
#' internal worker `.h2m()` and returns one molecule/CID table per herb.
#'
#' @param x A character vector of herb names.
#' @param type How the herbs in `x` are named: "latin" (default), "pinyin"
#'   or "chinese".
#'
#' @return A named list, one element per herb, each holding that herb's
#'   molecules and CIDs.
#' @export
#'
#' @examples
#' .h2m(x='Ziziphi Spinosae Semen',type='latin')
#' .h2m(x='houpu',type='pinyin')
#' h2m(x=c('Ziziphi Spinosae Semen','Abri Herba'),type='latin')
h2m <- function(x, type = "latin")
{
  y <- lapply(x, .h2m, type = type)
  # Name results by the input herbs directly; the original
  # `(data.frame(x))$x` round-trip produced the same character vector and
  # names<- coerces to character itself.
  names(y) <- x
  y
}
#' Internal worker for h2m(): look up molecules/CIDs for a single herb.
#' Relies on the package-level tables `drug` (herb-name translations) and
#' `drugchem` (herb -> molecule/CID mapping) being in scope.
#' @export
.h2m<-function(x,type="latin"){
  # Normalise the herb name to its latin form.
  {
    type <- match.arg(type,c("latin","pinyin","chinese"))
    if(length(x)>1)
      stop("Length of x must be 1!")
    else if(type=="pinyin")
      x<-drug[pinyin%in%tolower(x),]$latin
    else if(type=="chinese")
      x<-drug[chinese%in%x,]$latin
  }
  # Fetch the molecule and CID columns (columns 3 and 14) for the herb.
  # NOTE(review): when the lookup fails (length(x) != 1) y becomes NA and the
  # $molecule access below would error -- confirm intended behaviour.
  {
    if(length(x)==1)
      y <- drugchem[herb==x,][,c(3,14)]
    else
      y<-NA
  }
  # Drop duplicated molecules, keeping the first occurrence of each.
  y<-y[!duplicated(y$molecule),]
  y
}
|
408fb98e73c631ed577e9776399ed2c15cd334af
|
f8f7371c1357b975a3721f9316f2c7a4a91888d1
|
/R/plot_gradient.R
|
559af131df8a32e2c58782d9ce18ad6ff2694a45
|
[] |
no_license
|
oldiya/LandClimTools
|
5849fd3418fc5bb5633b8fae3c3c8fe2e3ad9df2
|
979a2e56d62606f4ed91be1f27394572ba224911
|
refs/heads/master
| 2021-09-07T23:03:15.957301
| 2018-03-02T17:16:54
| 2018-03-02T17:16:54
| 283,109,268
| 1
| 0
| null | 2020-07-28T05:27:33
| 2020-07-28T05:27:32
| null |
UTF-8
|
R
| false
| false
| 438
|
r
|
plot_gradient.R
|
# Draw a stacked "gradient" area plot: each column of `y` becomes a band
# stacked on top of the previous one along `x`.
#
# x   : numeric vector of x coordinates.
# y   : numeric vector (one band) or matrix (one band per column; columns
#       are accumulated row-wise before plotting).
# col : fill colours, one per band; defaults to rainbow().
# ... : further arguments passed on to plot().
plot_gradient <- function(x, y, col = NULL, ...) {
  # Normalise `y` into a matrix of cumulative band boundaries, prefixed with
  # a zero baseline column.
  baseline <- rep(0, length(x))
  if (is.null(dim(y))) {
    y <- cbind(null = baseline, y)
  } else {
    y <- cbind(null = baseline, t(apply(y, 1, cumsum)))
  }
  if (is.null(col)) {
    col <- rainbow(ncol(y) - 1)
  }
  # Empty plot scaled to the topmost boundary, then one polygon per band;
  # each polygon runs forward along its lower edge and back along its upper.
  plot(y[, ncol(y)] ~ x, type = "n", ...)
  xs <- c(x, rev(x))
  for (band in 2:ncol(y)) {
    polygon(xs, c(y[, band - 1], rev(y[, band])), col = col[band - 1])
  }
}
|
e0a42ecfd7186f32423c154735a09297c39cdcb5
|
4fd287a7d873aaf616e4d45f06f068e88f59881f
|
/www/outputs/feedback_choices.R
|
381b99b2ad177dbfb543d87d6b79485119d0da39
|
[] |
no_license
|
slphyx/comoTH
|
da20a855d49361cb3cd977373d729ebc045153d0
|
1afa22d1a4dcf13dce904a90639ab86d3b7fbf17
|
refs/heads/master
| 2022-07-04T01:20:44.196428
| 2020-05-15T08:46:11
| 2020-05-15T08:46:11
| 258,414,945
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
r
|
feedback_choices.R
|
# Render a one-line HTML summary of the user's current input selections
# (cases/deaths source, demographics, social contacts) into the UI.
# NOTE(review): strong()/span() appear to be shiny/htmltools tag builders and
# paste0() collapses them to character -- confirm this renders as intended
# inside renderText (vs renderUI).
output$feedback_choices <- renderText({
  return(
    paste0(
      strong("Selected Inputs:"),
      span("Cases/Deaths:", span(class = "importanttext", input$country_cases),
           ", demographics: ", span(class = "importanttext", input$country_demographic),
           ", social contacts: ", span(class = "importanttext", input$country_contact))
    )
  )
})
|
ee9cf0dab326ae11ce7adcf65d21db076c7ce2ac
|
b5b18f45016c0fdb3b3913c65e925ef3aaecad52
|
/R/phy.sim.R
|
75627b7aa50ac9fc5c755284bdd0264d27b9c84b
|
[] |
no_license
|
cran/pez
|
03e7fbde49853f699579c7fc7c7d44359eab338e
|
d22ffe708aae2c801b609bfc2f89204e7ccf0f72
|
refs/heads/master
| 2022-09-09T18:35:36.722610
| 2022-08-31T17:00:02
| 2022-08-31T17:00:02
| 26,346,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,121
|
r
|
phy.sim.R
|
#' Simulate phylogenies
#'
#' Simulate phylogenies under pure birth/death or as a function of
#' trait evolution
#'
#' \code{sim.bd.tree} simulates a pure birth/death speciation
#' model. There are two important things to note: (1) speciation is
#' randomised before extinction, and only one thing can happen to a
#' lineage per timestep. (2) This code works well for my purposes, but
#' absurd parameter values can cause the function to crash.
#'
#' \code{sim.bd.tr.tree} is an extension of \code{sim.bd.tree}, and
#' all its caveats apply to it. It additionally simulated the
#' evolution of a trait under Brownain motion
#' (\code{tr.walk}). Species' speciation/extinction rates change
#' depending on whether they have a trait value similar to other
#' species (\code{sp.tr}, \code{ext.tr}). When a speciation event
#' happens, the two daughters split evenly about the ancestor's trait
#' value, taking values half-way to whatever the nearest species'
#' value is. To be precise: \eqn{p(speciate)_i = speciate_i + sp.tr
#' \times min(trait distance)}{p(speciate) = speciate +
#' sp.tr*min.tr.dist}, \eqn{p(extinct)_i = exinction_i + ext.tr
#' \times min(trait distance)}{p(extinct) = extinction +
#' ext.tr*min.tr.dist}, where \eqn{i}{i} denotes each species.
#'
#' \code{edge2phylo} is an internal function for the
#' \code{\link{sim.phy}} and \code{\link{sim.meta}} function families,
#' which may be of use to you. Check those functions' code for
#' examples of use.
#'
#' These functions are closely related to \code{\link{sim.meta}}; the
#' latter are extensions that simulate meta-community structure at the
#' same time.
#'
#' @param speciate probability each species will speciate in each
#' time-step (0-1)
#' @param extinction probability each species will go extinct in each
#' time-step (0-1)
#' @param time.steps number of time-steps for simulation
#' @return \code{\link[ape:phylo]{phylo}} object with random
#' tip.labels, and trait values if using \code{sim.br.tr.tree}.
#' @author Will Pearse
#' @seealso sim.meta scape
#' @examples
#' tree <- sim.bd.phy(0.1, 0, 10)
#' plot(tree)
#' @rdname sim.phy
#' @name sim.phy
#' @importFrom stats runif
#' @export
sim.bd.phy <- function(speciate=0.1, extinction=0.025, time.steps=20){
    #Setup: start from a root (node 1) with two living daughter species (2, 3).
    edge <- matrix(c(1,2,1,3), byrow=TRUE, ncol=2)
    species <- c(TRUE, TRUE)   # TRUE = edge leads to an extant species
    edge.length <- c(1,1)
    extinct <- numeric()       # node ids of lineages that have gone extinct
    #Keep the next iteration's edges separate to avoid running them through too early
    next.edge <- edge
    next.edge.length <- edge.length
    next.species <- species
    new.edge <- 4              # next unused node id
    #Loop over timesteps
    for(i in seq(time.steps)){
        #Loop over edges
        for(j in seq(nrow(edge))){
            #Need a breaker to stop things doing multiple things per timestep...
            breaker <- TRUE
            #We only care about species, not nodes
            if(species[j] == TRUE){
                #Speciate? On success the lineage becomes an internal node and
                #two new daughter species (new.edge, new.edge+1) are appended.
                if(breaker & runif(1) <= speciate){
                    next.edge <- rbind(next.edge, matrix(c(edge[j,2], new.edge, edge[j,2], new.edge+1), byrow=TRUE, nrow=2))
                    next.species[j] <- FALSE
                    next.species <- append(next.species, c(TRUE, TRUE))
                    next.edge.length <- append(next.edge.length, c(1, 1))
                    new.edge <- new.edge+2
                    breaker <- FALSE
                } else next.edge.length[j] <- next.edge.length[j] + 1
                #Extinction?
                #NOTE(review): `&` (not `&&`) means runif(1) is drawn even when
                #breaker is FALSE, so the RNG stream always advances here;
                #changing it to && would alter seeded results.
                if(breaker & runif(1) <= extinction){
                    next.species[j] <- FALSE
                    extinct <- append(extinct, edge[j,2])
                    breaker <- FALSE
                }
            }
        }
        #Commit this timestep's changes before the next pass.
        edge <- next.edge
        edge.length <- next.edge.length
        species <- next.species
    }
    #Turn into ape::phylo and return
    return(edge2phylo(edge, species, extinct, edge.length))
}
#' @param tr.range vector of length two specifying boundaries for
#' trait values (see notes); initial two species will be at the 25th
#' and 75th percentiles of this space. See also \code{tr.wrap}
#' @param sp.tr speciation rate's interaction with the minimum
#' distance between a species and the species most similar to it (see
#' details)
#' @param ext.tr extinction rate's interaction with the minimum
#' distance between a species and the species most similar to it (see
#' details)
#' @param tr.walk at each time-step a species not undergoing
#' speciation or extinction has its trait value drawn from a
#' distribution centered at its current value and with a standard
#' deviation set by this value. I.e., this is the rate of the Brownian
#' motion trait evolution.
#' @param tr.wrap whether to force species' trait values to stay
#' within the boundary defined by \code{tr.range}; default TRUE.
#' @author Will Pearse
#' @rdname sim.phy
#' @importFrom stats runif rnorm quantile
#' @export
sim.bd.tr.phy <- function(speciate=0.1, extinction=0.025, time.steps=20, tr.range=c(0,1), sp.tr=2, ext.tr=1, tr.walk=0.2, tr.wrap=TRUE){
    #Setup: root (node 1) with two living daughters (2, 3); initial trait
    #values sit at the 25th and 75th percentiles of tr.range.
    edge <- matrix(c(1,2,1,3), byrow=TRUE, ncol=2)
    species <- c(TRUE, TRUE)
    edge.length <- c(1,1)
    extinct <- extinct.traits <- numeric()
    traits <- unname(quantile(tr.range, c(0.25,0.75)))
    #Clamp a scalar trait value into tr.range (used when tr.wrap is TRUE).
    .wrap <- function(x, range){
        if(x < range[1]) return(range[1])
        if(x > range[2]) return(range[2])
        return(x)
    }
    #Keep the next iteration's edges separate to avoid running them through too early
    next.edge <- edge
    next.edge.length <- edge.length
    next.species <- species
    new.edge <- 4
    next.traits <- traits
    #Loop over timesteps
    for(i in seq(time.steps)){
        #Get minimum trait distances (--> alter speciation and extinction rates)
        min.dist <- abs(outer(traits, traits, `-`))
        diag(min.dist) <- NA
        #Be careful; dead species are all NA and so can cause warnings
        min.dist <- apply(min.dist, 2, function(x) if(all(is.na(x))) NA else min(x, na.rm=TRUE))
        #Loop over edges
        #NOTE(review): min.dist[j] pairs edge row j with trait j -- this relies
        #on traits being appended in the same order as edge rows; confirm.
        for(j in seq(nrow(edge))){
            #Need a breaker to stop things doing multiple things per timestep...
            breaker <- TRUE
            #We only care about species, not nodes
            if(species[j] == TRUE){
                #Speciate? Rate increases with distance to the nearest trait.
                #Daughters split symmetrically about the parent's trait value,
                #each moving half-way towards the nearest neighbour.
                if(breaker & runif(1) <= (speciate + sp.tr * min.dist[j])){
                    next.edge <- rbind(next.edge, matrix(c(edge[j,2], new.edge, edge[j,2], new.edge+1), byrow=TRUE, nrow=2))
                    next.species[j] <- FALSE
                    next.species <- append(next.species, c(TRUE, TRUE))
                    next.traits <- append(next.traits, c(traits[j]-0.5*min.dist[j],traits[j]+0.5*min.dist[j]))
                    next.edge.length <- append(next.edge.length, c(1, 1))
                    new.edge <- new.edge+2
                    breaker <- FALSE
                } else next.edge.length[j] <- next.edge.length[j] + 1
                #Extinction? Rate also increases with nearest-trait distance;
                #the lineage's final trait value is archived in extinct.traits.
                if(breaker & runif(1) <= (extinction + ext.tr * min.dist[j])){
                    next.species[j] <- FALSE
                    extinct <- append(extinct, edge[j,2])
                    extinct.traits <- append(extinct.traits, next.traits[j])
                    breaker <- FALSE
                    next.traits[j] <- NA
                }
                #Brownian motion on trait if nothing else
                if(breaker & !is.na(next.traits[j]))
                    next.traits[j] <- rnorm(1, next.traits[j], sd=tr.walk)
                if(tr.wrap & !is.na(next.traits[j]))
                    next.traits[j] <- .wrap(next.traits[j], tr.range)
            }
        }
        #Commit this timestep's changes before the next pass.
        edge <- next.edge
        edge.length <- next.edge.length
        species <- next.species
        traits <- next.traits
    }
    #Back-fill extinct lineages' trait values (NA slots) from the archive.
    traits[is.na(traits)] <- extinct.traits
    #Turn into ape::phylo and return
    return(edge2phylo(edge, species, extinct, edge.length, traits))
}
#' @param edge a two-column matrix where the first column is the start
#' node, the second the destination, as in
#' \code{\link[ape:phylo]{phylo}$edge}
#' @param s which of the rows in the edge matrix represent
#' extant species
#' @param e which of the tips in the edge matrix are extinct
#' (DEFAULT: empty vector, i.e., none)
#' @param el a vector to be used to give edge.length to the
#' phylogeny (default NA, i.e., none)
#' @param t if given (default NA), a vector to be used for traits
#' (\code{$traits} slot) in the phylogeny
#' @author Will Pearse
#' @rdname sim.phy
#' @export
# Convert a raw simulation edge matrix into an ape-style "phylo" object.
#
# edge : two-column matrix, column 1 = parent node id, column 2 = child id.
# s    : logical per edge row, TRUE where the row ends in an extant species.
# e    : node ids of extinct tips (default: none).
# el   : edge lengths for the $edge.length slot (default NA).
# t    : optional trait values, stored in the non-standard $traits slot.
edge2phylo <- function(edge, s, e = numeric(0), el = NA, t = NULL) {
  ## Node ids that become tips: extant species plus extinct lineages.
  tip.ids <- sort(c(edge[s, 2], e))
  n.tips <- length(tip.ids)
  ## For every entry of `edge`, findInterval counts how many (sorted) tip ids
  ## are <= that entry -- identical to the original per-tip increment loop.
  ## Subtracting that count and adding n.tips renumbers internal nodes so
  ## they follow the tips, as ape's "phylo" layout requires.
  shift <- matrix(findInterval(edge, tip.ids), nrow = nrow(edge))
  renumbered <- edge - shift + n.tips
  ## Tip rows themselves get ids 1..n.tips, assigned in edge-row order.
  is.tip.edge <- edge[, 2] %in% tip.ids
  renumbered[is.tip.edge, 2] <- seq_along(tip.ids)
  out <- structure(
    list(edge = renumbered,
         tip.label = paste("r", order(tip.ids), sep = "_"),
         edge.length = el,
         Nnode = length(unique(renumbered[, 1])),
         traits = t),
    class = "phylo")
  return(out)
}
|
98c4edd1f8832fa2755dcb86ccb070f0df0af257
|
687df85904e5472055ddbfd2ddd155f4a5b4082a
|
/scripts/PlotMCMC.R
|
3832af6dec7425c21a2a6fc12f08226f8d0e37ff
|
[] |
no_license
|
barbagrigia/ComposableStateSpaceModels
|
2fe80a04b0b8ef43e99dbfe87bf8906034216328
|
d6c3c677efdfcddd0c97e44dfdf06e1c025e3ce3
|
refs/heads/master
| 2021-01-22T18:23:06.168525
| 2017-03-14T16:18:47
| 2017-03-14T16:18:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,104
|
r
|
PlotMCMC.R
|
# Plot the running mean of MCMC draws per parameter against iteration, one
# coloured line per chain, with a solid line at the overall mean and a dashed
# red line at the true (actual) parameter value.
#
# chains        : data frame of draws with columns iteration, chain and one
#                 column per parameter.
# parameters    : character vector of parameter names to plot.
# actual_params : data frame with columns parameter and actual_value.
# pages         : which page of the paginated facet layout to draw.
plot_running_mean = function(chains, parameters, actual_params, pages = 1) {
  chains %>%
    mutate(chain = as.factor(chain)) %>%
    gather(key = parameter, value, -iteration, -chain) %>%
    inner_join(actual_params, by = "parameter") %>%
    filter(parameter %in% parameters) %>%
    arrange(parameter, iteration) %>%
    drop_na() %>%
    # NOTE(review): mean and running mean are computed over the whole
    # (ungrouped) data -- possibly intended per parameter/chain; confirm.
    mutate(m = mean(value), rm = cumsum(value)/iteration) %>%
    ggplot(aes(x = iteration, y = rm, colour = chain)) +
    geom_line() +
    geom_hline(aes(yintercept = m)) +
    geom_hline(aes(yintercept = actual_value), linetype = "dashed", colour = "#ff0000") +
    xlab("Iteration") +
    ylab("Running Mean") +
    theme(legend.position = "none", text = element_text(family = "Georgia")) +
    # BUG FIX: the `pages` argument was previously ignored; pass it through
    # so callers can select a facet page (matching plot_density).
    facet_wrap_paginate(~parameter, ncol = 1, scales = "free_y", page = pages)
}
# Trace plot of raw MCMC draws per parameter, one coloured line per chain,
# with a dashed red line at the true (actual) parameter value.
#
# chains        : data frame of draws with columns iteration, chain and one
#                 column per parameter.
# parameters    : character vector of parameter names to plot.
# actual_params : data frame with columns parameter and actual_value.
# pages         : which page of the paginated facet layout to draw.
traceplot = function(chains, parameters, actual_params, pages = 1) {
  chains %>%
    mutate(chain = as.factor(chain)) %>%
    gather(key = parameter, value, -iteration, -chain) %>%
    inner_join(actual_params, by = "parameter") %>%
    filter(parameter %in% parameters) %>%
    arrange(parameter, iteration) %>%
    drop_na() %>%
    ggplot(aes(x = iteration, y = value, colour = chain)) +
    geom_line() +
    # BUG FIX: the `pages` argument was previously ignored; pass it through
    # so callers can select a facet page (matching plot_density).
    facet_wrap_paginate(~parameter, scales = "free_y", ncol = 1, page = pages) +
    geom_hline(aes(yintercept = actual_value), linetype = "dashed", colour = "#ff0000") +
    theme(legend.position = "none", text = element_text(family = "Georgia"))
}
# Posterior histogram per parameter (binwidth 0.05), faceted one column per
# page, with a dashed red line at the true (actual) parameter value.
#
# chains        : data frame of draws with columns iteration, chain and one
#                 column per parameter.
# parameters    : character vector of parameter names to plot.
# actual_params : data frame with columns parameter and actual_value.
# pages         : which page of the paginated facet layout to draw.
plot_density = function(chains, parameters, actual_params, pages = 1) {
  chains %>%
    mutate(chain = as.factor(chain)) %>%
    # Long format: one row per (iteration, chain, parameter) draw.
    gather(key = parameter, value, -iteration, -chain) %>%
    inner_join(actual_params, by = "parameter") %>%
    filter(parameter %in% parameters) %>%
    arrange(parameter, iteration) %>%
    drop_na() %>%
    ggplot(aes(x = value)) +
    geom_histogram(binwidth = 0.05) +
    facet_wrap_paginate(~parameter, scales = "free", ncol = 1, page = pages) +
    geom_vline(aes(xintercept = actual_value), linetype = "dashed", colour = "#ff0000") +
    theme(legend.position = "none", text = element_text(family = "Georgia"))
}
|
0bb9c3db6f9862c27d7026dc3f77baa1a3b328ed
|
78063f82eceb719b9cedc6b2d0a64b7d11cf4e53
|
/Recommendations/R_UBCF.R
|
0b85771bfcf378323acf8ad1576b991e034224af
|
[] |
no_license
|
fagan2888/Twitch
|
173263dec1895c1e4ec25fe8d0bfcb228b4a532f
|
0475f95667d365615618c519a1df0cae5aacdcc2
|
refs/heads/master
| 2021-09-16T13:13:12.427357
| 2018-06-21T04:45:07
| 2018-06-21T04:45:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 223
|
r
|
R_UBCF.R
|
# User-based collaborative filtering (UBCF) game recommendations with
# recommenderlab. Install the package only if it is missing, instead of the
# original unconditional install.packages() on every run.
if (!requireNamespace("recommenderlab", quietly = TRUE)) {
  install.packages("recommenderlab")
}
library(recommenderlab)

# Ratings data: rows = users, columns = games. Renamed from `matrix`, which
# shadowed base::matrix().
ratings <- as(read.csv("Games.csv"), "realRatingMatrix")

# Train a user-based collaborative filtering recommender.
model <- Recommender(ratings, method = "UBCF")

# Top-5 recommendations for user "101", returned as a plain list.
games <- predict(model, ratings["101", ], n = 5)
as(games, "list")
|
d899df1d794ea6a449a306afd057b296420c675b
|
2a739305dc86f75385e93d9de81f510d55a89862
|
/code/work/kivisto/fig7.R
|
77f92b5950f38588ddf2deffd253c25ba0c593b9
|
[] |
no_license
|
mikkosk/project_course_addison_steele_spectator_in_estc
|
541c3c75c7a30bc979d60f1ff760562c2479e221
|
61164b306397a0996cf9ff57fc5ee5ea3c511960
|
refs/heads/main
| 2023-04-18T21:42:46.081179
| 2021-04-25T10:24:30
| 2021-04-25T10:24:30
| 323,597,343
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 616
|
r
|
fig7.R
|
#FIG. 7 - Steele most published works
# Count Steele's publications per work field, order the fields by count so
# the horizontal bars appear sorted, and write the chart to a PNG.
# NOTE(review): assumes `steele` (a data frame with a finalWorkField column)
# plus dplyr/ggplot2 are loaded earlier in the script -- confirm.
groupWorkS <- steele %>% group_by(finalWorkField) %>% dplyr::summarise(n = n())
groupWorkS <- arrange(groupWorkS, n)
# Re-level the factor in ascending-count order so coord_flip() shows the
# largest bar at the top.
groupWorkS$finalWorkField <- factor(groupWorkS$finalWorkField, levels = groupWorkS$finalWorkField)
fig7 <- ggplot(data = groupWorkS, aes(x = finalWorkField, y = n)) +
  geom_bar(position="stack", stat="identity") +
  coord_flip() +
  theme(axis.text.y = element_text(angle=0,vjust=0.5, size=6,hjust=1)) +
  ggtitle("Steele - most published works")
# Write the figure; print() is required for ggplot objects inside a device.
png(file="../../../output/figures/fig7_steele_pub.png",
    width=1200, height=700)
print(fig7)
dev.off()
|
163be60f4b40331c845f89fbcb9e21f97f85539e
|
98fadffa9fb4a4fe81874afe37a02569791ce808
|
/scripts/assign_tax.R
|
a458c8586229f9b0566006369596f78cae413241
|
[] |
no_license
|
cErikson/GeneLab_DADA2_snakemake_Pipeline
|
e04c9c54e5355b7f1e92a155425de5541b486559
|
a27fc4d18791dbb7a66e142913be68a0428dc6a4
|
refs/heads/master
| 2020-03-21T04:39:51.206779
| 2018-09-07T18:14:50
| 2018-09-07T18:14:50
| 138,121,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,188
|
r
|
assign_tax.R
|
## ++=======================================================++
## || DADA2 taxonomy script for marker gene GLDS datasets   ||
## ++=======================================================++
##
## Christian Erikson: Bio.Erkson@gmail.com christian.b.erikson@nasa.gov
##
library(dada2)
library(dplyr)
library(readr)

# Log errors/messages to the snakemake log.
log_file <- file(snakemake@log[[1]], open = 'wt')
sink(log_file)

# Move to the working directory.
setwd(snakemake@params[["wd"]])

# Read in the ASV sequence table produced upstream.
seqtab <- as.matrix(read.table(snakemake@input[['asv_table']]))

# Assign taxonomy against the main training set, then add species-level calls.
taxa <- assignTaxonomy(seqtab, snakemake@params[['tax_train']], multithread = TRUE, verbose = TRUE)
taxa <- addSpecies(taxa, snakemake@params[['tax_species']])
# NOTE: add_rownames() is deprecated in dplyr (tibble::rownames_to_column()
# is the replacement); kept here to avoid introducing a new dependency.
taxa <- add_rownames(as.data.frame(taxa), var = 'ASV')

# Optionally merge assignments from additional reference databases, keeping
# the primary (first-seen) assignment for any ASV present in both.
if (!is.null(snakemake@params[['add_taxa_ds']])) {
  for (i in snakemake@params[['add_taxa_ds']]) {
    add_taxa <- assignTaxonomy(seqtab, i, multithread = TRUE, verbose = TRUE)
    add_taxa <- add_rownames(as.data.frame(add_taxa), var = 'ASV')
    # BUG FIX: distinct(., ASV) returned only the ASV column, silently
    # dropping every taxonomy rank from the output table; .keep_all = TRUE
    # retains the full rows (first occurrence of each ASV wins).
    taxa <- bind_rows(taxa, add_taxa) %>%
      distinct(ASV, .keep_all = TRUE)
  }
}

# Save the final taxonomy table.
write_tsv(taxa, snakemake@output[['tax_table']])
|
99b0b27f47cfe5ce40e85fa3bb106f2314cfad27
|
dfe01adc03e83d935c2207695164f027a9aed9fd
|
/man/azureDataLakeMkdirs.Rd
|
8cf7c703f16b846cfba5840c5212377b1598a466
|
[] |
no_license
|
CharlesCara/AzureSMR
|
155502a9e260e4f1a7631cdd248e0727e59fbcb3
|
199dda1e10a8e313b80d08092c6ffa0526a494d5
|
refs/heads/master
| 2020-03-19T06:16:39.558535
| 2018-07-12T10:27:54
| 2018-07-12T10:27:54
| 136,006,301
| 0
| 0
| null | 2018-06-04T10:01:19
| 2018-06-04T10:01:18
| null |
UTF-8
|
R
| false
| true
| 2,490
|
rd
|
azureDataLakeMkdirs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AzureDataLake.R
\name{azureDataLakeMkdirs}
\alias{azureDataLakeMkdirs}
\title{Create a directory with the provided permission.}
\usage{
azureDataLakeMkdirs(azureActiveContext, azureDataLakeAccount, relativePath,
permission, verbose = FALSE)
}
\arguments{
\item{azureActiveContext}{A container used for caching variables used by \code{AzureSMR}, created by \code{\link[=createAzureContext]{createAzureContext()}}}
\item{azureDataLakeAccount}{Name of the Azure Data Lake account.}
\item{relativePath}{Relative path of a file/directory.}
\item{permission}{Permission to be set for the directory (default is 755).}
\item{verbose}{Print tracing information (default FALSE).}
}
\value{
Returns true if the directory creation succeeds; false otherwise.
Exception IOException
}
\description{
Create a directory with the provided permission.
}
\references{
\url{https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Make_a_Directory}
}
\seealso{
\url{https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/WebHDFS.html#Permission}
\url{https://hadoop.apache.org/docs/current/api/org/apache/hadoop/fs/FileSystem.html#mkdirs-org.apache.hadoop.fs.FileSystem-org.apache.hadoop.fs.Path-org.apache.hadoop.fs.permission.FsPermission-}
Other Azure Data Lake Store functions: \code{\link{adlFileInputStreamAvailable}},
\code{\link{adlFileInputStreamClose}},
\code{\link{adlFileInputStreamGetPos}},
\code{\link{adlFileInputStreamLength}},
\code{\link{adlFileInputStreamMarkSupported}},
\code{\link{adlFileInputStreamMark}},
\code{\link{adlFileInputStreamReadBuffered}},
\code{\link{adlFileInputStreamRead}},
\code{\link{adlFileInputStreamReset}},
\code{\link{adlFileInputStreamSeek}},
\code{\link{adlFileInputStreamSkip}},
\code{\link{adlFileOutputStreamClose}},
\code{\link{adlFileOutputStreamFlush}},
\code{\link{adlFileOutputStreamWrite}},
\code{\link{azureDataLakeAppendBOS}},
\code{\link{azureDataLakeAppendCore}},
\code{\link{azureDataLakeAppend}},
\code{\link{azureDataLakeCreate}},
\code{\link{azureDataLakeDelete}},
\code{\link{azureDataLakeGetFileStatus}},
\code{\link{azureDataLakeListStatus}},
\code{\link{azureDataLakeOpenBIS}},
\code{\link{azureDataLakeReadCore}},
\code{\link{azureDataLakeRead}},
\code{\link{createAdlFileInputStream}},
\code{\link{createAdlFileOutputStream}},
\code{\link{readFromService}}
}
|
1ebf29eb8924ac7fc58b10547ce1fb83e29f3b0b
|
38a88f465320a9682d8b3d8f045059469cf60814
|
/scripts_control.R
|
a9755a0e4d56cb3d803b550f5149958336bcbf8e
|
[] |
no_license
|
ostroskianais/transportation-lp
|
4baac427937ee135b91cbaa2363012efec95984a
|
b1177de02d90963ec642b2c5142f27c02715d735
|
refs/heads/main
| 2023-02-19T10:02:31.872261
| 2021-01-24T21:49:14
| 2021-01-24T21:49:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 190
|
r
|
scripts_control.R
|
# Orchestrates the transportation-LP workflow around the Python optimiser.

# Run before python opt
source("get_sets.R")
source("get_distances.R")

# Run after python opt
source("get_opt_results.R")
source("network_ij.R")
source("network_jk.R")
# BUG FIX: the ".R" extension was missing ("network_viz"), unlike every
# sibling script sourced above -- confirm the actual filename on disk.
source("network_viz.R")
|
0f30a19f64ed5a4b536ae41eb6e6b58e1d4eb7ff
|
c4fda47143d29ddbb284c742a48480a0cf0aa98c
|
/R/balanceAreaHarvested.R
|
fee54bce6e0c5ab7f7f10c26ef6c47f1b08659db
|
[] |
no_license
|
SWS-Methodology/faoswsProduction
|
bad2d75be7a5a845ad0de79cb40b691ff0a6d80e
|
6e1c6ab8042e3c7cd398d65bc6733e0a4d038f3b
|
refs/heads/master
| 2023-04-08T16:10:34.598286
| 2023-03-17T13:05:52
| 2023-03-17T13:05:52
| 33,170,385
| 0
| 2
| null | 2023-03-17T13:05:55
| 2015-03-31T07:08:06
|
R
|
UTF-8
|
R
| false
| false
| 4,923
|
r
|
balanceAreaHarvested.R
|
##' Function to compute area harvested when new production and yield are given.
##'
##' @param data The data.table object containing the data.
##' @param processingParameters A list of the parameters for the production
##' processing algorithms. See \code{productionProcessingParameters} for a
##' starting point.
##' @param formulaParameters A list holding the names and parmater of formulas.
##' See \code{productionFormulaParameters}.
##'
##' @export
##'
balanceAreaHarvested = function(data,
processingParameters,
formulaParameters){
dataCopy = copy(data)
## Data quality check
suppressMessages({
ensureProductionInputs(dataCopy,
processingParameters = processingParameters,
formulaParameters = formulaParameters,
returnData = FALSE,
normalised = FALSE)
})
## Impute only when area and yield are available and production isn't
##Note that missingAreaHarvested does not include the obeservation
##with a methodFlag="-" : basically it means that all the missing areaHarvested
##are computed ad identity (where it is possible), except fot those flagged
##as (M,-)
missingAreaHarvested =
is.na(dataCopy[[formulaParameters$areaHarvestedValue]])&
dataCopy[[formulaParameters$areaHarvestedMethodFlag]]!="-"
nonMissingProduction =
!is.na(dataCopy[[formulaParameters$productionValue]]) &
dataCopy[[formulaParameters$productionObservationFlag]] != processingParameters$missingValueObservationFlag
nonMissingYield =
!is.na(dataCopy[[formulaParameters$yieldValue]]) &
dataCopy[[formulaParameters$yieldObservationFlag]] != processingParameters$missingValueObservationFlag
feasibleFilter =
missingAreaHarvested &
nonMissingProduction &
nonMissingYield
nonZeroYieldFilter =
(dataCopy[[formulaParameters$yieldValue]] != 0)
## Balance area harvested
dataCopy[feasibleFilter,
`:=`(c(formulaParameters$areaHarvestedValue),
computeRatio(get(formulaParameters$productionValue),
get(formulaParameters$yieldValue)) *
formulaParameters$unitConversion)]
## Assign observation flag.
##
## NOTE (Michael): If the denominator (yield is non-zero) then
## perform flag aggregation, if the denominator is zero,
## then assign the missing flag as the computed yield is NA.
##
## NOTE (Michael): Although the yield should never be zero by definition.
dataCopy[feasibleFilter & nonZeroYieldFilter,
`:=`(c(formulaParameters$areaHarvestedObservationFlag),
aggregateObservationFlag(get(formulaParameters$productionObservationFlag),
get(formulaParameters$yieldObservationFlag)))]
dataCopy[feasibleFilter & !nonZeroYieldFilter,
`:=`(c(formulaParameters$areaHarvestedObservationFlag),
processingParameters$missingValueObservationFlag)]
dataCopy[feasibleFilter & !nonZeroYieldFilter,
`:=`(c(formulaParameters$areaHarvestedMethodFlag),
processingParameters$missingValueMethodFlag)]
## Assign method flag
dataCopy[feasibleFilter & nonZeroYieldFilter, `:=`(c(formulaParameters$areaHarvestedMethodFlag),
processingParameters$balanceMethodFlag)]
## If Prod or yield is (M,-) also areaHarvested should be flagged as (M,-)
## Note that only the "missingAreaHarvested" are overwritten!! with (M,-)
MdashProduction = dataCopy[,get(formulaParameters$productionObservationFlag)==processingParameters$missingValueObservationFlag
& get(formulaParameters$productionMethodFlag)=="-"]
blockFilterProd= MdashProduction & missingAreaHarvested
dataCopy[blockFilterProd ,
`:=`(c(formulaParameters$areaHarvestedValue,formulaParameters$areaHarvestedObservationFlag,formulaParameters$areaHarvestedMethodFlag),
list(NA_real_,processingParameters$missingValueObservationFlag, "-"))]
MdashYield= dataCopy[,get(formulaParameters$yieldObservationFlag)==processingParameters$missingValueObservationFlag
& get(formulaParameters$yieldMethodFlag)=="-"]
blockFilterYield= MdashYield & missingAreaHarvested
dataCopy[blockFilterYield ,
`:=`(c(formulaParameters$areaHarvestedValue,formulaParameters$areaHarvestedObservationFlag,formulaParameters$areaHarvestedMethodFlag),
list(NA_real_,processingParameters$missingValueObservationFlag, "-"))]
return(dataCopy)
}
|
b0a978b7a1492a7bc280e9c584a9e989c7a29c5e
|
9e0000dc133163ec7c89af560df6ee11ee4ea9b0
|
/src/01_preprocess/01_unzip_and_subset_WDPA.R
|
ea08adcfbbb05bbcaa9fc3f5cd2e13940fe033b5
|
[
"MIT"
] |
permissive
|
cbig/gpan-connectivity
|
374c2ac758da5448cf0e04bf4aee4b2275a3f60b
|
8790a42fad5558fcd7815ca37c6e7b3cc1a07afe
|
refs/heads/master
| 2016-08-05T09:51:03.946891
| 2016-01-12T20:06:52
| 2016-01-12T20:06:52
| 33,982,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,514
|
r
|
01_unzip_and_subset_WDPA.R
|
#!/usr/bin/env r

# Description -------------------------------------------------------------
# Simple helper script that can be used to unzip downloaded WDPA data and
# to select PAs other than marine (using ogr2ogr in gdalUtils).
# NOTE: script assumes that it is run within a RStudio project, i.e. paths
# are not relative to this file. Modify paths if the script is run from
# command line.

# Functionality ------------------------------------------------------------

# Use regular expression matching so that the zip file is not hardcoded to a
# specific WDPA version.
source_zip <- list.files(path = "../../data/WDPA", pattern = "^(WDPA).+\\.zip$",
                         full.names = TRUE)
if (length(source_zip) == 0) {
  stop("Couldn't find a suitable zip-file.")
}
# FIX: if several WDPA versions are present in the folder, `unzip()` below
# would be handed a character vector and fail obscurely; fail fast with an
# informative message instead.
if (length(source_zip) > 1) {
  stop("Found several candidate zip-files, expected exactly one: ",
       paste(source_zip, collapse = ", "))
}

# Unzip the file into the same directory as the archive.
message("Unzipping ", source_zip, " to the same directory...", appendLF = FALSE)
unzip(zipfile = source_zip, exdir = dirname(source_zip), overwrite = TRUE)
message("done")

# Select subset of data ---------------------------------------------------

# First, let's figure out the correct shapefile. We're only interested in the
# polygon data (not points), so let's get that.
source_ds <- list.files(path = dirname(source_zip),
                        pattern = "^(WDPA).+(polygons)\\.shp$",
                        full.names = TRUE)
if (length(source_ds) == 0) {
  stop("Couldn't find a suitable polygon shapefile.")
}
# Same guard as above: exactly one polygon shapefile is expected.
if (length(source_ds) > 1) {
  stop("Found several polygon shapefiles, expected exactly one: ",
       paste(source_ds, collapse = ", "))
}

# Let's also construct a name for a destination dataset (shapefile without
# marine PAs): "<name>_nonmarine.shp" next to the source.
dest_ds <- unlist(strsplit(x = basename(source_ds), split = "\\."))
dest_ds <- file.path(dirname(source_ds),
                     paste0(dest_ds[1], "_nonmarine.", dest_ds[2]))

library(gdalUtils)
library(rgdal)

# We'll pre-check to make sure there is a valid GDAL install.
# Note this isn't strictly necessary, as executing the function will
# force a search for a valid GDAL install.
gdal_setInstallation()
valid_install <- !is.null(getOption("gdalUtils_gdalPath"))
if (valid_install) {
  message("Copying all but marine PAs to a new shapefile ", dest_ds,
          " ...", appendLF = FALSE)
  # Using ogr2ogr, select only terrestrial (0) and coastal (1), i.e. != 2
  layer <- ogrListLayers(source_ds)[1]
  ogr2ogr(src_datasource_name = source_ds, dst_datasource_name = dest_ds,
          layer = layer, skipfailures = TRUE,
          where = "MARINE <> '2'", preserve_fid = TRUE, lco = "ENCODING=UTF-8")
  message("done")
} else {
  stop("No valid GDAL installation found on the system")
}
|
bfb3674a0c92fa069de999d17410315e9bd24fc1
|
887dc03efc71b10900e0fcab0d56e85a877098f8
|
/R/mod_email_validation.R
|
84c18f243c8af03a1b863f789dc0bf44cbec7e56
|
[
"MIT"
] |
permissive
|
ove-ut3/survey.admin
|
36a445459c532bf307dfb5f0f4747b6288494d2c
|
98225711492f50931d868a9277d7fba039bd1efc
|
refs/heads/master
| 2021-01-07T22:48:29.207281
| 2020-05-20T12:57:42
| 2020-05-20T12:57:42
| 241,842,349
| 0
| 0
|
NOASSERTION
| 2020-05-20T12:57:43
| 2020-02-20T09:23:39
|
R
|
UTF-8
|
R
| false
| false
| 11,379
|
r
|
mod_email_validation.R
|
# Module UI
#' @title mod_email_validation_ui and mod_email_validation_server
#' @description A shiny Module.
#'
#' @param id shiny id
#' @param input internal
#' @param output internal
#' @param session internal
#'
#' @rdname mod_email_validation
#'
#' @keywords internal
#' @export
#' @importFrom shiny NS tagList
mod_email_validation_ui <- function(id) {
  # Module UI: a wide email table on the left; malformed-email panel,
  # validation controls and a status pie chart on the right.
  ns <- NS(id)

  # Left column: the main email data table.
  email_table_col <- column(
    width = 7,
    box(
      title = "Email", width = 12,
      DT::DTOutput(ns("dt_email"))
    )
  )

  # Stats box: toggle between counting every email or one email per token,
  # rendered as a plotly chart by the server.
  stats_box <- box(
    title = "Stats", width = 12,
    selectInput(
      ns("select_duplicate_token"),
      label = NULL,
      choices = c("All emails", "One email per token"),
      selected = "All emails"
    ),
    plotly::plotlyOutput(ns("stats"))
  )

  # Right column: server-rendered malformed-email panel, validation
  # controls, then the stats box.
  side_col <- column(
    width = 5,
    uiOutput(ns("ui_email_malformed")),
    box(
      title = "Validation", width = 12,
      uiOutput(ns("ui_validation"))
    ),
    stats_box
  )

  tagList(
    fluidRow(email_table_col, side_col)
  )
}
# Module Server
#' @rdname mod_email_validation
#' @export
#' @keywords internal
# Server half of the email-validation module. `rv` is a reactiveValues store
# shared across modules; this module reads/writes df_participants_contacts,
# df_email_domains and df_malformed_emails and persists them to the SQLite
# base configured via golem options.
mod_email_validation_server <- function(input, output, session, rv){

  ns <- session$ns

  # Emails of the currently-filtered participants, normalised: one row per
  # (token, email), with `status` as an ordered factor and NA -> "missing".
  df_validation_email <- reactive({

    rv$df_participants_contacts %>%
      dplyr::semi_join(
        rv$df_participants_filter(),
        by = "token"
      ) %>%
      dplyr::filter(.data$key == "email") %>%
      dplyr::select(.data$token, email = .data$value, .data$service, .data$status, date = .data$status_date) %>%
      dplyr::mutate_at("service", as.factor) %>%
      tidyr::replace_na(list(status = "missing")) %>%
      dplyr::mutate_at("status", factor, levels = c("valid", "unknown", "invalid", "missing"))

  })

  # Main email table; if domains are selected in dt_email_domains, restrict
  # the rows to emails belonging to those domains.
  output$dt_email <- DT::renderDT({

    if (!is.null(input[["dt_email_domains_rows_selected"]])) {

      selected_domains <- rv$df_email_domains %>%
        dplyr::filter(dplyr::row_number() %in% input[["dt_email_domains_rows_selected"]])

      # Extract the domain part after "@" to join against the selection.
      selected_emails <- df_validation_email() %>%
        dplyr::mutate(domain = stringr::str_match(.data$email, "@(.+)")[, 2]) %>%
        dplyr::semi_join(selected_domains, by = "domain") %>%
        dplyr::select(-.data$domain)

    } else {

      selected_emails <- df_validation_email()

    }

    selected_emails %>%
      DT::datatable(
        rownames = FALSE,
        filter = 'top',
        options = list(
          dom = "rti",
          scrollY = '72vh',
          pageLength = -1
        )
      )

  })

  # Malformed-email panel. Side effect: refreshes rv$df_malformed_emails
  # using str_validate_email (package-internal validator — presumably a
  # regex check; confirm in package sources). The two branches render the
  # same box, with either a table of offenders or a "none" message.
  output$ui_email_malformed <- renderUI({

    rv$df_malformed_emails <- rv$df_participants_contacts %>%
      dplyr::filter(.data$key == "email") %>%
      dplyr::inner_join(
        rv$df_participants,
        by = "token"
      ) %>%
      dplyr::select(.data$token, email = .data$value, .data$firstname, .data$lastname) %>%
      dplyr::filter(!str_validate_email(.data$email))

    if (nrow(rv$df_malformed_emails) >= 1) {

      tagList(
        box(
          title = "Malformed email or invalid domains", width = 12, collapsible = TRUE, collapsed = TRUE,
          DT::DTOutput(ns("dt_malformed_emails")),
          actionButton(ns("btn_validate_domains"), "Validate selected domains", icon = icon("check")),
          shinyWidgets::addSpinner(
            DT::DTOutput(ns("dt_email_domains"))
          )
        )
      )

    } else {

      tagList(
        box(
          title = "Malformed email or invalid domains", width = 12, collapsible = TRUE, collapsed = TRUE,
          uiOutput(ns("text_no_malformed_emails")),
          actionButton(ns("btn_validate_domains"), "Validate selected domains", icon = icon("check")),
          shinyWidgets::addSpinner(
            DT::DTOutput(ns("dt_email_domains"))
          )
        )
      )

    }

  })

  output$text_no_malformed_emails <- renderUI(HTML("No malformed emails<br><br>"))

  # Table of individually malformed addresses.
  output$dt_malformed_emails <- DT::renderDT({

    rv$df_malformed_emails %>%
      DT::datatable(
        rownames = FALSE,
        options = list(
          dom = "rti",
          scrollY = '20vh',
          pageLength = -1
        )
      )

  })

  # Validate the selected (or all) email domains via test_url, then persist
  # the refreshed domain table to SQLite (delete-then-append).
  observeEvent(input$btn_validate_domains, {

    selected_domains <- rv$df_email_domains

    if (!is.null(input[["dt_email_domains_rows_selected"]])) {
      selected_domains <- selected_domains %>%
        dplyr::filter(dplyr::row_number() %in% input[["dt_email_domains_rows_selected"]])
    }

    withProgress(message = "Email domains validation :", value = 0, detail = "0%", {

      for (i in 1:nrow(selected_domains)) {

        selected_domains$status[i] <- test_url(selected_domains$domain[i])

        incProgress(
          1/nrow(selected_domains),
          detail = paste0(
            round(i/nrow(selected_domains) * 100, 1), "% - ",
            selected_domains$domain[i]
          )
        )

      }

    })

    # Merge the newly-validated rows back over the full domain table,
    # keeping the descending-count ordering.
    rv$df_email_domains <- selected_domains %>%
      patchr::anti_join_bind(rv$df_email_domains, by = "domain", arrange = FALSE) %>%
      dplyr::arrange(-.data$n)

    if (nrow(impexp::sqlite_import(golem::get_golem_options("sqlite_base"), "email_domains")) >= 1) {
      impexp::sqlite_execute_sql(
        golem::get_golem_options("sqlite_base"),
        "DELETE FROM email_domains;"
      )
    }

    impexp::sqlite_append_rows(golem::get_golem_options("sqlite_base"), rv$df_email_domains, "email_domains")

  })

  # Domain frequency table. Side effects: recomputes rv$df_email_domains
  # (well-formed emails only, counted per domain, joined with any previous
  # validation status) and exports it to SQLite before rendering.
  output$dt_email_domains <- DT::renderDT({

    rv$df_email_domains <- rv$df_participants_contacts %>%
      dplyr::filter(.data$key == "email") %>%
      dplyr::inner_join(
        rv$df_participants_filter(),
        by = "token"
      ) %>%
      dplyr::select(.data$token, email = .data$value, .data$firstname, .data$lastname) %>%
      dplyr::filter(str_validate_email(.data$email)) %>%
      dplyr::mutate(domain = stringr::str_match(.data$email, "@(.+)")[, 2]) %>%
      dplyr::group_by(.data$domain) %>%
      dplyr::summarise(n = dplyr::n()) %>%
      dplyr::ungroup() %>%
      dplyr::arrange(-.data$n) %>%
      dplyr::left_join(
        rv$df_email_domains %>%
          dplyr::select(.data$domain, .data$status),
        by = "domain"
      )

    impexp::sqlite_export(
      golem::get_golem_options("sqlite_base"),
      rv$df_email_domains,
      "email_domains",
      overwrite = TRUE
    )

    rv$df_email_domains %>%
      dplyr::mutate_at("status", as.factor) %>%
      DT::datatable(
        rownames = FALSE,
        colnames = c("Domain" = "domain"),
        filter = 'top',
        options = list(
          dom = "rt",
          scrollY = '30vh',
          pageLength = -1
        )
      )

  })

  # Validation controls: external service picker, per-call sleep, and the
  # button that triggers validation. Requires the optional survey.api package.
  output$ui_validation <- renderUI({

    validate(
      need("survey.api" %in% utils::installed.packages()[, 1], "Package survey.api needs to be installed.")
    )

    tagList(
      div(
        style = "display: inline-block; width: 44%; vertical-align: top;",
        selectInput(
          ns("service_select"),
          "Service :",
          choices = c("bulkemailchecker.com", "listflow.io", "quickemailverification.com", "emailmarker.com")
        )
      ),
      div(
        style = "display: inline-block; width: 55%; vertical-align: top;",
        numericInput(
          ns("sleep_select"),
          "Sleep time in seconds between each validation:",
          value = 5,
          min = 0
        )
      ),
      div(
        tippy::with_tippy(
          actionButton(
            ns("validation"),
            label = "Validate selected emails",
            icon = icon("check")
          ),
          "No selection means all emails will be validated"
        )
      )
    )

  })

  # Capture the rows to validate (selected rows or all) and ask for
  # confirmation; the actual work happens in the shinyalert observer below.
  observeEvent(input$validation, {

    dt_selected_emails <- df_validation_email()

    if (!is.null(input[["dt_email_rows_selected"]])) {
      dt_selected_emails <- dt_selected_emails %>%
        dplyr::filter(dplyr::row_number() %in% input[["dt_email_rows_selected"]])
    }

    rv$df_selected_emails <- dt_selected_emails %>%
      dplyr::select(.data$email) %>%
      unique()

    shinyalert::shinyalert(title = "Do you confirm ?", type = "info", showCancelButton = TRUE, closeOnEsc = FALSE)

  })

  # Runs after the confirmation dialog: calls the chosen validation API for
  # each unique email, patches the results into df_participants_contacts and
  # persists the table to SQLite.
  observeEvent(input$shinyalert, {

    if (input$shinyalert) {

      # NOTE(review): all four services currently map to the same function
      # (survey.api::bulk_email_checker) — confirm whether this is intended.
      fn_validation <- switch(
        input$service_select,
        bulkemailchecker.com = survey.api::bulk_email_checker,
        listflow.io = survey.api::bulk_email_checker,
        quickemailverification.com = survey.api::bulk_email_checker,
        emailmarker.com = survey.api::bulk_email_checker
      )

      api_key_config <- switch(
        input$service_select,
        bulkemailchecker.com = "api_key_bulkemailchecker",
        listflow.io = "api_key_listflow",
        quickemailverification.com = "api_key_quickemailverification",
        emailmarker.com = "api_key_emailmarker"
      )

      api_key = rv$df_config %>%
        dplyr::filter(.data$key == !!api_key_config) %>%
        dplyr::pull(.data$value)

      output <- dplyr::tibble(
        email = rv$df_selected_emails$email,
        validation = list(nrow(rv$df_selected_emails))
      )

      withProgress(message = "Emails validation :", value = 0, detail = "0%", {

        for (i in 1:nrow(output)) {

          # No sleep before the very first call; throttle the rest.
          if (i == 1) {
            output$validation[[i]] <- fn_validation(output$email[i], key = api_key, sleep = 0)
          } else {
            output$validation[[i]] <- fn_validation(output$email[i], key = api_key, sleep = input$sleep_select)
          }

          incProgress(
            1 / nrow(output),
            detail = paste0(
              round(i / nrow(output) * 100, 1), "% - ",
              output$email[i]
            )
          )

        }

      })

      output <- output %>%
        tidyr::unnest(.data$validation) %>%
        dplyr::mutate(service = input$service_select)

      # Join the API results back onto contacts by email value, then update
      # the master contacts table in place by (token, key, value).
      patch <- rv$df_participants_contacts %>%
        dplyr::select(.data$token, .data$key, .data$value, .data$source, .data$date) %>%
        dplyr::inner_join(
          output %>%
            dplyr::select(value = .data$email, .data$status, .data$service, status_date = "time"),
          by = "value"
        )

      rv$df_participants_contacts <- patchr::df_update(
        rv$df_participants_contacts,
        patch,
        by = c("token", "key", "value")
      )

      impexp::sqlite_export(
        golem::get_golem_options("sqlite_base"),
        rv$df_participants_contacts,
        "participants_contacts",
        overwrite = TRUE
      )

    }

  })

  # Status pie chart. "One email per token" keeps, per token, the email with
  # the best status (order: valid > unknown > invalid > missing).
  output$stats <- plotly::renderPlotly({

    df <- df_validation_email() %>%
      tidyr::replace_na(list(status = "missing"))

    if (input$select_duplicate_token == "One email per token") {

      df <- df %>%
        dplyr::arrange(.data$token, purrr::map_int(.data$status, ~ which(c("valid", "unknown", "invalid", "missing") %in% .))) %>%
        dplyr::group_by(.data$token) %>%
        dplyr::filter(dplyr::row_number() == 1) %>%
        dplyr::ungroup()

    }

    df %>%
      dplyr::pull(.data$status) %>%
      graphr::shiny_pie(donut = TRUE)

  })

}
|
d09646f07d92ac19309e6f942b8e3c9492734003
|
4e158e6ae5dbe5f073a895689a800900cdc1b4fc
|
/R/uv_linear.R
|
946e1eda055734ee7cd659da931812751570bf07
|
[] |
no_license
|
cran/fastStat
|
7943ac0faa13cef1d00f813dfed91b9162e24c42
|
ba5479eab0ec601bef4aaa0348d722b1272e2a1d
|
refs/heads/master
| 2020-12-21T23:17:37.441282
| 2020-09-17T14:00:13
| 2020-09-17T14:00:13
| 236,596,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,111
|
r
|
uv_linear.R
|
#' Looping for Univariable Linear Regression
#'
#' Fits one linear model (\code{stats::lm}) per entry of \code{variable},
#' optionally adjusting for the covariates in \code{adjust}, and collects
#' each predictor's coefficient, confidence interval, standard error,
#' t statistic and p-value into a single data.frame.
#'
#' @param data data
#' @param y name of the outcome variable
#' @param variable variable names for univariable linear regression. If missing, it will be column names of data except y and adjust
#' @param adjust adjust variable names for univariable linear regression
#' @param round digital round, 3 is defaulted
#' @param p_threshold threshold for p value to show star. 0.05 is defaulted
#' @param order_by.beta logical. TRUE (the default) orders results by decreasing beta, keeping multi-level factor dummies together
#' @importFrom stats lm as.formula confint
#' @return a data.frame with one row per estimated coefficient: beta,
#'   confidence limits, standard error, t value, p-value, and a "star"
#'   column flagging significant coefficients by effect size
#' @note Earlier versions exponentiated the coefficient in the adjusted
#'   branch (a leftover from the logistic-regression variant) and used
#'   odds-ratio-style star thresholds there; linear-model coefficients are
#'   now reported on their natural scale in both branches, with identical
#'   star thresholds.
#' @export
#'
#' @examples
#' uv_linear(data = mtcars,y = 'vs')
uv_linear <- function(data, y, variable, adjust, round = 3,
                      p_threshold = 0.05, order_by.beta = TRUE) {
  # Capture before `adjust` is evaluated anywhere else.
  has_adjust <- !missing(adjust)
  # Drop rows containing missing values (package-internal helper).
  data <- delet_na_df.mtr(data)
  if (missing(variable)) {
    # Default to every column except the outcome (and the covariates).
    variable <- if (has_adjust) colnames(data) %not% c(y, adjust)
                else colnames(data) %not% y
  }
  # Number of leading coefficient rows to discard from each fit:
  # the intercept alone, or intercept plus all adjustment covariates
  # (counted once from a covariates-only model).
  if (has_adjust) {
    base_fit <- lm(as.formula(paste0(y, '~', paste0(adjust, collapse = '+'))),
                   data = data)
    n_drop <- nrow(summary(base_fit)$coefficients)
  } else {
    n_drop <- 1
  }
  res <- NULL
  # Group id per result row: >0 ties a multi-level factor's dummy rows
  # together so ordering does not scatter them.
  fct_grp <- NULL
  for (i in seq_along(variable)) {
    if (is.factor(data[, variable[i]])) {
      rep_len <- length(levels(data[, variable[i]])) - 1
      if (rep_len > 1) {
        fct_grp <- c(fct_grp, rep(i + 1, rep_len))
      } else {
        fct_grp <- c(fct_grp, 0)
      }
    } else {
      fct_grp <- c(fct_grp, 0)
    }
    rhs <- if (has_adjust) paste0(c(adjust, variable[i]), collapse = '+')
           else variable[i]
    fit <- lm(as.formula(paste0(y, '~', rhs)), data = data)
    coefs <- as.data.frame(summary(fit)$coefficients)
    line_var <- coefs[-seq_len(n_drop), ]
    ci <- as.data.frame(
      suppressWarnings(suppressMessages(confint(fit)))
    )[-seq_len(n_drop), ]
    # BUG FIX: the adjusted branch previously reported exp(Estimate) while
    # leaving the confidence interval unexponentiated; linear-regression
    # coefficients belong on their natural scale.
    res.i <- round(cbind(beta = line_var[, 'Estimate'],
                         ci,
                         line_var[, c('Std. Error', 't value', 'Pr(>|t|)')]),
                   round)
    res <- rbind(res, res.i)
  }
  res2 <- cbind(res, class = fct_grp)
  if (order_by.beta) {
    res3 <- res2[order(res2$class, res2$beta, decreasing = TRUE), -ncol(res2)]
  } else {
    res3 <- res2[, -ncol(res2)]
  }
  # Visual "star" column: effect-size bars (|*** for large positive betas,
  # ***| for large negative ones), shown only for significant p-values.
  star <- ifelse(res3$beta >= 3, ' |***',
          ifelse(res3$beta >= 1, ' |** ',
          ifelse(res3$beta > 0, ' |* ',
          ifelse(res3$beta == 0, ' | ',
          ifelse(res3$beta >= -1, " *| ",
          ifelse(res3$beta >= -3, ' **| ', '***| '))))))
  res3$star <- ifelse(res3$`Pr(>|t|)` <= p_threshold, star, "")
  return(res3)
}
|
2c38513be1a85789bb548d397aa32161c892d155
|
62c14804025c9b0a56b3dc43937cd365ec1481b3
|
/output/sorted/GM12874/GM12874.R
|
21e6be2f3e232dd93c7c39bef2add5d286a2d2a7
|
[
"MIT"
] |
permissive
|
Bohdan-Khomtchouk/ENCODE_TF_geneXtendeR_analysis
|
98ad9dd688d78af0a412d7c3defde223c6d1ff50
|
4d055110f2015aa8d65bcd31eea3b0da8e19298f
|
refs/heads/master
| 2021-05-04T06:55:12.446062
| 2019-04-19T00:46:01
| 2019-04-19T00:46:01
| 70,523,421
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 96
|
r
|
GM12874.R
|
# geneXtendeR-style workflow for the GM12874 cell line.
# Load the BED file of CTCF peak calls for this sample.
peaksInput("CTCF.GM12874.bed")
# Open a PNG graphics device; the plot below is written to this file.
png("CTCF.GM12874.png")
# Line plot over 0-10000 in steps of 500; `human` is presumably the
# organism annotation object loaded elsewhere — TODO confirm it is in
# scope before this script runs.
linePlot(human, 0, 10000, 500)
# Close the device, flushing the PNG to disk.
dev.off()
|
bc954a705fcad1e06eadfc877ac9873f86356063
|
38373485330e50b09d27ea265ee0535b368f0579
|
/code/pca-skill-scores-ggbiplot.R
|
20bdd49b095b29ca6aeff8c5a1e96fc9b7ec8f95
|
[] |
no_license
|
s81320/vis
|
5300e346349acd568cd7ff4ad06751960aeb42b8
|
b96755388ebdbd50c42d145e9e6fc26b2c1c45c4
|
refs/heads/master
| 2022-11-18T03:34:05.794807
| 2020-07-21T17:25:05
| 2020-07-21T17:25:05
| 270,222,860
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,240
|
r
|
pca-skill-scores-ggbiplot.R
|
# PCA biplot of soccer skill scores, grouped by broad field position.
# One-off installation helpers (ggbiplot is GitHub-only), kept for reference:
# require(devtools)
# Sys.setenv(R_REMOTES_NO_ERRORS_FROM_WARNINGS="true")
# install_github("vqv/ggbiplot")

# NOTE(review): hard-coded setwd() and rm(list=ls()) make this script
# machine-specific and destructive to the calling session — consider
# removing both and running from the project root instead.
setwd("D:/msc-ds/course-resource/data-visualization/project")

rm(list=ls())

library(RColorBrewer)
library(ggbiplot)

soccer.preprocessed <- read.csv(
  "soccer-preprocessed.csv",
  encoding = "UTF-8"
)

# Broader.Position: collapse detailed positions into four groups —
# goalkeeper, defender, midfielder, attacker.
GLK <- c(
  "GK"
)
DFN <- c(
  "LB",
  "LCB",
  "CB",
  "RCB",
  "RB",
  "LWB",
  "LDM",
  "CDM",
  "RDM",
  "RWB"
)
MDF <- c(
  "LCM",
  "CM",
  "RCM"
)
ATK <- c(
  "LM",
  "RM",
  "LAM",
  "CAM",
  "RAM",
  "LW",
  "LF",
  "CF",
  "RF",
  "RW",
  "LS",
  "ST",
  "RS"
)

# Nested ifelse: anything not GLK/DFN/MDF falls through to "ATK".
soccer.preprocessed$Broader.Position <- ifelse(
  soccer.preprocessed$Position %in% GLK, "GLK", ifelse(
    soccer.preprocessed$Position %in% DFN, "DFN", ifelse(
      soccer.preprocessed$Position %in% MDF, "MDF", "ATK"
    )
  )
)
# Fix the factor level order so legends/colors follow GLK -> ATK.
soccer.preprocessed$Broader.Position <- factor(
  soccer.preprocessed$Broader.Position,
  levels=c("GLK","DFN","MDF", "ATK")
)

# PCA on the skill-score columns. NOTE(review): columns 46:79 are selected
# by position — assumes a fixed column layout of the CSV; confirm.
soccer.pca <- prcomp(soccer.preprocessed[, 46:79])

# Color/shape palettes: one color and one point shape per position group.
pallete.set2 <- brewer.pal(n=8, name="Set2")
pallete.paired <- brewer.pal(n=12, name="Paired")
pallete.dark2 <- brewer.pal(n=8, name="Dark2")
pallete.greys <- brewer.pal(n=9, name="Greys")

colors <- c(
  pallete.paired[2],
  pallete.paired[4],
  pallete.set2[6],
  pallete.dark2[2]
)
shapes <- c(3, 4, 21, 22)

# Build the biplot with its own points hidden (alpha=0); visible points are
# added via geom_point() below so color AND shape can encode the group.
soccer.pca.biplot <- ggbiplot(
  soccer.pca,
  obs.scale=1,
  var.scale=1,
  pc.biplot=T,
  col=c(pallete.greys[3], pallete.dark2[4]),
  groups=soccer.preprocessed$Broader.Position,
  alpha=0
) +
  scale_color_manual(
    name="Broader.Position",
    values=colors
  ) +
  scale_shape_manual(
    name="Broader.Position",
    values=shapes
  ) +
  geom_point(
    aes(
      colour=soccer.preprocessed$Broader.Position,
      shape=soccer.preprocessed$Broader.Position
    ),
    size=1.5,
    alpha=0.75
  ) +
  theme_classic() +
  theme(
    legend.direction='vertical',
    legend.position=c(0.85, 0.75),
  )

# Reorder the plot layers so points draw under the variable labels/arrows,
# and recolor the arrows/labels. NOTE(review): the layer indices 1/3/4
# depend on ggbiplot's internal layer construction order — fragile across
# ggbiplot versions.
layer_arrows <- soccer.pca.biplot$layers[[1]]
layer_texts <- soccer.pca.biplot$layers[[3]]
layer_points <- soccer.pca.biplot$layers[[4]]

layer_arrows$aes_params$colour <- pallete.greys[8]
layer_texts$aes_params$colour <- pallete.greys[8]

soccer.pca.biplot$layers <- c(
  layer_points,
  layer_texts,
  layer_arrows
)

soccer.pca.biplot
|
a66bd5bf1030528ceb911fa3e8e09a7e9334417f
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/gbd_2017/mortality_code/mortality_estimation/life_tables/mltgeneration/R/recalc_u10_nlx_mx_ax.R
|
ba9fa7a478542b6056b1aa1623f35183ebfe5200
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,908
|
r
|
recalc_u10_nlx_mx_ax.R
|
#' Calculate under-10 nlx, mx, and ax values
#'
#' Calculate under-10-specific nlx, mx, and ax values based on Human Life-Table Database k1 parameters
#'
#' @param dt data.table with variables: ihme_loc_id, sex, age, sim, age_length, qx, lx, dx
#' @param id_vars character vector of id variables (last one must be age)
#'
#' @return data.table with variables: ihme_loc_id, sex, age, sim, age_length, qx, lx, dx
#' @export
#'
#' @examples
#'
#' @import data.table
#' @import assertable
recalc_u10_nlx_mx_ax <- function(dt, id_vars) {
        ## Prep datasets
        # `age` must come last so setorderv sorts within each series.
        if(tail(id_vars, 1) != "age") stop("numeric variable age must be the last var specified in id_vars")
        key_ids <- id_vars[id_vars != "age"]
        setorderv(dt, id_vars)
        # Work only on the 0, 1 and 5 age groups; ages >= 10 pass through.
        under_10 <- dt[age <= 5]

        ## Generate k1 parameter
        ## Human Life-Table Database -- Type 6. Recalculated abridged life tables (from Coale-Demeny 1983)
        # k1 is a sex-specific weighting constant, a function of infant qx
        # when infant mortality is low (qx <= .01), else a fixed value.
        under_1 <- dt[age == 0, .SD, .SDcols=c(key_ids, "qx")]
        under_1[sex == "male" & qx > .01, k1 := 1.352]
        under_1[sex == "male" & qx <=.01, k1 := 1.653 - 3.013 * qx]
        under_1[sex == "female" & qx > .01, k1 := 1.361]
        under_1[sex == "female" & qx <= .01, k1 := 1.524 - 1.627 * qx]
        under_1[, qx := NULL]
        assert_values(under_1, "k1", "not_na", quiet=T)
        under_10 <- merge(under_10, under_1, by = key_ids)

        ## Recalculate nLx for 0-1, 1-5, 5-10, 10-15
        ## Age 0 recalculated using Human Life-Table Database
        # lx at the upper bound of each interval; extracted as plain vectors,
        # so ordering of `under_10` and `dt` must agree (enforced by
        # setorderv above).
        lx_1_5 <- under_10[age == 1, lx]
        lx_5_10 <- under_10[age == 5, lx]
        lx_10_15 <- dt[age == 10, lx]
        if(length(lx_1_5) != length(lx_5_10) | length(lx_5_10) != length(lx_10_15)) stop("Lx lengths do not match")

        ## Apply k1 weights -- merge lx_1_merged on so that subsetted results (e.g. '& qx > .1') don't get mixed-up lx values
        under_10[age == 0, lx_1_merged := lx_1_5]
        under_10[age == 0, nLx := (0.05 + 3 * qx) + (0.95 - 3 * qx) * lx_1_merged]
        # High-infant-mortality populations use a fixed split instead.
        under_10[age == 0 & qx > .1, nLx := .35 + .65 * lx_1_merged]
        under_10[age == 1, nLx := k1 * lx_1_5 + (4 - k1) * lx_5_10]
        under_10[age == 5, nLx := 2.5 * (lx_5_10 + lx_10_15)]
        under_10[, c("lx_1_merged", "k1") := NULL]

        ## Generate mx
        under_10[, mx := dx/nLx]

        ## Generate capped ax values
        under_10[, ax := (qx + (age_length * mx * qx) - (age_length * mx)) / (mx * qx)]

        # ax can be negative if qx is very low compared to mx, Recenter all values that occur this way
        # Replacement values are sex- and age-specific standard ax values.
        under_10[(ax <= 0 | ax >= 1) & age == 0 & sex == "male", ax := .2]
        under_10[(ax <= 0 | ax >= 1) & age == 0 & sex == "female", ax := .15]
        under_10[(ax <= 0 | ax >= 4) & age == 1 & sex == "male", ax := 1.35]
        under_10[(ax <= 0 | ax >= 4) & age == 1 & sex == "female", ax := 1.36]
        under_10[(ax <= 0 | ax >= 5) & age == 5 & sex == "male", ax := 2.5]
        under_10[(ax <= 0 | ax >= 5) & age == 5 & sex == "female", ax := 2.5]

        assert_values(under_10, c("lx", "qx", "nLx", "mx", "ax"), "not_na", quiet=T)

        # Recombine the recalculated under-10 rows with the untouched 10+ rows.
        dt <- rbindlist(list(dt[age >= 10], under_10), use.names=T)
        return(dt)
}
|
7a3e5999fb47eb1453851ed64e7ffa8762719b79
|
2b7607fa78bf83b2515b9de2f9b40d15c81c2ab2
|
/Scripts/antsASLProcessing.R
|
83668b72e916024cc3c849170097309976cd00ea
|
[
"Apache-2.0"
] |
permissive
|
ANTsX/ANTs
|
3176341b8de664939eafde3e1ebf8c449809a9dd
|
dfd9e6664f2fc5f0dbd05c6c23d5e4895e82abee
|
refs/heads/master
| 2023-08-24T20:43:33.986495
| 2023-08-08T18:23:45
| 2023-08-08T18:23:45
| 7,777,650
| 899
| 286
|
Apache-2.0
| 2023-09-10T18:38:59
| 2013-01-23T15:43:41
|
C++
|
UTF-8
|
R
| false
| false
| 11,646
|
r
|
antsASLProcessing.R
|
#!/usr/bin/env Rscript
# antsASLProcessing.R — command-line ASL (arterial spin labeling) perfusion
# pipeline built on ANTsR: motion correction, nuisance regression, outlier
# censoring, CBF quantification, and optional warping to template/label space.
library(ANTsR)
library(tools)
# usePkg() is an ANTsR helper: loads a package, returning FALSE on failure.
if(!usePkg('optparse') | !usePkg('ANTsR')){
  stop("optparse and ANTsR packages required.")
}

# Command-line interface (optparse). Only --pCASL is required.
# NOTE(review): the --method help text lists "subtraction" while the
# dispatch code later in this script tests for 'subtract' — confirm the
# intended spelling and align help text and code.
optlist <- list(
  make_option(c('-s', '--pCASL'), default='', help=' raw pCASL image'),
  make_option(c('-o', '--outputpre'), default='CBF_',
              help='output prefix (defaults to %default)'),
  make_option(c('-a', '--antsCorticalThicknessPrefix'),
              default='', help='prefix of antsCorticalThickness output'),
  make_option(c('-l', '--labelSet'),
              default='', help='label set in template space to warp to ASL'),
  make_option(c('-t', '--template'),
              default='', help='Template to warp output to'),
  make_option(c('-c', '--paramFile'), default='',
              help='parameter file containing ASL acquisition parameters'),
  make_option(c('-x', '--smoothingFWHM'), default=0,
              help='Full width half max for smoothing'),
  make_option(c('-m', '--method'), default='regression',
              help=paste(' method for perfusion calculation. \n\t\tOne of:',
                         '"regression", "subtraction", "bayesian",',
                         '"RobustRegression", "BayesianRegression", "LocalBayesianRegression."')),
  make_option(c('-d', '--denoising'), default='CompCorMotion',
              help=paste('denoising method.',
                         'Options are: "CompCor", "Motion", "Detrending",',
                         '\n\t\t"Cross-Validation", "OutlierRejection".',
                         'Multiple options can be specified',
                         '(e.g., "CompCorMotion" is legal). Default is %default.')),
  make_option(c('-g', '--debug'), default=0,
              help=paste('Save debugging information, including motion',
                         'correction and nuisance variables')),
  make_option(c('-b', '--bloodT1'), default=0.67,
              help='blood T1 value (defaults to %default s^-1)'),
  make_option(c('-r', '--robustness'), default=0.95,
              help='robustness parameter (defaults to %default).'),
  make_option(c('-n', '--bootstrapNumber'), default=20,
              help=' number of bootstrap samples (defaults to %default)'),
  make_option(c('-e', '--bootstrapPercent'), default=0.70,
              help='percent to sample per bootstrap run (defaults to %default)'),
  make_option(c('-k', '--keepTmp'), default=0,
              help=paste('keep tmp files, including warps',
                         '(defaults to %default--takes lots of space to save)')),
  make_option(c('-f', '--bootstrapReplace'), default=0,
              help=paste('bootstrap with replacement? takes arguments',
                         '0 or 1; defaults to 0.')),
  make_option(c('-v', '--verbose'), default=0,
              help='verbose output.'))
usage <- OptionParser(option_list=optlist, usage='Usage: %prog <s> [otlcxmdgbrnekfv]')
opt <- parse_args(usage)
## debug
#opt <- data.frame(
#  pCASL=paste('/data/jag/BD2K01/ASL_pipeline/data/AddictionCenter/ABART/imgs/',
#    '../processed/ABART_Bac_106/ASL/ABART_Bac_106_pCASL.nii.gz', sep=''),
#  outputpre=paste('/data/jag/BD2K01/ASL_pipeline/data/AddictionCenter/ABART/imgs',
#    '/../processed/ABART_Bac_106/ASL/ABART_Bac_106_', sep=''),
#  antsCorticalThicknessPrefix=paste('/data/jag/BD2K01/ASL_pipeline/',
#    'data/AddictionCenter/ABART/imgs/../processed/ABART_Bac_106',
#    '/ASL/../Anatomy/ABART_Bac_106_', sep=''),
#  labelSet=paste('/data/jag/BD2K01/ASL_pipeline/templates/',
#    'HarvardOxford/ABART_rois.nii.gz', sep=''),
#  template=paste('/data/jag/BD2K01/ASL_pipeline/templates/',
#    'HarvardOxford/MNI152_T1_2mm.nii.gz', sep=''))
#  pCASL='data/101_pcasl.nii.gz',
#  out='test')

# --- Input validation and option echo ---------------------------------------
if(!file.exists(as.character(opt$pCASL))) {
  stop(paste('pCASL image', opt$pCASL,
             'does not exist.'))
}
if(opt$verbose) {
  cat('Running antsASLProcessing.R with the following options:\n')
  for(option in names(opt)){
    cat(paste(option, ': ', opt[option], '\n', sep=''))
  }
}

# Create the output directory if the prefix contains a path component.
if(length(grep(.Platform$file.sep, opt$outputpre)) > 0) {
  outdir <- dirname(opt$outputpre)
  if(!file.exists(outdir)) dir.create(outdir)
}

# Read the raw 4D pCASL time series.
pcasl <- tryCatch({
  antsImageRead(as.character(opt$pCASL), 4)
}, error = function(e) {
  stop(paste('pCASL image', as.character(opt$pCASL),
             'does not exist.'))
})

# Acquisition parameters: read the user's CSV if given, otherwise fall back
# to pCASL defaults with tag-first ordering.
if(length(opt$paramFile) > 0){
  if(file.exists(as.character(opt$paramFile))) {
    config <- read.csv(opt$paramFile)
  } else {
    config <- data.frame(tagFirst=TRUE, sequence='pcasl')
  }
}

# Optional spatial smoothing (FWHM in the three spatial dimensions only).
if (opt$smoothingFWHM > 0) {
  mysmoother <- c(rep(opt$smoothingFWHM, 3), 0)
  pcasl <- smoothImage(pcasl, mysmoother, FWHM=TRUE)
}

# --- Average image, brain mask, motion correction ---------------------------
avg <- getAverageOfTimeSeries(pcasl)
avg <- n3BiasFieldCorrection(avg, 2)
avg <- n3BiasFieldCorrection(avg, 2)
mask <- getMask(avg, mean(avg), Inf, 2)
avg[mask==0] <- 0
moco <- antsrMotionCalculation(pcasl, fixed=avg, mask=mask)

# --- Tag/control design vector and nuisance predictors ----------------------
tag.first <- config$tagFirst
ts <- timeseries2matrix(moco$moco_img, moco$moco_mask)
# tc alternates -0.5/+0.5 so that a regression on tc yields the
# control-minus-tag (or tag-minus-control) difference.
if (!tag.first) {
  tc <- (rep(c(1, 0), dim(ts)[1])[1:dim(ts)[1]] - 0.5) # control minus tag
} else {
  tc <- (rep(c(0, 1), dim(ts)[1])[1:dim(ts)[1]] - 0.5) # tag minus control
}
nuisance <- getASLNoisePredictors(ts, tc, polydegree='loess')
noise.all <- cbind(moco$moco_params, moco$fd$MeanDisplacement, nuisance)
noise.combined <- as.matrix(combineNuisancePredictors(ts, tc, noise.all))

# --- Outlier censoring ------------------------------------------------------
# Subtraction-based perfusion needs complete tag/control pairs, so reject
# whole pairs in that case.
# FIX: accept "subtraction" (the value documented in the --method help text)
# as well as the historical 'subtract' spelling.
onlypairs <- FALSE
if (opt$method %in% c('subtract', 'subtraction')) {
  onlypairs <- TRUE
}
censored <- aslCensoring(pcasl, mask, nuis=noise.combined, method='robust',
                         reject.pairs=onlypairs)
if (length(censored$which.outliers) > 0) {
  tc <- tc[-censored$which.outliers]
  noise.censored <- noise.combined[-censored$which.outliers, ]
} else {
  noise.censored <- noise.combined
}

# Optionally dump the mean time series, nuisance matrix and rejected
# time points for inspection.
if (opt$debug) {
  mean.ts <- apply(ts, 1, mean)
  dat.debug <- cbind(data.frame(MeanTimeSeries=mean.ts), noise.all)
  write.csv(dat.debug, file=paste(opt$outputpre, 'TimeSeriesData.csv', sep=''),
            row.names=as.character(1:nrow(ts)))
  write.csv(data.frame(Outliers=censored$which.outliers),
            file=paste(opt$outputpre, 'OutlierTimepoints.csv', sep=''))
}
# --- Perfusion estimation ---------------------------------------------------
# Dispatch on --method. FIX: also accept "subtraction" (the documented value)
# alongside 'subtract', and fail fast on an unrecognized method instead of
# falling through with `perf` undefined.
if (opt$method == 'regression') {
  perf <- aslAveraging(censored$asl.inlier, mask=moco$moco_mask,
                       tc=tc, nuisance=noise.censored, method='regression')
} else if (opt$method == 'bayesian') {
  # Bayesian averaging needs tissue segmentations from a prior
  # antsCorticalThickness run, warped into ASL space.
  if (length(opt$antsCorticalThicknessPrefix) == 0) {
    stop("For Bayesian regression, segmentations are required.")
  }
  act <- as.character(opt$antsCorticalThicknessPrefix)
  braint1 <- tryCatch({
    antsImageRead(paste(act, "ExtractedBrain0N4.nii.gz", sep=""))
  }, error = function(e) {
    # FIX: was print(); execution continued with an invalid `braint1` and
    # failed later inside antsRegistration with a confusing error.
    stop(paste('T1 brain image', paste(act, "ExtractedBrain0N4.nii.gz", sep=""),
               'does not exist.'))
  })
  segmentation <- tryCatch({
    antsImageRead(paste(act, "BrainSegmentation.nii.gz", sep=""))
  }, error = function(e) {
    stop(paste('Segmentation image', paste(act, "BrainSegmentation.nii.gz", sep=""),
               'does not exist.'))
  })
  postnames <- list.files(path=dirname(act),
                          glob2rx("*BrainSegmentationPosteriors*.nii.gz"), full.names=TRUE)
  tissuelist <- tryCatch({
    imageFileNames2ImageList(postnames)
  }, error = function(e) {
    stop(paste("Probability images", postnames, "cannot be loaded."))
  })
  # Register T1 to the mean ASL image, then carry the segmentation (nearest
  # label) and tissue posteriors (linear) into ASL space.
  reg.t12asl <- antsRegistration(fixed=avg, moving=braint1,
                                 typeofTransform="SyNBold", outprefix=as.character(opt$outputpre))
  seg.asl <- antsApplyTransforms(avg, segmentation, reg.t12asl$fwdtransforms,
                                 "MultiLabel")
  for (ii in seq_along(tissuelist)) {
    tissuelist[[ii]] <- antsApplyTransforms(avg, tissuelist[[ii]],
                                            reg.t12asl$fwdtransforms, "Linear")
  }
  perf <- aslAveraging(censored$asl.inlier, mask=moco$moco_mask,
                       tc=tc, nuisance=noise.censored, method='bayesian',
                       segmentation=seg.asl, tissuelist=tissuelist)
} else if (opt$method %in% c('subtract', 'subtraction')) {
  perf <- aslAveraging(censored$asl.inlier, mask=moco$moco_mask,
                       tc=tc, method='cubicSubtract')
} else {
  stop(paste('Unrecognized --method:', opt$method))
}

# --- M0 (equilibrium magnetization) image -----------------------------------
# Mean signal per voxel for each interleave; the control interleave has the
# higher mean and is used as M0.
mvals2 <- apply(ts[tc == 0.5, ], 2, mean)
mvals1 <- apply(ts[tc == -0.5, ], 2, mean)
# mean control should exceed mean tag
if (mean(mvals2) > mean(mvals1)) {
  m0vals <- mvals2
  m1vals <- mvals1
} else {
  m0vals <- mvals1
  m1vals <- mvals2
}
m0 <- antsImageClone(moco$moco_mask)
m0[moco$moco_mask == 0] <- 0
m0[moco$moco_mask == 1] <- m0vals
m0 <- n3BiasFieldCorrection(m0, 4)
m0 <- n3BiasFieldCorrection(m0, 2)
# Merge acquisition parameters from the user's parameter file with the M0
# image for CBF quantification.
# BUG FIX: the original tested `length(opt$config > 0)` — a misplaced
# parenthesis on a nonexistent option (`opt$config`; the declared option is
# `--paramFile`), so the condition was always FALSE and the user-supplied
# acquisition parameters were silently ignored.
if (file.exists(as.character(opt$paramFile))) {
  config <- tryCatch({
    read.csv(as.character(opt$paramFile), row.names=1)
  }, error = function(e){
    stop(paste("Configuration file", opt$paramFile, "could not be read."))
  })
  parameters <- c(list(m0=antsImageClone(m0)), config)
} else {
  parameters <- list(sequence="pcasl", m0=antsImageClone(m0))
}
# Optionally save the raw perfusion and M0 images for debugging.
if (opt$debug) {
  antsImageWrite(perf, paste(opt$outputpre, 'Perfusion.nii.gz', sep=''))
  antsImageWrite(m0, paste(opt$outputpre, 'M0.nii.gz', sep=''))
}

# Quantify CBF from the perfusion estimate, M0 and acquisition parameters.
cbf <- quantifyCBF(perf, mask=moco$moco_mask, parameters=parameters)
antsImageWrite(cbf$meancbf, paste(opt$outputpre, "CBF.nii.gz", sep=""))

# --- Optional anatomy-based outputs -----------------------------------------
# If an antsCorticalThickness prefix is given: warp the segmentation to ASL
# space, tabulate tissue statistics, and optionally warp CBF to template
# space / a label set to ASL space.
if (nchar(opt$antsCorticalThicknessPrefix) > 0){
  act <- as.character(opt$antsCorticalThicknessPrefix)
  braint1 <- tryCatch({
    antsImageRead(paste(act, "ExtractedBrain0N4.nii.gz", sep=""))
  }, error = function(e) {
    # FIX: was print(); execution continued with an invalid `braint1`.
    stop(paste('T1 brain image', paste(act, "ExtractedBrain0N4.nii.gz", sep=""),
               'does not exist.'))
  })
  seg <- tryCatch({
    antsImageRead(paste(act, "BrainSegmentation.nii.gz", sep=""))
  }, error = function(e) {
    # FIX: was print(); execution continued with an invalid `seg`.
    stop(paste('Segmentation image', paste(act, "BrainSegmentation.nii.gz", sep=""),
               'does not exist.'))
  })
  reg.t12asl <- antsRegistration(fixed=avg, moving=braint1,
                                 typeofTransform="SyNBold" )
  seg.asl <- antsApplyTransforms(avg, seg, reg.t12asl$fwdtransforms, "MultiLabel")
  antsImageWrite(seg.asl, paste(opt$outputpre,
                                "SegmentationWarpedToASL.nii.gz", sep=''))
  segstats <- labelStats(cbf$meancbf, seg.asl)
  write.csv(segstats, paste(opt$outputpre, 'TissueStats.csv', sep=''),
            row.names=FALSE)

  # Transform chains between template and ASL space, composed through the
  # subject T1 (from the cortical-thickness run) and the T1<->ASL registration.
  tx.template2t1 <- c(paste(act, "TemplateToSubject0Warp.nii.gz", sep=""),
                      paste(act, "TemplateToSubject1GenericAffine.mat", sep=""))
  tx.t12template <- c(paste(act, "SubjectToTemplate1Warp.nii.gz", sep=""),
                      paste(act, "SubjectToTemplate0GenericAffine.mat", sep=""))
  tx.asl2template <- c(reg.t12asl$invtransforms, tx.t12template)

  # BUG FIX: the original tested `length(opt$template) > 0`, which is TRUE
  # even for the default empty string (length('') is 1), and its error
  # handler referenced the not-yet-defined `template` variable. Test the
  # string itself, consistent with the labelSet check below.
  if (nchar(as.character(opt$template)) > 0) {
    template <- tryCatch({
      antsImageRead(as.character(opt$template))
    }, error = function(e) {
      stop(paste("Template image", as.character(opt$template), "does not exist."))
    })
    asl.warped2template <- antsApplyTransforms(template, cbf$meancbf, tx.asl2template,
                                               whichtoinvert=c(FALSE, FALSE, FALSE, FALSE))
    antsImageWrite(asl.warped2template,
                   paste(opt$outputpre, "CBFWarpedToTemplate.nii.gz", sep=''))
  }

  tx.template2asl <- c(tx.template2t1, reg.t12asl$fwdtransforms)
  if (nchar(as.character(opt$labelSet)) > 0) {
    label <- tryCatch({
      antsImageRead(as.character(opt$labelSet))
    }, error = function(e) {
      # FIX: was print(); execution continued with an invalid `label`.
      stop(paste("Label image", opt$labelSet, "does not exist."))
    })
    label.asl <- antsApplyTransforms(avg, label, tx.template2asl, "MultiLabel")
    antsImageWrite(label.asl, paste(opt$outputpre,
                                    'LabelWarpedToASL.nii.gz', sep=''))
    labelstats.cbf <- labelStats(cbf$meancbf, label.asl)
    write.csv(labelstats.cbf, paste(opt$outputpre, 'LabelStats.csv', sep=''),
              row.names=FALSE)
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.