blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bb81e4f88ade8092b02696f1457f8c9616125dd2
|
effe7cbf3c35d4f219379adb37d7604fc16426cb
|
/R/Classifier_funs.R
|
d32423f9d02535e986e7dc7d66edc24f76ba0c7c
|
[] |
no_license
|
Soleer/LDAandSVM
|
8ac5fd2c7606db42f912c765dedeebef86e3f3ce
|
ba87dfc33b1fddb3d573d4a0a1c71a504c5ecb25
|
refs/heads/master
| 2020-07-11T08:45:10.335407
| 2019-02-22T18:52:06
| 2019-02-22T18:52:06
| 204,493,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,689
|
r
|
Classifier_funs.R
|
# #helpfunctions
#
# targets <- function(vector) {
# n <- length(vector)
# En <- diag(1, n, n)
# V <- matrix(vector,
# nrow = n,
# ncol = n,
# byrow = TRUE)
# D <- En - V
# results <- sapply(1:n, function(i) {
# D[i, ] %*% D[i, ]
# })
# return(results)
# }
#
# #return closest target
# class_by_targets <- function(classes, delta) {
# classfunction <- function(x) {
# return(classes[which.min(targets(delta(x)))])
# }
# return(classfunction)
# }
# Build a classifier: given the class labels and a discriminant function
# `delta`, return a function mapping an observation to the label whose
# discriminant score is maximal.
classify <- function(classes, delta) {
  function(x) {
    classes[which.max(delta(x))]
  }
}
## LDA, QDA, PDA, RDA
#' LDA
#'
#' The LDA classification function as described in Hastie et al. "The Elements Statistical Learning" (2009)
#'
#' @param set A R6 data_set object initialized with make_set. @seealso make_set
#' @return Returns a list with the name of the created LDA function in the given set in the first entry and the actual classification
#' function in the second entry and also saves it in the input set.
#' @examples
#' test <- make_testset()
#' func_name <- LDA(test)[['name']]
#' @export
LDA <- function(set) {
  if (!is.data_set(set)) {
    stop("Input must be of class 'data_set' (?make_set)")
  }
  # If an LDA function was already stored in the set, return the cached one.
  if (length(set$func) > 0) {
    slot <- character(0)
    sapply(set$func_info, function(info) {
      if (!is.null(info[["type"]]) && info[["type"]] == "LDA") {
        slot <<- info[["name"]]
      }
    })
    if (length(slot) > 0) {
      return(list(name = slot, func = set$func[[slot]]))
    }
  }
  n_cls <- set$n_classes
  log_prior <- log(unlist(set$pi))     # log class priors
  centroids <- set$mean                # per-class mean vectors
  prec <- solve(sigma_est(set))        # inverse of the pooled covariance
  # Linear discriminant score for each class (Hastie et al., eq. 4.10).
  delta <- function(x) {
    scores <- sapply(seq_len(n_cls), function(k) {
      x %*% prec %*% centroids[[k]] -
        1 / 2 * centroids[[k]] %*% prec %*% centroids[[k]]
    })
    scores + log_prior
  }
  # Store the classifier in the set and return (name, function).
  set$set_function(
    classify(set$classes, delta),
    type = "LDA",
    list(base = 'id', description = "basic LDA function")
  )
}
#' QDA
#'
#' The QDA classification function as described in Hastie et al. "The Elements Statistical Learning" (2009)
#'
#' @param set A R6 data_set object initialized with make_set. @seealso make_set
#' @return Returns a list with the name of the created QDA function in the given set in the first entry and the actual classification
#' function in the second entry and saves it in the input set.
#' @examples
#' test <- make_testset()
#' func_name <- QDA(test)[['name']]
#' @export
QDA <- function(set) {
  if (!is.data_set(set)) {
    stop("Input must be of class 'data_set' (?make_set)")
  }
  # If a QDA function was already stored in the set, return the cached one.
  if (length(set$func) > 0) {
    slot <- character(0)
    sapply(set$func_info, function(info) {
      if (!is.null(info[["type"]]) && info[["type"]] == "QDA") {
        slot <<- info[["name"]]
      }
    })
    if (length(slot) > 0) {
      return(list(name = slot, func = set$func[[slot]]))
    }
  }
  n_cls <- set$n_classes
  log_prior <- log(unlist(set$pi))         # log class priors
  centroids <- set$mean                    # per-class mean vectors
  prec <- lapply(set$sigma, solve)         # per-class inverse covariances
  # Quadratic discriminant score for each class (Hastie et al., eq. 4.12).
  # set$sigma is read lazily here, as in the original implementation.
  delta <- function(x) {
    scores <- sapply(seq_len(n_cls), function(k) {
      dev <- x - centroids[[k]]
      -1 / 2 * log(det(set$sigma[[k]])) - 1 / 2 * t(dev) %*% prec[[k]] %*% dev
    })
    scores + log_prior
  }
  # Store the classifier in the set and return (name, function).
  set$set_function(
    classify(set$classes, delta),
    type = "QDA",
    list(base = 'id', description = "basic QDA function")
  )
}
#' PDA
#'
#' The PDA classification function as described in Hastie et al. "The Elements Statistical Learning" (2009)
#'
#' @param set A R6 data_set object initialized with make_set. @seealso make_set
#' @param base One of the following strings \itemize{\item "id"; \item "quad"; \item "cube"; \item "sqrt"; \item "log"; \item "abs"}
#' The data gets then expanded. @seealso basis_exp
#' @param omega A penalizer matrix used for the classification. Note that the dimensions must fit the dimension of the
#' (potentially) expanded dataset
#' @return Returns a list with the name of the created PDA function in the given set in the first entry and the actual classification
#' function in the second entry and also saves it in the input set.
#' @examples
#' test <- make_testset()
#' func_name <- PDA(test,base='quad')[['name']]
#' @export
PDA <-
  function(set, base = "id", omega) {
    ## The PDA classification function. A function factory.
    if (!is.data_set(set)) {
      stop("Input must be of class 'data_set' (?make_set)")
    }
    data_exp <- set$expansion(base) ## expand data if needed
    d <- dim(data_exp)[2]
    if (missing(omega)) {
      ## default penalizer: the zero matrix (i.e. no penalization)
      omega <- diag(0, nrow = d, ncol = d)
    }
    if (!is.matrix(omega) | ncol(omega) != nrow(omega) | ncol(omega) != d) {
      stop(paste("Omega must be a quadratic matrix of size", d))
    }
    if (length(set$func) > 0) {
      ## check if a PDA function with identical parameters was already stored
      slot <- character(0)
      sapply(set$func_info, function(lis) {
        l <- lis[['parameter']]
        if (!is.null(l[["base"]]) && !is.null(l[["omega"]])) {
          ## isTRUE(all.equal(...)) collapses the elementwise matrix
          ## comparison to one logical; `l[["omega"]] == omega` would yield a
          ## matrix, which is invalid as an `&&`/`if` condition.
          if (l[["base"]] == base && isTRUE(all.equal(l[["omega"]], omega))) {
            slot <<- lis[["name"]]
          }
        }
      })
      if (length(slot) > 0) {
        return(list(name = slot, func = set$func[[slot]]))
      }
    }
    ## NOTE: a second computation of data_exp/d and a second (always-false)
    ## `missing(omega)` check stood here originally; both were dead code and
    ## have been removed.
    h <- basis_exp(base) ## expansion function applied to new observations
    G <-
      set$classnames ## Vector containing all unique classes
    p <-
      log(unlist(set$pi)) ## log probability of each class occurring
    mu <-
      mu_exp(data_exp, set) ## List of class centroids in the expanded space
    sigma_list <-
      lapply(G, function(class) {
        ## Covariance of the expanded data, per class
        sigma_class_exp(data_exp[set$results == set$classes[class], ], mu[[class]])
      })
    Matrix <-
      lapply(sigma_list, function(x)
        solve(x + omega)) ## Adding the Omega matrix (penalizer) to every class covariance matrix and getting the inverse
    names(Matrix) <- set$classnames
    delta <-
      function(x) {
        ## The distance function. Same as QDA but with the penalized,
        ## expanded-space covariance.
        ## NOTE(review): this takes log det of the *inverse* covariance,
        ## which flips the sign relative to the QDA formula -- confirm
        ## whether that is intended.
        result <- sapply(G, function(class) {
          diff <- h(x) - mu[[class]]
          return(-1 / 2 * log(det(Matrix[[class]])) - 1 / 2 * t(diff) %*% Matrix[[class]] %*% (diff))
        }) + p
        return(result)
      }
    classify_func <-
      classify(set$classes, delta) #from scores to class labels
    return(set$set_function(classify_func, type = "PDA", list(
      base = base,
      dim = d,
      omega = omega
    )))
  }
#'SVM
#'
#' The SVM classification function as described in Hastie et al. "The Elements Statistical Learning" (2009)
#'
#' @param set A R6 data_set object initialized with make_set. @seealso make_set
#' @param C A positive double used to decide how large the margin should be, hence the sensitivity of the
#' SVM function to misclassification. Large values encourage an overfit wiggly boundary, while a
#' small value of C causes a smoother boundary
#' @param kernel One of the following strings \itemize{\item "id"; \item "poly"; \item "radial"; \item "neural"}
#' The feature space gets enlarged using basis expansions such as polynomials('poly') or
#' Neural networks('neural').The kernel functions are:
#' dth-Degree polynomial: K(x,x') = (1+ <x,x'>)^d
#' Radial basis: K(x,x') = exp(-g ||x - x'||^2)
#' Neural network: K(x,x') = tanh(d* <x,x'> + g)
#' Use "id" for no kernel.
#' @param d A positive double used in dth-Degree polynomial and Neural network kernel. See parameter 'kernel'
#' @param g A positive double used in Radial basis and Neural network kernel. See parameter 'kernel'
#' @return Returns a list with the name of the created SVM function in the given set in the first entry and the actual classification
#' function in the second entry and saves the classification function in the R6 object.
#' @examples
#' test <- make_testset()
#' func_name <- SVM(test,C = 1, kernel = 'radial', g = 1)[['name']]
#' @export
SVM <- function(set,
                C = 1,
                kernel = "id",
                d = 1,
                g = 1) {
  # The SVM classification function. A function factory
  # Validate input. Each condition short-circuits left to right so the NA
  # and sign checks only ever see scalar values. (The original used `&&`
  # where `||` was intended — all three sub-conditions had to fail at once —
  # so invalid inputs slipped through; the `g` check also tested length(d).)
  if (!is.data_set(set)) {
    stop("Input must be of class 'data_set' (?make_set)", call. = FALSE)
  }
  if (!is.numeric(C) || length(C) != 1 || is.na(C) || C <= 0) {
    stop("Input 'C' must be a positive double", call. = FALSE)
  }
  if (!is.character(kernel) || length(kernel) != 1) {
    stop("Input 'kernel' must be a string with length one.")
  }
  if (!is.numeric(d) || length(d) != 1 || is.na(d) || d <= 0) {
    stop("Input 'd' must be a positive double", call. = FALSE)
  }
  if (!is.numeric(g) || length(g) != 1 || is.na(g) || g <= 0) {
    stop("Input 'g' must be a positive double", call. = FALSE)
  }
  # Test if SVM-classification-function was already calculated using the same parameters.
  if (length(set$func) > 0) {
    slot <- character(0)
    sapply(set$func_info, function(lis) {
      l <- lis[['parameter']]
      if (!is.null(l[["C"]]) &&
          !is.null(l[["kernel"]]) &&
          !is.null(l[["d"]]) &&
          !is.null(l[["g"]])) {
        if (l[["C"]] == C &&
            isTRUE(all.equal(l[["kernel"]], kernel)) &&
            l[["d"]] == d &&
            l[["g"]] == g) {
          slot <<- lis[["name"]]
        }
      }
    })
    if (length(slot) > 0) {
      # If it was already calculated, return the function saved in the data_set.
      return(list(name = slot, func = set$func[[slot]]))
    }
  }
  # Save the parameters in a list for easier handling.
  values <- list(
    "C" = C,
    "kernel" = kernel,
    "d" = d,
    "g" = g
  )
  # Calculate list of decision-functions.
  t <- svm_classify_list(set, values)
  # Calculate classification-function.
  f <- svm_classify(t, set$classes)
  # Return function and save it in the data_set.
  return(set$set_function(
    f,
    type = "SVM",
    parameter = list(
      base = 'id',
      dim = NULL,
      omega = NULL,
      C = C,
      kernel = kernel,
      d = d,
      g = g
    )
  ))
}
#'RDA
#'
#' The regular Discriminant analysis from 4.3.1 as described in Hastie et al. "The Elements Statistical Learning" (2009)
#'
#' @param set A R6 data_set object initialized with make_set. @seealso make_set
#' @param alpha alpha from formula in Hastie between 0 and 1
#' @param gamma gamma from formula between 0 and 1
#' @return Returns a list with the name of the created RDA function in the given set in the first entry and the actual classification
#' function in the second entry and saves the classification function in the R6 object.
#' Returns NULL if a shrunken covariance matrix is singular.
#' @examples
#' test <- make_testset()
#' func_name <- RDA(test, alpha = 0, gamma = 1)[['name']]
#' @export
RDA <- function(set, alpha, gamma) {
  if (!is.data_set(set)) {
    stop("Input must be of class 'data_set' (?make_set)")
  }
  # alpha and gamma must be numeric and lie in [0, 1]
  if (!(is.numeric(alpha) && is.numeric(gamma))) {
    stop("alpha and gamma must be numeric")
  } else if (!((alpha <= 1) && (gamma <= 1) && (alpha >= 0) && (gamma >= 0))) {
    stop("alpha and gamma must be between 0 and 1")
  }
  # Return a cached RDA function with the same parameters, if one exists.
  if (length(set$func) > 0) {
    slot <- character(0)
    sapply(set$func_info, function(lis) {
      l <- lis[['parameter']]
      if (!is.null(l[["alpha"]]) && !is.null(l[["gamma"]])) {
        if (l[["alpha"]] == alpha && l[["gamma"]] == gamma) {
          slot <<- lis[["name"]]
        }
      }
    })
    if (length(slot) > 0) {
      return(list(name = slot, func = set$func[[slot]]))
    }
  }
  K <- set$n_classes
  p <- log(unlist(set$pi))      # log class priors
  mu <- set$mean                # per-class mean vectors
  kleinesSigma <- small_sigma_est(set)
  # Renamed from `sigma_est` so the local no longer shadows the estimator
  # function of the same name.
  pooled_sigma <- sigma_est(set)
  n <- ncol(pooled_sigma)
  allSigmas <- set$sigma
  # Bug fix: the original checked `data_set$data` (the class generator, not
  # this object) for missing values.
  if (any(sapply(set$data, anyNA))) {
    warning("set$data contains NA (RDA)")
  }
  # Shrink each class covariance: towards a scaled identity with weight
  # (1 - gamma), then towards the pooled estimate with weight (1 - alpha).
  sigmaAlphaGamma <- lapply(
    allSigmas,
    FUN = function(sigma_class) {
      pooledGamma <-
        pooled_sigma * gamma + (1 - gamma) * diag(n) * (kleinesSigma ** 2)
      sigma_class * alpha + (1 - alpha) * pooledGamma
    }
  )
  detSigma <- lapply(sigmaAlphaGamma, det)
  if (0 %in% detSigma) {
    # Singularities may occur. Bug fix: the original `return(null)`
    # referenced an undefined symbol and errored instead of returning.
    return(NULL)
  }
  sigma_inv <- lapply(sigmaAlphaGamma, function(X) {
    tryCatch({
      solve(X)
    },
    error = function(cond) {
      # Near-singular matrix: fall back to the identity. TODO: revisit.
      diag(n)
    })
  })
  # Regularized quadratic discriminant score for each class.
  delta <- function(x) {
    result <- sapply(seq_len(K), function(k) {
      -1 / 2 * log(detSigma[[k]]) - 1 / 2 * t(x - mu[[k]]) %*% sigma_inv[[k]] %*% (x - mu[[k]])
    }) + p
    return(result)
  }
  classify_func <- classify(set$classes, delta)
  return(set$set_function(
    classify_func,
    type = "RDA",
    parameter = list(
      base = 'id',
      description =
        "basic RDA function",
      alpha = alpha,
      gamma = gamma
    )
  ))
}
#'RDA_crossFit
#'
#' The regular Discriminant analysis from 4.3.1 as described in Hastie et al. "The Elements Statistical Learning" (2009)
#' Uses cross validation to determine alpha and gamma @seealso alpha_gamma_crossFit
#' Note that number of Observations per class must be a lot higher than numberOfValidations to avoid errors
#'
#' @param set A R6 data_set object initialized with make_set. @seealso make_set
#' @param numberOfValidations how many validations shall be conducted. Note that though K = 10 is common, it is impractical for RDA
#'@param accuracyOfParameters how many parameters shall be evaluated. Note that Omega(crossFit) = N^2
#' @return Returns a list with the name of the created RDA function in the given set in the first entry and the actual classification
#' function in the second entry and saves the classification function in the R6 object R6.
#' @examples
#' test <- make_testset()
#' func_name <- RDA_crossFit(test, numberOfValidations = 3, accuracyOfParameters = 5)
#' @export
RDA_crossFit <- function(set, numberOfValidations = 3, accuracyOfParameters = 5) {
  # Both tuning parameters must be whole numbers. Bug fix: the original used
  # is.integer(), which is FALSE for the defaults (3 and 5 are doubles in R),
  # so the function rejected its own default arguments. Test for whole
  # numeric values instead.
  if (!(is.numeric(numberOfValidations) && is.numeric(accuracyOfParameters) &&
        numberOfValidations %% 1 == 0 && accuracyOfParameters %% 1 == 0)) {
    stop("accuracyOfParameters and numberOfValidations must be integer")
  }
  # Pick alpha/gamma by K-fold cross validation over an N x N parameter grid.
  alpha_gamma <-
    alpha_gamma_crossFit(set, N = accuracyOfParameters, K = numberOfValidations)
  alpha <- alpha_gamma$alpha
  gamma <- alpha_gamma$gamma
  print(paste("classifying with RDA, gamma =", gamma, "and alpha =", alpha, "(retrieved by Cross Validation)"))
  result <- RDA(set, alpha = alpha, gamma = gamma)
  return(result)
}
#
# test_RDA2 <- function() {
# #attributes of each test
# nobservations <- 10 #number of observations per class
# nclass <- 3 #number of classes
# dimParameters <- 2 #number of parameters
#
# test_data <-
# make_testset(N = nobservations, K = nclass, P = dimParameters)
#
# RDA_crossFit(test_data)
#
# print(results)
# }
|
dece2147afe60a73458321ac720aab5502e79e75
|
817aef9be7c091e8f4966b72f8359426b425f6ee
|
/man/cumGainsTable.Rd
|
085ccfbcaadf6edaad5d35dc6c0943f794793f4c
|
[] |
no_license
|
cran/CustomerScoringMetrics
|
07e47dbdeb77b087fb61d1f66cee13ff82de3c2b
|
09f407d8ba99ad86e146dba7af98d4ad0c2d9189
|
refs/heads/master
| 2020-03-08T20:27:40.716795
| 2018-04-06T09:39:01
| 2018-04-06T09:39:01
| 128,382,390
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,319
|
rd
|
cumGainsTable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cumGainsTable.R
\name{cumGainsTable}
\alias{cumGainsTable}
\title{Calculates cumulative gains table}
\usage{
cumGainsTable(predTest, depTest, resolution = 1/10)
}
\arguments{
\item{predTest}{Vector with predictions (real-valued or discrete)}
\item{depTest}{Vector with true class labels}
\item{resolution}{Value for the determination of percentile intervals. Default 1/10 (10\%).}
}
\value{
A gain percentage table.
}
\description{
Calculates a cumulative gains (cumulative lift) table, showing, for different percentiles
of predicted scores, the percentage of customers with the behavior or characteristic of
interest that is reached.
}
\examples{
## Load response modeling predictions
data("response")
## Apply cumGainsTable function to obtain cumulative gains table for test sample results
## and print results
cgt<-cumGainsTable(response$test[,2],response$test[,1])
print(cgt)
}
\references{
Linoff, G.S. and Berry, M.J.A (2011): "Data Mining Techniques: For Marketing, Sales, and
Customer Relationship Management - Third Edition". John Wiley & Sons.
}
\seealso{
\code{\link{topDecileLift}}, \code{\link{liftIndex}}, \code{\link{liftChart}}
}
\author{
Koen W. De Bock, \email{kdebock@audencia.com}
}
|
88d9d68f0fba4204f57cdca90057d1d45b2c1064
|
48db800bd8e8f0f618574de880b099db16452166
|
/src/R/descriptive.R
|
1e1f78410778abf1585702f442e44a659f0b08de
|
[] |
no_license
|
markbuckley/us-geo-analytics
|
e845156f73c60106c14a3e05eaa106031bb4a170
|
995da0a0465ab211a625959ad89cac83378302a7
|
refs/heads/master
| 2020-05-18T10:17:37.482845
| 2014-12-04T04:37:42
| 2014-12-04T04:37:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,750
|
r
|
descriptive.R
|
##
# Descriptive stats and plots of the data set
library(ggplot2)
library(plyr)
# Histogram of the number of accounts managed per salesperson, split by
# coverage type; salespeople with "Unclear" coverage and >= 100 accounts
# are excluded from the plot.
accountsPerSalesperson <- function(data) {
  covered <- data[data$covered_by != "Unclear", ]
  counts <- ddply(covered, c("salesperson_id", "covered_by"), summarise,
                  numAccounts = length(unique(customer_number)))
  print(nrow(counts))
  ggplot(counts[counts$numAccounts < 100, ]) +
    geom_histogram(aes(numAccounts, group = covered_by, fill = covered_by),
                   position = "dodge",
                   binwidth = 10) +
    ylab("Number of salespersons") +
    xlab("Number of accounts managed")
}
# Tabulate how many divisions each salesperson works in.
divsPerSalesperson <- function(data) {
  per_person <- ddply(data, "salesperson_id", summarise,
                      numDivisions = length(unique(division)))
  # only one salesperson works across divisions.
  table(per_person$numDivisions)
}
# Convenience wrapper: compute the per-salesperson area data and plot it.
areaPerSalesperson <- function(data) {
  per_person <- computeAreaPerSalespersonData(data)
  plotAreaPerSalespersonData(per_person)
}
# Per (salesperson, coverage type): area covered by that person's accounts,
# as computed by computeArea() from the account coordinates.
computeAreaPerSalespersonData <- function(data) {
  ddply(data, c("salesperson_id", "covered_by"), summarise,
        areacovered = computeArea(latitude, longitude))
}
# Histogram of area covered per salesperson (rows above `cutoff` excluded),
# split by coverage type.
plotAreaPerSalespersonData <- function(d, cutoff = 5000) {
  below_cutoff <- d[d$areacovered < cutoff, ]
  ggplot(below_cutoff) +
    geom_histogram(aes(areacovered, group = covered_by, fill = covered_by),
                   position = "dodge",
                   binwidth = 2) +
    ylab("Number of salespersons") +
    xlab("Area covered by accounts (average of pairwise distances)")
}
# Count of accounts per business type (local vs national).
localAndNationalAccounts <- function(data) {
  counts <- table(data$business_type)
  counts
}
# Cross-tabulate business type against salesperson coverage type.
localNationalAccountVsSalespersonType <- function(data) {
  cross_tab <- with(data, table(business_type, covered_by))
  cross_tab
}
# Cross-tabulate business segment against salesperson coverage type.
businessSegmentVsSalespersonType <- function(data) {
  seg_tab <- with(data, table(segment, covered_by))
  seg_tab
}
|
4277461e9f76239dbb4731ccbc0d61c534f30f61
|
11dc0bf1c11898f8142a6f89326cf65dbbaecafe
|
/data/bialystok/BialystokPopulationData.R
|
eb8c8451f54f8682d427ef04ed6b2b0b9ae047c8
|
[] |
no_license
|
kontrabanda/mgr-2
|
4669eeb21a6e3e8365d9a203c275c08daf01bd76
|
151bbba7005621e4ef1502d105e2c2c8a7b149eb
|
refs/heads/master
| 2021-05-05T18:17:24.743810
| 2018-05-16T19:37:14
| 2018-05-16T19:37:14
| 117,591,492
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,661
|
r
|
BialystokPopulationData.R
|
library(lubridate)
library(dplyr)
source(file="./data/bialystok/BialystokData.R")
# Reference class for the Bialystok population dataset. Inherits from
# BialystokData (sourced above), which presumably declares the fields
# assigned with `<<-` below (name, allColnames, propertiesColnames, rawData,
# categories) -- TODO confirm against BialystokData.R.
BialystokPopulationData <- setRefClass(
  Class="BialystokPopulationData",
  fields=list(),
  methods = list(
    # Set the dataset name and the column layouts used by parseData/getData.
    initialize = function() {
      name <<- "bialystok"
      allColnames <<- c("lat", "lng", "day", "month", 'population', "category")
      propertiesColnames <<- c("lat", "lng", "day", "month", 'population')
    },
    # Read the raw CSV; `const$bialystokPopulation` is defined elsewhere.
    readData = function() {
      read.csv(const$bialystokPopulation)
    },
    # Convert raw rows into a table grouped by
    # (lat, lng, day, month, year, population) with a list-column holding the
    # unique categories seen in each group.
    parseData = function(inputData) {
      inputData$DATA <- as.Date(inputData$DATA, "%y/%m/%d")
      data <- setNames(data.frame(matrix(ncol = length(allColnames), nrow = nrow(inputData))), allColnames)
      # Derived date features (month/weekday/year) as factors.
      data$month <- as.factor(month(as.Date(inputData$DATA, "%y/%m/%d")))
      data$day <- as.factor(weekdays(as.Date(inputData$DATA, "%y/%m/%d")))
      data$year <- as.factor(year(as.Date(inputData$DATA, "%y/%m/%d")))
      data$lat <- inputData$LAT
      data$lng <- inputData$LNG
      data$category <- inputData$KAT
      data$population <- inputData$population
      # removeIncompeleteData() is presumably inherited/defined elsewhere.
      data <- removeIncompeleteData(data)
      # Wrap the unique categories of a group into a single list element.
      makeCategoryList <- function(arg) {
        list(unique(arg))
      }
      data <- data %>% group_by(lat, lng, day, month, year, population) %>% summarize(category = makeCategoryList(category))
      data
    },
    # Binary labels: 1 when a row's category list contains `category`.
    getData = function(category) {
      data <- rawData[, propertiesColnames]
      label <- sapply(rawData$category, function(x) category %in% x)
      data$label <- as.factor(ifelse(label, 1, 0))
      data
    },
    # Categories available for classification (field from the parent class).
    getClassificationCategories = function() {
      categories
    }
  ),
  contains=c("BialystokData")
)
|
fddee30c36e031d298ec0b8d72c2ca794c26a966
|
a598b42a2800d38d920af9a69232fcb1a39eec7a
|
/man/db_disconnect.Rd
|
decbcd7525c2d791c9e937b7cd99619860d52b10
|
[] |
no_license
|
gschofl/rmisc
|
b396d65b3f1a54794cd19d3666afcacea84fb840
|
a49744b49f46d57ea5182407ad6dd2adf21a2d56
|
refs/heads/master
| 2021-01-23T22:43:07.674294
| 2014-06-11T11:53:53
| 2014-06-11T11:55:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
rd
|
db_disconnect.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{db_disconnect}
\alias{db_disconnect}
\title{Disconnect from an SQLite database.}
\usage{
db_disconnect(...)
}
\arguments{
\item{...}{Connection objects.}
}
\description{
Disconnect from an SQLite database.
}
\keyword{internal}
|
0ba544fed3ace9113b43ff3fb3e80fc6c7f29b9c
|
4b03d6d132ebeb06a8fb3204273235fbc9835c65
|
/R/Project Tools# trans_lines.R
|
331bb230d4f1aa6c0456dc7b6cd5b2f5a867c96b
|
[] |
no_license
|
15652939484/Loafer
|
1ec5f4936ee2311621217d09a21e0d657be3d21e
|
744f6aab5b475ed4ae05fcfc747d09c5db2e5e21
|
refs/heads/master
| 2022-12-26T23:37:36.165130
| 2020-10-16T08:49:07
| 2020-10-16T08:49:07
| 289,851,889
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,634
|
r
|
Project Tools# trans_lines.R
|
#' @title Transform multiple lines, adding or removing decorations
#' @description Uniformly adds or removes the "#' " prefix at the start of
#' each line, or uniformly adds/removes a trailing ",". When removing commas
#' they are replaced by a newline; content inside comments is not wrapped.
#' Note: this probably does not work on macOS, because the clipboard
#' functions there differ from Windows (readClipboard/writeClipboard are
#' Windows-only).
#' @param input_lines input lines; the clipboard contents by default.
#' @param mode one of "add #'", "remove #'", "add ,", "remove ,",
#' "remove ; and | at tail"
#' @return lines with decoration, also written back to the clipboard.
#' @export
#'
trans_lines <- function(input_lines = readClipboard(),
                        mode = "add #'"){
  mode <- mode %>% match.arg(., c("add #'", "add ,", "remove #'", "remove ,", "remove ; and | at tail"))
  if(mode == "add #'"){output_lines <- input_lines %>% paste0("#' ", .)}
  if(mode == "remove #'"){output_lines <- input_lines %>% sub("^#' ", "", .)}
  if(mode == "add ,"){
    # Insert ", " before a trailing comment: "#" not followed by "'" (so
    # roxygen "#'" is left alone), or a "#" at end of line.
    output_lines <- input_lines %>% sub("(#[^']|#$)", ", \\1", .)
    # Lines without any comment simply get a "," appended.
    clean_lines <- output_lines %>% grepl("#",.) == F
    output_lines[clean_lines] <- output_lines[clean_lines] %>% paste0(.,",")
    output_lines[length(input_lines)] <- input_lines[length(input_lines)] ## the last line needs no trailing comma
    # output_lines
  }
  if(mode == "remove ,"){
    # Strip trailing comments, then turn the remaining commas into newlines
    # and drop a dangling trailing newline.
    output_lines <- input_lines %>% gsub("(#[^']|#$).*","",.) %>% gsub(",","\n",.) %>% sub("\n *$","",.)
  }
  if(mode == "remove ; and | at tail"){
    # Drop a single trailing ";" or "|".
    output_lines <- input_lines %>% sub(";$|\\|$","",.)
  }
  writeClipboard(output_lines)
  output_lines
}
|
a7f74a176daa8b8c5d5ec4e5bff4952fd0b1b368
|
f18e1210ca120c9116e356a8549e89e04219dc75
|
/man/getIsotopicColName.Rd
|
19715a3f4f7add0c76bb3da453c4e21f9c4fbae5
|
[
"BSD-2-Clause"
] |
permissive
|
EMSL-Computing/ftmsRanalysis
|
46c73a727d7c5d5a5320bf97a07e9dac72abd281
|
dd3bc3afbf6d1250d1f86e22b936dcc154f4101d
|
refs/heads/master
| 2023-07-21T18:13:26.355313
| 2023-02-09T17:03:09
| 2023-02-09T17:03:09
| 122,233,846
| 14
| 10
|
NOASSERTION
| 2023-07-11T16:34:15
| 2018-02-20T17:52:18
|
R
|
UTF-8
|
R
| false
| true
| 433
|
rd
|
getIsotopicColName.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colname_methods.R
\name{getIsotopicColName}
\alias{getIsotopicColName}
\title{Get the name of the isotopic column}
\usage{
getIsotopicColName(ftmsObj)
}
\arguments{
\item{ftmsObj}{an object of type ftmsData}
}
\value{
name of isotopic column
}
\description{
Returns the name of the column in the e_meta element that
contains isotopic peak information.
}
|
2ee5812a3cff6f0dde42faf7242bcdf40a64eb39
|
159f2f491d35859ad1405282180513bbfc49a6ef
|
/hydro_modelling/modelling.R
|
b3a0047bf90168fa15185250cc0a5c14b45986a0
|
[] |
no_license
|
YANGOnion/Rainfall-runoff-modelling-in-R
|
7631d4b7671f131f0c82a1083c18492c486e8594
|
5dcc0286820381b5a0c1c9feb298dae3148de238
|
refs/heads/master
| 2020-04-19T04:31:45.494157
| 2019-11-30T03:33:20
| 2019-11-30T03:33:20
| 167,965,055
| 8
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,959
|
r
|
modelling.R
|
############################### extracting areal forcing data for the watershed ####################
## to read the watershed polygon
library(raster)
poly_file='input/wts_polygon/66193.shp'
wts_poly=shapefile(poly_file)
## watershed area scaled by 10^6 (presumably m^2 -> km^2 -- TODO confirm
## the units area() returns for this CRS)
area(wts_poly)/10^6
plot(wts_poly)
## to extract areal mean values of forcing data within the watershed polygon
library(velox)
forcing_file='input/temperature/1960.01.tif'
vx_forcing=velox(forcing_file)
areal_value=vx_forcing$extract(sp=wts_poly,fun=mean,small=T)[1,] # the return value is a matrix
head(areal_value)
## an example for plotting raster forcing data and watershed polygon
library(rasterVis)
ras_forcing=brick('input/temperature/1960.tif')
levelplot(crop(ras_forcing,wts_poly,snap='out')[[1]],margin=list(draw=F),colorkey=list(space='right'),
par.settings=YlOrRdTheme(),main='Mean temperature 1960-01-01')+layer(sp.polygons(wts_poly))
## to output the data.table with date and forcing variables
library(data.table)
dt=data.table(date=seq(as.Date('1960-01-01'),as.Date('1960-01-31'),1),tasmean=areal_value)
head(dt)
################################### model calibration and simulation ################################
## grcab/grsim/SumOutputGr4j are defined in gr_model.R (sourced below)
source('gr_model.R')
library(data.table)
dt=fread('input/66193.csv')
dt[,date:=as.Date(date)]
## calibration on the 1960-1979 period
cab=grcab(dt=dt,start='1960-01-01',end='1979-12-31',snow=F)
cab$crit # calibration NSE
## simulation on the 1980-2000 validation period, reusing calibrated params
sim=grsim(dt=dt,start='1980-01-01',end='2000-12-31',param=cab$param,snow=F)
sim$crit # validation NSE
## extracting simulated series
out=SumOutputGr4j(sim$output)
head(out)
##################################### hydrograph visualization ######################################
## hydrograph() is defined in hydrograph.R (sourced below)
source('hydrograph.R')
## the data frame of simulated results, aligned to the simulation period
dt_sim=cbind(dt[date>=as.Date('1980-01-01')],out)
head(dt_sim)
hydrograph(dt=dt_sim,rain_var='rainfall',flow_var=c('runoff','Qsim'),start='2000-01-01',end='2000-12-31')
|
3747dfcc35b9f268c3da8df55b653d30175f5ca3
|
aa0a5d7a686f35db5679ef7584f25c777a964cdf
|
/01 Data/treemap.R
|
13629a0c844283f74e7b9aa6f237cbcb90b2c40e
|
[] |
no_license
|
kdk745/DV_FinalProject
|
d391dc2592c46edd587c66884a62b34fe03ac725
|
c2bb8a0ca35722f1dbb9585d4d7420475b6619af
|
refs/heads/master
| 2021-01-10T08:52:35.539385
| 2015-12-07T19:35:23
| 2015-12-07T19:35:23
| 47,467,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,991
|
r
|
treemap.R
|
## Treemap of child deaths (ages 0-4) by cause for the year 2010.
## NOTE(review): database credentials are hard-coded in the query URLs below;
## consider moving them to environment variables.
require("portfolio")
require("RColorBrewer")
require("jsonlite")
require("RCurl")
require("extrafont")
# Bug fix: dplyr provides `%>%` and filter(), used below, but was missing
# from the dependency list, so the script failed at the filter step.
require("dplyr")
# Fetch deaths by cause and year from the course REST endpoint.
df <- data.frame(fromJSON(getURL(URLencode('skipper.cs.utexas.edu:5001/rest/native/?query="SELECT GBDCHILDCAUSES, YEAR, sum(DEATHS_0_TO_4_YEARS) FROM CHILD_DEATHS GROUP BY GBDCHILDCAUSES, YEAR ORDER BY YEAR;"'),httpheader=c(DB='jdbc:oracle:thin:@sayonara.microlab.cs.utexas.edu:1521:orcl', USER='C##cs329e_znk74', PASS='orcl_znk74', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE) ))
#df2 <- data.frame(fromJSON(getURL(URLencode('skipper.cs.utexas.edu:5001/rest/native/?query="SELECT GBDCHILDCAUSES, YEAR, COUNTRY, sum(DEATHS_0_TO_4_YEARS) FROM CHILD_DEATHS GROUP BY GBDCHILDCAUSES, YEAR, COUNTRY ORDER BY YEAR;"'),httpheader=c(DB='jdbc:oracle:thin:@sayonara.microlab.cs.utexas.edu:1521:orcl', USER='C##cs329e_znk74', PASS='orcl_znk74', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE) ))
#df3 <- data.frame(fromJSON(getURL(URLencode('skipper.cs.utexas.edu:5001/rest/native/?query="SELECT * FROM CHILD_DEATHS;"'),httpheader=c(DB='jdbc:oracle:thin:@sayonara.microlab.cs.utexas.edu:1521:orcl', USER='C##cs329e_znk74', PASS='orcl_znk74', MODE='native_mode', MODEL='model', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE) ))
#VAR <- df3 %>% group_by(YEAR) %>% summarise(total=sum(DEATHS_0_TO_4_YEARS))
#df2 %>% filter(YEAR == 2000) %>% mutate(total=cumsum(SUM.DEATHS_0_TO_4_YEARS.)) %>% df2
#df %>% mutate(total=cumsum(DEATHS_0_TO_4_YEARS))
#View(tbl_df)
#names(df)
#head(df2)
View(df)
head(df)
# Keep only the 2010 observations.
df <- df %>% filter(YEAR == 2010)
#df$GBDCHILDCAUSES is a number of items
map.market(id = df$GBDCHILDCAUSES, area = df$SUM.DEATHS_0_TO_4_YEARS., group = df$YEAR, color = df$SUM.DEATHS_0_TO_4_YEARS.,
           lab = c("group" = TRUE, "id" = TRUE, "area" = TRUE, "color" = TRUE),
           main = "Deaths 0 to 4 Years by Cause",
           print = TRUE)
|
d4eb7d290ac28f6ae44c68898773bad85ea80ddc
|
e99b3404c01305e7b2d1926752939855493d8a12
|
/plot2.R
|
4773ab7297f06e4967cd22dc15fd221fd3a06cbd
|
[] |
no_license
|
tja2000/ExData_Plotting1
|
0d351add68f49453a375205525749c9633a092b7
|
731e81d5ecc09c8c5217601315170ea7beaa34d5
|
refs/heads/master
| 2021-01-12T22:31:03.683217
| 2015-07-08T08:05:43
| 2015-07-08T08:05:43
| 38,739,628
| 0
| 0
| null | 2015-07-08T07:45:24
| 2015-07-08T07:45:24
| null |
UTF-8
|
R
| false
| false
| 938
|
r
|
plot2.R
|
##############################################################################
# Exploratory Data Analysis - Project: Plot 2                                #
##############################################################################
# Reads the household power consumption data, keeps the observations from
# 2007-02-01 and 2007-02-02 and writes a line plot of Global Active Power
# over time to plot2.png.
library(dplyr)
# We assume that the data is in the same directory
d <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Subset the data on 1/2 and 2/2 2007
s <- filter(d,d$Date=="1/2/2007" | d$Date=="2/2/2007")
# Prepare date fields: combine Date and Time into a POSIXlt timestamp
#dt <- paste(s$Date,s$Time,sep=" ")
dt <- strptime(paste(s$Date,s$Time,sep=" "),format="%d/%m/%Y %H:%M:%S")
# Make sure that days are displayed in English
Sys.setlocale("LC_TIME","English")
# Prepare canvas
png("plot2.png", width=480, height=480)
# Draw... convert the data to numeric (the column was read as character,
# presumably because missing values are coded non-numerically -- confirm)
plot(dt, as.numeric(s$Global_active_power), type="l", xlab="",ylab="Global Active Power (kilowatts)")
# close.
dev.off()
|
d392a67de35ee5f10e761258edf399e2e00fa24c
|
f0ae5afa687f95f4a1659a21df76f465b6399ecc
|
/HW2/HW2_Answer_Sheet_r08631036.R
|
c74e2df51ec0c33e575676bd2a1f09a5af0228b9
|
[] |
no_license
|
JustinBear99/QA
|
9db1ee9d3e4ed94874985e4a5359423a446f3f7b
|
7c1064ae994427864253d10389d042bce501c48d
|
refs/heads/master
| 2023-07-18T13:43:16.287362
| 2021-08-27T05:55:53
| 2021-08-27T05:55:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,359
|
r
|
HW2_Answer_Sheet_r08631036.R
|
###############################################################################
##                          HW 2 Answer Sheet                                ##
##          Name :______                  NTU ID:________                    ##
###############################################################################

##############
# Question 1 #
##############

#(a)# Row 7 of mtcars, all 11 columns.
data(mtcars)
mtcars[7, 1:11]

#(b)# The 7th column for every car.
mtcars[1:32, 7]

#(c)# Print columns 1-8 for every 6-cylinder car.
# seq_len(nrow(.)) is safe for an empty data frame, unlike 1:nrow(.).
for (i in seq_len(nrow(mtcars))) {
  if (mtcars[i, 2] == 6) print(mtcars[i, 1:8])
}

#(d)# Names of cars with mpg > 15, a straight engine (vs == 1) and 50 < hp < 150.
for (i in seq_len(nrow(mtcars))) {
  if (mtcars[i, 'mpg'] > 15 && mtcars[i, 'vs'] == 1 && mtcars[i, 'hp'] < 150 && mtcars[i, 'hp'] > 50) {
    print(row.names(mtcars)[i])
  }
}

#(e)# Test the restricted model against the unrestricted one.
# The original compared the two models' own overall F-statistics, which does
# not test the restriction; anova() performs the correct partial F-test of
# nested linear models.
model_constrained <- lm(drat ~ hp + vs - 1, data = mtcars)
model_unconstrained <- lm(drat ~ wt + hp + qsec + vs, data = mtcars)
R2_ur <- summary(model_unconstrained)$r.squared
R2_r <- summary(model_constrained)$r.squared  # NB: R^2 of a no-intercept model is not comparable
ftest <- anova(model_constrained, model_unconstrained)
if (ftest[["Pr(>F)"]][2] < 0.05) {
  print('The null hypothesis is rejected')
} else {
  print('The null hypothesis is not rejected')
}

#(f)# Sum-of-squares decomposition for both models.
SST <- var(mtcars$drat) * (nrow(mtcars) - 1)
SSE_unconstrained <- sum(model_unconstrained$residuals^2)
SSE_constrained <- sum(model_constrained$residuals^2)
SSR_unconstrained <- SST - SSE_unconstrained
SSR_constrained <- SST - SSE_constrained

#(g)# Joint test H0: beta_wt = beta_qsec = 0.
# linearHypothesis() lives in the car package, which the original never loaded.
library(car)
linearHypothesis(model_unconstrained, c("wt = 0", "qsec = 0"))
|
27a2c576c2bedacb23e5d50682750ebf44f0e11f
|
9cfffffa6d5d914ddf3c72317670094754b21912
|
/lib/ALSwithP3.R
|
d214d59fe30485fb48b3a310d6cc691479de046c
|
[] |
no_license
|
TZstatsADS/Spring2020-Project4-group4
|
8506d037bf789a49d27b5840c157c11468e39f4b
|
bd563041f6a5d2164ea7e6aecf0daaedc87ca252
|
refs/heads/master
| 2022-04-21T02:07:30.848703
| 2020-04-22T20:46:22
| 2020-04-22T20:46:22
| 252,845,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,508
|
r
|
ALSwithP3.R
|
# Solve RMSE
# RMSE <- function(rating, est_rating){
# sqr_err <- function(obs){
# sqr_error <- (obs[3] - est_rating[as.character(obs[2]), as.character(obs[1])])^2
# return(sqr_error)
# }
# return(sqrt(mean(apply(rating, 1, sqr_err))))
# }
# Root-mean-squared error between observed ratings and an estimated rating matrix.
#
# rating:     data frame whose first three columns are (user id, movie id, rating);
#             column positions matter, column names do not.
# est_rating: numeric prediction matrix with user ids as rownames and movie
#             ids as colnames.
#
# Returns the RMSE as a single numeric value.
RMSE <- function(rating, est_rating){
  # Indexing the matrix with a 2-column character matrix pulls every
  # (user, movie) prediction in one vectorized lookup, replacing the original
  # one-sapply-call-per-row loop (O(n) instead of n subscript dispatches).
  idx <- cbind(as.character(rating[[1]]), as.character(rating[[2]]))
  pred <- est_rating[idx]
  sqrt(mean((as.numeric(rating[[3]]) - pred)^2))
}
# Ridge-regularized least-squares solve used by one ALS half-step:
#   (M %*% t(M) + lambda * I)^{-1} %*% M %*% r
#
# rating:  numeric vector of observed ratings r.
# matSolv: f x k matrix M of the fixed-side factors (one column per rating).
# lambda:  ridge penalty.
# f:       number of latent factors. Defaults to nrow(matSolv) instead of
#          silently reading a global `f` as the original did (the default is
#          backward-compatible: callers passing three arguments get the same
#          result whenever the global matched the matrix dimension).
minFunc <- function(rating, matSolv, lambda, f = nrow(matSolv)){
  # The solve itself is deterministic; set.seed() is kept only so the global
  # RNG stream stays bit-for-bit identical to the original pipeline.
  set.seed(1)
  solve(matSolv %*% t(matSolv) + lambda * diag(f)) %*% matSolv %*% rating
}
# Solve the regularized least-squares update for a single user or movie id,
# holding the other side's factor matrix fixed (one ALS half-step).
# `solveBy` names the side held fixed: "Movies" updates a user's factors,
# "Users" updates a movie's factors. The factor matrices `Movies`/`Users` are
# read from the calling environment (cluster-exported by ALS()).
findSolve <- function(id, solveBy, train, lambda){
  set.seed(1)
  id <- as.integer(id)
  if (solveBy == "Movies") {
    # Fix movie factors, solve this user's factor vector.
    observed <- train[train$userId == id, ]
    minFunc(rating = observed$rating,
            matSolv = Movies[, as.character(observed$movieId)],
            lambda = lambda)
  } else if (solveBy == "Users") {
    # Fix user factors, solve this movie's factor vector.
    observed <- train[train$movieId == id, ]
    minFunc(rating = observed$rating,
            matSolv = Users[, as.character(observed$userId)],
            lambda = lambda)
  } else {
    return("Please let matSolv be in right way")
  }
}
# Alternating Least Squares matrix factorization.
#
# data:     full ratings data frame (userId, movieId, rating); used to
#           initialize the factor matrices.
# train:    training ratings used for the alternating solves.
# test:     unused unless the commented-out RMSE tracking is re-enabled.
# f:        number of latent factors.
# maxIters: number of full ALS sweeps.
# lambda:   ridge penalty forwarded to findSolve()/minFunc().
#
# Side effects: exports objects to a parallel cluster `cl` that must already
# exist in the calling environment, and prints one "." per iteration.
# Returns list(User = f x U factors, Movie = f x M factors, Rating = U x M
# predicted rating matrix).
ALS <- function(data, train, test, f, maxIters, lambda=5){
  # Initialize factors: first row = the user's/movie's average rating,
  # remaining f-1 rows random.
  set.seed(1)
  UserId <- unique(data$userId)
  U <- length(UserId)
  MovieId <- unique(data$movieId)
  M <- length(MovieId)
  avgRatingByUser <- data %>%
    group_by(userId) %>%
    summarise(avgRating = mean(rating))
  avgRatingByMovie <- data %>%
    group_by(movieId) %>%
    summarise(avgRating = mean(rating))
  # NOTE(review): Users is seeded with runif(-1, 1) but Movies with
  # rnorm(mean = -1, sd = 1) -- confirm the asymmetry is intentional.
  Users <- matrix(c(avgRatingByUser$avgRating, runif((f-1)*U, -1, 1)), nrow=f, byrow = T)
  colnames(Users) <- UserId
  Movies <- matrix(c(avgRatingByMovie$avgRating, rnorm((f-1)*M, -1, 1)), nrow=f, byrow = T)
  colnames(Movies) <- MovieId
  # Ship the solver and the id vectors to the workers once; the factor
  # matrices are re-exported inside the loop because they change each sweep.
  clusterExport(cl, "minFunc", envir = environment())
  clusterExport(cl, "f", envir = environment())
  clusterExport(cl, "UserId", envir = environment())
  clusterExport(cl, "MovieId", envir = environment())
  trainRMSE <- rep(NA, maxIters%/%3)
  testRMSE <- rep(NA, maxIters%/%3)
  iter <- 1
  while(iter <= maxIters){
    st <- Sys.time()
    # Fix Movies, solve every user's factor vector in parallel.
    clusterExport(cl, "Movies", envir = environment())
    clusterExport(cl, "Users", envir = environment())
    Users <- parSapply(cl, as.character(UserId), findSolve, solveBy="Movies", train = train, lambda = lambda, USE.NAMES = T)
    # Fix Users, solve every movie's factor vector in parallel.
    clusterExport(cl, "Movies", envir = environment())
    clusterExport(cl, "Users", envir = environment())
    Movies <- parSapply(cl, as.character(MovieId), findSolve, solveBy="Users", train = train, lambda = lambda, USE.NAMES = T)
    # Optional per-iteration RMSE tracking, disabled for speed:
    # cat("Iter:", iter, "\t Time spent:", round(Sys.time()-st, 3), "s\n")
    # if(iter%%3==1){
    #   est_rating <- t(Users) %*% Movies
    #
    #   trainRMSE[iter%/%3+1] <- RMSE(train, est_rating)
    #   testRMSE[iter%/%3+1] <- RMSE(test, est_rating)
    # }
    cat(".")   # one progress dot per sweep
    if(iter==maxIters) cat("\n")
    iter <- iter + 1
  }
  # Predicted rating matrix (users x movies).
  est_rating <- t(Users) %*% Movies
  # rownames(est_rating)<-UserId
  # colnames(est_rating)<-as.character(MovieId)
  # trainRMSE <- RMSE(train, est_rating)
  # testRMSE <- RMSE(test, est_rating)
  return(list("User" = Users,
              "Movie" = Movies,
              "Rating" = est_rating#,
              # "TrainRMSE" = trainRMSE,
              # "TestRMSE" = testRMSE
  ))
}
## Scale each *column* of m to unit Euclidean norm.
## Despite the function's name, apply(..., MARGIN = 2, ...) operates
## column-wise; callers (KRR.Post) rely on this and transpose the result
## themselves, so the behavior is kept exactly as-is.
norm.row <- function(m) {
  apply(m, 2, function(v) v / sqrt(sum(v^2)))
}
## Kernel ridge regression for a single user.
## Fits krr() on the normalized factor vectors of the movies this user rated,
## then predicts a rating for every movie.
## Reads `train`, `norm.X_full`, `krr`, `lambda` and `sigma` from the calling
## environment -- KRR.Post() cluster-exports them before parSapply().
## Returns a numeric vector with one predicted rating per movie (NAs -> 0).
rating_krr<-function(u){
  set.seed(1)
  u=as.numeric(u)   # parSapply passes the user id as a character string
  norm.X<-NULL
  r <-NULL
  ## ids of the movies rated by user u
  i.rated.by.u <- as.character(train[train$userId==u,]$movieId)
  norm.X=norm.X_full[i.rated.by.u,]      # normalized movie factors for those movies
  r<- train[train$userId==u,]$rating     # the user's observed ratings
  model <- krr(norm.X,r, lambda = lambda,sigma=sigma)
  est_rating <- predict(model, norm.X_full)
  # Zero out NA predictions so downstream RMSE arithmetic stays defined.
  est_rating[is.na(est_rating)] <- 0
  return(est_rating)
}
# Post-process ALS results with per-user kernel ridge regression (P3 step).
#
# result_ALS:  output of ALS() (list with $Movie factors and $Rating matrix).
# lambda/sigma: krr() regularization and kernel-width parameters.
# data/train/test: full, training and testing rating data frames.
#
# Side effects: cluster-exports objects to the cluster `cl` from the calling
# environment and prints both RMSEs with cat().
# Returns list(krr.rating, train_RMSE, test_RMSE).
KRR.Post <- function (result_ALS, lambda = 10,sigma=1.5, data, train, test) {
  U=data$userId%>%unique()%>%length
  I=data$movieId%>%unique()%>%length
  ## placeholder estimate matrix; overwritten by the parSapply result below
  est_rating <- matrix(NA, ncol = I, nrow=U)
  colnames(est_rating) <- levels(as.factor(data$movieId))
  rownames(est_rating) <- levels(as.factor(data$userId))
  # Unit-normalize each movie's factor vector (norm.row() works column-wise
  # despite its name, hence the transpose to movies-in-rows).
  X_full <- result_ALS$Movie
  norm.X_full <- t(norm.row(X_full))
  norm.X_full[is.na(norm.X_full)] <- 0
  # Ship everything rating_krr() reads to the worker processes.
  clusterExport(cl, "train", envir = environment())
  clusterExport(cl, "norm.X_full", envir = environment())
  clusterExport(cl, "X_full", envir = environment())
  clusterExport(cl, "krr", envir = environment())
  clusterExport(cl, "lambda", envir = environment())
  clusterExport(cl, "sigma", envir = environment())
  # One krr fit per user; the result comes back movies x users, so transpose.
  # NOTE(review): as.character(1:U) assumes user ids are exactly 1..U --
  # confirm against data$userId.
  est_rating=parSapply(cl, as.character(1:U),rating_krr, USE.NAMES = T)
  est_rating=t(est_rating)
  colnames(est_rating)<-result_ALS$Rating%>%colnames
  # Summarize
  train_RMSE <- RMSE(train, est_rating)
  cat("training RMSE:", train_RMSE, "\t")
  test_RMSE <- RMSE(test, est_rating)
  cat("test RMSE:",test_RMSE, "\n")
  return(list(krr.rating=est_rating, train_RMSE = train_RMSE, test_RMSE = test_RMSE))
}
# Run ALS matrix factorization, then refine the result with the per-user
# kernel-ridge-regression post-processing step (P3).
# lambda_als is the ALS ridge penalty; lambda_p and sigma parameterize the
# krr() post-processing. Returns whatever KRR.Post() returns.
ALS_KRR <- function(data, train, test, f, maxIters, lambda_als, lambda_p, sigma){
  set.seed(1)
  als_fit <- ALS(data, train, test, f, maxIters, lambda_als)
  KRR.Post(result_ALS = als_fit, lambda = lambda_p, sigma = sigma, data, train, test)
}
# Near-duplicate of KRR.Post() differing only in the error metric: this
# variant calls RMSE_R() instead of RMSE().
# NOTE(review): RMSE_R is not defined anywhere in this file -- confirm it
# exists before calling, and consider merging the two functions behind a
# metric argument instead of maintaining two copies.
KRR.Post_R <- function (result_ALS, lambda = 10,sigma=1.5, data, train, test) {
  U=data$userId%>%unique()%>%length
  I=data$movieId%>%unique()%>%length
  ## placeholder estimate matrix; overwritten by the parSapply result below
  est_rating <- matrix(NA, ncol = I, nrow=U)
  colnames(est_rating) <- levels(as.factor(data$movieId))
  rownames(est_rating) <- levels(as.factor(data$userId))
  # Unit-normalize each movie's factor vector (norm.row() is column-wise).
  X_full <- result_ALS$Movie
  norm.X_full <- t(norm.row(X_full))
  norm.X_full[is.na(norm.X_full)] <- 0
  # Ship everything rating_krr() reads to the worker processes.
  clusterExport(cl, "train", envir = environment())
  clusterExport(cl, "norm.X_full", envir = environment())
  clusterExport(cl, "X_full", envir = environment())
  clusterExport(cl, "krr", envir = environment())
  clusterExport(cl, "lambda", envir = environment())
  clusterExport(cl, "sigma", envir = environment())
  est_rating=parSapply(cl, as.character(1:U),rating_krr, USE.NAMES = T)
  est_rating=t(est_rating)
  colnames(est_rating)<-result_ALS$Rating%>%colnames
  # Summarize
  train_RMSE <- RMSE_R(train, est_rating)
  cat("training RMSE:", train_RMSE, "\t")
  test_RMSE <- RMSE_R(test, est_rating)
  cat("test RMSE:",test_RMSE, "\n")
  return(list(krr.rating=est_rating, train_RMSE = train_RMSE, test_RMSE = test_RMSE))
}
|
f71a02ef1e61c1f113b6b2a6706aab51cf5024cf
|
5236a8a9c53fb80c783e34996863acd22a321c25
|
/Scripts/Data_Management/Merge_Raw_Data.R
|
e41602bd2635578aef413f0e1b8581d3e5c5aa5e
|
[] |
no_license
|
nnooney/Medical-Mistakes
|
b3b2d0d32e17718f7235c3a704b4044b51cd5d9a
|
7eadc093efb0ea353d272bfd553d4217af27ddaf
|
refs/heads/master
| 2016-09-05T21:38:14.570863
| 2014-12-09T02:44:50
| 2014-12-09T02:44:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,282
|
r
|
Merge_Raw_Data.R
|
## This file reads in data from 2003-2011, and combines it into a large data set.
## The variables "cat_code","epoa1","epoa2","epoa3","epoa4","epoa_p","grouper","sev_code" are excluded from 2008-2011.
## The variable "msdrg" from 2008-2011 is combined with "drg" from 2003-2007 to create the variable "drg" in the final dataset.
## The final data set is stored as an R object: '~/shared/Data/Full_Data/Raw_Data.obj'.
## Two intermediate data sets are stored as Raw_Data_08_11.obj and Raw_Data_03_07.obj.
library(foreign)

year <- rep(as.character(2003:2011), each = 3)
extension <- rep(c("_LA_base.dta", "_North_base.dta", "_South_base.dta"), times = 9)
path <- "~/shared/Data/Original_Data/"
files <- paste(path, year, extension, sep = "")

# Read the i-th Stata file and tag every row with its year.
read_base_file <- function(i) {
  dta <- read.dta(files[i])
  dta$year <- rep(year[i], times = nrow(dta))
  dta
}

# 2003-2007 (files 1-15) share the same column names, so they can be
# row-bound directly. Reading into a list first and calling rbind once avoids
# the quadratic copying of rbind() inside a loop -- the cause of the original
# "this code takes a long time to run".
raw.data <- do.call(rbind, lapply(1:15, read_base_file))
save(raw.data, file = '~/shared/Data/Full_Data/Raw_Data_03_07.obj')

# Same for 2008-2011 (files 16-27).
raw.data2 <- do.call(rbind, lapply(16:27, read_base_file))
save(raw.data2, file = '~/shared/Data/Full_Data/Raw_Data_08_11.obj')

# Drop the 2008-2011-only columns, keeping msdrg aside to merge with drg later.
extra2 <- c("cat_code", "epoa1", "epoa2", "epoa3", "epoa4", "epoa_p", "grouper", "msdrg", "sev_code")
extra.data2 <- raw.data2$msdrg
raw.data2 <- raw.data2[, !is.element(colnames(raw.data2), extra2)]

# Set drg aside from 2003-2007 as well.
extra.data1 <- raw.data$drg
raw.data <- raw.data[, colnames(raw.data) != "drg"]

# This comparison shows the only non-matching column names are the
# opoa#/cpoa# pairs -- a pure rename -- so aligning names and row-binding is safe.
cbind(colnames(raw.data), colnames(raw.data2), colnames(raw.data) == colnames(raw.data2))
colnames(raw.data) <- colnames(raw.data2)  # make the column names consistent
raw.data <- rbind(raw.data, raw.data2)
raw.data$drg <- c(extra.data1, extra.data2)  # add the combined drg variable back in
save(raw.data, file = '~/shared/Data/Full_Data/Raw_Data.obj')
|
ac8885583994cd77503fca29f226a23a46296bbd
|
5cdfe09b136d8c56160b7048733d26d391e770b7
|
/Scripts/model_exploration/random_forest.R
|
056425813a47de2bde3b8429e0cedd3534770bfb
|
[] |
no_license
|
DanielRZapataS/Recommendation_System_Retail_Banking
|
a7265b731127c60f9233138f492989649f2be3ce
|
1de13ca704dfa80ba8e4e374ade481d7ba33ecb9
|
refs/heads/master
| 2020-11-25T21:28:36.779429
| 2019-12-18T15:09:49
| 2019-12-18T15:09:49
| 228,851,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,010
|
r
|
random_forest.R
|
# Candidate hyper-parameter grids for the Random Forest search.
# NOTE(review): the seq()-based grids on the first two lines are immediately
# overwritten by the hard-coded values on the next two -- confirm which grid
# is actually intended before tuning.
mtry <- seq((ncol(master)-1)/3, ncol(master) * 0.8, 6) %>% round(0)
nodesize <- seq(3, 9, 3)
mtry <- c(45, 93)
nodesize <- 6
#' Train multiple Random Forest models over a grid of hyper-parameters and
#' return the model with the highest AUC on the validation set.
#' @param master: data frame with `target` and predictor columns (training set).
#' @param mtry: Vector of candidate values for the number of variables randomly
#' sampled as candidates at each split. (numeric)
#' @param nodesize: Vector of candidate values for the minimum size of terminal
#' nodes. (numeric)
#' @param dev: validation data set; must contain the same predictors as
#' `master` plus a `target` column.
#' @return List with training/validation AUC, fitted values on both sets, the
#' model call name and the best randomForest model object.
randomForest_train <- function(master, mtry, nodesize,
                               dev){
  require(pROC)
  require(randomForest)
  # All hyper-parameter combinations to evaluate.
  hyper_grid <- expand.grid(mtry = mtry, nodesize = nodesize)
  num_models <- nrow(hyper_grid)
  # Preallocate instead of growing inside the loops.
  RFmodel <- vector("list", num_models)
  auc_values_RF <- numeric(num_models)
  predics_val <- vector("list", num_models)
  # Train one model per grid row.
  for (i in seq_len(num_models)) {
    RFmodel[[i]] <- randomForest(formula = target ~ .,
                                 data = master,
                                 mtry = hyper_grid$mtry[i],
                                 nodesize = hyper_grid$nodesize[i],
                                 ntree = 10,
                                 importance = TRUE,
                                 keep.forest = TRUE)  # was `keep.forst`, a silently-ignored typo
  }
  # Score every model on the validation set.
  for (i in seq_len(num_models)) {
    model <- RFmodel[[i]]
    pred <- predict(object = model,
                    newdata = dev)
    predics_val[[i]] <- pred
    # Fixed: the original called `ropROC::c(val$target, pred)` (nonexistent
    # namespace/function and an undefined `val`) and then
    # `pROC::auc(xgb_auc_roc_obj_master)` (undefined object).
    roc_model <- pROC::roc(dev$target, pred)
    auc_values_RF[i] <- as.numeric(pROC::auc(roc_model))
  }
  names(auc_values_RF) <- paste0("RF", seq_len(num_models))
  # Pick the model with the *largest* validation AUC (the original comment
  # said "smallest", contradicting which.max).
  best <- which.max(auc_values_RF)
  RF_model <- RFmodel[[best]]
  fit_train <- predict(RF_model)  # OOB predictions on the training data
  # Fixed: `auc(actual =, predicted =)` is not a pROC signature; build the ROC
  # object explicitly, as above.
  auc_train <- as.numeric(pROC::auc(pROC::roc(master$target, fit_train)))
  auc_val <- auc_values_RF[best]
  fit_val <- predics_val[[best]]
  return(list(auc_train = auc_train, auc_val = auc_val,
              fit_train = fit_train, fit_val = fit_val,
              model_name = paste0(RF_model$call)[1],
              model = RF_model))
}
|
897670fbbc124dd0d42185dfefaa5a5cf18f7ecf
|
f7fb88ca3b6c0b29a3d10ed20df25efa6e4f8602
|
/man/multipart_upload.Rd
|
b7b7c02e071f9d680985d3879d4c1e3a0c361f59
|
[] |
no_license
|
cran/civis
|
9fdf3e2ff64b72fc5c448d02cd621594d0f603b4
|
e334467f2e8af73bb76c1e80f60e3f6ba460b509
|
refs/heads/master
| 2023-04-07T13:39:21.376985
| 2023-03-31T07:00:03
| 2023-03-31T07:00:03
| 103,786,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 532
|
rd
|
multipart_upload.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{multipart_upload}
\alias{multipart_upload}
\title{Upload to files endpoint in parts.}
\usage{
multipart_upload(file, name = "", chunk_size = 32 * 1024, expires_at = NULL)
}
\arguments{
\item{file}{the file}
\item{name}{name of the upload; defaults to an empty string (\code{""})}
\item{chunk_size}{size of the chunks in bytes}
\item{expires_at}{when the file expires (default never).}
}
\description{
If a future::plan has been set, will be carried out in parallel.
}
|
e75a8fe7f7aae49f09db7d41b3e16d385edbea41
|
b9cba8ae9cd4a6a25c1e4d51157d549228b72a05
|
/man/DatasetNode.Rd
|
d4db40e79403610d87dc462391ffd99d9afb84e1
|
[] |
permissive
|
BigelowLab/thredds
|
8c379dbe38a7e787d2efcd731ed2c6148c037a6c
|
6d21f3327ef2ce8d8697741696ff3ef4b4e58377
|
refs/heads/master
| 2023-01-13T08:52:19.973214
| 2023-01-06T15:13:08
| 2023-01-06T15:13:08
| 159,708,386
| 2
| 1
|
MIT
| 2022-02-06T20:12:55
| 2018-11-29T18:14:29
|
R
|
UTF-8
|
R
| false
| true
| 5,129
|
rd
|
DatasetNode.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DatasetNode.R
\name{DatasetNode}
\alias{DatasetNode}
\title{A class for a single datatset reference}
\description{
A direct Dataset representation that subclasses from ThreddsNode
}
\note{
For examples see \link{CatalogNode}
}
\section{Super class}{
\code{\link[thredds:ThreddsNode]{thredds::ThreddsNode}} -> \code{DatasetNode}
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{name}}{character, often the filename}
\item{\code{dataSize}}{numeric, size in bytes}
\item{\code{date}}{character, modification date}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-DatasetNode-new}{\code{DatasetNode$new()}}
\item \href{#method-DatasetNode-GET}{\code{DatasetNode$GET()}}
\item \href{#method-DatasetNode-get_url}{\code{DatasetNode$get_url()}}
\item \href{#method-DatasetNode-list_access}{\code{DatasetNode$list_access()}}
\item \href{#method-DatasetNode-print}{\code{DatasetNode$print()}}
\item \href{#method-DatasetNode-clone}{\code{DatasetNode$clone()}}
}
}
\if{html}{\out{
<details open><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="thredds" data-topic="ThreddsNode" data-id="browse"><a href='../../thredds/html/ThreddsNode.html#method-ThreddsNode-browse'><code>thredds::ThreddsNode$browse()</code></a></span></li>
<li><span class="pkg-link" data-pkg="thredds" data-topic="ThreddsNode" data-id="children_names"><a href='../../thredds/html/ThreddsNode.html#method-ThreddsNode-children_names'><code>thredds::ThreddsNode$children_names()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DatasetNode-new"></a>}}
\if{latex}{\out{\hypertarget{method-DatasetNode-new}{}}}
\subsection{Method \code{new()}}{
initialize an instance of ServiceNode
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DatasetNode$new(x, ...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{x}}{url or xml2::xml_node}
\item{\code{...}}{arguments for superclass initialization}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DatasetNode-GET"></a>}}
\if{latex}{\out{\hypertarget{method-DatasetNode-GET}{}}}
\subsection{Method \code{GET()}}{
Overrides the GET method of the superclass. GET is not permitted
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DatasetNode$GET()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
NULL
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DatasetNode-get_url"></a>}}
\if{latex}{\out{\hypertarget{method-DatasetNode-get_url}{}}}
\subsection{Method \code{get_url()}}{
Retrieve the relative URL for a dataset.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DatasetNode$get_url(
service = c("dap", "opendap", "wms")[1],
sep = c("/", "")[2],
...
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{service}}{character, the service to use. (default 'dap' equivalent to 'opendap')
Ignored if `urlPath` or `href` is in the nodes' attributes.}
\item{\code{sep}}{character, typically "/" or "" (default), used for joined base_url to relative url}
\item{\code{...}}{other arguments for \code{DatasetNode$list_access}}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
character
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DatasetNode-list_access"></a>}}
\if{latex}{\out{\hypertarget{method-DatasetNode-list_access}{}}}
\subsection{Method \code{list_access()}}{
list access methods
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DatasetNode$list_access(xpath = build_xpath("access", prefix = self$prefix))}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{xpath}}{character, xpath descriptor}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
named list of character vectors or NULL
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DatasetNode-print"></a>}}
\if{latex}{\out{\hypertarget{method-DatasetNode-print}{}}}
\subsection{Method \code{print()}}{
print method
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DatasetNode$print(prefix = "")}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{prefix}}{character, to be printed before each line of output (like spaces)}
\item{\code{...}}{other arguments for superclass}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-DatasetNode-clone"></a>}}
\if{latex}{\out{\hypertarget{method-DatasetNode-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{DatasetNode$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
5005d53851a76de0fb73017c0ef5807b7a3b47bd
|
f883128e4e174dc7b3aaa86be32a01e26a4a1c34
|
/Assignment2/plot3.R
|
665cf6940591c89dc0a2520f68a0548cb45ffc3f
|
[] |
no_license
|
hannyhus/Course-Project-2
|
4e848b5537e1d7b45b7de0ed4ca6bbb2dd443c19
|
421fa2484758fc8126382c152a49ffde14db86f1
|
refs/heads/master
| 2021-01-10T11:04:06.133209
| 2016-01-25T17:03:48
| 2016-01-25T17:03:48
| 50,123,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
plot3.R
|
# Plot 3: Of the four source types (point, nonpoint, onroad, nonroad), which
# saw PM2.5 emission decreases in Baltimore City (fips 24510) from 1999-2008,
# and which saw increases? Drawn with ggplot2 and saved as plot3.png.

# Extract the data archive and load the emissions summary.
project2 <- unzip("exdata-data-NEI_data.zip")
NEI <- readRDS("summarySCC_PM25.rds")

# Keep only Baltimore City records.
baltimore <- NEI[NEI$fips == "24510", ]

# Total emissions per year and source type.
yearly_by_type <- aggregate(Emissions ~ year + type, baltimore, sum)

library(ggplot2)
emissions_plot <- ggplot(yearly_by_type, aes(year, Emissions, color = type)) +
  geom_line() +
  xlab("Years") +
  ylab("Total PM '[2.5]*' Emission") +
  ggtitle("Total PM '[2.5]*' Emissions at Various Years in Baltimore, Maryland")
print(emissions_plot)

# Copy the on-screen plot to a 480x480 PNG file.
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
|
2c091d172f18032cf86fb84b28d7983e6b04ab00
|
67407a5b702bb6fa60c8a6ebb501d0de4c95cead
|
/R/blueprint.R
|
78d328e1507041b7e18b5b394a6dcec46a7ce625
|
[
"MIT"
] |
permissive
|
MyKo101/rando
|
0f1decd713735b465c674e70f264f78ee3eefe04
|
0eefa395ef3c6eabfd846c0698253400a612b9a5
|
refs/heads/master
| 2023-02-19T01:19:15.697552
| 2021-01-23T21:06:10
| 2021-01-23T21:06:10
| 328,973,763
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,369
|
r
|
blueprint.R
|
#' @name blueprint
#'
#' @title Blueprint a Dataset
#'
#' @description
#' Allows for the generation of population based on a prescribed set
#' of rando functions.
#'
#' @param ...
#' arguments used to generate the blueprint, see Examples.
#'
#' @return
#' A function that will produce a [tibble][tibble::tibble-package],
#' which matches the blueprint that was provided. The generated
#' function will take the following arguments:
#'
#' * `...` - any arguments that are used within the blueprinting
#' * `n` - the number of rows that the resulting tibble should be
#' * `.seed` - the random seed to set before generating the data
#'
#'
#' `is_blueprint()` simply checks whether a function is a blueprinting
#' function or not and returns a logical.
#'
#' @examples
#' make_tbl <- blueprint(
#' x = r_norm(),
#' y = r_norm()
#' )
#'
#' make_tbl(n = 2)
#'
#' make_tbl(n = 5)
#'
#' # Blueprints can use additional parameters:
#' make_tbl2 <- blueprint(
#' x = r_norm(mean = x_mu),
#' y = r_unif(min = y_min, max = y_max)
#' )
#'
#' # Which are simply passed to the generated function
#' make_tbl2(x_mu = 10, y_min = -10, y_max = -5)
#'
#' @export
#'
blueprint <- function(...) {
  # Capture the call unevaluated so the user's `col = r_*()` expressions are
  # deferred; they run later, inside each invocation of the generated function.
  .call <- match.call()
  # Rewrite the captured call into tibble::tibble(..., .rows = n) so every
  # generated tibble has exactly `n` rows.
  .call[[".rows"]] <- quote(n)
  .call[[1]] <- quote(tibble::tibble)
  # Skeleton of the generated function; its real body is assembled below.
  f <- function(..., n = default_n(...), .seed = NULL) NULL
  body(f) <- call(
    "{",
    quote(set_blueprint_n(n)),                  # publish n for the r_*() defaults
    quote(on.exit(set_blueprint_n())),          # ...and clear it again on exit
    quote(list2env(list(...), environment())),  # expose the caller's ... by name
    call("with_seed", quote(.seed), .call)      # evaluate the tibble call under .seed
  )
  # Evaluate in a child of the blueprint() caller's environment so names used
  # in the expressions resolve where blueprint() was written.
  environment(f) <- new.env(parent = parent.frame())
  environment(f)[["set_blueprint_n"]] <- set_blueprint_n
  structure(
    f,
    class = c("rando_blueprint_function", "function")
  )
}
#' @rdname blueprint
#'
#' @param bp
#' Object to check
#'
#' @examples
#' is_blueprint(make_tbl)
#'
#' @export
#'
is_blueprint <- function(bp) {
  # S3 class test for functions produced by blueprint(); inherits() also
  # matches subclasses, unlike a direct class() comparison.
  inherits(bp, "rando_blueprint_function")
}
#' @name bp_where
#'
#' @title Blueprint based on a condition
#'
#' @description
#' Runs a blueprint function where a condition is true, otherwise
#' returns `NA` values
#'
#'
#' @param condition
#' Condition to check before evaluating. Results will be given where
#' this is \code{TRUE}, and \code{NA} when this is \code{FALSE}
#'
#' @param bp
#' Blueprint function to run based on the condition
#'
#' @param ...
#' arguments passed on to Blueprint, such as \code{.seed}
#'
#' @return a [tibble][tibble::tibble-package]
#'
#' @examples
#' make_tbl <- blueprint(
#' x = r_norm(),
#' y = r_unif()
#' )
#'
#' set_n(10)
#' i <- r_lgl()
#'
#' bp_where(i, make_tbl)
#'
#' df <- tibble::tibble(
#' id = 1:10,
#' cnd = r_lgl()
#' )
#' dplyr::mutate(df, bp_where(cnd, make_tbl))
#' @export
#'
# Run blueprint `bp` for the rows where `condition` is TRUE; rows where it is
# FALSE (or NA) come back as all-NA rows. `...` is forwarded to the blueprint
# (e.g. .seed). Returns a tibble with length(condition) rows.
bp_where <- function(condition, bp, ...) {
  if (!is.logical(condition)) {
    error_glue("condition argument passed to bp_where() should be logical")
  }
  if (!is_blueprint(bp)) {
    error_glue("bp argument passed to bp_where() should be a blueprint function")
  }
  # Treat NA conditions as FALSE so sum() and the indexing below stay defined.
  condition <- !is.na(condition) & condition
  n_true <- sum(condition)
  full_set <- bp(n = n_true, ...)
  # Map each TRUE position to its row of full_set; the remaining NA indices
  # become all-NA rows when data-frame row indexing is applied.
  rows <- rep(NA_integer_, length(condition))
  # seq_len() is safe when nothing matches; the original `1:sum(condition)`
  # produced c(1, 0) for zero matches and failed the assignment.
  rows[condition] <- seq_len(n_true)
  full_set[rows, ]
}
# Package-internal store for the row count of the blueprint currently being
# evaluated; cleared (NULL) when no blueprint is running.
..blueprint_n <- NULL
# Set, or with no argument clear, ..blueprint_n inside the "rando" namespace.
# The namespace binding is locked once the package is loaded, so it must be
# unlocked, reassigned, and re-locked on every update.
set_blueprint_n <- function(n=NULL){
  rando_env <- asNamespace("rando")
  # unlockBinding is invoked via call()/eval() -- presumably to avoid tooling
  # complaints about calling it directly; confirm before simplifying.
  eval(call("unlockBinding", "..blueprint_n",rando_env))
  assign("..blueprint_n",n,rando_env)
  eval(call("lockBinding", "..blueprint_n",rando_env))
}
|
6bbf2883b2dab3636147c0e05e500c28392f7225
|
a158048836d33926ef473b43263e2e7551ba465a
|
/R Basics 1.R
|
f8d448f4b5044e79d63e4b7463660bb56ead2373
|
[] |
no_license
|
jezzaayt/r-basics-tutorials
|
e723937f847b06544d77674667b23e7b381589de
|
3b832845fc2d6f891e0ccebbaac6b86f87ed21ed
|
refs/heads/master
| 2023-04-13T18:12:49.914374
| 2021-04-24T19:11:16
| 2021-04-24T19:11:16
| 236,339,740
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 802
|
r
|
R Basics 1.R
|
# R Basics 1 -- introductory tutorial: printing, vectors, loops, and indexing.
print("Hello World")
print(2+2)
# Character vectors ("lists" in everyday speech) are built with c().
animals <- c("giant panda", "cat", "lion", "dog")
# R indexing is 1-based: animals[1] is the first element.
animals[1]
animals[2]
animals[3]
animals[4]
# Reading past the end of a vector does not error -- it returns NA.
animals[5]
# length() gives the number of elements.
length(animals)
# A for loop visits each element in turn.
for (animal in animals){
  print(animal)
}
print(animals)
# append() inserts a value: arguments are (vector, value, position-after-which).
animals <- append(animals, "parrot", 3)
# Negative indices REMOVE elements: this drops the 2nd entry.
animals <- animals[-2]
animals
# sort() returns a sorted copy (here ascending, since decreasing = FALSE).
animals = sort(animals, decreasing = FALSE)
animals[2]
# Numeric vectors index the same way.
numbers <- c(1,2,3,4,5,6,7,8,9,10)
numbers[2]
# Out of bounds again: NA, not an error.
numbers[11]
# NROW() works on vectors too (treats them as 1-column), unlike nrow().
NROW(animals)
# Slicing / subsetting:
numbers[-1] # everything except the first item
numbers[1:5] # the first five items
numbers[] # empty index selects the whole vector
|
09a38b6f791e3b693ad17bf051356e238cf2eef5
|
a442d44c669e43c6f2ec2e4b532c5b33582563d0
|
/R/SVM_1224.R
|
d66d831f9f3800163b339e92ae2b31efc4d78908
|
[] |
no_license
|
seunghyunshin111/Statistics_study
|
4f6cefbf1cc6650112a15dfde1cda885283a52fc
|
d1f5ca257d54fd1b75c800f84282263d6c8e4d97
|
refs/heads/master
| 2020-11-24T05:44:08.553319
| 2020-09-30T04:31:48
| 2020-09-30T04:31:48
| 227,990,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,177
|
r
|
SVM_1224.R
|
# Practice: SVM classification on the consumer-spending data set.
# status == 1 means "middle class", 0 otherwise.

# NOTE(review): setwd() in a script is fragile; kept to preserve the original
# workflow, but prefer project-relative paths.
setwd("c:/Rwork/data/실습용데이터모음")
set1 <- read.csv("set1.csv", header = TRUE, stringsAsFactors = FALSE)
head(set1)
plot(set1)

library(MASS)
# 2-D kernel density estimate: x-axis = food spending, y-axis = book spending.
density <- kde2d(set1$food, set1$book, n = 400)
head(density)
?kde2d
image(density, xlab = "food", ylab = "book")
# Most observations cluster near the mean ("ordinary" households);
# an SVM is used below to separate the classes.

library(e1071)
# install.packages("Epi")
library(Epi)
m1 <- svm(status ~ food + book + cul + cloth + travel, type = "C-classification", data = set1)
m1
# (A dangling `tune.svm(m1,` call that did not parse was removed here.)
predict(m1, set1)
# Number of training-set misclassifications.
sum(set1$status != predict(m1, set1))

# SVM with an RBF kernel (Gaussian function) via kernlab.
# install.packages("kernlab")
library(kernlab)
m2 <- ksvm(status ~., kernel = "rbfdot", data = set1)
m2
# Misclassification count for m2, thresholding predictions at 0.5.
# (The original `sum(as.numeric(m2, cet1) > 0.5 != set1$status)` was a parse
# error and contained the typo `cet1`; this is the evident intent.)
sum((predict(m2, set1) > 0.5) != set1$status)

# OVA (One Versus All): multi-class SVM scheme -- for N classes, fit N linear
# decision boundaries, each separating one class from the union of the
# remaining N-1 classes.
# OVO (One Versus One): fit a boundary for every pair of classes, i.e.
# N(N-1)/2 boundaries, and assign each point the label that wins the most
# pairwise votes.

# Exercise 1 (from "machine-learning-based big-data analysis"):
# association-rule analysis of the shops located in the same building.
building <- read.csv("building.csv", header = TRUE)
head(building)
building[is.na(building)] <- 0
building <- building[-1]  # drop the first (id) column
building
install.packages("arules")
library(arules)
# Convert to a transactions-style matrix for the association-rule mining.
trans <- as.matrix(building, "Transaction")
head(trans)
rules1 <- apriori(trans, parameter = list(supp = 0.2, conf = 0.6, target = "rules"))
rules1
inspect(sort(rules1))
# Rules whose left-hand side mentions the cram-school category, conf > 0.7.
rules2 <- subset(rules1, subset = lhs %pin% '보습학원' & confidence > 0.7)
inspect(sort(rules2))
# Lagrange theory: (note left unfinished in the original)
|
8d6fead6178d045a7879d879eec384edc50fb6e2
|
6b8c9af8a6242a518ec14230f190cde6cc48e475
|
/R/bbea.subset.R
|
9463556aecfd7a96810260b49240bf2cdc2ec9e6
|
[] |
no_license
|
catherine-hhs/bbeaR
|
62417d243edeb0c52364ca03878378850356e8b3
|
8d2627c3163421f6b5956c7c1c1f095922b829ec
|
refs/heads/master
| 2023-02-28T11:08:04.091071
| 2021-01-25T16:40:05
| 2021-01-25T16:40:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
r
|
bbea.subset.R
|
# Subset a bbea object to the samples for which `statement` is TRUE.
#
# bbea.obj:  list holding assay matrices ("Median", "NetMFI", "Count";
#            samples in columns) and a "pData" data frame (samples in rows,
#            rownames are the sample ids).
# statement: logical vector aligned with the rows of bbea.obj$pData.
#
# Returns a list with the same elements restricted to the selected samples.
bbea.subset <- function(bbea.obj, statement) {
  # which() drops NAs, so an NA in `statement` deselects that sample instead
  # of producing NA rownames as the original `statement == TRUE` indexing did.
  keep <- rownames(bbea.obj$pData)[which(statement)]
  message(length(keep), " Samples meet the statement criteria")
  assays <- c("Median", "NetMFI", "Count")
  # drop = FALSE keeps matrix/data.frame shape even when one sample remains.
  L1 <- lapply(bbea.obj[names(bbea.obj) %in% assays],
               function(l) l[, keep, drop = FALSE])
  L2 <- lapply(bbea.obj[names(bbea.obj) %in% "pData"],
               function(l) l[keep, , drop = FALSE])
  # Drop plate levels that no longer occur after subsetting.
  # inherits() is the robust form of the original class() == "factor" test.
  if (inherits(L2$pData$Plate, "factor")) {
    L2$pData$Plate <- droplevels(L2$pData$Plate)
  }
  c(L1, L2)
}
|
418102f74cce0c4eca5dd42fa2320ed8848c1e6e
|
29f3181d6eb312973657cfad9b2f77d5614b8f3f
|
/CannabisZoning/App.r
|
35e56a3cf811974dcae0769c3be89c167a5c8fcc
|
[
"MIT"
] |
permissive
|
codeforanchorage/shiny-server
|
3793e4fc632cd10cf5400eeeceb7458132b25615
|
5139e294b3864089c2e4a667b40bed66dc17d8c6
|
refs/heads/master
| 2016-08-11T16:10:18.328378
| 2015-10-20T23:12:44
| 2015-10-20T23:12:44
| 44,632,987
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,510
|
r
|
App.r
|
##Need to combine Geojson into one file before putting into leaflet
library(shinythemes)
library(rgeos)
library(rgdal)
library(leaflet)
library(geojsonio)
load("map.rda")
load("zones.rda")
#input <- list(feetbuffer = 500, zones = "B-3")
ui <- bootstrapPage(theme = shinytheme("spacelab"),
title = "Cannabis Business Zoning",
tags$head(includeScript("google-analytics.js")),
tags$style(type = "text/css", "html, body {width:100%;height:100%}"),
leafletOutput("map", width = "100%", height = "100%"),
absolutePanel(class = "panel panel-default",
top = 10, right = 10, width = 330,
includeMarkdown("docs/about.md"),
numericInput("feetbuffer",
label = h4("Feet From Facility"), 500),
selectInput("zones", "Specific Zone", as.character(levels(zones@data$ZONING_DES))),
actionButton("recalc", "Update"),
br(),br(),
a(img(src = "codeforanc.png"),
href = "http://codeforanchorage.org/"),
br(),br(),
(a("Contact", href = "mailto:hans.thompson1@gmail.com"))
)
)
server <- function(input, output, session) {
buffer <- eventReactive(input$recalc, {
spTransform(map, CRS("+init=epsg:26934")) %>%
gBuffer(width = input$feetbuffer / 3.28084) %>% #input$feetbuffer is actually in meters and is converted to feet in this line
spTransform(CRS("+proj=longlat")) %>%
as("SpatialPolygonsDataFrame")
}, ignoreNULL = FALSE)
filteredZones <- eventReactive(input$updateButton, {
zones_filtered <- zones[as.character(zones@data$ZONING_DES) == input$zone,]
zones_filtered <- as(zones_filtered, "SpatialPolygonsDataFrame")
return(zones_filtered)
})
output$map <- renderLeaflet({
#SpatialPolygons(list(buffers, zones), c("buffer", "zones"))
leaflet() %>%
addTiles(urlTemplate = "http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png") %>%
setView(-149.85, 61.15, zoom = 12) %>%
addGeoJSON(geojson_json(buffer())) %>%
addPolygons(data = filteredZones(),
color="red")
})
}
shinyApp(ui, server)
|
5e9b7e1031fc961a25d92c4331b262ef380fca67
|
341bd816596e08cefca0d8de16054057877b024e
|
/R/sourcing_Data/PhysiData_Clean.R
|
501597db1f90c27ee13c888abc63c13bf6a93ae3
|
[] |
no_license
|
sebastiz/PhysiMineR
|
caf842d85299d7550aea83462fdb8d7857961598
|
c6b2fb1a46b857a6c9568cb5a002e7a22e7273d4
|
refs/heads/master
| 2021-08-01T07:43:55.506905
| 2021-07-24T14:10:25
| 2021-07-24T14:10:25
| 125,661,981
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,580
|
r
|
PhysiData_Clean.R
|
#Data cleaning
#Use after PhysiData_Acq
AllImpedance<-merge(Impedance2,Imp_Symp,by="Imp_Id", all = TRUE)
#Date cleaning
#Clean up the HRM swallows:
HRMImportSwallows<-HRMImportSwallows[!is.na(HRMImportSwallows$panesophagealpressurizationMapSwallowsNum8),]
#Merge the HRM results together (swallows and Main)
AllHRM<-merge(HRMImportMain,HRMImportSwallows,by="HRM_Id",all=TRUE)
rm(HRMImportMain)
#Clean up the HRM HRMImportMain (to get rid of duplicates mainly)
#This looks for all the duplicates by hospital number and DistalLESfromnarescm (the latter so that duplicates means same test rather than same patient on two different days)
HRMImportMain2<-AllHRM %>%
group_by(HospNum_Id,DistalLESfromnarescm) %>%
summarise_all(.funs = function(x) paste(unique(c(dplyr::lag(x, default = NULL), x)), collapse = ":"))
#Clean Up the main HRM
HRMImportMain<-HRMCleanUp1(HRMImportMain2)
#Avoid cluttering things up
rm(AllHRM)
#Also need to get rid of the duplicates from the Diag table:
#Recognise duplicates by the same Hospital Number and HRM_Id (this will be the same even after HRM merging)).
Diag$HospNum_Id<-toupper(Diag$HospNum_Id)
Diag$FileCreationDate<-as.Date(gsub("(\\d{4}-\\d{2}-\\d{2}).*","\\1",Diag$FileCreationDate))
#Convert VisitDate into a Date format and extract the year (the month and day are unreliable here as sometimes American and sometimes British dates are used)
#Convert the dates to date format
Diag$VisitDate2<-as.Date(Diag$VisitDate,format="%d_%m_%Y")
#For NA dates make sure it is not because the dates are the wrong way around
Diag$VisitDate3<-as.Date(Diag$VisitDate,format="%m_%d_%Y")
#Merge the dates if there are separate HRM and Diag results which are reporting the same thing
Diag$VisitDate4<-as.Date(ifelse(is.na(Diag$VisitDate2),Diag$VisitDate3,Diag$VisitDate2),origin = "1970-01-01")
#If still NA then use the file creation date as the visit date
Diag$VisitDate4<-as.Date(ifelse(is.na(Diag$VisitDate4),Diag$FileCreationDate,Diag$VisitDate4),origin = "1970-01-01")
#Extract the Distal LES as is likely to be in both the final and the HRM report so can be merged on this
Diag$DistalLESnares<-gsub(".*Distal LES from nares.*?(\\d+).*","\\1",Diag$WholeReport)
#Get rid of whole reports that are copied over for some reason
Diag$DistalLESnares<-gsub(".*[A-Za-z].*","\\1",Diag$DistalLESnares)
#Extract some diagnoses from the report (using the histopath extractor from EndoMineR)
Diag$Dx<-str_extract_all(Diag$WholeReport, paste0("upragas|neffecti|ackham|utcrack|[Aa]peris|[Ff]requent [Ff]ail" , simplify = FALSE))
Diag$Dx <- sapply(Diag$Dx, toString)
#Merge the diag reports on the basis of being the same Hospital Number and date range here:
Diag2<-Diag %>%
arrange(DistalLESnares,HospNum_Id) %>%
group_by(DistalLESnares,HospNum_Id,lubridate::year(VisitDate4)) %>%
summarise_all(.funs = function(x) paste(unique(c(dplyr::lag(x, default = NULL), x)), collapse = ":"))
rm(Diag)
# Get the whole impedance dataset
#You will need to re-clean the merged dataImpSympImpedance as cleaning function needed to be fixed
Impedance2_Clean<-dataImpClean(AllImpedance)
rm(AllImpedance)
#ImpSymp_Clean<-dataImpSympClean(ImpSymp)
#ImpAll<-merge(Impedance2_Clean,ImpSymp_Clean,by="Imp_Id")
#Get the whole BRAVO dataset:
# Need to get the BRAVO data merged from the two tables
AllBravo<-merge(BravoDay1And2,BravoDay3And4,by="BravoID",all=TRUE)
AllBravo<-merge(AllBravo,BRAVOTotal,by="BravoID",all=TRUE)
AllBravo<-dataBRAVOClean(AllBravo)
AllBravo<-dataBRAVODayLabeller(AllBravo,"HospNum_Id","VisitDate")
rm(BravoDay1And2)
rm(BravoDay3And4)
|
6a4777d94c74c0bc66361c03dc8f12981116ee4f
|
02a1e6db6421a3949e0c2e14867cafd90ec28c96
|
/R/sv_get_inventory_value.R
|
ecef856ffd440ec529699665fb0808153b52ee12
|
[] |
no_license
|
anthonypileggi/skuvaultr
|
ab1780055968173410642a6b18a3e45038dc4ee8
|
c85c73ea1b7cd54bf8b0c2d197ed432d5b046e0a
|
refs/heads/master
| 2022-10-09T15:18:16.353459
| 2022-09-21T17:33:42
| 2022-09-21T17:33:42
| 149,252,034
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,069
|
r
|
sv_get_inventory_value.R
|
#' Get total warehouse inventory value
#' @param skus product skus (character/vector)
#' @importFrom magrittr "%>%"
#' @export
sv_get_inventory_value <- function(skus = NULL) {
# Compute total inventory value (static, right now)
# LOCations to ignore for inventory calcs
ignore_locs <- c(skuvaultr::sv_dropship_locs(), "WHOLEGOODS")
# Load product data
products <- sv_get_products(skus)
# Unfurl additional skus
alt_products <- products %>%
dplyr::select(Sku, Cost, AlternateSku) %>%
dplyr::filter(nchar(AlternateSku) > 0) %>%
dplyr::mutate(
Sku2 = purrr::map(AlternateSku, ~stringr::str_split(.x, "; ")[[1]])
) %>%
tidyr::unnest(Sku2)
if (nrow(alt_products) > 0) {
alt_products <- alt_products %>%
dplyr::select(AltSku = Sku, Sku = Sku2, Cost)
all_products <- products %>%
dplyr::select(Sku, Description, Cost) %>%
dplyr::bind_rows(alt_products)
} else {
all_products <- products %>%
dplyr::select(Sku, Description, Cost) %>%
dplyr::mutate(AltSku = NA_character_)
}
# Load inventory
if (!is.null(skus)) {
alt_skus <- all_products %>%
dplyr::filter(AltSku %in% skus) %>%
dplyr::pull(Sku)
skus <- c(skus, alt_skus)
}
inventory <- sv_get_inventory_locations(skus)
# Attach out-of-stocks SKUs
if (nrow(inventory) == 0) {
inventory <- products %>%
dplyr::select(Sku) %>%
dplyr::mutate(WarehouseCode = "WH1", LocationCode = "Unknown", Quantity = 0, Reserve = F)
} else {
inventory <- products %>%
dplyr::select(Sku) %>%
dplyr::full_join(
dplyr::filter(inventory, !(LocationCode %in% ignore_locs)),
by = "Sku") %>%
tidyr::replace_na(list(WarehouseCode = "WH1", LocationCode = "Unknown", Quantity = 0, Reserve = F))
}
# Ignore dropship inventory
out <- inventory %>%
dplyr::mutate(
Quantity = ifelse(LocationCode %in% ignore_locs, 0, Quantity)
)
# Replace AltSku with base SKU
out <- out %>%
dplyr::left_join(all_products, by = "Sku") %>%
dplyr::mutate(
AltSku = dplyr::case_when(
is.na(AltSku) ~ Sku,
TRUE ~ AltSku
)
) %>%
#dplyr::filter(AltSku %in% c("692187", "731-09312")) %>%
dplyr::select(-Sku) %>%
dplyr::rename(sku = AltSku) %>%
dplyr::group_by(sku, LocationCode) %>%
dplyr::summarize_at(dplyr::vars(Quantity, Cost), mean, na.rm = T) %>%
dplyr::summarize(
quantity = sum(Quantity, na.rm = T),
cost = weighted.mean(Cost, Quantity, na.rm = T)
)
# Summarize inventory value (excluding dropships)
out <- out %>%
dplyr::mutate(
value = ifelse(quantity == 0, 0, cost * quantity)
) %>%
dplyr::arrange(desc(value))
# Attach location(s)
out %>%
dplyr::left_join(
inventory %>%
dplyr::rename(sku = Sku) %>%
dplyr::group_by(sku) %>%
tidyr::nest(location = c(WarehouseCode, LocationCode, Quantity, Reserve)) %>%
dplyr::mutate(
location = purrr::map(location, ~.x)
),
by = "sku"
)
}
|
88e47d398b9fb24f6d79d6a846144387cccb425b
|
cf847547af9e72b3b21456013eb6e85e6a2830e6
|
/R/print.lpc.R
|
526e59f01d2383e490b3b09f6a329bb563ee053a
|
[] |
no_license
|
cran/LPCM
|
48798abd534257e2231d8604de0c31d67634a43d
|
a526c031d6c75c015bad40aa03d6a72069c61acb
|
refs/heads/master
| 2023-04-06T18:54:59.876739
| 2023-01-06T13:50:37
| 2023-01-06T13:50:37
| 17,680,250
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,618
|
r
|
print.lpc.R
|
print.lpc <-
function(x, digits=max(3,getOption('digits')-3), ...){
sx <- as.character(substitute(x))
if (sum(nchar(sx))>200){ sx<-"name.of.this.object"}
cat("\n")
cat("Type plot(", sx, ") to see a graphical display of the fitted object. \n\n")
cat("Type names(", sx, ") to see an overview of items available. \n\n")
if(inherits(x,"lpc")){
if (x$scaled){
cat("The data have been scaled by dividing through \n")
cat(x$Misc$scaled.by)
cat("\n")
} else {
cat("The data have not been scaled. \n")
}
}
else if(inherits(x,"lpc.spline")){
cat("A cubic spline with ", dim(x$knots.coords[[1]])[2], " knots and total arc length ", diff(range(x$knots.pi[[1]])), " has been laid through the local centers of mass representing the local principal curve. \n")
}
}
print.lpc.spline <-print.lpc
print.ms <-
function(x, digits=max(3,getOption('digits')-3), ...){
sx <- as.character(substitute(x))
if (sum(nchar(sx))>200){ sx<-"name.of.this.object"}
cat("\n")
cat("Type plot(", sx, ") to see a graphical display of the fitted object. \n\n")
cat("Type names(", sx, ") to see an overview of items available. \n\n")
if(inherits(x,"ms")){
if (x$scaled){
cat("The data have been scaled by dividing through \n")
cat(x$scaled.by)
cat("\n")
} else {
cat("The data have not been scaled. \n")
}
}
}
|
d34b10f1c632a5c58790e7bec87f1e3d856c9c17
|
acabe90ad16ea6d151e0fb2b69edd07be012007d
|
/week5/R/Predict_newdata.R
|
ad50148d0f589ae92baceadbf9c69d1d74200ae9
|
[] |
no_license
|
Taros007/ubiqum_projects
|
098e24db610e54bb93f0d363f6d01d9c2feed01e
|
566500b13e50e4e9fef2230299290fe63d7c1cd5
|
refs/heads/master
| 2020-04-28T22:05:51.003768
| 2019-07-16T08:14:20
| 2019-07-16T08:14:20
| 175,605,769
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,567
|
r
|
Predict_newdata.R
|
## Predict new sales volumes - week 5
## Toine - March 2019
## Load libraries =================================
library(tidyverse)
library(caret)
library(e1071)
library(magrittr)
library(doParallel)
library(corrplot)
library(cowplot)
# Prepare clusters =================================
cl <- makeCluster(3)
registerDoParallel(cl)
## Import dataset =================================
newProducts <- readr::read_csv2('./input/newproductChristianProud.csv')
## Preprocessing: cleaning up ==========================
newProducts %<>% select(-X1)
names(newProducts) %<>% make.names(.)
names(newProducts$Age_group) <- "Age"
## Preprocessing: alter datatypes & calculate new variables ===============
newProducts %<>%
mutate(
Product_type = as.factor(Product_type),
Depth = as.numeric(Depth),
Age = as.factor(Age),
Professional = as.factor(Professional),
Review_score = (4 * X4Stars + 3 * X3Stars + 2 * X2Stars + X1Stars) / rowSums(select(newProducts, X4Stars:X1Stars))
)
## Bin Best_seller_rank, and convert NAs to 0 ================
newProducts$Best_seller_rank %<>%
findInterval(c(-Inf, 50, 100, Inf)) %<>%
replace_na(0) %<>% as.factor()
## Dummify data =================================
newDataFrame <- dummyVars(" ~ .", data = newProducts)
newDummy <- data.frame(predict(newDataFrame, newdata = newProducts))
#Load model
rfFit1 <- readRDS('./output/RF.rds')
# Predicting testset ================================
existingDummy$Volume <- predict(rfFit1, newDummy)
#Save predictions
write.csv(surveyIncom, './output/Predicted.csv')
|
bf4e2ecf547be9c1a522998a426d213bf311361b
|
de963a3e0ff79f75ec45fc14638877e1be90b7fc
|
/processing-and-analysis/command-files/datacleaning.R
|
0e020873970f3aec645fa0444f5ce563ffbbebf0
|
[] |
no_license
|
jrlangberkeley/ProjectTIER_R_jrl
|
1ce1f868bd66d1fde6388fefe172ab9d85e2c88d
|
4b7da584e0dd09019f42ad7c19ab5ce9ae49542c
|
refs/heads/master
| 2021-01-11T00:37:26.143136
| 2016-10-10T22:39:09
| 2016-10-10T22:39:09
| 70,533,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
datacleaning.R
|
# datacleaning.R - created by Jackie Lang on 10 October 2016
|
ea73fc58468954d42435d05907af523d824bcff7
|
e2f3fee3cb8f1abdee08724f0fe8a89b5756cfbe
|
/COSTdbe/man/cvTripDCF.rd
|
e42d66f460072f84936f2312f8dc000ba54f2aad
|
[] |
no_license
|
BackupTheBerlios/cost-project
|
1a88c928f4d99db583a95324b31d6a02d9bd20c9
|
4ab39d16c48f031ca46512545895cb17e5586139
|
refs/heads/master
| 2021-01-21T12:39:53.387734
| 2012-03-26T14:58:36
| 2012-03-26T14:58:36
| 40,071,425
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,814
|
rd
|
cvTripDCF.rd
|
\name{cvTripDCF}
\alias{cvTripDCF}
\alias{cvTripDCF,csDataCons-method}
\docType{methods}
\title{Method for calculating CVs within trips}
\description{This method calculates CVs of numbers-at-length estimates, total number and total weight estimates within each trip
for each stratum from a \emph{csDataCons} object. It also computes a fourth indicator that is the weighted mean of CVs of number-at-length per stratum,
and finally the number of trips per stratum.
}
\usage{
cvTripDCF(csObject,spp,catchCat="LAN",sampPar=TRUE,\dots)
}
\arguments{
\item{csObject}{A \emph{csDataCons} object.}
\item{spp}{Character. The species for which calculation is to be made.}
\item{catchCat}{Character. Specified catch category. Typically "LAN" or "DIS" or both.}
\item{sampPar}{Logical. Parameter characterizing the sampling process and the way 0-values have to be considered in the calculating process. See \emph{sampledFO} method.}
\item{...}{Further arguments}}
\value{A list with one element per species. Each element is a list with 5 elements : \emph{$nbT} is the number of trips per stratum,
\emph{$D} is the CVs of numbers-at-length, \emph{$N} the CVs of the total numbers, and \emph{$W} the CVs of the total weights (along with the means and the SDs),
and \emph{DCF} is the weighted mean of the CVs of the numbers-at-length, per stratum)
}
\author{Mathieu Merzereaud}
\seealso{\code{\link{sampledFO}}, \code{\link[COSTcore]{csDataCons}}}
\examples{
data(sole)
#stratification
strD <- strIni(timeStrata="quarter",techStrata="commCat")
#only market sampling data and biological parameters are kept
csObject <- csDataCons(csDataVal(subset(sole.cs,sampType\%in\%c("M","V"))),strD)
out <- cvTripDCF(csObject,"Solea solea")
}
\keyword{methods}
|
1114c6055319de7d1c93206158b92b32fb3d5ee6
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.networking/man/apigatewayv2_update_model.Rd
|
96db40de56b1afc6569b5cfd04f13c528231b353
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 863
|
rd
|
apigatewayv2_update_model.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigatewayv2_operations.R
\name{apigatewayv2_update_model}
\alias{apigatewayv2_update_model}
\title{Updates a Model}
\usage{
apigatewayv2_update_model(
ApiId,
ContentType = NULL,
Description = NULL,
ModelId,
Name = NULL,
Schema = NULL
)
}
\arguments{
\item{ApiId}{[required] The API identifier.}
\item{ContentType}{The content-type for the model, for example, "application/json".}
\item{Description}{The description of the model.}
\item{ModelId}{[required] The model ID.}
\item{Name}{The name of the model.}
\item{Schema}{The schema for the model. For application/json models, this should be
JSON schema draft 4 model.}
}
\description{
Updates a Model.
See \url{https://www.paws-r-sdk.com/docs/apigatewayv2_update_model/} for full documentation.
}
\keyword{internal}
|
65b03020ae2e2a16e442020481c1dab11ed646a4
|
9252f704fa87364d875552be3c3ca22f8e965dc2
|
/pkg/RcmdrPlugin.lfstat/tests/testthat/test-wmo-guidelines.r
|
2e89c17f646cc030e4f2dfa4124d94d02d0e6590
|
[] |
no_license
|
mundl/lfstat
|
6acb92363c286c947df348572244ed9857c5f0dc
|
bacf1bc3042f00307a5656b7ccad752857f0755b
|
refs/heads/master
| 2022-10-05T14:19:13.348312
| 2022-10-02T18:00:20
| 2022-10-02T18:00:20
| 39,436,614
| 8
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,573
|
r
|
test-wmo-guidelines.r
|
context("Examples from 'Guidance of WMO Software Tool for Low-flow Analysis")
data("ngaruroro")
ng_seventies <- subset(ngaruroro, hyear %in% 1970:1979)
ng_eighties <- subset(ngaruroro, hyear %in% 1980:1989)
ng_nineties <- subset(ngaruroro, hyear %in% 1990:1999)
# when unit testing plot functions, we just check for the presence of a file
# and delete it immediately
expect_plot <- function(..., file = "Rplots.pdf", delete = TRUE) {
pdf(file = file)
x <- expect_silent(...)
dev.off()
exists <- file.exists("Rplots.pdf")
y <- expect_true(exists)
if (delete & exists) file.remove(file)
return(invisible(x))
}
# chapter 2
test_that("2.5.4 Missing values", {
x <- lfnacheck(ngaruroro)
expect_equal(x[["total"]], 214)
expect_equal(x[["percentage"]], 0.0157145)
expect_equal(names(x), c("total", "percentage", "hydrologicalyear", "duration"))
})
test_that("2.6 Flow indices", {
# mean flow
mf <- meanflow(lfobj = ngaruroro, year = "any", yearly = FALSE)
expect_equal(round(mf, 2), 17.24)
# q95
q95 <- Q95(lfobj = ngaruroro, year = "any", yearly = TRUE)
q95expected <- data.frame(
hyear = 1964:2001,
flow = c(
3.72250, 5.68980, 6.12395, 5.35860, 3.64350, 4.94860, 4.34840, 5.11780, 4.57975,
3.20820, 3.33200, 5.35200, 5.64100, 4.82720, 2.82045, 3.46460, 7.82550, 6.43980,
4.53500, 3.00260, 5.97355, 4.92660, 4.52320, 5.50500, 5.63350, 4.44860, 4.68220,
4.36020, 5.11100, 4.94580, 3.92500, 5.69320, 7.50850, 4.65920, 3.82960, 4.85320,
4.41500, 6.78805
)
)
expect_equal(round(q95, 5), q95expected)
# mean annual minimum
mam <- MAM(lfobj = ngaruroro, n = 7, year = "any", yearly = FALSE)
expect_equal(round(mam, 6), 4.380613)
# base flow index
bfi <- BFI(lfobj = ngaruroro, year = "any", yearly = FALSE)
expect_equal(round(bfi, 7), 0.5499149)
# master recession curve
mrc <- recession(
lfobj = ngaruroro, method = "MRC", seglength = 7,
threshold = 70, peaklevel = 0.95,
thresbreaks = "fixed", plotMRC = FALSE
)
expect_equal(round(mrc, 5), 19.80968)
# seasonality index
si <- seasindex(lfobj = ngaruroro, Q = 95)
siexpected <- list(theta = 1.0888, D = 63.2505, r = 0.8552)
expect_equal(lapply(si, round, 4), siexpected)
# seasonality ratio
sr <- seasratio(lfobj = ngaruroro, breakdays = c("01/06", "01/10"), Q = 95)
expect_equal(unname(round(sr, 4)), 2.0614)
})
test_that("2.7 Graphics", {
# not easy to write test for this chapter, but we can test if the functions
# does not stop with an error
expect_plot(hydrograph(lfobj = ngaruroro, amin = TRUE))
# seasonal barchart
expect_plot(sbplot(ngaruroro))
# base flow plot
expect_plot(bfplot(lfobj = ngaruroro, year = 1970))
# Flow duration curve
expect_plot(fdc(
lfobj = ngaruroro, year = "any", breakdays = c(),
ylog = TRUE, xnorm = FALSE,
colors = TRUE, legend = TRUE, separate = FALSE
))
# streamflow deficit plot
expect_plot(streamdefplot(
lfobj = ngaruroro, year = 1970, threslevel = 70,
thresbreaks = "fixed",
breakdays = c("01/06", "01/10")
))
# double mass curve
sub1 <- subset(ngaruroro, hyear %in% 1985:1989)
sub2 <- subset(ngaruroro, hyear %in% 1990:1995)
expect_plot(dmcurve(x = sub1, y = sub2))
})
test_that("2.8.2 Regional Frequency Analysis", {
# L-moment ratio diagram
expect_plot(rfaplot(
lflist = list(ng_eighties, ng_nineties, ng_seventies),
n = 7
))
rfa <- rfa(
lflist = list(
ng_eighties = ng_eighties,
ng_nineties = ng_nineties,
ng_seventies = ng_seventies
),
n = 7, dist = "wei"
)
# Index values
expect_equal(
round(rfa$index, 6),
c(
"ng_eighties" = 4.611629,
"ng_nineties" = 4.414414,
"ng_seventies" = 3.927957
)
)
# T-year region
expect_equal(
round(lmomRFA::regquant(0.01, rfa), 7),
c("0.01" = 0.5998283)
)
})
test_that("2.8 Extreme value", {
rp <- expect_plot(
expect_warning(tyearsn(
lfobj = ngaruroro, event = 100, n = 7,
dist = c("wei", "gev", "ln3", "gum", "pe3")
))
)
rpexpected <- structure(c(
2.596112, 2.596112, 2.475504,
2.635668, 2.461607
),
.Dim = c(1L, 5L),
.Dimnames = structure(list(
`return period` = "100",
distribution = c("wei", "gevR", "ln3", "gum", "pe3")
),
.Names = c("return period", "distribution")
)
)
expect_equal(round(rp$T_Years_Event, 6), round(rpexpected, 6))
})
test_that("3 Case study", {
# figure 3.3
expect_plot(sbplot(ngaruroro))
# text: seasonality ratio
sr <- seasratio(lfobj = ngaruroro, breakdays = c("01/06", "01/10"), Q = 95)
expect_equal(unname(round(sr, 2)), 2.06)
# figure 3.4
expect_plot(bfplot(lfobj = ngaruroro, year = 1973))
# table 3.1
mrc <- recession(
lfobj = ngaruroro, method = "MRC", seglength = 7,
threshold = 70, peaklevel = 0.95, seasonbreakdays = c(),
thresbreaks = "fixed", thresbreakdays = c("01/06", "01/10"),
plotMRC = FALSE, trimIRS = 0.1
)
expect_equal(round(mrc, 1), 19.8)
irs <- recession(
lfobj = ngaruroro, method = "IRS", seglength = 7,
threshold = 70, peaklevel = 0.95, seasonbreakdays = c(),
thresbreaks = "fixed", thresbreakdays = c("01/06", "01/10"),
plotMRC = FALSE, trimIRS = 0.1
)
expect_equal(round(irs, 1), 20.7)
# figure 3.5
expect_plot(fdc(
lfobj = ngaruroro, year = "any", breakdays = c(),
ylog = TRUE, xnorm = FALSE, colors = TRUE, legend = TRUE,
separate = FALSE
))
# table 3.2
fdc <- expect_plot(
fdc(
lfobj = ngaruroro, year = "any", breakdays = c(), ylog = TRUE,
xnorm = FALSE, colors = TRUE, legend = TRUE, separate = FALSE
)
)
expect_equal(round(fdc[96], 1), 4.4)
expect_equal(round(fdc[91], 1), 5.3)
expect_equal(round(fdc[71], 1), 8.4)
# table 3.3
mam1 <- MAM(lfobj = ngaruroro, n = 1, year = "any", yearly = FALSE)
expect_equal(round(mam1, 1), 4.1)
mam7 <- MAM(lfobj = ngaruroro, n = 7, year = "any", yearly = FALSE)
expect_equal(round(mam7, 1), 4.4)
mam30 <- MAM(lfobj = ngaruroro, n = 30, year = "any", yearly = FALSE)
expect_equal(round(mam30, 1), 5.4)
# table 3.4
expect_warning(deficit_table <- lfstat:::streamdefRcmdr(
lfobj = ngaruroro, pooling = "IC", threslevel = 90, thresbreaks = "fixed",
breakdays = c("01/06", "01/10"), MAdays = , tmin = 5, IClevel = 0.1,
mindur = "5", minvol = "0.5%", table = "all", plot = FALSE
))
deficit_table <- subset(deficit_table, event.no %in% 47:55)
tableexpected <- data.frame(
event.no = c(47, 48, 49, 51, 55),
start = c("1972-11-13", "1972-11-25", "1972-12-17", "1973-01-26", "1973-04-30"),
time = c("1972-11-21", "1972-11-30", "1973-01-09", "1973-04-20", "1973-05-04"),
end = c("1972-11-21", "1972-11-30", "1973-01-09", "1973-04-20", "1973-05-04"),
volume = c(257618.9, 229374.7, 1547786.9, 10016913.6, 283953.6),
duration = c(9, 6, 24, 85, 5),
dbt = c(9, 6, 24, 79, 5),
qmin = c(4.652, 4.713, 3.822, 2.780, 4.234),
tqmin = c("1972-11-19", "1972-11-26", "1973-01-05", "1973-03-03", "1973-05-04")
)
expect_equal(deficit_table$duration, tableexpected$duration)
expect_equal(deficit_table$dbt, tableexpected$dbt)
expect_equal(deficit_table$qmin, tableexpected$qmin)
# figure 3.14 left
expect_plot(expect_warning(ty <- tyearsn(
lfobj = ngaruroro, event = 100, n = 1,
dist = c("wei", "gevR", "ln3", "gum", "pe3")
)))
tyexpected <- structure(c(2.494827, 2.494827, 2.431114, 2.596344, 2.372669),
.Dim = c(1L, 5L),
.Dimnames = structure(list(
`return period` = "100",
distribution = c("wei", "gevR", "ln3", "gum", "pe3")
),
.Names = c("return period", "distribution")
)
)
expect_equal(round(ty$T_Years_Event, 6), round(tyexpected, 6))
# figure 3.14 right
ty_weibull <- tyearsn(
lfobj = ngaruroro, event = 100, n = 1, dist = c("wei"),
plot = FALSE
)
ty_weibullexpected <- structure(2.494827,
.Dim = c(1L, 1L),
.Dimnames = structure(list(
`return period` = "100",
distribution = c("wei")
),
.Names = c("return period", "distribution")
)
)
expect_equal(round(ty_weibull$T_Years_Event, 6), round(ty_weibullexpected, 6))
})
|
68bf274571c47b4e048e126fac905c09311d1447
|
63d97198709f3368d1c6d36739442efa699fe61d
|
/advanced algorithm/round3/k-server-analysis-master/data/tests/case031.rd
|
1299484827e6b761a67634840c7c4062f4133749
|
[] |
no_license
|
tawlas/master_2_school_projects
|
f6138d5ade91e924454b93dd8f4902ca5db6fd3c
|
03ce4847155432053d7883f3b5c2debe9fbe1f5f
|
refs/heads/master
| 2023-04-16T15:25:09.640859
| 2021-04-21T03:11:04
| 2021-04-21T03:11:04
| 360,009,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,914
|
rd
|
case031.rd
|
20
1 [11, 2, 18] 4 4 4 10 10
2 [11, 3, 18] 1 2 6 2 12
3 [11, 2, 18] 1 2 8 2 14
4 [11, 3, 18] 1 2 10 2 16
5 [11, 3, 2] 4 1 11 2 18
6 [16, 3, 2] 5 5 16 10 28
7 [17, 3, 2] 1 2 18 2 30
8 [16, 3, 2] 1 2 20 2 32
9 [17, 3, 2] 1 2 22 2 34
10 [16, 3, 2] 1 2 24 2 36
11 [17, 3, 2] 1 2 26 2 38
12 [16, 3, 2] 1 2 28 2 40
13 [17, 3, 2] 1 2 30 2 42
14 [16, 3, 2] 1 2 32 2 44
15 [17, 3, 2] 1 2 34 2 46
16 [16, 3, 2] 1 2 36 2 48
17 [16, 17, 2] 6 0 36 0 48
18 [16, 17, 9] 7 12 48 12 60
19 [16, 17, 10] 1 2 50 2 62
20 [16, 17, 9] 1 2 52 2 64
21 [16, 17, 10] 1 2 54 2 66
22 [16, 17, 9] 1 2 56 2 68
23 [16, 17, 10] 1 2 58 2 70
24 [16, 17, 9] 1 2 60 2 72
25 [16, 17, 10] 1 2 62 2 74
26 [16, 17, 9] 1 2 64 2 76
27 [16, 17, 10] 1 2 66 2 78
28 [16, 17, 9] 1 2 68 2 80
29 [10, 17, 9] 6 2 70 2 82
30 [10, 3, 9] 6 10 80 10 92
31 [10, 4, 9] 1 2 82 2 94
32 [10, 3, 9] 1 2 84 2 96
33 [10, 4, 9] 1 2 86 2 98
34 [10, 3, 9] 1 2 88 2 100
35 [10, 4, 9] 1 2 90 2 102
36 [10, 3, 9] 1 2 92 2 104
37 [10, 4, 9] 1 2 94 2 106
38 [10, 3, 9] 1 2 96 2 108
39 [10, 4, 9] 1 2 98 2 110
40 [10, 3, 9] 1 2 100 2 112
41 [10, 3, 4] 5 0 100 0 112
42 [16, 3, 4] 6 10 110 10 122
43 [17, 3, 4] 1 2 112 2 124
44 [16, 3, 4] 1 2 114 2 126
45 [17, 3, 4] 1 2 116 2 128
46 [16, 3, 4] 1 2 118 2 130
47 [17, 3, 4] 1 2 120 2 132
48 [16, 3, 4] 1 2 122 2 134
49 [17, 3, 4] 1 2 124 2 136
50 [16, 3, 4] 1 2 126 2 138
51 [17, 3, 4] 1 2 128 2 140
52 [16, 3, 4] 1 2 130 2 142
53 [17, 3, 4] 1 2 132 2 144
54 [16, 3, 4] 1 2 134 2 146
55 [16, 17, 4] 6 0 134 0 146
56 [16, 17, 10] 6 10 144 10 156
57 [16, 17, 11] 1 2 146 2 158
58 [16, 17, 10] 1 2 148 2 160
59 [16, 17, 11] 1 2 150 2 162
60 [16, 17, 10] 1 2 152 2 164
61 [16, 17, 11] 1 2 154 2 166
62 [16, 17, 10] 1 2 156 2 168
63 [16, 17, 11] 1 2 158 2 170
64 [16, 17, 10] 1 2 160 2 172
65 [11, 17, 10] 5 2 162 2 174
66 [11, 3, 10] 6 10 172 10 184
67 [11, 4, 10] 1 2 174 2 186
68 [11, 3, 10] 1 2 176 2 188
69 [11, 4, 10] 1 2 178 2 190
70 [11, 3, 10] 1 2 180 2 192
71 [11, 4, 10] 1 2 182 2 194
72 [11, 3, 10] 1 2 184 2 196
73 [11, 4, 10] 1 2 186 2 198
74 [11, 3, 10] 1 2 188 2 200
75 [11, 4, 10] 1 2 190 2 202
76 [11, 3, 10] 1 2 192 2 204
77 [11, 4, 10] 1 2 194 2 206
78 [11, 3, 10] 1 2 196 2 208
79 [11, 3, 4] 6 0 196 0 208
80 [17, 3, 4] 6 10 206 10 218
81 [18, 3, 4] 1 2 208 2 220
82 [17, 3, 4] 1 2 210 2 222
83 [18, 3, 4] 1 2 212 2 224
84 [17, 3, 4] 1 2 214 2 226
85 [18, 3, 4] 1 2 216 2 228
86 [17, 3, 4] 1 2 218 2 230
87 [18, 3, 4] 1 2 220 2 232
88 [17, 3, 4] 1 2 222 2 234
89 [18, 3, 4] 1 2 224 2 236
90 [17, 3, 4] 1 2 226 2 238
91 [17, 18, 4] 5 0 226 0 238
92 [17, 18, 10] 6 10 236 10 248
93 [17, 18, 11] 1 2 238 2 250
94 [17, 18, 10] 1 2 240 2 252
95 [17, 18, 11] 1 2 242 2 254
96 [17, 18, 10] 1 2 244 2 256
97 [17, 18, 11] 1 2 246 2 258
98 [17, 18, 10] 1 2 248 2 260
99 [17, 18, 11] 1 2 250 2 262
100 [17, 18, 10] 1 2 252 2 264
101 [17, 18, 11] 1 2 254 2 266
102 [17, 18, 10] 1 2 256 2 268
103 [11, 18, 10] 6 2 258 2 270
104 [11, 4, 10] 6 10 268 10 280
105 [11, 5, 10] 1 2 270 2 282
106 [11, 4, 10] 1 2 272 2 284
107 [11, 5, 10] 1 2 274 2 286
108 [11, 4, 10] 1 2 276 2 288
109 [11, 5, 10] 1 2 278 2 290
110 [11, 4, 10] 1 2 280 2 292
111 [11, 5, 10] 1 2 282 2 294
112 [11, 4, 10] 1 2 284 2 296
113 [11, 5, 10] 1 2 286 2 298
114 [11, 4, 10] 1 2 288 2 300
115 [11, 4, 5] 5 0 288 0 300
116 [17, 4, 5] 6 10 298 10 310
117 [18, 4, 5] 1 2 300 2 312
118 [17, 4, 5] 1 2 302 2 314
119 [18, 4, 5] 1 2 304 2 316
120 [17, 4, 5] 1 2 306 2 318
121 [18, 4, 5] 1 2 308 2 320
122 [17, 4, 5] 1 2 310 2 322
123 [18, 4, 5] 1 2 312 2 324
124 [17, 4, 5] 1 2 314 2 326
125 [18, 4, 5] 1 2 316 2 328
126 [17, 4, 5] 1 2 318 2 330
127 [18, 4, 5] 1 2 320 2 332
128 [17, 4, 5] 1 2 322 2 334
129 [17, 18, 5] 6 0 322 0 334
130 [17, 18, 11] 6 10 332 10 344
131 [17, 18, 12] 1 2 334 2 346
132 [17, 18, 11] 1 2 336 2 348
133 [17, 18, 12] 1 2 338 2 350
134 [17, 18, 11] 1 2 340 2 352
135 [17, 18, 12] 1 2 342 2 354
136 [17, 18, 11] 1 2 344 2 356
137 [17, 18, 12] 1 2 346 2 358
138 [17, 18, 11] 1 2 348 2 360
139 [12, 18, 11] 5 2 350 2 362
140 [12, 4, 11] 6 10 360 10 372
141 [12, 5, 11] 1 2 362 2 374
142 [12, 4, 11] 1 2 364 2 376
143 [12, 5, 11] 1 2 366 2 378
144 [12, 4, 11] 1 2 368 2 380
145 [12, 5, 11] 1 2 370 2 382
146 [12, 4, 11] 1 2 372 2 384
147 [12, 5, 11] 1 2 374 2 386
148 [12, 4, 11] 1 2 376 2 388
149 [12, 5, 11] 1 2 378 2 390
150 [12, 4, 11] 1 2 380 2 392
151 [12, 5, 11] 1 2 382 2 394
152 [12, 4, 11] 1 2 384 2 396
153 [12, 4, 5] 6 0 384 0 396
154 [18, 4, 5] 6 10 394 10 406
155 [19, 4, 5] 1 2 396 2 408
156 [18, 4, 5] 1 2 398 2 410
157 [19, 4, 5] 1 2 400 2 412
158 [18, 4, 5] 1 2 402 2 414
159 [19, 4, 5] 1 2 404 2 416
160 [18, 4, 5] 1 2 406 2 418
161 [19, 4, 5] 1 2 408 2 420
162 [18, 4, 5] 1 2 410 2 422
163 [19, 4, 5] 1 2 412 2 424
164 [18, 4, 5] 1 2 414 2 426
165 [18, 19, 5] 5 0 414 0 426
166 [18, 19, 11] 6 10 424 10 436
167 [18, 19, 12] 1 2 426 2 438
168 [18, 19, 11] 1 2 428 2 440
169 [18, 19, 12] 1 2 430 2 442
170 [18, 19, 11] 1 2 432 2 444
171 [18, 19, 12] 1 2 434 2 446
172 [18, 19, 11] 1 2 436 2 448
173 [18, 19, 12] 1 2 438 2 450
174 [18, 19, 11] 1 2 440 2 452
175 [18, 19, 12] 1 2 442 2 454
176 [18, 19, 11] 1 2 444 2 456
177 [12, 19, 11] 6 2 446 2 458
178 [12, 5, 11] 6 10 456 10 468
179 [12, 6, 11] 1 2 458 2 470
180 [12, 5, 11] 1 2 460 2 472
181 [12, 6, 11] 1 2 462 2 474
182 [12, 5, 11] 1 2 464 2 476
183 [12, 6, 11] 1 2 466 2 478
184 [12, 5, 11] 1 2 468 2 480
185 [12, 6, 11] 1 2 470 2 482
186 [12, 5, 11] 1 2 472 2 484
187 [12, 6, 11] 1 2 474 2 486
188 [12, 5, 11] 1 2 476 2 488
189 [12, 5, 6] 5 0 476 0 488
190 [18, 5, 6] 6 10 486 10 498
191 [19, 5, 6] 1 2 488 2 500
192 [18, 5, 6] 1 2 490 2 502
193 [19, 5, 6] 1 2 492 2 504
194 [18, 5, 6] 1 2 494 2 506
195 [19, 5, 6] 1 2 496 2 508
196 [18, 5, 6] 1 2 498 2 510
197 [19, 5, 6] 1 2 500 2 512
198 [18, 5, 6] 1 2 502 2 514
199 [19, 5, 6] 1 2 504 2 516
200 [18, 5, 6] 1 2 506 2 518
201 [19, 5, 6] 1 2 508 2 520
202 [18, 5, 6] 1 2 510 2 522
203 [18, 19, 6] 6 0 510 0 522
510 522 440
|
f66c8df830d2b41424467a722e78929e8495740f
|
3eba0e3a61d31ea6e2ce3f172f7e62395027f3bb
|
/Code/2-4 Packages.R
|
77164da35f900d1be6a78476b97c69bdae7e2a71
|
[] |
no_license
|
Shubhi21a/R_Basic
|
36dc6cd59ef3199f5fa89fdcdf421a083fab5ba8
|
11edb34099ee834d094616e42d14c20a633696e8
|
refs/heads/master
| 2021-11-27T02:52:47.360157
| 2021-08-19T08:08:14
| 2021-08-19T08:08:14
| 144,050,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 802
|
r
|
2-4 Packages.R
|
# CRAN: Comprehensive R Archive Network
# install.packages("ggplot2") - installs from the console - quotes are required
# Alternatively: Packages tab (bottom-right pane) -> Install -> type the name.
# Packages with a checked box there are currently attached to the session;
# check/uncheck them as needed.
# If a package is not attached:
# typing ?functionName for one of its functions gives an error.
# To attach a package:
library(ggplot2) # here quotes are not necessary
# library() is just the function we use to attach an installed package;
# a "library" is not the same thing as a "package".
# Just a teaser:
qplot(data=diamonds,carat,price,colour=clarity,facets=.~clarity)
# Click on the Plots/Viewer tab to see the visualization.
# facets = . ~ clarity means each level of clarity gets its own panel,
# and the panels are plotted side by side.
|
25f8d1c753500e31368083da6909d7e6c990c0e4
|
1cd3ba9cf667a869a4bccbc3f71ede4dc2a38644
|
/Final Project/code/VaR_1M_sim_func.R
|
1b9290a30fe44a3087f16cb4282e0bf1d769db94
|
[] |
no_license
|
Richard19leigh93/Financial-Econometrics
|
6f8ab966aad8d47ec6e693bb2fffc5479f9936ae
|
d329940dc70e125f88da7a10fdffd57d075b24ae
|
refs/heads/master
| 2020-04-02T11:14:52.447078
| 2019-02-06T20:51:26
| 2019-02-06T20:51:26
| 147,548,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 218
|
r
|
VaR_1M_sim_func.R
|
# VaR for simulations (1 Month from 2018-09-14 to 2018-10-15).
# Reshape the xts simulation matrix to long form, convert back to xts,
# and compute the 95% VaR (method = "modified") per simulation column.
VaR_1M_sim_func <- function(x) {
  sims_long <- gather(xts_tbl(x), Sim, Return, -date)
  VaR(tbl_xts(sims_long), p = 0.95, method = "modified")
}
|
dac7c83182ee2f0852aba2b970160beb9948c6b9
|
de80ee6aa309cd8d497e29e0f75883c2a388b4b8
|
/man/update_rating.Rd
|
7ba606213076ff09cef0776a4aebacbead9b49a1
|
[
"MIT"
] |
permissive
|
Appsilon/shiny.semantic
|
38b359d639b8a4aee4fcbd3dab421cf15d33b26d
|
4b00b7c294012783e8fe80547f9ace9abf1d2214
|
refs/heads/develop
| 2023-08-15T05:32:08.153175
| 2023-05-04T15:58:05
| 2023-05-04T15:58:05
| 75,114,262
| 514
| 106
|
NOASSERTION
| 2023-05-05T11:05:56
| 2016-11-29T19:24:30
|
CSS
|
UTF-8
|
R
| false
| true
| 1,051
|
rd
|
update_rating.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rating.R
\name{update_rating_input}
\alias{update_rating_input}
\title{Update rating}
\usage{
update_rating_input(session, input_id, label = NULL, value = NULL)
}
\arguments{
\item{session}{shiny object with session info}
\item{input_id}{rating input name}
\item{label}{character with updated label}
\item{value}{new rating value}
}
\description{
Change the value of a rating input on the client. Check
\code{rating_input} to learn more.
}
\examples{
## Only run examples in interactive R sessions
if (interactive()) {
library(shiny)
library(shiny.semantic)
ui <- shinyUI(
semanticPage(
rating_input("rate", "How do you like it?", max = 5,
icon = "heart", color = "yellow"),
numeric_input("numeric_in", "", 0, min = 0, max = 5)
)
)
server <- function(session, input, output) {
observeEvent(input$numeric_in, {
x <- input$numeric_in
update_rating_input(session, "rate", value = x)
}
)
}
shinyApp(ui = ui, server = server)
}
}
|
4bc627b03639b56cb25a556f26ca9642e766687a
|
542422ef12c2ebf036f605f4e36faf2839dad33e
|
/tests/testthat.R
|
de95aed16f1ae8bb241ef9102952c205261648fe
|
[] |
no_license
|
mvattulainen/metaheur
|
95ef144bc4504029e3aec57597a2692dc0f648f9
|
49aaf74b98136d7b9b0b265b9b9a61807a4b8d31
|
refs/heads/master
| 2021-01-19T03:59:30.619802
| 2016-06-30T04:38:35
| 2016-06-30T04:38:35
| 48,600,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/
# for the metaheur package (invoked by R CMD check / devtools::test()).
library(testthat)
library(metaheur)
test_check("metaheur")
|
f345a6a02dcdfdc1a70a09551fc7cc824b4a4dff
|
22e4ae4ce6921d1c765900c6365db9f228c046ce
|
/man/getlasttrades.Rd
|
add7df96db96dbf40461ea6038dc2dcb2ba5a273
|
[] |
no_license
|
jfontestad/rderibit
|
2ceb6908bab982157650d418c73b0377d482ad60
|
17e044dcefeb83fc059edd79eb7470cc326b7950
|
refs/heads/master
| 2022-01-19T04:36:47.670529
| 2018-11-30T07:44:59
| 2018-11-30T07:44:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,402
|
rd
|
getlasttrades.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deribit_api.R
\name{getlasttrades}
\alias{getlasttrades}
\title{Retrieve the latest trades that have occurred for a specific instrument.}
\usage{
getlasttrades(instrument, count = NULL, startTimestamp = NULL,
endTimestamp = NULL, startId = NULL, endId = NULL, startSeq = NULL,
endSeq = NULL, simplify = TRUE)
}
\arguments{
\item{instrument}{string [optional], instrument name, example "BTC-28SEP18-3500-P" (or other items in rderibit::getinstruments()). Can as well be "all", "options", "futures", "BTC-THISWEEK", "BTC-NEXTWEEK"}
\item{count}{– integer [optional], count of trades returned (limitation: max. count is 100000, default 100)}
\item{startTimestamp}{timestamp of starting trade in milliseconds}
\item{endTimestamp}{timestamp of ending trade in milliseconds}
\item{startId}{– integer [optional], “since” tradeId, the server returns trades newer than that “since”.}
\item{endId}{- integer [optional], the server returns trades with tradeId smaller than the provided number.}
\item{startSeq}{relates to tradeSeq parameter}
\item{endSeq}{relates to tradeSeq parameter}
}
\description{
Retrieve the latest trades that have occurred for a specific instrument.
}
\examples{
getlasttrades(instrument = "BTC-28SEP18-3500-P", startTimestamp = as.numeric(as.POSIXlt("2018-08-05 18:00:00 UTC"))*1000)
}
|
4b4f20d69948e8acbaacbfbbc14bacce7b981b42
|
f25f0ce112516575e7129ae0d370ce4a3031cdf9
|
/man/n2.Rd
|
cc45b2b3a446c047d049b634331f33c8fd59b544
|
[] |
no_license
|
MatheMax/OptReSample
|
83fb7dfc57afafbfa3f4d4939caca7d9c35ce87f
|
8a3856ddb31d01b9f5daae64f58ccd12c3811416
|
refs/heads/master
| 2021-08-06T05:24:51.263279
| 2018-07-18T20:16:54
| 2018-07-18T20:16:54
| 130,708,697
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 776
|
rd
|
n2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/n2.R
\name{n2}
\alias{n2}
\title{Compute n_2 function}
\usage{
n2(parameters, z, n1, lambda1, lambda2, cf, ce)
}
\description{
Computes the optimal n_2-value for a given first stage z_1-value in the Lagrangian framework.
}
\details{
\code{n2} differs from \link{response} only so far that the values are set to 0 outside the interval (cf,ce).
@param parameters Parameters specifying the design
@param z Z_1-value of the first stage
@param n1 First stage sample size
@param lambda1 Penalization parameter for type I error
@param lambda2 Penalization parameter for type II error
@param cf Boundary for stopping for futility
@param ce Boundary for stopping for efficacy
}
|
a31c30e06ebf92a16cbd0518ad996d73ad26cb4b
|
06598a777d099b1a65873ae4508c7ebcab48a9a8
|
/RealData/SummarizeResults.R
|
fa7a91da950f3408b0f346948a5108e6ad154745
|
[] |
no_license
|
DavidKLim/FSCseqPaper
|
0668d3e4193d0a3fb91f28d1e5a70154f410f678
|
85c895bb610ef8e0b912ac627a4ff5b43c4d78a7
|
refs/heads/master
| 2021-12-23T10:03:42.549544
| 2021-11-15T21:03:29
| 2021-11-15T21:03:29
| 228,465,751
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,302
|
r
|
SummarizeResults.R
|
# Replicate Tables/Figures pertaining to BRCA_pure and BRCA_full from FSCseq paper
setwd("./RealData")
tabulateResults <- function(dataset = c("BRCA_pure", "BRCA_full"),
                            med_filt = 500, MAD_filt = 50) {
  # Build a per-method clustering-performance table for one result set.
  #
  # For each method's saved cluster assignment, reports the number of
  # clusters found (K) and five agreement indices against the annotated
  # subtype labels: RI, NID, NVI, NMI, ARI (via aricode::clustComp()).
  #
  # dataset:  result set to summarize ("BRCA_pure" or "BRCA_full").
  # med_filt: median-count pre-filter encoded in the result directory name.
  # MAD_filt: MAD-percentile pre-filter encoded in the directory name.
  # Returns:  data.frame, one row per method, columns K/RI/NID/NVI/NMI/ARI.
  library(fpc)      # cluster.stats()
  library(aricode)  # clustComp()

  dataset <- match.arg(dataset)
  res_dir <- sprintf("./%s_med%d_MAD%d", dataset, med_filt, MAD_filt)

  # FSCseq result files each define an object named `res`; rename as loaded.
  load(sprintf("%s/FSC_covarsF_summary.out", res_dir));  resF  <- res
  load(sprintf("%s/FSC_covarsT_summary.out", res_dir));  resT  <- res
  load(sprintf("%s/FSC2_covarsF_summary.out", res_dir)); resF2 <- res
  load(sprintf("%s/FSC2_covarsT_summary.out", res_dir)); resT2 <- res

  # Competing methods each define a `<method>_summary` object with a $cls.
  for (m in c("iCl", "HC", "KM", "NBMB", "NMF", "lMC", "vMC", "rMC")) {
    load(sprintf("%s/%s_summary.out", res_dir, m))
  }

  # Dataset environment defines `cls` (annotated labels), `norm_y`, `anno`.
  load(sprintf("%s_env.RData", dataset))
  cls <- as.numeric(as.factor(cls))
  # Sample-sample dissimilarity used by cluster.stats().
  d <- as.dist(1 - cor(norm_y, method = "spearman"))

  summary_list <- list(
    anno     = list(cls = cls,            K = length(unique(cls))),
    FSC      = list(cls = resF$clusters,  K = length(unique(resF$clusters))),
    FSC10    = list(cls = resF2$clusters, K = length(unique(resF2$clusters))),
    FSCadj   = list(cls = resT$clusters,  K = length(unique(resT$clusters))),
    FSCadj10 = list(cls = resT2$clusters, K = length(unique(resT2$clusters))),
    iCl      = iCl_summary,
    HC       = HC_summary,
    KM       = KM_summary,
    NBMB     = NBMB_summary,
    NMF      = NMF_summary,
    lMC      = lMC_summary,
    vMC      = vMC_summary,
    rMC      = rMC_summary
  )

  metrics <- c("K", "RI", "NID", "NVI", "NMI", "ARI")
  # One table row: K from cluster.stats(), agreement indices from clustComp().
  # (Replaces the original eval(parse(text = ...)) lookup of summary objects.)
  fill_row <- function(method_cls) {
    cs <- cluster.stats(d, method_cls, cls)
    cc <- clustComp(method_cls, cls)
    c(cs$cluster.number, cc$RI, cc$NID, cc$NVI, cc$NMI, cc$ARI)
  }

  df <- as.data.frame(t(vapply(summary_list,
                               function(s) fill_row(s$cls),
                               numeric(length(metrics)))))
  colnames(df) <- metrics

  if (dataset == "BRCA_full") {
    # Also score the published SigClust labels shipped in `anno`.
    # BUG FIX: the original built 15-element vectors here, then assigned the
    # 6-name `metrics` vector to them (a names<- length-mismatch error) and
    # rbind-ed them onto a 6-column data.frame. Restrict to the same 6
    # metrics as the rest of the table instead.
    sig_labels <- list(
      Intrinsic    = anno$cls_sigCL_intrinsic,
      Unsupervised = anno$cls_sigCL_unsupervised
    )
    for (nm in names(sig_labels)) {
      df <- rbind(df, fill_row(as.numeric(as.factor(sig_labels[[nm]]))))
      rownames(df)[nrow(df)] <- nm
    }
  }
  df
}
#######################
## BRCA_pure results ##
#######################
# Table 3: Main BRCA_pure results
dir_name="BRCA_pure_med500_MAD50"
tab3=tabulateResults(dataset="BRCA_pure",med_filt=500,MAD_filt=50)
save(tab3,file="Results/Tab3.out")
####################################################################################################################
# Figure 3: BRCA_pure PAM50 heatmap
#read in results and data
load(sprintf("%s/FSC_covarsF_summary.out",dir_name)); resF=res
load(sprintf("%s/FSC_covarsT_summary.out",dir_name)); resT=res
load(sprintf("%s/FSC2_covarsT_summary.out",dir_name)); resT2=res
load("BRCA_pure_env.RData"); load("./TCGA_BRCA/BRCA_raw_normalizations.RData") # raw_norm_y plotted later to show all PAM50 genes
#filter raw_norm_y according to same criteria
idx=rowMeds>=500 & mads>=quantile(mads,50/100) # gene pre-filtering criteria
colnames(raw_norm_y)=raw_anno$barcode; rownames(raw_norm_y)=rownames(raw_cts) # make sure column/row names are set
idy = colnames(raw_norm_y) %in% colnames(cts); raw_norm_y=raw_norm_y[,idy] # match according purity filtered samples
colnames(norm_y)=colnames(cts)
#row annotations showing exclusions due to pre-filtering/FSCseq FS process
genelist = rep(NA,nrow(cts))
genelist_all = rep(NA,nrow(raw_cts))
for(i in 1:nrow(cts)){ genelist[i] = unlist(strsplit(rownames(cts)[i],"|",fixed=TRUE))[1] }
for(i in 1:nrow(raw_cts)){ genelist_all[i] = unlist(strsplit(rownames(raw_cts)[i],"|",fixed=TRUE))[1] }
pam50 = c("UBE2T","BIRC5","NUF2","CDC6","CCNB1","TYMS","MYBL2","CEP55","MELK","NDC80",
"RRM2","UBE2C","CENPF","PTTG1","EXO1","ORC6L","ANLN","CCNE1","CDC20","MKI67",
"KIF2C","ACTR3B","MYC","EGFR","KRT5","PHGDH","CDH3","MIA","KRT17","FOXC1",
"SFRP1","KRT14","ESR1","SLC39A6","BAG1","MAPT","PGR","CXXC5","MLPH","BCL2",
"MDM2","NAT1","FOXA1","BLVRA","MMP11","GPR160","FGFR4","GRB7","TMEM45B","ERBB2")
genelist2=genelist[idx]
PF_incl = as.factor(pam50 %in% genelist[idx]) # 41/50 passed pre-filtering
resF_incl = as.factor(pam50 %in% genelist2[resF$discriminatory])
resT_incl = as.factor(pam50 %in% genelist2[resT$discriminatory])
resT2_incl = as.factor(pam50 %in% genelist2[resT2$discriminatory])
annotation_row = data.frame(PF_incl,
resF_incl, resT_incl, resT2_incl) # passed low count filtering (full), passed MAD pre-filtering, selected disc (disc)
colnames(annotation_row)=c("Passed PF","Disc (FSC)", "Disc (FSCadj)", "Disc (FSC10adj)")#expression(paste("Disc (",FSCadj^"10",")")))
rownames(annotation_row)=pam50
mycolors_PF=c("#FFFFFF","#000000")
mycolors_resF=c("#FFFFFF","#000000")
mycolors_resT=c("#FFFFFF","#000000")
mycolors_resT2=c("#FFFFFF","#000000")
names(mycolors_PF)=levels(PF_incl)
names(mycolors_resF)=levels(resF_incl)
names(mycolors_resT)=levels(resT_incl)
names(mycolors_resT2)=levels(resT2_incl)
mycolors2 = list("Passed PF"=mycolors_PF,
"Disc (FSC)"=mycolors_resF,
"Disc (FSCadj)"=mycolors_resT,
"Disc (FSC10adj)"=mycolors_resT2
#expression(paste("Disc (",FSCadj^"10",")"))=mycolors_resT2
)
# annotation col
# competing performers: KM, lMC
load("~/Research/P1/Real_Data/BRCA_pure3_2_med500_MAD50/KM_summary.out")
load("~/Research/P1/Real_Data/BRCA_pure3_2_med500_MAD50/lMC_summary.out")
annotation_col = data.frame(factor(lMC_summary$cls),
factor(KM_summary$cls),
factor(resF$clusters),
factor(resT$clusters),
factor(resT2$clusters),
factor(cls))
colnames(annotation_col)=c("lMC","KM","FSC","FSCadj","FSC10adj","Annotated")
rownames(annotation_col)=colnames(cts)
# order colors manually for highest consistency in coloring
newCols <- colorRampPalette(grDevices::rainbow(5))
mycolors_anno=newCols(3)
mycolors_FSC=newCols(5)[c(2,1,5,4,3)]
mycolors_FSCadj=newCols(3)[c(3,2,1)]
mycolors_FSC10adj=newCols(3)[c(3,2,1)]
mycolors_lMC=newCols(4)[c(2,1,4,3)]
mycolors_KM=newCols(2)[c(2,1)]
names(mycolors_anno)=unique(cls)[order(unique(cls))]
names(mycolors_FSC)=unique(resF$clusters)[order(unique(resF$clusters))]
names(mycolors_FSCadj)=unique(resT$clusters)[order(unique(resT$clusters))]
names(mycolors_FSC10adj)=unique(resT2$clusters)[order(unique(resT2$clusters))]
names(mycolors_lMC)=unique(lMC_summary$cls)[order(unique(lMC_summary$cls))]
names(mycolors_KM)=unique(KM_summary$cls)[order(unique(KM_summary$cls))]
mycolors1 = list(lMC=mycolors_lMC,KM=mycolors_KM,FSC=mycolors_FSC,FSCadj=mycolors_FSCadj,FSC10adj=mycolors_FSC10adj,Annotated=mycolors_anno)
# colors/breaks #
showpanel <- function(Colors){image(matrix(1:length(Colors), ncol=1), col=Colors, xaxt="n", yaxt="n" )}
oldpar <- par(mfrow=c(1,1))
my_cols<-colorRampPalette( c("yellow", "black", "cyan"), space="rgb")(100)
myBreaks <- seq(-2, 2, length.out=101)
dev.off()
# plot pheatmap PAM50 #
library(pheatmap)
rownames(raw_norm_y) = genelist_all
# png("Results/Fig3.png",height=850,width=800,res=300,type="cairo")
pheatmap(log(raw_norm_y[genelist_all %in% pam50,order(cls,resT$clusters,resF$clusters,lMC_summary$cls)]+0.1),scale="row",cluster_cols=F,
annotation_col=annotation_col,annotation_row=annotation_row,annotation_colors=c(mycolors1,mycolors2), color=my_cols,show_colnames=F,
breaks=myBreaks,main="TCGA BRCA High Purity Samples, PAM50 Genes with Annotated Exclusions from Pre-filtering and Analyses",
height=12,width=11,filename="Results/Fig3.png")
# dev.off()
# for prelim presentation
# draw_colnames_45 <- function (coln, gaps, ...) {
# coord <- pheatmap:::find_coordinates(length(coln), gaps)
# x <- coord$coord - 0.5 * coord$size
# res <- grid::textGrob(
# coln, x = x, y = unit(1, "npc") - unit(3,"bigpts"),
# vjust = 0.75, hjust = 1, rot = 45, gp = grid::gpar(...)
# )
# return(res)
# }
# assignInNamespace(
# x = "draw_colnames",
# value = "draw_colnames_45",
# ns = asNamespace("pheatmap")
# )
# png("C:/Users/limdd/Dropbox/Dissertation/Presentation/P1_PAM50.png",height=700,width=900)
# pheatmap(t(log(raw_norm_y[genelist_all %in% pam50,order(cls,resT$clusters,resF$clusters)]+0.1)),scale="column",cluster_rows=F,
# annotation_row=annotation_col,annotation_col=annotation_row,annotation_colors=c(mycolors2,mycolors1), color=my_cols,show_rownames=F,
# breaks=myBreaks,main="TCGA BRCA High Purity Samples, PAM50 Genes with Annotated Exclusions from Pre-filtering and Analyses",)
# dev.off()
####################################################################################################################
# Supp Figure 1: BRCA_pure all cluster-disc genes heatmap
load("./BRCA_pure_med500_MAD50/HC_summary.out")
load("./BRCA_pure_med500_MAD50/vMC_summary.out")
load("./BRCA_pure_med500_MAD50/rMC_summary.out")
load("./BRCA_pure_med500_MAD50/NBMB_summary.out")
load("./BRCA_pure_med500_MAD50/NMF_summary.out")
load("./BRCA_pure_med500_MAD50/iCl_summary.out")
annotation_col = data.frame(factor(rMC_summary$cls),
factor(vMC_summary$cls),
factor(lMC_summary$cls),
factor(NBMB_summary$cls),
factor(NMF_summary$cls),
factor(KM_summary$cls),
factor(HC_summary$cls),
factor(iCl_summary$cls),
factor(resF$clusters),
factor(resT$clusters),
factor(resT2$clusters),
factor(cls))
colnames(annotation_col)=c("rMC","vMC","lMC","NBMB","NMF","KM","HC","iCl","FSC","FSCadj","FSC10adj","Annotated")
rownames(annotation_col)=colnames(cts)
newCols <- colorRampPalette(grDevices::rainbow(8))
mycolors_anno=newCols(3)
mycolors_FSC=newCols(5)[c(2,1,5,4,3)]
mycolors_FSCadj=newCols(3)[c(2,3,1)]
mycolors_FSC10adj=newCols(3)[c(2,3,1)]
mycolors_lMC=newCols(4)[c(4,1,2,3)]
mycolors_rMC=newCols(4)[c(4,1,2,3)]
mycolors_vMC=newCols(4)[c(4,1,2,3)]
mycolors_KM=newCols(3)[c(2,1)]
mycolors_HC=newCols(3)[c(2,1)]
mycolors_iCl=newCols(8)[c(1:8)]
mycolors_NBMB=newCols(4)[c(1,2,4,3)]
mycolors_NMF=newCols(3)[c(2,1)]
names(mycolors_anno)=unique(cls)[order(unique(cls))]
names(mycolors_FSC)=unique(resF$clusters)[order(unique(resF$clusters))]
names(mycolors_FSCadj)=unique(resT$clusters)[order(unique(resT$clusters))]
names(mycolors_FSC10adj)=unique(resT2$clusters)[order(unique(resT2$clusters))]
names(mycolors_lMC)=unique(lMC_summary$cls)[order(unique(lMC_summary$cls))]
names(mycolors_vMC)=unique(vMC_summary$cls)[order(unique(vMC_summary$cls))]
names(mycolors_rMC)=unique(rMC_summary$cls)[order(unique(rMC_summary$cls))]
names(mycolors_KM)=unique(KM_summary$cls)[order(unique(KM_summary$cls))]
names(mycolors_HC)=unique(HC_summary$cls)[order(unique(HC_summary$cls))]
names(mycolors_iCl)=unique(iCl_summary$cls)[order(unique(iCl_summary$cls))]
names(mycolors_NBMB)=unique(NBMB_summary$cls)[order(unique(NBMB_summary$cls))]
names(mycolors_NMF)=unique(NMF_summary$cls)[order(unique(NMF_summary$cls))]
mycolors1 = list(rMC=mycolors_rMC,vMC=mycolors_vMC,lMC=mycolors_lMC,
NBMB=mycolors_NBMB,NMF=mycolors_NMF,KM=mycolors_KM,HC=mycolors_HC,
iCl=mycolors_iCl,FSC=mycolors_FSC,FSCadj=mycolors_FSCadj,FSC10adj=mycolors_FSC10adj,Annotated=mycolors_anno)
# plot pheatmap all disc genes from FSCadj #
# png("Results/SFig1.png",height=1000,width=800,res=300,type="cairo")
pheatmap(log(norm_y[idx,order(cls,resT$clusters,resF$clusters,lMC_summary$cls,vMC_summary$cls,rMC_summary$cls)][resT$discriminatory,]+0.1),scale="row",cluster_cols=F,
annotation_col=annotation_col,annotation_colors=mycolors1, color=my_cols,show_colnames=F,show_rownames=F,
breaks=myBreaks,main="TCGA BRCA High Purity Samples, Cluster-discriminatory Genes",height=14,width=11,filename="Results/SFig1.png")
# dev.off()
####################################################################################################################
# Supp Figure 2: TCGA Gene Ontology Analysis
library(TCGAbiolinks)
# Enrichment Analysis EA
# Gene Ontology (GO) and Pathway enrichment by DEGs list
Genelist <- genelist2[resF$discriminatory]
ansEA <- TCGAanalyze_EAcomplete(TFname="BRCA Disc genes found by FSC",Genelist)
TCGAvisualize_EAbarplot(tf = rownames(ansEA$ResBP),
GOBPTab = ansEA$ResBP,
GOCCTab = ansEA$ResCC,
GOMFTab = ansEA$ResMF,
PathTab = ansEA$ResPat,
nRGTab = Genelist,
nBar = 10,
filename = "./Results/SFig2_FSC.pdf"
)
Genelist <- genelist2[resT$discriminatory]
ansEA <- TCGAanalyze_EAcomplete(TFname="BRCA Disc genes found by FSCadj",Genelist)
TCGAvisualize_EAbarplot(tf = rownames(ansEA$ResBP),
GOBPTab = ansEA$ResBP,
GOCCTab = ansEA$ResCC,
GOMFTab = ansEA$ResMF,
PathTab = ansEA$ResPat,
nRGTab = Genelist,
nBar = 10,
filename = "./Results/SFig2_FSCadj.pdf"
)
####################################################################################################################
# Supp Figure 3: basalUP and basalDOWN, gene clustering --> GSEA Analysis
load("BRCA_pure_env.RData")
# cluster genes --> find genes that are upregulated/downregulated for basal subtype
library(MBCluster.Seq)
set.seed(999)
idx=rowMeds>=500 & mads>=quantile(mads,0.5)
mydata=RNASeq.Data(Count=norm_y[idx,][resT$discriminatory,],Normalize=NULL,Treatment=cls,GeneID=genelist[idx][resT$discriminatory])
c0=KmeansPlus.RNASeq(data=mydata,nK=10)$centers
fit=Cluster.RNASeq(data=mydata,centers=c0,model="nbinom",method="EM")
# manually determine which gene clusters are up/down regulated in basal subtype
pheatmap(log(norm_y[idx,][resT$discriminatory,][fit$cluster==6,order(cls)]+0.1),scale="row",cluster_cols=F,
annotation_col=annotation_col,annotation_colors=mycolors1, color=my_cols,show_colnames=F,
breaks=myBreaks,main="TCGA BRCA High Purity Samples, PAM50 Genes with Annotated Exclusions from Pre-filtering and Analyses")
basalUP_cls=c(5,6,10)
basalDOWN_cls=c(2,3,4,7,8)
lumADOWN_cls=c(1,9)
disc_genelistT=genelist2[resT$discriminatory]
disc_genelistF=genelist2[resF$discriminatory]
basalUP=disc_genelistT[fit$cluster%in% basalUP_cls]
basalDOWN=disc_genelistT[fit$cluster%in% basalDOWN_cls]
# load known gene sets to compare to
library(msigdbr)
m_df=msigdbr::msigdbr(species="Homo sapiens")
m_list = m_df %>% split(x = .$gene_symbol, f = .$gs_name)
# Calculate p-values of overlap with each of the gene sets
library(GeneOverlap)
pvals_up=rep(NA,length(m_list))
pvals_down=rep(NA,length(m_list))
for(i in 1:length(m_list)){
fitUP=newGeneOverlap(m_list[[i]],basalUP)
fitUP=testGeneOverlap(fitUP)
pvals_up[i]=fitUP@pval
fitDOWN=newGeneOverlap(m_list[[i]],basalDOWN)
fitDOWN=testGeneOverlap(fitDOWN)
pvals_down[i]=fitDOWN@pval
}
pvals_up=p.adjust(pvals_up, method = "bonferroni", n = length(pvals_up))
pvals_down=p.adjust(pvals_down, method = "bonferroni", n = length(pvals_down))
head(names(m_list)[order(pvals_up)],n=10)
head(names(m_list)[order(pvals_down)],n=10)
# Plot top overlapping gene sets
library(ggplot2)
library(gridExtra)
bUP_df=data.frame(cbind(names(m_list)[order(pvals_up)],
as.numeric(-log(pvals_up[order(pvals_up)]))))
names(bUP_df)=c("Pathways","pval")
bUP_df2=bUP_df[1:10,]
bUP_df2$Pathways=factor(bUP_df2$Pathways,levels=bUP_df2$Pathways[10:1])
bUP_df2$pval=as.numeric(as.character(bUP_df2$pval))
p1=ggplot(bUP_df2, aes(x=Pathways, y=pval)) +
geom_bar(stat = "identity") + ylab("-log(p-value)") + xlab("Pathways") +
coord_flip()+ggtitle("basalUP")
# p1
bDOWN_df=data.frame(cbind(names(m_list)[order(pvals_down)],
as.numeric(-log(pvals_down[order(pvals_down)]))))
names(bDOWN_df)=c("Pathways","pval")
bDOWN_df2=bDOWN_df[1:10,]
bDOWN_df2$Pathways=factor(bDOWN_df2$Pathways,levels=bDOWN_df2$Pathways[10:1])
bDOWN_df2$pval=as.numeric(as.character(bDOWN_df2$pval))
p2=ggplot(bDOWN_df2, aes(x=Pathways, y=pval)) +
geom_bar(stat = "identity") + ylab("-log(p-value)") + xlab("Pathways") +
coord_flip()+ggtitle("basalDOWN")
# p2
# png("./Results/SFig3.png",width=900,height=900,res=300,type="cairo")
# grid.arrange(p1,p2)
# dev.off()
p = arrangeGrob(p1,p2,nrow=2)
ggsave(file="./Results/SFig3.png",width=10,height=10, p)
####################################################################################################################
#######################
## BRCA_full results ##
#######################
dir_name="BRCA_full_med500_MAD50"
# Supp Table 8: BRCA_full results table
stab8=tabulateResults(dataset="BRCA_full",med_filt=500,MAD_filt=50)
save(stab8,file="Results/STab8.out")
####################################################################################################################
# Supp Figure 4: BRCA_full all cluster-disc genes heatmap
load("./BRCA_full_med500_MAD50/FSC_covarsF_summary.out"); resF=res
load("./BRCA_full_med500_MAD50/FSC2_covarsF_summary.out"); resF2=res
load("./BRCA_full_med500_MAD50/FSC_covarsT_summary.out"); resT=res
load("./BRCA_full_med500_MAD50/FSC2_covarsT_summary.out"); resT2=res
load("BRCA_full_env.RData")
idx=rowMeds>=500 & mads>=quantile(mads,0.5)
# annotation col
# competing best performer: KM
load("./BRCA_full_med500_MAD50/KM_summary.out")
annotation_col = data.frame(factor(anno$cls_sigCL_intrinsic),
factor(anno$cls_sigCL_unsupervised),
factor(KM_summary$cls),
factor(resF$clusters),
factor(resF2$clusters),
factor(resT$clusters),
factor(resT2$clusters),
factor(cls))
colnames(annotation_col)=c("SigI","SigU",
"KM","FSC","FSC10","FSCadj","FSC10adj","Anno")
rownames(annotation_col)=colnames(cts)
newCols <- colorRampPalette(grDevices::rainbow(14))
mycolors_anno=newCols(5)
mycolors_FSC=newCols(7)[c(4,2,3,1,5,6,7)]
mycolors_FSC10=newCols(5)[c(4,5,1,3,2)]
mycolors_FSCadj=newCols(8)[c(3,2,5,7,4,6,1,8)]
mycolors_FSC10adj=newCols(5)[c(3,4,5,1,2)]
mycolors_I=newCols(14)[c(5,2,3,4,1,6:14)]
mycolors_U=newCols(13)
mycolors_KM=newCols(14)[c(12,1)]
names(mycolors_anno)=unique(cls)[order(unique(cls))]
names(mycolors_FSC)=unique(resF$clusters)[order(unique(resF$clusters))]
names(mycolors_FSC10)=unique(resF2$clusters)[order(unique(resF2$clusters))]
names(mycolors_FSCadj)=unique(resT$clusters)[order(unique(resT$clusters))]
names(mycolors_FSC10adj)=unique(resT2$clusters)[order(unique(resT2$clusters))]
names(mycolors_I)=unique(anno$cls_sigCL_intrinsic)[order(unique(anno$cls_sigCL_intrinsic))]
names(mycolors_U)=unique(anno$cls_sigCL_unsupervised)[order(unique(anno$cls_sigCL_unsupervised))]
names(mycolors_KM)=unique(KM_summary$cls)[order(unique(KM_summary$cls))]
mycolors1 = list(SigI=mycolors_I, SigU=mycolors_U,
KM=mycolors_KM,FSC=mycolors_FSC,FSC10=mycolors_FSC10,FSCadj=mycolors_FSCadj,FSC10adj=mycolors_FSC10adj,Anno=mycolors_anno)
# colors/breaks #
showpanel <- function(Colors){image(matrix(1:length(Colors), ncol=1), col=Colors, xaxt="n", yaxt="n" )}
oldpar <- par(mfrow=c(1,1))
my_cols<-colorRampPalette( c("yellow", "black", "cyan"), space="rgb")(100)
myBreaks <- seq(-2, 2, length.out=101)
dev.off()
# png("./Results/SFig4.png",height=980,width=930,res=300,type="cairo")
pheatmap(log(norm_y[idx,order(cls,resT2$clusters,resT$clusters)][resT$discriminatory,]+0.1),scale="row",cluster_cols=F,
annotation_col=annotation_col,annotation_colors=mycolors1, color=my_cols,show_colnames=F,show_rownames=F,
breaks=myBreaks,main="TCGA BRCA All Samples, Disc Genes from FSCseq, Ordered by Subtype",height=14,width=12,filename="./Results/SFig4.png")
# dev.off()
####### Correlation Plots #######
corPlots <- function(dataset = c("BRCA_pure", "BRCA_full"), covars) {
  # Render an upper-triangle correlation heatmap of the cluster-discriminatory
  # genes selected by FSCseq for one dataset/covariate configuration.
  #
  # dataset: "BRCA_pure" (high-purity samples) or "BRCA_full" (all samples).
  #          NOTE: the original default listed "BRCA_plate", which no caller
  #          in this script uses; corrected to "BRCA_full".
  # covars:  "T"/"F" flag selecting the FSC_covars* result file to load.
  # Side effect: writes <dataset>_med500_MAD50/CEM_covars<covars>_corPlot.png.
  library(corrplot)
  # Defines `norm_y`, `rowMeds`, `mads` (and more) in this frame.
  load(sprintf("%s_env.RData", dataset))
  # Same gene pre-filtering used throughout: median count >= 500 and
  # MAD at or above its 50th percentile.
  filt_idx <- rowMeds >= 500 & mads >= quantile(mads, 0.5)
  norm_y <- norm_y[filt_idx, ]
  # Defines `res`, whose $discriminatory flags the selected genes.
  load(sprintf("%s_med500_MAD50/FSC_covars%s_summary.out", dataset, covars))
  disc_norm_y <- norm_y[res$discriminatory, ]
  # Gene-gene correlations (samples are the observations).
  cors <- cor(t(disc_norm_y))
  covar_text1 <- if (dataset == "BRCA_pure") "BRCA (high purity samples)" else "BRCA (all samples)"
  covar_text2 <- if (covars == "T") "Adjusted for Plate" else "Not adjusted for Plate"
  png(sprintf("%s_med500_MAD50/CEM_covars%s_corPlot.png", dataset, covars),
      res = 300, width = 1200, height = 1300, type = "cairo", pointsize = 6)
  # Close the device even if corrplot() errors (the original leaked the
  # device on error because dev.off() only ran on the success path).
  on.exit(dev.off(), add = TRUE)
  par(xpd = TRUE)
  corrplot(cors, method = "color", tl.pos = "td", tl.cex = 0.2,
           mar = c(0, 0, 3, 0), tl.col = "black", type = "upper",
           order = "hclust", diag = FALSE,
           main = sprintf("Correlations of %s cluster-discriminatory genes \n%s",
                          covar_text1, covar_text2))
}
corPlots("BRCA_pure","F")
corPlots("BRCA_pure","T")
corPlots("BRCA_full","F")
corPlots("BRCA_full","T")
####### FSC2 and FSCadj2 #######
# finding smallest model in bottom 10% of BIC
# Scan all saved FSCseq fits on the (K, lambda, alpha) hyperparameter grid
# and return the fit with the fewest estimated parameters among the 10% of
# grid points with the lowest BIC.
# dataset: "BRCA_pure" or "BRCA_full"; covars: "T"/"F" covariate flag.
# Returns a named numeric row: K, l(ambda), a(lpha), BIC, ndisc, nparams.
opt_model = function(dataset,covars){
# Hyperparameter grid matching the file names produced by the fitting runs.
K_search=2:8; lambda_search=seq(0.25,5,0.25); alpha_search=c(0.01,seq(0.05,0.5,0.05))
npoints = length(K_search)*length(lambda_search)*length(alpha_search)
# One row per grid point; stays NA in cols 4-6 where the result file is missing.
BICs=matrix(NA,nrow=npoints,ncol=6)
index=1
colnames(BICs) = c("K","l","a","BIC","ndisc","nparams")
for(c in 1:length(K_search)){for(l in 1:length(lambda_search)){for(a in 1:length(alpha_search)){
BICs[index,1:3] = c(K_search[c], lambda_search[l],alpha_search[a])
# File name pattern written by the fitting jobs (%f formats lambda/alpha).
file.name=sprintf("%s_med500_MAD50/joint%d_%f_%f_gene_CEM_covars%s.out", dataset,K_search[c],lambda_search[l],alpha_search[a],covars)
# Each result file defines `res`; a missing file is reported, not fatal.
if(file.exists(file.name)){load(file.name); BICs[index,4:6] = c(res$BIC, sum(res$discriminatory), res$num_est_params)}else{print(file.name)}
index=index+1
}}}
#BICs[order(BICs[,4])[1:100,],]
return(
# Among the floor(npoints/10) best-BIC rows, take the one minimizing nparams
# (column 6), then index back into the BIC-sorted matrix to return that row.
BICs[order(BICs[,4]),] [which.min(BICs[order(BICs[,4])[1:floor(npoints/10)],][,6]),] # lowest nparams from bottom 10% of BIC
)
}
# Select the optimal (K, lambda, alpha) for each dataset/covariate setting;
# the comment lines below record the rows returned on the original run.
p1=opt_model("BRCA_pure","F")
p2=opt_model("BRCA_pure","T")
# covarF : 2.00 0.25 0.50 8704580.49 1169.00
# covarT : 3 1.00 0.30 9789277 1324
f1=opt_model("BRCA_full","F")
f2=opt_model("BRCA_full","T")
# covarF: 5.0 1.5 0.1 42533187.3 2800.0
# covarT: 5.00 3.25 0.05 43079204.94 2834.00
# Reload one saved FSCseq fit plus the dataset environment, and report
# (a) the ARI of its clustering against the annotated labels and
# (b) how many cluster-discriminatory genes it selected.
check_opt_model <- function(dataset, covars, K, lambda, alpha) {
  fit_file <- sprintf("%s_med500_MAD50/joint%d_%f_%f_gene_CEM_covars%s.out",
                      dataset, K, lambda, alpha, covars)
  load(fit_file)                          # defines `res`
  load(sprintf("%s_env.RData", dataset))  # defines `cls` (annotated labels)
  library(mclust)                         # adjustedRandIndex()
  list(
    ARI = adjustedRandIndex(res$clusters, cls),
    ndisc = sum(res$discriminatory)
  )
}
# Report ARI vs annotated labels and disc-gene count for each selected model;
# the comment lines below record the results from the original run.
check_opt_model("BRCA_pure","F",p1[1],p1[2],p1[3])
check_opt_model("BRCA_pure","T",p2[1],p2[2],p2[3])
# covarF : 2.00 0.25 0.50 8704580.49 1169.00 9246.00 ----> ARI = 0.486
# covarT : 3 1.00 0.30 9789277 1324 106377.0 ----> ARI = 0.610
check_opt_model("BRCA_full","F",f1[1],f1[2],f1[3])
check_opt_model("BRCA_full","T",f2[1],f2[2],f2[3])
# covarF: 5.0 1.5 0.1 42533187.3 2800.0 12867.0 ----> ARI = 0.254
# covarT: 5.0 1.5 0.1 43072187.8 2900.0 59580.0 ----> ARI = 0.288
|
944ec7e24ce8a84d7a8e11eebb1e60ab32cd9c69
|
b3f7a879eef559db83eae1647f12efde56d16db0
|
/inst/script/testCounts.R
|
e17c5d744ccc66c91014941bc127028867418e9e
|
[] |
no_license
|
alenzhao/copy-number-analysis
|
c97ae46b1dda04d1806f136050038c8cf3a54341
|
865ee38fcd621ef720851f11d30b965d60b8890a
|
refs/heads/master
| 2020-12-24T12:33:51.394361
| 2015-10-16T17:25:09
| 2015-10-16T17:25:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 972
|
r
|
testCounts.R
|
## Our motivation for this gist comes from the fact that we would like
## to check the counts calculated by each of the methods.
## Here we provide a simple unitTest which takes as input a GRanges
## object which contains the reads from the "tumorA.chr4.bam" file.
## we choose 5 regions and get their counts from samtools using a simple
## command :
## samtools view tumorA.chr4.bam chr4:8000001-8010000 | wc -l 67
## we can then use this function to check if the counts coming out of
## a given method are equal to the counts given by samtools
## we assume that the metadata column containing the counts in the GRanges
## object is called as "cnt"
testCounts <- function(grTumor) {
  # Unit test: verify that per-window read counts stored in the `cnt`
  # metadata column of `grTumor` match counts obtained independently with
  # `samtools view tumorA.chr4.bam <region> | wc -l` for five windows.
  #
  # grTumor: GRanges of genomic windows with a `cnt` metadata column
  #          (assumed numeric — TODO confirm against the counting method).
  # Errors (via RUnit::checkEquals) if any count deviates beyond tolerance.
  test_indices <- c(8000001, 8010001, 10000001, 10010001, 1000001)
  test_res <- c(67, 62, 67, 74, 47)  # reference counts from samtools
  # Look up the stored count for the window starting at each test position.
  counts <- vapply(
    test_indices,
    function(pos) grTumor[which(start(ranges(grTumor)) == pos)]$cnt,
    numeric(1)
  )
  # BUG FIX: the original compared against the undefined name `indices`;
  # the computed vector is `counts`.
  checkEquals(test_res, counts, tolerance = 2)
}
|
b421ada2bbcb4f146bd5e33d128e44dcb665bdb8
|
9353846d41911d70f722c5688bab6223e94da199
|
/Adonis_PCA/DNA/DNA_Adonis_Variable_P_Value_Matcher.R
|
48368c4ca3026af99dbf7f6b86120a607269d00b
|
[] |
no_license
|
smadapoosi/Lung_Microbiome_Thesis
|
b5bd0f5fd431035827ed00845e947312d73c56b7
|
276f4b14e184be195cb6672702b43a4663aed142
|
refs/heads/master
| 2020-03-27T22:56:13.013664
| 2018-12-03T17:59:49
| 2018-12-03T17:59:49
| 147,273,812
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 219
|
r
|
DNA_Adonis_Variable_P_Value_Matcher.R
|
## Pair each Adonis variable name with its p-value (row order is assumed to
## correspond between the two input files) and export the pairs as CSV.
adonis_variables <- read.table("~/DNA_Adonis_Variables.txt")
adonis_pvalues <- read.table("~/DNA_Adonis_P_Vals.txt")
matched_pvals <- cbind(adonis_variables, adonis_pvalues)
write.csv(matched_pvals, file = "~/DNA_Adonis_Matched_PVals.csv")
|
5de6ac20e290d8aab4ca35776b0014d0f6105d33
|
d474efb74fd5268fd908a2a5394a8ecc97e28f3b
|
/R/profiles.R
|
c24e05e94ca91589d0462494f57308f4cb0361dc
|
[] |
no_license
|
bradleycolquitt/seqAnalysis
|
e1f2fbefa867ee11a51801aeeaf57ebc357a0057
|
d2c37fb0609754a0ec4e263dda27681717087523
|
refs/heads/master
| 2021-01-01T05:42:03.060788
| 2017-05-28T02:47:58
| 2017-05-28T02:47:58
| 2,284,790
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,827
|
r
|
profiles.R
|
####
# make profile of seq data signal over aligned features
# use ".profiles"
# tapply based on group
####
source("~/src/R/LEA/dipdata.R")
source("~/src/R/util.R")
#annotations_lib <- list.files("~/lib/annotations_old")
annotation_lib <- list.files("~/lib/annoset_export")
#annotation_lib <- c("exon_ends_W200N25","exon_starts_W200N25","transcSS_W200N50","gene_whole_W200N50F50","transcES_W200N50")
hmedips <- paste(c("omp","ngn","icam"), "hmedip", sep="_")
medips <- paste(c("omp", "ngn", "icam"), "medip", sep="_")
# Map a short annotation label to the full annotation-set file name.
# Returns NULL (invisibly) for an unrecognised label, matching the
# behaviour of the original if/else chain.
labelDecode <- function(input) {
  switch(input,
         gene        = "gene_whole_W200N50F50",
         transcSS    = "transcSS_W200N50",
         transcES    = "transcES_W200N50",
         exon_starts = "exon_starts_W200N50",
         exon_ends   = "exon_ends_W200N50")
}
# Intersect a DipData genome-level table with an annotation set and average
# the requested signal column within each annotation group.
#
# Args:
#   dipdata.name: name passed to load.DipData() (helper from dipdata.R).
#   type: column of the genome data to aggregate.
#   anno.name: file name of a BED-like annotation table (chr, start, end,
#     name, group, strand) under anno.path.
#   FUN: currently unused -- the aggregation below is hard-coded to mean().
#     NOTE(review): either pass FUN to tapply or drop the argument.
#
# Returns the per-group mean signal (named vector from tapply), after also
# plotting it via plotProfiles().
intersectDipDataAnno <- function(dipdata.name, type, anno.name, FUN) {
  dipdata <- load.DipData(dipdata.name)
  genome <- dipdata$genome.data
  #anno.path <- "/home/user/lib/annotations"
  anno.path <- "/home/user/lib/annoset_export"
  anno <- read.delim(paste(anno.path, anno.name, sep="/"), header=FALSE,
                     colClasses=c('character', 'integer', 'integer', 'character', 'integer',
                                  'character'),
                     col.names=c('chr','start','end','name','group','strand')
                     )
  # Append a dummy chrM row -- presumably so anno.split below has a chrM
  # entry aligned with the genome data's chromosomes (TODO confirm).
  chrM.vec <- c("chrM", 0, 0, "", 0, "")
  anno <- rbind(anno, chrM.vec)
  # Split both tables by chromosome; bigsplit comes from the sourced utils.
  genome.split <- bigsplit(genome, 'chr', splitcol = 2)
  anno.split <- split(anno, anno$chr)
  # For each chromosome (in parallel), map every genome position to the
  # annotation group whose start coordinate matches it; NA when unmatched.
  combined.group.index <- foreach(curr.genome= genome.split, curr.anno= anno.split,
                                  .combine = c) %dopar% {
    # index <- rep(NA, times=length(curr.genome))
    index <- curr.anno[match(curr.genome, curr.anno$start),'group']
    # output.mat <- matrix(NA, dim=c(length(curr.genome),2))
    # output.vec <- rep(NA, times=length(curr.genome))
    # output.mat[,1] <- curr.anno[index, 'group']
    return(index)
  }
  #return(combined.group.index)
  # Average the requested signal column within each annotation group.
  result <- tapply(genome[,type], combined.group.index, mean)
  plotProfiles(result, paste(dipdata.name, type, anno.name, sep="_"))
  return(result)
  # result <- tapply(combined.groups[,2], combined.groups[,1], FUN)
}
# Read the precomputed profile table for each sample, summarise the read
# counts per profile position (group), and plot each resulting profile.
#
# Args:
#   samples: character vector of sample names (e.g. "omp_hmedip").
#   type: data type used to build the profile directory name.
#   FUN: summary function applied to reads within each group (default mean).
#   write: if TRUE, save each plot as a PNG via plotProfiles() instead of
#     drawing to the screen.
#
# Returns a list with one per-group summary vector per sample/annotation.
generateProfiles <- function(samples, type, FUN = mean, write = FALSE) {
  output <- list()
  # NOTE(review): this local assignment shadows the file-level
  # annotation_lib and restricts processing to a single annotation set.
  annotation_lib <- "tss_W200N100_export"
  for (sample in samples) {
    for (anno in annotation_lib) {
      input.path <- paste("~/analysis/profiles", paste(sample, type, "profiles", sep = "_"),
                          anno, sep = "/")
      input <- read.delim(input.path, header = FALSE,
                          col.names = c("chr", "start", "end",
                                        "name", "group", "strand", "reads"),
                          colClasses = c("character", "integer", "integer", "character",
                                         "integer", "character", "integer"))
      result <- with(input, tapply(reads, group, FUN))
      if (write) {
        # BUG FIX: the file path was previously passed positionally as the
        # `title` argument of plotProfiles(), so no PNG was ever written.
        plotProfiles(result, title = anno,
                     file_name = paste(sample, type, paste("profiles/", anno, sep = ""), sep = "_"))
      } else {
        plotProfiles(result, title = anno)
      }
      output <- lappend(output, c(result))
    }
  }
  return(output)
}
# Plot a profile vector as a line, either interactively (X11 window) or to
# a PNG under ~/analysis/profiles/ when file_name is supplied. Extra
# arguments in ... are forwarded to lines().
plotProfiles <- function(profile, title = NULL, file_name = NULL, ...) {
  to_file <- !is.null(file_name)
  if (to_file) {
    # Open a PNG device at ~/analysis/profiles/<file_name>.png
    file.path <- paste("~/analysis/profiles/", file_name, sep = "")
    png(paste(file.path, ".png", sep = ""))
  } else {
    x11("", 7, 7)
  }
  # Empty canvas spanning the profile's index range and rounded value range,
  # then draw the profile on top.
  plot(1, 1, type = "n",
       xlim = c(0, length(profile)),
       ylim = c(floor(min(profile)), ceiling(max(profile))),
       main = title)
  lines(profile, ...)
  if (to_file) dev.off()
}
|
c970725c16b1b7ade3b38da6ddc7a8cf183ec6c8
|
5742ee9f6d59c1db4059f2fcadf17ff71613e281
|
/R/rgb.R
|
15a39acd0b538a7e32e7e73c0cbf9e2361d58106
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
jonclayden/RNifti
|
1fd116cc808f5ebdbed31c4cb1cebfd97106d68b
|
73ab7c0c847e5eac54055c9dbee46572c48cc02e
|
refs/heads/master
| 2023-05-24T08:31:19.263204
| 2023-05-17T18:11:02
| 2023-05-17T18:11:02
| 65,200,850
| 45
| 14
| null | 2020-09-04T14:13:45
| 2016-08-08T12:02:07
|
C
|
UTF-8
|
R
| false
| false
| 5,297
|
r
|
rgb.R
|
#' RGB arrays
#'
#' The \code{rgbArray} function constructs an integer array whose values are
#' byte-packed representations of 8-bit RGBA colour values. The \code{channels}
#' attribute (with value 3 or 4) indicates how many channels are being used.
#' The resulting array can be used to construct an RGB(A) NIfTI image, or
#' converted to standard R colour strings using the \code{as.character} method.
#'
#' @param red A numeric vector (or array) of red channel values. If this is the
#' only channel argument, it can also be a character vector of colour values
#' (including alpha, if required), or a numeric array whose last dimension is
#' 2 (for grey + alpha), 3 (for RGB) or 4 (for RGBA).
#' @param green,blue,alpha Numeric vectors (or arrays) containing values for
#' the appropriate channel. These will be combined with the \code{red} values
#' using \code{cbind}, and hence recycled as necessary. Alpha, or green and
#' blue, can be missing.
#' @param max The maximum possible value for any channel. The default
#' is 255 when the data is of integer mode, and 1 otherwise. Values above
#' this, or below zero, will be clipped to the appropriate extreme.
#' @param dim An integer vector of dimensions for the final array. The
#' dimensions of \code{red} are used if this is \code{NULL}.
#' @param ... For \code{rgbArray}, additional attributes to set on the result,
#' such as \code{pixdim}. These are passed directly to
#' \code{\link{structure}}. For the \code{as.character} method, this argument
#' is ignored.
#' @param x An \code{rgbArray} object.
#' @param flatten Logical value. If \code{FALSE}, the dimensions of \code{x}
#' will be retained in the result. The default is \code{TRUE}, for
#' consistency with the usual behaviour of \code{as.character}, which strips
#' all attributes.
#' @return \code{rgbArray} returns an integer-mode array of class
#' \code{"rgbArray"}. The \code{as.character} method returns a character-mode
#' vector of colour strings.
#'
#' @note The values of an \code{"rgbArray"} are not easily interpreted, and
#' may depend on the endianness of the platform. For manipulation or use as
#' colours they should generally be converted to character mode, or the
#' channels extracted using the \code{\link{channels}} function.
#'
#' @author Jon Clayden <code@@clayden.org>
#' @export
rgbArray <- function (red, green, blue, alpha, max = NULL, dim = NULL, ...)
{
    # `source` collects the raw per-channel values column-wise; `channels`
    # records how many channels the input carries (2, 3 or 4).
    source <- NULL
    channels <- 0L
    if (!missing(green) && !missing(blue) && !missing(alpha))
    {
        # Full RGBA: four separate vectors, recycled by cbind().
        source <- cbind(red, green, blue, alpha)
        channels <- 4L
    }
    else if (!missing(green) && !missing(blue))
    {
        # RGB without alpha.
        source <- cbind(red, green, blue)
        channels <- 3L
    }
    else if (!missing(alpha))
    {
        # Grey + alpha (two channels).
        source <- cbind(red, alpha)
        channels <- 2L
    }
    else if (is.character(red))
    {
        # Colour strings: anything longer than 7 characters (i.e. beyond
        # "#RRGGBB") is taken to carry an alpha channel. Note base::max is
        # dispatched here even though `max` is also an argument name.
        channels <- ifelse(max(nchar(red),na.rm=TRUE) > 7L, 4L, 3L)
        source <- t(col2rgb(red, alpha=(channels==4L)))
    }
    else if (is.numeric(red) && is.array(red))
    {
        # A single numeric array whose final dimension indexes the channels.
        source <- red
        channels <- dim(red)[ndim(red)]
        if (channels < 2L || channels > 4L)
            stop("If only one numeric argument is supplied, its last dimension must be 2, 3 or 4")
        if (is.null(dim))
            # Drop the channel dimension to get the output dimensions.
            dim <- dim(red)[-ndim(red)]
    }
    else
        stop("The combination of channels provided is not supported")
    # Fall back to the dimensions (or plain length) of the first argument
    # when no explicit output dimensions were given.
    if (is.null(dim))
        dim <- dim(red)
    if (is.null(dim))
        dim <- length(red)
    # Integer input defaults to the 8-bit range [0,255]; otherwise [0,1].
    if (is.null(max))
        max <- switch(storage.mode(source), integer=255, 1)
    # Byte-pack the channels in compiled code and tag the result with its
    # channel count, dimensions and class (plus any extra attributes in ...).
    result <- .Call("packRgb", source, channels, max, PACKAGE="RNifti")
    return (structure(result, ..., channels=channels, dim=dim, class="rgbArray"))
}
#' @rdname rgbArray
#' @export
as.character.rgbArray <- function (x, flatten = TRUE, ...)
{
    # Convert the packed RGB(A) values to colour strings in compiled code.
    strings <- .Call("rgbToStrings", x, PACKAGE="RNifti")
    if (flatten)
        return (strings)
    # Preserve the original array shape when not flattening.
    dim(strings) <- dim(x)
    return (strings)
}
#' Extract channels from RGB data
#'
#' Extract one or more channels from an RGB data array that was obtained from
#' an RGB NIfTI image or created by the \code{\link{rgbArray}} function. The
#' result is more amenable to numeric manipulation.
#'
#' @param array An image, an \code{rgbArray}, or another array that can be
#' converted to the latter.
#' @param channels A character vector of channels to extract.
#' @param raw Boolean value: if \code{TRUE}, return a raw array, which is the
#' most compact representation; otherwise return an integer array.
#' @return A raw-mode or integer-mode array with one more dimension than the
#' first argument, corresponding to channels.
#'
#' @author Jon Clayden <code@@clayden.org>
#' @export
channels <- function (array, channels = c("red","green","blue","alpha"), raw = FALSE)
{
    # Coerce plain arrays into the packed rgbArray representation first.
    if (!inherits(array,"niftiImage") && !inherits(array,"rgbArray"))
        array <- rgbArray(array)
    # Validate the requested channel names (partial matches allowed) and map
    # them to channel indices (R=1, G=2, B=3, A=4) for the compiled routine.
    channels <- match.arg(channels, several.ok=TRUE)
    channelNumbers <- c(red=1L, green=2L, blue=3L, alpha=4L)[channels]
    result <- .Call("unpackRgb", array, channelNumbers, PACKAGE="RNifti")
    # Raw mode is the most compact; otherwise widen to integer so the values
    # are easier to use in arithmetic.
    if (!raw)
        storage.mode(result) <- "integer"
    # Name the extra trailing dimension after the extracted channels.
    dimnames(result) <- c(rep(list(NULL),ndim(array)), list(channels))
    return (result)
}
|
53a304244a7248d7aa9cf0421213e9b20cd64b9e
|
bda5e3cff73cd238eb4ef476e04a6484a0638733
|
/man/ltsk.Rd
|
ec0df3c6c1b988689a1cde45453ff238c0ab60c1
|
[] |
no_license
|
cran/ltsk
|
4f4468b016fc2195f07c7cab5cea3e1059cf8e95
|
b295006fa023a0536c5394630d64615043fe2e08
|
refs/heads/master
| 2022-11-07T14:54:12.832314
| 2022-10-10T18:40:02
| 2022-10-10T18:40:02
| 17,697,223
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,174
|
rd
|
ltsk.Rd
|
\name{ltsk}
\alias{ltsk}
\title{
Ordinary Local Time and Space Kriging
}
\description{
Function implements ordinary time and space kriging for large data sets, with automatic product-sum variogram estimation.
}
\usage{
ltsk(query, obs, th, xcoord = "x", ycoord = "y", tcoord = "t",
zcoord = "z", vth = NULL, vlen = NULL, llim = c(3, 3),
verbose = T, Large = 2000, future=T, cl = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{query}{
a data frame containing query spatiotemporal locations for which predictions are needed
}
\item{obs}{
a data frame containing spatiotemporal locations and observed data
}
\item{th}{
a vector, distance threshold and time lag to define neighbors of a query point
}
\item{xcoord}{
a character constant, the field name for x coordinate in both \code{query} and \code{obs}
}
\item{ycoord}{
a character constant, the field name for y coordinate in both \code{query} and \code{obs}
}
\item{tcoord}{
a character constant, the field name for time coordinate in both \code{query} and \code{obs}
}
\item{zcoord}{
a character constant, the field name for data in \code{obs}
}
\item{vth}{
thresholds for local spatiotemporal variogram (default 75\% of the max lag difference)
}
\item{vlen}{
numbers of bins for local spatiotemporal variogram (default: space 15, temporal for each day)
}
\item{llim}{
lower limits for number of regions and intervals with observed data to calculate Kriging (default 3 spatial regions, 3 temporal intervals)
}
\item{verbose}{
logical, whether print details information
}
\item{Large}{
a numeric constant, upper limit of neighbor points, beyond which subsampling is performed
}
\item{future}{
logical, whether including observed points in future relative to query points.
}
\item{cl}{
a \pkg{parallel} cluster object (default number of cores in local PC minus one), 0 means single core
}
}
\details{
Function implements automatic variogram estimation (when possible) within a local spatiotemporal neighborhood, and ordinary kriging based on the product-sum variogram within that neighborhood. A variogram is estimated for each query point to allow for possible non-stationarity in the data-generating field.
If the number of neighbors exceeds a user-specified upper limit (\code{Large}), neighbors are sub-sampled in a balanced way to reduce the neighborhood size.
Four variogram models: Gaussian, exponential, spherical and Matern are automatically fit to the empirical space and time variogram in the first lag. The range parameter is estimated from the first distance lag where the empirical variogram exceeds 80\% of the maximum. Weighted least square is then used to estimate the nugget and partial sill parameters. Model with minimal residual sum of squares between the empirical and fitted variogram is chosen as the variogram model.
}
\value{
Kriging mean and standard deviation and quality flags.
\tabular{ll}{
0 \tab valid prediction\cr
1 \tab not enough temporal neighbors \cr
2 \tab not enough spatial neighbors \cr
3 \tab not enough neighbors \cr
4 \tab variogram could not be fit \cr
}
}
\references{
Haas, Timothy C. "Local prediction of a spatio-temporal process with an application to wet sulfate deposition." Journal of the American Statistical Association 90.432 (1995): 1189-1199.
Iaco, S. De & Myers, D. E. & Posa, D., 2001. "Space-time analysis using a general product-sum model," Statistics & Probability Letters, Elsevier, vol. 52(1), pages 21-28, March.
Kumar, N., et al. (2013). "Satellite-based PM concentrations and their application to COPD in Cleveland, OH." Journal of Exposure Science and Environmental Epidemiology 23(6): 637-646.
Liang, D. and N. Kumar (2013). "Time-space Kriging to address the spatiotemporal misalignment in the large datasets." Atmospheric Environment 72: 60-69.
}
\author{
Naresh Kumar (NKumar@med.miami.edu)
Dong Liang (dliang@umces.edu)
}
\examples{
## load the data
data(ex)
data(epa_cl)
## apply log transformation
obs[,'pr_pm25'] = log(obs[,'pr_pm25'])
## run kriging
system.time(out <- ltsk(ex2.query[1:2,],obs,c(0.10,10),zcoord='pr_pm25',verbose=FALSE,cl=0))
table(out$flag)
}
|
6a1cebbe17d554709d4734ccf8f0be690db912c9
|
838de16554593428fa0d307a5a72dbea3103f850
|
/tests/testthat/test_parsing.R
|
a180c84f375192864d00fe5ea5555e2ab06f1a6d
|
[
"MIT"
] |
permissive
|
Berghopper/GAPGOM
|
39228f2c68af23811a555730bd202f063a37aadd
|
cc14c79958c60f86ef00ba812921b8e4c4d56161
|
refs/heads/master
| 2021-07-11T20:35:53.671868
| 2020-07-01T11:58:00
| 2020-07-01T11:58:00
| 159,685,342
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,154
|
r
|
test_parsing.R
|
# Unit tests (testthat) for the package's internal parsing helpers.
context("parsing functions")

# .entrezraw_to_entrez() strips the "entrezgene:" prefix from raw FANTOM
# identifiers; the second expectation checks that it is vectorised.
test_that("entrez raw (fantom) id to entrez", {
  expect_equal(.entrezraw_to_entrez("entrezgene:123"), "123")
  expect_equal(.entrezraw_to_entrez(c("entrezgene:123", "entrezgene:345")), c("123", "345"))
})

# Counting occurrences of each GO term: extra columns (here FAKEID) must not
# affect the result. NOTE(review): the helper names .term_id_to_ext_id /
# .ext_id_to_term_id look swapped relative to their behaviour -- confirm.
test_that("GO quantification", {
  testdf <- data.frame(list(GO=c("GO:123", "GO:123", "GO:1234")))
  testdf2 <- data.frame(list(FAKEID=c("1", "2", "3"),
                             GO=c("GO:123", "GO:123", "GO:1234")))
  #result_structure <- structure(
  #  1:2, .Label = c("GO:123", "GO:1234"), class = "factor"), N = 2:1),
  #  row.names = c(NA, -2L), class = "data.frame")
  # Expected: each GO term with its count, as a plain data.frame.
  result_structure <- structure(
    list(GO = c("GO:123", "GO:1234"), N = 2:1),
    row.names = c(NA, -2L), class = "data.frame")
  expect_equal(.term_id_to_ext_id(testdf), result_structure)
  expect_equal(.term_id_to_ext_id(testdf2), result_structure)
})

# On a 10-gene subset of the translation table, the helper should reproduce
# the stored fixture gapgom_tests$goquant2.
test_that("GO quantification subset", {
  expect_equal(.ext_id_to_term_id(id_translation_df[
    id_translation_df$ORIGID %in% unique(id_translation_df$ORIGID)[1:10],],
    c("ENSG00000148516", "ENSG00000006534")), gapgom_tests$goquant2)
})
|
e2a0c76e3a6b8af617ee0ab1fd6692565bdb7328
|
115c4aa1bd1de857dda966c0b5172eaf8b25e563
|
/run_analysis.R
|
bf607990c94a2e015dc71c8253e78c8ed8c96aa2
|
[] |
no_license
|
fourmajor/datacleaningfinalproject
|
53db88531cdf211910f4ce7df484db0a681f9d7b
|
c84e4894b097789f856a42d470b4d9d742aa38c3
|
refs/heads/master
| 2020-05-29T14:40:50.638855
| 2016-07-27T17:51:30
| 2016-07-27T17:51:30
| 64,329,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,587
|
r
|
run_analysis.R
|
library(dplyr)
library(reshape2)  # FIX: melt()/dcast() below come from reshape2, which was never loaded

## Build a tidy data set from the UCI HAR files: keep only mean/std
## measurements, merge the training and test sets, label the activities,
## and write the average of each variable per subject/activity combination.

# get the names of the variables and the index for the means
# and standard deviations
labels <- read.table("features.txt")[, 2]
labelsstdmeanindex <- grep("mean|std", labels)

# read all the training data, keeping only the columns for means and
# standard deviations, then attach the subject and activity columns
train <- read.table("train/X_train.txt", col.names = labels)[, labelsstdmeanindex] %>%
  cbind(read.table("train/subject_train.txt", col.names = "subject")) %>%
  cbind(read.table("train/y_train.txt", col.names = "activity", colClasses = "factor"))

# read all the testing data the same way
test <- read.table("test/X_test.txt", col.names = labels)[, labelsstdmeanindex] %>%
  cbind(read.table("test/subject_test.txt", col.names = "subject")) %>%
  cbind(read.table("test/y_test.txt", col.names = "activity", colClasses = "factor"))

# combine training and testing data, throwing out temporary variables
alldata <- rbind(train, test)
rm(train, test)

# rename the activity factor levels (codes 1-6 in the raw data)
levels(alldata$activity) <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING",
                              "STANDING", "LAYING")

# melt to one row per subject/activity/variable combo, then cast back to
# wide form, taking the mean of each variable per subject/activity combo
casted <- melt(alldata, id = c("subject", "activity")) %>%
  dcast(subject + activity ~ variable, mean)

# save our data from the previous step
# FIX: the argument is `row.names` (the original relied on partial matching)
write.table(casted, file = "varAvgsBasedOnSubjectAndActivity.txt", row.names = FALSE)
|
0849512e6389b4dcaa4b73c10fa61818c5166e5c
|
38e8bcc3853deb265ef4c83358311d6ae70dccad
|
/560_process-blast_results_deep.R
|
3a7a941adf15516e0eb7e7ef86bb7e9eaa097809
|
[] |
no_license
|
macrobiotus/ships_and_bugs
|
bedb56644a3989a85b053931713c95f5333e91f9
|
3ec4a17cc6a72e488ec6c650f9cc56cefb350a9f
|
refs/heads/master
| 2023-04-08T04:38:54.878450
| 2023-02-03T09:20:07
| 2023-02-03T09:20:07
| 504,256,465
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,097
|
r
|
560_process-blast_results_deep.R
|
# Calculations for Results section Taxonomy plots possibly per route
# =====================================================================
# For Blast results of deeply rarfied ssamples in
# /Users/paul/Documents/CU_combined/Zenodo/Blast
# Part I: Get taxonomy strings for Blast results
# -----------------------------------------------
# see https://ropensci.org/tutorials/taxize_tutorial/
# for handling blast data and getting correct taxonomy strings from the net
library("blastxml") # read blast xml - get via `library(devtools); install_github("BigelowLab/blastxml")`
library("tidyverse") # work using tibbles
library("janitor") # clean column names
library("taxonomizr") # query taxon names
library("purrr") # dplyr applies
library("furrr") # parallel purrrs
library("openxlsx") # write Excel tables
# define file path components for listing
blast_results_folder <- "/Users/paul/Documents/CU_combined/Zenodo/Blast"
blast_results_pattern <- glob2rx("*deep_overlap_*_ports_blast_result_no_env.txt", trim.head = FALSE, trim.tail = TRUE)
# read all file into lists for `lapply()` usage
blast_results_files <- list.files(path=blast_results_folder, pattern = blast_results_pattern, full.names = TRUE)
# read in xmls files - last done for deep set 20.05.2020 ********************* ********************* *********************
# plan(multiprocess) # enable
# blast_results_list <- furrr::future_map(blast_results_files, blastxml_dump, form = "tibble", .progress = TRUE) # takes 7-10 hours on four cores - avoid by reloading full object from disk
# continue here after 20.05.2020 ********************* ********************* ********************* *********************
# save(blast_results_list, file="/Users/paul/Documents/CU_combined/Zenodo/R_Objects/200520_560_blast-xml-conversion_deep.Rdata")
load(file="/Users/paul/Documents/CU_combined/Zenodo/R_Objects/200520_560_blast-xml-conversion_deep.Rdata", verbose = TRUE)
names(blast_results_list) <- blast_results_files # works
# create one large item from many few, while keeping source file info fo grouping or subsetting
blast_results_list %>% bind_rows(, .id = "src" ) %>% # add source file names as column elements
clean_names(.) %>% # clean columns names
group_by(iteration_query_def) %>% # isolate groups of hits per sequence hash
slice(which.max(hsp_bit_score)) -> blast_results # save subset
nrow(blast_results) # 11978
# prepareDatabase not needed to be run multiple times
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# prepareDatabase(sqlFile = "accessionTaxa.sql", tmpDir = "/Users/paul/Sequences/References/taxonomizR/", vocal = TRUE) # takes a very long time - avoid by reloading full object from disk
# Look up NCBI taxonomic IDs for a vector of accession numbers, using the
# local taxonomizr SQLite database (built once by prepareDatabase above).
get_taxid <- function(x) {accessionToTaxa(x, "/Volumes/HGST1TB/Users/paul/Sequences/References/taxonomizR/accessionTaxa.sql", version='base')}
# Look up full taxonomy strings (superkingdom..species, see the tax_table
# columns used below) for a vector of taxonomic IDs from the same database.
get_strng <- function(x) {getTaxonomy(x,"/Volumes/HGST1TB/Users/paul/Sequences/References/taxonomizR/accessionTaxa.sql")}
# add tax ids to table for string lookup - probably takes long time
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blast_results_appended <- blast_results %>% mutate(tax_id = get_taxid(hit_accession)) # takes some time...
# continue here 21.05.2020
# save(blast_results_appended, file="/Users/paul/Documents/CU_combined/Zenodo/R_Objects/200520_560_blast-xml-conversion_deep_with-tax-id.Rdata")
load(file="/Users/paul/Documents/CU_combined/Zenodo/R_Objects/200520_560_blast-xml-conversion_deep_with-tax-id.Rdata", verbose=TRUE)
length(blast_results_appended$tax_id) # 11978
# look up taxonomy table
tax_table <- as_tibble(get_strng(blast_results_appended$tax_id), rownames = "tax_id") %>% mutate(tax_id= as.numeric(tax_id))
# continue here 21.05.2020
nrow(tax_table) # 11978
# getting a tax table without duplicates to enable proper join command later
tax_table <- tax_table %>% arrange(tax_id) %>% distinct(tax_id, superkingdom, phylum, class, order, family, genus, species, .keep_all= TRUE)
# checks
head(tax_table)
nrow(tax_table) # 3177 - as it should
all(!duplicated(tax_table)) # and no duplicated tax ids anymore
lapply(list(blast_results_appended,tax_table), nrow) # first 11978, second deduplicated and with 3177 - ok
# https://stackoverflow.com/questions/5706437/whats-the-difference-between-inner-join-left-join-right-join-and-full-join
blast_results_final <- left_join(blast_results_appended, tax_table, copy = TRUE)
nrow(blast_results_final) # 11978 - table has correct length now
# correcting factors
blast_results_final %>% ungroup(.) %>% mutate(src = as.factor(src)) -> blast_results_final
levels(blast_results_final$src)
# diagnostic plot - ok
# ggplot(blast_results_final, aes(x = src, y = phylum, fill = phylum)) +
# geom_bar(position="stack", stat="identity") +
# theme(axis.text.x = element_text(angle = 45, hjust = 1))
blast_results_final$src <- plyr::revalue(blast_results_final$src, c("/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_1_ports_blast_result_no_env.txt" = "1 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_10_ports_blast_result_no_env.txt" = "10 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_11_ports_blast_result_no_env.txt" = "11 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_12_ports_blast_result_no_env.txt" = "12 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_13_ports_blast_result_no_env.txt" = "13 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_14_ports_blast_result_no_env.txt" = "14 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_2_ports_blast_result_no_env.txt" = "2 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_3_ports_blast_result_no_env.txt" = "3 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_4_ports_blast_result_no_env.txt" = "4 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_5_ports_blast_result_no_env.txt" = "5 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_6_ports_blast_result_no_env.txt" = "6 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_7_ports_blast_result_no_env.txt" = "7 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_8_ports_blast_result_no_env.txt" = "8 Port(s)",
"/Users/paul/Documents/CU_combined/Zenodo/Blast/110_85_18S_eDNA_samples_Eukaryotes-deep_overlap_9_ports_blast_result_no_env.txt" = "9 Port(s)"))
blast_results_final$src <- factor(blast_results_final$src, levels = c("1 Port(s)", "2 Port(s)","3 Port(s)","4 Port(s)","5 Port(s)","6 Port(s)","7 Port(s)","8 Port(s)","9 Port(s)","10 Port(s)","11 Port(s)","12 Port(s)","13 Port(s)","14 Port(s)"))
# diagnostic plot -ok
# ggplot(blast_results_final, aes(x = src, y = phylum, fill = phylum)) +
# geom_bar(position="stack", stat="identity") +
# theme(axis.text.x = element_text(angle = 45, hjust = 1))
# save object and some time by reloading it
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# save(blast_results_final, file="/Users/paul/Documents/CU_combined/Zenodo/R_Objects/200520_560_blast-xml-conversion_deep_with-ncbi-info.Rdata")
load(file="/Users/paul/Documents/CU_combined/Zenodo/R_Objects/200520_560_blast-xml-conversion_deep_with-ncbi-info.Rdata")
write.xlsx(blast_results_final, "/Users/paul/Documents/CU_combined/Zenodo/Blast/200520_560_blast-xml-conversion_deep_with-ncbi-info.xlsx", overwrite = FALSE)
# Part II: Plot Tax at ports with blast taxonomy
# ----------------------------------------------
ggplot(blast_results_final, aes(x = src, y = phylum, fill = phylum)) +
geom_bar(position="stack", stat="identity") +
ggtitle("Phyla at port(s) (deeply rarefied data)") +
theme_bw() +
theme(axis.text.x = element_text(angle = 45, hjust = 1),
axis.text.y = element_blank(),
axis.ticks.y = element_blank())
ggsave("200521_phyla_at_ports_deep.pdf", plot = last_plot(),
device = "pdf", path = "/Users/paul/Documents/CU_combined/Zenodo/Display_Item_Development/",
scale = 1.5, width = 140, height = 105, units = c("mm"),
dpi = 500, limitsize = TRUE)
# Part III: relate taxonomy ids with route data and plot
# -----------------------------------------------------
# (copy and adjust original blast subsetting code)
# use alluvial diagram
# https://cran.r-project.org/web/packages/ggalluvial/vignettes/ggalluvial.html
|
b8b7883ff44fc0e4c6c84b62cd95b70f7e323c65
|
df559e3a576376882159ea21addd63db4dff77fb
|
/Data_Wrangling_Viz/Case_Study_07/Class_Task_13/Task_13.R
|
f7203f56558fb60c85bb71dc881634379c4afaaa
|
[] |
no_license
|
McKayMDavis/Portfolio
|
ac9c8a4c97a8d881d2a09e4a59ff3581c88b881e
|
ee15ba7b2a8bd7d79bc603d66794477a9e376b9c
|
refs/heads/master
| 2021-01-25T12:29:58.079496
| 2018-03-12T05:22:22
| 2018-03-12T05:22:22
| 123,472,129
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,064
|
r
|
Task_13.R
|
# Class Task 13: pull weather observations for station RXE via the riem
# package and explore temperature patterns by weekday.
library(riem)
library(tidyverse)
library(lubridate)
library(forcats)
library(ggthemes)

#DO NOT RUN THIS CODE!!! (bulk download over every network -- very slow)
#measures = list()
#for (i in seq_along(riem_networks()$code)) {
#  measures[[i]] <- riem_measures(station = riem_stations(network = riem_networks()$code[i])$id[i], date_start = "2015-06-01", date_end = "2017-07-01")
#}
#measures_big <- dplyr::bind_rows(measures)

# Download two years of observations for the single station "RXE" and cache
# them on disk so later runs can skip the network fetch.
RXE <- riem_measures(station = "RXE", date_start = "2015-06-01", date_end = "2017-07-01")
#measures_big2 <- rbind(measures_big, RXE)
#saveRDS(measures_big2, "./Case_Study_07/Class_Task_13/lotsodata.Rds")
saveRDS(RXE, "./Case_Study_07/Class_Task_13/RXE.Rds")
RXE <- readRDS("./Case_Study_07/Class_Task_13/RXE.Rds")

# Fixed weekday ordering for the x-axes (abbreviated day names).
orders <- c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")

# p1: distribution of temperatures per weekday (violin plot).
p1 <- RXE %>%
  ggplot(aes(x = factor(weekdays(.$valid, abbreviate = TRUE), levels = orders), y = tmpf)) +
  geom_violin() +
  theme_solarized_2(light = FALSE) +
  labs(x = "Week-Day", y = "Temperature (Farenheit)", title = "Day With Highest Distribution\nTemperature Mostly Inconclusive")

# p2: maximum recorded temperature per weekday. Note every observation in a
# weekday group carries the same maxtemp, so points overplot at the maximum.
p2 <- RXE %>%
  filter(!is.na(tmpf)) %>%
  mutate(weekday = factor(weekdays(.$valid, abbreviate = TRUE), levels = orders)) %>%
  group_by(weekday) %>%
  mutate(maxtemp = max(tmpf)) %>%
  ggplot(aes(x = weekday, y = maxtemp)) +
  geom_point(color = "lightblue") +
  theme_solarized_2(light = FALSE) +
  labs(x = "Week-Day", y = "Temperature (Farenheit)", title = "Day With Highest Temperature")

# p3: smoothed temperature by hour of day, faceted by weekday, with a
# vertical reference line at 2pm (hour 14).
p3 <- RXE %>%
  ggplot(aes(x = hour(.$valid), y = tmpf)) +
  geom_smooth() +
  geom_vline(aes(xintercept = 14), color = "lightblue") +
  theme_solarized_2(light = FALSE) +
  facet_grid(. ~ factor(weekdays(.$valid, abbreviate = TRUE), levels = orders)) +
  labs(x = "Hour (Military)", y = "Temperature (Farenheit)", title = "Wednesday with Lowest\nDistribution of Temperatures at 2pm")

# Persist all three figures next to this task's other outputs.
ggsave("distTemp.png", plot = p1, path = "./Case_Study_07/Class_Task_13")
ggsave("maxTemp.png", plot = p2, path = "./Case_Study_07/Class_Task_13")
ggsave("days2pm.png", plot = p3, path = "./Case_Study_07/Class_Task_13")
|
108f8020c752c8cbf7a5723f077eedbcd26f5c5d
|
4d71074b88a3e201757fd313de87a1ac18694d86
|
/PBS-qwait-dump.R
|
b55ca4bf3f7051f8d54ed6ec00f04248c1fb256b
|
[] |
no_license
|
nick-wilson/hpc-usage-statistics
|
9be117c1cde01d84649451ccbd5d32c00938fc88
|
060ff176a660afb420ec1b4cc7ead5b44a6099a0
|
refs/heads/master
| 2022-10-18T19:08:19.240999
| 2020-05-14T11:37:57
| 2020-05-14T11:37:57
| 158,787,977
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
PBS-qwait-dump.R
|
#!/usr/bin/env Rscript
# Dump per-job queue wait times for PBS jobs that had NO dependency --
# presumably because a dependency-held job's wait time does not reflect
# queue pressure (confirm against the analysis that consumes qwait).
source("config.R")
source("PBS-Application-Stats-Common.R")
# Load cleaned data ("data" and "suffix" are expected to be provided by the
# sourced files / the loaded image -- verify against config.R).
load(file=alldata_R)
# Per-job dependency flags, produced externally as depend.<suffix>.csv.
filename<-paste0("depend.",suffix,".csv")
depend<-read.csv(file=filename,header=TRUE,col.names=c("Job.ID.NoIndex","Dependency"),colClasses=c("character","logical"))
# Left-join the flags onto the job table; jobs absent from the dependency
# file are treated as having no dependency.
data<-merge(data,depend,all.x=TRUE,all.y=FALSE,sort=FALSE)
data$Dependency[is.na(data$Dependency)]<-FALSE
data<-data%>%filter(Dependency==FALSE)
# Keep only the columns needed downstream and tag rows with the data suffix.
qwait<-data%>%select(Job.ID,Queue,Wait.Time.Hours)
qwait$suffix<-suffix
save(qwait,file=paste0("qwait.",suffix,".Rdata"))
|
fc0109ff05400a1f8cf32d9333b3a2088b1b74ca
|
131fe3dfba973e0cd9664bfb581b06dbc6d1f300
|
/man/ksarakil.Rd
|
4a7ed36797fc0329aed09b1e54fdf1a7f67de399
|
[
"MIT"
] |
permissive
|
aczepielik/CrossTabCluster
|
d61592ec6a78584ee087abd86181f0fa0721d056
|
bdb4719502e09641cb38fb163207cebc44b7733a
|
refs/heads/master
| 2022-11-11T00:18:08.822129
| 2020-06-13T14:02:34
| 2020-06-13T14:02:34
| 268,132,246
| 0
| 0
|
NOASSERTION
| 2020-06-13T13:10:13
| 2020-05-30T17:54:04
|
R
|
UTF-8
|
R
| false
| true
| 333
|
rd
|
ksarakil.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{ksarakil}
\alias{ksarakil}
\title{Lithic tools from Ksar-Akil excavations}
\format{The 10x5 contingency table.}
\source{
Alesi
}
\usage{
ksarakil
}
\description{
Lithic tools from Ksar-Akil excavations
}
\keyword{datasets}
|
6e584082a798b1469bbb17d64158b5c5179207f7
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610047197-test.R
|
e704f50773376ea19135661837d1e6c3ab12ba45
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 191
|
r
|
1610047197-test.R
|
# Fuzzing/regression input for gjam:::tnormRcpp: call the truncated-normal
# sampler with extreme (denormal-magnitude) bounds, mean and sd, and print
# the structure of whatever it returns.
testlist <- list(hi = 2.56734752865526e-289, lo = 2.56734752865526e-289, mu = 2.56734752865526e-289, sig = 2.56734752865526e-289)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result)
|
a614e4e68c905f9560ae131eb6fb80e08a19f299
|
ece4f2684b23d8058a0bc5aadc9a20e8fff11d36
|
/Source Files/7_Comparison Operators.R
|
f2fb097cdd191ad5d07eeac6d6e5326a83ffb77e
|
[] |
no_license
|
tanussingh/Machine-Learning
|
0e6b961408beb96caebdd73f32f2bdd410bc2977
|
36c5e08c3df7cf77ca81443aa20308c4c0072677
|
refs/heads/master
| 2020-04-24T05:03:29.453955
| 2019-10-09T17:14:15
| 2019-10-09T17:14:15
| 171,724,183
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 591
|
r
|
7_Comparison Operators.R
|
# Comparison Operators
# In R we can use comparison operators to compare variables
# and return logical values (TRUE/FALSE).
5 > 6
6 > 5

# Comparisons between two vectors are element-wise.
v1 <- c(1,2,3)
v2 <- c(10,20,30)
v1 < v2

# Greater-than-or-equal / less-than-or-equal.
6 >= 6
6 >= 5
6 >= 7
3 < 2
2 <= 2

# Be very careful with comparison operators and negative numbers! Use spacing to keep things clear.
# An example of a dangerous situation:
var <- 1
var
var < -2   # comparison: "is var less than -2?"
var <- 2   # assignment: var now holds 2
var

# Inequality (!=) and equality (==) tests.
5 != 2
5 != 5
5 == 5
2 == 3

# Vector Comparisons
# We can apply a comparison of a single number to an entire vector
v <- c(1,2,3,4,5)
v < 2
v == 3

#Ref: www.pieriandata.com
|
bb050c2b841e5d47891b08799c71b2dc3261c654
|
f26781b86f2dea0394809d1951bad4550d82ba3c
|
/script/build_adpx.R
|
333e1b66cb92d2e680fad60be334a7cbebfdd0e8
|
[] |
no_license
|
fyang72/handbook
|
0ac0d616f033747347bce3fe72219223a2553ab8
|
89abb7b557b83d9b651821780b92410623aaa9a2
|
refs/heads/master
| 2022-09-30T10:36:14.303860
| 2019-12-16T19:32:13
| 2019-12-16T19:32:13
| 171,066,670
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,153
|
r
|
build_adpx.R
|
########################################################################
# pkmerge from adsl, adex, adpc, and others
########################################################################
build_adpx <-function(adsl=NULL, adex=NULL, adpc=NULL, other=NULL) {
validate(need(adsl, message="no adsl in build_adpx"),
need(adex, message="no adsl in build_adex"),
need(adpc, message="no adsl in build_adpc")
)
# toupper
colnames(adsl) <- toupper(colnames(adsl))
colnames(adex) <- toupper(colnames(adex))
colnames(adpc) <- toupper(colnames(adpc))
#
adsl <- adsl %>% select(-ends_with("_ORG"))
adex <- adex %>% select(-ends_with("_ORG"))
adpc <- adpc %>% select(-ends_with("_ORG"))
# adsl
adsl = adsl %>% ungroup() %>% fillUpCol_df(adsl_var_lst)
# adex
col_lst <- c(nmdat_var_lst,
colnames(adex),
colnames(adpc),
colnames(other)
) %>% unique()
# expand to col_lst
adex = adex %>% ungroup() %>% fillUpCol_df(col_lst)
adpc = adpc %>% ungroup() %>% fillUpCol_df(col_lst)
# adpc = adpc + other (convert to characters)
if (!is.null(other)) {
colnames(other) <- toupper(colnames(other))
other <- other %>% select(-ends_with("_ORG"))
other = other %>% ungroup() %>% fillUpCol_df(col_lst)
#w <- which(sapply(other, function(x) tail(class(x),1)) %in% c('factor', 'POSIXt', 'POSIXlt'))
other <- lapply(other, function(x) as.character(x) )
adpc <- lapply(adpc, function(x) as.character(x) )
adpc = bind_rows(adpc[, col_lst], other[, col_lst])
}
# adpx = adex + adpc (convert to characters)
adex <- lapply(adex, function(x) as.character(x) ) %>%
convert_vars_type(nmdat_data_type)
adpc <- lapply(adpc, function(x) as.character(x) ) %>%
convert_vars_type(nmdat_data_type)
adpx = bind_rows(adex[, col_lst], adpc[, col_lst])
#--------------------------------------------------------------
# calculate TIME for adpc
#--------------------------------------------------------------
# adex %>% .[[c("USUBJID")]]
# col_lst = c("USUBJID", "TRTSDTM")
# adpc = adpc %>% select(-TRTSDTM) %>%
# left_join(adex %>%
# select(one_of(col_lst)) %>%
# distinct(USUBJID, .keep_all=TRUE),
# by="USUBJID")
#
# adpc = adpc %>% mutate(
# TIME = difftime(parse_date_time(SAMDTTM, orders="Ymd HMS", truncated = 3),
# parse_date_time(TRTSDTM, orders="Ymd HMS", truncated = 3),
# units = "days"
# ) %>% as_numeric()
# )
#--------------------------------------------------------------
# bring WGTBL to adex, for calculate EXTDOSE (AMT)
#--------------------------------------------------------------
# only need USUBJID and other variables that adex do not have, from adsl
# col_lst = c("USUBJID", "WGTBL")
# adex = adex %>% select(-WGTBL) %>%
# left_join(adsl %>%
# distinct(USUBJID, .keep_all=TRUE) %>%
# select(one_of(col_lst)),
# by="USUBJID")
#
# adex = adex %>% mutate(
# EXTDOSE = ifelse(EXDOSU=="mg/kg", as_numeric(EXDOSE)*as_numeric(WGTBL),
# ifelse(EXDOSU=="mg", as_numeric(EXDOSE), NA)))
#
#------------------------
# TIME
#------------------------
# https://www.rdocumentation.org/packages/lubridate/versions/1.7.4/topics/parse_date_time
# "ymd" "09-01-03 12:02" "09-01-01", "090102", "09-01 03"
# "ymd HM" "09-01-03 12:02"
# x <- c("2011-12-31 12:59:59", "2010-01-01 12:11", "2010-01-01 12", "2010-01-01")
# parse_date_time(x, "Ymd HMS", truncated = 3)
#UTC is n ot a time zone, but a time standard that is the basis for civil time and time zones worldwide. This means
# that no country or territory officially uses UTC as a local time.
# SAMDTTM: start Date/time of sampling TRTSTDTM TRTSDTM
#adex$SAMDTTM = as.POSIXct(adex$EXSTDT*60*60*24 + adex$EXSTTM, origin="1960-01-01", tz="GMT")
#adex$SAMDTTM = as.POSIXct(adex$TRTSDTM, origin="1960-01-01", tz="GMT")
#adex$SAMDTTM = as.POSIXlt(paste(DATE, "T", TIME1, sep=""), format = "%Y-%m-%dT%H:%M", tz="GMT")
#strptime("Tue, 23 Mar 2010 14:36:38 -0400", "%a, %d %b %Y %H:%M:%S %z")
# https://www.rdocumentation.org/packages/lubridate/versions/1.7.4/topics/parse_date_time
# "ymd" "09-01-03 12:02" "09-01-01", "090102", "09-01 03"
# "ymd HM" "09-01-03 12:02"
# x <- c("2011-12-31 12:59:59", "2010-01-01 12:11", "2010-01-01 12", "2010-01-01")
# parse_date_time(x, "Ymd HMS", truncated = 3)
adpx = adpx %>% mutate(
SAMDTTM = standardise_SAMDTTM(SAMDTTM),
EXENDTC = standardise_SAMDTTM(EXENDTC),
EXSTDTC = standardise_SAMDTTM(EXSTDTC)
) %>%
group_by(USUBJID) %>% mutate(TRTSDTM = min(EXSTDTC, na.rm=TRUE)) %>%
ungroup()
# SAMDTTM
ids <- which(is.na(as_numeric(adpx$TIME)))
adpx$TIME[ids] = difftime(
adpx$SAMDTTM[ids], adpx$TRTSDTM[ids], units = "days") %>% as_numeric()
# EXSTDTC
ids <- which(is.na(as_numeric(adpx$TIME)))
adpx$TIME[ids] = difftime(
adpx$EXSTDTC[ids], adpx$TRTSDTM[ids], units = "days") %>% as_numeric()
# EXDUR
adpx = adpx %>% mutate(
EXDUR = difftime(EXENDTC, EXSTDTC, units = "days") %>% as_numeric()
)
# character
adpx = adpx %>%
mutate(SAMDTTM = as.character(SAMDTTM),
TRTSDTM = as.character(TRTSDTM),
EXSTDTC = as.character(EXSTDTC),
EXENDTC = as.character(EXENDTC)
)
#------------------
# merge with adsl
#------------------
col_lst = unique(c(setdiff(adsl_var_lst, NULL), # "WGTBL"
setdiff(colnames(adsl), colnames(adpx))))
# keep "STUDYID", "USUBJID"in adpx, add everything else
adpx = adpx %>%
select(-one_of(setdiff(adsl_var_lst, c("STUDYID", "USUBJID") ))) %>%
left_join(adsl%>%distinct(USUBJID,.keep_all=TRUE) %>%
select(one_of(col_lst)),
by=c("STUDYID", "USUBJID")
)
#----------------------
# dosing events
#----------------------
adpx = adpx %>% mutate(
EXTDOSE = ifelse(EXDOSU=="mg/kg", as_numeric(EXDOSE)*as_numeric(WGTBL),
ifelse(EXDOSU=="mg", as_numeric(EXDOSE), NA)))
#---------------------------------------------
# order columns, and final output
#---------------------------------------------
adpx = adpx[, c(nmdat_var_lst, setdiff(colnames(adpx), nmdat_var_lst))]
adpx <- adpx %>% convert_vars_type(nmdat_data_type)
adpx <- adpx %>% dplyr::arrange(STUDYID, USUBJID, TIME, TESTN)
adpx <- adpx %>% ungroup()
return(adpx)
}
#################################################################
# final output
#################################################################
if (ihandbook) {
data = NULL
table = NULL
adpx <- build_adpx(dataset$adsl, dataset$adex, dataset$adpc, other=NULL)
data[["adpx"]] = adpx
#table <- check_adpx(dataset, adsl, topN=topN) # dataset: original one, # adsl: parsed one
output <- list(data=data)
}
|
dac3a61b1c3333660f5beb7feda6b9a986cfbf0d
|
cc9bc0cb7986d673ecf31cd98529fd927aac9b71
|
/man/hexbinpie.Rd
|
05904052fd0036609aa428754b6f4669ecdb6fc8
|
[] |
no_license
|
cran/somplot
|
712f08f9b9e36883b06338aa73f2610117ad2a5c
|
c84892ebdc715b87b0b18b0a49e3578ba4c637cf
|
refs/heads/master
| 2021-01-01T16:35:16.483867
| 2013-07-21T00:00:00
| 2013-07-21T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,850
|
rd
|
hexbinpie.Rd
|
\name{hexbinpie}
\alias{hexbinpie}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function, used by som.plot to create plots of Kohonen maps
}
\description{
The function is used by \code{som.plot}. It is not necessary to call \code{hexbinpie} directly.
The function draws pie charts in a hexagonal grid.
}
\usage{
hexbinpie(x, y, kat, xbnds = range(x), ybnds = range(y),
hbc = NA, pal = NA, hex = "gray", circ = "gray50",
cnt = "black", show.counter.border, ...)
}
\arguments{
\item{x}{
vector of X coordinates
}
\item{y}{
vector of Y coordinates
}
\item{kat}{
vector of categories for each data point
}
\item{xbnds}{
limits in X direction
}
\item{ybnds}{
limits in Y direction
}
\item{hbc}{
data frame holding the neurons
}
\item{pal}{
colours to be used to plot the classes of input data
}
\item{hex}{
colour for hexagons
}
\item{circ}{
colour for circles
}
\item{cnt}{
colour for labels in the pies
}
\item{show.counter.border}{
percentile as limit for the display of labels in the pie charts.
}
\item{\dots}{
more arguments for customising the plot
}
}
%%\details{
%% ~~ If necessary, more details than the description above ~~
%%}
\value{
The function returns no value
}
\references{
see function som.plot()
}
\author{
Benjamin Schulz and Andreas Dominik
}
%%\note{
%% ~~further notes~~
%%}
\section{Warning }{
The function is called by som.plot() and not intented to be used directly.
}
%%\seealso{
%%
%%}
\examples{
\dontrun{
hexbinpie(data$x, data$y, kat=data$kat, hbc = hbc, pal = pal, ...)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ High-Level Plots }
\keyword{ Graphics }
|
bffdc8355b8504324a8e9a69f52325810c2cb21d
|
85ef29e3109ac537b0a61a3363dbfe5a22f8ef55
|
/code/Basics.r
|
cdd69967a2664380f9b8cd517b00d58d2c6dc4b9
|
[] |
no_license
|
Redwa/LiveBeginnerJune18
|
e2941bd1a615d14ce97194700aa2741e6ace784f
|
c2debe621a00d68bac78f80a0d81cbae11f8b20d
|
refs/heads/master
| 2020-03-23T15:39:55.869897
| 2018-06-19T16:54:56
| 2018-06-19T16:54:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72
|
r
|
Basics.r
|
1 - 1
2*4
x <- 2
x
x <- 3
x
x <- c(1, 2, 3)
x
x + 2
x
x*2
3 -> y
y
|
61a63add4861ca1a6f9ac19c49842b27af0e209b
|
83ff66e08bde51d1b31b12ee7b5452fb2fed2432
|
/write-data2.R
|
b916f13905f561c7b478c0b08deab54ea6b4c06f
|
[] |
no_license
|
favrin/CUP
|
1bf23a7b45c04734ec3386434f1968d16a70304c
|
6a9fb89715d7b8e961c68878f60e2706f0b1e84f
|
refs/heads/master
| 2021-01-20T19:59:50.053291
| 2016-08-24T14:08:04
| 2016-08-24T14:08:04
| 61,106,221
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,843
|
r
|
write-data2.R
|
setwd("~/Documents/Dropbox/NEO4J-TEST/CUP-try")
library(gdata)
library(boot)
library(xlsx)
#LINES = 629697
LINES = 300
#data1 <- read.delim("author-journal.csv",header=F,stringsAsFactors=F,,sep=",")
#data2 <- read.csv("article_date2.csv",header=T,stringsAsFactors=F)
data <- read.xlsx("authordata.xlsx",header=T,sheetName=1,colIndex=pippo)
#colnames(data2)[colnames(data2)=="year"]<- "aaa"
#seven <- rep(0,LINES)
#three <-data2[1,]
one1<-rep(0,LINES)
one3<-rep(0,LINES)
one5<-rep(0,LINES)
one7<-rep(0,LINES)
one8<-rep(0,LINES)
one13<-rep(0,LINES)
one15<-rep(0,LINES)
one16<-rep(0,LINES)
one20<-rep(0,LINES)
two1<-rep(0,LINES)
two3<-rep(0,LINES)
two5<-rep(0,LINES)
two7<-rep(0,LINES)
two8<-rep(0,LINES)
two13<-rep(0,LINES)
two15<-rep(0,LINES)
two16<-rep(0,LINES)
two20<-rep(0,LINES)
onenum<-rep(0,LINES)
#for (x in 1:27372)
for (x in 1:LINES)
{
one1[x]<-gsub("\\(","",data[x,1])
one3[x]<-gsub("\\(","",data[x,3])
one5[x]<-gsub("\\(","",data[x,5])
one7[x]<-gsub("\\(","",data[x,7])
one8[x]<-gsub("\\(","",data[x,8])
one13[x]<-gsub("\\(","",data[x,13])
one15[x]<-gsub("\\(","",data[x,15])
one16[x]<-gsub("\\(","",data[x,16])
one20[x]<-gsub("\\(","",data[x,20])
two1[x]<-gsub("\\)","",one1[x])
two3[x]<-gsub("\\)","",one3[x])
two5[x]<-gsub("\\)","",one5[x])
two7[x]<-gsub("\\)","",one7[x])
two8[x]<-gsub("\\)","",one8[x])
two13[x]<-gsub("\\)","",one13[x])
two15[x]<-gsub("\\)","",one15[x])
two16[x]<-gsub("\\)","",one16[x])
two20[x]<-gsub("\\)","",one20[x])
#onenum[x]<- lapply(two1[x],as.numeric)
two7[x] = paste(two7[x],two8[x],sep=" ")
}
three <- cbind(two1,two3,two5,two7,two8,two13,two15,two16,two20)
four <- t(three)
#write(four, file="author_journalV4.txt",ncolumns =9, append=FALSE, sep="|")
write(two1, file="articleidV5.txt",ncolumns =1, append=FALSE)
|
c10d75715236c8128f9dcd5bd85946d88bc840c4
|
e357ee46a147c06a694bcfbb6d9970cc7caefcbb
|
/cachematrix.R
|
05d53164e7a4fed7a312570a3e67c93e9f2eb1c2
|
[] |
no_license
|
StreetsefeR/ProgrammingAssignment2
|
8b1eaab478d84725aa8b8e8941e2b800c06fab91
|
a340a21bf04ae63d522c1e4f1e767feca741cf00
|
refs/heads/master
| 2021-01-20T21:19:55.140638
| 2015-02-21T16:45:34
| 2015-02-21T16:45:34
| 31,128,670
| 0
| 0
| null | 2015-02-21T14:53:44
| 2015-02-21T14:53:44
| null |
UTF-8
|
R
| false
| false
| 1,527
|
r
|
cachematrix.R
|
## functions cache the inverse of a matrix
## limitation: the matrix supplied must be invertible - error handler is not included
## creation of special "matrix" object which is designed to store the matrix and related inversed maxrix
makeCacheMatrix<- function(x = matrix()) {
s <- NULL # assigning local m standing for inversed matrix to NULL
set <- function(y) { # specifying set subfunction
x <<- y # assigning to x in containing (global in this case) envinronment given value
s <<- NULL # freeing m globally
}
get <- function() x # subfunction returning initial matrix
setsolve <- function(solve) s <<- solve # subfunction "memorazing" or caching invesed matrix
getsolve <- function() s # subfunction for extracting saved inversed matrix
list(set = set, get = get, # setting list of subfunctions and their names
setsolve = setsolve,
getsolve = getsolve)
}
## finding of inversed matrix (at first in cache, if not - calculated)
cacheSolve <- function(x, ...) { # computation of the inverse of the special "matrix"
s <- x$getsolve() # retrieving saved inversed matrix
if(!is.null(s)) { # if the inversed value was previously saved
message("retrieving cache") # indicates the usage of cache, may be omitted
return(s)
}
s <- solve(x$get()) # to evade using intermediary variables saved like matr in equal construct:
# matr <- x$get()
# s <- solve(matr)
x$setsolve(s) # caching calculated inversed matrix
s # returning inverted matrix as a rsult of function
}
|
974a246563809c6bf0828f48e0d4cec738b746e3
|
990522e11be5e495a43b0d118ddf31f873bc393a
|
/NYTD_YAR_HOMELESSNESS/imputation.r
|
d36d408625132467b70496ede4518d93c9504d73
|
[] |
no_license
|
cssat-graveyard/nytd
|
32327e8d551c1bc3c19b5676d3102573203b873f
|
4874f8d6e889fe44b70e958d8268bac7db661efa
|
refs/heads/master
| 2021-05-29T06:25:28.275078
| 2015-05-18T21:19:30
| 2015-05-18T21:19:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,161
|
r
|
imputation.r
|
a.test <- sqlQuery(con, "SELECT TOP 1000
datayear
,recnumbr
,st
,[latremlos]
,[settinglos]
,[previouslos]
,[lifelos]
FROM [public_data].[afcars_foster_care_00_12]
WHERE datayear > 2003
ORDER BY datayear")
test <- sqlQuery(con, "SELECT TOP 1000
datayear
,recnumbr
,st
,[latremlos]
,[settinglos]
,IIF([previouslos] IS NULL, lifelos - latremlos, previouslos) AS previouslos
,[lifelos]
FROM [public_data].[afcars_foster_care_00_12]
WHERE datayear > 2003
ORDER BY datayear")
a.test1 <- a.test %>% select(datayear, recnumbr, st, latremlos, previouslos, lifelos)
test.am <- amelia(a.test1, idvars = c('datayear', 'recnumbr', 'st'), m = 10, ord = c('latremlos', 'previouslos', 'lifelos'))
head(a.test, 30)
?amelia
head(test.am$imputation[[1]])
a.test <- sqlQuery(con, "SELECT
npd.*
--,nytd1.[repdate] AS nytd1_repdate
,nytd1.[currfte] AS nytd1_currfte
,nytd1.[currpte] AS nytd1_currpte
,nytd1.[emplysklls] AS nytd1_emplysklls
,nytd1.[socsecrty] AS nytd1_socsecrty
,nytd1.[educaid] AS nytd1_educaid
,nytd1.[pubfinas] AS nytd1_pubfinas
,nytd1.[pubfoodas] AS nytd1_pubfoodas
,nytd1.[pubhousas] AS nytd1_pubhousas
,nytd1.[othrfinas] AS nytd1_othrfinas
,nytd1.[highedcert] AS nytd1_highedcert
,nytd1.[currenroll] AS nytd1_currenroll
,nytd1.[cnctadult] AS nytd1_cnctadult
,nytd1.[homeless] AS nytd1_homeless
,nytd1.[subabuse] AS nytd1_subabuse
,nytd1.[incarc] AS nytd1_incarc
,nytd1.[children] AS nytd1_children
,nytd1.[marriage] AS nytd1_marriage
,nytd1.[medicaid] AS nytd1_medicaid
,nytd1.[othrhlthin] AS nytd1_othrhlthin
,nytd1.[medicalin] AS nytd1_medicalin
,nytd1.[mentlhlthin] AS nytd1_mentlhlthin
,nytd1.[prescripin] AS nytd1_prescripin
--,nytd2.[repdate] AS nytd2_repdate
,nytd2.[outcmfcs] AS nytd2_outcmfcs
,nytd2.[currfte] AS nytd2_currfte
,nytd2.[currpte] AS nytd2_currpte
,nytd2.[emplysklls] AS nytd2_emplysklls
,nytd2.[socsecrty] AS nytd2_socsecrty
,nytd2.[educaid] AS nytd2_educaid
,nytd2.[pubfinas] AS nytd2_pubfinas
,nytd2.[pubfoodas] AS nytd2_pubfoodas
,nytd2.[pubhousas] AS nytd2_pubhousas
,nytd2.[othrfinas] AS nytd2_othrfinas
,nytd2.[highedcert] AS nytd2_highedcert
,nytd2.[currenroll] AS nytd2_currenroll
,nytd2.[cnctadult] AS nytd2_cnctadult
,nytd2.[homeless] AS nytd2_homeless
,nytd2.[subabuse] AS nytd2_subabuse
,nytd2.[incarc] AS nytd2_incarc
,nytd2.[children] AS nytd2_children
,nytd2.[marriage] AS nytd2_marriage
,nytd2.[medicaid] AS nytd2_medicaid
,nytd2.[othrhlthin] AS nytd2_othrhlthin
,nytd2.[medicalin] AS nytd2_medicalin
,nytd2.[mentlhlthin] AS nytd2_mentlhlthin
,nytd2.[prescripin] AS nytd2_prescripin
--,s1.[fcstatsv] AS s1_fcstatsv
--,s1.[lclfipssv] AS s1_lclfipssv
--,s1.[tribesv] AS s1_tribesv
--,s1.[delinqntsv] AS s1_delinqntsv
--,s1.[edlevlsv] AS s1_edlevlsv
--,s1.[specedsv] AS s1_specedsv
--,s1.[ilnasv] AS s1_ilnasv
--,s1.[psedsuppsv] AS s1_psedsuppsv
--,s1.[careersv] AS s1_careersv
--,s1.[emplytrsv] AS s1_emplytrsv
--,s1.[budgetsv] AS s1_budgetsv
--,s1.[housedsv] AS s1_housedsv
--,s1.[hlthedsv] AS s1_hlthedsv
--,s1.[famsuppsv] AS s1_famsuppsv
--,s1.[mentorsv] AS s1_mentorsv
--,s1.[silsv] AS s1_silsv
--,s1.[rmbrdfasv] AS s1_rmbrdfasv
--,s1.[educfinasv] AS s1_educfinasv
--,s1.[othrfinasv] AS s1_othrfinasv
--,s2.[fcstatsv] AS s2_fcstatsv
--,s2.[lclfipssv] AS s2_lclfipssv
--,s2.[tribesv] AS s2_tribesv
--,s2.[delinqntsv] AS s2_delinqntsv
--,s2.[edlevlsv] AS s2_edlevlsv
--,s2.[specedsv] AS s2_specedsv
--,s2.[ilnasv] AS s2_ilnasv
--,s2.[psedsuppsv] AS s2_psedsuppsv
--,s2.[careersv] AS s2_careersv
--,s2.[emplytrsv] AS s2_emplytrsv
--,s2.[budgetsv] AS s2_budgetsv
--,s2.[housedsv] AS s2_housedsv
--,s2.[hlthedsv] AS s2_hlthedsv
--,s2.[famsuppsv] AS s2_famsuppsv
--,s2.[mentorsv] AS s2_mentorsv
--,s2.[silsv] AS s2_silsv
--,s2.[rmbrdfasv] AS s2_rmbrdfasv
--,s2.[educfinasv] AS s2_educfinasv
--,s3.[fcstatsv] AS s3_fcstatsv
--,s3.[lclfipssv] AS s3_lclfipssv
--,s3.[tribesv] AS s3_tribesv
--,s3.[delinqntsv] AS s3_delinqntsv
--,s3.[edlevlsv] AS s3_edlevlsv
--,s3.[specedsv] AS s3_specedsv
--,s3.[ilnasv] AS s3_ilnasv
--,s3.[psedsuppsv] AS s3_psedsuppsv
--,s3.[careersv] AS s3_careersv
--,s3.[emplytrsv] AS s3_emplytrsv
--,s3.[budgetsv] AS s3_budgetsv
--,s3.[housedsv] AS s3_housedsv
--,s3.[hlthedsv] AS s3_hlthedsv
--,s3.[famsuppsv] AS s3_famsuppsv
--,s3.[mentorsv] AS s3_mentorsv
--,s3.[silsv] AS s3_silsv
--,s3.[rmbrdfasv] AS s3_rmbrdfasv
--,s3.[educfinasv] AS s3_educfinasv
--,s3.[othrfinasv] AS s3_othrfinasv
,IIF(fc.[clindis] = 'Yes', 1, IIF(fc.[clindis] = 'No', 0, IIF(fc.[clindis] = 'Not yet determined', 2, NULL))) AS fc_clindis
,IIF(fc.[mr] = 'Yes', '1', IIF(fc.[mr] = 'No', '0', NULL)) AS fc_mr
,IIF(fc.[vishear] = 'Yes', 1, IIF(fc.[vishear]= 'No', 0, NULL)) AS fc_vishear
,IIF(fc.[phydis] = 'Yes', 1, IIF(fc.[phydis] = 'No', 0, NULL)) AS fc_phydis
,IIF(fc.[dsmiii] = 'Yes', 1, IIF(fc.[dsmiii] = 'No', 0, NULL)) AS fc_dsmiii
,IIF(fc.[othermed] = 'Yes', 1, IIF(fc.[othermed] = 'No', 0, NULL)) AS fc_othermed
,IIF(fc.[everadpt] IN ('Yes, child has been legally adopted', 'Yes'), 1, IIF(fc.[everadpt] IN ('No, has never been legally adopted', 'No'), 0, NULL)) AS fc_everadpt
,fc.[ageadopt]
,fc.[totalrem]
,fc.[numplep]
,fc.[manrem]
,IIF(fc.[phyabuse] = 'Yes', 1, IIF(fc.[phyabuse] = 'No', 0, NULL)) AS fc_phyabuse
,IIF(fc.[sexabuse] = 'Yes', 1, IIF(fc.[sexabuse] = 'No', 0, NULL)) AS fc_sexabuse
,IIF(fc.[neglect] = 'Yes', 1, IIF(fc.[neglect] = 'No', 0, NULL)) AS fc_neglect
,IIF(fc.[aaparent] = 'Yes', 1, IIF(fc.[aaparent] = 'No', 0, NULL)) AS fc_aaparent
,IIF(fc.[daparent] = 'Yes', 1, IIF(fc.[daparent] = 'No', 0, NULL)) AS fc_daparent
,IIF(fc.[aachild] = 'Yes', 1, IIF(fc.[aachild] = 'No', 0, NULL)) AS fc_aachild
,IIF(fc.[dachild] = 'Yes', 1, IIF(fc.[dachild] = 'No', 0, NULL)) AS fc_dachild
,IIF(fc.[childis] = 'Yes', 1, IIF(fc.[childis] = 'No', 0, NULL)) AS fc_childis
,IIF(fc.[chbehprb] = 'Yes', 1, IIF(fc.[chbehprb] = 'No', 0, NULL)) AS fc_chbehprb
,IIF(fc.[prtsdied] = 'Yes', 1, IIF(fc.[prtsdied] = 'No', 0, NULL)) AS fc_prtsdied
,IIF(fc.[prtsjail] = 'Yes', 1, IIF(fc.[prtsjail] = 'No', 0, NULL)) AS fc_prtsjail
,IIF(fc.[nocope] = 'Yes', 1, IIF(fc.[nocope] = 'No', 0, NULL)) AS fc_nocope
,IIF(fc.[abandmnt] = 'Yes', 1, IIF(fc.[abandmnt] = 'No', 0, NULL)) AS fc_abandmnt
,IIF(fc.[relinqsh] = 'Yes', 1, IIF(fc.[relinqsh] = 'No', 0, NULL)) AS fc_relinqsh
,IIF(fc.[housing] = 'Yes', 1, IIF(fc.[housing] = 'No', 0, NULL)) AS fc_housing
--,fc.[curplset] -- no longer current setting not sure it adds much
,fc.[placeout]
--,fc.[casegoal] -- not sure that this matters
,fc.[ctkfamst]
--,fc.[ctk1yr] -- not sure how important this is
--,fc.[ctk2yr] -- removed becuse too many nulls
,fc.[fosfamst]
--,fc.[disreasn] -- lots of nulls
,IIF(fc.[ivefc] = 'Yes', 1, IIF(fc.[ivefc] = 'No', 0, NULL)) AS fc_ivefc
,IIF(fc.[iveaa] = 'Yes', 1, IIF(fc.[iveaa] = 'No', 0, NULL)) AS fc_iveaa
,IIF(fc.[ivaafdc] = 'Yes', 1, IIF(fc.[ivaafdc] = 'No', 0, NULL)) AS fc_ivaafdc
,IIF(fc.[ivdchsup] = 'Yes', 1, IIF(fc.[ivdchsup] = 'No', 0, NULL)) AS fc_ivdchsup
,IIF(fc.[xixmedcd] = 'Yes', 1, IIF(fc.[xixmedcd] = 'No', 0, NULL)) AS fc_xixmedcd
,IIF(fc.[ssiother] = 'Yes', 1, IIF(fc.[ssiother] = 'No', 0, NULL)) AS fc_ssiother
,IIF(fc.[noa] = 'Yes', 1, IIF(fc.[noa] = 'No', 0, NULL)) AS fc_noa
,fc.[fcmntpay]
,IIF(fc.[inatstart] = 'Yes', 1, IIF(fc.[inatstart] = 'No', 0, NULL)) AS fc_inatstart
,IIF(fc.[inatend] = 'Yes', 1, IIF(fc.[inatend] = 'No', 0, NULL)) AS fc_inatend
,IIF(fc.[entered] = 'Yes', 1, IIF(fc.[entered] = 'No', 0, NULL)) AS fc_entered
,IIF(fc.[exited] = 'Yes', 1, IIF(fc.[exited] = 'No', 0, NULL)) AS fc_exited
--,IIF(fc.[served] = 'Yes', 1, IIF(fc.[served] = 'No', 0, NULL)) AS fc_served
,IIF(fc.[iswaiting] = 'Yes', 1, IIF(fc.[iswaiting] = 'No', 0, NULL)) AS fc_iswaiting
,IIF(fc.[istpr] = 'Yes', 1, IIF(fc.[istpr] = 'No', 0, NULL)) AS fc_istpr
,fc.[latremlos]
,fc.[settinglos]
--,fc.[previouslos]
,IIF(ageatend NOT IN (17, 18), NULL, fc.[lifelos]) AS fc_lifelos
,fc.datayear
,IIF(ageatend NOT IN (17, 18), NULL, fc.[ageatstart]) AS fc_ageatstart
,IIF(ageatend NOT IN (17, 18), NULL, fc.[ageatlatrem]) AS fc_ageatlatrem
,IIF(ageatend NOT IN (17, 18), NULL, fc.[ageatend]) AS fc_ageatend
,IIF(fc.[agedout] = 'Yes', 1, IIF(fc.[agedout] = 'No', 0, NULL)) AS fc_agedout
FROM [dbCoreAdministrativeTables].[public_data].[NYTD_Outcomes_people_dim] AS npd
JOIN [public_data].[NYTD_Outcomes_Waves_1_2] AS nytd1
ON npd.stchid = nytd1.stchid
AND nytd1.cd_wave = 1
JOIN [public_data].[NYTD_Outcomes_Waves_1_2] AS nytd2
ON npd.stchid = nytd2.stchid
AND nytd2.cd_wave = 2
LEFT JOIN [public_data].[NYTD_Services_2011_2012_2013_truncated] AS s1
ON npd.stchid = s1.stchid
AND npd.sex = s1.sex
AND npd.dobyr = YEAR(CONVERT(date, s1.dob))
AND npd.dobmon = MONTH(CONVERT(date, s1.dob))
AND s1.datayear = 2011
LEFT JOIN [public_data].[NYTD_Services_2011_2012_2013_truncated] AS s2
ON npd.stchid = s2.stchid
AND npd.sex = s2.sex
AND npd.dobyr = YEAR(CONVERT(date, s2.dob))
AND npd.dobmon = MONTH(CONVERT(date, s2.dob))
AND s2.datayear = 2012
LEFT JOIN [public_data].[NYTD_Services_2011_2012_2013_truncated] AS s3
ON npd.stchid = s3.stchid
AND npd.sex = s3.sex
AND npd.dobyr = YEAR(CONVERT(date, s3.dob))
AND npd.dobmon = MONTH(CONVERT(date, s3.dob))
AND s3.datayear = 2013
LEFT JOIN
(SELECT
RANK() OVER(PARTITION BY fc.recnumbr, fc.st, npd.stchid ORDER BY fc.datayear DESC) AS r_order
,fc.recnumbr AS recnumbr_fc
,fc.st
,npd.stchid
,fc.datayear
FROM [dbCoreAdministrativeTables].[public_data].[NYTD_Outcomes_people_dim] AS npd
INNER JOIN [public_data].[afcars_foster_care_00_12] AS fc
ON npd.recnumbr = fc.RecNumbr
AND npd.st = fc.St) AS fcid
ON npd.stchid = fcid.stchid
AND r_order = 1
LEFT JOIN [public_data].[afcars_foster_care_00_12] AS fc
ON npd.recnumbr = fc.recnumbr
AND npd.st = fc.st
AND fcid.datayear = fc.datayear
ORDER BY
npd.stchid")
vars <- as.data.frame(NULL)
for(i in 1:ncol(a.test)) {
missingness <- round(length(na.omit(a.test[[i]])) / dim(a.test)[[1]], 4)
var_name <- paste(names(a.test)[[i]])
if(missingness < .55) {
vars1 <- cbind(var_name, missingness)
vars <- rbind(vars, vars1)
}
}
vars
# a.test <- a.test[! names(a.test) %in% c(paste(as.character((vars[[1]]))))]
test.am <- amelia(a.test, m = 2, idvars = c('stchid', 'recnumbr', 'dob', 'dobyr', 'dobmon', 'placeout'), noms = c('sex', 'everadpt', 'manrem', 'curplset', 'casegoal', 'ctkfamst', 'fosfamst', 'disreasn'), ord = c('st', 'ageadopt', 's1_edlevlsv', 's2_edlevlsv', 's3_edlevlsv'))
test.am <- amelia(a.test1, idvars = c('stchid', 'recnumbr', 'dob', 'dobyr', 'dobmon', 'placeout'), m = 2, noms = c('sex', 'manrem', 'ctkfamst', 'fosfamst'), ord = c('st', 'ageadopt'))
test.am <- amelia(a.test1, idvars = c('stchid', 'recnumbr', 'dob', 'dobyr', 'dobmon'), m = 2, noms = 'sex', ord = 'st')
names(a.test)
with(a.test, plot(fc_vishear, fc_dsmiii))
with(a.test, plot(fc_vishear, fc_clindis))
"fc_clindis" "fc_mr" "fc_vishear"
"fc_phydis" "fc_dsmiii"
for (i in 1:ncol(a.test)) {
}
dim(na.omit(a.test1))
dim(a.test)
missmap(a.test)
names(a.test)
a.test1 <- select(a.test, st, stchid, recnumbr, dob, dobyr, dobmon, sex, amiakn, asian, blkafram, hawaiipi, white, raceunkn, racedcln, hisorgin, nytd1_currfte, nytd1_currpte, nytd1_emplysklls, nytd1_socsecrty, nytd1_educaid, nytd1_pubfinas, nytd1_pubfoodas, nytd1_pubhousas, nytd1_othrfinas, nytd1_highedcert, nytd1_currenroll, nytd1_cnctadult, nytd1_homeless, nytd1_subabuse, nytd1_incarc, nytd1_children, nytd1_marriage, nytd1_medicaid, nytd1_othrhlthin, nytd1_medicalin, nytd1_mentlhlthin, nytd1_prescripin)
"NA" %in% names(a.test)
is.na(any(a.test$nytd2_homeless))
a.test$NA
names(a.test)
head(a.test)
dim(a.test)
length(na.omit(a.test)$fc_phydis)
length(na.omit(a.test)$fosfamst)
cor(as.numeric(na.omit(a.test)$ctkfamst), as.numeric(na.omit(a.test)$fosfamst))
as.numeric((na.omit(a.test)$ctkfamst))
?amelia
table(a.test$nytd1_outcmfcs)
head(a.test)
names(a.test)
missmap(a.test)
for (i in 1:ncol(a.test)) {
print(levels(a.test[[i]]))
}
nytd2_currenroll
nytd2_homeless
nytd2_subabuse
s1_housedsv
s1_rmbrdfasv
s1_educfinasv
s2_fcstatsv
s2_lclfipssv
s2_tribesv
s2_delinqntsv
s2_edlevlsv
s2_specedsv
lm(nytds_currenroll ~ nytd2_homeless)
dim(a.test)
paste(as.character((vars[[1]])))
cbind(paste(names(a.test)[[1]]), test)
test <- round(length(na.omit(a.test$fc_hofcctk2)) * 1.0 / dim(a.test)[[1]], 4)
|
8b3d3d4e1458737b7658405d85454c3b5ffe835a
|
08ccf80fc2b45e33c22e196f20f952f022a56c3a
|
/man/filter_by_ss.Rd
|
c5ba33e993ef3a4322b47e076427194d2cc9bb17
|
[] |
no_license
|
brodriguez97/spotiverseR
|
ef76a982f19c0836dfbe26ae86e9b781df1bb03a
|
8adc3fe0510456a0ea29bae65aa497d7944ffb61
|
refs/heads/master
| 2020-04-11T06:16:11.321025
| 2018-12-13T01:42:23
| 2018-12-13T01:42:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 889
|
rd
|
filter_by_ss.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_by_ss.R
\name{filter_by_ss}
\alias{filter_by_ss}
\title{Filter Data by a Substring}
\usage{
filter_by_ss(data = data, col = playlist_name, substring)
}
\arguments{
\item{data}{a dataframe of a user's playlists}
\item{col}{playlist_name column set as default}
\item{substring}{a character string}
}
\value{
a data frame
}
\description{
\code{filter_by_ss} locates the desired substring in the column playlist_name from the Christmas playlist
dataframe.
}
\details{
This function takes in a character string and subsets all the playlist names in the playlist_name
column that contains that substring.
}
\examples{
data(christmas_playlists)
filter_by_ss(data = christmas_playlists, col=playlist_name, substring="Jazz")
}
\author{
Belen Rodriguez <brodriguez@wesleyan.edu>
Kim Pham <kpham@wesleyan.edu>
}
|
fab18b84db7167705b008d29a2eb8474622a8b82
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/REDCapR/examples/redcap_upload_file_oneshot.Rd.R
|
0275e079f5ebeadf678100d837628c864711d428
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,196
|
r
|
redcap_upload_file_oneshot.Rd.R
|
library(REDCapR)
### Name: redcap_upload_file_oneshot
### Title: Upload a file into to a REDCap project record.
### Aliases: redcap_upload_file_oneshot
### ** Examples
## Not run:
##D #Define some constants
##D uri <- "https://bbmc.ouhsc.edu/redcap/api/"
##D token <- "D70F9ACD1EDD6F151C6EA78683944E98" #For the simple project (pid 213)
##D field <- "mugshot"
##D event <- "" # only for longitudinal events
##D
##D #Upload a single image file.
##D record <- 1
##D file_path <- base::file.path(devtools::inst(name="REDCapR"), paste0("test-data/mugshot-1.jpg"))
##D
##D redcap_upload_file_oneshot(
##D file_name=file_path, record=record, field=field,
##D redcap_uri=redcap_uri, token=token
##D )
##D
##D #Upload a collection of five images.
##D records <- 1:5
##D file_paths <- base::file.path(
##D devtools::inst(name="REDCapR"),
##D paste0("test-data/mugshot-", records, ".jpg")
##D )
##D
##D for( i in seq_along(records) ) {
##D record <- records[i]
##D file_path <- file_paths[i]
##D redcap_upload_file_oneshot(
##D file_name=file_path, record=record, field=field,
##D redcap_uri=redcap_uri, token=token
##D )
##D }
## End(Not run)
|
8a54c79bb4f8e249b3b41ecae6d09764d2927a5b
|
dad1e39f9c51f652001706bc3e115244f5000b88
|
/man/rchild.Rd
|
a500459a8dcf52241595a8311846ffa583ea914f
|
[] |
no_license
|
cran/rvHPDT
|
3a31b892222801aee5ef5a02dd6ec81a9c3ab73e
|
ba4e6b2ac36f2734fc28d1fb8a10606024a24776
|
refs/heads/master
| 2022-06-07T04:31:16.496808
| 2022-05-12T20:30:02
| 2022-05-12T20:30:02
| 17,699,412
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 195
|
rd
|
rchild.Rd
|
\name{rchild}
\alias{rchild}
\title{Internal function.}
\description{
Generate child's genotype by permuting the transmission and non-transmission status based on parents' genotype.
}
|
dd5efe9a3bac2b8dc1cd44c1eabbe5fba865a0fc
|
0b23ba8385531b4efba2aa385b6640fb19d01713
|
/tests/testthat/test-plot_mx_umap.R
|
5c7ac6962c9aa0f96d351822dc96d4e4b898e0ce
|
[
"MIT"
] |
permissive
|
ColemanRHarris/mxnorm
|
1102246d545d4fe6c057adc5e4a576e7312b62b0
|
ac6cd78772da0624fdc119fcfd764aedad96e559
|
refs/heads/main
| 2023-05-11T20:33:10.211335
| 2023-05-01T16:49:11
| 2023-05-01T16:49:11
| 404,084,403
| 3
| 3
|
NOASSERTION
| 2023-05-01T16:33:34
| 2021-09-07T18:32:41
|
R
|
UTF-8
|
R
| false
| false
| 571
|
r
|
test-plot_mx_umap.R
|
test_that("plotting works", {
mx_data = mx_dataset(mxnorm::mx_sample, "slide_id", "image_id",
c("marker1_vals","marker2_vals","marker3_vals"),
c("metadata1_vals"))
mx_data = mx_normalize(mx_data, transform="log10",method="None")
## no umap
expect_error(plot_mx_umap(mx_data))
## not obj
expect_error(plot_mx_umap(rnorm(100)))
mx_data = run_reduce_umap(mx_data, table="normalized",
c("marker1_vals","marker2_vals","marker3_vals"))
expect_equal(class(plot_mx_umap(mx_data)),c("gg","ggplot"))
})
|
b6912ec19065257eeefa1458b9fc220aaf46b483
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/logKDE/examples/bw.logCV.Rd.R
|
30640aa94aa01518589d95968141083a143fa0d5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 196
|
r
|
bw.logCV.Rd.R
|
library(logKDE)
### Name: bw.logCV
### Title: Optimal CV BW estimation for strictly positive distributions.
### Aliases: bw.logCV
### ** Examples
bw.logCV(rchisq(100,10), grid=21, NB=512)
|
44cda1bf26d3019aa91d7fee51056032588a2d5a
|
272fca8ac645b89b3796ce90453fee8ad090a491
|
/script_20220722/MBC_QDM_biasCorrection_20200623.R
|
cacf6bea30e9e24bbf37a2e520a72bbb7836e4f1
|
[] |
no_license
|
caubrywake/FuturePeyto
|
24e7f3b7e216c72e01b6b3489a35af71de184870
|
6bc6d4262d9415d699c70b8abdfa9b1637d35377
|
refs/heads/master
| 2023-04-14T17:21:14.805835
| 2022-07-23T03:19:21
| 2022-07-23T03:19:21
| 259,411,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,150
|
r
|
MBC_QDM_biasCorrection_20200623.R
|
## Bias-correct the WRF dataset (current climate + PGW) against Peyto station
## observations using quantile delta mapping (MBC::QDM), for the 5 WRF grid
## cells covering Peyto at different elevations.
## Code from Logan; edited July 1 to produce bias-corrected series per cell.
##
## Fixes relative to the original cell-by-cell script:
##  * Precipitation was corrected against `Peyto_p` (never defined; cells 1-3)
##    or against `Peyto_obs[,2]` (air temperature; cell 4). All cells now use
##    `Peyto_obs[,7]`, the precipitation column, as cell 5 already did.
##  * `trace.calc = 0.5*trace` and `ratio.max.trace = 10*trace` were evaluated
##    in the calling environment, where `trace` is base::trace (a function),
##    not a number. These arguments are now left at their QDM defaults, which
##    are computed from QDM's own `trace` argument (0.5*trace and 10*trace).
##  * The 6-variables x 5-cells duplication is collapsed into one helper.
setwd("D:/FuturePeyto/WRFdataset")

## Load packages
library(CRHMr)
library(MBC)

## Import station observations. Columns 2..7 are t, ea, u, Qsi, Qli, p and
## line up with the same columns of the WRF .obs files.
Peyto_obs <- readObsFile('D:/FuturePeyto/dataproc/met/A0/crhmOBS_metMOH_ERAp_20200819.obs','etc/GMT+7')
Peyto_obs <- changeRHtoEa(Peyto_obs)

## Quantile-delta-map every forcing variable of one WRF grid cell and write
## one corrected series per variable to `out_dir`.
##   cell     - cell number, used only in the output file names
##   cur_file - current-climate WRF .obs file for this cell
##   pgw_file - pseudo-global-warming WRF .obs file for this cell
##   obs      - station observations (columns 2..7 match the WRF columns)
##   out_dir  - destination directory for the <var>_QDM_<cell>.txt files
qdm_correct_cell <- function(cell, cur_file, pgw_file,
                             obs = Peyto_obs,
                             out_dir = "D:/FuturePeyto/dataproc/wrf/qdmWRF_perCell") {
  wrf_cur <- readObsFile(cur_file, 'etc/GMT+7')
  wrf_pgw <- readObsFile(pgw_file, 'etc/GMT+7')
  # Variable name -> column index, identical in obs and WRF data frames.
  vars <- c(t = 2, ea = 3, u = 4, Qsi = 5, Qli = 6, p = 7)
  for (v in names(vars)) {
    col <- vars[[v]]
    corrected <- QDM(obs[, col], wrf_cur[, col], wrf_pgw[, col],
                     ratio = FALSE, trace = 0.05, jitter.factor = 0,
                     n.tau = NULL, ratio.max = 2, ECBC = FALSE,
                     ties = 'first', subsample = NULL, pp.type = 7)
    write.table(corrected,
                file.path(out_dir, paste0(v, "_QDM_", cell, ".txt")),
                sep = "\t")
  }
  invisible(NULL)
}

## Cell 1 raw files live in the older WhichWRFCell tree; cells 2-5 in the
## FuturePeyto tree with dated file names.
qdm_correct_cell(1,
                 'D:/WhichWRFCell/data/wrf/rawWRF_percell/rawWRF_CUR_Cell1.obs',
                 'D:/WhichWRFCell/data/wrf/rawWRF_percell/rawWRF_PGW_Cell1.obs')
raw_dir <- 'D:/FuturePeyto/dataproc/wrf/rawWRF_perCell'
for (cell in 2:5) {
  qdm_correct_cell(cell,
                   file.path(raw_dir, sprintf('rawWRF_CUR_Cell%d_20200701.obs', cell)),
                   file.path(raw_dir, sprintf('rawWRF_PGW_Cell%d_20200716.obs', cell)))
}
|
239ae890b9af608d723885929b6df0e288dc2346
|
63d30cf510e106dc076ecf0f7fb18fad8457964f
|
/cariceae/R/gbif_functions_JP.r
|
f800413fa3e6932edfeada9b9c8db4b3198c210d
|
[] |
no_license
|
andrew-hipp/mor-systematics
|
09ef8a5a7c0b88572d6b63ba3afdc2c774b5fc3a
|
c183d76adeb0d861b262c8cb621d46b433894b40
|
refs/heads/master
| 2022-12-24T00:06:09.465559
| 2022-12-16T14:03:45
| 2022-12-16T14:03:45
| 33,579,749
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,209
|
r
|
gbif_functions_JP.r
|
###GBIF/NICHE MODELING FUNCTIONS (v1.3 7-5-2013 R code)
### To download gbif data for Cariceae sp, clean up data (remove duplicates, remove less precise georef's to create niche maps of different Taxons
### Marlene Hahn and Andrew Hipp May 2013, as part of Carex project, (based on original R coding by Marcial Escudero and Ian Pearse.)
#v1.1 additions- - now writes out files in download_gbif, and clean_gbif; pdf and jpeg maps creates with axes
#v1.1 issues- download-gbif function is running into some extraction issues that kill the function for some datasets. This is fixed in 1.2, I think.
#v1.2 additions- added log file in mapping functions. no maps with null data are created; maps with "serious mapping issues" are deleted.
#v1.2 issues- maps are created even if there are no data points due to a FALSE flag of inprecision in lat/long.; logfile is made as rows instead of columns which is non-optimal.
# download errors still occur- possibly due to server issues- but I suspect that this might cause us to sometimes lose records in our dataframe.
#v1.3 additions- maps_jpeg_imprecise added to include the mapping of imprecise data points. logfile for map program is corrected.
# *****MAPS Still need to be spot checked for lat and long reversals, and lack of - signs (note in manual error log/fix in dataframe, upload and rerun map program)
## Step 1: install packages (do only once)
#libs <- c("rJava", "rgdal", "sp", "XML", "raster", "dismo", "maps", "maptools","RColorBrewer", "classInt", "mapdata", "MIPHENO")
#lapply(libs, install.packages)
## Step2: Upload species list into R(Genus species etc...)
#speciesfile <- read.delim(file.choose(), as.is = TRUE) ##file must be text, tab delim for this to work; CAREX species names coming from WorldChecklist extraction. (species column needs header as "species")
#note for Carex first run, I only used species marked as accepted species, and excluded intraspecific designations. 5/14/2013- MH
## Step 3: download gbif data for different taxons; you will have to enter in a species list, and a genus name for the species list.
#EX. Kobresia_gbif <-download_gbif(Kobresiaspecies,"Kobresia")
# Schoenoxiphium_gbif <-download_gbif(Schoenoxiphiumspecies,"Schoenoxiphium")
# Carex_gbifdata <-download_gbif(Carexspecies,"Carex")
# Uncinia_gbif <- download_gbif(Unciniaspecies,"Uncinia")
# Cymophyllus_gbif <- download_gbif(Cymophyllusspecies, "Cymophyllus")
#' Download GBIF occurrence records for a list of species.
#'
#' @param specieslist Either an object with a `species` element/column
#'   (data.frame or list) or a plain character vector of specific epithets.
#' @param genus Genus name passed to dismo::gbif().
#' @param variants If TRUE (default), append "*" to each epithet so GBIF also
#'   returns name variants.
#' @return A named list of per-species GBIF results ("try-error" elements mark
#'   failed downloads). Each successful raw result is also saved as an .Rdata
#'   file under a dated directory; the working directory is intentionally left
#'   changed to the new GBIF-WORK_<date> directory for the later steps.
download_gbif = function(specieslist, genus, variants = TRUE) {
  require(dismo)
  # Dated working directories; time-suffixed when rerun on the same day.
  toppath = paste("GBIF-WORK_", format(Sys.time(), "%Y-%m-%d"), sep = "")
  if (toppath %in% dir()) toppath = paste(toppath, format(Sys.time(), ".%H.%M"), sep = "")
  dir.create(toppath)
  setwd(toppath)   # all GBIF work for this run lives under toppath
  newpath = paste("rawgbifdata_", format(Sys.time(), "%Y-%m-%d"), sep = "")
  if (newpath %in% dir()) newpath = paste(newpath, format(Sys.time(), ".%H.%M"), sep = "")
  dir.create(newpath)  # raw R gbif objects are stored here
  # Fix: a plain character vector (explicitly advertised as supported)
  # previously left the working list undefined; both input forms now work.
  if (is.list(specieslist)) {
    species_epithets <- specieslist$species
  } else {
    species_epithets <- as.character(specieslist)
  }
  specieslist_work <- species_epithets
  # Append "*" so GBIF matches name variants unless the caller opts out.
  if (variants) specieslist_work <- paste(specieslist_work, "*", sep = "")
  gbifdata <- lapply(specieslist_work, function(x) {
    out <- try(gbif(genus, species = x, ext = NULL, args = NULL, geo = TRUE,
                    sp = FALSE, removeZeros = TRUE, download = TRUE,
                    getAlt = TRUE, ntries = 5, nrecs = 1000, start = 1,
                    end = NULL, feedback = 3))
    # Save each successful raw download, named after the returned species.
    if (class(out) != "try-error")
      save(out, file = paste(newpath, "/rawgbifdata_",
                             sub(" .*", "", sub(" {0,1} ", "_", out$species[1])),
                             '.Rdata', sep = ''))
    return(out)
  })
  # Fix: naming previously used specieslist$species, which errors when the
  # input is an atomic vector; use the pre-"*" epithets instead.
  names(gbifdata) <- species_epithets
  return(gbifdata)
}
##Step 4: -Flags specimens with low lat/long precision as false in precise_enough column; flags duplicate specimens (species records with same lat/long coordinates) as false in unique_record column.
#Ex. Schoenoxiphium_cleaned <- clean_gbif(Schoenoxiphium_gbifdata)
#' Flag imprecise and duplicate GBIF records for each species.
#'
#' Adds three columns to every per-species data frame in `gbifdata`:
#'   calc_error     - coordinate precision in degrees inferred from the number
#'                    of decimal places of `lat` (100, 10, 1, ..., 0.0001)
#'   precise_enough - TRUE when calc_error < 10 degrees
#'   unique_record  - TRUE for the first record at a given lat OR lon value
#' Records are flagged, not removed. One 'cleaned_<species> <date>.txt' file
#' per species is written into a dated directory, and the working directory is
#' left changed to that directory.
#'
#' NOTE(review): precision is judged from `lat` only — a record with a precise
#' latitude but imprecise longitude is still flagged TRUE; confirm intended.
#' NOTE(review): `clean.by.locality` is currently a no-op (see the commented
#' line below, "CLEAN UP NULLS FIRST").
clean_gbif = function(gbifdata, clean.by.locality = FALSE) {
  # Dated output directory; time-suffixed when rerun on the same day.
  newpath=paste("cleanedgbifdata_",format(Sys.time(), "%Y-%m-%d"),sep="")
  if (newpath %in% dir()) newpath=paste(newpath,format(Sys.time(), ".%H.%M"), sep="")
  dir.create(newpath)
  setwd(newpath)
  # Coerce successful downloads to data frames; failed downloads stay as-is.
  for (i in 1:length(gbifdata)) {
    if(class(gbifdata[[i]]) == 'try-error') next #??????????
    else gbifdata[[i]] <- as.data.frame(gbifdata[[i]]) #Create dataframe of gbif data
  }
  xd <- list() #tempfile to use to compare data that will be flagged as unuseable
  for (i in 1:length(gbifdata)) {
    a = names(gbifdata)[i]
    print(paste("Doing", a))
    # if(a == 'diluta') browser()
    # Skip failed downloads and empty results.
    if(class(gbifdata[[i]]) %in% c('try-error', 'NULL')) next
    if(try(nrow(gbifdata[[i]])) == 0) next
    # if(dim(gbifdata[[i]][1]) %in% c(NULL, 0)) next
    gbifdata[[i]]$lat <- as.numeric(gbifdata[[i]]$lat)
    # Precision from decimal places of lat: an integer latitude implies up to
    # 100 degrees of error, one decimal -> 10, ..., >=5 decimals -> 0.0001.
    gbifdata[[i]]$calc_error <- ifelse(gbifdata[[i]]$lat==as.integer(gbifdata[[i]]$lat), 100, ifelse((10*gbifdata[[i]]$lat)==as.integer(10*gbifdata[[i]]$lat), 10, ifelse((100*gbifdata[[i]]$lat)==as.integer(100*gbifdata[[i]]$lat), 1, ifelse((1000*gbifdata[[i]]$lat)==as.integer(1000*gbifdata[[i]]$lat), 0.1, ifelse((10000*gbifdata[[i]]$lat)==as.integer(10000*gbifdata[[i]]$lat), 0.01, ifelse((100000*gbifdata[[i]]$lat)==as.integer(100000*gbifdata[[i]]$lat), 0.001, 0.0001))))))
    gbifdata[[i]]$precise_enough <- ifelse(gbifdata[[i]]$calc_error < 10, TRUE, FALSE)
    gbifdata[[i]]$unique_record <- ifelse(!duplicated(gbifdata[[i]]$lat) | !duplicated(gbifdata[[i]]$lon), TRUE, FALSE) #cleans by lat and long
    # if(clean.by.locality) gbifdata[[i]]$unique_record <- gbifdata[[i]]$unique_record & ifelse(!duplicated(gbifdata[[i]]$cloc), TRUE, FALSE) -- CLEAN UP NULLS FIRST
    write.table(gbifdata[[i]], file = paste('cleaned_',sub(" .*","",sub(" {0,1} ", "_", gbifdata[[i]]$species[[1]])),format(Sys.time(),"%Y-%m-%d"),'.txt'), sep = "|")
    # Keep the precise subset only for the row-count comparison printed below.
    xd[[i]]<-subset(gbifdata[[i]], calc_error < 10) # can be cleaned out
  } # close i
  #browser()
  nrowlistx <- lapply(gbifdata, nrow) #this may fail now
  nrowlistxd <- lapply(xd, nrow)
  # Count non-unique records per species.
  # NOTE(review): when `out` is set to 0, the trailing `if` is the function's
  # last expression and returns NULL (not 0) for that element; also
  # `if(try(nrow(x) == 0))` errors on try-error/NULL elements because the
  # condition has length zero — confirm this path is never hit in practice.
  number.not.unique <- lapply(gbifdata, function(x) {
    out = 1
    if(class(x) %in% c('try-error', 'NULL')) out <- 0
    if(try(nrow(x) == 0)) out <- 0
    if(out == 1) out <- sum(!x$unique_record)
    }
  ) # end lapply function
  print("Comparison of # of original rows to # of high precision rows for LAT/LONG Coordinates; third column is number of rows not unique based on lat and long")
  print(cbind(nrowlistx,nrowlistxd,number.not.unique))
  return(gbifdata)
}
### Step 5: Create maps of data. There are several functions for this, however map_gbif_jpeg_imprecise will give the best map of the 3 options (larger file, both precise and imprecise data, etc)
#Step 5a: Create pdf maps of data (excludes specimen records flagged as low precision or as duplicate record.)
#note maps are still made if originally there was data data but flagged as false. Map without any points...
#Ex. Sch_log_pdf <- map_gbif(Schoenoxiphium_cleaned_dups)
#' Write one PDF range map per species from flagged GBIF data.
#'
#' Expects the output of clean_gbif(): a named list of data frames carrying
#' `lon`, `lat`, `precise_enough` and `unique_record` columns. Only records
#' flagged precise AND unique are plotted. Datasets that cannot be mapped
#' (failed download, NULL data, unmappable coordinates) are skipped and the
#' reason recorded; maps that errored are deleted from disk.
#'
#' @param gbifdata Named list of per-species data frames (or "try-error"s).
#' @return Character vector of per-species log messages (also written to disk,
#'   next to the maps, in a dated mapsPDF_ directory).
map_gbif = function(gbifdata) {
  require(maps)
  require(maptools)
  require(RColorBrewer)
  require(classInt)
  require(mapdata)
  setwd("../")
  # Dated output directory; time-suffixed when rerun on the same day.
  newpath=paste("mapsPDF_",format(Sys.time(), "%Y-%m-%d"),sep="")
  if (newpath %in% dir()) newpath=paste(newpath,format(Sys.time(), ".%H.%M"), sep="")
  dir.create(newpath)
  setwd(newpath)
  logbook <- character(length(gbifdata))
  for (i in 1:length(gbifdata)){
    # Fix: the original tested class(i) — the loop index, never a "try-error" —
    # so failed downloads fell through and crashed the min()/max() calls below.
    if(inherits(gbifdata[[i]], "try-error")) {
      message(paste('Dataset', names(gbifdata[i]), 'is an utter failure'))
      logbook[i] = (paste('Dataset',names(gbifdata[i]), 'is an utter failure. Most likely download error'))
      next
    } # close if
    if(length(gbifdata[[i]]) == 0) { ##skip over nulls dataframes
      message(paste('Dataset', names(gbifdata[i]), 'is NULL'))
      logbook[i] = (paste('Dataset',names(gbifdata[i]), 'is NULL.'))
      next
    } # close if
    pdf(file = paste(names(gbifdata)[i],'_map_',format(Sys.time(),"%Y-%m-%d"),'.pdf',sep =''))
    # Base map framed 10 degrees beyond the observed extent; this fails (and
    # is logged) when coordinates are nonsensical, e.g. lat/lon swapped.
    map.try <- try(map("worldHires", xlim = c(min(gbifdata[[i]]$lon)-10, max(gbifdata[[i]]$lon)+10), ylim = c(min(gbifdata[[i]]$lat)-10, max(gbifdata[[i]]$lat)+10)))
    if(inherits(map.try, 'try-error')) {
      message(paste('Dataset', names(gbifdata[i]), 'has some SERIOUS mapping problems. Check to see if lats and Longs are switched....Check it out.'))
      logbook[i] =(paste('Dataset',names(gbifdata[i]), 'has some SERIOUS mapping problems. Check to see if lats and Longs are switched....Check it out.'))
      dev.off()
      file.remove((file = paste(names(gbifdata)[i],'_map_',format(Sys.time(),"%Y-%m-%d"),'.pdf',sep =''))) ##removed files with errors
      next
    } # close if
    # Plot only records flagged both precise and unique.
    points(gbifdata[[i]]$lon[gbifdata[[i]]$precise_enough & gbifdata[[i]]$unique_record], gbifdata[[i]]$lat[gbifdata[[i]]$precise_enough & gbifdata[[i]]$unique_record], pch = 16, col= 2, cex = 0.5)
    map.axes()
    title(main = gbifdata[[i]]$species[1], sub = NULL, xlab = NULL, ylab = NULL, line = NA, outer = FALSE)
    dev.off(which = dev.cur())
    logbook[i] = (paste('PDF Map generated for dataset', names(gbifdata[i])))
  } # close i
  write.table(logbook, file = paste('PDF_MAP_log',format(Sys.time(),"%Y-%m-%d"),'.txt'), sep = "|") ##writes out log file
  return(logbook)
}
##Step 5b: Create jpeg maps of data (excludes specimen records flagged as low precision or as duplicate record.)
#' Write one JPEG range map per species from flagged GBIF data.
#'
#' JPEG counterpart of map_gbif(): plots only records flagged precise AND
#' unique, skips and logs datasets that cannot be mapped, and deletes maps
#' that errored while drawing. The log is written next to the maps in a dated
#' mapsJPEG_ directory (this function returns nothing useful; the log file is
#' the record).
#'
#' @param gbifdata Named list of per-species data frames (or "try-error"s)
#'   produced by clean_gbif().
map_gbif_jpeg = function(gbifdata) {
  require(maps)
  require(maptools)
  require(RColorBrewer)
  require(classInt)
  require(mapdata)
  setwd("../")
  # Dated output directory; time-suffixed when rerun on the same day.
  newpath=paste("mapsJPEG_",format(Sys.time(), "%Y-%m-%d"),sep="")
  if (newpath %in% dir()) newpath=paste(newpath,format(Sys.time(), ".%H.%M"), sep="")
  dir.create(newpath)
  setwd(newpath)
  logbook <- character(length(gbifdata))
  for (i in 1:length(gbifdata)){
    # Fix: the original tested class(i) — the loop index, never a "try-error" —
    # so failed downloads fell through and crashed the min()/max() calls below.
    if(inherits(gbifdata[[i]], "try-error")) {
      message(paste('Dataset', i, names(gbifdata[i]), 'is an utter failure'))
      logbook[i] = (paste('Dataset',names(gbifdata[i]), 'is an utter failure. Most likely download error'))
      next
    } # close if
    if(length(gbifdata[[i]]) == 0) { ##skip over nulls dataframes
      message(paste('Dataset', names(gbifdata[i]), 'is NULL'))
      logbook[i] = (paste('Dataset',names(gbifdata[i]), 'is NULL.'))
      next
    } # close if
    jpeg(filename = paste(names(gbifdata)[i],'_map_',format(Sys.time(),"%Y-%m-%d"),'.jpeg',sep =''), width = 480, height = 480, pointsize = 12, quality = 100, bg = "white")
    # Base map framed 10 degrees beyond the observed extent; this fails (and
    # is logged) when coordinates are nonsensical, e.g. lat/lon swapped.
    map.try <- try(map("worldHires", xlim = c(min(gbifdata[[i]]$lon)-10, max(gbifdata[[i]]$lon)+10), ylim = c(min(gbifdata[[i]]$lat)-10, max(gbifdata[[i]]$lat)+10)))
    if(inherits(map.try, 'try-error')) {
      message(paste('Dataset', i, names(gbifdata[i]), 'has some SERIOUS mapping problems. Check to see if lats and Longs are switched....Check it out.'))
      logbook[i] =(paste('Dataset', names(gbifdata[i]), 'has some SERIOUS mapping problems. Check to see if lats and Longs are switched....Check it out.'))
      dev.off()
      file.remove((file = paste(names(gbifdata)[i],'_map_',format(Sys.time(),"%Y-%m-%d"),'.jpeg',sep =''))) ##removed jpeg files with errors
      next
    } # close if
    # Plot only records flagged both precise and unique.
    points(gbifdata[[i]]$lon[gbifdata[[i]]$precise_enough & gbifdata[[i]]$unique_record], gbifdata[[i]]$lat[gbifdata[[i]]$precise_enough & gbifdata[[i]]$unique_record], pch = 16, col= 2, cex = 0.5)
    map.axes()
    title(main = gbifdata[[i]]$species[1], sub = NULL, xlab = NULL, ylab = NULL, line = NA, outer = FALSE)
    dev.off(which = dev.cur())
    logbook[i] = (paste('Jpeg Map generated for dataset', names(gbifdata[i])))
  } # close i
  write.table(logbook, file = paste('Jpeg_MAP_log',format(Sys.time(),"%Y-%m-%d"),'.txt'), sep = "|") ##writes out log file
}
##STEP 5c: Create larger jpeg maps of data (this function maps both precise and imprecise data)
# Note change genus in function call if not using for Carex. (This is only so that map filenames contain genus)
#' Write large JPEG range maps including imprecise GBIF records.
#'
#' Unlike map_gbif()/map_gbif_jpeg(), imprecise records are also drawn: a
#' green triangle at the reported coordinate plus a green circle whose radius
#' reflects the coordinate uncertainty (sizes[1] for +/- 0.05 degree records,
#' sizes[2] for +/- 0.5 degree records, keyed on calc_error 10/100). Precise,
#' unique records are drawn as red dots. A legend and the genus name in the
#' file name distinguish these maps from the basic ones.
#'
#' @param gbifdata Named list of per-species data frames from clean_gbif().
#' @param genus Genus name used in output file names (change when not Carex).
#' @param sizes Circle sizes for the two imprecision classes (calc_error 10, 100).
map_gbif_jpeg_imprecise = function(gbifdata, genus = "Carex", sizes = c(2.5, 5.0)) {
  require(maps)
  require(maptools)
  require(RColorBrewer)
  require(classInt)
  require(mapdata)
  setwd("../")
  # Dated output directory; time-suffixed when rerun on the same day.
  newpath=paste("mapsJPEG-big_",format(Sys.time(), "%Y-%m-%d"),sep="")
  if (newpath %in% dir()) newpath=paste(newpath,format(Sys.time(), ".%H.%M"), sep="")
  dir.create(newpath)
  setwd(newpath)
  logbook <- character(length(gbifdata))
  for (i in 1:length(gbifdata)){
    # Fix: the original tested class(i) — the loop index, never a "try-error" —
    # so failed downloads fell through and crashed the min()/max() calls below.
    if(inherits(gbifdata[[i]], "try-error")) {
      logbook[i] = (paste('Dataset',names(gbifdata[i]), 'is an utter failure. Most likely download error'))
      next
    } # close if
    if(length(gbifdata[[i]]) == 0) { ##skip over nulls dataframes
      logbook[i] = (paste('Dataset',names(gbifdata[i]), 'is NULL.'))
      next
    } # close if
    jpeg(filename = paste(genus,'_', names(gbifdata)[i],'_map_',format(Sys.time(),"%Y-%m-%d"),'.jpeg',sep =''), width = 1200, height = 1200, pointsize = 12, quality = 100, bg = "white")
    # Wider frame than the basic maps (15/18 degrees) to leave room for the
    # uncertainty circles and the legend.
    map.try <- try(map("worldHires", xlim = c(min(gbifdata[[i]]$lon)-15, max(gbifdata[[i]]$lon)+15), ylim = c(min(gbifdata[[i]]$lat)-18, max(gbifdata[[i]]$lat)+18)))
    if(inherits(map.try, 'try-error')) {
      logbook[i] =(paste('Dataset', names(gbifdata[i]), 'has some SERIOUS mapping problems. Check to see if lats and Longs are switched....Check it out.'))
      dev.off()
      file.remove((file = paste(genus,'_', names(gbifdata)[i],'_map_',format(Sys.time(),"%Y-%m-%d"),'.jpeg',sep =''))) ##removed jpeg files with errors
      next
    } # close if
    ## now subset by those records that are not precise enough
    gbifdata.temp <- gbifdata[[i]][!gbifdata[[i]]$precise_enough, ]
    if(dim(gbifdata.temp)[1] > 0) {
      keep <- gbifdata.temp$unique_record
      points(gbifdata.temp$lon[keep], gbifdata.temp$lat[keep], pch = 24, col= "green", cex = 1)
      # Fix: the uncertainty-circle sizes were computed over ALL imprecise
      # rows while the coordinates were subset by unique_record, misaligning
      # symbol sizes with points; subset calc_error the same way.
      points(gbifdata.temp$lon[keep], gbifdata.temp$lat[keep], pch = 1, col= "green", cex = sapply(gbifdata.temp$calc_error[keep], function(x) switch(as.character(x), '10' = sizes[1], '100' = sizes[2])))
    } # end the imprecise-records if
    # Precise, unique records as solid red dots.
    points(gbifdata[[i]]$lon[gbifdata[[i]]$precise_enough & gbifdata[[i]]$unique_record], gbifdata[[i]]$lat[gbifdata[[i]]$precise_enough & gbifdata[[i]]$unique_record], pch = 16, col= 2, cex = 1)
    map.axes()
    legend("bottomright", c("Precise coordinates", "Imprecise coordinates", "+/- 0.05 degree", "+/- 0.5 degree"), cex=1.5, pt.cex=c(1, 1, sizes[1] , sizes[2]), pch= c(16,24, 1, 1), col= c("red","green", "green", "green"))
    title(main = gbifdata[[i]]$species[1], sub = "GBIF Specimen Records", xlab = NULL, ylab = NULL, line = NA, outer = FALSE)
    dev.off(which = dev.cur())
    logbook[i] = (paste('Jpeg Map generated for dataset', names(gbifdata[i])))
  } # close i
  write.table(logbook, file = paste(genus,'Jpeg_MAP_log',format(Sys.time(),"%Y-%m-%d"),'.txt'), sep = "|") ##writes out log file
}
##Step6: Download WorldClim Data (http://www.worldclim.org/download) to get bioclim variables
#EX Sch_bioclim <- world_clim(Sch_clean)
## Oct 3 2013 JP changed all bioclim variable names to bioclimdata because of function with the same name in dismo package
#' Extract WorldClim bioclim variables at each precise, unique GBIF record.
#'
#' Downloads the 19 bioclim layers at 5 arc-min resolution (dismo::getData),
#' samples them at each record's coordinates, and binds the results to the
#' record's catalogNumber/lat/lon. One CSV per species is written into a dated
#' bioclimdata_ directory, plus the whole list as an R data file.
#'
#' @param gbifdata Named list of per-species data frames from clean_gbif()
#'   (must carry precise_enough and unique_record flags).
#' @return List (same length/order as gbifdata, NULL where a species had no
#'   usable records) of data frames: catalogNumber, lat, lon, bio1..bio19.
#' NOTE(review): the result list is never named after the species — only file
#' names carry the species; consider names(bioclimdata) <- names(gbifdata).
world_clim = function(gbifdata) {
  require(rJava)
  require(rgdal)
  require(sp)
  require(XML)
  require(raster)
  require(dismo)
  setwd("../")
  # Dated output directory; time-suffixed when rerun on the same day.
  newpath=paste("bioclimdata_",format(Sys.time(), "%Y-%m-%d"),sep="")
  if (newpath %in% dir()) newpath=paste(newpath,format(Sys.time(), ".%H.%M"), sep="")
  dir.create(newpath)
  # 19 bioclim layers, 5 arc-min resolution (downloaded/cached by dismo).
  clim <-getData('worldclim', var='bio', res=5)
  bioclimdata <- vector('list', length(gbifdata))
  # we also need to exclude flagged data again....
  # Columns 8:7 pass (lon, lat) in that order — raster::extract expects x
  # (longitude) first; presumably columns 7 and 8 of the gbif frame are lat
  # and lon respectively — double check with Marcial.
  for (i in 1:length(gbifdata)) {
    message(paste('Doing dataset', i, ':', names(gbifdata)[i]))
    # Keep only records flagged precise AND unique by clean_gbif().
    working.gbif <- gbifdata[[i]][gbifdata[[i]]$precise_enough & gbifdata[[i]]$unique_record, ]
    if(dim(working.gbif)[1] < 1) next
    # if (gbifdata[[i]]$precise_enough & gbifdata[[i]]$unique_record) {
    bioclimdata[[i]] <- extract(clim, working.gbif[, 8:7], method='simple', buffer=NULL, small=FALSE, cellnumbers=FALSE, fun=NULL, na.rm=TRUE)
    #}
    # gbifdata[[i]]$bio1 <- ifelse(!duplicated(gbifdata[[i]]$lat) | !duplicated(gbifdata[[i]]$lon), TRUE, FALSE)
    # Attach record identifiers so extracted values stay traceable.
    bioclimdata[[i]] <- cbind(working.gbif[c('catalogNumber','lat', 'lon')], bioclimdata[[i]])
    write.csv(bioclimdata[[i]],file=paste(newpath,"/bioclimdata.",names(gbifdata)[i],".csv",sep=""))
  } #closes i
  save(bioclimdata,file=paste(newpath,"/bioclimdata",format(Sys.time(), ".%H.%M"), sep=""))
  return(bioclimdata)
}
##Step7: Remove OUTLIERS from Bioclim data
#' Remove outliers from each per-species bioclim table.
#'
#' Applies MIPHENO::rm.outlier (fill = TRUE) to every non-NULL element with
#' more than one row; single-row tables are passed through unchanged; NULL
#' elements stay NULL.
#'
#' Fixes relative to the original:
#'  * the result was named from a global `specieslist` object (not a
#'    parameter), which fails or mis-labels elements when that global is
#'    stale; names are now carried over from the input list itself;
#'  * the output list is padded to the input's length before naming, so
#'    trailing NULL elements no longer break the names assignment.
#'
#' @param bioclimdata List of per-species tables (may contain NULLs), e.g.
#'   the output of world_clim().
#' @return List parallel to `bioclimdata` with outlier values replaced.
rm_outliers_fn = function(bioclimdata){
  require(MIPHENO)
  bioclimnoout <- list()
  for(i in seq_along(bioclimdata)) {
    if (!is.null(bioclimdata[[i]])) {
      if (nrow(bioclimdata[[i]]) > 1) {
        # rm.outlier replaces flagged values in place (fill = TRUE).
        bioclimnoout[[i]] <- rm.outlier(bioclimdata[[i]], fill = TRUE)
      } else {
        bioclimnoout[[i]] <- bioclimdata[[i]]  # too few rows to judge outliers
      }
    }
  }
  length(bioclimnoout) <- length(bioclimdata)  # keep NULL placeholders aligned
  names(bioclimnoout) <- names(bioclimdata)
  return(bioclimnoout)
}
##Step 8: Calculate MEAN and Standard Deviation (SD), then generates PCA.
#' Per-species means and standard deviations of bioclim variables.
#'
#' NULL list elements are skipped; rows of the returned matrices are named
#' after the input list. Optionally draws a quick PCA scatter of the means.
#'
#' @param bioclimnoout List of per-species numeric tables (NULLs allowed).
#' @param plotpca If TRUE, plot PC1 vs PC2 of the species means.
#' @return list(Means = <species x variable matrix>, SDs = <same shape>).
mean_SD = function(bioclimnoout, plotpca = FALSE) {
  climate_means <- list()
  climate_sds <- list()
  for (idx in seq_along(bioclimnoout)) {
    tbl <- bioclimnoout[[idx]]
    if (is.null(tbl)) next
    climate_means[[idx]] <- colMeans(tbl, na.rm = TRUE, dims = 1)
    climate_sds[[idx]] <- apply(tbl, 2, sd, na.rm = TRUE)
  }
  # Carry species names onto the per-species summaries before stacking.
  names(climate_means) <- names(bioclimnoout)
  names(climate_sds) <- names(bioclimnoout)
  Means <- do.call(rbind, climate_means)
  SDs <- do.call(rbind, climate_sds)
  out <- list(Means = Means, SDs = SDs)
  if (plotpca) {
    # Quick visual check: first two principal components of the means.
    pca <- prcomp(Means, retx = TRUE, center = TRUE, scale. = TRUE, tol = NULL)
    plot(pca$x[, "PC1"], pca$x[, "PC2"])
  }
  return(out)
}
|
d81d8d15ac69c6c2dafec08aa183f40e23d8f0dd
|
263ea8a1ba4b5240af63c086ee54b5605b5326a0
|
/tests/testthat/test-dictionary.R
|
72cb900277c97a5de8c992ca589287e80365f091
|
[] |
no_license
|
iMarcello/Kmisc
|
fa3c733d4b0c7583e15b686af1a639a0cff801bf
|
0a35b51944528ed4adb24d3c6687c8c7cc9fe549
|
refs/heads/master
| 2020-05-26T07:49:16.073307
| 2018-06-12T17:31:55
| 2018-06-12T17:31:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 642
|
r
|
test-dictionary.R
|
# Unit tests for the dict() dictionary type.
context("dictionary")
test_that("dictionary primitives work", {
  d <- dict()
  # Single-bracket indexing returns a named list; double-bracket the bare value.
  d["a"] <- 1
  expect_identical(d[["a"]], 1)
  expect_identical(d["a"], list(a = 1))
  d["b"] <- 2
  expect_equal(d[["b"]], 2)
  expect_equal(length(d), 2)
  # [[<- inserts new keys and overwrites existing ones without growing length.
  d[["c"]] <- 3
  expect_equal(d[["c"]], 3)
  d[["c"]] <- 4
  expect_equal(d[["c"]], 4)
  expect_equal(length(d), 3)
  # enumerate() visits entries in insertion order as (key, value) pairs.
  en <- enumerate(d, function(key, value) list(key, value))
  expect_identical(en, list(list("a", 1), list("b", 2), list("c", 4)))
})
test_that("we throw an error when trying to access an element that doesn't exist", {
  d <- dict()
  # Both [ and [[ must error on a missing key rather than returning NULL.
  expect_error(d["a"])
  expect_error(d[["a"]])
})
|
7e78c1c93e143842699793c418b38fd4ece70726
|
cf8eea265b5047140ced80096665085ee06e279d
|
/old/OF_main.R
|
4bf0aa4c7bd58966abca6eaf5c54e951c25b60b2
|
[] |
no_license
|
mcauchoix/Rscripts
|
bada5e167544ad61ee7f1a17e7e6fb1c451ca874
|
12b87c98d3adb7a3c89a0f79419084b791f021b0
|
refs/heads/master
| 2021-05-06T22:11:39.812896
| 2020-06-19T08:59:46
| 2020-06-19T08:59:46
| 112,726,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,999
|
r
|
OF_main.R
|
# Analyse OF data
# mcauchoixxx@gmail.com, 5/12/17
#--------------------------------------------------------
#to do
# remove XXXXXXX and ???????
#--------------------------------------------------------
# NOTE(review): clearing the workspace inside a script is discouraged — it
# deletes any objects the caller had; prefer running in a fresh session.
rm(list = ls())
# function
#---------
# slideFunct() (sliding-window mean used for the learning curves below)
source('~/Documents/openfeeder/Rscripts/slideFunct.R')
#files
#---------
# Output directories (hard-coded, machine-specific paths).
out='/Users/maximecauchoix/Dropbox/wild_cog_2017-18/results/'
out_lc='/Users/maximecauchoix/Dropbox/wild_cog_2017-18/results/learning_curves/'
# 1- merge
#-------------------------------
# Side effect: this sourced script is expected to create the data frame `d`
# (visit records) used throughout the rest of this file.
source('/Users/maximecauchoix/Documents/openfeeder/Rscripts/OF_merge_files.R')
#verify data merged
unique(d$path)
# quick look at data
summary(d)
# recode if issue
# 2- remove XXXXXXXX and ????
#-------------------------------
# no tag read: sentinel value written by the feeder when no RFID was detected
indNoTag=d$tag=="XXXXXXXXXX"
print(paste(sum(indNoTag)/dim(d)[1]*100,'% of bird without tag'))
d=d[!indNoTag,]
# bad read: sentinel value for an unreadable RFID tag
indUnread=d$tag=="??????????"
print(paste(sum(indUnread)/dim(d)[1]*100,'% of bird unread'))
d=d[!indUnread,]
# Re-factor to drop the now-unused sentinel levels.
d$tag=factor(d$tag)
d$OFnb=factor(d$OFnb)
# 3- banding summary
#--------------------
# Side effect: expected to create `ball` (banding data) used in the merges below.
source('/Users/maximecauchoix/Documents/openfeeder/Rscripts/OF_summarise_banding.R')
# 4- match with banding keeping all tag
#-------------------------------
#dall <- merge(d,ball,by="tag", all.x=T,sort=F)#all.x=T
# 5- list output for go-nogo 1
#-------------------------------
#source("/Documents/openfeeder/Rscripts/OF_makeList_gonogo_one.R")
# 6 - identify missing tag
# -------------------------
#source('~/Documents/openfeeder/Rscripts/OF_identify_tag_nobanding.R')
# 7- general stat: nb visi by spe, by site
#-------------------------------
# Inner join (all.x=F): keeps only visits whose tag has banding information.
d <- merge(d,ball,by="tag", all.x=F,sort=F)
# remove M1 (as there is no data there because of the woodpecker)
d=d[d$site_folder!='M1',]
# count nb of bird by sites (first occurrence of each tag)
dunique=d[!duplicated(d$tag),]
print(table(dunique$site_folder,dunique$species))
# nb of visit by bird by site by day
DT <- as.data.table(d)
DT=DT[,list(nbtrial = .N,species=unique(species)), by="tag,site_folder,day"]
dt=as.data.frame(DT)
# mean nb of visit per day, by bird by site
DT <- as.data.table(dt)
DT=DT[,list(species=unique(species),meanVisitByDay=mean(nbtrial)), by="tag,site_folder"]
dt=as.data.frame(DT)
# Flag tags seen at more than one site.
dt$duplicated=duplicated(dt$tag)
# keep only decent n (birds averaging more than 5 visits/day)
df=dt[dt$meanVisitByDay>5,]
df$duplicated=duplicated(df$tag)
sum(df$duplicated)
df=df[!df$duplicated,]
table(df$species,df$site_folder)
#plot
boxplot(dt$meanVisitByDay~dt$species+dt$site_folder)
# 8-learning go-nogo all (d4) and go-no go one (d1)
#--------------------------------------------------
#d4=d[d$scenario==30,]
#d1=d[d$scenario==33,]
# deal with time: build a full timestamp and sort visits chronologically
d$fullTime=paste(d$day,d$hour)
time=strptime(d$fullTime,"%d/%m/%y %H:%M:%S")
indOrder=order(time)
ds=d[indOrder,]
# end expe
print( paste('First day of experiment:', min(strptime(d$day,"%d/%m/%y"))))
print( paste('Last day of experiment:', max(strptime(d$day,"%d/%m/%y"))))
# By individual collect wrong answer according to time
uTag=unique(d$tag)
# Sliding-window size (in trials) for the learning curve.
wind=20
for (i in 1:length(uTag)){
iInd=ds$tag==uTag[i]
# Only plot birds with more trials than the window size.
if (sum(iInd)>wind){
name=paste(unique(ds$species[iInd]),unique(ds$site_folder[iInd]),uTag[i])
# total number of trial
print(paste(sum(iInd)," trials in go no go"))
# Sliding error rate over `wind`-trial windows (denied = wrong answer).
lc=slideFunct(ds$denied[iInd],wind,1)
pdf(paste0(out_lc,name,".pdf"))
plot(lc,type='l',ylab = 'Error rate',xlab = 'Trial',ylim=c(0,1))
# add limit between go nogo all and go nogo one
lim=sum(ds$scenario==30&iInd)
abline(v=lim,col='red')
title(name)
dev.off()
}
}
# 3- list output for go-nogo 1
#-------------------------------
# NOTE(review): `dall` is only created by the merge commented out in section 4
# above — as written, this line errors with "object 'dall' not found". Confirm
# whether the section-4 merge should be re-enabled before this block runs.
C1tag=unique(dall$tag[dall$OFnb=="C1"])
sink(paste0(out,'C1.txt'))
cat(as.character(C1tag),sep="")
sink()
# 3- check numbers
#-------------------------------
# tag not read but detected
# NOTE(review): OF_check_data() is not sourced anywhere in this script —
# presumably defined in another project file; verify before running.
x=OF_check_data(d)
# 4- general stat
#-------------------------------
# total nb visit by species
# total nb visit by individual by spe
# daily number of visit ...
# 5- foraging plot
#-------------------------------
# 6- social networks
#-------------------------------
|
2c0e275a86b0e0ba91f351ec40f64cbee445a845
|
590c0557362a67bdb240a2da1becb4e8bc74e8d2
|
/man/update_W_mc_H_depprecated.Rd
|
ed6ae1c190e72f0207a1a820a2158eee1dc6734c
|
[] |
no_license
|
zshwuhan/MMLE-GaP
|
df96058574cbd11c20bb988f4e86d691572f5fdc
|
b6824b2883b0f60eb8aec0706d27d9dd79439717
|
refs/heads/master
| 2021-09-21T23:54:22.841935
| 2018-09-03T15:04:03
| 2018-09-03T15:04:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 419
|
rd
|
update_W_mc_H_depprecated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_w.R
\name{update_W_mc_H_depprecated}
\alias{update_W_mc_H_depprecated}
\title{Maximize w.r.t W using H with multiplicative updates}
\usage{
update_W_mc_H_depprecated(W, V, H_samples)
}
\arguments{
\item{W}{the (whole) current dictionary matrix.}
\item{V}{matrix to be factorized.}
\item{H_samples}{activation coefficients matrix (J,K,N)}
}
|
2d2fb147d0527e7d757652dfcd2cf40ed28d3dac
|
527978cc4bc62a41834103aaf31129bcdb2a26a0
|
/functions/data/formatBatting.R
|
a4d42993007424fb0a5f40d627f13eb473be8614
|
[] |
no_license
|
sportModel/mlb
|
8386bf7c556a80894d34926de0a7630ce86d1283
|
138bb3e81c15fe1ef776cf3b3bafc2a01032db3f
|
refs/heads/master
| 2021-01-01T20:44:51.816268
| 2019-10-02T11:03:51
| 2019-10-02T11:03:51
| 98,925,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
formatBatting.R
|
formatBatting <- function() {
  # Build a single batting table covering every team configured in `mlb.par`.
  #
  # Relies on project globals defined elsewhere: `mlb.par` (S4 object with
  # slots @year and @team) and the helper `formatTeamBatting()`.
  # Returns the row-bound result of formatTeamBatting() for each team, or
  # NULL when the team list is empty.
  per.team <- lapply(mlb.par@team, function(team) {
    formatTeamBatting(mlb.par@year, team)
  })
  # Bind once at the end: the original grew `val` with rbind() inside the
  # loop, which copies the accumulated table on every iteration (O(n^2)).
  do.call(rbind, per.team)
}
|
e8b15ef98a77994e62462e3f54cd35154724bc00
|
1cb7c773f534d184ac189c6b7afbc7728bd39d91
|
/man/model.Rd
|
ed27149617a1db8697eb1a72be89c1f592dcf5ea
|
[] |
no_license
|
gaborcsardi/finmix
|
bc743cfa939aae0918d4471cccf2872a98c2e41a
|
a68ebf10f0348663338731bca1a9ab93598641eb
|
refs/heads/main
| 2023-08-28T04:09:45.447626
| 2021-09-01T07:22:54
| 2021-09-01T07:22:54
| 422,958,511
| 0
| 0
| null | 2021-10-30T18:17:37
| 2021-10-30T18:17:36
| null |
UTF-8
|
R
| false
| false
| 12,451
|
rd
|
model.Rd
|
\name{model}
\docType{class}
\alias{class:model}
\alias{model}
%accessor
\alias{getDist, model-method}
\alias{setDist<-, model-method}
\alias{getR, model-method}
\alias{setR<-, model-method}
\alias{getWeight, model-method}
\alias{setWeight<-, model-method}
\alias{getPar, model-method}
\alias{setPar<-, model-method}
\alias{getIndicmod, model-method}
\alias{setIndicmod<-, model-method}
\alias{getIndicfix, model-method}
\alias{setIndicfix<-, model-method}
\alias{getT, model-method}
\alias{setT<-, model-method}
% constructor
\alias{model}
% checking
\alias{hasWeight, model-method}
\alias{hasPar, model-method}
\alias{hasT, model-method}
% show
\alias{show, model-method}
% plot
\alias{plot, model-method}
\alias{plotPointProc, model-method}
% tools
\alias{mixturemar, model-method}
\title{Finmix Model}
\description{
The \code{model} class represents the model for a finite mixture distribution.
}
\details{
The \code{model} class is constructed by calling its constructor
\code{model()}. All arguments in the constructor are optional.
}
\section{Constructor}{
\describe{\code{model(dist = "poisson", r, K, weight = matrix(),
par = list(), indicmod = "multinomial", indicfix = TRUE,
T = matrix())}:
Constructs a \code{model} object from the input arguments. All
arguments are optional. If provided, argument \code{T} must be
of type \code{matrix}.
To construct an empty \code{model} object the constructor can be
without any argument provided: \code{model()}.
}
}
\section{Accessors}{
In the following code snippets, \code{x} is an \code{model} object and the symbol
\code{@} represents a slot of this \code{S4} object.
\describe{
\item{}{\code{getDist(x)}, \code{setDist(x)<-}:
Get and set the distribution of the mixture model in \code{@dist}
of \code{x}. The following
distributions are implemented: \code{"poisson"}, \code{"binomial"},
\code{"exponential"}, \code{"normal"}, \code{"student"},
\code{"normult"} (multivariate Normal) and \code{"studmult"}
(multivariate Student-t). Only models with the same distributions
are implemented.
}
\item{}{\code{getR(x)}, \code{setR(x)<-}:
Get and set the dimension of variables in \code{@r} of \code{x}.
The dimension of variables is stored as an \code{integer} and certain
safe guards check for consistency with the remaining slots of
\code{x}. For univariate distributions (\code{"poisson"},
\code{"binomial"}, \code{"exponential"}, \code{"normal"},
\code{"student"}) \code{@r} must be \code{1} and for multivariate
distributions (\code{"normult"}, \code{"studmult"}) \code{@r}
must be \code{>1}.
}
\item{}{\code{getK(x)}, \code{setK(x)<-}:
Get and set the number of components in \code{@K} of \code{x}.
The number of components must be a positive integer.
It is stored as an \code{integer} and certain safe guards check
for validity.
}
\item{}{\code{getWeight(x)}, \code{setWeight(x)<-}:
Get and set the weights of the finite mixture model in \code{@weight}
of \code{x}. The weights must be a \code{1 x @K} \code{matrix} of type
\code{numeric} all \code{<1} and \code{>0} and must sum to \code{1}.
Certain safe guards check for validity and consistency with the remaining
slots of \code{x}.
}
\item{}{\code{getPar(x)}, \code{setPar(x)<-}:
Get and set the component parameters of the finite mixture model
in \code{@par} of \code{x}. If the setter is called parameters
must be provided in a \code{list} with appropriate naming regarding
the distribution of the model in slot \code{@dist}. The following
naming rules apply:
\itemize{
\item \code{"poisson"}: A \code{vector} of positive Poisson
parameters with name \code{$lambda} in the \code{list}.
\item \code{"binomial"}: A \code{vector} of positive Binomial
parameters with name \code{$p} in the \code{list}. All
parameters must be \code{>0} and \code{<1}.
\item \code{"exponential"}: A \code{vector} of positive
Exponential parameters with name \code{$lambda} in the
\code{list}.
\item \code{"normal"}: A \code{vector} of means with name
\code{$mu} in the \code{list} and a \code{vector} of
standard deviations with name \code{$sigma} in the \code{list}.
All standard deviations must be positive.
\item \code{"student"}: A \code{vector} of location parameters
with name \code{$mu} in the \code{list} and a \code{vector}
of scale parameters with name \code{$sigma} in the \code{list}.
All scale parameters must be positive. In addition the
degrees of freedom must be provided as a \code{vector}
with name \code{$df} in the \code{list}.
\item \code{"normult"}: An \code{array} or \code{matrix} of
dimension \code{@r x @K} containing the means for each dimension
and component named \code{$mu} in the \code{list}.
Further, an \code{array} of dimension \code{@r x @r x @K}
containing the variance-covariance matrices named \code{$sigma}
in the \code{list}. All matrices must be stored as a \code{matrix}
and must be positive-definite.
\item \code{"studmult"}: An \code{array} or \code{matrix} of
dimension \code{@r x @K} containing the location parameters
for each dimension and component named \code{$mu} in the \code{list}.
Further, an \code{array} of dimension \code{@r x @r x @K}
containing the scale matrices named \code{$sigma}
in the \code{list}. All matrices must be stored as a \code{matrix}
and must be positive-definite. In addition, degrees of freedom
must be provided as a \code{vector} with name \code{$df} in
the \code{list}.
}
}
\item{}{\code{getIndicmod(x)}, \code{setIndicmod(x)<-}:
Get and set the indicator model in \code{@indicmod} of \code{x}.
Each finite mixture model has an underlying model for its indicators.
Right now only the model \code{"multinomial"} is implemented.
}
\item{}{\code{getIndicfix(x)}, \code{setIndicfix(x)<-}:
Get and set the indicator for a model with fixed indicators in
\code{@indicfix} of \code{x}. A finite mixture model can have
predefined indicators, either because they are observed or
estimated by pre-sample classification. This indicator slot
is stored as \code{logical} and must be either \code{TRUE} in
case fixed indicators are provided in the \code{\link{fdata}}
or \code{FALSE} if otherwise.
}
\item{}{\code{getT(x)}, \code{setT(x)<-}:
Get and set the repetitions \code{matrix} in \code{@T} of \code{x}.
Repetitions are optional and become only relevant in case the
distribution in \code{@dist} is set to \code{"binomial"}.
Repetitions must be stored in a \code{matrix} in case the
setter is called.
}
}
}
\section{Checking}{
In the following code snippets, \code{x} is an \code{model} object and the symbol
\code{@} represents a slot of this \code{S4} object.
\describe{
\item{}{\code{hasWeight(x, verbose = FALSE)}:
Checks wether \code{@weight} of \code{x} is empty. Returns \code{TRUE},
if \code{@weight} contains a \code{matrix} with not all entries
\code{NA} and dimension \code{1 x @K}, otherwise it returns \code{FALSE}.
If \code{verbose} is set to \code{TRUE} an error is thrown in case of
\code{@weight} being empty or having a wrong dimension.
}
\item{}{\code{hasPar(x, verbose = FALSE)}:
		Checks whether \code{@par} of \code{x} is empty. Returns \code{TRUE}, if
\code{@par} contains a \code{list} with length appropriately named
entries of correct dimension. See \code{\link{setPar}} for defining
correct parameters for a finite mixture model. In case \code{@par}
is either empty or incorrectly specified the function returns \code{FALSE}.
If \code{verbose} is set to \code{TRUE} an error is thrown in case of
\code{@par} being empty or incorrectly specified.
}
\item{}{\code{hasT(x, verbose = FALSE)}:
		Checks whether \code{@T} of \code{x} is empty. Returns \code{TRUE}, if
\code{@T} contains \code{matrix} with not all entries \code{NA},
otherwise it returns \code{FALSE}. If \code{verbose} is set to
\code{TRUE} an error is thrown in case of \code{@T} being empty.
}
}
}
\section{Plotting}{
\describe{\code{plot(x, dev = TRUE)}:
Plots a model as specified by \code{x}.
The following types of plots are returned in regard to \code{@dist}:
\itemize{
\item \code{"poisson"}: A barplot of the probabilities
over the range between minimum and maximum of the Poisson mixture
distribution.
\item \code{"binomial"}: A line diagram of the probabilities over
the range between and minimum and maximum of the Binomial mixture
distribution.
\item \code{"exponential"}: An density plot over the range
between minimum and maximum of the Exponential mixture
distribution.
\item \code{"normal"}: A density plot over the range between minimum
and maximum of the Normal mixture distribution.
\item \code{"student"}: A density plot over the range between minimum
and maximum of the Student-t distribution.
\item \code{"normult"}: In case the dimension of variables in
\code{@r} is equal to \code{2} a perspective plot and a contour
plot of the bivariate density of the bivariate Normal mixture
is returned. In case \code{@r} is \code{>2} contour plots for
all possible marginal Normal mixture models of dimension \code{2}
are returned.
\item \code{"studmult"}: In case the dimension of variables in
\code{@r} is equal to \code{2} a perspective plot and a contour
plot of the bivariate density of the bivariate Student-t mixture
is returned. In case \code{@r} is \code{>2} contour plot for
all possible marginal Student-t mixture models of dimension \code{2}
are returned.
}
If argument \code{dev = FALSE} no graphical device is opened and the
user is able to store all plots to a file using \code{\link{pdf}},
\code{\link{png}}, etc.
}
\describe{\code{plotPointProc(x, dev = TRUE, ...)}:
Plots the point process representation for the mixture model specified by
\code{x}. The following type of plots are returned in regard to \code{@dist}:
\itemize{
\item \code{"poisson"}: A point plot, indicating the position of the
components and their corresponding weights by points of certain
sizes.
}
}
}
\author{ Lars Simon Zehnder }
\examples{
model.obj <- model(dist = "binomial", K = 2, indicfix = TRUE)
model.obj
setT(model.obj) <- as.matrix(100)
setPar(model.obj) <- list(p = c(.3, .7))
setWeight(model.obj) <- matrix(c(.1, .9), nrow = 1, ncol = 2)
plot(model.obj)
}
\keyword{classes}
\keyword{methods}
|
6baf9d8db0e93142950611eee7b6faae3f005eb6
|
abcce11a739b6f3554cbf1a9d19c8c22aed4fcca
|
/man/get_perc_risk_index.Rd
|
0ed9a58b1fff24f4080d6ec5c981f1bd8fdfb64f
|
[
"Apache-2.0"
] |
permissive
|
cran/caliver
|
661f6a1fc8b32143c680945f4f26b1a9d0e2f514
|
ad4aa8197977347f7f67f8a915e50066bd4152ef
|
refs/heads/master
| 2023-03-07T15:49:43.881801
| 2021-02-19T10:00:03
| 2021-02-19T10:00:03
| 340,412,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,455
|
rd
|
get_perc_risk_index.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_perc_risk_index.R
\name{get_perc_risk_index}
\alias{get_perc_risk_index}
\title{get_perc_risk_index}
\usage{
get_perc_risk_index(b, poly, perc_val = 75, mod = "gt")
}
\arguments{
\item{b}{RasterLayer/Brick/Stack containing the historical observations or a
proxy (typically a reanalysis).}
\item{poly}{is the spatial polygon on which to aggregate the values}
\item{perc_val}{is the percentile value used as a threshold}
\item{mod}{defines if the values considered for the mean are above (gt) or
below (lt) the threshold}
}
\value{
The function returns a numeric value (for each layer in \code{b}),
corresponding to the mean of the values of \code{b} above/below a given
percentile of the historical observations.
}
\description{
Generates the mean of the values over a certain percentile
threshold for the portion of the Raster* that intersects a polygon
}
\examples{
\dontrun{
# Read RISICO test data
r_risico <- readRDS(system.file("extdata", "RISICO_raster.rds",
package = "caliver"))
# Set missing crs
raster::crs(r_risico) <- "+proj=longlat +datum=WGS84 +no_defs"
# Read dummy polygon
shape <- as(raster::extent(6, 18, 35, 47), "SpatialPolygons")
# Set missing crs
raster::crs(shape) <- "+proj=longlat +datum=WGS84 +no_defs"
get_perc_risk_index(b = r_risico, poly = shape, perc_val = 75, mod = "gt")
}
}
|
6fba5a4a2cd0b69f670fd8412b7ebdef83744283
|
4846b5b3748b6724d7c379dae7572e9fa90a798d
|
/man/ExtractTranscriptomeSequence.Rd
|
6d8d71d1c2d02c80ab7e28a7612f593566993321
|
[] |
no_license
|
vbusa1/nearBynding
|
d225bcbdb1541b65c3f01604a1affd8ff51b068a
|
9ccf2b0e7fec87c426cf37fe45077d67abef210a
|
refs/heads/master
| 2023-04-07T19:01:47.323219
| 2021-07-30T17:39:58
| 2021-07-30T17:39:58
| 278,680,217
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,940
|
rd
|
ExtractTranscriptomeSequence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ExtractTranscriptomeSequence.R
\name{ExtractTranscriptomeSequence}
\alias{ExtractTranscriptomeSequence}
\title{ExtractTranscriptomeSequence}
\usage{
ExtractTranscriptomeSequence(
transcript_list,
ref_genome,
genome_gtf,
RNA_fragment = "exon",
exome_prefix = "exome"
)
}
\arguments{
\item{transcript_list}{A vector of transcript names that represent the most
expressed isoform of their respective genes and correspond to GTF annotation
names. Required}
\item{ref_genome}{The name of the reference genome FASTA from which exome
sequences will be derived; a string. Required}
\item{genome_gtf}{The name of the GTF/GFF file that contains all exome
annotations; a string. Coordinates must match the file input for the
ref_genome parameter. Required}
\item{RNA_fragment}{A string of RNA component of interest. Options depend on
the gtf file but often include "gene", "transcript", "exon", "CDS",
"five_prime_utr", and/or "three_prime_utr". Default "exon" for the whole
exome.}
\item{exome_prefix}{A string to add to the prefix for all output files.
Default "exome"}
}
\value{
writes FASTA file of transcriptome sequences into directory
}
\description{
Writes a FASTA file of transcript sequences from a list of
transcripts.
}
\note{
transcript_list, genome_gtf, and RNA_fragment arguments should be the
same as GenomeMappingToChainFile function arguments
}
\examples{
\donttest{
## load transcript list
load(system.file("extdata/transcript_list.Rda", package="nearBynding"))
##get GTF file
gtf<-system.file("extdata/Homo_sapiens.GRCh38.chr4&5.gtf",
package="nearBynding")
ExtractTranscriptomeSequence(transcript_list = transcript_list,
ref_genome = "Homo_sapiens.GRCh38.dna.primary_assembly.fa",
genome_gtf = gtf,
RNA_fragment = "three_prime_utr",
exome_prefix = "chr4and5_3UTR")
}
}
|
b616ff3ed52ecf081e6c2935540a35291af3fa47
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/nonlinearTseries/R/corrDim.R
|
df6de00ac6c9bee6e3557d55f157d9d0fd3c370b
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,273
|
r
|
corrDim.R
|
################################################################################
#' Correlation sum, correlation dimension and generalized correlation dimension
#' (order q >1).
#' @description
#' Functions for estimating the correlation sum and the correlation dimension of a dynamical
#' system from 1-dimensional time series using Takens' vectors.
#' @details
#' The correlation dimension is the most common measure of the fractal dimensionality
#' of a geometrical object embedded in a phase space. In order to estimate the correlation
#' dimension, the correlation sum is defined over the points from the phase space:
#' \deqn{C(r) = \{(number\;of\;points\;(x_i,x_j)\;verifying\;that\;distance\;(x_i,x_j)<r\})/N^2}{C(r) = {number of points(xi,xj) verifying distance(xi,xj)<r}/N^2}
#' However, this estimator is biased when the pairs in the sum are not statistically independent. For example,
#' Taken's vectors that are close in time, are usually close in the phase space due to the non-zero autocorrelation
#' of the original time series. This is solved by using the so-called Theiler window: two Takens' vectors must be
#' separated by, at least, the time steps specified with this window in order to be considered neighbours. By using a Theiler window,
#' we exclude temporally correlated vectors from our estimations.
#'
#' The correlation dimension is estimated using the slope obtained by performing a linear regression of
#' \eqn{\log10(C(r))\;Vs.\;\log10(r)}{log10(C(r)) Vs. log10(r)}. Since this dimension is supposed to be an invariant of the system, it should not
#' depend on the dimension of the Taken's vectors used to estimate it. Thus, the user should plot \eqn{\log10(C(r))\;Vs.\;\log10(r)}{log10(C(r)) Vs. log10(r)} for several embedding
#' dimensions when looking for the correlation
#' dimension and, if for some range \eqn{\log10(C(r))}{log10(C(r))} shows a similar linear behaviour in different embedding dimensions (i.e. parallel
#' slopes), these slopes are an estimate of the
#' correlation dimension. The \emph{estimate} routine allows the user to get always an estimate of the correlation dimension,
#' but the user must check that there is a linear region in the correlation sum over different dimensions.
#' If such a region does not exist, the estimation should be discarded.
#'
#' Note that the correlation sum C(r) may be interpreted as:
#' \eqn{C(r) = <p(r)>,}
#' that is: the mean probability of finding a neighbour in a ball of radius r surrounding
#' a point in the phase space. Thus, it is possible to define a generalization of the correlation dimension by writing:
#' \deqn{C_q(r) = <p(r)^{(q-1)}>}{Cq(r) = <p(r)^(q-1)>.}
#' Note that the correlation sum \deqn{C(r) = C_2(r)}{C(r) = C2(r).}
#'
#' It is possible to determine generalized dimensions Dq using the slope obtained by performing a linear regression of
#' \eqn{log10(Cq(r))\;Vs.\;(q-1)log10(r)}. The case q=1 leads to the information dimension, that is treated separately
#' in this package (\link{infDim}). The considerations discussed for the correlation dimension estimate
#' are also valid for these generalized dimensions.
#' @param time.series The original time series from which the correlation sum will be estimated.
#' @param min.embedding.dim Integer denoting the minimum dimension in which we shall embed the time.series (see \link{buildTakens}).
#' @param max.embedding.dim Integer denoting the maximum dimension in which we shall embed the time.series (see \link{buildTakens}).Thus,
#' we shall estimate the correlation dimension between \emph{min.embedding.dim} and \emph{max.embedding.dim}.
#' @param time.lag Integer denoting the number of time steps that will be use to construct the
#' Takens' vectors (see \link{buildTakens}).
#' @param min.radius Minimum distance used to compute the correlation sum C(r).
#' @param max.radius Maximum distance used to compute the correlation sum C(r).
#' @param corr.order Order of the generalized correlation Dimension q. It must be greater than 1 (corr.order>1). Default, corr.order=2.
#' @param n.points.radius The number of different radius where we shall estimate.
#' C(r). Thus, we will estimate C(r) in n.points.radius between min.radius and max.radius.
#' @param theiler.window Integer denoting the Theiler window: Two Takens' vectors must be separated by more than
#' theiler.window time steps in order to be considered neighbours. By using a Theiler window, we exclude temporally correlated
#' vectors from our estimations.
#' @param do.plot Logical value. If TRUE (default value), a plot of the correlation sum is shown.
#' @param number.boxes Number of boxes that will be used in the box assisted algorithm (see \link{neighbourSearch}). If the user does not specify it, the function
#' uses a proper number of boxes.
#' @param ... Additional plotting parameters.
#' @return A \emph{corrDim} object that consist of a list with four components named \emph{radius}, \emph{embedding.dims}, \emph{order} and \emph{corr.matrix}.
#' \emph{radius} is a vector containing the different radius where we have evaluated C(r). \emph{embedding.dims} is a vector containing
#' all the embedding dimensions in which we have estimated C(r). \emph{order} stores the order of the generalized correlation dimension
#' that has been used. Finally, \emph{corr.matrix} stores all the correlation
#' sums that have been computed. Each row stores the correlation sum for a concrete embedding dimension whereas each column
#' stores the correlation sum for a specific radius.
#' @examples
#' \dontrun{
#' x=lorenz(sigma=10, rho = 28, beta =8/3, start = c(-10, -11, 47),
#' time = seq(0, 70, by = 0.01), do.plot = FALSE)$x
#' cd=corrDim(time.series=x,min.embedding.dim=3,max.embedding.dim=6,
#' time.lag=10,min.radius=1e-3,max.radius=50,
#' n.points.radius=100,theiler.window=100,
#' number.boxes=100,do.plot=F)
#'
#' plot(cd,type="l")
#' plotLocalScalingExp(cd,cex=0.5,xlim=c(1e-1,5))
#' cd.est = estimate(cd,regression.range=c(0.2,2))
#' cat("expected: 2.05 --- estimate: ",cd.est,"\n")
#' }
#' @references H. Kantz and T. Schreiber: Nonlinear Time Series Analysis (Cambridge University Press)
#' @author Constantino A. Garcia
#' @rdname corrDim
#' @export corrDim
#' @exportClass corrDim
#' @useDynLib nonlinearTseries
corrDim = function ( time.series, min.embedding.dim=2, max.embedding.dim = 5,
                     time.lag=1, min.radius,max.radius,corr.order=2,
                     n.points.radius=5,theiler.window=100,do.plot=TRUE,
                     number.boxes=NULL,...){
  # Estimate a sensible number of boxes for the box-assisted neighbour search
  # when the caller did not provide one.
  if (is.null(number.boxes)) number.boxes = estimateNumberBoxes(time.series, min.radius)
  # Build Takens' vectors only for the minimum embedding dimension; the C
  # routines derive the higher embedding dimensions themselves.
  takensDimMin=buildTakens(time.series=time.series,embedding.dim=min.embedding.dim,time.lag=time.lag)
  numberTakens=nrow(takensDimMin)
  # other parameters forwarded to the C routines
  lenTimeSeries=length(time.series)
  number.embeddings=max.embedding.dim-min.embedding.dim+1
  corr.matrix=matrix(0,nrow=number.embeddings,ncol=n.points.radius)
  # Radius vector with equally spaced points in the log10(radius) axis, from
  # max.radius down to min.radius. Use the full argument name `length.out`:
  # the original `len=` relied on partial argument matching.
  log.radius=seq(log10(max.radius),log10(min.radius),length.out=n.points.radius)
  radius=10^log.radius
  # Call the compiled routine: the plain correlation sum for corr.order == 2,
  # the generalized (order-q) version otherwise.
  if (corr.order==2){
    sol=.C("corrDim", timeSeries=as.double(time.series),lenTimeSeries = as.integer(length(time.series)),
           takensDimMin=as.double(takensDimMin),tau=as.integer(time.lag), numberTakens = as.integer(numberTakens),
           minEmbeddingD=as.integer(min.embedding.dim),maxEmbeddingD=as.integer(max.embedding.dim),
           eps=as.double(radius),numberEps=as.integer(n.points.radius),
           numberBoxes=as.integer(number.boxes),tdist=as.integer(theiler.window),corrMatrix=as.double(corr.matrix),
           PACKAGE="nonlinearTseries")
  }else{
    sol=.C("generalizedCorrDim", time.series=as.double(time.series),lenTimeSeries = as.integer(lenTimeSeries),
           takensDimMin=as.double(takensDimMin),tau=as.integer(time.lag), numberTakens = as.integer(numberTakens),
           minEmbeddingD=as.integer(min.embedding.dim),maxEmbeddingD=as.integer(max.embedding.dim), q = as.integer(corr.order),
           eps=as.double(radius),numberEps=as.integer(n.points.radius),
           numberBoxes=as.integer(number.boxes),tdist=as.integer(theiler.window),corrMatrix=as.double(corr.matrix),
           PACKAGE="nonlinearTseries")
  }
  # Reshape the flat result into the (embedding dimension x radius) matrix.
  corr.matrix=matrix(sol$corrMatrix,nrow=number.embeddings,ncol=n.points.radius)
  dimnames(corr.matrix) = list(min.embedding.dim:max.embedding.dim,radius)
  # Eliminate columns containing at least one 0 (a zero correlation sum
  # cannot be used on the log-log plot).
  wh=which(corr.matrix==0,arr.ind=TRUE)
  wh=unique(wh[,'col'])
  # Fixed: the original tested `length(wh>0)`, i.e. the length of a logical
  # vector, which only worked because it happens to equal length(wh).
  if (length(wh) > 0){
    corr.matrix=corr.matrix[,-wh,drop=FALSE]
    # eliminate the corresponding radius values in the radius vector
    radius=radius[-wh]
  }
  # create the corrDim object
  corr.dim = list(corr.matrix = corr.matrix,embedding.dims = min.embedding.dim:max.embedding.dim,radius=radius,corr.order=corr.order)
  class(corr.dim) = "corrDim"
  # Record provenance attributes (name of the series, lag and Theiler window).
  id=deparse(substitute(time.series))
  attr(corr.dim,"time.lag") = time.lag
  attr(corr.dim,"id") = id
  attr(corr.dim,"theiler.window") = theiler.window
  # plot if necessary
  if (do.plot){
    plot(corr.dim,...)
  }
  return(corr.dim)
}
#' @return The \emph{nlOrder} function returns the order of the correlation sum.
#' @rdname corrDim
#' @method nlOrder corrDim
#' @export
nlOrder.corrDim = function(x){
  # Accessor: the correlation order q that was used when building this object.
  x$corr.order
}
#' Returns the correlation sums stored in the \emph{corrDim} object
#' @param x A \emph{corrDim} object.
#' @return The \emph{corrMatrix} function returns the correlations matrix
#' storing the correlation sums that have been computed for all the embedding
#' dimensions.
#' @seealso \code{\link{corrDim}}
#' @export corrMatrix
# S3 generic: dispatches on the class of x (see corrMatrix.corrDim).
corrMatrix = function(x){
UseMethod("corrMatrix")
}
#' @return The \emph{corrMatrix} function returns the correlations matrix
#' storing the correlation sums that have been computed for all the embedding
#' dimensions.
#' @rdname corrDim
#' @method corrMatrix corrDim
#' @export
corrMatrix.corrDim = function(x){
  # Accessor: matrix of correlation sums
  # (rows: embedding dimensions, columns: radius values).
  x$corr.matrix
}
#' @return The \emph{radius} function returns the radius on which the correlation sum function has been evaluated.
#' @rdname corrDim
#' @method radius corrDim
#' @export
radius.corrDim = function(x){
  # Delegate to the shared default method defined elsewhere in the package.
  radius.default(x)
}
#' @return The \emph{embeddingDims} function returns the embedding dimensions on which
#' the correlation sum function has been evaluated.
#' @rdname corrDim
#' @method embeddingDims corrDim
#' @export
embeddingDims.corrDim = function(x){
  # Delegate to the shared default method defined elsewhere in the package.
  embeddingDims.default(x)
}
#' @rdname corrDim
#' @method print corrDim
#' @export
# Print method: shows the matrix of correlation sums
# (rows: embedding dimensions, columns: radius values). Arguments in ...
# are accepted for S3 compatibility but not forwarded.
print.corrDim = function(x, ...){
print(x$corr.matrix)
}
#' @param main A title for the plot.
#' @param xlab A title for the x axis.
#' @param ylab A title for the y axis.
#' @param type Type of plot (see \code{\link[graphics]{plot}}).
#' @param log A character string which contains "x" if the x axis is to be
#' logarithmic, "y" if the y axis is to be logarithmic and "xy" or "yx" if both
#' axes are to be logarithmic.
#' @param ylim Numeric vector of length 2, giving the y coordinates range.
#' @param col Vector of colors for each of the dimensions of the plot.
#' @param pch Vector of symbols for each of the dimensions of the plot.
#' @param localScalingExp add a plot of the local scaling exponents of the correlation
#' sum.
#' @param add.legend add a legend to the plot?
#' @param cex.legend Magnification value for the legend.
#' @return The \emph{plot} function plots the correlation sum. It is possible to
#' plot the the correlation sum Vs the radius and also the local scaling exponents
#' of the correlation sum Vs radius.
#' @rdname corrDim
#' @method plot corrDim
#' @export
#' @method plot
# Plot the correlation sums C(r) of a corrDim object, one curve per embedding
# dimension. Optionally adds a second panel with the local scaling exponents
# and a bottom panel with a legend. The device layout is restored on exit.
plot.corrDim = function(x,main="Correlation Sum C(r)",xlab=NULL,
ylab="C(r)",type="b",
log="xy",ylim=NULL, col=NULL,pch=NULL,localScalingExp=T,
add.legend=T,cex.legend=1,...){
# set layout depending on options; save current par so it can be restored
if ( add.legend || localScalingExp ){
current.par = par(no.readonly = TRUE)
on.exit(par(current.par))
}
if (add.legend && localScalingExp){
# 3 regions: correlation sums, local slopes, legend
layout(rbind(1,2,3), heights=c(4,4,2))
}else{
if (add.legend){
# add legend
layout(rbind(1,2), heights=c(8,2))
}
if (localScalingExp){
# add local slopes
layout(rbind(1,2), heights=c(5,5))
}
}
number.embeddings=nrow(x$corr.matrix)
# plot options: defaults derived from the data when not given by the caller
if (is.null(ylim)) ylim=range(x$corr.matrix)
if (is.null(xlab)) {
# for generalized orders (q != 2) the x axis is r^(q-1)
xlab = ifelse(x$corr.order==2,{"Radius r"}, {paste("Radius r^",x$corr.order-1,"",sep="")})
}
# vectorizePar (package helper) recycles col/pch to one value per embedding
col = vectorizePar(col,number.embeddings)
pch = vectorizePar(pch,number.embeddings)
# plot the first embedding dimension, then overlay the remaining ones
plot(x$radius^(x$corr.order-1),x$corr.matrix[1,],type=type,log=log,
col=col[[1]], pch=pch[[1]],ylim=ylim, xlab=xlab,ylab=ylab,main=main,...)
i=2
while(i<=number.embeddings){
lines(x$radius^(x$corr.order-1),x$corr.matrix[i,],type=type,col=col[[i]],
pch=pch[[i]],...)
i = i + 1
}
#### add local slopes panel if needed
if (localScalingExp){
plotLocalScalingExp(x,xlab = xlab,type = type,col = col,pch=pch,
add.legend = F,...)
}
### add legend panel
if (add.legend){
par(mar=c(0, 0, 0, 0))
plot.new()
legend("center","groups",ncol=ceiling(number.embeddings/2),
bty="n", col=col,lty=rep(1,number.embeddings),pch=pch,
lwd=rep(2.5,number.embeddings), cex=cex.legend,
legend=x$embedding.dims, title="Embedding dimension")
}
}
#' @return The \emph{plotLocalScalingExp} function plots the local scaling exponents
#' of the correlation sum.
#' @rdname corrDim
#' @method plotLocalScalingExp corrDim
#' @export
#' @method plotLocalScalingExp
# Plot the local scaling exponents of the correlation sum, i.e. the numerical
# derivative of log10(C(r)) with respect to (q-1)*log10(r), one curve per
# embedding dimension. Requires at least 2 radius columns to differentiate.
plotLocalScalingExp.corrDim = function(x,main="Correlation Dimension C(r)",
xlab=NULL,ylab="Local scaling exponents",
type="b",log="x",ylim=NULL,col=NULL,pch=NULL,
add.legend=T, ...){
# Check if it is possible to compute local slopes
if ( ncol(x$corr.matrix) <= 1) {
stop("Cannot compute local scaling exponents (not enough points in the correlation matrix)")
}
number.embeddings=nrow(x$corr.matrix)
if (add.legend){
# reserve a bottom panel for the legend; restore par on exit
current.par = par(no.readonly = TRUE)
on.exit(par(current.par))
layout(rbind(1,2), heights=c(8,2))
}
lcm = log10(x$corr.matrix)
# differentiate (package helper) computes a finite-difference derivative of
# each row; h is the constant log10-radius step scaled by (q-1)
dlcm= matrix(
t(apply(lcm,MARGIN=1,differentiate,
h = (x$corr.order-1)*(log10(x$radius[[2]])-log10(x$radius[[1]]))
)),
nrow = number.embeddings)
#dlcm=10^dlcm
# differentiateAxis (package helper) gives the radius values matching dlcm
radius.axis = differentiateAxis(x$radius)
# obtain default parameters if not specified
if (is.null(ylim)) ylim=range(dlcm)
if (is.null(xlab)) {
xlab = ifelse(x$corr.order==2,{"Radius r"}, {paste("Radius r^",x$corr.order-1,"",sep="")})
}
col = vectorizePar(col,number.embeddings)
pch = vectorizePar(pch,number.embeddings)
# plot the first embedding dimension, then overlay the remaining ones
plot(radius.axis^(x$corr.order-1),dlcm[1,],type=type,log=log,col=col[[1]],
pch=pch[[1]],ylim=ylim,xlab=xlab,ylab=ylab,main=main,...)
i=2
while(i <= number.embeddings){
lines(radius.axis^(x$corr.order-1),dlcm[i,],type=type,col=col[[i]],
pch=pch[[i]],...)
i=i+1
}
if (add.legend){
par(mar=c(0, 0, 0, 0))
plot.new()
legend("center","groups",ncol=ceiling(number.embeddings/2),
bty="n", col=col,lty=rep(1,number.embeddings),pch=pch,
lwd=rep(2.5,number.embeddings),
legend=x$embedding.dims, title="Embedding dimension")
}
}
#' @return The \emph{estimate} function estimates the correlation dimension of the
#' \emph{corr.dim} object by averaging the slopes of the embedding dimensions specified in
#' the \emph{use.embeddings} parameter. The slopes are determined by performing a linear regression
#' over the radius' range specified in \emph{regression.range}.If \emph{do.plot} is TRUE,
#' a graphic of the regression over the data is shown.
#' @param use.embeddings A numeric vector specifying which embedding dimensions should the \emph{estimate} function use to compute
#' the correlation dimension.
#' @param fit.col A vector of colors to plot the regression lines.
#' @param fit.lty The type of line to plot the regression lines.
#' @param fit.lwd The width of the line for the regression lines.
#' @param lty The line type of the correlation sums.
#' @param lwd The line width of the correlation sums.
#' @param x A \emph{corrDim} object.
#' @param regression.range Vector with 2 components denoting the range where the function will perform linear regression.
#' @rdname corrDim
#' @export
#' @method estimate corrDim
#'
# Estimate the correlation dimension: for each selected embedding dimension,
# fit log10(C(r)) ~ (q-1)*log10(r) over the regression range and average the
# resulting slopes. Optionally plots the data and the fitted lines.
estimate.corrDim=function(x, regression.range = NULL, do.plot=FALSE,
use.embeddings = NULL,col=NULL,pch=NULL,
fit.col=NULL, fit.lty=2,fit.lwd=2,
add.legend=T, lty=1,lwd=1,...){
corr.matrix = corrMatrix(x)
# restrict to the requested embedding dimensions (rows are named by dim)
if (!is.null(use.embeddings)){
corr.matrix = corr.matrix[as.character(use.embeddings),]
}else{
use.embeddings= as.numeric(rownames(corr.matrix))
}
average=0
#x axis
q = nlOrder(x)
radius=radius(x)
number.embeddings=nrow(corr.matrix)
log.radius = log10(radius)
if (is.null(regression.range)){
r.min = min(radius)
r.max = max(radius)
}else{
# transform the regression range in the corresponding radius
r.min = (regression.range[[1]])^(1/(q-1))
r.max = (regression.range[[2]])^(1/(q-1))
}
lcm = log10(corr.matrix)
if (do.plot){
if (add.legend){
current.par = par(no.readonly = TRUE)
on.exit(par(current.par))
layout(rbind(1,2), heights=c(8,2))
}
# obtain vector of graphical parameters if not specified
col = vectorizePar(col,number.embeddings)
pch = vectorizePar(pch,number.embeddings)
fit.col = vectorizePar(fit.col,number.embeddings,col)
# eliminate those dimensions that are unnecessary for plotting
reduced.x=x
reduced.x$corr.matrix = NULL
reduced.x$corr.matrix = corr.matrix
reduced.x$embedding.dims = NULL
reduced.x$embedding.dims = use.embeddings
plot(reduced.x,col=col,pch=pch,lty=lty,lwd=lwd,add.legend=F,localScalingExp=F,...)
}
# average the regression slopes over the different embedding dimensions
for (i in 1:number.embeddings){
# collapse duplicated correlation sums (see eliminateDuplicates) so the
# regression is not biased by flat segments of the curve
new.corr = eliminateDuplicates(corr.matrix[i,] , radius)
indx = which(new.corr$radius >= r.min & new.corr$radius <= r.max)
y.values = log10(new.corr$correlation[indx])
x.values = (q-1)*log10(new.corr$radius[indx])
fit=lm(y.values ~ x.values)
if (do.plot){
lines(new.corr$radius[indx]^(q-1),10^fit$fitted.values,
col=fit.col[[i]],lwd=fit.lwd,lty=fit.lty,...)
}
#print(fit$coefficients[[2]])
average=average + fit$coefficients[[2]]
}
if (add.legend && do.plot ){
par(mar=c(0, 0, 0, 0))
plot.new()
legend("center","groups",ncol=ceiling(number.embeddings/2),
bty="n", col=col,lty=rep(1,number.embeddings),pch=pch,
lwd=rep(2.5,number.embeddings),
legend=use.embeddings, title="Embedding dimension")
}
average=average/number.embeddings
# return the correlation dimension estimate (mean slope)
return(average)
}
#private function
#eliminate duplicate correlation.sums with different radius
eliminateDuplicates = function(correlation.sum , radius){
  # Collapse repeated correlation-sum values: when several radius values map
  # to the same correlation sum, keep that sum once (first-occurrence order)
  # and represent it by the median of the corresponding radii.
  kept.sums <- unique(correlation.sum)
  if (length(kept.sums) < length(correlation.sum)) {
    kept.radius <- unlist(lapply(kept.sums, function(value) {
      median(radius[which(correlation.sum == value)])
    }))
  } else {
    # No duplicates: the radius vector is already aligned with the sums.
    kept.radius <- radius
  }
  list(correlation = kept.sums, radius = kept.radius)
}
|
97d8ed695addec423608e23e7382fdbef699b729
|
0a230e74f7343a500cf667c725a798d42ec12539
|
/man/post_html_from_Rmd.Rd
|
7e723b5fd44b5ee956ee55055f5d9f0d5285cf67
|
[
"MIT"
] |
permissive
|
kbttik/posteR
|
2793a0495a226e254a598756e8cc6a4098fdc86b
|
fedbf6d12ad924be9421a0bc12e010be6065eae0
|
refs/heads/master
| 2022-12-11T15:00:10.896933
| 2020-09-10T08:56:31
| 2020-09-10T08:56:31
| 285,782,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 553
|
rd
|
post_html_from_Rmd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/post_html.R
\name{post_html_from_Rmd}
\alias{post_html_from_Rmd}
\title{htmlレポートをPOSTする}
\usage{
post_html_from_Rmd(Rmd_file, interactive = NULL)
}
\arguments{
\item{Rmd_file}{character レポートへのpath}
}
\value{
\code{post_html_from_Rmd()} returns the URL of the html page.
}
\description{
Rmdファイルから、生成されたhtmlをPOSTする。htmlレポートがない場合は、生成を聞く。
}
\examples{
post_html_from_Rmd("README.Rmd")
}
|
ef77337bcc46b39b445bbc278238cdcfafa74df6
|
11de9e10b0b9fa137bbf8607bfd321e9d5fede6e
|
/mPowerEI/man/adjustDriftLowess.Rd
|
a26ae8639fe999cfb86ba4c64538310f58b0e92b
|
[
"MIT"
] |
permissive
|
MonteShaffer/mPowerEI
|
dbc906db491716f15295899f7e10efac6601531b
|
d587c4fda58a377b9bbfb3327411249a11049aa2
|
refs/heads/master
| 2021-05-16T08:48:51.456692
| 2017-10-05T11:03:30
| 2017-10-05T11:03:30
| 104,287,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 385
|
rd
|
adjustDriftLowess.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildFeatures.R
\name{adjustDriftLowess}
\alias{adjustDriftLowess}
\title{Adjust position based on lowess}
\usage{
adjustDriftLowess(xt, yi)
}
\arguments{
\item{xt}{}
\item{yi}{}
}
\value{
updated yi
}
\description{
Assume walking in straight line so x,y,z should all be straight
Assume mean is zero-ish
}
|
21b992aa24fd9888c4d8ab31262a4cab051f44d6
|
b626eca042800ee5572344262a165c1989235f73
|
/paper/Rcode/19-alt-a-plot.R
|
71a2efe20ca6335bf80c3f7ed35679b6bfa9b072
|
[] |
no_license
|
nicoballarini/SubgrPlots
|
38f04f07478c672cae761b1a9cd1fa5f79062d6e
|
61229c9f36c9650f71b178b970a80d2a7bf9471d
|
refs/heads/master
| 2020-03-13T12:59:46.419273
| 2020-01-29T02:26:45
| 2020-01-29T02:26:45
| 131,130,461
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
19-alt-a-plot.R
|
###############################################################################-
##
## This program creates the figures for the manuscript using the
## prca data that is included in this package
##
##
## Instead of using rm(list = ls()), make sure you start with a fresh R
## by restarting R -> Control+Shift+F10
# cat("\014") # Cleans the console
## Load needed libraries
## If SubgrPlots package is not installed, then open project and use following lines or
## in the build window, click Install and Restart
# devtools::build()
# devtools::install()
library(SubgrPlots) # Loads this package. Install it first
library(survival)
library(ggplot2)
library(dplyr)
# # Load the data to be used
data(prca)
dat <- prca
###############################################################################-
## 1. Overlap plot ---------------------------------------------------------------
pdf("paper/figures/19-alt-a-plot.pdf", width = 4, height = 4)
plot_overlap(dat = dat,
covari.sel = c(6,5,4,7),
para = c(0.1, 0.5, 1),
font.size = c(1.2, 1.2, 0.8),
title = NULL)
dev.off()
|
bf3783063028b4ff114f05708f6f06333b90782e
|
3578d3e6b04ff37d299980d648a64dcf26e95e09
|
/R/db/MMdb.R
|
f432b08d564b81faeda3f4d2517f294973186e62
|
[] |
no_license
|
eduardoscopel/util
|
ae6342d9c30a7d0cec55b160301ec9e7bb0db26d
|
f306a2dc4589522d0d5d5b130fdcf21c98cd1db9
|
refs/heads/master
| 2021-05-30T00:25:48.120182
| 2015-12-30T20:26:48
| 2015-12-30T20:26:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,217
|
r
|
MMdb.R
|
# Default path to the MegaMUGA SQLite genotype database used by fetch.* below.
MMDB.PATH <- "/db/arrays/megamuga/megamuga.db"
.types.quotes <- function(col, ...) {
  # Map a key-column name to its SQL type and the quoting needed for literals:
  # "name" keys are strings (varchar, single-quoted); all other keys are ints.
  if (col == "name") {
    list(type = "varchar", quotes = "'")
  } else {
    list(type = "int", quotes = "")
  }
}
.insert.samples <- function(ids, db, temp.table = "_mysamples", by = c("name","id"), ...) {
require(RSQLite)
stopifnot(!is.null(ids))
stopifnot(inherits(db, "SQLiteConnection"))
.by <- match.arg(by)
tnq <- .types.quotes(.by)
sql <- paste("CREATE TEMP TABLE", temp.table, "(", .by, tnq$type,");")
dbGetQuery(db, sql)
for (s in ids) {
sql <- paste0("INSERT INTO ", temp.table, " (", .by, ") VALUES (", tnq$quotes, s, tnq$quotes, ");")
dbGetQuery(db, sql)
}
}
.insert.markers <- function(ids, db, temp.table = "_mymarkers", by = c("name","id"), ...) {
  # Create a one-column temporary table in `db` holding the requested marker
  # (SNP) identifiers, so later queries can INNER JOIN against it.
  #
  # The original body was an exact duplicate of .insert.samples() except for
  # the default temp-table name; delegate to it so the table-creation and
  # quoting logic lives in one place.
  .insert.samples(ids, db, temp.table = temp.table, by = by, ...)
}
.chunk.query <- function(db, sql, batch.size = -1, ...) {
  # Execute `sql` on `db`, fetching rows in batches of `batch.size`
  # (-1 = everything at once), and return the accumulated data.frame.
  # NOTE(review): this function also disconnects `db` before returning, so
  # the caller's connection is closed as a side effect.
  require(RSQLite)
  stopifnot(inherits(db, "SQLiteConnection"))
  result.set <- dbSendQuery(db, sql)
  collected <- data.frame()
  while (!dbHasCompleted(result.set)) {
    collected <- rbind(collected, fetch(result.set, n = batch.size))
  }
  dbClearResult(result.set)
  dbDisconnect(db)
  collected
}
# Fetch per-marker genotype intensities (x, y, call) for the given samples,
# optionally restricted to a marker list and/or a chromosomal region.
# Opens its own connection to the SQLite db; .chunk.query closes it.
fetch.intensities <- function(ids, markers = NULL, chr = NULL, start = NULL, end = NULL, by = c("name","id"), db = MMDB.PATH, batch.size = 1000, verbose = TRUE, ...) {
require(RSQLite)
stopifnot(!is.null(ids))
db <- dbConnect(SQLite(), dbname = db)
# stage the requested sample ids in a temp table for the JOIN below
.insert.samples(ids, db, by = by)
# NOTE(review): `by` is used un-matched in the paste0 below; if a caller
# relies on the default c("name","id") the SQL would be malformed — callers
# are expected to pass a single value. TODO confirm.
sql <- paste("SELECT m.name as marker, s.id as sid, s.name as id, m.chromosome as chr, m.position as pos, g.x, g.y, g.call",
"FROM samples as s",
paste0("INNER JOIN _mysamples as sg ON s.", by, " = sg.", by),
"INNER JOIN genotypes as g ON g.sampleID = s.id",
"INNER JOIN snps as m on g.snpID = m.id", sep = "\n")
if (!is.null(markers)) {
# restrict to requested markers via a second temp table
.insert.markers(markers, db, by ="name")
sql <- paste0(sql, "\nINNER JOIN _mymarkers as mym ON m.name = mym.name")
}
if (!is.null(chr))
# accept either "chr1" or "1"
sql <- paste0(sql, "\nWHERE chr = '", gsub("chr","",chr), "'")
if (!is.null(start))
sql <- paste(sql, "AND\npos >=", formatC(start, format = "d"))
if (!is.null(end))
sql <- paste(sql, "AND\npos <=", formatC(end, format = "d"))
sql <- paste0(sql, ";")
if (verbose)
cat(sql, "\n")
# run the query in batches; this also closes the connection
.chunk.query(db, sql, batch.size)
}
# Fetch sample metadata rows either by id/name (exact or LIKE-pattern match)
# or by group membership — `ids` and `group` are mutually exclusive.
# Opens its own connection to the SQLite db; .chunk.query closes it.
fetch.samples <- function(ids = NULL, group = NULL, db = MMDB.PATH, by = c("name","id"), exact = TRUE, strict.case = TRUE, verbose = TRUE, ...) {
require(RSQLite)
# disallow supplying both ids and group
stopifnot( !all(!is.null(ids), !is.null(group)) )
db <- dbConnect(SQLite(), dbname = db)
cols <- paste("s", c("id", "name", "well", "batch", "sex", "flags", "timestamp"), sep = ".", collapse = ", ")
sql <- paste("SELECT", cols)
if (!is.null(ids)) {
if (exact) {
# exact mode: stage ids in a temp table and JOIN against it
.ids <- na.omit(ids)
.insert.samples(.ids, db, by = match.arg(by))
sql <- paste0(sql, " FROM samples s ",
"INNER JOIN _mysamples as sg ON s.", by, " = sg.", by)
}
else {
# fuzzy mode: only the FIRST id is used as a LIKE pattern
sql <- paste0(sql, " FROM SAMPLES s ",
"WHERE s.name LIKE '", ids[1], "'")
}
}
else if (!is.null(group)) {
# select by group name (LIKE pattern); only the first group is used
sql <- paste0(sql, ", g.name as gname FROM samples s ",
"INNER JOIN samples_groups sg ON sg.sid = s.id ",
"INNER JOIN groups g ON sg.gid = g.id ",
"WHERE g.name LIKE '", group[1], "'")
}
if (!strict.case)
sql <- paste0(sql, " COLLATE NOCASE")
sql <- paste0(sql, ";")
if (verbose)
cat(sql, "\n")
# fetch everything in one batch; this also closes the connection
.chunk.query(db, sql, -1)
}
fetch.controls <- function(type = c("all","classical","wild"), ...) {
  # Fetch the control (founder strain) samples, selected by `type`:
  # the classical laboratory strains, the wild-derived strains, or both.
  # The first column of the result is renamed to the strain label.
  require(plyr)
  tt <- match.arg(type)
  filters <- list(classical = c(A = "A/J%",B = "C57BL/6J%", C = "129S1%", D = "NOD/ShiLtJ%",E = "NZO/HILtJ%"),
                  wild = c(F = "CAST/EiJ%", G = "PWK/PhJ%", H = "WSB/EiJ%"))
  # Pick the LIKE patterns for the requested strain group(s).
  ff <- switch(tt,
               classical = c(character(), filters[[1]]),
               wild = c(character(), filters[[2]]),
               all = unlist(unname(filters)))
  # One fuzzy name query per strain pattern; names of ff become column 1.
  rez <- ldply(ff, fetch.samples, by = "name", exact = FALSE, ...)
  colnames(rez)[1] <- "strain"
  return(rez)
}
# Summarize x/y intensities per group (default: per sample id `sid`):
# total signal, count, angle of the summed (x, y) vector, and summed radius.
# `by` uses plyr's quoted-variable syntax, e.g. .(sid).
summarize.intensity <- function(df, markers = NULL, by = .(sid), ...) {
require(plyr)
# optionally restrict to a marker subset before aggregating
if (!is.null(markers))
df <- subset(df, marker %in% markers)
ddply(df, by, summarize,
si = sum(x+y), n = length(x),
theta = atan2(sum(y), sum(x)), hypo = sum(sqrt(x^2 + y^2)))
}
# Build a wide genotype-call matrix: one row per marker (with chr, pos),
# one column per sample id, ordered by genomic position.
# Returns NULL when no intensities were found for the requested samples.
fetch.genotype.matrix <- function(ids = NULL, markers = NULL, by, verbose = TRUE, ...) {
require(reshape2)
# resolve the requested samples to database ids first
ss <- fetch.samples(ids = ids, by = by, verbose = verbose, ...)
mm <- data.frame()
if (!is.null(markers))
mm <- fetch.intensities(ids = ss$id, markers = markers, by = "id", verbose = verbose, ...)
else
mm <- fetch.intensities(ids = ss$id, by = "id", verbose = verbose, ...)
if (!nrow(mm))
return(NULL)
# long -> wide: one column of calls per sample (sid)
gty <- dcast(mm, marker + chr + pos ~ sid, value.var = "call")
gty <- gty[ with(gty, order(chr, pos)), ]
return(gty)
}
|
176e2a44be9524d1c0866755f61ac7f5dfc4e095
|
2c7f3011575aebd35c4f954c65541b5a7415f5a3
|
/homework_03.R
|
c8d757c72c98cf47cc8132a1b8193a544614c283
|
[] |
no_license
|
jj-personal/get_clean_data
|
87d9ed13585cbcd09b8323d5d91850c48a83f81d
|
bfb169b107a47b6076ba1eab48fbc14dc7d9c023
|
refs/heads/master
| 2021-01-20T06:12:55.008850
| 2017-05-08T18:46:49
| 2017-05-08T18:46:49
| 89,856,584
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,194
|
r
|
homework_03.R
|
## R code for
## Getting and Cleaning Data
## Builds a tidy data set from the UCI HAR smartphone data: merges the
## train/test splits, keeps mean/std features, attaches activity labels and
## subject ids, then averages each variable per subject/activity pair.
###########################################
## Read and merge x train and test data
x.train <- read.table("./UCI HAR Dataset/train/X_train.txt")
x.test <- read.table("./UCI HAR Dataset/test/X_test.txt")
x.merge <- rbind(x.train, x.test)
## Extract features with "mean" & "std"
feature.name <- read.table("./UCI HAR Dataset/features.txt")
meanstd.grepl <- grepl("mean", feature.name[,2]) | grepl("std",feature.name[,2])
x.merge.extract <- x.merge[,meanstd.grepl]
## Add column names to x.merge.extract (strip "()" from feature names)
feature.name.2 <- gsub("\\()", "", feature.name[,2])
feature.name.3 <- feature.name.2[meanstd.grepl]
colnames(x.merge.extract) <- feature.name.3
x.merge.extract <- as.data.frame(x.merge.extract)
## Read and merge y train and test data (activity codes per row)
y.train <- read.table("./UCI HAR Dataset/train/y_train.txt")
y.test <- read.table("./UCI HAR Dataset/test/y_test.txt")
y.merge <- rbind(y.train, y.test)
colnames(y.merge) <- "act_num"
dim(y.merge)
## Read activity labels
act.label <- read.table("./UCI HAR Dataset/activity_labels.txt")
colnames(act.label) <- c("act_num", "act_label")
## Merge y with activity labels
y.merge.label <- merge(y.merge, act.label, by.x = "act_num", by.y = "act_num" )
## Add a column with activity labels to x.merge.extract
x.merge.extract$act_label <- y.merge.label$act_label
## Add a column with subject number to x.merge.extract
subject.train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
subject.test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
subject.merge <- rbind(subject.train, subject.test)
colnames(subject.merge) <- "subject"
x.merge.extract$subject <- subject.merge$subject
## Move subject and activity columns to be 1st and 2nd columns
col.num <- ncol(x.merge.extract)
x.merge.extract <- x.merge.extract[,c(col.num, col.num-1, 1:(col.num-2))]
## Create a factor concatenating subject and activity columns
fac.subject.act <- paste("Sub", x.merge.extract$subject, x.merge.extract$act_label)
## Step 5 ##
## Aggregate (mean) data by subject and activity
x.agg <- aggregate(x.merge.extract[,-c(1,2)], by=list(fac.subject.act), FUN="mean")
## end ###
|
354c1901464f99b0d9074ea27b3d1aec384f41c2
|
da0bde5d3019691bf541eb0bd0df9de69f3f4e93
|
/r_workshop_codes/slide_67_binomial_distribution.R
|
427a43ef6615f046ed0213ec2c152b66e13320a3
|
[] |
no_license
|
rnradon/r_workshop
|
9de29eca296926a52c285fb3110328ec306da500
|
c39f3a08345e02c91cd8026810c7ba592aebcd64
|
refs/heads/master
| 2021-05-11T19:52:00.856478
| 2018-01-29T16:07:23
| 2018-01-29T16:07:23
| 117,425,358
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 625
|
r
|
slide_67_binomial_distribution.R
|
# dbinom(): binomial probability mass function
# dbinom(x, size, prob)
# Create a sequence of 51 values: 0, 1, ..., 50 possible success counts.
x <- seq(0,50,by = 1)
# Evaluate the Binomial(size = 50, prob = 0.5) mass at each value.
y <- dbinom(x,50,0.5)
# Plot the probability mass function.
plot(x,y)
# Close the graphics device opened by plot().
dev.off()
# pbinom(): cumulative distribution function
# Probability of getting 26 or fewer heads in 51 tosses of a fair coin.
x <- pbinom(26,51,0.5)
print(x)
# qbinom(): quantile function
# Smallest number of heads whose cumulative probability reaches 0.25
# when a fair coin is tossed 51 times.
x <- qbinom(0.25,51,1/2)
print(x)
# rbinom(): random deviates
# Draw 8 random counts from Binomial(size = 150, prob = 0.4).
x <- rbinom(8,150,.4)
print(x)
|
3c0129779a94e0508bc88aca161d89d3fbc1a416
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.machine.learning/man/textract_start_document_text_detection.Rd
|
f37ad5058b5fe4367bb2b53e20b354a7e467fab4
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 2,284
|
rd
|
textract_start_document_text_detection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/textract_operations.R
\name{textract_start_document_text_detection}
\alias{textract_start_document_text_detection}
\title{Starts the asynchronous detection of text in a document}
\usage{
textract_start_document_text_detection(
DocumentLocation,
ClientRequestToken = NULL,
JobTag = NULL,
NotificationChannel = NULL,
OutputConfig = NULL,
KMSKeyId = NULL
)
}
\arguments{
\item{DocumentLocation}{[required] The location of the document to be processed.}
\item{ClientRequestToken}{The idempotent token that's used to identify the start request. If you
use the same token with multiple
\code{\link[=textract_start_document_text_detection]{start_document_text_detection}}
requests, the same \code{JobId} is returned. Use \code{ClientRequestToken} to
prevent the same job from being accidentally started more than once. For
more information, see \href{https://docs.aws.amazon.com/textract/latest/dg/api-async.html}{Calling Amazon Textract Asynchronous Operations}.}
\item{JobTag}{An identifier that you specify that's included in the completion
notification published to the Amazon SNS topic. For example, you can use
\code{JobTag} to identify the type of document that the completion
notification corresponds to (such as a tax form or a receipt).}
\item{NotificationChannel}{The Amazon SNS topic ARN that you want Amazon Textract to publish the
completion status of the operation to.}
\item{OutputConfig}{Sets if the output will go to a customer defined bucket. By default
Amazon Textract will save the results internally to be accessed with the
GetDocumentTextDetection operation.}
\item{KMSKeyId}{The KMS key used to encrypt the inference results. This can be in either
Key ID or Key Alias format. When a KMS key is provided, the KMS key will
be used for server-side encryption of the objects in the customer
bucket. When this parameter is not enabled, the result will be encrypted
server side, using SSE-S3.}
}
\description{
Starts the asynchronous detection of text in a document. Amazon Textract can detect lines of text and the words that make up a line of text.
See \url{https://www.paws-r-sdk.com/docs/textract_start_document_text_detection/} for full documentation.
}
\keyword{internal}
|
21dfec89c5a4c04d10a6572228e77dd3b95f8e05
|
a3c197315237165f64bcd5beacace973e2cf5ca9
|
/analysis/study2.R
|
4fbd6cd64b0d40cfcc84ec4a7ac725daf1fdeb45
|
[] |
no_license
|
FinkAr/mix_setbased_discrete
|
056ef74d77d426313356b32f59e4c885da2c9d95
|
e21797323e78d53800660bd48e6d6f6f7749a474
|
refs/heads/main
| 2023-08-17T00:39:46.497675
| 2021-09-23T21:50:45
| 2021-09-23T21:50:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,327
|
r
|
study2.R
|
# Analysis script for Study 2: reads saved TestDesign simulation results,
# computes RMSE (overall and per true-theta level) and exposure metrics per
# condition, writes a summary CSV, then runs ANOVA and main-effects models.
library(tools)
library(progress)
library(TestDesign)
fp <- "~/Box Sync/Behaviormetrika_Special_Issue/Article_2_PassageCAT/Results"
# needs to be replaced with the path of your own simulation results folder
# run the simulation first using: simulation/study2.R
fs <- list.files(file.path(fp, "Study2"))
# one row per result file; the 5 IVs are parsed from the file name below
o <- as.data.frame(matrix(NA, length(fs), 5))
colnames(o) <- c("weave", "exposure", "target", "refresh", "replication")
oo <- o
# dependent-variable columns, filled in the loop
oo$rmse <- NA
for (theta in seq(-2, 2)) {
v <- sprintf("rmse_%s", theta)
oo[[v]] <- NA
}
oo$overexposed <- NA
oo$sd_exposure <- NA
pb <- progress_bar$new(format = "[:bar] :current / :total | :eta", total = length(fs))
for (i in 1:length(fs)) {
f <- fs[i]
# load() restores the `solution` object saved by the simulation
solution <- NULL
load(file.path(fp, "Study2", f))
# IVs are encoded in the file name, after two leading tokens
IVs <- strsplit(file_path_sans_ext(f), "_")[[1]][-c(1:2)]
oo[i, 1:5] <- IVs
# overall RMSE of the final theta estimates
oo$rmse[i] <- sqrt(mean((solution@final_theta_est - solution@true_theta)**2))
# conditional RMSE at each true-theta level
for (theta in seq(-2, 2)) {
v <- sprintf("rmse_%s", theta)
idx <- solution@true_theta == theta
true_theta_subset <- solution@true_theta[idx]
final_theta_subset <- solution@final_theta_est[idx]
oo[[v]][i] <- sqrt(mean((final_theta_subset - true_theta_subset)**2))
}
# exposure metrics: share of items above the 0.25 cap, and SD of rates
er <- solution@exposure_rate[, "Item ER"]
oo$overexposed[i] <- mean(er > 0.25)
oo$sd_exposure[i] <- sd(er)
pb$tick(1)
}
# fix factor-level order for modeling and reporting
oo$weave <- factor(oo$weave , c("interspersed", "ds", "sd", "setbased"))
oo$exposure <- factor(oo$exposure, c("none", "bigm"))
oo$target <- factor(oo$target , c("maxinfo", "goalinfo8", "goalinfo7", "goalinfo6"))
oo$refresh <- factor(oo$refresh , c("always", "setlevel"))
write.csv(oo, "analysis/study2.csv", row.names = FALSE)
oo <- read.csv("analysis/study2.csv", check.names = FALSE)
# ANOVA (two-way interactions) on the setlevel-refresh subset only
oo <- subset(oo, refresh == "setlevel")
DVs <- c("rmse", "overexposed", "sd_exposure")
for (DV in DVs) {
m <- formula(sprintf("`%s` ~ (weave + exposure + target)^2", DV))
fit <- aov(m, oo)
x <- summary(fit)[[1]]
f <- sprintf("analysis/study2_aov_%s.csv", DV)
write.csv(x, f)
}
# main effects
DVs <- c("rmse", "overexposed", "sd_exposure")
for (DV in DVs) {
m <- formula(sprintf("`%s` ~ (weave + exposure + target)", DV))
fit <- lm(eval(m), oo)
x <- coef(summary(fit))
f <- sprintf("analysis/study2_main_%s.csv", DV)
write.csv(x, f)
}
|
ed545756d5e597a8e8685e4d02b74be7d66f1d1e
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/interep/R/penalty.R
|
2a297cc8c66a04d526734888688acf57b755008c
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,958
|
r
|
penalty.R
|
#' This function gives the penalty functions
#' @importFrom stats gaussian
#' @param x matrix of covariates.
#' @param n the sample size.
#' @param p the number of predictors.
#' @param q the number of environment factors.
#' @param beta the coefficient vector.
#' @param lam1 the tuning parameter lambda1 for individual penalty.
#' @param pmethod the penalization method. "mixed" refers to MCP penalty to individual main effects and group MCP penalty to interactions; "individual" means MCP penalty to all effects.
#' @param p1 the number of gene factors.
#' @param lam2 the tuning parameter lambda2 for group penalty.
#' @return
#' \item{E}{the penalty function.}
#' @export
# Build the diagonal penalty matrix E used in the interep estimation loop.
# For "individual": MCP penalty applied to every effect past the first q+1
# unpenalized columns. For "mixed": MCP on the p1 gene main effects (plus
# intercept/environment columns, zeroed below) and group MCP on each gene's
# block of q interaction terms. `eps` guards against division by a zero norm.
penalty <- function(x,n,p,q,beta,lam1,pmethod,p1,lam2){
eps=0.000001
if(pmethod=="individual"){
# penalize coefficients beyond the intercept + q environment columns
beta.mcp=beta[(q+2):p]
x.mcp=x[,(q+2):p]
E.mcp=rep(0,(p-q-1))
for (j in 1:(p-q-1)) {
sub=j
x.sub=x.mcp[,sub]
beta0=beta.mcp[sub]
kj=t(x.sub)%*%x.sub/n
# empirical norm of the fitted contribution of this single column
norm = sqrt(mean((x.sub*beta0)^2))
#norm=as.numeric(sqrt(t(beta0)%*%kj%*%beta0))
# MCP derivative scaled by the (regularized) norm; dmcp is a package helper
E.mcp[j]=dmcp(abs(as.vector(beta0)),lam1)/(abs(as.vector(norm))+eps)
#E1=adiag(E1,E.groupmcp[,,j])
}
E=E.mcp
# leave the first q+1 columns (intercept + environment) unpenalized
E=c(rep(0,q+1),E)
E=diag(E)
}
if(pmethod=="mixed"){
# individual MCP on the first p1+q+1 columns (gene mains + env + intercept)
beta.mcp=beta[1:(p1+q+1)]
x.mcp=x[,1:(p1+q+1)]
E.mcp=rep(0,(p1+q+1))
for (j in 1:(p1+q+1)) {
sub=j
x.sub=x.mcp[,sub]
beta0=beta.mcp[sub]
kj=t(x.sub)%*%x.sub/n
norm = sqrt(mean((x.sub*beta0)^2))
E.mcp[j]=dmcp(abs(as.vector(beta0)),lam1)/(abs(as.vector(norm))+eps)
}
# group MCP on each gene's block of q gene-environment interaction columns
x.gmcp=x[,(p1+q+2):p]
beta.gmcp=beta[c((p1+q+2):p)]
for (j in 1:p1) {
sub=((j-1)*q+1):(j*q)
x.sub=x.gmcp[,sub]
beta0=beta.gmcp[sub]
# norm of the whole group's fitted contribution, penalized with lam2
norm = sqrt(mean((x.sub%*%beta0)^2))
E.mcp=c(E.mcp,dmcp(abs(as.vector(beta0)),lam2)/(abs(as.vector(norm))+eps))
}
E1.mcp=diag(E.mcp)
# zero out penalties on the intercept and environment columns
E1.mcp[,c(1:(1+q))]<-0
E<-E1.mcp
}
return(E)
}
|
f7dad67da8dbacd6963d9282b653d3637735c1df
|
1ac2a49a60a821bd2efa4534b48041b9a279ccbe
|
/019000591.r
|
a23732232a4eb0ea35d637384e17737152597355
|
[] |
no_license
|
erex/MT4113-2016-asmt1-old
|
301ba43d616d529469cdfee16130234174b99827
|
42ceed426fb9980b53d01b724e8855df785ebcb4
|
refs/heads/master
| 2020-05-23T08:11:20.797156
| 2016-09-27T09:38:38
| 2016-09-27T09:38:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,685
|
r
|
019000591.r
|
# MT4113 - Assignment 1
# Random Number Generation
# I confirm that the attached is my own work, except where clearly indicated in the text
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
# PART 1: MARSAGLIA & BRAY'S METHOD
# I have defined a function my.rnorm which will take arguments:
# n = the number of values to be returned
# mean = the mean of the values to be returned, with default mean = 0
# sd = the standard deviation of the values to be returned, with default sd = 0
#' Generate pseudo-random normal deviates via Marsaglia & Bray's polar method.
#'
#' @param n    Number of values to return; must be a single whole number >= 1.
#' @param mean Mean of the returned values (default 0); single numeric.
#' @param sd   Standard deviation of the returned values (default 1); single
#'             positive numeric.
#' @return A numeric vector of length n.
#' Stops with "invalid arguments" on any malformed input.
my.rnorm <- function(n, mean = 0, sd = 1) {
  # Validate inputs up front. n must also be a whole number: the original
  # accepted fractional n and silently produced truncated output.
  if (!is.numeric(n) || length(n) != 1 || n < 1 || n != floor(n)) {
    stop("invalid arguments")
  }
  if (!is.numeric(mean) || length(mean) != 1) {
    stop("invalid arguments")
  }
  if (!is.numeric(sd) || length(sd) != 1 || sd <= 0) {
    stop("invalid arguments")
  }
  # The polar method produces deviates in pairs, so generate ceiling(n/2)
  # pairs and drop the surplus value when n is odd. Preallocating 2*m slots
  # avoids the original's silent write one element past the vector end.
  m <- ceiling(n / 2)
  out <- numeric(2 * m)
  for (i in seq_len(m)) {
    # Rejection-sample a point uniformly inside the unit circle.
    repeat {
      u1 <- 2 * runif(1) - 1
      u2 <- 2 * runif(1) - 1
      w <- u1^2 + u2^2
      # w > 0 guards against log(0) in the (measure-zero) case u1 = u2 = 0.
      if (w <= 1 && w > 0) break
    }
    v <- sqrt(-2 * log(w) / w)
    out[2 * i - 1] <- u1 * v
    out[2 * i] <- u2 * v
  }
  # Shift/scale the standard-normal deviates to the requested mean and sd.
  out <- sd * out + mean
  out[seq_len(n)]
}
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Part 2: RETURNING PSEUDO-RANDOM NORMALLY-DISTRIBUTED DEVIATES
# For part 2 I have defined a function general.rnorm which takes arguments:
# n = number of values to return
# mean = mean of the values to return, default = 0
# sd = sd of the values to return, default = 1
# method = method to be used to generate the deviates, default = 1 which refers to the Marsaglia & Bray's algorithm
# method = 2 refers to the Box & Mueller algorithm
# method = 3 refers to the central-limit theorem algorithm
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
#' Generate pseudo-random normal deviates by one of three algorithms.
#'
#' @param n      Number of values to return; single whole number >= 1.
#' @param mean   Mean of the returned values (default 0).
#' @param sd     Standard deviation of the returned values (default 1, > 0).
#' @param method 1 = Marsaglia & Bray (delegates to my.rnorm),
#'               2 = Box-Mueller, 3 = central-limit theorem (sum of 16
#'               uniforms). Default 1.
#' @return A numeric vector of length n.
#' Stops with "invalid arguments" on any malformed input.
#'
#' NOTE(fix): the original ended with an unbraced `else` followed by several
#' statements, so only `u <- c()` was actually conditional; the method-3 code
#' ran unconditionally and the script only worked because methods 1 and 2
#' return early. All branches are now explicitly braced.
general.rnorm <- function(n, mean = 0, sd = 1, method = 1) {
  if (!is.numeric(n) || length(n) != 1 || n < 1 || n != floor(n)) {
    stop("invalid arguments")
  }
  if (!is.numeric(mean) || length(mean) != 1) {
    stop("invalid arguments")
  }
  if (!is.numeric(sd) || length(sd) != 1 || sd <= 0) {
    stop("invalid arguments")
  }
  if (!is.numeric(method) || length(method) != 1 ||
      method != floor(method) || !(method %in% c(1, 2, 3))) {
    stop("invalid arguments")
  }

  if (method == 1) {
    # Marsaglia & Bray polar method, implemented in my.rnorm().
    return(my.rnorm(n, mean, sd))
  }

  if (method == 2) {
    # Box-Mueller: each (u1, u2) pair of uniforms yields two independent
    # standard-normal deviates; generate ceiling(n/2) pairs, drop the spare.
    m <- ceiling(n / 2)
    out <- numeric(2 * m)
    for (i in seq_len(m)) {
      u1 <- runif(1)
      u2 <- runif(1)
      r <- sqrt(-2 * log(u2))
      out[2 * i - 1] <- sin(2 * pi * u1) * r
      out[2 * i] <- cos(2 * pi * u1) * r
    }
    return((sd * out + mean)[seq_len(n)])
  }

  # method == 3: central-limit theorem. The sum of 16 uniforms has mean 8
  # and variance 16/12; subtracting 8 and scaling by sqrt(12/16) gives an
  # approximately standard-normal deviate. runif(16) draws the same 16
  # values from the RNG stream as the original's scalar loop.
  x <- numeric(n)
  for (j in seq_len(n)) {
    x[j] <- (sum(runif(16)) - 8) * sqrt(12 / 16)
  }
  sd * x + mean
}
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
# TESTING FUNCTIONS
# Testing that my.rnorm produces the correct number of outputs for when n is odd or even.
# This function is based on the function given on the assignment worksheet.
# Returns TRUE when my.rnorm(n) yields a numeric vector of exactly n values.
output.test1 <- function(n) {
  generated <- my.rnorm(n)
  is.numeric(generated) & (length(generated) == n)
}
# Testing that general.rnorm also produces the correct number of outputs for when n is odd or even for each method
# Returns TRUE when general.rnorm(n) yields n numeric values for each of
# the three methods (1 = polar, 2 = Box-Mueller, 3 = CLT).
output.test2 <- function(n) {
  samples <- lapply(1:3, function(m) general.rnorm(n, method = m))
  ok_len <- all(vapply(samples, length, integer(1)) == n)
  ok_num <- all(vapply(samples, is.numeric, logical(1)))
  ok_len & ok_num
}
# Testing the null hypothesis that the sample comes from a population with an underlying normal distribution. Using a 10% significance level, if p>0.1, we conclude that we cannot reject the null hypothesis.
# Shapiro-Wilk normality check on my.rnorm output at the 10% level:
# TRUE means we cannot reject the null hypothesis of an underlying
# normal distribution. ($p.value spelled out; the original relied on
# partial matching via $p.)
output.test3 <- function(n, mean, sd) {
  draws <- my.rnorm(n, mean, sd)
  shapiro.test(draws)$p.value > 0.1
}
# Same test as above, but for the general.rnorm function
# Same Shapiro-Wilk check as output.test3, but for general.rnorm so any
# of the three generation methods can be tested.
output.test4 <- function(n, mean, sd, method) {
  draws <- general.rnorm(n, mean, sd, method)
  shapiro.test(draws)$p.value > 0.1
}
# A visual test, to see whether the data produced in the output is normally distributed, is a histogram plot.
# Visual normality check: plot and return a histogram of general.rnorm
# output for manual inspection.
output.test5 <- function(n, mean, sd, method) {
  drawn <- general.rnorm(n, mean, sd, method)
  h <- hist(drawn, main = "Histogram for the output of data")
  h
}
# Visual normality check: plot and return a histogram of my.rnorm output
# for manual inspection.
output.test6 <- function(n, mean, sd) {
  drawn <- my.rnorm(n, mean, sd)
  h <- hist(drawn, main = "Histogram for the output of data")
  h
}
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------
# END OF ASSIGNMENT 1
|
93ba697635f4ed57ac3320a44857b35f535d0511
|
efc90920e92248b31fee1603f9a038e54efa4e1b
|
/man/cpbR.Rd
|
8b7ad6b759b875664ca40a820621205390eedc68
|
[] |
no_license
|
jtilly/cbpR
|
122a9f9c343294e368f65852a96e0a6a999df74a
|
fd881c5e13c2e95d4b84e1d89855bd36deffc21b
|
refs/heads/master
| 2021-01-22T10:19:08.049589
| 2017-01-03T15:02:06
| 2017-01-03T15:02:06
| 30,133,513
| 15
| 6
| null | 2017-07-07T21:41:09
| 2015-02-01T02:13:01
|
R
|
UTF-8
|
R
| false
| false
| 2,204
|
rd
|
cpbR.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cpbR.R
\docType{package}
\name{cpbR}
\alias{cpbR}
\alias{cpbR-package}
\title{Get the firm count data from the County Business Patterns (CBP)}
\description{
This is an R Package that downloads and prepares panel data sets from the
Census County Business Patterns (CBP).
}
\details{
It downloads the CBP data on the county level and then allows the user to
aggregate the data up into larger geographic entities such as Metropolitan
Statistical Areas, Micropolitan Statistical Areas, or some user defined
collection of counties.
The file demo/cardealers.R contains a demonstration. It generates a panel
data set for "New Car Dealers" (NAICS 441110). The data set ranges from 2000
to 2009. It aggregates the firm count data from the County Business Patterns
into Micropolitan Statistical Areas and returns a dataset with annual data
on the firm count, employment (if available), firm count by employment, and
population figures for each Micropolitan Statistical Area. The population
estimates are taken from the Annual Estimates of the Population of
Metropolitan and Micropolitan Statistical Areas: April 1, 2000 to July 1,
2009 (CBSA-EST2009-01). The Micropolitan Statistical Area definitions are
taken from the Census 2003-2009 Delineation Files.
The data frame \code{firms} contains the following columns
\itemize{
\item \code{fips} the fips code
\item \code{fipsstate} the state fips code
\item \code{fipscty} the county fips code within the state
\item \code{year} the year of observation
\item \code{est} the number of establishments
\item \code{n1_4} the number of establishments with between 1 and 4 employees
\item \code{n5_9} the number of establishments with between 5 and 9 employees
\item \code{n10_19} ... with between 10 and 19 employees
\item \code{n20_49} ... with between 20 and 49 employees
\item \code{n50_99}... with between 50 and 99 employees
\item \code{n100_249} ... with between 100 and 249 employees
\item \code{n250_499} ... with between 250 and 499 employees
\item \code{n500_999} ... with between 500 and 999 employees
\item \code{n1000plus} ... with between more than 1000 employees
}
}
|
e20b8874b8044766dcb6ef36a4f808b2922f13ef
|
b7aca1dea3c0bcbc208f8ae20e3e7c283df48b2b
|
/man/h_test.Rd
|
53f04a3a328cc45b420b7de7e65cdbc092dd9a12
|
[
"MIT"
] |
permissive
|
thjsal/Dipol2Red
|
df983398b69c73f8d0b1d7e408a6ed8e4355fbef
|
ce5fa892398e781983ebce51b18ddae95ae4c7f2
|
refs/heads/master
| 2022-03-28T16:04:54.074569
| 2020-01-09T12:24:26
| 2020-01-09T12:24:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 509
|
rd
|
h_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h_test.R
\name{h_test}
\alias{h_test}
\title{h_test}
\usage{
h_test(data, p_x = Px, p_y = Py, sg = SG, cov = Q, n = N)
}
\arguments{
\item{data}{Two-row input tibble}
\item{p_x}{Px polarization column name}
\item{p_y}{Py polarization column name}
\item{sg}{Sigma column name}
\item{cov}{Covariance column name}
\item{n}{Number of observations column name}
}
\description{
Performs Hotelling T^2 test on the Dipol-2 averages
}
|
e7341549e30e4ed35109a80c162ecef06c6c564a
|
f6950a133577ee09ab032a0f17a8ccd2141467a6
|
/R/testDriveR-package.R
|
f4e5c0c6bd34ff8cbe21b59043b0cbc1d8a73baf
|
[] |
no_license
|
cran/testDriveR
|
2a610463b44e3d578303a6ba40263ad9ec5ec271
|
1a54e4eda576818500cd98ee0b469199e7d433cf
|
refs/heads/master
| 2022-06-16T16:18:40.243419
| 2022-05-29T04:30:02
| 2022-05-29T04:30:02
| 146,950,995
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,391
|
r
|
testDriveR-package.R
|
#' testDriveR: Teaching Data for Statistics and Data Science
#'
#' @description The goal of \code{testDriveR} is to provide data sets for teaching
#' statistics and data science courses. This package includes a sample of
#' data from John Edmund Kerrich's famous coinflip experiment. These are data
#' that I use for teaching \href{https://slu-soc5050.github.io}{SOC 4015 / SOC 5050}
#' at \href{https://www.slu.edu}{Saint Louis University}.
#'
#' @details There are currently five data sets that are included in the package:
#'
#' \itemize{
#' \item \code{\link{auto17}} - A data set containing model year 2017 vehicles
#' for sale in the United States
#' \item \code{\link{childMortality}} - A data set containing childhood mortality
#' time series data by country from UNICEF
#' \item \code{\link{gss14}} - A data set containing a selection of variables
#' related to work and education from the 2014 General Social Survey
#' \item \code{\link{gss14_simple}} - A simple version of \code{gss14} without
#' factors created and without missing data explicitly declared
#' \item \code{\link{kerrich}} - A data set containing 2000 trials of coin flips by
#' \href{https://en.wikipedia.org/wiki/John_Edmund_Kerrich}{John Edmund Kerrich}
#' }
#'
#' @name testDriveR
#' @docType package
NULL
|
a5eae85da0873d09e6ab9502ea7f9b109057f6c0
|
0ae69401a429092c5a35afe32878e49791e2d782
|
/trinker-lexicon-4c5e22b/inst/scraping_scripts/profanity/profanity_alvarez.R
|
125fc0f646c3e3f280802b1bd2e7ffc19910a84a
|
[] |
no_license
|
pratyushaj/abusive-language-online
|
8e9156d6296726f726f51bead5b429af7257176c
|
4fc4afb1d524c8125e34f12b4abb09f81dacd50d
|
refs/heads/master
| 2020-05-09T20:37:29.914920
| 2019-06-10T19:06:30
| 2019-06-10T19:06:30
| 181,413,619
| 3
| 0
| null | 2019-06-05T17:13:22
| 2019-04-15T04:45:06
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 4,586
|
r
|
profanity_alvarez.R
|
pacman::p_load(xml2, rvest, tidyverse)
profanity_alvarez <- c("*damn", "*dyke", "*fuck*", "*shit*", "@$$", "ahole", "amcik",
"andskota", "anus", "arschloch", "arse*", "ash0le", "ash0les",
"asholes", "ass", "Ass Monkey", "Assface", "assh0le", "assh0lez",
"asshole", "assholes", "assholz", "assrammer", "asswipe", "ayir",
"azzhole", "b!+ch", "b!tch", "b00b*", "b00bs", "b17ch", "b1tch",
"bassterds", "bastard", "bastards", "bastardz", "basterds", "basterdz",
"bi+ch", "bi7ch", "Biatch", "bitch", "bitch*", "bitches", "Blow Job",
"blowjob", "boffing", "boiolas", "bollock*", "boobs", "breasts",
"buceta", "butt-pirate", "butthole", "buttwipe", "c0ck", "c0cks",
"c0k", "cabron", "Carpet Muncher", "cawk", "cawks", "cazzo",
"chink", "chraa", "chuj", "cipa", "clit", "Clit", "clits", "cnts",
"cntz", "cock", "cock-head", "cock-sucker", "Cock*", "cockhead",
"cocks", "CockSucker", "crap", "cum", "cunt", "cunt*", "cunts",
"cuntz", "d4mn", "daygo", "dego", "dick", "dick*", "dike*", "dild0",
"dild0s", "dildo", "dildos", "dilld0", "dilld0s", "dirsa", "dominatricks",
"dominatrics", "dominatrix", "dupa", "dyke", "dziwka", "ejackulate",
"ejakulate", "Ekrem*", "Ekto", "enculer", "enema", "f u c k",
"f u c k e r", "faen", "fag", "fag*", "fag1t", "faget", "fagg1t",
"faggit", "faggot", "fagit", "fags", "fagz", "faig", "faigs",
"fanculo", "fanny", "fart", "fatass", "fcuk", "feces", "feg",
"Felcher", "ficken", "fitt*", "Flikker", "flipping the bird",
"foreskin", "Fotze", "Fu(*", "fuck", "fucker", "fuckin", "fucking",
"fucks", "Fudge Packer", "fuk", "fuk*", "Fukah", "Fuken", "fuker",
"Fukin", "Fukk", "Fukkah", "Fukken", "Fukker", "Fukkin", "futkretzn",
"fux0r", "g00k", "gay", "gayboy", "gaygirl", "gays", "gayz",
"God-damned", "gook", "guiena", "h00r", "h0ar", "h0r", "h0re",
"h4x0r", "hell", "hells", "helvete", "hoar", "hoer", "hoer*",
"honkey", "hoor", "hoore", "hore", "Huevon", "hui", "injun",
"jackoff", "jap", "japs", "jerk-off", "jisim", "jism", "jiss",
"jizm", "jizz", "kanker*", "kawk", "kike", "klootzak", "knob",
"knobs", "knobz", "knulle", "kraut", "kuk", "kuksuger", "kunt",
"kunts", "kuntz", "Kurac", "kurwa", "kusi*", "kyrpa*", "l3i+ch",
"l3itch", "lesbian", "Lesbian", "lesbo", "Lezzian", "Lipshits",
"Lipshitz", "mamhoon", "masochist", "masokist", "massterbait",
"masstrbait", "masstrbate", "masterbaiter", "masterbat*", "masterbat3",
"masterbate", "masterbates", "masturbat*", "masturbate", "merd*",
"mibun", "mofo", "monkleigh", "Motha Fucker", "Motha Fuker",
"Motha Fukkah", "Motha Fukker", "mother-fucker", "Mother Fucker",
"Mother Fukah", "Mother Fuker", "Mother Fukkah", "Mother Fukker",
"motherfucker", "mouliewop", "muie", "mulkku", "muschi", "Mutha Fucker",
"Mutha Fukah", "Mutha Fuker", "Mutha Fukkah", "Mutha Fukker",
"n1gr", "nastt", "nazi", "nazis", "nepesaurio", "nigga", "nigger",
"nigger*", "nigger;", "nigur;", "niiger;", "niigr;", "nutsack",
"orafis", "orgasim;", "orgasm", "orgasum", "oriface", "orifice",
"orifiss", "orospu", "p0rn", "packi", "packie", "packy", "paki",
"pakie", "paky", "paska*", "pecker", "peeenus", "peeenusss",
"peenus", "peinus", "pen1s", "penas", "penis", "penis-breath",
"penus", "penuus", "perse", "Phuc", "phuck", "Phuck", "Phuk",
"Phuker", "Phukker", "picka", "pierdol*", "pillu*", "pimmel",
"pimpis", "piss*", "pizda", "polac", "polack", "polak", "Poonani",
"poontsee", "poop", "porn", "pr0n", "pr1c", "pr1ck", "pr1k",
"preteen", "pula", "pule", "pusse", "pussee", "pussy", "puta",
"puto", "puuke", "puuker", "qahbeh", "queef*", "queer", "queers",
"queerz", "qweers", "qweerz", "qweir", "rautenberg", "recktum",
"rectum", "retard", "s.o.b.", "sadist", "scank", "schaffer",
"scheiss*", "schlampe", "schlong", "schmuck", "screw", "screwing",
"scrotum", "semen", "sex", "sexy", "sh!+", "sh!t", "Sh!t", "sh!t*",
"sh1t", "sh1ter", "sh1ts", "sh1tter", "sh1tz", "sharmuta", "sharmute",
"shemale", "shi+", "shipal", "shit", "shits", "shitter", "Shitty",
"Shity", "shitz", "shiz", "Shyt", "Shyte", "Shytty", "Shyty",
"skanck", "skank", "skankee", "skankey", "skanks", "Skanky",
"skribz", "skurwysyn", "slut", "sluts", "Slutty", "slutz", "smut",
"son-of-a-bitch", "sphencter", "spic", "spierdalaj", "splooge",
"suka", "teets", "teez", "testical", "testicle", "testicle*",
"tit", "tits", "titt", "titt*", "turd", "twat", "va1jina", "vag1na",
"vagiina", "vagina", "vaj1na", "vajina", "vittu", "vullva", "vulva",
"w00se", "w0p", "wank", "wank*", "wetback*", "wh00r", "wh0re",
"whoar", "whore", "wichser", "wop*", "xrated", "xxx", "yed",
"zabourah")
pax::new_data(profanity_alvarez)
pax:::roxdat(profanity_alvarez, 'profanity_alvarez')
|
eb8be18e0f71819a4128db69c05954b5dfe33b47
|
da268d2348146517eeeaeeca46c413abdb854aac
|
/plots.R
|
888ff423ef7752103c15d78a4e114924fbb610fa
|
[] |
no_license
|
ss4357/cv
|
91ac882173d38058dd73514a69f698029361f312
|
d7348dec25c6450685d034a9d96854056a012757
|
refs/heads/main
| 2023-02-25T06:16:21.192693
| 2021-02-18T09:31:29
| 2021-02-18T09:31:29
| 338,507,336
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,148
|
r
|
plots.R
|
# Exploratory plotting script for the ExRad serum lipidomics/metabolomics
# study: PCA plots, volcano plots, and biomarker heatmap/exports.
library(tidyverse)
library(readxl)
library(ggfortify)
library(ggplot2)
library(EnhancedVolcano)
# Project root = two directories above this script. NOTE(review): depends on
# the rstudioapi package and only works inside an RStudio session.
project_dir <- dirname(dirname(rstudioapi::getActiveDocumentContext()$path))
#ms2 validations---------------------------------------------------------------------------------
# Load the MS2 validation tables (lipidomics/metabolomics x positive/negative
# ionization mode), keeping only features with a confirmed chemical name.
valid_lp <- read_xlsx(paste0(project_dir, "/data/2020-05-12/lipidomics_pos_ms2_validation_results.xlsx")) %>%
  filter(!is.na(name))
valid_ln <- read_xlsx(paste0(project_dir, "/data/2020-05-12/lipidomics_neg_ms2_validation_results.xlsx")) %>%
  filter(!is.na(name))
valid_mp <- read_xlsx(paste0(project_dir, "/data/2020-05-12/metabolomics_pos_ms2_validation_results.xlsx")) %>%
  filter(!is.na(name))
valid_mn <- read_xlsx(paste0(project_dir, "/data/2020-05-12/metabolomics_neg_ms2_validation_results.xlsx")) %>%
  filter(!is.na(name))
# PCA --------------------------------------------------------------------------------
# Build a PCA scatter plot for one comparison CSV under data/<mode>/.
# Skips the two header lines, drops the first (label) column, runs a
# scaled prcomp, and returns the ggfortify autoplot. Relies on the global
# project_dir and the attached readr/ggfortify packages.
pcaplots <- function(mode, comparison) {
  csv_path <- paste0(project_dir, "/data/", mode, "/", comparison)
  raw <- read_csv(csv_path, col_names = FALSE, skip = 2)
  fit <- prcomp(raw[, -1], scale. = TRUE)
  autoplot(fit, colour = "coral2")
}
# File names of the four comparison CSVs produced upstream.
rad_v_sham <- "1. radiation_vs_sham.csv"
veh_v_sham <- "2. vehicle_sd4_vs_sham.csv"
exrad1_v_veh <- "3. exrad1_sd4_vs_vehicle_sd4.csv"
exrad2_v_veh <- "4. exrad2_sd4_vs_vehicle_sd4.csv"
# Assay subdirectories under data/: lipidomics/metabolomics, NEG/POS mode.
ln <- "exrad_serum_lipidomics_NEG"
lp <- "exrad_serum_lipidomics_POS"
mn <- "exrad_serum_metabolomics_NEG"
mp <- "exrad_serum_metabolomics_POS"
# Only the first PCA plot is written to a file via the jpeg device; the
# remaining calls rely on top-level auto-printing of the returned ggplot
# objects. NOTE(review): when this file is source()d without echo those
# values are discarded silently -- confirm intended usage.
jpeg(paste0(project_dir, "/ln_rad_v_sham.jpg"))
pcaplots(ln, rad_v_sham)
dev.off()
pcaplots(ln, veh_v_sham)
pcaplots(ln, exrad1_v_veh)
pcaplots(ln, exrad2_v_veh)
pcaplots(lp, rad_v_sham)
pcaplots(lp, veh_v_sham)
pcaplots(lp, exrad1_v_veh)
pcaplots(lp, exrad2_v_veh)
pcaplots(mn, rad_v_sham)
pcaplots(mn, veh_v_sham)
pcaplots(mn, exrad1_v_veh)
pcaplots(mn, exrad2_v_veh)
pcaplots(mp, rad_v_sham)
pcaplots(mp, veh_v_sham)
pcaplots(mp, exrad1_v_veh)
pcaplots(mp, exrad2_v_veh)
# Volcano -----------------------------------------------------------------------------------------
# The default cut-off for log2FC is >|2|; the default cut-off for P value is 10e-6.
# Column layout shared by every stats_all.xlsx: id + feature name, then four
# comparison blocks of (p, FDR, FC, log2FC) suffixed _x, _y, _x_x, _y_y.
# (DRY fix: the original repeated this vector and the read/join code four
# times verbatim.)
stats_col_names <- c("index", "name",
                     "p_value_x", "FDR_x", "foldchange_x", "log2FC_x",
                     "p_value_y", "FDR_y", "foldchange_y", "log2FC_y",
                     "p_value_x_x", "FDR_x_x", "foldchange_x_x", "log2FC_x_x",
                     "p_value_y_y", "FDR_y_y", "foldchange_y_y", "log2FC_y_y")

# Read one assay's stats table, applying the shared column names.
read_stats <- function(assay_dir) {
  read_xlsx(paste0(project_dir, "/data/", assay_dir, "/stats_all.xlsx"),
            col_names = stats_col_names, skip = 1)
}

stats_lp <- read_stats("exrad_serum_lipidomics_POS")
stats_ln <- read_stats("exrad_serum_lipidomics_NEG")
stats_mp <- read_stats("exrad_serum_metabolomics_POS")
stats_mn <- read_stats("exrad_serum_metabolomics_NEG")

# Attach the MS2-validated chemical names (validation columns 3:4 =
# precursor_mz_rt, name) to a stats table, keyed on the precursor
# m/z + retention-time string.
add_valid_names <- function(stats_df, valid_df) {
  stats_df %>%
    left_join(valid_df %>% dplyr::select(3, 4) %>% dplyr::rename(chem_name = name),
              by = c("name" = "precursor_mz_rt"))
}

valid_stats_lp <- add_valid_names(stats_lp, valid_lp)
valid_stats_ln <- add_valid_names(stats_ln, valid_ln)
valid_stats_mp <- add_valid_names(stats_mp, valid_mp)
valid_stats_mn <- add_valid_names(stats_mn, valid_mn)

all_stats <- bind_rows(valid_stats_lp, valid_stats_ln, valid_stats_mp, valid_stats_mn)
# Volcano plots, one per comparison.
# NOTE(fix): the stats columns were renamed on read to log2FC_y / p_value_y
# (etc.), so the original 'log2(FC).y' / "p.value.y" names passed to
# EnhancedVolcano for plots 2 and 4 could never match a column. All four
# plots now use the renamed columns and label points with chem_name
# consistently; the "Verus" typo in the first title is corrected.
# Rad v sham volcano
all_stats %>% dplyr::select(2, 3, 6) %>%
  EnhancedVolcano(lab = all_stats$chem_name,
                  x = 'log2FC_x',
                  y = "p_value_x",
                  title = "Radiation Versus Sham")
# Vehicle SD4 vs Sham volcano
all_stats %>% dplyr::select(2, 7, 10) %>%
  EnhancedVolcano(lab = all_stats$chem_name,
                  x = 'log2FC_y',
                  y = "p_value_y",
                  title = "Vehicle SD4 Versus Sham")
# EXRADI SD4 versus Vehicle SD4 volcano
all_stats %>% dplyr::select(2, 11, 14) %>%
  EnhancedVolcano(lab = all_stats$chem_name,
                  x = 'log2FC_x_x',
                  y = "p_value_x_x",
                  title = "EXRADI SD4 Versus Vehicle SD4")
# EXRADII SD4 versus Vehicle SD4 volcano
all_stats %>% dplyr::select(2, 15, 18) %>%
  EnhancedVolcano(lab = all_stats$chem_name,
                  x = 'log2FC_y_y',
                  y = "p_value_y_y",
                  title = "EXRADII SD4 Versus Vehicle SD4")
# Biomarker identification ------------------------------------------------------------------------------
valid <- bind_rows(valid_ln, valid_lp, valid_mn, valid_mp)
# Match validations to stats by precursor m/z + retention time, keeping the
# identifying columns plus the four log2FC columns (one per comparison).
valid <- valid %>%
  dplyr::select(mode, precursor_mz_rt, name) %>%
  left_join(all_stats, by = c("precursor_mz_rt" = "name")) %>%
  dplyr::select(1:3, 8, 12, 16, 20)
# Drop the row containing NA (the original comment said "column", but -21
# drops row 21). TODO: replace the hard-coded index with a drop_na() /
# complete.cases() filter so this survives changes in the input data.
valid <- valid[-21, ]
# NOTE(fix): pheatmap is never attached by this script's library() calls, so
# call it through an explicit namespace to make the dependency visible.
pheatmap::pheatmap(as.matrix(valid[, 4:7]),
                   labels_col = c("rad_v_sham", "vehicle_sd4_v_sham",
                                  "exrad1_v_vehicle", "exrad2_v_vehicle"))
# Split features by the sign of the radiation-vs-sham log2FC (column 4).
# [[4]] extracts the column as a vector; the original valid[,4] yields a
# one-column tibble, which newer dplyr rejects as a filter condition.
pos_rad_v_sham <- filter(valid, valid[[4]] > 0)
neg_rad_v_sham <- filter(valid, valid[[4]] < 0)
# Export the feature names of the up- and down-regulated sets.
pos_reg_rad_v_sham <- pos_rad_v_sham[, 3]
neg_reg_rad_v_sham <- neg_rad_v_sham[, 3]
write_tsv(pos_reg_rad_v_sham, paste0(project_dir,
                                     "/pos_reg_rad_v_sham.tsv"))
write_tsv(neg_reg_rad_v_sham, paste0(project_dir,
                                     "/neg_reg_rad_v_sham.tsv"))
|
df98d322ebe0daa83e64c8fa1904a53ff575c6e2
|
e286830b40628a7b920c9977f1995a340e48ea46
|
/man/prepare_categorical_force.Rd
|
9b95bedfd37de65f3b8a237d9f83f6cb14681ba2
|
[] |
no_license
|
cran/rsetse
|
48871acf46ef37e53bf5741dcd73bc467fefe7b3
|
7e412f53e2e2ca38871ec476424a32aafa0075b7
|
refs/heads/master
| 2023-05-25T16:50:57.098677
| 2021-06-11T09:00:02
| 2021-06-11T09:00:02
| 315,994,456
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,406
|
rd
|
prepare_categorical_force.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prepare_categorical_force.R
\name{prepare_categorical_force}
\alias{prepare_categorical_force}
\title{Prepare categorical features for embedding}
\usage{
prepare_categorical_force(g, node_names, force_var, sum_to_one = TRUE)
}
\arguments{
\item{g}{an igraph object}
\item{node_names}{a character string. A vertex attribute which contains the node names.}
\item{force_var}{A vector of force attributes. This describes all the categorical force attributes of the network.
All named attributes must be either character or factor attributes.}
\item{sum_to_one}{Logical. whether the total positive force sums to 1, if FALSE the total is the sum of the positive cases}
}
\value{
A network with the correct node attributes for the embeddings process.
}
\description{
This function prepares a binary network for SETSe projection.
}
\details{
The network takes in an igraph object and produces an undirected igraph object that can be used with the embedding functions.
The purpose of the function is to easily be able to project categorical features using SETSe. The function creates new variables
where each variable represents one level of the categorical variables. For embedding only n-1 of the levels are needed.
The function creates several variables of the format "force_". Vertex attributes representing the force produced by each node
for each categorical value; there will be n of these variables, one for each level of the categorical variable. The variable names
will be the name of the variable and the name of the level separated by an underscore. For example, with a variable group and levels A and B, the created force variables will be
"group_A" and "group_B" The sum of these variables will be 0.
}
\examples{
set.seed(234) #set the random see for generating the network
g <- generate_peels_network(type = "E")
embeddings <- g \%>\%
prepare_edges(k = 500, distance = 1) \%>\%
#prepare the network for a binary embedding
prepare_categorical_force(., node_names = "name",
force_var = "class") \%>\%
#embed the network using auto_setse
setse_auto(., force = "class_A")
}
\seealso{
\link{setse}, \link{setse_auto}, \link{setse_bicomp}, \link{setse_auto_hd}
Other prepare_setse:
\code{\link{prepare_continuous_force}()},
\code{\link{prepare_edges}()}
}
\concept{prepare_setse}
|
9adeeb378b67638e8514ca558c08fa79461d7856
|
5b425ec9d1b4f535330c3c0d39b86273f00e079b
|
/Data_Preprocessing/R/data_preprocessing.R
|
4ebcffe8403d68abeb15eb32821ae4a55fd98d9e
|
[] |
no_license
|
VisualTornado/ML
|
4807ec680457fbfaf7a0ee9ce7f8527f7efab6c0
|
b3596bc232f504707a451845a6d888556f30d3f5
|
refs/heads/master
| 2021-10-16T10:14:13.061232
| 2019-02-10T11:30:50
| 2019-02-10T11:30:50
| 104,111,691
| 2
| 0
| null | 2017-10-04T18:44:27
| 2017-09-19T18:21:27
|
Python
|
UTF-8
|
R
| false
| false
| 1,867
|
r
|
data_preprocessing.R
|
#!/usr/bin/env Rscript
# Data preprocessing pipeline: load CSV, impute missing values, encode
# categoricals, split train/test, and scale numeric features.

# ----- Import the dataset -----
getwd()  # check current working directory
dataset <- read.csv('Data.csv')

# Taking care of missing values: replace NAs in Age/Salary with the mean of
# the observed values in that column.
dataset$Age <- ifelse(is.na(dataset$Age),
                      ave(dataset$Age, FUN = function(x) mean(x, na.rm = TRUE)),
                      dataset$Age)
dataset$Salary <- ifelse(is.na(dataset$Salary),
                         ave(dataset$Salary, FUN = function(x) mean(x, na.rm = TRUE)),
                         dataset$Salary)

# Encoding categorical data: map text categories to factor labels.
dataset$Country <- factor(dataset$Country,
                          levels = c('France', 'Germany', 'Spain'),
                          labels = c(1, 2, 3))
# NOTE(fix): the split below uses dataset$Purchased, but the original assigned
# to dataset$Purchase -- partial matching would read the "Purchased" column
# yet the assignment would create a second, differently named column. One
# consistent name is used here; assumes the CSV column is "Purchased" --
# confirm against Data.csv.
dataset$Purchased <- factor(dataset$Purchased,
                            levels = c('No', 'Yes'),
                            labels = c(0, 1))
(dataset)

# Splitting data into training and test sets.
# install.packages('caTools')
library(caTools)
# NOTE(fix): set.seed() requires an integer; the original set.seed('123')
# raises "supplied seed is not a valid integer".
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.8)  # TRUE rows go to training
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)

# Feature scaling (standardization) on the numeric columns 2:3 only --
# scale() on the whole frame would fail because columns 1 and 4 are factors.
training_set[, 2:3] <- scale(training_set[, 2:3])
test_set[, 2:3] <- scale(test_set[, 2:3])
(training_set)
(test_set)

# NOTE: dummy variables (the encoded Country levels) may or may not be
# normalized depending on the scenario.
#   Standardization = (x - mean(x)) / sd(x)
#   Normalization   = (x - min(x)) / (max(x) - min(x))
# (The original ended with a Python-style """...""" triple-quoted string,
# which is a syntax error in R; converted to comments.)
|
953c536852cd40c9ec013ddf41602f27ac704294
|
2efd08b36b4d7a60f94617dee81668235d589651
|
/practical_machine_learning/old/regression_analysis.R
|
233ea6b9ea471a5eef44a7462e17c1d6201881de
|
[] |
no_license
|
agawronski/datasciencecoursera
|
cebacf9abf4c73ecb69099f686d449da23de08a3
|
c151a7dcd2e494ba21b8235b2f5919eb670c614a
|
refs/heads/master
| 2021-01-18T16:35:36.933648
| 2015-08-23T23:23:29
| 2015-08-23T23:23:29
| 22,187,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 630
|
r
|
regression_analysis.R
|
# Linear regression walkthrough on the Old Faithful dataset:
# stratified train/test split with caret, fit eruptions ~ waiting,
# visualise the fit, and predict at a new waiting time.
library(caret)
data(faithful)
set.seed(333)  # reproducible partition
# 50/50 split, stratified on the waiting-time distribution
inTrain <- createDataPartition(y=faithful$waiting, p=0.5, list=FALSE)
trainFaith <- faithful[inTrain,]
testFaith <- faithful[-inTrain,]
head(trainFaith)
# NOTE(review): x here is eruptions and y is waiting, but the axis labels say
# the opposite — confirm which orientation was intended.
plot(trainFaith$eruptions, trainFaith$waiting, pch=19, col="blue", xlab="waiting", ylab="duration")
# Simple linear model: eruption duration as a function of waiting time
lm1 <- lm(eruptions ~ waiting, data=trainFaith)
summary(lm1)
plot(trainFaith$waiting, trainFaith$eruptions, pch=19, col="blue", xlab="waiting", ylab="duration")
lines(trainFaith$waiting, lm1$fitted, lwd=3)  # fitted regression line
#predict a new value
coef(lm1)[1] + coef(lm1)[2]*80  # manual prediction: intercept + slope * 80
#or like this
newdata <- data.frame(waiting=80)
predict(lm1, newdata)
|
af8a5416e089c976500fe3f1c2415d1bde4c5a36
|
f0e3db3a735307418bb67686e5c29be14e869f04
|
/man/theme_timeline.Rd
|
966627a30f14fdb0b17fa31cc41e56a19856f92e
|
[
"MIT"
] |
permissive
|
sbudiman/MasterRCap
|
e5b83dfa2ee98f73c9b9a1477119bd6a800447a7
|
89ee1d6a6253c49568e6e7b635c2e93dd7e53f61
|
refs/heads/master
| 2020-03-19T19:53:42.463842
| 2018-06-25T00:45:33
| 2018-06-25T00:45:33
| 136,878,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 947
|
rd
|
theme_timeline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeomDefn.R
\name{theme_timeline}
\alias{theme_timeline}
\title{Theme for better timeline visualization in ggplot2}
\usage{
theme_timeline()
}
\description{
This theme function provides better rendering for
\code{\link{geom_timeline}}
}
\examples{
\dontrun{readr::read_delim(file=file.path("data","results"), delim='\\t') \%>\%
eq_clean_data() \%>\%
dplyr::filter(COUNTRY == c("CHINA","JAPAN"), YEAR >= 2000) \%>\%
ggplot2::ggplot(ggplot2::aes(x = DATE,
y = COUNTRY,
color = as.numeric(TOTAL_DEATHS),
size = as.numeric(EQ_PRIMARY))) +
geom_timeline() +
geom_timeline_label(aes(label = LOCATION_NAME),n_max=3) +
theme_timeline() +
labs(size = "Earthquake Magnitude", color = "Fatalities")}
}
|
6f0634e49f60ec36010c38252972123970d094b9
|
8f27d89b8662758f6ab67ae82da4e93af40cce11
|
/R/simplices.r
|
3fb188b040b26319bbf19e662c0c66e4846ef82a
|
[] |
no_license
|
corybrunson/sc
|
4dd49909f7b7b6c6c29d0edd7be81599b2fed60a
|
c1d3e74d57a5e35e3a29e6801c1c8113292f0447
|
refs/heads/master
| 2020-04-23T11:50:53.289563
| 2019-08-26T19:51:40
| 2019-08-26T19:51:40
| 171,149,725
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,367
|
r
|
simplices.r
|
#' @title Vertices and simplices af a simplicial complex object
#'
#' @description These functions return the vertices and the simplices of an
#' object of class 'sc'.
#'
#' @details
#'
#' **sc** encodes simplicial complexes as bipartite [igraph::igraph] objects
#' prefixed with a new 'sc' class, whose nodes are partitioned into those
#' encoding the _vertices_ versus defining _simplices_ of the complex. The
#' simplices are taken to be these defining simplicies together with all of
#' their _faces_ (sub-simplices).
#'
#' The `"type"` attribute takes the values `FALSE` on the vertex nodes and
#' `TRUE` on the simplex nodes. The functions `sc_vertices()` and
#' `sc_simplices()` return the nodes of the 'sc' object corresponding to the
#' vertices and to the simplices, respectively. To obtain the vertices in a
#' specific set of simplices or the simplices containing a specific set of
#' vertices, pass the indices of these simplices or vertices to the
#' corresponding arguments.
#'
#' @name simplices
#' @import igraph
#' @param sc Bipartite [igraph::igraph] object, representing a simplicial
#' complex.
#' @param vertices,simplices Numeric vertex IDs or character names of nodes of
#' `sc` whose incident nodes are desired (if any). Defaults to `NULL`.
#' @param ... Integer, character, or 'igraph.vs' used to identify nodes of `sc`
#' corresponding to vertices or simplices, as appropriate.
#' @example inst/examples/ex-wiki.r
NULL
#' @rdname simplices
#' @export
# Vertex nodes of the simplicial complex, or — when `simplices` is given —
# one list entry per simplex holding the vertices incident to it.
# In the bipartite encoding, vertex nodes come first (type == FALSE) and
# simplex nodes second (type == TRUE).
sc_vertices <- function(sc, simplices = NULL) {
  if (is.null(simplices)) return(V(sc)[!V(sc)$type])
  # Numeric indices that point at vertex nodes are interpreted as simplex
  # ordinals: shift them past the vertex partition.
  if (is.numeric(simplices) & any(V(sc)[simplices]$type == FALSE)) {
    simplices <- simplices + length(which(V(sc)$type == FALSE))
  }
  stopifnot(all(V(sc)[simplices]$type == TRUE))
  # For each simplex node, its 1-neighbourhood minus itself = its vertices.
  lapply(simplices, function(precluster) {
    V(sc)[setdiff(neighborhood(graph = sc, order = 1, nodes = precluster)[[1]],
                  V(sc)[precluster])]
  })
}
#' @rdname simplices
#' @export
# Simplex nodes of the complex, or — when `vertices` is given — one list
# entry per vertex holding the simplex nodes that contain it.
sc_simplices <- function(sc, vertices = NULL) {
  if (is.null(vertices)) return(V(sc)[V(sc)$type])
  stopifnot(all(V(sc)[vertices]$type == FALSE))
  # For each vertex node, its 1-neighbourhood minus itself = the simplices
  # it belongs to.
  lapply(vertices, function(actor) {
    V(sc)[setdiff(neighborhood(graph = sc, order = 1, nodes = actor)[[1]],
                  V(sc)[actor])]
  })
}
#' @rdname simplices
#' @export
# Alias for sc_simplices() — presumably named for retrieving the simplices
# ("faces") incident to given vertices; TODO confirm naming intent.
sc_faces <- sc_simplices
# Simplex nodes that contain *every* vertex passed via `...` (intersection
# of the vertices' simplex neighbourhoods). Errors on simplex-node indices.
sc_containing_simplices <- function(sc, ...) {
  vertices <- unlist(list(...))
  vertices <- V(sc)[vertices]
  if (any(vertices$type)) stop("Some indices correspond to simplex nodes.")
  # Intersect the 1-neighbourhoods of all given vertices.
  Reduce(intersect, neighborhood(sc, 1, vertices))
}
#' @rdname simplices
#' @export
# TRUE iff at least one simplex node contains every vertex passed via `...`.
sc_has_simplex <- function(sc, ...) {
  containing <- sc_containing_simplices(sc, ...)
  !(length(containing) == 0)
}
#' @rdname simplices
#' @export
# First simplex node containing all of the given vertices, or NULL if none.
# The returned node carries a "face" attribute: the positions of the query
# vertices within that simplex's vertex neighbourhood.
sc_simplex <- function(sc, ...) {
  vs <- V(sc)[unlist(list(...))]
  ss <- V(sc)[sc_containing_simplices(sc, vs)]
  if (length(ss) == 0) return(NULL)
  s <- ss[1]
  # mindist = 1 excludes the simplex node itself from its neighbourhood.
  attr(s, "face") <- match(vs, neighborhood(sc, 1, s, mindist = 1)[[1]])
  s
}
#' @rdname simplices
#' @export
# Vertices shared by all of the given simplex nodes (intersection of their
# vertex neighbourhoods). Errors if a supplied index is a vertex node.
sc_intersection <- function(sc, ...) {
  idx <- V(sc)[unlist(list(...))]
  if (!all(idx$type)) {
    stop("Some indices correspond to vertex nodes.")
  }
  Reduce(intersect, neighborhood(sc, 1, idx))
}
#' @rdname simplices
#' @export
# Nearness of a set of simplices: one less than the number of vertices they
# all share (so -1 when they share none).
sc_nearness <- function(sc, ...) {
  shared <- sc_intersection(sc, ...)
  length(shared) - 1
}
|
61675ddc51944e5375a792e0b228ef99fa7e203b
|
37794cfdab196879e67c3826bae27d44dc86d7f7
|
/Math/Poly.System.Symmetric.S3.Multiple.R
|
b3587bf16cdfafdd9d4406b5de818a5432bb0280
|
[] |
no_license
|
discoleo/R
|
0bbd53a54af392ef53a6e24af85cec4f21133d17
|
e9db8008fb66fb4e6e17ff6f301babde0b2fc1ff
|
refs/heads/master
| 2023-09-05T00:43:32.381031
| 2023-08-31T23:03:27
| 2023-08-31T23:03:27
| 213,750,865
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,791
|
r
|
Poly.System.Symmetric.S3.Multiple.R
|
########################
###
### Leonard Mada
### [the one and only]
###
### Polynomial Systems:
### Symmetric S3:
### Multiple/Composite Monoms
###
### draft v.0.1d
### Heterogenous Symmetric
### Polynomial Systems: 3 Variables
### Symmetric with Composite Monoms
### Example:
# (x*y)^n + (x*z)^n + (y*z)^n = R1
# Note:
# Heterosymmetric & Mixt Systems:
# - are discussed separately, see:
# Poly.Heterosymmetric.S3.R &
# Poly.Heterosymmetric.S3.Mixt.R
###############
###############
### History ###
###############
### draft v.0.1d:
# - Order 2 variant:
# Eq 2: x^2 + y^2 + z^2 = R2;
### draft v.0.1c:
# - basic examples of M1 & D1 extensions;
### draft v.0.1b-name:
# - renamed extensions:
# M1 & D1: [new] modified Eq 1;
# M2 / D2, M3 / D3: the previous M1/D1 & M2/D2;
### draft v.0.1b:
# - some solutions to the system of order 3:
# [with the non-trivial M2D3 extension (extension renamed)]
# (x*y)^3 + (x*z)^3 + (y*z)^3 = R1;
### draft v.0.1a:
# - solutions to the system of order 2:
# (x*y)^2 + (x*z)^2 + (y*z)^2 = R1;
####################
####################
### helper functions
library(polynom)
library(pracma)
# the functions are in the file:
# Polynomials.Helper.R
# Recover all (x, y, z) triples from candidate elementary sums S = x + y + z.
# For each S, x is a root of the cubic t^3 - S*t^2 + E2*t - E3 with
# E2/E3 adjusted by the A2/A3 extension coefficients b[2], b[3]; y and z then
# follow from y + z = S - x and y*z = E3/x.
# Returns a matrix with one row per candidate solution, columns (x, y, z).
solve.S = function(S, R, b=0) {
  # generic solver (based on existing S)
  b2 = if(length(b) > 1) b[2] else 0; # Ext A2;
  b3 = if(length(b) > 2) b[3] else 0; # Ext A3;
  # Three candidate x roots (possibly complex, via pracma::roots) per S.
  x = sapply(S, function(x) roots(c(1, -x, R[2] - b2*x, - R[3] + b3*x)))
  len = length(S)
  # Replicate S so it aligns with the 3-rows-per-S matrix of x roots.
  S = matrix(S, ncol=len, nrow=3, byrow=T)
  yz = R[3]/x - b3      # y*z, from E3
  yz.s = S - x          # y + z
  # TODO: robust (when necessary)
  yz.d = sqrt(yz.s^2 - 4*yz)  # y - z; complex arithmetic handles negatives
  y = (yz.s + yz.d) / 2
  z = yz.s - y
  cbind(as.vector(x), as.vector(y), as.vector(z))
}
#############################
#############################
### Simple Multiple Terms ###
### (x*y)^n + (x*z)^n + (y*z)^n
### Solution:
# - decomposition into elementary polynomials;
##################
### Extensions ###
### Type A:
# A1: (x*y)^n + (x*z)^n + (y*z)^n + b1*(x+y+z) = R1
# A2: x*y + y*z + z*x + b2*(x+y+z) = R2
# A3: x*y*z + b3*(x+y+z) = R3
### Types Other:
# M1: ((x*y)^n + (x*z)^n + (y*z)^n)*(x+y+z) = R1
# M2: (x*y + y*z + z*x)*(x+y+z) = R2
# M3: x*y*z*(x+y+z) = R3
# D1: ((x*y)^n + (x*z)^n + (y*z)^n)/(x+y+z) = R1
# D2: (x*y + y*z + z*x)/(x+y+z) = R2
# D3: x*y*z/(x+y+z) = R3
################################
##############
### Simple ###
### n = 2 ###
### (x*y)^2 + (x*z)^2 + (y*z)^2 = R1
# [trivial: P3]
### Order 2: n = 2
(x*y)^2 + (x*z)^2 + (y*z)^2 - R1 # = 0
x*y + y*z + z*x - R2 # = 0
x*y*z - R3 # = 0
### Solution:
# [not run]
R2^2 - 2*R3*S - R1 # = 0
S = 1/2 * (R[2]^2 - R[1]) / R[3]
### Solver
# Solve the order-2 symmetric system
#   (xy)^2 + (xz)^2 + (yz)^2 = R1,  xy + yz + zx = R2,  xyz = R3.
# From R2^2 - 2*R3*S - R1 = 0, the sum S = x + y + z is linear in the data.
solve.sym3 = function(R, b=0) {
  elem_sum <- 1/2 * (R[2]^2 - R[1]) / R[3]
  print(elem_sum)              # diagnostic output, as in the original
  solve.S(elem_sum, R, b)
}
### Examples:
R = c(1, 2, -1);
sol = solve.sym3(R)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
x^2*y^2 + y^2*z^2 + z^2*x^2 # - R[1] # = 0
x*y + y*z + z*x # - R[2] # = 0
x*y*z # - R[3] # = 0
poly.calc(x)
err = 1 + 2*x + 1.5*x^2 + x^3
round0(err)
##################
### Extensions ###
###########
### Type A:
# TODO
############
### Type M1:
((x*y)^2 + (x*z)^2 + (y*z)^2) * (x+y+z) - R1 # = 0
### Solution:
(E2^2 - 2*E3*S)*S - R1 # = 0
2*E3*S^2 - E2^2*S + R1 # = 0
2*R[3]*S^2 - R[2]^2*S + R[1] # = 0
### Example:
R = c(1, 2, 1)
S = roots(c(2*R[3], - R[2]^2, R[1]))
sol = solve.S(S, R)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
(x^2*y^2 + y^2*z^2 + z^2*x^2)*(x+y+z) # - R[1] # = 0
x*y + y*z + z*x # - R[2] # = 0
x*y*z # - R[3] # = 0
############
### Type D1:
((x*y)^2 + (x*z)^2 + (y*z)^2) / (x+y+z) - R1 # = 0
### Solution:
(E2^2 - 2*E3*S)/S - R1 # = 0
2*E3*S + R1*S - E2^2 # = 0
(2*R3+R1)*S - R2^2 # = 0
S = R[2]^2 / (2*R[3] + R[1])
### Example:
R = c(1, 2, 1)
S = R[2]^2 / (2*R[3] + R[1])
sol = solve.S(S, R)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
(x^2*y^2 + y^2*z^2 + z^2*x^2)/(x+y+z) # - R[1] # = 0
x*y + y*z + z*x # - R[2] # = 0
x*y*z # - R[3] # = 0
############
### Type M2:
(x*y + y*z + z*x)*(x+y+z) - R2 # = 0
### Solution:
# E2 = R2 / S
R2^2 / S^2 - 2*R3*S - R1 # = 0
2*R[3]*S^3 + R[1]*S^2 - R[2]^2 # = 0
#################
### Type M2 & M3:
(x*y + y*z + z*x)*(x+y+z) - R2 # = 0
x*y*z*(x+y+z) - R3 # = 0
### Solution:
# E2 = R2 / S
# E3 = R3 / S
R2^2 / S^2 - 2*R3 - R1 # = 0
(2*R3 + R1)*S^2 - R2^2 # = 0
######################
### Type M1 & M2 & M3:
((x*y)^2 + (x*z)^2 + (y*z)^2) / (x+y+z) - R1 # = 0
(x*y + y*z + z*x)*(x+y+z) - R2 # = 0
x*y*z*(x+y+z) - R3 # = 0
### Solution:
(2*R3 + R1/S)*S^2 - R2^2 # = 0
(2*R3*S + R1)*S - R2^2 # = 0
2*R[3]*S^2 + R[1]*S - R[2]^2 # = 0
#################
### Type M2 & D3:
(x*y + y*z + z*x)*(x+y+z) - R2 # = 0
x*y*z / (x+y+z) - R3 # = 0
### Solution:
# E2 = R2 / S
# E3 = R3 * S
R2^2 / S^2 - 2*R3*S^2 - R1 # = 0
2*R3*S^4 + R1*S^2 - R2^2 # = 0
#################
### Type D2 & D3:
(x*y + y*z + z*x) / (x+y+z) - R2 # = 0
x*y*z / (x+y+z) - R3 # = 0
### Solution:
# E2 = R2 * S
# E3 = R3 * S
(R2^2 - 2*R3)*S^2 - R1 # = 0
##########
### Solver
# Solve the M1&M2&M3-type system: S = x + y + z is a root of
#   2*R3*S^2 + R1*S - R2^2 = 0  (encoded below as a padded cubic),
# with E2 = R2 / S and E3 = R3 (per the derivations above).
solve.sym3 = function(R, b=0) {
  # TODO: all types!
  coeff = c(2*R[3], R[1], 0, - R[2]^2)
  len = length(coeff) - 1
  S = roots(coeff)
  print(S)   # diagnostic: candidate sums
  b2 = if(length(b) > 1) b[2] else 0; # TODO: Ext 2;
  # x is a root of t^3 - S*t^2 + (R2/S - b2)*t - R3 for each S.
  x = sapply(S, function(x) roots(c(1,-x, R[2] / x - b2, -R[3])))
  S = matrix(S, ncol=len, nrow=3, byrow=T)
  yz = R[3]/x        # y*z from E3 = x*y*z
  yz.s = S - x       # y + z
  # TODO: robust
  yz.d = sqrt(yz.s^2 - 4*yz)
  y = (yz.s + yz.d) / 2
  z = yz.s - y
  cbind(as.vector(x), as.vector(y), as.vector(z))
}
### Examples:
R = c(1, 2, -1);
sol = solve.sym3(R)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
x^2*y^2 + y^2*z^2 + z^2*x^2 # - R[1] # = 0
(x*y + y*z + z*x)*(x+y+z) # - R[2] # = 0
x*y*z # - R[3] # = 0
poly.calc(x)
err = 1 - 1.5*x^2 + 5*x^3 + 0.5*x^4 - 4*x^5 + 11*x^6 - 0.5*x^8 + x^9
round0(err)
################
### Variants ###
# (x*y)^2 + (x*z)^2 + (y*z)^2 = R1
# x^2 + y^2 + z^2 = R2
# x*y*z = R3
### Solution:
# [not run]
### Eq 2 =>
x^2 + y^2 + z^2 - R2 # = 0
S^2 - 2*E2 - R2 # = 0
# 2*E2 = S^2 - R2
# =>
4*E2^2 - 8*E3*S - 4*R1 # = 0
(S^2 - R2)^2 - 8*R3*S - 4*R1 # = 0
S^4 - 2*R2*S^2 - 8*R3*S + R2^2 - 4*R1 # = 0
### Solution:
# Solve the order-2 variant system
#   (xy)^2 + (xz)^2 + (yz)^2 = R1,  x^2 + y^2 + z^2 = R2,  xyz = R3,
# optionally with A1/A2/A3 extensions b[1..3] adding b*(x+y+z) to each
# equation. S = x + y + z is a root of the quartic
#   S^4 - 2*R2*S^2 - 8*R3*S + R2^2 - 4*R1 = 0 (plus extension terms).
# Returns a matrix of candidate (x, y, z) rows.
solve.SymM2.S2MP2 = function(R, b=0) {
  coeff = c(1, 0, - 2*R[2], - 8*R[3], R[2]^2 - 4*R[1])
  # Extensions:
  b1 = if(length(b) > 0) b[1] else 0;
  b2 = if(length(b) > 1) b[2] else 0;
  b3 = if(length(b) > 2) b[3] else 0;
  # Each nonzero extension perturbs the quartic's coefficients.
  if(b1 != 0) coeff = coeff + c(0, 0, 0, 4*b1, 0)
  if(b2 != 0) coeff = coeff + c(0, 2*b2, b2^2, -2*b2*R[2], 0)
  if(b3 != 0) coeff = coeff + c(0, 0, 8*b3, 0, 0)
  S = roots(coeff)
  # Elementary symmetric polynomials from S and the extensions:
  E2 = (S^2 - R[2] + b2*S) / 2
  E3 = R[3] - b3*S;
  len = length(S);
  # x is a root of t^3 - S*t^2 + E2*t - E3 for each candidate S.
  x = sapply(seq(len), function(id) roots(c(1, -S[id], E2[id], -E3[id])))
  S = matrix(S, ncol=len, nrow=3, byrow=T)
  E2 = matrix(E2, ncol=len, nrow=3, byrow=T)
  E3 = matrix(E3, ncol=len, nrow=3, byrow=T)
  yz.s = S - x;          # y + z
  yz = E2 - x*yz.s;      # y*z
  yz.d = sqrt(yz.s^2 - 4*yz) # TODO: add also negative values;
  y = (yz.s + yz.d) / 2;
  z = (yz.s - yz.d) / 2;
  return(cbind(x=as.vector(x), y=as.vector(y), z=as.vector(z)))
}
### Examples:
R = c(1,3,2)
sol = solve.SymM2.S2MP2(R)
x = sol[,1]; y = sol[,2]; z = sol[,3]
### Test:
(x*y)^2 + (x*z)^2 + (y*z)^2 # - R1
x^2 + y^2 + z^2 # - R2
x*y*z # - R3
round0.p(poly.calc(x[1:3]^2))
### Ex 2:
R = c(0,2,1)
sol = solve.SymM2.S2MP2(R)
x = sol[,1]; y = sol[,2]; z = sol[,3]
### Test:
(x*y)^2 + (x*z)^2 + (y*z)^2 # - R1
x^2 + y^2 + z^2 # - R2
x*y*z # - R3
### Extensions:
# Classical Polynomial: P12
### Ext A1:
R = c(1,3,2)
b = 1
sol = solve.SymM2.S2MP2(R, b=b)
x = sol[,1]; y = sol[,2]; z = sol[,3]
### Test:
(x*y)^2 + (x*z)^2 + (y*z)^2 + b[1]*(x+y+z) # - R1
x^2 + y^2 + z^2 # - R2
x*y*z # - R3
round0.p(poly.calc(x))
### Ext A2:
R = c(1,3,2)
b = c(1, -1)
sol = solve.SymM2.S2MP2(R, b=b)
x = sol[,1]; y = sol[,2]; z = sol[,3]
### Test:
(x*y)^2 + (x*z)^2 + (y*z)^2 + b[1]*(x+y+z) # - R1
x^2 + y^2 + z^2 + b[2]*(x+y+z) # - R2
x*y*z # - R3
round0.p(poly.calc(x))
### Ext A2:
R = c(1,3,2)
b = c(1, -1, -2)
sol = solve.SymM2.S2MP2(R, b=b)
x = sol[,1]; y = sol[,2]; z = sol[,3]
### Test:
(x*y)^2 + (x*z)^2 + (y*z)^2 + b[1]*(x+y+z) # - R1
x^2 + y^2 + z^2 + b[2]*(x+y+z) # - R2
x*y*z + b[3]*(x+y+z) # - R3
round0.p(poly.calc(x))
#######################
#######################
##############
### Simple ###
### n = 3 ###
### (x*y)^3 + (x*z)^3 + (y*z)^3 = R1
# [trivial: P3]
### Order 2: n = 2
(x*y)^3 + (x*z)^3 + (y*z)^3 - R1 # = 0
x*y + y*z + z*x - R2 # = 0
x*y*z - R3 # = 0
### Solution:
# [not run]
R2^3 - 3*R3*(R2*S - R3) - R1 # = 0
S = (R2^3 + 3*R3^2 - R1) / (3*R2*R3)
#################
### Type M2 & D3:
(x*y + y*z + z*x)*(x+y+z) - R2 # = 0
x*y*z / (x+y+z) - R3 # = 0
### Solution:
# E2 = R2 / S
# E3 = R3 * S
R2^3 / S^3 - 3*R3*S*(R2 - R3*S) - R1 # = 0
R2^3 - 3*R3*S^4*(R2 - R3*S) - R1*S^3 # = 0
3*R3^2*S^5 - 3*R2*R3*S^4 - R1*S^3 + R2^3 # = 0
##########
### Solver
# Solve the order-3 M2&D3-type system: S = x + y + z is a root of the quintic
#   3*R3^2*S^5 - 3*R2*R3*S^4 - R1*S^3 + R2^3 = 0,
# with E2 = R2 / S and E3 = R3 * S (see the derivation above).
solve.sym3 = function(R, b=0) {
  # TODO: all types!
  coeff = c(3*R[3]^2, - 3*R[2]*R[3], - R[1], 0, 0, R[2]^3)
  len = length(coeff) - 1
  S = roots(coeff)
  print(S)   # diagnostic: candidate sums
  b2 = if(length(b) > 1) b[2] else 0; # TODO: Ext 2;
  # x is a root of t^3 - S*t^2 + (R2/S - b2)*t - R3*S for each S.
  x = sapply(S, function(x) roots(c(1,-x, R[2] / x - b2, -R[3] * x)))
  S = matrix(S, ncol=len, nrow=3, byrow=T)
  yz = R[3] * S/x     # y*z from E3 = R3 * S
  yz.s = S - x        # y + z
  # TODO: robust
  yz.d = sqrt(yz.s^2 - 4*yz)
  y = (yz.s + yz.d) / 2
  z = yz.s - y
  cbind(as.vector(x), as.vector(y), as.vector(z))
}
# Verify candidate solutions (x, y, z) of the order-n symmetric system:
#   err1 = (xy)^n + (xz)^n + (yz)^n, err2 = xy + yz + zx, err3 = xyz,
# with err2/err3 multiplied ("M2"/"M3") or divided ("D2"/"D3") by
# S = x + y + z according to the `type` string. Each returned row should
# match the corresponding R value for a valid solution.
test.sym3 = function(x, y, z, b=0, n=3, R, type="M2D3") {
  err1 = x^n*y^n + y^n*z^n + z^n*x^n # - R[1] # = 0
  err2 = (x*y + y*z + z*x) # - R[2] # = 0
  err3 = x*y*z # - R[3] # = 0
  S = x+y+z;
  # BUG FIX: pmatch() only matches *prefixes*, so "D3" inside type = "M2D3"
  # was never detected and err3 was never divided by S; grepl() matches
  # substrings as intended.
  if(grepl("M2", type, fixed=TRUE)) {
    err2 = err2 * S
  } else if(grepl("D2", type, fixed=TRUE)) {
    err2 = err2 / S
  }
  if(grepl("M3", type, fixed=TRUE)) {
    err3 = err3 * S
  } else if(grepl("D3", type, fixed=TRUE)) {
    err3 = err3 / S
  }
  # TODO: b, R;
  # BUG FIX: the original returned err2 twice and dropped err3.
  round0(rbind(err1, err2, err3))
}
### Examples:
### Ex 1:
R = c(3, 3, -1);
sol = solve.sym3(R)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
test.sym3(x, y, z, n=3)
poly.calc(x)
err = -9 - 9*x^2 - 3*x^3 - 18*x^4 + 24*x^5 - 9*x^6 - 30*x^7 + 45*x^8 - 25*x^9 +
- 15*x^10 + 38*x^11 + 12*x^12 - x^13 + 3*x^14 + x^15
round0(err)
### Ex 2:
R = c(0, 3, -1);
sol = solve.sym3(R)
x = sol[,1]; y = sol[,2]; z = sol[,3];
### Test
test.sym3(x, y, z, n=3)
poly.calc(x)
err = -9 - 9*x^2 - 9*x^4 + 18*x^5 + 9*x^6 - 27*x^7 + 9*x^8 - 27*x^9 +
- 6*x^10 + 36*x^11 + 12*x^12 + 3*x^14 + x^15
round0(err)
|
e5290fddef17e2774bf577fb30e7e91b80ac190d
|
e4c1422348ae1cd4aa316aad156fefe59670e776
|
/pkgs/abrem/man/abrem.conf.Rd
|
c986b8e85bcac4706e0eb92d8645dfc5ebca7b0b
|
[] |
no_license
|
thomas4912/pkgs
|
161170c78068340a82ddde7535293f812cc77a73
|
8f060661cca439e13b16990bcd2268cc96aac4b3
|
refs/heads/master
| 2021-01-22T07:27:29.068120
| 2017-02-13T12:17:52
| 2017-02-13T12:17:52
| 81,818,568
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,131
|
rd
|
abrem.conf.Rd
|
\name{abrem.conf}
\alias{abrem.conf}
\alias{contours}
\alias{MLE contours}
\alias{cl}
\alias{unrel.n}
\alias{conf.what}
\alias{method.conf.blives}
\alias{conf.blives.sides}
\alias{S}
\alias{in.legend}
\title{Add Confidence to \code{abrem} Objects}
\description{
This function adds confidence calculations to
various entities in \code{abrem} objects.
}
\usage{abrem.conf(x,which="all",\dots)}
\arguments{
\item{x}{Object of class \code{"abrem"}.}
\item{which}{Specifies which fit in the \code{abrem} object will be processed.}
\item{\dots}{Options for calculating confidence, and for plotting the results.}
}
\details{
This function adds confidence calculations to various entities in
\code{abrem} objects and adds them to the object alongside any pre-existing
confidence calculations.
Additional options for calculating B-life confidence are passed with:
\describe{
\item{\code{cl}}{
        Confidence level: A single number from the interval \code{[0,1[}
specifying the confidence level for various confidence calculations.
Defaults to \code{0.9}.
}
\item{\code{conf.blives.sides}}{
Either \code{"lower"}, \code{"upper"} or \code{"double"},
specifying the type of bound(s) to be calculated.
Defaults to \code{c("double")}, the other options are currently
not implemented.
}
\item{\code{unrel.n}}{
An integer controlling the amount of unreliability levels for
which B-life confidence bounds are calculated and ultimately plotted.
Higher numbers will result in smoother confidence bounds. In any
case, confidence intervals will be calculated for:
\itemize{
\item the B-lives at unreliability levels specified with option \code{\link{unrel}}
\item the B-life at \code{50 [\%]} unreliability
            \item the B-life at the calculated characteristic life
or logmean (depending on the fitted distribution)
}
Note: When plotting fits and confidence bounds that are adjusted with
a threshold (see option \code{"threshold"}), it is often the case that
the bounds appear to be cut of on the left. This can be countered by
dramatically increasing \code{unrel.n}, resulting in confidence
bounds that extend to the edge of the plotting area.
Defaults to \code{25}.
}
\item{\code{conf.what}}{
A vector of class \code{"character"} describing for which entities
that confidence should be calculated.
Defaults to \code{c("blives")}, the only type currently supported.
}
\item{\code{unrel}}{
An unordered numeric vector with unreliability levels for which
B-life confidence will be calculated.
Defaults to \code{c(0.1,0.05,0.01)}.
}
\item{\code{method.conf.blives}}{
A vector of class \code{"character"} describing the technique to be
used for calculating confidence for B-lives. Possible values are
\code{"bbb"} (Beta Binomial confidence bounds),
\code{"lrb"} (Likelihood Ratio confidence bounds) and
\code{"mcpivotals"} or \code{"mcpivotal"} (Monte Carlo Pivotal
confidence bounds).
Monte Carlo Pivotal confidence bounds use a large number of
simulations to calculate the confidence bounds. See option
\code{"S"} for more info.
Defaults to \code{c("mcpivotals")}.
}
\item{\code{S}}{
An integer describing the number of Monte Carlo simulations on
which the Monte Carlo pivotal confidence bounds and calculation
of the "prr" goodness-of-fit indicator are based.
High values are needed for good confidence bounds at the lower
end of the fitted model, especially for data with heavy censoring.
Note that \code{S >= 100} and that \code{S} must be divisible by 10.
Defaults to \code{10000}.
}
\item{\code{in.legend}}{
Logical value controlling the inclusion of confidence calculation
results in the legend.
If \code{in.legend=FALSE} is passed ,
the resulting confidence calculations will be omitted from the legend.
Defaults to \code{TRUE}.
}
}
Additionally, one can pass any options available from \code{options.abrem},
such as \code{col} or \code{is.plot.legend}. The graphical options
will be used when plotting the (life-)time observations using \code{plot.abrem}.
}
\value{
The function returns its argument \code{x}, extended with the confidence
calculations and any optional graphical and calculation arguments
as passed to the function.
}
\author{Jurgen Symynck \email{jusy@openreliability.org}}
\note{
\itemize{
\item Currently, only \code{which = "all"} is supported, meaning that a
call to \code{abrem.conf} attempts calculation of confidence for all
fits in the \code{abrem} object.
\item Currently, only \code{conf.what = "blives"} and
\code{conf.blives.sides = "double"} are supported.
}
}
\section{Warning}{
Currently, the Monte Carlo
pivotal confidence bounds are only identical to superSMITH's
MC pivotal bounds for complete, uncensored data. For heavily censored
datasets with few failures, the bounds appear more optimistic than
superSMITH's bounds. Research on this issue is ongoing.
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
\code{\link{options.abrem}},
\code{\link{contour.abrem}}
}
\examples{
## full dataset ##
da1 <- Abrem(runif(10,100,1e4),label="Complete data")
da1 <- abrem.fit(da1)
da1 <- abrem.conf(da1,method.conf.blives="mcpivotals",col="red")
da1 <- abrem.conf(da1,method.conf.blives="bbb",col="orange")
da1 <- abrem.conf(da1,method.conf.blives="lrb",col="yellow3")
print(da1$fit[[1]]$conf$blives[[1]])
plot(da1,main="Comparison between MC Pivotal bounds and BB Bounds")
## censored dataset: generates a warning for MC Pivotal confidence bounds ##
da2 <- runif(8,100,1e4)
da2 <- Abrem(fail=da2,susp=rep(max(da2),2),label="Type II censored data")
# generate a 'type 2' censored dataset
da2 <- abrem.fit(da2)
da2 <- abrem.conf(da2,method.conf.blives="mcpivotals",col="blue1")
da2 <- abrem.conf(da2,method.conf.blives="bbb",col="steelblue")
da2 <- abrem.conf(da2,method.conf.blives="lrb",col="cyan3")
plot(da2,main="Comparison between different bound types.")
## show variability in Monte Carlo Pivotal bounds with low S ##
da3 <- Abrem(rweibull(5,3,1000))
da3 <- abrem.fit(da3)
for(i in 1:20) da3 <- abrem.conf(da3,S=1000,lwd=1,col="red")
# just keep adding bounds to the abrem object...
plot(da3,is.plot.legend=FALSE,
main="Variability in MC Pivotal Conf. Bounds for S=1000")
}
|
4c0b97507d1417e7523c055b32e43a659e03b7a1
|
2ed8c70117c8b37dd8e0bbb312cb74a85c41d86b
|
/CovidandMarkets/packages.R
|
c01f44a2845f7f72888d0bc6aed35be12e3868ff
|
[] |
no_license
|
mthnguyener/CovidandMarkets
|
45f0a02281e0e3c34340895dadfa731af2ec5f1a
|
9b88c9df5cae964776a19827bf0da864b41ebb04
|
refs/heads/master
| 2022-12-02T21:37:52.560366
| 2020-08-04T22:57:03
| 2020-08-04T22:57:03
| 278,161,442
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 868
|
r
|
packages.R
|
# Package loading and API-credential setup for the COVID-and-markets analysis.
# Data wrangling / visualisation
library(tidyverse)
library(dplyr)
library(purrr)
library(readr)
library(ggplot2)
# Modelling
library(leaps)
library(lmtest)
library(randomForest)
library(glmnet)
library(MASS)
library(car)
library(ISLR)
library(perturb)
library(tree)
library(caret)
# Strings / dates / text
library(stringr)
library(lubridate)
library(tidytext)
library(scales)
library(readxl)
# App / I/O / plotting
library(shiny)
library(readxl)
library(broom)
library(reshape)
library(leaflet)
library(plotly)
# Dev tooling and external APIs
library(devtools)
library(usethis)
library(keyring)
library(alphavantager)
library(twitteR)
# John Hopkins COVID19 DATA:
# https://github.com/CSSEGISandData/COVID-19
# https://github.com/CSSEGISandData/COVID-19.git
# Store Twitter API credentials in the system keyring.
# NOTE(review): key_set() prompts interactively for each secret, so this
# script cannot run unattended the first time — confirm intended workflow.
key_set("key")
key_set("secret")
key_set("atoken")
key_set("asecret")
# Retrieve the stored credentials and authenticate the twitteR session.
key <- key_get("key")
secret <- key_get("secret")
atoken <- key_get("atoken")
asecret <- key_get("asecret")
setup_twitter_oauth(key, secret, atoken, asecret)
|
9ab925603fb282e65fc79ce316131d0dfa9d9140
|
7c2e136a2ce427597843354ef1bab5797f8ebbc2
|
/GC_utils.R
|
63fdb9a0cd5e1f5bb4c32999e3ace42bc94bd88f
|
[] |
no_license
|
shulp2211/InfiniteSites
|
4d6515562f70b20c474976b4e39a7eee2d171ddb
|
17514d03786ad3237260b823c31cef37f9493b7a
|
refs/heads/master
| 2023-06-24T00:12:20.609385
| 2021-07-22T09:21:14
| 2021-07-22T09:21:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,131
|
r
|
GC_utils.R
|
## Wrapper around VGAM's pbetabinom()/dbetabinom() mimicking the p-value
## computation of stats::binom.test() for a beta-binomial model:
##   q     observed count; size  number of trials;
##   prob  mean success probability; rho  overdispersion parameter;
##   alternative  one of "two.sided", "less", "greater".
## Returns the p-value (value of the final assignment).
## NOTE(review): match.arg() is commented out below, so `alternative` must be
## passed as a single string; with the default length-3 vector, switch()
## would fail — confirm callers always pass one value.
betabinom.test <- function (q, size, prob, rho, alternative = c("two.sided", "less", "greater"))
{
  # Input validation inherited from stats::binom.test, currently disabled:
  # DNAME <- deparse(substitute(q))
  # qr <- round(q)
  # if (any(is.na(q) | (q < 0)) || max(abs(q - qr)) > 1e-07)
  # stop("'q' must be nonnegative and integer")
  # q <- qr
  # if (length(q) == 2L) {
  # size <- sum(q)
  # q <- q[1L]
  # }
  # else if (length(q) == 1L) {
  # sizer <- round(size)
  # if ((length(size) > 1L) || is.na(size) || (size < 1) || abs(size - sizer) > 1e-07 || (q > sizer))
  # stop("'size' must be a positive integer >= 'q'")
  # DNAME <- paste(DNAME, "and", deparse(substitute(size)))
  # size <- sizer
  # }
  # else stop("incorrect length of 'q'")
  # if (!missing(prob) && (length(prob) > 1L || is.na(prob) || prob < 0 ||
  # prob > 1))
  # stop("'prob' must be a single number between 0 and 1")
  # alternative <- match.arg(alternative)
  # One-sided p-values come straight from the beta-binomial CDF; the
  # two.sided branch sums the probability of all outcomes at most as likely
  # as q — the same algorithm stats::binom.test uses for the binomial.
  PVAL <- switch(alternative, less = pbetabinom(q = q, size = size, prob = prob, rho = rho), greater = 1 - pbetabinom(q = q - 1, size = size, prob = prob, rho = rho),
  two.sided = { if (prob == 0) (q == 0) else if (prob == 1) (q == size) else {
  relErr <- 1 + 1e-07   # floating-point slack when comparing densities
  d <- dbetabinom(x = q, size = size, prob = prob, rho = rho)
  m <- size * prob      # mean of the beta-binomial
  if (q == m) 1 else if (q < m) {
  # Count upper-tail outcomes no more likely than q, then combine tails.
  i <- seq.int(from = ceiling(m), to = size)
  y <- sum(dbetabinom(x = i, size = size, prob = prob, rho = rho) <= d * relErr)
  p1 <- 1 - pbetabinom(q = size - y, size = size, prob = prob, rho = rho)
  if (p1 < 0)
  pbetabinom(q = q, size = size, prob = prob, rho = rho)
  else
  p1 + pbetabinom(q = q, size = size, prob = prob, rho = rho)
  } else {
  # Count lower-tail outcomes no more likely than q, then combine tails.
  i <- seq.int(from = 0, to = floor(m))
  y <- sum(dbetabinom(x = i, size = size, prob = prob, rho = rho) <= d * relErr)
  p1 <- 1 - pbetabinom(q = q - 1, size = size, prob = prob, rho = rho)
  if (p1 < 0)
  pbetabinom(q = y - 1, size = size, prob = prob, rho = rho)
  else
  p1 + pbetabinom(q = y - 1, size = size, prob = prob, rho = rho)
  }
  }
  })
}
## Wrapper around VGAM's pbetabinom.ab()/dbetabinom.ab() mimicking
## stats::binom.test() for a beta-binomial parameterised by shape1/shape2.
## Invalid inputs produce a warning and return NA instead of stopping, so
## the function is safe inside vectorised/parallel drivers (see
## test_betabin_model). Returns the p-value (value of the final assignment).
betabinom.test.ab <- function (q, size, shape1, shape2, alternative = c("two.sided", "less", "greater"))
{
  # DNAME is computed but currently unused — presumably kept from the
  # stats::binom.test template; TODO confirm it can be dropped.
  DNAME <- deparse(substitute(q))
  qr <- round(q)
  sizer <- round(size)
  # Soft validation: warn and return NA rather than stop().
  if (is.na(q) || (q < 0) || abs(q - qr) > 1e-07) {
  warning("'q' must be nonnegative and integer")
  return(NA)
  } else if ((length(size) > 1L) || is.na(size) || (size < 1) || abs(size - sizer) > 1e-07 || (q > sizer)) {
  warning("'size' must be a positive integer >= 'q'")
  return(NA)
  } else if (is.na(shape1) || is.na(shape2)){
  warning("'shapeX' must be nonnegative integers")
  return(NA)
  }
  q <- qr
  size <- sizer
  # Remaining validation from the stats::binom.test template, disabled:
  # if (length(q) == 2L) {
  # size <- sum(q)
  # q <- q[1L]
  # }
  # else if (length(q) == 1L) {
  # if ((length(size) > 1L) || is.na(size) || (size < 1) || abs(size - sizer) > 1e-07 || (q > sizer))
  # stop("'size' must be a positive integer >= 'q'")
  # DNAME <- paste(DNAME, "and", deparse(substitute(size)))
  # }
  # else stop("incorrect length of 'q'")
  # if (!missing(prob) && (length(prob) > 1L || is.na(prob) || prob < 0 ||
  # prob > 1))
  # stop("'prob' must be a single number between 0 and 1")
  alternative <- match.arg(alternative)
  # Same two-tailed "sum all outcomes at most as likely as q" algorithm as
  # stats::binom.test, with the beta-binomial density/CDF.
  PVAL <- switch(alternative, less = pbetabinom.ab(q = q, size = size, shape1 = shape1, shape2 = shape2), greater = 1 - pbetabinom.ab(q = q - 1, size = size, shape1 = shape1, shape2 = shape2),
  two.sided = { if (shape1 == 0) (q == 0) else if (shape2 == 0) (q == size) else {
  relErr <- 1 + 1e-07   # floating-point slack when comparing densities
  d <- dbetabinom.ab(x = q, size = size, shape1 = shape1, shape2 = shape2)
  m <- size * shape1 / (shape1 + shape2)   # mean of the beta-binomial
  if (q == m) 1 else if (q < m) {
  i <- seq.int(from = ceiling(m), to = size)
  y <- sum(dbetabinom.ab(x = i, size = size, shape1 = shape1, shape2 = shape2) <= d * relErr)
  p1 <- 1 - pbetabinom.ab(q = size - y, size = size, shape1 = shape1, shape2 = shape2)
  if (p1 < 0)
  pbetabinom.ab(q = q, size = size, shape1 = shape1, shape2 = shape2)
  else
  p1 + pbetabinom.ab(q = q, size = size, shape1 = shape1, shape2 = shape2)
  } else {
  i <- seq.int(from = 0, to = floor(m))
  y <- sum(dbetabinom.ab(x = i, size = size, shape1 = shape1, shape2 = shape2) <= d * relErr)
  p1 <- 1 - pbetabinom.ab(q = q - 1, size = size, shape1 = shape1, shape2 = shape2)
  if (p1 < 0)
  pbetabinom.ab(q = y - 1, size = size, shape1 = shape1, shape2 = shape2)
  else
  p1 + pbetabinom.ab(q = y - 1, size = size, shape1 = shape1, shape2 = shape2)
  }
  }
  })
}
# Run betabinom.test.ab() in parallel over the observations selected by
# `idxs`, returning the vector of p-values. `ncores` defaults to the global
# NCORES; `presched` is forwarded to mcmapply's mc.preschedule.
test_betabin_model <- function(size, q, shape1, shape2, idxs, alternative = "two.sided", presched = T, ncores = NCORES) {
  mcmapply(
    FUN = betabinom.test.ab,
    q = q[idxs],
    size = size[idxs],
    shape1 = shape1[idxs],
    shape2 = shape2[idxs],
    MoreArgs = list(alternative = alternative),
    mc.preschedule = presched,
    mc.cores = ncores
  )
}
#
# test_betabin_model_filter <- function(size, shape1, shape2, idxs, alternative = "two.sided", presched = T, ncores = NCORES) {
# pval <- mcmapply(size = size[idxs], q = size[idxs], shape1 = shape1[idxs], shape2 = shape2[idxs],
# FUN = betabinom.test.ab, MoreArgs = list(alternative = alternative), mc.preschedule = presched, mc.cores = ncores)
# return(pval)
# }
# Write a QQ plot of observed vs expected -log10 p-values to
# <sampledir>/<sampleid><suffix>.png.
#   pvector  numeric p-values; zeros/NAs yield non-finite -log10 values and
#            are dropped before plotting.
#   ...      forwarded to plot().
#   tofile   currently unused; kept for interface compatibility.
ggd.qqplot = function(sampleid = "", sampledir = ".", suffix = "_QQplot", pvector, main=NULL, tofile = F, ...) {
  o <- -log10(sort(pvector, decreasing = FALSE))
  o <- o[is.finite(o)]  # drop Inf (p == 0) and NA
  # BUG FIX: seq_along() instead of 1:length(o), which misbehaves when o is
  # empty (1:0 counts down).
  e <- -log10(seq_along(o) / length(o))  # expected uniform quantiles
  png(filename = file.path(sampledir, paste0(sampleid, suffix, ".png")))
  # BUG FIX: close the device via on.exit() so it is not leaked when plot()
  # errors (the original only called dev.off() on the success path).
  on.exit(dev.off(), add = TRUE)
  plot(e, o, pch = ".", cex = 2, main = main, ...,
       xlab = expression(Expected~~-log[10](italic(p))),
       ylab = expression(Observed~~-log[10](italic(p))),
       xlim = c(0, max(e)), ylim = c(0, max(o)))
  lines(e, e, col = "red")  # y = x reference line
}
# Draw a QQ plot of observed vs expected -log10 p-values on the current
# graphics device. Non-finite observed values (p == 0, NA) are dropped.
ggd_qqplot2 = function(pvector, main=NULL, ...) {
  obs <- sort(-log10(pvector), decreasing = TRUE)
  obs <- obs[is.finite(obs)]
  n <- length(obs)
  exp_q <- -log10((1:n) / n)  # expected uniform quantiles
  plot(exp_q, obs, pch = ".", cex = 2, main = main, ...,
       xlab = expression(Expected~~-log[10](italic(p))),
       ylab = expression(Observed~~-log[10](italic(p))),
       xlim = c(0, max(exp_q)), ylim = c(0, max(obs)))
  lines(exp_q, exp_q, col = "red")  # y = x reference line
}
# QQ plot comparing two p-value sets: `pvector` (black dots) and a second,
# "conservative" set `pvector_conserv` (blue) reordered by the *first*
# vector's ranking so paired tests stay aligned point-by-point.
# NOTE(review): unlike ggd_qqplot2, non-finite values are not filtered here,
# so any p == 0 makes ylim = max(o) infinite — confirm inputs exclude zeros.
qqplot2 = function(pvector, pvector_conserv, main=NULL, ...) {
  ord <- order(pvector, decreasing = F)
  o = -log10(pvector[ord])
  o_cons <- -log10(pvector_conserv[ord])   # same ordering as pvector
  e = -log10( 1:length(o)/length(o) )      # expected uniform quantiles
  plot(e,o,pch=".",cex=2, main=main, ...,
  xlab=expression(Expected~~-log[10](italic(p))),
  ylab=expression(Observed~~-log[10](italic(p))),
  xlim=c(0,max(e)), ylim=c(0,max(o)))
  points(e, o_cons, col="blue", cex=1)     # overlay the conservative set
  lines(e,e,col="red")                     # y = x reference line
}
# Read a PCAWG release table, drop donors marked "Excluded", and expand
# donors with multiple tumour samples (comma-separated aliquot IDs) into
# one row per tumour aliquot. Returns the expanded data frame with a
# subset of identifier/filename columns.
read_pcawg_release_table <- function(release_table_file) {
  release_table <- read.delim(file = release_table_file, as.is = T)
  release_table <- release_table[release_table$wgs_exclusion_white_gray != "Excluded", ]
  # Comma-separated list of tumour aliquots per donor.
  splitaliquots <- strsplit(x = release_table$tumor_wgs_aliquot_id, split = ",")
  # Repeat each donor row once per tumour aliquot.
  release_table_dedup <- release_table[rep(1:nrow(release_table), lengths(splitaliquots)), c("wgs_exclusion_white_gray", "dcc_project_code", "normal_wgs_aliquot_id", "tumor_wgs_aliquot_id", "normal_wgs_bwa_alignment_bam_file_name", "tumor_wgs_bwa_alignment_bam_file_name")]
  release_table_dedup$tumor_wgs_aliquot_id <- unlist(splitaliquots)
  # BAM filenames are comma-separated in the same order as the aliquot IDs.
  release_table_dedup$tumor_wgs_bwa_alignment_bam_file_name <- unlist(strsplit(x = release_table$tumor_wgs_bwa_alignment_bam_file_name, split = ","))
  return(release_table_dedup)
}
# Closed-form comparison of two Beta distributions (Chris Stucchio /
# Evan Miller): returns P(B1 > B2) where B1 ~ Beta(alpha_1, beta_1) and
# B2 ~ Beta(alpha_2, beta_2). The summation computed below is P(B2 > B1);
# this function returns its complement. (The original comment claimed the
# return value was P(B2 > B1), which is the opposite of what the code does:
# e.g. h(2, 1, 1, 1) == 2/3 == P(Beta(2,1) > Uniform).)
# The closed form requires an integer alpha_2; it is rounded here, so the
# result is approximate for non-integer alpha_2.
h <- function(alpha_1, beta_1, alpha_2, beta_2) {
  # seq_len keeps the sum empty when round(alpha_2) < 1, instead of the
  # invalid index sequence c(0, -1) produced by seq.int(0, -1).
  j <- seq_len(max(round(alpha_2), 0)) - 1
  log_vals <- (lbeta(alpha_1 + j, beta_1 + beta_2) - log(beta_2 + j) -
                 lbeta(1 + j, beta_2) - lbeta(alpha_1, beta_1))
  1 - sum(exp(log_vals))
}
# Convert a (mean, concentration) parameterisation of a Beta distribution
# into its shape parameters: a = mean * kappa, b = (1 - mean) * kappa.
# Returns list(a = ..., b = ...).
betaABfromMeanKappa = function( mean , kappa ) {
  list(a = mean * kappa, b = (1 - mean) * kappa)
}
# Convert a (mean, sd) parameterisation of a Beta distribution into its
# shape parameters, returned as a named numeric vector c(a, b).
# Note: unlike betaABfromMeanKappa(), this returns a vector, not a list.
betaABfromMeanSD = function( mean , sd ) {
  concentration <- mean*(1-mean)/sd^2 - 1
  c(a = mean * concentration, b = (1.0 - mean) * concentration)
}
# Load the per-chromosome 1000 Genomes allele tables
# (1000genomesAlleles2012_chr1..23.txt; file 23 holds chromosome X) and
# combine them into a single GRanges with `ref` and `alt` metadata columns
# (integer codes 1:4 mapped to A/C/G/T).
#
# Args:
#   refallelesdir: directory (with trailing separator) containing the
#                  per-chromosome files with integer columns
#                  (position, a0, a1).
#   chrominfo:     Seqinfo for the returned GRanges (defaults to hs37d5).
load_1000G_reference_alleles <- function(refallelesdir, chrominfo = seqinfo(BSgenome.Hsapiens.1000genomes.hs37d5)) {
  files <- paste0(refallelesdir, "1000genomesAlleles2012_chr", 1:23, ".txt")
  per_chrom <- lapply(files, function(f) read_tsv(file = f, col_types = "iii"))
  # One chromosome label per row; file 23 is chromosome X.
  chrom_labels <- rep(c(1:22, "X"), vapply(per_chrom, nrow, integer(1)))
  alleles <- as.data.frame(do.call(rbind, per_chrom))
  bases <- c("A", "C", "G", "T")
  gr <- GRanges(seqnames = chrom_labels,
                IRanges(start = alleles$position, end = alleles$position),
                seqinfo = chrominfo)
  mcols(gr)$ref <- factor(alleles$a0, levels = 1:4, labels = bases)
  mcols(gr)$alt <- factor(alleles$a1, levels = 1:4, labels = bases)
  return(gr)
}
# Highest density interval (HDI) of a distribution given its inverse CDF.
#
# Args:
#   ICDFname: the quantile function of the distribution (e.g. qbeta).
#             Its parameters must be passed by NAME via `...`, e.g.
#             HDIofICDF(qbeta, shape1 = 30, shape2 = 12); positional
#             parameters do not work.
#   credMass: desired probability mass of the HDI.
#   tol:      tolerance passed to optimize().
#   ...:      distribution parameters forwarded to ICDFname.
#
# Returns a length-2 numeric vector with the HDI limits.
# Adapted and corrected from Greg Snow's TeachingDemos package.
HDIofICDF = function( ICDFname , credMass=0.95 , tol=1e-8 , ... ) {
  tailMass <- 1 - credMass
  # Width of the interval holding credMass probability that starts at
  # lower-tail probability lowTailPr; the HDI is the narrowest such interval.
  widthAt <- function( lowTailPr , ICDFname , credMass , ... ) {
    ICDFname( credMass + lowTailPr , ... ) - ICDFname( lowTailPr , ... )
  }
  best <- optimize( widthAt , c( 0 , tailMass ) , ICDFname=ICDFname ,
                    credMass=credMass , tol=tol , ... )
  lower <- best$minimum
  c( ICDFname( lower , ... ) , ICDFname( credMass + lower , ... ) )
}
# Mean and standard deviation of x with NAs dropped, as c(mu = ..., rho = ...).
get_mean_var <- function(x) {
  mu_hat <- mean(x, na.rm = TRUE)
  rho_hat <- sqrt(var(x, na.rm = TRUE))
  c(mu = mu_hat, rho = rho_hat)
}
# Robust location/scale of x: median and MAD (default constant 1.4826) with
# NAs dropped, as c(mu = ..., rho = ...).
get_mean_var_robust <- function(x) {
  c(mu = median(x, na.rm = TRUE),
    rho = mad(x, na.rm = TRUE))
}
|
548aca0c5d605c8d99a6704d68d22506623207b6
|
23bc04388c4773f6b0ba2eca7747db9a8d72f657
|
/rNPBST/man/wilcoxonRankSum.test.Rd
|
468e06fc48a1d9544b96ad36c74b0746d24fc4bd
|
[] |
no_license
|
JacintoCC/TFG
|
cfe14f140005d9233f67d5aad643ef5d9820163f
|
994f6db722f1fe0576cdd55a0f44f91b397d868c
|
refs/heads/master
| 2020-04-05T23:10:46.983204
| 2016-10-31T09:34:12
| 2016-10-31T09:34:12
| 62,843,829
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 401
|
rd
|
wilcoxonRankSum.test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NP-LocationTests.R
\name{wilcoxonRankSum.test}
\alias{wilcoxonRankSum.test}
\title{Wilcoxon Rank Sum test for location}
\usage{
wilcoxonRankSum.test(matrix)
}
\arguments{
\item{matrix}{Matrix of data}
}
\value{
A htest object with pvalues and statistics
}
\description{
This function performs the Wilcoxon Rank Sum test
}
|
74c2123dd5b176f96d34bda4c136f23e63ac8a30
|
0f0bc022bcc6dad2b6f9b5085d344695c46bba5c
|
/R/0311/01.R
|
33e6d6e3214d9ad9a5a73213a430f91c36a16775
|
[] |
no_license
|
JiamingPKU/code
|
07e90927341e1a58bc0bebf06d34b95399df323f
|
b939e3ab5cfd76f40aa13b6b74e2fd843b711111
|
refs/heads/master
| 2022-10-23T11:48:26.423604
| 2017-06-18T00:29:00
| 2017-06-18T00:29:00
| 84,429,774
| 0
| 1
| null | 2022-10-19T00:30:34
| 2017-03-09T10:33:21
|
Python
|
UTF-8
|
R
| false
| false
| 1,537
|
r
|
01.R
|
####
#### version: 2010-12-02
#### Jack Q.Ren
#### renqingjie@pku.edu.cn
## pre-work
# NOTE(review): Windows-style relative path; setwd() in a script is fragile --
# this errors unless a "0310" subdirectory exists under the current directory.
setwd("0310\\")
ls()
# Clears the entire global environment -- intentional here for a fresh start,
# but destructive if this script is sourced into an active session.
rm(list=ls())
ls()
######Chapter 1: Basic R code
## section 1: introduction
x1 <- 1:100
x2 <- x1 * 2 * pi/100 #another vector: 100 points spanning (0, 2*pi]
y1 <- sin(x2) # use function to generate vector (sin is vectorized)
plot(x2, y1, type="l")
abline(h=0, lwd=2)
# dotted vertical guides at multiples of pi/2
abline(v=(0:4)/2*pi, lty=3, col="gray")
y2 <- cos(x2)
lines(x2, y2, lty=2, col="green")
# sink() diverts console output to a file; split=1 also echoes to the console
sink("log1.txt",split=1) #use sink to log the output into a file
sum(y1)
print(y1)
sink() #end the log
## SECTION 2: vector and assignment
marks <-c(10, 6, 4, 7, 8)
x <-c(1:3, 10:13)
x1 <- c(1,2)
x2 <- c(x, x1)   # c() concatenates vectors
## arguments of vector
mode(x)          # storage mode of the elements
length(x)        # number of elements
## calculations of vector with scalar
# Arithmetic between a vector and a scalar is applied element-wise.
x+2
x-2
x*2
x/2
x^2
2/x
2^x
## calculations of vector with vector
# Equal-length vectors combine element-wise.
x1 <- c(1,10)
x2 <- c(4,2)
x1+x2
x1-x2
x1*x2
x1/x2
x1^x2
# Recycling: the shorter vector is repeated. This is silent when the longer
# length is a multiple of the shorter (2 vs 4 below), but raises a warning
# otherwise (2 vs 5).
x1 <- c(1,10)
x3 <- c(1,3,5,7)
x4 <- c(1,3,5,7,9)
x1+x3
x1-x3
x1*x3
x1/x3
x1^x3
x1+x4
x1-x4
x1*x4
x1/x4
x1^x4
## functions of vector
# Math functions are vectorized; the summaries reduce to a scalar (or a
# pair, for range()).
sqrt(x1);log(x1);exp(x1);sin(x1);cos(x1);tan(x1)
sum(x1);mean(x1);var(x1);sd(x1);min(x1);max(x1);range(x1)
cumsum(x1);cumprod(x1)
sort(x);order(x)   # sort() returns the values, order() the permutation
## functions to generate a vector
seq(6);seq(2,5);seq(11,15,by=2);seq(0,2*pi,length=100);seq(to=2,from=5)
rep(0,5); rep(c(1,3),2);rep(c(1,3),c(2,4)); rep(c(1,3),each=2)
## character-vectors
paste(c("ab","cd")) #returns a character vector; elements are not joined
paste(c("a","b"),c("c","d"));paste(c("x", 1:3));paste("x",1:3)
paste("x",1:3,sep="~");paste("ab","cd",collapse="")
## complex objects: lists wrap arbitrary objects
x <- 1:100
# Fixed typo: the original read `y<list(x)`, i.e. a `<` comparison against an
# undefined `y`, which errors. An assignment was clearly intended.
y <- list(x)
x[1]      # first element of the atomic vector
y[1]      # a length-1 list containing the whole vector
dim(x)    # NULL: plain vectors carry no dim attribute
dim(y)    # NULL: lists carry no dim attribute either
x1 <- c("a","b","c")
|
c97230a279c54cba6c94507342795576c89025d0
|
c37219af09432a8b8c543aad9aeb56a807f37d01
|
/man/nopodec_Fhat.Rd
|
6692a0555445c9a7ee961b1fb6bcd9ddf8f7fc1f
|
[] |
no_license
|
gibonet/decr
|
819f76eccd0e06235bd2dd4b0d86f0c52de4ce72
|
055d1bb9f38b3515b8c729234d1a68b9950f7a99
|
refs/heads/master
| 2021-09-28T19:23:22.673366
| 2021-09-10T20:01:22
| 2021-09-10T20:01:22
| 70,839,965
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,948
|
rd
|
nopodec_Fhat.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nopodec_Fhat.R
\name{nopodec_Fhat}
\alias{nopodec_Fhat}
\alias{nopodec_Fhat.default}
\alias{nopodec_Fhat.reweighted}
\title{Empirical cumulative distribution function (ecdf) at value y for the two groups, in and out the common support.
In the common support, counterfactual ecdf are estimated.
It also estimates the number of individuals of the two groups.}
\usage{
nopodec_Fhat(...)
\method{nopodec_Fhat}{default}(.reweight_strata_all, y = NULL, weights = NULL, value, ...)
\method{nopodec_Fhat}{reweighted}(.reweighted, value, ...)
}
\arguments{
\item{...}{arguments passed to or from other methods.}
\item{.reweight_strata_all}{output of \code{\link{reweight_strata_all2}}}
\item{y}{name of the outcome variable for which you want to make the decomposition. If NULL (default), the value is inherited from the attributes of \code{.reweight_strata_all}}
\item{weights}{name of the weight variable (sample weights). If NULL (default), the value is inherited from the attributes of \code{.reweight_strata_all}.}
\item{value}{value of y for which the ecdf is estimated}
\item{.reweighted}{an object of class \code{reweighted} (the output of \code{\link{reweight_strata_all4}})}
}
\description{
The results are all the components necessary to perform a
decomposition of the ecdf difference between two groups,
in four components (as in Nopo (2004)).
}
\examples{
data(invented_wages)
r00 <- reweight_strata_all2(invented_wages, treatment = "gender",
variables = c("sector", "education"),
y = "wage", weights = "sample_weights")
nopodec_Fhat(r00, value = 5000)
r00 <- reweight_strata_all4(invented_wages, treatment = "gender",
variables = c("sector", "education"),
y = "wage", weights = "sample_weights")
str(r00)
names(r00)
class(r00)
nopodec_Fhat(r00, value = 5000)
}
|
5e322c736a83007281de56b01f4d2230cbfe5a5f
|
b4fc54d90f295493c02b900cff8c5d24e6cd17a3
|
/analysis/proactive/02_02_safety_margin_perf.R
|
de238cf0b215517e11cebbb596db0f99ad5ac001
|
[
"Apache-2.0"
] |
permissive
|
fabiomorais/ASaaS
|
0b19ecf15197772e1c9963473a769b74a827d3da
|
2452adbca559e0d399c82d7183692cf6962cf1ea
|
refs/heads/master
| 2021-01-01T04:42:54.088109
| 2017-07-18T18:30:31
| 2017-07-18T18:30:31
| 97,231,915
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,479
|
r
|
02_02_safety_margin_perf.R
|
library(dplyr)
library(ggplot2)
library(tidyr)
library(scales)
library(grid)
# Arrange multiple ggplot objects on one page in a grid layout (the
# well-known "Cookbook for R" multiplot recipe).
#
# Args:
#   ...:      ggplot objects, passed individually.
#   plotlist: alternatively (or additionally), a list of ggplot objects.
#   file:     declared but never used in the body -- NOTE(review): confirm
#             whether saving-to-file support was intended.
#   cols:     number of columns when `layout` is not supplied.
#   layout:   optional matrix; cell value i places plot i at that grid
#             position (overrides `cols`).
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # NOTE(review): redundant -- library(grid) is loaded at the top of this
  # script; require() for loading is discouraged.
  require(grid)
  # Make a list from the ... arguments and plotlist
  plots <- c(list(...), plotlist)
  numPlots = length(plots)
  # If layout is NULL, then use 'cols' to determine layout
  if (is.null(layout)) {
    # Make the panel
    # ncol: Number of columns of plots
    # nrow: Number of rows needed, calculated from # of cols
    layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots == 1) {
    # Single plot: print it directly, no grid machinery needed.
    print(plots[[1]])
  } else {
    # Set up the page
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Make each plot, in the correct location
    for (i in 1:numPlots) {
      # Get the i,j matrix positions of the regions that contain this subplot
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
# Approach as a filter (translated from: "Abordagem como filtro")
dff = read.table(file = "data/general-data-analysis.dat", header = T)
# Keep only uncorrected runs (CORR == 0) with the ceiling_max filter, then
# reshape the three outcome columns (positions 5-7) into long
# (metric, value) format.
# NOTE(review): gather() is superseded; pivot_longer() is the modern form.
dff = dff %>% filter(CORR == 0, FILTER == "ceiling_max") %>%
  select(TRACE, METRIC, SCENARIO, SAFEM, VIOLP, PERF_REL, OVER_REL) %>%
  gather("metric", "value", 5:7)
# Portuguese display labels: "violation percentage", "cost relative to the
# over-provisioned baseline", "cost relative to the perfect baseline".
dff$metric[dff$metric == "VIOLP"] = "Percentual\n de violações"
dff$metric[dff$metric == "OVER_REL"] = "Custo realtivo\n ao superprovido"
dff$metric[dff$metric == "PERF_REL"] = "Custo realtivo\n ao perfeito"
dff = dff %>% mutate(METRIC_STR = ifelse(METRIC == "cpu", "CPU", "Memória"))
dff = dff %>% mutate(SCENARIO_STR = ifelse(SCENARIO == "Dynamic", "Dinâmico", as.character(SCENARIO)))
# Fix facet/category ordering for the plots below.
dff$metric = factor(dff$metric, levels = c("Custo realtivo\n ao superprovido", "Custo realtivo\n ao perfeito", "Percentual\n de violações"))
dff$SCENARIO_STR = factor(dff$SCENARIO_STR, levels = c("LW", "AR", "Dinâmico"))
# Boxplots of each metric per prediction approach, colored by safety margin.
p = ggplot(dff, aes(SCENARIO_STR, value, fill = paste(SAFEM * 100, "%", sep = "")))
p = p + geom_boxplot(position = position_dodge(), width = 0.7)
p = p + facet_grid(metric ~ METRIC_STR, scales = "free")
p = p + scale_fill_brewer("Margem de segurança:", palette = "Set1")
p = p + scale_y_continuous(labels = percent)
p = p + theme_bw(base_size = 24)
p = p + theme(legend.position = "top", legend.key.size = unit(1, "cm"))
p = p + xlab("Abordagem de predição") + ylab(NULL)
p = p + guides(fill = guide_legend(nrow = 1, byrow = TRUE))
p
# Write the figure to disk.
png(filename = "img/02_02_safety-margin-perf.png", width = 1000, height = 650)
print(p)
dev.off()
# Multiplot: build the two panels that are later stacked on one page.
# Top panel (p1): the two relative-cost metrics, x-axis labels suppressed.
dff1 = filter(dff, metric != "Percentual\n de violações")
p = ggplot(dff1, aes(SCENARIO_STR, value, fill = paste(SAFEM * 100, "%", sep = "")))
p = p + geom_boxplot(position = position_dodge(), width = 0.7)
p = p + facet_grid(metric ~ METRIC_STR, scales = "free")
p = p + scale_fill_brewer("Margem de segurança:", palette = "Set1")
p = p + scale_y_continuous(labels = percent)
p = p + theme_bw(base_size = 24)
p = p + theme(legend.position = "top", legend.key.size = unit(1, "cm"))
p = p + xlab(NULL) + ylab(NULL)
# Hide x ticks/labels and facet strips: the bottom panel carries them.
p = p + theme(axis.ticks.x = element_blank(), axis.text.x = element_blank(), strip.text.x = element_blank())
p = p + guides(fill = guide_legend(nrow = 1, byrow = TRUE))
p = p + theme(axis.title.x = element_text(vjust = -0.2))
p1 = p
# Bottom panel (p2): violation percentage only, y-axis zoomed to [0, 10%],
# legend suppressed (the top panel already shows it).
dff2 = filter(dff, metric == "Percentual\n de violações")
p = ggplot(dff2, aes(SCENARIO_STR, value, fill = paste(SAFEM * 100, "%", sep = "")))
p = p + geom_boxplot(position = position_dodge(), width = 0.7)
p = p + facet_grid(metric ~ METRIC_STR, scales = "free")
p = p + scale_fill_brewer("Margem de segurança:", palette = "Set1")
p = p + scale_y_continuous(labels = percent)
# coord_cartesian zooms without dropping data (unlike setting limits).
p = p + coord_cartesian(ylim = c(0, 0.1))
p = p + theme_bw(base_size = 24)
p = p + theme(legend.position = "none", legend.key.size = unit(1, "cm"))
p = p + theme(strip.text.x = element_blank())
# Extra margin so the bottom panel aligns with the top one when stacked.
p = p + theme(plot.margin = unit(c(0,4,5,2.3),"mm"))
p = p + xlab("Abordagem de predição") + ylab(NULL)
p = p + guides(fill = guide_legend(nrow = 1, byrow = TRUE))
p2 = p
# 90x800 grid used to stack the two panels with unequal heights.
Layout <- grid.layout(nrow = 90, ncol = 800, widths = unit(c(1, 1), c("null", "null")), heights = unit(c(1, 1), c("null", "null")))
# Open a fresh page using `Layout`. Arguments are accepted but ignored.
vplayout <- function(...) {
  grid.newpage()
  pushViewport(viewport(layout = Layout))
}
# Viewport spanning rows x and columns y of the pushed layout.
subplot <- function(x, y) viewport(layout.pos.row = x,layout.pos.col = y)
png(filename = "img/02_02_safety-margin-perf-multi.png", width = 1000, height = 650)
vplayout()
# Top panel on rows 1-58, bottom panel on rows 58-90 (row 58 overlaps
# slightly so the panels abut without a gap).
print(p1, vp = subplot(1:58,1:800))
print(p2, vp = subplot(58:90,1:800))
dev.off()
head(dff)
# ----- Linearity analysis (translated from: "Análise de linearidade") -----
# Mean/median violation percentage: no margin (0) vs a 50% safety margin,
# overall and per prediction scenario.
dff %>% filter(metric == "Percentual\n de violações") %>% filter(SAFEM %in% c(0, 0.5)) %>% group_by(SAFEM) %>%
  summarise(mean = mean(value) * 100, median = median(value)*100) %>% data.frame()
dff %>% filter(metric == "Percentual\n de violações") %>% filter(SAFEM %in% c(0, 0.5)) %>% group_by(SCENARIO, SAFEM) %>%
  summarise(mean = mean(value) * 100, median = median(value)*100) %>% data.frame()
# Same comparison for the cost relative to the over-provisioned baseline.
dff %>% filter(metric == "Custo realtivo\n ao superprovido") %>% filter(SAFEM %in% c(0, 0.5)) %>% group_by(SAFEM) %>%
  summarise(mean = mean(value) * 100, median = median(value)*100) %>% data.frame()
# And with a smaller (10%) margin.
dff %>% filter(metric == "Percentual\n de violações") %>% filter(SAFEM %in% c(0, 0.1)) %>% group_by(SAFEM) %>%
  summarise(mean = mean(value) * 100, median = median(value)*100) %>% data.frame()
dff %>% filter(metric == "Custo realtivo\n ao superprovido") %>% filter(SAFEM %in% c(0, 0.1)) %>% group_by(SAFEM) %>%
  summarise(mean = mean(value) * 100, median = median(value)*100) %>% data.frame()
# Per-margin summaries (excluding margin 0) used for the trend plots/models.
x = dff %>% filter(metric == "Percentual\n de violações") %>% filter(!SAFEM %in% c(0)) %>% group_by(SAFEM) %>%
  summarise(mean = mean(value) * 100, median = median(value)*100) %>% data.frame()
y = dff %>% filter(metric == "Custo realtivo\n ao superprovido") %>% filter(!SAFEM %in% c(0)) %>% group_by(SAFEM) %>%
  summarise(mean = mean(value) * 100, median = median(value)*100) %>% data.frame()
plot(x$SAFEM, x$mean)
plot(y$SAFEM, y$mean)
# NOTE(review): these models regress SAFEM on the mean outcome
# (SAFEM ~ mean). For "outcome as a function of the safety margin" one would
# expect mean ~ SAFEM -- confirm which direction was intended.
a = lm(x$SAFEM ~ x$mean)
b = lm(y$SAFEM ~ y$mean)
summary(a)
summary(b)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.