blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9bb227772bbda29527e1b67f94be81797630c22d
|
ff8222567174568ce8288b69947d8cdc8eefc69b
|
/Code_Global - Copy/variable_importance.R
|
995ee52aa77482a7e8a1d2c40b8d411f39be9a2f
|
[] |
no_license
|
LarsGorter024/Urban-Expansion-Models
|
96705c674abf52e0b4f6955737209822d1ee647b
|
512dfe7e3c4fd7069ded3f1541c534830d72cf6a
|
refs/heads/main
| 2023-06-15T14:26:46.577229
| 2021-07-06T20:27:23
| 2021-07-06T20:27:23
| 383,587,979
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,955
|
r
|
variable_importance.R
|
##%######################################################%##
#                                                          #
####          Variable importance function              ####
#                                                          #
##%######################################################%##
#' Variable importance
#'
#' Calculate the contribution of predictor variables to the model.
#' The function makes a reference prediction of the model using the standard set of variables.
#' Then, the values of each predictor variable are randomized in turn, and the prediction is
#' repeated with the set of variables that contains the randomized variable. A Pearson
#' correlation coefficient is calculated between the reference prediction and the randomized
#' prediction. The importance value returned is \code{1 - correlation} (with the correlation
#' rounded to 4 decimal places) for each variable, so 0 means the randomization did not change
#' the prediction at all, and larger values indicate a more important variable.
#' The number of randomizations can be set (default is one).
#'
#' @param data Input data with variables for which to calculate the variable importance. With this data you should be able to run predict function on the model.
#' @param model Model to be used for prediction. Function is tested only on glm object class.
#' @param iterations_num Number of randomization iterations. Default is 1 iteration.
#' @param clean Return cleaned data (default is \code{FALSE}). A dataframe will be returned, only with variables that participated in the model (in case of model selection).
#'
#' @return A matrix where rows hold the variable importance value for each variable, and the
#'   columns are individual iterations. If \code{clean = TRUE}, the return class is a dataframe
#'   with a \code{Variable} column instead of rownames.
#' @export
#'
#' @author Mirza Cengic
#' @examples var_importance(data = mydat, model = my_model, iterations_num = 10)
#' @importFrom magrittr "%>%"
#' @importFrom tibble rownames_to_column
#' @import dplyr
variable_importance <- function(data, model, iterations_num = 1,
                                clean = FALSE)
{
  # The randomization loop below assumes at least one iteration.
  stopifnot(is.numeric(iterations_num), iterations_num >= 1)
  # Pass here the model and the data. Here we want to check if
  # the predictions can be calculated on the data, since the goal
  # of the function is to use the correlation between the reference
  # and a randomized prediction to calculate variable importance.
  reference_prediction <- try(predict(model, data))
  if (inherits(reference_prediction, "try-error"))
  {
    stop("Error with reference prediction")
  }
  # Matrix in which to store the importance values:
  # one row per variable, one column per randomization iteration.
  output_matrix <- matrix(0, nrow = length(names(data)),
                          ncol = iterations_num,
                          dimnames = list(names(data),
                                          paste0("Iter_", seq_len(iterations_num))))
  for (iter in seq_len(iterations_num))
  {
    for (var_name in names(data))
    {
      # Copy the data so each iteration is independent
      dat <- data
      # Randomize (permute) the predictor variable
      dat[, var_name] <- sample(dat[, var_name])
      # Predict on the dataset with the randomized variable
      randomized_prediction <- predict(model, dat)
      # Correlation between the reference and randomized prediction,
      # subtracted from 1: the less the prediction changes under
      # randomization, the less important the variable is.
      output_matrix[var_name, iter] <- 1 - round(cor(x = as.numeric(reference_prediction),
                                                     y = as.numeric(randomized_prediction),
                                                     use = "pairwise.complete.obs",
                                                     method = "pearson"), 4)
    }
  }
  if (clean)
  {
    # Keep only variables that actually participated in the model
    # (relevant when model selection dropped some predictors).
    # "PA" and "(weights)" are response/weight columns, not predictors.
    var_names <- names(model$model)
    var_names <- var_names[!var_names %in% c("PA", "(weights)")]
    output_matrix <- output_matrix %>%
      as.data.frame() %>%
      tibble::rownames_to_column("Variable") %>%
      dplyr::filter(Variable %in% var_names)
  }
  output_matrix
}
|
4c08f09ffbf7ec36ba11bdad84feecb0de203f44
|
3706460263d12fa53d7bceec01908f6e121ac43c
|
/shared/R code/eu/simulation.R
|
bb08f8d1172c112da9d390e44e02115dc40385f5
|
[] |
no_license
|
eugene9212/svm_paper
|
083737302af25c25911a304a3d72bd2523e359c6
|
24e7ff72bf1997faa223c83637debd615a78925b
|
refs/heads/master
| 2021-10-26T03:15:17.999988
| 2019-04-10T06:10:18
| 2019-04-10T06:10:18
| 149,591,840
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 4,264
|
r
|
simulation.R
|
# Simulation driver for the functional SVM (fsvm) pi-path experiments.
# NOTE(review): `rm(list = ls())` and a hard-coded `setwd()` make this a
# machine-specific interactive script; adjust the path before reuse.
rm(list = ls())
# load packages
library(mvtnorm)
library(fda)
# load R codes
setwd('C:/Users/eugene/Desktop/SVM_R/shared/R code/')
# Helper functions: data generation and functional SVM fitting / pi path
source('eu/fsvm.1dim.R')
source('eu/fsvm.1dim.fourier.R')
source('eu/gp.1dim.R')
source('fn/fsvm.pi.path.R')
source('fn/fsvm.sub.pi.path.R')
# Compiled QP solver used by the weighted SVM routines (Windows-only DLL)
dyn.load("KernSurf/temp/wsvmqp.dll")
# Source every R/S/Q script found in `path`, optionally echoing each
# file name as it is loaded. Extra arguments are forwarded to source().
sourceDir <- function(path, trace = TRUE, ...) {
  script_names <- list.files(path, pattern = "[.][RrSsQq]$")
  for (script in script_names) {
    if (trace) {
      cat(script, ":")
    }
    source(file.path(path, script), ...)
    if (trace) {
      cat("\n")
    }
  }
}
# Load the remaining helper scripts shipped with the KernSurf sources
sourceDir('KernSurf/R')
####================================= <Simulation 1> =====================================
# Simulation 1: curves generated from a Gaussian process (gp.1dim),
# repeated n.sim times; each repetition records the deviance criterion.
# set up
n.sim <- 100
n <- 100
beta <- 1
t <- seq(0, 1, by = 0.05)
lambda <- 1 # fsvm & pi.path
# storage
result <- matrix(0, n.sim, 1)
for (iter in 1:n.sim) {
# tic <- Sys.time()
seed <- iter
# Data generation
set.seed(iter)
data <- gp.1dim(n, beta, t, seed)
x <- data$x
y <- data$y
print(iter)
# Fit the one-dimensional functional SVM on the generated curves
obj <- fsvm.1dim(y, x, t, L = 10, lambda, rho = 1, weight = rep(1, n))
K <- obj$K
# pi path
obj_pi <- fsvm.pi.path(lambda, x, y, K)
pi <- obj_pi$pi
alpha <- obj_pi$alpha
alpha0 <- matrix(obj_pi$alpha0, dim(alpha)[1], dim(alpha)[2], byrow=T)
# Decision values f(x) = (alpha0 + K %*% (alpha * y)) / lambda,
# evaluated at every breakpoint of the pi path (one column per pi value)
new.gx <- K %*% (alpha * y)
new.fx <- (alpha0 + new.gx)/lambda
pi.star <- rep(0, n)
# pi.star: for each observation, locate where f(x) changes sign along the
# pi path and take the midpoint of the bracketing pi values; when no
# positive value exists, nudge just past the selected negative one.
for (i in 1:n) {
minus <- which(sign(new.fx[i,])<0)
if(min(minus) == 1 && (minus[2]-minus[1]) != 1){
index1 <- minus[2]
}else{
index1 <- min(minus)
}
plus <- which(sign(new.fx[i,])>0)
if(length(plus) == 0){
pi.star[i] <- pi[index1] + 1.0e-8
}else{
index2 <- max(plus)
pi.star[i] <- (pi[index1] + pi[index2])/2
}
}
# Boxplot of pi.star
# png(filename = paste0(iter,".png"))
boxplot(pi.star[y == 1], pi.star[y != 1], xlab=iter)
# dev.off()
# Numerical criteria
Deviance <- sum(y*log(pi.star))
Entropy <- -sum(pi.star*log(pi.star)) # TODO: which of the two criteria is correct? (translated from Korean)
# Deviance
# results
result[iter,] <- c(Deviance)
# toc <- Sys.time()
# print(toc - tic)
}
# save results
write(result, "eu/result/result.txt")
####================================= <Simulation 2> =====================================
# Simulation 2: sine curves with Gaussian noise; the two classes differ
# in the noise mean (0 vs 1). Records both deviance and entropy criteria.
# set up
n.sim <- 100
n <- 100
beta <- 1
t <- seq(0, 1, by = 0.05)
lambda <- 1 # fsvm & pi.path
sd <- 0.3
# storage for result
result1 <- matrix(0, n.sim, 1)
Eresult <- matrix(0, n.sim, 1)
for (iter in 1:n.sim) {
# tic <- Sys.time()
seed <- iter
# Data generation
set.seed(iter)
# create sine function
# NOTE(review): n is reset from 100 to 50 here, overriding the setup
# value above; also 2*3.14 is used instead of 2*pi - confirm intended.
n <- 50
value <- matrix(sin(2*3.14*t), n, length(t), byrow=T)
# First 25 curves: noise with mean 0; last 25: noise with mean 1
value[1:25,] <- value[1:25,] + matrix(rnorm(25*length(t), mean=0, sd=sd), 25, length(t), byrow=T)
value[26:50,] <- value[26:50,] + matrix(rnorm(25*length(t), mean=1, sd=sd), 25, length(t), byrow=T)
value1 <- data.frame(t(value))
x <- x.list <- as.list(value1)
y <- c(rep(-1,25),rep(1,25))
print(iter)
# Fit the one-dimensional functional SVM on the noisy sine curves
obj <- fsvm.1dim(y, x, t, L = 10, lambda, rho = 1, weight = rep(1, n))
K <- obj$K
# pi path
obj_pi <- fsvm.pi.path(lambda, x, y, K)
pi <- obj_pi$pi
alpha <- obj_pi$alpha
alpha0 <- matrix(obj_pi$alpha0, dim(alpha)[1], dim(alpha)[2], byrow=T)
# Decision values along the pi path (one column per pi breakpoint)
new.gx <- K %*% (alpha * y)
new.fx <- (alpha0 + new.gx)/lambda
pi.star <- rep(0, n)
# pi.star: same sign-change bracketing along the pi path as Simulation 1
for (i in 1:n) {
minus <- which(sign(new.fx[i,])<0)
if(min(minus) == 1 && (minus[2]-minus[1]) != 1){
index1 <- minus[2]
}else{
index1 <- min(minus)
}
plus <- which(sign(new.fx[i,])>0)
if(length(plus) == 0){
pi.star[i] <- pi[index1] + 1.0e-8
}else{
index2 <- max(plus)
pi.star[i] <- (pi[index1] + pi[index2])/2
}
}
# Boxplot of pi.star
# png(filename = paste0(iter,".png"))
boxplot(pi.star[y == 1], pi.star[y != 1], xlab=iter)
# dev.off()
# Numerical criteria
Deviance <- sum(y*log(pi.star))
Entropy <- -sum(pi.star*log(pi.star)) # TODO: which of the two criteria is correct? (translated from Korean)
# Deviance
# results
result1[iter,] <- c(Deviance)
Eresult[iter,] <- c(Entropy)
# toc <- Sys.time()
# print(toc - tic)
}
# Print both criteria to the console (interactive inspection)
result1
Eresult
# save results
# NOTE(review): only the deviance results are written; Eresult is not saved.
write(result1, "eu/result/result1.txt")
|
6faecba81e10c3a4c7e9717d4f6b0d0bdbd8d188
|
90f0c155b0dca1ae98927c7fa9c591cb3e64d3e6
|
/man/final04.Rd
|
773aa6c6a77a95b66a81f5a7f2455615ae8a27b2
|
[] |
no_license
|
desval/wiod
|
03a7f9c7465fa468905dcf8962ddaafe98951809
|
e2513994dc252948f59639493cf4124ad316a3d2
|
refs/heads/master
| 2021-01-11T21:20:52.887336
| 2016-09-28T11:22:45
| 2016-09-28T11:22:45
| 78,770,061
| 1
| 0
| null | 2017-01-12T17:35:30
| 2017-01-12T17:35:30
| null |
UTF-8
|
R
| false
| true
| 200
|
rd
|
final04.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wiod.R
\docType{data}
\name{final04}
\alias{final04}
\title{WIOD 2004 final}
\description{
WIOD 2004 final demand data
}
|
997a61f51826152b91f2be5b3b7c20d7417dc2fb
|
f3e9468bc17c47eba0846e443b9ea73cbc80e5c8
|
/man/biglasso.Rd
|
fc9319d844332949c98b9fe8b438074312156aac
|
[] |
no_license
|
YaohuiZeng/biglasso
|
d8922b3669ed497221ca5ae7ad4b0f84464e7655
|
8f6ede2f14d196b1e5940ab570eba5e8befa511d
|
refs/heads/master
| 2023-04-12T07:26:23.678416
| 2023-04-07T14:24:05
| 2023-04-07T14:24:05
| 52,933,812
| 112
| 30
| null | 2022-01-06T02:44:26
| 2016-03-02T04:27:32
|
C++
|
UTF-8
|
R
| false
| true
| 11,271
|
rd
|
biglasso.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/biglasso.R
\name{biglasso}
\alias{biglasso}
\title{Fit lasso penalized regression path for big data}
\usage{
biglasso(
X,
y,
row.idx = 1:nrow(X),
penalty = c("lasso", "ridge", "enet"),
family = c("gaussian", "binomial", "cox", "mgaussian"),
alg.logistic = c("Newton", "MM"),
screen = c("Adaptive", "SSR", "Hybrid", "None"),
safe.thresh = 0,
update.thresh = 1,
ncores = 1,
alpha = 1,
lambda.min = ifelse(nrow(X) > ncol(X), 0.001, 0.05),
nlambda = 100,
lambda.log.scale = TRUE,
lambda,
eps = 1e-07,
max.iter = 1000,
dfmax = ncol(X) + 1,
penalty.factor = rep(1, ncol(X)),
warn = TRUE,
output.time = FALSE,
return.time = TRUE,
verbose = FALSE
)
}
\arguments{
\item{X}{The design matrix, without an intercept. It must be a
double type \code{\link[bigmemory]{big.matrix}} object. The function
standardizes the data and includes an intercept internally by default during
the model fitting.}
\item{y}{The response vector for \code{family="gaussian"} or \code{family="binomial"}.
For \code{family="cox"}, \code{y} should be a two-column matrix with columns
'time' and 'status'. The latter is a binary variable, with '1' indicating death,
and '0' indicating right censored. For \code{family="mgaussian"}, \code{y}
should be a n*m matrix where n is the sample size and m is the number of
responses.}
\item{row.idx}{The integer vector of row indices of \code{X} that used for
fitting the model. \code{1:nrow(X)} by default.}
\item{penalty}{The penalty to be applied to the model. Either \code{"lasso"}
(the default), \code{"ridge"}, or \code{"enet"} (elastic net).}
\item{family}{Either \code{"gaussian"}, \code{"binomial"}, \code{"cox"} or
\code{"mgaussian"} depending on the response.}
\item{alg.logistic}{The algorithm used in logistic regression. If "Newton"
then the exact hessian is used (default); if "MM" then a
majorization-minimization algorithm is used to set an upper-bound on the
hessian matrix. This can be faster, particularly in data-larger-than-RAM
case.}
\item{screen}{The feature screening rule used at each \code{lambda} that
discards features to speed up computation: \code{"SSR"} (default if
\code{penalty="ridge"} or \code{penalty="enet"}) is the sequential strong rule;
\code{"Hybrid"} is our newly proposed hybrid screening rules which combine the
strong rule with a safe rule. \code{"Adaptive"} (default for \code{penalty="lasso"}
without \code{penalty.factor}) is our newly proposed adaptive rules which
reuse screening reference for multiple lambda values. \strong{Note that:}
(1) for linear regression with elastic net penalty, both \code{"SSR"} and
\code{"Hybrid"} are applicable since version 1.3-0; (2) only \code{"SSR"} is
applicable to elastic-net-penalized logistic regression or cox regression;
(3) active set cycling strategy is incorporated with these screening rules.}
\item{safe.thresh}{the threshold value between 0 and 1 that controls when to
stop safe test. For example, 0.01 means to stop safe test at next lambda
iteration if the number of features rejected by safe test at current lambda
iteration is not larger than 1\% of the total number of features. So 1 means
to always turn off safe test, whereas 0 (default) means to turn off safe test
if the number of features rejected by safe test is 0 at current lambda.}
\item{update.thresh}{the non negative threshold value that controls how often to
update the reference of safe rules for "Adaptive" methods. Smaller value means
updating more often.}
\item{ncores}{The number of OpenMP threads used for parallel computing.}
\item{alpha}{The elastic-net mixing parameter that controls the relative
contribution from the lasso (l1) and the ridge (l2) penalty. The penalty is
defined as \deqn{ \alpha||\beta||_1 + (1-\alpha)/2||\beta||_2^2.}
\code{alpha=1} is the lasso penalty, \code{alpha=0} the ridge penalty,
\code{alpha} in between 0 and 1 is the elastic-net ("enet") penalty.}
\item{lambda.min}{The smallest value for lambda, as a fraction of
lambda.max. Default is .001 if the number of observations is larger than
the number of covariates and .05 otherwise.}
\item{nlambda}{The number of lambda values. Default is 100.}
\item{lambda.log.scale}{Whether compute the grid values of lambda on log
scale (default) or linear scale.}
\item{lambda}{A user-specified sequence of lambda values. By default, a
sequence of values of length \code{nlambda} is computed, equally spaced on
the log scale.}
\item{eps}{Convergence threshold for inner coordinate descent. The
algorithm iterates until the maximum change in the objective after any
coefficient update is less than \code{eps} times the null deviance. Default
value is \code{1e-7}.}
\item{max.iter}{Maximum number of iterations. Default is 1000.}
\item{dfmax}{Upper bound for the number of nonzero coefficients. Default is
no upper bound. However, for large data sets, computational burden may be
heavy for models with a large number of nonzero coefficients.}
\item{penalty.factor}{A multiplicative factor for the penalty applied to
each coefficient. If supplied, \code{penalty.factor} must be a numeric
vector of length equal to the number of columns of \code{X}. The purpose of
\code{penalty.factor} is to apply differential penalization if some
coefficients are thought to be more likely than others to be in the model.
Current package doesn't allow unpenalized coefficients. That
is, \code{penalty.factor} cannot be 0. \code{penalty.factor} is only supported
for "SSR" screen.}
\item{warn}{Return warning messages for failures to converge and model
saturation? Default is TRUE.}
\item{output.time}{Whether to print out the start and end time of the model
fitting. Default is FALSE.}
\item{return.time}{Whether to return the computing time of the model
fitting. Default is TRUE.}
\item{verbose}{Whether to output the timing of each lambda iteration.
Default is FALSE.}
}
\value{
An object with S3 class \code{"biglasso"} for
\code{"gaussian", "binomial", "cox"} families, or an object with S3 class
\code{"mbiglasso"} for \code{"mgaussian"} family, with following variables.
\item{beta}{The fitted matrix of coefficients, store in sparse matrix
representation. The number of rows is equal to the number of coefficients,
whereas the number of columns is equal to \code{nlambda}. For \code{"mgaussian"}
family with m responses, it is a list of m such matrices.}
\item{iter}{A vector of length \code{nlambda} containing the number of
iterations until convergence at each value of \code{lambda}.}
\item{lambda}{The sequence of regularization parameter values in the path.}
\item{penalty}{Same as above.}
\item{family}{Same as above.}
\item{alpha}{Same as above.}
\item{loss}{A vector containing either the residual sum of squares
(for \code{"gaussian", "mgaussian"}) or negative log-likelihood
(for \code{"binomial", "cox"}) of the fitted model at each value of \code{lambda}.}
\item{penalty.factor}{Same as above.}
\item{n}{The number of observations used in the model fitting. It's equal to
\code{length(row.idx)}.}
\item{center}{The sample mean vector of the variables, i.e., column mean of
the sub-matrix of \code{X} used for model fitting.}
\item{scale}{The sample standard deviation of the variables, i.e., column
standard deviation of the sub-matrix of \code{X} used for model fitting.}
\item{y}{The response vector used in the model fitting. Depending on
\code{row.idx}, it could be a subset of the raw input of the response vector y.}
\item{screen}{Same as above.}
\item{col.idx}{The indices of features that have 'scale' value greater than
1e-6. Features with 'scale' less than 1e-6 are removed from model fitting.}
\item{rejections}{The number of features rejected at each value of \code{lambda}.}
\item{safe_rejections}{The number of features rejected by safe rules at each
value of \code{lambda}.}
}
\description{
Extend lasso model fitting to big data that cannot be loaded into memory.
Fit solution paths for linear, logistic or Cox regression models penalized by
lasso, ridge, or elastic-net over a grid of values for the regularization
parameter lambda.
}
\details{
The objective function for linear regression or multiple responses linear regression
(\code{family = "gaussian"} or \code{family = "mgaussian"}) is
\deqn{\frac{1}{2n}\textrm{RSS} + \lambda*\textrm{penalty},}{(1/(2n))*RSS+
\lambda*penalty,}
where for \code{family = "mgaussian"}), a group-lasso type penalty is applied.
For logistic regression
(\code{family = "binomial"}) it is \deqn{-\frac{1}{n} loglike +
\lambda*\textrm{penalty},}{-(1/n)*loglike+\lambda*penalty}, for cox regression,
breslow approximation for ties is applied.
Several advanced feature screening rules are implemented. For
lasso-penalized linear regression, all the options of \code{screen} are
applicable. Our proposal adaptive rule - \code{"Adaptive"} - achieves highest speedup
so it's the recommended one, especially for ultrahigh-dimensional large-scale
data sets. For cox regression and/or the elastic net penalty, only
\code{"SSR"} is applicable for now. More efficient rules are under development.
}
\examples{
## Linear regression
data(colon)
X <- colon$X
y <- colon$y
X.bm <- as.big.matrix(X)
# lasso, default
par(mfrow=c(1,2))
fit.lasso <- biglasso(X.bm, y, family = 'gaussian')
plot(fit.lasso, log.l = TRUE, main = 'lasso')
# elastic net
fit.enet <- biglasso(X.bm, y, penalty = 'enet', alpha = 0.5, family = 'gaussian')
plot(fit.enet, log.l = TRUE, main = 'elastic net, alpha = 0.5')
## Logistic regression
data(colon)
X <- colon$X
y <- colon$y
X.bm <- as.big.matrix(X)
# lasso, default
par(mfrow = c(1, 2))
fit.bin.lasso <- biglasso(X.bm, y, penalty = 'lasso', family = "binomial")
plot(fit.bin.lasso, log.l = TRUE, main = 'lasso')
# elastic net
fit.bin.enet <- biglasso(X.bm, y, penalty = 'enet', alpha = 0.5, family = "binomial")
plot(fit.bin.enet, log.l = TRUE, main = 'elastic net, alpha = 0.5')
## Cox regression
set.seed(10101)
N <- 1000; p <- 30; nzc <- p/3
X <- matrix(rnorm(N * p), N, p)
beta <- rnorm(nzc)
fx <- X[, seq(nzc)] \%*\% beta/3
hx <- exp(fx)
ty <- rexp(N, hx)
tcens <- rbinom(n = N, prob = 0.3, size = 1) # censoring indicator
y <- cbind(time = ty, status = 1 - tcens) # y <- Surv(ty, 1 - tcens) with library(survival)
X.bm <- as.big.matrix(X)
fit <- biglasso(X.bm, y, family = "cox")
plot(fit, main = "cox")
## Multiple responses linear regression
set.seed(10101)
n=300; p=300; m=5; s=10; b=1
x = matrix(rnorm(n * p), n, p)
beta = matrix(seq(from=-b,to=b,length.out=s*m),s,m)
y = x[,1:s] \%*\% beta + matrix(rnorm(n*m,0,1),n,m)
x.bm = as.big.matrix(x)
fit = biglasso(x.bm, y, family = "mgaussian")
plot(fit, main = "mgaussian")
}
\seealso{
\code{\link{biglasso-package}}, \code{\link{setupX}},
\code{\link{cv.biglasso}}, \code{\link{plot.biglasso}},
\code{\link[ncvreg]{ncvreg}}
}
\author{
Yaohui Zeng, Chuyi Wang and Patrick Breheny
Maintainer: Yaohui Zeng <yaohui.zeng@gmail.com> and Chuyi Wang <wwaa0208@gmail.com>
}
|
4817650e6330a3902c33af9f8848f85e27f09db5
|
15b4b6ce2eac755d366960dc9d90cb0e0a97f60c
|
/R/translate-sql-base.r
|
b5536abb325469d7b810a5bb5b7c00aafbd1d14b
|
[] |
no_license
|
tlpinney/dplyr
|
232ecbac123d8a2f06bf8500c918af97696a76cf
|
b1ff14e91ddfe29bea2b606cf5ead405f4388ebb
|
refs/heads/master
| 2021-01-17T21:56:20.201548
| 2013-09-25T16:21:07
| 2013-09-25T16:21:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,259
|
r
|
translate-sql-base.r
|
#' @include translate-sql-helpers.r
#' @export
#' @rdname to_sql
# Translation table mapping R functions/operators to their SQL
# equivalents. Entries are looked up by name during SQL generation;
# `sql_infix`/`sql_prefix`/`build_sql`/`escape` are defined in the
# translate-sql helper file.
base_sql <- new.env(parent = emptyenv())
# Comparison and logical operators
base_sql$`==` <- sql_infix("=")
base_sql$`!` <- sql_prefix("not")
base_sql$`&` <- sql_infix("and")
base_sql$`&&` <- sql_infix("and")
base_sql$`|` <- sql_infix("or")
base_sql$`||` <- sql_infix("or")
# Arithmetic: ^ becomes the POWER() function, %% becomes SQL %
base_sql$`^` <- sql_prefix("power")
base_sql$`%%` <- sql_infix("%")
# Math / aggregate / string functions with different SQL names
base_sql$ceiling <- sql_prefix("ceil")
base_sql$mean <- sql_prefix("avg")
base_sql$var <- sql_prefix("variance")
base_sql$tolower <- sql_prefix("lower")
base_sql$toupper <- sql_prefix("upper")
base_sql$nchar <- sql_prefix("length")
# Pass raw SQL through untouched
base_sql$sql <- function(...) sql(...)
# Both R grouping constructs render as SQL parentheses
base_sql$`(` <- function(x) {
build_sql("(", x, ")")
}
base_sql$`{` <- function(x) {
build_sql("(", x, ")")
}
# Descending sort marker for ORDER BY clauses
base_sql$desc <- function(x) {
build_sql(x, sql(" DESC"))
}
# SQL has no XOR operator; expand to its OR/AND definition
base_sql$xor <- function(x, y) {
sql(sprintf("%1$s OR %2$s AND NOT (%1$s AND %2$s)", escape(x), escape(y)))
}
# NULL test must use IS NULL, not = NULL
base_sql$is.null <- function(x) {
build_sql(x, " IS NULL")
}
# Vectors are escaped into SQL value lists
base_sql$c <- function(...) escape(c(...))
base_sql$`:` <- function(from, to) escape(from:to)
# n() -> COUNT()
base_sql$n <- sql_prefix("count")
# Special symbols available during translation (e.g. pi -> PI())
senv <- new.env(parent = emptyenv())
senv$pi <- structure("PI()", class = "sql")
|
b1a938216f863ce622c75bbcd2a64f93200aa2b6
|
618fb0a3bb4520996baa2fb841342bbbf6d4e57a
|
/R/interMLE.R
|
41cd08e2c3a070c9326dc6601d9e005d8c0a0923
|
[] |
no_license
|
cran/AssetCorr
|
ac654a865db222476bf1648d65f65b663dbf1bb9
|
fb249d30060a8722786315638780d8922c05c003
|
refs/heads/master
| 2021-06-03T23:25:10.299461
| 2021-05-05T14:30:02
| 2021-05-05T14:30:02
| 136,313,140
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,889
|
r
|
interMLE.R
|
interMLE <-
function(d1,n1,d2,n2,rho1,rho2,B=0, DB=c(0,0), JC=FALSE,CI=-1, plot=FALSE){
  # Maximum-likelihood estimation of the inter-sector asset correlation
  # between two default-count time series, with optional bootstrap,
  # double-bootstrap and jackknife bias corrections.
  #
  # d1, n1  defaults and obligors for sector 1 (vectors of equal length)
  # d2, n2  defaults and obligors for sector 2
  # rho1, rho2  intra-sector asset correlations (treated as known)
  # B       number of single-bootstrap repetitions (0 = none)
  # DB      c(inner, outer) repetitions for the double bootstrap (c(0,0) = none)
  # JC      apply a jackknife bias correction?
  # CI      confidence level for a Wald-type interval (-1 = no interval)
  # plot    draw the bootstrap density (only used when B > 0)
  #
  # Returns (first match wins): the bootstrap result if B > 0, else the
  # jackknife result if JC, else the double-bootstrap result if DB[1] != 0,
  # else the plain ML estimate.
  Estimate_Bootstrap <- NULL
  Estimate_Jackknife <- NULL
  Estimate_Standard <- NULL
  # --- Input validation -------------------------------------------------
  if (!is.numeric(d1)) stop("d1 is not numeric")
  if (!is.numeric(n1)) stop("n1 is not numeric")
  if (!is.numeric(d2)) stop("d2 is not numeric")
  if (!is.numeric(n2)) stop("n2 is not numeric")
  if (!is.numeric(rho1)) stop("rho1 is not numeric")
  # Fixed: this message previously (and wrongly) reported "rho1"
  if (!is.numeric(rho2)) stop("rho2 is not numeric")
  if (B %% 1 != 0) stop("B is not an integer")
  if (DB[1] %% 1 != 0 || DB[2] %% 1 != 0) stop("At least one entry in DB is not an integer")
  if (!(length(d1) == length(n1) && length(d2) == length(n2) &&
        length(d1) == length(d2))) {
    stop("Input vectors do not have the same length")
  }
  # Stack each sector's defaults/obligors so bootstrap resampling can
  # pick whole time points by column index
  def1 <- rbind(d1, n1)
  def2 <- rbind(d2, n2)
  # --- Core ML estimation ----------------------------------------------
  estimate <- function(def1, def2, CI) {
    d1 <- def1[1, ]
    n1 <- def1[2, ]
    d2 <- def2[1, ]
    n2 <- def2[2, ]
    # Negative log-likelihood of the inter-correlation rho: per period,
    # integrate the product of the two binomial likelihoods (conditional
    # on the systematic factors) against the bivariate normal factor
    # density with correlation rho.
    nll <- function(rho) {
      ll <- 0
      PD1 <- mean(d1 / n1)
      PD2 <- mean(d2 / n2)
      integral <- NULL
      for (i in 1:length(d1)) {
        d1i <- d1[i]
        n1i <- n1[i]
        d2i <- d2[i]
        n2i <- n2[i]
        integrand <- function(x) {
          # Vasicek conditional PDs given factor realizations x[,1], x[,2]
          PDcond1 <- pnorm((qnorm(PD1) - sqrt(rho1) * x[, 1]) / sqrt(1 - rho1))
          PDcond2 <- pnorm((qnorm(PD2) - sqrt(rho2) * x[, 2]) / sqrt(1 - rho2))
          as.matrix(dbinom(d1i, n1i, PDcond1) * dbinom(d2i, n2i, PDcond2) *
                      dmvnorm(x, sigma = matrix(c(1, rho, rho, 1), 2)))
        }
        # 2D Gauss-Hermite quadrature over the factor distribution
        myGrid <- createNIGrid(dim = 2, type = "GHe", level = 45)
        integral[i] <- quadrature(integrand, myGrid)
        # Guard against numerically failed integrations: NA -> 1, so the
        # period contributes log(1) = 0 instead of poisoning the sum
        if (is.na(integral[i])) { integral[i] <- 1 }
        ll <- ll + log(integral[i])
      }
      -ll
    }
    Res1 <- optimise(nll, interval = c(-1, 1), maximum = FALSE)$minimum
    if (CI != -1) {
      # Wald-type interval from the observed information at the optimum
      hessian1 <- hessian(nll, Res1)
      SD <- 1 / sqrt(hessian1)
      CI <- 1 - (1 - CI) / 2
      Est <- list(Original = Res1,
                  CI = c(Res1 - qnorm(CI) * SD, Res1 + qnorm(CI) * SD))
    } else {
      Est <- list(Original = Res1)
    }
    Est
  }
  Estimate_Standard <- estimate(def1, def2, CI)
  E_S <- Estimate_Standard$Original
  DEF <- rbind(def1, def2)
  # --- Double bootstrap bias correction --------------------------------
  if (DB[1] != 0) {
    IN <- DB[1]
    OUT <- DB[2]
    theta1 <- NULL
    theta2 <- matrix(ncol = OUT, nrow = IN)
    for (i in 1:OUT) {
      N <- length(d1)
      # Outer resample (paired across sectors via the same index vector)
      Ib <- sample(N, N, replace = TRUE)
      Db1 <- def1[, Ib]
      Db2 <- def2[, Ib]
      try(theta1[i] <- estimate(Db1, Db2, CI)$Original, silent = TRUE)
      for (c in 1:IN) {
        # Inner resample of the outer bootstrap sample
        Ic <- sample(N, N, replace = TRUE)
        Db3 <- Db1[, Ic]
        Db4 <- Db2[, Ic]
        try(theta2[c, i] <- estimate(Db3, Db4, CI)$Original, silent = TRUE)
      }
    }
    Boot1 <- mean(theta1, na.rm = TRUE)
    Boot2 <- mean(theta2, na.rm = TRUE)
    # Single and double bootstrap bias-corrected estimates
    BC <- 2 * Estimate_Standard$Original - Boot1
    DBC <- (3 * Estimate_Standard$Original - 3 * Boot1 + Boot2)
    Estimate_DoubleBootstrap <- list(Original = Estimate_Standard$Original,
                                     Bootstrap = BC, Double_Bootstrap = DBC,
                                     oValues = theta1, iValues = theta2)
  }
  # --- Single bootstrap bias correction --------------------------------
  if (B > 0) {
    N <- length(d1)
    theta <- NULL
    for (i in 1:B) {
      Ib <- sample(N, N, replace = TRUE) ## sampling with replacement
      Db <- DEF[, Ib]
      DEF1 <- Db[1:2, ]
      DEF2 <- Db[3:4, ]
      theta[i] <- estimate(DEF1, DEF2, CI)$Original
    }
    Boot <- mean(theta, na.rm = TRUE)
    Estimate_Bootstrap <- list(Original = E_S,
                               Bootstrap = 2 * Estimate_Standard$Original - Boot,
                               bValues = theta)
    if (plot == TRUE) {
      # Bootstrap density with vertical markers for the standard and
      # bias-corrected estimates (ggplot2)
      Dens <- density(theta, na.rm = TRUE)
      XY <- cbind(Dens$x, Dens$y)
      label <- data.frame(rep("Bootstrap density", times = length(Dens$x)))
      Plot <- cbind(XY, label)
      colnames(Plot) <- c("Estimate", "Density", "Label")
      SD <- cbind(rep(E_S, times = length(Dens$x)), Dens$y, rep("Standard estimate", times = length(Dens$x)))
      colnames(SD) <- c("Estimate", "Density", "Label")
      BC <- cbind(rep(Estimate_Bootstrap$Bootstrap, times = length(Dens$x)), Dens$y, rep("Bootstrap corrected estimate", times = length(Dens$x)))
      colnames(BC) <- c("Estimate", "Density", "Label")
      Plot <- rbind(Plot, SD, BC)
      Plot$Estimate <- as.numeric(Plot$Estimate)
      Plot$Density <- as.numeric(Plot$Density)
      Estimate <- Plot$Estimate
      Density <- Plot$Density
      Label <- Plot$Label
      P <- ggplot()
      P <- P + with(Plot, aes(x = Estimate, y = Density, colour = Label)) +
        geom_line() +
        scale_colour_manual(values = c("black", "red", "orange")) +
        theme_minimal(base_size = 15) +
        ggtitle("Bootstrap Density") +
        theme(plot.title = element_text(hjust = 0.5), legend.position = "bottom", legend.text = element_text(size = 12), legend.title = element_text(size = 12), legend.justification = "center", axis.text.x = element_text(face = "bold", size = 12))
      print(P)
    }
  }
  # --- Jackknife bias correction ---------------------------------------
  if (JC == TRUE) {
    N <- length(d1)
    def1 <- rbind(d1, n1)
    def2 <- rbind(d2, n2)
    N <- length(n1)
    Test <- NULL
    for (v in 1:N) {
      # Leave-one-out re-estimation
      d1 <- def1[, -v]
      d2 <- def2[, -v]
      try(Test[v] <- estimate(d1, d2, CI)$Original)
    }
    Estimate_Jackknife <- list(Original = Estimate_Standard$Original,
                               Jackknife = (N * Estimate_Standard$Original - (N - 1) * mean(Test)))
  }
  # Return priority: bootstrap > jackknife > double bootstrap > standard
  if (B > 0) { return(Estimate_Bootstrap) }
  if (JC == TRUE) { return(Estimate_Jackknife) }
  if (DB[1] != 0) { return(Estimate_DoubleBootstrap) }
  if (B == 0 && JC == FALSE && DB[1] == 0) { return(Estimate_Standard) }
}
|
728cf0a449be3ba7a5a0007f86bb805c6093a51c
|
ebe9b48ab47175a028db4f87ac8dc9a0382e7b02
|
/man/colloc_leipzig.Rd
|
27f2f54515d70d51dd0311a993f44774f2bf46a0
|
[
"MIT"
] |
permissive
|
gederajeg/corplingr
|
21b0a3018901304e1824b58cec114dc0ba01c445
|
c260a68260bc499df085ab8fac585209bf657a5a
|
refs/heads/master
| 2021-11-30T02:04:06.974202
| 2021-11-12T13:34:46
| 2021-11-12T13:34:46
| 226,963,221
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,069
|
rd
|
colloc_leipzig.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/corplingr_colloc_leipzig.R
\name{colloc_leipzig}
\alias{colloc_leipzig}
\title{Generate tidyverse-style window-span collocates for the Leipzig Corpora}
\usage{
colloc_leipzig(
leipzig_path = NULL,
leipzig_corpus_list = NULL,
pattern = NULL,
window = "b",
span = 2,
case_insensitive = TRUE,
to_lower_colloc = TRUE,
save_results = FALSE,
coll_output_name = "colloc_tidy_colloc_out.txt",
sent_output_name = "colloc_tidy_sent_out.txt"
)
}
\arguments{
\item{leipzig_path}{character strings of (i) file names of the Leipzig corpus if they are in the working directory, or (ii) the complete file path to each of the Leipzig corpus files.}
\item{leipzig_corpus_list}{specify this argument if each Leipzig corpus file has been loaded as R object and acts as an element of a list.
Example of this type of data-input can be seen in \code{data("demo_corpus_leipzig")}.
So specify either \code{leipzig_path} OR \code{leipzig_corpus_list} and set one of them to \code{NULL}.}
\item{pattern}{regular expressions/exact patterns for the target pattern.}
\item{window}{window-span direction of the collocates: \code{"r"} ('\bold{right} of the node'), \code{"l"} ('\bold{left} of the node'), or the DEFAULT is \code{"b"} ('both \bold{left} and \bold{right} context-window').}
\item{span}{integer vector indicating the span of the collocate scope.}
\item{case_insensitive}{whether the search pattern ignores case (TRUE -- the default) or not (FALSE).}
\item{to_lower_colloc}{whether to lowercase the retrieved collocates and the nodes (TRUE -- default) or not (FALSE).}
\item{save_results}{whether to output the collocates into a tab-separated plain text (TRUE) or not (FALSE -- default).}
\item{coll_output_name}{name of the file for the collocate tables.}
\item{sent_output_name}{name of the file for the full sentence match containing the collocates.}
}
\value{
a list of two tibbles: (i) for collocates with sentence number of the match, window span information, and the corpus files, and (ii) full-sentences per match with sentence number and corpus file
}
\description{
The function produces tibble-output collocates for Leipzig Corpora files.
}
\examples{
\dontrun{
# get the corpus filepaths
# so this example use the filepath input rather than list of corpus
leipzig_corpus_path <- c("my/path/to/leipzig_corpus_file_1M-sent_1.txt",
"my/path/to/leipzig_corpus_file_300K-sent_2.txt",
"my/path/to/leipzig_corpus_file_300K-sent_3.txt")
# run the function
colloc <- colloc_leipzig(leipzig_path = leipzig_corpus_path[2:3],
pattern = "\\\\bterelakkan\\\\b",
window = "b",
span = 3,
save_results = FALSE,
to_lower_colloc = TRUE)
# Inspect outputs
## This one outputs the collocates tibble
colloc$collocates
## This one outputs the sentence matches tibble
colloc$sentence_matches
}
}
|
e008e5f456c3bc85255f2e5c3c0cf08d5f9c1029
|
73d79d8a9a12652412d04d7e894e9bf7c27a35f6
|
/man/gap.binary.Rd
|
363f787ca1c1ffa2ffbc122131aeeb90f9ce81ec
|
[
"Artistic-2.0"
] |
permissive
|
hjanime/clustermap
|
133a9899bb9c75977ff4635213fee06b1ceb1db8
|
a9af01cc2b2a59d582e0fdc3ade978d3be3ca596
|
refs/heads/master
| 2020-10-01T20:39:50.347531
| 2019-12-12T14:07:37
| 2019-12-12T14:07:37
| 227,621,177
| 0
| 0
|
Artistic-2.0
| 2019-12-12T14:06:26
| 2019-12-12T14:06:25
| null |
UTF-8
|
R
| false
| true
| 386
|
rd
|
gap.binary.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustermap.R
\name{gap.binary}
\alias{gap.binary}
\title{gap.binary}
\usage{
gap.binary(X, linkage, B, K = 6)
}
\arguments{
\item{X}{matrix}
\item{linkage}{Linkage for clustering.}
\item{B}{integer.}
\item{K}{integer. Default set to 6.}
}
\description{
Identifies number of clusters using method "gap".
}
|
862f56a76385776fcd63197f677e739e79d99e7e
|
92e597e4ffc9b52cfb6b512734fb10c255543d26
|
/man/safeColumnBind.Rd
|
30e45d6e47cc177c036afae2b9ffc72725f0546d
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.utils
|
3b978dba2a86a01d3c11fee1fbcb965dd15a710d
|
0930eaeb9303cd9359892c1403226a73060eed5b
|
refs/heads/master
| 2023-05-12T15:26:14.529039
| 2023-04-21T04:28:29
| 2023-04-21T04:28:29
| 60,531,844
| 9
| 1
|
MIT
| 2023-04-21T04:28:30
| 2016-06-06T13:52:43
|
R
|
UTF-8
|
R
| false
| true
| 694
|
rd
|
safeColumnBind.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/column.R
\name{safeColumnBind}
\alias{safeColumnBind}
\title{"Safe" version of cbind.}
\usage{
safeColumnBind(x1, x2)
}
\arguments{
\item{x1}{first object to be passed to \code{cbind}}
\item{x2}{second object to be passed to \code{cbind}}
}
\value{
result of \code{cbind(x1, x2)} or \code{x2} if \code{x1}
is \code{NULL}.
}
\description{
If \code{x1} is NULL \code{x2} is returned otherwise \code{cbind(x1, x2)}
}
\examples{
x1 <- NULL
for (i in 1:3) {
x2 <- data.frame(a = 1:3, b = rnorm(3))
x1 <- safeColumnBind(x1, x2)
# using cbind would result in an error:
# x1 <- cbind(x1, x2)
}
x1
}
|
f779ac3aef3ca7675725ef67f8e4c7b7eeabcd52
|
32cb83f49a7218c9a208ad5bef561c3573ed944c
|
/ALHE/src/tests.R
|
e229f6b43170117f0015d784c90c556b49dea0f3
|
[] |
no_license
|
przemo509/alhe-modified-differential-evolution
|
f577ceee6b0bb89f3fc7a9f38555f458e8f9bab1
|
cd6918733551b46fe5d1aa780a46047597340d65
|
refs/heads/master
| 2016-09-05T18:36:41.113706
| 2013-04-04T20:34:42
| 2013-04-04T20:34:42
| 32,258,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,915
|
r
|
tests.R
|
# Testy wydajnościowe, czyli o ile lepiej wypada jedno podejście od drugiego.
# 1. testCecCalls() - wołanie funckji dla parametrów przygotowanych w różny sposób
#
# Author: Przemo
###############################################################################
library("cec2005benchmark");
source("../src/utilities/logging.R");
# Poniższy test pokazuje przewagę wywoływania funkcji celu dla wielu punktów na raz
# zamiast w pętli dla pojedynczych punktów. Dodatkowo pozytywnie na wydajność wpływa
# użycie R-owych funkcji do budowania sekwencji i macierzy zamiast budowania ich w pętli.
testCecCalls = function(){
initLogging();
loggerINFO("START");
xLen = 1000;
yLen = 1000;
zLen= 100000; # liczba kolorów
lim = 100;
xyLen = xLen * yLen;
x = seq(-lim, lim, length = xLen);
y = seq(-lim, lim, length = yLen);
loggerINFO("Sekwencje utworzone");
########################################################################
# nie testować sposobu 1 dla przypadku 1000x1000 bo się można nie doczekać
# loggerINFO("Sposob 1 - cec w petli");
# z1 = matrix(ncol = xLen, nrow = yLen);
# loggerINFO("1. Macierz utworzona - pusta");
# for(i in 1:xLen) {
# for(j in 1:yLen) {
# z1[i,j] = cecValue(c(x[i], y[j]));
# }
# }
# loggerINFO("1. Wynik funkcji otrzymany, od razu macierz");
# image(x, y, z1, col = gray.colors(zLen, start = 0, end = 1), useRaster = TRUE);
# loggerINFO("1. Wykres narysowany");
# contour(x, y, z1, nlevels = 20, add = TRUE);
# loggerINFO("1. Kontury narysowane");
# loggerINFO("Koniec sposobu 1");
########################################################################
########################################################################
loggerINFO("Sposob 2 - macierz w petli");
z2 = matrix(ncol = 2, nrow = xyLen);
for(i in 1:xyLen) {
xi = (i-1)%%xLen+1;
yj = (i-1)%/%xLen + 1;
z2[i,] = c(x[xi], y[yj]);rep
}
loggerINFO("2. Macierz wypelniona w petli");
res2 = cecValue(z2);
loggerINFO("2. Wynik funkcji otrzymany");
resM2 = matrix(res2, ncol = xLen);
loggerINFO("2. Wynik funkcji przeksztalcony na macierz");
image(x, y, resM2, col = gray.colors(zLen, start = 0, end = 1), useRaster = TRUE);
loggerINFO("2. Wykres narysowany");
contour(x, y, resM2, nlevels = 20, add = TRUE);
loggerINFO("2. Kontury narysowane");
loggerINFO("Koniec sposobu 2");
########################################################################
########################################################################
loggerINFO("Sposob 3 - brak petli");
xx = rep(x, times = yLen);
yy = rep(y, each = xLen);
z3 = cbind(xx, yy);
loggerINFO("3. Macierz wypelniona rep() + cbind()");
res3 = cecValue(z3);
loggerINFO("3. Wynik funkcji otrzymany");
resM3 = matrix(res3, ncol = xLen);
loggerINFO("3. Wynik funkcji przeksztalcony na macierz");
image(x, y, resM3, col = gray.colors(zLen, start = 0, end = 1), useRaster = TRUE);
loggerINFO("3. Wykres narysowany");
contour(x, y, resM3, nlevels = 20, add = TRUE);
loggerINFO("3. Kontury narysowane");
loggerINFO("Koniec sposobu 3");
########################################################################
########################################################################
loggerINFO("Zapisywanie i odtwarzanie wykresu");
savedPlot = recordPlot();
loggerINFO("Wykres zapisany");
replayPlot(savedPlot);
loggerINFO("Wykres odtworzony");
########################################################################
loggerINFO("KONIEC");
return();
}
cecValue = function(point) {
res = cec2005benchmark(1, point);
return(res);
}
|
6f8c477b8023c3eee4d04f339d1b128be862c1e4
|
e835097454b8559248d5e19c667fba9402d60f29
|
/script/subscript_model/model_functions.R
|
ec36f453bbcd83e0041c38c60cdb762fa429ce6a
|
[] |
no_license
|
friend1ws/SF3B1_project
|
a2854ae697716da1611f3955fd5d2e3b8f5cb7ae
|
1a7953125e793d1736f67c62970e0f5da4e6dafc
|
refs/heads/master
| 2020-03-31T15:26:46.755868
| 2019-03-06T01:43:57
| 2019-03-06T01:43:57
| 152,337,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,791
|
r
|
model_functions.R
|
library(VGAM)
bb_mloglLikelihood <- function(params, values) {
p_alpha <- params[1]
p_beta <- params[2]
v_n <- values[[1]]
v_k <- values[[2]]
ML <- 0
ML <- ML + sum(lgamma(v_n + 1) - lgamma(v_k + 1) - lgamma(v_n - v_k + 1))
ML <- ML + sum(lgamma(p_alpha + v_k) + lgamma(p_beta + v_n - v_k) - lgamma(p_alpha + p_beta + v_n))
ML <- ML + length(v_n) * (lgamma(p_alpha + p_beta) - lgamma(p_alpha) - lgamma(p_beta))
return(-ML)
}
zibb_mloglLikelihood <- function(params, values) {
p_alpha <- params[1]
p_beta <- params[2]
p_pi <- params[3]
v_n <- values[[1]]
v_k <- values[[2]]
zero_ind <- which(v_k == 0)
non_zero_ind <- setdiff(1:length(v_k), zero_ind)
ML <- 0
ML <- ML + length(non_zero_ind) * log(1 - p_pi) + sum( bb_loglikelihood(p_alpha, p_beta, v_n[non_zero_ind], v_k[non_zero_ind]) )
ML <- ML + sum(log(p_pi + (1 - p_pi) * exp( bb_loglikelihood(p_alpha, p_beta, v_n[zero_ind], v_k[zero_ind])) ))
return(-ML)
}
zip_mlogLikelihood <- function(params, values) {
p_lambda <- params[1]
p_pi <- params[2]
v_k <- values
zero_ind <- which(v_k == 0)
non_zero_ind <- setdiff(1:length(v_k), zero_ind)
ML <- 0
ML <- ML + length(non_zero_ind) * log(1 - p_pi) - length(non_zero_ind) * p_lambda + sum(v_k) * log(p_lambda)
ML <- ML + length(zero_ind) * log(p_pi + (1 - p_pi) * exp(-p_lambda))
}
zib_mlogLikelihood <- function(params, values) {
p_prob <- params[1]
p_pi <- params[2]
v_n <- values[[1]]
v_k <- values[[2]]
zero_ind <- which(v_k == 0)
non_zero_ind <- setdiff(1:length(v_k), zero_ind)
ML <- 0
ML <- ML + length(non_zero_ind) * log(1 - p_pi) + sum(v_k[non_zero_ind]) * log(p_prob) + sum(v_n[non_zero_ind] - v_k[non_zero_ind]) * log(1 - p_prob)
ML <- ML + sum(log(p_pi + (1 - p_pi) * (1 - p_prob)^v_n[zero_ind]))
return(-ML)
}
bb_loglikelihood <- function(p_alpha, p_beta, v_n, v_k) {
ML <- 0
ML <- ML + lgamma(v_n + 1) - lgamma(v_k + 1) - lgamma(v_n - v_k + 1)
ML <- ML + lgamma(p_alpha + v_k) + lgamma(p_beta + v_n - v_k) - lgamma(p_alpha + p_beta + v_n)
ML <- ML + lgamma(p_alpha + p_beta) - lgamma(p_alpha) - lgamma(p_beta)
return(ML)
}
zibb_optim <- function(v_n, v_k) {
cret <- constrOptim(c(5, 5, 0.2), zibb_mloglLikelihood, grad=NULL,
ui = rbind(diag(3), -diag(3)), ci=c(0.1, 0.1, 0.01, -1000, -1000, -0.99),
values = list(v_n, v_k), outer.iterations = 1000, outer.eps = 1e-8)
return(cret)
}
bb_optim <- function(v_n, v_k) {
cret <- constrOptim(c(5, 5), bb_mloglLikelihood, grad=NULL,
ui = rbind(diag(2), -diag(2)), ci=c(0.1, 0.1, -1000, -1000),
values = list(v_n, v_k), outer.iterations = 1000, outer.eps = 1e-8)
return(cret)
}
zib_optim <- function(v_n, v_k) {
cret <- constrOptim(c(0.5, 0.2), zib_mlogLikelihood, grad=NULL,
ui = rbind(diag(2), -diag(2)), ci=c(0, 0.01, -1, -0.99),
values = list(v_n, v_k), outer.iterations = 1000, outer.eps = 1e-8)
return(cret)
}
get_prob_for_zibb <- function(params, s_n, s_k) {
p_alpha <- params[1]
p_beta <- params[2]
p_pi <- params[3]
if (s_k == 0) {
return(p_pi + (1 - p_pi) * dbetabinom.ab(0, s_n, p_alpha, p_beta))
} else {
return((1 - p_pi) * dbetabinom.ab(s_k, s_n, p_alpha, p_beta))
}
}
get_prob_for_bb <- function(params, s_n, s_k) {
p_alpha <- params[1]
p_beta <- params[2]
dbetabinom.ab(s_k, s_n, p_alpha, p_beta)
}
get_prob_for_zib <- function(params, s_n, s_k) {
p_prob <- params[1]
p_pi <- params[2]
if (s_k == 0) {
return(p_pi + (1 - p_pi) * dbinom(0, s_n, p_prob))
} else {
return((1 - p_pi) * dbinom(s_k, s_n, p_prob))
}
}
|
bcb3761f9a14f6e178bfac6dffbe06dbbc20b5c6
|
f9fb8361c3ab28ba67f6345978ef01ac5a997599
|
/Class_Practice/NetworkAnalysis/nba_passing.R
|
c28d08adb2ed506a3bda22a06b3c05017f576a5c
|
[] |
no_license
|
sahNarek/CSE_270_Practice
|
cb14736d0253cf578154ef0b407502235f109a3e
|
9347947eae4d532f6cd32dd3482c0b89e7761394
|
refs/heads/master
| 2020-07-29T15:02:38.199858
| 2019-12-10T18:27:56
| 2019-12-10T18:27:56
| 209,854,014
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,787
|
r
|
nba_passing.R
|
library(SportsAnalytics270)
library(igraph)
library(network)
library(intergraph)
library(ggplot2)
library(circlize)
load("passing.rda")
load("gsw.rda")
x <- gsw$PLAYER_ID
passing <- passing[passing$PASS_TYPE == "made",]
passing <- passing[passing$PASS_TEAMMATE_PLAYER_ID %in% x,]
i_pass <- graph_from_edgelist(
as.matrix(passing[,c("PLAYER_NAME","PASS_TEAMMATE_PLAYER_NAME")],
directed = T))
plot(i_pass)
passing <- passing[passing$PLAYER_NAME != passing$PASS_TEAMMATE_PLAYER_NAME,]
i_pass <- graph_from_edgelist(
as.matrix(passing[,c("PLAYER_NAME","PASS_TEAMMATE_PLAYER_NAME")],
directed = T))
plot(i_pass)
n_pass <- intergraph::asNetwork(i_pass)
plot(n_pass, displaylabels = T)
n_pass %v% "vertex.names"
p <- n_pass %v% "vertex.names"
p
gsw <- gsw[order(match(gsw$PLAYER, p)),]
gsw$PLAYER
network::set.vertex.attribute(n_pass, "position", gsw$POSITION)
n_pass %v% "position"
ngames <- unique(passing[,c("PLAYER_NAME", "G")])
ngames <- ngames[order(match(ngames$PLAYER_NAME,p)),]
ngames
network::set.vertex.attribute(n_pass, "ngames", ngames$G)
n_pass %v% "ngames"
network::set.edge.attribute(n_pass, "passes", passing$PASS)
n_pass %e% "passes"
ggplot(passing, aes(x = FGM)) + geom_histogram() +
labs(x = "Field Goals", title = "FG")
ggplot(passing, aes(x = FG_PCT)) + geom_histogram() +
labs(x = "Field Goal Percentage", title = "FGP")
network::set.edge.attribute(n_pass, "FGP", passing$FG_PCT)
n_pass %e% "FGP"
ggplot(passing, aes(x = PASS)) + geom_histogram() +
labs(x = "Passes", title = "Distribution of passes")
n_pass1 <- get.inducedSubgraph(n_pass,
eid = which(n_pass %e% "passes" > 30))
plot(n_pass, displaylabels = T, mode = "circle")
n_pass_mat <- as.matrix(n_pass, matrix.type = "adjacency",
attrname = "passes")
chordDiagram(n_pass_mat)
x <- n_pass1 %v% "ngames"
z <- 2*(x - min(x)) / (max(x) - min(x))
plot(n_pass1, displaylabels = T, mode = "circle",
vertex.cex = z, vertex.col = "position",
label = paste(n_pass1 %v% "vertex.names", n_pass1 %v% "position", sep = "-"))
lineup <- c("Kevin Durant", "Zaza Pachulia",
"Draymond Green", "Stephen Curry",
"Klay Thompson")
n_pass2 <- get.inducedSubgraph(n_pass1,
v = which(n_pass1 %v% "vertex.names" %in% lineup))
plot(n_pass2, displaylabels = T, mode = "circle",
vertex.cex = z, vertex.col = "position", edge.curve = 0.025, usecurve = T,
label = paste(n_pass2 %v% "vertex.names", n_pass2 %v% "position", sep = "-"))
coords <- plot(n_pass2, displaylabels = T, mode = "circle",
vertex.cex = z, vertex.col = "position", edge.curve = 0.025, usecurve = T,
label = paste(n_pass2 %v% "vertex.names", n_pass2 %v% "position", sep = "-"))
coords
coords[1,] <- c(-2, -3)
coords[2,] <- c(-2, -4.2)
coords[3,] <- c(-3.5, -4.5)
coords[4,] <- c(-3.6, -3)
coords[5,] <- c(-2.7, -3.2)
plot(n_pass2, displaylabels = T, mode = "circle",
vertex.cex = z, vertex.col = "position", edge.curve = 0.025, usecurve = T,
label = paste(n_pass2 %v% "vertex.names", n_pass2 %v% "position", sep = "-"),
coord = coords)
x <- n_pass2 %e% "passes"
z <- 10*(x - min(x)) / (max(x) - min(x))
plot(n_pass2, displaylabels=T, coord = coords,
usecurve = T, edge.curve = 0.015,
edge.lwd = z)
plot(n_pass2, displaylabels=T, coord = coords,
usecurve = T, edge.curve = 0.015,
edge.lwd = z, edge.label = n_pass2 %e% "FGP")
i_pass2 <- intergraph::asIgraph(n_pass2)
plot(i_pass2, vertex.label = V(i_pass2)$vertex.names,
layout = coords)
x <- E(i_pass2)$passes
z <- 10*(x - min(x)) / (max(x) - min(x))
plot(i_pass2, vertex.label = V(i_pass2)$vertex.names,
layout = coords, edge.width = z)
|
e96b5c7325720e570e4a3a85dc1aefa2dd63ce84
|
f757cf4f30fed7d2cdf6176b351160ce14a9f5f6
|
/inst/Ratfor/gethgl.r
|
f8866194c20cfecea4ae852e33a9cd5d4db412e6
|
[] |
no_license
|
cran/hmm.discnp
|
88942124fca13d158ec445bb3b16b0edb88c3c8e
|
059ed3c6e3ad67418e2ee00405d71b0d7e22a85d
|
refs/heads/master
| 2022-10-03T00:38:43.262652
| 2022-09-26T08:10:06
| 2022-09-26T08:10:06
| 17,696,666
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,578
|
r
|
gethgl.r
|
subroutine gethgl(fy,y,ymiss,tpm,xispd,d1pi,d2pi,kstate,n,
npar,d1p,d2p,m,d1f,d2f,alpha,alphw,a,b,aw,bw,
xlc,ll,grad,hess)
implicit double precision(a-h,o-z)
double precision ll
integer y(n)
integer ymiss(n)
dimension fy(kstate,n)
dimension tpm(kstate,kstate), xispd(kstate)
dimension d1pi(kstate,npar), d2pi(kstate,npar,npar)
dimension d1p(kstate,kstate,npar), d2p(kstate,kstate,npar,npar)
dimension d1f(m,kstate,npar), d2f(m,kstate,npar,npar)
dimension alpha(kstate), alphw(kstate)
dimension a(kstate,npar), b(kstate,npar,npar)
dimension aw(kstate,npar), bw(kstate,npar,npar)
dimension xlc(n), grad(npar), hess(npar,npar)
#
# Set zero.
kt = 1
zero = 0.d0
# Initialize; i.e. do the t = 1 case:
sxlc = zero
do j = 1,kstate {
alpha(j) = xispd(j)*fy(j,1)
sxlc = sxlc + alpha(j)
do k1 = 1,npar {
if(ymiss(1) == 1) {
d1fx1 = 0
} else {
d1fx1 = d1f(y(1),j,k1)
}
a(j,k1) = xispd(j)*d1fx1 + fy(j,1)*d1pi(j,k1)
do k2 = 1,npar {
if(ymiss(1) == 1) {
d1fx2 = 0
} else {
d1fx2 = d1f(y(1),j,k2)
}
if(ymiss(1) == 1) {
d2fx = 0
} else {
d2fx = d2f(y(1),j,k1,k2)
}
b(j,k1,k2) = (xispd(j)*d2fx + d1pi(j,k1)*d1fx2 +
d1pi(j,k2)*d1fx1 +
fy(j,1)*d2pi(j,k1,k2))
}
}
}
xlc(1) = sxlc
do j = 1,kstate {
alpha(j) = alpha(j)/sxlc
}
if(n>1) {
do kt = 2,n {
# Do the b's:
do j = 1,kstate {
do k1 = 1,npar {
if(ymiss(kt) == 1) {
d1fx1 = 0
} else {
d1fx1 = d1f(y(kt),j,k1)
}
do k2 = 1,npar {
if(ymiss(kt) == 1) {
d1fx2 = 0
d2fx = 0
} else {
d1fx2 = d1f(y(kt),j,k2)
d2fx = d2f(y(kt),j,k1,k2)
}
vvv = zero
xxx = zero
yy1 = zero
yy2 = zero
zz1 = zero
zz2 = zero
www = zero
do i = 1,kstate {
vvv = vvv+alpha(i)*d2p(i,j,k1,k2)
xxx = (xxx + a(i,k1)*d1p(i,j,k2) + a(i,k2)*d1p(i,j,k1) +
b(i,k1,k2)*tpm(i,j))
yy1 = yy1 + alpha(i)*d1p(i,j,k2)
yy2 = yy2 + a(i,k2)*tpm(i,j)
zz1 = zz1 + alpha(i)*d1p(i,j,k1)
zz2 = zz2 + a(i,k1)*tpm(i,j)
www = www + alpha(i)*tpm(i,j)
}
vvv = fy(j,kt)*vvv
xxx = fy(j,kt)*xxx/sxlc
yyy = d1fx1*(yy1 + yy2/sxlc)
zzz = d1fx2*(zz1 + zz2/sxlc)
www = d2fx*www
bw(j,k1,k2) = vvv + xxx + yyy + zzz + www
}
}
}
do j = 1,kstate {
do k1 = 1,npar {
do k2 = 1,npar {
b(j,k1,k2) = bw(j,k1,k2)
}
}
}
# Do the a's:
do j = 1,kstate {
do k = 1,npar {
if(ymiss(kt) == 1) {
d1fx = 0
} else {
d1fx = d1f(y(kt),j,k)
}
xxx = zero
yyy = zero
zzz = zero
do i = 1, kstate {
xxx = xxx + alpha(i)*d1p(i,j,k)
yyy = yyy + a(i,k)*tpm(i,j)
zzz = zzz + alpha(i)*tpm(i,j)
}
aw(j,k) = fy(j,kt)*(xxx + yyy/sxlc) + d1fx*zzz
}
}
do j = 1,kstate {
do k = 1,npar {
a(j,k) = aw(j,k)
}
}
# Do the alpha's:
sxlc = zero
do j = 1,kstate {
alphw(j) = zero
do i = 1,kstate {
alphw(j) = alphw(j) + alpha(i)*tpm(i,j)
}
alphw(j) = fy(j,kt)*alphw(j)
sxlc = sxlc + alphw(j)
}
xlc(kt) = sxlc
do j = 1,kstate {
alpha(j) = alphw(j)/sxlc
}
}
}
# Finish off:
# Log likelihood.
ll = zero
do kt = 1,n {
ll = ll + log(xlc(kt))
}
# Gradient.
do j = 1,npar {
xxx = zero
do i = 1,kstate {
xxx = xxx + a(i,j)
}
grad(j) = xxx/sxlc
}
# Hessian.
do j1 = 1,npar {
do j2 = 1,npar {
xxx = zero
yyy = zero
zzz = zero
do i = 1,kstate {
xxx = xxx + b(i,j1,j2)
}
hess(j1,j2) = xxx/sxlc - grad(j1)*grad(j2)
}
}
return
end
|
502ae84c29ea29500b5c1173eaa112c38adcc94b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pryr/examples/partial.Rd.R
|
8ec0552b65cd325f4ac43848cd959c316b51aeb7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 903
|
r
|
partial.Rd.R
|
library(pryr)
### Name: partial
### Title: Partial apply a function, filling in some arguments.
### Aliases: partial
### ** Examples
# Partial is designed to replace the use of anonymous functions for
# filling in function arguments. Instead of:
compact1 <- function(x) Filter(Negate(is.null), x)
# we can write:
compact2 <- partial(Filter, Negate(is.null))
# and the generated source code is very similar to what we made by hand
compact1
compact2
# Note that the evaluation occurs "lazily" so that arguments will be
# repeatedly evaluated
f <- partial(runif, n = rpois(1, 5))
f
f()
f()
# You can override this by saying .lazy = FALSE
f <- partial(runif, n = rpois(1, 5), .lazy = FALSE)
f
f()
f()
# This also means that partial works fine with functions that do
# non-standard evaluation
my_long_variable <- 1:10
plot2 <- partial(plot, my_long_variable)
plot2()
plot2(runif(10), type = "l")
|
41cbbc9bd9138f119e49fe95fca5bfd432fbae93
|
502161d749fed214a0036450f29895a3b84e1d3d
|
/eQTL_mapping/3_Run_eigenMT_using_all_CAS_genotype.R
|
4af3a458e0b4a136df209f3d05d149e17309a299
|
[] |
no_license
|
QinqinHuang/CAS_eQTL
|
a5e1f16775a135cb20a6a4f809a7691443605246
|
519ac9d3c68631e931cf93fd7616d1dbe398afc2
|
refs/heads/master
| 2021-05-21T19:11:12.985786
| 2020-10-26T20:12:54
| 2020-10-26T20:12:54
| 252,765,611
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,172
|
r
|
3_Run_eigenMT_using_all_CAS_genotype.R
|
#----------------------------------------------
# 2018-05-05
# 1. Prepare Genotype/SNP pos file for each
# chromosome (eigenMT input)
# 2. Run eigenMT.
#
# Davis et al. recommended using genotype data
# for all individuals and running eigenMT once
# to get the estimated number of effective tests;
# don't have to run it for each of the condition.
# We use the same set of SNPs that were tested
# in eQTL mapping (MAF ≥10% in 135 individuals).
#
# Note that eigenMT works on each chromosome
# separately, so input files (QTL, GEN, GENPOS)
# should be splitted.
#----------------------------------------------
library(foreach)
library(doMC)
#registerDoMC(cores = 10)
options(stringsAsFactors = F)
# Working directory
setwd("/projects/qinqinhuang/CAS/Analysis/eQTL_mapping/eigenMT")
# Get the genotyp data for SNPs that were tested in eQTL mapping in all 215 CAS individuals.
# The list of SNPs
system("cut -f 2 /projects/qinqinhuang/CAS/Analysis/eQTL_mapping/cas_imp_135ind_filtered_maf10.bim > SNPs_tested_eQTL.txt")
# Extract the list of SNPs from the filtered genotype dataset of all 215 individuals
system("plink1.9 --bfile /projects/qinqinhuang/CAS/Genotype_Data/process_data/Michigan_imputation_server_results/cas_auto_sub_filt-updated_auto_chr_2018_01_11_01_15/filtered_geno/cas_michigan_imp_filtered --extract SNPs_tested_eQTL.txt --allow-no-sex --make-bed --out genotype_in_all_cas_imp_215ind")
system("rm SNPs_tested_eQTL.txt")
# Genotype and SNP position files
# Split into 22 chromosomes
if(!file.exists("input_Geno")) {dir.create("input_Geno")}; setwd("input_Geno")
nothing <- foreach(chr = 1:22) %dopar% {
system(paste0("plink1.9 --bfile ../genotype_in_all_cas_imp_215ind --chr ",chr," --allow-no-sex --make-bed --out TEMP_chr",chr))
# SNP dosage
system(paste0("plink1.9 --bfile TEMP_chr",chr," --recode A-transpose --recode-allele ../../HRC_alt_allele.txt --out TEMP2_chr",chr))
system(paste0("cut -f 2,7- TEMP2_chr",chr,".traw > SNP_chr",chr,".txt"))
# SNP location
system(paste0("awk '{print$2,$1,$4}' TEMP2_chr",chr,".traw > snpsloc_chr",chr,".txt"))
return(NULL)
}
system("rm TEMP*")
#----- run eigenMT -----
# Run eigenMT to estimate the empirical number of independent tests.
setwd("/projects/qinqinhuang/CAS/Analysis/eQTL_mapping/eigenMT")
if(!file.exists("output_eigenMT")) {dir.create("output_eigenMT")}
# Run eigenMT for 22 chromosomes
eigenMTout <- foreach(chr = 1:22, .combine = rbind) %dopar% {
cat(" Running eigenMT for Chr",chr,"...\n")
system(paste0("time python ~/software/eigenMT/eigenMT_QH.py --CHROM ", chr, " --QTL ./input_QTL/chr", chr, "_MatrixEQTL_all_cis_tests_pval.txt --GEN ./input_Geno/SNP_chr", chr, ".txt --GENPOS ./input_Geno/snpsloc_chr",chr,".txt --PHEPOS /projects/qinqinhuang/CAS/Expression_Data/clean_data/Gene_location.txt --OUT output_eigenMT/chr", chr, "_eigenMT_output.txt"))
# Read the output
dd <- read.table(paste0("output_eigenMT/chr",chr,"_eigenMT_output.txt"), header = T)
dd <- dd[which(complete.cases(dd$TESTS)), c("gene","TESTS")]
return(dd)
}
write.table(eigenMTout, file = paste0("eigenMT_output_gene_level.txt"), quote = F, sep = "\t", row.names = F)
# 57min
|
c37d0107323458d210e21a8c35ee085bf583f028
|
4f9e68e37bf9130e891136ca90ce8b8faec53b61
|
/R/explore_avey.R
|
432d8eae816bec68671df621b834e436c295d291
|
[] |
no_license
|
robertamezquita/vitech-yhack16
|
218eb3a07f2d8f374197021fb574bc012ab4c60c
|
2ccacbb9eb910f001ab406f4694f0e91be0f1f47
|
refs/heads/master
| 2020-08-04T01:11:34.135757
| 2016-11-12T22:23:06
| 2016-11-12T22:23:06
| 73,532,760
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,918
|
r
|
explore_avey.R
|
###################################################################################
## Explore Vitech data ##
###################################################################################
options(stringsAsFactors = FALSE)
##############
## Packages ##
##############
library(tidyr)
library(dplyr)
library(ggplot2)
library(scales)
tables <- c("participants", "policy_info", "activities")
###################
## Read in files ##
###################
participants <- read.delim("../data/participants.tsv")
str(participants)
policy_info <- read.delim("../data/policy_info.tsv")
str(policy_info)
activities <- read.delim("../data/activities.tsv")
str(activities)
########################
## Clean up Variables ##
########################
## Convert date strings to date objects
participants <- participants %>%
tbl_df() %>%
mutate(date_added = as.Date(substr(date_added, 1, 10)),
dob = as.Date(substr(dob, 1, 10)))
policy_info <- policy_info %>%
tbl_df() %>%
mutate(policy_start_date = as.Date(substr(policy_start_date, 1, 10)))
activities <- activities %>%
tbl_df() %>%
mutate(activity_date = as.Date(substr(activity_date, 1, 10)))
###################################################################################
## Plot basic relationships over time ##
###################################################################################
## PLot new subscriptions over time
ggplot(data = participants) +
geom_histogram(binwidth = 10, aes(x = date_added)) +
## stat_ecdf(aes(x = date_added)) +
scale_x_date(labels = date_format("%Y-%b"), breaks = date_breaks("1 month")) +
scale_y_log10() +
xlab("Date Added") +
ylab("New Plan Subscriptions") +
theme_bw() +
theme(axis.text.x = element_text(angle=45, hjust = 1, vjust = 1))
## Major questions of interest are whether the campaigns are working
|
0515847a1333b31767334fc83c3e226aa4244d3d
|
f91418dcbd255478e021a6081e60e0416a0a7873
|
/R/test_dbi_driver.R
|
70a7b9ecbec6d959ea8c7bd7065588951faa8d64
|
[
"MIT"
] |
permissive
|
zozlak/useR2015
|
f1cb821a958944f1961258c8d17201f93d7f1133
|
88b8deb14975d1d1965b878084507e6d56122520
|
refs/heads/master
| 2020-04-02T13:47:34.921172
| 2015-07-02T11:35:29
| 2015-07-02T11:35:29
| 38,427,023
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,047
|
r
|
test_dbi_driver.R
|
#' @title tests a given dbi impelementation
#' @description
#' Given a handler to the database tests capabilities of the DBI driver.
#' @param conn connection to the database opend using DBI::dbConnect()
#' @return list describing driver capabilities - see test_...() functions
#' description
#' @import DBI
#' @import testthat
#' @examples
#' \dontrun{
#' system('monetdbd start monetdb')
#' handlers = list(
#' SQLite = dbConnect(RSQLite::SQLite(), ":memory:"),
#' MySQL = dbConnect(RMySQL::MySQL(), dbname = 'myDb'),
#' PostgreSQL = dbConnect(RPostgreSQL::PostgreSQL(), dbname = 'myDb'),
#' MonetDB = dbConnect(MonetDB.R::MonetDB.R(), 'pathToMyMonetDb')
#' )
#' sapply(handlers, test_dbi_driver)
#' }
#' @export
test_dbi_driver = function(conn){
result = list(
dbGetQuery = test_dbGetQuery(conn),
dbReadTable = test_dbReadTable(conn),
dbReadTable_another_schema = test_dbReadTable_another_schema(conn),
dbListTables = test_dbListTables(conn),
dbSendQuery = test_dbSendQuery(conn)
)
return(result)
}
|
08b626b8f02fd9d08b3f7ef16d683eca712b63a9
|
ce3bc493274116150497e73aa7539fef1c07442a
|
/man/slackSend.Rd
|
0e0e5c69fa5625c08f69f3cc8bedac49316c2cf6
|
[] |
no_license
|
laresbernardo/lares
|
6c67ff84a60efd53be98d05784a697357bd66626
|
8883d6ef3c3f41d092599ffbdd4c9c352a9becef
|
refs/heads/main
| 2023-08-10T06:26:45.114342
| 2023-07-27T23:47:30
| 2023-07-27T23:48:57
| 141,465,288
| 235
| 61
| null | 2023-07-27T15:58:31
| 2018-07-18T17:04:39
|
R
|
UTF-8
|
R
| false
| true
| 1,639
|
rd
|
slackSend.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slack.R
\name{slackSend}
\alias{slackSend}
\title{Send Slack Message (Webhook)}
\usage{
slackSend(text, title = "", pretext = "", hook = NA, creds = NA)
}
\arguments{
\item{text, title, pretext}{Character. Content on you Slack message.}
\item{hook}{Character. Web hook URL. Ths value will be overwritten by
creds if correctly used.}
\item{creds}{Character. Credential's dir (see \code{get_creds()}). Set
hook URL into the "slack" list in your YML file. Will use first value.}
}
\value{
Invisible POST response
}
\description{
This function send a Slack message using its Webhooks.
}
\details{
For more help, you can follow the
\href{https://api.slack.com/messaging/webhooks#posting_with_webhooks}{Sending messages using Incoming Webhooks}
original documentarion.
}
\examples{
\dontrun{
slackSend(text = "This is a message", title = "TEST", pretext = Sys.info()["user"])
}
}
\seealso{
Other API:
\code{\link{bring_api}()},
\code{\link{fb_accounts}()},
\code{\link{fb_ads}()},
\code{\link{fb_creatives}()},
\code{\link{fb_insights}()},
\code{\link{fb_process}()},
\code{\link{fb_report_check}()},
\code{\link{fb_rf}()},
\code{\link{fb_token}()},
\code{\link{gpt_ask}()},
\code{\link{li_auth}()},
\code{\link{li_profile}()},
\code{\link{queryGA}()}
Other Credentials:
\code{\link{db_download}()},
\code{\link{db_upload}()},
\code{\link{get_credentials}()},
\code{\link{get_tweets}()},
\code{\link{mail_send}()},
\code{\link{queryDB}()},
\code{\link{queryGA}()},
\code{\link{stocks_file}()},
\code{\link{stocks_report}()}
}
\concept{API}
\concept{Credentials}
|
14a40f998d18b2b7b01a7a105db6f4a8e97ace7d
|
cab7c0d9dc98d7d9495a67cb6f94cc3d98eb87a7
|
/sampling.R
|
86024825439bc26d5fe22d5b2de430a9d5b81dea
|
[] |
no_license
|
wikimedia-research/SEO-Experiment-SameAsProp
|
fe478bc1f2b1062ea166a2e68ccb528b12de5c40
|
c760b5e685cde8b0eaa3a0be3567d9e1c0f9f366
|
refs/heads/master
| 2020-04-10T01:05:15.879567
| 2019-03-06T17:10:58
| 2019-03-06T17:10:58
| 160,705,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,856
|
r
|
sampling.R
|
library(glue)
library(magrittr)
snapshot <- "2019-01"
# Excluded from test:
# Indonesian: idwiki
# Portuguese: ptwiki
# Punjabi: pawiki, pnbwiki
# Dutch: nlwiki, nds_nlwiki
# Korean: kowiki
# Bhojpuri: bhwiki
# Cherokee: chrwiki
# Kazakh: kkwiki
# Catalan: cawiki
# French: frwiki
# Yoruba: yowiki
# Kalmyk: xalwiki
excluded_codes <- c(
"id", "pt", "pa", "pnb", "nl",
"nds_nl", "ko", "bh", "chr",
"kk", "ca", "fr", "yo", "xal"
)
language_codes <- readr::read_csv("meta.csv") %>%
dplyr::filter(n_articles >= 100) %>%
dplyr::pull(wiki_id) %>%
gsub(".wikipedia", "", .) %>%
setdiff(excluded_codes)
wikis <- tibble::tibble(
wiki_id = paste0(language_codes, ".wikipedia"),
wiki_db = paste0(gsub("-", "_", language_codes, fixed = TRUE), "wiki")
)
recreate_table <- "
USE bearloga;
DROP TABLE IF EXISTS sameas_pages;
CREATE TABLE sameas_pages (
wiki_id STRING COMMENT 'e.g. en.wikipedia',
page_id BIGINT COMMENT 'page ID',
page_random FLOAT COMMENT 'random number 0-1',
test_group STRING COMMENT 'treatment or control'
);
"
message("Recreating 'bearloga.sameas_pages' table")
system(glue('hive -e "{recreate_table}"'))
# SET hive.exec.dynamic.partition.mode=nonstrict;
query <- "
INSERT INTO bearloga.sameas_pages
SELECT
'${wiki_id}' AS wiki_id,
page_id,
page_random,
IF(page_random >= 0.5, 'treatment', 'control') AS test_group
FROM wmf_raw.mediawiki_page
WHERE snapshot = '${snapshot}'
AND wiki_db = '${wiki_db}'
AND NOT page_is_redirect
AND page_namespace = 0
"
load_pages <- function(wiki_id, wiki_db) {
message(glue("Loading pages from {wiki_db} as {wiki_id}"))
query <- glue(query, .open = "${")
system(glue('nice ionice hive -e "{query}"'))
return(invisible(NULL))
}
# iterate over the (wiki_id, wiki_db) pairs to populate the sameas_pages table:
purrr::pwalk(wikis, load_pages)
|
2f2617019674c35d3b0b2cb209dbbe3906ea0f69
|
5241969456b343da0cafa603f6b373c3bc0863eb
|
/R/data_CCspec.R
|
4511d78e2c063fd77b6d4fd6b2c9e005edfb66dd
|
[] |
no_license
|
cran/IDmeasurer
|
880fc937e1eda6c7ca891eeaf3e516d3a5675032
|
c89c6d520a594207d3e099f9edd66287837f4560
|
refs/heads/master
| 2020-05-21T00:44:51.684846
| 2019-05-09T14:10:10
| 2019-05-09T14:10:10
| 185,838,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,357
|
r
|
data_CCspec.R
|
#' Corncrake, \emph{Crex crex} - spectrum properties
#'
#'\itemize{
#' \item \strong{Species:} Corncrake, \emph{Crex crex}
#' \item \strong{Number of individuals:} 33
#' \item \strong{Number of calls per individual:} 10
#' \item \strong{Number of acoustic variables:} 7
#' \item \strong{Individual identity:} HS=5.68
#' \item \strong{Reference:} Budka, M., & Osiejuk, T. S. (2013). Formant
#' Frequencies are Acoustic Cues to Caller Discrimination and are a Weak
#' Indicator of the Body Size of Corncrake Males. Ethology, 119, 960-969.
#' doi:10.1111/eth.12141
#'}
#' Corncrake calls were recorded at three sites in Poland and one in the Czech
#' Republic. Recordings were made during the corncrake breeding season, from 8 to
#' 30 July, in 2011 and in 2012. Males were recorded when calling spontaneously,
#' in favourable conditions, at night (from 22.00 to 03.30, local time) from a
#' distance of ca. 5-10 m. The original dataset comprised 104 males with 10
#' calls measured from each male.\cr\cr
#' Seven variables were selected to measure duration of the first syllable of
#' the call and its basic spectral parameters of each first syllable of the call
#' like the peak frequency, distribution of frequency amplitudes within
#' spectrum, and range of the frequencies (minimum and maximum). Additionally,
#' the duration of the call was measured. Variables were extracted in SASLab Pro
#' by Avisoft.
#'
#' @format A data frame with 330 rows and 8 variables:
#'
#' \describe{
#' \item{id}{factor, identity code of an individual emitting the call}
#' \item{dur}{duration of the call, in seconds}
#' \item{df}{frequency of maximum amplitude within the spectrum - peak frequency, in Hertz}
#' \item{minf, maxf}{minimum and maximum frequency at -25dB relative to the call peak amplitude, in Hertz}
#' \item{q25, q50, q75}{frequencies at the three quartiles of amplitude
#' distribution; frequencies below which lie 25, 50 and 75 percent of the energy of
#' the call, respectively, in Hertz}
#' }
#'
#' @source \href{https://onlinelibrary.wiley.com/doi/abs/10.1111/eth.12141}{Budka, M., & Osiejuk, T. S. (2013). Formant Frequencies are Acoustic Cues to Caller Discrimination and are a Weak Indicator of the Body Size of Corncrake Males. Ethology, 119, 960-969. doi:10.1111/eth.12141}
"CCspec"
|
b3a13553201bc03596c14da59cb4dd31f04385a8
|
f49961347a44b3137a465182b70f0158885fbca7
|
/man/qapi_list_surveys.Rd
|
e576af53525479a7e901e720048d33bc1a117b55
|
[
"BSD-3-Clause"
] |
permissive
|
jlpalomino/qtoolkit
|
4ef5dce9f16336253431cb74b4ecc75179e7e124
|
139777e39a97dae23155e73d2b5331080d829c62
|
refs/heads/master
| 2020-04-17T15:13:36.535071
| 2019-02-20T17:01:49
| 2019-02-20T17:01:49
| 166,690,221
| 0
| 0
|
MIT
| 2019-01-20T17:40:01
| 2019-01-20T17:40:01
| null |
UTF-8
|
R
| false
| true
| 278
|
rd
|
qapi_list_surveys.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{qapi_list_surveys}
\alias{qapi_list_surveys}
\title{qapi_list_surveys}
\usage{
qapi_list_surveys()
}
\value{
DF of surveys
}
\description{
QAPI call to list all surveys that a user owns
}
|
800cb8891e348eae47598fad478dfcf290a3c1ae
|
45722b21b67a4c4b734043f266b89c5ae9f30e59
|
/plot1.R
|
c8bdbee54d42ae89eca7f0db24252828c5d4b602
|
[] |
no_license
|
amyr206/ExData_Plotting1
|
d3e79d356b80e84fbcb880f04841819ef6cbb6a7
|
dcc7ca63abda2d098b7bbcb4df7393a34d76ddda
|
refs/heads/master
| 2021-01-18T07:49:20.336074
| 2015-02-08T02:30:35
| 2015-02-08T02:30:35
| 30,278,172
| 0
| 0
| null | 2015-02-04T02:57:56
| 2015-02-04T02:57:56
| null |
UTF-8
|
R
| false
| false
| 2,081
|
r
|
plot1.R
|
# Copyright Amy Richards 2015
#
# Plot 1 of 4 for Course Project 1 of the Johns Hopkins / Coursera
# Exploratory Data Analysis class.
#
# Reads the UCI household electric power consumption data from the working
# directory, keeps only the observations from 1-2 February 2007, and writes
# a histogram of global active power (kilowatts) to plot1.png (480x480 px).
#
# Requirements: household_power_consumption.txt unzipped into the working
# directory; base R only, no extra libraries.

# Load the raw data; "?" marks missing values in the source file.
power_raw <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                        nrows = 2075259, na.strings = "?", as.is = TRUE)

# Keep only the two target days and the single column we plot.
target_days <- c("1/2/2007", "2/2/2007")
feb_power <- power_raw[power_raw$Date %in% target_days, "Global_active_power", drop = FALSE]

# Shorter column name for easier reference below.
names(feb_power) <- "globalactivepower"

# Open the PNG device sized to match the assignment example.
png(filename = "plot1.png",
    width = 480,
    height = 480,
    units = "px",
    pointsize = 12)

# Draw the histogram with colors and titles matching the example.
hist(feb_power$globalactivepower,
     col = "red",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency",
     main = "Global Active Power")

# Close the graphics device so the file is flushed to disk.
dev.off()
|
6ad302199c1256709c35e7acbfa77ba8ec3c6207
|
ae729fb624fe40e003e0440bf42d9c4fdb03127d
|
/1. R Basics/DataTypes.R
|
32b42e786aa15b9d1bf343be19f31e34e21b280b
|
[] |
no_license
|
GUY625-del/Data-Science-and-Machine-Learning-with-R
|
bd3afc8bd3e0686685fda075a9f7d79fc65e04b3
|
08cd4f3b19974b8367faa515acad4594267d802b
|
refs/heads/master
| 2023-03-08T04:46:53.950737
| 2021-02-25T01:37:46
| 2021-02-25T01:37:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 207
|
r
|
DataTypes.R
|
# Data Types
# Demonstration of R's three basic atomic types and how class() reports them.
# Numeric (float)
a <- 2.2
# Logical (boolean)
b <- TRUE
c <- FALSE
d <- T
e <- F
# NOTE: T and F are reassignable aliases for TRUE/FALSE, shown here for
# completeness; prefer TRUE/FALSE in real code.
# Characters (strings)
f <- 'hello'
g <- "hello"
# Single and double quotes are equivalent in R.
# Data Type
print(class(a))
print(class(b))
print(class(f))
|
33727cb36b86b9c2468aef64cd7a3ca6cc3659e8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/hyper2/examples/hyper2-package.Rd.R
|
6ca37fd62d6c83fa3457a754ad9ea5d035bc77bc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 388
|
r
|
hyper2-package.Rd.R
|
# Auto-extracted example script from the hyper2 package documentation
# (the ### lines mirror the Rd sections it was generated from).
library(hyper2)
### Name: hyper2-package
### Title: A generalization of the Dirichlet distribution
### Aliases: hyper2-package hyperdirichlet2
### Keywords: package
### ** Examples
data(chess)
maxp(chess) # MLE for players' strengths
H <- hyper2(pnames=letters[1:5])
H <- H + order_likelihood(rrank(100,5:1)) # probs = 5/15,4/15,...,1/15
maxp(H) # should be close to (5:1)/15
|
0c52e76b9105ed554f305a8210359f9130ab16ad
|
bef5e21ee3a7fb88e714d22e3e21b3a2162335a4
|
/scripts/additional_analysis/plots_silencing_classes.R
|
7e6b0e14777d56e4ed725009520bd2f2eac7023d
|
[] |
no_license
|
lisa-sousa/xci_epigenetic_control
|
6afbe40a75fca87105a285eadc30e2c4560940b3
|
99b06220923e31c816b16bfd731c6b798939e326
|
refs/heads/master
| 2021-07-08T11:45:30.531661
| 2020-01-28T15:31:29
| 2020-01-28T15:31:29
| 222,667,602
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,970
|
r
|
plots_silencing_classes.R
|
###################################################################################
# libraries
###################################################################################
library(Cairo)
library(ggplot2)
library(cowplot)
library(gridExtra)
library(here)
###################################################################################
# load data
###################################################################################
# Gene-silencing half-times fitted from PRO-seq; column V5 of the BED file
# holds the half-time estimate in days.
table_halftimes = read.table(here('data/silencing_halftimes/fitted_data','halftimes_pro_seq_mm9_reannotated_with_rr.bed'))
halftimes = table_halftimes$V5
# Hard-coded class thresholds (days): early < 0.5, silenced < 0.9,
# 0.9 < late < 1.3, not silenced > 1.6. Note "early" is a subset of
# "silenced", and genes with half-times in [1.3, 1.6] fall into no class.
early = halftimes[halftimes < 0.5]
silenced = halftimes[halftimes < 0.9]
late = halftimes[halftimes > 0.9 & halftimes < 1.3]
not_silenced = halftimes[halftimes > 1.6]
# Long-format table: one row per gene, labelled with its class and the model
# the class belongs to.
table_pro_seq = data.frame(halftime = early, silencing_class = rep("early",length(early)), model = rep("silencing dynamics model", length(early)))
table_pro_seq = rbind(table_pro_seq, data.frame(halftime = late, silencing_class = rep("late",length(late)), model = rep("silencing dynamics model", length(late))))
table_pro_seq = rbind(table_pro_seq, data.frame(halftime = silenced, silencing_class = rep("silenced",length(silenced)), model = rep("XCI/escape model", length(silenced))))
table_pro_seq = rbind(table_pro_seq, data.frame(halftime = not_silenced, silencing_class = rep("not silenced",length(not_silenced)), model = rep("XCI/escape model", length(not_silenced))))
###################################################################################
# boxplot of different silencing classes
###################################################################################
cairo_pdf(here('plots/additional_analysis','plots_silencing_classes.pdf'),width = 2,height = 3, onefile = TRUE)
# outlier.size=-1 hides outlier points; the legend is extracted with
# cowplot::get_legend() and stacked under the panel via grid.arrange().
ggplot = ggplot(table_pro_seq, aes(x=silencing_class,y=halftime, fill=model)) +
geom_boxplot(colour = "#4d4d4d",alpha = 0.7,outlier.size=-1,lwd=0.4) +
ggtitle("Silencing classes based \non PRO-seq") +
scale_fill_manual("Model",values=c("#2c5aa0", "#a02c2c")) +
scale_x_discrete(name = "silencing class",labels=c("early","late","silenced","not silenced")) +
scale_y_continuous(breaks=c(0,1,2,3,3.5), label=c("0","1","2","3",">3.5"), name='half-time [days]') +
theme_minimal(base_family = "Source Sans Pro") +
theme(panel.grid.minor = element_blank(), panel.grid.major.x = element_blank(),axis.text.x = element_text(size=8, angle = 45, hjust=1, margin = margin(t=0,b=0)), axis.text.y = element_text(size=8),
axis.title=element_text(size=8, margin = margin(t=0)),plot.title = element_text(size=8), legend.text = element_text(size=8), legend.title = element_text(size=8), legend.position = "bottom") +
guides(fill=guide_legend(nrow=2), col=guide_legend(nrow=2))
legend = get_legend(ggplot)
ggplot = ggplot + theme(legend.position="none")
grid.arrange(ggplot,legend,ncol=1,heights=c(2.5,0.5))
dev.off()
###################################################################################
# plots paper 2e-f)
###################################################################################
#### load pro-seq data (recomputed with the same thresholds as above)
table_halftimes = read.table(here('data/silencing_halftimes/fitted_data','halftimes_pro_seq_mm9_reannotated_with_rr.bed'))
halftimes = table_halftimes$V5
early = halftimes[halftimes < 0.5]
silenced = halftimes[halftimes < 0.9]
late = halftimes[halftimes > 0.9 & halftimes < 1.3]
not_silenced = halftimes[halftimes > 1.6]
table_pro_seq = data.frame(halftime = early, silencing_class = rep("early",length(early)), model = rep("silencing dynamics model", length(early)))
table_pro_seq = rbind(table_pro_seq, data.frame(halftime = late, silencing_class = rep("late",length(late)), model = rep("silencing dynamics model", length(late))))
table_pro_seq = rbind(table_pro_seq, data.frame(halftime = silenced, silencing_class = rep("silenced",length(silenced)), model = rep("XCI/escape model", length(silenced))))
table_pro_seq = rbind(table_pro_seq, data.frame(halftime = not_silenced, silencing_class = rep("not silenced",length(not_silenced)), model = rep("XCI/escape model", length(not_silenced))))
#### load marks data
# NOTE(review): the .RData is assumed to provide `data_set` (genes as
# rownames) and `halftime` -- names inferred from usage; confirm.
load(here('data/modelling/feature_matrix','promoter_pro_seq_genes_epigenetic.RData'))
table_halftimes = data.frame(gene = rownames(data_set), halftime = halftime)
table_marks_paper = read.table(file = here('data/annotation_files/silencing_classes','silencing_classes_marks.txt'),sep='\t',header = F)
colnames(table_marks_paper) = c('gene','silencing_class')
table_marks = merge(table_halftimes,table_marks_paper,by='gene')[,2:3]
levels(table_marks$silencing_class) = c("early","escapee","interm.","late")
table_marks$silencing_class = factor(table_marks$silencing_class,levels = c('early','interm.','late','escapee'),ordered = TRUE)
table_marks$model = "none"
table_marks$source = "Differentiating mESCs"
#### load borensztein data (genes with allelic "Bias" are dropped)
load(here('data/modelling/feature_matrix','promoter_pro_seq_genes_epigenetic.RData'))
table_halftimes = data.frame(gene = rownames(data_set), halftime = halftime)
table_NSMB_paper = read.table(file = here('data/annotation_files/silencing_classes','silencing_classes_borensztein.txt'))
colnames(table_NSMB_paper) = c('gene','silencing_class')
table_boren = merge(table_halftimes,table_NSMB_paper,by='gene')
table_boren = table_boren[table_boren$silencing_class != "Bias",2:3]
levels(table_boren$silencing_class) = c("Bias","early","escapee","interm.","late")
table_boren$silencing_class = factor(table_boren$silencing_class,levels = c('early','interm.','late','escapee'),ordered = TRUE)
table_boren$model = "none"
table_boren$source = "Pre-implantation embryos"
#### boxplots: one facet per data source, classes on the x axis
table_pro_seq$source = "PRO-seq in undiff. mESC"
table = rbind(table_marks, table_boren, table_pro_seq)
table$source = factor(table$source, levels = c("Differentiating mESCs","Pre-implantation embryos","PRO-seq in undiff. mESC"))
cairo_pdf(here('plots/additional_analysis','paper_figures_silencing_classes.pdf'),width = 4,height = 3.5, onefile = TRUE)
ggplot(table, aes(x=silencing_class,y=halftime, fill=model)) +
geom_boxplot(colour = "#4d4d4d",alpha = 0.7,outlier.size=0.1,lwd=0.4) +
facet_grid(. ~ source, labeller = label_wrap_gen(width = 20, multi_line = TRUE),scales = "free_x") +
scale_fill_manual("Model",values=c("#2c5aa0","white", "#a02c2c")) +
scale_x_discrete(name = "silencing class") +
scale_y_continuous(breaks=c(0,1,2,3,3.5), label=c("0","1","2","3",">3.5"), name='half-time [days]') +
theme_minimal(base_family = "Source Sans Pro") +
theme(panel.grid.minor = element_blank(), panel.grid.major.x = element_blank(),axis.text.x = element_text(size=8, angle = 45, hjust=1, margin = margin(t=0,b=0)), axis.text.y = element_text(size=8),
axis.title=element_text(size=8, margin = margin(t=0)),strip.text = element_text(size=8), legend.text = element_text(size=8), legend.title = element_text(size=8), legend.position = "bottom") +
guides(fill=guide_legend(nrow=2), col=guide_legend(nrow=2))
dev.off()
|
1c0f6091362dbe96df7d4c3bde602dbc4409d3ba
|
55700238a9edb9ffcc94ee9eca9e10f4c151457a
|
/assignment/Homework04/HW04_63130500106/HW04_63130500106.R
|
fa985d0eff39147c193870efcae76a6d0e0b3f61
|
[
"MIT"
] |
permissive
|
kannika2545/027-Quickest-Electric-Cars
|
d0ddf30a0e39cb6b15497fa80d9bcd81b2833643
|
35b42294de49b039f9d29689f685700f2c468bce
|
refs/heads/main
| 2023-08-29T04:35:39.200572
| 2021-10-27T05:08:10
| 2021-10-27T05:08:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,309
|
r
|
HW04_63130500106.R
|
# Homework 04: exploratory analysis of a superstore-style orders dataset.
# (Thai comments translated to English for maintainability.)
# Import library
library(readr) # read .csv files
library(dplyr) # data verbs and the %>% pipe
library(DescTools) # Year() to extract the year from a Date column
library(forcats) # factor helpers
library(stringr) # string helpers / renaming
library(ggplot2) # plotting
library(scales) # percent() label formatting
# Import dataset
Orders <- read_csv("https://raw.githubusercontent.com/sit-2021-int214/027-Quickest-Electric-Cars/main/assignment/Homework04/HW04_63130500106/train.csv")
# Explore dataset
View(Orders)
glimpse(Orders)
# Data Cleaning
# Changing the types of values
Orders$`Order Date`<-as.Date(Orders$`Order Date`,format = "%d/%m/%Y")
Orders$`Ship Date`<-as.Date(Orders$`Ship Date`,format = "%d/%m/%Y")
Orders$`Ship Mode`<-as.factor(Orders$`Ship Mode`)
Orders$Segment<-as.factor(Orders$Segment)
Orders$Country<-as.factor(Orders$Country)
Orders$City<-as.factor(Orders$City)
Orders$State<-as.factor(Orders$State)
Orders$Region<-as.factor(Orders$Region)
Orders$Category<-as.factor(Orders$Category)
Orders$`Sub-Category`<-as.factor(Orders$`Sub-Category`)
# Exploratory Data Analysis: frequency tables of the categorical columns
table(Orders$`Ship Mode`)
table(Orders$Segment)
table(Orders$Country)
table(Orders$City)
table(Orders$State)
table(Orders$Region)
table(Orders$Category)
table(Orders$`Sub-Category`)
#*****PART2***** Safe Learning
#*dplyr package
# group_by : group the data
# group_keys : view the name of each group
# tally : count the rows in each group
Orders %>% group_by(Region) %>% group_keys()
Orders %>% group_by(Region) %>% tally(sort = TRUE)
#*forcats package
# fct_infreq : order factor levels by frequency
Orders %>%
mutate(state = fct_infreq(State)) %>%
count(state)
#*ggplot2 package
# theme_dark : use a dark plot background
# coord_flip : swap the x and y axes
Orders %>%
ggplot(aes(x = `Sub-Category`)) +
geom_bar(fill="blue") +
theme_dark()+
coord_flip()
# theme_void : remove the plot background
# coord_polar : draw as a circular (polar) chart
# geom_text : add value labels onto the chart
totalPrice_year <- Orders %>%
mutate(year = Year(Orders$`Order Date`)) %>%
group_by(year) %>% summarise(Sum_price = sum(Sales)) %>% arrange(year)
totalPrice_year %>%
ggplot(aes(x=year,y=Sum_price))+
geom_bar(stat = "identity") +
theme_void()+
coord_polar()+
geom_text(aes(label = Sum_price), position = position_identity())
#****PART3*****
# 0. Check for NA values
summary(is.na(Orders))
# 1. Which country has the most orders in this dataset?
Orders$Country<-as.factor(Orders$Country)
summary(Orders$Country)
# 2. List every product ever ordered in this dataset, with duplicates removed
Orders %>% select(`Product ID`,Category,`Sub-Category`,`Product Name`) %>% distinct()
# 3. Rank regions by total order value, largest first
Orders %>% group_by(Region) %>% select(Region,Sales) %>% summarise(Sum_price = sum(Sales)) %>% arrange(desc(Sum_price))
# 4. Which customer ordered with First Class shipping most often?
Orders %>% filter(`Ship Mode`=="First Class") %>% group_by(`Customer Name`) %>% tally(sort = TRUE) %>% head(1)
# 5. Rank the sub-categories ordered most often (the original question asks
# for the top 10, but the code keeps only the top 5 -- TODO confirm intent)
Orders %>% select(Category,`Sub-Category`) %>%
group_by(`Sub-Category`,Category) %>% tally(sort = TRUE) %>% rename(count=n) %>% head(5)
# 6. Total sales for each year
Orders %>%
mutate(year = Year(Orders$`Order Date`)) %>%
group_by(year) %>% summarise(Sum_price = sum(Sales)) %>% arrange(year)
#****PART4*****
# 1. Pie chart: share of each customer segment over the past 4 years
group_segment <- data.frame(table(Orders$Segment))
group_segment <- group_segment %>% rename("segment"=Var1,"count"=Freq)
group_segment %>%
ggplot(aes(x="",y=count,fill=segment)) +
geom_bar(stat="identity", width=1, color="white") +
coord_polar("y", start=0)+
theme_void() +
geom_text(aes(label = percent(count/sum(count))),
position = position_stack(vjust = 0.5))
# 2. Bar chart: order frequency within each 100-unit price range
SalePrice <- Orders %>% select(Sales)
col1<-table(cut(SalePrice$Sales,breaks=seq(from=0.0,to=10000,by=100)))
col2<-data.frame(col1)
col2<-col2 %>% rename("Range"=Var1)
col2 %>% filter(Freq > 50) %>%
ggplot(aes(x=Range,y=Freq))+
geom_bar(fill="#add8e6",stat = "identity")+
coord_flip()+
geom_text(aes(label = Freq), position = position_identity())
# 3. Bar chart: total order value per year
totalPrice_year <- Orders %>%
mutate(year = Year(Orders$`Order Date`)) %>%
group_by(year) %>% summarise(Sum_price = sum(Sales)) %>% arrange(year)
totalPrice_year %>%
ggplot(aes(x=year,y=Sum_price))+
geom_bar(fill="#228B22",stat = "identity") +
geom_text(aes(label = Sum_price), position = position_identity()) +
coord_flip()+
theme_light()+
ggtitle("Total price each year of SaleStore")+
xlab("Years") + ylab("Total price")
|
d445d334c11c30cb3503c7b924050d3c70585fa1
|
2f15b2dc16de0471e7bee43f6739b6ad8522c81d
|
/man/replace_dimension.Rd
|
12668812cc67e506e106099326a3eb5abacca2c9
|
[
"MIT"
] |
permissive
|
billster45/starschemar
|
45566be916c95778727a3add3239143d52796aa9
|
5f7e0201494a36f4833f320e4b9535ad02b9bdc1
|
refs/heads/master
| 2022-12-20T13:45:06.773852
| 2020-09-26T03:44:12
| 2020-09-26T03:44:12
| 298,796,838
| 1
| 0
|
NOASSERTION
| 2020-09-26T11:10:30
| 2020-09-26T11:10:30
| null |
UTF-8
|
R
| false
| true
| 697
|
rd
|
replace_dimension.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/star_schema_replace_dimension.R
\name{replace_dimension}
\alias{replace_dimension}
\alias{replace_dimension.star_schema}
\title{Replace a star schema dimension}
\usage{
replace_dimension(st, name, dimension)
\method{replace_dimension}{star_schema}(st, name, dimension)
}
\arguments{
\item{st}{A \code{star_schema} object.}
\item{name}{A string, name of the dimension.}
\item{dimension}{A \code{dimension_table} object.}
}
\value{
A \code{star_schema} object.
}
\description{
Replace dimension with another that contains all the instances of the first
and, possibly, some more, in a star schema.
}
\keyword{internal}
|
82be792a4181f7b6c7b6fbfcee5cdc10214f7108
|
c7a0d98d6246d238811f9ad271109748e0206d0c
|
/R/n3_freq.r
|
edb15a9a5ca7e5f8a2ddddc8b1b80a57be5b0776
|
[
"MIT"
] |
permissive
|
HVoltBb/kodonz
|
68e42eaf260ccace9bf2b5d2aecb021d0142b604
|
5fd777eca9f07a983c485be76de981a52efa42f5
|
refs/heads/master
| 2020-04-09T16:01:08.607738
| 2020-01-09T18:35:08
| 2020-01-09T18:35:08
| 160,441,942
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
n3_freq.r
|
#' Nucleotide at the third position of the codon
#'
#' Calculates the nucleotide frequency at the third position of each codon
#' across a set of sequences.
#'
#' @param x a list of KZsqns objects (each a character vector of codons).
#'   A single KZsqns object is also accepted and is wrapped in a list.
#' @return a numeric matrix with one row per sequence (named s_1, s_2, ...)
#'   and columns 'A3', 'C3', 'G3', 'T3' holding the proportion of each
#'   nucleotide at the third codon position, relative to the codon count.
#' @export
n3_freq <- function(x) {
  if (!is.list(x)) {
    cat("Just one perhaps very long sequence?\n")
    x <- list(x)
  }
  nucleotides <- c('A', 'C', 'G', 'T')
  ans <- matrix(0, length(x), 4)
  for (i in seq_along(x)) {
    # Warn (but continue) on unexpected classes, matching subclasses too.
    if (!inherits(x[[i]], 'KZsqns')) warning("KZsqns objects are expected")
    # Concatenate the codons and keep every third character.
    bases <- strsplit(paste0(x[[i]], collapse = ''), '')[[1]]
    third <- bases[seq_along(bases) %% 3 == 0]
    # Tabulate against fixed levels so absent nucleotides count as zero
    # (and unexpected characters such as 'N' are simply ignored), instead
    # of the previous pseudo-count hack that broke on non-ACGT input.
    counts <- table(factor(third, levels = nucleotides))
    ans[i, ] <- as.vector(counts) / length(x[[i]])
  }
  colnames(ans) <- paste0(nucleotides, '3')
  rownames(ans) <- paste0('s_', seq_along(x))
  return(ans)
}
|
0fb1005c7732e93b90bad2e42f14b7882b90139a
|
cf05cffc62ce53e1dd12d1e1ed71a107c0e1f63b
|
/R/methods-plot.R
|
e7595b29b475a03f4da9ae28628624d22a6b82b2
|
[] |
no_license
|
cran/UncerIn2
|
418fc36b97be0a7b285a28e9d153b1a16c2b48d3
|
1d87b9b175f8f3bb9c4669ef30c9be8cd833bd99
|
refs/heads/master
| 2021-01-10T13:12:18.757019
| 2015-11-24T18:37:19
| 2015-11-24T18:37:19
| 48,090,721
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,877
|
r
|
methods-plot.R
|
## This file is part of the UncertaintyInterpolation 2.0 package.
##
## Copyright 2015 Tomas Burian
#' @title
#' Plotting S4 class UncertainInterpolation
#'
#' @description
#' This function provides the plotting of S4 object class \code{UncertainInterpolation}.
#'
#' @param object Input data type of S4 object class UncertainInterpolation.
#' @param attr1 First plotting atrribute.
#' @param attr2 Second plotting atrribute.
#' @param attr3 Third plotting atrribute.
#' @param cuts Number of cuts.
#' @param pretty Logical value \code{TRUE/FALSE.}(choose colour breaks at pretty numbers?)
#'
#' @usage
#' \S4method{Plot}{UncertainInterpolation}(object, attr1, attr2, attr3, cuts, pretty)
#'
#' \S4method{Plot}{UncertainInterpolation}(object, attr1 = "uncertaintyLower", attr2 = "modalValue",
#' attr3 = "uncertaintyUpper", cuts = 10, pretty=TRUE)
#'
#' @seealso \code{\link[UncerIn2]{UncertainInterpolation-class}}, \code{\link[UncerIn2]{uncertaintyInterpolation2-package}}
#'
#' @name Plot
#' @docType methods
#' @rdname Plot
#' @aliases Plot,UncertainInterpolation-method
#'
#' @exportMethod Plot
# S4 generic "Plot"; dispatch happens on the class of `object`, extra
# arguments are forwarded to the selected method.
setGeneric("Plot",
function(object, ...)
standardGeneric("Plot")
)
# Plot method for UncertainInterpolation: converts the object to a point
# representation, promotes it to a gridded spatial object, then draws the
# lower / modal / upper surfaces as three lattice panels in one row.
setMethod("Plot",
signature(object = "UncertainInterpolation"),
definition = function(object, attr1 = "uncertaintyLower", attr2 = "modalValue", attr3 = "uncertaintyUpper"
, cuts = 10, pretty=TRUE)
{
# Convert to points, then to a plain data frame with x/y coordinate columns.
a = as.UncertainPoints(object)
a = as.dataframe(a)
# Promote to a gridded sp object so spplot() can render it.
gridded(a)=~x+y
# Shared colour key below the three panels.
spplot(a, c(attr1, attr2, attr3), names.attr= c(attr1, attr2, attr3),
colorkey=list(space="bottom"), layout=c(3,1), cuts = cuts, pretty=pretty)
}
)
|
df325ff89dc1f69abfbc10014579feb38523d67d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/easyformatr/examples/easy_format.Rd.R
|
8423e96ec1c389cbb11f0bbe4af9bf83abd58049
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 336
|
r
|
easy_format.Rd.R
|
# Auto-extracted example script for easyformatr::easy_format (generated from
# the package Rd file; the ### lines mirror its sections).
library(easyformatr)
### Name: easy_format
### Title: Easily build format strings
### Aliases: easy_format
### ** Examples
easy_format(year, month, day, integer, octal, double)
easy_format(decimal(second) )
easy_format(before_decimal(double, 3) )
easy_format(month,
roman(list(day,
minute) ) )
|
9e4b562043992c18b6cdfc409fe7e4bcbc3f65c7
|
6c6334d3d716da34aae8079f7f673c2324ddf480
|
/tests/testthat/test-function-get_elements_by_type.R
|
a5fa1e60effed351c462904fbab6c1a75c091037
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.code
|
94f80f51b2977cd0c0fda094f3c7796e1cea95cf
|
bc81324403e3881124fa2230c023807eba26e32d
|
refs/heads/master
| 2023-08-17T07:40:18.766253
| 2023-07-15T05:50:50
| 2023-07-15T05:50:50
| 140,209,624
| 0
| 0
|
MIT
| 2023-08-06T22:33:32
| 2018-07-08T23:23:47
|
R
|
UTF-8
|
R
| false
| false
| 305
|
r
|
test-function-get_elements_by_type.R
|
# Unit test for the internal kwb.code:::get_elements_by_type() helper.
test_that("get_elements_by_type() works", {
# Shorthand wrapper that silences the helper's debug output.
f <- function(...) kwb.code:::get_elements_by_type(..., dbg = FALSE)
# Calling without an expression to analyse must fail.
expect_error(f())
# Parse a minimal function definition and group its elements by type.
x <- parse(text = "square <- function(x) x * x")
result <- f(x)
expect_type(result, "list")
# NOTE(review): the list name appears to encode the element's type path
# and length for the assignment parsed above -- derived from this fixture.
expect_true("language|call|<-|3|" %in% names(result))
})
|
64ec6f4d073d5f95025a3295b5fe859a581cb10e
|
c306b1271e76b4e72520afcbf5652dc4c841c32b
|
/man/get_my_stancode.Rd
|
ef3c436b5685173f2e191b1c0d506bbd63500f10
|
[] |
no_license
|
EoinTravers/mystanmodels
|
0e91f17d620f01e62e4efcba9f91004b76569477
|
1cb8a9a3fe734726563960d161704030e728af09
|
refs/heads/master
| 2022-12-28T04:04:54.010410
| 2020-10-09T11:23:14
| 2020-10-09T11:23:14
| 302,617,809
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 278
|
rd
|
get_my_stancode.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{get_my_stancode}
\alias{get_my_stancode}
\title{Get the stan code for the model specified}
\usage{
get_my_stancode(model_name)
}
\description{
Get the stan code for the model specified
}
|
7825ff754a74a7ab41508cbb1d49684ccb543be9
|
ecf3302ee7a156bf04f05d0e48f1bf8e9565e642
|
/R/fmi_stations.R
|
dcc866def92344b90efb4e5038ce0ca30d3f3706
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
rOpenGov/fmi2
|
cbe82285b5fcb096cdc2da3ce0de03905f308615
|
5194d67c282f40acb4c9f6a1c71bcbf5b32d357e
|
refs/heads/master
| 2023-07-25T05:54:43.900512
| 2023-07-13T13:29:52
| 2023-07-13T13:29:52
| 144,606,564
| 8
| 3
|
MIT
| 2020-11-23T20:13:22
| 2018-08-13T16:36:08
|
R
|
UTF-8
|
R
| false
| false
| 2,516
|
r
|
fmi_stations.R
|
# function fmi_stations()
#' Get a table of active FMI observation stations.
#'
#' Data is retrieved using a FMI API stored query.
#'
#' @return a \code{tibble} of active observation stations
#'
#' @seealso \url{https://en.ilmatieteenlaitos.fi/observation-stations}
#'
#' @author Joona Lehtomaki \email{joona.lehtomaki@@gmail.com}
#'
#' @importFrom dplyr bind_rows
#' @importFrom magrittr %>%
#' @importFrom purrr pluck
#' @importFrom rlang .data
#' @importFrom tibble tibble_row
#' @importFrom utils tail
#' @importFrom xml2 as_list
#'
#' @export
#'
#' @aliases fmi_weather_stations
#'
fmi_stations <- function() {
# Fetch all station metadata with the fmi::ef::stations stored query and
# turn the XML response into a nested list for parsing below.
fmi_obj <- fmi_api(request = "getFeature",
storedquery_id = "fmi::ef::stations") %>%
purrr::pluck("content") %>%
xml2::as_list()
# Parse one station node into a one-row tibble of its metadata.
parse_nodes <- function(node) {
# First level name in the list is a GML type. Store the value and get the
# rest of the values (children nodes). (gml_type is currently unused.)
gml_type <- names(node)
children <- purrr::pluck(node, 1)
# The values of interest are a combination of actual list values and
# attributes. More robust implementations would sniff out which one,
# but here we rely on hard coded approach.
# Station identifier
fmisid <- purrr::pluck(children$identifier, 1)
# Station name
name <- purrr::pluck(children$name, 1)
# Station type
type <- attr(children$belongsTo, "title")
# Location data. Get lat/long data
# NOTE(review): assumes gml:pos is "lat lon" ordered -- confirm against
# the CRS the API reports.
point_data <- children$representativePoint$Point$pos %>%
purrr::pluck(1) %>%
strsplit(split = " ") %>%
unlist()
lat <- as.numeric(point_data[1])
lon <- as.numeric(point_data[2])
# Also get the EPSG code (last path segment of the srsName URI)
epsg <- attr(children$representativePoint$Point, "srsName") %>%
strsplit(split = "/") %>%
unlist() %>%
tail(n = 1)
# Operational activity period; an open-ended period is encoded in the
# indeterminatePosition attribute of the end element.
oap_start <- children$operationalActivityPeriod$OperationalActivityPeriod$activityTime$TimePeriod$beginPosition %>%
purrr::pluck(1)
oap_end <- children$operationalActivityPeriod$OperationalActivityPeriod$activityTime$TimePeriod$endPosition %>%
attr("indeterminatePosition")
station_data <- tibble::tibble_row(name, fmisid, type, lat, lon, epsg,
oap_start, oap_end)
return(station_data)
}
# Parse every station node and stack the rows into one tibble.
station_data <- purrr::map(fmi_obj[[1]], parse_nodes) %>%
dplyr::bind_rows()
return(station_data)
}
|
901f8308dd2e576e238568eea5ad8c47e711332f
|
aeae7b5585706a01f0cb70ba92920cf4054e80ce
|
/man/cross_paste0.Rd
|
6218c5a2f6b26eb1af31d56210eb2ae8ee490674
|
[
"MIT"
] |
permissive
|
jixing475/manuscriptsR
|
d3c395511cb34571e770de08126f5c213eb06910
|
f56670fb42ac04f4fc3aaa7ea1a9c7f60c7c77a8
|
refs/heads/master
| 2023-05-01T12:07:34.129853
| 2021-05-20T01:48:11
| 2021-05-20T01:48:11
| 299,823,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 854
|
rd
|
cross_paste0.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{cross_paste0}
\alias{cross_paste0}
\title{Create a concatenation cross product of chracter vectors}
\usage{
cross_paste0(chars1, chars2)
}
\arguments{
\item{chars1}{A character vector of string prefixes}
\item{chars2}{A character vector of string suffixes}
}
\value{
A character vector of all prefix/suffix combos.
}
\description{
For every string in two character vectors create the full
outer product of pasting every string in `chars1`` before
every string in `chars2`. This version was created by
Jono Carrol.
}
\examples{
\dontrun{
manuscriptsJX::cross_paste0(c("jixing", "jiren", "jide"),
c("_hezhong", "_yihui"))
#> [1] "jixing_hezhong" "jixing_yihui" "jiren_hezhong" "jiren_yihui"
#> [5] "jide_hezhong" "jide_yihui"
}
}
|
1888e1ea4dc28be5d4542c6351d2b2304c2ec3ec
|
ad9063f4e1d86ec15e194ccd7f623d50b410029b
|
/Analysis.R
|
5dffc6eb7beffb996c6b2ae5340efa3004b7e0fe
|
[] |
no_license
|
M-Pass/ConceptualSpan
|
e9bfc7b3e8a851710f3485e6b2f59a4fdd60f20f
|
8861de55b53942729511e4447e9324a0a9fc81f4
|
refs/heads/master
| 2022-11-29T15:46:30.425402
| 2020-07-31T07:40:47
| 2020-07-31T07:40:47
| 283,974,106
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,138
|
r
|
Analysis.R
|
# Analysis of Conceptual Span items and their relation to reading comprehension.
#
# Inputs (expected in the working directory):
#   CSitems.csv   - item-level Conceptual Span responses (items in cols 4-13)
#   puntizeta.csv - full test battery scores; "NS" codes missing values
#   socioana.csv  - socio-biographical data, keyed by participant code
# Environment setting
# (Removed `rm(list=ls())`: a script should not wipe the user's workspace.)
library(psych)
library(lavaan)

# Loading Conceptual span data
data <- read.csv("CSitems.csv")

### FACTOR ANALYSIS ###
# Single-factor CFA over the ten Conceptual Span items.
data_CFA <- data[, 4:13]
model <- "F=~ item1 + item2 + item3 + item4 + item5 + item6 + item7 + item8 + item9 + item10
"
fit <- cfa(model, data = data_CFA, std.lv = TRUE)
summary(fit, fit.measures = TRUE)  # spell out the argument (was partially matched as fit.mea)

### Capacity Estimation ###
# NOTE(review): the 3 * 5 / 8 rescaling is task-specific -- confirm against
# the scoring protocol before reusing.
mean(rowMeans(data_CFA), na.rm = TRUE) * 3 * 5 / 8

# Loading full dataset ("NS" marks missing scores)
data <- read.csv("puntizeta.csv", na.strings = "NS")
data <- data[data$soggetto != "", 1:46]  # drop rows without a participant code
socioana <- read.csv("socioana.csv")
data <- merge(data, socioana, by.x = "soggetto", by.y = "Codice")  # merging socio-biographical characteristics

# Give the measures used below readable names (positions per codebook).
colnames(data)[4] <- "ReadingComprehension"
colnames(data)[28] <- "ConceptualSpan"
colnames(data)[25] <- "DirectDigitSpan"
colnames(data)[26] <- "InverseDigitSpan"
colnames(data)[12] <- "ReadingSpeed"
colnames(data)[13] <- "ReadingErrors"

### Linear model ###
# Predict reading comprehension from span and reading measures.
fit <- lm(ReadingComprehension ~ DirectDigitSpan + InverseDigitSpan + ReadingSpeed + ReadingErrors + ConceptualSpan, data = data)
summary(fit)
|
54179fb63d3c48b4e02d193d2dc94f71154dff0f
|
75db022357f0aaff30d419c13eafb9dddfce885a
|
/inst/IP/comparisonOfDaysFishedAcrossYears.r
|
449ee8fcfc960ec15b03b85c0ccb244075f0535d
|
[] |
no_license
|
LobsterScience/bio.lobster
|
d4c553f0f55f561bb9f9cd4fac52c585e9cd16f8
|
b2af955291cb70c2d994e58fd99d68c6d7907181
|
refs/heads/master
| 2023-09-01T00:12:23.064363
| 2023-08-23T16:34:12
| 2023-08-23T16:34:12
| 60,636,005
| 11
| 5
| null | 2017-01-20T14:35:09
| 2016-06-07T18:18:28
|
R
|
UTF-8
|
R
| false
| false
| 1,009
|
r
|
comparisonOfDaysFishedAcrossYears.r
|
# Compare the number of days fished per licence across years (LFA 27 focus).
# NOTE(review): depends on the bio.lobster database back-end; na.zero() is
# provided by bio.utilities.
require(bio.lobster)
require(bio.utilities)
# Unfiltered processed logs; one row per trip record.
a = lobster.db('process.logs.unfiltered')
# Distinct days fished per LFA x season x vessel x licence.
aa = aggregate(DATE_FISHED~LFA+SYEAR+VESSEL_NAME+LICENCE_ID,data=a,FUN=function(x) length(unique(x)))
#Potential Fishings Days
dat = lobster.db('season.dates')
dat$DF = dat$END_DATE - dat$START_DATE
# NOTE(review): dF (potential days per season) is computed but never used below.
dF = aggregate(DF~SYEAR+LFA,data=dat,FUN=sum)
# Median days fished per LFA and season; quick time-series check for LFA 27.
gv = aggregate(DATE_FISHED~LFA+SYEAR,data=aa,FUN='median')
with(subset(gv,LFA==27),plot(SYEAR,DATE_FISHED,type='l'))
pdf('~/tmp/LFA27DaysFished.pdf')
par(mfrow=c(2,2),mar=c(3,4,1.5,0.5),oma=c(0.4,1,1,1))
x = subset(aa,LFA==27 & SYEAR>2003)
y = unique(x$SYEAR)
y = y[order(y)]
for(i in y){
# Build the frequency table of days fished for this season, filled over the
# full range of observed day counts, plus the cumulative share of licences.
u = subset(x,SYEAR==i)
u = table(u$DATE_FISHED)
u = data.frame(Days=as.numeric(names(u)),Count=u)
id = data.frame(Days=min(u$Days):max(u$Days))
u = merge(id,u,all.x=T)
u = u[,c(1,3)]
u = na.zero(u)
names(u) = c('Days','Freq')
u$cs = cumsum(u$Freq)/sum(u$Freq,na.rm=T)
# NOTE(review): u/u$cs are recomputed each season but nothing is drawn into
# the open PDF device -- a plot() call appears to be missing here.
}
dev.off()
# Median of the per-season medians since 2013, by LFA.
aggregate(DATE_FISHED~LFA,data=subset(gv,SYEAR>2012),FUN=median)
|
d5647b83ac8fd2324b33569769f84dd30048180c
|
1ded44fc1e8ed7f621c8d19654b7efa910d0610f
|
/man/Kronspec.Rd
|
39e01f6120c120a193c06a8183775096f51be967
|
[] |
no_license
|
cran/MTS
|
a0099c7b4ef497190e2088fb6d9abbf1cee26edb
|
a78f81c43d0ac4d54b2b58c0b427c5d7286dd89e
|
refs/heads/master
| 2023-04-21T22:42:23.122748
| 2022-04-11T13:32:30
| 2022-04-11T13:32:30
| 17,680,790
| 6
| 9
| null | 2023-04-08T13:17:28
| 2014-03-12T19:31:12
|
R
|
UTF-8
|
R
| false
| false
| 773
|
rd
|
Kronspec.Rd
|
\name{Kronspec}
\alias{Kronspec}
\title{Kronecler Index Specification
}
\description{For a given set of Kronecker indices, the program
specifies a VARMA model. It gives details of parameter specification.
}
\usage{
Kronspec(kdx, output = TRUE)
}
\arguments{
\item{kdx}{A vector of Kronecker indices
}
\item{output}{A logical switch to control output. Default is with
output.
}
}
\value{
\item{PhiID}{Specification of the AR matrix polynomial. 0 denotes zero
parameter, 1 denotes fixing parameter to 1, and 2 denotes the parameter
requires estimation}
\item{ThetaID}{Specification of the MA matrix polynomial}
}
\references{Tsay (2014, Chapter 4)
}
\author{Ruey S. Tsay
}
\examples{
kdx=c(2,1,1)
m1=Kronspec(kdx)
names(m1)
}
|
3c06c1fc3126911714dd8203c69002dcf5977cd3
|
58ed380e48045a368c06701b61aac8fac41419e7
|
/man/fit_pi.Rd
|
7631df4ed7f95c48b75e4f9ee4f41616faedf741
|
[
"MIT"
] |
permissive
|
systats/deeplyr
|
5c6419316ce23eb1569b0189a18816f81bb91b94
|
3248e73a24527a7717a01e0e5c8e3021d5b8b823
|
refs/heads/master
| 2021-07-05T04:40:23.502291
| 2020-10-02T14:49:12
| 2020-10-02T14:49:12
| 185,884,771
| 11
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 177
|
rd
|
fit_pi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api_pi.R
\name{fit_pi}
\alias{fit_pi}
\title{fit_pi}
\usage{
fit_pi(self)
}
\description{
fit_pi
}
|
83b80ab1b8748cc4041bf75bff7d653f7e2cb83c
|
6a54ede8ce395cd20b3cf027fea709ff454d3511
|
/Lecture 4/04.RawCode.R
|
53a40671e4436f8c58fc21f8a9242b2859e1a932
|
[] |
no_license
|
zmarion1/BayesianGymnasium2018
|
6718471839bc13c8ba304cd3f9a4eb6f48a5293b
|
7d50d14ab21ff225b125c251ae4a0c865e1c0a3f
|
refs/heads/master
| 2021-05-05T06:29:52.086947
| 2018-05-02T15:38:31
| 2018-05-02T15:38:31
| 118,805,349
| 0
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 815
|
r
|
04.RawCode.R
|
library(shinystan)
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
setwd("YOUR DIRECTORY")
obs <- rep(c(1,0), times=c(7,3)) # our Bernoulli observations
nObs <- length(obs) # number of observations
alpha <- 1 # Prior for alpha
beta <- 1 # Prior for beta
dat <- list(obs = obs, nObs=nObs, alpha=alpha, beta=beta)
mod1 <- stan(file="04.ex1Bernoulli.stan", #path to .stan file
data=dat,
iter=2000, # number of MCMC iterations
chains=4, # number of independent MCMC chains
seed=3) # set the seed so run is repeatable
traceplot(mod1, par="p")
traceplot(mod1, par="lp__")
print(mod1)
print(mod1, par="p")
stan_dens(mod1, par="p")
stan_dens(mod1, par="p")
|
bc41077e26de48e7656c3137cf9df9418ec0786c
|
5314fc7db5f93546cb11bebd526f75d95db2fa0c
|
/solutions/day_04.R
|
c7dedcee9b28c785d676cf3a96ac5c9d57f85d52
|
[] |
no_license
|
vicnett/AOC-2020
|
36dbb8a3f2e019614bc78353b551eaf033147da2
|
a305e72b1262042e31e8a8072e23495e42bbe3f1
|
refs/heads/main
| 2023-02-03T20:17:16.176421
| 2020-12-23T01:04:43
| 2020-12-23T01:04:43
| 317,705,813
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,459
|
r
|
day_04.R
|
# Advent of code 2020
# Day 4
# Load libraries
library(tidyverse)
# Set working directory to file location
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Load input
inputData <- read_file("../inputs/day_04")
# Part 1
# Input is "passport" data.
# Determine which passports have all required fields. The expected fields are as follows:
# - byr (Birth Year)
# - iyr (Issue Year)
# - eyr (Expiration Year)
# - hgt (Height)
# - hcl (Hair Color)
# - ecl (Eye Color)
# - pid (Passport ID)
# - cid (Country ID)
# The cid field is optional
# Output should be the number of valid passports
# Preprocess input
passportList <- str_replace_all(str_split(inputData, '\\n\\n')[[1]], "\\n", " ") %>%
as_tibble() %>%
mutate(
# Create an "id" column to keep track of which passport each data point belongs to
id = row_number(),
# And split the strings into key/value pairs
value = str_split(value, " ")
) %>%
# Put all the key/value pairs into one column
unnest(value) %>%
# Separate the key/value pairs into a key and a value columns
separate(value, into = c("key", "value"), sep = ":") %>%
# Remove invalid rows (key and/or value missing)
drop_na() %>%
# Move the keys and values into columns
spread(key, value) %>%
# Replace missing country IDs since we don't care if it's missing
replace_na(list(cid = "none"))
# The answer is simply how many rows have no missing values!
print(paste0("The list contains ", length(drop_na(passportList)$id), " valid passports."))
# Part 2
# Count all valid passports, same as part 1, with the following data validation rules added:
# - byr (Birth Year) - four digits; at least 1920 and at most 2002.
# - iyr (Issue Year) - four digits; at least 2010 and at most 2020.
# - eyr (Expiration Year) - four digits; at least 2020 and at most 2030.
# - hgt (Height) - a number followed by either cm or in:
# - If cm, the number must be at least 150 and at most 193.
# - If in, the number must be at least 59 and at most 76.
# - hcl (Hair Color) - a # followed by exactly six characters 0-9 or a-f.
# - ecl (Eye Color) - exactly one of: amb blu brn gry grn hzl oth.
# - pid (Passport ID) - a nine-digit number, including leading zeroes.
# - cid (Country ID) - ignored, missing or not.
passportListPassingDataValidation <- passportList %>%
# Filter out bad byr values
filter(
str_detect(byr, "^[:digit:]{4,4}$"),
between(as.numeric(byr), 1920, 2002)
) %>%
# Filter out bad iyr values
filter(
str_detect(iyr, "^[:digit:]{4,4}$"),
between(as.numeric(iyr), 2010, 2020)
) %>%
# Filter out bad eyr values
filter(
str_detect(eyr, "^[:digit:]{4,4}$"),
between(as.numeric(eyr), 2020, 2030)
) %>%
# Filter out bad hgt values
# First let's separate the number and the unit
mutate(
hgt_unit = str_sub(hgt, -2, -1),
hgt_number = as.numeric(str_sub(hgt, 1, -3))
) %>%
filter(
(hgt_unit == "cm" & between(hgt_number, 150, 193)) |
(hgt_unit == "in" & between(hgt_number, 59, 76))
) %>%
# Filter out bad hcl values
filter(
str_detect(hcl, "^#[a-f0-9]{6,6}")
) %>%
# Filter out bad ecl values
filter(ecl %in% c("amb", "blu", "brn", "gry", "grn", "hzl", "oth")) %>%
# Filter out bad pid values
filter(
str_detect(pid, "^[:digit:]{9,9}$")
)
print(paste0("The list contains ", length(passportListPassingDataValidation$id), " valid passports, including data validation."))
|
caafc61eabdb293397a0c539ac94d7cc9d03a854
|
a69d9dc1182b184f876435733db2da4f33910811
|
/man/param.mode.Rd
|
a1358317b9f48f5e2fc7cc9f6d09b84d432c0d5d
|
[] |
no_license
|
taylors2/PeriodCPT
|
8d829ab01571da214dbff6d52bff958561413dfe
|
fd8c5f1f9fa863f876ac74748558c4a84f3ecf25
|
refs/heads/master
| 2022-11-26T03:49:27.828966
| 2020-07-30T17:02:21
| 2020-07-30T17:02:21
| 259,919,812
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 643
|
rd
|
param.mode.Rd
|
\name{param.mode}
\alias{param.mode}
\title{
Generic Function -- param.mode
}
\description{
Generic function
}
\usage{
param.mode(object)
}
\arguments{
\item{object}{
Depending on the class of \code{object} depends on the method used (and if one exists)
}
}
\details{
Generic Function
}
\value{
Depends on the calss of \code{object}, see individual methods
}
\author{
Simon Taylor
Rebecca Killick
}
\seealso{
\code{\link{param.mode-methods}}
}
\examples{
x = new("pcpt") # new pcpt object
param.mode(x) = matrix(c(0.2, 0.8), nrow = 1, ncol = 2)
param.mode(x)
}
\keyword{ methods }
\keyword{ pcpt }
\keyword{ internal }
|
24d29153304d330e46e99496fea0e5e746847868
|
aac60cb63641ac946f473d702eddcdc1bbc9f940
|
/man/getExpData.Rd
|
a9779f92991468a657ba8414f146e7febb27cb1b
|
[] |
no_license
|
bhklab/RPharmacoDB
|
0a95fa40ad604825d3c11073817e9295cc7e2363
|
13b221943bd6568e8d31d4c7a323de38e459d772
|
refs/heads/master
| 2020-04-10T23:41:45.822032
| 2015-12-10T03:11:19
| 2015-12-10T03:11:19
| 41,569,735
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,670
|
rd
|
getExpData.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getData.R
\name{getExpData}
\alias{getExpData}
\title{Obtain a data set according to a study name}
\usage{
getExpData(cellline = NULL, drug = NULL, summary = FALSE, stats = NULL)
}
\arguments{
\item{cellline}{[vector] vector of cell lines for which to obtain data
if cellline=NULL and drug != NULL, then data for all celllines
tested on that vector of drugs is returned.}
\item{drug}{[vector] vector of drugs for which to obtain data
if drug=NULL and cellline != NULL, then data for all
drugs tested on the cellline vector returned
if drug!=NULL and cellline !=NULL, then data for all
experiments in the set cellline, drug pairs for which
there is data is returned}
\item{summary}{[boolean] whether or not to return summary data
if summary=TRUE, summary data is returned
if summary=FALSE, dose response data is returned
default is summary=FALSE}
\item{stats}{[vector] vector of summary statistics values to return
default is stats=NULL because summary=FALSE}
}
\value{
list of data.frame containing data if found or NA if data does not exist
}
\description{
\code{getExpData} requests a particular dataset
according to user specified study
}
\examples{
getExpData(cellline="HCC70") ## get all dose-response curves tested on HCC70
## Get the published IC50 values for all cell lines tested on Erlotinib or 17-AAG
getExpData(drug=c("Erlotinib", "17-AAG"), stats = "IC50_Published"))
## Get all summary statistics for experiments tested on MCF7 and 1321N1
## and with erlotinib or 17-AAG
getExpData(cellline = c("MCF7", "1321N1"), drug = c("erlotinib", "17-AAG"), summary = TRUE)
}
|
66852ee2b56759dec3b3234ebbdff54dd75562d6
|
cba027bab5eb9c7c26ffce776923eab4cafd1d8c
|
/Scripts/Miscellaneous/Wilcox_test.r
|
0ad1a94b6c093ddb9d31ca1e0fc1fe5cfa2dd434
|
[] |
no_license
|
antonio-mastropaolo/automatic-variable-renaming
|
733b34e13e306e3ee1e9b82b75b67195c35c678a
|
f808db3a70d86ed4e8fe9eae5fb6bd65e6a23b03
|
refs/heads/main
| 2023-04-18T17:35:52.915552
| 2022-12-27T09:40:59
| 2022-12-27T09:40:59
| 440,521,300
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,233
|
r
|
Wilcox_test.r
|
library(effsize)
setwd(paste("statistical-Analysis/Box_Plot/NGRAM/","Large-Scale",sep=""))
data<-read.csv("data_reduced.csv",header=TRUE)
perfect <- data[which(data["IS_PERFECT"] == TRUE),]
wrong <- data[which(data["IS_PERFECT"] == FALSE),]
#p-value < 0.05 to be significant
#Tokens
wilcox.test(perfect$Tokens,wrong$Tokens,alternative="two.side",paired=FALSE)$p.value
cliff.delta(perfect$Tokens,wrong$Tokens)$magnitude
#CharPerVar
wilcox.test(perfect$CharPerVar,wrong$CharPerVar,alternative="two.side",paired=FALSE)$p.value
cliff.delta(perfect$CharPerVar,wrong$CharPerVar)$magnitude
#TokenPerVar
wilcox.test(perfect$TokenPerVar,wrong$TokenPerVar,alternative="two.side",paired=FALSE)$p.value
cliff.delta(perfect$TokenPerVar,wrong$TokenPerVar)$magnitude
#Occurences_Within_Training
wilcox.test(perfect$Occurences_Within_Training,wrong$Occurences_Within_Training,alternative="two.side",paired=FALSE)$p.value
cliff.delta(perfect$Occurences_Within_Training,wrong$Occurences_Within_Training)$magnitude
#OccurencesVarPerMethod
wilcox.test(perfect$OccurencesVarPerMethod,wrong$OccurencesVarPerMethod,alternative="two.side",paired=FALSE)$p.value
cliff.delta(perfect$OccurencesVarPerMethod,wrong$OccurencesVarPerMethod)$magnitude
|
e0e5ac433ea1349e8514aff7bbef669ed502a657
|
6f9ab236999fff566b0ed76f6fc2146d63e3f7f1
|
/rotations/R/preliminary.R
|
ce8ef8f13caaba53f32208f613f956a1a390ba8d
|
[
"MIT"
] |
permissive
|
stanfill/rotationsC
|
d4733140b6e40c61b2d9312474c1a8786f1974fb
|
66722f095a68d81e506c29cfac7d4a8a69e664cf
|
refs/heads/master
| 2022-07-22T01:17:54.917073
| 2022-06-24T21:27:24
| 2022-06-24T21:27:24
| 9,582,475
| 0
| 3
| null | 2021-03-11T21:43:23
| 2013-04-21T16:51:07
|
C++
|
UTF-8
|
R
| false
| false
| 18,770
|
r
|
preliminary.R
|
#' Rotational distance
#'
#' Calculate the extrinsic or intrinsic distance between two rotations.
#'
#' This function will calculate the intrinsic (Riemannian) or extrinsic
#' (Euclidean) distance between two rotations. \code{R2} and \code{Q2} are set
#' to the identity rotations by default. For rotations \eqn{R_1}{R1} and
#' \eqn{R_2}{R2} both in \eqn{SO(3)}, the Euclidean distance between them is
#' \deqn{||R_1-R_2||_F}{||R1-R2||} where \eqn{||\cdot||_F}{|| ||} is the
#' Frobenius norm. The Riemannian distance is defined as \deqn{||Log(R_1^\top
#' R_2)||_F}{||Log(R1'R2)||} where \eqn{Log} is the matrix logarithm, and it
#' corresponds to the misorientation angle of \eqn{R_1^\top R_2}{R1'R2}. See
#' the vignette `rotations-intro' for a comparison of these two distance
#' measures.
#'
#' @param x \eqn{n\times p}{n-by-p} matrix where each row corresponds to a
#' random rotation in matrix (\eqn{p=9}) or quaternion (\eqn{p=4}) form.
#' @param R2,Q2 a single, second rotation in the same parametrization as x.
#' @param method string indicating "extrinsic" or "intrinsic" method of
#' distance.
#' @param p the order of the distance.
#' @param ... additional arguments.
#' @return The rotational distance between each rotation in x and R2 or Q2.
#' @export
#' @examples
#' rs <- rcayley(20, kappa = 10)
#' Rs <- genR(rs, S = id.SO3)
#' dEs <- rot.dist(Rs,id.SO3)
#' dRs <- rot.dist(Rs, id.SO3 , method = "intrinsic")
#'
#' #The intrinsic distance between the true central orientation and each observation
#' #is the same as the absolute value of observations' respective misorientation angles
#' all.equal(dRs, abs(rs)) #TRUE
#'
#' #The extrinsic distance is related to the intrinsic distance
#' all.equal(dEs, 2*sqrt(2)*sin(dRs/2)) #TRUE
rot.dist<-function(x,...){
UseMethod("rot.dist")
}
#' @rdname rot.dist
#' @export
rot.dist.SO3 <- function(x, R2=id.SO3, method='extrinsic' , p=1,...) {
R1<-formatSO3(x)
method <- try(match.arg(method,c('projected','extrinsic','intrinsic')),silent=T)
if (isa(method,"try-error"))
stop("method needs to be one of 'projected', 'extrinsic' or 'intrinsic'.")
if(method%in%c('projected','extrinsic')){
n <- nrow(R1)
R1 <- matrix(R1, n, 9)
R2 <- matrix(R2, n, 9, byrow = TRUE)
so3dist<-sqrt(rowSums((R1-R2)^2))^p
}else if(method=='intrinsic'){
R2<-matrix(R2,3,3)
thetas<-c(rdistSO3C(R1,R2))
so3dist<-thetas^p
}else{
stop("Incorrect usage of method argument. Please choose intrinsic or extrinsic")
}
return(so3dist)
}
#' @rdname rot.dist
#' @export
rot.dist.Q4 <- function(x, Q2=id.Q4 ,method='extrinsic', p=1,...) {
Q1<-formatQ4(x)
Q2<-formatQ4(Q2)
method <- try(match.arg(method,c('projected','extrinsic','intrinsic')),silent=T)
if (isa(method,"try-error"))
stop("method needs to be one of 'projected', 'extrinsic' or 'intrinsic'.")
if(method=='intrinsic'){
q4dist<-c(RdistC(Q1,Q2))^p
}else if(method%in%c('projected','extrinsic')){
q4dist<-c(EdistC(Q1,Q2))^p
}else{
stop("Incorrect usage of method argument. Please choose intrinsic or extrinsic")
}
return(q4dist)
}
#' Misorientation angle
#'
#' Compute the misorientation angle of a rotation.
#'
#' Every rotation can be thought of as some reference coordinate system rotated about an axis through an angle. These quantities
#' are referred to as the misorientation axis and misorientation angle, respectively, in the material sciences literature.
#' This function returns the misorentation angle associated with a rotation assuming the reference coordinate system
#' is the identity.
#'
#' @param x \eqn{n\times p}{n-by-p} matrix where each row corresponds to a random rotation in matrix (\eqn{p=9}) or quaternion (\eqn{p=4}) form.
#' @return Angle of rotation.
#' @seealso \code{\link{mis.axis}}
#' @export
#' @examples
#' rs <- rcayley(20, kappa = 20)
#' Rs <- genR(rs, S = id.SO3)
#' mis.angle(Rs)
#'
#' #If the central orientation is id.SO3 then mis.angle(Rs) and abs(rs) are equal
#' all.equal(mis.angle(Rs), abs(rs)) #TRUE
#'
#' #For other reference frames, the data must be centered first
#' S <- genR(pi/2)
#' RsS <- genR(rs, S = S)
#' mis.axis(RsS-S)
#' all.equal(mis.angle(RsS-S),abs(rs)) #TRUE
#'
#' #If the central orientation is NOT id.SO3 then mis.angle(Rs) and abs(rs) are usual unequal
#' Rs <- genR(rs, S = genR(pi/8))
#' all.equal(mis.angle(Rs), abs(rs)) #Mean relative difference > 0
mis.angle<-function(x){
UseMethod("mis.angle")
}
#' @rdname mis.angle
#' @export
mis.angle.SO3 <- function(x){
Rs<-formatSO3(x)
theta<-c(rdistSO3C(Rs,diag(1,3,3)))
return(theta)
}
#' @rdname mis.angle
#' @export
mis.angle.Q4 <- function(x){
Qs<-formatQ4(x)
theta<-2*acos(Qs[,1])
class(theta)<-"numeric"
return(theta)
}
#' Misorientation axis
#'
#' Determine the misorientation axis of a rotation.
#'
#' Every rotation can be interpreted as some reference coordinate system rotated about an axis through an angle. These quantities
#' are referred to as the misorientation axis and misorientation angle, respectively, in the material sciences literature.
#' This function returns the misorentation axis associated with a rotation assuming the reference coordinate system
#' is the identity. The data must be centered before calling \code{mis.axis} if a different coordinate system is required.
#'
#' @param x \eqn{n\times p}{n-by-p} matrix where each row corresponds to a random rotation in matrix (\eqn{p=9}) or quaternion (\eqn{p=4}) form.
#' @param ... additional arguments.
#' @return Axis in form of three dimensional vector of length one.
#' @seealso \code{\link{mis.angle}}
#' @export
#' @examples
#' rs <- rcayley(20, kappa = 20)
#'
#' #If the reference frame is set to id.SO3 then no centering is required
#' Rs <- genR(rs, S = id.SO3)
#' mis.axis(Rs)
#' all.equal(Rs, as.SO3(mis.axis(Rs), mis.angle(Rs)))
#'
#' #For other reference frames, the data must be centered first
#' S <- genR(pi/2)
#' RsS <- genR(rs, S = S)
#' mis.axis(RsS-S)
#' all.equal(mis.angle(RsS-S),abs(rs)) #TRUE
#'
#' Qs <- genR(rs, S = id.Q4, space = "Q4")
#' mis.axis(Qs)
#' all.equal(Qs, as.Q4(mis.axis(Qs), mis.angle(Qs)))
mis.axis<-function(x,...){
UseMethod("mis.axis")
}
#' @rdname mis.axis
#' @export
mis.axis.SO3<-function(x,...){
R<-formatSO3(x)
n<-nrow(R)
u<-matrix(NA,n,3)
for(i in 1:n){
Ri<-matrix(R[i,],3,3)
X <- Ri - t(Ri)
u[i,] <- rev(X[upper.tri(X)])*c(-1,1,-1)
norm<-sqrt(sum(u[i,]^2))
if(norm!=0){
u[i,]<-u[i,]/norm
}
}
return(u) # will be trouble, if R is symmetric, i.e. id, ....
}
#' @rdname mis.axis
#' @export
mis.axis.Q4<- function(x,...){
q<-formatQ4(x)
theta<-mis.angle(q)
u <- q[,2:4]/sin(theta/2)
if(any(is.infinite(u)|is.nan(u))){
infs<-which(is.infinite(u)|is.nan(u))
u[infs]<-0
}
u<-matrix(u,ncol=3)
return(u)
}
eskew <- function(U) {
ulen<-sqrt(sum(U^2))
if(ulen!=0){
U<-U/ulen
}
u <- U[1]
v <- U[2]
w <- U[3]
res <- matrix((-1) * c(0, -w, v, w, 0, -u, -v, u, 0), ncol = 3)
return(res)
}
#' Generate rotations
#'
#' Generate rotations in matrix format using Rodrigues' formula or quaternions.
#'
#' Given a vector \eqn{U=(u_1,u_2,u_3)^\top\in R^3}{U=(u1,u2,u3)' in R^3} of length one and angle of rotation \eqn{r}, a \eqn{3\times 3}{3-by-3} rotation
#' matrix is formed using Rodrigues' formula
#' \deqn{\cos(r)I_{3\times 3}+\sin(r)\Phi(U)+(1-\cos(r))UU^\top}{cos(r)I+sin(r)\Phi(U)+(1-cos(r))UU'}
#' where \eqn{I_{3\times 3}}{I} is the \eqn{3\times 3}{3-by-3} identity matrix, \eqn{\Phi(U)} is a \eqn{3\times 3}{3-by-3} skew-symmetric matrix
#' with upper triangular elements \eqn{-u_3}{-u3}, \eqn{u_2}{u2} and \eqn{-u_1}{-u1} in that order.
#'
#' For the same vector and angle a quaternion is formed according to \deqn{q=[cos(\theta/2),sin(\theta/2)U]^\top.}{q=[cos(theta/2),sin(theta/2)U]'.}
#'
#' @param r vector of angles.
#' @param S central orientation.
#' @param space indicates the desired representation: rotation matrix "SO3" or quaternions "Q4."
#' @return A \eqn{n\times p}{n-by-p} matrix where each row is a random rotation matrix (\eqn{p=9}) or quaternion (\eqn{p=4}).
#' @export
#' @examples
#' r <- rvmises(20, kappa = 0.01)
#' Rs <- genR(r, space = "SO3")
#' Qs <- genR(r, space = "Q4")
genR <- function(r, S = NULL, space='SO3') {
if(!(space %in% c("SO3","Q4")))
stop("Incorrect space argument. Options are: SO3 and Q4. ")
n<-length(r)
theta <- acos(stats::runif(n, -1, 1))
# Generate angles phi from a uniform distribution from -pi to pi
phi <- stats::runif(n, -pi, pi)
u <- matrix(c(sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta)),n,3)
if(space=="SO3"){
#For now the C++ code is broken, use R functions
#S<-matrix(S,3,3)
#o<-SO3defaultC(u,r)
#o<-genrC(r,S,1,u)
o<-as.SO3.default(x=u,theta=r)
if(is.null(S)){
class(o) <- "SO3"
return(o)
}else{
if(is.Q4(S)){
S <- as.SO3(S)
}
S<-formatSO3(S)
St<-t(matrix(S,3,3))
o<-center.SO3(o,St)
class(o) <- "SO3"
return(o)
}
}else{
#S<-matrix(S,1,4)
#q<-Q4defaultC(u,r)
#q<-genrC(r,S,2,u)
q<-matrix(c(cos(r/2),sin(r/2)*u),n,4)
if(is.null(S)){
class(q)<-"Q4"
return(q)
}else{
if(is.SO3(S)){
S <- as.Q4(S)
}
S<-formatQ4(S)
S<--S
q<-center.Q4(q,S)
class(q)<-"Q4"
return(q)
}
}
}
#' Matrix exponential
#'
#' Compute the matrix exponential for skew-symmetric matrices according to the usual Taylor expansion.
#' The expansion is significantly simplified for skew-symmetric matrices, see \cite{moakher02}.
#' Maps a matrix belonging to the lie algebra \eqn{so(3)} into the lie group \eqn{SO(3)}.
#'
#' @param x single \eqn{3\times 3}{3-by-3} skew-symmetric matrix or \eqn{n\times 9}{n-by-9} sample of skew-symmetric matrices.
#' @return Matrix \eqn{e^{\bm H}}{e^H} in \eqn{SO(3)} .
#' @details moakher02
#' @export
#' @examples
#' Rs <- ruars(20, rcayley)
#' lRs <- log(Rs) #Take the matrix logarithm for rotation matrices
#' Rs2 <- skew.exp(lRs) #Go back to rotation matrices
#' all.equal(Rs, Rs2)
skew.exp <- function(x) {
if(length(x)==9){
H<-matrix(x,3,3)
Hmat<-expskewC(H)
class(Hmat)<-"SO3"
return(Hmat)
}else{
Hmat<-expskewCMulti(x)
class(Hmat)<-"SO3"
return(Hmat)
}
}
#' Rotation logarithm
#'
#' Compute the logarithm of a rotation matrix, which results in a \eqn{3\times 3}{3-by-3} skew-symmetric matrix. This function maps
#' the lie group \eqn{SO(3)} into its tangent space, which is the space of all \eqn{3\times 3}{3-by-3} skew symmetric matrices,
#' the lie algebra \eqn{so(3)}. For details see e.g. \cite{moakher02}.
#'
#' @param x \eqn{n\times 9}{n-by-9} matrix where each row corresponds to a random rotation matrix.
#' @param ... additional arguments.
#' @return Skew symmetric matrix \eqn{\log(R)}{log(R)}.
#' @details moakher02
#' @export
#' @examples
#' Rs <- ruars(20, rcayley)
#'
#' #Here we demonstrate how the logarithm can be used to determine the angle and
#' #axis corresponding to the provided sample
#'
#' lRs <- log(Rs) #Take the logarithm of the sample
#' Ws <- lRs[,c(6, 7, 2)] #The appropriate diagonal entries are the axis*angle
#' lens <- sqrt(rowSums(Ws^2))
#' axes <- mis.axis(Rs)
#' angs <- mis.angle(Rs)
#' all.equal(axes, Ws/lens)
#' all.equal(angs, lens)
log.SO3 <- function(x,...) {
if(length(x)==9){
x<-matrix(x,3,3)
return(logSO3C(x))
}else{
return(logSO3CMulti(x))
}
}
#' Projection into SO(3)
#'
#' Project an arbitrary \eqn{3\times 3}{3-by-3} matrix into \eqn{SO(3)}.
#'
#' This function uses the process detailed in Section 3.1 of \cite{moakher02} to project an arbitrary \eqn{3\times 3}{3-by-3} matrix into \eqn{SO(3)}.
#' More specifically it finds the closest orthogonal 3-by-3 matrix with determinant one to the provided matrix.
#'
#' @param M \eqn{3\times 3}{3-by-3} matrix to project into \eqn{SO(3)}.
#' @return Projection of \eqn{\bm M}{M} into \eqn{SO(3)}.
#' @seealso \code{\link{mean.SO3}}, \code{\link{median.SO3}}
#' @export
#' @examples
#' #Project an arbitrary 3x3 matrix into SO(3)
#' M<-matrix(rnorm(9), 3, 3)
#' project.SO3(M)
#'
#' #Project a sample arithmetic mean into SO(3), same as 'mean'
#' Rs <- ruars(20, rcayley)
#' Rbar <- colSums(Rs)/nrow(Rs)
#' project.SO3(Rbar) #The following is equivalent
#' mean(Rs)
project.SO3 <- function(M) {
M<-matrix(M,3,3)
R<-projectSO3C(M)
return(R)
}
#' Sample distance
#'
#' Compute the sum of the \eqn{p^{th}}{pth} order distances between each row of x and S.
#'
#' @name rotdist.sum
#' @param x \eqn{n\times p}{n-by-p} matrix where each row corresponds to a random rotation in matrix (\eqn{p=9}) or quaternion (\eqn{p=4}) form.
#' @param S the individual matrix of interest, usually an estimate of the mean.
#' @param method type of distance used method in "extrinsic" or "intrinsic"
#' @param p the order of the distances to compute.
#' @return The sum of the pth order distance between each row of x and S.
#' @seealso \code{\link{rot.dist}}
#' @aliases rotdist.sum.SO3 rotdist.sum.Q4
#' @export
#' @examples
#' Rs <- ruars(20, rvmises, kappa = 10)
#'
#' SE1 <- median(Rs) #Projected median
#' SE2 <- mean(Rs) #Projected mean
#' SR2 <- mean(Rs, type = "geometric") #Geometric mean
#'
#' #I will use "rotdist.sum" to verify these three estimators minimize the
#' #loss function they are designed to minimize relative to the other esimators.
#' #All of the following statements should evaluate to "TRUE"
#'
#' #The projected mean minimizes the sum of squared Euclidean distances
#' rotdist.sum(Rs, S = SE2, p = 2) < rotdist.sum(Rs, S = SE1, p = 2)
#' rotdist.sum(Rs, S = SE2, p = 2) < rotdist.sum(Rs, S = SR2, p = 2)
#'
#' #The projected median minimizes the sum of first order Euclidean distances
#' rotdist.sum(Rs, S = SE1, p = 1) < rotdist.sum(Rs, S = SE2, p = 1)
#' rotdist.sum(Rs, S = SE1, p = 1) < rotdist.sum(Rs, S = SR2, p = 1)
#'
#' #The geometric mean minimizes the sum of squared Riemannian distances
#' rotdist.sum(Rs, S = SR2, p = 2, method = "intrinsic") <
#' rotdist.sum(Rs, S = SE1, p = 2, method = "intrinsic")
#' rotdist.sum(Rs, S = SR2, p = 2, method = "intrinsic") <
#' rotdist.sum(Rs, S = SE2, p = 2, method = "intrinsic")
rotdist.sum<-function(x, S = genR(0, space=class(x)), method='extrinsic', p=1){
UseMethod( "rotdist.sum" )
}
#' @rdname rotdist.sum
#' @export
rotdist.sum.SO3 <- function(x, S = id.SO3, method='extrinsic', p=1) {
return(sum(rot.dist(x,S, method=method, p=p)))
}
#' @rdname rotdist.sum
#' @export
rotdist.sum.Q4 <- function(x, S = id.Q4, method='extrinsic', p=1) {
return(sum(rot.dist(x,S, method=method, p=p)))
}
#' Center rotation data
#'
#' This function will take the sample Rs and return the sample Rs centered at
#' S. That is, the ith observation of Rs denoted \eqn{R_i}{Ri} is returned as \eqn{S^\top R_i}{S'Ri}.
#' If S is the true center then the projected mean should be close to the 3-by-3 identity matrix.
#'
#' @param x \eqn{n\times p}{n-by-p} matrix where each row corresponds to a random rotation in matrix (\eqn{p=9}) or quaternion (\eqn{p=4}) form.
#' @param S the rotation or a matrix of \eqn{n\times p}{n-by-p} rotations about which to center each row of x.
#' @return The sample centered about S
#' @export
#' @examples
#' Rs <- ruars(5, rcayley)
#' cRs <- center(Rs, mean(Rs))
#' mean(cRs) #Close to identity matrix
#'
#' all.equal(cRs, Rs - mean(Rs)) #TRUE, center and '-' have the same effect
#' #See ?"-.SO3" for more details
#'
#' center(Rs,Rs) #n-Identity matrices: If the second argument is of the same dimension
#' #as Rs then each row is centered around the corresponding
#' #row in the first argument
center<-function(x,S){
UseMethod( "center" )
}
#' @rdname center
#' @export
center.SO3<-function(x,S){
#This takes a set of observations in SO3 and centers them around S
Rs<-formatSO3(x)
if(length(S)==9){
S<-matrix(formatSO3(S),3,3)
Rs<-centerCpp(Rs,S)
}else if(nrow(x)==nrow(S)){
for(i in 1:nrow(x)){
Rs[i,]<-centerCpp(matrix(Rs[i,],1,9),matrix(S[i,],3,3))
}
}else{
stop("S must either be a single rotation or have as many rows as x.")
}
class(Rs)<-"SO3"
return(Rs)
}
#' @rdname center
#' @export
center.Q4<-function(x,S){
#This takes a set of observations in Q4 and centers them around S
Qs<-formatQ4(x)
S<-formatQ4(S)
if(length(S)==4){
S<--S
for(i in 1:nrow(Qs)){
Qs[i,]<-qMult(S,Qs[i,])
}
}else if(nrow(x)==nrow(S)){
for(i in 1:nrow(Qs)){
Si <- -S[i,]
Qs[i,]<-qMult(Si,Qs[i,])
}
}else{
stop("S must either be a single rotation or have as many rows as x.")
}
class(Qs)<-"Q4"
return(Qs)
}
formatSO3<-function(Rs){
#This function will take input and format it to work with our functions
#It also checks that the data is actually SO3 and of appropriate dimension
len<-length(Rs)
if(len%%9!=0)
stop("Data needs to have length divisible by 9.")
Rs<-matrix(Rs,len/9,9)
if (!all(is.SO3(Rs)))
warning("At least one of the given observations is not in SO(3). Use result with caution.")
class(Rs)<-"SO3"
return(Rs)
}
formatQ4<-function(Qs){
#This condition is checked later on
#if(length(Qs)%%4!=0)
# stop("Data needs to have length divisible by 4.")
Qs<-matrix(Qs,length(Qs)/4,4)
if (!all(is.Q4(Qs)))
warning("At least one of the given observations is not a unit quaternion. Use result with caution.")
#if(length(Qs)==4)
# return(as.Q4(Qs))
#else
class(Qs)<-"Q4"
return(Qs)
}
pMat<-function(p){
#Make the matrix P from quaternion p according to 3.1 of Rancourt, Rivest and Asselin (2000)
#This is one way to multiply quaternions
p<-as.vector(p)
Pmat<-matrix(0,4,4)
Pmat[,1]<-p
Pmat[,2]<-p[c(2,1,4,3)]*c(-1,1,1,-1)
Pmat[,3]<-c(-p[3:4],p[1:2])
Pmat[,4]<-p[4:1]*c(-1,1,-1,1)
return(Pmat)
}
qMult<-function(q1,q2){
#Forms quaternion product q1 x q2, i.e., rotate q2 by q1
#This functions utilizes the
q1<-formatQ4(q1)
q2<-formatQ4(q2)
q1q2<-pMat(q1)%*%matrix(q2,4,1)
return(formatQ4(q1q2))
}
proj<-function(u,v){
#Project the vector v orthogonally onto the line spanned by the vector u
num<-t(u)%*%v
denom<-t(u)%*%u
return(num*u/denom)
}
tLogMat <- function(x, S) {
tra <- log.SO3(t(S) %*% matrix(x, 3, 3))
return(as.vector(tra))
}
tLogMat2 <- function(x, S) {
tra <- log.SO3(matrix(x, 3, 3)%*%t(S))
return(as.vector(tra))
}
vecNorm <- function(x, S, ...) {
n <- sqrt(length(x))
cenX <- x - as.vector(S)
return(norm(matrix(cenX, n, n), ...))
}
|
cb5889970bb86075a8ca5cf6c69c082ddbcaa2d1
|
cab285249f5e5e1fbd40897c522870ff97031a7b
|
/man/bowlerEconRate.Rd
|
af3888403e176b33795656be4414642606dbe60e
|
[] |
no_license
|
dharmang/cricketr
|
61bd5a107fb143d471f36e256132b6f10092891a
|
4e09953d4f9d427771ce928bf8b815398a83206c
|
refs/heads/master
| 2020-05-26T09:22:51.262187
| 2019-03-08T01:24:46
| 2019-03-08T01:24:46
| 188,184,725
| 1
| 0
| null | 2019-05-23T07:36:12
| 2019-05-23T07:36:11
| null |
UTF-8
|
R
| false
| false
| 1,553
|
rd
|
bowlerEconRate.Rd
|
\name{bowlerEconRate}
\alias{bowlerEconRate}
\title{
Compute and plot the Mean Economy Rate versus wickets taken
}
\description{
This function computes the mean economy rate for the wickets taken and plot this
}
\usage{
bowlerEconRate(file, name = "A Bowler")
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{file}{
This is the <bowler>.csv file obtained with an initial getPlayerData()
}
\item{name}{
Name of the bowler
}
}
\details{
More details can be found in my short video tutorial in Youtube
https://www.youtube.com/watch?v=q9uMPFVsXsI
}
\value{
None
}
\references{
http://www.espncricinfo.com/ci/content/stats/index.html\cr
https://gigadom.wordpress.com/
}
\author{
Tinniam V Ganesh
}
\note{
Maintainer: Tinniam V Ganesh <tvganesh.85@gmail.com>
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{bowlerWktsFreqPercent}}
\code{\link{relativeBowlingER}}
\code{\link{relativeBowlingPerf}}
}
\examples{
# Get or use the <bowler>.csv obtained with getPlayerData()
# kumble <- getPlayerData(30176,dir=".", file="kumble.csv",type="batting",
# homeOrAway=c(1,2),result=c(1,2,4))
# Retrieve the file path of a data file installed with cricketr
pathToFile <- system.file("data", "kumble.csv", package = "cricketr")
bowlerEconRate(pathToFile,"Anil Kumble")
# Note: This example uses the file kumble.csv from the /data directory. However
# you can use any directory as long as the data file exists in that directory.
# The general format is pkg-function(pathToFile,par1,...)
}
|
591b95107a05f5c12a0480d28fb60a08b13ee8ff
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/ProbReco/man/sim_hierarchy.Rd
|
914f9e147ab5919c5d826e9a8541b87e25361a4b
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 625
|
rd
|
sim_hierarchy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{sim_hierarchy}
\alias{sim_hierarchy}
\title{Synthetic hierarchical data from stationary Gaussian ARMA models.}
\format{
A tibble with a time index Time and one column for each of the seven variables in
the hierarchy
}
\usage{
sim_hierarchy
}
\description{
A synthetic 7-variable hierachy. The series AA and AB aggregate to A, the series BA and
BB aggregate to B, the series A and B aggregate to Tot. All bottom level series are simulated
from ARMA models. There are 1506 observations generated.
}
\keyword{datasets}
|
1c0d746d80259280fb4a9f240e9abfff94e2abf3
|
24cc0a2786cef571aeceffa4008fe0cf7c14b6c6
|
/script/Bil69_NA_plot_enrich.R
|
3a11b572f7d3a7cc60a60c237c1cc6dd89066fbb
|
[] |
no_license
|
nicwulab/N2_evol_contingency
|
568c94c8667ebbb994d2feb50c322f04cea4666c
|
039816375a249bb3d089556557baad5ddffdda27
|
refs/heads/main
| 2023-04-11T18:35:54.918988
| 2022-10-11T15:32:47
| 2022-10-11T15:32:47
| 376,881,476
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,347
|
r
|
Bil69_NA_plot_enrich.R
|
#R code
# Purpose: compare per-variant enrichment between the two replicates of the
# Bil69 NA multi-mutant library and save a replicate-vs-replicate scatter
# plot to disk.
library(ggplot2)
library(scales)
library(RColorBrewer)
library(readr)
library(tidyr)
library(reshape)
library(stringr)
library(dplyr)
library(ggrepel)
library(gridExtra)
require(cowplot)

# Scatter plot of replicate-1 vs replicate-2 enrichment on log10 scales.
# Bil69_data: data frame/tibble with numeric columns Rep1Enrich and Rep2Enrich.
# graphname:  output image path handed to ggsave().
# The grey rectangle shades the region where BOTH replicates exceed 2-fold
# enrichment (i.e. beyond log10(2) on each axis).
# Side effect: writes a 2x2-inch, 600-dpi image to `graphname`.
PlotCompareFit_Rep <- function(Bil69_data, graphname){
textsize <- 8
p <- ggplot() +
geom_rect(data=NULL,aes(xmin=log10(2),xmax=Inf,ymin=log10(2),ymax=Inf), color=NA, fill=alpha('grey60', 0.5)) +
geom_point(data=Bil69_data, aes(x=log10(Rep1Enrich), y=log10(Rep2Enrich)), pch=16, size=0.6, color='black', alpha=0.5) +
#scale_color_manual(values=c('grey30'),drop=FALSE) +
theme_cowplot(12) +
theme(axis.title=element_text(size=textsize,face="bold"),
axis.text=element_text(size=textsize,face="bold"),
legend.title=element_blank(),
legend.key.size=unit(0.1,'in'),
legend.spacing.x=unit(0.03, 'in'),
legend.text=element_text(size=textsize,face="bold"),
legend.position='top') +
labs(x=expression(bold(log['10']~enrich~'(Rep 1)')),y=expression(bold(log['10']~enrich~'(Rep 2)')))
ggsave(graphname,p,height=2,width=2,dpi=600, bg='white')
}

# Load the filtered enrichment table and render the comparison plot.
Bil69_data <- read_tsv('result/Bil69_MultiMutLib_filtered.tsv')
PlotCompareFit_Rep(Bil69_data,'graph/Bil69_mutlib_rep_compare.png')
|
2308e5b477e5281c8cdc5c3723401b7678df7bde
|
15f94600eb4d598d1a46626d3320eb388bd3d914
|
/cachematrix.R
|
53e15b3aa269a14c63912a50b41afd04ed775017
|
[] |
no_license
|
Akshaydalal2511/ProgrammingAssignment2
|
caecffb223ab975eb651b308d668fc1db50eaad5
|
a8c63b960369d6ff901639fe8fc5a4b67c9a2377
|
refs/heads/master
| 2021-05-01T07:37:41.214807
| 2018-02-12T10:53:20
| 2018-02-12T10:53:20
| 121,159,649
| 0
| 0
| null | 2018-02-11T19:36:07
| 2018-02-11T19:36:07
| null |
UTF-8
|
R
| false
| false
| 1,032
|
r
|
cachematrix.R
|
## Caching the inverse of a Matrix.
## makeCacheMatrix() builds a special "matrix" object that can cache its
## inverse; cacheSolve() returns the inverse, reusing the cached value when
## available rather than recomputing it.

## Create a cache-aware matrix wrapper.
## x: an invertible matrix.
## Returns a list of accessors: set/get for the matrix itself and
## setinv/getinv for the cached inverse. Setting a new matrix with set()
## invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL
  set <- function(y) {
    x <<- y
    i <<- NULL  # new matrix => drop the now-stale cached inverse
  }
  get <- function() x
  setinv <- function(invMat) i <<- invMat
  getinv <- function() i
  list(set = set, get = get,
       setinv = setinv, getinv = getinv)
}

## Return the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse is already cached it is returned immediately; otherwise it
## is computed with solve(), stored in the cache, and returned.
## ... is forwarded to solve().
cacheSolve <- function(x, ...) {
  i <- x$getinv()
  if (!is.null(i)) {
    message("Getting Cached Inverse Matrix")
    ## BUG FIX: the original only evaluated `i` here without returning, so
    ## execution fell through and the inverse was recomputed on every call.
    return(i)
  }
  matData <- x$get()
  i <- solve(matData, ...)
  x$setinv(i)
  i
}
|
8b3e666d92bf4698f249f5d8a3ca83d5fd7bb0d8
|
dd8fd76523c4338755517e437e7e9b57bf8e3e15
|
/scripts/build_sce.R
|
af27c91bf90966bc8d19b774689a099f570554b4
|
[] |
no_license
|
jperales/data_KuppeIbrahim2020
|
d887eba107c9d2227be03659271f45be55019170
|
6919eec100906d0d4dac357788d75118fd34777f
|
refs/heads/main
| 2023-03-07T12:28:55.710467
| 2021-02-22T10:39:04
| 2021-02-22T10:39:04
| 341,155,912
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,467
|
r
|
build_sce.R
|
## Build a SingleCellExperiment from the CD10-negative kidney map
## (Kuppe/Ibrahim 2020): load the UMI count matrix plus gene/cell metadata,
## normalise the counts, attach the published UMAP embedding, and save the
## object as an .rds file under OUTDIR.

## Setting the environment
### Internal variables
set.seed(1234)
OUTDIR <- "./data/CD10negative/"
### Load libraries
library("Matrix")
library("SingleCellExperiment")
library("scran")
# NOTE(review): logNormCounts() used below is exported by scuttle/scater,
# which is not attached explicitly here -- confirm it is reachable via the
# packages above before running.

## Load data: sparse count matrix, row (gene) and column (cell) annotations,
## and the authors' original UMAP coordinates.
dat <- Matrix::readMM("./data/CD10negative/kidneyMap_UMI_counts.mtx")
rowDat <- read.table("./data/CD10negative/kidneyMap_UMI_counts_rowData.txt",
                     sep = ",", header = TRUE, stringsAsFactors = FALSE)
colDat <- read.table("./data/CD10negative/kidneyMap_UMI_counts_colData.txt",
                     sep = ",", header = TRUE, stringsAsFactors = FALSE)
umapCoords <- read.table("data/CD10negative/kidneyMap_UMI_umapCoords.csv",
                         sep = ",")
umapCoords <- as.matrix(umapCoords)

# Genes: index rows by Ensembl gene ID
rownames(dat) <- rowDat$ENSEMBL.ID
rownames(rowDat) <- rowDat$ENSEMBL.ID
# Cells: synthetic cell1..cellN identifiers shared by all three objects
colnames(dat) <- paste0("cell", 1:ncol(dat))
rownames(colDat) <- paste0("cell", 1:ncol(dat))
rownames(umapCoords) <- paste0("cell", 1:ncol(dat))
# Metafeatures: name the two embedding dimensions
colnames(umapCoords) <- c("UMAP_1", "UMAP_2")

## Create a Single-Cell Experiment
sce <- SingleCellExperiment(assays = list("counts" = dat),
                            colData = colDat,
                            rowData = rowDat)

## Normalize data
# NOTE: Params defined by M.Ibrahim
sce = scran::computeSumFactors(sce,
                               sizes = seq(10, 200, 20),
                               clusters = sce$Annotation.Level.3,
                               positive = TRUE)
sce <- logNormCounts(sce)

## Add original UMAP coords
# BUG FIX: the SingleCellExperiment setter is reducedDim<-(); the original
# called reduceDim<-(), which does not exist and would error at runtime.
reducedDim(sce, "UMAP") <- umapCoords

# Save data
saveRDS(sce, file = paste0(OUTDIR, "/sce.rds"))
|
a68da64ec93d835f5737a814cec4e47ab8cf8a65
|
5898e63d46de41245d5018c298de944559d70825
|
/kili_nov2013/map/src/map.R
|
c64cb874969f17a602d5552277535ed9d4ac22a2
|
[] |
no_license
|
fdetsch/snippets
|
d7de9ddd77376a40b68912e39c107a363db15cc6
|
8085d950cb247d6e8068ed72201a76c525ef0d5e
|
refs/heads/master
| 2021-01-17T09:30:19.170821
| 2017-02-13T13:26:24
| 2017-02-13T13:26:24
| 10,215,881
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,783
|
r
|
map.R
|
### Environmental settings
# Build the official Kilimanjaro plot map: download/merge ESRI-topo and
# Skobbler OSM tiles, composite and reproject them, then render a poster
# TIFF with plot markers, labels, scale bar and north arrow.
# NOTE(review): rm(list = ls()) plus hard-coded setwd() make this a one-off
# analysis script; it is not meant to be sourced from other code.
# Clear workspace
rm(list = ls(all = T))
# Set working directory (machine/OS specific paths)
switch(Sys.info()[["sysname"]],
"Linux" = setwd("/media/permanent/phd/kili_nov2013/map"),
"Windows" = setwd("E:/phd/kili_nov2013/map"))
# Load required packages
lib <- c("OpenStreetMap", "raster", "rgdal", "doParallel", "png", "plotrix")
sapply(lib, function(x) require(x, character.only = T))
# Required functions (project-local helpers, not part of this file)
source("src/getTileCenters.R")
source("src/getOsmTiles.R")
# Settings: set rsmpl to TRUE to (re-)resample the Skobbler tiles below
rsmpl <- F
# Parallelization: 3-worker cluster used by the %dopar% loops; shut down
# with stopCluster() at the end of the script
registerDoParallel(cl <- makeCluster(3))
### Data import
## Plot coordinates: keep valid plots only, then project to UTM zone 37S
plt.shp <- readOGR(dsn = "plot", layer = "Plots_MP")
plt.shp <- plt.shp[plt.shp$VALID == "Y", ]
plt.shp.utm <- spTransform(plt.shp, CRS("+init=epsg:32737"))
# ## Import additional GPS data by David
#
# dvd_1 <- foreach(i = list("12340012_L", "12340012_P", "12340012_A")) %do% {
# tmp.shp <- readOGR(dsn = "../David", layer = i)
# projection(tmp.shp) <- CRS("+init=epsg:4326")
#
# spTransform(tmp.shp, CRS("+init=epsg:32737"))
# }
## OSM data
# Center coordinate of final map (FOD3)
cntr <- data.frame(plt.shp.utm[plt.shp.utm$PLOTID == "fod3", "PLOTID"])
cntr <- data.frame(Lon = cntr[, 2], Lat = cntr[, 3], PlotID = cntr[, 1])
# Get ESRI topo and Skobbler data
# (one sequential %do% pass per tile source / resolution / output folder)
jnk <- foreach(plt.rds = rep(30000, 2), plt.res = c(5000, 1000),
path.out = c("tls/esri-topo", "tls/skobbler"),
type = c("esri-topo", "skobbler")) %do% {
tmp.coords <- getTileCenters(plt.rds, plt.res)
tmp.osm <- getOsmTiles(tile.cntr = tmp.coords,
location = cntr,
plot.res = plt.res,
plot.bff = 50,
tmp.folder = "C:/Users/fdetsch/AppData/Local/Temp/R_raster_tmp",
path.out = path.out,
type = type, mergeTiles = T)
}
# Merge ESRI data into one mosaic written to tls/esri-topo/esri_all.tif
fls.esri <- list.files("tls/esri-topo", pattern = ".tif$", full.names = T)
rst.esri <- foreach(i = fls.esri, .packages = lib) %dopar% stack(i)
rst.esri.mrg <- do.call(function(...) {
merge(..., tolerance = 1, overwrite = T, format = "GTiff",
filename = "tls/esri-topo/esri_all")
}, rst.esri)
# Resample and merge Skobbler data (exclude already-resampled *_rsmpl files)
fls.skbl <- list.files("tls/skobbler", pattern = "kili_tile_.*.tif$", full.names = T)
fls.skbl <- fls.skbl[-grep("rsmpl", fls.skbl)]
rst.skbl <- foreach(i = fls.skbl, .packages = lib) %dopar% stack(i)
rst.skbl.ext <- Reduce("union", sapply(rst.skbl, extent))
template <- raster(rst.skbl.ext, crs = projection(rst.skbl[[1]]))
res(template) <- res(rst.skbl[[1]])
# Nearest-neighbour resample each tile onto the common template grid
# (only when rsmpl == TRUE and the *_rsmpl.tif file does not exist yet)
jnk <- if (rsmpl == T) {
foreach(i = rst.skbl, j = fls.skbl, .packages = lib) %dopar% {
if (!file.exists(paste(substr(j, 1, nchar(j) - 4), "rsmpl.tif", sep = "_"))) {
crp <- crop(template, i)
resample(i, crp, method = "ngb",
filename = paste(substr(j, 1, nchar(j) - 4), "rsmpl", sep = "_"),
format = "GTiff", overwrite = F)
}
}
}
fls.skbl.rsmpl <- list.files("tls/skobbler", pattern = "rsmpl.tif$",
full.names = T)
rst.skbl.rsmpl <- foreach(i = fls.skbl.rsmpl, .packages = lib) %dopar% stack(i)
rst.skbl.rsmpl.mrg <- do.call(function(...) {
merge(..., tolerance = 1, overwrite = T, format = "GTiff",
filename = "tls/skobbler/skobbler_all", overlap = F)
}, rst.skbl.rsmpl)
# Intersect data from ESRI and Skobbler (reload the mosaics from disk)
rst.esri.mrg <- stack("tls/esri-topo/esri_all.tif")
rst.skbl.rsmpl.mrg <- stack("tls/skobbler/skobbler_all.tif")
rst.esri.mrg.rsmpl <- resample(rst.esri.mrg, rst.skbl.rsmpl.mrg, tolerance = 1,
method = "ngb", format = "GTiff",
filename = "tls/esri-topo/esri_all_rsmpl")
# Replace unoccupied cells in Skobbler data with ESRI data
# (cell values 238:240 presumably encode empty/background tiles -- confirm)
rst.esri.skbl <- overlay(rst.esri.mrg.rsmpl, rst.skbl.rsmpl.mrg,
fun = function(x, y) {
y[y[] %in% 238:240] <- x[y[] %in% 238:240]
return(y)
}, filename = "tls/esri_skrobbler_mrg", format = "GTiff")
# Reproject composite raster to UTM 32S
# rst.esri.skbl.utm <- projectRaster(rst.esri.skbl, crs = projection(plt.shp.utm),
# filename = "tls/esri_skrobbler_mrg_utm",
# format = "GTiff", method = "ngb")
rst.esri.skbl.utm <- stack("tls/esri_skrobbler_mrg_utm.tif")
# Crop composite raster
# NOTE(review): drawExtent() is interactive -- the user must click twice on
# the plot, so this section cannot run unattended.
plotRGB(rst.esri.skbl.utm)
points(plt.shp.utm)
crp.xtnt <- drawExtent()
rst.esri.skbl.utm.crp <- crop(rst.esri.skbl.utm, crp.xtnt,
filename = "tls/esri_skrobbler_mrg_utm_crp3",
format = "GTiff", method = "ngb", overwrite = T)
rst.esri.skbl.utm.crp <- stack("tls/esri_skrobbler_mrg_utm_crp3.tif")
### Plotting the official poster
# North arrow image overlaid onto the map at fixed UTM coordinates below
north.arrow <- readPNG("north_arrow.png")
# Manual label arrangement: thigmophobe() proposes label positions, then
# several plot labels are repositioned by hand to avoid overlaps
# (1 = below, 2 = left, 3 = above, 4 = right per plotrix convention --
# TODO confirm against the plotrix docs)
text.pos <- thigmophobe(coordinates(plt.shp.utm)[, 1],
coordinates(plt.shp.utm)[, 2])
text.pos[grep("sun1", plt.shp.utm$PLOTID)] <- 1
text.pos[grep("fpd3", plt.shp.utm$PLOTID)] <- 2
text.pos[grep("fod4", plt.shp.utm$PLOTID)] <- 4
text.pos[grep("fod5", plt.shp.utm$PLOTID)] <- 2
text.pos[grep("fpo2", plt.shp.utm$PLOTID)] <- 2
text.pos[grep("fod2", plt.shp.utm$PLOTID)] <- 4
text.pos[grep("fpd1", plt.shp.utm$PLOTID)] <- 2
text.pos[grep("fer4", plt.shp.utm$PLOTID)] <- 4
text.pos[grep("fer2", plt.shp.utm$PLOTID)] <- 2
text.pos[grep("fer3", plt.shp.utm$PLOTID)] <- 4
text.pos[grep("foc5", plt.shp.utm$PLOTID)] <- 4
text.pos[grep("flm1", plt.shp.utm$PLOTID)] <- 4
text.pos[grep("flm3", plt.shp.utm$PLOTID)] <- 2
# PlotRGB: render the poster to a large LZW-compressed TIFF
# NOTE(review): brewer.pal() needs RColorBrewer, which is not in `lib`
# above -- confirm it is attached before this point.
tiff("out/official_map.tif", width = 12167, height = 8435, units = "px",
compression = "lzw", pointsize = 80)
# pdf("out/official_map.pdf", pointsize = 15, width = 40, height = 30)
plotRGB(rst.esri.skbl.utm.crp, stretch = "lin",
maxpixels = ncell(rst.esri.skbl.utm.crp),
addfun = function(...) {
points(plt.shp.utm, pch = 13, lwd = 3, cex = 2,
col = brewer.pal(5, "YlOrBr")[4])
thigmophobe.labels(coordinates(plt.shp.utm)[, 1],
coordinates(plt.shp.utm)[, 2],
text.pos = text.pos, offset = 1,
labels = plt.shp.utm$PLOTID, cex = 2, font = 2,
col = brewer.pal(5, "YlOrBr")[4])
scalebar(d = 5000, type = "bar", divs = 4, below = "km",
label = c(0, 2.5, 5), xy = c(300000, 9624500), cex = 2,
adj = c(.5, -1))
rasterImage(north.arrow, 301500, 9626250, 303500, 9628750)
})
dev.off()
# Deregister parallel backend
stopCluster(cl)
|
c2ace96c4a4fe64aa19b12b11345ef2e1f105896
|
796b5a173db8207364467bccbc3459d0adb30e57
|
/man/uscolleges.Rd
|
9a58b6cbe5d39e730dd1922cf086772275dfcc3d
|
[] |
no_license
|
jonthegeek/uscolleges
|
296a16a8bddd93aef573efa030bd9198da77e6ec
|
ebf048511e3d5f7aab922a68c282f30676400b4e
|
refs/heads/master
| 2020-03-07T13:35:54.288245
| 2018-04-03T14:18:06
| 2018-04-03T14:18:06
| 127,505,113
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 654
|
rd
|
uscolleges.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/01data_definitions.R
\docType{data}
\name{uscolleges}
\alias{uscolleges}
\title{Tidy US college scorecard data}
\format{A tibble with 7593 observations of colleges in the United States, and
622 variables. The variables are described in the
\code{\link{uscolleges_data_dictionary}}.}
\source{
\url{https://catalog.data.gov/dataset/college-scorecard}
}
\usage{
uscolleges
}
\description{
The most recent US college scorecard from the US Department of Education,
using the dev-friendly column names from the provided data dictionary.
}
\keyword{datasets}
|
479b00416b2bb16ad12e901d7497d7105fbc828a
|
efa104bf0b9232288455017b8c12b50cf62c3f17
|
/plot1.R
|
36be9977325241b63e061fb13474f23043145047
|
[] |
no_license
|
jai-angle/ExData_Plotting1
|
c9a51ce85b6c354aa00f4f34bad41e815f70202f
|
db3b2e269279986a5438f5de9f8ee11a79a9e43e
|
refs/heads/master
| 2021-01-17T21:38:27.348829
| 2016-05-19T12:02:24
| 2016-05-19T12:02:24
| 59,017,879
| 0
| 0
| null | 2016-05-17T11:51:13
| 2016-05-17T11:51:13
| null |
UTF-8
|
R
| false
| false
| 822
|
r
|
plot1.R
|
## Plot 1 of the plotting assignment: histogram of the Global Active Power
## readings from household_power_consumption.txt for 1-2 Feb 2007, saved to
## a 480x480 PNG.
input_file <- "./ExData_Plotting1Data/household_power_consumption.txt"

## Read the full data set; "?" marks missing values in this file.
power_data <- read.table(input_file, header = TRUE, sep = ";",
                         na.strings = "?", stringsAsFactors = FALSE)

## Keep only the two days required by the assignment.
two_days <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]
active_power <- as.numeric(two_days$Global_active_power)

## Draw the histogram on the screen device.
hist(active_power, xlab = "Global Active Power (killowatts)", col = "red",
     main = "Global Active Power")

## Copy the finished plot to the PNG file device and close it.
dev.copy(png, file = "~/ExData_Plotting1/plot1.png", width = 480, height = 480)
dev.off()
|
614a68be24206902c8a4195ab063f4bf6ff0ebda
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/An_Introduction_To_Statistical_Methods_And_Data_Analysis_by_R_Lyman_Ott_And_Michael_Longnecker/CH3/EX3.6/Ex3_6.r
|
de4780a4ed349d53318d51065d0e7c1681601ac7
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 448
|
r
|
Ex3_6.r
|
# Page No. 83 -- grouped-frequency mean of a class-interval distribution.

# Class labels, observed frequencies, and interval midpoints (17.5 to 42.5
# in steps of the 2.5 class width).
ClassInterval <- c("16.25-18.75", "18.75-21.25", "21.25-23.75","23.75-26.25", "26.25-28.75", " 28.75-31.25", " 31.25-33.75", "33.75-36.25","36.25-38.75", "38.75- 41.25", "41.25- 43.75")
freq <- c(2, 7, 7, 14, 17, 24, 11, 11, 3, 3, 1)
mid_interval <- seq(17.5, 42.5, by = 2.5)

# Per-class contribution f_i * m_i to the grouped mean.
fmi <- freq * mid_interval

# Tabulate the working, then report the mean = sum(f*m) / sum(f).
List <- data.frame(ClassInterval, freq, mid_interval, fmi)
print(List)
print("mean is")
print(sum(fmi) / sum(freq))
|
5a312e99c8c76ff864919a369a356352a5bf30bb
|
583c2374b676c60cdb64ffae1d48e0d0f2cf5e7f
|
/man/BIC.mmlcr.Rd
|
542dd47c18907cb3e7696353b15d7f6dcd765891
|
[] |
no_license
|
cran/mmlcr
|
cf8e1c575b226a20484158f16998ca05bb4573a9
|
d4426714daa734cb546f34441719a80e38344ccc
|
refs/heads/master
| 2021-01-21T11:45:59.129520
| 2006-04-10T00:00:00
| 2006-04-10T00:00:00
| 17,719,035
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,548
|
rd
|
BIC.mmlcr.Rd
|
\name{BIC.mmlcr}
\title{Bayesian Information Criterion}
\usage{
\method{BIC}{mmlcr}(object, ...)
}
\alias{BIC.mmlcr}
\arguments{
\item{object}{a fitted mmlcr object.}
\item{\dots}{optional fitted model objects.}
}
\description{
This generic function calculates the Bayesian information criterion,
also known as Schwarz's Bayesian criterion (SBC), for an mmlcr object for which a log-likelihood value can be obtained,
according to the formula \eqn{-2 \mbox{log-likelihood} + n_{par}
\log(n_{obs})}{-2*log-likelihood + npar*log(nobs)}, where
\eqn{n_{par}}{npar} represents the
number of parameters and \eqn{n_{obs}}{nobs} the number of
observations in the fitted model.
}
\value{
if just one object is provided, returns a numeric value with the
corresponding BIC; if more than one object are provided, returns a
\code{data.frame} with rows corresponding to the objects and columns
representing the number of parameters in the model (\code{df}) and the
BIC.
}
\references{
Schwarz, G. (1978) "Estimating the Dimension of a Model", Annals of
Statistics, 6, 461-464.
}
\seealso{\code{\link{AIC}}, \code{\link{mmlcrObject}}}
\examples{
\dontrun{data(mmlcrdf)}
\dontrun{mmlcrdf.mmlcr2 <- mmlcr(outer = ~ sex + cov1 | id,
components = list(
list(formula = resp1 ~ 1, class = "cnormonce", min = 0, max = 50),
list(formula = resp2 ~ poly(age, 2) + tcov1, class = "poislong"),
list(formula = resp3 ~ poly(age, 2), class = "multinomlong")
), data = mmlcrdf, n.groups = 2)}
\dontrun{BIC(mmlcrdf.mmlcr2)}
}
\keyword{models}
|
81513df1a005646ed3b83ee91c0297071920d394
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/steps/man/habitat_dynamics_functions.Rd
|
60b0bc72d8a1463245d92a33f24912f2874407ad
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 563
|
rd
|
habitat_dynamics_functions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/habitat_dynamics-functions.R
\name{habitat_dynamics_functions}
\alias{habitat_dynamics_functions}
\title{Functions to modify the habitat in a landscape object.}
\description{
Pre-defined functions to operate on habitat suitability (and carrying capacity if a function
is used) during a simulation.
}
\seealso{
\itemize{
\item{\link[steps]{disturbance} to modify the suitability of a landscape with user provided
spatially-explicit layers}
\item{\link[steps]{fire_effects}}
}
}
|
87171ba449e95c26ec4a6346c6e848b905ff676b
|
d8a5e3b9eef3c76bb7ca64d29ef2746cebd4c542
|
/man/isWhitespace.Rd
|
c41264711513653458737b3961b13c46258d0d06
|
[] |
no_license
|
cran/qmrparser
|
0539ad4bf5b97039e50b2bffa16c3012899c6134
|
bb1bb2b50b358d79f6400d521f995e1d2a55a784
|
refs/heads/master
| 2022-05-09T03:49:13.511049
| 2022-04-23T23:00:05
| 2022-04-23T23:00:05
| 17,698,845
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 485
|
rd
|
isWhitespace.Rd
|
%do not edit, edit noweb/qmrparser.nw
\name{isWhitespace}
\alias{isWhitespace}
\title{
Is it a white space?
}
\description{
Checks whether a character belongs to the set \{blank, tabulator, new line, carriage return, page break \}.
}
\usage{
isWhitespace(ch)
}
\arguments{
\item{ch}{character to be checked}
}
\value{
TRUE/FALSE, depending on character belonging to the specified set.
}
\examples{
isWhitespace(' ')
isWhitespace('\n')
isWhitespace('a')
}
\keyword{set of character}
|
a53eab8c429577683af2190aad7da9045ef34438
|
387511286d1a2927a596847a423ff63d4aa08782
|
/kml2ndvi.R
|
7377136e680579babb4c480d1d973b4883ace431
|
[] |
no_license
|
ahernan/kml2ndvi
|
00590b14c06b845182536516be9cf2dfbe966597
|
645e82b9b106a11aa84e529789c348c4ca00dc48
|
refs/heads/master
| 2021-01-12T11:15:28.537884
| 2016-11-06T13:43:53
| 2016-11-06T13:43:53
| 72,789,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,627
|
r
|
kml2ndvi.R
|
#------------------- METADATA -------------------
# Script description: produces a clip and a summary report of a raster
# stack based on a vector polygon.
# Raster: the NDVI and EVI bands of the MODIS MOD13Q1 product are used;
# here, clips provided by Patricio Oricchio in .img format.
# Raster values are stored as integers; for real values divide by 1000
# (value = value / 1000).
# Vector: initially a KML file containing a polygon.
# Coordinate reference system (CRS) of raster and vector = WGS84 !!!
# Last modified: 4-nov-2016
# Contributors
# angelini.hernan@inta.gob.ar
# oricchio.patricio@inta.gob.ar
# other interested parties welcome
#------------------- /METADATA -------------------
#---- Libraries ----
library(rgdal)
library(rgeos)
library(sp)
#---- Build the RASTER stack ----
# Images live in the NDVI directory: 23 .img files per year
# (001-2015 to 353-2015), where band 1 = NDVI and band 2 = EVI.
# Set the working directory for the IMAGES
setwd("/home/hernan/Curso_R/git/kml2ndvi/NDVI")
# List the names of the .img files
(lista_img <- list.files(getwd(), pattern = glob2rx("*.img"), full.names = F))
# Build the in-memory RasterStack
# (2 bands per file; only the first band = NDVI is read)
(RasterStack <- raster::stack(lista_img, bands=1))
#---- /Build the RASTER stack ----
#---- Build the polygon - VECTOR ----
# Stored inside the data folder: a KML containing several polygons.
# One is selected and processed.
# Set the working directory for the VECTOR
setwd("/home/hernan/Curso_R/git/kml2ndvi/")
# dsnv = Data Source Name Vector
dsnv <- file.path("data","Lotes.kml") # directory plus file name
ogrListLayers(dsnv) # list the spatial layers inside the KML
# Select the layer of the KML that contains the polygons
lotes_layer <- rgdal::readOGR(dsnv, layer = "Lotes")
# The polygons can be plotted with:
# raster::plot(lotes_layer)
# Build a spatial object from one of the polygons of the layer
# NOTE(review): prj_string_WGS is defined but never used below.
prj_string_WGS <- CRS("+proj=longlat +datum=WGS84")
lotev <- SpatialPolygons(lotes_layer@polygons[1])
# raster::plot(lotev)
#---- /Build the polygon - VECTOR ----
#---- Build the REPORT - VECTOR and RASTER ----
# Crop the RasterStack with the field polygon ("lote").
# Compute the per-pixel mean and standard deviation across dates.
# Output is rendered as a plot; it admits multiple, more or less valid,
# readings of the result.
# Crop from the stack
lote_raster <- raster::crop(RasterStack, lotev)
#raster::plotRGB(lote_raster) # plot if needed
# To avoid the rectangular outline of the raster, mask it instead:
# loteymascara <- raster::mask(lote_raster, lotev)
# raster::plotRGB(loteymascara)
# Report the per-pixel mean over all dates
loter_media <- raster::calc(lote_raster, fun=mean)
# Report the per-pixel standard deviation across dates
loter_sd <- raster::calc(lote_raster, fun=sd)
# To inspect the values inside the field:
# raster::values(loter_media)
# raster::values(loter_sd)
#---- /Build the REPORT - VECTOR and RASTER ----
#---- Build the graphical output - VECTOR and RASTER ----
# Configure the output layout (two panels side by side)
opar <- par(mfrow=c(1,2))
# Plot the mean NDVI of the field, with the polygon outline on top
raster::plot(loter_media, main = "Media")
plot(lotev, add=T, border="green", lwd=3)
# Plot the standard deviation of the field NDVI
raster::plot(loter_sd, main = "SD", col=c("blue", "yellow", "orange", "red"))
plot(lotev, add=T, border="green", lwd=3)
par(opar)
#---- /Build the graphical output - VECTOR and RASTER ----
|
be22b15684a08f7ad4186d9df9b824aa2cd4ff60
|
b706bd176b23ade74d16d0997ea209a2a940f688
|
/fikspdfs.R
|
f5e492ed82091ca2a20c417cb79edce82a7dd4a9
|
[] |
no_license
|
chrilur/brassranking
|
7920a91e174845b2d847273bb97bc03ae7eee9ca
|
f236745fe70c552d6081c716a06c4a05f2134bf9
|
refs/heads/master
| 2021-01-22T06:07:05.848172
| 2017-02-12T20:40:41
| 2017-02-12T20:40:41
| 81,736,132
| 0
| 0
| null | null | null | null |
MacCentralEurope
|
R
| false
| false
| 736
|
r
|
fikspdfs.R
|
# Post-process band-contest result lists extracted from PDFs and merge them
# into a combined ranking file.
# NOTE(review): the file's declared encoding is MacCentralEurope; the
# identifier "Śr" below appears to be mojibake of the Norwegian "år"
# (year) -- confirm before renaming anything, since callers may use the
# name as a named argument.
setwd("C:\\Users\\n633164\\Documents\\R\\brassrank")
library(stringr)

# Clean one result CSV and annotate it with contest metadata.
# navn: path to a header-less CSV whose first column holds band names,
#       possibly prefixed with placement numbers/dots from the PDF export.
# Śr:   contest year (see encoding note above).
# div:  division.
# konk: contest identifier.
# Side effect: overwrites `navn` in place with the cleaned, annotated,
# UTF-8-encoded table. Returns the cleaned data frame.
fiks.nm <- function(navn, Śr, div, konk) {
fil <- read.csv(navn, stringsAsFactors = FALSE, header=FALSE, fileEncoding="UTF-8")
# Strip digits, dots and surrounding whitespace from the band names
fil[,1] <- gsub("[0-9]", "", fil[,1])
fil[,1] <- gsub("\\.", "", fil[,1])
fil[,1] <- gsub("^\\s+|\\s+$", "", fil[,1])
fil[,2] <- Śr
fil[,3] <- div
fil[,4] <- konk
ant <- length(fil[,1])
fil[,5] <- 1:ant  # placement = original row order in the result list
names(fil) <- c("band", "Śr", "div", "konk", "plass")
write.csv(fil, navn, row.names=FALSE, fileEncoding = "UTF-8")
return(fil)
}

# Append two cleaned result tables and write the combined list to
# liste.csv (UTF-8). Returns the combined data frame.
# NOTE(review): the local name `list` shadows base::list inside this
# function body -- harmless here, but worth renaming eventually.
comb <- function(liste1, liste2) {
list <- rbind(liste1, liste2)
write.csv(list, "liste.csv", row.names=FALSE, fileEncoding="UTF-8")
return(list)
}
|
df8d14bd89461a63b31066c7ebad954263f0d574
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/crunch/examples/loadDataset.Rd.R
|
48dd522d9cff3da0ce7362d08963f942db427e7d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 211
|
r
|
loadDataset.Rd.R
|
library(crunch)
### Name: loadDataset
### Title: Load a Crunch Dataset
### Aliases: loadDataset
### ** Examples
## Not run:
##D dsName <- listDatasets()[1]
##D ds <- loadDatasets(dsName)
## End(Not run)
|
47a5f54ad8126b81c30d6f94095c581040c773de
|
c6ccaabb627f8b29a7cb32c5b3fe19b72d07e188
|
/R/swarm.R
|
97c871a89d3ae1e7eba818eedadc6f8c14616342
|
[] |
no_license
|
cran/particle.swarm.optimisation
|
5f5a14944f95bfa095c6088f9fbbbe971095ad42
|
e8d7e4b31817a8be4c2e2a52437397c445ee01a8
|
refs/heads/master
| 2023-04-30T00:25:00.363880
| 2021-05-21T07:00:02
| 2021-05-21T07:00:02
| 369,583,860
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 24,659
|
r
|
swarm.R
|
#' @title Swarm
#' @description Particle Swarm, used to launch the Particle Swarm Optimisation, The PSO is used to maximise the fitness.
#' @import rgl
#' @importFrom R6 R6Class
#' @export
#' @examples
#' # In this example we use the PSO to solve the following equation:
#' # a * 5 + b * 25 + 10 = 15
#'
#' fitness_function <- function(values){
#' a <- values[1]
#' b <- values[2]
#' particule_result <- a*5 + b*25 + 10
#' difference <- 15 - particule_result
#' fitness <- 1 - abs(difference)
#' return(fitness)
#' }
#'
#' values_ranges <- list(c(-10^3,10^3),c(-10^3,10^3))
#'
#' swarm <- ParticleSwarm$new(pop_size = 200,
#' values_names = list("a","b"),
#' fitness_function = fitness_function,
#' max_it = 75,
#' acceleration_coefficient_range = list(c(0,1),c(0,1)),
#' inertia = 0.5,
#' ranges_of_values = values_ranges)
#' swarm$run(plot = FALSE,verbose = FALSE,save_file = FALSE)
#' # the solution is :
#' swarm$swarm_best_values
#' swarm$swarm_best_values[[1]]*5 + swarm$swarm_best_values[[2]] *25 + 10
ParticleSwarm <- R6Class('ParticleSwarm',
private = list(
#' @field pop_size (numeric) number of particles in the swarm
.pop_size = NA,
#' @field ranges_of_values (list) range for each value for the particle
.ranges_of_values = NA,
#' @field values_names (list) list of names for each value (optionnal)
.values_names = NA,
#' @field pop (list) list of particle in the swarm
.pop = list(),
#' @field fitness_function (function) fitness function used to find the fitness of the particle
.fitness_function = NA,
#' @field list_fitness (list) list of fitness of the particles
.list_fitness = list(),
#' @field max_it (numeric) maximum number of iteration
.max_it = NA,
#' @field acceleration_coefficient_range (list) coefficient c1 and c2 for the particles
.acceleration_coefficient_range = NA,
#' @field swarm_best_fitness (numeric) best fitness of the swarm
.swarm_best_fitness = NA,
#' @field swarm_best_values (numeric) values of the particle with the best fitness
.swarm_best_values = NA,
#' @field inertia (numeric) inertia of the particles
.inertia = NA
),
active = list(
pop_size = function(value){
if (missing(value)) {
private$.pop_size
} else {
stop("`$pop_size can't be changed after the creation of the Swarm", call. = FALSE)
}
},
ranges_of_values = function(value){
if (missing(value)) {
private$.ranges_of_values
} else {
stop("`$ranges_of_values can't be changed after the creation of the Swarm", call. = FALSE)
}
},
values_names = function(value){
if (missing(value)) {
private$.values_names
} else {
stop("$values_names can't be changed after the creation of the Swarm",call. = FALSE)
}
},
pop = function(value){
if (missing(value)) {
private$.pop
} else {
stop("`$pop can't be changed after the creation of the Swarm", call. = FALSE)
}
},
fitness_function = function(value){
if (missing(value)) {
private$.fitness_function
} else {
stop("`$fitness_function can't be changed after the creation of the Swarm", call. = FALSE)
}
},
list_fitness = function(value){
if (missing(value)) {
private$.list_fitness
} else {
stop("`$list_fitness can't be changed after the creation of the Swarm", call. = FALSE)
}
},
max_it = function(value){
if (missing(value)) {
private$.list_fitness
} else {
private$.max_it <- value
}
},
acceleration_coefficient_range = function(value){
if (missing(value)) {
private$.acceleration_coefficient_range
} else {
stop("`$acceleration_coefficient_range can't be changed after the creation of the Swarm", call. = FALSE)
}
},
swarm_best_fitness = function(value){
if (missing(value)) {
private$.swarm_best_fitness
} else {
stop("`$swarm_best_fitness can't be changed after the creation of the Swarm", call. = FALSE)
}
},
swarm_best_values = function(value){
if (missing(value)) {
private$.swarm_best_values
} else {
stop("`$swarm_best_values can't be changed after the creation of the Swarm", call. = FALSE)
}
},
inertia = function(value){
if (missing(value)) {
private$.inertia
} else {
stop("`$inertia can't be changed after the creation of the Swarm", call. = FALSE)
}
}
),
public = list(
#' @description
#' Create a new ParticleSwarm object.
#' @param pop_size number of individu in the swarm. (numeric)
#' @param ranges_of_values range for each value of the particle (min and max). (List)
#' @param values_names list of names for each value (character)
#' @param fitness_function function used to test the Particle and find his fitness. (function)
#' @param max_it Maximum number of iteration for the PSO. (numeric)
#' @param acceleration_coefficient_range a vector of four values (min and max for c1 and c2) (numeric)
#' @param inertia The inertia for the particle (the influence of the previous velocity on the next velocity). (numeric)
#' @examples
#' # Create a ParticleSwarm object
#' swarm <- ParticleSwarm$new(pop_size=20,
#' values_names=c('a','b'),
#' max_it=20,
#' fitness_function = function(values){return(values[1]+values[2])},
#' acceleration_coefficient=list(c(0.5,1),c(0.5,1)),
#' inertia=0.5,
#' ranges_of_values=list(c(-100,100),c(-100,100)))
#' @return A new `ParticleSwarm` object.
initialize = function(pop_size,
                      values_names,
                      fitness_function,
                      max_it,
                      acceleration_coefficient_range,
                      inertia,
                      ranges_of_values){
  # Validate every argument up front so a mis-configured swarm fails fast.
  if (is.list(ranges_of_values)){
    private$.ranges_of_values <- ranges_of_values
  } else {stop("ERROR ranges_of_values need to be a list.")}
  if(is.function(fitness_function)){
    private$.fitness_function <- fitness_function
  } else{stop('ERROR fitness_function need to be a function')}
  # Expected shape (see the class examples) is a list of TWO ranges:
  # list(c(min_c1, max_c1), c(min_c2, max_c2)). The previous error message
  # wrongly described a flat vector of four numbers.
  if (length(acceleration_coefficient_range) != 2){
    stop('ERROR acceleration_coefficient_range need to be a list of two ranges: list(c(min_c1,max_c1),c(min_c2,max_c2))')
  }
  private$.acceleration_coefficient_range <- acceleration_coefficient_range
  if (is.numeric(inertia)){
    private$.inertia <- inertia
  } else {stop("inertia need to be a numeric value")}
  if (is.numeric(max_it)){
    if (length(max_it) == 1){
      private$.max_it <- max_it
    } else{stop('ERROR max_it need to be one number')}
  } else {stop('ERROR max_it need to be a numeric')}
  if (is.numeric(pop_size)){
    private$.pop_size <- pop_size
  } else {stop('ERROR pop_size need to be a numeric')}
  # values_names is optional; leave the private default when absent.
  if (!missing(values_names)){
    private$.values_names <- values_names
  }
},
#' @description
#' Make the Particle Swarm Optimisation
#' @param verbose print the different step (iteration and individu)
#' @param plot plot the result of each iteration (only for 2D or 3D problem)
#' @param save_file save the population of each Iteration in a file and save the plot if plot=TRUE
#' @param dir_name name of the directory, default value is PSO_pop
#' @return self
#' @examples
#' # Create a ParticleSwarm object
#' swarm <- ParticleSwarm$new(pop_size=20,
#' values_names=c('a','b'),
#' max_it=20,
#' fitness_function = function(values){return(values[1]+values[2])},
#' acceleration_coefficient=list(c(0.5,1),c(0.5,1)),
#' inertia=0.5,
#' ranges_of_values=list(c(-100,100),c(-100,100)))
#' # run the PSO
#' swarm$run(verbose = FALSE,
#' plot = FALSE,
#' save_file = FALSE)
#' # return the best result:
#' print(swarm$swarm_best_values)
run=function(verbose = TRUE, plot = TRUE, save_file = FALSE, dir_name='PSO_pop'){
  # Drive the whole optimisation: build the swarm, then iterate max_it times,
  # optionally plotting and/or persisting the population at each step.
  if (save_file){
    if (!dir.exists(dir_name)){
      dir.create(dir_name)
    }
  }
  self$generate_pop(verbose)
  nb_dim <- length(private$.ranges_of_values)
  # seq_len() (instead of 1:max_it) iterates zero times when max_it == 0,
  # whereas 1:0 would wrongly run two iterations.
  for (iteration in seq_len(private$.max_it)){
    self$move_the_swarm(verbose)
    # Scatter plots are only meaningful for 2- or 3-dimensional problems.
    if (nb_dim == 2 && plot){
      self$plot_the_swarm_2D(iteration,save_file)
    } else if (nb_dim == 3 && plot){
      self$plot_the_swarm_3D(iteration,save_file)
    }
    if (save_file){
      self$save_pop(iteration,dir_name)
    }
    if (verbose){
      print(paste('iteration',iteration,sep = ' '))
    }
  }
  invisible(self)
},
#' @description
#' create the population of the swarm (this method is automatically called by the run method)
#' @param verbose print the advancement or not
#' @return self
generate_pop=function(verbose = TRUE){
  # Create particles until the population reaches pop_size. Each particle
  # gets uniform random starting values within the configured ranges and its
  # own randomly drawn acceleration coefficients (c1, c2).
  while (length(private$.pop) != private$.pop_size) {
    if (verbose){
      print(paste('individu ',length(private$.pop)+1,sep = ''))
    }
    values <- numeric()
    for (i in private$.ranges_of_values) {
      values <- append(values,runif(n = 1,min = i[1],max = i[2]))
    }
    # c1 is drawn from the first range, c2 from the second.
    coef <- c(runif(n = 1,
                    min = unlist(private$.acceleration_coefficient_range[1])[1],
                    max = unlist(private$.acceleration_coefficient_range[1])[2]),
              runif(n = 1,
                    min = unlist(private$.acceleration_coefficient_range[2])[1],
                    max = unlist(private$.acceleration_coefficient_range[2])[2]))
    # NOTE(review): Particle is defined elsewhere in the package; this code
    # relies on it exposing values/fitness fields plus get_fitness() and
    # update_personal_best_fitness() -- confirm against its definition.
    individu <- Particle$new(values=values,
                             values_ranges=private$.ranges_of_values,
                             fitness_function=private$.fitness_function,
                             acceleration_coefficient=coef,
                             inertia=private$.inertia)
    individu$get_fitness()
    individu$update_personal_best_fitness()
    # Track the swarm-wide best; higher fitness is treated as better.
    if (is.na(private$.swarm_best_fitness)){
      private$.swarm_best_values <- individu$values
      private$.swarm_best_fitness <- individu$fitness
    } else if (individu$fitness > private$.swarm_best_fitness){
      private$.swarm_best_values <- individu$values
      private$.swarm_best_fitness <- individu$fitness
    }
    private$.pop <- append(private$.pop,individu)
  }
  invisible(self)
},
#' @description
#' The method used to change the location of each particle (this method is automatically called by the run method)
#' @param verbose print or not the advancement
#' @return self
move_the_swarm=function(verbose){
  # First pass: every particle moves toward the current swarm best.
  # (Index-based loop avoids shadowing base::c with a counter variable.)
  for (idx in seq_along(private$.pop)) {
    particle <- private$.pop[[idx]]
    particle$update(private$.swarm_best_values)
    if (verbose) {
      print(paste("individu", idx, sep = " "))
    }
  }
  # Second pass: refresh the swarm-level best fitness and values.
  for (particle in private$.pop) {
    if (particle$fitness >= private$.swarm_best_fitness) {
      private$.swarm_best_fitness <- particle$fitness
      private$.swarm_best_values <- particle$values
    }
  }
  invisible(self)
},
#' @description
#' The method used to save the values and fitness of the population in a CSV file (this method is automatically called by the run method if you have chosen to save the result)
#' @param nb_it number of the iteration, used to create the name of the csv file
#' @param dir_name Name of the directory
#' @return self
save_pop=function(nb_it,dir_name){
  # Flatten each particle's values plus its fitness into one row of a
  # data.frame, then write the whole population as a CSV.
  pop_result <- data.frame()
  value <- c(0)  # dummy leading column, dropped after the loop
  for (i in private$.pop){
    for (val in i$values){
      value <- cbind(value,val)
    }
    pop_result <- rbind(pop_result,cbind(value,i$fitness))
    value <- c(0)
  }
  # Drop the dummy first column introduced by the c(0) seed.
  pop_result <- pop_result[,-1]
  if (length(pop_result)!=0){
    names(pop_result) <- c(private$.values_names,'accuracy')
  }
  # Output file name pattern: <dir_name>/Iteration_<nb_it>
  write.csv(pop_result,file = paste(paste(dir_name,"/Iteration",sep=''),nb_it,sep = "_"))
  invisible(self)
},
#' @description
#' method used to plot a 2D plot (this method is automatically called by the run method if you have chosen to plot the swarm)
#' @param nb_it number of the iteration used to save the plot as a png
#' @param save_file save the plot as a file
#' @return self
plot_the_swarm_2D=function(nb_it,save_file){
  # Collect the (x, y) position of every particle in the swarm.
  x <- numeric()
  y <- numeric()
  for (i in private$.pop){
    x <- c(x,i$values[1])
    y <- c(y,i$values[2])
  }
  # Fix the axes to the full search space so successive frames are comparable.
  xlim <- c(min(private$.ranges_of_values[[1]]),max(private$.ranges_of_values[[1]]))
  ylim <- c(min(private$.ranges_of_values[[2]]),max(private$.ranges_of_values[[2]]))
  plot(x,y,
       type='p',
       xlim=xlim,
       ylim=ylim,
       pch=20,
       xlab=private$.values_names[[1]],
       ylab=private$.values_names[[2]])
  if(save_file){
    # The plot is re-issued on a png device; the identical call is repeated
    # because the first one targets the interactive device.
    png(paste('iteration',nb_it,".png",sep=''))
    plot(x,y,
         type='p',
         xlim=xlim,
         ylim=ylim,
         pch=20,
         xlab=private$.values_names[[1]],
         ylab=private$.values_names[[2]])
    dev.off()
  }
  invisible(self)
},
#' @description
#' method used to plot a 3D plot
#' @param nb_it number of the iteration used to save the plot as a png (this method is automatically called by the run method if you have chosen to plot the swarm)
#' @param save_file save the plot as a file
#' @return self
plot_the_swarm_3D=function(nb_it,save_file){
  # Collect the (x, y, z) position of every particle.
  x <- numeric()
  y <- numeric()
  z <- numeric()
  for (i in private$.pop){
    x <- c(x,i$values[1])
    y <- c(y,i$values[2])
    z <- c(z,i$values[3])
  }
  # Fix the axes to the full search space so successive frames are comparable.
  xlim <- c(min(private$.ranges_of_values[[1]]),max(private$.ranges_of_values[[1]]))
  ylim <- c(min(private$.ranges_of_values[[2]]),max(private$.ranges_of_values[[2]]))
  zlim <- c(min(private$.ranges_of_values[[3]]),max(private$.ranges_of_values[[3]]))
  # NOTE(review): rgl.clear/rgl.bg/plot3d/rgl.snapshot come from the rgl
  # package, which must be loaded by the package/namespace -- confirm.
  rgl.clear()
  rgl.bg(color = 'white')
  plot3d(x,y,z,
         type="s",
         radius=10,
         col="red",
         xlim=xlim,
         ylim=ylim,
         zlim=zlim,
         xlab = private$.values_names[[1]],
         ylab = private$.values_names[[2]],
         zlab = private$.values_names[[3]])
  if(save_file){
    rgl.snapshot(paste('iteration',nb_it,".png",sep = '_'))
  }
  invisible(self)
},
#' @description
#' Print the current result of the population
print=function(){
  # Build the same values+fitness table as save_pop(), but print it to the
  # console instead of writing a CSV.
  pop_result <- data.frame()
  value <- c(0)  # dummy leading column, dropped after the loop
  for (i in private$.pop){
    for (val in i$values){
      value <- cbind(value,val)
    }
    pop_result <- rbind(pop_result,cbind(value,i$fitness))
    value <- c(0)
  }
  pop_result <- pop_result[,-1]
  if (length(pop_result)!=0){
    names(pop_result) <- c(private$.values_names,'accuracy')
  }
  print('Population result : ')
  print(pop_result)
}
)
)
|
0417d23457c60d2e6e28fdb5ef0d7547abbaa00f
|
2e5cc9b036338bd6b257e1c95f69c4edd6db6b4d
|
/Data_Analytcs_practice/tidyr_dplyr.R
|
b36f6b1995434b0ab3791b14af028487503298a0
|
[] |
no_license
|
abhik-ghosh/R-Labs
|
c808874eb8665b844f082bc8531a677e48baca6d
|
53dd985c42d97f3dc49d8a65c158cad257097e9c
|
refs/heads/master
| 2020-03-12T07:23:04.073530
| 2018-04-28T03:31:30
| 2018-04-28T03:31:30
| 130,505,020
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 925
|
r
|
tidyr_dplyr.R
|
#install.packages("dplyr", dependencies=TRUE, INSTALL_opts = c('--no-lock'))
#install.packages("tidyr", dependencies=TRUE, INSTALL_opts = c('--no-lock'))
#library("dplyr", lib.loc="~/R/x86_64-pc-linux-gnu-library/3.2")
#library("tidyr", lib.loc="~/R/x86_64-pc-linux-gnu-library/3.2")
# NOTE(review): the library() calls above are commented out, so this script
# only runs if dplyr/tidyr are already attached in the session -- confirm.
# NOTE(review): rm(list=ls()) wipes the caller's global environment; this is
# generally discouraged in shared scripts.
rm(list=ls())
iris_data <- iris
# tidyr - gather, spread, separate, unite (gather/spread are superseded by
# pivot_longer/pivot_wider in modern tidyr).
# gather() - Reshaping wide format to long format
long_data <- gather(iris_data,iris_header, value, Sepal.Length : Petal.Width)
# Drop rows whose measurement is exactly zero.
long_data_concise <- long_data[!(long_data$value == 0),]
str(long_data_concise)
# separate() - Splitting single variable into two
# NOT WORKING - original_data <- separate(long_data, iris_header, c('Sepal.Length', 'Sepal.Width', 'Petal.Length', 'Petal.Width'), sep="")
# spread() - complement to gather: back to wide format
my_data <- spread(long_data_concise, iris_header, value)
str(my_data)
# dplyr filter on two exact measurements (result printed, not stored).
filter(iris_data, Sepal.Length == 5.1, Petal.Length ==1.5)
|
75edd2cb979946e607879a2317d7c7f2b03d5d56
|
52075930747540d6815040bdf3b9ceca5c7387d7
|
/coursera/data_science_specialization/r/assignments/w4_hospital/rankall.R
|
9f9be0841e64a69f6bdc6b032627bd9126fa31a8
|
[] |
no_license
|
lorenzoconti/courses
|
d03b83d8c4b33c161dd49cd01c670c37cc459932
|
05664e938007a59f4214132e707fd185437c7819
|
refs/heads/master
| 2023-01-28T17:34:41.944637
| 2020-12-09T10:15:14
| 2020-12-09T10:15:14
| 297,916,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 601
|
r
|
rankall.R
|
# Data Science Specialization
# Ranking hospitals in all states
rankall <- function(outcome, num = "best") {
  # Rank hospitals for the given outcome in every state.
  #
  # Args:
  #   outcome: outcome name understood by rankhospital().
  #   num: rank to extract per state ("best", "worst", or an integer).
  # Returns: data.frame with columns hospital and state, ordered by state.
  source('rankhospital.R')
  outcome_data <- read.csv('data/outcome-of-care-measures.csv', colClasses = 'character')
  states <- unique(outcome_data$State)
  # Build one single-row data.frame per state, then bind once -- avoids the
  # O(n^2) cost of growing a data.frame with rbind() inside a loop.
  rows <- lapply(states, function(state) {
    data.frame('hospital' = rankhospital(state, outcome, num), 'state' = state)
  })
  result <- do.call(rbind, rows)
  result <- result[order(result$state), ]
  return(result)
}
|
4f2b1f6628c60bb32ad540c18581329132e18c0f
|
6e2df1994ecddfa44072c99c72f8a06f3092f1b7
|
/R/tmle3_Spec_risk.R
|
835437ae104c0bf7cce041ad1898fce46181d659
|
[] |
no_license
|
child-growth/longbowRiskFactors
|
13cade10925688c58ba0db45f3a3e212115f5e91
|
1758b0cbec3cdfab05c352f1cb9681e7af88a5d0
|
refs/heads/master
| 2020-03-14T10:12:09.332962
| 2019-05-03T21:53:50
| 2019-05-03T21:53:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,471
|
r
|
tmle3_Spec_risk.R
|
#' Defines a tmle (minus the data)
#'
#' Current limitations:
#' @importFrom R6 R6Class
#' @importFrom tmle3 tmle3_Spec Param_delta
#'
#' @export
#
tmle3_Spec_risk <- R6Class(
  classname = "tmle3_Spec_risk",
  portable = TRUE,
  class = TRUE,
  inherit = tmle3_Spec,
  public = list(
    # Forward the baseline treatment level (and any other options) to the
    # parent tmle3_Spec.
    initialize = function(baseline_level = NULL, ...) {
      super$initialize(baseline_level = baseline_level, ...)
    },
    # Build the tmle3 task from raw data and a node list (W/A/Y, optional id).
    make_tmle_task = function(data, node_list, ...) {
      # bound Y if continuous
      Y_node <- node_list$Y
      Y_vals <- unlist(data[, Y_node, with = FALSE])
      Y_variable_type <- variable_type(x = Y_vals)
      if (Y_variable_type$type == "continuous") {
        # Bound a continuous outcome to its observed range (the commented-out
        # 10% padding was deliberately disabled).
        min_Y <- min(Y_vals)
        max_Y <- max(Y_vals)
        range <- max_Y - min_Y
        lower <- min_Y # - 0.1 * range
        upper <- max_Y # + 0.1 * range
        Y_variable_type <- variable_type(
          type = "continuous",
          bounds = c(lower, upper)
        )
      }
      # todo: export and use sl3:::get_levels
      A_node <- node_list$A
      A_vals <- unlist(data[, A_node, with = FALSE])
      if (is.factor(A_vals)) {
        A_levels <- sort(unique(A_vals))
        A_levels <- factor(A_levels, A_levels)
      } else {
        A_levels <- sort(unique(A_vals))
      }
      A_variable_type <- variable_type(
        type = "categorical",
        levels = A_levels
      )
      # make tmle_task with the W -> A -> Y structural model
      npsem <- list(
        define_node("W", node_list$W),
        define_node("A", node_list$A, c("W"), A_variable_type),
        define_node("Y", node_list$Y, c("A", "W"), Y_variable_type)
      )
      # Pass cluster ids through when provided (repeated-measures data).
      if(!is.null(node_list$id)){
        tmle_task <- tmle3_Task$new(data, npsem = npsem, id=node_list$id, ...)
      } else {
        tmle_task <- tmle3_Task$new(data, npsem = npsem, ...)
      }
      return(tmle_task)
    },
    # Define target parameters: one TSM per treatment level, plus RR/PAR/PAF
    # contrasts (multiplicative scale) or ATE/PAR contrasts (additive scale).
    make_params = function(tmle_task, likelihood) {
      # todo: export and use sl3:::get_levels
      A_vals <- tmle_task$get_tmle_node("A")
      if (is.factor(A_vals)) {
        A_levels <- sort(unique(A_vals))
        A_levels <- factor(A_levels, levels(A_vals))
      } else {
        A_levels <- sort(unique(A_vals))
      }
      # One treatment-specific mean (TSM) per observed treatment level.
      tsm_params <- lapply(A_levels, function(A_level) {
        intervention <- define_lf(LF_static, "A", value = A_level)
        tsm <- Param_TSM$new(likelihood, intervention)
        return(tsm)
      })
      # separate baseline and comparisons (defaults to the first level)
      baseline_level <- self$options$baseline_level
      if(is.null(baseline_level)){
        baseline_level = A_levels[[1]]
      }
      baseline_index <- which(A_levels==baseline_level)
      baseline_param <-tsm_params[[baseline_index]]
      # NOTE(review): comparison_params is computed but never used below --
      # the lapply calls iterate over tsm_params (which includes the baseline
      # itself, yielding a self-contrast); confirm whether comparison_params
      # was intended instead.
      comparison_params <- tsm_params[-1*baseline_index]
      # Default the effect scale from the outcome type when not set:
      # continuous -> additive (ATE), otherwise multiplicative (RR).
      if(is.null(self$options$effect_scale)){
        outcome_type <- tmle_task$npsem$Y$variable_type$type
        private$.options$effect_scale <- ifelse(outcome_type=="continuous", "additive", "multiplicative")
      }
      if(self$options$effect_scale=="multiplicative"){
        # define RR params
        rr_params <- lapply(tsm_params, function(comparison_param){
          Param_delta$new(likelihood, delta_param_RR, list(baseline_param, comparison_param))
        })
        mean_param <- Param_mean$new(likelihood)
        # define PAR/PAF params
        par <- Param_delta$new(likelihood, delta_param_PAR, list(baseline_param, mean_param))
        paf <- Param_delta$new(likelihood, delta_param_PAF, list(baseline_param, mean_param))
        tmle_params <- c(tsm_params, mean_param, rr_params, par, paf)
      } else {
        # define ATE params
        ate_params <- lapply(tsm_params, function(comparison_param){
          Param_delta$new(likelihood, delta_param_ATE, list(baseline_param, comparison_param))
        })
        mean_param <- Param_mean$new(likelihood)
        par <- Param_delta$new(likelihood, delta_param_PAR, list(baseline_param, mean_param))
        tmle_params <- c(tsm_params, mean_param, ate_params, par)
      }
      return(tmle_params)
    }
  ),
  active = list(),
  private = list()
)
#' Risk Measures for Binary Outcomes
#'
#' Estimates TSMs, RRs, PAR, and PAF
#'
#' O=(W,A,Y)
#' W=Covariates
#' A=Treatment (binary or categorical)
#' Y=Outcome binary
#' @importFrom sl3 make_learner Lrnr_mean
#' @export
tmle_risk <- function(baseline_level = NULL) {
  # todo: unclear why this has to be in a factory function
  # Returns a fresh tmle3_Spec_risk instance for the given baseline level.
  tmle3_Spec_risk$new(baseline_level = baseline_level)
}
|
f791eeb0efaa33b519747c6733bf2910d3724998
|
6e84f9ba461b7c077b77a23bc7e074ca5ec196dd
|
/cachematrix.R
|
860dd9be60cd93bf933f1d6213a678859bdc3c17
|
[] |
no_license
|
brianschlatter/ProgrammingAssignment2
|
8eeec20a762ec857015884fe408ad83141e55de6
|
00d0774c4e4574d9c996ab2aea7696f689030f0b
|
refs/heads/master
| 2021-01-21T16:34:54.930474
| 2014-04-25T19:58:39
| 2014-04-25T19:58:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,721
|
r
|
cachematrix.R
|
## A couple helper functions that allow for caching the inverse of the matrix
## Usage:
## Initialization...
## a <- makeCacheMatrix(matrix(1:16, 4))
## To see your matrix...
## a$get()
## To get the inverse of this matrix...
## a$getinv()
## Note: The first this is called, you will take the hit to
## do the actual calculation. Subsequent calls used the cached value.
## To assign another matrix...
## a$set(matrix(1:4, 2))
## Use this function to initialize your matrix and cached matrix.
## This is what persists these values past the actual call to "makeCacheMatrix"
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # Returns a list of accessors: set/get for the matrix, setinv/getinv for
  # the cached inverse. The cache lives in this function's environment.
  cached_inv <- NULL

  set <- function(value) {
    # Replacing the matrix invalidates any previously cached inverse.
    x <<- value
    cached_inv <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inv) {
    cached_inv <<- inv
  }
  getinv <- function() {
    cached_inv
  }

  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function takes any instance of makeCacheMatrix and uses its
## cached value if it exists. Otherwise, it will calculate the inverse
## of the matrix and update that instance's matrix_inverse
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held by a makeCacheMatrix object,
  ## computing it at most once and reusing the cached value afterwards.
  ## Extra arguments are forwarded to solve().
  inv <- x$getinv()
  if (is.null(inv)) {
    # Cache miss: compute, store, then return the fresh inverse.
    inv <- solve(x$get(), ...)
    x$setinv(inv)
  } else {
    message("returning the cached inverse")
  }
  inv
}
|
de49b086674ef6da432d384aea750361005d62e8
|
b40ff801c83d048177b37e46ab3f4cbc60d51f30
|
/Rpackages/Rcapture/tests/testValidMath.R
|
58c95644cc0285156465b18b14abdf07db249838
|
[] |
no_license
|
murphyjames04/sablefish
|
3423d64c1d07e6a1848826604874f03bbe444cdb
|
ef790f9775c82921604c2dcb72ce6690c9c30e13
|
refs/heads/master
| 2020-05-17T09:19:51.620950
| 2013-07-23T22:18:09
| 2013-07-23T22:18:09
| 11,323,999
| 0
| 1
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 7,449
|
r
|
testValidMath.R
|
context("Mathematical validation")
test_that("the .t and .0 'closedp' and 'closedpCI' functions give the same results for the same models", {
data(hare)
res.t <- closedp.t(X=hare, dfreq=FALSE)
res.0 <- closedp.0(X=hare, dfreq=FALSE)
fct.t <- closedpCI.t(X=hare,dfreq=FALSE,m="Mh",h="Normal")
fct.0 <- closedpCI.0(X=hare,dfreq=FALSE,dtype="hist",m="Mh",h="Normal")
psi <- function(x) { 0.5^x - 1 }
matX.t <- rowSums(histpos.t(6))
Mh.t <- closedpCI.t(X=hare,dfreq=FALSE,mX=matX.t,h=psi)
matX.0 <- histpos.0(6)
Mh.0 <- closedpCI.0(X=hare,dfreq=FALSE,mX=matX.0,h=psi)
expect_that(res.0$results["M0","abundance"], equals(res.t$results["M0","abundance"]))
expect_that(res.0$results["M0","stderr"],
equals(res.t$results["M0","stderr"], tolerance=0.0001))
expect_that(res.0$results["M0","df"], equals(res.t$results["M0","df"]-(2^6-1-6)))
expect_that(res.0$results["Mh Chao (LB)","abundance"],
equals(res.t$results["Mh Chao (LB)","abundance"]))
expect_that(res.0$results["Mh Chao (LB)","stderr"],
equals(res.t$results["Mh Chao (LB)","stderr"], tolerance=0.0001))
expect_that(res.0$results["Mh Chao (LB)","df"],
equals(res.t$results["Mh Chao (LB)","df"]-(2^6-1-6)))
expect_that(res.0$results["Mh Poisson2","abundance"],
equals(res.t$results["Mh Poisson2","abundance"]))
expect_that(res.0$results["Mh Poisson2","stderr"],
equals(res.t$results["Mh Poisson2","stderr"], tolerance=0.0001))
expect_that(res.0$results["Mh Poisson2","df"],
equals(res.t$results["Mh Poisson2","df"]-(2^6-1-6)))
expect_that(res.0$results["Mh Darroch","abundance"],
equals(res.t$results["Mh Darroch","abundance"]))
expect_that(res.0$results["Mh Darroch","stderr"],
equals(res.t$results["Mh Darroch","stderr"], tolerance=0.0001))
expect_that(res.0$results["Mh Darroch","df"],
equals(res.t$results["Mh Darroch","df"]-(2^6-1-6)))
expect_that(res.0$results["Mh Gamma3.5","abundance"],
equals(res.t$results["Mh Gamma3.5","abundance"]))
expect_that(res.0$results["Mh Gamma3.5","stderr"],
equals(res.t$results["Mh Gamma3.5","stderr"], tolerance=0.0001))
expect_that(res.0$results["Mh Gamma3.5","df"],
equals(res.t$results["Mh Gamma3.5","df"]-(2^6-1-6)))
expect_that(fct.0$results[,"abundance"], equals(fct.t$results[,"abundance"]))
expect_that(fct.0$results[,"stderr"], equals(fct.t$results[,"stderr"]))
expect_that(fct.0$results[,"InfCL"], equals(fct.t$results[,"InfCL"]))
expect_that(fct.0$results[,"SupCL"], equals(fct.t$results[,"SupCL"]))
expect_that(fct.0$results[,"df"], equals(fct.t$results[,"df"]-(2^6-1-6)))
expect_that(Mh.0$results[,"abundance"], equals(Mh.t$results[,"abundance"]))
expect_that(Mh.0$results[,"stderr"], equals(Mh.t$results[,"stderr"], tolerance=0.0001))
expect_that(Mh.0$results[,"df"], equals(Mh.t$results[,"df"]-(2^6-1-6)))
expect_that(Mh.0$CI[,"abundance"], equals(Mh.t$CI[,"abundance"]))
expect_that(Mh.0$CI[,"InfCL"], equals(Mh.t$CI[,"InfCL"]))
expect_that(Mh.0$CI[,"SupCL"], equals(Mh.t$CI[,"SupCL"]))
})
test_that("'closedpCI.t' and 'closedp.t' give the same results for the same models", {
data(hare)
res <- closedp.t(X=hare)
resCI <- vector(mode="list")
resCI[[1]] <- closedpCI.t(X=hare,dfreq=FALSE,m="M0")
resCI[[2]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mt")
resCI[[3]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mh",h="Chao")
resCI[[4]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mh",h="Poisson")
resCI[[5]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mh",h="Darroch")
resCI[[6]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mh",h="Gamma")
resCI[[7]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mth",h="Chao")
resCI[[8]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mth",h="Poisson")
resCI[[9]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mth",h="Darroch")
resCI[[10]] <- closedpCI.t(X=hare,dfreq=FALSE,m="Mth",h="Gamma")
for (i in 1:10)
expect_that(res$results[i,,drop=FALSE], is_identical_to(resCI[[i]]$results))
})
test_that("the degrees of freedom are good", {
data(BBS2001)
m1 <- closedpCI.0(BBS2001,dfreq=TRUE,dtype="nbcap",t=50,m="Mh",h="Normal")
m2 <- closedpCI.0(BBS2001,dfreq=TRUE,dtype="nbcap",t=50,t0=20,m="Mh",h="Normal")
m3 <- closedpCI.0(BBS2001,dfreq=TRUE,dtype="nbcap",t=Inf,m="Mh",h="Normal")
m4 <- closedpCI.0(BBS2001,dfreq=TRUE,dtype="nbcap",t=Inf,t0=20,m="Mh",h="Normal")
tobs <- max(BBS2001[BBS2001[,2]!=0, 1])
expect_that(m1$results[,"df"], equals(50-3))
expect_that(m2$results[,"df"], equals(20-3))
expect_that(m3$results[,"df"], equals(tobs-3))
expect_that(m4$results[,"df"], equals(20-3))
})
test_that("the mX + h arguments works correctly", {
histpos <- histpos.t(3)
DarR3 <- cbind(histpos, c(72, 155, 7, 71, 13, 53, 43))
# Example avec h="Darroch"
matX <- cbind(histpos,histpos[,1]*histpos[,2],(rowSums(histpos)^2)/2)
rmX <- closedpCI.t(X=DarR3,dfreq=TRUE,mX=matX,mname="Darroch")
matX <- cbind(histpos,histpos[,1]*histpos[,2])
rmXh <- closedpCI.t(X=DarR3,dfreq=TRUE,mX=matX,h="Darroch",mname="Darroch")
expect_that(rmX$results[,"abundance"], equals(rmXh$results[,"abundance"]))
expect_that(rmX$results[,"stderr"], equals(rmXh$results[,"stderr"]))
expect_that(rmX$results[,"deviance"], equals(rmXh$results[,"deviance"]))
expect_that(rmX$results[,"df"], equals(rmXh$results[,"df"]))
# Example avec h="Chao", mais sans eta négatif fixés à zéro
matX <- cbind(histpos,histpos[,1]*histpos[,2],c(1,rep(0,6)))
rmX <- closedpCI.t(X=DarR3,dfreq=TRUE,mX=matX,mname="LB")
matX <- cbind(histpos,histpos[,1]*histpos[,2])
rmXh <- closedpCI.t(X=DarR3,dfreq=TRUE,mX=matX,h="Chao",mname="LB")
expect_that(rmX$results[,"abundance"], equals(rmXh$results[,"abundance"]))
expect_that(rmX$results[,"stderr"], equals(rmXh$results[,"stderr"]))
expect_that(rmX$results[,"deviance"], equals(rmXh$results[,"deviance"]))
expect_that(rmX$results[,"df"], equals(rmXh$results[,"df"]))
# Example avec h="Chao", avec eta négatif fixés à zéro
histpos <- histpos.t(4)
diabetes<-cbind(histpos,c(58,157,18,104,46,650,12,709,14,20,7,74,8,182,10))
matX <- cbind(histpos,histpos[,1]*histpos[,3],histpos[,2]*histpos[,4],histpos[,3]*histpos[,4])
nbcap <- rowSums(histpos)
matX_LB <- cbind(matX, pmax(nbcap-2,0)) # pmax(nbcap-3,0) enlevé car eta négatif
rmX <- closedpCI.t(X=diabetes,dfreq=TRUE,mX=matX_LB,mname="LB")
matX_LB <- cbind(matX)
rmXh <- closedpCI.t(X=diabetes,dfreq=TRUE,mX=matX_LB,h="Chao",mname="LB")
expect_that(rmX$results[,"abundance"], equals(rmXh$results[,"abundance"]))
expect_that(rmX$results[,"stderr"], equals(rmXh$results[,"stderr"]))
expect_that(rmX$results[,"deviance"], equals(rmXh$results[,"deviance"]))
expect_that(rmX$results[,"df"], equals(rmXh$results[,"df"]))
})
|
cdad7b1c881b43ff1ec40ab63cd6f3068037945a
|
c22d5d3203f3a83bbedb2029a5728a067280558d
|
/2_Statistical_modelling_I:Introduction/practicals/loglikelihoods/linear_regression.ll.R
|
92efff5ff2fb6a58ebdba800e300a13be81da6d7
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
crichgriffin/statistics-course
|
745ab897cbe646f2954136b82d12293940e000f9
|
54febc4576044b0f127b3d22bfcd9f0976db6dd8
|
refs/heads/main
| 2023-01-20T08:14:11.513858
| 2020-11-25T12:39:41
| 2020-11-25T12:39:41
| 316,202,064
| 1
| 0
|
MIT
| 2020-11-26T10:54:15
| 2020-11-26T10:54:14
| null |
UTF-8
|
R
| false
| false
| 233
|
r
|
linear_regression.ll.R
|
linear_regression.ll <- function(
  outcome,
  params = list(
    beta,
    sigma2
  ),
  # NOTE(review): the default for `params` references undefined symbols
  # beta/sigma2; it only works because R evaluates defaults lazily and
  # callers always supply params -- confirm and consider removing it.
  X
) {
  # Log-likelihood of a linear regression: outcome ~ N(X %*% beta, sigma2).
  predictor = X %*% params$beta
  # gaussian.ll is defined elsewhere in this course repository.
  gaussian.ll( outcome, params = list( mean = predictor, sigma2 = params$sigma2 ))
}
|
447e2f8a6907aae763c92f49761e80c37ed28189
|
dc1a2a89d6f02e366d31ac695d918066ad144bc8
|
/ressources/getOriginalDatabase.R
|
5d15e6432a524cca9f2c9aeb3fc2acba4a696949
|
[] |
no_license
|
absabry/instragram
|
98bea0f67f37107483b365e02c82a856d010a580
|
280686fdedf5a0ab5c1452460c14f19e3b6e5b32
|
refs/heads/master
| 2021-09-20T03:31:01.005240
| 2018-08-02T18:55:03
| 2018-08-02T18:55:03
| 115,888,313
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,076
|
r
|
getOriginalDatabase.R
|
setwd('C:\\Users\\HP\\Projet')
library(DBI)
dbname=""
host="127.0.0.1"
port=3305
password=""
user=""
dateHistorique = "2014-05-01"
dateDebut="2014-06-1"
dateFin="2014-06-30"
nbPhotosMin=1
con <- dbConnect(RMySQL::MySQL(), dbname = dbname, user=user, password=password, host=host, port=port,encoding = "latin1")
SQL=paste("SELECT instagram.dateCreation as dateCreation,
instagram.idLocation as idLocation,
instagram.idUser as idUser,
instagram_location.name as name,
instagram_location.longitude as longitude,
instagram_location.latitude as latitude
FROM instagram JOIN instagram_location ON instagram.idLocation = instagram_location.id
WHERE instagram.dateCreation BETWEEN STR_TO_DATE('",dateDebut,"','%Y-%m-%d') AND STR_TO_DATE('",dateFin,"','%Y-%m-%d')
",sep="")
data <- dbGetQuery(con, SQL)
print(paste('fin data with', nrow(data),'rows'))
SQL=paste("SELECT idUser, count(*) AS nbImages
FROM instagram
WHERE instagram.dateCreation BETWEEN STR_TO_DATE('",dateHistorique,"','%Y-%m-%d') AND STR_TO_DATE('",dateFin,"','%Y-%m-%d')
Group by idUser
HAVING nbImages>=",nbPhotosMin,";",sep="")
idUser <- dbGetQuery(con, SQL)
print(paste('fin idUser with', nrow(idUser),'rows'))
SQL=paste("SELECT idUser, count(*) AS nbImages
FROM instagram
WHERE instagram.dateCreation BETWEEN STR_TO_DATE('",dateDebut,"','%Y-%m-%d') AND STR_TO_DATE('",dateFin,"','%Y-%m-%d')
Group by idUser
HAVING nbImages>=",nbPhotosMin,";",sep="")
temp <- dbGetQuery(con, SQL)
print(paste('fin temp with', nrow(temp),'rows'))
idUser = merge(x=temp,y=idUser,by="idUser")
print(paste('fin merge idUser with', nrow(idUser),'rows'))
names(idUser)[names(idUser)=="nbImages.x"] <- "nbImages"
names(idUser)[names(idUser)=="nbImages.y"] <- "nbTotalImages"
SQL=paste("SELECT idUser,idLocation,instagram_location.name as name
FROM instagram JOIN instagram_location ON instagram.idLocation = instagram_location.id
WHERE instagram.dateCreation BETWEEN STR_TO_DATE('",dateHistorique,"','%Y-%m-%d') AND STR_TO_DATE('",dateFin,"','%Y-%m-%d')
;",sep="")
historiqueVisite <- dbGetQuery(con, SQL)
historiqueVisite = merge(x=idUser,y=historiqueVisite,by="idUser")# delete the one who arent present here
historiqueVisite$nbImages = NULL
historiqueVisite$nbTotalImages = NULL
SQL="SELECT idUser,Country,nbSejour,nbJours,nbTotal as nbPays FROM instagram_user_paris"
users <-dbGetQuery(con, SQL)
print(paste('fin users with', nrow(users),'rows'))
users = merge(x=idUser,y=users,by="idUser")# delete the one who arent present here
users$nbImages = NULL
users$nbTotalImages = NULL
write.table(data, file = "30days//instagram.csv",row.names=FALSE, na="", sep=",")
write.table(idUser, file = "30days//historiqueUsers.csv",row.names=FALSE, na="", sep=",")
write.table(users, file = "30days//users.csv",row.names=FALSE, na="", sep=",")
write.table(historiqueVisite, file = "30days//historiqueVisite.csv",row.names=FALSE, na="", sep=",")
|
688efc5d48b811789e4c59f9c4b251a94e71faa2
|
ee788a605dfd2b054cb4dc5d769728babfb5dd92
|
/man/lsh_query.Rd
|
64e18f80baa549a09b25544205c5c343ab649ad0
|
[
"MIT"
] |
permissive
|
felipegonzalez/textreuse
|
e49236ef00cf1a4a33bfbbeb28d40f2e078658da
|
789fcdae7aa76ca9c207bc0ed41ff0dcf20feb5a
|
refs/heads/master
| 2021-05-16T16:10:41.929945
| 2018-02-01T16:16:59
| 2018-02-01T16:16:59
| 119,786,401
| 0
| 0
| null | 2018-02-01T05:17:04
| 2018-02-01T05:17:04
| null |
UTF-8
|
R
| false
| true
| 1,044
|
rd
|
lsh_query.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lsh_query.R
\name{lsh_query}
\alias{lsh_query}
\title{Query a LSH cache for matches to a single document}
\usage{
lsh_query(buckets, id)
}
\arguments{
\item{buckets}{An \code{lsh_buckets} object created by \code{\link{lsh}}.}
\item{id}{The document ID to find matches for.}
}
\value{
An \code{lsh_candidates} data frame with matches to the document specified.
}
\description{
This function retrieves the matches for a single document from an \code{lsh_buckets} object created by \code{\link{lsh}}. See \code{\link{lsh_candidates}} to rerieve all pairs of matches.
}
\examples{
dir <- system.file("extdata/legal", package = "textreuse")
minhash <- minhash_generator(200, seed = 235)
corpus <- TextReuseCorpus(dir = dir,
tokenizer = tokenize_ngrams, n = 5,
minhash_func = minhash)
buckets <- lsh(corpus, bands = 50)
lsh_query(buckets, "ny1850-match")
}
\seealso{
\code{\link{lsh}}, \code{\link{lsh_candidates}}
}
|
7766b8d3e48f2b046ee5de9ed665da3d6269d58d
|
cf2d25a2bf4cdc94eec42b92329f72d988f42cc4
|
/bcw-RocchioClusteringSVM.R
|
7782c8f5e44f67edb850290fe2653c788227efab
|
[] |
no_license
|
sneha123456789/data-scooping
|
547c75bf37f8b01c5e262c287c792169f6a8ad8e
|
8eb04ad5ba931f49e58c97db3ed94986ec589ec0
|
refs/heads/master
| 2021-05-07T15:05:32.287846
| 2017-12-19T04:36:35
| 2017-12-19T04:36:35
| 109,973,333
| 0
| 0
| null | 2017-11-08T12:31:25
| 2017-11-08T12:31:24
| null |
UTF-8
|
R
| false
| false
| 3,992
|
r
|
bcw-RocchioClusteringSVM.R
|
source("bcw-RocchioSVM.R")
bcw.getReliableNegativeWithRocchioClustering <- function(bcw.PS, bcw.US) {
  # Refine the Rocchio reliable-negative set by clustering it (k-means) and
  # rebuilding per-cluster Rocchio prototypes; RN points more similar to a
  # positive prototype than to every negative one are pushed back to unlabeled.
  # rocLabel encoding: 4 = positive, 2 = reliable negative, -1 = unlabeled.
  bcw.data <- bcw.getReliableNegativeWithRocchio(bcw.PS, bcw.US)
  ## Split into the sets
  bcw.PS <- bcw.data[bcw.data$rocLabel == 4, ]
  bcw.RN <- bcw.data[bcw.data$rocLabel == 2, ]
  bcw.US <- bcw.data[bcw.data$rocLabel == -1, ]
  ## k = 10, from the paper: "choice of k does not affect
  ## classification results much as long as it is not too small"
  bcw.RN.fit <- kmeans(bcw.RN[, bcw.features], 10)
  bcw.RN$cluster <- bcw.RN.fit$cluster
  # Per-cluster Rocchio prototype vectors (one row per cluster).
  rocCluster.positiveVectors <- data.frame(
    "V1"=numeric(0), "V2"=numeric(0), "V3"=numeric(0),
    "V4"=numeric(0), "V5"=numeric(0), "V6"=numeric(0),
    "V7"=numeric(0), "V8"=numeric(0), "V9"=numeric(0))
  rocCluster.negativeVectors <- data.frame(
    "V1"=numeric(0), "V2"=numeric(0), "V3"=numeric(0),
    "V4"=numeric(0), "V5"=numeric(0), "V6"=numeric(0),
    "V7"=numeric(0), "V8"=numeric(0), "V9"=numeric(0))
  for (j in 1:10) {
    rocCluster.positiveVectors <- rbind(
      rocCluster.positiveVectors,
      bcw.rocchioVectorBuilder(
        bcw.PS,
        bcw.RN[bcw.RN$cluster == j, ]))
    rocCluster.negativeVectors <- rbind(
      rocCluster.negativeVectors,
      bcw.rocchioVectorBuilder(
        bcw.RN[bcw.RN$cluster == j, ],
        bcw.PS))
  }
  colnames(rocCluster.positiveVectors) <- bcw.features
  colnames(rocCluster.negativeVectors) <- bcw.features
  bcw.RN$rocLabel <- 0
  # Re-label each RN row: 2 if some negative prototype beats the best
  # positive similarity, otherwise 4 (returned to the candidate-positive set).
  for (i in 1:nrow(bcw.RN)) {
    temp.row <- bcw.RN[i, ]  # NOTE(review): assigned but never used below
    temp.pSim <- numeric(0)
    temp.nSim <- numeric(0)
    for (j in 1:10) {
      temp.pSim <- c(temp.pSim, sum(rocCluster.positiveVectors[j, ] * bcw.RN[i , bcw.features]))
    }
    # Best (maximum) similarity to any positive prototype.
    temp.pSim <- max(temp.pSim)
    for (j in 1:10) {
      temp.nSim <- sum(rocCluster.negativeVectors[j, ] * bcw.RN[i , bcw.features])
      if (temp.nSim > temp.pSim) {
        bcw.RN[i, ]$rocLabel <- 2
        break
      } else {
        bcw.RN[i, ]$rocLabel <- 4
      }
    }
  }
  # NOTE(review): at this point bcw.RN still carries a $cluster column while
  # bcw.US does not; rbind() of data.frames with mismatched columns errors
  # in base R -- confirm this path is exercised / intended.
  bcw.US <- rbind(bcw.US, bcw.RN[bcw.RN$rocLabel == 4, ])
  bcw.RN <- bcw.RN[bcw.RN$rocLabel == 2, ]
  bcw.RN$cluster <- NULL
  bcw.PS$rocLabel <- 4
  bcw.US$rocLabel <- -1
  bcw.RN$rocLabel <- 2
  return(rbind(bcw.PS, bcw.RN, bcw.US))
}
bcw.getRocCluSvmClassifier <- function(bcw.PS, bcw.US) {
  # PU-learning classifier: Rocchio + clustering selects reliable negatives,
  # then an SVM is refined iteratively until no unlabeled point is still
  # predicted negative. Label encoding: 4 = positive, 2 = negative, -1 = unlabeled.
  bcw.data <- bcw.getReliableNegativeWithRocchioClustering(bcw.PS, bcw.US)
  bcw.data$label <- bcw.data$rocLabel
  bcw.data$rocLabel <- NULL
  bcw.PS <- bcw.data[bcw.data$label == 4, ]   # positives
  bcw.RN <- bcw.data[bcw.data$label == 2, ]   # reliable negatives
  bcw.US <- bcw.data[bcw.data$label == -1, ]  # still unlabeled
  ## Build initial classifier
  classifier.svm.0 <- svm(label ~ V1+V2+V3+V4+V5+V6+V7+V8+V9,
                          data = rbind(bcw.PS, bcw.RN),
                          type = "C-classification")
  ## Enter loop to build classifier iteratively
  classifier.svm.i <- classifier.svm.0
  classifier.i <- 0
  while (TRUE) {
    classifier.i <- classifier.i + 1
    bcw.US$label <- predict(classifier.svm.i, bcw.US)
    bcw.w <- bcw.US[bcw.US$label == 2, ]  # unlabeled points now predicted negative
    if (nrow(bcw.w) == 0) {
      # Converged: no more unlabeled points migrate to the negative set.
      break
    } else {
      bcw.US <- bcw.US[bcw.US$label == 4, ]
      bcw.RN <- rbind(bcw.RN, bcw.w)
      ## Build new classifier
      classifier.svm.i <- svm(label ~ V1+V2+V3+V4+V5+V6+V7+V8+V9,
                              data = rbind(bcw.PS, bcw.RN),
                              type = "C-classification")
    }
  }
  ## Additional step: Use final classifier to check PS
  bcw.PS$svmlabel <- predict(classifier.svm.i, bcw.PS)
  negativeCount <- nrow(bcw.PS[bcw.PS$svmlabel == 2 , ])
  ## Selecting final classifier: fall back to the initial SVM when the final
  ## one predicts more than 5% of the known positives as negative.
  if (negativeCount / nrow(bcw.PS) > 0.05) {
    classifier.svm <- classifier.svm.0
  } else {
    classifier.svm <- classifier.svm.i
  }
  return (classifier.svm)
}
|
387c88a1d567d97668816b44b0b17068f6f594b3
|
a7b53116bf28e416e6bcf470dd8cc4d60da905a2
|
/mixture_model.r
|
0d70cf367536791f59472442f80f0cd491063ed9
|
[] |
no_license
|
IanMadlenya/Bayesian-Data-Analysis
|
6af1c7216e82e80cf9ea41b6e024df4624be519d
|
255ab88392918379fa241c3362d559a73cf671b6
|
refs/heads/master
| 2021-01-25T13:46:55.660504
| 2017-11-20T05:07:24
| 2017-11-20T05:07:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,163
|
r
|
mixture_model.r
|
## Two-component mixture of regressions: component 1 is linear (quadratic
## coefficient fixed at 0), component 2 is quadratic. Fit by alternating a
## MAP optimisation with random-walk Metropolis sampling, hard-reassigning
## the component indicators Z between rounds.
set.seed(666)
## NOTE(review): setwd() makes the script machine-specific; kept as-is from
## the original analysis.
setwd("C:/Users/Wei/Documents/Purdue STAT 695 Bayesian Data Analysis/HW5")
data <- read.csv(file="mix_reg.txt", header=TRUE)
## Sort observations by x so the fitted quantile bands plot as clean lines.
X <- data$x
y <- data$y[order(X)]
X <- sort(X)
n <- nrow(data)
X <- cbind(1, X, X^2)  # design matrix: intercept, x, x^2
### Part 1
shape <- 1
scale <- 1000
std <- 1000  # prior sd of the regression coefficients
library(invgamma)
sigmoid <- function(x) 1 / (1 + exp(-x))
## Log prior of the unconstrained parameter vector:
##   pars[1:5] regression coefficients (normal),
##   pars[6:7] log standard deviations (inverse-gamma on the exp scale),
##   pars[8]   logit of the mixture weight (flat Beta(1, 1)).
## NOTE(review): dinvgamma() is given both `rate` and `scale`; invgamma
## defines scale = 1/rate, so passing both is redundant at best — confirm
## which argument the installed version honours.
log_priors <- function(pars) {
  sum(dnorm(pars[1:5], mean = 0, sd = std, log = TRUE)) +
    sum(dinvgamma(exp(pars[6:7]), shape, rate = 1, scale = scale, log = TRUE)) +
    dbeta(sigmoid(pars[8]), 1, 1, log = TRUE)
}
## Complete-data log likelihood given the current indicators Z
## (Z == 1 -> component 1, whose quadratic coefficient is fixed at 0).
log_likelihood <- function(pars) {
  mu1 <- c(pars[1:2], 0)
  mu2 <- pars[3:5]
  sd1 <- exp(pars[6])  # sds are stored on the log scale in `pars`
  sd2 <- exp(pars[7])
  lambda <- sigmoid(pars[8])
  sum((dnorm(y, mean = X %*% mu1, sd = sd1, log = TRUE) + log(lambda)) * Z) +
    sum((dnorm(y, mean = X %*% mu2, sd = sd2, log = TRUE) + log(1 - lambda)) * (1 - Z))
}
log_posterior <- function(pars) log_priors(pars) + log_likelihood(pars)
pars <- rep(0.5, 8)
Z <- rbinom(n, 1, 0.5)  # random initial component assignment
burnIn <- 1000
iterations <- 2 * burnIn
log_posterior(pars)
for (i in 1:10) {
  ## MAP step for the current Z, then random-walk Metropolis around the mode.
  optimal <- optim(pars, log_posterior, control = list(fnscale = -1), hessian = TRUE)
  pars <- optimal$par
  log_posterior_raw <- log_posterior(pars)
  chains <- array(dim = c(iterations + 1, 8))
  chains[1, ] <- pars
  for (j in 1:iterations) {
    proposal <- chains[j, ] + rnorm(8, sd = 0.1)
    ## Work on the log scale to avoid overflow; the proposal is symmetric.
    log_acceptance_prob <- log_posterior(proposal) - log_posterior(chains[j, ])
    chains[j + 1, ] <- chains[j, ]
    if (log(runif(1)) < log_acceptance_prob)
      chains[j + 1, ] <- proposal
  }
  pars_draws <- chains[-(1:burnIn), ]
  print(paste(i, "th round: ", "Acceptance rate", round(nrow(unique(chains)) / nrow(chains), 4)))
  pars <- tail(pars_draws, 1)
  ## Hard reassignment: each point joins the component with higher density.
  Z <- dnorm(y, X %*% c(pars[1:2], 0), sd = exp(pars[6])) >
    dnorm(y, X %*% pars[3:5], sd = exp(pars[7]))
  Z <- as.numeric(Z)
  log_posterior_update <- log_posterior(pars)
  print(c(log_posterior_raw, log_posterior_update))
}
plot(X[, 2] * Z, y * Z, ylim = c(-10, 60), col = "red", pch = 19)
points(X[, 2] * (1 - Z), y * (1 - Z), col = "black", pch = 15)
## Posterior-predictive 5% / 50% / 95% bands for both components.
qband <- array(NA, c(200, 2, 3))  # renamed from `qt` (shadowed stats::qt)
## Move the sd draws from the log scale back to the sd scale.
pars_draws[, 6] <- exp(pars_draws[, 6])
pars_draws[, 7] <- exp(pars_draws[, 7])
## Hoisted out of the loop: none of this depends on i.
## BUG FIX: exp(pars[6]) already IS the sd used in the likelihood, so the
## original sqrt() was a double transformation; and the second component
## must use column 7, not column 6 (copy-paste error).
beta1 <- cbind(pars_draws[, 1:2], 0)
sd1 <- pars_draws[, 6]
beta2 <- pars_draws[, 3:5]
sd2 <- pars_draws[, 7]
for (i in 1:200) {
  y_samples <- beta1 %*% X[i, ] + rnorm(burnIn + 1, sd = sd1)
  qband[i, 1, ] <- quantile(y_samples, c(0.05, 0.5, 0.95))
  y_samples <- beta2 %*% X[i, ] + rnorm(burnIn + 1, sd = sd2)
  qband[i, 2, ] <- quantile(y_samples, c(0.05, 0.5, 0.95))
}
plot(X[, 2], y, xlab = 'X', ylab = 'y')
lines(X[, 2], qband[, 1, 1], col = 'red')
lines(X[, 2], qband[, 1, 2], col = 'red')
lines(X[, 2], qband[, 1, 3], col = 'red')
lines(X[, 2], qband[, 2, 1], col = 'blue')
lines(X[, 2], qband[, 2, 2], col = 'blue')
lines(X[, 2], qband[, 2, 3], col = 'blue')
|
b122bc2314c62926b51579501910940f62a9b5f9
|
00297979d896712ad90a266af4b7da15657583d0
|
/SetGoalsStatistics/ads/ad-analysis.R
|
c9100b206139e190b88f118777e2b36cc957f111
|
[] |
no_license
|
Jeremywhiteley/SetGoals
|
ac7e72fb8bc765fd70e698d6288aad0a76ddd3f8
|
e5692ea164371ec063dda458d6fb2b5ba3e87af2
|
refs/heads/master
| 2020-07-09T20:38:47.180434
| 2019-06-01T12:24:46
| 2019-06-01T12:24:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,290
|
r
|
ad-analysis.R
|
library(dplyr)
### Functions
## Append a CostPerConvUSD column converted from a NOK cost vector.
##
## matrix  - data.frame (or matrix) to extend; returned with one new column.
## nokCost - numeric cost(s) in NOK, one per row of `matrix`.
## rate    - NOK -> USD exchange rate; generalized from the hard-coded
##           90-day average (1 NOK = 0.11604 USD, 20.05.19,
##           source: https://www.xe.com/currencyconverter/convert/?Amount=1&From=NOK&To=USD)
##           so other dates/rates can be used without editing the function.
AddCostPerConvUsd <- function(matrix, nokCost, rate = 0.11604){
  CostPerConvUSD <- round(nokCost * rate, digits = 2)
  names(CostPerConvUSD) <- "CostPerConvUSD"
  matrix <- cbind(matrix, CostPerConvUSD)
  return(matrix)
}
## Convert a character/factor vector of comma-grouped numbers ("1,234")
## to numeric by stripping the thousands separators.
## Fix: the original function ended with an assignment, which returns its
## value invisibly; return the result directly instead.
NumCharWithCommaToNum <- function(column){
  as.numeric(gsub(",", "", as.character(column)))
}
### Campaign Overview
campaigns <- read.csv('ads/campaign-overview-190531.csv',skip=2)
## Drop the first seven (unused) columns in one step instead of seven
## repeated `campaigns <- campaigns[-1]` calls.
campaigns <- campaigns[-(1:7)]
# Rename some colums that are problematic in pgfplotstable in LaTeX.
colnames(campaigns)[8] <- "AvgCPC"
colnames(campaigns)[2] <- "CostPerConvNOK"
colnames(campaigns)[3] <- "ConvRate"
# Remove %, since the pgfplotstable in LaTeX wont show values after "%"
campaigns$ConvRate <- gsub("%", "", campaigns$ConvRate)
campaigns$CTR <- gsub("%", "", campaigns$CTR)
#campaigns <- campaigns[order(campaigns$Conversions, decreasing = TRUE),]
campaigns <- AddCostPerConvUsd(campaigns, campaigns$CostPerConvNOK)
campaigns$Impressions <- NumCharWithCommaToNum(campaigns$Impressions)
campaigns$Clicks <- NumCharWithCommaToNum(campaigns$Clicks)
campaigns <- campaigns[-c(7,8), ]
## NOTE(review): the date vectors below have 6 elements while the audience /
## platform / name vectors have 5; confirm all of them match nrow(campaigns)
## before the cbind(), otherwise R will recycle or error.
campaignStartDates <- c("24.04.19", "29.04.19","05.05.19", "06.05.19", "23.05.19", "24.04.19")
names(campaignStartDates) <- "Start Date"
campaignEndDates <- c("30.04.19", "30.04.19", "06.05.19", "09.05.19","26.05.19", "26.05.19")
names(campaignEndDates) <- "End Date"
campaignAudience <- c("World-wide", "World-wide", "Norway", "Europe", "World-wide")
names(campaignAudience) <- "Target Audience"
campaignTarget <- c("Android", "iOS", "Android", "Android", "Android")
## BUG FIX: the original reassigned names(campaignAudience) here, clobbering
## its "Target Audience" label; the "Platform" label belongs to campaignTarget.
names(campaignTarget) <- "Platform"
campaignNames <- c("And1", "iOS1", "And2", "And3", "And4")
names(campaignNames) <- "Name"
campaigns <- cbind(campaigns, campaignStartDates, campaignEndDates, campaignAudience, campaignTarget,campaignNames)
write.csv(campaigns, "ads/campaign-overview-dates-added-190531.csv", quote = FALSE)
|
085c542c62ea883dfeb608a63f813de6cfea3249
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/miceadds/examples/mice.impute.2lonly.function.Rd.R
|
97ed6047b1630441c7d909d19217625ee1687dba
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,272
|
r
|
mice.impute.2lonly.function.Rd.R
|
library(miceadds)
### Name: mice.impute.2lonly.function
### Title: Imputation at Level 2 (in 'miceadds')
### Aliases: mice.impute.2lonly.function
### ** Examples
## Not run:
##D #############################################################################
##D # EXAMPLE 1: Imputation of level 2 variables
##D #############################################################################
##D
##D #**** Simulate some data
##D # x,y ... level 1 variables
##D # v,w ... level 2 variables
##D
##D set.seed(987)
##D G <- 250 # number of groups
##D n <- 20 # number of persons
##D beta <- .3 # regression coefficient
##D rho <- .30 # residual intraclass correlation
##D rho.miss <- .10 # correlation with missing response
##D missrate <- .50 # missing proportion
##D y1 <- rep( stats::rnorm( G, sd=sqrt(rho)), each=n ) + stats::rnorm(G*n, sd=sqrt(1-rho))
##D w <- rep( round( stats::rnorm(G ), 2 ), each=n )
##D v <- rep( round( stats::runif( G, 0, 3 ) ), each=n )
##D x <- stats::rnorm( G*n )
##D y <- y1 + beta * x + .2 * w + .1 * v
##D dfr0 <- dfr <- data.frame( "group"=rep(1:G, each=n ), "x"=x, "y"=y,
##D "w"=w, "v"=v )
##D dfr[ rho.miss * x + stats::rnorm( G*n, sd=sqrt( 1 - rho.miss ) ) <
##D stats::qnorm(missrate), "y" ] <- NA
##D dfr[ rep( stats::rnorm(G), each=n ) < stats::qnorm(missrate), "w" ] <- NA
##D dfr[ rep( stats::rnorm(G), each=n ) < stats::qnorm(missrate), "v" ] <- NA
##D
##D #* initial predictor matrix and imputation methods
##D predM <- mice::make.predictorMatrix(data=dat)
##D impM <- mice::make.method(data=dat)
##D
##D #...
##D # multilevel imputation
##D predM1 <- predM
##D predM1[c("w","v","y"),"group"] <- c(0,0,-2)
##D predM1["y","x"] <- 1 # fixed x effects imputation
##D impM1 <- impM
##D impM1[c("y","w","v")] <- c("2l.continuous", "2lonly.function", "2lonly.function" )
##D # define imputation functions
##D imputationFunction <- list( "w"="sample", "v"="pmm5" )
##D # define cluster variable
##D cluster_var <- list( "w"="group", "v"="group" )
##D
##D # impute
##D imp1 <- mice::mice( as.matrix(dfr), m=1, predictorMatrix=predM1, method=impM1, maxit=5,
##D imputationFunction=imputationFunction, cluster_var=cluster_var )
## End(Not run)
|
8dfc630b2e903367b1dcfa69386082b257622978
|
051880099402393c9249d41526a5ac162f822f8d
|
/man/tg.sample.Rd
|
d5b921d4478e08b91fb8a7f529288820c82df500
|
[
"MIT"
] |
permissive
|
bbTomas/rPraat
|
cd2b309e39e0ee784be4d83a980da60946f4c822
|
4c516e1309377e370c7d05245f6a396b6d4d4b03
|
refs/heads/master
| 2021-12-13T19:32:38.439214
| 2021-12-09T18:42:48
| 2021-12-09T18:42:48
| 54,803,225
| 21
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 310
|
rd
|
tg.sample.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rpraat_sampleData.R
\name{tg.sample}
\alias{tg.sample}
\title{tg.sample}
\usage{
tg.sample()
}
\value{
TextGrid
}
\description{
Returns sample TextGrid.
}
\examples{
tg <- tg.sample()
tg.plot(tg)
}
\seealso{
\code{\link{tg.plot}}
}
|
6b0e7c40f3ba46ca294a8f79c6c29b93d0cd0ce9
|
af77cc9ccadb9cf4d451831fdd07abe13503a879
|
/yelp/wekafiles/packages/RPlugin/mlr/mlr/R/task.desc.r
|
c07e5d90ac273a7389ef1d6f00d8c4ec3f4be48d
|
[] |
no_license
|
tummykung/yelp-dataset-challenge
|
7eed6a4d38b6c9c90011fd09317c5fa40f9bc75c
|
84f12682cba75fa4f10b5b3484ce9f6b6c8dad4a
|
refs/heads/master
| 2021-01-18T14:10:55.722349
| 2013-05-21T09:30:37
| 2013-05-21T09:30:37
| 9,527,545
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,073
|
r
|
task.desc.r
|
#' @include object.r
roxygen()
#' Description object for task.
#'
#' Getter.\cr
#'
#' \describe{
#' \item{id [string]}{Id string of task.}
#' \item{label [string]}{Label string of task.}
#' \item{is.classif [boolean]}{Classification task?}
#' \item{is.regr [boolean]}{Regression task?}
#' \item{has.weights [boolean]}{Are weights available in task for covariates?}
#' \item{has.blocking [boolean]}{Is blocking available in task for observations?}
#' \item{costs [matrix]}{Cost matrix, of dimension (0,0) if not available.}
#' \item{positive [string]}{Positive class label for binary classification, NA else.}
#' \item{negative [string]}{Negative class label for binary classification, NA else.}
#' }
#' @exportClass task.desc
#' @title Description object for task.
# S4 class holding a task's descriptive metadata: the concrete task class
# name ("classif.task" / "regr.task") plus a free-form property list.
setClass(
  "task.desc",
  contains = c("object"),
  representation = representation(
    task.class = "character",
    props = "list"
  )
)
#' @rdname task.desc-class
# Read-only getter dispatched through `[`: "is.classif"/"is.regr" are
# computed from the stored task class, every other documented key is looked
# up in the props list, and anything unknown falls through to the parent
# `[` method via callNextMethod().
setMethod(
  f = "[",
  signature = signature("task.desc"),
  def = function(x,i,j,...,drop) {
    if (i == "is.classif")
      return(x@task.class == "classif.task")
    if (i == "is.regr")
      return(x@task.class == "regr.task")
    if (i == "id")
      return(x@props$id)
    if (i == "label")
      return(x@props$label)
    if (i == "has.weights")
      return(x@props$has.weights)
    if (i == "has.blocking")
      return(x@props$has.blocking)
    if (i == "costs")
      return(x@props$costs)
    if (i == "positive")
      return(x@props$positive)
    if (i == "negative")
      return(x@props$negative)
    callNextMethod()
  }
)
#' Constructor.
# Fills the task.class slot and copies every remaining argument into the
# props list under its own name.
setMethod(
  f = "initialize",
  signature = signature("task.desc"),
  def = function(.Object, task.class, id, label, has.weights, has.blocking, costs, positive, negative) {
    .Object@task.class = task.class
    .Object@props$id = id
    .Object@props$label = label
    .Object@props$has.weights = has.weights
    .Object@props$has.blocking = has.blocking
    .Object@props$costs = costs
    .Object@props$positive = positive
    .Object@props$negative = negative
    return(.Object)
  }
)
|
3791f7c5a3204e7294309614748f9322880bf261
|
35709bafc00f6e6b03d730f39fe5e9523883d581
|
/GBM&RFcv.R
|
7e8feeb4e849658b23dc01b15c8594c8d9ff5ecc
|
[] |
no_license
|
hzz1989118/R-ETS-RandomForest
|
0874121d4eb4a4dea4f185c9b836dd0085a8460d
|
7eda3d636c45249d68878e4b31e954258fa38a72
|
refs/heads/master
| 2020-03-19T03:48:35.120999
| 2018-06-01T22:29:35
| 2018-06-01T22:29:35
| 135,765,017
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,132
|
r
|
GBM&RFcv.R
|
#####################
### Votat_group #####
#####################
## Tunes a GBM and a random forest (via caret repeated 10-fold CV, ROC
## metric) on the cleandat_cate data set, after dropping a fixed list of
## process-indicator variables.
## NOTE(review): caret, doParallel and the gbm/randomForest backends are
## assumed to be attached by the calling environment — no library() calls
## appear in this file; confirm before running standalone.
cleandat_cate <-readRDS("C:/Users/Zhuangzhuang/Downloads/cleandat_cate.rds")
deletedVar <- c("AA", "SA", "SDD", "SR", "SRA", "RR", "ARR", "RRA",
                "DDD", "RA", "AR", "DA", "AD", "AE", "DE",
                "AAE", "ARE", "DDA", "ADE", "DDE", "RDD", "RD", "ADR",
                "DrawRightatFirst", "num_VOTAT")
## Drop the listed columns (located by name) plus columns 81 and 82.
cleandat_cate_reduced <- cleandat_cate[,-c(apply(as.matrix(deletedVar), 1,
                                                 function(x){which(colnames(cleandat_cate) == x)}),81,82)]
##################### GBM ###########################
## 30/70 stratified split on column 6 (the class label); columns 9:61 are
## the predictors.
set.seed(226)
inTrain.GBT <- createDataPartition(cleandat_cate_reduced[,6], p = .3, list = F)
trainVar.GBT <- cleandat_cate_reduced[,c(9:61)][inTrain.GBT,]
testVar.GBT <- cleandat_cate_reduced[,c(9:61)][-inTrain.GBT,]
trainClass.GBT <- as.factor(cleandat_cate_reduced[,6])[inTrain.GBT]
testClass.GBT <- as.factor(cleandat_cate_reduced[,6])[-inTrain.GBT]
## NOTE(review): modern caret requires n.minobsinnode in a gbm tuning grid;
## this grid omits it — confirm against the caret version in use.
GBTgrid <- expand.grid(n.trees = c(50,100,300),
                       interaction.depth = c(1, c(1:6)*2),
                       shrinkage = c(0.001, 0.01, 0.1, 1))
GBTControl <- trainControl(method = "repeatedcv",
                           number = 10,
                           repeats = 3,
                           summaryFunction = twoClassSummary,
                           classProbs = T)
## Parallel backend: 8 workers for the CV resamples.
cl <- makeCluster(8)
registerDoParallel(cl)
system.time(GBTfit2 <- train(x = trainVar.GBT, y = trainClass.GBT,
                             method = "gbm", trControl = GBTControl,
                             tuneGrid = GBTgrid , verbose = F,
                             metric = c("ROC")))
stopCluster(cl)
plot(GBTfit2)
plot(varImp(GBTfit2), top = 20)
saveRDS(GBTfit2, file = "C:/work/ETS/2015Sintern/R/GBTfit2.rds")
## Hold-out evaluation of the tuned GBM.
pred2 <- predict(GBTfit2, testVar.GBT)
confusionMatrix(pred2, testClass.GBT)
getTrainPerf(GBTfit2)
#######################################################
##################### RF ###########################
## Same protocol for a random forest, tuning only mtry. A fresh seed and
## partition are drawn, so the RF test set differs from the GBM one.
set.seed(1109)
inTrain.RF <- createDataPartition(cleandat_cate_reduced[,6], p = .3, list = F)
trainVar.RF <- cleandat_cate_reduced[,c(9:61)][inTrain.RF,]
testVar.RF <- cleandat_cate_reduced[,c(9:61)][-inTrain.RF,]
trainClass.RF <- as.factor(cleandat_cate_reduced[,6])[inTrain.RF]
testClass.RF <- as.factor(cleandat_cate_reduced[,6])[-inTrain.RF]
RFgrid <- expand.grid("mtry" = c(3, 7, 14))
RFControl <- trainControl(method = "repeatedcv",
                          number = 10,
                          repeats = 3,
                          summaryFunction = twoClassSummary,
                          classProbs = T)
cl <- makeCluster(8)
registerDoParallel(cl)
system.time(RFfit1 <- train(x = trainVar.RF, y = trainClass.RF,
                            method = "rf", trControl = RFControl,
                            tuneGrid = RFgrid , verbose = F,
                            metric = c("ROC")))
stopCluster(cl)
plot(RFfit1)
plot(varImp(RFfit1), top = 20)
saveRDS(RFfit1, file = "C:/work/ETS/2015Sintern/R/RFfit1.rds")
|
b303f069e7937cbabd2e28d3308affd970eac6ce
|
0e7c5a92009f315e4eb88e416587e6c369097f68
|
/plot1.R
|
5bea85e4728872f54054fe58da3659af85fc47cd
|
[] |
no_license
|
kierlan/ExData_Plotting1
|
011966fc4a1cd0e20de76b822fa0e130506fd935
|
a5dbb95fe83629405867a3f8bb64d4195698b734
|
refs/heads/master
| 2021-01-22T13:17:51.254008
| 2014-05-11T19:44:46
| 2014-05-11T19:44:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
r
|
plot1.R
|
## plot1.R -- histogram of household Global Active Power.
##
## Prerequisites:
##   1. Download the data:
##      https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
##   2. Unzip it into the working folder, yielding
##      'household_power_consumption.txt'.

## Load the full semicolon-separated data set.
power <- read.csv("./household_power_consumption.txt", sep=";")

## Keep only the two days of interest: 1/2/2007 and 2/2/2007.
twoDays <- power[(power$Date=="1/2/2007" | power$Date=="2/2/2007"),]

## Global_active_power is read as a factor; convert it to numeric.
gap <- as.numeric(as.character(twoDays$Global_active_power))

## Render the red histogram straight to a 480x480 PNG, then close the device.
png(filename="plot1.png",width=480,height=480)
hist(gap,col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off()
|
da86e2096c34ceeec466f6e46c8aa00c204a2329
|
a90d70d6762234d76978856e18043c6b939f7def
|
/코드/4.모델적합.R
|
bbda74fcd9cee1163becb07f684f17516ae5a953
|
[] |
no_license
|
changyong93/project_Analysis-of-small_business-Data
|
82e18868d2aa8e3280092e20ebdfe35b7baed0fe
|
edc25b62bccf7244cbd4dd046d65dcee970385f4
|
refs/heads/main
| 2023-03-30T04:59:18.270955
| 2021-03-30T19:00:35
| 2021-03-30T19:00:35
| 331,337,366
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,219
|
r
|
4.모델적합.R
|
rm(list = ls())
library(tidyverse)
#파일 불러오기
setwd("C:/Users/ChangYong/Desktop/나노디그리/1.정규강의 학습자료/1차 프로젝트/소상공인/2. 데이터")
load("dataset_set.rda")
#다중선형회귀분석 모델
#모델 적합
vars10_20 <- c(vars_10up[(vars_10up %in% vars_20up)==F])
loc10_20 <- which(colnames(trainset) %in% vars10_20)
#상관계수가 0.2미만 제거한 데이터셋 생성
trainset2 <- trainset[,-loc10_20]
#모델 적합 1차
fit1 <- lm(formula = 매출총액~.,data = trainset) #모든 입력변수
fit2 <- lm(formula = 매출총액~.,data = trainset2) #상관계수가 0.2미만 제거
summary(fit1)
summary(fit2)
#1차 적합 시 회귀계수가 NA인 feature 제거
vars <- c("중분류")
loc1 <- which(colnames(trainset) %in% vars)
loc2 <- which(colnames(trainset2) %in% vars)
#P-value가 NA 컬럼 제거 후 재적합
trainset1_2 <- trainset[,-loc1] ; trainset2_2 <- trainset2[,-loc2]
fit1_2 <- lm(formula = 매출총액~., data = trainset1_2)
fit2_2 <- lm(formula = 매출총액~., data = trainset2_2)
summary(fit1_2)
summary(fit2_2)
#변수소거법을 통한 모델 적합
null <- lm(formula = 매출총액~1.,data = trainset)
full <- lm(formula = 매출총액~.,data = trainset)
fit11 <- step(object = null,
scope = list(lower = null, upper = full),
direction = "both") #stepwise를 통한 단계적 변수 선택
null <- lm(formula = 매출총액~1.,data = trainset2)
full <- lm(formula = 매출총액~.,data = trainset2)
fit22 <- step(object = null,
scope = list(lower = null, upper = full),
direction = "both") #stepwise를 통한 단계적 변수 선택
summary(fit11)
summary(fit22)
#다중공산성 및 아웃라이어 처리
## Iteratively remove multicollinear predictors (VIF > 2) and Bonferroni
## outlier rows (p < 0.05) until the fitted lm is stable.
##
## dataset        - data.frame whose target column is 매출총액 (total sales).
## stepwise_uages - 0: fit lm on all predictors; otherwise: stepwise
##                  selection between the null and full model. (Parameter
##                  name kept as-is for interface compatibility; "usages"
##                  was presumably intended.)
## Returns list(model, removed_variable_names).
vif_outlier_test <- function(dataset,stepwise_uages){
  library(car)
  dataset_backup <- dataset
  ## Outer loop: refit from the (outlier-pruned) backup until the outlier
  ## test comes back empty.
  repeat{
    dataset <- dataset_backup
    name_list <- c()
    name_list_backup <- c()
    ## Inner loop: drop the highest-VIF predictor above 2, one per pass.
    repeat{
      if(stepwise_uages ==0){
        model = lm(formula = 매출총액~., data = dataset)
      } else {
        null <- lm(formula = 매출총액~1.,data = dataset)
        full <- lm(formula = 매출총액~.,data = dataset)
        model <- step(object = null,
                      scope = list(lower = null, upper = full),
                      direction = "both")
      }
      ## length > 4 presumably distinguishes the GVIF matrix case (factor
      ## predictors), where column 3 is GVIF^(1/(2*Df)) — TODO confirm.
      if(length(vif(mod = model))>4){
        vif_list <- vif(mod = model)[,3]
      } else {
        vif_list = vif(mod = model)
      }
      name <- names(which.max(vif_list[vif_list>2]))
      name_list <- c(name_list,name)
      data_list = list(model,name_list)
      ## Converged when no additional variable was flagged this pass.
      if(length(name_list) == length(name_list_backup)) break
      name_list_backup <- name_list
      loc <- which(colnames(dataset) %in% name_list)
      dataset <- dataset[-loc]
    }
    ## Bonferroni outlier test on the final model of this pass; drop the
    ## flagged rows from the backup and start over.
    outliers <- outlierTest(model = model)
    outliers <- as.integer(names(outliers$bonf.p[outliers$bonf.p<0.05]))
    if(length(outliers)==0) break
    dataset_backup <- dataset_backup %>% slice(-outliers)
  }
  return(data_list)
}
fit1_2_list <- vif_outlier_test(dataset = trainset1_2,stepwise_uages = 0)
fit11_list <- vif_outlier_test(dataset = trainset,stepwise_uages = 1)
fit2_2_list <- vif_outlier_test(dataset = trainset2_2,stepwise_uages = 0)
fit22_list <- vif_outlier_test(dataset = trainset2,stepwise_uages = 1)
#다중공산성 변수 확인
fit1_2_list[2] ; fit11_list[2]
fit2_2_list[2] ; fit22_list[2]
#다중공산성 컬럼을 제거한 후 적합한 모델
fit1_2 <- fit1_2_list[[1]] ; fit11 <- fit11_list[[1]]
fit2_2 <- fit2_2_list[[1]] ; fit22 <- fit22_list[[1]]
#결과재확인
summary(object = fit1_2)
summary(object = fit11)
summary(object = fit2_2)
summary(object = fit22)
#다중공산성 변수 재확인
vif(mod = fit1_2)
vif(mod = fit11)
vif(mod = fit2_2)
vif(mod = fit22)
#잔차 패턴 확인
# windows()
# par(mfrow = c(2,2))
# plot(x = fit1_2)
# plot(x = fit11)
# plot(x = fit2_2)
# plot(x = fit22)
# par(mfrow = c(1,1))
# #잔차가정 검정
# library(car)
# ncvTest(model = fit1_2)
# durbinWatsonTest(model = fit1_2)
# crPlots(model = fit1_2)
# influencePlot(model = fit1_2)
#
# ncvTest(model = fit2_2)
# durbinWatsonTest(model = fit2_2)
# crPlots(model = fit2_2)
# influencePlot(model = fit2_2)
#
# ncvTest(model = fit2_2)
# durbinWatsonTest(model = fit2_2)
# crPlots(model = fit2_2)
# influencePlot(model = fit2_2)
#
# ncvTest(model = fit2_2)
# durbinWatsonTest(model = fit2_2)
# crPlots(model = fit2_2)
# influencePlot(model = fit2_2)
# 성능 확인
real <- testset$매출총액
## Evaluate a fitted model on the held-out data: RMSE and R2 on the raw
## predictions plus an F1 score on a derived "top-3 sales rank within
## district x category" classification.
## NOTE(review): relies on the globals `testset` and `real` — confirm both
## are defined in the calling environment.
## Returns list(pred, rmse, r2, f1, model) and prints the three metrics.
performance <- function(model){
  # Predict on the (global) test set
  pred <- predict(object = model, newdata = testset, type = "response")
  # Continuous metrics against the (global) truth vector
  rmse <- MLmetrics::RMSE(y_pred = pred, y_true = real)
  r2 <- MLmetrics::R2_Score(y_pred = pred, y_true = real)
  # Rank (categorical) metrics: within each 행정구역 (district) x
  # 대분류 (major category) group, flag the top-3 rows by actual vs
  # predicted sales and compare the two flags via F1.
  testset$매출총액_pred <- pred
  dataset <- testset %>% group_by(행정구역,대분류) %>%
    mutate(rank_real = row_number(desc(매출총액)),
           rank_pred = row_number(desc(매출총액_pred)),
           top3_real = ifelse(rank_real %in% 1:3, 1,0),
           top3_pred = ifelse(rank_pred %in% 1:3, 1,0))
  f1 <- MLmetrics::F1_Score(y_true = dataset$top3_real, y_pred = dataset$top3_pred, positive = "1")
  data = list(pred = pred, rmse = rmse, r2 = r2, f1 = f1, model = model)
  print(data[2:4])
  return(data)
}
#모델 성능 확인
result1_2 <- performance(model = fit1_2)
result11 <- performance(model = fit11)
result2_2 <- performance(model = fit1_2)
result22 <- performance(model = fit22)
#성능이 가장 좋은 fit11 모델 사용 ==> NA를 제외한 모든 입력변수로 시작 후 다중공산성 변수 및 아웃라이어 제거한 모형
testset$매출총액_pred <- result11$pred
windows()
testset %>% group_by(행정구역,대분류) %>%
mutate(rank_real = row_number(desc(매출총액)),
rank_pred = row_number(desc(매출총액_pred)),
top3_real = ifelse(rank_real %in% 1:3, 1,0),
top3_rank = ifelse(rank_pred %in% 1:3, 1,0)) %>%
ggplot(aes(x = rank_real, y = rank_pred, color = as.factor(rank_real)))+geom_point(position = position_jitter())
setwd("C:/Users/ChangYong/Desktop/나노디그리/1.정규강의 학습자료/1차 프로젝트/소상공인/2. 데이터")
save(result1_2,result11,result2_2,result22, file = "linearRegression.rda")
# 최종 확인결과, 세 경우 모두 잔차의 목표변수가 정규성을 위배하여 이후 순서 진행불가
#######
# library(olsrr)
# olsrr::ols_plot_cooksd_bar(model = fit2_2) #cook 거리 바플랏
# #해당 관측값이 전체 최소제곱추정량에 미치는 영향력을 보여주는 지표
# ols_plot_dfbetas(model = fit2_2) #해당 관측치의 개별 베타 값에 대한 영향력 지표
# ols_plot_dffits(model = fit2_2) #베타값의 분상 공분상 행렬의 Cov(b^) 추정값에 대한 해당 관측치에 대한 영향력
#
# #5000개 이상 데이터 정규성 확인 => 앤더슨 달링 테스트
# library(nortest)
# ad.test(fit1_lm2_SP$residuals)
# ad.test(fit1_lm2_SP$residuals)
# ad.test(fit1_lm2_SP$residuals)
#-------------------------------------------------------------------------------------------------------
#회귀나무로 모델 만들기
library(tidyverse)
library(rpart)
library(rpart.plot)
library(MLmetrics)
rm(list = ls())
setwd("C:/Users/ChangYong/Desktop/나노디그리/1.정규강의 학습자료/1차 프로젝트/소상공인/2. 데이터")
load("dataset_set.rda")
trainset_dummy <- trainset
testset_dummy <- testset
#1차
trainset <- trainset_dummy
testset <- testset_dummy
grid <- expand.grid(
minsplit = seq(from = 2, to = 20, by = 1),
cp = seq(from = 0.0001, to = 0.001, length.out = 10),
seed = 1234,
RMSE = NA,
F1 = NA,
R2 = NA)
filename <- "1차"
grid_filename <- "grid1"
pred_filename <- "pred1"
#2차 minsplit만 20~40
trainset <- trainset_dummy
testset <- testset_dummy
grid <- expand.grid(
minsplit = seq(from = 20, to = 40, by = 1),
cp = 0.0001,
seed = 1234,
RMSE = NA,
F1 = NA,
R2 = NA)
filename = "2차"
grid_filename <- "grid2"
pred_filename <- "pred2"
#3차 = 변수중요도가 1000부근 및 미만인 변수 제거
vars <- c("행정구역",'매출비율_1724','생존율_3년차','생존율_1년차','소득분위','생존율_5년차','년도','분기')
loc <- which(colnames(trainset_dummy) %in% vars)
trainset <- trainset_dummy[,-loc]
testset <- testset_dummy[,-loc]
grid <- expand.grid(
minsplit = seq(from = 2, to = 20, by = 1),
cp = seq(from = 0.0001, to = 0.001, length.out = 10),
seed = 1234,
RMSE = NA,
F1 = NA,
R2 = NA)
filename = "3차"
grid_filename <- "grid3"
pred_filename <- "pred3"
pred_list <- c()
#모델 튜닝 진행
for(i in 1:nrow(grid)){
sentence <- str_glue('{i}번째 행 실행 중 [minsplit : {grid$minsplit[i]}, cp = {grid$cp[i]}')
print(sentence,"\n")
#정지규칙 설정
ctrl <- rpart.control(minsplit = grid$minsplit[i],
cp = grid$cp[i],
maxdepth = 30L)
#모델적합
set.seed(seed = grid$seed[i])
fit <- rpart(formula = 매출총액~.,
data = trainset,
control = ctrl)
#가지치기 여부 확인 후 적합
num1 <- nrow(fit$cptable)
num2 <- which.min(fit$cptable[,4])
if(num1 != num2){
fit2 <- prune.rpart(tree = fit, cp = grid$cp[num2])
} else {
fit2 = fit
}
#변수중요도 확인
setwd(paste0("C:/Users/ChangYong/Desktop/나노디그리/1.정규강의 학습자료/1차 프로젝트/소상공인/4.모델 적합/회귀나무/",filename))
png(filename = paste0("변수중요도_",i,".png"), width = 8000, height = 4000, res = 500)
plot(x = fit$variable.importance, type = "b")
text(x = fit$variable.importance+100, label = paste0(names(fit$variable.importance),"\n",round(fit$variable.importance,0)))
dev.off()
#성능 분석
real <- testset$매출총액
pred <- predict(object = fit2, newdata = testset, type = "vector")
#RMSE 계산
reg <- MLmetrics::RMSE(y_pred = pred, y_true = real)
R2 <- MLmetrics::R2_Score(y_pred = pred, y_true = real)
#실측값 및 예측값 Rank
result <- testset_dummy
result$매출총액_pred <- pred
result <- result %>% select(행정구역,대분류,중분류,매출총액,매출총액_pred) %>%
group_by(행정구역,대분류) %>%
mutate(rank_real = row_number(desc(매출총액)),
rank_pred = row_number(desc(매출총액_pred)),
top_real = ifelse(rank_real <=3,"1","0"),
top_pred = ifelse(rank_pred <=3,"1","0"))
#Rank Top3 산점도 그리기
result %>%
ggplot(aes(x = rank_real, y = rank_pred, color = as.factor(대분류)))+
geom_point(position = position_jitter(),size = 2, alpha = 0.7)+
ggsave(filename = paste0("rank산점도_",i,"_(대분류).png"), width = 24, height = 12, units = "cm")
result %>%
ggplot(aes(x = rank_real, y = rank_pred, color = as.factor(rank_real)))+
geom_point(position = position_jitter(),size = 2, alpha = 0.7)+
ggsave(filename = paste0("rank산점도_",i,"_(rank).png"), width = 24, height = 12, units = "cm")
#Top3 예측 성능
F1 <- F1_Score(y_true = result$top_real, y_pred = result$top_pred, positive = "1")
#grid라는 dataframe에 RMSE 및 F1_Score, R2_score 저장
grid$RMSE[i] <-reg
grid$F1[i] <- F1
grid$R2[i] <- R2
pred_list <- cbind(pred_list,pred)
write.csv(grid,file = paste0(grid_filename,".csv"))
write.csv(pred_list,file = paste0(pred_filename,"_list.csv"))
cat(str_glue('{round((i)*100/nrow(grid),2)}% 완료'))
}
# R2, F1, CP 선 그래프 그리기
windows()
text <- data.frame(x = rep(nrow(grid)+1,3),
y = as.numeric(grid[nrow(grid),(ncol(grid)-2):ncol(grid)]),
label = colnames(grid)[(ncol(grid)-2):ncol(grid)])
grid %>% mutate(order = row_number()) %>%
ggplot(aes(x = order, y = RMSE))+geom_line(col = "blue")+geom_point(col = "blue")+ylab("")+
geom_vline(xintercept = which.min(grid$RMSE), col = "blue", lty = 1, lwd = 2, alpha = 0.7)+
geom_line(aes(y = F1), col = "red")+geom_point(aes(y = F1),col = "red")+
geom_vline(xintercept = which.max(grid$F1), col = "red", lty = 6, lwd = 1.75)+
geom_line(aes(y = R2), col = "orange")+geom_point(aes(y = R2), col = "orange")+
geom_vline(xintercept = which.max(grid$R2), col = "orange", lty = 2, lwd = 1.2)+
scale_y_continuous(sec.axis = dup_axis(), breaks = seq(0,1,0.05))+
geom_text(data = text, mapping = aes(x = text$x, y = text$y, label = text$label),col = c("blue","red","orange"), size = 10)+
theme_classic()
#RMSE가 가장 낮은 경우, F1이 가장 높은 경우, R2가 가장 높은 경우 세 가지를 선택하고, random set.seed로 가장 성능 좋은 모형 찾기
# grDevices::colors()
RMSE <- which.min(grid$RMSE)
F1 <- which.max(grid$F1)
R2 <- which.max(grid$R2)
cat(RMSE,F1,R2)
#-------------------------------------------------------------------------------------------------------
rm(list = ls())
setwd("C:/Users/ChangYong/Desktop/나노디그리/1.정규강의 학습자료/1차 프로젝트/소상공인/2. 데이터")
load("dataset_set.rda")
#입력변수를 변경하며 모델 생성을 위해 데이터셋 더미 만들어놓기기
trainset_dummy <- trainset
testset_dummy <- testset
library(tidyverse)
library(randomForest)
#반복문을 사용한 모형 튜닝
#1차 #최적 mtry 찾기
trainset <- trainset_dummy
testset <- testset_dummy
grid <- expand.grid(ntree = 200,
mtry = 3:16,
seed = 1234,
error = NA,
RMSE = NA,
F1 = NA,
R2 = NA)
filename = "1차"
grid_filename <- "grid1"
pred_filename <- "pred1"
#2차 error & F1, R2, RMSE가 높았던 mtry 6,9,12,13에서 ntree 변경하여 튜닝
trainset <- trainset_dummy
testset <- testset_dummy
grid <- expand.grid(ntree = seq(from = 200, to = 500, by = 100),
mtry = c(9,12,14),
seed = 1234,
error = NA,
RMSE = NA,
F1 = NA,
R2 = NA)
filename = "2차"
grid_filename <- "grid2"
pred_filename <- "pred2"
#3차 error & F1, R2, RMSE가 높았던 mtry 6,9,12,13에서 ntree 변경하여 튜닝
trainset <- trainset_dummy
testset <- testset_dummy
grid <- expand.grid(ntree = seq(from = 500, to = 1000, by = 100),
mtry = c(9),
seed = 1234,
error = NA,
RMSE = NA,
F1 = NA,
R2 = NA)
filename = "3차"
grid_filename <- "grid3"
pred_filename <- "pred3"
#4차 변수중요도 상위 5개만 선택하여 튜닝
vars <- c("중분류","총매출건수","행정구역","매출비율_0611","매출비율_토104050대","매출총액")
loc <- which(colnames(trainset_dummy) %in% vars)
trainset <- trainset_dummy[,loc]
testset <- testset_dummy[,loc]
grid <- expand.grid(ntree = seq(200,700,100),
mtry = 2:5,
seed = 1234,
error = NA,
RMSE = NA,
F1 = NA,
R2 = NA)
filename = "4차"
grid_filename <- "grid4"
pred_filename <- "pred4"
#4차 변수중요도 상위 5개만 선택하여 튜닝(최종 튜닝 조건으로 실행)
vars <- c("중분류","총매출건수","행정구역","매출비율_0611","매출비율_토104050대","매출총액")
loc <- which(colnames(trainset_dummy) %in% vars)
trainset <- trainset_dummy[,loc]
testset <- testset_dummy[,loc]
grid <- expand.grid(ntree = 700,
mtry = 5,
seed = 1234,
error = NA,
RMSE = NA,
F1 = NA,
R2 = NA)
filename = "5차"
grid_filename <- "grid5"
pred_filename <- "pred5"
pred_list = c()
for(i in 1:nrow(grid)){
disp <- str_glue('현재 {i}행 실행 중! [ntree: {grid$ntree[i]}, mtry: {grid$mtry[i]}] {Sys.time()}')
cat(disp,"\n")
set.seed(seed = grid$seed)
fit <- randomForest(formula = 매출총액~.,
data = trainset,
ntree = grid$ntree[i],
mtry = grid$mtry[i])
grid$error[i] <- tail(x = fit$mse, n = 1)
#변수중요도 플랏 저장
setwd(paste0("C:/Users/ChangYong/Desktop/나노디그리/1.정규강의 학습자료/1차 프로젝트/소상공인/4.모델 적합/랜덤포레스트/",filename))
png(filename = paste0("변수중요도_",i,".png"), width = 8000, height = 4000, res = 500)
varImpPlot(x = fit, main = 'variable importance')
dev.off()
#시험셋으로 목표변수 추정값 생성
pred1 <- predict(object = fit, newdata = testset, type = 'response')
pred_list <- cbind(pred_list,pred1)
#실제 관측치 벡터 생성
real <- testset$매출총액
#실측값과 비교하기 위해 testset 조작
results <- testset_dummy
results$매출총액_pred <- pred1
results <- results %>%
group_by(행정구역,대분류) %>%
mutate(rank_real = row_number(desc(매출총액)),
rank_pred = row_number(desc(매출총액_pred)),
top3_real = ifelse(rank_real <=3,"1","0"),
top3_pred = ifelse(rank_pred <=3,"1","0"))
#rank를 factor형으로 변경
num <- ncol(results)
results[,(num-3):num] <- map_df(.x = results[,(num-3):num],.f = as.factor)
#real_rank와 pred_rank 산점도 그리기
results %>%
ggplot(aes(x = rank_real, y = rank_pred, color = as.factor(rank_real)))+
geom_point(position = position_jitter(),size = 2)+
ggsave(filename = paste0("rank산점도rank_",i,"_",".png"), width = 24, height = 12, units = "cm")
results %>%
ggplot(aes(x = rank_real, y = rank_pred, color = as.factor(rank_real)))+
geom_point(position = position_jitter(),size = 2)+
ggsave(filename = paste0("rank산점도대분류_",i,"_",".png"), width = 24, height = 12, units = "cm")
#회귀값 예측 결과
grid$RMSE[i] <- MLmetrics::RMSE(y_pred = pred1, y_true = real)
#Top3 범주값 예측 결과
grid$F1[i] <- MLmetrics::F1_Score(y_true = results$top3_real, y_pred = results$top3_pred, positive = "1")
grid$R2[i] <- MLmetrics::R2_Score(y_true = real, y_pred = pred1)
disp <- str_glue('현재 {i}행 완료! [{round((i)/nrow(grid),2)*100}% 완료]')
write.csv(grid,file = paste0(grid_filename,".csv"), row.names = F)
Sys.sleep(2)
write.csv(pred_list,file = paste0(pred_filename,"_list.csv"),row.names = F)
Sys.sleep(2)
cat(disp, "\n")
}
#튜닝 결과 확인
windows()
plot(x = grid$error, type = 'b', pch = 19, col = 'gray30', main = 'Grid Search Result')
abline(v = which.min(x = grid$error), col = 'red', lty = 2)
loc <- which.min(x = grid$error)
print(x = loc)
grid[loc,]
#RMSE,F1,R2 플랏
text <- data.frame(x = rep(nrow(grid)+0.5,4),
y = as.numeric(grid[nrow(grid),4:7]),
label = colnames(grid)[4:7])
text[text$label=="error",2] <- text[text$label=="error",2]*10
# windows()
grid %>% mutate(order = row_number()) %>%
ggplot(aes(x = order, y = RMSE))+geom_line(col = "blue")+geom_point(col = "blue")+ylab("")+
geom_vline(xintercept = which.min(grid$RMSE), col = "blue", lty = 1, lwd = 2, alpha = 0.7)+
geom_line(aes(y = F1), col = "red")+geom_point(aes(y = F1),col = "red")+
geom_vline(xintercept = which.max(grid$F1), col = "red", lty = 6, lwd = 1.75)+
geom_line(aes(y = R2), col = "orange")+geom_point(aes(y = R2), col = "orange")+
geom_vline(xintercept = which.max(grid$R2), col = "orange", lty = 2, lwd = 1.2)+
geom_line(aes(y = error*10), col = "black")+geom_point(aes(y = error*10), col = "black")+
geom_vline(xintercept = which.min(grid$error), col = "black", lty = 2, lwd = 1.2)+
scale_y_continuous(name = "RMSE, F1, R2",sec.axis = dup_axis(~./10,name = "MSE Error"), limits = c(0,1), breaks = seq(0,1,0.1))+
geom_text(data = text, mapping = aes(x = text$x, y = text$y, label = text$label),col = c("black","blue","red","orange"), size = 10)+
theme_classic()
which.min(grid$RMSE)
which.max(grid$F1)
which.max(grid$R2)
which.min(grid$error)
|
8c0882015082dd2f1bce70729d73be8b5a8e15b9
|
ed0699c3fc9de97b79a3ceeb52077a2e1ccac5f7
|
/plot1.R
|
9e4b0a0629bb8d668e19add4c7940d1bb1047fa9
|
[] |
no_license
|
rcg-student/ExData_Plotting1
|
956efd94c835a8bfce7ed3de18fb47c05f1bbeb3
|
e0eff9e673c8066b19f6a84372276d3420c112f4
|
refs/heads/master
| 2020-12-26T04:05:21.876189
| 2015-12-12T20:44:21
| 2015-12-12T20:44:21
| 47,610,274
| 0
| 0
| null | 2015-12-08T08:54:28
| 2015-12-08T08:54:26
| null |
UTF-8
|
R
| false
| false
| 684
|
r
|
plot1.R
|
#reading data and using the days we want
electric <- read.table("household_power_consumption.txt", header =TRUE, sep=";")
electric_to_use <- electric[ ((electric$Date == "1/2/2007") | (electric$Date == "2/2/2007"))&(electric$Global_active_power != "?") ,]
#decimal formatting for the plot
electric_to_use$Global_active_power <- format(electric_to_use$Global_active_power, decimal.mark = ".")
#plotting the results
hist(as.numeric(electric_to_use$Global_active_power),col="red",breaks=20,main="Global active power",
xlab = "Global active power (kilowatts)",ylab="Frequency",xlim=c(0,6),ylim=c(0,1200))
## Copy my plot to a PNG file
dev.copy(png, file = "plot1.png")
dev.off()
|
970b8cae0186fe34bd1afca5240ee325758c564a
|
d4ecad10911cb7035bc70c46b772a79ad03a142b
|
/RFig5.R
|
c5d47f8072df2152c19a32f56894f404acc3e63c
|
[] |
no_license
|
yteo/epigenetic-analysis-of-cfdna-from-human
|
a76cca28e6fa5545d891823d28eaa10b7f2d8517
|
ed909be5b72d03e0c03fb83e9722ae20fa9ac3b4
|
refs/heads/master
| 2020-04-27T03:31:11.797231
| 2019-08-28T20:47:27
| 2019-08-28T20:47:27
| 174,026,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,216
|
r
|
RFig5.R
|
# adjust %s for sample name accordingly
multi<-read.table(sprintf("%s\\%s_L1HS_multi_frac.txt",sample,sample,sep="\t"))
count<-read.table(sprintf("%s_L1HS_consensuscoord_count.txt",sample,sample,sep="\t"))
num<-read.table(sprintf("%s_L1HS_consensuscoord_num.txt",sample,sample,sep="\t"))
colnames(count)<-c("Pos","Count")
colnames(num)<-c("Pos","Num")
colnames(multi)<-c("repeat","Pos","multi_frac")
# count = unique count
#num=num of L1HS mapped at the consensus
# multi_frac = multi count fraction
count_num<-merge(count,num,by="Pos")
count_num<-merge(count_num,multi,by="Pos")
# combined mulfi fraction and unique counts
count_num$multi_unique<-count_num$Count+count_num$multi_frac
i<-i+1
lib<-libsize[grep(paste(sample),libsize$V1),]$V2
count_num$norm<-count_num$multi_unique/lib
count_num2<-merge(count_num,counter,all=T,by="Pos")
count_num2[is.na(count_num2)] <- 0
count_num2$FFT = filterFFT(count_num2$norm, pcKeepComp=0.01)
lines(count_num2$Pos,count_num2$FFT,col=paste("deepskyblue",i,sep=""),lty=2,lwd=0.1)
dat<-cbind(dat,count_num2$FFT)
dat$ave<-rowMeans(dat[,2:4])
lines(dat$pos,dat$ave,lty=1,col=paste("deepskyblue4"),lwd=2)
print(cor.test(L1HS_map$V4,dat$ave))
|
f994345c9c047e5ebb10a50b2deb9d8c9bb88438
|
8c658d4e178a8b17aa03c4cc93d76e5201a6bed8
|
/Simulating-Sampling-Distributions/server.r
|
813c395f4205936cc801de91784673e8be977aad
|
[] |
no_license
|
hiteshtulsani/Developing-Data-Products-Project
|
610ce7c3ba26035b18fcdcdcbd2d1820160f035c
|
6b82f3fa3cb67527ff5f0fd5f5a975352539d632
|
refs/heads/master
| 2020-05-17T08:15:28.267593
| 2014-12-15T07:31:45
| 2014-12-15T07:31:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,986
|
r
|
server.r
|
library(shiny)
library(ggplot2)
shinyServer(
function(input,output) {
output$newPlot <- renderPlot({
input$simulate
isolate({
set.seed(23051983)
if(input$select == 1) {
means <- apply(matrix(rexp(input$number.samples * input$nosim, input$lambda),
input$nosim), 1, mean)
theoretical.mean <- 1/input$lambda
plot.sd <- 1/input$lambda
} else if(input$select == 2) {
means <- apply(matrix(rnorm(input$number.samples * input$nosim, input$norm.mean, input$norm.sd),input$nosim), 1, mean)
theoretical.mean <- input$norm.mean
plot.sd <- input$norm.sd
} else {
means <- apply(matrix(rpois(input$number.samples * input$nosim, input$rpois.lambda),
input$nosim), 1, mean)
theoretical.mean <- input$rpois.lambda
plot.sd <- sqrt(input$rpois.lambda)
}
mean.of.sample.means <- mean(means)
variance.of.sample.means <- var(means)*input$number.samples
sd.of.sample.means <- sd(means)*sqrt(input$number.samples)
#mean.of.sample.means <- renderPrint({mean(means)})
min.xlim <- as.integer(min(means))
max.xlim <- as.integer(max(means))
Theoretical<-c(theoretical.mean,plot.sd,plot.sd^2)
Sampling<-c(mean.of.sample.means,sd.of.sample.means,variance.of.sample.means)
res_df <- data.frame(Theoretical,Sampling,row.names=c("Mean","Std. Dev","Variance"))
output$res = renderTable({res_df},digits=4)
cols <- c("Simulated"="#87CEEB","Normal"="#000000")
ggplot(data = data.frame(means), aes(x = means)) +
geom_histogram(aes(y = ..density.., colour = "Simulated"),fill = "white",binwidth=0.1) +
geom_density(colour = "lightblue",size = 1) +
geom_vline(xintercept = mean.of.sample.means, color = "lightblue",size = 1.5) +
stat_function(aes(colour ="Normal"),fun = dnorm, size = 1, args = list(mean = theoretical.mean,sd = (plot.sd/sqrt(input$number.samples)))) +
scale_colour_manual("",values = cols) +
geom_vline(xintercept = theoretical.mean,color = "black", size = 1) +
theme_bw()
})
}
)
}
)
|
8b89303ba182a60d1cb0f592d8f82ac3cce25e76
|
66698ecc6835ac9c903260fa83c994a027bd0aa3
|
/src/scripts/R-setup.R
|
6a6f338bf62d65add98a45e0a163b81da5682f81
|
[] |
no_license
|
twineapp/vagrant
|
51798d4f75ef45c796a738a0951896e70bbeeea6
|
28c46fd91735ee982cf28a586899899e94e2b7af
|
refs/heads/dev
| 2021-01-18T21:29:02.320566
| 2015-08-19T12:22:51
| 2015-08-19T12:22:51
| 9,557,922
| 1
| 1
| null | 2015-01-20T17:27:28
| 2013-04-20T02:04:30
|
Shell
|
UTF-8
|
R
| false
| false
| 76
|
r
|
R-setup.R
|
install.packages(c('jsonlite', 'Rserve'), repos = "http://cran.case.edu" )
|
de019b3cedbb7cc632338e3076a673747040765d
|
9d64e54ac35e8adec9577790d543214225769132
|
/R/variance_boxplot.R
|
ac59027f4cfe8ddf2790149bc07fd61a4448738a
|
[] |
no_license
|
biopaw/MetaboDiff
|
5960f8c3e62352e4dcf1d94808750822e60871e1
|
6b1e31aa7c5737e0a9acc4e74a4e98434a341c1f
|
refs/heads/master
| 2023-01-06T01:16:01.638917
| 2020-10-27T19:43:11
| 2020-10-27T19:43:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,006
|
r
|
variance_boxplot.R
|
#' Boxplot comparing the distribution of variance in metabolic measurements across the pathways
#'
#' @param met MultiAssayExperiment object with slots "raw", "imputed", "norm" and "norm_imputed"
#' @param rowAnnotation character name of rowAnnotation to stratify metabolites
#' @return Boxplot comparing the distribution of variance in metabolic measurements across the pathways
#' @examples
#' variance_boxplot(met_example, rowAnnotation="SMPDB.Pathway.Name")
#' @export
variance_boxplot <- function(met, rowAnnotation) {
df = data.frame(value=apply(assays(met)[["norm_imputed"]],1,var),
pathway=as.vector(rowData(met[["norm_imputed"]])[[rowAnnotation]]))
df$pathway_ordered = reorder(df$pathway,df$value,median)
ggplot(df,
mapping=aes(x=pathway_ordered,
y=value,
fill=pathway_ordered)) + coord_flip() + theme_minimal() +
geom_boxplot() + xlab("") + guides(fill=FALSE) + ylab("vsn normalized abundance")
}
|
eed8a8611bebc108d8423cef8bb4c72b76463e91
|
8b3cd7ee200564b65db2d76ca8ab953466e091e2
|
/man/lrflip.Rd
|
d5b2eff9b1772b18f6529787b07cd2f64b4a7fdd
|
[] |
no_license
|
alicejenny/project011
|
3df759dfb96e5a7276bde4dd315bbc81f812a98c
|
7de1339bc4c148bfe41264acb3da9307a605e863
|
refs/heads/master
| 2021-01-10T14:30:09.381069
| 2015-07-15T20:17:40
| 2015-07-15T20:17:40
| 36,937,861
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 426
|
rd
|
lrflip.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/leftrightflip.R
\name{lrflip}
\alias{lrflip}
\title{Left-Right Split & Flip}
\usage{
lrflip(sample, filename, folder)
}
\arguments{
\item{sample}{The input data frame. 3 named columns (x, y, and z, in that order).}
}
\description{
Splits the mandible in half sagittally, then rotates it so the buccal side is on top and culls backfaces.
}
|
656537183bcd5f1c2c63f2962599738420ed278b
|
c01177a666c08b1c5f8bc18877f6674b44178cb5
|
/man/category_search.Rd
|
c300819bb9dbd61f210a130de3a36dff73c4c4df
|
[
"MIT"
] |
permissive
|
mitchellbuchan/trebek
|
70293923841e888716b14f1425011ab5aa8f5c5b
|
476a75a8ed3c3d05f7ec43c9fe6b73e8a4710809
|
refs/heads/master
| 2022-06-03T12:02:44.286728
| 2020-05-04T03:20:06
| 2020-05-04T03:20:06
| 261,059,105
| 0
| 0
|
MIT
| 2020-05-04T01:56:19
| 2020-05-04T01:56:18
| null |
UTF-8
|
R
| false
| true
| 651
|
rd
|
category_search.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_categories.R
\name{category_search}
\alias{category_search}
\title{Allows a user to see their category options.
Defaults to all options, or shows those that contain the provided query as a substring.}
\usage{
category_search(query = "")
}
\arguments{
\item{query}{A substring of the category name to match.}
}
\value{
Category titles and Ids matching the query, as a data frame.
}
\description{
Allows a user to see their category options.
Defaults to all options, or shows those that contain the provided query as a substring.
}
\examples{
category_search("pants")
}
|
e5c932a7c53ecf2b72b6f3b35eca3054d9d79632
|
7678b6b87290c4b30808075736e179c1334d0725
|
/Hands-On Programming with R/9 loop .r
|
f115b669ebd2dd1a241ff40b2e1a84dfde05fe41
|
[] |
no_license
|
xiemeigongzi88/Basic_R
|
eebf48ec032b44d850010139119cd0492be2e25d
|
39b723f486b81b37cffda1a43823ce257b9b61e7
|
refs/heads/master
| 2020-06-04T23:23:53.858024
| 2020-03-18T16:44:17
| 2020-03-18T16:44:17
| 192,035,946
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,685
|
r
|
9 loop .r
|
9 loop
page 168
9.1 期望值
9.2 expand.grid
expand.grid () 函数可以写出 n 个向量元素的所有组合
> die<-c(1:6)
> rolls<-expand.grid(die,die)
> rolls
Var1 Var2
1 1 1
2 2 1
3 3 1
4 4 1
5 5 1
6 6 1
7 1 2
8 2 2
9 3 2
10 4 2
11 5 2
12 6 2
13 1 3
14 2 3
15 3 3
16 4 3
17 5 3
18 6 3
19 1 4
20 2 4
21 3 4
22 4 4
23 5 4
24 6 4
25 1 5
26 2 5
27 3 5
28 4 5
29 5 5
30 6 5
31 1 6
32 2 6
33 3 6
34 4 6
35 5 6
36 6 6
> rolls$Value<-rolls$Var1+rolls$Var2
> rolls
Var1 Var2 Value
1 1 1 2
2 2 1 3
3 3 1 4
4 4 1 5
5 5 1 6
6 6 1 7
7 1 2 3
8 2 2 4
9 3 2 5
10 4 2 6
11 5 2 7
12 6 2 8
13 1 3 4
14 2 3 5
15 3 3 6
16 4 3 7
17 5 3 8
18 6 3 9
19 1 4 5
20 2 4 6
21 3 4 7
22 4 4 8
23 5 4 9
24 6 4 10
25 1 5 6
26 2 5 7
27 3 5 8
28 4 5 9
29 5 5 10
30 6 5 11
31 1 6 7
32 2 6 8
33 3 6 9
34 4 6 10
35 5 6 11
36 6 6 12
> test<-c(1:3)
> a<-expand.grid(test,test,test)
> a
Var1 Var2 Var3
1 1 1 1
2 2 1 1
3 3 1 1
4 1 2 1
5 2 2 1
6 3 2 1
7 1 3 1
8 2 3 1
9 3 3 1
10 1 1 2
11 2 1 2
12 3 1 2
13 1 2 2
14 2 2 2
15 3 2 2
16 1 3 2
17 2 3 2
18 3 3 2
19 1 1 3
20 2 1 3
21 3 1 3
22 1 2 3
23 2 2 3
24 3 2 3
25 1 3 3
26 2 3 3
27 3 3 3
die<-c(1,2,3,4,5,6)
rolls<-expand.grid(die,die)
rolls$value<-rolls$Var1+rolls$Var2
head(rolls,3)
prob<-c("1"=1/8,"2"=1/8,"3"=1/8,"4"=1/8,"5"=1/8,"6"=3/8)
prob[rolls$Var1]
rolls$prob1<-prob[rolls$Var1]
rolls$prob2<-prob[rolls$Var2]
rolls$prob<-rolls$prob1 * rolls$prob2
sum(rolls$value * rolls$prob)
9.3 For loop
page 175
> for ( i in c('my','second','for','loop'))
+ {
+ print(i)
+ }
[1] "my"
[1] "second"
[1] "for"
[1] "loop"
9.4 while loop
page 181
9.5 repeat loop
page 181
repeat{
n<-n+1
if(n>10)
{
break
}
}
|
227b2b59e78b16d3184f4cc75b3768236d53fbcd
|
4127dd6771c46682d42b7af9d00613dcbda8c1d0
|
/R/networks-module.R
|
625de320b29ce845077a9bf583de0d2efe3564f3
|
[
"Apache-2.0"
] |
permissive
|
brainy749/chirp
|
25ae98bfd373f0f7a891accf55d86174b37b7142
|
74c0a1c086226adeb54035f95ac7952fe5eb9711
|
refs/heads/master
| 2020-04-25T20:36:52.657588
| 2019-02-27T23:03:08
| 2019-02-27T23:03:08
| 173,054,322
| 1
| 0
| null | 2019-02-28T06:25:51
| 2019-02-28T06:25:51
| null |
UTF-8
|
R
| false
| false
| 28,138
|
r
|
networks-module.R
|
networks_ui <- function(id){
ns <- NS(id)
tagList(
tags$a(
icon("pencil-ruler", class = "fa-lg"),
onclick = "pushbar.open('save_pushbar');",
class = "btn btn-primary",
`data-pushbar-target` = "save_pushbar",
id = "optsBtn"
),
tags$a(
icon("database", class = "fa-lg"),
onclick = "pushbar.open('search_pushbar');",
class = "btn btn-primary",
`data-pushbar-target` = "search_pushbar",
id = "search"
),
tags$a(
icon("searchengin", class = "fa-lg"),
onclick = "pushbar.open('search_node_pushbar');",
class = "btn btn-primary",
`data-pushbar-target` = "search_node_pushbar",
id = "searchNode"
),
shinyjs::hidden(
actionButton(
ns("hide_tweet"),
"",
icon = icon("times"),
class = "btn-danger"
)
),
conditionalPanel(
"input['networks-network'] != 'hashtags'",
tags$a(
icon("layer-group", class = "fa-lg"),
onclick = "pushbar.open('legend_pushbar');",
class = "btn btn-primary",
`data-pushbar-target` = "legend_pushbar",
id = "legendBottom"
)
),
div(
id = "pushbarSearchNode",
`data-pushbar-id` = "search_node_pushbar",
class = "pushbar from_left",
h4("SEARCH"),
fluidRow(
column(9, uiOutput(ns("node_search_ui"))),
column(
3,
br(),
actionButton(
ns("search_node"),
"",
icon = icon("search-plus"),
width = "100%",
class = "btn-primary"
)
)
),
radioButtons(
ns("zoom"),
"Zoom level",
choices = c(
"High" = "high",
"Medium" = "medium",
"Low" = "low"
),
inline = TRUE,
width = "100%",
selected = "medium"
),
tags$a(
id = "closeSearchNode",
icon("times"), onclick = "pushbar.close();", class = "btn btn-danger"
)
),
actionButton(
"stats",
"",
icon("brain", class = "fa-lg"),
class = "btn-primary",
onclick = "pushbar.open('stats_pushbar');",
),
div(
id = "pushbarBottom",
`data-pushbar-id` = "stats_pushbar",
class = "pushbar from_right",
h4("STATS"),
uiOutput(ns("trend_text")),
reactrend::reactrendOutput(ns("trendline"), width = "100%"),
fluidRow(
column(6, uiOutput(ns("n_nodes"))),
column(6, uiOutput(ns("n_edges")))
),
fluidRow(
column(6, uiOutput(ns("n_tweets")))
),
uiOutput(ns("selected_headline")),
uiOutput(ns("selected_source")),
fluidRow(
column(6, uiOutput(ns("source_indegree"))),
column(6, uiOutput(ns("source_outdegree")))
),
fluidRow(
column(6, uiOutput(ns("source_pagerank"))),
column(6, uiOutput(ns("source_eigen")))
),
uiOutput(ns("arrow_down")),
uiOutput(ns("selected_target")),
fluidRow(
column(6, uiOutput(ns("target_indegree"))),
column(6, uiOutput(ns("target_outdegree")))
),
fluidRow(
column(6, uiOutput(ns("target_pagerank"))),
column(6, uiOutput(ns("target_eigen")))
),
tags$a(
id = "closeStats",
icon("times"), onclick = "pushbar.close();", class = "btn btn-danger"
)
),
div(
id = "pushbarTop",
`data-pushbar-id` = "search_pushbar",
class = "pushbar from_left",
h4("DATA"),
tabsetPanel(
type = "tabs",
tabPanel(
"SEARCH ",
textInput(
ns("q"),
"",
width = "100%",
placeholder = "Query"
),
tippy_this(ns("q"), "Your search query"),
fluidRow(
column(
4,
actionButton(
ns("addOpts"),
"",
icon = icon("plus")
)
),
column(
8,
actionButton(
ns("submit"),
"Search",
icon = icon("search"),
width = "100%",
class = "btn btn-primary"
)
)
),
br(),
div(
id = ns("searchOptions"),
style = "display:none;",
sliderInput(
ns("n"),
label = "Number of tweets",
min = .get_tweet_range("min"),
max = .get_tweet_range("max"),
value = .get_tweet_range("min"),
step = 100,
width = "100%"
),
tippy_this(ns("n"), "Number of tweets to fetch"),
selectInput(
ns("type"),
"Type",
choices = c(
"Recent" = "recent",
"Mixed" = "mixed",
"Popular" = "popular"
),
selected = "recent",
width = "100%"
),
tippy_this(ns("type"), "Type of tweets to fetch"),
fluidRow(
column(
7,
checkboxInput(
ns("include_rts"),
"Include retweets",
TRUE,
width = "100%"
)
),
column(5, checkboxInput(ns("append"), "Append"))
),
tippy_this(ns("include_rts"), "Whether to include retweets"),
textInput(ns("longitude"), "Longitude", value = "", width = "100%"),
textInput(ns("latitude"), "Latitude", value = "", width = "100%"),
fluidRow(
column(6,textInput(ns("radius"), "Radius", value = "", width = "100%")),
column(6, selectInput(ns("metric"), "Metric", choices = c("Kilometer" = "km", "Miles" = "mi")))
)
)
),
tabPanel(
"LOAD",
fileInput(
ns("file"),
label = "Choose one or more previously downloaded Chirp file(s) (.RData)",
accept = c(".RData", ".rdata"),
placeholder = " No file selected",
width = "100%",
multiple = TRUE
),
checkboxInput(ns("append_file"), "Append")
)
),
a(
"chrip.sh",
id = "leftLink",
href = "https://chirp.sh",
target = "_blank"
),
tags$a(
id = "closeSearch",
icon("times"), onclick = "pushbar.close();", class = "btn btn-danger"
)
),
shinyjs::useShinyjs(),
div(
`data-pushbar-id` = "legend_pushbar",
class = "pushbar from_bottom",
fluidRow(
column(12, uiOutput(ns("legend"), class = "center"))
),
tags$a(
style = "right:20px;bottom:20px;position:absolute;",
icon("times"), onclick = "pushbar.close();", class = "btn btn-danger"
)
),
div(
id = "pushbarLeft",
`data-pushbar-id` = "save_pushbar",
class = "pushbar from_right",
h4("OPTIONS"),
br(),
selectInput(
ns("network"),
"NETWORK TYPE",
choices = c(
"Retweets" = "retweet_screen_name",
"Hashtags" = "hashtags",
"Conversations" = "mentions_screen_name"
),
width = "100%"
),
tippy_this(ns("network"), "Type of network to draw"),
conditionalPanel(
"input['networks-network'] != 'retweet_screen_name'",
checkboxInput(
ns("comentions"),
"Co-mentions",
width = "100%"
)
),
conditionalPanel(
"input['networks-network'] == 'retweet_screen_name'",
checkboxInput(
ns("quoted"),
"Include quoted",
width = "100%",
value = TRUE
)
),
fluidRow(
column(
6,
selectInput(
ns("size"),
"SIZE",
choices = c(
"# tweets" = "n_tweets",
"In-degree" = "in_degree",
"Out-degree" = "out_degree",
"Closeness" = "closeness",
"Pagerank" = "pagerank",
"Authority" = "authority",
"Eigen" = "eigen"
),
width = "100%"
),
tippy_this(ns("size"), "Variable to size nodes")
),
column(
6,
selectInput(
ns("colour"),
"COLOUR",
choices = c(
"Cluster" = "group",
"# tweets" = "n_tweets",
"Components" = "components",
"In-degree" = "in_degree",
"Out-degree" = "out_degree",
"Closeness" = "closeness",
"Pagerank" = "pagerank",
"Authority" = "authority",
"Eigen" = "eigen",
"Type" = "type"
),
width = "100%"
),
tippy_this(ns("colour"), "Variable to colour nodes")
)
),
h5("FILTER"),
fluidRow(
column(
8,
checkboxInput(
ns("delete_nodes"),
"DELETE NODES", value = FALSE
),
tippy_this(ns("delete_nodes"), "Tick and click on nodes to delete them")
),
column(
4,
conditionalPanel(
"input['networks-network'] != 'retweet_screen_name'",
checkboxInput(
ns("include_retweets"),
"RTs",
value = TRUE
)
)
)
),
sliderInput(
ns("node_size"),
"Filter node by size",
width = "100%",
min = 3,
max = 17,
value = 17
),
h5("LAYOUT"),
fluidRow(
column(
6,
actionButton(
ns("start_layout"),
"START",
icon = icon("play"),
width = "100%"
)
),
column(
6,
actionButton(
ns("kill_layout"), "
STOP",
icon = icon("stop"),
width = "100%"
)
)
),
br(),
actionButton(
ns("noverlap"),
"NO OVERLAP",
icon = icon("magnet"),
width = "100%"
),
h5("EXPORT"),
fluidRow(
column(
6,
actionButton(
ns("save_img"),
"SAVE IMAGE",
icon = icon("image"),
width = "100%"
)
),
column(
6,
actionButton(
ns("save_svg"),
"SAVE SVG",
icon = icon("html5"),
width = "100%"
)
)
),
br(),
downloadButton(ns("downloadData"), "DOWNLOAD DATA", style = "width:100%;"),
tags$a(
id = "closeOpts",
icon("times"), onclick = "pushbar.close();", class = "btn btn-danger"
)
),
actionButton(
ns("vr"),
"",
icon = icon("vr-cardboard", class = "fa-lg"),
class = "btn btn-primary"
),
shinyjqui::jqui_draggable(
htmlOutput(
ns("display"), style="position:absolute;z-index:99;left:20px;top:70px;"
)
),
shinycustomloader::withLoader(
sigmajs::sigmajsOutput(ns("graph"), height = "99vh"),
type = "html",
loader = "loader9"
),
uiOutput(ns("aforce"))
)
}
networks <- function(input, output, session, dat){
tweets <- reactiveVal(dat)
shinyjs::hide("aforce")
observeEvent(input$submit, {
geocode <- NULL
if(input$longitude != "" && input$latitude != "" && input$radius != "")
geocode <- paste(input$longitude, input$latitude, paste0(input$radius, input$metric), sep = ",")
if(input$q != ""){
session$sendCustomMessage(
"load",
paste("Fetching", prettyNum(input$n, big.mark = ","), "tweets")
)
lim <- .check_rate_limit()
options(search_query = .clean_input(input$q))
if(lim$remaining == 0){
shinyjs::disable("submit")
shinyjs::delay(difftime(Sys.time(), lim$reset_at, units = "secs") * 1000, shinyjs::enable("submit"))
time <- difftime(Sys.time(), lim$reset_at, units = "mins")
time <- ceiling(time)
showModal(
modalDialog(
title = "Rate limit hit!",
"You have hit the rate limit, wait until",
time
, "to make another search.",
easyClose = TRUE,
footer = NULL
)
)
} else {
tw <- rtweet::search_tweets(
input$q,
n = input$n,
type = input$type,
include_rts = input$include_rts,
geocode = geocode,
token = .get_token()
)
if(isTRUE(input$append))
rbind.data.frame(tweets(), tw) %>%
tweets()
else
tweets(tw)
}
session$sendCustomMessage("unload", "") # stop loading
}
})
observeEvent(input$file, {
file <- input$file
s <- ""
if(length(file$datapath))
s <- "s"
session$sendCustomMessage(
"load",
paste0("Loading file", s, "...")
)
tw <- file$datapath %>%
purrr::map_df(function(x){
get(load(x))
})
if(isTRUE(input$append_file))
rbind.data.frame(tweets(), tw) %>%
tweets()
else
tweets(tw)
session$sendCustomMessage("unload", "") # stop loading
})
shinyjs::hide("save_el")
observeEvent(input$save_opts, {
shinyjs::toggle("save_el")
})
observeEvent(input$save_img, {
ns <- session$ns
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_export_img_p(file = "chirp.png")
})
observeEvent(input$save_svg, {
ns <- session$ns
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_export_svg_p(file = "chirp.svg")
})
graph <- reactive({
tw <- tweets() %>%
filter(is_retweet %in% c(FALSE, input$include_retweets))
if(isTRUE(input$comentions) && input$network %in% c("hashtags", "mentions_screen_name"))
edges <- tw %>% gt_co_edges(!!sym(input$network))
else
edges <- tw %>% gt_edges(screen_name, !!sym(input$network))
if(isTRUE(input$quoted) && input$network == "retweet_screen_name")
edges <- edges %>%
gt_edges_bind(screen_name, quoted_screen_name)
graph <- edges %>%
gt_nodes() %>%
gt_collect()
graph <- tbl_graph(
nodes = graph$nodes,
edges = graph$edges
) %>%
activate(nodes) %>%
mutate(
name = nodes,
id = name,
label = name,
n_tweets = n,
out_degree = centrality_degree(mode = "out"),
in_degree = centrality_degree(mode = "in"),
authority = centrality_authority(),
pagerank = centrality_pagerank(),
closeness = centrality_closeness(),
eigen = centrality_eigen(),
components = group_components(type = "weak"),
group = group_walktrap()
) %>%
igraph::as_data_frame("both")
edges <- graph$edges %>%
mutate(
id = 1:n(),
source = from,
target = to,
size = n,
type = "arrow"
) %>%
select(-one_of("to", "from"))
nodes <- graph$vertices %>%
mutate(
group = as.factor(group),
components = as.factor(components)
) %>%
select(-one_of("n", "nodes"))
session$sendCustomMessage("unload", "") # stop loading
list(
nodes = nodes,
edges = edges
)
})
output$legend <- renderUI({
nodes <- .color_nodes(graph()$nodes, "group") %>%
select(label, group, color)
if(input$network == "hashtags"){
return("")
}
leg <- tweets() %>%
select_("hashtags", "screen_name", "v2" = input$network) %>%
mutate(
screen_name = tolower(screen_name),
v2 = tolower(v2)
) %>%
left_join(nodes, by = c("screen_name" = "label")) %>%
left_join(nodes, by = c("v2" = "label"), suffix = c("_source", "_target")) %>%
mutate(
group_source = case_when(
is.na(group_source) ~ group_target,
TRUE ~ group_source,
),
color_source = case_when(
is.na(color_source) ~ color_target,
TRUE ~ color_source,
),
grp = case_when(
group_source == group_target ~ group_source,
TRUE ~ group_source
),
color = case_when(
color_source == color_target ~ color_source,
TRUE ~ color_source
)
) %>%
filter(!is.na(grp)) %>%
tidyr::unnest(hashtags) %>%
mutate(hashtgas = tolower(hashtags)) %>%
group_by(grp, color) %>%
count(hashtags, sort = TRUE) %>%
filter(hashtags != .get_search_query()) %>%
filter(!is.na(hashtags)) %>%
slice(1) %>%
ungroup() %>%
mutate(grp = as.integer(grp)) %>%
arrange(grp) %>%
slice(1:10)
ch <- as.character(unlist(leg$grp))
ch <- c("all", ch)
names(ch) <- c("All nodes", paste0("#", as.character(unlist(leg$hashtags))))
ns <- session$ns
tgs <- radioButtons(
ns("legendOut"),
"FILTER CLUSTERS",
choices = ch,
inline = TRUE,
width = "100%"
)
tgs
})
observeEvent(input$legendOut, {
ns <- session$ns
if(input$legendOut != "all")
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_filter_undo_p("legend-filter") %>%
sigmajs::sg_filter_eq_p(input$legendOut, "group", name = "legend-filter")
else if(input$legendOut == "all")
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_filter_undo_p("legend-filter")
})
output$graph <- sigmajs::renderSigmajs({
g <- graph()
nodes <- g$nodes
nodes <- .color_nodes(nodes, "group")
nodes <- .size_nodes(nodes, "n_tweets")
edges <- g$edges
sigmajs::sigmajs(type = "webgl") %>%
sigmajs::sg_nodes(nodes, id, label, size, color, group) %>%
sigmajs::sg_edges(edges, id, source, target, type, size) %>%
sigmajs::sg_force(slowDown = 4) %>%
sigmajs::sg_neighbours() %>%
sigmajs::sg_kill() %>%
sigmajs::sg_drag_nodes() %>%
sigmajs::sg_force_stop(2500) %>%
sigmajs::sg_layout() %>%
sigmajs::sg_settings(
minArrowSize = 1,
batchEdgesDrawing = TRUE,
edgeColor = "default",
defaultEdgeColor = .get_edge_color(),
font = .get_font(),
labelThreshold = 9999
)
})
observeEvent(input$colour, {
ns <- session$ns
nodes <- isolate(graph()$nodes)
df = .color_nodes(nodes, input$colour)
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_change_nodes_p(df, color, "color")
})
observeEvent(input$size, {
ns <- session$ns
nodes <- isolate(graph()$nodes)
df = .size_nodes(nodes, input$size)
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_change_nodes_p(df, size, "size")
})
output$display <- renderText({
input$graph_click_node
user <- input$graph_click_node$label
user <- gsub("#", "", user)
tw <- ""
if(!is.null(input$graph_click_node$label) & !isTRUE(input$delete_nodes)){
tw <- tweets() %>%
filter(is_retweet %in% c(FALSE, input$include_retweets)) %>%
select(
status_id,
screen_name,
retweet_count,
v2 = !!sym(input$network)
) %>%
tidyr::separate_rows(v2) %>%
mutate(
screen_name = tolower(screen_name),
v2 = tolower(v2)
)
src <- tw %>%
filter(screen_name == user) %>%
arrange(-retweet_count)
if(nrow(src) >= 1)
tw <- src %>%
slice(1) %>%
.get_tweet()
else
tw <- tw %>%
filter(v2 == user) %>%
arrange(-retweet_count) %>%
slice(1) %>%
.get_tweet()
}
if(inherits(tw, "error")){
tw <- ""
shinyjs::hide("display")
}
return(tw)
})
trend <- reactive({
.get_trend <- function(x = "%Y-%m-%d"){
tweets() %>%
filter(is_retweet %in% c(FALSE, input$include_retweets)) %>%
mutate(
created_at = format(created_at, x)
) %>%
count(created_at) %>%
pull(n) %>%
list(
trend = .,
format = x
)
}
trend <- .get_trend()
if(length(trend$trend) < 4)
trend <- .get_trend("%Y-%m-%d %H")
if(length(trend$trend) < 3)
trend <- .get_trend("%Y-%m-%d %H:%M")
if(length(trend$trend) < 2)
trend <- .get_trend("%Y-%m-%d %H:%M:%S")
return(trend)
})
output$trend_text <- renderUI({
p(strong("Tweets"), .get_time_scale(trend()$format))
})
output$trendline <- reactrend::renderReactrend({
trend()$trend %>%
reactrend::reactrend(
draw = TRUE,
gradient = .get_pal(),
smooth = TRUE,
stroke_width = 2,
line_cap = "round"
)
})
output$n_nodes <- renderUI({
p(
strong("Nodes:"),
prettyNum(
nrow(graph()$nodes),
big.mark = ","
)
)
})
output$n_edges <- renderUI({
p(
strong("Edges:"),
prettyNum(
nrow(graph()$edges),
big.mark = ","
)
)
})
output$n_tweets <- renderUI({
p(
strong("Tweets:"),
prettyNum(
nrow(tweets() %>% filter(is_retweet %in% c(FALSE, input$include_retweets))),
big.mark = ","
)
)
})
observeEvent(input$graph_click_node, {
node_clicked <- input$graph_click_node$label
ns <- session$ns
if(isTRUE(input$delete_nodes))
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_drop_node_p(id = input$graph_click_node$id)
else {
shinyjs::show("display")
shinyjs::show("hide_tweet")
}
})
observeEvent(input$start_layout, {
ns <- session$ns
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_force_start_p()
})
observeEvent(input$kill_layout, {
ns <- session$ns
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_force_kill_p()
})
observeEvent(input$noverlap, {
ns <- session$ns
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_noverlap_p(nodeMargin = .05)
})
notification <- NULL
observeEvent(input$delete_nodes, {
if(isTRUE(input$delete_nodes)){
notification <<- showNotification(
"Click a node to delete it.",
duration = NULL,
type = "error",
closeButton = FALSE
)
} else {
if (!is.null(notification)) removeNotification(notification)
notification <<- NULL
}
})
shinyjs::hide("searchOptions")
observeEvent(input$addOpts, {
ns <- session$ns
shinyjs::toggle("searchOptions")
})
output$downloadData <- downloadHandler(
filename = function() {
paste('chirp-', Sys.Date(), '.RData', sep='')
},
content = function(file) {
tw <- tweets()
save(tw, file = file)
}
)
nodes <- data.frame()
nodes_clicked <- reactive({
if(!is.null(input$graph_click_nodes))
nodes <<- rbind.data.frame(input$graph_click_nodes, nodes) %>%
slice(1:2)
})
output$source_indegree <- renderUI({
sel <- .slice_node(nodes_clicked(), 1)
if(is.null(sel))
return("")
span(
strong("In-degree"),
graph()$nodes %>%
filter(label == sel) %>%
pull(in_degree) %>%
round(.3)
)
})
output$source_outdegree <- renderUI({
sel <- .slice_node(nodes_clicked(), 1)
if(is.null(sel))
return("")
span(
strong("Out-degree"),
graph()$nodes %>%
filter(label == sel) %>%
pull(out_degree) %>%
round(.3)
)
})
output$source_pagerank <- renderUI({
sel <- .slice_node(nodes_clicked(), 1)
if(is.null(sel))
return("")
span(
strong("Pagerank"),
graph()$nodes %>%
filter(label == sel) %>%
pull(pagerank) %>%
round(.3)
)
})
output$source_eigen <- renderUI({
sel <- .slice_node(nodes_clicked(), 1)
if(is.null(sel))
return("")
span(
strong("Eigen"),
graph()$nodes %>%
filter(label == sel) %>%
pull(eigen) %>%
round(.3)
)
})
output$target_indegree <- renderUI({
sel <- .slice_node(nodes_clicked(), 2)
if(!length(sel))
return("")
span(
strong("In-degree"),
graph()$nodes %>%
filter(label == sel) %>%
pull(in_degree) %>%
round(.3)
)
})
output$target_outdegree <- renderUI({
sel <- .slice_node(nodes_clicked(), 2)
if(!length(sel))
return("")
span(
strong("Out-degree"),
graph()$nodes %>%
filter(label == sel) %>%
pull(out_degree) %>%
round(.3)
)
})
observeEvent(input$graph_click_stage, {
shinyjs::hide("display")
shinyjs::hide("hide_tweet")
})
observeEvent(input$hide_tweet, {
shinyjs::hide("display")
shinyjs::hide("hide_tweet")
})
output$target_pagerank <- renderUI({
sel <- .slice_node(nodes_clicked(), 2)
if(!length(sel))
return("")
span(
strong("Pagerank"),
graph()$nodes %>%
filter(label == sel) %>%
pull(pagerank) %>%
round(.3)
)
})
output$target_eigen <- renderUI({
sel <- .slice_node(nodes_clicked(), 2)
if(!length(sel))
return("")
span(
strong("Eigen"),
graph()$nodes %>%
filter(label == sel) %>%
pull(eigen) %>%
round(.3)
)
})
output$selected_headline <- renderUI({
sel <- .slice_node(nodes_clicked(), 1)
if(!is.null(sel))
h5(
"SELECTED NODES"
)
})
output$selected_source <- renderUI({
sel <- .slice_node(nodes_clicked(), 1)
if(is.null(sel))
p(
"Select nodes to see their network metrics",
class = "text-warning"
)
else
h5(
tags$a(
.get_random_icon(),
href = paste0("https://twitter.com/", sel),
target = "_blank"
),
sel
)
})
output$arrow_down <- renderUI({
sel <- .slice_node(nodes_clicked(), 2)
if(!length(sel))
""
else
icon("chevron-down", class = "fa-lg center_arrow")
})
output$selected_target <- renderUI({
sel <- .slice_node(nodes_clicked(), 2)
if(!length(sel))
span("")
else
h5(
tags$a(
.get_random_icon(),
href = paste0("https://twitter.com/", sel),
target = "_blank"
),
sel
)
})
output$node_search_ui <- renderUI({
ns <- session$ns
ch <- graph()$nodes %>%
pull(label)
selectizeInput(
ns("node_searched"),
"Search for a node",
multiple = FALSE,
choices = ch,
width = "100%"
)
})
observeEvent(input$search_node, {
ns <- session$ns
ratio <- .zoom(input$zoom)
id <- graph()$nodes %>%
mutate(id = 1:n()) %>%
filter(label == input$node_searched) %>%
pull(id)
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_zoom_p(id - 1, duration = 1500, ratio = ratio)
})
observeEvent(input$node_size, {
ns <- session$ns
sigmajs::sigmajsProxy(ns("graph")) %>%
sigmajs::sg_filter_undo_p("sz") %>% # we undo the filter before applying it
sigmajs::sg_filter_lt_p(input$node_size, "size", name = "sz")
})
aforce <- eventReactive(input$vr, {
vr <- ""
if(input$vr %% 2 == 1){
session$sendCustomMessage(
"load",
"Get your headset!"
)
g <- graph()
nodes <- g$nodes
nodes <- .color_nodes(nodes, "group")
nodes <- .size_nodes(nodes, "n_tweets")
vr <- aforce::aForce$
new(n_label = "label")$ # initialise
nodes(nodes, id, size, color, label)$ # add nodes
links(graph()$edges, source, target)$ # add edges
build( # build
aframer::a_camera(
`wasd-controls` = "fly: true; acceleration: 600",
aframer::a_cursor(opacity = 0.5)
),
aframer::a_sky(color=getOption("vr_background"))
)$
embed(width="100%", height = "80vh")
session$sendCustomMessage(
"unload",
""
)
}
return(vr)
})
output$aforce <- renderUI({
aforce()
})
observeEvent(input$vr, {
shinyjs::toggle("aforce")
})
}
|
a174be6a278c0883c32696f0673d4e8533bf0d83
|
cfc816b9a950b290115918a73b7fb32a8691ede5
|
/scripts/readmission/LTH_ICNARC/1_extract_patients.R
|
6d7aea0efcecaf7cb6dd7fa1a1628031bf8410ff
|
[] |
no_license
|
btcooper22/MIMIC_ICU
|
abcf30a8298fe2c961938952cf7d5ba0cb71aee2
|
41c231d9c74a88ff516ce74dcbebd65647189818
|
refs/heads/main
| 2023-06-24T07:10:51.708553
| 2021-07-27T17:12:38
| 2021-07-27T17:12:38
| 326,666,861
| 1
| 0
| null | 2021-05-25T11:04:40
| 2021-01-04T11:48:05
|
R
|
UTF-8
|
R
| false
| false
| 2,857
|
r
|
1_extract_patients.R
|
# Packages
# NOTE: use library() so a missing package errors immediately rather than
# require()'s silent FALSE return.
library(bigrquery)
library(DBI)
library(dplyr)
library(magrittr)
library(tibble)
library(purrr)
library(readr)
# Find project name
bq_projects()
# Find datasets
bq_project_datasets("prd-proj-decovid-358375")
# Find tables
bq_dataset_tables("prd-proj-decovid-358375.icnarc_analytics")
# Set ICNARC table as connection
con <- dbConnect(
  bigrquery::bigquery(),
  project = "prd-proj-decovid-358375",
  dataset = "icnarc_analytics"
)
dbListTables(con)
# Read IDs of surgical patients admitted electively for planned local surgery
surgical_ID <- tbl(con, "ICNARC") %>%
  collect() %>%
  filter(Source_ClassificationOfSurgery == "4. Elective" &
           AdmissionType == "04. Planned local surgical admission") %>%
  select(Identifiers_PatientPseudoId) %>%
  deframe() %>%
  unique()
# Load full records for those surgical patients
icnarc <- tbl(con, "ICNARC") %>%
  collect() %>%
  filter(Identifiers_PatientPseudoId %in% surgical_ID)
# Remove cases where no physiological records exist
sum(icnarc$Physiology_AllDataMissing)
icnarc %<>%
  filter(Physiology_AllDataMissing == FALSE)
# Keep only admissions with a past-medical-history assessment available
icnarc %<>%
  filter(PastMedicalHistory_AssessmentEvidenceAvailable == TRUE)
# Drop rows with empty demographics
icnarc %<>%
  filter(Demographics_UnitAdmissionAgeBand != "")
# Screen merged patients: keep only IDs whose core demographics
# (ethnicity, sex) are consistent across all their admission records
icnarc %<>%
  group_by(Identifiers_PatientPseudoId) %>%
  summarise(ethnicity = length(unique(Demographics_Ethnicity)),
            sex = length(unique(Demographics_Sex))) %>%
  filter(ethnicity == 1 & sex == 1) %>%
  ungroup() %>%
  select(Identifiers_PatientPseudoId) %>%
  left_join(icnarc)
# Count readmission rate (share of patients with more than one admission)
count_admissions <- icnarc %>%
  group_by(Identifiers_PatientPseudoId) %>%
  summarise(entries = n()) %>%
  mutate(mult = entries > 1) %>%
  ungroup()
count_admissions %>%
  summarise(readmission_rate = mean(mult) * 100)
# Write the cleaned cohort to disk
icnarc %>%
  write_csv("data/icnarc_surgical_cohort.csv")
# Multiple admissions in the same month - which is elective?
# If we can't tell, the patient should probably be excluded.
test <- icnarc %>%
  select(PriorToAdmission_HospitalAdmissionDaysBefore,
         MonthYearOfAdmission,
         UnitDischarge_DischargedDiedOnDays,
         UnitDischarge_ClinicallyReadyForDischargeDays,
         UltimateHospitalDischarge_DischargedDays,
         HospitalDischarge_DateDischarged)
# Inspect the patients EXCLUDED by the demographics-consistency screen
# (age bands non-consecutive).
# NOTE(review): the original repeated group_by(Identifiers_PatientPseudoId)
# on consecutive lines and left a dangling summarise() after this chain;
# deduplicated the group_by and attached the summarise to the pipeline -
# confirm this matches the intended exploration.
icnarc %>%
  group_by(Identifiers_PatientPseudoId) %>%
  summarise(ethnicity = length(unique(Demographics_Ethnicity)),
            sex = length(unique(Demographics_Sex))) %>%
  filter(ethnicity != 1 | sex != 1) %>%
  ungroup() %>%
  select(Identifiers_PatientPseudoId) %>%
  left_join(icnarc) %>%
  group_by(Identifiers_PatientPseudoId) %>%
  summarise(n_months = length(unique(MonthYearOfAdmission)),
            n_id = length(Identifiers_PatientPseudoId))
|
f3081a3be1c49799b2ef2f57233098438b6141ed
|
875e363e73bd4d06daccad49d030ee9d6a3a5290
|
/man/hot_table.Rd
|
561e4d4afe6ac0a207df7eb396e34b791f76bd46
|
[
"MIT",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
cran/rhandsontable
|
d8f2a0a6d86899e64de6c8b24d8db21aec3a7944
|
796904290381cad5a53988be1bf36c0176090f53
|
refs/heads/master
| 2021-06-14T09:52:46.604189
| 2021-05-27T10:50:03
| 2021-05-27T10:50:03
| 37,997,493
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,635
|
rd
|
hot_table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rhandsontable.R
\name{hot_table}
\alias{hot_table}
\title{Handsontable widget}
\usage{
hot_table(
hot,
contextMenu = TRUE,
stretchH = "none",
customBorders = NULL,
highlightRow = NULL,
highlightCol = NULL,
enableComments = FALSE,
overflow = NULL,
rowHeaderWidth = NULL,
...
)
}
\arguments{
\item{hot}{rhandsontable object}
\item{contextMenu}{logical enabling the right-click menu}
\item{stretchH}{character describing column stretching. Options are 'all', 'right',
and 'none'}
\item{customBorders}{json object}
\item{highlightRow}{logical enabling row highlighting for the selected
cell}
\item{highlightCol}{logical enabling column highlighting for the
selected cell}
\item{enableComments}{logical enabling comments in the table}
\item{overflow}{character setting the css overflow behavior. Options are
auto (default), hidden and visible}
\item{rowHeaderWidth}{numeric width (in px) for the rowHeader column}
\item{...}{passed to \href{https://handsontable.com/}{Handsontable.js} constructor}
}
\description{
Configure table. See
\href{https://handsontable.com/}{Handsontable.js} for details.
}
\examples{
library(rhandsontable)
DF = data.frame(val = 1:10, bool = TRUE, big = LETTERS[1:10],
small = letters[1:10],
dt = seq(from = Sys.Date(), by = "days", length.out = 10),
stringsAsFactors = FALSE)
rhandsontable(DF) \%>\%
hot_table(highlightCol = TRUE, highlightRow = TRUE)
}
\seealso{
\code{\link{rhandsontable}}
}
|
ceb538cc78edd69b508cbbdbce2f197208e962ad
|
9262e777f0812773af7c841cd582a63f92d398a4
|
/inst/userguide/figures/Covar--Covar_sec6_05_month-factor-marss-params.R
|
a1c63c92a9ffbe873eed1b4879a0a7be9e78a7c8
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
nwfsc-timeseries/MARSS
|
f0124f9ba414a28ecac1f50c4596caaab796fdd2
|
a9d662e880cb6d003ddfbd32d2e1231d132c3b7e
|
refs/heads/master
| 2023-06-07T11:50:43.479197
| 2023-06-02T19:20:17
| 2023-06-02T19:20:17
| 438,764,790
| 1
| 2
|
NOASSERTION
| 2023-06-02T19:17:41
| 2021-12-15T20:32:14
|
R
|
UTF-8
|
R
| false
| false
| 747
|
r
|
Covar--Covar_sec6_05_month-factor-marss-params.R
|
###################################################
### code chunk number 15: Covar_sec6_05_month-factor-marss-params
###################################################
# MARSS model specification: each parameter below is a text shortcut
# understood by MARSS() (e.g. "diagonal and unequal", "identity", "zero").
# Each taxon has unique density-dependence
B <- "diagonal and unequal"
# Independent process errors
Q <- "diagonal and unequal"
# We have demeaned the data & are fitting a mean-reverting model
# by estimating a diagonal B, thus
U <- "zero"
# Each obs time series is associated with only one process
Z <- "identity"
# The data are demeaned & fluctuate around a mean
A <- "zero"
# Observation errors are independent, but they
# have similar variance due to similar collection methods
R <- "diagonal and equal"
# No covariate effects in the obs equation
D <- "zero"
d <- "zero"
|
9f23b642a8a511a961cbdee26475fa2babb4a84d
|
6ad7738a48f862ac9b022c7422d2b5e7aefb0a1c
|
/R/lsPreview.R
|
8ccab930de93547df8b9ad04f3c2c87ece708bf2
|
[] |
no_license
|
cran/RGISTools
|
79949265b94d74cd95f7f697b283687d7bf8fbb5
|
101bb144ed9463c0fc807dadf74934942c8d42b7
|
refs/heads/master
| 2020-12-22T17:58:59.781343
| 2020-05-20T13:20:06
| 2020-05-20T13:20:06
| 236,882,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,404
|
r
|
lsPreview.R
|
#' Preview Landsat-7 or Landsat-8 satellite images
#'
#' \code{lsPreview} shows a preview of the \code{n}-th image from a set of
#' search results on an interactive map.
#'
#' The function shows a preview of the \code{n}-th output image from a search
#' in the Landsat archives (\code{\link{ls7Search}} or \code{\link{ls8Search}},
#' with \code{browseAvailable = "Y"}). The preview is downloaded from
#' \href{https://www.usgs.gov/land-resources/nli/landsat/bulk-metadata-service}{USGS Bulk Metadata Service}.
#' Please, be aware that only some images may have a preview.
#'
#' @param searchres a \code{data.frame} with the results from
#' \code{\link{ls7Search}} or \code{\link{ls8Search}}.
#' @param dates a vector of \code{Date}s being considered
#' for previewing. This argument is mandatory if
#' \code{n} is not defined.
#' @param n a \code{numeric} argument identifying the location of the image in
#' \code{searchres}.
#' @param lpos vector argument. Defines the position of the red-green-blue
#' layers to enable false color visualization.
#' @param add.Layer logical argument. If \code{TRUE}, the function plots the
#' image on an existing map. Allows combinations of images on a map using
#' \code{\link{senPreview}} and \code{\link{modPreview}} functions.
#' @param verbose logical argument. If \code{TRUE}, the function prints the
#' running steps and warnings.
#' @param ... arguments for nested functions:
#' \itemize{
#' \item arguments allowed by the \code{viewRGB} function from the
#' \code{mapview} packages are valid arguments.
#' }
#' @return this function does not return anything. It displays a preview of
#' one of the search results.
#' @examples
#' \dontrun{
#' # load a spatial polygon object of Navarre
#' data(ex.navarre)
#' wdir <- file.path(tempdir(),"Path_for_downloading_folder")
#' # retrieve jpg images covering Navarre between 2011 and 2013
#' sres <- ls7Search(startDate = as.Date("01-01-2011", "%d-%m-%Y"),
#' endDate = as.Date("31-12-2013", "%d-%m-%Y"),
#' extent = ex.navarre,
#' precise = TRUE,
#' browseAvaliable = "Y",
#' AppRoot = wdir)
#' lsPreview(sres, 1)
#' # filter the images with less than 1% pixels covered by clouds
#' sres.cloud.free = subset(sres, sres$cloudCover < 1)
#' lsPreview(sres.cloud.free, 1)
#' lsPreview(sres.cloud.free, 2,add.Layer = TRUE)
#' # plot all the images in one date
#' lsPreview(sres.cloud.free,dates=as.Date("2013-09-04"))
#' }
lsPreview <- function(searchres, n, dates, lpos = c(3, 2, 1), add.Layer = FALSE, verbose = FALSE, ...) {
  # Only accept result objects from the Landsat search functions.
  # inherits() is the robust class test: class() can return a vector,
  # making the original `class(x) != "cls"` comparisons unreliable.
  if (!inherits(searchres, c("ls7res", "ls8res", "lsres"))) {
    stop("A response from landsat 7-8 search function is needed.")
  }
  class(searchres) <- "data.frame"
  # Without `dates`, preview only the n-th record.
  if (missing(dates)) {
    return(.lsPreviewRecursive(searchres = searchres, n = n, lpos = lpos,
                               add.Layer = add.Layer, verbose = verbose, ...))
  }
  # With `dates`, preview every record acquired on those dates,
  # stacking them onto a single map view.
  searchres <- searchres[as.Date(unlist(searchres$acquisitionDate)) %in% dates, ]
  if (nrow(searchres) == 0) {
    stop("There is no image for preview on the requested dates.")
  }
  .lsPreviewRecursive(searchres = searchres, n = 1, lpos = lpos,
                      add.Layer = add.Layer, verbose = verbose, ...)
  if (nrow(searchres) > 1) {
    for (x in 2:nrow(searchres)) {
      # Subsequent previews are added on top of the existing map view.
      .lsPreviewRecursive(searchres = searchres, n = x, lpos = lpos,
                          add.Layer = TRUE, verbose = verbose, ...)
    }
  }
  return(getRGISToolsOpt("GMapView"))
}
# Internal worker: download the browse (quick-look) image for the n-th
# search record, georeference it and add it to a mapview session.
# `dates` is accepted but unused here (callers filter by date beforehand)
# - TODO confirm before removing it from the signature.
.lsPreviewRecursive<-function(searchres,n,dates,lpos,add.Layer,verbose,...){
  # single record (one image) of the search results
  ser<-searchres[n,]
  tmp <- tempfile()
  # download the preview JPEG; warnings suppressed unless verbose
  if(verbose){
    download.file(unlist(ser$browseURL),tmp,mode="wb")
  }else{
    suppressWarnings(download.file(unlist(ser$browseURL),tmp,mode="wb"))
  }
  r<-stack(tmp)
  # corner coordinates come from the metadata columns whose names contain
  # "Latitude"/"Longitude"; the bounding box is their min/max range
  lat<-unlist(ser[grepl("Latitude",names(ser))])
  lon<-unlist(ser[grepl("Longitude",names(ser))])
  extent(r)<-extent(min(lon),max(lon),min(lat),max(lat))
  # assign geographic coordinates (EPSG:4326)
  projection(r)<-st_crs(4326)$proj4string
  # layer name encodes path/row and acquisition date (year + day-of-year)
  if(verbose){
    return(genMapViewSession(r,lpos,lname=paste0("LS_",ser["path"],ser["row"],"_D",format(as.Date(unlist((ser["acquisitionDate"]))),"%Y%j")),add.Layer=add.Layer,...))
  }else{
    return(suppressWarnings(genMapViewSession(r,lpos,lname=paste0("LS_",ser["path"],ser["row"],"_D",format(as.Date(unlist((ser["acquisitionDate"]))),"%Y%j")),add.Layer=add.Layer,...)))
  }
}
|
2b1349ebf4b595a9e02db7274279575079abb865
|
19acf16c1c613e606e992204645c4fbabcfe6f80
|
/RScript02_SeqCompTest.R
|
1f3529e965a303fdd22a70e04f1ab914038df9ce
|
[] |
no_license
|
snandi/RScripts_BAC
|
d146b2a4a500f334143e2392cdc0c654f548bb42
|
b8c46d12134f00d77a944c7570b592eea6179110
|
refs/heads/master
| 2021-01-10T03:33:30.828352
| 2017-01-05T17:34:07
| 2017-01-05T17:34:07
| 46,696,462
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,238
|
r
|
RScript02_SeqCompTest.R
|
rm(list=ls(all.names=TRUE))
rm(list=objects(all.names=TRUE))
#dev.off()
########################################################################
## This script reads the sequence data from the fasta files, creates
## GCAT ratios for each interval, creates the ggplot objects for the
## sequence plots, and the signals (fluctuation of GC percentages)
########################################################################
## Execute this code as follows:
## nohup R CMD BATCH --no-save '--args Chr="chr7" NCores=11' RScript02_SeqCompTest.R chr7_seqcomp.Rout &
########################################################################
## Run Path definition file ##
########################################################################
RScriptPath <- '~/Project_BAC/RScripts_BAC/'
DataPath <- '~/Project_BAC/Data/'
RDataPath <- '~/Project_BAC/RData/'
RPlotPath <- '~/Project_BAC/Plots/'
Filename.Header <- paste('~/Project_BAC/RScripts_BAC/HeaderFile_BAC.R', sep='')
source(Filename.Header)
source(paste(RScriptPath, 'fn_Library_BAC.R', sep=''))
DataPath.mm52 <- '/z/Proj/newtongroup/snandi/mm52-all7341/intensities_inca34_1pixel/'
Packages_Par <- MyAutoLoads
# source('~/R_Packages/Registration/R/loadPackages.R')
# library(rpart)
# Packages_Par <- c(Packages_Par, 'seqinr')
# ########################################################################
Args <- (commandArgs(TRUE))
for(i in 1:length(Args)){
eval(parse(text = Args[[i]]))
}
Today <- Sys.Date()
ConversionFactor <- 206
BasePairInterval <- ConversionFactor
#Chr <- 'chr7'
#ChrNum <- 7
ChrNum <- gsub(pattern = 'chr', replacement = '', x = Chr)
bp.loc <- fn_load_bploc(
ConversionFactor = ConversionFactor,
Filename.bploc = paste0('/ua/snandi/human_nMaps/GC_Content/mm52_all7431.goldOnly.bploc_', Chr)
)
########################################################################
## Corresponding Nmaps of the BAC DNA regions exist only for Chr 7
########################################################################
FragIndex <- 5
FragIndices <- c(12437:12447)
BackbonePixels <- 1
OpticalRes_Factor <- 1
########################################################################
## Load the list of fragements and the number of molecules aligned to them
########################################################################
Filename_fragTable <- paste0('/z/Proj/newtongroup/snandi/mm52-all7341/RData/', Chr, '/', Chr, '_Table.RData')
load(Filename_fragTable)
Table <- get(paste0(Chr, '_', 'Table'))
FragIndices10 <- subset(Table, numMolecules >= 10)[, 'refStartIndex']
FragIndices20 <- subset(Table, numMolecules >= 20)[, 'refStartIndex']
#########################################################################
BasePairInterval <- 206*OpticalRes_Factor ## Length of base pair interval to estimate gcat %
NumBP_Frag <- subset(bp.loc, alignedChr == Chr & alignedFragIndex == FragIndex)[['BasePairLength']] ## Length of frag in BP
#NumSubFrag <- round(NumBP_Frag/BasePairInterval, 0) ## Number of sub fragments
PixelLength_Theo <- subset(bp.loc, alignedChr == Chr & alignedFragIndex == FragIndex)[['PixelLength_Theo']]
# fn_saveSeqComp(
# Chr = Chr,
# FragIndex = FragIndex,
# bp.loc = bp.loc,
# BasePairInterval = BasePairInterval,
# Save = TRUE,
# DataPath = DataPath.mm52
# )
#########################################################################
## Parallelized execution of saving sequence compositions list of objects
#########################################################################
## For parallel execution, using doParallel
#cl <- makeCluster(NCores)
#cl <- makePSOCKcluster(NCores)
#doParallel::registerDoParallel(cl)
Time1 <- Sys.time()
# For parallel execution, using doSNOW
cl <- makeCluster(NCores, type = "SOCK")
doSNOW::registerDoSNOW(cl)
foreach(FragIndex = FragIndices10[1:20], .inorder = FALSE, .packages = Packages_Par) %dopar% fn_saveSeqComp(
Chr = Chr,
FragIndex = FragIndex,
bp.loc = bp.loc,
BasePairInterval = BasePairInterval,
Save = TRUE,
DataPath = DataPath.mm52
)
stopCluster(cl)
print(Sys.time() - Time1)
## The text files saved by this function contains the following elements:
## Chr, FragIndex, GC_VarIndex, GC_pct, Length_kb, Length_Pixels
|
a61c426d4a52b8f3c402d992ed3557da06231c15
|
72cf63cd0879026594008fcb437ed6c4f074b863
|
/old-code/simulation-code-FDA.R
|
adb019988e07375080306df5500c9e5132463d4f
|
[] |
no_license
|
JiaweiXu-code/BACPAC
|
5dd8b1efd407560cfebc30af458f1bca424537e2
|
d4c066fe597a001312546df79c890eee3e06bde4
|
refs/heads/main
| 2023-04-18T03:50:17.609420
| 2021-05-11T14:12:28
| 2021-05-11T14:12:28
| 344,529,079
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,485
|
r
|
simulation-code-FDA.R
|
source("C:/D/R-package/gibbs.R");
#source("/pine/scr/j/i/jiawei/Rpackage/gibbs.R");
## set seet for reproducibility;
set.seed(1);
## Historical Data (to define power prior);
hst_n = 25.0; ## sample size;
hst_mn = 0.0; ## azm cfb mean (also mean of normal power prior);
hst_sd = 8.0; ## azm sd for cfb;
fb_sd = hst_sd/sqrt(hst_n); ## posterior standard deviation under full borrowing;
## Simulation Precision Inputs
nSims = 500;
nSamp = 500;
## Sample Size / Monitoring Inputs
nMin = 24;
nMax = 70;
nByVec = c(nMin,8,8,8,8,14);
## Substantial Evidence Threshold
ec = 1-0.025;
## Hypothesized treatment effect (T:P increase from week 6 to week 8);
targetDelta = 5.5;
## Randomization information;
## note 1 = treatment, 2 = control;
block = c(1,1,2,2);
block_size = length(block);
## Parameters for enrollment distribution (interarrival times);
enr_mn = 4.0; ## one patient per 4 weeks;
enr_sd = 0.5;
## Parameters for outcome ascertainment distribution;
asc_mn = 8.00; ## 8 weeks +/- 1 week;
asc_sd = 0.25;
## skeptical prior;
skp_mn = 0; ## mean;
skp_sd = targetDelta/qnorm(ec) ## standard deviation;
skp_vr = skp_sd^2; ## variance;
## enthusiastic prior;
ent_mn = targetDelta; ## mean;
ent_sd = targetDelta/qnorm(ec) ## standard deviation;
ent_vr = ent_sd^2; ## variance;
## True parameters in data generation model;
true_sd = 8.0;
## True mean change from week 6 to week 8 in T:T arm;
tmu1 = 0.0;
tmu2 = 5.5;
true_mn = c(tmu1,tmu2);
###############################################################################################################
###############################################################################################################
## begin code for simulation studies;
## create simulation results container;
results=matrix(0,nrow=nSims,ncol=24);
## loop for simulation studies;
start_time <- Sys.time()
for(sim in (1:nSims))
{
############# begin simulation code;
## simulation results containers;
stop_enrollment = 0; ## Indicator for early stoppage of enrollment (or trial if futility criteria met)
final_analysis = 0; ## Indicator for final analysis; Note that final analysis may occur after ongoing patients are followed-up;
n = 0; ## Number of patients currently enrolled
nInt = 0; ## Number of patients at interim where early stoppage takes place
analysis = 0; ## Number of analyses performed
time_of_analysis = c(0,0); ## vector for time of analyses [interim, final]
eff = c(0,0); ## indicator vector for efficacy criterion being met [interim, final]
fut = 0; ## indicator futility stopping criterion being met [interim only];
######## Generate data for the full hypothetical trial;
## Generate enrollment times and outcome ascertainment times;
r = cumsum(rnorm(nMax,mean=enr_mn,sd=enr_sd)); ## cumulative enrollment times;
w = rnorm(nMax,mean=asc_mn,sd=asc_sd); ## ascertainment times;
e = r + w; ## [study start] --> [outcome ascertainment] times
## Simulate treatment group assignments;
z = rep(0,nMax);
totalZ = 0;
while(totalZ<nMax)
{
start = totalZ + 1;
stop = min(totalZ + block_size,nMax);
totalZ = totalZ + block_size;
z[start:stop] = sample(block,block_size,replace=FALSE)[1:(stop-start+1)];
}
## Simulate outcomes;
y = rnorm(nMax,mean=true_mn[z],sd=true_sd);
######## Order patient data based on calendar time-to-outcome ascertainment;
## create a data matrix and order by
dat = cbind(r,w,e,z,y);
dat = dat[order(dat[,3]),];
## re-extract ordered data vectors;
r = dat[,1];
w = dat[,2];
e = dat[,3];
z = dat[,4];
y = dat[,5];
x = cbind(matrix(1,nrow=nMax,ncol=1),(z==2));
## destroy temporary data container;
rm(dat);
loopNumber = 0;
######## Sequentially analyze study data;
repeat
{
if (stop_enrollment==0) { ## indicator trial should continue;
## increment number of outcomes ascertained;
loopNumber = loopNumber + 1;
n = n + nByVec[loopNumber];
## if minimum sample size is reached, increment sample size;
if (n >= nMin) { analysis = analysis + 1 }
## identify time of most recent interim analysis
time_of_analysis[1] = e[n];
## extract current observed data;
yDat = y[1:n];
zDat = z[1:n];
xDat = x[1:n,];
nREF = sum((z==1)[1:n]);
## determine how many subjects are ongoing in the study;
nOngoing = sum(r<time_of_analysis[1])-n;
} else ## final analysis (to take place one interim stoppage criteria are met and ongoing patients are followed up;
{
final_analysis = 1;
## store number of outcomes ascertained at interim analysis;
nInt = n;
if (fut==0) ## perform final data aggregation only if futility criterion has NOT been met;
{
## determine how many subjects are currently already enrolled
enrolled_set = (r<time_of_analysis[1]);
## identify time of final analysis
time_of_analysis[2] = max(e[enrolled_set]);
## extract final data;
yDat = y[enrolled_set];
zDat = z[enrolled_set];
xDat = x[enrolled_set,];
nREF = sum((z==1)[1:n]);
n = length(yDat);
} else ## no further analysis if futility criterion HAS been met;
{
time_of_analysis[2] = time_of_analysis[1];
}
}
## Perform data analysis once minimum number of outcomes are ascertained;
if (n >= nMin)
{
## compute deterministic power prior parameter and associated SD;
a0 = min(1,nREF/hst_n);
## compute prior variance and effective sample size;
hvr = hst_sd^2/(hst_n*a0);
ess = n + a0*hst_n;
## skeptical prior analysis;
## construct covariance matrix (skeptical prior)
cov0 = matrix(c(hvr,0,0,skp_vr),nrow=2,ncol=2);
beta0 = c(hst_mn,skp_mn);
## perform gibbs sampler;
skeptical_results = gibbs_sampler(nSamp,yDat,xDat,cov0,beta0);
skp_pp = skeptical_results[5];
## enthusiastic prior analysis;
## construct covariance matrix (enthusiastic prior)
cov0 = matrix(c(hvr,0,0,ent_vr),nrow=2,ncol=2);
beta0 = c(hst_mn,ent_mn);
## perform gibbs sampler;
enthusiastic_results = gibbs_sampler(nSamp,yDat,xDat,cov0,beta0,targetDelta);
ent_pp = enthusiastic_results[5];
## maximum sample size reached (stop trial, considered final analysis);
if (n>= nMax) {stop_enrollment = 1; final_analysis = 1; }
## evaluate futility criterion (stop trial, considered final analysis);
if ((ent_pp<(1-ec)) & (final_analysis==0)) { stop_enrollment=1; fut=1; final_analysis=1; }
## evaluate efficacy criterion (stop trial + consider follow-up on ongoing patients);
if ((skp_pp>ec) & (final_analysis==0)) { stop_enrollment=1; eff[1]=1; }
if ((skp_pp>ec) & (final_analysis==1)) { eff[2]=1; }
}
## write out final results data from study;
if (stop_enrollment==1 & final_analysis==1)
{
betaHat = solve(t(xDat)%*%xDat)%*%t(xDat)%*%yDat
#betaHat = ginv(t(xDat)%*%xDat)%*%t(xDat)%*%yDat
muHat = c(betaHat[1],sum(betaHat));
results[sim,1:5] = skeptical_results;
results[sim,6:10] = enthusiastic_results;
results[sim,11:24] = c(analysis,nInt,nOngoing,n,ess,betaHat,fut,eff,true_sd,true_mn,a0)
break;
}
}
}
end_time <- Sys.time()
end_time - start_time
cres = c("tau","mu1","mu0","diff","pp");
colnames(results) <- c(paste("skp",cres,sep="_"),paste("ent",cres,sep="_"),c("analysis","nInt","nOngoing","nFin","essFin","y1Fin","y2Fin","fut","effInt","effFin","true_sd","true_mu1","true_mu0","a0"));
#head(results);
results2 = matrix(colMeans(results),nrow=1);
colnames(results2) <- c(paste("skp",cres,sep="_"),paste("ent",cres,sep="_"),c("analysis","nInt","nOngoing","nFin","essFin","y1Fin","y2Fin","fut","effInt","effFin","true_sd","true_mu1","true_mu0","a0"))
#head(results2)
write.csv(results2, file = "C:/D/R-package/R-results.csv")
write.csv(results2, file = "/pine/scr/j/i/jiawei/Rpackage/R-results.csv")
|
9e9c01a4794f58c62981fdfb1e14ffd49e9d443f
|
44ea20642e56ff6cc836029bcda5a29390335b30
|
/man/d.binormal.Rd
|
e5f0224f8974269db4e51f68cf5095cd9e3997af
|
[] |
no_license
|
cran/idr
|
e8906789b0be3ba0663d46da33f36ea46c2cfd96
|
4fa51a408935584f97292a091cf32c6e307d9cc6
|
refs/heads/master
| 2022-07-18T03:32:18.278592
| 2022-06-21T06:30:07
| 2022-06-21T06:30:07
| 17,696,749
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,544
|
rd
|
d.binormal.Rd
|
\name{d.binormal}
\alias{d.binormal}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Log density of bivariate Gaussian distribution with symmetric marginals
}
\description{
Compute the log-density for parameterized bivariate Gaussian
distribution N(mu, mu, sigma, sigma, rho).
}
\usage{
d.binormal(z.1, z.2, mu, sigma, rho)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{z.1}{ a numerical data vector on coordinate 1. }
\item{z.2}{ a numerical data vector on coordinate 2. }
\item{mu}{ mean }
\item{sigma}{ standard deviation }
\item{rho}{ correlation coefficient }
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
Log density of bivariate Gaussian distribution N(mu, mu, sigma, sigma, rho).
}
\references{
Q. Li, J. B. Brown, H. Huang and P. J. Bickel. (2011)
Measuring reproducibility of high-throughput experiments. Annals of Applied Statistics, Vol. 5, No. 3, 1752-1779.
}
\author{
Qunhua Li
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%}
\examples{
z.1 <- rnorm(500, 3, 1)
rho <- 0.8
## The component with higher values is correlated with correlation coefficient=0.8
z.2 <- rnorm(500, 3 + 0.8*(z.1-3), (1-rho^2))
mu <- 3
sigma <- 1
den.z <- d.binormal(z.1, z.2, mu, sigma, rho)
den.z
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{internal}
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
6f3cc6c7f66f38c99d47490fb6afe672fa77ae7c
|
bf378a66012b6470250c2c9b1e8aa9fd33c67da9
|
/R20190807_17.R
|
062fd6d642c71214bfd23a72a6500082285a2c49
|
[] |
no_license
|
meenzoon/R-programming
|
baa30902c9ca232b00f62c988c13e59de97109b8
|
9065fb9f7168b5487dc314a1a82c4456fd981ee2
|
refs/heads/master
| 2022-01-21T02:11:42.853224
| 2019-08-12T08:51:24
| 2019-08-12T08:51:24
| 198,178,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,725
|
r
|
R20190807_17.R
|
# Day 17 class - 2019-08-07 (Wed)
library(dplyr)
library(ggplot2)
library(readxl)
##########
### Project 11 - age-group composition by region
# < Step 1 > Inspect and preprocess variables (region, age group)
# 1-1. Inspect the region code and derive a `region` label variable from it.
# code_region - 1: Seoul, 2: metro area (Incheon/Gyeonggi),
#   3: Busan/Gyeongnam/Ulsan, 4: Daegu/Gyeongbuk, 5: Daejeon/Chungnam,
#   6: Gangwon/Chungbuk, 7: Gwangju/Jeonnam/Jeonbuk/Jeju
class(welfare$code_region)
table(welfare$code_region)
# Lookup table mapping each region code to its (Korean) region name.
# NOTE(review): original label "수도권(인천/경기" was missing the closing
# parenthesis; fixed so plot labels render correctly.
list_region <- data.frame(code_region = c(1:7), region = c("서울", "수도권(인천/경기)", "부산/경남/울산", "대구/경북", "대전/충남", "강원/충북", "광주/전남/전북/제주도"))
list_region
# Join region names onto welfare to create the `region` variable.
# NOTE(review): original passed `id = "code_region"`; left_join's key
# argument is `by`, so the original relied on an accidental natural join.
welfare <- left_join(welfare, list_region, by = "code_region")
table(welfare$region)
table(is.na(welfare$region)) # FALSE: 14923 - no missing region values
# 1-2. Age-group variable (ageg) was already prepared in project 3.
# < Step 2 > Summary table: per-region age-group proportions
region_ageg <- welfare %>%
  filter(!is.na(region) & !is.na(ageg)) %>%
  group_by(region, ageg) %>%
  summarise(count = n()) %>%
  mutate(tot = sum(count)) %>%
  mutate(ratio = round(count / tot * 100, 2))
View(region_ageg)
# < Step 3 > Visualisation - bar charts
# vertical stacked bars
ggplot(data = region_ageg, aes(x = region, y = ratio, fill = ageg)) + geom_col()
# horizontal stacked bars
ggplot(data = region_ageg, aes(x = region, y = ratio, fill = ageg)) + geom_col() + coord_flip()
##########
### Project 12-1 - share of the elderly ("old") age group by region
# < Step 2 > Summary table
# keep only the "old" age group, sorted by ratio descending
region_old <- region_ageg %>%
  filter(ageg == "old") %>%
  arrange(-ratio)
region_old
# < Step 3 > Visualisation
# horizontal bars ordered by ratio
ggplot(data = region_old, aes(x = reorder(region, ratio), y = ratio)) + geom_col() + coord_flip()
# < Step 4 > Findings
# Daegu/Gyeongbuk has the highest elderly share (49.3%), followed by
# Gangwon/Chungbuk, Gwangju/Jeonnam/Jeonbuk/Jeju, Busan/Gyeongnam/Ulsan,
# Daejeon/Chungnam, Seoul and the metro area (Incheon/Gyeonggi). The gap
# between the highest (49.3%) and lowest (31.8%) region is 17.5 points.
##########
##########
##########
# < Word-cloud project 1 >
# KoNLP (Korean Natural Language Processing) - Korean NLP package.
# KoNLP is implemented in Java, so a JDK and JAVA_HOME are required.
Sys.setenv(JAVA_HOME = "C:/Program Files/Java/jdk1.8.0_221/")
library(rJava)
library(memoise)
library(KoNLP)
library(stringr)
# Choose one of the three available dictionaries:
# useSystemDic() # system dictionary, ~280k words
# useSejongDic() # Sejong dictionary, ~370k words
useNIADic() # NIA dictionary, ~980k words (selected)
# Load the text to analyse with the word cloud.
hiphop <- readLines("c:/study/data1/hiphop.txt")
View(hiphop)
class(hiphop) # character
# Replace all special (non-word) characters with spaces.
# NOTE(review): original pattern was "\\w" (word characters), which would
# delete the text itself; "\\W" matches the special characters as intended.
hiphop <- str_replace_all(hiphop, "\\W", " ")
# Extract nouns.
noun <- extractNoun(hiphop)
View(noun)
|
efdacd744e3c5b135fef7c9fbd2cc0889bfc8d49
|
6cbc6e80ae07b8fb1fff0a5cad4ddcd29c358c0a
|
/man/ezr.tbl_to_image.Rd
|
64772c9b0f7dce5449b661b8612ee11d27f19c2c
|
[] |
no_license
|
lenamax2355/easyr
|
d99638b84fd9768774fa7ede84d257b10e0bacf6
|
37ab2fe5c28e83b9b5b3c0e3002f2df45708016b
|
refs/heads/master
| 2022-01-09T20:43:17.801623
| 2019-05-13T02:49:48
| 2019-05-13T02:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 830
|
rd
|
ezr.tbl_to_image.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ezr_image_to_table.R
\name{ezr.tbl_to_image}
\alias{ezr.tbl_to_image}
\title{Table to Image}
\usage{
ezr.tbl_to_image(dataset, n_records = 10, only_columns = NULL,
exclude_columns = NULL, theme = NULL)
}
\arguments{
\item{dataset}{Dataframe to make image of.}
\item{n_records}{Only use n records such as when printing a few rows from dataframe. Default is 10.}
\item{only_columns}{Default is NULL. Only use some columns.}
\item{exclude_columns}{Default is NULL. Exclude some columns.}
\item{theme}{Theme. See ggpubr::ggtexttable for more. Default is 'mBlack'. 'classic' is other good option.}
}
\value{
Returns a tableGrob which can be plotted with grid::grid.table(). Auto generates image.
}
\description{
Converts a table to an image.
}
|
dc169062f9db44b23ab7cbc081780e0d4fce9945
|
1be7bdfb44a4b4ae98cdc48046042f0739fefde1
|
/LSKAT/R/longskat-plink.r
|
57e2ab0e1b38b9069c62eaa8f73c23209c57f133
|
[] |
no_license
|
wzhy2000/LSKAT
|
c521ebe4247fb592a2bec3d2110a41a523e5ca2c
|
d1dee4cc874195eaaab854c9f5f812a4f17ad27b
|
refs/heads/master
| 2020-04-15T10:53:49.427329
| 2018-06-22T20:21:00
| 2018-06-22T20:21:00
| 52,325,786
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,814
|
r
|
longskat-plink.r
|
# Reference class describing a loaded PLINK data set.
# Fields:
#   options  - file paths (bed/bim/fam) and the plink executable path
#   snp      - fam/bim tables and (optionally) an in-memory genotype matrix
#   gen.list - gene set: gene names and the gene->SNP mapping table
#   ind.list - individual membership table (and removed individuals)
# NOTE(review): the generator returned by setRefClass() is not assigned
# here; it is recovered later via getRefClass("PLINK.refer").
setRefClass("PLINK.refer",
fields = list(
options = "list",
snp = "list",
gen.list = "list",
ind.list = "list"),
methods = list(
# Print a short human-readable summary of the loaded data set.
show = function()
{
cat("Reference PLINK class", classLabel(class(.self)), "\n")
cat("PLINK BED=", options$file.plink.bed,"\n")
cat("PLINK BIM=", options$file.plink.bim,"\n")
cat("PLINK FAM=", options$file.plink.fam,"\n")
cat("PLINK Path=", options$plink.path,"\n")
cat("Individual=", NROW( snp$fam ),"\n")
cat("Gene count=", gen.list$len,"\n")
cat("SNP count=", NROW( snp$bim),"\n")
}
)
);
# Impute missing genotype calls in a 0/1/2 dosage matrix, column by column.
#   impute = "mean": replace each NA with the column mean.
#   otherwise:       draw each NA from the observed genotype frequencies.
# After imputation, a column whose allele frequency exceeds 0.5 is flipped
# (2 - g) so that the dosage always counts the minor allele.
# NOTE(review): in the random branch the second threshold assignment
# overwrites the first for draws below freq0/total, so genotype 0 is never
# produced — preserved here as-is; confirm intent before changing.
snp_impute <- function(snp.mat, impute = "mean")
{
  out <- snp.mat
  for (j in seq_len(NCOL(snp.mat)))
  {
    g <- snp.mat[, j]
    miss <- which(is.na(g))
    if (length(miss) > 0)
    {
      if (impute == "mean")
      {
        g[miss] <- mean(g, na.rm = TRUE)
      }
      else
      {
        # Sample replacements according to the empirical 0/1/2 frequencies.
        freq0 <- length(which(g == 0))
        freq1 <- length(which(g == 1))
        total <- length(g)
        u <- runif(length(miss))
        draw <- rep(2, length(miss))
        draw[u <= freq0 / total] <- 0
        draw[u <= (freq0 + freq1) / total] <- 1
        g[miss] <- draw
      }
    }
    # Flip so the reported dosage counts the minor allele.
    if (mean(g) / 2 > 0.5) g <- 2 - g
    out[, j] <- g
  }
  return(out)
}
# Column-wise standard deviations of a matrix or data frame.
# Fixed: the original grew the result with c() inside a loop (quadratic
# copying); vapply preallocates and guarantees a numeric vector of the
# right length. Returns an unnamed numeric vector, one value per column,
# exactly like the original.
colSds <- function(mat, na.rm = TRUE)
{
	vapply(seq_len(dim(mat)[2]),
	       function(j) sd(mat[, j], na.rm = na.rm),
	       numeric(1))
}
# Load genotypes for the given individuals/SNPs from a PLINK binary fileset.
# When a plink executable is supplied, the data set is subset on disk first
# (cheap for large cohorts) and only the resulting small fileset is read;
# otherwise the whole fileset is read and rows are subset in memory.
# Args:
#   file.plink.bed/.bim/.fam - paths of the PLINK fileset
#   individuals - individual IDs, matched against column 2 of the FAM file
#   snps        - SNP identifiers to extract
#   plink       - path to the plink executable, or NULL/NA to subset in R
# Returns: a read.plink()-style list (genotypes / fam / map).
load_gene_plink <- function(file.plink.bed, file.plink.bim, file.plink.fam, individuals, snps, plink)
{
if( !is.null(plink) && !is.na(plink) )
{
# Unique temp fileset so concurrent calls do not collide.
tmp <- tempfile(pattern = "LSKAT.temp.");
tmp.file.ind <- paste(tmp, "ind", sep=".");
tmp.file.snp <- paste(tmp, "snp", sep=".");
tmp.all <- paste(tmp, "*", sep=".");
tb.fam <- read.table(file.plink.fam, header=F);
idx.indi <- match(individuals, tb.fam[,2]);
# plink --keep expects family-ID / individual-ID pairs.
write.table( tb.fam[idx.indi, c(1,2)], file=tmp.file.ind, col.names=F, row.names=F, quote=F);
write.table( snps, file=tmp.file.snp, col.names=F, row.names=F, quote=F);
plink.cmd <- paste(plink, "--bed", file.plink.bed, "--fam", file.plink.fam, "--bim", file.plink.bim, "--extract", tmp.file.snp, "--keep", tmp.file.ind, "--out", tmp, "--make-bed");
plink.status <- system(plink.cmd, wait=T, intern=T);
# read.plink() here resolves to snpStats::read.plink (attached by caller).
snp.mat <- read.plink( paste( tmp, "bed", sep="."), paste(tmp, "bim", sep="."), paste(tmp, "fam", sep=".") );
unlink(c(tmp.all, tmp.file.ind, tmp.file.snp));
}
else
{
snp.mat <- read.plink( file.plink.bed, file.plink.bim, file.plink.fam);
# Keep only the requested individuals, in the requested order.
# NOTE(review): this branch does not subset SNP columns — callers are
# expected to pick columns later; confirm that is intended.
idx.fam <- match( individuals, snp.mat$fam$member );
snp.mat$genotypes<- snp.mat$genotypes[idx.fam,]
snp.mat$fam <- snp.mat$fam[idx.fam,]
}
return(snp.mat);
}
# SNP names belonging to the idx-th gene of a gene list.
# gen.list$genes holds gene names; gen.list$snps is a two-column table
# (gene name, SNP name). Returns list(name=, snps=).
get_gen_group <- function(gen.list, idx)
{
  target <- gen.list$genes[idx]
  rows <- which(gen.list$snps[, 1] == target)
  list(name = target, snps = gen.list$snps[rows, 2])
}
# Look up a gene by name in a gene library.
# Returns list(name=, snps=) or NULL when the gene has no SNPs in gen.lib.
get_gen_family <- function(gen.lib, gen.name)
{
  rows <- which(gen.lib$snps[, 1] == gen.name)
  if (length(rows) == 0)
    return(NULL)
  list(name = gen.name, snps = gen.lib$snps[rows, 2])
}
# Individual IDs (second column of the membership table), as characters.
get_gen_individuals <- function(PF.gen)
{
  as.character(PF.gen$ind.list$member[, 2])
}
# Restrict the reference object's individual list (and, when loaded, the
# cached genotype matrix) to the IDs in ids.set.
# PF.gen is a PLINK.refer reference object, so the assignments below
# mutate it in place for the caller; nothing useful is returned.
sync_gen_individuals<-function(PF.gen, ids.set)
{
cat("* PLINK (", NROW(PF.gen$ind.list$member) - length(ids.set), ") individuals are removed.\n");
# Row positions of the retained individuals in the membership table.
idx.fam <- match( ids.set, PF.gen$ind.list$member[,2] );
if(!is.null(PF.gen$snp$matrix))
{
PF.gen$snp$matrix$genotypes<- PF.gen$snp$matrix$genotypes[idx.fam,]
PF.gen$snp$matrix$fam <- PF.gen$snp$matrix$fam[idx.fam,]
# NOTE(review): setdiff against the post-subset $fam object (a data
# frame), not an ID vector — looks like ...$fam$member was intended;
# confirm before changing.
PF.gen$ind.list$removed <- setdiff(PF.gen$ind.list$member[,2], PF.gen$snp$matrix$fam );
PF.gen$ind.list$member <- PF.gen$ind.list$member[idx.fam, ];
}
else
{
PF.gen$ind.list$removed <- setdiff(PF.gen$ind.list$member[,2], ids.set );
PF.gen$ind.list$member <- PF.gen$ind.list$member[idx.fam, ];
}
}
# Genotype matrix for the idx-th gene of PF.gen's gene list.
# Missing calls are imputed (see snp_impute). Returns a list with maf,
# the imputed snp matrix, per-SNP missing counts and SNP info, plus the
# gene name — or NULL when no SNP is available.
# When the gene's SNPs are not in the cached matrix, a window of ~100
# neighbouring genes is (re)loaded from disk; PF.gen$snp$matrix is
# mutated in place (PF.gen is a reference object).
get_gen_mat<-function( PF.gen, idx, impute="mean" )
{
# Pull the requested SNP columns out of a loaded read.plink() object.
get_plink_mat<-function(plink, snps, impute)
{
snp.idx <- match(as.character(snps), as.character(plink$map[,2]));
if (length(which(is.na(snp.idx)))>0)
snp.idx <- snp.idx[-which(is.na(snp.idx))];
if(length(snp.idx)==0) return(NULL);
map <- plink$map[snp.idx, ,drop=F];
# Coerce from the raw genotype coding to a numeric 0/1/2 dosage matrix.
plink.org <- as( plink$genotypes[, snp.idx, drop=F ], "numeric");
# Missing-call count per SNP, taken before imputation.
nmiss <- apply(plink.org, 2, function(snp){sum(is.na(snp))});
snp.imp <- snp_impute(plink.org , impute=impute)
rownames(snp.imp) <- as.character(plink$fam$member);
return(list(maf=colMeans(snp.imp)/2, snp=snp.imp, nmiss=nmiss, info=map[,c(2,1,4)]) );
}
gen.name <- PF.gen$gen.list$names[idx];
snps_finding <- unique(PF.gen$gen.list$snps[which(PF.gen$gen.list$snps[,1] == gen.name), 2] );
snp.mat <- NULL;
if(!is.null(PF.gen$snp$matrix))
{
# Fast path: the SNPs may already be in the cached matrix.
snp.mat <- get_plink_mat(PF.gen$snp$matrix, snps_finding, impute)
}
if(is.null(snp.mat))
{
# Cache miss: load a window of +/-50 genes around idx from disk.
idx.range <- c(idx-50, idx+50);
if (idx.range[1]<1) idx.range[1] <- 1
if (idx.range[2]>PF.gen$gen.list$len) idx.range[2] <- PF.gen$gen.list$len;
gen.names <- PF.gen$gen.list$names[idx.range[1]:idx.range[2]];
snps <- PF.gen$gen.list$snps[which(PF.gen$gen.list$snps[,1] %in% gen.names), 2]
# NOTE(review): $plink relies on partial matching against
# options$plink.path — fragile; prefer the full name once confirmed.
PF.gen$snp$matrix <- load_gene_plink( PF.gen$options$file.plink.bed,
PF.gen$options$file.plink.bim,
PF.gen$options$file.plink.fam,
PF.gen$ind.list$member[,2],
unique(snps),
PF.gen$options$plink );
snp.mat <- get_plink_mat(PF.gen$snp$matrix, snps_finding, impute)
}
if(!is.null(snp.mat))
snp.mat$name <- gen.name;
return(snp.mat);
}
# Genotype data for the idx-th SNP of PF.gen's BIM table.
# Missing calls are imputed (see snp_impute). When the SNP is not in the
# cached matrix, a window of +/-5000 neighbouring SNPs is loaded from
# disk; PF.gen$snp$matrix is mutated in place (PF.gen is a reference
# object). Returns a list (snp / name / chr / loc / gene / maf / nmiss /
# info) or NULL when the SNP cannot be found.
get_snp_mat <- function(PF.gen, idx, impute="mean" )
{
	# Pull the requested SNP out of a loaded read.plink() object.
	get_plink_snp<-function(plink, snp.name, impute)
	{
		snp.idx <- match(as.character(snp.name), as.character(plink$map[,2]));
		# Fixed: drop unmatched names from BOTH vectors; the original kept
		# NA entries in snp.idx, which would break the column subset below
		# whenever more than one name was requested.
		if (any(is.na(snp.idx)))
		{
			snp.name <- snp.name[ !is.na(snp.idx) ];
			snp.idx  <- snp.idx[ !is.na(snp.idx) ];
		}
		if(length(snp.name)==0) return(NULL);

		plink.org <- as( plink$genotypes[, snp.idx, drop=F ], "numeric");
		# Missing-call count per SNP, taken before imputation.
		nmiss <- apply(plink.org, 2, function(snp){sum(is.na(snp))});
		snp.imp <- snp_impute(plink.org , impute=impute)

		map <- plink$map[snp.idx, ,drop=F];
		gene.name <- "";
		if (!is.null(PF.gen$gen.list$snps))
		{
			gen.idx <- match( snp.name, PF.gen$gen.list$snps[,2])
			if (length(gen.idx)>0)
				gene.name <- PF.gen$gen.list$snps[gen.idx[1],1];
		}

		# NOTE(review): map[1]/map[4] select whole data-frame columns, not
		# the scalar chromosome/position (map[1,1]/map[1,4]); confirm what
		# downstream consumers expect before changing.
		return(list(snp=snp.imp,
			name=snp.name, chr=map[1], loc=map[4], gene=gene.name,
			maf=colMeans(snp.imp)/2, nmiss=nmiss, info=map[,c(2,1,4)]) );
	}

	snp.name <- PF.gen$snp$bim[idx,2];
	snp.mat <- NULL;
	if(!is.null(PF.gen$snp$matrix) )
	{
		if( !is.na(match(snp.name, PF.gen$snp$matrix$map$snp.name ) ) )
			snp.mat <- get_plink_snp(PF.gen$snp$matrix, snp.name, impute)
	}

	if(is.null(snp.mat))
	{
		idx.range <- c(idx - 5000, idx + 5000);
		if (idx.range[1] < 1 ) idx.range[1] <- 1
		if (idx.range[2] > NROW(PF.gen$snp$bim)) idx.range[2] <- NROW(PF.gen$snp$bim);

		# Fixed: expand the endpoints into the full window. The original
		# indexed the BIM table with the 2-element c(lo, hi) vector,
		# loading only TWO SNPs instead of the intended +/-5000-SNP window
		# (compare the analogous code in get_gen_mat).
		snp.names <- PF.gen$snp$bim[idx.range[1]:idx.range[2], 2];

		PF.gen$snp$matrix <- load_gene_plink( PF.gen$options$file.plink.bed,
					PF.gen$options$file.plink.bim,
					PF.gen$options$file.plink.fam,
					PF.gen$ind.list$member[,2],
					snp.names,
					PF.gen$options$plink );

		snp.mat <- get_plink_snp(PF.gen$snp$matrix, snp.name, impute)
	}

	return( snp.mat)
}
# Build a PLINK.refer object from a PLINK fileset plus an optional
# two-column gene-set file (gene name, SNP name).
# The full genotype matrix is read eagerly only when the data set is
# small enough (#SNPs * #individuals below a fixed budget); otherwise
# snp$matrix stays NULL and get_gen_mat/get_snp_mat load windows lazily.
read_gen_plink<-function( file.plink.bed, file.plink.bim, file.plink.fam, file.gene.set, plink.path)
{
gen.list <- list();
if(!is.null(file.gene.set))
{
tb.gen <- read.table(file.gene.set, header=F, stringsAsFactors=F);
gen.names <- unique(tb.gen[,1]);
gen.list <- list( len=NROW(gen.names), names=gen.names, snps=tb.gen);
}
tb.fam <- read.table(file.plink.fam, header=F, stringsAsFactors=F);
ind.list <- list( member=tb.fam[,c(1,2)], removed=c() )
tb.bim <- read.table(file.plink.bim, header=F, stringsAsFactors=F);
#n.snp <- get_large_file_lines( file.plink.bim);
snp <- list()
snp$fam <- as.data.frame(tb.fam);
snp$bim <- as.data.frame(tb.bim);
n.snp <- NROW(snp$bim);
options <- list( plink.path=plink.path, file.plink.bed=file.plink.bed, file.plink.bim=file.plink.bim, file.plink.fam=file.plink.fam );
# Eager load only below 50*1000*2000 = 1e8 genotype cells, to bound memory.
if( n.snp * 1.0 * NROW(tb.fam) < 50*1000*2000 )
{
snp$matrix <- snpStats::read.plink( file.plink.bed, file.plink.bim, file.plink.fam );
}
else
{
snp$matrix <- NULL;
}
# Instantiate the reference class declared at the top of this file.
PLINK.refer <- getRefClass("PLINK.refer");
PF.gen <- PLINK.refer(gen.list=gen.list, ind.list=ind.list, snp=snp, options=options);
return(PF.gen);
}
# Shallow clone of a PLINK.refer object WITHOUT the (potentially large)
# cached genotype matrix; only the fam/bim tables are carried over, so
# the clone reloads genotype windows lazily on first use.
clone_plink_refer<-function(PF.gen)
{
PLINK.refer <- getRefClass("PLINK.refer");
PF.gen.clone <- PLINK.refer(
gen.list=PF.gen$gen.list,
ind.list=PF.gen$ind.list,
snp=list(fam=PF.gen$snp$fam, bim=PF.gen$snp$bim),
options=PF.gen$options);
return(PF.gen.clone);
}
#TO REMOVE
# Build the gene->SNP mapping restricted to SNPs present in the BIM file.
#   file.set: two space-separated columns (gene name, SNP name)
#   file.bim: PLINK BIM table (SNP names in column 2)
# Returns list(len = number of genes, genes = gene names, snps = kept rows).
read_gen_dataset <- function(file.set, file.bim)
{
	bim <- read.table(file.bim)
	gene.map <- read.table(file.set, sep = " ", header = FALSE)

	# Keep only mapping rows whose SNP actually occurs in the BIM file.
	kept.rows <- which(!is.na(match(as.character(gene.map$V2),
	                                as.character(bim$V2))))
	gene.names <- unique(gene.map[kept.rows, 1])

	list(len = length(gene.names), genes = gene.names,
	     snps = gene.map[kept.rows, ])
}
#TO REMOVE
# Read longitudinal phenotype, (optional) measurement-time and covariate
# CSV files (individual IDs in column 1 / row names), align their
# individuals with the PLINK FAM file and return the matched tables plus
# the FAM-file positions of the retained individuals.
# Fixes vs. the original:
#  * idx.fam was referenced (and returned) without ever being assigned —
#    a guaranteed runtime error; it is now computed explicitly.
#  * the final consistency check compared against ALL fam IDs even after
#    subsetting; it now compares against the common ID set.
#  * drop=FALSE keeps one-column phenotype/covariate tables as data
#    frames (preserving row names used by the checks below).
read_gen_phe_cov<-function(file.plink.bed, file.plink.bim, file.plink.fam, file.phe.long, file.phe.time, file.phe.cov)
{
	phe.long <- read.csv(file.phe.long, header=T, stringsAsFactors=F, row.names=1);
	# Drop individuals whose every longitudinal measurement is missing.
	idx.na <- which( rowSums(is.na(phe.long)) == NCOL(phe.long) );
	if( length(idx.na)>0) phe.long <- phe.long[ -idx.na, , drop=FALSE ];

	phe.time <- NULL;
	if (!is.null(file.phe.time))
	{
		phe.time <- read.csv(file.phe.time, header=T, stringsAsFactors=F, row.names=1);
		idx.na <- which( rowSums( is.na(phe.time))==NCOL(phe.time) );
		if( length(idx.na)>0) phe.time <- phe.time[ -idx.na, , drop=FALSE ];
	}

	phe.cov <- read.csv(file.phe.cov, header=T, stringsAsFactors=F, row.names=1);

	tb.fam <- read.table(file.plink.fam, header=F);
	ids.fam <- as.character(tb.fam[,2]);

	# Individuals present in every supplied file.
	ids.phe <- intersect(rownames(phe.long), rownames(phe.cov) );
	if(!is.null(phe.time))
		ids.phe <- intersect(ids.phe, rownames(phe.time) );
	ids.set <- intersect(ids.phe, ids.fam);
	cat(" COMMON Individuals=", length(ids.set), "\n");

	# Reorder every table to the common ID order.
	phe.long <- phe.long[ match( ids.set, rownames(phe.long) ), , drop=FALSE ];
	phe.cov  <- phe.cov[ match( ids.set, rownames(phe.cov) ), , drop=FALSE ];
	if(!is.null(phe.time))
		phe.time <- phe.time[ match( ids.set, rownames(phe.time) ), , drop=FALSE ];

	# Positions of the retained individuals inside the FAM file (fix: the
	# original never assigned idx.fam before using it).
	idx.fam <- match( ids.set, ids.fam );
	if ( length(ids.set) != length(ids.fam) )
		cat("* PLINK (", length(ids.fam) - length(ids.set), ") individuals are removed.\n");

	if( !is.null(phe.time) && !all( rownames(phe.long) == rownames(phe.time) ) )
		stop("! ID MATCH ERROR between PHE.LONG and PHE.TIME. \n");
	if (!( all( rownames(phe.long)==rownames(phe.cov)) && all( rownames(phe.long)==ids.set) ) )
		stop("! ID MATCH ERROR among 3 files( PHE.LONG, PHE.COV, PLINK.FAM). \n");

	return(list( phe.long=phe.long, phe.time=phe.time, phe.cov = phe.cov, member=idx.fam));
}
#TO REMOVE
# Restrict a read.plink()-style object to the SNPs belonging to the genes
# selected by gene.range; NULL when none of those SNPs are present.
shrink_snpmat <- function(snp.mat, gen.list, gene.range)
{
	wanted.genes <- gen.list$genes[gene.range]
	gene.rows <- which(!is.na(match(gen.list$snps[, 1], wanted.genes)))
	snp.names <- unique(gen.list$snps[gene.rows, 2])

	# Columns of the genotype matrix holding the wanted SNPs.
	cols <- match(as.character(snp.names), as.character(snp.mat$map[, 2]))
	cols <- cols[!is.na(cols)]
	if (length(cols) == 0)
		return(NULL)

	shrunk <- snp.mat
	shrunk$genotypes <- snp.mat$genotypes[, cols, drop = FALSE]
	shrunk$map <- snp.mat$map[cols, ]
	shrunk
}
#public
# Public entry point: validate a PLINK fileset and load it (together with
# the optional gene-set file) into a PLINK.refer object.
longskat_plink_load <- function( file.plink.bed, file.plink.bim, file.plink.fam, file.gene.set, plink.path=NULL, verbose=FALSE)
{
	plink.ok <- check_plink_file( file.plink.bed, file.plink.bim, file.plink.fam )
	if (!plink.ok$bSuccess)
		stop("PLINK file can not be loaded by the snpStats package.")

	cat( "Starting to load all data files......\n")
	read_gen_plink( file.plink.bed, file.plink.bim, file.plink.fam, file.gene.set, plink.path )
}
#public
# Public entry point: extract genotype matrices (plus MAF, per-SNP missing
# counts and gene names) for the genes indexed by gene.set.
# A gene with no usable SNPs yields NA slots in every returned list.
# Fixes vs. the original:
#  * inherits(gen, "try-error") instead of class(gen)=="try-error"
#  * result lists are preallocated instead of grown element by element
#  * the verbose message now reports the gene identifier, not the loop
#    index twice
longskat_get_gene <- function( gen.obj, gene.set, snp.impute="mean", verbose = FALSE )
{
	n <- length(gene.set)
	gene.name <- vector("list", n)
	snp.list  <- vector("list", n)
	nmiss     <- vector("list", n)
	maf       <- vector("list", n)

	for(i in seq_len(n))
	{
		gen <- try( get_gen_mat( gen.obj, gene.set[i], snp.impute ) )
		if( is.null(gen) || inherits(gen, "try-error") || length(gen$maf)==0 )
		{
			if (verbose) cat("! No SNPS for Gene[", i, "]=", gene.set[i], "\n")
			snp.list[[i]]  <- NA
			maf[[i]]       <- NA
			nmiss[[i]]     <- NA
			gene.name[[i]] <- NA
		}
		else
		{
			if (verbose) cat(" Finding", NCOL(gen$snp), "SNPs...\n")
			snp.list[[i]]  <- gen$snp
			maf[[i]]       <- gen$maf
			nmiss[[i]]     <- gen$nmiss
			gene.name[[i]] <- gen$name
		}
	}

	return(list(snp.mat=snp.list, maf=maf, nmiss=nmiss, gene.name=gene.name ))
}
|
c9cbfd2830bc769de7c79b83e4e024ed95d607ff
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/WaveletComp/examples/USelection2016.Instagram.Rd.R
|
499329e2631d4488e45e98b1250895b30fc064d3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 896
|
r
|
USelection2016.Instagram.Rd.R
|
library(WaveletComp)
### Name: USelection2016.Instagram
### Title: Hourly time series of the number of candidate-related media
###   posted on Instagram during the week before the 2016 US presidential
###   election
### Aliases: USelection2016.Instagram
### Keywords: datasets

### ** Examples

# Load the package data set and plot all four hourly series on one axis.
data(USelection2016.Instagram)
# NOTE(review): attach() is used here because this is generated example
# code; in real scripts prefer explicit with()/$ access.
attach(USelection2016.Instagram)
# Timestamps are parsed in US Eastern time (EST5EDT), matching the data.
my.date <- as.POSIXct(date, format = "%F %T", tz = "EST5EDT")
plot(my.date, trump.pos, type = "l", col = 1, lwd = 2,
     ylab = "number of media posted on Instagram", ylim = c(0,6e+6),
     xlab = "the week before the Election Day (Tuesday, 2016-11-08)")
lines(my.date, clinton.pos, col = 2, lwd = 2)
lines(my.date, trump.neg, col = 3, lwd = 2)
lines(my.date, clinton.neg, col = 4, lwd = 2)
# Legend labels come straight from the data set's column names.
legend("topleft", legend=names(USelection2016.Instagram[-1]),
       lty = 1, lwd = 2, col = 1:4)
detach(USelection2016.Instagram)
|
90d795c45c51e4b9c191f81b878da12593e7091c
|
de0935ade1f6cfece090e7ff7057692e1404c24f
|
/Scripts/ss_initial_scrub.R
|
62f3beea7f5ca80ccc82a07c7c19aa673e032612
|
[] |
no_license
|
sofisinozich/SURV622_Assignment-2
|
501b09d79eadfd14392b337332e8f42f05de346d
|
51f821f13c67589f1333f23fa46ad35adcc96367
|
refs/heads/master
| 2022-04-11T12:07:04.570317
| 2020-03-23T19:25:40
| 2020-03-23T19:25:40
| 244,040,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 370
|
r
|
ss_initial_scrub.R
|
library(rtweet)
library(qdap)
library(tidyverse)
library(magrittr)
# Parse one day of streamed tweets captured earlier to disk.
oneday_tweets <- parse_stream("Data/ss_streamed_tweets.json")
# NOTE(review): result of select() is not assigned — prints to console only.
oneday_tweets %>% select(text)
# Scrub punctuation/case and drop the 200 most common English words.
oneday_scrubbed <- oneday_tweets$text %>% scrubber() %sw% qdapDictionaries::Top200Words
# Shortened t.co links as they look AFTER scrubber() has space-padded the
# punctuation (hence the unusual spacing inside the pattern).
link_regex <- "https : / / t. co / [a-z0-9]{10}"
oneday_scrubbed %<>% gsub(link_regex,"",.)
# Most frequent remaining terms.
freq_terms(oneday_scrubbed)
|
846f9a7ac4c081e0e8e99ca2593120b1a6cfd245
|
dce4f2712b1cb826893c77c47d951a25d763630d
|
/man/Rect.Rd
|
ceb3de8f3bc7670109587c1e6b6015adfccde66e
|
[] |
no_license
|
Displayr/flipPictographs
|
dacfad5c35df6bb8dfad1deb397052616a461178
|
b416b465b3bd603cf85bdbe11995eed21ae59fc5
|
refs/heads/master
| 2023-05-25T05:44:35.410505
| 2023-05-22T09:13:03
| 2023-05-22T09:13:03
| 60,046,387
| 0
| 1
| null | 2023-08-17T05:55:04
| 2016-05-31T00:25:48
|
R
|
UTF-8
|
R
| false
| true
| 566
|
rd
|
Rect.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rect.R
\name{Rect}
\alias{Rect}
\title{Rect}
\usage{
Rect(color = "red", opacity = 0.9, print.config = FALSE)
}
\arguments{
\item{color}{One of 'red', 'green' or 'yellow' or a hex color code.}
\item{opacity}{A numeric value between 0 and 1.}
\item{print.config}{If set to \code{TRUE}, the JSON string used to generate pictograph will be printed to standard output. This is useful for debugging.}
}
\description{
Draws a rectangle
}
\examples{
Rect("red")
Rect("#000000", opacity=0.2)
}
|
e40b107c8170269331f3a7d0d9efd2ef6256f6bf
|
c981caf103a3540f7964e6c41a56ca34d67732c4
|
/R/lm_cluster_compute_vcov.R
|
ababdec8e6b042c5b8cb2f64f36e5e74c9518b34
|
[] |
no_license
|
alexanderrobitzsch/miceadds
|
8285b8c98c2563c2c04209d74af6432ce94340ee
|
faab4efffa36230335bfb1603078da2253d29566
|
refs/heads/master
| 2023-03-07T02:53:26.480028
| 2023-03-01T16:26:31
| 2023-03-01T16:26:31
| 95,305,394
| 17
| 2
| null | 2018-05-31T11:41:51
| 2017-06-24T15:16:57
|
R
|
UTF-8
|
R
| false
| false
| 383
|
r
|
lm_cluster_compute_vcov.R
|
## File Name: lm_cluster_compute_vcov.R
## File Version: 0.01

# Cluster-robust covariance matrix for a fitted model.
# `cluster` may be either the vector of cluster IDs itself (length > 1)
# or the name of a column in `data` holding those IDs.
lm_cluster_compute_vcov <- function(mod, cluster, data)
{
    require_namespace("sandwich")
    cluster.ids <- if ( length(cluster) > 1 ){
        cluster
    } else {
        data[, cluster, drop=TRUE]
    }
    cluster.frame <- data.frame( cluster=cluster.ids )
    sandwich::vcovCL( x=mod, cluster=cluster.frame$cluster )
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.