blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec0df17284d99dd0031b835b6bbe339d46f78d40
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/fam2r/examples/LRparamlink.Rd.R
|
13ba317ada74ad866103db54abc75a1017004a1b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 356
|
r
|
LRparamlink.Rd.R
|
# Extracted example script for fam2r::LRparamlink (generated from the
# package's Rd examples).
library(fam2r)
### Name: LRparamlink
### Title: Calculates likelihoods and likelihood ratios using 'paramlink'
### Aliases: LRparamlink
### ** Examples
# Load the bundled 'adoption' case data (pedigrees, genotype matrix, loci).
data(adoption)
# Convert the Familias-format input into a paramlink 'linkdat' object.
x = Familias2linkdat(adoption$pedigrees, adoption$datamatrix, adoption$loci)
# Likelihood ratios with hypothesis 2 as the reference, using all markers.
result = LRparamlink(x, ref=2)
# Only marker 11 and 33
result33 = LRparamlink(x, ref=2, marker=c(11,33))
|
e248bb1a6402a0091a9ce2f1c172577143cd90d5
|
5f16d226ba297a3a8886ead66ebf28bc74325e07
|
/letraANum.R
|
9d6811e0ce034be627e7f922b5411360574fe094
|
[
"CC0-1.0"
] |
permissive
|
datajules/UtilidadesR
|
1dc51470d95e9a8af72f0e0a702e7e498c8f0093
|
d5f5cfd587e6f20fd16089fe435ec32f9e72f9d6
|
refs/heads/main
| 2023-06-19T05:22:01.740843
| 2021-07-13T17:57:09
| 2021-07-13T17:57:09
| 385,690,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 799
|
r
|
letraANum.R
|
# Función para convertir una letra a número para interpretar de mejor manera
# las columnas de exce.
library(tidyverse)
# Convert a spreadsheet-style column label to its column number.
#
# Treats the label as a bijective base-26 numeral with A=1 ... Z=26, so
# "A" -> 1, "AB" -> 1*26 + 2 = 28, "AAA" -> 1*26^2 + 1*26 + 1 = 703.
# Case-insensitive; non-letter characters yield NA.
#
# Bug fix: the previous implementation weighted each letter by
# (position - 1) * 26 instead of 26^(position - 1), so any label longer
# than two letters was wrong (e.g. "AAA" returned 79 instead of 703).
# Also uses base toupper()/match() instead of stringr/switch, so this
# function no longer needs the tidyverse.
#
# Args:
#   letra: a single character string of letters (an Excel column label).
# Returns:
#   The numeric column index.
letranum <- function(letra){
  chars <- strsplit(toupper(letra), "")[[1]]
  vals <- match(chars, LETTERS)        # A=1 ... Z=26; NA for non-letters
  powers <- rev(seq_along(vals)) - 1   # rightmost letter gets weight 26^0
  sum(vals * 26^powers)
}
letranum("AB")
|
af39d0c8ef492b74feb1e2feda4a70a07a151ca1
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/YPBP/R/ypbp.R
|
33af657f471fc5589763fe2af5acf6de0fa59013
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,355
|
r
|
ypbp.R
|
#---------------------------------------------
# Maximum-likelihood fit of the Yang-Prentice model without short-term
# covariates. Internal helper: called by ypbp() when approach == "mle"
# and there is no second formula part.
ypbp.mle <- function(status, Z, degree, tau, g, G,
                     baseline=c("hazard", "odds"), hessian, ...) {
  baseline <- match.arg(baseline)
  # Stan model code: 1 = baseline hazard, 2 = baseline odds.
  M <- if (baseline == "hazard") 1 else 2
  # Fixed hyper-parameter values: required entries of the Stan data block
  # even though no prior is sampled in the MLE path.
  hp <- list(h1_gamma=0, h2_gamma=4,
             mu_psi=0, sigma_psi=4,
             mu_phi=0, sigma_phi=4)
  stan_data <- list(status=status, Z=Z, q=ncol(Z), n=nrow(Z),
                    m=degree, M=M, approach=0, tau=tau,
                    g=g, G=G,
                    h1_gamma=hp$h1_gamma, h2_gamma=hp$h2_gamma,
                    mu_psi=hp$mu_psi, sigma_psi=hp$sigma_psi,
                    mu_phi=hp$mu_phi, sigma_phi=hp$sigma_phi)
  fit <- rstan::optimizing(stanmodels$ypbp, data=stan_data,
                           hessian=hessian, ...)
  # Strip per-observation log-likelihood entries from the returned fit.
  fit$par <- fit$par[-grep("loglik", names(fit$par))]
  fit$theta_tilde <- fit$theta_tilde[-grep("loglik", names(fit$theta_tilde))]
  fit
}
#---------------------------------------------
# Bayesian (MCMC) fit of the Yang-Prentice model without short-term
# covariates. Internal helper: called by ypbp() when approach == "bayes"
# and there is no second formula part.
ypbp.bayes <- function(status, Z, degree, tau, g, G,
                       baseline=c("hazard", "odds"), hyper_parms, ...) {
  baseline <- match.arg(baseline)
  # Stan model code: 1 = baseline hazard, 2 = baseline odds.
  M <- if (baseline == "hazard") 1 else 2
  stan_data <- list(status=status, Z=Z, q=ncol(Z), n=nrow(Z),
                    m=degree, M=M, approach=1, tau=tau,
                    g=g, G=G,
                    h1_gamma=hyper_parms$h1_gamma,
                    h2_gamma=hyper_parms$h2_gamma,
                    mu_psi=hyper_parms$mu_psi,
                    sigma_psi=hyper_parms$sigma_psi,
                    mu_phi=hyper_parms$mu_phi,
                    sigma_phi=hyper_parms$sigma_phi)
  # Only monitor the model parameters plus the log-likelihood.
  monitored <- c("psi", "phi", "gamma", "loglik")
  rstan::sampling(stanmodels$ypbp, data=stan_data, pars=monitored, ...)
}
#---------------------------------------------
# Maximum-likelihood fit of the Yang-Prentice model with short-term
# covariates X. Internal helper: called by ypbp() when approach == "mle"
# and the formula has a second part.
ypbp2.mle <- function(status, Z, X, degree, tau, g, G,
                      baseline=c("hazard", "odds"), hessian, ...) {
  baseline <- match.arg(baseline)
  # Stan model code: 3 = baseline hazard, 4 = baseline odds.
  M <- if (baseline == "hazard") 3 else 4
  # Fixed hyper-parameter values: required entries of the Stan data block
  # even though no prior is sampled in the MLE path.
  hp <- list(h1_gamma=0, h2_gamma=4,
             mu_psi=0, sigma_psi=4,
             mu_phi=0, sigma_phi=4,
             mu_beta=0, sigma_beta=4)
  stan_data <- list(status=status, Z=Z, X=X, q=ncol(Z), p=ncol(X),
                    n=nrow(Z), g=g, G=G,
                    m=degree, M=M, approach=0, tau=tau,
                    h1_gamma=hp$h1_gamma, h2_gamma=hp$h2_gamma,
                    mu_psi=hp$mu_psi, sigma_psi=hp$sigma_psi,
                    mu_phi=hp$mu_phi, sigma_phi=hp$sigma_phi,
                    mu_beta=hp$mu_beta, sigma_beta=hp$sigma_beta)
  fit <- rstan::optimizing(stanmodels$ypbp2, data=stan_data,
                           hessian=hessian, ...)
  # Strip per-observation log-likelihood entries from the returned fit.
  fit$par <- fit$par[-grep("loglik", names(fit$par))]
  fit$theta_tilde <- fit$theta_tilde[-grep("loglik", names(fit$theta_tilde))]
  fit
}
#---------------------------------------------
# Bayesian (MCMC) fit of the Yang-Prentice model with short-term
# covariates X. Internal helper: called by ypbp() when approach == "bayes"
# and the formula has a second part.
ypbp2.bayes <- function(status, Z, X, degree, tau,
                        g, G, baseline=c("hazard", "odds"),
                        hyper_parms, ...) {
  baseline <- match.arg(baseline)
  # Stan model code: 3 = baseline hazard, 4 = baseline odds.
  M <- if (baseline == "hazard") 3 else 4
  stan_data <- list(status=status, Z=Z, X=X, q=ncol(Z), p=ncol(X), n=nrow(Z),
                    m=degree, M=M, approach=1, tau=tau, g=g, G=G,
                    h1_gamma=hyper_parms$h1_gamma,
                    h2_gamma=hyper_parms$h2_gamma,
                    mu_psi=hyper_parms$mu_psi,
                    sigma_psi=hyper_parms$sigma_psi,
                    mu_phi=hyper_parms$mu_phi,
                    sigma_phi=hyper_parms$sigma_phi,
                    mu_beta=hyper_parms$mu_beta,
                    sigma_beta=hyper_parms$sigma_beta)
  rstan::sampling(stanmodels$ypbp2, data=stan_data, ...)
}
#---------------------------------------------
#' Fits the Yang and Prentice model using Bernstein polynomials to model the baseline distribution.
#' @aliases ypbp
#' @export
#' @description Fits the Yang and Prentice model with either the baseline hazard or the baseline odds modeled via Bernstein polynomials.
#' @param formula an object of class "formula" (or one that can be coerced to that class): a symbolic description of the model to be fitted.
#' @param data an optional data frame, list or environment (or object coercible by as.data.frame to a data frame) containing the variables in the model. If not found in data, the variables are taken from environment(formula), typically the environment from which ypbp is called.
#' @param degree degree of the Bernstein polynomial basis. If NULL, default value (square root of n) is used.
#' @param tau the maximum time of follow-up. If NULL, tau = max(time), where time is the vector of observed survival times.
#' @param approach approach to be used to fit the model (mle: maximum likelihood; bayes: Bayesian approach).
#' @param baseline baseline function to be modeled.
#' @param hessian logical; If TRUE (default), the hessian matrix is returned when approach="mle".
#' @param hyper_parms a list containing the hyper-parameters of the prior distributions (when approach = "bayes"). If not specified, default values are used.
#' @param ... Arguments passed to either `rstan::optimizing` or `rstan::sampling` .
#' @return ypbp returns an object of class "ypbp" containing the fitted model.
#'
#' @examples
#' \donttest{
#' library(YPBP)
#' mle1 <- ypbp(Surv(time, status)~trt, data=gastric, baseline = "hazard")
#' mle2 <- ypbp(Surv(time, status)~trt, data=gastric, baseline = "odds")
#' bayes1 <- ypbp(Surv(time, status)~trt, data=gastric, baseline = "hazard",
#' approach = "bayes", chains = 2, iter = 500)
#' bayes2 <- ypbp(Surv(time, status)~trt, data=gastric, baseline = "odds",
#' approach = "bayes", chains = 2, iter = 500)
#' }
#'
#'
ypbp <- function(formula, data, degree=NULL, tau=NULL,
approach = c("mle", "bayes"), baseline=c("hazard", "odds"),
hessian=TRUE, hyper_parms = list(h1_gamma=0, h2_gamma=4,
mu_psi=0, sigma_psi=4,
mu_phi=0, sigma_phi=4,
mu_beta=0, sigma_beta=4), ...){
approach <- match.arg(approach)
baseline <- match.arg(baseline)
# Two-part Formula: rhs 1 gives the long-term (Z) covariates; an optional
# rhs 2 gives the short-term (X) covariates.
formula <- Formula::Formula(formula)
mf <- stats::model.frame(formula=formula, data=data)
Terms <- stats::terms(mf)
# Response is a survival object: column 1 = time, column 2 = status.
resp <- stats::model.response(mf)
time <- resp[,1]
status <- resp[,2]
Z <- stats::model.matrix(formula, data = mf, rhs = 1)
# rhs = 2 may be absent; any failure is captured by try().
# NOTE(review): if X ends up a "try-error" string, ncol(X) is NULL and
# the ncol(X) > 0 guard below evaluates to logical(0), which would make
# if() error -- confirm how Formula behaves for one-part formulas.
X <- suppressWarnings(try( stats::model.matrix(formula, data = mf, rhs = 2), TRUE))
labels <- colnames(Z)[-1]
labels.ph <- colnames(X)[-1]
# Drop the intercept column; keep covariate labels for the output object.
Z <- matrix(Z[,-1], ncol=length(labels))
if(ncol(X)>0){
labels.ph <- colnames(X)[-1]
X <- matrix(X[,-1], ncol=length(labels.ph))
}
n <- nrow(Z)
q <- ncol(Z)
p <- ncol(X)
if(is.null(tau)){
tau <- max(time)
}
if(is.null(degree)){
degree <- ceiling(sqrt(length(time)))
}
# Bernstein polynomial basis (b) and its integral (B) at observed times.
bases <- bp(time, degree, tau)
g <- bases$b
G <- bases$B
# Dispatch on the estimation approach and on whether short-term
# covariates (p > 0) are present.
if(approach=="mle"){
if(p==0){
fit <- ypbp.mle(status=status, Z=Z,
degree=degree, tau=tau, g=g, G=G,
baseline=baseline, hessian=hessian, ...)
}else{
fit <- ypbp2.mle(status=status, Z=Z, X=X,
degree=degree, tau=tau, g=g, G=G,
baseline=baseline, hessian=hessian, ...)
}
}else{
if(p==0){
fit <- ypbp.bayes(status=status, Z=Z,
degree=degree, tau=tau, g=g, G=G,
baseline=baseline, hyper_parms=hyper_parms, ...)
}else{
fit <- ypbp2.bayes(status=status, Z=Z, X=X,
degree=degree, tau=tau, g=g, G=G,
baseline=baseline, hyper_parms=hyper_parms, ...)
}
}
# Assemble the "ypbp" result object around the raw fit.
output <- list(fit=fit)
output$n <- n
output$q <- q
output$p <- p
output$degree <- degree
output$tau <- tau
output$call <- match.call()
output$formula <- formula
output$terms <- stats::terms.formula(formula)
output$mf <- mf
output$labels <- labels
output$approach <- approach
output$baseline <- baseline
if(p>0){
output$labels.ph <- labels.ph
}
class(output) <- "ypbp"
return(output)
}
#---------------------------------------------
# Stratified nonparametric bootstrap of the ypbp maximum-likelihood fit.
#
# Resamples events (status == 1) and censored observations (status == 0)
# separately so each replicate keeps the original event/censoring mix,
# refits the model by MLE on each replicate and collects the parameter
# vectors.
#
# Fixes over the previous version:
#   * the refit is wrapped in try(), so a replicate that fails is skipped
#     instead of aborting the whole bootstrap (the old code tested for
#     class "try-error" but never called try(), so the test could never
#     trigger and any error was fatal);
#   * the class test uses inherits() instead of class(x) != "try-error",
#     which is unreliable when an object has more than one class;
#   * removed a no-op `step <- step + 1` (for-loops reset their index)
#     and an unused `index` variable; 1:nboot -> seq_len(nboot).
#
# Args mirror ypbp(); nboot is the number of bootstrap replicates.
# Returns an nboot x (2q + p + degree) matrix of parameter draws; rows of
# failed replicates are left as NA.
ypbpBoot <- function(formula, data, degree=NULL, tau=NULL,
                     nboot = 4000, ...){
  formula <- Formula::Formula(formula)
  mf <- stats::model.frame(formula=formula, data=data)
  Terms <- stats::terms(mf)
  resp <- stats::model.response(mf)
  time <- resp[,1]
  status <- resp[,2]
  # Long-term (Z) and optional short-term (X) design matrices.
  Z <- stats::model.matrix(formula, data = mf, rhs = 1)
  X <- suppressWarnings(try( stats::model.matrix(formula, data = mf, rhs = 2), TRUE))
  labels <- colnames(Z)[-1]
  labels.ph <- colnames(X)[-1]
  Z <- matrix(Z[,-1], ncol=length(labels))
  if(ncol(X)>0){
    labels.ph <- colnames(X)[-1]
    X <- matrix(X[,-1], ncol=length(labels.ph))
  }
  n <- nrow(Z)
  q <- ncol(Z)
  p <- ncol(X)
  if(is.null(tau)){
    tau <- max(time)
  }
  if(is.null(degree)){
    degree <- ceiling(sqrt(length(time)))
  }
  index1 <- which(status==1)
  index2 <- which(status==0)
  n1 <- length(index1)
  n2 <- length(index2)
  par <- matrix(nrow=nboot, ncol=(2*q+p+degree))
  object <- NULL
  for(step in seq_len(nboot)){
    # Resample within each stratum (events / censored) with replacement.
    samp1 <- sample(index1, size=n1, replace=TRUE)
    samp2 <- sample(index2, size=n2, replace=TRUE)
    samp <- c(samp1, samp2)
    suppressWarnings({invisible(utils::capture.output(
      object <- try(ypbp(formula, data=data[samp,], degree=degree, tau=tau,
                         hessian=FALSE, approach="mle", init=0), TRUE)))})
    if(!inherits(object, "try-error")){
      par[step, ] <- object$fit$par
    }
  }
  # NOTE(review): column names are taken from the last successful refit;
  # assumes at least one replicate succeeded and that the non-"log_"
  # parameter names match ncol(par) -- confirm against the Stan model.
  colnames(par) <- names(object$fit$par[-grep("log_", names(object$fit$par))])
  return(par)
}
|
4f2327abf3dcd48cf0b509f2cc48eb04e9b6f46c
|
e7d40077078eae86b06770e95474d245b33472a1
|
/man/degMerge.Rd
|
f34d442312894866940b848e2e58035cdbf2f67f
|
[
"MIT"
] |
permissive
|
lpantano/DEGreport
|
1f90ac81886da7b96c024dfc8dbfe4831cf20469
|
0e961bfc129aab8b70e50892cb017f6668002e1a
|
refs/heads/main
| 2023-01-31T23:33:51.568775
| 2022-11-22T14:40:17
| 2022-11-22T14:40:17
| 17,710,312
| 20
| 14
|
MIT
| 2023-01-20T13:55:22
| 2014-03-13T13:06:49
|
R
|
UTF-8
|
R
| false
| true
| 1,453
|
rd
|
degMerge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustering.R
\name{degMerge}
\alias{degMerge}
\title{Integrate data coming from degPattern into one data object}
\usage{
degMerge(
matrix_list,
cluster_list,
metadata_list,
summarize = "group",
time = "time",
col = "condition",
scale = TRUE,
mapping = NULL
)
}
\arguments{
\item{matrix_list}{list expression data for each element}
\item{cluster_list}{list df item from degPattern output}
\item{metadata_list}{list data.frames from each element
with design experiment. Normally \code{colData} output}
\item{summarize}{character column to use to group samples}
\item{time}{character column to use as x-axes in figures}
\item{col}{character column to color samples in figures}
\item{scale}{boolean scale by row expression matrix}
\item{mapping}{data.frame mapping table in case elements use
different ID in the row.names of expression matrix. For instance,
when integrating miRNA/mRNA.}
}
\value{
A data.frame with information on what genes are in each cluster in
all data set, and the correlation value for each pair cluster comparison.
}
\description{
The simplest case is if you want to combine the pattern profile
for gene expression data and proteomic data. It will use the first element
as the base for the integration. Then, it will loop through clusters
and run \link{degPatterns} in the second data set to detect patterns that match
this one.
}
|
cafa48b04ed276421a154b5e8c875e1660b80a47
|
2a165938c9e860f88d58d5281589757e735e7a7a
|
/plot5.R
|
4802bdd0bf87f8ce232a4cc7bee161e90e479abb
|
[] |
no_license
|
stevenzchen/ExploratoryPollutantGraphs
|
567f13b3608f6002a58d7bc3a341639af9c77862
|
d9e6092e4a6b17e31ff6bfccb1d4baf932fdf5ee
|
refs/heads/master
| 2021-01-12T04:54:51.479179
| 2017-01-03T03:57:15
| 2017-01-03T03:57:15
| 77,810,177
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 989
|
r
|
plot5.R
|
# plot5.R -- How have motor vehicle emissions of PM2.5 changed in
# Baltimore City (fips 24510) from 1999 to 2008?
# Answer: slight downward trend, with low confidence.
# Steven Chen
library(dplyr)
library(ggplot2)

baltimoreCounty <- "24510"

pm25 <- readRDS("summarySCC_PM25.rds")
scc_table <- readRDS("Source_Classification_Code.rds")

# Flag every source classification whose short name mentions vehicles/motors.
is_vehicle <- grepl("Vehicle|Motor", scc_table$Short.Name)
vehicle_scc <- scc_table[is_vehicle, "SCC"]

# Yearly emission totals for vehicle-related sources in Baltimore City.
yearly_totals <- pm25 %>%
  filter(fips == baltimoreCounty) %>%
  filter(SCC %in% vehicle_scc) %>%
  group_by(year) %>%
  summarize(total = sum(Emissions))

# Scatter plot with a linear-regression smoother to show the trend.
ggplot(yearly_totals, aes(x = year, y = total)) +
  geom_point() +
  geom_smooth(method = "lm") +
  ggtitle("Total Emissions for Motor Vehicle Sources in Baltimore, MD") +
  xlab("Year") +
  ylab("Total Emissions (tons)")
ggsave(file = "plot5.png")
|
38824d70c6b229d5484cf84c05d1e9df0c9d583a
|
308d107fd0cfffb6f13b9101f77bb6ed2f3fe9ae
|
/03 - Population dynamics/00.3 - Functions_Elasticity_SLTRE.R
|
eefb5211c7674852fbadf7b1a4f5e2f6848c42aa
|
[] |
no_license
|
MarcoAndrello/Stoch_Demogr_Comp_Arabis
|
08a5a241c76550aed1e70fb2aecd2b56d4724fba
|
d327e434e3a7634f28f7efa4acc27de7e4f2f25d
|
refs/heads/master
| 2020-08-26T18:22:08.247883
| 2020-02-18T10:23:11
| 2020-02-18T10:23:11
| 217,101,255
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,342
|
r
|
00.3 - Functions_Elasticity_SLTRE.R
|
# Functions to calculate the deterministic intrinsic growth rate and
# deterministic elasticities to lower-level vital rates.
# They are used in the SLTRE. There is a Main version and a Seed bank version.
#
# Main version.
# Args:
#   surv:   stage-specific survival probabilities (length k)
#   growth: stage-transition probabilities, vectorized into a k x k matrix
#   F0, F1, F2: lower-level reproduction components; F1 is on a log10
#               scale (10^F1 below), F2 scales the recruit stage
# Returns a list with r (log lambda) and `elast`, the concatenated
# elasticity vector c(ES, EG, EF0, EF1, EF2).
# NOTE(review): relies on sensitivity() from an attached package
# (presumably popbio) that is not visible in this file -- confirm.
calc.elast.vital.rates <- function(surv, growth, F0, F1, F2){
# Number of stages
k <- length(surv)
# Defining matrices to construct the matrix model
survMat <- matrix(surv,k,k,byrow=T)
G <- matrix(growth,k,k)
# Fertility matrix: 0.02 converts fruits to recruits; F1 is log10 fruits.
F <- 0.02 * matrix(F2,k,1) %*% (F0*10^F1)
A <- G*survMat + F
# Calculating sensitivities, lambda and log-lambda
sens <- sensitivity(A)
lambda <- Re(eigen(A)$values[1])
r = log(lambda)
# Calculating elasticities to lower-level vital rates using the chain rule
ES <- colSums(sens*G) * surv/lambda
EG <- (sens*survMat) * G/lambda
EF0 <- EF1 <- EF2 <- vector()
for (j in 1 : k) {
EF0[j] <- EF1[j] <- sum( F[,j] * sens[,j] ) / lambda # It can be shown analytically that EF0 and EF1 are equal (but the sensitivities are not)
}
for (i in 1 : k) {
EF2[i] <- sum( F[i,] * sens[i,] ) / lambda
}
# Formatting results for output
elast = c(ES, as.vector(EG), EF0, EF1, EF2)
list(r = r, elast = elast)
}
# SEED BANK model: same computation as calc.elast.vital.rates but on an
# augmented (n+1) x (n+1) matrix A' whose first row/column represent the
# seed-bank stage, with germination probability `germ`.
# Returns a list with r (log lambda) and `elast`, the concatenated
# elasticity vector c(ES, EG, EF0, EF1, EF2, Egerm).
# NOTE(review): relies on sensitivity() from an attached package
# (presumably popbio) that is not visible in this file -- confirm.
calc.elast.vital.rates_SeedBank <- function(surv, growth, F0, F1, F2, germ){
n <- length(surv) # Determine number of stages and matrix dimension
dim.mat <- n + 1
# Since the input F1 is the log.nfruits
F1 <- 10^F1
# Reshape growth into a matrix
growthMat <- matrix(growth,nrow=n)
# Define coefficient of conversion from fruits to recruits
epsilon <- 0.02
# Construct matrix A', following the equations in the Appendix of the manuscript
A <- matrix(NA,nrow=dim.mat, ncol=dim.mat)
A[1,1] <- (1-germ)
for (k in 2 : dim.mat) {
A[k,1] <- germ * F2[k-1]
}
for (l in 2 : dim.mat) {
A[1,l] <- (1-germ) * epsilon * F0[l-1] * F1[l-1]
}
for (k in 2 : dim.mat) {
for (l in 2 : dim.mat) {
A[k,l] <- surv[l-1]*growthMat[k-1,l-1] + germ * epsilon * F0[l-1] * F1[l-1] * F2[k-1]
}
}
# Calculating sensitivities, lambda and log-lambda of the A' matrix
sens <- sensitivity(A)
lambda <- Re(eigen(A)$values[1])
r = log(lambda)
# Calculating sensitivities to vital rates following the equations in the Appendix
# Sensitivity to surv
Sens_surv <- rep(0,n)
for (j in 1 : n){
for (k in 2 : dim.mat){
Sens_surv[j] <- Sens_surv[j] + sens[k,j+1]*growthMat[k-1,j]
}
}
# Sensitivity to growth
Sens_growthMat <- matrix(0,nrow=n,ncol=n)
for (i in 1 : n) {
for (j in 1 : n){
Sens_growthMat[i,j] <- sens[i+1,j+1]*surv[j]
}
}
Sens_growth <- as.vector(Sens_growthMat) # because growth was vectorized in the same way (g11, g21, g31 etc.)
# Sensitivity to reproduction
Sens_F0 <- rep(0,n)
for (j in 1 : n){
Sens_F0[j] <- sens[1,j+1] * (1-germ) * epsilon *F1[j]
for (k in 2 : dim.mat){
Sens_F0[j] <- Sens_F0[j] + sens[k,j+1] * germ * epsilon * F1[j] * F2[k-1]
}
}
# Sensitivity to reproductive output
Sens_F1 <- rep(0,n)
for (j in 1 : n){
Sens_F1[j] <- sens[1,j+1] * (1-germ) * epsilon *F0[j]
for (k in 2 : dim.mat){
Sens_F1[j] <- Sens_F1[j] + sens[k,j+1] * germ * epsilon * F0[j] * F2[k-1]
}
}
# Sensitivity to recruit size
Sens_F2 <- rep(0,n)
for (i in 1 : n){
Sens_F2[i] <- sens[i+1,1] * germ
for (l in 2 : dim.mat) {
Sens_F2[i] <- Sens_F2[i] + sens[i+1,l] * germ * epsilon * F0[l-1] * F1[l-1]
}
}
# Sensitivity to germination
Sens_germ <- -sens[1,1]
for (k in 2 : dim.mat){
Sens_germ <- Sens_germ + sens[k,1] * F2[k-1]
}
for (l in 2 : dim.mat){
Sens_germ <- Sens_germ - sens[1,l] * epsilon * F0[l-1] * F1[l-1] # Note the "minus" sign !
}
for (k in 2 : dim.mat){
for (l in 2 : dim.mat) {
Sens_germ <- Sens_germ + sens[k,l] * epsilon * F0[l-1] * F1[l-1] * F2[k-1]
}
}
# Elasticities
ES <- surv / lambda * Sens_surv
EG <- growth / lambda * Sens_growth
EF0 <- F0 / lambda * Sens_F0 # Note that EF0 and EF1 are equal (but the sensitivities are not)
EF1 <- F1 / lambda * Sens_F1 # It is fine, it can be shown analytically
EF2 <- F2 / lambda * Sens_F2
Egerm <- germ / lambda * Sens_germ
# Formatting results for output
elast = c(ES, EG, EF0, EF1, EF2, Egerm)
list(r = r, elast = elast)
}
|
5faba7e24b560f5438fc9fb7d90bfad2cbb2ba80
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HK80/examples/WGS84GEO_TO_HK80GEO.Rd.R
|
e7b81c7ad4464173c7ddf8b9e3f951ffaa1ad8e9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 488
|
r
|
WGS84GEO_TO_HK80GEO.Rd.R
|
# Extracted example script for HK80::WGS84GEO_TO_HK80GEO (generated from
# the package's Rd examples).
library(HK80)
### Name: WGS84GEO_TO_HK80GEO
### Title: Convert WGS84GEO coordinates to HK80GEO coordinates
### Aliases: WGS84GEO_TO_HK80GEO
### Keywords: WGS84GEO HK80GEO
### ** Examples
# Print more digits so the small datum-shift differences are visible.
options(digits = 15)
# Convert a WGS84 latitude/longitude pair (decimal degrees) to HK80GEO.
WGS84GEO_TO_HK80GEO(22.322172084, 114.141187917)
#### $latitude
#### [1] 22.3236998617778
####
#### $longitude
#### [1] 114.138743472556
#### Answer from the online conversion tool
#### http://www.geodetic.gov.hk/smo/tform/tform.aspx
#### 22.323701767, 114.138734989
|
0c00b4c255d3351bef53fc37e8d832caeb32e60f
|
2d7a1cc54c6ffee066633479428368f496f10ae9
|
/ui_chat.R
|
b681a54d47c006b4c5d556bffd23041e76403723
|
[] |
no_license
|
ed-lau/proturnyze
|
1cd456256ddc23221ff1217f989fdccdc3f522e6
|
50d0e1ae11dddb9dfde12b7f8b6aeb53f6de2b30
|
refs/heads/master
| 2021-01-09T20:12:10.018344
| 2018-10-09T15:50:10
| 2018-10-09T15:50:10
| 62,418,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,836
|
r
|
ui_chat.R
|
###
### These are wrapper functions that separate parts of the UIs into separate pages for tidiness.
###
# Builds the "Help" tab of the Shiny UI: a left sidebar with help text and
# a main chatroom area (message window, text entry + Send button, user
# name input and a connected-users list).
# NOTE(review): relies on shiny UI helpers and a tagline() function
# defined elsewhere in the app, plus shinychat.css and sendOnEnter.js
# present in the app directory -- none of them visible in this file.
chat_page <- function(){
tabPanel("Help",
tagline(),
sidebarLayout(
sidebarPanel(
h3("Help and Feedback"),
tags$hr(),
p("Provide feedback, ask questions, and share your findings here."),
br()
),
mainPanel(
includeCSS("shinychat.css"),
# And custom JavaScript -- just to send a message when a user hits "enter"
# and automatically scroll the chat window for us. Totally optional.
includeScript("sendOnEnter.js"),
div(
# Setup custom Bootstrap elements here to define a new layout
class = "container-fluid",
div(class = "row-fluid",
# Set the page title
tags$head(tags$title("Chatroom"))
),
# The main panel
div(
class = "row-fluid",
mainPanel(
# Create a spot for a dynamic UI containing the chat contents.
uiOutput("chat"),
# Create the bottom bar to allow users to chat.
fluidRow(
div(class="span10",
textInput("entry", "")
),
div(class="span2 center",
actionButton("send", "Send")
)
)
),
# The right sidebar
sidebarPanel(
# Let the user define his/her own Name
textInput("user", "Your Name:", value=""),
tags$hr(),
h5("Connected Users"),
# Create a spot for a dynamic UI containing the list of users.
uiOutput("userList"),
tags$hr(),
helpText("--------")
)
)
)
)
)
)
}
|
d40dad0b224672dc2d26d4cb30a781a3f7ea8dc8
|
c064ecc411c2e7eed372b45d78875732ebf5e9c5
|
/04 - Data Frames/27 - Importing Data into R.R
|
5dcc2132a4f40b7247b0d49ca2c025505322f131
|
[] |
no_license
|
panchalashish4/R-Programming-A-Z
|
3542078161a6eea51f0f61e49358a4c6abb63b97
|
61f3978cd7d1e2241265dc85cea768cdbc2bdeec
|
refs/heads/main
| 2023-08-07T13:17:54.765000
| 2021-10-03T06:13:28
| 2021-10-03T06:13:28
| 408,997,386
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
27 - Importing Data into R.R
|
# Tutorial script: two ways of reading a CSV file into a data frame.
#Reading file
?read.csv()
#Method1: Select File Manually
# file.choose() opens an interactive file picker (interactive use only).
stats <- read.csv(file.choose())
stats
#Method2: Set WD and Read Data
getwd() #To check current directory
# NOTE(review): user-specific absolute path; adjust before running.
setwd("C:/Users/Name/Desktop/R Programming") #To set working directory
rm(stats)
stats <- read.csv("P2-Demographic-Data.csv")
|
724c5ea42f1f4d4c5ac519d014a094c054955943
|
5d4dcc088f0c711605e00e90920b9d21b4ffa5dd
|
/marketprofile.R
|
e893161fe5f6f26b727ac70fffd7dcd0c02d9e6c
|
[] |
no_license
|
jes-moore/shinycharts
|
78425d672f08a1b45d7a507284b8f3a632adc4b5
|
ee9289c1913d31752ab495adebf86ea5977c3ac5
|
refs/heads/master
| 2021-05-29T06:49:11.502738
| 2015-09-24T02:22:10
| 2015-09-24T02:22:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,447
|
r
|
marketprofile.R
|
# Intraday "market profile" for an ASX ticker: histogram of 5-minute close
# prices over the last five trading days, binned by price, coloured by
# hour of day, one facet per date.
#
# Fix: removed a stray trailing comma inside the seq() call used for the
# x-axis breaks, which passed an empty argument to seq().
#
# NOTE(review): chartapi.finance.yahoo.com was retired by Yahoo; the
# download will fail until the data source is replaced.
# Relies on reshape2::melt, dplyr and ggplot2 being attached by the app.
marketprofile <- function(Ticker){
  ###############Market Profile#########################
  stockdata <- read.csv(paste("http://chartapi.finance.yahoo.com/instrument/1.0/", Ticker,
                              ".AX/chartdata;type=quote;range=", 5, "d/csv", sep = ""),
                        skip = 22,
                        header = FALSE,
                        stringsAsFactors = FALSE)
  # First column is a Unix timestamp.
  stockdata[, 1] <- as.POSIXct(stockdata$V1, origin = "1970-01-01")
  colnames(stockdata) <- c("Timestamp", "Close", "High", "Low", "Open", "Volume")
  stockdata$Date <- as.Date(stockdata$Timestamp)
  stockdata <- dplyr::group_by(.data = stockdata, Date)
  # One row per observation with the close price in `value`.
  melted <- melt(data = stockdata, id.vars = c("Timestamp", "Date", "Volume"),
                 measure.vars = "Close")
  # Bucket timestamps by hour for the fill colour.
  melted$Time <- cut(melted$Timestamp, breaks = "hour")
  melted$Time <- strftime(melted$Time, format = "%H:%M:%S")
  # Colour-blind-friendly palette.
  cbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00")
  a <- ggplot(data = melted) +
    geom_histogram(aes(x = value, fill = Time),
                   binwidth = (range(melted$value)[2] - range(melted$value)[1]) / 30) +
    facet_grid(. ~ Date, scales = "free_y") +
    scale_fill_manual(values = cbPalette) +
    ylab("Close Interval Count") +
    xlab("Share Price") +
    scale_x_continuous(breaks = round(seq(min(melted$value), max(melted$value),
                                          length.out = 10), digits = 3)) +
    coord_flip()
  a
}
# Volume-weighted variant of the market profile: same layout as
# marketprofile() but each price bin is weighted by the traded volume of
# its 5-minute interval.
#
# Fix: removed a stray trailing comma inside the seq() call used for the
# x-axis breaks, which passed an empty argument to seq().
#
# NOTE(review): chartapi.finance.yahoo.com was retired by Yahoo; the
# download will fail until the data source is replaced.
# Relies on reshape2::melt, dplyr and ggplot2 being attached by the app.
marketprofilevol <- function(Ticker){
  ###############Market Profile#########################
  stockdata <- read.csv(paste("http://chartapi.finance.yahoo.com/instrument/1.0/", Ticker,
                              ".AX/chartdata;type=quote;range=", 5, "d/csv", sep = ""),
                        skip = 22,
                        header = FALSE,
                        stringsAsFactors = FALSE)
  # First column is a Unix timestamp.
  stockdata[, 1] <- as.POSIXct(stockdata$V1, origin = "1970-01-01")
  colnames(stockdata) <- c("Timestamp", "Close", "High", "Low", "Open", "Volume")
  stockdata$Date <- as.Date(stockdata$Timestamp)
  stockdata <- dplyr::group_by(.data = stockdata, Date)
  melted <- melt(data = stockdata, id.vars = c("Timestamp", "Date", "Volume"),
                 measure.vars = "Close")
  melted$Time <- cut(melted$Timestamp, breaks = "hour")
  melted$Time <- strftime(melted$Time, format = "%H:%M:%S")
  cbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00")
  b <- ggplot(data = melted) +
    geom_histogram(aes(x = value, fill = Time, weight = Volume),
                   binwidth = (range(melted$value)[2] - range(melted$value)[1]) / 30) +
    facet_grid(. ~ Date, scales = "free_y") +
    scale_fill_manual(values = cbPalette) +
    ylab("Volume") +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    xlab("Share Price") +
    scale_x_continuous(breaks = round(seq(min(melted$value), max(melted$value),
                                          length.out = 10), digits = 3)) +
    coord_flip()
  b
}
# Market profile for the current day only, rendered as a stacked rCharts
# (Highcharts) bar chart instead of ggplot.
# NOTE(review): count(melted, c("Date", "Time", "value")) uses the
# plyr::count signature (vars as a character vector, returns a `freq`
# column); with dplyr also attached, load order determines which count()
# is found -- confirm plyr is available and intended here.
marketprofilehighchart <- function(Ticker){
###############Market Profile#########################
stockdata<-read.csv(paste("http://chartapi.finance.yahoo.com/instrument/1.0/",Ticker,".AX/chartdata;type=quote;range=",5,"d/csv",sep = ""),
skip=22,
header = FALSE,
stringsAsFactors = FALSE)
# First column is a Unix timestamp.
stockdata[,1] <- as.POSIXct(stockdata$V1,origin = "1970-01-01")
colnames(stockdata) <- c("Timestamp","Close","High","Low","Open","Volume")
stockdata$Date <- as.Date(stockdata$Timestamp)
stockdata <- dplyr::group_by(.data = stockdata,Date)
melted <- melt(data = stockdata,id.vars = c("Timestamp","Date","Volume"),measure.vars = "Close")
# Bucket timestamps by hour for the stacking groups.
melted$Time <- cut(melted$Timestamp, breaks="hour")
melted$Time <- strftime(melted$Time, format="%H:%M:%S")
melted <- count(melted, c("Date", "Time","value"))
# Keep only today's observations.
melted <- melted[melted$Date == Sys.Date(),]
#Create Highchart plot
m1 <- hPlot(data = melted,freq ~ value ,type = "bar", group = "Time", stacking = "normal")
m1$plotOptions(series = list(stacking = 'normal'))
m1$set(width = 750, height = 400)
m1
}
|
216c4c40509e07a332cf95423b9f28b2e6cf1f95
|
235979ce8f957b0ec258bfc9b9f90b64c15798b1
|
/man/iWellPlot.Rd
|
91dbef53dbc51f198e64197bc57711a6e282d244
|
[] |
no_license
|
cran/iScreen
|
d7843cf5bb6b0afcfa30420d10d825dee2136d39
|
859c3f95cd29ad819c39437a647f4615b826c910
|
refs/heads/master
| 2021-01-01T16:40:38.035847
| 2014-02-03T00:00:00
| 2014-02-03T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 480
|
rd
|
iWellPlot.Rd
|
\name{iWellPlot}
\alias{iWellPlot}
\title{Plotting iWell}
\usage{
iWellPlot(object, xlab = "X", ylab = "Y", ...)
}
\arguments{
\item{object}{An iScreen object.}
\item{xlab}{Default is "X".}
\item{ylab}{Default is "Y".}
\item{...}{Arguments to be passed to methods. See
\code{\link{plot}} and \code{\link{par}}}
}
\description{
Function for plotting object returned by iPlate. For more
details about the graphical parameter arguments, see
\code{\link{par}}.
}
|
7cbdb9d721f55e4ed5bad53c1cf6b279de403680
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Mathematical_Statistics_And_Data_Analysis_by_John_A_Rice/CH8/EX8.4.D/Ex_8_4_D.R
|
92815540c0bb5274063e127e7867c783cfa5c061
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 169
|
r
|
Ex_8_4_D.R
|
#Page 119
# Symbolic computation of E[X] for the density (1 + a*x)/2 on (-1, 1)
# using the yacas computer-algebra engine via Ryacas.
library(Ryacas)
# f is x times the density, so integrating f over (-1, 1) yields the
# mean; analytically E[X] = a/3.
f = function(x,a) x*(1 + a*x)/2
# Symbolic variables handled by yacas.
x = yac_symbol("x")
a = yac_symbol("a")
# integrate() here dispatches to Ryacas's symbolic method on yac_symbol
# objects (not stats::integrate).
miu = integrate(f(x,a),"x",-1,1)
print(simplify(miu))
|
4ff5a234f24e203d7615aacf477c4e2bc1ada2ec
|
0ddeb15558a11d46e79f32adeea383ed2bb30389
|
/Part3/section5.R
|
2851a97c2759e375c9b66a08bd792b9166cb0c07
|
[] |
no_license
|
HyungcheolSon/R
|
b79150b0b15cc2fb52ef31c2995b2a51a07debf3
|
e22f85ab75514eb7d815fdaf1c13208cd57d5257
|
refs/heads/master
| 2020-06-02T22:34:16.342852
| 2019-06-14T02:12:17
| 2019-06-14T02:12:17
| 189,161,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 780
|
r
|
section5.R
|
# section5.R -- tutorial: basic assignment, coercion and workspace commands.
#
# Fixes:
#   * removed an unresolved git merge conflict (<<<<<<< HEAD / ======= /
#     >>>>>>> markers wrapping two identical copies of the script), which
#     made the file unparseable;
#   * corrected the `object()` typo to `objects()` -- the `character(0)`
#     shown below is what objects() returns on an empty workspace.
var1 <- "aaa"
var2 <- 111
var3 <- Sys.Date()
var4 <- c("a","b","c")
var1
var2
var3
var4
# Right-assignment chain: both var5 and var6 receive 111.
111 -> var5 -> var6
String1 <-"So Easy R Programming"
String1
String2 <- "I'm Hyungcheol Son"
String2
# Mixing numeric and character in c() coerces everything to character.
comp <-c(1,"2")
comp
class(comp)
num1<-1
num2<-2
num1+num2
seq1<-1:5
seq1
seq2<-1:11
seq2
String2
# After rm(), referencing String2 raises "object not found" (intentional demo).
rm(String2)
String2
objects()
# NOTE(review): rm(list = ls()) wipes the entire workspace -- tutorial only.
rm(list=ls())
objects()
character(0)
|
8fc528161aad18d8784c2ef29fcd65cef3599eae
|
768550e0018f0f6db82d99073736bb7511972eeb
|
/man/get_bref_all_nba_teams.Rd
|
2649c4af30be7471b23bd07074360ae5db7ce59a
|
[] |
no_license
|
chadmillard/nbastatR
|
74964e3af974edec814d9767ba1636ff6d65553c
|
3ab473beeb7564dc21d59036bca4df87b6f2ce89
|
refs/heads/master
| 2020-03-26T12:25:44.419875
| 2018-07-29T17:43:49
| 2018-07-29T17:43:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 607
|
rd
|
get_bref_all_nba_teams.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bref.R
\name{get_bref_all_nba_teams}
\alias{get_bref_all_nba_teams}
\title{All NBA teams}
\usage{
get_bref_all_nba_teams(only_nba = T, return_message = T)
}
\arguments{
\item{only_nba}{if `TRUE` returns only NBA all NBA teams}
\item{return_message}{if `TRUE` returns a message}
}
\value{
a `data_frame`
}
\description{
All NBA teams
}
\examples{
get_bref_all_nba_teams()
}
\seealso{
Other awards: \code{\link{get_bref_awards}},
\code{\link{get_bref_seasons_award_votes}},
\code{\link{get_players_awards}}
}
\concept{awards}
|
4f6cfa88cf5607e3d42f7bd9fb85694ac03ba094
|
9e4df408b72687493cc23144408868a975971f68
|
/SMS_r_prog/flsms/flindex.sms.r
|
4fc3d9e5908a24d203777e13ff7f6c6e7b8418aa
|
[
"MIT"
] |
permissive
|
ices-eg/wg_WGSAM
|
7402ed21ae3e4a5437da2a6edf98125d0d0e47a9
|
54181317b0aa2cae2b4815c6d520ece6b3a9f177
|
refs/heads/master
| 2023-05-12T01:38:30.580056
| 2023-05-04T15:42:28
| 2023-05-04T15:42:28
| 111,518,540
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,058
|
r
|
flindex.sms.r
|
# S4 class extending FLIndex (FLCore) with one extra slot, range.SMS,
# holding SMS-specific survey settings: season, power-model age, q age,
# variance age groups and a minimum CV.
# NOTE(review): the prototype stores a list in a slot declared "vector";
# in S4 a list is a vector type so this is legal, but worth confirming
# that downstream code expects a list here (SMS2FLIndices assigns a list).
setClass("FLIndex.SMS",
contains="FLIndex",
representation(
range.SMS="vector"
) ,
prototype=prototype(range.SMS=list(season=1, power.age=-1, q.age=0,
var.age.group=as.vector(0,mode="list"),minCV=0.3))
)
# Constructor for FLIndex.SMS survey-index objects.
#
# Builds the underlying FLIndex slots from an `index` FLQuant (taken from
# `...`, or an empty FLQuant by default) and stores the SMS-specific
# settings in the range.SMS slot.
#
# Fix: the previous version computed the range.SMS values into a local
# variable (`range.SMS = unlist(...)`) that was then discarded, so the
# slot was never populated and always kept its prototype value. The
# values are now assigned to the slot, as a list mirroring the class
# prototype and the assignment done in SMS2FLIndices().
FLIndex.SMS <- function(name=character(0), desc=character(0), distribution=character(0),
                        type=character(0), startf=NA, endf=NA, plusgroup=NA, season=NA, power.age=NA,
                        q.age=NA, var.age.group=NA, minCV=0.3, ...) {
  args <- list(...)
  if(length(args)==0)
    args <- list(index=FLQuant())
  # Dimnames of the first FLQuant argument drive all quant slots.
  dimnames <- dimnames(args[[names(lapply(args, is.FLQuant)==TRUE)[1]]])
  sdimnames <- dimnames
  sdimnames[1] <- "all"
  if(!is.FLQuant(args['index']))
    index <- FLQuant(dimnames=dimnames)
  dims <- dims(index)
  new <- new("FLIndex.SMS", name = name, desc = desc, distribution = distribution,
             type=type,
             index = index, index.var = FLQuant(dimnames=dimnames),
             index.q = FLQuant(dimnames=dimnames), sel.pattern = FLQuant(dimnames=dimnames),
             catch.n = FLQuant(dimnames=dimnames), catch.wt = FLQuant(dimnames=dimnames),
             effort = FLQuant(dimnames=sdimnames),
             range = unlist(list(min=dims$min, max=dims$max,
                                 plusgroup=NA, minyear=dims$minyear, maxyear=dims$maxyear, startf=startf, endf=endf)))
  # Populate the SMS-specific slot (previously computed but never stored).
  new@range.SMS <- list(season=season, power.age=power.age,
                        q.age=q.age, var.age.group=var.age.group, minCV=minCV)
  # Add extra arguments
  for(i in names(args)[names(args)!='iniFLQuant'])
    slot(new, i) <- args[[i]]
  return(new)
}
# Read SMS fleet/survey input files and return them as an FLIndices list
# of FLIndex.SMS objects.
#
# Reads three files from `path` (or the current directory):
#   fleet.name  -- one survey name per line ('_' rendered as spaces),
#   fleet.inf   -- minimum CV, fleets per species, a 10-column descriptor
#                  matrix per fleet, then the variance age groups,
#   fleet.index -- per fleet/year: effort followed by catch at age.
#
# Fix: the working directory is now restored with on.exit(), so an error
# while reading/parsing no longer leaves the session stranded in `path`
# (previously setwd(old.wd) only ran on the success path).
#
# Args:
#   control: SMS control object; reads slots no.species, last.season and
#            species.info.
# Returns an FLIndices object with one FLIndex.SMS per fleet.
SMS2FLIndices <- function(control, path=NULL, fleet.inf="fleet_info.dat", fleet.index="fleet_catch.in",
                          fleet.name="fleet_names.in") {
  old.wd <- getwd()
  on.exit(setwd(old.wd), add = TRUE)  # restore wd even on error
  if (!is.null(path)) setwd(path)
  nsp <- slot(control, "no.species")
  nq <- slot(control, "last.season")
  # count number of other predators (predator code 2); they carry no index
  info <- slot(control, "species.info")[, "predator"]
  no.oth <- sum(info == 2)
  nsp <- nsp - no.oth
  # Fleet names: underscores encode spaces; trim trailing whitespace.
  s <- readLines(fleet.name, n=1000)
  s <- gsub('_', ' ', s)
  fl.names <- sub('[[:space:]]+$', '', s)
  info <- scan(fleet.inf, comment.char = "#", quiet=TRUE)
  minCV <- info[1]
  i <- 2
  n.fleet <- as.vector(info[i:(i-1+nsp)])
  i <- i + nsp
  sum.fleet <- sum(n.fleet)
  # Ten descriptors per fleet: first/last year, alpha/beta (startf/endf),
  # first/last age, last age for q, last age for power model, season,
  # number of variance age groups.
  fl.info <- matrix(info[i:(i-1+sum.fleet*10)], ncol=10, nrow=sum.fleet, byrow=TRUE)
  i <- i + sum.fleet*10
  sum.var.age <- sum(fl.info[,10])
  fl.var <- as.vector(info[i:(i-1+sum.var.age)])
  CE <- scan(fleet.index, comment.char = "#", quiet=TRUE)
  # creates empty FLIndices object
  FLIndices. <- FLIndices()
  i <- 1
  v <- 1
  sp.fl <- 0
  for (sp in 1:nsp) {
    for (fl in 1:n.fleet[sp]) {
      sp.fl <- sp.fl + 1
      fy <- fl.info[sp.fl,1]
      ly <- fl.info[sp.fl,2]
      alfa <- fl.info[sp.fl,3]
      beta <- fl.info[sp.fl,4]
      fa <- fl.info[sp.fl,5]
      la <- fl.info[sp.fl,6]
      la.q <- fl.info[sp.fl,7]
      la.p <- fl.info[sp.fl,8]
      seas <- fl.info[sp.fl,9]
      n.var <- fl.info[sp.fl,10]
      nyr <- ly - fy + 1
      nages <- la - fa + 1
      # template for input to quant
      dim <- c(nages,nyr,1,1,1,1)
      dim2 <- c(1,nyr,1,1,1,1)
      dimnames <- list(age=fa:la, year=fy:ly, unit="all", season=seas, area="all", iter="none")
      dimnames2 <- list(age="all", year=fy:ly, unit="all", season=seas, area="all", iter="none")
      # One row per year: effort in column 1, catch at age in the rest.
      tmp <- matrix(CE[i:(nyr*(nages+1)+i-1)], ncol=nages+1, nrow=nyr, byrow=TRUE)
      effort <- array(tmp[,1], dim=dim2, dimnames=dimnames2)
      catch <- matrix(tmp[,2:(nages+1)], ncol=nyr, nrow=nages, byrow=TRUE)
      catch <- array(catch, dim=dim, dimnames=dimnames)
      # CPUE index = catch / effort (effort recycled over ages).
      index <- catch/rep(effort, each=nages)
      indx <- FLIndex.SMS(index=as.FLQuant(index), effort=as.FLQuant(effort), catch.n=as.FLQuant(catch),
                          name = fl.names[sp.fl], desc = fl.names[sp.fl])
      indx@range <- unlist(list("min"=fa, "max"=la, "plusgroup"=NA, "minyear"=fy, "maxyear"=ly, "startf"=alfa, "endf"=beta))
      indx@range.SMS <- list("season"=seas, "power.age"=la.p,
                             "q.age"=la.q, "var.age.group"=as.vector(fl.var[v:(v-1+n.var)]), "minCV"=minCV)
      v <- v + n.var
      i <- i + nyr*(nages+1)
      FLIndices.[[sp.fl]] <- indx
    }
  }
  FLIndices.
}
# Inverse of SMS2FLIndices: write an FLIndices object (plus an FLSMS.control
# object for species bookkeeping) back out as the three SMS input files
# fleet_info.dat, fleet_catch.in and fleet_names.in in 'out.path'.
# Indices are assumed ordered by species; the species of each index is read
# from the first 3 characters of its 'desc' slot.
# NOTE(review): as in SMS2FLIndices, the working directory is changed and
# restored only on normal exit.
FLIndices2SMS<-function(out.path=NULL,indices=NULL,control=NULL,fleet.inf="fleet_info.dat",
         fleet.index="fleet_catch.in",fleet.name="fleet_names.in") {
 old.wd<-getwd()
 if (!is.null(out.path)) setwd(out.path)
 # validate inputs up front
 if (is.null(indices))
        stop("A 'FLIndices' must be given")
 if (!inherits(indices, "FLIndices"))
        stop("indices must be an 'FLIndices' object!")
 for (i in 1:length(indices)) {
   if (is.na(indices[[i]]@range["startf"]) || is.na(indices[[i]]@range["endf"]))
      stop(paste("Must supply startf & endf for range in FLIndex",i))
   if (!all(names(indices[[i]]@range) == c("min","max","plusgroup","minyear","maxyear","startf","endf")))
      stop("Range must have names 'min','max','plusgroup','minyear','maxyear','startf','endf'")
 }
 if (!inherits(control, "FLSMS.control"))
        stop("control must be an 'FLSMS.control' object!")
 if (!validObject(control)) stop("control is not valid!")
 nsp<-slot(control,"no.species")
 #count number of other predators
 info<-slot(control,"species.info")[,7]
 no.oth<-sum(info==2)
 nsp<-nsp-no.oth # no of VPA species
 first.year<-slot(control,"first.year")
 last.year<-slot(control,"last.year.model")
 last.season<-slot(control,"last.season")
 n.season<-last.season
 no.indices<-rep(0,nsp)
 # 10 meta columns per fleet, mirroring the fleet_info.dat layout
 info<-matrix(0,ncol=10,nrow=length(indices))
 fl.name<-rep('',length(indices))
 v.age<-list()
 sp<-1; n<-1
 old.sp<-substr(indices[[1]]@desc,1,3)
 # ---- write fleet_catch.in: per fleet a header comment then one row per
 # year of [effort, catch-at-age...] ----
 cat("# file fleet_catch.in\n",file=fleet.index)
 for (idc in indices) {
    fl.name[n]<-idc@name
    sp.name<-substr(idc@desc,1,3)
    # a change in the 3-letter species prefix advances the species counter
    if (nsp>1 & sp.name!=old.sp) {sp<-sp+1; old.sp<-sp.name}
    cat("# ",sp.name,",",fl.name[n],"\n",file=fleet.index,append=TRUE)
    no.indices[sp]=no.indices[sp]+1
    # collect the 10 meta columns from range and range.SMS
    range<-idc@range
    info[n,1]<-range["minyear"]
    info[n,2]<-range["maxyear"]
    info[n,3]<-range["startf"]
    info[n,4]<-range["endf"]
    info[n,5]<-range["min"]
    info[n,6]<-range["max"]
    info[n,7]<-idc@range.SMS$q.age
    info[n,8]<-idc@range.SMS$power.age
    info[n,9]<-idc@range.SMS$season
    info[n,10]<-length(idc@range.SMS$var.age.group)
    v.age<-c(v.age,list(idc@range.SMS$var.age.group))
    minCV<-idc@range.SMS$minCV
    write.table(cbind(as.vector(idc@effort),
               t(matrix(idc@catch.n,ncol=info[n,2]-info[n,1]+1,byrow=FALSE))),
               file=fleet.index,row.names=FALSE,col.names=FALSE,quote=FALSE,append=TRUE)
    n<-n+1
 }
 cat("-999 # Check value\n",file=fleet.index,append=TRUE)
 # ---- write fleet_info.dat: minCV, fleets per species, then the 10 meta
 # columns and variance groups per fleet ----
 cat(paste("# file: fleet_info.dat\n",minCV," #min CV of CPUE observations\n"),file=fleet.inf)
 cat("# number of fleets by species\n",file=fleet.inf,append=TRUE)
 write(no.indices,file=fleet.inf,ncolumns=nsp,append=TRUE)
 cat("#############",
     "\n# 1-2, First year last year,",
     "\n# 3-4. Alpha and beta - the start and end of the fishing period for the fleet given as fractions of the season (or year if annual data are used),",
     "\n# 5-6 first and last age,",
     "\n# 7. last age with age dependent catchability,",
     "\n# 8. last age for stock size dependent catchability (power model), -1 indicated no ages uses power model,",
     "\n# 9. season for survey,",
     "\n# 10. number of variance groups for estimated cathability,",
     "\n# by species and fleet",
     "\n#############\n", file=fleet.inf,append=TRUE)
 i<-0
 for (s in (1:nsp)) {
   cat("# ",control@species.names[s+no.oth],"\n",file=fleet.inf,append=TRUE)
   for (id in (1:no.indices[s])) {
     i<-i+1
     cat("# ",fl.name[i],"\n",file=fleet.inf,append=TRUE)
     write(file=fleet.inf ,info[i,],ncolumns=10,append=TRUE)
   }
 }
# write.table(file=fleet.inf ,info,row.names=FALSE,col.names=FALSE,quote=FALSE,append=TRUE)
 cat("# variance groups\n",file=fleet.inf,append=TRUE)
 #for (a in v.age) write(a,file=fleet.inf,append=TRUE)
 i<-0
 for (s in (1:nsp)) {
   cat("# ",control@species.names[s+no.oth],"\n",file=fleet.inf,append=TRUE)
   for (id in (1:no.indices[s])) {
     i<-i+1
     cat("# ",fl.name[i],"\n",file=fleet.inf,append=TRUE)
     write(file=fleet.inf ,v.age[[i]],append=TRUE)
   }
 }
 cat("-999 # Check value\n",file=fleet.inf,append=TRUE)
 # ---- write fleet_names.in: names padded to 26 chars, spaces -> underscores
 # (the reverse of the decoding done in SMS2FLIndices) ----
 fl.name<-substr(paste(fl.name,"__________________________",sep=''),1,26)
 fl.name<-gsub(' ','_',fl.name)
 write(fl.name,file=fleet.name)
 setwd(old.wd)
}
# test FLIndices2SMS(out.path=out.path,indices=SMS.indices,control=SMS.dat)
|
fdb971d72716f7cc499a3bbe90ea69d19a0dce71
|
92895544621673dc09df46643ffcfe5158de5106
|
/R/rbing.matrix.gibbs.R
|
a804c06c59f69584f686d845829ffa3e996e1b85
|
[] |
no_license
|
pdhoff/rstiefel
|
39f6a1541aa424a8964bd4edf0382e9a343f7b42
|
c19d696e357365f3dc814400fb56e7b254d11983
|
refs/heads/master
| 2021-06-18T23:41:10.289312
| 2021-06-15T13:59:20
| 2021-06-15T13:59:20
| 94,270,392
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,278
|
r
|
rbing.matrix.gibbs.R
|
#' Gibbs Sampling for the Matrix-variate Bingham Distribution
#'
#' Perform one Gibbs scan for a random orthonormal matrix drawn from the
#' matrix Bingham (BMF) distribution with parameters \code{A} and \code{B},
#' following Hoff (2009).  Call repeatedly to build a Markov chain.
#'
#' @param A a symmetric matrix.
#' @param B a diagonal matrix with decreasing entries.
#' @param X the current value of the random orthonormal matrix.
#' @return a new value of the matrix \code{X} obtained by Gibbs sampling.
#' @note This provides one Gibbs scan. The function should be used iteratively.
#' @author Peter Hoff
#' @references Hoff(2009)
#' @examples
#'
#' Z<-matrix(rnorm(10*5),10,5) ; A<-t(Z)%*%Z
#' B<-diag(sort(rexp(5),decreasing=TRUE))
#' U<-rbing.Op(A,B)
#' U<-rbing.matrix.gibbs(A,B,U)
#'
#' @export rbing.matrix.gibbs
rbing.matrix.gibbs <- function(A, B, X) {
  m <- nrow(X)
  R <- ncol(X)
  if (m > R) {
    # Tall case: resample each column in random order, conditional on the
    # others, by projecting into the null space of the remaining columns and
    # drawing from the corresponding vector Bingham full conditional.
    for (r in sample(seq_len(R))) {
      nullbasis <- NullC(X[, -r])
      Ar <- B[r, r] * t(nullbasis) %*% A %*% nullbasis
      X[, r] <- nullbasis %*% rbing.vector.gibbs(Ar, t(nullbasis) %*% X[, r])
    }
  }
  if (m == R) {
    # Square case: the full conditional of one column given all the others is
    # just +/- the single null-space vector, so the matrix must be updated at
    # least two columns at a time.
    for (scan in seq_len(R)) {
      pair <- sort(sample(seq_len(R), 2))
      nullbasis <- NullC(X[, -pair])
      Apair <- t(nullbasis) %*% A %*% nullbasis
      X[, pair] <- nullbasis %*% rbing.Op(Apair, B[pair, pair])
    }
  }
  X
}
|
bee6289441d52ec5abacc2e05dc1071b2c4318a5
|
0815d30d5e9a9b13466a728b4795e63bf7a81c25
|
/Scripts/4 - Trait GWAS colocalization figure.R
|
2efdb719b500a1f7b7a16504f39c45ed55728976
|
[] |
no_license
|
ntduc11/Sunflower-GWAS-v2
|
5e82506e88a39934f22a8a08b9a03892fb8a36af
|
14bb4d4ef23c25651b1a462d67f89b77d1155910
|
refs/heads/master
| 2022-12-01T06:39:39.065583
| 2020-08-13T13:09:28
| 2020-08-13T13:09:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,374
|
r
|
4 - Trait GWAS colocalization figure.R
|
# Trait GWAS colocalization figure: read the significance/haplotype-block
# tables produced by earlier pipeline steps and assemble a per-region,
# per-trait-environment colocalization table for plotting below.
library(gridExtra)
library(ggpubr)
library(cowplot)
library(wesanderson)
#### read in preferences
prefs<-read.table("Scripts/### Preferences ###",header=F,sep="=",skip=1)
SNPset<-as.character(prefs[2,2])
pheno.name<-as.character(prefs[1,2])
multcomp<-as.numeric(as.character(prefs[3,2]))
##########
# tile fill colours: significant (green), suggestive (grey)
colors<-c("#1b9e77", "gray85")
envs<-as.character(read.table("environments_to_run.txt")[,1])
sig.blocks<-read.table("Tables/Blocks/traits_to_genomeblocks_signif.txt", header=T)
sug.blocks<-read.table("Tables/Blocks/traits_to_genomeblocks_sugest.txt", header=T)
sig.list<-read.table("Tables/Blocks/sigsnips_to_genomeblocks.txt",header=T)
sighap_to_genomehap<-read.table("Tables/Blocks/condensed_genome_blocks.txt",header=T)
#### set up data to feed into plotting
# keep significant blocks plus suggestive hits that land on the same blocks
colocate<-rbind(sig.blocks,sug.blocks[sug.blocks$hapID%in%sig.blocks$hapID,])
# map genome blocks back to their significant haplotype and colocate region
colocate$sighap<-sighap_to_genomehap$sig.hap[match(colocate$hapID,sighap_to_genomehap$genome.hap)]
colocate$region<-sighap_to_genomehap$colocate.region[match(colocate$hapID,sighap_to_genomehap$genome.hap)]
colocate$trait_env<-paste(colocate$trait,colocate$env,sep="_")
traits.per.block<-colocate %>% group_by(sighap) %>% summarise(trait_num=length(trait_env))
# order blocks by chromosome then block number for plotting
colocate<-colocate %>% separate(sighap, sep= "_", c("chromosome","blocknum"),remove=F) %>%
  arrange(chromosome, blocknum)
colocate<- colocate %>% mutate(beta.sign=sign(beta))
colocate$region<-factor(colocate$region)
write.table(colocate,"Tables/Blocks/colocate_table.txt")
############## helper for collapsing per-genome-block significance calls into
# a single call per colocalization region: a region counts as "significant"
# if ANY of its component genome haplotype blocks is significant, otherwise
# it is "suggestive".
# (Fix: the original returned the misspelled label "signficicant"; the
# corrected spelling keeps the same alphabetical factor-level order relative
# to "suggestive", so the downstream scale_fill_manual mapping is unchanged.)
sig.sug.fun<-function (x) {
  if ("significant" %in% x) "significant" else "suggestive"
}
##################
##### condense to single entry per region (collapse genome blocks)
# one row per region x trait_env; significance is "significant" if any
# component genome block was significant, and beta signs are averaged
colocate<-colocate %>% group_by(region,trait_env) %>% dplyr::summarize(trait=trait[1],
                                                          env=env[1],
                                                          pvalue=factor(sig.sug.fun(pvalue)),
                                                          chromosome=chromosome[1],
                                                          beta.sign=factor(sign(mean(beta.sign))))
# vertical lines at chromosome boundaries (cumulative region counts, offset
# by 0.5 to fall between tiles; the final boundary is dropped)
chrom.borders<-colocate %>% group_by(chromosome)%>% summarise(bin=length(unique(region))) %>% arrange(as.integer(chromosome))
chrom.borders<-cumsum(chrom.borders$bin)
chrom.borders<-chrom.borders+0.5
chrom.borders<-chrom.borders[1:length(chrom.borders)-1]
#### draw the three environment colocate plots separately
colocate<-colocate[!duplicated(paste(colocate$region,colocate$trait_env)),]
for (i in 1: length(envs)) {
  q<-i
  plot.data<-as.data.frame(colocate[colocate$env==envs[i],])
  # builds the trait correlation dendrogram and Env.label.order / Env.dendro
  source("Scripts/4b - correlation dendrogram.R") ### update script referal after changing names
  # order traits on the y axis to match the dendrogram leaf order
  plot.data$trait.factor<-factor(plot.data$trait,levels=Env.label.order)
  baseplot<-ggplot(plot.data,aes(x=region,y=trait.factor,fill=pvalue))
  plot.colocate<- baseplot+geom_vline(xintercept=c(1:length(plot.data$region)),colour="darkgrey",linetype=3)+
    geom_vline(xintercept=chrom.borders,colour="black")+
    geom_tile(fill="white")+
    geom_tile(colour="black")+
    # overlay the sign of the (mean) effect as +, - or mixed
    geom_point(aes(shape=as.factor(beta.sign)))+
    scale_shape_manual(values=c("+","-","±"))+
    theme_minimal()+
    theme(axis.text.y = element_text(hjust = 0))+
    scale_fill_manual(values=c(colors[2],colors[1]))+
    scale_alpha_manual(values=c(1,0.3))+
    scale_x_discrete(drop=F)+
    theme_classic()+
    theme(axis.title.y=element_blank(),axis.text.x = element_text(angle = 90, vjust = 0.5,hjust=1))+
    ggtitle(envs[i])+theme(legend.position = "none")+theme(axis.title.x=element_blank())
  # plot.data.dendro<-plot.data.dendro+coord_flip(xlim=c(4,length(plot.data.label.order)-4))+
  #   theme(axis.text.x=element_text(size=8))+theme(axis.title.x=element_blank())
  # combine the dendrogram (left) with the tile plot (right)
  comb.plot<-plot_grid(Env.dendro+theme(plot.margin = unit(c(0, 0, 0, 0), "cm")),
                       plot.colocate+theme(plot.margin = unit(c(0, 0, 0, 0), "cm")),
                       align="h",rel_widths=c(1,9))
  trait.to.region.ratio<-length(levels(plot.data$region))/length(Env.label.order)
  ggsave(paste("Plots/Colocalization/colocate-",envs[i],".pdf",sep=""),plot=comb.plot,width=22,height=6)
}
|
451d2ba4ae509c2fa8cb72e3a3665bcfd7282edf
|
502905b70f6c559f81539c72f5963527a6c287c5
|
/R/coltable.R
|
f18b84aae10ce0ee2eeb9ddfb9c88e8de0889c8e
|
[] |
no_license
|
aswansyahputra/SensoMineR
|
b524b84fd211a49d57a0ccb6735f6c8ae063783e
|
108ba9af4abec264d69285f021e52c523dde6c7f
|
refs/heads/master
| 2020-04-17T22:44:08.643840
| 2019-01-22T14:35:17
| 2019-01-22T14:35:17
| 167,006,296
| 0
| 0
| null | 2019-01-22T14:16:52
| 2019-01-22T14:16:51
| null |
UTF-8
|
R
| false
| false
| 12,387
|
r
|
coltable.R
|
#' Color the cells of a data frame according to 4 threshold levels
#'
#'
#' Return a colored display of a data frame according to 4 threshold levels.
#'
#' This function is very useful especially when there are a lot of values to
#' check.
#'
#' @param matrice a data frame (or a matrix) with only quantitative variables
#' @param col.mat a data frame (or a matrix) from which the cells of the
#' \code{matrice} data frame are colored; by default,
#' \code{col.mat}=\code{matrice}
#' @param nbrow the number of rows to be displayed (by default,
#' \code{nrow(matrice)})
#' @param nbcol the number of columns to be displayed (by default,
#' \code{ncol(matrice)})
#' @param level.lower the threshold below which cells are colored in
#' \code{col.lower}
#' @param col.lower the color used for \code{level.lower}
#' @param level.upper the threshold above which cells are colored in
#' \code{col.upper}
#' @param col.upper the color used for \code{level.upper}
#' @param cex cf. function \code{\link{par}} in the \pkg{graphics} package
#' @param nbdec the number of decimal places displayed
#' @param main.title title of the graph(s)
#' @param level.lower2 the threshold below which cells are colored in
#' \code{col.lower2}; this level should be less than level.lower
#' @param col.lower2 the color used for \code{level.lower2}
#' @param level.upper2 the threshold above which cells are colored in
#' \code{col.upper2}; this level should be greater than level.upper
#' @param col.upper2 the color used for \code{level.upper2}
#' @param novalue boolean, if TRUE the values are not written
#' @author F Husson, S Le
#' @keywords color
#' @examples
#'
#' ## Example 1
#' data(chocolates)
#' resdecat<-decat(sensochoc, formul = "~Product+Panelist", firstvar = 5,
#' graph = FALSE)
#' resaverage<-averagetable(sensochoc, formul = "~Product+Panelist",
#' firstvar = 5)
#' resaverage.sort = resaverage[rownames(magicsort(resdecat$tabT)),
#' colnames(magicsort(resdecat$tabT))]
#' coltable(resaverage.sort, magicsort(resdecat$tabT),
#' level.lower = -1.96, level.upper = 1.96,
#' main.title = "Average by chocolate")
#'
#' ## Example 3
#' \dontrun{
#' data(chocolates)
#' resperf<-paneliperf(sensochoc,
#' formul = "~Product+Panelist+Product:Panelist",
#' formul.j = "~Product", col.j = 1, firstvar = 5, lastvar = 12,
#' synthesis = FALSE, graph = FALSE)
#' resperfprob<-magicsort(resperf$prob.ind, method = "median")
#' coltable(resperfprob, level.lower = 0.05, level.upper = 1,
#' main.title = "P-value of the F-test (by panelist)")
#'
#' resperfr2<-magicsort(resperf$r2.ind, method = "median",
#' ascending = FALSE)
#' coltable(resperfr2, level.lower = 0.00, level.upper = 0.85,
#' main.title = "Adjusted R-square (by panelist)")
#' }
#'
#' @export coltable
# Draw 'matrice' as one or more coloured tables, colouring each cell by
# comparing the corresponding cell of 'col.mat' against the four thresholds
# level.lower2 < level.lower and level.upper < level.upper2.  Large tables
# are tiled into pages of nbrow x nbcol cells, each page on its own device.
coltable <-function(matrice,col.mat=matrice,nbrow=nrow(matrice),nbcol=ncol(matrice),level.lower=0.05,col.lower="mistyrose",level.upper=1.96,col.upper="lightblue",cex=0,nbdec=4,main.title=NULL,level.lower2=-1e10,col.lower2="red",level.upper2=1e10,col.upper2="blue",novalue=FALSE) {
################################################################
# fill: draw one page — the cell rectangles (coloured by threshold, grey for
# NA), the cell values (unless novalue) and the header row.  Coordinates are
# in the unit square; 'pol' is the character expansion to use.
fill <- function(matrice,col.mat=matrice,nbrow,nbcol,pol,level.lower,col.lower="mistyrose",level.upper,col.upper="lightblue",main.title=NULL,level.lower2,col.lower2,level.upper2,col.upper2){
  #cadre (frame: row-name column and header row in white)
  dim1 <- dim(matrice)[1]
  dim2 <- dim(matrice)[2]
  for (i in 0:dim1) rect(0,1-i*(1/(nbrow+1)),1/(nbcol+1),1-(i+1)*(1/(nbrow+1)),col="white",border=NULL)
  for (j in 1:(dim2-1)) rect(j*(1/(nbcol+1)),1,(j+1)*(1/(nbcol+1)),1-(1/(nbrow+1)),col="white",border=NULL)
  # colour each data cell: grey for NA, then extreme-low, low, extreme-high,
  # high, and white otherwise (checked in that order)
  for (j in 1:(dim2-1)){ for (i in 1:dim1){
    if (is.na(col.mat[i,j+1])) { rect(j*(1/(nbcol+1)),1-i*(1/(nbrow+1)),(j+1)*(1/(nbcol+1)),1-(i+1)*(1/(nbrow+1)),col="gray",border=NULL) }
    else { if (col.mat[i,j+1]<=level.lower2) { rect(j*(1/(nbcol+1)),1-i*(1/(nbrow+1)),(j+1)*(1/(nbcol+1)),1-(i+1)*(1/(nbrow+1)),col=col.lower2,border=NULL)}
    else {if (col.mat[i,j+1]<=level.lower) { rect(j*(1/(nbcol+1)),1-i*(1/(nbrow+1)),(j+1)*(1/(nbcol+1)),1-(i+1)*(1/(nbrow+1)),col=col.lower,border=NULL)}
    else { if (col.mat[i,j+1]>=level.upper2) { rect(j*(1/(nbcol+1)),1-i*(1/(nbrow+1)),(j+1)*(1/(nbcol+1)),1-(i+1)*(1/(nbrow+1)),col=col.upper2,border=NULL)}
    else { if (col.mat[i,j+1]>=level.upper) {rect(j*(1/(nbcol+1)),1-i*(1/(nbrow+1)),(j+1)*(1/(nbcol+1)),1-(i+1)*(1/(nbrow+1)),col=col.upper,border=NULL)}
    else rect(j*(1/(nbcol+1)),1-i*(1/(nbrow+1)),(j+1)*(1/(nbcol+1)),1-(i+1)*(1/(nbrow+1)),col="white",border=NULL)
    }
    }
    }
    }
  }
  }
  #fill (write the row names and, unless novalue, the cell values)
  dim1 <- dim(matrice)[1]
  dim2 <- dim(matrice)[2]
  for (i in 1:dim1) text((0.5)*(1/(nbcol+1)),1-(i+0.5)*(1/(nbrow+1)),matrice[i,1],cex=pol)
  if (!novalue){
    for (i in 1:dim1){
      for (j in 1:(dim2-1)) text((j+0.5)*(1/(nbcol+1)),1-(i+0.5)*(1/(nbrow+1)),matrice[i,j+1],cex=pol)
    }
  }
  #titre (header row: column names)
  for (j in 0:nbcol) text((j+0.5)*(1/(nbcol+1)),1-(1/(nbrow+1))/2,names(matrice)[j+1],cex=pol)
}
################################################################
################################################################
# police: open a new device, start the page with the title, and compute a
# character size that fits the widest label (including a numeric template
# with nbdec decimals) in one cell.  Returns the size and saved par().
police <- function(matrice,nbrow,nbcol,nbdec) {
  dev.new()
  def.par <- par(no.readonly = TRUE)
  par(mar=c(0,0,2,0))
  plot.new(); title(main=main.title);
  a <- c(rownames(matrice),colnames(matrice))
  nb=NULL
  for (i in 1:nbdec) nb <- paste(nb,"0",sep="")
  nb <- paste(nb,"0.e-00")
  a <- c(a,nb)
  b <- min(nbcol,15)
  return(list(size=(round((1/(b+1))/max(strwidth(a)),2)*100-5)/100,def.par=def.par))
}
################################################################
# ---- validation and preparation ----
if (sum(dim(matrice)==dim(col.mat))!=2) stop("The matrices matrice and col.mat should have the same dimensions")
if (level.lower2 > level.lower) stop("level.lower2 should be less than level.lower")
if (level.upper2 < level.upper) stop("level.upper2 should be greater than level.upper")
if (is.numeric(matrice)) matrice <- signif(matrice,nbdec)
# prepend the row names as a first display column
matrice=cbind.data.frame(rownames(matrice),matrice)
if (is.numeric(col.mat)) col.mat <- signif(col.mat,nbdec)
col.mat=cbind.data.frame(rownames(col.mat),col.mat)
colnames(matrice)[1]=" "
dim1 <- nrow(matrice)
dim2 <- ncol(matrice)
dim2 <- dim2-1
# size==0 means "auto": computed once by police() on the first page
size <- cex
if (nbrow>dim1){ nbrow <- dim1 }
if (nbcol>dim2){ nbcol <- dim2 }
# ---- paging: two near-identical branches depending on whether the column
# count divides evenly into pages of nbcol; within each, an extra partial
# page handles leftover rows ----
if (dim2%/%nbcol==dim2/nbcol) {
  for (j in 0:(dim2%/%nbcol-1)) {
   for (i in 0:(dim1%/%nbrow-1)){
     A <- data.frame(matrice[(i*nbrow+1):((i+1)*nbrow),1])
     names(A)=names(matrice)[1]
     B <- matrice[(i*nbrow+1):((i+1)*nbrow),(1+j*nbcol+1):(1+(j+1)*nbcol)]
     B <- cbind(A,B)
     A.col <- data.frame(col.mat[(i*nbrow+1):((i+1)*nbrow),1])
     B.col <- col.mat[(i*nbrow+1):((i+1)*nbrow),(1+j*nbcol+1):(1+(j+1)*nbcol)]
     B.col <- cbind(A.col,B.col)
     if (size==0) {
       pol <- police(matrice,nbrow,nbcol,nbdec)
       size <- pol$size
       def.par <- pol$def.par
     } else{
       dev.new()
       def.par <- par(no.readonly = TRUE)
       par(mar=c(0,0,2,0))
       plot.new(); title(main=main.title);
     }
     fill(B,B.col,nbrow,nbcol,size,level.lower,col.lower,level.upper,col.upper,main.title=main.title,level.lower2,col.lower2,level.upper2,col.upper2)
     par(def.par)
   }
   if ((dim1%/%nbrow)*nbrow != dim1){
     # partial page for the leftover rows
     A<-data.frame(matrice[(dim1%/%nbrow*nbrow+1):dim1,1])
     names(A)=names(matrice)[1]
     B<-data.frame(matrice[(dim1%/%nbrow*nbrow+1):dim1,(1+j*nbcol+1):(1+(j+1)*nbcol)])
     names(B)=names(matrice)[(1+j*nbcol+1):(1+(j+1)*nbcol)]
     B<-cbind(A,B)
     A.col<-data.frame(col.mat[(dim1%/%nbrow*nbrow+1):dim1,1])
     B.col<-data.frame(col.mat[(dim1%/%nbrow*nbrow+1):dim1,(1+j*nbcol+1):(1+(j+1)*nbcol)])
     B.col<-cbind(A.col,B.col)
     if (size==0) {
       pol <- police(matrice,nbrow,nbcol,nbdec)
       size <- pol$size
       def.par <- pol$def.par
     } else{
       dev.new()
       def.par <- par(no.readonly = TRUE)
       par(mar=c(0,0,2,0))
       plot.new(); title(main=main.title);
     }
     fill(B,B.col,nbrow,nbcol,size,level.lower,col.lower,level.upper,col.upper,main.title=main.title,level.lower2,col.lower2,level.upper2,col.upper2)
     par(def.par)
   }
  }
}
else {
  for (j in 0:(dim2%/%nbcol-1)){ #blocs de descripteurs entiers (full column pages)
   for (i in 0:(dim1%/%nbrow-1)){ #blocs de juges entiers (full row pages)
     A<-data.frame(matrice[(i*nbrow+1):((i+1)*nbrow),1])
     names(A)=names(matrice)[1]
     B<-matrice[(i*nbrow+1):((i+1)*nbrow),(1+j*nbcol+1):(1+(j+1)*nbcol)]
     B<-cbind(A,B)
     A.col<-data.frame(col.mat[(i*nbrow+1):((i+1)*nbrow),1])
     B.col<-col.mat[(i*nbrow+1):((i+1)*nbrow),(1+j*nbcol+1):(1+(j+1)*nbcol)]
     B.col<-cbind(A.col,B.col)
     if (size==0) {
       pol <- police(matrice,nbrow,nbcol,nbdec)
       size <- pol$size
       def.par <- pol$def.par
     } else{
       dev.new()
       def.par <- par(no.readonly = TRUE)
       par(mar=c(0,0,2,0))
       plot.new(); title(main=main.title);
     }
     fill(B,B.col,nbrow,nbcol,size,level.lower,col.lower,level.upper,col.upper,main.title=main.title,level.lower2,col.lower2,level.upper2,col.upper2)
     par(def.par)
   }
   if ((dim1%/%nbrow)*nbrow != dim1){
     # partial page for the leftover rows of this column page
     A<-data.frame(matrice[(dim1%/%nbrow*nbrow+1):dim1,1])
     names(A)=names(matrice)[1]
     B<-matrice[(dim1%/%nbrow*nbrow+1):dim1,(1+j*nbcol+1):(1+(j+1)*nbcol)]
     B<-cbind(A,B)
     A.col<-data.frame(col.mat[(dim1%/%nbrow*nbrow+1):dim1,1])
     B.col<-col.mat[(dim1%/%nbrow*nbrow+1):dim1,(1+j*nbcol+1):(1+(j+1)*nbcol)]
     B.col<-cbind(A.col,B.col)
     if (size==0) {
       pol <- police(matrice,nbrow,nbcol,nbdec)
       size <- pol$size
       def.par <- pol$def.par
     } else{
       dev.new()
       def.par <- par(no.readonly = TRUE)
       par(mar=c(0,0,2,0))
       plot.new(); title(main=main.title);
     }
     fill(B,B.col,nbrow,nbcol,size,level.lower,col.lower,level.upper,col.upper,main.title=main.title,level.lower2,col.lower2,level.upper2,col.upper2)
     par(def.par)
   }
  }
  for (i in 0:(dim1%/%nbrow-1)){#pour les blocs d'individus entiers les variables qui manquent (leftover columns, full row pages)
    A<-data.frame(matrice[(i*nbrow+1):((i+1)*nbrow),1])
    names(A)=names(matrice)[1]
    B<-matrice[(i*nbrow+1):((i+1)*nbrow),(1+dim2%/%nbcol*nbcol):dim2+1]
    if (is.null(dim(B))) B<-data.frame(B)
    names(B)=names(matrice)[(1+dim2%/%nbcol*nbcol):dim2+1]
    B<-cbind(A,B)
    A.col<-data.frame(col.mat[(i*nbrow+1):((i+1)*nbrow),1])
    B.col<-col.mat[(i*nbrow+1):((i+1)*nbrow),(1+dim2%/%nbcol*nbcol):dim2+1]
    if (is.null(dim(B))) B.col<-data.frame(B.col)
    B.col<-cbind(A.col,B.col)
    if (size==0) {
      pol <- police(matrice,nbrow,nbcol,nbdec)
      size <- pol$size
      def.par <- pol$def.par
    } else{
      dev.new()
      def.par <- par(no.readonly = TRUE)
      par(mar=c(0,0,2,0))
      plot.new(); title(main=main.title);
    }
    fill(B,B.col,nbrow,nbcol,size,level.lower,col.lower,level.upper,col.upper,main.title=main.title,level.lower2,col.lower2,level.upper2,col.upper2)
    par(def.par)
  }
  if ((dim1%/%nbrow)*nbrow != dim1){
    A<-data.frame(matrice[(dim1%/%nbrow*nbrow+1):dim1,1]) #les individus qui manquent et les variables qui manquent (leftover rows and columns)
    names(A)=names(matrice)[1]
    B<-data.frame(matrice[(dim1%/%nbrow*nbrow+1):dim1,(1+dim2%/%nbcol*nbcol):dim2+1])
    if (is.null(dim(B))) B<-data.frame(B)
    names(B)=names(matrice)[(1+dim2%/%nbcol*nbcol):dim2+1]
    B<-cbind(A,B)
    A.col<-data.frame(col.mat[(dim1%/%nbrow*nbrow+1):dim1,1]) #les individus qui manquent et les variables qui manquent
    B.col<-data.frame(col.mat[(dim1%/%nbrow*nbrow+1):dim1,(1+dim2%/%nbcol*nbcol):dim2+1])
    if (is.null(dim(B))) B.col<-data.frame(B.col)
    names(B.col)=names(matrice)[(1+dim2%/%nbcol*nbcol):dim2+1]
    B.col<-cbind(A.col,B.col)
    if (size==0) {
      pol <- police(matrice,nbrow,nbcol,nbdec)
      size <- pol$size
      def.par <- pol$def.par
    } else{
      dev.new()
      def.par <- par(no.readonly = TRUE)
      par(mar=c(0,0,2,0))
      plot.new(); title(main=main.title)
    }
    fill(B,B.col,nbrow,nbcol,size,level.lower,col.lower,level.upper,col.upper,main.title=main.title,level.lower2,col.lower2,level.upper2,col.upper2)
    par(def.par)
  }
}
par(def.par)
}
|
1de97b0cedcb75ff9a985f5fd6f4521f4ad32f08
|
359734ced390a49899f91dc6b1e7ac27d724f3da
|
/scripts/Mapping_example.R
|
c938ea9213fcffc793858874b9bdfc5ced6b68bf
|
[] |
no_license
|
QFCatMSU/R-Mapping-Material
|
5997f3c8a74167e76fc401cc18c01ac3ba2989a2
|
7579d56ac35e09e6a0feb9b6152e49a30c722bde
|
refs/heads/master
| 2022-04-19T11:20:24.548009
| 2020-04-21T18:43:25
| 2020-04-21T18:43:25
| 254,618,906
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,225
|
r
|
Mapping_example.R
|
# Mapping example: build interactive point maps of temperature-logger
# coordinates, first with leaflet directly, then with the higher-level
# mapview package (toggles, layers, clickable attribute tables).
{
  # execute the lines of code from reference.r
  source(file="scripts/reference.R");
  # read in CSV files
  data <- read.csv(file="data/TempLogger_Coordinates.csv")
}
# this code makes a map.... but mapview includes
# toggles, multiple layers, and clickable attribute tables,
# all of which leaflet can't do without additional code.
v1 = leaflet() %>%
  addTiles() %>%
  addMarkers(lng=data$Longitude, lat=data$Latitude)
print(v1)
## This map made with just leafly gives you a fine interactive map, but leaves much to be desired
# the mapview package can do more with less lines of code
# crs=4269 is NAD83 geographic coordinates
print(mapview(data, xcol="Longitude", ycol="Latitude", crs=4269, grid=FALSE));
# convert the data frame to an sf (simple features) point object
templog_location <- st_as_sf(data, coords=c("Longitude", "Latitude"), crs=4269)
print(mapview(templog_location));
# mapshot is how you make a static map! Not very intuitive coding for the function, however.
# Tutorial: https://r-spatial.github.io/mapview/reference/mapshot.html
# templog_map <- mapview(templog_location, map.types = "Esri.NatGeoWorldMap")
# mapshot(templog_map, file = paste0(getwd(), "/map.png"),
# remove_controls = c("zoomControl", "homeButton", "layersControl"))
# Play with different base layer map types
print(mapview(templog_location, map.types = "Stamen.Toner"))
print(mapview(templog_location, map.types = "Stamen.Terrain"))
print(mapview(templog_location, map.types = "Esri.NatGeoWorldMap"))
# or combine them all in a vector wrapper so we can toggle between them all on the same map
print(mapview(templog_location, map.types = c("Stamen.Toner", "Stamen.Terrain", "Esri.NatGeoWorldMap")))
## find different map types here:
### http://leaflet-extras.github.io/leaflet-providers/preview/
# map by variable with a third variable to color the points with zcol =
print(mapview(data, xcol="Longitude",
              ycol="Latitude",
              zcol = "Project",
              crs=4269, grid=FALSE,
              map.types = c("Stamen.Toner", "Stamen.Terrain", "Esri.NatGeoWorldMap")))
# Tutorial had a section on choropleths but it used census data that you needed an ID number for...
# You just have to apply for one and you'll get it, but I'm not so sure we want to use
# census data examples for our workshop...
8241a9481d8c76ad111a16321ca796e1bb3a5a69
|
960f4ee096f3179e51ea185d4eb9f4f48e5778f7
|
/Pokemon Final.R
|
77a2b52de72f7dbed66337dd71d777c65b2cbd89
|
[] |
no_license
|
tylerjnelson8/PokemonClassification_April2020
|
3adfa40a7e5858bb7a4b529342b11636c718ac5b
|
0d52af0db4dc8c402db6a9c1dd4b78ace850f626
|
refs/heads/master
| 2022-06-09T00:34:26.288159
| 2020-05-04T23:49:59
| 2020-05-04T23:49:59
| 261,076,762
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,123
|
r
|
Pokemon Final.R
|
#Using Multi-class and One-vs-Rest SVMs to Classify Pokemon by their Primary Type
#by: Tyler Nelson
# Pipeline: load labelled Pokemon images -> downscale -> augment (rotations,
# flips, shifts) -> vectorize -> PCA (70 PCs) -> fit one multiclass SVM and
# 18 one-vs-rest SVMs -> evaluate and visualize misclassifications.
# NOTE(review): rm(list = ls()) in a sourced script is an anti-pattern — it
# wipes the caller's workspace.
rm(list = ls())
library(EBImage) #used for image processing
library(OpenImageR) #used for image processing
library(reshape2) #used for data cleaning
library(ggplot2) #used for data visualization
library(dplyr) #used for data cleaning
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering algorithms & visualization
library(grid) #used for tuning SVM algorithm
library(gridExtra) #used for tuning
library(doBy) #used for data cleaning
library(imager) #core image processing functions
library(countcolors) #color counting functions
library(magick) #image processing functions
library(e1071) #Used for Principal Component Analysis
library(caTools) #other modeling functions tested
library(caret) #svm tuning functions
library(Morpho) #other image processing functions
#####
##Load Labels
setwd("H:/apps/xp/Desktop")
labels <- read_csv("pokemon.csv")
#Extract file names of .png Pokemon images
filenames <- list.files(path = "H:/apps/xp/Desktop/images/images/", pattern="*.png")
tib <- as_tibble(filenames)
#Load Images
setwd("H:/apps/xp/Desktop/images/images")
pics16 <- tib %>%
  filter(str_detect(path_ext(value),
                    fixed("png", ignore_case = TRUE))) %>%
  mutate(data = map(value, load.image))
#Clean up titles of names (strip the file extension)
pics16$value <- gsub("\\....","",pics16$value)#,0, str_locate("[.]"))
#Join file name with labels
pics16 <- left_join(pics16, labels, by=c("value"="Name"))
pics <- bind_rows(pics16)
#Scale images down to 60x60 images for speed purposes
pics$data<- lapply(pics$data, resize_halfXY)
#Augment data: 8 transformed copies of every image (rotations +/-30deg,
#horizontal/vertical mirror, 10px shifts in each direction)
pics_rot30 <- list()
pics_rotneg30 <- list()
pics_mirror <- list()
pics_flip <- list()
pics_shift10u <- list()
pics_shift10d <- list()
pics_shift10l <- list()
pics_shift10r <- list()
pics_rot30 <- lapply(pics$data, rotate_xy, angle= 30,cx=60,cy=60)
pics_rotneg30 <- lapply(pics$data, rotate_xy, angle= -30,cx=60,cy=60)
pics_mirror <- lapply(pics$data, imager::mirror, axis="y")
pics_flip <- lapply(pics$data, imager::mirror, axis="x")
pics_shift10u<- lapply(pics$data, imshift, delta_y=10)
pics_shift10d <- lapply(pics$data, imshift, delta_y=-10)
pics_shift10l <- lapply(pics$data, imshift, delta_x=-10)
pics_shift10r<- lapply(pics$data, imshift, delta_x=10)
#Merge augmented data with original data
data_aug <- list()
data_aug$data <- rbind(pics$data, pics_rot30, pics_rotneg30, pics_mirror, pics_flip, pics_shift10u, pics_shift10d,pics_shift10r, pics_shift10l)
#Save down image files as vectors
pics_vector <- list()
for(n in 1:length(data_aug$data)){
  pics_vector[[n]]<-as.vector(data_aug$data[[n]])
}
#Turn vectors into a matrix (one image per row)
pics_mat <- do.call(rbind,pics_vector)
pics_mat <- rbind(pics_mat)
#Clean the names of the types to all be lowercase
pics_df <-data.frame(type = tolower(pics$Type1), pics_mat)
#Remove the "A" alpha layer of .png images, only keeping the RGB channels + type tag
pics_df <- pics_df[,1:(length(pics_df)*.75+1)]
#Run PCA on the small images, keeping the top 70 Principal Components (80% of the variance)
pca_small <- prcomp(pics_df[,2:length(pics_df)], center = T, scale = F, rank=70)
#Save down to save ~20 minutes in runtime, given PCA has an O(mp^2n+p^3) algorithmic complexity
#save(pca_small, file = "pca_small.RData")
#load('pca_small.RData')
summary(pca_small)
screeplot(pca_small)
data_reduced <- data.frame(type=pics_df$type, pca_small$x)
#80/20 stratified train/test split on type
set.seed(804)
sample <- sample.split(data_reduced$type, SplitRatio = .8)
train <- subset(data_reduced, sample==T)
test <- subset(data_reduced, sample == F)
##Tune multiclass model with 10-fold cross validation across numerous gamma & cost variables
tuned_parameters <- tune.svm(type~., data = train, gamma = c(.05,1), cost = c(.1,.5,1))
summary(tuned_parameters)
model_multiclass <- svm(formula = type ~., data=train, gamma=2, cost=0.01)
plot(model_multiclass, data=train, PC1 ~ PC2)
#How'd we do on multi-class SVM?
#Train - 100%
confusionMatrix(train$type, predict(model_multiclass, newdata=train))
#Test - 14.5% (just guessing Water)
confusionMatrix(test$type, predict(model_multiclass, newdata=test))
model_svm <- list()
#load("model_svm.Rdata")
train_f <- list()
test_f <- list()
res_svm <- list()
res_svm_train <-list()
res_svm_test <- list()
#Calculate all 18 One vs Rest SVMs, by one-hot encoding each type
for( t in levels(pics_df$type)){
  train_f[[t]] <- train
  train_f[[t]]$type <- as.numeric(train$type==t)
  test_f[[t]] <- test
  test_f[[t]]$type <- as.numeric(test$type==t)
  model_svm[[t]] <- best.svm(formula = factor(type) ~., data =train_f[[t]],gamma=c(0.05,0.2,0.5,1,1.5,2), cost=c(0.01,1,4,10,20,30,60), probability=T)
  res_svm_train[[t]] <- predict(model_svm[[t]], newdata = train_f[[t]], type="class", probability=T)
  res_svm_test[[t]] <- predict(model_svm[[t]], newdata = test_f[[t]], type="class", probability =T)
}
#save(model_svm, file = "model_svm.RData")
#Plot "water" one-vs-rest SVM
# NOTE(review): 'model' is not defined anywhere in this script — this call
# will fail; presumably model_svm[['water']] was intended. Verify.
plot(model, data=train_f[['water']], PC11 ~ PC12)
#Training Data
tst <- as.data.frame(res_svm_train)
#tst_scale <- scale(tst)
#pick, per row, the type whose one-vs-rest SVM score is largest
tst2 <- colnames(tst)[apply(tst, 1,which.max)]
#How'd we do on the training data?
confusionMatrix(train$type, factor(tst2, levels=unique(train$type)))
#Test Data
tst <- as.data.frame(res_svm_test)
#tst_scale <- scale(tst)
tst2 <- colnames(tst)[apply(tst, 1,which.max)]
#How'd we do on the test data?
confusionMatrix(test$type, factor(tst2, levels=unique(train$type)))
#Visualize misclassified test images: reshape each 60x60x3 vector back into
#an image and title it with "true type / predicted type"
incorrect <- which(test$type != factor(tst2))
test_im <- subset(pics_df, sample==F)[incorrect,]
dev.off()
par(mar=c(.1,.1,.1,.1))
layout(matrix(1:35,nr=5),1,1)
for(i in 1:35){
  test_im[i,2:10801]%>%
    as.numeric()%>%
    array(c(60,60,1,3))%>%
    as.cimg()%>%
    plot(axes=F, main=paste("","",test$type[incorrect[i]], factor(tst2)[incorrect[i]], sep="\n"))
}
|
90c7b01bb0c3f4768280c5def6b683aa7bdbecbb
|
4fcf313905a8be596449cf2d78a8f2f35abdc2ae
|
/tests/generateRd.R
|
32f57e0c2bfa2c5d83de04c73867db25f5ae9290
|
[] |
no_license
|
wangdi2014/gfplots
|
0ceb7dbc0c9fcaf3d31344c2b6ee03cf5eb4fa21
|
2e9bbe64f6bf4b743a4387c326c00e7e08d53998
|
refs/heads/master
| 2020-06-07T05:02:38.624900
| 2018-06-14T14:00:24
| 2018-06-14T14:00:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33
|
r
|
generateRd.R
|
# Regenerate the package's .Rd help files from roxygen comments.
# Assumes the working directory is the package root — TODO confirm.
library(roxygen2)
roxygenise()
|
24bede4045c6f0de67d0ac2ae782002bf14b0253
|
b15b9944047fc333f2068d0883d511d295da7ad9
|
/R/methods.R
|
f4274709d440098675a7fcd6fd8d6902e6dc7656
|
[] |
no_license
|
cran/outForest
|
12b9905b9692de1ea9669f153f4d630b7cc225e8
|
8dd25dbef51a4549782cea357e0776ea9e64d7c1
|
refs/heads/master
| 2023-05-25T16:11:19.136495
| 2023-05-21T17:50:02
| 2023-05-21T17:50:02
| 236,635,053
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,050
|
r
|
methods.R
|
#' Prints outForest
#'
#' Print method for an object of class "outForest". Shows the classes of the
#' object followed by a per-variable table of detected outlier counts.
#'
#' @param x An object of class "outForest".
#' @param ... Further arguments passed from other methods.
#' @returns Invisibly, the input is returned.
#' @export
#' @examples
#' x <- outForest(iris)
#' x
print.outForest <- function(x, ...) {
  # Describe the object's class chain, e.g. "outForest and list".
  class_label <- paste(class(x), collapse = " and ")
  cat("I am an object of class(es)", class_label, "\n\n")
  # One-column matrix so the counts print with a header.
  count_table <- cbind(`Number of outliers` = x$n_outliers)
  cat("The following number of outliers have been identified:\n\n")
  print(count_table)
  invisible(x)
}
#' Summarizes outForest
#'
#' Summary method for an object of class "outForest".
#' Besides the number of outliers per variable, it also shows the worst outliers.
#'
#' @param object An object of class "outForest".
#' @param ... Further arguments passed from other methods.
#' @returns Invisibly, the value of the last print/cat call.
#' @export
#' @examples
#' out <- outForest(iris, seed = 34, verbose = 0)
#' summary(out)
summary.outForest <- function(object, ...) {
  # Fetch the (score-sorted) outlier table once and branch on whether any exist.
  worst <- outliers(object)
  if (nrow(worst) > 0L) {
    cat("The following outlier counts have been detected:\n\n")
    print(cbind(`Number of outliers` = object$n_outliers))
    cat("\nThese are the worst outliers:\n\n")
    print(utils::head(worst))
  } else {
    cat("Congratulations, no outliers found.")
  }
}
#' Plots outForest
#'
#' This function can plot aspects of an "outForest" object.
#' - With `what = "counts"`, the number of outliers per variable is visualized as a
#'   barplot.
#' - With `what = "scores"`, outlier scores (i.e., the scaled difference between
#'   predicted and observed value) are shown as scatterplot per variable.
#'
#' @param x An object of class "outForest".
#' @param what What should be plotted? Either `"counts"` (the default) or `"scores"`.
#' @param ... Arguments passed to [graphics::barplot()] or [graphics::stripchart()].
#' @returns The value of the final graphics call; called for its plotting side effect.
#' @export
#' @examples
#' irisWithOutliers <- generateOutliers(iris, seed = 345)
#' x <- outForest(irisWithOutliers, verbose = 0)
#' plot(x)
#' plot(x, what = "scores")
plot.outForest <- function(x, what = c("counts", "scores"), ...) {
  # Validate `what` against the two allowed choices ("counts" is the default).
  what <- match.arg(what)
  if (what == "counts") {
    # Horizontal barplot of per-variable outlier counts. The y-axis labels are
    # suppressed (yaxt = "n") and drawn manually below so that long variable
    # names fit inside/over the bars. barplot() returns the bar midpoints.
    yy <- graphics::barplot(
      x$n_outliers,
      horiz = TRUE,
      yaxt = "n",
      main = "Number of outliers per variable",
      xlab = "Count",
      ...
    )
    # Left-aligned variable names at the bar midpoints.
    graphics::text(0.1, yy, names(x$n_outliers), adj = 0)
  } else {
    # "scores": needs at least one outlier to plot.
    if (nrow(outliers(x)) == 0L) {
      stop("No outlier to plot")
    }
    # One strip of outlier scores per affected variable (column `col`).
    graphics::stripchart(
      score ~ col,
      data = outliers(x),
      vertical = TRUE,
      pch = 4,
      las = 2,
      cex.axis = 0.7,
      ...
    )
    # Dashed reference lines at +/- the outlier threshold.
    graphics::abline(h = c(-1, 1) * outliers(x)$threshold[1], lty = 2)
  }
}
#' Type Check
#'
#' Tests whether an object inherits from class "outForest".
#'
#' @param x Any object.
#' @returns A logical vector of length one: `TRUE` if `x` is an "outForest".
#' @export
#' @examples
#' a <- outForest(iris)
#' is.outForest(a)
#' is.outForest("a")
is.outForest <- function(x) {
  # inherits() also matches when "outForest" is only one of several classes.
  inherits(x, what = "outForest")
}
#' Extracts Data
#'
#' Extracts data with optionally replaced outliers from object of class "outForest".
#'
#' @param object An object of class "outForest".
#' @param ... Arguments passed from or to other methods.
#' @returns A `data.frame`.
#' @export
#' @examples
#' x <- outForest(iris)
#' head(Data(x))
Data <- function(object, ...) {
  # S3 generic: dispatch on the class of `object`.
  UseMethod("Data")
}
#' @describeIn Data Default method not implemented yet.
#' @export
Data.default <- function(object, ...) {
  # Only "outForest" objects are supported so far; fail loudly for anything else.
  stop("No default method available yet.")
}
#' @describeIn Data Extract data from "outForest" object.
#' @export
Data.outForest <- function(object, ...) {
  # The (possibly outlier-replaced) data is stored in the "Data" element.
  object$Data
}
#' Extracts Outliers
#'
#' Extracts outliers from object of class "outForest".
#' The outliers are sorted by their absolute score in descending fashion.
#'
#' @param object An object of class "outForest".
#' @param ... Arguments passed from or to other methods.
#' @returns
#' A `data.frame` with one row per outlier. The columns are as follows:
#' - `row`, `col`: Row and column in original data with outlier.
#' - `observed`: Observed value.
#' - `predicted`: Predicted value.
#' - `rmse`: Scaling factor used to normalize the difference between observed
#'   and predicted.
#' - `score`: Outlier score defined as (observed-predicted)/RMSE.
#' - `threshold`: Threshold above which an outlier score counts as outlier.
#' - `replacement`: Value used to replace observed value.
#' @export
#' @examples
#' x <- outForest(iris)
#' outliers(x)
outliers <- function(object, ...) {
  # S3 generic: dispatch on the class of `object`.
  UseMethod("outliers")
}
#' @describeIn outliers Default method not implemented yet.
#' @export
outliers.default <- function(object, ...) {
  # Only "outForest" objects are supported so far; fail loudly for anything else.
  stop("No default method available yet.")
}
#' @describeIn outliers Extract outliers from outForest object.
#' @export
outliers.outForest <- function(object, ...) {
  # The pre-computed outlier table is stored in the "outliers" element.
  object$outliers
}
|
349b0f98ae0c7617226f1636799a080b6e735054
|
ea9fbb9669d73bb53b944b33914c1ddbbe1e7cb3
|
/script_diversity_indices.R
|
7a16b3f7fc3a2dd34ff9382d2c1dbf647c7b7c74
|
[] |
no_license
|
mizubuti/R_code
|
33ebdbbc85e2f282cee2102e2568928f8087652c
|
05ad1e5f7124d097ac6969e768bd8c55a0d562d3
|
refs/heads/master
| 2021-01-02T09:08:03.058788
| 2016-12-30T16:52:55
| 2016-12-30T16:52:55
| 34,257,584
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,434
|
r
|
script_diversity_indices.R
|
# ==========================================================
# Scripts for diversity indices
# ======================================= E. M. 17/09/2016 =
# This script uses the "vegan" and "vegetarian" packages.
# Data layout:
# prepare the data set with one population per row and the
# genotype/phenotype frequencies in the columns.
# example: 3 1 18 3 5 1
library(vegan)
library(vegetarian)
# TODO: replace this placeholder path with the real input file.
dados1 <- read.table("/home/mizubuti/arquivos/R_code/bla_bla_bla.txt")
# Shannon-Wiener / Simpson (Gini-Simpson) / Stoddart & Taylor
# ============ Shannon-Wiener index (Hill order q = 1), with 1000 bootstrap iterations
H(dados1, lev="alpha", q = 1, boot=TRUE, boot.arg=list(num.iter=1000))
# ============ Simpson index (q = 2)
H(dados1, lev="alpha", q = 2, boot=TRUE, boot.arg=list(num.iter=1000))
# ============ Gini-Simpson index (q = 2 with gini=TRUE)
H(dados1, lev="alpha", q = 2, gini=TRUE, boot=TRUE, boot.arg=list(num.iter=1000))
# ============ Stoddart & Taylor's G index = effective number based on Simpson
d(dados1, lev="alpha", q = 2, boot=TRUE, boot.arg=list(num.iter=1000))
# 'Numbers equivalents' for alpha, beta and gamma diversity indices
# ============ Hill's N1 --> effective number based on Shannon
d(dados1, lev="alpha", q = 1, boot=TRUE, boot.arg=list(num.iter=1000))
# ============ Hill's N2 --> effective number based on Simpson = Stoddart & Taylor
d(dados1, lev="alpha", q = 2, boot=TRUE, boot.arg=list(num.iter=1000))
|
b27a1e50a15009c7e01ff9cac41cd3c775e6882b
|
4974a00cd842967834be1c62b85c1dbe08b788fd
|
/man/getParameterSet.Rd
|
740971cb3c32d32a95e28183e252417e38a1b62f
|
[] |
no_license
|
cran/plethem
|
afcdc38f32a0bfc4f55eac7b8f4a73b261bfb58e
|
fbbd513ab824c0d378b130a25970dcdfca2dfd9a
|
refs/heads/master
| 2021-06-27T00:54:23.595226
| 2020-11-04T14:50:07
| 2020-11-04T14:50:07
| 163,861,169
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 440
|
rd
|
getParameterSet.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pbpkUtils.R
\name{getParameterSet}
\alias{getParameterSet}
\title{Get the values for parameters in a given set}
\usage{
getParameterSet(set_type = "physio", id = 1)
}
\arguments{
\item{set_type}{Either "physio", "chem" or "expo"}
\item{id}{integer id for the required set}
}
\description{
Get all the parameter values for a given dataset and id
}
|
a8fd123026a5a1af030a026a8c0ba59924f1a860
|
076291ef89acc7c93bd777dfe280fbe9d401174c
|
/switching_ise.R
|
c13a9b79a932855c21353cb42d4c0d900e8c47d3
|
[] |
no_license
|
vkatkade/R-case
|
c87f8f7d1c0bbd23cc0e7a74cc608af1d783725c
|
fc47ffb04fb744cea404b00dd80e5dbc289d2b9b
|
refs/heads/master
| 2021-01-17T11:57:49.453071
| 2013-09-01T05:39:37
| 2013-09-01T05:39:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,942
|
r
|
switching_ise.R
|
# Script to determine common customers and their respective bookings amongst Switching and ISE customers
# Copyright (c) Vaibhav Katkade - August 2013
setwd("/Users/vkatkade/Desktop")
library(plyr)
# Input extracts: ISE bookings and FY Catalyst-6K bookings.
cise <- read.csv("CISE.csv")
c6k <- read.csv("fyc6k.csv")
# Remove all the adjustments and disti stock, which have negative customer IDs.
cise_filt <- subset(cise, cise$End.Customer.Global.Ultimate.Company.Target.ID>0)
c6k_filt <- subset(c6k, c6k$End.Customer.Global.Ultimate.Company.Target.ID>0)
# Keep only supervisor products whose Product ID contains "S2T".
c6k_filt_s2t <- subset(c6k_filt, grepl("S2T", c6k_filt$Product.ID))
# Drop the large intermediate frames to free memory.
rm(cise)
rm(c6k)
rm(c6k_filt)
# Order both extracts by customer ID.
c6k_ord<-c6k_filt_s2t[with(c6k_filt_s2t, order(End.Customer.Global.Ultimate.Company.Target.ID)),]
cise_ord<-cise_filt[with(cise_filt, order(End.Customer.Global.Ultimate.Company.Target.ID)),]
rm(c6k_filt_s2t)
rm(cise_filt)
# Keep only the columns of interest, with shorter names.
c6kbook <- data.frame(c6k_ord$End.Customer.Global.Ultimate.Company.Target.ID, c6k_ord$End.Customer.Global.Ultimate.Name, c6k_ord$Cisco.Bookings.Net, c6k_ord$Cisco.Bookings.Quantity)
colnames(c6kbook) <- c("CustID", "CustName", "Bookings", "Quantity")
cisebook <- data.frame(cise_ord$End.Customer.Global.Ultimate.Company.Target.ID, cise_ord$End.Customer.Global.Ultimate.Name, cise_ord$Product.Bookings.Net, cise_ord$Product.Bookings.Quantity)
colnames(cisebook) <- c("CustID", "CustName", "Bookings", "Quantity")
# Aggregate the orders (bookings and quantity) by customer ID/name.
c6kagg<-ddply(c6kbook, .(CustID, CustName), summarize, Bookings=sum(Bookings), Quantity=sum(Quantity))
ciseagg<-ddply(cisebook, .(CustID, CustName), summarize, Bookings=sum(Bookings), Quantity=sum(Quantity))
# Inner-join on CustID: only customers present in BOTH extracts survive.
switchise<-merge(ciseagg, c6kagg, by.x="CustID", by.y="CustID")
# Keep the ISE-side name; relabel the two bookings columns.
switchise <- data.frame(switchise$CustID, switchise$CustName.x, switchise$Bookings.x, switchise$Bookings.y)
colnames(switchise) <- c("CustID", "CustName", "ISEBookings", "SupBookings")
ee9f3ba40d089e2b1928d273408c497a889591da
|
b545da725d70f13c023f8b34660b46536a27288d
|
/older_files/future_processing.R
|
9a5d70a871d78710d2e42c4b6c0f2a1b6767f321
|
[] |
no_license
|
baeolophus/ou-grassland-bird-survey
|
b64849e5b7c3bf63e1854da5454d3295a4df0709
|
5aa9523a3d09d107a7d92140de7fa8ec62fe411a
|
refs/heads/master
| 2020-04-13T05:23:20.801879
| 2019-05-13T18:59:49
| 2019-05-13T18:59:49
| 68,133,712
| 0
| 1
| null | 2016-09-29T21:01:03
| 2016-09-13T18:01:07
|
R
|
UTF-8
|
R
| false
| false
| 2,284
|
r
|
future_processing.R
|
##################################
# Process the future bioclim layers the same way as bioclim: crop to the study
# area, reproject to the NLCD/census grid, mask to the Oklahoma state polygon,
# and save the result as per-layer GeoTIFFs.
setwd("/data/grassland_ensemble")
library(raster)
library(rgdal)
# Create temporary raster files on a large drive because they occupy 10-30 GB.
rasterOptions()$tmpdir
rasterOptions(tmpdir=paste0(getwd(),
                            "/rastertemp"))
# Import the future bioclim GeoTIFFs from the bc45z folder.
future_list <- list.files(path = file.path(getwd(),
                                           "bc45z"),
                          pattern = "tif$",
                          full.names = TRUE)
# Load each file into its own variable, named after the file stem
# (splitting the path at "/" and "." to drop the folder and extension).
for(i in future_list) { assign(unlist(strsplit(i,
                                               "[./]"))[5],
                               raster(i)) }
# Collect every RasterLayer currently in the workspace and stack them.
future <- as.list(ls()[sapply(ls(), function(x) class(get(x))) == 'RasterLayer'])
future_stack <- stack (lapply(future, get))
# WorldClim layers are lat/long WGS84 (http://www.worldclim.org/format).
crs(future_stack) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# Define a lat/long extent around the study area to get a smaller file.
studyarea.extent.latlong<-extent(-103,-94,
                                 33,38)
studyarea.future_stack<-crop(future_stack,
                             studyarea.extent.latlong)
# The census raster is in a different CRS with a different extent.
okcensus <- raster("sources/census_utm_30m.tif")
extent(okcensus)
extent(studyarea.future_stack)
# Reproject to the census raster's grid (UTM, 30 m, NLCD-derived).
utm.future <- projectRaster(from = studyarea.future_stack,
                            to = okcensus)
# Load the Oklahoma state polygon and transform it to the same UTM CRS.
state<-readOGR(dsn=getwd(),
               layer="ok_state_vector_smallest_pdf_3158")
state<-spTransform(x = state,
                   CRS(as.character("+proj=utm +zone=14 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"))
)
# Clip/mask the reprojected stack to the state boundary.
future_OK <- mask(utm.future,
                  state)
# Write each layer as its own GeoTIFF so it can be re-imported without reprocessing.
writeRaster(future_OK,
            filename = names(future_OK),
            format = "GTiff",
            bylayer = TRUE,
            overwrite = TRUE)
|
935204ee5966f11aec428d39e04aae849c0aa4a0
|
b061608d5d95a8b1c45a0f147db64fff399b9bb9
|
/Generate OD Matrix/Generate-OD-Matrix-undirected.R
|
a8a087cfe74db2246c696fcf7e95e3ed02ed45d3
|
[] |
no_license
|
ecoinformaticalab/city-networks
|
30556e9f28c6db06a8b7028de0347500677dda72
|
6063a281784ac0d605e93ebf51003199d197741b
|
refs/heads/master
| 2020-04-24T04:08:33.677690
| 2019-02-20T15:07:59
| 2019-02-20T15:07:59
| 171,692,867
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,101
|
r
|
Generate-OD-Matrix-undirected.R
|
# Generate an undirected origin-destination (OD) matrix from a trajectory list.
library(dplyr)
library(rstudioapi)

# Locate the directory containing this script (requires running inside RStudio).
path <- rstudioapi::getActiveDocumentContext()$path
folder <- gsub("Generate-OD-Matrix-undirected.R", "", path)

# "list.csv" holds the trajectory list: a column "Origin" with origin labels
# and a column "Destination" with destination labels.
read_file <- paste0(folder, "list.csv")
HW <- read.csv(read_file, header = TRUE)

# Collect every label appearing as an origin or destination into one column.
c1 <- data.frame(HW[, 1])
c2 <- data.frame(HW[, 2])
colnames(c1) <- c("labels")
colnames(c2) <- c("labels")
a <- bind_rows(c1, c2)
aa <- data.frame(a[, 1])
Nodes <- unique(aa)
colnames(Nodes) <- c("labels")

# Square count matrix with one row/column per unique label, initialised to 0.
M <- matrix(0, nrow = length(Nodes[, 1]), ncol = length(Nodes[, 1]))
colnames(M) <- Nodes[, "labels"]
rownames(M) <- Nodes[, "labels"]

# Count each trip in both directions so the matrix stays symmetric (undirected).
for (i in seq_len(nrow(HW))) {
  origin <- as.character(HW[i, "Origin"])
  destination <- as.character(HW[i, "Destination"])
  M[origin, destination] <- M[origin, destination] + 1
  M[destination, origin] <- M[destination, origin] + 1
}

# Bug fix: the original used paste() with its default sep = " ", which embedded
# a space in the output file name ("<folder> OD_Matrix_undirected.csv").
# paste0() writes "OD_Matrix_undirected.csv" directly inside `folder`,
# consistent with how the input path is built above.
write.csv(M, file = paste0(folder, "OD_Matrix_undirected.csv"))
|
196991433242b8d11ce6e261fe3b5a5623d0db3f
|
d3ed30ed9dc4d9f7b6f1184c81bac390927037ff
|
/sinha2017/brc_post_proc.r
|
5eb7e94b985c5e2d0d618eb1cb7cbd1a518d1a44
|
[] |
no_license
|
mikemc/mc_datasets_backup
|
3af6bc916ba8a682c730e38eb5a4aca9248d416a
|
50fc997d8bd8737ea9d87fad19222b22746a0d8a
|
refs/heads/master
| 2020-03-20T10:06:02.493285
| 2018-10-06T21:03:29
| 2018-10-06T21:03:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,188
|
r
|
brc_post_proc.r
|
# Load packages
library(phyloseq)
library(dada2); packageVersion("dada2")
## Paths
# Path for raw sequencing data and pipeline output
data.path <- "~/active_research/metagenomics_calibration/mbqc/data"
# Path for silva training data
silva.path <- '~/data/silva/dada2_format'
#### Get the sample data for incorporation into a phyloseq object
# Will include samples from all labs except HL-A. Samples from other labs not
# being used will be filtered automatically when merging into a phyloseq object.
sd.all <- readRDS(file.path(data.path, 'mbqc_sample_data.rds'))
## Determine which of the original sample-data fields to keep
# Helper predicates for classifying sample variables. A field "belongs to" a
# lab variable when each lab (row of the cross-table) shows at most one
# distinct non-zero level of that field, i.e. the field is determined by lab.
# All three read the global data frame `sd.all` defined above.
is.drylab.var <- function(field) {
  cross <- table(sd.all[, c('dry_lab', field)], useNA = 'no')
  all(rowSums(cross > 0) <= 1)
}
is.extlab.var <- function(field) {
  cross <- table(sd.all[, c('extraction_wetlab', field)], useNA = 'no')
  all(rowSums(cross > 0) <= 1)
}
is.seqlab.var <- function(field) {
  cross <- table(sd.all[, c('sequencing_wetlab', field)], useNA = 'no')
  all(rowSums(cross > 0) <= 1)
}
# Classify variables as dry lab, sequencing, or extraction. Seq and Ext may
# overlap b/c of the crude classification and fact that there is usually only
# local and central extraction. We also miss some extraction vars like kit
# maker and model.
fields <- colnames(sd.all)
drylab.vars <- fields[sapply(fields, is.drylab.var)]
seqlab.vars <- fields[sapply(fields, is.seqlab.var)]
extlab.vars <- fields[sapply(fields, is.extlab.var)]
wetlab.vars <- union(seqlab.vars, extlab.vars)
# Make sure dry and wetlab vars don't overlap (interactive check; expect empty)
intersect(drylab.vars, wetlab.vars)
# See what vars we didn't classify, and which ones we should get rid of
setdiff(fields, union(drylab.vars, wetlab.vars))
# Some of these are measures of diversity in the final processed community data
diversity.vars <- c("observed_species", "simpson_reciprocal", "chao1", "PD_whole_tree")
# Keep all except the drylab and diversity variables
final.vars <- setdiff(fields, union(drylab.vars, diversity.vars))
length(final.vars) # 49 variables remaining
## Build new sample data table
# Restrict to relevant vars and get rid of duplicate rows
sd1 <- unique(sd.all[,final.vars])
# Check that each Bioinformatics.ID now appears exactly once
a <- table(sd1$Bioinformatics.ID, useNA='ifany')
# 36 IDs appear twice
a[a>1]
# I think these are the samples that were extracted in lab A (or assigned to A
# but centrally extracted) and sequenced in both A and E. Let's check
# TODO: Ask MBQC about why these samples have duplicated Bioinf IDs
problem.ids <- names(a[a>1])
subset(sd1, Bioinformatics.ID %in% problem.ids,
       select=c(extraction_wetlab, sequencing_wetlab, blinded_lab))
# That seems to be the case. So we should be ok if we get rid of all the
# samples sequenced in A
sd2 <- subset_samples(sd1, sequencing_wetlab != 'HL-A')
a2 <- table(sd2$Bioinformatics.ID, useNA='ifany')
all(a2==1) # TRUE
# Pad the IDs to 10 characters and set as sample names
target.length <- 10
sd2$Bioinformatics.ID <- stringr::str_pad(sd2$Bioinformatics.ID,
                                          target.length, side='left',
                                          pad='0')
sample_names(sd2) <- sd2$Bioinformatics.ID
sampledata <- sd2
remove(sd.all, sd1, sd2)
saveRDS(sampledata, file.path(data.path, 'brc_dada_out', "sample_data.rds"))
#### Build sequence table for all specimens
## Build sequence table with chimeras present, merging one per-lab table each
seqtab.paths <- file.path(data.path, 'brc_dada_out', paste0('seqtab_', labs, '.rds'))
labs <- c('B', 'C', 'E', 'F', 'H', 'J', 'K', 'N')
seqtabs <- lapply(seqtab.paths, readRDS)
st.all <- do.call(mergeSequenceTables, seqtabs)
saveRDS(st.all, file.path(data.path, 'brc_dada_out', "seqtab_all.rds"))
print(paste(sum(st.all), 'reads across', nrow(st.all), 'samples and', ncol(st.all), 'ASVs'))
# [1] "77705284 reads across 1796 samples and 196025 ASVs"
## Remove chimeras
st <- removeBimeraDenovo(st.all, multithread=TRUE, verbose=TRUE)
saveRDS(st, file.path(data.path, 'brc_dada_out', "seqtab_all_nochim.rds"))
# st <- readRDS(file.path(data.path, 'brc_dada_out', "seqtab_all_nochim.rds"))
print(paste(sum(st), 'reads across', nrow(st), 'samples and', ncol(st), 'ASVs'))
# [1] "68881850 reads across 1796 samples and 31311 ASVs"
remove(seqtabs, st.all)
#### Filter samples and SVs
# Doing this now will speed up taxonomy assignment, since many ASVs have very
# low prevalence and/or total abundance. So let's just do something somewhat
# arbitrary but probably safe to reduce the number of ASVs.
asv.prev <- colSums(st>0)
# Over half of ASVs have prevalence = 1. Filtering to prevalence >= 20 reduces
# the number of SVs from ~30,000 to ~5,000.
# NOTE(review): qplot() needs ggplot2, which is not loaded in this script —
# presumably attached elsewhere; verify.
qplot(seq(30), sapply(seq(30), function(min.prev) sum(asv.prev>=min.prev)),
      xlab="Minimum Prevalence", ylab="Number of ASVs")
# qplot(seq(20), sapply(seq(20), function(min.prev) sum(st[,asv.prev>=min.prev])),
#       xlab="Minimum Prevalence", ylab="Total Reads")
# Filtering to prev >= 20 keeps ~99% of reads
sum(st[,asv.prev>=20]) / sum(st) # 0.9892816
# NOTE(review): the comments say prev >= 20 but the line below uses a strict
# > 20, which also drops ASVs with prevalence exactly 20 — confirm intent.
st.filt <- st[,asv.prev>20]
saveRDS(st.filt, file.path(data.path, 'brc_dada_out', "seqtab_filt.rds"))
#### Assign taxonomy
system.time(tax <- assignTaxonomy(st.filt, file.path(silva.path,
                                                     "silva_nr_v128_train_set.fa.gz"), multithread=TRUE))
# Took 13 minutes to assign tax for ~5000 ASVs
saveRDS(tax, file.path(data.path, 'brc_dada_out', "taxonomy_filt.rds"))
# Assign species (this just takes one to a few minutes)
spec <- assignSpecies(st.filt, file.path(silva.path,
                                         "silva_species_assignment_v128.fa.gz"), allowMultiple=TRUE)
saveRDS(spec, file.path(data.path, 'brc_dada_out', "species_filt.rds"))
## Also do for the full set of ASVs
tax <- assignTaxonomy(st, file.path(silva.path,
                                    "silva_nr_v128_train_set.fa.gz"), multithread=TRUE)
saveRDS(tax, file.path(data.path, 'brc_dada_out', "taxonomy.rds"))
# Species assignment has to be run on the cluster: on my pc, fails with "Error:
# cannot allocate vector of size 17.1 Gb".
spec <- assignSpecies(st, file.path(silva.path,
                                    "silva_species_assignment_v128.fa.gz"), allowMultiple=TRUE)
saveRDS(spec, file.path(data.path, 'brc_dada_out', "species.rds"))
|
e57992569ab2f1087bea9ade4c80600d0013862f
|
e784dc9d52588bc6c00fa18fab014f6cf3fe73b7
|
/R-Finance-Programming/ch03_graph/30_substitute.R
|
072e194ff5b6505eaf102bff80765d27080f966e
|
[] |
no_license
|
Fintecuriosity11/Finance
|
3e073e4719d63f741e9b71d29a97598fa73d565d
|
b80879ece1408d239991d1bb13306cc91de53368
|
refs/heads/master
| 2021-02-20T12:59:51.559033
| 2020-08-08T16:21:49
| 2020-08-08T16:21:49
| 245,337,007
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,339
|
r
|
30_substitute.R
|
##########################################################################################################################################
# (NOTE) -> it is recommended to run this code sequentially!
# If an error occurs, leave an issue under the github Finance/R-Finance-Programming path and it will be checked.
##########################################################################################################################################
### Adding Greek letters to a plot
## Use the substitute() function to render Greek symbols (plotmath) in titles and labels.
x<-seq(-1,50,0.01)
y<-sin(x)*x
# Title rendered as the plotmath expression y == Psi*z - sum(beta^gamma).
plot(x,y,main = substitute(y==Psi*z-sum(beta^gamma)),type='l')
# In-plot annotations with a subscripted Delta.
text(0,10, substitute(Delta[k]==1))
text(30,40,substitute(Delta[k]==epsilon))
graphics.off() # function that closes (clears) the graphics devices.
############################################################ result (print) #################################################################
# # # > x<-seq(-1,50,0.01)
# > y<-sin(x)*x
# > plot(x,y,main = substitute(y==Psi*z-sum(beta^gamma)),type='l')
# > text(0,10, substitute(Delta[k]==1))
# > text(30,40,substitute(Delta[k]==epsilon))
##########################################################################################################################################
|
cfe2b9e8206e1bca87f75bc8a69c6ee64d71f5c8
|
cf0c1117b47a005f91a1533046117e6ef69b9914
|
/Sentiment Analysis.R
|
bb2e60fd2ffafd29fd9a5d94ed729bc6bb9a1e64
|
[] |
no_license
|
RCrvro/Social-Media-Analytics-Project
|
5bc46326f75feef8c76ba2dba1038561ad71028b
|
19d9ecff16007a42090ad15965eba42f027aa0d5
|
refs/heads/master
| 2022-11-08T06:55:17.622093
| 2020-06-22T17:58:44
| 2020-06-22T17:58:44
| 269,715,767
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 863
|
r
|
Sentiment Analysis.R
|
## Lexicon-based Sentiment Analysis
# Duplicates were removed for each community.
# Lemmatization with POS tagging was applied upstream.
library(tidytext)
library(syuzhet)
library(dplyr)
library(ggplot2)
library(fmsb)
# Load database of tweets (one text per row in column `text`).
db <- read.csv("/Users/riccardocervero/Desktop/db.csv")
# Score every tweet against the NRC lexicon: 8 emotions + 2 polarities.
result <- get_nrc_sentiment(as.character(db$text))
# Total count per emotion/polarity across the whole corpus.
new_result <- data.frame(colSums(result))
names(new_result)[1] <- "count"
new_result <- cbind("sentiment" = rownames(new_result), new_result)
rownames(new_result) <- NULL
new_result
# Barcharts: rows 1-8 are the NRC emotions, rows 9-10 the two polarities.
qplot(sentiment, data=new_result[1:8,], weight=count, geom="bar",fill=sentiment)+ggtitle("QAnon tweets - Emotions")
qplot(sentiment, data=new_result[9:10,], weight=count, geom="bar",fill=sentiment)+ggtitle("QAnon tweets - Polarity")
# Persist the per-tweet emotion scores.
write.csv(result,"/Users/riccardocervero/Desktop/EmotionTab.csv")
|
9d2f7992eb31579f90061f9846320a068a694e7f
|
beb174618e3ba35aab378218151077b32dcb624b
|
/cachematrix.R
|
7badcaf99eff5dac0194df5b0434e87aea8cc411
|
[] |
no_license
|
mattiasherrera/ProgrammingAssignment2
|
d735384e7df4ec7bddad6c5be5de447aa3e9ba01
|
808d7c9853109515cc92942d73c03dce2c458e8b
|
refs/heads/master
| 2021-01-17T08:50:57.795902
| 2015-06-20T05:09:47
| 2015-06-20T05:09:47
| 37,702,365
| 0
| 0
| null | 2015-06-19T04:56:09
| 2015-06-19T04:56:07
| null |
UTF-8
|
R
| false
| false
| 2,267
|
r
|
cachematrix.R
|
## makeCacheMatrix: build a "cache matrix" — a closure around a matrix `x`
## that can store (cache) its inverse so it only needs to be computed once.
##
## Returns a list of four functions:
##   setmatrix(y)     - replace the stored matrix and invalidate the cached inverse
##   getmatrix()      - return the stored matrix
##   setinverse(inv)  - store a pre-computed inverse
##   getinverse()     - return the cached inverse (NULL until one is set)
makeCacheMatrix <- function(x = matrix()) {
  ## Bug fix: the original used `inverse_x <<- NULL` here. Superassignment at
  ## the top level of this function does NOT create a local variable — it
  ## assigns into the enclosing (global) environment, so every object created
  ## by makeCacheMatrix shared one global cache and the caller's workspace was
  ## polluted. A plain local assignment gives each object its own cache.
  inverse_x <- NULL
  ## Replace the matrix with `y` and clear any previously cached inverse,
  ## since it no longer corresponds to the new matrix.
  setmatrix <- function(y) {
    x <<- y
    inverse_x <<- NULL
  }
  ## Return the currently stored matrix.
  getmatrix <- function() x
  ## Store a pre-calculated inverse (no validation is performed).
  setinverse <- function(inverse) inverse_x <<- inverse
  ## Return the cached inverse, or NULL if none has been set.
  getinverse <- function() inverse_x
  ## The returned list of closures is the "cache matrix" object.
  list(setmatrix = setmatrix, getmatrix = getmatrix,
       setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve: return the inverse of the matrix held by a makeCacheMatrix
## object `x`. On the first call the inverse is computed with solve() and
## stored in the object; later calls return the cached result and announce
## "getting cached inverse". `...` is accepted for interface compatibility
## but is not forwarded anywhere in this implementation.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    ## Cache miss: compute the inverse, store it for next time, return it.
    source_matrix <- x$getmatrix()
    cached <- solve(source_matrix)
    x$setinverse(cached)
  } else {
    ## Cache hit: reuse the stored inverse.
    message("getting cached inverse")
  }
  cached
}
|
54d40824a55ff61a850c0b67438f44768e7f0d22
|
f1971a5cbf1829ce6fab9f5144db008d8d9a23e1
|
/packrat/lib/x86_64-pc-linux-gnu/3.2.5/pool/tests/testthat/test-release.R
|
abbf37a0e55389fc00890e823cafb46471620fcc
|
[] |
no_license
|
harryprince/seamonster
|
cc334c87fda44d1c87a0436139d34dab310acec6
|
ddfd738999cd302c71a11aad20b3af2f4538624f
|
refs/heads/master
| 2021-01-12T03:44:33.452985
| 2016-12-22T19:17:01
| 2016-12-22T19:17:01
| 78,260,652
| 1
| 0
| null | 2017-01-07T05:30:42
| 2017-01-07T05:30:42
| null |
UTF-8
|
R
| false
| false
| 1,336
|
r
|
test-release.R
|
# Unit tests for the pool package's release (poolReturn) behaviour.
# Relies on helpers defined in utils.R: MockPooledObj (a mock pooled
# object whose passivate hook can be made to fail via the global flag
# failOnPassivate) and checkCounts() (asserts the pool's free/taken
# object counts) -- see utils.R for their exact semantics.
source("utils.R")
context("Pool's release method")
describe("release", {
  # One shared pool for all specs below; each spec asserts the counts it
  # starts from and leaves the pool in a known state for the next spec.
  pool <- poolCreate(MockPooledObj$new,
    minSize = 1, maxSize = 3, idleTimeout = 1000)
  it("throws if object was already released", {
    checkCounts(pool, free = 1, taken = 0)
    obj <- poolCheckout(pool)
    poolReturn(obj)
    # Returning the same object a second time must be rejected.
    expect_error(poolReturn(obj),
      "This object was already returned to the pool.")
    checkCounts(pool, free = 1, taken = 0)
  })
  it("throws if object is not valid", {
    # Plain values that never came from a pool cannot be returned.
    obj <- "a"
    expect_error(poolReturn(obj), "Invalid object.")
  })
  it("warns if onPassivate fails", {
    checkCounts(pool, free = 1, taken = 0)
    obj <- poolCheckout(pool)
    # Force the mock object's passivate hook to fail; the pool is then
    # expected to destroy the object instead of putting it back.
    failOnPassivate <<- TRUE
    expect_error(poolReturn(obj),
      "Object could not be returned back to the pool. ",
      "It was destroyed instead.")
    failOnPassivate <<- FALSE
    checkCounts(pool, free = 0, taken = 0)
  })
  it("is allowed after the pool is closed", {
    checkCounts(pool, free = 0, taken = 0)
    obj <- poolCheckout(pool)
    checkCounts(pool, free = 0, taken = 1)
    # Closing with outstanding checked-out objects warns but succeeds...
    expect_warning(poolClose(pool),
      "You still have checked out objects.")
    checkCounts(pool, free = 0, taken = 1)
    # ...and the outstanding object can still be returned afterwards.
    poolReturn(obj)
    checkCounts(pool, free = 0, taken = 0)
    # A second close on an already-closed pool is an error.
    expect_error(poolClose(pool),
      "The pool was already closed.")
  })
})
|
e8ba8f96e53fca6dda451cb9d1226affc49502b0
|
a64f5b231c65e042ae359ea3088e130bfae5dae8
|
/preregCreator.R
|
812d83b6a0bf6d1f2d8f58923f943e847bd8efa0
|
[
"MIT"
] |
permissive
|
johalgermissen/mapMEEG
|
5d378ed0a9a131adb6592839f9b7b37c630d0cfb
|
fdd034c8b099dd728dae66352ae3b2055edea670
|
refs/heads/master
| 2022-11-07T10:31:23.919454
| 2020-06-19T11:03:53
| 2020-06-19T11:03:53
| 270,663,362
| 1
| 4
|
MIT
| 2020-06-20T07:18:53
| 2020-06-08T12:38:34
|
R
|
UTF-8
|
R
| false
| false
| 1,521
|
r
|
preregCreator.R
|
library(shiny)
library(glue)
source("templates.R")
# UI definition: a header banner, a form column (left, width 5) and a
# generated-template column (right, width 7), plus a footer banner.
ui<- fluidPage(
  fluidRow(
    column(12,
      div(id = "header", align = "center",
        h1(icon("brain"), "M/EEG pre-registration template creator", icon("brain")),
        p("__________________________________"),
        p("This app helps you create more structured OSG template for M/EEG pre-registration.")
      ),
      # NOTE(review): the two columns below are nested INSIDE column(12)
      # rather than being its siblings - confirm this layout is intended.
      column(5,
        div(id = "header",
          h2("Fill this form:"),
          # Modality drives which template/form is rendered server-side.
          selectInput("modality", "MODALITY", c("EEG", "MEG")),
          # Modality-specific inputs, rendered by the server (renderUI).
          uiOutput("after_modality_form")
        )
      ),
      column(7,
        div(id = "output",
          h2("Generated template:"),
          p("Copy & Paste that into your OSF pre-registration template."),
          div(style = "width:80%;",
            # The generated template text, filled in by the server.
            verbatimTextOutput("template", placeholder = TRUE)
          )
        )
      )
    ),
    div(id = "footer", align="center",
      p("___"),
      p("This App has been created @ OHBM BrainHack 2020",
        style = "color: white; background: grey;")
    )
  )
)
# Server: whenever the selected modality changes, re-render both the
# generated template text and the modality-specific form fields.
# TEMPLATE_TEXT_* and FORM_FIELDS_* come from templates.R (sourced above);
# glue::glue() interpolates {placeholders} in the template from the
# calling environment (presumably the form inputs - confirm in templates.R).
server <- function(input, output, session) {
  observeEvent(input$modality,{
    if (input$modality == "EEG"){
      output$template <- renderText(glue::glue(TEMPLATE_TEXT_EEG))
      output$after_modality_form <- renderUI(FORM_FIELDS_EEG)
    }
    if (input$modality == "MEG"){
      output$template <- renderText(glue::glue(TEMPLATE_TEXT_MEG))
      output$after_modality_form <- renderUI(FORM_FIELDS_MEG)
    }
  })
}
shinyApp(ui, server)
|
c4e294904efb2707402f72592ed81e2e5750df80
|
154553c5d637755a8aebb29b681480714ad4c819
|
/R/draw_names.R
|
05da7b4fe3e0a7e68c0d8683bddbddb42d117fbf
|
[] |
no_license
|
amirbenmahjoub/Package_DM
|
fb42cfc1e66c57af177e1e400dad0c09a91ee914
|
3b9749e80894e57160ab1dced41c01312534958a
|
refs/heads/master
| 2021-05-08T08:31:11.217927
| 2017-11-24T15:36:32
| 2017-11-24T15:36:32
| 107,041,020
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 564
|
r
|
draw_names.R
|
#' Plot the evolution of name occurrences over time for a set of names
#'
#' @param names character vector of first names whose yearly occurrence
#'   counts should be plotted
#' @import dplyr tidyr ggplot2 prenoms
#' @return A ggplot object: one line per name, with year on the x axis and
#'   the number of occurrence records per year on the y axis.
#' @export
#'
#' @examples
#' \dontrun{
#'
#' draw_names(c("name1","name2",...))
#'
#' }
draw_names <- function(names){
  # FIX: the original check was `assert_that(is.character(class(names)))`,
  # which is always TRUE because class() returns a character vector for
  # any object - it never validated the argument. stopifnot() validates
  # the argument itself and avoids the undeclared assertthat dependency.
  stopifnot(is.character(names))
  # Load the pre-built occurrence table shipped with this package.
  data("dataprenoms",package = "myfirstpackage")
  # Count records per (year, name) and draw one line per name.
  dataprenoms %>% filter(name %in% names) %>%
    group_by(year,name) %>%
    summarize(count=n()) %>%
    ggplot(aes(x=year, y=count,color=name))+ geom_line()
}
|
1145d4850c9eed30e47aacd8c14421fd25be657d
|
15d7fe33eeb26d6824d2199f8bfc4e52c2f647c1
|
/carpentry/002_dplyr_simple_joins.R
|
9fc64e60134f16b46926cc4829c8f91010ab6ddc
|
[] |
no_license
|
jalapic/learnR
|
3a5a193295eb96a5aa53ff48b1cce0b6aa0e15d8
|
70ff52dc961facff363bf10ed667d3329dc6c81f
|
refs/heads/master
| 2021-01-21T03:24:47.767208
| 2018-10-31T17:45:45
| 2018-10-31T17:45:45
| 101,895,479
| 21
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,921
|
r
|
002_dplyr_simple_joins.R
|
### dplyr joins basics
### Tutorial script: demonstrates full_join() and left_join() on small
### example data sets. Meant to be run interactively; reads CSV files
### from a local ./datasets directory.
library(tidyverse)
## A common issue in data analysis is when you have two or more files that you
## need to join together in some way.
# this might be two dataframes of same length where you simply join
# this might be unequal lengths of two dataframes
# you may want to join based on one column, or multiple columns.
### Example 1. - 'full_join' - join all columns of x to all columns of y.
# simplest case: Both dataframes have one column each with all cases/names we wish to join on.
# new york state data
# columns you're joining on don't have to be in same order
# columns you're joining on have to have name in common.
plumbum <- read.csv("datasets/nyc_children_lead.csv")
population <- read.csv("datasets/new_york_population.csv")
# Quick inspection of both tables before joining.
head(plumbum)
head(population)
tail(plumbum)
tail(population)
dim(plumbum)
dim(population)
population %>% full_join(plumbum) #Error: No common variables. Please specify `by` param.
# Rename the first column so both tables share a common "county" key.
colnames(plumbum)
colnames(plumbum)[1]<-"county"
colnames(plumbum)
population %>% full_join(plumbum)
full_join(population,plumbum) # you can also write it like this
### Example 2 -
# i. full_join can work on multiple columns that uniquely identify rows.
# ii. full_join will give NA if cannot join (i.e. no value exists in one of datasets), but will try.
# Some bird data
birds_pct <- read.csv("datasets/birds_pct.csv")
birds_group <- read.csv("datasets/birds_group.csv")
# two columns match (name / state) -
# e.g. House Finch is in Arizona & New Mexico, House Sparrow is in all 3 states
birds_pct
birds_group
full_join(birds_pct, birds_group) # warning message is ok..
# we have 12 unique bird-state combinations -
# and the full_join() fills all available information together from both dataframes.
# NA is given if it cannot fill.
### Several other types of join exist.....
# e.g. what if we had multiple matches to join on in the 2nd dataframe to the 1st dataframe.
### Example 3 - 'left_join()'
# return all rows from x and all columns from x and y.
# if 2nd dataframe (y) has more than one match - will return all of them
teams <- data.frame(
  team = c("New York Yankees", "Chicago Cubs", "New York Mets"),
  league = c("AL", "NL", "NL")
)
players <- data.frame(
  player = c("Derek Jeter", "Mariano Rivera", "Don Mattingly",
             "Ernie Banks", "Jake Arietta",
             "Mike Piazza", "Keith Hernandez", "Dwight Gooden"),
  team = c("New York Yankees", "New York Yankees", "New York Yankees",
           "Chicago Cubs", "Chicago Cubs",
           "New York Mets","New York Mets","New York Mets")
)
teams
players
# Join on the shared "team" column; every player row picks up its league.
teams %>% left_join(players)
players %>% left_join(teams)
### REFERENCES for more join types:
# http://stat545.com/bit001_dplyr-cheatsheet.html
|
b08327a3cf5beec40259c4e3954799047dff2b53
|
e55ffb2edab5f9658f23c46a23b84c78348b99eb
|
/r-ws/foundamental-data-transforming-labels.R
|
42a7c4eaee131b7d91e14b99bb8d9c9dbe3d3006
|
[] |
no_license
|
un-knower/hadoop-ws
|
6689dd20fd8818f18cfef7c7aae329017a01b8a9
|
913bbe328a6b2c9c79588f278ed906138d0341eb
|
refs/heads/master
| 2020-03-17T23:15:21.854515
| 2015-04-25T08:09:03
| 2015-04-25T08:09:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,828
|
r
|
foundamental-data-transforming-labels.R
|
# -----------------------------------------------------------------------------
# Data transformation
# Row-number information
# Labels
# (comments translated from Chinese)
# -----------------------------------------------------------------------------
# Turn the row numbers into a regular column (so the frame can be melted later!)
df <- data.frame(time = 1:10,
                 a = cumsum(rnorm(10)),
                 b = cumsum(rnorm(10)),
                 c = cumsum(rnorm(10)))
rownums <- as.numeric(rownames(df))
df <- data.frame(rownums, df)
# -----------------------------------------------------------------------------
# Labels
# http://www.statmethods.net/input/valuelabels.html
# --------------------------------
# 1. Variable Labels
# If you use the Hmisc package, you can take advantage of some labeling features.
library(Hmisc)
label(mydata$myvar) <- "Variable label for variable myvar" # set the label
# (NOTE: `mydata` is assumed to exist already - this is a snippet, not a runnable script.)
# Unfortunately the label is only in effect for functions provided by the Hmisc package, such as describe().
#Your other option is to use the variable label as the variable name and then refer to the variable by position index.
names(mydata)[3] <- "This is the label for variable 3"
mydata[3] # list the variable
# --------------------------------
# 2. Value Labels
# To understand value labels in R, you need to understand the data structure factor.
# You can use the factor function to create your own value lables.
# variable v1 is coded 1, 2 or 3
# we want to attach value labels 1=red, 2=blue, 3=green
v1 <- c("low", "middle", "low", "low", "low", "low", "middle", "low", "middle")
v2 <- v1
mydata <- data.frame(v1, v2)
# The next statement does NOT work as hoped: the levels 1, 2, 3 do not occur
# in the data, so every value of v2 becomes <NA>.
mydata$v2 <- factor(mydata$v2, levels = c(1,2,3), labels = c("red", "blue", "green"))
# The next statement succeeds.
mydata$v2 <- v2
mydata$v2 <- factor(mydata$v2, levels = c("low", "middle"), labels = c("a", "b"))
mydata
# The next statement succeeds, but some values ("middle") become <NA>,
# because "middle" is not among the listed levels.
mydata$v2 <- v2
mydata$v2 <- factor(mydata$v2, levels = c("low", "xxxxx"), labels = c("a", "b"))
mydata
# The original note recorded a parse error for the next statement; the extra
# "xxxxx" -> "c" mapping itself has no effect. (The quoted error below came
# from a missing-comma typo in an earlier version of this line - confirm.)
# Error: unexpected string constant in "mydata$v2 <- factor(mydata$v2, levels = c("low", "middle" "xxxxx""
mydata$v2 <- v2
mydata$v2 <- factor(mydata$v2, levels = c("low", "middle","xxxxx"), labels = c("a", "b", "c"))
mydata
# The next statement succeeds with a warning (per the original note),
# because "middle" and "xxxxx" are both mapped to the same label "b".
# Error: unexpected string constant in "mydata$v2 <- factor(mydata$v2, levels = c("low", "middle" "xxxxx""
mydata$v2 <- v2
mydata$v2 <- factor(mydata$v2, levels = c("low", "middle","xxxxx"), labels = c("a", "b", "b"))
mydata
# The next statement succeeds; with a single label for two levels, R generates
# the value labels "<label>1" and "<label>2" (label text + level index).
mydata$v2 <- v2
mydata$v2 <- factor(mydata$v2, levels = c("low", "middle"), labels = c("一个值标签"))
mydata
|
a1c2435f57932f2087505f2c2adf5df58730b749
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/partialAR/examples/which.hypothesis.partest.Rd.R
|
25f743ed32a7586c40777238614e6016ecff1606
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 693
|
r
|
which.hypothesis.partest.Rd.R
|
# Example script extracted from the partialAR package documentation.
library(partialAR)
### Name: which.hypothesis.partest
### Title: Returns the preferred hypothesis when testing for partial
###   autoregression
### Aliases: which.hypothesis.partest
### Keywords: ts models
### ** Examples
# Each call simulates a series with rpar(), tests it with test.par(),
# and asks which hypothesis is preferred; the trailing comment on each
# line records the expected verdict.
set.seed(1)
which.hypothesis.partest(test.par(rpar(1000, 0, 1, 0))) # -> "AR1"
which.hypothesis.partest(test.par(rpar(1000, 0, 0, 1))) # -> "RW"
which.hypothesis.partest(test.par(rpar(1000, 0, 1, 1))) # -> "PAR"
# Same three cases with the robust variant of the test.
which.hypothesis.partest(test.par(rpar(1000, 0, 1, 0), robust=TRUE)) # -> "RAR1"
which.hypothesis.partest(test.par(rpar(1000, 0, 0, 1), robust=TRUE)) # -> "RRW"
which.hypothesis.partest(test.par(rpar(1000, 0.5, 1, 1), robust=TRUE)) # -> "RPAR"
|
483c3639dad780f50582315990f5bc29248325f3
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.analytics/man/quicksight_update_group.Rd
|
fd026664671b79919ffc71f01b8913b1559edf43
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 901
|
rd
|
quicksight_update_group.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quicksight_operations.R
\name{quicksight_update_group}
\alias{quicksight_update_group}
\title{Changes a group description}
\usage{
quicksight_update_group(GroupName, Description = NULL, AwsAccountId, Namespace)
}
\arguments{
\item{GroupName}{[required] The name of the group that you want to update.}
\item{Description}{The description for the group that you want to update.}
\item{AwsAccountId}{[required] The ID for the Amazon Web Services account that the group is in.
Currently, you use the ID for the Amazon Web Services account that
contains your Amazon QuickSight account.}
\item{Namespace}{[required] The namespace of the group that you want to update.}
}
\description{
Changes a group description.
See \url{https://www.paws-r-sdk.com/docs/quicksight_update_group/} for full documentation.
}
\keyword{internal}
|
99197e82b3efbb88d06396627a28002d995f80f1
|
f61cea74c0ef7a4ae4e0812fcde5bed7bd2772ea
|
/ui.R
|
151d1868cdeea6ca7dae088dccf87356e9d09776
|
[] |
no_license
|
jackytksoon/Shiny-Application-and-Reproducible-Pitch
|
c9038b5bef6fc6a408dd4a86c7c61dee3dc386d7
|
dabe3245fdd35049d1ed3b6cf03f51390123acf8
|
refs/heads/master
| 2021-01-01T05:15:35.953455
| 2016-04-20T03:40:57
| 2016-04-20T03:40:57
| 56,646,248
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,371
|
r
|
ui.R
|
# Shiny UI: search a hospital by rank within a state for a given outcome
# (30-day mortality/readmission data from the Hospital Compare website).
library(shiny)
shinyUI(fluidPage(
  titlePanel("Search Hospitals Name by Rank, State and Outcome"),
  sidebarLayout(
    sidebarPanel(
      helpText("Select the state, outcome and rank you wish to check"),
      # Two-letter state/territory abbreviations.
      selectInput("State", "State", c("AK", "AL", "AR", "AZ", "CA", "CO", "CT",
                                      "DC", "DE", "FL", "GA", "GU", "HI", "IA",
                                      "ID", "IL", "IN", "KS", "KY", "LA", "MA",
                                      "MD", "ME", "MI", "MN", "MO", "MS", "MT",
                                      "NC", "ND", "NE", "NH", "NJ", "NM", "NV",
                                      "NY", "OH", "OK", "OR", "PA", "PR", "RI",
                                      "SC", "SD", "TN", "TX", "UT", "VA", "VI",
                                      "VT", "WA", "WI", "WV", "WY")),
      radioButtons("Outcome", "Outcome", c("heart attack",
                                           "heart failure",
                                           "pneumonia")),
      # NOTE(review): value = "1" passes a string to numericInput; a
      # numeric 1 is presumably intended - confirm.
      numericInput("Rank","Rank", value = "1"),
      actionButton("action","Submit")
    ),
    mainPanel(
      p("This application allow users to search a hospital name
        in certain state and outcome about 30-day mortality and readmission
        rates for over 4,000 hospitals. The outcome include:"),
      p("- Heart Attack"),
      p("- Heart Failure"),
      p("- Pneumonia"),
      br(),
      p("The higher the rank of the hospital, the lower the 30-day mortality."),
      # Result hospital name, filled in by the server as output$text1.
      strong(span(textOutput("text1")),style = "color:blue"),
      br(),
      br(),
      br(),
      helpText("Remark:"),
      helpText("* If there is a tie for a hospital for a given outcome, then
               the hospital names should be sorted in alphabetical order and
               the first hospital in that set should be chosen (i.e. if
               hospitals \"b\", \"c\", and \"f\" are tied for best, then
               hospital \"b\" should be returned)"),
      helpText("* The data are come from from the Hospital Compare website
               (http://hospitalcompare.hhs.gov) run by the U.S. Department of Health
               and Human Services.")
    )
  )
))
|
93ca244783ee0cbe50c8138601a4f8d6a7b5f675
|
ee4f2c2d6fceba9422623dea19773ae4c1560209
|
/test_lib.R
|
82221cf401cf4d3999109f365b2c7162b0751898
|
[
"MIT"
] |
permissive
|
granek/crne_cna1crz1_rnaseq
|
e51ec4e42450873b5abbbb02eaa12b9ae80b2cfb
|
361abf7f082c52b8dc7bc9ab39089de2ce0c7ec3
|
refs/heads/master
| 2021-01-18T15:56:30.782871
| 2017-02-13T22:08:47
| 2017-02-13T22:08:47
| 62,157,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 641
|
r
|
test_lib.R
|
# Session setup: resolve the analysis base directory, define output and
# annotation paths, and load plotting/differential-expression packages.
# When run interactively the base directory comes from the CNA environment
# variable; otherwise the current directory is used.
# NOTE: `<<-` at the top level behaves like a global assignment here.
if (interactive()){
  basedir<<-file.path(Sys.getenv("CNA"),"rstudio")
} else {
  basedir<<-"."
}
##================================================================================
outdir=file.path(basedir,"results")
annotdir = file.path(basedir,"info")
# NOTE(review): the hard-coded, user-specific lib.loc below will fail on
# any other machine - confirm whether it is still needed.
suppressPackageStartupMessages(library("DESeq2",lib.loc="/Users/josh/Library/R/3.0/library"))
suppressPackageStartupMessages(library("RColorBrewer"))
suppressPackageStartupMessages(library("gplots"))
## writeLines(capture.output(sessionInfo()), file.path(outdir,"sessionInfo.txt"))
# Record the library search path and session details for reproducibility.
print(.libPaths())
print("==================================================")
print(sessionInfo())
|
c190d62799241317bb212aa9621d13581774aa6f
|
20fb140c414c9d20b12643f074f336f6d22d1432
|
/man/NISTkgPerCubMeterTOpoundPerCubFt.Rd
|
ba4d606dbfb432fe9b7c2a06f9950a3c9b73c2b6
|
[] |
no_license
|
cran/NISTunits
|
cb9dda97bafb8a1a6a198f41016eb36a30dda046
|
4a4f4fa5b39546f5af5dd123c09377d3053d27cf
|
refs/heads/master
| 2021-03-13T00:01:12.221467
| 2016-08-11T13:47:23
| 2016-08-11T13:47:23
| 27,615,133
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 928
|
rd
|
NISTkgPerCubMeterTOpoundPerCubFt.Rd
|
\name{NISTkgPerCubMeterTOpoundPerCubFt}
\alias{NISTkgPerCubMeterTOpoundPerCubFt}
\title{Convert kilogram per cubic meter to pound per cubic foot }
\usage{NISTkgPerCubMeterTOpoundPerCubFt(kgPerCubMeter)}
\description{\code{NISTkgPerCubMeterTOpoundPerCubFt} converts from kilogram per cubic meter (kg/m3) to pound per cubic foot (lb/ft3) }
\arguments{
\item{kgPerCubMeter}{kilogram per cubic meter (kg/m3) }
}
\value{pound per cubic foot (lb/ft3) }
\source{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\references{
National Institute of Standards and Technology (NIST), 2014
NIST Guide to SI Units
B.8 Factors for Units Listed Alphabetically
\url{http://physics.nist.gov/Pubs/SP811/appenB8.html}
}
\author{Jose Gama}
\examples{
NISTkgPerCubMeterTOpoundPerCubFt(10)
}
\keyword{programming}
|
40fd6c0e0f4e304c4d680f4f2b696e9c10488cef
|
0d7f82ba1c9293177e67f2db06d07628b46d77a6
|
/R/utils-vector.R
|
3db615984ff2c46a89401a4e25138c1f4929d9a2
|
[
"MIT"
] |
permissive
|
rcodo/coro
|
97430cbc30233f2b696b125535e3efe8695c525a
|
015c6252a05ce6cb1160accbc38085b82c8a8466
|
refs/heads/main
| 2023-01-31T01:51:40.163190
| 2020-12-17T21:00:29
| 2020-12-17T21:00:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,076
|
r
|
utils-vector.R
|
# Names of the base vector types supported by as_vector() and
# new_vector() below.
vec_types <- c(
  "logical",
  "integer",
  "double",
  "complex",
  "character",
  "raw",
  "list"
)
# Return the coercion function (e.g. as.integer) for a base vector `type`.
# Aborts if `type` is not one of the supported types in `vec_types`.
as_vector_fn <- function(type) {
  if (!type %in% vec_types) {
    abort("`type` must be a vector type")
  }
  coercers <- list(
    logical = as.logical,
    integer = as.integer,
    double = as.double,
    complex = as.complex,
    # FIXME: explicit rlang::as_character() should serialise input
    character = as.character,
    raw = as.raw,
    # Rewrap lists - Workaround for #32
    list = list
  )
  fn <- coercers[[type]]
  if (is.null(fn)) {
    # Unreachable after the vec_types guard above; kept as a safety net.
    abort("Internal error in `as_vector()`: unexpected type")
  }
  fn
}
# Coerce `x` to the given base vector `type` (see as_vector_fn()).
as_vector <- function(x, type) {
  convert <- as_vector_fn(type)
  convert(x)
}
# Return the constructor (e.g. new_integer) for a base vector `type`.
# Aborts if `type` is not one of the supported types in `vec_types`.
# (switch() is kept so only the selected constructor symbol is evaluated.)
new_vector_fn <- function(type) {
  if (!type %in% vec_types) {
    abort("`type` must be a vector type")
  }
  ctor <- switch(
    type,
    logical = new_logical,
    integer = new_integer,
    double = new_double,
    complex = new_complex,
    character = new_character,
    raw = new_raw,
    list = new_list,
    # Unreachable after the vec_types guard above; kept as a safety net.
    abort("Internal error in `new_vector()`: unexpected type")
  )
  ctor
}
# Create a fresh vector of the given base `type` with the given `length`.
new_vector <- function(type, length = 0L) {
  make <- new_vector_fn(type)
  make(length)
}
|
6724e8d2767cde3406f7f61945ee46fc930a7606
|
7bf239bf9446ac3073d0ebb6af4a2f3b2cb47af6
|
/Lab3/Assignment 3/Assignment 3.R
|
6bf42c71601ad40f00b6af93b21b12ac23dd0219
|
[] |
no_license
|
chreduards/TDDE01
|
9346b9dba73dc99d9c92c8b52aa201e09d97b4b1
|
df1920930ac52c77b82b95b585de2175ddfc040e
|
refs/heads/master
| 2020-04-24T02:31:58.720021
| 2019-02-21T10:25:05
| 2019-02-21T10:25:05
| 171,640,297
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,211
|
r
|
Assignment 3.R
|
#### Import and divide data ####
# Train a neuralnet to approximate sin(x) on [0, 10], tuning the stopping
# threshold over i/1000 for i in 1..10 and picking the one with lowest
# validation MSE.
# NOTE(review): hard-coded, user-specific working directory - will fail
# on other machines.
setwd("C:/Users/Christoffer Eduards/OneDrive/R/Lab2/Assignment 1")
library(neuralnet)
set.seed(1234567890)
Var <- runif(50, 0, 10)
trva <- data.frame(Var, Sin = sin(Var))
tr <- trva[1:25, ] #Training
va <- trva[26:50, ] #Validation
#### Main ####
#Randomly generating the initial weights
winit = runif(31, min = -1, max = 1)
MSE = c() #Vector for the MSE
#Training nn different i
for (i in 1:10) {
  # One hidden layer of 10 units; stopping threshold varies with i.
  nn = neuralnet(
    Sin ~ Var,
    data = tr,
    hidden = 10,
    threshold = i / 1000,
    startweights = winit
  )
  #Prediction for validation data
  pred = compute(nn, va$Var)$net.result
  #Plotting iterations for nn training
  plot(
    va$Var,
    pred,
    main = paste("Iteration", i),
    ylab = "Sin(x)",
    xlab = "x"
  )
  #Plot actual data
  points(va, col = "red")
  #Mean squared error vs iteration
  MSE[i] = mean((va$Sin - pred) ^ 2)
}
#Show mean squared error for different i
plot(MSE, xlab = "Index of i in Threshold(i/1000)", main = "MSE")
#Train optimal nn using the threshold that minimised validation MSE
nn = neuralnet(
  Sin ~ Var,
  data = tr,
  hidden = 10,
  threshold = which.min(MSE) / 1000,
  startweights = winit
)
plot(nn)#Picture of the final nn
ff389207c87993a6622e1b6caf37bf7119912b02
|
f4a38ecb46a7721ada59c35cb5f573cbb901bee5
|
/man/add_p.adjust.Rd
|
b6570f65e53a41754fb287bd34852f59458a9c75
|
[] |
no_license
|
biodatacore/biodatacoreMTM
|
c080e11c2e5440c9fe9d6c9737df543dd69b40dd
|
bb2541c5132fdb96d79849a50890a9c2c9101913
|
refs/heads/master
| 2021-05-14T18:08:26.872845
| 2018-01-02T22:50:34
| 2018-01-02T22:50:34
| 114,794,630
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,031
|
rd
|
add_p.adjust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R
\name{add_p.adjust}
\alias{add_p.adjust}
\alias{add_bonf_pv}
\alias{add_fdr_pv}
\title{Adds columns of adjusted p-values}
\usage{
add_p.adjust(data, method = stats::p.adjust.methods, p_value = "p.value",
var = NULL)
add_bonf_pv(data, p_value = "p.value", var = NULL)
add_fdr_pv(data, p_value = "p.value", var = NULL)
}
\arguments{
\item{data}{data frame}
\item{method}{scalar character: which adjustment method to use. See
\code{\link[stats]{p.adjust}} for more info.}
\item{p_value}{scalar character: name of the p-value column.}
\item{var}{scalar character: name of the column to be created. if `NULL` then
it attempts to create its own column. To avoid accidentally overwriting an
existing column, it is safer to always supply a name here.}
}
\value{
data frame
}
\description{
Adds adjusted p-values to a dataframe. Helper functions wrap `add_p.adjust`
for common additions.
}
\seealso{
Other augmenters: \code{\link{add_nlog10_pv}}
}
|
8942f17de972d757c6135e251ac04f7cb9fa5cc3
|
994dc87cc09e2fa8a470f9920492d4a6527465e8
|
/test_people/People_neuronet.r
|
6d8e695b6ccecab96939e7274ddd9609a96ac1e4
|
[] |
no_license
|
Basnor/At_risk_students_analizer
|
2b04cd97ddbf5bf4e5cd041ca694df893b726770
|
51c4617bb3d1927e2d64241953895005563682b5
|
refs/heads/master
| 2023-01-09T17:53:20.286163
| 2020-11-03T06:50:50
| 2020-11-03T06:50:50
| 281,601,434
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,218
|
r
|
People_neuronet.r
|
## -----READ-TRAINING-SAMPLE---------------------------------------------
## (comments translated from Russian)
PeopleTrain <-read.csv(file = "PeopleTrain.csv", header = TRUE, sep = ";", dec = ",")
# data normalisation
PeopTrain <- list(class = PeopleTrain$Class, data = PeopleTrain[,c(2:13)])
rm(PeopleTrain)
Y_train = PeopTrain$class
X_train = PeopTrain$data
x_scale_train = scale(X_train, center = TRUE, scale = TRUE)
rm(X_train)
#normalize <- function(x){
# return((x - min(x)) / (max(x) - min(x)))
#}
#x_norm <- as.data.frame(lapply(X, normalize))
#summary(x_norm$Height)
## -----READ-TEST-SAMPLE-------------------------------------------------
# NOTE(review): the training file is read with sep = ";" but the test
# file with sep = "," - confirm that both files really use different
# separators.
PeopleTest <-read.csv(file = "PeopleTest.csv", header = TRUE, sep = ",", dec = ",")
PeopTest <- list(class = PeopleTest$Class, data = PeopleTest[,c(1:12)])
rm(PeopleTest)
Y_test = PeopTest$class
X_test = PeopTest$data
x_scale_test = scale(X_test, center = TRUE, scale = TRUE)
rm(X_test)
## -----TRAIN-MODEL-ON-THE-DATA------------------------------------------
#install.packages("neuralnet")
require(neuralnet)
# Smooth ReLU-like activation used for the hidden layers.
softplus <- function(x) { log(1 + exp(x)) }
people_model <- neuralnet(Y_train ~ ., data = data.frame(x_scale_train,Y_train),
hidden = c(12,8), act.fct = softplus)
#plot(people_model)
#c(30,40,12)
## -----MODEL-PERFORMANCE-EVALUATION-------------------------------------
model_results <- compute(people_model, x_scale_test)
predicted_class <- model_results$net.result
print(predicted_class)
## -----ERRORS-----------------------------------------------------------
# Map the 4 network output columns to class labels by the argmax column:
# 1 -> "MN", 2 -> "MS", 3 -> "FN", otherwise "FS".
predicted_class <- data.frame("class" = ifelse(max.col(predicted_class[ ,1:4]) == 1, "MN",
ifelse(max.col(predicted_class[ ,1:4]) == 2, "MS",
ifelse(max.col(predicted_class[ ,1:4]) == 3, "FN", "FS"))))
# confusion matrix function
#install.packages("caret", dependencies=TRUE)
#install.packages("ggplot2")
library(caret)
factor_predicted_class <- factor(predicted_class$class, levels = c("FN", "FS", "MN", "MS"))
cm = confusionMatrix(Y_test, factor_predicted_class)
print(cm)
|
ea8488a75d48de5da714d48fd62e8bc072f84d0c
|
eb3cccfd5a08362c0933688b6afc481d29db0100
|
/milk_customer_preference.R
|
ee715c5de0d6309490c4eecbc1d0a14b9a0c5cc0
|
[] |
no_license
|
xjhee/Dairy-Farm-International-Holdings-Ltd.-customer-preference-data-analysis
|
534f62143732dda76839dacd4146dc227b248c15
|
5796de9ea8886abe523c1c02e2003b825e6554b1
|
refs/heads/master
| 2020-08-27T07:24:40.467010
| 2019-10-24T12:21:11
| 2019-10-24T12:21:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,124
|
r
|
milk_customer_preference.R
|
# This study investigates consumer preference in different milk attributes
# based on eight factors:
# x1: Taste
# x2: Fat content
# x3: High quality certification
# x4: Origin
# x5: Price
# x6: Organic certification
# gender; level of education
library(psych)
library(reshape2)
library(ggplot2)
library(forecast)
# Load the milk RData.
# FIX: the original did `milk <- load("milk.RData")`; load() returns only
# the NAMES of the restored objects, so that assignment replaced the
# restored `milk` data frame with a character vector, and every later use
# of milk[, ...] would fail. Call load() for its side effect instead
# (assumes the .RData file contains an object named `milk` - confirm).
setwd("/Users/clairehe/Desktop")  # NOTE(review): machine-specific path
load("milk.RData")
head(milk)
# Standardize attribute columns x1-x6 and run hierarchical clustering
# with squared Euclidean distance and Ward's method.
x<-scale(milk[,2:7])
dist<-dist(x,method="euclidean")^2
fit <- hclust(dist, method="ward.D")
fit
history<-cbind(fit$merge,fit$height)
# Plot the tail of the agglomeration heights to choose the cluster count.
ggplot(mapping=aes(x=(379:399),y=fit$height[379:399]))+
  geom_line()+
  geom_point()+
  labs(x="stage",y="height")
# Full dendrogram.
par(mar=c(1,4,1,1))
plot(fit,hang=-1,main="")
# From the graph we conduct a 3-cluster analysis.
cluster<-cutree(fit,k=3)
sol <- data.frame(cluster,x)
table(cluster)
tb<-aggregate(x=x, by=list(cluster=sol$cluster),FUN=mean)
tb
# Validate with K-means (default Hartigan-Wong algorithm) to obtain
# cluster centers and sizes.
set.seed(123)
fit1<-kmeans(x=x,centers=3)
fit1
tb1<-data.frame(cluster=1:3,fit1$centers)
tbm<-melt(tb1,id.vars='cluster')
# FIX: the original referenced the undefined object `tbm1` here (typo for
# `tbm`), which raised an error at runtime.
tbm$cluster<-factor(tbm$cluster)
ggplot(tbm,
       aes(x = variable, y = value, group = cluster, colour = cluster)) +
  geom_line(aes(linetype=cluster))+
  geom_point(aes(shape=cluster)) +
  geom_hline(yintercept=0) +
  labs(x=NULL,y="mean")
# Relate cluster membership to education and gender.
tb<-table(fit1$cluster,milk[,"edu"])
prop.table(tb,margin=1)
aggregate(milk$edu,by=list(fit1$cluster),FUN=mean)
tb<-table(fit1$cluster,milk[,"gender"])
prop.table(tb,margin=1)
aggregate(milk$gender,by=list(fit1$cluster),FUN=mean)
# Formal tests: chi-squared independence of gender vs cluster, and ANOVA
# of education level across clusters.
chisq.test(tb)
fit2<-lm(milk$edu~as.factor(fit1$cluster))
anova(fit2)
|
32aa64f374d0870a12d80b329ffb500844b874d1
|
6fd02f84552ba4298d8009cbee053f72d189db0b
|
/ui.R
|
420b55745dddc9d6a5402f815d040a4b68b715c5
|
[] |
no_license
|
uc-bd2k/webgimm
|
0573ce671ed611eda1d4b686975068b3157cb085
|
d6116766ab14e3f04ab465096ef2f7eb5858edcb
|
refs/heads/master
| 2020-03-19T08:20:28.107228
| 2018-10-17T13:38:11
| 2018-10-17T13:38:11
| 136,196,960
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,030
|
r
|
ui.R
|
library(shinyjs)
library(shinyBS)
library(DT)
library(morpheus)
library(shinycssloaders)
source("helpers.R")
# WebGIMM UI: upload an expression matrix, run the Bayesian cluster
# analysis, and browse/download the results (tooltips document each
# control in place).
shinyUI(fluidPage(style = "border-color:#848482",
  useShinyjs(),
  # Application title
  navbarPage(strong(em("Webgimm Server",style = "color:white")), inverse = TRUE, position = "fixed-top"
  ),
  tags$head(tags$style(type="text/css", "
  ")),
  # Busy indicator shown while the Gibbs sampler is running.
  conditionalPanel(condition="$('html').hasClass('shiny-busy')",
    tags$div("WORKING...",id="loadmessage")),
  fluidRow(column(12,
    h5(
      HTML("Cluster analysis of gene expression data using Context Specific Infinite Mixture Model "),
      a(href="http://eh3.uc.edu/gimm/dcim/",em("Freudenberg et al, BMC Bioinformatics 11:234. 2010"))
    ),
    br(),
    br()
  )),
  mainPanel(
    tabPanel(" ", fluidRow(
      column(2,
        h4("Open Data File")
      ),
      column(2,
        actionLink("example",label=h4("(Load example)"))
      )
    )),
    tabPanel(" ",
      fluidRow(
        column(4,
          # wellPanel(
          fileInput("inputFile", label = NULL),
          bsTooltip("inputFile", "Reading tab-delimited file with data to be clustered. Rows are genes and columns are samples. First two columns provide gene annotations and first row contains column names.", "top", options = list(container = "body")),
          withBusyIndicatorUI(actionButton("cluster",label="Run Cluster Analysis", class = "btn-primary", style = "background-color: #32CD32; border-color: #215E21")),
          bsTooltip("cluster", "Run the Gibbs sampler and construct hierarchical clustering of genes and samples based on the Gibbs sampler output as described in the reference paper", "top", options = list(container = "body")),
          # Result links only appear once output$clustResults signals
          # completion with the sentinel value '***'.
          # conditionalPanel(condition="input.cluster%2==1",
          conditionalPanel(condition="output.clustResults=='***'",
            br(),
            uiOutput("treeviewLink"),
            bsTooltip("treeviewLink", "Interactively browse clustering results using FTreeView Java application. You will need a recent version of Java installed and enabled.", "top", options = list(container = "body")),
            uiOutput("downloadLink"),
            bsTooltip("downloadLink", "Download zip archive of clustering results consisting of .cdt, .gtr and .atr files. The clustering can be viewed using FTreeView, TreeView, or similar applications. It can also be imported into R using ctc or CLEAN packages", "top", options = list(container = "body"))
            #UNSTABLE CLUSTERING
            # numericInput("numclust", "Number of Clusters", value = 1),
            # actionButton("hiddenbutton", "Show Clusters", style = "background-color: #D53412; border-color: #80210D", class = "btn-primary")
            # )
          )),
        column(8,
          tabsetPanel(id = "tabset",
            tabPanel("Data",
              br(),
              withSpinner(dataTableOutput("toCluster"), color = getOption("spinner.color", default = "#000000"), size = getOption("spinner.size", default = 1.2))
              # textOutput("clustResults")
            ),
            tabPanel("Morpheus", value = "morpheusTab",
              br(),
              print(strong("Morpheus Interface")),
              br(),
              withSpinner(morpheusOutput("morpheus", width = 900, height = "600px"), color = getOption("spinner.color", default = "#000000"), size = getOption("spinner.size", default = 1.2))
            )
            #UNSTABLE CLUSTERING
            # tabPanel("Clusters", value = "clusterTab",
            # withSpinner(textOutput("hiddenbutton"), color = getOption("spinner.color", default = "#000000"), size = getOption("spinner.size", default = 1.2))
          )
        ),
        textOutput("clustResults")
      ))
  )))
|
12dabd157f4668461d42ae64e901f60cd5a30f5d
|
655ee959878fc9fa6f0ffdd7fb956f38936c2072
|
/symbolicR/man/create.polynomial.of.random.variable.Rd
|
7c33daad9392eee4c01ad5b7f2a1b70617f79089
|
[] |
no_license
|
isabella232/symbolicR
|
29b1b28334f8889846156d8fd1effdbec6164e6d
|
001707e9a380de37a8f7fe3d2a463cf047733109
|
refs/heads/master
| 2023-03-16T00:08:39.874076
| 2017-12-13T10:00:47
| 2017-12-13T10:00:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 615
|
rd
|
create.polynomial.of.random.variable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/00symbolic.002monomial.R
\name{create.polynomial.of.random.variable}
\alias{create.polynomial.of.random.variable}
\title{create.polynomial.of.random.variable}
\usage{
create.polynomial.of.random.variable(e)
}
\arguments{
\item{e}{expression}
}
\value{
list of list representing sum of list like \code{ [ [coeff1, EPS[1]^1], [coeff2, EPS[1]^2] ] }
}
\description{
Convert an expression \code{e} to a polynomial of \code{ETA[i]}'s or \code{EPS[i]}'s as following:\cr
\code{ e --> [ [ coeff, main.monomial] ] }
}
\author{
Mango solutions
}
|
f6416a48ce9724f071b9474bf7d532355e609bd2
|
967867d8b9dc76f7650de576b858c2ed084ba655
|
/ChIPseq_scripts/TF_polII_topGO.R
|
b759cf95a3753194711c31d9445d554ca1a73ee1
|
[] |
no_license
|
lakhanp1/omics_utils
|
5f5f2ae9b840ef15fc0cd1c26325d9a2dbdb8dc5
|
0485f3d659225b127f9c3e5bc53ab3d84c42609b
|
refs/heads/main
| 2023-08-31T12:29:03.308801
| 2023-08-30T20:40:42
| 2023-08-30T20:40:42
| 160,330,402
| 1
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,392
|
r
|
TF_polII_topGO.R
|
## GO (BP) enrichment with topGO for every gene cluster of a TF/polII
## ChIP experiment. For each of four gene sets (all genes, polII-expressed
## genes, TF-bound genes, bound-or-expressed genes) the script writes an
## enrichment table, an Excel worksheet and a per-cluster scatter plot.
library(dplyr)
library(data.table)
library(tibble)
library(ggplot2)
library(scales)
require(XLConnect)
options(java.parameters = "- Xmx4g")  # extra heap for the XLConnect JVM
xlcFreeMemory()
rm(list = ls())
## provides get_topGO_enrichment() and topGO_scatterPlot() used below
source(file = "E:/Chris_UM/Codes/GO_enrichment/topGO_functions.R")
path = "E:/Chris_UM/Analysis/21_CL2017_ChIPmix_ULAS_MIX/ULAS_AN"
setwd(path)
## This script runs GO BP enrichment analysis using the topGO tool for all the clusters
##################################################################################
## get the data
## sample identifiers for this run
TF_profile = "An_laeA_20h_HA"
polII_sample = "An_untagged_20h_polII"
name = "An_laeA_20h_HA"
## gene id -> GO term mapping file consumed by topGO
mapFile = "E:/Chris_UM/Database/A_Nidulans/geneid2go.ANidulans.20171004.map"
## input/output paths derived from the sample names
TF_dataPath = paste0("TF_data/", TF_profile, collapse = "")
clusterFile = paste0(TF_dataPath, "/", name, "_allGenes_clusters.tab", collapse = "")
topGO_path = paste0(TF_dataPath, "/", "topGO", collapse = "")
outPreTopgo_all = paste0(topGO_path, "/", name, "_allGenes", collapse = "")
outPreTopgo_expressed = paste0(topGO_path, "/", name, "_expressedGenes", collapse = "")
outPreTopgo_sm = paste0(topGO_path, "/", name, "_SM_genes", collapse = "")
outPreTopgo_peaks = paste0(topGO_path, "/", name, "_peaksGenes", collapse = "")
outPreTopgo_pkExp = paste0(topGO_path, "/", name, "_pkExpGenes", collapse = "")
## column names in the cluster table flagging polII expression / TF peak
polII_expId = paste("is_expressed(", polII_sample, ")", sep = "")
hasPeakCol = paste("hasPeak(", TF_profile, ")", sep = "")
## cluster assignments: one row per gene with cluster id and flag columns
clusterData = fread(file = clusterFile, sep = "\t",
header = T, stringsAsFactors = F, na.strings = "NA", data.table = F)
if(!dir.exists(topGO_path)){
dir.create(topGO_path)
}
## one workbook collects the enrichment tables of all four gene sets;
## any stale copy is removed first
excelOut = paste0(topGO_path, "/", name, "_topGO.xlsx", collapse = "")
unlink(excelOut, recursive = FALSE, force = FALSE)
exc = loadWorkbook(excelOut , create = TRUE)
# ## for testing purpose with one cluster
# tmpData = expressedDf %>% filter(cluster == "Cluster_7")
#
# ## get GO enrichment table
# goData = get_topGO_enrichment(goMapFile = mapFile, genesOfInterest = tmpData$gene)
#
# topGoScatter = topGO_scatterPlot(df = goData, title = "test plot")
#
# imageWd = (min(max(nchar(as.character(goData$Term))), 80) * 30) * 1.5
# imageHt = max(nrow(goData) * 90, 1500)
# # imageRes = max((imageWd * imageHt / 45000), 250)
# imageRes = max(min(imageWd , imageHt) / 12, 200)
#
# png(filename = "topGO_scatter.png", width = imageWd, height = imageHt, res = imageRes)
# print(topGoScatter)
# dev.off()
## function to generate the plot. to be called inside do()
## Run topGO enrichment for one cluster's genes and save a scatter plot.
##
## gn:  character vector of gene IDs belonging to the cluster
## tt:  plot title prefix (the cluster label is appended on a new line)
## cl:  cluster label; used in the title and in the output file name
## pfx: output path prefix ("<pfx>_<cl>_topGO.png" is written)
##
## Returns a one-row data.frame (height, width, res, count, title, png)
## describing the generated image. When no GO term is enriched the same
## columns are returned (NA dimensions, count = 0, png = NA) so that rows
## from different clusters can still be row-bound inside dplyr::do() --
## the original returned a different column set in that branch, which
## broke the binding whenever any cluster had no enrichment.
topGO_and_plot_asDf = function(gn, tt, cl, pfx){
  tt = paste(tt, cl, sep = "\n")

  goData = get_topGO_enrichment(goMapFile = mapFile, genesOfInterest = gn)

  ## no enriched terms: nothing to plot, return a compatible empty row
  if(nrow(goData) == 0){
    return(data.frame(height = NA, width = NA, res = NA, count = 0,
                      title = tt, png = NA, stringsAsFactors = F))
  }

  topGoScatter = topGO_scatterPlot(df = goData, title = tt)

  ## scale the image to the number of terms and the longest term label
  ht = max(nrow(goData) * 80, 1500)
  wd = (min(max(nchar(as.character(goData$Term))), 80) * 30) * 1.5
  rs = max(min(wd, ht) / 12, 200)

  pngFile = paste(pfx, "_", cl, "_topGO.png", sep = "")

  png(filename = pngFile, width = wd, height = ht, res = rs)
  print(topGoScatter)
  dev.off()

  return(data.frame(height = ht,
                    width = wd,
                    res = rs,
                    count = nrow(goData),
                    title = tt,
                    png = pngFile,
                    stringsAsFactors = F))
}
##################################################################################
## run topGO: all genes
allTitle = paste("GO enrichment using topGO for all genes \n TF:", TF_profile, "and polII:", polII_sample, sep = " ")

## GO enrichment table for all the clusters
goEnrichment_all = clusterData %>%
  group_by(cluster) %>%
  do(get_topGO_enrichment(goMapFile = mapFile, genesOfInterest = .$gene))

## paste0 (not paste, whose default sep is " ") so the file name does not
## contain a stray space before "_topGO.tab"
fwrite(goEnrichment_all, file = paste0(outPreTopgo_all, "_topGO.tab"),
       sep = "\t", row.names = F, col.names = T, quote = F)

## write data to Excel
xlcFreeMemory()
wrkSheet = "allGenes"
createSheet(exc, name = wrkSheet)
createFreezePane(exc, sheet = wrkSheet, 2, 2)
setMissingValue(object = exc, value = "NA")
writeWorksheet(object = exc, data = goEnrichment_all, sheet = wrkSheet, header = T)
setAutoFilter(object = exc, sheet = wrkSheet, reference = aref(topLeft = "A1", dimension = dim(goEnrichment_all)))
xlcFreeMemory()

## generate one scatter plot per cluster
topGOPlots = clusterData %>%
  group_by(cluster) %>%
  do(topGO_and_plot_asDf(gn = .$gene,
                         tt = allTitle,
                         cl = unique(.$cluster),
                         pfx = outPreTopgo_all
  )
  )

fwrite(topGOPlots, file = paste0(outPreTopgo_all, "_topGoStats.tab"),
       sep = "\t", row.names = F, col.names = T, quote = F)
##################################################################################
## run topGO: top 10% expressed genes
expTitle = paste("GO enrichment using topGO for polII expressed genes \n TF:", TF_profile, "and polII:", polII_sample, sep = " ")

## keep only genes flagged as expressed in the polII sample
expressedDf = clusterData %>% filter(UQ(as.name(polII_expId)) == "TRUE")

## GO enrichment table for all the clusters
goEnrichment_exp = expressedDf %>%
  group_by(cluster) %>%
  do(get_topGO_enrichment(goMapFile = mapFile, genesOfInterest = .$gene))

## paste0 (not paste, whose default sep is " ") avoids a space in the path
fwrite(goEnrichment_exp, file = paste0(outPreTopgo_expressed, "_topGO.tab"),
       sep = "\t", row.names = F, col.names = T, quote = F)

## write data to Excel
xlcFreeMemory()
wrkSheet = "expressedGenes"
createSheet(exc, name = wrkSheet)
createFreezePane(exc, sheet = wrkSheet, 2, 2)
setMissingValue(object = exc, value = "NA")
writeWorksheet(object = exc, data = goEnrichment_exp, sheet = wrkSheet, header = T)
setAutoFilter(object = exc, sheet = wrkSheet, reference = aref(topLeft = "A1", dimension = dim(goEnrichment_exp)))
xlcFreeMemory()

## generate one scatter plot per cluster
topGOPlots_exp = expressedDf %>%
  group_by(cluster) %>%
  do(topGO_and_plot_asDf(gn = .$gene,
                         tt = expTitle,
                         cl = unique(.$cluster),
                         pfx = outPreTopgo_expressed
  )
  )

fwrite(topGOPlots_exp, file = paste0(outPreTopgo_expressed, "_topGoStats.tab"),
       sep = "\t", row.names = F, col.names = T, quote = F)
##################################################################################
## run topGO: genes for which peak was called by macs2
peakTitle = paste("GO enrichment using topGO for genes bound by TF \n TF:", TF_profile, "and polII:", polII_sample, sep = " ")

## keep only genes with a called TF peak
peaksDf = clusterData %>% filter(UQ(as.name(hasPeakCol)) == "TRUE")

## GO enrichment table for all the clusters
goEnrichment_peak = peaksDf %>%
  group_by(cluster) %>%
  do(get_topGO_enrichment(goMapFile = mapFile, genesOfInterest = .$gene))

## paste0 (not paste, whose default sep is " ") avoids a space in the path
fwrite(goEnrichment_peak, file = paste0(outPreTopgo_peaks, "_topGO.tab"),
       sep = "\t", row.names = F, col.names = T, quote = F)

## write data to Excel
xlcFreeMemory()
wrkSheet = "tfTargetGenes"
createSheet(exc, name = wrkSheet)
createFreezePane(exc, sheet = wrkSheet, 2, 2)
setMissingValue(object = exc, value = "NA")
writeWorksheet(object = exc, data = goEnrichment_peak, sheet = wrkSheet, header = T)
setAutoFilter(object = exc, sheet = wrkSheet, reference = aref(topLeft = "A1", dimension = dim(goEnrichment_peak)))
xlcFreeMemory()

## generate one scatter plot per cluster
topGOPlots_peak = peaksDf %>%
  group_by(cluster) %>%
  do(topGO_and_plot_asDf(gn = .$gene,
                         tt = peakTitle,
                         cl = unique(.$cluster),
                         pfx = outPreTopgo_peaks
  )
  )

fwrite(topGOPlots_peak, file = paste0(outPreTopgo_peaks, "_topGoStats.tab"),
       sep = "\t", row.names = F, col.names = T, quote = F)
##################################################################################
## run topGO: polII expressed and TF bound genes together
pkExp_title = paste("GO enrichment using topGO for genes bound by TF and expressed in WT polII\n TF:", TF_profile, "and polII:", polII_sample, sep = " ")

## select the genes which are expressed in polII sample OR have TSS peak
pkExpdf = filter_at(.tbl = clusterData, .vars = c(polII_expId, hasPeakCol), .vars_predicate = any_vars(. == "TRUE"))

## GO enrichment table for all the clusters
goEnrichment_pkExp = pkExpdf %>%
  group_by(cluster) %>%
  do(get_topGO_enrichment(goMapFile = mapFile, genesOfInterest = .$gene))

## paste0 (not paste, whose default sep is " ") avoids a space in the path
fwrite(goEnrichment_pkExp, file = paste0(outPreTopgo_pkExp, "_topGO.tab"),
       sep = "\t", row.names = F, col.names = T, quote = F)

## write data to Excel
xlcFreeMemory()
wrkSheet = "peakExpressedGenes"
createSheet(exc, name = wrkSheet)
createFreezePane(exc, sheet = wrkSheet, 2, 2)
setMissingValue(object = exc, value = "NA")
writeWorksheet(object = exc, data = goEnrichment_pkExp, sheet = wrkSheet, header = T)
setAutoFilter(object = exc, sheet = wrkSheet, reference = aref(topLeft = "A1", dimension = dim(goEnrichment_pkExp)))
xlcFreeMemory()

## generate one scatter plot per cluster
topGOPlots_pkExp = pkExpdf %>%
  group_by(cluster) %>%
  do(topGO_and_plot_asDf(gn = .$gene,
                         tt = pkExp_title,
                         cl = unique(.$cluster),
                         pfx = outPreTopgo_pkExp
  )
  )

fwrite(topGOPlots_pkExp, file = paste0(outPreTopgo_pkExp, "_topGoStats.tab"),
       sep = "\t", row.names = F, col.names = T, quote = F)

## flush the accumulated worksheets to disk
saveWorkbook(exc)
|
c6163e73e21379a348ed22443a59a37f06e2d06d
|
2dc56c423107d07d30f7dde3062035f740fde49b
|
/heterozygosity_new_analysis.R
|
9b4fb4f031b05c81bd3394b53a8fcfa327dac6d5
|
[] |
no_license
|
tbilgin/pongo_repeats
|
d1da69381792154ba0f38f3607c50ef521b60e3f
|
d77fa60aaf8ac7ea6d07c2c2f8fceef987c0334c
|
refs/heads/main
| 2023-06-18T07:45:28.051595
| 2021-07-20T14:33:56
| 2021-07-20T14:33:56
| 355,286,765
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,942
|
r
|
heterozygosity_new_analysis.R
|
## Violin plot of genome-wide (Nei) heterozygosity for three orangutan
## (sub-)species. The summary block below assumes the per-window
## pongo_*_heterozygosity data frames already exist in the workspace
## (column V4 holds the heterozygosity estimates).
library(ggplot2)

mean(pongo_abelii_heterozygosity$V4)
t.test(pongo_abelii_heterozygosity$V4)
mean(pongo_pygmaeus_pygmaeus_heterozygosity$V4)
t.test(pongo_pygmaeus_pygmaeus_heterozygosity$V4)
mean(pongo_pygmaeus_morio_heterozygosity$V4)
t.test(pongo_pygmaeus_morio_heterozygosity$V4)

## genome-wide Nei heterozygosity tables, one per (sub-)species
hetPongoAbelii = read.table("~/Desktop/pongo_repeats/alina.voicu/data-from-analyses/heterozygosity-data/het_pongo_abelii_genome_wide_nei.txt", sep = ',',header=FALSE)
hetPongoPygmaeusMorio = read.table("~/Desktop/pongo_repeats/alina.voicu/data-from-analyses/heterozygosity-data/het_pongo_pygmaeus_morio_genome_wide_nei.txt", sep = ',', header=FALSE)
hetPongoPygmaeusPygmaeus = read.table("~/Desktop/pongo_repeats/alina.voicu/data-from-analyses/heterozygosity-data/het_pongo_pygmaeus_pygmaeus_genome_wide_nei.txt", sep = ',', header=FALSE)

heterozygositiesSpecies = list()
heterozygositiesSpecies[["P.abelii"]] = hetPongoAbelii$V4
heterozygositiesSpecies[["P.p.morio"]] = hetPongoPygmaeusMorio$V4
heterozygositiesSpecies[["P.p.pygmaeus"]] = hetPongoPygmaeusPygmaeus$V4

## long-format frame: one row per value, labelled with its (sub-)species.
## lengths() replaces the original sapply(..., length); the redundant
## names(...)[1:length(...)] subset is dropped.
d2 <- data.frame(x = unlist(heterozygositiesSpecies),
                 grp = rep(names(heterozygositiesSpecies),
                           times = lengths(heterozygositiesSpecies)))

## NOTE: the original mapped aes(face = "bold") inside geom_violin();
## "face" is not a ggplot aesthetic and only produced an "ignoring
## unknown aesthetics" warning, so it has been removed.
het_plot <- ggplot(d2, aes(x = grp, y = x)) +
  geom_violin(aes(fill = grp), width = 0.5,
              position = position_dodge(width = .5)) +
  labs(x = NULL) +
  ylab("Heterozygosity") +
  theme(axis.text.x = element_text(face = "bold", size = 10),
        axis.text.y = element_text(face = "bold", size = 9),
        plot.title = element_text(size = 20, face = "bold"),
        axis.title.x = element_text(size = 16, face = "bold"),
        axis.title.y = element_text(size = 16, face = "bold"),
        legend.position = "none")

plot(het_plot)
|
f8d12cb03ef2dea505cec51f10df165bb4ab4dab
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkAboutDialogNew.Rd
|
9fea526a2610c76b4217069331c25e0815a18f97
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 344
|
rd
|
gtkAboutDialogNew.Rd
|
\alias{gtkAboutDialogNew}
\name{gtkAboutDialogNew}
\title{gtkAboutDialogNew}
\description{Creates a new \code{\link{GtkAboutDialog}}.}
\usage{gtkAboutDialogNew(show = TRUE)}
\details{Since 2.6}
\value{[\code{\link{GtkWidget}}] a newly created \code{\link{GtkAboutDialog}}}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
8125bb516c61905a8978fcc1eea6b23a954f117e
|
a9e69d3c4a5590383e6044947b671f8410c5eaf2
|
/R/studentAllocation/inst/shiny/app.R
|
5c6b8f7e6cae67c1fdacd4c194c37b6b2f362f5f
|
[
"MIT"
] |
permissive
|
richarddmorey/studentProjectAllocation
|
4ea9c3293f9dcc299d06c19917f6d312b86a1743
|
d3b58217af85d259c2f5df16719e0061fbe56187
|
refs/heads/master
| 2023-07-19T21:16:09.073385
| 2023-07-11T17:02:17
| 2023-07-11T17:02:17
| 34,754,193
| 27
| 12
|
MIT
| 2023-07-11T17:02:18
| 2015-04-28T20:29:25
|
JavaScript
|
UTF-8
|
R
| false
| false
| 12,597
|
r
|
app.R
|
library(shiny)
library(shinydashboard)
library(shinyjs)
library(shinycssloaders)
# Reactive store shared across server expressions:
#   lect_list / proj_list / stud_list - parsed upload files (NULL until loaded)
#   total_effective_cap / total_students - capacity bookkeeping for the
#     under-capacity warning shown after a run
#   algo_ready - TRUE once all three input files have parsed successfully
#   log - character vector of allocation log messages
#   output_file - path of the zip archive built by create_output_file()
vals <- reactiveValues(lect_list = NULL,
                       proj_list = NULL,
                       stud_list = NULL,
                       total_effective_cap = NULL,
                       total_students = NULL,
                       algo_ready = FALSE,
                       log = NULL,
                       output_file = NULL)
## Package an allocation run into a downloadable zip archive.
##
## allocation_output: result list returned by studentAllocation::spa_student()
## lect_file / proj_file / stud_file: shiny fileInput values (datapath + name)
##   for the three uploaded input files, copied into the archive for provenance
## stud_list: parsed student preference list (used for the student report)
## delim: delimiter used when flattening lists in the neat_* output tables
##
## Returns the path of a temporary zip file containing the lecturer, project
## and student allocation CSVs, any unallocated-student list, and the
## original uploads under original_files/.
create_output_file <- function(allocation_output, lect_file, proj_file, stud_file, stud_list, delim){
  # set up a fresh output directory under the session temp dir
  td = tempdir(check = TRUE)
  save_dir = tempfile(pattern = "allocation_", tmpdir = td)
  if(dir.exists(save_dir))
    unlink(save_dir, recursive = TRUE)
  dir.create(
    file.path(save_dir, "original_files"),
    recursive = TRUE
  )
  # one CSV per point of view: lecturers, projects, students
  lecturer_allocation_fn = file.path(save_dir, "lecturer_allocation.csv")
  rio::export(
    x = studentAllocation::neat_lecturer_output(allocation_output, delim = delim),
    file = lecturer_allocation_fn
  )
  project_allocation_fn = file.path(save_dir, "project_allocation.csv")
  rio::export(
    x = studentAllocation::neat_project_output(allocation_output, delim = delim),
    file = project_allocation_fn
  )
  student_allocation_fn = file.path(save_dir, "student_allocation.csv")
  rio::export(
    x = studentAllocation::neat_student_output(allocation_output, stud_list),
    file = student_allocation_fn
  )
  # only written when some students could not be allocated
  if(length(allocation_output$unallocated)){
    unallocated_students_fn = file.path(save_dir, "unallocated_students.txt")
    cat(file = unallocated_students_fn, sep = "\n",
        allocation_output$unallocated
    )
  }
  ## Copy original files over (provenance: inputs travel with the results)
  original_file_paths = c( lect_file$datapath,
                           proj_file$datapath,
                           stud_file$datapath )
  file.copy(original_file_paths,
            file.path(save_dir, "original_files",
                      c(lect_file$name,
                        proj_file$name,
                        stud_file$name))
  )
  # zip up contents
  zip_file = tempfile(tmpdir = td, pattern = "allocation_", fileext = ".zip")
  zip::zipr(zipfile = zip_file, files = save_dir)
  # Clean up folder (the zip is all the caller needs)
  if(dir.exists(save_dir))
    unlink(save_dir, recursive = TRUE)
  return( zip_file )
}
# Define UI for data upload app ----
# Dashboard layout: sidebar holds algorithm options; the main body is a
# tab box with intro/help, one upload tab per input file, and the
# allocation results tab (download link, re-run button, log viewer).
ui <- dashboardPage(
  dashboardHeader(title = "Project allocation",
                  tags$li(a(href = 'https://github.com/richarddmorey/studentProjectAllocation',
                            target = "_blank",
                            icon("github"),
                            title = "GitHub repository for this app"),
                          class = "dropdown")),
  # Sidebar layout with input and output definitions ----
  # Options are read (isolated) by the server when the allocation runs
  dashboardSidebar(
    checkboxInput("opt_randomize", "Randomize before", FALSE),
    checkboxInput("opt_distribute", "Distribute unallocated", TRUE),
    textInput("neat_delim", "Output delimiter", studentAllocation::pkg_options()$neat_delim, width = "5em" ),
    numericInput("opt_max_time", "Time limit (s)", 15, min = 1, max = 60, step = 1),
    numericInput("opt_max_iters", "Iteration limit", 0, min = 0, step = 25)
  ),
  # Main panel for displaying outputs ----
  # Tab labels are rendered server-side so they can show readiness markers
  dashboardBody(
    useShinyjs(),
    tabBox( width = 12,
            tabPanel(HTML("Introduction ▶"), htmlOutput("intro")),
            tabPanel(uiOutput("lecturers_tab_label"),
                     fileInput("lect_file", "Choose lecturers file",
                               multiple = FALSE,
                               accept = "text/plain"),
                     verbatimTextOutput("lect_check"),
                     htmlOutput("lecturer_help"),
            ),
            tabPanel(uiOutput("projects_tab_label"),
                     fileInput("proj_file", "Choose projects file",
                               multiple = FALSE,
                               accept = "text/plain"),
                     verbatimTextOutput("proj_check"),
                     htmlOutput("projects_help"),
            ),
            tabPanel(uiOutput("students_tab_label"),
                     fileInput("stud_file", "Choose students file",
                               multiple = FALSE,
                               accept = "text/plain"),
                     verbatimTextOutput("stud_check"),
                     htmlOutput("students_help"),
            ),
            tabPanel(uiOutput("allocation_tab_label"),
                     withSpinner(htmlOutput("algo_output")),
                     p(),
                     # hidden until a run completes; toggled by the server
                     hidden(
                       div(
                         id = "download_all_div",
                         downloadLink('download_output', HTML('⭐ Download allocation output⤵')),
                         br(), br(),
                         actionButton("rerun_allocation", HTML("⟳ Re-run allocation")),
                         hr(),
                         actionButton("toggle_log", "Show/hide log"),
                         hidden(
                           div( id = "log_div",
                                verbatimTextOutput("log_text"),
                                tags$head(tags$style("#log_text{
                              overflow-y:scroll; background: ghostwhite; max-height: 30em;}"))
                           )
                         )
                       )
                     ),
            ),
            tabPanel(HTML("ⓘ Options help"), htmlOutput("options_help"))
    )
  )
)
# Define server logic to read selected file ----
# Parses the three uploaded files into `vals`, runs the SPA-student
# allocation on demand and exposes the results (summary, zip download, log).
server <- function(input, output, session) {

  # start with the sidebar collapsed
  addClass(selector = "body", class = "sidebar-collapse")

  # tab labels carry unicode markers reflecting load/readiness state
  output$allocation_tab_label <- renderUI({
    if(vals$algo_ready){
      return(HTML("🟢 Allocation"))
    }else{
      return(HTML("❌ Allocation"))
    }
  })

  output$lecturers_tab_label <- renderUI({
    if(is.list(vals$lect_list)){
      return(HTML("✅ Lecturers ▶"))
    }else{
      return(HTML("  Lecturers  "))
    }
  })

  output$projects_tab_label <- renderUI({
    if(is.list(vals$proj_list)){
      return(HTML("✅ Projects ▶"))
    }else{
      return(HTML("  Projects  "))
    }
  })

  output$students_tab_label <- renderUI({
    if(is.list(vals$stud_list)){
      return(HTML("✅ Students ▶"))
    }else{
      return(HTML("  Students  "))
    }
  })

  observeEvent(input$toggle_log,{
    shinyjs::toggle("log_div")
  })

  # hide the log pane whenever the log is cleared
  observeEvent(vals$log,{
    if(is.null(vals$log))
      shinyjs::hide("log_div")
  })

  # serve the zip built by create_output_file(); no-op until a run finished
  output$download_output <- downloadHandler(
    filename = function() {
      paste('allocation-', Sys.Date(), '.zip', sep='')
    },
    content = function(con) {
      fn = vals$output_file
      if(!is.null(fn)){
        if(file.exists(fn)){
          if(file.size(fn)>0){
            file.copy(fn, con)
          }
        }
      }else{
        return(NULL)
      }
    }
  )

  # static help pages, read from bundled html fragments
  output$intro <- renderUI({
    x <- paste0(readLines("include/html/intro.html"))
    return(HTML(x))
  })
  output$lecturer_help <- renderUI({
    x <- paste0(readLines("include/html/lecturers.html"))
    return(HTML(x))
  })
  output$students_help <- renderUI({
    x <- paste0(readLines("include/html/students.html"))
    return(HTML(x))
  })
  output$projects_help <- renderUI({
    x <- paste0(readLines("include/html/projects.html"))
    return(HTML(x))
  })
  output$options_help <- renderUI({
    x <- paste0(readLines("include/html/options.html"))
    return(HTML(x))
  })

  # parse the lecturers file; on failure reset readiness and re-raise
  output$lect_check <- renderText({
    req(input$lect_file)
    tryCatch(
      {
        lect_list <- studentAllocation::read_lecturer_file(input$lect_file$datapath)
      },
      error = function(e) {
        vals$algo_ready = FALSE
        vals$log = NULL
        vals$lect_list = NULL
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    vals$lect_list = lect_list
    if( is.list(vals$lect_list) &
        is.list(vals$proj_list) &
        is.list(vals$stud_list)
    ) vals$algo_ready = TRUE
    return(
      paste( length(lect_list), "lecturer preferences loaded.")
    )
  })

  # parse the projects file; on failure reset readiness and re-raise
  output$proj_check <- renderText({
    req(input$proj_file)
    tryCatch(
      {
        proj_list <- studentAllocation::read_project_file(input$proj_file$datapath)
      },
      error = function(e) {
        vals$algo_ready = FALSE
        vals$log = NULL
        vals$proj_list = NULL
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    vals$proj_list = proj_list
    if( is.list(vals$lect_list) &
        is.list(vals$proj_list) &
        is.list(vals$stud_list)
    ) vals$algo_ready = TRUE
    return(
      paste( length(proj_list), "project definitions loaded.")
    )
  })

  # parse the students file; also records the headcount for the
  # under-capacity warning shown after a run
  output$stud_check <- renderText({
    req(input$stud_file)
    tryCatch(
      {
        stud_list <- studentAllocation::read_student_file(input$stud_file$datapath)
      },
      error = function(e) {
        vals$algo_ready = FALSE
        vals$log = NULL
        vals$stud_list = NULL
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    vals$stud_list = stud_list
    vals$total_students = length(stud_list)
    if( is.list(vals$lect_list) &
        is.list(vals$proj_list) &
        is.list(vals$stud_list)
    ) vals$algo_ready = TRUE
    return(
      paste( length(stud_list), "student preferences loaded.")
    )
  })

  # run the allocation and render the summary line
  output$algo_output <- renderUI({
    validate(need(vals$algo_ready, "Upload the required files under the tabs to the left."))

    # BUG FIX: the original checked vals$student_list / vals$lecturer_list /
    # vals$project_list, which do not exist (the reactive values are named
    # stud_list / lect_list / proj_list), so check_input_lists() was always
    # handed NULLs and the validation never saw the real inputs.
    val_input = try(
      studentAllocation::check_input_lists(vals$stud_list, vals$lect_list, vals$proj_list ),
      silent = TRUE
    )
    validate(need(!inherits(val_input, "try-error"), val_input))

    studentAllocation::pkg_options(print_log = TRUE)
    shinyjs::hide("download_all_div")

    # Add dependency on re-run button
    input$rerun_allocation

    ctx = V8::v8()
    start_time = Sys.time()
    tryCatch(
      {
        algo_output <- studentAllocation::spa_student(
          vals$stud_list,
          vals$lect_list,
          vals$proj_list,
          randomize = isolate(input$opt_randomize),
          distribute_unallocated = isolate(input$opt_distribute),
          time_limit = isolate(input$opt_max_time),
          iteration_limit = ifelse(isolate(input$opt_max_iters) < 1,
                                   Inf,
                                   isolate(input$opt_max_iters)),
          ctx = ctx
        )
      },
      error = function(e) {
        # salvage the JS-side log so the user can inspect the failure
        x = try(ctx$get("s.log"), silent = TRUE)
        if(!inherits(x, "try-error")){
          vals$log = x$message
        }
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    end_time = Sys.time()

    vals$algo_done = TRUE
    vals$total_effective_cap = sum(studentAllocation::effective_capacity(vals$lect_list, vals$proj_list))
    vals$output_file = create_output_file(
      allocation_output = algo_output,
      lect_file = input$lect_file,
      proj_file = input$proj_file,
      stud_file = input$stud_file,
      stud_list = vals$stud_list,
      delim = input$neat_delim
    )

    shinyjs::show("download_all_div")
    vals$log = algo_output$log$message

    summary_string = paste0("<p> Performed ", algo_output$iterations,
                            " iterations in ", round(as.numeric(end_time - start_time), 3), " seconds. ",
                            " There are ", length(algo_output$unallocated), " unallocated students. ")
    if(input$opt_distribute){
      summary_string = paste0(
        summary_string,
        length(algo_output$unallocated_after_SPA),
        " students (",
        round(100 * (length(algo_output$unallocated_after_SPA) - length(algo_output$unallocated)) / length(vals$stud_list))
        ,"%) ",
        " were assigned random projects."
      )
    }
    if(vals$total_effective_cap < vals$total_students){
      summary_string = paste0(
        summary_string,
        "<p> 🔴 There were ", vals$total_students,
        " total students but the total effective capacity of lecturers ",
        "(taking into account capacity of projects as well) was only ",
        vals$total_effective_cap, " spaces. "
      )
    }
    return(HTML(summary_string))
  })

  # log viewer with zero-padded line numbers
  output$log_text <- renderText({
    req(vals$log)
    l = length(vals$log)
    # nchar(l) gives the correct pad width (the original ceiling(log10(l))
    # yielded 0 for l = 1 and 1 for l = 10, misaligning line numbers)
    digits = nchar(as.character(l))
    fmt = paste0("%0",digits,"d")
    lineno = sprintf(fmt, 1:l)
    paste(paste(lineno, " ", vals$log), collapse = "\n")
  })
}
# Create Shiny app ----
shinyApp(ui, server)
|
724e2f6cf3059c179081c216f76c70b768016624
|
ea27f667dac71c3ce659e3ec7531a190e65e2d6b
|
/scripts/b2_gez_tcc_2010_zonal.R
|
8214ec6192ad12464db86fc9d8ae86f02e7cf0bd
|
[] |
no_license
|
lecrabe/tcc_gez_2010
|
58ec36a0a91be08d7a14250fa39abcd3d387e482
|
e88638de99e5d49ac7eaf125824cec43e52c27e6
|
refs/heads/master
| 2022-12-02T07:54:59.263716
| 2020-08-20T07:48:46
| 2020-08-20T07:48:46
| 288,852,391
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,688
|
r
|
b2_gez_tcc_2010_zonal.R
|
#################### SKIP IF OUTPUTS EXISTS ALREADY
## Zonal statistics of the 2010 tree-canopy-cover (TCC) product by FAO
## global ecological zone (GEZ), clipped to the country boundary.
## Everything below shells out to GDAL / OFT command line tools and is
## skipped entirely when the final statistics file already exists.
if(!file.exists(tcc_stats_file)){

  #############################################################
  ### RASTERIZE COUNTRY BOUNDARIES ON TCC2010 PRODUCT
  #############################################################
  system(sprintf("oft-rasterize_attr.py -v %s -i %s -o %s -a %s",
                 aoi_shp,
                 tcc_country_file,
                 aoi_tif,
                 aoi_field
  ))

  #############################################################
  #################### COMBINATION BOUNDARIES AND TCC2010
  #############################################################
  ## outside the AOI (B==0) pixels become 123, inside they keep TCC values
  system(sprintf("gdal_calc.py -A %s -B %s --co COMPRESS=LZW --overwrite --outfile=%s --calc=\"%s\"",
                 tcc_country_file,
                 aoi_tif,
                 tcc_clip_file,
                 paste0("(B==0)*123+(B>0)*A")
  ))

  #############################################################
  ################### REPROJECT IN EA PROJECTION
  #############################################################
  system(sprintf("gdalwarp -t_srs \"%s\" -overwrite -ot Byte -dstnodata none -co COMPRESS=LZW %s %s",
                 proj_ea ,
                 tcc_clip_file,
                 tcc_proj_file
  ))

  #############################################################
  #################### TAKE TCC2010 AS ALIGNMENT GRID
  #############################################################
  ## hoist the grid parameters once instead of re-reading the raster
  ## header for every sprintf() argument (the original assigned these
  ## variables but then never used them)
  mask   <- tcc_proj_file
  proj   <- proj4string(raster(mask))
  extent <- extent(raster(mask))
  res    <- res(raster(mask))

  #############################################################
  #################### ALIGN GEZ WITH TCC2010
  #############################################################
  input  <- all_gez
  output <- country_gez

  system(sprintf("gdalwarp -co COMPRESS=LZW -t_srs \"%s\" -te %s %s %s %s -tr %s %s %s %s -overwrite",
                 proj,
                 extent@xmin,
                 extent@ymin,
                 extent@xmax,
                 extent@ymax,
                 res[1],
                 res[2],
                 input,
                 output
  ))

  #############################################################
  ### COMPUTE ZONAL STATISTICS
  #############################################################
  system(sprintf("oft-zonal_large_list.py -um %s -i %s -o %s -a %s",
                 country_gez,
                 tcc_proj_file,
                 tcc_stats_file,
                 aoi_field
  ))
}
|
f2285eab775d8233cce5fd0065aa318fa460a4df
|
1546dc0f386964ec29703e1441595452bbb11385
|
/I-dataset/potential_cal/CA_atoms.r
|
64c4e065f50d902fb401a7fcdf7efeb91eee8c3d
|
[
"MIT"
] |
permissive
|
sagarnikam123/bioinfoProject
|
b86eaf82a8744d4d48772cbd1c4eb25818351fb8
|
3164e82704a28248fd796026bc37f1c681c3cddb
|
refs/heads/master
| 2022-02-06T11:42:58.027408
| 2022-01-26T07:43:18
| 2022-01-26T07:43:18
| 14,169,704
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,238
|
r
|
CA_atoms.r
|
# Identify PDB files whose CA-only records are corrupted: for each file
# the residue sequence length must match the number of phi torsion
# angles; a mismatch flags the id as corrupted and it is written out.
rm(list = ls())  # remove previous objects (kept from original script)

library(bio3d)

corrupted_id <- NULL   # collects CA-inconsistent PDB ids
bengali_joker <- NULL  # collects chain-break ids (currently unused)

gangster <- "C:\\Users\\exam\\Desktop\\pdb_chain_veer\\"  # input folder
farebi <- list.files(gangster)

# loop over every PDB file in the folder
for(daman in farebi){
  pdb_id <- paste(substr(daman, 1, 4), ".pdb", sep = "")
  chainiya <- paste(gangster, daman, sep = "")
  pdb <- read.pdb(chainiya, maxlines = 1000000, verbose = FALSE)
  seq <- seq.pdb(pdb)
  ttr <- torsion.pdb(pdb)
  phi <- ttr$phi
  psi <- ttr$psi
  # checking for CA: sequence/torsion length mismatch marks corruption
  if(length(seq) != length(phi)){
    corrupted_id <- append(corrupted_id, pdb_id)
    # fixed: the original printed the undefined variable 'pdb_file_name'
    # and misspelled the 'sep' argument as 'spe'
    print(paste("------------------------------------Corruption--", pdb_id, sep = ""))
  }
}

# writing corrupted ids (the original line was missing the leading '#',
# which made the whole script a syntax error)
write.table(corrupted_id, file = "C:\\Users\\exam\\Desktop\\Halwa\\BADMASHI\\corrupted_id.txt", col.names = F, row.names = F)
print(corrupted_id)

# writing breaked chain ids
#write.table(bengali_joker,file="C:\\Users\\exam\\Desktop\\Halwa\\BADMASHI\\chain_breaks_shripati.txt",col.names=F,row.names=F)
#print(bengali_joker)

rm(corrupted_id)
rm(bengali_joker)
|
4ca0d67517c98e20bca1f0d685692ca8af1197f8
|
9252afa6febef3b46823e0af354f6ecad70a7c7b
|
/best.R
|
6a8016c41b0a0a51d3c1047766e186ae5770f4e4
|
[] |
no_license
|
Ujwala89/myRSourceCode
|
06563c1d18dd10c26292141aac5c654aa16635a4
|
066ffd639d570fbf5ed68cfbcee285815787c77b
|
refs/heads/master
| 2020-09-14T11:38:47.623036
| 2016-09-12T04:18:24
| 2016-09-12T04:18:24
| 67,973,176
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,258
|
r
|
best.R
|
## Return the name of the hospital in `state` with the lowest 30-day
## mortality for `outcome` ("heart attack", "heart failure" or
## "pneumonia"). Ties are broken alphabetically by hospital name.
## Reads "outcome-of-care-measures.csv" from the working directory;
## stops with "invalid state" / "invalid outcome" on bad arguments.
best <- function(state, outcome) {
  ## validate state (two-letter US state abbreviation)
  if (!(state %in% state.abb)) {
    stop("invalid state")
  }

  ## validate outcome and map it to its column index in the csv
  outcomes <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!(outcome %in% names(outcomes))) {
    stop("invalid outcome")
  }
  outcome_indx <- outcomes[outcome]

  ## read only hospital name (col 2), state (col 7) and the outcome
  ## column; coerce the rate to numeric and drop rows where it is NA
  select_columns <- c(2, 7, outcome_indx)
  df <- read.csv("outcome-of-care-measures.csv", colClasses = "character")[, select_columns]
  names(df) <- c("Hospital", "State", "outcome")
  df[, 3] <- as.numeric(df[, 3])
  df <- df[complete.cases(df[, c(3)]), ]

  ## restrict to the requested state
  df_state <- df[df$State == state, ]

  ## rows achieving the minimum death rate, ordered by hospital name
  r1 <- df_state[which(df_state$outcome == min(df_state$outcome, na.rm = TRUE)), ]
  r2 <- r1[order(r1$Hospital), ]

  ## first row = alphabetically first hospital with the minimum rate
  r2[1, 1]
}
|
fb9f78ef3db03592fde398f3fee81e0eb3410665
|
3e674458be7851429abf1c92fc2215dab3622104
|
/ui.R
|
63523966ea3e665c484e902afa887da60c2caec9
|
[] |
no_license
|
clavell/ageguess
|
30bf2f5a3db2567b6f2a44455a6cc2804eea978c
|
5e427a676cf542b1bf6d9b4d30e8d20f93967300
|
refs/heads/master
| 2020-09-13T19:20:56.755847
| 2017-07-18T06:17:23
| 2017-07-18T06:17:23
| 94,464,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 878
|
r
|
ui.R
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyUI(fluidPage(
# Application title
titlePanel("Accurate age guesser"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
h5("This application will guess your age based on your date of birth!"),
h5("Please enter your date of birth and push submit!"),
h5("If you would like us to guess your age as if it were a
different point in time, change today's date!"),
dateInput("today", label = "Today's date"),
dateInput("dob",label="Date of birth"),
actionButton("goButton","Go")
),
# Show a plot of the generated distribution
mainPanel(
h4(textOutput("Age"))
)
)
))
|
44386f6614ed8f924eaba219e713f865eafdc459
|
4951e7c534f334c22d498bbc7035c5e93c5b928d
|
/developers/jones_prec.R
|
746ee83a18b1fcc40bea4be3a25d49af3680ad4b
|
[] |
no_license
|
Derek-Jones/ESEUR-code-data
|
140f9cf41b2bcc512bbb2e04bcd81b5f82eef3e1
|
2f42f3fb6e46d273a3803db21e7e70eed2c8c09c
|
refs/heads/master
| 2023-04-04T21:32:13.160607
| 2023-03-20T19:19:51
| 2023-03-20T19:19:51
| 49,327,508
| 420
| 50
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,124
|
r
|
jones_prec.R
|
#
# jones_prec.R, 2 Apr 20
# Data from:
# Developer Beliefs about Binary Operator Precedence
# Derek M. Jones
#
# Example from:
# Evidence-based Software Engineering: based on the publicly available data
# Derek M. Jones
#
# TAG experiment_human developer_belief operator_precedence source-code
source("ESEUR_config.r")
library("BradleyTerry2")
pal_col=rainbow(2)
# subj_num,first_op,second_op,prec_order,is_correct,years_exp
# prec_order: first_high=1, equal_prec=2, second_high=3
bin_prec=read.csv(paste0(ESEUR_dir, "developers/jones_bin_prec.csv.xz"), as.is=TRUE)
# Remove equal precedence
nodraws=subset(bin_prec, prec_order != 2)
# Map actual relative precedence and correctness of question back to
# answer given.
first_wl=matrix(data=c(0, 1,
0, 0,
1, 0), nrow=2, ncol=3)
second_wl=matrix(data=c(1, 0,
0, 0,
0, 1), nrow=2, ncol=3)
nodraws$first_wl=first_wl[cbind(1+nodraws$is_correct, nodraws$prec_order)]
nodraws$second_wl=second_wl[cbind(1+nodraws$is_correct, nodraws$prec_order)]
prec_BT=BTm(cbind(first_wl, second_wl), first_op, second_op, data=nodraws)
# summary(prec_BT)
t=BTabilities(prec_BT)
bt=data.frame(ability=t[, 1], se=t[, 2])
bt=bt[order(bt$ability), ]
plot(-5, type="n",
xlim=c(-2.5, 1.5), ylim=c(1, nrow(bt)),
yaxt="n",
xlab=expression(beta), ylab="Operator\n")
axis(2, at=1:nrow(bt), label=rownames(bt))
dum=sapply(1:nrow(bt), function(X)
{
lines(c(-bt$se[X], bt$se[X])+bt$ability[X], c(X, X), col=pal_col[2])
lines(c(bt$ability[X], bt$ability[X]), c(-0.1, 0.1)+X, col=pal_col[1])
})
# plot(qvcalc(BTabilities(prec_BT)), col=point_col,
# main="",
# xlab="Operator", ylab="Relative order")
# Check for a home team effect, i.e., a preference for the
# operator appearing first/second.
# library("dply")
#
# # Sum all cases where each operator appeared first and 'won' against
# # particular second operators.
# all_first_wins=function(df)
# {
# second_looses=function(df)
# {
# return(sum(df$first_wl))
# }
#
# return(ddply(df, .(second_op), second_looses))
# }
#
#
# # Sum all cases where each operator appeared second and 'won' against
# # particular first operators.
# all_second_wins=function(df)
# {
# first_looses=function(df)
# {
# return(sum(df$second_wl))
# }
#
# return(ddply(df, .(first_op), first_looses))
# }
#
#
#
# first_wins=ddply(nodraws, .(first_op), all_first_wins)
# first_wins$first_wl=first_wins$V1
# first_wins$V1=NULL
#
# second_wins=ddply(nodraws, .(second_op), all_second_wins)
# second_wins$second_wl=second_wins$V1
# second_wins$V1=NULL
#
# # Combine first and second 'wins'
# first_second=merge(first_wins, second_wins, all=TRUE)
#
# prec_BT=BTm(cbind(first_wl, second_wl), first_op, second_op,
# data=first_second, id="op")
#
# summary(prec_BT)
#
# # Update model with is first, possible (dis)advantage, information
# first_second$first_op=data.frame(op = first_second$first_op, is_first = 1)
# first_second$second_op=data.frame(op = first_second$second_op, is_first = 0)
#
# ord_prec_BT=update(prec_BT, formula= ~ op+is_first)
# summary(ord_prec_BT)
#
|
8131359f0246e545161f78e665cd70da14609c8f
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/genio/man/read_plink.Rd
|
235e5b7eddff8e7471353811df9277712f8d1e7f
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,832
|
rd
|
read_plink.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_plink.R
\name{read_plink}
\alias{read_plink}
\title{Read genotype and sample data in a plink BED/BIM/FAM file set.}
\usage{
read_plink(file, verbose = TRUE)
}
\arguments{
\item{file}{Input file path, without extensions (each of .bed, .bim, .fam extensions will be added automatically as needed).
Alternatively, input file path may have .bed extension (but not .bim, .fam, or other extensions).}
\item{verbose}{If TRUE (default) function reports the paths of the files being read (after autocompleting the extensions).}
}
\value{
A named list with items in this order: X (genotype matrix), bim (tibble), fam (tibble).
X has row and column names corresponding to the \code{id} values of the bim and fam tibbles.
}
\description{
This function reads a genotype matrix (X) and its associated locus (bim) and individual (fam) data tables in the three plink files in BED, BIM, and FAM formats, respectively.
All inputs must exist or an error is thrown.
This function is a wrapper around the more basic functions
\code{\link{read_bed}},
\code{\link{read_bim}},
\code{\link{read_fam}}.
Below suppose there are \eqn{m} loci and \eqn{n} individuals.
}
\examples{
# first get path to BED file
file <- system.file("extdata", 'sample.bed', package = "genio", mustWork = TRUE)
# read genotypes and annotation tables
plink_data <- read_plink(file)
# genotypes
plink_data$X
# locus annotations
plink_data$bim
# individual annotations
plink_data$fam
# the same works without .bed extension
file <- sub('\\\\.bed$', '', file) # remove extension
# it works!
plink_data <- read_plink(file)
}
\seealso{
\code{\link{read_bed}},
\code{\link{read_bim}},
\code{\link{read_fam}}.
Plink BED/BIM/FAM format reference:
\url{https://www.cog-genomics.org/plink/1.9/formats}
}
|
9956205b27751f6e631d0b3a3bec2de80c02ed75
|
b200d0f16ff7e6bbe72600f8610eae97305f1571
|
/R Analytics.R
|
e7fdd1544a242d6f2b021e64951c483de6d704e4
|
[] |
no_license
|
datacodebr/Ciencia_dos_Dados
|
bede5b5aec6de688c715e128df5b21692f109d40
|
249b836f0ca8f9584a9f8421f40a0b95ea3d49a4
|
refs/heads/master
| 2020-11-28T12:05:10.917217
| 2019-12-23T19:13:24
| 2019-12-23T19:13:24
| 229,808,474
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,043
|
r
|
R Analytics.R
|
####################################################################################
#
# Primeiros Passos na Linguagem R
#
#
####################################################################################
#Avisos Paroquiais
### Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/R/RAnalytics")
getwd()
### Nome dos Contributors
contributors()
##### Licença
license()
### Informações sobre a sessão
sessionInfo()
### Imprimir na tela
print('Cientista de Dados - O profissional mais sexy do séc. XXI')
### Criar gráficos
plot(1:25)
### Instalar pacotes
install.packages('randomForest')
install.packages('ggplot2')
install.packages("dplyr")
install.packages("devtools")
### Carregar o pacote
library(ggplot2)
### Descarregar o pacote
detach(package:ggplot2)
### Se souber o nome da função
help(mean)
?mean
### Para buscar mais opções sobre uma função, use o pacote SOS
install.packages("sos")
library(sos)
findFn("fread")
### Se não souber o nome da função
help.search('randomForest')
help.search('matplot')
??matplot
RSiteSearch('matplot')
example('matplot')
### Sair
q()
##########################################################################
### Operadores
##########################################################################
### Operadores Básicos, Relacionais e Lógicos em R
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### Operadores Básicos
### Soma
7 + 7
### Subtração
7 - 4
### Multiplicação
5 * 5
### Divisão
6 / 6
### Potência
3^2
3**2
### Módulo
16 %% 3
### Operadores Relacionais
### Atribuindo variáveis
x = 7
x <- 7
y = 5
### Operadores
x > 8
x < 8
x <= 8
x >= 8
x == 8
x != 8 # sinal de diferença
### Operadores lógicos
### And
(x==8) & (x==6)
(x==7) & (x>=5)
(x==8) & (x==7)
### Or
(x==8) | (x>5)
(x==8) | (x>=5)
### Not
x > 8
print(!x > 8)
##########################################################################
### VARIAVEIS
##########################################################################
### Variáveis em R
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### Criando Variáveis
var1 = 100
var1
mode(var1)
help("mode")
sqrt(var1)
### Podemos atribuir o valor de uma variável a outra variável
var2 = var1
var2
mode(var2)
typeof(var2)
help("typeof")
### Uma variável pode ser uma lista de elementos
var3 = c("primeiro", "segundo", "terceiro")
var3
mode(var3)
### Uma variável pode ser uma função
var4 = function(x) {x+3}
var4
mode(var4)
### Podemos também mudar o modo do dado.
var5 = as.character(var1)
var5
mode(var5)
### Atribuindo valores a objetos
x <- c(1,2,3)
x
x1 = c(1,2,3)
x1
c(1,2,3) -> y
y
assign("x", c(6.3,4,-2))
x
### Verificando o valor em uma posição específica
x[1]
### Verificar objetos
ls()
objects()
### Remover objetos
rm(x)
x
##########################################################################
### TIPOS DE DADOS
##########################################################################
### Tipos Básicos de Dados em R
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### Numeric - Todos os números criados em R são do modo numeric
### São armazenados como números decimais (double)
num1 <- 7
num1
class(num1)
mode(num1)
typeof(num1)
num2 = 16.82
num2
mode(num2)
typeof(num2)
### Integer
### Convertemos tipos numeric para integer
is.integer(num2)
y = as.integer(num2)
y
class(y)
mode(y)
typeof(y)
as.integer('3.17')
as.integer("Joe")
as.integer('Joe')
as.integer(TRUE)
as.integer(FALSE)
as.integer('TRUE')
### Character
char1 = 'A'
char1
mode(char1)
typeof(char1)
char2 = "cientista"
char2
mode(char2)
typeof(char2)
char3 = c("Ciência", "dos", "Dados")
char3
mode(char3)
typeof(char3)
### Complex
compl = 2.5 + 4i
compl
mode(compl)
typeof(compl)
sqrt(-1)
sqrt(-1+0i)
sqrt(as.complex(-1))
### Logic
x = 1; y = 2
z = x > y
z
class(z)
u = TRUE; v = FALSE
class(u)
u & v
u | v
!u
### Operações com 0
5/0
0/5
### Erro
'Joe'/5
#########################################################################################
#########################################################################################
### Tipos Avançados de Dados em R
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### Vetor: possui 1 dimensão e 1 tipo de dado
vetor1 <- c(1:20)
vetor1
length(vetor1)
mode(vetor1)
class(vetor1)
typeof(vetor1)
### Matriz: possui 2 dimensões e 1 tipo de dado
matriz1 <- matrix(1:20, nrow = 2)
matriz1
length(matriz1)
mode(matriz1)
class(matriz1)
typeof(matriz1)
### Array: possui 2 ou mais dimensões e 1 tipo de dado
array1 <- array(1:5, dim = c(3,3,3))
array1
length(array1)
mode(array1)
class(array1)
typeof(array1)
### Data Frames: dados de diferentes tipos
### Maneira mais fácil de explicar data frames: é uma matriz com diferentes tipos de dados
View(iris)
length(iris)
mode(iris)
class(iris)
typeof(iris)
### Listas: coleção de diferentes objetos
### Diferentes tipos de dados são possíveis e comuns
lista1 <- list(a = matriz1, b = vetor1)
lista1
length(lista1)
mode(lista1)
class(lista1)
typeof(lista1)
### Funções também são vistas como objetos em R
func1 <- function(x) {
var1 <- x * x
return(var1)
}
func1(5)
class(func1)
### Removendo objetos
objects()
rm( func1)
objects()
##########################################################################
### VETORES
##########################################################################
### Vetores, Operações com Vetores e Vetores Nomeados
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### Vetor de strings
vetor_caracter = c("Data", "Science", "é tudo")
vetor_caracter
### Vetor de floats
vetor_numerico = c(1.90, 45.3, 300.5)
vetor_numerico
### Vetor de valores complexos
vetor_complexo = c(5.2+3i, 3.8+4i)
vetor_complexo
### Vetor de valores lógicos
vetor_logico = c(TRUE, FALSE, TRUE, FALSE, FALSE)
vetor_logico
### Vetor de números inteiros
vetor_integer = c(2, 4, 6)
vetor_integer
### Utilizando seq()
vetor1 = seq(1:100)
vetor1
is.vector(vetor1)
### Utilizando rep()
vetor2 = rep(1:5)
vetor2
is.vector(vetor2)
### Indexação de vetores
a <- c(1,2,3,4,5)
a
a[1]
a[6]
b <- c("Data", "Science", "é tudo!")
b
b[1]
b[2]
b[3]
b[4]
### Combinando vetores
v1 = c(2, 3, 5)
v2 = c("aa", "bb", "cc", "dd", "ee")
c(v1, v2)
### Operações com Vetores
x = c(1, 3, 5, 7)
y = c(2, 4, 6, 8)
x * 5
x + y
x - y
x * y
x / y
### Somando vetores com números diferentes de elementos
alfa = c(10, 20, 30)
beta = c(1, 2, 3, 4, 5, 6, 7, 8, 9)
alfa + beta
### Vetor Nomeado
v = c("Nelson", "Rubens")
v
names(v) = c("Nome", "Sobrenome")
v
v["Nome"]
v["Sobrenome"]
##########################################################################
### MATRIZES
##########################################################################
### Matrizes, Operações com Matrizes e Matrizes Nomeados
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### Criando Matrizes
### Número de Linhas
matrix (c(1,2,3,4,5,6), nr = 2)
matrix (c(1,2,3,4,5,6), nr = 3)
matrix (c(1,2,3,4,5,6), nr = 6)
### Número de Colunas
matrix (c( 1,2,3,4,5,6), nc = 2)
### Help
?matrix
### Matrizes precisam ter um número de elementos que seja múltiplo do número de linhas
matrix (c(1,2,3,4,5), nc = 2)
### Criando matrizes a partir de vetores e preenchendo a partir das linhas
meus_dados = c(1:10)
meus_dados
matrix(data = meus_dados, nrow = 5, ncol = 2, byrow = T)
matrix(data = meus_dados, nrow = 5, ncol = 2)
### Fatiando a Matriz
mat <- matrix(c(2,3,4,5), nr = 2)
mat
mat[1,2]
mat[2,2]
mat[1,3]
mat[,2]
### Criando uma matriz diagonal
matriz = 1:3
diag(matriz)
### Extraindo vetor de uma matriz diagonal
vetor = diag(matriz)
diag(vetor)
### Transposta da matriz
W <- matrix (c(2,4,8,12 ), nr = 2, ncol = 2)
W
t(W)
U <- t(W)
U
### Obtendo uma matriz inversa
W
solve(W)
### Multiplicação de Matrizes
mat1 <- matrix(c(2,3,4,5), nr = 2)
mat1
mat2 <- matrix(c(6,7,8,9), nr = 2)
mat2
mat1 * mat2
mat1 / mat2
mat1 + mat2
mat1 - mat2
### Multiplicando Matriz com Vetor
x = c(1:4)
x
y <- matrix(c(2,3,4,5), nr = 2)
y
x * y
### Nomeando a Matriz
mat3 <- matrix(c('Terra', 'Marte', 'Saturno', 'Netuno'), nr = 2)
mat3
dimnames(mat3) = (list( c("Linha1", "Linha2"), c("Coluna1", "Coluna2")))
mat3
### Identificando linhas e colunas no momento de criação da Matriz
matrix (c(1,2,3,4), nr = 2, nc = 2, dimnames = list(c("Linha 1", "Linha 2" ), c( "Coluna 1", " Coluna 2") ))
### Combinando Matrizes
mat4 <- matrix(c(2,3,4,5), nr = 2)
mat4
mat5 <- matrix(c(6,7,8,9), nr = 2)
mat5
cbind(mat4, mat5)
rbind(mat4, mat5)
### Desconstruindo a Matriz
c(mat4)
##########################################################################
### LISTAS
##########################################################################
### Listas, Operações com Listas e Listas Nomeadas
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### Use list() para criar listas
### Lista de strings
lista_caracter1 = list('A', 'B', 'C')
lista_caracter1
lista_caracter2 = list(c("A", "A"), 'B', 'C')
lista_caracter2
lista_caracter3 = list(matrix(c("A", "A", "A", "A"), nr = 2), 'B', 'C')
lista_caracter3
### Lista de números inteiros
lista_inteiros = list(2, 3, 4)
lista_inteiros
### Lista de floats
lista_numerico = list(1.90, 45.3, 300.5)
lista_numerico
### Lista de números complexos
lista_complexos = list(5.2+3i, 2.4+8i)
lista_complexos
### Lista de valores lógicos
lista_logicos = list(TRUE, FALSE, FALSE)
lista_logicos
### Listas Compostas
lista_composta1 = list("A", 3, TRUE)
lista_composta1
lista1 <- list(1:10, c("Zico", "Ronaldo", "Garrincha"), rnorm(10))
lista1
?rnorm
### Slicing (Fatiamento) da Lista
lista1[1]
lista1[2]
lista1[[2]][1]
lista1[[2]][1] = "Monica"
lista1
### Para nomear os elementos - Listas Nomeadas
names(lista1) <- c("inteiros", "caracteres", "numéricos")
lista1
vec_num <- 1:4
vec_char <- c("A", "B", "C", "D")
lista2 <- list(Numeros = vec_num, Letras = vec_char)
lista2
### Nomear os elementos diretamente
lista2 <- list(elemento1 = 3:5, elemento2 = c(7.2,3.5))
lista2
### Trabalhando com elementos específicos da lista
names(lista1) <- c("inteiros", "caracteres", "numéricos")
lista1
lista1$caracteres
length(lista1$inteiros)
lista1$inteiros
lista1$numéricos
### Verificar o comprimento da lista
length(lista1)
### Podemos extrair um elemento específico dentro de cada nível da lista
lista1$caracteres[2]
### Mode dos elementos
mode(lista1$numéricos)
mode(lista1$caracteres)
### Combinando 2 listas
lista3 <- c(lista1, lista2)
lista3
### Transformando um vetor em lista
v = c(1:3)
v
l = as.list(v)
l
### Unindo 2 elementos em uma lista
mat = matrix(1:4, nrow = 2)
mat
vec = c(1:9)
vec
lst = list(mat, vec)
lst
##########################################################################
### STRINGS
##########################################################################
### Operações com Strings
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### String
texto <- "Isso é uma string!"
texto
x = as.character(3.14)
x
class(x)
### Concatenando Strings
nome = "Nelson"; sobrenome = "Rubens"
paste(nome, sobrenome)
cat(nome, sobrenome)
### Extraindo parte da string
texto <- "Isso é uma string!"
texto
substr(texto, start=12, stop=17)
?substr
### Contando o número de caracteres
nchar(texto)
### Alterando a capitalização
tolower("Histogramas e Elementos de Dados")
toupper("Histogramas e Elementos de Dados")
### Usando stringr
library(stringr)
### Dividindo uma string em caracteres
?strsplit
strsplit("Histogramas e Elementos de Dados", NULL)
### Dividindo uma string em caracteres, após o caracter espaço
strsplit("Histogramas e Elementos de Dados", " ")
### Trabalhando com strings
string1 <- c("Esta é a primeira parte da minha string e será a primeira parte do meu vetor",
"Aqui a minha string continua, mas será transformada no segundo vetor")
string1
string2 <- c("testando outras strings - ",
"Análise de Dados em R")
string2
### Adicionando 2 strings
str_c(c(string1, string2), sep = "")
### Detectando padrões nas strings
string1 <- "17 jan 2001"
string2 <- "1 jan 2001"
padrao <- "jan 20"
grepl(pattern = padrao, x = string1)
padrao <- "jan20"
grepl(pattern = padrao, x = string2)
##########################################################################
### DATAFRAME - TABELAS - Dataset
##########################################################################
### DataFrames e Operações com DataFrame
### Obs: Caso tenha problemas com a acentuação, consulte este link:
### https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
### Configurando o diretório de trabalho
### Coloque entre aspas o diretório de trabalho que você está usando no seu computador
### Não use diretórios com espaço no nome
setwd("C:/Users/user/Videos/Ciência dos Dados/Cursos/R Analytics/Scripts")
getwd()
### Criando um dataframe vazio
df <- data.frame()
class(df)
df
### Criando vetores vazios
nomes <- character()
idades <- numeric()
itens <- numeric()
codigos <- integer()
df <- data.frame(c(nomes, idades, itens, codigos))
df
### Criando vetores
pais = c("Portugal", "Inglaterra", "Irlanda", "Egito", "Brasil")
nome = c("Bruno", "Tiago", "Amanda", "Bianca", "Marta")
altura = c(1.88, 1.76, 1.53, 1.69, 1.68)
codigo = c(5001, 2183, 4702, 7965, 8890)
### Criando um dataframe de diversos vetores
pesquisa = data.frame(pais, nome, altura, codigo)
pesquisa
### Adicionando um novo vetor a um dataframe existente
olhos = c("verde", "azul", "azul", "castanho", "castanho")
pesq = cbind(pesquisa, olhos)
pesq
### Informações sobre o dataframe
str(pesq)
dim(pesq)
length(pesq)
### Obtendo um vetor de um dataframe
pesq$pais
pesq$nome
### Extraindo um único valor
pesq[1,1]
pesq[3,2]
### Número de Linhas e Colunas
nrow(pesq)
ncol(pesq)
### Primeiros elementos do dataframe
head(pesq)
head(mtcars)
### Últimos elementos do dataframe
tail(pesq)
tail(mtcars)
### Data frames built-in do R
?mtcars
mtcars
View(mtcars)
### Filtro para um subset de dados que atendem a um critério
pesq[altura < 1.60,]
pesq[altura < 1.60, c('codigo', 'olhos')]
pesq
### Dataframes Nomeados
names(pesq) <- c("País", "Nome", "Altura", "Código", "Olhos")
pesq
colnames(pesq) <- c("Var 1", "Var 2", "Var 3", "Var 4", "Var 5")
rownames(pesq) <- c("Obs 1", "Obs 2", "Obs 3", "Obs 4", "Obs 5")
pesq
### Carregando um arquivo csv
?read.csv
pacientes <- data.frame(read.csv(file = 'pacientes.csv', header = TRUE, sep = ","))
### Visualizando o dataset
View(pacientes)
head(pacientes)
summary(pacientes)
### Visualizando as variáveis
pacientes$Diabete
pacientes$status
pacientes$Status
### Histograma
hist(pacientes$Idade)
### Combinando dataframes
dataset_final <- merge(pesq, pacientes)
dataset_final
##########################################################################
### # Pacotes e Instalação de Pacotes
##########################################################################
# Obs: Caso tenha problemas com a acentuação, consulte este link:
# https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
# Configurando o diretório de trabalho
# Coloque entre aspas o diretório de trabalho que você está usando no seu computador
# Não use diretórios com espaço no nome
getwd()
# De onde vem as funções? Pacotes (conjuntos de funções)
# Quando você inicia o RStudio, alguns pacotes são
# carregados por padrão
# Busca os pacotes carregados
search()
# Instala e carrega os pacotes
install.packages(c("ggvis", "tm", "dplyr"))
library(ggvis)
library(tm)
require(dplyr)
search()
?require
detach(package:dplyr)
# Lista o conteúdo dos pacotes
?ls
ls(pos = "package:tm")
ls(getNamespace("tm"), all.names = TRUE)
# Lista as funções de um pacote
lsf.str("package:tm")
lsf.str("package:ggplot2")
library(ggplot2)
lsf.str("package:ggplot2")
# R possui um conjunto de datasets preinstalados.
library(MASS)
data()
?lynx
head(lynx)
head(iris)
tail(lynx)
summary(lynx)
plot(lynx)
hist(lynx)
head(iris)
iris$Sepal.Length
sum(Sepal.Length)
?attach
attach(iris)
sum(Sepal.Length)
# Instala os pacotes requeridos - Macro
# Lista de pacotes usados no projeto
packages <- c("dplyr", "randomForest", "ROCR")
for (p in packages) {
if(!p %in% rownames(installed.packages())) {
install.packages(p, dependencies = c("Depends", "Suggests"))
}
}
##########################################################################
### Funções Buit In
##########################################################################
# Funções Built-in
abs(-43)
sum(c(1:5))
mean(c(1:5))
round(c(1.1:5.8))
rev(c(1:5))
seq(1:5)
sort(rev(c(1:5)))
append(c(1:5), 6)
|
425191d47d3b5719353baaa7b72c259bad948174
|
ed2892ae0541e9d56f3b234edab712a33a281fe4
|
/R/h_kiener3.R
|
7553d2cbf673546c544b7a70f6e0919d0d7ff56d
|
[] |
no_license
|
cran/FatTailsR
|
c88ccd7d54de67723afb36a652fca1767b4a4caa
|
82796785ae65af40ea504c05710f447788afc88a
|
refs/heads/master
| 2021-07-14T21:46:26.585015
| 2021-03-12T08:00:02
| 2021-03-12T08:00:02
| 21,838,335
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,881
|
r
|
h_kiener3.R
|
#' @include g_kiener2.R
#' @title Asymmetric Kiener Distribution K3
#'
#' @description
#' Density, distribution function, quantile function, random generation,
#' value-at-risk, expected shortfall (+ signed left/right tail mean)
#' and additional formulae for asymmetric Kiener distribution K3.
#'
#' @param x vector of quantiles.
#' @param q vector of quantiles.
#' @param m numeric. The median.
#' @param g numeric. The scale parameter, preferably strictly positive.
#' @param k numeric. The tail parameter, preferably strictly positive.
#' @param d numeric. The distortion parameter between left and right tails.
#' @param p vector of probabilities.
#' @param lp vector of logit of probabilities.
#' @param n number of observations. If length(n) > 1, the length is
#' taken to be the number required.
#' @param log logical. If TRUE, densities are given in log scale.
#' @param lower.tail logical. If TRUE, use p. If FALSE, use 1-p.
#' @param log.p logical. If TRUE, probabilities p are given as log(p).
#' @param signedES logical. FALSE (default) returns positive numbers for
#' left and right tails. TRUE returns negative number
#' (= \code{ltmkiener3}) for left tail and positive number
#' (= \code{rtmkiener3}) for right tail.
#'
#' @details
#' Kiener distributions use the following parameters, some of them being redundant.
#' See \code{\link{aw2k}} and \code{\link{pk2pk}} for the formulas and
#' the conversion between parameters:
#' \itemize{
#' \item{ \code{m} (mu) is the median of the distribution,. }
#' \item{ \code{g} (gamma) is the scale parameter. }
#' \item{ \code{a} (alpha) is the left tail parameter. }
#' \item{ \code{k} (kappa) is the harmonic mean of \code{a} and \code{w}
#' and describes a global tail parameter. }
#' \item{ \code{w} (omega) is the right tail parameter. }
#' \item{ \code{d} (delta) is the distortion parameter. }
#' \item{ \code{e} (epsilon) is the eccentricity parameter. }
#' }
#'
#' Kiener distributions \code{K3(m, g, k, d, ...)} are distributions
#' with asymmetrical left and right fat tails described by a global tail
#' parameter \code{k} and a distortion parameter \code{d}.
#'
#' Distributions K3 (\code{\link{kiener3}})
#' with parameters \code{k} (kappa) and \code{d} (delta) and
#' distributions K4 (\code{\link{kiener4}})
#' with parameters \code{k} (kappa) and \code{e} (epsilon))
#' have been created to disantangle the parameters
#' \code{a} (alpha) and \code{w} (omega) of distributions of
#' distribution K2 (\code{\link{kiener2}}).
#' The tiny difference between distributions K3 and K4 (\eqn{d = e/k})
#' has not yet been fully evaluated. Both should be tested at that moment.
#'
#' \code{k} is the harmonic mean of \code{a} and \code{w} and represents a
#' global tail parameter.
#'
#' \code{d} is a distortion parameter between the left tail parameter
#' \code{a} and the right tail parameter \code{w}.
#' It verifies the inequality: \eqn{-k < d < k}
#' (whereas \code{e} of distribution K4 verifies \eqn{-1 < e < 1}).
#' The conversion functions (see \code{\link{aw2k}}) are:
#'
#' \deqn{1/k = (1/a + 1/w)/2 }
#' \deqn{ d = (-1/a + 1/w)/2 }
#' \deqn{1/a = 1/k - d }
#' \deqn{1/w = 1/k + d}
#'
#' \code{d} (and \code{e}) should be of the same sign than the skewness.
#' A negative value \eqn{ d < 0 } implies \eqn{ a < w } and indicates a left
#' tail heavier than the right tail. A positive value \eqn{ d > 0 } implies
#' \eqn{ a > w } and a right tail heavier than the left tail.
#'
#' \code{m} is the median of the distribution. \code{g} is the scale parameter
#' and the inverse of the density at the median: \eqn{ g = 1 / 8 / f(m) }.
#' As a first estimate, it is approximatively one fourth of the standard
#' deviation \eqn{ g \approx \sigma / 4 } but is independant from it.
#'
#' The d, p functions have no explicit forms. They are provided here for
#' convenience. They are estimated from a reverse optimization on the quantile
#' function and can be (very) slow, depending the number of points to estimate.
#' We recommand to use the quantile function as far as possible.
#' WARNING: Results may become inconsistent when \code{k} is
#' smaller than 1 or for very large absolute values of \code{d}.
#' Hopefully, this case seldom happens in finance.
#'
#' \code{qkiener3} function is defined for p in (0, 1) by:
#' \deqn{ qkiener3(p, m, g, k, d) =
#' m + 2 * g * k * sinh(logit(p) / k) * exp(d * logit(p)) }
#'
#' \code{rkiener3} generates \code{n} random quantiles.
#'
#' In addition to the classical d, p, q, r functions, the prefixes
#' dp, dq, l, dl, ql are also provided.
#'
#' \code{dpkiener3} is the density function calculated from the probability p.
#' The formula is adapted from distribution K2. It is defined for p in (0, 1) by:
#' \deqn{ dpkiener3(p, m, g, k, d) = 
#'        p * (1 - p) / k / g / ( exp(-logit(p)/a)/a + exp(logit(p)/w)/w ) }
#' with \code{a} and \code{w} defined from \code{k} and \code{d}
#' with the formula presented above.
#'
#' \code{dqkiener3} is the derivate of the quantile function calculated from
#' the probability p. The formula is adapted from distribution K2.
#' It is defined for p in (0, 1) by:
#' \deqn{ dqkiener3(p, m, g, k, d) =
#' k * g / p / (1 - p) * ( exp(-logit(p)/a)/a + exp(logit(p)/w)/w ) }
#' with \code{a} and \code{w} defined above.
#'
#' \code{lkiener3} function is estimated from a reverse optimization and can
#' be (very) slow depending on the number of points to estimate. Initialization
#' is done with a symmetric distribution \code{\link{lkiener1}}
#' of parameter \code{k} (thus \eqn{ d = 0}). Then optimization is performed
#' to take into account the true value of \code{d}.
#' The results can then be compared to the empirical probability logit(p).
#' WARNING: Results may become inconsistent when \code{k} is
#' smaller than 1 or for very large absolute values of \code{d}.
#' Hopefully, this case seldom happens in finance.
#'
#' \code{dlkiener3} is the density function calculated from the logit of the
#' probability lp = logit(p). The formula is adapted from distribution K2.
#' it is defined for lp in (-Inf, +Inf) by:
#' \deqn{ dlkiener3(lp, m, g, k, d) =
#' p * (1 - p) / k / g / ( exp(-lp/a)/a + exp(lp/w)/w ) }
#' with \code{a} and \code{w} defined above.
#'
#' \code{qlkiener3} is the quantile function calculated from the logit of the
#' probability. It is defined for lp in (-Inf, +Inf) by:
#' \deqn{ qlkiener3(lp, m, g, k, d) =
#' m + 2 * g * k * sinh(lp / k) * exp(d * lp) }
#'
#' \code{varkiener3} designates the Value a-risk and turns negative numbers
#' into positive numbers with the following rule:
#' \deqn{ varkiener3 <- if(p <= 0.5) { - qkiener3 } else { qkiener3 } }
#' Usual values in finance are \code{p = 0.01}, \code{p = 0.05}, \code{p = 0.95} and
#' \code{p = 0.99}. \code{lower.tail = FALSE} uses \code{1-p} rather than \code{p}.
#'
#' \code{ltmkiener3}, \code{rtmkiener3} and \code{eskiener3} are respectively the
#' left tail mean, the right tail mean and the expected shortfall of the distribution
#' (sometimes called average VaR, conditional VaR or tail VaR).
#' Left tail mean is the integral from \code{-Inf} to \code{p} of the quantile function
#' \code{qkiener3} divided by \code{p}.
#' Right tail mean is the integral from \code{p} to \code{+Inf} of the quantile function
#' \code{qkiener3} divided by \code{1-p}.
#' Expected shortfall turns negative numbers into positive numbers with the following rule:
#' \deqn{ eskiener3 <- if(p <= 0.5) { - ltmkiener3 } else { rtmkiener3 } }
#' Usual values in finance are \code{p = 0.01}, \code{p = 0.025}, \code{p = 0.975} and
#' \code{p = 0.99}. \code{lower.tail = FALSE} uses \code{1-p} rather than \code{p}.
#'
#' \code{dtmqkiener3} is the difference between the left tail mean and the quantile
#' when (p <= 0.5) and the difference between the right tail mean and the quantile
#' when (p > 0.5). It is in quantile unit and is an indirect measure of the tail curvature.
#'
#' @references
#' P. Kiener, Explicit models for bilateral fat-tailed distributions and
#' applications in finance with the package FatTailsR, 8th R/Rmetrics Workshop
#' and Summer School, Paris, 27 June 2014. Download it from:
#' \url{https://www.inmodelia.com/exemples/2014-0627-Rmetrics-Kiener-en.pdf}
#'
#' P. Kiener, Fat tail analysis and package FatTailsR,
#' 9th R/Rmetrics Workshop and Summer School, Zurich, 27 June 2015.
#' Download it from:
#' \url{https://www.inmodelia.com/exemples/2015-0627-Rmetrics-Kiener-en.pdf}
#'
#' C. Acerbi, D. Tasche, Expected shortfall: a natural coherent alternative to
#' Value at Risk, 9 May 2001. Download it from:
#' \url{https://www.bis.org/bcbs/ca/acertasc.pdf}
#'
#' @seealso
#' Symmetric Kiener distribution K1 \code{\link{kiener1}},
#' asymmetric Kiener distributions K2, K4 and K7
#' \code{\link{kiener2}}, \code{\link{kiener4}}, \code{\link{kiener7}},
#' conversion functions \code{\link{aw2k}},
#' estimation function \code{\link{fitkienerX}},
#' regression function \code{\link{regkienerLX}}.
#'
#' @examples
#'
#' require(graphics)
#'
#' ### Example 1
#' pp <- c(ppoints(11, a = 1), NA, NaN) ; pp
#' lp <- logit(pp) ; lp
#' qkiener3( p = pp, m = 2, g = 1.5, k = aw2k(4, 6), d = aw2d(4, 6))
#' qlkiener3(lp = lp, m = 2, g = 1.5, k = aw2k(4, 6), d = aw2d(4, 6))
#' dpkiener3( p = pp, m = 2, g = 1.5, k = aw2k(4, 6), d = aw2d(4, 6))
#' dlkiener3(lp = lp, m = 2, g = 1.5, k = aw2k(4, 6), d = aw2d(4, 6))
#' dqkiener3( p = pp, m = 2, g = 1.5, k = aw2k(4, 6), d = aw2d(4, 6))
#'
#'
#' ### Example 2
#' k <- 4.8
#' d <- 0.042
#' set.seed(2014)
#' mainTC <- paste("qkiener3(p, m = 0, g = 1, k = ", k, ", d = ", d, ")")
#' mainsum <- paste("cumulated qkiener3(p, m = 0, g = 1, k = ", k, ", d = ", d, ")")
#' T <- 500
#' C <- 4
#' TC <- qkiener3(p = runif(T*C), m = 0, g = 1, k = k, d = d)
#' matTC <- matrix(TC, nrow = T, ncol = C, dimnames = list(1:T, letters[1:C]))
#' head(matTC)
#' plot.ts(matTC, main = mainTC)
#' #
#' matsum <- apply(matTC, MARGIN=2, cumsum)
#' head(matsum)
#' plot.ts(matsum, plot.type = "single", main = mainsum)
#' ### End example 2
#'
#'
#' ### Example 3 (four plots: probability, density, logit, logdensity)
#' x <- q <- seq(-15, 15, length.out=101)
#' k <- 3.2
#' d <- c(-0.1, -0.03, -0.01, 0.01, 0.03, 0.1) ; names(d) <- d
#' olty <- c(2, 1, 2, 1, 2, 1, 1)
#' olwd <- c(1, 1, 2, 2, 3, 3, 2)
#' ocol <- c(2, 2, 4, 4, 3, 3, 1)
#' lleg <- c("logit(0.999) = 6.9", "logit(0.99) = 4.6", "logit(0.95) = 2.9",
#' "logit(0.50) = 0", "logit(0.05) = -2.9", "logit(0.01) = -4.6",
#' "logit(0.001) = -6.9 ")
#' op <- par(mfrow=c(2,2), mgp=c(1.5,0.8,0), mar=c(3,3,2,1))
#'
#' plot(x, pkiener3(x, k = 3.2, d = 0), type = "l", lwd = 3, ylim = c(0, 1),
#' xaxs = "i", yaxs = "i", xlab = "", ylab = "",
#' main = "pkiener3(q, m, g, k=3.2, d=...)")
#' for (i in 1:length(d)) lines(x, pkiener3(x, k = 3.2, d = d[i]),
#' lty = olty[i], lwd = olwd[i], col = ocol[i] )
#' legend("topleft", title = expression(delta), legend = c(d, "0"),
#' cex = 0.7, inset = 0.02, lty = olty, lwd = olwd, col = ocol )
#'
#' plot(x, dkiener3(x, k = 3.2, d = 0), type = "l", lwd = 3, ylim = c(0, 0.14),
#' xaxs = "i", yaxs = "i", xlab = "", ylab = "",
#' main = "dkiener3(q, m, g, k=3.2, d=...)")
#' for (i in 1:length(d)) lines(x, dkiener3(x, k = 3.2, d = d[i]),
#' lty = olty[i], lwd = olwd[i], col = ocol[i] )
#' legend("topright", title = expression(delta), legend = c(d, "0"),
#' cex = 0.7, inset = 0.02, lty = olty, lwd = olwd, col = ocol )
#'
#' plot(x, lkiener3(x, k = 3.2, d = 0), type = "l", lwd =3, ylim = c(-7.5, 7.5),
#' yaxt="n", xaxs = "i", yaxs = "i", xlab = "", ylab = "",
#' main = "logit(pkiener3(q, m, g, k=3.2, d=...))")
#' axis(2, las=1, at=c(-6.9, -4.6, -2.9, 0, 2.9, 4.6, 6.9) )
#' for (i in 1:length(d)) lines(x, lkiener3(x, k = 3.2, d = d[i]),
#' lty = olty[i], lwd = olwd[i], col = ocol[i] )
#' legend("topleft", legend = lleg, cex = 0.7, inset = 0.02 )
#' legend("bottomright", title = expression(delta), legend = c(d, "0"),
#' cex = 0.7, inset = 0.02, lty = c(olty), lwd = c(olwd), col = c(ocol) )
#'
#' plot(x, dkiener3(x, k = 3.2, d = 0, log = TRUE), type = "l", lwd = 3,
#' ylim = c(-8, -1.5), xaxs = "i", yaxs = "i", xlab = "", ylab = "",
#' main = "log(dkiener3(q, m, g, k=2, d=...))")
#' for (i in 1:length(d)) lines(x, dkiener3(x, k = 3.2, d = d[i], log=TRUE),
#' lty = olty[i], lwd = olwd[i], col = ocol[i] )
#' legend("bottom", title = expression(delta), legend = c(d, "0"),
#' cex = 0.7, inset = 0.02, lty = olty, lwd = olwd, col = ocol )
#' ### End example 3
#'
#'
#' ### Example 4 (four plots: quantile, derivate, density and quantiles from p)
#' p <- ppoints(199, a=0)
#' d <- c(-0.1, -0.03, -0.01, 0.01, 0.03, 0.1) ; names(d) <- d
#' op <- par(mfrow=c(2,2), mgp=c(1.5,0.8,0), mar=c(3,3,2,1))
#'
#' plot(p, qlogis(p, scale = 2), type = "l", lwd = 2, xlim = c(0, 1),
#' ylim = c(-15, 15), xaxs = "i", yaxs = "i", xlab = "", ylab = "",
#' main = "qkiener3(p, m, g, k=3.2, d=...)")
#' for (i in 1:length(d)) lines(p, qkiener3(p, k = 3.2, d = d[i]),
#' lty = olty[i], lwd = olwd[i], col = ocol[i] )
#' legend("topleft", title = expression(delta), legend = c(d, "qlogis(x/2)"),
#' inset = 0.02, lty = olty, lwd = olwd, col = ocol, cex = 0.7 )
#'
#' plot(p, 2/p/(1-p), type = "l", lwd = 2, xlim = c(0, 1), ylim = c(0, 100),
#' xaxs = "i", yaxs = "i", xlab = "", ylab = "",
#' main = "dqkiener3(p, m, g, k=3.2, d=...)")
#' for (i in 1:length(d)) lines(p, dqkiener3(p, k = 3.2, d = d[i]),
#' lty = olty[i], lwd = olwd[i], col = ocol[i] )
#' legend("top", title = expression(delta), legend = c(d, "p*(1-p)/2"),
#' inset = 0.02, lty = olty, lwd = olwd, col = ocol, cex = 0.7 )
#'
#' plot(qlogis(p, scale = 2), p*(1-p)/2, type = "l", lwd = 2, xlim = c(-15, 15),
#' ylim = c(0, 0.14), xaxs = "i", yaxs = "i", xlab = "", ylab = "",
#' main = "qkiener3, dpkiener3(p, m, g, k=3.2, d=...)")
#' for (i in 1:length(d)) {
#' lines(qkiener3(p, k = 3.2, d = d[i]), dpkiener3(p, k = 3.2, d = d[i]),
#' lty = olty[i], lwd = olwd[i], col = ocol[i] ) }
#' legend("topleft", title = expression(delta), legend = c(d, "p*(1-p)/2"),
#' inset = 0.02, lty = olty, lwd = olwd, col = ocol, cex = 0.7 )
#'
#' plot(qlogis(p, scale = 2), p, type = "l", lwd = 2, xlim = c(-15, 15),
#' ylim = c(0, 1), xaxs = "i", yaxs = "i", xlab = "", ylab = "",
#' main = "inverse axis qkiener3(p, m, g, k=3.2, d=...)")
#' for (i in 1:length(d)) lines(qkiener3(p, k = 3.2, d = d[i]), p,
#' lty = olty[i], lwd = olwd[i], col = ocol[i] )
#' legend("topleft", title = expression(delta), legend = c(d, "qlogis(x/2)"),
#' inset = 0.02, lty = olty, lwd = olwd, col = ocol, cex = 0.7 )
#' ### End example 4
#'
#'
#' ### Example 5 (q and VaR, ltm, rtm, and ES)
#' pp <- c(0.001, 0.0025, 0.005, 0.01, 0.025, 0.05,
#' 0.10, 0.20, 0.35, 0.5, 0.65, 0.80, 0.90,
#' 0.95, 0.975, 0.99, 0.995, 0.9975, 0.999)
#' m <- -10 ; g <- 1 ; k <- 4 ; d <- 0.06
#' a <- dk2a(d, k) ; w <- dk2w(d, k) ; e <- dk2e(d, k)
#' round(c(m = m, g = g, a = a, k = k, w = w, d = d, e = e), 2)
#' plot(qkiener3( pp, m=m, k=k, d=d), pp, type ="b")
#' round(cbind(p = pp, "1-p" = 1-pp,
#' q = qkiener3(pp, m, g, k, d),
#' ltm = ltmkiener3(pp, m, g, k, d),
#' rtm = rtmkiener3(pp, m, g, k, d),
#' ES = eskiener3(pp, m, g, k, d),
#' VaR = varkiener3(pp, m, g, k, d)), 4)
#' round(kmean(c(m, g, k, d), model = "K3"), 4) # limit value for ltm and rtm
#' round(cbind(p = pp, "1-p" = 1-pp,
#' q = qkiener3(pp, m, g, k, d, lower.tail = FALSE),
#' ltm = ltmkiener3(pp, m, g, k, d, lower.tail = FALSE),
#' rtm = rtmkiener3(pp, m, g, k, d, lower.tail = FALSE),
#' ES = eskiener3(pp, m, g, k, d, lower.tail = FALSE),
#' VaR = varkiener3(pp, m, g, k, d, lower.tail = FALSE)), 4)
#' ### End example 5
#'
#'
#' @name kiener3
NULL
#' @export
#' @rdname kiener3
dkiener3 <- function(x, m = 0, g = 1, k = 3.2, d = 0, log = FALSE) {
  # Density of the Kiener K3 distribution at x.
  # The quantile function is first inverted numerically to obtain the
  # logit of the probability, which is then fed to the logit-density.
  logit_p <- lkiener3(x, m, g, k, d)
  dens    <- dlkiener3(logit_p, m, g, k, d)
  if (log) log(dens) else dens
}
#' @export
#' @rdname kiener3
pkiener3 <- function(q, m = 0, g = 1, k = 3.2, d = 0,
                     lower.tail = TRUE, log.p = FALSE) {
  # CDF of the Kiener K3 distribution.
  # lkiener3() numerically inverts the quantile function at q; the
  # probability is then recovered from the logit scale.
  logit_p <- lkiener3(x = q, m, g, k, d)
  prob <- invlogit(logit_p)
  if (!lower.tail) {
    prob <- 1 - prob
  }
  if (log.p) log(prob) else prob
}
#' @export
#' @rdname kiener3
qkiener3 <- function(p, m = 0, g = 1, k = 3.2, d = 0,
                     lower.tail = TRUE, log.p = FALSE) {
  # Quantile function of the Kiener K3 distribution:
  #   q(p) = m + 2*g*k * sinh(logit(p)/k) * exp(d*logit(p))
  if (log.p) {
    p <- exp(p)
  }
  if (!lower.tail) {
    p <- 1 - p
  }
  lp <- logit(p)
  m + 2 * g * k * sinh(lp / k) * exp(d * lp)
}
#' @export
#' @rdname kiener3
rkiener3 <- function(n, m = 0, g = 1, k = 3.2, d = 0) {
  # Draw n random variates by inverse-transform sampling: uniform
  # draws pushed through the K3 quantile function.
  qkiener3(runif(n), m, g, k, d)
}
#' @export
#' @rdname kiener3
dpkiener3 <- function(p, m = 0, g = 1, k = 3.2, d = 0, log = FALSE) {
  # Density expressed as a function of the probability p.
  # a and w are the left/right tail parameters implied by (k, d).
  a  <- kd2a(k, d)
  w  <- kd2w(k, d)
  lp <- logit(p)
  dens <- p * (1 - p) / k / g / (exp(-lp / a) / a + exp(lp / w) / w)
  if (log) log(dens) else dens
}
#' @export
#' @rdname kiener3
dqkiener3 <- function(p, m = 0, g = 1, k = 3.2, d = 0, log = FALSE) {
  # Derivative dq/dp of the K3 quantile function with respect to p.
  a  <- kd2a(k, d)
  w  <- kd2w(k, d)
  lp <- logit(p)
  deriv <- k * g / p / (1 - p) * (exp(-lp / a) / a + exp(lp / w) / w)
  if (log) log(deriv) else deriv
}
#' @export
#' @rdname kiener3
lkiener3 <- function(x, m = 0, g = 1, k = 3.2, d = 0) {
  # Logit of the probability at x, obtained by numerically inverting
  # the quantile function: the symmetric K1 solution (d = 0) seeds the
  # optimizer, then nlm() minimizes the squared residuals for the
  # true d.
  start <- lkiener1(x, m, g, k)
  sse   <- function(lp) sum((x - qlkiener3(lp, m, g, k, d))^2)
  nlm(sse, start)$estimate
}
#' @export
#' @rdname kiener3
dlkiener3 <- function(lp, m = 0, g = 1, k = 3.2, d = 0, log = FALSE) {
  # Density as a function of the logit of the probability, lp.
  p <- invlogit(lp)
  a <- kd2a(k, d)
  w <- kd2w(k, d)
  dens <- p * (1 - p) / k / g / (exp(-lp / a) / a + exp(lp / w) / w)
  if (log) log(dens) else dens
}
#' @export
#' @rdname kiener3
qlkiener3 <- function(lp, m = 0, g = 1, k = 3.2, d = 0, lower.tail = TRUE) {
  # Quantile as a function of the logit of the probability, lp.
  # For the upper tail the sign of lp is flipped.
  if (!lower.tail) {
    lp <- -lp
  }
  m + 2 * g * k * sinh(lp / k) * exp(d * lp)
}
#' @export
#' @rdname kiener3
varkiener3 <- function(p, m = 0, g = 1, k = 3.2, d = 0,
                       lower.tail = TRUE, log.p = FALSE) {
  # Value-at-Risk of the Kiener K3 distribution.
  # The quantile is sign-flipped on the lower half (p <= 0.5) so the
  # VaR is reported as a positive number for both tails.
  #
  # p          probabilities (log-probabilities if log.p = TRUE).
  # lower.tail if FALSE, 1 - p is used in place of p.
  #
  # qkiener3() is fully vectorized, so the element-wise loop of the
  # original implementation is unnecessary; ifelse() selects the sign
  # per element with identical results.
  if (log.p) {
    p <- exp(p)
  }
  if (!lower.tail) {
    p <- 1 - p
  }
  q <- qkiener3(p, m, g, k, d)
  ifelse(p <= 0.5, -q, q)
}
#' @export
#' @rdname kiener3
ltmkiener3 <- function(p, m = 0, g = 1, k = 3.2, d = 0,
                       lower.tail = TRUE, log.p = FALSE) {
  # Left tail mean: the average of the quantile function over (0, p),
  # in closed form via incomplete beta integrals.
  # The lower.tail = FALSE branch is the lower.tail = TRUE expression
  # with the signs of both d and 1/k reversed.
  if (log.p) {
    p <- exp(p)
  }
  # Incomplete beta integral term: pbeta(p, s1, s2) * beta(s1, s2).
  ibeta <- function(s1, s2) pbeta(p, s1, s2) * beta(s1, s2)
  s  <- if (lower.tail) 1 else -1
  dd <- s * d
  ee <- s / k
  m + g * k / p * (ibeta(1 + dd + ee, 1 - dd - ee) -
                   ibeta(1 + dd - ee, 1 - dd + ee))
}
#' @export
#' @rdname kiener3
rtmkiener3 <- function(p, m = 0, g = 1, k = 3.2, d = 0,
                       lower.tail = TRUE, log.p = FALSE) {
  # Right tail mean: the average of the quantile function over (p, 1),
  # in closed form via incomplete beta integrals evaluated at 1 - p.
  # The lower.tail = TRUE branch is the lower.tail = FALSE expression
  # with the signs of both d and 1/k reversed.
  if (log.p) {
    p <- exp(p)
  }
  pc <- 1 - p
  # Incomplete beta integral term: pbeta(1-p, s1, s2) * beta(s1, s2).
  ibeta <- function(s1, s2) pbeta(pc, s1, s2) * beta(s1, s2)
  s  <- if (lower.tail) -1 else 1
  dd <- s * d
  ee <- s / k
  m + g * k / pc * (ibeta(1 + dd + ee, 1 - dd - ee) -
                    ibeta(1 + dd - ee, 1 - dd + ee))
}
#' @export
#' @rdname kiener3
dtmqkiener3 <- function(p, m = 0, g = 1, k = 3.2, d = 0,
                        lower.tail = TRUE, log.p = FALSE) {
  # Difference between the tail mean and the quantile, in quantile
  # units: left tail mean minus quantile when p <= 0.5, right tail
  # mean minus quantile otherwise.
  # ltmkiener3/rtmkiener3/qkiener3 all accept vectors, so ifelse()
  # replaces the element-wise loop without changing the result.
  q <- qkiener3(p, m, g, k, d, lower.tail, log.p)
  ifelse(p <= 0.5,
         ltmkiener3(p, m, g, k, d, lower.tail, log.p) - q,
         rtmkiener3(p, m, g, k, d, lower.tail, log.p) - q)
}
#' @export
#' @rdname kiener3
eskiener3 <- function(p, m = 0, g = 1, k = 3.2, d = 0,
                      lower.tail = TRUE, log.p = FALSE, signedES = FALSE) {
  # Expected shortfall (average/conditional VaR) of the K3
  # distribution: the left tail mean for p <= 0.5 and the right tail
  # mean for p > 0.5.
  #
  # signedES  if FALSE (default), absolute values are returned so the
  #           shortfall is positive for both tails; if TRUE the signed
  #           tail mean is returned.
  #
  # ltmkiener3/rtmkiener3 are fully vectorized, so the element-wise
  # loop of the original implementation is unnecessary; the signedES
  # test is hoisted out of the per-element work.
  if (log.p) {
    p <- exp(p)
  }
  if (!lower.tail) {
    p <- 1 - p
  }
  tm <- ifelse(p <= 0.5,
               ltmkiener3(p, m, g, k, d),
               rtmkiener3(p, m, g, k, d))
  if (signedES) tm else abs(tm)
}
|
985abea1068c0f2f599602a309866c4d155e699d
|
bff40d50e61358a0c40ed96c76d856247221f786
|
/AnamulHaque_Assignment2.R
|
a2556a05d8642e4c7f2dcdc657f4f27f77d26e3b
|
[] |
no_license
|
anamulmb/Statistics-in-R-Shiny
|
2faa4f63ad8468de67bdff2037fd5d6813ed6691
|
55481a4ba5bffed49820f37d435be9457b5b8822
|
refs/heads/master
| 2023-03-15T00:06:32.195703
| 2021-03-24T14:25:23
| 2021-03-24T14:25:23
| 85,979,281
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 907
|
r
|
AnamulHaque_Assignment2.R
|
# Arithmetic warm-up: evaluate 1 + 2 (auto-printed).
1 + 2

# Print the string "Beat LSU" without surrounding quotes.
print("Beat LSU", quote = FALSE)

# Store the number of wins and echo it back.
wins <- 15
print(wins)

# Personal details: name, degree program, and height.
# Height is first computed in inches (1 foot = 12 inches), then
# converted to centimeters (1 inch = 2.54 cm).
My_Name <- "John Doe"
My_Degree_Program <- "Life Science"
My_Height_in_Inches <- 5 * 12 + 8
My_Height_in_Centimeters <- My_Height_in_Inches * 2.54

print(My_Name, quote = FALSE)
print(My_Degree_Program, quote = FALSE)
print(My_Height_in_Inches)
print(My_Height_in_Centimeters)
|
f5f5bce342ea00f6d6b3db24183f5c38a1eaafd9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dsa/examples/ts2xts.Rd.R
|
c042455d3f207af84fd5a8caa900afa0d4858840
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 165
|
r
|
ts2xts.Rd.R
|
# Auto-generated example script exercising dsa::ts2xts.
library(dsa)
### Name: ts2xts
### Title: Change ts to xts
### Aliases: ts2xts
### ** Examples

# Convert a daily ts (1000 N(10, 1) draws starting in 2001) to xts.
ts2xts(stats::ts(rnorm(1000, 10,1), start=c(2001,1), freq=365))
|
3a632638fbccbc4571f73e4d7644ebe3a173cbb9
|
e5ebddef173d10c4722c68f0ac090e5ecc626b8b
|
/IL2RA/bin/normalmixEM2comp.R
|
201ae21e07c935be40650e361d9f975aa8347076
|
[] |
no_license
|
pontikos/PhD_Projects
|
1179d8f84c1d7a5e3c07943e61699eb3d91316ad
|
fe5cf169d4624cb18bdd09281efcf16ca2a0e397
|
refs/heads/master
| 2021-05-30T09:43:11.106394
| 2016-01-27T15:14:37
| 2016-01-27T15:14:37
| 31,047,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
normalmixEM2comp.R
|
#!/usr/bin/env Rscript
# Fit a two-component normal mixture to one log10-transformed FCS
# channel. Starting values (means, mixing proportions, variances)
# come from a two-cluster PAM (k-medoids) partition of the data.
suppressPackageStartupMessages(library(mixtools))
suppressPackageStartupMessages(library(cluster))
# make_option()/OptionParser()/parse_args() come from optparse; the
# original script called them without loading the package, which
# fails at runtime.
suppressPackageStartupMessages(library(optparse))
source('~nikolas/bin/FCS/fcs.R')
option_list <- list(
make_option(c("--fcsFile"), help = ""),
make_option(c("--channel"), help = ""),
make_option(c('--RData'), help='')
)
OptionParser(option_list=option_list) -> option.parser
parse_args(option.parser) -> opt
# log10-transformed intensities of the requested channel.
X <- read.FCS(opt$fcsFile, channel=opt$channel, TRANS=log10)[,1]
# PAM partition supplies the EM starting values.
res.pam <- pam(X,2)
res <- list( mu=res.pam$medoids[,1], lambda=as.numeric(prop.table(table(res.pam$clustering))), sigsqrd=as.numeric(by(X,res.pam$clustering,var)) )
m <- mixtools::normalmixEM2comp( X, mu=res$mu, sigsqrd=res$sigsqrd, lambda=res$lambda )
# write() cannot serialize a mixEM object (a list with matrices);
# save the fit as an R data file, matching the --RData option name.
save(m, file=opt$RData)
|
461e23baaf1dd90d003f62ee7d92ad44f72c807a
|
16e8b1f2886d4dad26757814ce5c65382bd1f829
|
/man/Rat.Rd
|
1b12c89d59542d5e0af654e3702c690cff70a47d
|
[] |
no_license
|
richierocks/gpk
|
f14d80e87b22e0407488c3b24956d1e35c53ae65
|
9a437680da57d8d2cc03fd5997981c0ddf6e6e77
|
refs/heads/master
| 2021-01-21T21:09:30.969109
| 2017-05-24T18:20:45
| 2017-05-24T18:20:45
| 92,310,887
| 2
| 0
| null | 2017-05-24T15:59:51
| 2017-05-24T15:59:51
| null |
UTF-8
|
R
| false
| false
| 1,060
|
rd
|
Rat.Rd
|
\name{Rat}
\alias{Rat}
\docType{data}
\title{
Study of rat burrow architecture
}
\description{
Bandicoot rats live in underground burrows dug by them. 83 burrows were excavated and measured. However, by accident, only the marginal distributions were retained while the original data on joint distribution was lost. Check whether each marginal distribution is normal. It is of interest to estimate proportion of burrows having length greater than average AND depth greater than average. Use the following formula for generating bivariate distribution from marginals.
}
\usage{data(Rat)}
\format{
A data frame with 6 observations on the following 4 variables.
\describe{
\item{\code{Tunnel_Length}}{Total length of tunnel (cm)}
\item{\code{Frequency}}{Frequency}
\item{\code{Tunnel_Depth}}{Depth of tunnel (cm)}
\item{\code{Frequency.1}}{Frequency of tunnel depth}
}
}
\details{
Use the chi-square test for checking univariate normality.
}
\source{
http://ces.iisc.ernet.in/hpg/nvjoshi/statspunedatabook/databook.html
}
\keyword{datasets}
|
667f081134e726c4e3bb225f3bc6fc709dd768d7
|
590142f535831def89b5b2d0f6ac1d47b8306850
|
/man/ParallelBlock-class.Rd
|
bc424783548a57d346ba1e394a9b4f2a155e2f67
|
[] |
no_license
|
jfontestad/makeParallel
|
2b62704c9e26477bc89d505de313ea07aaebdcca
|
6e43f34f51a23692907ec1563d3d47a8e189d7bf
|
refs/heads/master
| 2023-01-13T03:27:16.260825
| 2020-11-17T16:41:04
| 2020-11-17T16:41:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 361
|
rd
|
ParallelBlock-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClass.R
\docType{class}
\name{ParallelBlock-class}
\alias{ParallelBlock-class}
\alias{ParallelBlock}
\title{Code to run in parallel}
\description{
Code to run in parallel
}
\section{Slots}{
\describe{
\item{\code{export}}{names of objects to export from manager to workers.}
}}
|
b7d75e47c4c8160d4cd5cee0f2d02673dc2397ff
|
3b2741d6fc9a489dcd394f9f708f5554db1a1adb
|
/PA1.R
|
bac39e77c90d009018e30152414a6f604310217d
|
[] |
no_license
|
FranciscoAlonso/RepData_PeerAssessment1
|
def9d1d48b768a48af8be8bf7e6e76420202c7da
|
68af630305e73fbff2116fd396a5c9a7e83e774a
|
refs/heads/master
| 2020-12-24T10:23:08.721655
| 2015-08-16T21:01:24
| 2015-08-16T21:01:24
| 40,564,632
| 0
| 0
| null | 2015-08-11T20:51:43
| 2015-08-11T20:51:42
| null |
UTF-8
|
R
| false
| false
| 2,264
|
r
|
PA1.R
|
# Reads "activity.csv" (activity-monitoring step counts), drops rows
# with NA, then computes per-day totals, means and medians of the step
# counts. Produces a histogram and two time-series plots as side
# effects; no meaningful return value.
PA1 <- function()
{
library(lubridate)
library(dplyr)
steps <- read.csv("activity.csv")
#convert to date format
steps$date <- ymd(steps$date)
# Keep complete cases only before aggregating.
steps <- na.omit(steps)
# Every calendar day between the first and last observation.
allDates <- seq(from = min(steps$date), to = max(steps$date), by = "day")
stepsPerDate <- as.data.frame(allDates, row.names(c("Dates", "Steps")))
stepcount <- c()
stepMean <- c()
stepMedian <- c()
# NOTE(review): `for` over a Date vector iterates bare numerics; the
# filter(date == day) comparison still matches, but this is fragile.
for(day in allDates)
{
temp <- filter(steps, date == day) %>% select(steps) %>% arrange()
# NOTE(review): order() returns a permutation of row indices, not the
# sorted step values, so myMedian(temp2) below computes the median of
# *indices* -- looks like a bug; presumably sort(temp$steps) was meant.
temp2 <- order(temp$steps, decreasing = F)
if(length(temp$steps) > 0)
{
stepcount <- c(stepcount, sum(temp$steps, na.rm = T))
stepMean <- c(stepMean, mean(temp$steps, na.rm = T))
stepMedian <- c(stepMedian, myMedian(temp2))
}
else
{
# No observations for this day: record zeros.
stepcount <- c(stepcount, 0)
stepMean <- c(stepMean, 0)
stepMedian <- c(stepMedian, 0)
}
}
stepsPerDate <- mutate(stepsPerDate, Dates = allDates)
stepsPerDate <- mutate(stepsPerDate, Steps = stepcount)
stepsPerDate <- mutate(stepsPerDate, Mean = stepMean)
stepsPerDate <- mutate(stepsPerDate, Median = stepMedian)
#----
#Total, mean and median of total number of steps per day
#histogram of total number of steps per day
#png("figure/P1_Hist-Steps.png") #create the png graph device
hist(stepsPerDate$Steps
, col = "red"
, main = "Histogram of the total number of steps taken each day"
, xlab = "Number of steps per day")
#dev.off()
#----
#png("figure/P1_Dates-Steps.png") #create the png graph device
#plot(stepsPerDate$Dates, stepsPerDate$Steps, type = "l")
#dev.off()
#png("figure/P1_Dates-Mean.png") #create the png graph device
plot(stepsPerDate$Dates, stepsPerDate$Mean, type = "l"
, main = "Mean of total number of steps per date"
, xlab = "Dates"
, ylab = "Mean")
#dev.off()
#png("figure/P1_Dates-Median.png") #create the png graph device
plot(stepsPerDate$Dates, stepsPerDate$Median, type = "l"
, main = "Median of total number of steps per date"
, xlab = "Dates"
, ylab = "Median")
#dev.off()
}
# Median of a numeric vector.
#
# The original implementation indexed the middle element(s) directly
# and therefore silently returned wrong answers for unsorted input;
# sorting first makes the result correct for any ordering while
# leaving it unchanged for already-sorted vectors.
#
# x: numeric vector (length >= 1).
# Returns the middle value, or the mean of the two middle values when
# the length is even.
myMedian <- function(x)
{
v <- sort(x)
n <- length(v)
if(n %% 2 == 0)
{
(v[n / 2] + v[n / 2 + 1]) / 2
}
else
{
v[n %/% 2 + 1]
}
}
|
070744fb954be1040468553efd672b6861f08468
|
9d126e2d47795f1d45cf3bd400a5547b9e6e6b77
|
/eQTL_GWAS_riskSNPs_n596/create_eqtl_table.R
|
d16bbbf9c55240cc08c7f40911d80e1265a9f145
|
[
"MIT"
] |
permissive
|
LieberInstitute/dg_hippo_paper
|
f3bf015f14da7a43ddcceb6992033600daa237d7
|
b2694e0083e96562cfe681d96459a3c670e6ccd8
|
refs/heads/master
| 2021-07-09T05:35:54.995464
| 2020-09-14T17:03:53
| 2020-09-14T17:03:53
| 157,881,996
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,736
|
r
|
create_eqtl_table.R
|
## Build eQTL summary tables for GWAS risk SNPs (n = 596 samples,
## dentate gyrus + hippocampus). Requires the packages below plus the
## .rda inputs referenced by the load() calls.
library(jaffelab)
library(IRanges)
library(SummarizedExperiment)
#####################
##### Subset of 881 SNPs from PGC
#####################
################
## load eQTLs
## Each load() defines `allEqtl` in the workspace; only the
## FDR-significant (< 0.05) associations are kept per region.
load("eqtl_tables/mergedEqtl_output_dg_raggr_4features.rda", verbose=TRUE)
dg = allEqtl[allEqtl$FDR < 0.05,]
load("eqtl_tables/mergedEqtl_output_hippo_raggr_4features.rda", verbose=TRUE)
hippo = allEqtl[allEqtl$FDR < 0.05,]
# load("eqtl_tables/mergedEqtl_output_interaction_4features.rda", verbose=TRUE)
# inter = allEqtl[allEqtl$FDR < 0.05,]
################
## metrics
## The counts in the comments below were recorded from a previous
## interactive run of this script.
## total features
nrow(dg) ## 68176
nrow(hippo) ## 54737
# nrow(inter) ##
## per feature
table(dg$Type)
# Exon Gene Jxn Tx
# 39907 6430 11542 10297
table(hippo$Type)
# Exon Gene Jxn Tx
# 31274 4188 9828 9447
# table(inter$Type)
# # Exon Gene Jxn Tx
# #
## unique ensemblIDs
tapply(dg$EnsemblGeneID, dg$Type, function(x) length(unique(x)))
# Exon Gene Jxn Tx
# 280 172 175 233
tapply(hippo$EnsemblGeneID, hippo$Type, function(x) length(unique(x)))
# Exon Gene Jxn Tx
# 282 135 169 221
# tapply(inter$EnsemblGeneID, inter$Type, function(x) length(unique(x)))
# # Exon Gene Jxn Tx
# #
################
## make csv
## Join the significant eQTLs with SNP coordinates, feature
## coordinates and gene annotation, then write one CSV per region.
# hippo$EnsemblGeneID = ss(hippo$EnsemblGeneID, "\\.")
# dg$EnsemblGeneID = ss(dg$EnsemblGeneID, "\\.")
## snpMap
## Keep only SNPs present in BOTH genotype sets, matched on hg19
## chr:pos, so the two regional tables share coordinates.
load("../genotype_data/astellas_dg_genotype_data_n263.rda")
snpMap1 = snpMap
snpMap1$hg19POS = paste0(snpMap1$CHR,":",snpMap1$POS)
snpMap1 = snpMap1[which(rownames(snpMap1) %in% c(hippo$snps,dg$snps) ),c("SNP","chr_hg38","pos_hg38","hg19POS")]
load("../genotype_data/BrainSeq_Phase2_RiboZero_Genotypes_n551.rda")
snpMap2 = snpMap
snpMap2$hg19POS = paste0(snpMap2$CHR,":",snpMap2$POS)
snpMap2 = snpMap2[which(rownames(snpMap2) %in% c(hippo$snps,dg$snps) ),c("SNP","chr_hg38","pos_hg38","hg19POS")]
snpMap = snpMap1[snpMap1$hg19POS %in% snpMap2$hg19POS,]
## featMap
## Coordinates for each feature type: gene, exon, junction, transcript.
load("../count_data/merged_dg_hippo_allSamples_n596.rda", verbose=TRUE)
gMap = as.data.frame(rowRanges(rse_gene_joint))[,c("seqnames","start","end","strand","Class")]
eMap = as.data.frame(rowRanges(rse_exon_joint))[,c("seqnames","start","end","strand","Class")]
jMap = as.data.frame(rowRanges(rse_jxn_joint))[,c("seqnames","start","end","strand","Class")]
txMap = as.data.frame(rowRanges(rse_tx_joint))[,c("seqnames","start","end","strand","source")]
txMap$source = "InGen"
colnames(gMap) = colnames(eMap) = colnames(jMap) = colnames(txMap) =
c("feat_chr","feat_start","feat_end","strand","Class")
featMap = rbind(rbind(rbind(gMap, eMap),jMap),txMap)
featMap$Type = c(rep("Gene",nrow(gMap)),rep("Exon",nrow(eMap)),rep("Jxn",nrow(jMap)),rep("Tx",nrow(txMap)))
geneMap = as.data.frame(rowRanges(rse_gene_joint))[,c("gencodeID","Symbol","ensemblID","gene_type")]
## put together
## hippo
snpMap_temp = snpMap[hippo$snps,]
featMap_temp = featMap[hippo$gene,]
geneMap_temp = geneMap[match(hippo$EnsemblGeneID, geneMap$ensemblID),]
hippo2 = cbind(snpMap_temp,featMap_temp,geneMap_temp,hippo)
# NOTE(review): the column subset below is positional -- fragile if
# any of the bound inputs gains or loses a column.
hippo3 = hippo2[,c(1:4,16,10,5:9,21:22,14,17:20)]
write.csv(hippo3, "raggr_179_snps_hippo_eqtls_fdr05.csv")
## DG
snpMap_temp = snpMap[dg$snps,]
featMap_temp = featMap[dg$gene,]
geneMap_temp = geneMap[match(dg$EnsemblGeneID, geneMap$ensemblID),]
dg2 = cbind(snpMap_temp,featMap_temp,geneMap_temp,dg)
dg3 = dg2[,c(1:4,16,10,5:9,21:22,14,17:20)]
write.csv(dg3, "raggr_179_snps_dg_eqtls_fdr05.csv")
# ## interaction
# snpMap_temp = snpMap[inter$snps,]
# featMap_temp = featMap[inter$gene,]
# geneMap_temp = geneMap[match(inter$EnsemblGeneID, geneMap$ensemblID),]
# inter2 = cbind(snpMap_temp,featMap_temp,geneMap_temp,inter)
# inter3 = inter2[,c(1:4,16,10,5:9,21:22,14,17:20)]
# write.csv(inter3, "raggr_179_snps_inter_eqtls_fdr05.csv")
|
cdb2111e14f7367fe1742c9a9cdaf4486c6c8882
|
919e3e0a88cf0099a43e0bc0b31eac60c8074bf8
|
/tests/testthat/test-prepareData.R
|
0f6a41dc7bfc1868eb61c1fa45dcac4c40503474
|
[] |
no_license
|
itikadi/EMOGEA
|
929e86a91f09c24b6e7711f87f62f3ccf468151a
|
91dd507efe60070d115b63382a86e423ab228e6f
|
refs/heads/master
| 2023-04-17T00:01:28.534769
| 2021-04-24T15:25:55
| 2021-04-24T15:25:55
| 291,812,012
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 949
|
r
|
test-prepareData.R
|
# Regression test for prepareData() against stored fixtures.
library(data.table)

# Directory holding the input CSVs and the serialized expected output.
rdsPath <- "../testdata"

testthat::test_that("Test prepareData", {
  # Load the raw inputs for prepareData().
  expressionData <- fread(file.path(rdsPath, "input_expressionData.csv"))
  metaData <- fread(file.path(rdsPath, "input_metaData.csv"))
  sampleColumn <- "ID"
  conditionColumn <- "condition"

  # Run the function under test (log transformation disabled).
  results <- prepareData(
    expressionData = expressionData,
    metaData = metaData,
    sampleColumn = sampleColumn,
    conditionColumn = conditionColumn,
    applyLogTransformation = FALSE)

  # Compare every named component against the golden results.
  expectedResults <- readRDS(file.path(rdsPath, "expected_prepareData.rds"))
  for (name in names(expectedResults)) {
    testthat::expect_equal(results[[name]], expectedResults[[name]])
  }
})
|
d546596e47e1cefe5c206ca118854cddcedb9056
|
7466dbb3f016774d6cb1ddeb142de1edae496378
|
/man/tf2doc.Rd
|
a0e54f5f6ddebde3f80e9c64610258aeb46e7c2b
|
[] |
no_license
|
cran/chinese.misc
|
0dc04d6470cff7172c76f3a735986ef7128c74da
|
369fd6b193e5d969354a31e568fabe53cb596c8c
|
refs/heads/master
| 2021-01-19T09:55:21.948813
| 2020-09-11T20:50:03
| 2020-09-11T20:50:03
| 82,150,007
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 981
|
rd
|
tf2doc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tf2doc.R
\name{tf2doc}
\alias{tf2doc}
\title{Transform Terms and Frequencies into a Text}
\usage{
tf2doc(term, num)
}
\arguments{
\item{term}{terms that you want to rewrite into a text. A character vector is preferred, but
matrix, list, data frame are also OK. \code{NA} in the argument will be taken as
letters "NA" and repeated.}
\item{num}{frequencies of terms in \code{term}. A numeric vector is preferred, but
matrix, list, data frame are also OK. Its length must be equal to that of \code{term}.
No \code{NA} is allowed.}
}
\value{
a character vector. Terms are pasted with a space.
}
\description{
This function is simply a wrapper of \code{rep}, but allows different structures of input.
For rewriting more texts in the same time, see \code{\link{m2doc}}.
}
\examples{
x <- matrix(c("coffee", "milk", "tea", "cola"), nrow = 2)
y <- factor(c(5:8))
tf2doc(x, y)
}
|
8bb5fab7d3277b7d27920201855e08911ce951ca
|
c2d29768d7a4262e1cabf6688df0e3b290103df3
|
/Assignment-1/R code.R
|
45fd5be18060b8024e546cf7719ebb27e0180d20
|
[] |
no_license
|
Utsav37/Data-Mining
|
4913ab51ffb588f59b8f161d738f12a5b18e3ce9
|
d8d0cf7297b5b4bf3b6f531b5122968975f37508
|
refs/heads/master
| 2020-04-20T01:37:31.693421
| 2017-12-28T00:58:32
| 2017-12-28T00:58:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 75
|
r
|
R code.R
|
x <- seq(1,50,by=2)
y <- 2*x - 30
png("Abhishek plot.png")
plot(x,y)
|
0f297e7fed65cdbca10cafdb38602e5175720c43
|
738589305032d5a35d7c433969377bacf7284983
|
/man/read.digital.surf.file.Rd
|
52aad2a91d179b98fb40a6378adfeb8450eb0603
|
[] |
no_license
|
tanyanap4/x3pr
|
4dd9451cb6b662db4c4aecf504c24c80dcce6947
|
517a04d88ecaad5336ff9c5a7c6f46b1a31763c4
|
refs/heads/master
| 2021-01-10T18:51:07.192671
| 2015-01-14T21:33:23
| 2015-01-14T21:33:23
| 29,158,058
| 0
| 0
| null | 2015-01-14T20:53:45
| 2015-01-12T21:33:44
|
R
|
UTF-8
|
R
| false
| false
| 595
|
rd
|
read.digital.surf.file.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/read.digital.surf.file.R
\name{read.digital.surf.file}
\alias{read.digital.surf.file}
\title{Read in a Digital Surf file wrapper. Just specify the path to the
.sur file.}
\usage{
read.digital.surf.file(file.path)
}
\arguments{
\item{file.path}{}
}
\value{
a matrix.
}
\description{
Read in a Digital Surf file wrapper. Just specify the path to the
.sur file.
}
\details{
Use file.choose() to open a file chooser window.
}
\examples{
Coming soon.
\dontrun{
}
}
\references{
http://open-gps.sourceforge.net/
}
|
fff7423f51a626e824d8917fb33f4ded77a9b33b
|
1a12b5865f377c8eaaa0cbf5cd6b4a1cf5d00810
|
/Rplot02.R
|
ae5baf5efac0de77df1583b8b57ff4807b6c4a67
|
[] |
no_license
|
willk1990/ExData_Plotting1
|
810dbe284fe2487425a24ff0b509bced56e2a395
|
c20b07f0acf4394344120eb304ce51d1b4229e57
|
refs/heads/master
| 2020-11-29T15:28:39.407016
| 2017-04-15T17:35:34
| 2017-04-15T17:35:34
| 87,478,983
| 0
| 0
| null | 2017-04-06T22:01:09
| 2017-04-06T22:01:09
| null |
UTF-8
|
R
| false
| false
| 627
|
r
|
Rplot02.R
|
# Plot global active power over 1-2 Feb 2007 from the UCI household power
# consumption data and save it as a 480x480 PNG (course plot 2).
pwr <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", as.is = c(3, 4, 5, 6))
# keep only the two target days
pwr <- subset(pwr, pwr$Date == "1/2/2007" | pwr$Date == "2/2/2007")
# combine date and time into a POSIXlt timestamp
time2 <- paste(pwr$Date, pwr$Time)
time2 <- strptime(time2, format = "%d/%m/%Y %H:%M:%S")
# measurement columns were read as character (as.is); convert to numeric
pwr$Global_active_power <- as.numeric(pwr$Global_active_power)
pwr$Global_reactive_power <- as.numeric(pwr$Global_reactive_power)
pwr$Voltage <- as.numeric(pwr$Voltage)
pwr <- cbind(pwr, time2)
png("Plot02.png", width = 480, height = 480)
# axis-label typo fixed ("Kilowats" -> "kilowatts")
plot(x = pwr$time2, y = pwr$Global_active_power, type = "l", main = "", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()  # close the device so the PNG file is finalized
|
a59c645d54dd7a616af0ed4da3715952093f6937
|
2d0c8242f25ae6cc9a0eaf4097eca9e9d9f42b62
|
/foieGras.R
|
08b21aab00a8042b7b6c5fa32b6e8812a3b5c1b9
|
[] |
no_license
|
makratofil/crc-hawaii-tags
|
a51e33a181129a95c1e415f9791169a302b84266
|
090ec18121c4fce2dbe79a86d43d463783ba98a9
|
refs/heads/master
| 2022-05-19T12:01:36.654699
| 2022-05-11T15:43:55
| 2022-05-11T15:43:55
| 249,754,977
| 2
| 3
| null | 2021-02-16T23:19:44
| 2020-03-24T16:08:42
|
R
|
UTF-8
|
R
| false
| false
| 6,114
|
r
|
foieGras.R
|
##########################################################
# foieGras: fit continous-time random walk or
# correlated random walk models to
# animal movement data
# Michaela A. Kratofil, Cascadia Research
# Updated: 06 AUG 2020
##########################################################
## OVERVIEW ##
# For details on the model, refer to Jonsen et al. (2020),
# "A continous-time state-space model for rapid quality
# control of argos locations from animal-borne tags.
# The model coded here is pretty basic, but various user
# defined parameters can be adjusted to suit your data/
# research questions. Take the time to understand the model!
# Ian Jonsen's paper on this model includes code and an
# easy to follow example; I would reccommend reading this.
# He also has basic vignettes available on his github.
# This script is set up to fit a foieGras model to several
# deployments at once (or single).
# This package/model can deal with least-squares or Kalman
# filtered data. The model also includes a psi parameter to
# account for possible consistent underestimation of the
# Kalman filter-derived location uncertainty. psi re-scales
# all ellipse semi-minor axes, where estimated values > 1
# inflate the uncertainty region around measured locations
# by lengthening the semi-minor axis.
## How it works ##
# Here we will use location data that has already been through
# the Douglas Filter. We'll need to format the data for input
# into foieGras.
############################################################
# load packages
library(tidyverse)
library(lubridate)
library(sf)
library(foieGras)
library(ptolemy)
library(ggspatial)
## read in Douglas filtered locations (Argos only)
tbl_locs <- readr::read_csv("Douglas Filtered/FaTag002-011_DouglasFiltered_KS_r15d3lc2_2020MAYv1.csv",
col_types = cols(animal = col_character(),
ptt = col_integer(),
date = col_datetime(),
longitud = col_double(),
latitude = col_double(),
LC = col_character(),
error_radius = col_integer(),
ellipse_orient = col_integer(),
semi_major = col_integer(),
semi_minor = col_integer()
))
## review data
str(tbl_locs)
summary(tbl_locs)
length(unique(tbl_locs$animal)) # 10 deployments in this dataset
tbl_locs$LC <- factor(tbl_locs$LC, levels = c("DP","L3","L2","L1","L0","LA","LB","LZ")) # assign factor levels
summary(tbl_locs$LC) # check
class(tbl_locs$date) # check class of date is POSIXct or POSIXt
attr(tbl_locs$date, 'tzone') # check TZ of date
## set up variables to fit FG models
tbl_locs <- tbl_locs %>%
rename(
id = animal,
lc = LC,
lon = longitud,
lat = latitude,
smaj = semi_major,
smin = semi_minor,
eor = ellipse_orient
)
tbl_locs <- select(tbl_locs, id, date, lc, lon, lat, smaj, smin, eor) # select columns
## recode LC classes (foieGras panicks if don't), and make DP location L3
tbl_locs$lc <- recode(tbl_locs$lc, DP = '3', L3 = '3', L2 = '2', L1 = '1',
L0 = '0', LA = 'A', LB = 'B')
summary(tbl_locs$lc) # check
## assign DP locations error ellipse info
tbl_locs$smaj[is.na(tbl_locs$smaj)] <- 0
tbl_locs$smin[is.na(tbl_locs$smin)] <- 0
tbl_locs$eor[is.na(tbl_locs$eor)] <- 0
summary(tbl_locs) # check
## project locations using an appropriate CRS - if don't project, the fit_ssm
## function will internally project using the world mercator projection.
## *I've often run into bugs when specifying my own projection, so use default.
## I use the crs EPSG:3750 (NAD83/UTM 4N) here, which works pretty well if data
## doesn't go too far east of the Big Island.
sf_locs <- st_as_sf(tbl_locs, coords = c("lon","lat"), crs = 4326) %>%
st_transform(crs = 3750)
st_crs(sf_locs) # check
## visualize: create tracklines and map
sf_lines <- sf_locs %>%
arrange(id, date) %>%
group_by(id) %>%
summarise(do_union = F) %>%
st_cast("MULTILINESTRING")
st_crs(sf_lines) # check
# get coastline data from ptolemy package
map_base <- ptolemy::extract_gshhg(sf_locs, buffer = 50000, epsg = 3750) # extract polygon data for region of loc data
plot(map_base) # check
# map theme function
# ggplot theme for the maps below: theme_bw() with a black-bordered white
# panel, black axis text, and a bold black title.
theme_map <- function() {
  tweaks <- theme(
    panel.background = element_rect(fill = 'white', colour = 'black', size = 1.25),
    axis.text = element_text(colour = 'black'),
    plot.title = element_text(colour = 'black', face = 'bold')
  )
  theme_bw() + tweaks
}
# map tracklines
ggplot() +
annotation_spatial(map_base, fill = 'grey', lwd = 1) +
layer_spatial(sf_lines, size = 1, aes(color = id)) +
theme_map() +
scale_color_viridis_d()
## fit random walk model: we turn spdf (speed filter) off and set a time step of 3 hours. I
## turned off the 'psi' parameter here.
m1 <- fit_ssm(tbl_locs, model = 'rw', spdf = F, time.step = 3, map = list(psi = factor(NA)))
m1$ssm[[1]] # check model parameters for each tag
## quick visualization
plot(m1, what = 'fitted', type = 2) # fitted locations
plot(m1, what = 'predicted', type = 2) # predicted locations
## grab predicted locations
pred1 <- grab(m1, what = 'predicted', as_sf = F)
## If desired, fit move persistence model ** this model is not well fit, just an exmaple of the code
## to fit a move persistence model.
fmp <- m1 %>%
grab(., "p", as_sf = F) %>%
select(id, date, lon, lat) %>%
fit_mpm(., model = "jmpm") # use jmpm to pool variance parameters across all individuals
fmp$mpm[[1]] # can check output
## save predicted location data
write.csv(pred1, "SSM/FaTag002-011_FG_3hTimeStep_2020AUGv1.csv", row.names = F)
|
876e417ea0f00d47b03ec68650c6a3322d8bfaab
|
b781976b9af252036f3a2bd56295aa39a12f79d3
|
/man/neuron_pairs.Rd
|
5c6da22422ab39219df3d94d5729d0656389f083
|
[] |
no_license
|
natverse/nat.nblast
|
18ac30ad38bd3d37b41565183aa012cab43fb6a3
|
f582c7d1eca42c09b3ebef8009dc7129809ea8ab
|
refs/heads/master
| 2023-06-26T22:12:18.232134
| 2023-06-13T18:13:28
| 2023-06-13T18:13:28
| 19,026,348
| 7
| 1
| null | 2023-01-12T17:33:14
| 2014-04-22T10:54:53
|
R
|
UTF-8
|
R
| false
| true
| 908
|
rd
|
neuron_pairs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smat.r
\name{neuron_pairs}
\alias{neuron_pairs}
\title{Utility function to generate all or random pairs of neurons}
\usage{
neuron_pairs(query, target, n = NA, ignoreSelf = TRUE)
}
\arguments{
\item{query, target}{either \code{\link{neuronlist}}s or character vectors of
names. If target is missing, query will be used as both query and target.}
\item{n}{number of random pairs to draw. When NA, the default, uses
\code{expand.grid} to draw all pairs.}
\item{ignoreSelf}{Logical indicating whether to omit pairs consisting of the
same neuron (default \code{TRUE}).}
}
\value{
a data.frame with two character vector columns, query and target.
}
\description{
Utility function to generate all or random pairs of neurons
}
\examples{
neuron_pairs(nat::kcs20, n=20)
}
\seealso{
\code{\link{calc_score_matrix}, \link{expand.grid}}
}
|
466567a7cd6a9359a733c168744a031db9dc42da
|
08b154beac70fc61b20550e4969d5eaa82003525
|
/demoShiny/server.R
|
3bdd268f10e779cddb8c0d28091ae23b2503e011
|
[] |
no_license
|
mcSamuelDataSci/R-visual-display-workshop
|
01a9df9498626736ef34d965a6278516d6d0c9bb
|
368154028e5e6440fad46151dec1745523d38daf
|
refs/heads/master
| 2020-04-02T05:24:40.178075
| 2019-11-01T21:18:58
| 2019-11-01T21:18:58
| 154,074,323
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
r
|
server.R
|
shinyServer(function(input, output) {
output$myPlot1 <- renderPlot( deathTrendPlot(input$myCounty))
})
|
1a824b7b8fdd051dc692dad124a785f56f6fef0d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MVar/examples/MFA_English.Rd.R
|
a5a1d662f0576aaec5495a9a77badbfdb0095cb4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 580
|
r
|
MFA_English.Rd.R
|
library(MVar)
### Name: MFA
### Title: Multiple Factor Analysis (MFA).
### Aliases: MFA
### Keywords: Multiple Factor Analysis MFA MFACT
### ** Examples
data(DataMix) # mixed dataset
Data <- DataMix[,2:ncol(DataMix)]
rownames(Data) <- DataMix[1:nrow(DataMix),1]
GroupNames = c("Grade Cafes/Work", "Formation/Dedication", "Coffees")
MF <- MFA(Data, c(2,2,2), TypeGroups = c("n","c","f"), GroupNames) # performs MFA
print("Principal Component Variances:"); round(MF$MatrixA,2)
print("Matrix of the Partial Inertia / Score of the Variables:"); round(MF$MatrixEscVar,2)
|
dec0c4f6f565d064f92a125ee844c028da6b0ee6
|
0d821faa15751b8ede906b2fe870932a118efc00
|
/Riparian_functions_v1.r
|
907fbf8adeb78e0f0cfc7fa08ea6f1a4bd690b8b
|
[] |
no_license
|
GeospatialDaryl/R_Functions
|
2b217aca13d763290216542ba0d5d92f1bdf7dc8
|
61cd6466926e4fc7bf1a07614bdbcfbac7c16ad4
|
refs/heads/master
| 2021-01-20T03:17:13.379698
| 2017-05-17T23:23:05
| 2017-05-17T23:23:05
| 89,520,257
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,613
|
r
|
Riparian_functions_v1.r
|
# http://stackoverflow.com/questions/21815060/dplyr-how-to-use-group-by-inside-a-function
# For programming, group_by_ is the counterpart to group_by:
#
# library(dplyr)
#
# mytable <- function(x, ...) x %>% group_by_(...) %>% summarise(n = n())
# mytable(iris, "Species")
# # or iris %>% mytable("Species")
# which gives:
#
# Species n
# 1 setosa 50
# 2 versicolor 50
# 3 virginica 50
# Update At the time this was written dplyr used %.% which is what was originally used above but now %>% is favored so have changed above to that to keep this relevant.
#
# Update 2 regroup is now deprecated, use group_by_ instead.
#
# Update 3 group_by_(list(...)) now becomes group_by_(...) in new version of dplyr as per Roberto's comment.
#
# Update 4 Added minor variation suggested in comments.
# For each level of the grouping column named by `iGroup`, print a bar
# chart of counts of the column named by `iFactor`.
# NOTE(review): relies on the global data frame `dT` and on the helper
# `findThatColumn()`, neither defined in this file -- verify they exist.
# NOTE(review): `deparse(substitute(iFactor))` re-quotes a string argument
# (yielding "\"Species\"") while the commented examples pass strings;
# confirm aes_string() handles that extra quoting as intended.
makeBarPlotByGroup <- function( iFactor, iGroup){
  # plot by Factor for each in Group
  colNum <- findThatColumn(dT, iGroup)
  vGroups <- as.character(unique(dT[,colNum]))
  print(vGroups)
  for ( grp in vGroups){
    #print(vGroups)
    print(grp)
    #flush.console()
    # rows of dT belonging to the current group level
    goodRows <- which(dT[,colNum] == grp )
    this <- dT[goodRows, ]
    ggp <- ggplot(this,
                  aes_string(deparse(substitute(iFactor)))) + geom_bar()
    print(ggp)       # explicit print() is required for ggplot in a loop
    flush.console()
  }
}
#makeBarPlotByGroup("Species", "Ranch")
#makeBarPlotByGroup("Species", "stock_anal")
###############################################################
# Summarise one (unquoted) column of `df`: its mean, sum, and the row count.
# Uses rlang tidy evaluation (enquo + !!) so the column is passed bare,
# e.g. my_summarise2(mtcars, mpg).
my_summarise2 <- function(df, expr) {
  expr <- enquo(expr)  # capture the unevaluated column expression
  summarise(df,
            mean = mean(!!expr),
            sum = sum(!!expr),
            n = n()
  )
}
# dT, factor, groupo
# Filter `df` by the (unquoted) predicate `expr2`, then start a ggplot of
# the (unquoted) variable `expr1` on the filtered data and return it.
# Fix: the original passed the captured quosures to filter()/aes() without
# unquoting (`!!`), so the expressions were never evaluated against `df`.
superGrouper <- function(df, expr1, expr2 ){
  expr1 <- enquo(expr1)
  expr2 <- enquo(expr2)
  this <- filter(df, !!expr2)
  # return the plot object so callers can add geoms to it
  ggplot( this, aes(!!expr1) )
}
# Drop every column that contains at least one NA and return the surviving
# columns as a data frame.
# Fixes vs the original: works column-wise without coercing the data frame
# to a matrix (apply() coerced mixed types to character), removes the dead
# `tblNA` intermediate, and uses drop = FALSE so a single surviving column
# is not silently collapsed to a bare vector.
makeGoodDF <- function(inputDF){
  # TRUE for columns with no missing values
  keep <- vapply(inputDF, function(col) !anyNA(col), logical(1))
  inputDF[, keep, drop = FALSE]
}
# Column-wise check that every value of a data frame is finite (no NA/NaN/Inf).
# Returns a named logical vector with one element per column.
# http://stackoverflow.com/questions/8173094/how-to-check-a-data-frame-for-any-non-finite
# vapply replaces sapply so the return type is guaranteed even for
# zero-column input.
is.finite.data.frame <- function(obj){
  vapply(obj, FUN = function(x) all(is.finite(x)), logical(1))
}
# Parallel / destructuring assignment operator, e.g. `c(a, b) := list(1, 2)`.
# NOTE(review): masks data.table's `:=` if both are in scope; `is()` comes
# from the methods package (attached by default in interactive sessions).
':=' <- function(lhs, rhs) {
  # http://stackoverflow.com/questions/1826519/how-to-assign-from-a-function-which-returns-more-than-one-value
  # assignments land in the caller's environment
  frame <- parent.frame()
  # turn `c(a, b)` into a list of symbols and drop the leading `c`
  lhs <- as.list(substitute(lhs))
  if (length(lhs) > 1)
    lhs <- lhs[-1]
  # single target: plain assignment, done
  if (length(lhs) == 1) {
    do.call(`=`, list(lhs[[1]], rhs), envir=frame)
    return(invisible(NULL))
  }
  # wrap a bare function/formula RHS so it is not split elementwise
  if (is.function(rhs) || is(rhs, 'formula'))
    rhs <- list(rhs)
  # pad the RHS with NULLs when fewer values than targets were supplied
  if (length(lhs) > length(rhs))
    rhs <- c(rhs, rep(list(NULL), length(lhs) - length(rhs)))
  # assign each value to its target (length(lhs) >= 2 here, so 1: is safe)
  for (i in 1:length(lhs))
    do.call(`=`, list(lhs[[i]], rhs[[i]]), envir=frame)
  return(invisible(NULL))
}
# Jittered scatter of column Z against 14-month survival status, with point
# color from the Ranch column. `survived` and `col` default to columns of
# inputDF but can be overridden by the caller (defaults evaluate lazily).
willow.survival.cloud <- function(inputDF,
                                  survived = inputDF$status14,
                                  col = inputDF$Ranch ){
  # Z is resolved in inputDF; survived/col resolve to the arguments
  g <- ggplot(inputDF, aes(x = Z, y = survived))
  g + geom_jitter(aes(color = col))
}
# Same jittered Z-vs-survival scatter as willow.survival.cloud, but colored
# by the stock_anal column by default instead of Ranch.
willow.survival.cloud.Stock <- function(inputDF,
                                        survived = inputDF$status14,
                                        col = inputDF$stock_anal ){
  g <- ggplot(inputDF, aes(x = Z, y = survived))
  g + geom_jitter(aes(color = col))
}
# Plot a missingness map (Amelia::missmap) of the data frame.
# NOTE(review): attaches the Amelia package at call time as a side effect.
willow.summary <- function(inputDF){
  library(Amelia)
  missmap(inputDF, main = "Missing values vs observed")
}
# Normality diagnostics for column Z: Shapiro-Wilk test plus a normal Q-Q plot.
# NOTE(review): the `testP` argument is never used in the body -- confirm intent.
willow.examples <- function(inputDF, testP = "b14"){
  print(shapiro.test(inputDF$Z))
  qqnorm(inputDF$Z)
}
# Split a data frame into training and test sets by random row sampling.
# Returns list(train = ..., test = ...); naming the elements is backward
# compatible with the original positional access ([[1]] / [[2]]).
willow.testTrain <- function( inputDF, trainingProportion = 0.75 ){
  ## number of rows that go into the training set
  smp_size <- floor(trainingProportion * nrow(inputDF))
  ## fixed seed so the partition is reproducible
  ## NOTE(review): this mutates the global RNG state as a side effect
  set.seed(123)
  train_ind <- sample(seq_len(nrow(inputDF)), size = smp_size)
  train <- inputDF[train_ind, ]
  test <- inputDF[-train_ind, ]
  return( list(train = train, test = test) )
}
# Logistic regression of status14 on Z and Ranch; prints the model summary
# and a sequential Chi-squared ANOVA, then returns the fitted glm.
# Fix: the formula names columns of `data` directly (status14 ~ Z + Ranch)
# instead of `inputDF$...` terms, so predict(m, newdata = ...) works and
# coefficient names are clean. The fitted values are unchanged.
willow.logistic.regression1 <- function(inputDF){
  m1 <- glm(status14 ~ Z + Ranch,
            family = binomial(link = "logit"),
            data = inputDF)
  print(summary(m1))
  print(anova(m1, test = "Chisq"))
  return(m1)
}
# Logistic regression of status14 on Z and NUTM; prints the model summary
# and a sequential Chi-squared ANOVA, then returns the fitted glm.
# Fix: formula references `data` columns directly (not `inputDF$...`),
# so predict(m, newdata = ...) works; fitted values are unchanged.
willow.logistic.regressionNS <- function(inputDF){
  m1 <- glm(status14 ~ Z + NUTM,
            family = binomial(link = "logit"),
            data = inputDF)
  print(summary(m1))
  print(anova(m1, test = "Chisq"))
  return(m1)
}
# Logistic regression of status14 on Z alone; prints the model summary and
# a Chi-squared ANOVA, then returns the fitted glm.
# Fix: formula references `data` columns directly (not `inputDF$...`),
# so predict(m, newdata = ...) works; fitted values are unchanged.
willow.logistic.regression <- function(inputDF){
  m1 <- glm(status14 ~ Z,
            family = binomial(link = "logit"),
            data = inputDF)
  print(summary(m1))
  print(anova(m1, test = "Chisq"))
  return(m1)
}
# Logistic regression of status14 on NUTM alone; prints the model summary
# and a Chi-squared ANOVA, then returns the fitted glm.
# Fix: formula references `data` columns directly (not `inputDF$...`),
# so predict(m, newdata = ...) works; fitted values are unchanged.
willow.logistic.regressionOnlyNS <- function(inputDF){
  m1 <- glm(status14 ~ NUTM,
            family = binomial(link = "logit"),
            data = inputDF)
  print(summary(m1))
  print(anova(m1, test = "Chisq"))
  return(m1)
}
# Logistic regression of status14 on all remaining columns of inputDF.
# BUG FIX: the original formula `inputDF$status14 ~ .` left status14 inside
# the `.` expansion (because the LHS was not a bare column name), so the
# response appeared among its own predictors. `status14 ~ .` excludes it.
willow.logistic.regressionAll<- function(inputDF){
  m1 <- glm(status14 ~ . ,
            family = binomial(link = "logit"),
            data = inputDF)
  print(summary(m1))
  print(anova(m1, test = "Chisq"))
  return(m1)
}
# Logistic regression of status14 on stock_anal; prints the model summary
# and a Chi-squared ANOVA, then returns the fitted glm.
# Fix: formula references `data` columns directly (not `inputDF$...`),
# so predict(m, newdata = ...) works; fitted values are unchanged.
willow.logistic.regression.Stock<- function(inputDF){
  m1 <- glm(status14 ~ stock_anal ,
            family = binomial(link = "logit"),
            data = inputDF)
  print(summary(m1))
  print(anova(m1, test = "Chisq"))
  return(m1)
}
# Violin plot of the expressions `x` and `y` evaluated against inputDF.
# BUG FIX: the original built the plot from the global data frame `dRWz`,
# ignoring the inputDF argument; it also computed (and discarded) a
# dotplot layer before returning the violin plot -- that dead expression
# is removed.
willow.violin <- function(inputDF, x , y ){
  # aes(x, y) captures the argument expressions; ggplot resolves them in
  # inputDF first and falls back to this call frame
  g4 <- ggplot(inputDF, aes(x, y))
  g4 + geom_violin(scale = "area")
}
|
9126da9650e97bd7b5585213e4cbc740f5252a59
|
b4bba5708aa80327e5a47930fd3483236a4427ad
|
/man/start_end_dates.Rd
|
49a350512913c48be121d66586d1dc3309a5dbd8
|
[
"MIT"
] |
permissive
|
PaulESantos/snowpack
|
bffbffa0dec68bb4f5825d4568fa3d75cc6a7c40
|
7b564e31df5685dc4f66645ba6c588291ae0fd11
|
refs/heads/main
| 2023-03-20T19:58:06.891347
| 2021-03-19T03:22:47
| 2021-03-19T03:22:47
| 343,623,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 815
|
rd
|
start_end_dates.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/start_end_dates.R
\name{start_end_dates}
\alias{start_end_dates}
\title{Find start and end dates of HOBO data}
\usage{
start_end_dates(df, ...)
}
\arguments{
\item{df}{A data.frame formatted as the example dataset. Review
hobo_rmbl_data to see details.}
\item{...}{params could be \code{year}, \code{month}, to have a detailed review
of the dates.}
}
\value{
a summary tibble
}
\description{
\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#experimental}{\figure{lifecycle-experimental.svg}{options: alt='[Experimental]'}}}{\strong{[Experimental]}}
}
\examples{
data("hobo_rmbl_data")
road <- hobo_rmbl_data[[1]]
road \%>\%
start_end_dates()
road \%>\%
start_end_dates(year)
road \%>\%
start_end_dates(year, month)
}
|
dc7a4542bd522a1e3967d154d8fa09c5fc6011f0
|
6e573b701339dd0a470df3a666e6b176bfd59bf7
|
/cachematrix.R
|
e7a0149f2151f4bf53b09e89b8d8518d415eff2a
|
[] |
no_license
|
kenburkman/ProgrammingAssignment2
|
50185afa80399a125a903ea823936ab1c0bc2dab
|
3c6a7c010ac86b8a5a143f556114147295989fcc
|
refs/heads/master
| 2021-01-18T05:59:01.388468
| 2016-07-02T14:05:21
| 2016-07-02T14:05:21
| 62,424,507
| 0
| 0
| null | 2016-07-01T23:17:17
| 2016-07-01T23:17:16
| null |
UTF-8
|
R
| false
| false
| 1,015
|
r
|
cachematrix.R
|
## These two functions allow the user to solve and cache
## the inverse of a matrix so that it can be recalled later
## without the need to recalculate it.
## this function enables you to set and get the matrix and to
## set and get the matrix's inverse.
# Build a matrix wrapper that can cache its inverse.
# Returns a list of four accessors: set/get the matrix and
# setInverse/getInverse for the cached inverse. Setting a new matrix
# invalidates the cache.
makeCachematrix<-function(x=matrix()){
  inv <- NULL  # cached inverse; NULL until computed
  set <- function(y) {
    x <<- y     # replace the stored matrix ...
    inv <<- NULL  # ... and drop the stale cache
  }
  get <- function() {
    x
  }
  setInverse <- function(solve) {
    inv <<- solve
  }
  getInverse <- function() {
    inv
  }
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## this function checks to see if the matrix's inverse has already
## been calculated. If it hasn't, then it calculates and stores
## the inverse. If it has, then it prints a message and returns the
## stored inverse. Ta-da!
# Return the inverse of the matrix held by a makeCachematrix object `x`.
# On the first call the inverse is computed with solve() (extra args in
# `...` are forwarded) and stored in the cache; later calls report
# "getting cached data" and return the stored inverse.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # cache miss: compute, store, return
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
a3a110a852c194f9c36dcf16f3d12ab08b904336
|
a3a81c268a8e7bd29e3b5e007f521078785e9305
|
/server.R
|
8fa922da50a2f2a2d3311aabfdcd267bdbb94c23
|
[] |
no_license
|
mfatemi/Developing-Data-Products-Shiny
|
a23481196c93ea395167b8b0404cca394b099af3
|
5293460e547cc01146b3d2aadc941ab7e3992026
|
refs/heads/master
| 2020-05-19T10:08:06.905881
| 2015-09-22T18:51:53
| 2015-09-22T18:51:53
| 42,950,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 376
|
r
|
server.R
|
# Shiny server: input$text1 gives a sample size; draw that many standard
# normal deviates (reactively re-drawn when the input changes) and render
# both a numeric summary and a histogram of the sample.
shinyServer(
  function(input, output) {
    # echo the entered value back to the UI
    output$text1 <- renderText({input$text1})
    # reactive sample shared by the summary and the plot
    t<-reactive(rnorm(input$text1))
    output$sum <- renderPrint({
      t<-t()
      summary(t)
    })
    output$distPlot <- renderPlot(
      { t<-t()
        hist(t)
      })
  }
)
|
332642ffa69506f6aece64714c374aa69f34d700
|
49ddfd7fd6503e6156a5db40911aa340a06685b0
|
/Basic_statistics_genotype_data/Calculating_basic_satistics_validated_SNPs.R
|
2d2fe69192d045e581294c79ef6f5c07eb653df5
|
[] |
no_license
|
MarineDuperat/Resilience_white_spruce
|
c718ef70f59331bbd5e2cc3332a7009d7c23d90c
|
d58037299a83dda2201986562a3ef7deefcefffe
|
refs/heads/master
| 2022-04-10T17:55:06.413959
| 2020-03-11T19:40:41
| 2020-03-11T19:40:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,934
|
r
|
Calculating_basic_satistics_validated_SNPs.R
|
#############################################################################################
## Introduction
#############################################################################################
#Code for
#"Adaptive genetic variation to drought in a widely distributed conifer
#suggests a potential for increasing forest resilience in a drying climate""
#
#Authors: Claire Depardieu, Martin P. Girardin, Simon Nadeau, Patrick Lenz, Jean Bousquet, Nathalie Isabel
#
#Journal: New Phytologist
#Article acceptance date: 29 February 2020
#
#Author for correspondence: Claire Depardieu, claire.depardieu@canada.ca or calima45@hotmail.fr
##################################################################################
### Downloading libraries
##################################################################################
library("tidyverse")
library("hierfstat")
library(adegenet)
library(dplyr)
##################################################################################
### Analysis: basic statistics for individual SNPs (6,3)
##################################################################################
#Importing the data
file1 <- "C:/Users/...working_directory.../Data.genotype.csv"
data.genotype <- read_delim(file1, delim = ";")
fix(data.genotype)
dim(data.genotype)
#Description of the dataset imported:
#First column of the dataset: Provenance
#Other columns: 6,386 validated SNPs
#1481 rows: 1481 trees
# Format the dataframe...
class(data.genotype) <- "data.frame"
# Compute the basic population-genetic statistics here
# Basic statistics (hierfstat::basic.stats) for the diploid genotype data,
# overall and per locus; the per-locus table is written to a text file.
BASIC.STATS <- basic.stats(data.genotype, diploid=TRUE)
BASIC.STATS$overall
#Results obtained ---------------------
#Ho Hs Ht Dst Htp Dstp Fst Fstp Fis
#0.3040 0.2922 0.3055 0.0133 0.3059 0.0137 0.0437 0.0447 -0.0405
#Dest
#0.0193
# BUG FIX: the original indexed BASIC.STATS2, an object that is never
# created in this script; the per-locus table lives in BASIC.STATS$perloc.
basic.data.perloc=as.data.frame(BASIC.STATS$perloc)
write.table(basic.data.perloc,"Basic_statistics.txt")
|
f0d0fee29cfa2f0b59b609029e7e7d4446a5c43c
|
9caf26039acdcdb74ccf8025bfdff0c3ef1b190b
|
/Kumejima_Analysis/gakuGeneral_functions.R
|
7d96e7515b29d76fdaf9600c80cd11a6d76a9055
|
[] |
no_license
|
Kohsuke1031/Test2
|
5ff095f0eeade76a73a08bb89b3ae3b2fe139937
|
c21719638ded9836bc29023576430d5cf87fe1e6
|
refs/heads/master
| 2023-06-04T20:13:39.042971
| 2021-06-21T07:10:33
| 2021-06-21T07:10:33
| 378,777,345
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 756
|
r
|
gakuGeneral_functions.R
|
## gaku's fucntions
## 1 縦書き用フォーマット
#h ttps://id.fnshr.info/2017/03/13/r-plot-tategaki/
tategaki <- function(x){
x <- chartr("ー", "丨", x) # 長音符の処理
x <- strsplit(split="", x)
sapply(x, paste, collapse="\n")
}
#df_paths = list.files(path = "./gakuLab_with_Yasui_san/Ishikawa_Analysis/Ishikawa_Data",full.names = T)
## 2 縦書き用フォーマット
read_files <- function(x){
df <- read.csv(x,
header = T,
fileEncoding = "CP932")
return(df)
}
###### function to express y axis desits
ScientificNotation <- function(l) {
l <- format(l, scientific = TRUE)
l <- gsub("^(.*)e", "'\\1'e", l)
l <- gsub("e\\+", "%*%10^", l)
l[1] <- "0"
parse(text = l)}
######
|
136611cec100ea67ee86a1ed00bae628ff43466b
|
5d690f159266b2c0f163e26fcfb9f9e17a0dc541
|
/GET/R/crop.r
|
e43808423faeff856c58952fcd36aa71356f1b59
|
[] |
no_license
|
albrizre/spatstat.revdep
|
3a83ab87085895712d7109c813dcc8acb55493e9
|
b6fc1e73985b0b7ed57d21cbebb9ca4627183108
|
refs/heads/main
| 2023-03-05T14:47:16.628700
| 2021-02-20T01:05:54
| 2021-02-20T01:05:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,159
|
r
|
crop.r
|
#' Crop the curves to a certain interval
#'
#' Crop the curves to a certain interval
#'
#'
#' The curves can be cropped to a certain interval defined by the arguments r_min and r_max.
#' The interval should generally be chosen carefully for classical deviation tests.
#' @param curve_set A curve_set (see \code{\link{create_curve_set}}) or
#' an \code{envelope} object of \pkg{spatstat}. If an envelope object is given,
#' it must contain the summary functions from the simulated patterns which can be
#' achieved by setting savefuns = TRUE when calling the \code{envelope} function.
#' @param r_min The minimum radius to include.
#' @param r_max The maximum radius to include.
#' @return A curve_set object containing the cropped summary functions and
#' the cropped radius vector.
#' @export
crop_curves <- function(curve_set, r_min = NULL, r_max = NULL) {
  # cropping requires a plain numeric r vector on the curve_set
  if(!is.null(r_min) | !is.null(r_max)) if(!is.vector(curve_set$r)) stop("curve_set$r is not a vector: r_min and r_max cannot be used.")
  # accept spatstat envelope objects as well as curve_sets
  curve_set <- convert_envelope(curve_set, allow_Inf_values = TRUE)
  # each bound must be NULL or a single finite number
  n_r_min <- length(r_min)
  if(n_r_min > 0L && (n_r_min != 1L || !is.finite(r_min))) {
    stop('r_min must be a finite scalar value or NULL.')
  }
  n_r_max <- length(r_max)
  if(n_r_max > 0L && (n_r_max != 1L || !is.finite(r_max))) {
    stop('r_max must be a finite scalar value or NULL.')
  }
  r <- curve_set[['r']]
  # select indices to keep, depending on which bounds were supplied
  if(n_r_min == 1L) {
    if(n_r_max == 1L) {
      if(r_min >= r_max) {
        stop('r_min must be smaller than r_max.')
      }
      cut_idx <- which(r >= r_min & r <= r_max)
    }
    else {
      cut_idx <- which(r >= r_min)
    }
  } else {
    if(n_r_max == 1L) {
      cut_idx <- which(r <= r_max)
    }
    else {
      # neither bound given: nothing to crop; just validate and return
      return(check_curve_set_content(curve_set, allow_Inf_values = FALSE))
    }
  }
  if(length(cut_idx) < 1L) {
    stop('r_min and r_max cropped everything away.')
  }
  # crop r, the function matrix (rows indexed by r), and theo if present
  curve_set[['r']] <- r[cut_idx]
  curve_set[['funcs']] <- curve_set[['funcs']][cut_idx, , drop = FALSE]
  theo <- curve_set[['theo']]
  if(!is.null(theo)) curve_set[['theo']] <- theo[cut_idx]
  # final validation; infinite values are no longer allowed after cropping
  check_curve_set_content(curve_set, allow_Inf_values = FALSE)
  curve_set
}
|
2dec0b49ceacf123a8de7d3a8194017c2cb56790
|
30e573840e35fac8e0fd7426dbed415a294d80bd
|
/Figure 11.4.R
|
ffcd3c1766ce0614ffefbc2d74bb53e11d608dba
|
[] |
no_license
|
henrylankin/stat6304
|
fbd9490cd00fc9b1ad9b08915f2c30c5147d3f8f
|
2fcdd0590ba1e141d75568f13f50c772e7b93ff8
|
refs/heads/master
| 2021-01-23T05:14:43.235965
| 2017-03-27T04:24:17
| 2017-03-27T04:24:17
| 86,289,594
| 0
| 0
| null | 2017-03-27T04:24:18
| 2017-03-27T03:59:58
| null |
UTF-8
|
R
| false
| false
| 750
|
r
|
Figure 11.4.R
|
#import data
ex11.4 <- read.csv("~/Desktop/School/6304/Data Sets/ASCII-comma/CH11/ex11-4.TXT", quote = "'")
#View(ex11.4)
#scatterplot
plot(ex11.4$x, ex11.4$y, xlab = 'x', ylab = 'y', main = 'Figure 11.4: x vs. y')
#regression line
regression.line <- lm(ex11.4$y~ex11.4$x)
abline(regression.line)
regLine.summary <- summary(regression.line)
print(regLine.summary)
#print regression line
beta.0 <- regression.line$coefficients[1]
beta.1 <- regression.line$coefficients[2]
print(sprintf("Simple Regression Line (SRL): y = %f*(x) + %f", beta.1, beta.0))
print(sprintf("beta-0 = %f", beta.0))
print(sprintf("beta-1 = %f", beta.1))
#predict y value at x = xValue
xValue <- 12
yValue <- beta.1*xValue + beta.0
print(sprintf("y predicted = %f", yValue))
|
3dc6d0d5d2b796611fd9749a0d636e5cdd3c2477
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/available/examples/suggest.Rd.R
|
6cbc9b1cc360e2c9377565430ee373833c8db259
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
suggest.Rd.R
|
library(available)
### Name: suggest
### Title: Suggest a package name based on a development package title or
### description
### Aliases: suggest
### ** Examples
## Not run:
##D # Default will use the title from the current path.
##D suggest()
##D
##D # Can also suggest based on the description
##D suggest(field = "Description")
## End(Not run)
# Or by explictly using the text argument
suggest(text =
"A Package for Displaying Visual Scenes as They May Appear to an Animal with Lower Acuity")
|
be2d2fb8d44dc3e63bc2e101173402b0d1f2980c
|
5d18784db64de6f1355e90b7b0a787c0707ddd35
|
/R/ggplot.bfastIR.R
|
156bc03e47a457cc956eb5f2ae0eb63c36f88de8
|
[] |
no_license
|
dondealban/bfastApp
|
61b13523a3ecdf4c9554f66f55d11c35127bab25
|
33416fc025451f3794370127b316079b15a3dcba
|
refs/heads/master
| 2021-05-30T12:25:01.824464
| 2015-07-09T08:02:34
| 2015-07-09T08:02:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,254
|
r
|
ggplot.bfastIR.R
|
# Plot a bfast irregular-time-series result with ggplot2: the response
# series as a line with green points, dashed red vertical lines at the
# detected breakpoints, and (when seg = TRUE) the per-segment fit of the
# user-supplied `formula` overlaid in blue.
# NOTE(review): relies on bfastts/bfastpp (bfast package) and date_decimal
# (lubridate); `order` is forwarded to bfastpp -- confirm its meaning there.
ggplot.bfastIR <- function(x, seg = TRUE, order, formula) {
  ggdf <- x$df
  # mark breakpoint rows so their times can be extracted for geom_vline
  ggdf[,'breaks'] <- NA
  ggdf$breaks[x$breaks$breakpoints] <- 1
  xIntercept <- ggdf$time[ggdf$breaks == 1]
  gg <- ggplot(ggdf, aes(time, response)) +
    geom_line() +
    geom_point(color = 'green') +
    geom_vline(xintercept = xIntercept, color = 'red', linetype = 'dashed') +
    scale_x_continuous(breaks=floor(min(ggdf$time)):ceiling(max(ggdf$time))) +
    theme(axis.text.x = element_text(angle = 60, hjust = 1))
  if(seg && !is.na(x$breaks$breakpoints)) {
    # segment boundaries: series start, each breakpoint, series end
    segments <- c(ggdf$time[c(1,x$breaks$breakpoints, nrow(ggdf))])
    for(i in seq_along(segments[-1])) {
      # rebuild a prediction frame over the full series ...
      predTs <- bfastts(rep(NA, ncol(ggdf)), date_decimal(ggdf$time), type = 'irregular')
      predDf <- bfastpp(predTs, order = order, na.action = na.pass)
      # ... then restrict prediction and training data to this segment
      predDfSub <- subset(predDf, time <= segments[i + 1] & time >= segments[i])
      trainDfSub <- subset(ggdf, time <= segments[i + 1] & time >= segments[i])
      # fit the user-supplied formula on the segment and overlay predictions
      model <- lm(formula = formula, data = trainDfSub)
      predDfSub$pred <- predict(model, newdata = predDfSub)
      gg <- gg + geom_line(data = predDfSub, aes(x = time, y = pred), color = 'blue')
    }
  }
  gg
}
|
89d80735a003703d18098db0cc523bc5f55f3fe7
|
c7c558f492eae205ce72b4c2361827a79a86e65d
|
/UPDE/ATACseq/FSC/30_ATAC_merge_counts.R
|
4dab00b2079a714d2d155f62d4ea9d6fdfdfbb7f
|
[] |
no_license
|
jpezoldt/UDPE
|
6094ddb629939abe2cc93d7ee68711cea8f0db4e
|
262c7c87a36ae4598ed5927ac64cf52e6576000c
|
refs/heads/master
| 2021-07-13T02:06:03.302301
| 2019-01-11T11:01:02
| 2019-01-11T11:01:02
| 143,868,496
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,262
|
r
|
30_ATAC_merge_counts.R
|
# Author: Vicnent Gardeux
# Adapted by: Joern Pezoldt
# 12.07.2018
# Function:
# 1) Merges tables of count per peak from homer ATAC-seq pipeline, by id
#Libraries
require(data.table)
# Input required: Set path to directory with the homer .txt files (output of annotatedpeak.pl)
setwd("/home/pezoldt/NAS2/pezoldt/Analysis/ATACseq/ATAC_FSC_all")
# Input required: Set name of experiment
name = "ATAC_FSC_all"
#Set directory
# Input required: Set path to peak count tables
path <- paste(getwd(),"/homer/Overlap_Group_Merged/Run_4_in_all",sep="")
#Merge tables over id column
merge_counts <- function(path) {
count.list <- list()
countfiles <- list.files(path, pattern=".txt$", full.names = T)
for (i in seq(length(countfiles))){
count.list[[i]] <- fread(file=countfiles[i], skip = 1, header = F, select = c(1,20),
col.names = c("id", (strsplit(basename(countfiles[i]), ".txt")[[1]])))
setkey(count.list[[i]],id)
}
count <- Reduce(merge, count.list)
return(count)
}
## Run the merge over all homer count tables
count <- merge_counts(path)

## For ATAC-seq data multiply counts by 2 because only one strand is counted.
## Fixed: only the count columns are doubled -- `count * 2` applied the
## multiplication to every column including the peak "id" key (doubling
## numeric ids, or erroring on character ids).
count_cols <- setdiff(names(count), "id")
count[, (count_cols) := lapply(.SD, function(x) x * 2), .SDcols = count_cols]

## Export the merged table next to the input files
write.table(count, paste0(path, "/", name, ".txt"), quote = FALSE, sep = "\t", row.names = FALSE)
|
ee884eaaf8d93dd000db0c64a7e76c8675f404d1
|
149bc111fbbc1d4772d4af1f5335b83e3f868271
|
/tests/testthat.R
|
807b2edb853b07455867d4b8a99e757d9a926336
|
[] |
no_license
|
NiklasTR/microbiomefhir
|
157fa380caec7c0a114bd11e6023d40465440c92
|
6078851064ebe0ffe8840ed7c83006b79c07b6a2
|
refs/heads/master
| 2020-04-06T16:17:15.029430
| 2019-06-20T11:07:53
| 2019-06-20T11:07:53
| 157,613,751
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
testthat.R
|
# Package test entry point: loads the package and runs all testthat tests
# under tests/testthat/.
# NOTE(review): the package is loaded as "mirobiomefhir" while the repository
# is named "microbiomefhir" -- confirm the Package field in DESCRIPTION; this
# spelling may be a typo (it is at least consistent within this file).
library(testthat)
library(mirobiomefhir)

test_check("mirobiomefhir")
|
5735d42d1889cea72f0684480781c4796d434c7b
|
006d56a4efa0a566ea9aeaa2b179d0a765d6ee4a
|
/appDevelopment_nestwatch/appNestwatchTechnicianInterface/fieldOptions.R
|
056d05837e37137fff3dae4359c4f64ffe2d2f34
|
[] |
no_license
|
bsevansunc/shiny
|
83f935fefa33d4118802dfb48f60c5892de2c028
|
5da0768d93d2937f1baad2c88c9f7907107ef87f
|
refs/heads/master
| 2021-01-10T12:53:59.057260
| 2017-12-14T19:03:13
| 2017-12-14T19:03:13
| 47,638,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,943
|
r
|
fieldOptions.R
|
#---------------------------------------------------------------------------------*
# ---- VISIT ----
#---------------------------------------------------------------------------------*
# Field (input) IDs collected on a site-visit record. These strings are a
# fixed contract with the data-entry interface, so they must not be renamed
# unilaterally.
# NOTE(review): 'rsPathDistace' looks like a typo for 'rsPathDistance' -- it
# still works if the matching input ID uses the same spelling; confirm
# against the UI before renaming.

# Define fields for visit data:
visitFields <- c('hub', 'site', 'observer',
                 'longitude', 'latitude', 'accuracy', 'locationNotes',
                 'netCount6', 'netTime6','netCount9', 'netTime9',
                 'netCount12', 'netTime12','netCount18', 'netTime18',
                 'startRsTime', 'endRsTime',
                 'rsPathDistace', 'amroUnbanded', 'bcchUnbanded', 'brthUnbanded',
                 'cachUnbanded', 'carwUnbanded', 'eaphUnbanded','grcaUnbanded',
                 'howrUnbanded',
                 'nocaUnbanded', 'nomoUnbanded', 'sospUnbanded',
                 'tutiUnbanded', 'unchUnbanded', 'encounteredBird','visitNotes')

# Visit choices (drop-down option vectors; a leading '' is "no selection")
choiceRegions <- c('Atlanta', 'DC', 'Gainesville', 'Pittsburgh',
                   'Raleigh', 'Springfield')
# Named self-referentially so selectInput shows the region names as labels:
names(choiceRegions) <- choiceRegions

# NOTE(review): depends on a data frame `encounters` already being loaded
# before this file is sourced -- confirm the sourcing order.
choiceSites <- c('', encounters$site %>% unique %>% sort)

# Every calendar date from 2000-01-15 through 2030-01-01, as strings:
choiceDate <- c('', seq(
  as.Date(ISOdate(2000, 1, 15)),
  as.Date(ISOdate(2030, 1, 1)), 1) %>%
    as.character)

# Minute-resolution times over one day ("00:00".."23:59", sorted);
# indices 301:1321 select the 05:00-22:00 window offered in the UI.
timeOfDay0 <- format(seq(ISOdate(2000, 1, 1), ISOdate(2000,1,2),
                         by = 'min'), '%H:%M') %>%
  unique %>% sort
timeOfDay <- timeOfDay0[301:1321]
choiceTimeOfDay <- c('',timeOfDay)

# Four-letter species codes (UNCH = unknown chickadee):
choiceSpecies <- c('', 'AMRO', 'BCCH', 'BRTH', 'CACH', 'CARW',
                   'EAPH','GRCA','HOWR','NOCA','NOMO','SOSP',
                   'TUTI','UNCH')
# Band color codes:
colorValues <- c('', 'A', 'BU', 'BK', 'G', 'O','PK', 'P','R', 'Y', 'W')
choiceNetCount <- c('', seq(0, 12, by = 1))
choiceNetMinutes <- c('', 0:2000)
choiceCount <- c('', 1:100)
#---------------------------------------------------------------------------------*
# ---- ENCOUNTERS ----
#---------------------------------------------------------------------------------*
# Field (input) IDs collected on a bird-encounter record:
fieldCodesEnc <- c('hubEnc', 'siteEnc', 'dateEnc', 'bandTime',
                   'observerEnc','encounterType', 'speciesEnc',
                   'bandNumber','colorCombo', 'age', 'sex',
                   'breedingCond','fat', 'mass', 'wing', 'tl',
                   'tarsus','featherID', 'toenailID', 'bloodID',
                   'fecalID', 'attachmentID', 'rsLong', 'rsLat', 'notesEnc')

# Display names for the encounter data table (parallel to fieldCodesEnc):
fieldNamesEnc <- c('Hub', 'Site', 'Date', 'Time', 'Obs.', 'Encounter',
                   'SPP', 'Band #', 'Color c.', 'Age', 'Sex', 'CP/BP',
                   'Fat', 'Mass', 'Wing', 'Tl', 'Tars', 'Feather',
                   'Toenail','Blood','Fecal', 'Attachment', 'rsLong',
                   'rsLat', 'Notes')

# Fields cleared (left blank) between successive encounter records.
# Fixed: 'rslong'/'rslat' did not match the codes 'rsLong'/'rsLat' declared
# in fieldCodesEnc above, so those two fields were never blanked.
blankFieldsEnc <- c('bandTime', 'encounterType', 'speciesEnc',
                    'bandNumber','colorCombo', 'age', 'sex',
                    'breedingCond','fat', 'mass', 'wing', 'tl',
                    'tarsus','featherID', 'toenailID', 'bloodID',
                    'fecalID', 'attachmentID', 'rsLong', 'rsLat', 'notesEnc')

# Band choices (drop-down option vectors; a leading '' is "no selection"):
choiceAge <- c('', 'HY', 'AHY', 'SY', 'ASY', 'UNK')
choiceEncounterType <- c('','Band', 'Recap',
                         'Resight-incidental','Resight-targeted', 'Resight-participant')
choiceSex <- c('', 'M', 'F', 'UNK')
choiceBreedingCond <- c('','CP', 'BP','CP-', 'BP-','CP+', 'BP+')
# Mixing '' with numbers coerces the whole vector to character, which is the
# intended drop-down representation. (1:5 replaces the redundant seq(1:5).)
choiceFat <- c('', 0, 0.5, 1:5)
choiceDistance <- c('', '0-10', '11-20', '21-30', '31-40', '41-50')
choiceTime <- c('', 3, 2, 5)
#---------------------------------------------------------------------------------*
# ---- POINT COUNTS ----
#---------------------------------------------------------------------------------*
# Field (input) IDs collected on a point-count record; fixed contract with
# the data-entry interface.

# Define fields for point count data:
fieldCodesPc <- c('hubPc', 'sitePc', 'observerPc', 'datePc',
                  'startTimePc', 'timePc', 'speciesPc', 'distancePc',
                  'countPc', 'detectionPc','notesPc')

# Display names for the point count data table (parallel to fieldCodesPc):
fieldNamesPc <- c('Hub', 'Site', 'Observer', 'Date', 'Start time',
                  'Time interval', 'SPP', 'Distance', 'Count',
                  'Detection', 'Notes')

# Fields cleared (left blank) between successive point-count records:
blankFieldsPc <- c('timePc', 'speciesPc', 'distancePc',
                   'countPc', 'detectionPc','notesPc')
#---------------------------------------------------------------------------------*
# ---- NESTS ----
#---------------------------------------------------------------------------------*
# Field (input) IDs collected on a nest-check record; fixed contract with
# the data-entry interface.

# Define fields for nest data:
fieldCodesNest <- c('hubNest', 'siteNest', 'nestID', 'speciesNest',
                    'dateNest', 'timeNest', 'stageNest', 'adAttNest',
                    'nEggNest', 'nYoungNest', 'notesNest',
                    'observerNest')

# Display names for the nest data table (parallel to fieldCodesNest):
fieldNamesNest <- c('Hub', 'Site', 'Nest ID', 'SPP',
                    'Date', 'Time', 'Stage', 'adAtt',
                    'nEgg', 'nYoung', 'Notes', 'Obs')

# Fields cleared (left blank) between successive nest records:
blankFieldsNest <- c('dateNest', 'timeNest', 'stageNest', 'adAttNest',
                     'nEggNest', 'nYoungNest', 'notesNest',
                     'observerNest')

# Nest choices (drop-down option vectors; a leading '' is "no selection").
nestLocationChoices <- c('', 'Nestbox', 'Shrub', 'Tree', 'Other')
# NOTE(review): 'Failed: Weather related ' carries a trailing space -- if the
# stored data uses the same string it still matches; confirm before trimming.
nestFateChoices <- c('', 'Successful', 'Successful but parasitized',
                     'Failed: Predated',
                     'Failed: Starvation',
                     'Failed: Human activity related',
                     'Failed: Weather related ',
                     'Failed: Parasitized',
                     'Failed: Unknown',
                     'Failed: Other')
# Single-letter nest stage codes -- meanings not defined in this file;
# presumably Building/Laying/Incubating/Hatching/Nestling/Fledged/... --
# TODO confirm against the protocol document.
nestStageChoices <- c('', 'B', 'L', 'I', 'H', 'N', 'F', 'P', 'A')
# Which adult(s) attended the nest: none, Female, Male, or both.
nestAttendChoices <- c('', '-', 'F', 'M', 'F+M')
nestEggsYoungChoices <- c('', 0:10)
|
a8b21d874c8008e967bb4a8abf02c35c9ae0c2cc
|
f8161c1763d3430e606b1afd6b57e35b33604f91
|
/man/xgx_stat_smooth.Rd
|
b842f8f6b40bb8adcfb1612f1430e4745d990bde
|
[
"MIT"
] |
permissive
|
Novartis/xgxr
|
f9d99dba43afc439e662e2a4bc56455c8cc7144b
|
287d64155ae3d1299befb90dc846b9189db443ad
|
refs/heads/master
| 2023-08-31T10:43:54.993048
| 2023-08-18T22:17:42
| 2023-08-18T22:17:42
| 194,325,753
| 14
| 10
|
NOASSERTION
| 2023-08-18T22:17:43
| 2019-06-28T19:42:53
|
R
|
UTF-8
|
R
| false
| true
| 8,498
|
rd
|
xgx_stat_smooth.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xgx_stat_smooth.R
\name{xgx_stat_smooth}
\alias{xgx_stat_smooth}
\alias{xgx_geom_smooth}
\alias{xgx_geom_smooth_emax}
\title{Wrapper for stat_smooth}
\usage{
xgx_stat_smooth(
mapping = NULL,
data = NULL,
geom = "smooth",
position = "identity",
...,
method = NULL,
formula = NULL,
se = TRUE,
n = 80,
span = 0.75,
n_boot = 200,
fullrange = FALSE,
level = 0.95,
method.args = list(),
na.rm = FALSE,
orientation = "x",
show.legend = NA,
inherit.aes = TRUE
)
xgx_geom_smooth(
mapping = NULL,
data = NULL,
geom = "smooth",
position = "identity",
...,
method = NULL,
formula = NULL,
se = TRUE,
n = 80,
span = 0.75,
fullrange = FALSE,
level = 0.95,
method.args = list(),
na.rm = FALSE,
orientation = "x",
show.legend = NA,
inherit.aes = TRUE
)
xgx_geom_smooth_emax(
mapping = NULL,
data = NULL,
geom = "smooth",
position = "identity",
...,
method = "nlsLM",
formula,
se = TRUE,
n = 80,
span = 0.75,
fullrange = FALSE,
level = 0.95,
method.args = list(),
na.rm = FALSE,
orientation = "x",
show.legend = NA,
inherit.aes = TRUE
)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by `aes` or `aes_`.
If specified and `inherit.aes = TRUE` (the default), it is combined with the
default mapping at the top level of the plot. You must supply mapping if
there is no plot mapping.
Warning: for `method = polr`, do not define `y` aesthetic, use `response` instead.}
\item{data}{The data to be displayed in this layer. There are three options:
If NULL, the default, the data is inherited from the plot data as specified
in the call to ggplot.
A data.frame, or other object, will override the plot data. All objects
will be fortified to produce a data frame. See fortify for which variables
will be created.
A function will be called with a single argument, the plot data. The return
value must be a data.frame., and will be used as the layer data.}
\item{geom}{Use to override the default geom. Can be a list of multiple
geoms, e.g. list("point","line","errorbar"), which is the default.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function.}
\item{...}{other arguments passed on to layer. These are often aesthetics,
used to set an aesthetic to a fixed value, like color = "red" or size = 3.
They may also be parameters to the paired geom/stat.}
\item{method}{method (function) to use, eg. lm, glm, gam, loess, rlm.
Example: `"polr"` for ordinal data. `"nlsLM"` for nonlinear least squares.
If method is left as `NULL`, then a typical `StatSmooth` is applied,
with the corresponding defaults, i.e. For datasets with n < 1000 default is loess.
For datasets with 1000 or more observations defaults to gam.}
\item{formula}{formula to use in smoothing function, eg. y ~ x, y ~ poly(x, 2), y ~ log(x)}
\item{se}{display confidence interval around smooth? (TRUE by default, see level to control)}
\item{n}{number of points to evaluate smoother at}
\item{span}{Controls the amount of smoothing for the default loess smoother.
Smaller numbers produce wigglier lines, larger numbers produce smoother lines.}
\item{n_boot}{number of bootstraps to perform to compute confidence interval,
currently only used for method = "polr", default is 200}
\item{fullrange}{should the fit span the full range of the plot, or just the data}
\item{level}{The percentile for the confidence interval (should fall
between 0 and 1). The default is 0.95, which corresponds to a 95 percent
confidence interval.}
\item{method.args}{Optional additional arguments passed on to the method.}
\item{na.rm}{If FALSE, the default, missing values are removed with a
warning. If TRUE, missing values are silently removed.}
\item{orientation}{The orientation of the layer, passed on to ggplot2::stat_summary.
Only implemented for ggplot2 v.3.3.0 and later. The default ("x") summarizes y values over
x values (same behavior as ggplot2 v.3.2.1 or earlier). Setting \code{orientation = "y"} will
summarize x values over y values, which may be useful in some situations where you want to flip
the axes, e.g. to create forest plots. Setting \code{orientation = NA} will try to automatically
determine the orientation from the aesthetic mapping (this is more stable for ggplot2 v.3.3.2
compared to v.3.3.0).}
\item{show.legend}{logical. Should this layer be included in the legends?
NA, the default, includes if any aesthetics are mapped. FALSE never
includes, and TRUE always includes.}
\item{inherit.aes}{If FALSE, overrides the default aesthetics, rather
than combining with them. This is most useful for helper functions that
define both data and aesthetics and shouldn't inherit behaviour from the
default plot specification, e.g. borders.}
}
\value{
ggplot2 plot layer
}
\description{
\code{xgx_stat_smooth} and \code{xgx_geom_smooth} produce smooth fits through continuous or categorical data.
For categorical, ordinal, or multinomial data use method = polr.
This wrapper also works with nonlinear methods like nls and nlsLM for continuous data.
\code{xgx_geom_smooth_emax} uses minpack.lm::nlsLM, predictdf.nls, and stat_smooth to display Emax model fit to data
}
\section{Warning}{
\code{nlsLM} uses \code{nls.lm} which implements the Levenberg-Marquardt
algorithm for fitting a nonlinear model, and may fail to converge for a
number of reasons. See \code{?nls.lm} for more information.
\code{nls} uses Gauss-Newton method for estimating parameters,
and could fail if the parameters are not identifiable. If this happens
you will see the following warning message:
Warning message:
Computation failed in `stat_smooth()`:
singular gradient
\code{nls} will also fail if used on artificial "zero-residual" data,
use \code{nlsLM} instead.
}
\examples{
# Example with nonlinear least squares (method = "nlsLM")
Nsubj <- 10
Doses <- c(0, 25, 50, 100, 200)
Ntot <- Nsubj*length(Doses)
times <- c(0,14,30,60,90)
dat1 <- data.frame(ID = 1:(Ntot),
DOSE = rep(Doses, Nsubj),
PD0 = stats::rlnorm(Ntot, log(100), 1),
Kout = exp(stats::rnorm(Ntot,-2, 0.3)),
Imax = 1,
ED50 = 25) \%>\%
dplyr::mutate(PDSS = PD0*(1 - Imax*DOSE/(DOSE + ED50))*exp(stats::rnorm(Ntot, 0.05, 0.3))) \%>\%
merge(data.frame(ID = rep(1:(Ntot), each = length(times)), Time = times), by = "ID") \%>\%
dplyr::mutate(PD = ((PD0 - PDSS)*(exp(-Kout*Time)) + PDSS),
PCHG = (PD - PD0)/PD0)
gg <- ggplot2::ggplot(dat1 \%>\% subset(Time == 90),
ggplot2::aes(x = DOSE, y = PCHG)) +
ggplot2::geom_boxplot(ggplot2::aes(group = DOSE)) +
xgx_theme() +
xgx_scale_y_percentchangelog10() +
ggplot2::ylab("Percent Change from Baseline") +
ggplot2::xlab("Dose (mg)")
gg +
xgx_stat_smooth(method = "nlsLM", formula = y ~ E0 + Emax*x/(ED50 + x),
method.args = list(
start = list(Emax = -0.50, ED50 = 25, E0 = 0),
lower = c(-Inf, 0, -Inf)
),
se = TRUE)
gg +
xgx_geom_smooth_emax()
\dontrun{
# example with ordinal data (method = "polr")
set.seed(12345)
data = data.frame(x = 120*exp(stats::rnorm(100,0,1)),
response = sample(c("Mild","Moderate","Severe"), 100, replace = TRUE),
covariate = sample(c("Male","Female"), 100, replace = TRUE)) \%>\%
dplyr::mutate(y = (50 + 20*x/(200 + x))*exp(stats::rnorm(100, 0, 0.3)))
# example coloring by the response categories
xgx_plot(data = data) +
xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
colour = response, fill = response),
method = "polr") +
ggplot2::scale_y_continuous(labels = scales::percent_format())
# example faceting by the response categories, coloring by a different covariate
xgx_plot(data = data) +
xgx_stat_smooth(mapping = ggplot2::aes(x = x, response = response,
colour = covariate, fill = covariate),
method = "polr", level = 0.80) +
ggplot2::facet_wrap(~response) +
ggplot2::scale_y_continuous(labels = scales::percent_format())
}
}
\seealso{
\code{\link{predictdf.nls}} for information on how nls confidence intervals are calculated.
}
|
d4aea3c9b87a6497dcf131f9fb1b14309bcd3ec2
|
c24c33d7aec329b5617b7a887d515dcde6d16d5b
|
/crispr_db/validateThierFinding.r
|
5500a032feecad718839cf4f67db907733b5e055
|
[] |
no_license
|
cshukai/apache_spark_crispr
|
71814690d86c9ba26c39c727b726b06b000be322
|
dc5dee4eebd2ad3d2957ef5ed209e161a44c4139
|
refs/heads/master
| 2020-05-14T21:00:04.538391
| 2016-07-22T19:16:56
| 2016-07-22T19:16:56
| 181,954,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,794
|
r
|
validateThierFinding.r
|
## Parse species names into a normalized format: lower-case, word separators
## become "_", quote/dot/parenthesis characters are dropped.
without <- read.table("noCrisprSpecies.txt", quote = "", header = FALSE, sep = "|")

cleaned <- tolower(without[, 2])
# Literal substitutions applied in order ("" means "delete the character"):
subs <- c(" " = "_", "'" = "", "/" = "_", "-" = "_",
          "." = "", "(" = "", ")" = "")
for (ch in names(subs)) {
  cleaned <- gsub(ch, subs[[ch]], cleaned, fixed = TRUE)
}
without[, 2] <- cleaned

write.csv(without, "without.csv", row.names = FALSE)
save.image("parse.RData")
## Map species names (from `without`) onto Ensembl bacterial annotation
## (GTF) files by matching the cleaned name against the file path.
bac_collectoin_home <- "/home/shchang/data/ensemble/bac_r_29/ftp.ensemblgenomes.org/pub/release-29/bacteria/gtf"
# All unpacked GTF files two directory levels below the collection home:
unzip_gtf_path <- gz_paths <- Sys.glob(file.path(bac_collectoin_home, "*", "*", "*.gtf"))

# For every species, collect the GTF paths whose path matches the cleaned
# name (used as a case-insensitive regex; cleaned names contain only
# [a-z0-9_], so no metacharacters). lapply + unlist replaces the previous
# grow-by-c() loop and the 1:nrow() empty-table footgun.
target_path <- unlist(lapply(seq_len(nrow(without)), function(i) {
  grep(pattern = without[i, 2], x = unzip_gtf_path, ignore.case = TRUE, value = TRUE)
}))
# More paths than species are expected: several strains can match one species.
target_path <- unique(target_path)
save.image("without.RData")
# Fetch protein FASTA sequences for the CDS entries of each target GTF via
# the NCBI e-utilities (epost | efetch), writing one file per genome
# directory. NOTE: the working directory must be the home of the NCBI
# e-utilities scripts (../epost, ../efetch).
library("ballgown")
for (i in seq_along(target_path)) {
  gtf_tbl <- gffRead(target_path[i])
  # Keep coding sequences only:
  gtf_tbl_CDS <- gtf_tbl[gtf_tbl[, "feature"] == "CDS", ]
  target_protein_id <- getAttributeField(gtf_tbl_CDS$attributes, field = "protein_id")
  # Strip the surrounding quotes GTF attribute values carry:
  target_protein_id <- gsub(pattern = "\"", replacement = "", x = target_protein_id)
  id_set <- paste(target_protein_id, collapse = ",")
  # Output file is named after the genome directory, i.e. the second-to-last
  # path component (basename of the dirname):
  file_name <- basename(dirname(target_path[i]))
  cmd <- paste0("sh ../epost -db protein -format acc -id ", id_set,
                "|sh ../efetch -format fasta", ">", file_name)
  system(cmd)
}
############################################### mri process on computing node
## Build a shell script (script.sh) of blastp commands: every reference
## FASTA is searched against every species-specific BLAST database, with
## tabular (-outfmt 6) results written to <cwd>/<species>/<fasta>.
load("crisprdb_without.RData")
cwd <- getwd()

# One output directory per species; the species name is the second-to-last
# component of the GTF path (the genome directory). Vectorized instead of
# the previous grow-by-c() loop.
species_names <- basename(dirname(target_path))
for (sp in species_names) {
  dir.create(sp)
}

setwd("crisprdb_without")
ref_fasta <- Sys.glob(file.path("*.fasta"))

# Loop-invariant command prefix, hoisted out of the loops:
blastp <- "/share/sw/blast/2.2.30+/bin/blastp -db"
for (i in seq_along(species_names)) {
  these_db <- paste(species_names[i], "db", sep = ".")
  out_dir <- paste(cwd, species_names[i], sep = "/")
  for (j in seq_along(ref_fasta)) {
    # paste() with default sep = " " reproduces the original command layout:
    cmd <- paste(blastp, these_db,
                 "-query", ref_fasta[j],
                 "-out", paste(out_dir, ref_fasta[j], sep = "/"),
                 "-outfmt 6")
    cat(cmd, file = "script.sh", fill = TRUE, append = TRUE)
  }
}
setwd(cwd)
save.image("crisprdb_without.RData")
|
960af1a551043f1bc5faf5279584f4a7bfd36e34
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/dng/man/splitt_moments.Rd
|
557ba07bd3e242e423d01943782e6ff309b23a9b
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,176
|
rd
|
splitt_moments.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{splitt_kurtosis}
\alias{splitt_kurtosis}
\alias{splitt_moments}
\alias{splitt_mean}
\alias{splitt_skewness}
\alias{splitt_var}
\title{Moments of the split-t distribution}
\usage{
splitt_kurtosis(df, phi, lmd)
splitt_mean(mu, df, phi, lmd)
splitt_skewness(df, phi, lmd)
splitt_var(df, phi, lmd)
}
\arguments{
\item{df}{degrees of freedom (> 0, can be non-integer). df = Inf is allowed.}
\item{phi}{vector of scale parameters (> 0).}
\item{lmd}{vector of skewness parameters (> 0). If is 1, reduced to
symmetric student t distribution.}
\item{mu}{vector of location parameter. (The mode of the density)}
}
\value{
\code{splitt_mean} gives the mean. \code{splitt_var} gives the
variance. \code{splitt_skewness} gives the skewness. \code{splitt_kurtosis}
gives the kurtosis. (\code{splitt_mean},
\code{splitt_var}, \code{splitt_skewness} and \code{splitt_kurtosis} are all
vectors.)
Invalid arguments will result in return value NaN, with a warning.
}
\description{
Computing the mean, variance, skewness and kurtosis for the split student-t
distribution.
}
\section{Functions}{
\itemize{
\item \code{splitt_kurtosis}: Kurtosis for the split-t distribution.
\item \code{splitt_skewness}: Skewness for the split-t distribution.
\item \code{splitt_var}: Variance for the split-t distribution.
}}
\examples{
mu <- c(0,1,2)
df <- rep(10,3)
phi <- c(0.5,1,2)
lmd <- c(1,2,3)
mean0 <- splitt_mean(mu, df, phi, lmd)
var0 <- splitt_var(df, phi, lmd)
skewness0 <- splitt_skewness(df, phi, lmd)
kurtosis0 <- splitt_kurtosis(df, phi, lmd)
}
\references{
Li, F., Villani, M., & Kohn, R. (2010). Flexible modeling of
conditional distributions using smooth mixtures of asymmetric student t
densities. Journal of Statistical Planning & Inference, 140(12), 3638-3654.
}
\seealso{
\code{\link{dsplitt}()}, \code{\link{psplitt}()},
\code{\link{qsplitt}()} and \code{\link{rsplitt}()} for the split-t
distribution.
}
\author{
Feng Li, Jiayue Zeng
}
\keyword{asymmetric}
\keyword{distribution}
\keyword{student-t}
|
dfa32b8e0c900bf33c1114f9ec36ad179eec37d7
|
6dce20dd72eb9eb809c0972bd0f5d479b20f71e6
|
/R/geom_timeline_label.R
|
a65c46e85aa09607bb0cdfe2b5ed880ebca55b9c
|
[
"MIT"
] |
permissive
|
staedi/eqviz
|
9a41ef3567085d4ee436c07f67222970d5c07764
|
6bd6533e76a4c593658203260ca538e8b60f1e05
|
refs/heads/master
| 2022-12-08T02:10:29.469779
| 2020-08-31T00:30:44
| 2020-08-31T00:30:44
| 274,668,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,856
|
r
|
geom_timeline_label.R
|
library(ggplot2)
library(dplyr)
#' GeomTimelineLabel: Geom for adding text labels of earthquake locations on the timeline
#'
#' This Geom is an optional add-on for the GeomTimeline Geom element. It adds
#' extra location information by drawing slanted (45 degree) text labels,
#' connected to the timeline points by short vertical segments.
#' It takes three mandatory aesthetics: x, label, and n_max.
#' The aesthetic x takes a date/time column, label takes the column whose
#' text is drawn, and n_max is the number of earthquakes to annotate: only
#' the top n_max rows by the size aesthetic (magnitude) are labelled.
#'
#' To keep the same x-range behaviour as GeomTimeline, the same xmin/xmax
#' defaulting and filtering logic is applied inside the draw_group function.
#'
#' NOTE(review): "optional_aes" is not a documented ggproto Geom field
#' (ggplot2 Geoms use non_missing_aes/default_aes; optional_aes exists on
#' Stats) -- confirm this field has the intended effect here.
#'
#' @importFrom ggplot2 ggproto Geom aes draw_key_polygon
#' @importFrom dplyr filter top_n
#' @importFrom grid textGrob gpar gTree gList
#' @export
GeomTimelineLabel <- ggplot2::ggproto("GeomTimelineLabel", ggplot2::Geom,
                 required_aes = c("x","label","n_max"),
                 optional_aes = c("size","shape","colour","linesize","linetype","stroke","xmin","xmax"),
                 default_aes = ggplot2::aes(y = 0.1),
                 draw_key = ggplot2::draw_key_polygon,
                 draw_group = function(data, panel_scales, coord) {
                   # Default xmin/xmax to the data range when not supplied.
                   # x arrives as numeric days-since-epoch, hence the
                   # as.Date(..., origin = '1970-01-01') round trips below.
                   if (is.null(data$xmin)) data$xmin <- as.Date(min(data$x),origin='1970-01-01')
                   if (is.null(data$xmax)) data$xmax <- as.Date(max(data$x),origin='1970-01-01')
                   # Keep only events inside [xmin, xmax]:
                   data <- data %>%
                     dplyr::filter(as.Date(x,origin='1970-01-01') >= xmin) %>%
                     dplyr::filter(as.Date(x,origin='1970-01-01') <= xmax)
                   # Label only the n_max largest events by the size aes.
                   # n_max is a constant aesthetic replicated per row, so the
                   # first element is taken.
                   data <- data %>%
                     dplyr::top_n(n = as.integer(n_max[1]),size)
                   # Map data coordinates into the [0, 1] npc panel space:
                   coords <- coord$transform(data, panel_scales)
                   # Vertical gap (in npc units) between a point and its label:
                   offset <- 0.1
                   # Slanted text labels, anchored at the top of the segment:
                   names <- grid::textGrob(
                     label = coords$label,
                     x = unit(coords$x, "npc"),
                     y = unit(coords$y + offset, "npc"),
                     just = c("left", "bottom"),
                     gp = grid::gpar(fontsize = 10, col = "black"),
                     rot = 45
                   )
                   # Construct a segment grob: one vertical line per labelled
                   # point, from the point up to its label (id pairs the
                   # endpoints of each polyline).
                   lines <- grid::polylineGrob(
                     x = unit(c(coords$x,coords$x),"npc"),
                     y = unit(c(coords$y,coords$y+offset),"npc"),
                     id = rep(1:length(coords$x),2),
                     gp = grid::gpar(col="darkgray")
                   )
                   # Return both grobs as a single tree:
                   grid::gTree(children = grid::gList(names,lines))
                 }
)
#' geom_timeline_label(): geom function to write labels of location infos
#'
#' Adds a GeomTimelineLabel layer (text annotations for the top-n_max
#' earthquakes by magnitude) to a plot built with geom_timeline().
#'
#' @param mapping a set of aesthetic mappings (requires x, label and n_max)
#' @param data data to be plotted
#' @param stat stat object (No custom version used here)
#' @param position position object (No custom version used here)
#' @param show.legend inclusion of the legend
#' @param na.rm treatment of missing values
#' @param inherit.aes inheriting aeses from default geom
#' @param ... additional parameters
#'
#' @return None
#' @export
#'
#' @examples
#' \dontrun{
#' x_min <- as.Date('2003-01-01',origin='1970-01-01')
#' x_max <- as.Date('2017-01-01',origin='1970-01-01')
#' eq_load_data('earthquakes.tsv.gz') %>%
#'   eq_clean_data() %>%
#'   filter(year >= 2000) %>%
#'   filter(country %in% c("USA","MEXICO")) %>%
#'   ggplot2::ggplot(ggplot2::aes(x=date,y=country,colour=deaths,size=eq_primary)) +
#'   geom_timeline(aes(xmin=x_min, xmax=x_max)) +
#'   geom_timeline_label(aes(xmin=x_min,xmax=x_max,label=location_name,n_max=5))
#' }
geom_timeline_label <- function(mapping = NULL, data = NULL, stat = 'identity',
                         position = 'identity', show.legend = NA,
                         na.rm = FALSE,
                         inherit.aes = TRUE, ...) {
  # Bundle na.rm plus any extra fixed aesthetics/parameters for the layer:
  layer_params <- list(na.rm = na.rm, ...)
  ggplot2::layer(
    geom = GeomTimelineLabel,
    mapping = mapping,
    data = data,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = layer_params
  )
}
|
981bda315175cfecbe80d796abaddb086741f36b
|
6c51327c2cd25f6ac64b0e07983283f90f31e1bc
|
/1_plot_scripts/winter/winter_DJFMP.R
|
4ebbb693fc03bc0864ac9755c5c33f1617ab0055
|
[] |
no_license
|
lmitchell4/Status-and-Trends
|
481bf9b957e9afbd172d7cba8db0f036622c7ab2
|
9c19fe2917496306cc812e27764f0aa2d6b5d6b3
|
refs/heads/master
| 2022-10-03T07:58:30.961149
| 2022-07-19T22:46:57
| 2022-07-19T22:46:57
| 180,163,867
| 0
| 0
| null | 2019-04-08T14:13:12
| 2019-04-08T14:13:11
| null |
UTF-8
|
R
| false
| false
| 3,874
|
r
|
winter_DJFMP.R
|
## Winter = Dec, Jan, Feb
## See a copy of the document "#22 Metadata (Updated May 30, 2019).doc" for reference.
## Status-and-trends plotting script: DJFMP Chipps Island trawl, winter
## season (juvenile winter-run Chinook Salmon index).

# setup.R is assumed to define data_root, fig_root_winter, report_year and
# the smr_* / stat_lt_avg / getCaption / getAlttext helpers used below --
# NOTE(review): inferred from use in this script; confirm.
source("setup.R")
library(lubridate)

##########################################################################
## Read in data: loads the `chippsData` data frame used below.
load(file.path(data_root, "chippsData.RData"))
##########################################################################
## Chipps Trawl: Winterrun Chinook

## Fill in missing volumes with an overall average:
## NOTE(review): the result of this check is discarded -- it shows nothing
## when run non-interactively; consider wrapping it in print()/message().
any(is.na(unique(chippsData$Volume)))
chippsData$Volume[is.na(chippsData$Volume)] <- mean(chippsData$Volume, na.rm=TRUE)

## Add fields: calendar month/year of each sample, plus Year as a factor.
chippsData = mutate(chippsData,
                    Month = month(SampleDate),
                    Year = year(SampleDate),
                    Year_f = as.factor(Year))

## Create wide data frame:
## Build a single CommonName_RaceByLength key, e.g. "Chinook_salmon_Winter".
## NOTE(review): sub() replaces only the FIRST match per name -- fine for
## two-word names; confirm no species names contain more than one space.
chippsData$CommonName <- sub("Chinook salmon","Chinook salmon_",chippsData$CommonName)
chippsData$CommonName <- sub(" ","_",chippsData$CommonName)
chippsData$RaceByLength[is.na(chippsData$RaceByLength)] <- ""
chippsData$CommonName_RaceByLength <- with(chippsData, paste0(CommonName, RaceByLength))

keep_fields_chipps <- c("Year","Year_f","Month","Location","RegionCode","StationCode",
                        "SampleDate","SampleTime","MethodCode","GearConditionCode",
                        "TowNumber","Volume","CommonName_RaceByLength","Catch")

# One column per species/race key, filled with catch counts (0 when absent):
chippsWide <- tidyr::spread(chippsData[ ,keep_fields_chipps], CommonName_RaceByLength,
                            Catch, fill=0)
chippsWide$"No catch" <- NULL

## Truncate the data according to the specified report year and season:
## NOTE(review): despite the "_spring" suffix this is the winter subset
## (Dec, Jan, Feb). December is grouped with Jan/Feb of the SAME calendar
## year in the index below -- confirm that is the intended winter definition.
chippsWide_spring <- subset(chippsWide, 1995 <= Year & Year <= report_year &
                              Month %in% c(12,1,2))

## Calculate indices:
## Per year-month: CPUE = sum over tows of winter-run catch / tow volume;
## then the annual index = mean of the monthly CPUEs, scaled by 1000.
chippsIndexDf <- chippsWide_spring %>%
  dplyr::group_by(Year_f, Year, Month) %>%
  dplyr::summarize(
    chinook_winterByLength_CPUE_YM=sum(Chinook_salmon_Winter/Volume),
    .groups="keep"
  ) %>%
  dplyr::ungroup() %>%
  dplyr::group_by(Year_f, Year) %>%
  dplyr::summarize(
    chinook_winterByLengthIndex=mean(chinook_winterByLength_CPUE_YM, na.rm = TRUE) * 1000,
    .groups="keep"
  ) %>%
  dplyr::ungroup() %>%
  as.data.frame(.)

# Print the index table (interactive inspection):
chippsIndexDf
##########################################################################
## Figures:

# use_ylab <- expression(paste("Chinook Salmon Index\n(Winterrun, Unmarked Fish)"))
use_ylab <- "Index for unmarked fish"

## All years: annual index bar chart over the full record, with the
## long-term-average reference layer from stat_lt_avg().
Chipps_all_years_WN <- ggplot(chippsIndexDf, aes(x=Year, y=chinook_winterByLengthIndex)) +
  geom_bar(stat="identity") +
  smr_theme_update() +
  smr_x_axis(report_year, "all", "winter") +
  ylab(use_ylab) +
  stat_lt_avg() +
  annotate("text", x=1968, y=0.15, label="Earlier data\nomitted",
           hjust=0, size=2.7) +
  smr_caption(stat_name="the juvenile winter-run Chinook Salmon passage rate",
              report_year=report_year) +
  smr_alttext(stat_name="juvenile winter-run Chinook Salmon passage rate")

Chipps_all_years_WN
getCaption(Chipps_all_years_WN)
getAlttext(Chipps_all_years_WN)

## Recent years: same chart restricted to the recent window
## (handled by smr_x_axis(report_year, "recent", ...)).
Chipps_all_recent_WN <-
  ggplot(chippsIndexDf, aes(x=Year, y=chinook_winterByLengthIndex)) +
  geom_bar(stat="identity") +
  smr_theme_update() +
  smr_x_axis(report_year, "recent", "winter")+
  ylab(use_ylab)+
  stat_lt_avg() +
  smr_caption(stat_name="the juvenile winter-run Chinook Salmon passage rate",
              report_year=report_year) +
  smr_alttext(stat_name="juvenile winter-run Chinook Salmon passage rate")

Chipps_all_recent_WN
getCaption(Chipps_all_recent_WN)
getAlttext(Chipps_all_recent_WN)

## Save plots: bundle both figures into one named list and write them to the
## winter figure directory (fig_root_winter comes from setup.R).
DJFMP_chinook_winterByLength <- list()
DJFMP_chinook_winterByLength[["Chipps_all_years_WN"]] <- Chipps_all_years_WN
DJFMP_chinook_winterByLength[["Chipps_all_recent_WN"]] <- Chipps_all_recent_WN

save(list="DJFMP_chinook_winterByLength",
     file=file.path(fig_root_winter,"DJFMP_chinook_winterByLength.RData"))
|
a522e7d76356aa6fe66791de58a83ecd5a406208
|
d42b70a85ba00da44ce923e1320f27ec4f6a874c
|
/man/GeoLiftMarketSelection.Rd
|
52b533174f1afcab6f8c9ae0f1110e4dbae1162b
|
[
"MIT"
] |
permissive
|
jesse-lapin/GeoLift
|
b8e6e20595e96c2cc700a16d802cc282aff7b391
|
f7c36e566720ec206bb0f19f6ba865e63601aefd
|
refs/heads/main
| 2023-08-27T13:16:17.182241
| 2021-10-29T20:57:25
| 2021-10-29T20:57:25
| 423,981,596
| 0
| 0
|
MIT
| 2021-11-02T19:59:08
| 2021-11-02T19:59:07
| null |
UTF-8
|
R
| false
| true
| 6,342
|
rd
|
GeoLiftMarketSelection.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeoLift.R
\name{GeoLiftMarketSelection}
\alias{GeoLiftMarketSelection}
\title{GeoLift Market Selection algorithm based on a Power Analysis.}
\usage{
GeoLiftMarketSelection(
data,
treatment_periods,
N = c(),
X = c(),
Y_id = "Y",
location_id = "location",
time_id = "time",
effect_size = seq(0, 0.25, 0.05),
lookback_window = -1,
include_markets = c(),
exclude_markets = c(),
holdout = c(),
cpic = 1,
budget = NULL,
alpha = 0.1,
normalize = FALSE,
model = "none",
fixed_effects = TRUE,
dtw = 0,
ProgressBar = FALSE,
plot_best = FALSE,
run_stochastic_process = FALSE,
parallel = TRUE,
parallel_setup = "sequential",
side_of_test = "two_sided",
import_augsynth_from = "library(augsynth)"
)
}
\arguments{
\item{data}{A data.frame containing the historical conversions by
geographic unit. It requires a "locations" column with the geo name,
a "Y" column with the outcome data (units), a time column with the indicator
of the time period (starting at 1), and covariates.}
\item{treatment_periods}{List of treatment periods to calculate power for.}
\item{N}{List of number of test markets to calculate power for. If left empty (default)
and if no locations are included through \code{include_locations}, it will populate
the list of markets with the deciles of the total number of locations. If left empty
and a set of markets is provided by \code{include_locations} only the deciles larger
or equal than \code{length(include_locations)} will be used.}
\item{X}{List of names of covariates.}
\item{Y_id}{Name of the outcome variable (String).}
\item{location_id}{Name of the location variable (String).}
\item{time_id}{Name of the time variable (String).}
\item{effect_size}{A vector of effect sizes to test by default a
sequence between 0 - 25 percent in 5 percent increments: seq(0,0.25,0.05).
Only input sequences that are entirely positive or negative and that include
zero.}
\item{lookback_window}{A number indicating how far in time the simulations
for the power analysis should go. For instance, a value equal to 5 will simulate
power for the last five possible tests. By default lookback_window = -1 which
will set the window to the smallest provided test \code{min(treatment_periods)}.}
\item{include_markets}{A list of markets or locations that should be part of the
test group. Make sure to specify an N as large or larger than the number of
provided markets or locations.}
\item{exclude_markets}{A list of markets or locations that will be removed from the
analysis.}
\item{holdout}{A vector with two values: the first one the smallest desirable
holdout and the second the largest desirable holdout. If left empty (default)
all market selections will be provided regardless of their size.}
\item{cpic}{Number indicating the Cost Per Incremental Conversion.}
\item{budget}{Number indicating the maximum budget available for a GeoLift test.}
\item{alpha}{Significance Level. By default 0.1.}
\item{normalize}{A logic flag indicating whether to scale the outcome which is
useful to accelerate computing speed when the magnitude of the data is large. The
default is FALSE.}
\item{model}{A string indicating the outcome model used to augment the Augmented
Synthetic Control Method. Augmentation through a prognostic function can improve
fit and reduce L2 imbalance metrics.
\itemize{
  \item{"None":}{ ASCM is not augmented by a prognostic function. Default.}
\item{"Ridge":}{ Augments with a Ridge regression. Recommended to improve fit
for smaller panels (less than 40 locations and 100 time-stamps.))}
\item{"GSYN":}{ Augments with a Generalized Synthetic Control Method. Recommended
to improve fit for larger panels (more than 40 locations and 100
time-stamps. }
}}
\item{fixed_effects}{A logic flag indicating whether to include unit fixed
effects in the model. Set to TRUE by default.}
\item{dtw}{Emphasis on Dynamic Time Warping (DTW), dtw = 1 focuses exclusively
on this metric while dtw = 0 (default) relies on correlations only.}
\item{ProgressBar}{A logic flag indicating whether to display a progress bar
to track progress. Set to FALSE by default.}
\item{plot_best}{A logic flag indicating whether to plot the best 4 tests for
each treatment length. Set to FALSE by default.}
\item{run_stochastic_process}{A logic flag indicating whether to select test
markets through random sampling of the the similarity matrix. Given that
interpolation biases may be relevant if the synthetic control matches
the characteristics of the test unit by averaging away large discrepancies
between the characteristics of the test and the units in the synthetic controls,
it is recommended to only use random sampling after making sure all units are
similar. This parameter is set by default to FALSE.}
\item{parallel}{A logic flag indicating whether to use parallel computing to
speed up calculations. Set to TRUE by default.}
\item{parallel_setup}{A string indicating parallel workers set-up.
Set to "sequential" by default.}
\item{side_of_test}{A string indicating whether confidence will be determined
using a one sided or a two sided test.
\itemize{
\item{"two_sided":}{ The test statistic is the sum of all treatment effects, i.e. sum(abs(x)). Default.}
\item{"one_sided":}{ One-sided test against positive or negative effects i.e.
If the effect being applied is negative, then defaults to -sum(x). H0: ES >= 0; HA: ES < 0.
If the effect being applied is positive, then defaults to sum(x). H0: ES <= 0; HA: ES > 0.}
}}
\item{import_augsynth_from}{Points to where the augsynth package
should be imported from to send to the nodes.}
}
\value{
A list with two Data Frames. \itemize{
\item{"BestMarkets":}{Data Frame with a ranking of the best markets
based on power, Scaled L2 Imbalance, Minimum Detectable Effect, and
proportion of total KPI in the test markets.}
\item{"PowerCurves":}{Data Frame with the resulting power curves for
each recommended market}
}
}
\description{
\code{GeoLiftMarketSelection} provides a ranking of test markets for a
GeoLift test based on a power analysis.
}
|
809325ffe7b464687097f0d8166e4b925c06e198
|
77a7a3e311fa2dc0d24388061da7c406b48daf86
|
/R/_custom_buffer_points.R
|
54752ba7a0075d80d633678f135fd086277cf7e4
|
[] |
no_license
|
federicotallis/h3forr
|
a6133260e0794e318597f3ee7b6a435cf4a8297e
|
5c74feb961b6dd00319829e935178997e5e84c67
|
refs/heads/master
| 2023-03-28T05:50:32.245915
| 2020-12-05T08:43:08
| 2020-12-05T08:43:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 434
|
r
|
_custom_buffer_points.R
|
#' Weight H3 cells around a set of points.
#'
#' Converts the input points to H3 cells at the requested resolution,
#' expands each cell to its k-ring neighbourhood, and accumulates a
#' per-cell weight that decays with ring distance.
#'
#' @param points Point locations accepted by `geo_to_h3()`.
#' @param res H3 resolution for indexing the points.
#' @param radius k-ring radius (number of rings around each cell).
#' @param f Optional decay function of (radius, distance); the default
#'   falls off linearly: 1 at distance 0, down to radius/(1 + radius) less.
#' @return A tibble/data frame with one row per `h3_index`, the summed
#'   `weight`, and `norm`, the weight rescaled to [0, 1].
buffer_points <- function(points, res = 7, radius = 1, f = NULL) {
  # Default decay kernel when the caller supplies none.
  if (is.null(f)) {
    f <- function(radius, distance) 1 - distance * 1 / (1 + radius)
  }
  cells <- geo_to_h3(points, res)
  # One ring table per input cell; stack them into a single frame.
  rings <- purrr::reduce(k_ring_distances(cells, radius), rbind)
  weighted <- dplyr::mutate(rings, weight = f(radius, distance))
  # Cells reached from several points accumulate their weights.
  totals <- dplyr::summarise(dplyr::group_by(weighted, h3_index),
                             weight = sum(weight))
  dplyr::mutate(totals, norm = scales::rescale(weight, c(0, 1)))
}
|
a0cfaf46ec323cd55ff3d87496f573dce2b2d750
|
43fc9173f16d6806446afec2c370414471b39f3b
|
/2a_apply_partykit_onPax.R
|
b77a7e1202546d7f5a13f6650cf8739eb5c0a262
|
[] |
no_license
|
dkremlg/el
|
fbba0c63b22ede2b293916dcd9bde9bd7e1bf3c5
|
1be12e761dadc285fc86ff1aae0f504d93cbda24
|
refs/heads/master
| 2020-05-17T10:50:13.226132
| 2019-06-25T22:40:09
| 2019-06-25T22:40:09
| 183,665,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,222
|
r
|
2a_apply_partykit_onPax.R
|
# Fit a model-based recursive partitioning tree (partykit::glmtree) of
# passenger counts, then score both the training and test panels with it
# and write the scored data back out as CSV.
library(partykit)

path <- 'C:/Users/35266/Documents/Python Scripts/el/'

# Training data: clamp negative passenger counts to zero before fitting.
Data <- read.csv(paste0(path,'Intermediate_Output/R_Training_Pax.csv'))
Data[Data[,'NumPax'] < 0, 'NumPax'] <- 0

# Model formula: Poisson response NumPax regressed on Dprio, with the
# tree partitioning on schedule covariates. The class() wrapper echoes
# "formula" at top level, as in the original script.
class(eq.mob <- NumPax ~ 1 + Dprio | dday + dtime + Direction + month)

curve.model <- glmtree(
  eq.mob,
  data = Data,
  family = poisson,
  alpha = 0.01,
  bonferroni = TRUE,
  verbose = TRUE,
  prune = "BIC",
  minsize = 10,
  breakties = TRUE,
  restart = TRUE,
  maxdepth = 5)

#######################################################
# Append predicted bookings and terminal-node id to a data set.
# NOTE: the CSV column names come from these local variable names, so
# they must remain 'forecast_bookings' / 'forecast_node'.
score_with_tree <- function(d) {
  forecast_bookings <- as.numeric(predict(curve.model, newdata = d, type = 'response'))
  forecast_node <- as.numeric(predict(curve.model, newdata = d, type = 'node'))
  cbind(d, forecast_bookings, forecast_node)
}

Data <- score_with_tree(Data)
write.csv(Data, paste0(path,'Intermediate_Output/R_Output_Training_Pax.csv'), row.names = FALSE)

Forecast_Data <- read.csv(paste0(path,'Intermediate_Output/R_Test_Pax.csv'))
Forecast_Data <- score_with_tree(Forecast_Data)
write.csv(Forecast_Data, paste0(path,'Intermediate_Output/R_Output_Test_Pax.csv'), row.names = FALSE)
|
eec8e4f0286dd93ef6257ea522a14471ec205144
|
631ced5674d04dc347e8127a99eff7e3d91773c0
|
/beyond_gridlock/read_indc.R
|
6a1e8a3c845e9bb1608fbb0bf017ba95fb1b3cf5
|
[
"MIT"
] |
permissive
|
gilligan-ees-3310/climate-change-lecture-scripts
|
7864ed93d8078468bbb513d2e470384f2e24a54b
|
d7fe0ba460ad3c1953458deceb93b1313bf2832a
|
refs/heads/main
| 2023-03-29T12:38:34.497472
| 2021-04-05T06:19:47
| 2021-04-05T06:19:47
| 348,622,200
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,219
|
r
|
read_indc.R
|
# Read PBL INDC emissions projections (a wide CSV whose first four rows
# are a metadata header block) and reshape them into a tidy table of
# Min/Max/Median emissions per country, scenario, and year.
# NOTE: the gather()/spread() sequence below is order-sensitive; it uses
# the superseded tidyr verbs deliberately and should not be reordered.
library(readr)
library(dplyr)
library(stringr)
library(tidyr)
library(ggplot2)
# The raw file has no header row: row 1 holds column keys, rows 2-4 hold
# scenario, year, and level metadata peeled off below.
# (Fixed: use TRUE/FALSE, never the reassignable T/F shorthands.)
data <- read.csv('data/PBL_INDCs.csv', header = FALSE, as.is = TRUE,
na.strings = c('',' ','-','NA'),
colClasses="character", fill=TRUE, strip.white = TRUE)
# Column keys from row 1, with stray leading/trailing dots stripped.
n <- data %>% head(1) %>% unlist %>% unname %>% str_replace_all("^\\.|\\.$","")
# Metadata rows minus the first two label columns, reordered to match
# the alphabetical order of the column keys.
heads <- data %>% head(4) %>% select(-(1:2))
heads <- heads[,order(n[-(1:2)])]
# Everything after row 4 is the actual emissions data.
emissions <- data %>% tail(-4)
names(emissions) <- n
emissions <- emissions %>%
select(-lulucf.co2) %>%
filter(! is.na(country.name)) %>%
gather(key = col, value = val, -country.name)
emissions <- emissions %>%
spread(key = country.name, value = val)
# Attach metadata: heads row 4 = level (NA means Median), row 2 =
# scenario, row 3 = year.
emissions$level <- slice(heads,4) %>% unlist %>% ifelse(is.na(.), "Median", .)
emissions$scenario <- slice(heads,2) %>% unlist
emissions$year <- slice(heads,3) %>% unlist %>% as.numeric
# Back to long form; strip thousands separators and coerce to numeric.
emissions <- emissions %>%
gather(key = country.name, value = val, -col, -level, -scenario, -year) %>%
select(-col) %>% filter(! is.na(val)) %>%
mutate(val = (val %>% str_replace_all(",","") %>% as.numeric()))
# One column per level (Min/Max/Median).
emissions <- emissions %>%
spread(key = level, value = val)
# World aggregate rows only.
ge <- emissions %>% filter(str_detect(country.name, "^World"))
|
d388272ab9f7a5c0b8bc1f9a5011b732d6bbd03e
|
b30a6a9d69305509e197bd36d5307578a05ad46f
|
/formattingfiles.R
|
b602f4890215826d47d2bdf875bdb8adb61cc686
|
[] |
no_license
|
amwootte/analysisscripts
|
49b4d6736d1701805a960425f96d01e7397ef852
|
9ab5dd1a7659664daf652c0138510e5a3644ee62
|
refs/heads/master
| 2022-07-20T05:09:10.418987
| 2022-07-06T15:02:10
| 2022-07-06T15:02:10
| 116,304,534
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 786
|
r
|
formattingfiles.R
|
##################
#
# GeoTIFF write test with 3^5 output: read one ensemble-mean change field
# from NetCDF, wrap it as a WGS84 raster, preview it, and write a GeoTIFF.
library(ncdf4)
library(rgdal)
library(raster)
library(rasterVis)
library(maps)
library(maptools)
var <- "heatwaves"
scen <- "rcp26"
# Pull the projected mean difference plus coordinate vectors.
# (Fixed: paste(..., sep="") replaced with the idiomatic paste0().)
test <- nc_open(paste0("/data2/3to5/I35/ens_means/",var,"_ensmean_absolute_2041-2070.nc"))
vardata <- ncvar_get(test,paste0("projmeandiff_",scen))
lon <- ncvar_get(test,"lon")
lat <- ncvar_get(test,"lat")
nc_close(test)
# Transpose and reverse the latitude axis so row 1 is the northernmost
# band, as raster expects. Longitudes are shifted by -360
# (assumes the file stores lon in 0-360 degrees -- TODO confirm).
dataras <- raster(t(vardata[,length(lat):1]))
extent(dataras) <- c(lon[1]-360,lon[length(lon)]-360,lat[1],lat[length(lat)])
# Quick visual sanity check with state boundaries overlaid.
plot(dataras)
map("state",add=TRUE)
crs(dataras) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
rf <- writeRaster(dataras, filename=paste0(var,"_",scen,"_meanchange_2041-2070.tif"), format="GTiff", overwrite=TRUE)
|
f7c0e3ff31adfeb160a936629e9b85bf49263afc
|
38d928387b7ddce39d994af248efab9f34bab684
|
/generalize/man/r.value.Rd
|
0c8111588280ad16411f520cc4edfa641893e988
|
[] |
no_license
|
tamartsi/generalize
|
2d33d73565fde136eb0334be52da6c18641fd73f
|
e09b55ad2e8f845724c40ec392e1a16e5f92da5a
|
refs/heads/master
| 2021-01-10T02:51:04.734117
| 2016-09-21T00:43:46
| 2016-09-21T00:43:46
| 48,065,612
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,013
|
rd
|
r.value.Rd
|
\name{r.value}
\alias{r.value}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
r-value computation
}
\description{
The function computes r-values given two vectors of p-values from primary and
follow-up studies. r-values assess the False Discovery Rate (FDR) of replicability
claims across the primary and follow-up studies.
This is a function from Ruth Heller, adapted to compute FWER r-values in addition to FDR r-values.
}
\usage{
r.value(p1, p2, m, c2 = 0.5, control.measure = "FDR", l00 = 0.8 )
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{p1}{
Numeric vector of the p-values from study 1
}
\item{p2}{
Numeric vector of the p-values from study 2
}
\item{m}{
Number of features examined in the primary study.
}
\item{c2}{
Parameter for relative boost to the p-values from the primary study.
0.5 (default) is recommended, since was observed in simulations to yield
similar power to procedure with the optimal value (which is unknown for real data).
}
\item{control.measure}{
A string, either FDR or FWER, depending on the desired measure of control on false generalizations.
}
\item{l00}{
Lower bound of the fraction of features (out of m) with true null hypotheses in both studies.
For example, for GWAS on the whole genome, the choice of 0.8 is conservative
in typical applications.
}
\item{variation}{
When 'use.m.star' is selected m* is used. m* is defined as follows:
\eqn{m^*=m\sum_{i=1}^{m}\frac{1}{i}}{m*=m*sum(1/i)}.
When 'use.t' is selected c1 is computed given the threshold tt.
Both variations guarantee that the procedure that decleares all r-values below q as replicability claims,
controls the FDR at level q, for any type of dependency of the p-values in the primary study.
default is 'none'.
}
\item{tt}{
The selection rule threshold for p-values from the primary study. must be supplied when
variation 'use.t' is selected.
}
\item{Q}{
The level of false generalization (e.g. control FDR at the q level or FWER at the q level).
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Ruth Heller, Shay Yaacoby (shay66@gmail.com), small adaptation by Tamar Sofer.
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# General example from Ruth Heller's website:
pv <- read.csv("http://www.math.tau.ac.il/~ruheller/Software/CrohnExample.csv")
rv <- r.value(p1=pv$p1,p2=pv$p2,m=635547,c2=0.5,l00=0.8)
rv2 <- r.value(p1=pv$p1,p2=pv$p2,m=635547,c2=0.5,l00=0.8,variation="use.t",tt=1e-5)
#### in this package, the function is called by testGenerelization.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.