content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## Loading Library
library(ggplot2)
library(reshape2)
library(matrixStats)

### Loading Data
# Positional arguments:
#   1: FreeC copy-number ratio table (with header row)
#   2: sample sheet (no header; column 1 = sample names)
#   3: output PDF file name
#   4: output directory
args <- commandArgs(TRUE)
data <- read.table(args[1], header = TRUE)
head(data)
# Drop the second column (not used by the plots; presumably a position
# column from the FreeC output -- confirm against the upstream format).
data <- data[, -2]
sample <- read.table(args[2], header = FALSE)
filename <- args[3]
#outdir<-"/WPSnew/wangrui/Project/Human_Eight_Cell/PBAT/StatInfo/01.CNV/FreeC_Result/"
outdir <- args[4]
## Function Definition

# Plot per-sample copy-number profiles along the genome from FreeC ratios.
#
# x      : data.frame whose first column is the chromosome and whose remaining
#          columns are per-sample copy-number ratios (one row per window).
# sample : optional sample sheet; its first column supplies the sample names.
# Side effect: prints two ggplots (chromosome-coloured, then Gain/Loss-coloured)
# to the active graphics device.
do_CNV_Plot_FreeC <- function(x, sample = NULL) {
  x <- as.data.frame(x)
  if (!is.null(sample)) {
    colnames(x) <- c("Chr", as.character(sample[, 1]))
  }
  # Drop mitochondrial and Y windows; keep autosomes 1-22 plus X.
  x <- x[!x$Chr == "M", ]
  x <- x[!x$Chr == "Y", ]
  chr_levels <- c(1:22, "X")
  x$Chr <- factor(x$Chr, levels = chr_levels, ordered = TRUE)
  x <- x[order(x$Chr), ]
  x$Pos <- 1:nrow(x)
  # Cumulative window counts per chromosome give the chromosome boundaries;
  # column 3 holds the midpoint of each chromosome for axis labelling.
  chr_window_num <- as.data.frame(table(x$Chr))
  colnames(chr_window_num) <- c("Chr", "Number")
  n_chr <- nrow(chr_window_num)
  chr_window_num$position <- rep(0, n_chr)
  for (i in 2:n_chr) {
    chr_window_num[i, 2] <- chr_window_num[i, 2] + chr_window_num[i - 1, 2]
  }
  chr_window_num[1, 3] <- floor(chr_window_num[1, 2] / 2)
  for (i in 2:n_chr) {
    chr_window_num[i, 3] <- floor(chr_window_num[i - 1, 2] +
      0.5 * (chr_window_num[i, 2] - chr_window_num[i - 1, 2]))
  }
  melt_data <- melt(x, id.vars = c("Chr", "Pos"))
  colnames(melt_data) <- c("Chr", "Pos", "Sample", "Copy_number")
  # Alternate the point colour between odd and even chromosomes.
  melt_data$Colors <- 2
  melt_data[melt_data$Chr %in% c(seq(1, 22, 2), "X"), "Colors"] <- 1
  melt_data$Colors <- factor(melt_data$Colors)
  # BUG FIX: Pos is numeric, so a continuous x scale is required; with
  # scale_x_discrete() the chromosome breaks/labels were silently dropped.
  P1 <- ggplot(data = melt_data, aes(Pos, Copy_number, colour = Colors)) +
    geom_point(size = 2) +
    facet_grid(Sample ~ .) +
    geom_vline(xintercept = chr_window_num$Number,
               colour = "black", linetype = "longdash") +
    geom_hline(yintercept = c(0.5, 1, 1.5, 2), alpha = 0.5, colour = "grey") +
    scale_color_manual(values = c("blue", "red")) +
    scale_x_continuous(breaks = chr_window_num$position,
                       labels = chr_window_num$Chr) +
    theme_bw() +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black")) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ylim(0, 2.3)
  print(P1)
  # Classify each window: Gain (> 1.3), Loss (< 0.7), otherwise Normal.
  melt_data$CNV <- "Normal"
  melt_data[melt_data$Copy_number > 1.3, "CNV"] <- "Gain"
  melt_data[melt_data$Copy_number < 0.7, "CNV"] <- "Loss"
  melt_data$CNV <- factor(melt_data$CNV, levels = c("Normal", "Gain", "Loss"),
                          ordered = TRUE)
  P2 <- ggplot(data = melt_data, aes(Pos, Copy_number, colour = CNV)) +
    geom_point(size = 2) +
    facet_grid(Sample ~ .) +
    geom_vline(xintercept = chr_window_num$Number,
               colour = "black", linetype = "longdash") +
    geom_hline(yintercept = c(0.5, 1, 1.5, 2), alpha = 0.5, colour = "grey") +
    scale_color_manual(values = c("lightgrey", "red", "blue")) +
    scale_x_continuous(breaks = chr_window_num$position,
                       labels = chr_window_num$Chr) +
    theme_bw() +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black")) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ylim(0, 2.3)
  print(P2)
}
# One facet row per sample, so the page height scales with the sample count.
pdf(file = file.path(outdir, filename), width = 17,
    height = 4 + nrow(sample) * 1.5)
do_CNV_Plot_FreeC(data, sample = sample)
dev.off()
| /CNV_Plot_FreeC.R | no_license | WRui/Colon_FAP | R | false | false | 2,978 | r | ## Loading Library
library(ggplot2)
library(reshape2)
library(matrixStats)

### Loading Data
# Positional arguments:
#   1: FreeC copy-number ratio table (with header row)
#   2: sample sheet (no header; column 1 = sample names)
#   3: output PDF file name
#   4: output directory
args <- commandArgs(TRUE)
data <- read.table(args[1], header = TRUE)
head(data)
# Drop the second column (not used by the plots; presumably a position
# column from the FreeC output -- confirm against the upstream format).
data <- data[, -2]
sample <- read.table(args[2], header = FALSE)
filename <- args[3]
#outdir<-"/WPSnew/wangrui/Project/Human_Eight_Cell/PBAT/StatInfo/01.CNV/FreeC_Result/"
outdir <- args[4]
## Function Definition

# Plot per-sample copy-number profiles along the genome from FreeC ratios.
#
# x      : data.frame whose first column is the chromosome and whose remaining
#          columns are per-sample copy-number ratios (one row per window).
# sample : optional sample sheet; its first column supplies the sample names.
# Side effect: prints two ggplots (chromosome-coloured, then Gain/Loss-coloured)
# to the active graphics device.
do_CNV_Plot_FreeC <- function(x, sample = NULL) {
  x <- as.data.frame(x)
  if (!is.null(sample)) {
    colnames(x) <- c("Chr", as.character(sample[, 1]))
  }
  # Drop mitochondrial and Y windows; keep autosomes 1-22 plus X.
  x <- x[!x$Chr == "M", ]
  x <- x[!x$Chr == "Y", ]
  chr_levels <- c(1:22, "X")
  x$Chr <- factor(x$Chr, levels = chr_levels, ordered = TRUE)
  x <- x[order(x$Chr), ]
  x$Pos <- 1:nrow(x)
  # Cumulative window counts per chromosome give the chromosome boundaries;
  # column 3 holds the midpoint of each chromosome for axis labelling.
  chr_window_num <- as.data.frame(table(x$Chr))
  colnames(chr_window_num) <- c("Chr", "Number")
  n_chr <- nrow(chr_window_num)
  chr_window_num$position <- rep(0, n_chr)
  for (i in 2:n_chr) {
    chr_window_num[i, 2] <- chr_window_num[i, 2] + chr_window_num[i - 1, 2]
  }
  chr_window_num[1, 3] <- floor(chr_window_num[1, 2] / 2)
  for (i in 2:n_chr) {
    chr_window_num[i, 3] <- floor(chr_window_num[i - 1, 2] +
      0.5 * (chr_window_num[i, 2] - chr_window_num[i - 1, 2]))
  }
  melt_data <- melt(x, id.vars = c("Chr", "Pos"))
  colnames(melt_data) <- c("Chr", "Pos", "Sample", "Copy_number")
  # Alternate the point colour between odd and even chromosomes.
  melt_data$Colors <- 2
  melt_data[melt_data$Chr %in% c(seq(1, 22, 2), "X"), "Colors"] <- 1
  melt_data$Colors <- factor(melt_data$Colors)
  # BUG FIX: Pos is numeric, so a continuous x scale is required; with
  # scale_x_discrete() the chromosome breaks/labels were silently dropped.
  P1 <- ggplot(data = melt_data, aes(Pos, Copy_number, colour = Colors)) +
    geom_point(size = 2) +
    facet_grid(Sample ~ .) +
    geom_vline(xintercept = chr_window_num$Number,
               colour = "black", linetype = "longdash") +
    geom_hline(yintercept = c(0.5, 1, 1.5, 2), alpha = 0.5, colour = "grey") +
    scale_color_manual(values = c("blue", "red")) +
    scale_x_continuous(breaks = chr_window_num$position,
                       labels = chr_window_num$Chr) +
    theme_bw() +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black")) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ylim(0, 2.3)
  print(P1)
  # Classify each window: Gain (> 1.3), Loss (< 0.7), otherwise Normal.
  melt_data$CNV <- "Normal"
  melt_data[melt_data$Copy_number > 1.3, "CNV"] <- "Gain"
  melt_data[melt_data$Copy_number < 0.7, "CNV"] <- "Loss"
  melt_data$CNV <- factor(melt_data$CNV, levels = c("Normal", "Gain", "Loss"),
                          ordered = TRUE)
  P2 <- ggplot(data = melt_data, aes(Pos, Copy_number, colour = CNV)) +
    geom_point(size = 2) +
    facet_grid(Sample ~ .) +
    geom_vline(xintercept = chr_window_num$Number,
               colour = "black", linetype = "longdash") +
    geom_hline(yintercept = c(0.5, 1, 1.5, 2), alpha = 0.5, colour = "grey") +
    scale_color_manual(values = c("lightgrey", "red", "blue")) +
    scale_x_continuous(breaks = chr_window_num$position,
                       labels = chr_window_num$Chr) +
    theme_bw() +
    theme(panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          panel.background = element_blank(),
          axis.line = element_line(colour = "black")) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ylim(0, 2.3)
  print(P2)
}
# One facet row per sample, so the page height scales with the sample count.
pdf(file = file.path(outdir, filename), width = 17,
    height = 4 + nrow(sample) * 1.5)
do_CNV_Plot_FreeC(data, sample = sample)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/2-plot-functions.R
\name{node}
\alias{node}
\title{Plot a node}
\usage{
node(x, label = "", col = "black", cex_label = 1, cex_node = 1, ...)
}
\arguments{
\item{x}{the center}
\item{label}{the label}
\item{col}{color}
\item{cex_label}{cex parameter to be passed to text}
\item{cex_node}{cex parameter for nodes}
\item{...}{additional parameters passed to \code{par()}}
}
\description{
Plot a node
}
\keyword{internal}
| /man/node.Rd | permissive | FedericoCarli/stagedtrees | R | false | true | 501 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/2-plot-functions.R
\name{node}
\alias{node}
\title{Plot a node}
\usage{
node(x, label = "", col = "black", cex_label = 1, cex_node = 1, ...)
}
\arguments{
\item{x}{the center}
\item{label}{the label}
\item{col}{color}
\item{cex_label}{cex parameter to be passed to text}
\item{cex_node}{cex parameter for nodes}
\item{...}{additional parameters passed to \code{par()}}
}
\description{
Plot a node
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset.R
\docType{data}
\name{dataset}
\alias{dataset}
\title{Demonstration data created to resemble data collected from an educational
assessment.}
\format{A data frame with 20 rows and 5 variables: \describe{ \item{ID_var}{A
student identification variable} \item{FName}{First names of each student}
\item{Var1}{One score} \item{Var2}{A second score} \item{Perf_Lvl}{Each
student's performance level} \item{dates}{Birthdates}}}
\usage{
dataset
}
\description{
Demonstration data created to resemble data collected from an educational
assessment.
}
\keyword{datasets}
| /man/dataset.Rd | no_license | jenitivecase/inspectr | R | false | true | 654 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataset.R
\docType{data}
\name{dataset}
\alias{dataset}
\title{Demonstration data created to resemble data collected from an educational
assessment.}
\format{A data frame with 20 rows and 5 variables: \describe{ \item{ID_var}{A
student identification variable} \item{FName}{First names of each student}
\item{Var1}{One score} \item{Var2}{A second score} \item{Perf_Lvl}{Each
student's performance level} \item{dates}{Birthdates}}}
\usage{
dataset
}
\description{
Demonstration data created to resemble data collected from an educational
assessment.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KNN.R
\name{KNN}
\alias{KNN}
\title{Getting cluster numbers for each of the nearest neighbor of a cell}
\usage{
KNN(pathwayscores, index, clusters)
}
\arguments{
\item{pathwayscores}{Raw adjusted p-value matrix}
\item{index}{indices of nearest neighbours obtained from index function}
\item{clusters}{Clusters obtained from hierarchical clustering}
}
\value{
Matrix having cluster or class number for each of the top nearest neighbor of individual cell
}
\description{
Getting cluster numbers for each of the nearest neighbor of a cell
}
\examples{
KNN()
}
| /man/KNN.Rd | no_license | reggenlab/UniPath | R | false | true | 635 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KNN.R
\name{KNN}
\alias{KNN}
\title{Getting cluster numbers for each of the nearest neighbor of a cell}
\usage{
KNN(pathwayscores, index, clusters)
}
\arguments{
\item{pathwayscores}{Raw adjusted p-value matrix}
\item{index}{indices of nearest neighbours obtained from index function}
\item{clusters}{Clusters obtained from hierarchical clustering}
}
\value{
Matrix having cluster or class number for each of the top nearest neighbor of individual cell
}
\description{
Getting cluster numbers for each of the nearest neighbor of a cell
}
\examples{
KNN()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/updateOptimizationSettings.R
\name{dicoOptimizationSettings}
\alias{dicoOptimizationSettings}
\title{Correspondence between arguments of \code{updateOptimizationSettings} and actual Antares parameters.}
\usage{
dicoOptimizationSettings(arg)
}
\arguments{
\item{arg}{An argument from function \code{updateOptimizationSettings}.}
}
\value{
The corresponding Antares general parameter.
}
\description{
Correspondence between arguments of \code{updateOptimizationSettings} and actual Antares parameters.
}
\examples{
dicoGeneralSettings("year.by.year") # "year-by-year"
}
| /man/dicoOptimizationSettings.Rd | no_license | rte-antares-rpackage/antaresEditObject | R | false | true | 646 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/updateOptimizationSettings.R
\name{dicoOptimizationSettings}
\alias{dicoOptimizationSettings}
\title{Correspondence between arguments of \code{updateOptimizationSettings} and actual Antares parameters.}
\usage{
dicoOptimizationSettings(arg)
}
\arguments{
\item{arg}{An argument from function \code{updateOptimizationSettings}.}
}
\value{
The corresponding Antares general parameter.
}
\description{
Correspondence between arguments of \code{updateOptimizationSettings} and actual Antares parameters.
}
\examples{
dicoGeneralSettings("year.by.year") # "year-by-year"
}
|
### Parallelisation script for the Ubuntu server (Ale).
### Runs neutral-model simulations over rows of a hypercube design table.
# Packages for parallelisation (plyr supplies llply, doMC the parallel backend).
# NOTE(review): require() only warns when a package is missing; library()
# would fail fast here.
require(plyr)
require(doMC)
# Script defining the simula.neutra.trade() function used below.
source("simula.neutra.trade_LEVE_dp0_sem_banco_sem_papi_rapido.R")
# Hypercube design data; provides the `dados3_25jul16` table used below.
load("dados_arredond_hipercubo_25jul16.RData")
# Run one simulation replicate.
#
# `replica` indexes a row of the global `dados3_25jul16` design table:
#   column 1 = number of species (S),
#   column 2 = number of disturbance events,
#   column 3 = disturbance intensity (passed as dist.int).
# Returns whatever simula.neutra.trade() produces.
simula.parallel <- function(replica) {
# j is chosen so the community totals roughly 5000 individuals across S species.
res <- simula.neutra.trade(S = dados3_25jul16[replica,1],
j = round(5000/dados3_25jul16[replica,1]),
# xi0: S equally spaced values in [1, 20000], one block of j per species
# (presumably initial trait values -- confirm against simula.neutra.trade()).
xi0 = rep(seq(1,20000,length.out = dados3_25jul16[replica,1]),each=round(5000/dados3_25jul16[replica,1])),
X = 20000,
dp = 0,
# Disturbance positions: evenly spread over the 3e5 cycles when
# 0 < n < 3e5; NULL when there are none; every cycle otherwise.
dist.pos = if(dados3_25jul16[replica,2]>0 & dados3_25jul16[replica,2]<3e5) round(seq(from = 3e5/(dados3_25jul16[replica,2]+1), to = 3e5-(3e5/(dados3_25jul16[replica,2]+1)), length.out = dados3_25jul16[replica,2])) else if(dados3_25jul16[replica,2]==0) NULL else seq(1,3e5,1),
dist.int = dados3_25jul16[replica,3],
ciclo = 3e5,
step = 100
)
return(res)
}
######## doMC + plyr ########
# Use four worker cores for the parallel map below.
registerDoMC(4)
# Batches of 8 replicates each; seq(1,1000,8)[46:50] are the starting row
# indices for this slice of the design (rows 361-400 per the file name).
# Each batch is saved to its own .RData file.
for (i in (seq(1,1000,8)[46:50]))
{
replica.sim <- as.list(i:(i+7))
resultados <- llply(.data = replica.sim, .fun = simula.parallel, .parallel = TRUE)
save(resultados,file=paste("resultados25jul16-",i,"_",i+7,".RData",sep=""))
} | /paralelizacao/simulacoes_25jul16/simula_parallel_servidor_25jul16_361-400.R | no_license | luisanovara/simula-neutra-step | R | false | false | 1,425 | r | ### Script para paralelização no servidor Ubuntu (Ale)
# Packages for parallelisation (plyr supplies llply, doMC the parallel backend).
# NOTE(review): require() only warns when a package is missing; library()
# would fail fast here.
require(plyr)
require(doMC)
# Script defining the simula.neutra.trade() function used below.
source("simula.neutra.trade_LEVE_dp0_sem_banco_sem_papi_rapido.R")
# Hypercube design data; provides the `dados3_25jul16` table used below.
load("dados_arredond_hipercubo_25jul16.RData")
# Run one simulation replicate.
#
# `replica` indexes a row of the global `dados3_25jul16` design table:
#   column 1 = number of species (S),
#   column 2 = number of disturbance events,
#   column 3 = disturbance intensity (passed as dist.int).
# Returns whatever simula.neutra.trade() produces.
simula.parallel <- function(replica) {
# j is chosen so the community totals roughly 5000 individuals across S species.
res <- simula.neutra.trade(S = dados3_25jul16[replica,1],
j = round(5000/dados3_25jul16[replica,1]),
# xi0: S equally spaced values in [1, 20000], one block of j per species
# (presumably initial trait values -- confirm against simula.neutra.trade()).
xi0 = rep(seq(1,20000,length.out = dados3_25jul16[replica,1]),each=round(5000/dados3_25jul16[replica,1])),
X = 20000,
dp = 0,
# Disturbance positions: evenly spread over the 3e5 cycles when
# 0 < n < 3e5; NULL when there are none; every cycle otherwise.
dist.pos = if(dados3_25jul16[replica,2]>0 & dados3_25jul16[replica,2]<3e5) round(seq(from = 3e5/(dados3_25jul16[replica,2]+1), to = 3e5-(3e5/(dados3_25jul16[replica,2]+1)), length.out = dados3_25jul16[replica,2])) else if(dados3_25jul16[replica,2]==0) NULL else seq(1,3e5,1),
dist.int = dados3_25jul16[replica,3],
ciclo = 3e5,
step = 100
)
return(res)
}
######## doMC + plyr ########
# Use four worker cores for the parallel map below.
registerDoMC(4)
# Batches of 8 replicates each; seq(1,1000,8)[46:50] are the starting row
# indices for this slice of the design (rows 361-400 per the file name).
# Each batch is saved to its own .RData file.
for (i in (seq(1,1000,8)[46:50]))
{
replica.sim <- as.list(i:(i+7))
resultados <- llply(.data = replica.sim, .fun = simula.parallel, .parallel = TRUE)
save(resultados,file=paste("resultados25jul16-",i,"_",i+7,".RData",sep=""))
} |
# Load the recorded motor data (presumably OpenBCI trial recordings -- confirm
# against the acquisition script). Path is relative to the working directory.
d <- read.csv("motor_data_tomas.csv")
| /utilities/explore.R | no_license | Cognitive-Technology-Group/PyOBCI | R | false | false | 39 | r |
d <- read.csv("motor_data_tomas.csv")
|
# Base plot reused by the download tests below.
# NOTE(review): rnorm() makes this fixture non-deterministic; a fixed numeric
# vector would make any failure reproducible.
gg <- ggplot(mapping = aes(x = 1:10, y = rnorm(10))) +
geom_bar(stat = 'identity')
# make_download() with the one-shot ggtrack() API: the PNG must be written to
# disk and the returned HTML must be a <button>-style download link.
test_that("Download generates a file and link with basic API", {
api_basic <- ggtrack(gg,
qr_content = 'text content here')
# date = 'delete' appears to be appended to the file name (see the
# "_delete" suffix asserted below), which keeps the name deterministic
# so the test can clean up afterwards.
dl_basic <- make_download(api_basic,
save_file = c('files', 'basic_ggtrack'),
download_file = c('files', 'basic_ggtrack'),
type = 'button',
date = 'delete',
render = FALSE)
expect_true(file.exists('files/basic_ggtrack_delete.png'))
expect_type(dl_basic, 'character')
# does the link contain the correct URL and type
expect_equal(as.character(strcapture('href="(.*?)"', dl_basic, proto = 'c')),
"files/basic_ggtrack_delete.png")
expect_equal(as.character(strcapture('button type="(.*?)"', dl_basic, proto = 'c')),
'submit')
# Clean up the generated file (file.remove returning TRUE also proves it existed).
expect_true(file.remove("files/basic_ggtrack_delete.png"))
})
# Same flow via the composable make_tracker() pipeline API; type = 'link'
# must yield a shiny.tag anchor rather than an HTML string with a <button>.
test_that("Download generates a file and link with advanced API", {
adv <- make_tracker() %>%
add_logo('files/ggtrack-logo.svg', 1) %>%
add_qr('some text', justification = 1) %>%
add_caption('some text') %>%
add_theme(plot.background = element_rect(fill = "#ff9955", size = 0))
api_adv <- gg %>%
add_banner(adv)
dl_adv <- make_download(api_adv,
save_file = c('files', 'adv_ggtrack'),
download_file = c('files', 'adv_ggtrack'),
type = 'link',
date = 'delete',
render = FALSE)
expect_true(file.exists('files/adv_ggtrack_delete.png'))
expect_s3_class(dl_adv, 'shiny.tag')
# does the link contain the correct URL and type
expect_equal(as.character(strcapture('href="(.*?)"', as.character(dl_adv), proto = 'c')),
"files/adv_ggtrack_delete.png")
# A plain link must not carry a button type attribute.
expect_true(is.na(strcapture('button type="(.*?)"', as.character(dl_adv), proto = 'c')))
expect_true(file.remove("files/adv_ggtrack_delete.png"))
})
| /tests/testthat/test-download.R | permissive | mrjoh3/ggtrack | R | false | false | 2,072 | r |
gg <- ggplot(mapping = aes(x = 1:10, y = rnorm(10))) +
geom_bar(stat = 'identity')
test_that("Download generates a file and link with basic API", {
api_basic <- ggtrack(gg,
qr_content = 'text content here')
dl_basic <- make_download(api_basic,
save_file = c('files', 'basic_ggtrack'),
download_file = c('files', 'basic_ggtrack'),
type = 'button',
date = 'delete',
render = FALSE)
expect_true(file.exists('files/basic_ggtrack_delete.png'))
expect_type(dl_basic, 'character')
# does the link contain the correct URL and type
expect_equal(as.character(strcapture('href="(.*?)"', dl_basic, proto = 'c')),
"files/basic_ggtrack_delete.png")
expect_equal(as.character(strcapture('button type="(.*?)"', dl_basic, proto = 'c')),
'submit')
expect_true(file.remove("files/basic_ggtrack_delete.png"))
})
test_that("Download generates a file and link with advanced API", {
adv <- make_tracker() %>%
add_logo('files/ggtrack-logo.svg', 1) %>%
add_qr('some text', justification = 1) %>%
add_caption('some text') %>%
add_theme(plot.background = element_rect(fill = "#ff9955", size = 0))
api_adv <- gg %>%
add_banner(adv)
dl_adv <- make_download(api_adv,
save_file = c('files', 'adv_ggtrack'),
download_file = c('files', 'adv_ggtrack'),
type = 'link',
date = 'delete',
render = FALSE)
expect_true(file.exists('files/adv_ggtrack_delete.png'))
expect_s3_class(dl_adv, 'shiny.tag')
# does the link contain the correct URL and type
expect_equal(as.character(strcapture('href="(.*?)"', as.character(dl_adv), proto = 'c')),
"files/adv_ggtrack_delete.png")
expect_true(is.na(strcapture('button type="(.*?)"', as.character(dl_adv), proto = 'c')))
expect_true(file.remove("files/adv_ggtrack_delete.png"))
})
|
library(data.table)
library(bit64)
library(descr)
library(dplyr)
library(tidyr)
library(magrittr)
library(lubridate)
library(ggplot2)
library(xtable)
library(lme4)
library(merTools)
library(lmtest)
library(texreg)
library(xlsx)
library(readr)
# Load the CadUnico household (CADDOM) and person (CADPES) tables, enrich
# them with the programme's municipal targets (metas) and the municipality
# ranking, then join everything onto the request list (of38.csv).
setwd("~/Documentos/CADUNICO")
CADUNICO <- fread('CADDOM.csv')
CADPES <- fread('CADPES.csv')
# NOTE: read.csv2 defaults to sep = ";"; sep = "," deliberately overrides it.
metas <- read.csv2('selecao_publico_cadunico.csv', stringsAsFactors = FALSE, header = FALSE, encoding = 'UTF-8', sep = ",")
names(metas) <- c('cd_ibge','nome_munic','nome_regiao','ano_meta')
CADUNICO <- left_join(CADUNICO, CADPES) # merge person data onto households
CADUNICO <- left_join(CADUNICO, metas)
ranking_munic <- fread("intercepto_aleatorio.csv")
CADUNICO <- left_join(CADUNICO, ranking_munic)
setwd("~/Documentos/")
listaBMS <- fread("of38.csv")
#names(metas) <- c('cd_ibge','MUNICIPIO','nome_regiao','ano_meta')
#listaBMS <- left_join(listaBMS, metas)
listaBMS <- na.omit(listaBMS)
listaBMSComResalvas <- listaBMS
listaBMSComResalvasUsuarioFinal <- listaBMS
setwd("~/Documentos/CADUNICO/SRE_Concatenadas")
listaDiamantina <- fread("MergeSres.csv")
# The third column of the request list holds the person's NIS number.
names(listaBMS)[3] <- 'num_nis_pessoa_atual'
listaBMS <- left_join(listaBMS, CADUNICO, by = "num_nis_pessoa_atual")
#listaBMS <- subset(listaBMS, listaBMS$MUNICIPIO=="DIAMANTINA")
#listaBMSComResalvas <- subset(listaBMSComResalvas, listaBMSComResalvas$MUNICIPIO=="DIAMANTINA")
###################### NIS cross-checks ############################
# All of the original element-by-element loops in this section have been
# replaced by vectorised %in% / comparison operations: identical results,
# without the quadratic cost of growing the accumulator with c().
# Bare variable names are kept so the flags are still echoed to the console.
nisPertenceCadunico <- listaBMS$num_nis_pessoa_atual %in% CADUNICO$num_nis_pessoa_atual
listaBMSComResalvas <- cbind(listaBMSComResalvas, nisPertenceCadunico)

nisPertenceListaSPE <- listaBMS$num_nis_pessoa_atual %in% listaDiamantina$NIS
nisPertenceListaSPE
listaBMSComResalvas <- cbind(listaBMSComResalvas, nisPertenceListaSPE)

###################### CPF cross-checks ############################
cpfPertenceCadunico <- listaBMS$CPF %in% CADUNICO$num_cpf_pessoa
cpfPertenceCadunico
listaBMSComResalvas <- cbind(listaBMSComResalvas, cpfPertenceCadunico)

cpfPertenceListaSPE <- listaBMS$CPF %in% listaDiamantina$CPF
cpfPertenceListaSPE
listaBMSComResalvas <- cbind(listaBMSComResalvas, cpfPertenceListaSPE)

###################### Name cross-checks ################################
nomePertenceCadunico <- listaBMS$NOME %in% CADUNICO$nom_pessoa
nomePertenceCadunico
listaBMSComResalvas <- cbind(listaBMSComResalvas, nomePertenceCadunico)

nomePertenceListaSPE <- listaBMS$NOME %in% listaDiamantina$Nome
nomePertenceListaSPE
listaBMSComResalvas <- cbind(listaBMSComResalvas, nomePertenceListaSPE)

#################### Is the municipality served in the target year? ######
# Transliterate to plain ASCII so accented municipality names compare equal.
fa <- function(x) iconv(x, to = "ASCII//TRANSLIT")
setwd("~/Documentos/")
munic <- read.csv("listaMunic2.csv", header = TRUE, sep = ",", encoding = "UTF-8")
# Transliterated values from every column of `munic`, concatenated in
# column order (same values the original column-by-column loop produced).
munic2 <- unlist(lapply(munic, fa), use.names = FALSE)

# Municipality name (CadUnico spelling) present in either the transliterated
# pool or the original MUNIC column.
anoMetaELocalPertencemPrograma <- listaBMS$nome_munic %in% munic2 |
  listaBMS$nome_munic %in% munic$MUNIC
anoMetaELocalPertencemPrograma
listaBMSComResalvas <- cbind(listaBMSComResalvas, anoMetaELocalPertencemPrograma)

##################### Eligibility flags ##################################
### CSFC 4) Keep only households classified as rural in CADUNICO.
# NA is propagated (exactly as the original loop did), so missing data can
# be told apart from FALSE downstream.
ruralPertence <- listaBMS$cod_local_domic_fam == "2"
ruralPertence
listaBMSComResalvas <- cbind(listaBMSComResalvas, ruralPertence)

### CSFC 5) Families with per-capita income up to half a minimum wage.
# fx_rfpc: 1 = up to R$77.00 | 2 = R$77.01-154.00 | 3 = R$154.01-1/2 MW |
# 4 = above 1/2 MW (ineligible).
rendaCompativel <- listaBMS$fx_rfpc != 4
rendaCompativel
listaBMSComResalvas <- cbind(listaBMSComResalvas, rendaCompativel)

### CSFC 6) Water availability (piped water, or supply by well/cistern).
aguaCanalizada <- listaBMS$cod_agua_canalizada_fam == 1
aguaCanalizada
listaBMSComResalvas <- cbind(listaBMSComResalvas, aguaCanalizada)

abastAgua <- listaBMS$cod_abaste_agua_domic_fam == 2 |
  listaBMS$cod_abaste_agua_domic_fam == 3
abastAgua
listaBMSComResalvas <- cbind(listaBMSComResalvas, abastAgua)

### CSFC 7) Heads of family (cod_parentesco_rf_pessoa == 1).
ChefeDeFámilia <- listaBMS$cod_parentesco_rf_pessoa == 1
ChefeDeFámilia
listaBMSComResalvas <- cbind(listaBMSComResalvas, ChefeDeFámilia)
######################## Compile the overall result ########################
# For every request row, derive one acceptance/exclusion reason ("motivo").
# The else-if chain is priority ordered: CADUNICO registration first, then
# rural status, income, already-served, target municipality, and finally
# head-of-family status.
#setwd("~/Documentos/")
#write.csv(listaBMSComResalvas, file = "listaBSMCOMResalvas.csv")
i <- 1
motivo <- NULL
while ( i <= (length(aguaCanalizada))){
print(i)
# Neither the NIS nor the CPF was found in CADUNICO.
if(listaBMSComResalvas$nisPertenceCadunico[i][] == FALSE && listaBMSComResalvas$cpfPertenceCadunico[i][] == FALSE){
# NOTE(review): `nomeMaePertenceCadunico` is never created anywhere in
# this script, so this subscript yields NULL and the `&&` operand is
# zero-length -- confirm whether a mother's-name check was meant to be
# computed in the sections above.
if ( listaBMSComResalvas$nomePertenceListaSPE[i][] == TRUE && listaBMSComResalvas$nomeMaePertenceCadunico[i][] == TRUE){
if (listaBMSComResalvas$nomePertenceListaSPE [i][] == TRUE){
motivo <- c(motivo, "Está sendo atendido em nossa lista, porém CPF e NIS não batem")
}
else{
motivo <- c(motivo, "Não possui documentos no CADUNICO. NOME e NOME DA MÃE estão no CADUNICO, mas não podemos afirmar se é caso homônimo ou não.")
}
}
else{
motivo <- c(motivo, "Não está cadastrado no CADUNICO")
}
}
# Not rural. The NA case also lands here: NA != TRUE is NA, and
# NA || TRUE evaluates to TRUE.
else if(listaBMSComResalvas$ruralPertence[i][] != TRUE || is.na(listaBMSComResalvas$ruralPertence[i][])){
motivo <- c(motivo, "Não pertence a área rural")
}
#else if(is.na(listaBMSComResalvas$aguaCanalizada[i][]) && is.na(listaBMSComResalvas$abastAgua[i][])){
# motivo <- c(motivo, "Não há informações sobre água")
#}
#else if(listaBMSComResalvas$aguaCanalizada[i][] == FALSE && listaBMSComResalvas$abastAgua[i][] == FALSE){
# motivo <- c(motivo, "Não possui disponibilidade de água")
#}
else if(listaBMSComResalvas$rendaCompativel[i][] == FALSE){
motivo <- c(motivo, "Renda incompativél")
}
# Already being served by the programme (matched by NIS or CPF).
else if(listaBMSComResalvas$nisPertenceListaSPE[i][] == TRUE || listaBMSComResalvas$cpfPertenceListaSPE[i][] == TRUE){
motivo <- c(motivo, "Está sendo atendido em nossa lista")
}
else if(listaBMSComResalvas$anoMetaELocalPertencemPrograma[i][] == FALSE || is.na(listaBMSComResalvas$anoMetaELocalPertencemPrograma[i][])){
motivo <- c(motivo, "Não pertence a um local atendido no ano de 2017")
}
else{
if(is.na(listaBMSComResalvas$ChefeDeFámilia[i][])){
motivo <- c(motivo, "Não há informação sobre chefe de família")
}else if(listaBMSComResalvas$ChefeDeFámilia[i][] == FALSE){
motivo <- c(motivo, "Não é chefe de família")
}else{
motivo <- c(motivo, "Outro")
}
}
i <- i+1
}
# One reason per request row; echoed as a sanity check.
length(motivo)
listaBMSComResalvas <- cbind(listaBMSComResalvas, motivo)
listaBMSComResalvasUsuarioFinal <- cbind(listaBMSComResalvasUsuarioFinal, motivo)
# Write the annotated lists and run a few ad-hoc cross-checks.
setwd("~/Documentos/")
write.csv(listaBMSComResalvas, file = "of38_listacomResalvas.csv")
write.csv(listaBMSComResalvasUsuarioFinal, file = "of38_listacomResalvasUserFinal.csv")
cpf <- subset(CADUNICO, num_cpf_pessoa == 6449587660) # the only CPF found
setwd("~/Documentos/CADUNICO/SRE_Concatenadas")
listaSPE <- fread("MergeSres.csv")
setwd("~/Documentos/ListasBSMComResalva")
listaBSM <- fread("ListaBSMComResalvasCompleta.csv")
# BUG FIX: the original used `motivo = "..."`, a named argument that subset()
# silently ignores (it matches no formal and falls into `...`), so the whole
# table was returned. `==` performs the intended row filter.
listaBSM_atendidos <- subset(listaBSM, motivo == "Está sendo atendido em nossa lista")
# Vectorised membership check (replaces the element-by-element c() loop).
atendidos <- listaBSM$NIS %in% listaSPE$NIS
teste <- c("6449587660", "4191169661", "88252957668", "9281197693")
is.element(teste, CADUNICO$num_cpf_pessoa)
| /problemaBSM/of38.r | no_license | rastreia/novosEncontros | R | false | false | 8,426 | r | library(data.table)
library(bit64)
library(descr)
library(dplyr)
library(tidyr)
library(magrittr)
library(lubridate)
library(ggplot2)
library(xtable)
library(lme4)
library(merTools)
library(lmtest)
library(texreg)
library(xlsx)
library(readr)
# Load the CadUnico household (CADDOM) and person (CADPES) tables, enrich
# them with the programme's municipal targets (metas) and the municipality
# ranking, then join everything onto the request list (of38.csv).
setwd("~/Documentos/CADUNICO")
CADUNICO <- fread('CADDOM.csv')
CADPES <- fread('CADPES.csv')
# NOTE: read.csv2 defaults to sep = ";"; sep = "," deliberately overrides it.
metas <- read.csv2('selecao_publico_cadunico.csv', stringsAsFactors = FALSE, header = FALSE, encoding = 'UTF-8', sep = ",")
names(metas) <- c('cd_ibge','nome_munic','nome_regiao','ano_meta')
CADUNICO <- left_join(CADUNICO, CADPES) # merge person data onto households
CADUNICO <- left_join(CADUNICO, metas)
ranking_munic <- fread("intercepto_aleatorio.csv")
CADUNICO <- left_join(CADUNICO, ranking_munic)
setwd("~/Documentos/")
listaBMS <- fread("of38.csv")
#names(metas) <- c('cd_ibge','MUNICIPIO','nome_regiao','ano_meta')
#listaBMS <- left_join(listaBMS, metas)
listaBMS <- na.omit(listaBMS)
listaBMSComResalvas <- listaBMS
listaBMSComResalvasUsuarioFinal <- listaBMS
setwd("~/Documentos/CADUNICO/SRE_Concatenadas")
listaDiamantina <- fread("MergeSres.csv")
# The third column of the request list holds the person's NIS number.
names(listaBMS)[3] <- 'num_nis_pessoa_atual'
listaBMS <- left_join(listaBMS, CADUNICO, by = "num_nis_pessoa_atual")
#listaBMS <- subset(listaBMS, listaBMS$MUNICIPIO=="DIAMANTINA")
#listaBMSComResalvas <- subset(listaBMSComResalvas, listaBMSComResalvas$MUNICIPIO=="DIAMANTINA")
###################### NIS cross-checks ############################
# All of the original element-by-element loops in this section have been
# replaced by vectorised %in% / comparison operations: identical results,
# without the quadratic cost of growing the accumulator with c().
# Bare variable names are kept so the flags are still echoed to the console.
nisPertenceCadunico <- listaBMS$num_nis_pessoa_atual %in% CADUNICO$num_nis_pessoa_atual
listaBMSComResalvas <- cbind(listaBMSComResalvas, nisPertenceCadunico)

nisPertenceListaSPE <- listaBMS$num_nis_pessoa_atual %in% listaDiamantina$NIS
nisPertenceListaSPE
listaBMSComResalvas <- cbind(listaBMSComResalvas, nisPertenceListaSPE)

###################### CPF cross-checks ############################
cpfPertenceCadunico <- listaBMS$CPF %in% CADUNICO$num_cpf_pessoa
cpfPertenceCadunico
listaBMSComResalvas <- cbind(listaBMSComResalvas, cpfPertenceCadunico)

cpfPertenceListaSPE <- listaBMS$CPF %in% listaDiamantina$CPF
cpfPertenceListaSPE
listaBMSComResalvas <- cbind(listaBMSComResalvas, cpfPertenceListaSPE)

###################### Name cross-checks ################################
nomePertenceCadunico <- listaBMS$NOME %in% CADUNICO$nom_pessoa
nomePertenceCadunico
listaBMSComResalvas <- cbind(listaBMSComResalvas, nomePertenceCadunico)

nomePertenceListaSPE <- listaBMS$NOME %in% listaDiamantina$Nome
nomePertenceListaSPE
listaBMSComResalvas <- cbind(listaBMSComResalvas, nomePertenceListaSPE)

#################### Is the municipality served in the target year? ######
# Transliterate to plain ASCII so accented municipality names compare equal.
fa <- function(x) iconv(x, to = "ASCII//TRANSLIT")
setwd("~/Documentos/")
munic <- read.csv("listaMunic2.csv", header = TRUE, sep = ",", encoding = "UTF-8")
# Transliterated values from every column of `munic`, concatenated in
# column order (same values the original column-by-column loop produced).
munic2 <- unlist(lapply(munic, fa), use.names = FALSE)

# Municipality name (CadUnico spelling) present in either the transliterated
# pool or the original MUNIC column.
anoMetaELocalPertencemPrograma <- listaBMS$nome_munic %in% munic2 |
  listaBMS$nome_munic %in% munic$MUNIC
anoMetaELocalPertencemPrograma
listaBMSComResalvas <- cbind(listaBMSComResalvas, anoMetaELocalPertencemPrograma)

##################### Eligibility flags ##################################
### CSFC 4) Keep only households classified as rural in CADUNICO.
# NA is propagated (exactly as the original loop did), so missing data can
# be told apart from FALSE downstream.
ruralPertence <- listaBMS$cod_local_domic_fam == "2"
ruralPertence
listaBMSComResalvas <- cbind(listaBMSComResalvas, ruralPertence)

### CSFC 5) Families with per-capita income up to half a minimum wage.
# fx_rfpc: 1 = up to R$77.00 | 2 = R$77.01-154.00 | 3 = R$154.01-1/2 MW |
# 4 = above 1/2 MW (ineligible).
rendaCompativel <- listaBMS$fx_rfpc != 4
rendaCompativel
listaBMSComResalvas <- cbind(listaBMSComResalvas, rendaCompativel)

### CSFC 6) Water availability (piped water, or supply by well/cistern).
aguaCanalizada <- listaBMS$cod_agua_canalizada_fam == 1
aguaCanalizada
listaBMSComResalvas <- cbind(listaBMSComResalvas, aguaCanalizada)

abastAgua <- listaBMS$cod_abaste_agua_domic_fam == 2 |
  listaBMS$cod_abaste_agua_domic_fam == 3
abastAgua
listaBMSComResalvas <- cbind(listaBMSComResalvas, abastAgua)

### CSFC 7) Heads of family (cod_parentesco_rf_pessoa == 1).
ChefeDeFámilia <- listaBMS$cod_parentesco_rf_pessoa == 1
ChefeDeFámilia
listaBMSComResalvas <- cbind(listaBMSComResalvas, ChefeDeFámilia)
######################## Compile the overall result ########################
# For every request row, derive one acceptance/exclusion reason ("motivo").
# The else-if chain is priority ordered: CADUNICO registration first, then
# rural status, income, already-served, target municipality, and finally
# head-of-family status.
#setwd("~/Documentos/")
#write.csv(listaBMSComResalvas, file = "listaBSMCOMResalvas.csv")
i <- 1
motivo <- NULL
while ( i <= (length(aguaCanalizada))){
print(i)
# Neither the NIS nor the CPF was found in CADUNICO.
if(listaBMSComResalvas$nisPertenceCadunico[i][] == FALSE && listaBMSComResalvas$cpfPertenceCadunico[i][] == FALSE){
# NOTE(review): `nomeMaePertenceCadunico` is never created anywhere in
# this script, so this subscript yields NULL and the `&&` operand is
# zero-length -- confirm whether a mother's-name check was meant to be
# computed in the sections above.
if ( listaBMSComResalvas$nomePertenceListaSPE[i][] == TRUE && listaBMSComResalvas$nomeMaePertenceCadunico[i][] == TRUE){
if (listaBMSComResalvas$nomePertenceListaSPE [i][] == TRUE){
motivo <- c(motivo, "Está sendo atendido em nossa lista, porém CPF e NIS não batem")
}
else{
motivo <- c(motivo, "Não possui documentos no CADUNICO. NOME e NOME DA MÃE estão no CADUNICO, mas não podemos afirmar se é caso homônimo ou não.")
}
}
else{
motivo <- c(motivo, "Não está cadastrado no CADUNICO")
}
}
# Not rural. The NA case also lands here: NA != TRUE is NA, and
# NA || TRUE evaluates to TRUE.
else if(listaBMSComResalvas$ruralPertence[i][] != TRUE || is.na(listaBMSComResalvas$ruralPertence[i][])){
motivo <- c(motivo, "Não pertence a área rural")
}
#else if(is.na(listaBMSComResalvas$aguaCanalizada[i][]) && is.na(listaBMSComResalvas$abastAgua[i][])){
# motivo <- c(motivo, "Não há informações sobre água")
#}
#else if(listaBMSComResalvas$aguaCanalizada[i][] == FALSE && listaBMSComResalvas$abastAgua[i][] == FALSE){
# motivo <- c(motivo, "Não possui disponibilidade de água")
#}
else if(listaBMSComResalvas$rendaCompativel[i][] == FALSE){
motivo <- c(motivo, "Renda incompativél")
}
# Already being served by the programme (matched by NIS or CPF).
else if(listaBMSComResalvas$nisPertenceListaSPE[i][] == TRUE || listaBMSComResalvas$cpfPertenceListaSPE[i][] == TRUE){
motivo <- c(motivo, "Está sendo atendido em nossa lista")
}
else if(listaBMSComResalvas$anoMetaELocalPertencemPrograma[i][] == FALSE || is.na(listaBMSComResalvas$anoMetaELocalPertencemPrograma[i][])){
motivo <- c(motivo, "Não pertence a um local atendido no ano de 2017")
}
else{
if(is.na(listaBMSComResalvas$ChefeDeFámilia[i][])){
motivo <- c(motivo, "Não há informação sobre chefe de família")
}else if(listaBMSComResalvas$ChefeDeFámilia[i][] == FALSE){
motivo <- c(motivo, "Não é chefe de família")
}else{
motivo <- c(motivo, "Outro")
}
}
i <- i+1
}
# One reason per request row; echoed as a sanity check.
length(motivo)
listaBMSComResalvas <- cbind(listaBMSComResalvas, motivo)
listaBMSComResalvasUsuarioFinal <- cbind(listaBMSComResalvasUsuarioFinal, motivo)
setwd("~/Documentos/")
write.csv(listaBMSComResalvas, file = "of38_listacomResalvas.csv")
write.csv(listaBMSComResalvasUsuarioFinal, file = "of38_listacomResalvasUserFinal.csv")
# Single CPF found in CADUNICO for manual inspection.
cpf <- subset(CADUNICO, num_cpf_pessoa == 6449587660) #unico cpf encontrado
setwd("~/Documentos/CADUNICO/SRE_Concatenadas")
listaSPE <- fread("MergeSres.csv")
setwd("~/Documentos/ListasBSMComResalva")
listaBSM <- fread("ListaBSMComResalvasCompleta.csv")
# BUG FIX: the original used `motivo = "..."` (single `=`), which subset()
# swallows as an unused named argument passed through `...`, so NO filtering
# happened and the whole table was returned. `==` applies the intended filter.
listaBSM_atendidos <- subset(listaBSM, motivo == "Está sendo atendido em nossa lista")
# is.element() is already vectorized over its first argument, so the original
# grow-in-loop over listaBSM$NIS collapses to a single call (same result).
# NOTE(review): this tests ALL of listaBSM, not listaBSM_atendidos -- confirm
# that was intended.
atendidos <- is.element(listaBSM$NIS, listaSPE$NIS)
# Spot-check a handful of CPFs against CADUNICO.
teste <- c("6449587660", "4191169661", "88252957668", "9281197693")
is.element(teste, CADUNICO$num_cpf_pessoa)
|
/data/wind/r/R语言WSQ行情订阅使用案例/wsq_datasave.R | permissive | disappearedgod/QUANTAXIS | R | false | false | 1,766 | r | ||
############################################################################
# risk-based surveillance
###########################################################################
# adj.risk
# epi.calc
# sep.rb.bin
# sep.rb.hypergeo
# sep.rb.bin.varse
# sep.rb.hypergeo.varse
# sep.rb2.bin
# sep.rb2.hypergeo
# sse.rb.2stage
# sse.combined
# n.rb
# n.rb.varse
# include freedom functions
##' Adjusted risk
##' @description Computes the adjusted risk for each risk stratum, i.e. each
##' relative risk normalised by the population-weighted mean relative risk.
##' This is an intermediate step when deriving effective probabilities of
##' infection for risk-based surveillance activities.
##' @param rr relative risk values (vector of values corresponding to the number of risk strata)
##' @param ppr population proportions corresponding to
##' rr values (vector of equal length to rr)
##' @return vector of adjusted risk values (in order corresponding to rr)
##' @keywords methods
##' @export
##' @examples
##' # examples for adj.risk
##' adj.risk(c(5, 1), c(0.1, 0.9))
##' adj.risk(c(5, 3, 1), c(0.1, 0.1, 0.8))
adj.risk <- function(rr, ppr) {
  # Divide each stratum's relative risk by the weighted mean risk so the
  # population-weighted average of the adjusted risks equals 1.
  rr / sum(rr * ppr)
}
##' Effective probability of infection (EPI)
##' @description Computes the effective probability of infection (the design
##' prevalence adjusted for relative risk) for each risk group in a
##' risk-based surveillance activity.
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding to
##' the number of risk strata)
##' @param ppr population proportions corresponding to rr values
##' (vector of equal length to rr)
##' @return list of 2 elements, a vector of EPI values and a vector of corresponding
##' adjusted risks (in corresponding order to rr)
##' @keywords methods
##' @export
##' @examples
##' # examples for epi.calc
##' epi.calc(0.1, c(5, 1), c(0.1, 0.9))
##' epi.calc(0.02, c(5, 3, 1), c(0.1, 0.1, 0.8))
epi.calc <- function(pstar, rr, ppr) {
  adjusted <- adj.risk(rr, ppr)
  # EPI per stratum = design prevalence scaled by the adjusted risk.
  list(epi = pstar * adjusted, adj.risk = adjusted)
}
##' Binomial risk-based population sensitivity
##' @description Computes risk-based population sensitivity for a
##' single risk factor with the binomial method (large-population
##' assumption); unit sensitivity may differ between risk strata.
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding to the number of risk strata)
##' @param ppr population proportions corresponding to rr values
##' (vector of equal length to rr)
##' @param n sample size per risk category (vector same length as
##' rr and ppr)
##' @param se unit sensitivity, can vary among risk strata (fixed value or
##' vector same length as rr, ppr, n)
##' @return list of 3 elements, a scalar of population-level sensitivity
##' a vector of EPI values and a vector of corresponding adjusted risks
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb.bin
##' sep.rb.bin(0.1, c(5, 3, 1), c(0.1, 0.1, 0.8), c(5, 5, 5), 0.9)
##' sep.rb.bin(0.1, c(5, 1), c(0.1, 0.9), c(10, 5), c(0.95, 0.9))
##' sep.rb.bin(0.1, c(5, 1), c(0.1, 0.9), c(10, 5), c(0.9, 0.9))
##' sep.rb.bin(0.01, c(5, 1), c(0.1, 0.9), c(90, 50), c(0.9, 0.9))
sep.rb.bin <- function(pstar, rr, ppr, n, se) {
  calc <- epi.calc(pstar, rr, ppr)
  # Per-stratum probability that every one of the n tests is negative.
  all.negative <- (1 - se * calc$epi)^n
  # Population sensitivity = 1 - P(all tests in all strata negative).
  list(sep = 1 - prod(all.negative), epi = calc$epi, adj.risk = calc$adj.risk)
}
##' Hypergeometric risk-based population sensitivity
##' @description Computes risk-based population sensitivity for a
##' single risk factor with the hypergeometric approximation
##' (finite, known population size); unit sensitivity may differ
##' between risk strata.
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding
##' to the number of risk strata)
##' @param n sample size per risk category (vector same length as
##' rr and ppr)
##' @param N Population size per risk category (vector same length
##' as rr and ppr)
##' @param se unit sensitivity, can vary among risk strata (fixed value or a vector the same
##' length as rr, ppr, n)
##' @return list of 3 elements, a scalar of population-level sensitivity
##' a vector of EPI values and a vector of corresponding adjusted risks
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb.bin
##' sep.rb.hypergeo(0.1, c(5, 3, 1), c(10, 10, 80), c(5, 5, 5), 0.9)
##' sep.rb.hypergeo(0.1, c(5, 1), c(15, 140), c(10, 5), c(0.95, 0.9))
##' sep.rb.hypergeo(0.1, c(5, 1), c(23, 180), c(10, 5), c(0.9, 0.9))
##' sep.rb.hypergeo(0.01, c(5, 1), c(100, 900), c(90, 50), c(0.9, 0.9))
sep.rb.hypergeo <- function(pstar, rr, N, n, se) {
  # Population proportions derived from the known stratum sizes.
  calc <- epi.calc(pstar, rr, N / sum(N))
  # Hypergeometric approximation: sampling fraction n/N per stratum,
  # exponent = expected number of infected units (epi * N).
  all.negative <- (1 - se * n / N)^(calc$epi * N)
  list(sep = 1 - prod(all.negative), epi = calc$epi, adj.risk = calc$adj.risk)
}
##' Binomial risk-based population sensitivity for varying unit sensitivity
##' @description Calculates population sensitivity for a single risk factor
##' and varying unit sensitivity using binomial method (assumes large population)
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding
##' to the number of risk strata)
##' @param ppr population proportions corresponding to rr values
##' (vector of equal length to rr)
##' @param df dataframe of values for each combination of risk stratum and
##' sensitivity level,
##' col 1 = risk group index, col 2 = unit Se, col 3 = n
##' (sample size for that risk group and unit sensitivity)
##' @return list of 3 elements, a scalar of population-level sensitivity
##' a vector of EPI values and a vector of corresponding adjusted risks
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb.bin.varse
##' rg<- c(1, 1, 2, 2)
##' se<- c(0.92, 0.85, 0.92, 0.85)
##' n<- c(80, 30, 20, 30)
##' df<- data.frame(rg, se, n)
##' sep.rb.bin.varse(0.01, c(5, 1), c(0.1, 0.9), df)
##'
##' rg<- c(1, 1, 2, 2)
##' se<- c(0.95, 0.8, 0.95, 0.8)
##' n<- c(20, 10, 10, 5)
##' df<- data.frame(rg, se, n)
##' sep.rb.bin.varse(0.05, c(3, 1), c(0.2, 0.8), df)
##'
##' rg<- c(rep(1, 30), rep(2, 15))
##' se<- c(rep(0.95, 20), rep(0.8, 10), rep(0.95, 10), rep(0.8, 5))
##' n<- rep(1, 45)
##' df<- data.frame(rg, se, n)
##' sep.rb.bin.varse(0.02, c(3, 1), c(0.2, 0.8), df)
##'
##' rg<- c(1, 2, 3, 1, 2, 3)
##' se<- c(0.95, 0.95, 0.95, 0.8, 0.8, 0.8)
##' n<- c(20, 10, 10, 30, 5, 5)
##' df<- data.frame(rg, se, n)
##' sep.rb.bin.varse(0.01, c(5, 3, 1), c(0.1, 0.3, 0.6), df)
sep.rb.bin.varse<- function(pstar, rr, ppr, df) {
  epi<- epi.calc(pstar, rr, ppr)
  # FIX: exponent was `df[3]` (a one-column data frame), which silently
  # promoted p.all.neg to a data frame and relied on Summary.data.frame for
  # prod(). `df[, 3]` extracts the numeric sample-size vector, consistent
  # with the df[, 2] / df[, 1] usage on the same line; the scalar sep result
  # is unchanged.
  p.all.neg<- (1 - df[, 2]*epi[[1]][df[, 1]])^df[, 3]
  sep<- 1 - prod(p.all.neg)
  return(list(sep=sep, epi=epi[[1]], adj.risk=epi[[2]]))
}
##' Hypergeometric risk-based population sensitivity for varying unit sensitivity
##' @description Computes population sensitivity for a single risk factor
##' with unit sensitivity varying within risk groups, using the
##' hypergeometric approximation (known population size per risk group).
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding
##' to the number of risk strata)
##' @param N vector of population size for each risk group, corresponding to rr values
##' (vector of equal length to rr)
##' @param df dataframe of values for each combination of risk stratum and
##' sensitivity level,
##' col 1 = risk group index, col 2 = unit Se, col 3 = n
##' (sample size for risk group and unit sensitivity)
##' @return list of 5 elements, a scalar of population-level sensitivity
##' a vector of EPI values, a vector of corresponding Adjusted risks
##' a vector of sample sizes (n) per risk group and a vector of
##' mean unit sensitivities per risk group
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb.hypergeo.varse
##' rg<- c(1, 1, 2, 2)
##' se<- c(0.92, 0.85, 0.92, 0.85)
##' n<- c(80, 30, 20, 30)
##' df<- data.frame(rg, se, n)
##' sep.rb.hypergeo.varse(0.01, c(5, 1), c(200, 1800), df)
##'
##' rg<- c(1, 1, 2, 2)
##' se<- c(0.95, 0.8, 0.95, 0.8)
##' n<- c(20, 10, 10, 5)
##' df<- data.frame(rg, se, n)
##' sep.rb.hypergeo.varse(0.05, c(3, 1), c(100, 400), df)
##'
##' rg<- c(rep(1, 30), rep(2, 15))
##' se<- c(rep(0.95, 20), rep(0.8, 10), rep(0.95, 10), rep(0.8, 5))
##' n<- rep(1, 45)
##' df<- data.frame(rg, se, n)
##' sep.rb.hypergeo.varse(0.02, c(3, 1), c(100, 400), df)
##'
##' rg<- c(1, 2, 3, 1, 2, 3)
##' se<- c(0.95, 0.95, 0.95, 0.8, 0.8, 0.8)
##' n<- c(20, 10, 10, 30, 5, 5)
##' df<- data.frame(rg, se, n)
##' sep.rb.hypergeo.varse(0.01, c(5, 3, 1), c(100, 300, 600), df)
sep.rb.hypergeo.varse <- function(pstar, rr, N, df) {
  calc <- epi.calc(pstar, rr, N / sum(N))
  groups <- seq_along(rr)
  # Aggregate the per-(group, sensitivity) rows of df into per-group totals:
  # total sample size and mean unit sensitivity for each risk group.
  n <- vapply(groups, function(g) sum(df[df[, 1] == g, 3]), numeric(1))
  se <- vapply(groups, function(g) mean(df[df[, 1] == g, 2]), numeric(1))
  # Hypergeometric approximation per group, combined across groups.
  all.negative <- (1 - se * n / N)^(calc$epi * N)
  list(sep = 1 - prod(all.negative), epi = calc$epi, adj.risk = calc$adj.risk,
       n = n, se = se)
}
##' Binomial risk-based population sensitivity for 2 risk factors
##' @description Computes risk-based population sensitivity when units are
##' stratified by two nested risk factors, using the binomial method
##' (large-population assumption).
##' @param pstar design prevalence (scalar)
##' @param rr1 relative risks for first level risk factor (vector of values corresponding
##' to the number of risk strata)
##' @param rr2 relative risks for second level risk factor,
##' matrix, rows = levels of rr1, cols = levels of rr2
##' @param ppr1 population proportions for first level risk factor (vector of
##' same length as rr1)
##' @param ppr2 population proportions for second level
##' risk factor, matrix, rows = levels of rr1, cols = levels of rr2
##' @param n matrix of number tested for each risk group
##' (rows = levels of rr1, cols = levels of rr2)
##' @param se test unit sensitivity (scalar)
##' @return list of 4 elements, a scalar of population-level sensitivity
##' a matrix of EPI values, a vector of corresponding Adjusted risks for
##' the first risk factor and a matrix of adjusted risks for the second
##' risk factor
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb2.binom
##' pstar<- 0.01
##' rr1<- c(3, 1)
##' ppr1<- c(0.2, 0.8)
##' rr2<- rbind(c(4,1), c(4,1))
##' ppr2<- rbind(c(0.1, 0.9), c(0.3, 0.7))
##' se<- 0.8
##' n<- rbind(c(50, 20), c(20, 10))
##' sep.rb2.binom(pstar, rr1, ppr1, rr2, ppr2, n, se)
sep.rb2.binom <- function(pstar, rr1, ppr1, rr2, ppr2, n, se) {
  ar1 <- adj.risk(rr1, ppr1)
  # Matrices indexed (first-factor stratum) x (second-factor stratum).
  ar2 <- array(0, dim = dim(rr2))
  rownames(ar2) <- paste("RR1", 1:length(rr1), sep = "=")
  colnames(ar2) <- paste("RR2", 1:ncol(rr2), sep = "=")
  epi <- ar2
  p.neg <- ar2
  # A scalar sensitivity applies uniformly to every cell.
  if (length(se) == 1) se <- array(se, dim = dim(rr2))
  for (row in seq_along(rr1)) {
    # Second-factor adjusted risks are computed within each first-factor stratum.
    ar2[row, ] <- adj.risk(rr2[row, ], ppr2[row, ])
    epi[row, ] <- ar1[row] * ar2[row, ] * pstar
    # Probability that all n tests in this cell are negative.
    p.neg[row, ] <- (1 - epi[row, ] * se[row, ])^n[row, ]
  }
  list(sep = 1 - prod(p.neg), epi = epi, ar1 = ar1, ar2 = ar2)
}
##' Hypergeometric risk-based population sensitivity for 2 risk factors
##' @description Calculates risk-based population sensitivity for
##' two risk factors, using hypergeometric approximation method
##' (assumes a known population size)
##' @param pstar design prevalence (scalar)
##' @param rr1 relative risks for first level risk factor (vector of values corresponding
##' to the number of risk strata)
##' @param rr2 relative risks for second level risk factor,
##' matrix, rows = levels of rr1, cols = levels of rr2
##' @param N matrix of population size for each risk group
##' (rows = levels of rr1, cols = levels of rr2)
##' @param n matrix of number tested (sample size) for each risk group
##' (rows = levels of rr1, cols = levels of rr2)
##' @param se test unit sensitivity (scalar)
##' @return list of 6 elements, a scalar of population-level sensitivity
##' a matrix of EPI values, a vector of corresponding Adjusted risks for
##' the first risk factor and a matrix of adjusted risks for the second risk factor,
##' a vector of population proportions for the first risk factor
##' and a matrix of population proportions for the second risk factor
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb2.hypergeo
##' pstar<- 0.01
##' rr1<- c(3, 1)
##' rr2<- rbind(c(4,1), c(4,1))
##' N<- rbind(c(100, 500), c(300, 1000))
##' n<- rbind(c(50, 20), c(20, 10))
##' se<- 0.8
##' sep.rb2.hypergeo(pstar, rr1, rr2, N, n, se)
sep.rb2.hypergeo<- function(pstar, rr1, rr2, N, n, se) {
# First-factor population proportions derived from the row totals of N.
ppr1<- rowSums(N)/sum(N)
ppr2<- array(0, dim = dim(rr2))
rownames(ppr2)<- paste("RR1",1:length(rr1), sep = "=")
colnames(ppr2)<- paste("RR2",1:ncol(rr2), sep = "=")
ar1<- adj.risk(rr1, ppr1)
ar2<- array(0, dim = dim(rr2))
rownames(ar2)<- rownames(ppr2)
colnames(ar2)<- colnames(ppr2)
# epi and p.neg share the same (RR1 x RR2) shape and dimnames as ar2.
epi<- ar2
p.neg<- ar2
# A scalar unit sensitivity applies uniformly to every cell.
if (length(se) == 1) se<- array(se, dim = dim(rr2))
for (i in 1:length(rr1)) {
# Second-factor proportions and adjusted risks are computed within each
# first-factor stratum (row of N).
ppr2[i,]<- N[i,]/sum(N[i,])
ar2[i,]<- adj.risk(rr2[i,], ppr2[i,])
epi[i,]<- ar1[i]*ar2[i,]*pstar
# Hypergeometric approximation: sampling fraction n/N, exponent = expected
# number of infected units (epi * N) per cell.
p.neg[i,]<- (1 - se[i,]*n[i,]/N[i,])^(epi[i,]*N[i,])
}
# Overall sensitivity = 1 - P(all cells yield all-negative results).
sep<- 1 - prod(p.neg)
return(list(sep=sep, epi=epi,
ar1=ar1, ar2=ar2,
ppr1=ppr1, ppr2=ppr2))
}
##' Two-stage risk-based system sensitivity
##' @description Calculates system sensitivity for 2 stage risk-based
##' sampling, allowing for a single risk factor at each stage and
##' using either binomial or hypergeometric approximation
##' @param C Population size (number of clusters), NA = unknown (default)
##' @param pstar.c cluster level design prevalence (scalar)
##' @param pstar.u unit level design prevalence (scalar)
##' @param rr.c cluster level relative risks (vector with length
##' corresponding to the number of risk strata),
##' use rr.c = c(1,1) if risk factor does not apply
##' @param rr.u unit level relative risks (vector with length
##' corresponding to the number of risk strata),
##' use rr.u = c(1,1) if risk factor does not apply
##' @param ppr.c cluster level population proportions for risk
##' categories (vector), NA if no cluster level risk factor
##' @param N population size per risk group for each cluster,
##' NA or matrix of N for each risk group
##' for each cluster, N=NA means cluster sizes not provided
##' @param rg vector of cluster level risk group (index) for each cluster
##' @param n sample size per risk group for each cluster sampled,
##' matrix, 1 row for each cluster, columns = unit level risk groups
##' @param ppr.u unit level population proportions for each risk group (optional)
##' matrix, 1 row for each cluster, columns = unit level risk groups,
##' not required if N is provided
##' @param se unit sensitivity for each cluster, scalar or
##' vector of values for each cluster, equal in length to n
##' @return list of 2 elements, a scalar of population-level (surveillance system)
##' sensitivity and a vector of cluster-level sensitivities
##' @keywords methods
##' @export
##' @examples
##' # examples for sse.rb.2stage
##' pstar.c<- 0.02
##' pstar.u<- 0.1
##' rr.c<- c(5, 1)
##' ppr.c<- c(0.1, 0.9)
##' rr.u<- c(3, 1)
##' se<- 0.9
##' n<- cbind(rep(10, 50), rep(5, 50))
##' rg<- c(rep(1, 30), rep(2, 20))
##' ppr.u<- cbind(rep(0.2, 50), rep(0.8, 50))
##' N<- cbind(rep(30, 50), rep(120, 50))
##' C<- 500
##' sse.rb.2stage(C=NA, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N=NA, n, rg, se)
##' sse.rb.2stage(C, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N=NA, n, rg, se)
##' sse.rb.2stage(C=NA, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N, n, rg, se)
##' sse.rb.2stage(C, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N, n, rg, se)
sse.rb.2stage<- function(C=NA, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N=NA, n, rg, se) {
# Broadcast a scalar unit sensitivity to one value per sampled cluster.
if (length(se) == 1) se<- rep(se, nrow(n))
sep<- numeric(nrow(n))
# calculate sep for all clusters
if (length(N) == 1) {
# cluster sizes not provided so use binomial for all clusters
for (i in 1:nrow(n)) {
sep[i]<- sep.rb.bin(pstar.u, rr.u, ppr.u[i,], n[i,], se[i])[[1]]
}
} else {
# cluster sizes provided so use hypergeometric unless NA for specific clusters
for (i in 1:nrow(n)) {
if (is.na(N[i,1])) {
sep[i]<- sep.rb.bin(pstar.u, rr.u, ppr.u[i,], n[i,], se[i])[[1]]
} else {
sep[i]<- sep.rb.hypergeo(pstar.u, rr.u, N[i,], n[i,], se[i])[[1]]
}
}
}
# calculate system sensitivity: each cluster acts as a single "unit" whose
# sensitivity is its cluster-level sep (hence n = 1 in the df argument).
if (is.na(C)) {
# Population size unknown, use binomial
sse<- sep.rb.bin.varse(pstar.c, rr.c, ppr.c, df=cbind(rg, sep, 1))
} else {
sse<- sep.rb.hypergeo.varse(pstar.c, rr.c, C*ppr.c, df=cbind(rg, sep, 1))
}
return(list("System sensitivity" = sse[[1]],
"Cluster sensitivity" = sep))
}
##' System sensitivity by combining multiple surveillance components
##' @description Calculates overall system sensitivity for
##' multiple components, accounting for lack of independence
##' (overlap) between components
##' @param C population sizes (number of clusters) for each risk group,
##' NA or vector of same length as rr
##' @param pstar.c cluster level design prevalence (scalar)
##' @param rr cluster level relative risks (vector, length
##' equal to the number of risk strata)
##' @param ppr cluster level population proportions (optional),
##' not required if C is specified (NA or vector of same length as rr)
##' @param sep sep values for clusters in each component and
##' corresponding risk group. A list with multiple elements, each element
##' is a dataframe of sep values from a separate component,
##' first column= clusterid, 2nd =cluster-level risk group index, 3rd col = sep
##' @return list of 2 elements, a matrix (or vector if C not specified)
##' of population-level (surveillance system)
##' sensitivities (binomial and hypergeometric and adjusted vs unadjusted) and
##' a matrix of adjusted and unadjusted component sensitivities for each component
##' @keywords methods
##' @export
##' @examples
##' # example for sse.combined (checked in excel combined components.xlsx)
##' C<- c(300, 1200)
##' pstar<- 0.01
##' rr<- c(3,1)
##' ppr<- c(0.2, 0.8)
##' comp1<- data.frame(id=1:100, rg=c(rep(1,50), rep(2,50)), cse=rep(0.5,100))
##' comp2<- data.frame(id=seq(2, 120, by=2), rg=c(rep(1,25), rep(2,35)), cse=runif(60, 0.5, 0.8))
##' comp3<- data.frame(id=seq(5, 120, by=5), rg=c(rep(1,10), rep(2,14)), cse=runif(24, 0.7, 1))
##' sep<- list(comp1, comp2, comp3)
##' sse.combined(C, pstar, rr, sep = sep)
##' sse.combined(C=NA, pstar, rr, ppr, sep = sep)
sse.combined<- function(C = NA, pstar.c, rr, ppr, sep) {
# When risk-group cluster counts are known, derive proportions from them.
if (length(C) > 1) ppr<- C/sum(C)
components<- length(sep)
epi<- epi.calc(pstar.c, rr, ppr)[[1]]
# Create master list of clusters sampled: successive merges on cluster id
# give one row per cluster, with (rg, cse) column pairs per component.
cluster.list<- sep[[1]]
i<- 2
while (i <= components) {
cluster.list<- merge(cluster.list, sep[[i]], by.x = 1, by.y = 1, all.x=T, all.y=T)
i<- i+1
}
# ensure risk group recorded in data: if a cluster was absent from component
# 1, take its risk group from the first component that did sample it.
risk.group<- cluster.list[,2]
tmp<- which(is.na(risk.group))
if (length(tmp)>0) {
for (i in tmp) {
j<- 2
while (j<=components && is.na(risk.group[i])) {
risk.group[i]<- cluster.list[i,(j-1)*2+2]
j<- j+1
}
}
}
# Replace NA values with 0 (clusters not sampled by a component contribute
# a sep of 0 for that component).
for (i in 2:ncol(cluster.list)) {
cluster.list[is.na(cluster.list[,i]), i]<- 0
}
# set up arrays for epi and p.neg (adjusted and unadjusted) for each cluster and each component
epi.c<- array(0, dim = c(nrow(cluster.list), components))
epi.c[,1]<- epi[risk.group]
# dim 3: 1 = adjusted, 2 = unadjusted (independence)
p.neg<- array(0, dim = c(nrow(cluster.list), components, 2))
p.neg[,1,1]<- 1-cluster.list[,3]*epi.c[,1]
p.neg[,1,2]<- p.neg[,1,1]
for (i in 2:components) {
for (j in 1:nrow(cluster.list)) {
# NOTE(review): pfree.1 is defined elsewhere in this package ("include
# freedom functions" header); it appears to update the cluster-level
# probability of infection after the previous component's evidence.
epi.c[j,i]<- 1 - pfree.1(cluster.list[j,(i-1)*2+1], 0, 1-epi.c[j,i-1])[,4]
}
p.neg[,i,1]<- 1-cluster.list[,(i-1)*2+3]*epi.c[,i]
p.neg[,i,2]<- 1-cluster.list[,(i-1)*2+3]*epi.c[,1]
}
# calculate n, mean sep and mean epi for each risk group and component
n<- array(0, dim = c(components, length(rr)))
sep.mean<- array(0, dim = c(components, length(rr)))
epi.mean<- array(0, dim = c(components, length(rr), 2))
for (i in 1:components) {
n[i,]<- table(sep[[i]][2])
sep.mean[i,]<- sapply(split(sep[[i]][3], sep[[i]][2]), FUN=colMeans)
epi.mean[i,,1]<- sapply(split(epi.c[cluster.list[,(i-1)*2+2] > 0,i], cluster.list[cluster.list[,(i-1)*2+2] > 0,(i-1)*2+2]), FUN=mean)
# NOTE(review): this assigns component 1's adjusted means to every
# component's "unadjusted" slot regardless of i; epi.mean[i,,1] (or the
# raw epi per risk group) may have been intended -- confirm against the
# package's reference implementation before changing.
epi.mean[i,,2]<- epi.mean[1,,1]
}
# Calculate cse and sse
cse<- array(0, dim = c(2, components, 2))
rownames(cse)<- c("Adjusted", "Unadjusted")
colnames(cse)<- paste("Component", 1:components)
dimnames(cse)[[3]]<- c("Binomial", "Hypergeometric")
sse<- array(0, dim = c(2, 2))
rownames(sse)<- rownames(cse)
colnames(sse)<- dimnames(cse)[[3]]
rownames(epi.mean)<- colnames(cse)
rownames(sep.mean)<- colnames(cse)
rownames(n)<- colnames(cse)
colnames(epi.mean)<- paste("RR =",rr)
colnames(sep.mean)<- paste("RR =",rr)
colnames(n)<- paste("RR =",rr)
dimnames(epi.mean)[[3]]<- rownames(cse)
# rows = adjusted and unadjusted, dim3 = binomial and hypergeometric
for (i in 1:2) {
for (j in 1:components) {
cse[i,j,1]<- 1 - prod(p.neg[,j,i])
# Hypergeometric variant only possible when cluster counts C are known.
if (length(C) > 1) {
cse[i,j,2]<- 1 - prod((1 - sep.mean[j,]*n[j,]/C)^(epi.mean[j,,i]*C))
}
}
sse[i,1]<- 1- prod(1 - cse[i,,1])
sse[i,2]<- 1- prod(1 - cse[i,,2])
}
# Without C, only the binomial slice is meaningful; drop the other dimension.
if (length(C) <= 1) {
sse<- sse[,1]
cse<- cse[,,1]
}
return(list("System sensitivity"= sse,
"Component sensitivity" = cse))
}
##' Risk-based sample size
##' @description Computes the sample size required for risk-based sampling
##' with a single risk factor, using the binomial method.
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector, length equal to the number of risk strata)
##' @param ppr population proportions corresponding to rr values
##' (vector of equal length to rr)
##' @param spr planned surveillance proportion for each risk group
##' (vector equal length to rr, ppr)
##' @param se unit sensitivity (fixed or vector same length as rr, ppr, n)
##' @param sep required population sensitivity (scalar)
##' @return list of 2 elements, a vector of sample sizes for each risk group
##' a scalar of total sample size, a vector of EPI values and a vector of
##' adjusted risks
##' @keywords methods
##' @export
##' @examples
##' # examples for n.rb
##' n.rb(0.1, c(5, 3, 1), c(0.1, 0.10, 0.80), c(0.5, 0.3, 0.2), 0.9, 0.95)
##' n.rb(0.01, c(5, 1), c(0.1, 0.9), c(0.8, 0.2), c(0.9, 0.95), 0.95)
n.rb <- function(pstar, rr, ppr, spr, se, sep) {
  calc <- epi.calc(pstar, rr, ppr)
  # Probability that a single sampled unit tests positive, averaged over
  # the planned surveillance proportions.
  p.pos <- sum(calc$epi * spr * se)
  n.total <- ceiling(log(1 - sep) / log(1 - p.pos))
  groups <- length(rr)
  n <- numeric(groups)
  # Allocate by planned proportion (rounded up) for all but the last group;
  # the last group takes the remainder so the allocations sum to n.total.
  if (groups > 1) {
    n[1:(groups - 1)] <- ceiling(n.total * spr[1:(groups - 1)])
  }
  n[groups] <- n.total - sum(n)
  list(n = n, total = n.total, epi = calc$epi, adj.risk = calc$adj.risk)
}
##' Risk-based sample size for varying unit sensitivity
##' @description Calculates sample size for risk-based sampling
##' for a single risk factor and varying unit sensitivity,
##' using binomial method
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector, length equal to the number of risk strata)
##' @param ppr population proportions for each risk group,
##' vector of same length as rr
##' @param spr planned surveillance proportions for each risk group,
##' vector of same length as rr
##' @param se unit sensitivities (vector of group values)
##' @param spr.rg proportions of samples for each sensitivity value
##' in each risk group (matrix with rows = risk groups, columns = sensitivity values),
##' row sums must equal 1
##' @param sep required population sensitivity (scalar)
##' @return list of 3 elements, a matrix of sample sizes for each risk
##' and sensitivity group, a vector of EPI values and a vector of
##' mean sensitivity for each risk group
##' @keywords methods
##' @export
##' @examples
##' # examples for n.rb.varse
##' m<- rbind(c(0.8, 0.2), c(0.5, 0.5), c(0.7, 0.3))
##' n.rb.varse(0.01, c(5, 3, 1), c(0.1, 0.1, 0.8), c(0.4, 0.4, 0.2), c(0.92, 0.8), m, 0.95)
##'
##' m<- rbind(c(0.8, 0.2), c(0.6, 0.4))
##' n.rb.varse(0.05, c(3, 1), c(0.2, 0.8), c(0.7, 0.3), c(0.95, 0.8), m, 0.95)
##'
##' m<- rbind(c(1), c(1))
##' n.rb.varse(0.05, c(3, 1), c(0.2, 0.8), c(0.7, 0.3), c(0.95), m, 0.99)
n.rb.varse<- function(pstar, rr, ppr, spr, se, spr.rg, sep) {
# Mean unit sensitivity per risk group, weighted by the planned proportion
# of samples at each sensitivity level within the group.
mean.se<- numeric(length(rr))
for (r in 1:length(rr)) {
mean.se[r]<- sum(spr.rg[r,] * se)
}
epi<- epi.calc(pstar, rr, ppr)[[1]]
# Probability a single sampled unit tests positive, averaged across groups.
p.pos<- sum(epi*mean.se*spr)
n.total<- ceiling(log(1 - sep)/log(1 - p.pos))
n.rg<- numeric(length(rr))
n<- array(0, dim = c(nrow(spr.rg), ncol(spr.rg)))
# Allocate n.total across risk groups by spr (rounded up), with the last
# group taking the remainder; then split each group's allocation across
# sensitivity levels by spr.rg in the same remainder-to-last fashion.
for (i in 1:length(rr)) {
if (i<length(rr)) {
n.rg[i]<- ceiling(n.total*spr[i])
} else {
n.rg[i]<- n.total - sum(n.rg)
}
for (j in 1:length(se)) {
if (j<length(se)) {
n[i, j]<- ceiling(n.rg[i]*spr.rg[i, j])
} else {
n[i, j]<- n.rg[i] - sum(n[i,])
}
}
}
# Append row/column totals and label the result matrix for display.
n<- cbind(n, n.rg)
tmp<- apply(n, FUN=sum, MARGIN=2)
n<- rbind(n, tmp)
colnames(n)<- c(paste("Se =", se), "Total")
rownames(n)<- c(paste("RR =", rr), "Total")
return(list(n=n, epi=epi, mean.se=mean.se))
}
| /RSurveillance/R/risk_based_functions.R | no_license | ingted/R-Examples | R | false | false | 26,208 | r | ############################################################################
# risk-based surveillance
###########################################################################
# adj.risk
# epi.calc
# sep.rb.bin
# sep.rb.hypergeo
# sep.rb.bin.varse
# sep.rb.hypergeo.varse
# sep.rb2.bin
# sep.rb2.hypergeo
# sse.rb.2stage
# sse.combined
# n.rb
# n.rb.varse
# include freedom functions
##' Adjusted risk
##' @description For each risk stratum, returns its relative risk divided by
##' the population-weighted mean relative risk. An intermediate quantity in
##' computing effective probabilities of infection for risk-based
##' surveillance.
##' @param rr relative risk values (vector of values corresponding to the number of risk strata)
##' @param ppr population proportions corresponding to
##' rr values (vector of equal length to rr)
##' @return vector of adjusted risk values (in order corresponding to rr)
##' @keywords methods
##' @export
##' @examples
##' # examples for adj.risk
##' adj.risk(c(5, 1), c(0.1, 0.9))
##' adj.risk(c(5, 3, 1), c(0.1, 0.1, 0.8))
adj.risk <- function(rr, ppr) {
  # Population-weighted mean relative risk acts as the normalising constant.
  weighted.mean.risk <- sum(rr * ppr)
  rr / weighted.mean.risk
}
##' Effective probability of infection (EPI)
##' @description Returns the design prevalence adjusted for each risk
##' group's relative risk (the effective probability of infection), for use
##' in risk-based surveillance calculations.
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding to
##' the number of risk strata)
##' @param ppr population proportions corresponding to rr values
##' (vector of equal length to rr)
##' @return list of 2 elements, a vector of EPI values and a vector of corresponding
##' adjusted risks (in corresponding order to rr)
##' @keywords methods
##' @export
##' @examples
##' # examples for epi.calc
##' epi.calc(0.1, c(5, 1), c(0.1, 0.9))
##' epi.calc(0.02, c(5, 3, 1), c(0.1, 0.1, 0.8))
epi.calc <- function(pstar, rr, ppr) {
  ar <- adj.risk(rr, ppr)
  # EPI = design prevalence scaled by each stratum's adjusted risk.
  list(epi = pstar * ar, adj.risk = ar)
}
##' Binomial risk-based population sensitivity
##' @description Risk-based population sensitivity for a single risk factor
##' via the binomial method (large-population assumption); unit sensitivity
##' may differ between risk strata.
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding to the number of risk strata)
##' @param ppr population proportions corresponding to rr values
##' (vector of equal length to rr)
##' @param n sample size per risk category (vector same length as
##' rr and ppr)
##' @param se unit sensitivity, can vary among risk strata (fixed value or
##' vector same length as rr, ppr, n)
##' @return list of 3 elements, a scalar of population-level sensitivity
##' a vector of EPI values and a vector of corresponding adjusted risks
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb.bin
##' sep.rb.bin(0.1, c(5, 3, 1), c(0.1, 0.1, 0.8), c(5, 5, 5), 0.9)
##' sep.rb.bin(0.1, c(5, 1), c(0.1, 0.9), c(10, 5), c(0.95, 0.9))
##' sep.rb.bin(0.1, c(5, 1), c(0.1, 0.9), c(10, 5), c(0.9, 0.9))
##' sep.rb.bin(0.01, c(5, 1), c(0.1, 0.9), c(90, 50), c(0.9, 0.9))
sep.rb.bin <- function(pstar, rr, ppr, n, se) {
  calc <- epi.calc(pstar, rr, ppr)
  # P(all n tests negative) per stratum, combined across strata.
  prob.all.negative <- prod((1 - se * calc$epi)^n)
  list(sep = 1 - prob.all.negative, epi = calc$epi, adj.risk = calc$adj.risk)
}
##' Hypergeometric risk-based population sensitivity
##' @description Risk-based population sensitivity for a single risk factor
##' via the hypergeometric approximation (finite, known population size);
##' unit sensitivity may differ between risk strata.
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding
##' to the number of risk strata)
##' @param n sample size per risk category (vector same length as
##' rr and ppr)
##' @param N Population size per risk category (vector same length
##' as rr and ppr)
##' @param se unit sensitivity, can vary among risk strata (fixed value or a vector the same
##' length as rr, ppr, n)
##' @return list of 3 elements, a scalar of population-level sensitivity
##' a vector of EPI values and a vector of corresponding adjusted risks
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb.bin
##' sep.rb.hypergeo(0.1, c(5, 3, 1), c(10, 10, 80), c(5, 5, 5), 0.9)
##' sep.rb.hypergeo(0.1, c(5, 1), c(15, 140), c(10, 5), c(0.95, 0.9))
##' sep.rb.hypergeo(0.1, c(5, 1), c(23, 180), c(10, 5), c(0.9, 0.9))
##' sep.rb.hypergeo(0.01, c(5, 1), c(100, 900), c(90, 50), c(0.9, 0.9))
sep.rb.hypergeo <- function(pstar, rr, N, n, se) {
  # Stratum proportions follow directly from the known population sizes.
  calc <- epi.calc(pstar, rr, N / sum(N))
  # Sampling fraction n/N per stratum, raised to the expected number of
  # infected units (epi * N) -- the hypergeometric approximation.
  prob.all.negative <- prod((1 - se * n / N)^(calc$epi * N))
  list(sep = 1 - prob.all.negative, epi = calc$epi, adj.risk = calc$adj.risk)
}
##' Binomial risk-based population sensitivity for varying unit sensitivity
##' @description Calculates population sensitivity for a single risk factor
##' and varying unit sensitivity using binomial method (assumes large population)
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding
##' to the number of risk strata)
##' @param ppr population proportions corresponding to rr values
##' (vector of equal length to rr)
##' @param df dataframe of values for each combination of risk stratum and
##' sensitivity level,
##' col 1 = risk group index, col 2 = unit Se, col 3 = n
##' (sample size for that risk group and unit sensitivity)
##' @return list of 3 elements, a scalar of population-level sensitivity
##' a vector of EPI values and a vector of corresponding adjusted risks
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb.bin.varse
##' rg<- c(1, 1, 2, 2)
##' se<- c(0.92, 0.85, 0.92, 0.85)
##' n<- c(80, 30, 20, 30)
##' df<- data.frame(rg, se, n)
##' sep.rb.bin.varse(0.01, c(5, 1), c(0.1, 0.9), df)
##'
##' rg<- c(1, 1, 2, 2)
##' se<- c(0.95, 0.8, 0.95, 0.8)
##' n<- c(20, 10, 10, 5)
##' df<- data.frame(rg, se, n)
##' sep.rb.bin.varse(0.05, c(3, 1), c(0.2, 0.8), df)
##'
##' rg<- c(rep(1, 30), rep(2, 15))
##' se<- c(rep(0.95, 20), rep(0.8, 10), rep(0.95, 10), rep(0.8, 5))
##' n<- rep(1, 45)
##' df<- data.frame(rg, se, n)
##' sep.rb.bin.varse(0.02, c(3, 1), c(0.2, 0.8), df)
##'
##' rg<- c(1, 2, 3, 1, 2, 3)
##' se<- c(0.95, 0.95, 0.95, 0.8, 0.8, 0.8)
##' n<- c(20, 10, 10, 30, 5, 5)
##' df<- data.frame(rg, se, n)
##' sep.rb.bin.varse(0.01, c(5, 3, 1), c(0.1, 0.3, 0.6), df)
sep.rb.bin.varse<- function(pstar, rr, ppr, df) {
  epi<- epi.calc(pstar, rr, ppr)
  # FIX: exponent was `df[3]` (a one-column data frame), which silently
  # promoted p.all.neg to a data frame and relied on Summary.data.frame for
  # prod(). `df[, 3]` extracts the numeric sample-size vector, consistent
  # with the df[, 2] / df[, 1] usage on the same line; the scalar sep result
  # is unchanged.
  p.all.neg<- (1 - df[, 2]*epi[[1]][df[, 1]])^df[, 3]
  sep<- 1 - prod(p.all.neg)
  return(list(sep=sep, epi=epi[[1]], adj.risk=epi[[2]]))
}
##' Hypergeometric risk-based population sensitivity for varying unit sensitivity
##' @description Calculates population sensitivity for a single risk factor
##' and varying unit sensitivity using hypergeometric approximation method
##' (assumes known population size)
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector of values corresponding
##' to the number of risk strata)
##' @param N vector of population size for each risk group, corresponding to rr values
##' (vector of equal length to rr)
##' @param df dataframe of values for each combination of risk stratum and
##' sensitivity level,
##' col 1 = risk group index, col 2 = unit Se, col 3 = n
##' (sample size for risk group and unit sensitivity)
##' @return list of 5 elements, a scalar of population-level sensitivity
##' a vector of EPI values, a vector of corresponding Adjusted risks
##' a vector of sample sizes (n) per risk group and a vector of
##' mean unit sensitivities per risk group
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb.hypergeo.varse
##' rg<- c(1, 1, 2, 2)
##' se<- c(0.92, 0.85, 0.92, 0.85)
##' n<- c(80, 30, 20, 30)
##' df<- data.frame(rg, se, n)
##' sep.rb.hypergeo.varse(0.01, c(5, 1), c(200, 1800), df)
##'
##' rg<- c(1, 1, 2, 2)
##' se<- c(0.95, 0.8, 0.95, 0.8)
##' n<- c(20, 10, 10, 5)
##' df<- data.frame(rg, se, n)
##' sep.rb.hypergeo.varse(0.05, c(3, 1), c(100, 400), df)
##'
##' rg<- c(rep(1, 30), rep(2, 15))
##' se<- c(rep(0.95, 20), rep(0.8, 10), rep(0.95, 10), rep(0.8, 5))
##' n<- rep(1, 45)
##' df<- data.frame(rg, se, n)
##' sep.rb.hypergeo.varse(0.02, c(3, 1), c(100, 400), df)
##'
##' rg<- c(1, 2, 3, 1, 2, 3)
##' se<- c(0.95, 0.95, 0.95, 0.8, 0.8, 0.8)
##' n<- c(20, 10, 10, 30, 5, 5)
##' df<- data.frame(rg, se, n)
##' sep.rb.hypergeo.varse(0.01, c(5, 3, 1), c(100, 300, 600), df)
sep.rb.hypergeo.varse <- function(pstar, rr, N, df) {
  # Population proportions derived from the known stratum sizes
  ppr <- N / sum(N)
  epi <- epi.calc(pstar, rr, ppr)
  groups <- seq_along(rr)
  # Collapse the sampling frame to one row per risk group:
  # total sample size and mean unit sensitivity within each group
  n <- vapply(groups, function(g) sum(df[df[, 1] == g, 3]), numeric(1))
  se <- vapply(groups, function(g) mean(df[df[, 1] == g, 2]), numeric(1))
  # Hypergeometric approximation per stratum: (1 - se*n/N)^(EPI*N)
  p.all.neg <- (1 - se * n / N)^(epi[[1]] * N)
  list(sep = 1 - prod(p.all.neg), epi = epi[[1]], adj.risk = epi[[2]],
       n = n, se = se)
}
##' Binomial risk-based population sensitivity for 2 risk factors
##' @description Calculates risk-based population sensitivity for
##' two risk factors, using binomial method (assumes a large population)
##' @param pstar design prevalence (scalar)
##' @param rr1 relative risks for first level risk factor (vector of values corresponding
##' to the number of risk strata)
##' @param rr2 relative risks for second level risk factor,
##' matrix, rows = levels of rr1, cols = levels of rr2
##' @param ppr1 population proportions for first level risk factor (vector of
##' same length as rr1)
##' @param ppr2 population proportions for second level
##' risk factor, matrix, rows = levels of rr1, cols = levels of rr2
##' @param n matrix of number tested for each risk group
##' (rows = levels of rr1, cols = levels of rr2)
##' @param se test unit sensitivity (scalar)
##' @return list of 4 elements, a scalar of population-level sensitivity
##' a matrix of EPI values, a vector of corresponding Adjusted risks for
##' the first risk factor and a matrix of adjusted risks for the second
##' risk factor
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb2.binom
##' pstar<- 0.01
##' rr1<- c(3, 1)
##' ppr1<- c(0.2, 0.8)
##' rr2<- rbind(c(4,1), c(4,1))
##' ppr2<- rbind(c(0.1, 0.9), c(0.3, 0.7))
##' se<- 0.8
##' n<- rbind(c(50, 20), c(20, 10))
##' sep.rb2.binom(pstar, rr1, ppr1, rr2, ppr2, n, se)
sep.rb2.binom <- function(pstar, rr1, ppr1, rr2, ppr2, n, se) {
  # Adjusted risks for the first-level risk factor
  ar1 <- adj.risk(rr1, ppr1)
  # Template array dimensioned and labelled by the two risk factors;
  # reused for second-level adjusted risks, EPI and negative probabilities
  ar2 <- array(0, dim = dim(rr2))
  rownames(ar2) <- paste("RR1", seq_along(rr1), sep = "=")
  colnames(ar2) <- paste("RR2", seq_len(ncol(rr2)), sep = "=")
  epi <- ar2
  p.neg <- ar2
  # A scalar se applies to every risk-group combination
  if (length(se) == 1) {
    se <- array(se, dim = dim(rr2))
  }
  for (r in seq_along(rr1)) {
    # Second-level adjusted risks are conditional on the first-level stratum
    ar2[r, ] <- adj.risk(rr2[r, ], ppr2[r, ])
    epi[r, ] <- ar1[r] * ar2[r, ] * pstar
    # Binomial: probability all n tested units in the cell are negative
    p.neg[r, ] <- (1 - epi[r, ] * se[r, ])^n[r, ]
  }
  list(sep = 1 - prod(p.neg), epi = epi, ar1 = ar1, ar2 = ar2)
}
##' Hypergeometric risk-based population sensitivity for 2 risk factors
##' @description Calculates risk-based population sensitivity for
##' two risk factors, using hypergeometric approximation method
##' (assumes a known population size)
##' @param pstar design prevalence (scalar)
##' @param rr1 relative risks for first level risk factor (vector of values corresponding
##' to the number of risk strata)
##' @param rr2 relative risks for second level risk factor,
##' matrix, rows = levels of rr1, cols = levels of rr2
##' @param N matrix of population size for each risk group
##' (rows = levels of rr1, cols = levels of rr2)
##' @param n matrix of number tested (sample size) for each risk group
##' (rows = levels of rr1, cols = levels of rr2)
##' @param se test unit sensitivity (scalar)
##' @return list of 6 elements, a scalar of population-level sensitivity
##' a matrix of EPI values, a vector of corresponding Adjusted risks for
##' the first risk factor and a matrix of adjusted risks for the second risk factor,
##' a vector of population proportions for the first risk factor
##' and a matrix of population proportions for the second risk factor
##' @keywords methods
##' @export
##' @examples
##' # examples for sep.rb2.hypergeo
##' pstar<- 0.01
##' rr1<- c(3, 1)
##' rr2<- rbind(c(4,1), c(4,1))
##' N<- rbind(c(100, 500), c(300, 1000))
##' n<- rbind(c(50, 20), c(20, 10))
##' se<- 0.8
##' sep.rb2.hypergeo(pstar, rr1, rr2, N, n, se)
sep.rb2.hypergeo <- function(pstar, rr1, rr2, N, n, se) {
  # First-level population proportions from the known stratum sizes
  ppr1 <- rowSums(N) / sum(N)
  # Template array labelled by the two risk factors; reused for the
  # second-level proportions, adjusted risks, EPI and negative probabilities
  ppr2 <- array(0, dim = dim(rr2))
  rownames(ppr2) <- paste("RR1", seq_along(rr1), sep = "=")
  colnames(ppr2) <- paste("RR2", seq_len(ncol(rr2)), sep = "=")
  ar1 <- adj.risk(rr1, ppr1)
  ar2 <- array(0, dim = dim(rr2))
  rownames(ar2) <- rownames(ppr2)
  colnames(ar2) <- colnames(ppr2)
  epi <- ar2
  p.neg <- ar2
  # A scalar se applies to every risk-group combination
  if (length(se) == 1) {
    se <- array(se, dim = dim(rr2))
  }
  for (r in seq_along(rr1)) {
    # Second-level proportions and adjusted risks, conditional on stratum r
    ppr2[r, ] <- N[r, ] / sum(N[r, ])
    ar2[r, ] <- adj.risk(rr2[r, ], ppr2[r, ])
    epi[r, ] <- ar1[r] * ar2[r, ] * pstar
    # Hypergeometric approximation per cell: (1 - se*n/N)^(EPI*N)
    p.neg[r, ] <- (1 - se[r, ] * n[r, ] / N[r, ])^(epi[r, ] * N[r, ])
  }
  list(sep = 1 - prod(p.neg), epi = epi,
       ar1 = ar1, ar2 = ar2,
       ppr1 = ppr1, ppr2 = ppr2)
}
##' Two-stage risk-based system sensitivity
##' @description Calculates system sensitivity for 2 stage risk-based
##' sampling, allowing for a single risk factor at each stage and
##' using either binomial or hypergeometric approximation
##' @param C Population size (number of clusters), NA = unknown (default)
##' @param pstar.c cluster level design prevalence (scalar)
##' @param pstar.u unit level design prevalence (scalar)
##' @param rr.c cluster level relative risks (vector with length
##' corresponding to the number of risk strata),
##' use rr.c = c(1,1) if risk factor does not apply
##' @param rr.u unit level relative risks (vector with length
##' corresponding to the number of risk strata),
##' use rr.u = c(1,1) if risk factor does not apply
##' @param ppr.c cluster level population proportions for risk
##' categories (vector), NA if no cluster level risk factor
##' @param N population size per risk group for each cluster,
##' NA or matrix of N for each risk group
##' for each cluster, N=NA means cluster sizes not provided
##' @param rg vector of cluster level risk group (index) for each cluster
##' @param n sample size per risk group for each cluster sampled,
##' matrix, 1 row for each cluster, columns = unit level risk groups
##' @param ppr.u unit level population proportions for each risk group (optional)
##' matrix, 1 row for each cluster, columns = unit level risk groups,
##' not required if N is provided
##' @param se unit sensitivity for each cluster, scalar or
##' vector of values for each cluster, equal in length to n
##' @return list of 2 elements, a scalar of population-level (surveillance system)
##' sensitivity and a vector of cluster-level sensitivities
##' @keywords methods
##' @export
##' @examples
##' # examples for sse.rb.2stage
##' pstar.c<- 0.02
##' pstar.u<- 0.1
##' rr.c<- c(5, 1)
##' ppr.c<- c(0.1, 0.9)
##' rr.u<- c(3, 1)
##' se<- 0.9
##' n<- cbind(rep(10, 50), rep(5, 50))
##' rg<- c(rep(1, 30), rep(2, 20))
##' ppr.u<- cbind(rep(0.2, 50), rep(0.8, 50))
##' N<- cbind(rep(30, 50), rep(120, 50))
##' C<- 500
##' sse.rb.2stage(C=NA, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N=NA, n, rg, se)
##' sse.rb.2stage(C, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N=NA, n, rg, se)
##' sse.rb.2stage(C=NA, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N, n, rg, se)
##' sse.rb.2stage(C, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N, n, rg, se)
sse.rb.2stage<- function(C=NA, pstar.c, pstar.u, rr.c, ppr.c, rr.u, ppr.u, N=NA, n, rg, se) {
# recycle a scalar unit sensitivity so there is one value per sampled cluster
if (length(se) == 1) se<- rep(se, nrow(n))
sep<- numeric(nrow(n))
# calculate sep for all clusters
if (length(N) == 1) {
# cluster sizes not provided so use binomial for all clusters
for (i in 1:nrow(n)) {
sep[i]<- sep.rb.bin(pstar.u, rr.u, ppr.u[i,], n[i,], se[i])[[1]]
}
} else {
# cluster sizes provided so use hypergeometric unless NA for specific clusters
for (i in 1:nrow(n)) {
if (is.na(N[i,1])) {
# no size recorded for this cluster: fall back to the binomial form
sep[i]<- sep.rb.bin(pstar.u, rr.u, ppr.u[i,], n[i,], se[i])[[1]]
} else {
sep[i]<- sep.rb.hypergeo(pstar.u, rr.u, N[i,], n[i,], se[i])[[1]]
}
}
}
# calculate system sensitivity: each cluster is treated as a single tested
# "unit" whose sensitivity equals its cluster-level sep (hence n = 1 per row
# in the df passed down)
if (is.na(C)) {
# Population size unknown, use binomial
sse<- sep.rb.bin.varse(pstar.c, rr.c, ppr.c, df=cbind(rg, sep, 1))
} else {
sse<- sep.rb.hypergeo.varse(pstar.c, rr.c, C*ppr.c, df=cbind(rg, sep, 1))
}
return(list("System sensitivity" = sse[[1]],
"Cluster sensitivity" = sep))
}
##' System sensitivity by combining multiple surveillance components
##' @description Calculates overall system sensitivity for
##' multiple components, accounting for lack of independence
##' (overlap) between components
##' @param C population sizes (number of clusters) for each risk group,
##' NA or vector of same length as rr
##' @param pstar.c cluster level design prevalence (scalar)
##' @param rr cluster level relative risks (vector, length
##' equal to the number of risk strata)
##' @param ppr cluster level population proportions (optional),
##' not required if C is specified (NA or vector of same length as rr)
##' @param sep sep values for clusters in each component and
##' corresponding risk group. A list with multiple elements, each element
##' is a dataframe of sep values from a separate component,
##' first column= clusterid, 2nd =cluster-level risk group index, 3rd col = sep
##' @return list of 2 elements, a matrix (or vector if C not specified)
##' of population-level (surveillance system)
##' sensitivities (binomial and hypergeometric and adjusted vs unadjusted) and
##' a matrix of adjusted and unadjusted component sensitivities for each component
##' @keywords methods
##' @export
##' @examples
##' # example for sse.combined (checked in excel combined components.xlsx)
##' C<- c(300, 1200)
##' pstar<- 0.01
##' rr<- c(3,1)
##' ppr<- c(0.2, 0.8)
##' comp1<- data.frame(id=1:100, rg=c(rep(1,50), rep(2,50)), cse=rep(0.5,100))
##' comp2<- data.frame(id=seq(2, 120, by=2), rg=c(rep(1,25), rep(2,35)), cse=runif(60, 0.5, 0.8))
##' comp3<- data.frame(id=seq(5, 120, by=5), rg=c(rep(1,10), rep(2,14)), cse=runif(24, 0.7, 1))
##' sep<- list(comp1, comp2, comp3)
##' sse.combined(C, pstar, rr, sep = sep)
##' sse.combined(C=NA, pstar, rr, ppr, sep = sep)
sse.combined<- function(C = NA, pstar.c, rr, ppr, sep) {
# when risk-group population sizes are given, derive the proportions from them
# (ppr may then be missing; it is only evaluated here, lazily)
if (length(C) > 1) ppr<- C/sum(C)
components<- length(sep)
epi<- epi.calc(pstar.c, rr, ppr)[[1]]
# Create master list of clusters sampled: full outer join of the component
# data frames on cluster id; after all merges the columns are id, then one
# (rg, cse) pair per component, with NA where a cluster was not covered
cluster.list<- sep[[1]]
i<- 2
while (i <= components) {
cluster.list<- merge(cluster.list, sep[[i]], by.x = 1, by.y = 1, all.x=T, all.y=T)
i<- i+1
}
# ensure risk group recorded in data: a cluster absent from component 1 has
# NA in column 2, so take its risk group from the first component that has it
# (component k's rg column sits at index (k-1)*2+2)
risk.group<- cluster.list[,2]
tmp<- which(is.na(risk.group))
if (length(tmp)>0) {
for (i in tmp) {
j<- 2
while (j<=components && is.na(risk.group[i])) {
risk.group[i]<- cluster.list[i,(j-1)*2+2]
j<- j+1
}
}
}
# Replace NA values with 0 (cluster not tested by that component contributes
# cse = 0, i.e. no detection probability, in the calculations below)
for (i in 2:ncol(cluster.list)) {
cluster.list[is.na(cluster.list[,i]), i]<- 0
}
# set up arrays for epi and p.neg (adjusted and unadjusted) for each cluster and each component
epi.c<- array(0, dim = c(nrow(cluster.list), components))
epi.c[,1]<- epi[risk.group]
# dim 3: 1 = adjusted, 2 = unadjusted (independence)
p.neg<- array(0, dim = c(nrow(cluster.list), components, 2))
p.neg[,1,1]<- 1-cluster.list[,3]*epi.c[,1]
p.neg[,1,2]<- p.neg[,1,1]
for (i in 2:components) {
for (j in 1:nrow(cluster.list)) {
# adjusted EPI: discount by what the previous component already found,
# using that component's cse (column (i-1)*2+1).
# NOTE(review): pfree.1(...)[,4] is assumed to be the posterior
# probability of freedom - confirm against pfree.1's definition
epi.c[j,i]<- 1 - pfree.1(cluster.list[j,(i-1)*2+1], 0, 1-epi.c[j,i-1])[,4]
}
p.neg[,i,1]<- 1-cluster.list[,(i-1)*2+3]*epi.c[,i]
# unadjusted version keeps the original (component 1) EPI for every component
p.neg[,i,2]<- 1-cluster.list[,(i-1)*2+3]*epi.c[,1]
}
# calculate n, mean sep and mean epi for each risk group and component
n<- array(0, dim = c(components, length(rr)))
sep.mean<- array(0, dim = c(components, length(rr)))
epi.mean<- array(0, dim = c(components, length(rr), 2))
for (i in 1:components) {
# NOTE(review): table()/split() assume every risk group occurs in every
# component; a missing group would shorten or shift these rows - confirm
n[i,]<- table(sep[[i]][2])
sep.mean[i,]<- sapply(split(sep[[i]][3], sep[[i]][2]), FUN=colMeans)
epi.mean[i,,1]<- sapply(split(epi.c[cluster.list[,(i-1)*2+2] > 0,i], cluster.list[cluster.list[,(i-1)*2+2] > 0,(i-1)*2+2]), FUN=mean)
# NOTE(review): unadjusted means copy component 1's adjusted means
# (fixed index [1,,1], not [i,,1]); this mirrors p.neg[,i,2] above using
# epi.c[,1], but the fixed index is worth confirming
epi.mean[i,,2]<- epi.mean[1,,1]
}
# Calculate cse and sse
cse<- array(0, dim = c(2, components, 2))
rownames(cse)<- c("Adjusted", "Unadjusted")
colnames(cse)<- paste("Component", 1:components)
dimnames(cse)[[3]]<- c("Binomial", "Hypergeometric")
sse<- array(0, dim = c(2, 2))
rownames(sse)<- rownames(cse)
colnames(sse)<- dimnames(cse)[[3]]
rownames(epi.mean)<- colnames(cse)
rownames(sep.mean)<- colnames(cse)
rownames(n)<- colnames(cse)
colnames(epi.mean)<- paste("RR =",rr)
colnames(sep.mean)<- paste("RR =",rr)
colnames(n)<- paste("RR =",rr)
dimnames(epi.mean)[[3]]<- rownames(cse)
# rows = adjusted and unadjusted, dim3 = binomial and hypergeometric
for (i in 1:2) {
for (j in 1:components) {
cse[i,j,1]<- 1 - prod(p.neg[,j,i])
# hypergeometric component sensitivity only when group sizes C are known
if (length(C) > 1) {
cse[i,j,2]<- 1 - prod((1 - sep.mean[j,]*n[j,]/C)^(epi.mean[j,,i]*C))
}
}
sse[i,1]<- 1- prod(1 - cse[i,,1])
sse[i,2]<- 1- prod(1 - cse[i,,2])
}
# drop the (all-zero) hypergeometric results when C was not supplied
if (length(C) <= 1) {
sse<- sse[,1]
cse<- cse[,,1]
}
return(list("System sensitivity"= sse,
"Component sensitivity" = cse))
}
##' Risk-based sample size
##' @description Calculates sample size for risk-based sampling
##' for a single risk factor and using binomial method
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector, length equal to the number of risk strata)
##' @param ppr population proportions corresponding to rr values
##' (vector of equal length to rr)
##' @param spr planned surveillance proportion for each risk group
##' (vector equal length to rr, ppr)
##' @param se unit sensitivity (fixed or vector same length as rr, ppr, n)
##' @param sep required population sensitivity (scalar)
##' @return list of 4 elements, a vector of sample sizes for each risk group,
##' a scalar of total sample size, a vector of EPI values and a vector of
##' adjusted risks
##' @keywords methods
##' @export
##' @examples
##' # examples for n.rb
##' n.rb(0.1, c(5, 3, 1), c(0.1, 0.10, 0.80), c(0.5, 0.3, 0.2), 0.9, 0.95)
##' n.rb(0.01, c(5, 1), c(0.1, 0.9), c(0.8, 0.2), c(0.9, 0.95), 0.95)
n.rb <- function(pstar, rr, ppr, spr, se, sep) {
  epi <- epi.calc(pstar, rr, ppr)
  # Probability that a single randomly drawn sample tests positive,
  # averaged over the planned surveillance proportions
  p.pos <- sum(epi[[1]] * spr * se)
  n.total <- ceiling(log(1 - sep) / log(1 - p.pos))
  # Allocate by planned surveillance proportion, rounding up, and give the
  # final group the remainder so group sizes sum exactly to n.total
  k <- length(rr)
  n <- numeric(k)
  if (k > 1) {
    head.idx <- seq_len(k - 1)
    n[head.idx] <- ceiling(n.total * spr[head.idx])
  }
  n[k] <- n.total - sum(n)
  list(n = n, total = n.total, epi = epi[[1]], adj.risk = epi[[2]])
}
##' Risk-based sample size for varying unit sensitivity
##' @description Calculates sample size for risk-based sampling
##' for a single risk factor and varying unit sensitivity,
##' using binomial method
##' @param pstar design prevalence (scalar)
##' @param rr relative risk values (vector, length equal to the number of risk strata)
##' @param ppr population proportions for each risk group,
##' vector of same length as rr
##' @param spr planned surveillance proportions for each risk group,
##' vector of same length as rr
##' @param se unit sensitivities (vector of group values)
##' @param spr.rg proportions of samples for each sensitivity value
##' in each risk group (matrix with rows = risk groups, columns = sensitivity values),
##' row sums must equal 1
##' @param sep required population sensitivity (scalar)
##' @return list of 3 elements, a matrix of sample sizes for each risk
##' and sensitivity group, a vector of EPI values and a vector of
##' mean sensitivity for each risk group
##' @keywords methods
##' @export
##' @examples
##' # examples for n.rb.varse
##' m<- rbind(c(0.8, 0.2), c(0.5, 0.5), c(0.7, 0.3))
##' n.rb.varse(0.01, c(5, 3, 1), c(0.1, 0.1, 0.8), c(0.4, 0.4, 0.2), c(0.92, 0.8), m, 0.95)
##'
##' m<- rbind(c(0.8, 0.2), c(0.6, 0.4))
##' n.rb.varse(0.05, c(3, 1), c(0.2, 0.8), c(0.7, 0.3), c(0.95, 0.8), m, 0.95)
##'
##' m<- rbind(c(1), c(1))
##' n.rb.varse(0.05, c(3, 1), c(0.2, 0.8), c(0.7, 0.3), c(0.95), m, 0.99)
n.rb.varse <- function(pstar, rr, ppr, spr, se, spr.rg, sep) {
  # Mean unit sensitivity per risk group, weighted by the planned
  # within-group sensitivity-class proportions
  mean.se <- vapply(seq_along(rr),
                    function(r) sum(spr.rg[r, ] * se),
                    numeric(1))
  epi <- epi.calc(pstar, rr, ppr)[[1]]
  # Probability that one randomly drawn sample tests positive
  p.pos <- sum(epi * mean.se * spr)
  n.total <- ceiling(log(1 - sep) / log(1 - p.pos))
  n.rg <- numeric(length(rr))
  n <- array(0, dim = dim(spr.rg))
  for (r in seq_along(rr)) {
    # Risk-group totals: round up, last group takes the remainder so the
    # groups sum exactly to n.total
    if (r < length(rr)) {
      n.rg[r] <- ceiling(n.total * spr[r])
    } else {
      n.rg[r] <- n.total - sum(n.rg)
    }
    # Split the group's total across sensitivity classes the same way
    for (s in seq_along(se)) {
      if (s < length(se)) {
        n[r, s] <- ceiling(n.rg[r] * spr.rg[r, s])
      } else {
        n[r, s] <- n.rg[r] - sum(n[r, ])
      }
    }
  }
  # Append row/column totals for presentation, then label the table
  n <- cbind(n, n.rg)
  n <- rbind(n, apply(n, MARGIN = 2, FUN = sum))
  colnames(n) <- c(paste("Se =", se), "Total")
  rownames(n) <- c(paste("RR =", rr), "Total")
  list(n = n, epi = epi, mean.se = mean.se)
}
|
# WARNING: AUTOGENERATED CODE
#
# This code was generated by a tool.
# Autogenerated on: 2023-04-18
#
# Manual changes to this file may cause unexpected behavior in your application.
# Manual changes to this file will be overwritten if the code is regenerated.
# ##############################################################################
#' ProjectClient methods
#' @include AllClasses.R
#' @include AllGenerics.R
#' @include commons.R
#' @description This function implements the OpenCGA calls for managing Projects.
#' The following table summarises the available *actions* for this client:
#'
#' | endpointName | Endpoint WS | parameters accepted |
#' | -- | :-- | --: |
#' | create | /{apiVersion}/projects/create | include, exclude, includeResult, body[*] |
#' | search | /{apiVersion}/projects/search | include, exclude, limit, skip, owner, id, name, fqn, organization, description, study, creationDate, modificationDate, internalStatus, attributes |
#' | aggregationStats | /{apiVersion}/projects/{projects}/aggregationStats | projects[*], default, fileFields, individualFields, familyFields, sampleFields, cohortFields, jobFields |
#' | info | /{apiVersion}/projects/{projects}/info | include, exclude, projects[*] |
#' | incRelease | /{apiVersion}/projects/{project}/incRelease | project[*] |
#' | studies | /{apiVersion}/projects/{project}/studies | include, exclude, limit, skip, project[*] |
#' | update | /{apiVersion}/projects/{project}/update | include, exclude, project[*], includeResult, body[*] |
#'
#' @md
#' @seealso \url{http://docs.opencb.org/display/opencga/Using+OpenCGA} and the RESTful API documentation
#' \url{http://bioinfo.hpc.cam.ac.uk/opencga-prod/webservices/}
#' [*]: Required parameter
#' @export
setMethod("projectClient", "OpencgaR", function(OpencgaR, projects, project, endpointName, params=NULL, ...) {
# Autogenerated endpoint dispatcher: every branch delegates to fetchOpenCGA()
# with category "projects"; only the action name, the category id used
# (the 'projects' list vs the single 'project') and the HTTP method differ.
switch(endpointName,
#' @section Endpoint /{apiVersion}/projects/create:
#' Create a new project.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param includeResult Flag indicating to include the created or updated document result in the response.
#' @param data JSON containing the mandatory parameters.
create=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=NULL, subcategory=NULL,
subcategoryId=NULL, action="create", params=params, httpMethod="POST", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/search:
#' Search projects.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param limit Number of results to be returned.
#' @param skip Number of results to skip.
#' @param owner Owner of the project.
#' @param id Project [user@]project where project can be either the ID or the alias.
#' @param name Project name.
#' @param fqn Project fqn.
#' @param organization Project organization.
#' @param description Project description.
#' @param study Study id.
#' @param creationDate Creation date. Format: yyyyMMddHHmmss. Examples: >2018, 2017-2018, <201805.
#' @param modificationDate Modification date. Format: yyyyMMddHHmmss. Examples: >2018, 2017-2018, <201805.
#' @param internalStatus Filter by internal status.
#' @param attributes Attributes.
search=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=NULL, subcategory=NULL,
subcategoryId=NULL, action="search", params=params, httpMethod="GET", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/{projects}/aggregationStats:
#' Fetch catalog project stats.
#' @param projects Comma separated list of projects [user@]project up to a maximum of 100.
#' @param default Calculate default stats.
#' @param fileFields List of file fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param individualFields List of individual fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param familyFields List of family fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param sampleFields List of sample fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param cohortFields List of cohort fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param jobFields List of job fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
aggregationStats=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=projects, subcategory=NULL,
subcategoryId=NULL, action="aggregationStats", params=params, httpMethod="GET", as.queryParam=NULL,
...),
#' @section Endpoint /{apiVersion}/projects/{projects}/info:
#' Fetch project information.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param projects Comma separated list of projects [user@]project up to a maximum of 100.
info=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=projects, subcategory=NULL,
subcategoryId=NULL, action="info", params=params, httpMethod="GET", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/{project}/incRelease:
#' Increment current release number in the project.
#' @param project Project [user@]project where project can be either the ID or the alias.
incRelease=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=project, subcategory=NULL,
subcategoryId=NULL, action="incRelease", params=params, httpMethod="POST", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/{project}/studies:
#' Fetch all the studies contained in the project.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param limit Number of results to be returned.
#' @param skip Number of results to skip.
#' @param project Project [user@]project where project can be either the ID or the alias.
studies=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=project, subcategory=NULL,
subcategoryId=NULL, action="studies", params=params, httpMethod="GET", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/{project}/update:
#' Update some project attributes.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param project Project [user@]project where project can be either the ID or the alias.
#' @param includeResult Flag indicating to include the created or updated document result in the response.
#' @param data JSON containing the params to be updated. It will be only possible to update organism fields not previously defined.
update=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=project, subcategory=NULL,
subcategoryId=NULL, action="update", params=params, httpMethod="POST", as.queryParam=NULL, ...),
# NOTE(review): the trailing comma above leaves an empty final switch()
# alternative; an unrecognised endpointName appears to return NULL
# invisibly - confirm. Autogenerated file: do not edit by hand.
)
}) | /opencga-client/src/main/R/R/Project-methods.R | permissive | opencb/opencga | R | false | false | 7,960 | r |
# WARNING: AUTOGENERATED CODE
#
# This code was generated by a tool.
# Autogenerated on: 2023-04-18
#
# Manual changes to this file may cause unexpected behavior in your application.
# Manual changes to this file will be overwritten if the code is regenerated.
# ##############################################################################
#' ProjectClient methods
#' @include AllClasses.R
#' @include AllGenerics.R
#' @include commons.R
#' @description This function implements the OpenCGA calls for managing Projects.
#' The following table summarises the available *actions* for this client:
#'
#' | endpointName | Endpoint WS | parameters accepted |
#' | -- | :-- | --: |
#' | create | /{apiVersion}/projects/create | include, exclude, includeResult, body[*] |
#' | search | /{apiVersion}/projects/search | include, exclude, limit, skip, owner, id, name, fqn, organization, description, study, creationDate, modificationDate, internalStatus, attributes |
#' | aggregationStats | /{apiVersion}/projects/{projects}/aggregationStats | projects[*], default, fileFields, individualFields, familyFields, sampleFields, cohortFields, jobFields |
#' | info | /{apiVersion}/projects/{projects}/info | include, exclude, projects[*] |
#' | incRelease | /{apiVersion}/projects/{project}/incRelease | project[*] |
#' | studies | /{apiVersion}/projects/{project}/studies | include, exclude, limit, skip, project[*] |
#' | update | /{apiVersion}/projects/{project}/update | include, exclude, project[*], includeResult, body[*] |
#'
#' @md
#' @seealso \url{http://docs.opencb.org/display/opencga/Using+OpenCGA} and the RESTful API documentation
#' \url{http://bioinfo.hpc.cam.ac.uk/opencga-prod/webservices/}
#' [*]: Required parameter
#' @export
setMethod("projectClient", "OpencgaR", function(OpencgaR, projects, project, endpointName, params=NULL, ...) {
# Autogenerated endpoint dispatcher: every branch delegates to fetchOpenCGA()
# with category "projects"; only the action name, the category id used
# (the 'projects' list vs the single 'project') and the HTTP method differ.
switch(endpointName,
#' @section Endpoint /{apiVersion}/projects/create:
#' Create a new project.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param includeResult Flag indicating to include the created or updated document result in the response.
#' @param data JSON containing the mandatory parameters.
create=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=NULL, subcategory=NULL,
subcategoryId=NULL, action="create", params=params, httpMethod="POST", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/search:
#' Search projects.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param limit Number of results to be returned.
#' @param skip Number of results to skip.
#' @param owner Owner of the project.
#' @param id Project [user@]project where project can be either the ID or the alias.
#' @param name Project name.
#' @param fqn Project fqn.
#' @param organization Project organization.
#' @param description Project description.
#' @param study Study id.
#' @param creationDate Creation date. Format: yyyyMMddHHmmss. Examples: >2018, 2017-2018, <201805.
#' @param modificationDate Modification date. Format: yyyyMMddHHmmss. Examples: >2018, 2017-2018, <201805.
#' @param internalStatus Filter by internal status.
#' @param attributes Attributes.
search=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=NULL, subcategory=NULL,
subcategoryId=NULL, action="search", params=params, httpMethod="GET", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/{projects}/aggregationStats:
#' Fetch catalog project stats.
#' @param projects Comma separated list of projects [user@]project up to a maximum of 100.
#' @param default Calculate default stats.
#' @param fileFields List of file fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param individualFields List of individual fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param familyFields List of family fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param sampleFields List of sample fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param cohortFields List of cohort fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
#' @param jobFields List of job fields separated by semicolons, e.g.: studies;type. For nested fields use >>, e.g.: studies>>biotype;type.
aggregationStats=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=projects, subcategory=NULL,
subcategoryId=NULL, action="aggregationStats", params=params, httpMethod="GET", as.queryParam=NULL,
...),
#' @section Endpoint /{apiVersion}/projects/{projects}/info:
#' Fetch project information.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param projects Comma separated list of projects [user@]project up to a maximum of 100.
info=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=projects, subcategory=NULL,
subcategoryId=NULL, action="info", params=params, httpMethod="GET", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/{project}/incRelease:
#' Increment current release number in the project.
#' @param project Project [user@]project where project can be either the ID or the alias.
incRelease=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=project, subcategory=NULL,
subcategoryId=NULL, action="incRelease", params=params, httpMethod="POST", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/{project}/studies:
#' Fetch all the studies contained in the project.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param limit Number of results to be returned.
#' @param skip Number of results to skip.
#' @param project Project [user@]project where project can be either the ID or the alias.
studies=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=project, subcategory=NULL,
subcategoryId=NULL, action="studies", params=params, httpMethod="GET", as.queryParam=NULL, ...),
#' @section Endpoint /{apiVersion}/projects/{project}/update:
#' Update some project attributes.
#' @param include Fields included in the response, whole JSON path must be provided.
#' @param exclude Fields excluded in the response, whole JSON path must be provided.
#' @param project Project [user@]project where project can be either the ID or the alias.
#' @param includeResult Flag indicating to include the created or updated document result in the response.
#' @param data JSON containing the params to be updated. It will be only possible to update organism fields not previously defined.
update=fetchOpenCGA(object=OpencgaR, category="projects", categoryId=project, subcategory=NULL,
subcategoryId=NULL, action="update", params=params, httpMethod="POST", as.queryParam=NULL, ...),
# NOTE(review): the trailing comma above leaves an empty final switch()
# alternative; an unrecognised endpointName appears to return NULL
# invisibly - confirm. Autogenerated file: do not edit by hand.
)
})
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hiReadsProcessor.R
\name{findLTRs}
\alias{findLTRs}
\title{Find the 5' LTRs and add results to SampleInfo object.}
\usage{
findLTRs(
sampleInfo,
showStats = FALSE,
doRC = FALSE,
parallel = TRUE,
samplenames = NULL,
bypassChecks = FALSE,
parallel2 = FALSE,
...
)
}
\arguments{
\item{sampleInfo}{sample information SimpleList object outputted from
\code{\link{findPrimers}}, which holds decoded and primed sequences for
samples per sector/quadrant along with information of sample to LTR
associations.}
\item{showStats}{toggle output of search statistics. Default is FALSE.
For paired end data, stats for "pair2" is relative to decoded and/or
primed reads.}
\item{doRC}{perform reverse complement search of the defined pattern/LTR
sequence. Default is FALSE.}
\item{parallel}{use parallel backend to perform calculation with
\code{\link{BiocParallel}}. Defaults to TRUE. If no parallel backend is
registered, then a serial version is run using
\code{\link{SerialParam}}. Parallelization is done at sample level per
sector.}
\item{samplenames}{a vector of samplenames to process. Default is NULL,
which processes all samples from sampleInfo object.}
\item{bypassChecks}{skip checkpoints which detect if something was odd with
the data? Default is FALSE.}
\item{parallel2}{perform parallelization at the sequence level. Default is FALSE.
Useful in cases where each sector has only one sample with numerous sequences.}
\item{...}{extra parameters to be passed to \code{\link{pairwiseAlignment}}.}
}
\value{
a SimpleList object similar to the sampleInfo parameter supplied, with new
data added under each sector and sample. New data attributes include: LTRed
}
\description{
Given a sampleInfo object, the function finds 5' LTR following the primer for
each sample per sector and adds the results back to the object. This is a
specialized function which depends on many other functions shown in 'see also
section' to perform specialized trimming of 5' viral LTRs found in the
sampleInfo object. The sequence itself is never trimmed but rather
coordinates of LTR portion is added to primer coordinates and recorded back
to the object and used subsequently by \code{\link{extractSeqs}} function to
perform the trimming. This function heavily relies on
\code{\link{pairwiseAlignSeqs}}.
}
\note{
\itemize{
\item For paired end data, qualityThreshold for pair 2 is decreased by
0.05 to increase chances of matching LTR sequence.
\item If parallel=TRUE, then be sure to have a parallel backend registered
before running the function. One can use any of the following
\code{\link{MulticoreParam}} \code{\link{SnowParam}}
}
}
\examples{
\dontrun{
load(file.path(system.file("data", package = "hiReadsProcessor"),
"FLX_seqProps.RData"))
findLTRs(seqProps, showStats=TRUE)
}
}
\seealso{
\code{\link{pairwiseAlignSeqs}}, \code{\link{vpairwiseAlignSeqs}},
\code{\link{extractFeature}}, \code{\link{extractSeqs}},
\code{\link{primerIDAlignSeqs}}, \code{\link{findPrimers}},
\code{\link{findLinkers}}, \code{\link{findAndTrimSeq}}
}
| /man/findLTRs.Rd | no_license | malnirav/hiReadsProcessor | R | false | true | 3,135 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hiReadsProcessor.R
\name{findLTRs}
\alias{findLTRs}
\title{Find the 5' LTRs and add results to SampleInfo object.}
\usage{
findLTRs(
sampleInfo,
showStats = FALSE,
doRC = FALSE,
parallel = TRUE,
samplenames = NULL,
bypassChecks = FALSE,
parallel2 = FALSE,
...
)
}
\arguments{
\item{sampleInfo}{sample information SimpleList object outputted from
\code{\link{findPrimers}}, which holds decoded and primed sequences for
samples per sector/quadrant along with information of sample to LTR
associations.}
\item{showStats}{toggle output of search statistics. Default is FALSE.
For paired end data, stats for "pair2" is relative to decoded and/or
primed reads.}
\item{doRC}{perform reverse complement search of the defined pattern/LTR
sequence. Default is FALSE.}
\item{parallel}{use parallel backend to perform calculation with
\code{\link{BiocParallel}}. Defaults to TRUE. If no parallel backend is
registered, then a serial version is run using
\code{\link{SerialParam}}. Parallelization is done at sample level per
sector.}
\item{samplenames}{a vector of samplenames to process. Default is NULL,
which processes all samples from sampleInfo object.}
\item{bypassChecks}{skip checkpoints which detect if something was odd with
the data? Default is FALSE.}
\item{parallel2}{perform parallelization at the sequence level. Default is FALSE.
Useful in cases where each sector has only one sample with numerous sequences.}
\item{...}{extra parameters to be passed to \code{\link{pairwiseAlignment}}.}
}
\value{
a SimpleList object similar to the sampleInfo parameter supplied, with new
data added under each sector and sample. New data attributes include: LTRed
}
\description{
Given a sampleInfo object, the function finds 5' LTR following the primer for
each sample per sector and adds the results back to the object. This is a
specialized function which depends on many other functions shown in 'see also
section' to perform specialized trimming of 5' viral LTRs found in the
sampleInfo object. The sequence itself is never trimmed but rather
coordinates of LTR portion is added to primer coordinates and recorded back
to the object and used subsequently by \code{\link{extractSeqs}} function to
perform the trimming. This function heavily relies on
\code{\link{pairwiseAlignSeqs}}.
}
\note{
\itemize{
\item For paired end data, qualityThreshold for pair 2 is decreased by
0.05 to increase chances of matching LTR sequence.
\item If parallel=TRUE, then be sure to have a parallel backend registered
before running the function. One can use any of the following
\code{\link{MulticoreParam}} \code{\link{SnowParam}}
}
}
\examples{
\dontrun{
load(file.path(system.file("data", package = "hiReadsProcessor"),
"FLX_seqProps.RData"))
findLTRs(seqProps, showStats=TRUE)
}
}
\seealso{
\code{\link{pairwiseAlignSeqs}}, \code{\link{vpairwiseAlignSeqs}},
\code{\link{extractFeature}}, \code{\link{extractSeqs}},
\code{\link{primerIDAlignSeqs}}, \code{\link{findPrimers}},
\code{\link{findLinkers}}, \code{\link{findAndTrimSeq}}
}
|
# Generic for testing whether `object` contains a given m/z value.
# Concrete methods are registered elsewhere in the package.
setGeneric("hasMz", function(object, mz, ...) standardGeneric("hasMz"))
# Generic for annotating m/z values in `object` against `compounds`.
setGeneric("annotateMz", function(object, compounds, ...)
standardGeneric("annotateMz"))
# Class unions used for S4 method dispatch: accept a base data.frame, a
# `DataFrame` (S4 class imported elsewhere) or a tibble interchangeably;
# the second union additionally accepts a plain numeric vector.
#' @importClassesFrom tibble tbl_df
#'
#' @noRd
setClassUnion("DataFrameOrEquivalent", c("data.frame", "DataFrame", "tbl_df"))
setClassUnion("numericOrDataFrameOrEquivalent",
c("numeric", "DataFrameOrEquivalent"))
| /R/AllGenerics.R | no_license | ezhou89/CompoundDb | R | false | false | 396 | r | setGeneric("hasMz", function(object, mz, ...) standardGeneric("hasMz"))
setGeneric("annotateMz", function(object, compounds, ...)
standardGeneric("annotateMz"))
#' @importClassesFrom tibble tbl_df
#'
#' @noRd
setClassUnion("DataFrameOrEquivalent", c("data.frame", "DataFrame", "tbl_df"))
setClassUnion("numericOrDataFrameOrEquivalent",
c("numeric", "DataFrameOrEquivalent"))
|
\name{data.obj}
\alias{data.obj}
\docType{data}
\title{
Example of a \kbd{corObject}
}
\description{
Example of a \kbd{corObject} that was used for the main vignette.
}
\usage{data("data.obj")}
\format{
The format is:
Formal class 'corObject' [package "miRComb"] with 13 slots
..@ dat.miRNA : numeric matrix
..@ dat.mRNA : numeric matrix
..@ pheno.miRNA :'data.frame'
..@ pheno.mRNA :'data.frame'
..@ cor : numeric matrix
..@ pval : numeric matrix
..@ net :'data.frame'
..@ diffexp.miRNA:'data.frame'
..@ diffexp.mRNA :'data.frame'
..@ sig.miRNA : character
..@ sig.mRNA : character
..@ GO.results :List
..@ info :List
}
\source{
Modified from: \emph{Affò S, Dominguez M, Lozano JJ, Sancho-Bru P et al. Transcriptome analysis identifies TNF superfamily receptors as potential therapeutic targets in alcoholic hepatitis. Gut 2013 Mar;62(3):452-60. PMID: 22637703}
}
\references{
Affò S, Dominguez M, Lozano JJ, Sancho-Bru P et al. Transcriptome analysis identifies TNF superfamily receptors as potential therapeutic targets in alcoholic hepatitis. Gut 2013 Mar;62(3):452-60. PMID: 22637703
}
\examples{
data(data.obj)
str(data.obj)
}
\keyword{datasets}
| /man/data.obj.Rd | no_license | mariavica/mircomb | R | false | false | 1,238 | rd | \name{data.obj}
\alias{data.obj}
\docType{data}
\title{
Example of a \kbd{corObject}
}
\description{
Example of a \kbd{corObject} that was used for the main vignette.
}
\usage{data("data.obj")}
\format{
The format is:
Formal class 'corObject' [package "miRComb"] with 13 slots
..@ dat.miRNA : numeric matrix
..@ dat.mRNA : numeric matrix
..@ pheno.miRNA :'data.frame'
..@ pheno.mRNA :'data.frame'
..@ cor : numeric matrix
..@ pval : numeric matrix
..@ net :'data.frame'
..@ diffexp.miRNA:'data.frame'
..@ diffexp.mRNA :'data.frame'
..@ sig.miRNA : character
..@ sig.mRNA : character
..@ GO.results :List
..@ info :List
}
\source{
Modified from: \emph{Affò S, Dominguez M, Lozano JJ, Sancho-Bru P et al. Transcriptome analysis identifies TNF superfamily receptors as potential therapeutic targets in alcoholic hepatitis. Gut 2013 Mar;62(3):452-60. PMID: 22637703}
}
\references{
Affò S, Dominguez M, Lozano JJ, Sancho-Bru P et al. Transcriptome analysis identifies TNF superfamily receptors as potential therapeutic targets in alcoholic hepatitis. Gut 2013 Mar;62(3):452-60. PMID: 22637703
}
\examples{
data(data.obj)
str(data.obj)
}
\keyword{datasets}
|
# Script: plot sliding-window anticipatory-pursuit results per
# probability condition, with a shaded error ribbon, and save as PDF.
library(ggplot2)
library(RColorBrewer)
library(matrixStats)
#### clear environment
rm(list = ls())
#### load data
# on Inspiron 13
# NOTE(review): machine-specific absolute paths; must be adapted when
# running on any other computer.
setwd("C:/Users/wuxiu/Documents/PhD@UBC/Lab/2ndYear/AnticipatoryPursuit/AnticipatoryPursuitMotionPerception/analysis/R")
plotFolder <- ("C:/Users/wuxiu/Documents/PhD@UBC/Lab/Conferences/Gordon/2019/figures/")
### modify these parameters to plot different conditions
# Input CSVs are expected to be named paste0(dataFileName, <prob>, ".csv").
dataFileName <- "slidingW_APvelX_"
pdfFileName <- "slidingW_APvelX_all_se.pdf"
# for plotting
vtPlotWidth <- 16 # width for the pdf file
textSize <- 25 # base font size passed to theme()
axisLineWidth <- 0.5 # line width of axis ticks (and commented-out axis segments)
velAlpha <- 0.2 # transparency of the error ribbons
# perceptual trials
# NOTE(review): the four axis-range settings below are only referenced by
# the commented-out velocity-trace plot; the active plot hard-codes its
# own axis limits.
ylimLow <- -4.2 # range for x axis line
ylimHigh <- 4.2
ytickLow <- -4 # range of y axis line
ytickHigh <- 4
# # standard trials
# ylimLow <- -11 # range for x axis line
# ylimHigh <- 11
# ytickLow <- -10 # range of y axis line
# ytickHigh <- 10
## load and process data
# # mean velocity traces in different conditions
# # first get mean for each condition and the confidence intervals
# probName <-c("50", "70", "90")
# n <- 9
# timePoints <- seq(from = -500, to = 700, by = 1)
# velTrace <- list()
# for (probI in 1:3) {
# fileName = paste(dataFileName,probName[probI],".csv", sep = "")
# velData <- read.csv(fileName, header = FALSE, sep=",")
# velData <- as.matrix(velData)
# for (conI in 1:2) {
# idxStart <- (conI-1)*9+1
# idxEnd <- conI*9
# velDataT <- velData[idxStart:idxEnd, 700:1900] # -500 to 700 ms
# tempM <- colMeans(velDataT, na.rm=TRUE)
# velMean <- as.matrix(tempM)
# velStd <- colSds(velDataT, na.rm=TRUE)
# error <- qt(0.975, df=n-1)*velStd/sqrt(n)
# velLower <- tempM-error
# velUpper <- tempM+error
# velTrace[[(probI-1)*2+conI]] <- data.frame(timePoints, velMean, velLower, velUpper)
# }
# }
# # colourCode <- c("#ffb687", "#897eff", "#71cc64") # different hues
# colourCode <- c("#c6dbef", "#4292c6", "#08306b") # all blue
# ###
# ## plot velocity traces with 95% CI
# # pdf(paste(folder1, "velocityTraces.pdf", sep = ""))
# p <- ggplot(velTrace[[1]], aes(x = timePoints, y = velMean)) +
# geom_line(aes(colour = "50%"), size = 1, linetype = "twodash") + geom_ribbon(aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "50%"), alpha = velAlpha) +
# geom_line(data = velTrace[[2]], aes(x = timePoints, y = velMean, colour = "50%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[2]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "50%"), alpha = velAlpha) +
# geom_line(data = velTrace[[3]], aes(x = timePoints, y = velMean, colour = "70%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[3]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "70%"), alpha = velAlpha) +
# geom_line(data = velTrace[[4]], aes(x = timePoints, y = velMean, colour = "70%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[4]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "70%"), alpha = velAlpha) +
# geom_line(data = velTrace[[5]], aes(x = timePoints, y = velMean, colour = "90%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[5]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "90%"), alpha = velAlpha) +
# geom_line(data = velTrace[[6]], aes(x = timePoints, y = velMean, colour = "90%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[6]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "90%"), alpha = velAlpha) +
# geom_segment(aes_all(c('x', 'y', 'xend', 'yend')), data = data.frame(x = c(-500, -550), xend = c(700, -550), y = c(ylimLow, ytickLow), yend = c(ylimLow, ytickHigh)), size = axisLineWidth) +
# scale_y_continuous(name = "Horinzontal eye velocity (°/s)",limits = c(ylimLow, ylimHigh), breaks=seq(ytickLow, ytickHigh, 2), expand = c(0, 0)) +
# scale_x_continuous(name = "Time (ms)", limits = c(-550, 700), breaks=seq(-500, 700, 100), expand = c(0, 0)) +
# # coord_cartesian(ylim=c(-4, 4)) +
# scale_colour_manual("Probability of rightward motion",
# breaks = c("50%", "70%", "90%"),
# values = c("50%" = colourCode[1], "70%" = colourCode[2], "90%" = colourCode[3])) +
# # scale_fill_manual("Probability of rightward motion",
# # breaks = c("50%", "70%", "90%"),
# # values = c("50%" = colourCode[1], "70%" = colourCode[2], "90%" = colourCode[3])) +
# theme(axis.text=element_text(colour="black"),
# axis.ticks=element_line(colour="black", size = axisLineWidth),
# panel.grid.major = element_blank(),
# panel.grid.minor = element_blank(),
# panel.border = element_blank(),
# panel.background = element_blank(),
# text = element_text(size = textSize),
# legend.background = element_rect(fill="transparent"),
# legend.key = element_rect(colour = "transparent", fill = "white"))
# print(p)
# # dev.off()
# ggsave(paste(plotFolder, pdfFileName, sep = ""), width = vtPlotWidth)
# sliding window analysis
# mean velocity traces in different conditions
# first get mean for each condition and the confidence intervals
# Sliding-window summary: for each rightward-motion probability condition
# (50/70/90%), load a CSV (rows = observations, columns = window
# positions) and compute the per-column mean with a standard-error band.
probName <-c("50", "70", "90")
n <- 9 # sample size used for the standard error (presumably 9 participants -- confirm)
velTrace <- list() # one data.frame (mean, lower, upper) per condition
for (probI in 1:3) {
fileName = paste(dataFileName,probName[probI],".csv", sep = "")
velData <- read.csv(fileName, header = FALSE, sep=",")
velData <- as.matrix(velData)
lengthD <- dim(velData)[2] # number of sliding-window positions
timePoints <- seq(from = 1, to = lengthD, by = 1) # x axis: window index
tempM <- colMeans(velData, na.rm=TRUE) # per-window mean across rows
velMean <- as.matrix(tempM)
velStd <- colSds(velData, na.rm=TRUE) # per-window SD (matrixStats)
error <- velStd/sqrt(n) # ste
# error <- qt(0.975, df=n-1)*velStd/sqrt(n) # 95% CI
velLower <- tempM-error
velUpper <- tempM+error
velTrace[[probI]] <- data.frame(timePoints, velMean, velLower, velUpper)
}
# Greyscale palette: one shade per probability condition
# (50% light grey, 70% mid grey, 90% black).
# colourCode <- c("#ffb687", "#897eff", "#71cc64") # different hues
# colourCode <- c("#c6dbef", "#4292c6", "#08306b") # all blue
colourCode <- c("#d9d9d9", "#737373", "#000000") # all black
###
## plot velocity traces with 95% CI
# NOTE(review): despite the comment above, the ribbons show the standard
# error computed in the loop above, not a 95% CI.
# pdf(paste(folder1, "velocityTraces.pdf", sep = ""))
# One line + shaded ribbon per condition; conditions 2 and 3 are added as
# extra layers with their own `data` argument.
p <- ggplot(velTrace[[1]], aes(x = timePoints, y = velMean)) +
geom_line(aes(colour = "50%"), size = 1, linetype = "solid") + geom_ribbon(aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "50%"), alpha = velAlpha) +
geom_line(data = velTrace[[2]], aes(x = timePoints, y = velMean, colour = "70%"), size = 1, linetype = "solid") + geom_ribbon(data = velTrace[[2]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "70%"), alpha = velAlpha) +
geom_line(data = velTrace[[3]], aes(x = timePoints, y = velMean, colour = "90%"), size = 1, linetype = "solid") + geom_ribbon(data = velTrace[[3]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "90%"), alpha = velAlpha) +
# geom_segment(aes_all(c('x', 'y', 'xend', 'yend')), data = data.frame(x = c(-500, -550), xend = c(700, -550), y = c(ylimLow, ytickLow), yend = c(ylimLow, ytickHigh)), size = axisLineWidth) +
scale_y_continuous(name = "P(perceiving right)-P(motion right) (°/s)", limits = c(-.1, 2), breaks=seq(0, 2, 0.5), expand = c(0, 0)) +
scale_x_continuous(name = "Perceptual trial number", limits = c(-0, 130), breaks=seq(0, 130, 40), expand = c(0, 0)) +
# coord_cartesian(ylim=c(-4, 4)) +
# Map both colour and fill to the same named palette so line and ribbon
# legends merge into a single legend.
scale_colour_manual("Probability of rightward motion",
breaks = c("50%", "70%", "90%"),
values = c("50%" = colourCode[1], "70%" = colourCode[2], "90%" = colourCode[3])) +
scale_fill_manual("Probability of rightward motion",
breaks = c("50%", "70%", "90%"),
values = c("50%" = colourCode[1], "70%" = colourCode[2], "90%" = colourCode[3])) +
# Minimal theme: no grid, no panel border, transparent legend background.
theme(axis.text=element_text(colour="black"),
axis.ticks=element_line(colour="black", size = axisLineWidth),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
text = element_text(size = textSize),
legend.background = element_rect(fill="transparent"),
legend.key = element_rect(colour = "transparent", fill = "white"))
print(p)
# dev.off()
ggsave(paste(plotFolder, pdfFileName, sep = ""), width = vtPlotWidth)
| /anticipatoryTorsion_exp/analysis/R/velocityTraces.R | no_license | CaptainS5/TorsionPerception | R | false | false | 8,576 | r | library(ggplot2)
library(RColorBrewer)
library(matrixStats)
#### clear environment
rm(list = ls())
#### load data
# on Inspiron 13
setwd("C:/Users/wuxiu/Documents/PhD@UBC/Lab/2ndYear/AnticipatoryPursuit/AnticipatoryPursuitMotionPerception/analysis/R")
plotFolder <- ("C:/Users/wuxiu/Documents/PhD@UBC/Lab/Conferences/Gordon/2019/figures/")
### modify these parameters to plot different conditions
dataFileName <- "slidingW_APvelX_"
pdfFileName <- "slidingW_APvelX_all_se.pdf"
# for plotting
vtPlotWidth <- 16 # width for the pdf file
textSize <- 25
axisLineWidth <- 0.5
velAlpha <- 0.2
# perceptual trials
ylimLow <- -4.2 # range for x axis line
ylimHigh <- 4.2
ytickLow <- -4 # range of y axis line
ytickHigh <- 4
# # standard trials
# ylimLow <- -11 # range for x axis line
# ylimHigh <- 11
# ytickLow <- -10 # range of y axis line
# ytickHigh <- 10
## load and process data
# # mean velocity traces in different conditions
# # first get mean for each condition and the confidence intervals
# probName <-c("50", "70", "90")
# n <- 9
# timePoints <- seq(from = -500, to = 700, by = 1)
# velTrace <- list()
# for (probI in 1:3) {
# fileName = paste(dataFileName,probName[probI],".csv", sep = "")
# velData <- read.csv(fileName, header = FALSE, sep=",")
# velData <- as.matrix(velData)
# for (conI in 1:2) {
# idxStart <- (conI-1)*9+1
# idxEnd <- conI*9
# velDataT <- velData[idxStart:idxEnd, 700:1900] # -500 to 700 ms
# tempM <- colMeans(velDataT, na.rm=TRUE)
# velMean <- as.matrix(tempM)
# velStd <- colSds(velDataT, na.rm=TRUE)
# error <- qt(0.975, df=n-1)*velStd/sqrt(n)
# velLower <- tempM-error
# velUpper <- tempM+error
# velTrace[[(probI-1)*2+conI]] <- data.frame(timePoints, velMean, velLower, velUpper)
# }
# }
# # colourCode <- c("#ffb687", "#897eff", "#71cc64") # different hues
# colourCode <- c("#c6dbef", "#4292c6", "#08306b") # all blue
# ###
# ## plot velocity traces with 95% CI
# # pdf(paste(folder1, "velocityTraces.pdf", sep = ""))
# p <- ggplot(velTrace[[1]], aes(x = timePoints, y = velMean)) +
# geom_line(aes(colour = "50%"), size = 1, linetype = "twodash") + geom_ribbon(aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "50%"), alpha = velAlpha) +
# geom_line(data = velTrace[[2]], aes(x = timePoints, y = velMean, colour = "50%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[2]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "50%"), alpha = velAlpha) +
# geom_line(data = velTrace[[3]], aes(x = timePoints, y = velMean, colour = "70%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[3]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "70%"), alpha = velAlpha) +
# geom_line(data = velTrace[[4]], aes(x = timePoints, y = velMean, colour = "70%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[4]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "70%"), alpha = velAlpha) +
# geom_line(data = velTrace[[5]], aes(x = timePoints, y = velMean, colour = "90%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[5]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "90%"), alpha = velAlpha) +
# geom_line(data = velTrace[[6]], aes(x = timePoints, y = velMean, colour = "90%"), size = 1, linetype = "twodash") + geom_ribbon(data = velTrace[[6]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "90%"), alpha = velAlpha) +
# geom_segment(aes_all(c('x', 'y', 'xend', 'yend')), data = data.frame(x = c(-500, -550), xend = c(700, -550), y = c(ylimLow, ytickLow), yend = c(ylimLow, ytickHigh)), size = axisLineWidth) +
# scale_y_continuous(name = "Horinzontal eye velocity (°/s)",limits = c(ylimLow, ylimHigh), breaks=seq(ytickLow, ytickHigh, 2), expand = c(0, 0)) +
# scale_x_continuous(name = "Time (ms)", limits = c(-550, 700), breaks=seq(-500, 700, 100), expand = c(0, 0)) +
# # coord_cartesian(ylim=c(-4, 4)) +
# scale_colour_manual("Probability of rightward motion",
# breaks = c("50%", "70%", "90%"),
# values = c("50%" = colourCode[1], "70%" = colourCode[2], "90%" = colourCode[3])) +
# # scale_fill_manual("Probability of rightward motion",
# # breaks = c("50%", "70%", "90%"),
# # values = c("50%" = colourCode[1], "70%" = colourCode[2], "90%" = colourCode[3])) +
# theme(axis.text=element_text(colour="black"),
# axis.ticks=element_line(colour="black", size = axisLineWidth),
# panel.grid.major = element_blank(),
# panel.grid.minor = element_blank(),
# panel.border = element_blank(),
# panel.background = element_blank(),
# text = element_text(size = textSize),
# legend.background = element_rect(fill="transparent"),
# legend.key = element_rect(colour = "transparent", fill = "white"))
# print(p)
# # dev.off()
# ggsave(paste(plotFolder, pdfFileName, sep = ""), width = vtPlotWidth)
# sliding window analysis
# mean velocity traces in different conditions
# first get mean for each condition and the confidence intervals
probName <-c("50", "70", "90")
n <- 9
velTrace <- list()
for (probI in 1:3) {
fileName = paste(dataFileName,probName[probI],".csv", sep = "")
velData <- read.csv(fileName, header = FALSE, sep=",")
velData <- as.matrix(velData)
lengthD <- dim(velData)[2]
timePoints <- seq(from = 1, to = lengthD, by = 1)
tempM <- colMeans(velData, na.rm=TRUE)
velMean <- as.matrix(tempM)
velStd <- colSds(velData, na.rm=TRUE)
error <- velStd/sqrt(n) # ste
# error <- qt(0.975, df=n-1)*velStd/sqrt(n) # 95% CI
velLower <- tempM-error
velUpper <- tempM+error
velTrace[[probI]] <- data.frame(timePoints, velMean, velLower, velUpper)
}
# colourCode <- c("#ffb687", "#897eff", "#71cc64") # different hues
# colourCode <- c("#c6dbef", "#4292c6", "#08306b") # all blue
colourCode <- c("#d9d9d9", "#737373", "#000000") # all black
###
## plot velocity traces with 95% CI
# pdf(paste(folder1, "velocityTraces.pdf", sep = ""))
p <- ggplot(velTrace[[1]], aes(x = timePoints, y = velMean)) +
geom_line(aes(colour = "50%"), size = 1, linetype = "solid") + geom_ribbon(aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "50%"), alpha = velAlpha) +
geom_line(data = velTrace[[2]], aes(x = timePoints, y = velMean, colour = "70%"), size = 1, linetype = "solid") + geom_ribbon(data = velTrace[[2]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "70%"), alpha = velAlpha) +
geom_line(data = velTrace[[3]], aes(x = timePoints, y = velMean, colour = "90%"), size = 1, linetype = "solid") + geom_ribbon(data = velTrace[[3]], aes(x = timePoints, ymin=velLower, ymax=velUpper, fill = "90%"), alpha = velAlpha) +
# geom_segment(aes_all(c('x', 'y', 'xend', 'yend')), data = data.frame(x = c(-500, -550), xend = c(700, -550), y = c(ylimLow, ytickLow), yend = c(ylimLow, ytickHigh)), size = axisLineWidth) +
scale_y_continuous(name = "P(perceiving right)-P(motion right) (°/s)", limits = c(-.1, 2), breaks=seq(0, 2, 0.5), expand = c(0, 0)) +
scale_x_continuous(name = "Perceptual trial number", limits = c(-0, 130), breaks=seq(0, 130, 40), expand = c(0, 0)) +
# coord_cartesian(ylim=c(-4, 4)) +
scale_colour_manual("Probability of rightward motion",
breaks = c("50%", "70%", "90%"),
values = c("50%" = colourCode[1], "70%" = colourCode[2], "90%" = colourCode[3])) +
scale_fill_manual("Probability of rightward motion",
breaks = c("50%", "70%", "90%"),
values = c("50%" = colourCode[1], "70%" = colourCode[2], "90%" = colourCode[3])) +
theme(axis.text=element_text(colour="black"),
axis.ticks=element_line(colour="black", size = axisLineWidth),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
text = element_text(size = textSize),
legend.background = element_rect(fill="transparent"),
legend.key = element_rect(colour = "transparent", fill = "white"))
print(p)
# dev.off()
ggsave(paste(plotFolder, pdfFileName, sep = ""), width = vtPlotWidth)
|
## ----message=FALSE, warning=FALSE, include=FALSE------------------------------
# Setup chunk: configure knitr output, limit printed digits, skip chunk
# evaluation when suggested packages are missing, and fix the RNG seed so
# the vignette output is reproducible.
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment=">")
options(digits=2)
# Disable evaluation of all chunks if ggplot2 or dplyr is not installed.
if (!requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE)) {
knitr::opts_chunk$set(eval = FALSE)
}
set.seed(333)
## ----warning=FALSE, message=FALSE---------------------------------------------
# Compare the highest-density interval (HDI) and the equal-tailed
# interval (ETI) on a simulated normal distribution, marking both sets of
# bounds on a density plot.
library(bayestestR)
library(dplyr)
library(ggplot2)
# Generate a normal distribution
posterior <- distribution_normal(1000)
# Compute HDI and ETI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")
# Plot the distribution and add the limits of the two CIs
posterior %>%
estimate_density(extend=TRUE) %>%
ggplot(aes(x=x, y=y)) +
geom_area(fill="orange") +
theme_classic() +
# HDI in blue
geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) +
geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) +
# Quantile in red
geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) +
geom_vline(xintercept=ci_eti$CI_high, color="red", size=1)
## ----warning=FALSE, message=FALSE---------------------------------------------
# Same HDI-vs-ETI comparison on a skewed Beta(6, 2) distribution, where
# the two interval types produce visibly different bounds.
library(bayestestR)
library(dplyr)
library(ggplot2)
# Generate a beta distribution
posterior <- distribution_beta(1000, 6, 2)
# Compute HDI and Quantile CI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")
# Plot the distribution and add the limits of the two CIs
posterior %>%
estimate_density(extend=TRUE) %>%
ggplot(aes(x=x, y=y)) +
geom_area(fill="orange") +
theme_classic() +
# HDI in blue
geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) +
geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) +
# ETI in red
geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) +
geom_vline(xintercept=ci_eti$CI_high, color="red", size=1)
## ----warning=FALSE, message=FALSE---------------------------------------------
# Support intervals (SI) at BF = 1 and BF = 3 for a normal "posterior"
# against a standard-normal "prior"; both densities and both sets of SI
# bounds are drawn on one plot.
prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = .5, sd = .3)
si_1 <- si(posterior, prior, BF = 1)
si_3 <- si(posterior, prior, BF = 3)
# No default data: each geom_area layer supplies its own density data.
ggplot(mapping = aes(x=x, y=y)) +
theme_classic() +
# The posterior
geom_area(fill = "orange",
data = estimate_density(posterior, extend = TRUE)) +
# The prior
geom_area(color = "black", fill = NA, size = 1, linetype = "dashed",
data = estimate_density(prior, extend = TRUE)) +
# BF = 1 SI in blue
geom_vline(xintercept=si_1$CI_low, color="royalblue", size=1) +
geom_vline(xintercept=si_1$CI_high, color="royalblue", size=1) +
# BF = 3 SI in red
geom_vline(xintercept=si_3$CI_low, color="red", size=1) +
geom_vline(xintercept=si_3$CI_high, color="red", size=1)
| /build/R4.0.2-win64/bayestestR/doc/credible_interval.R | no_license | hyunsooseol/SimplyAgree | R | false | false | 2,799 | r | ## ----message=FALSE, warning=FALSE, include=FALSE------------------------------
library(knitr)
options(knitr.kable.NA = '')
knitr::opts_chunk$set(comment=">")
options(digits=2)
if (!requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE)) {
knitr::opts_chunk$set(eval = FALSE)
}
set.seed(333)
## ----warning=FALSE, message=FALSE---------------------------------------------
library(bayestestR)
library(dplyr)
library(ggplot2)
# Generate a normal distribution
posterior <- distribution_normal(1000)
# Compute HDI and ETI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")
# Plot the distribution and add the limits of the two CIs
posterior %>%
estimate_density(extend=TRUE) %>%
ggplot(aes(x=x, y=y)) +
geom_area(fill="orange") +
theme_classic() +
# HDI in blue
geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) +
geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) +
# Quantile in red
geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) +
geom_vline(xintercept=ci_eti$CI_high, color="red", size=1)
## ----warning=FALSE, message=FALSE---------------------------------------------
library(bayestestR)
library(dplyr)
library(ggplot2)
# Generate a beta distribution
posterior <- distribution_beta(1000, 6, 2)
# Compute HDI and Quantile CI
ci_hdi <- ci(posterior, method = "HDI")
ci_eti <- ci(posterior, method = "ETI")
# Plot the distribution and add the limits of the two CIs
posterior %>%
estimate_density(extend=TRUE) %>%
ggplot(aes(x=x, y=y)) +
geom_area(fill="orange") +
theme_classic() +
# HDI in blue
geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) +
geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) +
# ETI in red
geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) +
geom_vline(xintercept=ci_eti$CI_high, color="red", size=1)
## ----warning=FALSE, message=FALSE---------------------------------------------
prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = .5, sd = .3)
si_1 <- si(posterior, prior, BF = 1)
si_3 <- si(posterior, prior, BF = 3)
ggplot(mapping = aes(x=x, y=y)) +
theme_classic() +
# The posterior
geom_area(fill = "orange",
data = estimate_density(posterior, extend = TRUE)) +
# The prior
geom_area(color = "black", fill = NA, size = 1, linetype = "dashed",
data = estimate_density(prior, extend = TRUE)) +
# BF = 1 SI in blue
geom_vline(xintercept=si_1$CI_low, color="royalblue", size=1) +
geom_vline(xintercept=si_1$CI_high, color="royalblue", size=1) +
# BF = 3 SI in red
geom_vline(xintercept=si_3$CI_low, color="red", size=1) +
geom_vline(xintercept=si_3$CI_high, color="red", size=1)
|
# Shiny/shinydashboard UI for the "Import" tab: an import box (with a
# hidden "loading" message presumably toggled by the server via shinyjs
# while OpenML datasets download), a preview table of the imported data,
# and a server-rendered OpenML browser panel.
tabpanel.import = list(fluidRow(
box(width = 12, title = "Import",
htmlOutput("import.text"),
br(),
uiOutput("import.ui"),
# Hidden by default; shown while datasets are being fetched
hidden(
div(id = "loading.message", align = "center",
h4("Loading datasets from OpenML")
)
)
)),
fluidRow(
box(width = 12,
# Preview of the imported dataset (rendered server-side with DT)
DT::dataTableOutput("import.preview")
)
),
fluidRow(uiOutput("tabpanel.browse.openml"))
)
| /ui/import.R | no_license | konerukeerthi/mlr_shiny | R | false | false | 404 | r | tabpanel.import = list(fluidRow(
box(width = 12, title = "Import",
htmlOutput("import.text"),
br(),
uiOutput("import.ui"),
hidden(
div(id = "loading.message", align = "center",
h4("Loading datasets from OpenML")
)
)
)),
fluidRow(
box(width = 12,
DT::dataTableOutput("import.preview")
)
),
fluidRow(uiOutput("tabpanel.browse.openml"))
)
|
#' @title Regression Discontinuity Design Permutation Test
#'
#' @description A permutation test for continuity of covariates in Sharp Regression Discontinuity Design as described in Canay and Kamat (2017).
#'
#' @param W Character. Vector of covariates names. The procedure will test the null hypothesis of continuity of the distribution of each element in W at the cutoff.
#' @param z Character. Running variable name. This is the scalar random variable that defines, along with the cutoff, the treatment assignment rule in the sharp regression discontinuity design.
#' @param data Data.frame.
#' @param n.perm Numeric. Number of permutations needed for the stochastic approximation of the p-values. See remark 3.2 in Canay and Kamat (2017). The default is B=499.
#' @param q_type A fixed and small (relative to the sample size) natural number that will define the \eqn{q}{q} closest values of the order statistic of \eqn{Z}{Z} to the right and to the left of the cutoff. The default, 'rot', value is given by the feasible rule of thumb in footnote 4 of Canay and Kamat (2017), section 3.1. If 'arot', it calls for the Rule of Thumb described in equation (15) of Canay and Kamat (2017), section 3.1. The default option grows at a slower rate than the optional rule of thumb, but adds a larger constant.
#' @param cutoff Numeric. The scalar defining the threshold of the running variable.
#' @param test.statistic Character. A rank test statistic satisfying rank invariance. The default is a Cramer-von Mises test statistic.
#' @return The functions \code{summary} and \code{plot} are used to obtain and print a summary and plot of
#' the estimated regression discontinuity. The object of class \code{RDperm} is a list
#' containing the following components:
#' \item{results}{Matrix. Test Statistic, P-values and Q}
#' \item{test.statistic}{Test Statistic}
#' \item{q_type}{Type of Q used in the calculations, can be either, "Defined by User", the "Rule of Thumb" or the "Alternative Rule of Thumb".}
#' \item{n_perm}{number of permutations}
#' \item{rv}{Character. Running variable name}
#' \item{Z}{Vector. Running Variable}
#' \item{cutoff}{cutoff}
#' \item{data}{data set}
#' \item{S}{Matrix. Pooled sample of induced order statistics}
#' \item{S_perm}{List. Permutations of the induced order statistic.}
#' @author Maurcio Olivares Gonzalez
#' @author Ignacio Sarmiento Barbieri
#' @references
#' Canay, I and Kamat V, (2017) Approximate Permutation Tests and Induced Order Statistics in the Regression Discontinuity Design. \url{http://faculty.wcas.northwestern.edu/~iac879/wp/RDDPermutations.pdf}
#' @keywords permutation test rdperm
#' @include H.cdf.R
#' @include CvM.stat.R
#' @import quantreg
#' @importFrom stats cor var runif
#' @examples
#' permtest<-RDperm(W=c("demshareprev"),z="difdemshare",data=lee2008)
#' summary(permtest)
#'\dontrun{
#' permtest<-RDperm(W=c("demshareprev","demwinprev"),z="difdemshare",data=lee2008)
#' summary(permtest)
#' }
#' @export
# Permutation test for continuity of baseline covariates W at the RDD
# cutoff (Canay & Kamat 2017). Splits the sample at `cutoff` on the
# running variable z, keeps the q observations closest to the cutoff on
# each side, and runs a permutation test (default: Cramer-von Mises
# statistic) covariate by covariate, plus a joint test when length(W) > 1.
# Returns an S3 object of class "RDperm".
RDperm<-function(W,z,data,n.perm=499,q_type=10,cutoff=0,test.statistic="CvM"){
# Keep only the covariates and the running variable; rename the latter "z"
W_z<-base::subset(data, select=c(W,z))
colnames(W_z)[colnames(W_z)==z]<-"z"
N<-dim(data)[1]
# Split the sample at the cutoff (right side is z >= cutoff)
W_left <- W_z[W_z$z<cutoff,]
n_left <- length(W_left$z)
W_right <- W_z[W_z$z>=cutoff,]
n_right <- length(W_right$z)
if(N!=n_left+n_right) stop( paste( "Something is wrong with the number of observations", sep="" ) )
# Induced order of W obs
W_left <- W_left[order(W_left$z),]
W_right <- W_right[order(W_right$z),]
# Results matrix: columns are (statistic, p-value, q); one row per
# covariate, plus one for the joint test when q is user-fixed and
# there are several covariates
if(!(q_type%in%c("rot","arot")) & length(W)>1 ) {
results<-matrix(NA, nrow=length(W)+1, ncol=3)
} else results<-matrix(NA, nrow=length(W), ncol=3)
# Selecting Q,
if(q_type%in%c("rot","arot")){
# Data-driven q: apply the (alternative) rule of thumb to each
# covariate separately and test each with its own q
w<-as.list(W)
if(q_type=="rot") rot<-lapply(w,qrot,W_z)
if(q_type=="arot") rot<-lapply(w,aqrot,W_z)
w<-mapply(c, w, rot, SIMPLIFY=FALSE)
test<-lapply(w,function(x) {
f<-RDperm.base(x[1],W_left, n_left, W_right, q=as.numeric(x[2]), z, n.perm, test.statistic)
ret<-list()
ret$test_statistic.obs<-f$test_statistic.obs
ret$pvalues<-f$pvalues
ret$q<-as.numeric(f$q)
return(ret)
})
# Stack the per-covariate (statistic, p-value, q) triples into `results`
test<-do.call(rbind,test)
test<-t(apply(test,1,function(x) do.call(rbind,x)))
results[,1]<-test[,1]
results[,2]<-test[,2]
results[,3]<-test[,3]
}
# User-supplied q: the same q applies to every covariate (and joint test)
if(!(q_type%in%c("rot","arot")) & length(W)>1 ) results[,3]<-rep(q_type,length(W)+1)
if(!(q_type%in%c("rot","arot")) & length(W)==1 ) results[,3]<-rep(q_type,length(W))
# Common q for the pooled/joint computation: the smallest across rows
q<-min(results[,3])
permtest<-RDperm.base(W,W_left, n_left, W_right, q=q,z, n.perm, test.statistic)
if(!(q_type%in%c("rot","arot")) ){
results[,1]<-permtest$test_statistic.obs
results[,2]<-permtest$pvalues
}
if((q_type%in%c("rot","arot")) & length(W)>1 ){
# Append the joint-test row, computed with the common (minimum) q
permtest<-RDperm.base(W,W_left, n_left, W_right, q=q,z, n.perm, test.statistic)
results_updated<-matrix(NA, nrow=length(W)+1, ncol=3)
results_updated[,1]<-c(results[,1],permtest$test_statistic.obs[length(W)+1])
results_updated[,2]<-c(results[,2],permtest$pvalues[length(W)+1])
results_updated[,3]<-c(results[,3],q)
results<-results_updated
}
for(i in 1:3) results[,i]<- as.numeric(results[,i])
colnames(results)<-c("T(Sn)","Pr(>|z|)", "q")
if(length(W)>1){rownames(results)<-c(W,"Joint.Test")
}else rownames(results)<-W
# Assemble the S3 return object (class "RDperm")
object_perm<-list()
object_perm$results<-results
object_perm$test.statistic<-test.statistic
object_perm$Z<- W_z[,"z"] #running variable
object_perm$rv<- z #name of running variable
object_perm$cutoff<- cutoff #cutoff
if(q_type=="rot"){
object_perm$q_type<-"Rule of Thumb"
}
else if(q_type=="arot"){
object_perm$q_type<-"Alternative Rule of Thumb"
}
else{object_perm$q_type<- "Defined by User"}
object_perm$n_perm<- n.perm #number of permutations
object_perm$data<-data
class(object_perm)<-"RDperm"
return(object_perm)
}
# Internal workhorse: given the left/right subsamples (already sorted by z),
# run the permutation test of Canay & Kamat (2017) on the q observations
# closest to the cutoff on each side, for covariates W.
# NOTE(review): the signature order is (..., z, q, ...); every caller in
# this file passes q by name, so the positional z still lands correctly.
RDperm.base<-function(W,W_left, n_left, W_right, z, q, n.perm, test.statistic){
#Step 1 & 2. Compute the order statistics of Z and the associated values of W
# q closest W from the left/right of threshold
# Equation (10)
W_left_q<-base::subset(W_left[(n_left-q+1):n_left,], select=c(W))
Z_left<-base::subset(W_left[(n_left-q+1):n_left,], select=c(z))
W_right_q<-base::subset(W_right[1:q,], select=c(W))
Z_right <-base::subset(W_right[1:q,], select=c(z))
# Pooled sample of the 2q induced order statistics
Sn<-rbind(W_left_q,W_right_q)
if(test.statistic=="CvM"){
#Step 3. Compute the test statistic
test_statistic.obs<-apply(Sn,2,CvM.stat)
if(length(W)>1){
# Joint statistic: maximum CvM statistic over projections of the
# pooled sample onto directions drawn by C.unitsphere
n.test_statistic.obs<-names(test_statistic.obs)
K<-length(W)
c<-C.unitsphere(K)
cS<-as.matrix(Sn)%*%c
TSn.joint<-max(apply(cS,2,calc_stat.CvM))
test_statistic.obs<-c(test_statistic.obs,TSn.joint)
names(test_statistic.obs)<-c(n.test_statistic.obs,"joint")
}
#Step 4. Generate random permutations
sample.indexes = lapply(1:n.perm, function(x) sample(1:(2*q)))
S_perm_list<-lapply(sample.indexes,function(x,db) {db[x,]},Sn)
calc_stat_res<-lapply(S_perm_list,calc_stat.CvM)
#Step 6. Compute the p-value of the test
test_statistic<-"CvM"
# p-value: share of permuted statistics at least as large as the observed
ind.rule<-lapply(calc_stat_res,function(x,CvM) {ifelse(x>=CvM,1,0)},test_statistic.obs)
ind.rule<-do.call(cbind,ind.rule)
ind.rule<-rowMeans(ind.rule)
} else{"Need to generate Kologorov Statistic"}
object_perm<-list()
object_perm$test_statistic.obs<-test_statistic.obs
object_perm$pvalues<-ind.rule
object_perm$q<- q #q
#object_perm$S<- S_perm
object_perm$S<- Sn
object_perm$S_perm<- S_perm_list
class(object_perm)<-"RDperm"
return(object_perm)
}
# Cramer-von Mises statistic for one (possibly permuted) pooled sample.
#
# For a single covariate (vector input) this is just CvM.stat(x). For a
# matrix of K covariates it returns the per-column statistics plus a
# "joint" entry: the maximum statistic over projections of the sample onto
# the directions produced by C.unitsphere(K).
#
# Fix: the original tested `is.vector(x)==T`, comparing against the
# reassignable alias T; use the logical value directly and an early return.
calc_stat.CvM<-function(x){
  if (is.vector(x)) {
    return(CvM.stat(x))
  }
  stat <- apply(x, 2, CvM.stat)
  n.stat <- names(stat)
  K <- dim(x)[2]
  c <- C.unitsphere(K)
  cS <- as.matrix(x) %*% c
  # Each column of cS is a vector, so the recursive call reduces to CvM.stat
  TSn.joint <- max(apply(cS, 2, calc_stat.CvM))
  stat <- c(stat, TSn.joint)
  names(stat) <- c(n.stat, "joint")
  return(stat)
}
# Alternative rule of thumb for q (Canay & Kamat 2017, Section 3.1): the
# number of closest-to-the-cutoff observations kept on each side, based on
# the density of the running variable near the cutoff and the correlation
# between z and the covariate w.
#
# Fix: return q explicitly. The original ended in an if/else chain of
# assignments (including a redundant `q<-q` branch), so the function only
# returned its value invisibly as the value of the last assignment.
aqrot<-function(w,W_z){
  w <- W_z[, w]
  z <- W_z[, "z"]
  N <- length(w)
  # Kernel density of z evaluated on a grid; f0 is the density at the grid
  # point nearest 0 (assumes the cutoff is at 0 — TODO confirm with callers)
  t <- seq(from = min(z), to = max(z), length.out = 2 * N)
  f <- quantreg::akj(z, t)$dens
  t0 <- which(abs(t - 0) == min(abs(t - 0)))
  f0 <- f[t0]
  q <- ceiling(f0 * var(z) * sqrt((1 - cor(z, w)^2)) * (N^{0.9} / log(N)))
  # Clamp q to [10, ceiling(N^0.9 / log(N))]
  if (q < 10) {
    q <- 10
  } else if (q > N^(0.9) / log(N)) {
    q <- ceiling(N^(0.9) / log(N))
  }
  return(q)
}
# Feasible rule of thumb for q (Canay & Kamat 2017, footnote 4 of Section
# 3.1): like aqrot but with a larger constant (factor 10 under the square
# root) and a slower N^0.75 growth rate.
#
# Fix: return q explicitly. The original ended in an if/else chain of
# assignments (including a redundant `q<-q` branch), so the function only
# returned its value invisibly as the value of the last assignment.
qrot<-function(w,W_z){
  w <- W_z[, w]
  z <- W_z[, "z"]
  N <- length(w)
  # Kernel density of z evaluated on a grid; f0 is the density at the grid
  # point nearest 0 (assumes the cutoff is at 0 — TODO confirm with callers)
  t <- seq(from = min(z), to = max(z), length.out = 2 * N)
  f <- quantreg::akj(z, t)$dens
  t0 <- which(abs(t - 0) == min(abs(t - 0)))
  f0 <- f[t0]
  q <- ceiling(f0 * var(z) * sqrt(10 * (1 - cor(z, w)^2)) * (N^{0.75} / log(N)))
  # Clamp q to [10, ceiling(N^0.9 / log(N))]
  if (q < 10) {
    q <- 10
  } else if (q > N^(0.9) / log(N)) {
    q <- ceiling(N^(0.9) / log(N))
  }
  return(q)
}
# Draw (100 - K) random directions on the K-dimensional unit sphere and
# append the K canonical basis vectors, returning a K x 100 matrix whose
# columns c all satisfy ||c|| = 1. These directions are used to project the
# pooled sample when computing the joint CvM statistic (K > 1).
C.unitsphere <- function(K){
  # Random coordinates in [-1, 1]
  C <- matrix(stats::runif(K * (100 - K), -1, 1), nrow = K)
  # BUG FIX: normalize by the Euclidean norm. The original divided each
  # column by sum(x), which yields sum(c) = 1 rather than ||c|| = 1 (and
  # blows up whenever the column sum is near zero).
  C <- apply(C, 2, function(x) x / sqrt(sum(x^2)))
  # Canonical elements: vectors with zeros in all coordinates except one
  return(cbind(C, diag(K)))
}
| /R/RDperm.R | no_license | apoorvalal/RATest | R | false | false | 9,128 | r | #' @title Regression Discontinuity Design Permutation Test
#'
#' @description A permutation test for continuity of covariates in Sharp Regression Discontinuity Design as described in Canay and Kamat (2017).
#'
#' @param W Character. Vector of covariates names. The procedure will test the null hypothesis of continuity of the distribution of each element in W at the cutoff.
#' @param z Character. Running variable name. This is the scalar random variable that defines, along with the cutoff, the treatment assignment rule in the sharp regression discontinuity design.
#' @param data Data.frame.
#' @param n.perm Numeric. Number of permutations needed for the stochastic approximation of the p-values. See remark 3.2 in Canay and Kamat (2017). The default is B=499.
#' @param q_type A fixed and small (relative to the sample size) natural number that will define the \eqn{q}{q} closest values of the order statistic of \eqn{Z}{Z} to the right and to the left of the cutoff. The default, 'rot', value is given by the feasible rule of thumb in footnote 4 of Canay and Kamat (2017), section 3.1. If 'arot', it calls for the Rule of Thumb described in equation (15) of Canay and Kamat (2017), section 3.1. The default option grows at a slower rate than the optional rule of thumb, but adds a larger constant.
#' @param cutoff Numeric. The scalar defining the threshold of the running variable.
#' @param test.statistic Character. A rank test statistic satisfying rank invariance. The default is a Cramer-von Mises test statistic.
#' @return The functions \code{summary} and \code{plot} are used to obtain and print a summary and plot of
#' the estimated regression discontinuity. The object of class \code{RDperm} is a list
#' containing the following components:
#' \item{results}{Matrix. Test Statistic, P-values and Q}
#' \item{test.statistic}{Test Statistic}
#' \item{q_type}{Type of Q used in the calculations, can be either, "Defined by User", the "Rule of Thumb" or the "Alternative Rule of Thumb".}
#' \item{n_perm}{number of permutations}
#' \item{rv}{Character. Running variable name}
#' \item{Z}{Vector. Running Variable}
#' \item{cutoff}{cutoff}
#' \item{data}{data set}
#' \item{S}{Matrix. Pooled sample of induced order statistics}
#' \item{S_perm}{List. Permutations of the induced order statistic.}
#' @author Maurcio Olivares Gonzalez
#' @author Ignacio Sarmiento Barbieri
#' @references
#' Canay, I and Kamat V, (2017) Approximate Permutation Tests and Induced Order Statistics in the Regression Discontinuity Design. \url{http://faculty.wcas.northwestern.edu/~iac879/wp/RDDPermutations.pdf}
#' @keywords permutation test rdperm
#' @include H.cdf.R
#' @include CvM.stat.R
#' @import quantreg
#' @importFrom stats cor var runif
#' @examples
#' permtest<-RDperm(W=c("demshareprev"),z="difdemshare",data=lee2008)
#' summary(permtest)
#'\dontrun{
#' permtest<-RDperm(W=c("demshareprev","demwinprev"),z="difdemshare",data=lee2008)
#' summary(permtest)
#' }
#' @export
RDperm<-function(W,z,data,n.perm=499,q_type=10,cutoff=0,test.statistic="CvM"){
W_z<-base::subset(data, select=c(W,z))
colnames(W_z)[colnames(W_z)==z]<-"z"
N<-dim(data)[1]
W_left <- W_z[W_z$z<cutoff,]
n_left <- length(W_left$z)
W_right <- W_z[W_z$z>=cutoff,]
n_right <- length(W_right$z)
if(N!=n_left+n_right) stop( paste( "Something is wrong with the number of observations", sep="" ) )
# Induced order of W obs
W_left <- W_left[order(W_left$z),]
W_right <- W_right[order(W_right$z),]
if(!(q_type%in%c("rot","arot")) & length(W)>1 ) {
results<-matrix(NA, nrow=length(W)+1, ncol=3)
} else results<-matrix(NA, nrow=length(W), ncol=3)
# Selecting Q,
if(q_type%in%c("rot","arot")){
w<-as.list(W)
if(q_type=="rot") rot<-lapply(w,qrot,W_z)
if(q_type=="arot") rot<-lapply(w,aqrot,W_z)
w<-mapply(c, w, rot, SIMPLIFY=FALSE)
test<-lapply(w,function(x) {
f<-RDperm.base(x[1],W_left, n_left, W_right, q=as.numeric(x[2]), z, n.perm, test.statistic)
ret<-list()
ret$test_statistic.obs<-f$test_statistic.obs
ret$pvalues<-f$pvalues
ret$q<-as.numeric(f$q)
return(ret)
})
test<-do.call(rbind,test)
test<-t(apply(test,1,function(x) do.call(rbind,x)))
results[,1]<-test[,1]
results[,2]<-test[,2]
results[,3]<-test[,3]
}
if(!(q_type%in%c("rot","arot")) & length(W)>1 ) results[,3]<-rep(q_type,length(W)+1)
if(!(q_type%in%c("rot","arot")) & length(W)==1 ) results[,3]<-rep(q_type,length(W))
q<-min(results[,3])
permtest<-RDperm.base(W,W_left, n_left, W_right, q=q,z, n.perm, test.statistic)
if(!(q_type%in%c("rot","arot")) ){
results[,1]<-permtest$test_statistic.obs
results[,2]<-permtest$pvalues
}
if((q_type%in%c("rot","arot")) & length(W)>1 ){
permtest<-RDperm.base(W,W_left, n_left, W_right, q=q,z, n.perm, test.statistic)
results_updated<-matrix(NA, nrow=length(W)+1, ncol=3)
results_updated[,1]<-c(results[,1],permtest$test_statistic.obs[length(W)+1])
results_updated[,2]<-c(results[,2],permtest$pvalues[length(W)+1])
results_updated[,3]<-c(results[,3],q)
results<-results_updated
}
for(i in 1:3) results[,i]<- as.numeric(results[,i])
colnames(results)<-c("T(Sn)","Pr(>|z|)", "q")
if(length(W)>1){rownames(results)<-c(W,"Joint.Test")
}else rownames(results)<-W
object_perm<-list()
object_perm$results<-results
object_perm$test.statistic<-test.statistic
object_perm$Z<- W_z[,"z"] #running variable
object_perm$rv<- z #name of running variable
object_perm$cutoff<- cutoff #cutoff
if(q_type=="rot"){
object_perm$q_type<-"Rule of Thumb"
}
else if(q_type=="arot"){
object_perm$q_type<-"Alternative Rule of Thumb"
}
else{object_perm$q_type<- "Defined by User"}
object_perm$n_perm<- n.perm #number of permutations
object_perm$data<-data
class(object_perm)<-"RDperm"
return(object_perm)
}
RDperm.base<-function(W,W_left, n_left, W_right, z, q, n.perm, test.statistic){
#Step 1 & 2. Compute the order statistics of Z and the associated values of W
# q closest W from the left/right of threshold
# Equation (10)
W_left_q<-base::subset(W_left[(n_left-q+1):n_left,], select=c(W))
Z_left<-base::subset(W_left[(n_left-q+1):n_left,], select=c(z))
W_right_q<-base::subset(W_right[1:q,], select=c(W))
Z_right <-base::subset(W_right[1:q,], select=c(z))
Sn<-rbind(W_left_q,W_right_q)
if(test.statistic=="CvM"){
#Step 3. Compute the test statistic
test_statistic.obs<-apply(Sn,2,CvM.stat)
if(length(W)>1){
n.test_statistic.obs<-names(test_statistic.obs)
K<-length(W)
c<-C.unitsphere(K)
cS<-as.matrix(Sn)%*%c
TSn.joint<-max(apply(cS,2,calc_stat.CvM))
test_statistic.obs<-c(test_statistic.obs,TSn.joint)
names(test_statistic.obs)<-c(n.test_statistic.obs,"joint")
}
#Step 4. Generate random permutations
sample.indexes = lapply(1:n.perm, function(x) sample(1:(2*q)))
S_perm_list<-lapply(sample.indexes,function(x,db) {db[x,]},Sn)
calc_stat_res<-lapply(S_perm_list,calc_stat.CvM)
#Step 6. Compute the p-value of the test
test_statistic<-"CvM"
ind.rule<-lapply(calc_stat_res,function(x,CvM) {ifelse(x>=CvM,1,0)},test_statistic.obs)
ind.rule<-do.call(cbind,ind.rule)
ind.rule<-rowMeans(ind.rule)
} else{"Need to generate Kologorov Statistic"}
object_perm<-list()
object_perm$test_statistic.obs<-test_statistic.obs
object_perm$pvalues<-ind.rule
object_perm$q<- q #q
#object_perm$S<- S_perm
object_perm$S<- Sn
object_perm$S_perm<- S_perm_list
class(object_perm)<-"RDperm"
return(object_perm)
}
calc_stat.CvM<-function(x){
if(is.vector(x)==T){
stat<-CvM.stat(x)
}
else {
stat<-apply(x,2,CvM.stat)
n.stat<-names(stat)
K<-dim(x)[2]
c<-C.unitsphere(K)
cS<-as.matrix(x)%*%c
TSn.joint<-max(apply(cS,2,calc_stat.CvM))
stat<-c(stat,TSn.joint)
names(stat)<-c(n.stat,"joint")
}
return(stat)
}
aqrot<-function(w,W_z){
w<-W_z[,w]
z<-W_z[,"z"]
N<-length(w)
t <- seq(from=min(z),to=max(z),length.out = 2*N)
f <- quantreg::akj(z,t)$dens
t0 <- which(abs(t-0)==min(abs(t-0)))
f0 <- f[t0]
q<-ceiling(f0*var(z)*sqrt((1-cor(z,w)^2))*(N^{0.9}/log(N)))
if(q<10){
q<-10
}else if(q>N^(0.9)/log(N)){
q<-ceiling(N^(0.9)/log(N))
} else {
q<-q
}
}
qrot<-function(w,W_z){
w<-W_z[,w]
z<-W_z[,"z"]
N<-length(w)
t <- seq(from=min(z),to=max(z),length.out = 2*N)
f <- quantreg::akj(z,t)$dens
t0 <- which(abs(t-0)==min(abs(t-0)))
f0 <- f[t0]
q<-ceiling(f0*var(z)*sqrt(10*(1-cor(z,w)^2))*(N^{0.75}/log(N)))
if(q<10){
q<-10
}else if(q>N^(0.9)/log(N)){
q<-ceiling(N^(0.9)/log(N))
} else {
q<-q
}
}
C.unitsphere <- function(K){
# Store Matrix
C <- matrix(NA,nrow = K,ncol=100-K)
# Fill the matrix with random numbers in [-1,1]
C <- apply(C,2,function(x) stats::runif(x,-1,1))
# Normalize it so each column c of C is such that ||c||=1
C <- apply(C,2,function(x) x/sum(x))
# Return C and the K canonical elemnts (vectors with zeros in
# all coordinates except for one)
return(cbind(C,diag(K)))
}
|
#[export]
# Build a design matrix from x by dispatching to Rfast's compiled routines:
# the plain variant for dimensionless input (a single vector/factor, where
# `ones` requests an intercept column), or the "big" variant for
# matrix/data.frame input (which ignores `ones`).
design_matrix <- function(x, ones = TRUE) {
  if (is.null(dim(x))) {
    return(.Call(Rfast_design_matrix, x, ones))
  }
  .Call(Rfast_design_matrix_big, x)
} | /fuzzedpackages/Rfast/R/design_matrix.R | no_license | akhikolla/testpackages | R | false | false | 161 | r | #[export]
design_matrix <- function(x,ones=TRUE) {
if(is.null(dim(x))){
return(.Call(Rfast_design_matrix,x,ones))
}
.Call(Rfast_design_matrix_big,x)
} |
## Plot 1 (Exploratory Data Analysis assignment): histogram of household
## Global Active Power restricted to 2007-02-01 and 2007-02-02.
file <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")
x <- file$Date == "1/2/2007"
y <- file$Date == "2/2/2007"
file1 <- file[x, ]
# BUG FIX: the second day previously reused the first day's mask (x), so the
# Feb 1 rows were duplicated and Feb 2 was dropped entirely.
file2 <- file[y, ]
file <- rbind(file1, file2)
par(mfrow = c(1, 1))
par(mar = c(4, 4, 1, 1))
task1 <- file$Global_active_power
# Convert via character first: if the column was read as a factor (or
# contains "?" missing markers), as.numeric() alone would return the factor
# level codes instead of the measured values.
task1 <- as.numeric(as.character(task1))
hist(task1, col = "red", main = "Global Active Power", xlab = "Global Active Power(kilowatts)")
dev.copy(png, file = "Plot1.png")
dev.off() | /plot1.R | no_license | HamzaJamil100/Assignment | R | false | false | 459 | r | file <- read.csv("household_power_consumption.txt",header = TRUE, sep = ";")
x <- file$Date == "1/2/2007"
y <- file$Date == "2/2/2007"
file1 <- file[x,]
file2 <- file[x,]
file <- rbind(file1,file2)
par(mfrow = c(1,1))
par(mar = c(4,4,1,1))
task1 <- file$Global_active_power
task1 <- as.numeric(task1)
hist(task1, col = "red", main = "Global Active Power", xlab = "Global Active Power(kilowatts)")
dev.copy(png, file = "Plot1.png")
dev.off() |
\name{lightspeed}
\alias{lightspeed}
\docType{data}
\title{ Simon Newcomb's measurements of the speed of light }
\description{
Simon Newcomb measured in the late 1800s the time it took light to cover a
certain distance. The data are reported in Stigler (1977) and have
been widely used since to illustrate statistical inference.
}
\usage{data(lightspeed)}
\format{
A vector with 66 observations of the travel time of light.
}
\source{
Stigler, S.M. (1977) Do robust estimators work with real data? Annals
of Statistics 5, 1055--1098.
}
\references{
van Hulst, R. 2018. Evaluating Scientific Evidence. ms.
}
\examples{
data(lightspeed)
qqnorm(lightspeed)
qqline(lightspeed)
}
\keyword{datasets}
| /man/lightspeed.Rd | no_license | cran/evidence | R | false | false | 713 | rd | \name{lightspeed}
\alias{lightspeed}
\docType{data}
\title{ Simon Newcomb's measurements of the speed of light }
\description{
Simon Newcom's measured in the late 1900's the time it took light to cover a
certain distance. The data are reported in Stigler(1977) and have
been widely used since to illustrate statistical inference.
}
\usage{data(lightspeed)}
\format{
A vector with 66 observations of the travel time of light.
}
\source{
Stigler, S.M. (1977) Do robust estimators work with real data? Annals
of Statistics 5, 1055--1098.
}
\references{
van Hulst, R. 2018. Evaluating Scientific Evidence. ms.
}
\examples{
data(lightspeed)
qqnorm(lightspeed)
qqline(lightspeed)
}
\keyword{datasets}
|
# Interactive self-test GUI (gWidgets/tcltk): presents a multiple-choice
# question about when Type II/III sums of squares are appropriate and pops
# up "Correct"/"Incorrect" message boxes as the user checks answers.
selftest.typeIISS.tck1 <-function(){
options(guiToolkit="tcltk")
# Top-level scrollable window holding all questions
w <- gwindow(title = "Type II and III SS")
size(w) <- c(700, 950)
g <- ggroup(container=w, horizontal=FALSE, use.scrollwindow = TRUE)
#------------- Question 1 -----------#
gp1 <- gframe(container = g, spacing = 2, pos = 0, horizontal = FALSE)
gp1.1 <- ggroup(container = gp1, spacing = 2, pos = 0, horizontal = TRUE)
q <- glabel("1) ", container = gp1.1, horizontal = TRUE)
font(q) <- list(weight = "bold")
qq <- glabel("We would use type II or III sums of squares whenever...", container = gp1.1, anchor = c(-1,1))
font(qq) <- list(family = "cambria", size = 11)
# Answer options; (d) is the correct one
ans1 <- c("(a) We have an unbalanced one way ANOVA format.",
"(b) We have a quantitative predictor.",
"(c) We have a balanced multiway ANOVA format.",
"(d) We have either an unbalanced multiway ANOVA format, or some other multiple X format with at least one \n quantitative X variable.")
# Handler fired on every (un)check: grade the most recently selected
# answer (tail of the current selection), then clear the selection
f1 <- function(h,....){
if(tail(svalue(r1),1) == ans1[1]){
gmessage(msg="Incorrect", icon = "error")
}
if(tail(svalue(r1),1)== ans1[2]){
gmessage(msg="Incorrect", icon = "error")
}
if(tail(svalue(r1),1)== ans1[3]){
gmessage(msg="Incorrect", icon = "error")
}
if(tail(svalue(r1),1)== ans1[4]){
gmessage(msg="Correct")
}
svalue(r1) <- character(0)
}
r1 <- gcheckboxgroup(ans1, container = gp1, checked = FALSE, where = "beginning", handler = f1)
} | /R/selftest.typeIISS.R | no_license | cran/asbio | R | false | false | 1,564 | r | selftest.typeIISS.tck1 <-function(){
options(guiToolkit="tcltk")
w <- gwindow(title = "Type II and III SS")
size(w) <- c(700, 950)
g <- ggroup(container=w, horizontal=FALSE, use.scrollwindow = TRUE)
#------------- Question 1 -----------#
gp1 <- gframe(container = g, spacing = 2, pos = 0, horizontal = FALSE)
gp1.1 <- ggroup(container = gp1, spacing = 2, pos = 0, horizontal = TRUE)
q <- glabel("1) ", container = gp1.1, horizontal = TRUE)
font(q) <- list(weight = "bold")
qq <- glabel("We would use type II or III sums of squares whenever...", container = gp1.1, anchor = c(-1,1))
font(qq) <- list(family = "cambria", size = 11)
ans1 <- c("(a) We have an unbalanced one way ANOVA format.",
"(b) We have a quantitative predictor.",
"(c) We have a balanced multiway ANOVA format.",
"(d) We have either an unbalanced multiway ANOVA format, or some other multiple X format with at least one \n quantitative X variable.")
f1 <- function(h,....){
if(tail(svalue(r1),1) == ans1[1]){
gmessage(msg="Incorrect", icon = "error")
}
if(tail(svalue(r1),1)== ans1[2]){
gmessage(msg="Incorrect", icon = "error")
}
if(tail(svalue(r1),1)== ans1[3]){
gmessage(msg="Incorrect", icon = "error")
}
if(tail(svalue(r1),1)== ans1[4]){
gmessage(msg="Correct")
}
svalue(r1) <- character(0)
}
r1 <- gcheckboxgroup(ans1, container = gp1, checked = FALSE, where = "beginning", handler = f1)
} |
# Auto-generated fuzz-test harness: feeds a fixed integer byte vector into
# mcga's internal ByteVectorToDoubles converter and inspects the result.
testlist <- list(b = c(-1667466323L, -1667457892L, 67964173L, 67964173L, -1835887972L))
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) | /mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613106910-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 155 | r | testlist <- list(b = c(-1667466323L, -1667457892L, 67964173L, 67964173L, -1835887972L))
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtf_clean.R
\name{dtf_clean}
\alias{dtf_clean}
\title{Data cleanup}
\usage{
dtf_clean(x, header = TRUE, na.strings = c("NA", "N/A"),
stringsAsFactors = FALSE, ...)
}
\arguments{
\item{x}{a messy table the form of a character string}
\item{header}{does the table include headers? (default TRUE)}
\item{na.strings}{a vector of character strings which will be interpreted
as missing values}
\item{stringsAsFactors}{should strings be read as factors? (default FALSE)}
\item{...}{further arguments passed to \code{read.table}}
}
\description{
Create a data.frame from a messy table
}
\examples{
\dontrun{
x1 <- "
+------------+------+------+----------+--------------------------+
| Date | Emp1 | Case | Priority | PriorityCountinLast7days |
+------------+------+------+----------+--------------------------+
| 2018-06-01 | A | A1 | 0 | 0 |
| 2018-06-03 | A | A2 | 0 | 1 |
| 2018-06-02 | B | B2 | 0 | 2 |
| 2018-06-03 | B | B3 | 0 | 3 |
+------------+------+------+----------+--------------------------+
"
x2 <- '
------------------------------------------------------------------
| Date | Emp1 | Case | Priority | PriorityCountinLast7days |
------------------------------------------------------------------
| 2018-06-01 | A | "A 1" | 0 | 0 |
| 2018-06-03 | A | "A 2" | 0 | 1 |
| 2018-06-02 | B | "B 2" | 0 | 2 |
| 2018-06-03 | B | "B 3" | 0 | 3 |
------------------------------------------------------------------
'
x3 <- "
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
Date | Emp1 | Case | Priority | PriorityCountinLast7days
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
2018-06-01 | A | A|1 | 0 | 0
2018-06-03 | A | A|2 | 0 | 1
2018-06-02 | B | B|2 | 0 | 2
2018-06-03 | B | B|3 | 0 | 3
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
"
x4 <- "
Maths | English | Science | History | Class
0.1 | 0.2 | 0.3 | 0.2 | Y2
0.9 | 0.5 | 0.7 | 0.4 | Y1
0.2 | 0.4 | 0.6 | 0.2 | Y2
0.9 | 0.5 | 0.2 | 0.7 | Y1
"
x5 <- "
Season | Team | W | AHWO
-------------------------------------
1 | 2017/2018 | TeamA | 2 | 1.75
2 | 2017/2018 | TeamB | 1 | 1.85
3 | 2017/2018 | TeamC | 1 | 1.70
4 | 2017/2018 | TeamD | 0 | 3.10
5 | 2016/2017 | TeamA | 1 | 1.49
6 | 2016/2017 | TeamB | 3 | 1.51
7 | 2016/2017 | TeamC | 2 | 1.90
8 | 2016/2017 | TeamD | 0 | N/A
"
lapply(c(x1, x2, x3, x4), dtf_clean)
}
}
| /man/dtf_clean.Rd | no_license | AkselA/R-ymse | R | false | true | 3,353 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dtf_clean.R
\name{dtf_clean}
\alias{dtf_clean}
\title{Data cleanup}
\usage{
dtf_clean(x, header = TRUE, na.strings = c("NA", "N/A"),
stringsAsFactors = FALSE, ...)
}
\arguments{
\item{x}{a messy table the form of a character string}
\item{header}{does the table include headers? (default TRUE)}
\item{na.strings}{a vector of character strings which will be interpreted
as missing values}
\item{stringsAsFactors}{should strings be read as factors? (default FALSE)}
\item{...}{further arguments passed to \code{read.table}}
}
\description{
Create a data.frame from a messy table
}
\examples{
\dontrun{
x1 <- "
+------------+------+------+----------+--------------------------+
| Date | Emp1 | Case | Priority | PriorityCountinLast7days |
+------------+------+------+----------+--------------------------+
| 2018-06-01 | A | A1 | 0 | 0 |
| 2018-06-03 | A | A2 | 0 | 1 |
| 2018-06-02 | B | B2 | 0 | 2 |
| 2018-06-03 | B | B3 | 0 | 3 |
+------------+------+------+----------+--------------------------+
"
x2 <- '
------------------------------------------------------------------
| Date | Emp1 | Case | Priority | PriorityCountinLast7days |
------------------------------------------------------------------
| 2018-06-01 | A | "A 1" | 0 | 0 |
| 2018-06-03 | A | "A 2" | 0 | 1 |
| 2018-06-02 | B | "B 2" | 0 | 2 |
| 2018-06-03 | B | "B 3" | 0 | 3 |
------------------------------------------------------------------
'
x3 <- "
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
Date | Emp1 | Case | Priority | PriorityCountinLast7days
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
2018-06-01 | A | A|1 | 0 | 0
2018-06-03 | A | A|2 | 0 | 1
2018-06-02 | B | B|2 | 0 | 2
2018-06-03 | B | B|3 | 0 | 3
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––
"
x4 <- "
Maths | English | Science | History | Class
0.1 | 0.2 | 0.3 | 0.2 | Y2
0.9 | 0.5 | 0.7 | 0.4 | Y1
0.2 | 0.4 | 0.6 | 0.2 | Y2
0.9 | 0.5 | 0.2 | 0.7 | Y1
"
x5 <- "
Season | Team | W | AHWO
-------------------------------------
1 | 2017/2018 | TeamA | 2 | 1.75
2 | 2017/2018 | TeamB | 1 | 1.85
3 | 2017/2018 | TeamC | 1 | 1.70
4 | 2017/2018 | TeamD | 0 | 3.10
5 | 2016/2017 | TeamA | 1 | 1.49
6 | 2016/2017 | TeamB | 3 | 1.51
7 | 2016/2017 | TeamC | 2 | 1.90
8 | 2016/2017 | TeamD | 0 | N/A
"
lapply(c(x1, x2, x3, x4), dtf_clean)
}
}
|
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -9.65563814576451e-63, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) | /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615832197-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 2,048 | r | testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 1.53632495265886e-311, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -9.65563814576451e-63, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, 
-8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/portal_data.R
\docType{data}
\name{portal_data}
\alias{portal_data}
\title{Portal Project rodent capture survey data}
\format{
A dataframe containing the following fields:
\describe{
\item{moon}{time of sampling in lunar cycles}
\item{DM}{Total captures of species DM}
\item{DO}{Total captures of species DO}
\item{PP}{Total captures of species PP}
\item{OT}{Total captures of species OT}
\item{year}{Sampling year}
\item{month}{Sampling month}
\item{mintemp}{Monthly mean minimum temperature}
\item{precipitation}{Monthly mean precipitation}
\item{ndvi}{Monthly mean Normalised Difference Vegetation Index}
}
}
\source{
\url{https://www.weecology.org/data-projects/portal/}
}
\usage{
portal_data
}
\description{
A dataset containing timeseries of select rodent species captures in control plots from the Portal Project
}
\keyword{datasets}
| /man/portal_data.Rd | permissive | nicholasjclark/mvgam | R | false | true | 919 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/portal_data.R
\docType{data}
\name{portal_data}
\alias{portal_data}
\title{Portal Project rodent capture survey data}
\format{
A dataframe containing the following fields:
\describe{
\item{moon}{time of sampling in lunar cycles}
\item{DM}{Total captures of species DM}
\item{DO}{Total captures of species DO}
\item{PP}{Total captures of species PP}
\item{OT}{Total captures of species OT}
\item{year}{Sampling year}
\item{month}{Sampling month}
\item{mintemp}{Monthly mean minimum temperature}
\item{precipitation}{Monthly mean precipitation}
\item{ndvi}{Monthly mean Normalised Difference Vegetation Index}
}
}
\source{
\url{https://www.weecology.org/data-projects/portal/}
}
\usage{
portal_data
}
\description{
A dataset containing timeseries of select rodent species captures in control plots from the Portal Project
}
\keyword{datasets}
|
here::i_am("R/barchart.R")
data(mtcars)
colors <- c('red','blue','green')
tab <- table(mtcars$cyl, mtcars$gear)
x_names <- paste0("Type ", 1:3)
main <- "Distribution of Gears vs Cylinders"
xlab <- "Gear Types"
png(here::here("figs", "barchart.png"))
barplot(tab,
main = main,
names.arg = x_names,
xlab = xlab,
ylab = "Frequency",
col = colors)
legend(x = "topright",
legend = rownames(tab),
fill = colors)
dev.off() | /R/barchart.R | no_license | stando2/example_project | R | false | false | 489 | r | here::i_am("R/barchart.R")
data(mtcars)
colors <- c('red','blue','green')
tab <- table(mtcars$cyl, mtcars$gear)
x_names <- paste0("Type ", 1:3)
main <- "Distribution of Gears vs Cylinders"
xlab <- "Gear Types"
png(here::here("figs", "barchart.png"))
barplot(tab,
main = main,
names.arg = x_names,
xlab = xlab,
ylab = "Frequency",
col = colors)
legend(x = "topright",
legend = rownames(tab),
fill = colors)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energybalance_functions.R
\name{heat_transfer_coefficient}
\alias{heat_transfer_coefficient}
\title{Calculate heat transfer coefficient (based on Mitchell 1976)
(Uses Table I: Convective Heat Transfer Relations for Animal Shapes)}
\usage{
heat_transfer_coefficient(V, D, K, nu, taxa = "cylinder")
}
\arguments{
\item{V}{Air velocity m/s.}
\item{D}{Characteristic dimension (e.g., diameter or snout-vent length) in meters.}
\item{K}{Thermal conductivity of air, W m^-1 K^-1, can calculate using DRYAIR or WETAIR in NicheMapR}
\item{nu}{Kinematic viscosity of air, m^2 s^-1, can be calculated using DRYAIR or WETAIR in NicheMapR}
\item{taxa}{Which class of organism, current choices: sphere,cylinder,frog,lizard_surface,lizard_elevated,flyinginsect,spider}
}
\value{
heat transfer coefficient, H_L (W m^-2 K^-1)
}
\description{
Calculate heat transfer coefficient (based on Mitchell 1976)
(Uses Table I: Convective Heat Transfer Relations for Animal Shapes)
}
\details{
This function allows you estimate the heat transfer coefficient for various taxa (based on Mitchell 1976). Based on empirical measurements.
}
\examples{
\dontrun{
heat_transfer_coefficient(V=0.5,D=0.05,K= 25.7 * 10^(-3),nu= 15.3 * 10^(-6), "cylinder")
}
}
\keyword{coefficient}
\keyword{heat}
\keyword{transfer}
| /man/heat_transfer_coefficient.Rd | permissive | Brybrio/TrenchR | R | false | true | 1,360 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/energybalance_functions.R
\name{heat_transfer_coefficient}
\alias{heat_transfer_coefficient}
\title{Calculate heat transfer coefficient (based on Mitchell 1976)
(Uses Table I: Convective Heat Transfer Relations for Animal Shapes)}
\usage{
heat_transfer_coefficient(V, D, K, nu, taxa = "cylinder")
}
\arguments{
\item{V}{Air velocity m/s.}
\item{D}{Characteristic dimension (e.g., diameter or snout-vent length) in meters.}
\item{K}{Thermal conductivity of air, W m^-1 K^-1, can calculate using DRYAIR or WETAIR in NicheMapR}
\item{nu}{Kinematic Viscocity of air, m^2 s^-1, can calculate using DRYAIR or WETAIR in NicheMapR}
\item{taxa}{Which class of organism, current choices: sphere,cylinder,frog,lizard_surface,lizard_elevated,flyinginsect,spider}
}
\value{
heat transfer coefficient, H_L (W m^-2 K^-1)
}
\description{
Calculate heat transfer coefficient (based on Mitchell 1976)
(Uses Table I: Convective Heat Transfer Relations for Animal Shapes)
}
\details{
This function allows you estimate the heat transfer coefficient for various taxa (based on Mitchell 1976). Based on empirical measurements.
}
\examples{
\dontrun{
heat_transfer_coefficient(V=0.5,D=0.05,K= 25.7 * 10^(-3),nu= 15.3 * 10^(-6), "cylinder")
}
}
\keyword{coefficient}
\keyword{heat}
\keyword{transfer}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flex_functions.R
\name{flex.multiTabPlot}
\alias{flex.multiTabPlot}
\title{Generate multi-tab ggplot handle list for flexdashboard}
\usage{
flex.multiTabPlot(plt.list, plt.list.name, fig.width = 5, fig.height = 5)
}
\arguments{
\item{plt.list}{list of ggplot handles}
\item{plt.list.name}{list name}
\item{fig.width}{Numeric. Figure width. Default is 5.}
\item{fig.height}{Numeric. Figure height. Default is 5.}
}
\value{
flexdashboard-compatible list of plots
}
\description{
Prepares list of ggplot handles for multi-tab plot presentation in flexdashboards.
}
| /man/flex.multiTabPlot.Rd | permissive | NMikolajewicz/scMiko | R | false | true | 642 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flex_functions.R
\name{flex.multiTabPlot}
\alias{flex.multiTabPlot}
\title{Generate multi-tab ggplot handle list for flexdashboard}
\usage{
flex.multiTabPlot(plt.list, plt.list.name, fig.width = 5, fig.height = 5)
}
\arguments{
\item{plt.list}{list of ggplot handles}
\item{plt.list.name}{list name}
\item{fig.width}{Numeric. Figure width. Default is 5.}
\item{fig.height}{Numeric. Figure height. Default is 5.}
}
\value{
flexdashboard-compatible list of plots
}
\description{
Prepares list of ggplot handles for multi-tab plot presentation in flexdashboards.
}
|
# Візуалізації 1-2
library (tidyverse)
merge_year<-data.frame()
pgo_year<- summarise(group_by(pgo_raw, Year),
QNT=sum(ACC),
Type="обліковано проваджень")
merge_year<-pgo_year
pgo_year<- summarise(group_by(pgo_raw, Year),
QNT=sum(INDICM,REL,MED,EDU),
Type="проваджень направлено до суду")
merge_year<-rbind(merge_year,pgo_year)
court_year<- summarise(group_by(court_raw, Year),
QNT=sum(CONVIC),
Type="засуджено осіб")
merge_year<-rbind(merge_year,court_year)
gr<-ggplot(merge_year,
aes(x=factor(Year), y=QNT,
fill=factor(Type, levels=c("засуджено осіб",
"проваджень направлено до суду",
"обліковано проваджень") )))
gr+geom_histogram(stat="identity")+scale_fill_brewer(palette = "Set1")+
labs(title = "Обліковані правопорушення, провадження направлені до суду,
кількість засуджених")+
theme (axis.title=element_blank(),
legend.title = element_blank(), legend.position="bottom")
mysave (vis)
vis<-vis+1
require(scales)
point <- format_format(big.mark = " ", decimal.mark = ",", scientific = FALSE)
mean_merge<-summarise(group_by(merge_year,Type),
mean_v=mean(QNT))
gr2<-ggplot (merge_year, aes(x=factor(Year), y=QNT,fill=Type))
gr2+geom_histogram(stat="identity",show.legend = FALSE)+
scale_fill_brewer(palette = "Set1")+
geom_hline(data = mean_merge, color="chartreuse", show.legend = FALSE,
aes(yintercept=mean_v,
size=1,
alpha=1/2))+
geom_text(data=mean_merge,aes(x=7,
y=mean_v, label=round (mean_v)))+
facet_grid(.~factor(Type,levels = c("обліковано проваджень",
"проваджень направлено до суду",
"засуджено осіб")))+
labs(title =
"Обліковані правопорушення, провадження направлені до суду,кількість засуджених")+
theme(axis.text=element_text(), axis.title=element_blank())+
scale_y_continuous(labels = point)
mysave (vis)
vis<-vis+1
| /plot1-2.R | no_license | Nickolay78/Combating-crime-in-Ukraine-2013-2020 | R | false | false | 2,575 | r | # Візуалізації 1-2
library (tidyverse)
merge_year<-data.frame()
pgo_year<- summarise(group_by(pgo_raw, Year),
QNT=sum(ACC),
Type="обліковано проваджень")
merge_year<-pgo_year
pgo_year<- summarise(group_by(pgo_raw, Year),
QNT=sum(INDICM,REL,MED,EDU),
Type="проваджень направлено до суду")
merge_year<-rbind(merge_year,pgo_year)
court_year<- summarise(group_by(court_raw, Year),
QNT=sum(CONVIC),
Type="засуджено осіб")
merge_year<-rbind(merge_year,court_year)
gr<-ggplot(merge_year,
aes(x=factor(Year), y=QNT,
fill=factor(Type, levels=c("засуджено осіб",
"проваджень направлено до суду",
"обліковано проваджень") )))
gr+geom_histogram(stat="identity")+scale_fill_brewer(palette = "Set1")+
labs(title = "Обліковані правопорушення, провадження направлені до суду,
кількість засуджених")+
theme (axis.title=element_blank(),
legend.title = element_blank(), legend.position="bottom")
mysave (vis)
vis<-vis+1
require(scales)
point <- format_format(big.mark = " ", decimal.mark = ",", scientific = FALSE)
mean_merge<-summarise(group_by(merge_year,Type),
mean_v=mean(QNT))
gr2<-ggplot (merge_year, aes(x=factor(Year), y=QNT,fill=Type))
gr2+geom_histogram(stat="identity",show.legend = FALSE)+
scale_fill_brewer(palette = "Set1")+
geom_hline(data = mean_merge, color="chartreuse", show.legend = FALSE,
aes(yintercept=mean_v,
size=1,
alpha=1/2))+
geom_text(data=mean_merge,aes(x=7,
y=mean_v, label=round (mean_v)))+
facet_grid(.~factor(Type,levels = c("обліковано проваджень",
"проваджень направлено до суду",
"засуджено осіб")))+
labs(title =
"Обліковані правопорушення, провадження направлені до суду,кількість засуджених")+
theme(axis.text=element_text(), axis.title=element_blank())+
scale_y_continuous(labels = point)
mysave (vis)
vis<-vis+1
|
# ---------------------------
library(ggplot2)
# ---------------------------
DATA_DIR <- c("../../data/test/human-machine-pairwise_agreement/with_unison/",
"../../data/test/human-machine-pairwise_agreement/without_unison/")
WITH_UNISON = c(TRUE, FALSE)
OUTPUT_DIR <- "./"
OUTPUT_FILEID <- "notelength"
G_POINTSIZE <- 0.5
TITLE_TEXTSIZE <- 18
YAXIS_TITLE_TEXTSIZE <- 14
STRIP_TEXTSIZE <- 14
AXIS_TEXTSIZE <- 12
LEGEND_TEXTSIZE <- 13
FIG_WID <- 8.09 * 2
FIG_HEI <- 5.00
# ---------------------------
SONG_STYLE <- read.csv("../../song-style.csv", header = TRUE, sep = ",")
# ---------------------------
notelen_m.frame <- data.frame(song = character(), style = character(), transcriber = character(), length = numeric(), unison = logical())
n <- 0
notelen_h.frame <- data.frame(song = character(), style = character(), transcriber = character(), length = numeric(), unison = logical())
m <- 0
# ---------------------------
for (i in 1:length(DATA_DIR)) {
SONG_DIR <- list.files(DATA_DIR[i], full.names = TRUE)
SONG_NAME <- unlist(lapply(SONG_DIR, function(x) strsplit(x, "/")[[1]][7]))
# ---------------------------
for (j in 1:length(SONG_DIR)) {
noteseq_m_file <- list.files(paste(SONG_DIR[j], "/machine/", sep = ""), full.names = TRUE)
MACHINE <- unlist(lapply(unlist(lapply(noteseq_m_file, function(x) strsplit(x, "/")[[1]][9])), function(x) strsplit(x, "_")[[1]][1]))
noteseq_h_file <- list.files(paste(SONG_DIR[j], "/human/", sep = ""), full.names = TRUE)
HUMAN <- unlist(lapply(unlist(lapply(noteseq_h_file, function(x) strsplit(x, "/")[[1]][9])), function(x) strsplit(x, "_")[[1]][1]))
STYLE <- SONG_STYLE[SONG_STYLE$songname == SONG_NAME[j], ]$style
# ---------------------------
for (k in 1:length(MACHINE)) {
notelen_m.frame[n + 1, ]$song <- SONG_NAME[j]
notelen_m.frame[n + 1, ]$style <- STYLE
notelen_m.frame[n + 1, ]$transcriber <- MACHINE[k]
notelen_m.frame[n + 1, ]$unison <- WITH_UNISON[i]
if (file.info(noteseq_m_file[k])$size == 0) {
notelen_m.frame[n + 1, ]$length <- 0
} else {
noteseq <- read.csv(noteseq_m_file[k], header = FALSE, sep = ",")
notelen_m.frame[n + 1, ]$length <- dim(noteseq)[1]
}
n <- n + 1
}
for (k in 1:length(HUMAN)) {
notelen_h.frame[m + 1, ]$song <- SONG_NAME[j]
notelen_h.frame[m + 1, ]$style <- STYLE
notelen_h.frame[m + 1, ]$transcriber <- HUMAN[k]
notelen_h.frame[m + 1, ]$unison <- WITH_UNISON[i]
if (file.info(noteseq_h_file[k])$size == 0) {
notelen_h.frame[m + 1, ]$length <- 0
} else {
noteseq <- read.csv(noteseq_h_file[k], header = FALSE, sep = ",")
notelen_h.frame[m + 1, ]$length <- dim(noteseq)[1]
}
m <- m + 1
}
}
}
# ---------------------------
notelen_m.frame$transcriber <- toupper(notelen_m.frame$transcriber)
notelen_m.frame[grepl(pattern = "TONY [(]FRAME[)]", x = notelen_m.frame$transcriber), ]$transcriber <- "pYIN"
notelen_m.frame[grepl(pattern = "TONY [(]NOTE[)]", x = notelen_m.frame$transcriber), ]$transcriber <- "TONY"
notelen_m.frame[grepl(pattern = "MELODIA", x = notelen_m.frame$transcriber), ]$transcriber <- "Melodia"
notelen_m.frame[grepl(pattern = "SS-PNN", x = notelen_m.frame$transcriber), ]$transcriber <- "SS-nPNN"
notelen_m.frame[grepl(pattern = "MADMOM", x = notelen_m.frame$transcriber), ]$transcriber <- "madmom"
notelen_h.frame[grepl(pattern = "Cons", x = notelen_h.frame$transcriber), ]$transcriber <- "Consensus"
# ---------------------------
for (i in 1:length(WITH_UNISON)) {
notelen_m.frame_i <- notelen_m.frame[notelen_m.frame$unison == WITH_UNISON[i], ]
notelen_h.frame_i <- notelen_h.frame[notelen_h.frame$unison == WITH_UNISON[i], ]
HUMAN <- unique(notelen_h.frame_i$transcriber)
STYLE <- unique(notelen_m.frame_i$style)
if (WITH_UNISON[i]) {
figuretype <- "\"unison\""
figuretype_file <- " (with unison)"
} else {
figuretype <- "\"non-unison\""
figuretype_file <- " (without unison)"
}
# ---------------------------
SONG <- unique(notelen_m.frame_i$song)
SONG_ID <- format(1:length(SONG), digits = 2, nsmall = 0)
for (j in 1:length(SONG)) {
notelen_m.frame_i[notelen_m.frame_i$song == SONG[j], ]$song <- SONG_ID[j]
notelen_h.frame_i[notelen_h.frame_i$song == SONG[j], ]$song <- SONG_ID[j]
}
# ---------------------------
for (j in 1:length(HUMAN)) {
notelen_h.frame_i <- notelen_h.frame_i[notelen_h.frame_i$transcriber == HUMAN[j], ]
# ---------------------------
g <- ggplot()
g <- g + geom_line(data = notelen_m.frame_i, aes(x = song, y = length, group = transcriber, colour = transcriber))
g <- g + geom_point(data = notelen_m.frame_i, aes(x = song, y = length, group = transcriber, colour = transcriber), size = G_POINTSIZE)
g <- g + geom_line(data = notelen_h.frame_i, aes(x = song, y = length, group = transcriber), colour = "black", linetype = "longdash")
g <- g + geom_point(data = notelen_h.frame_i, aes(x = song, y = length, group = transcriber), colour = "black", size = G_POINTSIZE)
# ---------------------------
g <- g + facet_grid(. ~ style, scales = "free_x")
# ---------------------------
g <- g + xlab("Song ID") + ylab("Sequence length")
g <- g + theme(axis.title.x = element_text(size = YAXIS_TITLE_TEXTSIZE), axis.title.y = element_text(size = YAXIS_TITLE_TEXTSIZE))
g <- g + theme(axis.text.x = element_text(size = AXIS_TEXTSIZE), axis.text.y = element_text(size = AXIS_TEXTSIZE))
g <- g + theme(strip.text.x = element_text(size = STRIP_TEXTSIZE))
g <- g + theme(legend.text = element_text(size = LEGEND_TEXTSIZE), legend.title = element_text(size = LEGEND_TEXTSIZE)) +
labs(colour = "Automated method")
titletext <- paste("Comparison of note sequence length (vs. ", HUMAN[j], ", ", figuretype, ")", sep = "")
g <- g + ggtitle(titletext)
g <- g + theme(plot.title = element_text(size = TITLE_TEXTSIZE, hjust = 0.5))
# ---------------------------
ggsave(paste(OUTPUT_DIR, OUTPUT_FILEID, "_", HUMAN[j], figuretype_file, ".png", sep = ""),
plot = g, width = FIG_WID, height = FIG_HEI)
}
} | /Scripts/R/utils/plot-machine-notelength/plot-machine-notelength.R | permissive | comp-music-lab/agreement-human-automated | R | false | false | 6,422 | r | # ---------------------------
library(ggplot2)
# ---------------------------
DATA_DIR <- c("../../data/test/human-machine-pairwise_agreement/with_unison/",
"../../data/test/human-machine-pairwise_agreement/without_unison/")
WITH_UNISON = c(TRUE, FALSE)
OUTPUT_DIR <- "./"
OUTPUT_FILEID <- "notelength"
G_POINTSIZE <- 0.5
TITLE_TEXTSIZE <- 18
YAXIS_TITLE_TEXTSIZE <- 14
STRIP_TEXTSIZE <- 14
AXIS_TEXTSIZE <- 12
LEGEND_TEXTSIZE <- 13
FIG_WID <- 8.09 * 2
FIG_HEI <- 5.00
# ---------------------------
SONG_STYLE <- read.csv("../../song-style.csv", header = TRUE, sep = ",")
# ---------------------------
notelen_m.frame <- data.frame(song = character(), style = character(), transcriber = character(), length = numeric(), unison = logical())
n <- 0
notelen_h.frame <- data.frame(song = character(), style = character(), transcriber = character(), length = numeric(), unison = logical())
m <- 0
# ---------------------------
for (i in 1:length(DATA_DIR)) {
SONG_DIR <- list.files(DATA_DIR[i], full.names = TRUE)
SONG_NAME <- unlist(lapply(SONG_DIR, function(x) strsplit(x, "/")[[1]][7]))
# ---------------------------
for (j in 1:length(SONG_DIR)) {
noteseq_m_file <- list.files(paste(SONG_DIR[j], "/machine/", sep = ""), full.names = TRUE)
MACHINE <- unlist(lapply(unlist(lapply(noteseq_m_file, function(x) strsplit(x, "/")[[1]][9])), function(x) strsplit(x, "_")[[1]][1]))
noteseq_h_file <- list.files(paste(SONG_DIR[j], "/human/", sep = ""), full.names = TRUE)
HUMAN <- unlist(lapply(unlist(lapply(noteseq_h_file, function(x) strsplit(x, "/")[[1]][9])), function(x) strsplit(x, "_")[[1]][1]))
STYLE <- SONG_STYLE[SONG_STYLE$songname == SONG_NAME[j], ]$style
# ---------------------------
for (k in 1:length(MACHINE)) {
notelen_m.frame[n + 1, ]$song <- SONG_NAME[j]
notelen_m.frame[n + 1, ]$style <- STYLE
notelen_m.frame[n + 1, ]$transcriber <- MACHINE[k]
notelen_m.frame[n + 1, ]$unison <- WITH_UNISON[i]
if (file.info(noteseq_m_file[k])$size == 0) {
notelen_m.frame[n + 1, ]$length <- 0
} else {
noteseq <- read.csv(noteseq_m_file[k], header = FALSE, sep = ",")
notelen_m.frame[n + 1, ]$length <- dim(noteseq)[1]
}
n <- n + 1
}
for (k in 1:length(HUMAN)) {
notelen_h.frame[m + 1, ]$song <- SONG_NAME[j]
notelen_h.frame[m + 1, ]$style <- STYLE
notelen_h.frame[m + 1, ]$transcriber <- HUMAN[k]
notelen_h.frame[m + 1, ]$unison <- WITH_UNISON[i]
if (file.info(noteseq_h_file[k])$size == 0) {
notelen_h.frame[m + 1, ]$length <- 0
} else {
noteseq <- read.csv(noteseq_h_file[k], header = FALSE, sep = ",")
notelen_h.frame[m + 1, ]$length <- dim(noteseq)[1]
}
m <- m + 1
}
}
}
# ---------------------------
notelen_m.frame$transcriber <- toupper(notelen_m.frame$transcriber)
notelen_m.frame[grepl(pattern = "TONY [(]FRAME[)]", x = notelen_m.frame$transcriber), ]$transcriber <- "pYIN"
notelen_m.frame[grepl(pattern = "TONY [(]NOTE[)]", x = notelen_m.frame$transcriber), ]$transcriber <- "TONY"
notelen_m.frame[grepl(pattern = "MELODIA", x = notelen_m.frame$transcriber), ]$transcriber <- "Melodia"
notelen_m.frame[grepl(pattern = "SS-PNN", x = notelen_m.frame$transcriber), ]$transcriber <- "SS-nPNN"
notelen_m.frame[grepl(pattern = "MADMOM", x = notelen_m.frame$transcriber), ]$transcriber <- "madmom"
notelen_h.frame[grepl(pattern = "Cons", x = notelen_h.frame$transcriber), ]$transcriber <- "Consensus"
# ---------------------------
for (i in 1:length(WITH_UNISON)) {
notelen_m.frame_i <- notelen_m.frame[notelen_m.frame$unison == WITH_UNISON[i], ]
notelen_h.frame_i <- notelen_h.frame[notelen_h.frame$unison == WITH_UNISON[i], ]
HUMAN <- unique(notelen_h.frame_i$transcriber)
STYLE <- unique(notelen_m.frame_i$style)
if (WITH_UNISON[i]) {
figuretype <- "\"unison\""
figuretype_file <- " (with unison)"
} else {
figuretype <- "\"non-unison\""
figuretype_file <- " (without unison)"
}
# ---------------------------
SONG <- unique(notelen_m.frame_i$song)
SONG_ID <- format(1:length(SONG), digits = 2, nsmall = 0)
for (j in 1:length(SONG)) {
notelen_m.frame_i[notelen_m.frame_i$song == SONG[j], ]$song <- SONG_ID[j]
notelen_h.frame_i[notelen_h.frame_i$song == SONG[j], ]$song <- SONG_ID[j]
}
# ---------------------------
for (j in 1:length(HUMAN)) {
notelen_h.frame_i <- notelen_h.frame_i[notelen_h.frame_i$transcriber == HUMAN[j], ]
# ---------------------------
g <- ggplot()
g <- g + geom_line(data = notelen_m.frame_i, aes(x = song, y = length, group = transcriber, colour = transcriber))
g <- g + geom_point(data = notelen_m.frame_i, aes(x = song, y = length, group = transcriber, colour = transcriber), size = G_POINTSIZE)
g <- g + geom_line(data = notelen_h.frame_i, aes(x = song, y = length, group = transcriber), colour = "black", linetype = "longdash")
g <- g + geom_point(data = notelen_h.frame_i, aes(x = song, y = length, group = transcriber), colour = "black", size = G_POINTSIZE)
# ---------------------------
g <- g + facet_grid(. ~ style, scales = "free_x")
# ---------------------------
g <- g + xlab("Song ID") + ylab("Sequence length")
g <- g + theme(axis.title.x = element_text(size = YAXIS_TITLE_TEXTSIZE), axis.title.y = element_text(size = YAXIS_TITLE_TEXTSIZE))
g <- g + theme(axis.text.x = element_text(size = AXIS_TEXTSIZE), axis.text.y = element_text(size = AXIS_TEXTSIZE))
g <- g + theme(strip.text.x = element_text(size = STRIP_TEXTSIZE))
g <- g + theme(legend.text = element_text(size = LEGEND_TEXTSIZE), legend.title = element_text(size = LEGEND_TEXTSIZE)) +
labs(colour = "Automated method")
titletext <- paste("Comparison of note sequence length (vs. ", HUMAN[j], ", ", figuretype, ")", sep = "")
g <- g + ggtitle(titletext)
g <- g + theme(plot.title = element_text(size = TITLE_TEXTSIZE, hjust = 0.5))
# ---------------------------
ggsave(paste(OUTPUT_DIR, OUTPUT_FILEID, "_", HUMAN[j], figuretype_file, ".png", sep = ""),
plot = g, width = FIG_WID, height = FIG_HEI)
}
} |
#read.ms.output -- a function to read in the output of ms.
#
# This function reads in the output of the program ms, storing the
# results in a list of lists.
#
# The function takes a single argument,either a file name or a vector
# of character strings, one string for each line of the output of ms.
# The function returns a list with some of the following components:
# segsites, times, positions, gametes, probs, nsam, nreps
#
# Example usage reading output from a file (assuming an executable ms
# resides in the current working directory):
#
# system("./ms 5 4 -s 5 >ms.out")
# msout <- read.ms.output(file="ms.out")
#
# In which case, msout$gametes[[1]] is a haplotype array for the
# first sample, msout$gametes[[2]] is the haplotype array for the
# second sample, etc. msout$segsites is a vector of the numbers of
# segregating sites in the samples. So, for example,
# mean( msout$segsites ) returns the mean number of segregating sites
# in the set of 4 samples.
#
# Another example usage, this time reading output from a vector of
# character strings:
#
# msout.txt <- system("./ms 5 4 -s 5 -L", intern=TRUE)
# msout <- read.ms.output(msout.txt)
#
# In this case, msout$time[,1] is then the vector of tmrca's of
# the samples and msout$time[,2] is the vector of total tree
# lengths of the samples.
#
# This function is derived from code first written by Dan Davison.
read.ms.output <- function(fnames)
{
hap.pool.list <- list();
for(ii in 1:length(fnames))
{
hap.pool <- ((readms.output(file.ms.output=fnames[ii]))$gametes);
hap.pool.list <- c(hap.pool.list,hap.pool);
}
return(hap.pool.list);
}
readms.output <- function( txt=NA, file.ms.output=NA ) {
if( !is.na(file.ms.output) ) txt <- scan(file=file.ms.output,
what=character(0), sep="\n", quiet=TRUE)
if( is.na(txt[1]) ){
return()
}
nsam <- as.integer( strsplit(txt[1], split=" ")[[1]][2] )
ndraws <- as.integer( strsplit( txt[1], split=" ")[[1]][3] )
h <- numeric()
result <- list()
gamlist <- list()
positions <- list()
marker <- grep("prob",txt)
probs <- sapply(strsplit(txt[marker], split=":"), function(vec) as.numeric(vec[2]))
marker <- grep("time",txt)
times <- sapply(strsplit(txt[marker], split="\t"), function(vec){ as.numeric(vec[2:3])} )
marker <- grep("segsites", txt)
stopifnot(length(marker) == ndraws)
segsites <- sapply(strsplit(txt[marker], split=" "), function(vec) as.integer(vec[2]) )
for(draw in seq(along=marker)) {
if(!(draw %% 100)) cat(draw, " ")
if(segsites[draw] > 0) {
tpos <- strsplit(txt[marker[draw]+1], split=" ")
positions[[draw]] <- as.numeric( tpos[[1]][ 2:(segsites[draw]+1) ] )
haplotypes <- txt[(marker[draw] + 2):(marker[draw] + 2 + nsam - 1)]
haplotypes <- strsplit(haplotypes, split="")
h <- sapply(haplotypes, function(el) c(as.integer(el)))
if(segsites[draw] == 1) h <- as.matrix(h)
else h <- t(h)
}
else {
h <- matrix(nrow=nsam, ncol=0)
positions[[draw]]<- NA
}
gamlist[[draw]] <- h
stopifnot(all(dim(h) == c(nsam, segsites[draw])))
}
cat("\n")
list(segsites=segsites, gametes=gamlist, probs=probs, times=t(times), positions=positions, nsam=nsam, nreps=ndraws )
}
| /R/read.ms.output.R | no_license | xingyanwang-david/rareGWAMA | R | false | false | 3,434 | r |
#read.ms.output -- a function to read in the output of ms.
#
# This function reads in the output of the program ms, storing the
# results in a list of lists.
#
# The function takes a single argument,either a file name or a vector
# of character strings, one string for each line of the output of ms.
# The function returns a list with some of the following components:
# segsites, times, positions, gametes, probs, nsam, nreps
#
# Example usage reading output from a file (assuming an executable ms
# resides in the current working directory):
#
# system("./ms 5 4 -s 5 >ms.out")
# msout <- read.ms.output(file="ms.out")
#
# In which case, msout$gametes[[1]] is a haplotype array for the
# first sample, msout$gametes[[2]] is the haplotype array for the
# second sample, etc. msout$segsites is a vector of the numbers of
# segregating sites in the samples. So, for example,
# mean( msout$segsites ) returns the mean number of segregating sites
# in the set of 4 samples.
#
# Another example usage, this time reading output from a vector of
# character strings:
#
# msout.txt <- system("./ms 5 4 -s 5 -L", intern=TRUE)
# msout <- read.ms.output(msout.txt)
#
# In this case, msout$time[,1] is then the vector of tmrca's of
# the samples and msout$time[,2] is the vector of total tree
# lengths of the samples.
#
# This function is derived from code first written by Dan Davison.
read.ms.output <- function(fnames)
{
hap.pool.list <- list();
for(ii in 1:length(fnames))
{
hap.pool <- ((readms.output(file.ms.output=fnames[ii]))$gametes);
hap.pool.list <- c(hap.pool.list,hap.pool);
}
return(hap.pool.list);
}
readms.output <- function( txt=NA, file.ms.output=NA ) {
if( !is.na(file.ms.output) ) txt <- scan(file=file.ms.output,
what=character(0), sep="\n", quiet=TRUE)
if( is.na(txt[1]) ){
return()
}
nsam <- as.integer( strsplit(txt[1], split=" ")[[1]][2] )
ndraws <- as.integer( strsplit( txt[1], split=" ")[[1]][3] )
h <- numeric()
result <- list()
gamlist <- list()
positions <- list()
marker <- grep("prob",txt)
probs <- sapply(strsplit(txt[marker], split=":"), function(vec) as.numeric(vec[2]))
marker <- grep("time",txt)
times <- sapply(strsplit(txt[marker], split="\t"), function(vec){ as.numeric(vec[2:3])} )
marker <- grep("segsites", txt)
stopifnot(length(marker) == ndraws)
segsites <- sapply(strsplit(txt[marker], split=" "), function(vec) as.integer(vec[2]) )
for(draw in seq(along=marker)) {
if(!(draw %% 100)) cat(draw, " ")
if(segsites[draw] > 0) {
tpos <- strsplit(txt[marker[draw]+1], split=" ")
positions[[draw]] <- as.numeric( tpos[[1]][ 2:(segsites[draw]+1) ] )
haplotypes <- txt[(marker[draw] + 2):(marker[draw] + 2 + nsam - 1)]
haplotypes <- strsplit(haplotypes, split="")
h <- sapply(haplotypes, function(el) c(as.integer(el)))
if(segsites[draw] == 1) h <- as.matrix(h)
else h <- t(h)
}
else {
h <- matrix(nrow=nsam, ncol=0)
positions[[draw]]<- NA
}
gamlist[[draw]] <- h
stopifnot(all(dim(h) == c(nsam, segsites[draw])))
}
cat("\n")
list(segsites=segsites, gametes=gamlist, probs=probs, times=t(times), positions=positions, nsam=nsam, nreps=ndraws )
}
|
/R/barplot.gg.R | no_license | kwlee58/Sejong | R | false | false | 2,241 | r | ||
# 관련 패키지 설치
pacman::p_load(KoNLP, wordcloud, plyr, twitteR, tm)
consumer_key <- "[YOUR CONSUMER KEY HERE]"
consumer_secret <- "[YOUR CONSUMER SECRET HERE]"
access_token <- "[YOUR ACCESS TOKEN HERE]"
access_secret <- "[YOUR ACCESS SECRET HERE]"
options(httr_oauth_cache=T) #This will enable the use of a local file to cache OAuth access credentials between R sessions.
setup_twitter_oauth(consumer_key,
consumer_secret,
access_token,
access_secret)
# 키워드 설정
keyword <- 'kth'
# 트위터에서 키워드로 검색
result <- searchTwitter(keyword, since='2013-01-21', until='2013-01-27', lang='ko',n=1000)
# 결과 중에서 텍스트에 해당하는 부분만 뽑는다
result.df <- twListToDF(result)
result.text <- result.df$text
# 불필요한 문자를 걸러준다
result.text <- gsub("\n", "", result.text)
result.text <- gsub("\r", "", result.text)
result.text <- gsub("RT", "", result.text)
result.text <- gsub("http", "", result.text)
# 문자 분리
result_nouns <- Map(extractNoun, result.text)
# 쓸모없는 문자들을 제거한다. 특히 영문자의 경우 tm의 stopwords를 활용한다.
result_wordsvec <- unlist(result_nouns, use.name=F)
result_wordsvec <- result_wordsvec[-which(result_wordsvec %in% stopwords("english"))]
result_wordsvec <- gsub("[[:punct:]]","", result_wordsvec)
result_wordsvec <- Filter(function(x){nchar(x)>=2}, result_wordsvec)
# 문자 카운팅
result_wordcount <- table(result_wordsvec)
# 컬러 세팅
pal <- brewer.pal(12,"Paired")
# 폰트 세팅
windowsFonts(malgun=windowsFont("맑은 고딕"))
# 그리기
wordcloud(names(result_wordcount), freq=result_wordcount, scale=c(5,0.5), min.freq=5, random.order=F, rot.per=.1, colors=pal, family="malgun") | /R Cheatsheet/__R/twitterNLP.R | no_license | kingmbc/TestGit | R | false | false | 1,950 | r | # 관련 패키지 설치
pacman::p_load(KoNLP, wordcloud, plyr, twitteR, tm)
consumer_key <- "[YOUR CONSUMER KEY HERE]"
consumer_secret <- "[YOUR CONSUMER SECRET HERE]"
access_token <- "[YOUR ACCESS TOKEN HERE]"
access_secret <- "[YOUR ACCESS SECRET HERE]"
options(httr_oauth_cache=T) #This will enable the use of a local file to cache OAuth access credentials between R sessions.
setup_twitter_oauth(consumer_key,
consumer_secret,
access_token,
access_secret)
# 키워드 설정
keyword <- 'kth'
# 트위터에서 키워드로 검색
result <- searchTwitter(keyword, since='2013-01-21', until='2013-01-27', lang='ko',n=1000)
# 결과 중에서 텍스트에 해당하는 부분만 뽑는다
result.df <- twListToDF(result)
result.text <- result.df$text
# 불필요한 문자를 걸러준다
result.text <- gsub("\n", "", result.text)
result.text <- gsub("\r", "", result.text)
result.text <- gsub("RT", "", result.text)
result.text <- gsub("http", "", result.text)
# 문자 분리
result_nouns <- Map(extractNoun, result.text)
# 쓸모없는 문자들을 제거한다. 특히 영문자의 경우 tm의 stopwords를 활용한다.
result_wordsvec <- unlist(result_nouns, use.name=F)
result_wordsvec <- result_wordsvec[-which(result_wordsvec %in% stopwords("english"))]
result_wordsvec <- gsub("[[:punct:]]","", result_wordsvec)
result_wordsvec <- Filter(function(x){nchar(x)>=2}, result_wordsvec)
# 문자 카운팅
result_wordcount <- table(result_wordsvec)
# 컬러 세팅
pal <- brewer.pal(12,"Paired")
# 폰트 세팅
windowsFonts(malgun=windowsFont("맑은 고딕"))
# 그리기
wordcloud(names(result_wordcount), freq=result_wordcount, scale=c(5,0.5), min.freq=5, random.order=F, rot.per=.1, colors=pal, family="malgun") |
# hilbe.NBR2.F8.1.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Overlays Poisson probability mass functions for a series of mean values
# (the alpha = 0 limiting case of the negative binomial).
# Figure 8.2-6 default: means at .5, 1, 2, 5, 10 and alpha=0
#
m <- c(0.5, 1, 2, 5, 10)  # mean values
y <- 0:10                 # observed counts
layout(1)                 # single-panel plot layout
for (i in seq_along(m)) { # seq_along() is safe even for empty m
p <- dpois(y, m[i])       # Poisson pmf at mean m[i]
if (i == 1) {
# first series establishes the axes
plot(y, p, col=i, type='l', lty=i)
} else {
# later series are overlaid
lines(y, p, col=i, lty=i)
}
}
| /inst/HILBE_NBR2_FIGURES/hilbe.NBR2.F8.1.r | no_license | cran/COUNT | R | false | false | 552 | r | # hilbe.NBR2.F8.1.r
# From Hilbe, Negative Binomial regression, 2nd ed, Cambridge Univ. Press
# Negative binomial regression distributions with
# user specified series of alpha values for a specified mean value
# Figure 8.2-6 default: means at .5, 1, 2, 5, 10 and alpha=0
# (NOTE: this script draws Poisson pmfs, i.e. the alpha = 0 case only.)
#
m<- c(0.5,1,2,5,10) #mean values
y<- 0:10 #Observed counts
# single-panel plotting layout
layout(1)
# overlay one pmf curve per mean value
for (i in 1:length(m)) {
p<- dpois(y, m[i]) #poisson pmf
# the first series draws the axes; later series are added with lines()
if (i==1) {
plot(y, p, col=i, type='l', lty=i)
} else {
lines(y, p, col=i, lty=i)
}
}
|
#' Annotate Plots.
#' @description
#' Used to annotate the plots. Add annotation on points chosen on
#' the plot. Place the cursor and left click. Repeat for each point desired.
#' Click the middle button to escape. Then enter the desired text for each of
#' the chosen points.
#' @param cex provides control over the text font size (default=1).
#' @return Draft \code{\link{text}} commands that might be added to a function.
#' @seealso documentation for \code{\link{locator}}.
#' @examples
#' plot(att.stock.tsd)
#' add.text()
#'
#' @importFrom graphics locator
#' @importFrom graphics text
#' @export
add.text <- function(cex = 1) {
  # Interactive point selection; locator() returns NULL coordinates if the
  # user escapes without clicking.
  xy <- locator()
  x <- xy$x
  y <- xy$y
  output <- rep("", length(x))
  if (!is.null(x)) {
    for (i in seq_along(x)) {
      cat(paste("Input text for", x[i], y[i], ":"))
      stuff <- readline()
      text(x[i], y[i], stuff, cex = cex)
      # Quote the label and tighten spacing so the drafted command is
      # syntactically valid R code.
      output[i] <- paste0("text(", x[i], ", ", y[i], ", \"", stuff, "\")")
    }
  }
  return(output)
}
| /R/add.text.R | no_license | wqmeeker/RTseries | R | false | false | 1,039 | r | #' Annotate Plots.
#' @description
#' Used to annotate the plots. Add annotation on points chosen on
#' the plot. Place the cursor and left click. Repeat for each point desired. Click the middle button to
#' escape. Then enter the desired text for each of the chosen points.
#' @param cex provides control over the text font size (default=1).
#' @return Draft \code{\link{text}} commands that might be added to a function.
#' @seealso documentation for \code{\link{locator}}.
#' @examples
#' plot(att.stock.tsd)
#' add.text()
#'
#' @importFrom graphics locator
#' @importFrom graphics text
#' @export
add.text <- function(cex = 1) {
# placeholder values; immediately overwritten by the locator() results below
x <- 1
y <- 1
# interactive point selection; coordinates are NULL if the user escapes
# without clicking any point
xy <- locator()
x <- xy$x
y <- xy$y
output <- rep("", length(x))
if (!is.null(x)) {
for (i in 1:length(x)) {
cat(paste("Input text for", x[i], y[i], ":"))
stuff <- readline()
text(x[i], y[i], stuff, cex = cex)
# draft a text() call that would reproduce this annotation
output[i] <- paste("text(", x[i], ",", y[i], ",", stuff, ")")
}
}
return(output)
}
|
# Two paired numeric vectors of equal length.
size <- c(1700, 2100, 1900, 1300, 1600, 2200)
price <- c(51000, 63000, 57000, 39000, 48000, 66000)
# Collect them into a two-column data frame and draw a scatterplot
# of the second column against the first.
d <- data.frame(size = size, price = price)
plot(d)
| /101-lesson4.R | no_license | albertmeronyo/Outing2014 | R | false | false | 136 | r | size <- c(1700, 2100, 1900, 1300, 1600, 2200)
price <- c(51000, 63000, 57000, 39000, 48000, 66000)
# combine the paired vectors (`size` is defined just above) into a
# two-column data frame and draw a scatterplot of one against the other
d <- data.frame(size, price)
plot(d)
|
#----------------------------------------------------------------------------------------------
# File: box-score-url-grabber.R
# Date: 07-06-2012
# Author: Eric Nantz
# URL: https://github.com/thercast/nhl_analysis/blob/master/data/
# Email: theRcast@gmail.com
# Purpose: Assemble valid URLs with box score data from www.hockey-reference.com
# License: Creative Commons Attribution-ShareAlike 3.0 Unported License
#
# Notes:
# - url looks like following: http://www.hockey-reference.com/boxscores/201201050LAK.html
# - There is no boxscore data before 1987
# - code adapted from Ryan Elmore's analysis of baseball boxscore data:
# - https://github.com/rtelmore/Pitch_Count
# - http://www.slideshare.net/rtelmore/user-2012-talk
#
# TO DO:
#
# 1. Find way to import penalty summary (box scores starting with 2006) while mapping correct
# period as an additional column
#
# Possible solution: Count row indeces with populated penalty entries, since these will be the
# the blocks for each period. Then use following function from StackOverflow to count
# adjacent runs of consecutive indeces and assign proper period to each row in the block:
#
# lens <- rle(rows.to.extract - seq_along(rows.to.extract))$lengths
# block.results <- list(lengths = lens, values = unname(split(rows.to.extract, rep(seq_along(lens), lens))))
#----------------------------------------------------------------------------------------------
# load required packages
#library(XML)
library(XML, lib.loc="~/R-dev/")
library(stringr)
library(RMySQL)
# source script to establish database credentials
source("~/Dropbox/rpodcast_code/nhl_analysis/lib/mysql.login.R")
# connect to hockey database
mychannel <- dbConnect(MySQL(), user=login$user, password=login$password, dbname=login$dbname)
# rs <- dbSendQuery(mychannel, "select * from SKATER_REGISTER")
#
#
# tmp <- fetch(rs, n=10)
#
# str(tmp)
# Coerce the columns of a data.frame to the specified classes.
#
# d          a data.frame
# colClasses character vector of target classes, recycled to ncol(d);
#            "numeric", "character", "Date", "POSIXct" and "factor" are
#            handled directly, anything else falls through to methods::as().
# Returns d with each column converted.
colClasses <- function(d, colClasses) {
  # Recycle the class vector across all columns. length.out is spelled out;
  # the original "len=" relied on partial argument matching.
  colClasses <- rep(colClasses, length.out = length(d))
  d[] <- lapply(seq_along(d), function(i) switch(colClasses[i],
    numeric = as.numeric(d[[i]]),
    character = as.character(d[[i]]),
    Date = as.Date(d[[i]], origin = '1970-01-01'),
    POSIXct = as.POSIXct(d[[i]], origin = '1970-01-01'),
    factor = as.factor(d[[i]]),
    as(d[[i]], colClasses[i])))  # fallback: generic coercion via methods::as()
  d
}
# load box score url checker data frame
# (loads `boxscore.tracker`; its boxscore.stats column flags rows whose
# box-score URL was previously verified to exist)
load(file="~/hockey_workspaces/boxscore.url.check.data.RData")
boxscore.valid <- boxscore.tracker[boxscore.tracker$boxscore.stats,]
# order games by team, then chronologically within team
boxscore.valid <- boxscore.valid[order(boxscore.valid$team, boxscore.valid$year, boxscore.valid$month, boxscore.valid$day),]
# system runs out of memory after a few teams are imported, so need to subset further
# (presumably these teams were imported in earlier runs -- TODO confirm)
boxscore.valid <- boxscore.valid[!boxscore.valid$team %in% c("ANA", "ATL", "BOS", "BUF", "CAR", "CGY", "CHI", "COL", "DAL"),]
rm(boxscore.tracker)
# URL prefix for individual box-score pages
base.url <- "http://www.hockey-reference.com/boxscores/"
# column names applied to each scraped skater table, including the two
# columns added in the import loop (home/away indicator and team code)
player.table.column.names <- c("rk",
"player",
"goals",
"assists",
"points",
"plus.minus",
"pim",
"goals.even",
"goals.pp",
"goals.sh",
"shots",
"shooting.percent",
"shifts",
"time.on.ice",
"home.away.ind",
"team")
# target classes for the skater columns, in the same order as the names
player.table.column.classes <- c("integer",
"character",
rep("numeric", 11),
rep("character", 3))
# column names applied to each scraped goalie table (same two extra columns)
goalie.table.column.names <- c("rk",
"player",
"decision",
"ga",
"sa",
"sv",
"sv.percent",
"so",
"pim",
"min",
"ev.ga",
"pp.ga",
"sh.ga",
"en.ga",
"home.away.ind",
"team")
# target classes for the goalie columns, in the same order as the names
goalie.table.column.classes <- c("integer",
"character",
"character",
rep("numeric", 11),
rep("character", 2))
# nrow(boxscore.valid)
# Main import loop: for each validated game, fetch its box-score page,
# split out the home/away skater and goalie tables, normalise them, and
# append them to the MySQL tables. Objects are rm()'d aggressively inside
# the loop because memory is a known constraint (see header notes).
for(i in 1:nrow(boxscore.valid)) {
team <- as.character(boxscore.valid[i, "team"])
year <- boxscore.valid[i, "year"]
month <- boxscore.valid[i, "month"]
# zero-pad single-digit months/days to match the site's URL scheme
month.url <- ifelse(str_length(month)==1,
paste(0, month, sep=""),
month)
day <- boxscore.valid[i, "day"]
day.url <- ifelse(str_length(day)==1,
paste(0, day, sep=""),
day)
# e.g. http://www.hockey-reference.com/boxscores/201201050LAK.html
full.url <- paste(base.url, year, month.url, day.url, "0", team,".html", sep="")
out.string <- paste(Sys.time(), "--", team, year, month, day, sep = " ")
#print(out.string)
# progress log, one line per attempted game
cat(out.string, "\n", file="~/hockey_workspaces/box.score.grabber.log.txt", append=TRUE)
# scrape all HTML tables from the page; try() so a bad URL skips the game
# instead of aborting the whole run
table.stats <- try(readHTMLTable(full.url, header=FALSE), silent = TRUE)
rm(out.string, full.url, day.url, month.url)
if (!inherits(table.stats, "try-error")) {
# locate the skater/goalie tables by their "*_skaters"/"*_goalies" names
player.table.ind <- unlist(str_detect(names(table.stats), "\\_skaters"))
goalie.table.ind <- unlist(str_detect(names(table.stats), "\\_goalies"))
# need one skater and one goalie table per side (home + away = 2 each)
if (sum(player.table.ind, na.rm=TRUE) < 2 | sum(goalie.table.ind, na.rm=TRUE) < 2) next
team.player.table.names <- names(table.stats)[player.table.ind]
team.goalie.table.names <- names(table.stats)[goalie.table.ind]
# the table whose name contains this loop's team code is the home side
# (URLs in boxscore.valid are keyed by home team)
player.home.team.ind <- str_detect(team.player.table.names, team)
goalie.home.team.ind <- str_detect(team.goalie.table.names, team)
home.team.player.table.name <- team.player.table.names[player.home.team.ind]
home.team.goalie.table.name <- team.goalie.table.names[goalie.home.team.ind]
home.team.clean <- str_replace_all(team.player.table.names[player.home.team.ind], "\\_skaters", "")
away.team.player.table.name <- team.player.table.names[!player.home.team.ind]
away.team.goalie.table.name <- team.goalie.table.names[!goalie.home.team.ind]
away.team.clean <- str_replace_all(team.player.table.names[!player.home.team.ind], "\\_skaters", "")
# tag each skater table with home/away and team code, then rename columns
home.player.table <- as.data.frame(table.stats[home.team.player.table.name])
home.player.table$home.away.ind <- "H"
home.player.table$team <- home.team.clean
away.player.table <- as.data.frame(table.stats[away.team.player.table.name])
away.player.table$home.away.ind <- "A"
away.player.table$team <- away.team.clean
names(home.player.table) <- player.table.column.names
names(away.player.table) <- player.table.column.names
# same treatment for the goalie tables
home.goalie.table <- as.data.frame(table.stats[home.team.goalie.table.name])
home.goalie.table$home.away.ind <- "H"
home.goalie.table$team <- home.team.clean
away.goalie.table <- as.data.frame(table.stats[away.team.goalie.table.name])
away.goalie.table$home.away.ind <- "A"
away.goalie.table$team <- away.team.clean
names(home.goalie.table) <- goalie.table.column.names
names(away.goalie.table) <- goalie.table.column.names
rm(table.stats)
# combine both sides, coerce column types, and stamp the game date
player.table <- rbind(home.player.table, away.player.table)
player.table <- colClasses(player.table, player.table.column.classes)
player.table$year <- year
player.table$month <- month
player.table$day <- day
rm(home.player.table, away.player.table)
#skater.dbtable <- str_c(team, "_SKATER_BOXSCORE")
skater.dbtable <- "SKATER_BOXSCORE"
# append if the table already exists, otherwise create it
if(dbExistsTable(mychannel, skater.dbtable)) {
dbWriteTable(mychannel, skater.dbtable, player.table, append = T, row.names=FALSE)
} else dbWriteTable(mychannel, skater.dbtable, player.table, row.names=FALSE)
rm(player.table, skater.dbtable)
#all.player.table <- rbind(all.player.table, player.table)
# combine, coerce and stamp the goalie data the same way
goalie.table <- rbind(home.goalie.table, away.goalie.table)
goalie.table <- colClasses(goalie.table, goalie.table.column.classes)
goalie.table$year <- year
goalie.table$month <- month
goalie.table$day <- day
rm(home.goalie.table, away.goalie.table)
#goalie.dbtable <- str_c(team, "_GOALIE_BOXSCORE")
goalie.dbtable <- "GOALIE_BOXSCORE"
if(dbExistsTable(mychannel, goalie.dbtable)) {
dbWriteTable(mychannel, goalie.dbtable, goalie.table, append = T, row.names=FALSE)
} else dbWriteTable(mychannel, goalie.dbtable, goalie.table, row.names=FALSE)
rm(goalie.table, goalie.dbtable)
# remove other objects
rm(player.home.team.ind, player.table.ind, goalie.home.team.ind, goalie.table.ind,
home.team.goalie.table.name, home.team.player.table.name, away.team.clean, home.team.clean,
away.team.goalie.table.name, away.team.player.table.name, team, year, month, day, i,
team.goalie.table.names, team.player.table.names)
# force garbage collection to keep the long-running loop within memory
gc()
} else {
# page fetch/parse failed; skip this game
next
}
}
dbDisconnect(mychannel)
| /web-scraping/box-score-url-grabber.R | no_license | abresler/nhl_analysis | R | false | false | 9,785 | r | #----------------------------------------------------------------------------------------------
# File: box-score-url-grabber.R
# Date: 07-06-2012
# Author: Eric Nantz
# URL: https://github.com/thercast/nhl_analysis/blob/master/data/
# Email: theRcast@gmail.com
# Purpose: Assemble valid URLs with box score data from www.hockey-reference.com
# License: Creative Commons Attribution-ShareAlike 3.0 Unported License
#
# Notes:
# - url looks like following: http://www.hockey-reference.com/boxscores/201201050LAK.html
# - There is no boxscore data before 1987
# - code adapted from Ryan Elmore's analysis of baseball boxscore data:
# - https://github.com/rtelmore/Pitch_Count
# - http://www.slideshare.net/rtelmore/user-2012-talk
#
# TO DO:
#
# 1. Find way to import penalty summary (box scores starting with 2006) while mapping correct
# period as an additional column
#
# Possible solution: Count row indeces with populated penalty entries, since these will be the
# the blocks for each period. Then use following function from StackOverflow to count
# adjacent runs of consecutive indeces and assign proper period to each row in the block:
#
# lens <- rle(rows.to.extract - seq_along(rows.to.extract))$lengths
# block.results <- list(lengths = lens, values = unname(split(rows.to.extract, rep(seq_along(lens), lens))))
#----------------------------------------------------------------------------------------------
# load required packages
#library(XML)
library(XML, lib.loc="~/R-dev/")
library(stringr)
library(RMySQL)
# source script to establish database credentials
source("~/Dropbox/rpodcast_code/nhl_analysis/lib/mysql.login.R")
# connect to hockey database
mychannel <- dbConnect(MySQL(), user=login$user, password=login$password, dbname=login$dbname)
# rs <- dbSendQuery(mychannel, "select * from SKATER_REGISTER")
#
#
# tmp <- fetch(rs, n=10)
#
# str(tmp)
# Coerce each column of a data frame to a requested class.
# `colClasses` is recycled to cover every column; "numeric", "character",
# "Date", "POSIXct" and "factor" are recognised directly, with
# methods::as() as the fallback for any other class name.
colClasses <- function(d, colClasses) {
  targets <- rep(colClasses, length.out = length(d))
  # convert one column to one target class
  convert <- function(col, cls) {
    switch(cls,
      numeric = as.numeric(col),
      character = as.character(col),
      Date = as.Date(col, origin = '1970-01-01'),
      POSIXct = as.POSIXct(col, origin = '1970-01-01'),
      factor = as.factor(col),
      as(col, cls))
  }
  for (j in seq_along(d)) {
    d[[j]] <- convert(d[[j]], targets[j])
  }
  d
}
# load box score url checker data frame
load(file="~/hockey_workspaces/boxscore.url.check.data.RData")
boxscore.valid <- boxscore.tracker[boxscore.tracker$boxscore.stats,]
boxscore.valid <- boxscore.valid[order(boxscore.valid$team, boxscore.valid$year, boxscore.valid$month, boxscore.valid$day),]
# system runs out of memory after a few teams are imported, so need to subset further
boxscore.valid <- boxscore.valid[!boxscore.valid$team %in% c("ANA", "ATL", "BOS", "BUF", "CAR", "CGY", "CHI", "COL", "DAL"),]
rm(boxscore.tracker)
base.url <- "http://www.hockey-reference.com/boxscores/"
player.table.column.names <- c("rk",
"player",
"goals",
"assists",
"points",
"plus.minus",
"pim",
"goals.even",
"goals.pp",
"goals.sh",
"shots",
"shooting.percent",
"shifts",
"time.on.ice",
"home.away.ind",
"team")
player.table.column.classes <- c("integer",
"character",
rep("numeric", 11),
rep("character", 3))
goalie.table.column.names <- c("rk",
"player",
"decision",
"ga",
"sa",
"sv",
"sv.percent",
"so",
"pim",
"min",
"ev.ga",
"pp.ga",
"sh.ga",
"en.ga",
"home.away.ind",
"team")
goalie.table.column.classes <- c("integer",
"character",
"character",
rep("numeric", 11),
rep("character", 2))
# nrow(boxscore.valid)
for(i in 1:nrow(boxscore.valid)) {
team <- as.character(boxscore.valid[i, "team"])
year <- boxscore.valid[i, "year"]
month <- boxscore.valid[i, "month"]
month.url <- ifelse(str_length(month)==1,
paste(0, month, sep=""),
month)
day <- boxscore.valid[i, "day"]
day.url <- ifelse(str_length(day)==1,
paste(0, day, sep=""),
day)
full.url <- paste(base.url, year, month.url, day.url, "0", team,".html", sep="")
out.string <- paste(Sys.time(), "--", team, year, month, day, sep = " ")
#print(out.string)
cat(out.string, "\n", file="~/hockey_workspaces/box.score.grabber.log.txt", append=TRUE)
table.stats <- try(readHTMLTable(full.url, header=FALSE), silent = TRUE)
rm(out.string, full.url, day.url, month.url)
if (!inherits(table.stats, "try-error")) {
player.table.ind <- unlist(str_detect(names(table.stats), "\\_skaters"))
goalie.table.ind <- unlist(str_detect(names(table.stats), "\\_goalies"))
if (sum(player.table.ind, na.rm=TRUE) < 2 | sum(goalie.table.ind, na.rm=TRUE) < 2) next
team.player.table.names <- names(table.stats)[player.table.ind]
team.goalie.table.names <- names(table.stats)[goalie.table.ind]
player.home.team.ind <- str_detect(team.player.table.names, team)
goalie.home.team.ind <- str_detect(team.goalie.table.names, team)
home.team.player.table.name <- team.player.table.names[player.home.team.ind]
home.team.goalie.table.name <- team.goalie.table.names[goalie.home.team.ind]
home.team.clean <- str_replace_all(team.player.table.names[player.home.team.ind], "\\_skaters", "")
away.team.player.table.name <- team.player.table.names[!player.home.team.ind]
away.team.goalie.table.name <- team.goalie.table.names[!goalie.home.team.ind]
away.team.clean <- str_replace_all(team.player.table.names[!player.home.team.ind], "\\_skaters", "")
home.player.table <- as.data.frame(table.stats[home.team.player.table.name])
home.player.table$home.away.ind <- "H"
home.player.table$team <- home.team.clean
away.player.table <- as.data.frame(table.stats[away.team.player.table.name])
away.player.table$home.away.ind <- "A"
away.player.table$team <- away.team.clean
names(home.player.table) <- player.table.column.names
names(away.player.table) <- player.table.column.names
home.goalie.table <- as.data.frame(table.stats[home.team.goalie.table.name])
home.goalie.table$home.away.ind <- "H"
home.goalie.table$team <- home.team.clean
away.goalie.table <- as.data.frame(table.stats[away.team.goalie.table.name])
away.goalie.table$home.away.ind <- "A"
away.goalie.table$team <- away.team.clean
names(home.goalie.table) <- goalie.table.column.names
names(away.goalie.table) <- goalie.table.column.names
rm(table.stats)
player.table <- rbind(home.player.table, away.player.table)
player.table <- colClasses(player.table, player.table.column.classes)
player.table$year <- year
player.table$month <- month
player.table$day <- day
rm(home.player.table, away.player.table)
#skater.dbtable <- str_c(team, "_SKATER_BOXSCORE")
skater.dbtable <- "SKATER_BOXSCORE"
if(dbExistsTable(mychannel, skater.dbtable)) {
dbWriteTable(mychannel, skater.dbtable, player.table, append = T, row.names=FALSE)
} else dbWriteTable(mychannel, skater.dbtable, player.table, row.names=FALSE)
rm(player.table, skater.dbtable)
#all.player.table <- rbind(all.player.table, player.table)
goalie.table <- rbind(home.goalie.table, away.goalie.table)
goalie.table <- colClasses(goalie.table, goalie.table.column.classes)
goalie.table$year <- year
goalie.table$month <- month
goalie.table$day <- day
rm(home.goalie.table, away.goalie.table)
#goalie.dbtable <- str_c(team, "_GOALIE_BOXSCORE")
goalie.dbtable <- "GOALIE_BOXSCORE"
if(dbExistsTable(mychannel, goalie.dbtable)) {
dbWriteTable(mychannel, goalie.dbtable, goalie.table, append = T, row.names=FALSE)
} else dbWriteTable(mychannel, goalie.dbtable, goalie.table, row.names=FALSE)
rm(goalie.table, goalie.dbtable)
# remove other objects
rm(player.home.team.ind, player.table.ind, goalie.home.team.ind, goalie.table.ind,
home.team.goalie.table.name, home.team.player.table.name, away.team.clean, home.team.clean,
away.team.goalie.table.name, away.team.player.table.name, team, year, month, day, i,
team.goalie.table.names, team.player.table.names)
gc()
} else {
next
}
}
dbDisconnect(mychannel)
|
\name{fancyTree}
\alias{fancyTree}
\alias{phyloScattergram}
\alias{phenogram95}
\title{Plots special types of phylogenetic trees}
\usage{
fancyTree(tree, type=c("extinction","traitgram3d","droptip","densitymap",
"contmap","phenogram95","scattergram"), ..., control=list())
phyloScattergram(tree, X=NULL, ...)
phenogram95(tree, x=NULL, ...)
}
\arguments{
\item{tree}{an object of class \code{"phylo"}.}
\item{type}{the type of special plot to create. See Description.}
\item{...}{arguments to be passed to different methods. See Description.}
\item{control}{a list of control parameters, depending on \code{type}.}
\item{X}{in \code{phyloScattergram}, a matrix of continuous trait values. Row names in the matrix should correspond to species names in the tree.}
\item{x}{in \code{phenogram95}, a named vector with values for a continuously distributed trait.}
}
\description{
Plots phylogenies (or phylogenetic trees and comparative data) in a variety of different styles.
}
\details{
This function plots a phylogeny or phylogenetic tree and comparative data in a variety of different styles, depending on the value of \code{type}. In some instances, \code{fancyTree} is now just a wrapper for other \pkg{phytools} functions, such as \code{\link{contMap}} and \code{\link{densityMap}}.
If \code{type="extinction"} (or any unambiguous abbreviation) the function will plot a tree in which branches preceding the MRCA of all extant taxa and branches leading only to extinct lineages are plotted with dashed red lines.
If \code{type="traitgram3d"} the function will plot a three dimensional traitgram (that is, a projection of the tree into three dimensional morphospace where two dimensions are the phenotypic trait and the third axis is time since the root). In this case, the additional argument \code{X}, a matrix containing the tip values of all species (with species IDs as row names) should be supplied. Optionally, the user can also supply the matrix \code{A}, which contains the ancestral states in the tree with rows labeled by node number.
If \code{type="droptip"} the function will create a two panel figure in which the first panel is the tree with lineages to be pruned highlighted; and the second panel is the pruned tree. In this case, the additional argument \code{tip}, the tip name or vector of tip names to be dropped, must be supplied.
If \code{type="densitymap"}, a posterior probability density "heat-map" is created based on a set of trees in a \code{"multiSimmap"} object containing a binary [0,1] mapped character. (See \code{\link{densityMap}} for additional optional arguments if \code{type="densitymap"}.)
If \code{type="contmap"}, reconstructed continuous trait evolution is mapped on the tree. Again, see \code{\link{contMap}} for additional arguments if \code{type="contmap"}.
If \code{type="phenogram95"} a 95\% traitgram (aka. "phenogram") is plotted using transparency to visualize uncertainty at ancestral nodes and along branches. Most of the options of \code{\link{phenogram}} are available.
Finally, if \code{type="scattergram"} a phylogenetic scatter plot matrix containing \code{\link{contMap}} style trees on the diagonal and \code{\link{phylomorphospace}} plots in non-diagonal panels is produced. For this type a trait matrix \code{X} must also be supplied. The only additional arguments available for this type are \code{ftype}, \code{fsize}, \code{colors}, and \code{label}. (See \code{\link{phylomorphospace}} for details on how these arguments should be used.) This function calls \code{\link{phyloScattergram}} (which is also now exported to the name space) internally. In addition to creating a plot, \code{phyloScattergram} also returns an object of class \code{"phyloScattergram"} which can be replotted using different options if desired.
Presently only \code{type="traitgram3d"} uses the list \code{control} which can be supplied the same set of control parameters as \code{\link{phylomorphospace3d}}, as well as the control parameter \code{maxit} which will be passed to \code{\link{anc.ML}}.
Finally, the optional argument \code{hold} will be passed to multiple methods if supplied. It is a logical value that indicates whether or not the output to the graphical device should be held using \code{\link{dev.hold}} before plotting (defaults to \code{hold=TRUE}).
}
\value{
This function plots different types of phylogenetic trees. For \code{type="droptip"} the function also returns the pruned tree.
}
\references{
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{contMap}}, \code{\link{densityMap}}, \code{\link{drop.tip}}, \code{\link{phenogram}}, \code{\link{phylomorphospace3d}}, \code{\link{plot.phylo}}, \code{\link{plotSimmap}}
}
\examples{
## plot tree with extinction
set.seed(10)
tree<-pbtree(b=1,d=0.4,t=4)
fancyTree(tree,type="extinction")
\dontrun{
## plot 3D traitgram
## load data from Revell & Collar (2009)
data(sunfish.tree)
data(sunfish.data)
fancyTree(sunfish.tree,type="traitgram3d",
X=sunfish.data[,2:3],
control=list(spin=FALSE))}
## plot with dropped tips
tree<-pbtree(n=30)
tips<-sample(tree$tip.label)[1:10]
pruned<-fancyTree(tree,type="droptip",tip=tips)
par(mfrow=c(1,1)) ## reset mfrow to default
\dontrun{
## plot 95-percent CI phenogram
data(mammal.tree)
data(mammal.data)
bodyMass<-setNames(mammal.data$bodyMass,
rownames(mammal.data))
fancyTree(mammal.tree,type="phenogram95",x=bodyMass,
fsize=0.7,ftype="i")}
par(mar=c(5.1,4.1,4.1,2.1)) ## reset mar to defaults
}
\keyword{phylogenetics}
\keyword{plotting}
\keyword{comparative method}
| /man/fancyTree.Rd | no_license | cran/phytools | R | false | false | 5,837 | rd | \name{fancyTree}
\alias{fancyTree}
\alias{phyloScattergram}
\alias{phenogram95}
\title{Plots special types of phylogenetic trees}
\usage{
fancyTree(tree, type=c("extinction","traitgram3d","droptip","densitymap",
"contmap","phenogram95","scattergram"), ..., control=list())
phyloScattergram(tree, X=NULL, ...)
phenogram95(tree, x=NULL, ...)
}
\arguments{
\item{tree}{an object of class \code{"phylo"}.}
\item{type}{the type of special plot to create. See Description.}
\item{...}{arguments to be passed to different methods. See Description.}
\item{control}{a list of control parameters, depending on \code{type}.}
\item{X}{in \code{phyloScattergram}, a matrix of continuous trait values. Row names in the matrix should correspond to species names in the tree.}
\item{x}{in \code{phenogram95}, a named vector with values for a continuously distributed trait.}
}
\description{
Plots phylogenies (or phylogenetic trees and comparative data) in a variety of different styles.
}
\details{
This function plots a phylogeny or phylogenetic tree and comparative data in a variety of different styles, depending on the value of \code{type}. In some instances, \code{fancyTree} is now just a wrapper for other \pkg{phytools} functions, such as \code{\link{contMap}} and \code{\link{densityMap}}.
If \code{type="extinction"} (or any unambiguous abbreviation) the function will plot a tree in which branches preceding the MRCA of all extant taxa and branches leading only to extinct lineages are plotted with dashed red lines.
If \code{type="traitgram3d"} the function will plot a three dimensional traitgram (that is, a projection of the tree into three dimensional morphospace where two dimensions are the phenotypic trait and the third axis is time since the root). In this case, the additional argument \code{X}, a matrix containing the tip values of all species (with species IDs as row names) should be supplied. Optionally, the user can also supply the matrix \code{A}, which contains the ancestral states in the tree with rows labeled by node number.
If \code{type="droptip"} the function will create a two panel figure in which the first panel is the tree with lineages to be pruned highlighted; and the second panel is the pruned tree. In this case, the additional argument \code{tip}, the tip name or vector of tip names to be dropped, must be supplied.
If \code{type="densitymap"}, a posterior probability density "heat-map" is created based on a set of trees in a \code{"multiSimmap"} object containing a binary [0,1] mapped character. (See \code{\link{densityMap}} for additional optional arguments if \code{type="densitymap"}.)
If \code{type="contmap"}, reconstructed continuous trait evolution is mapped on the tree. Again, see \code{\link{contMap}} for additional arguments if \code{type="contmap"}.
If \code{type="phenogram95"} a 95\% traitgram (aka. "phenogram") is plotted using transparency to visualize uncertainty at ancestral nodes and along branches. Most of the options of \code{\link{phenogram}} are available.
Finally, if \code{type="scattergram"} a phylogenetic scatter plot matrix containing \code{\link{contMap}} style trees on the diagonal and \code{\link{phylomorphospace}} plots in non-diagonal panels is produced. For this type a trait matrix \code{X} must also be supplied. The only additional arguments available for this type are \code{ftype}, \code{fsize}, \code{colors}, and \code{label}. (See \code{\link{phylomorphospace}} for details on how these arguments should be used.) This function calls \code{\link{phyloScattergram}} (which is also now exported to the name space) internally. In addition to creating a plot, \code{phyloScattergram} also returns an object of class \code{"phyloScattergram"} which can be replotted using different options if desired.
Presently only \code{type="traitgram3d"} uses the list \code{control} which can be supplied the same set of control parameters as \code{\link{phylomorphospace3d}}, as well as the control parameter \code{maxit} which will be passed to \code{\link{anc.ML}}.
Finally, the optional argument \code{hold} will be passed to multiple methods if supplied. It is a logical value that indicates whether or not the output to the graphical device should be held using \code{\link{dev.hold}} before plotting (defaults to \code{hold=TRUE}).
}
\value{
This function plots different types of phylogenetic trees. For \code{type="droptip"} the function also returns the pruned tree.
}
\references{
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{contMap}}, \code{\link{densityMap}}, \code{\link{drop.tip}}, \code{\link{phenogram}}, \code{\link{phylomorphospace3d}}, \code{\link{plot.phylo}}, \code{\link{plotSimmap}}
}
\examples{
## plot tree with extinction
set.seed(10)
tree<-pbtree(b=1,d=0.4,t=4)
fancyTree(tree,type="extinction")
\dontrun{
## plot 3D traitgram
## load data from Revell & Collar (2009)
data(sunfish.tree)
data(sunfish.data)
fancyTree(sunfish.tree,type="traitgram3d",
X=sunfish.data[,2:3],
control=list(spin=FALSE))}
## plot with dropped tips
tree<-pbtree(n=30)
tips<-sample(tree$tip.label)[1:10]
pruned<-fancyTree(tree,type="droptip",tip=tips)
par(mfrow=c(1,1)) ## reset mfrow to default
\dontrun{
## plot 95-percent CI phenogram
data(mammal.tree)
data(mammal.data)
bodyMass<-setNames(mammal.data$bodyMass,
rownames(mammal.data))
fancyTree(mammal.tree,type="phenogram95",x=bodyMass,
fsize=0.7,ftype="i")}
par(mar=c(5.1,4.1,4.1,2.1)) ## reset mar to defaults
}
\keyword{phylogenetics}
\keyword{plotting}
\keyword{comparative method}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_describe_stream}
\alias{kinesis_describe_stream}
\title{Describes the specified Kinesis data stream}
\usage{
kinesis_describe_stream(StreamName, Limit, ExclusiveStartShardId)
}
\arguments{
\item{StreamName}{[required] The name of the stream to describe.}
\item{Limit}{The maximum number of shards to return in a single call. The default
value is 100. If you specify a value greater than 100, at most 100
shards are returned.}
\item{ExclusiveStartShardId}{The shard ID of the shard to start with.}
}
\description{
Describes the specified Kinesis data stream.
The information returned includes the stream name, Amazon Resource Name
(ARN), creation time, enhanced metric configuration, and shard map. The
shard map is an array of shard objects. For each shard object, there is
the hash key and sequence number ranges that the shard spans, and the
IDs of any earlier shards that played in a role in creating the shard.
Every record ingested in the stream is identified by a sequence number,
which is assigned when the record is put into the stream.
You can limit the number of shards returned by each call. For more
information, see \href{https://docs.aws.amazon.com/streams/latest/dev/}{Retrieving Shards from a Stream} in the \emph{Amazon
Kinesis Data Streams Developer Guide}.
There are no guarantees about the chronological order of shards returned.
To process shards in chronological order, use the ID of the parent shard
to track the lineage to the oldest shard.
This operation has a limit of 10 transactions per second per account.
}
\section{Request syntax}{
\preformatted{svc$describe_stream(
StreamName = "string",
Limit = 123,
ExclusiveStartShardId = "string"
)
}
}
\keyword{internal}
| /cran/paws.analytics/man/kinesis_describe_stream.Rd | permissive | sanchezvivi/paws | R | false | true | 1,815 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kinesis_operations.R
\name{kinesis_describe_stream}
\alias{kinesis_describe_stream}
\title{Describes the specified Kinesis data stream}
\usage{
kinesis_describe_stream(StreamName, Limit, ExclusiveStartShardId)
}
\arguments{
\item{StreamName}{[required] The name of the stream to describe.}
\item{Limit}{The maximum number of shards to return in a single call. The default
value is 100. If you specify a value greater than 100, at most 100
shards are returned.}
\item{ExclusiveStartShardId}{The shard ID of the shard to start with.}
}
\description{
Describes the specified Kinesis data stream.
The information returned includes the stream name, Amazon Resource Name
(ARN), creation time, enhanced metric configuration, and shard map. The
shard map is an array of shard objects. For each shard object, there is
the hash key and sequence number ranges that the shard spans, and the
IDs of any earlier shards that played a role in creating the shard.
Every record ingested in the stream is identified by a sequence number,
which is assigned when the record is put into the stream.
You can limit the number of shards returned by each call. For more
information, see \href{https://docs.aws.amazon.com/streams/latest/dev/}{Retrieving Shards from a Stream} in the \emph{Amazon
Kinesis Data Streams Developer Guide}.
There are no guarantees about the chronological order of the shards returned.
To process shards in chronological order, use the ID of the parent shard
to track the lineage to the oldest shard.
This operation has a limit of 10 transactions per second per account.
}
\section{Request syntax}{
\preformatted{svc$describe_stream(
StreamName = "string",
Limit = 123,
ExclusiveStartShardId = "string"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workflow_utils.R
\name{centralImputTrNAs}
\alias{centralImputTrNAs}
\title{Handling NAs in train and test}
\usage{
centralImputTrNAs(train, nORp)
}
\arguments{
\item{train}{training data set}
\item{nORp}{minimum percentage of NA values in a row/column for
that row/column to be discarded from training set}
}
\value{
list with an entry for the training and test sets
(in this order), both now with no NA values
}
\description{
Discard columns/rows with too many NAs and
then impute with central value.
}
| /man/centralImputTrNAs.Rd | no_license | mrfoliveira/STResampling-JDSA2020 | R | false | true | 584 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workflow_utils.R
\name{centralImputTrNAs}
\alias{centralImputTrNAs}
\title{Handling NAs in train and test}
\usage{
centralImputTrNAs(train, nORp)
}
\arguments{
\item{train}{training data set}
\item{nORp}{minimum percentage of NA values in a row/column for
that row/column to be discarded from training set}
}
\value{
list with an entry for the training and test sets
(in this order), both now with no NA values
}
\description{
Discard columns/rows with too many NAs and
then impute with central value.
}
|
#' Função para gerar nowcasting
#'
#' @param dados dados
#' @param caso Lógico. Se TRUE faz casos e FALSE, óbitos
#' @param tipo covid ou srag
#' @param hospitalizados Lógico, filtra apenas hospitalizados
#' @param trim.now corte para nowcasting
#' @param window janela para nowcasting
#' @param trajectories Lógico. Retorna trajetórias
#' @param ... qualquer parâmetro da função NobBS para controlar o ajuste do nowcasting
#'
#' @export
#'
#' @importFrom stats dbinom
#' @importFrom NobBS NobBS
#' @importFrom dplyr filter select mutate
gera.nowcasting <- function(dados, # input line-list data
                            caso = TRUE, # caso = FALSE nowcasts deaths instead of cases
                            tipo, # "covid" or "srag"
                            hospitalizados = TRUE, # if TRUE, keep hospitalised records only
                            trim.now, # days trimmed from the most recent date before nowcasting
                            window, # moving window (in days) for the nowcast
                            trajectories = FALSE, # if TRUE, return posterior trajectories
                            ...) { # any extra NobBS arguments to tune the fit
  # Locally shadow NobBS with the trajectory-returning variant.
  # NOTE(review): NobBS.posterior is assumed to be defined elsewhere in this
  # package -- confirm it is in scope before calling with trajectories = TRUE.
  if (trajectories)
    NobBS <- NobBS.posterior
  # 1. nowcasting of cases ###
  if (caso) {
    if (hospitalizados)
      dados <- dados %>% filter(hospital == 1)
    ## 1.1 covid cases ####
    if (tipo == "covid") {
      ##COVID##
      dados2 <- dados %>%
        filter(pcr_sars2 == 1 | classi_fin == 5) %>% # covid under the new case classification
        select(dt_notific, dt_sin_pri, dt_pcr, dt_digita) %>%
        # report date = latest of the PCR, data-entry and notification dates
        mutate(dt_pcr_dig = pmax(dt_pcr, dt_digita, dt_notific, na.rm = TRUE))
    }
    ## 1.2. srag cases ####
    if (tipo == "srag") {
      ## %PIP the report date is the most recent of the notification and
      ## data-entry dates; it must NOT include the PCR date (dt_pcr),
      ## because SRAG does not require a test to be confirmed
      dados2 <- dados %>%
        select(dt_notific, dt_sin_pri, dt_digita) %>%
        mutate(dt_pcr_dig = pmax(dt_digita, dt_notific, na.rm = TRUE)) # named "pcr" for consistency, but no PCR date is used here
    }
    # NOTE(review): if tipo is neither "covid" nor "srag", dados2 is never
    # created and the nrow() below errors -- confirm callers always pass one
    # of the two expected values.
    if (nrow(dados2) != 0) {
      dados.now <- NobBS(
        data = dados2,
        now = max(dados2$dt_sin_pri, na.rm = TRUE) - trim.now,
        onset_date = "dt_sin_pri",
        report_date = "dt_pcr_dig",
        units = "1 day",
        moving_window = window,
        ...)
    } else {
      # no eligible record: return a NULL nowcast with the empty filtered data
      dados.now <- NULL
    }
    # 2. nowcasting of deaths ####
  } else {
    ## 2.1. covid deaths ####
    if (tipo == "covid") {
      ##covid deaths ####
      dados2 <- dados %>%
        filter(pcr_sars2 == 1 | classi_fin == 5) %>% # covid under the new case classification
        filter(evolucao == 2) %>%
        filter(!is.na(dt_evoluca)) %>%
        # closing date = latest of case-closure, data-entry and outcome dates
        mutate(dt_encerra = pmax(dt_encerra, dt_digita, dt_evoluca,
                                 na.rm = TRUE)) %>%
        select(dt_evoluca, dt_notific, dt_encerra)
    }
    ## 2.2. srag deaths ####
    if (tipo == "srag") {
      dados2 <- dados %>%
        filter(evolucao == 2) %>%
        filter(!is.na(dt_evoluca)) %>%
        mutate(dt_encerra = pmax(dt_encerra, dt_digita, dt_evoluca,
                                 na.rm = TRUE)) %>%
        select(dt_evoluca, dt_notific, dt_encerra)
    }
    if (nrow(dados2) != 0) {
      dados.now <- NobBS(
        data = dados2,
        now = max(dados2$dt_evoluca) - trim.now, ##PIP: the nowcast runs up to the last event date, here the date of death
        onset_date = "dt_evoluca",
        report_date = "dt_encerra",
        units = "1 day",
        moving_window = window,
        # informative beta prior over the reporting-delay distribution
        specs = list(beta.priors = dbinom(0:40, size = 40, prob = 15/50)),
        ...)
    } else {
      dados.now <- NULL
    }
  }
  # return both the nowcast ("now") and the filtered data used to build it
  out <- list(now = dados.now, dados = dados2)
  return(out)
}
| /R/gera.nowcasting.R | permissive | covid19br/now_fcts | R | false | false | 3,590 | r | #' Função para gerar nowcasting
#'
#' @param dados dados
#' @param caso Lógico. Se TRUE faz casos e FALSE, óbitos
#' @param tipo covid ou srag
#' @param hospitalizados Lógico, filtra apenas hospitalizados
#' @param trim.now corte para nowcasting
#' @param window janela para nowcasting
#' @param trajectories Lógico. Retorna trajetórias
#' @param ... qualquer parâmetro da função NobBS para controlar o ajuste do nowcasting
#'
#' @export
#'
#' @importFrom stats dbinom
#' @importFrom NobBS NobBS
#' @importFrom dplyr filter select mutate
gera.nowcasting <- function(dados, # dados
caso = TRUE, # caso = FALSE faz obitos
tipo, # covid ou srag
hospitalizados = TRUE,
trim.now, # corte para nowcasting
window, # janela para nowcasting
trajectories = FALSE,
...) { # retorna trajetórias
if (trajectories)
NobBS <- NobBS.posterior
# 1. nowcasting de casos ###
if (caso) {
if (hospitalizados)
dados <- dados %>% filter(hospital == 1)
## 1.1 casos covid ####
if (tipo == "covid") {
##COVID##
dados2 <- dados %>%
filter(pcr_sars2 == 1 | classi_fin == 5) %>% #covid com nova classificacao
select(dt_notific, dt_sin_pri, dt_pcr, dt_digita) %>%
mutate(dt_pcr_dig = pmax(dt_pcr, dt_digita, dt_notific, na.rm = TRUE))
}
## 1.2. casos srag ####
if (tipo == "srag") {
## %PIP data de registro é data mais recente entre notificação e digitação, não deve incluir data pcr (dt_pcr)
## pq SRAG não precisa de teste para ser confirmado
dados2 <- dados %>%
select(dt_notific, dt_sin_pri, dt_digita) %>%
mutate(dt_pcr_dig = pmax(dt_digita, dt_notific, na.rm = TRUE)) # nome aqui é pcr mas não tem pcr
}
if (nrow(dados2) != 0) {
dados.now <- NobBS(
data = dados2,
now = max(dados2$dt_sin_pri, na.rm = TRUE) - trim.now,
onset_date = "dt_sin_pri",
report_date = "dt_pcr_dig",
units = "1 day",
moving_window = window,
...)
} else {
dados.now <- NULL
}
# 2. nowcasting de obitos ####
} else {
## 2.1. obitos covid ####
if (tipo == "covid") {
##obitos COVID ####
dados2 <- dados %>%
filter(pcr_sars2 == 1 | classi_fin == 5) %>% # covid com nova classificacao
filter(evolucao == 2) %>%
filter(!is.na(dt_evoluca)) %>%
mutate(dt_encerra = pmax(dt_encerra, dt_digita, dt_evoluca,
na.rm = TRUE)) %>%
select(dt_evoluca, dt_notific, dt_encerra)
}
## 2.2. obitos srag ####
if (tipo == "srag") {
dados2 <- dados %>%
filter(evolucao == 2) %>%
filter(!is.na(dt_evoluca)) %>%
mutate(dt_encerra = pmax(dt_encerra, dt_digita, dt_evoluca,
na.rm = TRUE)) %>%
select(dt_evoluca, dt_notific, dt_encerra)
}
if (nrow(dados2) != 0) {
dados.now <- NobBS(
data = dados2,
now = max(dados2$dt_evoluca) - trim.now, ##PIP: nocwasting vai até última data do evento, no caso data do obito
onset_date = "dt_evoluca",
report_date = "dt_encerra",
units = "1 day",
moving_window = window,
specs = list(beta.priors = dbinom(0:40, size = 40, prob = 15/50)),
...)
} else {
dados.now <- NULL
}
}
out <- list(now = dados.now, dados = dados2)
return(out)
}
|
## Driver script: loads the four plotting functions, reads two days of the
## UCI household power consumption data, coerces column types, and renders
## the first three plots (plot 4 follows below).
source("plot1.R")
source("plot2.R")
source("plot3.R")
source("plot4.R")
# skip = 66637 jumps straight to 2007-02-01; nrow = 2880 reads exactly two
# days of minute-level records (2 x 1440)
dataset <- read.table("household_power_consumption.txt", sep =";", skip = 66637, nrow=2880)
names(dataset) <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
## Set data types
# Date is parsed first so it can be pasted together with Time below
dataset$Date <- as.POSIXct(dataset$Date, format="%e/%m/%Y")
dataset$Time <- as.POSIXct(paste(dataset$Date, dataset$Time))
dataset$Global_active_power <- as.numeric(dataset$Global_active_power)
dataset$Global_reactive_power <- as.numeric(dataset$Global_reactive_power)
dataset$Voltage <- as.numeric(dataset$Voltage)
dataset$Global_intensity <- as.numeric(dataset$Global_intensity)
dataset$Sub_metering_1 <- as.numeric(dataset$Sub_metering_1)
dataset$Sub_metering_2 <- as.numeric(dataset$Sub_metering_2)
dataset$Sub_metering_3 <- as.numeric(dataset$Sub_metering_3)
# render the required plots (each makeplotN is defined in plotN.R)
makeplot1(dataset)
makeplot2(dataset)
makeplot3(dataset)
makeplot4(dataset) | /main.R | no_license | Mohd-PH/ExData_Plotting1 | R | false | false | 995 | r | source("plot1.R")
source("plot2.R")
source("plot3.R")
source("plot4.R")
dataset <- read.table("household_power_consumption.txt", sep =";", skip = 66637, nrow=2880)
names(dataset) <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
## Set data types
dataset$Date <- as.POSIXct(dataset$Date, format="%e/%m/%Y")
dataset$Time <- as.POSIXct(paste(dataset$Date, dataset$Time))
dataset$Global_active_power <- as.numeric(dataset$Global_active_power)
dataset$Global_reactive_power <- as.numeric(dataset$Global_reactive_power)
dataset$Voltage <- as.numeric(dataset$Voltage)
dataset$Global_intensity <- as.numeric(dataset$Global_intensity)
dataset$Sub_metering_1 <- as.numeric(dataset$Sub_metering_1)
dataset$Sub_metering_2 <- as.numeric(dataset$Sub_metering_2)
dataset$Sub_metering_3 <- as.numeric(dataset$Sub_metering_3)
makeplot1(dataset)
makeplot2(dataset)
makeplot3(dataset)
makeplot4(dataset) |
# define your working directory path here

# Benchmark script: times two successive lm() fits on simulated data and
# appends the elapsed timings to RegTimeR.csv via data.table::fwrite().

# fixed: install.package() does not exist -- the function is install.packages()
install.packages("data.table")
library(data.table)

# clear the workspace, including hidden (dot-prefixed) objects
# (a single all.names = TRUE call subsumes the previous plain rm(list = ls()))
rm(list = ls(all.names = TRUE))

# simulated regression problem: 1000 observations, 200 predictors
x <- matrix(rnorm(200 * 1000), ncol = 200)
y <- matrix(rnorm(1 * 1000), ncol = 1)
data <- data.frame(y, x)

# first (cold) fit of the full linear model
tmp1 <- system.time({
  model <- lm(y ~ ., data = data)
  print(model)
})
times <- t(data.matrix(tmp1))
fwrite(times, "RegTimeR.csv", append = TRUE)

# second fit, to measure any caching / warm-up effect
tmp2 <- system.time({
  modelcache <- lm(y ~ ., data = data)
  print(modelcache)
})
timescache <- t(data.matrix(tmp2))
fwrite(timescache, "RegTimeR.csv", append = TRUE)
#########################################################
| /Reproducibility/Table3/R/Reg.R | permissive | ParallelGSReg/GlobalSearchRegression.jl | R | false | false | 606 | r | # define your working directory path here
install.package("data.table")
library(data.table)
rm(list=ls())
rm(list = ls(all.names = TRUE))
x <- matrix(rnorm(200*1000), ncol=200)
y <- matrix(rnorm(1*1000), ncol=1)
data <- data.frame(y,x)
tmp1 <- system.time(
{
model <- lm(y ~ . , data=data)
print(model)
})
times <- t(data.matrix(tmp1))
fwrite(times,"RegTimeR.csv", append=TRUE)
tmp2 <- system.time(
{
modelcache <- lm(y ~ . , data=data)
print(modelcache)
})
timescache <- t(data.matrix(tmp2))
fwrite(timescache,"RegTimeR.csv", append=TRUE)
#########################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{sotu_text}
\alias{sotu_text}
\title{State of the Union Address Text}
\description{
A character vector with one State of the Union in each
element. These line up with the rows of the data in
\code{\link{sotu_meta}}.
}
\references{
\url{http://www.presidency.ucsb.edu/sou.php}
}
\keyword{data}
| /man/sotu_text.Rd | no_license | dhmontgomery/sotu | R | false | true | 398 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{sotu_text}
\alias{sotu_text}
\title{State of the Union Address Text}
\description{
A character vector with one State of the Union in each
element. These line up with the rows of the data in
\code{\link{sotu_meta}}.
}
\references{
\url{http://www.presidency.ucsb.edu/sou.php}
}
\keyword{data}
|
## functions & methods to simulate & plot discrete time Brownian motion
## on a simulated discrete-time tree
## Simulate a discrete-time pure-birth tree of n taxa over t time steps,
## then simulate discrete-time Brownian motion along each branch, with
## per-step variance sig2 (scalar or length-t vector).  Returns an object
## of class "simBMphylo" (tree plus per-edge trajectories), invisibly;
## optionally plots it via the plot method.
simBMphylo<-function(n,t,sig2,plot=TRUE,...){
  # recycle a scalar (or wrong-length) sig2 to one rate per time step
  if(length(sig2)!=t) sig2<-rep(sig2[1],t)
  # per-step birth rate b satisfying 2*(1+b)^t = n (growth from 2 lineages
  # to n over t steps)
  b<-exp((log(n)-log(2))/t)-1
  tree<-pbtree(b=b,d=0,t=t,n=n,type="discrete",
    tip.label=if(n<=26) LETTERS[n:1] else NULL,
    quiet=TRUE)
  H<-nodeHeights(tree)
  root<-Ntip(tree)+1
  xx<-list()
  for(i in 1:nrow(tree$edge)){
    # per-step standard deviations for the time steps this edge spans
    # (note: H[i,1] + 1:len parses as H[i,1] + (1:len), i.e. steps
    # H[i,1]+1 ... H[i,1]+len -- intended)
    sd<-sqrt(sig2[H[i,1]+1:tree$edge.length[i]])
    x<-rnorm(n=tree$edge.length[i],sd=sd)
    x<-c(0,cumsum(x)) # cumulative BM displacement along this edge
    # shift by the parent edge's final value so trajectories are continuous
    if(tree$edge[i,1]!=root){
      ii<-which(tree$edge[,2]==tree$edge[i,1])
      x<-x+xx[[ii]][length(xx[[ii]])]
    }
    xx[[i]]<-x
  }
  object<-list(tree=tree,x=xx)
  class(object)<-"simBMphylo"
  if(plot) plot(object,...)
  invisible(object)
}
## Plot method for "simBMphylo": panel (a) shows the simulated tree,
## panel (b) the simulated phenotype trajectories through time.
## Optional ... arguments: fsize, cex.axis, cex.lab.
plot.simBMphylo<-function(x,...){
  xx<-x$x
  tree<-x$tree
  H<-nodeHeights(tree)
  # two stacked panels: tree on top (1/3 height), traces below (2/3)
  layout(mat=matrix(c(1,2),2,1),
    heights=c(1/3,2/3))
  # pull optional graphical parameters from ..., with defaults
  if(hasArg(fsize)) fsize<-list(...)$fsize
  else fsize<-0.9
  if(hasArg(cex.axis)) cex.axis<-list(...)$cex.axis
  else cex.axis<-0.9
  if(hasArg(cex.lab)) cex.lab=list(...)$cex.lab
  else cex.lab<-1
  # panel (a): the tree, with y-range padded to line up with panel (b)
  plotTree(tree,mar=c(0.1,4.1,2.1,1.1),
    xlim=c(0,1.05*max(H)),
    ylim=c(1-2*(Ntip(tree)-1)*0.04,
    Ntip(tree)+(Ntip(tree)-1)*0.04),lwd=1,
    fsize=fsize)
  ## axis(1,cex.axis=cex.axis)
  mtext("a)",line=0,adj=0,cex=cex.lab)
  # panel (b): one line per edge, drawn in node-height (time) coordinates
  plot.new()
  par(mar=c(5.1,4.1,1.1,1.1))
  plot.window(xlim=c(0,1.05*max(H)),ylim=range(xx))
  axis(1,cex.axis=cex.axis)
  axis(2,cex.axis=cex.axis)
  for(i in 1:length(xx))
    lines(H[i,1]:H[i,2],xx[[i]])
  # label each terminal trajectory with its tip name
  for(i in 1:Ntip(tree)){
    ii<-which(tree$edge[,2]==i)
    text(max(H),xx[[ii]][length(xx[[ii]])],
      tree$tip.label[i],pos=4,offset=0.4/3,
      cex=fsize)
  }
  mtext("b)",line=0,adj=0,cex=cex.lab)
  title(xlab="time",ylab="phenotype",
    cex.lab=cex.lab)
}
## Print method for "simBMphylo" objects: a brief textual summary only;
## the plot method is the intended way to inspect the simulation.
print.simBMphylo<-function(x,...){
    n_taxa <- Ntip(x$tree)
    summary_line <- paste("\nObject of class \"simBMphylo\" with", n_taxa,
        "taxa.\n")
    cat(summary_line, sep = "")
    cat("To print use plot method.\n\n")
}
| /R/simBMphylo.R | no_license | KlausVigo/phytools | R | false | false | 1,926 | r | ## functions & methods to simulate & plot discrete time Brownian motion
## on a simulated discrete-time tree
simBMphylo<-function(n,t,sig2,plot=TRUE,...){
if(length(sig2)!=t) sig2<-rep(sig2[1],t)
b<-exp((log(n)-log(2))/t)-1
tree<-pbtree(b=b,d=0,t=t,n=n,type="discrete",
tip.label=if(n<=26) LETTERS[n:1] else NULL,
quiet=TRUE)
H<-nodeHeights(tree)
root<-Ntip(tree)+1
xx<-list()
for(i in 1:nrow(tree$edge)){
sd<-sqrt(sig2[H[i,1]+1:tree$edge.length[i]])
x<-rnorm(n=tree$edge.length[i],sd=sd)
x<-c(0,cumsum(x))
if(tree$edge[i,1]!=root){
ii<-which(tree$edge[,2]==tree$edge[i,1])
x<-x+xx[[ii]][length(xx[[ii]])]
}
xx[[i]]<-x
}
object<-list(tree=tree,x=xx)
class(object)<-"simBMphylo"
if(plot) plot(object,...)
invisible(object)
}
plot.simBMphylo<-function(x,...){
xx<-x$x
tree<-x$tree
H<-nodeHeights(tree)
layout(mat=matrix(c(1,2),2,1),
heights=c(1/3,2/3))
if(hasArg(fsize)) fsize<-list(...)$fsize
else fsize<-0.9
if(hasArg(cex.axis)) cex.axis<-list(...)$cex.axis
else cex.axis<-0.9
if(hasArg(cex.lab)) cex.lab=list(...)$cex.lab
else cex.lab<-1
plotTree(tree,mar=c(0.1,4.1,2.1,1.1),
xlim=c(0,1.05*max(H)),
ylim=c(1-2*(Ntip(tree)-1)*0.04,
Ntip(tree)+(Ntip(tree)-1)*0.04),lwd=1,
fsize=fsize)
## axis(1,cex.axis=cex.axis)
mtext("a)",line=0,adj=0,cex=cex.lab)
plot.new()
par(mar=c(5.1,4.1,1.1,1.1))
plot.window(xlim=c(0,1.05*max(H)),ylim=range(xx))
axis(1,cex.axis=cex.axis)
axis(2,cex.axis=cex.axis)
for(i in 1:length(xx))
lines(H[i,1]:H[i,2],xx[[i]])
for(i in 1:Ntip(tree)){
ii<-which(tree$edge[,2]==i)
text(max(H),xx[[ii]][length(xx[[ii]])],
tree$tip.label[i],pos=4,offset=0.4/3,
cex=fsize)
}
mtext("b)",line=0,adj=0,cex=cex.lab)
title(xlab="time",ylab="phenotype",
cex.lab=cex.lab)
}
print.simBMphylo<-function(x,...){
cat(paste("\nObject of class \"simBMphylo\" with",Ntip(x$tree),"taxa.\n"),
sep="")
cat("To print use plot method.\n\n")
}
|
# MakePredictDF ####
MakePredictDF <- function(DF,
                          Length.Out = 100,
                          HoldNumeric = NULL,
                          HoldFactor = NULL){
  # Build a named list of covariate values at which to generate model
  # predictions, from the columns of DF.
  #
  #   numeric columns  -> length Length.Out + 1 vector: an even grid from
  #                       min to max, with the column mean appended last
  #   character/factor -> the column's unique values
  #
  # HoldNumeric: character vector of numeric columns to hold fixed at their
  #              mean (i.e. keep only the appended last element).
  # HoldFactor:  named list mapping factor columns to the single value they
  #              should be held at.
  #
  # Returns the named list (numeric entries first, then factor entries).
  # NOTE: NAs in numeric columns are not removed (min/max defaults), so a
  # column containing NA will error inside seq() -- unchanged behaviour.

  # normalise character(0) to NULL so the intersect() below is a clean no-op
  if(length(HoldNumeric) == 0){
    HoldNumeric <- NULL
  }
  PredList <- list()

  # numeric columns -- base-R replacement for dplyr::select_if(is.numeric);
  # also replaces the previous purrr::map()/<<- side-effect pattern with
  # plain loops, dropping the dplyr/purrr dependency entirely
  NumericDF <- DF[, vapply(DF, is.numeric, logical(1)), drop = FALSE]
  if(ncol(NumericDF) > 0){
    for(i in seq_len(ncol(NumericDF))){
      col <- NumericDF[[i]]
      PredList[[i]] <- c(seq(from = min(col),
                             to = max(col),
                             length.out = Length.Out),
                         mean(col))
    }
    names(PredList) <- colnames(NumericDF)
    for(nm in intersect(HoldNumeric, names(PredList))){
      vals <- PredList[[nm]]
      PredList[[nm]] <- vals[length(vals)]  # keep only the appended mean
    }
  }

  # character / factor columns
  FactorDF <- DF[, vapply(DF, function(x) is.character(x) || is.factor(x),
                          logical(1)), drop = FALSE]
  if(ncol(FactorDF) > 0){
    for(i in seq_len(ncol(FactorDF))){
      PredList[[length(PredList) + 1]] <- c(unique(FactorDF[[i]]))
    }
    names(PredList)[(ncol(NumericDF) + 1):length(PredList)] <- colnames(FactorDF)
    for(nm in intersect(names(HoldFactor), names(PredList))){
      PredList[[nm]] <- HoldFactor[[nm]]
    }
  }
  return(PredList)
}
| /R/MakePredictDF.R | no_license | gfalbery/ggregplot | R | false | false | 1,399 | r |
# MakePredictDF ####
MakePredictDF <- function(DF,
Length.Out = 100,
HoldNumeric = NULL,
HoldFactor = NULL){
PredList <- list()
if(length(HoldNumeric) == 0){
HoldNumeric <- NULL
}
NumericDF <- DF %>%
dplyr::select_if(is.numeric)
if(ncol(NumericDF) > 0){
for(i in 1:ncol(NumericDF)){
PredList[[i]] <- c(seq(from = min(NumericDF[,i]),
to = max(NumericDF[,i]),
length.out = Length.Out),
mean(NumericDF[,i]))
}
names(PredList) <- colnames(NumericDF)
if(length(intersect(HoldNumeric, names(PredList))) > 0){
intersect(HoldNumeric, names(PredList)) %>%
map(function(a){
PredList[[a]] <<- last(PredList[[a]])
})
}
}
FactorDF <- DF %>% dplyr::select_if(~is.character(.x)|is.factor(.x))
if(ncol(FactorDF) > 0){
for(i in 1:ncol(FactorDF)){
PredList[[length(PredList) + 1]] <- c(unique(FactorDF[,i]))
}
names(PredList)[(ncol(NumericDF) + 1):length(PredList)] <- c(colnames(FactorDF))
if(length(intersect(names(HoldFactor), names(PredList))) > 0){
intersect(names(HoldFactor), names(PredList)) %>%
map(function(a){
PredList[[a]] <<- HoldFactor[[a]]
})
}
}
return(PredList)
}
|
#' Network visualization groups options
#'
#' Network visualization groups options. For full documentation, have a look at \link{visDocumentation}.
#'
#' @param graph : a visNetwork object
#' @param useDefaultGroups : Boolean. Default to true. If your nodes have groups defined that are not in the Groups module, the module loops over the groups it does have, allocating one for each unknown group. When all are used, it goes back to the first group. By setting this to false, the default groups will not be used in this cycle.
#' @param groupname : String. Name of target group.
#' @param ... : \link{visNodes}. You can add multiple groups containing styling information that applies to a certain subset of groups. All options described in the nodes module that make sense can be used here (you're not going to set the same id or x,y position for a group of nodes)
#'
#' @examples
#'
#' nodes <- data.frame(id = 1:10, label = paste("Label", 1:10),
#' group = sample(c("A", "B"), 10, replace = TRUE))
#' edges <- data.frame(from = c(2,5,10), to = c(1,2,10))
#'
#' visNetwork(nodes, edges, legend = TRUE) %>%
#' visGroups(groupname = "A", color = "red", shape = "database") %>%
#' visGroups(groupname = "B", color = "yellow", shape = "triangle")
#'
#'@seealso \link{visNodes} for nodes options, \link{visEdges} for edges options, \link{visGroups} for groups options,
#'\link{visLayout} & \link{visHierarchicalLayout} for layout, \link{visPhysics} for physics, \link{visInteraction} for interaction, ...
#'
#'
#' @export
#'
# Merge group-level styling options into a visNetwork graph object.
# The useDefaultGroups flag is merged first; if any styling options were
# supplied via ..., they are stored under `groupname` (then mandatory).
visGroups <- function(graph,
                      useDefaultGroups = NULL,
                      groupname = NULL,
                      ...){

  # assigning NULL adds nothing to the list, so the merge is a no-op then
  flag_opts <- list()
  flag_opts$useDefaultGroups <- useDefaultGroups
  graph$x$options$groups <- mergeLists(graph$x$options$groups, flag_opts)

  style_opts <- list(...)
  if (length(style_opts) > 0) {
    if (is.null(groupname)) {
      stop("Must have a groupname to identify group")
    }
    graph$x$options$groups <- mergeLists(graph$x$options$groups,
                                         setNames(list(style_opts), groupname))
  }

  graph
}
| /R/visGroups.R | no_license | hnbeck/visNetwork | R | false | false | 2,114 | r | #' Network visualization groups options
#'
#' Network visualization groups options. For full documentation, have a look at \link{visDocumentation}.
#'
#' @param graph : a visNetwork object
#' @param useDefaultGroups : Boolean. Default to true. If your nodes have groups defined that are not in the Groups module, the module loops over the groups it does have, allocating one for each unknown group. When all are used, it goes back to the first group. By setting this to false, the default groups will not be used in this cycle.
#' @param groupname : String. Name of target group.
#' @param ... : \link{visNodes}. You can add multiple groups containing styling information that applies to a certain subset of groups. All options described in the nodes module that make sense can be used here (you're not going to set the same id or x,y position for a group of nodes)
#'
#' @examples
#'
#' nodes <- data.frame(id = 1:10, label = paste("Label", 1:10),
#' group = sample(c("A", "B"), 10, replace = TRUE))
#' edges <- data.frame(from = c(2,5,10), to = c(1,2,10))
#'
#' visNetwork(nodes, edges, legend = TRUE) %>%
#' visGroups(groupname = "A", color = "red", shape = "database") %>%
#' visGroups(groupname = "B", color = "yellow", shape = "triangle")
#'
#'@seealso \link{visNodes} for nodes options, \link{visEdges} for edges options, \link{visGroups} for groups options,
#'\link{visLayout} & \link{visHierarchicalLayout} for layout, \link{visPhysics} for physics, \link{visInteraction} for interaction, ...
#'
#'
#' @export
#'
visGroups <- function(graph,
useDefaultGroups = NULL,
groupname = NULL,
...){
groups <- list()
groups$useDefaultGroups = useDefaultGroups
graph$x$options$groups <- mergeLists(graph$x$options$groups, groups)
params <- list(...)
if(length(params) > 0){
if(is.null(groupname)){
stop("Must have a groupname to identify group")
}
groups <- list(list(...))
names(groups) <- groupname
graph$x$options$groups <- mergeLists(graph$x$options$groups, groups)
}
graph
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linalg.R
\name{linalg_solve}
\alias{linalg_solve}
\title{Computes the solution of a square system of linear equations with a unique solution.}
\usage{
linalg_solve(A, B)
}
\arguments{
\item{A}{(Tensor): tensor of shape \verb{(*, n, n)} where \code{*} is zero or more batch dimensions.}
\item{B}{(Tensor): right-hand side tensor of shape \verb{(*, n)} or \verb{(*, n, k)} or \verb{(n,)} or \verb{(n, k)}
according to the rules described above}
}
\description{
Letting \teqn{\mathbb{K}} be \teqn{\mathbb{R}} or \teqn{\mathbb{C}},
this function computes the solution \teqn{X \in \mathbb{K}^{n \times k}} of the \strong{linear system} associated to
\teqn{A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}}, which is defined as
}
\details{
\deqn{
AX = B
}
This system of linear equations has one solution if and only if \teqn{A} is \code{invertible}_.
This function assumes that \teqn{A} is invertible.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
Letting \code{*} be zero or more batch dimensions,
\itemize{
\item If \code{A} has shape \verb{(*, n, n)} and \code{B} has shape \verb{(*, n)} (a batch of vectors) or shape
\verb{(*, n, k)} (a batch of matrices or "multiple right-hand sides"), this function returns \code{X} of shape
\verb{(*, n)} or \verb{(*, n, k)} respectively.
\item Otherwise, if \code{A} has shape \verb{(*, n, n)} and \code{B} has shape \verb{(n,)} or \verb{(n, k)}, \code{B}
is broadcasted to have shape \verb{(*, n)} or \verb{(*, n, k)} respectively.
}
This function then returns the solution of the resulting batch of systems of linear equations.
}
\note{
This function computes \code{X = A$inverse() @ B} in a faster and
more numerically stable way than performing the computations separately.
}
\examples{
if (torch_is_installed()) {
A <- torch_randn(3, 3)
b <- torch_randn(3)
x <- linalg_solve(A, b)
torch_allclose(torch_matmul(A, x), b)
}
}
\seealso{
Other linalg:
\code{\link{linalg_cholesky_ex}()},
\code{\link{linalg_cholesky}()},
\code{\link{linalg_det}()},
\code{\link{linalg_eigh}()},
\code{\link{linalg_eigvalsh}()},
\code{\link{linalg_eigvals}()},
\code{\link{linalg_eig}()},
\code{\link{linalg_householder_product}()},
\code{\link{linalg_inv_ex}()},
\code{\link{linalg_inv}()},
\code{\link{linalg_lstsq}()},
\code{\link{linalg_matrix_norm}()},
\code{\link{linalg_matrix_power}()},
\code{\link{linalg_matrix_rank}()},
\code{\link{linalg_multi_dot}()},
\code{\link{linalg_norm}()},
\code{\link{linalg_pinv}()},
\code{\link{linalg_qr}()},
\code{\link{linalg_slogdet}()},
\code{\link{linalg_svdvals}()},
\code{\link{linalg_svd}()},
\code{\link{linalg_tensorinv}()},
\code{\link{linalg_tensorsolve}()},
\code{\link{linalg_vector_norm}()}
}
\concept{linalg}
| /man/linalg_solve.Rd | permissive | snapbuy/torch | R | false | true | 2,925 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linalg.R
\name{linalg_solve}
\alias{linalg_solve}
\title{Computes the solution of a square system of linear equations with a unique solution.}
\usage{
linalg_solve(A, B)
}
\arguments{
\item{A}{(Tensor): tensor of shape \verb{(*, n, n)} where \code{*} is zero or more batch dimensions.}
\item{B}{(Tensor): right-hand side tensor of shape \verb{(*, n)} or \verb{(*, n, k)} or \verb{(n,)} or \verb{(n, k)}
according to the rules described above}
}
\description{
Letting \teqn{\mathbb{K}} be \teqn{\mathbb{R}} or \teqn{\mathbb{C}},
this function computes the solution \teqn{X \in \mathbb{K}^{n \times k}} of the \strong{linear system} associated to
\teqn{A \in \mathbb{K}^{n \times n}, B \in \mathbb{K}^{n \times k}}, which is defined as
}
\details{
\deqn{
AX = B
}
This system of linear equations has one solution if and only if \teqn{A} is \code{invertible}_.
This function assumes that \teqn{A} is invertible.
Supports inputs of float, double, cfloat and cdouble dtypes.
Also supports batches of matrices, and if the inputs are batches of matrices then
the output has the same batch dimensions.
Letting \code{*} be zero or more batch dimensions,
\itemize{
\item If \code{A} has shape \verb{(*, n, n)} and \code{B} has shape \verb{(*, n)} (a batch of vectors) or shape
\verb{(*, n, k)} (a batch of matrices or "multiple right-hand sides"), this function returns \code{X} of shape
\verb{(*, n)} or \verb{(*, n, k)} respectively.
\item Otherwise, if \code{A} has shape \verb{(*, n, n)} and \code{B} has shape \verb{(n,)} or \verb{(n, k)}, \code{B}
is broadcasted to have shape \verb{(*, n)} or \verb{(*, n, k)} respectively.
}
This function then returns the solution of the resulting batch of systems of linear equations.
}
\note{
This function computes \code{X = A$inverse() @ B} in a faster and
more numerically stable way than performing the computations separately.
}
\examples{
if (torch_is_installed()) {
A <- torch_randn(3, 3)
b <- torch_randn(3)
x <- linalg_solve(A, b)
torch_allclose(torch_matmul(A, x), b)
}
}
\seealso{
Other linalg:
\code{\link{linalg_cholesky_ex}()},
\code{\link{linalg_cholesky}()},
\code{\link{linalg_det}()},
\code{\link{linalg_eigh}()},
\code{\link{linalg_eigvalsh}()},
\code{\link{linalg_eigvals}()},
\code{\link{linalg_eig}()},
\code{\link{linalg_householder_product}()},
\code{\link{linalg_inv_ex}()},
\code{\link{linalg_inv}()},
\code{\link{linalg_lstsq}()},
\code{\link{linalg_matrix_norm}()},
\code{\link{linalg_matrix_power}()},
\code{\link{linalg_matrix_rank}()},
\code{\link{linalg_multi_dot}()},
\code{\link{linalg_norm}()},
\code{\link{linalg_pinv}()},
\code{\link{linalg_qr}()},
\code{\link{linalg_slogdet}()},
\code{\link{linalg_svdvals}()},
\code{\link{linalg_svd}()},
\code{\link{linalg_tensorinv}()},
\code{\link{linalg_tensorsolve}()},
\code{\link{linalg_vector_norm}()}
}
\concept{linalg}
|
## Exploratory Data Analysis Project 1
## See README.md for information about the dataset and project goal
## This file is a working space. It will create all required plots but is not
## the submission.
# define path variables for working directory
# NOTE(review): hard-coded absolute paths + setwd() make this machine-specific;
# acceptable for a personal working file.
dCoursera <- "/Users/heidi/files/Box Sync/classes/Coursera/"
dExAnalysis <- file.path(dCoursera, "ExploratoryAnalysis")
dProject1 <- file.path(dExAnalysis, "Project1/ExData_Plotting1")
# set working directory
setwd(dProject1)
# read in data, just for Feb 1-2, 2007 (2 days x 1440 minute-level rows)
firstline <- 66638
fname <- "household_power_consumption.txt"
# read the column names from the file's first line
# NOTE(review): this local `head` shadows base::head for the rest of the script
head <- scan(fname, what=character(), nlines=1, sep=";", quiet=TRUE)
# NOTE(review): header=TRUE combined with skip consumes the first skipped-to
# row as a header (col.names then overrides it) -- confirm row 66638 is not a
# wanted data row being silently dropped.
data <- read.table(fname, sep=";", na.strings="?",
                   header=TRUE, col.names=head,
                   nrows=1440*2, skip=firstline-1)
# make datetime variables from the Date and Time fields
data$DateTime <- strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# make plot 1: histogram of global active power in 0.5 kW bins
binsize <- 0.5
bedges <- seq(0, max(data$Global_active_power)+binsize, binsize)
xname <- "Global Active Power (kilowatts)"
title = "Global Active Power"
png("plot1.png")
hist(data$Global_active_power, breaks=bedges, xlab=xname, main=title, col="red")
dev.off()
# make plot 2: time series of global active power
png("plot2.png")
yname <- "Global Active Power (kilowatts)"
xname <- ""
plot(data$DateTime, data$Global_active_power, type="l", xlab=xname, ylab=yname)
dev.off()
# make plot 3: the three sub-metering channels overlaid
png("plot3.png")
yname <- "Energy sub metering"
xname <- ""
leg = names(data[grepl("Sub", names(data))])
plot(data$DateTime, data$Sub_metering_1, type="l", xlab=xname, ylab=yname)
points(data$DateTime, data$Sub_metering_2, type="l", col="red")
points(data$DateTime, data$Sub_metering_3, type="l", col="blue")
# fixed: legend colors must follow drawing order (Sub_1 black, Sub_2 red,
# Sub_3 blue); previous c("black", "blue", "red") mislabeled channels 2 and 3
legend("topright", lty=1, col=c("black", "red", "blue"), legend=leg)
dev.off()
# make plot 4: 2x2 panel combining the above plus voltage and reactive power
png("plot4.png")
par(mfrow=c(2,2), mar=c(5, 4, 3, 1)+0.1)
# subplot 1: global active power
xname <- ""
yname <- "Global Active Power"
plot(data$DateTime, data$Global_active_power, type="l", xlab=xname, ylab=yname)
# subplot 2: voltage
xname <- "datetime"
yname <- "Voltage"
plot(data$DateTime, data$Voltage, type="l", xlab=xname, ylab=yname)
# subplot 3: sub-metering channels (same legend-color fix as plot 3)
yname <- "Energy sub metering"
xname <- ""
leg = names(data[grepl("Sub", names(data))])
plot(data$DateTime, data$Sub_metering_1, type="l", xlab=xname, ylab=yname)
points(data$DateTime, data$Sub_metering_2, type="l", col="red")
points(data$DateTime, data$Sub_metering_3, type="l", col="blue")
legend("topright", lty=1, col=c("black", "red", "blue"), legend=leg, bty="n")
# subplot 4: global reactive power
xname <- "datetime"
yname <- "Global_reactive_power"
plot(data$DateTime, data$Global_reactive_power, type="l", xlab=xname, ylab=yname)
dev.off() | /Initialize.R | no_license | Heidi-/ExData_Plotting1 | R | false | false | 2,721 | r | ## Exploratory Data Analysis Project 1
## See README.md for information about the dataset and project goal
## This file is a working space. It will create all required plots but is not
## the submission.
# define path variables for working directory
dCoursera <- "/Users/heidi/files/Box Sync/classes/Coursera/"
dExAnalysis <- file.path(dCoursera, "ExploratoryAnalysis")
dProject1 <- file.path(dExAnalysis, "Project1/ExData_Plotting1")
# set working directory
setwd(dProject1)
# read in data, just for Feb 1-2, 2007
firstline <- 66638
fname <- "household_power_consumption.txt"
head <- scan(fname, what=character(), nlines=1, sep=";", quiet=TRUE)
data <- read.table(fname, sep=";", na.strings="?",
header=TRUE, col.names=head,
nrows=1440*2, skip=firstline-1)
# make datetime variables from the Date and Time fields
data$DateTime <- strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
# make plot 1
binsize <- 0.5
bedges <- seq(0, max(data$Global_active_power)+binsize, binsize)
xname <- "Global Active Power (kilowatts)"
title = "Global Active Power"
png("plot1.png")
hist(data$Global_active_power, breaks=bedges, xlab=xname, main=title, col="red")
dev.off()
# make plot 2
png("plot2.png")
yname <- "Global Active Power (kilowatts)"
xname <- ""
plot(data$DateTime, data$Global_active_power, type="l", xlab=xname, ylab=yname)
dev.off()
# make plot 3
png("plot3.png")
yname <- "Energy sub metering"
xname <- ""
leg = names(data[grepl("Sub", names(data))])
plot(data$DateTime, data$Sub_metering_1, type="l", xlab=xname, ylab=yname)
points(data$DateTime, data$Sub_metering_2, type="l", col="red")
points(data$DateTime, data$Sub_metering_3, type="l", col="blue")
legend("topright", lty=1, col=c("black", "blue", "red"), legend=leg)
dev.off()
# make plot 4
png("plot4.png")
par(mfrow=c(2,2), mar=c(5, 4, 3, 1)+0.1)
# subplot 1
xname <- ""
yname <- "Global Active Power"
plot(data$DateTime, data$Global_active_power, type="l", xlab=xname, ylab=yname)
# subplot 2
xname <- "datetime"
yname <- "Voltage"
plot(data$DateTime, data$Voltage, type="l", xlab=xname, ylab=yname)
# subplot 3
yname <- "Energy sub metering"
xname <- ""
leg = names(data[grepl("Sub", names(data))])
plot(data$DateTime, data$Sub_metering_1, type="l", xlab=xname, ylab=yname)
points(data$DateTime, data$Sub_metering_2, type="l", col="red")
points(data$DateTime, data$Sub_metering_3, type="l", col="blue")
legend("topright", lty=1, col=c("black", "blue", "red"), legend=leg, bty="n")
# subplot 4
xname <- "datetime"
yname <- "Global_reactive_power"
plot(data$DateTime, data$Global_reactive_power, type="l", xlab=xname, ylab=yname)
dev.off() |
source("/home/bd/Dropbox/projects/hrs/cognition_attrition/src/00_bigfun.R")
load(file="/home/bd/Dropbox/projects/hrs/cognition_attrition/wd/df.Rdata")
#df<-df[df$wave %in% 5:6,]
########################################################################################
##baseline
# Build the baseline model summary table. For each exit status
# (proxy / attrit / dead), cases of that status are coded 1 against
# "eligible" (coded 0) and fitted via bigfun() (from 00_bigfun.R); the
# stacked results are post-processed with proc() and a delta column
# (van.1 - base.1) is added before selecting the reporting columns.
maketab <- function(df) {
    statuses <- c("proxy", "attrit", "dead")
    fits <- list()
    for (st in statuses) {
        sub <- df[df$status %in% c(st, "eligible"), ]
        sub$status <- ifelse(sub$status == st, 1, 0)
        fits[[st]] <- bigfun(sub, allmodels = FALSE)
    }
    stacked <- data.frame(do.call("rbind", fits))
    tab <- proc(stacked)  # no extension: apm quantities, .1 = p^m, .2 = win
    tab$delta <- tab$van.1 - tab$base.1
    tab[, c("prev", "base", "base.1", "van", "van.1", "delta", "van.2")]
}
# Point estimates on the full sample.
tab<-maketab(df)
# Nonparametric bootstrap (10 resamples of rows, with replacement) to get
# 95% intervals for the van.2 columns; each iteration refits all models.
tabL<-list()
for (i in 1:10) {
    print(i)
    index<-sample(1:nrow(df),nrow(df),replace=TRUE)
    tabL[[i]]<-maketab(df[index,])
}
# Collect the van.2 columns across bootstrap replicates.
nm<-grep("van.2",names(tab),fixed=TRUE)
L<-lapply(tabL,function(x) x[,nm])
est<-do.call("rbind",L)
# 2.5% / 97.5% bootstrap quantiles per column.
est<-apply(est,2,quantile,c(.025,.975))
tab2<-cbind(tab,t(est))
library(xtable)
print(xtable(tab2,digits=4),include.rownames=TRUE)
########################################################################################
##all models
source("/home/bd/Dropbox/projects/hrs/cognition_attrition/src/00_bigfun.R")
load(file="/home/bd/Dropbox/projects/hrs/cognition_attrition/wd/df.Rdata")
# Build the full-model summary table (allmodels = TRUE): for each exit
# status (proxy / attrit / dead), code that status 1 vs "eligible" 0, fit
# via bigfun() (from 00_bigfun.R), stack, post-process with proc(), and add
# delta = van.1 - base.1 before selecting the reporting columns.
maketab<-function(df) {
    L<-list()
    for (st in c("proxy","attrit","dead")) {
        tmp<-df[df$status %in% c(st,"eligible"),]
        tmp$status<-ifelse(tmp$status==st,1,0)
        L[[st]]<-bigfun(tmp,allmodels=TRUE)
    }
    tmp<-data.frame(do.call("rbind",L))
    tab<-proc(tmp) #zz<-lapply(L,proc) #no extenion: apm quantites, .1=p^m, .2=win
    tab$van.1-tab$base.1 -> tab$delta
    tab2<-tab[,c("base.1","van","delta","van.2","std.2","spl.2")]
    tab2
}
# Fit all models and print the win (.2) columns as a LaTeX table.
tab<-maketab(df)
library(xtable)
ii<-grep(".2",names(tab),fixed=TRUE)
print(xtable(tab[,ii],digits=4),include.rownames=TRUE)
########################################################################################
##stratified by age and sex
# Stratified analysis: split the sample by gender x age band, refit bigfun()
# per stratum for the proxy and dead outcomes, and print one table per outcome.
load(file="/home/bd/Dropbox/projects/hrs/cognition_attrition/wd/gender.Rdata")
df<-df[!is.na(df$ragender) & !is.na(df$age),]
# Age bands: <=60, 60-70, 70-80, >80; strata labels are "gender (band)".
gr<-cut(df$age,c(-Inf,60,70,80,Inf))
gr<-paste(df$ragender,gr)
L<-split(df,gr)
out<-list()
for (st in c("proxy","dead")) {
    for (i in 1:length(L)) {
        z<-L[[i]]
        tmp<-z[z$status %in% c(st,"eligible"),]
        tmp$status<-ifelse(tmp$status==st,1,0)
        out[[paste(st,names(L)[i]) ]]<-bigfun(tmp)
    }
}
tmp<-data.frame(do.call("rbind",out))
tab<-proc(tmp) #zz<-lapply(L,proc) #no extenion: apm quantites, .1=p^m, .2=win
# Keep only the win (.2) columns.
ii<-grep(".2",names(tab),fixed=TRUE)
tab[,ii]->tab
# Recover outcome / sex / age band from the row names built above.
rownames(tab)->rns
gr<-ifelse(grepl("proxy",rns),"proxy","dead")
sex<-ifelse(grepl("1.male",rns),"male","female")
age<-strsplit(rns,"e ")
age<-sapply(age,"[[",2)
tab<-cbind(sex,age,tab)
print(xtable(tab[gr=='proxy',],digits=4),include.rownames=FALSE)
print(xtable(tab[gr=='dead',],digits=4),include.rownames=FALSE)
| /C1_models.R | no_license | ben-domingue/pred_DementiaDeath | R | false | false | 3,043 | r | source("/home/bd/Dropbox/projects/hrs/cognition_attrition/src/00_bigfun.R")
load(file="/home/bd/Dropbox/projects/hrs/cognition_attrition/wd/df.Rdata")
#df<-df[df$wave %in% 5:6,]
########################################################################################
##baseline
# Baseline summary table (duplicate of the earlier definition): for each
# exit status (proxy / attrit / dead), code that status 1 vs "eligible" 0,
# fit via bigfun() (from 00_bigfun.R), stack, post-process with proc(), and
# add delta = van.1 - base.1.
maketab<-function(df) {
    L<-list()
    for (st in c("proxy","attrit","dead")) {
        tmp<-df[df$status %in% c(st,"eligible"),]
        tmp$status<-ifelse(tmp$status==st,1,0)
        L[[st]]<-bigfun(tmp,allmodels=FALSE)
    }
    tmp<-data.frame(do.call("rbind",L))
    tab<-proc(tmp) #zz<-lapply(L,proc) #no extenion: apm quantites, .1=p^m, .2=win
    tab$van.1-tab$base.1 -> tab$delta
    tab2<-tab[,c("prev","base","base.1","van","van.1","delta","van.2")]
    tab2
}
# Duplicate of the bootstrap section earlier in this file:
# point estimates plus a 10-replicate nonparametric bootstrap for 95%
# intervals on the van.2 columns.
tab<-maketab(df)
tabL<-list()
for (i in 1:10) {
    print(i)
    index<-sample(1:nrow(df),nrow(df),replace=TRUE)
    tabL[[i]]<-maketab(df[index,])
}
nm<-grep("van.2",names(tab),fixed=TRUE)
L<-lapply(tabL,function(x) x[,nm])
est<-do.call("rbind",L)
# 2.5% / 97.5% bootstrap quantiles per column.
est<-apply(est,2,quantile,c(.025,.975))
tab2<-cbind(tab,t(est))
library(xtable)
print(xtable(tab2,digits=4),include.rownames=TRUE)
########################################################################################
##all models
source("/home/bd/Dropbox/projects/hrs/cognition_attrition/src/00_bigfun.R")
load(file="/home/bd/Dropbox/projects/hrs/cognition_attrition/wd/df.Rdata")
# Fit the full model set (allmodels = TRUE) for each exit status and build
# the summary table with base.1, van, delta (= van.1 - base.1) and the win
# (.2) columns for the vanilla, standard and spline models.
maketab <- function(df) {
    fit_one <- function(st) {
        # one-vs-eligible coding for the current exit status
        keep <- df[df$status %in% c(st, "eligible"), ]
        keep$status <- ifelse(keep$status == st, 1, 0)
        bigfun(keep, allmodels = TRUE)
    }
    fits <- lapply(c(proxy = "proxy", attrit = "attrit", dead = "dead"), fit_one)
    tab <- proc(data.frame(do.call("rbind", fits)))  # apm quantities, .1 = p^m, .2 = win
    tab$delta <- tab$van.1 - tab$base.1
    tab[, c("base.1", "van", "delta", "van.2", "std.2", "spl.2")]
}
# Fit all models and print the win (.2) columns as a LaTeX table.
tab<-maketab(df)
library(xtable)
ii<-grep(".2",names(tab),fixed=TRUE)
print(xtable(tab[,ii],digits=4),include.rownames=TRUE)
########################################################################################
##stratified by age and sex
# Duplicate of the stratified section earlier in this file: split by
# gender x age band, refit bigfun() per stratum for proxy / dead, and print
# one LaTeX table per outcome.
load(file="/home/bd/Dropbox/projects/hrs/cognition_attrition/wd/gender.Rdata")
df<-df[!is.na(df$ragender) & !is.na(df$age),]
# Age bands: <=60, 60-70, 70-80, >80.
gr<-cut(df$age,c(-Inf,60,70,80,Inf))
gr<-paste(df$ragender,gr)
L<-split(df,gr)
out<-list()
for (st in c("proxy","dead")) {
    for (i in 1:length(L)) {
        z<-L[[i]]
        tmp<-z[z$status %in% c(st,"eligible"),]
        tmp$status<-ifelse(tmp$status==st,1,0)
        out[[paste(st,names(L)[i]) ]]<-bigfun(tmp)
    }
}
tmp<-data.frame(do.call("rbind",out))
tab<-proc(tmp) #zz<-lapply(L,proc) #no extenion: apm quantites, .1=p^m, .2=win
ii<-grep(".2",names(tab),fixed=TRUE)
tab[,ii]->tab
# Recover outcome / sex / age band from the row names built above.
rownames(tab)->rns
gr<-ifelse(grepl("proxy",rns),"proxy","dead")
sex<-ifelse(grepl("1.male",rns),"male","female")
age<-strsplit(rns,"e ")
age<-sapply(age,"[[",2)
tab<-cbind(sex,age,tab)
print(xtable(tab[gr=='proxy',],digits=4),include.rownames=FALSE)
print(xtable(tab[gr=='dead',],digits=4),include.rownames=FALSE)
|
#2
# Change to scatter plot and add smoothing curve
# Scatter of percent_yes over year; geom_smooth() overlays a trend with a
# confidence ribbon. Assumes `by_year` (columns: year, percent_yes) and
# ggplot2 are in scope from earlier steps -- TODO confirm.
ggplot(by_year, aes(year, percent_yes)) +
  geom_point() +
  geom_smooth()
| /Case study of exploratory analysis in R/Data visualization with ggplot2/2.R | no_license | SaiSharanyaY/DataCamp-Data-Scientist-with-R-Track. | R | false | false | 129 | r | #2
# Change to scatter plot and add smoothing curve
# Duplicate of the snippet above: scatter of percent_yes by year with a
# smoothing curve. Requires `by_year` and ggplot2 in scope -- TODO confirm.
ggplot(by_year, aes(year, percent_yes)) +
  geom_point() +
  geom_smooth()
|
# Fuzzer-generated regression input: a degenerate 1x1 data matrix and a
# 3x1 query matrix for the unexported C routine distr6:::C_EmpiricalMVPdf.
testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(8.8144277745463e-280, 0, 0), .Dim = c(3L, 1L)))
# Invoke the internal wrapper with the fuzz input; the point is only that
# the call does not crash or leak (valgrind harness).
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) | /distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610036550-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 186 | r | testlist <- list(data = structure(0, .Dim = c(1L, 1L)), x = structure(c(8.8144277745463e-280, 0, 0), .Dim = c(3L, 1L)))
# Duplicate fuzz-harness call; `testlist` is presumably the same structure
# as in the copy above -- its definition is on the mangled line preceding
# this one (dataset separator fused with code).
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result)
library(stringr)
library(miscTools)
#-----data preprocessing----
# Load PDAC-B scRNA-seq (cells) and spatial transcriptomics (spots) count
# tables; first column holds gene / spot identifiers.
scRNA <- read.table('GSM3405531_PDAC-B-indrop1.tsv', header=TRUE, sep="\t")
ST <- read.table('GSM3405534_PDAC-B-ST1.tsv', header = TRUE, sep = '\t', quote = "")
rownames(scRNA) <- scRNA[,1]
scRNA <- scRNA[,-1]
rownames(ST) <- ST[,1]
ST <- ST[,-1]
# Transpose ST so that rows are genes and columns are spots (matching scRNA).
ST <- as.data.frame(t(ST))
# #find the same genes of two data sets(index as well)
# gen_1 <- rownames(scRNA)
# gen_2 <- rownames(ST)
# same_gen <- intersect(gen_1,gen_2)
# G = length(same_gen)
# re1 <- c()
# re2 <- c()
# for (i in 1:G) {
#   re1[i] <- which(gen_1 == same_gen[i])
#   re2[i] <- which(gen_2 == same_gen[i])
# }
# 
# scRNA_same <- scRNA[re1, ]
# ST_same <- ST[re2,]
#Data normalization
# Median-of-column-sums library-size normalization, floored to integers.
tmp = median(colSums(scRNA))/colSums(scRNA)
RNA_norm = floor(sweep(scRNA,2,tmp,'*'))
tmp = median(colSums(ST))/colSums(ST)
ST_norm = floor(sweep(ST,2,tmp,'*'))
#find the 1000 most variable genes
# Per-gene standard deviation across cells / spots; the 500 most variable
# genes of each data set are retained below.
sd_rna = apply(RNA_norm, 1, sd)
sd_st = apply(ST_norm, 1, sd)
# RNA_1000 = RNA_norm[order(sd_rna, decreasing = T),] %>% head(1000)
# ST_1000 = ST_norm[order(sd_st, decreasing = T),] %>% head(1000)
# NOTE(review): `%>%` and plot_ly below require magrittr/dplyr and plotly,
# which are not loaded in this file -- presumably attached elsewhere; verify.
RNA_500 = RNA_norm[order(sd_rna, decreasing = T),] %>% head(500)
ST_500 = ST_norm[order(sd_st, decreasing = T),] %>% head(500)
# write.csv(RNA_500, file = "scRNA_processed.csv")
# write.csv(ST_500, file = "ST_processed.csv")
#----heat map of both data----
# rna_1000 <- scRNA[order(sd_rna, decreasing = T),] %>% head(1000)
# st_1000 <- ST[order(sd_st, decreasing = T),] %>% head(1000)
# Raw (un-normalized) counts of the same top-500 genes, for display.
rna_500 <- scRNA[order(sd_rna, decreasing = T),] %>% head(500)
st_500 <- ST[order(sd_st, decreasing = T),] %>% head(500)
# Build a custom colour scale from the volcano demo palette.
vals <- unique(scales::rescale(c(volcano)))
o <- order(vals, decreasing = FALSE)
cols <- scales::col_numeric("Blues", domain = NULL)(vals)
colz <- setNames(data.frame(vals[o], cols[o]), NULL)
plot_ly(z = as.matrix(rna_500), zmax= 100 ,zmin=0, colorscale = colz, type = "heatmap")%>%
  layout(title = "Heatmap of scRNA-seq data",
         xaxis = list(title = "Cells"),
         yaxis = list(title = "Gene"))
cols2 <- scales::col_numeric("Reds", domain = NULL)(vals)
colz2 <- setNames(data.frame(vals[o], cols2[o]), NULL)
plot_ly(z = as.matrix(st_500), zmax= 100 ,zmin=0, colorscale = colz2, type = "heatmap")%>%
  layout(title = "Heatmap of spots data",
         xaxis = list(title = "spots"),
         yaxis = list(title = "Gene"))
#----transform spots coordinates to numeric ones----
#get the coordinates of each spot.
# Spot column names are "<row>x<col>" strings; split on "x".
sp_1 <- colnames(ST_500)
sp_2 <- str_split(sp_1, "x")
# Convert a character vector of split coordinate tokens (e.g. c("10", "3"))
# into a plain numeric vector. As in the original (whose last statement was
# an assignment), the value is returned invisibly.
index_get <- function(x){
  invisible(as.vector(as.numeric(x)))
}
# One row per spot with numeric (row_ind, col_ind) grid coordinates.
ind <- as.data.frame(t(sapply(sp_2, index_get)))
names(ind) <- c("row_ind", "col_ind")
#ind <- arrange(ind, col_ind, row_ind)
# Shift coordinates so the grid starts at 1 (original labels start at 2).
ind$col_ind <- ind$col_ind - 1
ind$row_ind <- ind$row_ind - 1
#number of rows and colums
# L / W are the number of distinct row / column positions; used as grid
# bounds by NULL_find() and neigh_ind() below.
L <- length(table(ind$row_ind))
W <- length(table(ind$col_ind))
#----null/NA process----
#NULL:31*33-996 = 27
# 27 grid positions have no observed spot (L*W minus 996 observed spots).
# Attach coordinates to the spot-by-gene table and sort by (col, row) so
# that spot order matches the column-major grid traversal used later.
ST_2 <- as.data.frame(t(ST_500))
ST_2$row_ind <- ind$row_ind
ST_2$col_ind <- ind$col_ind
ST_2$coor_ind <- colnames(ST_500)
ST_2 <- arrange(ST_2, col_ind, row_ind)
ind <- arrange(ind, col_ind, row_ind)
# Quick visual check of which grid positions are occupied.
ggplot(ind, aes(col_ind, row_ind)) + geom_point(alpha = 0.6) +
  theme(axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank())
#from plot: 16 + 11, NULL.
#rearrange ST data by the order of coordinates of spots
# Back to genes x spots, dropping the three coordinate helper columns.
ST_500 <- t(ST_2[,-c(501,502,503)])
#find NA coordinates:
# Indicator helper: returns 1 when the 2-element coordinate `x` matches
# `coord_ind` elementwise, 0 otherwise. Used with apply() over the rows of
# the spot-coordinate table.
find_in <- function(x, coord_ind){
  n_equal <- sum(coord_ind == x)
  if (n_equal == 2) 1 else 0
}
# Scan the full L x W grid and return a matrix of (row, col) coordinates
# that are absent from the observed spot table `ind`.
# Relies on globals defined above in this file: L, W, ind, find_in.
NULL_find <- function(){
  Null <- NULL
  # fix: removed the unused accumulator `s = 0` from the original;
  # seq_len() instead of 1:L guards against a zero-sized grid.
  for(i in seq_len(L)){
    for(j in seq_len(W)){
      hits <- apply(ind, 1, find_in, coord_ind = c(i, j))
      if(sum(hits) == 0){
        # growth by rbind is acceptable: at most L * W tiny rows
        Null <- rbind(Null, c(i,j))
      }
    }
  }
  return(Null)
}
#finding neighbors
# Return the valid 4-neighbourhood (left, up, down, right) of grid cell
# (ell, w) on the L x W spot grid (L, W are globals defined above).
# Returns a list with:
#   ind_r: matrix of neighbour (row, col) coordinates
#   ind_x: column-major linear indices (row + (col - 1) * L), ascending
neigh_ind <- function(ell, w){
  cand <- matrix(c(ell, w-1, ell-1, w,
                   ell+1, w, ell, w+1), 4, 2, byrow=TRUE)
  # BUG FIX: the original used rowSums((tmp <= 0)|(tmp > L)|(tmp > W)),
  # comparing BOTH coordinates against BOTH L and W, which wrongly discards
  # valid neighbours whenever L != W. Bounds are now checked per column.
  out_of_bounds <- cand[, 1] <= 0 | cand[, 1] > L | cand[, 2] <= 0 | cand[, 2] > W
  # drop = FALSE keeps a matrix even when only one neighbour survives
  # (the original collapsed to a vector there and nrow() failed).
  ind_r <- cand[!out_of_bounds, , drop = FALSE]
  ind_x <- NULL
  for(i in seq_len(nrow(ind_r))){
    ind_x <- c(ind_x, ind_r[i,1]+(ind_r[i,2]-1)*L)
  }#index in x is from smaller to bigger
  ret_list <- list(ind_r, ind_x)
  names(ret_list) <- c("ind_r", "ind_x")
  return(ret_list)
}
#null coordinate index
# Coordinates of all unobserved grid positions, sorted column-major.
null_na <- as.data.frame(NULL_find())
null_na <- arrange(null_na, null_na[,2], null_na[,1])
null_na
write.csv(null_na, file = 'null_index.csv')
#plot a more clear figure
# Rows 17:27 are the 11 interior gaps (the first 16 lie on the grid edge);
# plot them in a second colour on top of the observed spots.
data_plus <- null_na[17:27,]
names(data_plus) <- c("row_ind", "col_ind")
tmp<- rbind(ind, data_plus)
tmp$I <- c(rep(2,996),rep(13,11))
ggplot(tmp, aes(col_ind, row_ind, color = letters[I])) + geom_point(alpha = 0.8) +
  theme(axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank()) +
  geom_hline(yintercept = c(4.5,0.5), linetype="dotted") +
  geom_vline(xintercept = c(0.5,4.5), linetype="dotted") +
  scale_fill_discrete(guide=FALSE)
#get neighbor's index of null data(11 x 1)
# For each interior gap, collect its neighbour coordinate matrix (ind_r).
nei_null <- list()
for(i in 17:27){
  nei_null[[i-16]] <- neigh_ind(null_na[i,1], null_na[i,2])[[1]]
}
# (translated) The 11 missing spots are filled with the mean of their
# neighbouring spots' values.
# Map each neighbour coordinate in `a` (a matrix of (row, col) pairs) to its
# row position in the global spot table `ind`, via the global helper find_in.
# Assumes each coordinate matches exactly one row of `ind`.
find_nei_ind <- function(a){
  n_nei <- nrow(a)
  ind_ori <- c()
  for (k in seq_len(n_nei)) {
    hit <- apply(ind, 1, find_in, coord_ind = a[k, ])
    ind_ori[k] <- which(hit == 1)
  }
  ind_ori
}
# Column positions (in ST_c) of the neighbours of each interior gap.
null_nei_ori <- lapply(nei_null, find_nei_ind)
# continuous
# log(2 * (count + 1)) transform of both expression matrices.
RNA_c <- log(2*(RNA_500 +1))
ST_c <- log(2*(ST_500 +1))
# Impute each of the 11 interior gaps as the mean of its neighbours.
null_com <- matrix(NA, 500, 11)
for(i in 1:length(null_nei_ori)){
  s = 0
  for(j in null_nei_ori[[i]]){
    s = s + ST_c[,j]
  }
  null_com[,i] <- s/length(null_nei_ori[[i]])
}
# Insert the imputed columns at their grid positions; `j` offsets for the
# columns already inserted in earlier iterations.
tmp <- ST_c
j = 1
for(i in 1:length(null_nei_ori)){
  tmp <- insertCol(tmp, null_nei_ori[[i]][2] + j , null_com[,i])
  j = j+1
}
#first 16 null/na values assumed to be 0 in all genes,
# and initilized as one cluster different from others
# Four edge columns of log(2) (i.e. zero counts) inserted at the start of
# each of the first four grid columns -- TODO confirm this matches the 16
# edge gaps found above.
c1 <- log(2 * matrix(1, 4, 500))
tmp <- matrix(as.numeric(tmp), dim(tmp)[1], dim(tmp)[2])
for(i in 1:4){
  tmp <- insertCol(tmp, L*(i-1) + 1, c1)
}
dim(tmp)
ST_complete <- tmp
rownames(ST_complete) <- rownames(ST_c)
write.csv(ST_complete, file = 'ST_complete.csv')
write.csv(RNA_c, file = 'RNA_c.csv')
| /Code/data_2.R | permissive | jingeyu/single-cell-RNA-cluster | R | false | false | 6,471 | r | library(stringr)
library(miscTools)
#-----data preprocessing----
# Duplicate of the preprocessing section earlier in this file: load PDAC-B
# scRNA-seq and spatial-transcriptomics counts, set gene row names, and
# library-size normalize both tables.
scRNA <- read.table('GSM3405531_PDAC-B-indrop1.tsv', header=TRUE, sep="\t")
ST <- read.table('GSM3405534_PDAC-B-ST1.tsv', header = TRUE, sep = '\t', quote = "")
rownames(scRNA) <- scRNA[,1]
scRNA <- scRNA[,-1]
rownames(ST) <- ST[,1]
ST <- ST[,-1]
ST <- as.data.frame(t(ST))
# #find the same genes of two data sets(index as well)
# gen_1 <- rownames(scRNA)
# gen_2 <- rownames(ST)
# same_gen <- intersect(gen_1,gen_2)
# G = length(same_gen)
# re1 <- c()
# re2 <- c()
# for (i in 1:G) {
#   re1[i] <- which(gen_1 == same_gen[i])
#   re2[i] <- which(gen_2 == same_gen[i])
# }
# 
# scRNA_same <- scRNA[re1, ]
# ST_same <- ST[re2,]
#Data normalization
# Median-of-column-sums normalization, floored to integers.
tmp = median(colSums(scRNA))/colSums(scRNA)
RNA_norm = floor(sweep(scRNA,2,tmp,'*'))
tmp = median(colSums(ST))/colSums(ST)
ST_norm = floor(sweep(ST,2,tmp,'*'))
#find the 1000 most variable genes
# Duplicate of the variable-gene / heatmap section earlier in this file:
# keep the 500 most variable genes and draw plotly heatmaps of the raw
# counts (requires magrittr/dplyr and plotly to be attached -- verify).
sd_rna = apply(RNA_norm, 1, sd)
sd_st = apply(ST_norm, 1, sd)
# RNA_1000 = RNA_norm[order(sd_rna, decreasing = T),] %>% head(1000)
# ST_1000 = ST_norm[order(sd_st, decreasing = T),] %>% head(1000)
RNA_500 = RNA_norm[order(sd_rna, decreasing = T),] %>% head(500)
ST_500 = ST_norm[order(sd_st, decreasing = T),] %>% head(500)
# write.csv(RNA_500, file = "scRNA_processed.csv")
# write.csv(ST_500, file = "ST_processed.csv")
#----heat map of both data----
# rna_1000 <- scRNA[order(sd_rna, decreasing = T),] %>% head(1000)
# st_1000 <- ST[order(sd_st, decreasing = T),] %>% head(1000)
rna_500 <- scRNA[order(sd_rna, decreasing = T),] %>% head(500)
st_500 <- ST[order(sd_st, decreasing = T),] %>% head(500)
vals <- unique(scales::rescale(c(volcano)))
o <- order(vals, decreasing = FALSE)
cols <- scales::col_numeric("Blues", domain = NULL)(vals)
colz <- setNames(data.frame(vals[o], cols[o]), NULL)
plot_ly(z = as.matrix(rna_500), zmax= 100 ,zmin=0, colorscale = colz, type = "heatmap")%>%
  layout(title = "Heatmap of scRNA-seq data",
         xaxis = list(title = "Cells"),
         yaxis = list(title = "Gene"))
cols2 <- scales::col_numeric("Reds", domain = NULL)(vals)
colz2 <- setNames(data.frame(vals[o], cols2[o]), NULL)
plot_ly(z = as.matrix(st_500), zmax= 100 ,zmin=0, colorscale = colz2, type = "heatmap")%>%
  layout(title = "Heatmap of spots data",
         xaxis = list(title = "spots"),
         yaxis = list(title = "Gene"))
#----transform spots coordinates to numeric ones----
#get the coordinates of each spot.
# Spot column names are "<row>x<col>" strings; split on "x".
sp_1 <- colnames(ST_500)
sp_2 <- str_split(sp_1, "x")
# Numeric coercion of split coordinate tokens; as.vector() strips any
# attributes. Returned invisibly, matching the original (whose last
# statement was an assignment).
index_get <- function(x){
  coords <- as.numeric(x)
  coords <- as.vector(coords)
  invisible(coords)
}
# Duplicate of the coordinate / null-processing section earlier in this
# file: build the (row_ind, col_ind) table, derive grid bounds L and W,
# attach coordinates to the spot table, and sort everything column-major.
ind <- as.data.frame(t(sapply(sp_2, index_get)))
names(ind) <- c("row_ind", "col_ind")
#ind <- arrange(ind, col_ind, row_ind)
ind$col_ind <- ind$col_ind - 1
ind$row_ind <- ind$row_ind - 1
#number of rows and colums
L <- length(table(ind$row_ind))
W <- length(table(ind$col_ind))
#----null/NA process----
#NULL:31*33-996 = 27
ST_2 <- as.data.frame(t(ST_500))
ST_2$row_ind <- ind$row_ind
ST_2$col_ind <- ind$col_ind
ST_2$coor_ind <- colnames(ST_500)
ST_2 <- arrange(ST_2, col_ind, row_ind)
ind <- arrange(ind, col_ind, row_ind)
# Visual check of occupied grid positions.
ggplot(ind, aes(col_ind, row_ind)) + geom_point(alpha = 0.6) +
  theme(axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank())
#from plot: 16 + 11, NULL.
#rearrange ST data by the order of coordinates of spots
ST_500 <- t(ST_2[,-c(501,502,503)])
#find NA coordinates:
# Returns 1 if the 2-element coordinate pair `x` equals `coord_ind` in both
# positions, else 0 (helper applied over the rows of the spot table).
find_in <- function(x, coord_ind){
  flag <- 0
  if (sum(coord_ind == x) == 2) {
    flag <- 1
  }
  flag
}
# Enumerate the (row, col) positions of the L x W grid that have no
# observed spot in `ind` (globals from above: L, W, ind, find_in).
NULL_find <- function(){
  Null <- NULL
  # fix: dropped the unused `s = 0` accumulator; seq_len() is safe for
  # empty ranges, unlike 1:L.
  for(i in seq_len(L)){
    for(j in seq_len(W)){
      hits <- apply(ind, 1, find_in, coord_ind = c(i, j))
      if(sum(hits) == 0){
        Null <- rbind(Null, c(i,j))  # small grid, rbind growth is fine
      }
    }
  }
  return(Null)
}
#finding neighbors
# Valid 4-neighbourhood (left, up, down, right) of grid cell (ell, w) on
# the L x W spot grid (L, W are globals). Returns:
#   ind_r: matrix of neighbour (row, col) coordinates
#   ind_x: column-major linear indices (row + (col - 1) * L), ascending
neigh_ind <- function(ell, w){
  cand <- matrix(c(ell, w-1, ell-1, w,
                   ell+1, w, ell, w+1), 4, 2, byrow=TRUE)
  # BUG FIX: the original compared both coordinate columns against both L
  # and W via rowSums((tmp <= 0)|(tmp > L)|(tmp > W)), which wrongly drops
  # valid neighbours whenever L != W; bounds are now checked per column.
  out_of_bounds <- cand[, 1] <= 0 | cand[, 1] > L | cand[, 2] <= 0 | cand[, 2] > W
  # drop = FALSE avoids collapsing to a vector when one neighbour remains.
  ind_r <- cand[!out_of_bounds, , drop = FALSE]
  ind_x <- NULL
  for(i in seq_len(nrow(ind_r))){
    ind_x <- c(ind_x, ind_r[i,1]+(ind_r[i,2]-1)*L)
  }#index in x is from smaller to bigger
  ret_list <- list(ind_r, ind_x)
  names(ret_list) <- c("ind_r", "ind_x")
  return(ret_list)
}
#null coordinate index
# Duplicate of the gap-detection section earlier in this file: list the
# unobserved grid positions, plot them, and collect the neighbours of the
# 11 interior gaps (rows 17:27; rows 1:16 are edge gaps).
null_na <- as.data.frame(NULL_find())
null_na <- arrange(null_na, null_na[,2], null_na[,1])
null_na
write.csv(null_na, file = 'null_index.csv')
#plot a more clear figure
data_plus <- null_na[17:27,]
names(data_plus) <- c("row_ind", "col_ind")
tmp<- rbind(ind, data_plus)
tmp$I <- c(rep(2,996),rep(13,11))
ggplot(tmp, aes(col_ind, row_ind, color = letters[I])) + geom_point(alpha = 0.8) +
  theme(axis.title.x=element_blank(),
        axis.text.x=element_blank(),
        axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        panel.grid.major =element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank()) +
  geom_hline(yintercept = c(4.5,0.5), linetype="dotted") +
  geom_vline(xintercept = c(0.5,4.5), linetype="dotted") +
  scale_fill_discrete(guide=FALSE)
#get neighbor's index of null data(11 x 1)
nei_null <- list()
for(i in 17:27){
  nei_null[[i-16]] <- neigh_ind(null_na[i,1], null_na[i,2])[[1]]
}
# (translated) The 11 missing values are filled using the mean of the
# surrounding spots' values.
# Maps each neighbour coordinate in `a` (matrix of (row, col) pairs) to its
# row position in the global spot table `ind`; relies on the globals `ind`
# and `find_in`. Assumes exactly one match per coordinate -- TODO confirm.
find_nei_ind <- function(a){
  nr <- nrow(a)
  ind_ori <- c()
  for(i in 1:nr){
    tmp <- apply(ind, 1, find_in, coord_ind = a[i,])
    ind_ori[i] <- which(tmp == 1)
  }
  return(ind_ori)
}
# Duplicate of the imputation section earlier in this file: log-transform
# both matrices, impute the 11 interior gaps as neighbour means, insert
# placeholder edge columns, and write the completed tables to CSV.
null_nei_ori <- lapply(nei_null, find_nei_ind)
# continuous
RNA_c <- log(2*(RNA_500 +1))
ST_c <- log(2*(ST_500 +1))
null_com <- matrix(NA, 500, 11)
for(i in 1:length(null_nei_ori)){
  s = 0
  for(j in null_nei_ori[[i]]){
    s = s + ST_c[,j]
  }
  null_com[,i] <- s/length(null_nei_ori[[i]])
}
# `j` offsets insert positions for columns added in earlier iterations.
tmp <- ST_c
j = 1
for(i in 1:length(null_nei_ori)){
  tmp <- insertCol(tmp, null_nei_ori[[i]][2] + j , null_com[,i])
  j = j+1
}
#first 16 null/na values assumed to be 0 in all genes,
# and initilized as one cluster different from others
c1 <- log(2 * matrix(1, 4, 500))
tmp <- matrix(as.numeric(tmp), dim(tmp)[1], dim(tmp)[2])
for(i in 1:4){
  tmp <- insertCol(tmp, L*(i-1) + 1, c1)
}
dim(tmp)
ST_complete <- tmp
rownames(ST_complete) <- rownames(ST_c)
write.csv(ST_complete, file = 'ST_complete.csv')
write.csv(RNA_c, file = 'RNA_c.csv')
|
# Internal workhorse for plot_effects(): builds per-species marginal-effect
# ("response curve") plots for every covariate of one submodel of a
# community occupancy model ("state" = occupancy, "det" = detection).
# Returns a named list of ggplot objects (one per covariate) and optionally
# saves each plot to `outdir` via ggsave().
plot.effects.commOccu <- function(object, # commOccu object
                                  mcmc.list, # mcmc.list (output of fit())
                                  submodel = "state", # "det" or "state"
                                  draws = 1000, # number of posterior samples to use (will draw random sample from posterior distribution if defined).
                                  outdir, # directory to save plots in (optional)
                                  level = 0.95, # confidence level for CIs in plot
                                  keyword_squared = "_squared", # the suffix of a covariate that indicates a quadratic effect (e.g. "elevation" and "elevation_squared" -> will be combined in plot)
                                  ...) # additional arguments for ggsave()
{
  submodel <- match.arg(submodel, choices = c("det", "state"))
  # occupancy coefficients are named "beta*", detection ones "alpha*"
  if(submodel == "state") {
    keyword_submodel <- "^beta"
    keyword_submodel_short <- "beta"
  }
  if(submodel == "det") {
    keyword_submodel <- "^alpha"
    keyword_submodel_short <- "alpha"
  }
  # get covariate information for submodel
  cov_info_subset <- object@covariate_info[object@covariate_info$submodel == submodel & object@covariate_info$param == "param",]
  if(nrow(cov_info_subset) == 0) stop(paste("No covariates in submodel", submodel), call. = F)
  # get intercept information for submodel
  cov_info_intercept <- object@covariate_info[object@covariate_info$submodel == submodel & object@covariate_info$param == "intercept",]
  # subset parameters of submodel
  stopifnot(all(cov_info_subset$coef %in% object@params))
  params_submodel <- object@params[grep(keyword_submodel, object@params)]
  # subset posterior matrix to number of draws
  posterior_matrix <- as.matrix(mcmc.list)
  # NOTE(review): hasArg(draws) is TRUE only when `draws` is supplied
  # explicitly in the call, so the signature default of 1000 is never
  # applied on its own -- all draws are used unless the caller sets `draws`.
  if(hasArg(draws)) {
    if(nrow(posterior_matrix) > draws) posterior_matrix <- posterior_matrix[sample(1:nrow(posterior_matrix), draws),]
  }
  # subset posterior matrix to current submodel
  posterior_matrix <- posterior_matrix[, grep(keyword_submodel, colnames(posterior_matrix))]
  params_covariate <- cov_info_subset$covariate
  if(length(params_covariate) == 0) stop ("No covariates found", call. = F)
  list_responses <- list()
  # loop over covariates
  for(cov in 1:nrow(cov_info_subset)) {
    current_cov <- cov_info_subset$covariate[cov]
    current_coef <- cov_info_subset$coef[cov]
    is_squared <- cov_info_subset$is_quadratic[cov]
    # pure quadratic terms with a linear partner are plotted together with
    # that partner, so skip them here and mark them excluded from the output
    if(is_squared) {
      attr(params_covariate, "include") [cov] <- FALSE
      if(gsub(keyword_squared, "", current_cov) %in% params_covariate) next
    }
    attr(params_covariate, "include") [cov] <- TRUE
    # NOTE(review): in both warnings below, call. = F sits INSIDE paste(),
    # so the string "FALSE" is pasted into the message; it should be an
    # argument of warning() instead.
    if(!is.na(cov_info_subset$ranef_cov[cov])){
      warning(paste(current_cov, 
                    " has a random effect other than species. This is currently not supported. Skipping", call. = F))
      next
    }
    if(cov_info_subset$ranef_nested[cov]) {
      warning(paste(current_cov, 
                    " has a nested random effect. This is currently not supported. Skipping", call. = F))
      next
    }
    # check if there is a squared version of the current covariate
    has_squared <- cov_info_subset$has_quadratic[cov]
    if(paste0(current_cov, keyword_squared) %in% params_covariate){
      #has_squared <- TRUE
      squared_cov <- paste0(current_cov, keyword_squared)
    } #else {
    #has_squared <- FALSE
    #}
    # determine data type of current covariate
    covariate_is_numeric <- cov_info_subset$data_type [cov] == "cont"
    covariate_is_factor <- cov_info_subset$data_type [cov] == "categ"
    # covariate_is_fixed <- !cov_info_subset$ranef[cov]
    # covariate_is_ranef <- cov_info_subset$ranef[cov]
    effect_type <- ifelse(cov_info_subset$ranef[cov], "ranef", 
                          ifelse(cov_info_subset$independent[cov], "independent", "fixed"))
    covariate_is_site_cov <- ifelse(cov_info_subset$covariate_type [cov] == "siteCovs", T, F)
    # create values to predict to
    # factors: one value per level; numeric: 100 evenly spaced values over
    # the observed range
    if(covariate_is_factor) {
      if(covariate_is_site_cov){
        values_to_predict <- seq(1, 
                                 length(levels(object@data[[current_cov]])))
      } else {
        values_to_predict <- attr(object@data[[paste0(current_cov, "_integer")]], "levels")
      }
    }
    if(covariate_is_numeric) {
      values_to_predict <- seq(min(object@data[[current_cov]]), 
                               max(object@data[[current_cov]]), 
                               length.out = 100)
    }
    # empty matrix for predicted values
    out <- array(data = NA, dim = c(length(values_to_predict), # number of values to predict
                                    object@data$M, # number of species
                                    nrow(posterior_matrix))) # number of posterior draws
    # likewise for intercept
    out_intercept <- out
    if(has_squared){
      values_to_predict_sq <- values_to_predict ^ 2
      out_sq <- array(data = NA, dim = c(length(values_to_predict_sq), 
                                         object@data$M, 
                                         nrow(posterior_matrix)))
    }
    # species loop
    for(i in 1:dim(out)[2]){
      if(cov_info_intercept$ranef == TRUE | cov_info_intercept$independent == TRUE){ # random or independent intercepts
        out_intercept[,i,] <- posterior_matrix[, colnames(posterior_matrix) %in% paste0(keyword_submodel_short, "0", "[", i, "]")]
      } else {
        out_intercept[,i,] <- posterior_matrix[, grepl(paste0(keyword_submodel_short, "0$"), colnames(posterior_matrix))]
      }
      # # get intercepts
      # if(!paste0(keyword_submodel_short, "0.mean") %in% object@params) {
      # # fixed intercept
      # out_intercept[,i,] <- posterior_matrix[, grepl(paste0(keyword_submodel_short, "0$"), colnames(posterior_matrix))]
      # } else {
      # 
      # # random intercept
      # out_intercept[,i,] <- posterior_matrix[, colnames(posterior_matrix) %in% paste0(keyword_submodel_short, "0", "[", i, "]")]
      # }
      # linear (and optional quadratic) slope terms per species; for fixed
      # effects every species shares one column of the posterior matrix
      if(covariate_is_numeric) {
        if(effect_type == "fixed") {
          index_covariate <- grep(paste0(current_coef, "$"), colnames(posterior_matrix))
        } else { # ranef or independent
          index_covariate <- grep(paste0(current_coef, "[", i, "]"), colnames(posterior_matrix), fixed = T)
        }
        out[,i,] <- sapply(posterior_matrix[, index_covariate], FUN = function(x){
          x * values_to_predict
        })
        if(has_squared){
          out_sq[,i,] <- sapply(posterior_matrix[, grep(paste0(squared_cov, "[", i, "]"), colnames(posterior_matrix), fixed = TRUE)], FUN = function(x){
            x * values_to_predict_sq
          })
        }
      }
      if(covariate_is_factor) {
        if(effect_type == "fixed") index_covariate <- grep(current_coef, colnames(posterior_matrix))
        if(effect_type == "ranef") index_covariate <- grep(paste0(current_coef, "[", i, ","), colnames(posterior_matrix), fixed = T)
        for(j in 1:length(index_covariate)){
          out[j,i,] <- posterior_matrix[, index_covariate [j]]
        }
      }
      suppressWarnings(rm(index_covariate))
    } # end species loop
    # intercept + linear covariate effect
    if(!has_squared){
      out_comb <- out_intercept + out
    }
    # intercept + linear + quadratic covariate effect
    if(has_squared){
      out_comb <- out_intercept + out + out_sq
    }
    # inverse logit: linear predictor -> probability
    prob <- exp(out_comb) / (exp(out_comb) + 1) # prediction for each species / habitat value (from mean estimates)
    # summarize estimates (across posterior samples)
    prob.mean <- apply(prob, MARGIN = c(1,2), mean)
    prob.lower <- apply(prob, MARGIN = c(1,2), quantile, (1-level) / 2)
    prob.upper <- apply(prob, MARGIN = c(1,2), quantile, (1 - (1-level) / 2))
    # make data frame for ggplot
    prob.mean2 <- reshape2::melt(prob.mean)
    prob.lower2 <- reshape2::melt(prob.lower)
    prob.upper2 <- reshape2::melt(prob.upper)
    names(prob.mean2) <- c("Index", "Species", "mean")
    names(prob.lower2) <- c("Index", "Species", "lower")
    names(prob.upper2) <- c("Index", "Species", "upper")
    probs <- cbind(prob.mean2, lower = prob.lower2$lower, upper = prob.upper2$upper)
    probs <- cbind(cov = values_to_predict,
                   probs)
    colnames(probs) [1] <- current_cov
    # assign species names (if available)
    if(!is.null(dimnames(object@data$y)[[1]])) {
      probs$Species <- dimnames(object@data$y)[[1]][probs$Species]
    }
    probs <- probs[order(probs$Species, probs[, 1]),]
    if(submodel == "det") ylabel <- "Detection probability p"
    if(submodel == "state") ylabel <- expression(paste("Occupancy probability ", psi))
    main <- paste0(ifelse(covariate_is_site_cov, "Site", "Observation"), " covariate: ", current_cov)
    subtitle <- paste0(ifelse(effect_type == "ranef", "Random effect", ifelse(effect_type == "independent", "Independent effects", "Fixed effect")),
                       ifelse(has_squared, " (with quadratic term)", ""),
                       ifelse(is_squared, " quadratic term (no linear term)", ""))
    # make cran checks happy
    lower <- NULL
    upper <- NULL
    # for squared covariates which have no unsquared version, sqrt-transform covariate and add expand covariate range to negative values (by mirr)
    if(is_squared & !has_squared) {
      probs[,1] <- sqrt(probs[,1])
      probs2 <- probs
      probs2[,1] <- -probs2[,1]
      probs <- rbind(probs, probs2)
    }
    # plot
    combine <- FALSE
    if(covariate_is_numeric){
      p <- ggplot(probs, aes_string(x = params_covariate[[cov]], y = "mean", group = "Species")) +
        geom_line() +
        theme_bw() +
        ggtitle(label = main,
                subtitle = subtitle) +
        xlab (ifelse(is_squared, gsub(keyword_squared, "", current_cov), current_cov)) +
        ylab(ylabel) +
        xlim(range(probs[, 1])) +
        ylim(0, 1) +
        theme(panel.grid.minor = element_blank())
      # note for later, can optionally plot all species in one plot if combine = TRUE (= ggplot code to this point)
      if(!combine){
        p <- p + facet_wrap(~Species) +
          geom_ribbon(aes_string(ymin = "lower", ymax = "upper"), alpha = 0.2)
      }
    }
    if(covariate_is_factor){
      # create x axis labels for factors
      if(covariate_is_site_cov){
        probs[,1] <- levels(object@data[[current_cov]]) [probs[,1]]
      }
      p <- ggplot(probs, aes_string(x = params_covariate[[cov]], y = "mean", group = "Species")) +
        geom_col() +
        facet_wrap(~Species) +
        geom_linerange(aes_string(ymin = "lower", ymax = "upper")) +
        theme_bw() +
        ggtitle(label = main,
                subtitle = subtitle) +
        xlab (current_cov) +
        ylab(ylabel) +
        ylim(0, 1) +
        theme(panel.grid.minor = element_blank())
      # don't know yet how to combine species on one plot.
    }
    if(hasArg(outdir)) {
      ggsave(filename = file.path(outdir, paste0("response_curves_", current_cov, "_", Sys.Date(), ".png")),
             plot = p,
             ...)
    }
    list_responses [[cov]] <- p
  }
  # name the output list by covariate, dropping the excluded quadratic terms
  names(list_responses) <- params_covariate[attr(params_covariate, "include")]
  return(list_responses)
}
# S4 generic + method registration so plot_effects() dispatches on commOccu
# objects to the internal plot.effects.commOccu() above.
setGeneric("plot_effects", function(object, ...) standardGeneric("plot_effects"))
#' Plot Marginal Effects of Covariates
#'
#' Plot marginal effect plots (= response curves if covariates are continuous) for all species in a community (multi-species) occupancy model. Takes into account species-specific intercepts (if any). Currently only supports continuous covariates, not categorical covariates.
#'
#' @aliases plot_effects
#' @param object \code{commOccu} object
#' @param mcmc.list mcmc.list. Output of \code{\link{fit}} called on a \code{commOccu} object
#' @param submodel Submodel to get plots for. Can be "det" or "state"
#' @param draws Number of draws from the posterior to use when generating the plots. If fewer than draws are available, they are all used
#' @param outdir Directory to save plots to (optional)
#' @param level Probability mass to include in the uncertainty interval
#' @param keyword_squared character. A suffix in covariate names in the model that indicates a covariate is a quadratic effect of another covariate which does not carry the suffix in its name.
#' @param ... additional arguments for \code{\link[ggplot2]{ggsave}}
#'
#'
#' @return list of ggplot objects
#' @export
#' @importFrom ggplot2 geom_vline geom_linerange geom_pointrange element_blank theme labs
#' @importFrom ggplot2 scale_color_manual scale_y_discrete aes_string vars facet_grid facet_wrap ylim geom_col
# @import coda
#'
setMethod("plot_effects", signature(object = "commOccu"), 
          plot.effects.commOccu)
# Workhorse for plot_coef(): caterpillar plots of covariate effect sizes
# (posterior medians with inner / outer credible intervals) for all species in
# a community occupancy model.
#
# object:    commOccu object
# mcmc.list: mcmc.list, output of fit() called on a commOccu object
# submodel:  "det" or "state"
# ordered:   logical. Order species by median effect size (TRUE) or name (FALSE)
# combine:   logical. Combine all covariates into one faceted plot?
# outdir:    directory to save plots to (optional)
# level:     two probability masses: outer and inner credible interval
# colorby:   color estimates by "significance" or "Bayesian p-value"
# ...:       additional arguments passed to ggsave()
#
# Returns a named list of ggplot objects (combine = FALSE) or a single ggplot
# (combine = TRUE).
plot.coef.commOccu <- function(object,
                               mcmc.list,
                               submodel = "state",
                               ordered = TRUE,
                               combine = FALSE,
                               outdir,
                               level = c(outer = 0.95, inner = 0.75),
                               colorby = "significance",
                               ...) {

  submodel <- match.arg(submodel, choices = c("det", "state"))
  colorby  <- match.arg(colorby,  choices = c("significance", "Bayesian p-value"))

  if(submodel == "state") {
    keyword_submodel <- "^beta"
    keyword_submodel_short <- "beta"
  }
  if(submodel == "det") {
    keyword_submodel <- "^alpha"
    keyword_submodel_short <- "alpha"
  }

  stopifnot(is.logical(ordered))
  stopifnot(is.logical(combine))

  if(combine & ordered) {
    message("'combine' and 'ordered' can't both be TRUE. Setting 'ordered = FALSE'")
    ordered <- FALSE
  }

  # covariate information for the requested submodel (parameters only, no intercepts)
  cov_info_subset <- object@covariate_info[object@covariate_info$submodel == submodel &
                                             object@covariate_info$param == "param", ]

  # posterior summaries at the requested quantiles
  stopifnot(length(level) == 2)
  posteriorSummary <- summary(mcmc.list, quantiles = c((1-level[1]) / 2,       # lower outer
                                                       (1-level[2]) / 2,       # lower inner
                                                       0.5,                    # median
                                                       1-((1-level[2]) / 2),   # upper inner
                                                       1-((1-level[1]) / 2)))  # upper outer

  df_quantiles <- data.frame(posteriorSummary$quantiles)

  # containers for output plots / per-covariate summaries
  p_list <- list()
  df_quantiles_list <- list()

  # Bayesian p-values (overall and per species)
  df_statistics <- posteriorSummary$statistics
  df_statistics_Bayes_pvals_overall <- as.data.frame(df_statistics[grep("Bpvalue$", rownames(df_statistics)), , drop = FALSE])
  df_statistics_Bayes_pvals_species <- df_statistics[grep("Bpvalue_species", rownames(df_statistics)), ]

  # column used for coloring; constant across covariates, so determine it once
  if(colorby == "significance")     color_by <- "significance"
  if(colorby == "Bayesian p-value") color_by <- "significance2"

  # loop over covariates
  for(cov in 1:nrow(cov_info_subset)) {

    current_cov  <- cov_info_subset$covariate[cov]
    current_coef <- cov_info_subset$coef[cov]

    # NOTE: call. must be an argument of warning(), not of paste(); otherwise
    # the literal text "FALSE" is pasted into the message and call. is not set
    if(!is.na(cov_info_subset$ranef_cov[cov])){
      warning(paste(current_cov,
                    "has a random effect other than species. This is currently not supported. Skipping"),
              call. = FALSE)
      next
    }
    if(cov_info_subset$ranef_nested[cov]) {
      warning(paste(current_cov,
                    "has a nested random effect. This is currently not supported. Skipping"),
              call. = FALSE)
      next
    }

    # data type / effect type of current covariate
    covariate_is_numeric <- cov_info_subset$data_type[cov] == "cont"
    covariate_is_factor  <- cov_info_subset$data_type[cov] == "categ"
    effect_type <- ifelse(cov_info_subset$ranef[cov], "ranef",
                          ifelse(cov_info_subset$independent[cov], "independent", "fixed"))
    covariate_is_site_cov <- cov_info_subset$covariate_type[cov] == "siteCovs"

    if(covariate_is_numeric){
      # index of the coefficient(s) in the posterior summary: fixed effects are a
      # single parameter, ranef / independent effects are indexed by species
      if(effect_type == "fixed")       index_covariate <- grep(paste0(current_coef, "$"), rownames(df_quantiles))
      if(effect_type == "ranef")       index_covariate <- grep(paste0(current_coef, "["), rownames(df_quantiles), fixed = TRUE)
      if(effect_type == "independent") index_covariate <- grep(paste0(current_coef, "["), rownames(df_quantiles), fixed = TRUE)

      df_quantiles_i <- df_quantiles[index_covariate, ]

      if(effect_type == "fixed") df_quantiles_i$type <- c("mean")
      if(effect_type == "ranef") {
        # prepend the community mean estimate
        index_covariate_mean_ranef <- grep(paste0(current_coef, ".mean$"), rownames(df_quantiles))
        df_quantiles_i <- rbind(df_quantiles[index_covariate_mean_ranef, ], df_quantiles_i)
        df_quantiles_i$type <- c("mean", rep("species", times = length(index_covariate)))
      }
      if(effect_type == "independent"){
        df_quantiles_i$type <- c("species")
      }
    }

    if(covariate_is_factor){
      # factor levels come from siteCovs directly, or from the "_integer"
      # recoding stored in object@data for observation covariates
      if(covariate_is_site_cov){
        levels_tmp <- levels(object@input$siteCovs[, current_cov])
        nlev <- length(levels_tmp)
      }
      if(!covariate_is_site_cov){
        levels_tmp <- attr(object@data[[paste0(current_cov, "_integer")]], "levels")
        nlev <- length(levels_tmp)
      }

      if(effect_type == "fixed") index_covariate <- grep(paste0(current_coef, "["), rownames(df_quantiles), fixed = TRUE)
      if(effect_type == "ranef") index_covariate <- grep(paste0(current_coef, "["), rownames(df_quantiles), fixed = TRUE)

      df_quantiles_i <- df_quantiles[index_covariate, ]

      if(effect_type == "fixed") df_quantiles_i$type <- c("mean")
      if(effect_type == "ranef") {
        # add community mean
        index_covariate_mean_ranef <- grep(paste0(current_coef, ".mean"), rownames(df_quantiles), fixed = TRUE)  # does this affect categ fixed effects?
        df_quantiles_i <- rbind(df_quantiles[index_covariate_mean_ranef, ], df_quantiles_i)
        df_quantiles_i$type <- c(rep("mean", times = length(index_covariate_mean_ranef)),
                                 rep("species", times = length(index_covariate)))
      }
    }

    colnames(df_quantiles_i)[1:5] <- c("lower_outer", "lower_inner", "median", "upper_inner", "upper_outer")

    # significance: does the inner / outer credible interval exclude zero?
    significance <- rep("no", times = nrow(df_quantiles_i))
    significance[which(df_quantiles_i$lower_inner < 0 & df_quantiles_i$upper_inner < 0 |
                         df_quantiles_i$lower_inner > 0 & df_quantiles_i$upper_inner > 0)] <- "inner"
    significance[which(df_quantiles_i$lower_outer < 0 & df_quantiles_i$upper_outer < 0 |
                         df_quantiles_i$lower_outer > 0 & df_quantiles_i$upper_outer > 0)] <- "outer"
    df_quantiles_i$significance <- significance

    # add Bayesian p-values
    # if colorby == "Bayesian p-value":
    # slight inconsistency: values in "level" will still be used for error bar width
    # but will affect colors only via Bayesian p-values of the species,
    # not via the actual parameter estimates.
    # So a parameter estimate can be highly significant but grey bc Bayesian
    # p-value of the species is ok. It's mostly for model checking, so I guess it's fine
    if(colorby == "Bayesian p-value"){
      if(covariate_is_numeric){
        if(effect_type == "fixed") df_pval <- df_statistics_Bayes_pvals_overall
        if(effect_type == "ranef") df_pval <- rbind(df_statistics_Bayes_pvals_overall,
                                                    df_statistics_Bayes_pvals_species)
      }
      if(covariate_is_factor){
        if(effect_type == "fixed") df_pval <- df_statistics_Bayes_pvals_overall[rep(1, times = nlev), ]
        if(effect_type == "ranef") df_pval <- rbind(df_statistics_Bayes_pvals_overall[rep(1, times = nlev), ],
                                                    df_statistics_Bayes_pvals_species[rep(1:nrow(df_statistics_Bayes_pvals_species),
                                                                                          times = nlev), ])
      }
      stopifnot(nrow(df_pval) == nrow(df_quantiles_i))
      significance2 <- rep("no", times = nrow(df_pval))
      significance2[which(df_pval$Mean < (1-level[2]) / 2 | df_pval$Mean > (1 - (1-level[2]) / 2))] <- "inner"
      significance2[which(df_pval$Mean < (1-level[1]) / 2 | df_pval$Mean > (1 - (1-level[1]) / 2))] <- "outer"
      df_quantiles_i$significance2 <- significance2
    }

    # assign species names (fall back to indices if dimnames are missing)
    if(!is.null(dimnames(object@data$y)[[1]])) speciesnames <- dimnames(object@data$y)[[1]]
    if( is.null(dimnames(object@data$y)[[1]])) speciesnames <- seq_len(dim(object@data$y)[1])

    if(effect_type == "ranef") {
      if(covariate_is_numeric) df_quantiles_i$species <- c("community", speciesnames)
      if(covariate_is_factor) {
        df_quantiles_i$species <- c(rep("community", times = length(index_covariate_mean_ranef)),
                                    rep(speciesnames, times = nlev))
      }
    }
    if(effect_type == "fixed")       df_quantiles_i$species <- "community"
    if(effect_type == "independent") df_quantiles_i$species <- speciesnames

    # add covariate name as column (identical for site and observation covariates)
    if(covariate_is_numeric){
      df_quantiles_i$covariate <- current_cov
    }
    if(covariate_is_factor){
      if(effect_type == "fixed"){
        df_quantiles_i$covariate <- paste0(current_cov, "_", levels_tmp)
      }
      if(effect_type == "ranef"){
        df_quantiles_i$covariate <- paste0(current_cov, "_", c(levels_tmp,
                                                               rep(levels_tmp, each = object@data$M)))
      }
    }

    # sort species (either by median effect size or by name)
    if(ordered) {
      # this currently does not sort categorical covariates with random effects correctly.
      # Ideally if combine = FALSE they should be sorted in descending order (at least for the second factor level)
      if(covariate_is_numeric){
        df_quantiles_i$species <- factor(df_quantiles_i$species,
                                         levels = unique(df_quantiles_i$species[order(df_quantiles_i$median)]))
      }
      if(covariate_is_factor){
        if(effect_type == "fixed") {
          df_quantiles_i$species <- factor(df_quantiles_i$species,
                                           levels = unique(df_quantiles_i$species[order(df_quantiles_i$median)]))
        }
        if(effect_type == "ranef"){
          # order by the estimates for the second factor level
          subset_level2 <- df_quantiles_i[df_quantiles_i$covariate == paste0(current_cov, "_", levels_tmp[2]), ]
          df_quantiles_i$species <- factor(df_quantiles_i$species,
                                           levels = subset_level2$species[order(subset_level2$median)])
        }
      }
    } else {
      df_quantiles_i$species <- factor(df_quantiles_i$species, levels = unique(rev(df_quantiles_i$species)))
    }

    df_quantiles_list[[cov]] <- df_quantiles_i

    # plot
    type <- NULL       # just for CRAN checks
    covariate <- NULL

    if(!combine){
      p_list[[cov]] <- ggplot(df_quantiles_i, aes_string(y = "species", x = "median", color = color_by)) +
        geom_vline(xintercept = 0, alpha = 0.2) +
        geom_pointrange(aes_string(xmin = "lower_outer", xmax = "upper_outer")) +
        geom_linerange(aes_string(xmin = "lower_inner", xmax = "upper_inner"), size = 1) +
        facet_grid(rows = vars(type),
                   cols = vars(covariate),
                   scales = "free_y",
                   space = "free_y") +
        xlab("Effect size") + ylab(element_blank()) +
        theme_bw() +
        theme(panel.grid.minor = element_blank(),
              panel.grid.major.y = element_blank(),
              strip.background.y = element_blank(),
              strip.text.y = element_blank()) +
        scale_color_manual(breaks = c("outer", "inner", "no"),
                           values = c("firebrick", "black", "grey50"),
                           guide = "none") +
        ggtitle(paste("Effect sizes:", current_cov))

      if(!covariate_is_factor) {
        p_list[[cov]] <- p_list[[cov]] + theme(strip.background.x = element_blank(),
                                               strip.text.x = element_blank())
      }

      if(color_by == "significance2"){
        p_list[[cov]] <- p_list[[cov]] + labs(subtitle = "colors indicate Bayesian p-values of species")
      }

      if(hasArg(outdir)) {
        # use current_cov here, not the NULL placeholder 'covariate';
        # otherwise all covariate plots are saved under the same file name
        # and overwrite each other
        ggsave(filename = file.path(outdir, paste0("effect_sizes_", submodel, "_", current_cov, "_", Sys.Date(), ".png")),
               plot = p_list[[cov]],
               ...)
      }
    }
  }  # end covariate loop

  if(combine){
    df_quantiles_all <- do.call(rbind, df_quantiles_list)
    df_quantiles_all$species <- factor(df_quantiles_all$species,
                                       levels = rev(sort(unique(as.character(df_quantiles_all$species)))))

    p <- ggplot(df_quantiles_all, aes_string(y = "species", x = "median", color = color_by)) +
      geom_vline(xintercept = 0, alpha = 0.2) +
      geom_pointrange(aes_string(xmin = "lower_outer", xmax = "upper_outer")) +
      geom_linerange(aes_string(xmin = "lower_inner", xmax = "upper_inner"), size = 1) +
      facet_grid(rows = vars(type),
                 cols = vars(covariate),
                 scales = "free_y",
                 space = "free_y") +
      xlab("Effect size") + ylab(element_blank()) +
      theme_bw() +
      theme(panel.grid.minor = element_blank(),
            panel.grid.major.y = element_blank(),
            strip.background.y = element_blank(),
            strip.text.y = element_blank()) +
      scale_color_manual(breaks = c("outer", "inner", "no"),
                         values = c("firebrick", "black", "grey50"),
                         guide = "none")

    if(hasArg(outdir)) {
      ggsave(filename = file.path(outdir, paste0("effect_sizes_", submodel, "_", paste(cov_info_subset$covariate, collapse = "_"), "_",
                                                 ifelse(!colorby == "significance", "Bayesian_pval_", ""), Sys.Date(), ".png")),
             plot = p,
             ...)
    }
    return(p)
  }

  if(!combine){
    names(p_list) <- cov_info_subset$covariate
    return(p_list)
  }
}
setGeneric("plot_coef", function(object, ...) standardGeneric("plot_coef"))
#' Plot effect sizes of covariates in community occupancy model
#'
#' Plot effect sizes for all species in a community (multi-species) occupancy model. Both continuous and categorical covariates are supported. Covariates with random effects other than species, or with nested random effects, are skipped with a warning.
#'
#' @aliases plot_coef
#' @param object \code{commOccu} object
#' @param mcmc.list mcmc.list. Output of \code{\link{fit}} called on a \code{commOccu} object
#' @param submodel Submodel to get plots for. Can be "det" or "state"
#' @param ordered logical. Order species in plot by median effect (TRUE) or by species name (FALSE)
#' @param combine logical. Combine multiple plots into one (via facets)?
#' @param outdir Directory to save plots to (optional)
#' @param level Probability mass to include in the uncertainty interval (two values, second value - inner interval - will be plotted thicker)
#' @param colorby Whether to color estimates by "significance" (of the effect estimates), or "Bayesian p-value" (of the species)
#' @param ... additional arguments for \code{\link[ggplot2]{ggsave}}
#'
#' @return list of ggplot objects (one per covariate) if \code{combine = FALSE}, or a single combined ggplot if \code{combine = TRUE}
#' @export
#'
#'
setMethod("plot_coef", signature(object = "commOccu"),
plot.coef.commOccu)
# (stray dataset-metadata row from text extraction removed; not part of the R source)
# Workhorse for plot_effects(): marginal effect plots (response curves for
# continuous covariates, bar plots for categorical covariates) for all species
# in a community occupancy model, taking species-specific intercepts into
# account.
#
# object:          commOccu object
# mcmc.list:       mcmc.list (output of fit())
# submodel:        "det" or "state"
# draws:           number of posterior samples to use. NOTE: applied only when
#                  supplied explicitly (hasArg), the default is not applied
# outdir:          directory to save plots in (optional)
# level:           confidence level for CIs in plot
# keyword_squared: suffix of a covariate name that marks a quadratic effect
#                  (e.g. "elevation" and "elevation_squared" are combined)
# ...:             additional arguments for ggsave()
#
# Returns a named list of ggplot objects, one per covariate.
plot.effects.commOccu <- function(object,
                                  mcmc.list,
                                  submodel = "state",
                                  draws = 1000,
                                  outdir,
                                  level = 0.95,
                                  keyword_squared = "_squared",
                                  ...)
{
  submodel <- match.arg(submodel, choices = c("det", "state"))

  if(submodel == "state") {
    keyword_submodel <- "^beta"
    keyword_submodel_short <- "beta"
  }
  if(submodel == "det") {
    keyword_submodel <- "^alpha"
    keyword_submodel_short <- "alpha"
  }

  # covariate information for submodel (parameters only)
  cov_info_subset <- object@covariate_info[object@covariate_info$submodel == submodel & object@covariate_info$param == "param", ]
  if(nrow(cov_info_subset) == 0) stop(paste("No covariates in submodel", submodel), call. = FALSE)

  # intercept information for submodel
  cov_info_intercept <- object@covariate_info[object@covariate_info$submodel == submodel & object@covariate_info$param == "intercept", ]

  # subset parameters of submodel
  stopifnot(all(cov_info_subset$coef %in% object@params))
  params_submodel <- object@params[grep(keyword_submodel, object@params)]

  # posterior matrix; thin to 'draws' random samples if 'draws' was supplied
  # NOTE(review): hasArg() means the default draws = 1000 is never applied
  # automatically - confirm whether that is intended
  posterior_matrix <- as.matrix(mcmc.list)
  if(hasArg(draws)) {
    if(nrow(posterior_matrix) > draws) posterior_matrix <- posterior_matrix[sample(1:nrow(posterior_matrix), draws), ]
  }

  # subset posterior matrix to current submodel
  posterior_matrix <- posterior_matrix[, grep(keyword_submodel, colnames(posterior_matrix))]

  params_covariate <- cov_info_subset$covariate
  if(length(params_covariate) == 0) stop("No covariates found", call. = FALSE)

  list_responses <- list()

  # loop over covariates
  for(cov in 1:nrow(cov_info_subset)) {

    current_cov  <- cov_info_subset$covariate[cov]
    current_coef <- cov_info_subset$coef[cov]

    is_squared <- cov_info_subset$is_quadratic[cov]
    if(is_squared) {
      attr(params_covariate, "include")[cov] <- FALSE
      # quadratic terms whose linear term is also in the model are plotted
      # together with the linear term, so skip them here
      if(gsub(keyword_squared, "", current_cov) %in% params_covariate) next
    }
    attr(params_covariate, "include")[cov] <- TRUE

    # NOTE: call. must be an argument of warning(), not of paste(); otherwise
    # the literal text "FALSE" is pasted into the message and call. is not set
    if(!is.na(cov_info_subset$ranef_cov[cov])){
      warning(paste(current_cov,
                    "has a random effect other than species. This is currently not supported. Skipping"),
              call. = FALSE)
      next
    }
    if(cov_info_subset$ranef_nested[cov]) {
      warning(paste(current_cov,
                    "has a nested random effect. This is currently not supported. Skipping"),
              call. = FALSE)
      next
    }

    # check if there is a squared version of the current covariate
    has_squared <- cov_info_subset$has_quadratic[cov]
    if(paste0(current_cov, keyword_squared) %in% params_covariate){
      squared_cov <- paste0(current_cov, keyword_squared)
    }

    # data type / effect type of current covariate
    covariate_is_numeric <- cov_info_subset$data_type[cov] == "cont"
    covariate_is_factor  <- cov_info_subset$data_type[cov] == "categ"
    effect_type <- ifelse(cov_info_subset$ranef[cov], "ranef",
                          ifelse(cov_info_subset$independent[cov], "independent", "fixed"))
    covariate_is_site_cov <- cov_info_subset$covariate_type[cov] == "siteCovs"

    # create values to predict to
    if(covariate_is_factor) {
      if(covariate_is_site_cov){
        values_to_predict <- seq(1,
                                 length(levels(object@data[[current_cov]])))
      } else {
        values_to_predict <- attr(object@data[[paste0(current_cov, "_integer")]], "levels")
      }
    }
    if(covariate_is_numeric) {
      values_to_predict <- seq(min(object@data[[current_cov]]),
                               max(object@data[[current_cov]]),
                               length.out = 100)
    }

    # arrays for predictions: [value to predict, species, posterior draw]
    out <- array(data = NA, dim = c(length(values_to_predict),
                                    object@data$M,
                                    nrow(posterior_matrix)))
    # likewise for intercept
    out_intercept <- out

    if(has_squared){
      values_to_predict_sq <- values_to_predict ^ 2
      out_sq <- array(data = NA, dim = c(length(values_to_predict_sq),
                                         object@data$M,
                                         nrow(posterior_matrix)))
    }

    # species loop
    for(i in 1:dim(out)[2]){
      # intercepts: species-specific if random or independent, shared otherwise
      if(cov_info_intercept$ranef == TRUE | cov_info_intercept$independent == TRUE){
        out_intercept[, i, ] <- posterior_matrix[, colnames(posterior_matrix) %in% paste0(keyword_submodel_short, "0", "[", i, "]")]
      } else {
        out_intercept[, i, ] <- posterior_matrix[, grepl(paste0(keyword_submodel_short, "0$"), colnames(posterior_matrix))]
      }

      if(covariate_is_numeric) {
        if(effect_type == "fixed") {
          index_covariate <- grep(paste0(current_coef, "$"), colnames(posterior_matrix))
        } else {  # ranef or independent
          index_covariate <- grep(paste0(current_coef, "[", i, "]"), colnames(posterior_matrix), fixed = TRUE)
        }
        out[, i, ] <- sapply(posterior_matrix[, index_covariate], FUN = function(x){
          x * values_to_predict
        })
        if(has_squared){
          out_sq[, i, ] <- sapply(posterior_matrix[, grep(paste0(squared_cov, "[", i, "]"), colnames(posterior_matrix), fixed = TRUE)], FUN = function(x){
            x * values_to_predict_sq
          })
        }
      }

      if(covariate_is_factor) {
        if(effect_type == "fixed") index_covariate <- grep(current_coef, colnames(posterior_matrix))
        if(effect_type == "ranef") index_covariate <- grep(paste0(current_coef, "[", i, ","), colnames(posterior_matrix), fixed = TRUE)
        for(j in 1:length(index_covariate)){
          out[j, i, ] <- posterior_matrix[, index_covariate[j]]
        }
      }
      suppressWarnings(rm(index_covariate))
    }  # end species loop

    # linear predictor: intercept + linear (+ quadratic) covariate effect
    if(!has_squared){
      out_comb <- out_intercept + out
    }
    if(has_squared){
      out_comb <- out_intercept + out + out_sq
    }

    # inverse logit. 1 / (1 + exp(-x)) is numerically stable for large positive
    # linear predictors, whereas exp(x) / (exp(x) + 1) overflows to Inf/Inf = NaN
    prob <- 1 / (1 + exp(-out_comb))

    # summarize estimates (across posterior samples)
    prob.mean  <- apply(prob, MARGIN = c(1, 2), mean)
    prob.lower <- apply(prob, MARGIN = c(1, 2), quantile, (1-level) / 2)
    prob.upper <- apply(prob, MARGIN = c(1, 2), quantile, (1 - (1-level) / 2))

    # make data frame for ggplot
    prob.mean2  <- reshape2::melt(prob.mean)
    prob.lower2 <- reshape2::melt(prob.lower)
    prob.upper2 <- reshape2::melt(prob.upper)

    names(prob.mean2)  <- c("Index", "Species", "mean")
    names(prob.lower2) <- c("Index", "Species", "lower")
    names(prob.upper2) <- c("Index", "Species", "upper")

    probs <- cbind(prob.mean2, lower = prob.lower2$lower, upper = prob.upper2$upper)
    probs <- cbind(cov = values_to_predict,
                   probs)
    colnames(probs)[1] <- current_cov

    # assign species names (if available)
    if(!is.null(dimnames(object@data$y)[[1]])) {
      probs$Species <- dimnames(object@data$y)[[1]][probs$Species]
    }
    probs <- probs[order(probs$Species, probs[, 1]), ]

    if(submodel == "det")   ylabel <- "Detection probability p"
    if(submodel == "state") ylabel <- expression(paste("Occupancy probability ", psi))

    main <- paste0(ifelse(covariate_is_site_cov, "Site", "Observation"), " covariate: ", current_cov)
    subtitle <- paste0(ifelse(effect_type == "ranef", "Random effect", ifelse(effect_type == "independent", "Independent effects", "Fixed effect")),
                       ifelse(has_squared, " (with quadratic term)", ""),
                       ifelse(is_squared, " quadratic term (no linear term)", ""))

    # make cran checks happy
    lower <- NULL
    upper <- NULL

    # for squared covariates which have no unsquared version, sqrt-transform the
    # covariate and expand its range to negative values by mirroring
    if(is_squared & !has_squared) {
      probs[, 1] <- sqrt(probs[, 1])
      probs2 <- probs
      probs2[, 1] <- -probs2[, 1]
      probs <- rbind(probs, probs2)
    }

    # plot
    combine <- FALSE
    if(covariate_is_numeric){
      p <- ggplot(probs, aes_string(x = params_covariate[[cov]], y = "mean", group = "Species")) +
        geom_line() +
        theme_bw() +
        ggtitle(label = main,
                subtitle = subtitle) +
        xlab(ifelse(is_squared, gsub(keyword_squared, "", current_cov), current_cov)) +
        ylab(ylabel) +
        xlim(range(probs[, 1])) +
        ylim(0, 1) +
        theme(panel.grid.minor = element_blank())
      # note for later: can optionally plot all species in one plot if combine = TRUE
      # (= ggplot code to this point)
      if(!combine){
        p <- p + facet_wrap(~Species) +
          geom_ribbon(aes_string(ymin = "lower", ymax = "upper"), alpha = 0.2)
      }
    }
    if(covariate_is_factor){
      # create x axis labels for factors
      if(covariate_is_site_cov){
        probs[, 1] <- levels(object@data[[current_cov]])[probs[, 1]]
      }
      p <- ggplot(probs, aes_string(x = params_covariate[[cov]], y = "mean", group = "Species")) +
        geom_col() +
        facet_wrap(~Species) +
        geom_linerange(aes_string(ymin = "lower", ymax = "upper")) +
        theme_bw() +
        ggtitle(label = main,
                subtitle = subtitle) +
        xlab(current_cov) +
        ylab(ylabel) +
        ylim(0, 1) +
        theme(panel.grid.minor = element_blank())
      # don't know yet how to combine species on one plot.
    }

    if(hasArg(outdir)) {
      ggsave(filename = file.path(outdir, paste0("response_curves_", current_cov, "_", Sys.Date(), ".png")),
             plot = p,
             ...)
    }

    list_responses[[cov]] <- p
  }

  # NOTE(review): if a covariate was skipped above (unsupported random effect),
  # its "include" flag is TRUE but list_responses has no entry at that position,
  # which can misalign these names - confirm with a model containing such effects
  names(list_responses) <- params_covariate[attr(params_covariate, "include")]
  return(list_responses)
}
setGeneric("plot_effects", function(object, ...) standardGeneric("plot_effects"))
#' Plot Marginal Effects of Covariates
#'
#' Plot marginal effect plots (response curves for continuous covariates, bar plots for categorical covariates) for all species in a community (multi-species) occupancy model. Takes into account species-specific intercepts (if any). Covariates with random effects other than species, or with nested random effects, are skipped with a warning.
#'
#' @aliases plot_effects
#' @param object \code{commOccu} object
#' @param mcmc.list mcmc.list. Output of \code{\link{fit}} called on a \code{commOccu} object
#' @param submodel Submodel to get plots for. Can be "det" or "state"
#' @param draws Number of draws from the posterior to use when generating the plots. Note: thinning to \code{draws} samples is only applied when \code{draws} is supplied explicitly; otherwise all samples are used
#' @param outdir Directory to save plots to (optional)
#' @param level Probability mass to include in the uncertainty interval
#' @param keyword_squared character. A suffix in covariate names in the model that indicates a covariate is a quadratic effect of another covariate which does not carry the suffix in its name.
#' @param ... additional arguments for \code{\link[ggplot2]{ggsave}}
#'
#'
#' @return list of ggplot objects
#' @export
#' @importFrom ggplot2 geom_vline geom_linerange geom_pointrange element_blank theme labs
#' @importFrom ggplot2 scale_color_manual scale_y_discrete aes_string vars facet_grid facet_wrap ylim geom_col
# @import coda
#'
setMethod("plot_effects", signature(object = "commOccu"),
plot.effects.commOccu)
plot.coef.commOccu <- function(object,
mcmc.list,
submodel = "state",
ordered = TRUE,
combine = FALSE,
outdir,
level = c(outer = 0.95, inner = 0.75),
colorby = "significance",
...) {
submodel <- match.arg(submodel, choices = c("det", "state"))
colorby <- match.arg(colorby, choices = c("significance", "Bayesian p-value"))
if(submodel == "state") {
keyword_submodel <- "^beta"
keyword_submodel_short <- "beta"
}
if(submodel == "det") {
keyword_submodel <- "^alpha"
keyword_submodel_short <- "alpha"
}
stopifnot(is.logical(ordered))
stopifnot(is.logical(combine))
if(combine & ordered) {
message("'combine' and 'ordered' can't both be TRUE. Setting 'ordered = FALSE'")
ordered <- FALSE
}
# get covariate information for submodel
cov_info_subset <- object@covariate_info[object@covariate_info$submodel == submodel & object@covariate_info$param == "param",]
list_responses <- list()
# posterior summaries
stopifnot(length(level) == 2)
posteriorSummary <- summary(mcmc.list, quantiles = c((1-level[1]) / 2, # lower outer
(1-level[2]) / 2, # lower inner
0.5, # median
1-((1-level[2]) / 2), # upper inner
1-((1-level[1]) / 2))) # upper outer
df_quantiles <- data.frame(posteriorSummary$quantiles)
#
# all estimates model parameters
params_all <- rownames(df_quantiles)
# container for output plots
p_list <- list()
df_quantiles_list <- list()
# get Bayesian p-values
df_statistics <- posteriorSummary$statistics
df_statistics_Bayes_pvals_overall <- as.data.frame(df_statistics[grep("Bpvalue$", rownames(df_statistics)), , drop = F])
df_statistics_Bayes_pvals_species <- df_statistics[grep("Bpvalue_species", rownames(df_statistics)), ]
# loop over covariates
for(cov in 1:nrow(cov_info_subset)) {
current_cov <- cov_info_subset$covariate[cov]
current_coef <- cov_info_subset$coef[cov]
#if(covariate %in% skip) next
if(!is.na(cov_info_subset$ranef_cov[cov])){
warning(paste(current_cov,
" has a random effect other than species. This is currently not supported. Skipping", call. = F))
next
}
if(cov_info_subset$ranef_nested[cov]) {
warning(paste(current_cov,
" has a nested random effect. This is currently not supported. Skipping", call. = F))
next
}
# determine data type of current covariate
covariate_is_numeric <- cov_info_subset$data_type [cov] == "cont"
covariate_is_factor <- cov_info_subset$data_type [cov] == "categ"
# covariate_is_fixed <- !cov_info_subset$ranef[cov]
# covariate_is_indep <- cov_info_subset$independent[cov]
# covariate_is_ranef <- cov_info_subset$ranef[cov]
effect_type <- ifelse(cov_info_subset$ranef[cov], "ranef",
ifelse(cov_info_subset$independent[cov], "independent", "fixed"))
covariate_is_site_cov <- ifelse(cov_info_subset$covariate_type [cov] == "siteCovs", T, F)
#if(covariate_is_indep) {
# if(covariate_is_numeric){
# index_covariate <- grep(paste0(current_coef, "[" ), rownames(df_quantiles), fixed = T)
#df_quantiles_i <- df_quantiles[index_covariate, ]
#if(covariate_is_fixed) df_quantiles_i$type <- c("mean")
# if(covariate_is_ranef) {
# # get community mean
# index_covariate_mean_ranef <- grep(paste0(current_coef, ".mean$"), rownames(df_quantiles))
#
# df_quantiles_i <- rbind(df_quantiles[index_covariate_mean_ranef, ], df_quantiles_i)
#
# df_quantiles_i$type <- c("mean", rep("species", times = length(index_covariate)))
# # }
# if(covariate_is_indep) {
# # get community mean
# index_covariate_mean_indep <- grep(paste0(current_coef, ".mean$"), rownames(df_quantiles))
#
# df_quantiles_i <- rbind(df_quantiles[index_covariate_mean_indep, ], df_quantiles_i)
# df_quantiles_i$type <- rep("species", times = length(index_covariate))
# }
# }
# } else {
if(covariate_is_numeric){
# if(covariate_is_fixed) index_covariate <- grep(paste0(current_coef, "$"), rownames(df_quantiles))
# if(covariate_is_ranef) index_covariate <- grep(paste0(current_coef, "[" ), rownames(df_quantiles), fixed = T)
if(effect_type == "fixed") index_covariate <- grep(paste0(current_coef, "$"), rownames(df_quantiles))
if(effect_type == "ranef") index_covariate <- grep(paste0(current_coef, "[" ), rownames(df_quantiles), fixed = T)
if(effect_type == "independent") index_covariate <- grep(paste0(current_coef, "[" ), rownames(df_quantiles), fixed = T)
df_quantiles_i <- df_quantiles[index_covariate, ]
if(effect_type == "fixed") df_quantiles_i$type <- c("mean")
if(effect_type == "ranef") {
# get community mean
index_covariate_mean_ranef <- grep(paste0(current_coef, ".mean$"), rownames(df_quantiles))
df_quantiles_i <- rbind(df_quantiles[index_covariate_mean_ranef, ], df_quantiles_i)
df_quantiles_i$type <- c("mean", rep("species", times = length(index_covariate)))
}
if(effect_type == "independent"){
df_quantiles_i$type <- c("species")
}
}
# }
if(covariate_is_factor){
if(covariate_is_site_cov){
levels_tmp <- levels(object@input$siteCovs[, current_cov])
nlev <- length(levels_tmp)
}
if(!covariate_is_site_cov){
levels_tmp <- attr(object@data[[paste0(current_cov, "_integer")]], "levels")
nlev <- length(levels_tmp)
}
if(effect_type == "fixed") index_covariate <- grep(paste0(current_coef, "["), rownames(df_quantiles), fixed = T)
if(effect_type == "ranef") index_covariate <- grep(paste0(current_coef, "[" ), rownames(df_quantiles), fixed = T)
df_quantiles_i <- df_quantiles[index_covariate, ]
if(effect_type == "fixed") df_quantiles_i$type <- c("mean")
if(effect_type == "ranef") {
# add community mean
index_covariate_mean_ranef <- grep(paste0(current_coef, ".mean"), rownames(df_quantiles), fixed = T) # does this affect categ fixed effects?
df_quantiles_i <- rbind(df_quantiles[index_covariate_mean_ranef, ], df_quantiles_i)
df_quantiles_i$type <- c(rep("mean", times = length(index_covariate_mean_ranef)), rep("species", times = length(index_covariate)))
}
}
colnames(df_quantiles_i)[1:5] <- c("lower_outer", "lower_inner", "median", "upper_inner", "upper_outer")
# get significance levels
significance <- rep("no", times = nrow(df_quantiles_i))
significance[which(df_quantiles_i$lower_inner < 0 & df_quantiles_i$upper_inner < 0 |
df_quantiles_i$lower_inner > 0 & df_quantiles_i$upper_inner > 0)] <- "inner"
significance[which(df_quantiles_i$lower_outer < 0 & df_quantiles_i$upper_outer < 0 |
df_quantiles_i$lower_outer > 0 & df_quantiles_i$upper_outer > 0)] <- "outer"
df_quantiles_i$significance <- significance
# add Bayesian p-values
# if colorby == "Bayesian p-value":
# slight inconsistency: values in "level" will still be used for error bar width
# but will affect colors only via Bayesian p-values of the species
# not via the actual parameter estimates.
# So a parameter estimate can be highly significant but grey bc bayesian p-value of the the species is ok
# it's mostly for model checking, so I guess it's fine
if(colorby == "Bayesian p-value"){
if(covariate_is_numeric){
if(effect_type == "fixed") df_pval <- df_statistics_Bayes_pvals_overall
if(effect_type == "ranef") df_pval <- rbind(df_statistics_Bayes_pvals_overall,
df_statistics_Bayes_pvals_species)
}
if(covariate_is_factor){
if(effect_type == "fixed") df_pval <- df_statistics_Bayes_pvals_overall [rep(1, times = nlev),]
if(effect_type == "ranef") df_pval <- rbind(df_statistics_Bayes_pvals_overall [rep(1, times = nlev),],
df_statistics_Bayes_pvals_species [rep(1:nrow(df_statistics_Bayes_pvals_species),
times = nlev),])
}
stopifnot(nrow(df_pval) == nrow(df_quantiles_i))
significance2 <- rep("no", times = nrow(df_pval))
significance2[which(df_pval$Mean < (1-level[2]) / 2 | df_pval$Mean > (1 - (1-level[2]) / 2))] <- "inner"
significance2[which(df_pval$Mean < (1-level[1]) / 2 | df_pval$Mean > (1 - (1-level[1]) / 2))] <- "outer"
df_quantiles_i$significance2 <- significance2
}
# assign species names
if(!is.null(dimnames(object@data$y)[[1]])) speciesnames <- dimnames(object@data$y)[[1]]
if( is.null(dimnames(object@data$y)[[1]])) speciesnames <- seq_len(dim(object@data$y)[1])
if(effect_type == "ranef") {
if(covariate_is_numeric) df_quantiles_i$species <- c("community", speciesnames)
if(covariate_is_factor) {
df_quantiles_i$species <- c(rep("community", times = length(index_covariate_mean_ranef)), rep(speciesnames, times = nlev))
}
}
if(effect_type == "fixed") df_quantiles_i$species <- "community"
if(effect_type == "independent") df_quantiles_i$species <- speciesnames
# add covariate name as column
if(covariate_is_numeric){
df_quantiles_i$covariate <- current_cov
}
if(covariate_is_factor){
if(effect_type == "fixed"){
if(covariate_is_site_cov) df_quantiles_i$covariate <- paste0(current_cov, "_", levels_tmp)
if(!covariate_is_site_cov) df_quantiles_i$covariate <- paste0(current_cov, "_", levels_tmp)
}
if(effect_type == "ranef"){
if(covariate_is_site_cov) df_quantiles_i$covariate <- paste0(current_cov, "_", c(levels_tmp,
rep(levels_tmp, each = object@data$M)))
if(!covariate_is_site_cov) df_quantiles_i$covariate <- paste0(current_cov, "_", c(levels_tmp,
rep(levels_tmp, each = object@data$M)))
}
}
# sort species (either by median effect size or by names)
if(ordered) {
# this currently does not sort categorical covariates with random effects correctly.
# Ideally if combine = FALSE they should be sorted in descending order (at least for the second factor level)
if(covariate_is_numeric){
df_quantiles_i$species <- factor(df_quantiles_i$species,
levels = unique(df_quantiles_i$species[order(df_quantiles_i$median)]))
}
if(covariate_is_factor){
if(effect_type == "fixed") {
df_quantiles_i$species <- factor(df_quantiles_i$species,
levels = unique(df_quantiles_i$species[order(df_quantiles_i$median)]))
}
if(effect_type == "ranef"){
subset_level2 <- df_quantiles_i[df_quantiles_i$covariate == paste0(current_cov, "_", levels_tmp[2]),]
df_quantiles_i$species <- factor(df_quantiles_i$species,
levels = subset_level2$species[order(subset_level2$median)])
}
}
} else {
df_quantiles_i$species <- factor(df_quantiles_i$species, levels = unique(rev(df_quantiles_i$species)))
}
df_quantiles_list[[cov]] <- df_quantiles_i
# plot
type <- NULL # just for CRAN checks
covariate <- NULL
if(colorby == "significance") color_by <- "significance"
if(colorby == "Bayesian p-value") color_by <- "significance2"
if(!combine){
p_list[[cov]] <- ggplot (df_quantiles_i, aes_string(y = "species", x = "median", color = color_by)) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_pointrange(aes_string(xmin = "lower_outer", xmax = "upper_outer")) +
geom_linerange( aes_string(xmin = "lower_inner", xmax = "upper_inner"), size = 1) +
facet_grid(rows = vars(type),
cols = vars(covariate),
scales = "free_y",
space = "free_y"
) +
xlab ("Effect size") + ylab(element_blank()) +
theme_bw() +
theme(panel.grid.minor = element_blank(),
panel.grid.major.y = element_blank(),
strip.background.y = element_blank(),
strip.text.y = element_blank()) +
scale_color_manual(breaks = c("outer", "inner", "no"),
values=c("firebrick", "black", "grey50"),
guide = "none") +
ggtitle(paste("Effect sizes:", current_cov))
if(!covariate_is_factor) {
p_list[[cov]] <- p_list[[cov]] + theme(strip.background.x = element_blank(),
strip.text.x = element_blank())
}
if(color_by == "significance2"){
p_list[[cov]] <- p_list[[cov]] + labs(subtitle = "colors indicate Bayesian p-values of species")
}
if(hasArg(outdir)) {
ggsave(filename = file.path(outdir, paste0("effect_sizes_", submodel, "_", covariate, "_", Sys.Date(), ".png")),
plot = p_list[[cov]],
...)
}
}
} # end covariate loop
if(combine){
df_quantiles_all <- do.call(rbind, df_quantiles_list)
df_quantiles_all$species <- factor(df_quantiles_all$species,
levels = rev(sort(unique(as.character(df_quantiles_all$species)))))
p <- ggplot (df_quantiles_all, aes_string(y = "species", x = "median", color = color_by)) +
geom_vline(xintercept = 0, alpha = 0.2) +
geom_pointrange(aes_string(xmin = "lower_outer", xmax = "upper_outer")) +
geom_linerange (aes_string(xmin = "lower_inner", xmax = "upper_inner"), size = 1) +
facet_grid(rows = vars(type),
cols = vars(covariate),
scales = "free_y",
space = "free_y") +
xlab ("Effect size") + ylab(element_blank()) +
theme_bw() +
theme(panel.grid.minor = element_blank(),
panel.grid.major.y = element_blank(),
strip.background.y = element_blank(),
strip.text.y = element_blank()) +
scale_color_manual(breaks = c("outer", "inner", "no"),
values=c("firebrick", "black", "grey50"),
guide = "none")
if(hasArg(outdir)) {
ggsave(filename = file.path(outdir, paste0("effect_sizes_", submodel, "_", paste(cov_info_subset$covariate, collapse = "_"), "_",
ifelse(!colorby == "significance", "Bayesian_pval_", ""), Sys.Date(), ".png")),
plot = p,
...)
}
return(p)
}
if(!combine){
names(p_list) <- cov_info_subset$covariate
return(p_list)
}
}
# S4 generic for plot_coef(); concrete methods dispatch on class(object).
setGeneric("plot_coef", function(object, ...) standardGeneric("plot_coef"))
#' Plot effect sizes of covariates in community occupancy model
#'
#' Plot effect sizes for all species in a community (multi-species) occupancy model. Supports both continuous (numeric) and categorical (factor) covariates.
#'
#' @aliases plot_coef
#' @param object \code{commOccu} object
#' @param mcmc.list mcmc.list. Output of \code{\link{fit}} called on a \code{commOccu} object
#' @param submodel Submodel to get plots for. Can be "det" or "state"
#' @param ordered logical. Order species in plot by median effect (TRUE) or by species name (FALSE)
#' @param combine logical. Combine multiple plots into one (via facets)?
#' @param outdir Directory to save plots to (optional)
#' @param level Probability mass to include in the uncertainty interval (two values, second value - inner interval - will be plotted thicker)
#' @param colorby Whether to color estimates by "significance" (of the effect estimates), or "Bayesian p-value" (of the species)
#' @param ... additional arguments for \code{\link[ggplot2]{ggsave}}
#'
#' @return list of ggplot objects
#' @export
#'
#'
# Register plot.coef.commOccu (defined above) as the plot_coef() method
# for commOccu objects; the roxygen block above documents this method.
setMethod("plot_coef", signature(object = "commOccu"),
          plot.coef.commOccu)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/switch_bot_genotypes.R
\name{switchBOTgenotypes}
\alias{switchBOTgenotypes}
\title{XIBD BOT Genotype Switching}
\usage{
switchBOTgenotypes(ped.genotypes, hapmap.topbot)
}
\arguments{
\item{ped.genotypes}{a named list containing \code{pedigree}, \code{genotypes} and \code{model}.
See \code{Value} description in \code{\link{getGenotypes}} for more details.
The family IDs and individual IDs in \code{pedigree} must match the family IDs and individual IDs in the header of \code{genotypes}.}
\item{hapmap.topbot}{a data frame containing the Illumina TOP/BOT designation for the HapMap SNPs.
This file can be downloaded from \url{http://bioinf.wehi.edu.au/software/XIBD/index.html}.
This file contains the following 5 columns of information:
\enumerate{
\item Chromosome (\code{"numeric"} or \code{"integer"})
\item SNP identifier (type \code{"character"})
\item Genetic map distance (centi morgans cM, or morgans M - default) (type \code{"numeric"})
\item Base-pair position (type \code{"numeric"} or \code{"integer"})
\item Illumina's TOP or BOT designation of the SNP (type \code{"character"})
}
where each row describes a single marker. The data frame should contain the header
\code{chr, snp_id, pos_bp, pos_M} and \code{TOPBOT}.}
}
\value{
A named list of the same format as the input \code{ped.genotypes} with A and B alleles switched for BOT SNPs.
}
\description{
The HapMap allele frequencies in XIBD's HapMap allele frequency files are calculated for the A allele only,
where the A allele is determined by the following rules:
\enumerate{
\item When one of the possible variations of the SNP is adenine (A), then adenine is labeled the A allele
and the remaining variation is labeled the B allele, regardless of what this might be.
\item If adenine (A) is not a variation of the SNP but cytosine (C) is, then cytosine is labeled the A allele
and the remaining variation is labeled the B allele.
\item If neither adenine (A) nor cytosine (C) is a variant of the SNP then thymine (T) is labeled the A allele.
}
Illumina's convention for the naming of A and B alleles differs from that of the HapMap data
(\url{http://www.illumina.com/documents/products/technotes/technote_topbot.pdf}). Rather, the classification
of A and B alleles depends on the top (TOP) and bottom (BOT) designations of the SNP. This
means that the A allele in the HapMap data is not always the same as the A allele in the Illumina data. In
fact, alleles that have been named according to the BOT designation actually correspond to the B allele
in the HapMap data. To correct for this, \code{switchBOTgenotypes()} switches the A and B alleles in
the input genotypes for all SNPs corresponding to BOT designations. This means a homozygous genotype, 0, will be
changed to a homozygous alternative genotype, 2, and vice versa. Heterozygous genotypes will be unchanged.
NOTE: this function should only be implemented with Illumina SNPchip data when XIBD's HapMap reference data is used
and if there is a noticeable discrepancy between population allele frequencies calculated from the HapMap reference data
and those calculated from the input dataset.
}
\examples{
# The following should only be run if you have Illumina data and
# are using the HapMap reference data provided by XIBD.
# format and filter the data
my_genotypes <- getGenotypes(ped.map = example_pedmap,
reference.ped.map = example_reference_pedmap,
snp.ld = example_reference_ld,
model = 2,
maf = 0.01,
sample.max.missing = 0.1,
snp.max.missing = 0.1,
maximum.ld.r2 = 0.99,
chromosomes = NULL,
input.map.distance = "M",
reference.map.distance = "M")
# calculate allele frequencies from the input dataset
input_freq <- calculateAlleleFreq(ped.genotypes = my_genotypes)
hist(abs(my_genotypes[["genotypes"]][,"freq"] - input_freq[,"freq"]),
xlim = c(0,1),
main = "Before BOT change",
xlab = "abs(pop allele freq diff)")
# switch alleles
my_genotypes_2 <- switchBOTgenotypes(ped.genotypes = my_genotypes,
hapmap.topbot = example_hapmap_topbot)
# calculate allele frequencies when BOT alleles switched
input_freq <- calculateAlleleFreq(ped.genotypes = my_genotypes_2)
hist(abs(my_genotypes_2[["genotypes"]][,"freq"] - input_freq[,"freq"]),
xlim = c(0,1),
main = "After BOT change",
xlab = "abs(pop allele freq diff)")
}
| /step3_xIBD/XIBD/man/switchBOTgenotypes.Rd | no_license | RJHFMSTR/PofO_inference | R | false | true | 4,685 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/switch_bot_genotypes.R
\name{switchBOTgenotypes}
\alias{switchBOTgenotypes}
\title{XIBD BOT Genotype Switching}
\usage{
switchBOTgenotypes(ped.genotypes, hapmap.topbot)
}
\arguments{
\item{ped.genotypes}{a named list containing \code{pedigree}, \code{genotypes} and \code{model}.
See \code{Value} description in \code{\link{getGenotypes}} for more details.
The family IDs and individual IDs in \code{pedigree} must match the family IDs and individual IDs in the header of \code{genotypes}.}
\item{hapmap.topbot}{a data frame containing the Illumina TOP/BOT designation for the HapMap SNPs.
This file can be downloaded from \url{http://bioinf.wehi.edu.au/software/XIBD/index.html}.
This file contains the following 7 columns of information:
\enumerate{
\item Chromosome (\code{"numeric"} or \code{"integer"})
\item SNP identifier (type \code{"character"})
\item Genetic map distance (centi morgans cM, or morgans M - default) (type \code{"numeric"})
\item Base-pair position (type \code{"numeric"} or \code{"integer"})
\item Illuminas TOP or BOT designation of the SNP (type \code{"character"})
}
where each row describes a single marker. The data frame should contain the header
\code{chr, snp_id, pos_bp, pos_M} and \code{TOPBOT}.}
}
\value{
A named list of the same format as the input \code{ped.genotypes} with A and B alleles switched for BOT SNPs.
}
\description{
The HapMap allele frequencies in XIBDs HapMap allele frequency files are calculated for the A allele only,
where the A allele is determined by the following rules:
\enumerate{
\item When one of the possible variations of the SNP is adenine (A), then adenine is labeled the A allele
and the remaining variation is labeled the B allele, regardless of what this might be.
\item If adenine (A) is not a variation of the SNP but cytosine (C) is, then cytosine is labeled the A allele
and the remaining variation is labeled the B allele.
\item If neither adenine (A) or cytosine (C) are variants of the SNP then thymine (T) is labeled the A allele.
}
Illumina's convention for the naming of A and B alleles differs from that of the HapMap data
(\url{http://www.illumina.com/documents/products/technotes/technote_topbot.pdf}). Rather, the classification
of A and B alleles depends on the top (TOP) and bottom (BOT) designations of the SNP. This
means that the A allele in the HapMap data is not always the same as the A allele in the Illumina data. In
fact, alleles that have been named according to the BOT designation actually correspond to the B allele
in the HapMap data. To correct for this, \code{switchBOTgenotypes()} switches the A and B alleles in
the input genotypes for all SNPs corresponding to BOT designations. This means a homozygous genotype, 0, will be
changed to a homozygous alternative genotype, 2, and vice versa. Heterozygous genotypes will be unchanged.
NOTE: this function should only be implemented with Illumina SNPchip data when XIBD's HapMap reference data is used
and if there is a noticeable discrepancy between population allele frequencies calculated from the HapMap reference data
and those calculated from the input dataset.
}
\examples{
# The following should only be run if you have Illumina data and
# are using the HapMap reference data provided by XIBD.
# format and filter the data
my_genotypes <- getGenotypes(ped.map = example_pedmap,
reference.ped.map = example_reference_pedmap,
snp.ld = example_reference_ld,
model = 2,
maf = 0.01,
sample.max.missing = 0.1,
snp.max.missing = 0.1,
maximum.ld.r2 = 0.99,
chromosomes = NULL,
input.map.distance = "M",
reference.map.distance = "M")
# calculate allele frequencies from the input dataset
input_freq <- calculateAlleleFreq(ped.genotypes = my_genotypes)
hist(abs(my_genotypes[["genotypes"]][,"freq"] - input_freq[,"freq"]),
xlim = c(0,1),
main = "Before BOT change",
xlab = "abs(pop allele freq diff)")
# switch alleles
my_genotypes_2 <- switchBOTgenotypes(ped.genotypes = my_genotypes,
hapmap.topbot = example_hapmap_topbot)
# calculate allele frequencies when BOT alleles switched
input_freq <- calculateAlleleFreq(ped.genotypes = my_genotypes_2)
hist(abs(my_genotypes_2[["genotypes"]][,"freq"] - input_freq[,"freq"]),
xlim = c(0,1),
main = "After BOT change",
xlab = "abs(pop allele freq diff)")
}
|
# pipeline-test
# Smoke test for the PreProcess package: simulate a fake microarray
# channel, run the standard processing pipeline, and draw summary plots.
# ChannelType/Channel/process/PIPELINE.STANDARD come from PreProcess.
library(PreProcess)
set.seed(147745)  # fixed seed so the simulated array is reproducible
# simulate a moderately realistic looking microarray
nc <- 100  # array columns
nr <- 100  # array rows
v <- rexp(nc*nr, 1/1000)   # exponentially distributed "signal" per spot
b <- rnorm(nc*nr, 80, 10)  # normally distributed background
s <- sapply(v-b, max, 1)   # background-subtracted intensity, floored at 1
ct <- ChannelType('user', 'random', nc, nr, 'fake')
subbed <- Channel(name='fraud', parent='', type=ct, vec=s)
rm(ct, nc, nr, v, b, s) # clean some stuff
# example of standard data processing
processed <- process(subbed, PIPELINE.STANDARD)
summary(processed)
par(mfrow=c(2,1))  # stack line plot and histogram in one device
plot(processed)
hist(processed)
par(mfrow=c(1,1))  # restore single-panel layout
image(processed)
rm(subbed, processed)
| /data/genthat_extracted_code/PreProcess/tests/test06-pipeline.R | no_license | surayaaramli/typeRrh | R | false | false | 605 | r | # pipeline-test
# PreProcess pipeline smoke test (dataset-dump copy): simulate a fake
# microarray channel, run PIPELINE.STANDARD, and plot the results.
library(PreProcess)
set.seed(147745)
# simulate a moderately realistic looking microarray
nc <- 100
nr <- 100
v <- rexp(nc*nr, 1/1000)
b <- rnorm(nc*nr, 80, 10)
s <- sapply(v-b, max, 1)
ct <- ChannelType('user', 'random', nc, nr, 'fake')
subbed <- Channel(name='fraud', parent='', type=ct, vec=s)
rm(ct, nc, nr, v, b, s) # clean some stuff
# example of standard data processing
processed <- process(subbed, PIPELINE.STANDARD)
summary(processed)
par(mfrow=c(2,1))
plot(processed)
hist(processed)
par(mfrow=c(1,1))
image(processed)
rm(subbed, processed)
|
# Build rose-chart polygon coordinates: each row of `data` becomes one
# petal (circular sector) scaled by the `petal` column and optionally
# passed through the package's translate/rotate/zoom/mirror helpers
# (defined elsewhere in this package -- their behavior is assumed here).
#
# Arguments:
#   data    - data frame; one row per petal/class.
#   petal   - name of the column whose |value|/max(value) scales each
#             petal; if the column is absent (the default placeholder
#             name), every petal gets ratio 1.
#   n       - number of points used to discretize the arc.
#   center  - length-2 numeric vector, center of the rose.
#   translate.1, trans_h.2, trans_v.3, trans_al.4, rotate.5, zoom.6,
#   mirror.7, mirror_h.8, mirror_v.9
#           - parameters forwarded to the matching geometry helper;
#             NULL (or 0 for rotate.5) disables that step.
#   round   - total angle of the rose in degrees (360 = full circle).
# Returns: `data` merged (by helper column `i`) with polygon coordinate
#          columns x and y, one closed polygon per input row.
# NOTE: `round` is both an argument and the base function used below;
# R's call lookup skips the non-function binding, so round(...) works.
rose_stat_data <- function(data,petal='oooooooooo',
                           n,center,
                           translate.1,
                           trans_h.2,
                           trans_v.3,
                           trans_al.4,
                           rotate.5,
                           zoom.6,
                           mirror.7,
                           mirror_h.8,
                           mirror_v.9,
                           round
){
  origion=data
  origion$i=1:nrow(origion)
  # Per-row scale factor from the petal column (1 when column missing).
  if (petal %in% colnames(data)){
    ratio=abs(data[,petal]/max(data[,petal]))
  }else{
    ratio=rep(1,nrow(data))
  }
  #generate square coordinate
  # n points on the unit circle around `center`, covering `round` degrees.
  dd1=lapply(1:n, function(i) c(cos((i*round/n)/180*pi)+center[1],
                                sin((i*round/n)/180*pi)+center[2]))
  dd2=do.call(rbind,dd1)
  colnames(dd2)=c('x','y')
  # distribute coord for each class
  # Breakpoints splitting the n arc points evenly across the data rows.
  cut=round(seq(1, n, by = n/nrow(data)))
  for (i in 2:(length(cut)+1)) {
    # generate ddi
    if (i==2) ply=NULL
    a=cut[i-1]
    b=cut[i]
    if (i==(length(cut)+1)) b=n
    cut;a;b
    ddi=dd2[a:b,]
    if (i==2){
      # Full circle: prepend the circle's last point so sector 1 closes.
      if (round==360) ddi=rbind(dd2[nrow(dd2),],ddi)
    }
    # Close the sector polygon through the center on both ends.
    ddi=rbind(center,ddi,center)
    # ratio each part
    x=(ddi[,1]-center[1])*ratio[i-1]+center[1]
    y=(ddi[,2]-center[2])*ratio[i-1]+center[2]
    ddi=data.frame(x=x,y=y)
    plyi=cbind(i=i-1,ddi)
    ply=rbind(ply,plyi)
    if (i==(length(cut)+1)) rownames(ply)=NULL
  }
  ply_xy=ply[,c('x','y')]
  # center=c(ply_xy[1,1],ply_xy[1,2])
  #translate.1 trans_h.2 trans_v.3 trans_al.4
  if (!is.null(translate.1)) ply_xy=translate(ply_xy,translate.1)
  if (!is.null(trans_h.2)) ply_xy=trans_h(ply_xy,trans_h.2)
  if (!is.null(trans_v.3)) ply_xy=trans_v(ply_xy,trans_v.3)
  if (!is.null(trans_al.4)) ply_xy=trans_al(ply_xy,trans_al.4)
  #rotate.5
  # Re-read the (possibly shifted) first point as the pivot for rotation.
  center=c(ply_xy[1,1],ply_xy[1,2])
  if (rotate.5 != 0) ply_xy=rotate(df=ply_xy,center,rotate.5)
  #zoom.6
  center=c(ply_xy[1,1],ply_xy[1,2])
  if (!is.null(zoom.6)) ply_xy=zoom(df=ply_xy,center,zoom.6)
  #mirror.7
  if (!is.null(mirror.7)) ply_xy=mirror(ply_xy,mirror.7)
  if (!is.null(mirror_h.8)) ply_xy=mirror_h(ply_xy,mirror_h.8)
  if (!is.null(mirror_v.9)) ply_xy=mirror_v(ply_xy,mirror_v.9)
  ply[,c('x','y')]=ply_xy
  # Attach original columns to every polygon vertex via the row id `i`.
  data=merge(x=origion,y=ply,by = 'i')
  data
}
| /R/data_stat_rose.R | no_license | yikeshu0611/ggrose | R | false | false | 2,556 | r | rose_stat_data <- function(data,petal='oooooooooo',
n,center,
translate.1,
trans_h.2,
trans_v.3,
trans_al.4,
rotate.5,
zoom.6,
mirror.7,
mirror_h.8,
mirror_v.9,
round
){
origion=data
origion$i=1:nrow(origion)
if (petal %in% colnames(data)){
ratio=abs(data[,petal]/max(data[,petal]))
}else{
ratio=rep(1,nrow(data))
}
#generate square coordinate
dd1=lapply(1:n, function(i) c(cos((i*round/n)/180*pi)+center[1],
sin((i*round/n)/180*pi)+center[2]))
dd2=do.call(rbind,dd1)
colnames(dd2)=c('x','y')
# distribute coord for each class
cut=round(seq(1, n, by = n/nrow(data)))
for (i in 2:(length(cut)+1)) {
# generate ddi
if (i==2) ply=NULL
a=cut[i-1]
b=cut[i]
if (i==(length(cut)+1)) b=n
cut;a;b
ddi=dd2[a:b,]
if (i==2){
if (round==360) ddi=rbind(dd2[nrow(dd2),],ddi)
}
ddi=rbind(center,ddi,center)
# ratio each part
x=(ddi[,1]-center[1])*ratio[i-1]+center[1]
y=(ddi[,2]-center[2])*ratio[i-1]+center[2]
ddi=data.frame(x=x,y=y)
plyi=cbind(i=i-1,ddi)
ply=rbind(ply,plyi)
if (i==(length(cut)+1)) rownames(ply)=NULL
}
ply_xy=ply[,c('x','y')]
# center=c(ply_xy[1,1],ply_xy[1,2])
#translate.1 trans_h.2 trans_v.3 trans_al.4
if (!is.null(translate.1)) ply_xy=translate(ply_xy,translate.1)
if (!is.null(trans_h.2)) ply_xy=trans_h(ply_xy,trans_h.2)
if (!is.null(trans_v.3)) ply_xy=trans_v(ply_xy,trans_v.3)
if (!is.null(trans_al.4)) ply_xy=trans_al(ply_xy,trans_al.4)
#rotate.5
center=c(ply_xy[1,1],ply_xy[1,2])
if (rotate.5 != 0) ply_xy=rotate(df=ply_xy,center,rotate.5)
#zoom.6
center=c(ply_xy[1,1],ply_xy[1,2])
if (!is.null(zoom.6)) ply_xy=zoom(df=ply_xy,center,zoom.6)
#mirror.7
if (!is.null(mirror.7)) ply_xy=mirror(ply_xy,mirror.7)
if (!is.null(mirror_h.8)) ply_xy=mirror_h(ply_xy,mirror_h.8)
if (!is.null(mirror_v.9)) ply_xy=mirror_v(ply_xy,mirror_v.9)
ply[,c('x','y')]=ply_xy
data=merge(x=origion,y=ply,by = 'i')
data
}
|
# Global lookup tables shared by best()/is.outcome(): supported outcome
# names and the column index of each outcome's rate in
# outcome-of-care-measures.csv. Plain `<-` suffices at top level; the
# original `<<-` added nothing and is an anti-pattern.
NL <- "\n"
outcomeSTRINGS <- c("heart attack", "heart failure", "pneumonia")
outcomeINDEX <- c(11, 17, 23)
# Return the name of the hospital in `state` with the lowest rate for
# `outcome_name` ("heart attack", "heart failure" or "pneumonia").
# Stops with "invalid state"/"invalid outcome" on bad arguments.
#
# Args:
#   state        - two-letter state code.
#   outcome_name - one of the entries in outcomeSTRINGS.
# Returns: character scalar, the best hospital's name (column 2).
best <- function(state, outcome_name) {
  checkarguments(state, outcome_name)
  # Map the outcome name to its column index in the data file.
  index <- match(outcome_name, outcomeSTRINGS)
  outcome_number <- outcomeINDEX[index]
  # Read this state's rows and sort them by the outcome rate.
  # (Removed dead assignment `outcomeName <- outcome[]` from the original.)
  outcome <- readoutcome(state)
  results <- parse.data(outcome, outcome_number)
  # First row after sorting has the lowest rate; column 2 is the name.
  results[1, 2]
}
# Sort the outcome table by the rate in column `outcome_number`,
# dropping rows whose rate is NA and breaking ties alphabetically by
# hospital name (column 2). `[[` extracts the column as a vector;
# passing a one-column data.frame to order() (as the original did)
# is fragile.
#
# Args:
#   out            - data frame read by readoutcome().
#   outcome_number - index of the numeric rate column.
# Returns: the reordered data frame (NA-rate rows removed).
parse.data <- function(out, outcome_number) {
  out[order(out[[outcome_number]], out[[2]], na.last = NA), ]
}
# Read outcome-of-care-measures.csv from the working directory and
# return only the rows for one state, with the three outcome-rate
# columns used by best() (11, 17, 23 -- see outcomeINDEX) coerced to
# numeric. "Not Available" entries end up as NA after the gsub ->
# as.numeric round trip.
readoutcome<-function(state){
value<-read.csv("outcome-of-care-measures.csv", colClasses = "character")
table <- data.frame( value, stringsAsFactors = FALSE)
# Replace "Not Available" so that as.numeric() yields NA for those cells.
table[,11] <- as.numeric(gsub("Not Available", NA, table[,11]))
table[,17] <- as.numeric(gsub("Not Available", NA, table[,17]))
table[,23] <- as.numeric(gsub("Not Available", NA, table[,23]))
# Keep only rows whose State column matches the requested code
# (subset() compares the CSV's State column to the function argument).
table <- subset(table, State == state)
return(table)
}
# Validate both arguments, stopping with an error on the first invalid
# one. The outcome is checked before the state, matching the original
# error precedence.
checkarguments <- function(state, outcome_name) {
  if (!is.outcome(outcome_name)) {
    stop("invalid outcome")
  }
  if (!is.state(state)) {
    stop("invalid state")
  }
}
# Persist a data frame as tab-separated text in "mydata.txt" in the
# current working directory.
writefile <- function(mydata) {
  write.table(mydata, file = "mydata.txt", sep = "\t")
}
# Test whether `state` is a valid two-letter US state/territory code
# (the codes present in the outcome data, including DC, GU, PR and VI).
#
# Args:    state - character scalar (vector input works elementwise).
# Returns: logical, TRUE for known codes.
is.state <- function(state) {
  statelist <- c(
    "AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DC", "DE", "FL",
    "GA", "GU", "HI", "IA", "ID", "IL", "IN", "KS", "KY", "LA",
    "MA", "MD", "ME", "MI", "MN", "MO", "MS", "MT", "NC", "ND",
    "NE", "NH", "NJ", "NM", "NV", "NY", "OH", "OK", "OR", "PA",
    "PR", "RI", "SC", "SD", "TN", "TX", "UT", "VT", "VI", "VA",
    "WA", "WI", "WV", "WY"
  )
  # %in% is vectorized and NA-safe; this replaces the original
  # !is.na(match(...)) construction and stops shadowing base::returnValue.
  state %in% statelist
}
# TRUE when `outcome_name` is one of the supported outcomes listed in
# the global outcomeSTRINGS lookup defined at the top of this file.
is.outcome <- function(outcome_name) {
  outcome_name %in% outcomeSTRINGS
}
| /best.R | no_license | Natachacarvalho/R-course | R | false | false | 1,798 | r |
# Dataset-dump duplicate of best.R: helpers to find the hospital with
# the lowest outcome rate for a given state (outcome columns 11/17/23
# of outcome-of-care-measures.csv).
NL <<- "\n"
outcomeSTRINGS <<- c( "heart attack","heart failure" ,"pneumonia")
outcomeINDEX <<- c( 11, 17,23 )
best <- function(state, outcome_name ){
checkarguments(state, outcome_name )
# need to looktable
index <- match(outcome_name , outcomeSTRINGS)
outcome_number <- outcomeINDEX[index]
outcome <- readoutcome(state)
outcomeName <- outcome[]
results <- parse.data(outcome, outcome_number)
print(results[1,2])
}
parse.data<- function (out, outcome_number) {
out.sorted <- out[order(out[outcome_number] ,na.last=NA ),]
}
readoutcome<-function(state){
value<-read.csv("outcome-of-care-measures.csv", colClasses = "character")
table <- data.frame( value, stringsAsFactors = FALSE)
table[,11] <- as.numeric(gsub("Not Available", NA, table[,11]))
table[,17] <- as.numeric(gsub("Not Available", NA, table[,17]))
table[,23] <- as.numeric(gsub("Not Available", NA, table[,23]))
table <- subset(table, State == state)
return(table)
}
checkarguments<-function (state, outcome_name ) {
if (!is.outcome(outcome_name)) stop ("invalid outcome")
if (!is.state(state)) stop ("invalid state")
}
writefile <- function( mydata) {
write.table(mydata, "mydata.txt", sep="\t")
}
is.state<- function (state){
statelist =
c( "AK" ,"AL" ,"AR" ,"AZ" ,"CA" ,"CO" ,"CT" ,"DC" ,"DE" ,"FL" ,
"GA" ,"GU" ,"HI" ,"IA" ,"ID" ,"IL" ,"IN" ,"KS" ,"KY" ,"LA" ,
"MA" ,"MD" ,"ME" ,"MI" ,"MN" ,"MO" ,"MS" ,"MT" ,"NC" ,"ND" ,
"NE" ,"NH" ,"NJ" ,"NM" ,"NV" ,"NY" ,"OH" ,"OK" ,"OR" ,"PA" ,
"PR" ,"RI" ,"SC" ,"SD" ,"TN" ,"TX" ,"UT" ,"VT" ,"VI" ,"VA" ,
"WA" ,"WI" ,"WV" ,"WY")
returnValue <-(! is.na(match( state, statelist)))
}
is.outcome<-function(outcome_name) {
returnValue <-(! is.na(match( outcome_name, outcomeSTRINGS)))
}
|
# Code to find the "hit" sounds using a correlation threshold of 0.775
# (original Portuguese comment: "codigo para buscar os sons de acerto
# com a taxa em 0.775").
# Workflow: read an intensity trace (assumed to span 60 s -- TODO
# confirm), smooth |x| piecewise, take the 12.2-14.2 s segment as a
# template, slide-correlate it over the trace, and report times where
# the correlation exceeds the threshold.
x = read.delim("C:/Users/Windows 7/test.txt", header=F)
x = x[,1]
n = length(x)
delta_t = 60/n          # seconds per sample
d = 1000                # chunk size for piecewise smoothing
x = c(x, 0*(1:d))       # zero-pad so the last chunk is complete
n = length(x)
x11(); plot(seq(0,len = n, by = delta_t), x, xlab = "t[s]", ylab = "intensity [?]", type = "l", col = "grey")
S = NULL
s = seq(1,n, by = d)
# Smooth |x| chunk by chunk; S accumulates (index, smoothed value) rows.
for (i in 2:length(s)) {
b = abs(x[s[i-1]:(s[i]-1)])
a = s[i-1]:(s[i]-1)
z = smooth.spline(a,b)
S = rbind(S, cbind(z$x, z$y))
}
# Smooth the trailing remainder left after the loop.
b = abs(x[s[i]:n])
a = s[i]:n
z = smooth.spline(a,b)
S = rbind(S,cbind(z$x,z$y))
S[(S[,2] < 100), 2] = 0   # suppress low-intensity values as noise
x11(); plot(seq(0,len = n, by = delta_t), x, xlab = "t[s]", ylab = "intensity [?]", type = "l", col = "grey")
points(seq(0, len = n, by = delta_t), S[,2], type = "l")
x11(); plot(seq(0,len = n, by = delta_t), x, xlab = "t[s]", ylab = "intensity [?]", type = "l", col = "grey", xlim = c(12.2,14.2)) # change the time window here (original: "mudar o tempo aqui")
points(seq(0, len = n, by = delta_t), S[,2], type = "l")
x11(); plot(S[,1], S[,2], type="l")
# Template: the smoothed segment between 12.2 s and 14.2 s.
t1 = round(12.2/delta_t)
t2 = round(14.2/delta_t)
abline(v = c(t1,t2), col = "blue", lwd = 2)
k = t1 <= S[,1] & S[,1] <=t2
M = cbind(S[k,])
m = nrow(M)
# Sliding correlation of the template against the smoothed trace.
D = 0*S[,1]
for (i in 1:(n-m+1)) { D[i] = cor(M[,2], S[i:((i+m)-1),2]) }
x11(); plot(seq(0,len = n, by = delta_t), D, xlab = "t[s]", ylab = "correlation", type = "l", col = "blue")
# Keep peaks above 0.775, collapsing runs of nearby indices.
mask = D > 0.775
mask = which(mask)
mask = mask[which(diff(mask)>3) - 1]
x11(); plot(S[,1],S[,2], type = "l")
abline(v = S[mask,1], col = "red")
# Convert detected indices to "second:frac" labels (24 units per ~1 s;
# presumably video frames -- TODO confirm).
E = S[mask,1]*delta_t
E = cbind(floor(E), round((E-floor(E))*24/0.99))
E = unique(apply(E,1,paste, collapse = ":"))
| /RP_Codigo_Reconhece_Som_Acerto.R | no_license | AmandaPita/Codigos-aulas-usp | R | false | false | 1,629 | r | # codigo para buscar os sons de acerto com a taxa em 0.775
# Dataset-dump duplicate of the hit-sound detection script: smooth the
# trace, correlate a 12.2-14.2 s template over it, threshold at 0.775.
x = read.delim("C:/Users/Windows 7/test.txt", header=F)
x = x[,1]
n = length(x)
delta_t = 60/n
d = 1000
x = c(x, 0*(1:d))
n = length(x)
x11(); plot(seq(0,len = n, by = delta_t), x, xlab = "t[s]", ylab = "intensity [?]", type = "l", col = "grey")
S = NULL
s = seq(1,n, by = d)
for (i in 2:length(s)) {
b = abs(x[s[i-1]:(s[i]-1)])
a = s[i-1]:(s[i]-1)
z = smooth.spline(a,b)
S = rbind(S, cbind(z$x, z$y))
}
b = abs(x[s[i]:n])
a = s[i]:n
z = smooth.spline(a,b)
S = rbind(S,cbind(z$x,z$y))
S[(S[,2] < 100), 2] = 0
x11(); plot(seq(0,len = n, by = delta_t), x, xlab = "t[s]", ylab = "intensity [?]", type = "l", col = "grey")
points(seq(0, len = n, by = delta_t), S[,2], type = "l")
x11(); plot(seq(0,len = n, by = delta_t), x, xlab = "t[s]", ylab = "intensity [?]", type = "l", col = "grey", xlim = c(12.2,14.2)) # change the time window here (original: "mudar o tempo aqui")
points(seq(0, len = n, by = delta_t), S[,2], type = "l")
x11(); plot(S[,1], S[,2], type="l")
t1 = round(12.2/delta_t)
t2 = round(14.2/delta_t)
abline(v = c(t1,t2), col = "blue", lwd = 2)
k = t1 <= S[,1] & S[,1] <=t2
M = cbind(S[k,])
m = nrow(M)
D = 0*S[,1]
for (i in 1:(n-m+1)) { D[i] = cor(M[,2], S[i:((i+m)-1),2]) }
x11(); plot(seq(0,len = n, by = delta_t), D, xlab = "t[s]", ylab = "correlation", type = "l", col = "blue")
mask = D > 0.775
mask = which(mask)
mask = mask[which(diff(mask)>3) - 1]
x11(); plot(S[,1],S[,2], type = "l")
abline(v = S[mask,1], col = "red")
E = S[mask,1]*delta_t
E = cbind(floor(E), round((E-floor(E))*24/0.99))
E = unique(apply(E,1,paste, collapse = ":"))
|
# ******************************************************************************
# Project: Generations, article for the WZB-Mitteilungen
# Task: Generate graphs
# ******************************************************************************
library(tibble) # Dataframes
library(rlang)
library(dplyr) # Data wrangling
library(ggplot2) # Graphs
library(scales)
library(magrittr)
# ______________________________________________________________________________
# Load data ====
# Load the prepared ESS extract; keep rows with nonmissing weight and
# year, respondents older than 15 at interview, Western European countries.
ess_allctries <- readRDS("data/ess_allctries_19.rds")
#ess <- readRDS("data/ess.rds")
#allbus <- readRDS("./data/allbus-reduced.rds")
ess <- ess_allctries %>%
  filter(!is.na(dweight) & !is.na(year)) %>%
  filter(age_doi > 15) %>%
  filter(western_europe == 1)
# ______________________________________________________________________________
# Collapse at the generation-round ====
# Step 1: dweight-weighted means per country x round x generation;
# step 2: unweighted mean over countries per round x generation;
# finally map the ESS round number to a calendar year.
ess_gr <-
  ess %>%
  filter(!is.na(generation)) %>%
  group_by(essround, cname_en, generation) %>%
  summarize_at(vars(year, polintr, vote, contplt, wrkprty, wrkorg, badge,
                    sgnptit, pbldmn, bctprd, clsprty),
               ~weighted.mean(., dweight, na.rm = T )) %>%
  group_by(essround, generation) %>%
  select(-cname_en) %>% dplyr::summarize_all(mean, na.rm = TRUE) %>%
  mutate(year = (essround * 2) + 2000)
# ______________________________________________________________________________
# Graphs ====
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Define language and variable labels ----
#
# Active output language ("de" or "en"); selects which *_de/*_en columns
# and list entries survive the renaming below.
language <- "de"
# Lookup table: variable name -> plot title in each language.
vars <- tibble(
  varname = c("polintr", "vote", "contplt", "wrkprty", "wrkorg", "badge",
              "sgnptit", "pbldmn", "bctprd", "clsprty"),
  title_en = c("Interested in politics",
               "Voted in last national election",
               "Contacted politician or government official, in last 12 months",
               "Worked in political party or action group, in last 12 months",
               "Worked in another organisation or association, in last 12 months",
               "Worn or displayed campaign badge/sticker, in last 12 months",
               "Signed a petition, in last 12 months",
               "Taken part in lawful public demonstration, in last 12 months",
               "Boycotted certain products, in last 12 months",
               "Feel close to a particular party"),
  title_de = c("Politisches Interesse",
               "Wahlteilnahme (letzte nationale Wahlen)",
               "Kontakt zu einem Politiker oder einer Amtsperson aufgenommen",
               "In einer politischen Partei oder Gruppierung mitgearbeitet",
               "In einer anderen Organisation oder Ähnlichem mitgearbeitet",
               "Abzeichen/Aufkleber einer politischen Kampagne getragen",
               "Bürgerbegehren oder Volksbegehren unterschrieben",
               "Teilnahme an genehmigter öffentlicher Demonstration",
               "Boykott bestimmter Produkte",
               "Nähe zu einer Partei"
  )
)
# Axis/legend labels per language.
labels <- list(
  yname_en = "Share in generation (in %)",
  yname_de = "Anteil an Generation (in %)",
  xname_en = "Age at date of interview",
  xname_de = "Alter am Tag des Interviews",
  color_en = c("Baby boomers (1955-69)", "Generation X (1970-84)", "Millennials (1985-2000)"),
  color_de = c("Baby Boomer (1955-69)", "Generation X (1970-84)", "Millennials (1985-2000)")
)
# Switch language
# Strip the "_<language>" suffix from the chosen columns/entries and drop
# the other language's versions (anything still carrying an underscore).
vars %<>%
  rename_with(~stringr::str_remove( ., paste0("_",language))) %>%
  select(!contains("_"))
labels %<>%
  purrr::set_names(~stringr::str_remove( ., paste0("_",language))) %>%
  .[!stringr::str_detect(names(.), "_")]
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Loop over variables and export graphs ----
# Line chart of one participation indicator over ESS rounds (2002-2018),
# one line per generation, and save it to figures/<varname>.png.
#
# Arguments:
#   .data   - collapsed round x generation means (e.g. ess_gr).
#   varname - quosure naming the indicator column (callers build it
#             with parse_quo(); see the walk() call further below).
#   vars    - lookup tibble mapping varname -> localized plot title.
#   labels  - list of axis/legend labels in the active language.
# NOTE(review): the ggplot chain is neither assigned nor printed before
# ggsave(); ggsave() then falls back to ggplot2's "last plot", which is
# updated on print -- verify the intended plot is what gets saved.
plot_regular_freq <- function(.data, varname, vars, labels ){
  gd <- .data %>% mutate(var = !!varname)
  varname_quo <- as_name({{ varname }})
  # Localized title for this variable (filter on the `varname` column).
  labels$title <- vars %>% filter(varname == varname_quo) %>% pull(title)
  gd %>%
    ggplot(
      aes(x = year, y = var*100,
          group = generation)) +
    geom_line(aes(color = generation), size = 1.05) +
    geom_point(aes(color = generation), size = 3) +
    scale_x_continuous(
      name = NULL,
      limits = c(2002,2018),
      breaks = seq(2002,2018,2)
    ) +
    scale_y_continuous(
      limits = c(0, ceiling(max(gd$var)*100)),
      breaks=pretty_breaks()
    ) +
    scale_color_manual(
      values = rev(wesanderson::wes_palette("Darjeeling1",3)),
      label = labels$color
    ) +
    labs(
      title = labels$title,
      y = labels$yname,
      subtitle = "European Social Survey (2002-2018)",
      color = "Generation:"
    ) +
    theme_bw() +
    theme(
      text = element_text(family = "Crimson", size = 12),
      plot.subtitle = element_text(face = "italic"),
      legend.position = "bottom"
    )
  # Export as PNG, 19 cm wide at 300 dpi.
  ggsave(
    paste0("figures/", quo_name(varname),".png"),
    width = 19, height = 19/1.35,
    dpi = 300, units = "cm"
  )
  #filepath <- paste0("figures/", varname[i], ".pdf")
  #ggsave(filepath, p)
}
#purrr::walk(vars$varname, ~plot_regular_freq(ess_gr, parse_quo(.x, current_env()), vars, labels))
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Compute and plot "Age of Interview" graphs ----
# Scatter + loess plot of one (binary) participation indicator by age at
# interview, one curve per generation, averaged over countries; saved to
# figures/<fname>-<varname>.png.
#
# Arguments:
#   .data   - respondent-level ESS data (e.g. ess).
#   varname - quosure naming the indicator column.
#   vars    - lookup tibble mapping varname -> localized plot title.
#   labels  - list of axis/legend labels in the active language.
#   fname   - file-name prefix for the saved figure.
# NOTE(review): as in plot_regular_freq(), the plot is neither assigned
# nor printed before ggsave(); confirm the intended plot is saved.
plot_agedoi_freq <- function(.data, varname, vars, labels, fname = "agedoi"){
  varname_quo <- as_name({{ varname }})
  labels$title <- vars %>% filter(varname == varname_quo) %>% pull(title)
  .data %>%
    filter(!is.na(generation)) %>%
    # Remove last age_doi of a generation
    filter(!(age_doi %in% c(64,65))) %>%
    filter(!(generation == "Millennial" & age_doi >= 34)) %>%
    filter(!(generation == "Xer" & age_doi >= 49)) %>%
    # Group age_doi due to small n (to be discussed)
    #mutate(
    # age_doi = cut(age_doi,
    # breaks = c(14, seq(20,65,3)),
    # labels = c(14, seq(20,62,3)
    # )
    #)) %>%
    filter(!is.na(age_doi)) %>%
    # Compute weighted means
    # Weighted share with the indicator == 1 per country/generation/age,
    # then the unweighted mean of those shares across countries (in %).
    group_by(cname_en, generation, age_doi) %>%
    count(!!varname, wt = dweight) %>%
    filter(!is.na(!!varname)) %>%
    mutate(f = n/sum(n)) %>%
    filter(!!varname == 1) %>%
    group_by(generation, age_doi) %>%
    summarize(f = mean(f)*100) %>%
    # Recode age_doi back to numeric and center data point
    #mutate(age_doi = as.numeric(as.character(age_doi))+2) %>%
    # Generate plot
    ggplot(aes(
      x = age_doi, y = f,
      group = generation, color = (generation)
    )) +
    geom_point() +
    geom_smooth(method = 'loess', formula = 'y ~ x', se = TRUE) +
    scale_x_continuous(breaks = c(16, seq(20,60,5), 64)) +
    scale_y_continuous(
      limits = c(0, NA),
      breaks = pretty_breaks()
    ) +
    scale_color_discrete(
      labels = labels$color
    ) +
    coord_cartesian(xlim = c(16,64), expand = TRUE) +
    labs(
      title = labels$title, subtitle = "European Social Survey (2002-2018)",
      x = labels$xname, y = labels$yname,
      color = "Generation:"
    ) +
    theme_bw() +
    theme(
      text = element_text(family = "Crimson", size = 12),
      plot.subtitle = element_text(face = "italic"),
      legend.position = "bottom"
    )
  # Export as PNG, 19 cm wide at 300 dpi.
  ggsave(
    paste0("figures/", fname, "-", quo_name(varname),".png"),
    width = 19, height = 19/1.35,
    dpi = 300, units = "cm"
  )
}
# Render and save the age-at-interview plot for every indicator in
# `vars` (each variable name is parsed into a quosure for tidy eval).
purrr::walk(
  vars$varname,
  ~plot_agedoi_freq(ess, parse_quo(.x, env = current_env()), vars, labels)
)
#plot_agedoi_freq(allbus, polintr, fname = "test")
#plot_agedoi_freq(allbus, mmbprty, fname = "agedoi-allbus")
| /scripts/generate_graphs.R | no_license | jolyphil/wzb-generations | R | false | false | 7,507 | r | # ******************************************************************************
# Project: Generations, article for the WZB-Mitteilungen
# Task: Generate graphs
# ******************************************************************************
library(tibble) # Dataframes
library(rlang)
library(dplyr) # Data wrangling
library(ggplot2) # Graphs
library(scales)
library(magrittr)
# ______________________________________________________________________________
# Load data ====
# Respondent-level ESS data, all countries.
ess_allctries <- readRDS("data/ess_allctries_19.rds")
#ess <- readRDS("data/ess.rds")
#allbus <- readRDS("./data/allbus-reduced.rds")
# Analysis sample: valid design weight and interview year, respondents older
# than 15, Western European countries only.
ess <- ess_allctries %>%
filter(!is.na(dweight) & !is.na(year)) %>%
filter(age_doi > 15) %>%
filter(western_europe == 1)
# ______________________________________________________________________________
# Collapse at the generation-round ====
# One row per ESS round x generation: first design-weighted means of the
# participation variables within round x country x generation, then an
# unweighted average of those country means within round x generation.
# `year` is finally recoded from the round number (round r ~ 2000 + 2r).
ess_gr <-
ess %>%
filter(!is.na(generation)) %>%
group_by(essround, cname_en, generation) %>%
summarize_at(vars(year, polintr, vote, contplt, wrkprty, wrkorg, badge,
sgnptit, pbldmn, bctprd, clsprty),
~weighted.mean(., dweight, na.rm = T )) %>%
group_by(essround, generation) %>%
select(-cname_en) %>% dplyr::summarize_all(mean, na.rm = TRUE) %>%
mutate(year = (essround * 2) + 2000)
# ______________________________________________________________________________
# Graphs ====
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Define Langauge and variable labels ----
#
language <- "de"
vars <- tibble(
varname = c("polintr", "vote", "contplt", "wrkprty", "wrkorg", "badge",
"sgnptit", "pbldmn", "bctprd", "clsprty"),
title_en = c("Interested in politics",
"Voted in last national election",
"Contacted politician or government official, in last 12 months",
"Worked in political party or action group, in last 12 months",
"Worked in another organisation or association, in last 12 months",
"Worn or displayed campaign badge/sticker, in last 12 months",
"Signed a petition, in last 12 months",
"Taken part in lawful public demonstration, in last 12 months",
"Boycotted certain products, in last 12 months",
"Feel close to a particular party"),
title_de = c("Politisches Interesse",
"Wahlteilnahme (letzte nationale Wahlen)",
"Kontakt zu einem Politiker oder einer Amtsperson aufgenommen",
"In einer politischen Partei oder Gruppierung mitgearbeitet",
"In einer anderen Organisation oder Ähnlichem mitgearbeitet",
"Abzeichen/Aufkleber einer politischen Kampagne getragen",
"Bürgerbegehren oder Volksbegehren unterschrieben",
"Teilnahme an genehmigter öffentlicher Demonstration",
"Boykott bestimmter Produkte",
"Nähe zu einer Partei"
)
)
labels <- list(
yname_en = "Share in generation (in %)",
yname_de = "Anteil an Generation (in %)",
xname_en = "Age at date of interview",
xname_de = "Alter am Tag des Interviews",
color_en = c("Baby boomers (1955-69)", "Generation X (1970-84)", "Millennials (1985-2000)"),
color_de = c("Baby Boomer (1955-69)", "Generation X (1970-84)", "Millennials (1985-2000)")
)
# Switch language
# Keep only the labels of the selected `language`: strip the "_<language>"
# suffix from the matching columns/entries, then drop everything that still
# carries a language suffix (i.e. the other language's versions).
vars %<>%
rename_with(~stringr::str_remove( ., paste0("_",language))) %>%
select(!contains("_"))
labels %<>%
purrr::set_names(~stringr::str_remove( ., paste0("_",language))) %>%
.[!stringr::str_detect(names(.), "_")]
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Loop over variables and export graphs ----
#' Plot the share of a 0/1 outcome by generation across ESS rounds and save
#' the figure to figures/<varname>.png.
#'
#' @param .data data collapsed at the round x generation level; must contain
#'   `year`, `generation`, and the outcome column named by `varname`
#'   (a proportion in [0, 1]).
#' @param varname quosure naming the outcome column (unquoted below via !!).
#' @param vars tibble with columns `varname` and `title`, used to look up the
#'   plot title for this outcome.
#' @param labels list with elements `yname` (y-axis label) and `color`
#'   (legend labels for the three generations).
#' @return the value of ggsave(), invisibly; called for the side effect of
#'   writing a PNG file.
plot_regular_freq <- function(.data, varname, vars, labels ){
  gd <- .data %>% mutate(var = !!varname)
  # Resolve the quosure to a string to look up the title in `vars`
  # (as_name() is the current rlang spelling; quo_name() is superseded).
  varname_quo <- as_name({{ varname }})
  labels$title <- vars %>% filter(varname == varname_quo) %>% pull(title)
  # Build the plot into a named object (rather than relying on last_plot())
  # so ggsave() below always saves exactly this plot.
  p <- gd %>%
    ggplot(
      aes(x = year, y = var*100,
          group = generation)) +
    geom_line(aes(color = generation), size = 1.05) +
    geom_point(aes(color = generation), size = 3) +
    scale_x_continuous(
      name = NULL,
      limits = c(2002,2018),
      breaks = seq(2002,2018,2)
    ) +
    scale_y_continuous(
      # na.rm = TRUE guards against an NA upper limit when some
      # round x generation cell has a missing mean.
      limits = c(0, ceiling(max(gd$var, na.rm = TRUE)*100)),
      breaks = pretty_breaks()
    ) +
    scale_color_manual(
      values = rev(wesanderson::wes_palette("Darjeeling1",3)),
      # spelled out in full: the previous `label =` only worked via
      # partial argument matching
      labels = labels$color
    ) +
    labs(
      title = labels$title,
      y = labels$yname,
      subtitle = "European Social Survey (2002-2018)",
      color = "Generation:"
    ) +
    theme_bw() +
    theme(
      text = element_text(family = "Crimson", size = 12),
      plot.subtitle = element_text(face = "italic"),
      legend.position = "bottom"
    )
  ggsave(
    paste0("figures/", as_name(varname), ".png"),
    plot = p,
    width = 19, height = 19/1.35,
    dpi = 300, units = "cm"
  )
}
#purrr::walk(vars$varname, ~plot_regular_freq(ess_gr, parse_quo(.x, current_env()), vars, labels))
# _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
# Compute and plot "Age of Interview" graphs ----
# Plot, for each generation, the share of respondents with a positive (== 1)
# outcome by age at the date of interview, pooled across countries, and save
# the figure to figures/<fname>-<varname>.png.
#
# .data:   respondent-level ESS data (needs generation, age_doi, cname_en,
#          dweight, and the outcome column)
# varname: quosure naming a 0/1 outcome column (unquoted below with !!)
# vars:    tibble mapping varname -> plot title
# labels:  list with xname, yname and color (legend) labels
# fname:   file-name prefix for the saved figure
plot_agedoi_freq <- function(.data, varname, vars, labels, fname = "agedoi"){
# Resolve the quosure to a string so the title can be looked up in `vars`.
varname_quo <- as_name({{ varname }})
labels$title <- vars %>% filter(varname == varname_quo) %>% pull(title)
.data %>%
filter(!is.na(generation)) %>%
# Remove last age_doi of a generation
filter(!(age_doi %in% c(64,65))) %>%
filter(!(generation == "Millennial" & age_doi >= 34)) %>%
filter(!(generation == "Xer" & age_doi >= 49)) %>%
# Group age_doi due to small n (to be discussed)
#mutate(
# age_doi = cut(age_doi,
# breaks = c(14, seq(20,65,3)),
# labels = c(14, seq(20,62,3)
# )
#)) %>%
filter(!is.na(age_doi)) %>%
# Compute weighted means
# Within each country x generation x age cell: design-weighted counts of the
# outcome's levels, converted to shares; keep the share of the `1` level.
group_by(cname_en, generation, age_doi) %>%
count(!!varname, wt = dweight) %>%
filter(!is.na(!!varname)) %>%
mutate(f = n/sum(n)) %>%
filter(!!varname == 1) %>%
# Average the country-level shares (unweighted across countries), in percent.
group_by(generation, age_doi) %>%
summarize(f = mean(f)*100) %>%
# Recode age_doi back to numeric and center data point
#mutate(age_doi = as.numeric(as.character(age_doi))+2) %>%
# Generate plot
ggplot(aes(
x = age_doi, y = f,
group = generation, color = (generation)
)) +
geom_point() +
geom_smooth(method = 'loess', formula = 'y ~ x', se = TRUE) +
scale_x_continuous(breaks = c(16, seq(20,60,5), 64)) +
scale_y_continuous(
limits = c(0, NA),
breaks = pretty_breaks()
) +
scale_color_discrete(
labels = labels$color
) +
coord_cartesian(xlim = c(16,64), expand = TRUE) +
labs(
title = labels$title, subtitle = "European Social Survey (2002-2018)",
x = labels$xname, y = labels$yname,
color = "Generation:"
) +
theme_bw() +
theme(
text = element_text(family = "Crimson", size = 12),
plot.subtitle = element_text(face = "italic"),
legend.position = "bottom"
)
# ggsave() picks up the plot just built above (last_plot() default).
ggsave(
paste0("figures/", fname, "-", quo_name(varname),".png"),
width = 19, height = 19/1.35,
dpi = 300, units = "cm"
)
}
# Generate one "age at date of interview" figure per outcome variable:
# parse_quo() turns each variable-name string in vars$varname into a quosure
# that plot_agedoi_freq() unquotes (!!) inside its dplyr pipeline.
purrr::walk(
vars$varname,
~plot_agedoi_freq(ess, parse_quo(.x, env = current_env()), vars, labels)
)
#plot_agedoi_freq(allbus, polintr, fname = "test")
#plot_agedoi_freq(allbus, mmbprty, fname = "agedoi-allbus")
|
#' Solve a deterministic SIR model and compute epidemic summaries
#'
#' Numerically integrates a deterministic Susceptible-Infected-Recovered
#' (SIR) model (time in units of the mean infectious period, 1/gamma) until
#' the epidemic has burned out, then returns the full trajectory, the final
#' size, and the average number of descendant infections (ANDI).
#'
#' @param N       total population size
#' @param R0      basic reproduction number; the transmission rate is
#'                beta = R0 * gamma
#' @param fsusc   fraction of the population initially susceptible
#' @param delta_t integration time step (default 0.01)
#' @return a list with components \code{results} (trajectory data frame with
#'   derived columns), \code{final_size} (attack rate as a fraction of N),
#'   and \code{ANDI}
#' @export
SIR_solve_model <- function(N,
                            R0,
                            fsusc,
                            delta_t = 0.01){
  # Initial conditions: a single index case.
  I_0 <- 1
  S_0 <- fsusc*N - I_0
  R_0 <- (1 - fsusc)*N
  # note that time is in units of 1/gamma
  gamma <- 1/1
  beta  <- R0*gamma
  vparameters <- c(gamma = gamma, beta = beta)
  inits <- c(S = S_0, I = I_0, R = R_0)
  # Integrate over an ever-doubling time horizon until fewer than 1e-4
  # infecteds remain at the end of the run, i.e. the epidemic is over.
  tend <- 1*365
  iend <- 1
  while (iend > 0.0001){
    vt <- seq(0, tend, delta_t)
    sirmodel <- as.data.frame(lsoda(inits, vt, SIRfunc, vparameters))
    sirmodel <- subset(sirmodel, !is.na(S + I + R))
    iend <- sirmodel$I[nrow(sirmodel)]
    tend <- tend*2
  }
  minS  <- min(sirmodel$S, na.rm = TRUE)
  maxS  <- max(sirmodel$S, na.rm = TRUE)
  final <- (maxS - minS)/N   # final size (attack rate) as a fraction of N
  ##############################################################################
  # There is a problem when I drops below 1 while S - Sinfty is still
  # non-negligible. For high R0, (S - Sinfty) drops quickly and is close to
  # zero by the time I drops below 1; in contrast, for R0 = 1.05 it is still
  # over 16 when I drops below 1. The 1/I weights below become > 1
  # (meaningless) in that regime, so they are zeroed out and their
  # probability mass is lumped onto the last valid time step.
  ##############################################################################
  a <- sirmodel
  a$newI  <- beta*a$S*a$I/N      # incidence at each time step
  a$wprob <- a$newI/sum(a$newI)  # prob. that a random infectee arises at t
  a$wfraction_responsible_for <- 1/(a$I)
  a$wfraction_responsible_for[a$wfraction_responsible_for > 1] <- 0
  iind <- which(a$wfraction_responsible_for == 0)
  # Guard against an empty or leading index set: indexing with iind[1] - 1
  # unconditionally would give an NA subscript (error) when iind is empty,
  # or a silent zero-index no-op when the first element is position 1.
  if (length(iind) > 0 && iind[1] > 1){
    n <- iind[1] - 1
    a$wprob[n] <- sum(a$wprob[iind])
  }
  a$Nafter <- a$S - minS                     # infections still to come at t
  a$NDI    <- a$Nafter*a$wfraction_responsible_for
  ANDI     <- weighted.mean(a$NDI, a$wprob)  # avg. number of descendants
  return(list(results = a,
              final_size = final,
              ANDI = ANDI
  ))
} # end solve_model function definition
| /R/SIR_solve_model.R | no_license | smtowers/ANDI | R | false | false | 2,015 | r | #' A function to calculate the deterministic estimates of the number
#' infected versus time and the average number of descendant infections (ANDI)
#' for a deterministic Susceptible, Infected, Recovered (SIR) model
#' @export
SIR_solve_model = function(N,
R0,
fsusc,
delta_t=0.01){
I_0 = 1
S_0 = fsusc*N-I_0
R_0 = (1-fsusc)*N
tend = 1*365
# note that time is in units of 1/gamma
tbeg = 0
gamma = 1/1
beta = R0*gamma
vparameters = c(gamma=gamma,beta=beta)
inits = c(S=S_0,I=I_0,R=R_0)
iend = 1
while (iend>0.0001){
vt = seq(0,tend,delta_t)
sirmodel = as.data.frame(lsoda(inits, vt, SIRfunc, vparameters))
sirmodel = subset(sirmodel,!is.na(S+I+R))
iend = sirmodel$I[nrow(sirmodel)]
tend = tend*2
}
minS = min(sirmodel$S,na.rm=T)
final = (max(sirmodel$S,na.rm=T)-min(sirmodel$S,na.rm=T))/N
##################################################################################
##################################################################################
# there is a problem when I goes to less than 1, but S-Sinfty is still non-negligble
# For high R0, (S-Sinfty) drops quickly and is close to zero when I drops below 1.
# In contrast, for R0=1.05 (S-Sinfty) is still over 16 when I
# drops below 1
##################################################################################
a = sirmodel
a$newI = beta*a$S*a$I/N
a$wprob = a$newI/sum(a$newI)
a$wfraction_responsible_for = 1/(a$I)
a$wfraction_responsible_for[a$wfraction_responsible_for>1] = 0
iind = which(a$wfraction_responsible_for==0)
n = iind[1]-1
a$wprob[n] = sum(a$wprob[iind])
a$Nafter = a$S-minS
a$NDI = a$Nafter*a$wfraction_responsible_for
ANDI = weighted.mean(a$NDI,a$wprob)
return(list(results = a,
final_size = final,
ANDI = ANDI
))
} # end solve_model function definition
|
## Scatter plot of mutation number vs. sequencing depth (log10 reads) with a
## linear fit, for supplementary figure S7.
library(ggplot2)
library(ggthemes)
library(scales)
# Tab-separated input; must contain (at least) columns `Reads` and `Munum`.
data = read.table('figureS7.depth.muta.list',header = TRUE,sep = "\t")
ggplot(data = data, mapping = aes(x = Reads,y = Munum)) +
geom_point(size = 2,color='steelblue') + geom_smooth(method = lm,alpha=0,color='black')+
theme_bw()+#scale_x_continuous(breaks=datac$number, labels = datac$xzhou)+
theme(axis.text.x=element_text(size=18,angle=0),axis.text.y=element_text(size=18),
axis.title.x=element_text(size=22),axis.title.y=element_text(size=22),
panel.border = element_blank())+
# NOTE(review): stat_cor() comes from ggpubr, which is not loaded above --
# confirm library(ggpubr) is attached elsewhere before running this script.
labs(y = 'Mutation number',x='Reads(log10)')+stat_cor(data=data, method = "pearson",size=6,label.x.npc="middle",label.y.npc="top")+
annotate(geom="text",x=7.5,y=18, label='y=7.41+0.363*x',size=8,angle=0,color="black")
# Build a plotmath label string for the linear fit of Munum on Reads,
# e.g. italic(Munum) == a + b %.% italic(Reads) * "," ~~ italic(r)^2 ~ "=" ~ r2,
# with the fitted intercept (a), slope (b) and R^2 (r2) substituted in.
lm_eqn <- function(data) {
  fit <- lm(Munum ~ Reads, data)
  coefs <- coef(fit)
  # Rounded pieces to inject into the plotmath expression below.
  pieces <- list(
    a  = as.numeric(format(coefs[1], digits = 3)),
    b  = as.numeric(format(coefs[2], digits = 3)),
    r2 = format(summary(fit)$r.squared, digits = 3)
  )
  label <- substitute(
    italic(Munum) == a + b %.% italic(Reads) * "," ~~ italic(r)^2 ~ "=" ~ r2,
    pieces
  )
  # Deparse to a character string suitable for later parsing as plotmath.
  as.character(as.expression(label))
}
| /SupplementSource/figureS7.plot.R | no_license | NewtonLeibniz/Manuscript.UAE.Figure.Source | R | false | false | 1,209 | r | library(ggplot2)
library(ggthemes)
library(scales)
data = read.table('figureS7.depth.muta.list',header = TRUE,sep = "\t")
ggplot(data = data, mapping = aes(x = Reads,y = Munum)) +
geom_point(size = 2,color='steelblue') + geom_smooth(method = lm,alpha=0,color='black')+
theme_bw()+#scale_x_continuous(breaks=datac$number, labels = datac$xzhou)+
theme(axis.text.x=element_text(size=18,angle=0),axis.text.y=element_text(size=18),
axis.title.x=element_text(size=22),axis.title.y=element_text(size=22),
panel.border = element_blank())+
labs(y = 'Mutation number',x='Reads(log10)')+stat_cor(data=data, method = "pearson",size=6,label.x.npc="middle",label.y.npc="top")+
annotate(geom="text",x=7.5,y=18, label='y=7.41+0.363*x',size=8,angle=0,color="black")
# Build a plotmath label string ("Munum == a + b . Reads, r^2 = ...") from a
# linear regression of mutation number (Munum) on read depth (Reads).
lm_eqn = function(data){
# Fit the same linear model used by the figure's geom_smooth(method = lm).
m=lm(Munum ~ Reads, data)
# substitute() injects the rounded intercept (a), slope (b) and R^2 (r2)
# into a plotmath expression; %.% renders as a centered dot.
eq <- substitute(italic(Munum) == a+b %.% italic(Reads)*","~~italic(r)^2~"="~r2,
list(a = as.numeric(format(coef(m)[1], digits = 3)),
b = as.numeric(format(coef(m)[2], digits = 3)),
r2 = format(summary(m)$r.squared, digits = 3)))
# Deparse to a character string suitable for later parsing as plotmath.
as.character(as.expression(eq))
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/podzial.R
\name{podziel}
\alias{podziel}
\title{Podzial mandatow (i ew. innych rzeczy)}
\usage{
podziel(x, m, regula)
}
\arguments{
\item{x}{wektor z liczbą głosów (wag)}
\item{m}{liczba mandatów do podziału}
\item{regula}{ciąg znaków definiujący metodę podziału; jeden z: "D'Hondt", "Hamilton", "Hare-Niemeyer", "Jefferson", "Saint-League", "Webster", "DH", "H", "HN", "J", "SL", "W"}
}
\value{
funkcja zwraca wektor liczb całkowitych o długości równej długości wektora \code{x}
}
\description{
Funkcja wylicza podział mandatów (ew. innych niepodzielnych rzeczy) na podstawie rozkładu głosów (ew. innych wag).
}
\details{
Równoważne sobie wartości parametru \code{regula}:
\itemize{
\item "Saint-League", "Webster", "SL", "W";
\item "D'Hondt", "Jefferson", "DH", "J";
\item "Hamilton", "Hare-Niemeyer", "H", "HN".
}
}
\examples{
v1 = c(47, 16, 15.8, 12, 6.1, 3.1) * 1000
podziel(v1, 10, "SL")
podziel(v1, 10, "DH")
podziel(v1, 10, "HN")
# paradoks Alabamy
vA = c(15, 15, 9, 5, 5, 2) * 100
podziel(vA, 25, "HN")
podziel(vA, 26, "HN")
}
| /man/podziel.Rd | no_license | tzoltak/disprop | R | false | false | 1,148 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/podzial.R
\name{podziel}
\alias{podziel}
\title{Podzial mandatow (i ew. innych rzeczy)}
\usage{
podziel(x, m, regula)
}
\arguments{
\item{x}{wektor z liczbą głosów (wag)}
\item{m}{liczba mandatów do podziału}
\item{regula}{ciąg znaków definiujący metodę podziału; jeden z: "D'Hondt", "Hamilton", "Hare-Niemeyer", "Jefferson", "Saint-League", "Webster", "DH", "H", "HN", "J", "SL", "W"}
}
\value{
funkcja zwraca wektor liczb całkowitych o długości równej długości wektora \code{x}
}
\description{
Funkcja wylicza podział mandatów (ew. innych niepodzielnych rzeczy) na podstawie rozkładu głosów (ew. innych wag).
}
\details{
Równoważne sobie wartości parametru \code{regula}:
\itemize{
\item "Saint-League", "Webster", "SL", "W";
\item "D'Hondt", "Jefferson", "DH", "J";
\item "Hamilton", "Hare-Niemeyer", "H", "HN".
}
}
\examples{
v1 = c(47, 16, 15.8, 12, 6.1, 3.1) * 1000
podziel(v1, 10, "SL")
podziel(v1, 10, "DH")
podziel(v1, 10, "HN")
# paradoks Alabamy
vA = c(15, 15, 9, 5, 5, 2) * 100
podziel(vA, 25, "HN")
podziel(vA, 26, "HN")
}
|
\name{backdoor}
\alias{backdoor}
\title{Find Set Satisfying the Generalized Backdoor Criterion}
\description{
This function first checks if the total causal effect of
one variable (\code{x}) onto another variable (\code{y}) is
identifiable via the generalized backdoor criterion, and if this is
the case it explicitly gives a set of variables that satisfies the
generalized backdoor criterion with respect to \code{x} and \code{y}
in the given graph.
}
\usage{
backdoor(amat, x, y, type = "pag", max.chordal = 10, verbose=FALSE)
}
\arguments{
\item{amat}{adjacency matrix (see Details for coding) of the given
graph specified in \code{type}.}
\item{x,y}{(integer) position of variable \code{x} or \code{y} in the
adjacency matrix.}
\item{type}{string specifying the type of graph of the adjacency matrix
\code{amat}. It can be a DAG (type="dag"), a CPDAG (type="cpdag"), a
MAG (type="mag"), or a PAG (type="pag").}
\item{max.chordal}{only if \code{type = "mag"}, is used in
\code{\link{pag2magAM}} to determine paths too large to be checked
for chordality.}
\item{verbose}{logical; if true, some output is produced during
computation.}
}
\details{
This function is a generalization of Pearl's backdoor criterion, see
Pearl (1993), defined for directed acyclic graphs (DAGs), for single
interventions and single outcome variable to more general types of
graphs (CPDAGs, MAGs, and PAGs) that describe Markov equivalence
classes of DAGs with and without latent variables but without
selection variables. For more details see Maathuis and Colombo (2013).
The motivation to find a set W that satisfies the generalized backdoor
criterion with respect to \code{x} and \code{y}
in the given graph relies on the result of the generalized backdoor adjustment:
\emph{If a set of variables W satisfies the generalized backdoor
criterion relative to \code{x} and \code{y} in the given graph, then
the causal effect of \code{x} on \code{y} is identifiable and is given
by} \deqn{%
P(Y|do(X = x)) = \sum_W P(Y|X,W) \cdot P(W).}{%
P(Y|do(X = x)) = sum_W P(Y|X,W) * P(W).}
This result allows to write post-intervention densities (the one
written using Pearl's do-calculus) using only observational densities
estimated from the data.
If the input graph is a DAG (\code{type="dag"}), this function reduces
to Pearl's backdoor criterion for single interventions and single
outcome variable, and the parents of \code{x} in the DAG satisfy the
backdoor criterion unless \code{y} is a parent of \code{x}.
If the input graph is a CPDAG C (\code{type="cpdag"}), a MAG M
(\code{type="mag"}), or a PAG P (\code{type="pag"}) (with both M and P
not allowing selection variables), this function first checks if the
total causal effect of \code{x} on \code{y} is identifiable via the
generalized backdoor criterion (see Maathuis and Colombo, 2013). If
the effect is not identifiable in this way, the output is
NA. Otherwise, an explicit set W that satisfies the generalized
backdoor criterion with respect to \code{x} and \code{y} in the given
graph is found.
At this moment this function is not able to work with an RFCI-PAG.
It is important to note that there can be pair of nodes \code{x} and
\code{y} for which there is no set W that satisfies the generalized
backdoor criterion, but the total causal effect might be identifiable
via some other technique.
Coding of adjacency matrix:
If \code{type = dag} or \code{type = cpdag}: coding 0/1 for no edge
or tail / arrowhead; e.g. \code{amat[a,b] = 0} and \code{amat[b,a] =
1} implies a -> b.
Else: coding 0,1,2,3 for no edge, circle, arrowhead, tail; e.g.,
\code{amat[a,b] = 2} and \code{amat[b,a] = 3} implies a -> b.
}
\value{
Either NA if the total causal effect is not identifiable via the
generalized backdoor criterion, or a set if the effect is identifiable
via the generalized backdoor criterion. Note that if the set W is
equal to the empty set, the output is NULL.
}
\references{
M.H. Maathuis and D. Colombo (2013). A generalized backdoor
criterion. Annals of Statistics 43 1060-1088.
J. Pearl (1993). Comment: Graphical models, causality and intervention.
\emph{Statistical Science} \bold{8}, 266--269.
}
\author{Diego Colombo and Markus Kalisch (\email{kalisch@stat.math.ethz.ch})}
\seealso{\code{\link{pc}} for estimating a CPDAG, \code{\link{dag2pag}}
and \code{\link{fci}} for estimating a PAG, and
\code{\link{pag2magAM}} for estimating a MAG.
}
\examples{%% note: Tests in ../tests/test_backdoor.R
#####################################################################
##DAG
#####################################################################
## Simulate the true DAG
set.seed(123)
p <- 7
myDAG <- randomDAG(p, prob = 0.2) ## true DAG
## Extract the adjacency matrix of the true DAG
true.amat <- (amat <- as(myDAG, "matrix")) != 0 # TRUE/FALSE <==> 1/0
print.table(1*true.amat, zero.=".") # "visualization"
## Compute the effect using the generalized backdoor criterion
backdoor(true.amat, 5, 7, type="dag")
\dontshow{stopifnot(backdoor(true.amat, 5, 7, type="dag") == 3)}
#####################################################################
##CPDAG
#####################################################################
##################################################
## Example not identifiable
## Maathuis and Colombo (2013), Fig. 3, p.14
##################################################
## create the graph
p <- 5
. <- 0
amat <- rbind(c(.,.,1,1,1),
c(.,.,1,1,1),
c(.,.,.,1,.),
c(.,.,.,.,1),
c(.,.,.,.,.))
colnames(amat) <- rownames(amat) <- as.character(1:5)
V <- as.character(1:5)
edL <- vector("list",length=5)
names(edL) <- V
edL[[1]] <- list(edges=c(3,4,5),weights=c(1,1,1))
edL[[2]] <- list(edges=c(3,4,5),weights=c(1,1,1))
edL[[3]] <- list(edges=4,weights=c(1))
edL[[4]] <- list(edges=5,weights=c(1))
g <- new("graphNEL", nodes=V, edgeL=edL, edgemode="directed")
## estimate the true CPDAG
myCPDAG <- dag2cpdag(g)
## Extract the adjacency matrix of the true CPDAG
true.amat <- (as(myCPDAG, "matrix") != 0) # 1/0 <==> TRUE/FALSE
## The effect is not identifiable, in fact:
backdoor(true.amat, 3, 5, type="cpdag")
\dontshow{stopifnot(identical(NA, backdoor(true.amat, 3, 5, type="cpdag")))}
##################################################
## Example identifiable
## Maathuis and Colombo (2013), Fig. 4, p.15
##################################################
## create the graph
p <- 6
amat <- rbind(c(0,0,1,1,0,1), c(0,0,1,1,0,1), c(0,0,0,0,1,0),
c(0,0,0,0,1,1), c(0,0,0,0,0,0), c(0,0,0,0,0,0))
colnames(amat) <- rownames(amat) <- as.character(1:6)
V <- as.character(1:6)
edL <- vector("list",length=6)
names(edL) <- V
edL[[1]] <- list(edges=c(3,4,6),weights=c(1,1,1))
edL[[2]] <- list(edges=c(3,4,6),weights=c(1,1,1))
edL[[3]] <- list(edges=5,weights=c(1))
edL[[4]] <- list(edges=c(5,6),weights=c(1,1))
g <- new("graphNEL", nodes=V, edgeL=edL, edgemode="directed")
## estimate the true CPDAG
myCPDAG <- dag2cpdag(g)
## Extract the adjacency matrix of the true CPDAG
true.amat <- as(myCPDAG, "matrix") != 0 # 1/0
## The effect is identifiable and
backdoor(true.amat, 6, 3, type="cpdag")
\dontshow{stopifnot(backdoor(true.amat, 6, 3, type="cpdag") == 1:2)}
##################################################################
##PAG
##################################################################
##################################################
## Example identifiable
## Maathuis and Colombo (2013), Fig. 7, p.17
##################################################
## create the graph
p <- 7
amat <- t(matrix(c(0,0,1,1,0,0,0, 0,0,1,1,0,0,0, 0,0,0,1,0,1,0,
0,0,0,0,0,0,1, 0,0,0,0,0,1,1, 0,0,0,0,0,0,0,
0,0,0,0,0,0,0), 7, 7))
colnames(amat) <- rownames(amat) <- as.character(1:7)
V <- as.character(1:7)
edL <- vector("list",length=7)
names(edL) <- V
edL[[1]] <- list(edges=c(3,4),weights=c(1,1))
edL[[2]] <- list(edges=c(3,4),weights=c(1,1))
edL[[3]] <- list(edges=c(4,6),weights=c(1,1))
edL[[4]] <- list(edges=7,weights=c(1))
edL[[5]] <- list(edges=c(6,7),weights=c(1,1))
g <- new("graphNEL", nodes=V, edgeL=edL, edgemode="directed")
L <- 5
## compute the true covariance matrix of g
cov.mat <- trueCov(g)
## transform covariance matrix into a correlation matrix
true.corr <- cov2cor(cov.mat)
suffStat <- list(C=true.corr, n=10^9)
indepTest <- gaussCItest
## estimate the true PAG
true.pag <- dag2pag(suffStat, indepTest, g, L, alpha = 0.9999)
## The effect is identifiable and the backdoor set is {1,2}:
backdoor(true.pag@amat, 6, 3, type="pag")
}
\keyword{multivariate}
\keyword{models}
\keyword{graphs}
| /man/backdoor.Rd | no_license | igraph/pcalg | R | false | false | 8,781 | rd | \name{backdoor}
\alias{backdoor}
\title{Find Set Satisfying the Generalized Backdoor Criterion}
\description{
This function first checks if the total causal effect of
one variable (\code{x}) onto another variable (\code{y}) is
identifiable via the generalized backdoor criterion, and if this is
the case it explicitly gives a set of variables that satisfies the
generalized backdoor criterion with respect to \code{x} and \code{y}
in the given graph.
}
\usage{
backdoor(amat, x, y, type = "pag", max.chordal = 10, verbose=FALSE)
}
\arguments{
\item{amat}{adjacency matrix (see Details for coding) of the given
graph specified in \code{type}.}
\item{x,y}{(integer) position of variable \code{x} or \code{y} in the
adjacency matrix.}
\item{type}{string specifying the type of graph of the adjacency matrix
\code{amat}. It can be a DAG (type="dag"), a CPDAG (type="cpdag"), a
MAG (type="mag"), or a PAG (type="pag").}
\item{max.chordal}{only if \code{type = "mag"}, is used in
\code{\link{pag2magAM}} to determine paths too large to be checked
for chordality.}
\item{verbose}{logical; if true, some output is produced during
computation.}
}
\details{
This function is a generalization of Pearl's backdoor criterion, see
Pearl (1993), defined for directed acyclic graphs (DAGs), for single
interventions and single outcome variable to more general types of
graphs (CPDAGs, MAGs, and PAGs) that describe Markov equivalence
classes of DAGs with and without latent variables but without
selection variables. For more details see Maathuis and Colombo (2013).
The motivation to find a set W that satisfies the generalized backdoor
criterion with respect to \code{x} and \code{y}
in the given graph relies on the result of the generalized backdoor adjustment:
\emph{If a set of variables W satisfies the generalized backdoor
criterion relative to \code{x} and \code{y} in the given graph, then
the causal effect of \code{x} on \code{y} is identifiable and is given
by} \deqn{%
P(Y|do(X = x)) = \sum_W P(Y|X,W) \cdot P(W).}{%
P(Y|do(X = x)) = sum_W P(Y|X,W) * P(W).}
This result allows to write post-intervention densities (the one
written using Pearl's do-calculus) using only observational densities
estimated from the data.
If the input graph is a DAG (\code{type="dag"}), this function reduces
to Pearl's backdoor criterion for single interventions and single
outcome variable, and the parents of \code{x} in the DAG satisfy the
backdoor criterion unless \code{y} is a parent of \code{x}.
If the input graph is a CPDAG C (\code{type="cpdag"}), a MAG M
(\code{type="mag"}), or a PAG P (\code{type="pag"}) (with both M and P
not allowing selection variables), this function first checks if the
total causal effect of \code{x} on \code{y} is identifiable via the
generalized backdoor criterion (see Maathuis and Colombo, 2013). If
the effect is not identifiable in this way, the output is
NA. Otherwise, an explicit set W that satisfies the generalized
backdoor criterion with respect to \code{x} and \code{y} in the given
graph is found.
At this moment this function is not able to work with an RFCI-PAG.
It is important to note that there can be pair of nodes \code{x} and
\code{y} for which there is no set W that satisfies the generalized
backdoor criterion, but the total causal effect might be identifiable
via some other technique.
Coding of adjacency matrix:
If \code{type = dag} or \code{type = cpdag}: coding 0/1 for no edge
or tail / arrowhead; e.g. \code{amat[a,b] = 0} and \code{amat[b,a] =
1} implies a -> b.
Else: coding 0,1,2,3 for no edge, circle, arrowhead, tail; e.g.,
\code{amat[a,b] = 2} and \code{amat[b,a] = 3} implies a -> b.
}
\value{
Either NA if the total causal effect is not identifiable via the
generalized backdoor criterion, or a set if the effect is identifiable
via the generalized backdoor criterion. Note that if the set W is
equal to the empty set, the output is NULL.
}
\references{
M.H. Maathuis and D. Colombo (2013). A generalized backdoor
criterion. Annals of Statistics 43 1060-1088.
J. Pearl (1993). Comment: Graphical models, causality and intervention.
\emph{Statistical Science} \bold{8}, 266--269.
}
\author{Diego Colombo and Markus Kalisch (\email{kalisch@stat.math.ethz.ch})}
\seealso{\code{\link{pc}} for estimating a CPDAG, \code{\link{dag2pag}}
and \code{\link{fci}} for estimating a PAG, and
\code{\link{pag2magAM}} for estimating a MAG.
}
\examples{%% note: Tests in ../tests/test_backdoor.R
#####################################################################
##DAG
#####################################################################
## Simulate the true DAG
set.seed(123)
p <- 7
myDAG <- randomDAG(p, prob = 0.2) ## true DAG
## Extract the adjacency matrix of the true DAG
true.amat <- (amat <- as(myDAG, "matrix")) != 0 # TRUE/FALSE <==> 1/0
print.table(1*true.amat, zero.=".") # "visualization"
## Compute the effect using the generalized backdoor criterion
backdoor(true.amat, 5, 7, type="dag")
\dontshow{stopifnot(backdoor(true.amat, 5, 7, type="dag") == 3)}
#####################################################################
##CPDAG
#####################################################################
##################################################
## Example not identifiable
## Maathuis and Colombo (2013), Fig. 3, p.14
##################################################
## create the graph
p <- 5
. <- 0
amat <- rbind(c(.,.,1,1,1),
c(.,.,1,1,1),
c(.,.,.,1,.),
c(.,.,.,.,1),
c(.,.,.,.,.))
colnames(amat) <- rownames(amat) <- as.character(1:5)
V <- as.character(1:5)
edL <- vector("list",length=5)
names(edL) <- V
edL[[1]] <- list(edges=c(3,4,5),weights=c(1,1,1))
edL[[2]] <- list(edges=c(3,4,5),weights=c(1,1,1))
edL[[3]] <- list(edges=4,weights=c(1))
edL[[4]] <- list(edges=5,weights=c(1))
g <- new("graphNEL", nodes=V, edgeL=edL, edgemode="directed")
## estimate the true CPDAG
myCPDAG <- dag2cpdag(g)
## Extract the adjacency matrix of the true CPDAG
true.amat <- (as(myCPDAG, "matrix") != 0) # 1/0 <==> TRUE/FALSE
## The effect is not identifiable, in fact:
backdoor(true.amat, 3, 5, type="cpdag")
\dontshow{stopifnot(identical(NA, backdoor(true.amat, 3, 5, type="cpdag")))}
##################################################
## Example identifiable
## Maathuis and Colombo (2013), Fig. 4, p.15
##################################################
## create the graph
p <- 6
amat <- rbind(c(0,0,1,1,0,1), c(0,0,1,1,0,1), c(0,0,0,0,1,0),
c(0,0,0,0,1,1), c(0,0,0,0,0,0), c(0,0,0,0,0,0))
colnames(amat) <- rownames(amat) <- as.character(1:6)
V <- as.character(1:6)
edL <- vector("list",length=6)
names(edL) <- V
edL[[1]] <- list(edges=c(3,4,6),weights=c(1,1,1))
edL[[2]] <- list(edges=c(3,4,6),weights=c(1,1,1))
edL[[3]] <- list(edges=5,weights=c(1))
edL[[4]] <- list(edges=c(5,6),weights=c(1,1))
g <- new("graphNEL", nodes=V, edgeL=edL, edgemode="directed")
## estimate the true CPDAG
myCPDAG <- dag2cpdag(g)
## Extract the adjacency matrix of the true CPDAG
true.amat <- as(myCPDAG, "matrix") != 0 # 1/0
## The effect is identifiable and
backdoor(true.amat, 6, 3, type="cpdag")
\dontshow{stopifnot(backdoor(true.amat, 6, 3, type="cpdag") == 1:2)}
##################################################################
##PAG
##################################################################
##################################################
## Example identifiable
## Maathuis and Colombo (2013), Fig. 7, p.17
##################################################
## create the graph
p <- 7
amat <- t(matrix(c(0,0,1,1,0,0,0, 0,0,1,1,0,0,0, 0,0,0,1,0,1,0,
0,0,0,0,0,0,1, 0,0,0,0,0,1,1, 0,0,0,0,0,0,0,
0,0,0,0,0,0,0), 7, 7))
colnames(amat) <- rownames(amat) <- as.character(1:7)
V <- as.character(1:7)
edL <- vector("list",length=7)
names(edL) <- V
edL[[1]] <- list(edges=c(3,4),weights=c(1,1))
edL[[2]] <- list(edges=c(3,4),weights=c(1,1))
edL[[3]] <- list(edges=c(4,6),weights=c(1,1))
edL[[4]] <- list(edges=7,weights=c(1))
edL[[5]] <- list(edges=c(6,7),weights=c(1,1))
g <- new("graphNEL", nodes=V, edgeL=edL, edgemode="directed")
L <- 5
## compute the true covariance matrix of g
cov.mat <- trueCov(g)
## transform covariance matrix into a correlation matrix
true.corr <- cov2cor(cov.mat)
suffStat <- list(C=true.corr, n=10^9)
indepTest <- gaussCItest
## estimate the true PAG
true.pag <- dag2pag(suffStat, indepTest, g, L, alpha = 0.9999)
## The effect is identifiable and the backdoor set is {1,2}:
backdoor(true.pag@amat, 6, 3, type="pag")
}
\keyword{multivariate}
\keyword{models}
\keyword{graphs}
|
#' A user-friendly dictionary of the popler metadata
#'
#' Provides information on the variables of metadata contained in the popler
#' database, and the kind of data contained in those variables.
#'
#' @param full_tbl logical; if \code{TRUE} function returns the variables
#' contained in the full main table. If \code{FALSE}, functions returns only the
#' standard variables. Default is \code{FALSE}.
#' @param md_file Specify the filename and location for
#' the generated markdown file (optional)
#' @param html_file Specify the filename and location for the
#' generated html file (optional)
#'
#' @return This function is called for its side effects and does not
#' return an R object.
#'
#' @importFrom rmarkdown render
#' @importFrom utils browseURL
#' @export
#'
#' @examples
#' \dontrun{
#' # Full dictionary
#' pplr_report_dictionary(full_tbl = TRUE)
#'
#' # "Abridged" version
#' pplr_report_dictionary()
#' }
#'
pplr_report_dictionary <- function(full_tbl = FALSE, md_file = NULL, html_file = NULL) {
  # Choose the table of contents: every variable of the full main table,
  # or only the standard (abridged) set.
  if (full_tbl) {
    TOC <- int.data$explanations
    # remove contents that do not work
    TOC <- TOC[-77, ]
  } else {
    TOC <- int.data$explain_short
  }
  # Default output paths point inside the installed package directory.
  # NOTE(review): writing into the library tree fails on read-only
  # installations; tempdir() would be safer, but the defaults are kept
  # for backward compatibility.
  if (is.null(md_file)) {
    md_file <- file.path(system.file(package = "popler"), "dictionary.Rmd")
  }
  if (is.null(html_file)) {
    html_file <- file.path(system.file(package = "popler"), "dictionary.html")
  }
  # Variables whose entries are collapsed onto one line (" , ") rather
  # than expanded one per line ("<br>") in the generated markdown.
  wide <- c("proj_metadata_key",
            "lter_project_fkey",
            "studystartyr",
            "studyendyr",
            "spatial_replication_level_1_number-of_unique_reps",
            "spatial_replication_level_2_number-of_unique_reps",
            "spatial_replication_level_3_number-of_unique_reps",
            "spatial_replication_level_4_number-of_unique_reps",
            "spatial_replication_level_5_number-of_unique_reps",
            "tot_spat_rep",
            "duration_years")
  # Fetch dictionary entries for every TOC variable.
  # NOTE(review): pplr_dictionary() appears to take bare (unquoted)
  # variable names, so the call is assembled as text; eval(parse()) is
  # kept because an NSE interface cannot be invoked through do.call()
  # with character names -- confirm against pplr_dictionary().
  entries <- eval(parse(text = paste0("pplr_dictionary(",
                                      paste0(TOC[, 1], collapse = " , "),
                                      ")")))
  # Markdown templates; _NAME_, _DEFINITION_, _ENTRY_ and @@@ are
  # placeholders substituted per TOC row below.
  header <- '
---
output:
html_document:
self_contained: no
---
<br>
<img src= `r system.file("icon.png",package="popler")` alt="Drawing" style="height: 110px; float: right"/>
<br>
# *popler* Dictionary
***
*Before publishing any data gathered from popler, please review and adhere to the [LTER Network Data Access Policy, Data Access Requirements, and General Data Use Agreement](https://lternet.edu/policies/data-access), as well as any additional requirements indicated by the authors of each study.*
***
<a name="defs"></a>
Column Name | Definition
--- | ----------------------------
'
  defs <- '| [_NAME_](#C@@@) | _DEFINITION_ |
'
  end_defs <- '| | <span style="color:white"> ........................................................................................................ </span> |
***
'
  ents <- '
<a name=C@@@></a>
#### _NAME_
**_DEFINITION_**
_ENTRY_
<div style="text-align: right"> *[back to Definitions](#defs)* </div>
***
'
  # Fill the templates row by row. fixed = TRUE so that "&" or "\\" inside
  # variable names/definitions are inserted literally instead of being
  # interpreted as regex backreferences by gsub().
  defs_new <- rep(NA_character_, nrow(TOC))
  ents_new <- rep(NA_character_, nrow(TOC))
  for (i in seq_along(defs_new)) {
    # Table-of-contents row: anchor link plus definition.
    defs_new[i] <- gsub("_NAME_", TOC[i, 1], defs, fixed = TRUE)
    defs_new[i] <- gsub("_DEFINITION_", TOC[i, 2], defs_new[i], fixed = TRUE)
    defs_new[i] <- gsub("@@@", i, defs_new[i], fixed = TRUE)
    # Entry section: wide variables collapse onto one line.
    coll <- if (TOC[i, 1] %in% wide) " , " else "<br>"
    ents_new[i] <- gsub("_NAME_", TOC[i, 1], ents, fixed = TRUE)
    ents_new[i] <- gsub("_DEFINITION_", TOC[i, 2], ents_new[i], fixed = TRUE)
    ents_new[i] <- gsub("_ENTRY_", paste0(sort(entries[[i]]), collapse = coll),
                        ents_new[i], fixed = TRUE)
    ents_new[i] <- gsub("@@@", i, ents_new[i], fixed = TRUE)
  }
  # Write the markdown file directly. cat(file =) replaces the original
  # sink()/cat()/sink() sequence, which left the R session's output
  # diverted to the file if cat() or iconv() errored.
  cat(iconv(header, to = "UTF-8"),
      iconv(defs_new, to = "UTF-8"),
      iconv(end_defs, to = "UTF-8"),
      iconv(ents_new, to = "UTF-8"),
      file = md_file)
  # Render to HTML and open it in the browser.
  rmarkdown::render(md_file, quiet = TRUE, encoding = "UTF-8")
  browseURL(html_file)
}
| /R/report_dictionary.R | permissive | alegent/popler | R | false | false | 4,404 | r | #' A user-friendly dictionary of the popler metadata
#'
#' Provides information on the variables of metadata contained in the popler
#' database, and the kind of data contained in those variables.
#'
#' @param full_tbl logical; if \code{TRUE} function returns the variables
#' contained in the full main table. If \code{FALSE}, the function returns only the
#' standard variables. Default is \code{FALSE}.
#' @param md_file Specify the filename and location for
#' the generated markdown file (optional)
#' @param html_file Specify the filename and location for the
#' generated html file (optional)
#'
#' @return This function is called for its side effects and does not
#' return an R object.
#'
#' @importFrom rmarkdown render
#' @importFrom utils browseURL
#' @export
#'
#' @examples
#' \dontrun{
#' # Full dictionary
#' pplr_report_dictionary(full_tbl = TRUE)
#'
#' # "Abridged" version
#' pplr_report_dictionary()
#' }
#'
pplr_report_dictionary <- function(full_tbl = FALSE, md_file = NULL, html_file = NULL) {
  # Choose the table of contents: every variable of the full main table,
  # or only the standard (abridged) set.
  if (full_tbl) {
    TOC <- int.data$explanations
    # remove contents that do not work
    TOC <- TOC[-77, ]
  } else {
    TOC <- int.data$explain_short
  }
  # Default output paths point inside the installed package directory.
  # NOTE(review): writing into the library tree fails on read-only
  # installations; tempdir() would be safer, but the defaults are kept
  # for backward compatibility.
  if (is.null(md_file)) {
    md_file <- file.path(system.file(package = "popler"), "dictionary.Rmd")
  }
  if (is.null(html_file)) {
    html_file <- file.path(system.file(package = "popler"), "dictionary.html")
  }
  # Variables whose entries are collapsed onto one line (" , ") rather
  # than expanded one per line ("<br>") in the generated markdown.
  wide <- c("proj_metadata_key",
            "lter_project_fkey",
            "studystartyr",
            "studyendyr",
            "spatial_replication_level_1_number-of_unique_reps",
            "spatial_replication_level_2_number-of_unique_reps",
            "spatial_replication_level_3_number-of_unique_reps",
            "spatial_replication_level_4_number-of_unique_reps",
            "spatial_replication_level_5_number-of_unique_reps",
            "tot_spat_rep",
            "duration_years")
  # Fetch dictionary entries for every TOC variable.
  # NOTE(review): pplr_dictionary() appears to take bare (unquoted)
  # variable names, so the call is assembled as text; eval(parse()) is
  # kept because an NSE interface cannot be invoked through do.call()
  # with character names -- confirm against pplr_dictionary().
  entries <- eval(parse(text = paste0("pplr_dictionary(",
                                      paste0(TOC[, 1], collapse = " , "),
                                      ")")))
  # Markdown templates; _NAME_, _DEFINITION_, _ENTRY_ and @@@ are
  # placeholders substituted per TOC row below.
  header <- '
---
output:
html_document:
self_contained: no
---
<br>
<img src= `r system.file("icon.png",package="popler")` alt="Drawing" style="height: 110px; float: right"/>
<br>
# *popler* Dictionary
***
*Before publishing any data gathered from popler, please review and adhere to the [LTER Network Data Access Policy, Data Access Requirements, and General Data Use Agreement](https://lternet.edu/policies/data-access), as well as any additional requirements indicated by the authors of each study.*
***
<a name="defs"></a>
Column Name | Definition
--- | ----------------------------
'
  defs <- '| [_NAME_](#C@@@) | _DEFINITION_ |
'
  end_defs <- '| | <span style="color:white"> ........................................................................................................ </span> |
***
'
  ents <- '
<a name=C@@@></a>
#### _NAME_
**_DEFINITION_**
_ENTRY_
<div style="text-align: right"> *[back to Definitions](#defs)* </div>
***
'
  # Fill the templates row by row. fixed = TRUE so that "&" or "\\" inside
  # variable names/definitions are inserted literally instead of being
  # interpreted as regex backreferences by gsub().
  defs_new <- rep(NA_character_, nrow(TOC))
  ents_new <- rep(NA_character_, nrow(TOC))
  for (i in seq_along(defs_new)) {
    # Table-of-contents row: anchor link plus definition.
    defs_new[i] <- gsub("_NAME_", TOC[i, 1], defs, fixed = TRUE)
    defs_new[i] <- gsub("_DEFINITION_", TOC[i, 2], defs_new[i], fixed = TRUE)
    defs_new[i] <- gsub("@@@", i, defs_new[i], fixed = TRUE)
    # Entry section: wide variables collapse onto one line.
    coll <- if (TOC[i, 1] %in% wide) " , " else "<br>"
    ents_new[i] <- gsub("_NAME_", TOC[i, 1], ents, fixed = TRUE)
    ents_new[i] <- gsub("_DEFINITION_", TOC[i, 2], ents_new[i], fixed = TRUE)
    ents_new[i] <- gsub("_ENTRY_", paste0(sort(entries[[i]]), collapse = coll),
                        ents_new[i], fixed = TRUE)
    ents_new[i] <- gsub("@@@", i, ents_new[i], fixed = TRUE)
  }
  # Write the markdown file directly. cat(file =) replaces the original
  # sink()/cat()/sink() sequence, which left the R session's output
  # diverted to the file if cat() or iconv() errored.
  cat(iconv(header, to = "UTF-8"),
      iconv(defs_new, to = "UTF-8"),
      iconv(end_defs, to = "UTF-8"),
      iconv(ents_new, to = "UTF-8"),
      file = md_file)
  # Render to HTML and open it in the browser.
  rmarkdown::render(md_file, quiet = TRUE, encoding = "UTF-8")
  browseURL(html_file)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/config.R
\docType{class}
\name{VitessceConfigDatasetFile}
\alias{VitessceConfigDatasetFile}
\title{VitessceConfigDatasetFile Class}
\description{
Class representing a file in a dataset in a Vitessce view config.
}
\details{
File in a dataset in a VitessceConfig
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{VitessceConfigDatasetFile$new()}}
\item \href{#method-to_list}{\code{VitessceConfigDatasetFile$to_list()}}
\item \href{#method-clone}{\code{VitessceConfigDatasetFile$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Create a new dataset file object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VitessceConfigDatasetFile$new(url, data_type, file_type)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{url}}{A URL for the file.}
\item{\code{data_type}}{A data type for the file.}
\item{\code{file_type}}{A file type for the file.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new \code{VitessceConfigDatasetFile} object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-to_list"></a>}}
\if{latex}{\out{\hypertarget{method-to_list}{}}}
\subsection{Method \code{to_list()}}{
Convert the object to an R list. Helpful when converting the config to JSON.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VitessceConfigDatasetFile$to_list()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
A \code{list} that can be serialized to JSON using \code{rjson}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VitessceConfigDatasetFile$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
| /man/VitessceConfigDatasetFile.Rd | permissive | lizhiwen1991/vitessce-htmlwidget | R | false | true | 2,211 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/config.R
\docType{class}
\name{VitessceConfigDatasetFile}
\alias{VitessceConfigDatasetFile}
\title{VitessceConfigDatasetFile Class}
\description{
Class representing a file in a dataset in a Vitessce view config.
}
\details{
File in a dataset in a VitessceConfig
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{VitessceConfigDatasetFile$new()}}
\item \href{#method-to_list}{\code{VitessceConfigDatasetFile$to_list()}}
\item \href{#method-clone}{\code{VitessceConfigDatasetFile$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
Create a new dataset file object.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VitessceConfigDatasetFile$new(url, data_type, file_type)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{url}}{A URL for the file.}
\item{\code{data_type}}{A data type for the file.}
\item{\code{file_type}}{A file type for the file.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A new \code{VitessceConfigDatasetFile} object.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-to_list"></a>}}
\if{latex}{\out{\hypertarget{method-to_list}{}}}
\subsection{Method \code{to_list()}}{
Convert the object to an R list. Helpful when converting the config to JSON.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VitessceConfigDatasetFile$to_list()}\if{html}{\out{</div>}}
}
\subsection{Returns}{
A \code{list} that can be serialized to JSON using \code{rjson}.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{VitessceConfigDatasetFile$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
#'@name readIGNScutprocess
#'@aliases readIGNScutprocess
#'@title To read the .jpeg image of lateral flow assay and apply cuts to cut the
#'control line and test line peaks of the signal intensity vs index
#'@description Function \code{readIGNScutprocess} will provide the values, which can be
#'used to extract the intensities of control line and test line peaks
#'@param imagefile the .jpeg file of lateral flow assay
#'@param fp either TRUE or FALSE
#'@param cut1 the first value on x-axis (index) to cut the peak
#'@param cut2 the second value on x-axis (index) to cut the peak
#'@param pi the intensity (y-axis) to get the processed signal. It's a cutoff intensity
#'to do the baseline correction
#'@return The plots of control line and test line peaks
#'@details Allows the user to read in the .jpeg image of a lateral flow assay and returns
#'the cut and baseline-corrected plots of the control line and test line signals.
#'@author Navneet Phogat, Matthias Kohl, \email{Matthias.Kohl@@stamats.de}
#'@keywords gnsdt
#'@examples
#'## to read and see complete plot of signal
#'file3 <- system.file("exData", "Serum30nM_2.JPG", package = "GNSplex")
#'readIGNSplot(file3)
#'## to read and cut the first peak (control line signal) and to do baseline
#'##correction
#'file3 <- system.file("exData", "Serum30nM_2.JPG", package = "GNSplex")
#'readIGNScutprocess(imagefile = file3, fp = TRUE, cut1 = 3, cut2 = 13, pi = 2.0)
#'## To cut the second peak (test line signal) and to do baseline correction
#'readIGNScutprocess(imagefile = file3, fp = FALSE, cut1 = 17, cut2 = 30, pi = 2.0)
#'@export
readIGNScutprocess <- function(imagefile, fp = TRUE, cut1 = 2, cut2 = 18, pi = 1.56) {
  # Read the lateral-flow-assay image and build a 1-D intensity profile:
  # average over image columns, then invert the row means so that darker
  # bands (stronger signal) map to larger intensity values.
  # NOTE(review): the `pi` argument shadows base::pi inside this function;
  # the name is kept for backward compatibility with existing callers.
  image1 <- readImage(files = imagefile)
  pixel_data <- image1@.Data
  column_profile <- colMeans(pixel_data)
  intensity <- 1 / rowMeans(column_profile)
  # The original TRUE and FALSE branches were byte-identical apart from
  # local variable names, so both the control-line (fp = TRUE) and
  # test-line (fp = FALSE) peaks are handled by one validated branch.
  # `fp` must be a single non-NA logical: the original `fp == TRUE`
  # comparison made `if ()` error when fp was NA.
  if (is.logical(fp) && length(fp) == 1L && !is.na(fp)) {
    # Cut the requested peak out of the profile, then baseline-correct by
    # keeping only intensities above the cutoff `pi`.
    peak <- intensity[cut1:cut2]
    plot(peak[peak > pi], xlab = "Index", ylab = "Intensity [arbitrary unit]",
         main = "Intensity vs Index")
  } else {
    warning("Provide the right value of cut of the image")
  }
}
| /R/readIGNScutprocess.R | no_license | NPhogat/GNSplex | R | false | false | 2,262 | r | #'@name readIGNScutprocess
#'@aliases readIGNScutprocess
#'@title To read the .jpeg image of lateral flow assay and apply cuts to cut the
#'control line and test line peaks of the signal intensity vs index
#'@description Function \code{readIGNScutprocess} will provide the values, which can be
#'used to extract the intensities of control line and test line peaks
#'@param imagefile the .jpeg file of lateral flow assay
#'@param fp either TRUE or FALSE
#'@param cut1 the first value on x-axis (index) to cut the peak
#'@param cut2 the second value on x-axis (index) to cut the peak
#'@param pi the intensity (y-axis) to get the processed signal. It's a cutoff intensity
#'to do the baseline correction
#'@return The plots of control line and test line peaks
#'@details Allows the user to read in the .jpeg image of a lateral flow assay and returns
#'the cut and baseline-corrected plots of the control line and test line signals.
#'@author Navneet Phogat, Matthias Kohl, \email{Matthias.Kohl@@stamats.de}
#'@keywords gnsdt
#'@examples
#'## to read and see complete plot of signal
#'file3 <- system.file("exData", "Serum30nM_2.JPG", package = "GNSplex")
#'readIGNSplot(file3)
#'## to read and cut the first peak (control line signal) and to do baseline
#'##correction
#'file3 <- system.file("exData", "Serum30nM_2.JPG", package = "GNSplex")
#'readIGNScutprocess(imagefile = file3, fp = TRUE, cut1 = 3, cut2 = 13, pi = 2.0)
#'## To cut the second peak (test line signal) and to do baseline correction
#'readIGNScutprocess(imagefile = file3, fp = FALSE, cut1 = 17, cut2 = 30, pi = 2.0)
#'@export
readIGNScutprocess <- function(imagefile, fp = TRUE, cut1 = 2, cut2 = 18, pi = 1.56) {
  # Read the lateral-flow-assay image and build a 1-D intensity profile:
  # average over image columns, then invert the row means so that darker
  # bands (stronger signal) map to larger intensity values.
  # NOTE(review): the `pi` argument shadows base::pi inside this function;
  # the name is kept for backward compatibility with existing callers.
  image1 <- readImage(files = imagefile)
  pixel_data <- image1@.Data
  column_profile <- colMeans(pixel_data)
  intensity <- 1 / rowMeans(column_profile)
  # The original TRUE and FALSE branches were byte-identical apart from
  # local variable names, so both the control-line (fp = TRUE) and
  # test-line (fp = FALSE) peaks are handled by one validated branch.
  # `fp` must be a single non-NA logical: the original `fp == TRUE`
  # comparison made `if ()` error when fp was NA.
  if (is.logical(fp) && length(fp) == 1L && !is.na(fp)) {
    # Cut the requested peak out of the profile, then baseline-correct by
    # keeping only intensities above the cutoff `pi`.
    peak <- intensity[cut1:cut2]
    plot(peak[peak > pi], xlab = "Index", ylab = "Intensity [arbitrary unit]",
         main = "Intensity vs Index")
  } else {
    warning("Provide the right value of cut of the image")
  }
}
|
# DOCUMENTATION FOR DATA SET
#' @title Rank data: APA
#' @docType data
#' @aliases APA
#' @name APA
#' @format A list containing:
#' \describe{
#' \item{data}{ matrix of size 5738x5 containing the 5738 observed full ranks in ranking representation.
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#'
#' \item{frequency}{matrix of size 120x6. Each row corresponds to one of the different observed rank.
#' The first fifth columns contains the observed ranks (ordering representation) and the sixth column
#' contains the frequency of observation.}
#'
#' \item{m}{ vector with the size of the ranks (5 here).}
#' }
#'
#' @description This dataset contains the 5738 full rankings resulting from the American Psychological Association (APA) presidential election of 1980.
#' For this election, members of APA had to rank five candidates in order of preference.
#'
#' For information, a total of 15449 votes have been registered for this election, but only the 5738 full rankings are reported in the APA dataset. Candidates A and C were research psychologists, candidates D and E were clinical psychologists and candidate B was a community psychologist.
#'
#'
#' @source "Group representations in probability and statistics", P. Diaconis, 1988.
#'
#' @examples
#' data(APA)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Rank data: big4
#' @docType data
#' @aliases big4
#' @name big4
#' @format A list containing:
#' \describe{
#' \item{data}{A matrix of size 21*8 containing the 21 Premier League seasons. Each row corresponding to one ranking (ranking representation).
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#' \item{frequency}{matrix of size 21*9. Each row corresponds to one of the 21 different observed rankings, and the last column contains the observation frequency.}
#' \item{m}{the size of the rankings (m=c(4,4) ).}
#' }
#'
#' @description This dataset is composed of the rankings (in ranking notation) of the "Big Four" English football teams (A: Manchester, B: Liverpool, C: Arsenal, D: Chelsea) to the English Championship (Premier League) and according to the UEFA coefficients (statistics used in Europe for ranking and seeding teams in international competitions), from 1993 to 2013.
#'
#' In 2000-2001, Arsenal and Chelsea had the same UEFA coefficient and then are tied. UEFA ranking is (1, 4, 2, 2) for 2000-2001, what means that Manchester United is the first, Liverpool is the last, and the two intermediate positions are for Arsenal and Chelsea in an unknown order.
#'
#' In 2009-2010, Liverpool and Arsenal have also the same UEFA coefficient, the ranking is (1, 2, 2, 4).
#'
#' @source \url{https://en.wikipedia.org/wiki/Premier_League}
#'
#' \url{https://www.uefa.com/memberassociations/uefarankings/club/}
#'
#' @examples
#' data(big4)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Multidimensionnal partial rank data: eurovision
#' @docType data
#' @aliases eurovision
#' @name eurovision
#' @format A list containing:
#' \describe{
#' \item{data}{ A matrix of size 34*48. Each row corresponds to the ranking representation of a multidimensionnal ranking.
#' Columns 1 to 8 correspond to the 2007 contest, columns 9 to 18 to the 2008 contest, etc...
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...
#'
#' }
#'
#' \item{frequency}{A matrix of size 34*49 containing the differents multidimensionnal rankings. The 48 first columns are the same as in data, and the last column contains the frequency (1 for all ranks).}
#'
#' \item{m}{ a vector with the sizes of ranks for each dimension.}
#' }
#'
#' @description This dataset contains the ranking of the 8 common finalists of the Eurovision song contest from 2007 to 2012:
#'
#' A: France, B:Germany, C:Greece, D:Romania, E:Russia, F:Spain, G:Ukraine, H:United Kingdom.
#'
#' The number of rankings is 33, corresponding to the 33 European countries having participated to this six editions of the contest.
#'
#' All the rankings are partial since none country has ranked this 8 countries in its 10 preferences. Missing ranking elements are zeros.
#'
#' @source https://eurovision.tv
#'
#' @examples
#' data(eurovision)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Multidimensionnal rank data: quiz
#' @docType data
#' @aliases quiz
#' @name quiz
#' @format A list containing:
#' \describe{
#' \item{data}{a matrix of size 70*16. The student's answers are in row and the 16 columns correspond to the 4 rankings (for the 4 quizzes) of size 4 (ranking representation).
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#' \item{frequency}{a matrix of size 63*17. Each row corresponds to one of the 63 differents observed
#' rankings (ranking representation). Each row contains 4 ranks of size 4 and a last column for the frequency.}
#' \item{m}{a vector with the sizes of the ranks for each dimension.}
#'
#' }
#'
#' @description This dataset contains the answers of 70 students (40 of third year and 30 of fourth year) from Polytech'Lille (statistics engineering school, France) to the four following quizzes:
#'
#' \describe{
#'
#' \item{Literature Quiz}{
#' This quiz consists of ranking four french writers according to chronological order:
#' A=Victor Hugo, B=Moliere, C=Albert Camus, D=Jean-Jacques Rousseau.}
#'
#' \item{Football Quiz}{
#' This quiz consists of ranking four national football teams according to increasing number of wins in the football World Cup: A=France, B=Germany, C=Brazil, D=Italy.}
#'
#' \item{Mathematics Quiz}{
#' This quiz consists of ranking four numbers according to increasing order:
#' A=pi/3, B=log(1), C=exp(2), D=(1+sqrt(5))/2.}
#'
#' \item{Cinema Quiz}{
#' This quiz consists of ranking four Tarentino's movies according to chronological order:
#' A=Inglourious Basterds, B=Pulp Fiction, C=Reservoir Dogs, D=Jackie Brown.}
#'
#' }
#'
#' @source Julien Jacques
#'
#' @examples
#' data(quiz)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Rank data: sports
#' @docType data
#' @aliases sports
#' @name sports
#' @format A list containing:
#' \describe{
#' \item{data}{a matrix containing 130 ranks of size 7 in ranking representation.
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#'
#' \item{frequency}{a matrix with 123 differents ranks of size 7. In each row the first 7 columns correspond to one observed ranking and the last column contains the observation frequency.}
#' \item{m}{ the size of the rankings (m=7).}
#' }
#'
#' @description This data set is due to Louis Roussos who asked 130 students at the
#' University of Illinois to rank seven sports according to their preference in participating:
#' A = Baseball, B = Football, C = Basketball, D = Tennis, E = Cycling, F =
#' Swimming, G = Jogging.
#'
#' @source J.I. Marden. "Analyzing and modeling rank data, volume 64 of Monographs on Statistics and Applied Probability". Chapman & Hall, London, 1995.
#'
#' @examples
#' data(sports)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Rank data: words
#' @docType data
#' @aliases words
#' @name words
#' @format A list containing:
#' \describe{
#' \item{data}{A matrix of size 98*5 containing the 98 answers. Each row corresponding to one ranking (ranking representation).
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#' \item{frequency}{matrix of size 15*6. Each row corresponds to one of the 15 different observed rankings, and the last column contains the observation frequency.}
#' \item{m}{the size of the rankings (m=5).}
#' }
#'
#' @description The data was collected under the auspices of the Graduate Record
#' Examination Board. A sample of 98 college students were asked to rank five words according to strength of association (least to most associated) with the target word "Idea":
#' A = Thought, B = Play, C = Theory, D = Dream and E = Attention.
#'
#' @source M.A. Fligner and J.S. Verducci. "Distance based ranking models". J. Roy. Statist. Soc. Ser. B, 48(3):359-369, 1986.
#'
#' @examples
#' data(words)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
| /fuzzedpackages/Rankcluster/R/data.R | no_license | akhikolla/testpackages | R | false | false | 9,907 | r | # DOCUMENTATION FOR DATA SET
#' @title Rank data: APA
#' @docType data
#' @aliases APA
#' @name APA
#' @format A list containing:
#' \describe{
#' \item{data}{ matrix of size 5738x5 containing the 5738 observed full ranks in ranking representation.
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#'
#' \item{frequency}{matrix of size 120x6. Each row corresponds to one of the different observed rank.
#' The first fifth columns contains the observed ranks (ordering representation) and the sixth column
#' contains the frequency of observation.}
#'
#' \item{m}{ vector with the size of the ranks (5 here).}
#' }
#'
#' @description This dataset contains the 5738 full rankings resulting from the American Psychological Association (APA) presidential election of 1980.
#' For this election, members of APA had to rank five candidates in order of preference.
#'
#' For information, a total of 15449 votes have been registered for this election, but only the 5738 full rankings are reported in the APA dataset. Candidates A and C were research psychologists, candidates D and E were clinical psychologists and candidate B was a community psychologist.
#'
#'
#' @source "Group representations in probability and statistics", P. Diaconis, 1988.
#'
#' @examples
#' data(APA)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Rank data: big4
#' @docType data
#' @aliases big4
#' @name big4
#' @format A list containing:
#' \describe{
#' \item{data}{A matrix of size 21*8 containing the 21 Premier League seasons. Each row corresponding to one ranking (ranking representation).
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#' \item{frequency}{matrix of size 21*9. Each row corresponds to one of the 21 different observed rankings, and the last column contains the observation frequency.}
#' \item{m}{the size of the rankings (m=c(4,4) ).}
#' }
#'
#' @description This dataset is composed of the rankings (in ranking notation) of the "Big Four" English football teams (A: Manchester, B: Liverpool, C: Arsenal, D: Chelsea) to the English Championship (Premier League) and according to the UEFA coefficients (statistics used in Europe for ranking and seeding teams in international competitions), from 1993 to 2013.
#'
#' In 2000-2001, Arsenal and Chelsea had the same UEFA coefficient and then are tied. UEFA ranking is (1, 4, 2, 2) for 2000-2001, what means that Manchester United is the first, Liverpool is the last, and the two intermediate positions are for Arsenal and Chelsea in an unknown order.
#'
#' In 2009-2010, Liverpool and Arsenal have also the same UEFA coefficient, the ranking is (1, 2, 2, 4).
#'
#' @source \url{https://en.wikipedia.org/wiki/Premier_League}
#'
#' \url{https://www.uefa.com/memberassociations/uefarankings/club/}
#'
#' @examples
#' data(big4)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Multidimensionnal partial rank data: eurovision
#' @docType data
#' @aliases eurovision
#' @name eurovision
#' @format A list containing:
#' \describe{
#' \item{data}{ A matrix of size 34*48. Each row corresponds to the ranking representation of a multidimensionnal ranking.
#' Columns 1 to 8 correspond to the 2007 contest, columns 9 to 18 to the 2008 contest, etc...
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...
#'
#' }
#'
#' \item{frequency}{A matrix of size 34*49 containing the differents multidimensionnal rankings. The 48 first columns are the same as in data, and the last column contains the frequency (1 for all ranks).}
#'
#' \item{m}{ a vector with the sizes of ranks for each dimension.}
#' }
#'
#' @description This dataset contains the ranking of the 8 common finalists of the Eurovision song contest from 2007 to 2012:
#'
#' A: France, B:Germany, C:Greece, D:Romania, E:Russia, F:Spain, G:Ukraine, H:United Kingdom.
#'
#' The number of rankings is 33, corresponding to the 33 European countries having participated to this six editions of the contest.
#'
#' All the rankings are partial since none country has ranked this 8 countries in its 10 preferences. Missing ranking elements are zeros.
#'
#' @source https://eurovision.tv
#'
#' @examples
#' data(eurovision)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Multidimensionnal rank data: quiz
#' @docType data
#' @aliases quiz
#' @name quiz
#' @format A list containing:
#' \describe{
#' \item{data}{a matrix of size 70*16. The student's answers are in row and the 16 columns correspond to the 4 rankings (for the 4 quizzes) of size 4 (ranking representation).
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#' \item{frequency}{a matrix of size 63*17. Each row corresponds to one of the 63 differents observed
#' rankings (ranking representation). Each row contains 4 ranks of size 4 and a last column for the frequency.}
#' \item{m}{a vector with the sizes of the ranks for each dimension.}
#'
#' }
#'
#' @description This dataset contains the answers of 70 students (40 of third year and 30 of fourth year) from Polytech'Lille (statistics engineering school, France) to the four following quizzes:
#'
#' \describe{
#'
#' \item{Literature Quiz}{
#' This quiz consists of ranking four french writers according to chronological order:
#' A=Victor Hugo, B=Moliere, C=Albert Camus, D=Jean-Jacques Rousseau.}
#'
#' \item{Football Quiz}{
#' This quiz consists of ranking four national football teams according to increasing number of wins in the football World Cup: A=France, B=Germany, C=Brazil, D=Italy.}
#'
#' \item{Mathematics Quiz}{
#' This quiz consists of ranking four numbers according to increasing order:
#' A=pi/3, B=log(1), C=exp(2), D=(1+sqrt(5))/2.}
#'
#' \item{Cinema Quiz}{
#' This quiz consists of ranking four Tarentino's movies according to chronological order:
#' A=Inglourious Basterds, B=Pulp Fiction, C=Reservoir Dogs, D=Jackie Brown.}
#'
#' }
#'
#' @source Julien Jacques
#'
#' @examples
#' data(quiz)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Rank data: sports
#' @docType data
#' @aliases sports
#' @name sports
#' @format A list containing:
#' \describe{
#' \item{data}{a matrix containing 130 ranks of size 7 in ranking representation.
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#'
#' \item{frequency}{a matrix with 123 differents ranks of size 7. In each row the first 7 columns correspond to one observed ranking and the last column contains the observation frequency.}
#' \item{m}{ the size of the rankings (m=7).}
#' }
#'
#' @description This data set is due to Louis Roussos who asked 130 students at the
#' University of Illinois to rank seven sports according to their preference in participating:
#' A = Baseball, B = Football, C = Basketball, D = Tennis, E = Cycling, F =
#' Swimming, G = Jogging.
#'
#' @source J.I. Marden. "Analyzing and modeling rank data, volume 64 of Monographs on Statistics and Applied Probability". Chapman & Hall, London, 1995.
#'
#' @examples
#' data(sports)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
#' @title Rank data: words
#' @docType data
#' @aliases words
#' @name words
#' @format A list containing:
#' \describe{
#' \item{data}{A matrix of size 98*5 containing the 98 answers. Each row corresponding to one ranking (ranking representation).
#'
#' The ranking representation r=(r_1,...,r_m) contains the ranks assigned to the objects, and means that the ith object is in r_ith position.
#'
#' For example, if the ranking representation of a rank is (4,3,1,2,5), it means that judge ranks the first object in 4th position, second object in 3rd position, ...}
#' \item{frequency}{matrix of size 15*6. Each row corresponds to one of the 15 different observed rankings, and the last column contains the observation frequency.}
#' \item{m}{the size of the rankings (m=5).}
#' }
#'
#' @description The data was collected under the auspices of the Graduate Record
#' Examination Board. A sample of 98 college students were asked to rank five words according to strength of association (least to most associated) with the target word "Idea":
#' A = Thought, B = Play, C = Theory, D = Dream and E = Attention.
#'
#' @source M.A. Fligner and J.S. Verducci. "Distance based ranking models". J. Roy. Statist. Soc. Ser. B, 48(3):359-369, 1986.
#'
#' @examples
#' data(words)
#'
#' @family datasets
#'
#' @keywords datasets
NULL
|
# Percentage bar chart with bootstrap confidence-interval error bars.
#
# For each level of the (factor) column named by `x` in `data`, the bar height
# is the proportion of observations in that level. If a second grouping
# column `z` is given, bars are dodged by the levels of `z`.
#
# Arguments (interface unchanged from the original):
#   data      - data.frame/tibble containing the columns named by `x` / `z`.
#   x         - character name of the column to summarise.
#   z         - optional character name of a grouping column.
#   xlab, ylab, fill.lab - axis/legend labels; default to the column names.
#   colorful, limits, ggsave, file.name, width, height, dpi - retained for
#     backward compatibility; the greyscale/ggsave branches were commented
#     out in the original and remain disabled (see NOTE at the bottom).
#
# Returns: a ggplot object.
bar <- function(data, x, z = NULL, xlab = NULL, ylab = "Percent", fill.lab = NULL,
                colorful = TRUE, limits = c(0, 1), ggsave = FALSE, file.name = NULL,
                width = 5, height = 5, dpi = 600) {
  library(ggplot2)
  library(scales)
  if (is.null(xlab)) xlab <- x
  if (is.null(fill.lab)) fill.lab <- z
  xx <- data[, x][[1]]
  # Generalization: accept character (or other) vectors by coercing to
  # factor. Previously a non-factor column made levels() return NULL, which
  # produced an empty level set and a broken 1:0 loop.
  if (!is.factor(xx)) xx <- factor(xx)
  zz <- rep(1, length(xx))
  if (!is.null(z)) zz <- data[, z][[1]]
  u.x <- levels(xx)
  # One 0/1 dummy vector per level of x: dum[[i]] marks membership in level i,
  # so mean(dum[[i]]) is the proportion of rows in that level.
  dum <- vector("list", length(u.x))
  xx2 <- vector("list", length(u.x))
  for (i in seq_along(u.x)) {
    dum[[i]] <- ifelse(xx == u.x[i], 1, 0)
    xx2[[i]] <- rep(u.x[i], length(xx))
  }
  # Fix: the original wrote rep(zz, time = ...); the argument name is `times`.
  zz2 <- rep(zz, times = length(u.x))
  xx2 <- do.call("c", xx2)
  dum2 <- do.call("c", dum)
  # Drop incomplete rows once, and use the cleaned data in BOTH branches.
  # (The original grouped branch plotted the raw vectors, so NA handling
  # silently differed from the ungrouped branch.)
  D <- na.omit(data.frame(zz2, xx2, dum2))
  if (length(unique(zz)) == 1) {
    # Single group: one bar per level of x, legend suppressed.
    # (Debug cat("aaa")/cat("bbb") markers from the original removed.)
    g <- ggplot(D, mapping = aes(x = factor(xx2), y = as.numeric(dum2), fill = factor(xx2))) +
      stat_summary(fun = mean, geom = "bar", show.legend = FALSE) +
      stat_summary(fun.data = mean_cl_boot, geom = "errorbar", width = 0.2, show.legend = FALSE) +
      scale_y_continuous(labels = percent_format(), limits = limits) +
      theme_bw() + labs(x = xlab, y = ylab)
  } else {
    # Grouped: dodge bars by the levels of z.
    g <- ggplot(D, mapping = aes(x = factor(xx2), y = dum2, fill = as.factor(zz2))) +
      stat_summary(fun = mean, geom = "bar", position = "dodge") +
      stat_summary(fun.data = mean_cl_boot, geom = "errorbar",
                   position = position_dodge()) +
      theme_bw() + labs(x = xlab, y = ylab, fill = fill.lab)
  }
  # NOTE(review): the greyscale (`colorful`), CI-extraction, and ggsave
  # branches were already commented out in the original and are intentionally
  # left disabled; the corresponding parameters are kept for compatibility.
  g
}
| /bar.R | no_license | ahadalizadeh/utility_fun | R | false | false | 2,254 | r |
# Percentage bar chart with bootstrap CI error bars: one bar per level of
# column `x` in `data`, optionally dodged by grouping column `z`.
# Returns a ggplot object; `colorful`/`ggsave`/`file.name`/`width`/`height`/
# `dpi` are currently unused (their branches are commented out below).
# NOTE(review): this is a verbatim duplicate of the `bar` defined earlier in
# this file, with the same issues (debug cat() output, `time=` instead of
# `times=`, grouped branch not using the NA-cleaned `D`).
bar = function(data, x, z = NULL,xlab= NULL, ylab= "Percent", fill.lab= NULL,
               colorful=TRUE, limits=c(0,1), ggsave = FALSE, file.name = NULL,
               width = 5, height = 5,dpi = 600
               ) {
  library(ggplot2)
  library(scales)
  # Labels default to the supplied column names.
  if(is.null(xlab)) xlab = x
  if(is.null(fill.lab)) fill.lab = z
  xx = data[,x][[1]]
  # xx = as.factor(xx)
  # if(!is.factor(xx)) stop("x must be factor.")
  # Constant group when no z is given, so the single-group branch is taken.
  zz = rep(1, length(xx))
  if(!is.null(z)) zz = data[,z][[1]]
  # u.x = as.vector(unique(xx))
  # NOTE(review): levels() returns NULL for non-factor xx, which would make
  # the 1:length(u.x) loop below run over 1:0 — assumes xx is a factor.
  u.x = as.vector(levels(xx))
  # One 0/1 dummy vector per level of x; mean of a dummy = level proportion.
  dum = list()
  xx2 = list()
  for (i in 1:length(u.x)) {
    dum[[i]] <- ifelse(xx == u.x[i], 1, 0)
    xx2[[i]] <- rep(u.x[i], length(xx))
  }
  # NOTE(review): the argument to rep() is `times`; `time=` relies on
  # partial matching.
  zz2= rep(zz, time=length(u.x))
  xx2= do.call("c", xx2)
  dum2 = do.call("c", dum)
  # NA-cleaned long-format data; only the single-group branch uses it.
  D= na.omit(data.frame(zz2, xx2, dum2))
  if(length(unique(zz))==1){
    # NOTE(review): leftover debug output.
    cat("aaa")
    g= ggplot(D, mapping = aes(x=factor(xx2), y = as.numeric(dum2), fill=factor(xx2) )) +
      stat_summary(fun=mean, geom="bar" , show.legend = FALSE) +
      stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2, show.legend = FALSE) +
      scale_y_continuous(labels=percent_format(), limits=limits) +
      theme_bw()+labs(x = xlab, y =ylab )
    cat("bbb")
  } else {
    # NOTE(review): this branch maps the raw vectors from the enclosing
    # environment instead of the NA-cleaned `D` used above.
    g= ggplot(mapping = aes(x=factor(xx2), y = dum2 , fill = as.factor(zz2))) +
      stat_summary(fun=mean, geom="bar", position="dodge" ) +
      stat_summary(fun.data=mean_cl_boot, geom="errorbar",
                   position=position_dodge( )) +
      # scale_y_continuous(labels=percent_format(), limits=limits) +
      theme_bw()+labs(x = xlab, y =ylab, fill= fill.lab)
  }
  # Disabled extras: greyscale fill, CI table extraction, and ggsave output.
  # if (!isTRUE(colorful)){
  #   g = g +
  #     scale_fill_grey(start = 0.3 , end = 0.7 )
  #
  # }
  # pg <- ggplot_build(g)$data[[2]][,c("x", "group","y","ymin","ymax")]
  # pg$ci = paste0(round(pg$y*100,2)," (", round(pg$ymin*100,2),", ", round(pg$ymax*100,2),")")
  # pg$x = ggplot_build(g)$layout$panel_params[[1]]$x$get_labels()
  # pg$group = rep(unique(zz), length(u.x))
  # g$results = pg
  # if(isTRUE(ggsave))
  #   file = paste0("Plot.",round(runif(1),3),".jpeg")
  # ggsave(ifelse(is.null(file.name) ,file,file.name), width = width, height = height,
  #        dpi = dpi)
  # list(plot=g, res=pg)
  g
}
|
# Second Program
# Define a small numeric vector and echo it (top-level auto-printing).
x <- c(1, 2, 3, 4, 7)  # `<-` is the idiomatic R assignment operator
x
| /r2.R | no_license | anil911/project1 | R | false | false | 36 | r | # Second Program
# Use `<-` for assignment per R style; values unchanged.
x <- c(1, 2, 3, 4, 7)
x
|
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(3.94108708470682e-312, -1.26822486837899e-30, -2.24767481577173e-289, -4.791735268887e-131, -1.25892753573147e-30, 6.01362129181413e-317, -2.92293267663603e-277, -1.2437121450743e-30, -1.26836459270829e-30, 3.78576699573368e-270, 1.00891829346111e-309, 3.21619919819582e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Replay the fuzzer-captured argument list (empty rates/thresholds, extreme
# subnormal incomes) against the package entry point; the call is only
# expected to run without crashing.
result <- do.call(grattan::IncomeTax,testlist)
str(result) | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610052010-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 514 | r | testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(3.94108708470682e-312, -1.26822486837899e-30, -2.24767481577173e-289, -4.791735268887e-131, -1.25892753573147e-30, 6.01362129181413e-317, -2.92293267663603e-277, -1.2437121450743e-30, -1.26836459270829e-30, 3.78576699573368e-270, 1.00891829346111e-309, 3.21619919819582e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result) |
# Unit tests for SR_feat_eng_date().
#
# Fixture: one column of each type the function must pass through untouched
# (numeric, character, factor) plus a Date column; NAs included to check
# they survive the transformation.
df <- data.frame(var_numeric = c(1, 2, 2, 3, NA, 4),
                 var_character = c("a", NA, "b", "c", "c", "d"),
                 var_factor = factor(c("a", "b", "c", "c", "d", NA)),
                 var_date = seq.Date(as.Date("2020-04-12"), by = "day", length.out = 6),
                 stringsAsFactors = FALSE)
# Expected output with defaults: var_date becomes a yyyymmdd number; all
# other columns are unchanged.
df_res1 <- data.frame(var_numeric = c(1, 2, 2, 3, NA, 4),
                      var_character = c("a", NA, "b", "c", "c", "d"),
                      var_factor = factor(c("a", "b", "c", "c", "d", NA)),
                      var_date = c(20200412, 20200413, 20200414, 20200415,
                                   20200416, 20200417),
                      stringsAsFactors = FALSE)
# Expected output with only_date_to_numeric = FALSE under a German locale:
# extra month/day/weekday feature columns, weekday names localised.
df_res2_de <- data.frame(var_numeric = c(1, 2, 2, 3, NA, 4),
                         var_character = c("a", NA, "b", "c", "c", "d"),
                         var_factor = factor(c("a", "b", "c", "c", "d", NA)),
                         var_date = c(20200412, 20200413, 20200414, 20200415,
                                      20200416, 20200417),
                         var_date_month = c(4, 4, 4, 4, 4, 4),
                         var_date_day = c(12, 13, 14, 15, 16, 17),
                         var_date_weekday = c("Sonntag", "Montag", "Dienstag", "Mittwoch",
                                              "Donnerstag", "Freitag"),
                         stringsAsFactors = FALSE)
# Same expectation under a US-English locale (English weekday names).
df_res2_en <- data.frame(var_numeric = c(1, 2, 2, 3, NA, 4),
                         var_character = c("a", NA, "b", "c", "c", "d"),
                         var_factor = factor(c("a", "b", "c", "c", "d", NA)),
                         var_date = c(20200412, 20200413, 20200414, 20200415,
                                      20200416, 20200417),
                         var_date_month = c(4, 4, 4, 4, 4, 4),
                         var_date_day = c(12, 13, 14, 15, 16, 17),
                         var_date_weekday = c("Sunday", "Monday", "Tuesday", "Wednesday",
                                              "Thursday", "Friday"),
                         stringsAsFactors = FALSE)
# The function must return a plain data.frame.
test_that("class", {
  expect_equal(class(SR_feat_eng_date(df)), "data.frame")
})
test_that("result", {
  expect_equal(SR_feat_eng_date(df), df_res1)
})
# Weekday labels depend on LC_TIME, so each locale-specific expectation is
# only exercised when the test machine runs that locale (skipped otherwise).
if (Sys.getlocale("LC_TIME") == "de_DE.UTF-8") {
  test_that("result, only_date_to_numeric = FALSE, de", {
    expect_equal(SR_feat_eng_date(df, only_date_to_numeric = FALSE), df_res2_de)
  })
}
if (Sys.getlocale("LC_TIME") == "en_US.UTF-8") {
  test_that("result, only_date_to_numeric = FALSE, en", {
    expect_equal(SR_feat_eng_date(df, only_date_to_numeric = FALSE), df_res2_en)
  })
}
# Clean up fixtures so repeated sourcing does not leak objects.
rm(df, df_res1, df_res2_de, df_res2_en)
| /tests/testthat/test-SR_feat_eng_date.R | permissive | samuelreuther/SRfunctions | R | false | false | 2,675 | r | df <- data.frame(var_numeric = c(1, 2, 2, 3, NA, 4),
var_character = c("a", NA, "b", "c", "c", "d"),
var_factor = factor(c("a", "b", "c", "c", "d", NA)),
var_date = seq.Date(as.Date("2020-04-12"), by = "day", length.out = 6),
stringsAsFactors = FALSE)
df_res1 <- data.frame(var_numeric = c(1, 2, 2, 3, NA, 4),
var_character = c("a", NA, "b", "c", "c", "d"),
var_factor = factor(c("a", "b", "c", "c", "d", NA)),
var_date = c(20200412, 20200413, 20200414, 20200415,
20200416, 20200417),
stringsAsFactors = FALSE)
df_res2_de <- data.frame(var_numeric = c(1, 2, 2, 3, NA, 4),
var_character = c("a", NA, "b", "c", "c", "d"),
var_factor = factor(c("a", "b", "c", "c", "d", NA)),
var_date = c(20200412, 20200413, 20200414, 20200415,
20200416, 20200417),
var_date_month = c(4, 4, 4, 4, 4, 4),
var_date_day = c(12, 13, 14, 15, 16, 17),
var_date_weekday = c("Sonntag", "Montag", "Dienstag", "Mittwoch",
"Donnerstag", "Freitag"),
stringsAsFactors = FALSE)
df_res2_en <- data.frame(var_numeric = c(1, 2, 2, 3, NA, 4),
var_character = c("a", NA, "b", "c", "c", "d"),
var_factor = factor(c("a", "b", "c", "c", "d", NA)),
var_date = c(20200412, 20200413, 20200414, 20200415,
20200416, 20200417),
var_date_month = c(4, 4, 4, 4, 4, 4),
var_date_day = c(12, 13, 14, 15, 16, 17),
var_date_weekday = c("Sunday", "Monday", "Tuesday", "Wednesday",
"Thursday", "Friday"),
stringsAsFactors = FALSE)
test_that("class", {
expect_equal(class(SR_feat_eng_date(df)), "data.frame")
})
test_that("result", {
expect_equal(SR_feat_eng_date(df), df_res1)
})
if (Sys.getlocale("LC_TIME") == "de_DE.UTF-8") {
test_that("result, only_date_to_numeric = FALSE, de", {
expect_equal(SR_feat_eng_date(df, only_date_to_numeric = FALSE), df_res2_de)
})
}
if (Sys.getlocale("LC_TIME") == "en_US.UTF-8") {
test_that("result, only_date_to_numeric = FALSE, en", {
expect_equal(SR_feat_eng_date(df, only_date_to_numeric = FALSE), df_res2_en)
})
}
rm(df, df_res1, df_res2_de, df_res2_en)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SIGHR.R
\name{SIGHR}
\alias{SIGHR}
\title{A SIGHR Function}
\usage{
SIGHR(X, Y, D, Dtil, q, coef, prob, iter)
}
\arguments{
\item{X:}{the design matrix with confounding variables, SNPs.}
\item{Y:}{the response variable.}
\item{D:}{the auxiliary matrix.}
\item{Dtil:}{list of CMP information. 1. loc: each row is a location selected from the auxiliary information space. 2. hyper: each row is a set of hyperparameters for the corresponding location.}
\item{q:}{the number of confounding (sex, age, BMI... including intercept)}
\item{coef:}{the intial value of (alpha, beta)}
\item{prob:}{the intial value inclusion values of beta, of length (p-q)}
}
\value{
an MCMC sample of all regression coefficients ("coef") including the ones for the intercept and confounders in (1), indicators for all beta's ("indicator.z"), all regression coefficients in the hierarchical logistic regression ("gamma"), and the variance ("sig2") in (1).
}
\description{
This function makes it possible to leverage side information from other studies to inform the sparsity structure of the regression coefficients.
}
| /man/SIGHR.Rd | no_license | Smalls07/SIGHR | R | false | true | 1,166 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SIGHR.R
\name{SIGHR}
\alias{SIGHR}
\title{A SIGHR Function}
\usage{
SIGHR(X, Y, D, Dtil, q, coef, prob, iter)
}
\arguments{
\item{X:}{the design matrix with confounding variables, SNPs.}
\item{Y:}{the response variable.}
\item{D:}{the auxiliary matrix.}
\item{Dtil:}{list of CMP infor. 1. loc: each row is a location selected from the auxiliary information space. 2. hyper: each row is a set of hyperparameteres for the corresponding location.}
\item{q:}{the number of confounding (sex, age, BMI... including intercept)}
\item{coef:}{the intial value of (alpha, beta)}
\item{prob:}{the intial value inclusion values of beta, of length (p-q)}
}
\value{
an MCMC sample of all regression coefficients ("coef") including the ones for the intercept and confounders in (1), indicators for all beta's ("indicator.z"), all regression coefficients in the hierarchical logistic regression ("gamma"), and the variance ("sig2") in (1).
}
\description{
This function allows to leverage side information from other studies to inform about the sparsity structure of the regression coefficients.
}
|
# Compare COAD (colon adenocarcinoma) tumor-vs-normal DE genes against mouse
# RNA-seq expression, write the up/down gene signatures as a GMT file, and
# draw a heatmap of the overlapping genes.
# NOTE(review): only gplots (heatmap.2, colorpanel) is clearly used below;
# the other libraries look like leftovers from related scripts — confirm.
library(pvclust)
library(snow)
library(dendextend)
library("beeswarm")
library("gplots")
# Mouse expression (VST-normalised, keyed by human gene symbols) and human
# RSEM expression; paths are relative to this script's location.
vst.mouse <- read.csv("../../HTSeqCount/HTSeq.humanSymbols.vst.csv",row.names=1,header=T)
rsem <- read.csv("../COAD.gene.rsem.csv",row.names=1,header=T,check.names=F)
# Keep genes with mean RSEM expression above 1.
rsem <- rsem[apply(rsem,1,mean)>1,]
# Differential-expression results; drop genes without an adjusted p-value,
# then keep strong hits (padj <= 0.01, |log2FC| >= 3) also present in rsem.
de.genes <- read.csv("COAD_normal_vs_tumor_results.csv",row.names=1,header=T)
de.genes <- de.genes[!is.na(de.genes$padj),]
de.genes <- de.genes[(de.genes$padj<=0.01) & (abs(de.genes$log2FoldChange)>=3) & (row.names(de.genes) %in% row.names(rsem)),]
de.genes.up <- row.names(de.genes[de.genes$log2FoldChange>0,])
print(length(de.genes.up))
de.genes.down <- row.names(de.genes[de.genes$log2FoldChange<0,])
print(length(de.genes.down))
# Write the signatures in GMT format: set name, description ("NULL"), then
# one gene per tab-separated field, one set per line.
# NOTE(review): sink() stays open if an error occurs before the closing
# sink(); writeLines() with on.exit() would be more robust.
sink("COAD_gene_signatures.gmt")
cat("COAD_UP\tNULL")
for (i in de.genes.up) {
  cat("\t")
  cat(i)
}
cat("\n")
cat("COAD_DOWN\tNULL")
for (i in de.genes.down) {
  cat("\t")
  cat(i)
}
cat("\n")
sink()
# Heatmap of DE genes measured in the mouse data: row-scaled expression with
# Spearman-correlation distance between rows.
common <- intersect(row.names(de.genes),row.names(vst.mouse))
png("de-genes-mouse_coadNormal_vs_tumor.png",width=1000,height=1000)
heatmap.2(
  as.matrix(vst.mouse[common,]),
  trace="none",
  keysize=1.0,
  scale="row",
  margins=c(8,12),
  labRow="",
  col=colorpanel(75,"blue","yellow"),
  distfun=function(x) as.dist(1-cor(t(x),method="spearman", use = "complete.obs")),
  key.title="Gene expression",
  main=""
)
dev.off()
| /TengEtAl2017/RNAseq/COAD/main.R | no_license | murphycj/manuscripts | R | false | false | 1,364 | r | library(pvclust)
library(snow)
library(dendextend)
library("beeswarm")
library("gplots")
vst.mouse <- read.csv("../../HTSeqCount/HTSeq.humanSymbols.vst.csv",row.names=1,header=T)
rsem <- read.csv("../COAD.gene.rsem.csv",row.names=1,header=T,check.names=F)
rsem <- rsem[apply(rsem,1,mean)>1,]
de.genes <- read.csv("COAD_normal_vs_tumor_results.csv",row.names=1,header=T)
de.genes <- de.genes[!is.na(de.genes$padj),]
de.genes <- de.genes[(de.genes$padj<=0.01) & (abs(de.genes$log2FoldChange)>=3) & (row.names(de.genes) %in% row.names(rsem)),]
de.genes.up <- row.names(de.genes[de.genes$log2FoldChange>0,])
print(length(de.genes.up))
de.genes.down <- row.names(de.genes[de.genes$log2FoldChange<0,])
print(length(de.genes.down))
sink("COAD_gene_signatures.gmt")
cat("COAD_UP\tNULL")
for (i in de.genes.up) {
cat("\t")
cat(i)
}
cat("\n")
cat("COAD_DOWN\tNULL")
for (i in de.genes.down) {
cat("\t")
cat(i)
}
cat("\n")
sink()
common <- intersect(row.names(de.genes),row.names(vst.mouse))
png("de-genes-mouse_coadNormal_vs_tumor.png",width=1000,height=1000)
heatmap.2(
as.matrix(vst.mouse[common,]),
trace="none",
keysize=1.0,
scale="row",
margins=c(8,12),
labRow="",
col=colorpanel(75,"blue","yellow"),
distfun=function(x) as.dist(1-cor(t(x),method="spearman", use = "complete.obs")),
key.title="Gene expression",
main=""
)
dev.off()
|
#' Simple input-output table for the Netherlands, 2006.
#'
#' This simplified SIOT is taken from the Science Policy Integration for
#' Coastal Systems Assessment project's input-output multiplier
#' specification sheet. It is used as a simple example SIOT for
#' controlled analytical results. The column names were slightly altered
#' to resemble more the current Eurostat conventions and the main example
#' dataset \code{\link{germany_1995}}.
#' @format A data frame with 13 observations and 14 variables.
#' @source Source: Input-Output Multipliers Specification Sheet and Supporting
#' Material in the Spicosa Project Report
#' @usage data(netherlands_2006)
#' @format A data frame of 13 observations in 14 variables.
#' \describe{
#' \item{prod_na}{Product name, simplified, following the Eurostat conventions}
#' \item{agriculture_group}{Simple aggregated agricultural products}
#' \item{mining_group}{Simple aggregated mining products}
#' \item{manufacturing_group}{Simple aggregated manufacturing products}
#' \item{construction_group}{Construction}
#' \item{utilities_group}{Simple aggregated utilities products/services}
#' \item{services_group}{Simple aggregated services products}
#' \item{TOTAL}{Column / row sums, simple summary, not included in the original source}
#' \item{final_consumption_private}{Simple aggregated final private use}
#' \item{final_consumption_households}{Simple aggregated final household consumption}
#' \item{final_consumption_government}{Simple aggregated final government consumption}
#' \item{gross_fixed_capital_formation}{Gross fixed capital formation 'GFCF'}
#' \item{exports}{Simple aggregated exports}
#' \item{total_use}{Simple aggregated total use}
#' }
#' @family Validation datasets
"netherlands_2006"
| /R/data_netherlands_2006.R | permissive | rOpenGov/iotables | R | false | false | 1,813 | r | #' Simple input-output table for the Netherlands, 2006.
#'
#' This simplified SIOT is taken from the Science Policy Integration for
#' Coastal Systems Assessment project's input-output multiplier
#' specification sheet. It is used as a simple example SIOT for
#' controlled analytical results. The column names were slightly altered
#' to resemble more the current Eurostat conventions and the main example
#' dataset \code{\link{germany_1995}}.
#' @format A data frame with 14 observations and 13 variables.
#' @source Source: Input-Output Multipliers Specification Sheet and Supporting
#' Material in the Spicosa Project Report
#' @usage data(netherlands_2006)
#' @format A data frame of 13 observations in 14 variables.
#' \describe{
#' \item{prod_na}{Product name, simplified, following the Eurostat conventions}
#' \item{agriculture_group}{Simple aggregated agricultural products}
#' \item{mining_group}{Simple aggregated mining products}
#' \item{manufacturing_group}{Simple aggregated manufacturing products}
#' \item{construction_group}{Construction}
#' \item{utilities_group}{Simple aggregated utilities products/services}
#' \item{services_group}{Simple aggregated services products}
#' \item{TOTAL}{Column / row sums, simple summary, not included in the original source}
#' \item{final_consumption_private}{Simple aggregated final private use}
#' \item{final_consumption_households}{Simple aggregated final household consumption}
#' \item{final_consumption_government}{Simple aggregated final government consumption}
#' \item{gross_fixed_capital_formation}{Gross fixed capital formation 'GFCF'}
#' \item{exports}{Simple aggregated exports}
#' \item{total_use}{Simple aggregated total use}
#' }
#' @family Validation datasets
"netherlands_2006"
|
#' Plot series with hatched weekends
#'
#' Plot series with hatched in gray weekends
#'
#' \code{series} should be data.frame with column "date"
#'
#' @param series data.frame, with two columns, one is "date".
#' @param ... parameters, passed to plot(...)
#' @return NULL
#' @examples
#' PlotSeries(rates[, c(1, 2)])
PlotSeries <- function(series, ...) {
    # Draw the series first; the rect() hatching below paints on top of it.
    plot(series, ...)
    # NOTE(review): the index starts at 1 and is added to the first date, so
    # the first day of the range is never examined and the scan runs one day
    # past the last (the seq length equals the inclusive day count).
    # seq_len()/seq_along() would also be safer than 1:length(...).
    for(i in 1:length(seq(from = as.Date(series[1, "date"]),
                          to = as.Date(series[nrow(series), "date"]),
                          by = 1))) {
        date <- as.Date(series[1, "date"]) + i
        # weekdays() returns locale-dependent names; assumes an English
        # locale ("Friday") — TODO confirm.
        if(weekdays(date) == "Friday") {
            # Hatch the band from this Friday to Friday+3 (Monday) across
            # the full vertical extent of the current plot region.
            rect(xleft = date, ybottom = par("usr")[3], xright = date + 3,
                 ytop = par("usr")[4], density = 20, angle = 45,
                 border = NULL, col = "gray")
        }
    }
} | /R/plot_series.R | no_license | irudnyts/estudy | R | false | false | 840 | r | #' Plot series with hatched weekends
#'
#' Plot series with hatched in gray weekends
#'
#' \code{series} should be data.frame with column "date"
#'
#' @param series data.frame, with two columns, one is "date".
#' @param ... parameters, passed to plot(...)
#' @return NULL
#' @examples
#' PlotSeries(rates[, c(1, 2)])
# Plot a series and hatch weekends in gray.
#
# series - data.frame whose "date" column (coercible via as.Date) spans the
#          plotted range; the remaining column is plotted against it.
# ...    - forwarded to plot().
# Returns NULL invisibly (called for its plotting side effect).
PlotSeries <- function(series, ...) {
    plot(series, ...)
    # All days in the plotted range, inclusive of both endpoints. The
    # original looped over 1:N and added the index to the start date, which
    # skipped the first day and examined one day past the last.
    all_days <- seq(from = as.Date(series[1, "date"]),
                    to = as.Date(series[nrow(series), "date"]),
                    by = 1)
    for (k in seq_along(all_days)) {
        date <- all_days[k]
        # weekdays() is locale-dependent; assumes an English locale — TODO
        # confirm before relying on this in non-English environments.
        if (weekdays(date) == "Friday") {
            # Hatch from Friday to Friday+3 (Monday) across the full
            # vertical extent of the plot region.
            rect(xleft = date, ybottom = par("usr")[3], xright = date + 3,
                 ytop = par("usr")[4], density = 20, angle = 45,
                 border = NULL, col = "gray")
        }
    }
}
# Auto-generated libFuzzer regression input for gjam's internal truncated-
# normal sampler: `lo` is a subnormal double, hi/mu/sig are all zero.
testlist <- list(hi = 0, lo = 9.85057215234069e-318, mu = 0, sig = 0)
# The call only checks that the C++ routine survives this degenerate input;
# str() on the following line summarises whatever comes back.
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) | /gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610044514-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 126 | r | testlist <- list(hi = 0, lo = 9.85057215234069e-318, mu = 0, sig = 0)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) |
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix() ) {
MInversa <- NULL
set <- function(y) {
x <<- y
MInversa <<- NULL
}
get <- function() x
setMInversa <- function(solve) MInversa <<- solve
getMInversa <- function() MInversa
list(set = set, get = get,
setMInversa = setMInversa,
getMInversa = getMInversa)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
MInversa <- x$getMInversa()
if(!is.null(MInversa)) {
message("getting cached data")
return(MInversa)
}
data <- x$get()
MInversa<- solve(data, ...)
x$setMInversa(MInversa)
MInversa
}
| /cachematrix.R | no_license | huatusco/ProgrammingAssignment2 | R | false | false | 905 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix() ) {
MInversa <- NULL
set <- function(y) {
x <<- y
MInversa <<- NULL
}
get <- function() x
setMInversa <- function(solve) MInversa <<- solve
getMInversa <- function() MInversa
list(set = set, get = get,
setMInversa = setMInversa,
getMInversa = getMInversa)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
MInversa <- x$getMInversa()
if(!is.null(MInversa)) {
message("getting cached data")
return(MInversa)
}
data <- x$get()
MInversa<- solve(data, ...)
x$setMInversa(MInversa)
MInversa
}
|
\name{DAT}
\alias{DAT}
\docType{data}
\title{
An Example of Dosing History Table
}
\description{
This is a conventional NONMEM input data format.
}
\usage{DAT}
\format{
This data frame has 5 columns with 18 time-points for the simulation.
\describe{
\item{\code{TIME}}{Time}
\item{\code{AMT}}{Amount given for the compartment of \code{CMT} column}
\item{\code{RATE}}{Infusion rate}
\item{\code{CMT}}{Compartment number, 1=gut, 2=central, 3=peripheral, etc.}
\item{\code{DV}}{Currently blank and not used.}
}
}
\details{
To be used at \code{Comp1} or \code{nComp}, expand dosing history with \code{ExpandDH} function.
}
\keyword{datasets}
| /man/DAT.Rd | no_license | asancpt/wnl | R | false | false | 692 | rd | \name{DAT}
\alias{DAT}
\docType{data}
\title{
An Example of Dosing History Table
}
\description{
This is a conventional NONMEM input data format.
}
\usage{DAT}
\format{
This data frame has 5 columns with 18 time-points for the simulation.
\describe{
\item{\code{TIME}}{Time}
\item{\code{AMT}}{Amount given for the compartment of \code{CMT} column}
\item{\code{RATE}}{Infusion rate}
\item{\code{CMT}}{Compartment number, 1=gut, 2=central, 3=peripheral, etc.}
\item{\code{DV}}{Currently blank and not used.}
}
}
\details{
To be used at \code{Comp1} or \code{nComp}, expand dosing history with \code{ExpandDH} function.
}
\keyword{datasets}
|
# Set the working directory where data source file(s) downloaded
setwd("C:/Data-Science/Working_Test1/ExploratoryDataAnalysis_CourseProj2")
# Load the RDS file/ data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# sum the 'Emissions' data by year
EmissionsByYear <- aggregate(NEI[, 'Emissions'], by = list(NEI$year), FUN = sum)
EmissionsByYear$PM <- round(EmissionsByYear[, 2] /1000, 2)
# Create the PNG file
png(filename = "plot1.png")
barplot(EmissionsByYear$PM, EmissionsByYear$Group.1, names.arg = EmissionsByYear$Group.1, main = expression('Total Emission of PM'[2.5]), xlab = 'Year', ylab = 'PM2.5 in Kilotons')
# Shut down the graphic device
dev.off() | /plot1.R | no_license | SidduRam/ExplDataCourseProj2 | R | false | false | 705 | r |
# Set the working directory where data source file(s) downloaded
setwd("C:/Data-Science/Working_Test1/ExploratoryDataAnalysis_CourseProj2")
# Load the RDS file/ data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# sum the 'Emissions' data by year
EmissionsByYear <- aggregate(NEI[, 'Emissions'], by = list(NEI$year), FUN = sum)
EmissionsByYear$PM <- round(EmissionsByYear[, 2] /1000, 2)
# Create the PNG file
png(filename = "plot1.png")
barplot(EmissionsByYear$PM, EmissionsByYear$Group.1, names.arg = EmissionsByYear$Group.1, main = expression('Total Emission of PM'[2.5]), xlab = 'Year', ylab = 'PM2.5 in Kilotons')
# Shut down the graphic device
dev.off() |
#' Up-Sample a Data Set Based on a Factor Variable
#'
#' `step_upsample` creates a *specification* of a recipe step that
#' will replicate rows of a data set to make the occurrence of
#' levels in a specific factor level equal.
#'
#' @inheritParams recipes::step_center
#' @param ... One or more selector functions to choose which
#' variable is used to sample the data. See [selections()]
#' for more details. The selection should result in _single
#' factor variable_. For the `tidy` method, these are not
#' currently used.
#' @param role Not used by this step since no new variables are
#' created.
#' @param column A character string of the variable name that will
#' be populated (eventually) by the `...` selectors.
#' @param over_ratio A numeric value for the ratio of the
#' majority-to-minority frequencies. The default value (1) means
#' that all other levels are sampled up to have the same
#' frequency as the most occurring level. A value of 0.5 would mean
#' that the minority levels will have (at most) (approximately)
#' half as many rows than the majority level.
#' @param ratio Deprecated argument; same as `over_ratio`.
#' @param target An integer that will be used to subsample. This
#' should not be set by the user and will be populated by `prep`.
#' @param seed An integer that will be used as the seed when upsampling.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` which is
#' the variable used to sample.
#' @details
#' Up-sampling is intended to be performed on the _training_ set
#' alone. For this reason, the default is `skip = TRUE`. It is
#' advisable to use `prep(recipe, retain = TRUE)` when preparing
#' the recipe; in this way [juice()] can be used to obtain the
#' up-sampled version of the data.
#'
#' If there are missing values in the factor variable that is used
#' to define the sampling, missing data are selected at random in
#' the same way that the other factor levels are sampled. Missing
#' values are not used to determine the amount of data in the
#' majority level (see example below).
#'
#' For any data with factor levels occurring with the same
#' frequency as the majority level, all data will be retained.
#'
#' All columns in the data are sampled and returned by [juice()]
#' and [bake()].
#'
#' @keywords datagen
#' @concept preprocessing
#' @concept subsampling
#' @export
#' @examples
#' library(recipes)
#' library(modeldata)
#' data(okc)
#'
#' orig <- table(okc$diet, useNA = "always")
#'
#' sort(orig, decreasing = TRUE)
#'
#' up_rec <- recipe( ~ ., data = okc) %>%
#' # Bring the minority levels up to about 200 each
#' # 200/16562 is approx 0.0121
#' step_upsample(diet, over_ratio = 0.0121) %>%
#' prep(training = okc, retain = TRUE)
#'
#' training <- table(juice(up_rec)$diet, useNA = "always")
#'
#' # Since `skip` defaults to TRUE, baking the step has no effect
#' baked_okc <- bake(up_rec, new_data = okc)
#' baked <- table(baked_okc$diet, useNA = "always")
#'
#' # Note that if the original data contained more rows than the
#' # target n (= ratio * majority_n), the data are left alone:
#' data.frame(
#' level = names(orig),
#' orig_freq = as.vector(orig),
#' train_freq = as.vector(training),
#' baked_freq = as.vector(baked)
#' )
#'
#' library(ggplot2)
#'
#' ggplot(circle_example, aes(x, y, color = class)) +
#' geom_point() +
#' labs(title = "Without upsample")
#'
#' recipe(class ~ ., data = circle_example) %>%
#'   step_upsample(class) %>%
#' prep() %>%
#' juice() %>%
#' ggplot(aes(x, y, color = class)) +
#' geom_jitter() +
#' labs(title = "With upsample (with jittering)")
step_upsample <-
  function(recipe, ..., over_ratio = 1, ratio = NA, role = NA, trained = FALSE,
           column = NULL, target = NA, skip = TRUE,
           seed = sample.int(10^5, 1),
           id = rand_id("upsample")) {
    # `ratio` is the deprecated spelling of `over_ratio`. When the caller
    # supplies a `ratio` that differs from `over_ratio`, emit the
    # deprecation message once and let `ratio` win (old-code behaviour).
    # `&&` (scalar, short-circuiting) replaces the original `&`, which
    # needlessly evaluated `all(over_ratio != ratio)` — an NA — whenever
    # `ratio` was left at its NA default.
    if (!is.na(ratio) && all(over_ratio != ratio)) {
      message(
        paste(
          "The `ratio` argument is now deprecated in favor of `over_ratio`.",
          "`ratio` will be removed in a subsequent version."
        )
      )
      # The original re-tested !is.na(ratio) here; it is always TRUE inside
      # this branch, so the assignment is unconditional.
      over_ratio <- ratio
    }
    # Register the (untrained) step on the recipe; `target` stays NA until
    # prep() computes it from the majority-class frequency.
    add_step(recipe,
             step_upsample_new(
               terms = ellipse_check(...),
               over_ratio = over_ratio,
               ratio = ratio,
               role = role,
               trained = trained,
               column = column,
               target = target,
               skip = skip,
               seed = seed,
               id = id
             ))
  }
step_upsample_new <-
  function(terms, over_ratio, ratio, role, trained, column, target, skip, seed,
           id) {
    # Thin constructor: every argument is forwarded unchanged to the generic
    # step() builder under the "upsample" subclass.
    fields <- list(
      subclass = "upsample",
      terms = terms,
      over_ratio = over_ratio,
      ratio = ratio,
      role = role,
      trained = trained,
      column = column,
      target = target,
      skip = skip,
      id = id,
      seed = seed
    )
    do.call(step, fields)
  }
#' @export
prep.step_upsample <- function(x, training, info = NULL, ...) {
  # Resolve the selector expressions to a concrete column name in `training`.
  col_name <- terms_select(x$terms, info = info)
  if (length(col_name) != 1) {
    rlang::abort("Please select a single factor variable.")
  }
  if (!is.factor(training[[col_name]])) {
    rlang::abort(paste0(col_name, " should be a factor variable."))
  }
  # The most frequent level defines the count that all other levels are
  # brought up towards, scaled by `over_ratio`.
  level_counts <- table(training[[col_name]])
  majority_n <- max(level_counts)
  step_upsample_new(
    terms = x$terms,
    ratio = x$ratio,
    over_ratio = x$over_ratio,
    role = x$role,
    trained = TRUE,
    column = col_name,
    target = floor(majority_n * x$over_ratio),
    skip = x$skip,
    id = x$id,
    seed = x$seed
  )
}
supsamp <- function(x, num) {
  # Resample the rows of the data frame `x` up to `num` rows.
  #
  # Levels that already have `num` or more rows are returned untouched,
  # matching the documented behavior that data above the target are "left
  # alone".  (The original sampled `max(num, n)` rows with replacement, which
  # bootstrap-resampled -- i.e. shuffled and duplicated -- groups larger than
  # the target instead of leaving them as-is.)  Smaller groups are sampled
  # with replacement up to exactly `num` rows.  `seq_len(n)` is used instead
  # of `1:n` so a zero-row input cannot produce the c(1, 0) index bug.
  n <- nrow(x)
  if (n >= num) {
    out <- x
  } else {
    # upsampling is done with replacement
    out <- x[sample(seq_len(n), num, replace = TRUE), ]
  }
  out
}
#' @export
bake.step_upsample <- function(object, new_data, ...) {
# Rows where the selected column is NA are set aside first: split() below
# drops NA groups, so they must be upsampled separately afterwards.
if (any(is.na(new_data[[object$column]])))
missing <- new_data[is.na(new_data[[object$column]]), ]
else
missing <- NULL
# One data frame per observed factor level of the selected column.
split_up <- split(new_data, new_data[[object$column]])
# Upsample with seed for reproducibility
with_seed(
seed = object$seed,
code = {
# Bring every level up to object$target rows and stack the results;
# the NA subset (if any) is appended with the same target size.
new_data <- map_dfr(split_up, supsamp, num = object$target)
if (!is.null(missing)) {
new_data <- bind_rows(new_data, supsamp(missing, object$target))
}
}
)
as_tibble(new_data)
}
print.step_upsample <-
  function(x, width = max(20, options()$width - 26), ...) {
    # Emit the step header, then delegate rendering of the selected
    # column/terms to the shared recipes printer() helper; like other print
    # methods, return the input invisibly.
    cat("Up-sampling based on ")
    printer(x$column, x$terms, x$trained, width = width)
    invisible(x)
  }
#' @rdname step_upsample
#' @param x A `step_upsample` object.
#' @export
tidy.step_upsample <- function(x, ...) {
  # After prep(), the resolved column name lives in x$column; before that,
  # fall back to the character form of the original selectors.
  shown_terms <- if (is_trained(x)) {
    x$column
  } else {
    unname(sel2char(x$terms))
  }
  res <- tibble(terms = shown_terms)
  res$id <- x$id
  res
}
| /R/upsample.R | permissive | minghao2016/themis | R | false | false | 6,967 | r | #' Up-Sample a Data Set Based on a Factor Variable
#'
#' `step_upsample` creates a *specification* of a recipe step that
#' will replicate rows of a data set to make the occurrence of
#' levels in a specific factor level equal.
#'
#' @inheritParams recipes::step_center
#' @param ... One or more selector functions to choose which
#' variable is used to sample the data. See [selections()]
#' for more details. The selection should result in _single
#' factor variable_. For the `tidy` method, these are not
#' currently used.
#' @param role Not used by this step since no new variables are
#' created.
#' @param column A character string of the variable name that will
#' be populated (eventually) by the `...` selectors.
#' @param over_ratio A numeric value for the ratio of the
#' majority-to-minority frequencies. The default value (1) means
#' that all other levels are sampled up to have the same
#' frequency as the most occurring level. A value of 0.5 would mean
#' that the minority levels will have (at most) (approximately)
#' half as many rows than the majority level.
#' @param ratio Deprecated argument; same as `over_ratio`.
#' @param target An integer that will be used to subsample. This
#' should not be set by the user and will be populated by `prep`.
#' @param seed An integer that will be used as the seed when upsampling.
#' @return An updated version of `recipe` with the new step
#' added to the sequence of existing steps (if any). For the
#' `tidy` method, a tibble with columns `terms` which is
#' the variable used to sample.
#' @details
#' Up-sampling is intended to be performed on the _training_ set
#' alone. For this reason, the default is `skip = TRUE`. It is
#' advisable to use `prep(recipe, retain = TRUE)` when preparing
#' the recipe; in this way [juice()] can be used to obtain the
#' up-sampled version of the data.
#'
#' If there are missing values in the factor variable that is used
#' to define the sampling, missing data are selected at random in
#' the same way that the other factor levels are sampled. Missing
#' values are not used to determine the amount of data in the
#' majority level (see example below).
#'
#' For any data with factor levels occurring with the same
#' frequency as the majority level, all data will be retained.
#'
#' All columns in the data are sampled and returned by [juice()]
#' and [bake()].
#'
#' @keywords datagen
#' @concept preprocessing
#' @concept subsampling
#' @export
#' @examples
#' library(recipes)
#' library(modeldata)
#' data(okc)
#'
#' orig <- table(okc$diet, useNA = "always")
#'
#' sort(orig, decreasing = TRUE)
#'
#' up_rec <- recipe( ~ ., data = okc) %>%
#' # Bring the minority levels up to about 200 each
#' # 200/16562 is approx 0.0121
#' step_upsample(diet, over_ratio = 0.0121) %>%
#' prep(training = okc, retain = TRUE)
#'
#' training <- table(juice(up_rec)$diet, useNA = "always")
#'
#' # Since `skip` defaults to TRUE, baking the step has no effect
#' baked_okc <- bake(up_rec, new_data = okc)
#' baked <- table(baked_okc$diet, useNA = "always")
#'
#' # Note that if the original data contained more rows than the
#' # target n (= ratio * majority_n), the data are left alone:
#' data.frame(
#' level = names(orig),
#' orig_freq = as.vector(orig),
#' train_freq = as.vector(training),
#' baked_freq = as.vector(baked)
#' )
#'
#' library(ggplot2)
#'
#' ggplot(circle_example, aes(x, y, color = class)) +
#' geom_point() +
#' labs(title = "Without upsample")
#'
#' recipe(class ~ ., data = circle_example) %>%
#' step_upsample(class) %>%
#' prep() %>%
#' juice() %>%
#' ggplot(aes(x, y, color = class)) +
#' geom_jitter() +
#' labs(title = "With upsample (with jittering)")
step_upsample <-
  function(recipe, ..., over_ratio = 1, ratio = NA, role = NA, trained = FALSE,
           column = NULL, target = NA, skip = TRUE,
           seed = sample.int(10^5, 1),
           id = rand_id("upsample")) {
    # `ratio` is soft-deprecated in favor of `over_ratio`.  If the caller
    # supplied a `ratio` that differs from `over_ratio`, message once and
    # honour the legacy argument.  `&&` (not `&`) because `if ()` needs a
    # scalar condition; the inner `is.na(ratio)` re-check of the original
    # was redundant (the outer condition already guarantees it).
    if (!is.na(ratio) && all(over_ratio != ratio)) {
      message(
        paste(
          "The `ratio` argument is now deprecated in favor of `over_ratio`.",
          "`ratio` will be removed in a subsequent version."
        )
      )
      over_ratio <- ratio
    }
    # Register the (untrained) step on the recipe; `target` and `column`
    # are filled in later by prep.step_upsample().
    add_step(
      recipe,
      step_upsample_new(
        terms = ellipse_check(...),
        over_ratio = over_ratio,
        ratio = ratio,
        role = role,
        trained = trained,
        column = column,
        target = target,
        skip = skip,
        seed = seed,
        id = id
      )
    )
  }
step_upsample_new <-
  function(terms, over_ratio, ratio, role, trained, column, target, skip, seed,
           id) {
    # Thin constructor: every argument is forwarded unchanged to the generic
    # step() builder under the "upsample" subclass.
    fields <- list(
      subclass = "upsample",
      terms = terms,
      over_ratio = over_ratio,
      ratio = ratio,
      role = role,
      trained = trained,
      column = column,
      target = target,
      skip = skip,
      id = id,
      seed = seed
    )
    do.call(step, fields)
  }
#' @export
prep.step_upsample <- function(x, training, info = NULL, ...) {
  # Resolve the selector expressions to a concrete column name in `training`.
  col_name <- terms_select(x$terms, info = info)
  if (length(col_name) != 1) {
    rlang::abort("Please select a single factor variable.")
  }
  if (!is.factor(training[[col_name]])) {
    rlang::abort(paste0(col_name, " should be a factor variable."))
  }
  # The most frequent level defines the count that all other levels are
  # brought up towards, scaled by `over_ratio`.
  level_counts <- table(training[[col_name]])
  majority_n <- max(level_counts)
  step_upsample_new(
    terms = x$terms,
    ratio = x$ratio,
    over_ratio = x$over_ratio,
    role = x$role,
    trained = TRUE,
    column = col_name,
    target = floor(majority_n * x$over_ratio),
    skip = x$skip,
    id = x$id,
    seed = x$seed
  )
}
supsamp <- function(x, num) {
  # Resample the rows of the data frame `x` up to `num` rows.
  #
  # Levels that already have `num` or more rows are returned untouched,
  # matching the documented behavior that data above the target are "left
  # alone".  (The original sampled `max(num, n)` rows with replacement, which
  # bootstrap-resampled -- i.e. shuffled and duplicated -- groups larger than
  # the target instead of leaving them as-is.)  Smaller groups are sampled
  # with replacement up to exactly `num` rows.  `seq_len(n)` is used instead
  # of `1:n` so a zero-row input cannot produce the c(1, 0) index bug.
  n <- nrow(x)
  if (n >= num) {
    out <- x
  } else {
    # upsampling is done with replacement
    out <- x[sample(seq_len(n), num, replace = TRUE), ]
  }
  out
}
#' @export
bake.step_upsample <- function(object, new_data, ...) {
# Rows where the selected column is NA are set aside first: split() below
# drops NA groups, so they must be upsampled separately afterwards.
if (any(is.na(new_data[[object$column]])))
missing <- new_data[is.na(new_data[[object$column]]), ]
else
missing <- NULL
# One data frame per observed factor level of the selected column.
split_up <- split(new_data, new_data[[object$column]])
# Upsample with seed for reproducibility
with_seed(
seed = object$seed,
code = {
# Bring every level up to object$target rows and stack the results;
# the NA subset (if any) is appended with the same target size.
new_data <- map_dfr(split_up, supsamp, num = object$target)
if (!is.null(missing)) {
new_data <- bind_rows(new_data, supsamp(missing, object$target))
}
}
)
as_tibble(new_data)
}
print.step_upsample <-
  function(x, width = max(20, options()$width - 26), ...) {
    # Emit the step header, then delegate rendering of the selected
    # column/terms to the shared recipes printer() helper; like other print
    # methods, return the input invisibly.
    cat("Up-sampling based on ")
    printer(x$column, x$terms, x$trained, width = width)
    invisible(x)
  }
#' @rdname step_upsample
#' @param x A `step_upsample` object.
#' @export
tidy.step_upsample <- function(x, ...) {
  # After prep(), the resolved column name lives in x$column; before that,
  # fall back to the character form of the original selectors.
  shown_terms <- if (is_trained(x)) {
    x$column
  } else {
    unname(sel2char(x$terms))
  }
  res <- tibble(terms = shown_terms)
  res$id <- x$id
  res
}
|
InspectVariable=function(Feature,N='Feature',i=1,xlim,ylim,sampleSize=100000,main){
#InspectVariable(Feature,i,N)
# Quick visual inspection of the distribution of a single variable: draws a
# histogram, a Pareto density estimate (PDE) plot, a normal QQ plot, a
# box-whisker plot and a bar showing the fraction of NaN/NA values.
#
# INPUT
# Feature[1:n] Vector of Data to be plotted
#
# OPTIONAL
# N string giving the variable name
# i index of the variable (useful when looping over many variables)
# xlim[2] x-axis limits for the PDE plot
# ylim[2] y-axis limits for the PDE plot
# sampleSize default(100000), sample size used if the data vector is too big
# OUTPUT
# none (draws into the current graphics device)
#
# uses PDEplot()
# uses histopt()
#
# MT 11/2014
# helper: TRUE iff x is a numeric scalar
isnumber=function(x) return(is.numeric(x)&length(x)==1)
if(!isnumber(i))
stop('"i" is not a numeric number of length 1. Please change Input.')
if(!isnumber(sampleSize))
stop('"sampleSize" is not a numeric number of length 1. Please change Input.')
if(!is.vector(Feature)){
Feature=as.vector(Feature)
warning('Feature is not a vector. Calling as.vector()')
}
if(!is.numeric(Feature)){
Feature=as.numeric(Feature)
warning('Feature is not a numeric. Calling as.numeric()')
}
def.par <-
par(no.readonly = TRUE) # save default, for resetting...
# clean the data: drop infinite values before computing the range
D = Feature[!is.infinite(Feature)]
MinD = min(D, na.rm = TRUE)
MaxD = max(D, na.rm = TRUE)
#m <- layout(matrix(c(1, 1, 3, 3,1,1,3,3,2,2,3,3,2,2,4,4), 4, 4))
# 4x4 layout with five panels: 1=histogram, 2=PDE, 3=QQ, 4=boxplot, 5=NaN bar
m <-
graphics::layout(matrix(c(1, 1, 3, 3, 1, 1, 3, 3, 2, 2, 4, 4, 2, 2, 5, 5), 4, 4))
par(oma = c(0, 0, 1, 0))# outer margins c(bottom, left, top, right)
# histogram panel
#par(fig=c(0, .51, 0.5, 0.98), new = TRUE)
# optNrOfBins = OptimalNoBins(D)
# minData = min(D,na.rm = TRUE)
# maxData = max(D,na.rm = TRUE)
# i = maxData-minData
# optBreaks = seq(minData, maxData, i/optNrOfBins) # bins in fixed intervals
# hist(D, breaks=optBreaks,xlab=N)
optNrOfBins=OptimalNoBins(D)
optNrOfBins = min(100,optNrOfBins) # cap the number of bins at 100
optBreaks <- seq(MinD, MaxD, abs(MinD-MaxD)/optNrOfBins)
# all bins have the same width
if(length(optBreaks)>1)
temp <- hist(D, breaks=optBreaks, plot=FALSE)
else
temp <- hist(D, plot=FALSE)
#box();
Breaks <- temp$breaks; nB <- length(Breaks)
y <- temp$counts;
xlab=N
ylab='Frequency'
# empty canvas first; the bars are drawn manually with rect() below
plot(x=c(MinD,MaxD), y=c(0, max(temp$counts,na.rm=TRUE)*1.2), type="n", main='',xaxs='i',yaxs='i',axes=FALSE,xlab=xlab, ylab='',xlim=c(MinD,MaxD), ylim=c(0, max(temp$counts,na.rm=TRUE)*1.2))
par(mgp=c(2.2,0.6,0)) # axis spacing: c(title, labels, axis line)
rect(Breaks[-nB], 0, Breaks[-1], y, col="blue", border="light blue",xlab='',ylab=ylab,xlim=c(MinD,MaxD), ylim=c(0, max(temp$counts,na.rm=TRUE)*1.2))
axis(1,col="black",las=1,xaxs='i') # x-axis
axis(2,col="black",las=1,yaxs='i') # y-axis
title(ylab=ylab)
#histopt(D, '', AxisLabs = TRUE, xlab = N)
#lines(x=a$kernels,y=rep(0,length(a$kernels)),col = "black",lwd = 1)
# panel for the PDE plot
#par(fig=c(0.49, 1, 0.5, 0.98), new = TRUE)
# subsample very large vectors to keep the density estimate fast
if (length(D) > sampleSize) {
ind = sample(1:length(D), size = sampleSize)
D2 = D[ind]
} else{
D2 = D
}
pdeVal = ParetoDensityEstimation(D2)
# three branches so that user-supplied axis limits are honoured when given
if (missing(xlim) && missing(ylim)) {
plot(
pdeVal$kernels,
pdeVal$paretoDensity,
type = 'l',
xaxs = 'i',
yaxs = 'i',
xlab = N,
ylab = 'PDE',
col = 'blue'
)
} else if (missing(ylim)) {
plot(
pdeVal$kernels,
pdeVal$paretoDensity,
type = 'l',
xaxs = 'i',
yaxs = 'i',
xlab = N,
ylab = 'PDE',
xlim = xlim,
ylim = NULL,
col = 'blue'
)
} else{
plot(
pdeVal$kernels,
pdeVal$paretoDensity,
type = 'l',
xaxs = 'i',
yaxs = 'i',
xlab = N,
ylab = 'PDE',
xlim = xlim,
ylim = ylim,
col = 'blue'
)
}
# panel for the QQ plot
#par(mgp=c(2,0.5,0)) # spacing: c(title, labels, axis)
#plot(x=c(-4.5,4.5),y=c(min(Feature),max(Feature)), xlab="Normalverteilung", ylab=N,axes=TRUE,type='n',xlim=c(-4.5,4.5),ylim=c(min(Feature),max(Feature)))
#par(mar=c(3,4,2,1)) #c(u,li,o,re)
par(pty = "s")# draw this plot square
qqnorm(
D2,
pch = 20,
col = "blue",
axes = TRUE,
xlim = c(-4.5, 4.5),
ylim = c(MinD, MaxD),
main = '',
xlab = "Normal Distribution",
ylab = N
)
axis(4, col = "black", las = 3) # y-axis
grid(lty = 'dashed', col = 'black')
mtext(
'Normal QQ-Plot',
side = 3,
line = 0,
cex = 1,
col = "black"
)
box(lwd = 1, col = 'White') # box + line width
# panel for the box-whisker plot
#par(fig=c(.75, 1, 0, 0.5), new = TRUE)
par(pty = "m")
boxplot(
D,
axes = FALSE,
ylim = c(MinD, MaxD),
xlim = c(0.7, 1.4)
)
mtext(
paste0('Range:[', round(MinD, 2), ',', round(MaxD, 2), ']'),
side = 3,
line = 0,
cex = 0.6,
col = "black"
)
# fraction of non-finite/missing entries, shown in the NaN bar panel
NaNs = (sum(is.infinite(Feature)) + sum(is.na(Feature))) / length(Feature)
#if(length(NaNs)>0)
barplot(
NaNs,
ylab = 'NaNs in %',
main = paste0(round(NaNs, 4), ' %'),
xlim = c(0, 3),
ylim = c(0, 1)
)
if (any(is.nan(Feature), na.rm = TRUE))
print(
'NaNs in Feature found. This message is only important, if after rounding the percent of NaN is zero in the bar plot.'
)
if (any(is.infinite(Feature), na.rm = TRUE))
warning('Infinite values in Feature found.')
#else
#print('No NaNs found')
# overall title panel at the top of the device
def = par(fig = c(0, 0.9, 0.8, 1), new = TRUE)
if (missing(main))
mtext(
paste('VarNr.:', i, N),
side = 3,
line = 1,
cex = 1.5,
col = "black"
)
else
mtext(
main,
side = 3,
line = 1,
cex = 1.5,
col = "black"
)
par(def.par) # restore the saved graphics parameters
#box("outer", col="black")
}
| /DataVisualizations/R/InspectVariable.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 5,855 | r | InspectVariable=function(Feature,N='Feature',i=1,xlim,ylim,sampleSize=100000,main){
#InspectVariable(Feature,i,N)
# ermoeglichst eine Schnelle Verteilungsbetrachtung einzelner variable
#
# INPUT
# Feature[1:n] Vector of Data to be plotted
#
# OPTIONAL
# N string, welcher Variablennamen angibt
# i In for schleife bei vielen Variablen, Nummer der Variablen
# xlim[2] x-Achsengrenzen fuer PDEplot
# ylim[2] y-Achsengrenzen fuer PDEplot
# sampleSize default(100000), sample size, if datavector is to big
# OUTPUT
#
# uses PDEplot()
# uses histopt()
#
# MT 11/2014
isnumber=function(x) return(is.numeric(x)&length(x)==1)
if(!isnumber(i))
stop('"i" is not a numeric number of length 1. Please change Input.')
if(!isnumber(sampleSize))
stop('"sampleSize" is not a numeric number of length 1. Please change Input.')
if(!is.vector(Feature)){
Feature=as.vector(Feature)
warning('Feature is not a vector. Calling as.vector()')
}
if(!is.numeric(Feature)){
Feature=as.numeric(Feature)
warning('Feature is not a numeric. Calling as.numeric()')
}
def.par <-
par(no.readonly = TRUE) # save default, for resetting...
# Daten bereinigen
D = Feature[!is.infinite(Feature)]
MinD = min(D, na.rm = TRUE)
MaxD = max(D, na.rm = TRUE)
#m <- layout(matrix(c(1, 1, 3, 3,1,1,3,3,2,2,3,3,2,2,4,4), 4, 4))
m <-
graphics::layout(matrix(c(1, 1, 3, 3, 1, 1, 3, 3, 2, 2, 4, 4, 2, 2, 5, 5), 4, 4))
par(oma = c(0, 0, 1, 0))#c(u,li,o,re) in
# histogramme
#par(fig=c(0, .51, 0.5, 0.98), new = TRUE)
# optNrOfBins = OptimalNoBins(D)
# minData = min(D,na.rm = TRUE)
# maxData = max(D,na.rm = TRUE)
# i = maxData-minData
# optBreaks = seq(minData, maxData, i/optNrOfBins) # bins in fixed intervals
# hist(D, breaks=optBreaks,xlab=N)
optNrOfBins=OptimalNoBins(D)
optNrOfBins = min(100,optNrOfBins) #
optBreaks <- seq(MinD, MaxD, abs(MinD-MaxD)/optNrOfBins)
# bins haben alle die gleiche groesse
if(length(optBreaks)>1)
temp <- hist(D, breaks=optBreaks, plot=FALSE)
else
temp <- hist(D, plot=FALSE)
#box();
Breaks <- temp$breaks; nB <- length(Breaks)
y <- temp$counts;
xlab=N
ylab='Frequency'
plot(x=c(MinD,MaxD), y=c(0, max(temp$counts,na.rm=TRUE)*1.2), type="n", main='',xaxs='i',yaxs='i',axes=FALSE,xlab=xlab, ylab='',xlim=c(MinD,MaxD), ylim=c(0, max(temp$counts,na.rm=TRUE)*1.2))
par(mgp=c(2.2,0.6,0)) #Abstand: c(Titel, Label, Achse)
rect(Breaks[-nB], 0, Breaks[-1], y, col="blue", border="light blue",xlab='',ylab=ylab,xlim=c(MinD,MaxD), ylim=c(0, max(temp$counts,na.rm=TRUE)*1.2))
axis(1,col="black",las=1,xaxs='i') #x-Achse
axis(2,col="black",las=1,yaxs='i') #y-Achse
title(ylab=ylab)
#histopt(D, '', AxisLabs = TRUE, xlab = N)
#lines(x=a$kernels,y=rep(0,length(a$kernels)),col = "black",lwd = 1)
#Fenster fuer PDEplot
#par(fig=c(0.49, 1, 0.5, 0.98), new = TRUE)
if (length(D) > sampleSize) {
ind = sample(1:length(D), size = sampleSize)
D2 = D[ind]
} else{
D2 = D
}
pdeVal = ParetoDensityEstimation(D2)
if (missing(xlim) && missing(ylim)) {
plot(
pdeVal$kernels,
pdeVal$paretoDensity,
type = 'l',
xaxs = 'i',
yaxs = 'i',
xlab = N,
ylab = 'PDE',
col = 'blue'
)
} else if (missing(ylim)) {
plot(
pdeVal$kernels,
pdeVal$paretoDensity,
type = 'l',
xaxs = 'i',
yaxs = 'i',
xlab = N,
ylab = 'PDE',
xlim = xlim,
ylim = NULL,
col = 'blue'
)
} else{
plot(
pdeVal$kernels,
pdeVal$paretoDensity,
type = 'l',
xaxs = 'i',
yaxs = 'i',
xlab = N,
ylab = 'PDE',
xlim = xlim,
ylim = ylim,
col = 'blue'
)
}
#Fenster fuer QQplot
#par(mgp=c(2,0.5,0)) #Abstand: c(Titel, Label, Achse)
#plot(x=c(-4.5,4.5),y=c(min(Feature),max(Feature)), xlab="Normalverteilung", ylab=N,axes=TRUE,type='n',xlim=c(-4.5,4.5),ylim=c(min(Feature),max(Feature)))
#par(mar=c(3,4,2,1)) #c(u,li,o,re)
par(pty = "s")# Plot immer quadratisch
qqnorm(
D2,
pch = 20,
col = "blue",
axes = TRUE,
xlim = c(-4.5, 4.5),
ylim = c(MinD, MaxD),
main = '',
xlab = "Normal Distribution",
ylab = N
)
axis(4, col = "black", las = 3) #y-Achse
grid(lty = 'dashed', col = 'black')
mtext(
'Normal QQ-Plot',
side = 3,
line = 0,
cex = 1,
col = "black"
)
box(lwd = 1, col = 'White') # box + Liniendick
# Fenster fuer Box-whisker diagramm
#par(fig=c(.75, 1, 0, 0.5), new = TRUE)
par(pty = "m")
boxplot(
D,
axes = FALSE,
ylim = c(MinD, MaxD),
xlim = c(0.7, 1.4)
)
mtext(
paste0('Range:[', round(MinD, 2), ',', round(MaxD, 2), ']'),
side = 3,
line = 0,
cex = 0.6,
col = "black"
)
NaNs = (sum(is.infinite(Feature)) + sum(is.na(Feature))) / length(Feature)
#if(length(NaNs)>0)
barplot(
NaNs,
ylab = 'NaNs in %',
main = paste0(round(NaNs, 4), ' %'),
xlim = c(0, 3),
ylim = c(0, 1)
)
if (any(is.nan(Feature), na.rm = TRUE))
print(
'NaNs in Feature found. This message is only important, if after rounding the percent of NaN is zero in the bar plot.'
)
if (any(is.infinite(Feature), na.rm = TRUE))
warning('Infinite values in Feature found.')
#else
#print('No NaNs found')
def = par(fig = c(0, 0.9, 0.8, 1), new = TRUE)
if (missing(main))
mtext(
paste('VarNr.:', i, N),
side = 3,
line = 1,
cex = 1.5,
col = "black"
)
else
mtext(
main,
side = 3,
line = 1,
cex = 1.5,
col = "black"
)
par(def.par)
#box("outer", col="black")
}
|
#@since 1.8.1
주어진 문자열의 축약어를 만들어주는 모듈입니다.
= module Abbrev
주어진 문자열의 축약어를 만들어주는 모듈입니다.
=== 예
require 'abbrev'
require 'pp'
pp Abbrev.abbrev(%w[ruby rules]).sort
# => [["rub", "ruby"],
# ["ruby", "ruby"],
# ["rul", "rules"],
# ["rule", "rules"],
# ["rules", "rules"]]
== Module Functions
--- abbrev(words, pattern = nil) -> Hash
文字列の配列から一意に決まる短縮形を計算し、
短縮形をキー、元の文字列を値とするハッシュを返します。
第二引数に正規表現を指定すると、words のうちそのパターンにマッチしたものから短縮形を計算します。
第二引数に文字列を指定すると、words のうちその文字列で始まるものから短縮形を計算します。
@param words 元となる文字列の配列。
@param pattern [[c:Regexp]] か [[c:String]] を指定します。
@return 短縮形をキー、元の文字列を値とするハッシュを返します。
# words に同じ文字列が含まれている場合は
# 以下のようにその文字列しか返しません。 pp Abbrev.abbrev(%w[ruby ruby]).sort
# => [["ruby", "ruby"]]
# 空白が含まれていても適切に処理します。 pp Abbrev.abbrev(['ru by']).sort
# => [["r", "ru by"],
# ["ru", "ru by"],
# ["ru ", "ru by"],
# ["ru b", "ru by"],
# ["ru by", "ru by"]]
# sort していない例
p %w[ruby rubyist].abbrev
#=> {"ruby" => "ruby",
# "rubyi" => "rubyist",
# "rubyis" => "rubyist",
# "rubyist" => "rubyist"}
= reopen Array
== Instance Methods
--- abbrev(pattern = nil) -> Hash
self が文字列の配列の場合、self から一意に決まる短縮形を計算し、
短縮形をキー、元の文字列を値とするハッシュを返します。
引数に正規表現を指定すると、self のうちそのパターンにマッチしたものから短縮形を計算します。
引数に文字列を指定すると、self のうちその文字列で始まるものから短縮形を計算します。
[[m:Abbrev.#abbrev]](self, pattern) と同じです。
@param pattern [[c:Regexp]] か [[c:String]] を指定します。
p %w[ruby rubyist].abbrev
#=> {"ruby" => "ruby",
# "rubyi" => "rubyist",
# "rubyis" => "rubyist",
# "rubyist" => "rubyist"}
@see [[m:Abbrev.#abbrev]]
#@end
| /target/rubydoc/refm/api/src/abbrev.rd | no_license | nacyot/omegat-rurima-ruby | R | false | false | 2,539 | rd | #@since 1.8.1
주어진 문자열의 축약어를 만들어주는 모듈입니다.
= module Abbrev
주어진 문자열의 축약어를 만들어주는 모듈입니다.
=== 예
require 'abbrev'
require 'pp'
pp Abbrev.abbrev(%w[ruby rules]).sort
# => [["rub", "ruby"],
# ["ruby", "ruby"],
# ["rul", "rules"],
# ["rule", "rules"],
# ["rules", "rules"]]
== Module Functions
--- abbrev(words, pattern = nil) -> Hash
文字列の配列から一意に決まる短縮形を計算し、
短縮形をキー、元の文字列を値とするハッシュを返します。
第二引数に正規表現を指定すると、words のうちそのパターンにマッチしたものから短縮形を計算します。
第二引数に文字列を指定すると、words のうちその文字列で始まるものから短縮形を計算します。
@param words 元となる文字列の配列。
@param pattern [[c:Regexp]] か [[c:String]] を指定します。
@return 短縮形をキー、元の文字列を値とするハッシュを返します。
# words に同じ文字列が含まれている場合は
# 以下のようにその文字列しか返しません。 pp Abbrev.abbrev(%w[ruby ruby]).sort
# => [["ruby", "ruby"]]
# 空白が含まれていても適切に処理します。 pp Abbrev.abbrev(['ru by']).sort"
# => [["r", "ru by"],
# ["ru", "ru by"],
# ["ru ", "ru by"],
# ["ru b", "ru by"],
# ["ru by", "ru by"]]
# sort していない例
p %w[ruby rubyist].abbrev
#=> {"ruby" => "ruby",
# "rubyi" => "rubyist",
# "rubyis" => "rubyist",
# "rubyist" => "rubyist"}
= reopen Array
== Instance Methods
--- abbrev(pattern = nil) -> Hash
self が文字列の配列の場合、self から一意に決まる短縮形を計算し、
短縮形をキー、元の文字列を値とするハッシュを返します。
引数に正規表現を指定すると、self のうちそのパターンにマッチしたものから短縮形を計算します。
引数に文字列を指定すると、self のうちその文字列で始まるものから短縮形を計算します。
[[m:Abbrev.#abbrev]](self, pattern) と同じです。
@param pattern [[c:Regexp]] か [[c:String]] を指定します。
p %w[ruby rubyist].abbrev
#=> {"ruby" => "ruby",
# "rubyi" => "rubyist",
# "rubyis" => "rubyist",
# "rubyist" => "rubyist"}
@see [[m:Abbrev.#abbrev]]
#@end
|
\alias{gdkEventCopy}
\name{gdkEventCopy}
\title{gdkEventCopy}
\description{Copies a \code{\link{GdkEvent}}, copying or incrementing the reference count of the
resources associated with it (e.g. \code{\link{GdkWindow}}'s and strings).}
\usage{gdkEventCopy(object)}
\arguments{\item{\code{object}}{[\code{\link{GdkEvent}}] a \code{\link{GdkEvent}}}}
\value{[\code{\link{GdkEvent}}] a copy of \code{event}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gdkEventCopy.Rd | no_license | cran/RGtk2.10 | R | false | false | 478 | rd | \alias{gdkEventCopy}
\name{gdkEventCopy}
\title{gdkEventCopy}
\description{Copies a \code{\link{GdkEvent}}, copying or incrementing the reference count of the
resources associated with it (e.g. \code{\link{GdkWindow}}'s and strings).}
\usage{gdkEventCopy(object)}
\arguments{\item{\code{object}}{[\code{\link{GdkEvent}}] a \code{\link{GdkEvent}}}}
\value{[\code{\link{GdkEvent}}] a copy of \code{event}.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Auto-generated fuzzing fixture: a pathological argument list for
# dynutils::project_to_segments (NULL start/end, a 2x7 matrix containing
# extreme/denormal values, and degenerate 1x1 segment endpoints).
testlist <- list(end = NULL, start = NULL, x = structure(c(4.65661649758392e-10, 6.95336823785774e-310, 2.32903286132618e+96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(2L, 7L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L)))
# Invoke the target function with the fixture arguments.
result <- do.call(dynutils::project_to_segments,testlist)
str(result) | /dynutils/inst/testfiles/project_to_segments/AFL_project_to_segments/project_to_segments_valgrind_files/1609871861-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 345 | r | testlist <- list(end = NULL, start = NULL, x = structure(c(4.65661649758392e-10, 6.95336823785774e-310, 2.32903286132618e+96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(2L, 7L)), segment_end = structure(0, .Dim = c(1L, 1L)), segment_start = structure(0, .Dim = c(1L, 1L)))
result <- do.call(dynutils::project_to_segments,testlist)
str(result) |
\encoding{utf-8}
\name{cos2Weights}
\alias{cos2Weights}
\title{Cos-squared model weights}
\usage{
cos2Weights(object, ..., data, eps = 1e-06, maxit = 100,
predict.args = list())
}
\arguments{
\item{object, \dots}{two or more fitted \code{\link{glm}} objects, or a
\code{list} of such, or an \code{\link[=model.avg]{"averaging"}} object.
Currently only \code{lm} and \code{glm} objects are accepted.}
\item{data}{a test data frame in which to look for variables
for use with \link[=predict]{prediction}. If omitted, the fitted linear
predictors are used.}
\item{eps}{tolerance for determining convergence.}
\item{maxit}{maximum number of iterations.}
\item{predict.args}{optionally, a \code{list} of additional arguments to be
passed to \code{predict}.}
}
\value{
The function returns a numeric vector of model weights.
}
\description{
Calculates cos-squared model weights, following the algorithm
outlined in the appendix of Garthwaite & Mubwandarikwa (2010).
}
\examples{
\dontshow{
if(length(find.package("expm", quiet = TRUE)) == 1) \{
}
fm <- lm(y ~ X1 + X2 + X3 + X4, Cement, na.action = na.fail)
# most efficient way to produce a list of all-subsets models
models <- lapply(dredge(fm, evaluate = FALSE), eval)
ma <- model.avg(models)
test.data <- Cement
Weights(ma) <- cos2Weights(models, data = test.data)
predict(ma, data = test.data)
\dontshow{
\} else message("Need CRAN package 'expm' to run this example")
}
}
\author{
Carsten Dormann, adapted by Kamil Barto\enc{ń}{n}
}
\references{
Garthwaite, P. H. and Mubwandarikwa, E. (2010) Selection of weights for
weighted model averaging. \emph{Australian & New Zealand Journal of
Statistics}, 52: 363–382.
Dormann, C. et al. (2018) Model averaging in ecology: a review of Bayesian,
information-theoretic, and tactical approaches for predictive inference.
\emph{Ecological Monographs}, 88, 485–504.
}
\seealso{
\code{\link{Weights}}, \code{\link{model.avg}}
Other model.weights: \code{\link{BGWeights}},
\code{\link{bootWeights}},
\code{\link{jackknifeWeights}},
\code{\link{stackingWeights}}
}
\keyword{models}
| /man/cos2weights.Rd | no_license | funkhou9/MuMIn | R | false | false | 2,117 | rd | \encoding{utf-8}
\name{cos2Weights}
\alias{cos2Weights}
\title{Cos-squared model weights}
\usage{
cos2Weights(object, ..., data, eps = 1e-06, maxit = 100,
predict.args = list())
}
\arguments{
\item{object, \dots}{two or more fitted \code{\link{glm}} objects, or a
\code{list} of such, or an \code{\link[=model.avg]{"averaging"}} object.
Currently only \code{lm} and \code{glm} objects are accepted.}
\item{data}{a test data frame in which to look for variables
for use with \link[=predict]{prediction}. If omitted, the fitted linear
predictors are used.}
\item{eps}{tolerance for determining convergence.}
\item{maxit}{maximum number of iterations.}
\item{predict.args}{optionally, a \code{list} of additional arguments to be
passed to \code{predict}.}
}
\value{
The function returns a numeric vector of model weights.
}
\description{
Calculates cos-squared model weights, following the algorithm
outlined in the appendix of Garthwaite & Mubwandarikwa (2010).
}
\examples{
\dontshow{
if(length(find.package("expm", quiet = TRUE)) == 1) \{
}
fm <- lm(y ~ X1 + X2 + X3 + X4, Cement, na.action = na.fail)
# most efficient way to produce a list of all-subsets models
models <- lapply(dredge(fm, evaluate = FALSE), eval)
ma <- model.avg(models)
test.data <- Cement
Weights(ma) <- cos2Weights(models, data = test.data)
predict(ma, data = test.data)
\dontshow{
\} else message("Need CRAN package 'expm' to run this example")
}
}
\author{
Carsten Dormann, adapted by Kamil Barto\enc{ń}{n}
}
\references{
Garthwaite, P. H. and Mubwandarikwa, E. (2010) Selection of weights for
weighted model averaging. \emph{Australian & New Zealand Journal of
Statistics}, 52: 363–382.
Dormann, C. et al. (2018) Model averaging in ecology: a review of Bayesian,
information-theoretic, and tactical approaches for predictive inference.
\emph{Ecological Monographs}, 88, 485–504.
}
\seealso{
\code{\link{Weights}}, \code{\link{model.avg}}
Other model.weights: \code{\link{BGWeights}},
\code{\link{bootWeights}},
\code{\link{jackknifeWeights}},
\code{\link{stackingWeights}}
}
\keyword{models}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_plot.R
\name{interactive_embedding_exploration}
\alias{interactive_embedding_exploration}
\title{Interactive exploration of embeddings}
\usage{
interactive_embedding_exploration(annoy_model,
default_number_neighbors = 100, default_number_rounds = 500,
default_perplexity = 30)
}
\arguments{
\item{annoy_model}{\link{RcppAnnoy} model generated with \link{get_annoy_model}}
\item{default_number_neighbors}{set the number of neighbors slider to this value}
\item{default_number_rounds}{set the number of \code{T-SNE} rounds slider to this value}
\item{default_perplexity}{set the \code{T-SNE} perplexity slider to this value}
}
\description{
Shiny application to interactively play with embeddings.
User provides a pivot word and the n most similar word
are projected on a scatter plot.
}
\details{
For large list of texts, the auto-complete can be slow.
Increasing the number of neighbors can make things very slow,
in particular with \code{T-SNE} approach.
500 neighbors is usually a good value to have an understanding
of the neighborhood of a vector.
Colors in the scatter plot represents clusters found by \link{dbscan}.
}
\examples{
if(interactive()){
# This example should be run with a higher quality model
# than the one embedded in fastrtext
library(projector)
library(fastrtext)
model_test_path <- system.file("extdata",
"model_unsupervised_test.bin",
package = "fastrtext")
model <- load_model(model_test_path)
word_embeddings <- get_word_vectors(model, words = head(get_dictionary(model), 2e5))
annoy_model <- get_annoy_model(word_embeddings, 5)
interactive_embedding_exploration(annoy_model)
}
}
| /man/interactive_embedding_exploration.Rd | no_license | pommedeterresautee/projector | R | false | true | 1,766 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny_plot.R
\name{interactive_embedding_exploration}
\alias{interactive_embedding_exploration}
\title{Interactive exploration of embeddings}
\usage{
interactive_embedding_exploration(annoy_model,
default_number_neighbors = 100, default_number_rounds = 500,
default_perplexity = 30)
}
\arguments{
\item{annoy_model}{\link{RcppAnnoy} model generated with \link{get_annoy_model}}
\item{default_number_neighbors}{set the number of neighbors slider to this value}
\item{default_number_rounds}{set the number of \code{T-SNE} rounds slider to this value}
\item{default_perplexity}{set the \code{T-SNE} perplexity slider to this value}
}
\description{
Shiny application to interactively play with embeddings.
The user provides a pivot word and the n most similar words
are projected on a scatter plot.
}
\details{
For large list of texts, the auto-complete can be slow.
Increasing the number of neighbors can make things very slow,
in particular with \code{T-SNE} approach.
500 neighbors is usually a good value to have an understanding
of the neighborhood of a vector.
Colors in the scatter plot represents clusters found by \link{dbscan}.
}
\examples{
if(interactive()){
# This example should be run with a higher quality model
# than the one embedded in fastrtext
library(projector)
library(fastrtext)
model_test_path <- system.file("extdata",
"model_unsupervised_test.bin",
package = "fastrtext")
model <- load_model(model_test_path)
word_embeddings <- get_word_vectors(model, words = head(get_dictionary(model), 2e5))
annoy_model <- get_annoy_model(word_embeddings, 5)
interactive_embedding_exploration(annoy_model)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skewfit_methods.R
\name{sf_lpdf}
\alias{sf_lpdf}
\title{Log-Likelihood}
\usage{
sf_lpdf(object, ...)
}
\description{
Log-Likelihood
}
| /man/sf_lpdf.Rd | no_license | olssol/skewfit | R | false | true | 212 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/skewfit_methods.R
\name{sf_lpdf}
\alias{sf_lpdf}
\title{Log-Likelihood}
\usage{
sf_lpdf(object, ...)
}
\description{
Log-Likelihood
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/homog_diag.R
\name{homog_diag}
\alias{homog_diag}
\title{Master function: Homogeneity of diagnosticity ratio}
\usage{
homog_diag(lineup_pres_list, lineup_abs_list, pos_list, k)
}
\arguments{
\item{lineup_pres_list}{A list containing k vectors of lineup choices for k lineups, in which the
target was present}
\item{lineup_abs_list}{A list containing k vectors of lineup choices for k lineups, in which the
target was absent}
\item{pos_list}{A numeric vector indexing lineup member positions for the target
present & absent conditions.}
\item{k}{A vector giving the number of members in each lineup pair (nominal size). Must be specified by the user.}
}
\value{
Computes diagnosticity ratio with chi-squared estimate and significance
level for k lineup pairs
}
\description{
This function provides assesses the homogeneity of the diagnosticity ratio of
k lineup pairs.
}
\details{
Master function for assessing homogeneity of diagnosticity ratio for
k independent lineups.
}
\examples{
#Target present data:
A <- round(runif(100,1,6))
B <- round(runif(70,1,5))
C <- round(runif(20,1,4))
lineup_pres_list <- list(A, B, C)
rm(A, B, C)
#Target absent data:
A <- round(runif(100,1,6))
B <- round(runif(70,1,5))
C <- round(runif(20,1,4))
lineup_abs_list <- list(A, B, C)
rm(A, B, C)
#Pos list
lineup1_pos <- c(1, 2, 3, 4, 5, 6)
lineup2_pos <- c(1, 2, 3, 4, 5)
lineup3_pos <- c(1, 2, 3, 4)
pos_list <- list(lineup1_pos, lineup2_pos, lineup3_pos)
rm(lineup1_pos, lineup2_pos, lineup3_pos)
#Nominal size:
k <- c(6, 5, 4)
#Call:
homog_diag(lineup_pres_list, lineup_abs_list, pos_list, k)
}
\references{
Malpass, R. S. (1981). Effective size and defendant bias in
eyewitness identification lineups. \emph{Law and Human Behavior, 5}(4), 299-309.
Malpass, R. S., Tredoux, C., & McQuiston-Surrett, D. (2007). Lineup
construction and lineup fairness. In R. Lindsay, D. F. Ross, J. D. Read,
& M. P. Toglia (Eds.), \emph{Handbook of Eyewitness Psychology, Vol. 2: Memory for
people} (pp. 155-178). Mahwah, NJ: Lawrence Erlbaum Associates.
Tredoux, C. G. (1998). Statistical inference on measures of lineup fairness.
\emph{Law and Human Behavior, 22}(2), 217-237.
Tredoux, C. (1999). Statistical considerations when determining measures of
lineup size and lineup bias. \emph{Applied Cognitive Psychology}, 13, S9-S26.
Wells, G. L.,Leippe, M. R., & Ostrom, T. M. (1979). Guidelines for
empirically assessing the fairness of a lineup. \emph{Law and Human Behavior,
3}(4), 285-293.
}
| /man/homog_diag.Rd | no_license | cran/r4lineups | R | false | true | 2,791 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/homog_diag.R
\name{homog_diag}
\alias{homog_diag}
\title{Master function: Homogeneity of diagnosticity ratio}
\usage{
homog_diag(lineup_pres_list, lineup_abs_list, pos_list, k)
}
\arguments{
\item{lineup_pres_list}{A list containing k vectors of lineup choices for k lineups, in which the
target was present}
\item{lineup_abs_list}{A list containing k vectors of lineup choices for k lineups, in which the
target was absent}
\item{pos_list}{A list of numeric vectors indexing lineup member positions for the target
present & absent conditions.}
\item{k}{A vector giving the number of members in each lineup pair (nominal size). Must be specified by the user.}
}
\value{
Computes diagnosticity ratio with chi-squared estimate and significance
level for k lineup pairs
}
\description{
This function assesses the homogeneity of the diagnosticity ratio of
k lineup pairs.
}
\details{
Master function for assessing homogeneity of diagnosticity ratio for
k independent lineups.
}
\examples{
#Target present data:
A <- round(runif(100,1,6))
B <- round(runif(70,1,5))
C <- round(runif(20,1,4))
lineup_pres_list <- list(A, B, C)
rm(A, B, C)
#Target absent data:
A <- round(runif(100,1,6))
B <- round(runif(70,1,5))
C <- round(runif(20,1,4))
lineup_abs_list <- list(A, B, C)
rm(A, B, C)
#Pos list
lineup1_pos <- c(1, 2, 3, 4, 5, 6)
lineup2_pos <- c(1, 2, 3, 4, 5)
lineup3_pos <- c(1, 2, 3, 4)
pos_list <- list(lineup1_pos, lineup2_pos, lineup3_pos)
rm(lineup1_pos, lineup2_pos, lineup3_pos)
#Nominal size:
k <- c(6, 5, 4)
#Call:
homog_diag(lineup_pres_list, lineup_abs_list, pos_list, k)
}
\references{
Malpass, R. S. (1981). Effective size and defendant bias in
eyewitness identification lineups. \emph{Law and Human Behavior, 5}(4), 299-309.
Malpass, R. S., Tredoux, C., & McQuiston-Surrett, D. (2007). Lineup
construction and lineup fairness. In R. Lindsay, D. F. Ross, J. D. Read,
& M. P. Toglia (Eds.), \emph{Handbook of Eyewitness Psychology, Vol. 2: Memory for
people} (pp. 155-178). Mahwah, NJ: Lawrence Erlbaum Associates.
Tredoux, C. G. (1998). Statistical inference on measures of lineup fairness.
\emph{Law and Human Behavior, 22}(2), 217-237.
Tredoux, C. (1999). Statistical considerations when determining measures of
lineup size and lineup bias. \emph{Applied Cognitive Psychology}, 13, S9-S26.
Wells, G. L.,Leippe, M. R., & Ostrom, T. M. (1979). Guidelines for
empirically assessing the fairness of a lineup. \emph{Law and Human Behavior,
3}(4), 285-293.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ebic.R
\name{ebic}
\alias{ebic}
\title{EBIC calculation.}
\usage{
ebic(x, y, beta, sigma2, type = "fusedlasso")
}
\arguments{
\item{x}{is a matrix of order n x p where n is number of observations and p is number of predictor variables.}
\item{y}{y is a vector of response variable of order n x 1.}
\item{beta}{Regression coefficient.}
\item{sigma2}{Dispersion parameter.}
\item{type}{Estimation method applied to the posterior distribution sample.}
}
\description{
EBIC calculation.
}
| /man/ebic.Rd | no_license | ksmstg/neggfl | R | false | true | 560 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ebic.R
\name{ebic}
\alias{ebic}
\title{EBIC calculation.}
\usage{
ebic(x, y, beta, sigma2, type = "fusedlasso")
}
\arguments{
\item{x}{is a matrix of order n x p where n is number of observations and p is number of predictor variables.}
\item{y}{y is a vector of response variable of order n x 1.}
\item{beta}{Regression coefficient.}
\item{sigma2}{Dispersion parameter.}
\item{type}{Estimation method applied to the posterior distribution sample.}
}
\description{
EBIC calculation.
}
|
## plot5.R -- Total PM2.5 emissions from motor-vehicle (ON-ROAD) sources in
## Baltimore City (fips == "24510"), aggregated by year, written to plot5.png.
library(dplyr)
library(ggplot2)

# National Emissions Inventory observations and source classification table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Total ON-ROAD emissions per survey year for Baltimore City.
emissions_by_year <- NEI %>%
  filter(fips == "24510" & type == "ON-ROAD") %>%
  group_by(year) %>%
  summarize(total_emissions = sum(Emissions))

# Treat year as a factor so each survey year gets its own discrete bar.
emissions_by_year$year <- as.factor(emissions_by_year$year)

p <- ggplot(emissions_by_year, aes(x = year, y = total_emissions)) +
  geom_bar(stat = "identity") +
  labs(x = "Year", y = "PM2.5 Emissions (tons)") +
  ggtitle("Baltimore City PM2.5 Emissions from Vehicle Related Sources")

# Render straight into a png device: dev.copy() depends on an open screen
# device, and under Rscript the auto-printed plot goes to Rplots.pdf instead.
png("plot5.png")
print(p)
dev.off()
library(ggplot2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
emissions_by_year <- NEI %>%
filter(fips == "24510" & type == "ON-ROAD") %>%
group_by(year) %>%
summarize(total_emissions = sum(Emissions))
emissions_by_year$year <- as.factor(emissions_by_year$year)
ggplot(emissions_by_year, aes(x=year, y=total_emissions)) + geom_bar(stat="identity") + labs(x="Year", y="PM2.5 Emissions (tons)") + ggtitle("Baltimore City PM2.5 Emissions from Vehicle Related Sources")
dev.copy(png,'plot5.png')
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taperedGR_moment_release.R
\name{taperedGR_exceedance_rate_derivative}
\alias{taperedGR_exceedance_rate_derivative}
\title{Derivative of Tapered Gutenberg Richter exceedance-rate}
\usage{
taperedGR_exceedance_rate_derivative(M, N_Mt, Mt, Mc, beta)
}
\arguments{
\item{M}{the moment of interest}
\item{N_Mt}{the number of events with moment exceeding the threshold moment Mt}
\item{Mt}{the threshold moment}
\item{Mc}{the corner moment}
\item{beta}{the beta parameter}
}
\value{
The derivative
}
\description{
This gives the derivative of the exceedance-rate with respect to the moment,
at the specified moment M
}
\examples{
# Threshold moment
Mt = M0_2_Mw(5.5, inverse=TRUE)
# Corner moment
Mc = M0_2_Mw(9.2, inverse=TRUE)
# How many events with M > Mt each year on average?
N_Mt = 10.47
# The beta parameter
beta = 0.54
#
# Double check that dN_dM is right by comparison with numerical derivative
#
x = M0_2_Mw(7.5, inverse=TRUE)
# Numerical derivative -- needs a large 'delta h' because x is very large
dh = x/1e+06 # A suitable numerical derivative increment
dN_dM_estimate = (taperedGR_exceedance_rate(x+dh, N_Mt, Mt, Mc, beta)-
taperedGR_exceedance_rate(x-dh, N_Mt, Mt, Mc, beta)
)/(2*dh)
dN_dM_exact = taperedGR_exceedance_rate_derivative(x, N_Mt, Mt, Mc, beta)
test = (abs(dN_dM_estimate - dN_dM_exact) < 1e-06*abs(dN_dM_exact))
stopifnot(test)
# Quick check of the vectorized version
dN_dM_check = taperedGR_exceedance_rate_derivative(c(0, x, x, 1), N_Mt, Mt, Mc, beta)
stopifnot(all(dN_dM_check[c(1, 4)] == 0) &
all(dN_dM_check[2:3] == dN_dM_exact))
}
| /R/rptha/man/taperedGR_exceedance_rate_derivative.Rd | permissive | GeoscienceAustralia/ptha | R | false | true | 1,718 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taperedGR_moment_release.R
\name{taperedGR_exceedance_rate_derivative}
\alias{taperedGR_exceedance_rate_derivative}
\title{Derivative of Tapered Gutenberg Richter exceedance-rate}
\usage{
taperedGR_exceedance_rate_derivative(M, N_Mt, Mt, Mc, beta)
}
\arguments{
\item{M}{the moment of interest}
\item{N_Mt}{the number of events with moment exceeding the threshold moment Mt}
\item{Mt}{the threshold moment}
\item{Mc}{the corner moment}
\item{beta}{the beta parameter}
}
\value{
The derivative
}
\description{
This gives the derivative of the exceedance-rate with respect to the moment,
at the specified moment M
}
\examples{
# Threshold moment
Mt = M0_2_Mw(5.5, inverse=TRUE)
# Corner moment
Mc = M0_2_Mw(9.2, inverse=TRUE)
# How many events with M > Mt each year on average?
N_Mt = 10.47
# The beta parameter
beta = 0.54
#
# Double check that dN_dM is right by comparison with numerical derivative
#
x = M0_2_Mw(7.5, inverse=TRUE)
# Numerical derivative -- needs a large 'delta h' because x is very large
dh = x/1e+06 # A suitable numerical derivative increment
dN_dM_estimate = (taperedGR_exceedance_rate(x+dh, N_Mt, Mt, Mc, beta)-
taperedGR_exceedance_rate(x-dh, N_Mt, Mt, Mc, beta)
)/(2*dh)
dN_dM_exact = taperedGR_exceedance_rate_derivative(x, N_Mt, Mt, Mc, beta)
test = (abs(dN_dM_estimate - dN_dM_exact) < 1e-06*abs(dN_dM_exact))
stopifnot(test)
# Quick check of the vectorized version
dN_dM_check = taperedGR_exceedance_rate_derivative(c(0, x, x, 1), N_Mt, Mt, Mc, beta)
stopifnot(all(dN_dM_check[c(1, 4)] == 0) &
all(dN_dM_check[2:3] == dN_dM_exact))
}
|
#' Distance Matrix Between Codes
#'
#' Generate distance measures to ascertain a mean distance measure between codes.
#'
#' @param dataframe A data frame from the cm_x2long family
#' (\code{cm_range2long}; \code{cm_df2long}; \code{cm_time2long}).
#' @param time.var An optional variable to split the dataframe by (if you have
#' data that is by various times this must be supplied).
#' @param parallel logical. If TRUE runs the cm_distance on multiple cores.
#' This is effective with larger data sets but may actually be slower with
#' smaller data sets.
#' @param code.var The name of the code variable column. Defaults to "codes" as
#' output by the x2long family.
#' @param causal logical. If TRUE measures the distance between x and y given
#' that x must precede y.
#' @param start.var The name of the start variable column. Defaults to "start"
#' as output by the x2long family.
#' @param end.var The name of the end variable column. Defaults to "end" as
#' output by the x2long family.
#' @param mean.digits The number of digits to be displayed in the mean matrix.
#' @param sd.digits The number of digits to be displayed in the sd (standard
#' deviation) matrix.
#' @param stan.digits The number of digits to use in the standardized mean
#' difference matrix.
#' @return An object of the class cm.dist. This is a list of n lists with the
#' following components per each list (time.var):
#' \item{mean}{A distance matrix of average distances between codes}
#' \item{sd}{A matrix of standard deviations of distances between codes}
#' \item{n}{A matrix of counts of distances between codes}
#' \item{combined}{A matrix of combined mean, sd and n of distances between
#' codes}
#' \item{standardized}{A matrix of standardized values of distances between
#' codes. The closer a value is to zero the closer two codes relate.}
#' @keywords distance
#' @export
#' @examples
#' foo <- list(
#' AA = qcv(terms="02:03, 05"),
#' BB = qcv(terms="1:2, 3:10"),
#' CC = qcv(terms="1:9, 100:150")
#' )
#'
#' foo2 <- list(
#' AA = qcv(terms="40"),
#' BB = qcv(terms="50:90"),
#' CC = qcv(terms="60:90, 100:120, 150"),
#' DD = qcv(terms="")
#' )
#'
#' (dat <- cm_range2long(foo, foo2, v.name = "time"))
#' (out <- cm_distance(dat, time.var = "time", causal=TRUE))
#' names(out)
#' names(out$foo2)
#' out$foo2
#' #========================================
#' x <- list(
#' transcript_time_span = qcv(00:00 - 1:12:00),
#' A = qcv(terms = "2.40:3.00, 6.32:7.00, 9.00,
#' 10.00:11.00, 59.56"),
#' B = qcv(terms = "3.01:3.02, 5.01, 19.00, 1.12.00:1.19.01"),
#' C = qcv(terms = "2.40:3.00, 5.01, 6.32:7.00, 9.00, 17.01")
#' )
#' (dat <- cm_time2long(x))
#' gantt_wrap(dat, "code", border.color = "black", border.size = 5,
#' sig.dig.line.freq = -2)
#' (a <- cm_distance(dat))
#' names(a)
#' names(a$dat)
#' a$dat
cm_distance <-
function(dataframe, time.var = NULL, parallel = FALSE, code.var = "code",
    causal = FALSE, start.var = "start", end.var = "end", mean.digits = 2,
    sd.digits = 2, stan.digits = 2) {
    # Worker: compute mean/sd/n distance matrices for one (time-sliced) chunk.
    DIST <- function(DF, CV = code.var,
        CAU = causal, SV = start.var, EV = end.var,
        MD = mean.digits, SDD = sd.digits, SD = stan.digits) {
        # One sub-frame per code; drop codes with no rows.
        L2 <- split(DF, DF[, CV])
        L2 <- L2[vapply(L2, nrow, integer(1)) != 0]
        NMS <- names(L2)
        # Expand each code's start/end spans into an indicator vector.
        L3 <- lapply(seq_along(L2), function(i) {
            cm_se2vect(L2[[i]][, SV], L2[[i]][, EV])
        })
        # Right-pad all indicator vectors with zeros to a common length.
        lens <- max(lengths(L3))
        L4 <- lapply(seq_along(L3), function(i) {
            c(L3[[i]], rep(0, lens - length(L3[[i]])))
        })
        dat <- do.call(cbind, L4)
        colnames(dat) <- NMS
        # cm_bidist: element 4 holds the causal stats, element 2 the plain ones.
        if (CAU) {inds <- 4} else {inds <- 2}
        dism <- function(x, y) {cm_bidist(x, y)[[inds]][1]}   # mean distance
        dissd <- function(x, y) {cm_bidist(x, y)[[inds]][2]}  # sd of distances
        disn <- function(x, y) {cm_bidist(x, y)[[inds]][3]}   # count
        FUN <- function(dat, mdigs = MD, sdigs = SDD) {
            dat <- data.frame(dat)
            means <- round(v.outer(dat, dism), digits = mdigs)
            sds <- round(v.outer(dat, dissd), digits = sdigs)
            ns <- v.outer(dat, disn)
            DIM <- dim(means)
            # Human-readable "mean(sd)n=count" cells; blank out all-NA cells.
            pfun <- function(x, y, z) paste0(x, "(", y, ")", "n=", z)
            comb <- mgsub(c("(NA)", "NA;n=0"), c(";", NA),
                mapply(pfun, means, sds, ns), fixed = TRUE)
            dim(comb) <- DIM
            dimnames(comb) <- list(rownames(means), colnames(means))
            diag(comb) <- gsub("0(0)", "", diag(comb), fixed = TRUE)
            # Scale (no centering) every cell of a matrix, preserving shape.
            scale.all <- function(x) {
                dims <- dim(x)
                dnms <- dimnames(x)
                x <- matrix(scale(c(x), center = FALSE), dims)
                dimnames(x) <- dnms
                x
            }
            # Standardized score: product of scaled means and scaled sds;
            # values near zero indicate closely related codes.
            stand <- round(scale.all(means) * scale.all(sds),
                digits = SD)
            stand[is.nan(stand)] <- NA
            list(mean = means, sd = sds, n = ns, combined = noquote(comb),
                standardized = stand)
        }
        FUN(dat)
    }
    # Split by the time variable when given; otherwise analyse the whole frame
    # under the name of the supplied data object.
    if (!is.null(time.var)) {
        L1 <- split(dataframe, dataframe[, time.var])
    } else {
        L1 <- list(dataframe)
        names(L1) <- as.character(substitute(dataframe))
    }
    if (parallel) {
        # Export the arguments and helper functions to the workers; parallel
        # mode pays off for larger data sets only.
        cl <- makeCluster(mc <- getOption("cl.cores", detectCores()))
        clusterExport(cl = cl, varlist = c("dataframe", "time.var", "code.var",
            "causal", "start.var", "end.var", "mean.digits", "sd.digits",
            "stan.digits", "cm_se2vect", "v.outer", "cm_bidist", "mgsub"),
            envir = environment())
        o <- parLapply(cl, L1, DIST)
        stopCluster(cl)
    } else {
        o <- lapply(L1, DIST)
    }
    class(o) <- "cm_distance"
    return(o)
}
#' Prints a cm_distance Object
#'
#' Prints the standardized distance matrix of each time slice contained in a
#' cm_distance object.
#'
#' @param x The cm_distance object.
#' @param \ldots ignored
#' @method print cm_distance
#' @S3method print cm_distance
print.cm_distance <-
function(x, ...){
    # Flatten one level: elements become named "<slice>.<component>".
    x <- unlist(x, recursive = FALSE)
    # Every second token of the dot-split names is the component name.
    y <- unlist(strsplit(names(x), "\\."))[c(FALSE, TRUE)]
    # Keep only the standardized matrices for display.
    z <- x[y == "standardized"]
    invisible(lapply(seq_along(z), function(i) {
        a <- strsplit(names(z)[i], "\\.")
        # Print the slice name only when the object carries one.
        if (length(unlist(a)) > 1) {
            cat(paste0(a[[1]][1], "\n"))
        }
        cat(paste0(a[[1]][length(a[[1]])], ":\n"))
        print(z[[i]])
        cat("\n")
    }))
}
#'
#' Generate distance measures to ascertain a mean distance measure between codes.
#'
#' @param dataframe A data frame from the cm_x2long family
#' (\code{cm_range2long}; \code{cm_df2long}; \code{cm_time2long}).
#' @param time.var An optional variable to split the dataframe by (if you have
#' data that is by various times this must be supplied).
#' @param parallel logical. If TRUE runs the cm_distance on multiple cores.
#' This is effective with larger data sets but may actually be slower with
#' smaller data sets.
#' @param code.var The name of the code variable column. Defaults to "codes" as
#' out putted by x2long family.
#' @param causal logical. If TRUE measures the distance between x and y given
#' that x must proceed y.
#' @param start.var The name of the start variable column. Defaults to "start"
#' as out putted by x2long family.
#' @param end.var The name of the end variable column. Defaults to "end" as out
#' putted by x2long family.
#' @param mean.digits The number of digits to be displayed in the mean matrix.
#' @param sd.digits The number of digits to be displayed in the sd (standard
#' deviation) matrix.
#' @param stan.digits The number of digits to use in the standardized mean
#' difference matrix.
#' @return An object of the class cm.dist. This is a list of n lists with the
#' following components per each list (time.var):
#' \item{mean}{A distance matrix of average distances between codes}
#' \item{sd}{A matrix of standard deviations of distances between codes}
#' \item{n}{A matrix of counts of distances between codes}
#' \item{combined}{A matrix of combined mean, sd and n of distances between
#' codes}
#' \item{standardized}{A matrix of standardized values of distances between
#' codes. The closer a value is to zero the closer two codes relate.}
#' @keywords distance
#' @export
#' @examples
#' foo <- list(
#' AA = qcv(terms="02:03, 05"),
#' BB = qcv(terms="1:2, 3:10"),
#' CC = qcv(terms="1:9, 100:150")
#' )
#'
#' foo2 <- list(
#' AA = qcv(terms="40"),
#' BB = qcv(terms="50:90"),
#' CC = qcv(terms="60:90, 100:120, 150"),
#' DD = qcv(terms="")
#' )
#'
#' (dat <- cm_range2long(foo, foo2, v.name = "time"))
#' (out <- cm_distance(dat, time.var = "time", causal=TRUE))
#' names(out)
#' names(out$foo2)
#' out$foo2
#' #========================================
#' x <- list(
#' transcript_time_span = qcv(00:00 - 1:12:00),
#' A = qcv(terms = "2.40:3.00, 6.32:7.00, 9.00,
#' 10.00:11.00, 59.56"),
#' B = qcv(terms = "3.01:3.02, 5.01, 19.00, 1.12.00:1.19.01"),
#' C = qcv(terms = "2.40:3.00, 5.01, 6.32:7.00, 9.00, 17.01")
#' )
#' (dat <- cm_time2long(x))
#' gantt_wrap(dat, "code", border.color = "black", border.size = 5,
#' sig.dig.line.freq = -2)
#' (a <- cm_distance(dat))
#' names(a)
#' names(a$dat)
#' a$dat
cm_distance <-
function(dataframe, time.var = NULL, parallel = FALSE, code.var = "code",
causal = FALSE, start.var = "start", end.var = "end", mean.digits = 2,
sd.digits = 2, stan.digits = 2) {
DIST <- function(DF, CV= code.var,
CAU = causal, SV = start.var, EV = end.var,
MD = mean.digits, SDD = sd.digits, SD = stan.digits) {
L2 <- split(DF, DF[, CV])
L2 <- L2[sapply(L2, nrow) != 0]
NMS <- names(L2)
L3 <- lapply(seq_along(L2), function(i) {
cm_se2vect(L2[[i]][, SV], L2[[i]][, EV])
})
lens <- max(sapply(L3, length))
L4 <- lapply(seq_along(L3), function(i){
c(L3[[i]], rep(0, lens - length(L3[[i]])))
})
dat <- do.call(cbind, L4)
colnames(dat) <- NMS
if (CAU) {inds <- 4} else {inds <- 2}
dism <- function(x, y) {cm_bidist(x, y)[[inds]][1]}
dissd <- function(x, y) {cm_bidist(x, y)[[inds]][2]}
disn <- function(x, y) {cm_bidist(x, y)[[inds]][3]}
FUN <- function(dat, mdigs=MD, sdigs=SDD){
dat <- data.frame(dat)
means <- round(v.outer(dat, dism), digits=mdigs)
sds <- round(v.outer(dat, dissd), digits=sdigs)
ns <- v.outer(dat, disn)
DIM <- dim(means)
pfun <- function(x, y, z) paste0(x, "(", y, ")", "n=", z)
comb <- mgsub(c("(NA)", "NA;n=0"), c(";", NA),
mapply(pfun, means, sds, ns), fixed=TRUE)
dim(comb) <- DIM
dimnames(comb) <- list(rownames(means), colnames(means))
diag(comb) <- gsub("0(0)", "", diag(comb), fixed=TRUE)
scale.all <- function(x) {
dims <- dim(x)
dnms <- dimnames(x)
x <- matrix(scale(c(x), F), dims)
dimnames(x) <- dnms
x
}
stand <- round(scale.all(means)*scale.all(sds),
digits=SD)
stand[is.nan(stand)] <- NA
list(mean=means, sd=sds, n=ns, combined=noquote(comb),
standardized=stand)
}
FUN(dat)
}
if (!is.null(time.var)) {
L1 <- split(dataframe, dataframe[, time.var])
} else {
L1 <- list(dataframe)
names(L1) <- as.character(substitute(dataframe))
}
if (parallel){
cl <- makeCluster(mc <- getOption("cl.cores", detectCores()))
clusterExport(cl=cl, varlist=c("dataframe", "time.var", "code.var",
"causal", "start.var", "end.var", "mean.digits", "sd.digits",
"stan.digits", "cm_se2vect", "v.outer", "cm_bidist", "mgsub"),
envir = environment())
o <- parLapply(cl, L1, DIST)
stopCluster(cl)
} else {
o <- lapply(L1, DIST)
}
class(o) <- "cm_distance"
return(o)
}
#' Prints a cm_distance Object
#'
#' Prints a cm_distance object.
#'
#' @param x The cm_distance object.
#' @param \ldots ignored
#' @method print cm_distance
#' @S3method print cm_distance
print.cm_distance <-
function(x, ...){
x <- unlist(x, recursive=F)
y <- unlist(strsplit(names(x), "\\."))[c(FALSE, TRUE)]
z <- x[y == "standardized"]
invisible(lapply(seq_along(z), function(i) {
a <- strsplit(names(z)[i], "\\.")
if(length(unlist(a)) > 1) {
cat(paste0(a[[1]][1], "\n"))
}
cat(paste0(a[[1]][length(a[[1]])], ":\n"))
print(z[[i]])
cat("\n")
}))
} |
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{nrrd.voxdims}
\alias{nrrd.voxdims}
\title{Return voxel dimensions (by default absolute voxel dimensions)}
\usage{
nrrd.voxdims(file, ReturnAbsoluteDims = TRUE)
}
\arguments{
\item{file}{path to nrrd/nhdr file or a list containing a
nrrd header}
\item{ReturnAbsoluteDims}{Defaults to returning absolute
value of dims even if there are any negative space
directions}
}
\value{
voxel dimensions as numeric vector
}
\description{
Return voxel dimensions (by default absolute voxel
dimensions)
}
\details{
NB Can handle off diagonal terms in space directions
matrix, BUT assumes that space direction vectors are
orthogonal.
}
\author{
jefferis
}
\seealso{
\code{\link{read.nrrd.header}}
}
| /man/nrrd.voxdims.Rd | no_license | mnqslbs2/nat | R | false | false | 755 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{nrrd.voxdims}
\alias{nrrd.voxdims}
\title{Return voxel dimensions (by default absolute voxel dimensions)}
\usage{
nrrd.voxdims(file, ReturnAbsoluteDims = TRUE)
}
\arguments{
\item{file}{path to nrrd/nhdr file or a list containing a
nrrd header}
\item{ReturnAbsoluteDims}{Defaults to returning absolute
value of dims even if there are any negative space
directions}
}
\value{
voxel dimensions as numeric vector
}
\description{
Return voxel dimensions (by default absolute voxel
dimensions)
}
\details{
NB Can handle off diagonal terms in space directions
matrix, BUT assumes that space direction vectors are
orthogonal.
}
\author{
jefferis
}
\seealso{
\code{\link{read.nrrd.header}}
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
btn.style.preload <- "color: #fff; background-color: #666666; border-color: #999999"
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
#----------------------------------------------------------------- Basic ----
# App-wide helpers live in functionsInApp.R (init.* and friends).
source("functionsInApp.R")
options(stringsAsFactors = FALSE)
# actually render the dropdownMenu PROGRESS
# NOTE(review): the 20/50/90 task values are hard-coded placeholders; only
# "defined variables" is computed from the data.
output$menu <- renderMenu({
dropdownMenu(
type = "tasks", badgeStatus = "success",
taskItem(value = 20, color = "red",
"read information"
),
taskItem(value = 50, color = "orange",
"upload Data"
),
taskItem(value = sum(res$variableTypes$set!=0)/NCOL(res$data)*100, color = "green",
"defined variables"
),
taskItem(value = 90, color = "green",
"clean data"
)
)
})
## Shows certain activations in the console
show.events = F
# Developer-mode flag ("Entwickler" is German for developer).
entwickler = F
## Init. of the reactive values
# temp: staging area for a candidate upload; res: accepted data + bookkeeping.
abc <- reactiveValues(selectedTab = numeric())
temp <- reactiveValues()
res <- reactiveValues(n = 106)
res$temp <- NULL
res$filePath <- "/noPath"
res$fileEnding <- ".noEnding"
exploreVariables_previousSelection <- NULL
exploreVariables_previousPage <- NULL
defineVariables_previousPage <- NULL
# Create a Progress object
progress_defineVar <- shiny::Progress$new()
# Make sure it closes when we exit this reactive, even if there's an error
#on.exit(progress_defineVar$close())
progress_defineVar$set(message = "Define/redefine Variables: ", value = 0)
#----------------------------------------------------------------- 0. Information ----
# Static info link shown on the information panel.
output$link.Pseudonomisierung <- renderUI({
h5("This process is called pseudonymisation. Find it out",a("here",
href="https://en.wikipedia.org/wiki/ABCD",
target="_blank"))
})
# Reactive: render the staged upload (temp$data) as one scrollable DT table.
temp.table.simple <- reactive({
DT::datatable(temp$data,
class = 'cell-border stripe',
options = list(paging = F,
#pageLength = 10,
#autoWidth = TRUE,
server = T,
scrollX='400px',
scrollY='600px'),
selection = 'none')
})
# Promote the staged upload into the main store (res), (re)initialise all
# bookkeeping tables, and jump to the Define tab.
temp.to.res <- reactive({
res$data <- temp$data
res$filePath <- temp$filePath
res$fileEnding <- temp$fileEnding
res$variableTypes <- init.variableTypes(res)
res$classified <- init.classified(res)
res$monitor <- init.monitor(res)
res$varNames <- init.varNames(res)
#temp$data <- NULL
updateTabItems(session, "sidebarmenu", "panelIdDefine2")
# NOTE(review): looks like a leftover debug print -- confirm before removing.
print(temp$data)
})
# "Accept Data!" buttons: one per input source, rendered only once staged
# data exists (req(temp$data)).
output$preload.xlsx <- renderUI({req(temp$data)
actionButton("actionButton.preload.xlsx","Accept Data!",icon = icon("file-upload"),width = "100%", style = btn.style.preload)})
output$preload.csv <- renderUI({req(temp$data)
actionButton("actionButton.preload.csv","Accept Data!",icon = icon("file-upload"),width = "100%", style = btn.style.preload)})
output$preload.base <- renderUI({req(temp$data)
actionButton("actionButton.preload.base","Accept Data!",icon = icon("file-upload"),width = "100%", style = btn.style.preload)})
output$preload.RNG <- renderUI({req(temp$data)
actionButton("actionButton.preload.RNG","Accept Data!",icon = icon("file-upload"),width = "100%", style = btn.style.preload)})
# Every accept button triggers the same temp -> res promotion.
observeEvent(input$actionButton.preload.xlsx,{
temp.to.res()
})
observeEvent(input$actionButton.preload.csv,{
temp.to.res()
})
observeEvent(input$actionButton.preload.base,{
temp.to.res()
})
observeEvent(input$actionButton.preload.RNG,{
temp.to.res()
})
# Switch the sidebar to the tab that follows the currently open one.
# Every data-source tab advances to the Define panel; the remaining panels
# advance in their fixed order. Unmatched ids yield NULL (switch default).
nextTab <- function(input.tabs, session){
  newtab <- switch(input.tabs,
    "panelSubIdxlsx" = ,
    "panelSubIdcsv" = ,
    "john" = ,
    "RDatabase" = ,
    "panelSubIdRNG" = "panelIdDefine2",
    "panelIdDefine2" = "panelIdExplore2",
    "panelIdExplore2" = "panelIdOverview",
    "panelIdOverview" = "panelIdEvaluation"
  )
  updateTabItems(session, "sidebarmenu", newtab)
}
# Placeholder: data reloading is not implemented yet.
reloadData <- function(){}
# Switch the sidebar back to the tab preceding the currently open one.
prevTab <- function(input.tabs,session){
newtab <- switch(input.tabs,
# "panelSubIdxlsx" = "panelIdDefine2",
# "panelSubIdcsv" = "panelIdDefine2",
# "john" = "panelIdDefine2",
# "RDatabase" = "panelIdDefine2",
# "panelSubIdRNG" = "panelIdDefine2",
"panelIdExplore2" = "panelIdDefine2",
"panelIdOverview" = "panelIdExplore2",
"panelIdEvaluation" = "panelIdOverview"
)
updateTabItems(session, "sidebarmenu", newtab)
}
# Forward/backward navigation buttons drive the tab switches.
observeEvent(input$btn_nextTab, {
nextTab(input$sidebarmenu,session)
})
observeEvent(input$btn_prevTab, {
prevTab(input$sidebarmenu,session)
})
#============================================================= CSV OUtPUT ====
# Read the uploaded CSV with the user-chosen header/separator/quote options,
# stage it in temp, and show it as a preview table.
output$contents.csv <- DT::renderDataTable({
if (show.events) print("output$contents.csv")
# input$file.csv will be NULL initially. After the user selects
# and uploads a file, head of that data file by default,
# or all rows if selected, will be shown.
req(input$file.csv)
# when reading semicolon separated files,
# having a comma separator causes `read.csv` to error
tryCatch(
{
temp$filePath <- input$file.csv$datapath
# NOTE(review): print(str(...)) looks like debug output -- confirm.
print(str(input$file.csv))
temp$fileEnding <- ".csv"
#### implementing for missings
temp$data <- read.csv(input$file.csv$datapath,
header = input$header,
sep = input$sep,
quote = input$quote)
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
return(temp.table.simple())
})
#============================================================= XLSX OUtPUT ====
# Read the selected sheet of the uploaded xlsx file, stage it in temp,
# and show it as a preview table.
output$contents.xlsx <- DT::renderDataTable({
if (show.events) print("output$contents.xlsx")
req(input$file.xlsx)
tryCatch(
{
sheet <- input$select.sheet
temp$filePath <- input$file.xlsx$datapath
temp$fileEnding <- ".xlsx"
temp$data <- data.frame(xlsx::read.xlsx(input$file.xlsx$datapath,sheetIndex = sheet))
#print(res$variableTypes)
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
return(temp.table.simple())
})
# interactive selector for sheets
output$selectInput.sheet.xlsx <- renderUI({
req(input$file.xlsx)
# NOTE(review): result of this call is unused (duplicated in `choices`
# below) -- possibly leftover; confirm.
gdata::sheetNames(input$file.xlsx$datapath)
selectInput("select.sheet", "Select Sheet",
choices = gdata::sheetNames(input$file.xlsx$datapath),
selected = 1)
})
#============================================================= RDat OUtPUT ====
# Load an R object by name (from the current search path) as the staged
# data set and show it as a preview table.
output$contents.RDat <- DT::renderDataTable({
if (show.events) print("output$contents.RDat")
req(input$inputRDat)
tryCatch(
{
# get() resolves the user-supplied object name.
temp$data <- get(input$inputRDat)
temp$filePath <- input$inputRDat
temp$fileEnding <- ".RData"
# temp$variableTypes <- init.variableTypes(res)
# temp$classified <- init.classified(res)
# temp$varNames <- init.varNames(res)
},
error = function(e) {
# return a safeError if a parsing error occurs
stop(safeError(e))
}
)
return(temp.table.simple())
})
#============================================================= RNG OUtPUT ====
# Generate (and incrementally resize) a random demo data set with
# input$nVariables columns and input$nPatients rows, staged in temp.
output$contents.RNG <- DT::renderDataTable({
req(input$nVariables,
input$nPatients)
if (show.events) print("output$contents.RNG")
# First call: start from an ID column only.
if (is.null(temp$data)){
res$rngString <- c()
temp$data <- data.frame(ID = 1:input$nPatients)
}
# Grow/shrink columns and rows to match the requested dimensions;
# res$rngString remembers the generator spec per random variable.
if (!is.null(temp$data)){
if (input$nVariables > NCOL(temp$data)){
nAddVars <- input$nVariables-NCOL(temp$data)
res$rngString <- c(res$rngString,create.RNG.String(nAddVars))
temp$data <- add.RNG.Variable(nAddVars,temp$data,res$rngString)
}
if (input$nVariables < NCOL(temp$data)){
nRmVars <- input$nVariables-NCOL(temp$data)
res$rngString <- res$rngString[-(input$nVariables:NCOL(temp$data))]
temp$data <- rm.RNG.Variable(nRmVars,temp$data)
}
if (input$nPatients > NROW(temp$data)){
nAddObs <- input$nPatients-NROW(temp$data)
temp$data <- add.RNG.Obs(nAddObs,temp$data,res$rngString)
}
if (input$nPatients < NROW(temp$data)){
nRmObs <- NROW(temp$data) - input$nPatients
temp$data <- rm.RNG.Obs(nRmObs,temp$data)
}
}
temp$filePath <- "randomData"
temp$fileEnding <- ".RData"
# return( DT::datatable(temp$data,
# class = 'cell-border stripe',
# options = list(pageLength = 10,
# #autoWidth = TRUE,
# server = T,
# scrollX='400px'))
# )
return(temp.table.simple())
})
# Accept the generated random data: copy res$temp into res$data and
# reinitialise the bookkeeping tables.
observeEvent(input$saveDataRNG,{
if (show.events) print("input$saveDataRNG")
res$data <- data.frame(res$temp)
res$variableTypes <- init.variableTypes(res)
res$classified <- init.classified(res)
res$varNames <- init.varNames(res)
})
# Add a hazard-style variable to the generated data (see add.Hazard).
observeEvent(input$addHazard,{
if (show.events) print("observeEvent(input$addHazard)")
res$temp <- add.Hazard(res$temp)
})
## nothing Placeholder
# Paging options derived from the UI controls; currently unused elsewhere
# in this file.
myOptions <- reactive({
if (show.events) print("myOptions")
list(
page=ifelse(input$pageable==TRUE,'enable','disable'),
pageSize=input$pagesize,
width=550
)
})
#----------------------------------------------------------------- 2 Define Redefine ----
previousPage <- NULL
# collapsible information boxes (one observer per variable-type info box)
observeEvent(input$infoBox_numeric_titleId, {
js$collapse("infoBox_numeric")
})
observeEvent(input$infoBox_factor_titleId, {
js$collapse("infoBox_factor")
})
observeEvent(input$infoBox_ordered_titleId, {
js$collapse("infoBox_ordered")
})
observeEvent(input$infoBox_integer_titleId, {
js$collapse("infoBox_integer")
})
observeEvent(input$infoBox_Date_titleId, {
js$collapse("infoBox_Date")
})
# NOTE(review): duplicate trigger on infoBox_Date_titleId -- likely meant
# to be infoBox_none_titleId; confirm against the UI definition.
observeEvent(input$infoBox_Date_titleId, {
js$collapse("infoBox_none")
})
# Main define-variables panel: the selection table plus a progress bar of
# how many variables have been classified so far.
output$MainBody=renderUI({
req(res$data)
box(width=12,
DT::dataTableOutput("data"),
# add a progressbar
div(paste0("Variables defined: ",sum(res$variableTypes$set!=0),"/",NCOL(res$data))),
# NOTE(review): "prgoressBar" -- possible typo for progressBar; confirm
# whether a helper of this name exists in functionsInApp.R.
prgoressBar(sum(res$variableTypes$set!=0)/NCOL(res$data)*100, color = "green", striped = TRUE, active = TRUE, size = "sm")
# textOutput('myText')
)})
# myValue <- reactiveValues(employee = '')
# output$myText <- renderText({
# myValue$employee
# })
# Debug helper: dump every entry of the res reactiveValues to the console.
# NOTE(review): the dotted name looks like an S3 print method but is not --
# consider renaming to avoid dispatch ambiguity.
print.res <- function(){
for(i in names(res)){
print(paste("------------------------------------",i,"------------------------------------"))
print(res[[i]])
}
}
# Convert positive integers to Excel-style column labels (1 -> "A",
# 26 -> "Z", 27 -> "AA", ...). Vectorised over `x`; used for display only.
excel.LETTERS <- function(x){
  out <- c()
  for(i in x){
    tmp <- c()
    while(i > 0){
      tmp <- c(LETTERS[((i - 1) %% 26) + 1], tmp)
      # Bijective base-26: subtract 1 before dividing; plain floor(i/26)
      # made exact multiples of 26 gain a spurious letter (26 -> "AZ").
      i <- (i - 1) %/% 26
    }
    out <- c(out, paste0(tmp, collapse = ""))
  }
  return(out)
}
# renders the class selection table (res$varNames holds one row per variable
# plus the clickable type-selection buttons; first two columns stay fixed)
output$data <- DT::renderDataTable(
{
req(res$varNames)
DT::datatable(
# print(res$varNames)
res$varNames %>%
`colnames<-`(letters[1:NCOL(res$varNames)]),
# server = F,
escape = F,
selection = 'none',
extensions = "FixedColumns",
options = list(#scrollY = '400px',
scrollX = TRUE,
# displayStart restores the page the user was on before a redraw.
# paging = F,
displayStart = defineVariables_previousPage,
fixedColumns = list(leftColumns = 2)))
}
)
# tooltip = tooltipOptions(title = "Click to see inputs !")
# Modal shown when a variable's inspect ("check") control is clicked.
# `id` has the form "<prefix>_<col>_<row>"; the third field is the data
# column index. Left half shows placeholder statistics headings, right
# half hosts the raw column values rendered by output$variableTable.
#
# FIX: removed the unused local `selectedCol` (parsed from the id but
# never referenced) and a stray trailing comma after the dataTableOutput
# argument.
dataModal <- function(id) {
  selectedRow <- as.numeric(strsplit(id, "_")[[1]][3])
  modalDialog(
    h3("Selected column:",selectedRow,"(Excel:",excel.LETTERS(selectedRow),")"),
    column(6,
           h5("some statistics"),
           h5("number of rows"),
           h5("number of empty entries"),
           h5("number of unique levels"),
           h5("number of empty cells")
    ),
    column(6,
           DT::dataTableOutput("variableTable")
    ),
    footer = fluidRow(
      column(12,align="center",
             modalButton("Cancel")
      )
    ),
    easyClose = T
  )
}
# DEBUG leftover: plots a hard-coded column (index 4); not referenced by
# the modal UI, which uses "variableTable" instead.
output$plot = renderPlot(plot(res$data[,4]))
# Raw values of the column encoded in input$select_check, displayed
# inside the dataModal() dialog.
output$variableTable = DT::renderDataTable({
  req(res$data)
  selectedRow <- as.numeric(strsplit(input$select_check, "_")[[1]][3])
  res$data[,selectedRow,drop=F]
},options = list(scrollY ='400px',paging = F))
# Open the inspection modal when a "check" control is clicked.
observeEvent(input$select_check, {
  showModal(dataModal(input$select_check))
})
#### what does this
# DEBUG: dumps the main reactive containers to the console on demand.
observeEvent(input$show,{
  print("#================================================================================#")
  print(res)
  print("res$classified--------------------------------------------------------#")
  try(print(head(res$classified)))
  print("res$variableTypes----------------------------------------------------#")
  try(print(res$variableTypes))
  print("res$n-----------------------------------------------------------------#")
  try(print(res$n))
  print("res$monitor-----------------------------------------------------------------#")
  try(print(res$monitor))
  # try(print(res$varNames))
  #showModal(dataModal("id_1_1_123"))
})
# Handles clicks on a type button in the definition table. The button id
# encodes "<prefix>_<col>_<row>": row = data column index, col = chosen
# type (1..6). Clicking a new type selects it; clicking the already
# active type deselects the column (set = 0). The tens digit of
# res$classified appears to flag "value fits the chosen class" -- it is
# added on select and stripped with %% 10 on deselect.
observeEvent(input$select_button, {
  print("input$select_button")
  # print(input$select_button)
  # print(res$varNames)
  # Remember the DT page so the table re-renders where the user was.
  defineVariables_previousPage <<- input$data_rows_current[1] - 1
  print(defineVariables_previousPage)
  selectedRow <- as.numeric(strsplit(input$select_button, "_")[[1]][3])
  selectedCol <- as.numeric(strsplit(input$select_button, "_")[[1]][2])
  # print(res$varNames)
  # print(selectedRow)
  # print(selectedCol)
  if (res$variableTypes$set[selectedRow]!=selectedCol){
    # NOTE(review): `<<-` on a reactiveValues field is unusual; plain `<-`
    # on the next line works -- confirm the superassignment is intended.
    res$varNames$`.`[selectedRow] <<- '<i class="far fa-check-circle"></i>'
    res$variableTypes$set[selectedRow] = selectedCol
    progress_defineVar$set(sum(res$variableTypes$set!=0)/NCOL(res$data), detail = paste("selected",selectedRow))
    res$varNames <- toggleSelected(res$varNames,selectedCol,selectedRow,nCol = 6)
    res$classified[,selectedRow] <- res$classified[,selectedRow] +
      10 * !is.non(res$data[,selectedRow],"varName!",names(def.varNames.buttons)[res$variableTypes$set[selectedRow]])
    # Type 6 ("no structure") has nothing to monitor, hence NA.
    if (selectedCol == 6){
      res$variableTypes$hasMonitor[selectedRow] = NA
    } else {
      res$variableTypes$hasMonitor[selectedRow] = FALSE
    }
    ### if direct switch reset monitor!!!???xxx
    res$variableTypes$nMissing[selectedRow] <- sum(is.missing(res$data[,selectedRow]))
    res$variableTypes$nInClass[selectedRow] <- sum(is.non(res$data[,selectedRow],"varName!",names(def.varNames.buttons)[res$variableTypes$set[selectedRow]]))
    res$variableTypes$nInMonitor[selectedRow] <- NA
  } else {
    # Deselect: reset icon, counters and the classified tens digit.
    res$varNames$`.`[selectedRow] <<- '<i class="fa fa-times-circle"></i>'
    res$variableTypes$set[selectedRow] = 0
    progress_defineVar$set(sum(res$variableTypes$set!=0)/NCOL(res$data), detail = paste("deselected",selectedRow))
    res$varNames <- unToggle(res$varNames,selectedRow,nCol = 6)
    ## Check if monitor is still active! xxx!!!???
    # NOTE(review): hasMonitor is assigned FALSE three times in this
    # branch -- redundant duplicates.
    res$variableTypes$hasMonitor[selectedRow] = FALSE
    res$classified[,selectedRow] <- res$classified[,selectedRow] %% 10
    res$variableTypes$hasMonitor[selectedRow] = FALSE
    res$variableTypes$nMissing[selectedRow] <- NA
    res$variableTypes$nInClass[selectedRow] <- NA
    res$variableTypes$nInMonitor[selectedRow] <- NA
  }
  print("names(def.varNames.buttons)[res$variableTypes$set]")
  #print((names(def.varNames.buttons)[res$variableTypes$set])[selectedRow])
  #print(res$variableTypes)
})
# Explanatory texts for the variable types. Each renderUI must *return*
# a UI object; a bare character string would be escaped and shown with
# its raw markup, so every HTML snippet is wrapped in HTML() (matching
# the pre-existing typeInfo_integers output).
#
# BUG FIX: typeInfo_numbers built an HTML() object, discarded it, and
# returned the value of a debug print() instead; the prints are removed
# and the HTML object is now the result. Also fixed the user-facing typo
# "hole numbers" -> "whole numbers".
output$typeInfo_numbers <- renderUI({
  HTML(paste0("<font color=\"",get.typeColor("numeric"),"\"><b> numbers</b></font>: everything measurable or countable e.g. days spend in hospital,
              number of relapses, age, weight."))
})
output$typeInfo_integers <- renderUI({tagList(HTML(paste0("<font color=\"#556677\"><b>integers</b></font>: just a subclass of numeric with only whole numbers."))) })
output$typeInfo_dates <- renderUI({HTML(paste0("<font color=\"",get.typeColor("Date"),"\"><b>dates</b></font>: hard to identify for a computer due to many different formats e.g.
                                               february/1/2010, 1-feb-2010, 01.02.2010, 4533454.")) })
output$typeInfo_strings <- renderUI({HTML(paste0("<font color=\"",get.typeColor("character"),"\"><b>strings</b></font>: names and basically everything which is not a date or a number.
                                                 This might be due to special symbols e.g. > <= % ( ] or different/wrong
                                                 formatting.")) })
output$typeInfo_factors <- renderUI({HTML(paste0("<font color=\"",get.typeColor("factor"),"\"><b>factors</b></font>: Explain me!!!.")) })
#----------------------------------------------------------------- 3a. Explore ----
observeEvent(input$infoBox_monitor_titleId, {
  js$collapse("infoBox_monitor")
})
# Progress line for the Explore tab: how many monitor-capable variables
# (hasMonitor not NA) already have a monitor defined.
output$explore.progress <- renderUI( {
  req(res$variableTypes)
  list(
    div(paste0("monitors defined: ",sum(res$variableTypes$hasMonitor!=0,na.rm = T),"/",sum(!is.na(res$variableTypes$hasMonitor)))),
    # NOTE(review): "prgoressBar" -- same probable typo/helper as in MainBody.
    prgoressBar(sum(res$variableTypes$hasMonitor!=0,na.rm = T)/sum(!is.na(res$variableTypes$hasMonitor))*100, color = "warning", striped = TRUE, active = TRUE, size = "sm")
  )
  # dynamicUI.explore.progress()
})
# Overview table for the Explore tab: per-variable type, monitor flag and
# the counts of missing / out-of-class / out-of-monitor values, formatted
# as "i of n". Single-row selection drives the side panel; previous page
# and selection are restored across re-renders.
output$exploreVarNames <- DT::renderDataTable(
  {
    req(res$data)
    DT::datatable(
      data.frame(variable = colnames(res$data),
                 type=setToType(res$variableTypes$set),
                 monitor = res$variableTypes$hasMonitor, # set TRUE/FALSE
                 nMissing = ifelse(is.na(res$variableTypes$nMissing),NA,
                                   sprintf("%i of %i",res$variableTypes$nMissing,NROW(res$data))),
                 nWrongClass = ifelse(is.na(res$variableTypes$nInClass),NA,
                                      sprintf("%i of %i",res$variableTypes$nInClass,NROW(res$data)-res$variableTypes$nMissing)),
                 nNotInMonitor = ifelse(is.na(res$variableTypes$nInMonitor),NA,
                                        sprintf("%i of %i",NROW(res$data)-res$variableTypes$nMissing-res$variableTypes$nInClass-res$variableTypes$nInMonitor,
                                                NROW(res$data)-res$variableTypes$nMissing-res$variableTypes$nInClass))
      ),
      extensions = "FixedColumns",
      selection = list(mode = "single", target = "row", selected = exploreVariables_previousSelection),
      options = list(#scrollY = '400px',
        scrollX = TRUE,
        # paging = F,
        displayStart = exploreVariables_previousPage
        #fixedColumns = list(leftColumns = 2)))
      ))
  }
  #extensions = "FixedColumns",
)
# Remember the DT page and row selection of the Explore table so both are
# restored when the table re-renders.
observeEvent(input$exploreVarNames_rows_selected, {
  exploreVariables_previousPage <<- input$exploreVarNames_rows_current[1] - 1
  exploreVariables_previousSelection <<- input$exploreVarNames_rows_selected
  print("res$variableTypes")
  #print(res$variableTypes)
  print("names(def.varNames.buttons)[res$variableTypes$set]")
  #print(names(def.varNames.buttons)[res$variableTypes$set])
  print("input$exploreVarNames_rows_selected")
  #print(input$exploreVarNames_rows_selected)
})
# Interactive scatter of the selected variable against its row number.
# NOTE(review): ylab("a") looks like a placeholder axis label.
output$plot.tmp <- renderPlotly({
  req(input$exploreVarNames_rows_selected)
  dta <- data.frame(1:NROW(res$data),res$data[,input$exploreVarNames_rows_selected])
  colnames(dta) <- c("row.nr.",names(res$data)[input$exploreVarNames_rows_selected])
  p <- ggplot(data=dta,aes_string(x="row.nr.",y=names(res$data)[input$exploreVarNames_rows_selected])) +
    geom_point() + ylab("a")
  ggplotly(p)
})
# "Accept Monitor!" click: stores the monitor settings of the selected
# variable and re-derives the hundreds digit of res$classified, which
# appears to encode "value passes the monitor" (%% 100 strips the old
# flag, + 100 * <condition> sets the new one). Branches by the variable's
# type code in res$variableTypes$set.
# NOTE(review): the set==1 (numeric) and set==2 (integer) branches are
# near-identical copy-paste except for the input ids and the `decimal`
# field -- a shared helper would remove the duplication.
observeEvent(input$accept_monitor,{
  print(input$exploreVarNames_rows_selected)
  # update res$variableTypes$hasMonitor
  res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected] = TRUE
  # generate actual monitor
  if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 1){
    # update monitor: decimal separator plus min/max bounds (empty input -> +/-Inf)
    res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$decimal = input$decimal
    res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$minimum =
      ifelse(is.na(input$min_numeric),-Inf,input$min_numeric)
    res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$maximum =
      ifelse(is.na(input$max_numeric),Inf,input$max_numeric)
    # print(res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]])
    # update calssified
    # need to unclassifie if change of class!
    # NOTE(review): strict inequalities -- values exactly equal to the
    # bound fail the monitor. Confirm that exclusive bounds are intended.
    res$classified[,input$exploreVarNames_rows_selected] <-
      res$classified[,input$exploreVarNames_rows_selected] %% 100 + 100 *
      (res$data[,input$exploreVarNames_rows_selected] > res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$minimum) *
      (res$data[,input$exploreVarNames_rows_selected] < res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$maximum)
    ## update nNotInMonitor
    res$variableTypes$nInMonitor[input$exploreVarNames_rows_selected] <-
      sum(floor(res$classified[,input$exploreVarNames_rows_selected] / 100))
  }
  if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 2){
    # update monitor (integer variant; same structure as numeric above)
    res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$minimum =
      ifelse(is.na(input$min_integer),-Inf,input$min_integer)
    res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$maximum =
      ifelse(is.na(input$max_integer),Inf,input$max_integer)
    # print(res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]])
    # update calssified
    # need to unclassifie if change of class!
    res$classified[,input$exploreVarNames_rows_selected] <-
      res$classified[,input$exploreVarNames_rows_selected] %% 100 + 100 *
      (res$data[,input$exploreVarNames_rows_selected] > res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$minimum) *
      (res$data[,input$exploreVarNames_rows_selected] < res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$maximum)
    ## update nNotInMonitor
    res$variableTypes$nInMonitor[input$exploreVarNames_rows_selected] <-
      sum(floor(res$classified[,input$exploreVarNames_rows_selected] / 100))
  }
  if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 3){
    # Factor variant: value passes the monitor when it is one of the
    # accepted levels.
    # print("################################-------##############################################")
    # print(res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]])
    # print( res$data[,input$exploreVarNames_rows_selected])
    # print( res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$acceptedLevels)
    # update calssified
    res$classified[,input$exploreVarNames_rows_selected] <-
      res$classified[,input$exploreVarNames_rows_selected] %% 100 + 100 *
      res$data[,input$exploreVarNames_rows_selected] %in%
      res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$acceptedLevels
    # update nNotInMonitor
    res$variableTypes$nInMonitor[input$exploreVarNames_rows_selected] <-
      sum(floor(res$classified[,input$exploreVarNames_rows_selected] / 100))
  }
  if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 4){
    # TODO: ordered-factor monitor not implemented yet.
    # update monitor
    # update calssified
    # update nNotInMonitor
  }
  if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 5){
    # TODO: date monitor not implemented yet.
    # update monitor
    # update calssified
    # update nNotInMonitor
  }
})
# First five non-missing values of the selected variable (shown in the
# date-monitor panel as a parsing preview).
output$out1 <- renderPrint(head(res$data[!is.missing(res$data[,input$exploreVarNames_rows_selected]),input$exploreVarNames_rows_selected],5))
# Move the levels picked in the "All levels" list into the monitor's
# accepted set and drop them from the declined set.
#
# BUG FIX: removal used `x[-which(x %in% sel)]`; when nothing matches,
# which() returns integer(0) and `x[integer(0)]` silently empties the
# whole vector. Negated logical indexing handles the no-match case.
observeEvent(input$acceptLevels,{
  v <- res$variableTypes$Variables[input$exploreVarNames_rows_selected]
  res$monitor[[v]]$acceptedLevels <-
    c(res$monitor[[v]]$acceptedLevels, input$selectInput_defineLevels)
  res$monitor[[v]]$declinedLevels <-
    res$monitor[[v]]$declinedLevels[
      !(res$monitor[[v]]$declinedLevels %in% input$selectInput_defineLevels)]
})
# Inverse operation: move levels picked in the "Correct levels" list back
# into the declined set. Same indexing fix as above.
observeEvent(input$removeLevels,{
  v <- res$variableTypes$Variables[input$exploreVarNames_rows_selected]
  res$monitor[[v]]$declinedLevels <-
    c(res$monitor[[v]]$declinedLevels, input$selectInput_acceptedLevels)
  res$monitor[[v]]$acceptedLevels <-
    res$monitor[[v]]$acceptedLevels[
      !(res$monitor[[v]]$acceptedLevels %in% input$selectInput_acceptedLevels)]
})
# Builds the type-specific monitor-definition UI for the Explore side
# panel. `set` is the variable's type code (0 = undefined, 1 = numeric,
# 2 = integer, 3 = categorical, 4 = ordered factor, 5 = date, 6 = none).
# Types 0 and 6 return early with just a message; all other types get the
# shared "Accept Monitor!" button appended at the end.
# NOTE(review): the hasMonitor defaults below index res$monitor by row
# *number*, while the accept/remove observers index it by variable *name*
# (res$variableTypes$Variables[...]) -- confirm both resolve to the same
# element.
explore.rightPanel <- function(set){
  if (set==0){
    return(
      out <- list(h4("If you don't know the type, there is nothing to monitor!"))
    )
  }
  if (set==1){ # numeric
    out <- list(
      fluidRow(
        column(12,
               tags$head(
                 tags$style(
                   HTML("label{float:left;}")
                 )
               ),
               radioButtons("decimal",HTML("decimal separator:",HTML('&nbsp;&nbsp;')),c(". (dot)",", (comma)"),inline = F)
        )
      ),
      fluidRow(
        # Prefill min/max from an existing monitor, otherwise +/-Inf.
        column(6,numericInput("min_numeric","minimum",ifelse(
          res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected]==TRUE,
          res$monitor[[input$exploreVarNames_rows_selected]]$minimum,-Inf
        ))),
        column(6,numericInput("max_numeric","maximum",ifelse(
          res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected]==TRUE,
          res$monitor[[input$exploreVarNames_rows_selected]]$maximum,Inf
        ))),
        column(12,h5("If there is no limit, enter nothing."))
      )
    )
  }
  if (set==2){ # integer
    out <- list(
      # NOTE(review): the stray trailing comma below passes an empty 4th
      # argument to numericInput (its `min`); the default is used, so it
      # is harmless but should be tidied.
      column(6,numericInput("min_integer","minimum",ifelse(
        res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected]==TRUE,
        res$monitor[[input$exploreVarNames_rows_selected]]$minimum,-Inf
      ),)),
      column(6,numericInput("max_integer","maximum",ifelse(
        res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected]==TRUE,
        res$monitor[[input$exploreVarNames_rows_selected]]$maximum,Inf
      ))),
      column(12,h5("If there is no limit, enter nothing."))
    )
  }
  if (set==3){ # categorial
    # First visit for this variable: seed the monitor with every observed
    # level declined and none accepted.
    if (is.null(res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$acceptedLevels) &
        is.null(res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$declinedLevels)){
      res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$declinedLevels <-
        unique(res$data[!is.missing(res$data[,input$exploreVarNames_rows_selected]),
                        input$exploreVarNames_rows_selected])
      res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$acceptedLevels <- NULL
    }
    out <- list(
      h4("Define the correct levels"),
      # checkboxGroupInput('in3', NULL, unique(res$data[,input$exploreVarNames_rows_selected]))
      selectInput('selectInput_defineLevels', "All levels:",
                  res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$declinedLevels,
                  multiple=TRUE, selectize=FALSE),
      fluidRow(
        column(6,
               actionButton("acceptLevels","Accept",icon = icon("angle-down"),width = "100%")
        ),
        column(6,
               actionButton("removeLevels","Remove",icon = icon("angle-up"),width = "100%")
        )),
      selectInput('selectInput_acceptedLevels', "Correct levels:",
                  res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$acceptedLevels,
                  multiple=TRUE, selectize=FALSE)
    )
  }
  if (set==4){ # ordered factor
    out <- list(h3("Define the correct classes"),
                h3("And order."),
                ##sortable is the shit xxx???!!!
                ###library(sortable)
                orderInput("a","a",unique(res$data[!is.missing(res$data[,input$exploreVarNames_rows_selected]),
                                                   input$exploreVarNames_rows_selected]),
                           connect ="b"),
                orderInput("b","b",NULL,connect = "a")
    )
  }
  if (set==5){ # date
    out <- list(
      h5("The first five dates are recognized as follows:"),
      verbatimTextOutput('out1'),
      h5("Define date format. If they are only numbers e.g. 492933 then write: dddd and define a Origin"),
      textInput("textinput","date format","e.g. 01.02.1988 -> dd.mm.yyyy or 08/14/70 -> mm/dd/yy"),
      h5("Some software saves dates by a different origin. check your dates and consider updating the origin."),
      dateInput("origin","Origin:", "1900-01-01")
    )
  }
  if (set==6){
    return(
      out <- list(h4("No structure -> nothing to monitor!"))
    )
  }
  # Append the confirm button shared by all monitor-capable types.
  return(
    append(
      out,
      list(
        tags$hr(),
        column(12,
               actionButton("accept_monitor","Accept Monitor!",width = "100%",style = btn.style.preload)
        )
      )
    )
  )
}
# Reactive wrapper: builds the monitor-definition panel for the type of
# the currently selected row in the Explore overview table.
dynamicUI.explore.rightPanel <- reactive({
  req(input$exploreVarNames_rows_selected)
  selected <- input$exploreVarNames_rows_selected
  explore.rightPanel(res$variableTypes$set[selected])
})
# Side panel: show a prompt until a row is selected, then the
# type-specific monitor controls.
output$explore.sidePanel <- renderUI({
  req(res$data)
  if (is.null(input$exploreVarNames_rows_selected)) {
    return(h5("Select a row in the overview-table!"))
  }
  dynamicUI.explore.rightPanel()
})
#----------------------------------------------------------------- 4. Overview ----
# Completeness plot over all variables (plot.complete from functionsInApp.R).
output$complete.Obs <- renderPlot({
  if (show.events) print("output$complete.Obs")
  req(res$data)
  return(plot.complete(res$data,res$variableTypes))
})
# colors have problems
# Pie-style bar of the current ("newest") type per variable, coloured by
# the shared get.typeColor() palette.
output$Overview.out.right.dist <- renderPlot({
  if (show.events) print("output$Overview.out.right.dist")
  req(res$data)
  df <- res$variableTypes %>%
    mutate(newest = get.newestTypes(.),
           col = get.typeColor(newest))
  collist <- df[!duplicated(df$newest),] %>%
    mutate(col = get.typeColor(newest))
  col = collist$col
  names(col) = collist$newest
  df %>%
    ggplot(aes(x="newest",fill=newest)) + geom_bar(stat = "count") + #,fill = unique(get.typesDf(res$data)$col)) +
    scale_fill_manual(values = col) +
    coord_polar("y", start=0) + theme_void()+
    theme(axis.text.x=element_blank())
})
# Plain-text dataset summary (dimensions, completeness, type table).
output$Overview.out.left.verbatim <- renderPrint({
  if (show.events) print("output$Overview.out.left.verbatim")
  req(res$data)
  list(nCol = NCOL(res$data),
       nRow = NROW(res$data),
       nComplete = sum(complete.cases(res$data)),
       nMissing = sum(is.na(res$data)),
       fileName = "Not yet implemented!",
       types = get.typesDf(res$data))
})
#----------------------------------------------------------------- 5. Evaluation (Interactive Histogram) ----
## nothing Placeholder
# Demo side-panel UI: slider bound to res$n plus a dummy button.
dynamicUi <- reactive({
  return(
    list(
      sliderInput("obs",
                  "Number of observations:",
                  min = 1,
                  max = 1000,
                  value = res$n),
      actionButton("action","Add somthing!",width = 120)
    )
  )
})
## nothing Placeholder
output$sidePanel <- renderUI( {
  dynamicUi()
})
## nothing Placeholder
# Demo histogram of res$n random normal draws.
output$distPlot <- renderPlot({
  # generate an rnorm distribution and plot it
  dist <- rnorm(res$n)
  hist(dist)
})
## nothing Placeholder
# Demo button: bumps res$n by one (the ls() print is debug output).
observeEvent(input$action,{
  print(ls())
  if (show.events) print("input$action")
  res$n <- res$n + 1
})
# Snapshot the reactive store `res` as a plain list and save it to
# "res.RData" in the working directory.
#
# FIX: reactiveValuesToList() was called three identical times in a row
# (copy-paste leftover); a single call is sufficient.
observeEvent(input$saveRes,{
  x <- reactiveValuesToList(res,all.names = TRUE)
  str(x)
  resSaveName <- "res.RData"
  save(x,file = resSaveName)
  print(paste0("res saved in ",resSaveName,"."))
})
# Slider handler: logs memory allocation of the server and global
# environments (debug aid), then stores the slider value in res$n.
observeEvent(input$obs,{
  print("-------------------------------- env.server ------------------------------")
  try(print(get.memory.allocation(env.server)))
  print("-------------------------------- .GlobalEnv --------------------------------")
  try(print(get.memory.allocation(.GlobalEnv)))
  #n <<- rnorm(1000000)
  #print(object.size())
  if (show.events) print("input$obs")
  res$n <- input$obs
})
#----------------------------------------------------------------- entwickler = T ----
# Developer-mode extras (entwickler = German for "developer"): memory
# inspection widgets and console dumps. Note this redefines dynamicUi,
# shadowing the Evaluation-tab version above when enabled.
if (entwickler){
  env.server <- environment()
  env <- .GlobalEnv
  dynamicUi <- reactive({
    return(
      list(
        actionButton("showMemory","print memory in console"),
        tableOutput('fara'),
        tableOutput('foo'),
        tableOutput('fobs')
      )
    )
  })
  output$memoryUsage <- renderUI( {
    dynamicUi()
  })
  observeEvent(input$showMemory,{
    print("-------------------------------- env.server ------------------------------")
    try(print(get.memory.allocation(env.server)))
    print("-------------------------------- .GlobalEnv --------------------------------")
    try(print(get.memory.allocation(.GlobalEnv)))
  })
  output$foo <- renderTable({
    get.memory.allocation(env.server)
  })
  output$fara <- renderTable(get.memory.allocation(env.server,detail = F))
  output$fobs <- renderTable(gc())
}
})
| /server.R | no_license | ChristophAnten/CleanDataApp | R | false | false | 36,479 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Shared style for the grey "Accept Data!" buttons.
btn.style.preload <- "color: #fff; background-color: #666666; border-color: #999999"
# Define server logic required to draw a histogram
shinyServer(function(input, output, session) {
  #----------------------------------------------------------------- Basic ----
  # Helper functions (prgoressBar, init.*, is.non, is.missing, ...) are
  # presumably defined in this sourced file -- confirm.
  source("functionsInApp.R")
  options(stringsAsFactors = FALSE)
# actually render the dropdownMenu PROGRESS
  # actually render the dropdownMenu PROGRESS
  # Task dropdown in the header. Only the "defined variables" entry is
  # computed; the other percentages are hard-coded placeholders.
  output$menu <- renderMenu({
    dropdownMenu(
      type = "tasks", badgeStatus = "success",
      taskItem(value = 20, color = "red",
               "read information"
      ),
      taskItem(value = 50, color = "orange",
               "upload Data"
      ),
      taskItem(value = sum(res$variableTypes$set!=0)/NCOL(res$data)*100, color = "green",
               "defined variables"
      ),
      taskItem(value = 90, color = "green",
               "clean data"
      )
    )
  })
  ## Shows certain activations in the console
  show.events = F
  entwickler = F
  ## Init. of the reactive values
  abc <- reactiveValues(selectedTab = numeric())
  temp <- reactiveValues()
  res <- reactiveValues(n = 106)
  res$temp <- NULL
  res$filePath <- "/noPath"
  res$fileEnding <- ".noEnding"
  # Per-session bookkeeping for restoring DT pages/selections.
  exploreVariables_previousSelection <- NULL
  exploreVariables_previousPage <- NULL
  defineVariables_previousPage <- NULL
  # Create a Progress object
  progress_defineVar <- shiny::Progress$new()
  # Make sure it closes when we exit this reactive, even if there's an error
  # NOTE(review): the close handler is commented out, so the progress bar
  # is never closed for the session.
  #on.exit(progress_defineVar$close())
  progress_defineVar$set(message = "Define/redefine Variables: ", value = 0)
#----------------------------------------------------------------- 0. Information ----
  # Info link on pseudonymisation.
  # NOTE(review): the href "https://en.wikipedia.org/wiki/ABCD" looks like
  # a placeholder URL -- confirm the intended target.
  output$link.Pseudonomisierung <- renderUI({
    h5("This process is called pseudonymisation. Find it out",a("here",
                                                                href="https://en.wikipedia.org/wiki/ABCD",
                                                                target="_blank"))
  })
#----------------------------------------------------------------- 1. Get data ----
  # Preview table for freshly uploaded (not yet accepted) data in `temp`.
  temp.table.simple <- reactive({
    DT::datatable(temp$data,
                  class = 'cell-border stripe',
                  options = list(paging = F,
                                 #pageLength = 10,
                                 #autoWidth = TRUE,
                                 server = T,
                                 scrollX='400px',
                                 scrollY='600px'),
                  selection = 'none')
  })
  # Promote the previewed upload from `temp` into the main store `res`,
  # (re)initialise the derived structures, and jump to the define tab.
  temp.to.res <- reactive({
    res$data <- temp$data
    res$filePath <- temp$filePath
    res$fileEnding <- temp$fileEnding
    res$variableTypes <- init.variableTypes(res)
    res$classified <- init.classified(res)
    res$monitor <- init.monitor(res)
    res$varNames <- init.varNames(res)
    #temp$data <- NULL
    updateTabItems(session, "sidebarmenu", "panelIdDefine2")
    # NOTE(review): debug leftover -- prints the entire dataset to the
    # console on every accept.
    print(temp$data)
  })
  # "Accept Data!" buttons, one per import source; each appears only once
  # a preview (temp$data) exists, and all funnel into temp.to.res().
  output$preload.xlsx <- renderUI({req(temp$data)
    actionButton("actionButton.preload.xlsx","Accept Data!",icon = icon("file-upload"),width = "100%", style = btn.style.preload)})
  output$preload.csv <- renderUI({req(temp$data)
    actionButton("actionButton.preload.csv","Accept Data!",icon = icon("file-upload"),width = "100%", style = btn.style.preload)})
  output$preload.base <- renderUI({req(temp$data)
    actionButton("actionButton.preload.base","Accept Data!",icon = icon("file-upload"),width = "100%", style = btn.style.preload)})
  output$preload.RNG <- renderUI({req(temp$data)
    actionButton("actionButton.preload.RNG","Accept Data!",icon = icon("file-upload"),width = "100%", style = btn.style.preload)})
  observeEvent(input$actionButton.preload.xlsx,{
    temp.to.res()
  })
  observeEvent(input$actionButton.preload.csv,{
    temp.to.res()
  })
  observeEvent(input$actionButton.preload.base,{
    temp.to.res()
  })
  observeEvent(input$actionButton.preload.RNG,{
    temp.to.res()
  })
  # Advance the sidebar to the next tab in the workflow; all import tabs
  # lead to the define tab. Unlisted tabs yield NULL (no-op switch).
  nextTab <- function(input.tabs,session){
    newtab <- switch(input.tabs,
                     "panelSubIdxlsx" = "panelIdDefine2",
                     "panelSubIdcsv" = "panelIdDefine2",
                     "john" = "panelIdDefine2",
                     "RDatabase" = "panelIdDefine2",
                     "panelSubIdRNG" = "panelIdDefine2",
                     "panelIdDefine2"= "panelIdExplore2",
                     "panelIdExplore2" = "panelIdOverview",
                     "panelIdOverview" = "panelIdEvaluation"
    )
    updateTabItems(session, "sidebarmenu", newtab)
  }
  # Empty stub -- not implemented yet.
  reloadData <- function(){}
  # Step the sidebar back one tab (no mapping back into the import tabs).
  prevTab <- function(input.tabs,session){
    newtab <- switch(input.tabs,
                     # "panelSubIdxlsx" = "panelIdDefine2",
                     # "panelSubIdcsv" = "panelIdDefine2",
                     # "john" = "panelIdDefine2",
                     # "RDatabase" = "panelIdDefine2",
                     # "panelSubIdRNG" = "panelIdDefine2",
                     "panelIdExplore2" = "panelIdDefine2",
                     "panelIdOverview" = "panelIdExplore2",
                     "panelIdEvaluation" = "panelIdOverview"
    )
    updateTabItems(session, "sidebarmenu", newtab)
  }
  observeEvent(input$btn_nextTab, {
    nextTab(input$sidebarmenu,session)
  })
  observeEvent(input$btn_prevTab, {
    prevTab(input$sidebarmenu,session)
  })
#============================================================= CSV OUtPUT ====
  # CSV import: read the uploaded file with the user-chosen header /
  # separator / quote options into `temp` and show the preview table.
  output$contents.csv <- DT::renderDataTable({
    if (show.events) print("output$contents.csv")
    # input$file.csv will be NULL initially. After the user selects
    # and uploads a file, head of that data file by default,
    # or all rows if selected, will be shown.
    req(input$file.csv)
    # when reading semicolon separated files,
    # having a comma separator causes `read.csv` to error
    tryCatch(
      {
        temp$filePath <- input$file.csv$datapath
        print(str(input$file.csv))
        temp$fileEnding <- ".csv"
        #### implementing for missings
        temp$data <- read.csv(input$file.csv$datapath,
                              header = input$header,
                              sep = input$sep,
                              quote = input$quote)
      },
      error = function(e) {
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    return(temp.table.simple())
  })
#============================================================= XLSX OUtPUT ====
  # XLSX import: read the selected sheet of the uploaded workbook into
  # `temp` and show the preview table.
  output$contents.xlsx <- DT::renderDataTable({
    if (show.events) print("output$contents.xlsx")
    req(input$file.xlsx)
    tryCatch(
      {
        sheet <- input$select.sheet
        temp$filePath <- input$file.xlsx$datapath
        temp$fileEnding <- ".xlsx"
        temp$data <- data.frame(xlsx::read.xlsx(input$file.xlsx$datapath,sheetIndex = sheet))
        #print(res$variableTypes)
      },
      error = function(e) {
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    return(temp.table.simple())
  })
# interactive selector for sheets
output$selectInput.sheet.xlsx <- renderUI({
req(input$file.xlsx)
gdata::sheetNames(input$file.xlsx$datapath)
selectInput("select.sheet", "Select Sheet",
choices = gdata::sheetNames(input$file.xlsx$datapath),
selected = 1)
})
#============================================================= RDat OUtPUT ====
  # R-object import: look up the object named by the user and preview it.
  # NOTE(review): get() on a user-supplied string evaluates an arbitrary
  # symbol from the search path -- acceptable for a local tool, but worth
  # restricting if the app is ever hosted.
  output$contents.RDat <- DT::renderDataTable({
    if (show.events) print("output$contents.RDat")
    req(input$inputRDat)
    tryCatch(
      {
        temp$data <- get(input$inputRDat)
        temp$filePath <- input$inputRDat
        temp$fileEnding <- ".RData"
        # temp$variableTypes <- init.variableTypes(res)
        # temp$classified <- init.classified(res)
        # temp$varNames <- init.varNames(res)
      },
      error = function(e) {
        # return a safeError if a parsing error occurs
        stop(safeError(e))
      }
    )
    return(temp.table.simple())
  })
#============================================================= RNG OUtPUT ====
  # Random-data generator: grows/shrinks temp$data to the requested number
  # of variables and patients, keeping previously generated columns.
  # `res$rngString` records the generator spec per column so added rows
  # can be drawn from the same distributions.
  output$contents.RNG <- DT::renderDataTable({
    req(input$nVariables,
        input$nPatients)
    if (show.events) print("output$contents.RNG")
    if (is.null(temp$data)){
      res$rngString <- c()
      temp$data <- data.frame(ID = 1:input$nPatients)
    }
    if (!is.null(temp$data)){
      if (input$nVariables > NCOL(temp$data)){
        nAddVars <- input$nVariables-NCOL(temp$data)
        res$rngString <- c(res$rngString,create.RNG.String(nAddVars))
        temp$data <- add.RNG.Variable(nAddVars,temp$data,res$rngString)
      }
      if (input$nVariables < NCOL(temp$data)){
        # NOTE(review): nRmVars is negative here (target minus current) --
        # confirm rm.RNG.Variable expects a signed delta.
        nRmVars <- input$nVariables-NCOL(temp$data)
        res$rngString <- res$rngString[-(input$nVariables:NCOL(temp$data))]
        temp$data <- rm.RNG.Variable(nRmVars,temp$data)
      }
      if (input$nPatients > NROW(temp$data)){
        nAddObs <- input$nPatients-NROW(temp$data)
        temp$data <- add.RNG.Obs(nAddObs,temp$data,res$rngString)
      }
      if (input$nPatients < NROW(temp$data)){
        nRmObs <- NROW(temp$data) - input$nPatients
        temp$data <- rm.RNG.Obs(nRmObs,temp$data)
      }
    }
    temp$filePath <- "randomData"
    temp$fileEnding <- ".RData"
    # return( DT::datatable(temp$data,
    #                       class = 'cell-border stripe',
    #                       options = list(pageLength = 10,
    #                                      #autoWidth = TRUE,
    #                                      server = T,
    #                                      scrollX='400px'))
    # )
    return(temp.table.simple())
  })
  # Accept the generated random data into `res`.
  # NOTE(review): this copies res$temp (set to NULL at init), not
  # temp$data, and unlike temp.to.res() it skips init.monitor() -- confirm
  # this legacy path is still reachable/intended.
  observeEvent(input$saveDataRNG,{
    if (show.events) print("input$saveDataRNG")
    res$data <- data.frame(res$temp)
    res$variableTypes <- init.variableTypes(res)
    res$classified <- init.classified(res)
    res$varNames <- init.varNames(res)
  })
  # Inject artificial data-quality hazards into the working copy.
  observeEvent(input$addHazard,{
    if (show.events) print("observeEvent(input$addHazard)")
    res$temp <- add.Hazard(res$temp)
  })
  ## nothing Placeholder
  # Unused googleVis-style option list driven by paging inputs.
  myOptions <- reactive({
    if (show.events) print("myOptions")
    list(
      page=ifelse(input$pageable==TRUE,'enable','disable'),
      pageSize=input$pagesize,
      width=550
    )
  })
#----------------------------------------------------------------- 2 Define Redefine ----
previousPage <- NULL
# collapsable information boxes
observeEvent(input$infoBox_numeric_titleId, {
js$collapse("infoBox_numeric")
})
observeEvent(input$infoBox_factor_titleId, {
js$collapse("infoBox_factor")
})
observeEvent(input$infoBox_ordered_titleId, {
js$collapse("infoBox_ordered")
})
observeEvent(input$infoBox_integer_titleId, {
js$collapse("infoBox_integer")
})
observeEvent(input$infoBox_Date_titleId, {
js$collapse("infoBox_Date")
})
observeEvent(input$infoBox_Date_titleId, {
js$collapse("infoBox_none")
})
  # Main panel of the "define variables" tab: the type-selection table
  # plus a text/progress indicator of how many columns have a type.
  output$MainBody=renderUI({
    req(res$data)
    box(width=12,
        DT::dataTableOutput("data"),
        # add a progressbar
        div(paste0("Variables defined: ",sum(res$variableTypes$set!=0),"/",NCOL(res$data))),
        # NOTE(review): "prgoressBar" looks like a typo of
        # shinyWidgets::progressBar but may be a functionsInApp.R helper.
        prgoressBar(sum(res$variableTypes$set!=0)/NCOL(res$data)*100, color = "green", striped = TRUE, active = TRUE, size = "sm")
        # textOutput('myText')
    )})
  # myValue <- reactiveValues(employee = '')
  # output$myText <- renderText({
  #   myValue$employee
  # })
  # Debug helper: dump every element of the reactive store `res` to the
  # console, each preceded by a banner line with its name.
  print.res <- function(){
    for(i in names(res)){
      print(paste("------------------------------------",i,"------------------------------------"))
      print(res[[i]])
    }
  }
excel.LETTERS <- function(x){
out <- c()
for(i in x){
tmp = c()
while(i>0){
tmp <- c(LETTERS[((i-1) %% 26)+1],tmp)
i <- floor(i/26)
}
out <- c(out,paste0(tmp,collapse = ""))
}
return(out)
}
  # renders the class selection table
  # One row per data column with clickable HTML type buttons (hence
  # escape = F); the previously viewed DT page is restored on re-render.
  output$data <- DT::renderDataTable(
    {
      req(res$varNames)
      DT::datatable(
        # print(res$varNames)
        res$varNames %>%
          `colnames<-`(letters[1:NCOL(res$varNames)]),
        # server = F,
        escape = F,
        selection = 'none',
        extensions = "FixedColumns",
        options = list(#scrollY = '400px',
          scrollX = TRUE,
          # paging = F,
          displayStart = defineVariables_previousPage,
          fixedColumns = list(leftColumns = 2)))
    }
  )
# tooltip = tooltipOptions(title = "Click to see inputs !")
dataModal <- function(id) {
selectedCol <- as.numeric(strsplit(id, "_")[[1]][2])
selectedRow <- as.numeric(strsplit(id, "_")[[1]][3])
modalDialog(
h3("Selected column:",selectedRow,"(Excel:",excel.LETTERS(selectedRow),")"),
#plotOutput("plot")
column(6,
h5("some statistics"),
h5("number of rows"),
h5("number of empty entries"),
h5("number of unique levels"),
h5("number of empty cells")
),
column(6,
DT::dataTableOutput("variableTable"),
),
footer = fluidRow(
column(12,align="center",
modalButton("Cancel")
)
)
,
easyClose = T
)
}
  # DEBUG leftover: plots a hard-coded column (index 4); the modal uses
  # "variableTable" instead.
  output$plot = renderPlot(plot(res$data[,4]))
  # Raw values of the column encoded in input$select_check, shown inside
  # the dataModal() dialog.
  output$variableTable = DT::renderDataTable({
    req(res$data)
    selectedRow <- as.numeric(strsplit(input$select_check, "_")[[1]][3])
    res$data[,selectedRow,drop=F]
  },options = list(scrollY ='400px',paging = F))
  # Open the inspection modal when a "check" control is clicked.
  observeEvent(input$select_check, {
    showModal(dataModal(input$select_check))
  })
  #### what does this
  # DEBUG: dumps the main reactive containers to the console on demand.
  observeEvent(input$show,{
    print("#================================================================================#")
    print(res)
    print("res$classified--------------------------------------------------------#")
    try(print(head(res$classified)))
    print("res$variableTypes----------------------------------------------------#")
    try(print(res$variableTypes))
    print("res$n-----------------------------------------------------------------#")
    try(print(res$n))
    print("res$monitor-----------------------------------------------------------------#")
    try(print(res$monitor))
    # try(print(res$varNames))
    #showModal(dataModal("id_1_1_123"))
  })
# Handles clicks on the per-cell class buttons of the selection table.
# Button ids encode "<prefix>_<col>_<row>". Clicking toggles the class:
#  - a new class: mark the row, store the class, update the classification
#    matrix (tens digit = "wrong class" flag) and the per-variable counts;
#  - the already-set class: deselect the row and reset its statistics.
# Fix vs. original: res$variableTypes$hasMonitor[selectedRow] = FALSE was
# assigned twice in a row in the deselect branch; once is enough.
observeEvent(input$select_button, {
  print("input$select_button")
  # remember the table page so the redraw does not jump back to page 1
  defineVariables_previousPage <<- input$data_rows_current[1] - 1
  print(defineVariables_previousPage)
  selectedRow <- as.numeric(strsplit(input$select_button, "_")[[1]][3])
  selectedCol <- as.numeric(strsplit(input$select_button, "_")[[1]][2])
  if (res$variableTypes$set[selectedRow] != selectedCol) {
    # select / switch the class of this variable
    res$varNames$`.`[selectedRow] <<- '<i class="far fa-check-circle"></i>'
    res$variableTypes$set[selectedRow] = selectedCol
    progress_defineVar$set(sum(res$variableTypes$set != 0) / NCOL(res$data),
                           detail = paste("selected", selectedRow))
    res$varNames <- toggleSelected(res$varNames, selectedCol, selectedRow, nCol = 6)
    # add the "not in class" flag (tens digit) to the classification matrix
    res$classified[, selectedRow] <- res$classified[, selectedRow] +
      10 * !is.non(res$data[, selectedRow], "varName!",
                   names(def.varNames.buttons)[res$variableTypes$set[selectedRow]])
    if (selectedCol == 6) {
      # class 6 ("no structure") cannot carry a monitor
      res$variableTypes$hasMonitor[selectedRow] = NA
    } else {
      res$variableTypes$hasMonitor[selectedRow] = FALSE
    }
    ### if direct switch reset monitor!!!???xxx
    res$variableTypes$nMissing[selectedRow] <- sum(is.missing(res$data[, selectedRow]))
    res$variableTypes$nInClass[selectedRow] <- sum(is.non(res$data[, selectedRow], "varName!",
                                                          names(def.varNames.buttons)[res$variableTypes$set[selectedRow]]))
    res$variableTypes$nInMonitor[selectedRow] <- NA
  } else {
    # clicking the active class again deselects the variable
    res$varNames$`.`[selectedRow] <<- '<i class="fa fa-times-circle"></i>'
    res$variableTypes$set[selectedRow] = 0
    progress_defineVar$set(sum(res$variableTypes$set != 0) / NCOL(res$data),
                           detail = paste("deselected", selectedRow))
    res$varNames <- unToggle(res$varNames, selectedRow, nCol = 6)
    ## Check if monitor is still active! xxx!!!???
    res$variableTypes$hasMonitor[selectedRow] = FALSE
    # keep only the ones digit: drop class and monitor flags
    res$classified[, selectedRow] <- res$classified[, selectedRow] %% 10
    res$variableTypes$nMissing[selectedRow] <- NA
    res$variableTypes$nInClass[selectedRow] <- NA
    res$variableTypes$nInMonitor[selectedRow] <- NA
  }
  print("names(def.varNames.buttons)[res$variableTypes$set]")
})
# Help text for the "numbers" class.
# Fix vs. original: the renderUI returned the value of a leftover debugging
# print() (a bare character string, which Shiny escapes) while the HTML()
# tag built just before it was discarded. Return the HTML() tag directly.
output$typeInfo_numbers <- renderUI({
  HTML(paste0("<font color=\"", get.typeColor("numeric"),
              "\"><b> numbers</b></font>: everything measurable or countable e.g. days spend in hospital,
                 number of relapses, age, weight."))
})
# Short help texts for the remaining variable classes. All are wrapped in
# HTML() so the <font>/<b> markup is rendered instead of escaped -- the
# dates/strings/factors variants previously returned bare strings,
# inconsistent with the integers variant. Also fixes "hole" -> "whole".
output$typeInfo_integers <- renderUI({ tagList(HTML(paste0("<font color=\"#556677\"><b>integers</b></font>: just a subclass of numeric with only whole numbers."))) })
output$typeInfo_dates <- renderUI({ HTML(paste0("<font color=\"", get.typeColor("Date"), "\"><b>dates</b></font>: hard to identify for a computer due to many different formats e.g.
  february/1/2010, 1-feb-2010, 01.02.2010, 4533454.")) })
output$typeInfo_strings <- renderUI({ HTML(paste0("<font color=\"", get.typeColor("character"), "\"><b>strings</b></font>: names and basically everything which is not a date or a number.
  This might be due to special symbols e.g. > <= % ( ] or different/wrong
  formatting.")) })
output$typeInfo_factors <- renderUI({ HTML(paste0("<font color=\"", get.typeColor("factor"), "\"><b>factors</b></font>: Explain me!!!.")) })
#----------------------------------------------------------------- 3a. Explore ----
# Collapse/expand the monitor info box (shinyjs custom handler).
observeEvent(input$infoBox_monitor_titleId, {
js$collapse("infoBox_monitor")
})
# Progress line + bar: how many of the classed variables already have a
# monitor (hasMonitor is NA for variables that cannot carry one).
output$explore.progress <- renderUI( {
req(res$variableTypes)
list(
div(paste0("monitors defined: ",sum(res$variableTypes$hasMonitor!=0,na.rm = T),"/",sum(!is.na(res$variableTypes$hasMonitor)))),
# NOTE(review): "prgoressBar" looks like a typo for shinyWidgets::progressBar
# -- confirm a helper of this exact name exists elsewhere, otherwise this errors.
prgoressBar(sum(res$variableTypes$hasMonitor!=0,na.rm = T)/sum(!is.na(res$variableTypes$hasMonitor))*100, color = "warning", striped = TRUE, active = TRUE, size = "sm")
)
# dynamicUI.explore.progress()
})
# Overview table for the Explore tab: one row per variable with its class,
# monitor flag and counts (missing / wrong class / outside monitor). The
# ifelse(is.na(...), NA, sprintf(...)) wrappers keep unclassified cells
# empty instead of showing "NA of NA".
output$exploreVarNames <- DT::renderDataTable(
{
req(res$data)
DT::datatable(
data.frame(variable = colnames(res$data),
type=setToType(res$variableTypes$set),
monitor = res$variableTypes$hasMonitor, # set TRUE/FALSE
nMissing = ifelse(is.na(res$variableTypes$nMissing),NA,
sprintf("%i of %i",res$variableTypes$nMissing,NROW(res$data))),
nWrongClass = ifelse(is.na(res$variableTypes$nInClass),NA,
sprintf("%i of %i",res$variableTypes$nInClass,NROW(res$data)-res$variableTypes$nMissing)),
nNotInMonitor = ifelse(is.na(res$variableTypes$nInMonitor),NA,
sprintf("%i of %i",NROW(res$data)-res$variableTypes$nMissing-res$variableTypes$nInClass-res$variableTypes$nInMonitor,
NROW(res$data)-res$variableTypes$nMissing-res$variableTypes$nInClass))
),
extensions = "FixedColumns",
# restore the page and row selection from before the last re-render
selection = list(mode = "single", target = "row", selected = exploreVariables_previousSelection),
options = list(#scrollY = '400px',
scrollX = TRUE,
# paging = F,
displayStart = exploreVariables_previousPage
#fixedColumns = list(leftColumns = 2)))
))
}
#extensions = "FixedColumns",
)
# Remember the table page and the selected row so both survive the next
# re-render (globals written via <<-, read back in output$exploreVarNames).
observeEvent(input$exploreVarNames_rows_selected, {
exploreVariables_previousPage <<- input$exploreVarNames_rows_current[1] - 1
exploreVariables_previousSelection <<- input$exploreVarNames_rows_selected
print("res$variableTypes")
#print(res$variableTypes)
print("names(def.varNames.buttons)[res$variableTypes$set]")
#print(names(def.varNames.buttons)[res$variableTypes$set])
print("input$exploreVarNames_rows_selected")
#print(input$exploreVarNames_rows_selected)
})
# Interactive scatter of the selected variable against its row number.
# NOTE(review): ylab("a") looks like a leftover placeholder label.
output$plot.tmp <- renderPlotly({
req(input$exploreVarNames_rows_selected)
dta <- data.frame(1:NROW(res$data),res$data[,input$exploreVarNames_rows_selected])
colnames(dta) <- c("row.nr.",names(res$data)[input$exploreVarNames_rows_selected])
# aes_string is deprecated in newer ggplot2; kept here for compatibility
p <- ggplot(data=dta,aes_string(x="row.nr.",y=names(res$data)[input$exploreVarNames_rows_selected])) +
geom_point() + ylab("a")
ggplotly(p)
})
# "Accept Monitor!" handler: stores the monitor definition for the selected
# variable and re-classifies its column. res$classified uses a per-cell
# digit code: ones = base flag, tens = class check, hundreds = inside-
# monitor flag; `%% 100` clears the old hundreds digit before the new
# monitor result is added back via `+ 100 * (...)`.
# NOTE(review): the set==1 (numeric) and set==2 (integer) branches are
# identical except for the input ids -- candidates for a shared helper.
observeEvent(input$accept_monitor,{
print(input$exploreVarNames_rows_selected)
# update res$variableTypes$hasMonitor
res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected] = TRUE
# generate actual monitor
if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 1){
# update monitor; empty bound inputs mean "no limit"
res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$decimal = input$decimal
res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$minimum =
ifelse(is.na(input$min_numeric),-Inf,input$min_numeric)
res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$maximum =
ifelse(is.na(input$max_numeric),Inf,input$max_numeric)
# print(res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]])
# update classified (hundreds digit = strictly inside [min, max])
# need to unclassify if change of class!
res$classified[,input$exploreVarNames_rows_selected] <-
res$classified[,input$exploreVarNames_rows_selected] %% 100 + 100 *
(res$data[,input$exploreVarNames_rows_selected] > res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$minimum) *
(res$data[,input$exploreVarNames_rows_selected] < res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$maximum)
## update nNotInMonitor
res$variableTypes$nInMonitor[input$exploreVarNames_rows_selected] <-
sum(floor(res$classified[,input$exploreVarNames_rows_selected] / 100))
}
if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 2){
# update monitor (integer variant; same logic as numeric above)
res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$minimum =
ifelse(is.na(input$min_integer),-Inf,input$min_integer)
res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$maximum =
ifelse(is.na(input$max_integer),Inf,input$max_integer)
# print(res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]])
# update classified
# need to unclassify if change of class!
res$classified[,input$exploreVarNames_rows_selected] <-
res$classified[,input$exploreVarNames_rows_selected] %% 100 + 100 *
(res$data[,input$exploreVarNames_rows_selected] > res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$minimum) *
(res$data[,input$exploreVarNames_rows_selected] < res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$maximum)
## update nNotInMonitor
res$variableTypes$nInMonitor[input$exploreVarNames_rows_selected] <-
sum(floor(res$classified[,input$exploreVarNames_rows_selected] / 100))
}
if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 3){
# categorical: hundreds digit = value is one of the accepted levels
# print("################################-------##############################################")
# print(res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]])
# print( res$data[,input$exploreVarNames_rows_selected])
# print( res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$acceptedLevels)
# update classified
res$classified[,input$exploreVarNames_rows_selected] <-
res$classified[,input$exploreVarNames_rows_selected] %% 100 + 100 *
res$data[,input$exploreVarNames_rows_selected] %in%
res$monitor[[res$variableTypes$Variables[input$exploreVarNames_rows_selected]]]$acceptedLevels
# update nNotInMonitor
res$variableTypes$nInMonitor[input$exploreVarNames_rows_selected] <-
sum(floor(res$classified[,input$exploreVarNames_rows_selected] / 100))
}
if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 4){
# ordered factor: not implemented yet
# update monitor
# update classified
# update nNotInMonitor
}
if (res$variableTypes$set[input$exploreVarNames_rows_selected] == 5){
# date: not implemented yet
# update monitor
# update classified
# update nNotInMonitor
}
})
# Preview of the first 5 non-missing values of the selected variable
# (shown in the set==5 "date format" panel).
output$out1 <- renderPrint(head(res$data[!is.missing(res$data[,input$exploreVarNames_rows_selected]),input$exploreVarNames_rows_selected],5))
# Move the levels picked in the "All levels" box into the accepted set.
# Fix vs. original: when no declined level matched the selection,
# which() returned integer(0) and x[-integer(0)] dropped EVERY element;
# the removal is now guarded.
observeEvent(input$acceptLevels, {
  varName <- res$variableTypes$Variables[input$exploreVarNames_rows_selected]
  res$monitor[[varName]]$acceptedLevels <-
    c(res$monitor[[varName]]$acceptedLevels, input$selectInput_defineLevels)
  dropIdx <- which(res$monitor[[varName]]$declinedLevels %in%
                     input$selectInput_defineLevels)
  if (length(dropIdx) > 0) {
    res$monitor[[varName]]$declinedLevels <-
      res$monitor[[varName]]$declinedLevels[-dropIdx]
  }
})
# Move the levels picked in the "Correct levels" box back to declined.
# Fix vs. original: guard against which() returning integer(0), where
# x[-integer(0)] would drop EVERY accepted level.
observeEvent(input$removeLevels, {
  varName <- res$variableTypes$Variables[input$exploreVarNames_rows_selected]
  res$monitor[[varName]]$declinedLevels <-
    c(res$monitor[[varName]]$declinedLevels, input$selectInput_acceptedLevels)
  dropIdx <- which(res$monitor[[varName]]$acceptedLevels %in%
                     input$selectInput_acceptedLevels)
  if (length(dropIdx) > 0) {
    res$monitor[[varName]]$acceptedLevels <-
      res$monitor[[varName]]$acceptedLevels[-dropIdx]
  }
})
# Builds the right-hand monitor-definition panel for the selected variable,
# depending on its class `set`:
#   0 unknown, 1 numeric, 2 integer, 3 categorical, 4 ordered factor,
#   5 date, 6 no structure.
# Returns a list of Shiny UI elements; for sets 1-5 an "Accept Monitor!"
# button is appended.
# Fixes vs. original:
#  - removed the trailing empty argument in numericInput("min_integer", ...)
#    (an empty argument in a call is a runtime error);
#  - sets 1/2 read res$monitor by the numeric row index, while every writer
#    (accept_monitor) keys it by variable NAME -- lookups are now by name.
explore.rightPanel <- function(set){
  # monitor entries are keyed by variable name throughout the app
  monName <- res$variableTypes$Variables[input$exploreVarNames_rows_selected]
  if (set == 0){
    return(
      out <- list(h4("If you don't know the type, there is nothing to monitor!"))
    )
  }
  if (set == 1){ # numeric
    out <- list(
      fluidRow(
        column(12,
               tags$head(
                 tags$style(
                   HTML("label{float:left;}")
                 )
               ),
               radioButtons("decimal", HTML("decimal separator:", HTML(' ')), c(". (dot)", ", (comma)"), inline = F)
        )
      ),
      fluidRow(
        # defaults: existing monitor bounds, else unlimited
        column(6, numericInput("min_numeric", "minimum", ifelse(
          res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected] == TRUE,
          res$monitor[[monName]]$minimum, -Inf
        ))),
        column(6, numericInput("max_numeric", "maximum", ifelse(
          res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected] == TRUE,
          res$monitor[[monName]]$maximum, Inf
        ))),
        column(12, h5("If there is no limit, enter nothing."))
      )
    )
  }
  if (set == 2){ # integer
    out <- list(
      column(6, numericInput("min_integer", "minimum", ifelse(
        res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected] == TRUE,
        res$monitor[[monName]]$minimum, -Inf
      ))),
      column(6, numericInput("max_integer", "maximum", ifelse(
        res$variableTypes$hasMonitor[input$exploreVarNames_rows_selected] == TRUE,
        res$monitor[[monName]]$maximum, Inf
      ))),
      column(12, h5("If there is no limit, enter nothing."))
    )
  }
  if (set == 3){ # categorial
    # first visit: everything starts out declined, nothing accepted yet
    if (is.null(res$monitor[[monName]]$acceptedLevels) &&
        is.null(res$monitor[[monName]]$declinedLevels)){
      res$monitor[[monName]]$declinedLevels <-
        unique(res$data[!is.missing(res$data[, input$exploreVarNames_rows_selected]),
                        input$exploreVarNames_rows_selected])
      res$monitor[[monName]]$acceptedLevels <- NULL
    }
    out <- list(
      h4("Define the correct levels"),
      selectInput('selectInput_defineLevels', "All levels:",
                  res$monitor[[monName]]$declinedLevels,
                  multiple = TRUE, selectize = FALSE),
      fluidRow(
        column(6,
               actionButton("acceptLevels", "Accept", icon = icon("angle-down"), width = "100%")
        ),
        column(6,
               actionButton("removeLevels", "Remove", icon = icon("angle-up"), width = "100%")
        )),
      selectInput('selectInput_acceptedLevels', "Correct levels:",
                  res$monitor[[monName]]$acceptedLevels,
                  multiple = TRUE, selectize = FALSE)
    )
  }
  if (set == 4){ # ordered factor
    out <- list(h3("Define the correct classes"),
                h3("And order."),
                ## two connected drag&drop lists (shinyjqui::orderInput)
                orderInput("a", "a", unique(res$data[!is.missing(res$data[, input$exploreVarNames_rows_selected]),
                                                     input$exploreVarNames_rows_selected]),
                           connect = "b"),
                orderInput("b", "b", NULL, connect = "a")
    )
  }
  if (set == 5){ # date
    out <- list(
      h5("The first five dates are recognized as follows:"),
      verbatimTextOutput('out1'),
      h5("Define date format. If they are only numbers e.g. 492933 then write: dddd and define a Origin"),
      textInput("textinput", "date format", "e.g. 01.02.1988 -> dd.mm.yyyy or 08/14/70 -> mm/dd/yy"),
      h5("Some software saves dates by a different origin. check your dates and consider updating the origin."),
      dateInput("origin", "Origin:", "1900-01-01")
    )
  }
  if (set == 6){
    return(
      out <- list(h4("No structure -> nothing to monitor!"))
    )
  }
  # sets 1-5 additionally get the accept button
  return(
    append(
      out,
      list(
        tags$hr(),
        column(12,
               actionButton("accept_monitor", "Accept Monitor!", width = "100%", style = btn.style.preload)
        )
      )
    )
  )
}
# Wraps explore.rightPanel() in a reactive, gated on a row being selected.
dynamicUI.explore.rightPanel <- reactive({
  req(input$exploreVarNames_rows_selected)
  explore.rightPanel(res$variableTypes$set[input$exploreVarNames_rows_selected])
})
# Side panel: prompt until a row is picked, then show the monitor editor.
output$explore.sidePanel <- renderUI({
  req(res$data)
  if (is.null(input$exploreVarNames_rows_selected)) {
    h5("Select a row in the overview-table!")
  } else {
    dynamicUI.explore.rightPanel()
  }
})
#----------------------------------------------------------------- 4. Overview ----
# Completeness plot of the whole data set (project helper plot.complete).
output$complete.Obs <- renderPlot({
  if (show.events) print("output$complete.Obs")
  req(res$data)
  plot.complete(res$data, res$variableTypes)
})
# colors have problems
# Donut chart of the current ("newest") type per variable, colored with the
# project palette. NOTE(review): collist re-applies get.typeColor() to a
# frame that already carries the col column -- redundant but harmless.
output$Overview.out.right.dist <- renderPlot({
if (show.events) print("output$Overview.out.right.dist")
req(res$data)
df <- res$variableTypes %>%
mutate(newest = get.newestTypes(.),
col = get.typeColor(newest))
# one named color per distinct type, for scale_fill_manual below
collist <- df[!duplicated(df$newest),] %>%
mutate(col = get.typeColor(newest))
col = collist$col
names(col) = collist$newest
df %>%
ggplot(aes(x="newest",fill=newest)) + geom_bar(stat = "count") + #,fill = unique(get.typesDf(res$data)$col)) +
scale_fill_manual(values = col) +
coord_polar("y", start=0) + theme_void()+
theme(axis.text.x=element_blank())
})
# Plain-text summary of the data set shown on the Overview tab.
output$Overview.out.left.verbatim <- renderPrint({
  if (show.events) print("output$Overview.out.left.verbatim")
  req(res$data)
  d <- res$data
  list(nCol = NCOL(d),
       nRow = NROW(d),
       nComplete = sum(complete.cases(d)),
       nMissing = sum(is.na(d)),
       fileName = "Not yet implemented!",
       types = get.typesDf(d))
})
#----------------------------------------------------------------- 5. Evaluation (Interactive Histogram) ----
## Placeholder demo UI: a slider bound to res$n plus a dummy button.
dynamicUi <- reactive({
  list(
    sliderInput("obs", "Number of observations:",
                min = 1, max = 1000, value = res$n),
    actionButton("action", "Add somthing!", width = 120)
  )
})
## Placeholder side panel hosting the demo controls.
output$sidePanel <- renderUI({
  dynamicUi()
})
## Placeholder histogram of res$n random normal draws.
output$distPlot <- renderPlot({
  # keep the name `dist` so hist()'s automatic title stays the same
  dist <- rnorm(res$n)
  hist(dist)
})
## Placeholder: bump res$n on every button press.
observeEvent(input$action, {
  print(ls())
  if (show.events) print("input$action")
  res$n <- res$n + 1
})
# Snapshot the reactive container to disk as res.RData.
# Fix vs. original: reactiveValuesToList() was executed three times in a
# row; the result is identical, so once is enough.
observeEvent(input$saveRes, {
  x <- reactiveValuesToList(res, all.names = TRUE)
  str(x)
  resSaveName <- "res.RData"
  save(x, file = resSaveName)
  print(paste0("res saved in ", resSaveName, "."))
})
# Slider handler: log memory usage of the server/global environments
# (debug aid), then store the slider value in res$n.
observeEvent(input$obs,{
print("-------------------------------- env.server ------------------------------")
try(print(get.memory.allocation(env.server)))
print("-------------------------------- .GlobalEnv --------------------------------")
try(print(get.memory.allocation(.GlobalEnv)))
#n <<- rnorm(1000000)
#print(object.size())
if (show.events) print("input$obs")
res$n <- input$obs
})
#----------------------------------------------------------------- entwickler = T ----
# Developer-mode extras: memory-inspection outputs. NOTE(review): this
# re-defines dynamicUi, shadowing the placeholder reactive above whenever
# entwickler is TRUE.
if (entwickler){
env.server <- environment()
env <- .GlobalEnv
dynamicUi <- reactive({
return(
list(
actionButton("showMemory","print memory in console"),
tableOutput('fara'),
tableOutput('foo'),
tableOutput('fobs')
)
)
})
output$memoryUsage <- renderUI( {
dynamicUi()
})
# dump per-environment memory allocation to the console on demand
observeEvent(input$showMemory,{
print("-------------------------------- env.server ------------------------------")
try(print(get.memory.allocation(env.server)))
print("-------------------------------- .GlobalEnv --------------------------------")
try(print(get.memory.allocation(.GlobalEnv)))
})
# detailed vs. summary memory tables, plus garbage-collector statistics
output$foo <- renderTable({
get.memory.allocation(env.server)
})
output$fara <- renderTable(get.memory.allocation(env.server,detail = F))
output$fobs <- renderTable(gc())
}
})
|
## Modified version of FD_escaneo_por_ventanas_union_de_ventanas_graficarlas.R
# available at: https://github.com/ericgonzalezs/Characterization_of_introgression_from_Zea_mays_ssp._mexicana_to_Mexican_highland_maize/blob/master/Introgression_analyses/
#
#Arguments for the script:
#First argument is the file with the ABBA BABA counts per sites created with ABBA_BABA.v1.pl during the ABBA_BABA-pipeline
#Second argument is the output prefix
#Loading packages
# Attach a package, installing it first if it is not available yet.
# (Replaces two copy-pasted require/install blocks; the printed messages
# are identical to the originals.)
ensure_package <- function(pkg) {
  if (require(pkg, character.only = TRUE)) {
    print(paste(pkg, "is loaded correctly"))
  } else {
    print(paste("trying to install", pkg))
    install.packages(pkg)
    if (require(pkg, character.only = TRUE)) {
      print(paste(pkg, "installed and loaded"))
    } else {
      stop(paste("could not install", pkg))
    }
  }
}
ensure_package("dplyr")
ensure_package("data.table")
## Loading input
# arg 1: per-site ABBA/BABA counts (output of ABBA_BABA.v1.pl)
# arg 2: output prefix
inputfile <- commandArgs(trailingOnly = TRUE)
data <- fread(inputfile[1], header = T, sep = "\t", fill = T)
# drop the 3 trailing summary rows appended by the counting script
data <- data[-c(nrow(data), (nrow(data) - 1), (nrow(data) - 2)), ]
# keep CHR, position and the Fd/D/Fhom numerators & denominators
Fs <- data[, c(1, 2, 15:22)]
# one table per chromosome, Fs_1 ... Fs_12, in order of first appearance
# (replaces 12 copy-pasted subset() lines; same global names are created)
for (chr_i in 1:12) {
  assign(paste0("Fs_", chr_i),
         subset(Fs, Fs$CHR == unique(Fs$CHR)[chr_i]))
}
#####
#50 SNP windows
# Summarise one chromosome's per-site counts in non-overlapping 50-SNP
# windows: component sums plus the window-level D (DNum/DDenom) and
# Fd (FdNum/FdDenom) ratios. Replaces 12 copy-pasted loops.
#
# Fix vs. original: the window count used round(nrow/50) with a fixed
# 50-row slice, so a trailing partial window either indexed past the end
# of the table (NA sums) or its SNPs were silently dropped. Windows are
# now capped at the last row and every SNP is included.
summarise_windows_50 <- function(Fs_chr, win_size = 50) {
  out <- c()
  n <- nrow(Fs_chr)
  if (is.null(n) || n == 0) return(out)
  a <- 1
  while (a <= n) {
    b <- min(a + win_size - 1, n)  # cap the last (possibly partial) window
    window <- Fs_chr[a:b, ]
    FdNum <- sum(window$FdNum)
    FdDenom <- sum(window$FdDenom)
    DNum <- sum(window$DNum)
    DDenom <- sum(window$DDenom)
    FhomNum <- sum(window$FhomNum)
    FhomDenom <- sum(window$FhomDenom)
    D <- DNum / DDenom
    FdRes <- FdNum / FdDenom
    window_start <- window[1, ]
    window_end <- window[nrow(window), ]
    # midpoint of the first and last SNP position (column 2)
    window_mid <- (window_start[, 2] + window_end[, 2]) / 2
    out <- rbind(out, data.frame(window[1, 1], window_start[, 2], window_end[, 2],
                                 window_end[, 2] - window_start[, 2], window_mid,
                                 FdNum, FdDenom, DNum, DDenom, FhomNum, FhomDenom,
                                 D, FdRes))
    a <- b + 1
  }
  names(out) <- c("Chromosome", "Window_start", "Window_end", "Window_size",
                  "Window_mid", names(out)[6:12], "Fd")
  out
}
# table1 ... table12: windowed summaries, one per chromosome
for (chr_i in 1:12) {
  assign(paste0("table", chr_i),
         summarise_windows_50(get(paste0("Fs_", chr_i))))
}
#####
# Keep windows with evidence of introgression: D > 0 and Fd within [0, 1].
# (The old comment said "Filter for D < 0", contradicting the code.)
# Replaces 12 copy-pasted filter blocks; same table*_subset globals result.
for (chr_i in 1:12) {
  assign(paste0("table", chr_i, "_subset"),
         get(paste0("table", chr_i)) %>%
           filter(D > 0, Fd >= 0 & Fd <= 1))
}
# Genome-wide tables, before and after filtering
table_without_filters <- do.call(rbind.data.frame, mget(paste0("table", 1:12)))
table_with_filters <- do.call(rbind.data.frame, mget(paste0("table", 1:12, "_subset")))
fwrite(table_without_filters, file = paste0(inputfile[2], "_50SNP_WindowsWithoutFiltering.csv"))
fwrite(table_with_filters, file = paste0(inputfile[2], "_50SNP_WindowsWithFiltering.csv"))
###################################################
#write.csv(table_with_filters, file = "")
##################################################
#####
#Unification of windows
# Merge runs of consecutive outlier windows (Y/N calls per window) into
# single start/end intervals. Runs are located by collapsing the calls into
# one string and finding the "NY" (run starts) and "YN" (run ends)
# transitions; runs touching the first/last window have no transition, which
# the length corrections repair.
#
# Fix vs. original: gregexpr() returns -1 when there is NO match, and the
# old code then evaluated Window_end[-1] ("all but the first element"),
# filling window_end with almost the whole column. No-match cases are
# now skipped. Replaces four copy-pasted blocks (Chr1/Chr2 x 90%/99%).
unify_runs <- function(tbl, yes_no, chr_label) {
  YesOrNo <- paste(yes_no, collapse = "")
  starts <- gregexpr("NY", YesOrNo)[[1]]
  stops <- gregexpr("YN", YesOrNo)[[1]]
  window_start <- c()
  if (starts[1] != -1) {
    for (i in starts) {
      window_start <- append(window_start, tbl$Window_start[i + 1])
    }
  }
  window_end <- c()
  if (stops[1] != -1) {
    for (i in stops) {
      window_end <- append(window_end, tbl$Window_end[i])
    }
  }
  # run still open at the last window: close it there
  if (length(window_start) > length(window_end)) {
    window_end[length(window_start)] <- tail(tbl$Window_end, 1L)
  }
  # run already open at the first window: open it there
  if (length(window_start) < length(window_end)) {
    window_start <- c(head(tbl$Window_start, 1L), window_start)
  }
  out <- cbind.data.frame(window_start, window_end)
  out$Chromosome <- chr_label
  out$Window_size <- out$window_end - out$window_start
  out
}
#Chr1
quant_90 <- quantile(table1_subset$Fd, 0.90)
quant_99 <- quantile(table1_subset$Fd, 0.99)
table1_subset$YesOrNo_90 <- ifelse(table1_subset$Fd >= quant_90, "Y","N")
table1_subset$YesOrNo_99 <- ifelse(table1_subset$Fd >= quant_99, "Y","N")
Chr1_unified_windows_90 <- unify_runs(table1_subset, table1_subset$YesOrNo_90, "Chr1")
Chr1_unified_windows_99 <- unify_runs(table1_subset, table1_subset$YesOrNo_99, "Chr1")
#Chr2
quant_90 <- quantile(table2_subset$Fd, 0.90)
quant_99 <- quantile(table2_subset$Fd, 0.99)
table2_subset$YesOrNo_90 <- ifelse(table2_subset$Fd >= quant_90, "Y","N")
table2_subset$YesOrNo_99 <- ifelse(table2_subset$Fd >= quant_99, "Y","N")
Chr2_unified_windows_90 <- unify_runs(table2_subset, table2_subset$YesOrNo_90, "Chr2")
Chr2_unified_windows_99 <- unify_runs(table2_subset, table2_subset$YesOrNo_99, "Chr2")
#Chr3
quant_90 <- quantile(table3_subset$Fd, 0.90)
quant_99 <- quantile(table3_subset$Fd, 0.99)
table3_subset$YesOrNo_90 <- ifelse(table3_subset$Fd >= quant_90, "Y","N")
table3_subset$YesOrNo_99 <- ifelse(table3_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table3_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table3_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table3_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table3_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table3_subset$Window_start, 1L),window_start)
}
Chr3_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr3_unified_windows_90$Chromosome <- "Chr3"
Chr3_unified_windows_90$Window_size <- Chr3_unified_windows_90$window_end - Chr3_unified_windows_90$window_start
YesOrNo <- paste(table3_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table3_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table3_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table3_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table3_subset$Window_start, 1L),window_start)
}
Chr3_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr3_unified_windows_99$Chromosome <- "Chr3"
Chr3_unified_windows_99$Window_size <- Chr3_unified_windows_99$window_end - Chr3_unified_windows_99$window_start
#Chr4
quant_90 <- quantile(table4_subset$Fd, 0.90)
quant_99 <- quantile(table4_subset$Fd, 0.99)
table4_subset$YesOrNo_90 <- ifelse(table4_subset$Fd >= quant_90, "Y","N")
table4_subset$YesOrNo_99 <- ifelse(table4_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table4_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table4_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table4_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table4_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table4_subset$Window_start, 1L),window_start)
}
Chr4_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr4_unified_windows_90$Chromosome <- "Chr4"
Chr4_unified_windows_90$Window_size <- Chr4_unified_windows_90$window_end - Chr4_unified_windows_90$window_start
YesOrNo <- paste(table4_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table4_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table4_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table4_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table4_subset$Window_start, 1L),window_start)
}
Chr4_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr4_unified_windows_99$Chromosome <- "Chr4"
Chr4_unified_windows_99$Window_size <- Chr4_unified_windows_99$window_end - Chr4_unified_windows_99$window_start
#Chr5
quant_90 <- quantile(table5_subset$Fd, 0.90)
quant_99 <- quantile(table5_subset$Fd, 0.99)
table5_subset$YesOrNo_90 <- ifelse(table5_subset$Fd >= quant_90, "Y","N")
table5_subset$YesOrNo_99 <- ifelse(table5_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table5_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table5_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table5_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table5_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table5_subset$Window_start, 1L),window_start)
}
Chr5_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr5_unified_windows_90$Chromosome <- "Chr5"
Chr5_unified_windows_90$Window_size <- Chr5_unified_windows_90$window_end - Chr5_unified_windows_90$window_start
YesOrNo <- paste(table5_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table5_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table5_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table5_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table5_subset$Window_start, 1L),window_start)
}
Chr5_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr5_unified_windows_99$Chromosome <- "Chr5"
Chr5_unified_windows_99$Window_size <- Chr5_unified_windows_99$window_end - Chr5_unified_windows_99$window_start
#Chr6
quant_90 <- quantile(table6_subset$Fd, 0.90)
quant_99 <- quantile(table6_subset$Fd, 0.99)
table6_subset$YesOrNo_90 <- ifelse(table6_subset$Fd >= quant_90, "Y","N")
table6_subset$YesOrNo_99 <- ifelse(table6_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table6_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table6_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table6_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table6_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table6_subset$Window_start, 1L),window_start)
}
Chr6_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr6_unified_windows_90$Chromosome <- "Chr6"
Chr6_unified_windows_90$Window_size <- Chr6_unified_windows_90$window_end - Chr6_unified_windows_90$window_start
YesOrNo <- paste(table6_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table6_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table6_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table6_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table6_subset$Window_start, 1L),window_start)
}
Chr6_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr6_unified_windows_99$Chromosome <- "Chr6"
Chr6_unified_windows_99$Window_size <- Chr6_unified_windows_99$window_end - Chr6_unified_windows_99$window_start
#Chr7
quant_90 <- quantile(table7_subset$Fd, 0.90)
quant_99 <- quantile(table7_subset$Fd, 0.99)
table7_subset$YesOrNo_90 <- ifelse(table7_subset$Fd >= quant_90, "Y","N")
table7_subset$YesOrNo_99 <- ifelse(table7_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table7_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table7_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table7_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table7_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table7_subset$Window_start, 1L),window_start)
}
Chr7_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr7_unified_windows_90$Chromosome <- "Chr7"
Chr7_unified_windows_90$Window_size <- Chr7_unified_windows_90$window_end - Chr7_unified_windows_90$window_start
YesOrNo <- paste(table7_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table7_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table7_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table7_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table7_subset$Window_start, 1L),window_start)
}
Chr7_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr7_unified_windows_99$Chromosome <- "Chr7"
Chr7_unified_windows_99$Window_size <- Chr7_unified_windows_99$window_end - Chr7_unified_windows_99$window_start
#Chr8
quant_90 <- quantile(table8_subset$Fd, 0.90)
quant_99 <- quantile(table8_subset$Fd, 0.99)
table8_subset$YesOrNo_90 <- ifelse(table8_subset$Fd >= quant_90, "Y","N")
table8_subset$YesOrNo_99 <- ifelse(table8_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table8_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table8_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table8_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table8_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table8_subset$Window_start, 1L),window_start)
}
Chr8_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr8_unified_windows_90$Chromosome <- "Chr8"
Chr8_unified_windows_90$Window_size <- Chr8_unified_windows_90$window_end - Chr8_unified_windows_90$window_start
YesOrNo <- paste(table8_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table8_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table8_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table8_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table8_subset$Window_start, 1L),window_start)
}
Chr8_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr8_unified_windows_99$Chromosome <- "Chr8"
Chr8_unified_windows_99$Window_size <- Chr8_unified_windows_99$window_end - Chr8_unified_windows_99$window_start
#Chr9
quant_90 <- quantile(table9_subset$Fd, 0.90)
quant_99 <- quantile(table9_subset$Fd, 0.99)
table9_subset$YesOrNo_90 <- ifelse(table9_subset$Fd >= quant_90, "Y","N")
table9_subset$YesOrNo_99 <- ifelse(table9_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table9_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table9_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table9_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table9_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table9_subset$Window_start, 1L),window_start)
}
Chr9_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr9_unified_windows_90$Chromosome <- "Chr9"
Chr9_unified_windows_90$Window_size <- Chr9_unified_windows_90$window_end - Chr9_unified_windows_90$window_start
YesOrNo <- paste(table9_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table9_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table9_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table9_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table9_subset$Window_start, 1L),window_start)
}
Chr9_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr9_unified_windows_99$Chromosome <- "Chr9"
Chr9_unified_windows_99$Window_size <- Chr9_unified_windows_99$window_end - Chr9_unified_windows_99$window_start
#Chr10
quant_90 <- quantile(table10_subset$Fd, 0.90)
quant_99 <- quantile(table10_subset$Fd, 0.99)
table10_subset$YesOrNo_90 <- ifelse(table10_subset$Fd >= quant_90, "Y","N")
table10_subset$YesOrNo_99 <- ifelse(table10_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table10_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table10_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table10_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table10_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table10_subset$Window_start, 1L),window_start)
}
Chr10_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr10_unified_windows_90$Chromosome <- "Chr10"
Chr10_unified_windows_90$Window_size <- Chr10_unified_windows_90$window_end - Chr10_unified_windows_90$window_start
YesOrNo <- paste(table10_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table10_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table10_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table10_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table10_subset$Window_start, 1L),window_start)
}
Chr10_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr10_unified_windows_99$Chromosome <- "Chr10"
Chr10_unified_windows_99$Window_size <- Chr10_unified_windows_99$window_end - Chr10_unified_windows_99$window_start
#Chr11
quant_90 <- quantile(table11_subset$Fd, 0.90)
quant_99 <- quantile(table11_subset$Fd, 0.99)
table11_subset$YesOrNo_90 <- ifelse(table11_subset$Fd >= quant_90, "Y","N")
table11_subset$YesOrNo_99 <- ifelse(table11_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table11_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table11_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table11_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table11_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table11_subset$Window_start, 1L),window_start)
}
Chr11_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr11_unified_windows_90$Chromosome <- "Chr11"
Chr11_unified_windows_90$Window_size <- Chr11_unified_windows_90$window_end - Chr11_unified_windows_90$window_start
YesOrNo <- paste(table11_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table11_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table11_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table11_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table11_subset$Window_start, 1L),window_start)
}
Chr11_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr11_unified_windows_99$Chromosome <- "Chr11"
Chr11_unified_windows_99$Window_size <- Chr11_unified_windows_99$window_end - Chr11_unified_windows_99$window_start
#Chr12
quant_90 <- quantile(table12_subset$Fd, 0.90)
quant_99 <- quantile(table12_subset$Fd, 0.99)
table12_subset$YesOrNo_90 <- ifelse(table12_subset$Fd >= quant_90, "Y","N")
table12_subset$YesOrNo_99 <- ifelse(table12_subset$Fd >= quant_99, "Y","N")
YesOrNo <- paste(table12_subset$YesOrNo_90, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table12_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table12_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table12_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table12_subset$Window_start, 1L),window_start)
}
Chr12_unified_windows_90 <- cbind.data.frame(window_start,window_end)
Chr12_unified_windows_90$Chromosome <- "Chr12"
Chr12_unified_windows_90$Window_size <- Chr12_unified_windows_90$window_end - Chr12_unified_windows_90$window_start
YesOrNo <- paste(table12_subset$YesOrNo_99, collapse = "")
starts <- gregexpr("NY",YesOrNo)
stops <- gregexpr("YN",YesOrNo)
window_start <- c()
for (i in starts[[1]]){
window_start <- append(window_start, table12_subset$Window_start[i+1])
}
window_end <- c()
for (i in stops[[1]]){
window_end <- append(window_end, table12_subset$Window_end[i])
}
if (length(window_start) > length(window_end)) {
window_end[length(window_start)] <- tail(table12_subset$Window_end, 1L)
}
if (length(window_start) < length(window_end)) {
window_start <- c(head(table12_subset$Window_start, 1L),window_start)
}
Chr12_unified_windows_99 <- cbind.data.frame(window_start,window_end)
Chr12_unified_windows_99$Chromosome <- "Chr12"
Chr12_unified_windows_99$Window_size <- Chr12_unified_windows_99$window_end - Chr12_unified_windows_99$window_start
Unified_windows_90 <- bind_rows(Chr1_unified_windows_90,Chr2_unified_windows_90,Chr3_unified_windows_90,Chr4_unified_windows_90,Chr5_unified_windows_90,Chr6_unified_windows_90,Chr7_unified_windows_90,Chr8_unified_windows_90,Chr9_unified_windows_90,Chr10_unified_windows_90,Chr11_unified_windows_90,Chr12_unified_windows_90)
Unified_windows_99 <- bind_rows(Chr1_unified_windows_99,Chr2_unified_windows_99,Chr3_unified_windows_99,Chr4_unified_windows_99,Chr5_unified_windows_99,Chr6_unified_windows_99,Chr7_unified_windows_99,Chr8_unified_windows_99,Chr9_unified_windows_99,Chr10_unified_windows_99,Chr11_unified_windows_99,Chr12_unified_windows_99)
fwrite(Unified_windows_90, file = paste0(inputfile[2],"_50SNP_UnifiedWindows_10percent.csv"))
fwrite(Unified_windows_99, file = paste0(inputfile[2],"_50SNP_UnifiedWindows_1percent.csv"))
| /Fd_50_site_windows.R | no_license | LanzlTobias/Speciation_geneflow_tomato | R | false | false | 36,006 | r | ## Modified version of FD_escaneo_por_ventanas_union_de_ventanas_graficarlas.R
# available at: https://github.com/ericgonzalezs/Characterization_of_introgression_from_Zea_mays_ssp._mexicana_to_Mexican_highland_maize/blob/master/Introgression_analyses/
#
#Arguments for the script:
#First argument is the file with the per-site ABBA BABA counts created with ABBA_BABA.v1.pl during the ABBA BABA pipeline
#Second argument is the output prefix
#Loading packages
# Attach each required package, installing it from CRAN first when it is not
# already available; stop() aborts the script if installation fails. This
# collapses the previously duplicated per-package if(require(...)) blocks.
load_or_install <- function(pkg) {
  if (require(pkg, character.only = TRUE)) {
    print(paste(pkg, "is loaded correctly"))
  } else {
    print(paste("trying to install", pkg))
    install.packages(pkg)
    if (require(pkg, character.only = TRUE)) {
      print(paste(pkg, "installed and loaded"))
    } else {
      stop(paste("could not install", pkg))
    }
  }
}
for (pkg in c("dplyr", "data.table")) {
  load_or_install(pkg)
}
## Loading input
# Command-line arguments:
#   [1] per-site ABBA BABA counts file produced by ABBA_BABA.v1.pl
#   [2] output prefix used for all result files
inputfile <- commandArgs(trailingOnly = TRUE)
data <- fread(inputfile[1],header=T,sep = "\t",fill=T)
# Drop the last three rows — presumably genome-wide summary/total lines
# appended by the counting script rather than per-site records
# (NOTE(review): confirm against the ABBA_BABA.v1.pl output format).
data <- data[-c(nrow(data),(nrow(data)-1),(nrow(data)-2)),]
# Keep chromosome (CHR), position, and columns 15-22, which hold the
# FdNum/FdDenom/DNum/DDenom/FhomNum/FhomDenom count columns used below.
Fs <- data[,c(1,2,15:22)]
# Split the sites by chromosome, in the order chromosomes first appear in the
# file; the script assumes at least 12 chromosomes are present.
Fs_1 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[1])
Fs_2 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[2])
Fs_3 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[3])
Fs_4 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[4])
Fs_5 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[5])
Fs_6 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[6])
Fs_7 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[7])
Fs_8 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[8])
Fs_9 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[9])
Fs_10 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[10])
Fs_11 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[11])
Fs_12 <- subset(Fs, Fs$CHR==unique(Fs$CHR)[12])
#####
# 50 SNP windows
#
# Collapse the per-site counts of each chromosome into non-overlapping windows
# of 50 consecutive SNPs, summing the ABBA BABA numerators and denominators
# within each window and recomputing D and Fd at window level.

# Summarise one chromosome into consecutive 50-SNP windows.
#
# chr_sites:   per-chromosome site table; column 1 is the chromosome, column 2
#              the position, plus the FdNum/FdDenom/DNum/DDenom/FhomNum/
#              FhomDenom count columns.
# window_size: number of SNPs per window (50 in this analysis).
#
# Returns one row per complete window with the summed counts, the window
# coordinates and the window-level D and Fd ratios. Sites left over after the
# last complete window are dropped; the previous per-chromosome copies used
# 1:round(nrow/50) fixed-size windows, which could index past the end of the
# table and emit all-NA rows, and grew the result with rbind() in a loop.
make_50snp_windows <- function(chr_sites, window_size = 50L) {
  n_windows <- nrow(chr_sites) %/% window_size
  rows <- vector("list", n_windows)  # preallocate instead of rbind-in-loop
  for (w in seq_len(n_windows)) {
    first <- (w - 1L) * window_size + 1L
    window <- chr_sites[first:(first + window_size - 1L), ]
    FdNum <- sum(window$FdNum)
    FdDenom <- sum(window$FdDenom)
    DNum <- sum(window$DNum)
    DDenom <- sum(window$DDenom)
    FhomNum <- sum(window$FhomNum)
    FhomDenom <- sum(window$FhomDenom)
    start_pos <- window[[2]][1]
    end_pos <- window[[2]][window_size]
    rows[[w]] <- data.frame(
      Chromosome = window[[1]][1],
      Window_start = start_pos,
      Window_end = end_pos,
      Window_size = end_pos - start_pos,
      Window_mid = (start_pos + end_pos) / 2,
      FdNum = FdNum, FdDenom = FdDenom,
      DNum = DNum, DDenom = DDenom,
      FhomNum = FhomNum, FhomDenom = FhomDenom,
      D = DNum / DDenom,
      Fd = FdNum / FdDenom)
  }
  do.call(rbind, rows)
}

table1 <- make_50snp_windows(Fs_1)
table2 <- make_50snp_windows(Fs_2)
table3 <- make_50snp_windows(Fs_3)
table4 <- make_50snp_windows(Fs_4)
table5 <- make_50snp_windows(Fs_5)
table6 <- make_50snp_windows(Fs_6)
table7 <- make_50snp_windows(Fs_7)
table8 <- make_50snp_windows(Fs_8)
table9 <- make_50snp_windows(Fs_9)
table10 <- make_50snp_windows(Fs_10)
table11 <- make_50snp_windows(Fs_11)
table12 <- make_50snp_windows(Fs_12)
#####
# Filter for D > 0 and Fd between 0 and 1; windows whose statistics are NA
# (e.g. from zero denominators) are dropped by the same filter.

# Apply the D / Fd sanity filter to one per-chromosome window table.
keep_valid_windows <- function(tbl) {
  tbl %>%
    filter(D > 0, Fd >= 0 & Fd <= 1)
}

# Per-chromosome filtered tables; these names are reused by the window
# unification section further down, so they must stay as individual objects.
table1_subset <- keep_valid_windows(table1)
table2_subset <- keep_valid_windows(table2)
table3_subset <- keep_valid_windows(table3)
table4_subset <- keep_valid_windows(table4)
table5_subset <- keep_valid_windows(table5)
table6_subset <- keep_valid_windows(table6)
table7_subset <- keep_valid_windows(table7)
table8_subset <- keep_valid_windows(table8)
table9_subset <- keep_valid_windows(table9)
table10_subset <- keep_valid_windows(table10)
table11_subset <- keep_valid_windows(table11)
table12_subset <- keep_valid_windows(table12)

# Genome-wide tables, with and without the sanity filter, written to disk
# under the output prefix supplied on the command line.
table_without_filters <- rbind.data.frame(table1, table2, table3, table4, table5, table6, table7, table8, table9, table10, table11, table12)
table_with_filters <- rbind.data.frame(table1_subset, table2_subset, table3_subset, table4_subset, table5_subset, table6_subset, table7_subset, table8_subset, table9_subset, table10_subset, table11_subset, table12_subset)
fwrite(table_without_filters, file = paste0(inputfile[2],"_50SNP_WindowsWithoutFiltering.csv"))
fwrite(table_with_filters, file = paste0(inputfile[2],"_50SNP_WindowsWithFiltering.csv"))
###################################################
#write.csv(table_with_filters, file = "")
##################################################
#####
#Unification of windows
#' Merge runs of consecutive significant 50-SNP windows into unified regions.
#'
#' Each window carries a "Y"/"N" flag (Fd at or above the chromosome's 90th
#' or 99th percentile). Consecutive "Y" windows are collapsed into one
#' region spanning from the first window's Window_start to the last
#' window's Window_end, found via the "NY"/"YN" transitions in the
#' concatenated flag string.
#'
#' @param tbl A tableN_subset data frame with Window_start/Window_end and
#'   the flag column named by `flag_col`.
#' @param flag_col "YesOrNo_90" or "YesOrNo_99".
#' @param chr_label Chromosome label written into the result, e.g. "Chr1".
#' @return Data frame with columns window_start, window_end, Chromosome,
#'   Window_size (same layout as the original per-chromosome code).
unify_windows <- function(tbl, flag_col, chr_label) {
  yn <- paste(tbl[[flag_col]], collapse = "")
  # gregexpr() returns -1 when the pattern is absent. The original code
  # indexed with that -1 directly; for the stop positions Window_end[-1]
  # then appended nearly the whole column, fabricating bogus regions when a
  # chromosome had no "YN" transition. Drop the -1 sentinel explicitly.
  run_starts <- setdiff(gregexpr("NY", yn)[[1]], -1L)
  run_stops  <- setdiff(gregexpr("YN", yn)[[1]], -1L)
  window_start <- tbl$Window_start[run_starts + 1]
  window_end   <- tbl$Window_end[run_stops]
  # A run beginning at the very first window has no leading "N" transition.
  if (length(window_start) < length(window_end)) {
    window_start <- c(head(tbl$Window_start, 1L), window_start)
  }
  # A run extending to the last window has no trailing "N" transition.
  if (length(window_start) > length(window_end)) {
    window_end[length(window_start)] <- tail(tbl$Window_end, 1L)
  }
  unified <- cbind.data.frame(window_start, window_end)
  unified$Chromosome <- chr_label
  unified$Window_size <- unified$window_end - unified$window_start
  unified
}
# Flag every chromosome's windows against its own 90%/99% Fd quantiles and
# build the ChrN_unified_windows_90 / _99 data frames expected below. The
# flag columns are written back onto tableN_subset, as before.
for (chr_idx in 1:12) {
  sub_tbl <- get(paste0("table", chr_idx, "_subset"))
  quant_90 <- quantile(sub_tbl$Fd, 0.90)
  quant_99 <- quantile(sub_tbl$Fd, 0.99)
  sub_tbl$YesOrNo_90 <- ifelse(sub_tbl$Fd >= quant_90, "Y", "N")
  sub_tbl$YesOrNo_99 <- ifelse(sub_tbl$Fd >= quant_99, "Y", "N")
  assign(paste0("table", chr_idx, "_subset"), sub_tbl)
  chr_label <- paste0("Chr", chr_idx)
  assign(paste0(chr_label, "_unified_windows_90"),
         unify_windows(sub_tbl, "YesOrNo_90", chr_label))
  assign(paste0(chr_label, "_unified_windows_99"),
         unify_windows(sub_tbl, "YesOrNo_99", chr_label))
}
# Stack the per-chromosome unified regions and write both thresholds out.
Unified_windows_90 <- bind_rows(unname(mget(paste0("Chr", 1:12, "_unified_windows_90"))))
Unified_windows_99 <- bind_rows(unname(mget(paste0("Chr", 1:12, "_unified_windows_99"))))
fwrite(Unified_windows_90, file = paste0(inputfile[2],"_50SNP_UnifiedWindows_10percent.csv"))
fwrite(Unified_windows_99, file = paste0(inputfile[2],"_50SNP_UnifiedWindows_1percent.csv"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deepboost.R
\name{deepboost.formula}
\alias{deepboost.formula}
\title{Main function for deepboost model creation, using a formula}
\usage{
deepboost.formula(formula, data, instance_weights = NULL, tree_depth = 5,
num_iter = 1, beta = 0, lambda = 0.05, loss_type = "l",
verbose = TRUE)
}
\arguments{
\item{formula}{A R Formula object see : ?formula}
\item{data}{A data.frame of samples to train on}
\item{instance_weights}{The weight of each example}
\item{tree_depth}{maximum depth for a single decision tree in the model}
\item{num_iter}{number of iterations = number of trees in ensemble}
\item{beta}{regularisation for scores (L1)}
\item{lambda}{regularisation for tree depth}
\item{loss_type}{- "l" logistic, "e" exponential}
\item{verbose}{- print extra data while training TRUE / FALSE}
}
\value{
A trained Deepboost model
}
\description{
Main function for deepboost model creation, using a formula
}
\examples{
deepboost.formula(y ~ .,
data.frame(x1=rep(c(0,0,1,1),2),x2=rep(c(0,1,0,1),2),y=factor(rep(c(0,0,0,1),2))),
num_iter=1)
deepboost.formula(y ~ .,
data.frame(x1=rep(c(0,0,1,1),2),x2=rep(c(0,1,0,1),2),y=factor(rep(c(0,0,0,1),2))),
num_iter=2, beta=0.1, lambda=0.00125)
}
| /man/deepboost.formula.Rd | no_license | wangg12/CRAN_deepboost | R | false | true | 1,280 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deepboost.R
\name{deepboost.formula}
\alias{deepboost.formula}
\title{Main function for deepboost model creation, using a formula}
\usage{
deepboost.formula(formula, data, instance_weights = NULL, tree_depth = 5,
num_iter = 1, beta = 0, lambda = 0.05, loss_type = "l",
verbose = TRUE)
}
\arguments{
\item{formula}{A R Formula object see : ?formula}
\item{data}{A data.frame of samples to train on}
\item{instance_weights}{The weight of each example}
\item{tree_depth}{maximum depth for a single decision tree in the model}
\item{num_iter}{number of iterations = number of trees in ensemble}
\item{beta}{regularisation for scores (L1)}
\item{lambda}{regularisation for tree depth}
\item{loss_type}{- "l" logistic, "e" exponential}
\item{verbose}{- print extra data while training TRUE / FALSE}
}
\value{
A trained Deepboost model
}
\description{
Main function for deepboost model creation, using a formula
}
\examples{
deepboost.formula(y ~ .,
data.frame(x1=rep(c(0,0,1,1),2),x2=rep(c(0,1,0,1),2),y=factor(rep(c(0,0,0,1),2))),
num_iter=1)
deepboost.formula(y ~ .,
data.frame(x1=rep(c(0,0,1,1),2),x2=rep(c(0,1,0,1),2),y=factor(rep(c(0,0,0,1),2))),
num_iter=2, beta=0.1, lambda=0.00125)
}
|
############################################################################################
#Create one R script called run_analysis.R that does the following.
# 1) Merges the training and the test sets to create one data set.
# 2) Extracts only the measurements on the mean and standard deviation for each measurement.
# 3) Uses descriptive activity names to name the activities in the data set
# 4) Appropriately labels the data set with descriptive variable names.
# 5) From the data set in step 4, creates a second, independent tidy data set with the
#    average of each variable for each activity and each subject.
############################################################################################
library(reshape2)
# Local file name for the downloaded dataset archive.
filename <- "UCI_HAR_Dataset.zip"
# Download the dataset only if it is not already present.
# (The original URL string carried a trailing space, which corrupts the request.)
if (!file.exists(filename)) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method = "curl")
}
# Unzip into ./data the first time only.
if (!file.exists("data")) {
  unzip(filename, exdir = "./data")
}
# Activity id -> descriptive name lookup.
activityLabels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
activityLabels[, 2] <- as.character(activityLabels[, 2])
# Names of all 561 measurement columns.
features <- read.table("./data/UCI HAR Dataset/features.txt")
features[, 2] <- as.character(features[, 2])
# Keep only the mean/std columns and build readable names for them;
# these indices also select the columns to read from the "X" files.
meanStdDevCols <- grep(".*mean.*|.*std.*", features[, 2])
meanStdDevColsNames <- features[meanStdDevCols, 2]
meanStdDevColsNames <- gsub('-mean', 'Mean', meanStdDevColsNames)
meanStdDevColsNames <- gsub('-std', 'StdDev', meanStdDevColsNames)
meanStdDevColsNames <- gsub('[-()]', '', meanStdDevColsNames)
# Load train/test observations (mean/std columns only), subjects and activities.
# The dataset ships the activity files as lowercase y_train.txt / y_test.txt;
# the previous uppercase spelling only worked on case-insensitive filesystems.
trainData <- read.table("./data/UCI HAR Dataset/train/X_train.txt")[meanStdDevCols]
trainActivities <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
trainSubjects <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
trainCombined <- cbind(trainData, trainSubjects, trainActivities)
testData <- read.table("./data/UCI HAR Dataset/test/X_test.txt")[meanStdDevCols]
testActivities <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
testSubjects <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
testCombined <- cbind(testData, testSubjects, testActivities)
# Merge test and train datasets and label the columns of the merged dataset.
mergedData <- rbind(trainCombined, testCombined)
colnames(mergedData) <- c(meanStdDevColsNames, "subject", "activity")
# Turn activities & subjects into factors; activities get descriptive names.
mergedData$activity <- factor(mergedData$activity, levels = activityLabels[, 1], labels = activityLabels[, 2])
mergedData$subject <- as.factor(mergedData$subject)
# Melt to long form keyed by subject and activity ...
meltedData <- melt(mergedData, id = c("subject", "activity"))
# ... then average every variable per (subject, activity) pair.
meanData <- dcast(meltedData, subject + activity ~ variable, mean)
# Write out the tidy dataset of per-subject/per-activity averages.
write.table(meanData, "tidyMeanData.txt", row.names = FALSE, quote = FALSE)
| /run_analysis.R | no_license | tkramer7/GettingAndCleaningData | R | false | false | 3,578 | r |
############################################################################################
# run_analysis.R -- does the following:
# 1) Merges the training and the test sets to create one data set.
# 2) Extracts only the measurements on the mean and standard deviation for each measurement.
# 3) Uses descriptive activity names to name the activities in the data set.
# 4) Appropriately labels the data set with descriptive variable names.
# 5) From the data set in step 4, creates a second, independent tidy data set with the
#    average of each variable for each activity and each subject.
############################################################################################
library(reshape2)
# Local name for the downloaded archive.
filename <- "UCI_HAR_Dataset.zip"
# Download the dataset only if the archive is not already present.
# (Fixed: the URL previously contained a trailing space inside the quotes,
# which corrupted the request passed to download.file.)
if (!file.exists(filename)) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, filename, method = "curl")
}
# Unzip into ./data only if it has not been extracted before.
if (!file.exists("data")) {
  unzip(filename, exdir = "./data")
}
# Activity code -> descriptive label lookup (column 1 = code, column 2 = label).
activityLabels <- read.table("./data/UCI HAR Dataset/activity_labels.txt")
activityLabels[, 2] <- as.character(activityLabels[, 2])
# Feature index/name table; column 2 holds the measurement names.
features <- read.table("./data/UCI HAR Dataset/features.txt")
features[, 2] <- as.character(features[, 2])
# Select only mean and standard-deviation measurements, and build readable
# column names for them: "-mean" -> "Mean", "-std" -> "StdDev", then strip
# the "-", "(", ")" characters.
meanStdDevCols <- grep(".*mean.*|.*std.*", features[, 2])
meanStdDevColsNames <- features[meanStdDevCols, 2]
meanStdDevColsNames <- gsub('-mean', 'Mean', meanStdDevColsNames)
meanStdDevColsNames <- gsub('-std', 'StdDev', meanStdDevColsNames)
meanStdDevColsNames <- gsub('[-()]', '', meanStdDevColsNames)
# Load the train and test datasets (observations, activity codes, subject ids).
# Only the mean/std columns of the observation files are kept.
# NOTE(review): the dataset ships these files as "y_train.txt"/"y_test.txt"
# (lowercase y) on case-sensitive filesystems -- confirm capitalization.
trainData <- read.table("./data/UCI HAR Dataset/train/X_train.txt")[meanStdDevCols]
trainActivities <- read.table("./data/UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
trainCombined <- cbind(trainData, trainSubjects, trainActivities)
testData <- read.table("./data/UCI HAR Dataset/test/X_test.txt")[meanStdDevCols]
testActivities <- read.table("./data/UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
testCombined <- cbind(testData, testSubjects, testActivities)
# Stack test under train and label every column of the merged dataset.
mergedData <- rbind(trainCombined, testCombined)
colnames(mergedData) <- c(meanStdDevColsNames, "subject", "activity")
# Turn activity codes into descriptive factor labels, and subjects into factors.
mergedData$activity <- factor(mergedData$activity, levels = activityLabels[, 1], labels = activityLabels[, 2])
mergedData$subject <- as.factor(mergedData$subject)
# Melt to long form keyed by (subject, activity), then cast back to wide form
# taking the mean of each measurement per subject/activity pair.
meltedData <- melt(mergedData, id = c("subject", "activity"))
meanData <- dcast(meltedData, subject + activity ~ variable, mean)
# Write out the tidy dataset of per-subject/per-activity averages.
write.table(meanData, "tidyMeanData.txt", row.names = FALSE, quote = FALSE)
# (dataset-viewer footer residue, not part of the script)
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.