content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Build two circular arcs of points, separate them with single-linkage
# hierarchical clustering, and then fit a circle to the second arc using
# the algebraic (Kasa) least-squares method.

# Arc parameter: t = (75:100)/100 * 2*pi, i.e. the last quarter turn of
# the circle.  FIX: use the exact constant `pi` instead of the original
# 3.142 approximation so the generated points lie exactly on their circles.
TT <- (75:100) / 100 * 2 * pi
# Circle 0: centre (5, 5), radius 2.5.
x0 <- 5.0
y0 <- 5.0
r0 <- 2.5
# Circle 1: centre (4.75, 4.75), radius 1.5.
x1 <- 4.75
y1 <- 4.75
r1 <- 1.5
# Vectorised point construction (replaces the original rbind-in-a-loop,
# which grew the matrices one row at a time).
c1 <- cbind(r0 * cos(TT) + x0, r0 * sin(TT) + y0)
c2 <- cbind(r1 * cos(TT) + x1, r1 * sin(TT) + y1)
#print(c2);
# Pool both arcs, cut the single-linkage dendrogram into two clusters and
# colour the scatter plot by cluster label.
bc <- rbind(c1, c2)
clusters <- hclust(dist(bc), method = "single")
clusters <- cutree(clusters, 2)
print(clusters)
plot(bc[, 1], bc[, 2], col = clusters)
#plot(bc[,1],bc[,2]);
# Fit circle one... (Kasa algebraic circle fit to the second arc)
# Work in coordinates centred on the arc's centroid.
xbar <- mean(c2[, 1])
ybar <- mean(c2[, 2])
u <- c2[, 1] - xbar
v <- c2[, 2] - ybar
N <- length(v)
# Moment sums, computed vectorised rather than in an element-wise loop.
suu <- sum(u * u)
suv <- sum(u * v)
svv <- sum(v * v)
suuu <- sum(u^3)
svvv <- sum(v^3)
suvv <- sum(u * v * v)
svuu <- sum(v * u * u)
# Normal equations A %*% z = b for the centre offset (uc, vc); matrix()
# fills column-wise, giving the symmetric matrix [suu suv; suv svv].
A <- matrix(c(suu, suv, suv, svv), nrow = 2, ncol = 2)
b <- matrix(c(0.5 * (suuu + suvv), 0.5 * (svvv + svuu)), nrow = 2, ncol = 1)
z <- solve(A, b)  # direct linear solve is more stable than solve(A) %*% b
uc <- z[1, 1]
vc <- z[2, 1]
# Translate the fitted centre back to original coordinates; recover radius.
xc <- uc + xbar
yc <- vc + ybar
radius <- sqrt(uc * uc + vc * vc + (suu + svv) / N)
print(xc)
print(yc)
print(radius)
data("iris")
| /RIGA_AI_Project/Luis.r | no_license | arenzo97/RIGA_AI_Project | R | false | false | 1,284 | r | c1 <- NULL;
# Duplicate copy of the arc-generation / clustering / circle-fit script.
# NOTE(review): `c1 <- NULL` is expected to have been run before this span;
# it is not initialised here.
c2 <- NULL;
# Arc parameter: t = (75:100)/100 * 2 * 3.142, roughly the last quarter
# turn of a circle (3.142 approximates pi).
TT <- 75:100;
TT <- TT / 100;
TT <- TT * 2 * 3.142;
# Circle 0: centre (x0, y0) = (5, 5), radius r0 = 2.5.
x0 <- 5.0;
y0 <- 5.0;
r0 <- 2.5;
# Circle 1: centre (x1, y1) = (4.75, 4.75), radius r1 = 1.5.
x1 <- 4.75;
y1 <- 4.75;
r1 <- 1.5;
# Generate one point per angle on each circle, growing the matrices
# row by row with rbind.
for (t in TT)
{
x <- r0 * cos(t) + x0;
y <- r0 * sin(t) + y0;
c1 <- rbind(c1,c(x,y));
x <- r1 * cos(t) + x1;
y <- r1 * sin(t) + y1;
c2 <- rbind(c2,c(x,y));
}
#print(c2);
# Pool both arcs; split into two groups via single-linkage hierarchical
# clustering and colour the scatter plot by cluster label.
bc <- rbind(c1,c2);
clusters <- hclust(dist(bc),method="single");
clusters <- cutree(clusters,2);
print(clusters);
plot(bc[,1],bc[,2], col = clusters);
#plot(bc[,1],bc[,2]);
#Fit circle one...
# Algebraic (Kasa) circle fit to the second arc: work in coordinates
# (u, v) centred on the arc's centroid.
xbar <- mean(c2[,1]);
ybar <- mean(c2[,2]);
u <- c2[,1] - xbar;
v <- c2[,2] - ybar;
N <- length(v);
# Moment sums accumulated element by element.
suu <- 0
suv <- 0;
svv <- 0;
suuu <- 0;
svvv <- 0;
suvv <- 0;
svuu <- 0;
for (i in 1:N)
{
ui <- u[i];
vi <- v[i];
suu <- suu + ui*ui;
suv <- suv + ui*vi;
svv <- svv + vi*vi;
suuu <- suuu + ui*ui*ui;
svvv <- svvv + vi*vi*vi;
suvv <- suvv + ui*vi*vi;
svuu <- svuu + vi*ui*ui;
}
# Normal equations A %*% z = b for the centre offset (uc, vc).
A <- matrix(nrow=2,ncol=2,0);
A[1,1] <- suu;
A[1,2] <- suv;
A[2,1] <- suv;
A[2,2] <- svv;
b <- matrix(nrow=2,ncol=1,0);
b[1,1] <- 0.5*(suuu + suvv);
b[2,1] <- 0.5*(svvv + svuu);
z <- solve(A)%*%b;
uc <- z[1,1];
vc <- z[2,1];
# Translate the fitted centre back to original coordinates; recover radius.
xc <- uc + xbar;
yc <- vc + ybar;
radius <- sqrt(uc*uc + vc*vc + (suu+svv)/N);
print(xc);
print(yc);
print(radius);
data("iris")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildKrigingDACE.R
\name{corrcubic}
\alias{corrcubic}
\title{Correlation: Cubic}
\usage{
corrcubic(theta, d, ret = "all")
}
\arguments{
\item{theta}{parameters in the correlation function}
\item{d}{m*n matrix with differences between given data points}
\item{ret}{A string. If set to \code{"all"} or \code{"dr"}, the derivative of \code{r} (\code{dr}) will be returned, else \code{dr} is \code{NA}.}
}
\value{
returns a list with two elements:
\item{\code{r}}{correlation}
\item{\code{dr}}{m*n matrix with the Jacobian of \code{r} at \code{x}. It is
assumed that \code{x} is given implicitly by \code{d[i,] = x - S[i,]},
where \code{S[i,]} is the \code{i}'th design site.}
}
\description{
Cubic correlation function.\cr
If \code{length(theta) = 1}, then the model is isotropic:\cr
all \code{theta_j = theta}.
}
\seealso{
\code{\link{buildKrigingDACE}}
}
\author{
The authors of the original DACE Matlab code \cr
are Hans Bruun Nielsen, Soren Nymand Lophaven and Jacob Sondergaard. \cr
Ported to R by Martin Zaefferer \email{martin.zaefferer@fh-koeln.de}.
}
\keyword{internal}
| /man/corrcubic.Rd | no_license | bartzbeielstein/SPOT | R | false | true | 1,181 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildKrigingDACE.R
\name{corrcubic}
\alias{corrcubic}
\title{Correlation: Cubic}
\usage{
corrcubic(theta, d, ret = "all")
}
\arguments{
\item{theta}{parameters in the correlation function}
\item{d}{m*n matrix with differences between given data points}
\item{ret}{A string. If set to \code{"all"} or \code{"dr"}, the derivative of \code{r} (\code{dr}) will be returned, else \code{dr} is \code{NA}.}
}
\value{
returns a list with two elements:
\item{\code{r}}{correlation}
\item{\code{dr}}{m*n matrix with the Jacobian of \code{r} at \code{x}. It is
assumed that \code{x} is given implicitly by \code{d[i,] = x - S[i,]},
where \code{S[i,]} is the \code{i}'th design site.}
}
\description{
Cubic correlation function.\cr
If \code{length(theta) = 1}, then the model is isotropic:\cr
all \code{theta_j = theta}.
}
\seealso{
\code{\link{buildKrigingDACE}}
}
\author{
The authors of the original DACE Matlab code \cr
are Hans Bruun Nielsen, Soren Nymand Lophaven and Jacob Sondergaard. \cr
Ported to R by Martin Zaefferer \email{martin.zaefferer@fh-koeln.de}.
}
\keyword{internal}
|
# Shiny next-word-prediction server: restore pre-computed n-gram tables
# (English and German) used by text_manipulation() below.
library(tm)
#setwd("C:/R_Dat/capstone/shiny_app")
# NOTE(review): load() returns the *names* of the restored objects, so `c`
# and `d` are merely character vectors here; the n-gram tables themselves
# (gram1, gram2, gram3, ...) are loaded into the global environment as a
# side effect.  `c` also shadows base::c in this environment -- confirm
# whether these assignments are intentional.
c<-load("trigram.RData")
d<-load("trigram_DE.RData")
# Predict the next word for `input` using bigram/trigram lookup tables with
# a "stupid backoff" (factor 0.4) between the bigram and trigram scores.
#
# Args:
#   input:  raw text typed by the user.
#   select: UI language selector ("2" = German, otherwise English).
# Returns: a predicted next word.
# Depends on gram1, gram2, gram3 being loaded from the .RData files above.
text_manipulation <- function(input,select) {
if (select=="2") {
a<-d
} else {
a<-c
}
# NOTE(review): `a` is never used below, so the language selection has no
# effect on the prediction -- presumably a bug; confirm intended behaviour.
###################################
# Processing input for prediction #
###################################
#print(select)
### clean the input from special characters
### ----------------------------------------
# Only letters and spaces survive this gsub, so the anchored regexes built
# from the input further down cannot contain regex metacharacters.
input <- gsub("[^A-Za-z ]","",input)
input <- tolower(input)
### compose input for prediction
### ----------------------------------------
input <- strsplit(input, " ") # split into words
input <- unlist(input) # extract words out of list into vector
input <- rev(input) # put in reverse order; need last words to predict
input3 <- paste(input[3],input[2],input[1],sep = ' ') # compose last 3 words for trigram
input2 <- paste(input[2],input[1],sep = ' ') # compose last 2 words for bigram
input1 <- input[1] # get last word
### predict
### ----------------------------------------
# Anchored exact-match lookups against the n-gram tables' input column.
index2 <-grepl(paste0("^",input1,"$"),gram2$input) # index of entry, if bigram exists
index3 <-grepl(paste0("^",input2,"$"),gram3$input) # index of entry, if trigram exists
if(sum(index3) > 0 ) # if trigram exists, then do:
{
pred_word_bi <-gram2[index2,] # get row of bigram
pred_word_tri <-gram3[index3,] # get row of trigram
if((pred_word_bi$s*0.4) > pred_word_tri$s) # if PROB(.4*bigram) > PROB(trigram) (stupid backoff) then do:
{
return(pred_word_bi$output) # output prediction of bigram
}
else # else:
{
return(pred_word_tri$output) # output prediction of trigram
}
}
else # if no trigram exists then do:
{
if(sum(index2) > 0) # if bigram exists then do:
{
pred_word_bi <-gram2[index2,] # get row of bigram
return(pred_word_bi$output) # output prediction of bigram
}
else # if no bigram exists then do
{
# Fall back to the most frequent unigram when no n-gram matched.
return(gram1[1]$unigram) # output prediction of word
}
}
}
# Wire the Shiny outputs: echo the selected language and the raw input,
# and render the predicted next word from text_manipulation().
shinyServer(
function(input, output) {
output$language <- renderPrint({input$select})
output$inputValue <- renderPrint({input$input})
output$manipulated <- renderPrint({text_manipulation(input$input,input$select)})
}
)
| /bilang/server.R | no_license | wallyholly/word_pred_app | R | false | false | 2,834 | r | library(tm)
#setwd("C:/R_Dat/capstone/shiny_app")
c<-load("trigram.RData")
d<-load("trigram_DE.RData")
text_manipulation <- function(input,select) {
if (select=="2") {
a<-d
} else {
a<-c
}
###################################
# Processing input for prediction #
###################################
#print(select)
### clean the input from special characters
### ----------------------------------------
input <- gsub("[^A-Za-z ]","",input)
input <- tolower(input)
### compose input for prediction
### ----------------------------------------
input <- strsplit(input, " ") # split into words
input <- unlist(input) # extract words out of list into vector
input <- rev(input) # put in reverse order; need last words to predict
input3 <- paste(input[3],input[2],input[1],sep = ' ') # compose last 3 words for trigram
input2 <- paste(input[2],input[1],sep = ' ') # compose last 2 words for bigram
input1 <- input[1] # get last word
### predict
### ----------------------------------------
index2 <-grepl(paste0("^",input1,"$"),gram2$input) # index of entry, if bigram exists
index3 <-grepl(paste0("^",input2,"$"),gram3$input) # index of entry, if trigram exists
if(sum(index3) > 0 ) # if trigram exists, then do:
{
pred_word_bi <-gram2[index2,] # get row of bigram
pred_word_tri <-gram3[index3,] # get row of trigram
if((pred_word_bi$s*0.4) > pred_word_tri$s) # if PROB(.4*bigram) > PROB(trigram) (stupid backoff) then do:
{
return(pred_word_bi$output) # output prediction of bigram
}
else # else:
{
return(pred_word_tri$output) # output prediction of trigram
}
}
else # if no trigram exists then do:
{
if(sum(index2) > 0) # if bigram exists then do:
{
pred_word_bi <-gram2[index2,] # get row of bigram
return(pred_word_bi$output) # output prediction of bigram
}
else # if no bigram exists then do
{
return(gram1[1]$unigram) # output prediction of word
}
}
}
shinyServer(
function(input, output) {
output$language <- renderPrint({input$select})
output$inputValue <- renderPrint({input$input})
output$manipulated <- renderPrint({text_manipulation(input$input,input$select)})
}
)
|
# Make a layout file for experiment
# Builds the 17092x copy-number experiment layout by merging instrument
# sampling data with a hand-curated plate layout, then derives composite
# genotype/copy-number annotation columns and writes the result to disk.
# Depends on data.table (fread, :=) and project helpers sourced from
# CustomFunctions.R (load.all, applyPaste, plateplot).
source.code.path.file<-'/Users/anew/Google Drive/002_lehnerlabData/RCustomFuctions/CustomFunctions.R'
head.dir<-"/Users/anew/Desktop/17092x-CopyNumberExperiment"
plates.dir<-paste(head.dir,'/ExperimentalPlates',sep='')
figout<-'./figure.output/'
outputdata<-'./output.data/'
layoutdir<-'./layout.sample.data/'
date<-'170927' # beginning date clones were measured
# NOTE(review): setwd() with an absolute user path makes the script
# machine-specific; relative paths below assume this working directory.
setwd(head.dir)
br<-seq(-0.5,2.7,length=61)
###################################################
### custom functions for analysis
###################################################
source(source.code.path.file)
load.all() # this function loads all the standard packages you use in this experiment
###################################################
### custom functions for analysis
###################################################
# Instrument sampling data; samp_id encodes "<date>-....<row><col>".
a<-fread(paste0(layoutdir,'17092x-SamplingData.txt'))
# NOTE(review): this rc column is immediately overwritten by the
# multi-column assignment below, so this line appears redundant.
a[,rc:=unlist(sapply(samp_id,function(x)strsplit(x,'\\.')[[1]][2]))]
# Parse row (r), column (c), well id (rc) and measurement date out of each
# samp_id; wells have either 2-char (e.g. A1) or 3-char (e.g. A12) ids.
a[,c('r','c','rc','measurement.date'):= data.table(t(a[,unlist(sapply(samp_id,function(x){
rc<-strsplit(x,'\\.')[[1]][2]
md<-strsplit(x,'-')[[1]][1]
len<-nchar(rc)
if(len==2){
r<-substr(rc,1,1)
c<-substr(rc,2,2)
}
if(len==3){
r<-substr(rc,1,1)
c<-substr(rc,2,3)
}
c(r,c,rc,md)
}))]))
]
# run below to see the problems with sample id's in this set
# plateplot(a,'sample_flow_rate')+facet_grid(~plate.num)
# OK it's clear that the xml files had some problems. Plates 009 - 016 all have the correct data, and from my notes I know that the sampling rate was 0.5 µl per second.
# Hand-made layout; drop rows with no strain genotype recorded.
b<-fread(paste0(layoutdir,'1707xx-17092x-LayoutByHand.txt'))[!is.na(genotype.strain)]
# Keep only the plates with trustworthy flow-rate data (see note above).
asub<-a[plate.num%in%c('Plate_009','Plate_010','Plate_011','Plate_012','Plate_013','Plate_014','Plate_015','Plate_016'),.(sample_flow_rate,samp_id)]
# Re-date the ids so they match the layout file's sample ids.
asub[,samp_id:=gsub('170927','170928',samp_id)]
DTm<-merge(asub,b,by='samp_id',all=T)
# Fill in the known instrument sampling rate for plates lacking xml data.
DTm[is.na(sample_flow_rate),sample_flow_rate:=0.5]
# Select the columns that make up the final layout table.
layout<-DTm[,.(
samp_id,
sample_flow_rate,
gal,
glu,
plate,
source.plate,
r,
c,
rc,
genotype.strain,
genotype.plasmid,
genotype.strain1,
strain.arb,
delta.GAL3,
delta.GAL80,
delta.GAL4,
copies.GAL3.genome,
copies.GAL80.genome,
copies.GAL4.genome,
tx.mix.arb,
pRS415,
copies.GAL3.plasmid,
copies.GAL80.plasmid,
copies.GAL4.plasmid,
total.copies.GAL3,
total.copies.GAL80,
total.copies.GAL4
)]
# Composite annotation strings built by pasting copy-number columns
# together (applyPaste is a project helper from CustomFunctions.R).
layout[,plasgeno:=applyPaste(
data.frame( copies.GAL3.plasmid,
copies.GAL80.plasmid,
copies.GAL4.plasmid,
copies.GAL3.genome,
copies.GAL80.genome,
copies.GAL4.genome),' '
)]
layout[,plasmidcopies:=applyPaste(
data.frame( copies.GAL3.plasmid,
copies.GAL80.plasmid,
copies.GAL4.plasmid),' '
)]
layout[,genomecopies:=applyPaste(
data.frame(
copies.GAL3.genome,
copies.GAL80.genome,
copies.GAL4.genome),' '
)]
layout[,totalcopies:=applyPaste(data.frame( total.copies.GAL3,
total.copies.GAL80,
total.copies.GAL4),' '
)]
# Presence/absence (0/1) encoding of each gene's total copy number.
layout[,binarycount:=applyPaste(data.frame( as.numeric(total.copies.GAL3>0),
as.numeric(total.copies.GAL80>0),
as.numeric(total.copies.GAL4>0)),' '
)]
# Recover the measurement date from the samp_id prefix.
layout[,measurement.date:=unlist(sapply(samp_id,function(x)as.numeric(strsplit(x,'-')[[1]][1])))]
# Sugar conditions by date: 170927 = glucose day, 170928 = galactose day.
layout[measurement.date=='170927',c('gal','glu'):=list(0,1)]
layout[measurement.date=='170928',c('gal','glu'):=list(1,0)]
write.table(layout,paste0(layoutdir,'17092x-layout.txt'),row.names=F,sep='\t')
# Re-read the written file (sanity check; result is printed, not stored).
fread(paste0(layoutdir,'17092x-layout.txt'))
| /Experiment1/R.functions/17092x-LayoutMaker.R | no_license | lehner-lab/HarmoniousCombinations | R | false | false | 3,398 | r | # Make a layout file for experiment
source.code.path.file<-'/Users/anew/Google Drive/002_lehnerlabData/RCustomFuctions/CustomFunctions.R'
head.dir<-"/Users/anew/Desktop/17092x-CopyNumberExperiment"
plates.dir<-paste(head.dir,'/ExperimentalPlates',sep='')
figout<-'./figure.output/'
outputdata<-'./output.data/'
layoutdir<-'./layout.sample.data/'
date<-'170927' # beginning date clones were measured
setwd(head.dir)
br<-seq(-0.5,2.7,length=61)
###################################################
### custom functions for analysis
###################################################
source(source.code.path.file)
load.all() # this function loads all the standard packages you use in this experiment
###################################################
### custom functions for analysis
###################################################
a<-fread(paste0(layoutdir,'17092x-SamplingData.txt'))
a[,rc:=unlist(sapply(samp_id,function(x)strsplit(x,'\\.')[[1]][2]))]
a[,c('r','c','rc','measurement.date'):= data.table(t(a[,unlist(sapply(samp_id,function(x){
rc<-strsplit(x,'\\.')[[1]][2]
md<-strsplit(x,'-')[[1]][1]
len<-nchar(rc)
if(len==2){
r<-substr(rc,1,1)
c<-substr(rc,2,2)
}
if(len==3){
r<-substr(rc,1,1)
c<-substr(rc,2,3)
}
c(r,c,rc,md)
}))]))
]
# run below to see the problems with sample id's in this set
# plateplot(a,'sample_flow_rate')+facet_grid(~plate.num)
# OK it's clear that the xml files had some problems. Plates 009 - 016 all have the correct data, and from my notes I know that the sampling rate was 0.5 µl per second.
b<-fread(paste0(layoutdir,'1707xx-17092x-LayoutByHand.txt'))[!is.na(genotype.strain)]
asub<-a[plate.num%in%c('Plate_009','Plate_010','Plate_011','Plate_012','Plate_013','Plate_014','Plate_015','Plate_016'),.(sample_flow_rate,samp_id)]
asub[,samp_id:=gsub('170927','170928',samp_id)]
DTm<-merge(asub,b,by='samp_id',all=T)
DTm[is.na(sample_flow_rate),sample_flow_rate:=0.5]
layout<-DTm[,.(
samp_id,
sample_flow_rate,
gal,
glu,
plate,
source.plate,
r,
c,
rc,
genotype.strain,
genotype.plasmid,
genotype.strain1,
strain.arb,
delta.GAL3,
delta.GAL80,
delta.GAL4,
copies.GAL3.genome,
copies.GAL80.genome,
copies.GAL4.genome,
tx.mix.arb,
pRS415,
copies.GAL3.plasmid,
copies.GAL80.plasmid,
copies.GAL4.plasmid,
total.copies.GAL3,
total.copies.GAL80,
total.copies.GAL4
)]
layout[,plasgeno:=applyPaste(
data.frame( copies.GAL3.plasmid,
copies.GAL80.plasmid,
copies.GAL4.plasmid,
copies.GAL3.genome,
copies.GAL80.genome,
copies.GAL4.genome),' '
)]
layout[,plasmidcopies:=applyPaste(
data.frame( copies.GAL3.plasmid,
copies.GAL80.plasmid,
copies.GAL4.plasmid),' '
)]
layout[,genomecopies:=applyPaste(
data.frame(
copies.GAL3.genome,
copies.GAL80.genome,
copies.GAL4.genome),' '
)]
layout[,totalcopies:=applyPaste(data.frame( total.copies.GAL3,
total.copies.GAL80,
total.copies.GAL4),' '
)]
layout[,binarycount:=applyPaste(data.frame( as.numeric(total.copies.GAL3>0),
as.numeric(total.copies.GAL80>0),
as.numeric(total.copies.GAL4>0)),' '
)]
layout[,measurement.date:=unlist(sapply(samp_id,function(x)as.numeric(strsplit(x,'-')[[1]][1])))]
layout[measurement.date=='170927',c('gal','glu'):=list(0,1)]
layout[measurement.date=='170928',c('gal','glu'):=list(1,0)]
write.table(layout,paste0(layoutdir,'17092x-layout.txt'),row.names=F,sep='\t')
fread(paste0(layoutdir,'17092x-layout.txt'))
|
# For each nematode species listed in the BioMart id table, query WormBase
# ParaSite BioMart for its gene ids and record the gene count per species.
# NOTE(review): relies on a biomaRt `mart` object being defined beforehand;
# it is not created within this span.
all.species.ids <- read.table("200512_001_Nematode species internal ids_BioMart.txt", header = TRUE)
#reads a table containing all species' bioMart internal ids in alphabetical order (of species, not of ids)
num.species <- length(all.species.ids$species_id)
#counts total number of species in analysis
dataout <- data.frame("Species_ID" = rep(NA, num.species), "total_genes" = rep(NA, num.species))
#creates an empty data frame - CHANGE COLUMN TITLE HERE)
# One BioMart query per species; each returns the gene ids for that species.
# NOTE(review): 1:num.species misbehaves if the id table is empty
# (seq_len(num.species) would be safer) -- confirm input is never empty.
for (i in 1:num.species) {genes.temp <- getBM(mart = mart,
filters = "species_id_1010",
value = all.species.ids$species_id[i],
attributes = "wbps_gene_id")
# NOTE(review): gettext() is the translation lookup function; presumably
# as.character() was intended here to coerce the id -- confirm.
dataout$Species_ID[i] <- gettext(all.species.ids$species_id[i])
#enters species ID in dataframe column
dataout$total_genes[i] <- length(genes.temp$wbps_gene_id)
#enters number of genes in dataframe column - CHANGE COLUMN TITLE HERE
rm(genes.temp)
}
head(dataout)
#prints the header of dataout
write.table(dataout, file = "200512_all_species_all_genes.txt", row.names = TRUE)
##CHANGE OUTPUT FILENAME HERE
# Clean up intermediate objects from the workspace.
rm(list = c("all.species.ids", "num.species", "dataout", "i"))
#reads a table containing all species' bioMart internal ids in alphabetical order (of species, not of ids)
num.species <- length(all.species.ids$species_id)
#counts total number of species in analysis
dataout <- data.frame("Species_ID" = rep(NA, num.species), "total_genes" = rep(NA, num.species))
#creates an empty data frame - CHANGE COLUMN TITLE HERE)
for (i in 1:num.species) {genes.temp <- getBM(mart = mart,
filters = "species_id_1010",
value = all.species.ids$species_id[i],
attributes = "wbps_gene_id")
dataout$Species_ID[i] <- gettext(all.species.ids$species_id[i])
#enters species ID in dataframe column
dataout$total_genes[i] <- length(genes.temp$wbps_gene_id)
#enters number of genes in dataframe column - CHANGE COLUMN TITLE HERE
rm(genes.temp)
}
head(dataout)
#prints the header of dataout
write.table(dataout, file = "200512_all_species_all_genes.txt", row.names = TRUE)
##CHANGE OUTPUT FILENAME HERE
rm(list = c("all.species.ids", "num.species", "dataout", "i")) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dots.R
\name{dots_values}
\alias{dots_values}
\title{Evaluate dots with preliminary splicing}
\usage{
dots_values(..., .ignore_empty = c("trailing", "none", "all"),
.preserve_empty = FALSE, .homonyms = c("keep", "first", "last",
"error"), .check_assign = FALSE)
}
\arguments{
\item{...}{Arguments to evaluate and process splicing operators.}
\item{.ignore_empty}{Whether to ignore empty arguments. Can be one
of \code{"trailing"}, \code{"none"}, \code{"all"}. If \code{"trailing"}, only the
last argument is ignored if it is empty.}
\item{.preserve_empty}{Whether to preserve the empty arguments that
were not ignored. If \code{TRUE}, empty arguments are stored with
\code{\link[=missing_arg]{missing_arg()}} values. If \code{FALSE} (the default) an error is
thrown when an empty argument is detected.}
\item{.homonyms}{How to treat arguments with the same name. The
default, \code{"keep"}, preserves these arguments. Set \code{.homonyms} to
\code{"first"} to only keep the first occurrences, to \code{"last"} to keep
the last occurrences, and to \code{"error"} to raise an informative
error and indicate what arguments have duplicated names.}
\item{.check_assign}{Whether to check for \code{<-} calls passed in
dots. When \code{TRUE} and a \code{<-} call is detected, a warning is
issued to advise users to use \code{=} if they meant to match a
function parameter, or wrap the \code{<-} call in braces otherwise.
This ensures assignments are explicit.}
}
\description{
This is a tool for advanced users. It captures dots, processes
unquoting and splicing operators, and evaluates them. Unlike
\code{\link[=dots_list]{dots_list()}}, it does not flatten spliced objects, instead they
are attributed a \code{spliced} class (see \code{\link[=splice]{splice()}}). You can process
spliced objects manually, perhaps with a custom predicate (see
\code{\link[=flatten_if]{flatten_if()}}).
}
\examples{
dots <- dots_values(!!! list(1, 2), 3)
dots
# Flatten the objects marked as spliced:
flatten_if(dots, is_spliced)
}
\keyword{internal}
| /man/dots_values.Rd | no_license | TylerGrantSmith/rlang | R | false | true | 2,116 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dots.R
\name{dots_values}
\alias{dots_values}
\title{Evaluate dots with preliminary splicing}
\usage{
dots_values(..., .ignore_empty = c("trailing", "none", "all"),
.preserve_empty = FALSE, .homonyms = c("keep", "first", "last",
"error"), .check_assign = FALSE)
}
\arguments{
\item{...}{Arguments to evaluate and process splicing operators.}
\item{.ignore_empty}{Whether to ignore empty arguments. Can be one
of \code{"trailing"}, \code{"none"}, \code{"all"}. If \code{"trailing"}, only the
last argument is ignored if it is empty.}
\item{.preserve_empty}{Whether to preserve the empty arguments that
were not ignored. If \code{TRUE}, empty arguments are stored with
\code{\link[=missing_arg]{missing_arg()}} values. If \code{FALSE} (the default) an error is
thrown when an empty argument is detected.}
\item{.homonyms}{How to treat arguments with the same name. The
default, \code{"keep"}, preserves these arguments. Set \code{.homonyms} to
\code{"first"} to only keep the first occurrences, to \code{"last"} to keep
the last occurrences, and to \code{"error"} to raise an informative
error and indicate what arguments have duplicated names.}
\item{.check_assign}{Whether to check for \code{<-} calls passed in
dots. When \code{TRUE} and a \code{<-} call is detected, a warning is
issued to advise users to use \code{=} if they meant to match a
function parameter, or wrap the \code{<-} call in braces otherwise.
This ensures assignments are explicit.}
}
\description{
This is a tool for advanced users. It captures dots, processes
unquoting and splicing operators, and evaluates them. Unlike
\code{\link[=dots_list]{dots_list()}}, it does not flatten spliced objects, instead they
are attributed a \code{spliced} class (see \code{\link[=splice]{splice()}}). You can process
spliced objects manually, perhaps with a custom predicate (see
\code{\link[=flatten_if]{flatten_if()}}).
}
\examples{
dots <- dots_values(!!! list(1, 2), 3)
dots
# Flatten the objects marked as spliced:
flatten_if(dots, is_spliced)
}
\keyword{internal}
|
# cfMeDIP fragment analysis for sample normal_cfmedip_sample_3:
# 1) load aligned read pairs and keep autosomes chr1-chr22;
# 2) find the modal fragment width and the CpG-count distribution of
#    modal-width fragments;
# 3) compare the fragments' CpG-island annotation against an equal-sized
#    random sample of genome-wide windows of the same width
#    (observed vs expected).
# Depends on Bioconductor (GenomicRanges, GenomicAlignments, BSgenome,
# Repitools) and the project package DeCarvalho (Epigenome.hg19).
library(GenomicRanges)
library(GenomicAlignments)
library(BSgenome.Hsapiens.UCSC.hg19)
library(DeCarvalho)
library(dplyr)
library(Repitools)
allmainchrs <- paste("chr",1:22,sep="")
# FIX: the original line was missing the opening parenthesis after
# readGAlignmentPairs, which made this script a syntax error.
normal_cfmedip_sample_3_gr <- GRanges(readGAlignmentPairs("~/bam_directory/file.bam"))
normal_cfmedip_sample_3_gr <- normal_cfmedip_sample_3_gr[seqnames(normal_cfmedip_sample_3_gr) %in% allmainchrs]
# Fragment-width histogram and its mode.
normal_cfmedip_sample_3_widths <- width(normal_cfmedip_sample_3_gr)
normal_cfmedip_sample_3_widths <- table(normal_cfmedip_sample_3_widths)
# NOTE(review): if several widths tie for the maximum count, this yields
# more than one entry and the downstream subsetting may misbehave.
normal_cfmedip_sample_3_mode <- normal_cfmedip_sample_3_widths[normal_cfmedip_sample_3_widths == max(normal_cfmedip_sample_3_widths)]
# "index" holds the count at the mode; "mode" becomes the width itself.
normal_cfmedip_sample_3_index <- as.numeric(normal_cfmedip_sample_3_mode)
normal_cfmedip_sample_3_mode <- as.numeric(names(normal_cfmedip_sample_3_mode))
# Keep only fragments of exactly the modal width.
normal_cfmedip_sample_3_mode_gr <- normal_cfmedip_sample_3_gr[width(normal_cfmedip_sample_3_gr) == normal_cfmedip_sample_3_mode]
# CpG count per fragment and its distribution (as proportions).
normal_cfmedip_sample_3_mode_gr$CpG_count <- cpgDensityCalc(normal_cfmedip_sample_3_mode_gr, Hsapiens)
normal_cfmedip_sample_3_mode_gr_cpgprop <- table(normal_cfmedip_sample_3_mode_gr$CpG_count) / sum(table(normal_cfmedip_sample_3_mode_gr$CpG_count))
save(normal_cfmedip_sample_3_mode_gr_cpgprop, file="~/output/normal_cfmedip_sample_3/normal_cfmedip_sample_3_mode_gr_cpgprop.RData")
# Annotate modal-width fragments with CpG-island context.
normal_cfmedip_sample_3_mode_gr$ID <- paste(seqnames(normal_cfmedip_sample_3_mode_gr), start(normal_cfmedip_sample_3_mode_gr), end(normal_cfmedip_sample_3_mode_gr), sep=".")
normal_cfmedip_sample_3_mode_df <- as.data.frame(normal_cfmedip_sample_3_mode_gr)
normal_cfmedip_sample_3_mode_df <- Epigenome.hg19(normal_cfmedip_sample_3_mode_df, is.CpG=T)
normal_cfmedip_sample_3_mode_df_output <- normal_cfmedip_sample_3_mode_df[,c('seqnames','start','end','cgi')]
colnames(normal_cfmedip_sample_3_mode_df_output) <- c('chr','start','end','cpg_annot')
save(normal_cfmedip_sample_3_mode_df_output, file="~/output/normal_cfmedip_sample_3/normal_cfmedip_sample_3_mode_df_R.RData")
# Build the expected (background) set: random genome-wide windows of the
# modal width, matched in number to the fragment set (seed fixed for
# reproducibility).
chr_allmainchrs_167 <- genomeBlocks(seqlengths(Hsapiens)[allmainchrs],width=normal_cfmedip_sample_3_mode)
set.seed(0)
index <- sample(1:length(chr_allmainchrs_167), nrow(normal_cfmedip_sample_3_mode_df_output))
chr_allmainchrs_167 <- chr_allmainchrs_167[index]
chr_allmainchrs_167$ID <- paste(seqnames(chr_allmainchrs_167), start(chr_allmainchrs_167), end(chr_allmainchrs_167), sep=".")
chr_allmainchrs_df_167 <- as.data.frame(chr_allmainchrs_167)
chr_allmainchrs_df_167 <- Epigenome.hg19(chr_allmainchrs_df_167, is.CpG=T)
chr_allmainchrs_df_167_output <- chr_allmainchrs_df_167[,c('seqnames','start','end','cgi')]
colnames(chr_allmainchrs_df_167_output) <- c('chr','start','end','cpg_annot')
# Observed-vs-expected ratio of CpG annotation categories.
normal_cfmedip_sample_3_mode_df_obsvsexp <- table(normal_cfmedip_sample_3_mode_df_output$cpg_annot) / table(chr_allmainchrs_df_167_output$cpg_annot)
save(normal_cfmedip_sample_3_mode_df_obsvsexp, file="~/output/normal_cfmedip_sample_3/normal_cfmedip_sample_3_mode_df_obsvsexp.RData")
| /qsub/R_files/normal_cfmedip_sample_3_figure.R | no_license | bratmanlab/cfMeDIP_Protocol | R | false | false | 2,976 | r | library(GenomicRanges)
library(GenomicAlignments)
library(BSgenome.Hsapiens.UCSC.hg19)
library(DeCarvalho)
library(dplyr)
library(Repitools)
allmainchrs <- paste("chr",1:22,sep="")
normal_cfmedip_sample_3_gr <- GRanges(readGAlignmentPairs"~/bam_directory/file.bam"))
normal_cfmedip_sample_3_gr <- normal_cfmedip_sample_3_gr[seqnames(normal_cfmedip_sample_3_gr) %in% allmainchrs]
normal_cfmedip_sample_3_widths <- width(normal_cfmedip_sample_3_gr)
normal_cfmedip_sample_3_widths <- table(normal_cfmedip_sample_3_widths)
normal_cfmedip_sample_3_mode <- normal_cfmedip_sample_3_widths[normal_cfmedip_sample_3_widths == max(normal_cfmedip_sample_3_widths)]
normal_cfmedip_sample_3_index <- as.numeric(normal_cfmedip_sample_3_mode)
normal_cfmedip_sample_3_mode <- as.numeric(names(normal_cfmedip_sample_3_mode))
normal_cfmedip_sample_3_mode_gr <- normal_cfmedip_sample_3_gr[width(normal_cfmedip_sample_3_gr) == normal_cfmedip_sample_3_mode]
normal_cfmedip_sample_3_mode_gr$CpG_count <- cpgDensityCalc(normal_cfmedip_sample_3_mode_gr, Hsapiens)
normal_cfmedip_sample_3_mode_gr_cpgprop <- table(normal_cfmedip_sample_3_mode_gr$CpG_count) / sum(table(normal_cfmedip_sample_3_mode_gr$CpG_count))
save(normal_cfmedip_sample_3_mode_gr_cpgprop, file="~/output/normal_cfmedip_sample_3/normal_cfmedip_sample_3_mode_gr_cpgprop.RData")
normal_cfmedip_sample_3_mode_gr$ID <- paste(seqnames(normal_cfmedip_sample_3_mode_gr), start(normal_cfmedip_sample_3_mode_gr), end(normal_cfmedip_sample_3_mode_gr), sep=".")
normal_cfmedip_sample_3_mode_df <- as.data.frame(normal_cfmedip_sample_3_mode_gr)
normal_cfmedip_sample_3_mode_df <- Epigenome.hg19(normal_cfmedip_sample_3_mode_df, is.CpG=T)
normal_cfmedip_sample_3_mode_df_output <- normal_cfmedip_sample_3_mode_df[,c('seqnames','start','end','cgi')]
colnames(normal_cfmedip_sample_3_mode_df_output) <- c('chr','start','end','cpg_annot')
save(normal_cfmedip_sample_3_mode_df_output, file="~/output/normal_cfmedip_sample_3/normal_cfmedip_sample_3_mode_df_R.RData")
chr_allmainchrs_167 <- genomeBlocks(seqlengths(Hsapiens)[allmainchrs],width=normal_cfmedip_sample_3_mode)
set.seed(0)
index <- sample(1:length(chr_allmainchrs_167), nrow(normal_cfmedip_sample_3_mode_df_output))
chr_allmainchrs_167 <- chr_allmainchrs_167[index]
chr_allmainchrs_167$ID <- paste(seqnames(chr_allmainchrs_167), start(chr_allmainchrs_167), end(chr_allmainchrs_167), sep=".")
chr_allmainchrs_df_167 <- as.data.frame(chr_allmainchrs_167)
chr_allmainchrs_df_167 <- Epigenome.hg19(chr_allmainchrs_df_167, is.CpG=T)
chr_allmainchrs_df_167_output <- chr_allmainchrs_df_167[,c('seqnames','start','end','cgi')]
colnames(chr_allmainchrs_df_167_output) <- c('chr','start','end','cpg_annot')
normal_cfmedip_sample_3_mode_df_obsvsexp <- table(normal_cfmedip_sample_3_mode_df_output$cpg_annot) / table(chr_allmainchrs_df_167_output$cpg_annot)
save(normal_cfmedip_sample_3_mode_df_obsvsexp, file="~/output/normal_cfmedip_sample_3/normal_cfmedip_sample_3_mode_df_obsvsexp.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exploratory_data_analysis.R
\name{df_status}
\alias{df_status}
\title{Get a summary for the given data frame (o vector).}
\usage{
df_status(data, print_results)
}
\arguments{
\item{data}{data frame or a single vector}
\item{print_results}{if FALSE then the results are not printed to the console; TRUE by default.}
}
\value{
Metrics data frame
}
\description{
For each variable it returns: quantity and percentage of zeros (q_zeros and p_zeros respectively), the same metrics for NA values (q_NA/p_na), and infinite values (q_inf/p_inf). The last two columns indicate the data type and the quantity of unique values.
This function prints and returns the results.
}
\examples{
df_status(heart_disease)
}
| /man/df_status.Rd | permissive | pablo14/funModeling | R | false | true | 757 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exploratory_data_analysis.R
\name{df_status}
\alias{df_status}
\title{Get a summary for the given data frame (o vector).}
\usage{
df_status(data, print_results)
}
\arguments{
\item{data}{data frame or a single vector}
\item{print_results}{if FALSE then the results are not printed to the console; TRUE by default.}
}
\value{
Metrics data frame
}
\description{
For each variable it returns: quantity and percentage of zeros (q_zeros and p_zeros respectively), the same metrics for NA values (q_NA/p_na), and infinite values (q_inf/p_inf). The last two columns indicate the data type and the quantity of unique values.
This function prints and returns the results.
}
\examples{
df_status(heart_disease)
}
|
Lib.PrimesCalc <- function(N = 100){
  # Return all prime numbers <= N using the Sieve of Eratosthenes.
  #
  # Args:
  #   N: upper bound, inclusive.  Values below 2 yield an empty result.
  # Returns: vector of primes in increasing order.
  #
  # Fixes over the original implementation: the repeat loop re-scanned the
  # whole 1:N range once per prime (roughly O(N * pi(N)) work), seq(1, N)
  # produced c(1, 0) for N = 0, and c(2) was returned even when N < 2.
  if (N < 2) {
    return(integer(0))
  }
  is.prime <- rep(TRUE, N)
  is.prime[1] <- FALSE  # 1 is not prime
  # Only sieve with primes up to sqrt(N); larger primes have no unmarked
  # composite multiples <= N.
  limit <- floor(sqrt(N))
  p <- 2
  while (p <= limit) {
    if (is.prime[p]) {
      # Start at p^2: smaller multiples were marked by smaller primes.
      is.prime[seq(p * p, N, by = p)] <- FALSE
    }
    p <- p + 1
  }
  return(which(is.prime))
}
# Collect every prime up to N (the upper bound, N >= 2 assumed) by
# repeatedly crossing out the multiples of the most recently found prime
# and taking the next still-unmarked number greater than 1.
Lib.PrimesCalc <- function(N = 100){
  candidates <- seq(1, N)
  found <- c(2)
  hits <- rep(0, N)
  repeat {
    # Mark every multiple of the newest prime (including the prime itself).
    hits <- hits + candidates %in% (max(found) * candidates)
    # Position 1 is never marked, so the second unmarked entry (if any)
    # is the next prime.
    nxt <- which(hits == 0)[2]
    if (is.na(nxt)) {
      break
    }
    found[[length(found) + 1]] <- nxt
  }
  return(found)
}
# ==============================================
# ------- Model output analysis: cover
# ==============================================
# Irob et al., 2021 ---------------------------
# Author of R script: Katja Irob (irob.k@fu-berlin.de)
# ==============================================
# Loads simulated PFT cover time series, reshapes them into long format,
# averages mean cover (+/- sd) per scenario/year/plant type and prepares
# scenario labels/colours for plotting.
rm(list = ls()) # clears working environment
library(tidyverse)
options(dplyr.width = Inf) # enables head() to display all columns
library(grid)
library(gridExtra)
library(cowplot)
library(reshape2)
library(scales)
library(here)
# readfiles() and makeMeanCover() are project helpers from Utility.R.
source(here::here("R/Utility.R"))
# Read data and calculate mean cover per scenario and sub-pft for last 20 years
PFTcoverall <- readfiles(path = "Data/Results")
meanCover <- makeMeanCover(df = PFTcoverall)
# ===================================================
# ------- Plotting cover over time for all scenarios
# ===================================================
# Keep the three total-cover columns and melt to long format
# (one row per year/scenario/variable).
cover <- PFTcoverall[, c("year", "meanGtotalcover", "meanStotalcover", "meanAtotalcover", "scenario")]
cover <- melt(cover, id.vars = c("year", "scenario"))
cover$value <- cover$value * 100 # converting cover to percentage
# Map the variable names onto plant functional types.
cover$type <- ifelse(grepl("(meanGtotalcover)", cover$variable), "Perennial", ifelse(grepl("(meanStotalcover)", cover$variable), "Shrub", "Annual"))
# Mean and sd of cover per scenario/year/type.
# NOTE(review): funs() is deprecated in dplyr >= 0.8; list(mean = mean,
# sd = sd) is the modern equivalent -- consider updating.
cover <- cover %>%
group_by(scenario, year, type) %>%
summarise_at(vars(value), funs(mean, sd))
# renaming scenarios
cover$scenario <- as.character(cover$scenario)
cover$scenario[cover$scenario == "SR40graze"] <- "Grazing low"
cover$scenario[cover$scenario == "SR20graze"] <- "Grazing high"
#
cover$scenario[cover$scenario == "SR20browse"] <- "Browsing high"
cover$scenario[cover$scenario == "SR40browse"] <- "Browsing low"
# Fix the facet/legend ordering of the scenarios.
cover$scenario <- factor(cover$scenario, levels = c("Grazing low", "Browsing low", "Grazing high", "Browsing high"))
# select colours for plotting
cols <- c("gold1", "seagreen", "coral")
scenario_list <- unique(cover$scenario)
plot_list <- list()
# creating a plot for every scenario and saving it in plot_list()
for (i in 1:length(scenario_list)) {
plot <- ggplot(
subset(cover, scenario == scenario_list[i]),
aes(x = year, y = mean, colour = type)
) +
geom_ribbon(aes(x = year, ymin = mean - sd, ymax = mean + sd), size = 0.5, fill = "lightgrey", alpha = 0.5) +
geom_line(size = 1.2) +
ylim(0, 100) +
xlab("Years") +
ylab(bquote("Cover [%]")) +
scale_colour_manual(values = cols) +
ggtitle(paste(scenario_list[i])) +
theme_set(theme_minimal()) +
theme(
axis.text.x = element_text(size = 12),
axis.text.y = element_text(size = 12),
axis.title.y = element_text(size = 14),
axis.title.x = element_text(size = 14),
legend.text = element_text(size = 12),
plot.title = element_text(size = 16, face = "bold"),
legend.direction = "horizontal", legend.position = "none", legend.title = element_blank(),
legend.background = element_blank(),
panel.grid.major = element_line(size = 0.2, linetype = "solid", colour = "gray"),
panel.background = element_blank()
) +
guides(col = guide_legend(nrow = 1, byrow = TRUE))
plotname <- paste0(gsub(" ", "_", scenario_list[i]), "_line") # rename plot according to scenario
plot_list[[plotname]] <- plot
}
#### Barplot of last 20 years ------------
cover <- meanCover
# bring sub-PFTs in desired order
cover$PFT <- factor(cover$PFT, levels = c("meanACover0", "meanSCover0", "meanSCover1", "meanSCover2", "meanSCover3", "meanSCover4", "meanSCover5", "meanSCover6", "meanSCover7", "meanSCover8", "meanSCover9", "meanSCover10", "meanGCover0", "meanGCover1", "meanGCover2", "meanGCover3", "meanGCover4", "meanGCover5", "meanGCover6", "meanGCover7", "meanGCover8"))
# rename scenarios
cover$scenario <- as.character(cover$scenario)
cover$scenario[cover$scenario == "SR40graze"] <- "Grazing low"
cover$scenario[cover$scenario == "SR20graze"] <- "Grazing high"
#
cover$scenario[cover$scenario == "SR20browse"] <- "Browsing high"
cover$scenario[cover$scenario == "SR40browse"] <- "Browsing low"
cover$scenario <- factor(cover$scenario, levels = c("Grazing low", "Browsing low", "Grazing high", "Browsing high"))
scenario_list <- unique(cover$scenario)
barplot_list <- list()
# create barplot of last 20 years for every land use scenario and save it in barplot_list()
for (i in 1:length(scenario_list)) {
cover$type <- factor(cover$type, levels = c("Shrub", "Perennial", "Annual"))
cols <- c("coral", "seagreen", "gold1")
survival20 <- ggplot(
subset(cover, scenario == scenario_list[i]),
aes(y = cover, x = scenario, fill = type)
) +
geom_col() +
ylim(0, 100) +
ylab(bquote("Mean cover")) +
scale_fill_manual(values = cols) +
theme_set(theme_minimal()) +
theme(
axis.text.x = element_blank(),
axis.text.y = element_text(size = 10),
axis.title.y = element_blank(),
axis.title.x = element_blank(),
legend.direction = "horizontal", legend.position = "none", legend.title = element_blank(),
legend.background = element_blank(),
panel.grid.major = element_line(size = 0.2, linetype = "solid", colour = "gray"),
panel.background = element_blank()
)
barplotname <- paste0(gsub(" ", "_", scenario_list[i]), "_bar")
barplot_list[[barplotname]] <- survival20
}
# Extract legend from this plot:
cattle_low_bar <- ggplot(
subset(cover, scenario %in% "Grazing low"),
aes(y = cover, x = scenario == "Grazing low", fill = type)
) +
geom_col(color = "whitesmoke", lwd = 0.3) +
ylim(0, 100) +
ylab(bquote("Mean cover")) +
scale_fill_manual(values = cols) +
theme_set(theme_minimal()) +
theme(
axis.text.x = element_blank(),
axis.text.y = element_text(size = 12),
axis.title.y = element_blank(),
axis.title.x = element_blank(),
legend.text = element_text(size = 16),
legend.direction = "horizontal", legend.position = "bottom", legend.title = element_blank(),
legend.background = element_blank(),
legend.spacing.x = unit(0.3, "cm"),
panel.grid.major = element_line(size = 0.2, linetype = "solid", colour = "gray"),
panel.background = element_blank()
)
cattle_low_bar
get_legend <- function(cattle_low_bar) {
tmp <- ggplot_gtable(ggplot_build(cattle_low_bar))
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)
}
legend <- get_legend(cattle_low_bar)
# arrange all plots and legend in one plot ---------------------
coverplots <- plot_grid(plot_list$Grazing_low_line, barplot_list$Grazing_low_bar, plot_list$Browsing_low_line, barplot_list$Browsing_low_bar,
plot_list$Grazing_high_line, barplot_list$Grazing_high_bar, plot_list$Browsing_high_line, barplot_list$Browsing_high_bar,
ncol = 4, nrow = 2,
rel_widths = c(4, 1.5, 4, 1.5),
labels = c("a", "", "b", "", "c", "", "d", ""),
align = "h", axis = "bt"
)
cover_legend <- plot_grid(coverplots, legend, nrow = 2, rel_heights = c(1, 0.1))
ggsave(cover_legend, file="cover_combined_all_scenarios_revised.tiff", width = 32,
height = 20,
units = "cm", dpi=600)
##################################################
## STATS ------------------
##################################################
# cover -------------------------------
cover <- PFTcoverall[, c("year", "meanGtotalcover", "meanStotalcover", "meanAtotalcover", "scenario")]
cover$TotalCover <- cover$meanGtotalcover + cover$meanStotalcover + cover$meanAtotalcover # calculate total cover
cover <- melt(cover, id.vars = c("year", "scenario", "TotalCover"))
cover$value <- cover$value * 100 # convert to percentage
cover$type <- ifelse(grepl("(meanGtotalcover)", cover$variable), "Perennial", ifelse(grepl("(meanStotalcover)", cover$variable), "Shrub", "Annual")) # rename meta-PFT type
# calculate mean and median cover
meancover <- cover %>%
group_by(scenario, type) %>%
summarise_at(vars(value), funs(mean, sd))
mediancover <- cover %>%
group_by(scenario) %>%
summarise_at(vars(value), funs(median))
# create extra column for land use intensity
cover$intensity <- ifelse(grepl("(SR20)", cover$scenario), "high", "low")
cover$landuse <- ifelse(grepl("(browse)", cover$scenario), "Wildlife", "Cattle")
cover <- cover %>% filter(year > 79)
if (!require(rcompanion)) {
install.packages("rcompanion")
}
if (!require(FSA)) {
install.packages("FSA")
}
library(rcompanion)
library(FSA)
# non-parametric Scheirer-Ray-Hare test ---
scheirerRayHare(TotalCover ~ landuse + intensity, data = cover)
# H = 153.5, p < 0.001
# bring data in right format for post-hoc test and order by descending median
cover$landuse <- factor(cover$landuse, levels = c("Wildlife", "Cattle"))
cover$intensity <- factor(cover$intensity, levels = c("low", "high"))
# order by median from high to low
cover$scenario <- factor(cover$scenario, levels = c("SR40browse", "SR20browse", "SR20graze", "SR40graze"))
# post-hoc Dunn's test to look for differences between land use scenarios --------
DT <- dunnTest(TotalCover ~ scenario, data = cover, method = "bh")
DT # all scenarios differ significantly
# check significant differences
PT <- DT$res
cldList(P.adj ~ Comparison,
data = PT,
threshold = 0.05
) # letters indicating significance
## Effect size epsilon^2 ---
epsilonSquared(x = cover$TotalCover, g = cover$landuse)
# e^2 = 0.48
| /Analysis/Cover.R | no_license | Kutcha7/Irob_et_al | R | false | false | 9,614 | r | # ==============================================
# ------- Model output anlaysis: cover
# ==============================================
# Irob et al., 2021 ---------------------------
# Author of R script: Katja Irob (irob.k@fu-berlin.de)
# ==============================================
rm(list = ls()) # clears working environment
library(tidyverse)
options(dplyr.width = Inf) # enables head() to display all columns
library(grid)
library(gridExtra)
library(cowplot)
library(reshape2)
library(scales)
library(here)
source(here::here("R/Utility.R"))
# Read data and calculate mean cover per scenario and sub-pft for last 20 years
PFTcoverall <- readfiles(path = "Data/Results")
meanCover <- makeMeanCover(df = PFTcoverall)
# ===================================================
# ------- Plotting cover over time for all scenarios
# ===================================================
cover <- PFTcoverall[, c("year", "meanGtotalcover", "meanStotalcover", "meanAtotalcover", "scenario")]
cover <- melt(cover, id.vars = c("year", "scenario"))
cover$value <- cover$value * 100 # converting cover to percentage
cover$type <- ifelse(grepl("(meanGtotalcover)", cover$variable), "Perennial", ifelse(grepl("(meanStotalcover)", cover$variable), "Shrub", "Annual"))
cover <- cover %>%
group_by(scenario, year, type) %>%
summarise_at(vars(value), funs(mean, sd))
# renaming scenarios
cover$scenario <- as.character(cover$scenario)
cover$scenario[cover$scenario == "SR40graze"] <- "Grazing low"
cover$scenario[cover$scenario == "SR20graze"] <- "Grazing high"
#
cover$scenario[cover$scenario == "SR20browse"] <- "Browsing high"
cover$scenario[cover$scenario == "SR40browse"] <- "Browsing low"
cover$scenario <- factor(cover$scenario, levels = c("Grazing low", "Browsing low", "Grazing high", "Browsing high"))
# select colours for plotting
cols <- c("gold1", "seagreen", "coral")
scenario_list <- unique(cover$scenario)
plot_list <- list()
# creating a plot for every scenario and saving it in plot_list()
for (i in 1:length(scenario_list)) {
plot <- ggplot(
subset(cover, scenario == scenario_list[i]),
aes(x = year, y = mean, colour = type)
) +
geom_ribbon(aes(x = year, ymin = mean - sd, ymax = mean + sd), size = 0.5, fill = "lightgrey", alpha = 0.5) +
geom_line(size = 1.2) +
ylim(0, 100) +
xlab("Years") +
ylab(bquote("Cover [%]")) +
scale_colour_manual(values = cols) +
ggtitle(paste(scenario_list[i])) +
theme_set(theme_minimal()) +
theme(
axis.text.x = element_text(size = 12),
axis.text.y = element_text(size = 12),
axis.title.y = element_text(size = 14),
axis.title.x = element_text(size = 14),
legend.text = element_text(size = 12),
plot.title = element_text(size = 16, face = "bold"),
legend.direction = "horizontal", legend.position = "none", legend.title = element_blank(),
legend.background = element_blank(),
panel.grid.major = element_line(size = 0.2, linetype = "solid", colour = "gray"),
panel.background = element_blank()
) +
guides(col = guide_legend(nrow = 1, byrow = TRUE))
plotname <- paste0(gsub(" ", "_", scenario_list[i]), "_line") # rename plot according to scenario
plot_list[[plotname]] <- plot
}
#### Barplot of last 20 years ------------
cover <- meanCover
# bring sub-PFTs in desired order
cover$PFT <- factor(cover$PFT, levels = c("meanACover0", "meanSCover0", "meanSCover1", "meanSCover2", "meanSCover3", "meanSCover4", "meanSCover5", "meanSCover6", "meanSCover7", "meanSCover8", "meanSCover9", "meanSCover10", "meanGCover0", "meanGCover1", "meanGCover2", "meanGCover3", "meanGCover4", "meanGCover5", "meanGCover6", "meanGCover7", "meanGCover8"))
# rename scenarios
cover$scenario <- as.character(cover$scenario)
cover$scenario[cover$scenario == "SR40graze"] <- "Grazing low"
cover$scenario[cover$scenario == "SR20graze"] <- "Grazing high"
#
cover$scenario[cover$scenario == "SR20browse"] <- "Browsing high"
cover$scenario[cover$scenario == "SR40browse"] <- "Browsing low"
cover$scenario <- factor(cover$scenario, levels = c("Grazing low", "Browsing low", "Grazing high", "Browsing high"))
scenario_list <- unique(cover$scenario)
barplot_list <- list()
# create barplot of last 20 years for every land use scenario and save it in barplot_list()
for (i in 1:length(scenario_list)) {
cover$type <- factor(cover$type, levels = c("Shrub", "Perennial", "Annual"))
cols <- c("coral", "seagreen", "gold1")
survival20 <- ggplot(
subset(cover, scenario == scenario_list[i]),
aes(y = cover, x = scenario, fill = type)
) +
geom_col() +
ylim(0, 100) +
ylab(bquote("Mean cover")) +
scale_fill_manual(values = cols) +
theme_set(theme_minimal()) +
theme(
axis.text.x = element_blank(),
axis.text.y = element_text(size = 10),
axis.title.y = element_blank(),
axis.title.x = element_blank(),
legend.direction = "horizontal", legend.position = "none", legend.title = element_blank(),
legend.background = element_blank(),
panel.grid.major = element_line(size = 0.2, linetype = "solid", colour = "gray"),
panel.background = element_blank()
)
barplotname <- paste0(gsub(" ", "_", scenario_list[i]), "_bar")
barplot_list[[barplotname]] <- survival20
}
# Extract legend from this plot:
cattle_low_bar <- ggplot(
subset(cover, scenario %in% "Grazing low"),
aes(y = cover, x = scenario == "Grazing low", fill = type)
) +
geom_col(color = "whitesmoke", lwd = 0.3) +
ylim(0, 100) +
ylab(bquote("Mean cover")) +
scale_fill_manual(values = cols) +
theme_set(theme_minimal()) +
theme(
axis.text.x = element_blank(),
axis.text.y = element_text(size = 12),
axis.title.y = element_blank(),
axis.title.x = element_blank(),
legend.text = element_text(size = 16),
legend.direction = "horizontal", legend.position = "bottom", legend.title = element_blank(),
legend.background = element_blank(),
legend.spacing.x = unit(0.3, "cm"),
panel.grid.major = element_line(size = 0.2, linetype = "solid", colour = "gray"),
panel.background = element_blank()
)
cattle_low_bar
get_legend <- function(cattle_low_bar) {
tmp <- ggplot_gtable(ggplot_build(cattle_low_bar))
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)
}
legend <- get_legend(cattle_low_bar)
# arrange all plots and legend in one plot ---------------------
coverplots <- plot_grid(plot_list$Grazing_low_line, barplot_list$Grazing_low_bar, plot_list$Browsing_low_line, barplot_list$Browsing_low_bar,
plot_list$Grazing_high_line, barplot_list$Grazing_high_bar, plot_list$Browsing_high_line, barplot_list$Browsing_high_bar,
ncol = 4, nrow = 2,
rel_widths = c(4, 1.5, 4, 1.5),
labels = c("a", "", "b", "", "c", "", "d", ""),
align = "h", axis = "bt"
)
cover_legend <- plot_grid(coverplots, legend, nrow = 2, rel_heights = c(1, 0.1))
ggsave(cover_legend, file="cover_combined_all_scenarios_revised.tiff", width = 32,
height = 20,
units = "cm", dpi=600)
##################################################
## STATS ------------------
##################################################
# cover -------------------------------
cover <- PFTcoverall[, c("year", "meanGtotalcover", "meanStotalcover", "meanAtotalcover", "scenario")]
cover$TotalCover <- cover$meanGtotalcover + cover$meanStotalcover + cover$meanAtotalcover # calculate total cover
cover <- melt(cover, id.vars = c("year", "scenario", "TotalCover"))
cover$value <- cover$value * 100 # convert to percentage
cover$type <- ifelse(grepl("(meanGtotalcover)", cover$variable), "Perennial", ifelse(grepl("(meanStotalcover)", cover$variable), "Shrub", "Annual")) # rename meta-PFT type
# calculate mean and median cover
meancover <- cover %>%
group_by(scenario, type) %>%
summarise_at(vars(value), funs(mean, sd))
mediancover <- cover %>%
group_by(scenario) %>%
summarise_at(vars(value), funs(median))
# create extra column for land use intensity
cover$intensity <- ifelse(grepl("(SR20)", cover$scenario), "high", "low")
cover$landuse <- ifelse(grepl("(browse)", cover$scenario), "Wildlife", "Cattle")
cover <- cover %>% filter(year > 79)
if (!require(rcompanion)) {
install.packages("rcompanion")
}
if (!require(FSA)) {
install.packages("FSA")
}
library(rcompanion)
library(FSA)
# non-parametric Scheirer-Ray-Hare test ---
scheirerRayHare(TotalCover ~ landuse + intensity, data = cover)
# H = 153.5, p < 0.001
# bring data in right format for post-hoc test and order by descending median
cover$landuse <- factor(cover$landuse, levels = c("Wildlife", "Cattle"))
cover$intensity <- factor(cover$intensity, levels = c("low", "high"))
# order by median from high to low
cover$scenario <- factor(cover$scenario, levels = c("SR40browse", "SR20browse", "SR20graze", "SR40graze"))
# post-hoc Dunn's test to look for differences between land use scenarios --------
DT <- dunnTest(TotalCover ~ scenario, data = cover, method = "bh")
DT # all scenarios differ significantly
# check significant differences
PT <- DT$res
cldList(P.adj ~ Comparison,
data = PT,
threshold = 0.05
) # letters indicating significance
## Effect size epsilon^2 ---
epsilonSquared(x = cover$TotalCover, g = cover$landuse)
# e^2 = 0.48
|
%%% $Id: doOptim.Rd 193 2012-06-24 21:13:42Z kristl $
\name{doOptim}
\alias{doOptim}
\alias{mvrValstats}
\title{
Optimise several baseline algorithms on a data set
}
\description{
Tests several baseline algorithms with one predictor for a given data
set. The baseline algorithms are represented as a list of
\code{\linkS4class{baselineAlgTest}} objects, and the predictor as a
\code{\linkS4class{predictionTest}} object.
}
\usage{
doOptim(baselineTests, X, y, predictionTest, postproc = NULL,
tmpfile = "tmp.baseline", verbose = FALSE, cleanTmp = FALSE)
}
\arguments{
\item{baselineTests}{a list of \code{\linkS4class{baselineAlgTest}}
objects. The baseline algorithms and parameter values to test}
\item{X}{A matrix. The spectra to use in the test}
\item{y}{A vector or matrix. The response(s) to use in the test}
\item{predictionTest}{A \code{\linkS4class{predictionTest}} object. The
predictor and parameter values to use in the test}
\item{postproc}{A function, used to postprocess the baseline corrected
spectra prior to prediction testing. The function should take a
matrix of spectra as its only argument, and return a matrix of
postprocessed spectra}
\item{tmpfile}{The basename of the files used to store intermediate
calculations for checkpointing. Defaults to \code{"tmp.baseline"}}
\item{verbose}{Logical, specifying whether the test should print out
progress information. Default is \code{FALSE}}
\item{cleanTmp}{Logical, specifying whether the intermediate files should
be deleted when the optimisation has finished. Default is \code{FALSE}}
}
\details{
The function loops through the baseline algorithm tests in
\code{baselineTests}, testing each of them with the given data and
prediction test, and collects the results. The results of each
baseline algorithm test is saved in a temporary file so that if the
optimisation is interrupted, it can be re-run and will use the
pre-calculated results. If \code{cleanTmp} is \code{TRUE}, the temporary
files are deleted when the whole optimisation has finished.
}
\value{
A list with components
\item{baselineTests}{The \code{baselineTests} argument}
\item{results}{A list with the \code{baselineAlgResult} objects
for each baseline test}
\item{minQualMeas}{The minimum quality measure value}
\item{baselineAlg.min}{The name of the baseline algorithm giving the
minimum quality measure value}
\item{param.min}{A list with the parameter values corresponding to the
minimum quality measure value}
}
\author{Bjørn-Helge Mevik and Kristian Hovde Liland}
\seealso{
\code{\linkS4class{baselineAlgTest}},\code{\linkS4class{predictionTest}}
}
\keyword{baseline}
\keyword{spectra}
| /man/doOptim.Rd | no_license | PrathamLearnsToCode/baseline | R | false | false | 2,808 | rd | %%% $Id: doOptim.Rd 193 2012-06-24 21:13:42Z kristl $
\name{doOptim}
\alias{doOptim}
\alias{mvrValstats}
\title{
Optimise several baseline algorithms on a data set
}
\description{
Tests several baseline algorithms with one predictor for a given data
set. The baseline algorithms are represented as a list of
\code{\linkS4class{baselineAlgTest}} objects, and the predictor as a
\code{\linkS4class{predictionTest}} object.
}
\usage{
doOptim(baselineTests, X, y, predictionTest, postproc = NULL,
tmpfile = "tmp.baseline", verbose = FALSE, cleanTmp = FALSE)
}
\arguments{
\item{baselineTests}{a list of \code{\linkS4class{baselineAlgTest}}
objects. The baseline algorithms and parameter values to test}
\item{X}{A matrix. The spectra to use in the test}
\item{y}{A vector or matrix. The response(s) to use in the test}
\item{predictionTest}{A \code{\linkS4class{predictionTest}} object. The
predictor and parameter values to use in the test}
\item{postproc}{A function, used to postprocess the baseline corrected
spectra prior to prediction testing. The function should take a
matrix of spectra as its only argument, and return a matrix of
postprocessed spectra}
\item{tmpfile}{The basename of the files used to store intermediate
calculations for checkpointing. Defaults to \code{"tmp.baseline"}}
\item{verbose}{Logical, specifying whether the test should print out
progress information. Default is \code{FALSE}}
\item{cleanTmp}{Logical, specifying whether the intermediate files should
be deleted when the optimisation has finished. Default is \code{FALSE}}
}
\details{
The function loops through the baseline algorithm tests in
\code{baselineTests}, testing each of them with the given data and
prediction test, and collects the results. The results of each
baseline algorithm test is saved in a temporary file so that if the
optimisation is interrupted, it can be re-run and will use the
pre-calculated results. If \code{cleanTmp} is \code{TRUE}, the temporary
files are deleted when the whole optimisation has finished.
}
\value{
A list with components
\item{baselineTests}{The \code{baselineTests} argument}
\item{results}{A list with the \code{baselineAlgResult} objects
for each baseline test}
\item{minQualMeas}{The minimum quality measure value}
\item{baselineAlg.min}{The name of the baseline algorithm giving the
minimum quality measure value}
\item{param.min}{A list with the parameter values corresponding to the
minimum quality measure value}
}
\author{Bjørn-Helge Mevik and Kristian Hovde Liland}
\seealso{
\code{\linkS4class{baselineAlgTest}},\code{\linkS4class{predictionTest}}
}
\keyword{baseline}
\keyword{spectra}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{split_train_test}
\alias{split_train_test}
\title{Splits a data frame into train and test sets.}
\usage{
split_train_test(df, pctTrain)
}
\arguments{
\item{df}{a data frame.}
\item{pctTrain}{numeric value that specifies the percentage of rows to be included in the train set. The remaining rows are added to the test set.}
}
\value{
a list with the first element being the train set and the second element the test set.
}
\description{
Utility function to randomly split a data frame into train and test sets.
}
\examples{
set.seed(1234)
dataset <- friedman1
nrow(dataset) # print number of rows
split1 <- split_train_test(dataset, pctTrain = 70) # select 70\% for training
nrow(split1$trainset) # number of rows of the train set
nrow(split1$testset) # number of rows of the test set
head(split1$trainset) # display first rows of train set
}
| /man/split_train_test.Rd | no_license | cran/ssr | R | false | true | 989 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{split_train_test}
\alias{split_train_test}
\title{Splits a data frame into train and test sets.}
\usage{
split_train_test(df, pctTrain)
}
\arguments{
\item{df}{a data frame.}
\item{pctTrain}{numeric value that specifies the percentage of rows to be included in the train set. The remaining rows are added to the test set.}
}
\value{
a list with the first element being the train set and the second element the test set.
}
\description{
Utility function to randomly split a data frame into train and test sets.
}
\examples{
set.seed(1234)
dataset <- friedman1
nrow(dataset) # print number of rows
split1 <- split_train_test(dataset, pctTrain = 70) # select 70\% for training
nrow(split1$trainset) # number of rows of the train set
nrow(split1$testset) # number of rows of the test set
head(split1$trainset) # display first rows of train set
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grade_boundaries.R
\docType{data}
\name{grade_boundaries}
\alias{grade_boundaries}
\title{University of Toronto letter grades and minimum number grades to achieve them}
\format{
A data frame with 13 rows and 3 columns
\describe{
\item{letter_grade}{text}
\item{number_grade}{Minimum number grade required to obtain that letter grade, numeric}
\item{grade_points}{Contribution to grade point average, numeric}
}
}
\source{
\url{https://www.utsc.utoronto.ca/registrar/u-t-grading-scheme}
}
\usage{
grade_boundaries
}
\description{
University of Toronto letter grades and minimum number grades to achieve them
}
\keyword{datasets}
| /man/grade_boundaries.Rd | permissive | nxskok/make.legal.grades | R | false | true | 707 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grade_boundaries.R
\docType{data}
\name{grade_boundaries}
\alias{grade_boundaries}
\title{University of Toronto letter grades and minimum number grades to achieve them}
\format{
A data frame with 13 rows and 3 columns
\describe{
\item{letter_grade}{text}
\item{number_grade}{Minimum number grade required to obtain that letter grade, numeric}
\item{grade_points}{Contribution to grade point average, numeric}
}
}
\source{
\url{https://www.utsc.utoronto.ca/registrar/u-t-grading-scheme}
}
\usage{
grade_boundaries
}
\description{
University of Toronto letter grades and minimum number grades to achieve them
}
\keyword{datasets}
|
#!/usr/bin/env Rscript
# Wind, ice, pressure, temperature and precip polyhedra.
# Just do the rendering - use pre-calculated streamlines
# Render just one timestep - parallelise on SPICE.
# Sub-hourly version - fudge streamlines
library(GSDF.TWCR)
library(GSDF.WeatherMap)
library(grid)
library(getopt)
opt = getopt(matrix(c(
'year', 'y', 2, "integer",
'month', 'm', 2, "integer",
'day', 'd', 2, "integer",
'hour', 'h', 2, "numeric",
'version','v', 2, "character"
), byrow=TRUE, ncol=4))
if ( is.null(opt$year) ) { stop("Year not specified") }
if ( is.null(opt$month) ) { stop("Month not specified") }
if ( is.null(opt$day) ) { stop("Day not specified") }
if ( is.null(opt$hour) ) { stop("Hour not specified") }
if ( is.null(opt$version) ){ opt$version='4.1.8' }
member=1
fog.threshold<-exp(1)
Imagedir<-sprintf("%s/images/TWCR_multivariate.V3vV2c.nf",Sys.getenv('SCRATCH'))
Stream.dir.V3<-sprintf("%s/images/TWCR_multivariate.V3",Sys.getenv('SCRATCH'))
Stream.dir.V2c<-sprintf("%s/images/TWCR_multivariate.V2c",Sys.getenv('SCRATCH'))
if(!file.exists(Imagedir)) dir.create(Imagedir,recursive=TRUE)
Options<-WeatherMap.set.option(NULL)
Options<-WeatherMap.set.option(Options,'land.colour',rgb(100,100,100,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'sea.colour',rgb(150,150,150,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'ice.colour',rgb(250,250,250,255,
maxColorValue=255))
range<-85
aspect<-8/9
Options<-WeatherMap.set.option(Options,'lat.min',range*-1)
Options<-WeatherMap.set.option(Options,'lat.max',range)
Options<-WeatherMap.set.option(Options,'lon.min',range*aspect*-1)
Options<-WeatherMap.set.option(Options,'lon.max',range*aspect)
Options<-WeatherMap.set.option(Options,'pole.lon',173)
Options<-WeatherMap.set.option(Options,'pole.lat',36)
Options$mslp.base=0#101325 # Base value for anomalies
Options$mslp.range=50000 # Anomaly for max contour
Options$mslp.step=500 # Smaller -> more contours
Options$mslp.tpscale=500 # Smaller -> contours less transparent
Options$mslp.lwd=1
Options$precip.colour=c(0,0.2,0)
Options$label.xp=0.995
get.member.at.hour<-function(variable,year,month,day,hour,member,version='4.1.8') {
t<-TWCR.get.members.slice.at.hour(variable,year,month,day,
hour,version=version)
t<-GSDF.select.from.1d(t,'ensemble',member)
gc()
return(t)
}
WeatherMap.streamline.getGC<-function(value,transparency=NA,status=1,Options) {
alpha<-c(10,50,150,255)[min(status,4)]
return(gpar(col=rgb(125,125,125,alpha,maxColorValue=255),
fill=rgb(125,125,125,alpha,maxColorValue=255),lwd=Options$wind.vector.lwd))
}
assignInNamespace("WeatherMap.streamline.getGC",WeatherMap.streamline.getGC, ns="GSDF.WeatherMap")
Draw.temperature<-function(temperature,Options,Trange=1) {
Options.local<-Options
Options.local$fog.min.transparency<-0.5
tplus<-temperature
tplus$data[]<-pmax(0,pmin(Trange,tplus$data))/Trange
Options.local$fog.colour<-c(1,0,0)
WeatherMap.draw.fog(tplus,Options.local)
tminus<-temperature
tminus$data[]<-tminus$data*-1
tminus$data[]<-pmax(0,pmin(Trange,tminus$data))/Trange
Options.local$fog.colour<-c(0,0,1)
WeatherMap.draw.fog(tminus,Options.local)
}
Draw.pressure<-function(mslp,Options,colour=c(0,0,0)) {
M<-GSDF.WeatherMap:::WeatherMap.rotate.pole(mslp,Options)
M<-GSDF:::GSDF.pad.longitude(M) # Extras for periodic boundary conditions
lats<-M$dimensions[[GSDF.find.dimension(M,'lat')]]$values
longs<-M$dimensions[[GSDF.find.dimension(M,'lon')]]$values
# Need particular data format for contourLines
maxl<-Options$lon.max+(longs[2]-longs[1])
if(lats[2]<lats[1] || longs[2]<longs[1] || max(longs) > maxl ) {
if(lats[2]<lats[1]) lats<-rev(lats)
if(longs[2]<longs[1]) longs<-rev(longs)
longs[longs>maxl]<-longs[longs>maxl]-(maxl*2)
longs<-sort(longs)
M2<-M
M2$dimensions[[GSDF.find.dimension(M,'lat')]]$values<-lats
M2$dimensions[[GSDF.find.dimension(M,'lon')]]$values<-longs
M<-GSDF.regrid.2d(M,M2)
}
z<-matrix(data=M$data,nrow=length(longs),ncol=length(lats))
contour.levels<-seq(Options$mslp.base-Options$mslp.range,
Options$mslp.base+Options$mslp.range,
Options$mslp.step)
lines<-contourLines(longs,lats,z,
levels=contour.levels)
if(!is.na(lines) && length(lines)>0) {
for(i in seq(1,length(lines))) {
tp<-min(1,(abs(lines[[i]]$level-Options$mslp.base)/
Options$mslp.tpscale))
lt<-2
lwd<-1
if(lines[[i]]$level<=Options$mslp.base) {
lt<-1
lwd<-1
}
gp<-gpar(col=rgb(colour[1],colour[2],colour[3],tp),
lwd=Options$mslp.lwd*lwd,lty=lt)
res<-tryCatch({
grid.xspline(x=unit(lines[[i]]$x,'native'),
y=unit(lines[[i]]$y,'native'),
shape=1,
gp=gp)
}, warning = function(w) {
print(w)
}, error = function(e) {
print(e)
}, finally = {
# Do nothing
})
}
}
}
get.streamlines<-function(year,month,day,hour,dir) {
sf.name<-sprintf("%s/streamlines.%04d-%02d-%02d:%02d.rd",
dir,year,month,day,as.integer(hour))
if(file.exists(sf.name) && file.info(sf.name)$size>5000) {
load(sf.name)
hour.fraction<-hour-as.integer(hour)
# Fudge the streamlines for the fractional hour
if(hour.fraction>0) {
move.scale<-0.033*Options$wind.vector.points/Options$wind.vector.scale
move.scale<-move.scale*Options$wind.vector.move.scale*view.scale
for(p in seq(1,Options$wind.vector.points)) {
s[['x']][,p]<-s[['x']][,p]+(s[['x']][,2]-s[['x']][,1])*move.scale*hour.fraction
s[['y']][,p]<-s[['y']][,p]+(s[['y']][,2]-s[['y']][,1])*move.scale*hour.fraction
}
}
return(s)
} else {
stop(sprintf("No streamlines available for %04d-%02d-%02d:%02d",
year,month,day,as.integer(hour)))
}
}
plot.hour<-function(year,month,day,hour) {
image.name<-sprintf("%04d-%02d-%02d:%02d.%02d.png",year,month,day,as.integer(hour),
as.integer((hour%%1)*100))
ifile.name<-sprintf("%s/%s",Imagedir,image.name)
if(file.exists(ifile.name) && file.info(ifile.name)$size>0) return()
png(ifile.name,
width=1080*16/9,
height=1080,
bg='white',
pointsize=24,
type='cairo-png')
base.gp<-gpar(family='Helvetica',font=1,col='black')
pushViewport(viewport(x=unit(0.75,'npc'),y=unit(0.5,'npc'),
width=unit(0.5,'npc'),height=unit(1,'npc'),
clip='on'))
grid.polygon(x=unit(c(0,1,1,0),'npc'),
y=unit(c(0,0,1,1),'npc'),
gp=gpar(fill=Options$sea.colour))
s<-get.streamlines(opt$year,opt$month,opt$day,opt$hour,Stream.dir.V3)
plot.hour.V3(year,month,day,hour,s)
popViewport()
pushViewport(viewport(x=unit(0.25,'npc'),y=unit(0.5,'npc'),
width=unit(0.5,'npc'),height=unit(1,'npc'),
clip='on'))
grid.polygon(x=unit(c(0,1,1,0),'npc'),
y=unit(c(0,0,1,1),'npc'),
gp=gpar(fill=Options$sea.colour))
s<-get.streamlines(opt$year,opt$month,opt$day,opt$hour,Stream.dir.V2c)
plot.hour.V2c(year,month,day,hour,s)
popViewport()
grid.lines(x=unit(c(0.5,0.5),'npc'),
y=unit(c(0,1),'npc'),
gp=gpar(col=rgb(1,1,0.5),lwd=2))
dev.off()
}
plot.hour.V3<-function(year,month,day,hour,streamlines) {
t2m<-get.member.at.hour('air.2m',year,month,day,hour,member,version=opt$version)
t2n<-TWCR.get.slice.at.hour('air.2m',year,month,day,hour,version='4.0.0',type='normal')
t2n<-GSDF.regrid.2d(t2n,t2m)
t2m$data[]<-as.vector(t2m$data)-as.vector(t2n$data)
pre<-get.member.at.hour('prmsl',year,month,day,hour,member,version=opt$version)
prn<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,version='4.0.0',type='normal')
prn<-GSDF.regrid.2d(prn,pre)
pre$data[]<-as.vector(pre$data)-as.vector(prn$data)
icec<-get.member.at.hour('icec',year,month,day,hour,member,version=opt$version)
prate<-get.member.at.hour('prate',year,month,day,hour,member,version=opt$version)
lon.min<-Options$lon.min
if(!is.null(Options$vp.lon.min)) lon.min<-Options$vp.lon.min
lon.max<-Options$lon.max
if(!is.null(Options$vp.lon.max)) lon.max<-Options$vp.lon.max
lat.min<-Options$lat.min
if(!is.null(Options$vp.lat.min)) lat.min<-Options$vp.lat.min
lat.max<-Options$lat.max
if(!is.null(Options$vp.lat.max)) lat.max<-Options$vp.lat.max
pushViewport(dataViewport(c(lon.min,lon.max),c(lat.min,lat.max),
extension=0))
ip<-WeatherMap.rectpoints(Options$ice.points,Options)
WeatherMap.draw.ice(ip$lat,ip$lon,icec,Options)
WeatherMap.draw.land(land,Options)
WeatherMap.draw.streamlines(streamlines,Options)
Draw.temperature(t2m,Options,Trange=10)
WeatherMap.draw.precipitation(prate,Options)
Draw.pressure(pre,Options,colour=c(0,0,0))
Options$label=sprintf("%04d-%02d-%02d:%02d",year,month,day,as.integer(hour))
WeatherMap.draw.label(Options)
popViewport()
}
plot.hour.V2c<-function(year,month,day,hour,streamlines) {
t2m<-get.member.at.hour('air.2m',year,month,day,hour,member,version='3.5.1')
t2n<-TWCR.get.slice.at.hour('air.2m',year,month,day,hour,version='3.4.1',type='normal')
t2n<-GSDF.regrid.2d(t2n,t2m)
t2m$data[]<-as.vector(t2m$data)-as.vector(t2n$data)
pre<-get.member.at.hour('prmsl',year,month,day,hour,member,version='3.5.1')
prn<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,version='3.4.1',type='normal')
prn<-GSDF.regrid.2d(prn,pre)
pre$data[]<-as.vector(pre$data)-as.vector(prn$data)
icec<-TWCR.get.slice.at.hour('icec',year,month,day,hour,version='3.5.1')
prate<-get.member.at.hour('prate',year,month,day,hour,member,version='3.5.1')
lon.min<-Options$lon.min
if(!is.null(Options$vp.lon.min)) lon.min<-Options$vp.lon.min
lon.max<-Options$lon.max
if(!is.null(Options$vp.lon.max)) lon.max<-Options$vp.lon.max
lat.min<-Options$lat.min
if(!is.null(Options$vp.lat.min)) lat.min<-Options$vp.lat.min
lat.max<-Options$lat.max
if(!is.null(Options$vp.lat.max)) lat.max<-Options$vp.lat.max
pushViewport(dataViewport(c(lon.min,lon.max),c(lat.min,lat.max),
extension=0))
ip<-WeatherMap.rectpoints(Options$ice.points,Options)
WeatherMap.draw.ice(ip$lat,ip$lon,icec,Options)
WeatherMap.draw.land(land,Options)
WeatherMap.draw.streamlines(streamlines,Options)
Draw.temperature(t2m,Options,Trange=10)
WeatherMap.draw.precipitation(prate,Options)
Draw.pressure(pre,Options,colour=c(0,0,0))
Options$label=sprintf("%04d-%02d-%02d:%02d",year,month,day,as.integer(hour))
WeatherMap.draw.label(Options)
popViewport()
}
land<-WeatherMap.get.land(Options)
plot.hour(opt$year,opt$month,opt$day,opt$hour)
| /20CRV3/V3vV2c/multivariate/full_single.R | no_license | philip-brohan/weather.case.studies | R | false | false | 11,589 | r | #!/usr/bin/env Rscript
# Wind, ice, pressure, temperature and precip polyhedra.
# Just do the rendering - use pre-calculated streamlines
# Render just one timestep - parallelise on SPICE.
# Sub-hourly version - fudge streamlines
library(GSDF.TWCR)
library(GSDF.WeatherMap)
library(grid)
library(getopt)
opt = getopt(matrix(c(
'year', 'y', 2, "integer",
'month', 'm', 2, "integer",
'day', 'd', 2, "integer",
'hour', 'h', 2, "numeric",
'version','v', 2, "character"
), byrow=TRUE, ncol=4))
if ( is.null(opt$year) ) { stop("Year not specified") }
if ( is.null(opt$month) ) { stop("Month not specified") }
if ( is.null(opt$day) ) { stop("Day not specified") }
if ( is.null(opt$hour) ) { stop("Hour not specified") }
if ( is.null(opt$version) ){ opt$version='4.1.8' }
member=1
fog.threshold<-exp(1)
Imagedir<-sprintf("%s/images/TWCR_multivariate.V3vV2c.nf",Sys.getenv('SCRATCH'))
Stream.dir.V3<-sprintf("%s/images/TWCR_multivariate.V3",Sys.getenv('SCRATCH'))
Stream.dir.V2c<-sprintf("%s/images/TWCR_multivariate.V2c",Sys.getenv('SCRATCH'))
if(!file.exists(Imagedir)) dir.create(Imagedir,recursive=TRUE)
Options<-WeatherMap.set.option(NULL)
Options<-WeatherMap.set.option(Options,'land.colour',rgb(100,100,100,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'sea.colour',rgb(150,150,150,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'ice.colour',rgb(250,250,250,255,
maxColorValue=255))
range<-85
aspect<-8/9
Options<-WeatherMap.set.option(Options,'lat.min',range*-1)
Options<-WeatherMap.set.option(Options,'lat.max',range)
Options<-WeatherMap.set.option(Options,'lon.min',range*aspect*-1)
Options<-WeatherMap.set.option(Options,'lon.max',range*aspect)
Options<-WeatherMap.set.option(Options,'pole.lon',173)
Options<-WeatherMap.set.option(Options,'pole.lat',36)
Options$mslp.base=0#101325 # Base value for anomalies
Options$mslp.range=50000 # Anomaly for max contour
Options$mslp.step=500 # Smaller -> more contours
Options$mslp.tpscale=500 # Smaller -> contours less transparent
Options$mslp.lwd=1
Options$precip.colour=c(0,0.2,0)
Options$label.xp=0.995
# Fetch the field for one variable at one time for a single ensemble member.
# Wraps TWCR.get.members.slice.at.hour (which returns the full ensemble) and
# selects the requested member along the 'ensemble' dimension.
get.member.at.hour<-function(variable,year,month,day,hour,member,version='4.1.8') {
t<-TWCR.get.members.slice.at.hour(variable,year,month,day,
hour,version=version)
# Keep only the requested member from the ensemble dimension
t<-GSDF.select.from.1d(t,'ensemble',member)
# gc() presumably to release the discarded full-ensemble slice promptly --
# TODO confirm this is still needed
gc()
return(t)
}
# Graphics parameters for drawing a wind streamline: a fixed mid-grey whose
# opacity steps up with the streamline's status (capped at the 4th level).
# 'value' and 'transparency' are accepted for interface compatibility with
# the package function this replaces, but are not used.
WeatherMap.streamline.getGC <- function(value, transparency = NA, status = 1, Options) {
  alpha.by.status <- c(10, 50, 150, 255)
  grey <- rgb(125, 125, 125, alpha.by.status[min(status, 4)], maxColorValue = 255)
  gpar(col = grey, fill = grey, lwd = Options$wind.vector.lwd)
}
assignInNamespace("WeatherMap.streamline.getGC",WeatherMap.streamline.getGC, ns="GSDF.WeatherMap")
# Overlay temperature anomalies as semi-transparent fog: warm anomalies in
# red, cold anomalies in blue. Each side is clamped to [0, Trange] and
# rescaled to [0, 1] before rendering; fog.min.transparency is fixed at 0.5.
Draw.temperature <- function(temperature, Options, Trange = 1) {
  opts <- Options
  opts$fog.min.transparency <- 0.5
  draw.one.sign <- function(sign, colour) {
    layer <- temperature
    layer$data[] <- pmax(0, pmin(Trange, layer$data * sign)) / Trange
    opts$fog.colour <- colour
    WeatherMap.draw.fog(layer, opts)
  }
  draw.one.sign(1, c(1, 0, 0))   # positive anomalies -> red
  draw.one.sign(-1, c(0, 0, 1))  # negative anomalies -> blue
}
# Draw MSLP anomaly contours on the current 'grid' viewport.
# The field is rotated to the map's pole, padded across the dateline, and
# regridded if necessary so contourLines() receives strictly increasing
# axis coordinates. Contours at/below Options$mslp.base are solid, those
# above are dashed; transparency scales with distance from the base value
# (controlled by Options$mslp.tpscale). Drawing errors/warnings from
# grid.xspline are caught and printed so one bad contour cannot abort the
# whole frame.
Draw.pressure <- function(mslp, Options, colour = c(0, 0, 0)) {
  M <- GSDF.WeatherMap:::WeatherMap.rotate.pole(mslp, Options)
  M <- GSDF:::GSDF.pad.longitude(M)  # extras for periodic boundary conditions
  lats <- M$dimensions[[GSDF.find.dimension(M, 'lat')]]$values
  longs <- M$dimensions[[GSDF.find.dimension(M, 'lon')]]$values
  # contourLines() needs monotonically increasing axes within the map range
  maxl <- Options$lon.max + (longs[2] - longs[1])
  if (lats[2] < lats[1] || longs[2] < longs[1] || max(longs) > maxl) {
    if (lats[2] < lats[1]) lats <- rev(lats)
    if (longs[2] < longs[1]) longs <- rev(longs)
    longs[longs > maxl] <- longs[longs > maxl] - (maxl * 2)
    longs <- sort(longs)
    M2 <- M
    M2$dimensions[[GSDF.find.dimension(M, 'lat')]]$values <- lats
    M2$dimensions[[GSDF.find.dimension(M, 'lon')]]$values <- longs
    M <- GSDF.regrid.2d(M, M2)
  }
  z <- matrix(data = M$data, nrow = length(longs), ncol = length(lats))
  contour.levels <- seq(Options$mslp.base - Options$mslp.range,
                        Options$mslp.base + Options$mslp.range,
                        Options$mslp.step)
  lines <- contourLines(longs, lats, z, levels = contour.levels)
  # BUG FIX: the original guard was !is.na(lines) && length(lines)>0, but
  # is.na() on a non-empty list returns one value per element, and '&&'
  # errors on conditions of length != 1 from R 4.3. contourLines() returns
  # an empty list when there is nothing to draw, so a length check is the
  # correct (and sufficient) guard.
  if (length(lines) > 0) {
    for (i in seq_along(lines)) {
      tp <- min(1, (abs(lines[[i]]$level - Options$mslp.base) /
                      Options$mslp.tpscale))
      lt <- 2
      lwd <- 1
      if (lines[[i]]$level <= Options$mslp.base) {
        lt <- 1
        lwd <- 1
      }
      gp <- gpar(col = rgb(colour[1], colour[2], colour[3], tp),
                 lwd = Options$mslp.lwd * lwd, lty = lt)
      tryCatch({
        grid.xspline(x = unit(lines[[i]]$x, 'native'),
                     y = unit(lines[[i]]$y, 'native'),
                     shape = 1,
                     gp = gp)
      }, warning = function(w) {
        print(w)
      }, error = function(e) {
        print(e)
      })
    }
  }
}
# Load precomputed streamlines for one timestep from 'dir'.
# The .rd file is expected to load() an object named 's' (with matrix
# components s[['x']] and s[['y']], one column per streamline point) into
# this function's environment. For fractional hours the points are
# extrapolated ("fudged") along each streamline's initial segment so that
# sub-hourly animation frames move smoothly.
# NOTE(review): 'view.scale' is not defined anywhere in this script --
# presumably a global set elsewhere; the fractional-hour branch would fail
# without it. Verify before running with non-integer hours.
get.streamlines<-function(year,month,day,hour,dir) {
sf.name<-sprintf("%s/streamlines.%04d-%02d-%02d:%02d.rd",
dir,year,month,day,as.integer(hour))
# Guard against missing or truncated streamline files (size threshold)
if(file.exists(sf.name) && file.info(sf.name)$size>5000) {
load(sf.name)
hour.fraction<-hour-as.integer(hour)
# Fudge the streamlines for the fractional hour
if(hour.fraction>0) {
move.scale<-0.033*Options$wind.vector.points/Options$wind.vector.scale
move.scale<-move.scale*Options$wind.vector.move.scale*view.scale
for(p in seq(1,Options$wind.vector.points)) {
s[['x']][,p]<-s[['x']][,p]+(s[['x']][,2]-s[['x']][,1])*move.scale*hour.fraction
s[['y']][,p]<-s[['y']][,p]+(s[['y']][,2]-s[['y']][,1])*move.scale*hour.fraction
}
}
return(s)
} else {
stop(sprintf("No streamlines available for %04d-%02d-%02d:%02d",
year,month,day,as.integer(hour)))
}
}
# Render one comparison frame as a PNG: 20CR V3 in the right half of the
# image, V2c in the left half, separated by a yellow vertical line.
# Skips the frame entirely if a non-empty image file already exists, so
# the script can be re-run / parallelised safely.
plot.hour<-function(year,month,day,hour) {
# Image name encodes date, whole hour and fractional hour (hundredths)
image.name<-sprintf("%04d-%02d-%02d:%02d.%02d.png",year,month,day,as.integer(hour),
as.integer((hour%%1)*100))
ifile.name<-sprintf("%s/%s",Imagedir,image.name)
if(file.exists(ifile.name) && file.info(ifile.name)$size>0) return()
png(ifile.name,
width=1080*16/9,
height=1080,
bg='white',
pointsize=24,
type='cairo-png')
base.gp<-gpar(family='Helvetica',font=1,col='black')
# Right half: V3 panel, clipped, with a sea-coloured background
pushViewport(viewport(x=unit(0.75,'npc'),y=unit(0.5,'npc'),
width=unit(0.5,'npc'),height=unit(1,'npc'),
clip='on'))
grid.polygon(x=unit(c(0,1,1,0),'npc'),
y=unit(c(0,0,1,1),'npc'),
gp=gpar(fill=Options$sea.colour))
# NOTE(review): streamlines are fetched with the global opt$* values
# rather than this function's arguments -- identical only because the
# script calls plot.hour(opt$year, ...); consider using the arguments.
s<-get.streamlines(opt$year,opt$month,opt$day,opt$hour,Stream.dir.V3)
plot.hour.V3(year,month,day,hour,s)
popViewport()
# Left half: V2c panel, same treatment
pushViewport(viewport(x=unit(0.25,'npc'),y=unit(0.5,'npc'),
width=unit(0.5,'npc'),height=unit(1,'npc'),
clip='on'))
grid.polygon(x=unit(c(0,1,1,0),'npc'),
y=unit(c(0,0,1,1),'npc'),
gp=gpar(fill=Options$sea.colour))
s<-get.streamlines(opt$year,opt$month,opt$day,opt$hour,Stream.dir.V2c)
plot.hour.V2c(year,month,day,hour,s)
popViewport()
# Yellow divider between the two panels
grid.lines(x=unit(c(0.5,0.5),'npc'),
y=unit(c(0,1),'npc'),
gp=gpar(col=rgb(1,1,0.5),lwd=2))
dev.off()
}
# Draw the 20CR V3 panel into the current viewport: ice, land, wind
# streamlines, 2m-temperature anomaly fog, precipitation and MSLP anomaly
# contours, plus a date label. Temperature and pressure are plotted as
# anomalies against the version 4.0.0 climatological 'normal' fields.
# Relies on globals: 'member', 'opt', 'Options', 'land'.
plot.hour.V3<-function(year,month,day,hour,streamlines) {
# 2m temperature anomaly: member field minus regridded normal
t2m<-get.member.at.hour('air.2m',year,month,day,hour,member,version=opt$version)
t2n<-TWCR.get.slice.at.hour('air.2m',year,month,day,hour,version='4.0.0',type='normal')
t2n<-GSDF.regrid.2d(t2n,t2m)
t2m$data[]<-as.vector(t2m$data)-as.vector(t2n$data)
# MSLP anomaly, same construction
pre<-get.member.at.hour('prmsl',year,month,day,hour,member,version=opt$version)
prn<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,version='4.0.0',type='normal')
prn<-GSDF.regrid.2d(prn,pre)
pre$data[]<-as.vector(pre$data)-as.vector(prn$data)
icec<-get.member.at.hour('icec',year,month,day,hour,member,version=opt$version)
prate<-get.member.at.hour('prate',year,month,day,hour,member,version=opt$version)
# Viewport bounds: use vp.* overrides when present, else the map bounds
lon.min<-Options$lon.min
if(!is.null(Options$vp.lon.min)) lon.min<-Options$vp.lon.min
lon.max<-Options$lon.max
if(!is.null(Options$vp.lon.max)) lon.max<-Options$vp.lon.max
lat.min<-Options$lat.min
if(!is.null(Options$vp.lat.min)) lat.min<-Options$vp.lat.min
lat.max<-Options$lat.max
if(!is.null(Options$vp.lat.max)) lat.max<-Options$vp.lat.max
pushViewport(dataViewport(c(lon.min,lon.max),c(lat.min,lat.max),
extension=0))
# Draw order matters: ice and land first, label last
ip<-WeatherMap.rectpoints(Options$ice.points,Options)
WeatherMap.draw.ice(ip$lat,ip$lon,icec,Options)
WeatherMap.draw.land(land,Options)
WeatherMap.draw.streamlines(streamlines,Options)
Draw.temperature(t2m,Options,Trange=10)
WeatherMap.draw.precipitation(prate,Options)
Draw.pressure(pre,Options,colour=c(0,0,0))
Options$label=sprintf("%04d-%02d-%02d:%02d",year,month,day,as.integer(hour))
WeatherMap.draw.label(Options)
popViewport()
}
# Draw the 20CR V2c panel; mirrors plot.hour.V3 but reads version 3.5.1
# member data with anomalies against the 3.4.1 'normal' fields.
# Relies on globals: 'member', 'Options', 'land'.
plot.hour.V2c<-function(year,month,day,hour,streamlines) {
t2m<-get.member.at.hour('air.2m',year,month,day,hour,member,version='3.5.1')
t2n<-TWCR.get.slice.at.hour('air.2m',year,month,day,hour,version='3.4.1',type='normal')
t2n<-GSDF.regrid.2d(t2n,t2m)
t2m$data[]<-as.vector(t2m$data)-as.vector(t2n$data)
pre<-get.member.at.hour('prmsl',year,month,day,hour,member,version='3.5.1')
prn<-TWCR.get.slice.at.hour('prmsl',year,month,day,hour,version='3.4.1',type='normal')
prn<-GSDF.regrid.2d(prn,pre)
pre$data[]<-as.vector(pre$data)-as.vector(prn$data)
# NOTE(review): unlike the V3 panel, ice cover here comes from
# TWCR.get.slice.at.hour rather than a single member -- confirm intentional.
icec<-TWCR.get.slice.at.hour('icec',year,month,day,hour,version='3.5.1')
prate<-get.member.at.hour('prate',year,month,day,hour,member,version='3.5.1')
# Viewport bounds: use vp.* overrides when present, else the map bounds
lon.min<-Options$lon.min
if(!is.null(Options$vp.lon.min)) lon.min<-Options$vp.lon.min
lon.max<-Options$lon.max
if(!is.null(Options$vp.lon.max)) lon.max<-Options$vp.lon.max
lat.min<-Options$lat.min
if(!is.null(Options$vp.lat.min)) lat.min<-Options$vp.lat.min
lat.max<-Options$lat.max
if(!is.null(Options$vp.lat.max)) lat.max<-Options$vp.lat.max
pushViewport(dataViewport(c(lon.min,lon.max),c(lat.min,lat.max),
extension=0))
ip<-WeatherMap.rectpoints(Options$ice.points,Options)
WeatherMap.draw.ice(ip$lat,ip$lon,icec,Options)
WeatherMap.draw.land(land,Options)
WeatherMap.draw.streamlines(streamlines,Options)
Draw.temperature(t2m,Options,Trange=10)
WeatherMap.draw.precipitation(prate,Options)
Draw.pressure(pre,Options,colour=c(0,0,0))
Options$label=sprintf("%04d-%02d-%02d:%02d",year,month,day,as.integer(hour))
WeatherMap.draw.label(Options)
popViewport()
}
land<-WeatherMap.get.land(Options)
plot.hour(opt$year,opt$month,opt$day,opt$hour)
|
library(Rdimtools)
### Name: do.lsda
### Title: Locality Sensitive Discriminant Analysis
### Aliases: do.lsda
### ** Examples
## create a data matrix with clear difference
# Three 10x4 Gaussian clusters centred near -20, 0 and +20
x1 = matrix(rnorm(4*10), nrow=10)-20
x2 = matrix(rnorm(4*10), nrow=10)
x3 = matrix(rnorm(4*10), nrow=10)+20
X = rbind(x1, x2, x3)
label = c(rep(1,10), rep(2,10), rep(3,10))
## try different affinity matrices
# k1/k2 set the neighbourhood sizes used to build the affinity matrices
# (see ?do.lsda)
out1 = do.lsda(X, label, k1=2, k2=2)
out2 = do.lsda(X, label, k1=5, k2=5)
out3 = do.lsda(X, label, k1=10, k2=10)
## visualize
# Plot the three 2-D embeddings side by side
par(mfrow=c(1,3))
plot(out1$Y[,1], out1$Y[,2], main="nbd size 2")
plot(out2$Y[,1], out2$Y[,2], main="nbd size 5")
plot(out3$Y[,1], out3$Y[,2], main="nbd size 10")
| /data/genthat_extracted_code/Rdimtools/examples/linear_LSDA.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 680 | r | library(Rdimtools)
### Name: do.lsda
### Title: Locality Sensitive Discriminant Analysis
### Aliases: do.lsda
### ** Examples
## create a data matrix with clear difference
x1 = matrix(rnorm(4*10), nrow=10)-20
x2 = matrix(rnorm(4*10), nrow=10)
x3 = matrix(rnorm(4*10), nrow=10)+20
X = rbind(x1, x2, x3)
label = c(rep(1,10), rep(2,10), rep(3,10))
## try different affinity matrices
out1 = do.lsda(X, label, k1=2, k2=2)
out2 = do.lsda(X, label, k1=5, k2=5)
out3 = do.lsda(X, label, k1=10, k2=10)
## visualize
par(mfrow=c(1,3))
plot(out1$Y[,1], out1$Y[,2], main="nbd size 2")
plot(out2$Y[,1], out2$Y[,2], main="nbd size 5")
plot(out3$Y[,1], out3$Y[,2], main="nbd size 10")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/present_value.R
\name{present_value}
\alias{present_value}
\title{Calculate present value of an investment after one or more periods}
\usage{
present_value(future_value, rate, periods)
}
\arguments{
\item{future_value}{Value of money at maturity.}
\item{rate}{Interest rate for money.}
\item{periods}{Number of periods.}
}
\value{
Present value (PV) of the investment.
}
\description{
Calculate present value of an investment after one or more periods
}
\examples{
present_value(1000, 0.05, 3)
present_value(1000, 0.07, 2)
}
| /man/present_value.Rd | permissive | bclark86/clarklytics | R | false | true | 605 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/present_value.R
\name{present_value}
\alias{present_value}
\title{Calculate present value of an investment after one or more periods}
\usage{
present_value(future_value, rate, periods)
}
\arguments{
\item{future_value}{Value of money at maturity.}
\item{rate}{Interest rate for money.}
\item{periods}{Number of periods.}
}
\value{
Present value (PV) of the investment.
}
\description{
Calculate present value of an investment after one or more periods
}
\examples{
present_value(1000, 0.05, 3)
present_value(1000, 0.07, 2)
}
|
#' @include SQLiteResult.R
NULL
#' @rdname SQLiteConnection-class
#' @export
setMethod("dbSendQuery", c("SQLiteConnection", "character"),
function(conn, statement, params = NULL, ...) {
# SQLite expects UTF-8 encoded SQL
statement <- enc2utf8(statement)
# A connection holds at most one active result set: close any result that
# is still open before creating the new one.
if (!is.null(conn@ref$result)) {
warning("Closing open result set, pending rows", call. = FALSE)
dbClearResult(conn@ref$result)
stopifnot(is.null(conn@ref$result))
}
rs <- new("SQLiteResult",
sql = statement,
ptr = result_create(conn@ptr, statement),
conn = conn
)
# If parameter binding fails below, release the freshly created result;
# the handler is cancelled once binding has succeeded.
on.exit(dbClearResult(rs), add = TRUE)
if (!is.null(params)) {
dbBind(rs, params)
}
on.exit(NULL, add = FALSE)
# Register as the connection's single active result set
conn@ref$result <- rs
rs
}
)
#' @rdname SQLiteResult-class
#' @export
setMethod("dbBind", "SQLiteResult", function(res, params, ...) {
# Thin wrapper around db_bind(); the user-facing dbBind() is strict about
# extra named parameters (allow_named_superset = FALSE).
db_bind(res, as.list(params), ..., allow_named_superset = FALSE)
})
# Bind parameter values to a prepared statement's placeholders.
# SQLite placeholders come in three styles -- anonymous (""), numbered
# (all-digit names) and named -- as reported by
# result_get_placeholder_names(); styles must not be mixed in one query.
# 'params' must be unnamed for anonymous/numbered placeholders and named
# for named placeholders. When 'allow_named_superset' is TRUE, unused named
# parameters only trigger a warning instead of an error.
db_bind <- function(res, params, ..., allow_named_superset) {
placeholder_names <- result_get_placeholder_names(res@ptr)
# Classify each placeholder: "" = anonymous, digits = numbered, rest = named
empty <- placeholder_names == ""
numbers <- grepl("^[1-9][0-9]*$", placeholder_names)
names <- !(empty | numbers)
if (any(empty) && !all(empty)) {
stopc("Cannot mix anonymous and named/numbered placeholders in query")
}
if (any(numbers) && !all(numbers)) {
stopc("Cannot mix numbered and named placeholders in query")
}
if (any(empty) || any(numbers)) {
if (!is.null(names(params))) {
stopc("Cannot use named parameters for anonymous/numbered placeholders")
}
} else {
# Named placeholders: match parameter values by name and fail on gaps ...
param_indexes <- match(placeholder_names, names(params))
if (any(is.na(param_indexes))) {
stopc(
"No value given for placeholder ",
paste0(placeholder_names[is.na(param_indexes)], collapse = ", ")
)
}
# ... and on leftovers (warning or error depending on the caller)
unmatched_param_indexes <- setdiff(seq_along(params), param_indexes)
if (length(unmatched_param_indexes) > 0L) {
if (allow_named_superset) errorc <- warningc
else errorc <- stopc
errorc(
"Named parameters not used in query: ",
paste0(names(params)[unmatched_param_indexes], collapse = ", ")
)
}
# Reorder values into placeholder order; names are no longer needed
params <- unname(params[param_indexes])
}
# Coerce values into forms the C binding layer accepts
params <- factor_to_string(params, warn = TRUE)
params <- string_to_utf8(params)
result_bind(res@ptr, params)
invisible(res)
}
#' @export
#' @rdname SQLiteResult-class
setMethod("dbFetch", "SQLiteResult", function(res, n = -1, ...,
row.names = getOption("RSQLite.row.names.query", FALSE)) {
row.names <- compatRowNames(row.names)
# n must be a single whole number; -1 (or Inf) fetches all remaining rows.
# The is.infinite() check must precede the trunc() check, as trunc(Inf)
# would otherwise pass.
if (length(n) != 1) stopc("n must be scalar")
if (n < -1) stopc("n must be nonnegative or -1")
if (is.infinite(n)) n <- -1
if (trunc(n) != n) stopc("n must be a whole number")
sqlColumnToRownames(result_fetch(res@ptr, n = n), row.names)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbClearResult", "SQLiteResult", function(res, ...) {
# Clearing an already-closed result is a no-op with a warning, not an error
if (!dbIsValid(res)) {
warningc("Expired, result set already closed")
return(invisible(TRUE))
}
result_release(res@ptr)
# Deregister from the connection so a new query can be sent
res@conn@ref$result <- NULL
invisible(TRUE)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbColumnInfo", "SQLiteResult", function(res, ...) {
# Column metadata as reported by the C-level result wrapper
result_column_info(res@ptr)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbGetRowsAffected", "SQLiteResult", function(res, ...) {
# Number of rows changed by a data-modifying statement
result_rows_affected(res@ptr)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbGetRowCount", "SQLiteResult", function(res, ...) {
# Number of rows fetched from this result so far
result_rows_fetched(res@ptr)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbHasCompleted", "SQLiteResult", function(res, ...) {
# TRUE once all rows of the result have been fetched
result_has_completed(res@ptr)
})
#' @rdname SQLiteResult-class
#' @export
setMethod("dbGetStatement", "SQLiteResult", function(res, ...) {
# The SQL text this result was created from; errors on a closed result
if (!dbIsValid(res)) {
stopc("Expired, result set already closed")
}
res@sql
})
| /R/query.R | permissive | eddelbuettel/RSQLite | R | false | false | 3,861 | r | #' @include SQLiteResult.R
NULL
#' @rdname SQLiteConnection-class
#' @export
setMethod("dbSendQuery", c("SQLiteConnection", "character"),
function(conn, statement, params = NULL, ...) {
statement <- enc2utf8(statement)
if (!is.null(conn@ref$result)) {
warning("Closing open result set, pending rows", call. = FALSE)
dbClearResult(conn@ref$result)
stopifnot(is.null(conn@ref$result))
}
rs <- new("SQLiteResult",
sql = statement,
ptr = result_create(conn@ptr, statement),
conn = conn
)
on.exit(dbClearResult(rs), add = TRUE)
if (!is.null(params)) {
dbBind(rs, params)
}
on.exit(NULL, add = FALSE)
conn@ref$result <- rs
rs
}
)
#' @rdname SQLiteResult-class
#' @export
setMethod("dbBind", "SQLiteResult", function(res, params, ...) {
db_bind(res, as.list(params), ..., allow_named_superset = FALSE)
})
db_bind <- function(res, params, ..., allow_named_superset) {
placeholder_names <- result_get_placeholder_names(res@ptr)
empty <- placeholder_names == ""
numbers <- grepl("^[1-9][0-9]*$", placeholder_names)
names <- !(empty | numbers)
if (any(empty) && !all(empty)) {
stopc("Cannot mix anonymous and named/numbered placeholders in query")
}
if (any(numbers) && !all(numbers)) {
stopc("Cannot mix numbered and named placeholders in query")
}
if (any(empty) || any(numbers)) {
if (!is.null(names(params))) {
stopc("Cannot use named parameters for anonymous/numbered placeholders")
}
} else {
param_indexes <- match(placeholder_names, names(params))
if (any(is.na(param_indexes))) {
stopc(
"No value given for placeholder ",
paste0(placeholder_names[is.na(param_indexes)], collapse = ", ")
)
}
unmatched_param_indexes <- setdiff(seq_along(params), param_indexes)
if (length(unmatched_param_indexes) > 0L) {
if (allow_named_superset) errorc <- warningc
else errorc <- stopc
errorc(
"Named parameters not used in query: ",
paste0(names(params)[unmatched_param_indexes], collapse = ", ")
)
}
params <- unname(params[param_indexes])
}
params <- factor_to_string(params, warn = TRUE)
params <- string_to_utf8(params)
result_bind(res@ptr, params)
invisible(res)
}
#' @export
#' @rdname SQLiteResult-class
setMethod("dbFetch", "SQLiteResult", function(res, n = -1, ...,
row.names = getOption("RSQLite.row.names.query", FALSE)) {
row.names <- compatRowNames(row.names)
if (length(n) != 1) stopc("n must be scalar")
if (n < -1) stopc("n must be nonnegative or -1")
if (is.infinite(n)) n <- -1
if (trunc(n) != n) stopc("n must be a whole number")
sqlColumnToRownames(result_fetch(res@ptr, n = n), row.names)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbClearResult", "SQLiteResult", function(res, ...) {
if (!dbIsValid(res)) {
warningc("Expired, result set already closed")
return(invisible(TRUE))
}
result_release(res@ptr)
res@conn@ref$result <- NULL
invisible(TRUE)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbColumnInfo", "SQLiteResult", function(res, ...) {
result_column_info(res@ptr)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbGetRowsAffected", "SQLiteResult", function(res, ...) {
result_rows_affected(res@ptr)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbGetRowCount", "SQLiteResult", function(res, ...) {
result_rows_fetched(res@ptr)
})
#' @export
#' @rdname SQLiteResult-class
setMethod("dbHasCompleted", "SQLiteResult", function(res, ...) {
result_has_completed(res@ptr)
})
#' @rdname SQLiteResult-class
#' @export
setMethod("dbGetStatement", "SQLiteResult", function(res, ...) {
if (!dbIsValid(res)) {
stopc("Expired, result set already closed")
}
res@sql
})
|
# Librarys ----------------------------------------------------------------
required_libs <- c("ggplot2", "EBImage", "caret", "doParallel", "naivebayes", "reshape2", "impute", "randomForest", "pls", "gbm", "kernlab")
new_libs <- required_libs[!(required_libs %in% installed.packages()[,"Package"])]
if (length(new_libs) > 0) install.packages(new_libs, dependencies = T, quiet = T)
new_libs <- required_libs[!(required_libs %in% installed.packages()[,"Package"])]
if (length(new_libs) > 0)
{
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager", quiet = T)
BiocManager::install(new_libs)
}
for (i in 1:length(required_libs))
library(required_libs[i], character.only = T)
rm(list = c("i", "new_libs", "required_libs"))
sessionInfo()
# Data import -------------------------------------------------------------
load("rdata/predicting_environment.rdata")
load("rdata/fileListSumTest.rdata")
load("rdata/yetTobeTest.rdata")
load("rdata/labelsTrain.rdata")
load(file = "rdata/GbmOutput.rdata")
load(file = "rdata/GbmAllOutput.rdata")
load(file = "rdata/PLSOutput.rdata")
yetTobeTestPred <- read.delim(file = "rdata/yetTobeTestMat.txt", stringsAsFactors = F)
# Model import ------------------------------------------------------------
load("rdata/modelsGbmNew.rdata")
load("rdata/ModelsPls50Fea.rdata")
paths_Output <- c(
trainOutput = "E:/trainOutput/",
testOutput = "E:/testOutput/"
)
fileListOutput <- lapply(X = paths_Output,
FUN = function(x) {
fileList <- list.files(path = x, full.names = T)
if (length(fileList) == 0) return(NULL)
fileListAttr <- data.frame(do.call(rbind, strsplit(x = fileList, split = "\\/|\\_|\\.")), stringsAsFactors = F)
fileListAttr <- fileListAttr[, 3:4]
names(fileListAttr) <- c("index", "sample")
fileListAttr$index <- as.numeric(fileListAttr$index)
fileListAttr$link <- fileList
fileListAttr <- fileListAttr[order(fileListAttr$index), ]
return(fileListAttr)
})
yetTobeTestMat <- fileListOutput$testOutput[yetTobeTest,1:2]
# Prediction wrap up ------------------------------------------------------
# Run every fitted model on its matching feature table and collect the
# probability of the positive class (second probability column) from each.
# 'model' and 'newdata' are parallel named lists; the result is a data
# frame with one column per model name.
predict.Models <- function(model, newdata)
{
  prob.by.model <- lapply(names(model), function(model.name) {
    predict(object = model[[model.name]],
            newdata = newdata[[model.name]],
            type = "prob")
  })
  names(prob.by.model) <- names(model)
  data.frame(sapply(prob.by.model, function(p) p[, 2]))
}
# Attach sample identifiers to a prediction matrix, append extra rows
# (samples predicted elsewhere), and sort everything by the original file
# index so rows line up with the submission order.
predict.Finalise <- function(predMatProbs, fileList, appendix)
{
  tagged <- cbind(predMatProbs,
                  index = fileList$index,
                  sample = fileList$sample)
  combined <- rbind(tagged, appendix)
  combined[order(combined$index), ]
}
# Assemble and write a Kaggle-style submission CSV.
# Verifies that the prediction rows line up one-to-one, in order, with the
# sample submission's Id column before writing 'column' out as 'Predicted'.
# NOTE(review): 11702 is presumably the fixed size of
# sample_submission.csv -- verify if the data ever changes.
csv.submission <- function(predFinal, column, filename)
{
sampleSub <- read.csv(file = "rdata/sample_submission.csv", stringsAsFactors = F)
if (sum(sampleSub$Id == predFinal$sample) == 11702)
{
finalSub <- cbind(Id = sampleSub[,"Id"], Predicted = predFinal[column])
names(finalSub) <- c("Id", "Predicted")
write.csv(x = finalSub, file = filename, row.names = F)
return(finalSub)
} else {
# Row mismatch: fail rather than write a misaligned submission
stop()
}
}
# Convert a vector of per-class probabilities/indicators into the
# competition's label string: zero-based indices of entries above 0.5,
# separated by single spaces ("" when nothing exceeds 0.5).
collapseLabels <- function(binLabel)
{
  active <- which(binLabel > 0.5)
  paste(active - 1, collapse = " ")
}
# Expand a space-separated label string (zero-based class indices) into a
# logical membership vector over all classes.
# Generalized: 'n.classes' (default 28, the count previously hard-coded as
# 0:27) sets the vector length; existing callers are unaffected.
flattenLabels <- function(labelStr, n.classes = 28)
{
  (seq_len(n.classes) - 1) %in% as.numeric(strsplit(labelStr, split = " ")[[1]])
}
# Parse a space-separated label string into a numeric vector of class
# indices (numeric(0) for an empty string). Name kept as-is -- including
# the original misspelling -- because other functions in this script call it.
indvidualLabels <- function(labelStr)
{
  parts <- strsplit(labelStr, split = " ")[[1]]
  as.numeric(parts)
}
# Target Annealling -------------------------------------------------------
labelCombi <- names(table(labelsTrain$Target))
labelCombiProb <- table(labelsTrain$Target)
labelCombi <- sapply(X = labelCombi, FUN = flattenLabels)
labelCombi <- sapply(X = data.frame(labelCombi), FUN = collapseLabels)
labelCombi.Indi <- lapply(X = labelCombi, FUN = indvidualLabels)
targetAnnealing <- function(ProbandPredicted)
{
Predicted <- as.character(ProbandPredicted[29])
Probs <- as.numeric(ProbandPredicted[1:28])
if (Predicted %in% labelCombi) return(Predicted)
Predicted.Indi <- indvidualLabels(labelStr = Predicted)
matchScore <- sapply(labelCombi.Indi,
function(x) {
((length(Predicted.Indi)/sum(Predicted.Indi %in% x) + length(x)/sum(x %in% Predicted.Indi))/2)^-1
})
Predicted.Anl <- labelCombi[matchScore == max(matchScore)]
if (length(Predicted.Anl) == 1) return(Predicted.Anl)
anlScore <- sapply(Predicted.Anl, function(x) prod(Probs[indvidualLabels(x) + 1]))
return(Predicted.Anl[which.max(anlScore)])
} | /R/Prediction_environment.R | no_license | huayu-zhang/IF_image_clf_by_fea_xtc | R | false | false | 5,264 | r |
# Librarys ----------------------------------------------------------------
required_libs <- c("ggplot2", "EBImage", "caret", "doParallel", "naivebayes", "reshape2", "impute", "randomForest", "pls", "gbm", "kernlab")
new_libs <- required_libs[!(required_libs %in% installed.packages()[,"Package"])]
if (length(new_libs) > 0) install.packages(new_libs, dependencies = T, quiet = T)
new_libs <- required_libs[!(required_libs %in% installed.packages()[,"Package"])]
if (length(new_libs) > 0)
{
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager", quiet = T)
BiocManager::install(new_libs)
}
for (i in 1:length(required_libs))
library(required_libs[i], character.only = T)
rm(list = c("i", "new_libs", "required_libs"))
sessionInfo()
# Data import -------------------------------------------------------------
load("rdata/predicting_environment.rdata")
load("rdata/fileListSumTest.rdata")
load("rdata/yetTobeTest.rdata")
load("rdata/labelsTrain.rdata")
load(file = "rdata/GbmOutput.rdata")
load(file = "rdata/GbmAllOutput.rdata")
load(file = "rdata/PLSOutput.rdata")
yetTobeTestPred <- read.delim(file = "rdata/yetTobeTestMat.txt", stringsAsFactors = F)
# Model import ------------------------------------------------------------
load("rdata/modelsGbmNew.rdata")
load("rdata/ModelsPls50Fea.rdata")
paths_Output <- c(
trainOutput = "E:/trainOutput/",
testOutput = "E:/testOutput/"
)
fileListOutput <- lapply(X = paths_Output,
FUN = function(x) {
fileList <- list.files(path = x, full.names = T)
if (length(fileList) == 0) return(NULL)
fileListAttr <- data.frame(do.call(rbind, strsplit(x = fileList, split = "\\/|\\_|\\.")), stringsAsFactors = F)
fileListAttr <- fileListAttr[, 3:4]
names(fileListAttr) <- c("index", "sample")
fileListAttr$index <- as.numeric(fileListAttr$index)
fileListAttr$link <- fileList
fileListAttr <- fileListAttr[order(fileListAttr$index), ]
return(fileListAttr)
})
yetTobeTestMat <- fileListOutput$testOutput[yetTobeTest,1:2]
# Prediction wrap up ------------------------------------------------------
predict.Models <- function(model, newdata)
{
predListProbs <- list()
for (i in names(model))
{
predListProbs[[i]] <- predict(object = model[[i]], newdata = newdata[[i]], type = "prob")
}
predMatProbs <- data.frame(sapply(predListProbs, function(x) x[,2]))
return(predMatProbs)
}
predict.Finalise <- function(predMatProbs, fileList, appendix)
{
predMatProbs$index <- fileList$index
predMatProbs$sample <- fileList$sample
predFinal <- rbind(predMatProbs, appendix)
predFinal <- predFinal[order(predFinal$index),]
return(predFinal)
}
csv.submission <- function(predFinal, column, filename)
{
sampleSub <- read.csv(file = "rdata/sample_submission.csv", stringsAsFactors = F)
if (sum(sampleSub$Id == predFinal$sample) == 11702)
{
finalSub <- cbind(Id = sampleSub[,"Id"], Predicted = predFinal[column])
names(finalSub) <- c("Id", "Predicted")
write.csv(x = finalSub, file = filename, row.names = F)
return(finalSub)
} else {
stop()
}
}
collapseLabels <- function(binLabel)
{
paste(which(binLabel > 0.5) - 1, collapse = " ")
}
flattenLabels <- function(labelStr)
{
0:27 %in% as.numeric(strsplit(labelStr, split = " ")[[1]])
}
indvidualLabels <- function(labelStr)
{
as.numeric(strsplit(labelStr, split = " ")[[1]])
}
# Target Annealling -------------------------------------------------------
labelCombi <- names(table(labelsTrain$Target))
labelCombiProb <- table(labelsTrain$Target)
labelCombi <- sapply(X = labelCombi, FUN = flattenLabels)
labelCombi <- sapply(X = data.frame(labelCombi), FUN = collapseLabels)
labelCombi.Indi <- lapply(X = labelCombi, FUN = indvidualLabels)
# Snap a predicted label combination onto one of the combinations actually
# observed in the training labels ("annealing").
# 'ProbandPredicted' is one row: elements 1..28 are per-class probabilities
# and element 29 is the predicted label string (space-separated class ids).
# If the prediction is already a known combination it is returned as-is;
# otherwise the known combination with the highest overlap score wins,
# with ties broken by the product of the candidate classes' probabilities.
# Relies on the globals labelCombi / labelCombi.Indi defined above.
targetAnnealing <- function(ProbandPredicted)
{
Predicted <- as.character(ProbandPredicted[29])
Probs <- as.numeric(ProbandPredicted[1:28])
if (Predicted %in% labelCombi) return(Predicted)
Predicted.Indi <- indvidualLabels(labelStr = Predicted)
# Score = inverse of the mean of the two directional coverage ratios;
# larger means the candidate overlaps the prediction more closely.
matchScore <- sapply(labelCombi.Indi,
function(x) {
((length(Predicted.Indi)/sum(Predicted.Indi %in% x) + length(x)/sum(x %in% Predicted.Indi))/2)^-1
})
Predicted.Anl <- labelCombi[matchScore == max(matchScore)]
if (length(Predicted.Anl) == 1) return(Predicted.Anl)
# Tie-break: candidate whose classes have the highest joint probability
anlScore <- sapply(Predicted.Anl, function(x) prod(Probs[indvidualLabels(x) + 1]))
return(Predicted.Anl[which.max(anlScore)])
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rds_operations.R
\name{rds_describe_db_snapshot_attributes}
\alias{rds_describe_db_snapshot_attributes}
\title{Returns a list of DB snapshot attribute names and values for a manual DB
snapshot}
\usage{
rds_describe_db_snapshot_attributes(DBSnapshotIdentifier)
}
\arguments{
\item{DBSnapshotIdentifier}{[required] The identifier for the DB snapshot to describe the attributes for.}
}
\value{
A list with the following syntax:\preformatted{list(
DBSnapshotAttributesResult = list(
DBSnapshotIdentifier = "string",
DBSnapshotAttributes = list(
list(
AttributeName = "string",
AttributeValues = list(
"string"
)
)
)
)
)
}
}
\description{
Returns a list of DB snapshot attribute names and values for a manual DB
snapshot.
When sharing snapshots with other AWS accounts,
\code{\link[=rds_describe_db_snapshot_attributes]{describe_db_snapshot_attributes}}
returns the \code{restore} attribute and a list of IDs for the AWS accounts
that are authorized to copy or restore the manual DB snapshot. If \code{all}
is included in the list of values for the \code{restore} attribute, then the
manual DB snapshot is public and can be copied or restored by all AWS
accounts.
To add or remove access for an AWS account to copy or restore a manual
DB snapshot, or to make the manual DB snapshot public or private, use
the \code{\link[=rds_modify_db_snapshot_attribute]{modify_db_snapshot_attribute}}
API action.
}
\section{Request syntax}{
\preformatted{svc$describe_db_snapshot_attributes(
DBSnapshotIdentifier = "string"
)
}
}
\keyword{internal}
| /cran/paws.database/man/rds_describe_db_snapshot_attributes.Rd | permissive | TWarczak/paws | R | false | true | 1,675 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rds_operations.R
\name{rds_describe_db_snapshot_attributes}
\alias{rds_describe_db_snapshot_attributes}
\title{Returns a list of DB snapshot attribute names and values for a manual DB
snapshot}
\usage{
rds_describe_db_snapshot_attributes(DBSnapshotIdentifier)
}
\arguments{
\item{DBSnapshotIdentifier}{[required] The identifier for the DB snapshot to describe the attributes for.}
}
\value{
A list with the following syntax:\preformatted{list(
DBSnapshotAttributesResult = list(
DBSnapshotIdentifier = "string",
DBSnapshotAttributes = list(
list(
AttributeName = "string",
AttributeValues = list(
"string"
)
)
)
)
)
}
}
\description{
Returns a list of DB snapshot attribute names and values for a manual DB
snapshot.
When sharing snapshots with other AWS accounts,
\code{\link[=rds_describe_db_snapshot_attributes]{describe_db_snapshot_attributes}}
returns the \code{restore} attribute and a list of IDs for the AWS accounts
that are authorized to copy or restore the manual DB snapshot. If \code{all}
is included in the list of values for the \code{restore} attribute, then the
manual DB snapshot is public and can be copied or restored by all AWS
accounts.
To add or remove access for an AWS account to copy or restore a manual
DB snapshot, or to make the manual DB snapshot public or private, use
the \code{\link[=rds_modify_db_snapshot_attribute]{modify_db_snapshot_attribute}}
API action.
}
\section{Request syntax}{
\preformatted{svc$describe_db_snapshot_attributes(
DBSnapshotIdentifier = "string"
)
}
}
\keyword{internal}
|
# Chapter 6 Exercise 1: calling built-in functions

# Create a variable `my_name` that contains your name
my_name <- "Jenny Sun"

# Create a variable `name_length` that holds how many letters (including spaces)
# are in your name (use the `nchar()` function)
name_length <- nchar(my_name)

# Print the number of letters in your name
print(name_length)

# Create a variable `now_doing` that is your name followed by "is programming!"
# (use the `paste()` function)
now_doing <- paste(my_name, "is programming!")

# Make the `now_doing` variable upper case
# (bug fix: toupper() returns a new string, so the result must be assigned
# back -- the previous bare `toupper(now_doing)` discarded it)
now_doing <- toupper(now_doing)

### Bonus
# Pick two of your favorite numbers (between 1 and 100) and assign them to
# variables `fav_1` and `fav_2`

# Divide each number by the square root of 201 and save the new value in the
# original variable

# Create a variable `raw_sum` that is the sum of the two variables. Use the
# `sum()` function for practice.

# Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place.
# Use the `round()` function.

# Create two new variables `round_1` and `round_2` that are your `fav_1` and
# `fav_2` variables rounded to 1 decimal places

# Create a variable `sum_round` that is the sum of the rounded values

# Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!)
| /chapter-06-exercises/exercise-1/exercise.R | permissive | jsun234/book-exercises | R | false | false | 1,306 | r | # Chapter 6 Exercise 1: calling built-in functions
# Create a variable `my_name` that contains your name
my_name <- "Jenny Sun"
# Create a variable `name_length` that holds how many letters (including spaces)
# are in your name (use the `nchar()` function)
name_length <- nchar(my_name)
# Print the number of letters in your name
print (name_length)
# Create a variable `now_doing` that is your name followed by "is programming!"
# (use the `paste()` function)
now_doing <- paste(my_name, "is programming!")
# Make the `now_doing` variable upper case
toupper(now_doing)
### Bonus
# Pick two of your favorite numbers (between 1 and 100) and assign them to
# variables `fav_1` and `fav_2`
# Divide each number by the square root of 201 and save the new value in the
# original variable
# Create a variable `raw_sum` that is the sum of the two variables. Use the
# `sum()` function for practice.
# Create a variable `round_sum` that is the `raw_sum` rounded to 1 decimal place.
# Use the `round()` function.
# Create two new variables `round_1` and `round_2` that are your `fav_1` and
# `fav_2` variables rounded to 1 decimal places
# Create a variable `sum_round` that is the sum of the rounded values
# Which is bigger, `round_sum` or `sum_round`? (You can use the `max()` function!)
|
require("XML")
xmlfile <- xmlParse("~/Desktop/1842-43_TO_1910-11.xml")
rootnode = xmlRoot(xmlfile) #gives content of root
class(rootnode)
xmlName(rootnode)
xmlSize(rootnode)
firstchild <- rootnode[[1]]
lastchild <- rootnode[[1015]]
xmlSize(firstchild) #number of nodes in child
xmlSApply(firstchild, xmlName) #name(s)
xmlSApply(firstchild, xmlSize) #size
rootnode[[1]][["worksInfo"]][[1]][["workTitle"]]
rootnode[[1]][["worksInfo"]][[1]][["composerName"]]
xmlToList(rootnode[[1]][["worksInfo"]][[1]][["workTitle"]])
incrementComp <- function(composer_stats, c, season){
if (is.null(composer_stats[c, season])) {
composer_stats[c, season] <- 1
} else if (is.na(composer_stats[c,season])) {
composer_stats[c, season] <- 1
} else {
composer_stats[c, season] <- composer_stats[c, season] + 1
}
return(composer_stats)
}
composerBySeason <- data.frame()
for (seas in 1:xmlSize(rootnode)) {
# DEBUG: cat(seas, "\n")
firstlist <- xmlToList(rootnode[[seas]])
season <- firstlist$season
season <- paste("Season",season,sep=".")
works <- firstlist$worksInfo
if (is.list(works)) { # sometimes works is actually empty
for (i in 1:length(works)) {
if (!is.null(works[[i]]$composerName)) { #sometimes there is no composer
composerBySeason <- incrementComp(composerBySeason, works[[i]]$composerName,season)
}
}
}
}
# Parsing the whole thing first is WARNING: SLOW
# xml_aslist <- xmlToList(xmlfile)
# xml_aslist[[22]][["worksInfo"]][[1]][["workTitle"]] | /sample_parse.r | no_license | edsp2016/azhangproject | R | false | false | 1,533 | r | require("XML")
xmlfile <- xmlParse("~/Desktop/1842-43_TO_1910-11.xml")
rootnode = xmlRoot(xmlfile) #gives content of root
class(rootnode)
xmlName(rootnode)
xmlSize(rootnode)
firstchild <- rootnode[[1]]
lastchild <- rootnode[[1015]]
xmlSize(firstchild) #number of nodes in child
xmlSApply(firstchild, xmlName) #name(s)
xmlSApply(firstchild, xmlSize) #size
rootnode[[1]][["worksInfo"]][[1]][["workTitle"]]
rootnode[[1]][["worksInfo"]][[1]][["composerName"]]
xmlToList(rootnode[[1]][["worksInfo"]][[1]][["workTitle"]])
incrementComp <- function(composer_stats, c, season){
if (is.null(composer_stats[c, season])) {
composer_stats[c, season] <- 1
} else if (is.na(composer_stats[c,season])) {
composer_stats[c, season] <- 1
} else {
composer_stats[c, season] <- composer_stats[c, season] + 1
}
return(composer_stats)
}
composerBySeason <- data.frame()
for (seas in 1:xmlSize(rootnode)) {
# DEBUG: cat(seas, "\n")
firstlist <- xmlToList(rootnode[[seas]])
season <- firstlist$season
season <- paste("Season",season,sep=".")
works <- firstlist$worksInfo
if (is.list(works)) { # sometimes works is actually empty
for (i in 1:length(works)) {
if (!is.null(works[[i]]$composerName)) { #sometimes there is no composer
composerBySeason <- incrementComp(composerBySeason, works[[i]]$composerName,season)
}
}
}
}
# Parsing the whole thing first is WARNING: SLOW
# xml_aslist <- xmlToList(xmlfile)
# xml_aslist[[22]][["worksInfo"]][[1]][["workTitle"]] |
complete <- function(directory, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used
  ## Return a data frame of the form:
  ##      id nobs
  ##      1  117
  ##      2  1041
  ##      ...
  ## where 'id' is the monitor ID number and 'nobs' is the
  ## number of complete cases (rows without any NA) in that monitor's file.

  ## Build one summary row per monitor and bind them all at once, instead
  ## of growing a data frame with rbind() inside the loop.
  rows <- lapply(id, function(i) {
    ## sprintf zero-pads the id to three digits ("1" -> "001"), replacing
    ## the previous hand-rolled nchar()-based padding helper.
    file_name <- file.path(directory, sprintf("%03d.csv", i))
    data <- read.csv(file_name)
    data.frame(id = i, nobs = sum(complete.cases(data)))
  })
  do.call(rbind, rows)
}
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return a data frame of the form:
## id nobs
## 1 117
## 2 1041
## ...
## where 'id' is the monitor ID number and 'nobs' is the
## number of complete cases
## Conversion d'un id en nom de fichier
convid <- function (id) {
base <- as.character(id)
if (nchar(base)==1) {
file_name <- paste("00",base,".csv", sep = "")
} else if (nchar(base)==2) {
file_name <- paste("0",base,".csv", sep = "")
} else {
file_name <- paste(base,".csv", sep = "")
}
file_name
}
for (i in id) {
file_name <- paste(directory,"/",convid(i), sep = "")
data <- read.table(file_name, T, ",")
nobs <- sum(complete.cases(data))
if (i==id[1]) {
df <- data.frame (id = i, nobs = nobs)
} else {
df <- rbind(df,data.frame (id = i, nobs = nobs))
}
}
df
} |
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'pollutant' is a character vector of length 1 indicating
  ## the name of the pollutant for which we will calculate the
  ## mean; either "sulfate" or "nitrate".
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used
  ## Return the mean of the pollutant across all monitors listed
  ## in the 'id' vector (ignoring NA values)
  ## NOTE: Do not round the result!

  ## Build absolute paths instead of calling setwd(): the original changed
  ## the process working directory and would not restore it if any read
  ## failed midway through.
  files <- file.path(directory, paste0(formatC(id, width = 3, flag = "0"), ".csv"))
  data <- do.call(rbind, lapply(files, read.csv))
  mean(data[, pollutant], na.rm = TRUE)
}
| /pollutantmean.R | no_license | hudbrog/datasciencecoursera | R | false | false | 782 | r | pollutantmean <- function(directory, pollutant, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
wd = getwd()
setwd(directory)
## 'pollutant' is a character vector of length 1 indicating
## the name of the pollutant for which we will calculate the
## mean; either "sulfate" or "nitrate".
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
data_list <- lapply(id, function(x) {read.csv(paste(formatC(x, width=3, flag='0'), '.csv', sep=''))})
data <- do.call(rbind, data_list)
## Return the mean of the pollutant across all monitors list
## in the 'id' vector (ignoring NA values)
## NOTE: Do not round the result!
setwd(wd)
mean(data[, pollutant], na.rm = TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kdbconpkg.R
\name{select}
\alias{select}
\title{Select Q table as R data frame
Conversion (KDB)98h ->(Java)KxTable->(R)data frame}
\usage{
select(manager, handle, query)
}
\arguments{
\item{manager}{reference to java class}
\item{handle}{int connection handle}
\item{query}{Q string}
}
\value{
data frame
}
\description{
Select Q table as R data frame
Conversion (KDB)98h ->(Java)KxTable->(R)data frame
}
| /man/select.Rd | permissive | eugene-bk-eng/R2QConnector | R | false | true | 486 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kdbconpkg.R
\name{select}
\alias{select}
\title{Select Q table as R data frame
Conversion (KDB)98h ->(Java)KxTable->(R)data frame}
\usage{
select(manager, handle, query)
}
\arguments{
\item{manager}{reference to java class}
\item{handle}{int connection handle}
\item{query}{Q string}
}
\value{
data frame
}
\description{
Select Q table as R data frame
Conversion (KDB)98h ->(Java)KxTable->(R)data frame
}
|
# Jason Dean
# Feb 26, 2017
# This script pulls 2000 tweets containing either #NRA or #NPR from Twitter and performs sentiment analysis.
# More info at my website: jasontdean.com
library(knitr)
library(twitteR)
library("ROAuth")
# Twitter API credentials -- placeholders, fill in before running.
consumer_key <- 'your key'
consumer_secret <- 'your secret'
access_token <- 'your token'
access_secret <- 'your secret'
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
# Pull 2000 recent English tweets per hashtag; #sunshine serves as a
# presumably-positive comparison corpus.
nra <- searchTwitter("#NRA", n=2000, lang='en')
npr <- searchTwitter("#NPR", n=2000, lang='en')
sunshine <- searchTwitter("#sunshine", n=2000, lang='en')
library(tm)
library(wordcloud)
library(RColorBrewer)
# extract text from tweets
nra.text = sapply(nra, function(x) x$getText())
npr.text = sapply(npr, function(x) x$getText())
sunshine.text = sapply(sunshine, function(x) x$getText())
# remove non-ascii characters and convert to lowercase
nra.text <- iconv(nra.text, "latin1", "ASCII", sub="")
nra.text <- tolower(nra.text)
npr.text <- iconv(npr.text, "latin1", "ASCII", sub="")
npr.text <- tolower(npr.text)
sunshine.text <- iconv(sunshine.text, "latin1", "ASCII", sub="")
sunshine.text <- tolower(sunshine.text)
# remove 'http'
# NOTE(review): the greedy pattern 'http.* *' deletes everything from the
# first "http" to the end of the tweet, not just the URL itself.
nra.text <- gsub('http.* *', '', nra.text)
npr.text <- gsub('http.* *', '',npr.text)
sunshine.text <- gsub('http.* *', '',sunshine.text)
# create a Corpus
nra.corp <- Corpus(VectorSource(nra.text))
npr.corp <- Corpus(VectorSource(npr.text))
sunshine.corp <- Corpus(VectorSource(sunshine.text))
# Term-document matrices: stemmed, punctuation/numbers stripped, and each
# corpus's own hashtag word added to the stopword list ("sunshin" is the
# stemmed form of "sunshine").
nra.data = TermDocumentMatrix(nra.corp, control = list(stemming = TRUE, removePunctuation = TRUE, stopwords = c("the", "nra", stopwords("english")),removeNumbers = TRUE, stripWhitespace = TRUE))
npr.data = TermDocumentMatrix(npr.corp, control = list(stemming = TRUE, removePunctuation = TRUE, stopwords = c("the", "npr", stopwords("english")),removeNumbers = TRUE, stripWhitespace = TRUE))
sunshine.data = TermDocumentMatrix(sunshine.corp, control = list(stemming = TRUE, removePunctuation = TRUE, stopwords = c("the", "sunshine", "sunshin", stopwords("english")),removeNumbers = TRUE, stripWhitespace = TRUE))
# NRA wordcloud
# Rows of the TDM are terms; summing across columns gives each term's total
# frequency over all tweets in the corpus.
nra.matrix <- as.matrix(nra.data)
nra.word_freqs = sort(rowSums(nra.matrix), decreasing=TRUE)
nra.df <- data.frame(word=names(nra.word_freqs), freq=nra.word_freqs)
wordcloud(nra.df$word, nra.df$freq, scale=c(5,0.5), random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
# NPR wordcloud
npr.matrix <- as.matrix(npr.data)
npr.word_freqs = sort(rowSums(npr.matrix), decreasing=TRUE)
npr.df <- data.frame(word=names(npr.word_freqs), freq=npr.word_freqs)
wordcloud(npr.df$word, npr.df$freq, scale=c(5,0.5), random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
# sunshine wordcloud
sunshine.matrix <- as.matrix(sunshine.data)
sunshine.word_freqs = sort(rowSums(sunshine.matrix), decreasing=TRUE)
sunshine.df <- data.frame(word=names(sunshine.word_freqs), freq=sunshine.word_freqs)
wordcloud(sunshine.df$word, sunshine.df$freq, scale=c(5,0.5), random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
# Word Frequency Analysis and Association
# NOTE(review): in as.data.frame(x, 5) the 5 lands in the row.names
# argument; head(as.data.frame(x), 5) was probably intended.
# NRA
kable(head(as.data.frame(nra.word_freqs, 5)), format="html", align = 'c')
# NPR
kable(head(as.data.frame(npr.word_freqs, 5)), format="html", align = 'c')
# sunshine
kable(head(as.data.frame(sunshine.word_freqs, 5)), format="html", align = 'c')
# Terms associated with 'america' (correlation threshold 0.25) per corpus.
nra.america <- findAssocs(nra.data, 'america', 0.25)
npr.america <- findAssocs(npr.data, 'america', 0.25)
sunshine.america <- findAssocs(sunshine.data, 'america', 0.25)
# NRA
kable(head(as.data.frame(nra.america)), format="html", align = 'c')
# NPR
kable(head(as.data.frame(npr.america)), format="html", align = 'c')
# sunshine
kable(head(as.data.frame(sunshine.america)), format="html", align = 'c')
# Same association lookups for 'love' (threshold 0.30) ...
nra.love <- findAssocs(nra.data, 'love', 0.30)
npr.love <- findAssocs(npr.data, 'love', 0.30)
sunshine.love <- findAssocs(sunshine.data, 'love', 0.30)
# NRA
kable(head(as.data.frame(nra.love)), format="html", align = 'c')
# NPR
kable(head(as.data.frame(npr.love)), format="html", align = 'c')
# sunshine
kable(head(as.data.frame(sunshine.love)), format="html", align = 'c')
# ... and for 'trump' (threshold 0.2).
nra.trump <- findAssocs(nra.data, 'trump', 0.2)
npr.trump <- findAssocs(npr.data, 'trump', 0.2)
sunshine.trump <- findAssocs(sunshine.data, 'trump', 0.2)
# NRA
kable(head(as.data.frame(nra.trump)), format="html", align = 'c')
# NPR
kable(head(as.data.frame(npr.trump)), format="html", align = 'c')
# sunshine
kable(head(as.data.frame(sunshine.trump)), format="html", align = 'c')
library(dplyr)
# NRA
nra2 <- as.data.frame(nra.matrix)
# calculate the standard deviations of each word across tweets
nra.stdev <- as.numeric(apply(nra2, 1, sd))
nra2$stdev <- nra.stdev
# filter out words that have a standard deviation equal to zero
nra2 <- nra2 %>% filter(stdev>0)
# Drop the helper column; -2001 hard-codes 2000 tweet columns + stdev.
nra2 <- nra2[,-2001]
# NPR
npr2 <- as.data.frame(npr.matrix)
# calculate the standard deviations of each word across tweets
npr.stdev <- as.numeric(apply(npr2, 1, sd))
npr2$stdev <- npr.stdev
# filter out words that have a standard deviation equal to zero
npr2 <- npr2 %>% filter(stdev>0)
npr2 <- npr2[,-2001]
# NRA
# NOTE(review): cor() correlates the COLUMNS of nra2 (i.e. tweets), while
# nra.words below indexes the ROWS of nra.matrix (words); verify whether
# cor(t(nra2)) was intended before trusting nra.top2.
nra.corr <- cor(nra2)
# Zero out NAs and the self-correlation diagonal so they cannot win the max.
nra.corr[is.na(nra.corr)] <- 0
nra.corr[nra.corr == 1] <- 0
# find the highest correlation coefficient
# (only coefficients strictly between 0.95 and 0.99 are considered)
nra.max <- as.matrix(nra.corr[as.numeric(which(nra.corr > 0.95 & nra.corr < 0.99))])
nra.max <- sort(nra.max, decreasing = TRUE)
nra.maximum <- nra.max[1]
# find where this maximum occurs in the correlation matrix
# (floating-point equality is safe here: nra.maximum was taken verbatim
# from nra.corr itself)
nra.loc <- which(nra.corr == nra.maximum, arr.ind = TRUE)
# and last find what words this correlation coefficient is calculated from
nra.words <- row.names(nra.matrix)
nra.top2 <- c(nra.words[nra.loc[1,1]], nra.words[nra.loc[1,2]])
# NPR
npr.corr <- cor(npr2)
npr.corr[is.na(npr.corr)] <- 0
npr.corr[npr.corr == 1] <- 0
# find the highest correlation coefficient
npr.max <- as.matrix(npr.corr[as.numeric(which(npr.corr > 0.95 & npr.corr < 0.99))])
npr.max <- sort(npr.max, decreasing = TRUE)
npr.maximum <- npr.max[1]
# find where this maximum occurs in the correlation matrix
npr.loc <- which(npr.corr == npr.maximum, arr.ind = TRUE)
# and last find what words this correlation coefficient is calculated from
npr.words <- row.names(npr.matrix)
npr.top2 <- c(npr.words[npr.loc[1,1]], npr.words[npr.loc[1,2]])
# "Top two associated words in the NRA tweet data set"
nra.top2
# "Top two associated words in the NPR tweet data set"
npr.top2
# Sentiment Analysis
# Opinion lexicon (one word per line), from:
# https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html#lexicon
positive <- readLines("positive-words.txt")
negative <- readLines("negative-words.txt")
# match() gives each word's position in the lexicon, or NA when absent ...
nra.df$positive <- match(nra.df$word, positive)
npr.df$positive <- match(npr.df$word, positive)
sunshine.df$positive <- match(sunshine.df$word, positive)
nra.df$negative <- match(nra.df$word, negative)
npr.df$negative <- match(npr.df$word, negative)
sunshine.df$negative <- match(sunshine.df$word, negative)
# ... which the next block collapses into 0/1 indicator columns:
# NA -> 0 (not in lexicon), any position -> 1 (in lexicon).
nra.df[is.na(nra.df)] <- 0
nra.df$positive[nra.df$positive != 0] <- 1
nra.df$negative[nra.df$negative != 0] <- 1
npr.df[is.na(npr.df)] <- 0
npr.df$positive[npr.df$positive != 0] <- 1
npr.df$negative[npr.df$negative != 0] <- 1
sunshine.df[is.na(sunshine.df)] <- 0
sunshine.df$positive[sunshine.df$positive != 0] <- 1
sunshine.df$negative[sunshine.df$negative != 0] <- 1
library(ggplot2)
library(reshape2)
# Frequency-weighted share of lexicon words: sum of (indicator * term
# frequency) over total term frequency, per corpus and polarity.
nra.positive <- sum((nra.df$positive*nra.df$freq))/sum(nra.df$freq)
nra.negative <- sum((nra.df$negative*nra.df$freq))/sum(nra.df$freq)
npr.positive <- sum((npr.df$positive*npr.df$freq))/sum(npr.df$freq)
npr.negative <- sum((npr.df$negative*npr.df$freq))/sum(npr.df$freq)
sunshine.positive <- sum((sunshine.df$positive*sunshine.df$freq))/sum(sunshine.df$freq)
sunshine.negative <- sum((sunshine.df$negative*sunshine.df$freq))/sum(sunshine.df$freq)
# format the data for plotting
nra.sents <- data.frame(positive = nra.positive, negative = nra.negative)
npr.sents <- data.frame(positive = npr.positive, negative = npr.negative)
sunshine.sents <- data.frame(positive = sunshine.positive, negative = sunshine.negative)
sentiments <- rbind(nra.sents, npr.sents, sunshine.sents)
# NOTE(review): `names` and `colnames` below shadow the base functions of
# the same name within this script.
names <- c("#NRA", "#NPR", "#sunshine")
sentiments$tweets <- names
#row.names(sentiments) <- names
sentiments.m <- melt(sentiments)
colnames <- c("tweets", "sentiment", "fraction")
colnames(sentiments.m) <- colnames
# plot the data
# Stacked bars: positive vs negative fraction per hashtag.
ggplot(data=sentiments.m, aes(tweets, fraction, fill=sentiment)) + geom_bar(stat='identity') + ylab("fraction of tweets") + xlab("") + theme_bw()
# Positive-to-negative ratio as a single sentiment score per hashtag.
sentiments$ratio <- sentiments$positive/sentiments$negative
head(sentiments[,3:4])
| /NRA_vs_NPR/Twitter mining.R | no_license | JTDean123/dataScience | R | false | false | 8,572 | r | # Jason Dean
# Feb 26, 2017
# This script pulls 2000 tweets containing either #NRA or #NPR from Twitter and performs sentiment analysis.
# More info at my website: jasontdean.com
library(knitr)
library(twitteR)
library("ROAuth")
consumer_key <- 'your key'
consumer_secret <- 'your secret'
access_token <- 'your token'
access_secret <- 'your secret'
setup_twitter_oauth(consumer_key, consumer_secret, access_token, access_secret)
nra <- searchTwitter("#NRA", n=2000, lang='en')
npr <- searchTwitter("#NPR", n=2000, lang='en')
sunshine <- searchTwitter("#sunshine", n=2000, lang='en')
library(tm)
library(wordcloud)
library(RColorBrewer)
# extract text from tweets
nra.text = sapply(nra, function(x) x$getText())
npr.text = sapply(npr, function(x) x$getText())
sunshine.text = sapply(sunshine, function(x) x$getText())
# remove non-ascii characters and convert to lowercase
nra.text <- iconv(nra.text, "latin1", "ASCII", sub="")
nra.text <- tolower(nra.text)
npr.text <- iconv(npr.text, "latin1", "ASCII", sub="")
npr.text <- tolower(npr.text)
sunshine.text <- iconv(sunshine.text, "latin1", "ASCII", sub="")
sunshine.text <- tolower(sunshine.text)
# remove 'http'
nra.text <- gsub('http.* *', '', nra.text)
npr.text <- gsub('http.* *', '',npr.text)
sunshine.text <- gsub('http.* *', '',sunshine.text)
# create a Corpus
nra.corp <- Corpus(VectorSource(nra.text))
npr.corp <- Corpus(VectorSource(npr.text))
sunshine.corp <- Corpus(VectorSource(sunshine.text))
nra.data = TermDocumentMatrix(nra.corp, control = list(stemming = TRUE, removePunctuation = TRUE, stopwords = c("the", "nra", stopwords("english")),removeNumbers = TRUE, stripWhitespace = TRUE))
npr.data = TermDocumentMatrix(npr.corp, control = list(stemming = TRUE, removePunctuation = TRUE, stopwords = c("the", "npr", stopwords("english")),removeNumbers = TRUE, stripWhitespace = TRUE))
sunshine.data = TermDocumentMatrix(sunshine.corp, control = list(stemming = TRUE, removePunctuation = TRUE, stopwords = c("the", "sunshine", "sunshin", stopwords("english")),removeNumbers = TRUE, stripWhitespace = TRUE))
# NRA wordcloud
nra.matrix <- as.matrix(nra.data)
nra.word_freqs = sort(rowSums(nra.matrix), decreasing=TRUE)
nra.df <- data.frame(word=names(nra.word_freqs), freq=nra.word_freqs)
wordcloud(nra.df$word, nra.df$freq, scale=c(5,0.5), random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
# NPR wordcloud
npr.matrix <- as.matrix(npr.data)
npr.word_freqs = sort(rowSums(npr.matrix), decreasing=TRUE)
npr.df <- data.frame(word=names(npr.word_freqs), freq=npr.word_freqs)
wordcloud(npr.df$word, npr.df$freq, scale=c(5,0.5), random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
# sunshine wordcloud
sunshine.matrix <- as.matrix(sunshine.data)
sunshine.word_freqs = sort(rowSums(sunshine.matrix), decreasing=TRUE)
sunshine.df <- data.frame(word=names(sunshine.word_freqs), freq=sunshine.word_freqs)
wordcloud(sunshine.df$word, sunshine.df$freq, scale=c(5,0.5), random.order=FALSE, rot.per=0.35, use.r.layout=FALSE, colors=brewer.pal(8, "Dark2"))
# Word Frequency Analysis and Association
# NRA
kable(head(as.data.frame(nra.word_freqs, 5)), format="html", align = 'c')
# NPR
kable(head(as.data.frame(npr.word_freqs, 5)), format="html", align = 'c')
# sunshine
kable(head(as.data.frame(sunshine.word_freqs, 5)), format="html", align = 'c')
nra.america <- findAssocs(nra.data, 'america', 0.25)
npr.america <- findAssocs(npr.data, 'america', 0.25)
sunshine.america <- findAssocs(sunshine.data, 'america', 0.25)
# NRA
kable(head(as.data.frame(nra.america)), format="html", align = 'c')
# NPR
kable(head(as.data.frame(npr.america)), format="html", align = 'c')
# sunshine
kable(head(as.data.frame(sunshine.america)), format="html", align = 'c')
nra.love <- findAssocs(nra.data, 'love', 0.30)
npr.love <- findAssocs(npr.data, 'love', 0.30)
sunshine.love <- findAssocs(sunshine.data, 'love', 0.30)
# NRA
kable(head(as.data.frame(nra.love)), format="html", align = 'c')
# NPR
kable(head(as.data.frame(npr.love)), format="html", align = 'c')
# sunshine
kable(head(as.data.frame(sunshine.love)), format="html", align = 'c')
nra.trump <- findAssocs(nra.data, 'trump', 0.2)
npr.trump <- findAssocs(npr.data, 'trump', 0.2)
sunshine.trump <- findAssocs(sunshine.data, 'trump', 0.2)
# NRA
kable(head(as.data.frame(nra.trump)), format="html", align = 'c')
# NPR
kable(head(as.data.frame(npr.trump)), format="html", align = 'c')
# sunshine
kable(head(as.data.frame(sunshine.trump)), format="html", align = 'c')
library(dplyr)
# NRA
nra2 <- as.data.frame(nra.matrix)
# calculate the standard deviations of each word across tweets
nra.stdev <- as.numeric(apply(nra2, 1, sd))
nra2$stdev <- nra.stdev
# filter out words that have a standard deviation equal to zero
nra2 <- nra2 %>% filter(stdev>0)
nra2 <- nra2[,-2001]
# NPR
npr2 <- as.data.frame(npr.matrix)
# calculate the standard deviations of each word across tweets
npr.stdev <- as.numeric(apply(npr2, 1, sd))
npr2$stdev <- npr.stdev
# filter out words that have a standard deviation equal to zero
npr2 <- npr2 %>% filter(stdev>0)
npr2 <- npr2[,-2001]
# NRA
nra.corr <- cor(nra2)
nra.corr[is.na(nra.corr)] <- 0
nra.corr[nra.corr == 1] <- 0
# find the highest correlation coefficient
nra.max <- as.matrix(nra.corr[as.numeric(which(nra.corr > 0.95 & nra.corr < 0.99))])
nra.max <- sort(nra.max, decreasing = TRUE)
nra.maximum <- nra.max[1]
# find where this maximum occurs in the correlation matrix
nra.loc <- which(nra.corr == nra.maximum, arr.ind = TRUE)
# and last find what words this correlation coeffient is calculated from
nra.words <- row.names(nra.matrix)
nra.top2 <- c(nra.words[nra.loc[1,1]], nra.words[nra.loc[1,2]])
# NPR
npr.corr <- cor(npr2)
npr.corr[is.na(npr.corr)] <- 0
npr.corr[npr.corr == 1] <- 0
# find the highest correlation coefficient
npr.max <- as.matrix(npr.corr[as.numeric(which(npr.corr > 0.95 & npr.corr < 0.99))])
npr.max <- sort(npr.max, decreasing = TRUE)
npr.maximum <- npr.max[1]
# find where this maximum occurs in the correlation matrix
npr.loc <- which(npr.corr == npr.maximum, arr.ind = TRUE)
# This chunk continues a tweet term-matrix analysis: npr.matrix, npr.loc,
# nra.top2 and the nra.df/npr.df/sunshine.df word-frequency data frames are
# built earlier in the script (not shown here).
# Last, find which words the top correlation coefficient is calculated from.
npr.words <- row.names(npr.matrix)
npr.top2 <- c(npr.words[npr.loc[1,1]], npr.words[npr.loc[1,2]])
# Top two associated words in the NRA tweet data set
nra.top2
# Top two associated words in the NPR tweet data set
npr.top2
# Sentiment Analysis, using the opinion lexicon word lists from:
# https://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html#lexicon
positive <- readLines("positive-words.txt")
negative <- readLines("negative-words.txt")
# match() returns the lexicon index of each word, or NA when the word is
# not in the lexicon; the indices are reduced to 0/1 indicators below.
nra.df$positive <- match(nra.df$word, positive)
npr.df$positive <- match(npr.df$word, positive)
sunshine.df$positive <- match(sunshine.df$word, positive)
nra.df$negative <- match(nra.df$word, negative)
npr.df$negative <- match(npr.df$word, negative)
sunshine.df$negative <- match(sunshine.df$word, negative)
# Replace NA (word not in lexicon) with 0, then collapse any nonzero match
# index to 1 so positive/negative become membership indicators.
nra.df[is.na(nra.df)] <- 0
nra.df$positive[nra.df$positive != 0] <- 1
nra.df$negative[nra.df$negative != 0] <- 1
npr.df[is.na(npr.df)] <- 0
npr.df$positive[npr.df$positive != 0] <- 1
npr.df$negative[npr.df$negative != 0] <- 1
sunshine.df[is.na(sunshine.df)] <- 0
sunshine.df$positive[sunshine.df$positive != 0] <- 1
sunshine.df$negative[sunshine.df$negative != 0] <- 1
library(ggplot2)
library(reshape2)
# Frequency-weighted fraction of positive / negative words per data set.
nra.positive <- sum((nra.df$positive*nra.df$freq))/sum(nra.df$freq)
nra.negative <- sum((nra.df$negative*nra.df$freq))/sum(nra.df$freq)
npr.positive <- sum((npr.df$positive*npr.df$freq))/sum(npr.df$freq)
npr.negative <- sum((npr.df$negative*npr.df$freq))/sum(npr.df$freq)
sunshine.positive <- sum((sunshine.df$positive*sunshine.df$freq))/sum(sunshine.df$freq)
sunshine.negative <- sum((sunshine.df$negative*sunshine.df$freq))/sum(sunshine.df$freq)
# Format the data for plotting: one row per hashtag, long format via melt().
nra.sents <- data.frame(positive = nra.positive, negative = nra.negative)
npr.sents <- data.frame(positive = npr.positive, negative = npr.negative)
sunshine.sents <- data.frame(positive = sunshine.positive, negative = sunshine.negative)
sentiments <- rbind(nra.sents, npr.sents, sunshine.sents)
names <- c("#NRA", "#NPR", "#sunshine")
sentiments$tweets <- names
#row.names(sentiments) <- names
sentiments.m <- melt(sentiments)
colnames <- c("tweets", "sentiment", "fraction")
colnames(sentiments.m) <- colnames
# Plot the data: stacked bars of positive/negative fractions per hashtag.
ggplot(data=sentiments.m, aes(tweets, fraction, fill=sentiment)) + geom_bar(stat='identity') + ylab("fraction of tweets") + xlab("") + theme_bw()
# Positive-to-negative ratio per hashtag; show tweets + ratio columns.
sentiments$ratio <- sentiments$positive/sentiments$negative
head(sentiments[,3:4])
|
## August 2nd, 2018, 09:37 PM
#### Fig 4: Power comparison at various sample sizes.
# Compares the power of extreme-phenotype sampling (EPS) and simple random
# sampling (SRS) designs for the SNP-, RNA- and protein (PRT)-level tests.
rm(list = ls(all.names = TRUE))  # fix: ls()'s argument is `all.names`, a logical (was all="TRUE")
path <- "C:/MCM/"
setwd(path)
source("Subfunctions.R")  # provides effect.sizes.PRT, *.pwr.SRS, LU.MCM, L2T2.*.EPS, MCVM.Power, plot.power.sample.size
################################################################################
NSL <- 2.5e-6   # Nominal Significance Level
alpha <- 0.2    # sampling fraction used by the EPS design
npts <- 500
rpts <- (c(0:npts)/(npts*100))
hi <- 368       # grid index used below (h2 = 1%)
r2.1 <- 15*rpts  ## 0.5%~5%  refs. SNP to RNA
r2.2 <- 50*rpts  ## 30~50%   refs. (RNA to PRT) de Sousa Abreu et al. 2009
#### Let X1 = G~B(2, p) with p = 0.25 say; then the first 4 moments of G are:
p <- 0.25  ## MAF at the causal SNP
G.Ms <- c(2*p, 2*p+2*p*p, 2*p+6*p*p, 2*p+14*p*p)
#### var(G)
var.x1 <- 2*p*(1-p)
beta1 <- sqrt(r2.1/( (1-r2.1)*var.x1 ) )
#### var(X2) = var(e2 + X1*beta1)
var.x2 <- 1+(beta1*beta1)*var.x1
beta2 <- sqrt(r2.2/( (1-r2.2)*var.x2 ) )
#### var(X3) = var(e3 + X2*beta2)
var.x3 <- 1+(beta2*beta2)*var.x2
c <- log(2)  # kept as-is: may be read by the sourced subfunctions -- confirm before removing
nfold <- 5
beta.r2.3 <- effect.sizes.PRT(nfold, alpha, p, beta1, beta2, var.x3)
beta3 <- beta.r2.3[,1]
r2.3 <- beta.r2.3[,2]
h2 <- r2.1*r2.2*r2.3  #### SNP heritability
#### Computing powers for various sample sizes
################################################################################
sample.size <- c(0:18296)
#### Under SRS design
SNP.pwr.SRS.n <- SNP.Geno.pwr.SRS(G.Ms, beta1[hi], beta2[hi], beta3[hi], NSL, sample.size)
RNA.pwr.SRS.n <- RNA.Expr.pwr.SRS(G.Ms, beta1[hi], beta2[hi], beta3[hi], NSL, sample.size)
PRT.pwr.SRS.n <- PRT.Expr.pwr.SRS(G.Ms, beta1[hi], beta2[hi], beta3[hi], NSL, sample.size)
#### Under EPS design
## Lemma 2: for given vectors of effect sizes, search for lower and upper
## quantiles of Y. These quantiles work for all tests under the EPS.
tau.LU <- LU.MCM(beta1, beta2, beta3, p, alpha)
## Lemmas 3-4: noncentrality parameters of the three t tests
LT2.SNP.EPS <- L2T2.SNP.EPS(tau.LU, beta1, beta2, beta3, p, alpha)
LT2.RNA.EPS <- L2T2.RNA.EPS(tau.LU, beta1, beta2, beta3, p, alpha)
LT2.PRT.EPS <- L2T2.PRT.EPS(tau.LU, beta1, beta2, beta3, p, alpha)
halfss <- sample.size/2
SNP.pwr.EPS.n <- MCVM.Power(matrix(LT2.SNP.EPS[hi,], 1, 2), NSL, n=halfss)
RNA.pwr.EPS.n <- MCVM.Power(matrix(LT2.RNA.EPS[hi,], 1, 2), NSL, n=halfss)
PRT.pwr.EPS.n <- MCVM.Power(matrix(LT2.PRT.EPS[hi,], 1, 2), NSL, n=halfss)
plot.power.sample.size(sample.size, PRT.pwr.EPS.n, RNA.pwr.EPS.n, SNP.pwr.EPS.n, PRT.pwr.SRS.n, RNA.pwr.SRS.n, SNP.pwr.SRS.n, xlimit=c(50, 600), fig_id=4, path)
Power.Sample.Comp <- cbind(sample.size, PRT.pwr.EPS.n, RNA.pwr.EPS.n, SNP.pwr.EPS.n, PRT.pwr.SRS.n, RNA.pwr.SRS.n, SNP.pwr.SRS.n)
colnames(Power.Sample.Comp) <- c("Sample.size", "PRT.pwr.EPS.n", "RNA.pwr.EPS.n", "SNP.pwr.EPS.n", "PRT.pwr.SRS.n", "RNA.pwr.SRS.n", "SNP.pwr.SRS.n")
# fix: paste() joined with a space, writing "C:/MCM/ Fig4.txt"; paste0 joins cleanly
write.table(Power.Sample.Comp, paste0(path, "Fig4.txt"), append = FALSE, sep=" ", quote = FALSE, col.names = TRUE, row.names = FALSE)
| /Fig4.R | no_license | HuaizhenQin/MCM | R | false | false | 2,916 | r | ## August 2rd, 2018, 09:37 PM
rm(list=ls(all="TRUE"));
path<-"C:/MCM/";
setwd(path);
source("Subfunctions.R");
#### Fig 4: Power comparison at various sample sizes.
################################################################################
NSL = 2.5e-6; #Nominal Significance Level
alpha = 0.2;
npts = 500;
rpts =(c(0:npts)/(npts*100));
hi=368; #h2 = 1%
r2.1 = 15*rpts; ##0.5%~5% #refs. SNP to RNA
r2.2 = 50*rpts; ##30~50% #refs. (RNA to PRT) de Sousa Abreu et al. 2009
#### Let X1 = G~B(2, p) with p = 0.25 say, then the first 4 moments of G is:
p = 0.25; ##MAF at the causal SNP
G.Ms <- c(2*p, 2*p+2*p*p, 2*p+6*p*p, 2*p+14*p*p);
#### var(G)
var.x1 = 2*p*(1-p);
beta1 = sqrt(r2.1/( (1-r2.1)*var.x1 ) );
#### var(X2) = var(e2+X1*beta1)
var.x2 = 1+(beta1*beta1)*var.x1;
beta2 = sqrt(r2.2/( (1-r2.2)*var.x2 ) );
#### var(X3)=var(e3+X2*beta2)
var.x3 = 1+(beta2*beta2)*var.x2;
c=log(2)
nfold = 5;
beta.r2.3<-effect.sizes.PRT(nfold, alpha, p, beta1, beta2, var.x3);
beta3<-beta.r2.3[,1];
r2.3<-beta.r2.3[,2];
h2<-r2.1*r2.2*r2.3; #### SNP heritability
#### Computing powers for various sample sizes
################################################################################
sample.size=c(0:18296);
#### Under SRS design
SNP.pwr.SRS.n <- SNP.Geno.pwr.SRS(G.Ms, beta1[hi], beta2[hi], beta3[hi], NSL, sample.size);
RNA.pwr.SRS.n <- RNA.Expr.pwr.SRS(G.Ms, beta1[hi], beta2[hi], beta3[hi], NSL, sample.size);
PRT.pwr.SRS.n <- PRT.Expr.pwr.SRS(G.Ms, beta1[hi], beta2[hi], beta3[hi], NSL, sample.size);
#### Under EPS design
## Lemma 2: For given vectors of effect sizes, searching for lower and upper
## quantiles of Y. These quantiles work for all tests under the EPS
tau.LU<-LU.MCM(beta1, beta2, beta3, p, alpha);
## Lemma 3-4: Computing noncentrality parameters of the three t tests
LT2.SNP.EPS <- L2T2.SNP.EPS(tau.LU, beta1, beta2, beta3, p, alpha);
LT2.RNA.EPS <- L2T2.RNA.EPS(tau.LU, beta1, beta2, beta3, p, alpha);
LT2.PRT.EPS <- L2T2.PRT.EPS(tau.LU, beta1, beta2, beta3, p, alpha);
halfss=sample.size/2;
SNP.pwr.EPS.n<-MCVM.Power(matrix(LT2.SNP.EPS[hi,], 1, 2), NSL, n=halfss);
RNA.pwr.EPS.n<-MCVM.Power(matrix(LT2.RNA.EPS[hi,], 1, 2), NSL, n=halfss);
PRT.pwr.EPS.n<-MCVM.Power(matrix(LT2.PRT.EPS[hi,], 1, 2), NSL, n=halfss);
plot.power.sample.size(sample.size, PRT.pwr.EPS.n, RNA.pwr.EPS.n, SNP.pwr.EPS.n, PRT.pwr.SRS.n, RNA.pwr.SRS.n, SNP.pwr.SRS.n, xlimit=c(50, 600), fig_id=4, path)
Power.Sample.Comp<-cbind(sample.size, PRT.pwr.EPS.n, RNA.pwr.EPS.n, SNP.pwr.EPS.n, PRT.pwr.SRS.n, RNA.pwr.SRS.n, SNP.pwr.SRS.n);
colnames(Power.Sample.Comp)<-c("Sample.size", "PRT.pwr.EPS.n", "RNA.pwr.EPS.n", "SNP.pwr.EPS.n", "PRT.pwr.SRS.n", "RNA.pwr.SRS.n", "SNP.pwr.SRS.n");
write.table(Power.Sample.Comp, paste(path, "Fig4.txt"), append = FALSE, sep=" ", quote = FALSE, col.names = TRUE, row.names = FALSE)
|
# Simulate data for an N-mixture (binomial mixture) model:
# latent abundance N_i ~ Poisson(lambda_i), counts y_ij ~ Binomial(N_i, p_j).
set.seed(35)
nSites <- 16
nVisits <- 4
x <- rnorm(nSites)  # a site-level covariate
beta0 <- 0
beta1 <- 1
lambda <- exp(beta0 + beta1*x)  # expected counts at each site
N <- rpois(nSites, lambda)      # latent abundance
y <- matrix(NA, nSites, nVisits)
p <- c(0.3, 0.6, 0.8, 0.5)      # detection prob for each visit
for(j in 1:nVisits) {
  y[,j] <- rbinom(nSites, N, p[j])
}
# Organize data (requires the `unmarked` package)
visitMat <- matrix(as.character(1:nVisits), nSites, nVisits, byrow=TRUE)
umf <- unmarkedFramePCount(y=y, siteCovs=data.frame(x=x),
    obsCovs=list(visit=visitMat))
summary(umf)
# Fit a model: visit-specific detection (no intercept), abundance ~ x
fm1 <- pcount(~visit-1 ~ x, umf, K=50)
fm1
plogis(coef(fm1, type="det")) # Should be close to p
# Empirical Bayes estimation of random effects
(fm1re <- ranef(fm1))
# fix: "\%in\%" is Rd-file escaping that leaked into this script and does
# not parse as R; the operator is %in%
plot(fm1re, subset=site %in% 1:25, xlim=c(-1,40))
sum(bup(fm1re)) # Estimated population size
sum(N)          # Actual population size
| /code/Pcount_simulation.R | no_license | dlizcano/SeaUrchin | R | false | false | 960 | r |
# Simulate data
set.seed(35)
nSites <- 16
nVisits <- 4
x <- rnorm(nSites) # a covariate
beta0 <- 0
beta1 <- 1
lambda <- exp(beta0 + beta1*x) # expected counts at each site
N <- rpois(nSites, lambda) # latent abundance
y <- matrix(NA, nSites, nVisits)
p <- c(0.3, 0.6, 0.8, 0.5) # detection prob for each visit
for(j in 1:nVisits) {
y[,j] <- rbinom(nSites, N, p[j])
}
# Organize data
visitMat <- matrix(as.character(1:nVisits), nSites, nVisits, byrow=TRUE)
umf <- unmarkedFramePCount(y=y, siteCovs=data.frame(x=x),
obsCovs=list(visit=visitMat))
summary(umf)
# Fit a model
fm1 <- pcount(~visit-1 ~ x, umf, K=50)
fm1
plogis(coef(fm1, type="det")) # Should be close to p
# Empirical Bayes estimation of random effects
(fm1re <- ranef(fm1))
plot(fm1re, subset=site \%in\% 1:25, xlim=c(-1,40))
sum(bup(fm1re)) # Estimated population size
sum(N) # Actual population size
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SZVD_ADMM.R
\name{SZVD_ADMM}
\alias{SZVD_ADMM}
\title{Alternating Direction Method of Multipliers for SZVD}
\usage{
SZVD_ADMM(B, N, D, sols0, pen_scal, gamma, beta, tol, maxits, quiet = TRUE)
}
\arguments{
\item{B}{Between class covariance matrix for objective (in space defined by N).}
\item{N}{basis matrix for null space of covariance matrix W.}
\item{D}{penalty dictionary/basis.}
\item{sols0}{initial solutions sols0$x, sols0$y, sols0$z}
\item{pen_scal}{penalty scaling term.}
\item{gamma}{l1 regularization parameter}
\item{beta}{penalty term controlling the splitting constraint.}
\item{tol}{tol$abs = absolute error, tol$rel = relative error to be
achieved to declare convergence of the algorithm.}
\item{maxits}{maximum number of iterations of the algorithm to run.}
\item{quiet}{toggles between displaying intermediate statistics.}
}
\value{
\code{SZVD_ADMM} returns an object of \code{\link{class}} "\code{SZVD_ADMM}" including a list
with the following named components
\describe{
\item{\code{x,y,z}}{Iterates at termination.}
\item{\code{its}}{Number of iterations required to converge.}
\item{\code{errtol}}{Stopping error bound at termination}
}
}
\description{
Iteratively solves the problem
\deqn{\min(-1/2*x^TB^Tx + \gamma p(y): ||x||_2 \leq 1, DNx = y)}
}
\details{
This function is used by other functions and should only be called explicitly for
debugging purposes.
}
\seealso{
Used by: \code{\link{SZVDcv}}.
}
\keyword{internal}
| /man/SZVD_ADMM.Rd | no_license | gumeo/accSDA | R | false | true | 1,545 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SZVD_ADMM.R
\name{SZVD_ADMM}
\alias{SZVD_ADMM}
\title{Alternating Direction Method of Multipliers for SZVD}
\usage{
SZVD_ADMM(B, N, D, sols0, pen_scal, gamma, beta, tol, maxits, quiet = TRUE)
}
\arguments{
\item{B}{Between class covariance matrix for objective (in space defined by N).}
\item{N}{basis matrix for null space of covariance matrix W.}
\item{D}{penalty dictionary/basis.}
\item{sols0}{initial solutions sols0$x, sols0$y, sols0$z}
\item{pen_scal}{penalty scaling term.}
\item{gamma}{l1 regularization parameter}
\item{beta}{penalty term controlling the splitting constraint.}
\item{tol}{tol$abs = absolute error, tol$rel = relative error to be
achieved to declare convergence of the algorithm.}
\item{maxits}{maximum number of iterations of the algorithm to run.}
\item{quiet}{toggles between displaying intermediate statistics.}
}
\value{
\code{SZVD_ADMM} returns an object of \code{\link{class}} "\code{SZVD_ADMM}" including a list
with the following named components
\describe{
\item{\code{x,y,z}}{Iterates at termination.}
\item{\code{its}}{Number of iterations required to converge.}
\item{\code{errtol}}{Stopping error bound at termination}
}
}
\description{
Iteratively solves the problem
\deqn{\min(-1/2*x^TB^Tx + \gamma p(y): ||x||_2 \leq 1, DNx = y)}
}
\details{
This function is used by other functions and should only be called explicitly for
debugging purposes.
}
\seealso{
Used by: \code{\link{SZVDcv}}.
}
\keyword{internal}
|
#####################################
# Load the libraries you need
#####################################
library(caret)
# Load libraries here, not after the call of RNGkind below, so that attaching
# packages cannot disturb the RNG configuration fixed by RNGkind().
#########################################################################
# Ignore the warning:
# RNGkind(sample.kind = "Rounding") : non-uniform 'Rounding' sampler used
#########################################################################
# Use the pre-R-3.6.0 "Rounding" sampler so that sample() reproduces the
# historical sequence checked by test_sample() below.
RNGkind(sample.kind = "Rounding")
#####################################
# Some tests: do not change
#####################################
# Sanity check of the RNG configuration: with seed 1234 the default uniform
# generator must reproduce the reference draws below; stops with an error
# otherwise, returns NULL (invisibly, via the falsy `if`) when everything is ok.
test_runif <- function() {
  set.seed(1234)
  observed <- runif(5)
  expected <- c(0.1137034113053232, 0.6222994048148394, 0.6092747328802943,
                0.6233794416766614, 0.8609153835568577)
  if (sum(abs(observed - expected)) > 1e-10) {
    stop("Test runif ni ok (has failed)")
  }
}
# Sanity check of sample(): with seed 1234 and the "Rounding" sample kind
# (configured earlier via RNGkind), sample(20) must reproduce the reference
# permutation below; stops with an error otherwise.
test_sample <- function() {
  set.seed(1234)
  observed <- sample(20)
  expected <- c(3, 12, 11, 18, 14, 10, 1, 4, 8, 6, 7, 5, 20, 15, 2, 9, 17, 16, 19, 13)
  if (sum(abs(observed - expected)) > 0) {
    stop("Test sample ni ok (has failed)")
  }
}
# Run the RNG sanity checks before any stochastic work.
test_runif()
test_sample()
#####################################
# Load the necessary functions
#####################################
# setwd("path to directory")
naloga_problem = 1
source(sprintf("funkcije%d.R", naloga_problem))  # loads funkcije1.R
#########################################################################
# NEURAL NETWORKS
#########################################################################
#########################################################################
# 3.1 SIMPLE PERCEPTRON
#########################################################################
source("kviz3.R")                       # quiz helpers; presumably provides naloziSliko() -- confirm
podatki31 <- read.csv("podatki31.csv")  # three feature columns plus label y in {-1, 1} (see enostavni_perceptron)
slika = naloziSliko(); image = slika;   # RGB image array, used in task 3.3
# Fixed perceptron weights; w3 is the remaining free weight solved for below.
w0 <- -0.2989601680217311
w1 <- 0.1
w2 <- 0.2
# Find a weight w3 such that the perceptron w0 + w1*x1 + w2*x2 + w3*x3
# separates the two classes in `podatki31` (labels y in {-1, 1}).
# Relies on globals: podatki31, w0, w1, w2.
# NOTE(review): uses %>% and filter(), so dplyr/magrittr must be attached
# (presumably via one of the sourced files -- confirm); library(caret)
# alone does not attach them.
enostavni_perceptron <- function(){
  n <- nrow(podatki31)
  podatki_pozitivni <- podatki31 %>% filter(podatki31$y == 1)
  podatki_negativni <- podatki31 %>% filter(podatki31$y == -1)
  n1 <- nrow(podatki_pozitivni)
  n2 <- n - n1
  pozitivni <- rep(0,n1)
  negativni <- rep(0,n2)
  # Each positive example needs a positive activation, which (assuming
  # x3 > 0) gives a lower bound on w3: w3 > -(w0 + w1*x1 + w2*x2)/x3.
  for (i in 1:n1){
    sum <- w0 + w1 * podatki_pozitivni[i,1]+ w2 * podatki_pozitivni[i,2]  # local `sum` shadows base::sum
    pogoj <- -sum/podatki_pozitivni[i,3]
    pozitivni[i] <- pogoj
  }
  # w3 must be greater than all of these bounds, i.e. than the largest one
  najvecji <- max(pozitivni)
  # Each negative example analogously gives an upper bound on w3.
  for (i in 1:n2){
    sum <- w0 + w1 * podatki_negativni[i,1]+ w2 * podatki_negativni[i,2]
    pogoj <- -sum/podatki_negativni[i,3]
    negativni[i] <- pogoj
  }
  # w3 must be smaller than all of these bounds, i.e. than the smallest one
  najmanjsi <- min(negativni)
  if(najvecji < najmanjsi){
    return(najvecji)  # the maximal lower bound; any value in (najvecji, najmanjsi) separates
  }else{
    print("napaka")  # "error": no separating w3 exists
  }
}
w3 <- enostavni_perceptron()
# Check whether the chosen w3 perfectly separates the classes: returns 1 if
# sign(w0 + w1*x1 + w2*x2 + w3*x3) never disagrees with the label y, else 0.
# (sign(v) == 0 counts as agreement here, since 0 * y is not < 0.)
preveri_w = function(w3){
  data = read.csv("podatki31.csv")  # re-reads the file instead of reusing the podatki31 global
  y = data$y
  for (i in 1:nrow(data)){
    v = w0 + w1 * data[i,1]+ w2 * data[i,2] + w3 * data[i,3]
    predznak = sign(v)
    if (predznak * y[i] < 0){
      return(0)  # found a misclassified example
    }
  }
  return(1)  # all examples on the correct side
}
preveri_w(w3)
#########################################################################
# 3.2 HOW LARGE ARE THE NETWORKS?
#########################################################################
# One convolutional stage of the network: `st_konvolucij` 3x3 convolutions
# with `filter` output channels (each convolution shrinks the spatial
# dimension by 2), after which the dimension is halved (pooling step).
# Each filter carries 3*3*kanal weights plus one bias.
# Returns c(new dimension, channels out, filters, weights added this stage).
korak <- function(dim, kanal, filter, st_konvolucij) {
  skupaj_utezi <- 0
  for (konv in 1:st_konvolucij) {
    skupaj_utezi <- skupaj_utezi + filter * (9 * kanal + 1)  # one 3x3 conv layer
    dim <- dim - 2
    kanal <- filter
  }
  c(floor(dim / 2), kanal, filter, skupaj_utezi)
}
# Per-stage configuration: number of 3x3 convolutions and number of filters
# in each of the five convolutional stages.
k <- data.frame(st_konvolucij = c(2,2,3,3,3),
                filter = c(64,128,256,512,512))
# Total number of weights in all convolutional stages, starting from a
# 224x224 single-channel input and chaining korak() over the rows of k.
utezi <- function(k){
  dim = 224
  kanal = 1
  filter = 64
  st_utezi = 0
  for (j in 1:nrow(k)){
    st_konvolucij <- k[j,1]
    filter <- k[j,2]
    rezultat <- korak(dim,kanal,filter,st_konvolucij)
    # carry this stage's output geometry into the next stage
    dim <- rezultat[1]
    kanal <- rezultat[2]
    filter <- rezultat[3]
    st_utezi <- st_utezi + rezultat[4]
  }
  return(st_utezi)
}
# Grand total: convolutional weights plus two 4096-unit fully connected
# layers (the first fed by 512 inputs) and a 2-unit output layer, each
# counting one bias per unit.
utezi(k) + 4096 * ( 512 + 1) + 4096 * (4096+1) + 2 * (4096 + 1)
#########################################################################
# 3.3 FIND THE BLUE SQUARE
#########################################################################
# Scan an n x n RGB image row by row for the first pixel whose red and
# green channels are zero and whose blue channel is positive -- the
# top-left corner of the blue square -- and return the coordinates of the
# square's centre (the square is 5x5, so the centre is corner + 2).
moder_kvadrat <- function(slika){
  n <- nrow(slika)
  for (vrstica in 1:n) {
    for (stolpec in 1:n) {
      je_moder <- slika[vrstica, stolpec, 1] == 0 &&
        slika[vrstica, stolpec, 2] == 0 &&
        slika[vrstica, stolpec, 3] > 0
      if (je_moder) {
        return(c(vrstica + 2, stolpec + 2))
      }
    }
  }
}
# Centre of the blue square in the loaded image; the final quiz answer is a
# fixed linear combination of its coordinates.
sredisce <- moder_kvadrat(slika)
97 * sredisce[1] + 101 * sredisce[2]
| /DomacaNaloga2/arhiv/NALOGA3Katarina.r | no_license | tinarazic/machine_learning | R | false | false | 4,801 | r | #####################################
# nalozi knjiznice, ki jih potrebujes
# load the libraries you need
#####################################
library(caret)
# nalozi jih tukaj, ne po klicu RNGkind spodaj
# load them here and not after the call of the RNGkind method below
#########################################################################
# Ignoriraj opozorilo (ignore the warning)
# RNGkind(sample.kind = "Rounding") : non-uniform 'Rounding' sampler used
#########################################################################
RNGkind(sample.kind = "Rounding")
#####################################
# Nekaj testov: ne spreminjaj
# Some tests: do not change
#####################################
test_runif = function(){
set.seed(1234)
x = runif(5);
x1 = c(0.1137034113053232, 0.6222994048148394, 0.6092747328802943, 0.6233794416766614, 0.8609153835568577)
if (sum(abs(x - x1)) > 10^-10){
stop("Test runif ni ok (has failed)")
}
}
test_sample = function(){
set.seed(1234)
x = sample(20);
x1 = c(3, 12, 11, 18, 14, 10, 1, 4, 8, 6, 7, 5, 20, 15, 2, 9, 17, 16, 19, 13)
if (sum(abs(x - x1)) > 0){
stop("Test sample ni ok (has failed)")
}
}
test_runif()
test_sample()
#####################################
# Nalozi se potrebne funkcije
# Load the necessary functions
#####################################
# setwd("pot do mape (path to directory)")
naloga_problem = 1
source(sprintf("funkcije%d.R", naloga_problem))
#########################################################################
# NEVRONSKE MREŽE
#########################################################################
#########################################################################
# 3.1 ENOSTAVNI PERCEPTRON
#########################################################################
source("kviz3.R")
podatki31 <- read.csv("podatki31.csv")
slika = naloziSliko(); image = slika;
w0 <- -0.2989601680217311
w1 <- 0.1
w2 <- 0.2
enostavni_perceptron <- function(){
n <- nrow(podatki31)
podatki_pozitivni <- podatki31 %>% filter(podatki31$y == 1)
podatki_negativni <- podatki31 %>% filter(podatki31$y == -1)
n1 <- nrow(podatki_pozitivni)
n2 <- n - n1
pozitivni <- rep(0,n1)
negativni <- rep(0,n2)
for (i in 1:n1){
sum <- w0 + w1 * podatki_pozitivni[i,1]+ w2 * podatki_pozitivni[i,2]
pogoj <- -sum/podatki_pozitivni[i,3]
pozitivni[i] <- pogoj
}
# w3 mora biti večji od vseh, tudi od največjega pogoja
najvecji <- max(pozitivni)
for (i in 1:n2){
sum <- w0 + w1 * podatki_negativni[i,1]+ w2 * podatki_negativni[i,2]
pogoj <- -sum/podatki_negativni[i,3]
negativni[i] <- pogoj
}
# w3 mora biti manjši od vseh, tudi od najmanjšega pogoja
najmanjsi <- min(negativni)
if(najvecji < najmanjsi){
return(najvecji)
}else{
print("napaka")
}
}
w3 <- enostavni_perceptron()
# funkcija preveri, če izbran w3 popolnoma loči razreda
preveri_w = function(w3){
data = read.csv("podatki31.csv")
y = data$y
for (i in 1:nrow(data)){
v = w0 + w1 * data[i,1]+ w2 * data[i,2] + w3 * data[i,3]
predznak = sign(v)
if (predznak * y[i] < 0){
return(0)
}
}
return(1)
}
preveri_w(w3)
#########################################################################
# 3.2 KAKO VELIKE SO MREŽE?
#########################################################################
korak <- function(dim,kanal,filter,st_konvolucij){
utezi = 0
for (i in 1:st_konvolucij){
konvolucija <- filter * (9 * kanal + 1)
#print(konvolucija)
utezi <- utezi + konvolucija
dim = dim - 2
kanal = filter
}
dim = floor(dim/2)
return(c(dim,kanal,filter,utezi))
}
k <- data.frame(st_konvolucij = c(2,2,3,3,3),
filter = c(64,128,256,512,512))
utezi <- function(k){
dim = 224
kanal = 1
filter = 64
st_utezi = 0
for (j in 1:nrow(k)){
st_konvolucij <- k[j,1]
filter <- k[j,2]
rezultat <- korak(dim,kanal,filter,st_konvolucij)
dim <- rezultat[1]
kanal <- rezultat[2]
filter <- rezultat[3]
st_utezi <- st_utezi + rezultat[4]
}
return(st_utezi)
}
utezi(k) + 4096 * ( 512 + 1) + 4096 * (4096+1) + 2 * (4096 + 1)
#########################################################################
# 3.3 ODKRIJMO MODER KVADRAT
#########################################################################
moder_kvadrat <- function(slika){
n <- nrow(slika)
for(i in 1:n){
for(j in 1:n){
if(
slika[i,j,1] == 0 &
slika[i,j,2] == 0 &
slika[i,j,3] > 0
){
# c(i,j) bo zgornji levi kot
# prištejemo 2 obema koordinatama, da dobimo središče
# vemo, da je dimenzija kvadrata 5x5
return(c(i + 2 ,j + 2))
}
}
}
}
sredisce <- moder_kvadrat(slika)
97 * sredisce[1] + 101 * sredisce[2]
|
\name{runs-methods}
\docType{methods}
\alias{runs-methods}
\alias{runs<--methods}
\alias{runs}
\alias{runs<-}
\alias{runs,SeqDataFrames-method}
\alias{runs,Dataclass-method}
\alias{runs<-,Simulation-method}
\alias{runs<-,Contsimulation-method}
\title{ Methods for Function runs in Package `distrSim'}
\description{runs-methods}
\section{Methods}{\describe{
\item{runs}{\code{signature(object = "SeqDataFrames")}: returns the number of runs }
\item{runs}{\code{signature(object = "Dataclass")}: returns the number of runs }
\item{runs<-}{\code{signature(object = "Simulation")}: changes the number of runs }
\item{runs<-}{\code{signature(object = "Contsimulation")}: changes the number of runs }
}}
\keyword{methods}
\concept{simulation}
\concept{S4 simulation class}
\concept{runs}
\concept{accessor function}
\concept{replacement function}
| /man/runs-methods.Rd | no_license | cran/distrSim | R | false | false | 869 | rd | \name{runs-methods}
\docType{methods}
\alias{runs-methods}
\alias{runs<--methods}
\alias{runs}
\alias{runs<-}
\alias{runs,SeqDataFrames-method}
\alias{runs,Dataclass-method}
\alias{runs<-,Simulation-method}
\alias{runs<-,Contsimulation-method}
\title{ Methods for Function runs in Package `distrSim'}
\description{runs-methods}
\section{Methods}{\describe{
\item{runs}{\code{signature(object = "SeqDataFrames")}: returns the number of runs }
\item{runs}{\code{signature(object = "Dataclass")}: returns the number of runs }
\item{runs<-}{\code{signature(object = "Simulation")}: changes the number of runs }
\item{runs<-}{\code{signature(object = "Contsimulation")}: changes the number of runs }
}}
\keyword{methods}
\concept{simulation}
\concept{S4 simulation class}
\concept{runs}
\concept{accessor function}
\concept{replacement function}
|
# Time series lab script. Uses the astsa package (sarima, acf2, sarima.for
# and the gnp, rec, cmort, tempr, part series) and TSA::runs for runs tests.
# --- ARIMA fits to log GNP ---
mod1=sarima(log(gnp),1,1,0)
mod1
mod2=sarima(log(gnp),0,1,2)
mod2
# Residual diagnostics for the ARIMA(1,1,0) fit
rs=residuals(mod1$fit)
plot.ts(rs)
hist(rs,20)
pacf(rs)
TSA::runs(rs)   # runs test for randomness of the residuals
# --- ARMA fits to the recruitment series ---
mod1=sarima(rec,5,0,5)
TSA::runs(residuals(mod1$fit))
mod1=sarima(rec,1,0,3)
TSA::runs(residuals(mod1$fit))
# --- Regression with autocorrelated errors: cardiovascular mortality ---
trend=time(cmort)
temp=as.numeric(tempr-mean(tempr))
fit=lm(cmort~trend+poly(temp,2)+part, na.action=NULL)
plot(cmort)
et=residuals(fit)
plot(et)
res=sarima(et,2,0,0)              # AR(2) model for the regression residuals
mean(residuals(res$fit)^2)
# Same structure fitted jointly: regression with AR(2) errors
res2=sarima(cmort,2,0,0, xreg=cbind(trend,temp, temp2=temp^2,part))
res2
mean(residuals(res2$fit)^2)
# --- Pure seasonal AR: period 12, seasonal coefficient 0.9 ---
phi=c(rep(0,11),0.9)
data=arima.sim(list(order=c(12,0,0), ar=phi), n=100)
plot(data)
title("ARIMA(1,0)_12 with phi=0.9")
acf2(data)
ACF=ARMAacf(ar=phi, ma=0, lag.max = 100)
PACF=ARMAacf(ar=phi, ma=0, lag.max = 100, pacf = TRUE)  # fix: TRUE, not T (T is reassignable)
plot(ACF, type="h", xlab="Lag"); abline(h=0)
plot(PACF, type="h", xlab="Lag"); abline(h=0)
# --- Mixed model: seasonal AR (period 12) with a nonseasonal MA(1) ---
phi=c(rep(0,11),0.8)
theta=-0.5
data=arima.sim(list(order=c(12,0,1), ar=phi, ma=theta), n=100)
par(mfrow=c(1,1))
plot(data)
title("ARMA(1,0)x(1,0)_12")
acf2(data,max.lag = 50)
ACF=ARMAacf(ar=phi, ma=theta, lag.max = 50)
PACF=ARMAacf(ar=phi, ma=theta, lag.max = 50, pacf = TRUE)  # fix: TRUE, not T
par(mfrow=c(2,1))
plot(ACF, type="h", xlab="Lag"); abline(h=0)
plot(PACF, type="h", xlab="Lag"); abline(h=0)
# --- Deterministic seasonality plus noise, removed by seasonal differencing ---
x=1:200
y=sin(2*pi/12*x)+rnorm(200,0,0.1)
plot.ts(y)
acf2(y)
mod=sarima(y,0,0,0,0,1,0, S=12)
plot(residuals(mod$fit))
# --- SARIMA modelling of the AirPassengers series ---
x=AirPassengers
lx=log(x);dlx=diff(lx);ddlx=diff(dlx,12)   # log, first and seasonal differences
plot.ts(cbind(x,lx,dlx,ddlx), main="")
acf(dlx, lag.max = 50)
acf2(ddlx)
m1=sarima(lx,1,1,1,1,1,0, S=12)
m1$fit
m2=sarima(lx,1,1,1,0,1,1, S=12)
m2$fit
m3=sarima(lx,0,1,1,0,1,1, S=12)
m3$fit
sarima.for(lx,24,0,1,1,0,1,1,12)           # 24-step-ahead forecast from the final model
| /Master_subjects/Time_Series_Analysis/code/code6.R | no_license | Ganson2018/MasterStatistics-MachineLearning | R | false | false | 1,652 | r | mod1=sarima(log(gnp),1,1,0)
mod1
mod2=sarima(log(gnp),0,1,2)
mod2
rs=residuals(mod1$fit)
plot.ts(rs)
hist(rs,20)
pacf(rs)
TSA::runs(rs)
mod1=sarima(rec,5,0,5)
TSA::runs(residuals(mod1$fit))
mod1=sarima(rec,1,0,3)
TSA::runs(residuals(mod1$fit))
trend=time(cmort)
temp=as.numeric(tempr-mean(tempr))
fit=lm(cmort~trend+poly(temp,2)+part, na.action=NULL)
plot(cmort)
et=residuals(fit)
plot(et)
res=sarima(et,2,0,0)
mean(residuals(res$fit)^2)
res2=sarima(cmort,2,0,0, xreg=cbind(trend,temp, temp2=temp^2,part))
res2
mean(residuals(res2$fit)^2)
phi=c(rep(0,11),0.9)
data=arima.sim(list(order=c(12,0,0), ar=phi), n=100)
plot(data)
title("ARIMA(1,0)_12 with phi=0.9")
acf2(data)
ACF=ARMAacf(ar=phi, ma=0, lag.max = 100)
PACF=ARMAacf(ar=phi, ma=0, lag.max = 100, pacf = T)
plot(ACF, type="h", xlab="Lag"); abline(h=0)
plot(PACF, type="h", xlab="Lag"); abline(h=0)
phi=c(rep(0,11),0.8)
theta=-0.5
data=arima.sim(list(order=c(12,0,1), ar=phi, ma=theta), n=100)
par(mfrow=c(1,1))
plot(data)
title("ARMA(1,0)x(1,0)_12")
acf2(data,max.lag = 50)
ACF=ARMAacf(ar=phi, ma=theta, lag.max = 50)
PACF=ARMAacf(ar=phi, ma=theta, lag.max = 50, pacf = T)
par(mfrow=c(2,1))
plot(ACF, type="h", xlab="Lag"); abline(h=0)
plot(PACF, type="h", xlab="Lag"); abline(h=0)
x=1:200
y=sin(2*pi/12*x)+rnorm(200,0,0.1)
plot.ts(y)
acf2(y)
mod=sarima(y,0,0,0,0,1,0, S=12)
plot(residuals(mod$fit))
x=AirPassengers
lx=log(x);dlx=diff(lx);ddlx=diff(dlx,12)
plot.ts(cbind(x,lx,dlx,ddlx), main="")
acf(dlx, lag.max = 50)
acf2(ddlx)
m1=sarima(lx,1,1,1,1,1,0, S=12)
m1$fit
m2=sarima(lx,1,1,1,0,1,1, S=12)
m2$fit
m3=sarima(lx,0,1,1,0,1,1, S=12)
m3$fit
sarima.for(lx,24,0,1,1,0,1,1,12)
|
# This is the file for plot4.r
# Read the data and convert the date and time strings using lubridate.
edata=read.table("household_power_consumption.txt",sep=";",header =TRUE)
head(edata)
str(edata)
library(lubridate)
# Paste Date and Time into one column and parse it as POSIXct (dmy_hms
# parses in UTC by default).
edata$datetime=paste(edata$Date,edata$Time)
edata$datetime=dmy_hms(edata$datetime)
# Convert the measurement columns to numeric; per the original author,
# column 9 was already numeric.
for (i in 3:8) edata[,i]=as.numeric(as.character(edata[,i]))
str(edata)
# Limit the data to 2007-02-01 and 2007-02-02.
# fix: the original compared unclass(POSIXct) (seconds since epoch) with
# unclass(ymd(...)) -- a Date, i.e. days since epoch -- which selects the
# wrong rows with current lubridate. Compare POSIXct values directly,
# using the same UTC timezone that dmy_hms produced.
start_time <- as.POSIXct("2007-02-01", tz = "UTC")
end_time <- as.POSIXct("2007-02-03", tz = "UTC")
new_data=edata[edata$datetime >= start_time & edata$datetime < end_time,]
str(new_data)
# Remove the original data frame to free up memory.
rm(edata)
# Open the png device: 480x480 px, 2x2 grid of plots.
png(file="plot4.png",width=480,height=480)
par(mfrow=c(2,2))
# Plot 1 (row 1, col 1): global active power over time.
plot(new_data$datetime,new_data$Global_active_power,ylab="Global Active Power",xlab="",type="l")
# Plot 2 (row 1, col 2): voltage over time.
plot(new_data$datetime,new_data$Voltage,ylab="Voltage",xlab="datetime",type="l")
# Plot 3 (row 2, col 1): the three sub-metering series overlaid.
plot(new_data$datetime,new_data$Sub_metering_1,ylab="Energy sub metering",xlab="",type="l")
# Overlay the next series; match the y-limits and hide the axes.
# par(new=TRUE) keeps drawing on the current plot.
par(new=TRUE)
plot(new_data$datetime,new_data$Sub_metering_2,axes=FALSE,ylim=c(0,40),ylab="",xlab="",type="l",col="red")
par(new=TRUE)
plot(new_data$datetime,new_data$Sub_metering_3,axes=FALSE,ylim=c(0,40),ylab="",xlab="",type="l",col="blue")
# Legend box in the top right, without the bounding box.
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"),cex=0.9,bty="n",xjust=1)
# Plot 4 (row 2, col 2): global reactive power over time.
plot(new_data$datetime,new_data$Global_reactive_power,ylab="Global_reactive_power",xlab="datetime",type="l")
# Close the device, saving plot4.png.
dev.off()
| /plot4.R | no_license | mittasuresh/ExData_Plotting1 | R | false | false | 2,359 | r | # This is the file for plot4.r
# read the data and convert date and time strings to right format using lubridate library
edata=read.table("household_power_consumption.txt",sep=";",header =TRUE)
head(edata)
str(edata)
library(lubridate)
# paste Date and Time into one column and convert it to POSIXct format using lubridate command
edata$datetime=paste(edata$Date,edata$Time)
edata$datetime=dmy_hms(edata$datetime)
#convert other columns to numeric, for some reason column 9 was already a numeric
for (i in 3:8) edata[,i]=as.numeric(as.character(edata[,i]))
str(edata)
#limit data to the dates of interest, note I used unclass function to get to the integer values of time and date
new_data=edata[which(unclass(edata$datetime) >= unclass(ymd("2007-02-01")) & unclass(edata$datetime)< unclass(ymd("2007-02-03"))),]
str(new_data)
#remove original file to freeup memory
rm(edata)
# initiate the print device, in this case png to create a png file, also give width and height
png(file="plot4.png",width=480,height=480)
par(mfrow=c(2,2))
#this is plot1 (row1, col1)
plot(new_data$datetime,new_data$Global_active_power,ylab="Global Active Power",xlab="",type="l")
# this is plot # 2 (row1, col 2)
plot(new_data$datetime,new_data$Voltage,ylab="Voltage",xlab="datetime",type="l")
#This is Plot#3 (row2, Col1)
#Plot the first graph, line graph with Y label but no X label
plot(new_data$datetime,new_data$Sub_metering_1,ylab="Energy sub metering",xlab="",type="l")
# Overlay next graph, make sure the Y limit matches the first one, and also turn off axes
# par() functions makes sure we are still working on the earlier plot
par(new=TRUE)
plot(new_data$datetime,new_data$Sub_metering_2,axes=FALSE,ylim=c(0,40),ylab="",xlab="",type="l",col="red")
# Overlay the last one, same as the second with changed variable and color
par(new=TRUE)
plot(new_data$datetime,new_data$Sub_metering_3,axes=FALSE,ylim=c(0,40),ylab="",xlab="",type="l",col="blue")
# Add a legend box on the top right without the bounding box
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","red","blue"),cex=0.9,bty="n",xjust=1)
# this is plot # 4 in the grid
plot(new_data$datetime,new_data$Global_reactive_power,ylab="Global_reactive_power",xlab="datetime",type="l")
#Save plot to to file name plot4.png, turn off the printing device
dev.off()
|
#! /usr/bin/env Rscript
# Parallel driver for PSO / BBPSO spatial-design experiments on a kriging
# objective: builds the grid of algorithm specifications, runs each spec on
# its own worker, and saves per-spec and combined results.
library(parallel)
library(doParallel)
library(foreach)
library(sp)
library(maptools)
library(rgeos)
library(mnormt)
source("../psofun.R")    # provides spso(), sbbpso(), movetoboundary(), ...
source("krigingfun.R")   # provides the objectives sig2fuk.mean(), sig2fuk.max()
load("datlist.Rdata")    # provides datlist, including the design-region polygon datlist$poly
datlist$sppoly <- SpatialPolygons(list(b=Polygons(list(a=datlist$poly), "a")))
ncores <- detectCores() - 4
registerDoParallel(ncores)
# Swarm / iteration settings
nswarm <- 40
niter <- 2000
nrep <- 1
# Two candidate parameter sets for the PSO coefficients (indexed by parset)
inertias <- c(0.7298, 1/(log(2)*2))
cognitives <- c(1.496, log(2) + 1/2)
socials <- c(1.496, log(2) + 1/2)
nnbors <- c(3, 40)       # neighbourhood sizes (40 = whole swarm)
alpha <- 0.2*niter       # passed to spso() -- presumably an adaptation horizon; confirm in psofun.R
beta <- 2
rates <- c(0.3, 0.5)     # adaptation rates for the AT1 / AT2 styles
ccc <- 0.1
df <- 1
pcuts <- c(0, 0.5)       # BBPSO `pcut` values, one per parameter set
sig0 <- 1
inertia0 <- 1.2
ndesign <- 100           # design points per particle; decision vector has 2*ndesign coords
# Box constraints: every design point bounded by the polygon's bounding box
lower <- rep(apply(datlist$poly@coords, 2, min), each = ndesign)
upper <- rep(apply(datlist$poly@coords, 2, max), each = ndesign)
time <- 0:niter
parsets <- 1:2
CFs <- c("CF", "nCF")
objnames <- c("sig2fuk.mean", "sig2fuk.max")
# Specification strings of the form style-alg-parset-CF-nnbor-objective,
# crossing PSO styles (CI/DI/AT1/AT2) and BBPSO styles (AT1/AT2) with the
# parameter sets, CF flags, neighbourhood sizes and both objectives.
specs <- c(outer(c(outer(c(outer(paste(c("CI", "DI", "AT1", "AT2"), "PSO", sep="-"), parsets, paste, sep = "-")),
                         c(outer(CFs, nnbors, paste, sep="-")), paste, sep="-"),
                   c(outer(c(outer(paste(c("AT1", "AT2"), "BBPSO", sep="-"), parsets, paste, sep = "-")),
                           c(outer(CFs, nnbors, paste, sep="-")), paste, sep="-"))), objnames, paste, sep="-"))
# One reproducible RNG seed per specification
set.seed(234132)
seeds <- rnorm(length(specs))
# Run one experiment specification. Parses the i-th spec string
# (style-alg-parset-CF-nnbor-objective), seeds the RNG, dispatches to
# sbbpso() or spso(), and returns (and saves to "<spec>.RData") a list with
# `values` (objective trace per iteration) and `pars` (best design found).
# Relies on the globals defined above (nswarm, niter, rates, pcuts, ...).
psowrap <- function(i, datlist, specs, seeds){
  spec <- specs[i]
  set.seed(seeds[i])  # seeds[i] is a normal draw; set.seed() truncates it to an integer
  splt <- strsplit(spec, "-")[[1]]
  style <- splt[1]
  alg <- splt[2]
  parset <- as.numeric(splt[3])
  CF <- splt[4]=="CF"
  nnbor <- as.numeric(splt[5])
  objname <- splt[6]
  obj <- switch(objname, sig2fuk.mean = sig2fuk.mean, sig2fuk.max = sig2fuk.max)
  repl <- 1
  if(alg == "BBPSO"){
    rate <- ifelse(style=="AT1", rates[1], rates[2])
    pcut <- pcuts[parset]
    # Each particle starts as a random design of ndesign points in the polygon
    init <- replicate(nswarm, c(spsample(datlist$poly, ndesign, "random")@coords))
    # NOTE(review): ccc is hard-coded to 0.1 here instead of using the global
    # ccc (also 0.1) as the PSO branch does -- confirm this is intentional.
    temp <- sbbpso(niter, nswarm, nnbor, sig0, obj, lower, upper,
                   pcut = pcut, CF = CF, AT = TRUE, rate = rate, df = df, ccc = 0.1,
                   init = init, boundaryfun = movetoboundary, datlist = datlist)
    algid <- paste("BBPSO", parset, ifelse(CF, "CF", "notCF"), style, sep = "-")
    # Per-iteration trace (the `inertias` column holds the BBPSO sigmas)
    tempdat <- data.frame(obj = objname, logpost = temp[["values"]],
                          time = time, algid = algid,
                          type = "BBPSO", parset = parset, CF = CF,
                          style = style, nbhd = nnbor, rep = repl,
                          inertias = temp$sigs)
    # Best design found: 2*ndesign coordinates in long format
    temppar <- data.frame(obj = objname, logpost = temp[["value"]],
                          algid = algid, type = "BBPSO", parset = parset, CF = CF,
                          style = style, nbhd = nnbor, rep = repl,
                          parid = 1:(ndesign*2), par = temp[["par"]])
  } else {
    rate <- ifelse(style=="AT1", rates[1], rates[2])
    c.in <- ifelse(style=="CI", inertias[parset], inertia0)
    c.co <- cognitives[parset]
    c.so <- socials[parset]
    init <- replicate(nswarm, c(spsample(datlist$poly, ndesign, "random")@coords))
    # substr(style, 1, 2) collapses AT1/AT2 to "AT" for spso's style argument
    temp <- spso(niter, nswarm, nnbor, c.in, c.co, c.so, obj, lower, upper,
                 style = substr(style, 1, 2), CF = CF, alpha = alpha, beta = beta,
                 rate = rate, ccc = ccc,
                 init = init, boundaryfun = movetoboundary, datlist = datlist)
    algid <- paste("PSO", parset, ifelse(CF, "CF", "notCF"), style, sep = "-")
    tempdat <- data.frame(obj = objname, logpost = temp[["values"]],
                          time = time, algid = algid,
                          type = "PSO", parset = parset, CF = CF,
                          style = style, nbhd = nnbor, rep = repl,
                          inertias = temp$inertias)
    temppar <- data.frame(obj = objname, logpost = temp[["value"]],
                          algid = algid, type = "PSO", parset = parset, CF = CF,
                          style = style, nbhd = nnbor, rep = repl,
                          parid = 1:(ndesign*2), par = temp[["par"]])
  }
  out <- list(values = tempdat, pars = temppar)
  save(out, file = paste(spec, ".RData", sep = ""))  # checkpoint each spec individually
  print(paste("Spec ", i, " finished. Spec: ", spec, sep=""))
  return(out)
}
# Run the specs in parallel; each worker attaches the spatial packages.
# NOTE(review): only specs 1:40 are run, but specs has more entries — confirm
# this subset is intentional (e.g. the remainder runs on another machine).
homepsoouts <- foreach(i=1:40, .packages = c("sp", "maptools", "rgeos", "mnormt")) %dopar%
  psowrap(i, datlist, specs, seeds)
# Persist the combined results and release the parallel workers.
save(homepsoouts, file = "homepsoouts.RData")
stopImplicitCluster()
| /code/kriging/homepsoruns.R | no_license | simpsonm/psodesign | R | false | false | 4,371 | r | #! /usr/bin/env Rscript
# Parallel-execution and spatial libraries for the kriging design search.
library(parallel)
library(doParallel)
library(foreach)
library(sp)
library(maptools)
library(rgeos)
library(mnormt)
# Project helpers: PSO/BBPSO implementations and the kriging objective functions.
source("../psofun.R")
source("krigingfun.R")
# datlist: design-region polygon plus kriging inputs used by the objectives.
load("datlist.Rdata")
datlist$sppoly <- SpatialPolygons(list(b=Polygons(list(a=datlist$poly), "a")))
ncores <- detectCores() - 4
registerDoParallel(ncores)
# Swarm size and iteration count shared by all runs.
nswarm <- 40
niter <- 2000
nrep <- 1
# Two PSO coefficient sets (parset 1 and 2); values match standard
# constriction-style settings — confirm against psofun.R conventions.
inertias <- c(0.7298, 1/(log(2)*2))
cognitives <- c(1.496, log(2) + 1/2)
socials <- c(1.496, log(2) + 1/2)
# Neighborhood sizes: local (3) vs. whole-swarm (40).
nnbors <- c(3, 40)
# Adaptive-tuning schedule parameters and mutation rates (AT1 vs AT2).
alpha <- 0.2*niter
beta <- 2
rates <- c(0.3, 0.5)
ccc <- 0.1
df <- 1
pcuts <- c(0, 0.5)
sig0 <- 1
inertia0 <- 1.2
# Each candidate design is ndesign (x, y) points; bounds repeat per coordinate.
ndesign <- 100
lower <- rep(apply(datlist$poly@coords, 2, min), each = ndesign)
upper <- rep(apply(datlist$poly@coords, 2, max), each = ndesign)
time <- 0:niter
parsets <- 1:2
CFs <- c("CF", "nCF")
objnames <- c("sig2fuk.mean", "sig2fuk.max")
# Build every run specification string "style-alg-parset-CF-nbhd-objective".
specs <- c(outer(c(outer(c(outer(paste(c("CI", "DI", "AT1", "AT2"), "PSO", sep="-"), parsets, paste, sep = "-")),
                         c(outer(CFs, nnbors, paste, sep="-")), paste, sep="-"),
                 c(outer(c(outer(paste(c("AT1", "AT2"), "BBPSO", sep="-"), parsets, paste, sep = "-")),
                         c(outer(CFs, nnbors, paste, sep="-")), paste, sep="-"))), objnames, paste, sep="-"))
# One reproducible RNG seed per spec.
set.seed(234132)
seeds <- rnorm(length(specs))
# psowrap: decode one spec string "style-alg-parset-CF-nbhd-objective", run the
# corresponding (BB)PSO optimization, and save/return its trace and best design.
# Relies on the script-level globals (nswarm, niter, rates, pcuts, ...) and the
# optimizers sbbpso()/spso() sourced from psofun.R.
psowrap <- function(i, datlist, specs, seeds){
  spec <- specs[i]
  set.seed(seeds[i])
  fields <- strsplit(spec, "-")[[1]]
  style   <- fields[1]
  alg     <- fields[2]
  parset  <- as.numeric(fields[3])
  CF      <- fields[4] == "CF"
  nnbor   <- as.numeric(fields[5])
  objname <- fields[6]
  obj <- switch(objname, sig2fuk.mean = sig2fuk.mean, sig2fuk.max = sig2fuk.max)
  repl <- 1
  # Mutation rate for the adaptive-tuning variants and random initial designs
  # sampled inside the polygon (one column per swarm member).
  rate <- ifelse(style == "AT1", rates[1], rates[2])
  init <- replicate(nswarm, c(spsample(datlist$poly, ndesign, "random")@coords))
  if (alg == "BBPSO") {
    type_label <- "BBPSO"
    fit <- sbbpso(niter, nswarm, nnbor, sig0, obj, lower, upper,
                  pcut = pcuts[parset], CF = CF, AT = TRUE, rate = rate,
                  df = df, ccc = 0.1,
                  init = init, boundaryfun = movetoboundary, datlist = datlist)
    inertia_trace <- fit$sigs
  } else {
    type_label <- "PSO"
    c.in <- ifelse(style == "CI", inertias[parset], inertia0)
    fit <- spso(niter, nswarm, nnbor, c.in, cognitives[parset], socials[parset],
                obj, lower, upper,
                style = substr(style, 1, 2), CF = CF, alpha = alpha,
                beta = beta, rate = rate, ccc = ccc,
                init = init, boundaryfun = movetoboundary, datlist = datlist)
    inertia_trace <- fit$inertias
  }
  algid <- paste(type_label, parset, ifelse(CF, "CF", "notCF"), style, sep = "-")
  # Per-iteration trace of the best objective value plus run metadata.
  tempdat <- data.frame(obj = objname, logpost = fit[["values"]],
                        time = time, algid = algid,
                        type = type_label, parset = parset, CF = CF,
                        style = style, nbhd = nnbor, rep = repl,
                        inertias = inertia_trace)
  # Final best design: 2*ndesign coordinates in long format.
  temppar <- data.frame(obj = objname, logpost = fit[["value"]],
                        algid = algid, type = type_label, parset = parset, CF = CF,
                        style = style, nbhd = nnbor, rep = repl,
                        parid = 1:(ndesign*2), par = fit[["par"]])
  out <- list(values = tempdat, pars = temppar)
  save(out, file = paste(spec, ".RData", sep = ""))
  print(paste("Spec ", i, " finished. Spec: ", spec, sep = ""))
  out
}
# Run the specs in parallel; each worker attaches the spatial packages.
# NOTE(review): only specs 1:40 are run, but specs has more entries — confirm
# this subset is intentional (e.g. the remainder runs on another machine).
homepsoouts <- foreach(i=1:40, .packages = c("sp", "maptools", "rgeos", "mnormt")) %dopar%
  psowrap(i, datlist, specs, seeds)
# Persist the combined results and release the parallel workers.
save(homepsoouts, file = "homepsoouts.RData")
stopImplicitCluster()
|
# samp.pi: draw the inclusion probability pi from its Beta full conditional,
#   pi | z1 ~ Beta(sum(z1) + a.pi, N - sum(z1) + b.pi),
# where a.pi/b.pi are prior hyperparameters taken from the global environment.
# NOTE(review): the original signature was `function (param.N)` (a typo);
# the body clearly uses `param` and `N`, so they are made explicit arguments.
samp.pi <- function(param, N){
  soma.z1 <- sum(param$z1)     # number of individuals currently included
  a1 <- soma.z1 + a.pi         # was `a1=soma.21=z1+a.pi`: double-assignment typo
  b1 <- N - soma.z1 + b.pi     # was `N-soma.Z1-b.pi`: wrong case and wrong sign on the prior
  rbeta(1, a1, b1)             # was `rbeta(1,a1,bi)`: `bi` undefined
}
# samp.delta: draw the per-survey detection probabilities from their Beta full
# conditional. Only individuals with z1 == 1 contribute detection trials.
# `data`, `a.delta`, `b.delta` are globals.
# NOTE(review): length(param$z1) must equal nrow(data) for the subsetting to be
# valid — confirm `data` is the augmented capture-history matrix.
samp.delta <- function(param, T1){
  cond <- param$z1 == 1                  # was `parama$z1`: typo
  tot.detect <- sum(data[cond, , drop = FALSE])  # total detections among included rows
  tot.nopport <- sum(cond) * T1                  # total detection opportunities (trials)
  a1 <- tot.detect + a.delta
  # failures = trials - successes; the original *added* the successes and also
  # misspelled the variable (`tot.noapport`).
  b1 <- tot.nopport - tot.detect + b.delta
  delta <- numeric(T1)                   # preallocate instead of mutating a global
  for (i in seq_len(T1)) {
    delta[i] <- rbeta(1, a1, b1)
  }
  delta
}
### samp.z1: draw the latent inclusion indicators z1 from their Bernoulli full
### conditional. Individuals detected at least once have z1 = 1 with certainty;
### for an all-zero capture history,
###   P(z1 = 1) = pi * P(never detected) / (pi * P(never detected) + (1 - pi)).
### `data` and `N` are globals.
samp.z1 <- function(param, T1){
  # P(in population AND never detected): product over the T1 per-survey
  # non-detection probabilities. The original raised the *vector* delta to the
  # power T1, which is wrong when delta varies by survey.
  prob1.tmp <- prod(1 - param$delta) * param$pi
  prob0.tmp <- 1 - param$pi
  prob1 <- prob1.tmp / (prob1.tmp + prob0.tmp)  # original divided by the DIFFERENCE: sign bug
  cond <- apply(data, 1, sum) == 0              # original referenced undefined `dat`
  # NOTE(review): length(cond) == nrow(data) may differ from N — confirm data
  # is the augmented matrix before using this in the sampler.
  z1 <- rep(1, N)
  z1[cond] <- rbinom(sum(cond), size = 1, prob1)
  z1
}
############
# ---- Creating fake data ----
rm(list = ls())   # NOTE(review): clearing the workspace inside a script is discouraged
set.seed(1)
# population size
N <- 400
# number of surveys
T1 <- 4
# true per-survey detection probabilities
delta.true <- c(0.5, 0.5, 0.5, 0.5)
# capture-history matrix: one row per individual, one column per survey
D1 <- matrix(NA, N, T1)
for (i in seq_len(T1)) {
  D1[, i] <- rbinom(N, size = 1, prob = delta.true[i])
}
# keep only individuals detected at least once (all-zero rows are unobservable)
cond <- apply(D1, 1, sum) != 0
data <- D1[cond, ]
nrow(data)   # was `nrow=(data)`, which silently shadowed base::nrow with the data
#write.csv(data,"testdata_pop.csv")
# ---- Gibbs sampler setup ----
rm(list = ls())
set.seed(1)
data <- read.csv("testdata_pop.csv")
# constants
N <- 400
T1 <- 4
# initial parameter values
pi <- 0.5
delta <- rep(0.5, T1)   # was immediately overwritten by rep(NA, T1); keep the usable init
z1 <- rep(1, N)         # was rep(1): a length-1 vector, not one indicator per individual
| /Dataandcode/Gibbs_sampler_function.R | no_license | vratchaudhary/Bayesian_Example | R | false | false | 1,232 | r | #samp.pi
# samp.pi: draw the inclusion probability pi from its Beta full conditional,
#   pi | z1 ~ Beta(sum(z1) + a.pi, N - sum(z1) + b.pi),
# where a.pi/b.pi are prior hyperparameters taken from the global environment.
# NOTE(review): the original signature was `function (param.N)` (a typo);
# the body clearly uses `param` and `N`, so they are made explicit arguments.
samp.pi <- function(param, N){
  soma.z1 <- sum(param$z1)     # number of individuals currently included
  a1 <- soma.z1 + a.pi         # was `a1=soma.21=z1+a.pi`: double-assignment typo
  b1 <- N - soma.z1 + b.pi     # was `N-soma.Z1-b.pi`: wrong case and wrong sign on the prior
  rbeta(1, a1, b1)             # was `rbeta(1,a1,bi)`: `bi` undefined
}
# samp.delta: draw the per-survey detection probabilities from their Beta full
# conditional. Only individuals with z1 == 1 contribute detection trials.
# `data`, `a.delta`, `b.delta` are globals.
# NOTE(review): length(param$z1) must equal nrow(data) for the subsetting to be
# valid — confirm `data` is the augmented capture-history matrix.
samp.delta <- function(param, T1){
  cond <- param$z1 == 1                  # was `parama$z1`: typo
  tot.detect <- sum(data[cond, , drop = FALSE])  # total detections among included rows
  tot.nopport <- sum(cond) * T1                  # total detection opportunities (trials)
  a1 <- tot.detect + a.delta
  # failures = trials - successes; the original *added* the successes and also
  # misspelled the variable (`tot.noapport`).
  b1 <- tot.nopport - tot.detect + b.delta
  delta <- numeric(T1)                   # preallocate instead of mutating a global
  for (i in seq_len(T1)) {
    delta[i] <- rbeta(1, a1, b1)
  }
  delta
}
### samp.z1: draw the latent inclusion indicators z1 from their Bernoulli full
### conditional. Individuals detected at least once have z1 = 1 with certainty;
### for an all-zero capture history,
###   P(z1 = 1) = pi * P(never detected) / (pi * P(never detected) + (1 - pi)).
### `data` and `N` are globals.
samp.z1 <- function(param, T1){
  # P(in population AND never detected): product over the T1 per-survey
  # non-detection probabilities. The original raised the *vector* delta to the
  # power T1, which is wrong when delta varies by survey.
  prob1.tmp <- prod(1 - param$delta) * param$pi
  prob0.tmp <- 1 - param$pi
  prob1 <- prob1.tmp / (prob1.tmp + prob0.tmp)  # original divided by the DIFFERENCE: sign bug
  cond <- apply(data, 1, sum) == 0              # original referenced undefined `dat`
  # NOTE(review): length(cond) == nrow(data) may differ from N — confirm data
  # is the augmented matrix before using this in the sampler.
  z1 <- rep(1, N)
  z1[cond] <- rbinom(sum(cond), size = 1, prob1)
  z1
}
############
# ---- Creating fake data ----
rm(list = ls())   # NOTE(review): clearing the workspace inside a script is discouraged
set.seed(1)
# population size
N <- 400
# number of surveys
T1 <- 4
# true per-survey detection probabilities
delta.true <- c(0.5, 0.5, 0.5, 0.5)
# capture-history matrix: one row per individual, one column per survey
D1 <- matrix(NA, N, T1)
for (i in seq_len(T1)) {
  D1[, i] <- rbinom(N, size = 1, prob = delta.true[i])
}
# keep only individuals detected at least once (all-zero rows are unobservable)
cond <- apply(D1, 1, sum) != 0
data <- D1[cond, ]
nrow(data)   # was `nrow=(data)`, which silently shadowed base::nrow with the data
#write.csv(data,"testdata_pop.csv")
# ---- Gibbs sampler setup ----
rm(list = ls())
set.seed(1)
data <- read.csv("testdata_pop.csv")
# constants
N <- 400
T1 <- 4
# initial parameter values
pi <- 0.5
delta <- rep(0.5, T1)   # was immediately overwritten by rep(NA, T1); keep the usable init
z1 <- rep(1, N)         # was rep(1): a length-1 vector, not one indicator per individual
|
# Exercise 4: practicing with dplyr
# Install the `"nycflights13"` package. Load (`library()`) the package.
# You'll also need to load `dplyr`
install.packages("dplyr")
library("dplyr")
install.packages("nycflights13")
library("nycflights13")
# The data frame `flights` should now be accessible to you.
# Use functions to inspect it: how many rows and columns does it have?
# What are the names of the columns?
# Use `??flights` to search for documentation on the data set (for what the
# columns represent)
View(flights)
dim(flights)      # rows and columns
colnames(flights) # column names
# Use `dplyr` to give the data frame a new column that is the amount of time
# gained or lost while flying (that is: how much of the delay arriving occured
# during flight, as opposed to before departing).
# Fix: `mutate()` needs `name = expression`; the original
# `mutate(flights, delayed_in_air, arr_delay - dep_delay)` left the column unnamed.
flights <- mutate(flights, delayed_in_air = arr_delay - dep_delay)
View(flights)
# Use `dplyr` to sort your data frame in descending order by the column you just
# created. Remember to save this as a variable (or in the same one!)
flights <- arrange(flights, desc(delayed_in_air))
# For practice, repeat the last 2 steps in a single statement using the pipe
# operator. You can clear your environmental variables to "reset" the data frame
flights <- flights %>%
  mutate(gain_in_air = arr_delay - dep_delay) %>%
  arrange(desc(gain_in_air))
# Make a histogram of the amount of time gained using the `hist()` function
hist(flights$gain_in_air)
# On average, did flights gain or lose time?
# Note: use the `na.rm = TRUE` argument to remove NA values from your aggregation
summarize(flights, avg = mean(delayed_in_air, na.rm = TRUE))
# Create a data.frame of flights headed to SeaTac ('SEA'), only including the
# origin, destination, and the "gain_in_air" column you just created
# Fix: filtering needs `==`; `dest = "SEA"` was an invalid named argument.
to_sea <- filter(flights, dest == "SEA")
to_sea <- select(to_sea, origin, dest, delayed_in_air)
View(to_sea)
# OR, equivalently, with pipes (the bare `OR` line was a syntax error):
flights %>%
  filter(dest == "SEA") %>%
  select(origin, dest, delayed_in_air) %>%
  summarize(avg_delayed = mean(delayed_in_air, na.rm = TRUE)) %>%
  pull(avg_delayed)   # was `pull(ave_delayed)`: typo
# On average, did flights to SeaTac gain or loose time?
summarize(to_sea, avg_delayed = mean(delayed_in_air, na.rm = TRUE))
# Consider flights from JFK to SEA. What was the average, min, and max air time
# of those flights? Bonus: use pipes to answer this question in one statement
# (without showing any other data)!
flights %>%
  filter(origin == "JFK", dest == "SEA") %>%
  summarize(avg_air = mean(air_time, na.rm = TRUE),
            min_air = min(air_time, na.rm = TRUE),
            max_air = max(air_time, na.rm = TRUE))
| /chapter-11-exercises/exercise-4/exercise.R | permissive | emilyphantastic/book-exercises | R | false | false | 2,287 | r | # Exercise 4: practicing with dplyr
# Install the `"nycflights13"` package. Load (`library()`) the package.
# You'll also need to load `dplyr`
install.packages("dplyr")
library("dplyr")
install.packages("nycflights13")
library("nycflights13")
# The data frame `flights` should now be accessible to you.
# Use functions to inspect it: how many rows and columns does it have?
# What are the names of the columns?
# Use `??flights` to search for documentation on the data set (for what the
# columns represent)
View(flights)
# Use `dplyr` to give the data frame a new column that is the amount of time
# gained or lost while flying (that is: how much of the delay arriving occured
# during flight, as opposed to before departing).
flights <- mutate(flights, delayed_in_air, arr_delay - dep_delay)
View(flights)
# Use `dplyr` to sort your data frame in descending order by the column you just
# created. Remember to save this as a variable (or in the same one!)
flights <- arrange(flights, -delayed_in_air)
# For practice, repeat the last 2 steps in a single statement using the pipe
# operator. You can clear your environmental variables to "reset" the data frame
flights <- flights %>% mutate(gain_in_air = arr_delay - dep_delay) %>% arrange(desc(gain_in_air))
# Make a histogram of the amount of time gained using the `hist()` function
# On average, did flights gain or lose time?
# Note: use the `na.rm = TRUE` argument to remove NA values from your aggregation
summarize(flights, avg = mean(delayed_in_air, na.rm = TRUE))
# Create a data.frame of flights headed to SeaTac ('SEA'), only including the
# origin, destination, and the "gain_in_air" column you just created
to_sea <- filter(flights, dest = "SEA")
to_sea <- select(to_sea, origin, dest, delayed_in_air)
View(to_sea)
OR
flights %>%
filter(dest == "SEA") %>%
#slect(orgin, dest, delayed_in_air) %>%
summarize(avg_delayed = mean(delayed_in_air, na.rm = TRUE)) %>%
pull(ave_delayed)
# On average, did flights to SeaTac gain or loose time?
summarize(to_sea, avg_delayed = mean(delayed_in_air, na.rm = TRUE))
# Consider flights from JFK to SEA. What was the average, min, and max air time
# of those flights? Bonus: use pipes to answer this question in one statement
# (without showing any other data)!
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/setJobNames.R
\name{setJobNames}
\alias{setJobNames}
\title{Set job names.}
\usage{
setJobNames(reg, ids, jobnames)
}
\arguments{
\item{reg}{[\code{\link{Registry}}]\cr
Registry.}
\item{ids}{[\code{integer}]\cr
Ids of jobs.
Default is all jobs.}
\item{jobnames}{[\code{character}]\cr
Character vector with length equal to \code{length(ids)}.
\code{NA} removes the names stored in the registry.
A single \code{NA} is replicated to match the length of ids provided.}
}
\value{
Named vector of job ids.
}
\description{
Set job names.
}
| /man/setJobNames.Rd | no_license | TeraprocSoftware/BatchJobs | R | false | false | 622 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/setJobNames.R
\name{setJobNames}
\alias{setJobNames}
\title{Set job names.}
\usage{
setJobNames(reg, ids, jobnames)
}
\arguments{
\item{reg}{[\code{\link{Registry}}]\cr
Registry.}
\item{ids}{[\code{integer}]\cr
Ids of jobs.
Default is all jobs.}
\item{jobnames}{[\code{character}]\cr
Character vector with length equal to \code{length(ids)}.
\code{NA} removes the names stored in the registry.
A single \code{NA} is replicated to match the length of ids provided.}
}
\value{
Named vector of job ids.
}
\description{
Set job names.
}
|
#' Enrichment score
#'
#' Computes the enrichment score for each row of an enrichment result table:
#' the overlap fraction among the input genes divided by the term's fraction
#' of the background, i.e. (k/n) / (K/M) where GeneRatio = "k/n" and
#' BgRatio = "K/M".
#'
#' @export enrichScore
#'
#' @return Numeric vector of enrichment scores, one per result row.
#'
#' @import clusterProfiler
#' @import ReactomePA
enrichScore <- function(enrichReslt){
  # Pull the idx-th piece of each "a/b" ratio string as a number.
  ratio_part <- function(ratios, idx) {
    as.numeric(vapply(strsplit(ratios, "/"), `[`, character(1), idx))
  }
  k <- ratio_part(enrichReslt$GeneRatio, 1)  # overlap gene count
  n <- ratio_part(enrichReslt$GeneRatio, 2)  # input (DE) gene count
  K <- ratio_part(enrichReslt$BgRatio, 1)    # term gene count
  M <- ratio_part(enrichReslt$BgRatio, 2)    # background gene count
  (k * M) / (n * K)
}
#' GO enrichment analysis
#'
#' Runs clusterProfiler::enrichGO for one ontology and reformats the result
#' into the shared result-table layout. Rows are ordered by p-value (ties
#' broken by larger overlap count) and filtered at resultPvalueCutoff; the
#' overlap-count filter (minOverlapNum) is applied only when at least 3 rows
#' survive it.
#'
#' @export goEn
#'
#' @return data.frame of formatted GO enrichment results.
#'
goEn <- function(gene,
                 Ont = "BP",
                 universeList = NULL,
                 orgdb = NULL,
                 keyType = "SYMBOL",
                 minGeneNum = 5,
                 maxGeneNum = 500,
                 pvalueCutoff = 1,
                 qvalueCutoff = 1,
                 resultPvalueCutoff = 0.05,
                 minOverlapNum = 3){
  id_tbl <- gene
  # 'gene' arrives as the id-mapping table from bitr(); select the id column
  # that matches keyType (left untouched for other key types).
  gene <- switch(keyType,
                 ENTREZID = id_tbl$ENTREZID,
                 SYMBOL = id_tbl$SYMBOL,
                 id_tbl)
  go_res <- enrichGO(gene = gene,
                     OrgDb = orgdb,
                     keyType = keyType,
                     ont = Ont,
                     pvalueCutoff = pvalueCutoff,
                     pAdjustMethod = "BH",
                     qvalueCutoff = qvalueCutoff,
                     minGSSize = minGeneNum,
                     maxGSSize = maxGeneNum,
                     readable = FALSE
                     )@result
  ont_label <- switch(Ont,
                      BP = "biological process",
                      CC = "cellular component",
                      MF = "molecular function")
  out <- data.frame(databaseID = go_res$ID,
                    Description = go_res$Description,
                    type = ont_label,
                    geneRatio = go_res$GeneRatio,
                    bgRatio = go_res$BgRatio,
                    pvalue = go_res$pvalue,
                    padj = go_res$p.adjust,
                    qvalue = go_res$qvalue,
                    enrichScore = enrichScore(go_res),
                    overlapGeneList = go_res$geneID,
                    overlapGeneCount = go_res$Count,
                    stringsAsFactors = FALSE)
  # For ENTREZ input, translate the "/"-separated overlap ids back to symbols.
  if (keyType == "ENTREZID") {
    out$overlapGeneList <- sapply(strsplit(out$overlapGeneList, "/"),
                                  function(ids)
                                    paste(id_tbl$SYMBOL[match(ids, id_tbl$ENTREZID)],
                                          collapse = "/"))
  }
  out <- out[order(out$pvalue, -out$overlapGeneCount), ]
  out <- out[out$pvalue < resultPvalueCutoff, ]
  strict <- out[out$overlapGeneCount >= minOverlapNum, ]
  if (nrow(strict) >= 3)
    out <- strict
  out
}
#' KEGG pathway enrichment analysis
#'
#' Runs clusterProfiler::enrichKEGG and reformats the result into the shared
#' result-table layout. Rows are ordered by p-value (ties broken by larger
#' overlap count) and filtered at resultPvalueCutoff; the overlap-count
#' filter (minOverlapNum) is applied only when at least 3 rows survive it.
#'
#' @export keggPath
#'
#' @return data.frame of formatted KEGG enrichment results.
#'
keggPath <- function(gene,
                     universeList = NULL,
                     species = "hsa",
                     keyType = "SYMBOL",
                     minGeneNum = 5,
                     maxGeneNum = 500,
                     pvalueCutoff = 1,
                     qvalueCutoff = 1,
                     resultPvalueCutoff = 0.05,
                     minOverlapNum = 3,
                     use_internal_data = FALSE
                     ){
  id_tbl <- gene
  # 'gene' arrives as the id-mapping table from bitr(); select the id column
  # that matches keyType (left untouched for other key types).
  gene <- switch(keyType,
                 ENTREZID = id_tbl$ENTREZID,
                 SYMBOL = id_tbl$SYMBOL,
                 id_tbl)
  kegg_res <- enrichKEGG(gene = gene,
                         organism = species,
                         keyType = "kegg",
                         pvalueCutoff = pvalueCutoff,
                         pAdjustMethod = "BH",
                         minGSSize = minGeneNum,
                         maxGSSize = maxGeneNum,
                         qvalueCutoff = qvalueCutoff,
                         use_internal_data = use_internal_data
                         )@result
  out <- data.frame(databaseID = kegg_res$ID,
                    Description = kegg_res$Description,
                    geneRatio = kegg_res$GeneRatio,
                    bgRatio = kegg_res$BgRatio,
                    pvalue = kegg_res$pvalue,
                    padj = kegg_res$p.adjust,
                    qvalue = kegg_res$qvalue,
                    enrichScore = enrichScore(kegg_res),
                    overlapGeneList = kegg_res$geneID,
                    overlapGeneCount = kegg_res$Count,
                    stringsAsFactors = FALSE)
  # For ENTREZ input, translate the "/"-separated overlap ids back to symbols.
  if (keyType == "ENTREZID") {
    out$overlapGeneList <- sapply(strsplit(out$overlapGeneList, "/"),
                                  function(ids)
                                    paste(id_tbl$SYMBOL[match(ids, id_tbl$ENTREZID)],
                                          collapse = "/"))
  }
  out <- out[order(out$pvalue, -out$overlapGeneCount), ]
  out <- out[out$pvalue < resultPvalueCutoff, ]
  strict <- out[out$overlapGeneCount >= minOverlapNum, ]
  if (nrow(strict) >= 3)
    out <- strict
  out
}
#' Reactome pathway enrichment analysis
#'
#' Runs ReactomePA::enrichPathway and reformats the result into the shared
#' result-table layout. Rows are ordered by p-value (ties broken by larger
#' overlap count) and filtered at resultPvalueCutoff; the overlap-count
#' filter (minOverlapNum) is applied only when at least 3 rows survive it.
#'
#' @export reacPath
#'
#' @return data.frame of formatted Reactome enrichment results.
#'
reacPath <- function(gene,
                     universeList = NULL,
                     species = "human",
                     keyType = "SYMBOL",
                     minGeneNum = 5,
                     maxGeneNum = 500,
                     pvalueCutoff = 1,
                     qvalueCutoff = 1,
                     resultPvalueCutoff = 0.05,
                     minOverlapNum = 3
                     ){
  id_tbl <- gene
  # 'gene' arrives as the id-mapping table from bitr(); select the id column
  # that matches keyType (left untouched for other key types).
  gene <- switch(keyType,
                 ENTREZID = id_tbl$ENTREZID,
                 SYMBOL = id_tbl$SYMBOL,
                 id_tbl)
  reac_res <- enrichPathway(gene = gene,
                            organism = species,
                            pAdjustMethod = "BH",
                            pvalueCutoff = pvalueCutoff,
                            qvalueCutoff = qvalueCutoff,
                            minGSSize = minGeneNum,
                            maxGSSize = maxGeneNum,
                            readable = FALSE)@result
  out <- data.frame(databaseID = reac_res$ID,
                    Description = reac_res$Description,
                    geneRatio = reac_res$GeneRatio,
                    bgRatio = reac_res$BgRatio,
                    pvalue = reac_res$pvalue,
                    padj = reac_res$p.adjust,
                    qvalue = reac_res$qvalue,
                    enrichScore = enrichScore(reac_res),
                    overlapGeneList = reac_res$geneID,
                    overlapGeneCount = reac_res$Count,
                    stringsAsFactors = FALSE)
  # For ENTREZ input, translate the "/"-separated overlap ids back to symbols.
  if (keyType == "ENTREZID") {
    out$overlapGeneList <- sapply(strsplit(out$overlapGeneList, "/"),
                                  function(ids)
                                    paste(id_tbl$SYMBOL[match(ids, id_tbl$ENTREZID)],
                                          collapse = "/"))
  }
  out <- out[order(out$pvalue, -out$overlapGeneCount), ]
  out <- out[out$pvalue < resultPvalueCutoff, ]
  strict <- out[out$overlapGeneCount >= minOverlapNum, ]
  if (nrow(strict) >= 3)
    out <- strict
  out
}
#' GO / KEGG / Reactome enrichment analysis (main entry point)
#'
#' Normalizes the species, maps the input gene ids to ENTREZ ids, and runs
#' goEn() for BP/CC/MF plus keggPath() and reacPath(), collecting the five
#' formatted result tables in a list. Each analysis is wrapped so that a
#' failure (e.g. no mappable genes) yields an explanatory string instead of
#' aborting the other analyses.
#'
#' @param degList character vector of differentially expressed gene ids.
#' @param universeList optional background gene list (currently unused by the
#'   downstream helpers).
#' @param species species name or KEGG code: "human"/"hsa", "mouse"/"mmu",
#'   "rat"/"rno", "zebrafish"/"dre", "aedes_albopictus"/"aalb".
#' @param keyType id type of degList (e.g. "SYMBOL", "ENTREZID").
#' @param species_db OrgDb annotation package name; inferred from species
#'   when NULL.
#' @param minGeneNum,maxGeneNum gene-set size bounds for every enrichment call.
#' @param pvalueCutoff,qvalueCutoff cutoffs passed to the enrichment calls.
#' @param resultPvalueCutoff p-value filter applied to the result tables.
#' @param minOverlapNum minimum overlap-gene count (applied by the helpers
#'   only when at least 3 rows survive it).
#' @return list with elements format_bp, format_cc, format_mf, format_kegg,
#'   format_reactome; each is a data.frame, an error object, or a message
#'   string.
#' @export goPathwayEnrichment
goPathwayEnrichment <- function(degList,
                                universeList = NULL,
                                species = "hsa",
                                keyType = "SYMBOL",
                                species_db = NULL,
                                minGeneNum = 5,
                                maxGeneNum = 500,
                                pvalueCutoff = 1,
                                qvalueCutoff = 1,
                                resultPvalueCutoff = 0.05,
                                minOverlapNum = 3
                                ){
  # header --------------------------------------
  suppressMessages(require(clusterProfiler))
  suppressMessages(require(ReactomePA))
  options(stringsAsFactors = FALSE)
  options(digits = 7)
  # input ---------------------------------------
  # Normalize the species to its KEGG organism code ...
  species <- switch(species,
                    human = "hsa",
                    hsa = "hsa",
                    mouse = "mmu",
                    mmu = "mmu",
                    rat = "rno",
                    rno = "rno",
                    zebrafish = "dre",
                    dre = "dre",
                    aedes_albopictus = "aalb",
                    aalb = "aalb"
                    )
  # ... and to the common name expected by ReactomePA (species is already a
  # code here, so only code entries are needed).
  SPECIES <- switch(species,
                    hsa = "human",
                    mmu = "mouse",
                    rno = "rat",
                    dre = "zebrafish",
                    aalb = "aedes_albopictus"
                    )
  if (is.null(species_db)) {
    species_db <- switch(species,
                         hsa = "org.Hs.eg.db",
                         mmu = "org.Mm.eg.db",
                         rno = "org.Rn.eg.db",
                         dre = "org.Dr.eg.db"
                         )
  }
  # Load the annotation package whether it was inferred or user-supplied
  # (the original only loaded it in the NULL branch, so a user-supplied
  # species_db was never attached).
  require(species_db, character.only = TRUE)
  # id transform: map input ids to ENTREZ ids + symbols once; all helpers
  # share this table (they translate overlap gene lists back to symbols).
  entrez_tbl <- bitr(degList, fromType = keyType, toType = c("ENTREZID", "SYMBOL"), OrgDb = species_db)
  # Run one enrichment call, converting clusterProfiler's "no gene can be
  # mapped" error into a readable string; any other error object is returned
  # as-is so the caller can inspect it.
  run_safely <- function(expr) {
    res <- tryCatch(expr, error = function(e) e)
    if (inherits(res, "simpleError") &&
        any(grepl("with no slots", conditionMessage(res)))) {
      res <- "--> No gene can be mapped...\n"
    }
    res
  }
  format_bp <- run_safely(goEn(gene = entrez_tbl,
                               Ont = "BP",
                               keyType = "ENTREZID",
                               orgdb = species_db,
                               pvalueCutoff = pvalueCutoff,
                               qvalueCutoff = qvalueCutoff,
                               resultPvalueCutoff = resultPvalueCutoff,
                               minGeneNum = minGeneNum,
                               maxGeneNum = maxGeneNum,
                               minOverlapNum = minOverlapNum))
  format_cc <- run_safely(goEn(gene = entrez_tbl,
                               Ont = "CC",
                               keyType = "ENTREZID",
                               orgdb = species_db,
                               pvalueCutoff = pvalueCutoff,
                               qvalueCutoff = qvalueCutoff,
                               resultPvalueCutoff = resultPvalueCutoff,
                               minGeneNum = minGeneNum,
                               maxGeneNum = maxGeneNum,
                               minOverlapNum = minOverlapNum))
  format_mf <- run_safely(goEn(gene = entrez_tbl,
                               Ont = "MF",
                               keyType = "ENTREZID",
                               orgdb = species_db,
                               pvalueCutoff = pvalueCutoff,
                               qvalueCutoff = qvalueCutoff,
                               resultPvalueCutoff = resultPvalueCutoff,
                               minGeneNum = minGeneNum,
                               maxGeneNum = maxGeneNum,
                               minOverlapNum = minOverlapNum))
  # NOTE: the original hardcoded minOverlapNum = 1 here, silently ignoring
  # the function argument for KEGG only; pass the parameter for consistency.
  format_kegg <- run_safely(keggPath(gene = entrez_tbl,
                                     species = species,
                                     keyType = "ENTREZID",
                                     pvalueCutoff = pvalueCutoff,
                                     resultPvalueCutoff = resultPvalueCutoff,
                                     minGeneNum = minGeneNum,
                                     maxGeneNum = maxGeneNum,
                                     qvalueCutoff = qvalueCutoff,
                                     minOverlapNum = minOverlapNum,
                                     use_internal_data = FALSE))
  # ReactomePA signals "no gene mapped" as a message rather than an error.
  format_reactome <- tryCatch(reacPath(gene = entrez_tbl,
                                       keyType = "ENTREZID",
                                       species = SPECIES,
                                       pvalueCutoff = pvalueCutoff,
                                       qvalueCutoff = qvalueCutoff,
                                       resultPvalueCutoff = resultPvalueCutoff,
                                       minGeneNum = minGeneNum,
                                       maxGeneNum = maxGeneNum,
                                       minOverlapNum = minOverlapNum),
                              message = function(e) e)
  if (inherits(format_reactome, "message"))
    format_reactome <- format_reactome$message
  list(format_bp = format_bp,
       format_cc = format_cc,
       format_mf = format_mf,
       format_kegg = format_kegg,
       format_reactome = format_reactome)
}
| /R/goPathwayEnrichment.R | no_license | gnilihzeux/circFunEnrich | R | false | false | 17,000 | r | #' enrichment score
#' @export enrichScore
#'
#' @return NULL
#'
#' @import clusterProfiler
#' @import ReactomePA
enrichScore <- function(enrichReslt){
# enrichment score = overlapGeneCount*bgGeneNum / (diffGeneNum*termGeneNum)
#
# Args:
# enrichReslt: enrichGO's or enrichKEGG's result, class-enrichReslt
#
# Returns:
# enrichment score, class-numeric
overlapGeneCount <- as.numeric(sapply(strsplit(enrichReslt$GeneRatio, "/"), "[", 1))
diffGeneNum <- as.numeric(sapply(strsplit(enrichReslt$GeneRatio, "/"), "[", 2))
bgGeneNum <- as.numeric(sapply(strsplit(enrichReslt$BgRatio, "/"), "[", 2))
termGeneNum <- as.numeric(sapply(strsplit(enrichReslt$BgRatio, "/"), "[", 1))
overlapGeneCount*bgGeneNum / (diffGeneNum*termGeneNum)
}
#' GO enrichment analysis
#'
#' Runs clusterProfiler::enrichGO for one ontology and reformats the result
#' into the shared result-table layout. Rows are ordered by p-value (ties
#' broken by larger overlap count) and filtered at resultPvalueCutoff; the
#' overlap-count filter (minOverlapNum) is applied only when at least 3 rows
#' survive it.
#'
#' @export goEn
#'
#' @return data.frame of formatted GO enrichment results.
#'
goEn <- function(gene,
                 Ont = "BP",
                 universeList = NULL,
                 orgdb = NULL,
                 keyType = "SYMBOL",
                 minGeneNum = 5,
                 maxGeneNum = 500,
                 pvalueCutoff = 1,
                 qvalueCutoff = 1,
                 resultPvalueCutoff = 0.05,
                 minOverlapNum = 3){
  id_tbl <- gene
  # 'gene' arrives as the id-mapping table from bitr(); select the id column
  # that matches keyType (left untouched for other key types).
  gene <- switch(keyType,
                 ENTREZID = id_tbl$ENTREZID,
                 SYMBOL = id_tbl$SYMBOL,
                 id_tbl)
  go_res <- enrichGO(gene = gene,
                     OrgDb = orgdb,
                     keyType = keyType,
                     ont = Ont,
                     pvalueCutoff = pvalueCutoff,
                     pAdjustMethod = "BH",
                     qvalueCutoff = qvalueCutoff,
                     minGSSize = minGeneNum,
                     maxGSSize = maxGeneNum,
                     readable = FALSE
                     )@result
  ont_label <- switch(Ont,
                      BP = "biological process",
                      CC = "cellular component",
                      MF = "molecular function")
  out <- data.frame(databaseID = go_res$ID,
                    Description = go_res$Description,
                    type = ont_label,
                    geneRatio = go_res$GeneRatio,
                    bgRatio = go_res$BgRatio,
                    pvalue = go_res$pvalue,
                    padj = go_res$p.adjust,
                    qvalue = go_res$qvalue,
                    enrichScore = enrichScore(go_res),
                    overlapGeneList = go_res$geneID,
                    overlapGeneCount = go_res$Count,
                    stringsAsFactors = FALSE)
  # For ENTREZ input, translate the "/"-separated overlap ids back to symbols.
  if (keyType == "ENTREZID") {
    out$overlapGeneList <- sapply(strsplit(out$overlapGeneList, "/"),
                                  function(ids)
                                    paste(id_tbl$SYMBOL[match(ids, id_tbl$ENTREZID)],
                                          collapse = "/"))
  }
  out <- out[order(out$pvalue, -out$overlapGeneCount), ]
  out <- out[out$pvalue < resultPvalueCutoff, ]
  strict <- out[out$overlapGeneCount >= minOverlapNum, ]
  if (nrow(strict) >= 3)
    out <- strict
  out
}
#' KEGG pathway enrichment analysis
#'
#' Runs clusterProfiler::enrichKEGG and reformats the result into the shared
#' result-table layout. Rows are ordered by p-value (ties broken by larger
#' overlap count) and filtered at resultPvalueCutoff; the overlap-count
#' filter (minOverlapNum) is applied only when at least 3 rows survive it.
#'
#' @export keggPath
#'
#' @return data.frame of formatted KEGG enrichment results.
#'
keggPath <- function(gene,
                     universeList = NULL,
                     species = "hsa",
                     keyType = "SYMBOL",
                     minGeneNum = 5,
                     maxGeneNum = 500,
                     pvalueCutoff = 1,
                     qvalueCutoff = 1,
                     resultPvalueCutoff = 0.05,
                     minOverlapNum = 3,
                     use_internal_data = FALSE
                     ){
  id_tbl <- gene
  # 'gene' arrives as the id-mapping table from bitr(); select the id column
  # that matches keyType (left untouched for other key types).
  gene <- switch(keyType,
                 ENTREZID = id_tbl$ENTREZID,
                 SYMBOL = id_tbl$SYMBOL,
                 id_tbl)
  kegg_res <- enrichKEGG(gene = gene,
                         organism = species,
                         keyType = "kegg",
                         pvalueCutoff = pvalueCutoff,
                         pAdjustMethod = "BH",
                         minGSSize = minGeneNum,
                         maxGSSize = maxGeneNum,
                         qvalueCutoff = qvalueCutoff,
                         use_internal_data = use_internal_data
                         )@result
  out <- data.frame(databaseID = kegg_res$ID,
                    Description = kegg_res$Description,
                    geneRatio = kegg_res$GeneRatio,
                    bgRatio = kegg_res$BgRatio,
                    pvalue = kegg_res$pvalue,
                    padj = kegg_res$p.adjust,
                    qvalue = kegg_res$qvalue,
                    enrichScore = enrichScore(kegg_res),
                    overlapGeneList = kegg_res$geneID,
                    overlapGeneCount = kegg_res$Count,
                    stringsAsFactors = FALSE)
  # For ENTREZ input, translate the "/"-separated overlap ids back to symbols.
  if (keyType == "ENTREZID") {
    out$overlapGeneList <- sapply(strsplit(out$overlapGeneList, "/"),
                                  function(ids)
                                    paste(id_tbl$SYMBOL[match(ids, id_tbl$ENTREZID)],
                                          collapse = "/"))
  }
  out <- out[order(out$pvalue, -out$overlapGeneCount), ]
  out <- out[out$pvalue < resultPvalueCutoff, ]
  strict <- out[out$overlapGeneCount >= minOverlapNum, ]
  if (nrow(strict) >= 3)
    out <- strict
  out
}
#' Reactome pathway enrichment analysis
#'
#' Runs ReactomePA::enrichPathway and reformats the result into the shared
#' result-table layout. Rows are ordered by p-value (ties broken by larger
#' overlap count) and filtered at resultPvalueCutoff; the overlap-count
#' filter (minOverlapNum) is applied only when at least 3 rows survive it.
#'
#' @export reacPath
#'
#' @return data.frame of formatted Reactome enrichment results.
#'
reacPath <- function(gene,
                     universeList = NULL,
                     species = "human",
                     keyType = "SYMBOL",
                     minGeneNum = 5,
                     maxGeneNum = 500,
                     pvalueCutoff = 1,
                     qvalueCutoff = 1,
                     resultPvalueCutoff = 0.05,
                     minOverlapNum = 3
                     ){
  id_tbl <- gene
  # 'gene' arrives as the id-mapping table from bitr(); select the id column
  # that matches keyType (left untouched for other key types).
  gene <- switch(keyType,
                 ENTREZID = id_tbl$ENTREZID,
                 SYMBOL = id_tbl$SYMBOL,
                 id_tbl)
  reac_res <- enrichPathway(gene = gene,
                            organism = species,
                            pAdjustMethod = "BH",
                            pvalueCutoff = pvalueCutoff,
                            qvalueCutoff = qvalueCutoff,
                            minGSSize = minGeneNum,
                            maxGSSize = maxGeneNum,
                            readable = FALSE)@result
  out <- data.frame(databaseID = reac_res$ID,
                    Description = reac_res$Description,
                    geneRatio = reac_res$GeneRatio,
                    bgRatio = reac_res$BgRatio,
                    pvalue = reac_res$pvalue,
                    padj = reac_res$p.adjust,
                    qvalue = reac_res$qvalue,
                    enrichScore = enrichScore(reac_res),
                    overlapGeneList = reac_res$geneID,
                    overlapGeneCount = reac_res$Count,
                    stringsAsFactors = FALSE)
  # For ENTREZ input, translate the "/"-separated overlap ids back to symbols.
  if (keyType == "ENTREZID") {
    out$overlapGeneList <- sapply(strsplit(out$overlapGeneList, "/"),
                                  function(ids)
                                    paste(id_tbl$SYMBOL[match(ids, id_tbl$ENTREZID)],
                                          collapse = "/"))
  }
  out <- out[order(out$pvalue, -out$overlapGeneCount), ]
  out <- out[out$pvalue < resultPvalueCutoff, ]
  strict <- out[out$overlapGeneCount >= minOverlapNum, ]
  if (nrow(strict) >= 3)
    out <- strict
  out
}
#' GO, KEGG and Reactome enrichment analysis (main entry point)
#' @export goPathwayEnrichment
#'
#' @param degList differential-expression gene list (character vector)
#' @param universeList background gene list; NOTE(review): not referenced in
#'   this function's body -- confirm whether it should be forwarded to the
#'   enrichment calls.
#' @param species one of "hsa"/"human", "mmu"/"mouse", "rno"/"rat",
#'   "dre"/"zebrafish", "aalb"/"aedes_albopictus"
#' @param keyType key type of \code{degList} (e.g. "SYMBOL")
#' @param species_db OrgDb annotation package name; derived from
#'   \code{species} when NULL
#' @param minGeneNum GO/pathway term must contain at least this many genes
#' @param maxGeneNum GO/pathway term must contain at most this many genes
#' @param pvalueCutoff p-value cutoff passed to the enrichment calls
#' @param qvalueCutoff q-value cutoff passed to the enrichment calls
#' @param resultPvalueCutoff p-value cutoff applied to the formatted tables
#' @param minOverlapNum minimum overlap gene count kept in the result tables
#'
#' @return a named list with elements format_bp, format_cc, format_mf,
#'   format_kegg and format_reactome; each is a result table (columns:
#'   go/pathID, description, geneRatio, bgRatio, pvalue, padj,
#'   overlapGeneList, overlapGeneCount), or an informative string when no
#'   gene could be mapped
#'
goPathwayEnrichment <- function(degList,
                                universeList = NULL,
                                species = "hsa",
                                keyType = "SYMBOL",
                                species_db = NULL,
                                minGeneNum = 5,
                                maxGeneNum = 500,
                                pvalueCutoff = 1,
                                qvalueCutoff = 1,
                                resultPvalueCutoff = 0.05,
                                minOverlapNum = 3
                                ){
  # GO and KEGG pathway enrichment analysis with Fisher's exact test based
  # on the local annotation databases.
  #
  # @issue package version: 'clusterProfiler' v3.4 spells the key-type
  # argument 'keytype' while v3.6 spells it 'keyType'; the helpers below
  # therefore pass that value positionally rather than by name.
  # header --------------------------------------
  suppressMessages(require(clusterProfiler))
  suppressMessages(require(ReactomePA))
  options(stringsAsFactors = FALSE)
  options(digits = 7)
  # input ---------------------------------------
  # Normalize the species argument to its three-letter KEGG code.
  species <- switch(species,
                    human = "hsa",
                    hsa = "hsa",
                    mouse = "mmu",
                    mmu = "mmu",
                    rat = "rno",
                    rno = "rno",
                    dre = "dre",
                    zebrafish = "dre",
                    aalb = "aalb",
                    aedes_albopictus = "aalb"
  )
  # Long species name expected by ReactomePA; 'species' is already a code
  # at this point, so only code keys are needed here.
  SPECIES <- switch(species,
                    hsa = "human",
                    mmu = "mouse",
                    rno = "rat",
                    dre = "zebrafish",
                    aalb = "aedes_albopictus"
  )
  if (is.null(species_db)) {
    species_db <- switch(species,
                         hsa = "org.Hs.eg.db",
                         mmu = "org.Mm.eg.db",
                         rno = "org.Rn.eg.db",
                         dre = "org.Dr.eg.db"
    )
    require(species_db, character.only = TRUE)
  }
  # id transform: map the input keys to ENTREZ ids.
  entrez_tbl <- bitr(degList, fromType = keyType, toType = c("ENTREZID", "SYMBOL"), OrgDb = species_db)

  # Replace a "with no slots" error (raised downstream when no gene can be
  # mapped to the annotation) with a readable message; any other error
  # object is returned unchanged so the caller can inspect it.
  replace_no_slots <- function(res) {
    if (inherits(res, "simpleError") && any(grepl("with no slots", res$message)))
      return("--> No gene can be mapped...\n")
    res
  }

  # Run one GO enrichment (ont = "BP", "CC" or "MF") with shared settings.
  run_go <- function(ont) {
    replace_no_slots(tryCatch(goEn(gene = entrez_tbl,
                                   Ont = ont,
                                   keyType = "ENTREZID", # keytype or keyType
                                   orgdb = species_db,
                                   pvalueCutoff = pvalueCutoff,
                                   qvalueCutoff = qvalueCutoff,
                                   resultPvalueCutoff = resultPvalueCutoff,
                                   minGeneNum = minGeneNum,
                                   maxGeneNum = maxGeneNum,
                                   minOverlapNum = minOverlapNum),
                              error = function(e) e))
  }
  format_bp <- run_go("BP")
  format_cc <- run_go("CC")
  format_mf <- run_go("MF")
  format_kegg <- replace_no_slots(
    tryCatch(keggPath(gene = entrez_tbl,
                      species = species,
                      keyType = "ENTREZID", # keytype or keyType
                      pvalueCutoff = pvalueCutoff,
                      resultPvalueCutoff = resultPvalueCutoff,
                      minGeneNum = minGeneNum,
                      maxGeneNum = maxGeneNum,
                      qvalueCutoff = qvalueCutoff,
                      # BUGFIX: was hard-coded to 1, silently ignoring the
                      # minOverlapNum argument used by every other call here.
                      minOverlapNum = minOverlapNum,
                      use_internal_data = FALSE),
             error = function(e) e))
  # reacPath signals "no gene mapped" via a message condition rather than
  # an error, so intercept messages here and unwrap the text.
  format_reactome <- tryCatch(reacPath(gene = entrez_tbl,
                                       keyType = "ENTREZID",
                                       species = SPECIES,
                                       pvalueCutoff = pvalueCutoff,
                                       qvalueCutoff = qvalueCutoff,
                                       resultPvalueCutoff = resultPvalueCutoff,
                                       minGeneNum = minGeneNum,
                                       maxGeneNum = maxGeneNum,
                                       minOverlapNum = minOverlapNum),
                              message = function(e) e)
  if (inherits(format_reactome, "message"))
    format_reactome <- format_reactome$message
  list(format_bp = format_bp,
       format_cc = format_cc,
       format_mf = format_mf,
       format_kegg = format_kegg,
       format_reactome = format_reactome)
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)

# UI definition: a fluid page with a title panel, a sidebar and a main panel.
shinyUI(
  fluidPage(
    titlePanel(title = "This is the title panel"),
    sidebarLayout(
      sidebarPanel("this is the sidebar panel."),
      mainPanel("This is the main panel.")
    )
  )
)
| /1stwebapp/ui.R | no_license | shrutiror/Shiny_Web_Applications | R | false | false | 622 | r | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
shinyUI(fluidPage(
# Application title
titlePanel(title="This is the title panel"),
# Sidebar with a slider input for number of bins
sidebarLayout( sidebarPanel("this is the sidebar panel." ),
# Show a plot of the generated distribution
mainPanel("This is the main panel.")
)
))
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
# Verify that h2o.difflag1() reproduces base R's diff() on a large
# uniform random vector (to within a small tolerance).
test.difflag1 <- function() {
  n <- 1000000
  values <- runif(n)
  frame <- as.h2o(values)
  expected <- diff(values)
  lagged <- h2o.difflag1(frame)
  # Row 1 of the lagged frame is NaN (there is no previous row to diff
  # against), so compare rows 2..n with the n-1 base R differences.
  actual <- as.vector(unlist(as.data.frame(lagged[2:n])))
  expect_equal(actual, as.vector(unlist(expected)), tol = 1e-3)
}
doTest("Test difflag1", test.difflag1) | /h2o-r/tests/testdir_munging/runit_difflag1.R | permissive | h2oai/h2o-3 | R | false | false | 617 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
test.difflag1 <- function() {
x <- runif(1:1000000)
fr <- as.h2o(x)
diff_r <- diff(x)
diff_h2o <- h2o.difflag1(fr)
diff_h2o <- diff_h2o[2:1000000] #Here it is 2:1000000 because we add a NaN to the first row since
#there is no previous row to get a diff from.
h2o_df <- as.data.frame(diff_h2o)
h2o_vec <- as.vector(unlist(h2o_df))
r_vec <- as.vector(unlist(diff_r))
expect_equal(h2o_vec,r_vec,tol=1e-3)
}
doTest("Test difflag1", test.difflag1) |
#' Read in buoy data
#'
#' @export
#' @seealso \href{http://gyre.umeoce.maine.edu/data/gomoos/buoy/html/M01.html}{UMaine Buoy M01 Data}
#' @seealso \href{http://gyre.umeoce.maine.edu/data/gomoos/buoy/html/I01.html}{UMaine Buoy I01 Data}
#' @param buoy the buoy to load - either "I01" (default) or "M01"
#' @return a data frame (tibble) of buoy data
read_buoy <- function(buoy = c("I01", "M01")){
  # match.arg() selects "I01" by default and raises a clear error for an
  # unknown buoy, instead of failing later with a cryptic "file not found".
  buoy <- match.arg(buoy)
  filename <- system.file(file.path("buoy", paste0(buoy, "_sbe37_1m.csv.gz")),
                          package = "ohwobpg")
  if (!file.exists(filename)) stop("file not found: ", filename)
  suppressMessages(readr::read_csv(filename))
}
#' Retrieve buoy location information
#'
#' @export
#' @return a data frame (tibble) of ID, lon and lat
buoy_locations <- function(){
  # The buoy locations ship with the package as a small CSV file.
  path <- system.file(file.path("buoy", "locations.csv"), package = "ohwobpg")
  suppressMessages(readr::read_csv(path))
}
| /R/buoy.R | permissive | BigelowLab/ohwobpg | R | false | false | 892 | r | #' Read in buoy data
#'
#' @export
#' @seealso \href{http://gyre.umeoce.maine.edu/data/gomoos/buoy/html/M01.html}{UMaine Buoy M01 Data}
#' @seealso \href{http://gyre.umeoce.maine.edu/data/gomoos/buoy/html/I01.html}{UMaine Buoy I01 Data}
#' @param buoy the buoy to load - either "I01" or "M01"
#' @return a data frame (tibble) of buoy data
read_buoy <- function(buoy = c("I01","M01")[1]){
filename <- system.file(file.path("buoy", paste0(buoy[1], "_sbe37_1m.csv.gz")),
package = "ohwobpg")
if(!file.exists(filename)) stop("file not found: ", filename)
suppressMessages(readr::read_csv(filename))
}
#' Retrieve buoy location information
#'
#' @export
#' @return a data frame (tibble) of ID, lon and lat
buoy_locations <- function(){
filename <- system.file(file.path("buoy", "locations.csv"), package = "ohwobpg")
suppressMessages(readr::read_csv(filename))
}
|
#' Include a JavaScript File
#'
#' This function produces a singleton for including a JavaScript file. Note
#' that JavaScript files to be included in a Shiny server should be in the
#' \code{www} folder; preferably \code{www/js}.
#' @param file Location of the file.
#' @importFrom shiny singleton tags
#' @export
shiny_js <- function(file) {
  # Warn (but still emit the tag) when the file is missing from www/.
  if (!file.exists(file.path("www", file))) {
    warning("No JavaScript file located at '", file, "'.")
  }
  script_tag <- tags$script(type = "text/javascript", src = file)
  singleton(tags$head(script_tag))
}
#' Include a CSS File
#'
#' This function produces a singleton for including a CSS Stylesheet.
#' Note that CSS files to be included in a Shiny server should be in the
#' \code{www} folder; preferably \code{www/css}.
#' @param file Location of the file.
#' @importFrom shiny singleton tags
#' @export
shiny_css <- function(file) {
  # Warn (but still emit the tag) when the stylesheet is missing from www/.
  if (!file.exists(file.path("www", file))) {
    warning("No CSS stylesheet located at '", file, "'.")
  }
  link_tag <- tags$link(rel = "stylesheet", type = "text/css", href = file)
  singleton(tags$head(link_tag))
}
#' Include D3.js
#'
#' This function produces a singleton for including d3.js as:
#' \code{<script src="https://d3js.org/d3.v3.min.js" charset="utf-8"></script>}.
#'
#' @importFrom shiny singleton tags
#' @export
use_d3 <- function() {
  # BUGFIX: load d3 over https. The previous http:// URL is blocked as
  # mixed content by browsers when the Shiny app itself is served via https.
  return( singleton( tags$head( tags$script(
    type="text/javascript", charset="utf-8", src="https://d3js.org/d3.v3.min.js"
  ))))
}
| /R/functions.R | no_license | kevinushey/shinyExtras | R | false | false | 1,466 | r | #' Include a JavaScript File
#' Include a JavaScript File
#'
#' This function produces a singleton for including a JavaScript file. Note
#' that JavaScript files to be included in a Shiny server should be in the
#' \code{www} folder; preferably \code{www/js}.
#' @param file Location of the file.
#' @importFrom shiny singleton tags
#' @export
shiny_js <- function(file) {
  # Warn (but still emit the tag) when the file is missing from www/.
  if (!file.exists(file.path("www", file))) {
    warning("No JavaScript file located at '", file, "'.")
  }
  script_tag <- tags$script(type = "text/javascript", src = file)
  singleton(tags$head(script_tag))
}
#'
#' This function produces a singleton for including a CSS Stylesheet.
#' Note that CSS files to be included in a Shiny server should be in the
#' \code{www} folder; preferably \code{www/css}.
#' @param file Location of the file.
#' @importFrom shiny singleton tags
#' @export
shiny_css <- function(file) {
if( !file.exists(file.path("www", file)) ) {
warning("No CSS stylesheet located at '", file, "'.")
}
return( singleton( tags$head( tags$link(
rel="stylesheet", type="text/css", href=file
))))
}
#' Include D3.js
#'
#' This function produces a singleton for including d3.js as:
#' \code{<script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>}.
#'
#' @importFrom shiny singleton tags
#' @export
use_d3 <- function() {
return( singleton( tags$head( tags$script(
type="text/javascript", charset="utf-8", src="http://d3js.org/d3.v3.min.js"
))))
}
|
# Munging script for map.all
# Fix 2010-11 data which has kindergarten at KAMS: K-4 rows recorded at the
# middle school really belong to the primary school.
map.all.silo[map.all.silo$SchoolName=="KIPP Ascend Middle School" & (map.all.silo$Grade<5|map.all.silo$Grade=="K"),"SchoolName"]<-"KIPP Ascend Primary School"
# Parse the term name into season / start year / end year, normalize grades
# (kindergarten "K" -> 0), derive the graduating-class year, and keep only
# growth-eligible records from 2010-11 onward.
map.all<-map.all.silo %>%
mutate(Season=str_extract(TermName,
"[[:alpha:]]+"),
Year1=as.integer(str_extract(TermName,
"[[:digit:]]+")),
Year2=as.integer(gsub("([a-zA-Z]+[[:space:]][[:digit:]]+-)([[:digit:]]+)",
"\\2",
TermName)),
SY=paste(Year1, Year2, sep="-"),
Grade=ifelse(Grade=="K", 0, as.integer(Grade)),
Grade=as.integer(Grade),
# CohortYear = the year this student's class reaches 12th grade.
CohortYear=Year2+(12-Grade),
# Collapse the "General Science ..." variants into a single subject name.
MeasurementScale = ifelse(grepl("General Science", MeasurementScale),
"General Science",
MeasurementScale)
) %>%
filter(Year1 >= 2010 & GrowthMeasureYN=='TRUE') %>%
mutate(SchoolInitials = abbrev(SchoolName, list(old="KAPS", new="KAP")),
TestQuartile = kipp_quartile(TestPercentile),
KIPPTieredGrowth = tiered_growth(TestQuartile, Grade)
)
# Attach the NWEA growth norms (typical / college-ready targets) as columns.
map.all<-cbind(map.all,
mapvisuals::nwea_growth(map.all$Grade,
map.all$TestRITScore,
map.all$MeasurementScale
)
)
# Create season-to-season growth matches ------------------------------------
years <- unique(map.all$Year2)

# The six (start season, end season) comparisons, in the same order the
# original map.SS/map.FS/map.FW/map.WS/map.FF/map.SW tables were stacked.
season.pairs <- list(c("Spring", "Spring"),
                     c("Fall",   "Spring"),
                     c("Fall",   "Winter"),
                     c("Winter", "Spring"),
                     c("Fall",   "Fall"),
                     c("Spring", "Winter"))

# Match students between two seasons across every year, attaching
# typical-growth and college-ready growth targets.
match.seasons <- function(pair) {
  rbind_all(lapply(years,
                   mapvisuals::s2s_match,
                   .data = map.all,
                   season1 = pair[1],
                   season2 = pair[2],
                   typical.growth = TRUE,
                   college.ready = TRUE))
}

# One table of all season-to-season matches (replaces the six copy-pasted
# blocks that built map.SS ... map.SW and then rbind-ed/rm-ed them).
map.all.growth <- rbind_all(lapply(season.pairs, match.seasons))
rm(season.pairs, match.seasons)
# Per-school growth summary: for each school year, growth window, school,
# grade, graduating class and subject, count matched students and compute
# the share meeting typical growth, college-ready growth, and the 50th/75th
# percentile in each season (S1 = first test season, S2 = second).
map.all.growth.sum<-data.table(map.all.growth)[,list("N (both seasons)"= .N,
"# >= Typical" = sum(MetTypical),
"% >= Typical" = round(sum(MetTypical)/.N,2),
"# >= College Ready" = sum(MetCollegeReady),
"% >= College Ready" = round(sum(MetCollegeReady)/.N,2),
"# >= 50th Pctl S1" = sum(TestPercentile>=50),
"% >= 50th Pctl S1" = round(sum(TestPercentile>=50)/.N,2),
"# >= 50th Pctl S2" = sum(TestPercentile.2>=50),
"% >= 50th Pctl S2" = round(sum(TestPercentile.2>=50)/.N,2),
"# >= 75th Pctl S1" = sum(TestPercentile>=75),
"% >= 75th Pctl S1" = round(sum(TestPercentile>=75)/.N,2),
"# >= 75th Pctl S2" = sum(TestPercentile.2>=75),
"% >= 75th Pctl S2" = round(sum(TestPercentile.2>=75)/.N,2)
),
by=list(SY.2,
GrowthSeason,
SchoolInitials.2,
Grade.2,
CohortYear.2,
MeasurementScale)
]
# Rename the ".2" (second-season) grouping columns to display names.
setnames(map.all.growth.sum,
c("SchoolInitials.2", "Grade.2", "MeasurementScale", "SY.2", "CohortYear.2"),
c("School", "Grade", "Subject", "SY", "Class")
)
# The same summary aggregated over all schools, labeled "Region".
map.all.growth.sum.reg<-data.table(map.all.growth)[,list("School"="Region",
"N (both seasons)"= .N,
"# >= Typical" = sum(MetTypical),
"% >= Typical" = round(sum(MetTypical)/.N,2),
"# >= College Ready" = sum(MetCollegeReady),
"% >= College Ready" = round(sum(MetCollegeReady)/.N,2),
"# >= 50th Pctl S1" = sum(TestPercentile>=50),
"% >= 50th Pctl S1" = round(sum(TestPercentile>=50)/.N,2),
"# >= 50th Pctl S2" = sum(TestPercentile.2>=50),
"% >= 50th Pctl S2" = round(sum(TestPercentile.2>=50)/.N,2),
"# >= 75th Pctl S1" = sum(TestPercentile>=75),
"% >= 75th Pctl S1" = round(sum(TestPercentile>=75)/.N,2),
"# >= 75th Pctl S2" = sum(TestPercentile.2>=75),
"% >= 75th Pctl S2" = round(sum(TestPercentile.2>=75)/.N,2)
),
by=list(SY.2,
GrowthSeason,
Grade.2,
CohortYear.2,
MeasurementScale)
]
setnames(map.all.growth.sum.reg,
c("Grade.2", "MeasurementScale", "SY.2", "CohortYear.2"),
c("Grade", "Subject", "SY", "Class")
)
# Stack the per-school and regional summaries into one table.
map.all.growth.sum<-rbind(map.all.growth.sum,map.all.growth.sum.reg)
rm(map.all.growth.sum.reg)
map.all.growth.sum.p<-copy(map.all.growth.sum) # for plotting
# Rename to syntactic, dot-separated column names that plotting code can
# reference without back-ticks.
setnames(map.all.growth.sum.p,
names(map.all.growth.sum.p),
c("SY",
"GrowthSeason",
"School",
"Grade",
"Class",
"Subject",
"N.S1.S2",
"N.Typical",
"Pct.Typical",
"N.CR",
"Pct.CR",
"N.50.S1",
"Pct.50.S1",
"N.50.S2",
"Pct.50.S2",
"N.75.S1",
"Pct.75.S1",
"N.75.S2",
"Pct.75.S2"
)
)
# Drop rows with any NA before plotting.
map.all.growth.sum.p<-na.omit(map.all.growth.sum.p)
# NOTE(review): require() mid-script -- dplyr should ideally be loaded once
# at the top of the munge pipeline.
require(dplyr)
message("Class by current Grade")
# Label each graduating class with its current (max observed) grade and
# keep only classes with more than 20 summary rows.
class_current_grade<-map.all.growth.sum.p%>%
group_by(Class) %>%
dplyr::summarize(Grade=max(Grade), N=n()) %>%
mutate(Class2=paste0(Class,
"\n(Current grade: ",
Grade, ")")
) %>%
filter(N>20) %>%
select(Class, Class2)
map.all.growth.sum.p<-left_join(map.all.growth.sum.p,
class_current_grade,
by="Class")
# left_join returns a tbl/data.frame; restore data.table class for callers.
map.all.growth.sum.p<-as.data.table(map.all.growth.sum.p)
| /map/munge/03-map_all.R | no_license | chrishaid/ShinyApps | R | false | false | 8,229 | r | # Munging scricpt for map.all
#fix 2010-11 data which has kindergarten at KAMS
map.all.silo[map.all.silo$SchoolName=="KIPP Ascend Middle School" & (map.all.silo$Grade<5|map.all.silo$Grade=="K"),"SchoolName"]<-"KIPP Ascend Primary School"
map.all<-map.all.silo %>%
mutate(Season=str_extract(TermName,
"[[:alpha:]]+"),
Year1=as.integer(str_extract(TermName,
"[[:digit:]]+")),
Year2=as.integer(gsub("([a-zA-Z]+[[:space:]][[:digit:]]+-)([[:digit:]]+)",
"\\2",
TermName)),
SY=paste(Year1, Year2, sep="-"),
Grade=ifelse(Grade=="K", 0, as.integer(Grade)),
Grade=as.integer(Grade),
CohortYear=Year2+(12-Grade),
MeasurementScale = ifelse(grepl("General Science", MeasurementScale),
"General Science",
MeasurementScale)
) %>%
filter(Year1 >= 2010 & GrowthMeasureYN=='TRUE') %>%
mutate(SchoolInitials = abbrev(SchoolName, list(old="KAPS", new="KAP")),
TestQuartile = kipp_quartile(TestPercentile),
KIPPTieredGrowth = tiered_growth(TestQuartile, Grade)
)
map.all<-cbind(map.all,
mapvisuals::nwea_growth(map.all$Grade,
map.all$TestRITScore,
map.all$MeasurementScale
)
)
# Create Seaason to Season Numbers
years<-unique(map.all$Year2)
map.SS<-rbind_all(lapply(years,
mapvisuals::s2s_match,
.data=map.all,
season1="Spring",
season2="Spring",
typical.growth=T,
college.ready=T
)
)
map.FS<-rbind_all(lapply(years,
mapvisuals::s2s_match,
.data=map.all,
season1="Fall",
season2="Spring",
typical.growth=T,
college.ready=T
)
)
map.FW<-rbind_all(lapply(years,
mapvisuals::s2s_match,
.data=map.all,
season1="Fall",
season2="Winter",
typical.growth=T,
college.ready=T
)
)
map.WS<-rbind_all(lapply(years,
mapvisuals::s2s_match,
.data=map.all,
season1="Winter",
season2="Spring",
typical.growth=T,
college.ready=T
)
)
map.FF<-rbind_all(lapply(years,
mapvisuals::s2s_match,
.data=map.all,
season1="Fall",
season2="Fall",
typical.growth=T,
college.ready=T
)
)
map.SW<-rbind_all(lapply(years,
mapvisuals::s2s_match,
.data=map.all,
season1="Spring",
season2="Winter",
typical.growth=T,
college.ready=T
)
)
map.all.growth<-rbind_all(list(map.SS, map.FS, map.FW, map.WS, map.FF, map.SW))
rm(map.SS, map.FS, map.FW, map.WS, map.FF, map.SW)
map.all.growth.sum<-data.table(map.all.growth)[,list("N (both seasons)"= .N,
"# >= Typical" = sum(MetTypical),
"% >= Typical" = round(sum(MetTypical)/.N,2),
"# >= College Ready" = sum(MetCollegeReady),
"% >= College Ready" = round(sum(MetCollegeReady)/.N,2),
"# >= 50th Pctl S1" = sum(TestPercentile>=50),
"% >= 50th Pctl S1" = round(sum(TestPercentile>=50)/.N,2),
"# >= 50th Pctl S2" = sum(TestPercentile.2>=50),
"% >= 50th Pctl S2" = round(sum(TestPercentile.2>=50)/.N,2),
"# >= 75th Pctl S1" = sum(TestPercentile>=75),
"% >= 75th Pctl S1" = round(sum(TestPercentile>=75)/.N,2),
"# >= 75th Pctl S2" = sum(TestPercentile.2>=75),
"% >= 75th Pctl S2" = round(sum(TestPercentile.2>=75)/.N,2)
),
by=list(SY.2,
GrowthSeason,
SchoolInitials.2,
Grade.2,
CohortYear.2,
MeasurementScale)
]
setnames(map.all.growth.sum,
c("SchoolInitials.2", "Grade.2", "MeasurementScale", "SY.2", "CohortYear.2"),
c("School", "Grade", "Subject", "SY", "Class")
)
map.all.growth.sum.reg<-data.table(map.all.growth)[,list("School"="Region",
"N (both seasons)"= .N,
"# >= Typical" = sum(MetTypical),
"% >= Typical" = round(sum(MetTypical)/.N,2),
"# >= College Ready" = sum(MetCollegeReady),
"% >= College Ready" = round(sum(MetCollegeReady)/.N,2),
"# >= 50th Pctl S1" = sum(TestPercentile>=50),
"% >= 50th Pctl S1" = round(sum(TestPercentile>=50)/.N,2),
"# >= 50th Pctl S2" = sum(TestPercentile.2>=50),
"% >= 50th Pctl S2" = round(sum(TestPercentile.2>=50)/.N,2),
"# >= 75th Pctl S1" = sum(TestPercentile>=75),
"% >= 75th Pctl S1" = round(sum(TestPercentile>=75)/.N,2),
"# >= 75th Pctl S2" = sum(TestPercentile.2>=75),
"% >= 75th Pctl S2" = round(sum(TestPercentile.2>=75)/.N,2)
),
by=list(SY.2,
GrowthSeason,
Grade.2,
CohortYear.2,
MeasurementScale)
]
setnames(map.all.growth.sum.reg,
c("Grade.2", "MeasurementScale", "SY.2", "CohortYear.2"),
c("Grade", "Subject", "SY", "Class")
)
map.all.growth.sum<-rbind(map.all.growth.sum,map.all.growth.sum.reg)
rm(map.all.growth.sum.reg)
map.all.growth.sum.p<-copy(map.all.growth.sum) # for plotting
setnames(map.all.growth.sum.p,
names(map.all.growth.sum.p),
c("SY",
"GrowthSeason",
"School",
"Grade",
"Class",
"Subject",
"N.S1.S2",
"N.Typical",
"Pct.Typical",
"N.CR",
"Pct.CR",
"N.50.S1",
"Pct.50.S1",
"N.50.S2",
"Pct.50.S2",
"N.75.S1",
"Pct.75.S1",
"N.75.S2",
"Pct.75.S2"
)
)
map.all.growth.sum.p<-na.omit(map.all.growth.sum.p)
require(dplyr)
message("Class by current Grade")
class_current_grade<-map.all.growth.sum.p%>%
group_by(Class) %>%
dplyr::summarize(Grade=max(Grade), N=n()) %>%
mutate(Class2=paste0(Class,
"\n(Current grade: ",
Grade, ")")
) %>%
filter(N>20) %>%
select(Class, Class2)
map.all.growth.sum.p<-left_join(map.all.growth.sum.p,
class_current_grade,
by="Class")
map.all.growth.sum.p<-as.data.table(map.all.growth.sum.p)
|
######Some supplemental fns that are used by heatmap_plot*.R and cluster_plot.R
#BASED on heatmap_supp_funcs.R written by Henry Long
# Standardize a numeric vector: subtract the mean and divide by the
# standard deviation, yielding z-scores.
zscore <- function(x){
  (x - mean(x)) / sd(x)
}
# Map the distinct (non-NA) values of x onto a fixed 24-color palette.
# Returns a named character vector: names are the sorted unique values,
# entries are palette colors starting at position `colorstart`.
# When use_viridis is TRUE the viridis palette is used instead.
cmap <- function(x, colorstart=NULL, use_viridis=FALSE) {
  palette <- c("#3182bd", "#e6550d", "#31a354", "#756bb1", "#636363", "#BD4931", "#6baed6", "#fd8d3c", "#74c476", "#9e9ac8", "#969696", "#D67D6B", "#9ecae1", "#fdae6b", "#a1d99b", "#bcbddc", "#bdbdbd", "#E0A89D", "#c6dbef", "#fdd0a2", "#c7e9c0", "#dadaeb", "#d9d9d9", "#F0CEC7")
  levels <- sort(unique(na.omit(as.vector(x))))
  offset <- if (is.null(colorstart)) 0 else colorstart
  result <- palette[(offset + 1):(offset + length(levels))]
  if (use_viridis) {
    result <- viridis(length(levels))
  }
  names(result) <- levels
  result
}
# Build a ComplexHeatmap HeatmapAnnotation from a data.frame of sample
# annotations: columns with fewer than MIN_UNIQUE distinct non-NA values,
# or any non-numeric column, get a discrete palette from cmap(); continuous
# numeric columns get a white -> yellow -> red colorRamp2 gradient.
#
# Args:
#   annotation: data.frame, one row per sample and one column per track.
#
# Returns:
#   A HeatmapAnnotation object with one color mapping per column.
make_complexHeatmap_annotation <- function(annotation){
  MIN_UNIQUE <- 6
  colorlist <- list()
  # Running offset into cmap()'s fixed palette so successive discrete
  # tracks receive distinct colors.
  colorcount <- 0
  nn <- length(annotation)
  for (i in seq_len(nn)) {  # seq_len() is safe even for a 0-column data.frame
    ann <- as.matrix(annotation[,i])
    # Discrete when there are few unique non-NA values OR the column is
    # not numeric; otherwise treat as a continuous scale.
    if(length(sort(unique(na.omit(as.vector(ann))))) < MIN_UNIQUE | is.numeric(ann)==FALSE) {
      colorlist[[i]] <- cmap(ann, colorstart=colorcount)
      colorcount <- colorcount + length(unique(ann))
    } else {
      colorlist[[i]] <- colorRamp2(seq(min(ann, na.rm = TRUE), max(ann, na.rm = TRUE), length = 3), c("white","yellow", "red"))
    }
  }
  names(colorlist) <- c(colnames(annotation)[1:nn])
  ha1 <- HeatmapAnnotation(df = annotation[,1:nn,drop=FALSE], gap=unit(0.5,"mm"), col = colorlist, annotation_legend_param = list(title_gp=gpar(fontsize=8), grid_height = unit(3,"mm"), labels_gp=gpar(fontsize=8)))
  return(ha1)
}
#NOTE: LEN removed the threeD code
# Run PCA on data_matrix, draw a PC1-vs-PC2 scatter plot, and return the
# prcomp fit.
#
# Args:
#   data_matrix   : numeric matrix; rows are observations (row names are
#                   used as point labels).
#   threeD        : retained for backward compatibility only; the rgl 3D
#                   plotting code was removed, so this argument is ignored.
#   labels        : add row-name labels above each point?
#   pca_title     : text appended to the plot title.
#   legend_title  : when non-empty, draw a legend with this title using the
#                   (names of) ClassColorings.
#   ClassColorings: vector of point colors, one per row; names are used as
#                   legend labels.
#
# Returns:
#   The prcomp object.
make_pca_plots <- function(data_matrix, threeD = TRUE, labels = TRUE, pca_title = "Data Matrix", legend_title = "", ClassColorings) {
  # Standard PCA; tol drops components with negligible standard deviation.
  pca_out <- prcomp(data_matrix, scale. = TRUE, tol = 0.05)
  # Percent of variance explained by the first three PCs (assumes at least
  # three components survive the tol filter).
  pc_var <- signif(100.0 * summary(pca_out)$importance[2, 1:3], digits = 3)

  # Widen the right margin for the legend; restore the caller's graphics
  # state on exit instead of leaving par() permanently modified.
  old_par <- par(mar = c(5.1, 4.1, 4.1, 8.1), xpd = TRUE)
  on.exit(par(old_par), add = TRUE)

  plot(pca_out$x[, "PC1"], pca_out$x[, "PC2"], col = ClassColorings, pch = 16,
       xlab = paste0("PC1 (", pc_var[1], "% of variance)"),
       ylab = paste0("PC2 (", pc_var[2], "% of variance)"),
       main = paste0('PCA analysis of ', pca_title))
  if (labels == TRUE) {
    text(pca_out$x[, "PC1"], pca_out$x[, "PC2"], labels = row.names(data_matrix), cex = 0.7, pos = 3)
  }
  if (legend_title != "") {
    mycols <- unique(ClassColorings)
    mynames <- unique(names(ClassColorings))
    legend("topright", inset = c(-0.23, 0), legend = mynames, col = mycols, pch = 16, title = legend_title)
  }
  return(pca_out)
}
######END SUPPLEMENTAL Fns #######
| /scripts/supp_fns.R | no_license | eulertx/viper-rnaseq | R | false | false | 3,653 | r | ######Some supplemental fns that are used by heatmap_plot*.R and cluster_plot.R
#BASED on heatmap_supp_funcs.R written by Henry Long
zscore = function(x){
y=(x-mean(x))/sd(x)
return(y)
}
cmap <- function(x, colorstart=NULL, use_viridis=FALSE) {
colors = c("#3182bd", "#e6550d", "#31a354", "#756bb1", "#636363", "#BD4931", "#6baed6", "#fd8d3c", "#74c476", "#9e9ac8", "#969696", "#D67D6B", "#9ecae1", "#fdae6b", "#a1d99b", "#bcbddc", "#bdbdbd", "#E0A89D", "#c6dbef", "#fdd0a2", "#c7e9c0", "#dadaeb", "#d9d9d9", "#F0CEC7")
x <- sort(unique(na.omit(as.vector(x))))
if(is.null(colorstart)) { colorstart = 0 }
col <- colors[(colorstart+1):(colorstart+length(x))]
if(use_viridis) {
col <- viridis(length(x))
}
names(col) <- x
return(col)
}
make_complexHeatmap_annotation <- function(annotation){
MIN_UNIQUE <- 6
global_gp = gpar(fontsize = 8)
title_gp = gpar(fontsize = 8, fontface = "bold")
colorlist <- list()
colorcount = 0
nn<-length(annotation)
for (i in 1:nn) {
ann <- as.matrix(annotation[,i])
#NEED a better way to distinguish between discrete and continuous
#something like:
#if(! is.numeric(ann[1]) or (is.integer and ! is.double #and less)) {
#if(length(sort(unique(na.omit(as.vector(ann))))) < MIN_UNIQUE) {
if(length(sort(unique(na.omit(as.vector(ann))))) < MIN_UNIQUE | is.numeric(ann)==FALSE) {
colorlist[[i]] <- cmap(ann, colorstart=colorcount)
colorcount = colorcount + length(unique(ann))
} else {
#colorlist[[i]] <- colorRamp2(seq(min(ann, na.rm = TRUE), max(ann, na.rm = TRUE), length = 3), c("blue","white","orange"))
colorlist[[i]] <- colorRamp2(seq(min(ann, na.rm = TRUE), max(ann, na.rm = TRUE), length = 3), c("white","yellow", "red"))
}
}
names(colorlist) <- c(colnames(annotation)[1:nn])
#ha1 = HeatmapAnnotation(df = annotation[,1:nn,drop=FALSE], gap=unit(0.5,"mm"), col = colorlist)
ha1 = HeatmapAnnotation(df = annotation[,1:nn,drop=FALSE], gap=unit(0.5,"mm"), col = colorlist, annotation_legend_param = list(title_gp=gpar(fontsize=8), grid_height = unit(3,"mm"), labels_gp=gpar(fontsize=8)))
return(ha1)
}
#NOTE: LEN removed the threeD code
make_pca_plots <- function(data_matrix, threeD = TRUE, labels = TRUE, pca_title = "Data Matrix", legend_title = "", ClassColorings) {
#Standard PCA analysis
pca_out <- prcomp(data_matrix, scale. = TRUE, tol = 0.05)
pc_var <- signif(100.0 * summary(pca_out)[[6]][2,1:3], digits = 3)
#### NEW
par(mar=c(5.1, 4.1, 4.1, 8.1), xpd=TRUE)
plot(pca_out$x[,"PC1"], pca_out$x[,"PC2"], col=ClassColorings, pch=16, xlab=paste0("PC1 (", pc_var[1], "% of variance)"), ylab=paste0("PC2 (", pc_var[2], "% of variance)"), main = paste0('PCA analysis of ',pca_title))
if(labels == TRUE) {text(pca_out$x[,"PC1"], pca_out$x[,"PC2"], labels=row.names(data_matrix), cex= 0.7, pos=3)}
if(legend_title != "") {
mycols = unique(ClassColorings)
mynames = unique(names(ClassColorings))
#legend("bottomright", legend = mynames, col=mycols, pch = 16, title = legend_title)
legend("topright", inset=c(-0.23,0), legend = mynames, col=mycols, pch = 16, title = legend_title)
}
# if(threeD==TRUE){
# #try 3D plot
# library("rgl", lib.loc="/Library/Frameworks/R.framework/Versions/3.2/Resources/library")
# pca3d<-cbind(pca_out$x[,1], pca_out$x[,2], pca_out$x[,3])
# plot3d(pca3d, type="s",col=ClassColorings, size=1, scale=0.2)
# }
return(pca_out)
}
######END SUPPLEMENTAL Fns #######
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)

# Server logic: two reactive plot outputs driven by inputs defined in ui.R.
shinyServer(function(input, output) {

  output$distPlot <- renderPlot({
    # Histogram of the second column of the built-in `faithful` data set,
    # with the number of bins taken from the input$bins slider.
    waiting <- faithful[, 2]
    breakpoints <- seq(min(waiting), max(waiting), length.out = input$bins + 1)
    hist(waiting, breaks = breakpoints, col = 'darkgray', border = 'white')
  })

  output$plottest <- renderPlot({
    # Simple index plot of the sequence 1..input$bbins.
    n_points <- input$bbins
    plot(1:n_points)
  })
})
| /thefirstclass/server.R | no_license | jasonsseraph/thefirstone | R | false | false | 599 | r |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
# Shiny server logic: defines two reactive plot outputs consumed by ui.R.
library(shiny)
shinyServer(function(input, output) {
# Histogram of faithful[, 2] (Old Faithful data set); bin count comes from
# the input$bins control defined in ui.R.
output$distPlot <- renderPlot({
# generate bins based on input$bins from ui.R
x <- faithful[, 2]
bins <- seq(min(x), max(x), length.out = input$bins + 1)
# draw the histogram with the specified number of bins
hist(x, breaks = bins, col = 'darkgray', border = 'white')
})
# Index plot of the sequence 1..input$bbins.
# NOTE(review): `=` used for assignment below; the file otherwise uses `<-`.
output$plottest <- renderPlot({
apple=input$bbins
plot(1:apple)
})
})
|
#' Clear cached files
#'
#' @name caching
#' @param force (logical) Should we force removal of files if permissions
#' say otherwise?. Default: \code{FALSE}
#'
#' @details BEWARE: this will clear all cached files.
#'
#' @section File storage:
#' We use \pkg{rappdirs} to store files, see
#' \code{\link[rappdirs]{user_cache_dir}} for how
#' we determine the directory on your machine to save files to, and run
#' \code{user_cache_dir("rnoaa")} to get that directory.
#' @export
#' @rdname caching
ghcnd_clear_cache <- function(force = FALSE) {
  # Delete all cached GHCND files under the rnoaa cache directory.
  #
  # Args:
  #   force: passed to unlink(); force removal despite file permissions.
  #
  # Guard against callers still naming the removed `path` argument.
  # names(match.call())[-1] gives the names of the supplied arguments
  # directly (equivalent to the previous names(sapply(match.call(),
  # deparse))[-1], without the sapply/deparse detour).
  arg_names <- names(match.call())[-1]
  if ("path" %in% arg_names) {
    stop("The parameter path has been removed, see ?ghcnd_clear_cache",
      call. = FALSE)
  }
  # rnoaa_cache_dir is a package-level variable defined elsewhere.
  path <- file.path(rnoaa_cache_dir, "ghcnd")
  files <- list.files(path, full.names = TRUE)
  unlink(files, recursive = TRUE, force = force)
}
| /R/caching.R | permissive | zeropoint000001/rnoaa | R | false | false | 899 | r | #' Clear cached files
#'
#' @name caching
#' @param force (logical) Should we force removal of files if permissions
#' say otherwise?. Default: \code{FALSE}
#'
#' @details BEWARE: this will clear all cached files.
#'
#' @section File storage:
#' We use \pkg{rappdirs} to store files, see
#' \code{\link[rappdirs]{user_cache_dir}} for how
#' we determine the directory on your machine to save files to, and run
#' \code{user_cache_dir("rnoaa")} to get that directory.
#' @export
#' @rdname caching
ghcnd_clear_cache <- function(force = FALSE) {
  # The `path` argument was removed; fail loudly if a caller still names it.
  supplied_args <- names(sapply(match.call(), deparse))[-1]
  if ("path" %in% supplied_args) {
    stop("The parameter path has been removed, see ?ghcnd_clear_cache",
      call. = FALSE)
  }
  # Wipe everything cached under <rnoaa cache>/ghcnd; `force` is forwarded
  # to unlink() to override restrictive file permissions.
  ghcnd_dir <- file.path(rnoaa_cache_dir, "ghcnd")
  cached_files <- list.files(ghcnd_dir, full.names = TRUE)
  unlink(cached_files, recursive = TRUE, force = force)
}
|
#' Specificity Score Calculation with Statistical Testing
#'
#' @author Eddie Cano-Gamez, \email{ecg@@sanger.ac.uk}
#' @usage testSpecificities(rna, protein, sample_groups)
#' @description Given an RNA expression matrix (and optionally a matching protein expresison matrix), this function calculates a score reflecting how specific the expresison of each gene is for each sample category compared to the rest. Then, it uses permutations to tests if the gene is more specific than expected by chance.
#' @param rna RNA expression matrix. Must be a data.frame with columns named after sample category and rows named after genes. Expression values must be possitive and comparable across samples (ie. normalized for library size) before running this function.
#' @param protein Protein expression matrix (optional). Defaults to "none". Must be a data.frame with columns named after sample category and rows named after genes. Rows and sample categories must match those of the RNA matrix. Expression values must be possitive and comparable across samples.
#' @param sample.labels List of sample categories (eg. biological replicates, cell types, tissues, etc...). Must be a character vector. Its elements must match the column names of the RNA (and protein) matrices. Each category is listed only once.
#' @param weight.rna When considering both RNA and protein expression, weight assigned to the RNA data. Defaults to 0.5. Must be a number between 0 and 1.
#' @param weight.protein When considering both RNA and protein expression, weight assigned to the protein data. Defaults to 0.5. Must be a number between 0 and 1.
#' @param iter Number of permutations run when testing for statistical signifiance. Defaults to 1000.
#' @details
#' This function takes either one or two correlated expression matrices (eg. RNA and protein expression from the same set of samples) and calculates a specificity score for each sample category (eg. tissue, cell type or biological replicate).
#' Specificity score calculation is done using getSpecificities() (see documentation for this function).
#' After computing specificity scores, the function generates null distributions by randomly permuting the matrix sample names. This is done as many times as specified by the user. The observed specificity score is compared to the scores observed in the permuted data and a P value is calculated as the number of times the observed score is larger than the permuted score.
#' To account for multiple hypothesis testing, the function also implements P-value correction using the Benjamini-Hochberg method (see documentation for p.adjust()).
#' @export
#' @examples
#' ## USING ONE DATA SET ONLY (eg. RNA ONLY)
#'
#' # Simulating mock RNA data:
#' rna.example <- data.frame(matrix(rnorm(9000,mean=2000,sd=100),ncol=9,nrow=100))
#' sample_groups <- c("A","B","C")
#' gene_names <- paste("g",1:100,sep="")
#' colnames(rna.example) <- rep(sample_groups,each=3)
#' rownames(rna.example) <- gene_names
#'
#' # Simulating sets of highly expressed genes in each sample group only
#' rna.example[1:10,1:3] <- rna.example[1:10,1:3] + rnorm(1,mean=4000,sd=1000)
#' rna.example[20:30,4:6] <- rna.example[20:30,4:6] + rnorm(1,mean=4000,sd=1000)
#' rna.example[90:100,7:9] <- rna.example[90:100,7:9] + rnorm(1,mean=4000,sd=1000)
#'
#' # Running the function:
#' testSpecificities(rna.example, sample.labels = sample_groups, iter=1000)
#'
#' @examples
#' ## USING TWO MATCHING DATA SETS (eg. RNA AND PROTEIN)
#'
#' # Simulating matching mock Protein data:
#' prot.example <- data.frame(matrix(rnorm(9000,mean=7000,sd=100),ncol=9,nrow=100))
#' colnames(prot.example) <- rep(sample_groups,each=3)
#' rownames(prot.example) <- gene_names
#'
#' # Simulating sets of highly expressed proteins in each sample group only:
#' prot.example[1:10,1:3] <- prot.example[1:10,1:3] + rnorm(1,mean=1500,sd=1000)
#' prot.example[20:30,4:6] <- prot.example[20:30,4:6] + rnorm(1,mean=1500,sd=1000)
#' prot.example[90:100,7:9] <- prot.example[90:100,7:9] + rnorm(1,mean=1500,sd=1000)
#'
#' # Running the function:
#' testSpecificities(rna.example, prot.example, sample.labels = sample_groups, iter=1000)
testSpecificities <- function(rna.exp, prot.exp="none", sample.labels, weight.rna=0.5, weight.protein=0.5, iter=1000){
  # Compute per-category specificity scores (via getSpecificities) and test
  # them against a null distribution obtained by permuting sample labels
  # `iter` times. Returns a list with elements:
  #   specificities - observed score matrix
  #   p.val         - empirical one-sided p-values
  #   p.adj         - Benjamini-Hochberg adjusted p-values (per column)
  #
  # Every sample label must appear among the RNA column names.
  if (sum(names(table(colnames(rna.exp))) %in% sample.labels) != length(sample.labels)) {
    stop("RNA columns and sample labels do not match", call. = FALSE)
  }
  # BUG FIX: the original used ifelse(prot.exp == "none", {...}, {...}).
  # ifelse() is a vectorised value selector, not control flow: when
  # prot.exp is a data.frame the test becomes a matrix and the branch
  # choice is accidental. identical() gives an unambiguous scalar test
  # with the same outcome for the intended inputs.
  if (identical(prot.exp, "none")) {
    # --- RNA-only mode ---
    S <- getSpecificities(rna.exp, prot.exp, sample.labels, weight.rna, weight.protein)
    test.res <- matrix(0, nrow = dim(S)[1], ncol = dim(S)[2])
    # Count, per cell, how often a label-permuted score beats the observed one.
    foreach(icount(iter), .combine = 'c', .errorhandling = 'pass') %do% {
      rna.null <- rna.exp
      colnames(rna.null) <- sample(colnames(rna.null))
      S.null <- getSpecificities(rna.null, sample.labels = sample.labels)
      comparison <- (S < S.null) * 1
      test.res <- test.res + comparison
    }
  } else {
    # --- RNA + protein mode: the two matrices must line up exactly ---
    if (nrow(prot.exp) != nrow(rna.exp)) {
      stop("RNA and protein matrices have a different number of rows", call. = FALSE)
    }
    if (ncol(prot.exp) != ncol(rna.exp)) {
      stop("RNA and protein matrices have a different number of columns", call. = FALSE)
    }
    if (sum(rownames(prot.exp) != rownames(rna.exp)) > 0) {
      stop("Gene names do not match between RNA and protein", call. = FALSE)
    }
    if (sum(names(table(colnames(prot.exp))) %in% sample.labels) != length(sample.labels)) {
      stop("Protein columns and sample labels do not match", call. = FALSE)
    }
    if (sum(colnames(prot.exp) != colnames(rna.exp)) > 0) {
      stop("Column names of protein and RNA do not match", call. = FALSE)
    }
    S <- getSpecificities(rna.exp, prot.exp, sample.labels, weight.rna, weight.protein)
    test.res <- matrix(0, nrow = dim(S)[1], ncol = dim(S)[2])
    # Permute RNA sample labels and copy the same permuted labels onto the
    # protein matrix so the RNA/protein pairing is kept under the null.
    foreach(icount(iter), .combine = 'c', .errorhandling = 'pass') %do% {
      rna.null <- rna.exp
      colnames(rna.null) <- sample(colnames(rna.null))
      protein.null <- prot.exp
      colnames(protein.null) <- colnames(rna.null)
      S.null <- getSpecificities(rna.null, protein.null, sample.labels = sample.labels, weight.rna, weight.protein)
      comparison <- (S < S.null) * 1
      test.res <- test.res + comparison
    }
  }
  # Empirical p-values with a +1 pseudo-count (so p is never exactly 0),
  # then per-column Benjamini-Hochberg adjustment.
  pvals <- as.data.frame((test.res + 1) / iter)
  padj <- as.data.frame(apply(pvals, MARGIN = 2, FUN = function(p) { p.adjust(p, method = "BH") }))
  list(specificities = S, p.val = pvals, p.adj = padj)
}
| /R/test_specificities.R | no_license | eddiecg/proteogenomic | R | false | false | 6,437 | r | #' Specificity Score Calculation with Statistical Testing
#'
#' @author Eddie Cano-Gamez, \email{ecg@@sanger.ac.uk}
#' @usage testSpecificities(rna, protein, sample_groups)
#' @description Given an RNA expression matrix (and optionally a matching protein expresison matrix), this function calculates a score reflecting how specific the expresison of each gene is for each sample category compared to the rest. Then, it uses permutations to tests if the gene is more specific than expected by chance.
#' @param rna RNA expression matrix. Must be a data.frame with columns named after sample category and rows named after genes. Expression values must be possitive and comparable across samples (ie. normalized for library size) before running this function.
#' @param protein Protein expression matrix (optional). Defaults to "none". Must be a data.frame with columns named after sample category and rows named after genes. Rows and sample categories must match those of the RNA matrix. Expression values must be possitive and comparable across samples.
#' @param sample.labels List of sample categories (eg. biological replicates, cell types, tissues, etc...). Must be a character vector. Its elements must match the column names of the RNA (and protein) matrices. Each category is listed only once.
#' @param weight.rna When considering both RNA and protein expression, weight assigned to the RNA data. Defaults to 0.5. Must be a number between 0 and 1.
#' @param weight.protein When considering both RNA and protein expression, weight assigned to the protein data. Defaults to 0.5. Must be a number between 0 and 1.
#' @param iter Number of permutations run when testing for statistical signifiance. Defaults to 1000.
#' @details
#' This function takes either one or two correlated expression matrices (eg. RNA and protein expression from the same set of samples) and calculates a specificity score for each sample category (eg. tissue, cell type or biological replicate).
#' Specificity score calculation is done using getSpecificities() (see documentation for this function).
#' After computing specificity scores, the function generates null distributions by randomly permuting the matrix sample names. This is done as many times as specified by the user. The observed specificity score is compared to the scores observed in the permuted data and a P value is calculated as the number of times the observed score is larger than the permuted score.
#' To account for multiple hypothesis testing, the function also implements P-value correction using the Benjamini-Hochberg method (see documentation for p.adjust()).
#' @export
#' @examples
#' ## USING ONE DATA SET ONLY (eg. RNA ONLY)
#'
#' # Simulating mock RNA data:
#' rna.example <- data.frame(matrix(rnorm(9000,mean=2000,sd=100),ncol=9,nrow=100))
#' sample_groups <- c("A","B","C")
#' gene_names <- paste("g",1:100,sep="")
#' colnames(rna.example) <- rep(sample_groups,each=3)
#' rownames(rna.example) <- gene_names
#'
#' # Simulating sets of highly expressed genes in each sample group only
#' rna.example[1:10,1:3] <- rna.example[1:10,1:3] + rnorm(1,mean=4000,sd=1000)
#' rna.example[20:30,4:6] <- rna.example[20:30,4:6] + rnorm(1,mean=4000,sd=1000)
#' rna.example[90:100,7:9] <- rna.example[90:100,7:9] + rnorm(1,mean=4000,sd=1000)
#'
#' # Running the function:
#' testSpecificities(rna.example, sample.labels = sample_groups, iter=1000)
#'
#' @examples
#' ## USING TWO MATCHING DATA SETS (eg. RNA AND PROTEIN)
#'
#' # Simulating matching mock Protein data:
#' prot.example <- data.frame(matrix(rnorm(9000,mean=7000,sd=100),ncol=9,nrow=100))
#' colnames(prot.example) <- rep(sample_groups,each=3)
#' rownames(prot.example) <- gene_names
#'
#' # Simulating sets of highly expressed proteins in each sample group only:
#' prot.example[1:10,1:3] <- prot.example[1:10,1:3] + rnorm(1,mean=1500,sd=1000)
#' prot.example[20:30,4:6] <- prot.example[20:30,4:6] + rnorm(1,mean=1500,sd=1000)
#' prot.example[90:100,7:9] <- prot.example[90:100,7:9] + rnorm(1,mean=1500,sd=1000)
#'
#' # Running the function:
#' testSpecificities(rna.example, prot.example, sample.labels = sample_groups, iter=1000)
# Permutation test of specificity scores; see the roxygen block above for
# the user-facing contract. Relies on getSpecificities() (defined elsewhere
# in the package) and on foreach/icount being attached.
testSpecificities <- function(rna.exp, prot.exp="none", sample.labels, weight.rna=0.5, weight.protein=0.5, iter=1000){
# Every sample label must appear among the RNA column names.
if(sum(names(table(colnames(rna.exp))) %in% sample.labels) != length(sample.labels)){
stop("RNA columns and sample labels do not match",call.=F)
}
# NOTE(review): ifelse() is a vectorised value selector, not control flow;
# a scalar if/else on identical(prot.exp, "none") would be the conventional
# construct here. When prot.exp is a data.frame the test below is a matrix
# of FALSE, so the second brace block runs -- verify this is intentional.
ifelse(prot.exp=="none",{
# RNA-only branch: observed scores, then a permutation null.
S <- getSpecificities(rna.exp, prot.exp, sample.labels, weight.rna, weight.protein)
# test.res accumulates, per cell, how many permutations beat the observed score.
test.res <- matrix(0,nrow = dim(S)[1], ncol=dim(S)[2])
# NOTE(review): accumulation via <- inside %do% assumes foreach evaluates
# in the function environment -- confirm with the foreach version in use.
foreach(icount(iter), .combine='c', .errorhandling='pass') %do% {
rna.null <- rna.exp
colnames(rna.null) <- sample(colnames(rna.null))
S.null <- getSpecificities(rna.null, sample.labels=sample.labels)
# 1 where the permuted score exceeds the observed score, else 0.
comparison <- S < S.null
comparison <- comparison*1
test.res <- test.res + comparison
}
},
{
# RNA + protein branch: validate that the two matrices line up exactly.
if(nrow(prot.exp)!=nrow(rna.exp)){
stop("RNA and protein matrices have a different number of rows",call.=F)
}
if(ncol(prot.exp)!=ncol(rna.exp)){
stop("RNA and protein matrices have a different number of columns",call.=F)
}
if(sum(rownames(prot.exp)!= rownames(rna.exp)) > 0){
stop("Gene names do not match between RNA and protein",call.=F)
}
if(sum(names(table(colnames(prot.exp))) %in% sample.labels) != length(sample.labels)){
stop("Protein columns and sample labels do not match",call.=F)
}
if(sum(colnames(prot.exp)!= colnames(rna.exp)) > 0){
stop("Column names of protein and RNA do not match",call.=F)
}
S <- getSpecificities(rna.exp, prot.exp, sample.labels, weight.rna, weight.protein)
test.res <- matrix(0,nrow = dim(S)[1], ncol=dim(S)[2])
# Permute RNA labels and copy the same permuted labels onto the protein
# matrix so the RNA/protein pairing is preserved under the null.
foreach(icount(iter), .combine='c', .errorhandling='pass') %do% {
rna.null <- rna.exp
colnames(rna.null) <- sample(colnames(rna.null))
protein.null <- prot.exp
colnames(protein.null) <- colnames(rna.null)
S.null <- getSpecificities(rna.null, protein.null, sample.labels=sample.labels, weight.rna, weight.protein)
comparison <- S < S.null
comparison <- comparison*1
test.res <- test.res + comparison
}
})
# Empirical p-values with a +1 pseudo-count (never exactly 0), then
# per-column Benjamini-Hochberg adjustment.
pvals <- as.data.frame((test.res+1)/iter)
padj <-as.data.frame(apply(pvals, MARGIN=2, FUN=function(p){p.adjust(p, method="BH")}))
res <- list(specificities=S,p.val=pvals, p.adj=padj)
return(res)
}
|
# Load the tidyverse and an Excel reader.
library(tidyverse)
library(openxlsx)

# Fertility data (children per woman) downloaded from gapminder.org/data
# and stored alongside the R project. A first full read shows the sheet
# also contains projected future years.
fertility_raw <- read.xlsx("children_per_woman_total_fertility.xlsx")
View(fertility_raw)

# Re-read, keeping only historical years (columns 1-223); projections are
# dropped because the analysis only summarises what has already happened.
fertility <- read.xlsx("children_per_woman_total_fertility.xlsx", cols = 1:223)

# Employment data: percentage of women aged 15-24 in employment. Used as-is.
employment <- read.xlsx("females_aged_15_24_employment_rate_percent.xlsx")
View(employment)

# Reshape each sheet from wide (one column per year) to long format so that
# the country + year pair uniquely identifies a row.
fertility_long <- fertility %>% pivot_longer(!country, names_to = "year", values_to = "children")
employment_long <- employment %>% pivot_longer(!country, names_to = "year", values_to = "employpercent")

# Left join keeps every fertility row (the larger data set); employment
# cells are left NA where no matching data exist.
combined <- left_join(fertility_long, employment_long, by = c("year", "country"))

is_tibble(combined)
#Verifying that dataset is a tibble for the next section, this returns True | /wrangling_code.R | permissive | MA615-RAD/Assignment2 | R | false | false | 2,193 | r | #Load tidyverse and excel packages
# Wrangling script: combine gapminder fertility and youth-employment data
# into one long-format tibble keyed by country + year.
library(tidyverse)
library(openxlsx)
# Both datasets were downloaded from https://www.gapminder.org/data/ and
# saved in the R project so they can be read in directly.
Children1<-read.xlsx("children_per_woman_total_fertility.xlsx")
View(Children1)
# The full sheet contains projected future years; those are omitted below
# because the analysis only summarises what has already happened.
Children<-read.xlsx("children_per_woman_total_fertility.xlsx", cols=1:223)
# Re-read keeping only the historical years (columns 1-223).
Employment<-read.xlsx("females_aged_15_24_employment_rate_percent.xlsx")
View(Employment)
# Employment data (percentage of women aged 15-24 employed) is used as-is.
child<-Children %>% pivot_longer(!country, names_to = "year", values_to = "children")
# pivot_longer() turns each year column into rows, giving three columns:
# country, year, and average number of children (country + year is unique).
employ<-Employment %>% pivot_longer(!country, names_to = "year", values_to = "employpercent")
# Same reshape for employment: country, year, percent employed (15-24).
data<-left_join(child, employ, by=c("year", "country"))
# Left join on (year, country): Children covers more countries/years than
# Employment, so employment cells are left NA where data are missing.
is_tibble(data)
# Verifying the combined dataset is a tibble; this prints TRUE.
############################################################################################
#
# Step 1: EU meadow birds meta-analysis - DATA PREPARATION FROM EXTRACTED DATABASE
#
############################################################################################
# Samantha Franks
# 11 March 2016
# 22 Dec 2016

#================================= SET LOGIC STATEMENTS ====================

#================================= LOAD PACKAGES =================================

# Install any packages that are missing, then attach them all.
list.of.packages <- c("MASS","reshape","raster","sp","rgeos","rgdal","dplyr")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if (length(new.packages) > 0) install.packages(new.packages)
lapply(list.of.packages, library, character.only=TRUE)

#================================= SET DIRECTORY STRUCTURE ================================

# Detect which machine the script is running on and set platform flags.
# NOTE: the original code used `.Platform$OS`, which only works through `$`
# partial matching of the `OS.type` element; the full name is used here so the
# code no longer depends on partial matching.

# LOCAL (Windows PC)
if (.Platform$OS.type == 'windows') {
  cluster <- FALSE
  Mac <- FALSE
  Wales <- FALSE  # set here too, for consistency with the other branches
}
# HPCBTO (BTO cluster, user 'samf')
if (.Platform$OS.type == 'unix' && Sys.getenv('USER') == 'samf') {
  cluster <- TRUE
  Mac <- FALSE
  Wales <- FALSE
}
# Mac (local macOS, user 'samantha')
if (.Platform$OS.type == 'unix' && Sys.getenv('USER') == 'samantha') {
  cluster <- FALSE
  Mac <- TRUE
  Wales <- FALSE
}

#### SET DIRECTORY PATHS
# # Wales HPC cluster
# if (cluster) parentwd <- c("/home/samantha.franks/")
if (cluster) parentwd <- c("/users1/samf") # BTO cluster
if (!cluster) {
  if (!Mac) parentwd <- c("C:/Users/samf/Documents/Git/eu_meadow_birds")
  if (Mac) parentwd <- c("/Volumes/SAM250GB/BTO PC Documents/Git/eu_meadow_birds")
}

# Sub-directories used throughout the analysis
scriptswd <- paste(parentwd, "scripts", sep="/")
datawd <- paste(parentwd, "data", sep="/")
outputwd <- paste(parentwd, "output/revision Dec 2016", sep="/")
workspacewd <- paste(parentwd, "workspaces", sep="/")

options(digits=6)
#================================= LOAD & CLEAN DATA ===============================
# d0 <- read.csv(paste(datawd, "meadow birds data extraction template_final_primary.csv", sep="/"), header=TRUE, skip=1)
d0 <- read.csv(paste(datawd, "Meadow birds data extraction template_primary and grey_standardized_FINAL.csv", sep="/"), header=TRUE)
#------- Meta-data reference for studies -------------
# create a meta-data reference file for studies with reference numbers, reference name, summary, country, region
metadat0 <- unique(d0[,c("reference.number","reference","literature.type","one.sentence.summary","score","country","region1","region2")])
#------- Clean dataset -----------
# columns required for the analysis (subset of the full extraction template)
cols.required <- c("reference.number","record.number","literature.type","score","country","region1","habitat","habitat1","habitat2","start.year","end.year","type.of.study","species","assemblage","agri.environment","basic.agri.environment", "targeted.agri.environment..wader.specific.or.higher.level.", "site.protection...nature.reserve","site.protection...designation", "mowing","grazing","fertilizer","herbicides...pesticides","nest.protection...agricultural.activities","nest.protection...predation..enclosures.or.exclosures.", "ground.water.management..drainage.inhibited.","wet.features...surface.water.management","predator.control","other.mgmt", "management.notes","overall.metric","specific.metric","reference.metric.before.management","metric.after.management","standardized.metric","standardisation.calculation","stand..reference.metric.before.management","stand..metric.after.management", "stand..effect.size","sample.size.before","sample.size.after", "uncertainty.measure.before","uncertainty.measure.after","uncertainty.measure.type","significant.effect..Y.N..U.","direction.of.effect..positive...negative...none...no.data.","unit.of.analysis","sample.size","analysis.type.1","analysis.type.2","analysis.type.details","values.obtained.from.plot.")
d0.1 <- subset(d0, select=cols.required)
# rename to easier (shorter) variables; order must match cols.required above
d0.2 <- d0.1
names(d0.2) <- c("reference","record","lit.type","score","country","region1","habitat","habitat1","habitat2","start.year","end.year","study.type","species","assemblage","AE","basic.AE","higher.AE","reserve","designation","mowing","grazing","fertilizer","pesticide","nest.protect.ag","nest.protect.predation","groundwater.drainage","surface.water","predator.control","other.mgmt","mgmt.notes","overall.metric","specific.metric","metric.before","metric.after","stan.metric","stan.calc","stan.metric.before","stan.metric.after","stan.effect.size","n.before","n.after","var.before","var.after","var.type","sig","effect.dir","analysis.unit","sample.size","analysis1","analysis2","analysis3","values.from.plot")
# management intervention variables
mgmtvars <- c("AE","basic.AE","higher.AE","reserve","designation","mowing","grazing","fertilizer","pesticide","nest.protect.ag","nest.protect.predation","groundwater.drainage","surface.water","predator.control","other.mgmt")
### exclude studies 2 and 36
# 2: remove this reference (Kruk et al. 1997) as it doesn't really measure a population or demographic metric
# 36: remove this reference (Kleijn et al. 2004) as it pools an assessment of conservation across multiple species
d0.2 <- subset(d0.2, reference!=36) # Kleijn et al. 2004: pools an assessment of conservation across multiple species
d0.2 <- subset(d0.2, reference!=2) # Kruk et al. 1997: doesn't really measure a population or demographic metric
d0.2 <- droplevels(d0.2)
d0.3 <- d0.2
# recode certain factor variable classes to more sensible classes
recode.as.char <- c("region1","mgmt.notes","specific.metric","stan.metric","stan.calc","var.before","var.after","analysis3")
# NOTE(review): apply() coerces the data.frame to a matrix before converting;
# assumes all selected columns are factors/characters (a numeric column would
# pick up whitespace padding from the matrix coercion) - TODO confirm
d0.3[,recode.as.char] <- apply(d0.3[,recode.as.char], 2, as.character)
# non-numeric entries become NA here (with a coercion warning) - expected
d0.3$stan.effect.size <- as.numeric(as.character(d0.3$stan.effect.size))
# recode management vars as characters to be able to use string substitution find and replace to create generic applied, restricted, removed levels for all management types
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, as.character)
# collapse the site-scale / landscape-scale distinction: e.g. both
# "applied site scale" and "applied landscape scale" become "applied"
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("applied site scale", "applied", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("applied landscape scale", "applied", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("restricted site scale", "restricted", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("restricted landscape scale", "restricted", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("removed site scale", "removed", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("removed landscape scale", "removed", x)
})
# plug 'none' into all the blanks where management intervention not used
for (i in 1:length(mgmtvars)) {
d0.3[d0.3[,mgmtvars[i]]=="",mgmtvars[i]] <- "none"
}
# recode sample size as small, medium, large (anything else falls through to "large")
d0.3$sample.size <- ifelse(d0.3$sample.size=="small (< 30)", "small", ifelse(d0.3$sample.size=="medium (30-100)", "medium", "large"))
# redefine dataset
d0.4 <- d0.3
# # change management vars back to factors for analysis
# # d0.4[,mgmtvars] <- apply(d0.4[,mgmtvars], 2, function(x) as.factor(x)) # this line won't convert back to factors for some reason!
# for (i in 1:length(mgmtvars)) {
# d0.4[,mgmtvars[i]] <- as.factor(d0.4[,mgmtvars[i]])
# }
# summary(d0.4)
#---------- Add some additional grouping variables -----------
# group fertilizer and pesticides into single variable
d0.4$fertpest <- ifelse(d0.4$fertilizer=="applied" | d0.4$pesticide=="applied", "applied", ifelse(d0.4$fertilizer=="restricted" | d0.4$pesticide=="restricted", "restricted", ifelse(d0.4$fertilizer=="removed" | d0.4$pesticide=="removed", "removed", "none")))
# group groundwater.drainage and surface.water into single variable meaning 'more water'
# restricted/removed groundwater drainage equates to more water (same as applying surface water)
# combinations of drainage/surface water in dataset (printed for inspection)
unique(d0.4[,c("groundwater.drainage","surface.water")])
# NOTE(review): in the first condition `&` binds tighter than `|`, so it reads
# restricted | (removed & applied); the net result is unchanged only because the
# second branch already maps restricted/removed to "applied" - parentheses
# would make the intent explicit
d0.4$water <- ifelse(d0.4$groundwater.drainage=="restricted" | d0.4$groundwater.drainage=="removed" & d0.4$surface.water=="applied", "applied", ifelse(d0.4$groundwater.drainage=="restricted" | d0.4$groundwater.drainage=="removed", "applied", ifelse(d0.4$surface.water=="applied", "applied", ifelse(d0.4$groundwater.drainage=="applied","restricted","none"))))
# group nest protection (predation and agricultural) variables together
unique(d0.4[,c("nest.protect.ag","nest.protect.predation")])
d0.4$nest.protect <- ifelse(d0.4$nest.protect.predation=="applied" | d0.4$nest.protect.ag=="applied", "applied","none")
# # group nest protection (predation) with predator control (more sensible than grouping it with nest protection for agriculture given predation measures are more likely to go together)
# unique(d0.4[,c("nest.protect.ag","nest.protect.predation","predator.control")])
# d0.4$predation.reduction <- ifelse(d0.4$nest.protect.predation=="applied" | d0.4$predator.control=="applied", "applied", ifelse(d0.4$predator.control=="restricted", "restricted", ifelse(d0.4$predator.control=="removed", "removed","none")))
# group reserves and site designations
d0.4$reserve.desig <- ifelse(d0.4$reserve=="applied" | d0.4$designation=="applied", "applied", "none")
# create a AE-level variable (with basic and higher as levels) for analysis 1a
# if no info was provided on type of AES, then assume it was basic rather than higher-level or targetted
d0.4$AE.level <- ifelse(d0.4$higher.AE=="applied", "higher", ifelse(d0.4$AE=="none", "none", "basic"))
# calculate study duration variable (inclusive of both end years)
d0.4$study.length <- d0.4$end.year - d0.4$start.year + 1
# add some overall metrics which lump all productivity metrics, all abundance metrics, all occupancy metrics
# note: "occupancy" is the fall-through category for anything not matched above
d0.4$metric <- ifelse(grepl("productivity", d0.4$overall.metric), "productivity", ifelse(grepl("abundance", d0.4$overall.metric), "abundance", ifelse(grepl("recruitment", d0.4$overall.metric), "recruitment", ifelse(grepl("survival", d0.4$overall.metric), "survival", "occupancy"))))
#------------- Change the predator.control level for studies 5 & 10 ---------------
# these 2 studies both deal with the effects of a halt in predator control/game-keepering on grouse moors and the impacts on wader populations
# kind of a reverse of what the conservation measure would normally be (control applied), so reverse the level of predator control to 'applied' and change the direction of the effect (but obviously leave the significance)
# create 5 new records for these studies (2 and 3 each), then add them to the dataset
# NOTE(review): the original comment said the new records would have "THEIR
# EFFECT SIZES REMOVED", but stan.effect.size is in fact recomputed below from
# the swapped before/after metrics - confirm which behaviour is intended
temp <- d0.4[d0.4$reference=="5" | d0.4$reference=="10",]
newtemp <- temp
# change predator control to applied
newtemp$predator.control <- "applied"
# change positives to negatives and vice versa
newtemp$effect.dir <- ifelse(newtemp$effect.dir=="positive","negative","positive")
# swap the before/after metrics (both raw and standardized)
newtemp$metric.before <- temp$metric.after
newtemp$metric.after <- temp$metric.before
newtemp$stan.metric.before <- temp$stan.metric.after
newtemp$stan.metric.after <- temp$stan.metric.before
newtemp$stan.effect.size <- (newtemp$stan.metric.after - newtemp$stan.metric.before)/abs(newtemp$stan.metric.before)
# remove the original records from the dataset and add these new ones in
# (the -which() idiom would drop ALL rows if there were no matches; safe here
# because studies 5 and 10 are known to be present)
d0.4 <- d0.4[-which(d0.4$reference %in% c("5","10")),]
# NOTE(review): d0.5 does not appear to be referenced by any later step in this
# script (the following section continues from d0.4), so the reversed records
# built above are effectively discarded - confirm intent
d0.5 <- rbind(d0.4, newtemp)
#------------ Add the success/failure/outcome variables --------------
# NOTE(review): these columns are added to d0.4, not d0.5, and d1 is taken from
# d0.4 below - so the reversed predator-control records for studies 5 & 10
# (d0.5) never reach the definitive dataset; confirm this is intentional
# success variable defined as 1 = significant positive effect, 0 = neutral or negative effect
d0.4$success <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="positive", 1, 0) # success variable
# failure variable defined as 1 = significant negative effect, 0 = neutral or positive effect
d0.4$failure <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="negative", 1, 0) # failure variable
# outcome variable: -1 = significant negative, 0 = no effect, 1 = significant positive
d0.4$outcome <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="positive", 1, ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="negative", -1, 0)) # outcome variable
#------------- Recode removed/restricted as single level=reduced --------------
# final dataset for analysis
d1 <- d0.4
# new set of management variables
mgmtvars <- c("AE","AE.level","reserve.desig","mowing","grazing","fertpest","nest.protect","predator.control","water")
# convert removed or restricted levels of the management vars (all but AE.level) to a single level = reduced
# use find and replace with gsub
d1[,mgmtvars] <- apply(d1[,mgmtvars], 2, function(x) {
gsub("removed", "reduced", x)
})
d1[,mgmtvars] <- apply(d1[,mgmtvars], 2, function(x) {
gsub("restricted", "reduced", x)
})
#------------- Definitive dataset --------------
### Save definitive dataset
# NOTE(review): the leading "/" plus sep="/" yields a double slash in the path
# ("workspaces//revision Dec 2016/..."); harmless on most systems, but
# inconsistent with the other paste() calls
saveRDS(d1, file=paste(workspacewd, "/revision Dec 2016/meadow birds analysis dataset_full.rds", sep="/"))
write.table(d1, file=paste(datawd, "meadow birds analysis dataset_full.txt", sep="/"), row.names=FALSE, quote=FALSE, sep="\t")
write.csv(d1, file=paste(datawd, "meadow birds analysis dataset_full.csv", sep="/"), row.names=FALSE)
| /scripts/1_data preparation.R | no_license | samfranks/eu_meadow_birds | R | false | false | 13,085 | r | ############################################################################################
#
# Step 1: EU meadow birds meta-analysis - DATA PREPARATION FROM EXTRACTED DATABASE
#
############################################################################################
# Samantha Franks
# 11 March 2016
# 22 Dec 2016

#================================= SET LOGIC STATEMENTS ====================

#================================= LOAD PACKAGES =================================

# Install any packages that are missing, then attach them all.
list.of.packages <- c("MASS","reshape","raster","sp","rgeos","rgdal","dplyr")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if (length(new.packages) > 0) install.packages(new.packages)
lapply(list.of.packages, library, character.only=TRUE)

#================================= SET DIRECTORY STRUCTURE ================================

# Detect which machine the script is running on and set platform flags.
# NOTE: the original code used `.Platform$OS`, which only works through `$`
# partial matching of the `OS.type` element; the full name is used here so the
# code no longer depends on partial matching.

# LOCAL (Windows PC)
if (.Platform$OS.type == 'windows') {
  cluster <- FALSE
  Mac <- FALSE
  Wales <- FALSE  # set here too, for consistency with the other branches
}
# HPCBTO (BTO cluster, user 'samf')
if (.Platform$OS.type == 'unix' && Sys.getenv('USER') == 'samf') {
  cluster <- TRUE
  Mac <- FALSE
  Wales <- FALSE
}
# Mac (local macOS, user 'samantha')
if (.Platform$OS.type == 'unix' && Sys.getenv('USER') == 'samantha') {
  cluster <- FALSE
  Mac <- TRUE
  Wales <- FALSE
}

#### SET DIRECTORY PATHS
# # Wales HPC cluster
# if (cluster) parentwd <- c("/home/samantha.franks/")
if (cluster) parentwd <- c("/users1/samf") # BTO cluster
if (!cluster) {
  if (!Mac) parentwd <- c("C:/Users/samf/Documents/Git/eu_meadow_birds")
  if (Mac) parentwd <- c("/Volumes/SAM250GB/BTO PC Documents/Git/eu_meadow_birds")
}

# Sub-directories used throughout the analysis
scriptswd <- paste(parentwd, "scripts", sep="/")
datawd <- paste(parentwd, "data", sep="/")
outputwd <- paste(parentwd, "output/revision Dec 2016", sep="/")
workspacewd <- paste(parentwd, "workspaces", sep="/")

options(digits=6)
#================================= LOAD & CLEAN DATA ===============================
# d0 <- read.csv(paste(datawd, "meadow birds data extraction template_final_primary.csv", sep="/"), header=TRUE, skip=1)
d0 <- read.csv(paste(datawd, "Meadow birds data extraction template_primary and grey_standardized_FINAL.csv", sep="/"), header=TRUE)
#------- Meta-data reference for studies -------------
# create a meta-data reference file for studies with reference numbers, reference name, summary, country, region
metadat0 <- unique(d0[,c("reference.number","reference","literature.type","one.sentence.summary","score","country","region1","region2")])
#------- Clean dataset -----------
# columns required for the analysis (subset of the full extraction template)
cols.required <- c("reference.number","record.number","literature.type","score","country","region1","habitat","habitat1","habitat2","start.year","end.year","type.of.study","species","assemblage","agri.environment","basic.agri.environment", "targeted.agri.environment..wader.specific.or.higher.level.", "site.protection...nature.reserve","site.protection...designation", "mowing","grazing","fertilizer","herbicides...pesticides","nest.protection...agricultural.activities","nest.protection...predation..enclosures.or.exclosures.", "ground.water.management..drainage.inhibited.","wet.features...surface.water.management","predator.control","other.mgmt", "management.notes","overall.metric","specific.metric","reference.metric.before.management","metric.after.management","standardized.metric","standardisation.calculation","stand..reference.metric.before.management","stand..metric.after.management", "stand..effect.size","sample.size.before","sample.size.after", "uncertainty.measure.before","uncertainty.measure.after","uncertainty.measure.type","significant.effect..Y.N..U.","direction.of.effect..positive...negative...none...no.data.","unit.of.analysis","sample.size","analysis.type.1","analysis.type.2","analysis.type.details","values.obtained.from.plot.")
d0.1 <- subset(d0, select=cols.required)
# rename to easier (shorter) variables; order must match cols.required above
d0.2 <- d0.1
names(d0.2) <- c("reference","record","lit.type","score","country","region1","habitat","habitat1","habitat2","start.year","end.year","study.type","species","assemblage","AE","basic.AE","higher.AE","reserve","designation","mowing","grazing","fertilizer","pesticide","nest.protect.ag","nest.protect.predation","groundwater.drainage","surface.water","predator.control","other.mgmt","mgmt.notes","overall.metric","specific.metric","metric.before","metric.after","stan.metric","stan.calc","stan.metric.before","stan.metric.after","stan.effect.size","n.before","n.after","var.before","var.after","var.type","sig","effect.dir","analysis.unit","sample.size","analysis1","analysis2","analysis3","values.from.plot")
# management intervention variables
mgmtvars <- c("AE","basic.AE","higher.AE","reserve","designation","mowing","grazing","fertilizer","pesticide","nest.protect.ag","nest.protect.predation","groundwater.drainage","surface.water","predator.control","other.mgmt")
### exclude studies 2 and 36
# 2: remove this reference (Kruk et al. 1997) as it doesn't really measure a population or demographic metric
# 36: remove this reference (Kleijn et al. 2004) as it pools an assessment of conservation across multiple species
d0.2 <- subset(d0.2, reference!=36) # Kleijn et al. 2004: pools an assessment of conservation across multiple species
d0.2 <- subset(d0.2, reference!=2) # Kruk et al. 1997: doesn't really measure a population or demographic metric
d0.2 <- droplevels(d0.2)
d0.3 <- d0.2
# recode certain factor variable classes to more sensible classes
recode.as.char <- c("region1","mgmt.notes","specific.metric","stan.metric","stan.calc","var.before","var.after","analysis3")
# NOTE(review): apply() coerces the data.frame to a matrix before converting;
# assumes all selected columns are factors/characters (a numeric column would
# pick up whitespace padding from the matrix coercion) - TODO confirm
d0.3[,recode.as.char] <- apply(d0.3[,recode.as.char], 2, as.character)
# non-numeric entries become NA here (with a coercion warning) - expected
d0.3$stan.effect.size <- as.numeric(as.character(d0.3$stan.effect.size))
# recode management vars as characters to be able to use string substitution find and replace to create generic applied, restricted, removed levels for all management types
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, as.character)
# collapse the site-scale / landscape-scale distinction: e.g. both
# "applied site scale" and "applied landscape scale" become "applied"
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("applied site scale", "applied", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("applied landscape scale", "applied", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("restricted site scale", "restricted", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("restricted landscape scale", "restricted", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("removed site scale", "removed", x)
})
d0.3[,mgmtvars] <- apply(d0.3[,mgmtvars], 2, function(x) {
gsub("removed landscape scale", "removed", x)
})
# plug 'none' into all the blanks where management intervention not used
for (i in 1:length(mgmtvars)) {
d0.3[d0.3[,mgmtvars[i]]=="",mgmtvars[i]] <- "none"
}
# recode sample size as small, medium, large (anything else falls through to "large")
d0.3$sample.size <- ifelse(d0.3$sample.size=="small (< 30)", "small", ifelse(d0.3$sample.size=="medium (30-100)", "medium", "large"))
# redefine dataset
d0.4 <- d0.3
# # change management vars back to factors for analysis
# # d0.4[,mgmtvars] <- apply(d0.4[,mgmtvars], 2, function(x) as.factor(x)) # this line won't convert back to factors for some reason!
# for (i in 1:length(mgmtvars)) {
# d0.4[,mgmtvars[i]] <- as.factor(d0.4[,mgmtvars[i]])
# }
# summary(d0.4)
#---------- Add some additional grouping variables -----------
# group fertilizer and pesticides into single variable
d0.4$fertpest <- ifelse(d0.4$fertilizer=="applied" | d0.4$pesticide=="applied", "applied", ifelse(d0.4$fertilizer=="restricted" | d0.4$pesticide=="restricted", "restricted", ifelse(d0.4$fertilizer=="removed" | d0.4$pesticide=="removed", "removed", "none")))
# group groundwater.drainage and surface.water into single variable meaning 'more water'
# restricted/removed groundwater drainage equates to more water (same as applying surface water)
# combinations of drainage/surface water in dataset (printed for inspection)
unique(d0.4[,c("groundwater.drainage","surface.water")])
# NOTE(review): in the first condition `&` binds tighter than `|`, so it reads
# restricted | (removed & applied); the net result is unchanged only because the
# second branch already maps restricted/removed to "applied" - parentheses
# would make the intent explicit
d0.4$water <- ifelse(d0.4$groundwater.drainage=="restricted" | d0.4$groundwater.drainage=="removed" & d0.4$surface.water=="applied", "applied", ifelse(d0.4$groundwater.drainage=="restricted" | d0.4$groundwater.drainage=="removed", "applied", ifelse(d0.4$surface.water=="applied", "applied", ifelse(d0.4$groundwater.drainage=="applied","restricted","none"))))
# group nest protection (predation and agricultural) variables together
unique(d0.4[,c("nest.protect.ag","nest.protect.predation")])
d0.4$nest.protect <- ifelse(d0.4$nest.protect.predation=="applied" | d0.4$nest.protect.ag=="applied", "applied","none")
# # group nest protection (predation) with predator control (more sensible than grouping it with nest protection for agriculture given predation measures are more likely to go together)
# unique(d0.4[,c("nest.protect.ag","nest.protect.predation","predator.control")])
# d0.4$predation.reduction <- ifelse(d0.4$nest.protect.predation=="applied" | d0.4$predator.control=="applied", "applied", ifelse(d0.4$predator.control=="restricted", "restricted", ifelse(d0.4$predator.control=="removed", "removed","none")))
# group reserves and site designations
d0.4$reserve.desig <- ifelse(d0.4$reserve=="applied" | d0.4$designation=="applied", "applied", "none")
# create a AE-level variable (with basic and higher as levels) for analysis 1a
# if no info was provided on type of AES, then assume it was basic rather than higher-level or targetted
d0.4$AE.level <- ifelse(d0.4$higher.AE=="applied", "higher", ifelse(d0.4$AE=="none", "none", "basic"))
# calculate study duration variable (inclusive of both end years)
d0.4$study.length <- d0.4$end.year - d0.4$start.year + 1
# add some overall metrics which lump all productivity metrics, all abundance metrics, all occupancy metrics
# note: "occupancy" is the fall-through category for anything not matched above
d0.4$metric <- ifelse(grepl("productivity", d0.4$overall.metric), "productivity", ifelse(grepl("abundance", d0.4$overall.metric), "abundance", ifelse(grepl("recruitment", d0.4$overall.metric), "recruitment", ifelse(grepl("survival", d0.4$overall.metric), "survival", "occupancy"))))
#------------- Change the predator.control level for studies 5 & 10 ---------------
# these 2 studies both deal with the effects of a halt in predator control/game-keepering on grouse moors and the impacts on wader populations
# kind of a reverse of what the conservation measure would normally be (control applied), so reverse the level of predator control to 'applied' and change the direction of the effect (but obviously leave the significance)
# create 5 new records for these studies (2 and 3 each), then add them to the dataset
# NOTE(review): the original comment said the new records would have "THEIR
# EFFECT SIZES REMOVED", but stan.effect.size is in fact recomputed below from
# the swapped before/after metrics - confirm which behaviour is intended
temp <- d0.4[d0.4$reference=="5" | d0.4$reference=="10",]
newtemp <- temp
# change predator control to applied
newtemp$predator.control <- "applied"
# change positives to negatives and vice versa
newtemp$effect.dir <- ifelse(newtemp$effect.dir=="positive","negative","positive")
# swap the before/after metrics (both raw and standardized)
newtemp$metric.before <- temp$metric.after
newtemp$metric.after <- temp$metric.before
newtemp$stan.metric.before <- temp$stan.metric.after
newtemp$stan.metric.after <- temp$stan.metric.before
newtemp$stan.effect.size <- (newtemp$stan.metric.after - newtemp$stan.metric.before)/abs(newtemp$stan.metric.before)
# remove the original records from the dataset and add these new ones in
# (the -which() idiom would drop ALL rows if there were no matches; safe here
# because studies 5 and 10 are known to be present)
d0.4 <- d0.4[-which(d0.4$reference %in% c("5","10")),]
# NOTE(review): d0.5 does not appear to be referenced by any later step in this
# script (the following section continues from d0.4), so the reversed records
# built above are effectively discarded - confirm intent
d0.5 <- rbind(d0.4, newtemp)
#------------ Add the success/failure/outcome variables --------------
# NOTE(review): these columns are added to d0.4, not d0.5, and d1 is taken from
# d0.4 below - so the reversed predator-control records for studies 5 & 10
# (d0.5) never reach the definitive dataset; confirm this is intentional
# success variable defined as 1 = significant positive effect, 0 = neutral or negative effect
d0.4$success <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="positive", 1, 0) # success variable
# failure variable defined as 1 = significant negative effect, 0 = neutral or positive effect
d0.4$failure <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="negative", 1, 0) # failure variable
# outcome variable: -1 = significant negative, 0 = no effect, 1 = significant positive
d0.4$outcome <- ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="positive", 1, ifelse(d0.4$sig=="Y" & d0.4$effect.dir=="negative", -1, 0)) # outcome variable
#------------- Recode removed/restricted as single level=reduced --------------
# final dataset for analysis
d1 <- d0.4
# new set of management variables
mgmtvars <- c("AE","AE.level","reserve.desig","mowing","grazing","fertpest","nest.protect","predator.control","water")
# convert removed or restricted levels of the management vars (all but AE.level) to a single level = reduced
# use find and replace with gsub
d1[,mgmtvars] <- apply(d1[,mgmtvars], 2, function(x) {
gsub("removed", "reduced", x)
})
d1[,mgmtvars] <- apply(d1[,mgmtvars], 2, function(x) {
gsub("restricted", "reduced", x)
})
#------------- Definitive dataset --------------
### Save definitive dataset
# NOTE(review): the leading "/" plus sep="/" yields a double slash in the path
# ("workspaces//revision Dec 2016/..."); harmless on most systems, but
# inconsistent with the other paste() calls
saveRDS(d1, file=paste(workspacewd, "/revision Dec 2016/meadow birds analysis dataset_full.rds", sep="/"))
write.table(d1, file=paste(datawd, "meadow birds analysis dataset_full.txt", sep="/"), row.names=FALSE, quote=FALSE, sep="\t")
write.csv(d1, file=paste(datawd, "meadow birds analysis dataset_full.csv", sep="/"), row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{testvec_for_gamm4}
\alias{testvec_for_gamm4}
\title{Function to calculate the test vector for an object fitted with \code{gamm4}}
\usage{
testvec_for_gamm4(mod, name, sigma2 = NULL, nrlocs = 7)
}
\arguments{
\item{mod}{an object fitted with \code{gamm4}}
\item{name}{character; name of the covariate for which inference should be
calculated}
\item{sigma2}{variance to be used in the covariance definition. If \code{NULL},
the estimate \code{mod$gam$sig2} is used.}
\item{nrlocs}{number of locations at which p-values and intervals are to be computed
for non-linear terms. This directly corresponds to a sequence of \code{nrlocs} quantiles
of the given covariate values.}
}
\description{
Function to calculate the test vector for an object fitted with \code{gamm4}
}
\details{
Function provides the test vectors for every location of the given covariate
}
| /man/testvec_for_gamm4.Rd | no_license | davidruegamer/selfmade | R | false | true | 956 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{testvec_for_gamm4}
\alias{testvec_for_gamm4}
\title{Function to calculate the test vector for an object fitted with \code{gamm4}}
\usage{
testvec_for_gamm4(mod, name, sigma2 = NULL, nrlocs = 7)
}
\arguments{
\item{mod}{an object fitted with \code{gamm4}}
\item{name}{character; name of the covariate for which inference should be
calculated}
\item{sigma2}{variance to be used in the covariance definition. If \code{NULL},
the estimate \code{mod$gam$sig2} is used.}
\item{nrlocs}{number of locations at which p-values and intervals are to be computed
for non-linear terms. This directly corresponds to a sequence of \code{nrlocs} quantiles
of the given covariate values.}
}
\description{
Function to calculate the test vector for an object fitted with \code{gamm4}
}
\details{
Function provides the test vectors for every location of the given covariate
}
|
\name{as.prices}
\alias{as.prices}
\title{Coerce to prices class - time series of prices}
\usage{
as.prices(x, ...)
}
\description{
Coerce to prices class - time series of prices
}
| /man/as.prices.Rd | no_license | quantrocket/strategery | R | false | false | 186 | rd | \name{as.prices}
\alias{as.prices}
\title{Coerce to prices class - time series of prices}
\usage{
as.prices(x, ...)
}
\description{
Coerce to prices class - time series of prices
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_MAT.R
\name{calculate_mat}
\alias{calculate_mat}
\title{Function to calculate the maximum accurate time}
\usage{
calculate_mat(N = Inf, R = Inf, H_0 = 0.5, C = 1)
}
\arguments{
\item{N}{Population Size}
\item{R}{Number of genetic markers}
\item{H_0}{Frequency of heterozygosity at t = 0}
\item{C}{Mean number of crossovers per meiosis (e.g. size in Morgan of
the chromosome)}
}
\value{
The maximum accurate time
}
\description{
Function that calculates the maximum time after hybridization
after which the number of junctions can still be reliably used to estimate
the onset of hybridization. This is following equation 15 in
Janzen et al. 2018.
}
\examples{
calculate_mat(N = Inf, R = 1000, H_0 = 0.5, C = 1)
}
\keyword{analytic}
\keyword{error}
\keyword{time}
| /man/calculate_MAT.Rd | no_license | thijsjanzen/junctions | R | false | true | 853 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_MAT.R
\name{calculate_mat}
\alias{calculate_mat}
\title{Function to calculate the maximum accurate time}
\usage{
calculate_mat(N = Inf, R = Inf, H_0 = 0.5, C = 1)
}
\arguments{
\item{N}{Population Size}
\item{R}{Number of genetic markers}
\item{H_0}{Frequency of heterozygosity at t = 0}
\item{C}{Mean number of crossovers per meiosis (e.g. size in Morgan of
the chromosome)}
}
\value{
The maximum accurate time
}
\description{
Function that calculates the maximum time after hybridization
after which the number of junctions can still be reliably used to estimate
the onset of hybridization. This is following equation 15 in
Janzen et al. 2018.
}
\examples{
calculate_mat(N = Inf, R = 1000, H_0 = 0.5, C = 1)
}
\keyword{analytic}
\keyword{error}
\keyword{time}
|
# Auto-extracted usage example for the 'prostateSurvival' dataset shipped with
# the 'asaur' package.
library(asaur)
### Name: prostateSurvival
### Title: prostateSurvival
### Aliases: prostateSurvival
### Keywords: datasets
### ** Examples
# Load the dataset into the current session
data(prostateSurvival)
| /data/genthat_extracted_code/asaur/examples/prostateSurvival.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 169 | r | library(asaur)
### Name: prostateSurvival
### Title: prostateSurvival
### Aliases: prostateSurvival
### Keywords: datasets
### ** Examples
# Load the dataset into the current session (package attached above)
data(prostateSurvival)
|
\name{CropPhenology-package}
\alias{CropPhenology-package}
\alias{CropPhenology}
\docType{package}
\title{
\packageTitle{CropPhenology}
}
\description{
This package extracts crop phenological metrics from Moderate Resolution Imaging Spectroradiometer (MODIS) time series vegetation index data.
}
\details{
The 16-day composites of MODIS vegetation index data describe the overall growth condition of the crop across the growing season at a regular capture interval. Plotting the vegetation index value across time gives a time series curve that represents the seasonal growth pattern of the crop. The CropPhenology package extracts metrics from this time series curve based on the curve's nature and shape; these metrics indicate different physiological stages and conditions of the crop.
}
\author{
\packageAuthor{CropPhenology}
Maintainer: \packageMaintainer{CropPhenology}
}
\references{
Araya etal. (2015)
}
\keyword{ Phenology
Time series
}
\seealso{
PhenoMetrics (), TwoPointsPlot ()
}
\examples{
PhenoMetrics(system.file("extdata/data1", package="CropPhenology"), FALSE)
TwoPointsPlot(251,247)
}
| /man/CropPhenology-package.Rd | no_license | SofanitAraya/OldCropP | R | false | false | 1,108 | rd | \name{CropPhenology-package}
\alias{CropPhenology-package}
\alias{CropPhenology}
\docType{package}
\title{
\packageTitle{CropPhenology}
}
\description{
This package extracts crop phenological metrics from Moderate Resolution Imaging Spectroradiometer (MODIS) time series vegetation index data.
}
\details{
The 16 days composite of MODIS vegetation index data provides the overall growth condition of the crop in the growing season with regular capture time. Plotting the vegetation index vakue accross time, provides the time series curve which could reporesent the seaonal growth pattern of the crop. The CropPhenology package extracts metrics from the time seris curve based on the curve nature and shape. These metrics indicate different physiological stages and condition of the crop. i
}
\author{
\packageAuthor{CropPhenology}
Maintainer: \packageMaintainer{CropPhenology}
}
\references{
Araya etal. (2015)
}
\keyword{ Phenology
Time series
}
\seealso{
PhenoMetrics (), TWoPointsPlot ()
}
\examples{
PhenoMetrics(system.file("extdata/data1", package="CropPhenology"), FALSE)
TwoPointsPlot(251,247)
}
|
# Shiny download handler: exports the current DAP request table to an .xlsx
# file when the user clicks the download button.
output$downloadDAP <- downloadHandler(
# File name stamped with the current date (locale-dependent weekday/month
# names -- presumably French is intended; confirm the server locale).
filename = function() {
paste0("reqAP_DAP_export_du_",format(Sys.time(), "%A_%d_%B_%Y"),".xlsx")
},
# Export only the rows currently visible in the DT widget (after any
# filtering/sorting), as reported by input$dataReqDAP_rows_all.
content = function(file) {
write_xlsx(list(DAP = DAPActu()[input[["dataReqDAP_rows_all"]], ]), path = file)
}
) | /tabs/serv_DownlaodDAP.R | no_license | David-L-N/Req_Activite_Partielle | R | false | false | 273 | r | output$downloadDAP <- downloadHandler(
filename = function() {
paste0("reqAP_DAP_export_du_",format(Sys.time(), "%A_%d_%B_%Y"),".xlsx")
},
content = function(file) {
write_xlsx(list(DAP = DAPActu()[input[["dataReqDAP_rows_all"]], ]), path = file)
}
) |
\name{gggroup}
\alias{gggroup}
\title{Grob function: groups}
\author{Hadley Wickham <h.wickham@gmail.com>}
\description{
Create multiple grobs based on the id aesthetic.
}
\usage{gggroup(plot = .PLOT, aesthetics=list(), ..., data=NULL)}
\arguments{
\item{plot}{the plot object to modify}
\item{aesthetics}{named list of aesthetic mappings, see details for more information}
\item{...}{other options, see details for more information}
\item{data}{data source, if not specified the plot default will be used}
}
\details{This grob function provides a general means of creating
multiple grobs based on groups in the data. This is useful
if you want to fit a separate smoother for each group in the data.
You will need an id variable in your aesthetics list with determines
how the data is broken down.
Aesthetic mappings that this grob function understands:
\itemize{
\item \code{x}:x position (required)
\item \code{y}:y position (required)
\item \code{id}:
\item any other grobs used by the grob function you choose
}
These can be specified in the plot defaults (see \code{\link{ggplot}}) or
in the \code{aesthetics} argument. If you want to modify the position
of the points or any axis options, you will need to add a position scale to
the plot. These functions start with \code{ps}, eg.
\code{\link{pscontinuous}} or \code{\link{pscategorical}}
Other options:
\itemize{
\item \code{grob}:grob function to use for subgroups
\item anything else used by the grob function you choose
}}
\examples{p <- ggplot(mtcars, aesthetics=list(y=wt, x=qsec, id=cyl, colour=cyl))
gggroup(p)
gggroup(p, grob="density")
gggroup(p, grob="histogram", aes=list(fill=cyl))
gggroup(ggpoint(p), grob="smooth", se=FALSE, span=1)
gggroup(ggpoint(p), aes=list(id=cyl, size=cyl), grob="smooth", span=1)}
\keyword{hplot}
| /man/gggroup-9n.rd | no_license | rmasinidemelo/ggplot | R | false | false | 1,804 | rd | \name{gggroup}
\alias{gggroup}
\title{Grob function: groups}
\author{Hadley Wickham <h.wickham@gmail.com>}
\description{
Create multiple of grobs based on id aesthetic.
}
\usage{gggroup(plot = .PLOT, aesthetics=list(), ..., data=NULL)}
\arguments{
\item{plot}{the plot object to modify}
\item{aesthetics}{named list of aesthetic mappings, see details for more information}
\item{...}{other options, see details for more information}
\item{data}{data source, if not specified the plot default will be used}
}
\details{This grob function provides a general means of creating
multiple grobs based on groups in the data. This is useful
if you want to fit a separate smoother for each group in the data.
You will need an id variable in your aesthetics list with determines
how the data is broken down.
Aesthetic mappings that this grob function understands:
\itemize{
\item \code{x}:x position (required)
\item \code{y}:y position (required)
\item \code{id}:
\item any other grobs used by the grob function you choose
}
These can be specified in the plot defaults (see \code{\link{ggplot}}) or
in the \code{aesthetics} argument. If you want to modify the position
of the points or any axis options, you will need to add a position scale to
the plot. These functions start with \code{ps}, eg.
\code{\link{pscontinuous}} or \code{\link{pscategorical}}
Other options:
\itemize{
\item \code{grob}:grob function to use for subgroups
\item anything else used by the grob function you choose
}}
\examples{p <- ggplot(mtcars, aesthetics=list(y=wt, x=qsec, id=cyl, colour=cyl))
gggroup(p)
gggroup(p, grob="density")
gggroup(p, grob="histogram", aes=list(fill=cyl))
gggroup(ggpoint(p), grob="smooth", se=FALSE, span=1)
gggroup(ggpoint(p), aes=list(id=cyl, size=cyl), grob="smooth", span=1)}
\keyword{hplot}
|
library(tidyr)
library(magrittr)
# calculates the coefficients for price at given day through price nine days before
# to calculate the linear combination of the 1 day, 4 day, and 7 day deltas in
# the three day average with the given weights, which must add to 1
# Build the ten coefficients that turn the last ten daily prices into the
# requested linear combination of the 1-, 4-, and 7-day deltas of the
# 3-day trailing average. The three weights must sum to 1.
weightParser <- function(w1, w2, w3) {
  if (abs(w1 + w2 + w3 - 1) > .00001) {
    stop("weights must add to 1")
  }
  coefs <- c(1, rep(w2 + w3, 2), -w1, rep(-w2, 3), rep(-w3, 3))
  coefs / 3
}
# Replacement values for missing prices: for the rows in `idx`, average the
# price columns whose date lies within `dist` days of `date`. The first
# column of `d` is the state id and is skipped (hence the `1 +` offset).
fixDate <- function(d, date, idx, dist) {
  near <- which(abs(as.Date(colnames(d)[-1]) - as.Date(date)) < dist + 1)
  # drop = FALSE: with a single matching date column the subset would
  # otherwise collapse to a plain vector and apply() below would error.
  sub <- d[idx, 1 + near, drop = FALSE]
  apply(sub, 1, function(v) mean(v, na.rm = TRUE))
}
# Fill in missing prices: for each date column, repeatedly replace NA/NaN
# entries with the average of nearby dates (via fixDate), widening the
# search window from 2 up to 9 days until nothing is missing.
imputePrices <- function(d) {
  for (day in colnames(d)[-1]) {
    window <- 2
    while ((NA %in% d[, day] || NaN %in% d[, day]) && window < 10) {
      gaps <- which(is.na(d[, day]))
      d[gaps, day] <- fixDate(d, day, gaps, window)
      window <- window + 1
    }
  }
  d
}
# Weighted price delta for one party in every state. `dta` is long-format
# (state, date, <party> price); it is reshaped wide to one row per state and
# one column per date, missing prices are imputed, and the coefficient
# vector from weightParser() is applied across the ten date columns.
onePartyWeightedDeltas <- function(dta, party, wgts) {
# look at this party
d <- dta[, c("state", "date", party)]
# spread() pivots to wide: one column per date, values taken from the
# party's price column (located by position).
d <- spread(d, key = date, value = which(colnames(d) == party))
# Hard-coded sanity checks: presumably 57 predictit state markets and
# 10 dates (+1 state column) -- TODO confirm these totals.
if (nrow(d) != 57) {
stop("Should be 57 rows -- missing data")
}
if (ncol(d) != 11) {
stop("Should be 10 dates -- missing data")
}
# ensure columns in correct order (newest date first, matching the weight
# vector) and impute missing prices
# NOTE(review): `T` should be spelled TRUE (T is reassignable).
d <- d[, order(colnames(d), decreasing = T)] %>% imputePrices()
# apply weights to each row and sum and round
return(data.frame(state = d$state,
delta = apply(wgts * t(as.matrix(d[, -1])), 2, sum) %>% round(4)))
}
# Returns the linear combination of the 1-, 4-, and 7-day changes in the
# 3-day trailing average of prices for each party in every state, using the
# given weights (which must sum to 1). The weight algebra is folded into a
# single coefficient vector (see weightParser) to avoid reshaping work.
# Queries the last ten days of prices from the database via `conn`.
predictitWeightedDeltas <- function(date, weight1, weight4, weight7, conn) {
wgts <- weightParser(weight1, weight4, weight7)
# NOTE(review): SQL is built by string pasting; `date` is interpolated
# unescaped -- safe only if callers always pass a Date or validated
# date string. Consider a parameterized query.
q <- paste0("select * from getdays('",
as.Date(date) - 9,
"', '",
date,
"')")
dta <- dbGetQuery(conn, q)
# One row per state with a delta column for each party.
merge(onePartyWeightedDeltas(dta, "dem", wgts),
onePartyWeightedDeltas(dta, "rep", wgts),
by = "state",
suffixes = c("_dem", "_rep"))
}
| /modelling/predictit/pd_utils.R | no_license | lwn517/forecast-2020 | R | false | false | 2,464 | r | library(tidyr)
library(magrittr)
# calculates the coefficients for price at given day through price nine days before
# to calculate the linear combination of the 1 day, 4 day, and 7 day deltas in
# the three day average with the given weights, which must add to 1
weightParser <- function(w1, w2, w3) {
if (abs(w1 + w2 + w3 - 1) > .00001) {
stop("weights must add to 1")
}
c(1, w2 + w3, w2 + w3, -1 * c(w1, rep(w2, 3), rep(w3, 3))) / 3
}
fixDate <- function(d, date, idx, dist) {
sub <- d[idx, 1 + which(abs(as.Date(colnames(d)[-1]) - as.Date(date)) < dist + 1)]
return(apply(sub, 1, function(v) mean(v, na.rm = T)))
}
# fix for missing data
imputePrices <- function(d) {
for (date in colnames(d)[-1]) {
dist <- 2
while ((NA %in% d[, date] || NaN %in% d[, date]) && dist < 10) {
idx <- which(is.na(d[, date]))
d[idx, date] <- fixDate(d, date, idx, dist)
dist <- dist + 1
}
}
d
}
onePartyWeightedDeltas <- function(dta, party, wgts) {
# look at this party
d <- dta[, c("state", "date", party)]
d <- spread(d, key = date, value = which(colnames(d) == party))
if (nrow(d) != 57) {
stop("Should be 57 rows -- missing data")
}
if (ncol(d) != 11) {
stop("Should be 10 dates -- missing data")
}
# ensure columns in correct order and impute missing prices
d <- d[, order(colnames(d), decreasing = T)] %>% imputePrices()
# apply weights to each row and sum and round
return(data.frame(state = d$state,
delta = apply(wgts * t(as.matrix(d[, -1])), 2, sum) %>% round(4)))
}
# returns the linear combination of the 1 day, 4 day, and 7 day
# changes in the 3 day trailing average of prices for each party in every state
# using the given weights. Weights must add to 1. Previously, I had a bunch of
# reshaping, but here I did a little algebra to speed things up
predictitWeightedDeltas <- function(date, weight1, weight4, weight7, conn) {
wgts <- weightParser(weight1, weight4, weight7)
q <- paste0("select * from getdays('",
as.Date(date) - 9,
"', '",
date,
"')")
dta <- dbGetQuery(conn, q)
merge(onePartyWeightedDeltas(dta, "dem", wgts),
onePartyWeightedDeltas(dta, "rep", wgts),
by = "state",
suffixes = c("_dem", "_rep"))
}
|
# Benchmark script: replicate-weighted means over a survey extract.
#svydata <- readRDS("alabama.rds")
#wts <- svydata[,200:279]
# Read the California survey extract; columns 195:274 are presumably the 80
# replicate-weight columns -- TODO confirm against the file layout.
svydata <- readRDS("california.rds")
wts <- svydata[,195:274]
# Analysis variable (age), reshaped to an n x 1 matrix so the matrix
# arithmetic below works column-wise.
x <- svydata$agep
dim(x) <- c(length(x), 1)
# Base sampling weight (uniform here).
pw <- 1L
# For each replicate weight column, compute the weighted column means of x;
# timed -- presumably to compare against a deferred-evaluation backend.
print(system.time({
repmeans<-matrix(ncol=NCOL(x), nrow=ncol(wts))
for(i in 1:ncol(wts)){
repmeans[i,]<-t(colSums(wts[,i]*x*pw)/sum(pw*wts[,i]))
}
}))
# Printing forces any deferred computation to materialize; timed separately.
print(system.time({
print(repmeans)
}))
#wts <- svydata[,200:279]
svydata <- readRDS("california.rds")
wts <- svydata[,195:274]
x <- svydata$agep
dim(x) <- c(length(x), 1)
pw <- 1L
print(system.time({
repmeans<-matrix(ncol=NCOL(x), nrow=ncol(wts))
for(i in 1:ncol(wts)){
repmeans[i,]<-t(colSums(wts[,i]*x*pw)/sum(pw*wts[,i]))
}
}))
print(system.time({
print(repmeans)
})) |
# Master script: loads dependencies, then runs the full analysis pipeline.
# One-time setup:
# install.packages(c('pscl', 'psych', 'readxl', 'magrittr', 'plyr', 'dplyr',
# 'tidyr', 'BayesFactor', 'ggplot2', 'broom', 'knitr'))
library(pscl)
library(psych)
library(readxl)
library(magrittr)
library(plyr)
library(dplyr)
library(tidyr)
library(BayesFactor)
library(ggplot2)
library(broom)
library(knitr)
# Pipeline stages, in order: clean raw data, fit models, render the results
# document, then produce figures and tables.
source("0-cleaning.R")
source("1-analysis.R") # Computationally expensive!
knit("2-results.Rmd")
source("3-plots.R")
source("4-tables.R")
| /master_script.R | no_license | Joe-Hilgard/VVG-product-placement | R | false | false | 468 | r | # install.packages(c('pscl', 'psych', 'readxl', 'magrittr', 'plyr', 'dplyr',
# 'tidyr', 'BayesFactor', 'ggplot2', 'broom', 'knitr'))
library(pscl)
library(psych)
library(readxl)
library(magrittr)
library(plyr)
library(dplyr)
library(tidyr)
library(BayesFactor)
library(ggplot2)
library(broom)
library(knitr)
source("0-cleaning.R")
source("1-analysis.R") # Computationally expensive!
knit("2-results.Rmd")
source("3-plots.R")
source("4-tables.R")
|
loo.train.v2 = function(d.train, part.window = 126, ph, vdrop){
  ###
  ### Leave-one-out (LOO) evaluation of the parturition anomaly detector.
  ### For each fold, epsilon is grid-searched over 15 quantile values per
  ### feature on the training individuals, separately for three tuning
  ### criteria (precision, recall, F1); the tuned detector is then run on
  ### each held-out individual.
  ###
  ### NOTE(review): the original source wrapped the tuning pass in an extra
  ### `for(m in 1:3){` that never used m and left the braces unbalanced
  ### (the file did not parse); that spurious loop has been removed.
  ###
  source("training_detector.R")
  source("training_test_detector.R")
  source("evaluate.R")
  individs = unique(d.train[,1])   # vector of id's for individuals in the training set
  nInd.train = length(individs)    # number of individuals in training set
  nEps = 15                        # number of epsilon quantiles tested
  nCovs = dim(d.train)[2] - 3      # number of features used for predictions
  # One slot per (criterion, held-out individual) pair. vector("list", n)
  # preallocates; the original rep(list(), n) yields a zero-length list.
  loo.eval = vector("list", 3 * nInd.train)
  loo.fittest = vector("list", 3 * nInd.train)
  nLoo = nInd.train - 1            # number of individuals in each LOO training set
  decide = matrix(NA, nr = nCovs, nc = 3)
  ## Tuning pass: grid-search epsilon for each feature and criterion.
  ## NOTE(review): `decide` is overwritten on every fold, so only the last
  ## fold's tuning survives into the test pass -- confirm this is intended.
  for(j in 1:nInd.train){
    df = d.train[d.train[,1] != individs[j], ]
    train.cov = array(NA, c(nCovs, nEps, 3))
    for(h in 1:nCovs){
      for(k in 1:nEps){ # loop over epsilon values
        fit.train = training(d = df[, c(1:3, h+3)], pw = part.window, eps = k/100, vd = vdrop[-j])
        eval.temp = evaluate(alarm = fit.train$alarm, possible.hits = ph[-j], nInd = nLoo, vitdropday = vdrop[-j])
        train.cov[h,k,1] = eval.temp$out.prec
        train.cov[h,k,2] = eval.temp$out.recall
        train.cov[h,k,3] = eval.temp$out.F1
      }
    }
    # Best score of each criterion per feature, and the smallest epsilon
    # that attains it.
    compare = apply(train.cov, c(1,3), max, na.rm = TRUE)
    for(h in 1:nCovs){
      for(i in 1:3){
        decide[h,i] = min(which(train.cov[h,,i] == compare[h,i]))/100
      }
    }
  }
  ## Test pass: evaluate every held-out individual under each criterion's
  ## tuned epsilons.
  for(m in 1:3){
    for(j in 1:nInd.train){
      # NOTE(review): d.train[j] selects the j-th COLUMN of d.train, not the
      # held-out individual's rows; the intent was probably
      # d.train[d.train[,1] == individs[j], ]. Preserved as-is -- confirm
      # against training_test()'s expectations.
      fit.test = training_test(d = d.train[j], pw = part.window, eps = decide[,m], vd = vdrop)
      eval.test = evaluate(alarm = fit.test$alarm, possible.hits = ph[j], nInd = 1, vitdropday = vdrop[j])
      loo.indx = j + (m-1) * nInd.train
      loo.eval[[loo.indx]] = eval.test
      loo.fittest[[loo.indx]] = fit.test
    }
  }
  return(list(loo.eval = loo.eval, decide = decide, train.cov = train.cov,
              compare = compare, loo.fittest = loo.fittest))
}
###
### testing the training evaluation function
###
source("training_detector.R")
source("training_test_detector.R")
source("evaluate.R")
individs = unique(d.train[,1]) # vector of id's for individuals in the training set
nInd.train = length(unique(d.train[,1])) #number of individuals in training set
nEps=15 #number of epsilon quantiles tested
nCovs=dim(d.train)[2]-3 # numver of features used ofr predictions
loo.eval=rep(list(),3*nInd.train)# there are 3 criteria we can use to tune epsilon for anomaly detection
loo.fittest=rep(list(),3*nInd.train)# there are 3 criteria we can use to tune epsilon for anomaly detection
nLoo=nInd.train-1 #number in leave one out training
decide=matrix(NA,nr=nCovs,nc=3)
decide.indx=list()
for(m in 1:3){
for(j in 1:nInd.train){
df=d.train[d.train[,1]!=individs[j],]
train.cov=array(NA,c(nCovs,nEps,3))
for(h in 1:nCovs){
for(k in 1:nEps){# loop over epsilon values
fit.train = training(d=df[,c(1:3,h+3)],pw=part.window,eps=k/100,vd=vdrop[-j])# fit model
eval.temp=evaluate(alarm=fit.train$alarm,possible.hits=ph[-j],nInd=nLoo,vitdropday = vdrop[-j])
train.cov[h,k,1]=eval.temp$out.prec
train.cov[h,k,2]=eval.temp$out.recall
train.cov[h,k,3]=eval.temp$out.F1# calculate eval = recall
}
}
compare=apply(train.cov,c(1,3),max,na.rm=TRUE)
for(h in 1:nCovs){
for(i in 1:3){
decide[h,i]=min(which(train.cov[h,,i]==compare[h,i]))/100
}
}
}#end j
for(m in 1:3){
for(j in 1:nInd.train){
df=d.train[d.train[,1]!=individs[j],]
fit.test=training_test(d=d.train[j],pw=part.window,eps=decide[,m],vd=vdrop)# fit model
eval.test=evaluate(alarm=fit.test$alarm,possible.hits=ph[j],nInd=1,vitdropday = vdrop[j])
# fit.test = anomalyDetect(n.vit=1,id=individs[j],d=d.ind,eps=k/100,covs.indx = 4:(3+nCovs))
loo.indx=j+(m-1)*nInd.train
loo.eval[[loo.indx]]=eval.test
loo.fittest[[loo.indx]]=fit.test
}
}
return(list(loo.eval = loo.eval,decide=decide,train.cov=train.cov,compare = compare,loo.fittest=loo.fittest))
} |
#############################################################################
#
# This file is a part of the R package "metaheuristicOpt".
#
# Author: Iip
# Co-author: -
# Supervisors: Lala Septem Riza, Eddy Prasetyo Nugroho
#
#
# This package is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) any later version.
#
# This package is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#############################################################################
#' A main funtion to compute the optimal solution using a selected algorithm.
#'
#' This function makes accessible all algorithms that are implemented
#' in this package. All of the algorithms use this function as the interface
#' to find the optimal solution, so users do not need to call other functions.
#' In order to obtain good results, users need to adjust some parameters such as the
#' objective function, optimum type, number variable or dimension, number populations,
#' the maximal number of iterations, lower bound, upper bound, or other algorithm-dependent parameters
#' which are collected in the control parameter.
#'
#' @title metaOpt The main function to execute algorithms for getting optimal solutions
#'
#' @param FUN an objective function or cost function,
#'
#' @param optimType a string value that represents the type of optimization.
#' There are two options for this arguments: \code{"MIN"} and \code{"MAX"}.
#' The default value is \code{"MIN"}, referring the minimization problem.
#' Otherwise, you can use \code{"MAX"} for maximization problem.
#'
#' @param algorithm a vector or single string value that represent the algorithm used to
#' do optimization. There are currently eleven implemented algorithm:
#' \itemize{
#' \item \code{"PSO"}: Particle Swarm Optimization. See \code{\link{PSO}};
#' \item \code{"ALO"}: Ant Lion Optimizer. See \code{\link{ALO}};
#' \item \code{"GWO"}: Grey Wolf Optimizer. See \code{\link{GWO}}
#' \item \code{"DA"} : Dragonfly Algorithm. See \code{\link{DA}}
#' \item \code{"FFA"}: Firefly Algorithm. See \code{\link{FFA}}
#' \item \code{"GA"} : Genetic Algorithm. See \code{\link{GA}}
#' \item \code{"GOA"}: Grasshopper Optimisation Algorithm. See \code{\link{GOA}}
#' \item \code{"HS"}: Harmony Search Algorithm. See \code{\link{HS}}
#' \item \code{"MFO"}: Moth Flame Optimizer. See \code{\link{MFO}}
#' \item \code{"SCA"}: Sine Cosine Algorithm. See \code{\link{SCA}}
#' \item \code{"WOA"}: Whale Optimization Algorithm. See \code{\link{WOA}}
#' }
#'
#' @param numVar a positive integer to determine the number variables.
#'
#' @param rangeVar a matrix (\eqn{2 \times n}) containing the range of variables,
#' where \eqn{n} is the number of variables, and first and second rows
#' are the lower bound (minimum) and upper bound (maximum) values, respectively.
#' If all variable have equal upper bound, you can define \code{rangeVar} as
#' matrix (\eqn{2 \times 1}).
#'
#' @param control a list containing all arguments, depending on the algorithm to use. The following list are
#' parameters required for each algorithm.
#' \itemize{
#' \item \code{PSO}:
#'
#' \code{list(numPopulation, maxIter, Vmax, ci, cg, w)}
#'
#' \item \code{ALO}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{GWO}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{DA}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{FFA}:
#'
#' \code{list(numPopulation, maxIter, B0, gamma, alpha)}
#'
#' \item \code{GA}:
#'
#' \code{list(numPopulation, maxIter, Pm, Pc)}
#'
#' \item \code{GOA}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{HS}:
#'
#' \code{list(numPopulation, maxIter, PAR, HMCR, bandwith)}
#'
#' \item \code{MFO}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{SCA}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{WOA}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \bold{Description of the \code{control} Parameters}
#' \itemize{
#' \item \code{numPopulation}: a positive integer to determine the number population.
#' The default value is 40.
#'
#' \item \code{maxIter}: a positive integer to determine the maximum number of iteration.
#' The default value is 500.
#'
#' \item \code{Vmax}: a positive integer to determine the maximum velocity of particle.
#' The default value is 2.
#'
#' \item \code{ci}: a positive integer to determine the individual cognitive.
#' The default value is 1.49445.
#'
#' \item \code{cg}: a positive integer to determine the group cognitive.
#' The default value is 1.49445.
#'
#' \item \code{w}: a positive integer to determine the inertia weight.
#' The default value is 0.729.
#'
#' \item \code{B0}: a positive integer to determine the attractiveness firefly at r=0.
#' The default value is 1.
#'
#' \item \code{gamma}: a positive integer to determine light absorption coefficient.
#' The default value is 1.
#'
#' \item \code{alpha}: a positive integer to determine randomization parameter.
#' The default value is 0.2.
#'
#' \item \code{Pm}: a positive integer to determine mutation probability.
#' The default value is 0.1.
#'
#' \item \code{Pc}: a positive integer to determine crossover probability.
#' The default value is 0.8.
#'
#' \item \code{PAR}: a positive integer to determine Pinch Adjusting Rate.
#' The default value is 0.3.
#'
#' \item \code{HMCR}: a positive integer to determine Harmony Memory Considering Rate.
#' The default value is 0.95.
#'
#' \item \code{bandwith}: a positive integer to determine distance bandwith.
#' The default value is 0.05.
#' }
#' }
#'
#' @param seed a number to determine the seed for RNG.
#'
#' @examples
#' ##################################
#' ## Optimizing the sphere function
#'
#' ## Define sphere function as an objective function
#' sphere <- function(X){
#' return(sum(X^2))
#' }
#'
#' ## Define control variable
#' control <- list(numPopulation=40, maxIter=100, Vmax=2, ci=1.49445, cg=1.49445, w=0.729)
#'
#' numVar <- 5
#' rangeVar <- matrix(c(-10,10), nrow=2)
#'
#' ## Define control variable
#' best.variable <- metaOpt(sphere, optimType="MIN", algorithm="PSO", numVar,
#' rangeVar, control)
#'
#' @return \code{List} that contain list of variable, optimum value and execution time.
#'
#' @export
metaOpt <- function(FUN, optimType="MIN", algorithm="PSO", numVar, rangeVar, control=list(), seed=NULL){
  ## Normalise case so callers may pass e.g. "min" or "pso".
  optimType <- toupper(optimType)
  algorithm <- toupper(algorithm)
  ## One row of output per requested algorithm.
  result <- matrix(ncol = numVar, nrow = length(algorithm))
  timeElapsed <- matrix(ncol = 3, nrow = length(algorithm))
  ## checking consistency between variable numVar and rangeVar
  if (numVar != ncol(rangeVar) && ncol(rangeVar) != 1) {
    stop("Inconsistent between number variable and number range variable")
  }
  ## Per-algorithm specification, replacing the original copy-pasted
  ## if/else chain:
  ##   defaults -- algorithm-specific control entries merged in when missing
  ##   pass     -- control entries forwarded (positionally, in this order)
  ##               to the algorithm after the common arguments
  specs <- list(
    PSO = list(defaults = list(Vmax = 2, ci = 1.49445, cg = 1.49445, w = 0.729),
               pass = c("Vmax", "ci", "cg", "w")),
    FFA = list(defaults = list(B0 = 1, gamma = 1, alpha = 0.2),
               pass = c("B0", "gamma", "alpha")),
    GA  = list(defaults = list(Pm = 0.1, Pc = 0.8),
               pass = c("Pm", "Pc")),
    ## NOTE(review): PAR/HMCR/bandwith are defaulted into `control` but were
    ## never forwarded to HS() in the original code; preserved as-is --
    ## confirm against the HS() signature.
    HS  = list(defaults = list(PAR = 0.3, HMCR = 0.95, bandwith = 0.05),
               pass = character(0))
  )
  ## Algorithms that take only the common argument list.
  for (simple in c("ALO", "GWO", "DA", "GOA", "MFO", "SCA", "WOA", "CLONALG",
                   "ABC", "BA", "CS", "CSO", "DE", "GBS", "KH", "SFL", "BHO")) {
    specs[[simple]] <- list(defaults = list(), pass = character(0))
  }
  for (i in seq_along(algorithm)) {
    algo <- algorithm[i]
    spec <- specs[[algo]]
    if (is.null(spec)) {
      stop("unknown Algorithm argument value")
    }
    ## checking missing parameters (common defaults first, then the
    ## algorithm-specific ones); `control` accumulates across iterations,
    ## matching the original behaviour.
    control <- setDefaultParametersIfMissing(
      control, c(list(numPopulation = 40, maxIter = 500), spec$defaults))
    ## Arguments are passed positionally, exactly as the original branches
    ## did: FUN, optimType, numVar, numPopulation, maxIter, rangeVar, extras.
    callArgs <- c(list(FUN, optimType, numVar, control$numPopulation,
                       control$maxIter, rangeVar),
                  unname(control[spec$pass]))
    ## generate result while calculating time elapsed; re-seed before each
    ## algorithm so runs are reproducible.
    set.seed(seed)
    elapsed <- system.time(
      result[i, ] <- do.call(algo, callArgs)
    )
    timeElapsed[i, ] <- elapsed[1:3]   # user, system, elapsed
  }
  ## objective value attained by each algorithm's best solution
  optimumValue <- matrix(
    vapply(seq_len(nrow(result)), function(r) FUN(result[r, ]), numeric(1)),
    ncol = 1)
  ## label rows by algorithm and columns by variable index
  rownames(result) <- algorithm
  rownames(optimumValue) <- algorithm
  rownames(timeElapsed) <- algorithm
  colnames(result) <- paste0("var", seq_len(numVar))
  colnames(optimumValue) <- "optimum_value"
  colnames(timeElapsed) <- c("user", "system", "elapsed")
  list(result = result, optimumValue = optimumValue, timeElapsed = timeElapsed)
}
## checking missing parameters
# @param control parameter values of each algorithm
# @param defaults default parameter values of each algorithm
setDefaultParametersIfMissing <- function(control, defaults) {
  ## Fill in defaults for every parameter that is absent from (or explicitly
  ## NULL in) the user-supplied 'control' list; user-supplied values win.
  unset <- vapply(names(defaults),
                  function(param) is.null(control[[param]]),
                  logical(1))
  control[names(defaults)[unset]] <- defaults[unset]
  control
}
| /R/metaheuristic.mainFunction.R | no_license | BimaAdi/MetaOpt2 | R | false | false | 20,263 | r | #############################################################################
#
# This file is a part of the R package "metaheuristicOpt".
#
# Author: Iip
# Co-author: -
# Supervisors: Lala Septem Riza, Eddy Prasetyo Nugroho
#
#
# This package is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 2 of the License, or (at your option) any later version.
#
# This package is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
#############################################################################
#' A main funtion to compute the optimal solution using a selected algorithm.
#'
#' This function makes accessible all algorithm that are implemented
#' in this package. All of the algorithm use this function as interface to find
#' the optimal solution, so users do not need to call other functions.
#' In order to obtain good results, users need to adjust some parameters such as the
#' objective function, optimum type, number variable or dimension, number populations,
#' the maximal number of iterations, lower bound, upper bound, or other algorithm-dependent parameters
#' which are collected in the control parameter.
#'
#' @title metaOpt The main function to execute algorithms for getting optimal solutions
#'
#' @param FUN an objective function or cost function,
#'
#' @param optimType a string value that represents the type of optimization.
#' There are two options for this arguments: \code{"MIN"} and \code{"MAX"}.
#' The default value is \code{"MIN"}, referring the minimization problem.
#' Otherwise, you can use \code{"MAX"} for maximization problem.
#'
#' @param algorithm a vector or single string value that represent the algorithm used to
#' do optimization. There are currently twenty-one implemented algorithms; the most
#' common ones are listed below, and \code{"CLONALG"}, \code{"ABC"}, \code{"BA"},
#' \code{"CS"}, \code{"CSO"}, \code{"DE"}, \code{"GBS"}, \code{"KH"}, \code{"SFL"}
#' and \code{"BHO"} are also accepted:
#' \itemize{
#' \item \code{"PSO"}: Particle Swarm Optimization. See \code{\link{PSO}};
#' \item \code{"ALO"}: Ant Lion Optimizer. See \code{\link{ALO}};
#' \item \code{"GWO"}: Grey Wolf Optimizer. See \code{\link{GWO}}
#' \item \code{"DA"} : Dragonfly Algorithm. See \code{\link{DA}}
#' \item \code{"FFA"}: Firefly Algorithm. See \code{\link{FFA}}
#' \item \code{"GA"} : Genetic Algorithm. See \code{\link{GA}}
#' \item \code{"GOA"}: Grasshopper Optimisation Algorithm. See \code{\link{GOA}}
#' \item \code{"HS"}: Harmony Search Algorithm. See \code{\link{HS}}
#' \item \code{"MFO"}: Moth Flame Optimizer. See \code{\link{MFO}}
#' \item \code{"SCA"}: Sine Cosine Algorithm. See \code{\link{SCA}}
#' \item \code{"WOA"}: Whale Optimization Algorithm. See \code{\link{WOA}}
#' }
#'
#' @param numVar a positive integer to determine the number variables.
#'
#' @param rangeVar a matrix (\eqn{2 \times n}) containing the range of variables,
#' where \eqn{n} is the number of variables, and first and second rows
#' are the lower bound (minimum) and upper bound (maximum) values, respectively.
#' If all variable have equal upper bound, you can define \code{rangeVar} as
#' matrix (\eqn{2 \times 1}).
#'
#' @param control a list containing all arguments, depending on the algorithm to use. The following list are
#' parameters required for each algorithm.
#' \itemize{
#' \item \code{PSO}:
#'
#' \code{list(numPopulation, maxIter, Vmax, ci, cg, w)}
#'
#' \item \code{ALO}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{GWO}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{DA}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{FFA}:
#'
#' \code{list(numPopulation, maxIter, B0, gamma, alpha)}
#'
#' \item \code{GA}:
#'
#' \code{list(numPopulation, maxIter, Pm, Pc)}
#'
#' \item \code{GOA}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{HS}:
#'
#' \code{list(numPopulation, maxIter, PAR, HMCR, bandwith)}
#'
#' \item \code{MFO}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{SCA}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \item \code{WOA}:
#'
#' \code{list(numPopulation, maxIter)}
#'
#' \bold{Description of the \code{control} Parameters}
#' \itemize{
#' \item \code{numPopulation}: a positive integer to determine the number population.
#' The default value is 40.
#'
#' \item \code{maxIter}: a positive integer to determine the maximum number of iteration.
#' The default value is 500.
#'
#' \item \code{Vmax}: a positive integer to determine the maximum velocity of particle.
#' The default value is 2.
#'
#' \item \code{ci}: a positive integer to determine the individual cognitive.
#' The default value is 1.49445.
#'
#' \item \code{cg}: a positive integer to determine the group cognitive.
#' The default value is 1.49445.
#'
#' \item \code{w}: a positive integer to determine the inertia weight.
#' The default value is 0.729.
#'
#' \item \code{B0}: a positive integer to determine the attractiveness firefly at r=0.
#' The default value is 1.
#'
#' \item \code{gamma}: a positive integer to determine light absorption coefficient.
#' The default value is 1.
#'
#' \item \code{alpha}: a positive integer to determine randomization parameter.
#' The default value is 0.2.
#'
#' \item \code{Pm}: a positive integer to determine mutation probability.
#' The default value is 0.1.
#'
#' \item \code{Pc}: a positive integer to determine crossover probability.
#' The default value is 0.8.
#'
#' \item \code{PAR}: a positive integer to determine the Pitch Adjusting Rate.
#' The default value is 0.3.
#'
#' \item \code{HMCR}: a positive integer to determine Harmony Memory Considering Rate.
#' The default value is 0.95.
#'
#' \item \code{bandwith}: a positive integer to determine the distance bandwidth.
#' The default value is 0.05.
#' }
#' }
#'
#' @param seed a number to determine the seed for RNG.
#'
#' @examples
#' ##################################
#' ## Optimizing the sphere function
#'
#' ## Define sphere function as an objective function
#' sphere <- function(X){
#' return(sum(X^2))
#' }
#'
#' ## Define control variable
#' control <- list(numPopulation=40, maxIter=100, Vmax=2, ci=1.49445, cg=1.49445, w=0.729)
#'
#' numVar <- 5
#' rangeVar <- matrix(c(-10,10), nrow=2)
#'
#' ## Define control variable
#' best.variable <- metaOpt(sphere, optimType="MIN", algorithm="PSO", numVar,
#' rangeVar, control)
#'
#' @return \code{List} that contain list of variable, optimum value and execution time.
#'
#' @export
metaOpt <- function(FUN, optimType="MIN", algorithm="PSO", numVar, rangeVar, control=list(), seed=NULL){
  ## Normalise user input so e.g. "min"/"pso" are accepted as well.
  optimType <- toupper(optimType)
  algorithm <- toupper(algorithm)

  ## One output row per requested algorithm.
  result <- matrix(ncol=numVar, nrow=length(algorithm))
  timeElapsed <- matrix(ncol=3, nrow=length(algorithm))

  ## rangeVar must supply either one lower/upper pair per variable or a
  ## single pair shared by all variables.
  if (numVar != ncol(rangeVar) && ncol(rangeVar) != 1) {
    stop("Inconsistent between number variable and number range variable")
  }

  ## Dispatch table replacing the original duplicated if/else chain. For each
  ## supported algorithm it records: the optimizer function, its
  ## algorithm-specific default control parameters, and the names of the
  ## extra control values forwarded (positionally, in this order) after the
  ## common arguments (FUN, optimType, numVar, numPopulation, maxIter, rangeVar).
  ## NOTE(review): as in the original implementation, HS declares
  ## PAR/HMCR/bandwith defaults but does not forward them to HS(); confirm
  ## whether HS() should receive these values.
  algorithmSpecs <- list(
    PSO     = list(fun=PSO, defaults=list(Vmax=2, ci=1.49445, cg=1.49445, w=0.729),
                   extra=c("Vmax", "ci", "cg", "w")),
    ALO     = list(fun=ALO,     defaults=list(), extra=character(0)),
    GWO     = list(fun=GWO,     defaults=list(), extra=character(0)),
    DA      = list(fun=DA,      defaults=list(), extra=character(0)),
    FFA     = list(fun=FFA, defaults=list(B0=1, gamma=1, alpha=0.2),
                   extra=c("B0", "gamma", "alpha")),
    GA      = list(fun=GA, defaults=list(Pm=0.1, Pc=0.8), extra=c("Pm", "Pc")),
    GOA     = list(fun=GOA,     defaults=list(), extra=character(0)),
    HS      = list(fun=HS, defaults=list(PAR=0.3, HMCR=0.95, bandwith=0.05),
                   extra=character(0)),
    MFO     = list(fun=MFO,     defaults=list(), extra=character(0)),
    SCA     = list(fun=SCA,     defaults=list(), extra=character(0)),
    WOA     = list(fun=WOA,     defaults=list(), extra=character(0)),
    CLONALG = list(fun=CLONALG, defaults=list(), extra=character(0)),
    ABC     = list(fun=ABC,     defaults=list(), extra=character(0)),
    BA      = list(fun=BA,      defaults=list(), extra=character(0)),
    CS      = list(fun=CS,      defaults=list(), extra=character(0)),
    CSO     = list(fun=CSO,     defaults=list(), extra=character(0)),
    DE      = list(fun=DE,      defaults=list(), extra=character(0)),
    GBS     = list(fun=GBS,     defaults=list(), extra=character(0)),
    KH      = list(fun=KH,      defaults=list(), extra=character(0)),
    SFL     = list(fun=SFL,     defaults=list(), extra=character(0)),
    BHO     = list(fun=BHO,     defaults=list(), extra=character(0))
  )

  for (i in seq_along(algorithm)) {
    spec <- algorithmSpecs[[algorithm[i]]]
    if (is.null(spec)) {
      stop("unknown Algorithm argument value")
    }
    ## Fill any missing control parameters with this algorithm's defaults
    ## (as in the original code, filled-in values accumulate in 'control'
    ## across iterations; explicit user values are never overwritten).
    control <- setDefaultParametersIfMissing(
      control,
      c(list(numPopulation=40, maxIter=500), spec$defaults)
    )
    ## Assemble the positional argument list for the optimizer; 'unname'
    ## keeps the extra parameters matched by position, exactly as the
    ## original hand-written calls did.
    args <- c(list(FUN, optimType, numVar, control$numPopulation,
                   control$maxIter, rangeVar),
              unname(control[spec$extra]))
    ## Generate the result while measuring the time elapsed.
    set.seed(seed)
    temp <- system.time(
      result[i, ] <- do.call(spec$fun, args)
    )
    timeElapsed[i, ] <- temp[1:3]
  }

  ## Evaluate the objective function at each algorithm's best solution.
  optimumValue <- c()
  for (i in seq_len(nrow(result))) {
    optimumValue[i] <- FUN(result[i, ])
  }
  optimumValue <- as.matrix(optimumValue)

  ## Label rows by algorithm name and columns by variable index.
  rownames(result) <- algorithm
  rownames(optimumValue) <- algorithm
  rownames(timeElapsed) <- algorithm
  colnames(result) <- paste0("var", seq_len(numVar))
  colnames(optimumValue) <- c("optimum_value")
  colnames(timeElapsed) <- c("user", "system", "elapsed")

  ## Bundle everything callers expect: best variables, objective values,
  ## and per-algorithm timing.
  list(result=result, optimumValue=optimumValue, timeElapsed=timeElapsed)
}
## checking missing parameters
# @param control parameter values of each algorithm
# @param defaults default parameter values of each algorithm
setDefaultParametersIfMissing <- function(control, defaults) {
  ## Fill in defaults for every parameter that is absent from (or explicitly
  ## NULL in) the user-supplied 'control' list; user-supplied values win.
  unset <- vapply(names(defaults),
                  function(param) is.null(control[[param]]),
                  logical(1))
  control[names(defaults)[unset]] <- defaults[unset]
  control
}
|
# Fuzzer-generated regression input (the file path suggests an AFL run):
# A is a 5x7 double matrix whose first four entries are extreme magnitudes
# (~1e82 .. ~1e295) with the remaining entries zero; B is a 1x1 zero matrix.
testlist <- list(A = structure(c(1.51474621700552e+82, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Replay the fuzzed arguments against the internal (unexported) match_rows helper.
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613101074-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(1.51474621700552e+82, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
library("ShortRead")
#softTrim
#adapted from Jeremy Leipzig http://jermdemo.blogspot.co.nz/2010/03/soft-trimming-in-r-using-shortread-and.html
#and http://manuals.bioinformatics.ucr.edu/home/ht-seq#TOC-Quality-Reports-of-FASTQ-Files-
#trim first position lower than minQuality and all subsequent positions
#omit sequences that after trimming are shorter than minLength or longer than maxLength
#left trim to firstBase, (1 implies no left trim)
#input: ShortReadQ reads
# integer minQuality
# integer firstBase
# integer minLength
# integer maxLength
#output: ShortReadQ trimmed reads
softTrim<-function(reads,minQuality,firstBase=1,minLength=5,maxLength=900){
# Soft-trim each read at the first base whose quality drops below
# 'minQuality' (that base and all later ones are removed), left-trim to
# 'firstBase', then discard reads whose trimmed width falls outside
# [minLength, maxLength]. Returns a new ShortReadQ with trimmed sequences
# and matching trimmed qualities.
#qualMat<-as(FastqQuality(quality(quality(reads))),'matrix')
# per-cycle quality scores as a reads-by-cycles numeric matrix
qualMat<-as(SFastqQuality(quality(quality(reads))),'matrix')
# split into one quality vector per read (rows of the matrix)
qualList<-split(qualMat,row(qualMat))
# index of the last base before the first low-quality call; NA when no base
# is below the threshold -- presumably subseq() then keeps the full read
# width (NA treated as "unspecified end"); TODO confirm against IRanges docs
ends<-as.integer(lapply(qualList,function(x){which(x < minQuality)[1]-1}))
#length=end-start+1, so set start to no more than length+1 to avoid negative-length
starts<-as.integer(lapply(ends,function(x){min(x+1,firstBase)}))
#use whatever QualityScore subclass is sent
newQ<-ShortReadQ(sread=subseq(sread(reads),start=starts,end=ends),
quality=new(Class=class(quality(reads)),quality=subseq(quality(quality(reads)),start=starts,end=ends)),
id=id(reads))
#apply minLength using srFilter
minlengthFilter <- srFilter(function(x) {width(x)>=minLength},name="minimum length cutoff")
trimmedReads = newQ[minlengthFilter(newQ)]
# and symmetrically drop reads longer than maxLength
maxlengthFilter <- srFilter(function(x) {width(x)<=maxLength},name="maximum length cutoff")
trimmedReads = trimmedReads[maxlengthFilter(trimmedReads)]
return(trimmedReads)
}
#readnumtrim
#call softTrim and plot number of reads passing filter for different quality/length parameters
#randomly sample "nsamples" reads in fastq file and perform "nrep" replicates of filtering+counting
#create a plot if "do_plot" and write it to "pdfout"_max and "pdfout"_min files
#use quantiles of length and quality distribution for axis ticks if "quant"
#if "maxth" calculate for both maximum length threshold and minimum length threshold, otherwise only do minimum length
#input: fastq file format
# integer nsamples
# integer nrep
# boolean do_plot
# character pdfout
# boolean quant
#output: matrix of number of reads for each quantile of the distribution of quality scores and read lengths
# Interactively pick the FASTQ file to analyse, then run the trimming report.
# NOTE(review): readnumtrim() is defined further down this script, so sourcing
# the file top-to-bottom fails here with "could not find function"; these two
# lines should be moved after the function definitions (or run interactively
# once everything is defined).
datafile<-file.choose()
readnumtrim(datafile)
readnumtrim<-function(fastqfile,nsamples=100,nrep=100,do_plot=TRUE,pdfout="",quant=FALSE,maxth=FALSE){
# Estimate, by repeated subsampling, how many reads of 'fastqfile' survive
# softTrim() for a 10x10 grid of (quality threshold, length threshold)
# pairs, optionally plotting the result as filled contour maps.
# Returns list(anumreadM, anumreadm): mean surviving-read counts for the
# maximum-length and minimum-length threshold grids respectively.
# NOTE(review): the read count below shells out to "cat | wc -l", so this
# only works on Unix-like systems and assumes an uncompressed 4-line-per-read
# FASTQ file.
tnr = as.numeric(system(paste("cat",fastqfile,"|wc -l"),intern=TRUE))/4
cat("total number of reads:",tnr,"\n")
# use max 1e6 samples to estimate distributions of read length and quality scores
if (tnr<1e6){
reads <- readFastq(fastqfile, qualityType="Auto")
}else{
f <- FastqSampler(fastqfile,1e6)
reads <- yield(f)
close(f)
}
#qual = FastqQuality(quality(quality(reads))) # get quality scores
qual = SFastqQuality(quality(quality(reads))) # get quality scores
readM = as(qual,"matrix")
max_qual = max(readM,na.rm=TRUE)
max_length = max(width(reads))
# axis tick values: either quantiles of the observed length/quality
# distributions ('quant') or 10 evenly spaced values from 0 to the maximum
if (quant){
quantile_seq = seq(0,1,length.out=10)
length.ticks = round(quantile(width(reads),quantile_seq)) # get read lengths
qual.ticks = round(quantile(as.numeric(readM),quantile_seq,na.rm=TRUE))
} else {
length.ticks = round(seq(0,max_length,length.out=10))
qual.ticks = round(seq(0,max_qual,length.out=10))
}
# free the (possibly large) estimation sample before resampling below
rm(reads)
rm(qual)
rm(readM)
# get subsamples to estimate number of reads for different pairs of quality and length threshold
f <- FastqSampler(fastqfile,nsamples)
# surviving-read counts: [length tick, quality tick, replicate]
numreadM = array(0, dim=c(10,10,nrep))
numreadm = array(0, dim=c(10,10,nrep))
for (n in 1:nrep){
reads <- yield(f)
if (maxth){
#### QUALITY VS MAXIMUM LENGTH
for (lp in 1:length(length.ticks)){
for (qp in 1:length(qual.ticks)){
tr = softTrim(reads=reads,
minQuality=as.numeric(qual.ticks[qp]),
firstBase=1,
minLength=1,
maxLength=as.numeric(length.ticks[lp]))
numreadM[lp,qp,n] = length(tr)
}
}
}
#### QUALITY VS MINIMUM LENGTH
#following does not work because need to vectorise softTrim() see http://stackoverflow.com/questions/5554305/simple-question-regarding-the-use-of-outer-and-user-defined-functions
# call_softTrim <- function(x,y) {
# tr=softTrim(reads=reads,minQuality=as.numeric(qual.ticks[x]),firstBase=1,minLength=as.numeric(length.ticks[y]),maxLength=max(width(reads)))
# return(length(tr))
# }
# numread = outer(1:length(length.ticks), 1:length(qual.ticks),call_softTrim)
for (lp in 1:length(length.ticks)){
for (qp in 1:length(qual.ticks)){
tr = softTrim(reads=reads,
minQuality=as.numeric(qual.ticks[qp]),
firstBase=1,
minLength=as.numeric(length.ticks[lp]),
maxLength=max_length)
numreadm[lp,qp,n] = length(tr)
}
}
}
close(f)
# average over replicates
anumreadM = apply(numreadM,c(1,2),mean)
anumreadm = apply(numreadm,c(1,2),mean)
if (do_plot){# plot with colours
cpalette = colorRampPalette(c("white","blue"))
if (maxth){
# maximum length threshold
cpalette = colorRampPalette(c("green","red"))
# write to "<pdfout>_max.pdf" when a name was given, else open a new
# X11 window (NOTE(review): x11() is not available on all platforms)
if (nchar(pdfout)>0){
pdf(paste(pdfout,"_max.pdf",sep=""))
}else{
x11()
}
filled.contour2(seq(0,1,length.out=10),seq(0,1,length.out=10),t(anumreadM),
axes=FALSE,xlab="minimum quality",ylab="maximum length",color.palette=cpalette)
title(paste("maximum length threshold vs base quality\n",fastqfile,"\n","total reads",tnr),cex.main=0.7)
# overlay contour lines labelled with the corresponding percentage and
# absolute number of reads passing the filters
contour(seq(0,1,length.out=10),seq(0,1,length.out=10),t(anumreadM),axes=FALSE,add=T,levels=seq(0,nsamples,length.out=10),
labels=paste(as.character(round(seq(0,tnr,length.out=10)*100/tnr)),"% - ",as.character(round(seq(0,tnr,length.out=10))),sep=""))
#axis(1,at=seq(0,1,length.out=10),label=qual.ticks)
# x axis: quality score plus the corresponding error probability 10^(-Q/10)
# NOTE(review): exp(q/-10) is used here rather than 10^(-q/10) -- confirm
# which error-probability convention was intended
axis(1,at=seq(0,1,length.out=10),label=paste(qual.ticks,round(exp(qual.ticks/(-10)),digits=3),sep="\n"),padj=.5)
axis(2,at=seq(0,1,length.out=10),label=length.ticks)
if (nchar(pdfout)>0){
dev.off()
}
}
# minimum length threshold
if (nchar(pdfout)>0){
pdf(paste(pdfout,"_min.pdf",sep=""))
}else{
x11()
}
filled.contour2(seq(0,1,length.out=10),seq(0,1,length.out=10),t(anumreadm),
axes=FALSE,xlab="minimum quality",ylab="minimum length",color.palette=cpalette)
title(paste("minimum length threshold vs base quality\n",fastqfile,"\n","total reads",tnr),cex.main=0.7)
contour(seq(0,1,length.out=10),seq(0,1,length.out=10),t(anumreadm),axes=FALSE,add=T,levels=seq(0,nsamples,length.out=10),
labels=paste(as.character(round(seq(0,tnr,length.out=10)*100/tnr)),"% - ",as.character(round(seq(0,tnr,length.out=10))),sep=""))
#axis(1,at=seq(0,1,length.out=10),label=qual.ticks)
axis(1,at=seq(0,1,length.out=10),label=paste(qual.ticks,round(exp(qual.ticks/(-10)),digits=3),sep="\n"),padj=.5)
axis(2,at=seq(0,1,length.out=10),label=length.ticks)
if (nchar(pdfout)>0){
dev.off()
}
}
#writeFastq(trimmedReads,file="trimmed.fastq")
return(list(anumreadM,anumreadm))
}
# allow color controur plot with levels overplotted
# filled.contour without the colour key, so that contour() lines and custom
# axes can be overplotted on the same coordinate system (adapted from
# graphics::filled.contour; original modification by Ian Taylor).
#
# Arguments mirror graphics::filled.contour; 'key.title' and 'key.axes' are
# kept in the signature for call compatibility but are unused because no key
# is drawn. Called for its plotting side effect; returns invisible(NULL).
# Changes vs the previous version: removed the leftover debug
# print(paste(xlim, ylim)) and the unused key-width computation 'w'.
filled.contour2<-function (x = seq(0, 1, length.out = nrow(z)),
y = seq(0, 1, length.out = ncol(z)), z, xlim = range(x, finite = TRUE),
ylim = range(y, finite = TRUE), zlim = range(z, finite = TRUE),
levels = pretty(zlim, nlevels), nlevels = 20, color.palette = cm.colors,
col = color.palette(length(levels) - 1), plot.title, plot.axes,
key.title, key.axes, asp = NA, xaxs = "i", yaxs = "i", las = 1,
axes = TRUE, frame.plot = axes,mar, ...)
{
# modification by Ian Taylor of the filled.contour function
# to remove the key and facilitate overplotting with contour()
# Accept z positionally as 'x', or a list(x, y, z), like filled.contour does.
if (missing(z)) {
if (!missing(x)) {
if (is.list(x)) {
z <- x$z
y <- x$y
x <- x$x
}
else {
z <- x
x <- seq.int(0, 1, length.out = nrow(z))
}
}
else stop("no 'z' matrix specified")
}
else if (is.list(x)) {
y <- x$y
x <- x$x
}
if (any(diff(x) <= 0) || any(diff(y) <= 0))
stop("increasing 'x' and 'y' values expected")
# Remember the graphics state changed below and restore it on exit.
mar.orig <- (par.orig <- par(c("mar", "las", "mfrow")))$mar
on.exit(par(par.orig))
par(las = las)
mar <- mar.orig
plot.new()
par(mar=mar)
plot.window(xlim = xlim, ylim = ylim, log = "", xaxs = xaxs, yaxs = yaxs, asp = asp)
if (!is.matrix(z) || nrow(z) <= 1 || ncol(z) <= 1)
stop("no proper 'z' matrix specified")
if (!is.double(z))
storage.mode(z) <- "double"
# .Internal(filledcontour) was removed in R 3.0; use the exported
# .filled.contour helper on modern R.
if (getRversion()<3){
.Internal(filledcontour(as.double(x), as.double(y), z, as.double(levels), col = col))
}else{
.filled.contour(as.double(x), as.double(y), z, as.double(levels), col = col) # fix for R3
}
if (missing(plot.axes)) {
if (axes) {
title(main = "", xlab = "", ylab = "")
Axis(x, side = 1)
Axis(y, side = 2)
}
}
else plot.axes
if (frame.plot)
box()
if (missing(plot.title))
title(...)
else plot.title
invisible()
}
| /trimming.R | no_license | tirohia/timecourseR | R | false | false | 9,694 | r |
library("ShortRead")
#softTrim
#adapted from Jeremy Leipzig http://jermdemo.blogspot.co.nz/2010/03/soft-trimming-in-r-using-shortread-and.html
#and http://manuals.bioinformatics.ucr.edu/home/ht-seq#TOC-Quality-Reports-of-FASTQ-Files-
#trim first position lower than minQuality and all subsequent positions
#omit sequences that after trimming are shorter than minLength or longer than maxLength
#left trim to firstBase, (1 implies no left trim)
#input: ShortReadQ reads
# integer minQuality
# integer firstBase
# integer minLength
# integer maxLength
#output: ShortReadQ trimmed reads
softTrim<-function(reads,minQuality,firstBase=1,minLength=5,maxLength=900){
# Soft-trim each read at the first base whose quality drops below
# 'minQuality' (that base and all later ones are removed), left-trim to
# 'firstBase', then discard reads whose trimmed width falls outside
# [minLength, maxLength]. Returns a new ShortReadQ with trimmed sequences
# and matching trimmed qualities.
#qualMat<-as(FastqQuality(quality(quality(reads))),'matrix')
# per-cycle quality scores as a reads-by-cycles numeric matrix
qualMat<-as(SFastqQuality(quality(quality(reads))),'matrix')
# split into one quality vector per read (rows of the matrix)
qualList<-split(qualMat,row(qualMat))
# index of the last base before the first low-quality call; NA when no base
# is below the threshold -- presumably subseq() then keeps the full read
# width (NA treated as "unspecified end"); TODO confirm against IRanges docs
ends<-as.integer(lapply(qualList,function(x){which(x < minQuality)[1]-1}))
#length=end-start+1, so set start to no more than length+1 to avoid negative-length
starts<-as.integer(lapply(ends,function(x){min(x+1,firstBase)}))
#use whatever QualityScore subclass is sent
newQ<-ShortReadQ(sread=subseq(sread(reads),start=starts,end=ends),
quality=new(Class=class(quality(reads)),quality=subseq(quality(quality(reads)),start=starts,end=ends)),
id=id(reads))
#apply minLength using srFilter
minlengthFilter <- srFilter(function(x) {width(x)>=minLength},name="minimum length cutoff")
trimmedReads = newQ[minlengthFilter(newQ)]
# and symmetrically drop reads longer than maxLength
maxlengthFilter <- srFilter(function(x) {width(x)<=maxLength},name="maximum length cutoff")
trimmedReads = trimmedReads[maxlengthFilter(trimmedReads)]
return(trimmedReads)
}
#readnumtrim
#call softTrim and plot number of reads passing filter for different quality/length parameters
#randomly sample "nsamples" reads in fastq file and perform "nrep" replicates of filtering+counting
#create a plot if "do_plot" and write it to "pdfout"_max and "pdfout"_min files
#use quantiles of length and quality distribution for axis ticks if "quant"
#if "maxth" calculate for both maximum length threshold and minimum length threshold, otherwise only do minimum length
#input: fastq file format
# integer nsamples
# integer nrep
# boolean do_plot
# character pdfout
# boolean quant
#output: matrix of number of reads for each quantile of the distribution of quality scores and read lengths
# Interactively pick the FASTQ file to analyse, then run the trimming report.
# NOTE(review): readnumtrim() is defined further down this script, so sourcing
# the file top-to-bottom fails here with "could not find function"; these two
# lines should be moved after the function definitions (or run interactively
# once everything is defined).
datafile<-file.choose()
readnumtrim(datafile)
readnumtrim <- function(fastqfile, nsamples = 100, nrep = 100, do_plot = TRUE,
                        pdfout = "", quant = FALSE, maxth = FALSE) {
  # Count reads: a fastq record is 4 lines. shQuote() protects paths that
  # contain spaces; "wc -l < file" avoids the useless cat (Unix-only, as before).
  tnr <- as.numeric(system(paste("wc -l <", shQuote(fastqfile)), intern = TRUE)) / 4
  cat("total number of reads:", tnr, "\n")
  # use max 1e6 reads to estimate distributions of read length and quality scores
  if (tnr < 1e6) {
    reads <- readFastq(fastqfile, qualityType = "Auto")
  } else {
    f <- FastqSampler(fastqfile, 1e6)
    reads <- yield(f)
    close(f)
  }
  #qual <- FastqQuality(quality(quality(reads))) # get quality scores
  qual <- SFastqQuality(quality(quality(reads))) # get quality scores
  readM <- as(qual, "matrix")
  max_qual <- max(readM, na.rm = TRUE)
  max_length <- max(width(reads))
  # Axis ticks: quantiles of the observed distributions, or a uniform grid.
  if (quant) {
    quantile_seq <- seq(0, 1, length.out = 10)
    length.ticks <- round(quantile(width(reads), quantile_seq)) # read lengths
    qual.ticks <- round(quantile(as.numeric(readM), quantile_seq, na.rm = TRUE))
  } else {
    length.ticks <- round(seq(0, max_length, length.out = 10))
    qual.ticks <- round(seq(0, max_qual, length.out = 10))
  }
  # Free the large objects before the resampling loop.
  rm(reads)
  rm(qual)
  rm(readM)
  # Take nrep subsamples of nsamples reads each and count how many survive
  # trimming for every (length threshold, quality threshold) pair.
  f <- FastqSampler(fastqfile, nsamples)
  numreadM <- array(0, dim = c(10, 10, nrep)) # maximum-length thresholds
  numreadm <- array(0, dim = c(10, 10, nrep)) # minimum-length thresholds
  for (n in seq_len(nrep)) {
    reads <- yield(f)
    if (maxth) {
      #### QUALITY VS MAXIMUM LENGTH
      for (lp in seq_along(length.ticks)) {
        for (qp in seq_along(qual.ticks)) {
          tr <- softTrim(reads = reads,
                         minQuality = as.numeric(qual.ticks[qp]),
                         firstBase = 1,
                         minLength = 1,
                         maxLength = as.numeric(length.ticks[lp]))
          numreadM[lp, qp, n] <- length(tr)
        }
      }
    }
    #### QUALITY VS MINIMUM LENGTH
    # outer() would need a vectorised softTrim(); see
    # http://stackoverflow.com/questions/5554305
    for (lp in seq_along(length.ticks)) {
      for (qp in seq_along(qual.ticks)) {
        tr <- softTrim(reads = reads,
                       minQuality = as.numeric(qual.ticks[qp]),
                       firstBase = 1,
                       minLength = as.numeric(length.ticks[lp]),
                       maxLength = max_length)
        numreadm[lp, qp, n] <- length(tr)
      }
    }
  }
  close(f)
  # average over replicates
  anumreadM <- apply(numreadM, c(1, 2), mean)
  anumreadm <- apply(numreadm, c(1, 2), mean)
  if (do_plot) { # plot with colours
    cpalette <- colorRampPalette(c("white", "blue"))
    if (maxth) {
      # maximum length threshold
      cpalette <- colorRampPalette(c("green", "red"))
      if (nchar(pdfout) > 0) {
        pdf(paste0(pdfout, "_max.pdf"))
      } else {
        x11()
      }
      filled.contour2(seq(0, 1, length.out = 10), seq(0, 1, length.out = 10), t(anumreadM),
                      axes = FALSE, xlab = "minimum quality", ylab = "maximum length",
                      color.palette = cpalette)
      title(paste("maximum length threshold vs base quality\n", fastqfile, "\n", "total reads", tnr),
            cex.main = 0.7)
      contour(seq(0, 1, length.out = 10), seq(0, 1, length.out = 10), t(anumreadM),
              axes = FALSE, add = TRUE, levels = seq(0, nsamples, length.out = 10),
              labels = paste(as.character(round(seq(0, tnr, length.out = 10) * 100 / tnr)), "% - ",
                             as.character(round(seq(0, tnr, length.out = 10))), sep = ""))
      # Second tick row shows exp(-Q/10). NOTE(review): Phred-style error
      # probabilities would be 10^(-Q/10); confirm which was intended.
      axis(1, at = seq(0, 1, length.out = 10),
           labels = paste(qual.ticks, round(exp(qual.ticks/(-10)), digits = 3), sep = "\n"), padj = .5)
      axis(2, at = seq(0, 1, length.out = 10), labels = length.ticks)
      if (nchar(pdfout) > 0) {
        dev.off()
      }
    }
    # minimum length threshold
    if (nchar(pdfout) > 0) {
      pdf(paste0(pdfout, "_min.pdf"))
    } else {
      x11()
    }
    filled.contour2(seq(0, 1, length.out = 10), seq(0, 1, length.out = 10), t(anumreadm),
                    axes = FALSE, xlab = "minimum quality", ylab = "minimum length",
                    color.palette = cpalette)
    title(paste("minimum length threshold vs base quality\n", fastqfile, "\n", "total reads", tnr),
          cex.main = 0.7)
    contour(seq(0, 1, length.out = 10), seq(0, 1, length.out = 10), t(anumreadm),
            axes = FALSE, add = TRUE, levels = seq(0, nsamples, length.out = 10),
            labels = paste(as.character(round(seq(0, tnr, length.out = 10) * 100 / tnr)), "% - ",
                           as.character(round(seq(0, tnr, length.out = 10))), sep = ""))
    axis(1, at = seq(0, 1, length.out = 10),
         labels = paste(qual.ticks, round(exp(qual.ticks/(-10)), digits = 3), sep = "\n"), padj = .5)
    axis(2, at = seq(0, 1, length.out = 10), labels = length.ticks)
    if (nchar(pdfout) > 0) {
      dev.off()
    }
  }
  #writeFastq(trimmedReads,file="trimmed.fastq")
  return(list(anumreadM, anumreadm))
}
# Colour contour plot with no legend key, so that contour() levels can be
# overplotted on top of the fill.
# Modification by Ian Taylor of graphics::filled.contour.
filled.contour2 <- function (x = seq(0, 1, length.out = nrow(z)),
    y = seq(0, 1, length.out = ncol(z)), z, xlim = range(x, finite = TRUE),
    ylim = range(y, finite = TRUE), zlim = range(z, finite = TRUE),
    levels = pretty(zlim, nlevels), nlevels = 20, color.palette = cm.colors,
    col = color.palette(length(levels) - 1), plot.title, plot.axes,
    key.title, key.axes, asp = NA, xaxs = "i", yaxs = "i", las = 1,
    axes = TRUE, frame.plot = axes, mar, ...)
{
    # Accept the same call forms as filled.contour(): z alone (possibly as
    # the first positional argument), a list with x/y/z components, or
    # separate x, y, z vectors/matrix.
    if (missing(z)) {
        if (!missing(x)) {
            if (is.list(x)) {
                z <- x$z
                y <- x$y
                x <- x$x
            }
            else {
                z <- x
                x <- seq.int(0, 1, length.out = nrow(z))
            }
        }
        else stop("no 'z' matrix specified")
    }
    else if (is.list(x)) {
        y <- x$y
        x <- x$x
    }
    if (any(diff(x) <= 0) || any(diff(y) <= 0))
        stop("increasing 'x' and 'y' values expected")
    # Save graphics state and restore it when the function exits.
    mar.orig <- (par.orig <- par(c("mar", "las", "mfrow")))$mar
    on.exit(par(par.orig))
    par(las = las)
    # Fix: `mar` used to be unconditionally overwritten with par("mar"),
    # so a caller-supplied margin was silently ignored. Honour it now.
    if (missing(mar))
        mar <- mar.orig
    plot.new()
    par(mar = mar)
    plot.window(xlim = xlim, ylim = ylim, log = "", xaxs = xaxs, yaxs = yaxs, asp = asp)
    if (!is.matrix(z) || nrow(z) <= 1 || ncol(z) <= 1)
        stop("no proper 'z' matrix specified")
    if (!is.double(z))
        storage.mode(z) <- "double"
    # .filled.contour() is the public entry point from R 3.0.0 onwards;
    # keep the .Internal() call only for older interpreters.
    if (getRversion() < "3.0.0") {
        .Internal(filledcontour(as.double(x), as.double(y), z, as.double(levels), col = col))
    } else {
        .filled.contour(as.double(x), as.double(y), z, as.double(levels), col = col)
    }
    if (missing(plot.axes)) {
        if (axes) {
            title(main = "", xlab = "", ylab = "")
            Axis(x, side = 1)
            Axis(y, side = 2)
        }
    }
    else plot.axes  # lazily evaluate the caller-supplied axis expression
    if (frame.plot)
        box()
    if (missing(plot.title))
        title(...)
    else plot.title  # lazily evaluate the caller-supplied title expression
    invisible()
}
|
\name{unique.stlpp}
\alias{unique.stlpp}
\title{Extract unique points from a spatio-temporal point pattern on a linear network}
\description{
Extract unique points from a spatio-temporal point pattern on a linear network.
}
\usage{
\method{unique}{stlpp}(x,...)
}
\arguments{
\item{x}{a realisation of a spatio-temporal point process on a linear network.}
\item{...}{arguments for \code{\link{unique}}.}
}
\details{
This function extracts the unique points from a spatio-temporal point pattern on a linear network, discarding duplicated points.
}
\value{
A spatio-temporal point pattern on a linear network with no duplicated points.
}
\references{
Moradi, M.M. and Mateu, J. (2019). First and second-order characteristics of spatio-temporal point processes on linear networks. Journal of Computational and Graphical Statistics. In press.
}
\author{
Mehdi Moradi <m2.moradi@yahoo.com>
}
\seealso{
\code{\link{unique}}
}
\examples{
X <- rpoistlpp(0.1,0,5,L=easynet)
df <- as.data.frame(X)
df_dup <- df[sample(nrow(df), 20,replace = TRUE), ]
Y <- as.stlpp(df_dup,L=easynet)
npoints(Y)
npoints(unique(Y))
}
| /man/unique.stlpp.Rd | no_license | Moradii/stlnpp | R | false | false | 1,129 | rd | \name{unique.stlpp}
\alias{unique.stlpp}
\title{Extract unique points from a spatio-temporal point pattern on a linear network}
\description{
Extract unique points from a spatio-temporal point pattern on a linear network.
}
\usage{
\method{unique}{stlpp}(x,...)
}
\arguments{
\item{x}{a realisation of a spatio-temporal point process on a linear network.}
\item{...}{arguments for \code{\link{unique}}.}
}
\details{
This function extracts the unique points from a spatio-temporal point pattern on a linear network, discarding duplicated points.
}
\value{
A spatio-temporal point pattern on a linear network with no duplicated points.
}
\references{
Moradi, M.M. and Mateu, J. (2019). First and second-order characteristics of spatio-temporal point processes on linear networks. Journal of Computational and Graphical Statistics. In press.
}
\author{
Mehdi Moradi <m2.moradi@yahoo.com>
}
\seealso{
\code{\link{unique}}
}
\examples{
X <- rpoistlpp(0.1,0,5,L=easynet)
df <- as.data.frame(X)
df_dup <- df[sample(nrow(df), 20,replace = TRUE), ]
Y <- as.stlpp(df_dup,L=easynet)
npoints(Y)
npoints(unique(Y))
}
|
library(shiny)
#Define user input function
# UI: a sidebar with a country multi-select and a year-range slider,
# and a main panel holding the line chart.
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      # y axis: one or more countries from the Trade_SSA data
      selectInput(
        inputId = "y",
        label = "Country:",
        choices = c(Trade_SSA$Country),
        multiple = TRUE,
        selected = "India"
      ),
      # x axis: the time period to display
      sliderInput(
        inputId = "x",
        label = "Select Time Period:",
        min = 1992, max = 2016,
        value = c(1999, 2005),
        step = 1
      )
    ),
    # output: the line chart rendered by the server
    mainPanel(
      plotOutput(outputId = "linechart")
    )
  )
)
#define server function
server <- function(input, output) {
  # Render the line chart that plotOutput("linechart") in the UI displays.
  output$linechart <- renderPlot({
    # NOTE(review): input$x is the two-element year range from the slider and
    # input$y is a vector of country names; mapping them straight into
    # aes_string() is unlikely to produce the intended plot -- presumably the
    # data should be filtered to the year range instead. TODO confirm against
    # the Trade_SSA schema. aes_string() is also deprecated in ggplot2 >= 3.0.
    ggplot(data = Trade_SSA, aes_string(x = input$x, y = input$y)) +
      geom_line()  # fixed: the '+' was missing, so the line layer was never added
  })
}
#Create the shiny app object
shinyApp(ui=ui, server=server) | /trade_app.R | permissive | Karagul/Shiny_Practice | R | false | false | 1,022 | r | library(shiny)
#Define user input function
# UI: a sidebar with a country multi-select and a year-range slider,
# and a main panel holding the line chart.
ui <- fluidPage(
  sidebarLayout(
    sidebarPanel(
      # y axis: one or more countries from the Trade_SSA data
      selectInput(
        inputId = "y",
        label = "Country:",
        choices = c(Trade_SSA$Country),
        multiple = TRUE,
        selected = "India"
      ),
      # x axis: the time period to display
      sliderInput(
        inputId = "x",
        label = "Select Time Period:",
        min = 1992, max = 2016,
        value = c(1999, 2005),
        step = 1
      )
    ),
    # output: the line chart rendered by the server
    mainPanel(
      plotOutput(outputId = "linechart")
    )
  )
)
#define server function
server <- function(input, output) {
  # Render the line chart that plotOutput("linechart") in the UI displays.
  output$linechart <- renderPlot({
    # NOTE(review): input$x is the two-element year range from the slider and
    # input$y is a vector of country names; mapping them straight into
    # aes_string() is unlikely to produce the intended plot -- presumably the
    # data should be filtered to the year range instead. TODO confirm against
    # the Trade_SSA schema. aes_string() is also deprecated in ggplot2 >= 3.0.
    ggplot(data = Trade_SSA, aes_string(x = input$x, y = input$y)) +
      geom_line()  # fixed: the '+' was missing, so the line layer was never added
  })
}
#Create the shiny app object
shinyApp(ui=ui, server=server) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods-Cluster.R
\name{Cluster}
\alias{Cluster}
\title{Wrapper class for a particular cluster.
Maps a cluster type to the resulting cluster data.}
\usage{
Cluster(method, param, centers, data)
}
\arguments{
\item{method}{clustering method used}
\item{param}{clustering parameter used}
\item{centers}{cluster centroid}
\item{data}{cluster-associated data}
}
\value{
a Cluster object
}
\description{
Wrapper class for a particular cluster.
Maps a cluster type to the resulting cluster data.
}
| /man/Cluster.Rd | permissive | YosefLab/VISION | R | false | true | 579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods-Cluster.R
\name{Cluster}
\alias{Cluster}
\title{Wrapper class for a particular cluster.
Maps a cluster type to the resulting cluster data.}
\usage{
Cluster(method, param, centers, data)
}
\arguments{
\item{method}{clustering method used}
\item{param}{clustering parameter used}
\item{centers}{cluster centroid}
\item{data}{cluster-associated data}
}
\value{
a Cluster object
}
\description{
Wrapper class for a particular cluster.
Maps a cluster type to the resulting cluster data.
}
|
#' @importFrom randomForest randomForest
# Fit a single-tree random forest for one species and one richness iteration,
# and return presence probabilities for the held-out (test) rows.
# NOTE(review): `last_train_year` is accepted but never used in this body --
# the train/test split appears to come pre-computed in the `in_train` column
# joined in from `x_richness`; confirm before removing the argument.
one_rf_tree = function(bbs, vars, sp_id, iter, use_obs_model, x_richness,
last_train_year){
# Presence/absence table: this species' records joined onto the requested
# richness iteration; site/years with no abundance record become absences.
d = bbs %>%
select(site_id, year, species_id, abundance) %>%
filter(species_id == sp_id) %>%
distinct() %>%
right_join(filter(x_richness, iteration == iter),
by = c("site_id", "year")) %>%
mutate(present = factor(ifelse(is.na(abundance), 0, 1)))
# Model formula built from the caller-supplied predictor names.
my_formula = as.formula(paste("present ~",
paste(vars, collapse = "+")))
# One tree per call; trees are effectively aggregated across iterations
# by the caller averaging the predictions.
rf = randomForest(
my_formula,
filter(d, in_train),
ntree = 1
)
test = filter(d, !in_train, iteration == iter)
# Column 2 of the class-probability matrix is P(present == 1).
test$mean = predict(rf, test, type = "prob")[,2]
test$use_obs_model = use_obs_model
select(test, site_id, year, species_id, mean, richness, use_obs_model, iteration)
}
# Predict occurrence probability for one species across all richness
# iterations (one single-tree forest per iteration), cache the raw
# per-iteration predictions under rf_predictions/, and return the
# across-iteration mean per site/year.
rf_predict_species = function(sp_id, bbs, settings, x_richness, use_obs_model){
# The observer effect is only added as a predictor when the observer model is on.
vars = c(settings$vars, if (use_obs_model) {"observer_effect"})
iters = sort(unique(x_richness$iteration))
results = lapply(iters, one_rf_tree,
bbs = bbs, sp_id = sp_id, use_obs_model = use_obs_model,
vars = vars,
x_richness = x_richness,
last_train_year = settings$last_train_year) %>%
bind_rows()
# Side effect: cache the per-iteration predictions before aggregating.
path = paste0("rf_predictions/sp_", sp_id, "_", use_obs_model, ".csv.gz")
dir.create("rf_predictions", showWarnings = FALSE)
write.csv(results, file = gzfile(path), row.names = FALSE)
results %>%
group_by(site_id, year, species_id, richness, use_obs_model) %>%
summarize(mean = mean(mean))
}
# Predicted richness per site/year = sum over species of the per-species
# occurrence probabilities; per-species sds are combined as sqrt(sum(sd^2)),
# i.e. presumably assuming independence across species -- TODO confirm.
rf_predict_richness = function(bbs, x_richness, settings, use_obs_model, mc.cores) {
out = parallel::mclapply(
unique(bbs$species_id),
function(sp_id){
rf_predict_species(sp_id, bbs = bbs, x_richness = x_richness,
settings = settings,
use_obs_model = use_obs_model)
},
mc.cores = mc.cores,
mc.preschedule = FALSE # species differ in cost; schedule dynamically
) %>%
purrr::map(combine_sdm_iterations) %>%
bind_rows() %>%
group_by(site_id, year, richness, use_obs_model) %>%
summarize(mean = sum(mean), sd = sqrt(sum(sd^2))) %>%
ungroup() %>%
mutate(model = "rf_sdm")
out
}
# Collapse per-iteration SDM predictions per site/year/species: average the
# presence probability and derive a binomial-style sd, sqrt(p * (1 - p)),
# from that averaged probability.
combine_sdm_iterations = function(d){
grouped = group_by(d, site_id, year, species_id, richness, use_obs_model)
summarize(grouped, mean = mean(mean), sd = sqrt(mean(mean * (1 - mean))))
}
| /R/sdm-rf.R | no_license | karinorman/bbs-forecasting | R | false | false | 2,490 | r | #' @importFrom randomForest randomForest
one_rf_tree = function(bbs, vars, sp_id, iter, use_obs_model, x_richness,
last_train_year){
d = bbs %>%
select(site_id, year, species_id, abundance) %>%
filter(species_id == sp_id) %>%
distinct() %>%
right_join(filter(x_richness, iteration == iter),
by = c("site_id", "year")) %>%
mutate(present = factor(ifelse(is.na(abundance), 0, 1)))
my_formula = as.formula(paste("present ~",
paste(vars, collapse = "+")))
rf = randomForest(
my_formula,
filter(d, in_train),
ntree = 1
)
test = filter(d, !in_train, iteration == iter)
test$mean = predict(rf, test, type = "prob")[,2]
test$use_obs_model = use_obs_model
select(test, site_id, year, species_id, mean, richness, use_obs_model, iteration)
}
rf_predict_species = function(sp_id, bbs, settings, x_richness, use_obs_model){
vars = c(settings$vars, if (use_obs_model) {"observer_effect"})
iters = sort(unique(x_richness$iteration))
results = lapply(iters, one_rf_tree,
bbs = bbs, sp_id = sp_id, use_obs_model = use_obs_model,
vars = vars,
x_richness = x_richness,
last_train_year = settings$last_train_year) %>%
bind_rows()
path = paste0("rf_predictions/sp_", sp_id, "_", use_obs_model, ".csv.gz")
dir.create("rf_predictions", showWarnings = FALSE)
write.csv(results, file = gzfile(path), row.names = FALSE)
results %>%
group_by(site_id, year, species_id, richness, use_obs_model) %>%
summarize(mean = mean(mean))
}
rf_predict_richness = function(bbs, x_richness, settings, use_obs_model, mc.cores) {
out = parallel::mclapply(
unique(bbs$species_id),
function(sp_id){
rf_predict_species(sp_id, bbs = bbs, x_richness = x_richness,
settings = settings,
use_obs_model = use_obs_model)
},
mc.cores = mc.cores,
mc.preschedule = FALSE
) %>%
purrr::map(combine_sdm_iterations) %>%
bind_rows() %>%
group_by(site_id, year, richness, use_obs_model) %>%
summarize(mean = sum(mean), sd = sqrt(sum(sd^2))) %>%
ungroup() %>%
mutate(model = "rf_sdm")
out
}
combine_sdm_iterations = function(d){
d %>%
group_by(site_id, year, species_id, richness, use_obs_model) %>%
summarize(mean = mean(mean), sd = sqrt(mean(mean * (1 - mean))))
}
|
#' Use a progress bar with regular for loops
#'
#' These functions wrap the progress bar utilities of the *progress* package
#' to be able to use progress bar with regular `for`, `while` and `repeat` loops conveniently.
#' They forward all their
#' parameters to `progress::progress_bar$new()`. `pb_while()` and `pb_repeat()`
#' require the `total` argument.
#'
#' @param total for `pb_while()` and `pb_repeat()`, an estimation of the
#' number of iteration.
#' @param format The format of the progress bar.
#' @param width Width of the progress bar.
#' @param complete Completion character.
#' @param incomplete Incomplete character.
#' @param current Current character.
#' @param callback Callback function to call when the progress bar finishes.
#' The progress bar object itself is passed to it as the single parameter.
#' @param clear Whether to clear the progress bar on completion.
#' @param show_after Amount of time in seconds, after which the progress bar is
#' shown on the screen. For very short processes, it is probably not worth
#' showing it at all.
#' @param force Whether to force showing the progress bar, even if the given (or default) stream does not seem to support it.
#' @param tokens A list of unevaluated expressions, using `alist`, to be passed
#' passed to the `tick` method of the progress bar
#' @param message A message to display on top of the bar
#'
#' @export
#'
#' @examples
#' pb_for()
#' for (i in 1:10) {
#' # DO SOMETHING
#' Sys.sleep(0.5)
#' }
#'
#' pb_for(format = "Working hard: [:bar] :percent :elapsed",
#' callback = function(x) message("We're done!"))
#' for (i in 1:10) {
#' # DO SOMETHING
#' Sys.sleep(0.5)
#' }
pb_for <-
function(
# all args of progress::progress_bar$new() except `total` which needs to be
# inferred from the 2nd argument of the `for` call, and `stream` which is
# deprecated
format = "[:bar] :percent",
width = options("width")[[1]] - 2,
complete = "=",
incomplete = "-",
current =">",
callback = invisible, # doc doesn't give a default but this seems to work ok
clear = TRUE,
show_after = .2,
force = FALSE,
message = NULL,
tokens = alist()){
# create the function that will replace `for`; it captures this call's
# arguments (format, width, ...) through its enclosing environment
f <- function(it, seq, expr){
# to avoid notes at CMD check
PB <- IT <- SEQ <- EXPR <- TOKENS <- NULL
# forward all arguments to progress::progress_bar$new() and add
# a `total` argument computed from `seq` argument
pb <- progress::progress_bar$new(
format = format, width = width, complete = complete,
incomplete = incomplete, current = current,
callback = callback,
clear = clear, show_after = show_after, force = force,
total = length(seq))
if(!is.null(message)) pb$message(message)
# using on.exit allows us to self destruct `for` if relevant even if
# the call fails.
# It also allows us to send to the local environment the changed/created
# variables in their last state, even if the call fails (like standard for)
on.exit({
list2env(mget(ls(env),envir = env), envir = parent.frame())
rm(`for`,envir = parent.frame())
})
# we build a regular `for` loop call with an updated loop code including
# progress bar.
# it is executed in a dedicated environment
# (inside the loop body, `message` is rebound to pb$message so user code
# can print above the bar without breaking it)
env <- new.env(parent = parent.frame())
eval(substitute(
env = list(IT = substitute(it), SEQ = substitute(seq),
EXPR = do.call(substitute, list(substitute(expr),list(message = pb$message))),
TOKENS = tokens, PB = pb
),
base::`for`(IT, SEQ,{
EXPR
PB$tick()
})), envir = env)
}
# override `for` in the parent frame; the masking `for` removes itself
# (see on.exit above) once the first loop finishes or fails
assign("for", value = f,envir = parent.frame())
invisible()
}
| /R/pb_for.R | no_license | moodymudskipper/once | R | false | false | 3,850 | r | #' Use a progress bar with regular for loops
#'
#' These functions wrap the progress bar utilities of the *progress* package
#' to be able to use progress bar with regular `for`, `while` and `repeat` loops conveniently.
#' They forward all their
#' parameters to `progress::progress_bar$new()`. `pb_while()` and `pb_repeat()`
#' require the `total` argument.
#'
#' @param total for `pb_while()` and `pb_repeat()`, an estimation of the
#' number of iteration.
#' @param format The format of the progress bar.
#' @param width Width of the progress bar.
#' @param complete Completion character.
#' @param incomplete Incomplete character.
#' @param current Current character.
#' @param callback Callback function to call when the progress bar finishes.
#' The progress bar object itself is passed to it as the single parameter.
#' @param clear Whether to clear the progress bar on completion.
#' @param show_after Amount of time in seconds, after which the progress bar is
#' shown on the screen. For very short processes, it is probably not worth
#' showing it at all.
#' @param force Whether to force showing the progress bar, even if the given (or default) stream does not seem to support it.
#' @param tokens A list of unevaluated expressions, using `alist`, to be passed
#' passed to the `tick` method of the progress bar
#' @param message A message to display on top of the bar
#'
#' @export
#'
#' @examples
#' pb_for()
#' for (i in 1:10) {
#' # DO SOMETHING
#' Sys.sleep(0.5)
#' }
#'
#' pb_for(format = "Working hard: [:bar] :percent :elapsed",
#' callback = function(x) message("We're done!"))
#' for (i in 1:10) {
#' # DO SOMETHING
#' Sys.sleep(0.5)
#' }
pb_for <-
function(
# all args of progress::progress_bar$new() except `total` which needs to be
# infered from the 2nd argument of the `for` call, and `stream` which is
# deprecated
format = "[:bar] :percent",
width = options("width")[[1]] - 2,
complete = "=",
incomplete = "-",
current =">",
callback = invisible, # doc doesn't give default but this seems to work ok
clear = TRUE,
show_after = .2,
force = FALSE,
message = NULL,
tokens = alist()){
# create the function that will replace `for`
f <- function(it, seq, expr){
# to avoid notes at CMD check
PB <- IT <- SEQ <- EXPR <- TOKENS <- NULL
# forward all arguments to progress::progress_bar$new() and add
# a `total` argument compted from `seq` argument
pb <- progress::progress_bar$new(
format = format, width = width, complete = complete,
incomplete = incomplete, current = current,
callback = callback,
clear = clear, show_after = show_after, force = force,
total = length(seq))
if(!is.null(message)) pb$message(message)
# using on.exit allows us to self destruct `for` if relevant even if
# the call fails.
# It also allows us to send to the local environment the changed/created
# variables in their last state, even if the call fails (like standard for)
on.exit({
list2env(mget(ls(env),envir = env), envir = parent.frame())
rm(`for`,envir = parent.frame())
})
# we build a regular `for` loop call with an updated loop code including
# progress bar.
# it is executed in a dedicated environment
env <- new.env(parent = parent.frame())
eval(substitute(
env = list(IT = substitute(it), SEQ = substitute(seq),
EXPR = do.call(substitute, list(substitute(expr),list(message = pb$message))),
TOKENS = tokens, PB = pb
),
base::`for`(IT, SEQ,{
EXPR
PB$tick()
})), envir = env)
}
# override `for` in the parent frame
assign("for", value = f,envir = parent.frame())
invisible()
}
|
#=========================================================
# IDS 462, Session 7
# Midterm Review Session
#=========================================================
# Copyright Zack Kertcher, PhD, 2018. All rights reserved.
# Do not distribute or use outside this class without explicit permission from the instructor.
#====================================
# Instructions
#=============
# An RData file has been prepared for you. The file consists of data you already know, the realestate data, and a new data frame, credit.
# Answer each one of the questions assigned to your group *on your own* -- a total of three questions. You have 1.25 hour.
# Discuss your solutions as a group. You have 0.5 hour, including a 10 min. break.
# Present solutions to class. Discuss issues/concerns. Each group gets 15 min.
# Tips:
# Make sure to properly allocate time! Start by exploring the new data frame and consider how to best answer these questions.
# Group I
#========
# realestate data
#=
# Is there a difference in the distribution of air conditioning by bedrooms as a factor? Use relevant statistics, plots and statistical tests. For your plots, use at least one ggplot (properly titled, annotated, and visually reasonable). Detail your findings.
# Common question: Which variables in the data in your view exhibit the strongest relationship with price? Provide evidence and explain your answer.
# credit data
#=
# *Throughly* examine the relationships of Income, Balance, Age, Gender, Ethnicity (all of these are potential IVS), with Rating (our DV). Which of these IVs are good predictors? Use statistics, plots, and tests, and provide a detailed answer.
# Group II
#=========
# realestate data
#=
# What is the relationship between price and lotsize. Use relevant statistics, plots and statistical tests. For your plots, use at least one ggplot (properly titled, annotated, and visually reasonable). Detail your findings.
# Common question: Which variables in the data in your view exhibit the strongest relationship with price? Provide evidence and explain your answer.
# credit data
#=
# *Throughly* examine the relationships of Limit, Cards, Education, Student, Married (all of these are potential IVS), with Rating (our DV). Which of these IVs are good predictors? Use statistics, plots, and tests, and provide a detailed answer.
load("Session 7 (review).RData")
View(realestate)
# NOTE(review): glimpse() (dplyr/tibble) and ggplot() (ggplot2) are used below
# but never loaded with library() -- presumably the packages are attached in
# the interactive session; add explicit library() calls for reproducibility.
glimpse(realestate)
colSums(is.na(realestate))
summary(realestate$lotsize)
summary(realestate$price)
options(scipen=99)
#Question 1
#univariate analysis
summary(realestate$lotsize)
summary(realestate$price)
outlier_values <- boxplot.stats(realestate$lotsize)$out
outlier_values
# NOTE(review): boxplot.stats() returns a component named $out, not $out1,
# so this is NULL and no price outliers are ever captured.
outlier_values1<-boxplot.stats(realestate$price)$out1
outlier_values1
# NOTE(review): outlier_values holds outlier *values*, not row numbers, so
# negative indexing here drops essentially arbitrary rows; use something like
# realestate[!(realestate$lotsize %in% outlier_values), ] instead.
realestate <- realestate[-c(outlier_values,outlier_values1),]
# Scatter plot of price vs lotsize with a linear trend line.
ggplot(data=realestate) + aes(x=lotsize, y=price) +
geom_point(pch=16, color="coral") +
labs(title='Relationship between price and lotsize',
x="lotsize", y="price") + # x for xlab, y for ylab
geom_smooth(method="lm", color="black", lwd=2)
cor.test(realestate$lotsize, realestate$price)
#there is relationship between lotsize and price. Positive relationship. As the lotsize increases price increases
#Question 2
# Simple linear regression of price on lotsize.
mod1<-lm(price ~ lotsize, data=realestate)
mod1
summary(mod1)
# we can also set confidence interval at 99%
confint(mod1, level=0.99)
predict(mod1 , data.frame(lotsize =(c(4000 ,10000 ,12000) )),
interval ="confidence", level=0.99)
boxplot(residuals(mod1))
plot(realestate$price~realestate$lotsize, pch=16, col="lightblue")
abline(mod1, col="red", lwd=3)
#there is a positive relationship between lotsize and price.
# Full model with all available predictors.
mod3<-lm(realestate$price~realestate$lotsize+realestate$bedrooms+realestate$bathrms+realestate$stories+realestate$driveway+realestate$recroom+realestate$fullbase+realestate$airco+realestate$gashw+realestate$garagepl+realestate$prefarea)
summary(mod3)
plot(realestate$price~realestate$lotsize+realestate$bedrooms+realestate$bathrms+realestate$stories+realestate$driveway+realestate$recroom+realestate$fullbase+realestate$airco+realestate$gashw+realestate$garagepl+realestate$prefarea)
#the variable that exhibit strongest relationship are lotsize,bathrooms,stroies,fullbaseyes,aircoyes,gashwyes,garagepl2
#question 3
colSums(is.na(credit))
glimpse(credit)
credit$Income <- as.numeric(credit$Income)
credit$Limit<-as.numeric(credit$Limit)
# NOTE(review): gsub() does not modify in place -- this result is discarded,
# so any "$" characters in credit are never actually removed.
gsub("\\$","",credit)
summary(credit$Limit)
credit1<-credit
credit1<-na.omit(credit1)
View(credit1)
summary(credit1)
#check the distribution,multicollinearity
# NOTE(review): the model is fitted on `credit` but the predictions below use
# `credit1` (the NA-free copy) -- confirm this mismatch is intended.
mod2<-lm(Rating~Limit+Cards+Education+Student+Married,data=credit)
mod2
# NOTE(review): predit.lm is a typo (no such function) -- this line errors;
# the predict() call on the next line is the working version.
Predictions<-predit.lm(mod2,credit1)
predict(mod2,credit1)
summary(mod2)
boxplot(residuals(mod2))
plot(credit1$Rating~credit1$Limit+credit1$Cards+credit1$Education+credit1$Student+credit1$Married, pch=16, col="lightblue")
anova(mod2)
#correlation test
#The independent variable that is significant is Limit to predict the rating.
| /Session 7 aruna.r | no_license | dgopal2/data_science | R | false | false | 5,116 | r | #=========================================================
# IDS 462, Session 7
# Midterm Review Session
#=========================================================
# Copyright Zack Kertcher, PhD, 2018. All rights reserved.
# Do not distribute or use outside this class without explicit permission from the instructor.
#====================================
# Instructions
#=============
# An RData file has been prepared for you. The file consists of data you already know, the realestate data, and a new data frame, credit.
# Answer each one of the questions assigned to your group *on your own* -- a total of three questions. You have 1.25 hour.
# Discuss your solutions as a group. You have 0.5 hour, including a 10 min. break.
# Present solutions to class. Discuss issues/concerns. Each group gets 15 min.
# Tips:
# Make sure to properly allocate time! Start by exploring the new data frame and consider how to best answer these questions.
# Group I
#========
# realestate data
#=
# Is there a difference in the distribution of air conditioning by bedrooms as a factor? Use relevant statistics, plots and statistical tests. For your plots, use at least one ggplot (properly titled, annotated, and visually reasonable). Detail your findings.
# Common question: Which variables in the data in your view exhibit the strongest relationship with price? Provide evidence and explain your answer.
# credit data
#=
# *Throughly* examine the relationships of Income, Balance, Age, Gender, Ethnicity (all of these are potential IVS), with Rating (our DV). Which of these IVs are good predictors? Use statistics, plots, and tests, and provide a detailed answer.
# Group II
#=========
# realestate data
#=
# What is the relationship between price and lotsize. Use relevant statistics, plots and statistical tests. For your plots, use at least one ggplot (properly titled, annotated, and visually reasonable). Detail your findings.
# Common question: Which variables in the data in your view exhibit the strongest relationship with price? Provide evidence and explain your answer.
# credit data
#=
# *Throughly* examine the relationships of Limit, Cards, Education, Student, Married (all of these are potential IVS), with Rating (our DV). Which of these IVs are good predictors? Use statistics, plots, and tests, and provide a detailed answer.
load("Session 7 (review).RData")
View(realestate)
glimpse(realestate)
colSums(is.na(realestate))
summary(realestate$lotsize)
summary(realestate$price)
options(scipen=99)
#Question 1
#univariate analysis
summary(realestate$lotsize)
summary(realestate$price)
outlier_values <- boxplot.stats(realestate$lotsize)$out
outlier_values
outlier_values1<-boxplot.stats(realestate$price)$out1
outlier_values1
realestate <- realestate[-c(outlier_values,outlier_values1),]
# Question 1: visualize the price ~ lotsize relationship with a fitted line
ggplot(data=realestate) + aes(x=lotsize, y=price) +
geom_point(pch=16, color="coral") +
labs(title='Relationship between price and lotsize',
x="lotsize", y="price") + # x for xlab, y for ylab
geom_smooth(method="lm", color="black", lwd=2)
# Pearson correlation test between lotsize and price
cor.test(realestate$lotsize, realestate$price)
#there is a relationship between lotsize and price: positive - as lotsize increases, price increases
#Question 2: simple linear regression of price on lotsize
mod1<-lm(price ~ lotsize, data=realestate)
mod1
summary(mod1)
# 99% confidence intervals for the model coefficients
confint(mod1, level=0.99)
# 99% confidence intervals for the mean price at three lot sizes
predict(mod1 , data.frame(lotsize =(c(4000 ,10000 ,12000) )),
interval ="confidence", level=0.99)
boxplot(residuals(mod1))
plot(realestate$price~realestate$lotsize, pch=16, col="lightblue")
abline(mod1, col="red", lwd=3)
#there is a positive relationship between lotsize and price.
# Multiple regression of price on all available predictors
mod3<-lm(realestate$price~realestate$lotsize+realestate$bedrooms+realestate$bathrms+realestate$stories+realestate$driveway+realestate$recroom+realestate$fullbase+realestate$airco+realestate$gashw+realestate$garagepl+realestate$prefarea)
summary(mod3)
plot(realestate$price~realestate$lotsize+realestate$bedrooms+realestate$bathrms+realestate$stories+realestate$driveway+realestate$recroom+realestate$fullbase+realestate$airco+realestate$gashw+realestate$garagepl+realestate$prefarea)
#the variables that exhibit the strongest relationship are lotsize, bathrms, stories, fullbaseyes, aircoyes, gashwyes, garagepl
#Question 3: clean the credit data and regress Rating on selected predictors
colSums(is.na(credit)) # count of missing values per column
glimpse(credit)
credit$Income <- as.numeric(credit$Income)
credit$Limit<-as.numeric(credit$Limit)
# NOTE(review): the result of this gsub() is discarded (not assigned), so it
# has no effect on `credit`; if the intent is to strip "$" signs from a
# currency column, assign the result back to that column - TODO confirm intent.
gsub("\\$","",credit)
summary(credit$Limit)
credit1<-credit
credit1<-na.omit(credit1) # drop rows containing any missing value
View(credit1)
summary(credit1)
#check the distribution,multicollinearity
mod2<-lm(Rating~Limit+Cards+Education+Student+Married,data=credit)
mod2
# Fixed typo: predit.lm -> predict (dispatches to predict.lm for lm objects)
Predictions<-predict(mod2,credit1)
predict(mod2,credit1)
summary(mod2)
boxplot(residuals(mod2))
plot(credit1$Rating~credit1$Limit+credit1$Cards+credit1$Education+credit1$Student+credit1$Married, pch=16, col="lightblue")
anova(mod2)
#correlation test
#The independent variable that is significant is Limit to predict the rating.
|
# Exploratory data analysis: plot 3 (energy sub-metering over two days)
library(data.table)
library(tidyverse)

# Location of the raw household power consumption file
data_dir <- "D:/Online Courses/Coursera/Data Science Specialization/Explanatory Data Analysis/"

# Read the full data set; "?" marks missing values in the source file
power_raw <- fread(paste0(data_dir, "household_power_consumption.txt"),
                   data.table = FALSE, stringsAsFactors = FALSE,
                   na.strings = "?")
power_raw$Date <- as.Date(power_raw$Date, "%d/%m/%Y")

# Keep only the two dates of interest (1-2 Feb 2007)
feb_days <- power_raw %>%
  filter(Date %in% as.Date(c("1/2/2007", "2/2/2007"), "%d/%m/%Y"))

# Combine date and time into a single POSIXct timestamp for the x axis
feb_days$time_new <- as.POSIXct(paste0(feb_days$Date, " ", feb_days$Time), tz = "GMT")

# Render the three sub-metering series as lines to plot3.png (480x480)
png(paste0(data_dir, "plot3.png"), height = 480, width = 480)
plot(x = feb_days$time_new, y = feb_days$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(x = feb_days$time_new, y = feb_days$Sub_metering_2, type = "l", col = "red")
lines(x = feb_days$time_new, y = feb_days$Sub_metering_3, type = "l", col = "blue")
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /plot3.R | no_license | Elkfield/ExData_Plotting1 | R | false | false | 1,473 | r |
# r script for exploratory data analysis
# load the data
library(data.table)
library(tidyverse)
# Directory containing the raw household power consumption file
pth <- "D:/Online Courses/Coursera/Data Science Specialization/Explanatory Data Analysis/"
# fread() for speed; "?" marks missing values in the source file
dta <- fread(paste0(pth, "household_power_consumption.txt"), data.table = FALSE, stringsAsFactors = FALSE,
na.strings = "?")
dta$Date <- as.Date(dta$Date, "%d/%m/%Y")
# filter the data for the two dates in focus (1-2 Feb 2007):
dta %>%
filter(Date %in% as.Date(c("1/2/2007", "2/2/2007"), "%d/%m/%Y")) ->
dta2
# Combine date and time into a single POSIXct timestamp for the x axis
dta2$time_new <- as.POSIXct(paste0(dta2$Date, " ", dta2$Time), tz = "GMT")
# Interactive draft of the plot kept for reference:
#plot(x = dta2$time_new,y = dta2$Sub_metering_1, type = "l",
# xlab = "", ylab = "Energy sub metering")
#lines(x = dta2$time_new,y = dta2$Sub_metering_2, type = "l", col = "red")
#lines(x = dta2$time_new,y = dta2$Sub_metering_3, type = "l", col = "blue")
#legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
# legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Render the three sub-metering series to plot3.png (480x480)
png(paste0(pth, "plot3.png"), height = 480, width = 480)
plot(x = dta2$time_new,y = dta2$Sub_metering_1, type = "l",
xlab = "", ylab = "Energy sub metering")
lines(x = dta2$time_new,y = dta2$Sub_metering_2, type = "l", col = "red")
lines(x = dta2$time_new,y = dta2$Sub_metering_3, type = "l", col = "blue")
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_batch_elec_bio_low_xml.R
\name{module_energy_batch_elec_bio_low_xml}
\alias{module_energy_batch_elec_bio_low_xml}
\title{module_energy_batch_elec_bio_low_xml}
\usage{
module_energy_batch_elec_bio_low_xml(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{elec_bio_low.xml}. The corresponding file in the
original data system was \code{batch_elec_bio_low_xml.R} (energy XML).
}
\description{
Construct XML data structure for \code{elec_bio_low.xml}.
}
| /man/module_energy_batch_elec_bio_low_xml.Rd | permissive | JGCRI/gcamdata | R | false | true | 778 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_batch_elec_bio_low_xml.R
\name{module_energy_batch_elec_bio_low_xml}
\alias{module_energy_batch_elec_bio_low_xml}
\title{module_energy_batch_elec_bio_low_xml}
\usage{
module_energy_batch_elec_bio_low_xml(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{elec_bio_low.xml}. The corresponding file in the
original data system was \code{batch_elec_bio_low_xml.R} (energy XML).
}
\description{
Construct XML data structure for \code{elec_bio_low.xml}.
}
|
# Data acquisition: eviction, Opportunity Insights and CalEnviroScreen data
library(tidyverse)
library(rvest)
library(readr)
library(RCurl)
##Eviction Lab Data
# Download the tract-level CSV from the Eviction Lab S3 bucket
url <- "https://eviction-lab-data-downloads.s3.amazonaws.com/CA/tracts.csv"
download <- getURL(url)
eviction_data <- read.csv(text = download)
##Opportunity Insights Data
# Read from local files (file:// URLs), not a remote scrape
url <- "file:///Users/finndobkin/Downloads/tract_covariates.csv"
neighborhood_characteristics <- read.csv(file = url)
url <- "file:///Users/finndobkin/Downloads/health_ineq_online_table_12%20.csv"
tax_rate <- read.csv(file = url)
##CalEnviroScreen data
# Download the results workbook; read it both directly from the URL and
# via a downloaded temp file
url <- "https://oehha.ca.gov/media/downloads/calenviroscreen/document/ces3results.xlsx"
data1 <- openxlsx::read.xlsx(url)
temp <- tempfile()
download.file(url, destfile = temp, mode = 'wb')
data2 <- openxlsx::read.xlsx(temp)
unlink(temp)
# Sanity check: both read paths must yield identical data
stopifnot(all.equal(data1, data2))
| /methodstwo_final.r | no_license | FinnDobkin123/Graduate-School | R | false | false | 836 | r | library(tidyverse)
library(rvest)
library(readr)
library(RCurl)
##Eviction Lab Data
#Scrape from URL
url <- "https://eviction-lab-data-downloads.s3.amazonaws.com/CA/tracts.csv"
download <- getURL(url)
eviction_data <- read.csv(text = download)
##Opportunity Insights Data
#Scrape from URL
url <- "file:///Users/finndobkin/Downloads/tract_covariates.csv"
neighborhood_characteristics <- read.csv(file = url)
url <- "file:///Users/finndobkin/Downloads/health_ineq_online_table_12%20.csv"
tax_rate <- read.csv(file = url)
##CalEnviroScreen data
#Scrape from URL
url <- "https://oehha.ca.gov/media/downloads/calenviroscreen/document/ces3results.xlsx"
data1 <- openxlsx::read.xlsx(url)
temp <- tempfile()
download.file(url, destfile = temp, mode = 'wb')
data2 <- openxlsx::read.xlsx(temp)
unlink(temp)
stopifnot(all.equal(data1, data2))
|
## Association Rules & Collaborative Filtering
library(arules)
library(recommenderlab)
# ASSOCIATION RULES #
## Example 1: faceplate dataset
fp.df <- read.csv("Faceplate.csv")
### Drop first column (transaction id) and convert the rest to a matrix
fp.mat <- as.matrix(fp.df[, -1])
### convert the binary incidence matrix into a transactions database
fp.trans <- as(fp.mat, "transactions")
inspect(fp.trans)
## Generate Rules
### apriori defaults are support = 0.1 and confidence = 0.8;
### here we use support = 0.2 and confidence = 0.5 instead
rules <- apriori(fp.trans, parameter = list(supp = 0.2, conf = 0.5, target = "rules"))
### Inspect the first six rules, sorted by their "Lift"
inspect(head(sort(rules, by = "lift")))
### Keep only rules above a support/confidence threshold
rules.tbl <- inspect(rules)
rules.tbl[rules.tbl$support >= 0.04 & rules.tbl$confidence >= 0.7,]
| /Association_Rules.R | no_license | monicakumar94/Faceplate-Market-basket-Analysis | R | false | false | 784 | r | ## Association Rules & Collaborative Filtering
library(arules)
library(recommenderlab)
# ASSOCATION RULES #
## Example 1: faceplate dataset
fp.df <- read.csv("Faceplate.csv")
### Drop first column and convert it to a matrix
fp.mat <- as.matrix(fp.df[, -1])
### convert the binary incidence matrix into a transactions database
fp.trans <- as(fp.mat, "transactions")
inspect(fp.trans)
## Generate RUles
### Default support = 0.1 and confidence = 0.8
rules <- apriori(fp.trans, parameter = list(supp = 0.2, conf = 0.5, target = "rules"))
### Inspect the first six rules, sorted by their "Lift"
inspect(head(sort(rules, by = "lift")))
### Association Rules
rules.tbl <- inspect(rules)
rules.tbl[rules.tbl$support >= 0.04 & rules.tbl$confidence >= 0.7,]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractPC.R
\name{extractPC}
\alias{extractPC}
\title{PCA on gene expression profile}
\usage{
extractPC(x)
}
\arguments{
\item{x}{a numeric or complex matrix (or data frame) which provides the gene
expression data for the principal components analysis. Genes in the rows and
samples in the columns.}
}
\value{
A \code{\link[stats]{prcomp}} object.
}
\description{
Performs a principal components analysis on the given data matrix and returns
the results as an object of class \code{\link[stats]{prcomp}}.
}
\examples{
m = matrix(rnorm(100),ncol=5)
extractPC(m)
}
\seealso{
\code{\link[stats]{prcomp}}
}
| /man/extractPC.Rd | no_license | shbrief/GenomicSuperSignature | R | false | true | 682 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extractPC.R
\name{extractPC}
\alias{extractPC}
\title{PCA on gene expression profile}
\usage{
extractPC(x)
}
\arguments{
\item{x}{a numeric or complex matrix (or data frame) which provides the gene
expression data for the principal components analysis. Genes in the rows and
samples in the columns.}
}
\value{
A \code{\link[stats]{prcomp}} object.
}
\description{
Performs a principal components analysis on the given data matrix and returns
the results as an object of class \code{\link[stats]{prcomp}}.
}
\examples{
m = matrix(rnorm(100),ncol=5)
extractPC(m)
}
\seealso{
\code{\link[stats]{prcomp}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseCounts.R
\name{.isWideForm}
\alias{.isWideForm}
\title{Checks a data frame is a wide-form table.}
\usage{
.isWideForm(df, ch1Label = "Mt", ch2Label = "Wt")
}
\arguments{
\item{df}{A data frame.}
\item{ch1Label}{The prefix to use for the channel 1 target. Defaults to
"Mt".}
\item{ch2Label}{The prefix to use for the channel 2 target. Defaults to
"Wt".}
}
\value{
\code{TRUE} if \code{df} is considered to be of the correct format
and \code{FALSE} otherwise.
}
\description{
Our preferred data frame format is to have things in a wide-form data frame,
i.e. to have channel 1 and channel 2 data both in the same row.
}
\author{
Anthony Chiu, \email{anthony.chiu@cruk.manchester.ac.uk}
}
| /man/dot-isWideForm.Rd | no_license | CRUKMI-ComputationalBiology/twoddpcr | R | false | true | 770 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseCounts.R
\name{.isWideForm}
\alias{.isWideForm}
\title{Checks a data frame is a wide-form table.}
\usage{
.isWideForm(df, ch1Label = "Mt", ch2Label = "Wt")
}
\arguments{
\item{df}{A data frame.}
\item{ch1Label}{The prefix to use for the channel 1 target. Defaults to
"Mt".}
\item{ch2Label}{The prefix to use for the channel 2 target. Defaults to
"Wt".}
}
\value{
\code{TRUE} if \code{df} is considered to be of the correct format
and \code{FALSE} otherwise.
}
\description{
Our preferred data frame format is to have things in a wide-form data frame,
i.e. to have channel 1 and channel 2 data both in the same row.
}
\author{
Anthony Chiu, \email{anthony.chiu@cruk.manchester.ac.uk}
}
|
# Build per-session metadata for the 2007-2008 parliamentary session
source("./Scripts/sak_file_var/getMeta.R")
# Collect metadata for the session from the raw transcript folder
soFar <- getMeta(pathToData = "../../taler/id_taler_meta.csv",
sakfolderPath = "/media/martigso/Data/referat_raw/stortinget.no/no/Saker-og-publikasjoner/Publikasjoner/Referater/Stortinget/",
session = "2007-2008")
# Save the result, named after the session it covers
save(soFar, file = paste0("./Data/sak_filerefs/sakfiles", unique(soFar$session), ".rda"))
cat("Done!")
| /R/Scripts/sak_file_var/s0708.R | no_license | emanlapponi/storting | R | false | false | 396 | r | source("./Scripts/sak_file_var/getMeta.R")
soFar <- getMeta(pathToData = "../../taler/id_taler_meta.csv",
sakfolderPath = "/media/martigso/Data/referat_raw/stortinget.no/no/Saker-og-publikasjoner/Publikasjoner/Referater/Stortinget/",
session = "2007-2008")
save(soFar, file = paste0("./Data/sak_filerefs/sakfiles", unique(soFar$session), ".rda"))
cat("Done!")
|
# Fixed: the context label previously referred to calculate_covariate_drift(),
# but this file exercises calculate_residuals_drift().
context("Check calculate_residuals_drift() function")

test_that("Type of data in the explainer", {
  library("DALEX2")
  library("ranger")
  # Prediction wrapper for ranger models (extracts the numeric predictions)
  predict_function <- function(m, x, ...) predict(m, x, ...)$predictions
  model_old <- ranger(m2.price ~ ., data = apartments)
  d <- calculate_residuals_drift(model_old,
                                 apartments_test[1:4000, ], apartments_test[4001:8000, ],
                                 apartments_test$m2.price[1:4000], apartments_test$m2.price[4001:8000],
                                 predict_function = predict_function)
  # Result is a covariate_drift object with one row and two columns
  expect_true("covariate_drift" %in% class(d))
  expect_true(all(dim(d) == c(1, 2)))
})
| /tests/testthat/test_calculate_residuals_drift.R | no_license | NRebeiz/drifter | R | false | false | 626 | r | context("Check calculate_covariate_drift() function")
test_that("Type of data in the explainer",{
library("DALEX2")
library("ranger")
predict_function <- function(m,x,...) predict(m, x, ...)$predictions
model_old <- ranger(m2.price ~ ., data = apartments)
d <- calculate_residuals_drift(model_old,
apartments_test[1:4000,], apartments_test[4001:8000,],
apartments_test$m2.price[1:4000], apartments_test$m2.price[4001:8000],
predict_function = predict_function)
expect_true("covariate_drift" %in% class(d))
expect_true(all(dim(d) == c(1,2)))
})
|
source('FTSE100.R')
source("key-stats-valuation.R")

DOWNLOADS <- DIR_FTSE100
CACHE <- 'FTSE100_CACHE'

# 1 Download info for the list of companies in the FTSE100
FTSE100_INFO <- GetFTSE100Stocks()
FTSE100 <- BatchGetSymbols(tickers = '^FTSE',
                           first.date = first.date,
                           last.date = last.date,
                           cache.folder = file.path(WORK_DIR, CACHE))

# Build a dataframe of index prices with the date column first
FTSE100_DF <- as.data.frame(FTSE100["df.tickers"])
colnames(FTSE100_DF) <- c("open", "high","low","close","volume","price_adjusted","date","ticker","return_adjusted_prices","return_closing_prices")
FTSE100_DF <- FTSE100_DF[, c(7,1,2,3,4,5,6,8,9,10)]
# Fixed: drop the ticker column from the data frame that gets exported
# (this was previously applied to the list `FTSE100`, leaving the exported
# data frame untouched).
FTSE100_DF$ticker <- NULL

# Write the index prices to excel
tickerFile <- "FTSE100.xlsx"
tickerFilePath <- paste(DOWNLOADS, tickerFile, sep="/")
# Fixed: export the FTSE data frame (was SP500_DF, a copy-paste slip from
# the S&P 500 version of this script; SP500_DF is undefined here).
export(FTSE100_DF, tickerFilePath, append=FALSE)

# 2 Download prices in batch for all companies in the FTSE100 and store prices in cache folder
TICKERS <- FTSE100_INFO$ticker
TICKERS_DOWNLOAD_ERROR <- c()
TICKERS_DOWNLOAD_WARNING <- c()

# 3 Download prices and key stats for each ticker into one xlsx (two sheets)
for (ticker in TICKERS) {
  ticker <- trimws(ticker)
  TICKER_RAW <- BatchGetSymbols(tickers = ticker,
                                first.date = first.date,
                                last.date = last.date,
                                cache.folder = file.path(WORK_DIR, CACHE))
  result = tryCatch({
    TICKER_DF <- as.data.frame(TICKER_RAW["df.tickers"])
    names(TICKER_DF) <- c("open", "high","low","close","volume","price_adjusted","date","ticker","return_adjusted_prices","return_closing_prices")
    TICKER_DF <- TICKER_DF[, c(7,1,2,3,4,5,6,8,9,10)]
    TICKER_DF$ticker <- NULL
    tickerFile <- trimws(ticker)
    tickerFilePath <- paste(DOWNLOADS, tickerFile, sep="/")
    fileName <- paste(tickerFilePath, ".xlsx", sep = "")
    export(TICKER_DF, fileName, which = "Price", append=TRUE)
    # Get Key stats for that ticker from Marketwatch
    TICKER_STATS <- KeyStatsDataframe(ticker)
    colnames(TICKER_STATS) <- c("Metric","Value")
    export(TICKER_STATS, fileName, which = "Key_Stats", append=TRUE)
  },
  warning = function(w) {
    # Fixed: record the ticker in the tracking vector; the previous bare
    # c(...) call discarded its result, so nothing was ever recorded.
    TICKERS_DOWNLOAD_WARNING <<- c(TICKERS_DOWNLOAD_WARNING, ticker)
  },
  error = function(e) {
    TICKERS_DOWNLOAD_ERROR <<- c(TICKERS_DOWNLOAD_ERROR, ticker)
  },
  finally = {
    # Reset loop-scoped objects so a failed iteration cannot leak into the next
    TICKER_RAW <- NULL; TICKER_DF <- NULL
    tickerFile <- NULL; tickerFilePath <- NULL
  })
}
| /datafeed-stocks-ftse100.R | no_license | heclon/capm | R | false | false | 2,494 | r | source('FTSE100.R')
source("key-stats-valuation.R")
DOWNLOADS <- DIR_FTSE100
CACHE <- 'FTSE100_CACHE'
# 1 Download info for the list of companies in the FTSE100
FTSE100_INFO <- GetFTSE100Stocks()
FTSE100 <- BatchGetSymbols(tickers = '^FTSE',
first.date = first.date,
last.date = last.date,
cache.folder = file.path(WORK_DIR,CACHE))
# Build a dataframe
FTSE100_DF <- as.data.frame(FTSE100["df.tickers"])
colnames(FTSE100_DF) <- c("open", "high","low","close","volume","price_adjusted","date","ticker","return_adjusted_prices","return_closing_prices")
FTSE100_DF <- FTSE100_DF[, c(7,1,2,3,4,5,6,8,9,10)]
FTSE100$ticker <- NULL
# Write to excel/csv
tickerFile <- "FTSE100.xlsx"
tickerFilePath <- paste(DOWNLOADS, tickerFile, sep="/")
# write.csv(FTSE100, file = tickerFilePath)
export(SP500_DF, tickerFilePath , append=FALSE)
# 2 Download prices in batch for all companies in the FTSE100 and store prices in cache folder
TICKERS <- FTSE100_INFO$ticker
TICKERS_DOWNLOAD_ERROR <- c()
TICKERS_DOWNLOAD_WARNING <- c()
# 3 Download prices and key stats to xls
for(ticker in TICKERS){
ticker <- trimws(ticker)
TICKER_RAW <- BatchGetSymbols(tickers = ticker,
first.date = first.date,
last.date = last.date,
cache.folder = file.path(WORK_DIR,CACHE))
result = tryCatch({
TICKER_DF <- as.data.frame(TICKER_RAW ["df.tickers"])
names(TICKER_DF) <- c("open", "high","low","close","volume","price_adjusted","date","ticker","return_adjusted_prices","return_closing_prices")
TICKER_DF <- TICKER_DF[, c(7,1,2,3,4,5,6,8,9,10)]
TICKER_DF$ticker <- NULL
tickerFile <- trimws(ticker)
tickerFilePath <- paste(DOWNLOADS, tickerFile, sep="/")
fileName <- paste(tickerFilePath,".xlsx",sep = "")
export(TICKER_DF, fileName , which = "Price", append=TRUE)
# write.csv(TICKER_DF, file = fileName, row.names = FALSE)
# Get Key stats for that ticker from Marketwatch
TICKER_STATS <- KeyStatsDataframe(ticker)
colnames(TICKER_STATS) <- c("Metric","Value")
export(TICKER_STATS, fileName, which = "Key_Stats", append=TRUE)
},
warning = function(w) {
c(TICKERS_DOWNLOAD_WARNING, ticker)},
error = function(e) {
c(TICKERS_DOWNLOAD_ERROR, ticker)},
finally = {
TICKER_RAW <- NULL;TICKER_DF <- NULL
tickerFile <- NULL;tickerFilePath <- NULL
}
)
}
|
# Expand the aggregated Titanic table into one row per passenger, then fit
# and plot a classification tree for survival.
library(reshape2)
library(plyr)
library(rpart)

# Titanic is a 4-d contingency table; melt() yields one row per cell + count
titanic <- melt(Titanic)
titanic <- titanic[titanic$value > 0, ]
# Replicate each cell `value` times so every passenger becomes one row
titanic <- ddply(.data = titanic,
                 .variables = c("Class", "Sex", "Age", "Survived"),
                 .fun = function(x){
                   n <- x$value[1]
                   df1 <- data.frame(Class = rep(x$Class[1], n),
                                     Sex = rep(x$Sex[1], n),
                                     Age = rep(x$Age[1], n),
                                     Survived = rep(x$Survived[1], n))
                   return(df1)
                 })

# Fit a classification tree for survival and plot it
tree.1 <- rpart(Survived ~ Class + Sex + Age, method = "class", data = titanic)
# Fixed typo in the plot title: "Prbability" -> "Probability"
plot(tree.1, uniform = TRUE, main = "Probability of Survival on Titanic")
text(tree.1, use.n = TRUE, all = TRUE, cex = 0.6)
| /Misc/Decision Tree of titanic.R | no_license | JMFlin/Machine-Learning | R | false | false | 826 | r | library(reshape2)
library(plyr)
library(rpart)
titanic <- melt(Titanic)
titanic <- titanic[titanic$value > 0, ]
titanic <- ddply(.data = titanic,
.variables = c("Class", "Sex", "Age", "Survived"),
.fun = function(x){
n <- x$value[1]
df1 <- data.frame(Class = rep(x$Class[1], n),
Sex = rep(x$Sex[1], n),
Age = rep(x$Age[1], n),
Survived = rep(x$Survived[1], n))
return(df1)
})
tree.1 <- rpart(Survived ~ Class + Sex + Age, method = "class", data = titanic)
plot(tree.1, uniform = TRUE, main = "Prbability of Survival on Titanic")
text(tree.1, use.n = TRUE, all = TRUE, cex = 0.6)
|
# Swap two values in a vector: every occurrence of x1 becomes x2 and vice versa.
source("mapvalues.R")

swap.values1 <- function(v, x1, x2) {
  # Delegate to my.mapvalues with the from/to pairs crossed over.
  swap.from <- c(x1, x2)
  swap.to <- c(x2, x1)
  my.mapvalues(v, swap.from, swap.to)
}

v <- c("m", "s", "p", "s", "p")
swap.values1(v, "s", "p")
| /exercises/swap_values.R | no_license | abhi8893/Intensive-R | R | false | false | 191 | r | # Swap 2 values in a vector
source("mapvalues.R")
# Swap x1 and x2 in v by mapping c(x1, x2) onto c(x2, x1)
swap.values1 <- function(v, x1, x2){
my.mapvalues(v, c(x1, x2), c(x2, x1))
}
v <- c("m", "s", "p", "s", "p")
swap.values1(v, "s", "p")
|
library(nimble)
### Name: nimbleModel
### Title: Create a NIMBLE model from BUGS code
### Aliases: nimbleModel
### ** Examples
# BUGS-style model: x is normal around mu, mu has a normal prior
code <- nimbleCode({
x ~ dnorm(mu, sd = 1)
mu ~ dnorm(0, sd = prior_sd)
})
# Constants are fixed quantities; data are observed values
constants = list(prior_sd = 1)
data = list(x = 4)
Rmodel <- nimbleModel(code, constants = constants, data = data)
| /data/genthat_extracted_code/nimble/examples/nimbleModel.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 331 | r | library(nimble)
### Name: nimbleModel
### Title: Create a NIMBLE model from BUGS code
### Aliases: nimbleModel
### ** Examples
code <- nimbleCode({
x ~ dnorm(mu, sd = 1)
mu ~ dnorm(0, sd = prior_sd)
})
constants = list(prior_sd = 1)
data = list(x = 4)
Rmodel <- nimbleModel(code, constants = constants, data = data)
|
# Demonstrate R's missing-value predicates on a small numeric vector.
# is.na() for NA  (note: is.na() is also TRUE for NaN)
# is.nan() for NaN
x <- c(1, 425, NA, NaN, 235, 6434)
print(x)
is.na(x)   # TRUE at the NA and the NaN positions
is.nan(x)  # TRUE only at the NaN position
| /R_Course/Week1/missing_values.r | no_license | omkarsk98/All-Labs | R | false | false | 99 | r | # is.na() for NA
# is.nan() for NaN (is.na() above is TRUE for both NA and NaN)
x <- c(1, 425, NA, NaN, 235, 6434)
print(x)
is.na(x)   # TRUE at the NA and the NaN positions
is.nan(x)  # TRUE only at the NaN position
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.R
\name{cv}
\alias{cv}
\title{coefficient of variation}
\usage{
cv(x)
}
\arguments{
\item{x}{a numeric value; it can be a vector or a data.frame}
}
\value{
cv
}
\description{
Compute the coefficient of variation
}
\examples{
set.seed(12345)
x<-rnorm(25,2,3)
cv(x)
}
\keyword{cv}
| /man/cv.Rd | no_license | osoramirez/resumeR | R | false | true | 364 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.R
\name{cv}
\alias{cv}
\title{coefficient of variation}
\usage{
cv(x)
}
\arguments{
\item{x}{a numeric value; it can be a vector or a data.frame}
}
\value{
cv
}
\description{
Compute the coefficient of variation
}
\examples{
set.seed(12345)
x<-rnorm(25,2,3)
cv(x)
}
\keyword{cv}
|
#' plot_heatmap
#' Generate a heatmap from a previously analyzed GCaMP dataset using
#' plotGCaMP_multi - output is a ggplot object which can be modified further.
#' The input csv is chosen interactively via file.choose().
#' @param heatmap_limits optional 3-value numeric vector defining the color
#'   scale breaks/limits, ie c(-1,0,2); any non-numeric value triggers an
#'   automatic calculation from the 5%/50%/99% quantiles of delF.
#' @param response_time time of expected GCaMP response in seconds. Heatmap
#'   rows are ordered by the maximal delta up to this time.
#' @param ... currently unused; reserved for future options.
#' @importFrom magrittr "%>%"
#' @importFrom magrittr "%<>%"
#' @importFrom magrittr "%$%"
#' @export
#' @examples plot <- plot_heatmap()
#'
plot_heatmap <- function(heatmap_limits = "auto",
                         response_time = 59.5,
                         ...) {
  library(tidyverse)
  library(scales)
  # Interactively pick the analyzed GCaMP csv; animal_num becomes a factor
  # so it can serve as a discrete y axis.
  data <- read_csv(file.choose()) %>%
    mutate(animal_num = as.factor(animal_num))
  # Determine color-scale breaks: from the delF quantiles when "auto",
  # otherwise from the user-supplied 3-value vector.
  if (!is.numeric(heatmap_limits)) {
    breaks <- round(
      data %>% unnest %$% quantile(delF, c(0.05, 0.5, 0.99)),
      2
    )
  } else {
    breaks <- heatmap_limits
  }
  # (These two assignments were previously duplicated both inside and after
  # the if/else; computing them once here is equivalent.)
  labels <- as.character(breaks)
  limits <- breaks[c(1, 3)]
  # Order animals by their maximal delta before response_time.
  plot_order <- data %>%
    group_by(animal, animal_num) %>%
    summarise(maxD = MF.matR::max_delta(delF, end = response_time)) %>%
    arrange(maxD)
  # Fixed: full_join() takes its join columns via `by =`; the previous
  # `cols =` argument is not a join parameter. maxD is the new column brought
  # in by the join, so the key is (animal, animal_num).
  full_join(data, plot_order, by = c("animal", "animal_num")) %>%
    group_by(animal_num) %>%
    ggplot(aes(x = time, y = fct_reorder(animal_num, maxD))) +
    geom_tile(aes(fill = signal)) +
    scale_fill_viridis_c(option = "magma",
                         breaks = breaks,
                         labels = labels,
                         limits = limits,
                         oob = squish) +
    theme_classic() +
    theme(axis.text = element_text(size = 16),
          axis.title = element_text(size = 18),
          axis.text.y = element_blank()) +
    labs(y = "Animal number")
}
| /R/plot_heatmap.R | no_license | SenguptaLab/MF.matR | R | false | false | 1,990 | r | #' plot_heatmap
#' Generate a heatmap from a previously analyzed GCaMP dataset using plotGCaMP_multi - output is ggplot object which can be modified further
#' @param heatmap_limits optional 3-value vector defining the color scale limits, ie c(-1,0,2)
#' @param response_time time of expected GCaMP response in seconds. heatmaps will be arranged in descending amplitude based on these responses.
#' @importFrom magrittr "%>%"
#' @importFrom magrittr "%<>%"
#' @importFrom magrittr "%$%"
#' @export
#' @examples plot <- plot_heatmap()
#'
plot_heatmap <- function(heatmap_limits = "auto",
response_time = 59.5,
...) {
library(tidyverse)
library(scales)
data <- read_csv(file.choose()) %>%
mutate(animal_num = as.factor(animal_num))
# full_join(data, plot_order) %>%
# unnest() %>%
if(!is.numeric(heatmap_limits)) { # using auto calc unless a numeric vector input
breaks <- round(
data %>% unnest %$% quantile(delF, c(0.05, 0.5, 0.99)),
2
)
labels <- as.character(breaks)
limits <- breaks[c(1,3)]
} else {
breaks <- heatmap_limits
labels <- as.character(breaks)
limits <- breaks[c(1,3)]
}
labels <- as.character(breaks)
limits <- breaks[c(1,3)]
plot_order <- data %>%
group_by(animal, animal_num) %>%
summarise(maxD = MF.matR::max_delta(delF, end = response_time)) %>%
arrange(maxD)
full_join(data, plot_order, cols = c("animal", "animal_num", "maxD")) %>% group_by(animal_num) %>%
ggplot(aes(x = time, y = fct_reorder(animal_num, maxD))) +
geom_tile(aes(fill = signal)) +
scale_fill_viridis_c(option = "magma",
breaks = breaks,
labels = labels,
limits = limits,
oob =squish) +
theme_classic() +
theme(axis.text = element_text(size = 16),
axis.title = element_text(size = 18),
axis.text.y = element_blank()) +
labs(y = "Animal number") }
|
#' Keyword extraction
#'
#' Extract keywords from a Chinese sentence or from a text file. The worker
#' uses the MixSegment model to cut words and the TF-IDF algorithm to rank
#' them; \code{dict}, \code{hmm}, \code{idf}, \code{stop_word} and
#' \code{topn} should be provided when initializing the jiebaR worker.
#'
#' The symbol \code{<=} is available as a shorthand for this function.
#' @seealso \code{\link{<=.keywords}} \code{\link{worker}}
#' @param code A Chinese sentence or the path of a text file.
#' @param jiebar jiebaR Worker.
#' @return a vector of keywords with weight.
#' @references \url{http://en.wikipedia.org/wiki/Tf-idf}
#' @author Qin Wenfeng
#' @examples
#' \donttest{
#' ### Keyword Extraction
#' keys = worker("keywords", topn = 1)
#' keys <= "words of fun"}
#' @export
keywords <- function(code, jiebar) {
  if (!is.character(code) || length(code) != 1)
    stop("Argument 'code' must be an string.")

  # A plain sentence is processed directly; an existing path is read as a file.
  if (!file.exists(code)) {
    return(keyw(code = code, jiebar = jiebar))
  }

  enc <- jiebar$encoding
  if (jiebar$detect == T) enc <- filecoding(code)
  keyl(code = code, jiebar = jiebar, encoding = enc)
}
# Read a whole text file, re-encode its content to UTF-8 if needed, and
# extract keywords from the concatenated text.
# Fixed: returns character(0) for an empty file; previously `out.lines` was
# undefined in that case and the function errored. Also dropped the unused
# local `OUT`.
keyl <- function(code, jiebar, encoding) {
  input.r <- file(code, open = "r")
  out.lines <- character(0)
  tryCatch({
    tmp.lines <- readLines(input.r, encoding = encoding)
    nlines <- length(tmp.lines)
    # Collapse all lines into one string so keywords can span line breaks
    tmp.lines <- paste(tmp.lines, collapse = " ")
    if (nlines > 0) {
      if (encoding != "UTF-8") {
        tmp.lines <- iconv(tmp.lines, encoding, "UTF-8")
      }
      out.lines <- keyw(code = tmp.lines, jiebar = jiebar)
    }
    return(out.lines)
  }, finally = {
    # Always release the file connection, even on error
    try(close(input.r), silent = TRUE)
  })
}
# Extract keywords from a single UTF-8 string using the worker's tagger.
keyw <- function(code, jiebar) {
# Unless symbols are kept, replace everything that is not a CJK ideograph,
# an ASCII letter or a digit with a space.
if (jiebar$symbol == F) {
code <- gsub("[^\u4e00-\u9fa5a-zA-Z0-9]", " ", code)
}
# Collapse runs of whitespace and trim both ends.
code <- gsub("^\\s+|\\s+$", "", gsub("\\s+", " ", code))
result <- jiebar$worker$tag(code)
# On Windows, mark the returned strings as UTF-8 explicitly.
if (.Platform$OS.type == "windows") {
Encoding(result)<-"UTF-8"}
return(result)
}
| /R/keywords.R | permissive | c3h3/jiebaR | R | false | false | 2,054 | r | #' Keyword extraction
#'
#' Keyword Extraction worker use MixSegment model to cut word and use
#' TF-IDF algorithm to find the keywords. \code{dict} , \code{hmm},
#' \code{idf}, \code{stop_word} and \code{topn} should be provided when initializing
#' jiebaR worker.
#'
#' There is a symbol \code{<=} for this function.
#' @seealso \code{\link{<=.keywords}} \code{\link{worker}}
#' @param code A Chinese sentence or the path of a text file.
#' @param jiebar jiebaR Worker.
#' @return a vector of keywords with weight.
#' @references \url{http://en.wikipedia.org/wiki/Tf-idf}
#' @author Qin Wenfeng
#' @examples
#' \donttest{
#' ### Keyword Extraction
#' keys = worker("keywords", topn = 1)
#' keys <= "words of fun"}
#' @export
keywords <- function(code, jiebar) {
if (!is.character(code) || length(code) != 1)
stop("Argument 'code' must be an string.")
if (file.exists(code)) {
encoding<-jiebar$encoding
if(jiebar$detect ==T) encoding<-filecoding(code)
keyl(code = code, jiebar = jiebar,
encoding = encoding)
} else {
keyw(code = code, jiebar = jiebar)
}
}
keyl <- function(code, jiebar, encoding) {
input.r <- file(code, open = "r")
OUT <- FALSE
tryCatch({
tmp.lines <- readLines(input.r, encoding = encoding)
nlines <- length(tmp.lines)
tmp.lines <- paste(tmp.lines, collapse = " ")
if (nlines > 0) {
if (encoding != "UTF-8") {
tmp.lines <- iconv(tmp.lines,encoding , "UTF-8")
}
out.lines <- keyw(code = tmp.lines, jiebar = jiebar)
}
return(out.lines)
}, finally = {
try(close(input.r), silent = TRUE)
})
}
keyw <- function(code, jiebar) {
if (jiebar$symbol == F) {
code <- gsub("[^\u4e00-\u9fa5a-zA-Z0-9]", " ", code)
}
code <- gsub("^\\s+|\\s+$", "", gsub("\\s+", " ", code))
result <- jiebar$worker$tag(code)
if (.Platform$OS.type == "windows") {
Encoding(result)<-"UTF-8"}
return(result)
}
|
#Kathal Aditya Rajendra
#Paper Link : https://globaljournals.org/item/5412-a-modified-version-of-the-k-means-clustering-algorithm
#K-Means - Reducing the number of iterations and also improving the accuracy
library(dplyr)
library(nlme)
library(factoextra)
library(caret)
set.seed(7008) #setting seed so that results are reproducible
# Build a larger data set by stacking four copies of iris
df <- rbind(iris,iris)
df <- rbind(df,df) #importing dataset
# Stratified 70/30 train/test split on Species
trainIndex <- createDataPartition(df$Species, p = .7,list = FALSE,times = 1)
train_f <- df[ trainIndex,] #training data
test_f <- df[-trainIndex,] #testing data
# Feature-only versions (Species label removed)
train <- train_f %>% select(-Species)
test <- test_f %>% select(-Species)
# NOTE(review): our_fun/pre_fun are defined further down in this file, so
# sourcing the script top-to-bottom fails here; run the definitions first.
our_fun(train,test,train_f,test_f) #running the proposed K-Means Algorithm
pre_fun(train,test,train_f,test_f) #running the standard K-Means Algorithm
our_fun <- function(train,test,train_f,test_f){
#variable to keep tab of how many iteration ran in total
proof_count <- 0
#This is the step one of the algorithm. Here we are finding the diatance from the center
dump_data <- train %>% mutate(distance = sqrt(Sepal.Length^2 + Sepal.Width^2 + Petal.Length^2 + Petal.Width^2))
#Now we are arranging in increasing order
dump_data <- dump_data %>% arrange(distance)
#We do not need the distance column in future so we are droping it.
dump_data <- dump_data %>% select(-distance)
#choosing the initial centroids. Three partition are to be done. So division by 3
v1 <- (nrow(train) %/% 3)%/%2
v2 <- v1 + nrow(train) %/% 3
v3 <- v2 + nrow(train) %/% 3
#storing the intial centroids
center1 <- dump_data[v1,]
center2 <- dump_data[v2,]
center3 <- dump_data[v3,]
#creating the two datastructure required fot the implementation of the proposed
#K means algorithm
clusters <- rep(-1,nrow(train)) #Will store the last cluster number
distance <- rep(-1,nrow(train)) #Will store the distance from that cluster
#This is the first iteration. Here we are running this to assign
#All the data structures in the program to have their initial values
#There one should observe no IF condition is given.
for (i in 1:nrow(train)){
#calcualtin distance from the centroids
dist1 <- dist(rbind(center1 , train[i,]) , method = "euclidean")
dist2 <- dist(rbind(center2 , train[i,]) , method = "euclidean")
dist3 <- dist(rbind(center3 , train[i,]) , method = "euclidean")
distance_from_centers <- c(dist1 , dist2 , dist3)
#finding the minimum distance and saving it in distance[]
distance[i] <- min(distance_from_centers)
#finding which cluster that min distance correspondos to and saving it.
clusters[i] <- which.min(distance_from_centers)
proof_count = proof_count + 1
}
#binging the cluster and distance data with train dataframe
train <- cbind(train , cbind(clusters,distance))
#the main loop where we will apply the conditions of the proposed algorithm
while(TRUE){
#saving old centroid location to facilate error calculation in the future
old_center1 <- center1
old_center2 <- center2
old_center3 <- center3
#spliting the data into grps as per their cluster allocation in i-1th iteration
grp_data <- train %>% group_split(clusters)
center1 <- colMeans(grp_data[[1]]) #new centroid 1
center2 <- colMeans(grp_data[[2]]) #new centroid 2
center3 <- colMeans(grp_data[[3]]) #new centroid 3
centers <- rbind(center1 , center2 , center3)
err1 <- abs(sum(old_center1 - center1)) #calculating the diff in prev and new value
err2 <- abs(sum(old_center2 - center2)) #calculating the diff in prev and new value
err3 <- abs(sum(old_center3 - center3)) #calculating the diff in prev and new value
#if the difference is less than 0.0001 we will stop the while loop
if(err1 < 0.0001 && err2 < 0.0001 && err3 < 0.0001)
{
break
}
#the for loop to check every row in the dataset.
for (i in 1:nrow(train)){
#the proposed condition that leads to less number of iterations
if(dist(rbind(centers[train[i,"clusters"] , ] , train[i,]) , method = "euclidean") > train[i,"distance"]){
dist1 <- dist(rbind(center1 , train[i,]) , method = "euclidean")
dist2 <- dist(rbind(center2 , train[i,]) , method = "euclidean")
dist3 <- dist(rbind(center3 , train[i,]) , method = "euclidean")
distance_from_centers <- c(dist1 , dist2 , dist3)
train$distance[i] <- min(distance_from_centers)
train$clusters[i] <- which.min(distance_from_centers)
proof_count = proof_count + 1
}
}
}
#naming the clusters so that visually we can see the difference
train$clusters[train$clusters == 1] <- "setosa"
train$clusters[train$clusters == 2] <- "versicolor"
train$clusters[train$clusters == 3] <- "virginica"
#dropping distance as it is not required anymore
train <- train %>% select(-distance)
Species <- train_f$Species
train <- cbind(train , Species)
#initialation of the variable that will keep count of how many we were able to
#classify correctly
count <- 0
for (i in 1:nrow(train)){
if(train[i,"Species"] == train[i,"clusters"]){
count <- count+1
}
}
#printing accuracy
print((count/nrow(train))*100)
#We take the centroids from the train data and see they classify the test data
#for loop to assign the test data their nearest cluster
for (i in 1:nrow(test)){
dist1 <- dist(rbind(center1 , test[i,]) , method = "euclidean")
dist2 <- dist(rbind(center2 , test[i,]) , method = "euclidean")
dist3 <- dist(rbind(center3 , test[i,]) , method = "euclidean")
distance_from_centers <- c(dist1 , dist2 , dist3)
test$distance[i] <- min(distance_from_centers)
test$clusters[i] <- which.min(distance_from_centers)
}
#again renaming the clusters
test$clusters[test$clusters == 1] <- "setosa"
test$clusters[test$clusters == 2] <- "versicolor"
test$clusters[test$clusters == 3] <- "virginica"
#distance column not required in future
test <- test %>% select(-distance)
Species <- test_f$Species
test <- cbind(test , Species)
#initialation of the variable that will keep count of how many we were able to
#classify correctly
count <- 0
for (i in 1:nrow(test)){
if(test[i,"Species"] == test[i,"clusters"]){
count <- count+1
}
}
#printing accuracy
print((count/nrow(test))*100)
#printing the number of times the iteration ran
print(paste(c("The number of iterations were " , proof_count)) , sep = " : ")
}
pre_fun <- function(train, test, train_f, test_f) {
  # Baseline (standard Lloyd's) k-means with k = 3, used for comparison
  # against the modified algorithm in our_fun().
  #
  # Args:
  #   train, test     : data frames of numeric features only (no Species)
  #   train_f, test_f : the same partitions including Species, used only to
  #                     score clustering accuracy
  # Side effects: prints training accuracy, test accuracy and the total
  # number of cluster-assignment steps performed.
  # Requires dplyr to be attached (%>%, group_split).

  # counts every cluster-assignment step, as a proxy for the work done
  proof_count <- 0
  # arbitrary initialisation: the first three rows become the centroids
  center1 <- train[1, ]
  center2 <- train[2, ]
  center3 <- train[3, ]
  clusters <- rep(-1, nrow(train))
  # first pass: assign every point to its nearest initial centroid
  for (i in 1:nrow(train)) {
    dist1 <- dist(rbind(center1, train[i, ]), method = "euclidean")
    dist2 <- dist(rbind(center2, train[i, ]), method = "euclidean")
    dist3 <- dist(rbind(center3, train[i, ]), method = "euclidean")
    distance_from_centers <- c(dist1, dist2, dist3)
    clusters[i] <- which.min(distance_from_centers)
    proof_count <- proof_count + 1
  }
  train <- cbind(train, cbind(clusters))
  # iterate until the centroids stop moving
  while (TRUE) {
    old_center1 <- center1
    old_center2 <- center2
    old_center3 <- center3
    # recompute centroids as column means of each current cluster
    # NOTE(review): grp_data still contains the 'clusters' column, so each
    # centroid has an extra component and the cluster id itself takes part
    # in the euclidean distances below -- looks unintended, confirm first.
    grp_data <- train %>% group_split(clusters)
    center1 <- colMeans(grp_data[[1]])
    center2 <- colMeans(grp_data[[2]])
    center3 <- colMeans(grp_data[[3]])
    err1 <- abs(sum(old_center1 - center1))
    err2 <- abs(sum(old_center2 - center2))
    err3 <- abs(sum(old_center3 - center3))
    # converged once every centroid moved by less than the tolerance
    if (err1 < 0.0001 && err2 < 0.0001 && err3 < 0.0001) {
      break
    }
    # reassign every point to its nearest updated centroid
    for (i in 1:nrow(train)) {
      proof_count <- proof_count + 1
      dist1 <- dist(rbind(center1, train[i, ]), method = "euclidean")
      dist2 <- dist(rbind(center2, train[i, ]), method = "euclidean")
      dist3 <- dist(rbind(center3, train[i, ]), method = "euclidean")
      distance_from_centers <- c(dist1, dist2, dist3)
      train$clusters[i] <- which.min(distance_from_centers)
    }
  }
  # label the numeric clusters with species names for scoring/inspection
  train$clusters[train$clusters == 1] <- "setosa"
  train$clusters[train$clusters == 2] <- "versicolor"
  train$clusters[train$clusters == 3] <- "virginica"
  Species <- train_f$Species
  train <- cbind(train, Species)
  # training accuracy: fraction of rows whose cluster label matches Species
  count <- 0
  for (i in 1:nrow(train)) {
    if (train[i, "Species"] == train[i, "clusters"]) {
      count <- count + 1
    }
  }
  print((count / nrow(train)) * 100)
  # assign the held-out test data to the final training centroids
  # NOTE(review): the centroids include the 'clusters' component while
  # `test` has only the 4 feature columns -- verify this rbind() lines the
  # values up as intended.
  for (i in 1:nrow(test)) {
    dist1 <- dist(rbind(center1, test[i, ]), method = "euclidean")
    dist2 <- dist(rbind(center2, test[i, ]), method = "euclidean")
    dist3 <- dist(rbind(center3, test[i, ]), method = "euclidean")
    distance_from_centers <- c(dist1, dist2, dist3)
    test$clusters[i] <- which.min(distance_from_centers)
  }
  test$clusters[test$clusters == 1] <- "setosa"
  test$clusters[test$clusters == 2] <- "versicolor"
  test$clusters[test$clusters == 3] <- "virginica"
  Species <- test_f$Species
  test <- cbind(test, Species)
  # test accuracy
  count <- 0
  for (i in 1:nrow(test)) {
    if (test[i, "Species"] == test[i, "clusters"]) {
      count <- count + 1
    }
  }
  print((count / nrow(test)) * 100)
  # FIX: paste(c(a, b)) returned an unjoined 2-element vector and `sep` was
  # silently ignored by print(); also corrects the "iteratoins" typo.
  print(paste("The number of iterations were", proof_count, sep = " : "))
}
| /R_Code/KMeans.R | no_license | BitEater00/Research_Paper_implementation | R | false | false | 10,154 | r | #Kathal Aditya Rajendra
#Paper Link : https://globaljournals.org/item/5412-a-modified-version-of-the-k-means-clustering-algorithm
#K-Means - Reducing the number of iterations and also improving the accuracy
# Script-level setup: loads packages, builds an enlarged iris dataset and a
# stratified 70/30 train/test split, then runs both clustering variants.
library(dplyr)
library(nlme)
library(factoextra)
library(caret)
set.seed(7008) #setting seed so that results are reproducible
# quadruple the iris data (600 rows) so the clustering has more points
df <- rbind(iris,iris)
df <- rbind(df,df) #importing dataset
# stratified split on Species via caret
trainIndex <- createDataPartition(df$Species, p = .7,list = FALSE,times = 1)
train_f <- df[ trainIndex,] #training data
test_f <- df[-trainIndex,] #testing data
# feature-only copies (Species kept separately in *_f for accuracy scoring)
train <- train_f %>% select(-Species)
test <- test_f %>% select(-Species)
# NOTE(review): our_fun()/pre_fun() are called here but defined further down
# in this file -- sourcing the script top-to-bottom would fail with
# "could not find function"; confirm the intended execution order.
our_fun(train,test,train_f,test_f) #running the proposed K-Means Algorithm
pre_fun(train,test,train_f,test_f) #running the standard K-Means Algorithm
our_fun <- function(train, test, train_f, test_f) {
  # Modified k-means (k = 3) following the referenced paper: centroids are
  # initialised from the data ranked by distance to the origin, and a point
  # is only re-tested against all centroids when it has drifted further from
  # its current centroid than at its last assignment.  This reduces the
  # number of distance computations compared with pre_fun().
  #
  # Args:
  #   train, test     : data frames with the four iris measurement columns
  #                     (the mutate() below hard-codes those column names)
  #   train_f, test_f : the same partitions including Species, used only to
  #                     score clustering accuracy
  # Side effects: prints training accuracy, test accuracy and the total
  # number of cluster-assignment steps performed.
  # Requires dplyr to be attached (%>%, mutate, arrange, select, group_split).

  # counts every cluster-assignment step, as a proxy for the work done
  proof_count <- 0
  # step 1: rank points by their euclidean distance from the origin
  dump_data <- train %>% mutate(distance = sqrt(Sepal.Length^2 + Sepal.Width^2 + Petal.Length^2 + Petal.Width^2))
  dump_data <- dump_data %>% arrange(distance)
  dump_data <- dump_data %>% select(-distance)
  # step 2: take the midpoints of three equal slices of the ranked data as
  # the initial centroids
  v1 <- (nrow(train) %/% 3) %/% 2
  v2 <- v1 + nrow(train) %/% 3
  v3 <- v2 + nrow(train) %/% 3
  center1 <- dump_data[v1, ]
  center2 <- dump_data[v2, ]
  center3 <- dump_data[v3, ]
  # per-point bookkeeping: last assigned cluster and the distance to it
  clusters <- rep(-1, nrow(train))
  distance <- rep(-1, nrow(train))
  # first pass: assign every point unconditionally
  for (i in 1:nrow(train)) {
    dist1 <- dist(rbind(center1, train[i, ]), method = "euclidean")
    dist2 <- dist(rbind(center2, train[i, ]), method = "euclidean")
    dist3 <- dist(rbind(center3, train[i, ]), method = "euclidean")
    distance_from_centers <- c(dist1, dist2, dist3)
    distance[i] <- min(distance_from_centers)
    clusters[i] <- which.min(distance_from_centers)
    proof_count <- proof_count + 1
  }
  train <- cbind(train, cbind(clusters, distance))
  # main loop: repeat until the centroids stop moving
  while (TRUE) {
    old_center1 <- center1
    old_center2 <- center2
    old_center3 <- center3
    # recompute centroids as column means of each current cluster
    # NOTE(review): grp_data still contains the clusters/distance columns,
    # so the centroids carry those components into the distances below --
    # confirm this matches the paper before changing it.
    grp_data <- train %>% group_split(clusters)
    center1 <- colMeans(grp_data[[1]])
    center2 <- colMeans(grp_data[[2]])
    center3 <- colMeans(grp_data[[3]])
    centers <- rbind(center1, center2, center3)
    err1 <- abs(sum(old_center1 - center1))
    err2 <- abs(sum(old_center2 - center2))
    err3 <- abs(sum(old_center3 - center3))
    # converged once every centroid moved by less than the tolerance
    if (err1 < 0.0001 && err2 < 0.0001 && err3 < 0.0001) {
      break
    }
    for (i in 1:nrow(train)) {
      # key optimisation: only re-test a point against all centroids when it
      # drifted further from its current centroid than last time
      if (dist(rbind(centers[train[i, "clusters"], ], train[i, ]), method = "euclidean") > train[i, "distance"]) {
        dist1 <- dist(rbind(center1, train[i, ]), method = "euclidean")
        dist2 <- dist(rbind(center2, train[i, ]), method = "euclidean")
        dist3 <- dist(rbind(center3, train[i, ]), method = "euclidean")
        distance_from_centers <- c(dist1, dist2, dist3)
        train$distance[i] <- min(distance_from_centers)
        train$clusters[i] <- which.min(distance_from_centers)
        proof_count <- proof_count + 1
      }
    }
  }
  # label the numeric clusters with species names for scoring/inspection
  train$clusters[train$clusters == 1] <- "setosa"
  train$clusters[train$clusters == 2] <- "versicolor"
  train$clusters[train$clusters == 3] <- "virginica"
  train <- train %>% select(-distance)
  Species <- train_f$Species
  train <- cbind(train, Species)
  # training accuracy: fraction of rows whose cluster label matches Species
  count <- 0
  for (i in 1:nrow(train)) {
    if (train[i, "Species"] == train[i, "clusters"]) {
      count <- count + 1
    }
  }
  print((count / nrow(train)) * 100)
  # assign the held-out test data to the final training centroids
  # NOTE(review): the centroids include the clusters/distance components
  # while `test` starts with only 4 columns -- verify this rbind() lines the
  # values up as intended.
  for (i in 1:nrow(test)) {
    dist1 <- dist(rbind(center1, test[i, ]), method = "euclidean")
    dist2 <- dist(rbind(center2, test[i, ]), method = "euclidean")
    dist3 <- dist(rbind(center3, test[i, ]), method = "euclidean")
    distance_from_centers <- c(dist1, dist2, dist3)
    test$distance[i] <- min(distance_from_centers)
    test$clusters[i] <- which.min(distance_from_centers)
  }
  test$clusters[test$clusters == 1] <- "setosa"
  test$clusters[test$clusters == 2] <- "versicolor"
  test$clusters[test$clusters == 3] <- "virginica"
  test <- test %>% select(-distance)
  Species <- test_f$Species
  test <- cbind(test, Species)
  # test accuracy
  count <- 0
  for (i in 1:nrow(test)) {
    if (test[i, "Species"] == test[i, "clusters"]) {
      count <- count + 1
    }
  }
  print((count / nrow(test)) * 100)
  # FIX: paste(c(a, b)) returned an unjoined 2-element vector and `sep` was
  # silently ignored by print(); join the message properly instead.
  print(paste("The number of iterations were", proof_count, sep = " : "))
}
pre_fun <- function(train, test, train_f, test_f) {
  # Baseline (standard Lloyd's) k-means with k = 3, used for comparison
  # against the modified algorithm in our_fun().
  #
  # Args:
  #   train, test     : data frames of numeric features only (no Species)
  #   train_f, test_f : the same partitions including Species, used only to
  #                     score clustering accuracy
  # Side effects: prints training accuracy, test accuracy and the total
  # number of cluster-assignment steps performed.
  # Requires dplyr to be attached (%>%, group_split).

  # counts every cluster-assignment step, as a proxy for the work done
  proof_count <- 0
  # arbitrary initialisation: the first three rows become the centroids
  center1 <- train[1, ]
  center2 <- train[2, ]
  center3 <- train[3, ]
  clusters <- rep(-1, nrow(train))
  # first pass: assign every point to its nearest initial centroid
  for (i in 1:nrow(train)) {
    dist1 <- dist(rbind(center1, train[i, ]), method = "euclidean")
    dist2 <- dist(rbind(center2, train[i, ]), method = "euclidean")
    dist3 <- dist(rbind(center3, train[i, ]), method = "euclidean")
    distance_from_centers <- c(dist1, dist2, dist3)
    clusters[i] <- which.min(distance_from_centers)
    proof_count <- proof_count + 1
  }
  train <- cbind(train, cbind(clusters))
  # iterate until the centroids stop moving
  while (TRUE) {
    old_center1 <- center1
    old_center2 <- center2
    old_center3 <- center3
    # recompute centroids as column means of each current cluster
    # NOTE(review): grp_data still contains the 'clusters' column, so each
    # centroid has an extra component and the cluster id itself takes part
    # in the euclidean distances below -- looks unintended, confirm first.
    grp_data <- train %>% group_split(clusters)
    center1 <- colMeans(grp_data[[1]])
    center2 <- colMeans(grp_data[[2]])
    center3 <- colMeans(grp_data[[3]])
    err1 <- abs(sum(old_center1 - center1))
    err2 <- abs(sum(old_center2 - center2))
    err3 <- abs(sum(old_center3 - center3))
    # converged once every centroid moved by less than the tolerance
    if (err1 < 0.0001 && err2 < 0.0001 && err3 < 0.0001) {
      break
    }
    # reassign every point to its nearest updated centroid
    for (i in 1:nrow(train)) {
      proof_count <- proof_count + 1
      dist1 <- dist(rbind(center1, train[i, ]), method = "euclidean")
      dist2 <- dist(rbind(center2, train[i, ]), method = "euclidean")
      dist3 <- dist(rbind(center3, train[i, ]), method = "euclidean")
      distance_from_centers <- c(dist1, dist2, dist3)
      train$clusters[i] <- which.min(distance_from_centers)
    }
  }
  # label the numeric clusters with species names for scoring/inspection
  train$clusters[train$clusters == 1] <- "setosa"
  train$clusters[train$clusters == 2] <- "versicolor"
  train$clusters[train$clusters == 3] <- "virginica"
  Species <- train_f$Species
  train <- cbind(train, Species)
  # training accuracy: fraction of rows whose cluster label matches Species
  count <- 0
  for (i in 1:nrow(train)) {
    if (train[i, "Species"] == train[i, "clusters"]) {
      count <- count + 1
    }
  }
  print((count / nrow(train)) * 100)
  # assign the held-out test data to the final training centroids
  # NOTE(review): the centroids include the 'clusters' component while
  # `test` has only the 4 feature columns -- verify this rbind() lines the
  # values up as intended.
  for (i in 1:nrow(test)) {
    dist1 <- dist(rbind(center1, test[i, ]), method = "euclidean")
    dist2 <- dist(rbind(center2, test[i, ]), method = "euclidean")
    dist3 <- dist(rbind(center3, test[i, ]), method = "euclidean")
    distance_from_centers <- c(dist1, dist2, dist3)
    test$clusters[i] <- which.min(distance_from_centers)
  }
  test$clusters[test$clusters == 1] <- "setosa"
  test$clusters[test$clusters == 2] <- "versicolor"
  test$clusters[test$clusters == 3] <- "virginica"
  Species <- test_f$Species
  test <- cbind(test, Species)
  # test accuracy
  count <- 0
  for (i in 1:nrow(test)) {
    if (test[i, "Species"] == test[i, "clusters"]) {
      count <- count + 1
    }
  }
  print((count / nrow(test)) * 100)
  # FIX: paste(c(a, b)) returned an unjoined 2-element vector and `sep` was
  # silently ignored by print(); also corrects the "iteratoins" typo.
  print(paste("The number of iterations were", proof_count, sep = " : "))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bk_method.R
\name{Bk_permutations}
\alias{Bk_permutations}
\title{Bk permutation - Calculating Fowlkes-Mallows Index for two dendrogram}
\usage{
Bk_permutations(tree1, tree2, k, R = 1000,
warn = dendextend_options("warn"), ...)
}
\arguments{
\item{tree1}{a dendrogram/hclust/phylo object.}
\item{tree2}{a dendrogram/hclust/phylo object.}
\item{k}{an integer scalar or vector with the desired number
of cluster groups.
If missing - the Bk will be calculated for a default k range of
2:(nleaves-1).
No point in checking k=1/k=n, since both will give Bk=1.}
\item{R}{integer (Default is 1000). The number of Bk permutation to perform for each k.}
\item{warn}{logical (default from dendextend_options("warn") is FALSE).
Set if warnings are to be issued; it is safer to keep this at TRUE,
but for keeping the noise down, the default is FALSE.
If set to TRUE, extra checks are made to verify that the two clusters have
the same size and the same labels.}
\item{...}{Ignored (passed to FM_index_R/FM_index_profdpm).}
}
\value{
A list (of the length of k's), where each element of the list has
R (number of permutations) calculations of Fowlkes-Mallows index
between two dendrogram after having their labels shuffled.
The names of the lists' items is the k for which it was calculated.
}
\description{
Bk is the calculation of Fowlkes-Mallows index for a series of k cuts
for two dendrograms.
Bk permutation calculates the Bk under the null hypothesis of no similarity
between the two trees by randomly shuffling the labels of the two trees
and calculating their Bk.
}
\details{
From Wikipedia:
Fowlkes-Mallows index (see references) is an external evaluation method
that is used to determine the similarity between two clusterings
(clusters obtained after a clustering algorithm). This measure of similarity
could be either between two hierarchical clusterings or a clustering and
a benchmark classification. A higher value of the Fowlkes-Mallows index
indicates a greater similarity between the clusters and the benchmark
classifications.
}
\examples{
\dontrun{
set.seed(23235)
ss <- TRUE # sample(1:150, 10 )
hc1 <- hclust(dist(iris[ss,-5]), "com")
hc2 <- hclust(dist(iris[ss,-5]), "single")
# tree1 <- as.treerogram(hc1)
# tree2 <- as.treerogram(hc2)
# cutree(tree1)
some_Bk <- Bk(hc1, hc2, k = 20)
some_Bk_permu <- Bk_permutations(hc1, hc2, k = 20)
# we can see that the Bk is much higher than the permutation Bks:
plot(x=rep(1,1000), y= some_Bk_permu[[1]],
main = "Bk distribution under H0",
ylim = c(0,1))
points(1, y= some_Bk, pch = 19, col = 2 )
}
}
\references{
Fowlkes, E. B.; Mallows, C. L. (1 September 1983).
"A Method for Comparing Two Hierarchical Clusterings".
Journal of the American Statistical Association 78 (383): 553.
\url{http://en.wikipedia.org/wiki/Fowlkes-Mallows_index}
}
\seealso{
\code{\link{FM_index}}, \link{Bk}
}
| /man/Bk_permutations.Rd | no_license | JohnMCMa/dendextend | R | false | true | 2,969 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bk_method.R
\name{Bk_permutations}
\alias{Bk_permutations}
\title{Bk permutation - Calculating Fowlkes-Mallows Index for two dendrogram}
\usage{
Bk_permutations(tree1, tree2, k, R = 1000,
warn = dendextend_options("warn"), ...)
}
\arguments{
\item{tree1}{a dendrogram/hclust/phylo object.}
\item{tree2}{a dendrogram/hclust/phylo object.}
\item{k}{an integer scalar or vector with the desired number
of cluster groups.
If missing - the Bk will be calculated for a default k range of
2:(nleaves-1).
No point in checking k=1/k=n, since both will give Bk=1.}
\item{R}{integer (Default is 1000). The number of Bk permutation to perform for each k.}
\item{warn}{logical (default from dendextend_options("warn") is FALSE).
Set if warnings are to be issued; it is safer to keep this at TRUE,
but for keeping the noise down, the default is FALSE.
If set to TRUE, extra checks are made to verify that the two clusters have
the same size and the same labels.}
\item{...}{Ignored (passed to FM_index_R/FM_index_profdpm).}
}
\value{
A list (of the length of k's), where each element of the list has
R (number of permutations) calculations of Fowlkes-Mallows index
between two dendrogram after having their labels shuffled.
The names of the lists' items is the k for which it was calculated.
}
\description{
Bk is the calculation of Fowlkes-Mallows index for a series of k cuts
for two dendrograms.
Bk permutation calculates the Bk under the null hypothesis of no similarity
between the two trees by randomly shuffling the labels of the two trees
and calculating their Bk.
}
\details{
From Wikipedia:
Fowlkes-Mallows index (see references) is an external evaluation method
that is used to determine the similarity between two clusterings
(clusters obtained after a clustering algorithm). This measure of similarity
could be either between two hierarchical clusterings or a clustering and
a benchmark classification. A higher value of the Fowlkes-Mallows index
indicates a greater similarity between the clusters and the benchmark
classifications.
}
\examples{
\dontrun{
set.seed(23235)
ss <- TRUE # sample(1:150, 10 )
hc1 <- hclust(dist(iris[ss,-5]), "com")
hc2 <- hclust(dist(iris[ss,-5]), "single")
# tree1 <- as.treerogram(hc1)
# tree2 <- as.treerogram(hc2)
# cutree(tree1)
some_Bk <- Bk(hc1, hc2, k = 20)
some_Bk_permu <- Bk_permutations(hc1, hc2, k = 20)
# we can see that the Bk is much higher than the permutation Bks:
plot(x=rep(1,1000), y= some_Bk_permu[[1]],
main = "Bk distribution under H0",
ylim = c(0,1))
points(1, y= some_Bk, pch = 19, col = 2 )
}
}
\references{
Fowlkes, E. B.; Mallows, C. L. (1 September 1983).
"A Method for Comparing Two Hierarchical Clusterings".
Journal of the American Statistical Association 78 (383): 553.
\url{http://en.wikipedia.org/wiki/Fowlkes-Mallows_index}
}
\seealso{
\code{\link{FM_index}}, \link{Bk}
}
|
library(ggplot2)
# Analysis of sign variants in the colour-naming interaction experiment:
# loads the processed variant data, cleans obvious transcription typos and
# restricts the data to the seven target colour trials.
# NOTE(review): hard-coded user-specific working directory -- will only run
# on the original author's machine.
setwd("~/Documents/MPI/KangSukColours/ColourExperiment/analysis/")
useOnlyDirector = F
d = read.csv("../data/processedData/variants_processed.csv", stringsAsFactors = F)
# drop non-informative sign codes
d = d[d$sign_value!='SAME',]
d = d[d$sign_value!='',]
d = d[d$sign_value!='?',]
# fix transcription typos in the sign labels
d[d$sign_value=="FOLWER",]$sign_value = "FLOWER"
d[d$sign_value=="BIGHT",]$sign_value = "BRIGHT"
d[d$sign_value=="SIGINING",]$sign_value = "SIGNING"
# the seven colour trials of interest, keyed by trial number
colourNumbers = c("1","5",'6',"7","14",'18','24')
colourNames = c("red",'brown','white','black','green','yellow','pink')
names(colourNames) = colourNumbers
colourNamesDark = c("dark red", 'orange','gray', 'dark gray', 'dark green','gold', 'purple')
d = d[d$trial_value %in% colourNumbers,]
# map trial numbers to readable colour names (name-indexing works because
# trial_value is character)
d$trialColourName = colourNames[d$trial_value]
d$trialColourName = factor(d$trialColourName, levels = colourNames)
# NOTE(review): the computed value of `individuals` is immediately
# overwritten by the hard-coded ordering on the next line (dead store).
individuals = unique(c(d$part1,d$part2))
individuals = c("India",'Jordan','Indonesia',"Nepal")
getLetters = function(pairs){
	# Build a 6 x 4 character matrix of pairing labels: each row of `pairs`
	# names two participants who interacted; consecutive pairs alternate
	# between "A" and "B", and every two pairs fill one matrix row.
	# Columns are named after the global `individuals` vector; cells for
	# participants not in a given pairing stay NA.
	lab = matrix(nrow = 6, ncol = 4)
	colnames(lab) = individuals
	row_idx = 1
	pair_letter = "A"
	for (k in seq_len(nrow(pairs))) {
		lab[row_idx, pairs[k, ]$part1] = pair_letter
		lab[row_idx, pairs[k, ]$part2] = pair_letter
		# toggle the label for the next pairing
		pair_letter = if (pair_letter == "A") "B" else "A"
		# two pairings per row: advance after every even pairing
		if (k %% 2 == 0) {
			row_idx = row_idx + 1
		}
	}
	lab
}
makeTamatizPlot = function(res, pairs, monochrome = TRUE) {
  # Draw a 6x4 grid (rows = 3 rounds x 2 weeks, columns = participants) on
  # the currently open graphics device: one cell per participant/round,
  # labelled with the sign in `res` and overlaid with the pairing letters
  # from getLetters(pairs).  Uses the global `individuals` for column labels.
  # monochrome = TRUE keeps every cell white; otherwise each distinct
  # variant gets its own rainbow() colour.
  # FIX: default was `T`, which is a reassignable binding; use TRUE.
  variants = unique((res[!is.na(res)]))
  colours = rainbow(length(variants))
  if (monochrome) {
    colours = rep('white', length(variants))
  }
  names(colours) = variants
  # look up one colour per cell; missing signs render as white
  col = colours[res]
  col[is.na(col)] = 'white'
  col = matrix(col, ncol = 4)
  # note that we're plotting upside down! (y-axis reversed via ylim)
  plot(c(5,1), c(7,1), type='n', xaxt='n', yaxt='n', xlab='', ylab='', bty='n', ylim=c(7,0.9), xlim=c(-0.3,5.2))
  for (j in 1:6) {
    for (i in 1:4) {
      rect(i, j, i+0.9, j+0.9, col = col[j,i])
      text(i+0.5, j+0.8, res[j,i], cex=0.5)
    }
  }
  # axis annotations: participants, rounds, weeks, and a week divider
  text((1:4)+0.5, rep(0.8,4), individuals)
  text(rep(0.4,6), (1:6)+0.5, paste("Round", rep(1:3,2), sep=''))
  text(rep(-0.2,2), c(2,5)+0.5, c("Week 1","Week 3"), srt=90)
  abline(h=3.95)
  # overlay pairing letters; rows are flipped (7-i) to match the reversed axis
  letters = getLetters(pairs)
  for (i in 1:6) {
    for (j in 1:4) {
      text(j+0.5, i+0.5, letters[7-i, j])
    }
  }
}
# d is already in correct time order
# For each target colour, build a 6x4 matrix of the FIRST sign each
# participant produced per round (2 weeks x 3 rounds) and render it with
# makeTamatizPlot() into a per-colour PDF.
for(colourID in colourNumbers){
res = matrix(nrow=6,ncol=4)
colnames(res) = individuals
rowTracker = 1
for(week in unique(d$week)){
# NOTE(review): this `pairs` value is never used -- it is recomputed from
# the full data after the loop, just before plotting.
pairs = unique(paste(d[d$week==week,]$part1,d[d$week==week,]$part2, d[d$week==week,]$session))
dx = d[d$week==week & d$trial_value==colourID,]
dx$pair = paste(dx$part1,dx$part2,dx$session)
for(session in 1:3){
dxx = dx[dx$session==session,]
if(useOnlyDirector){
dxx = dxx[dxx$director==dxx$speaker,]
}
# first sign produced by each speaker in this week/session
firstSigns = tapply(dxx$sign_value,dxx$speakerName,head,n=1)
res[rowTracker,] = firstSigns[individuals]
rowTracker = rowTracker + 1
}
}
# one row per unique (part1, part2, week, session) pairing, in file order
pairs = d[,c("part1","part2","week","session")]
pairs = pairs[!duplicated(apply(pairs,1,paste,collapse='')),]
filename = paste("../results/descriptive/selectionPlots/SelectionPlot_Mono_", colourNames[colourID],".pdf",sep='')
pdf(filename, width=6,height=6)
makeTamatizPlot(res,pairs)
dev.off()
}
##############
makePropSquare = function(prop, x, y, vcolours) {
  # Draw a horizontal stacked bar of total width 0.9 at (x, y): one
  # coloured segment per non-zero entry of `prop` (widths proportional to
  # prop), plus an enclosing outline box of height 1.
  #
  # Args:
  #   prop     : numeric vector of counts/proportions (zeros are dropped)
  #   x, y     : lower-left anchor of the bar on the current plot
  #   vcolours : one colour per entry of prop (kept aligned while filtering)
  keep = prop != 0
  vcolours = vcolours[keep]
  prop = prop[keep]
  if (length(prop) > 0) {
    # order segments alphabetically by colour name so the same variant
    # always occupies the same position across squares
    prop = prop[order(vcolours)]
    vcolours = sort(vcolours)
    widths = 0.9 * prop / sum(prop)
    # cumulative right edges; each left edge follows the previous segment.
    # FIX: the original used x2[1:(length(x2)-1)], which misfires for a
    # single segment (1:0 indexing) and drew a spurious zero-width
    # rectangle; head(x2, -1) handles that case cleanly.
    x2 = x + cumsum(widths)
    x1 = c(x, head(x2, -1))
    rect(x1, y, x2, y + 0.5, col = vcolours)
  }
  rect(x - 0.05, y - 0.5, x + 0.95, y + 0.5)
}
# build one (monochrome) colour per sign variant produced by directors;
# the rainbow() line is kept commented out from an earlier coloured version
nv = unique(d[d$director==d$speaker,]$sign_value)
#vcols = rainbow(length(nv))
vcols = rep('white',length(nv))
# fixed seed so that sample() below is reproducible (a no-op while all
# colours are white, but it matters if rainbow() is re-enabled)
set.seed(127)
vcols = sample(vcols)
names(vcols)= nv
# For each target colour, draw per-participant stacked proportion bars of
# the sign variants used in every week/round cell (via makePropSquare),
# saving one PDF per colour.
for(colourID in colourNumbers){
filename = paste("../results/descriptive/selectionPlots/SelectionPlot_Proportions_Mono_", colourNames[colourID],".pdf",sep='')
pdf(filename, width=6,height=6)
# empty reversed-y canvas; all content is drawn with rect()/text()
plot(c(0,7),c(0,5), ylim=c(6.5,0),xlim=c(-1,5), type='n',xlab='',ylab='', xaxt='n',yaxt='n',bty ='n')
title(colourNames[colourID])
# one row per unique (part1, part2, week, session) pairing, in file order
pairs = d[,c("part1","part2","week","session")]
pairs = pairs[!duplicated(apply(pairs,1,paste,collapse='')),]
letters = getLetters(pairs)
# overlay pairing letters; rows flipped (7-i) to match the reversed axis
for(i in 1:6){
for(j in 1:4){
text(j+0.5,i-0.25,letters[7-i,j])
}
}
text((1:4)+0.5,rep(0,4),individuals)
text(rep(0.3,6),(1:6),paste("Round",rep(1:3,2),sep=''))
text(rep(-0.6,2),c(2,5),c("Week 1","Week 3") ,srt=90)
abline(h=3.5)
rowTracker = 1
for(week in unique(d$week)){
pairs = unique(paste(d[d$week==week,]$part1,d[d$week==week,]$part2, d[d$week==week,]$session))
dx = d[d$week==week & d$trial_value==colourID,]
dx$pair = paste(dx$part1,dx$part2,dx$session)
dx$speakerName = factor(dx$speakerName,levels=individuals)
for(session in 1:3){
dxx = dx[dx$session==session,]
if(useOnlyDirector){
dxx = dxx[dxx$director==dxx$speaker,]
}
# variant-by-speaker counts for this week/session
tx = as.matrix(table(dxx$sign_value,dxx$speakerName))
tx = tx[,individuals]
for(i in 1:4){
# tx collapses to a plain vector when only one variant occurs
if(is.null(dim(tx))){
partVars = tx[i]
} else{
partVars = tx[,i]
}
vcolsx = vcols[rownames(tx)]
if(is.null(rownames(tx))){
vcolsx = vcols[dxx$sign_value[1]]
}
makePropSquare(partVars,i,rowTracker, vcolsx)
}
rowTracker = rowTracker + 1
}
}
dev.off()
}
| /analysis/makeTamarizGraphs_monochrome.R | no_license | seannyD/ColourInteractionExperiment | R | false | false | 5,681 | r | library(ggplot2)
# Analysis of sign variants in the colour-naming interaction experiment:
# loads the processed variant data, cleans obvious transcription typos and
# restricts the data to the seven target colour trials.
# NOTE(review): hard-coded user-specific working directory -- will only run
# on the original author's machine.
setwd("~/Documents/MPI/KangSukColours/ColourExperiment/analysis/")
useOnlyDirector = F
d = read.csv("../data/processedData/variants_processed.csv", stringsAsFactors = F)
# drop non-informative sign codes
d = d[d$sign_value!='SAME',]
d = d[d$sign_value!='',]
d = d[d$sign_value!='?',]
# fix transcription typos in the sign labels
d[d$sign_value=="FOLWER",]$sign_value = "FLOWER"
d[d$sign_value=="BIGHT",]$sign_value = "BRIGHT"
d[d$sign_value=="SIGINING",]$sign_value = "SIGNING"
# the seven colour trials of interest, keyed by trial number
colourNumbers = c("1","5",'6',"7","14",'18','24')
colourNames = c("red",'brown','white','black','green','yellow','pink')
names(colourNames) = colourNumbers
colourNamesDark = c("dark red", 'orange','gray', 'dark gray', 'dark green','gold', 'purple')
d = d[d$trial_value %in% colourNumbers,]
# map trial numbers to readable colour names (name-indexing works because
# trial_value is character)
d$trialColourName = colourNames[d$trial_value]
d$trialColourName = factor(d$trialColourName, levels = colourNames)
# NOTE(review): the computed value of `individuals` is immediately
# overwritten by the hard-coded ordering on the next line (dead store).
individuals = unique(c(d$part1,d$part2))
individuals = c("India",'Jordan','Indonesia',"Nepal")
getLetters = function(pairs){
	# Build a 6 x 4 character matrix of pairing labels: each row of `pairs`
	# names two participants who interacted; consecutive pairs alternate
	# between "A" and "B", and every two pairs fill one matrix row.
	# Columns are named after the global `individuals` vector; cells for
	# participants not in a given pairing stay NA.
	lab = matrix(nrow = 6, ncol = 4)
	colnames(lab) = individuals
	row_idx = 1
	pair_letter = "A"
	for (k in seq_len(nrow(pairs))) {
		lab[row_idx, pairs[k, ]$part1] = pair_letter
		lab[row_idx, pairs[k, ]$part2] = pair_letter
		# toggle the label for the next pairing
		pair_letter = if (pair_letter == "A") "B" else "A"
		# two pairings per row: advance after every even pairing
		if (k %% 2 == 0) {
			row_idx = row_idx + 1
		}
	}
	lab
}
makeTamatizPlot = function(res, pairs, monochrome = TRUE) {
  # Draw a 6x4 grid (rows = 3 rounds x 2 weeks, columns = participants) on
  # the currently open graphics device: one cell per participant/round,
  # labelled with the sign in `res` and overlaid with the pairing letters
  # from getLetters(pairs).  Uses the global `individuals` for column labels.
  # monochrome = TRUE keeps every cell white; otherwise each distinct
  # variant gets its own rainbow() colour.
  # FIX: default was `T`, which is a reassignable binding; use TRUE.
  variants = unique((res[!is.na(res)]))
  colours = rainbow(length(variants))
  if (monochrome) {
    colours = rep('white', length(variants))
  }
  names(colours) = variants
  # look up one colour per cell; missing signs render as white
  col = colours[res]
  col[is.na(col)] = 'white'
  col = matrix(col, ncol = 4)
  # note that we're plotting upside down! (y-axis reversed via ylim)
  plot(c(5,1), c(7,1), type='n', xaxt='n', yaxt='n', xlab='', ylab='', bty='n', ylim=c(7,0.9), xlim=c(-0.3,5.2))
  for (j in 1:6) {
    for (i in 1:4) {
      rect(i, j, i+0.9, j+0.9, col = col[j,i])
      text(i+0.5, j+0.8, res[j,i], cex=0.5)
    }
  }
  # axis annotations: participants, rounds, weeks, and a week divider
  text((1:4)+0.5, rep(0.8,4), individuals)
  text(rep(0.4,6), (1:6)+0.5, paste("Round", rep(1:3,2), sep=''))
  text(rep(-0.2,2), c(2,5)+0.5, c("Week 1","Week 3"), srt=90)
  abline(h=3.95)
  # overlay pairing letters; rows are flipped (7-i) to match the reversed axis
  letters = getLetters(pairs)
  for (i in 1:6) {
    for (j in 1:4) {
      text(j+0.5, i+0.5, letters[7-i, j])
    }
  }
}
# d is already in correct time order
# For each target colour, build a 6x4 matrix of the FIRST sign each
# participant produced per round (2 weeks x 3 rounds) and render it with
# makeTamatizPlot() into a per-colour PDF.
for(colourID in colourNumbers){
res = matrix(nrow=6,ncol=4)
colnames(res) = individuals
rowTracker = 1
for(week in unique(d$week)){
# NOTE(review): this `pairs` value is never used -- it is recomputed from
# the full data after the loop, just before plotting.
pairs = unique(paste(d[d$week==week,]$part1,d[d$week==week,]$part2, d[d$week==week,]$session))
dx = d[d$week==week & d$trial_value==colourID,]
dx$pair = paste(dx$part1,dx$part2,dx$session)
for(session in 1:3){
dxx = dx[dx$session==session,]
if(useOnlyDirector){
dxx = dxx[dxx$director==dxx$speaker,]
}
# first sign produced by each speaker in this week/session
firstSigns = tapply(dxx$sign_value,dxx$speakerName,head,n=1)
res[rowTracker,] = firstSigns[individuals]
rowTracker = rowTracker + 1
}
}
# one row per unique (part1, part2, week, session) pairing, in file order
pairs = d[,c("part1","part2","week","session")]
pairs = pairs[!duplicated(apply(pairs,1,paste,collapse='')),]
filename = paste("../results/descriptive/selectionPlots/SelectionPlot_Mono_", colourNames[colourID],".pdf",sep='')
pdf(filename, width=6,height=6)
makeTamatizPlot(res,pairs)
dev.off()
}
##############
makePropSquare = function(prop, x, y, vcolours) {
  # Draw a horizontal stacked bar of total width 0.9 at (x, y): one
  # coloured segment per non-zero entry of `prop` (widths proportional to
  # prop), plus an enclosing outline box of height 1.
  #
  # Args:
  #   prop     : numeric vector of counts/proportions (zeros are dropped)
  #   x, y     : lower-left anchor of the bar on the current plot
  #   vcolours : one colour per entry of prop (kept aligned while filtering)
  keep = prop != 0
  vcolours = vcolours[keep]
  prop = prop[keep]
  if (length(prop) > 0) {
    # order segments alphabetically by colour name so the same variant
    # always occupies the same position across squares
    prop = prop[order(vcolours)]
    vcolours = sort(vcolours)
    widths = 0.9 * prop / sum(prop)
    # cumulative right edges; each left edge follows the previous segment.
    # FIX: the original used x2[1:(length(x2)-1)], which misfires for a
    # single segment (1:0 indexing) and drew a spurious zero-width
    # rectangle; head(x2, -1) handles that case cleanly.
    x2 = x + cumsum(widths)
    x1 = c(x, head(x2, -1))
    rect(x1, y, x2, y + 0.5, col = vcolours)
  }
  rect(x - 0.05, y - 0.5, x + 0.95, y + 0.5)
}
# build one (monochrome) colour per sign variant produced by directors;
# the rainbow() line is kept commented out from an earlier coloured version
nv = unique(d[d$director==d$speaker,]$sign_value)
#vcols = rainbow(length(nv))
vcols = rep('white',length(nv))
# fixed seed so that sample() below is reproducible (a no-op while all
# colours are white, but it matters if rainbow() is re-enabled)
set.seed(127)
vcols = sample(vcols)
names(vcols)= nv
# For each target colour, draw per-participant stacked proportion bars of
# the sign variants used in every week/round cell (via makePropSquare),
# saving one PDF per colour.
for(colourID in colourNumbers){
filename = paste("../results/descriptive/selectionPlots/SelectionPlot_Proportions_Mono_", colourNames[colourID],".pdf",sep='')
pdf(filename, width=6,height=6)
# empty reversed-y canvas; all content is drawn with rect()/text()
plot(c(0,7),c(0,5), ylim=c(6.5,0),xlim=c(-1,5), type='n',xlab='',ylab='', xaxt='n',yaxt='n',bty ='n')
title(colourNames[colourID])
# one row per unique (part1, part2, week, session) pairing, in file order
pairs = d[,c("part1","part2","week","session")]
pairs = pairs[!duplicated(apply(pairs,1,paste,collapse='')),]
letters = getLetters(pairs)
# overlay pairing letters; rows flipped (7-i) to match the reversed axis
for(i in 1:6){
for(j in 1:4){
text(j+0.5,i-0.25,letters[7-i,j])
}
}
text((1:4)+0.5,rep(0,4),individuals)
text(rep(0.3,6),(1:6),paste("Round",rep(1:3,2),sep=''))
text(rep(-0.6,2),c(2,5),c("Week 1","Week 3") ,srt=90)
abline(h=3.5)
rowTracker = 1
for(week in unique(d$week)){
pairs = unique(paste(d[d$week==week,]$part1,d[d$week==week,]$part2, d[d$week==week,]$session))
dx = d[d$week==week & d$trial_value==colourID,]
dx$pair = paste(dx$part1,dx$part2,dx$session)
dx$speakerName = factor(dx$speakerName,levels=individuals)
for(session in 1:3){
dxx = dx[dx$session==session,]
if(useOnlyDirector){
dxx = dxx[dxx$director==dxx$speaker,]
}
# variant-by-speaker counts for this week/session
tx = as.matrix(table(dxx$sign_value,dxx$speakerName))
tx = tx[,individuals]
for(i in 1:4){
# tx collapses to a plain vector when only one variant occurs
if(is.null(dim(tx))){
partVars = tx[i]
} else{
partVars = tx[,i]
}
vcolsx = vcols[rownames(tx)]
if(is.null(rownames(tx))){
vcolsx = vcols[dxx$sign_value[1]]
}
makePropSquare(partVars,i,rowTracker, vcolsx)
}
rowTracker = rowTracker + 1
}
}
dev.off()
}
|
###############################################################################
#
#
#
#
#
#
# Written by Miguel P Xochicale [http://mxochicale.github.io]
#
# If you see any errors or have any questions
# please create an issue at https://github.com/mxochicale/phd-thesis-code-data/issues
#
###############################################################################
# OUTLINE:
# (0) Definifing paths
# (1) Loading libraries and functions
# (2) Reading
# (3) Creating paths
# (4) Selecting Variables in data.table
# (4.1) Selecting Participants
# (5) Adding vectors
# (5.1) Deleting some Magnetomer and quaternion data
# (5.2) zero mean and unit variance
# (5.3) Savitzky-Golay filter
# (6) Selecting Axis after postprocessing
# (7) Creating preprocessed data path
# (8) Writing data.table object to a file
#################
# Start the clock!
start.time <- Sys.time()
################################################################################
# (0) Defining paths for main_path, r_scripts_path, ..., etc.
# NOTE(review): paths are derived from the current working directory by
# walking up the tree -- the script must be launched from its own folder.
r_scripts_path <- getwd()
setwd("../../../../")
github_repo_path <- getwd()
setwd("../")
github_path <- getwd()
##VERSION
version <- '04'
feature_path <- '/rqa'
## Outcomes Plot Path
outcomes_plot_path <- paste(github_path,"/phd-thesis/figs/results", feature_path, '/v', version,sep="")
## Data Path
data_path <- paste(github_repo_path,'/data-outputs', feature_path, '/v', version, sep="")
setwd(file.path(data_path))
################################################################################
# (1) Loading Functions and Libraries and Setting up digits
library(data.table) # for manipulating data
library(signal)# for butterworth filter and sgolay
library(ggplot2)
library(RColorBrewer)
library(devtools)
# load the in-tree copy of nonlinearTseries and the project helper functions
load_all( paste(github_path, '/nonlinearTseries', sep='' ))
source( paste(github_repo_path,'/code/rfunctions/extra_rqa.R',sep='') )
################################################################################
# (2) Reading data
file_ext <- paste('xdata_v', version, '.dt',sep='')
data <- fread( file_ext, header=TRUE)
# axis for horizontal movements: keep only the three smoothed,
# zero-mean/unit-variance gyroscope-Z columns per grouping key
data <- data[,.(
sg0zmuvGyroZ, sg1zmuvGyroZ, sg2zmuvGyroZ
), by=. (Participant,Activity,Sensor,Sample)]
################################################################################
################################################################################
################################################################################
################################################################################
### (4.1) Windowing Data [xdata[,.SD[1:2],by=.(Participant,Activity,Sensor)]]
W<-NULL#rqas for all windows
##########################
##### one window length (100 samples = the 2-second window -- see notes below)
windowsl <- c(100)
windowsn <- c('w2')
###########################
###### one window lenght
#windowsl <- c(500)
#windowsn <- c('w10')
#
############################
###### four window lenghts
#windowsl <- c(100,250,500,750)
#windowsn <- c('w2', 'w5', 'w10', 'w15')
########################################
#### w2, 2-second window (100 samples) ## 100 to 200
########################################
#### w5, 5-second window (250 samples) # 100 to 350
#######################################
#### w10, 10-second window (500 samples) ## 100 to 600
########################################
#### w15, 15-second window (750 samples) ## 100 to 850
for ( wk in 1:(length(windowsl)) ) {
xdata <- data
windowlengthnumber <- windowsl[wk]
windowksams <- paste('w', windowlengthnumber, sep='')
windowksecs <- windowsn[wk]
message('****************')
message('****************')
message('****************')
message('****************')
message('*** window:', windowksams)
# general variables for window legnth
wstar=100
wend=wstar+windowlengthnumber
windowlength=wend-wstar
windowframe =wstar:wend
wdata <- xdata[,.SD[windowframe],by=.(Participant,Activity,Sensor)];
################################################################################
################################################################################
################################################################################
################################################################################
## (4.2.1) Activities Selection
A<-NULL#rqas for all activities
activities <- c('HN','HF')
#########################################################
for (activity_k in 1:length(activities) ) {
activityk <- activities[activity_k]
message(activityk)
awdata <- wdata
if (activityk == 'HN' ) {
setkey(awdata, Activity)
awdata <- awdata[.(c('HN'))]
} else if (activityk == 'HF' ) {
setkey(awdata, Activity)
awdata <- awdata[.(c('HF'))]
} else if (activityk == 'VN') {
setkey(awdata, Activity)
awdata <- awdata[.(c('VN'))]
} else if (activityk == 'VF') {
setkey(awdata, Activity)
awdata <- awdata[.(c('VF'))]
} else {
message('no valid movement_variable')
}
#message(head(awdata)) ##show head of the activity windowed data table
################################################################################
################################################################################
################################################################################
################################################################################
## (4.2.3) Participants Selection
P<-NULL#rqas for all participants
#number_of_participants <- 1
number_of_participants <- 3
#number_of_participants <- 12
#number_of_participants <- 20
if (number_of_participants == 1) {
setkey(awdata, Participant)
pNN <- c('p01')
pawdata <- awdata[.(
pNN
)]
} else if (number_of_participants == 3) {
setkey(awdata, Participant)
pNN <- c('p01', 'p02', 'p03')
pawdata <- awdata[.(
pNN
)]
} else if (number_of_participants == 12) {
setkey(awdata, Participant)
pNN <- c('p01', 'p02', 'p03', 'p04', 'p05', 'p06', 'p07', 'p08', 'p09', 'p10','p11', 'p12')
pawdata <- awdata[.(
pNN
)]
} else if (number_of_participants == 20) {
setkey(awdata, Participant)
pNN <- c( 'p01', 'p02', 'p03', 'p04', 'p05', 'p06', 'p07', 'p08', 'p09', 'p10',
'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p17', 'p18', 'p19', 'p20')
pawdata <- awdata[.(
pNN
)]
} else {
message('not a valid number_of_participants')
}
for(participants_k in c(1:number_of_participants)){##for(participants_k in c(1:number_of_participants)) {
participantk <- pNN[participants_k]
message('####################')
message('# PARTICIPANT: ', participantk )
setkey(pawdata, Participant)
kpawdata <- pawdata[.( participantk )]
##message(head(kpawdata)) ##show head of participant_k, activity, windowed, data table
#################################################################################
#################################################################################
################################################################################
#################################################################################
################################
#### (4.2.2) Sensor Selection
S<-NULL#rqas for all sensors
#sensors <- c('HS01') # HumanSensor01
sensors <- c('RS01','HS01')# RobotSensor01 and HumanSensor01
#########################################################
for (sensor_k in 1:length(sensors) ) {
sensork <- sensors[sensor_k]
message(sensork)
skpawdata <- kpawdata
if (sensork == 'RS01' ) {
setkey(skpawdata, Sensor)
kskpawdata <- skpawdata[.(c('RS01'))]
} else if (sensork == 'HS01' ) {
setkey(skpawdata, Sensor)
kskpawdata <- skpawdata[.(c('HS01'))]
} else {
message('no valid movement_variable')
}
##message(head(kskpawdata)) ##show head of sensok, particantk, activity, windowed datatable
#################################################################################
#################################################################################
#################################################################################
#################################################################################
### (4.2.4) Axis Selection
a<-NULL# rqas for one axis
axis <- names(kskpawdata)[5: ( length(kskpawdata)) ]
####### Axisk
for (axis_k in c(1:length(axis) )){ #for (axis_k in c(1:length(axis))){
axisk<- axis[axis_k]
message('#### axis:' , axisk )
######################## inputtimeseries
xn <- kskpawdata[, get(axisk) ]
################################################################################
################################################################################
# UNIFORM TIME DELAY EMBEDDING
################################################################################
################################################################################
dimensions <- c(6)
delays <- c(8)
#dimensions <- c(5,6,7)
#delays <- c(5,10,15)
################################################################################
################################################################################
# Uniform time-delay embedding sweep: for every (embedding dimension m,
# time lag tau) pair, compute RQA measures on the current windowed series
# `xn` (set by the enclosing axis loop) and save its recurrence plot.
# NOTE: the original iterated over `(1:100000)[dimensions]`, which for a
# positive in-range integer vector is just `dimensions` itself.
for (dim_i in dimensions) {
  for (tau_j in delays) {
    ## BUG FIX: the original call passed `d=tau_j`, which partially matched
    ## message()'s `domain` argument, so the lag value was consumed as a
    ## translation domain and never printed.
    message('>> Embedding parameters: m=', dim_i, ' tau=', tau_j)
    ############################################################################
    # (3) Outcomes plots path: create it if missing, then move there.
    if (!dir.exists(outcomes_plot_path)) {
      dir.create(outcomes_plot_path, recursive = TRUE)
    }
    setwd(file.path(outcomes_plot_path))
    # File name encodes window, activity, participant, sensor, axis and the
    # zero-padded embedding parameters, e.g. "..._m06t08.png".
    xfile <- paste(windowksams, activityk, participantk, sensork, axisk, sep = '')
    filename_ext <- paste(xfile,
                          "_m", formatC(dim_i, digits = 2, flag = "0"),
                          "t", formatC(tau_j, digits = 2, flag = "0"),
                          ".png", sep = "")
    message(filename_ext)
    # Recurrence threshold (radius). With epsilon = 1.5 some plots came out
    # inconsistent; epsilon = 1 looked consistently fine -- NOTE(review):
    # confirm per data set, detailed visualisation may be needed.
    epsilon <- 1
    rqaa <- rqa(time.series = xn, embedding.dim = dim_i, time.lag = tau_j,
                radius = epsilon, lmin = 2, vmin = 2, do.plot = FALSE,
                distanceToBorder = 2)
    ############################################################################
    # rqa() returns, amongst others:
    #   $recurrence.matrix / $diagonalHistogram / $recurrenceRate  (matrix/vectors)
    #   $REC $RATIO $DET $DIV $Lmax $Lmean $LmeanWithoutMain
    #   $ENTR $LAM $Vmax $Vmean                                    (single values)
    rqas <- as.data.table(t(c(
      rqaa$REC, rqaa$RATIO, rqaa$DET, rqaa$DIV,
      rqaa$Lmax, rqaa$Lmean, rqaa$LmeanWithoutMain,
      rqaa$ENTR, rqaa$LAM,
      rqaa$Vmax, rqaa$Vmean
    )))
    # Tag this row with the current loop identifiers. The original wrapped
    # each constant in a one-off closure (fa(), fs(), ...); a single
    # data.table `:=` assignment is equivalent and clearer.
    rqas[, c("Axis", "Sensor", "Participant", "Activity", "Window") :=
           .(axisk, sensork, participantk, activityk, windowksams)]
    a <- rbind(a, rqas)  # accumulate RQA rows for this axis
    ########################
    # Plot the recurrence matrix for this (m, tau) pair.
    # (local renamed from `rm` to avoid shadowing base::rm)
    rec_mat <- as.matrix(rqaa$recurrence.matrix)
    maxsamplerp <- dim(rec_mat)[1]
    RM <- as.data.table(melt(rec_mat, varnames = c('a', 'b'),
                             value.name = 'Recurrence'))
    ############################################################################
    # (5.0) Creating and changing to the recurrence-plot output path.
    plot_path <- paste(outcomes_plot_path, '/rp_plots', sep = "")
    if (!dir.exists(plot_path)) {
      dir.create(plot_path, recursive = TRUE)
    }
    setwd(file.path(plot_path))
    rpo <- plotRecurrencePlot(RM, maxsamplerp)
    width <- 500
    height <- 500
    saveRP(filename_ext, width, height, rpo)
  } # end for (tau_j in delays)
} # end for (dim_i in dimensions)
#################################################################################
################################################################################
################################################################################
# UNIFORM TIME DELAY EMBEDDING
################################################################################
################################################################################
}##end##for (axis_k in c(1:length(axis) )){
#################################################################################
#################################################################################
#################################################################################
#################################################################################
S <- rbind(S,a) # rqa values with axisk, sensork
}##end##for (sensor_k in 1:length(sensors) ) {
#################################################################################
#################################################################################
#################################################################################
#################################################################################
P <- rbind(P,S) # rqa values with axisk, sensork, particantsk
}##end##for (participants_k in c(1:number_of_participants)) {
################################################################################
################################################################################
################################################################################
################################################################################
A <- rbind(A,P) # rqa values with axisk, sensork, particantsk, activityk
}##end## for (activity_k in 1:length(activities) ) {
################################################################################
################################################################################
################################################################################
################################################################################
W <- rbind(W,A) # rqa values with axisk, sensork, particantsk, activityk, windowksams
} ##end## for ( wk in 1:(length(windowsl)) ) {
################################################################################
################################################################################
################################################################################
################################################################################
#################
# Stop the clock!
end.time <- Sys.time()
end.time - start.time
# message('Execution Time: ', end.time - start.time)
################################################################################
setwd(r_scripts_path) ## go back to the r-script source path
| /code/rscripts/rqa/hri/v04/Ca_rp_aH.R | permissive | mxochicale/phd-thesis-code-data | R | false | false | 14,838 | r | ###############################################################################
#
#
#
#
#
#
# Written by Miguel P Xochicale [http://mxochicale.github.io]
#
# If you see any errors or have any questions
# please create an issue at https://github.com/mxochicale/phd-thesis-code-data/issues
#
###############################################################################
# OUTLINE:
# (0) Definifing paths
# (1) Loading libraries and functions
# (2) Reading
# (3) Creating paths
# (4) Selecting Variables in data.table
# (4.1) Selecting Participants
# (5) Adding vectors
# (5.1) Deleting some Magnetomer and quaternion data
# (5.2) zero mean and unit variance
# (5.3) Savitzky-Golay filter
# (6) Selecting Axis after postprocessing
# (7) Creating preprocessed data path
# (8) Writing data.table object to a file
#################
# Start the clock!
start.time <- Sys.time()
################################################################################
# (0) Defining paths for main_path, r_scripts_path, ..., etc.
r_scripts_path <- getwd()
setwd("../../../../")
github_repo_path <- getwd()
setwd("../")
github_path <- getwd()
##VERSION
version <- '04'
feature_path <- '/rqa'
## Outcomes Plot Path
outcomes_plot_path <- paste(github_path,"/phd-thesis/figs/results", feature_path, '/v', version,sep="")
## Data Path
data_path <- paste(github_repo_path,'/data-outputs', feature_path, '/v', version, sep="")
setwd(file.path(data_path))
################################################################################
# (1) Loading Functions and Libraries and Setting up digits
library(data.table) # for manipulating data
library(signal)# for butterworth filter and sgolay
library(ggplot2)
library(RColorBrewer)
library(devtools)
load_all( paste(github_path, '/nonlinearTseries', sep='' ))
source( paste(github_repo_path,'/code/rfunctions/extra_rqa.R',sep='') )
################################################################################
# (2) Reading data
file_ext <- paste('xdata_v', version, '.dt',sep='')
data <- fread( file_ext, header=TRUE)
# axis for horizontal movments
data <- data[,.(
sg0zmuvGyroZ, sg1zmuvGyroZ, sg2zmuvGyroZ
), by=. (Participant,Activity,Sensor,Sample)]
################################################################################
################################################################################
################################################################################
################################################################################
### (4.1) Windowing Data [xdata[,.SD[1:2],by=.(Participant,Activity,Sensor)]]
W<-NULL#rqas for all windows
##########################
##### one window lenght
windowsl <- c(100)
windowsn <- c('w2')
###########################
###### one window lenght
#windowsl <- c(500)
#windowsn <- c('w10')
#
############################
###### four window lenghts
#windowsl <- c(100,250,500,750)
#windowsn <- c('w2', 'w5', 'w10', 'w15')
########################################
#### w2, 2-second window (100 samples) ## 100 to 200
########################################
#### w5, 5-second window (250 samples) # 100 to 350
#######################################
#### w10, 10-second window (500 samples) ## 100 to 600
########################################
#### w15, 15-second window (750 samples) ## 100 to 850
for ( wk in 1:(length(windowsl)) ) {
xdata <- data
windowlengthnumber <- windowsl[wk]
windowksams <- paste('w', windowlengthnumber, sep='')
windowksecs <- windowsn[wk]
message('****************')
message('****************')
message('****************')
message('****************')
message('*** window:', windowksams)
# general variables for window legnth
wstar=100
wend=wstar+windowlengthnumber
windowlength=wend-wstar
windowframe =wstar:wend
wdata <- xdata[,.SD[windowframe],by=.(Participant,Activity,Sensor)];
################################################################################
################################################################################
################################################################################
################################################################################
## (4.2.1) Activities Selection
A<-NULL#rqas for all activities
activities <- c('HN','HF')
#########################################################
for (activity_k in 1:length(activities) ) {
activityk <- activities[activity_k]
message(activityk)
awdata <- wdata
if (activityk == 'HN' ) {
setkey(awdata, Activity)
awdata <- awdata[.(c('HN'))]
} else if (activityk == 'HF' ) {
setkey(awdata, Activity)
awdata <- awdata[.(c('HF'))]
} else if (activityk == 'VN') {
setkey(awdata, Activity)
awdata <- awdata[.(c('VN'))]
} else if (activityk == 'VF') {
setkey(awdata, Activity)
awdata <- awdata[.(c('VF'))]
} else {
message('no valid movement_variable')
}
#message(head(awdata)) ##show head of the activity windowed data table
################################################################################
################################################################################
################################################################################
################################################################################
## (4.2.3) Participants Selection
P<-NULL#rqas for all participants
#number_of_participants <- 1
number_of_participants <- 3
#number_of_participants <- 12
#number_of_participants <- 20
if (number_of_participants == 1) {
setkey(awdata, Participant)
pNN <- c('p01')
pawdata <- awdata[.(
pNN
)]
} else if (number_of_participants == 3) {
setkey(awdata, Participant)
pNN <- c('p01', 'p02', 'p03')
pawdata <- awdata[.(
pNN
)]
} else if (number_of_participants == 12) {
setkey(awdata, Participant)
pNN <- c('p01', 'p02', 'p03', 'p04', 'p05', 'p06', 'p07', 'p08', 'p09', 'p10','p11', 'p12')
pawdata <- awdata[.(
pNN
)]
} else if (number_of_participants == 20) {
setkey(awdata, Participant)
pNN <- c( 'p01', 'p02', 'p03', 'p04', 'p05', 'p06', 'p07', 'p08', 'p09', 'p10',
'p11', 'p12', 'p13', 'p14', 'p15', 'p16', 'p17', 'p18', 'p19', 'p20')
pawdata <- awdata[.(
pNN
)]
} else {
message('not a valid number_of_participants')
}
for(participants_k in c(1:number_of_participants)){##for(participants_k in c(1:number_of_participants)) {
participantk <- pNN[participants_k]
message('####################')
message('# PARTICIPANT: ', participantk )
setkey(pawdata, Participant)
kpawdata <- pawdata[.( participantk )]
##message(head(kpawdata)) ##show head of participant_k, activity, windowed, data table
#################################################################################
#################################################################################
################################################################################
#################################################################################
################################
#### (4.2.2) Sensor Selection
S<-NULL#rqas for all sensors
#sensors <- c('HS01') # HumanSensor01
sensors <- c('RS01','HS01')# RobotSensor01 and HumanSensor01
#########################################################
for (sensor_k in 1:length(sensors) ) {
sensork <- sensors[sensor_k]
message(sensork)
skpawdata <- kpawdata
if (sensork == 'RS01' ) {
setkey(skpawdata, Sensor)
kskpawdata <- skpawdata[.(c('RS01'))]
} else if (sensork == 'HS01' ) {
setkey(skpawdata, Sensor)
kskpawdata <- skpawdata[.(c('HS01'))]
} else {
message('no valid movement_variable')
}
##message(head(kskpawdata)) ##show head of sensok, particantk, activity, windowed datatable
#################################################################################
#################################################################################
#################################################################################
#################################################################################
### (4.2.4) Axis Selection
a<-NULL# rqas for one axis
axis <- names(kskpawdata)[5: ( length(kskpawdata)) ]
####### Axisk
for (axis_k in c(1:length(axis) )){ #for (axis_k in c(1:length(axis))){
axisk<- axis[axis_k]
message('#### axis:' , axisk )
######################## inputtimeseries
xn <- kskpawdata[, get(axisk) ]
################################################################################
################################################################################
# UNIFORM TIME DELAY EMBEDDING
################################################################################
################################################################################
dimensions <- c(6)
delays <- c(8)
#dimensions <- c(5,6,7)
#delays <- c(5,10,15)
################################################################################
################################################################################
# Uniform time-delay embedding sweep: for every (embedding dimension m,
# time lag tau) pair, compute RQA measures on the current windowed series
# `xn` (set by the enclosing axis loop) and save its recurrence plot.
# NOTE: the original iterated over `(1:100000)[dimensions]`, which for a
# positive in-range integer vector is just `dimensions` itself.
for (dim_i in dimensions) {
  for (tau_j in delays) {
    ## BUG FIX: the original call passed `d=tau_j`, which partially matched
    ## message()'s `domain` argument, so the lag value was consumed as a
    ## translation domain and never printed.
    message('>> Embedding parameters: m=', dim_i, ' tau=', tau_j)
    ############################################################################
    # (3) Outcomes plots path: create it if missing, then move there.
    if (!dir.exists(outcomes_plot_path)) {
      dir.create(outcomes_plot_path, recursive = TRUE)
    }
    setwd(file.path(outcomes_plot_path))
    # File name encodes window, activity, participant, sensor, axis and the
    # zero-padded embedding parameters, e.g. "..._m06t08.png".
    xfile <- paste(windowksams, activityk, participantk, sensork, axisk, sep = '')
    filename_ext <- paste(xfile,
                          "_m", formatC(dim_i, digits = 2, flag = "0"),
                          "t", formatC(tau_j, digits = 2, flag = "0"),
                          ".png", sep = "")
    message(filename_ext)
    # Recurrence threshold (radius). With epsilon = 1.5 some plots came out
    # inconsistent; epsilon = 1 looked consistently fine -- NOTE(review):
    # confirm per data set, detailed visualisation may be needed.
    epsilon <- 1
    rqaa <- rqa(time.series = xn, embedding.dim = dim_i, time.lag = tau_j,
                radius = epsilon, lmin = 2, vmin = 2, do.plot = FALSE,
                distanceToBorder = 2)
    ############################################################################
    # rqa() returns, amongst others:
    #   $recurrence.matrix / $diagonalHistogram / $recurrenceRate  (matrix/vectors)
    #   $REC $RATIO $DET $DIV $Lmax $Lmean $LmeanWithoutMain
    #   $ENTR $LAM $Vmax $Vmean                                    (single values)
    rqas <- as.data.table(t(c(
      rqaa$REC, rqaa$RATIO, rqaa$DET, rqaa$DIV,
      rqaa$Lmax, rqaa$Lmean, rqaa$LmeanWithoutMain,
      rqaa$ENTR, rqaa$LAM,
      rqaa$Vmax, rqaa$Vmean
    )))
    # Tag this row with the current loop identifiers. The original wrapped
    # each constant in a one-off closure (fa(), fs(), ...); a single
    # data.table `:=` assignment is equivalent and clearer.
    rqas[, c("Axis", "Sensor", "Participant", "Activity", "Window") :=
           .(axisk, sensork, participantk, activityk, windowksams)]
    a <- rbind(a, rqas)  # accumulate RQA rows for this axis
    ########################
    # Plot the recurrence matrix for this (m, tau) pair.
    # (local renamed from `rm` to avoid shadowing base::rm)
    rec_mat <- as.matrix(rqaa$recurrence.matrix)
    maxsamplerp <- dim(rec_mat)[1]
    RM <- as.data.table(melt(rec_mat, varnames = c('a', 'b'),
                             value.name = 'Recurrence'))
    ############################################################################
    # (5.0) Creating and changing to the recurrence-plot output path.
    plot_path <- paste(outcomes_plot_path, '/rp_plots', sep = "")
    if (!dir.exists(plot_path)) {
      dir.create(plot_path, recursive = TRUE)
    }
    setwd(file.path(plot_path))
    rpo <- plotRecurrencePlot(RM, maxsamplerp)
    width <- 500
    height <- 500
    saveRP(filename_ext, width, height, rpo)
  } # end for (tau_j in delays)
} # end for (dim_i in dimensions)
#################################################################################
################################################################################
################################################################################
# UNIFORM TIME DELAY EMBEDDING
################################################################################
################################################################################
}##end##for (axis_k in c(1:length(axis) )){
#################################################################################
#################################################################################
#################################################################################
#################################################################################
S <- rbind(S,a) # rqa values with axisk, sensork
}##end##for (sensor_k in 1:length(sensors) ) {
#################################################################################
#################################################################################
#################################################################################
#################################################################################
P <- rbind(P,S) # rqa values with axisk, sensork, particantsk
}##end##for (participants_k in c(1:number_of_participants)) {
################################################################################
################################################################################
################################################################################
################################################################################
A <- rbind(A,P) # rqa values with axisk, sensork, particantsk, activityk
}##end## for (activity_k in 1:length(activities) ) {
################################################################################
################################################################################
################################################################################
################################################################################
W <- rbind(W,A) # rqa values with axisk, sensork, particantsk, activityk, windowksams
} ##end## for ( wk in 1:(length(windowsl)) ) {
################################################################################
################################################################################
################################################################################
################################################################################
#################
# Stop the clock!
end.time <- Sys.time()
end.time - start.time
# message('Execution Time: ', end.time - start.time)
################################################################################
setwd(r_scripts_path) ## go back to the r-script source path
|
#' qdap Chaining
#'
#' \code{\%&\%} - Chain \code{\link[qdap]{qdap_df}}s to \pkg{qdap} functions with a
#' \code{text.var} argument. Saves typing of an explicit \code{text.var}
#' argument and supplying a \code{\link[base]{data.frame}}.
#'
#' @param qdap_df.object A \code{\link[base]{data.frame}} of the class
#' \code{"qdap_df"}.
#' @param qdap.fun A \pkg{qdap} function with a \code{text.var} argument.
#' @references Inspired by \pkg{dplyr}'s \code{\link[dplyr]{\%.\%}} and
#' \pkg{magrittr}'s \code{\link[magrittr]{\%>\%}} functionality.
#' @keywords pipe chain chaining
#' @seealso \code{\link[dplyr]{\%>\%}},
#' \code{\link[qdap]{qdap_df}}
#' @export
#' @rdname chain
#' @examples
#' \dontrun{
#' dat <- qdap_df(DATA, state)
#' dat %&% trans_cloud(grouping.var=person)
#' dat %&% trans_cloud(grouping.var=person, text.var=stemmer(DATA$state))
#' dat %&% termco(grouping.var=person, match.list=list("fun", "computer"))
#'
#' ## Various examples with qdap functions (sentSplit gives class "qdap_df")
#' dat <- sentSplit(DATA, "state")
#' dat %&% trans_cloud(grouping.var=person)
#' dat %&% termco(person, match.list=list("fun", "computer"))
#' dat %&% trans_venn(person)
#' dat %&% polarity(person)
#' dat %&% formality(person)
#' dat %&% automated_readability_index(person)
#' dat %&% Dissimilarity(person)
#' dat %&% gradient_cloud(sex)
#' dat %&% dispersion_plot(c("fun", "computer"))
#' dat %&% discourse_map(list(sex, adult))
#' dat %&% gantt_plot(person)
#' dat %&% word_list(adult)
#' dat %&% end_mark_by(person)
#' dat %&% end_mark()
#' dat %&% word_stats(person)
#' dat %&% wfm(person)
#' dat %&% word_cor(person, "i")
#' dat %&% sentCombine(person)
#' dat %&% question_type(person)
#' dat %&% word_network_plot()
#' dat %&% character_count()
#' dat %&% char_table(person)
#' dat %&% phrase_net(2, .1)
#' dat %&% boolean_search("it||!")
#' dat %&% trans_context(person, which(end_mark(DATA.SPLIT[, "state"]) == "?"))
#' dat %&% mgsub(c("it's", "I'm"), c("it is", "I am"))
#'
#' ## combine with magrittr/dplyr chaining
#' dat %&% wfm(person) %>% plot()
#' dat %&% polarity(person) %>% scores()
#' dat %&% polarity(person) %>% counts()
#' dat %&% polarity(person) %>% scores()
#' dat %&% polarity(person) %>% scores() %>% plot()
#' dat %&% polarity(person) %>% scores %>% plot
#'
#' ## Change text column in `qdap_df` (Example 1)
#' dat2 <- sentSplit(DATA, "state", stem.col = TRUE)
#' class(dat2)
#' dat2 %&% trans_cloud()
#' Text(dat2)
#' ## change the `text.var` column
#' Text(dat2) <- "stem.text"
#' dat2 %&% trans_cloud()
#'
#' ## Change text column in `qdap_df` (Example 2)
#' (dat2$fake_dat <- paste(emoticon[1:11,2], dat2$state))
#' Text(dat2) <- "fake_dat"
#' (m <- dat2 %&% sub_holder(emoticon[,2]))
#' m$unhold(strip(m$output))
#' }
`%&%` <- function(qdap_df.object, qdap.fun) {
# Guard: the left-hand side must carry the "qdap_df" class
# (as produced by qdap_df() or sentSplit()).
stopifnot(inherits(qdap_df.object, "qdap_df"))
# Capture the right-hand side as an unevaluated call so its arguments can
# be inspected and modified before evaluation (non-standard evaluation).
thecall <- substitute(qdap.fun)
# First element of the call is the symbol of the function being invoked.
the_fun <- as.list(thecall)[[1]]
if(!"text.var" %in% names(formals(match.fun(the_fun)))) {
stop(sprintf("%s does not have `text.var` as a formal argument",
as.character(the_fun)))
}
# If the caller did not supply text.var explicitly, inject the column name
# recorded in the object's "qdap_df_text.var" attribute.
if(is.null(thecall$text.var)) {
thecall$text.var <- as.name(attributes(qdap_df.object)[["qdap_df_text.var"]])
}
# Evaluate the (possibly modified) call with the qdap_df as the data
# environment, falling back to the caller's frame for everything else.
eval(thecall, qdap_df.object, parent.frame())
}
#' qdap Chaining
#'
#' \code{\%>\%} - The \pkg{magrittr} "then" chain operator imported by
#' \pkg{dplyr}. Imported for convenience. See
#' \url{https://github.com/smbache/magrittr} for details.
#'
#' @param lhs The value to be piped.
#' @param rhs A function or expression.
#' @export
#' @importFrom dplyr tbl_df
#' @rdname chain
# Re-export dplyr's pipe (originally from magrittr) so users get %>%
# without attaching dplyr or magrittr themselves.
`%>%` <- dplyr::`%>%`
| /R/chaining.R | no_license | Maddocent/qdap | R | false | false | 3,661 | r | #' qdap Chaining
#'
#' \code{\%&\%} - Chain \code{\link[qdap]{qdap_df}}s to \pkg{qdap} functions with a
#' \code{text.var} argument. Saves typing of an explicit \code{text.var}
#' argument and supplying a \code{\link[base]{data.frame}}.
#'
#' @param qdap_df.object A \code{\link[base]{data.frame}} of the class
#' \code{"qdap_df"}.
#' @param qdap.fun A \pkg{qdap} function with a \code{text.var} argument.
#' @references Inspired by \pkg{dplyr}'s \code{\link[dplyr]{\%.\%}} and
#' \pkg{magrittr}'s \code{\link[magrittr]{\%>\%}} functionality.
#' @keywords pipe chain chaining
#' @seealso \code{\link[dplyr]{\%>\%}},
#' \code{\link[qdap]{qdap_df}}
#' @export
#' @rdname chain
#' @examples
#' \dontrun{
#' dat <- qdap_df(DATA, state)
#' dat %&% trans_cloud(grouping.var=person)
#' dat %&% trans_cloud(grouping.var=person, text.var=stemmer(DATA$state))
#' dat %&% termco(grouping.var=person, match.list=list("fun", "computer"))
#'
#' ## Various examples with qdap functions (sentSplit gives class "qdap_df")
#' dat <- sentSplit(DATA, "state")
#' dat %&% trans_cloud(grouping.var=person)
#' dat %&% termco(person, match.list=list("fun", "computer"))
#' dat %&% trans_venn(person)
#' dat %&% polarity(person)
#' dat %&% formality(person)
#' dat %&% automated_readability_index(person)
#' dat %&% Dissimilarity(person)
#' dat %&% gradient_cloud(sex)
#' dat %&% dispersion_plot(c("fun", "computer"))
#' dat %&% discourse_map(list(sex, adult))
#' dat %&% gantt_plot(person)
#' dat %&% word_list(adult)
#' dat %&% end_mark_by(person)
#' dat %&% end_mark()
#' dat %&% word_stats(person)
#' dat %&% wfm(person)
#' dat %&% word_cor(person, "i")
#' dat %&% sentCombine(person)
#' dat %&% question_type(person)
#' dat %&% word_network_plot()
#' dat %&% character_count()
#' dat %&% char_table(person)
#' dat %&% phrase_net(2, .1)
#' dat %&% boolean_search("it||!")
#' dat %&% trans_context(person, which(end_mark(DATA.SPLIT[, "state"]) == "?"))
#' dat %&% mgsub(c("it's", "I'm"), c("it is", "I am"))
#'
#' ## combine with magrittr/dplyr chaining
#' dat %&% wfm(person) %>% plot()
#' dat %&% polarity(person) %>% scores()
#' dat %&% polarity(person) %>% counts()
#' dat %&% polarity(person) %>% scores()
#' dat %&% polarity(person) %>% scores() %>% plot()
#' dat %&% polarity(person) %>% scores %>% plot
#'
#' ## Change text column in `qdap_df` (Example 1)
#' dat2 <- sentSplit(DATA, "state", stem.col = TRUE)
#' class(dat2)
#' dat2 %&% trans_cloud()
#' Text(dat2)
#' ## change the `text.var` column
#' Text(dat2) <- "stem.text"
#' dat2 %&% trans_cloud()
#'
#' ## Change text column in `qdap_df` (Example 2)
#' (dat2$fake_dat <- paste(emoticon[1:11,2], dat2$state))
#' Text(dat2) <- "fake_dat"
#' (m <- dat2 %&% sub_holder(emoticon[,2]))
#' m$unhold(strip(m$output))
#' }
`%&%` <- function(qdap_df.object, qdap.fun) {
## Chain operator for `qdap_df` objects: evaluates a qdap function call with
## the data frame's stored text column automatically supplied as `text.var`.
##
## Args:
##   qdap_df.object: a data frame of class "qdap_df" carrying the attribute
##     "qdap_df_text.var" that names its text column.
##   qdap.fun: an (unevaluated) call to a qdap function; the function must
##     have a `text.var` formal argument.
##
## Returns whatever the wrapped qdap function returns.
stopifnot(inherits(qdap_df.object, "qdap_df"))
## Capture the right-hand-side call unevaluated so it can be modified.
thecall <- substitute(qdap.fun)
## First element of the call is the function symbol being invoked.
the_fun <- as.list(thecall)[[1]]
if(!"text.var" %in% names(formals(match.fun(the_fun)))) {
stop(sprintf("%s does not have `text.var` as a formal argument",
as.character(the_fun)))
}
## Inject the recorded text column unless the caller passed text.var
## explicitly in the call.
if(is.null(thecall$text.var)) {
thecall$text.var <- as.name(attributes(qdap_df.object)[["qdap_df_text.var"]])
}
## Evaluate with the data frame as the environment (so bare column names
## resolve), falling back to the caller's frame for everything else.
eval(thecall, qdap_df.object, parent.frame())
}
#' qdap Chaining
#'
#' \code{\%>\%} - The \pkg{magrittr} "then" chain operator imported by
#' \pkg{dplyr}. Imported for convenience. See
#' \url{https://github.com/smbache/magrittr} for details.
#'
#' @param lhs The value to be piped.
#' @param rhs A function or expression.
#' @export
#' @importFrom dplyr tbl_df
#' @rdname chain
`%>%` <- dplyr::`%>%`  # re-export so users get the pipe without attaching dplyr
|
context("Test StratifiedPartition")
# Minimal project stub and target name shared by the SetTarget tests below;
# no real DataRobot project is contacted.
fakeProjectId <- "project-id"
fakeProject <- list(projectName = "FakeProject",
projectId = fakeProjectId,
fileName = "fake.csv",
created = "faketimestamp")
fakeTarget <- "fake-target"
# Calling the constructor with no arguments, or with only the validation
# type, must raise an error.
test_that("Required parameters are present", {
  expect_error(CreateStratifiedPartition())
  expect_error(CreateStratifiedPartition(validationType = "CV"))
})
# Cross-validation partitions require `reps`; once supplied, the returned
# object must carry the stratified CV settings verbatim.
test_that("validationType = 'CV' option", {
  expect_error(
    CreateStratifiedPartition(validationType = "CV", holdoutPct = 20),
    "reps must be specified"
  )
  part <- CreateStratifiedPartition(validationType = "CV",
                                    holdoutPct = 20, reps = 5)
  expect_equal(length(part), 4)
  expect_equal(part$cvMethod, "stratified")
  expect_equal(part$validationType, "CV")
  expect_equal(part$holdoutPct, 20)
  expect_equal(part$reps, 5)
})
# Train/validate/holdout partitions require `validationPct`; once supplied,
# the returned object must carry the stratified TVH settings verbatim.
test_that("validationType = 'TVH' option", {
  expect_error(
    CreateStratifiedPartition(validationType = "TVH", holdoutPct = 20),
    "validationPct must be specified"
  )
  part <- CreateStratifiedPartition(validationType = "TVH",
                                    holdoutPct = 20,
                                    validationPct = 16)
  expect_equal(length(part), 4)
  expect_equal(part$cvMethod, "stratified")
  expect_equal(part$validationType, "TVH")
  expect_equal(part$holdoutPct, 20)
  expect_equal(part$validationPct, 16)
})
# A CV stratified partition must be accepted by SetTarget. All network-facing
# helpers are mocked, so only argument plumbing is exercised.
test_that("validationType = 'CV' option can be used to SetTarget", {
with_mock("GetProjectStatus" = function(...) { list("stage" = "aim") },
"datarobot::DataRobotPATCH" = function(...) {
list(...) # Resolve params to test that they pass without error
},
"datarobot::WaitForAsyncReturn" = function(...) { "How about not" }, {
stratifiedPartition <- CreateStratifiedPartition(validationType = "CV",
holdoutPct = 20,
reps = 5)
SetTarget(project = fakeProject,
target = fakeTarget,
partition = stratifiedPartition)
})
})
# A TVH stratified partition must be accepted by SetTarget. All network-facing
# helpers are mocked, so only argument plumbing is exercised.
test_that("validationType = 'TVH' option can be used to SetTarget", {
with_mock("GetProjectStatus" = function(...) { list("stage" = "aim") },
"datarobot::DataRobotPATCH" = function(...) {
list(...) # Resolve params to test that they pass without error
},
"datarobot::WaitForAsyncReturn" = function(...) { "How about not" }, {
stratifiedPartition <- CreateStratifiedPartition(validationType = "TVH",
holdoutPct = 20,
validationPct = 16)
SetTarget(project = fakeProject,
target = fakeTarget,
partition = stratifiedPartition)
})
})
# Unrecognized validation types are rejected outright.
test_that("Invalid validationType returns message", {
  expect_error(CreateStratifiedPartition(validationType = "XYZ",
                                         holdoutPct = 20,
                                         validationPct = 16))
})
| /data/genthat_extracted_code/datarobot/tests/test-CreateStratifiedPartition.R | no_license | surayaaramli/typeRrh | R | false | false | 3,356 | r | context("Test StratifiedPartition")
# Fixtures shared by the SetTarget tests below; no real project is contacted.
fakeProjectId <- "project-id"
fakeProject <- list(projectName = "FakeProject",
projectId = fakeProjectId,
fileName = "fake.csv",
created = "faketimestamp")
fakeTarget <- "fake-target"
# Constructor must fail when mandatory arguments are missing.
test_that("Required parameters are present", {
expect_error(CreateStratifiedPartition())
expect_error(CreateStratifiedPartition(validationType = "CV"))
})
# CV partitions require `reps` and echo back the stratified CV settings.
test_that("validationType = 'CV' option", {
expect_error(CreateStratifiedPartition(validationType = "CV",
holdoutPct = 20),
"reps must be specified")
ValidCase <- CreateStratifiedPartition(validationType = "CV",
holdoutPct = 20, reps = 5)
expect_equal(length(ValidCase), 4)
expect_equal(ValidCase$cvMethod, "stratified")
expect_equal(ValidCase$validationType, "CV")
expect_equal(ValidCase$holdoutPct, 20)
expect_equal(ValidCase$reps, 5)
})
# TVH partitions require `validationPct` and echo back the TVH settings.
test_that("validationType = 'TVH' option", {
expect_error(CreateStratifiedPartition(validationType = "TVH",
holdoutPct = 20),
"validationPct must be specified")
ValidCase <- CreateStratifiedPartition(validationType = "TVH",
holdoutPct = 20,
validationPct = 16)
expect_equal(length(ValidCase), 4)
expect_equal(ValidCase$cvMethod, "stratified")
expect_equal(ValidCase$validationType, "TVH")
expect_equal(ValidCase$holdoutPct, 20)
expect_equal(ValidCase$validationPct, 16)
})
# SetTarget must accept a CV stratified partition; network helpers mocked.
test_that("validationType = 'CV' option can be used to SetTarget", {
with_mock("GetProjectStatus" = function(...) { list("stage" = "aim") },
"datarobot::DataRobotPATCH" = function(...) {
list(...) # Resolve params to test that they pass without error
},
"datarobot::WaitForAsyncReturn" = function(...) { "How about not" }, {
stratifiedPartition <- CreateStratifiedPartition(validationType = "CV",
holdoutPct = 20,
reps = 5)
SetTarget(project = fakeProject,
target = fakeTarget,
partition = stratifiedPartition)
})
})
# SetTarget must accept a TVH stratified partition; network helpers mocked.
test_that("validationType = 'TVH' option can be used to SetTarget", {
with_mock("GetProjectStatus" = function(...) { list("stage" = "aim") },
"datarobot::DataRobotPATCH" = function(...) {
list(...) # Resolve params to test that they pass without error
},
"datarobot::WaitForAsyncReturn" = function(...) { "How about not" }, {
stratifiedPartition <- CreateStratifiedPartition(validationType = "TVH",
holdoutPct = 20,
validationPct = 16)
SetTarget(project = fakeProject,
target = fakeTarget,
partition = stratifiedPartition)
})
})
# Unrecognized validation types are rejected outright.
test_that("Invalid validationType returns message", {
expect_error(CreateStratifiedPartition(validationType = "XYZ",
holdoutPct = 20,
validationPct = 16))
})
|
# Test runner: executes the full testthat suite for the repart package.
library(testthat)
library(repart)
test_check("repart")
| /tests/testthat.R | permissive | wes-brooks/repart | R | false | false | 56 | r | library(testthat)
# Runs the repart package test suite (library(testthat) is loaded above).
library(repart)
test_check("repart")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocess.R
\name{chunk_users}
\alias{chunk_users}
\alias{chunk_users_data}
\title{Chunk users}
\usage{
chunk_users(x, n = 50)
chunk_users_data(x, n = 50)
}
\arguments{
\item{x}{Input vector of users. Duplicates and missing values will be removed}
\item{n}{Number of users per chunk (users returned in each element of output)}
}
\value{
chunk_users: returns a list containing character vectors
chunk_users_data: returns a list containing data frames
}
\description{
Convert an atomic vector of users into a list of atomic vectors
}
\examples{
## this generates a vector of user-ID like values
users <- replicate(1000, paste(sample(0:9, 14, replace = TRUE), collapse = ""))
## break users into 100-user chunks
chunky <- chunk_users(users, n = 100)
## preview returned object
str(chunky, 1)
}
| /man/chunk_users.Rd | permissive | schoulten/tweetbotornot2 | R | false | true | 876 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocess.R
\name{chunk_users}
\alias{chunk_users}
\alias{chunk_users_data}
\title{Chunk users}
\usage{
chunk_users(x, n = 50)
chunk_users_data(x, n = 50)
}
\arguments{
\item{x}{Input vector of users. Duplicates and missing values will be removed}
\item{n}{Number of users per chunk (users returned in each element of output)}
}
\value{
chunk_users: returns a list containing character vectors
chunk_users_data: returns a list containing data frames
}
\description{
Convert an atomic vector of users into a list of atomic vectors
}
\examples{
## this generates a vector of user-ID like values
users <- replicate(1000, paste(sample(0:9, 14, replace = TRUE), collapse = ""))
## break users into 100-user chunks
chunky <- chunk_users(users, n = 100)
## preview returned object
str(chunky, 1)
}
|
# Demonstrates reading web-hosted datasets with readr:
#   1. fixed-width hurricane track records via read_fwf()
#   2. a comma-separated Zika case file via read_csv()
library(readr)
ext_tracks_file <- paste0("http://rammb.cira.colostate.edu/research/",
"tropical_cyclones/tc_extended_best_track_dataset/",
"data/ebtrk_atlc_1988_2015.txt")
# Create a vector of the width of each column
ext_tracks_widths <- c(7, 10, 2, 2, 3, 5, 5, 6, 4, 5, 4, 4, 5, 3, 4, 3, 3, 3,
4, 3, 3, 3, 4, 3, 3, 3, 2, 6, 1)
# Create a vector of column names, based on the online documentation for this data
ext_tracks_colnames <- c("storm_id", "storm_name", "month", "day",
"hour", "year", "latitude", "longitude",
"max_wind", "min_pressure", "rad_max_wind",
"eye_diameter", "pressure_1", "pressure_2",
paste("radius_34", c("ne", "se", "sw", "nw"), sep = "_"),
paste("radius_50", c("ne", "se", "sw", "nw"), sep = "_"),
paste("radius_64", c("ne", "se", "sw", "nw"), sep = "_"),
"storm_type", "distance_to_land", "final")
# Read the file in from its url
# (the file encodes missing values as -99)
ext_tracks <- read_fwf(ext_tracks_file,
fwf_widths(ext_tracks_widths, ext_tracks_colnames),
na = "-99")
ext_tracks[1:3, 1:9]
library(dplyr)
# Spot-check four random Katrina records.
ext_tracks %>%
filter(storm_name == "KATRINA") %>%
select(month, day, hour, max_wind, min_pressure, rad_max_wind) %>%
sample_n(4)
# data on Zika cases
zika_file <- paste0("https://raw.githubusercontent.com/cdcepi/zika/master/",
"Brazil/COES_Microcephaly/data/COES_Microcephaly-2016-06-25.csv")
zika_brazil <- read_csv(zika_file)
zika_brazil %>%
select(location, value, unit)
| /webbased_data.R | no_license | deepak18/r-common-packages | R | false | false | 1,707 | r | library(readr)
# Example of reading web-hosted data: fixed-width hurricane tracks (read_fwf)
# and a CSV of Zika cases (read_csv).
ext_tracks_file <- paste0("http://rammb.cira.colostate.edu/research/",
"tropical_cyclones/tc_extended_best_track_dataset/",
"data/ebtrk_atlc_1988_2015.txt")
# Create a vector of the width of each column
ext_tracks_widths <- c(7, 10, 2, 2, 3, 5, 5, 6, 4, 5, 4, 4, 5, 3, 4, 3, 3, 3,
4, 3, 3, 3, 4, 3, 3, 3, 2, 6, 1)
# Create a vector of column names, based on the online documentation for this data
ext_tracks_colnames <- c("storm_id", "storm_name", "month", "day",
"hour", "year", "latitude", "longitude",
"max_wind", "min_pressure", "rad_max_wind",
"eye_diameter", "pressure_1", "pressure_2",
paste("radius_34", c("ne", "se", "sw", "nw"), sep = "_"),
paste("radius_50", c("ne", "se", "sw", "nw"), sep = "_"),
paste("radius_64", c("ne", "se", "sw", "nw"), sep = "_"),
"storm_type", "distance_to_land", "final")
# Read the file in from its url
# (the file encodes missing values as -99)
ext_tracks <- read_fwf(ext_tracks_file,
fwf_widths(ext_tracks_widths, ext_tracks_colnames),
na = "-99")
ext_tracks[1:3, 1:9]
library(dplyr)
# Spot-check four random Katrina records.
ext_tracks %>%
filter(storm_name == "KATRINA") %>%
select(month, day, hour, max_wind, min_pressure, rad_max_wind) %>%
sample_n(4)
# data on Zika cases
zika_file <- paste0("https://raw.githubusercontent.com/cdcepi/zika/master/",
"Brazil/COES_Microcephaly/data/COES_Microcephaly-2016-06-25.csv")
zika_brazil <- read_csv(zika_file)
zika_brazil %>%
select(location, value, unit)
|
recode_data_main <-
function (data, dimens, alpha) {
  # Recode all predictor columns and a binary response to 0-based numeric
  # factor codes; for predictors declared as 3-level, additionally search for
  # the best binary collapsing via optimal_coding().
  #
  # Args:
  #   data:   data frame whose LAST column is the (binary) response.
  #   dimens: integer vector giving the maximum number of levels per column.
  #   alpha:  significance level forwarded to optimal_coding().
  #
  # Returns:
  #   list(recoded_data   = data frame of numeric 0-based codes (same
  #                         dimensions and column names as `data`),
  #        recoded_dimens = `dimens`, updated where a 3-level column was
  #                         collapsed to 2 levels)
  n_row <- nrow(data)
  n_col <- ncol(data)
  recoded_data <- matrix(0, nrow = n_row, ncol = n_col)
  # Recode each predictor column to 0-based factor codes, validating that the
  # observed number of levels does not exceed the declared dimension.
  for (j in seq_len(n_col - 1)) {
    v <- as.factor(data[, j])
    if (nlevels(v) > dimens[j]) {
      stop(paste("The dimens vector does not agree with the data. For example, dimens[", j, "] must be increased to at least ", nlevels(v), ".", sep = ""))
    }
    recoded_data[, j] <- as.double(v) - 1
  }
  # The response (last column) must be strictly binary.
  v <- as.factor(data[, n_col])
  if (nlevels(v) != 2)
    stop("Response must be binary")
  recoded_data[, n_col] <- as.double(v) - 1
  recoded_data <- as.data.frame(recoded_data)
  colnames(recoded_data) <- colnames(data)
  recoded_dimens <- dimens
  response <- recoded_data[, n_col]
  # For each declared 3-level predictor, offer optimal_coding() the original
  # 3-level coding plus the three possible binary collapsings (one indicator
  # per level), and keep whichever coding it selects.
  for (i in seq_len(n_col - 1)) {
    if (dimens[i] == 3) {
      candidates <- matrix(0, nrow = n_row, ncol = 4)
      candidates[, 1] <- data[, i]
      candidates[, 2] <- ifelse(recoded_data[, i] == 0, 0, 1)
      candidates[, 3] <- ifelse(recoded_data[, i] == 1, 0, 1)
      candidates[, 4] <- ifelse(recoded_data[, i] == 2, 0, 1)
      temp <- optimal_coding(as.data.frame(cbind(candidates, response)),
                             dimens = c(3, 2, 2, 2, 2), alpha = alpha)
      recoded_data[, i] <- temp[[1]]
      recoded_dimens[i] <- temp[[2]]
    }
  }
  list(recoded_data = recoded_data, recoded_dimens = recoded_dimens)
}
| /genMOSS/R/recode_data_main.R | no_license | ingted/R-Examples | R | false | false | 1,355 | r | recode_data_main <-
function (data, dimens, alpha) {
# Recodes predictor columns and a binary response (last column) to 0-based
# numeric factor codes; for predictors declared as 3-level in `dimens`, asks
# optimal_coding() to pick between the 3-level coding and the three possible
# binary collapsings. Returns list(recoded_data, recoded_dimens).
recoded_data <- array (0, c(dim(data)[1], dim(data)[2]))
# Recode each predictor column, validating the declared level count.
for (j in 1:(dim(data)[2]-1)) {
v <- as.factor(data[,j])
if (length(levels(v)) > dimens[j]) {
stop(paste("The dimens vector does not agree with the data. For example, dimens[",j,"] must be increased to at least ", length(levels(v)), ".", sep = ""))
}
recoded_data[,j] <- as.double(v) - 1
}
# The response (last column) must be strictly binary.
v <- as.factor(data[,dim(data)[2]])
if (length(levels(v)) != 2)
stop ("Response must be binary")
recoded_data[,dim(data)[2]] <- as.double(v) - 1
recoded_data <- as.data.frame(recoded_data)
colnames(recoded_data) <- colnames(data)
recoded_dimens <- dimens
response <- recoded_data[,dim(data)[2]]
# For each declared 3-level predictor, build candidate codings: the original
# column plus one binary indicator per level, then keep the coding that
# optimal_coding() selects (updating the recorded dimension to match).
for (i in 1:(dim(data)[2]-1)) {
if (dimens[i] == 3) {
candidates <- array (0, c(dim(data)[1],4))
candidates[,1] <- data[,i]
candidates[,2] <- ifelse (recoded_data[,i] == 0, 0, 1)
candidates[,3] <- ifelse (recoded_data[,i] == 1, 0, 1)
candidates[,4] <- ifelse (recoded_data[,i] == 2, 0, 1)
temp <- optimal_coding (as.data.frame(cbind(candidates, response)), dimens = c(3,2,2,2,2), alpha = alpha)
recoded_data[,i] <- temp[[1]]
recoded_dimens[i] <- temp[[2]]
}
}
return(list(recoded_data = recoded_data, recoded_dimens = recoded_dimens))
}
|
../../../../System/Library/Frameworks/CoreServices.framework/Frameworks/CarbonCore.framework/Headers/OSUtils.r | /MacOSX10.4u.sdk/Developer/Headers/CFMCarbon/CarbonCore/OSUtils.r | no_license | alexey-lysiuk/macos-sdk | R | false | false | 110 | r | ../../../../System/Library/Frameworks/CoreServices.framework/Frameworks/CarbonCore.framework/Headers/OSUtils.r |
\name{getDescendants}
\alias{getDescendants}
\title{Get descendant node numbers}
\usage{
getDescendants(tree, node, curr=NULL)
}
\arguments{
\item{tree}{a phylogenetic tree as an object of class \code{"phylo"}.}
\item{node}{an integer specifying a node number in the tree.}
\item{curr}{the set of previously stored node numbers - used in recursive function calls.}
}
\description{
This function returns the set of node & tip numbers descended from \code{node}.
}
\value{
The set of node and tip numbers for the nodes and tips descended from \code{node} in a vector.
}
\references{
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{paintSubTree}}
}
\keyword{phylogenetics}
\keyword{utilities}
| /man/getDescendants.Rd | no_license | balthasarbickel/phytools | R | false | false | 870 | rd | \name{getDescendants}
\alias{getDescendants}
\title{Get descendant node numbers}
\usage{
getDescendants(tree, node, curr=NULL)
}
\arguments{
\item{tree}{a phylogenetic tree as an object of class \code{"phylo"}.}
\item{node}{an integer specifying a node number in the tree.}
\item{curr}{the set of previously stored node numbers - used in recursive function calls.}
}
\description{
This function returns the set of node & tip numbers descended from \code{node}.
}
\value{
The set of node and tip numbers for the nodes and tips descended from \code{node} in a vector.
}
\references{
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\seealso{
\code{\link{paintSubTree}}
}
\keyword{phylogenetics}
\keyword{utilities}
|
# the key challenge is to compare across each evaluation criteria.
# For this, show the distribution of different metrics
# show that for the same dataset, the score share similar distribution, ie, needs to be comparable
library(ggplot2)
library(ggpubr)
library(ggthemes)
draw_plot <- function( result ){
  # Build a named list of ggplot objects comparing the distributional
  # properties of two datasets (the `dataset` column of each data frame is
  # the grouping/colour variable).
  #
  # Args:
  #   result: list carrying data frames `sampleDF`, `featureDF`,
  #           `sampleCorrDF` and `featureCorrDF`, each with a `dataset`
  #           column plus the metric columns plotted below.
  #
  # Returns a named list of ggplot objects, one per evaluation metric, in the
  # same order and with the same names as before the refactor.
  sampleDF <- result$sampleDF
  featureDF <- result$featureDF
  sampleCorrDF <- result$sampleCorrDF
  featureCorrDF <- result$featureCorrDF

  # Shared look: one palette, one theme, matching fill/colour scales.
  # (Also fixes the previous inconsistency between scale_colour_manual and
  # scale_color_manual in the last plot.)
  pal <- c( "#184275", "#b3202c" )
  th <- theme(text=element_text(size=12 ),
              axis.text.x = element_text(angle = 45, hjust = 1),
              panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              panel.background = element_rect(colour = "black", size=0.2, fill=NA) )
  shared_scales <- list(scale_fill_manual(values = pal),
                        scale_colour_manual(values = pal))

  # Density plot of one column, grouped/filled/coloured by dataset.
  density_plot <- function(df, var, xlab, title) {
    ggplot(df, aes(x = .data[[var]], group = dataset, fill = dataset, color = dataset)) +
      geom_density( alpha = 0.7 ) +
      xlab(xlab) +
      shared_scales +
      ggtitle(title) +
      th
  }

  # Scatter plot of two columns, coloured by dataset.
  scatter_plot <- function(df, xvar, yvar, xlab, ylab, title, alpha) {
    ggplot(df, aes(x = .data[[xvar]], y = .data[[yvar]], color = dataset)) +
      geom_point(size = 0.5, alpha = alpha) +
      xlab(xlab) + ylab(ylab) +
      shared_scales +
      ggtitle(title) +
      th
  }

  plot_list <- list(
    libsize = density_plot(sampleDF, "Libsize", "library size", "libsize"),
    libsize_fraczero = scatter_plot(sampleDF, "Libsize", "Fraczero",
                                    "library size", "fraction zero per gene",
                                    "libsize_fraczero", alpha = 0.5),
    tmm = density_plot(sampleDF, "TMM", "TMM", "TMM"),
    effectivelibsize = density_plot(sampleDF, "EffLibsize",
                                    "effective library size",
                                    "effective library size"),
    mean_variance = scatter_plot(featureDF, "average_log2_cpm", "variance_log2_cpm",
                                 " mean expression ", " variance of gene expression ",
                                 "mean_variance", alpha = 0.1),
    variance = density_plot(featureDF, "variance_log2_cpm",
                            "variance log2 cpm", "variance"),
    variance_scaled = density_plot(featureDF, "variance_scaled_log2_cpm",
                                   "variance scaled log2 cpm", "scaled variance"),
    samplecor = density_plot(sampleCorrDF, "Correlation",
                             "sample correlation", "samplecor"),
    featurecor = density_plot(featureCorrDF, "Correlation",
                              "feature correlation", "featurecor"),
    mean_fraczero = scatter_plot(featureDF, "average_log2_cpm", "Fraczero",
                                 "mean expression", "fraction zero per gene",
                                 "mean_fraczero", alpha = 0.1),
    fraczerogene = density_plot(featureDF, "Fraczero",
                                "Fraction zeros per gene", "fraczerogene"),
    fraczerocell = density_plot(sampleDF, "Fraczero",
                                "Fraction zeros per cell", "fraczerocell"),
    mean = density_plot(featureDF, "average_log2_cpm",
                        "average log2 cpm", "mean")
  )
  return( plot_list )
}
| /plotting.R | no_license | ycao6928/benchmark_scRNAseq_simulation | R | false | false | 6,637 | r | # the key challenge is to compare across each evaluation criteria.
# For this, show the distribution of different metrics
# show that for the same dataset, the score share similar distribution, ie, needs to be comparable
library(ggplot2)
library(ggpubr)
library(ggthemes)
draw_plot <- function( result ){
  # Build a named list of ggplot objects comparing the distributional
  # properties of two datasets (the `dataset` column of each data frame is
  # the grouping/colour variable).
  #
  # Args:
  #   result: list carrying data frames `sampleDF`, `featureDF`,
  #           `sampleCorrDF` and `featureCorrDF`, each with a `dataset`
  #           column plus the metric columns plotted below.
  #
  # Returns a named list of ggplot objects, one per evaluation metric, in the
  # same order and with the same names as before the refactor.
  sampleDF <- result$sampleDF
  featureDF <- result$featureDF
  sampleCorrDF <- result$sampleCorrDF
  featureCorrDF <- result$featureCorrDF

  # Shared look: one palette, one theme, matching fill/colour scales.
  # (Also fixes the previous inconsistency between scale_colour_manual and
  # scale_color_manual in the last plot.)
  pal <- c( "#184275", "#b3202c" )
  th <- theme(text=element_text(size=12 ),
              axis.text.x = element_text(angle = 45, hjust = 1),
              panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              panel.background = element_rect(colour = "black", size=0.2, fill=NA) )
  shared_scales <- list(scale_fill_manual(values = pal),
                        scale_colour_manual(values = pal))

  # Density plot of one column, grouped/filled/coloured by dataset.
  density_plot <- function(df, var, xlab, title) {
    ggplot(df, aes(x = .data[[var]], group = dataset, fill = dataset, color = dataset)) +
      geom_density( alpha = 0.7 ) +
      xlab(xlab) +
      shared_scales +
      ggtitle(title) +
      th
  }

  # Scatter plot of two columns, coloured by dataset.
  scatter_plot <- function(df, xvar, yvar, xlab, ylab, title, alpha) {
    ggplot(df, aes(x = .data[[xvar]], y = .data[[yvar]], color = dataset)) +
      geom_point(size = 0.5, alpha = alpha) +
      xlab(xlab) + ylab(ylab) +
      shared_scales +
      ggtitle(title) +
      th
  }

  plot_list <- list(
    libsize = density_plot(sampleDF, "Libsize", "library size", "libsize"),
    libsize_fraczero = scatter_plot(sampleDF, "Libsize", "Fraczero",
                                    "library size", "fraction zero per gene",
                                    "libsize_fraczero", alpha = 0.5),
    tmm = density_plot(sampleDF, "TMM", "TMM", "TMM"),
    effectivelibsize = density_plot(sampleDF, "EffLibsize",
                                    "effective library size",
                                    "effective library size"),
    mean_variance = scatter_plot(featureDF, "average_log2_cpm", "variance_log2_cpm",
                                 " mean expression ", " variance of gene expression ",
                                 "mean_variance", alpha = 0.1),
    variance = density_plot(featureDF, "variance_log2_cpm",
                            "variance log2 cpm", "variance"),
    variance_scaled = density_plot(featureDF, "variance_scaled_log2_cpm",
                                   "variance scaled log2 cpm", "scaled variance"),
    samplecor = density_plot(sampleCorrDF, "Correlation",
                             "sample correlation", "samplecor"),
    featurecor = density_plot(featureCorrDF, "Correlation",
                              "feature correlation", "featurecor"),
    mean_fraczero = scatter_plot(featureDF, "average_log2_cpm", "Fraczero",
                                 "mean expression", "fraction zero per gene",
                                 "mean_fraczero", alpha = 0.1),
    fraczerogene = density_plot(featureDF, "Fraczero",
                                "Fraction zeros per gene", "fraczerogene"),
    fraczerocell = density_plot(sampleDF, "Fraczero",
                                "Fraction zeros per cell", "fraczerocell"),
    mean = density_plot(featureDF, "average_log2_cpm",
                        "average log2 cpm", "mean")
  )
  return( plot_list )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quality.threshold.uncertain.R
\name{quality.threshold.uncertain}
\alias{quality.threshold.uncertain}
\title{Function for the description of the qualities of the Uncertain Interval.}
\usage{
quality.threshold.uncertain(
ref,
test,
threshold,
threshold.upper,
intersection = NULL,
model = c("kernel", "binormal", "ordinal"),
tests = FALSE,
direction = c("auto", "<", ">")
)
}
\arguments{
\item{ref}{The reference standard. A column in a data frame or a vector
indicating the classification by the reference test. The reference standard
must be coded either as 0 (absence of the condition) or 1 (presence of the
condition)}
\item{test}{The index test or test under evaluation. A column in a dataset or
vector indicating the test results in a continuous scale.}
\item{threshold}{The lower decision threshold of a trichotomization method.}
\item{threshold.upper}{The upper decision threshold of a trichotomization
method. Required.}
\item{intersection}{(default = NULL). When NULL, the intersection is
calculated with \code{get.intersection}, which uses the kernel density
method to obtain the intersection. When another value is assigned to this
parameter, this value is used instead.}
\item{model}{(default = 'kernel'). The model used defines the intersection.
Default the kernel densities are used with adjust = 1, for ordinal models
adjust = 2 is used. For bi-normal models the bi-normal estimate of the
intersection is used. The model defines the intersection, which defines the
output of this function.}
\item{tests}{(default = FALSE). When TRUE the results of chi-square tests and
t-tests are included in the results.}
\item{direction}{Default = "auto". Direction when comparing controls with
cases. When the controls have lower values than the cases
\code{(direction = "<")}. When "auto", mean comparison is used to determine
the direction.}
}
\value{
{ A list of} \describe{ \item{direction}{Shows whether controls (0)
are expected to have higher or lower scores than patients (1).}
\item{intersection}{The value used as estimate of the intersection (that is,
the optimal threshold).}
\item{table}{The confusion table of {UI.class x ref} for the Uncertain
Interval where the scores are expected to be inconclusive. The point of
intersection is used as a dichotomous cut-point within the uncertain interval
(UI). UI.class is the classification of the UI scores divided by the point of
intersection, 0 (UI scores < point of intersection and 1 (UI scores >= point
of intersection. Both the reference standard (ref) and the classification
based on the test scores (UI.class) have categories 0 and 1. Table cell {0,
0} shows the True Negatives (TN), cell {0, 1} shows the False Negatives (FN),
cell {1, 0} shows the False Positives (FP), and cell {1, 1} shows the True
Positives (TP).}
\item{cut}{The values of the thresholds.}
\item{X2}{When tests is TRUE, the table with the outcomes of three Chi-square
tests of the confusion table is shown:}
\itemize{ \item{TN.FP: }{Chi-square test of
the comparison of TN versus FP.}
\item{FN.TP: }{Chi-square test of the
comparison of FN versus TP.}
\item{overall: }{Chi-square test of all four
cells of the table.} }
\item{t.test}{When tests is TRUE, a table is shown with t-test results for the
comparison of the means. Within the Uncertain Interval, the test scores are
compared of individuals without the targeted condition (ref = 0) and
individuals with the targeted condition (ref = 1).}
\item{indices}{A named vector, with the following statistics for the
test-scores within the Uncertain Interval, using the point of intersection
(optimal threshold) as dichotomous cut-point within the uncertain interval.}
\itemize{ \item{Proportion.True: }{Proportion of classified patients with the
targeted condition (TP+FN)/(TN+FP+FN+TP). Equal to the sample prevalence when
all patients are classified.}
\item{UI.CCR: }{Correct Classification Rate or Accuracy (TP+TN)/(TN+FP+FN+TP)}
\item{UI.balance: }{balance between correct and incorrect classified (TP+TN)/(FP+FN)}
\item{UI.Sp: }{Specificity TN/(TN+FP)}
\item{UI.Se: }{Sensitivity TP/(TP+FN)}
\item{UI.NPV: }{Negative Predictive Value TN/(TN+FN)}
\item{UI.PPV: }{Positive Predictive Value TP/(TP+FP)}
\item{UI.SNPV: }{Standardized Negative Predictive Value}
\item{UI.SPPV: }{Standardized Positive Predictive Value}
\item{LR-: }{Negative Likelihood Ratio (P(-|D+))/(P(-|D-)) The probability of a person with the
condition receiving a negative classification / probability of a person without the
condition receiving a negative classification.}
\item{LR+: }{Positive Likelihood Ratio (P(+|D+))/(P(+|D-)) The probability of a person with the
condition receiving a positive classification / probability of a person without the
condition receiving a positive classification.}
\item{UI.C: }{Concordance or C-Statistic or AUC: The probability that a
random chosen patient with the condition is correctly ranked higher than a
randomly chosen patient without the condition. Equal to AUC, with for the
uncertain interval an expected outcome smaller than .60. (Not equal to a partial AUC.)}
} }
}
\description{
This function can be used only for trichotomization (double
thresholds or cut-points) methods. In the case of the Uncertain Interval
trichotomization method, it provides descriptive statistics for the test
scores within the Uncertain Interval. For the TG-ROC trichotomization
method it provides the descriptive statistics for TG-ROC's Intermediate
Range.
}
\details{
The Uncertain Interval is generally defined as an interval below and
above the intersection, where the densities of the two distributions of
patients with and without the targeted impairment are about equal. The
various functions for the estimation of the uncertain interval use a
sensitivity and specificity below a desired value (default .55).
This function uses the intersection (the optimal dichotomous threshold) to
divide the uncertain interval and provides in this way the indices for the
uncertain interval when the optimal threshold would have been applied.
The patients that have test scores within the Uncertain Interval are prone
to be incorrectly classified on the basis of their test result. The results
within the Uncertain Interval differ only slightly for patients with and
without the targeted condition. Patients with slightly lower or higher test
scores too often have the opposite status. They receive the classification
result 'Uncertain'; it is better to apply additional tests or to await
further developments.
As the test scores have about equal densities, it may be expected that
Chi-square tests are not significant, provided that the count of
individuals within the Uncertain Interval is not too large. Most often, the
t-tests are also not significant, but as the power of the t-test is
considerably larger than the power of the Chi-square test, this is less
often the case. It is recommended to look at the difference of the means of
the two sub-samples and to visually inspect the inter-mixedness of the
densities of the test scores.
When applying the method to the results of a logistic regression, one
should be aware of possible problems concerning the determination of the
intersection. Somewhere in the middle, logistic predictions can have a
range where the distributions have similar densities or have multiple
intersections near to each other. Often, this problem can be approached
effectively by using the linear predictions instead of the logistic
predictions. The linear predictions offer often a far more clear point of
intersection. The solution can then be applied to the prediction values
using the inverse logit of the intersection and the two cut-points. The
logistic predictions and the linear predictions have the same rank
ordering.
NOTE: Other trichotomization methods such as \code{\link{TG.ROC}} have no
defined position for its Intermediate Range. For \code{\link{TG.ROC}} usage
of the point where Sensitivity=Specificity seems a reasonable choice.
}
\examples{
# A simple test model
ref=c(rep(0,500), rep(1,500))
test=c(rnorm(500,0,1), rnorm(500,1,sd=1))
ua = ui.nonpar(ref, test)
quality.threshold.uncertain(ref, test, ua[1], ua[2])
}
\seealso{
\code{\link{UncertainInterval}} for an explanatory glossary of the
different statistics used within this package.
}
| /man/quality.threshold.uncertain.Rd | no_license | HansLandsheer/UncertainInterval | R | false | true | 8,378 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quality.threshold.uncertain.R
\name{quality.threshold.uncertain}
\alias{quality.threshold.uncertain}
\title{Function for the description of the qualities of the Uncertain Interval.}
\usage{
quality.threshold.uncertain(
ref,
test,
threshold,
threshold.upper,
intersection = NULL,
model = c("kernel", "binormal", "ordinal"),
tests = FALSE,
direction = c("auto", "<", ">")
)
}
\arguments{
\item{ref}{The reference standard. A column in a data frame or a vector
indicating the classification by the reference test. The reference standard
must be coded either as 0 (absence of the condition) or 1 (presence of the
condition)}
\item{test}{The index test or test under evaluation. A column in a dataset or
vector indicating the test results in a continuous scale.}
\item{threshold}{The lower decision threshold of a trichotomization method.}
\item{threshold.upper}{The upper decision threshold of a trichotomization
method. Required.}
\item{intersection}{(default = NULL). When NULL, the intersection is
calculated with \code{get.intersection}, which uses the kernel density
method to obtain the intersection. When another value is assigned to this
parameter, this value is used instead.}
\item{model}{(default = 'kernel'). The model used defines the intersection.
Default the kernel densities are used with adjust = 1, for ordinal models
adjust = 2 is used. For bi-normal models the bi-normal estimate of the
intersection is used. The model defines the intersection, which defines the
output of this function.}
\item{tests}{(default = FALSE). When TRUE the results of chi-square tests and
t-tests are included in the results.}
\item{direction}{Default = "auto". Direction when comparing controls with
cases. When the controls have lower values than the cases
\code{(direction = "<")}. When "auto", mean comparison is used to determine
the direction.}
}
\value{
{ A list of} \describe{ \item{direction}{Shows whether controls (0)
are expected to have higher or lower scores than patients (1).}
\item{intersection}{The value used as estimate of the intersection (that is,
the optimal threshold).}
\item{table}{The confusion table of {UI.class x ref} for the Uncertain
Interval where the scores are expected to be inconclusive. The point of
intersection is used as a dichotomous cut-point within the uncertain interval
(UI). UI.class is the classification of the UI scores divided by the point of
intersection, 0 (UI scores < point of intersection and 1 (UI scores >= point
of intersection. Both the reference standard (ref) and the classification
based on the test scores (UI.class) have categories 0 and 1. Table cell {0,
0} shows the True Negatives (TN), cell {0, 1} shows the False Negatives (FN),
cell {1, 0} shows the False Positives (FP), and cell {1, 1} shows the True
Positives (TP).}
\item{cut}{The values of the thresholds.}
\item{X2}{When tests is TRUE, the table with the outcomes of three Chi-square
tests of the confusion table is shown:}
\itemize{ \item{TN.FP: }{Chi-square test of
the comparison of TN versus FP.}
\item{FN.TP: }{Chi-square test of the
comparison of FN versus TP.}
\item{overall: }{Chi-square test of all four
cells of the table.} }
\item{t.test}{When tests is TRUE, a table is shown with t-test results for the
comparison of the means. Within the Uncertain Interval, the test scores are
compared of individuals without the targeted condition (ref = 0) and
individuals with the targeted condition (ref = 1).}
\item{indices}{A named vector, with the following statistics for the
test-scores within the Uncertain Interval, using the point of intersection
(optimal threshold) as dichotomous cut-point within the uncertain interval.}
\itemize{ \item{Proportion.True: }{Proportion of classified patients with the
targeted condition (TP+FN)/(TN+FP+FN+TP). Equal to the sample prevalence when
all patients are classified.}
\item{UI.CCR: }{Correct Classification Rate or Accuracy (TP+TN)/(TN+FP+FN+TP)}
\item{UI.balance: }{balance between correct and incorrect classified (TP+TN)/(FP+FN)}
\item{UI.Sp: }{Specificity TN/(TN+FP)}
\item{UI.Se: }{Sensitivity TP/(TP+FN)}
\item{UI.NPV: }{Negative Predictive Value TN/(TN+FN)}
\item{UI.PPV: }{Positive Predictive Value TP/(TP+FP)}
\item{UI.SNPV: }{Standardized Negative Predictive Value}
\item{UI.SPPV: }{Standardized Positive Predictive Value}
\item{LR-: }{Negative Likelihood Ratio P(-|D+)/P(-|D-). The probability of a person with the
condition receiving a negative classification / probability of a person without the
condition receiving a negative classification.}
\item{LR+: }{Positive Likelihood Ratio P(+|D+)/P(+|D-). The probability of a person with the
condition receiving a positive classification / probability of a person without the
condition receiving a positive classification.}
\item{UI.C: }{Concordance or C-Statistic or AUC: The probability that a
random chosen patient with the condition is correctly ranked higher than a
randomly chosen patient without the condition. Equal to AUC, with for the
uncertain interval an expected outcome smaller than .60. (Not equal to a partial AUC.)}
} }
}
\description{
This function can be used only for trichotomization (double
thresholds or cut-points) methods. In the case of the Uncertain Interval
trichotomization method, it provides descriptive statistics for the test
scores within the Uncertain Interval. For the TG-ROC trichotomization
method it provides the descriptive statistics for TG-ROC's Intermediate
Range.
}
\details{
The Uncertain Interval is generally defined as an interval below and
above the intersection, where the densities of the two distributions of
patients with and without the targeted impairment are about equal. The
various functions for the estimation of the uncertain interval use a
sensitivity and specificity below a desired value (default .55).
This function uses the intersection (the optimal dichotomous threshold) to
divide the uncertain interval and provides in this way the indices for the
uncertain interval when the optimal threshold would have been applied.
The patients that have test scores within the Uncertain Interval are prone
to be incorrectly classified on the basis of their test result. The results
within the Uncertain Interval differ only slightly for patients with and
without the targeted condition. Patients with slightly lower or higher test
scores too often have the opposite status. They receive the classification
result 'Uncertain'; it is better to apply additional tests or to await
further developments.
As the test scores have about equal densities, it may be expected that
Chi-square tests are not significant, provided that the count of
individuals within the Uncertain Interval is not too large. Most often, the
t-tests are also not significant, but as the power of the t-test is
considerably larger than the power of the Chi-square test, this is less
often the case. It is recommended to look at the difference of the means of
the two sub-samples and to visually inspect the inter-mixedness of the
densities of the test scores.
When applying the method to the results of a logistic regression, one
should be aware of possible problems concerning the determination of the
intersection. Somewhere in the middle, logistic predictions can have a
range where the distributions have similar densities or have multiple
intersections near to each other. Often, this problem can be approached
effectively by using the linear predictions instead of the logistic
predictions. The linear predictions often offer a far clearer point of
intersection. The solution can then be applied to the prediction values
using the inverse logit of the intersection and the two cut-points. The
logistic predictions and the linear predictions have the same rank
ordering.
NOTE: Other trichotomization methods such as \code{\link{TG.ROC}} have no
defined position for its Intermediate Range. For \code{\link{TG.ROC}} usage
of the point where Sensitivity=Specificity seems a reasonable choice.
}
\examples{
# A simple test model
ref=c(rep(0,500), rep(1,500))
test=c(rnorm(500,0,1), rnorm(500,1,sd=1))
ua = ui.nonpar(ref, test)
quality.threshold.uncertain(ref, test, ua[1], ua[2])
}
\seealso{
\code{\link{UncertainInterval}} for an explanatory glossary of the
different statistics used within this package.
}
|
# Fit a 10-fold cross-validated ridge regression (glmnet, alpha = 0) that
# predicts column 1 of the training CSV from the feature columns (4:ncol),
# then append a printout of the fitted coefficient path to a log file.
library(glmnet)

# header = TRUE spelled out (was the partially-matched `head = T`).
mydata <- read.table("./TrainingSet/RF/lung_other.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictor columns
y <- as.matrix(mydata[, 1])               # response column
set.seed(123)  # reproducible CV fold assignment
# Renamed from `glm`, which shadowed stats::glm().
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0,
                    family = "gaussian", standardize = TRUE)
# capture.output() replaces the sink()/print()/sink() triple so the output
# connection cannot be left open if print() fails.
capture.output(print(cv_fit$glmnet.fit),
               file = "./Model/EN/Classifier/lung_other/lung_other_001.txt",
               append = TRUE)
| /Model/EN/Classifier/lung_other/lung_other_001.R | no_license | leon1003/QSMART | R | false | false | 359 | r | library(glmnet)
# Fit a 10-fold cross-validated ridge regression (glmnet, alpha = 0) that
# predicts column 1 of the training CSV from the feature columns (4:ncol),
# then append a printout of the fitted coefficient path to a log file.
# header = TRUE spelled out (was the partially-matched `head = T`).
mydata <- read.table("./TrainingSet/RF/lung_other.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictor columns
y <- as.matrix(mydata[, 1])               # response column
set.seed(123)  # reproducible CV fold assignment
# Renamed from `glm`, which shadowed stats::glm().
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0,
                    family = "gaussian", standardize = TRUE)
# capture.output() replaces the sink()/print()/sink() triple so the output
# connection cannot be left open if print() fails.
capture.output(print(cv_fit$glmnet.fit),
               file = "./Model/EN/Classifier/lung_other/lung_other_001.txt",
               append = TRUE)
|
# Each srvyr summary statistic is computed twice -- once through srvyr's
# dplyr-style interface and once with the underlying survey-package
# function -- and the two results are compared.
context("Quick tests for summary stats (ratio / quantile)")
library(srvyr)
library(survey)
source("utilities.R")  # provides expect_df_equal() used below
df_test <- 30  # degrees of freedom used by the df/CI tests further down
data(api)  # apistrat: stratified school sample shipped with the survey pkg
dstrata <- apistrat %>%
  as_survey_design(strata = stype, weights = pw)
# survey_ratio, ungrouped: estimate and SE must match svyratio().
out_survey <- svyratio(~api00, ~api99, dstrata)
out_srvyr <- dstrata %>%
  summarise(api_ratio = survey_ratio(api00, api99))
test_that("survey_ratio works for ungrouped surveys",
          expect_equal(c(out_survey[[1]], sqrt(out_survey$var)),
                       c(out_srvyr[[1]][[1]], out_srvyr[[2]][[1]])))
# survey_ratio, grouped: per-stratum ratios must match svyby()/svyratio().
out_survey <- svyby(~api00, ~stype, denominator = ~api99, dstrata,
                    svyratio) %>%
  as.data.frame()
out_srvyr <- dstrata %>%
  group_by(stype) %>%
  summarise(api_ratio = survey_ratio(api00, api99))
# The description previously said "ungrouped", duplicating the name of the
# test above; this block exercises the grouped case.
test_that("survey_ratio works for grouped surveys",
          expect_true(all(out_survey == out_srvyr)))
# survey_quantile, ungrouped: point estimates only (no CI requested).
out_survey <- svyquantile(~api00, dstrata, c(0.5, 0.75))
out_srvyr <- dstrata %>%
  summarise(api00 = survey_quantile(api00, quantiles = c(0.5, 0.75)))
test_that("survey_quantile works for ungrouped surveys - no ci",
          expect_equal(c(out_survey[[1]], out_survey[[2]]),
                       c(out_srvyr[[1]][[1]], out_srvyr[[2]][[1]])))
# survey_quantile, ungrouped, with confidence intervals.
out_survey <- svyquantile(~api00, dstrata, c(0.5, 0.75), ci = TRUE)
out_srvyr <- dstrata %>%
  summarise(api00 = survey_quantile(api00, quantiles = c(0.5, 0.75),
                                    vartype = "ci"))
test_that("survey_quantile works for ungrouped surveys - with ci",
          expect_equal(c(out_survey$CIs[[1]], out_survey$CIs[[2]]),
                       c(out_srvyr[["api00_q50_low"]][[1]],
                         out_srvyr[["api00_q50_upp"]][[1]])))
# survey_quantile, grouped, with standard errors; warnings are expected
# from both packages here, hence the suppressWarnings() wrappers.
suppressWarnings(out_survey <- svyby(~api00, ~stype, dstrata, svyquantile,
                                     quantiles = c(0.5, 0.75), ci = TRUE))
suppressWarnings(out_srvyr <- dstrata %>%
  group_by(stype) %>%
  summarise(api00 = survey_quantile(api00, quantiles = c(0.5, 0.75),
                                    vartype = "se")))
test_that("survey_quantile works for grouped surveys - with se",
          expect_equal(c(out_survey$`0.5`[[1]], out_survey[["se.0.5"]][[1]]),
                       c(out_srvyr[["api00_q50"]][[1]],
                        out_srvyr[["api00_q50_se"]][[1]])))
# survey_median: convenience wrapper equivalent to survey_quantile at 0.5.
out_survey <- svyquantile(~api00, dstrata, c(0.5))
out_srvyr <- dstrata %>%
  summarise(api00 = survey_median(api00))
# The description previously said "survey_quantile"; this test covers
# survey_median.
test_that("survey_median works for ungrouped surveys - no ci",
          expect_equal(c(out_survey[[1]]),
                       c(out_srvyr[[1]][[1]])))
# survey_quantile with two grouping variables (stype x awards).
suppressWarnings(
  out_survey <- svyby(~api00, ~stype + awards, dstrata, svyquantile,
                      quantiles = c(0.5, 0.75), ci = TRUE)
)
suppressWarnings(
  out_srvyr <- dstrata %>%
    group_by(stype, awards) %>%
    summarise(api00 = survey_quantile(api00, quantiles = c(0.5, 0.75),
                                      vartype = "se"))
)
test_that(
  "survey_quantile works for grouped surveys - multiple grouping variables",
  expect_equal(c(out_survey$`0.5`[[1]], out_survey[["se.0.5"]][[1]]),
               c(out_srvyr[["api00_q50"]][[1]], out_srvyr[["api00_q50_se"]][[1]])))
# level = 0.9 must flow through to the confidence intervals (ungrouped).
out_srvyr <- dstrata %>%
  summarize(ratio = survey_ratio(api00, api99, vartype = "ci", level = 0.9),
            mdn = survey_median(api00, vartype = "ci", level = 0.9)) %>%
  select(-ratio, -mdn_q50) %>%
  unlist()
ratio <- svyratio(~api00, ~api99, dstrata)
ratio <- confint(ratio, level = 0.9, df = degf(dstrata))
mdn <- svyquantile(~api00, dstrata, quantile = 0.5, ci = TRUE, alpha = 0.1)
mdn <- confint(mdn)
out_survey <- c(ratio[1], ratio[2], mdn[1], mdn[2])
names(out_survey) <- c("ratio_low", "ratio_upp", "mdn_q50_low", "mdn_q50_upp")
test_that("median/ratio with CIs respect level parameter (ungrouped)",
          expect_df_equal(out_srvyr, out_survey))
# level = 0.9 must flow through to the confidence intervals (grouped).
suppressWarnings(out_srvyr <- dstrata %>%
  group_by(stype) %>%
  summarize(ratio = survey_ratio(api00, api99, vartype = "ci", level = 0.9),
            mdn = survey_median(api00, vartype = "ci", level = 0.9)) %>%
  select(-ratio, -mdn_q50, -stype)
)
ratio <- svyby(~api00, ~stype, denominator = ~api99, dstrata, svyratio)
ratio <- confint(ratio, level = 0.9, df = degf(dstrata))
suppressWarnings(mdn <- svyby(~api00, ~stype, dstrata, svyquantile,
  quantile = 0.5, ci = TRUE, alpha = 0.1, vartype = "ci") %>%
  data.frame() %>%
  select(-api00, -stype))
out_survey <- dplyr::bind_cols(data.frame(ratio), mdn)
names(out_survey) <- c("ratio_low", "ratio_upp", "mdn_q50_low", "mdn_q50_upp")
test_that("median/ratio with CIs respect level parameter (grouped)",
          expect_df_equal(out_srvyr, out_survey))
# deff = TRUE and df = df_test must match survey's deff()/confint().
out_survey <- svyratio(~api99, ~api00, dstrata, deff = TRUE)
out_srvyr <- dstrata %>%
  summarise(survey_ratio = survey_ratio(api99, api00, deff = TRUE, vartype = "ci", df = df_test))
test_that("deff works for ungrouped survey total",
          expect_equal(c(out_survey[[1]], deff(out_survey)[[1]]),
                       c(out_srvyr[["survey_ratio"]][[1]], out_srvyr[["survey_ratio_deff"]][[1]])))
test_that("df works for ungrouped survey total",
          expect_equal(confint(out_survey, df = df_test)[c(1, 2)],
                       c(out_srvyr[["survey_ratio_low"]][[1]], out_srvyr[["survey_ratio_upp"]][[1]])))
out_srvyr <- dstrata %>%
  group_by(stype) %>%
  summarise(survey_ratio = survey_ratio(api99, api00, deff = TRUE, vartype = "ci", df = df_test))
temp_survey <- svyby(~api99, ~stype, dstrata, svyratio, deff = TRUE, vartype = c("se", "ci"),
                     denominator = ~api00)
out_survey <- temp_survey %>%
  data.frame() %>%
  dplyr::tbl_df() %>%
  rename(survey_ratio = api99.api00, survey_ratio_low = ci_l, survey_ratio_upp = ci_u,
         survey_ratio_deff = `DEff`) %>%
  select(-se.api99.api00)
out_survey[, c("survey_ratio_low", "survey_ratio_upp")] <-
  confint(temp_survey, df = df_test)
test_that("deff and df work for grouped survey total",
          expect_df_equal(out_srvyr, out_survey))
# df pass-through for quantile confidence intervals.
out_survey <- svyquantile(~api99, dstrata, c(0.5), ci = TRUE, df = df_test)
out_srvyr <- dstrata %>%
  summarise(survey = survey_median(api99, vartype = "ci", df = df_test))
test_that("df works for ungrouped survey total",
          expect_equal(confint(out_survey)[c(1, 2)],
                       c(out_srvyr[["survey_q50_low"]][[1]], out_srvyr[["survey_q50_upp"]][[1]])))
out_srvyr <- suppressWarnings(
  dstrata %>%
    group_by(stype) %>%
    summarise(survey = survey_median(api99, vartype = "ci", df = df_test))
)
temp_survey <- suppressWarnings(svyby(~api99, ~stype, dstrata, svyquantile, quantiles = c(0.5), ci = TRUE,
                     vartype = c("se", "ci"), df = df_test))
out_survey <- temp_survey %>%
  data.frame() %>%
  dplyr::tbl_df() %>%
  rename(survey_q50 = api99, survey_q50_low = ci_l, survey_q50_upp = ci_u) %>%
  select(-se)
test_that("df works for grouped survey quantile",
          expect_df_equal(out_srvyr, out_survey))
# BRR replicate-weight design built from the small scd dataset; the rep*
# columns are hand-constructed replicate weights.
data(scd, package = "survey")
scd <- scd %>%
  mutate(rep1 = 2 * c(1, 0, 1, 0, 1, 0),
         rep2 = 2 * c(1, 0, 0, 1, 0, 1),
         rep3 = 2 * c(0, 1, 1, 0, 0, 1),
         rep4 = 2 * c(0, 1, 0, 1, 1, 0))
suppressWarnings(mysvy <- scd %>%
  as_survey_rep(type = "BRR", repweights = starts_with("rep"),
                combined_weights = FALSE))
results_srvyr <- mysvy %>%
  summarize(x = survey_median(arrests, interval_type = "probability"))
results_survey <- svyquantile(~arrests, mysvy, quantiles = 0.5,
                              interval_type = "probability")
test_that("srvyr allows you to select probability for interval_type of replicate weights",
          expect_equal(results_srvyr[[1]], results_survey[[1]]))
results_srvyr <- mysvy %>%
  summarize(x = survey_median(arrests))
results_survey <- svyquantile(~arrests, mysvy, quantiles = 0.5)
test_that("srvyr does the right thing by default for quantiles of replicate surveys",
          expect_equal(results_srvyr[[1]], results_survey[[1]]))
# Regression test for srvyr issue #38: multiple quantiles requested at once
# on grouped data must line up with svyby()/svyquantile() column by column.
# (Typo "calcualte" fixed in the description.)
test_that(
  "Can calculate multiple quantiles on grouped data (#38)",
  {
    dstrata <- apistrat %>%
      as_survey_design(strata = stype, weights = pw)
    suppressWarnings(
      srvyr <- dstrata %>%
        group_by(awards) %>%
        summarise(api99 = survey_quantile(api99, c(0.25, 0.5, 0.75)))
    )
    suppressWarnings(
      survey <- svyby(
        ~api99, ~awards, dstrata, svyquantile, quantiles = c(0.25, 0.5, 0.75), ci = TRUE,
        vartype = c("se", "ci")
      )
    )
    # Compare the 25th-percentile estimate, SE, and CI bounds.
    expect_equal(srvyr$api99_q25, survey$`0.25`)
    expect_equal(srvyr$api99_q25_se, survey$`se.0.25`)
    expect_equal(srvyr$api99_q25_low, survey$`ci_l.0.25_api99`)
    expect_equal(srvyr$api99_q25_upp, survey$`ci_u.0.25_api99`)
  }
)
| /tests/testthat/test_survey_statistics.r | no_license | lionel-/srvyr | R | false | false | 8,771 | r | context("Quick tests for summary stats (ratio / quantile)")
# Each srvyr summary statistic is computed twice -- once through srvyr's
# dplyr-style interface and once with the underlying survey-package
# function -- and the two results are compared.
library(srvyr)
library(survey)
source("utilities.R")  # provides expect_df_equal() used below
df_test <- 30  # degrees of freedom used by the df/CI tests further down
data(api)  # apistrat: stratified school sample shipped with the survey pkg
dstrata <- apistrat %>%
  as_survey_design(strata = stype, weights = pw)
# survey_ratio, ungrouped: estimate and SE must match svyratio().
out_survey <- svyratio(~api00, ~api99, dstrata)
out_srvyr <- dstrata %>%
  summarise(api_ratio = survey_ratio(api00, api99))
test_that("survey_ratio works for ungrouped surveys",
          expect_equal(c(out_survey[[1]], sqrt(out_survey$var)),
                       c(out_srvyr[[1]][[1]], out_srvyr[[2]][[1]])))
# survey_ratio, grouped: per-stratum ratios must match svyby()/svyratio().
out_survey <- svyby(~api00, ~stype, denominator = ~api99, dstrata,
                    svyratio) %>%
  as.data.frame()
out_srvyr <- dstrata %>%
  group_by(stype) %>%
  summarise(api_ratio = survey_ratio(api00, api99))
# The description previously said "ungrouped", duplicating the name of the
# test above; this block exercises the grouped case.
test_that("survey_ratio works for grouped surveys",
          expect_true(all(out_survey == out_srvyr)))
# survey_quantile, ungrouped: point estimates only (no CI requested).
out_survey <- svyquantile(~api00, dstrata, c(0.5, 0.75))
out_srvyr <- dstrata %>%
  summarise(api00 = survey_quantile(api00, quantiles = c(0.5, 0.75)))
test_that("survey_quantile works for ungrouped surveys - no ci",
          expect_equal(c(out_survey[[1]], out_survey[[2]]),
                       c(out_srvyr[[1]][[1]], out_srvyr[[2]][[1]])))
# survey_quantile, ungrouped, with confidence intervals.
out_survey <- svyquantile(~api00, dstrata, c(0.5, 0.75), ci = TRUE)
out_srvyr <- dstrata %>%
  summarise(api00 = survey_quantile(api00, quantiles = c(0.5, 0.75),
                                    vartype = "ci"))
test_that("survey_quantile works for ungrouped surveys - with ci",
          expect_equal(c(out_survey$CIs[[1]], out_survey$CIs[[2]]),
                       c(out_srvyr[["api00_q50_low"]][[1]],
                         out_srvyr[["api00_q50_upp"]][[1]])))
# survey_quantile, grouped, with standard errors; warnings are expected
# from both packages here, hence the suppressWarnings() wrappers.
suppressWarnings(out_survey <- svyby(~api00, ~stype, dstrata, svyquantile,
                                     quantiles = c(0.5, 0.75), ci = TRUE))
suppressWarnings(out_srvyr <- dstrata %>%
  group_by(stype) %>%
  summarise(api00 = survey_quantile(api00, quantiles = c(0.5, 0.75),
                                    vartype = "se")))
test_that("survey_quantile works for grouped surveys - with se",
          expect_equal(c(out_survey$`0.5`[[1]], out_survey[["se.0.5"]][[1]]),
                       c(out_srvyr[["api00_q50"]][[1]],
                        out_srvyr[["api00_q50_se"]][[1]])))
# survey_median: convenience wrapper equivalent to survey_quantile at 0.5.
out_survey <- svyquantile(~api00, dstrata, c(0.5))
out_srvyr <- dstrata %>%
  summarise(api00 = survey_median(api00))
# The description previously said "survey_quantile"; this test covers
# survey_median.
test_that("survey_median works for ungrouped surveys - no ci",
          expect_equal(c(out_survey[[1]]),
                       c(out_srvyr[[1]][[1]])))
# survey_quantile with two grouping variables (stype x awards).
suppressWarnings(
  out_survey <- svyby(~api00, ~stype + awards, dstrata, svyquantile,
                      quantiles = c(0.5, 0.75), ci = TRUE)
)
suppressWarnings(
  out_srvyr <- dstrata %>%
    group_by(stype, awards) %>%
    summarise(api00 = survey_quantile(api00, quantiles = c(0.5, 0.75),
                                      vartype = "se"))
)
test_that(
  "survey_quantile works for grouped surveys - multiple grouping variables",
  expect_equal(c(out_survey$`0.5`[[1]], out_survey[["se.0.5"]][[1]]),
               c(out_srvyr[["api00_q50"]][[1]], out_srvyr[["api00_q50_se"]][[1]])))
# level = 0.9 must flow through to the confidence intervals (ungrouped).
out_srvyr <- dstrata %>%
  summarize(ratio = survey_ratio(api00, api99, vartype = "ci", level = 0.9),
            mdn = survey_median(api00, vartype = "ci", level = 0.9)) %>%
  select(-ratio, -mdn_q50) %>%
  unlist()
ratio <- svyratio(~api00, ~api99, dstrata)
ratio <- confint(ratio, level = 0.9, df = degf(dstrata))
mdn <- svyquantile(~api00, dstrata, quantile = 0.5, ci = TRUE, alpha = 0.1)
mdn <- confint(mdn)
out_survey <- c(ratio[1], ratio[2], mdn[1], mdn[2])
names(out_survey) <- c("ratio_low", "ratio_upp", "mdn_q50_low", "mdn_q50_upp")
test_that("median/ratio with CIs respect level parameter (ungrouped)",
          expect_df_equal(out_srvyr, out_survey))
# level = 0.9 must flow through to the confidence intervals (grouped).
suppressWarnings(out_srvyr <- dstrata %>%
  group_by(stype) %>%
  summarize(ratio = survey_ratio(api00, api99, vartype = "ci", level = 0.9),
            mdn = survey_median(api00, vartype = "ci", level = 0.9)) %>%
  select(-ratio, -mdn_q50, -stype)
)
ratio <- svyby(~api00, ~stype, denominator = ~api99, dstrata, svyratio)
ratio <- confint(ratio, level = 0.9, df = degf(dstrata))
suppressWarnings(mdn <- svyby(~api00, ~stype, dstrata, svyquantile,
  quantile = 0.5, ci = TRUE, alpha = 0.1, vartype = "ci") %>%
  data.frame() %>%
  select(-api00, -stype))
out_survey <- dplyr::bind_cols(data.frame(ratio), mdn)
names(out_survey) <- c("ratio_low", "ratio_upp", "mdn_q50_low", "mdn_q50_upp")
test_that("median/ratio with CIs respect level parameter (grouped)",
          expect_df_equal(out_srvyr, out_survey))
# deff = TRUE and df = df_test must match survey's deff()/confint().
out_survey <- svyratio(~api99, ~api00, dstrata, deff = TRUE)
out_srvyr <- dstrata %>%
  summarise(survey_ratio = survey_ratio(api99, api00, deff = TRUE, vartype = "ci", df = df_test))
test_that("deff works for ungrouped survey total",
          expect_equal(c(out_survey[[1]], deff(out_survey)[[1]]),
                       c(out_srvyr[["survey_ratio"]][[1]], out_srvyr[["survey_ratio_deff"]][[1]])))
test_that("df works for ungrouped survey total",
          expect_equal(confint(out_survey, df = df_test)[c(1, 2)],
                       c(out_srvyr[["survey_ratio_low"]][[1]], out_srvyr[["survey_ratio_upp"]][[1]])))
out_srvyr <- dstrata %>%
  group_by(stype) %>%
  summarise(survey_ratio = survey_ratio(api99, api00, deff = TRUE, vartype = "ci", df = df_test))
temp_survey <- svyby(~api99, ~stype, dstrata, svyratio, deff = TRUE, vartype = c("se", "ci"),
                     denominator = ~api00)
out_survey <- temp_survey %>%
  data.frame() %>%
  dplyr::tbl_df() %>%
  rename(survey_ratio = api99.api00, survey_ratio_low = ci_l, survey_ratio_upp = ci_u,
         survey_ratio_deff = `DEff`) %>%
  select(-se.api99.api00)
out_survey[, c("survey_ratio_low", "survey_ratio_upp")] <-
  confint(temp_survey, df = df_test)
test_that("deff and df work for grouped survey total",
          expect_df_equal(out_srvyr, out_survey))
# df pass-through for quantile confidence intervals.
out_survey <- svyquantile(~api99, dstrata, c(0.5), ci = TRUE, df = df_test)
out_srvyr <- dstrata %>%
  summarise(survey = survey_median(api99, vartype = "ci", df = df_test))
test_that("df works for ungrouped survey total",
          expect_equal(confint(out_survey)[c(1, 2)],
                       c(out_srvyr[["survey_q50_low"]][[1]], out_srvyr[["survey_q50_upp"]][[1]])))
out_srvyr <- suppressWarnings(
  dstrata %>%
    group_by(stype) %>%
    summarise(survey = survey_median(api99, vartype = "ci", df = df_test))
)
temp_survey <- suppressWarnings(svyby(~api99, ~stype, dstrata, svyquantile, quantiles = c(0.5), ci = TRUE,
                     vartype = c("se", "ci"), df = df_test))
out_survey <- temp_survey %>%
  data.frame() %>%
  dplyr::tbl_df() %>%
  rename(survey_q50 = api99, survey_q50_low = ci_l, survey_q50_upp = ci_u) %>%
  select(-se)
test_that("df works for grouped survey quantile",
          expect_df_equal(out_srvyr, out_survey))
# BRR replicate-weight design built from the small scd dataset; the rep*
# columns are hand-constructed replicate weights.
data(scd, package = "survey")
scd <- scd %>%
  mutate(rep1 = 2 * c(1, 0, 1, 0, 1, 0),
         rep2 = 2 * c(1, 0, 0, 1, 0, 1),
         rep3 = 2 * c(0, 1, 1, 0, 0, 1),
         rep4 = 2 * c(0, 1, 0, 1, 1, 0))
suppressWarnings(mysvy <- scd %>%
  as_survey_rep(type = "BRR", repweights = starts_with("rep"),
                combined_weights = FALSE))
results_srvyr <- mysvy %>%
  summarize(x = survey_median(arrests, interval_type = "probability"))
results_survey <- svyquantile(~arrests, mysvy, quantiles = 0.5,
                              interval_type = "probability")
test_that("srvyr allows you to select probability for interval_type of replicate weights",
          expect_equal(results_srvyr[[1]], results_survey[[1]]))
results_srvyr <- mysvy %>%
  summarize(x = survey_median(arrests))
results_survey <- svyquantile(~arrests, mysvy, quantiles = 0.5)
test_that("srvyr does the right thing by default for quantiles of replicate surveys",
          expect_equal(results_srvyr[[1]], results_survey[[1]]))
# Regression test for srvyr issue #38: multiple quantiles requested at once
# on grouped data must line up with svyby()/svyquantile() column by column.
# (Typo "calcualte" fixed in the description.)
test_that(
  "Can calculate multiple quantiles on grouped data (#38)",
  {
    dstrata <- apistrat %>%
      as_survey_design(strata = stype, weights = pw)
    suppressWarnings(
      srvyr <- dstrata %>%
        group_by(awards) %>%
        summarise(api99 = survey_quantile(api99, c(0.25, 0.5, 0.75)))
    )
    suppressWarnings(
      survey <- svyby(
        ~api99, ~awards, dstrata, svyquantile, quantiles = c(0.25, 0.5, 0.75), ci = TRUE,
        vartype = c("se", "ci")
      )
    )
    # Compare the 25th-percentile estimate, SE, and CI bounds.
    expect_equal(srvyr$api99_q25, survey$`0.25`)
    expect_equal(srvyr$api99_q25_se, survey$`se.0.25`)
    expect_equal(srvyr$api99_q25_low, survey$`ci_l.0.25_api99`)
    expect_equal(srvyr$api99_q25_upp, survey$`ci_u.0.25_api99`)
  }
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.