content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# One-time setup for the JAGS tutorial: install the required packages.
# rjags  - interface to the JAGS MCMC sampler
# coda   - MCMC output analysis and diagnostics
# ggplot2 - plotting
install.packages("rjags")
install.packages("coda")
install.packages("ggplot2")
|
/install.R
|
no_license
|
julianpistorius/Binder_ResBaz_JAGS_tutorial
|
R
| false
| false
| 79
|
r
|
# One-time setup for the JAGS tutorial: install the required packages.
# rjags  - interface to the JAGS MCMC sampler
# coda   - MCMC output analysis and diagnostics
# ggplot2 - plotting
install.packages("rjags")
install.packages("coda")
install.packages("ggplot2")
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{makeDisplay}
\alias{makeDisplay}
\title{Create a Trelliscope Display}
\usage{
makeDisplay(data, name, group = "common", desc = "", height = 500,
width = 500, panelFn = NULL, lims = list(x = "free", y = "free",
prepanelFn = NULL), cogFn = NULL, state = NULL, preRender = FALSE,
cogConn = dfCogConn(), output = NULL, conn = getOption("vdbConn"),
verbose = TRUE, params = NULL, packages = NULL, control = NULL)
}
\arguments{
\item{data}{data of class "ddo" or "ddf" (see \code{\link{ddo}}, \code{\link{ddf}})}
\item{name}{the name of the display (no spaces or special characters)}
\item{group}{the group the display belongs to (displays are organized into groups). Defaults to "common"}
\item{desc}{a description of the display (used in the viewer and in notebooks)}
\item{height}{reference dimensions (in pixels) for each panel (panels will be resized based on available space in the viewer)}
\item{width}{reference dimensions (in pixels) for each panel (panels will be resized based on available space in the viewer)}
\item{panelFn}{a function that produces a plot and takes one argument, which will be the current split of the data being passed to it. Useful to test with panelFn(divExample(dat)). Must return either an object of class "ggplot", "trellis", or "expression" (of base plot commands)}
\item{lims}{either an object of class "trsLims" as obtained from \code{\link{setLims}} or a list with elements x, y, and prepanelFn, that specify how to apply \code{\link{prepanel}} and \code{\link{setLims}}}
\item{cogFn}{a function that produces a single row of a data frame where each column is a cognostic feature. The function should take one argument, which will be the current split of the data being passed to it. Useful to test with cogFn(divExample(dat))}
\item{state}{if specified, this tells the viewer the default parameter settings (such as layout, sorting, filtering, etc.) to use when the display is viewed (see \code{\link{validateState}} for details)}
\item{preRender}{should the panels be pre-rendered and stored (\code{TRUE}), or rendered on-the-fly in the viewer (\code{FALSE}, default)? Default is recommended unless rendering is very expensive.}
\item{cogConn}{a connection to store the cognostics data. By default, this is \code{\link{dfCogConn}()}.}
\item{output}{how to store the panels and metadata for the display (unnecessary to specify in most cases -- see details)}
\item{conn}{VDB connection info, typically stored in options("vdbConn") at the beginning of a session, and not necessary to specify here if a valid "vdbConn" object exists}
\item{verbose}{print status messages?}
\item{params}{a named list of parameters external to the input data that are needed in the distributed computing (most should be taken care of automatically such that this is rarely necessary to specify)}
\item{packages}{a vector of R package names that contain functions used in \code{panelFn} or \code{cogFn} (most should be taken care of automatically such that this is rarely necessary to specify)}
\item{control}{parameters specifying how the backend should handle things (most-likely parameters to \code{rhwatch} in RHIPE) - see \code{\link[datadr]{rhipeControl}} and \code{\link[datadr]{localDiskControl}}}
}
\description{
Create a trelliscope display and add it to a visualization database (VDB)
}
\details{
Many of the parameters are optional or have defaults. For several examples, see the documentation on github: \url{http://hafen.github.io/trelliscope}
Panels by default are not pre-rendered. Instead, this function creates a display object and computes and stores the cognostics. Then panels are rendered on the fly. If a user would like to pre-render the images, then by default these will be stored to a local disk connection (see \code{\link{localDiskConn}}) inside the VDB directory, organized in subdirectories by group and name of the display. Optionally, the user can specify the \code{output} parameter to be any valid "kvConnection" object, as long as it is one that persists on disk (e.g. \code{\link{hdfsConn}}).
}
\examples{
# see docs
}
\author{
Ryan Hafen
}
\seealso{
\code{\link{prepanel}}, \code{\link{setLims}}, \code{\link{divide}}
}
|
/Analyze/man/makeDisplay.Rd
|
permissive
|
alacer/renaissance
|
R
| false
| false
| 4,272
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{makeDisplay}
\alias{makeDisplay}
\title{Create a Trelliscope Display}
\usage{
makeDisplay(data, name, group = "common", desc = "", height = 500,
width = 500, panelFn = NULL, lims = list(x = "free", y = "free",
prepanelFn = NULL), cogFn = NULL, state = NULL, preRender = FALSE,
cogConn = dfCogConn(), output = NULL, conn = getOption("vdbConn"),
verbose = TRUE, params = NULL, packages = NULL, control = NULL)
}
\arguments{
\item{data}{data of class "ddo" or "ddf" (see \code{\link{ddo}}, \code{\link{ddf}})}
\item{name}{the name of the display (no spaces or special characters)}
\item{group}{the group the display belongs to (displays are organized into groups). Defaults to "common"}
\item{desc}{a description of the display (used in the viewer and in notebooks)}
\item{height}{reference dimensions (in pixels) for each panel (panels will be resized based on available space in the viewer)}
\item{width}{reference dimensions (in pixels) for each panel (panels will be resized based on available space in the viewer)}
\item{panelFn}{a function that produces a plot and takes one argument, which will be the current split of the data being passed to it. Useful to test with panelFn(divExample(dat)). Must return either an object of class "ggplot", "trellis", or "expression" (of base plot commands)}
\item{lims}{either an object of class "trsLims" as obtained from \code{\link{setLims}} or a list with elements x, y, and prepanelFn, that specify how to apply \code{\link{prepanel}} and \code{\link{setLims}}}
\item{cogFn}{a function that produces a single row of a data frame where each column is a cognostic feature. The function should take one argument, which will be the current split of the data being passed to it. Useful to test with cogFn(divExample(dat))}
\item{state}{if specified, this tells the viewer the default parameter settings (such as layout, sorting, filtering, etc.) to use when the display is viewed (see \code{\link{validateState}} for details)}
\item{preRender}{should the panels be pre-rendered and stored (\code{TRUE}), or rendered on-the-fly in the viewer (\code{FALSE}, default)? Default is recommended unless rendering is very expensive.}
\item{cogConn}{a connection to store the cognostics data. By default, this is \code{\link{dfCogConn}()}.}
\item{output}{how to store the panels and metadata for the display (unnecessary to specify in most cases -- see details)}
\item{conn}{VDB connection info, typically stored in options("vdbConn") at the beginning of a session, and not necessary to specify here if a valid "vdbConn" object exists}
\item{verbose}{print status messages?}
\item{params}{a named list of parameters external to the input data that are needed in the distributed computing (most should be taken care of automatically such that this is rarely necessary to specify)}
\item{packages}{a vector of R package names that contain functions used in \code{panelFn} or \code{cogFn} (most should be taken care of automatically such that this is rarely necessary to specify)}
\item{control}{parameters specifying how the backend should handle things (most-likely parameters to \code{rhwatch} in RHIPE) - see \code{\link[datadr]{rhipeControl}} and \code{\link[datadr]{localDiskControl}}}
}
\description{
Create a trelliscope display and add it to a visualization database (VDB)
}
\details{
Many of the parameters are optional or have defaults. For several examples, see the documentation on github: \url{http://hafen.github.io/trelliscope}
Panels by default are not pre-rendered. Instead, this function creates a display object and computes and stores the cognostics. Then panels are rendered on the fly. If a user would like to pre-render the images, then by default these will be stored to a local disk connection (see \code{\link{localDiskConn}}) inside the VDB directory, organized in subdirectories by group and name of the display. Optionally, the user can specify the \code{output} parameter to be any valid "kvConnection" object, as long as it is one that persists on disk (e.g. \code{\link{hdfsConn}}).
}
\examples{
# see docs
}
\author{
Ryan Hafen
}
\seealso{
\code{\link{prepanel}}, \code{\link{setLims}}, \code{\link{divide}}
}
|
require(MASS)
data(iris)
#relatively_pure_example = c(1, 1, 1, 1, 1, 1, 1, 1, 1, 2)
#unpure_example = c(1, 2, 2, 1, 2)
#' Gini impurity of a vector of class labels.
#'
#' @param y vector of class labels (anything comparable with `==`).
#' @return sum over classes of f * (1 - f), where f is each class's relative
#'   frequency; 0 for a pure (single-class) vector.
getGiniImpurity <- function(y){
  # BUG FIX: the original looped over range(n), i.e. only the smallest and
  # largest unique label, silently skipping intermediate classes (e.g. class
  # 2 in c(1, 2, 3)) and double-counting the sole class of a pure vector.
  # Iterate over the unique classes themselves instead. Results are unchanged
  # for the binary 0/1 labels used by the calling script.
  classes = unique(y)
  sum_impurity = 0
  for(i in classes){
    f = sum(y == i) / length(y)
    sum_impurity = sum_impurity + f * (1 - f)
  }
  return(sum_impurity)
}
#print(getGiniImpurity(relatively_pure_example))
#print(getGiniImpurity(unpure_example))
# Exploratory decision-stump demo: find the Sepal.Length threshold that best
# separates setosa from the other species by exhaustive grid search.
summary(iris)
# first split, I am lazy atm, so just going to hard code
# Binary target: 1 for setosa, 0 otherwise.
iris$class_s = (iris$Species == "setosa") * 1
plot(iris$Sepal.Length, iris$class_s)
# Scan candidate thresholds from just above the minimum to the maximum
# Sepal.Length in steps of 0.05, tracking the split with lowest impurity.
split = min(iris$Sepal.Length) + 0.05
min_split = 999999
min_gini = 999999
while(split < max(iris$Sepal.Length)) {
gini1 = getGiniImpurity(iris[iris$Sepal.Length < split,][,"class_s"])
gini2 = getGiniImpurity(iris[iris$Sepal.Length >= split,][,"class_s"])
# NOTE(review): the two child impurities are summed unweighted; a standard
# CART split criterion would weight each by its child's fraction of rows.
totalGini = gini1 + gini2
if(totalGini < min_gini){
min_split <- split
min_gini <- totalGini
}
split = split + 0.05
}
print(min_split)
print(min_gini)
|
/re_implemented/tree/decision_tree.R
|
no_license
|
albertlim/Quick-Data-Science-Experiments-2016
|
R
| false
| false
| 989
|
r
|
require(MASS)
data(iris)
#relatively_pure_example = c(1, 1, 1, 1, 1, 1, 1, 1, 1, 2)
#unpure_example = c(1, 2, 2, 1, 2)
#' Gini impurity of a vector of class labels.
#'
#' @param y vector of class labels (anything comparable with `==`).
#' @return sum over classes of f * (1 - f), where f is each class's relative
#'   frequency; 0 for a pure (single-class) vector.
getGiniImpurity <- function(y){
  # BUG FIX: the original looped over range(n), i.e. only the smallest and
  # largest unique label, silently skipping intermediate classes (e.g. class
  # 2 in c(1, 2, 3)) and double-counting the sole class of a pure vector.
  # Iterate over the unique classes themselves instead. Results are unchanged
  # for the binary 0/1 labels used by the calling script.
  classes = unique(y)
  sum_impurity = 0
  for(i in classes){
    f = sum(y == i) / length(y)
    sum_impurity = sum_impurity + f * (1 - f)
  }
  return(sum_impurity)
}
#print(getGiniImpurity(relatively_pure_example))
#print(getGiniImpurity(unpure_example))
# Exploratory decision-stump demo: find the Sepal.Length threshold that best
# separates setosa from the other species by exhaustive grid search.
summary(iris)
# first split, I am lazy atm, so just going to hard code
# Binary target: 1 for setosa, 0 otherwise.
iris$class_s = (iris$Species == "setosa") * 1
plot(iris$Sepal.Length, iris$class_s)
# Scan candidate thresholds from just above the minimum to the maximum
# Sepal.Length in steps of 0.05, tracking the split with lowest impurity.
split = min(iris$Sepal.Length) + 0.05
min_split = 999999
min_gini = 999999
while(split < max(iris$Sepal.Length)) {
gini1 = getGiniImpurity(iris[iris$Sepal.Length < split,][,"class_s"])
gini2 = getGiniImpurity(iris[iris$Sepal.Length >= split,][,"class_s"])
# NOTE(review): the two child impurities are summed unweighted; a standard
# CART split criterion would weight each by its child's fraction of rows.
totalGini = gini1 + gini2
if(totalGini < min_gini){
min_split <- split
min_gini <- totalGini
}
split = split + 0.05
}
print(min_split)
print(min_gini)
|
/classwork1/arithmetics.R
|
permissive
|
KichiginaTatiana/MD-DA-2017
|
R
| false
| false
| 273
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxillary-methods.R
\docType{methods}
\name{getAlleleQuality}
\alias{getAlleleQuality}
\alias{getAlleleQuality,GAlignmentsList-method}
\title{snp quality data}
\usage{
getAlleleQuality(BamList, ...)
\S4method{getAlleleQuality}{GAlignmentsList}(
BamList,
GRvariants,
fastq.format = "illumina.1.8",
return.class = "array",
verbose = TRUE,
...
)
}
\arguments{
\item{BamList}{A \code{GAlignmentsList object} or \code{GRangesList object}
containing data imported from a bam file}
\item{...}{parameters to pass on}
\item{GRvariants}{A \code{GRanges object} that contains positions of SNPs to
retrieve.}
\item{fastq.format}{default 'illumina.1.8'}
\item{return.class}{'list' or 'array'}
\item{verbose}{Setting \code{verbose=TRUE} makes function more talkative}
}
\value{
\code{getAlleleQuality} returns a list of several data.frame objects,
each storing the count data for one SNP.
}
\description{
Given the positions of known SNPs, this function returns allele quality from
a BamGRL object
}
\details{
This function is used to retrieve the allele quality strings from specified positions
in a set of RNA-seq reads. The \code{BamList} argument will typically have
been created using the \code{impBamGAL} function on bam-files. The
\code{GRvariants} is either a GRanges with user-specified locations or else
it is generated through scanning the same bam-files as in \code{BamList} for
heterozygote locations (e.g. using \code{scanForHeterozygotes}). The
GRvariants will currently only accept locations having width=1,
corresponding to bi-allelic SNPs. The strand type information will be kept in the
returned object. If the strand is marked as unknown "*", it will be forced to the "+"
strand.
quality information is extracted from the BamList object, and requires the presence of
mcols(BamList)[["qual"]] to contain quality sequences.
}
\examples{
#load example data
data(reads)
data(GRvariants)
#get counts at the three positions specified in GRvariants
alleleQualityArray <- getAlleleQuality(BamList=reads,GRvariants)
#place in ASEset object
alleleCountsArray <- getAlleleCounts(BamList=reads,GRvariants,
strand='*', return.class="array")
a <- ASEsetFromArrays(GRvariants, countsUnknown = alleleCountsArray)
aquals(a) <- alleleQualityArray
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\keyword{allele}
\keyword{quality}
|
/man/getAlleleQuality.Rd
|
no_license
|
pappewaio/AllelicImbalance
|
R
| false
| true
| 2,439
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/auxillary-methods.R
\docType{methods}
\name{getAlleleQuality}
\alias{getAlleleQuality}
\alias{getAlleleQuality,GAlignmentsList-method}
\title{snp quality data}
\usage{
getAlleleQuality(BamList, ...)
\S4method{getAlleleQuality}{GAlignmentsList}(
BamList,
GRvariants,
fastq.format = "illumina.1.8",
return.class = "array",
verbose = TRUE,
...
)
}
\arguments{
\item{BamList}{A \code{GAlignmentsList object} or \code{GRangesList object}
containing data imported from a bam file}
\item{...}{parameters to pass on}
\item{GRvariants}{A \code{GRanges object} that contains positions of SNPs to
retrieve.}
\item{fastq.format}{default 'illumina.1.8'}
\item{return.class}{'list' or 'array'}
\item{verbose}{Setting \code{verbose=TRUE} makes function more talkative}
}
\value{
\code{getAlleleQuality} returns a list of several data.frame objects,
each storing the count data for one SNP.
}
\description{
Given the positions of known SNPs, this function returns allele quality from
a BamGRL object
}
\details{
This function is used to retrieve the allele quality strings from specified positions
in a set of RNA-seq reads. The \code{BamList} argument will typically have
been created using the \code{impBamGAL} function on bam-files. The
\code{GRvariants} is either a GRanges with user-specified locations or else
it is generated through scanning the same bam-files as in \code{BamList} for
heterozygote locations (e.g. using \code{scanForHeterozygotes}). The
GRvariants will currently only accept locations having width=1,
corresponding to bi-allelic SNPs. The strand type information will be kept in the
returned object. If the strand is marked as unknown "*", it will be forced to the "+"
strand.
quality information is extracted from the BamList object, and requires the presence of
mcols(BamList)[["qual"]] to contain quality sequences.
}
\examples{
#load example data
data(reads)
data(GRvariants)
#get counts at the three positions specified in GRvariants
alleleQualityArray <- getAlleleQuality(BamList=reads,GRvariants)
#place in ASEset object
alleleCountsArray <- getAlleleCounts(BamList=reads,GRvariants,
strand='*', return.class="array")
a <- ASEsetFromArrays(GRvariants, countsUnknown = alleleCountsArray)
aquals(a) <- alleleQualityArray
}
\author{
Jesper R. Gadin, Lasse Folkersen
}
\keyword{allele}
\keyword{quality}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opt_function.R
\name{opt}
\alias{opt}
\title{Calculate the Life Orientation Test - Revised (LOT-R)}
\usage{
opt(start_col, end_col, data)
}
\arguments{
\item{start_col}{The column number where the scale begins, reference by number only.}
\item{end_col}{The column number where the scale ends, reference by number only.}
\item{data}{The reference dataframe.}
}
\description{
This function renames columns; converts all columns to numerics; tests if the scores are outside of the scale limits;
recodes requisite columns; and calculates a LOT-R score.
}
\note{
This function is designed to work with the validated question order as printed in the reference article. This function will
give inaccurate results if question order is different from the published validated scale.
}
\examples{
x <- c(0:4)
df <- data.frame(matrix(sample(x, 10*10, replace = TRUE), nrow = 5, ncol = 10))
opt(1, 10, df)
}
\references{
Scheier, M. F., Carver, C. S., & Bridges, M. W. (1994). Distinguishing optimism from neuroticism (and trait anxiety, self-mastery, and self-esteem): A reevaluation of the Life Orientation Test. Journal of Personality and Social Psychology, 67(6), 1063-1078.
}
\keyword{-}
\keyword{Life}
\keyword{Optimism,}
\keyword{Orientation}
\keyword{Revised}
\keyword{Test}
|
/man/opt.Rd
|
no_license
|
jonathanbart/baxtr
|
R
| false
| true
| 1,348
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opt_function.R
\name{opt}
\alias{opt}
\title{Calculate the Life Orientation Test - Revised (LOT-R)}
\usage{
opt(start_col, end_col, data)
}
\arguments{
\item{start_col}{The column number where the scale begins, reference by number only.}
\item{end_col}{The column number where the scale ends, reference by number only.}
\item{data}{The reference dataframe.}
}
\description{
This function renames columns; converts all columns to numerics; tests if the scores are outside of the scale limits;
recodes requisite columns; and calculates a LOT-R score.
}
\note{
This function is designed to work with the validated question order as printed in the reference article. This function will
give inaccurate results if question order is different from the published validated scale.
}
\examples{
x <- c(0:4)
df <- data.frame(matrix(sample(x, 10*10, replace = TRUE), nrow = 5, ncol = 10))
opt(1, 10, df)
}
\references{
Scheier, M. F., Carver, C. S., & Bridges, M. W. (1994). Distinguishing optimism from neuroticism (and trait anxiety, self-mastery, and self-esteem): A reevaluation of the Life Orientation Test. Journal of Personality and Social Psychology, 67(6), 1063-1078.
}
\keyword{-}
\keyword{Life}
\keyword{Optimism,}
\keyword{Orientation}
\keyword{Revised}
\keyword{Test}
|
# Quantile-regression "check" (pinball) loss, vectorized over x.
# Residuals below zero are weighted by (1 - p), residuals above zero by p,
# and x = 0 contributes 0 -- identical to the piecewise form
# (x < 0) * -1 * x * (1 - p) + (x > 0) * p * x, written as one closed-form
# expression: the logical (x < 0) coerces to 0/1.
checkFcn <- function(x, p=.5){
  x * (p - (x < 0))
}
|
/factorQR/R/checkFcn.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 74
|
r
|
# Quantile-regression "check" (pinball) loss, vectorized over x.
# Residuals below zero are weighted by (1 - p), residuals above zero by p,
# and x = 0 contributes 0 -- identical to the piecewise form
# (x < 0) * -1 * x * (1 - p) + (x > 0) * p * x, written as one closed-form
# expression: the logical (x < 0) coerces to 0/1.
checkFcn <- function(x, p=.5){
  x * (p - (x < 0))
}
|
#' Mirror an igraph object into Neo4j.
#'
#' Creates one Neo4j node per vertex (using the vertex name) and one
#' relationship per edge.
#'
#' @param igr an igraph graph object.
#' @param label node label to assign in Neo4j (default 'b').
#' @param type relationship type to assign in Neo4j (default 'bb').
#' @return a list with elements `nodes` (created node objects) and
#'   `edges` (created relationship objects).
#' NOTE(review): `graph` (the Neo4j connection passed to createNode) is read
#' from the enclosing/global environment, not passed in -- verify it is set
#' before calling.
igraph2neo4j <- function(igr, label = 'b', type = 'bb') {
  library(d3Network)
  library(igraph)
  # Fall back to vertex ids as names when the graph has no name attribute.
  if ( is.null(V(igr)$name) ) V(igr)$name <- as.character(V(igr))
  n <- lapply(V(igr)$name, function(x)
    createNode(graph, label, name = x))
  # BUG FIX: the original indexed n[[e[1]]], but `e` is undefined; both edge
  # endpoints come from the current row of the edge matrix, bound to `x`.
  x <- apply(get.edges(igr, seq(E(igr))), 1,
             function(x) createRel(n[[x[1]]], type, n[[x[2]]]))
  list(nodes = n, edges = x)
}
|
/rcode/igraph2neo4j.R
|
no_license
|
darrkj/darrkj.github.io
|
R
| false
| false
| 389
|
r
|
#' Mirror an igraph object into Neo4j.
#'
#' Creates one Neo4j node per vertex (using the vertex name) and one
#' relationship per edge.
#'
#' @param igr an igraph graph object.
#' @param label node label to assign in Neo4j (default 'b').
#' @param type relationship type to assign in Neo4j (default 'bb').
#' @return a list with elements `nodes` (created node objects) and
#'   `edges` (created relationship objects).
#' NOTE(review): `graph` (the Neo4j connection passed to createNode) is read
#' from the enclosing/global environment, not passed in -- verify it is set
#' before calling.
igraph2neo4j <- function(igr, label = 'b', type = 'bb') {
  library(d3Network)
  library(igraph)
  # Fall back to vertex ids as names when the graph has no name attribute.
  if ( is.null(V(igr)$name) ) V(igr)$name <- as.character(V(igr))
  n <- lapply(V(igr)$name, function(x)
    createNode(graph, label, name = x))
  # BUG FIX: the original indexed n[[e[1]]], but `e` is undefined; both edge
  # endpoints come from the current row of the edge matrix, bound to `x`.
  x <- apply(get.edges(igr, seq(E(igr))), 1,
             function(x) createRel(n[[x[1]]], type, n[[x[2]]]))
  list(nodes = n, edges = x)
}
|
#' abetadms__run_bayesian_double_fitness
#'
#' Estimate fitness of double mutants using bayesian framework.
#'
#' @param wt_dt data.table with WT (required)
#' @param singles_dt data.table with single AA mutants (required)
#' @param doubles_dt data.table with double AA mutants (required)
#' @param outpath output path for plots and saved objects (required)
#' @param all_reps list of replicates to retain (required)
#' @param min_mean_input_read_count minimum mean input read count for high confidence variants (default:10). NOTE(review): not referenced anywhere in this function body -- confirm whether it should gate anything here.
#' @param min_input_read_count_doubles minimum input read count for doubles used to derive prior for Bayesian doubles correction (default:50)
#' @param lam_d step size, on the log scale, of the lambda grid used for the Poisson score likelihood (default:0.025)
#' @param numCores Number of available CPU cores (default:10)
#'
#' @return doubles_dt with per-replicate fitness*_cond / sigma*_cond columns and merged fitness_cond / sigma_cond columns added
#' @export
#' @import data.table
abetadms__run_bayesian_double_fitness <- function(
wt_dt,
singles_dt,
doubles_dt,
outpath,
all_reps,
min_mean_input_read_count = 10,
min_input_read_count_doubles = 50,
lam_d = 0.025,
numCores = 10
){
#Display status
message(paste("\n\n*******", "Estimating fitness of double mutants using bayesian framework (this might take a while; you may want to adjust 'numCores' argument, DEFAULT=10)", "*******\n\n"))
### Globals
###########################
all_reps_str <- paste0(all_reps, collapse = "")
### Bayesian framework for fitness estimation for double mutants
###########################
#Bin mean counts for replicate 1
doubles_dt[,counts_for_bins := .SD[[1]],,.SDcols = paste0("count_e",all_reps[1],"_s0")]
doubles_dt[,bin_count := findInterval(log10(counts_for_bins),seq(0.5,4,0.25))]
doubles_dt[,.(.N,mean(counts_for_bins)),bin_count][order(bin_count)]
#Plot fitness densities for different mean count bins (replicate 1)
doubles_dt[,fitness_for_bins := .SD[[1]],,.SDcols = paste0("fitness",all_reps[1],"_uncorr")]
d <- ggplot2::ggplot(doubles_dt[between(bin_count,2,8)],ggplot2::aes(fitness_for_bins,..density..,color=factor(bin_count))) +
ggplot2::geom_density(adjust=1)
ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework1.pdf"), d, width = 7, height = 5, useDingbats=FALSE)
#Plot fitness densities for mean counts greater/less than 50 (replicate 1)
d <- ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_for_bins,..density..,color=counts_for_bins >= min_input_read_count_doubles)) +
ggplot2::geom_density(adjust=1)
ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework2.pdf"), d, width = 7, height = 5, useDingbats=FALSE)
#>> try to estimate what fitness scores are for variants with low sequence coverage
# use double mutants with variants >= min_input_read_count_doubles counts
# save.image(file = file.path(outpath, "Rsession1.RData"))
## calculate posterior double mutant fitness based on prior from single mutants
# Worker function run on each cluster node: for row i of double_data, builds
# a Poisson likelihood over a log-lambda grid for the input/output counts and
# combines it with a prior density estimated from the Nneighbours doubles
# closest in single-mutant fitness space (F1, F2). Returns the posterior mean
# and sd of the score.
# NOTE(review): relies on double_data, lam_d, wt_corr, score_prior_cond and
# Nneighbours being present in each worker's workspace -- they are exported
# via parallel::clusterExport in the replicate loop below.
postpois_conditioned_singleF <- function(i){
require(data.table)
count_in = double_data[i,count_in]
count_out = double_data[i,count_out]
lam_in = exp(seq(floor(log(count_in+0.1)-max(c(0.5,1/log10(count_in+1.75)))),(log(count_in+0.1)+max(c(0.5,1/log10(count_in+1.75)))),lam_d))
lam_out = exp(seq(floor(log(count_out+0.1)-max(c(0.5,1/log10(count_out+1.75)))),(log(count_out+0.1)+max(c(0.5,1/log10(count_out+1.75)))),lam_d))
lam_low = range(log(lam_out))[1] - range(log(lam_in))[2]
lam_high = range(log(lam_out))[2] - range(log(lam_in))[1]
# idx labels the anti-diagonals of the (lam_out x lam_in) grid, i.e. all
# pairs with the same log-ratio; likelihood sums the joint Poisson density
# along each anti-diagonal.
idx = row(matrix(NA,nrow=length(lam_out),ncol=length(lam_in))) - col(matrix(NA,nrow=length(lam_out),ncol=length(lam_in)))
likelihood = sapply(split(outer(dpois(count_out,lambda = lam_out),dpois(count_in,lambda = lam_in)),idx),sum)
score_prior = density(score_prior_cond[,.(fdist = sqrt((double_data[i,F1]-F1)^2+(double_data[i,F2]-F2)^2),F)][
order(fdist)][1:Nneighbours,F],
from = (lam_low-wt_corr),
to = (lam_high-wt_corr),
n = as.integer(as.character(round((lam_high-lam_low)/lam_d + 1)))) #super weird bug
posterior = score_prior$y*likelihood
moments = list()
moments[1] = weighted.mean(x = score_prior$x,w = posterior)
moments[2] = sqrt(sum(( moments[[1]]-score_prior$x)^2 * posterior)/
sum(posterior))
return(moments)
}
# Setup cluster
clust <- parallel::makeCluster(numCores) #This line will take time
#Calculate conditional fitness and sigma
# One pass per replicate E: build double_data (counts + uncorrected fitness
# joined to both single-mutant fitnesses), export it to the workers, run the
# posterior calculation in parallel, then merge the results back.
for (E in all_reps) {
#wildtype "correction" to calculate scores
wt_corr <- wt_dt[,log(unlist(.SD[,1]) / unlist(.SD[,2])),,.SDcols = c(paste0("count_e",E,"_s1"),paste0("count_e",E,"_s0"))]
#data for prior calculation
double_data <- doubles_dt[!is.na(get(paste0("fitness",E,"_uncorr"))),.(Pos1,Mut1,Pos2,Mut2,count_in = unlist(.SD[,1]),count_out = unlist(.SD[,2]),
F = unlist(.SD[,3])),,
.SDcols = c(paste0("count_e",E,"_s0"),paste0("count_e",E,"_s1"),paste0("fitness",E,"_uncorr"))]
# double_data = merge(double_data,singles_dt[,.(Pos,Mut,F1 = .SD),,.SDcols = paste0("fitness",E)],by.x = c("Pos1","Mut1"),by.y = c("Pos","Mut"))
double_data <- merge(double_data,singles_dt[!is.na(singles_dt[,paste0("fitness",E)]) & is.reads0==T,.(Pos,Mut,F1 = .SD),,.SDcols = paste0("fitness",E)],by.x = c("Pos1","Mut1"),by.y = c("Pos","Mut"))
# double_data = merge(double_data,singles_dt[,.(Pos,Mut,F2 = .SD),,.SDcols = paste0("fitness",E)],by.x = c("Pos2","Mut2"),by.y = c("Pos","Mut"))
double_data <- merge(double_data,singles_dt[!is.na(singles_dt[,paste0("fitness",E)]) & is.reads0==T,.(Pos,Mut,F2 = .SD),,.SDcols = paste0("fitness",E)],by.x = c("Pos2","Mut2"),by.y = c("Pos","Mut"))
Nneighbours <- 500
score_prior_cond <- double_data[count_in >= min_input_read_count_doubles & F > -Inf & F1 > -Inf & F2 > -Inf]
# set seed stream and make variables available to each core's workspace
parallel::clusterSetRNGStream(cl = clust,1234567)
parallel::clusterExport(clust, list("double_data","lam_d","wt_corr","score_prior_cond","Nneighbours"), envir = environment())
#posterior fitness conditioned on single fitness
t=proc.time()
helper <- parallel::parSapply(clust,X = 1:nrow(double_data), postpois_conditioned_singleF)
print(proc.time()-t)
# Each worker returns a 2-element list (mean, sd); unpack into a 2-row matrix.
helper1 <- matrix(unlist(helper),nrow=2)
double_data[,paste0("fitness",E,"_cond") := helper1[1,]]
double_data[,paste0("sigma",E,"_cond") := helper1[2,]]
doubles_dt <- merge(doubles_dt, double_data[,.SD,,.SDcols = c("Pos1", "Pos2", "Mut1", "Mut2", paste0("fitness",E,"_cond"), paste0("sigma",E,"_cond"))], by = c("Pos1", "Pos2", "Mut1", "Mut2"), all.x = T)
}
parallel::stopCluster(clust)
#Scatterplot matrix - singles
d <- GGally::ggpairs(singles_dt[Nham_aa==1,grep(names(singles_dt),pattern="fitness"),with=F],
upper=list(continuous = "cor"))
ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework_scattermatrix_singles.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
#Scatterplot matrix - doubles, uncorrected
set.seed(1)
d <- GGally::ggpairs(doubles_dt[apply(doubles_dt[,.SD,,.SDcols = paste0("fitness",all_reps,"_uncorr")]==(-Inf), 1, sum)==0
][sample(x = .N,1000),grep(names(doubles_dt),pattern=paste0("fitness[", all_reps_str, "]_uncorr")),with=F],
upper=list(continuous = "cor"))
ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework_scattermatrix_doubles_uncorr.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
#Scatterplot matrix - doubles, conditional
set.seed(1)
d <- GGally::ggpairs(doubles_dt[apply(doubles_dt[,.SD,,.SDcols = paste0("fitness",all_reps,"_uncorr")]==(-Inf), 1, sum)==0
][sample(x = .N,1000),grep(names(doubles_dt),pattern=paste0("fitness[", all_reps_str, "]_cond")),with=F],
upper=list(continuous = "cor"))
ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework_scattermatrix_doubles_cond.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
### Merge fitness values
###########################
#### doubles
# #uncorrected fitness
# fitness_rx = doubles_dt[,.SD,.SDcols = grep(paste0("fitness[", all_reps_str, "]_uncorr"),colnames(doubles_dt))]
# # sigma_rx = sqrt(doubles_dt[,.SD,.SDcols = grep(paste0("sigma[", all_reps_str, "]_uncorr"),colnames(doubles_dt))]^2 +
# # matrix(replicate_error^2,nrow = dim(fitness_rx)[1],ncol = dim(fitness_rx)[2]))
# sigma_rx = doubles_dt[,.SD,.SDcols = grep(paste0("sigma[", all_reps_str, "]_uncorr"),colnames(doubles_dt))]
# # sigma_s2 = random_effect_model(fitness_rx,sigma_rx)
# # doubles_dt[,fitness_uncorr := rowSums(fitness_rx/(sigma_rx^2 + sigma_s2),na.rm=T)/rowSums(1/(sigma_rx^2 + sigma_s2),na.rm=T)]
# doubles_dt[,fitness_uncorr := rowSums(fitness_rx/(sigma_rx^2),na.rm=T)/rowSums(1/(sigma_rx^2),na.rm=T)]
# doubles_dt[,sigma_uncorr := sqrt(1/rowSums(1/(sigma_rx^2+sigma_s2),na.rm=T))]
# doubles_dt[,sigma_uncorr := sqrt(1/rowSums(1/(sigma_rx^2),na.rm=T))]
# NOTE(review): fitness_uncorr / sigma_uncorr are plotted below but the code
# computing them just above is commented out -- presumably these columns are
# set upstream of this function; verify before relying on this plot.
d <- ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_uncorr,sigma_uncorr)) +
ggplot2::geom_hex() +
ggplot2::scale_y_log10() +
ggplot2::coord_cartesian(ylim=c(0.01,10))
ggplot2::ggsave(file.path(outpath, "5_sigma_vs_fitness_doubles_uncorr.pdf"), d, width = 5, height = 5, useDingbats=FALSE)
#conditioned fitness
fitness_rx = doubles_dt[,.SD,.SDcols = grep(paste0("fitness[", all_reps_str, "]_cond"),colnames(doubles_dt))]
# sigma_rx = sqrt(doubles_dt[,.SD,.SDcols = grep(paste0("sigma[", all_reps_str, "]_cond"),colnames(doubles_dt))]^2 +
# matrix(replicate_error^2,nrow = dim(fitness_rx)[1],ncol = dim(fitness_rx)[2]))
# NOTE(review): sigma_rx selects the "_uncorr" sigma columns here even though
# this is the conditioned-fitness merge and the commented-out variant above
# uses "_cond" -- confirm which is intended.
sigma_rx = doubles_dt[,.SD,.SDcols = grep(paste0("sigma[", all_reps_str, "]_uncorr"),colnames(doubles_dt))]
# sigma_s2 = random_effect_model(fitness_rx,sigma_rx)
# doubles_dt[,fitness_cond := rowSums(fitness_rx/(sigma_rx^2 + sigma_s2),na.rm=T)/rowSums(1/(sigma_rx^2 + sigma_s2),na.rm=T)]
# Inverse-variance weighted average of the per-replicate conditional fitness.
doubles_dt[,fitness_cond := rowSums(fitness_rx/(sigma_rx^2),na.rm=T)/rowSums(1/(sigma_rx^2),na.rm=T)]
# doubles_dt[,sigma_cond := sqrt(1/rowSums(1/(sigma_rx^2+sigma_s2),na.rm=T))]
doubles_dt[,sigma_cond := sqrt(1/rowSums(1/(sigma_rx^2),na.rm=T))]
d <- ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_cond,sigma_cond)) +
ggplot2::geom_hex() +
ggplot2::scale_y_log10() +
ggplot2::coord_cartesian(ylim=c(0.01,10))
ggplot2::ggsave(file.path(outpath, "5_sigma_vs_fitness_doubles_cond.pdf"), d, width = 5, height = 5, useDingbats=FALSE)
#Plot to compare double fitness estimates
p1=ggplot2::ggplot(doubles_dt,ggplot2::aes(mean_count,fitness_uncorr)) +
ggplot2::geom_hex() +
ggplot2::scale_x_log10() +
ggplot2::scale_fill_continuous(trans="log10")
p2=ggplot2::ggplot(doubles_dt,ggplot2::aes(mean_count,fitness_cond)) +
ggplot2::geom_hex()+
ggplot2::scale_x_log10() +
ggplot2::scale_fill_continuous(trans="log10")
p3=ggplot2::ggplot(doubles_dt[between(bin_count,2,8)],ggplot2::aes(fitness_uncorr,..scaled..,color=factor(bin_count))) +
ggplot2::geom_density(adjust=1)
p4=ggplot2::ggplot(doubles_dt[between(bin_count,2,8)],ggplot2::aes(fitness_cond,..scaled..,color=factor(bin_count))) +
ggplot2::geom_density(adjust=1)
p5=ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_uncorr,sigma_uncorr)) +
ggplot2::geom_hex() +
ggplot2::scale_y_log10()# +
# ggplot2::coord_cartesian(ylim = c(0.05,2))
p6=ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_cond,sigma_cond)) +
ggplot2::geom_hex()+
ggplot2::scale_y_log10()# +
# ggplot2::coord_cartesian(ylim = c(0.05,2))
ggplot2::theme_set(ggplot2::theme_minimal())
#Plot
d <- cowplot::plot_grid(plotlist = list(p1,p2,p3,p4,p5,p6),nrow=3)
rm(p1,p2,p3,p4,p5,p6)
ggplot2::ggsave(file.path(outpath, "5_doubles_fitness_estimates.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
#Plot fitness values against each other
set.seed(1)
d <- GGally::ggpairs(doubles_dt[sample(.N,1000),.(fitness_uncorr,fitness_cond)])
ggplot2::ggsave(file.path(outpath, "5_doubles_fitness_estimates_scattermatrix.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
#Plot sigma values against each other
d <- GGally::ggpairs(doubles_dt[,.(sigma_uncorr,sigma_cond)])
ggplot2::ggsave(file.path(outpath, "5_doubles_sigma_estimates_scattermatrix.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
return(doubles_dt)
}
|
/R/abetadms__run_bayesian_double_fitness.R
|
permissive
|
lehner-lab/abetadms
|
R
| false
| false
| 12,399
|
r
|
#' abetadms__run_bayesian_double_fitness
#'
#' Estimate fitness of double mutants using bayesian framework: per-replicate
#' posterior fitness for each double is computed from a Poisson read-count
#' likelihood combined with a prior built from the nearest neighbours (in
#' single-mutant fitness space) among well-covered doubles.
#'
#' @param wt_dt data.table with WT (required)
#' @param singles_dt data.table with single AA mutants (required)
#' @param doubles_dt data.table with double AA mutants (required)
#' @param outpath output path for plots and saved objects (required)
#' @param all_reps list of replicates to retain (required)
#' @param min_mean_input_read_count minimum mean input read count for high confidence variants; not referenced within this function body (default:10)
#' @param min_input_read_count_doubles minimum input read count for doubles used to derive prior for Bayesian doubles correction (default:50)
#' @param lam_d Poisson distribution for score likelihood (default:0.025)
#' @param numCores Number of available CPU cores (default:10)
#'
#' @return doubles_dt with per-replicate conditional estimates ("fitnessN_cond", "sigmaN_cond") plus merged "fitness_cond"/"sigma_cond" columns added
#' @export
#' @import data.table
abetadms__run_bayesian_double_fitness <- function(
  wt_dt,
  singles_dt,
  doubles_dt,
  outpath,
  all_reps,
  min_mean_input_read_count = 10,
  min_input_read_count_doubles = 50,
  lam_d = 0.025,
  numCores = 10
  ){
  #Display status
  message(paste("\n\n*******", "Estimating fitness of double mutants using bayesian framework (this might take a while; you may want to adjust 'numCores' argument, DEFAULT=10)", "*******\n\n"))
  ### Globals
  ###########################
  all_reps_str <- paste0(all_reps, collapse = "")
  ### Bayesian framework for fitness estimation for double mutants
  ###########################
  #Bin mean counts for replicate 1
  doubles_dt[,counts_for_bins := .SD[[1]],,.SDcols = paste0("count_e",all_reps[1],"_s0")]
  doubles_dt[,bin_count := findInterval(log10(counts_for_bins),seq(0.5,4,0.25))]
  # Summary of bin occupancy (printed when run interactively; result not stored).
  doubles_dt[,.(.N,mean(counts_for_bins)),bin_count][order(bin_count)]
  #Plot fitness densities for different mean count bins (replicate 1)
  doubles_dt[,fitness_for_bins := .SD[[1]],,.SDcols = paste0("fitness",all_reps[1],"_uncorr")]
  d <- ggplot2::ggplot(doubles_dt[between(bin_count,2,8)],ggplot2::aes(fitness_for_bins,..density..,color=factor(bin_count))) +
    ggplot2::geom_density(adjust=1)
  ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework1.pdf"), d, width = 7, height = 5, useDingbats=FALSE)
  #Plot fitness densities for mean counts greater/less than 50 (replicate 1)
  d <- ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_for_bins,..density..,color=counts_for_bins >= min_input_read_count_doubles)) +
    ggplot2::geom_density(adjust=1)
  ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework2.pdf"), d, width = 7, height = 5, useDingbats=FALSE)
  #>> try to estimate what fitness scores are for variants with low sequence coverage
  # use double mutants with variants >= min_input_read_count_doubles counts
  # save.image(file = file.path(outpath, "Rsession1.RData"))
  ## calculate posterior double mutant fitness based on prior from single mutants
  # Worker run on each cluster node: for row i of double_data, builds a grid of
  # input/output Poisson rates around the observed counts, a likelihood over the
  # implied score, and a prior from the Nneighbours doubles closest in
  # (F1, F2) single-fitness space; returns posterior mean and sd of the score.
  # Reads double_data, lam_d, wt_corr, score_prior_cond, Nneighbours from the
  # worker's global environment (exported via clusterExport below).
  postpois_conditioned_singleF <- function(i){
    require(data.table)
    count_in = double_data[i,count_in]
    count_out = double_data[i,count_out]
    # Log-spaced candidate Poisson rates around each observed count; the
    # half-width shrinks as counts grow (tighter likelihood for deep coverage).
    lam_in = exp(seq(floor(log(count_in+0.1)-max(c(0.5,1/log10(count_in+1.75)))),(log(count_in+0.1)+max(c(0.5,1/log10(count_in+1.75)))),lam_d))
    lam_out = exp(seq(floor(log(count_out+0.1)-max(c(0.5,1/log10(count_out+1.75)))),(log(count_out+0.1)+max(c(0.5,1/log10(count_out+1.75)))),lam_d))
    lam_low = range(log(lam_out))[1] - range(log(lam_in))[2]
    lam_high = range(log(lam_out))[2] - range(log(lam_in))[1]
    # Anti-diagonal index: groups (out, in) rate pairs by their log-ratio.
    idx = row(matrix(NA,nrow=length(lam_out),ncol=length(lam_in))) - col(matrix(NA,nrow=length(lam_out),ncol=length(lam_in)))
    likelihood = sapply(split(outer(dpois(count_out,lambda = lam_out),dpois(count_in,lambda = lam_in)),idx),sum)
    # Prior: kernel density of fitness among the Nneighbours doubles nearest in
    # single-mutant fitness space, evaluated on the same score grid.
    score_prior = density(score_prior_cond[,.(fdist = sqrt((double_data[i,F1]-F1)^2+(double_data[i,F2]-F2)^2),F)][
      order(fdist)][1:Nneighbours,F],
      from = (lam_low-wt_corr),
      to = (lam_high-wt_corr),
      n = as.integer(as.character(round((lam_high-lam_low)/lam_d + 1)))) #super weird bug
    posterior = score_prior$y*likelihood
    moments = list()
    moments[1] = weighted.mean(x = score_prior$x,w = posterior)
    moments[2] = sqrt(sum(( moments[[1]]-score_prior$x)^2 * posterior)/
                        sum(posterior))
    return(moments)
  }
  # Setup cluster
  clust <- parallel::makeCluster(numCores) #This line will take time
  #Calculate conditional fitness and sigma
  for (E in all_reps) {
    #wildtype "correction" to calculate scores
    wt_corr <- wt_dt[,log(unlist(.SD[,1]) / unlist(.SD[,2])),,.SDcols = c(paste0("count_e",E,"_s1"),paste0("count_e",E,"_s0"))]
    #data for prior calculation
    double_data <- doubles_dt[!is.na(get(paste0("fitness",E,"_uncorr"))),.(Pos1,Mut1,Pos2,Mut2,count_in = unlist(.SD[,1]),count_out = unlist(.SD[,2]),
                                                                            F = unlist(.SD[,3])),,
                              .SDcols = c(paste0("count_e",E,"_s0"),paste0("count_e",E,"_s1"),paste0("fitness",E,"_uncorr"))]
    # double_data = merge(double_data,singles_dt[,.(Pos,Mut,F1 = .SD),,.SDcols = paste0("fitness",E)],by.x = c("Pos1","Mut1"),by.y = c("Pos","Mut"))
    # Attach the constituent single-mutant fitness values (F1, F2); doubles whose
    # singles lack fitness or sufficient input reads are dropped by the inner join.
    double_data <- merge(double_data,singles_dt[!is.na(singles_dt[,paste0("fitness",E)]) & is.reads0==T,.(Pos,Mut,F1 = .SD),,.SDcols = paste0("fitness",E)],by.x = c("Pos1","Mut1"),by.y = c("Pos","Mut"))
    # double_data = merge(double_data,singles_dt[,.(Pos,Mut,F2 = .SD),,.SDcols = paste0("fitness",E)],by.x = c("Pos2","Mut2"),by.y = c("Pos","Mut"))
    double_data <- merge(double_data,singles_dt[!is.na(singles_dt[,paste0("fitness",E)]) & is.reads0==T,.(Pos,Mut,F2 = .SD),,.SDcols = paste0("fitness",E)],by.x = c("Pos2","Mut2"),by.y = c("Pos","Mut"))
    # Number of nearest neighbours used for the fitness prior.
    Nneighbours <- 500
    score_prior_cond <- double_data[count_in >= min_input_read_count_doubles & F > -Inf & F1 > -Inf & F2 > -Inf]
    # set seed stream and make variables available to each core's workspace
    parallel::clusterSetRNGStream(cl = clust,1234567)
    parallel::clusterExport(clust, list("double_data","lam_d","wt_corr","score_prior_cond","Nneighbours"), envir = environment())
    #posterior fitness conditioned on single fitness
    t=proc.time()
    helper <- parallel::parSapply(clust,X = 1:nrow(double_data), postpois_conditioned_singleF)
    print(proc.time()-t)
    # Row 1 = posterior means, row 2 = posterior sds.
    helper1 <- matrix(unlist(helper),nrow=2)
    double_data[,paste0("fitness",E,"_cond") := helper1[1,]]
    double_data[,paste0("sigma",E,"_cond") := helper1[2,]]
    doubles_dt <- merge(doubles_dt, double_data[,.SD,,.SDcols = c("Pos1", "Pos2", "Mut1", "Mut2", paste0("fitness",E,"_cond"), paste0("sigma",E,"_cond"))], by = c("Pos1", "Pos2", "Mut1", "Mut2"), all.x = T)
  }
  parallel::stopCluster(clust)
  #Scatterplot matrix - singles
  d <- GGally::ggpairs(singles_dt[Nham_aa==1,grep(names(singles_dt),pattern="fitness"),with=F],
                       upper=list(continuous = "cor"))
  ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework_scattermatrix_singles.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
  #Scatterplot matrix - doubles, uncorrected
  set.seed(1)
  d <- GGally::ggpairs(doubles_dt[apply(doubles_dt[,.SD,,.SDcols = paste0("fitness",all_reps,"_uncorr")]==(-Inf), 1, sum)==0
                                  ][sample(x = .N,1000),grep(names(doubles_dt),pattern=paste0("fitness[", all_reps_str, "]_uncorr")),with=F],
                       upper=list(continuous = "cor"))
  ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework_scattermatrix_doubles_uncorr.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
  #Scatterplot matrix - doubles, conditional
  set.seed(1)
  d <- GGally::ggpairs(doubles_dt[apply(doubles_dt[,.SD,,.SDcols = paste0("fitness",all_reps,"_uncorr")]==(-Inf), 1, sum)==0
                                  ][sample(x = .N,1000),grep(names(doubles_dt),pattern=paste0("fitness[", all_reps_str, "]_cond")),with=F],
                       upper=list(continuous = "cor"))
  ggplot2::ggsave(file.path(outpath, "4_doubles_bayesian_framework_scattermatrix_doubles_cond.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
  ### Merge fitness values
  ###########################
  #### doubles
  # #uncorrected fitness
  # fitness_rx = doubles_dt[,.SD,.SDcols = grep(paste0("fitness[", all_reps_str, "]_uncorr"),colnames(doubles_dt))]
  # # sigma_rx = sqrt(doubles_dt[,.SD,.SDcols = grep(paste0("sigma[", all_reps_str, "]_uncorr"),colnames(doubles_dt))]^2 +
  # #                   matrix(replicate_error^2,nrow = dim(fitness_rx)[1],ncol = dim(fitness_rx)[2]))
  # sigma_rx = doubles_dt[,.SD,.SDcols = grep(paste0("sigma[", all_reps_str, "]_uncorr"),colnames(doubles_dt))]
  # # sigma_s2 = random_effect_model(fitness_rx,sigma_rx)
  # # doubles_dt[,fitness_uncorr := rowSums(fitness_rx/(sigma_rx^2 + sigma_s2),na.rm=T)/rowSums(1/(sigma_rx^2 + sigma_s2),na.rm=T)]
  # doubles_dt[,fitness_uncorr := rowSums(fitness_rx/(sigma_rx^2),na.rm=T)/rowSums(1/(sigma_rx^2),na.rm=T)]
  # doubles_dt[,sigma_uncorr := sqrt(1/rowSums(1/(sigma_rx^2+sigma_s2),na.rm=T))]
  # doubles_dt[,sigma_uncorr := sqrt(1/rowSums(1/(sigma_rx^2),na.rm=T))]
  # Plots below assume fitness_uncorr/sigma_uncorr already exist on doubles_dt
  # (the code computing them here is commented out, so presumably they come from
  # an upstream step — TODO confirm against the caller).
  d <- ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_uncorr,sigma_uncorr)) +
    ggplot2::geom_hex() +
    ggplot2::scale_y_log10() +
    ggplot2::coord_cartesian(ylim=c(0.01,10))
  ggplot2::ggsave(file.path(outpath, "5_sigma_vs_fitness_doubles_uncorr.pdf"), d, width = 5, height = 5, useDingbats=FALSE)
  #conditioned fitness
  fitness_rx = doubles_dt[,.SD,.SDcols = grep(paste0("fitness[", all_reps_str, "]_cond"),colnames(doubles_dt))]
  # sigma_rx = sqrt(doubles_dt[,.SD,.SDcols = grep(paste0("sigma[", all_reps_str, "]_cond"),colnames(doubles_dt))]^2 +
  #                   matrix(replicate_error^2,nrow = dim(fitness_rx)[1],ncol = dim(fitness_rx)[2]))
  # NOTE(review): this pairs *conditional* per-replicate fitness values with the
  # *uncorrected* per-replicate sigmas for the inverse-variance weighting (the
  # "_cond" sigma version is commented out just above) — confirm intentional.
  sigma_rx = doubles_dt[,.SD,.SDcols = grep(paste0("sigma[", all_reps_str, "]_uncorr"),colnames(doubles_dt))]
  # sigma_s2 = random_effect_model(fitness_rx,sigma_rx)
  # doubles_dt[,fitness_cond := rowSums(fitness_rx/(sigma_rx^2 + sigma_s2),na.rm=T)/rowSums(1/(sigma_rx^2 + sigma_s2),na.rm=T)]
  # Inverse-variance weighted average of per-replicate conditional fitness.
  doubles_dt[,fitness_cond := rowSums(fitness_rx/(sigma_rx^2),na.rm=T)/rowSums(1/(sigma_rx^2),na.rm=T)]
  # doubles_dt[,sigma_cond := sqrt(1/rowSums(1/(sigma_rx^2+sigma_s2),na.rm=T))]
  doubles_dt[,sigma_cond := sqrt(1/rowSums(1/(sigma_rx^2),na.rm=T))]
  d <- ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_cond,sigma_cond)) +
    ggplot2::geom_hex() +
    ggplot2::scale_y_log10() +
    ggplot2::coord_cartesian(ylim=c(0.01,10))
  ggplot2::ggsave(file.path(outpath, "5_sigma_vs_fitness_doubles_cond.pdf"), d, width = 5, height = 5, useDingbats=FALSE)
  #Plot to compare double fitness estimates
  p1=ggplot2::ggplot(doubles_dt,ggplot2::aes(mean_count,fitness_uncorr)) +
    ggplot2::geom_hex() +
    ggplot2::scale_x_log10() +
    ggplot2::scale_fill_continuous(trans="log10")
  p2=ggplot2::ggplot(doubles_dt,ggplot2::aes(mean_count,fitness_cond)) +
    ggplot2::geom_hex()+
    ggplot2::scale_x_log10() +
    ggplot2::scale_fill_continuous(trans="log10")
  p3=ggplot2::ggplot(doubles_dt[between(bin_count,2,8)],ggplot2::aes(fitness_uncorr,..scaled..,color=factor(bin_count))) +
    ggplot2::geom_density(adjust=1)
  p4=ggplot2::ggplot(doubles_dt[between(bin_count,2,8)],ggplot2::aes(fitness_cond,..scaled..,color=factor(bin_count))) +
    ggplot2::geom_density(adjust=1)
  p5=ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_uncorr,sigma_uncorr)) +
    ggplot2::geom_hex() +
    ggplot2::scale_y_log10()# +
    # ggplot2::coord_cartesian(ylim = c(0.05,2))
  p6=ggplot2::ggplot(doubles_dt,ggplot2::aes(fitness_cond,sigma_cond)) +
    ggplot2::geom_hex()+
    ggplot2::scale_y_log10()# +
    # ggplot2::coord_cartesian(ylim = c(0.05,2))
  ggplot2::theme_set(ggplot2::theme_minimal())
  #Plot
  d <- cowplot::plot_grid(plotlist = list(p1,p2,p3,p4,p5,p6),nrow=3)
  rm(p1,p2,p3,p4,p5,p6)
  ggplot2::ggsave(file.path(outpath, "5_doubles_fitness_estimates.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
  #Plot fitness values against each other
  set.seed(1)
  d <- GGally::ggpairs(doubles_dt[sample(.N,1000),.(fitness_uncorr,fitness_cond)])
  ggplot2::ggsave(file.path(outpath, "5_doubles_fitness_estimates_scattermatrix.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
  #Plot sigma values against each other
  d <- GGally::ggpairs(doubles_dt[,.(sigma_uncorr,sigma_cond)])
  ggplot2::ggsave(file.path(outpath, "5_doubles_sigma_estimates_scattermatrix.pdf"), d, width = 10, height = 10, useDingbats=FALSE)
  return(doubles_dt)
}
|
library(rvest)
library(dplyr)
# Scrape Myanmar-related news results from newslookup.com, pages 0..3200.
# Each page yields vectors of title/source/date/intro/link scraped via CSS
# selectors; one data.frame per page is collected in a preallocated list and
# bound once at the end (growing a data.frame with rbind() inside the loop
# is O(n^2) in total rows).
pages = vector("list", 3201)
for (page_number in seq(from = 0, to = 3200, by = 1))
{
  url=paste0("https://newslookup.com/results?p=",page_number,"&q=myanmar&dp=5&mt=-1&ps=10&s=&cat=-1&fmt=&groupby=no&site=&dp=5&tp=-720")
  page = read_html(url)
  title = page %>% html_nodes(".title") %>% html_text()
  source = page %>% html_nodes("br+ .source") %>% html_text()
  date = page %>% html_nodes(".stime") %>% html_text()
  intro = page %>% html_nodes(".desc") %>% html_text()
  link = page %>% html_nodes(".title") %>% html_attr("href")
  # List index is 1-based while pages start at 0.
  pages[[page_number + 1]] = data.frame(title, source, date, intro, link, stringsAsFactors = F)
  # Progress indicator.
  print(paste("Page:", page_number))
}
news = do.call(rbind, pages)
write.csv(news,"news.csv")
|
/news_scrapping.R
|
no_license
|
SanHtet-Sanpai/Myanmar-News-R-Project
|
R
| false
| false
| 726
|
r
|
library(rvest)
library(dplyr)
# Accumulator for all scraped articles; grown by rbind() each iteration.
news = data.frame()
# Scrape Myanmar-related news results from newslookup.com, pages 0..3200.
for (page_number in seq(from = 0, to = 3200, by = 1))
{
  url=paste0("https://newslookup.com/results?p=",page_number,"&q=myanmar&dp=5&mt=-1&ps=10&s=&cat=-1&fmt=&groupby=no&site=&dp=5&tp=-720")
  page = read_html(url)
  # CSS selectors for the fields of each result card on the page.
  title = page %>% html_nodes(".title") %>% html_text()
  source = page %>% html_nodes("br+ .source") %>% html_text()
  date = page %>% html_nodes(".stime") %>% html_text()
  intro = page %>% html_nodes(".desc") %>% html_text()
  link = page %>% html_nodes(".title") %>% html_attr("href")
  news = rbind(news, data.frame(title, source, date, intro, link, stringsAsFactors = F))
  # Progress indicator.
  print(paste("Page:", page_number))
}
# Persist the full scraped table.
write.csv(news,"news.csv")
|
#Tutorial-6 Solution:
# Multiple Linear Regression
# Importing the dataset
# NOTE(review): absolute Windows path — not portable across machines.
dataset = read.csv('D:/R Programming/Analytics in R/Analytics in R_P1/Luxury_Cars.csv')
View(dataset)
#Is there any missing value in the dataset
summary(dataset)
sum(is.na(dataset))
#Lets check the data type of each field:
str(dataset)
#We observe that fields: "Make, Model, Type, Origin, DriveTrain" are having
#character data type, so we need to convert them to factors.
#Also field: "Cylinders" is wrongly assigned numerical data type, so we need to
#convert it to factor type.
#Also field: "Model" may not be significant to predict the mileage of the cars.
#Hence,we can remove it.
table(dataset$Model)
#First removing "Model" column
dataset <- dataset[,-2]
View(dataset)
str(dataset)
#We can use transform method to change the in-built data type of above char var.
dataset <- transform(dataset,
                     Make=as.factor(Make),
                     Type=as.factor(Type),
                     Origin=as.factor(Origin),
                     DriveTrain=as.factor(DriveTrain),
                     Cylinders=as.factor(Cylinders)
)
str(dataset)
#Encoding categorical data
# ----------------------- Make --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$Make)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select Make, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Since here none of the levels in "Make" have large/significant no. of obs,
#so lets ignore this var.
dataset <- dataset[,-1]
View(dataset)
# ----------------------- Type --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$Type)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select Type, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Create dummy variables for Type:
#Since here out of 6 Types only 3 have large/significant no.of obs,
#Hence, the no. of dummy var here, will be 3-1 = 2.
dataset_1 <- dataset
dataset_1$Type1 <- ifelse(dataset_1$Type=='Sedan',1,0)
dataset_1$Type2 <- ifelse(dataset_1$Type=='SUV',1,0)
View(dataset_1)
# ----------------------- Origin --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$Origin)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select Origin, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Create dummy variables for Type:
#Since here all 3 Origins have large/significant no.of obs,
#Hence, the no. of dummy var here, will be 3-1 = 2.
dataset_1$Origin1 <- ifelse(dataset_1$Origin=='Asia',1,0)
dataset_1$Origin2 <- ifelse(dataset_1$Origin=='USA',1,0)
# ----------------------- DriveTrain --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$DriveTrain)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select DriveTrain, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Create dummy variables for Type:
#Since here all 3 DriveTrains have large/significant no.of obs,
#Hence, the no. of dummy var here, will be 3-1 = 2.
dataset_1$DriveTrain1 <- ifelse(dataset_1$DriveTrain=='Front',1,0)
dataset_1$DriveTrain2 <- ifelse(dataset_1$DriveTrain=='Rear',1,0)
# ----------------------- Cylinders --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$Cylinders)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select Cylinders, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Create dummy variables for Type:
#Since here out of 7 Cylinder types only 3 have large/significant no.of obs,
#Hence, the no. of dummy var here, will be 3-1 = 2.
dataset_1$Cylinders1 <- ifelse(dataset_1$Cylinders=='6',1,0)
dataset_1$Cylinders2 <- ifelse(dataset_1$Cylinders=='4',1,0)
View(dataset_1)
#Feature Engg.: Since we have created dummy var for few char var,
#so lets remove those char var whose dummy var we have created.
dataset_1 <- dataset_1[,-c(1,2,3,5)]
View(dataset_1)
# Splitting the dataset into the Training set and Test set
# NOTE(review): no set.seed() before sample(), so the 70/30 split (and the
# resulting MAPE) differs between runs — consider seeding for reproducibility.
trainDataIndex <- sample(1:nrow(dataset_1),0.7*nrow(dataset_1), replace = F)
trainData <-dataset_1[trainDataIndex, ]
testData <- dataset_1[-trainDataIndex, ]
View(trainData)
View(testData)
# Fitting Multiple Regression to the Training set
# First a full model on all predictors, then a reduced model keeping only
# the terms found significant (commented-out terms were dropped).
regressor = lm(MPG_Mileage ~ .,data = trainData)
summary(regressor)
regressor1 = lm(MPG_Mileage ~ Type2+#Type1+
                  #Origin1+
                  Origin2+
                  DriveTrain1+#DriveTrain2+
                  #Engine_Size+
                  Wheelbase_inch+
                  Cylinders2+
                  Cylinders1+Horsepower+Weight_LBS#+Length_inch
                ,data = trainData)
summary(regressor1)
# Predicting the Test set results
y_pred = predict(regressor1, newdata = testData)
testData$Pred_MPG = y_pred
#Accuracy of the Model:
#MAPE(MeanAbsolutePercentageError):
#Lower its value better is the accuracy of the model.
#MAPE Calculation:
mape <- mean(abs((testData$Pred_MPG - testData$MPG_Mileage))/testData$MPG_Mileage)
mape
# Mape using mape function
#install.packages("Metrics")
#Regression Eqn:
#Mileage = 41.85 -1.9*(Type2) -1.2*(Origin2) +2*(DriveTrain1) +0.10*(Wheelbase)
#          -2.58(cylinders2) -3.02(cylinder1) -0.02(horsepower) -0.005(weight)
#Since the error term is around 0.08,
#it means there is only 8% error in our model's prediction.
|
/MultipleRegression_Tutorial_6.R
|
no_license
|
Nitikagurral/Analytics-in-R
|
R
| false
| false
| 5,742
|
r
|
#Tutorial-6 Solution:
# Multiple Linear Regression
# Importing the dataset
# NOTE(review): absolute Windows path — not portable across machines.
dataset = read.csv('D:/R Programming/Analytics in R/Analytics in R_P1/Luxury_Cars.csv')
View(dataset)
#Is there any missing value in the dataset
summary(dataset)
sum(is.na(dataset))
#Lets check the data type of each field:
str(dataset)
#We observe that fields: "Make, Model, Type, Origin, DriveTrain" are having
#character data type, so we need to convert them to factors.
#Also field: "Cylinders" is wrongly assigned numerical data type, so we need to
#convert it to factor type.
#Also field: "Model" may not be significant to predict the mileage of the cars.
#Hence,we can remove it.
table(dataset$Model)
#First removing "Model" column
dataset <- dataset[,-2]
View(dataset)
str(dataset)
#We can use transform method to change the in-built data type of above char var.
dataset <- transform(dataset,
                     Make=as.factor(Make),
                     Type=as.factor(Type),
                     Origin=as.factor(Origin),
                     DriveTrain=as.factor(DriveTrain),
                     Cylinders=as.factor(Cylinders)
)
str(dataset)
#Encoding categorical data
# ----------------------- Make --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$Make)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select Make, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Since here none of the levels in "Make" have large/significant no. of obs,
#so lets ignore this var.
dataset <- dataset[,-1]
View(dataset)
# ----------------------- Type --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$Type)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select Type, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Create dummy variables for Type:
#Since here out of 6 Types only 3 have large/significant no.of obs,
#Hence, the no. of dummy var here, will be 3-1 = 2.
dataset_1 <- dataset
dataset_1$Type1 <- ifelse(dataset_1$Type=='Sedan',1,0)
dataset_1$Type2 <- ifelse(dataset_1$Type=='SUV',1,0)
View(dataset_1)
# ----------------------- Origin --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$Origin)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select Origin, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Create dummy variables for Type:
#Since here all 3 Origins have large/significant no.of obs,
#Hence, the no. of dummy var here, will be 3-1 = 2.
dataset_1$Origin1 <- ifelse(dataset_1$Origin=='Asia',1,0)
dataset_1$Origin2 <- ifelse(dataset_1$Origin=='USA',1,0)
# ----------------------- DriveTrain --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$DriveTrain)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select DriveTrain, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Create dummy variables for Type:
#Since here all 3 DriveTrains have large/significant no.of obs,
#Hence, the no. of dummy var here, will be 3-1 = 2.
dataset_1$DriveTrain1 <- ifelse(dataset_1$DriveTrain=='Front',1,0)
dataset_1$DriveTrain2 <- ifelse(dataset_1$DriveTrain=='Rear',1,0)
# ----------------------- Cylinders --------------------------------
# To check Plan Subscription ratio in dataset
table(dataset$Cylinders)/nrow(dataset)
# Avg Premium by state# To check Plan Subscription ratio in dataset
library(sqldf)
sqldf("select Cylinders, COUNT (*) as obs, avg(MPG_Mileage) from dataset GROUP BY 1")
#Create dummy variables for Type:
#Since here out of 7 Cylinder types only 3 have large/significant no.of obs,
#Hence, the no. of dummy var here, will be 3-1 = 2.
dataset_1$Cylinders1 <- ifelse(dataset_1$Cylinders=='6',1,0)
dataset_1$Cylinders2 <- ifelse(dataset_1$Cylinders=='4',1,0)
View(dataset_1)
#Feature Engg.: Since we have created dummy var for few char var,
#so lets remove those char var whose dummy var we have created.
dataset_1 <- dataset_1[,-c(1,2,3,5)]
View(dataset_1)
# Splitting the dataset into the Training set and Test set
# NOTE(review): no set.seed() before sample(), so the 70/30 split (and the
# resulting MAPE) differs between runs — consider seeding for reproducibility.
trainDataIndex <- sample(1:nrow(dataset_1),0.7*nrow(dataset_1), replace = F)
trainData <-dataset_1[trainDataIndex, ]
testData <- dataset_1[-trainDataIndex, ]
View(trainData)
View(testData)
# Fitting Multiple Regression to the Training set
# First a full model on all predictors, then a reduced model keeping only
# the terms found significant (commented-out terms were dropped).
regressor = lm(MPG_Mileage ~ .,data = trainData)
summary(regressor)
regressor1 = lm(MPG_Mileage ~ Type2+#Type1+
                  #Origin1+
                  Origin2+
                  DriveTrain1+#DriveTrain2+
                  #Engine_Size+
                  Wheelbase_inch+
                  Cylinders2+
                  Cylinders1+Horsepower+Weight_LBS#+Length_inch
                ,data = trainData)
summary(regressor1)
# Predicting the Test set results
y_pred = predict(regressor1, newdata = testData)
testData$Pred_MPG = y_pred
#Accuracy of the Model:
#MAPE(MeanAbsolutePercentageError):
#Lower its value better is the accuracy of the model.
#MAPE Calculation:
mape <- mean(abs((testData$Pred_MPG - testData$MPG_Mileage))/testData$MPG_Mileage)
mape
# Mape using mape function
#install.packages("Metrics")
#Regression Eqn:
#Mileage = 41.85 -1.9*(Type2) -1.2*(Origin2) +2*(DriveTrain1) +0.10*(Wheelbase)
#          -2.58(cylinders2) -3.02(cylinder1) -0.02(horsepower) -0.005(weight)
#Since the error term is around 0.08,
#it means there is only 8% error in our model's prediction.
|
##Matrix inversion is usually a costly computation and their may be some benefit to
##caching the inverse of a matrix rather than compute it repeatedly
##These pair of functions cache the inverse of a matrix.
#This function creates a special "matrix" object that can cache its inverse.
#It returns a list of four closures sharing the same environment:
#  set/get store and retrieve the matrix; setinverse/getinverse store and
#  retrieve the cached inverse. Replacing the matrix via set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    # Invalidate any previously cached inverse.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
#This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed),
#then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # Fast path: reuse a previously computed inverse if one is cached.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Slow path: compute via solve(), then store it for next time.
  mat <- x$get()
  result <- solve(mat, ...)
  x$setinverse(result)
  result
}
|
/cachematrix.R
|
no_license
|
ilovephysics/ProgrammingAssignment2
|
R
| false
| false
| 1,246
|
r
|
##Matrix inversion is usually a costly computation and their may be some benefit to
##caching the inverse of a matrix rather than compute it repeatedly
##These pair of functions cache the inverse of a matrix.
#This function creates a special "matrix" object that can cache its inverse.
#Returns a list of four closures over a shared environment: set/get for the
#matrix, setinverse/getinverse for the cached inverse. set() clears the cache.
makeCacheMatrix <- function(x = matrix()) {
  #This function creates a special "matrix" object that can cache its inverse.
  inv <- NULL
  set <- function(y) {
    x <<- y
    # Invalidate the cached inverse whenever the matrix changes.
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
#This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
#If the inverse has already been calculated (and the matrix has not changed),
#then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # Fast path: return the cached inverse if present.
  inv <- x$getinverse()
  if(!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Slow path: compute with solve() and cache for subsequent calls.
  data <- x$get()
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
|
\alias{gtkPaintExpander}
\name{gtkPaintExpander}
\title{gtkPaintExpander}
\description{Draws an expander as used in \code{\link{GtkTreeView}}. \code{x} and \code{y} specify the
center of the expander. The size of the expander is determined by the
"expander-size" style property of \code{widget}. (If widget is not
specified or doesn't have an "expander-size" property, an
unspecified default size will be used, since the caller doesn't
have sufficient information to position the expander, this is
likely not useful.) The expander is expander_size pixels tall
in the collapsed position and expander_size pixels wide in the
expanded position.}
\usage{gtkPaintExpander(object, window, state.type, area = NULL, widget = NULL,
detail = NULL, x, y, expander.style)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkStyle}}}
\item{\verb{window}}{a \code{\link{GdkWindow}}}
\item{\verb{state.type}}{a state}
\item{\verb{area}}{clip rectangle, or \code{NULL} if the
output should not be clipped. \emph{[ \acronym{allow-none} ]}}
\item{\verb{widget}}{the widget. \emph{[ \acronym{allow-none} ]}}
\item{\verb{detail}}{a style detail. \emph{[ \acronym{allow-none} ]}}
\item{\verb{x}}{the x position to draw the expander at}
\item{\verb{y}}{the y position to draw the expander at}
\item{\verb{expander.style}}{the style to draw the expander in; determines
whether the expander is collapsed, expanded, or in an
intermediate state.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/RGtk2/man/gtkPaintExpander.Rd
|
no_license
|
lawremi/RGtk2
|
R
| false
| false
| 1,502
|
rd
|
\alias{gtkPaintExpander}
\name{gtkPaintExpander}
\title{gtkPaintExpander}
\description{Draws an expander as used in \code{\link{GtkTreeView}}. \code{x} and \code{y} specify the
center of the expander. The size of the expander is determined by the
"expander-size" style property of \code{widget}. (If widget is not
specified or doesn't have an "expander-size" property, an
unspecified default size will be used, since the caller doesn't
have sufficient information to position the expander, this is
likely not useful.) The expander is expander_size pixels tall
in the collapsed position and expander_size pixels wide in the
expanded position.}
\usage{gtkPaintExpander(object, window, state.type, area = NULL, widget = NULL,
detail = NULL, x, y, expander.style)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkStyle}}}
\item{\verb{window}}{a \code{\link{GdkWindow}}}
\item{\verb{state.type}}{a state}
\item{\verb{area}}{clip rectangle, or \code{NULL} if the
output should not be clipped. \emph{[ \acronym{allow-none} ]}}
\item{\verb{widget}}{the widget. \emph{[ \acronym{allow-none} ]}}
\item{\verb{detail}}{a style detail. \emph{[ \acronym{allow-none} ]}}
\item{\verb{x}}{the x position to draw the expander at}
\item{\verb{y}}{the y position to draw the expander at}
\item{\verb{expander.style}}{the style to draw the expander in; determines
whether the expander is collapsed, expanded, or in an
intermediate state.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
library(ggmap)
#Read in all of the csv data files
get_csvdata_droploads <- function(){
  # Loads the raw BPA 230kV "dropped load" scenario CSVs into the global
  # environment (<<-) so the other helpers in this file can reach them.
  buses <<- read.csv("data/rawdata/BPA_230kV_data/buses.csv")
  substations <<- read.csv("data/rawdata/BPA_230kV_data/subs.csv")
  bus_info <<- read.csv("data/rawdata/BPA_230kV_data/bus_info.csv")
  pmus <<- read.csv("data/rawdata/BPA_230kV_data/pmu_buses.csv")
  gens <<- read.csv("data/rawdata/BPA_230kV_data/gens.csv")
  # Per-PMU time series: frequency, voltage angle, and voltage magnitude.
  Freq <<- read.csv("data/rawdata/BPA_230kV_data/weccbpaabove230_drop_load_pmu_freq.csv")
  Pangle <<- read.csv("data/rawdata/BPA_230kV_data/weccbpaabove230_drop_load_pmu_vang.csv")
  Volt <<- read.csv("data/rawdata/BPA_230kV_data/weccbpaabove230_drop_load_pmu_vmag.csv")
}
#Change up the names so that they all match
clean_names_droploads <- function(){
  # read.csv prefixes syntactically invalid (numeric) column names with "X";
  # strip that prefix so time-series columns match bus names used elsewhere.
  fn <- colnames(Freq)
  fn <- gsub("X","",fn)
  # fn <- gsub("[.]Frequency","",fn)
  colnames(Freq) <<- fn
  pn <- colnames(Pangle)
  pn <- gsub("X","",pn)
  #pn <- gsub("[.]V[.]angle","",pn)
  colnames(Pangle) <<- pn
  vn <- colnames(Volt)
  vn <- gsub("X","",vn)
  # vn <- gsub("[.]V[.]pu","",vn)
  colnames(Volt) <<- vn
  # Placeholder; populated later by get_merged_data_droploads().
  bus_locs <<- data.frame()
}
#Create all of the merged data frames that will be used by the plotting functions
get_merged_data_droploads <- function(){
  # sub_buses <<- merge(buses,substations,by = c("Sub.ID","Sub.Name"))
  # One row per PMU bus: identifiers, location, and zero-initialised
  # measurement columns to be filled in by the plotting code.
  bus_locs_full <<- data.frame(pmus$Bus.Number,pmus$Bus.Name,pmus$Sub.Name,pmus$Latitude,pmus$Longitude,"Frequency","Voltage","Angle")
  colnames(bus_locs_full) <<- c("Bus.Name","Bus.Name.unused","Sub.Name", "Latitude","Longitude","Frequency","Voltage","Angle")
  bus_locs_full$Frequency <<- 0
  bus_locs_full$Voltage <<- 0
  bus_locs_full$Angle <<- 0
  #Remove buses with no long/lat coordinates
  bus_locs_full<<-bus_locs_full[!(bus_locs_full$Latitude=="" | bus_locs_full$Longitude=="" | is.na(bus_locs_full$Latitude) | is.na(bus_locs_full$Longitude)),]
  # Coordinates may arrive as factors/characters; force numeric.
  bus_locs_full$Longitude <<- as.numeric(as.character(bus_locs_full$Longitude))
  bus_locs_full$Latitude <<- as.numeric(as.character(bus_locs_full$Latitude))
  # missing_pmu <- colnames(Volt)[!colnames(Volt) %in% bus_locs_full$Bus.Num][-1]
  #bus_locs contains just the buses that have PMU readings
  #bus_locs <<- bus_locs_full[bus_locs_full$Bus.Num %in% pmus$Bus.Number,]
  bus_locs <<- bus_locs_full[bus_locs_full$Bus.Name %in% colnames(Freq),]
  bus_locs$Longitude <<- as.numeric(as.character(bus_locs$Longitude))
  bus_locs$Latitude <<- as.numeric(as.character(bus_locs$Latitude))
}
#Create the map and ggmap to be used by the plot functions (the map ones at least)
get_map_data_droploads <- function(){
  #Create the map to use as the background for the ggplot
  # Map is centered on the mean bus location at a fixed zoom.
  mapten <<- get_map(location = c(lon = mean(bus_locs$Longitude), lat = mean(bus_locs$Latitude)), zoom = 4, maptype = "roadmap", scale = 2)
  #maplocs <<- get_map(location = c(min(bus_locs$Longitude), min(bus_locs$Latitude),
  #                       max(bus_locs$Longitude),max(bus_locs$Latitude)),
  #                         maptype = "roadmap")
  # Hard-coded plotting window; kept in sync manually with the scale_* limits below.
  map_lims <<- c(-124, -104,31, 50) #xmin,xmax,ymin,ymax
  # Width/height ratio of the window in degrees.
  m_ratio <<- abs(map_lims[2]-map_lims[1])/abs(map_lims[4]-map_lims[3])
  g <<- ggmap(mapten) +
    # coord_fixed(xlim = c(map_lims[1], map_lims[2]),ylim = c(map_lims[3], map_lims[4]),expand = FALSE)
    scale_x_continuous(limits = c(-124, -104), expand = c(0, 0)) +
    scale_y_continuous(limits = c(31, 50), expand = c(0, 0))
}
#Call all the functions in order
import_data <- function(){
  # Entry point: load raw CSVs, normalise column names, build merged frames,
  # then prepare the background map. Order matters — each step reads globals
  # written by the previous one.
  get_csvdata_droploads()
  clean_names_droploads()
  get_merged_data_droploads()
  get_map_data_droploads()
}
#Name of the data set
name <- function(){
  # Human-readable label used by the UI for this data set.
  "Dropped Load"
}
#How many time points is the data
nsamples <- function(){
  # One row of the global Freq table per time sample.
  dim(Freq)[1]
}
#Returns a list of the plots that this data can be used to create
# File names of the plot scripts that can visualize this data set.
use_plots <- function(){
  as.list(c("linear.R", "map.R", "heatmap.R", "bar.R", "histogram.R"))
}
|
/data/import_droploads.R
|
no_license
|
grahamhome/PowerApp
|
R
| false
| false
| 3,895
|
r
|
library(ggmap)
#Read in all of the csv data files
# Load the raw BPA 230 kV CSV extracts into the global environment.
# Side effects (<<-): buses, substations, bus_info, pmus, gens, and the three
# PMU reading tables Freq / Pangle / Volt for the dropped-load scenario
# (frequency, voltage angle, voltage magnitude; one row per time point,
# one column per bus).
get_csvdata_droploads <- function(){
buses <<- read.csv("data/rawdata/BPA_230kV_data/buses.csv")
substations <<- read.csv("data/rawdata/BPA_230kV_data/subs.csv")
bus_info <<- read.csv("data/rawdata/BPA_230kV_data/bus_info.csv")
pmus <<- read.csv("data/rawdata/BPA_230kV_data/pmu_buses.csv")
gens <<- read.csv("data/rawdata/BPA_230kV_data/gens.csv")
Freq <<- read.csv("data/rawdata/BPA_230kV_data/weccbpaabove230_drop_load_pmu_freq.csv")
Pangle <<- read.csv("data/rawdata/BPA_230kV_data/weccbpaabove230_drop_load_pmu_vang.csv")
Volt <<- read.csv("data/rawdata/BPA_230kV_data/weccbpaabove230_drop_load_pmu_vmag.csv")
}
#Change up the names so that they all match
# Normalize the column names of the global Freq / Pangle / Volt tables so they
# match the bus identifiers, and initialize the bus_locs placeholder.
# Side effects (<<-): colnames of Freq, Pangle, Volt; bus_locs.
clean_names_droploads <- function(){
  # read.csv() prefixes syntactically invalid column names (e.g. numeric bus
  # IDs) with "X"; strip only that leading prefix. The previous code used
  # gsub("X", "", ...), which also deleted any "X" occurring *inside* a name.
  strip_x <- function(nms) sub("^X", "", nms)
  colnames(Freq) <<- strip_x(colnames(Freq))
  colnames(Pangle) <<- strip_x(colnames(Pangle))
  colnames(Volt) <<- strip_x(colnames(Volt))
  # Placeholder; populated later by get_merged_data_droploads().
  bus_locs <<- data.frame()
}
#Create all of the merged data frames that will be used by the plotting functions
# Build the bus-location data frames used by the plotting functions.
# Side effects (<<-): bus_locs_full (every PMU bus with map coordinates) and
# bus_locs (the subset whose IDs appear as columns of Freq).
get_merged_data_droploads <- function(){
# sub_buses <<- merge(buses,substations,by = c("Sub.ID","Sub.Name"))
# NOTE: the numeric pmus$Bus.Number is deliberately stored under the column
# name "Bus.Name" -- it is matched below against colnames(Freq), which are the
# numeric bus IDs; the textual name is parked as "Bus.Name.unused".
bus_locs_full <<- data.frame(pmus$Bus.Number,pmus$Bus.Name,pmus$Sub.Name,pmus$Latitude,pmus$Longitude,"Frequency","Voltage","Angle")
colnames(bus_locs_full) <<- c("Bus.Name","Bus.Name.unused","Sub.Name", "Latitude","Longitude","Frequency","Voltage","Angle")
# Initialize the reading columns; plot code fills them in per time point.
bus_locs_full$Frequency <<- 0
bus_locs_full$Voltage <<- 0
bus_locs_full$Angle <<- 0
#Remove buses with no long/lat coordinates
bus_locs_full<<-bus_locs_full[!(bus_locs_full$Latitude=="" | bus_locs_full$Longitude=="" | is.na(bus_locs_full$Latitude) | is.na(bus_locs_full$Longitude)),]
# Coordinates may have been read as factors/strings; go via character to
# avoid factor-level-to-integer conversion.
bus_locs_full$Longitude <<- as.numeric(as.character(bus_locs_full$Longitude))
bus_locs_full$Latitude <<- as.numeric(as.character(bus_locs_full$Latitude))
# missing_pmu <- colnames(Volt)[!colnames(Volt) %in% bus_locs_full$Bus.Num][-1]
#bus_locs contains just the buses that have PMU readings
#bus_locs <<- bus_locs_full[bus_locs_full$Bus.Num %in% pmus$Bus.Number,]
bus_locs <<- bus_locs_full[bus_locs_full$Bus.Name %in% colnames(Freq),]
bus_locs$Longitude <<- as.numeric(as.character(bus_locs$Longitude))
bus_locs$Latitude <<- as.numeric(as.character(bus_locs$Latitude))
}
#Create the map and ggmap to be used by the plot functions (the map ones at least)
# Build the static map background and the shared ggmap base plot used by the
# map-based plot functions.
# Side effects (<<-): mapten (raster map tiles), map_lims (display window:
# xmin, xmax, ymin, ymax), m_ratio (window aspect ratio), g (base ggmap plot).
# NOTE(review): get_map() fetches tiles over the network, so this fails offline.
get_map_data_droploads <- function(){
  # Road-map background centered on the mean bus location.
  mapten <<- get_map(location = c(lon = mean(bus_locs$Longitude), lat = mean(bus_locs$Latitude)), zoom = 4, maptype = "roadmap", scale = 2)
  # Fixed display window over the western US.
  map_lims <<- c(-124, -104, 31, 50) #xmin,xmax,ymin,ymax
  m_ratio <<- abs(map_lims[2]-map_lims[1])/abs(map_lims[4]-map_lims[3])
  # Reuse map_lims for the axis limits; previously the same literals were
  # hard-coded here, duplicating map_lims and risking drift from m_ratio.
  g <<- ggmap(mapten) +
    scale_x_continuous(limits = map_lims[1:2], expand = c(0, 0)) +
    scale_y_continuous(limits = map_lims[3:4], expand = c(0, 0))
}
#Call all the functions in order
# Full import pipeline for the dropped-load data set. The steps must run in
# this order because each publishes globals (via <<-) that the next consumes:
# raw CSVs -> cleaned column names -> merged location frames -> map objects.
import_data <- function(){
  for (step in list(get_csvdata_droploads,
                    clean_names_droploads,
                    get_merged_data_droploads,
                    get_map_data_droploads)) {
    last <- step()
  }
  invisible(last)
}
#Name of the data set
# Human-readable label for this data set (shown in the UI).
name <- function(){
n <- "Dropped Load"
n
}
#How many time points is the data
# Number of sampled time points: one row of the global Freq table per sample.
nsamples <- function(){
  dim(Freq)[[1L]]
}
#Returns a list of the plots that this data can be used to create
# File names of the plot scripts that can visualize this data set.
use_plots <- function(){
list('linear.R','map.R','heatmap.R','bar.R','histogram.R')
}
|
#' @rdname select
#' @export
select <- function(obj,...) UseMethod("select")
#' Select a value of lambda along a grpreg path
#'
#' Selects a point along the regularization path of a fitted grpreg object
#' according to the AIC, BIC, or GCV criteria.
#'
#' The criteria are defined as follows, where \eqn{L}{L} is the deviance (i.e,
#' -2 times the log-likelihood), \eqn{\nu}{df} is the degrees of freedom, and
#' \eqn{n}{n} is the sample size:
#'
#' \deqn{AIC = L + 2\nu}{AIC = L + 2*df} \deqn{BIC = L + \log(n)\nu}{BIC = L +
#' log(n)*df} \deqn{GCV = \frac{L}{(1-\nu/n)^2}}{GCV= L/((1-df/n)^2)}
#' \deqn{AICc = AIC + 2\frac{\nu(\nu+1)}{n-\nu-1}}{AICc = AIC +
#' 2*df*(df+1)/(n-df-1)} \deqn{EBIC = BIC + 2 \log{p \choose \nu}}{EBIC = BIC +
#' 2*log(p choose df)}
#'
#' @rdname select
#'
#' @param obj A fitted grpreg object.
#' @param criterion The criterion by which to select the regularization
#' parameter. One of \code{"AIC"}, \code{"BIC"}, \code{"GCV"}, \code{"AICc"},
#' or \code{"EBIC"}; default is \code{"BIC"}.
#' @param df.method How should effective model parameters be calculated? One
#' of: \code{"active"}, which counts the number of nonzero coefficients; or
#' \code{"default"}, which uses the calculated \code{df} returned by
#' \code{grpreg}. Default is \code{"default"}.
#' @param smooth Applies a smoother to the information criteria before
#' selecting the optimal value.
#' @param \dots For S3 method compatibility.
#'
#' @return A list containing:
#' \describe{
#' \item{lambda}{The selected value of the regularization parameter, `lambda`.}
#' \item{beta}{The vector of coefficients at the chosen value of `lambda`.}
#' \item{df}{The effective number of model parameters at the chosen value of `lambda`.}
#' \item{IC}{A vector of the calculated model selection criteria for each point on the regularization path.}
#' }
#'
#' @seealso [grpreg()]
#'
#' @examples
#' data(Birthwt)
#' X <- Birthwt$X
#' y <- Birthwt$bwt
#' group <- Birthwt$group
#' fit <- grpreg(X, y, group, penalty="grLasso")
#' select(fit)
#' select(fit,crit="AIC",df="active")
#' plot(fit)
#' abline(v=select(fit)$lambda)
#' par(mfrow=c(1,3))
#' l <- fit$lambda
#' xlim <- rev(range(l))
#' plot(l, select(fit)$IC, xlim=xlim, pch=19, type="o", ylab="BIC")
#' plot(l, select(fit,"AIC")$IC, xlim=xlim, pch=19, type="o",ylab="AIC")
#' plot(l, select(fit,"GCV")$IC, xlim=xlim, pch=19, type="o",ylab="GCV")
#' @export
# Select a value of lambda along a fitted grpreg path by an information
# criterion (see the roxygen block above for the criterion definitions).
# Returns a list with the coefficients, lambda, and effective df at the chosen
# point, plus the criterion value at every point on the path.
select.grpreg <- function(obj, criterion=c("BIC","AIC","GCV","AICc","EBIC"), df.method=c("default","active"), smooth=FALSE, ...) {
  criterion <- match.arg(criterion)
  df.method <- match.arg(df.method)
  # Path-wise log-likelihood; its "df" attribute is the effective model size
  # at each lambda.
  ll <- logLik(obj, df.method=df.method, ...)
  df <- as.double(attr(ll, "df"))
  d <- dim(obj$beta)
  # Number of penalized coefficients (intercept excluded); obj$beta may be a
  # matrix or a 3-d array depending on the model family.
  p <- if (length(d) == 2) d[1] - 1 else d[2] - 1
  # Gaussian fits carry one extra df for the error variance.
  j <- if (obj$family == "gaussian") df - 2 else df - 1
  IC <- switch(criterion,
               AIC = AIC(ll),
               BIC = BIC(ll),
               GCV = (1/obj$n) * (-2) * as.double(ll) / (1 - df/obj$n)^2,
               AICc = AIC(ll) + 2*df*(df + 1)/(obj$n - df - 1),
               # EBIC adds 2*log(choose(p, j)), computed via lgamma for stability.
               EBIC = BIC(ll) + 2*(lgamma(p + 1) - lgamma(j + 1) - lgamma(p - j + 1)))
  n.l <- length(obj$lambda)
  # Scalar condition: use short-circuit && rather than elementwise &.
  if (smooth && (n.l < 4)) {
    smooth <- FALSE
    warning("Need at least 4 points to use smooth=TRUE", call.=FALSE)
  }
  if (smooth) {
    # Smooth the criterion and pick the first local minimum (or the path end
    # when the smoothed curve decreases monotonically).
    # NOTE(review): non-finite IC values are dropped before smoothing, which
    # can shift the selected index relative to obj$lambda -- confirm intended.
    fit.ss <- smooth.spline(IC[is.finite(IC)])
    d <- diff(fit.ss$y)
    if (all(d < 0)) i <- n.l
    else i <- min(which(d > 0)) - 1
    if (i == 0) i <- 1
  } else i <- which.min(IC)
  # Warn when the selection lands on the boundary of the lambda grid.
  if (min(obj$lambda) == obj$lambda[i]) {
    warning(paste("minimum lambda selected for", obj$penalty), call.=FALSE)
  } else if ((max(obj$lambda) == obj$lambda[i]) && obj$penalty == "gBridge") {
    warning("maximum lambda selected", call.=FALSE)
  }
  return(list(beta=obj$beta[,i],
              lambda=obj$lambda[i],
              df=df[i],
              IC=IC))
}
|
/R/select.R
|
no_license
|
pbreheny/grpreg
|
R
| false
| false
| 3,835
|
r
|
#' @rdname select
#' @export
select <- function(obj,...) UseMethod("select")
#' Select a value of lambda along a grpreg path
#'
#' Selects a point along the regularization path of a fitted grpreg object
#' according to the AIC, BIC, or GCV criteria.
#'
#' The criteria are defined as follows, where \eqn{L}{L} is the deviance (i.e,
#' -2 times the log-likelihood), \eqn{\nu}{df} is the degrees of freedom, and
#' \eqn{n}{n} is the sample size:
#'
#' \deqn{AIC = L + 2\nu}{AIC = L + 2*df} \deqn{BIC = L + \log(n)\nu}{BIC = L +
#' log(n)*df} \deqn{GCV = \frac{L}{(1-\nu/n)^2}}{GCV= L/((1-df/n)^2)}
#' \deqn{AICc = AIC + 2\frac{\nu(\nu+1)}{n-\nu-1}}{AICc = AIC +
#' 2*df*(df+1)/(n-df-1)} \deqn{EBIC = BIC + 2 \log{p \choose \nu}}{EBIC = BIC +
#' 2*log(p choose df)}
#'
#' @rdname select
#'
#' @param obj A fitted grpreg object.
#' @param criterion The criterion by which to select the regularization
#' parameter. One of \code{"AIC"}, \code{"BIC"}, \code{"GCV"}, \code{"AICc"},
#' or \code{"EBIC"}; default is \code{"BIC"}.
#' @param df.method How should effective model parameters be calculated? One
#' of: \code{"active"}, which counts the number of nonzero coefficients; or
#' \code{"default"}, which uses the calculated \code{df} returned by
#' \code{grpreg}. Default is \code{"default"}.
#' @param smooth Applies a smoother to the information criteria before
#' selecting the optimal value.
#' @param \dots For S3 method compatibility.
#'
#' @return A list containing:
#' \describe{
#' \item{lambda}{The selected value of the regularization parameter, `lambda`.}
#' \item{beta}{The vector of coefficients at the chosen value of `lambda`.}
#' \item{df}{The effective number of model parameters at the chosen value of `lambda`.}
#' \item{IC}{A vector of the calculated model selection criteria for each point on the regularization path.}
#' }
#'
#' @seealso [grpreg()]
#'
#' @examples
#' data(Birthwt)
#' X <- Birthwt$X
#' y <- Birthwt$bwt
#' group <- Birthwt$group
#' fit <- grpreg(X, y, group, penalty="grLasso")
#' select(fit)
#' select(fit,crit="AIC",df="active")
#' plot(fit)
#' abline(v=select(fit)$lambda)
#' par(mfrow=c(1,3))
#' l <- fit$lambda
#' xlim <- rev(range(l))
#' plot(l, select(fit)$IC, xlim=xlim, pch=19, type="o", ylab="BIC")
#' plot(l, select(fit,"AIC")$IC, xlim=xlim, pch=19, type="o",ylab="AIC")
#' plot(l, select(fit,"GCV")$IC, xlim=xlim, pch=19, type="o",ylab="GCV")
#' @export
# Select a value of lambda along a fitted grpreg path by an information
# criterion; returns the coefficients, lambda, and effective df at the chosen
# point plus the IC value at every point on the path.
select.grpreg <- function(obj, criterion=c("BIC","AIC","GCV","AICc","EBIC"), df.method=c("default","active"), smooth=FALSE, ...) {
  criterion <- match.arg(criterion)
  df.method <- match.arg(df.method)
  # Path-wise log-likelihood; its "df" attribute is the effective model size.
  ll <- logLik(obj, df.method=df.method, ...)
  df <- as.double(attr(ll,"df"))
  d <- dim(obj$beta)
  # Number of penalized coefficients (intercept excluded); obj$beta may be a
  # matrix or a 3-d array depending on the model family.
  p <- if (length(d)==2) d[1] - 1 else d[2] - 1
  # Gaussian fits carry one extra df for the error variance.
  j <- if(obj$family=="gaussian") df - 2 else df - 1
  IC <- switch(criterion,
               AIC = AIC(ll),
               BIC = BIC(ll),
               GCV = (1/obj$n) * (-2) * as.double(ll) / (1-df/obj$n)^2,
               AICc = AIC(ll) + 2*df*(df+1)/(obj$n-df-1),
               # EBIC adds 2*log(choose(p, j)), computed via lgamma for stability.
               EBIC = BIC(ll) + 2*(lgamma(p+1) - lgamma(j+1) - lgamma(p-j+1)))
  n.l <- length(obj$lambda)
  # NOTE(review): 'smooth' is scalar, so && would be the idiomatic operator here.
  if (smooth & (n.l < 4)) {
    smooth <- FALSE
    warning("Need at least 4 points to use smooth=TRUE", call.=FALSE)
  }
  if (smooth) {
    # Smooth the criterion and pick the first local minimum (path end if the
    # smoothed curve decreases monotonically).
    # NOTE(review): non-finite IC values are dropped before smoothing, which
    # can shift the selected index relative to obj$lambda -- confirm intended.
    fit.ss <- smooth.spline(IC[is.finite(IC)])
    d <- diff(fit.ss$y)
    if (all(d<0)) i <- n.l
    else i <- min(which(d>0))-1
    if (i==0) i <- 1
  } else i <- which.min(IC)
  # Warn when the selection lands on the boundary of the lambda grid.
  if (min(obj$lambda) == obj$lambda[i]) {
    warning(paste("minimum lambda selected for", obj$penalty), call.=FALSE)
  } else if ((max(obj$lambda) == obj$lambda[i]) & obj$penalty=="gBridge") {
    warning("maximum lambda selected", call.=FALSE)
  }
  return(list(beta=obj$beta[,i],
              lambda=obj$lambda[i],
              df=df[i],
              IC=IC))
}
|
## This code is to create a shiny APP for the platypus month data
## It does the following
#1 load libraries
#2 Read in the platypus month data
#3 set up a ui for the shiny app
#1 load libraries
#install.packages("dplyr")
#install.packages("tidyr")
#install.packages("lubridate")
#install.packages("ggplot2")
#install.packages("shiny")
#install.packages("leaflet")
#install.packages("plotly")
# install.packages("DT")
library(dplyr)
library(tidyr)
library(lubridate)
library(ggplot2)
library(shiny)
library(leaflet)
library(plotly)
library(DT)
#2 Read and tidy platypus month data
platy <- read.csv("survey_data.csv")
platy <- platy%>% mutate(
date = as.Date(date, format = "%d/%m/%Y"),
species = trimws(species),
site_id = as.factor(site_id)
)
reaches <- platy %>% select("site_id", "latitude", "longitude") %>%
unique()
cleanup <- theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(color = "black"),
panel.grid.major.y = element_line(colour = "gray", linetype = "dotted", size = 0.1),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"))
platyIcons <- iconList(
"survey" = makeIcon("platy_image.png", 40, 40),
"adhoc" = makeIcon("platy_image_2.png", 40, 40)
)
#3 set up a ui for the shiny app
ui <- fluidPage(tags$head(
# Note the wrapping of the string in HTML()
tags$style(HTML("
body {
background-color: powderblue;
color: midnightblue;
}
"))
),
# App title
fluidRow(h1("Platypus and Rakali observations - ACT Waterwatch", align = "center", style = "color:midnightblue;")),
fluidRow(
column(3,
br(),
br(),
br(),
br(),
br(),
#select species
wellPanel(selectInput(inputId = "species", label = "Species",
choices = unique(platy$species),
selected = "platypus"),
# select site
selectInput(inputId = "site", label = "Site",
choices = unique(platy$site_id),
selected = "Cooma_Ck"),
# select the time period for the output
dateRangeInput(inputId = "date",
label = "Date range",
format = "dd/mm/yyyy",
start = "2014-01-01",
separator = "to"))),
column(9,
h2(textOutput("figurehead")),
plotlyOutput(outputId = "col_plot", height = "400px"),
"Figure 1. Number of sightings. Red dot indicates that a survey was conducted and bars indicate the
number of individuals observed")
),
fluidRow(
# Add leaflet map
column(3,
h2("Site map"),
"Click on Platypus icon to show site name and select site.",
leafletOutput("mymap")),
# Add summary data table
column(2,
h2("Data summary table"),
DT::dataTableOutput(outputId = "survey_table")),
column(5,
h2("Details of the surveys"),
tags$br(),
"Platypus month is conducted during August.
The sites currently being surveyed are Cooma Creek, Jerrabomberra Creek, below Coppins Crossing, Mittagang Crossing, Point Hut Crossing, Scottsdale, Queanbeyan and Tidbinbilla.
Data from some sites that have been previously included in the survey are also available.
Currently, each year four surveys are conducted at each site to keep survey effort consistent across the sites.
In previous years a different number of surveys were conducted at the different sites. At each site, 8 to 10 points are surveyed along a section of 800 m to 1 km.
At each site surveys are conducted at dawn and dusk when Platypus are most active.",
),
column(2, tags$img(height = 100, width = 250,
src = "https://f079a2602f82498e927c63b0219b50f9.app.rstudio.cloud/file_show?path=%2Fcloud%2Fproject%2FWaterwatch_logo_Upper_Murrumbidgee.png"))
))
#4 Define server function
html_legend <- "<img src='https://github.com/Citizen-science-ACT-Gov/platy_images/blob/main/platy_image.png'>survey<br/>
<img src='https://github.com/Citizen-science-ACT-Gov/platy_images/blob/main/platy_image_2.png'>Adhoc"
# Shiny server: renders the site map, the reactive sightings plot and summary
# table, and syncs the site selector with clicks on the map markers.
server <- function (input, output, session) {
  # Leaflet site map with one marker per survey reach.
  output$mymap <- renderLeaflet({
    leaflet(reaches) %>%
      addTiles() %>%
      fitBounds(lng1 = 148.8, lat1 = -35.1, lng2 = 149.35, lat2 = -36.3)%>%
      addProviderTiles("Esri.WorldImagery") %>%
      # NOTE(review): 'p' is not defined in this scope (it is only created
      # inside the observeEvent below), so platyIcons[p$Type] will error when
      # the map renders -- presumably a per-site icon lookup was intended;
      # confirm and fix.
      addMarkers(data = reaches, ~longitude, ~latitude, layerId = ~site_id,
                 popup = ~site_id, icon = platyIcons[p$Type],
                 labelOptions = labelOptions(noHide = T, direction = "bottom",
                                             style = list("color" = "black",
                                                          "font-family" = "serif",
                                                          "font-size" = "12px")))
  })
  # NOTE(review): leafletOutput() is a UI function; calling it inside the
  # server has no effect and can be removed (the output slot is already
  # declared in the ui).
  leafletOutput('mymap', height = 600)
  # subset data for plot
  # NOTE(review): 'p' is undefined here as well -- this filter presumably
  # should start from the 'platy' data frame; confirm.
  plot_data <- reactive({
    p %>%
      filter(
        species == input$species &
          site_id == input$site &
          date >= input$date[1] &
          date <= input$date[2])
  })
  # update selected site based on map click
  observeEvent(input$mymap_marker_click, {
    p <- input$mymap_marker_click
    #updateSelectInput(session, "site", selected = p$Siteid)
    # The clicked marker's layerId (the site_id) becomes the selected site.
    updateSelectInput(session, "site", "Update my site", selected = p$id)
  })
  # Create a reactive plot based on the inputs listed
  output$figurehead <- renderText({paste0(input$species, " surveys at ", input$site)})
  # Column plot of sighting counts, with a red point on survey dates.
  output$col_plot <- renderPlotly({
    ggplotly(
      ggplot(plot_data(), )+
        geom_col(aes(date, number, colour = "Animals observed"))+
        geom_point(aes(date, Survey, colour = "Survey conducted"))+
        ggtitle("Number of animals observed")+
        scale_x_date(name = "Date", date_breaks = "6 months", date_labels = "%m/%y", limits = c(input$date[1], input$date[2])) +
        ylab("Number of individuals")+
        scale_colour_manual(values = c("black", "Red"))+
        labs(colour = "Legend")+
        cleanup +
        theme(plot.title = element_text(family = "Helvetica", face = "bold", size = (15), colour = "blue4", hjust = 0.5),
              axis.title = element_text(size = (15), colour = "blue4", face = "bold")))
  })
  # subset data for table
  # Yearly max count and number of surveys for the selected species/site.
  table_data <- reactive({
    platy %>%
      filter(
        species == input$species &
          site_id == input$site) %>%
      mutate(year = substr(date, 1,4))%>%
      select("year", "number")%>%
      group_by(year)%>%
      summarise("Max count" = max(number), "Surveys conducted" = n())
  })
  ## Create reactive table for data
  output$survey_table <- DT::renderDataTable({DT::datatable(table_data (), options=list(iDisplayLength=5, # initial number of records
                                                                         aLengthMenu=c(5,10), # records/page options
                                                                         bLengthChange=0, # show/hide records per page dropdown
                                                                         bFilter=0, # global search box on/off
                                                                         bInfo=0)) })
}
# 5 run the shiny APP
shinyApp(ui = ui, server = server)
# establish and connect to shinyapps.io
# NOTE(review): install.packages() should not run inside an app script, and
# the statements below are unreachable in normal use because shinyApp()
# blocks while the app is running.
install.packages('rsconnect')
# SECURITY(review): the shinyapps.io account token and secret are hard-coded
# here and committed to the repository -- revoke these credentials and load
# them from environment variables or a local, untracked config instead.
rsconnect::setAccountInfo(name='citizen-science-act-gov', token='6BAD85804410F294674F83FE6AD75B0C', secret='LWBMqwGfkVcSqOQvw1rSNLne6k3mH2dsQjrYMGpF')
|
/shiny_plat_2.R
|
no_license
|
Citizen-science-ACT-Gov/Platypus_shiny
|
R
| false
| false
| 8,235
|
r
|
## This code is to create a shiny APP for the platypus month data
## It does the following
#1 load libraries
#2 Read in the platypus month data
#3 set up a ui for the shiny app
#1 load libraries
#install.packages("dplyr")
#install.packages("tidyr")
#install.packages("lubridate")
#install.packages("ggplot2")
#install.packages("shiny")
#install.packages("leaflet")
#install.packages("plotly")
# install.packages("DT")
library(dplyr)
library(tidyr)
library(lubridate)
library(ggplot2)
library(shiny)
library(leaflet)
library(plotly)
library(DT)
#2 Read and tidy platypus month data
platy <- read.csv("survey_data.csv")
platy <- platy%>% mutate(
date = as.Date(date, format = "%d/%m/%Y"),
species = trimws(species),
site_id = as.factor(site_id)
)
reaches <- platy %>% select("site_id", "latitude", "longitude") %>%
unique()
cleanup <- theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.line = element_line(color = "black"),
panel.grid.major.y = element_line(colour = "gray", linetype = "dotted", size = 0.1),
axis.text.x = element_text(color = "black"),
axis.text.y = element_text(color = "black"))
platyIcons <- iconList(
"survey" = makeIcon("platy_image.png", 40, 40),
"adhoc" = makeIcon("platy_image_2.png", 40, 40)
)
#3 set up a ui for the shiny app
ui <- fluidPage(tags$head(
# Note the wrapping of the string in HTML()
tags$style(HTML("
body {
background-color: powderblue;
color: midnightblue;
}
"))
),
# App title
fluidRow(h1("Platypus and Rakali observations - ACT Waterwatch", align = "center", style = "color:midnightblue;")),
fluidRow(
column(3,
br(),
br(),
br(),
br(),
br(),
#select species
wellPanel(selectInput(inputId = "species", label = "Species",
choices = unique(platy$species),
selected = "platypus"),
# select site
selectInput(inputId = "site", label = "Site",
choices = unique(platy$site_id),
selected = "Cooma_Ck"),
# select the time period for the output
dateRangeInput(inputId = "date",
label = "Date range",
format = "dd/mm/yyyy",
start = "2014-01-01",
separator = "to"))),
column(9,
h2(textOutput("figurehead")),
plotlyOutput(outputId = "col_plot", height = "400px"),
"Figure 1. Number of sightings. Red dot indicates that a survey was conducted and bars indicate the
number of individuals observed")
),
fluidRow(
# Add leaflet map
column(3,
h2("Site map"),
"Click on Platypus icon to show site name and select site.",
leafletOutput("mymap")),
# Add summary data table
column(2,
h2("Data summary table"),
DT::dataTableOutput(outputId = "survey_table")),
column(5,
h2("Details of the surveys"),
tags$br(),
"Platypus month is conducted during August.
The sites currently being surveyed are Cooma Creek, Jerrabomberra Creek, below Coppins Crossing, Mittagang Crossing, Point Hut Crossing, Scottsdale, Queanbeyan and Tidbinbilla.
Data from some sites that have been previously included in the survey are also available.
Currently, each year four surveys are conducted at each site to keep survey effort consistent across the sites.
In previous years a different number of surveys were conducted at the different sites. At each site, 8 to 10 points are surveyed along a section of 800 m to 1 km.
At each site surveys are conducted at dawn and dusk when Platypus are most active.",
),
column(2, tags$img(height = 100, width = 250,
src = "https://f079a2602f82498e927c63b0219b50f9.app.rstudio.cloud/file_show?path=%2Fcloud%2Fproject%2FWaterwatch_logo_Upper_Murrumbidgee.png"))
))
#4 Define server function
html_legend <- "<img src='https://github.com/Citizen-science-ACT-Gov/platy_images/blob/main/platy_image.png'>survey<br/>
<img src='https://github.com/Citizen-science-ACT-Gov/platy_images/blob/main/platy_image_2.png'>Adhoc"
server <- function (input, output, session) {
output$mymap <- renderLeaflet({
leaflet(reaches) %>%
addTiles() %>%
fitBounds(lng1 = 148.8, lat1 = -35.1, lng2 = 149.35, lat2 = -36.3)%>%
addProviderTiles("Esri.WorldImagery") %>%
addMarkers(data = reaches, ~longitude, ~latitude, layerId = ~site_id,
popup = ~site_id, icon = platyIcons[p$Type],
labelOptions = labelOptions(noHide = T, direction = "bottom",
style = list("color" = "black",
"font-family" = "serif",
"font-size" = "12px")))
})
leafletOutput('mymap', height = 600)
# subset data for plot
plot_data <- reactive({
p %>%
filter(
species == input$species &
site_id == input$site &
date >= input$date[1] &
date <= input$date[2])
})
# update selected site based on map click
observeEvent(input$mymap_marker_click, {
p <- input$mymap_marker_click
#updateSelectInput(session, "site", selected = p$Siteid)
updateSelectInput(session, "site", "Update my site", selected = p$id)
})
# Create a reactive plot based on the inputs listed
output$figurehead <- renderText({paste0(input$species, " surveys at ", input$site)})
output$col_plot <- renderPlotly({
ggplotly(
ggplot(plot_data(), )+
geom_col(aes(date, number, colour = "Animals observed"))+
geom_point(aes(date, Survey, colour = "Survey conducted"))+
ggtitle("Number of animals observed")+
scale_x_date(name = "Date", date_breaks = "6 months", date_labels = "%m/%y", limits = c(input$date[1], input$date[2])) +
ylab("Number of individuals")+
scale_colour_manual(values = c("black", "Red"))+
labs(colour = "Legend")+
cleanup +
theme(plot.title = element_text(family = "Helvetica", face = "bold", size = (15), colour = "blue4", hjust = 0.5),
axis.title = element_text(size = (15), colour = "blue4", face = "bold")))
})
# subset data for table
table_data <- reactive({
platy %>%
filter(
species == input$species &
site_id == input$site) %>%
mutate(year = substr(date, 1,4))%>%
select("year", "number")%>%
group_by(year)%>%
summarise("Max count" = max(number), "Surveys conducted" = n())
})
## Create reactive table for data
output$survey_table <- DT::renderDataTable({DT::datatable(table_data (), options=list(iDisplayLength=5, # initial number of records
aLengthMenu=c(5,10), # records/page options
bLengthChange=0, # show/hide records per page dropdown
bFilter=0, # global search box on/off
bInfo=0)) })
}
# 5 run the shiny APP
shinyApp(ui = ui, server = server)
# establish and connect to shinyapps.io
# NOTE(review): install.packages() should not run inside an app script, and
# the statements below are unreachable in normal use because shinyApp()
# blocks while the app is running.
install.packages('rsconnect')
# SECURITY(review): the shinyapps.io account token and secret are hard-coded
# here and committed to the repository -- revoke these credentials and load
# them from environment variables or a local, untracked config instead.
rsconnect::setAccountInfo(name='citizen-science-act-gov', token='6BAD85804410F294674F83FE6AD75B0C', secret='LWBMqwGfkVcSqOQvw1rSNLne6k3mH2dsQjrYMGpF')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/circGLM.R
\name{circGLM}
\alias{circGLM}
\title{Fitting Bayesian circular General Linear Models}
\usage{
circGLM(formula, data, th, X = if (missing(th)) { model.matrix(formula,
data)[, -1, drop = FALSE] } else { matrix(nrow = length(th), ncol = 0) },
conj_prior = rep(0, 3), bt_prior_musd = c(mu = 0, sd = 1),
starting_values = c(0, 1, rep(0, ncol(X))), bwb = rep(0.05, ncol(X)),
Q = 10000, burnin = 1000, thin = 1, kappaModeEstBandwith = 0.1,
CIsize = 0.95, r = 2, returnPostSample = TRUE, output = "list",
SDDBFDensEstMethod = "density", reparametrize = TRUE,
groupMeanComparisons = TRUE, skipDichSplit = FALSE, centerOnly = FALSE)
}
\arguments{
\item{formula}{an optional object of class "formula" (or one that can be
coerced to that class): a symbolic description of the model to be fitted.}
\item{data}{an optional data frame or object coercible by
\code{\link{as.data.frame}} to a data frame, containing the variables in
the model.}
\item{th}{An optional vector of angles in radians or degrees, representing
the circular outcome we want to predict. If any value is larger than
\code{2 * pi}, the input is transformed to radians. Otherwise, \code{th} is
treated as radians.}
\item{X}{An optional matrix of predictors, both continuous (linear) and
categorical (as dummies). If categorical predictors are included, the
dummies must already be made and they must be in (0, 1), because this is
checked to be able to separate them from the continuous predictors, so that
they are treated differently. If not, or if \cite{skipDichSplit = TRUE},
they will be treated as linear predictors.}
\item{conj_prior}{A numeric vector of length 3, containing, in that order,
prior mean direction, prior resultant length, and prior sample size. Used
for the von Mises part of the model, beta_0 and kappa.}
\item{bt_prior_musd}{A numeric vector of length 2, or \code{NA}. If
\code{bt_prior_musd = NA}, a constant prior is used. If it is a numeric
vector of length 2, a Normal prior is used so that the first value is the
mean, and the second value is the standard deviation.}
\item{starting_values}{A numeric vector with starting values for the mcmc
sampler. The length of the numeric vector should be 2 plus the number of
columns in X.}
\item{bwb}{A numeric vector, where the length is at least the number of
continuous predictors. This is a tuning parameters used in sampling of
beta. New values are sampled uniformly around the current value of beta
with bounds at \code{bt_cur - bwb} and \code{bt_cur + bwb}. If
\code{reparametrize = TRUE}, bwb corresponds to the bounds around the
reparametrized values.}
\item{Q}{Integer; The number of iterations to perform.}
\item{burnin}{Integer; The number of burn-in (warmup) iterations.}
\item{thin}{Integer; The number of parameters sets to sample for each
parameter set that is saved. Can be used to save memory if \code{Q} is
large.}
\item{kappaModeEstBandwith}{Numeric between 0 and 1. The mode of \code{kappa}
is estimated by taking the midpoint of a highest density interval.
Specifically, it is the midpoint of the interval that contains
\code{kappaModeEstBandwith} of the density of the posterior. Reasonable
values are roughly between .005 and .2, although lower values may be
reasonable if Q is large.}
\item{CIsize}{The size of the credible intervals. This is used for all
parameters, whether they use highest density intervals, circular quantiles
or regular quantiles.}
\item{r}{A numeric. \code{r} is the parameter used in the link function
\eqn{g(x, r) = r atan(x)}. If \code{r = 2}, the link function maps the real
line to the full circle. If \code{r < 2} the link functions maps to a
proportion \code{r / 2} of the circle. If \code{r > 2}, the link functions
can reach the same are of the circle multiple times, which is unlikely to
be useful, and should be used with caution.}
\item{returnPostSample}{Logical indicating whether the mcmc sample itself
should be returned. Should only be set to \code{FALSE} if there are memory
constraints, as many subsequent analyses rely on the posterior sample
directly.}
\item{output}{A character string, either \code{"list"} or \code{"vector"}. In
most situations, \code{"list"} should be used, which returns a circGLM
object. The \code{"vector"} options is only useful for simulation studies
etc.}
\item{SDDBFDensEstMethod}{A character string, either \code{"density"} or
\code{"histogram"}. Gives the method to If \code{SDDBFDensEstMethod =
"density"}, the default, the Bayes Factors are computed based on the
density estimate given by a spline interpolation of the \code{density()}
function, so they are calculated in R rather than C++. This method should
be much more stable than the histogram method, especially if there is low
probability at 0 in the posterior. If \code{SDDBFDensEstMethod =
"histogram"}, Bayes factors are computed by estimating the density from the
posterior sample as the midpoint of a histogram bar at 0 containing 10\% of
the data.}
\item{reparametrize}{Logical; If \code{TRUE}, proposals for beta are drawn
uniformly around a reparametrization \code{zt = pi * atan(bt) / 2}, so from
\code{zt_can = runif(1, zt - bwb, zt + bwb)}, which is then transformed
back. Then, the proposals amount to the truncated cauchy pdf. If
\code{FALSE}, proposals for beta are drawn on uniformly around beta, so
from \code{bt_can = runif(1, bt_cur - bwb, bt_cur + bwb)}.}
\item{groupMeanComparisons}{Logical indicating whether mean comparisons in
the form of Bayes Factors and posterior model probabilities should be
computed.}
\item{skipDichSplit}{Logical indicating whether to treat categorical
predictor specially. Usually, \code{skipDichSplit = TRUE} should be used.
This removes the arbitrary dependence on the labeling of categorical
predictors and ensures that each group has a regression line of the same
shape. If \code{skipDichSplit = FALSE}, the model will be the same as
\code{lm.circular} from the package \code{circular} in that no separate
treatment for categorical variables is performed.}
\item{centerOnly}{Logical; If \code{TRUE}, the continuous predictors are
centered only, not standardized. If \code{FALSE}, the continuous predictors
are standardized.}
}
\value{
A \code{circGLM} object, which can be further analyzed with its
associated \code{\link{plot.circGLM}}, \code{\link{coef.circGLM}} and
\code{\link{print.circGLM}} functions.
An object of class \code{circGLM} contains the following elements (although
some elements are not returned if not applicable):
\describe{
\item{\code{b0_meandir}}{The posterior mean direction of \eqn{\beta_0}, the
circular intercept.} \item{\code{b0_CCI}}{The circular credible interval of
of \eqn{\beta_0}, the circular intercept.} \item{\code{kp_mean}}{The
posterior mean of \eqn{\kappa}, the concentration parameter.}
\item{\code{kp_mode}}{The posterior mode of \eqn{\kappa}, the concentration
parameter.} \item{\code{kp_HDI}}{The \code{CIsize} highest posterior
density interval of \eqn{\kappa}.} \item{\code{kp_propacc}}{The acceptance
proportion of the rejection sampler for \eqn{\kappa}.}
\item{\code{bt_mean}}{The posterior means of the regression coefficients
\eqn{\beta}.} \item{\code{bt_CCI}}{The credible intervals of the regression
coefficients \eqn{\beta}.} \item{\code{bt_propacc}}{The acceptance
proportions of the Metropolis-Hastings sampler for \eqn{\beta}.}
\item{\code{dt_meandir}}{The posterior mean directions of the group
difference parameters, \eqn{\delta}.} \item{\code{dt_CCI}}{The circular
credible intervals of the group difference parameters, \eqn{\delta}.}
\item{\code{dt_propacc}}{The acceptance proportions of the
Metropolis-Hastings sampler for \eqn{\delta}.} \item{\code{zt_mean}}{The
posterior means of the reparametrized coefficients \eqn{\zeta}.}
\item{\code{zt_mdir}}{The posterior mean directions of the reparametrized
coefficients \eqn{\zeta}.} \item{\code{zt_CCI}}{The credible intervals of
the reparametrized coefficients \eqn{\zeta}.}
\item{\code{lppd}}{Ingredient for information criteria; Log posterior
predictive density.} \item{\code{n_par}}{Ingredient for information
criteria; Number of parameters.} \item{\code{ll_th_estpars}}{Ingredient for
information criteria; Log-likelihood of the dataset at estimated parameter
set.} \item{\code{ll_each_th_curpars}}{Ingredient for information criteria;
Log-likelihood of each data point at each sampled parameter set.}
\item{\code{ll_th_curpars}}{Ingredient for information criteria;
Log-likelihood of the dataset at each sampled parameter set.}
\item{\code{th_hat}}{An n-vector of predicted angles.}
\item{\code{b0_chain}}{A Q-vector of sampled circular intercepts.}
\item{\code{kp_chain}}{A Q-vector of sampled concentration parameters.}
\item{\code{bt_chain}}{A matrix of sampled circular regression
coefficients.} \item{\code{dt_chain}}{A matrix of sampled group difference
parameters.} \item{\code{zt_chain}}{A matrix of sampled reparametrized
circular regression coefficients.} \item{\code{mu_chain}}{A matrix of
sampled group means.} \item{\code{AIC_Bayes}}{A version of the AIC where
posterior estimates are used to compute the log-likelihood.}
\item{\code{p_DIC}}{Ingredient for DIC.} \item{\code{p_DIC_alt}}{Ingredient
for DIC.} \item{\code{DIC}}{The DIC.} \item{\code{DIC_alt}}{The alternative
formulation of the DIC as given in Bayesian Data Analysis, Gelman et al.
(2003).} \item{\code{p_WAIC1}}{Ingredient for WAIC1.}
\item{\code{p_WAIC2}}{Ingredient for WAIC2.} \item{\code{WAIC1}}{The first
formulation of the WAIC as given in Bayesian Data Analysis, Gelman et al.
(2003).} \item{\code{WAIC2}}{The second formulation of the WAIC as given in
Bayesian Data Analysis, Gelman et al. (2003).}
\item{\code{DeltaIneqBayesFactors}}{A matrix of inequality Bayes factors
for group difference parameters.} \item{\code{BetaIneqBayesFactors}}{A
matrix of inequality Bayes factors for regression parameters.}
\item{\code{BetaSDDBayesFactors}}{A matrix of equality Bayes factors
(Savage-Dickey Density ratio) for group difference parameters.}
\item{\code{MuIneqBayesFactors}}{A matrix of inequality Bayes factors for
group mean parameters.} \item{\code{MuSDDBayesFactors}}{A matrix of
equality Bayes factors (Savage-Dickey Density ratio) for group mean
parameters.} \item{\code{SavedIts}}{Number of iterations returned, without
thinned iterations and burn-in.} \item{\code{TotalIts}}{Number of
iterations performed, including thinning and burn-in.}
\item{\code{TimeTaken}}{Seconds taken for analysis.}
\item{\code{BetaBayesFactors}}{Matrix of Bayes factors for regression
parameters.} \item{\code{MuBayesFactors}}{Matrix of Bayes factors for mean
parameters.} \item{\code{all_chains}}{A matrix with all sampled values of
all parameters.} \item{\code{Call}}{The matched call.}
\item{\code{thin}}{Thinning factor used.} \item{\code{burnin}}{Burn-in
used.} \item{\code{data_th}}{The original dataset.}
\item{\code{data_X}}{Matrix of used continuous predictors.}
\item{\code{data_d}}{Matrix of used categorical predictors.}
\item{\code{data_stX}}{Matrix of used standardized continuous predictors.}
\item{\code{r}}{Used parameter of the link function.} }
}
\description{
The main function for running Bayesian circular GLMs. The model predicts some
circular outcome \eqn{\theta} and has the form \deqn{\theta_i = \beta_0 +
\delta^t d_i + g(\beta^t x_i) + \epsilon_i,} where \eqn{\beta_0} is an
circular intercept, \eqn{\delta} are group difference parameters, \eqn{d_i}
is a vector of dummy variables indicating group membership, \eqn{g(.)} is a
link function given by \eqn{g(x) = r atan(x)} where \code{r} can be chosen,
\eqn{\beta} is a vector of regression coefficients, \eqn{x_i} is a vector of
covariates, and \eqn{\epsilon_i} is a von Mises distributed error with
residual concentration \eqn{\kappa}. This function returns a \code{circGLM}
object which can be further investigated with standard functions \code{plot},
\code{print}, \code{coef}, \code{residuals}, and special functions
\code{mcmc_summary.circGLM} for results for all MCMC chains,
\code{IC_compare.circGLM} for a comparison of information criteria of one or
more circGLM models, \code{BF.circGLM} to obtain Bayes Factors, and
\code{predict_function.circGLM} to create a prediction function.
}
\details{
The model can be passed either as a combination of a \code{formula} and a
data frame or matrix \code{data}, as in \code{lm()}, or as an outcome vector
\code{th} and a matrix of predictors \code{X}. If categorical variables are
to be included that are not yet given as dummies, formula syntax is
recommended as this will automatically take care of dummy creation.
\code{circGLM} performs an mcmc sampler that generates a sample from the
posterior of the intercept \eqn{\beta_0}, regression coefficients
\eqn{\beta}, group mean direction differences \eqn{\delta} and residual
\eqn{\kappa}.
An attempt is made to split the predictor matrix \code{X} into continuous and
categorical predictors. This is done so that the categorical predictors can
be treated differently, which removes the arbitrary dependence on the
labeling of categorical predictors and ensures that each group has a
regression line of the same shape.
If categorical predictors are passed as factors, formula syntax is
recommended, as it will automatically generate dummy variables. If the
predictors are passed as a matrix \code{X}, categorical variables must be
entered as dummy (dichotomous) variables.
The main results obtained are estimates and credible intervals for the
parameters, posterior samples, and Bayes factors for various standard
hypothesis comparisons.
As with all mcmc samplers, convergence must be checked, and tuning parameters
\code{bwb} and \code{reparametrize} can be tweaked if the sampler converges
poorly. The circGLM object that is returned contains proportions accepted
which can be used to monitor performance.
}
\examples{
dat <- generateCircGLMData()
m <- circGLM(th ~ ., dat)
print(m)
print(m, type = "all")
plot(m, type = "tracestack")
}
\seealso{
\code{\link{print.circGLM}}, \code{\link{plot.circGLM}},
\code{\link{coef.circGLM}}, \code{\link{BF.circGLM}},
\code{\link{residuals.circGLM}}, \code{\link{predict.circGLM}},
\code{\link{predict_function.circGLM}}, \code{\link{mcmc_summary.circGLM}},
\code{\link{IC_compare.circGLM}}.
}
|
/man/circGLM.Rd
|
no_license
|
ArjanHuizing/CircGLMBayes
|
R
| false
| true
| 14,534
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/circGLM.R
\name{circGLM}
\alias{circGLM}
\title{Fitting Bayesian circular General Linear Models}
\usage{
circGLM(formula, data, th, X = if (missing(th)) { model.matrix(formula,
data)[, -1, drop = FALSE] } else { matrix(nrow = length(th), ncol = 0) },
conj_prior = rep(0, 3), bt_prior_musd = c(mu = 0, sd = 1),
starting_values = c(0, 1, rep(0, ncol(X))), bwb = rep(0.05, ncol(X)),
Q = 10000, burnin = 1000, thin = 1, kappaModeEstBandwith = 0.1,
CIsize = 0.95, r = 2, returnPostSample = TRUE, output = "list",
SDDBFDensEstMethod = "density", reparametrize = TRUE,
groupMeanComparisons = TRUE, skipDichSplit = FALSE, centerOnly = FALSE)
}
\arguments{
\item{formula}{an optional object of class "formula" (or one that can be
coerced to that class): a symbolic description of the model to be fitted.}
\item{data}{an optional data frame or object coercible by
\code{\link{as.data.frame}} to a data frame, containing the variables in
the model.}
\item{th}{An optional vector of angles in radians or degrees, representing
the circular outcome we want to predict. If any value is larger than
\code{2 * pi}, the input is transformed to radians. Otherwise, \code{th} is
treated as radians.}
\item{X}{An optional matrix of predictors, both continuous (linear) and
categorical (as dummies). If categorical predictors are included, the
dummies must already be made and they must be in (0, 1), because this is
checked to be able to separate them from the continuous predictors, so that
they are treated differently. If not, or if \code{skipDichSplit = TRUE},
they will be treated as linear predictors.}
\item{conj_prior}{A numeric vector of length 3, containing, in that order,
prior mean direction, prior resultant length, and prior sample size. Used
for the von Mises part of the model, beta_0 and kappa.}
\item{bt_prior_musd}{A numeric vector of length 2, or \code{NA}. If
\code{bt_prior_musd = NA}, a constant prior is used. If it is a numeric
vector of length 2, a Normal prior is used so that the first value is the
mean, and the second value is the standard deviation.}
\item{starting_values}{A numeric vector with starting values for the mcmc
sampler. The length of the numeric vector should be 2 plus the number of
columns in X.}
\item{bwb}{A numeric vector, where the length is at least the number of
continuous predictors. This is a tuning parameters used in sampling of
beta. New values are sampled uniformly around the current value of beta
with bounds at \code{bt_cur - bwb} and \code{bt_cur + bwb}. If
\code{reparametrize = TRUE}, bwb corresponds to the bounds around the
reparametrized values.}
\item{Q}{Integer; The number of iterations to perform.}
\item{burnin}{Integer; The number of burn-in (warmup) iterations.}
\item{thin}{Integer; The number of parameters sets to sample for each
parameter set that is saved. Can be used to save memory if \code{Q} is
large.}
\item{kappaModeEstBandwith}{Numeric between 0 and 1. The mode of \code{kappa}
is estimated by taking the midpoint of a highest density interval.
Specifically, it is the midpoint of the interval that contains
\code{kappaModeEstBandwith} of the density of the posterior. Reasonable
values are roughly between .005 and .2, although lower values may be
reasonable if Q is large.}
\item{CIsize}{The size of the credible intervals. This is used for all
parameters, whether they use highest density intervals, circular quantiles
or regular quantiles.}
\item{r}{A numeric. \code{r} is the parameter used in the link function
\eqn{g(x, r) = r atan(x)}. If \code{r = 2}, the link function maps the real
line to the full circle. If \code{r < 2} the link functions maps to a
proportion \code{r / 2} of the circle. If \code{r > 2}, the link function
can reach the same area of the circle multiple times, which is unlikely to
be useful, and should be used with caution.}
\item{returnPostSample}{Logical indicating whether the mcmc sample itself
should be returned. Should only be set to \code{FALSE} if there are memory
constraints, as many subsequent analyses rely on the posterior sample
directly.}
\item{output}{A character string, either \code{"list"} or \code{"vector"}. In
most situations, \code{"list"} should be used, which returns a circGLM
object. The \code{"vector"} options is only useful for simulation studies
etc.}
\item{SDDBFDensEstMethod}{A character string, either \code{"density"} or
\code{"histogram"}, giving the method used to estimate the posterior density
at zero for the Savage-Dickey density ratio Bayes factors. If
\code{SDDBFDensEstMethod = "density"}, the default, the Bayes Factors are
computed based on the
density estimate given by a spline interpolation of the \code{density()}
function, so they are calculated in R rather than C++. This method should
be much more stable than the histogram method, especially if there is low
probability at 0 in the posterior. If \code{SDDBFDensEstMethod =
"histogram"}, Bayes factors are computed by estimating the density from the
posterior sample as the midpoint of a histogram bar at 0 containing 10\% of
the data.}
\item{reparametrize}{Logical; If \code{TRUE}, proposals for beta are drawn
uniformly around a reparametrization \code{zt = pi * atan(bt) / 2}, so from
\code{zt_can = runif(1, zt - bwb, zt + bwb)}, which is then transformed
back. Then, the proposals amount to the truncated cauchy pdf. If
\code{FALSE}, proposals for beta are drawn on uniformly around beta, so
from \code{bt_can = runif(1, bt_cur - bwb, bt_cur + bwb)}.}
\item{groupMeanComparisons}{Logical indicating whether mean comparisons in
the form of Bayes Factors and posterior model probabilities should be
computed.}
\item{skipDichSplit}{Logical indicating whether to treat categorical
predictor specially. Usually, \code{skipDichSplit = TRUE} should be used.
This removes the arbitrary dependence on the labeling of categorical
predictors and ensures that each group has a regression line of the same
shape. If \code{skipDichSplit = FALSE}, the model will be the same as
\code{lm.circular} from the package \code{circular} in that no separate
treatment for categorical variables is performed.}
\item{centerOnly}{Logical; If \code{TRUE}, the continuous predictors are
centered only, not standardized. If \code{FALSE}, the continuous predictors
are standardized.}
}
\value{
A \code{circGLM} object, which can be further analyzed with its
associated \code{\link{plot.circGLM}}, \code{\link{coef.circGLM}} and
\code{\link{print.circGLM}} functions.
An object of class \code{circGLM} contains the following elements (although
some elements are not returned if not applicable):
\describe{
\item{\code{b0_meandir}}{The posterior mean direction of \eqn{\beta_0}, the
circular intercept.} \item{\code{b0_CCI}}{The circular credible interval of
of \eqn{\beta_0}, the circular intercept.} \item{\code{kp_mean}}{The
posterior mean of \eqn{\kappa}, the concentration parameter.}
\item{\code{kp_mode}}{The posterior mode of \eqn{\kappa}, the concentration
parameter.} \item{\code{kp_HDI}}{The \code{CIsize} highest posterior
density interval of \eqn{\kappa}.} \item{\code{kp_propacc}}{The acceptance
proportion of the rejection sampler for \eqn{\kappa}.}
\item{\code{bt_mean}}{The posterior means of the regression coefficients
\eqn{\beta}.} \item{\code{bt_CCI}}{The credible intervals of the regression
coefficients \eqn{\beta}.} \item{\code{bt_propacc}}{The acceptance
proportions of the Metropolis-Hastings sampler for \eqn{\beta}.}
\item{\code{dt_meandir}}{The posterior mean directions of the group
difference parameters, \eqn{\delta}.} \item{\code{dt_CCI}}{The circular
credible intervals of the group difference parameters, \eqn{\delta}.}
\item{\code{dt_propacc}}{The acceptance proportions of the
Metropolis-Hastings sampler for \eqn{\delta}.} \item{\code{zt_mean}}{The
posterior means of the reparametrized coefficients \eqn{\zeta}.}
\item{\code{zt_mdir}}{The posterior mean directions of the reparametrized
coefficients \eqn{\zeta}.} \item{\code{zt_CCI}}{The credible intervals of
the reparametrized coefficients \eqn{\zeta}.}
\item{\code{lppd}}{Ingredient for information criteria; Log posterior
predictive density.} \item{\code{n_par}}{Ingredient for information
criteria; Number of parameters.} \item{\code{ll_th_estpars}}{Ingredient for
information criteria; Log-likelihood of the dataset at estimated parameter
set.} \item{\code{ll_each_th_curpars}}{Ingredient for information criteria;
Log-likelihood of each data point at each sampled parameter set.}
\item{\code{ll_th_curpars}}{Ingredient for information criteria;
Log-likelihood of the dataset at each sampled parameter set.}
\item{\code{th_hat}}{An n-vector of predicted angles.}
\item{\code{b0_chain}}{A Q-vector of sampled circular intercepts.}
\item{\code{kp_chain}}{A Q-vector of sampled concentration parameters.}
\item{\code{bt_chain}}{A matrix of sampled circular regression
coefficients.} \item{\code{dt_chain}}{A matrix of sampled group difference
parameters.} \item{\code{zt_chain}}{A matrix of sampled reparametrized
circular regression coefficients.} \item{\code{mu_chain}}{A matrix of
sampled group means.} \item{\code{AIC_Bayes}}{A version of the AIC where
posterior estimates are used to compute the log-likelihood.}
\item{\code{p_DIC}}{Ingredient for DIC.} \item{\code{p_DIC_alt}}{Ingredient
for DIC.} \item{\code{DIC}}{The DIC.} \item{\code{DIC_alt}}{The alternative
formulation of the DIC as given in Bayesian Data Analysis, Gelman et al.
(2003).} \item{\code{p_WAIC1}}{Ingredient for WAIC1.}
\item{\code{p_WAIC2}}{Ingredient for WAIC2.} \item{\code{WAIC1}}{The first
formulation of the WAIC as given in Bayesian Data Analysis, Gelman et al.
(2003).} \item{\code{WAIC2}}{The second formulation of the WAIC as given in
Bayesian Data Analysis, Gelman et al. (2003).}
\item{\code{DeltaIneqBayesFactors}}{A matrix of inequality Bayes factors
for group difference parameters.} \item{\code{BetaIneqBayesFactors}}{A
matrix of inequality Bayes factors for regression parameters.}
\item{\code{BetaSDDBayesFactors}}{A matrix of equality Bayes factors
(Savage-Dickey Density ratio) for group difference parameters.}
\item{\code{MuIneqBayesFactors}}{A matrix of inequality Bayes factors for
group mean parameters.} \item{\code{MuSDDBayesFactors}}{A matrix of
equality Bayes factors (Savage-Dickey Density ratio) for group mean
parameters.} \item{\code{SavedIts}}{Number of iterations returned, without
thinned iterations and burn-in.} \item{\code{TotalIts}}{Number of
iterations performed, including thinning and burn-in.}
\item{\code{TimeTaken}}{Seconds taken for analysis.}
\item{\code{BetaBayesFactors}}{Matrix of Bayes factors for regression
parameters.} \item{\code{MuBayesFactors}}{Matrix of Bayes factors for mean
parameters.} \item{\code{all_chains}}{A matrix with all sampled values of
all parameters.} \item{\code{Call}}{The matched call.}
\item{\code{thin}}{Thinning factor used.} \item{\code{burnin}}{Burn-in
used.} \item{\code{data_th}}{The original dataset.}
\item{\code{data_X}}{Matrix of used continuous predictors.}
\item{\code{data_d}}{Matrix of used categorical predictors.}
\item{\code{data_stX}}{Matrix of used standardized continuous predictors.}
\item{\code{r}}{Used parameter of the link function.} }
}
\description{
The main function for running Bayesian circular GLMs. The model predicts some
circular outcome \eqn{\theta} and has the form \deqn{\theta_i = \beta_0 +
\delta^t d_i + g(\beta^t x_i) + \epsilon_i,} where \eqn{\beta_0} is an
circular intercept, \eqn{\delta} are group difference parameters, \eqn{d_i}
is a vector of dummy variables indicating group membership, \eqn{g(.)} is a
link function given by \eqn{g(x) = r atan(x)} where \code{r} can be chosen,
\eqn{\beta} is a vector of regression coefficients, \eqn{x_i} is a vector of
covariates, and \eqn{\epsilon_i} is a von Mises distributed error with
residual concentration \eqn{\kappa}. This function returns a \code{circGLM}
object which can be further investigated with standard functions \code{plot},
\code{print}, \code{coef}, \code{residuals}, and special functions
\code{mcmc_summary.circGLM} for results for all MCMC chains,
\code{IC_compare.circGLM} for a comparison of information criteria of one or
more circGLM models, \code{BF.circGLM} to obtain Bayes Factors, and
\code{predict_function.circGLM} to create a prediction function.
}
\details{
The model can be passed either as a combination of a \code{formula} and a
data frame or matrix \code{data}, as in \code{lm()}, or as an outcome vector
\code{th} and a matrix of predictors \code{X}. If categorical variables are
to be included that are not yet given as dummies, formula syntax is
recommended as this will automatically take care of dummy creation.
\code{circGLM} performs an mcmc sampler that generates a sample from the
posterior of the intercept \eqn{\beta_0}, regression coefficients
\eqn{\beta}, group mean direction differences \eqn{\delta} and residual
\eqn{\kappa}.
An attempt is made to split the predictor matrix \code{X} into continuous and
categorical predictors. This is done so that the categorical predictors can
be treated differently, which removes the arbitrary dependence on the
labeling of categorical predictors and ensures that each group has a
regression line of the same shape.
If categorical predictors are passed as factors, formula syntax is
recommended, as it will automatically generate dummy variables. If the
predictors are passed as a matrix \code{X}, categorical variables must be
entered as dummy (dichotomous) variables.
The main results obtained are estimates and credible intervals for the
parameters, posterior samples, and Bayes factors for various standard
hypothesis comparisons.
As with all mcmc samplers, convergence must be checked, and tuning parameters
\code{bwb} and \code{reparametrize} can be tweaked if the sampler converges
poorly. The circGLM object that is returned contains proportions accepted
which can be used to monitor performance.
}
\examples{
dat <- generateCircGLMData()
m <- circGLM(th ~ ., dat)
print(m)
print(m, type = "all")
plot(m, type = "tracestack")
}
\seealso{
\code{\link{print.circGLM}}, \code{\link{plot.circGLM}},
\code{\link{coef.circGLM}}, \code{\link{BF.circGLM}},
\code{\link{residuals.circGLM}}, \code{\link{predict.circGLM}},
\code{\link{predict_function.circGLM}}, \code{\link{mcmc_summary.circGLM}},
\code{\link{IC_compare.circGLM}}.
}
|
# In this lesson, we will explore some basic building blocks of the R programming language.
# In its simplest form, R can be used as an interactive calculator. Type 5 + 7
# R simply prints the result of 12 by default. However, R is a programming language and
# often the reason we use a programming language as opposed to a calculator is to
# automate some process or avoid unnecessary repetition.
# In this case, we may want to use our result from above in a second calculation.
# Instead of retyping 5 + 7 every time we need it, we can just create a new variable
# that stores the result.
# The way you assign a value to a variable in R is by using the assignment operator,
# which is just a 'less than' symbol followed by a 'minus' sign. It looks like this: <-
# Think of the assignment operator as an arrow. You are assigning the value on the right
# side of the arrow to the variable name on the left side of the arrow.
# To assign the result of 5 + 7 to a new variable called x, you type x <- 5 + 7. This
# can be read as 'x gets 5 plus 7'.
x <- 5 + 7
# You'll notice that R did not print the result of 12 this time. When you use the
# assignment operator, R assumes that you don't want to see the result immediately, but
# rather that you intend to use the result for something else later on.
# Store the result of x - 3 in a new variable called y.
y <- x - 3
# Now, let's create a small collection of numbers called a vector. Any object that
# contains data is called a data structure and numeric vectors are the simplest type of
# data structure in R. In fact, even a single number is considered a vector of length
# one.
# The easiest way to create a vector is with the c() function, which stands for
# 'concatenate' or 'combine'. To create a vector containing the numbers 1.1, 9, and
# 3.14, type c(1.1, 9, 3.14), and store the result in a variable called z.
z <- c(1.1, 9, 3.14)
# You can combine vectors to make a new vector. Create a new vector that contains z,
# 555, then z again in that order. Don't assign this vector to a new variable, so that
# we can just see the result immediately.
c(z, 555, z)
# Numeric vectors can be used in arithmetic expressions. Type the following to see what
# happens: z * 2 + 100.
z * 2 + 100
# First, R multiplied each of the three elements in z by 2. Then it added 100 to each
# element to get the result you see above.
# Other common arithmetic operators are `+`, `-`, `/`, and `^` (where x^2 means 'x
# squared'). To take the square root, use the sqrt() function and to take the absolute
# value, use the abs() function.
#
# Take the square root of z - 1 and assign it to a new variable called my_sqrt.
######################
my_sqrt <- sqrt(z - 1)
# Before we view the contents of the my_sqrt variable, what do you think it contains?
# 1: a vector of length 3
# 2: a single number (i.e a vector of length 1)
# 3: a vector of length 0 (i.e. an empty vector)
#
# As you may have guessed, R first subtracted 1 from each element of z, then took the
# square root of each element. This leaves you with a vector of the same length as the
# original vector z.
#
# Create a new variable called my_div that gets the value of z divided by my_sqrt.
my_div <- z / my_sqrt
#
# Which statement do you think is true?
#
# 1: my_div is a single number (i.e a vector of length 1)
# 2: my_div is undefined
# 3: The first element of my_div is equal to the first element of z divided by the first element of my_sqrt, and so on...
#
# When given two vectors of the same length, R simply performs the specified arithmetic
# operation (`+`, `-`, `*`, etc.) element-by-element. If the vectors are of different
# lengths, R 'recycles' the shorter vector until it is the same length as the longer
# vector.
#
# When we did z * 2 + 100 in our earlier example, z was a vector of length 3, but
# technically 2 and 100 are each vectors of length 1.
#
# Behind the scenes, R is 'recycling' the 2 to make a vector of 2s and the 100 to make a
# vector of 100s. In other words, when you ask R to compute z * 2 + 100, what it really
# computes is this: z * c(2, 2, 2) + c(100, 100, 100).
#
# To see another example of how this vector 'recycling' works, try adding
# c(1, 2, 3) + c(0, 10)
#
# If the length of the shorter vector does not divide evenly into the length of the
# longer vector, R will still apply the 'recycling' method, but will throw a warning to
# let you know something fishy might be going on.
#
#
# c(1, 2, 3, 4) + c(0, 10, 100)
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# --------------------------- Vector ----------------------
# Creating a vector
x <- c(1, 5, 4, 9, 0)
typeof(x)
length(x)
# Mixing types coerces every element to the most flexible type (character here)
x <- c(1, 5.4, TRUE, "hello")
x
typeof(x)
x <- 1:7
x
y <- 2:-2
y
seq(1, 3, by = 0.2)  # specify step size
# Accessing elements in a vector:
# Using an integer vector as index:
x
x[3]           # access 3rd element
x[c(2, 4)]     # access 2nd and 4th element
x[-1]          # access all but 1st element
# x[c(2, -4)]  # error: cannot mix positive and negative subscripts
x[c(-2, -4)]
# Using a logical vector as index:
# x[c(TRUE, FALSE, FALSE, TRUE)]
x[x > 3]       # filtering vectors based on conditions
x[x > 0]
# Using a character vector as index.
# This type of indexing is useful when dealing with named vectors; we can name
# each element of a vector.
x <- c("first" = 3, "second" = 0, "third" = 9)
names(x)
x["second"]
x[c("first", "third")]
# Modifying a vector
# We can modify a vector using the assignment operator combined with the access
# techniques above. Reassignment can also be used to shorten a vector.
x
x[2] <- 4; x      # modify 2nd element
x[x > 3] <- 5; x  # modify elements greater than 3
x <- x[1:4]; x    # subset to first 4 elements (x has only 3, so the 4th is NA)
# Deleting a vector
# We can delete a vector by simply assigning NULL to it.
x
x <- NULL
x
x[3]  # indexing NULL just returns NULL
# ----------------------------------- R Programming Matrix ------------------------------
# Creating a matrix
x <- matrix(1:9, nrow = 3, ncol = 3)
# The same result is obtained by providing only one dimension
x <- matrix(1:9, nrow = 3)
x <- matrix(1:9, nrow = 3, byrow = TRUE)  # fill row-wise instead of column-wise
t1 <- c(1, 2, 3)
t2 <- c(4, 5, 6)
cbind(t1, t2)
cbind(c(1, 2, 3), c(4, 5, 6))
c1 <- c(1, 2, 3)
c2 <- c(4, 5, 6)
cbind(c1, c2)
rbind(c(1, 2, 3), c(4, 5, 6))
# Accessing elements in a matrix
x
x[1, c(2, 3)]
x[c(1, 2), c(2, 3)]  # select rows 1 & 2 and columns 2 & 3
x[c(3, 2), ]         # leaving the column field blank selects entire columns
x[, ]                # leaving both fields blank selects the entire matrix
x[, 1]
x[-1, ]              # select all rows except the first
# Modifying a matrix
x[2, 2] <- 10; x
x[x < 5] <- 0; x     # zero out elements smaller than 5
t(x)                 # transpose
cbind(x, c(1, 2, 3))
j <- rbind(x, c(1, 2, 3))  # j is a 4x3 matrix (12 elements)
j
dim(j) <- c(6, 2); j # reshape j to a 6x2 matrix (element count must match)
dim(x) <- c(1, 9); x # reshape x (9 elements) to a 1x9 matrix
x[1:4]
# ----------------------------------- R Programming List ------------------------------
# Creating a list
# A list can be created using the list() function; elements may be named.
# (An unnamed list would be: list(2.5, TRUE, 1:3))
x <- list("a" = 2.5, "b" = FALSE, "c" = 1:3)
# Accessing components in a list
x[c(1:3)]       # index using an integer vector
x <- x[-2]      # negative integer excludes the second component ("b")
x["a"]
typeof(x["a"])  # single [ returns a list
# Difference between [ and [[:
x[["a"]]        # double [[ returns the content itself
typeof(x[["a"]])
# An alternative to [[, used often while accessing content of a list, is the $ operator.
x$a             # same as x[["a"]]
x$a             # $ also partially matches names (e.g. x$mar would find x$married)
# Modifying a list:
x[["a"]] <- "Clair"; x
# Adding a component
x[["married"]] <- FALSE
# Deleting components by assigning NULL
x[["a"]] <- NULL
x$married <- NULL
# ----------------------------------- R Programming Data Frame ------------------------------
# Creating a data frame
x <- data.frame("SN" = 1:2, "Age" = c(21, 15), "Name" = c("John", "Dora"))
# Note: since R 4.0, character columns are kept as character by default
# (stringsAsFactors = FALSE is no longer needed).
# Accessing components in a data frame:
# Accessing like a list -- we can use either [, [[ or $ to access columns.
x["Name"]    # single [ returns a one-column data frame
x$Name       # $ returns the column as a vector
x[["Name"]]  # [[ also returns the column as a vector
x[[3]]
x[3]
# Accessing like a matrix:
# 'trees' is a built-in dataset in R (package 'datasets')
datasets::trees
trees[2:3, 1:2]            # select 2nd and 3rd rows, first two columns
trees[trees$Height > 82, ] # select rows with Height greater than 82
trees[10:12, 2]            # collapses to a vector; add drop = FALSE to keep a data frame
# trees[10:12, 2, drop = FALSE]
# Deleting a column: assign NULL to it
x$Age <- NULL
# Assigning into a subset: sets all but row 3 and column 2 of a local copy of trees to 0
trees[-3, -2] <- 0
# -----------------------------------R Programming Factor------------------------------
# General definition:
# A factor is a data structure used for fields that take only a predefined, finite
# number of values (categorical data).
# For example, a data field such as marital status may contain only values from
# single, married, separated, divorced, or widowed.
# In such a case, we know the possible values beforehand, and these predefined,
# distinct values are called levels.
# The following is an example of a factor in R.
x <- factor(c("single","married","married","single")); x
# Levels can also be specified explicitly; unused levels (here "divorced") are kept.
x <- factor(c("single","married","married","single"), levels=c("single","married","divorced")); x
# Factors are closely related to vectors. In fact, factors are stored as integer
# vectors. This is clearly seen from their structure:
x <- factor(c("single","married","married","single"))
str(x)
# We see that the levels are stored in a character vector and the individual elements
# are actually stored as integer indices into that vector.
# ---------------------------------------------------------------------------------------------------
# Workspace inspection: listing, testing for, and removing variables;
# basic size queries on a data frame.
x <- 6
n <- 1:4
let <- LETTERS[1:4]                # first four capital letters, "A".."D"
df <- data.frame(n, let)
# List currently defined variables
ls()
# Check if a variable named "x" exists
exists("x")
# Check if "y" exists
exists("y")
# Delete variable x (demonstration only; avoid rm() in production scripts)
rm(x)
str(df)
# Length probably doesn't give us what we want here:
# for a data frame, length() is the number of columns, not rows.
length(df)
# Number of rows
nrow(df)
# Number of columns
ncol(df)
# Get rows and columns at once
dim(df)
# ---------------------------------------------------------------------------------
# Indexing into a Data structure
# Problem
# You want to get part of a data structure.
# Solution
# Elements from a vector, matrix, or data frame can be extracted using numeric indexing, or by using a boolean vector of the appropriate length.
v <- c(1,4,4,3,2,2,3)
v[c(2,3,4)]
v[2:4]
v[c(2,4,3)]
# With a data frame:
# Create a sample data frame
data <- read.table(header=T, text='
subject sex size
1 M 7
2 F 6
3 F 9
4 M 11
')
# Get the element at row 1, column 3
data[1,3]
data[1,"size"]
# Get rows 1 and 2, and all columns
data[1:2, ]
data[c(1,2), ]
# Get rows 1 and 2, and only column 2
data[1:2, 2]
data[c(1,2), 2]
# Get rows 1 and 2, and only the columns named "sex" and "size"
data[1:2, c("sex","size")]
data[c(1,2), c(2,3)]
# Indexing with a boolean vector
# With the vector v from above:
v > 2
v[v>2]
v[ c(F,T,T,T,F,F,T)]
# With the data frame from above:
# A boolean vector
data$subject < 3
data[data$subject < 3, ]
data[c(TRUE,TRUE,FALSE,FALSE), ]
# It is also possible to get the numeric indices of the TRUEs
which(data$subject < 3)
v
# Drop the first element
v[-1]
# Drop first three
v[-1:-3]
# Drop just the last element
v[-length(v)]
# --------------------------------------------------------------------------------
1:20
# We could also use it to create a sequence of real numbers. For example,
# try
pi:10
15:1
# In the case of an operator like the colon used above, you must enclose the symbol in backticks like this: ?`:`
# seq() function serves this purpose
seq(1, 20)
seq(0, 10, by=0.5)
my_seq<-seq(5, 10, length=30)
my_seq
# One more function related to creating sequences of numbers is rep()
rep(0, times = 40)
# rep(c(0, 1, 2), times = 10) would repeat the whole vector 10 times: 0 1 2 0 1 2 ...
# 'each = 10' instead repeats every element 10 times in place: ten 0s, then ten 1s, then ten 2s
rep(c(0, 1, 2), each = 10)
|
/R programming files/Basics in R -2.R
|
no_license
|
harimurugesan/Data-Science-With-Python-and-R-programming
|
R
| false
| false
| 14,739
|
r
|
# In this lesson, we will explore some basic building blocks of the R programming language.
# In its simplest form, R can be used as an interactive calculator. Type 5 + 7
# R simply prints the result of 12 by default. However, R is a programming language and
# often the reason we use a programming language as opposed to a calculator is to
# automate some process or avoid unnecessary repetition.
# In this case, we may want to use our result from above in a second calculation.
# Instead of retyping 5 + 7 every time we need it, we can just create a new variable
# that stores the result.
# The way you assign a value to a variable in R is by using the assignment operator,
# which is just a 'less than' symbol followed by a 'minus' sign. It looks like this: <-
# Think of the assignment operator as an arrow. You are assigning the value on the right
# side of the arrow to the variable name on the left side of the arrow.
# To assign the result of 5 + 7 to a new variable called x, you type x <- 5 + 7. This
# can be read as 'x gets 5 plus 7'.
x <- 5 + 7
# You ll notice that R did not print the result of 12 this time. When you use the
# assignment operator, R assumes that you dont want to see the result immediately, but
# rather that you intend to use the result for something else later on.
# store the result of x - 3 in a new variable called y.
Ans: y<-x-3
# Now, lets create a small collection of numbers called a vector. Any object that
# contains data is called a data structure and numeric vectors are the simplest type of
# data structure in R. In fact, even a single number is considered a vector of length
# one.
# The easiest way to create a vector is with the c() function, which stands for
# 'concatenate' or 'combine'. To create a vector containing the numbers 1.1, 9, and
# 3.14, type c(1.1, 9, 3.14). Try it now and store the result in a variable called z.
Ans=z<-c(1.1,9,3.14)
# You can combine vectors to make a new vector. Create a new vector that contains z,
# 555, then z again in that order. Dont assign this vector to a new variable, so that
# we can just see the result immediately.
Ans: c(z,555,z)
# Numeric vectors can be used in arithmetic expressions. Type the following to see what
# happens: z * 2 + 100.
Ans : z*2+100
# First, R multiplied each of the three elements in z by 2. Then it added 100 to each
# element to get the result you see above.
# Other common arithmetic operators are `+`, `-`, `/`, and `^` (where x^2 means 'x
# squared'). To take the square root, use the sqrt() function and to take the absolute
# value, use the abs() function.
#
# Take the square root of z - 1 and assign it to a new variable called my_sqrt.
######################
my_sqrt<-sqrt(z-1)
# Before we view the contents of the my_sqrt variable, what do you think it contains?
# 1: a vector of length 3
# 2: a single number (i.e a vector of length 1)
# 3: a vector of length 0 (i.e. an empty vector)
#
# As you may have guessed, R first subtracted 1 from each element of z, then took the
# square root of each element. This leaves you with a vector of the same length as the
# original vector z.
#
# create a new variable called my_div that gets the value of z divided by my_sqrt
# my_div<-z/my_sqrt
#
# Which statement do you think is true?
#
# 1: my_div is a single number (i.e a vector of length 1)
# 2: my_div is undefined
# 3: The first element of my_div is equal to the first element of z divided by the first element of my_sqrt, and so on...
#
# When given two vectors of the same length, R simply performs the specified arithmetic
# operation (`+`, `-`, `*`, etc.) element-by-element. If the vectors are of different
# lengths, R 'recycles' the shorter vector until it is the same length as the longer
# vector.
#
# When we did z * 2 + 100 in our earlier example, z was a vector of length 3, but
# technically 2 and 100 are each vectors of length 1.
#
# Behind the scenes, R is 'recycling' the 2 to make a vector of 2s and the 100 to make a
# vector of 100s. In other words, when you ask R to compute z * 2 + 100, what it really
# computes is this: z * c(2, 2, 2) + c(100, 100, 100).
#
# To see another example of how this vector 'recycling' works, try adding
# c(1, 2, 3) + c(0, 10)
#
# If the length of the shorter vector does not divide evenly into the length of the
# longer vector, R will still apply the 'recycling' method, but will throw a warning to
# let you know something fishy might be going on.
#
#
# c(1, 2, 3, 4) + c(0, 10, 100)
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# ---------------------------Vector----------------------
# Creating a Vector
x <- c(1, 5, 4, 9, 0)
typeof(x)
length(x)
x <- c(1, 5.4, TRUE, "hello")
x
typeof(x)
x <- 1:7
x
y <- 2:-2
y
seq(1, 3, by=0.2) # specify step size
# Accessing Elements in Vector :
# Using integer vector as index:
x
x[3] # access 3rd element
x[c(2, 4)] # access 2nd and 4th element
x[-1] # access all but 1st element
x[c(2, -4)] # cannot mix positive and negative integers
x[c(-2, -4)]
# Using logical vector as index :
# x[c(TRUE, FALSE, FALSE, TRUE)]
x[x>3] # filtering vectors based on conditions
x[x>0]
# Using character vector as index
# This type of indexing is useful when dealing with named vectors. We can name each elements of a vector.
x <- c("first"=3, "second"=0, "third"=9)
names(x)
x["second"]
x[c("first", "third")]
# Modifying a Vector
# We can modify a vector using the assignment operator. We can use the techniques discussed above to access specific elements and modify them. If we want to truncate the elements, we can use reassignments.
x
x[2] <- 4; x # modify 2nd element
x[x>3] <- 5; x # modify elements less than 0
x <- x[1:4]; x # truncate x to first 4 elements
# Deleting a Vector
# We can delete a vector by simply assigning a NULL to it.
x
x <- NULL
x
x[3]
# -----------------------------------R Programming Matrix------------------------------
# Creating a Matrix
x<-matrix(1:9, nrow=3, ncol=3)
# same result is obtained by providing only one dimension
x<-matrix(1:9, nrow=3)
x<-matrix(1:9, nrow=3, byrow=TRUE)
t1<-c(1,2,3)
t2<-c(4,5,6)
cbind(t1,t2)
cbind(c(1,2,3),c(4,5,6))
c1<-c(1,2,3)
c2<-c(4,5,6)
cbind(c1,c2)
rbind(c(1,2,3),c(4,5,6))
# Accessing Elements in Matrix
x
x[1,c(2,3)]
x[c(1,2),c(2,3)] # select rows 1 & 2 and columns 2 & 3
x[c(3,2),] # leaving column field blank will select entire columns
x[,] # leaving row as well as column field blank will select entire matrix
x[,1]
x[-1,] # select all rows except first
# Modifying a Matrix
x[2,2] <- 10; x
x[x<5] <- 0; x
t(x)
cbind(x,c(1,2,3))
j
j<-rbind(x,c(1,2,3))
dim(j) <- c(6,2); x # change to 3X2 matrix
dim(x) <- c(1,6); x # change to 1X6 matrix
x[1:4]
# -----------------------------------R Programming List------------------------------
# Creating a List
# List can be created using the list() function.
x <- list("a"=2.5, "b"=FALSE, "c"=1:3)
x <- list(2.5,TRUE,1:3)
# Accessing Components in List
x[c(1:3)] # index using integer vector
x<-x[-2] # using negative integer to exclude second compon
x["a"]
typeof(x["a"]) # single [ returns a list
#Diff btwn [ and[[
x[["a"]] # double [[ returns the content
typeof(x[["a"]])
# An alternative to [[, which is used often while accessing content of a list is the $ operator.
x$a # same as x[["name"]]
x$a # partial matching, same as x$ag or x$age
# Modifying a List:
x[["a"]] <- "Clair";x
# Adding Component
x[["married"]] <- FALSE
# Deleting Component
x[["a"]] <- NULL
x$married <- NULL
# -----------------------------------R Programming Data Frame------------------------------
# Creating a Data Frame
x <- data.frame("SN"=1:2,"Age"=c(21,15),"Name"=c("John","Dora"))
# stringsAsFactors=FALSE.
# Accessing Components in Data Frame :
# Accessing like a list
# We can use either [, [[ or $ operator to access columns of data frame.
x["Name"]
x$Name
x[["Name"]]
x[[3]]
x[3]
# Accessing like a matrix :
# in buile we have trees variables in R
datasets::trees
trees[2:3,1:2] # select 2nd and 3rd row
trees[trees$Height > 82,] # selects rows with Height greater than 82
trees[10:12,2]
# rees[10:12,2, drop=FALSE]
# Deleting Component :
x$Age <- NULL
trees[-3,-2]<-0
# -----------------------------------R Programming Factor------------------------------
# General Definition :
# Factor is a data structure used for fields that takes only predefined, finite number of values (categorical data).
# For example, a data field such as marital status may contain only values from single, married, separated, divorced, or widowed.
# In such case, we know the possible values beforehand and these predefined, distinct values are called levels.
# Following is an example of factor in R.
x <- factor(c("single","married","married","single")); x
x <- factor(c("single","married","married","single"), levels=c("single","married","divorced")); x
# Factors are closely related with vectors. In fact, factors are stored as integer vectors. This is clearly seen from its structure.
x <- factor(c("single","married","married","single"))
str(x)
# We see that levels are stored in a character vector and the individual elements are actually stored as indices.
# ---------------------------------------------------------------------------------------------------
x <- 6
n <- 1:4
let <- LETTERS[1:4]
df <- data.frame(n, let)
# List currently defined variables
ls()
# Check if a variable named "x" exists
exists("x")
# Check if "y" exists
exists("y")
# Delete variable x
rm(x)
str(df)
# Length probably doesn't give us what we want here:
length(df)
# Number of rows
nrow(df)
# Number of columns
ncol(df)
# Get rows and columns
dim(df)
# ---------------------------------------------------------------------------------
# Indexing into a Data structure
# Problem
# You want to get part of a data structure.
# Solution
# Elements from a vector, matrix, or data frame can be extracted using numeric indexing, or by using a boolean vector of the appropriate length.
v <- c(1,4,4,3,2,2,3)
v[c(2,3,4)]
v[2:4]
v[c(2,4,3)]
# With a data frame:
# Create a sample data frame
data <- read.table(header=T, text='
subject sex size
1 M 7
2 F 6
3 F 9
4 M 11
')
# Get the element at row 1, column 3
data[1,3]
data[1,"size"]
# Get rows 1 and 2, and all columns
data[1:2, ]
data[c(1,2), ]
# Get rows 1 and 2, and only column 2
data[1:2, 2]
data[c(1,2), 2]
# Get rows 1 and 2, and only the columns named "sex" and "size"
data[1:2, c("sex","size")]
data[c(1,2), c(2,3)]
# Indexing with a boolean vector
# With the vector v from above:
v > 2
v[v>2]
v[ c(F,T,T,T,F,F,T)]
# With the data frame from above:
# A boolean vector
data$subject < 3
data[data$subject < 3, ]
data[c(TRUE,TRUE,FALSE,FALSE), ]
# It is also possible to get the numeric indices of the TRUEs
which(data$subject < 3)
v
# Drop the first element
v[-1]
# Drop first three
v[-1:-3]
# Drop just the last element
v[-length(v)]
# --------------------------------------------------------------------------------
1:20
# We could also use it to create a sequence of real numbers. For example,
# try
pi:10
15:1
# In the case of an operator like the colon used above, you must enclose the symbol in backticks like this: ?`:`
# seq() function serves this purpose
seq(1, 20)
seq(0, 10, by=0.5)
my_seq<-seq(5, 10, length=30)
my_seq
# One more function related to creating sequences of numbers is rep()
rep(0, times = 40)
# If instead we want our vector to contain 10 repetitions of the vector (0, 1, 2), wecan do rep(c(0, 1, 2), times = 10)
rep(c(0, 1, 2), each = 10)
|
library (dplyr)
library (ggplot2)
library (xkcd)
library (extrafont)
download.file("http://simonsoftware.se/other/xkcd.ttf",
dest="xkcd.ttf", mode="wb")
system("cp xkcd.ttf ~/Library/Fonts")
font_import(path="~/Library/Fonts", pattern = "xkcd", prompt=FALSE)
fonts()
fonttable()
if(.Platform$OS.type != "unix") {
## Register fonts for Windows bitmap output
loadfonts(device="win")
} else {
loadfonts()
}
# extract historic results
history <- read.csv("https://raw.githubusercontent.com/tuangauss/Various-projects/master/data/history.csv", stringsAsFactors = FALSE)
# get info from the 2010 up to 2018
seasons <- sapply(10:17, function(x) paste0(2000+x,'-',x+1))
# Plot the empirical distribution of total goals per Premier League match for
# the given season(s), overlaid with a Poisson pmf fitted by the mean.
# Depends on the global `history` data frame plus dplyr/ggplot2/xkcd.
graph_func <- function(season){
  # A lone "2017-18" request gets the last-season label; anything else is
  # treated as the full 2010-2018 range.
  plot_title <- if (season[1] == "2017-18") {
    "Last season: 2017-2018"
  } else {
    "From 2010-11 to 2017-18"
  }
  # Premier League ('E0') matches for the requested season(s), with total goals.
  matches <- history %>%
    filter(Season %in% season, div == 'E0') %>%
    mutate(total = FTAG + FTHG)
  # Poisson rate estimate: mean total goals per match.
  lambda_hat <- mean(matches$total)
  # Empirical probability of each observed total-goal count.
  goal_probs <- matches %>%
    group_by(total) %>%
    summarize(prob = n() / nrow(matches))
  # Observed distribution (bars) vs fitted Poisson pmf (line + points).
  ggplot(data = goal_probs, aes(x = total, y = prob)) +
    geom_bar(stat = "identity", color = "blue", fill = "grey") +
    scale_x_continuous(breaks = seq(0, 10, 1)) +
    geom_line(aes(x = total, y = dpois(x = total, lambda = lambda_hat)),
              col = "red", size = 0.5) +
    geom_point(aes(x = total, y = dpois(x = total, lambda = lambda_hat)),
               col = "black", size = 3) +
    ggtitle(plot_title) + labs(x = "Total Goal", y = "Probability") +
    theme_xkcd()
}
graph_func(seasons)
graph_func(c('2017-18'))
|
/visualize.R
|
no_license
|
harrydet/betting
|
R
| false
| false
| 1,587
|
r
|
library (dplyr)
library (ggplot2)
library (xkcd)
library (extrafont)
download.file("http://simonsoftware.se/other/xkcd.ttf",
dest="xkcd.ttf", mode="wb")
system("cp xkcd.ttf ~/Library/Fonts")
font_import(path="~/Library/Fonts", pattern = "xkcd", prompt=FALSE)
fonts()
fonttable()
if(.Platform$OS.type != "unix") {
## Register fonts for Windows bitmap output
loadfonts(device="win")
} else {
loadfonts()
}
# extract historic results
history <- read.csv("https://raw.githubusercontent.com/tuangauss/Various-projects/master/data/history.csv", stringsAsFactors = FALSE)
# get info from the 2010 up to 2018
seasons <- sapply(10:17, function(x) paste0(2000+x,'-',x+1))
# Plot the empirical distribution of total goals per Premier League match for
# the given season(s), overlaid with a Poisson pmf fitted by the mean.
# Depends on the global `history` data frame plus dplyr/ggplot2/xkcd.
graph_func <- function(season){
# Title: a lone "2017-18" request gets the last-season label; anything else
# is treated as the full 2010-2018 range.
if (season[1] == "2017-18"){
title = "Last season: 2017-2018"
}
else{
title = "From 2010-11 to 2017-18"
}
# Premier League ('E0') matches for the requested season(s); total goals per match.
data <- history %>%
filter (Season %in% season, div == 'E0') %>%
mutate (total = FTAG + FTHG)
# Poisson rate estimate: mean total goals per match.
ave_score <- mean(data$total)
# Empirical probability of each observed total-goal count.
prob_data <- data %>%
group_by(total) %>%
summarize (prob = n()/nrow(data))
# Observed distribution (bars) vs fitted Poisson pmf (red line, black points);
# the ggplot object is the function's (implicit) return value.
ggplot(data=prob_data, aes(x=total, y=prob)) +
geom_bar(stat="identity", color="blue", fill="grey") +
scale_x_continuous(breaks=seq(0,10,1)) +
geom_line(aes(x = total, y = dpois(x=total, lambda = ave_score)),
col = "red", size = 0.5) +
geom_point(aes(x = total, y = dpois(x=total, lambda = ave_score)),
col = "black", size = 3) +
ggtitle(title) + labs (x = "Total Goal", y = "Probability") +
theme_xkcd()
}
graph_func(seasons)
graph_func(c('2017-18'))
|
# Auto-generated regression input (valgrind test case) for the CNull package:
# m = NULL plus a 5x7 matrix of extreme/denormal doubles to probe edge-case
# handling in communities_individual_based_sampling_alpha.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.2281062393359e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
# ::: accesses the unexported function; do.call spreads testlist as arguments.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615775203-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 361
|
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.2281062393359e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
# titanic_association_practice
# Expand the aggregated Titanic contingency table into one row per passenger,
# then mine association rules (arules/apriori) for survival patterns.
Titanic
class(Titanic)
titan.df <- as.data.frame(Titanic)
head(titan.df)
titan.df[4, "Freq"]
# Earlier rbind-in-a-loop attempt, kept for reference. NOTE: 0:titan.df[i, "Freq"]-1
# parses as (0:Freq) - 1 because ':' binds tighter than '-' — a precedence pitfall.
#for(i in 1:nrow(titan.df))
# for(j in 0:titan.df[i, "Freq"]-1)
# titan <- rbind(titan, titan.df[i,])
# titanic <- NULL
titanic <- NULL
# Replicate each factor column according to Freq to get case-level data
# (the loop runs over the four attribute columns of titan.df).
for(i in 1:4) titanic <- cbind(titanic, rep(as.character(titan.df[,i]), titan.df$Freq))
titanic <- as.data.frame(titanic)
names(titanic) <- c("Class", "Sex", "Age", "Survived")
library(arules)
# Rules with at least 1% support, 25% confidence, and >= 2 items.
myrules <- apriori(data = titanic, parameter = list(support = 0.01, confidence = 0.25, minlen = 2))
summary(myrules)
inspect(myrules[1:50])
# Restrict to rules predicting non-survival, strongest confidence first.
not_survived <- subset(myrules, rhs %in% "Survived=No")
inspect(sort(not_survived, by='confidence'))
|
/association_rule/titanic_association_practice.R
|
no_license
|
qwerop553/Data-mining
|
R
| false
| false
| 706
|
r
|
# titanic_association_practice
Titanic
class(Titanic)
titan.df <- as.data.frame(Titanic)
head(titan.df)
titan.df[4, "Freq"]
#for(i in 1:nrow(titan.df))
# for(j in 0:titan.df[i, "Freq"]-1)
# titan <- rbind(titan, titan.df[i,])
# titanic <- NULL
titanic <- NULL
for(i in 1:4) titanic <- cbind(titanic, rep(as.character(titan.df[,i]), titan.df$Freq))
titanic <- as.data.frame(titanic)
names(titanic) <- c("Class", "Sex", "Age", "Survived")
library(arules)
myrules <- apriori(data = titanic, parameter = list(support = 0.01, confidence = 0.25, minlen = 2))
summary(myrules)
inspect(myrules[1:50])
not_survived <- subset(myrules, rhs %in% "Survived=No")
inspect(sort(not_survived, by='confidence'))
|
# This function takes as input a transposed expression matrix (samples x genes)
# and a factor with genoData (class labels). It returns a matrix containing
# only the genes selected by RFE feature selection (via crossval/fit.rfe).
FSelection <- function (esetMatT,genoData) {
  # Find the most important features/genes via RFE-based cross-validation.
  res.rfe <- crossval(esetMatT,genoData,DEBUG=TRUE, theta.fit=fit.rfe)
  # One selected feature name per row of x.
  x <- as.matrix(extractFeatures(res.rfe,toFile=FALSE))
  # Map selected names to column positions, preserving the selection order.
  # Names not present in esetMatT are dropped, matching the original
  # nested-loop scan which silently skipped unmatched names (and avoiding
  # the colnames length-mismatch error the original hit in that case).
  keep <- match(x[, 1], colnames(esetMatT))
  keep <- keep[!is.na(keep)]
  # Single vectorized subset replaces the O(features x columns) double loop;
  # drop = FALSE keeps a (named) matrix even when only one gene is selected.
  selectedMat <- esetMatT[, keep, drop = FALSE]
  return(selectedMat)
}
|
/FSelection.R
|
no_license
|
MrSud0/Pr2-SVM-Model-
|
R
| false
| false
| 1,165
|
r
|
#this function will take as input a transposed mat (samples x genes) and a factor with genoData (class labels)
#it will return the selected genes on a matrix based on rfe feature selection using function crossval
FSelection <- function (esetMatT,genoData) {
#finds the most important features/genes via RFE-based cross-validation
res.rfe <- crossval(esetMatT,genoData,DEBUG=TRUE, theta.fit=fit.rfe)
#we save the most important features/genes on a matrix (one name per row)
x <- as.matrix(extractFeatures(res.rfe,toFile=FALSE))
temp <- colnames(esetMatT) #columns' names for if statements below
selectedMat <- 0 #this will be the selected genes matrix (placeholder overwritten on first match)
#checks which genes are the important ones and creates a matrix with only those
#NOTE(review): this O(features x columns) scan is equivalent to the vectorized
#subset esetMatT[, x[,1], drop=FALSE] when all selected names are present
#instead of this for i can also do , selectedMat <- tData[,selectedGenes$time.choosen.Var1]
for( y in 1 : nrow(x))
{
for (i in 1 :ncol(esetMatT))
{
# first match starts the matrix; later matches append columns; break on hit
if(temp[i]==x[y] & y == 1)
{selectedMat<-as.matrix(esetMatT[,i])
break}
if(temp[i]==x[y] & y != 1)
{selectedMat<-cbind(selectedMat,esetMatT[,i])
break}
}
}
#we put the names of the genes to the columns
#NOTE(review): errors if any selected name was absent (length mismatch) — confirm inputs
colnames(selectedMat)<-t(x[,1]);
return(selectedMat)
}
|
#' Listen to a random song
#'
#' Run to open a random song in youtube. Provide an index to listen to a specific
#' song.
#'
#' @param index Which song to listen to (index in the palette table)
#' @param takes Palette table
#' @export
first_take <- function(index = sample(seq_len(nrow(takes)), 1), takes=ftpals::first_takes){
  song_name <- takes[['song']][[index]]
  artist_name <- takes[['artist']][[index]]
  # Announce which track was picked before launching anything.
  cat(paste0("Index: ", index,
             ". You're listening to ", song_name,
             " by ", artist_name,
             ". Enjoy the music!"))
  # Display the palette associated with this track.
  show_pals(index)
  # Open the matching video on YouTube in the default browser.
  video_url <- paste0("https://www.youtube.com/watch?v=", takes[["videoId"]][[index]])
  utils::browseURL(video_url)
}
|
/R/first_take.R
|
permissive
|
tsostarics/ftpals
|
R
| false
| false
| 683
|
r
|
#' Listen to a random song
#'
#' Run to open a random song in youtube. Provide an index to listen to a specific
#' song.
#'
#' @param index Which song to listen to (index in the palette table)
#' @param takes Palette table
#' @return Called for its side effects (prints a message, shows the palette,
#'   and opens the song's YouTube page in the default browser).
#' @export
first_take <- function(index = sample(seq_len(nrow(takes)), 1), takes=ftpals::first_takes){
# Announce which track was picked before launching anything.
cat(paste0("Index: ",
index,
". You're listening to ",
takes[['song']][[index]],
" by ",
takes[['artist']][[index]],
". Enjoy the music!"))
# Display the palette associated with this track.
show_pals(index)
# Open the matching video on YouTube in the default browser.
utils::browseURL(paste0("https://www.youtube.com/watch?v=", takes[["videoId"]][[index]]))
}
|
#############################################################################################
# Purpose: Relate observed nest densities for 4 species (BBWO, HAWO, WHWO, NOFL) with HSIs. #
# This version is for manuscript for peer-reviewed publication. #
#############################################################################################
library(foreign)
library(R.utils)
#devtools::install_github("qureshlatif/WoodpeckerHSI") # Run this upon first use.
# Note: You might get some disconcerting warnings upon first install.
#To avoid them, restart R after installing package.
# NOTE(review): the %>%, ggplot2, and cowplot (ggdraw/save_plot) calls below are
# presumably attached via WoodpeckerHSI's dependencies — confirm before running.
library(WoodpeckerHSI)
#____________________________________ Inputs _______________________________________#
#setwd("F:/research stuff/FS_PostDoc/consult_&_collaborate/PtBlue_Sierra/")
# Machine-specific working directory; edit this path (or the commented one) per machine.
setwd("C:/Users/Quresh.Latif/files/projects/prior/PtBlue_Sierra/")
load("Data_compiled.RData") # Workspace containing data (provides `dat`, `scale.factors`, etc. used below)
#___________________________________________________________________________________#
##########################
# Remotely sensed models #
##########################
###_________BBWO, remotely sensed model___________###
mod <- loadObject("Model_RS_BBWO")
# Covariates in the model and their display labels (kept in matching order).
covs <- c("ccmort_loc", "blk_lndcc", "canhi_loc")
cov.names <- c("LocBurn", "LandBurn", "LocCC")
# For each covariate: predict HSI across its observed range while holding the
# other covariates at 0 (their standardized mean), then plot with a 95% CI.
# NOTE(review): seq_along(covs) would be safer than 1:length(covs).
for(i in 1:length(covs)) {
# 20 evenly spaced values spanning the covariate's observed range (raw scale).
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
# Prediction frame: all covariates at 0 on the standardized scale...
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
# ...except the focal covariate, standardized with the stored mean/SD.
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
# 95% Wald CI on the link scale, back-transformed with expit.
# NOTE(review): prefer TRUE over T for se.fit.
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
# Dotted lines mark the low/moderate/high suitability thresholds (0.35, 0.65).
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.35, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.65, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
# Stash each panel as plt.<label> for assembly below.
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together (cowplot): 2x2 grid with shared axis label and title. #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LandBurn, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocCC, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "BBWO, remotely sensed model"),
size=c(27, 30), x=c(0.01, 0.2), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_BBWO_RS_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________HAWO, remotely sensed model___________###
mod <- loadObject("Model_RS_HAWO")
covs <- c("ccmort_loc", "canhi_lnd", "sizlrg_loc")
cov.names <- c("LocBurn", "LandCC", "LocSizeLrg")
for(i in 1:length(covs)) {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.25, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.51, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LandCC, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocSizeLrg, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "HAWO, remotely sensed model"),
size=c(27, 30), x=c(0.01, 0.2), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_HAWO_RS_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________WHWO, remotely sensed model___________###
mod <- loadObject("Model_RS_WHWO")
covs <- c("ccmort_loc", "blk_lndcc")
cov.names <- c("LocBurn", "LandBurn")
for(i in 1:length(covs)) {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.4, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.7, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0, width = 0.475, height = 0.9) +
draw_plot(plt.LandBurn, x = 0.525, y = 0, width = 0.475, height = 0.9) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "WHWO, remotely sensed model"),
size=c(27, 30), x=c(0.01, 0.35), y=c(0.04, 1), angle = c(90, 0), hjust = c(0, 0))
#p
save_plot("manuscript/Figure_WHWO_RS_relations.tiff", p, ncol = 3, nrow = 1.5, dpi = 600)
###_______________________________________________###
###_________NOFL, remotely sensed model___________###
# Covariate-response curves for Northern Flicker (NOFL) under the remotely
# sensed HSI model. For each covariate, all other covariates are held at 0
# (their mean on the standardized scale) while the focal covariate sweeps its
# observed range; predicted HSI with a 95% CI is plotted, one panel per
# covariate. Relies on globals created earlier in the script: `dat` (the
# observation data) and `scale.factors` (per-covariate mean/SD used to
# standardize predictors when the model was fit).
mod <- loadObject("Model_RS_NOFL")
# Model covariates (data column names) and their display labels, index-aligned.
covs <- c("slope", "ccmort_loc", "sizlrg_loc", "sizlrg_lnd")
cov.names <- c("SLOPE", "LocBurn", "LocSizeLrg", "LandSizeLrg")
for(i in 1:length(covs)) {
# 20-point prediction grid spanning the observed range of the focal covariate.
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
# All covariates start at 0 (= their mean on the z-scale); only the focal one varies.
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
# Standardize the focal covariate with the same mean/SD used at model fitting.
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
# 95% Wald interval computed on the link scale, then back-transformed via expit().
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
# Dotted reference lines at HSI = 0.3 and 0.55 (presumably NOFL suitability
# thresholds from the manuscript -- TODO confirm).
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.55, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
# Stash the panel as plt.<label> for assembly below.
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
# 2x2 panel layout with a shared rotated y-axis label and a figure title.
p <- ggdraw() +
draw_plot(plt.SLOPE, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocBurn, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocSizeLrg, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot(plt.LandSizeLrg, x = 0.525, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "NOFL, remotely sensed model"),
size=c(27, 30), x=c(0.01, 0.2), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_NOFL_RS_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
######################
# Combination models #
######################
###_________BBWO, combination model___________###
mod <- loadObject("Model_CMB_BBWO")
covs <- c("ccmort_loc", "blk_lndcc", "DBH")
cov.names <- c("LocBurn", "LandBurn", "DBH")
for(i in 1:length(covs)) {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.2, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.55, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LandBurn, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.DBH, x = 0.05, y = 0, width = 0.95, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "BBWO, combination model"),
size=c(27, 30), x=c(0.01, 0.23), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_BBWO_CMB_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________HAWO, combination model___________###
mod <- loadObject("Model_CMB_HAWO")
covs <- c("ccmort_loc", "sizlrg_loc", "DBH", "BRKN")
cov.names <- c("LocBurn", "LocSizeLrg", "DBH", "BRKN")
categorical <- which(!covs %in% row.names(scale.factors))
for(i in 1:length(covs)) {
if(i %in% categorical) {
dat.plot <- data.frame(x = c(0, 1))
dat.plotz <- matrix(0, nrow = 2, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- dat.plot[, "x"]
} else {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
}
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
if(!i %in% categorical) {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.55, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
} else {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_errorbar(aes(ymin = HSI.lo, ymax = HSI.hi), size = 2, width = 0.15) +
geom_point(size = 10, shape = 16) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.55, linetype = "dotted", size = 1) +
scale_x_continuous(limits = c(-0.5, 1.5), breaks = c(0, 1)) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
}
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocSizeLrg, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.DBH, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot(plt.BRKN, x = 0.525, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "HAWO, combination model"),
size=c(27, 30), x=c(0.01, 0.23), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_HAWO_CMB_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________WHWO, combination model___________###
mod <- loadObject("Model_CMB_WHWO")
covs <- c("ccmort_loc", "blk_lndcc", "DBH", "BRKN")
cov.names <- c("LocBurn", "LandBurn", "DBH", "BRKN")
categorical <- which(!covs %in% row.names(scale.factors))
for(i in 1:length(covs)) {
if(i %in% categorical) {
dat.plot <- data.frame(x = c(0, 1))
dat.plotz <- matrix(0, nrow = 2, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- dat.plot[, "x"]
} else {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
}
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
if(!i %in% categorical) {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.63, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
} else {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_errorbar(aes(ymin = HSI.lo, ymax = HSI.hi), size = 2, width = 0.15) +
geom_point(size = 10, shape = 16) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.63, linetype = "dotted", size = 1) +
scale_x_continuous(limits = c(-0.5, 1.5), breaks = c(0, 1)) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
}
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LandBurn, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.DBH, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot(plt.BRKN, x = 0.525, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "WHWO, combination model"),
size=c(27, 30), x=c(0.01, 0.23), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_WHWO_CMB_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________NOFL, combination model___________###
# Covariate-response curves for Northern Flicker (NOFL) under the combination
# (remotely sensed + field-measured) HSI model. Continuous covariates are
# swept across their observed range; categorical (0/1) covariates are
# predicted at both levels. All non-focal covariates are held at their mean
# (0 on the standardized scale for continuous; observed proportion for
# categorical). Relies on globals created earlier: `dat` and `scale.factors`.
mod <- loadObject("Model_CMB_NOFL")
covs <- c("DBH", "BRKN")
cov.names <- c("DBH", "BRKN")
# Covariates absent from `scale.factors` were never standardized, i.e. they
# are treated as categorical (0/1) predictors.
categorical <- which(!covs %in% row.names(scale.factors))
for(i in seq_along(covs)) {
  if(i %in% categorical) {
    # Categorical focal covariate: predict at its two levels (0 and 1).
    dat.plot <- data.frame(x = c(0, 1))
    dat.plotz <- matrix(0, nrow = 2, ncol = length(covs)) %>% data.frame
    # Hold every categorical covariate at its observed mean (proportion of 1s);
    # the focal one is overwritten below. (The original wrapped this pipeline
    # in a second, redundant mean() of the resulting scalar -- removed.)
    for(cv in categorical)
      dat.plotz[, cv] <- dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean
    names(dat.plotz) <- covs
    dat.plotz[, covs[i]] <- dat.plot[, "x"]
  } else {
    # Continuous focal covariate: 500 evenly spaced values over the observed
    # range (finer than the 20 used in other sections, giving a smoother curve).
    dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 500))
    dat.plotz <- matrix(0, nrow = 500, ncol = length(covs)) %>% data.frame
    for(cv in categorical)
      dat.plotz[, cv] <- dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean
    names(dat.plotz) <- covs
    # Standardize the focal covariate with the mean/SD used at model fitting.
    dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
      scale.factors[covs[i], "SD"]
  }
  dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
  # 95% Wald interval computed on the link scale, back-transformed via expit().
  link <- predict(mod, dat.plotz, type = "link", se.fit = TRUE)
  dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
  dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
  if(!i %in% categorical) {
    # Ribbon + line for continuous covariates; dotted line at HSI = 0.68
    # (presumably the NOFL suitability threshold -- TODO confirm).
    plt <- ggplot(dat.plot, aes(x, HSI)) +
      geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
      geom_line(size = 3) +
      ylim(0,1) +
      geom_hline(yintercept = 0.68, linetype = "dotted", size = 1) +
      ylab(NULL) + xlab(cov.names[i]) +
      theme(axis.text.x=element_text(size=15)) +
      theme(axis.text.y=element_text(size=15)) +
      theme(axis.title.x=element_text(size=20)) +
      theme(axis.title.y=element_text(size=20))
  } else {
    # Point + error bar at the two levels for categorical covariates.
    plt <- ggplot(dat.plot, aes(x, HSI)) +
      geom_errorbar(aes(ymin = HSI.lo, ymax = HSI.hi), size = 2, width = 0.15) +
      geom_point(size = 10, shape = 16) +
      ylim(0,1) +
      geom_hline(yintercept = 0.68, linetype = "dotted", size = 1) +
      scale_x_continuous(limits = c(-0.5, 1.5), breaks = c(0, 1)) +
      ylab(NULL) + xlab(cov.names[i]) +
      theme(axis.text.x=element_text(size=15)) +
      theme(axis.text.y=element_text(size=15)) +
      theme(axis.title.x=element_text(size=20)) +
      theme(axis.title.y=element_text(size=20))
  }
  # Stash the panel as plt.<label> for assembly below.
  assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
# Side-by-side panels with a shared rotated y-axis label and a figure title.
p <- ggdraw() +
  draw_plot(plt.DBH, x = 0.05, y = 0, width = 0.475, height = 0.9) +
  draw_plot(plt.BRKN, x = 0.525, y = 0, width = 0.475, height = 0.9) +
  draw_plot_label(label = c("Habitat Suitability Index (HSI)", "NOFL, combination model"),
                  size=c(27, 30), x=c(0.01, 0.4), y=c(0.04, 1), angle = c(90, 0),
                  hjust = c(0, 0))
#p
save_plot("manuscript/Figure_NOFL_CMB_relations.tiff", p, ncol = 3, nrow = 1.5, dpi = 600)
###_______________________________________________###
|
/05-Covariate_relation_plots.R
|
no_license
|
qureshlatif/Northern-Sierra-Woodpeckers
|
R
| false
| false
| 20,165
|
r
|
#############################################################################################
# Purpose: Relate observed nest densities for 4 species (BBWO, HAWO, WHWO, NOFL) with HSIs. #
# This version is for manuscript for peer-reviewed publication. #
#############################################################################################
library(foreign)
library(R.utils)
#devtools::install_github("qureshlatif/WoodpeckerHSI") # Run this upon first use.
# Note: You might get some disconcerting warnings upon first install.
#To avoid them, restart R after installing package.
library(WoodpeckerHSI)
#____________________________________ Inputs _______________________________________#
#setwd("F:/research stuff/FS_PostDoc/consult_&_collaborate/PtBlue_Sierra/")
setwd("C:/Users/Quresh.Latif/files/projects/prior/PtBlue_Sierra/")
load("Data_compiled.RData") # Workspace containing data
#___________________________________________________________________________________#
##########################
# Remotely sensed models #
##########################
###_________BBWO, remotely sensed model___________###
mod <- loadObject("Model_RS_BBWO")
covs <- c("ccmort_loc", "blk_lndcc", "canhi_loc")
cov.names <- c("LocBurn", "LandBurn", "LocCC")
for(i in 1:length(covs)) {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.35, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.65, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LandBurn, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocCC, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "BBWO, remotely sensed model"),
size=c(27, 30), x=c(0.01, 0.2), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_BBWO_RS_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________HAWO, remotely sensed model___________###
mod <- loadObject("Model_RS_HAWO")
covs <- c("ccmort_loc", "canhi_lnd", "sizlrg_loc")
cov.names <- c("LocBurn", "LandCC", "LocSizeLrg")
for(i in 1:length(covs)) {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.25, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.51, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LandCC, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocSizeLrg, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "HAWO, remotely sensed model"),
size=c(27, 30), x=c(0.01, 0.2), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_HAWO_RS_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________WHWO, remotely sensed model___________###
mod <- loadObject("Model_RS_WHWO")
covs <- c("ccmort_loc", "blk_lndcc")
cov.names <- c("LocBurn", "LandBurn")
for(i in 1:length(covs)) {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.4, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.7, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0, width = 0.475, height = 0.9) +
draw_plot(plt.LandBurn, x = 0.525, y = 0, width = 0.475, height = 0.9) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "WHWO, remotely sensed model"),
size=c(27, 30), x=c(0.01, 0.35), y=c(0.04, 1), angle = c(90, 0), hjust = c(0, 0))
#p
save_plot("manuscript/Figure_WHWO_RS_relations.tiff", p, ncol = 3, nrow = 1.5, dpi = 600)
###_______________________________________________###
###_________NOFL, remotely sensed model___________###
mod <- loadObject("Model_RS_NOFL")
covs <- c("slope", "ccmort_loc", "sizlrg_loc", "sizlrg_lnd")
cov.names <- c("SLOPE", "LocBurn", "LocSizeLrg", "LandSizeLrg")
for(i in 1:length(covs)) {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.55, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.SLOPE, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocBurn, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocSizeLrg, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot(plt.LandSizeLrg, x = 0.525, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "NOFL, remotely sensed model"),
size=c(27, 30), x=c(0.01, 0.2), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_NOFL_RS_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
######################
# Combination models #
######################
###_________BBWO, combination model___________###
mod <- loadObject("Model_CMB_BBWO")
covs <- c("ccmort_loc", "blk_lndcc", "DBH")
cov.names <- c("LocBurn", "LandBurn", "DBH")
for(i in 1:length(covs)) {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.2, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.55, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LandBurn, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.DBH, x = 0.05, y = 0, width = 0.95, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "BBWO, combination model"),
size=c(27, 30), x=c(0.01, 0.23), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_BBWO_CMB_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________HAWO, combination model___________###
mod <- loadObject("Model_CMB_HAWO")
covs <- c("ccmort_loc", "sizlrg_loc", "DBH", "BRKN")
cov.names <- c("LocBurn", "LocSizeLrg", "DBH", "BRKN")
categorical <- which(!covs %in% row.names(scale.factors))
for(i in 1:length(covs)) {
if(i %in% categorical) {
dat.plot <- data.frame(x = c(0, 1))
dat.plotz <- matrix(0, nrow = 2, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- dat.plot[, "x"]
} else {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
}
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
if(!i %in% categorical) {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.55, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
} else {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_errorbar(aes(ymin = HSI.lo, ymax = HSI.hi), size = 2, width = 0.15) +
geom_point(size = 10, shape = 16) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.55, linetype = "dotted", size = 1) +
scale_x_continuous(limits = c(-0.5, 1.5), breaks = c(0, 1)) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
}
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LocSizeLrg, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.DBH, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot(plt.BRKN, x = 0.525, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "HAWO, combination model"),
size=c(27, 30), x=c(0.01, 0.23), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_HAWO_CMB_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________WHWO, combination model___________###
mod <- loadObject("Model_CMB_WHWO")
covs <- c("ccmort_loc", "blk_lndcc", "DBH", "BRKN")
cov.names <- c("LocBurn", "LandBurn", "DBH", "BRKN")
categorical <- which(!covs %in% row.names(scale.factors))
for(i in 1:length(covs)) {
if(i %in% categorical) {
dat.plot <- data.frame(x = c(0, 1))
dat.plotz <- matrix(0, nrow = 2, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- dat.plot[, "x"]
} else {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 20))
dat.plotz <- matrix(0, nrow = 20, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
}
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
if(!i %in% categorical) {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.63, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
} else {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_errorbar(aes(ymin = HSI.lo, ymax = HSI.hi), size = 2, width = 0.15) +
geom_point(size = 10, shape = 16) +
ylim(0,1) +
geom_hline(yintercept = 0.3, linetype = "dotted", size = 1) +
geom_hline(yintercept = 0.63, linetype = "dotted", size = 1) +
scale_x_continuous(limits = c(-0.5, 1.5), breaks = c(0, 1)) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
}
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.LocBurn, x = 0.05, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.LandBurn, x = 0.525, y = 0.475, width = 0.475, height = 0.475) +
draw_plot(plt.DBH, x = 0.05, y = 0, width = 0.475, height = 0.475) +
draw_plot(plt.BRKN, x = 0.525, y = 0, width = 0.475, height = 0.475) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "WHWO, combination model"),
size=c(27, 30), x=c(0.01, 0.23), y=c(0.06, 1), angle = c(90, 0))
#p
save_plot("manuscript/Figure_WHWO_CMB_relations.tiff", p, ncol = 3, nrow = 3, dpi = 600)
###_______________________________________________###
###_________NOFL, combination model___________###
mod <- loadObject("Model_CMB_NOFL")
covs <- c("DBH", "BRKN")
cov.names <- c("DBH", "BRKN")
categorical <- which(!covs %in% row.names(scale.factors))
for(i in 1:length(covs)) {
if(i %in% categorical) {
dat.plot <- data.frame(x = c(0, 1))
dat.plotz <- matrix(0, nrow = 2, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- dat.plot[, "x"]
} else {
dat.plot <- data.frame(x = seq(min(dat[, covs[i]]), max(dat[, covs[i]]), length.out = 500))
dat.plotz <- matrix(0, nrow = 500, ncol = length(covs)) %>% data.frame
for(cv in categorical)
dat.plotz[, cv] <- mean(dat[, covs[cv]] %>% as.matrix %>% as.numeric %>% mean)
names(dat.plotz) <- covs
dat.plotz[, covs[i]] <- (dat.plot$x - scale.factors[covs[i], "mean"]) /
scale.factors[covs[i], "SD"]
}
dat.plot$HSI <- predict(mod, dat.plotz, type = "response")
link <- predict(mod, dat.plotz, type = "link", se.fit = T)
dat.plot$HSI.lo <- expit(link$fit - 1.96 * link$se.fit)
dat.plot$HSI.hi <- expit(link$fit + 1.96 * link$se.fit)
if(!i %in% categorical) {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_ribbon(aes(ymin = HSI.lo, ymax = HSI.hi), alpha = 0.3) +
geom_line(size = 3) +
ylim(0,1) +
geom_hline(yintercept = 0.68, linetype = "dotted", size = 1) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
} else {
plt <- ggplot(dat.plot, aes(x, HSI)) +
geom_errorbar(aes(ymin = HSI.lo, ymax = HSI.hi), size = 2, width = 0.15) +
geom_point(size = 10, shape = 16) +
ylim(0,1) +
geom_hline(yintercept = 0.68, linetype = "dotted", size = 1) +
scale_x_continuous(limits = c(-0.5, 1.5), breaks = c(0, 1)) +
ylab(NULL) + xlab(cov.names[i]) +
theme(axis.text.x=element_text(size=15)) +
theme(axis.text.y=element_text(size=15)) +
theme(axis.title.x=element_text(size=20)) +
theme(axis.title.y=element_text(size=20))
}
assign(paste0("plt.", cov.names[i]), plt)
}
# Put plots together #
theme_set(theme_bw())
p <- ggdraw() +
draw_plot(plt.DBH, x = 0.05, y = 0, width = 0.475, height = 0.9) +
draw_plot(plt.BRKN, x = 0.525, y = 0, width = 0.475, height = 0.9) +
draw_plot_label(label = c("Habitat Suitability Index (HSI)", "NOFL, combination model"),
size=c(27, 30), x=c(0.01, 0.4), y=c(0.04, 1), angle = c(90, 0),
hjust = c(0, 0))
#p
save_plot("manuscript/Figure_NOFL_CMB_relations.tiff", p, ncol = 3, nrow = 1.5, dpi = 600)
###_______________________________________________###
|
# Global setup for the disease-prediction app (appears to be the global.R of
# a Shiny app, per its repository path): load training/test CSVs and build
# the cleaned column subset used downstream.
library(dplyr)
disease_train<-read.csv("tt.csv")
# NOTE(review): `b` and `disease_test` are not referenced in this visible
# chunk; presumably consumed elsewhere in the app (server.R/ui.R) -- confirm
# before removing.
b<-data.frame(disease_train)
disease_test<-read.csv("TB_Test.csv")
# Keep the demographic and symptom columns. The `X = X` entries are identity
# renames -- column names are unchanged by this select().
cleantable <- disease_train %>%
select(
# Age = Age,
Gender = Gender,
Food.Preference = Food.Preference,
Marital.Status = Marital.Status,
Symptoms_1 = Symptoms_1,
Symptoms_2 = Symptoms_2,
Symptoms_3 = Symptoms_3,
Symptoms_4 = Symptoms_4,
Symptoms_5 = Symptoms_5,
Symptoms_6 = Symptoms_6,
Symptoms_7 = Symptoms_7
)
|
/Disease Pred/global.R
|
no_license
|
dharanisugumar/diseasePrediction
|
R
| false
| false
| 505
|
r
|
# Global setup for the disease-prediction app: read the training and test
# CSVs and keep only the demographic and symptom columns used downstream.
library(dplyr)

disease_train <- read.csv("tt.csv")
b <- data.frame(disease_train)
disease_test <- read.csv("TB_Test.csv")

# Plain selection -- the original's `Gender = Gender` style entries were
# identity renames, so the resulting columns and names are identical.
cleantable <- disease_train %>%
  select(
    # Age,
    Gender,
    Food.Preference,
    Marital.Status,
    Symptoms_1,
    Symptoms_2,
    Symptoms_3,
    Symptoms_4,
    Symptoms_5,
    Symptoms_6,
    Symptoms_7
  )
|
#' kernel_C
#'
#' Internal wrapper that forwards all arguments, unmodified and unvalidated,
#' to the compiled `kernel_C` routine registered by the mgwrsar package
#' (invoked via `.Call`). Argument semantics live in the package's C/C++
#' source; the descriptions below are reviewer inferences from the names and
#' should be confirmed against that source.
#' @usage kernel_C(XX, J, hh, Mykernel, Minv, TIME, Decay, DDiagNull, normWW)
#' @param XX passed unchanged to the compiled routine (likely the matrix of
#' coordinates/covariates -- TODO confirm).
#' @param J passed unchanged (likely an index or neighbor count -- TODO confirm).
#' @param hh passed unchanged (likely the kernel bandwidth(s) -- TODO confirm).
#' @param Mykernel passed unchanged (likely the kernel-type identifier --
#' TODO confirm).
#' @param Minv passed unchanged (semantics undocumented upstream).
#' @param TIME passed unchanged (likely a time variable/flag for
#' spatio-temporal kernels -- TODO confirm).
#' @param Decay passed unchanged (likely a temporal decay parameter --
#' TODO confirm).
#' @param DDiagNull passed unchanged (likely a flag zeroing the
#' weight-matrix diagonal -- TODO confirm).
#' @param normWW passed unchanged (likely a row-normalization flag for the
#' weight matrix -- TODO confirm).
#' @keywords internal
#' @return whatever the compiled `kernel_C` routine returns (undocumented
#' upstream; presumably kernel weights -- TODO confirm).
kernel_C <-
function (XX, J, hh, Mykernel, Minv, TIME, Decay, DDiagNull,
normWW)
.Call("kernel_C", XX, J, hh, Mykernel, Minv, TIME, Decay, DDiagNull,
normWW, PACKAGE = "mgwrsar")
|
/R/kernel_C.R
|
no_license
|
shepherdmeng/mgwrsar
|
R
| false
| false
| 649
|
r
|
#' kernel_C
#' Thin R wrapper around the compiled 'kernel_C' routine registered by the
#' mgwrsar package; all arguments are forwarded unchanged to the native code.
#' @usage kernel_C(XX, J, hh, Mykernel, Minv, TIME, Decay, DDiagNull, normWW)
#' @param XX to be documented
#' @param J to be documented
#' @param hh to be documented
#' @param Mykernel to be documented
#' @param Minv to be documented
#' @param TIME to be documented
#' @param Decay to be documented
#' @param DDiagNull to be documented
#' @param normWW to be documented
#' @keywords internal
#' @return whatever the compiled 'kernel_C' routine returns (undocumented upstream)
kernel_C <- function(XX, J, hh, Mykernel, Minv, TIME, Decay, DDiagNull, normWW) {
  # No R-side computation: dispatch straight to the registered C routine.
  .Call("kernel_C", XX, J, hh, Mykernel, Minv, TIME, Decay,
        DDiagNull, normWW, PACKAGE = "mgwrsar")
}
|
# ==============================================================================
# smartvote-00044-script-antwortverteilung_gewaehlte_kand_budget-5.R
#
# Skript zur Visualisierung der Antwortverteilung gewaehlte Kandidierende - Fragetyp Budget-5
# (Visualises the answer distribution of elected candidates, question type Budget-5.)
#
# R-Script - Bachelor Thesis - smartvote
#
# Author: wackt1.bfh@gmail.com
# Date: 22.05.2020
# ==============================================================================
# ------------------------------------------------------------------------------
# IMPORT PACKAGES
# ------------------------------------------------------------------------------
library(jsonlite)
library(tidyverse)
library(ggplot2)
library(yaml)
library(viridis)
# ------------------------------------------------------------------------------
# CONSTANTS
# ------------------------------------------------------------------------------
# Endpoint URLs and the electoral-district filter, read from the YAML config.
config_file <- yaml.load_file("_parameter.yml")
PARAM_JSON_URL_CANDIDATES <- config_file$param_JSON_URL$JSON_URL_CANDIDATES_DATA
PARAM_JSON_URL_QUESTIONS <- config_file$param_JSON_URL$JSON_URL_QUESTIONS_DATA
PARAM_DISTRICT <- config_file$params_wahlkreis$WAHLKREIS
# ------------------------------------------------------------------------------
# FUNCTIONS
# ------------------------------------------------------------------------------
# Wrap a long title string so it fits the plot width; extra arguments are
# forwarded to strwrap() (e.g. width = ...).
wrapper <- function(x, ...)
{
  paste(strwrap(x, ...), collapse = "\n")
}
# ------------------------------------------------------------------------------
# MAIN
# ------------------------------------------------------------------------------
# --- read input
smartvotedata_json_candidates <- jsonlite::fromJSON(PARAM_JSON_URL_CANDIDATES)
smartvotedata_json_questions <- jsonlite::fromJSON(PARAM_JSON_URL_QUESTIONS)
# --- preprocess
# Optionally restrict the candidate data to a single electoral district.
if(PARAM_DISTRICT != "NA") {
  smartvotedata_json_candidates <- filter(smartvotedata_json_candidates, district == PARAM_DISTRICT)
}
# --- analyze
data_unnested <- unnest(smartvotedata_json_candidates, answers)
data_unnested_questions <- select(data_unnested, "questionId")
# Distinct question ids (count()/select(-n) is used only to deduplicate).
# FIX: the original used magrittr's compound pipe %<>%, which is NOT attached
# by library(tidyverse) (only %>% is re-exported), so the script errored here.
# Explicit reassignment is used instead.
data_unnested_questions <- data_unnested_questions %>%
  count(questionId) %>%
  select(-n) %>%
  as_tibble()
data_unnested_filtered <- select(data_unnested, "party_short", "elected", "questionId", "answer")
# Answers of elected candidates to question 3472, counted per party.
data_unnested_filtered <- data_unnested_filtered %>%
  filter(questionId == 3472, elected == TRUE) %>%
  count(answer, party_short, elected) %>%
  group_by(party_short) %>%
  as_tibble()
smartvotedata_json_questions <- smartvotedata_json_questions %>%
  filter(ID_question == 3472) %>%
  as_tibble()
question_title <- smartvotedata_json_questions$question
# Heatmap: answer value (x) vs. party (y); tile colour = number of candidates.
# FIX: the x-axis label previously read "Deutich" (typo) and had a stray
# quotation mark before "100".
question_chart_vis_party <- ggplot(data = data_unnested_filtered, aes(x = answer, y = reorder(party_short, answer), fill = n)) +
  geom_tile(color = "white") +
  scale_fill_viridis(name = "Anzahl\nKandidierende") +
  scale_x_continuous(breaks = round(seq(min(data_unnested_filtered$answer), 100, by = 25), 0)) +
  xlab(paste0("Antworten\n\nSkala: 0 = \"Deutlich weniger\" - 25 = \"Eher weniger\" - 50 = \"Gleich viel\"\n75 = \"Eher mehr\" - 100 = \"Deutlich mehr\"")) +
  ylab("Partei") +
  ggtitle(wrapper(paste0("Antwortverteilung zur Frage (pro Partei - gewaehlte Kandidierende): ", question_title))) +
  theme_bw()
# --- visualize
question_chart_vis_party
# ==============================================================================
# END
# ==============================================================================
|
/smartvote-00044-script-antwortverteilung_gewaehlte_kand_budget-5.R
|
no_license
|
wackt1/automated-report-in-R
|
R
| false
| false
| 3,478
|
r
|
# ==============================================================================
# smartvote-00044-script-antwortverteilung_gewaehlte_kand_budget-5.R
#
# Skript zur Visualisierung der Antwortverteilung gewaehlte Kandidierende - Fragetyp Budget-5
# (Visualises the answer distribution of elected candidates, question type Budget-5.)
#
# R-Script - Bachelor Thesis - smartvote
#
# Author: wackt1.bfh@gmail.com
# Date: 22.05.2020
# ==============================================================================
# ------------------------------------------------------------------------------
# IMPORT PACKAGES
# ------------------------------------------------------------------------------
library(jsonlite)
library(tidyverse)
library(ggplot2)
library(yaml)
library(viridis)
# ------------------------------------------------------------------------------
# CONSTANTS
# ------------------------------------------------------------------------------
# Endpoint URLs and the electoral-district filter, read from the YAML config.
config_file <- yaml.load_file("_parameter.yml")
PARAM_JSON_URL_CANDIDATES <- config_file$param_JSON_URL$JSON_URL_CANDIDATES_DATA
PARAM_JSON_URL_QUESTIONS <- config_file$param_JSON_URL$JSON_URL_QUESTIONS_DATA
PARAM_DISTRICT <- config_file$params_wahlkreis$WAHLKREIS
# ------------------------------------------------------------------------------
# FUNCTIONS
# ------------------------------------------------------------------------------
# Wrap a long title string so it fits the plot width; extra arguments are
# forwarded to strwrap() (e.g. width = ...).
wrapper <- function(x, ...)
{
  paste(strwrap(x, ...), collapse = "\n")
}
# ------------------------------------------------------------------------------
# MAIN
# ------------------------------------------------------------------------------
# --- read input
smartvotedata_json_candidates <- jsonlite::fromJSON(PARAM_JSON_URL_CANDIDATES)
smartvotedata_json_questions <- jsonlite::fromJSON(PARAM_JSON_URL_QUESTIONS)
# --- preprocess
# Optionally restrict the candidate data to a single electoral district.
if(PARAM_DISTRICT != "NA") {
  smartvotedata_json_candidates <- filter(smartvotedata_json_candidates, district == PARAM_DISTRICT)
}
# --- analyze
data_unnested <- unnest(smartvotedata_json_candidates, answers)
data_unnested_questions <- select(data_unnested, "questionId")
# Distinct question ids (count()/select(-n) is used only to deduplicate).
# FIX: the original used magrittr's compound pipe %<>%, which is NOT attached
# by library(tidyverse) (only %>% is re-exported), so the script errored here.
# Explicit reassignment is used instead.
data_unnested_questions <- data_unnested_questions %>%
  count(questionId) %>%
  select(-n) %>%
  as_tibble()
data_unnested_filtered <- select(data_unnested, "party_short", "elected", "questionId", "answer")
# Answers of elected candidates to question 3472, counted per party.
data_unnested_filtered <- data_unnested_filtered %>%
  filter(questionId == 3472, elected == TRUE) %>%
  count(answer, party_short, elected) %>%
  group_by(party_short) %>%
  as_tibble()
smartvotedata_json_questions <- smartvotedata_json_questions %>%
  filter(ID_question == 3472) %>%
  as_tibble()
question_title <- smartvotedata_json_questions$question
# Heatmap: answer value (x) vs. party (y); tile colour = number of candidates.
# FIX: the x-axis label previously read "Deutich" (typo) and had a stray
# quotation mark before "100".
question_chart_vis_party <- ggplot(data = data_unnested_filtered, aes(x = answer, y = reorder(party_short, answer), fill = n)) +
  geom_tile(color = "white") +
  scale_fill_viridis(name = "Anzahl\nKandidierende") +
  scale_x_continuous(breaks = round(seq(min(data_unnested_filtered$answer), 100, by = 25), 0)) +
  xlab(paste0("Antworten\n\nSkala: 0 = \"Deutlich weniger\" - 25 = \"Eher weniger\" - 50 = \"Gleich viel\"\n75 = \"Eher mehr\" - 100 = \"Deutlich mehr\"")) +
  ylab("Partei") +
  ggtitle(wrapper(paste0("Antwortverteilung zur Frage (pro Partei - gewaehlte Kandidierende): ", question_title))) +
  theme_bw()
# --- visualize
question_chart_vis_party
# ==============================================================================
# END
# ==============================================================================
|
#library(MASS)
#library(fda)
#source("~/Desktop/implant/input.list.R")
# Fit a penalized B-spline functional-ANOVA mean model to irregularly
# observed curves.
#
# Arguments (as used by the visible code):
#   Y.na.mat - matrix of responses, one curve per row; NA marks a time point
#              not observed for that subject.
#   X        - data frame of covariates; the fixed-effects design matrix is
#              built via model.matrix(formula, X).
#   tt       - vector of the candidate observation times (bound as the first
#              row of data.time to recover each subject's observed times).
#   K.int    - number of interior knots of the B-spline basis.
#   order    - B-spline order (4 = cubic splines).
#   d0       - derivative order used when evaluating the basis.
#   d1, d2   - derivative orders of the roughness penalty inprod(basis, basis, d1, d2).
#   lower, upper - log-scale bounds of the lambda grid searched by GCV.
#
# Returns a list with the estimated mean curves (est_fun, one column per
# design-matrix column), the GCV-selected lambda, spline coefficients bhat,
# smoother matrix S, the design matrix Phi, and the settings needed to
# reconstruct the fit.
#
# NOTE(review): depends on the 'fda' package (create.bspline.basis, inprod,
# eval.basis) and on an external helper input.list() -- neither is defined
# in this file; confirm they are loaded before calling.
fanova_mean = function(Y.na.mat, X, tt, formula, K.int = 6, order = 4,
#interact = 0, #p = 2,
d0 = 0, d1 = 2, d2 = 2, lower = -10, upper = 15){
# Rebuild per-subject observation times: T_na[i, j] is tt[j] where the
# response was observed and NA where it was missing.
data.time = rbind(tt,Y.na.mat)
T_na = Y.na.mat
for ( i in 1:dim(Y.na.mat)[1]){
for (j in 1:dim(Y.na.mat)[2]){
if(is.na(Y.na.mat[i,j])){
T_na[i,j] = NA
}
else{
T_na[i,j] = data.time[1,j]
}
}
}
# input.list() (external) splits the matrix into per-subject lists of
# observed responses (Y) and times (T); vectorised forms follow.
Y = input.list(data = Y.na.mat, Time_na = as.matrix(T_na), p = 0)$Y
T = input.list(data = Y.na.mat, Time_na = as.matrix(T_na), p = 0)$T
Y.vec = Reduce(c,Y)
T.vec = Reduce(c,T)
n = length(Y)
#tt = sort(as.numeric(unique(T.vec)))
#J = length(tt) # number of time points
### basis functions ###
order = order # In default, order = 4, cubic splines, for second derivative, using order 6 rather than 4
total.time = max(tt)
# K.int equally spaced interior knots on (0, total.time).
knots = total.time*(1:K.int)/(1+K.int)
# K is the number of basis functions in a spline basis system
K = length(knots) + order # number of basis functions
basis = create.bspline.basis(c(0,total.time), K, norder = order)
#design_matrix = NULL
#if (interact == 1) {
# design_matrix = model.matrix(as.formula(paste("~.^", p, sep='')), data = X)
# }
#if (interact == 0){
design_matrix = model.matrix(as.formula(formula), data = X)
#}
## evaluate original, the first, and second derivatives of basis functions ##
#dimension of BS is the length of tt by K
#BS = eval.basis(tt,basis,d0)
## penalty matrix ###
Omega = inprod(basis,basis,d1,d2) # for second derivative, using 4 rather than 2, 2 means the original function
### Design matrix ###
# if(regular == 1)
#{
#dimension of BS is the length of tt by K, i.e. just one dummy variable, r1
# BS = eval.basis(tt,basis,d0)
#Xmat = design_matrix %x% BS
#}
#if(regular == 0){
# Stack basis evaluations at each subject's own observation times.
BS = eval.basis(T[[1]],basis,d0)
for ( i in 2: n ){
BSnew = eval.basis(T[[i]],basis,d0)
BS = rbind(BS,BSnew)
}
###############
# Replicate the stacked basis block once per design-matrix column.
nf = ncol(design_matrix)
if( nf == 1) {
BSmat = BS
}
if( nf > 1){
BSmat = BS
for ( i in 2: nf){
BSnew1 = BS
BSmat = cbind(BSmat,BSnew1)
}
}
#####Construct the design_matrix
# m[i] = number of observed time points for subject i.
m = matrix(NA, nrow = n , ncol = 1)
for ( i in 1: n){
m[i] = length(as.numeric(T[[i]]))
}
# des_X broadcasts each subject's covariate value over its block of rows
# and the K basis columns of each design-matrix column.
des_X = matrix(NA, nrow = nrow(BSmat), ncol = ncol(BSmat))
for(j in 1:ncol(design_matrix)){
for ( i in 1:1){
# NOTE(review): the column index parses as (a:a):(j*K), i.e. effectively
# ((j-1)*K+1):(j*K); the duplicated range looks like a typo but happens
# to evaluate to the intended columns.
des_X[(1:m[i]),((j-1)*K+1):((j-1)*K+1):(j*K)] = design_matrix[i,j]
}
for ( i in 2:nrow(design_matrix)){
# NOTE(review): this first assignment rewrites columns 1:K with the same
# values on every pass of the j loop -- redundant but harmless.
des_X[(1:m[i]),(1:K)] = design_matrix[i,1]
des_X[(sum(m[1:(i-1)])+1):(sum(m[1:i])),((j-1)*K+1):(j*K)] = design_matrix[i,j]
}
}
# Elementwise product: full regression design (basis values x covariates).
Xmat = des_X * BSmat
#}
### Penalized least squares estimates ###
# Choose lambda by generalised cross-validation over a log-spaced grid.
tuning_nointer = function(lower, upper, Omega, Xmat, Y.vec){
lam.list=exp(seq(lower,upper,1))
gcv=rep(0,length(lam.list))
for(ii in 1:length(lam.list)){
# Block-diagonal penalty: one Omega*lambda block per design column.
Omega_lam = matrix(0, nrow = ncol(design_matrix)*nrow(Omega),ncol = ncol(design_matrix)*ncol(Omega))
for ( i in 1: ncol(design_matrix)){
Omega_lam[((1+(i-1)*dim(Omega)[1]):(i*dim(Omega)[1])),((1+(i-1)*dim(Omega)[2]):(i*dim(Omega)[2]))] = Omega*lam.list[ii]
}
#A <- solve(t(Xmat) %*% Xmat + Omega_lam)
A <- solve(t(Xmat) %*% Xmat + Omega_lam)
Y.vec.hat <- (Xmat%*%A) %*% (t(Xmat)%*%Y.vec)
diag.mean <- sum(diag(t(Xmat)%*%Xmat%*%A))/(dim(Xmat)[1])
gcv[ii] <- mean((Y.vec-Y.vec.hat)^2)/(1-diag.mean)^2
}
ind=which(gcv == min(gcv))
lam.list[ind]
}
#Find the tunning parameter lambda
lam = tuning_nointer(lower,upper,Omega,Xmat,Y.vec)
#Using lam to define the matrix adiag(Omega*lambda)
Omegabylam = matrix(0, nrow = ncol(design_matrix)*nrow(Omega),ncol = ncol(design_matrix)*ncol(Omega))
for ( i in 1: ncol(design_matrix)){
Omegabylam[((1+(i-1)*dim(Omega)[1]):(i*dim(Omega)[1])),((1+(i-1)*dim(Omega)[2]):(i*dim(Omega)[2]))] = Omega*lam
}
#bhat = solve(t(Xmat)%*%Xmat+Omegabylam)%*%t(Xmat)%*%Y.vec
# Ridge-type penalized least-squares solution and its smoother matrix.
bhat = solve(t(Xmat)%*%Xmat+Omegabylam)%*%t(Xmat)%*%Y.vec
#S = solve(t(Xmat)%*%Xmat+Omegabylam)%*%t(Xmat)
S = solve(t(Xmat)%*%Xmat+Omegabylam)%*%t(Xmat)
########### estimated curve ###########
# Evaluate the fitted mean curves on the common grid tt.
BS = eval.basis(tt,basis,d0)
m = length(tt)
#para is the matrix of new_Phi(with t time points) by the betahat of each variable, each column is a t*1 vector
para = matrix(0,nrow = m,ncol = ncol(design_matrix))
for ( i in 1:ncol(design_matrix)){
para[,i] = BS %*% bhat[((i-1)*K+1):((i-1)*K+K)]
}
#return the matrix of estimated mean functions
return(list(est_fun = para,lambda = lam, bhat = bhat, S = S, Phi = Xmat,
order = order, K = K, total.time = total.time, tps = tt, X = X, Y_na = Y.na.mat, formula = formula,
d0 = d0, d1 = d1, d2 = d2 ))
}
|
/R/fanova_mean.R
|
no_license
|
rwang14/abc
|
R
| false
| false
| 4,877
|
r
|
#library(MASS)
#library(fda)
#source("~/Desktop/implant/input.list.R")
# Fit a penalized B-spline functional-ANOVA mean model to irregularly
# observed curves.
#
# Arguments (as used by the visible code):
#   Y.na.mat - matrix of responses, one curve per row; NA marks a time point
#              not observed for that subject.
#   X        - data frame of covariates; the fixed-effects design matrix is
#              built via model.matrix(formula, X).
#   tt       - vector of the candidate observation times (bound as the first
#              row of data.time to recover each subject's observed times).
#   K.int    - number of interior knots of the B-spline basis.
#   order    - B-spline order (4 = cubic splines).
#   d0       - derivative order used when evaluating the basis.
#   d1, d2   - derivative orders of the roughness penalty inprod(basis, basis, d1, d2).
#   lower, upper - log-scale bounds of the lambda grid searched by GCV.
#
# Returns a list with the estimated mean curves (est_fun, one column per
# design-matrix column), the GCV-selected lambda, spline coefficients bhat,
# smoother matrix S, the design matrix Phi, and the settings needed to
# reconstruct the fit.
#
# NOTE(review): depends on the 'fda' package (create.bspline.basis, inprod,
# eval.basis) and on an external helper input.list() -- neither is defined
# in this file; confirm they are loaded before calling.
fanova_mean = function(Y.na.mat, X, tt, formula, K.int = 6, order = 4,
#interact = 0, #p = 2,
d0 = 0, d1 = 2, d2 = 2, lower = -10, upper = 15){
# Rebuild per-subject observation times: T_na[i, j] is tt[j] where the
# response was observed and NA where it was missing.
data.time = rbind(tt,Y.na.mat)
T_na = Y.na.mat
for ( i in 1:dim(Y.na.mat)[1]){
for (j in 1:dim(Y.na.mat)[2]){
if(is.na(Y.na.mat[i,j])){
T_na[i,j] = NA
}
else{
T_na[i,j] = data.time[1,j]
}
}
}
# input.list() (external) splits the matrix into per-subject lists of
# observed responses (Y) and times (T); vectorised forms follow.
Y = input.list(data = Y.na.mat, Time_na = as.matrix(T_na), p = 0)$Y
T = input.list(data = Y.na.mat, Time_na = as.matrix(T_na), p = 0)$T
Y.vec = Reduce(c,Y)
T.vec = Reduce(c,T)
n = length(Y)
#tt = sort(as.numeric(unique(T.vec)))
#J = length(tt) # number of time points
### basis functions ###
order = order # In default, order = 4, cubic splines, for second derivative, using order 6 rather than 4
total.time = max(tt)
# K.int equally spaced interior knots on (0, total.time).
knots = total.time*(1:K.int)/(1+K.int)
# K is the number of basis functions in a spline basis system
K = length(knots) + order # number of basis functions
basis = create.bspline.basis(c(0,total.time), K, norder = order)
#design_matrix = NULL
#if (interact == 1) {
# design_matrix = model.matrix(as.formula(paste("~.^", p, sep='')), data = X)
# }
#if (interact == 0){
design_matrix = model.matrix(as.formula(formula), data = X)
#}
## evaluate original, the first, and second derivatives of basis functions ##
#dimension of BS is the length of tt by K
#BS = eval.basis(tt,basis,d0)
## penalty matrix ###
Omega = inprod(basis,basis,d1,d2) # for second derivative, using 4 rather than 2, 2 means the original function
### Design matrix ###
# if(regular == 1)
#{
#dimension of BS is the length of tt by K, i.e. just one dummy variable, r1
# BS = eval.basis(tt,basis,d0)
#Xmat = design_matrix %x% BS
#}
#if(regular == 0){
# Stack basis evaluations at each subject's own observation times.
BS = eval.basis(T[[1]],basis,d0)
for ( i in 2: n ){
BSnew = eval.basis(T[[i]],basis,d0)
BS = rbind(BS,BSnew)
}
###############
# Replicate the stacked basis block once per design-matrix column.
nf = ncol(design_matrix)
if( nf == 1) {
BSmat = BS
}
if( nf > 1){
BSmat = BS
for ( i in 2: nf){
BSnew1 = BS
BSmat = cbind(BSmat,BSnew1)
}
}
#####Construct the design_matrix
# m[i] = number of observed time points for subject i.
m = matrix(NA, nrow = n , ncol = 1)
for ( i in 1: n){
m[i] = length(as.numeric(T[[i]]))
}
# des_X broadcasts each subject's covariate value over its block of rows
# and the K basis columns of each design-matrix column.
des_X = matrix(NA, nrow = nrow(BSmat), ncol = ncol(BSmat))
for(j in 1:ncol(design_matrix)){
for ( i in 1:1){
# NOTE(review): the column index parses as (a:a):(j*K), i.e. effectively
# ((j-1)*K+1):(j*K); the duplicated range looks like a typo but happens
# to evaluate to the intended columns.
des_X[(1:m[i]),((j-1)*K+1):((j-1)*K+1):(j*K)] = design_matrix[i,j]
}
for ( i in 2:nrow(design_matrix)){
# NOTE(review): this first assignment rewrites columns 1:K with the same
# values on every pass of the j loop -- redundant but harmless.
des_X[(1:m[i]),(1:K)] = design_matrix[i,1]
des_X[(sum(m[1:(i-1)])+1):(sum(m[1:i])),((j-1)*K+1):(j*K)] = design_matrix[i,j]
}
}
# Elementwise product: full regression design (basis values x covariates).
Xmat = des_X * BSmat
#}
### Penalized least squares estimates ###
# Choose lambda by generalised cross-validation over a log-spaced grid.
tuning_nointer = function(lower, upper, Omega, Xmat, Y.vec){
lam.list=exp(seq(lower,upper,1))
gcv=rep(0,length(lam.list))
for(ii in 1:length(lam.list)){
# Block-diagonal penalty: one Omega*lambda block per design column.
Omega_lam = matrix(0, nrow = ncol(design_matrix)*nrow(Omega),ncol = ncol(design_matrix)*ncol(Omega))
for ( i in 1: ncol(design_matrix)){
Omega_lam[((1+(i-1)*dim(Omega)[1]):(i*dim(Omega)[1])),((1+(i-1)*dim(Omega)[2]):(i*dim(Omega)[2]))] = Omega*lam.list[ii]
}
#A <- solve(t(Xmat) %*% Xmat + Omega_lam)
A <- solve(t(Xmat) %*% Xmat + Omega_lam)
Y.vec.hat <- (Xmat%*%A) %*% (t(Xmat)%*%Y.vec)
diag.mean <- sum(diag(t(Xmat)%*%Xmat%*%A))/(dim(Xmat)[1])
gcv[ii] <- mean((Y.vec-Y.vec.hat)^2)/(1-diag.mean)^2
}
ind=which(gcv == min(gcv))
lam.list[ind]
}
#Find the tunning parameter lambda
lam = tuning_nointer(lower,upper,Omega,Xmat,Y.vec)
#Using lam to define the matrix adiag(Omega*lambda)
Omegabylam = matrix(0, nrow = ncol(design_matrix)*nrow(Omega),ncol = ncol(design_matrix)*ncol(Omega))
for ( i in 1: ncol(design_matrix)){
Omegabylam[((1+(i-1)*dim(Omega)[1]):(i*dim(Omega)[1])),((1+(i-1)*dim(Omega)[2]):(i*dim(Omega)[2]))] = Omega*lam
}
#bhat = solve(t(Xmat)%*%Xmat+Omegabylam)%*%t(Xmat)%*%Y.vec
# Ridge-type penalized least-squares solution and its smoother matrix.
bhat = solve(t(Xmat)%*%Xmat+Omegabylam)%*%t(Xmat)%*%Y.vec
#S = solve(t(Xmat)%*%Xmat+Omegabylam)%*%t(Xmat)
S = solve(t(Xmat)%*%Xmat+Omegabylam)%*%t(Xmat)
########### estimated curve ###########
# Evaluate the fitted mean curves on the common grid tt.
BS = eval.basis(tt,basis,d0)
m = length(tt)
#para is the matrix of new_Phi(with t time points) by the betahat of each variable, each column is a t*1 vector
para = matrix(0,nrow = m,ncol = ncol(design_matrix))
for ( i in 1:ncol(design_matrix)){
para[,i] = BS %*% bhat[((i-1)*K+1):((i-1)*K+K)]
}
#return the matrix of estimated mean functions
return(list(est_fun = para,lambda = lam, bhat = bhat, S = S, Phi = Xmat,
order = order, K = K, total.time = total.time, tps = tt, X = X, Y_na = Y.na.mat, formula = formula,
d0 = d0, d1 = d1, d2 = d2 ))
}
|
#' Utilities for Looping to Read In Documents
#'
#' `loop_counter` - A simple loop counter for tracking the progress of reading in
#' a batch of files.
#'
#' @param i Iteration of the loop.
#' @param total Total number of iterations.
#' @param file The file name of that iteration to print out.
#' @param ... ignored
#' @return `loop_counter` - Prints loop information; called for its side effect.
#' @export
#' @rdname loop_utilities
#' @examples
#' \dontrun{
#' files <- dir(
#'     system.file("docs", package = "textreadr"),
#'     full.names = TRUE,
#'     recursive = TRUE
#' )
#'
#' for (i in seq_along(files)) {
#'     loop_counter(i, length(files), base_name(files[i]))
#' }
#' }
loop_counter <- function(i, total, file, ...) {
    # Percentage complete, rounded to a whole number.
    pct <- round(100 * i / total, 0)
    # Left-pad the percentage to 3 characters so columns line up,
    # e.g. "  (7%)", " (45%)", "(100%)".
    pct_label <- paste0(strrep(' ', 3 - nchar(pct)), '(', pct, '%)')
    # Zero-pad the counter to the width of `total` (e.g. "007" of "100").
    counter <- sprintf(paste0("%0", nchar(total), "d"), i)
    cat(sprintf('%s of %s %s \'%s\'\n', counter, total, pct_label, file))
    # Force the line out immediately so progress is visible mid-loop.
    utils::flush.console()
}
#' Utilities for Looping to Read In Documents
#'
#' `base_name` - Like `base::basename` but doesn't choke on long paths.
#'
#' @param path A character vector, containing path names.
#' @export
#' @return `base_name` - Returns just the basename of the path.
#' @rdname loop_utilities
base_name <- function(path) {
    # Drop everything up to and including the last '/'; elements with no
    # '/' are returned unchanged.
    gsub('^.+/', '', path)
}
#' Utilities for Looping to Read In Documents
#'
#' `try_limit` - Limits the amount of time that an expression can run for. This
#' works to limit how long an attempted read-in of a document may take. Most
#' useful in a loop with a few very long running document read-ins (e.g., .pdf
#' files that require [**tesseract** package](https://CRAN.R-project.org/package=tesseract)).
#' Note that `max.time` can not stop a `system` call (as many read-in functions
#' are essentially utilizing), but it can limit how many `system` calls are made.
#' This means a .pdf with multiple
#' [**tesseract**](https://CRAN.R-project.org/package=tesseract) pages will only
#' allow the first page to read-in before returning an error result. Note that
#' this approach does not distinguish between errors running the `expr` and
#' time-out errors.
#'
#' @param expr An expression to run.
#' @param max.time Max allotted elapsed run time in seconds.
#' @param timeout.return Value to return for timeouts (and any other error).
#' @param zero.length.return Value to return for length zero expression evaluations.
#' @param silent logical. If `TRUE` report of error messages.
#' @param ... ignored.
#' @export
#' @rdname loop_utilities
try_limit <- function(expr, max.time = Inf, timeout.return = NULL,
    zero.length.return = "", silent = TRUE, ...){
    # Impose the CPU/elapsed budget for this evaluation only, and guarantee
    # it is lifted again no matter how `expr` exits.
    setTimeLimit(cpu = max.time, elapsed = max.time, transient = TRUE)
    on.exit(setTimeLimit(cpu = Inf, elapsed = Inf, transient = FALSE), add = TRUE)
    out <- try(expr, silent = silent)
    # FIX: use the scalar short-circuit `||` (was the elementwise `|`) --
    # the correct operator for an `if()` condition; a time-out surfaces as a
    # try-error, so errors and time-outs share `timeout.return`.
    if (is.null(out) || inherits(out, "try-error")) return(timeout.return)
    if (length(out) == 0) zero.length.return else out
}
|
/R/loop_utilities.R
|
no_license
|
trinker/textreadr
|
R
| false
| false
| 3,391
|
r
|
#' Utilities for Looping to Read In Documents
#'
#' `loop_counter` - A simple loop counter for tracking the progress of reading in
#' a batch of files.
#'
#' @param i Iteration of the loop.
#' @param total Total number of iterations.
#' @param file The file name of that iteration to print out.
#' @param ... ignored
#' @return `loop_counter` - Prints loop information; called for its side effect.
#' @export
#' @rdname loop_utilities
#' @examples
#' \dontrun{
#' files <- dir(
#'     system.file("docs", package = "textreadr"),
#'     full.names = TRUE,
#'     recursive = TRUE
#' )
#'
#' for (i in seq_along(files)) {
#'     loop_counter(i, length(files), base_name(files[i]))
#' }
#' }
loop_counter <- function(i, total, file, ...) {
    # Percentage complete, rounded to a whole number.
    pct <- round(100 * i / total, 0)
    # Left-pad the percentage to 3 characters so columns line up,
    # e.g. "  (7%)", " (45%)", "(100%)".
    pct_label <- paste0(strrep(' ', 3 - nchar(pct)), '(', pct, '%)')
    # Zero-pad the counter to the width of `total` (e.g. "007" of "100").
    counter <- sprintf(paste0("%0", nchar(total), "d"), i)
    cat(sprintf('%s of %s %s \'%s\'\n', counter, total, pct_label, file))
    # Force the line out immediately so progress is visible mid-loop.
    utils::flush.console()
}
#' Utilities for Looping to Read In Documents
#'
#' `base_name` - Like `base::basename` but doesn't choke on long paths.
#'
#' @param path A character vector, containing path names.
#' @export
#' @return `base_name` - Returns just the basename of the path.
#' @rdname loop_utilities
base_name <- function(path) {
    # Drop everything up to and including the last '/'; elements with no
    # '/' are returned unchanged.
    gsub('^.+/', '', path)
}
#' Utilities for Looping to Read In Documents
#'
#' `try_limit` - Limits the amount of time that an expression can run for. This
#' works to limit how long an attempted read-in of a document may take. Most
#' useful in a loop with a few very long running document read-ins (e.g., .pdf
#' files that require [**tesseract** package](https://CRAN.R-project.org/package=tesseract)).
#' Note that `max.time` can not stop a `system` call (as many read-in functions
#' are essentially utilizing), but it can limit how many `system` calls are made.
#' This means a .pdf with multiple
#' [**tesseract**](https://CRAN.R-project.org/package=tesseract) pages will only
#' allow the first page to read-in before returning an error result. Note that
#' this approach does not distinguish between errors running the `expr` and
#' time-out errors.
#'
#' @param expr An expression to run.
#' @param max.time Max allotted elapsed run time in seconds.
#' @param timeout.return Value to return for timeouts (and any other error).
#' @param zero.length.return Value to return for length zero expression evaluations.
#' @param silent logical. If `TRUE` report of error messages.
#' @param ... ignored.
#' @export
#' @rdname loop_utilities
try_limit <- function(expr, max.time = Inf, timeout.return = NULL,
    zero.length.return = "", silent = TRUE, ...){
    # Impose the CPU/elapsed budget for this evaluation only, and guarantee
    # it is lifted again no matter how `expr` exits.
    setTimeLimit(cpu = max.time, elapsed = max.time, transient = TRUE)
    on.exit(setTimeLimit(cpu = Inf, elapsed = Inf, transient = FALSE), add = TRUE)
    out <- try(expr, silent = silent)
    # FIX: use the scalar short-circuit `||` (was the elementwise `|`) --
    # the correct operator for an `if()` condition; a time-out surfaces as a
    # try-error, so errors and time-outs share `timeout.return`.
    if (is.null(out) || inherits(out, "try-error")) return(timeout.return)
    if (length(out) == 0) zero.length.return else out
}
|
# Scratch (For Testing)
# Removing NULL Values From A Vector
# NOTE(review): c() drops NULL at construction time, so numbers.data has
# length 4 here (not 5) and never contains a "NULL" entry to remove.
numbers.data <- as.character(c(1, 2, 3, NULL, 5)); numbers.data
letters.data <- c(letters[1:5])
numbers.str <- as.character(numbers.data); numbers.str
# NOTE(review): replace()'s second argument is an index, not a value to
# match -- indexing by the absent name "NULL" appends a new NA element
# named "NULL" rather than replacing anything.
numbers.str <- replace(numbers.str, "NULL", NA); numbers.str
# NOTE(review): numbers.data (length 4) and letters.data (length 5) have
# non-recyclable lengths, so this data.frame() call errors at runtime.
data <- data.frame(numbers.data, letters.data)
# complete.cases() on an NA-free character vector is all TRUE, so these
# subsets return the full vectors.
letters.data[complete.cases(numbers.data)]
numbers.data[complete.cases(numbers.data)]
|
/incomplete/scratchpad.r
|
no_license
|
anhnguyendepocen/r-code-examples
|
R
| false
| false
| 415
|
r
|
# Scratch (For Testing)
# Removing NULL Values From A Vector
# NOTE(review): c() drops NULL at construction time, so numbers.data has
# length 4 here (not 5) and never contains a "NULL" entry to remove.
numbers.data <- as.character(c(1, 2, 3, NULL, 5)); numbers.data
letters.data <- c(letters[1:5])
numbers.str <- as.character(numbers.data); numbers.str
# NOTE(review): replace()'s second argument is an index, not a value to
# match -- indexing by the absent name "NULL" appends a new NA element
# named "NULL" rather than replacing anything.
numbers.str <- replace(numbers.str, "NULL", NA); numbers.str
# NOTE(review): numbers.data (length 4) and letters.data (length 5) have
# non-recyclable lengths, so this data.frame() call errors at runtime.
data <- data.frame(numbers.data, letters.data)
# complete.cases() on an NA-free character vector is all TRUE, so these
# subsets return the full vectors.
letters.data[complete.cases(numbers.data)]
numbers.data[complete.cases(numbers.data)]
|
#' @title Nucleotide Divergence
#' @description Calculate distributions of between- and within-strata
#'   nucleotide divergence (sequence distance), which includes
#'   Nei's \eqn{\pi} (usually referred to as "nucleotide diversity") and
#'   Nei's dA between strata.
#'
#' @param g a \linkS4class{gtypes} object.
#' @param probs a numeric vector of probabilities of the pairwise distance
#'   distributions with values in \code{0:1}.
#' @param model evolutionary model to be used. see \code{\link[ape]{dist.dna}}
#'   for options.
#' @param ... other arguments passed to \code{\link[ape]{dist.dna}}.
#'
#' @return a list with summaries of the \code{$within} and \code{$between} strata
#'   pairwise distances including Nei's dA (in \code{$between}).
#'   Nei's \eqn{\pi} is the mean between-strata divergence.
#'
#' @references Nei, M., and S. Kumar (2000) Molecular Evolution and
#'   Phylogenetics. Oxford University Press, Oxford. (dA: pp. 256, eqn 12.67)
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @examples
#' data(dloop.g)
#'
#' nd <- nucleotideDivergence(dloop.g)
#' nd$within
#' nd$between
#'
#' @aliases dA
#' @export
#'
nucleotideDivergence <- function(g, probs = c(0, 0.025, 0.5, 0.975, 1),
model = "raw", ...) {
# Only meaningful for haploid sequence data.
if(getPloidy(g) > 1) stop("'g' must be haploid")
if(is.null(g@sequences)) stop("'g' must have sequences")
# Summarise the distances for haplotype pairs in `haps` (a 2 x n matrix of
# haplotype ids indexing distance matrix `d`): mean plus requested quantiles.
# A pair containing a missing haplotype contributes NA (removed via na.rm).
.pair.dist.smry <- function(haps, d, probs) {
pws.dist <- apply(haps, 2, function(h) {
if(any(is.na(h))) return(NA)
d[h[1], h[2]]
})
dist.quant <- stats::quantile(pws.dist, probs, na.rm = TRUE)
names(dist.quant) <- paste0("q.", probs)
c(mean = mean(pws.dist, na.rm = TRUE), dist.quant)
}
# Unpack the list-column `smry` (produced by dplyr::do) into ordinary
# numeric columns, one row per group.
.expand.smry.cols <- function(df) {
dplyr::bind_cols(
df,
as.data.frame(do.call(rbind, df$smry))
) %>%
dplyr::select(-.data$smry) %>%
as.data.frame()
}
# Subset `g` on its stratum slot.  NOTE(review): relies on the gtypes
# `[` method; presumably drops unstratified individuals -- confirm.
g <- g[, , getStrataNames(g)]
# One pairwise distance matrix per locus, computed on unique haplotypes.
hap.dist <- sapply(
getSequences(g, as.haplotypes = TRUE, simplify = FALSE),
ape::dist.dna,
model = model,
as.matrix = TRUE,
...,
simplify = FALSE
)
bad.dists <- sapply(hap.dist, function(x) any(is.nan(x) | is.infinite(x)))
if(any(bad.dists)) {
warning(
"NaN or InF returned for some pairwise distances. ",
"See Note in ?ape::dist.dna for an explanation."
)
}
# Within-stratum divergence: summarise all haplotype pairs within each
# locus/stratum combination (NA summary for singleton groups).
within <- g@data %>%
dplyr::group_by(.data$locus, .data$stratum) %>%
dplyr::do(smry = {
if(nrow(.data) == 1) {
as.numeric(c(
mean = NA,
stats::setNames(rep(NA, length(probs)), paste0("q.", probs))
))
} else {
haps <- utils::combn(.data$allele, 2)
loc <- unique(.data$locus)
.pair.dist.smry(haps, hap.dist[[loc]], probs)
}
})
within <- .expand.smry.cols(within)
# .strataPairs() is a package-internal helper (not shown in this file);
# presumably NULL when fewer than two strata exist, hence the NA fallback.
st.pairs <- .strataPairs(g)
between <- if(is.null(st.pairs)) NA else {
# Between-strata divergence for every locus x stratum pair: all
# cross-strata haplotype pairs, plus Nei's dA = between mean minus half
# the sum of the two within-stratum means (Nei & Kumar 2000, eqn 12.67).
result <- do.call(rbind, lapply(getLociNames(g), function(loc) {
cbind(locus = loc, st.pairs)
})) %>%
dplyr::group_by(.data$locus, .data$strata.1, .data$strata.2) %>%
dplyr::do(smry = {
st.1 <- .data$strata.1
st.2 <- .data$strata.2
loc <- unique(.data$locus)
h1 <- g@data %>%
dplyr::filter(.data$stratum == st.1) %>%
dplyr::pull(.data$allele)
h2 <- g@data %>%
dplyr::filter(.data$stratum == st.2) %>%
dplyr::pull(.data$allele)
haps <- t(expand.grid(h1, h2))
smry <- .pair.dist.smry(haps, hap.dist[[loc]], probs)
wthn.sum <- within %>%
dplyr::filter(.data$locus == loc & .data$stratum %in% c(st.1, st.2)) %>%
dplyr::summarize(sum = sum(.data$mean, na.rm = TRUE)) %>%
dplyr::pull("sum")
dA <- unname(smry["mean"] - (wthn.sum / 2))
c(dA = dA, smry)
})
.expand.smry.cols(result)
}
list(within = within, between = between)
}
|
/R/nucleotideDivergence.R
|
no_license
|
EricArcher/strataG
|
R
| false
| false
| 3,939
|
r
|
#' @title Nucleotide Divergence
#' @description Calculate distributions of between- and within-strata
#'   nucleotide divergence (sequence distance), which includes
#'   Nei's \eqn{\pi} (usually referred to as "nucleotide diversity") and
#'   Nei's dA between strata.
#'
#' @param g a \linkS4class{gtypes} object.
#' @param probs a numeric vector of probabilities of the pairwise distance
#'   distributions with values in \code{0:1}.
#' @param model evolutionary model to be used. see \code{\link[ape]{dist.dna}}
#'   for options.
#' @param ... other arguments passed to \code{\link[ape]{dist.dna}}.
#'
#' @return a list with summaries of the \code{$within} and \code{$between} strata
#'   pairwise distances including Nei's dA (in \code{$between}).
#'   Nei's \eqn{\pi} is the mean between-strata divergence.
#'
#' @references Nei, M., and S. Kumar (2000) Molecular Evolution and
#'   Phylogenetics. Oxford University Press, Oxford. (dA: pp. 256, eqn 12.67)
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @examples
#' data(dloop.g)
#'
#' nd <- nucleotideDivergence(dloop.g)
#' nd$within
#' nd$between
#'
#' @aliases dA
#' @export
#'
nucleotideDivergence <- function(g, probs = c(0, 0.025, 0.5, 0.975, 1),
model = "raw", ...) {
# Only meaningful for haploid sequence data.
if(getPloidy(g) > 1) stop("'g' must be haploid")
if(is.null(g@sequences)) stop("'g' must have sequences")
# Summarise the distances for haplotype pairs in `haps` (a 2 x n matrix of
# haplotype ids indexing distance matrix `d`): mean plus requested quantiles.
# A pair containing a missing haplotype contributes NA (removed via na.rm).
.pair.dist.smry <- function(haps, d, probs) {
pws.dist <- apply(haps, 2, function(h) {
if(any(is.na(h))) return(NA)
d[h[1], h[2]]
})
dist.quant <- stats::quantile(pws.dist, probs, na.rm = TRUE)
names(dist.quant) <- paste0("q.", probs)
c(mean = mean(pws.dist, na.rm = TRUE), dist.quant)
}
# Unpack the list-column `smry` (produced by dplyr::do) into ordinary
# numeric columns, one row per group.
.expand.smry.cols <- function(df) {
dplyr::bind_cols(
df,
as.data.frame(do.call(rbind, df$smry))
) %>%
dplyr::select(-.data$smry) %>%
as.data.frame()
}
# Subset `g` on its stratum slot.  NOTE(review): relies on the gtypes
# `[` method; presumably drops unstratified individuals -- confirm.
g <- g[, , getStrataNames(g)]
# One pairwise distance matrix per locus, computed on unique haplotypes.
hap.dist <- sapply(
getSequences(g, as.haplotypes = TRUE, simplify = FALSE),
ape::dist.dna,
model = model,
as.matrix = TRUE,
...,
simplify = FALSE
)
bad.dists <- sapply(hap.dist, function(x) any(is.nan(x) | is.infinite(x)))
if(any(bad.dists)) {
warning(
"NaN or InF returned for some pairwise distances. ",
"See Note in ?ape::dist.dna for an explanation."
)
}
# Within-stratum divergence: summarise all haplotype pairs within each
# locus/stratum combination (NA summary for singleton groups).
within <- g@data %>%
dplyr::group_by(.data$locus, .data$stratum) %>%
dplyr::do(smry = {
if(nrow(.data) == 1) {
as.numeric(c(
mean = NA,
stats::setNames(rep(NA, length(probs)), paste0("q.", probs))
))
} else {
haps <- utils::combn(.data$allele, 2)
loc <- unique(.data$locus)
.pair.dist.smry(haps, hap.dist[[loc]], probs)
}
})
within <- .expand.smry.cols(within)
# .strataPairs() is a package-internal helper (not shown in this file);
# presumably NULL when fewer than two strata exist, hence the NA fallback.
st.pairs <- .strataPairs(g)
between <- if(is.null(st.pairs)) NA else {
# Between-strata divergence for every locus x stratum pair: all
# cross-strata haplotype pairs, plus Nei's dA = between mean minus half
# the sum of the two within-stratum means (Nei & Kumar 2000, eqn 12.67).
result <- do.call(rbind, lapply(getLociNames(g), function(loc) {
cbind(locus = loc, st.pairs)
})) %>%
dplyr::group_by(.data$locus, .data$strata.1, .data$strata.2) %>%
dplyr::do(smry = {
st.1 <- .data$strata.1
st.2 <- .data$strata.2
loc <- unique(.data$locus)
h1 <- g@data %>%
dplyr::filter(.data$stratum == st.1) %>%
dplyr::pull(.data$allele)
h2 <- g@data %>%
dplyr::filter(.data$stratum == st.2) %>%
dplyr::pull(.data$allele)
haps <- t(expand.grid(h1, h2))
smry <- .pair.dist.smry(haps, hap.dist[[loc]], probs)
wthn.sum <- within %>%
dplyr::filter(.data$locus == loc & .data$stratum %in% c(st.1, st.2)) %>%
dplyr::summarize(sum = sum(.data$mean, na.rm = TRUE)) %>%
dplyr::pull("sum")
dA <- unname(smry["mean"] - (wthn.sum / 2))
c(dA = dA, smry)
})
.expand.smry.cols(result)
}
list(within = within, between = between)
}
|
#' Step 1 of annex 4-10 integration
#'
#' Builds the UI for step 1 ("compare the uploaded file with the database"):
#' two action buttons plus a collapsible box of HTML messages and DT tables
#' for duplicated / updated / deleted / new / missing records.  The matching
#' output ids are rendered by the companion server module.
#'
#' @param id, character used to specify namespace, see \code{shiny::\link[shiny]{NS}}
#'
#' @return a \code{shiny::\link[shiny]{tagList}} containing UI elements
#'
#'
importstep1UI <- function(id){
# Namespace every input/output id so multiple instances of this module can
# coexist; the server module must use the same ids.
ns <- NS(id)
tagList(useShinyjs(),
tags$hr(),
h2("step 1 : Compare with database"),
fluidRow(
# Button toolbar: run the duplicate check / clear the result panels.
# NOTE(review): a fluidRow nested directly inside a fluidRow is unusual;
# presumably intended as a simple toolbar row -- confirm layout.
fluidRow(column(width=2,
actionButton(ns("check_duplicate_button"), "Check duplicate")),
column(width=2,
actionButton(ns("clean_output"), "Clean Output"))),
# Collapsible results box; all outputs below are filled by the server.
box(fluidRow(
# Left column: records already in the database, updated, or deleted.
column(width=5,
h3("Duplicated data"),
htmlOutput(ns("step1_message_duplicates")),
DT::dataTableOutput(ns("dt_duplicates")),
h3("Updated data"),
htmlOutput(ns("step1_message_updated")),
DT::dataTableOutput(ns("dt_updated_values")),
h3("Deleted data"),
htmlOutput(ns("step1_message_deleted")),
DT::dataTableOutput(ns("dt_deleted_values"))),
# Right column: records new to the database.
column(width=5,
h3("New values"),
htmlOutput(ns("step1_message_new")),
DT::dataTableOutput(ns("dt_new")))),
# Second row: overall modification summary and still-missing data.
fluidRow(
column(width=5,
h3("Summary modifications"),
DT::dataTableOutput(ns("dt_check_duplicates"))),
column(width=5,
h3("summary still missing"),
DT::dataTableOutput(ns("dt_missing")))), collapsible=TRUE, width=12)
))
}
#' Step 1 of annex 4-10 integration
#'
#' @param id, character used to specify namespace, see \code{shiny::\link[shiny]{NS}}
#' @param globaldata a reactive value with global variable
#' @param loaded_data data from step 0
#'
#' @return nothing
importstep1Server <- function(id,globaldata, loaded_data){
moduleServer(id,
function(input, output, session) {
observeEvent(input$clean_output,
{
shinyCatch({
output$dt_duplicates<-renderDataTable(data.frame())
output$dt_check_duplicates<-renderDataTable(data.frame())
output$dt_new<-renderDataTable(data.frame())
output$dt_missing<-renderDataTable(data.frame())
output$dt_updated_values <- renderDataTable(data.frame())
output$dt_deleted_values <- renderDataTable(data.frame())
if ("updated_values_table" %in% names(globaldata)) {
globaldata$updated_values_table<-data.frame()
}
if ("deleted_values_table" %in% names(globaldata)) {
globaldata$deleted_values_table<-data.frame()
}
}) #shinyCatch
})
##################################################
# Events triggerred by step1_button
###################################################
##########################
# When check_duplicate_button is clicked
# this will render a datatable containing rows
# with duplicates values
#############################
observeEvent(input$check_duplicate_button,
{ #browser()# you can put browseR here
shinyCatch({
shinybusy::show_modal_spinner(text = "Checking File", color="#337ab7",spin="fading-circle")
# see step0load_data returns a list with res and messages
# and within res data and a dataframe of errors
validate(
need(length(loaded_data$res) > 0, "Please select a data set")
)
data_from_excel<- loaded_data$res$data
switch (loaded_data$file_type, "catch_landings"={
data_from_base <- extract_data("landings", quality=c(0,1,2,3,4), quality_check=TRUE)
updated_from_excel <- loaded_data$res$updated_data
deleted_from_excel <- loaded_data$res$deleted_data
},
"release"={
data_from_base <-extract_data("release", quality=c(0,1,2,3,4), quality_check=TRUE)
updated_from_excel <- loaded_data$res$updated_data
deleted_from_excel <- loaded_data$res$deleted_data
},
"aquaculture"={
data_from_base <- extract_data("aquaculture", quality=c(0,1,2,3,4), quality_check=TRUE)
updated_from_excel <- loaded_data$res$updated_data
deleted_from_excel <- loaded_data$res$deleted_data
},
"biomass"={
# bug in excel file - fixed in the template
#colnames(data_from_excel)[colnames(data_from_excel)=="typ_name"]<-"eel_typ_name"
deleted_from_excel<- loaded_data$res$deleted_data
updated_from_excel<- loaded_data$res$updated_data
data_from_excel$eel_lfs_code <- 'S' #always S
data_from_excel$eel_hty_code <- 'AL' #always AL
data_from_excel <- data_from_excel %>%
rename_with(function(x) tolower(gsub("biom_", "", x)),
starts_with("biom_")) %>%
mutate_at(vars(starts_with("perc_")), function(x) as.numeric(ifelse(x=='NP','-1',x)))
data_from_excel$eel_area_division <- as.vector(rep(NA,nrow(data_from_excel)),"character")
data_from_base<-rbind(
extract_data("b0", quality=c(0,1,2,3,4), quality_check=TRUE),
extract_data("bbest", quality=c(0,1,2,3,4), quality_check=TRUE),
extract_data("bcurrent", quality=c(0,1,2,3,4), quality_check=TRUE))
data_from_base <- data_from_base %>%
rename_with(function(x) tolower(gsub("biom_", "", x)),
starts_with("biom_"))
},
"potential_available_habitat"={
data_from_base<-extract_data("potential_available_habitat", quality=c(0,1,2,3,4), quality_check=TRUE)
},
# mortality in silver eel equivalent
"silver_eel_equivalents"={
data_from_base<-extract_data("silver_eel_equivalents", quality=c(0,1,2,3,4), quality_check=TRUE)
},
"mortality_rates"={
deleted_from_excel<- loaded_data$res$deleted_data
updated_from_excel<- loaded_data$res$updated_data
data_from_excel$eel_lfs_code <- 'S' #always S
data_from_excel$eel_hty_code <- 'AL' #always AL
data_from_excel <- data_from_excel %>%
rename_with(function(x) tolower(gsub("mort_", "", x)),
starts_with("mort_")) %>%
mutate_at(vars(starts_with("perc_")), function(x) as.numeric(ifelse(x=='NP','-1',x)))
data_from_excel$eel_area_division <- as.vector(rep(NA,nrow(data_from_excel)),"character")
data_from_base<-rbind(
extract_data("sigmaa", quality=c(0,1,2,3,4), quality_check=TRUE),
extract_data("sigmaf", quality=c(0,1,2,3,4), quality_check=TRUE),
extract_data("sigmah", quality=c(0,1,2,3,4), quality_check=TRUE))
data_from_base <- data_from_base %>%
rename_with(function(x) tolower(gsub("mort_", "", x)),
starts_with("biom_"))
}
)
# the compare_with_database function will compare
# what is in the database and the content of the excel file
# previously loaded. It will return a list with two components
# the first duplicates contains elements to be returned to the user
# the second new contains a dataframe to be inserted straight into
# the database
#cat("step0")
if (nrow(data_from_excel)>0){
# this select eel type names 4 6
data_from_excel$eel_typ_name[data_from_excel$eel_typ_name %in% c("rec_landings","com_landings")] <- paste(data_from_excel$eel_typ_name[data_from_excel$eel_typ_name %in% c("rec_landings","com_landings")],"_kg",sep="")
eel_typ_valid <- switch(loaded_data$file_type,
"biomass"=13:15,
"mortality_rates"=17:25)
list_comp<-compare_with_database(data_from_excel,data_from_base,eel_typ_valid)
duplicates <- list_comp$duplicates
new <- list_comp$new
current_cou_code <- list_comp$current_cou_code
#cat("step1")
#####################
# Duplicates values
#####################
if (nrow(duplicates)==0) {
output$"step1_message_duplicates"<-renderUI(
HTML(
paste(
h4("No duplicates")
)))
}else{
output$"step1_message_duplicates"<-renderUI(
HTML(
paste(
h4("Table of duplicates (xls)"),
"<p align='left'>Please click on excel",
"to download this file. In <strong>keep new value</strong> choose true",
"to replace data using the new datacall data (true)",
"if new is selected don't forget to qualify your data in column <strong> eel_qal_id.xls, eel_qal_comment.xls </strong>",
"once this is done download the file and proceed to next step.",
"Rows with false will be ignored and kept as such in the database",
"Rows with true will use the column labelled .xls for the new insertion, and flag existing values as removed ",
"If you see an error in old data, use panel datacorrection (on top of the application), this will allow you to make changes directly in the database <p>"
)))
}
# table of number of duplicates values per year (hilaire)
years=sort(unique(c(duplicates$eel_year,new$eel_year)))
output$dt_duplicates <-DT::renderDataTable({
validate(need(globaldata$connectOK,"No connection"))
datatable(duplicates,
rownames=FALSE,
extensions = "Buttons",
option=list(
rownames = FALSE,
scroller = TRUE,
scrollX = TRUE,
scrollY = "500px",
order=list(3,"asc"),
lengthMenu=list(c(-1,5,20,50),c("All","5","20","50")),
"pagelength"=-1,
dom= "Blfrtip",
buttons=list(
list(extend="excel",
filename = paste0("duplicates_",loaded_data$file_type,"_",Sys.Date(),current_cou_code)))
))
})
if (nrow(new)==0) {
output$"step1_message_new"<-renderUI(
HTML(
paste(
h4("No new values")
)))
} else {
output$"step1_message_new"<-renderUI(
HTML(
paste(
h4("Table of new values (xls)"),
"<p align='left'>Please click on excel ",
"to download this file and qualify your data with columns <strong>qal_id, qal_comment</strong> ",
"once this is done download the file with button <strong>download new</strong> and proceed to next step.<p>"
)))
}
output$dt_new <-DT::renderDataTable({
validate(need(globaldata$connectOK,"No connection"))
datatable(new,
rownames=FALSE,
extensions = "Buttons",
option=list(
scroller = TRUE,
scrollX = TRUE,
scrollY = "500px",
order=list(3,"asc"),
lengthMenu=list(c(-1,5,20,50),c("All","5","20","50")),
"pagelength"=-1,
dom= "Blfrtip",
scrollX = T,
buttons=list(
list(extend="excel",
filename = paste0("new_",loaded_data$file_type,"_",Sys.Date(),current_cou_code)))
))
})
######
#Missing data
######
if (loaded_data$file_type == "catch_landings" & nrow(list_comp$complete)>0) {
output$dt_missing <- DT::renderDataTable({
validate(need(globaldata$connectOK,"No connection"))
check_missing_data(list_comp$complete, new)
})
}
} else {
output$dt_new <- DT::renderDataTable({validate(need(FALSE,"No data"))})
output$dt_duplicates <- DT::renderDataTable({validate(need(FALSE,"No data"))})
current_cou_code <- ""
}# closes if nrow(...
if (loaded_data$file_type %in% c("catch_landings","release", "aquaculture", "biomass","mortality_rates" )){
if (nrow(updated_from_excel)>0){
output$"step1_message_updated"<-renderUI(
HTML(
paste(
h4("Table of updated values (xls)"),
"<p align='left'>Please click on excel",
"to download this file. <p>"
)))
globaldata$updated_values_table <- compare_with_database_updated_values(updated_from_excel,data_from_base)
if (nrow(globaldata$updated_values_table)==0) stop("step1 compare_wih_database_updated_values did not return any values")
output$dt_updated_values <- DT::renderDataTable(
globaldata$updated_values_table,
rownames=FALSE,
extensions = "Buttons",
option=list(
scroller = TRUE,
scrollX = TRUE,
scrollY = "500px",
order=list(3,"asc"),
lengthMenu=list(c(-1,5,20,50),c("All","5","20","50")),
"pagelength"=-1,
dom= "Blfrtip",
scrollX = T,
buttons=list(
list(extend="excel",
filename = paste0("updated_",loaded_data$file_type,"_",Sys.Date(),current_cou_code)))
))
}else{
output$"step1_message_updated" <- renderUI("No data")
}
if (nrow(deleted_from_excel)>0){
output$"step1_message_deleted"<-renderUI(
HTML(
paste(
h4("Table of deleted values (xls)"),
"<p align='left'>Please click on excel",
"to download this file. <p>"
)))
globaldata$deleted_values_table <- compare_with_database_deleted_values(deleted_from_excel,data_from_base)
output$dt_deleted_values <- DT::renderDataTable(
globaldata$deleted_values_table,
rownames=FALSE,
extensions = "Buttons",
option=list(
scroller = TRUE,
scrollX = TRUE,
scrollY = "500px",
order=list(3,"asc"),
lengthMenu=list(c(-1,5,20,50),c("All","5","20","50")),
"pagelength"=-1,
dom= "Blfrtip",
scrollX = T,
buttons=list(
list(extend="excel",
filename = paste0("deleted_",loaded_data$file_type,"_",Sys.Date(),current_cou_code)))
))
}else{
output$"step1_message_deleted"<-renderUI("No data")
}
}
if (exists("years")){
summary_check_duplicates=data.frame(years=years,
nb_new=sapply(years, function(y) length(which(new$eel_year==y))),
nb_duplicates_updated=sapply(years,function(y) length(which(duplicates$eel_year==y & (duplicates$eel_value.base!=duplicates$eel_value.xls)))),
nb_duplicates_no_changes=sapply(years,function(y) length(which(duplicates$eel_year==y & (duplicates$eel_value.base==duplicates$eel_value.xls)))),
nb_updated_values=sapply(years, function(y) length(which(updated_from_excel$eel_year==y))),
nb_deleted_values=sapply(years,function(y) length(which(deleted_from_excel$eel_year==y))))
output$dt_check_duplicates <-DT::renderDataTable({
validate(need(globaldata$connectOK,"No connection"))
datatable(summary_check_duplicates,
rownames=FALSE,
options=list(dom="t",
rownames = FALSE,
scroller = TRUE,
scrollX = TRUE,
scrollY = "500px"
))
})
}
#data$new <- new # new is stored in the reactive dataset to be inserted later.
}) # shiny catch
shinybusy::remove_modal_spinner()
} ,# expr for browser
ignoreInit = TRUE)
})
}
|
/R/shiny_data_integration/shiny_di/importstep1.R
|
no_license
|
ices-eg/wg_WGEEL
|
R
| false
| false
| 17,155
|
r
|
#' Step 1 of annex 4-10 integration (UI)
#'
#' Builds the step-1 panel: action buttons, tables for duplicated / updated /
#' deleted / new values, and the two summary tables.
#'
#' @param id, character used to specify namespace, see \code{shiny::\link[shiny]{NS}}
#'
#' @return a \code{shiny::\link[shiny]{tagList}} containing UI elements
#'
#'
importstep1UI <- function(id){
  ns <- NS(id)
  # Row with the two action buttons driving this step.
  button_row <- fluidRow(
      column(width = 2,
          actionButton(ns("check_duplicate_button"), "Check duplicate")),
      column(width = 2,
          actionButton(ns("clean_output"), "Clean Output")))
  # Left side: duplicated, updated and deleted rows found in the database.
  left_column <- column(width = 5,
      h3("Duplicated data"),
      htmlOutput(ns("step1_message_duplicates")),
      DT::dataTableOutput(ns("dt_duplicates")),
      h3("Updated data"),
      htmlOutput(ns("step1_message_updated")),
      DT::dataTableOutput(ns("dt_updated_values")),
      h3("Deleted data"),
      htmlOutput(ns("step1_message_deleted")),
      DT::dataTableOutput(ns("dt_deleted_values")))
  # Right side: rows not yet present in the database.
  right_column <- column(width = 5,
      h3("New values"),
      htmlOutput(ns("step1_message_new")),
      DT::dataTableOutput(ns("dt_new")))
  # Bottom: per-year summary of modifications and still-missing rows.
  summary_row <- fluidRow(
      column(width = 5,
          h3("Summary modifications"),
          DT::dataTableOutput(ns("dt_check_duplicates"))),
      column(width = 5,
          h3("summary still missing"),
          DT::dataTableOutput(ns("dt_missing"))))
  tagList(useShinyjs(),
      tags$hr(),
      h2("step 1 : Compare with database"),
      fluidRow(
          button_row,
          box(fluidRow(left_column, right_column),
              summary_row, collapsible = TRUE, width = 12)
      ))
}
#' Step 1 of annex 4-10 integration (server)
#'
#' Compares the excel file loaded at step 0 with the content of the database
#' and renders tables of duplicated, new, updated, deleted and still-missing
#' rows so the user can qualify the data before insertion.
#'
#' @param id, character used to specify namespace, see \code{shiny::\link[shiny]{NS}}
#' @param globaldata a reactive value with global variable
#' @param loaded_data data from step 0
#'
#' @return nothing, called for its side effects on `output` and `globaldata`
importstep1Server <- function(id, globaldata, loaded_data){
  moduleServer(id,
      function(input, output, session) {

        # Shared DataTables options for the exportable tables. Centralised so
        # all four tables behave the same.
        # NOTE: `pageLength` was previously misspelled "pagelength" and thus
        # ignored; it is now applied so "All" rows show by default. The stray
        # `rownames` entry and the duplicated `scrollX` of the original option
        # lists (both ignored by DataTables) were dropped.
        dt_options <- function(file_prefix) {
          list(
              scroller = TRUE,
              scrollX = TRUE,
              scrollY = "500px",
              order = list(3, "asc"),
              lengthMenu = list(c(-1, 5, 20, 50), c("All", "5", "20", "50")),
              pageLength = -1,
              dom = "Blfrtip",
              buttons = list(
                  list(extend = "excel",
                      filename = file_prefix)))
        }

        # Clear every rendered table and drop the cached updated/deleted
        # comparison results stored in globaldata.
        observeEvent(input$clean_output,
            {
              shinyCatch({
                output$dt_duplicates <- renderDataTable(data.frame())
                output$dt_check_duplicates <- renderDataTable(data.frame())
                output$dt_new <- renderDataTable(data.frame())
                output$dt_missing <- renderDataTable(data.frame())
                output$dt_updated_values <- renderDataTable(data.frame())
                output$dt_deleted_values <- renderDataTable(data.frame())
                if ("updated_values_table" %in% names(globaldata)) {
                  globaldata$updated_values_table <- data.frame()
                }
                if ("deleted_values_table" %in% names(globaldata)) {
                  globaldata$deleted_values_table <- data.frame()
                }
              }) #shinyCatch
            })

        ##################################################
        # Events triggered by check_duplicate_button:
        # render datatables containing rows with duplicate,
        # new, updated, deleted and missing values.
        ##################################################
        observeEvent(input$check_duplicate_button,
            { #browser()# you can put browseR here
              shinyCatch({
                shinybusy::show_modal_spinner(text = "Checking File", color = "#337ab7", spin = "fading-circle")
                # step0 load_data returns a list with res and messages,
                # and within res: data and a dataframe of errors
                validate(
                    need(length(loaded_data$res) > 0, "Please select a data set")
                )
                data_from_excel <- loaded_data$res$data
                # Default to empty frames: file types whose branch below does
                # not provide updated/deleted sheets (e.g.
                # potential_available_habitat) would otherwise leave these
                # undefined and crash the per-year summary further down.
                updated_from_excel <- data.frame()
                deleted_from_excel <- data.frame()
                switch(loaded_data$file_type,
                    "catch_landings" = {
                      data_from_base <- extract_data("landings", quality = c(0, 1, 2, 3, 4), quality_check = TRUE)
                      updated_from_excel <- loaded_data$res$updated_data
                      deleted_from_excel <- loaded_data$res$deleted_data
                    },
                    "release" = {
                      data_from_base <- extract_data("release", quality = c(0, 1, 2, 3, 4), quality_check = TRUE)
                      updated_from_excel <- loaded_data$res$updated_data
                      deleted_from_excel <- loaded_data$res$deleted_data
                    },
                    "aquaculture" = {
                      data_from_base <- extract_data("aquaculture", quality = c(0, 1, 2, 3, 4), quality_check = TRUE)
                      updated_from_excel <- loaded_data$res$updated_data
                      deleted_from_excel <- loaded_data$res$deleted_data
                    },
                    "biomass" = {
                      # bug in excel file - fixed in the template
                      #colnames(data_from_excel)[colnames(data_from_excel)=="typ_name"]<-"eel_typ_name"
                      deleted_from_excel <- loaded_data$res$deleted_data
                      updated_from_excel <- loaded_data$res$updated_data
                      data_from_excel$eel_lfs_code <- 'S'  #always S
                      data_from_excel$eel_hty_code <- 'AL' #always AL
                      # Strip the biom_ prefix and turn 'NP' percentages into -1
                      # so excel and base columns line up for the comparison.
                      data_from_excel <- data_from_excel %>%
                          rename_with(function(x) tolower(gsub("biom_", "", x)),
                              starts_with("biom_")) %>%
                          mutate_at(vars(starts_with("perc_")), function(x) as.numeric(ifelse(x == 'NP', '-1', x)))
                      data_from_excel$eel_area_division <- as.vector(rep(NA, nrow(data_from_excel)), "character")
                      data_from_base <- rbind(
                          extract_data("b0", quality = c(0, 1, 2, 3, 4), quality_check = TRUE),
                          extract_data("bbest", quality = c(0, 1, 2, 3, 4), quality_check = TRUE),
                          extract_data("bcurrent", quality = c(0, 1, 2, 3, 4), quality_check = TRUE))
                      data_from_base <- data_from_base %>%
                          rename_with(function(x) tolower(gsub("biom_", "", x)),
                              starts_with("biom_"))
                    },
                    "potential_available_habitat" = {
                      data_from_base <- extract_data("potential_available_habitat", quality = c(0, 1, 2, 3, 4), quality_check = TRUE)
                    },
                    # mortality in silver eel equivalent
                    "silver_eel_equivalents" = {
                      data_from_base <- extract_data("silver_eel_equivalents", quality = c(0, 1, 2, 3, 4), quality_check = TRUE)
                    },
                    "mortality_rates" = {
                      deleted_from_excel <- loaded_data$res$deleted_data
                      updated_from_excel <- loaded_data$res$updated_data
                      data_from_excel$eel_lfs_code <- 'S'  #always S
                      data_from_excel$eel_hty_code <- 'AL' #always AL
                      data_from_excel <- data_from_excel %>%
                          rename_with(function(x) tolower(gsub("mort_", "", x)),
                              starts_with("mort_")) %>%
                          mutate_at(vars(starts_with("perc_")), function(x) as.numeric(ifelse(x == 'NP', '-1', x)))
                      data_from_excel$eel_area_division <- as.vector(rep(NA, nrow(data_from_excel)), "character")
                      data_from_base <- rbind(
                          extract_data("sigmaa", quality = c(0, 1, 2, 3, 4), quality_check = TRUE),
                          extract_data("sigmaf", quality = c(0, 1, 2, 3, 4), quality_check = TRUE),
                          extract_data("sigmah", quality = c(0, 1, 2, 3, 4), quality_check = TRUE))
                      # BUG FIX: selection was starts_with("biom_"), which never
                      # matches a mortality column, so the base columns kept
                      # their mort_ prefix and could not be compared with the
                      # renamed excel columns.
                      data_from_base <- data_from_base %>%
                          rename_with(function(x) tolower(gsub("mort_", "", x)),
                              starts_with("mort_"))
                    }
                )
                # compare_with_database compares what is in the database with
                # the content of the excel file previously loaded. It returns a
                # list: 'duplicates' (elements returned to the user) and 'new'
                # (a dataframe to be inserted straight into the database).
                if (nrow(data_from_excel) > 0){
                  # landings type names 4 and 6 are stored with a _kg suffix
                  data_from_excel$eel_typ_name[data_from_excel$eel_typ_name %in% c("rec_landings", "com_landings")] <- paste(data_from_excel$eel_typ_name[data_from_excel$eel_typ_name %in% c("rec_landings", "com_landings")], "_kg", sep = "")
                  eel_typ_valid <- switch(loaded_data$file_type,
                      "biomass" = 13:15,
                      "mortality_rates" = 17:25)
                  list_comp <- compare_with_database(data_from_excel, data_from_base, eel_typ_valid)
                  duplicates <- list_comp$duplicates
                  new <- list_comp$new
                  current_cou_code <- list_comp$current_cou_code
                  #####################
                  # Duplicates values
                  #####################
                  if (nrow(duplicates) == 0) {
                    output$"step1_message_duplicates" <- renderUI(
                        HTML(
                            paste(
                                h4("No duplicates")
                            )))
                  } else {
                    output$"step1_message_duplicates" <- renderUI(
                        HTML(
                            paste(
                                h4("Table of duplicates (xls)"),
                                "<p align='left'>Please click on excel",
                                "to download this file. In <strong>keep new value</strong> choose true",
                                "to replace data using the new datacall data (true)",
                                "if new is selected don't forget to qualify your data in column <strong> eel_qal_id.xls, eel_qal_comment.xls </strong>",
                                "once this is done download the file and proceed to next step.",
                                "Rows with false will be ignored and kept as such in the database",
                                "Rows with true will use the column labelled .xls for the new insertion, and flag existing values as removed ",
                                "If you see an error in old data, use panel datacorrection (on top of the application), this will allow you to make changes directly in the database <p>"
                            )))
                  }
                  # years covered by either duplicates or new rows; used by the
                  # per-year summary table at the bottom of the handler
                  years <- sort(unique(c(duplicates$eel_year, new$eel_year)))
                  output$dt_duplicates <- DT::renderDataTable({
                    validate(need(globaldata$connectOK, "No connection"))
                    datatable(duplicates,
                        rownames = FALSE,
                        extensions = "Buttons",
                        options = dt_options(paste0("duplicates_", loaded_data$file_type, "_", Sys.Date(), current_cou_code)))
                  })
                  if (nrow(new) == 0) {
                    output$"step1_message_new" <- renderUI(
                        HTML(
                            paste(
                                h4("No new values")
                            )))
                  } else {
                    output$"step1_message_new" <- renderUI(
                        HTML(
                            paste(
                                h4("Table of new values (xls)"),
                                "<p align='left'>Please click on excel ",
                                "to download this file and qualify your data with columns <strong>qal_id, qal_comment</strong> ",
                                "once this is done download the file with button <strong>download new</strong> and proceed to next step.<p>"
                            )))
                  }
                  output$dt_new <- DT::renderDataTable({
                    validate(need(globaldata$connectOK, "No connection"))
                    datatable(new,
                        rownames = FALSE,
                        extensions = "Buttons",
                        options = dt_options(paste0("new_", loaded_data$file_type, "_", Sys.Date(), current_cou_code)))
                  })
                  ######
                  # Missing data (only checked for landings)
                  ######
                  if (loaded_data$file_type == "catch_landings" & nrow(list_comp$complete) > 0) {
                    output$dt_missing <- DT::renderDataTable({
                      validate(need(globaldata$connectOK, "No connection"))
                      check_missing_data(list_comp$complete, new)
                    })
                  }
                } else {
                  output$dt_new <- DT::renderDataTable({validate(need(FALSE, "No data"))})
                  output$dt_duplicates <- DT::renderDataTable({validate(need(FALSE, "No data"))})
                  current_cou_code <- ""
                }# closes if nrow(...
                # Updated / deleted sheets exist only for these file types.
                if (loaded_data$file_type %in% c("catch_landings", "release", "aquaculture", "biomass", "mortality_rates")){
                  if (nrow(updated_from_excel) > 0){
                    output$"step1_message_updated" <- renderUI(
                        HTML(
                            paste(
                                h4("Table of updated values (xls)"),
                                "<p align='left'>Please click on excel",
                                "to download this file. <p>"
                            )))
                    globaldata$updated_values_table <- compare_with_database_updated_values(updated_from_excel, data_from_base)
                    if (nrow(globaldata$updated_values_table) == 0) stop("step1 compare_with_database_updated_values did not return any values")
                    output$dt_updated_values <- DT::renderDataTable(
                        globaldata$updated_values_table,
                        rownames = FALSE,
                        extensions = "Buttons",
                        options = dt_options(paste0("updated_", loaded_data$file_type, "_", Sys.Date(), current_cou_code)))
                  } else {
                    output$"step1_message_updated" <- renderUI("No data")
                  }
                  if (nrow(deleted_from_excel) > 0){
                    output$"step1_message_deleted" <- renderUI(
                        HTML(
                            paste(
                                h4("Table of deleted values (xls)"),
                                "<p align='left'>Please click on excel",
                                "to download this file. <p>"
                            )))
                    globaldata$deleted_values_table <- compare_with_database_deleted_values(deleted_from_excel, data_from_base)
                    output$dt_deleted_values <- DT::renderDataTable(
                        globaldata$deleted_values_table,
                        rownames = FALSE,
                        extensions = "Buttons",
                        options = dt_options(paste0("deleted_", loaded_data$file_type, "_", Sys.Date(), current_cou_code)))
                  } else {
                    output$"step1_message_deleted" <- renderUI("No data")
                  }
                }
                # Per-year summary of new / updated / unchanged / deleted rows.
                # inherits = FALSE so a stray global `years` cannot leak in.
                if (exists("years", inherits = FALSE)){
                  count_per_year <- function(df) {
                    vapply(years, function(y) length(which(df$eel_year == y)), integer(1))
                  }
                  summary_check_duplicates <- data.frame(
                      years = years,
                      nb_new = count_per_year(new),
                      nb_duplicates_updated = vapply(years, function(y) length(which(duplicates$eel_year == y & (duplicates$eel_value.base != duplicates$eel_value.xls))), integer(1)),
                      nb_duplicates_no_changes = vapply(years, function(y) length(which(duplicates$eel_year == y & (duplicates$eel_value.base == duplicates$eel_value.xls))), integer(1)),
                      nb_updated_values = count_per_year(updated_from_excel),
                      nb_deleted_values = count_per_year(deleted_from_excel))
                  output$dt_check_duplicates <- DT::renderDataTable({
                    validate(need(globaldata$connectOK, "No connection"))
                    datatable(summary_check_duplicates,
                        rownames = FALSE,
                        options = list(dom = "t",
                            scroller = TRUE,
                            scrollX = TRUE,
                            scrollY = "500px"
                        ))
                  })
                }
                #data$new <- new # new is stored in the reactive dataset to be inserted later.
              }) # shiny catch
              shinybusy::remove_modal_spinner()
            }, # expr for browser
            ignoreInit = TRUE)
      })
}
|
library(gghalves)
library(ggsignif)
library(ggsci)
library(ggpubr)
library(readxl)
library(tidyverse)

# Muscle-fibre measurements for the raincloud (half-violin + box) panels.
mf <- read_excel("Desktop/饥饿文章/r包用Excel/肌纤维箱线图.xlsx")

# Fixed display order of the starvation/feeding groups, and the pairwise
# comparisons annotated on every panel.
group_levels <- c("S0F0", "S0F3", "S3F0", "S3F3", "S6F0", "S0F6")
signif_comparisons <- list(c("S0F0", "S0F3"),
    c("S0F0", "S3F0"),
    c("S3F0", "S3F3"),
    c("S0F0", "S6F0"),
    c("S0F0", "S0F6"))

# Data transformation: order Species as a factor.
mf_plot <- mf %>%
    mutate(Species = factor(Species, levels = group_levels))
head(mf_plot)

# Summary statistics (mean, sd, n, se) per group for one measurement column,
# selected by name via the .data pronoun.
summarise_measure <- function(df, measure) {
  df %>%
      group_by(Species) %>%
      summarise(
          mean = mean(.data[[measure]]),
          sd = sd(.data[[measure]]),
          n = n()
      ) %>%
      mutate(se = sd / sqrt(n),
          Species = factor(Species, levels = group_levels))
}

# One raincloud panel: half violin + jittered points + narrow boxplot +
# mean/se marker, with t-test significance brackets. `label_y` places the
# stat_compare_means label.
raincloud_plot <- function(df, measure, label_y) {
  summ <- summarise_measure(df, measure)
  ggplot(df, aes(x = Species, y = .data[[measure]], fill = Species)) +
      geom_half_violin(aes(fill = Species),
          position = position_nudge(x = .15, y = 0),
          adjust = 1.5, trim = FALSE, colour = NA, side = 'r') +
      geom_point(aes(x = as.numeric(Species) - 0.1,
              y = .data[[measure]], color = Species),
          position = position_jitter(width = .05), size = .25, shape = 20) +
      geom_boxplot(aes(x = Species, y = .data[[measure]], fill = Species),
          outlier.shape = NA,
          width = .05,
          color = "black") +
      geom_point(data = summ,
          aes(x = Species, y = mean, group = Species, color = Species),
          shape = 18,
          size = 1.5,
          position = position_nudge(x = .1, y = 0)) +
      geom_errorbar(data = summ,
          aes(x = Species, y = mean, group = Species, colour = Species,
              ymin = mean - se, ymax = mean + se),
          width = .05,
          position = position_nudge(x = .1, y = 0)) +
      scale_color_aaas() +
      scale_fill_aaas() +
      theme(panel.background = element_rect(fill = "NA"),
          axis.line = element_line(size = 0.5, colour = "black")) +
      stat_compare_means(label.y = label_y, method = "t.test") +
      geom_signif(
          comparisons = signif_comparisons,
          step_increase = 0.1,
          map_signif_level = TRUE,  # was T; always spell out TRUE
          test = t.test) +
      theme(legend.position = "none",
          axis.title.x = element_blank())
}

# Panels for fibre density (Mfdi), diameter (Mfd) and area (Mfa).
p1 <- raincloud_plot(mf_plot, "Mfdi", 2.4)
p1
p2 <- raincloud_plot(mf_plot, "Mfd", 2)
p2
p3 <- raincloud_plot(mf_plot, "Mfa", 2.2)
p3

library(ggplot2)
library(dplyr)
library(hrbrthemes)
library(viridis)
library(readxl)
library(ggsci)
# The gapminder dataset package (loaded by the original script; unused below)
library(gapminder)

data <- read_excel("Desktop/饥饿文章/r包用Excel/肌纤维气泡图.xlsx")
# Add the inverse fibre diameter as column mfd alongside the raw columns.
data <- data %>% cbind(mfd = 1 / data$Mfd)
data

# Bubble plot: Mfdi vs Mfd, bubble size = Mfa, coloured by group, with a
# loess trend per group.
p13 <- data %>%
    # BUG FIX: was arrange(desc('Mfa')) — sorting by the constant string
    # 'Mfa' is a no-op; sort by the Mfa column so big bubbles draw first.
    arrange(desc(Mfa)) %>%
    ggplot(aes(x = `Mfdi`, y = `Mfd`, size = `Mfa`, color = Groups)) +
    geom_point(alpha = 0.5) +
    scale_size(range = c(0.1, 15), name = "Mfa") +
    # NOTE(review): fill is never mapped, so this scale is inert — confirm
    # whether it can be dropped. guide = "none" replaces deprecated FALSE.
    scale_fill_viridis(discrete = TRUE, guide = "none", option = "A") +
    theme(legend.position = "right") +
    theme_classic() +
    geom_smooth(aes(col = Groups), method = "loess", se = FALSE) +
    scale_color_aaas() +
    theme(legend.position = c(.95, .95),
        legend.justification = c("right", "top"),
        legend.box.just = "right",
        legend.margin = margin(6, 6, 6, 6)
    )
p13

library(cowplot)
# Stack the three raincloud panels, then place the bubble plot beside them.
p14 <- plot_grid(p1, p2, p3,
    labels = c('A', 'B', 'C'),
    ncol = 1, nrow = 3, align = "hv")
p14
plot_grid(p14, p13, labels = 'AUTO', nrow = 1)
|
/肌纤维箱线图+气泡图组图.R
|
no_license
|
mineraltsai/manuscript-public
|
R
| false
| false
| 7,133
|
r
|
library(gghalves)
library(ggsignif)
library(ggsci)
library(ggpubr)
library(readxl)
library(tidyverse)

# Muscle-fibre measurements for the raincloud (half-violin + box) panels.
mf <- read_excel("Desktop/饥饿文章/r包用Excel/肌纤维箱线图.xlsx")

# Fixed display order of the starvation/feeding groups, and the pairwise
# comparisons annotated on every panel.
group_levels <- c("S0F0", "S0F3", "S3F0", "S3F3", "S6F0", "S0F6")
signif_comparisons <- list(c("S0F0", "S0F3"),
    c("S0F0", "S3F0"),
    c("S3F0", "S3F3"),
    c("S0F0", "S6F0"),
    c("S0F0", "S0F6"))

# Data transformation: order Species as a factor.
mf_plot <- mf %>%
    mutate(Species = factor(Species, levels = group_levels))
head(mf_plot)

# Summary statistics (mean, sd, n, se) per group for one measurement column,
# selected by name via the .data pronoun.
summarise_measure <- function(df, measure) {
  df %>%
      group_by(Species) %>%
      summarise(
          mean = mean(.data[[measure]]),
          sd = sd(.data[[measure]]),
          n = n()
      ) %>%
      mutate(se = sd / sqrt(n),
          Species = factor(Species, levels = group_levels))
}

# One raincloud panel: half violin + jittered points + narrow boxplot +
# mean/se marker, with t-test significance brackets. `label_y` places the
# stat_compare_means label.
raincloud_plot <- function(df, measure, label_y) {
  summ <- summarise_measure(df, measure)
  ggplot(df, aes(x = Species, y = .data[[measure]], fill = Species)) +
      geom_half_violin(aes(fill = Species),
          position = position_nudge(x = .15, y = 0),
          adjust = 1.5, trim = FALSE, colour = NA, side = 'r') +
      geom_point(aes(x = as.numeric(Species) - 0.1,
              y = .data[[measure]], color = Species),
          position = position_jitter(width = .05), size = .25, shape = 20) +
      geom_boxplot(aes(x = Species, y = .data[[measure]], fill = Species),
          outlier.shape = NA,
          width = .05,
          color = "black") +
      geom_point(data = summ,
          aes(x = Species, y = mean, group = Species, color = Species),
          shape = 18,
          size = 1.5,
          position = position_nudge(x = .1, y = 0)) +
      geom_errorbar(data = summ,
          aes(x = Species, y = mean, group = Species, colour = Species,
              ymin = mean - se, ymax = mean + se),
          width = .05,
          position = position_nudge(x = .1, y = 0)) +
      scale_color_aaas() +
      scale_fill_aaas() +
      theme(panel.background = element_rect(fill = "NA"),
          axis.line = element_line(size = 0.5, colour = "black")) +
      stat_compare_means(label.y = label_y, method = "t.test") +
      geom_signif(
          comparisons = signif_comparisons,
          step_increase = 0.1,
          map_signif_level = TRUE,  # was T; always spell out TRUE
          test = t.test) +
      theme(legend.position = "none",
          axis.title.x = element_blank())
}

# Panels for fibre density (Mfdi), diameter (Mfd) and area (Mfa).
p1 <- raincloud_plot(mf_plot, "Mfdi", 2.4)
p1
p2 <- raincloud_plot(mf_plot, "Mfd", 2)
p2
p3 <- raincloud_plot(mf_plot, "Mfa", 2.2)
p3

library(ggplot2)
library(dplyr)
library(hrbrthemes)
library(viridis)
library(readxl)
library(ggsci)
# The gapminder dataset package (loaded by the original script; unused below)
library(gapminder)

data <- read_excel("Desktop/饥饿文章/r包用Excel/肌纤维气泡图.xlsx")
# Add the inverse fibre diameter as column mfd alongside the raw columns.
data <- data %>% cbind(mfd = 1 / data$Mfd)
data

# Bubble plot: Mfdi vs Mfd, bubble size = Mfa, coloured by group, with a
# loess trend per group.
p13 <- data %>%
    # BUG FIX: was arrange(desc('Mfa')) — sorting by the constant string
    # 'Mfa' is a no-op; sort by the Mfa column so big bubbles draw first.
    arrange(desc(Mfa)) %>%
    ggplot(aes(x = `Mfdi`, y = `Mfd`, size = `Mfa`, color = Groups)) +
    geom_point(alpha = 0.5) +
    scale_size(range = c(0.1, 15), name = "Mfa") +
    # NOTE(review): fill is never mapped, so this scale is inert — confirm
    # whether it can be dropped. guide = "none" replaces deprecated FALSE.
    scale_fill_viridis(discrete = TRUE, guide = "none", option = "A") +
    theme(legend.position = "right") +
    theme_classic() +
    geom_smooth(aes(col = Groups), method = "loess", se = FALSE) +
    scale_color_aaas() +
    theme(legend.position = c(.95, .95),
        legend.justification = c("right", "top"),
        legend.box.just = "right",
        legend.margin = margin(6, 6, 6, 6)
    )
p13

library(cowplot)
# Stack the three raincloud panels, then place the bubble plot beside them.
p14 <- plot_grid(p1, p2, p3,
    labels = c('A', 'B', 'C'),
    ncol = 1, nrow = 3, align = "hv")
p14
plot_grid(p14, p13, labels = 'AUTO', nrow = 1)
|
/RW/randomForest.R
|
no_license
|
shimaXX/Rsource
|
R
| false
| false
| 2,653
|
r
| ||
# t-SNE visualisation of the KDD Cup 2015 dropout training features:
# embeds the rescaled course features in 2-D and colours each enrollment
# by its dropout label, saving the figure to tsne1.png.
require('data.table')
library(xgboost)
library(Matrix)
library(ggplot2)
library(Rtsne)
# https://beta.oreilly.com/learning/an-illustrated-introduction-to-the-t-sne-algorithm
setwd("~/Dropbox/kddcup2015/r")
# load data
## train data
train.feature = fread('../data/train_course_feature.csv')
train.truth = fread('../data/truth_train.csv')
train.truth = train.truth[1:nrow(train.truth),]
#train.feature$fst_day <- NULL
#train.feature$lst_day <- NULL
setnames(train.truth, colnames(train.truth), c('enrollment_id', 'dropout'))
# Attach the dropout label to each enrollment's feature row.
# NOTE(review): merge() sorts by enrollment_id, while the t-SNE below runs
# on train.feature in its original order — verify the labels assigned to
# `embedding$Class` actually align with the embedded rows.
train.dataset = merge(train.feature, train.truth, by='enrollment_id')
train.dataset$enrollment_id <- NULL
train.feature$enrollment_id <- NULL
# Squash each feature into (0, 1) with a logistic of its square root;
# assumes all feature values are non-negative (sqrt of a negative value
# would yield NaN here) — TODO confirm.
train.feature = 1/(1+exp(-sqrt(train.feature)))
# 2-D Barnes-Hut t-SNE (theta = 0.5) with an initial PCA step.
tsne <- Rtsne(as.matrix(train.feature), check_duplicates = FALSE, pca = TRUE, perplexity=30, theta=0.5, dims=2)
embedding <- as.data.frame(tsne$Y)
embedding$Class <- as.factor(train.dataset$dropout)
# Scatter plot of the embedding coloured by class, with axis decoration
# stripped so only the point cloud remains.
p <- ggplot(embedding, aes(x=V1, y=V2, color=Class)) +
  geom_point(size=0.5) +
  guides(colour = guide_legend(override.aes = list(size=6))) +
  xlab("") + ylab("") +
  ggtitle("t-SNE 2D Embedding of Dropout Data") +
  theme_light(base_size=20) +
  theme(strip.background = element_blank(),
        strip.text.x = element_blank(),
        axis.text.x = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks = element_blank(),
        axis.line = element_blank(),
        panel.border = element_blank())
ggsave("tsne1.png", p, width=8, height=6, units="in")
|
/r/tsne.R
|
no_license
|
hezila/kdd2015
|
R
| false
| false
| 1,565
|
r
|
require('data.table')
library(xgboost)
library(Matrix)
library(ggplot2)
library(Rtsne)
# https://beta.oreilly.com/learning/an-illustrated-introduction-to-the-t-sne-algorithm
setwd("~/Dropbox/kddcup2015/r")
# load data
## train data
train.feature = fread('../data/train_course_feature.csv')
train.truth = fread('../data/truth_train.csv')
train.truth = train.truth[1:nrow(train.truth),]
#train.feature$fst_day <- NULL
#train.feature$lst_day <- NULL
setnames(train.truth, colnames(train.truth), c('enrollment_id', 'dropout'))
train.dataset = merge(train.feature, train.truth, by='enrollment_id')
train.dataset$enrollment_id <- NULL
train.feature$enrollment_id <- NULL
train.feature = 1/(1+exp(-sqrt(train.feature)))
tsne <- Rtsne(as.matrix(train.feature), check_duplicates = FALSE, pca = TRUE, perplexity=30, theta=0.5, dims=2)
embedding <- as.data.frame(tsne$Y)
embedding$Class <- as.factor(train.dataset$dropout)
p <- ggplot(embedding, aes(x=V1, y=V2, color=Class)) +
geom_point(size=0.5) +
guides(colour = guide_legend(override.aes = list(size=6))) +
xlab("") + ylab("") +
ggtitle("t-SNE 2D Embedding of Dropout Data") +
theme_light(base_size=20) +
theme(strip.background = element_blank(),
strip.text.x = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.line = element_blank(),
panel.border = element_blank())
ggsave("tsne1.png", p, width=8, height=6, units="in")
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2742
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2742
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_2_3_7.sat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 959
c no.of clauses 2742
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2742
c
c QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_2_3_7.sat.qdimacs 959 2742 E1 [] 0 28 931 2742 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/mult-matrix/mult_bool_matrix_2_3_7.sat/mult_bool_matrix_2_3_7.sat.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 650
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 2742
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2742
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_2_3_7.sat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 959
c no.of clauses 2742
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2742
c
c QBFLIB/Tentrup/mult-matrix/mult_bool_matrix_2_3_7.sat.qdimacs 959 2742 E1 [] 0 28 931 2742 NONE
|
############################ housekeeping
rm(list = ls())
graphics.off()
setwd("~/Documents/ResearchProject/Code/")
# load up the relevant packages
library(caper)
library(phytools)
library(phylobase)
library(data.table)
library(geiger)
library(pez)
# import data
load("../Data/Red_Categorymod.Rdata")
load("../Data/one_tree.Rdata")
load("../Data/OrderData_Groups.Rdata")
tree <- read.csv("../Data/PFC_taxonomy.csv", stringsAsFactors = F)
tree$scientificName <- gsub(" ", "_", tree$genus.species)
#### source functions
source("EDGE_PD_LOSS_NONNY_v2_modified.R")
load("../Data/pext_for_HPC.Rdata")
pext$pext[which(is.na(pext$category))] <- sample(pext.NA, length(pext$category[which(is.na(pext$category))]), replace = T)
### run EDGE
print("Running EDGE calcs")
whole.tree <- EDGE.only.calc(one.tree, pext)
print("Saving results")
save(whole.tree, file = "../Data/EDGE_wholetree_runresults_v2.Rdata")
load("../Data/EDGE_wholetree_runresults_v2.Rdata")
print("Now looking to find PD loss")
one.phylo4 <- as(one.tree, "phylo4")
# extract the edge lengths
tot <- c()
for (i in 1:length(one.phylo4@edge.length)) {
l <- one.phylo4@edge.length[[i]]
tot <- c(tot, l)
}
tot <- tot[-1]
PD.untransformed <- sum(as.numeric(((tot)))) # full PD
tot.t <- c()
whole.tree.edge <- whole.tree[[1]]
whole.tree.phylo <- whole.tree[[2]]
for (i in 1:length(whole.tree.phylo@edge.length)) {
l <- whole.tree.phylo@edge.length[[i]]
tot.t <- c(tot.t, l)
}############################ housekeeping
rm(list = ls())
graphics.off()
setwd("~/Documents/ResearchProject/Code/")
# load up the relevant packages
library(caper)
library(phytools)
library(phylobase)
library(data.table)
library(geiger)
library(pez)
# import data
load("../Data/Red_Categorymod.Rdata")
load("../Data/one_tree.Rdata")
load("../Data/OrderData_Groups.Rdata")
tree <- read.csv("../Data/PFC_taxonomy.csv", stringsAsFactors = F)
tree$scientificName <- gsub(" ", "_", tree$genus.species)
#### source functions
source("EDGE_PD_LOSS_NONNY_v2_modified.R")
load("../Data/pext_for_HPC.Rdata")
pext$pext[which(is.na(pext$category))] <- sample(pext.NA, length(pext$category[which(is.na(pext$category))]), replace = T)
#### Step one: split the orders into groups
# then create a tree for just that order group
print("Running EDGE calcs")
whole.tree <- EDGE.only.calc(one.tree, pext)
print("Saving results")
save(whole.tree, file = "../Data/EDGE_wholetree_runresults_v2.Rdata")
load("../Data/EDGE_wholetree_runresults_v2.Rdata")
print("Now looking to find PD loss")
one.phylo4 <- as(one.tree, "phylo4")
# extract the edge lengths
tot <- c()
for (i in 1:length(one.phylo4@edge.length)) {
l <- one.phylo4@edge.length[[i]]
tot <- c(tot, l)
}
tot <- tot[-1]
PD.untransformed <- sum(as.numeric(((tot)))) # full PD
tot.t <- c()
whole.tree.edge <- whole.tree[[1]]
tot.t <- tot.t[-1]
# Total PD lost and the percentage relative to the untransformed tree.
total.PD.loss <- sum(as.numeric(tot.t))
loss.percentage <- total.PD.loss / PD.untransformed * 100
results <- list(tot, PD.untransformed, tot.t, total.PD.loss, loss.percentage)
print("Saving results")
# BUG FIX: the destination must be given via `file =`; passed positionally,
# save() treats the string as the name of another object to save and errors
# ("object '../Data/...' not found") instead of writing the file.
save(results, file = "../Data/EDGE_wholetree_run.Rdata")
print("Finished running script")
|
/Code/EDGE_wholetree.R
|
no_license
|
OScott19/ResearchProject
|
R
| false
| false
| 3,159
|
r
|
############################ housekeeping
rm(list = ls())
graphics.off()
setwd("~/Documents/ResearchProject/Code/")
# load up the relevant packages
library(caper)
library(phytools)
library(phylobase)
library(data.table)
library(geiger)
library(pez)
# import data
load("../Data/Red_Categorymod.Rdata")
load("../Data/one_tree.Rdata")
load("../Data/OrderData_Groups.Rdata")
tree <- read.csv("../Data/PFC_taxonomy.csv", stringsAsFactors = F)
tree$scientificName <- gsub(" ", "_", tree$genus.species)
#### source functions
source("EDGE_PD_LOSS_NONNY_v2_modified.R")
load("../Data/pext_for_HPC.Rdata")
pext$pext[which(is.na(pext$category))] <- sample(pext.NA, length(pext$category[which(is.na(pext$category))]), replace = T)
### run EDGE
print("Running EDGE calcs")
whole.tree <- EDGE.only.calc(one.tree, pext)
print("Saving results")
save(whole.tree, file = "../Data/EDGE_wholetree_runresults_v2.Rdata")
load("../Data/EDGE_wholetree_runresults_v2.Rdata")
print("Now looking to find PD loss")
one.phylo4 <- as(one.tree, "phylo4")
# extract the edge lengths
tot <- c()
for (i in 1:length(one.phylo4@edge.length)) {
l <- one.phylo4@edge.length[[i]]
tot <- c(tot, l)
}
tot <- tot[-1]
PD.untransformed <- sum(as.numeric(((tot)))) # full PD
tot.t <- c()
whole.tree.edge <- whole.tree[[1]]
whole.tree.phylo <- whole.tree[[2]]
for (i in 1:length(whole.tree.phylo@edge.length)) {
l <- whole.tree.phylo@edge.length[[i]]
tot.t <- c(tot.t, l)
}############################ housekeeping
rm(list = ls())
graphics.off()
setwd("~/Documents/ResearchProject/Code/")
# load up the relevant packages
library(caper)
library(phytools)
library(phylobase)
library(data.table)
library(geiger)
library(pez)
# import data
load("../Data/Red_Categorymod.Rdata")
load("../Data/one_tree.Rdata")
load("../Data/OrderData_Groups.Rdata")
tree <- read.csv("../Data/PFC_taxonomy.csv", stringsAsFactors = F)
tree$scientificName <- gsub(" ", "_", tree$genus.species)
#### source functions
source("EDGE_PD_LOSS_NONNY_v2_modified.R")
load("../Data/pext_for_HPC.Rdata")
pext$pext[which(is.na(pext$category))] <- sample(pext.NA, length(pext$category[which(is.na(pext$category))]), replace = T)
#### Step one: split the orders into groups
# then create a tree for just that order group
print("Running EDGE calcs")
whole.tree <- EDGE.only.calc(one.tree, pext)
print("Saving results")
save(whole.tree, file = "../Data/EDGE_wholetree_runresults_v2.Rdata")
load("../Data/EDGE_wholetree_runresults_v2.Rdata")
print("Now looking to find PD loss")
one.phylo4 <- as(one.tree, "phylo4")
# extract the edge lengths
tot <- c()
for (i in 1:length(one.phylo4@edge.length)) {
l <- one.phylo4@edge.length[[i]]
tot <- c(tot, l)
}
tot <- tot[-1]
PD.untransformed <- sum(as.numeric(((tot)))) # full PD
tot.t <- c()
whole.tree.edge <- whole.tree[[1]]
tot.t <- tot.t[-1]
# Total PD lost and the percentage relative to the untransformed tree.
total.PD.loss <- sum(as.numeric(tot.t))
loss.percentage <- total.PD.loss / PD.untransformed * 100
results <- list(tot, PD.untransformed, tot.t, total.PD.loss, loss.percentage)
print("Saving results")
# BUG FIX: the destination must be given via `file =`; passed positionally,
# save() treats the string as the name of another object to save and errors
# instead of writing the file.
save(results, file = "../Data/EDGE_wholetree_run.Rdata")
print("Finished running script")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.dabest}
\alias{plot.dabest}
\title{Estimation Plot}
\usage{
\method{plot}{dabest}(x, ..., color.column = NULL, palette = "Set1",
float.contrast = TRUE, slopegraph = TRUE,
group.summaries = "mean_sd", rawplot.type = c("swarmplot",
"sinaplot"), rawplot.ylim = NULL, rawplot.ylabel = NULL,
rawplot.markersize = 2, rawplot.groupwidth = 0.3,
effsize.ylim = NULL, effsize.ylabel = NULL, effsize.markersize = 4,
theme = ggplot2::theme_classic(), tick.fontsize = 11,
axes.title.fontsize = 14, swarmplot.params = NULL,
sinaplot.params = NULL, slopegraph.params = NULL)
}
\arguments{
\item{x}{A \code{dabest} object, generated by the function of the same name.}
\item{...}{Signature for S3 generic function.}
\item{color.column}{default \code{NULL}. This is a column in the data.frame
passed to the \code{dabest} function. This column will be treated as a
\link{factor} and used to color the datapoints in the rawdata swarmplot.}
\item{palette}{default "Set1". Accepts any one of the ggplot2 palettes.
See the "Palettes" section in \link{scale_color_brewer}.}
\item{float.contrast}{default \code{TRUE}. If \code{idx} in the
\code{dabest} object contains only 2 groups, \code{float.contrast = TRUE}
will plot the effect size and the bootstrap confidence interval in a
horizontally-aligned axes (also known as a
\href{https://bit.ly/2NhqUAb}{Gardner-Altman plot.})}
\item{slopegraph}{boolean, default \code{TRUE}. If the \code{dabest} object
contains paired comparisons, \code{slopegraph = TRUE} will plot the rawdata
as a \href{http://charliepark.org/slopegraphs/}{Tufte slopegraph}.}
\item{group.summaries}{"mean_sd", "median_quartiles", or \code{NULL}. Plots
the summary statistics for each group. If 'mean_sd', then the mean and
standard deviation of each group is plotted as a gapped line beside each
group. If 'median_quartiles', then the median and 25th & 75th percentiles
of each group is plotted instead. If \code{group.summaries = NULL}, the
summaries are not shown.}
\item{rawplot.type}{default "swarmplot". Accepts either "swarmplot" or
  "sinaplot". See \link{geom_quasirandom} and \link{geom_sina} for more
  information.}
\item{rawplot.ylim}{default \code{NULL}. Enter a custom y-limit for the
rawdata plot. Accepts a vector of length 2 (e.g. c(-50, 50)) that will be
passed along to \link{coord_cartesian}.}
\item{rawplot.ylabel}{default \code{NULL}. Accepts a string that is used to
label the rawdata y-axis. If \code{NULL}, the column name passed to
\code{y} is used.}
\item{rawplot.markersize}{default 2. This is the size (in points) of the dots
used to plot the individual datapoints. There are 72 points in one inch.
See \href{https://en.wikipedia.org/wiki/Point_(typography)}{this article}
for more info.}
\item{rawplot.groupwidth}{default 0.3. This is the maximum amount of spread
(in the x-direction) allowed, for each group.}
\item{effsize.ylim}{default \code{NULL}. Enter a custom y-limit for the
effect size plot. This parameter is ignored if \code{float.contrast =
TRUE}. Accepts a vector of length 2 (e.g. \code{c(-50, 50)}) that will be
passed along to \link{coord_cartesian}.}
\item{effsize.ylabel}{default \code{NULL}. Accepts a string that is used to
label the effect size y-axis. If \code{NULL}, this axes will be labeled
"(un)paired func difference" , where \code{func} is the function passed
to \code{dabest}.}
\item{effsize.markersize}{default 4. This is the size (in points) of the dots
used to indicate the effect size.}
\item{theme}{default \link{theme_classic}.}
\item{tick.fontsize}{default 11. This controls the font size (in points) of
all tick labels.}
\item{axes.title.fontsize}{default 14. This determines the font size (in
points) of the axes titles.}
\item{swarmplot.params}{default \code{NULL}. Supply list of
\code{keyword = value} pairs to \link{geom_quasirandom}.}
\item{sinaplot.params}{default \code{NULL}. Supply list of
\code{keyword = value} pairs to \code{ggforce::geom_sina()}.}
\item{slopegraph.params}{default \code{NULL}. Supply list of
\code{keyword = value} pairs to \code{ggplot2::geom_line()}. This
controls the appearance of the lines plotted for a paired slopegraph.}
}
\value{
A \code{ggplot} object.
}
\description{
An estimation plot has two key features.
\enumerate{
\item{It presents all datapoints as a
\href{https://github.com/eclarke/ggbeeswarm#introduction}{swarmplot} or
\href{https://CRAN.R-project.org/package=sinaplot}{sinaplot},
which orders each point to display the underlying distribution.}
\item{It presents the effect size as a bootstrap 95 percent confidence
interval on a separate but aligned axes.}
}
Estimation plots emerge from estimation statistics, an intuitive framework
that avoids the pitfalls of significance testing. It uses familiar
statistical concepts: means, mean differences, and error bars.
More importantly, it focuses on the effect size of one's
experiment/intervention, as opposed to a false dichotomy engendered
by \emph{P} values.
This function takes the output of the \code{\link{dabest}} function
and produces an estimation plot.
}
\section{References}{
\href{https://doi.org/10.1101/377978}{Moving beyond P values: Everyday data
analysis with estimation plots.} (2018) Joses Ho, Tayfun Tumkaya, Sameer
Aryal, Hyungwon Choi, Adam Claridge-Chang
}
\examples{
# Performing unpaired (two independent groups) analysis.
unpaired_mean_diff <- dabest(iris, Species, Petal.Width,
idx = c("setosa", "versicolor"),
paired = FALSE)
# Create a Gardner-Altman estimation plot.
plot(unpaired_mean_diff)
# Create a Cumming estimation plot instead.
plot(unpaired_mean_diff, float.contrast = FALSE)
# Comparing versicolor and virginica petal width to setosa petal width.
shared_control_data <- dabest(iris, Species, Petal.Width,
idx = c("setosa", "versicolor", "virginica"),
paired = FALSE
)
# Create a Cumming estimation plot.
plot(shared_control_data)
}
\seealso{
The \code{\link{dabest}} function.
Run \code{vignette("Using dabestr", package = "dabestr")} in the console to
read more about using parameters to control the plot features.
}
|
/man/plot.dabest.Rd
|
permissive
|
anhnguyendepocen/dabestr
|
R
| false
| true
| 6,328
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot.dabest}
\alias{plot.dabest}
\title{Estimation Plot}
\usage{
\method{plot}{dabest}(x, ..., color.column = NULL, palette = "Set1",
float.contrast = TRUE, slopegraph = TRUE,
group.summaries = "mean_sd", rawplot.type = c("swarmplot",
"sinaplot"), rawplot.ylim = NULL, rawplot.ylabel = NULL,
rawplot.markersize = 2, rawplot.groupwidth = 0.3,
effsize.ylim = NULL, effsize.ylabel = NULL, effsize.markersize = 4,
theme = ggplot2::theme_classic(), tick.fontsize = 11,
axes.title.fontsize = 14, swarmplot.params = NULL,
sinaplot.params = NULL, slopegraph.params = NULL)
}
\arguments{
\item{x}{A \code{dabest} object, generated by the function of the same name.}
\item{...}{Signature for S3 generic function.}
\item{color.column}{default \code{NULL}. This is a column in the data.frame
passed to the \code{dabest} function. This column will be treated as a
\link{factor} and used to color the datapoints in the rawdata swarmplot.}
\item{palette}{default "Set1". Accepts any one of the ggplot2 palettes.
See the "Palettes" section in \link{scale_color_brewer}.}
\item{float.contrast}{default \code{TRUE}. If \code{idx} in the
\code{dabest} object contains only 2 groups, \code{float.contrast = TRUE}
will plot the effect size and the bootstrap confidence interval in a
horizontally-aligned axes (also known as a
\href{https://bit.ly/2NhqUAb}{Gardner-Altman plot.})}
\item{slopegraph}{boolean, default \code{TRUE}. If the \code{dabest} object
contains paired comparisons, \code{slopegraph = TRUE} will plot the rawdata
as a \href{http://charliepark.org/slopegraphs/}{Tufte slopegraph}.}
\item{group.summaries}{"mean_sd", "median_quartiles", or \code{NULL}. Plots
the summary statistics for each group. If 'mean_sd', then the mean and
standard deviation of each group is plotted as a gapped line beside each
group. If 'median_quartiles', then the median and 25th & 75th percentiles
of each group is plotted instead. If \code{group.summaries = NULL}, the
summaries are not shown.}
\item{rawplot.type}{default "swarmplot". Accepts either "swarmplot" or
  "sinaplot". See \link{geom_quasirandom} and \link{geom_sina} for more
  information.}
\item{rawplot.ylim}{default \code{NULL}. Enter a custom y-limit for the
rawdata plot. Accepts a vector of length 2 (e.g. c(-50, 50)) that will be
passed along to \link{coord_cartesian}.}
\item{rawplot.ylabel}{default \code{NULL}. Accepts a string that is used to
label the rawdata y-axis. If \code{NULL}, the column name passed to
\code{y} is used.}
\item{rawplot.markersize}{default 2. This is the size (in points) of the dots
used to plot the individual datapoints. There are 72 points in one inch.
See \href{https://en.wikipedia.org/wiki/Point_(typography)}{this article}
for more info.}
\item{rawplot.groupwidth}{default 0.3. This is the maximum amount of spread
(in the x-direction) allowed, for each group.}
\item{effsize.ylim}{default \code{NULL}. Enter a custom y-limit for the
effect size plot. This parameter is ignored if \code{float.contrast =
TRUE}. Accepts a vector of length 2 (e.g. \code{c(-50, 50)}) that will be
passed along to \link{coord_cartesian}.}
\item{effsize.ylabel}{default \code{NULL}. Accepts a string that is used to
label the effect size y-axis. If \code{NULL}, this axes will be labeled
"(un)paired func difference" , where \code{func} is the function passed
to \code{dabest}.}
\item{effsize.markersize}{default 4. This is the size (in points) of the dots
used to indicate the effect size.}
\item{theme}{default \link{theme_classic}.}
\item{tick.fontsize}{default 11. This controls the font size (in points) of
all tick labels.}
\item{axes.title.fontsize}{default 14. This determines the font size (in
points) of the axes titles.}
\item{swarmplot.params}{default \code{NULL}. Supply list of
\code{keyword = value} pairs to \link{geom_quasirandom}.}
\item{sinaplot.params}{default \code{NULL}. Supply list of
\code{keyword = value} pairs to \code{ggforce::geom_sina()}.}
\item{slopegraph.params}{default \code{NULL}. Supply list of
\code{keyword = value} pairs to \code{ggplot2::geom_line()}. This
controls the appearance of the lines plotted for a paired slopegraph.}
}
\value{
A \code{ggplot} object.
}
\description{
An estimation plot has two key features.
\enumerate{
\item{It presents all datapoints as a
\href{https://github.com/eclarke/ggbeeswarm#introduction}{swarmplot} or
\href{https://CRAN.R-project.org/package=sinaplot}{sinaplot},
which orders each point to display the underlying distribution.}
\item{It presents the effect size as a bootstrap 95 percent confidence
interval on a separate but aligned axes.}
}
Estimation plots emerge from estimation statistics, an intuitive framework
that avoids the pitfalls of significance testing. It uses familiar
statistical concepts: means, mean differences, and error bars.
More importantly, it focuses on the effect size of one's
experiment/intervention, as opposed to a false dichotomy engendered
by \emph{P} values.
This function takes the output of the \code{\link{dabest}} function
and produces an estimation plot.
}
\section{References}{
\href{https://doi.org/10.1101/377978}{Moving beyond P values: Everyday data
analysis with estimation plots.} (2018) Joses Ho, Tayfun Tumkaya, Sameer
Aryal, Hyungwon Choi, Adam Claridge-Chang
}
\examples{
# Performing unpaired (two independent groups) analysis.
unpaired_mean_diff <- dabest(iris, Species, Petal.Width,
idx = c("setosa", "versicolor"),
paired = FALSE)
# Create a Gardner-Altman estimation plot.
plot(unpaired_mean_diff)
# Create a Cumming estimation plot instead.
plot(unpaired_mean_diff, float.contrast = FALSE)
# Comparing versicolor and virginica petal width to setosa petal width.
shared_control_data <- dabest(iris, Species, Petal.Width,
idx = c("setosa", "versicolor", "virginica"),
paired = FALSE
)
# Create a Cumming estimation plot.
plot(shared_control_data)
}
\seealso{
The \code{\link{dabest}} function.
Run \code{vignette("Using dabestr", package = "dabestr")} in the console to
read more about using parameters to control the plot features.
}
|
## run_analysis.R
library(reshape2)
# step1
# Merges the training and the test sets to create one data set.
# argument: [path] - data set path (ex: /work/Getting-and-Cleaning-Data/data)
# return: [step1_val] - train & test data
step1 <- function(path) {
  # Merge the training and test sets into one data set.
  # path: data set directory (e.g. /work/Getting-and-Cleaning-Data/data)
  # Returns: data frame of stacked train + test rows; measurement columns
  #          first, then the activity label, then the subject id.
  #
  # Build paths with file.path() instead of setwd(): the original changed
  # the working directory and would have left it unrestored if any
  # read.table() call errored.
  xtrain   <- read.table(file.path(path, "train", "X_train.txt"))
  ytrain   <- read.table(file.path(path, "train", "y_train.txt"))
  subtrain <- read.table(file.path(path, "train", "subject_train.txt"))
  xtest    <- read.table(file.path(path, "test", "X_test.txt"))
  ytest    <- read.table(file.path(path, "test", "y_test.txt"))
  subtest  <- read.table(file.path(path, "test", "subject_test.txt"))
  # Train rows stacked on top of test rows.
  rbind(cbind(xtrain, ytrain, subtrain), cbind(xtest, ytest, subtest))
}
# step2
# Extracts only the measurements on the mean and standard deviation for each measurement.
# argument: [path] - data set path (ex: /work/Getting-and-Cleaning-Data/data)
# [step1_val] - result step1 data
# return: [step2_val] - Extracts data set
step2 <- function(path, step1_val) {
  # Extract only the mean() and std() measurements from the merged data.
  # path: data set directory containing features.txt (index, feature name)
  # step1_val: merged data from step1 (last two columns = label, subject)
  # Returns: data frame of selected measurements plus "label" and "subject".
  #
  # Read features.txt relative to `path` instead of via setwd(), which the
  # original failed to restore if read.table() errored.
  features <- read.table(file.path(path, "features.txt"))
  # Indices of features whose name contains literal "mean()" or "std()".
  keep_idx <- grep("mean\\(\\)|std\\(\\)", features[, 2])
  # Carry along the last two columns (activity label and subject id).
  n_col <- length(step1_val)
  extracted <- step1_val[, append(keep_idx, c(n_col - 1, n_col))]
  # Drop the "()" from feature names and label the two id columns.
  feature_names <- gsub("\\(\\)", "", features[keep_idx, 2])
  names(extracted) <- append(feature_names, c("label", "subject"))
  extracted
}
# step3
# Uses descriptive activity names to name the activities in the data set
# argument: [path] - data set path (ex: /work/Getting-and-Cleaning-Data/data)
# [step2_val] - result step2 data
# return: [step3_val] - name the activities in the data set
step3 <- function(path, step2_val) {
  # Attach descriptive activity names to the data set.
  # path: data set directory containing activity_labels.txt (code, name)
  # step2_val: output of step2; its second-to-last column is the label code
  # Returns: step2_val with an extra "activity" column of activity names.
  #
  # Read activity_labels.txt relative to `path` instead of via setwd(),
  # which the original failed to restore if read.table() errored.
  activity <- read.table(file.path(path, "activity_labels.txt"))
  # Look up each row's label code in the activity table (column 2 holds
  # the descriptive name) and append it as a new column.
  activityLabel <- activity[step2_val[, length(step2_val) - 1], 2]
  joined <- cbind(step2_val, activityLabel)
  names(joined)[length(joined)] <- "activity"
  joined
}
# step4
# Appropriately labels the data set with descriptive variable names.
# argument: [step3_val] - result step3 data
# return: [step4_val] - name on the label
step4 <- function(step3_val) {
  # Expand abbreviated measurement names into descriptive labels.
  # step3_val: labelled data set from step3
  # Returns: the same data set with descriptive column names.
  #
  # The rules are applied in order: the anchored prefix rules ("^t", "^f")
  # run before the infix expansions.
  renamed <- step3_val
  rules <- list(
    c("^t",   "denote time "),
    c("^f",   "indicate frequency domain signals "),
    c("Acc",  "Acceleration"),
    c("Gyro", "Gyroscope"),
    c("Mag",  " Magnitude")
  )
  labels <- names(renamed)
  for (rule in rules) {
    labels <- gsub(rule[1], rule[2], labels)
  }
  names(renamed) <- labels
  renamed
}
# step5
# From the data set in step 4, creates a second,
# independent tidy data set with the average of each variable for each activity and each subject.
# argument: [step4_val] - result step4 data
# return: [step5_val] - result data set
step5 <- function(step4_val) {
  # Average every measurement for each subject/activity pair.
  # step4_val: labelled data set from step4 (must contain "subject" and
  #            "activity" columns)
  # Returns: one row per subject-activity pair with the mean of each
  #          measurement column. Requires reshape2 (loaded at file top).
  #
  # Melt to long form keyed by subject/activity, then cast back to wide,
  # applying mean() within each subject-activity-variable cell.
  v_melt <- melt(step4_val, id = c("subject","activity"))
  v_mean <- dcast(v_melt, subject + activity ~ variable, mean)
  return(v_mean)
}
# step_1to5
# cleanup data and write txt file
# argument: [path] - data set path (ex: /work/Getting-and-Cleaning-Data/data)
# return: [step5_val] - result step5 data
#
step_1to5 <- function(path) {
  # Run the full cleanup pipeline (steps 1-5) and write the tidy summary
  # to "outputdata_step5.txt" inside `path`.
  # path: data set directory (e.g. /work/Getting-and-Cleaning-Data/data)
  # Returns: the step5 result (tidy averaged data set).
  t1 <- step1(path)
  t2 <- step2(path, t1)
  t3 <- step3(path, t2)
  t4 <- step4(t3)
  t5 <- step5(t4)
  # The original relied on setwd() (not restored if write.table errored)
  # and on partial argument matching of `row.name`; build the output path
  # explicitly and spell out `row.names` instead.
  write.table(t5, file.path(path, "outputdata_step5.txt"), row.names = FALSE)
  t5
}
|
/run_analysis.R
|
no_license
|
daxanya1/Getting-and-Cleaning-Data
|
R
| false
| false
| 3,985
|
r
|
## run_analysis.R
library(reshape2)
# step1
# Merges the training and the test sets to create one data set.
# argument: [path] - data set path (ex: /work/Getting-and-Cleaning-Data/data)
# return: [step1_val] - train & test data
step1 <- function(path) {
# backup and replace working dir
backup_wd <- getwd()
setwd(path)
xtrain <- read.table("train/X_train.txt")
ytrain <- read.table("train/y_train.txt")
subtrain <- read.table("train/subject_train.txt")
xtest <- read.table("test/X_test.txt")
ytest <- read.table("test/y_test.txt")
subtest <- read.table("test/subject_test.txt")
# restore working dir
setwd(backup_wd)
wholedata <- rbind(cbind(xtrain,ytrain,subtrain),cbind(xtest,ytest,subtest))
return(wholedata)
}
# step2
# Extracts only the measurements on the mean and standard deviation for each measurement.
# argument: [path] - data set path (ex: /work/Getting-and-Cleaning-Data/data)
# [step1_val] - result step1 data
# return: [step2_val] - Extracts data set
step2 <- function(path, step1_val) {
# backup and replace working dir
backup_wd <- getwd()
setwd(path)
features <- read.table("features.txt")
# restore working dir
setwd(backup_wd)
# retrive the index that contains the "mean" or "std" in a variable
retrive_indices <- grep("mean\\(\\)|std\\(\\)", features[, 2])
# leaves the Y and Subject to index
retrivedata <- step1_val[, append(retrive_indices, c(length(step1_val)-1,length(step1_val)))]
# name on the label
namelist <- gsub("\\(\\)", "", features[retrive_indices, 2])
names(retrivedata) <- append(namelist,c("label","subject"))
return(retrivedata)
}
# step3
# Uses descriptive activity names to name the activities in the data set
# argument: [path] - data set path (ex: /work/Getting-and-Cleaning-Data/data)
# [step2_val] - result step2 data
# return: [step3_val] - name the activities in the data set
step3 <- function(path, step2_val) {
# backup and replace working dir
backup_wd <- getwd()
setwd(path)
activity <- read.table("activity_labels.txt")
# restore working dir
setwd(backup_wd)
# create activityLabel and bind data
activityLabel <- activity[step2_val[, length(step2_val)-1], 2]
activityjoindata <- cbind(step2_val, activityLabel)
names(activityjoindata)[length(activityjoindata)] <- "activity"
return(activityjoindata)
}
# step4
# Appropriately labels the data set with descriptive variable names.
# argument: [step3_val] - result step3 data
# return: [step4_val] - name on the label
step4 <- function(step3_val) {
step4_val <- step3_val
t4name <- names(step4_val)
t4name <- gsub("^t","denote time ",t4name)
t4name <- gsub("^f","indicate frequency domain signals ",t4name)
t4name <- gsub("Acc","Acceleration",t4name)
t4name <- gsub("Gyro","Gyroscope",t4name)
t4name <- gsub("Mag"," Magnitude",t4name)
names(step4_val) <- t4name
return(step4_val)
}
# step5
# From the data set in step 4, creates a second,
# independent tidy data set with the average of each variable for each activity and each subject.
# argument: [step4_val] - result step4 data
# return: [step5_val] - result data set
step5 <- function(step4_val) {
v_melt <- melt(step4_val, id = c("subject","activity"))
v_mean <- dcast(v_melt, subject + activity ~ variable, mean)
return(v_mean)
}
# step_1to5
# cleanup data and write txt file
# argument: [path] - data set path (ex: /work/Getting-and-Cleaning-Data/data)
# return: [step5_val] - result step5 data
#
step_1to5 <- function(path) {
t1 <- step1(path)
t2 <- step2(path,t1)
t3 <- step3(path,t2)
t4 <- step4(t3)
t5 <- step5(t4)
backup_wd <- getwd()
setwd(path)
write.table(t5, "outputdata_step5.txt", row.name=FALSE)
# restore working dir
setwd(backup_wd)
return(t5)
}
|
# Auto-generated fuzz fixture: A is a 1x7 numeric matrix holding extreme
# magnitudes (up to ~1e295) and B a 1x1 zero matrix; both are fed to the
# internal multivariance:::match_rows helper and the result's structure
# is printed for inspection.
testlist <- list(A = structure(c(6.6399836774179e+81, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613109594-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 256
|
r
|
testlist <- list(A = structure(c(6.6399836774179e+81, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_faculty_titles.R
\name{add_faculty_titles}
\alias{add_faculty_titles}
\title{Add Faculty Titles}
\format{\describe{
\item{title}{Faculty's title}
\item{rank}{Faculty's rank; one of "Librarian", "Instructor", "Fellow", "Lecturer",
"Visiting Professor", "Assistant Professor", "Associate Professor", "Professor"}
\item{status}{Faculty's status; as one of "Athletic", "Visiting", "Part-time",
"Tenure-track", or "Tenured"}
}}
\usage{
add_faculty_titles(x)
}
\arguments{
\item{x}{data frame with raw.text column}
}
\value{
the input data frame along with the new columns.
}
\description{
This function takes as input a data frame which includes the raw
text associated with each faculty member. It returns that data frame along with
2 new columns: for the faculty's title and faculty's rank
}
|
/man/add_faculty_titles.Rd
|
no_license
|
davidkane9/williams
|
R
| false
| true
| 888
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_faculty_titles.R
\name{add_faculty_titles}
\alias{add_faculty_titles}
\title{Add Faculty Titles}
\format{\describe{
\item{title}{Faculty's title}
\item{rank}{Faculty's rank; one of "Librarian", "Instructor", "Fellow", "Lecturer",
"Visiting Professor", "Assistant Professor", "Associate Professor", "Professor"}
\item{status}{Faculty's status; as one of "Athletic", "Visiting", "Part-time",
"Tenure-track", or "Tenured"}
}}
\usage{
add_faculty_titles(x)
}
\arguments{
\item{x}{data frame with raw.text column}
}
\value{
the input data frame along with the new columns.
}
\description{
This function takes as input a data frame which includes the raw
text associated with each faculty member. It returns that data frame along with
2 new columns: for the faculty's title and faculty's rank
}
|
# Code from Andy, 29 Dec 2016
# Mike reorganised the plotting commands, adding devAskNewPage and show.plot.
# Mike moved beta1 and npix to the arguments.
sim.spatialHDS <-
function(lam0 = 4 , sigma= 1.5, B=3, nsites=100, beta1 = 1, npix = 20, show.plot=3){
  # Function simulates coordinates of individuals on a square
  # Square is [0,2B] x[0,2B], with a count location on the point (B,B)
  # lam0: expected population size in the square
  # sigma: scale of half-normal detection function
  # B: circle radius
  # nsites: number of independent sites (squares) to simulate
  # beta1: coefficient of the spatial habitat covariate on log density
  # npix: pixels per side of the habitat grid (npix^2 pixels per site)
  # show.plot: number of leading sites to plot (<= 0 disables plotting)
  # Returns a list: data (site, u1, u2, distance d, detection y per
  # individual), B, Habitat (npix^2 x nsites covariate matrix), grid
  # (pixel centres), N (realised abundance per site), and nsites.
  if(show.plot > 0) {
    # Save graphics settings and page-prompt state; restore on exit,
    # even if the simulation errors part-way through.
    op <- par(mar=c(3,3,3,6)) ; on.exit(par(op))
    oldAsk <- devAskNewPage(ask = TRUE) ; on.exit(devAskNewPage(oldAsk), add=TRUE)
  }
  # npix<- 20
  data<- NULL
  # Intercept chosen so the total expected abundance over all pixels is lam0.
  beta0<- log(lam0/(npix*npix))
  # beta1<- 1
  Z<- matrix(NA,nrow=npix*npix, ncol=nsites)
  delta<- (2*B-0)/npix
  grx<- seq(delta/2, 2*B - delta/2, delta)
  gr<- expand.grid(grx,grx)
  # Exponential covariance over pixel centres; e2dist is presumably the
  # package's pairwise Euclidean distance helper (defined elsewhere) — confirm.
  V<- exp(-e2dist(gr,gr)/1)
  N<- rep(NA,nsites)
  for(s in 1:nsites){
    # Draw a spatially correlated Gaussian habitat field via the Cholesky
    # factor of V.
    z<- t(chol(V))%*%rnorm( npix^2 )
    Z[,s]<- z
    # Note Poisson assumption which means in each pixel is also Poisson
    N[s]<- rpois(1, sum(exp( beta0 + beta1*Z[,s])))
    # cat(N[s],fill=TRUE)
    # Place each of the N[s] individuals in a pixel with probability
    # proportional to exp(beta1 * habitat value).
    probs<- exp(beta1*Z[,s])/sum(exp(beta1*Z[,s]))
    pixel.id<- sample(1:(npix^2), N[s], replace=TRUE, prob=probs)
    # could simulate ranomdly within the pixel but it won't matter
    u1<- gr[pixel.id,1]
    u2<- gr[pixel.id,2]
    d <- sqrt((u1 - B)^2 + (u2-B)^2) # distance to center point of square
    # Half-normal detection probability as a function of distance.
    p<- exp(-d*d/(2*sigma*sigma))
    # Now we decide whether each individual is detected or not
    y <- rbinom(N[s], 1, p)
    if(s <= show.plot) {
      # Plot the habitat surface with all individuals (black) and detected
      # individuals overlaid (red); rasterFromXYZ and image_scale come from
      # outside this file (raster package / package helper) — confirm.
      img<- rasterFromXYZ(cbind(gr,z))
      image(img, col=topo.colors(10))
      #draw.circle(3,3,B)
      image_scale(z,col=topo.colors(10))
      points(u1,u2,pch=16,col='black')
      # points(u1[d<= B], u2[d<= B], pch = 16, col = "black")
      points(u1[y==1], u2[y==1], pch = 16, col = "red")
      points(B, B, ,pch = "+", cex = 3)
      # draw.circle(3, 3, B)
    }
    # Record every individual at the site (detected or not); a site with
    # zero detections contributes a single all-NA row so it stays present
    # in the output.
    if(sum(y)>0) {
      data<- rbind(data, cbind(rep(s,length(u1)),u1=u1,u2=u2,d=d,y=y))
    } else {
      data<- rbind(data, c(s, NA, NA, NA, NA))
    }
  }
  dimnames(data)<-list(NULL,c("site","u1","u2","d","y"))
  return(list(data=data, B=B, Habitat=Z, grid=gr,N=N,nsites=nsites))
}
|
/R/sim-spatialHDS.R
|
no_license
|
guillaumesouchay/AHMbook
|
R
| false
| false
| 2,283
|
r
|
# Code from Andy, 29 Dec 2016
# Mike reorganised the plotting commands, adding devAskNewPage and show.plot.
# Mike moved beta1 and npix to the arguments.

# Simulate spatially correlated habitat and individual locations for
# hierarchical distance sampling (HDS) at `nsites` square plots, each
# surveyed from a single point-count location at the plot centre (B, B).
#
# Additional arguments (beyond those documented inline below):
#   beta1     : log-linear effect of the spatial covariate Z on expected
#               pixel-level abundance
#   npix      : number of pixels per side of the habitat raster (npix^2 total)
#   show.plot : number of sites to plot (0 suppresses all plotting)
#
# Returns a list: the individual-level data matrix, B, the habitat matrix Z
# (one column per site), the pixel grid, the true abundances N, and nsites.
#
# NOTE(review): depends on helpers not visible here -- e2dist() (pairwise
# distances), rasterFromXYZ() (raster package) and image_scale(); confirm
# they are available when this file is sourced.
sim.spatialHDS <-
function(lam0 = 4 , sigma= 1.5, B=3, nsites=100, beta1 = 1, npix = 20, show.plot=3){
# Function simulates coordinates of individuals on a square
# Square is [0,2B] x[0,2B], with a count location on the point (B,B)
# lam0: expected population size in the square
# sigma: scale of half-normal detection function
# B: circle radius
if(show.plot > 0) {
op <- par(mar=c(3,3,3,6)) ; on.exit(par(op))
oldAsk <- devAskNewPage(ask = TRUE) ; on.exit(devAskNewPage(oldAsk), add=TRUE)
}
# npix<- 20
data<- NULL
# Intercept chosen so the whole plot has expected abundance lam0 when Z = 0.
beta0<- log(lam0/(npix*npix))
# beta1<- 1
Z<- matrix(NA,nrow=npix*npix, ncol=nsites)
delta<- (2*B-0)/npix
grx<- seq(delta/2, 2*B - delta/2, delta)
gr<- expand.grid(grx,grx)
# Exponential spatial covariance among pixel centres (range parameter fixed at 1).
V<- exp(-e2dist(gr,gr)/1)
N<- rep(NA,nsites)
for(s in 1:nsites){
# One Gaussian random-field realisation for site s via the Cholesky factor of V.
z<- t(chol(V))%*%rnorm( npix^2 )
Z[,s]<- z
# Note Poisson assumption which means in each pixel is also Poisson
N[s]<- rpois(1, sum(exp( beta0 + beta1*Z[,s])))
# cat(N[s],fill=TRUE)
# Place the N[s] individuals among pixels proportional to habitat intensity.
probs<- exp(beta1*Z[,s])/sum(exp(beta1*Z[,s]))
pixel.id<- sample(1:(npix^2), N[s], replace=TRUE, prob=probs)
# could simulate randomly within the pixel but it won't matter
u1<- gr[pixel.id,1]
u2<- gr[pixel.id,2]
d <- sqrt((u1 - B)^2 + (u2-B)^2) # distance to center point of square
# Half-normal detection probability as a function of distance.
p<- exp(-d*d/(2*sigma*sigma))
# Now we decide whether each individual is detected or not
y <- rbinom(N[s], 1, p)
if(s <= show.plot) {
img<- rasterFromXYZ(cbind(gr,z))
image(img, col=topo.colors(10))
#draw.circle(3,3,B)
image_scale(z,col=topo.colors(10))
points(u1,u2,pch=16,col='black')
# points(u1[d<= B], u2[d<= B], pch = 16, col = "black")
points(u1[y==1], u2[y==1], pch = 16, col = "red")
points(B, B, ,pch = "+", cex = 3)
# draw.circle(3, 3, B)
}
# NOTE(review): if N[s] > 0 but no individual is detected (sum(y) == 0),
# the undetected individuals are dropped and a single NA row is recorded
# instead -- confirm this is the intended data format for the analysis.
if(sum(y)>0) {
data<- rbind(data, cbind(rep(s,length(u1)),u1=u1,u2=u2,d=d,y=y))
} else {
data<- rbind(data, c(s, NA, NA, NA, NA))
}
}
dimnames(data)<-list(NULL,c("site","u1","u2","d","y"))
return(list(data=data, B=B, Habitat=Z, grid=gr,N=N,nsites=nsites))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparta.r
\docType{package}
\name{sparta}
\alias{sparta}
\alias{sparta-package}
\title{\pkg{sparta} Trend Analysis for Unstructured Data}
\description{
The Sparta package includes methods used to analyse trends in
unstructured occurrence datasets. Methods included in the package
include Frescalo, Telfer's change index, Reporting rate models
and Bayesian Occupancy models. These methods are reviewed in
Isaac et al. (2014), available at \url{http://onlinelibrary.wiley.com/doi/10.1111/2041-210X.12254/abstract}
}
|
/man/sparta.Rd
|
no_license
|
flovv/sparta
|
R
| false
| true
| 592
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sparta.r
\docType{package}
\name{sparta}
\alias{sparta}
\alias{sparta-package}
\title{\pkg{sparta} Trend Analysis for Unstructured Data}
\description{
The Sparta package includes methods used to analyse trends in
unstructured occurrence datasets. Methods included in the package
include Frescalo, Telfer's change index, Reporting rate models
and Bayesian Occupancy models. These methods are reviewed in
Isaac et al. (2014), available at \url{http://onlinelibrary.wiley.com/doi/10.1111/2041-210X.12254/abstract}
}
|
#### WINDBELT-mapping low impact sites####
# Map wind-farm project sites on a leaflet map, coloured by whether they
# fall in low- vs. high-impact areas (classification derived from lr_tif_1).
#
# Fixes relative to the original script:
#  * removed `pal <- colorFactor(palette = )`, which errors at run time
#    (empty `palette` argument) and was immediately overwritten anyway;
#  * disabled the trailing `addLegend()` call, which was not piped onto a
#    map object and referenced `gdp_md_est`, a column not present in this
#    data set (leftover from a leaflet example; it errors when executed).
library(sf)
library(tidyverse)
library(sp)
#install.packages("gstat","maps")
library(gstat)
library(maps)
library(leaflet)
library(dplyr)

# Read the project data; lr_tif_1 encodes the impact classification raster value.
Windbelt_Df <- read_csv("G:/Data/wind project dataset/ventyx_withLR.csv")
Windbelt_Df$lowimpact <- Windbelt_Df$lr_tif_1
Windbelt_Df <- Windbelt_Df %>%
  select(-lr_tif) %>%
  mutate(lowimpact = recode(lowimpact, "0" = 'highimpactarea',
                            '1' = 'lowimpactarea',
                            '2' = "lowimpactarea-developed",
                            'NA' = 'NA'))
unique(Windbelt_Df$lowimpact)

# NOTE(review): filter() with no conditions returns the data unchanged --
# presumably a year filter was intended here; complete or remove.
Windbelt_df_3years <- Windbelt_Df %>%
  filter()

# Exploratory palette checks (kept from the original analysis).
pal <- colorNumeric(c("red", "green", "blue"), 1:10)
pal
pal <- colorFactor(c("red", "green", "blue"), 1:10)
pal

# Colour scale over the three impact classes.
Windbelt_Df$lowimpact <- as.factor(Windbelt_Df$lowimpact)
levels(Windbelt_Df$lowimpact)
factpal <- colorFactor(topo.colors(3), Windbelt_Df$lowimpact)

# Build the map: one circle per project, coloured by impact class.
m <- leaflet(Windbelt_Df) %>%
  addTiles() %>%
  addCircles(lat =~Latitude,
             lng = ~Longitude,
             color = ~factpal(lowimpact),
             popup = ~ProjectNam, weight = 3, radius = 30) %>%
  addLegend("bottomright", pal=factpal, values = ~lowimpact,
            title = "windfarms in low vs. high impact areas")

# Leftover leaflet-example legend (no map object; `gdp_md_est` is not in the
# data). Retained for reference but disabled because it errors when run:
# addLegend("bottomright", pal = pal, values = ~gdp_md_est,
#           title = "Est. GDP (2010)",
#           labFormat = labelFormat(prefix = "$"),
#           opacity = 1)
|
/TextAnalysis/NexisUni/R/WindDev_run.R
|
no_license
|
dlroney/WindBelt
|
R
| false
| false
| 1,468
|
r
|
#### WINDBELT-mapping low impact sites####
# Map wind-farm project sites on a leaflet map, coloured by whether they
# fall in low- vs. high-impact areas (classification derived from lr_tif_1).
#
# Fixes relative to the original script:
#  * removed `pal <- colorFactor(palette = )`, which errors at run time
#    (empty `palette` argument) and was immediately overwritten anyway;
#  * disabled the trailing `addLegend()` call, which was not piped onto a
#    map object and referenced `gdp_md_est`, a column not present in this
#    data set (leftover from a leaflet example; it errors when executed).
library(sf)
library(tidyverse)
library(sp)
#install.packages("gstat","maps")
library(gstat)
library(maps)
library(leaflet)
library(dplyr)

# Read the project data; lr_tif_1 encodes the impact classification raster value.
Windbelt_Df <- read_csv("G:/Data/wind project dataset/ventyx_withLR.csv")
Windbelt_Df$lowimpact <- Windbelt_Df$lr_tif_1
Windbelt_Df <- Windbelt_Df %>%
  select(-lr_tif) %>%
  mutate(lowimpact = recode(lowimpact, "0" = 'highimpactarea',
                            '1' = 'lowimpactarea',
                            '2' = "lowimpactarea-developed",
                            'NA' = 'NA'))
unique(Windbelt_Df$lowimpact)

# NOTE(review): filter() with no conditions returns the data unchanged --
# presumably a year filter was intended here; complete or remove.
Windbelt_df_3years <- Windbelt_Df %>%
  filter()

# Exploratory palette checks (kept from the original analysis).
pal <- colorNumeric(c("red", "green", "blue"), 1:10)
pal
pal <- colorFactor(c("red", "green", "blue"), 1:10)
pal

# Colour scale over the three impact classes.
Windbelt_Df$lowimpact <- as.factor(Windbelt_Df$lowimpact)
levels(Windbelt_Df$lowimpact)
factpal <- colorFactor(topo.colors(3), Windbelt_Df$lowimpact)

# Build the map: one circle per project, coloured by impact class.
m <- leaflet(Windbelt_Df) %>%
  addTiles() %>%
  addCircles(lat =~Latitude,
             lng = ~Longitude,
             color = ~factpal(lowimpact),
             popup = ~ProjectNam, weight = 3, radius = 30) %>%
  addLegend("bottomright", pal=factpal, values = ~lowimpact,
            title = "windfarms in low vs. high impact areas")

# Leftover leaflet-example legend (no map object; `gdp_md_est` is not in the
# data). Retained for reference but disabled because it errors when run:
# addLegend("bottomright", pal = pal, values = ~gdp_md_est,
#           title = "Est. GDP (2010)",
#           labFormat = labelFormat(prefix = "$"),
#           opacity = 1)
|
# Scrape the Saramin job-search results page for "DATA SCIENTIST" and save
# the skill/keyword names with their posting counts to saramin.csv.
#
# Changes relative to the original:
#  * dropped `rm(list=ls())` -- clearing the caller's global environment is
#    a session side effect and is discouraged in scripts;
#  * `TRUE` instead of `T`, `<-` instead of `=` for assignment.
library(rvest)
# getwd()
# setwd("C:/Rstudy/Day_8_191030")
url <- "http://www.saramin.co.kr/zf_user/search?search_area=main&search_done=y&search_optional_item=n&searchType=default_mysearch&searchword=DATA%20SCIENTIST"
html <- read_html(url)

# Each .swiper-slide node holds a keyword label plus its posting count.
tech_name_nodes <- html_nodes(html, '.swiper-slide')

# Keyword names: strip punctuation, control chars, whitespace, digits and the
# literal "삭제" marker, then drop the trailing (non-keyword) slide.
tech_name <- html_text(tech_name_nodes, trim = TRUE)
tech_name <- gsub("[[:punct:][:cntrl:][:space:][:digit:]삭제]", "", tech_name)
tech_name <- tech_name[-(length(tech_name))]

# Posting counts: keep only the digits from the same node text.
info_count <- html_text(tech_name_nodes, trim = TRUE)
info_count <- gsub("[[:space:][:punct:][:cntrl:][:alpha:]]", "", info_count)
info_count <- info_count[-(length(info_count))]

result <- data.frame(tech_name, info_count)
write.csv(result, "saramin.csv")
|
/R_training/실습제출/이종현/191030/saramin.R
|
no_license
|
BaeYS-marketing/R
|
R
| false
| false
| 731
|
r
|
# Scrape the Saramin job-search results page for "DATA SCIENTIST" and save
# the skill/keyword names with their posting counts to saramin.csv.
#
# Changes relative to the original:
#  * dropped `rm(list=ls())` -- clearing the caller's global environment is
#    a session side effect and is discouraged in scripts;
#  * `TRUE` instead of `T`, `<-` instead of `=` for assignment.
library(rvest)
# getwd()
# setwd("C:/Rstudy/Day_8_191030")
url <- "http://www.saramin.co.kr/zf_user/search?search_area=main&search_done=y&search_optional_item=n&searchType=default_mysearch&searchword=DATA%20SCIENTIST"
html <- read_html(url)

# Each .swiper-slide node holds a keyword label plus its posting count.
tech_name_nodes <- html_nodes(html, '.swiper-slide')

# Keyword names: strip punctuation, control chars, whitespace, digits and the
# literal "삭제" marker, then drop the trailing (non-keyword) slide.
tech_name <- html_text(tech_name_nodes, trim = TRUE)
tech_name <- gsub("[[:punct:][:cntrl:][:space:][:digit:]삭제]", "", tech_name)
tech_name <- tech_name[-(length(tech_name))]

# Posting counts: keep only the digits from the same node text.
info_count <- html_text(tech_name_nodes, trim = TRUE)
info_count <- gsub("[[:space:][:punct:][:cntrl:][:alpha:]]", "", info_count)
info_count <- info_count[-(length(info_count))]

result <- data.frame(tech_name, info_count)
write.csv(result, "saramin.csv")
|
# Auto-generated fuzz/regression harness case (valgrind run): replays a fixed
# argument list against the internal multivariance:::match_rows and prints the
# structure of the result. Do not edit by hand.
testlist <- list(A = structure(c(2.17107980817984e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), .Dim = c(5L, 9L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613122874-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 354
|
r
|
# Auto-generated fuzz/regression harness case (valgrind run): replays a fixed
# argument list against the internal multivariance:::match_rows and prints the
# structure of the result. Do not edit by hand.
testlist <- list(A = structure(c(2.17107980817984e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), .Dim = c(5L, 9L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
# Exploratory Data Analysis, project 2 -- plot 1.
# Downloads the NEI PM2.5 emissions data (if not already present), then writes
# plot1.png: total emissions per year with a fitted linear trend line.

# Change this to an appropriate directory for your environment
setwd("~/Coursera/Exploratory Data Analysis/Project 2")

library(dplyr)

## Download data
if (!file.exists("data")) {
  dir.create("data")
}
zip_path <- "./data/exdata-data-NEI_data.zip"
if (!file.exists(zip_path)) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
                destfile = zip_path, mode = "wb")
}

## Unzip data
if (!file.exists("./data/summarySCC_PM25.rds")) {
  unzip(zip_path, exdir = "./data")
}

## Load data
NEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")

## Aggregate total emissions by year and plot with a linear trend line.
png(filename = "plot1.png")
totals <- NEI %>%
  group_by(year) %>%
  summarise(total_emissions = sum(Emissions, na.rm = TRUE))
plot(totals, main = "Total Emissions by Year",
     ylab = "Total Emissions in Tons")
abline(lm(total_emissions ~ year, totals))
dev.off()
|
/plot1.R
|
no_license
|
mcb2/ExpDataProject2
|
R
| false
| false
| 938
|
r
|
# Exploratory Data Analysis, project 2 -- plot 1.
# Downloads the NEI PM2.5 emissions data (if not already present), then writes
# plot1.png: total emissions per year with a fitted linear trend line.

# Change this to an appropriate directory for your environment
setwd("~/Coursera/Exploratory Data Analysis/Project 2")

library(dplyr)

## Download data
if (!file.exists("data")) {
  dir.create("data")
}
zip_path <- "./data/exdata-data-NEI_data.zip"
if (!file.exists(zip_path)) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
                destfile = zip_path, mode = "wb")
}

## Unzip data
if (!file.exists("./data/summarySCC_PM25.rds")) {
  unzip(zip_path, exdir = "./data")
}

## Load data
NEI <- readRDS("./data/summarySCC_PM25.rds")
SCC <- readRDS("./data/Source_Classification_Code.rds")

## Aggregate total emissions by year and plot with a linear trend line.
png(filename = "plot1.png")
totals <- NEI %>%
  group_by(year) %>%
  summarise(total_emissions = sum(Emissions, na.rm = TRUE))
plot(totals, main = "Total Emissions by Year",
     ylab = "Total Emissions in Tons")
abline(lm(total_emissions ~ year, totals))
dev.off()
|
# Auto-generated fuzz/regression harness case (valgrind run): replays a fixed
# numeric vector against the internal myTAI:::cpp_harmonic_mean and prints the
# structure of the result. Do not edit by hand.
testlist <- list(x = c(1.78505693566084e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(myTAI:::cpp_harmonic_mean,testlist)
str(result)
|
/myTAI/inst/testfiles/cpp_harmonic_mean/AFL_cpp_harmonic_mean/cpp_harmonic_mean_valgrind_files/1615844522-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 268
|
r
|
# Auto-generated fuzz/regression harness case (valgrind run): replays a fixed
# numeric vector against the internal myTAI:::cpp_harmonic_mean and prints the
# structure of the result. Do not edit by hand.
testlist <- list(x = c(1.78505693566084e-308, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ))
result <- do.call(myTAI:::cpp_harmonic_mean,testlist)
str(result)
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/TCGA_pipe.R
\name{TCGA.pipe}
\alias{TCGA.pipe}
\title{ELMER analysis pipe for TCGA data.}
\usage{
TCGA.pipe(disease, analysis = "all", wd = "./", cores = NULL,
Data = NULL, ...)
}
\arguments{
\item{disease}{TCGA short form disease name such as COAD}
\item{analysis}{a vector of characters listing the analysis need to be done.
Analysis are "download","distal.enhancer","diffMeth","pair","motif","TF.search".
Default is "all" meaning all the analysis will be processed.}
\item{wd}{a path showing the working directory. Default is "./"}
\item{cores}{An integer which defines the number of cores to be used in parallel processing.
Default is NULL: don't use parallel process.}
\item{Data}{A path showing the folder containing DNA methylation, expression and clinic data}
\item{...}{A list of parameters for functions: GetNearGenes, get.feature.probe,
get.diff.meth, get.pair,}
}
\value{
Different analysis results.
}
\description{
ELMER analysis pipe for TCGA data.
}
\examples{
\dontrun{
distal.probe <- TCGA.pipe(disease = "LUSC", analysis="distal.enhancer", wd="~/")
}
}
|
/man/TCGA.pipe.Rd
|
no_license
|
scoetzee/ELMER
|
R
| false
| false
| 1,152
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/TCGA_pipe.R
\name{TCGA.pipe}
\alias{TCGA.pipe}
\title{ELMER analysis pipe for TCGA data.}
\usage{
TCGA.pipe(disease, analysis = "all", wd = "./", cores = NULL,
Data = NULL, ...)
}
\arguments{
\item{disease}{TCGA short form disease name such as COAD}
\item{analysis}{a vector of characters listing the analysis need to be done.
Analysis are "download","distal.enhancer","diffMeth","pair","motif","TF.search".
Default is "all" meaning all the analysis will be processed.}
\item{wd}{a path showing the working directory. Default is "./"}
\item{cores}{An integer which defines the number of cores to be used in parallel processing.
Default is NULL: don't use parallel process.}
\item{Data}{A path showing the folder containing DNA methylation, expression and clinic data}
\item{...}{A list of parameters for functions: GetNearGenes, get.feature.probe,
get.diff.meth, get.pair,}
}
\value{
Different analysis results.
}
\description{
ELMER analysis pipe for TCGA data.
}
\examples{
\dontrun{
distal.probe <- TCGA.pipe(disease = "LUSC", analysis="distal.enhancer", wd="~/")
}
}
|
## Figure 1: barplot #### top genes ######
# Barplots of mutation frequencies for genes that differ between early-onset
# and typical-onset MSS colorectal cancer (GENIE data), plus a forest plot of
# the corresponding odds ratios. Reads pre-computed association result tables.
out.m<-read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-ALL.txt",head=TRUE,sep="\t")
out.m<-as.data.frame(out.m)
# Keep only genes with nominally significant differences (P < 0.05).
out.m <- out.m[out.m$P < 0.05,]
# Columns 8:10 are assumed to hold the frequency columns used for plotting
# (overall Freq plus the two group frequencies) -- TODO confirm against the
# file header.
out.mbar<-out.m[,8:10]
rownames(out.mbar)<-out.m$Gene
#out.mbar<-out.mbar[,-(1:2)]
#out.mbar$V2 <- as.numeric(as.character(out.mbar$V2))
#out.mbar$V3 <- as.numeric(as.character(out.mbar$V3))
#out.mbar$V4 <- as.numeric(as.character(out.mbar$V4))
# Order genes by decreasing overall frequency.
out.mbar<-out.mbar[rev(order(out.mbar$Freq)),]
#out.mbar<- out.mbar[rownames(out.mbar)%in% CD$Gene,]
library(reshape)
library(RColorBrewer)
# NOTE(review): brewer.pal() requires n >= 3; n = 2 warns and returns 3
# colours, of which the first two are kept here.
coul <- brewer.pal(2, "Set2")[1:2]
#out.mbar<-melt(out.m,measure.vars = c("V2","V3","V4"))
pdf("Figure1.barplot_early_old_topMF.pdf")
barplot(t(out.mbar[,1]),beside=TRUE,ylim=c(0,0.8),border=NA)
barplot(t(out.mbar[,2:3]),beside=TRUE,col=coul, ylim=c(0,0.8),border=NA)
dev.off()
pdf("Figure1.Sig.barplot_early_old_topMF.pdf")
#barplot(t(out.mbar[,1]),beside=TRUE,ylim=c(0,0.8),border=NA)
barplot(t(out.mbar[,2:3]),beside=TRUE,col=coul, ylim=c(0,0.8),border=NA)
dev.off()
#### mutation freq plot by race #######
# Gene lists grouped by significance pattern; sigList2 is the set plotted below.
sigList1<-c("PIK3CA","APC","FAT1")
sigList2<-c("TGFBR2","CREBBP")
sigList3<-c("LRP1B","TP53","TCF7L2","KDR","DOCK8","FLT4","SMAD2","SMAD3")
sigList<-sigList2
Htest1<-c(sigList1,sigList2,sigList3)
# Per-race association result tables (same format as the ALL table above).
out.m1<-read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-Asian1.txt",head=TRUE,sep="\t")
out.m2<-read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-Black1.txt",head=TRUE,sep="\t")
out.m3<-read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-White1.txt",head=TRUE,sep="\t")
out.m1 <- out.m1[out.m1$Gene %in% sigList,]
out.m2 <- out.m2[out.m2$Gene %in% sigList,]
out.m3 <- out.m3[out.m3$Gene %in% sigList,]
rownames(out.m1)<-out.m1$Gene
rownames(out.m2)<-out.m2$Gene
rownames(out.m3)<-out.m3$Gene
# Align all three tables to the same gene order before stacking.
out.m1<-out.m1[sigList,]
out.m2<-out.m2[sigList,]
out.m3<-out.m3[sigList,]
out.m<-rbind(out.m1,out.m2,out.m3)
out.m<-as.data.frame(out.m)
# Interleave race groups (a/b/c) within each gene (1/2) to get the bar order.
label.1<-rep(1:2,3)
label.2<-c(rep("a",2),rep("b",2), rep("c",2))
label.1<-paste(label.1,label.2,sep="")
out.m<-cbind(out.m,label.1)
out.m<-out.m[order(out.m$label.1),]
out.mbar<-out.m[,8:10]
#rownames(out.mbar)<-out.m$Gene
#out.mbar<-out.mbar[,-(1:2)]
#out.mbar$V2 <- as.numeric(as.character(out.mbar$V2))
#out.mbar$V3 <- as.numeric(as.character(out.mbar$V3))
#out.mbar$V4 <- as.numeric(as.character(out.mbar$V4))
#out.mbar<-out.mbar[rev(order(out.mbar$Freq)),]
#out.mbar<- out.mbar[rownames(out.mbar)%in% CD$Gene,]
#library(reshape)
#library(RColorBrewer)
#coul <- brewer.pal(2, "Set2")[1:2]
#out.mbar<-melt(out.m,measure.vars = c("V2","V3","V4"))
# Hatching density/angle distinguishes the three race groups in the bars.
den1<-rep(c(10,10,20,20,30,30),2)
ang1<-rep(c(45,45,0,0,135,135),2)
pdf("Figure1.SigBlack.barplot_early_old_topMF.pdf")
#barplot(t(out.mbar[,1]),beside=TRUE,ylim=c(0,0.8),border=NA)
barplot(t(out.mbar[,2:3]),beside=TRUE,col=coul,density= den1, angle=ang1, ylim=c(0,0.8))
dev.off()
### forest plot #####
# An example for forest plot searched from the google.
### "/Users/xingyi/Dropbox/project/GENIE/CRC/newanalysis03082021/R" ###
### figure: forest plot ####
library(forestplot)
library(metafor)
genes_df <- read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-ALL.txt", header=T, sep="\t")
genes_df <- genes_df[genes_df$P > 0 & genes_df$P < 0.05,c(1:7)]
labs <- genes_df$Gene
yi <- genes_df$Beta
sei <- genes_df$SE
# Fixed-effect meta-analysis of the per-gene effect estimates.
res <- rma(yi=yi, sei=sei, method="FE")
data <- structure(list(OR = c(NA,genes_df$OR),
                       low = c(NA,genes_df$X95CI1),
                       high = c(NA,genes_df$X95CI2)),
                  .Names = c("OR", "low", "high"),
                  #		row.names = c(NA,-11L),
                  class = "data.frame")
# NOTE(review): the label matrix below is hard-coded for 6 genes; it must
# match nrow(genes_df), otherwise labels and estimates are misaligned.
labels <- cbind(c("Gene_ID","Gene_1","Gene_2","Gene_3","Gene_4","Gene_5","Gene_6"),
                c("HR","0.83","0.61","0.85","0.77","0.75","0.81"),
                c("low","0.78","0.51","0.8","0.7","0.68","0.76"),
                c("high","0.89","0.74","0.9","0.84","0.83","0.87"))
print("....Creating the plot....")
#jpeg(filename="Hazard_ratio_plot.jpg",units="cm",width=20,height=17, res=800)
pdf("MSS-all.pdf")
forestplot(labels,
           data,new_page = TRUE,
           boxsize = .25,
           zero = 0.707,
           ci.vertices = TRUE,
           ci.vertices.height = 0.25,
           xlog=TRUE,
           cex = 0.1,
           graph.pos = 2,
           lwd.zero = gpar(lty=1, alpha = 1),
           lineheight = "auto",
           title = " ",
           txt_gp = fpTxtGp(label=gpar(fontfamily="Calibri")),
           col = fpColors(box="blue",line="black",zero = "black"),
           xlab="Odd ratio")
dev.off()
|
/Plot/Barplot+Forest.R
|
no_license
|
XingyiGuo/CRC_early-onset
|
R
| false
| false
| 4,621
|
r
|
## Figure 1: barplot #### top genes ######
# Barplots of mutation frequencies for genes that differ between early-onset
# and typical-onset MSS colorectal cancer (GENIE data), plus a forest plot of
# the corresponding odds ratios. Reads pre-computed association result tables.
out.m<-read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-ALL.txt",head=TRUE,sep="\t")
out.m<-as.data.frame(out.m)
# Keep only genes with nominally significant differences (P < 0.05).
out.m <- out.m[out.m$P < 0.05,]
# Columns 8:10 are assumed to hold the frequency columns used for plotting
# (overall Freq plus the two group frequencies) -- TODO confirm against the
# file header.
out.mbar<-out.m[,8:10]
rownames(out.mbar)<-out.m$Gene
#out.mbar<-out.mbar[,-(1:2)]
#out.mbar$V2 <- as.numeric(as.character(out.mbar$V2))
#out.mbar$V3 <- as.numeric(as.character(out.mbar$V3))
#out.mbar$V4 <- as.numeric(as.character(out.mbar$V4))
# Order genes by decreasing overall frequency.
out.mbar<-out.mbar[rev(order(out.mbar$Freq)),]
#out.mbar<- out.mbar[rownames(out.mbar)%in% CD$Gene,]
library(reshape)
library(RColorBrewer)
# NOTE(review): brewer.pal() requires n >= 3; n = 2 warns and returns 3
# colours, of which the first two are kept here.
coul <- brewer.pal(2, "Set2")[1:2]
#out.mbar<-melt(out.m,measure.vars = c("V2","V3","V4"))
pdf("Figure1.barplot_early_old_topMF.pdf")
barplot(t(out.mbar[,1]),beside=TRUE,ylim=c(0,0.8),border=NA)
barplot(t(out.mbar[,2:3]),beside=TRUE,col=coul, ylim=c(0,0.8),border=NA)
dev.off()
pdf("Figure1.Sig.barplot_early_old_topMF.pdf")
#barplot(t(out.mbar[,1]),beside=TRUE,ylim=c(0,0.8),border=NA)
barplot(t(out.mbar[,2:3]),beside=TRUE,col=coul, ylim=c(0,0.8),border=NA)
dev.off()
#### mutation freq plot by race #######
# Gene lists grouped by significance pattern; sigList2 is the set plotted below.
sigList1<-c("PIK3CA","APC","FAT1")
sigList2<-c("TGFBR2","CREBBP")
sigList3<-c("LRP1B","TP53","TCF7L2","KDR","DOCK8","FLT4","SMAD2","SMAD3")
sigList<-sigList2
Htest1<-c(sigList1,sigList2,sigList3)
# Per-race association result tables (same format as the ALL table above).
out.m1<-read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-Asian1.txt",head=TRUE,sep="\t")
out.m2<-read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-Black1.txt",head=TRUE,sep="\t")
out.m3<-read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-White1.txt",head=TRUE,sep="\t")
out.m1 <- out.m1[out.m1$Gene %in% sigList,]
out.m2 <- out.m2[out.m2$Gene %in% sigList,]
out.m3 <- out.m3[out.m3$Gene %in% sigList,]
rownames(out.m1)<-out.m1$Gene
rownames(out.m2)<-out.m2$Gene
rownames(out.m3)<-out.m3$Gene
# Align all three tables to the same gene order before stacking.
out.m1<-out.m1[sigList,]
out.m2<-out.m2[sigList,]
out.m3<-out.m3[sigList,]
out.m<-rbind(out.m1,out.m2,out.m3)
out.m<-as.data.frame(out.m)
# Interleave race groups (a/b/c) within each gene (1/2) to get the bar order.
label.1<-rep(1:2,3)
label.2<-c(rep("a",2),rep("b",2), rep("c",2))
label.1<-paste(label.1,label.2,sep="")
out.m<-cbind(out.m,label.1)
out.m<-out.m[order(out.m$label.1),]
out.mbar<-out.m[,8:10]
#rownames(out.mbar)<-out.m$Gene
#out.mbar<-out.mbar[,-(1:2)]
#out.mbar$V2 <- as.numeric(as.character(out.mbar$V2))
#out.mbar$V3 <- as.numeric(as.character(out.mbar$V3))
#out.mbar$V4 <- as.numeric(as.character(out.mbar$V4))
#out.mbar<-out.mbar[rev(order(out.mbar$Freq)),]
#out.mbar<- out.mbar[rownames(out.mbar)%in% CD$Gene,]
#library(reshape)
#library(RColorBrewer)
#coul <- brewer.pal(2, "Set2")[1:2]
#out.mbar<-melt(out.m,measure.vars = c("V2","V3","V4"))
# Hatching density/angle distinguishes the three race groups in the bars.
den1<-rep(c(10,10,20,20,30,30),2)
ang1<-rep(c(45,45,0,0,135,135),2)
pdf("Figure1.SigBlack.barplot_early_old_topMF.pdf")
#barplot(t(out.mbar[,1]),beside=TRUE,ylim=c(0,0.8),border=NA)
barplot(t(out.mbar[,2:3]),beside=TRUE,col=coul,density= den1, angle=ang1, ylim=c(0,0.8))
dev.off()
### forest plot #####
# An example for forest plot searched from the google.
### "/Users/xingyi/Dropbox/project/GENIE/CRC/newanalysis03082021/R" ###
### figure: forest plot ####
library(forestplot)
library(metafor)
genes_df <- read.table("GENIE_DiffGENE_EarlyvsTypical_CRC_02192021-MSS-ALL.txt", header=T, sep="\t")
genes_df <- genes_df[genes_df$P > 0 & genes_df$P < 0.05,c(1:7)]
labs <- genes_df$Gene
yi <- genes_df$Beta
sei <- genes_df$SE
# Fixed-effect meta-analysis of the per-gene effect estimates.
res <- rma(yi=yi, sei=sei, method="FE")
data <- structure(list(OR = c(NA,genes_df$OR),
                       low = c(NA,genes_df$X95CI1),
                       high = c(NA,genes_df$X95CI2)),
                  .Names = c("OR", "low", "high"),
                  #		row.names = c(NA,-11L),
                  class = "data.frame")
# NOTE(review): the label matrix below is hard-coded for 6 genes; it must
# match nrow(genes_df), otherwise labels and estimates are misaligned.
labels <- cbind(c("Gene_ID","Gene_1","Gene_2","Gene_3","Gene_4","Gene_5","Gene_6"),
                c("HR","0.83","0.61","0.85","0.77","0.75","0.81"),
                c("low","0.78","0.51","0.8","0.7","0.68","0.76"),
                c("high","0.89","0.74","0.9","0.84","0.83","0.87"))
print("....Creating the plot....")
#jpeg(filename="Hazard_ratio_plot.jpg",units="cm",width=20,height=17, res=800)
pdf("MSS-all.pdf")
forestplot(labels,
           data,new_page = TRUE,
           boxsize = .25,
           zero = 0.707,
           ci.vertices = TRUE,
           ci.vertices.height = 0.25,
           xlog=TRUE,
           cex = 0.1,
           graph.pos = 2,
           lwd.zero = gpar(lty=1, alpha = 1),
           lineheight = "auto",
           title = " ",
           txt_gp = fpTxtGp(label=gpar(fontfamily="Calibri")),
           col = fpColors(box="blue",line="black",zero = "black"),
           xlab="Odd ratio")
dev.off()
# PSYP13 home assignment: hierarchical regression predicting pain from
# demographic and psychological variables (home_sample_1 data set).
# NOTE(review): rm(list=ls()) / graphics.off() wipe the caller's session --
# tolerable in a standalone assignment script, avoid in shared code.
rm(list=ls(all=TRUE))
graphics.off()
# First clear the workspace
library(psych)
library(lm.beta)
library(rgl)
library(tidyverse)
library(gridExtra)
library(lsr)
library(MVA)
library(ggplot2)
library(car)
# Load packages
data = read.csv("https://raw.githubusercontent.com/kekecsz/PSYP13_Data_analysis_class-2019/master/home_sample_1.csv ")
# Open dataset
data1 = data
View(data)
describe(data)
summary(data)
ggplot() +
  aes(x = data$pain) +
  geom_histogram( bins = 50)
ggplot() +
  aes(x = data$age) +
  geom_histogram( bins = 40)
ggplot() +
  aes(x = data$STAI_trait) +
  geom_histogram( bins = 40)
ggplot() +
  aes(x = data$pain_cat) +
  geom_histogram( bins = 40)
ggplot() +
  aes(x = data$mindfulness) +
  geom_histogram( bins = 50)
ggplot() +
  aes(x = data$cortisol_serum) +
  geom_histogram( bins = 50)
ggplot() +
  aes(x = data$cortisol_saliva) +
  geom_histogram( bins = 50)
# Histograms for variables pain, age, STAI trait, pain catastrophizing, mindfulness and cortisol measures
# Checking data for any abnormalities or deviations
data1$STAI_trait[data1$STAI_trait==3.5]=35.0
# Correcting STAI trait value of participant number 18 from "3.5" to "35.0", assumed typo
data1$sex = as.numeric(data1$sex)
# Changing variable sex from character to numeric
# NOTE(review): with stringsAsFactors = FALSE (the default in R >= 4.0),
# as.numeric() on the character sex labels yields all NA with a warning;
# consider as.numeric(factor(data1$sex)) -- verify against the data.
data2 = data1[,c("age","sex","pain","STAI_trait","pain_cat","mindfulness","cortisol_serum","cortisol_saliva")]
###########################################
model1 <- lm(pain ~ age + sex, data = data2)
# Linear model 1 with multiple predictors: age and sex
summary(model1)
# View linear model 1
model2 <- lm(formula = pain ~ age + sex + STAI_trait + pain_cat + mindfulness + cortisol_serum + cortisol_saliva, data = data2)
# Linear model 2 with multiple predictors: age, sex, STAI trait, pain catastrophizing and cortisol measures
summary(model2)
# View linear model 2
###########################################
lev = hat(model.matrix(model1))
plot(lev)
data2[lev > .05,]
# Checking for multivariate outliers in linear model 1 using leverage
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
# Checking for multivariate outliers in linear model 1 using Mahalanobis distance, looking at the five most extreme scores
# Looking at Chi-square table and a p value that equals .001 and a degrees of freedom of 2, the cutoff score is 13.82 - no participant is considered a multivariate outlier
lev = hat(model.matrix(model2))
plot(lev)
data2[lev > .10,]
# Checking for multivariate outliers in linear model 2 using leverage
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
# Looking at Chi-square table and a p value that equals .001 and a degrees of freedom of 7, the cutoff score is 24.32 - participant number 59 is considered a multivariate outlier and will thus be excluded from the dataset
data2 <- data2[-59,]
# Removing participant number 59
model1 = update(model1)
model2 = update(model2)
# Updating the models after the participant is removed
###########################################
lev = hat(model.matrix(model1))
plot(lev)
data2[lev > .05,]
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
# Looking at Chi-square table and a p value that equals .001 and a degrees of freedom of 2, the cutoff score is 13.82 - no participant is considered a multivariate outlier
lev = hat(model.matrix(model2))
plot(lev)
data2[lev > .10,]
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
# Looking at Chi-square table and a p value that equals .001 and a degrees of freedom of 7, the cutoff score is 24.32 - no participant is considered a multivariate outlier
# Rerunning tests for multivariate outliers - no outliers found
###########################################
plot(x = model1, which = 4)
plot(x = model2, which = 4)
# Checking regression model 1 and 2 for influential outliers using Cook's distance
# No outliers detected
resid <- residuals(model1)
hist(resid)
describe(resid)
qqnorm(resid)
shapiro.test(resid)
resid <- residuals(model2)
hist(resid)
describe(resid)
qqnorm(resid)
shapiro.test(resid)
# Testing regression model 1 and 2 for the normality of residuals using the Shapiro-Wilk test
# Tests not showing any abnormalities
yhat.2 <- fitted.values(object = model1)
plot( x = yhat.2,
      y = data2$pain,
      xlab = "Fitted Values",
      ylab = "Observed Values")
yhat.2 <- fitted.values(object = model2)
plot( x = yhat.2,
      y = data2$pain,
      xlab = "Fitted Values",
      ylab = "Observed Values")
# Checking the linearity of the relationship between predictors and outcomes in regression model 1 and 2
# Tests not showing any abnormalities
plot(x = model1, which = 3)
plot(x = model2, which = 3)
ncvTest(model1)
ncvTest(model2)
# Checking the homogeneity of variance for regression model 1 and 2
# Tests not showing any abnormalities
vif(mod = model1)
vif(mod = model2)
# Checking for multicollinearity in regression model 1 and 2
# Regression model 2 showing two fairly large correlations between predictor variables cortisol serum and cortisol saliva
# Variable cortisol saliva has a higher value than cortisol serum and will thus be excluded from a third regression model
###########################################
model3 <- lm(formula = pain ~ age + sex + STAI_trait + pain_cat + mindfulness + cortisol_serum, data = data2)
# Creating an updated second model without the variable salivary cortisol
#####################################################
lev = hat(model.matrix(model3))
plot(lev)
data2[lev > .10,]
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
plot(x = model3, which = 4)
resid <- residuals(model3)
hist(resid)
describe(resid)
qqnorm(resid)
shapiro.test(resid)
yhat.2 <- fitted.values(object = model3)
plot( x = yhat.2,
      y = data2$pain,
      xlab = "Fitted Values",
      ylab = "Observed Values")
plot(x = model3, which = 3)
ncvTest(model3)
# NOTE(review): car::vif() has no `which` argument -- confirm this call
# behaves as intended (likely should be vif(mod = model3)).
vif(model3, which = 3)
# Rerunning model diagnostics for the updated model
# No outliers, abnormalities, or deviations found
#####################################################
summary(model1)
summary(model3)
# Model test statistics for model 1 and the updated model 2
# When looking at adjusted R-squared, the updated model 2 explains more of the variance in the data than model 1
AIC(model1)
AIC(model3)
# Looking at the two models fit using the AIC function
# The difference between the AIC for model 1 and the updated model 2 is bigger than 2
# The updated model 2 has a smaller AIC value, meaning that it fits the data better than model 1
anova(model1, model3)
# Looking at the two models fit, based on residual error and degrees of freedom, using the anova function
# The updated model 2 is significantly better at predicting pain than model 1
# Build a regression coefficient table for a fitted lm model: unstandardized
# coefficients (b) with 95% confidence bounds, standardized betas (lm.beta),
# and p-values formatted without a leading zero ("<.001" when p rounds to 0).
#
# Fix: the original hard-coded `summary(model3)` here, silently ignoring the
# `model` argument. It now summarises the model actually passed in, so the
# existing call `coef_table(model3)` returns the same table as before while
# the function works correctly for any fitted model.
coef_table = function(model){
  mod_sum = summary(model)
  # Round p-values and strip the leading zero (APA style); 0 becomes "<.001".
  mod_sum_p_values = as.character(round(mod_sum$coefficients[,4], 3))
  mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"] = substr(mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"], 2, nchar(mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"]))
  mod_sum_p_values[mod_sum_p_values == "0"] = "<.001"
  # Combine b, 95% CI bounds and standardized coefficients into one table.
  mod_sum_table = cbind(as.data.frame(round(cbind(coef(model), confint(model), c(0, lm.beta(model)$standardized.coefficients[c(2:length(model$coefficients))])), 2)), mod_sum_p_values)
  names(mod_sum_table) = c("b", "95%CI lb", "95%CI ub", "Std.Beta", "p-value")
  mod_sum_table["(Intercept)","Std.Beta"] = "0"
  return(mod_sum_table)
}
# Print the coefficient table for the final (updated second) model.
sm_table = coef_table(model3)
sm_table
# Creating a coefficient table for the updated second model
# All variables except for STAI trait are significant when it comes to predicting pain in this dataset
# Re-definition of coef_table used for reporting model 1.
#
# Fix: the original hard-coded `summary(model1)` here, silently ignoring the
# `model` argument. It now summarises the model actually passed in, so the
# existing call `coef_table(model1)` returns the same table as before.
# NOTE(review): once fixed, this redefinition is identical to the earlier one
# and is redundant; kept so this edit stands alone.
coef_table = function(model){
  mod_sum = summary(model)
  # Round p-values and strip the leading zero (APA style); 0 becomes "<.001".
  mod_sum_p_values = as.character(round(mod_sum$coefficients[,4], 3))
  mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"] = substr(mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"], 2, nchar(mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"]))
  mod_sum_p_values[mod_sum_p_values == "0"] = "<.001"
  # Combine b, 95% CI bounds and standardized coefficients into one table.
  mod_sum_table = cbind(as.data.frame(round(cbind(coef(model), confint(model), c(0, lm.beta(model)$standardized.coefficients[c(2:length(model$coefficients))])), 2)), mod_sum_p_values)
  names(mod_sum_table) = c("b", "95%CI lb", "95%CI ub", "Std.Beta", "p-value")
  mod_sum_table["(Intercept)","Std.Beta"] = "0"
  return(mod_sum_table)
}
# Print the coefficient table for the first (age + sex) model.
sm_table = coef_table(model1)
sm_table
# Creating a coefficient table for the first model
# All variables are significant when it comes to predicting pain in this dataset
|
/Home assignment1Z.R
|
no_license
|
hannapersson/PSYP13-Home-Assignment-ZK
|
R
| false
| false
| 9,090
|
r
|
# ---- Setup: clear session, load packages, import data ----
rm(list=ls(all=TRUE))
graphics.off()
# First clear the workspace
library(psych)
library(lm.beta)
library(rgl)
library(tidyverse)
library(gridExtra)
library(lsr)
library(MVA)
library(ggplot2)
library(car)
# Load packages
# NOTE(review): the URL below ends with a space inside the quotes -- verify
# that read.csv() still resolves it on your platform.
data = read.csv("https://raw.githubusercontent.com/kekecsz/PSYP13_Data_analysis_class-2019/master/home_sample_1.csv ")
# Open dataset
data1 = data
View(data)
describe(data)
summary(data)
# ---- Exploratory histograms of every variable used in the models ----
ggplot() +
aes(x = data$pain) +
geom_histogram( bins = 50)
ggplot() +
aes(x = data$age) +
geom_histogram( bins = 40)
ggplot() +
aes(x = data$STAI_trait) +
geom_histogram( bins = 40)
ggplot() +
aes(x = data$pain_cat) +
geom_histogram( bins = 40)
ggplot() +
aes(x = data$mindfulness) +
geom_histogram( bins = 50)
ggplot() +
aes(x = data$cortisol_serum) +
geom_histogram( bins = 50)
ggplot() +
aes(x = data$cortisol_saliva) +
geom_histogram( bins = 50)
# Histograms for variables pain, age, STAI trait, pain catastrophizing, mindfulness and cortisol measures
# Checking data for any abnormalities or deviations
# ---- Data cleaning ----
data1$STAI_trait[data1$STAI_trait==3.5]=35.0
# Correcting STAI trait value of participant number 18 from "3.5" to "35.0", assumed typo
# NOTE(review): as.numeric() on a *character* column produces NAs with a
# warning; this line relies on sex arriving as a factor (pre-R 4.0 read.csv
# default) -- confirm under the R version in use.
data1$sex = as.numeric(data1$sex)
# Changing variable sex from character to numeric
data2 = data1[,c("age","sex","pain","STAI_trait","pain_cat","mindfulness","cortisol_serum","cortisol_saliva")]
###########################################
# ---- Fit the two initial regression models ----
model1 <- lm(pain ~ age + sex, data = data2)
# Linear model 1 with multiple predictors: age and sex
summary(model1)
# View linear model 1
model2 <- lm(formula = pain ~ age + sex + STAI_trait + pain_cat + mindfulness + cortisol_serum + cortisol_saliva, data = data2)
# Linear model 2 with multiple predictors: age, sex, STAI trait, pain catastrophizing and cortisol measures
summary(model2)
# View linear model 2
###########################################
# ---- Multivariate outlier screening (leverage + Mahalanobis distance) ----
lev = hat(model.matrix(model1))
plot(lev)
data2[lev > .05,]
# Checking for multivariate outliers in linear model 1 using leverage
N= nrow(data2)
# Mahalanobis distance from leverage: (N-1) * (lev - 1/N)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
# Checking for multivariate outliers in linear model 1 using Mahalanobis distance, looking at the five most extreme scores
# Looking at Chi-square table and a p value that equals .001 and a degrees of freedom of 2, the cutoff score is 13.82 - no participant is considered a multivariate outlier
lev = hat(model.matrix(model2))
plot(lev)
data2[lev > .10,]
# Checking for multivariate outliers in linear model 2 using leverage
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
# Looking at Chi-square table and a p value that equals .001 and a degrees of freedom of 7, the cutoff score is 24.32 - participant number 59 is considered a multivariate outlier and will thus be excluded from the dataset
# NOTE(review): this drops the 59th *row* of data2, assumed to correspond to
# participant 59 -- verify the row order matches participant IDs.
data2 <- data2[-59,]
# Removing participant number 59
model1 = update(model1)
model2 = update(model2)
# Updating the models after the participant is removed
###########################################
# ---- Re-run outlier screening on the refit models ----
lev = hat(model.matrix(model1))
plot(lev)
data2[lev > .05,]
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
# Looking at Chi-square table and a p value that equals .001 and a degrees of freedom of 2, the cutoff score is 13.82 - no participant is considered a multivariate outlier
lev = hat(model.matrix(model2))
plot(lev)
data2[lev > .10,]
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
# Looking at Chi-square table and a p value that equals .001 and a degrees of freedom of 7, the cutoff score is 24.32 - no participant is considered a multivariate outlier
# Rerunning tests for multivariate outliers - no outliers found
###########################################
# ---- Regression assumption checks for model 1 and model 2 ----
plot(x = model1, which = 4)
plot(x = model2, which = 4)
# Checking regression model 1 and 2 for influential outliers using Cook's distance
# No outliers detected
resid <- residuals(model1)
hist(resid)
describe(resid)
qqnorm(resid)
shapiro.test(resid)
resid <- residuals(model2)
hist(resid)
describe(resid)
qqnorm(resid)
shapiro.test(resid)
# Testing regression model 1 and 2 for the normality of residuals using the Shapiro-Wilk test
# Tests not showing any abnormalities
yhat.2 <- fitted.values(object = model1)
plot( x = yhat.2,
y = data2$pain,
xlab = "Fitted Values",
ylab = "Observed Values")
yhat.2 <- fitted.values(object = model2)
plot( x = yhat.2,
y = data2$pain,
xlab = "Fitted Values",
ylab = "Observed Values")
# Checking the linearity of the relationship between predictors and outcomes in regression model 1 and 2
# Tests not showing any abnormalities
plot(x = model1, which = 3)
plot(x = model2, which = 3)
ncvTest(model1)
ncvTest(model2)
# Checking the homogeneity of variance for regression model 1 and 2
# Tests not showing any abnormalities
vif(mod = model1)
vif(mod = model2)
# Checking for multicollinearity in regression model 1 and 2
# Regression model 2 showing two fairly large correlations between predictor variables cortisol serum and cortisol saliva
# Variable cortisol saliva has a higher value than cortisol serum and will thus be excluded from a third regression model
###########################################
model3 <- lm(formula = pain ~ age + sex + STAI_trait + pain_cat + mindfulness + cortisol_serum, data = data2)
# Creating an updated second model without the variable salivary cortisol
#####################################################
lev = hat(model.matrix(model3))
plot(lev)
data2[lev > .10,]
N= nrow(data2)
mahad=(N-1)*(lev-1 / N)
tail(sort(mahad),5)
order(mahad,decreasing=T)[c(5,4,3,2,1)]
plot(x = model3, which = 4)
resid <- residuals(model3)
hist(resid)
describe(resid)
qqnorm(resid)
shapiro.test(resid)
yhat.2 <- fitted.values(object = model3)
plot( x = yhat.2,
y = data2$pain,
xlab = "Fitted Values",
ylab = "Observed Values")
plot(x = model3, which = 3)
ncvTest(model3)
vif(model3, which = 3)
# Rerunning model diagnostics for the updated model
# No outliers, abnormalities, or deviations found
#####################################################
# ---- Compare model 1 against the updated model (model3) ----
summary(model1)
summary(model3)
# Model test statistics for model 1 and the updated model 2
# When looking at adjusted R-squared, the updated model 2 explains more of the variance in the data than model 1
AIC(model1)
AIC(model3)
# Looking at the two models fit using the AIC function
# The difference between the AIC for model 1 and the updated model 2 are bigger than 2
# The updated model 2 has a smaller AIC value, meaning that it fits the data better than model 1
# The models are nested (model1's predictors are a subset of model3's), so an
# F-test comparison via anova() is appropriate here.
anova(model1, model3)
# Looking at the two models fit, based on residual error and degrees of freedom, using the anova function
# The updated model 2 is significantly better at predicting pain than model 1
# Build a regression coefficient table for a fitted lm: unstandardized b,
# 95% CI bounds, standardized beta (via lm.beta) and formatted p-values.
# Fix: the original computed p-values from the hard-coded global `model3`
# instead of the `model` argument, so the function silently ignored its input.
coef_table = function(model){
  # Use the model that was actually passed in, not a global object
  mod_sum = summary(model)
  # p-values rounded to 3 decimals, kept as character for display formatting
  mod_sum_p_values = as.character(round(mod_sum$coefficients[,4], 3))
  # Strip the leading zero: "0.042" -> ".042" (APA-style)
  mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"] = substr(mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"], 2, nchar(mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"]))
  # Values that rounded to exactly 0 are reported as "<.001"
  mod_sum_p_values[mod_sum_p_values == "0"] = "<.001"
  # Combine b, confidence intervals and standardized betas (0 for intercept)
  mod_sum_table = cbind(as.data.frame(round(cbind(coef(model), confint(model), c(0, lm.beta(model)$standardized.coefficients[c(2:length(model$coefficients))])), 2)), mod_sum_p_values)
  names(mod_sum_table) = c("b", "95%CI lb", "95%CI ub", "Std.Beta", "p-value")
  mod_sum_table["(Intercept)","Std.Beta"] = "0"
  return(mod_sum_table)
}
sm_table = coef_table(model3)
sm_table
# Creating a coefficient table for the updated second model
# All variables except for STAI trait are significant when it comes to predicting pain in this dataset
# Build a regression coefficient table for a fitted lm: unstandardized b,
# 95% CI bounds, standardized beta (via lm.beta) and formatted p-values.
# Fix: the original computed p-values from the hard-coded global `model1`
# instead of the `model` argument, so the function silently ignored its input.
coef_table = function(model){
  # Use the model that was actually passed in, not a global object
  mod_sum = summary(model)
  # p-values rounded to 3 decimals, kept as character for display formatting
  mod_sum_p_values = as.character(round(mod_sum$coefficients[,4], 3))
  # Strip the leading zero: "0.042" -> ".042" (APA-style)
  mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"] = substr(mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"], 2, nchar(mod_sum_p_values[mod_sum_p_values != "0" & mod_sum_p_values != "1"]))
  # Values that rounded to exactly 0 are reported as "<.001"
  mod_sum_p_values[mod_sum_p_values == "0"] = "<.001"
  # Combine b, confidence intervals and standardized betas (0 for intercept)
  mod_sum_table = cbind(as.data.frame(round(cbind(coef(model), confint(model), c(0, lm.beta(model)$standardized.coefficients[c(2:length(model$coefficients))])), 2)), mod_sum_p_values)
  names(mod_sum_table) = c("b", "95%CI lb", "95%CI ub", "Std.Beta", "p-value")
  mod_sum_table["(Intercept)","Std.Beta"] = "0"
  return(mod_sum_table)
}
sm_table = coef_table(model1)
sm_table
# Creating a coefficient table for the first model
# All variables are significant when it comes to predicing pain in this dataset
|
# Create a caching wrapper around a matrix: stores the matrix together with
# its lazily computed inverse.
# Fix: the original assigned the stored matrix to `initma` via `<<-`, a name
# that does not exist in the closure, so every set() leaked/overwrote a
# *global* variable and separate cache objects shared state. The value is now
# kept in the function's own argument `initial_matrix`.
makeMatrix <- function(initial_matrix = matrix())
{
  im <- NULL  # cached inverse; NULL until computed
  set <- function(m)
  {
    initial_matrix <<- m  # store in the enclosing closure, not the global env
    im <<- NULL           # invalidate any previously cached inverse
  }
  get <- function() {
    initial_matrix
  }
  setinverse <- function(inverse) {
    im <<- inverse
  }
  getinverse <- function() {
    im
  }
  list(set = set, get = get, setinverse = setinverse,
       getinverse = getinverse)
}

# Return the inverse of `x`, which may be a plain matrix or a cache object
# produced by makeMatrix(). The inverse is computed at most once per cache.
# Fix: `class(x) == "matrix"` breaks on R >= 4.0, where matrices have class
# c("matrix", "array"): the comparison yields a length-2 logical and `if`
# errors (R >= 4.2) or warns. Use inherits()/is.list() instead. The
# unreachable trailing `return(NULL)` was removed.
cacheSolve <- function(x) {
  y <- NULL
  if (inherits(x, "matrix")) {
    # Plain matrix: wrap it in a fresh cache object first
    makeCObject <- makeMatrix()
    makeCObject$set(x)
    y <- makeCObject$getinverse()
  }
  if (is.list(x)) {
    # Already a makeMatrix() cache object
    makeCObject <- x
    y <- x$getinverse()
  }
  if (!is.null(y)) {
    message("getting cached data")
    return(y)
  }
  else {
    y <- solve(makeCObject$get())
    # cache the inverse matrix
    makeCObject$setinverse(y)
    # return the inverse matrix
    return(y)
  }
}
|
/cachematrix.R
|
no_license
|
vickythakre/ProgrammingAssignment2
|
R
| false
| false
| 1,030
|
r
|
# Create a caching wrapper around a matrix: stores the matrix together with
# its lazily computed inverse.
# Fix: the original assigned the stored matrix to `initma` via `<<-`, a name
# that does not exist in the closure, so every set() leaked/overwrote a
# *global* variable and separate cache objects shared state. The value is now
# kept in the function's own argument `initial_matrix`.
makeMatrix <- function(initial_matrix = matrix())
{
  im <- NULL  # cached inverse; NULL until computed
  set <- function(m)
  {
    initial_matrix <<- m  # store in the enclosing closure, not the global env
    im <<- NULL           # invalidate any previously cached inverse
  }
  get <- function() {
    initial_matrix
  }
  setinverse <- function(inverse) {
    im <<- inverse
  }
  getinverse <- function() {
    im
  }
  list(set = set, get = get, setinverse = setinverse,
       getinverse = getinverse)
}

# Return the inverse of `x`, which may be a plain matrix or a cache object
# produced by makeMatrix(). The inverse is computed at most once per cache.
# Fix: `class(x) == "matrix"` breaks on R >= 4.0, where matrices have class
# c("matrix", "array"): the comparison yields a length-2 logical and `if`
# errors (R >= 4.2) or warns. Use inherits()/is.list() instead. The
# unreachable trailing `return(NULL)` was removed.
cacheSolve <- function(x) {
  y <- NULL
  if (inherits(x, "matrix")) {
    # Plain matrix: wrap it in a fresh cache object first
    makeCObject <- makeMatrix()
    makeCObject$set(x)
    y <- makeCObject$getinverse()
  }
  if (is.list(x)) {
    # Already a makeMatrix() cache object
    makeCObject <- x
    y <- x$getinverse()
  }
  if (!is.null(y)) {
    message("getting cached data")
    return(y)
  }
  else {
    y <- solve(makeCObject$get())
    # cache the inverse matrix
    makeCObject$setinverse(y)
    # return the inverse matrix
    return(y)
  }
}
|
# Parse the "# Begin DBF" ... "# End DBF" header block of a PRMS GIS file and
# collect the variable names it declares. Returns the *still-open* connection
# (positioned just past the header) together with the names, so the caller
# can continue reading data records from the same file.
get_PRMS_GIS_varnames<-function(fname) {
## INPUT: name of PRMS GIS file
## OUTPUT: list with variable names
# open file connection
f <- file(fname, open="rt")
var_txt <- character()
num_vars <- 0
inheader <- FALSE
n <- 0
# Read one comma-separated line per iteration, capped at 512 lines so a file
# without an "# End DBF" marker cannot loop forever.
repeat {
n <- n + 1
if(n>512) break()
txt <- scan(f, what=character(),sep=",",nlines=1)
# echo each line to the console for progress/debugging
cat(txt,"\n")
if("# End DBF" %in% txt) {
inheader <- FALSE
break()
}
if(inheader==TRUE) {
num_vars <- num_vars + 1
# first field holds the variable name; strip the leading "# " marker
var_txt[num_vars] <- gsub("# ","",txt[1])
}
if("# Begin DBF" %in% txt) {
inheader <- TRUE
}
}
# NOTE(review): the connection is returned open on purpose; the caller is
# responsible for close()-ing it when done.
return(list(con=f,vars=var_txt))
}
# Read record lines from an already-open PRMS GIS file connection.
# NOTE(review): the body looks unfinished upstream -- it still discards every
# record it reads (hrus/vars are unused) and returns NULL; only the concrete
# defects are fixed here.
read_PRMS_GIS_records<-function(con, hrus, vars) {
## INPUT: open connection to a PRMS GIS file, list of hrus desired
## OUTPUT: data frame for a single simulation day, for selected HRUs
  if(!isOpen(con)) {
    stop("Connection must be open before calling this function. \n")
  }
  var_txt <- character()
  num_vars <- 0
  inheader <- FALSE
  n <- 0
  repeat {
    # Fix: the original read from `f`, an undefined name copied from
    # get_PRMS_GIS_varnames(); read from the `con` argument instead.
    txt <- scan(con, what=character(),sep=",",nlines=1, comment.char="#")
    # Fix: the loop had no exit condition; stop at end-of-file, where
    # scan() returns a zero-length vector.
    if (length(txt) == 0) break
  }
  return()
}
|
/R/read_prms_GIS.R
|
permissive
|
smwesten-usgs/prmsParameterizer
|
R
| false
| false
| 1,137
|
r
|
# Parse the "# Begin DBF" ... "# End DBF" header block of a PRMS GIS file and
# collect the variable names it declares. Returns the *still-open* connection
# (positioned just past the header) together with the names, so the caller
# can continue reading data records from the same file.
get_PRMS_GIS_varnames<-function(fname) {
## INPUT: name of PRMS GIS file
## OUTPUT: list with variable names
# open file connection
f <- file(fname, open="rt")
var_txt <- character()
num_vars <- 0
inheader <- FALSE
n <- 0
# Read one comma-separated line per iteration, capped at 512 lines so a file
# without an "# End DBF" marker cannot loop forever.
repeat {
n <- n + 1
if(n>512) break()
txt <- scan(f, what=character(),sep=",",nlines=1)
# echo each line to the console for progress/debugging
cat(txt,"\n")
if("# End DBF" %in% txt) {
inheader <- FALSE
break()
}
if(inheader==TRUE) {
num_vars <- num_vars + 1
# first field holds the variable name; strip the leading "# " marker
var_txt[num_vars] <- gsub("# ","",txt[1])
}
if("# Begin DBF" %in% txt) {
inheader <- TRUE
}
}
# NOTE(review): the connection is returned open on purpose; the caller is
# responsible for close()-ing it when done.
return(list(con=f,vars=var_txt))
}
# Read record lines from an already-open PRMS GIS file connection.
# NOTE(review): the body looks unfinished upstream -- it still discards every
# record it reads (hrus/vars are unused) and returns NULL; only the concrete
# defects are fixed here.
read_PRMS_GIS_records<-function(con, hrus, vars) {
## INPUT: open connection to a PRMS GIS file, list of hrus desired
## OUTPUT: data frame for a single simulation day, for selected HRUs
  if(!isOpen(con)) {
    stop("Connection must be open before calling this function. \n")
  }
  var_txt <- character()
  num_vars <- 0
  inheader <- FALSE
  n <- 0
  repeat {
    # Fix: the original read from `f`, an undefined name copied from
    # get_PRMS_GIS_varnames(); read from the `con` argument instead.
    txt <- scan(con, what=character(),sep=",",nlines=1, comment.char="#")
    # Fix: the loop had no exit condition; stop at end-of-file, where
    # scan() returns a zero-length vector.
    if (length(txt) == 0) break
  }
  return()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VSEplot.R
\name{VSEplot}
\alias{VSEplot}
\title{VSEplot}
\usage{
VSEplot(data, padj = 0.01, ...)
}
\arguments{
\item{data}{A list of matrices outputted by the function VariantSetEnrichment}
\item{padj}{Bonferroni adjusted p-value cutoff. Default: 0.01}
\item{...}{Arguments from boxplot}
}
\description{
This function will generate a figure for VSE analysis
}
\examples{
#Load pre-saved object "bca.vse" as an example VSE output
load(file.path(system.file("extdata", "vse_output.Rda", package="VSE")))
VSEplot(bca.vse, las=2,pch=20, cex=1, cex.main=0.6, padj=0.05)
}
\keyword{VSE}
\keyword{boxplot}
|
/man/VSEplot.Rd
|
no_license
|
cran/VSE
|
R
| false
| true
| 681
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VSEplot.R
\name{VSEplot}
\alias{VSEplot}
\title{VSEplot}
\usage{
VSEplot(data, padj = 0.01, ...)
}
\arguments{
\item{data}{A list of matrices outputted by the function VariantSetEnrichment}
\item{padj}{Bonferroni adjusted p-value cutoff. Default: 0.01}
\item{...}{Arguments from boxplot}
}
\description{
This function will generate a figure for VSE analysis
}
\examples{
#Load pre-saved object "bca.vse" as an example VSE output
load(file.path(system.file("extdata", "vse_output.Rda", package="VSE")))
VSEplot(bca.vse, las=2,pch=20, cex=1, cex.main=0.6, padj=0.05)
}
\keyword{VSE}
\keyword{boxplot}
|
# Train an H2O AutoML model on the Kaggle Santander dataset and write the
# predictions (with and without row names / IDs) to CSV.
library(h2o)
h2o.init()
# Import a sample binary outcome train/test set into H2O
train <- h2o.importFile("https://raw.githubusercontent.com/caiomsouza/ml-open-datasets/master/csv-dataset/kaggle-santander-train.csv")
test <- h2o.importFile("https://raw.githubusercontent.com/caiomsouza/ml-open-datasets/master/csv-dataset/kaggle-santander-test.csv")
#head(train)
#summary(train)
#head(test)
#summary(test)
# Run AutoML for at most 60 seconds; the best model becomes aml@leader
aml <- h2o.automl(y = "TARGET", training_frame = train, max_runtime_secs = 60)
lb <- aml@leaderboard
#lb
#aml
#aml@leader
#aml@project_name
#pred <- h2o.predict(aml, test) # predict(aml, test) also works
# or:
pred <- h2o.predict(aml@leader, test)
pred.df <- as.data.frame(pred)
# NOTE(review): setwd() with an absolute user path makes this script break on
# other machines; consider relative paths instead.
setwd("~/GitHub/PWorld17/AutoML/output")
# Write CSV in R
write.csv(pred.df, file = "pred_h2o_automl.csv")
#test$ID
#head(test)
#pred.df$predict
testIds<-as.data.frame(test$ID)
submission<-data.frame(cbind(testIds,pred.df$predict))
colnames(submission)<-c("ID","PredictedProb")
# Fix: use TRUE/FALSE instead of the reassignable shorthands T/F
write.csv(submission,"pred_h2o_automl_with_ID.csv",row.names=TRUE)
write.csv(submission,"pred_h2o_automl_with_ID_no_Row_name.csv",row.names=FALSE)
|
/AutoML/h2o_automl_kaggle.R
|
no_license
|
AnyaRum/PWorld17
|
R
| false
| false
| 1,135
|
r
|
# Train an H2O AutoML model on the Kaggle Santander dataset and write the
# predictions (with and without row names / IDs) to CSV.
library(h2o)
h2o.init()
# Import a sample binary outcome train/test set into H2O
train <- h2o.importFile("https://raw.githubusercontent.com/caiomsouza/ml-open-datasets/master/csv-dataset/kaggle-santander-train.csv")
test <- h2o.importFile("https://raw.githubusercontent.com/caiomsouza/ml-open-datasets/master/csv-dataset/kaggle-santander-test.csv")
#head(train)
#summary(train)
#head(test)
#summary(test)
# Run AutoML for at most 60 seconds; the best model becomes aml@leader
aml <- h2o.automl(y = "TARGET", training_frame = train, max_runtime_secs = 60)
lb <- aml@leaderboard
#lb
#aml
#aml@leader
#aml@project_name
#pred <- h2o.predict(aml, test) # predict(aml, test) also works
# or:
pred <- h2o.predict(aml@leader, test)
pred.df <- as.data.frame(pred)
# NOTE(review): setwd() with an absolute user path makes this script break on
# other machines; consider relative paths instead.
setwd("~/GitHub/PWorld17/AutoML/output")
# Write CSV in R
write.csv(pred.df, file = "pred_h2o_automl.csv")
#test$ID
#head(test)
#pred.df$predict
testIds<-as.data.frame(test$ID)
submission<-data.frame(cbind(testIds,pred.df$predict))
colnames(submission)<-c("ID","PredictedProb")
# Fix: use TRUE/FALSE instead of the reassignable shorthands T/F
write.csv(submission,"pred_h2o_automl_with_ID.csv",row.names=TRUE)
write.csv(submission,"pred_h2o_automl_with_ID_no_Row_name.csv",row.names=FALSE)
|
# Example usage of plotrix::pie3D.labels (extracted from the package docs).
library(plotrix)
### Name: pie3D.labels
### Title: Display labels on a 3D pie chart
### Aliases: pie3D.labels
### Keywords: misc
### ** Examples
# slice sizes for the example pie
pieval<-c(2,4,6,8)
# pie3D() returns the bisector angles used to position the slice labels
bisectors<-pie3D(pieval,explode=0.1,main="3D PIE OPINIONS")
pielabels<-
c("We hate\n pies","We oppose\n pies","We don't\n care","We just love pies")
# place one label per slice along its bisector
pie3D.labels(bisectors,labels=pielabels)
|
/data/genthat_extracted_code/plotrix/examples/pie3D.labels.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 369
|
r
|
# Example usage of plotrix::pie3D.labels (extracted from the package docs).
library(plotrix)
### Name: pie3D.labels
### Title: Display labels on a 3D pie chart
### Aliases: pie3D.labels
### Keywords: misc
### ** Examples
# slice sizes for the example pie
pieval<-c(2,4,6,8)
# pie3D() returns the bisector angles used to position the slice labels
bisectors<-pie3D(pieval,explode=0.1,main="3D PIE OPINIONS")
pielabels<-
c("We hate\n pies","We oppose\n pies","We don't\n care","We just love pies")
# place one label per slice along its bisector
pie3D.labels(bisectors,labels=pielabels)
|
# Load libraries
# NOTE(review): require() inside lapply() returns FALSE instead of erroring
# when a package is missing; library() per package would fail loudly.
x <- c('scales', 'gridExtra', 'tidyverse', 'readxl', 'dummies', 'plotly', 'broom')
lapply(x, require, character.only = TRUE)
# Load data
df <- read_csv('jp_map.csv')
# Fit a Diminishing Return Curve to the points
## 1. Objective for the ADBUDG diminishing-returns fit. Given parameters
## x = c(upper asymptote, lower asymptote, shape, half-saturation constant),
## returns the sum of squared errors between the curve's predicted return and
## the observed Return at each Spend value.
Ufun<-function(x, Spend, Return) {
  spend_term <- Spend^x[3]
  fitted_return <- x[2] + (x[1] - x[2]) * (spend_term / (x[4] + spend_term))
  sum((fitted_return - Return)^2)
}
# For every program in df: plot the raw Spend/Return points, fit the ADBUDG
# diminishing-returns curve via nlminb(), and plot the data with the fit.
for (program_plot in unique(df$program)) {
# Set some parameters
channelName = program_plot
# If you want to filter your data set, there's a filter for you here
df_1 <- df %>%
filter(program == program_plot)
# For an initial plot of the spend vs return data, set the axis length
maxX = 1.05*max(df_1$Spend)
maxY = 1.05*max(df_1$total_conversion_value)
# Set up data frame for plotting
myPlotDataDF = data.frame(Return = df_1$total_conversion_value, Spend = df_1$Spend)
# Create a simple scatter plot with ggplot2
simpleScatterPlot <- ggplot(myPlotDataDF, aes(x = Spend, y = Return)) +
geom_point(color="black") +
theme(panel.background = element_rect(fill = 'grey85'),
panel.grid.major = element_line(colour = "white")) +
coord_cartesian(ylim = c(0,maxY), xlim = c(0,maxX)) +
scale_x_continuous(labels = dollar) +
scale_y_continuous(labels = comma) +
ggtitle(paste(channelName))
print(simpleScatterPlot)
## 2. Set parameters to 'step through' in part #3
## in the vector c(a,b,c,d):
## a -> The maximum amount of return that an individual spend could achieve over a long term
## b -> The minimum amount of return that an individual spend could achieve over a long term
## c -> Shape of the curve (should probably start at 1)
## d -> The intercept, or an amount of return that's expected when no spend has occurred
startValVec = c(0,
0,
1,
min(df_1$total_conversion_value))
minValVec = c(min(df_1$total_conversion_value),
min(df_1$total_conversion_value)-0.5*min(df_1$total_conversion_value),
1.001,
min(df_1$total_conversion_value))
maxValVec = c(max(df_1$total_conversion_value)*2,
min(df_1$total_conversion_value),
2,
min(df_1$total_conversion_value))
# 3. Optimize the diminishing return curve
## The nlminb() function will find the optimal fit by
## stepping through the parameters in step 2
optim.parms<-nlminb(objective=Ufun,start=startValVec,
lower=minValVec,
upper=maxValVec,
control=list(iter.max=10000,eval.max=1000),
Spend = df_1$Spend,
Return = df_1$total_conversion_value)
# NOTE(review): inside a for loop, bare expressions like the ones below do
# not auto-print (auto-printing only happens at top level); wrap in print()
# if console output is expected.
optim.parms
a = optim.parms$par[1]
b = optim.parms$par[2]
# Note: assigning to `c` and `d` shadows the base functions of the same name
# for value lookup only; function calls such as c(0, maxY) still resolve to
# base::c because R skips non-function bindings when looking up a call.
c = optim.parms$par[3]
d = optim.parms$par[4]
## What were the optimal params?
a
b
c
d
# 4. Plot the diminishing return curve
## The points that construct the curve have to be computed:
## The spend is on the X axis, so we generate 1,000 X values to plot
curveDFx = seq(from=0, to=max(df_1$Spend)*2, length.out=1000)
## The return is on the Y axis, and those are generated by our optimized function, given the 1,000 X values
curveDFy = b+(a-b)*((curveDFx^c)/(d+(curveDFx^c)))
## Construct the dataframe to plot
curveDF = data.frame(Spend = curveDFx, Return = curveDFy)
## For an initial plot of the spend vs return data, set the axis length
maxX = 1.05*max(curveDFx, max(df_1$Spend))
maxY = 1.05*max(curveDFy, max(df_1$total_conversion_value))
## Assemble the data frames
myPlotDataDF = data.frame(Return = df_1$total_conversion_value, Spend = df_1$Spend)
optimLineDF = data.frame(Spend = curveDFx, Return = curveDFy)
# 5. Plot the points and the diminishing return curve
scatterPlotPlusFit <- ggplot(myPlotDataDF, aes(x = Spend, y = Return)) +
geom_point(color="black", shape = 16) +
theme(panel.background = element_rect(fill = 'grey85'),
panel.grid.major = element_line(colour = "white")) +
geom_line(data = optimLineDF, aes(x = Spend, y = Return, color = "darkgreen")) +
scale_color_manual(labels = "Optimized ADBUDG Fit",values=c('darkgreen')) +
theme(legend.title=element_blank(), legend.position = "bottom") +
coord_cartesian(ylim = c(0,maxY), xlim = c(0,maxX)) +
scale_x_continuous(labels = dollar) +
scale_y_continuous(labels = comma) +
ggtitle(paste(channelName, "Data & Model Fit", sep = " "))
print(scatterPlotPlusFit)
}
|
/drc_jp.R
|
no_license
|
nikhilkaul1234/attribution-master
|
R
| false
| false
| 4,764
|
r
|
# Load libraries
# NOTE(review): require() inside lapply() returns FALSE instead of erroring
# when a package is missing; library() per package would fail loudly.
x <- c('scales', 'gridExtra', 'tidyverse', 'readxl', 'dummies', 'plotly', 'broom')
lapply(x, require, character.only = TRUE)
# Load data
df <- read_csv('jp_map.csv')
# Fit a Diminishing Return Curve to the points
## 1. Objective for the ADBUDG diminishing-returns fit. Given parameters
## x = c(upper asymptote, lower asymptote, shape, half-saturation constant),
## returns the sum of squared errors between the curve's predicted return and
## the observed Return at each Spend value.
Ufun<-function(x, Spend, Return) {
  spend_term <- Spend^x[3]
  fitted_return <- x[2] + (x[1] - x[2]) * (spend_term / (x[4] + spend_term))
  sum((fitted_return - Return)^2)
}
for (program_plot in unique(df$program)) {
# Set some parameters
channelName = program_plot
# If you want to filter your data set, there's a filter for you here
df_1 <- df %>%
filter(program == program_plot)
# For an initial plot of the spend vs return data, set the axis length
maxX = 1.05*max(df_1$Spend)
maxY = 1.05*max(df_1$total_conversion_value)
# Set up data frame for plotting
myPlotDataDF = data.frame(Return = df_1$total_conversion_value, Spend = df_1$Spend)
# Create a simple scatter plot with ggplot2
simpleScatterPlot <- ggplot(myPlotDataDF, aes(x = Spend, y = Return)) +
geom_point(color="black") +
theme(panel.background = element_rect(fill = 'grey85'),
panel.grid.major = element_line(colour = "white")) +
coord_cartesian(ylim = c(0,maxY), xlim = c(0,maxX)) +
scale_x_continuous(labels = dollar) +
scale_y_continuous(labels = comma) +
ggtitle(paste(channelName))
print(simpleScatterPlot)
## 2. Set parameters to 'step through' in part #3
## in the vector c(a,b,c,d):
## a -> The maximum amount of return that an individual spend could achieve over a long term
## b -> The minimum amount of return that an individual spend could achieve over a long term
## c -> Shape of the curve (should probably start at 1)
## d -> The intercept, or an amount of return that's expected when no spend has occurred
startValVec = c(0,
0,
1,
min(df_1$total_conversion_value))
minValVec = c(min(df_1$total_conversion_value),
min(df_1$total_conversion_value)-0.5*min(df_1$total_conversion_value),
1.001,
min(df_1$total_conversion_value))
maxValVec = c(max(df_1$total_conversion_value)*2,
min(df_1$total_conversion_value),
2,
min(df_1$total_conversion_value))
# 3. Optimize the diminishing return curve
## The nlminb() function will find the optimal fit by
## stepping through the parameters in step 2
optim.parms<-nlminb(objective=Ufun,start=startValVec,
lower=minValVec,
upper=maxValVec,
control=list(iter.max=10000,eval.max=1000),
Spend = df_1$Spend,
Return = df_1$total_conversion_value)
optim.parms
a = optim.parms$par[1]
b = optim.parms$par[2]
c = optim.parms$par[3]
d = optim.parms$par[4]
## What were the optimal params?
a
b
c
d
# 4. Plot the diminishing return curve
## The points that construct the curve have to be computed:
## The spend is on the X axis, so we generate 10k X values to plot
curveDFx = seq(from=0, to=max(df_1$Spend)*2, length.out=1000)
## The return is on the Y axis, and those are generated by our optimized function, given the 10k X values
curveDFy = b+(a-b)*((curveDFx^c)/(d+(curveDFx^c)))
## Construct the dataframe to plot
curveDF = data.frame(Spend = curveDFx, Return = curveDFy)
## For an initial plot of the spend vs return data, set the axis length
maxX = 1.05*max(curveDFx, max(df_1$Spend))
maxY = 1.05*max(curveDFy, max(df_1$total_conversion_value))
## Assemble the data frames
myPlotDataDF = data.frame(Return = df_1$total_conversion_value, Spend = df_1$Spend)
optimLineDF = data.frame(Spend = curveDFx, Return = curveDFy)
# 5. Plot the points and the diminishing return curve
scatterPlotPlusFit <- ggplot(myPlotDataDF, aes(x = Spend, y = Return)) +
geom_point(color="black", shape = 16) +
theme(panel.background = element_rect(fill = 'grey85'),
panel.grid.major = element_line(colour = "white")) +
geom_line(data = optimLineDF, aes(x = Spend, y = Return, color = "darkgreen")) +
scale_color_manual(labels = "Optimized ADBUDG Fit",values=c('darkgreen')) +
theme(legend.title=element_blank(), legend.position = "bottom") +
coord_cartesian(ylim = c(0,maxY), xlim = c(0,maxX)) +
scale_x_continuous(labels = dollar) +
scale_y_continuous(labels = comma) +
ggtitle(paste(channelName, "Data & Model Fit", sep = " "))
print(scatterPlotPlusFit)
}
|
library(targets) # devtools::install_github("ropensci/targets")
tar_option_set(
packages = c(
"tidyverse",
"icedisas", # devtools::install_github("awconway/icedisas")
"gtsummary",
"gt"
)
)
list(
tar_target(
data_raw,
read_csv("https://raw.githubusercontent.com/awconway/hfnosedrct/6b6374222d8e413cfe05448d0fb8adb675ea0849/data/HighFlowNasalOxygenT_DATA_2020-05-21_1511.csv")
),
tar_target(data_label, label_data(data_raw) %>%
filter(str_detect(id, "P")) %>%
select(everything(), -starts_with("screen")) %>%
# P084 removed (procedure was not performed)
filter(id != "P084")),
tar_target(
data,
data_label %>%
rowwise() %>%
mutate(isas_mean = mean(c(
isasvomit,
isassameanesthetic,
isasitch,
isasrelaxed,
isaspain,
isassafe,
isastoocoldhot,
isassurgerypain,
isassatisfiedcare,
isasfeltgood,
isashurt
), na.rm = TRUE)) %>%
ungroup() %>%
select(
id,
age,
sex.factor,
lastfood,
lastfluids,
procedurestart,
procedureend,
propofol,
midazolam,
fentanyl,
remifentanil,
otheropioidname,
otheropioiddose,
otheropioidunits,
admward.factor,
admsource.factor,
asaclass.factor,
procedure.factor,
deepsedation.factor,
isas_mean,
starts_with("isas"),
)
),
tar_target(
isas_plot,
data %>%
mutate(ISAS = "ISAS") %>%
ggplot() +
ggbeeswarm::geom_beeswarm(aes(y = isas_mean, x = ISAS)) +
theme_minimal() +
theme(
axis.title.x = element_blank(),
legend.position = "none"
) +
labs(y = "ISAS score")
),
tar_target(model_data, data %>%
mutate(
procedure.factor = fct_drop(procedure.factor),
admward.factor = fct_drop(admward.factor),
sex.factor = fct_drop(sex.factor),
procedure.factor = case_when(
procedure.factor == "PPM" ~ "PPM",
procedure.factor == "PPM generator change" ~ "PPM",
procedure.factor == "PPM lead revision" ~ "PPM",
procedure.factor == "ICD" ~ "ICD",
procedure.factor == "ICD lead revision" ~ "ICD",
procedure.factor == "ICD generator change" ~ "ICD",
procedure.factor == "CRT-D" ~ "CRT",
procedure.factor == "CRT-P" ~ "CRT",
TRUE ~ "Other"
),
`Cardiac Resynchronization Therapy` = ifelse(
procedure.factor == "CRT", 1, 0
),
other_sedation = ifelse(is.na(otheropioiddose), 1, 0),
`Total dose of fentanyl (mcg)` = ifelse(
!is.na(remifentanil), remifentanil, fentanyl
),
`Procedure duration (hours)` =
as.numeric(procedureend - procedurestart) / 60,
food_fast = procedurestart - lastfood,
fluid_fast = procedurestart - lastfluids,
`Day case` = ifelse(admward.factor == "Day surgery", 1, 0)
) %>%
replace_na(list(
propofol = 0,
midazolam = 0,
fentanyl = 0
)) %>%
rename(`ASA Class` = "asaclass.factor")),
tar_target(model, lm(isas_mean ~
midazolam +
propofol +
`Total dose of fentanyl (mcg)` +
age +
sex.factor +
`ASA Class` +
`Day case` +
`Cardiac Resynchronization Therapy` +
`Procedure duration (hours)`,
data = model_data
)),
tar_target(model_table, model %>%
tbl_regression() %>%
bold_p(t = 0.05))
)
|
/_targets.R
|
permissive
|
awconway/icedisas
|
R
| false
| false
| 3,491
|
r
|
library(targets) # devtools::install_github("ropensci/targets")
tar_option_set(
packages = c(
"tidyverse",
"icedisas", # devtools::install_github("awconway/icedisas")
"gtsummary",
"gt"
)
)
list(
tar_target(
data_raw,
read_csv("https://raw.githubusercontent.com/awconway/hfnosedrct/6b6374222d8e413cfe05448d0fb8adb675ea0849/data/HighFlowNasalOxygenT_DATA_2020-05-21_1511.csv")
),
tar_target(data_label, label_data(data_raw) %>%
filter(str_detect(id, "P")) %>%
select(everything(), -starts_with("screen")) %>%
# P084 removed (procedure was not performed)
filter(id != "P084")),
tar_target(
data,
data_label %>%
rowwise() %>%
mutate(isas_mean = mean(c(
isasvomit,
isassameanesthetic,
isasitch,
isasrelaxed,
isaspain,
isassafe,
isastoocoldhot,
isassurgerypain,
isassatisfiedcare,
isasfeltgood,
isashurt
), na.rm = TRUE)) %>%
ungroup() %>%
select(
id,
age,
sex.factor,
lastfood,
lastfluids,
procedurestart,
procedureend,
propofol,
midazolam,
fentanyl,
remifentanil,
otheropioidname,
otheropioiddose,
otheropioidunits,
admward.factor,
admsource.factor,
asaclass.factor,
procedure.factor,
deepsedation.factor,
isas_mean,
starts_with("isas"),
)
),
tar_target(
isas_plot,
data %>%
mutate(ISAS = "ISAS") %>%
ggplot() +
ggbeeswarm::geom_beeswarm(aes(y = isas_mean, x = ISAS)) +
theme_minimal() +
theme(
axis.title.x = element_blank(),
legend.position = "none"
) +
labs(y = "ISAS score")
),
tar_target(model_data, data %>%
mutate(
procedure.factor = fct_drop(procedure.factor),
admward.factor = fct_drop(admward.factor),
sex.factor = fct_drop(sex.factor),
procedure.factor = case_when(
procedure.factor == "PPM" ~ "PPM",
procedure.factor == "PPM generator change" ~ "PPM",
procedure.factor == "PPM lead revision" ~ "PPM",
procedure.factor == "ICD" ~ "ICD",
procedure.factor == "ICD lead revision" ~ "ICD",
procedure.factor == "ICD generator change" ~ "ICD",
procedure.factor == "CRT-D" ~ "CRT",
procedure.factor == "CRT-P" ~ "CRT",
TRUE ~ "Other"
),
`Cardiac Resynchronization Therapy` = ifelse(
procedure.factor == "CRT", 1, 0
),
other_sedation = ifelse(is.na(otheropioiddose), 1, 0),
`Total dose of fentanyl (mcg)` = ifelse(
!is.na(remifentanil), remifentanil, fentanyl
),
`Procedure duration (hours)` =
as.numeric(procedureend - procedurestart) / 60,
food_fast = procedurestart - lastfood,
fluid_fast = procedurestart - lastfluids,
`Day case` = ifelse(admward.factor == "Day surgery", 1, 0)
) %>%
replace_na(list(
propofol = 0,
midazolam = 0,
fentanyl = 0
)) %>%
rename(`ASA Class` = "asaclass.factor")),
tar_target(model, lm(isas_mean ~
midazolam +
propofol +
`Total dose of fentanyl (mcg)` +
age +
sex.factor +
`ASA Class` +
`Day case` +
`Cardiac Resynchronization Therapy` +
`Procedure duration (hours)`,
data = model_data
)),
tar_target(model_table, model %>%
tbl_regression() %>%
bold_p(t = 0.05))
)
|
# This code corresponds to Fig S10 in Alexander et al.
# It calculates statistics presented in the main results text
# 1. Loading necessary libraries
library(tidyverse)
# 2. Setwd
# NOTE(review): setwd() makes the script depend on the launch directory;
# consider project-relative paths instead.
setwd("chickadee/output/")
# 3. Reading in data (tab delimited), dropping last blank row
temp <- read_tsv("../data/Table_S1.txt")
temp <- temp[1:165,]
#4. Appleton City genetic cluster temporal comparisons
# Birds are classed BC/CC when a cluster assignment is >= 0.95, else hybrid
# Smithsonian percentages
temp %>% filter(Sampling_period=="SMITHSONIAN") %>% filter(grepl("Appleton",Specific_locality)) %>% mutate(status=ifelse(BC_genetic_cluster_assignment>=0.95,"BC",ifelse(CC_genetic_cluster_assignment>=0.95,"CC","hybrid"))) %>% filter(!(is.na(status))) %>% group_by(status) %>% tally() %>% mutate(perc=n/sum(n)*100)
# Modern percentages
temp %>% filter(Sampling_period=="MODERN") %>% filter(grepl("Appleton",Specific_locality)) %>% mutate(status=ifelse(BC_genetic_cluster_assignment>=0.95,"BC",ifelse(CC_genetic_cluster_assignment>=0.95,"CC","hybrid"))) %>% group_by(status) %>% tally() %>% mutate(perc=n/sum(n)*100)
# Mann-Whitney U-test (wilcox.test on two independent samples)
smithsonian <- as.matrix(temp %>% filter(Sampling_period=="SMITHSONIAN") %>% filter(grepl("Appleton",Specific_locality)) %>% select(BC_genetic_cluster_assignment))[,1]
smithsonian <- cbind(smithsonian,0)
modern <- as.matrix(temp %>% filter(Sampling_period=="MODERN") %>% filter(grepl("Appleton",Specific_locality)) %>% select(BC_genetic_cluster_assignment))[,1]
modern <- cbind(modern,1)
data <- rbind(smithsonian,modern)
data <- as.data.frame(data)
names(data) <- c("structure","population")
wilcox.test(data$structure~data$population)
#5. Rockville genetic cluster temporal comparisons
# Smithsonian percentages
temp %>% filter(Sampling_period=="SMITHSONIAN") %>% filter(grepl("Rockville",Specific_locality)) %>% mutate(status=ifelse(BC_genetic_cluster_assignment>=0.95,"BC",ifelse(CC_genetic_cluster_assignment>=0.95,"CC","hybrid"))) %>% group_by(status) %>% tally() %>% mutate(perc=n/sum(n)*100)
# Modern percentages
temp %>% filter(Sampling_period=="MODERN") %>% filter(grepl("Rockville",Specific_locality)) %>% mutate(status=ifelse(BC_genetic_cluster_assignment>=0.95,"BC",ifelse(CC_genetic_cluster_assignment>=0.95,"CC","hybrid"))) %>% group_by(status) %>% tally() %>% mutate(perc=n/sum(n)*100)
# Mann-Whitney U-test
smithsonian <- as.matrix(temp %>% filter(Sampling_period=="SMITHSONIAN") %>% filter(grepl("Rockville",Specific_locality)) %>% select(BC_genetic_cluster_assignment))[,1]
smithsonian <- cbind(smithsonian,0)
modern <- as.matrix(temp %>% filter(Sampling_period=="MODERN") %>% filter(grepl("Rockville",Specific_locality)) %>% select(BC_genetic_cluster_assignment))[,1]
modern <- cbind(modern,1)
data <- rbind(smithsonian,modern)
data <- as.data.frame(data)
names(data) <- c("structure","population")
wilcox.test(data$structure~data$population)
#6. Song genetic cluster comparisons (Welch two-sample t-test)
BCsong <- as.matrix(temp %>% filter(Song_summary=="PUREBC") %>% select(BC_genetic_cluster_assignment))[,1]
sd(BCsong)
CCsong <- as.matrix(temp %>% filter(Song_summary=="PURECC") %>% select(CC_genetic_cluster_assignment))[,1]
sd(CCsong)
t.test(BCsong,CCsong)
|
/chickadee/scripts/Fig_S10_stats_presented_in_text.R
|
permissive
|
laninsky/project_logs
|
R
| false
| false
| 3,125
|
r
|
# This code corresponds to Fig S10 in Alexander et al.
# It calculates statistics presented in the main results text

# 1. Loading necessary libraries
library(tidyverse)

# 2. Set working directory
# NOTE(review): setwd() makes the script depend on the caller's working
# directory; kept as-is to preserve the original relative paths.
setwd("chickadee/output/")

# 3. Reading in data (tab delimited), dropping the trailing blank row
temp <- read_tsv("../data/Table_S1.txt")
temp <- temp[1:165, ]

# Classify each sample as pure BC, pure CC, or hybrid using the 0.95
# genetic-cluster-assignment threshold.
add_status <- function(df) {
  df %>%
    mutate(status = ifelse(BC_genetic_cluster_assignment >= 0.95, "BC",
      ifelse(CC_genetic_cluster_assignment >= 0.95, "CC", "hybrid")))
}

# Percentage of each status class for one sampling period and locality.
# drop_na_status reproduces the NA filter that the original script applied
# only to the Smithsonian Appleton subset -- TODO confirm whether the other
# subsets should drop NA statuses as well.
status_percentages <- function(df, period, locality, drop_na_status = FALSE) {
  out <- df %>%
    filter(Sampling_period == period) %>%
    filter(grepl(locality, Specific_locality)) %>%
    add_status()
  if (drop_na_status) {
    out <- out %>% filter(!is.na(status))
  }
  out %>%
    group_by(status) %>%
    tally() %>%
    mutate(perc = n / sum(n) * 100)
}

# Mann-Whitney U-test comparing BC genetic cluster assignment between
# Smithsonian (historic, coded 0) and modern (coded 1) samples at a locality.
temporal_wilcox <- function(df, locality) {
  bc_values <- function(period) {
    as.matrix(df %>%
      filter(Sampling_period == period) %>%
      filter(grepl(locality, Specific_locality)) %>%
      select(BC_genetic_cluster_assignment))[, 1]
  }
  combined <- as.data.frame(rbind(
    cbind(bc_values("SMITHSONIAN"), 0),
    cbind(bc_values("MODERN"), 1)
  ))
  names(combined) <- c("structure", "population")
  wilcox.test(combined$structure ~ combined$population)
}

# 4. Appleton City genetic cluster temporal comparisons
# Smithsonian percentages
status_percentages(temp, "SMITHSONIAN", "Appleton", drop_na_status = TRUE)
# Modern percentages
status_percentages(temp, "MODERN", "Appleton")
# Mann-Whitney U-test
temporal_wilcox(temp, "Appleton")

# 5. Rockville genetic cluster temporal comparisons
#    (original script mislabelled this section as a second "#4")
# Smithsonian percentages
status_percentages(temp, "SMITHSONIAN", "Rockville")
# Modern percentages
status_percentages(temp, "MODERN", "Rockville")
# Mann-Whitney U-test
temporal_wilcox(temp, "Rockville")

# 6. Song genetic cluster comparisons (originally numbered "#5")
BCsong <- as.matrix(temp %>% filter(Song_summary == "PUREBC") %>%
  select(BC_genetic_cluster_assignment))[, 1]
sd(BCsong)
CCsong <- as.matrix(temp %>% filter(Song_summary == "PURECC") %>%
  select(CC_genetic_cluster_assignment))[, 1]
sd(CCsong)
t.test(BCsong, CCsong)
|
% Copyright 2016-2018 Lingfei Wang
%
% This file is part of Findr.
%
% Findr is free software: you can redistribute it and/or modify
% it under the terms of the GNU Affero General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Findr is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU Affero General Public License for more details.
%
% You should have received a copy of the GNU Affero General Public License
% along with Findr. If not, see <http://www.gnu.org/licenses/>.
%
\name{findr.pijs_gassist_pv}
\alias{findr.pijs_gassist_pv}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Predict pairwise regulation p-values with expression and eQTL data
}
\description{
Inference of regulation E(A)->A->B p-values with expression data of A,B and best eQTL data for A as E(A). For highest precision, the user should not pre-select secondary target genes (B), but instead include as many B's of the same type as possible, and pick the gene pairs of interest only after Findr's calculation.
}
\usage{
findr.pijs_gassist_pv(dg, dt, dt2, na = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dg}{
Input matrix of best eQTL genotype data E(A), each row of which is the best eQTL of the corresponding row of t. Data is in matrix format. Element [i,j] is the genotype value of the best eQTL of gene i of sample j, and should be among values 0,1,...,na. The matrix has dimension (nt,ns).
}
\item{dt}{
Input matrix of expression levels of A in matrix format. Element [i,j] is the expression level of gene i of sample j. The matrix has dimension (nt,ns).
}
\item{dt2}{
Input matrix of expression levels of B in matrix format. Element [i,j] is the expression level of gene i of sample j. The matrix has dimension (nt2,ns)
}
\item{na}{
Number of alleles for the species considered. This constrains every genotype data to be among 0,1,...,na. If unspecified (NULL), na is automatically determined as the maximum value of dg.
}
}
\value{
\item{p1}{Output vector of p-values of test 1, E(A)->A (alternative) versus E(A) A (null), in array format. Element [i] is the probability of best eQTL of gene i regulates gene i. The vector has dimension (nt).}
\item{p2}{Output matrix of p-values of test 2, E(A)->A- - -B with E(A)->B (alternative) versus E(A)->A<-B (null), in matrix format. Element [i,j] is the probability of alternative hypothesis for A = gene i and B = gene j. The matrix has dimension (nt,nt2).}
\item{p3}{Output matrix of p-values of test 3, E(A)->A->B (null) versus E(A)->A- - -B with E(A)->B (alternative), in matrix format. Element [i,j] is the probability of null hypothesis for A = gene i and B = gene j. The matrix has dimension (nt,nt2).}
\item{p4}{Output matrix of p-values of test 4, E(A)->A- - -B with E(A)->B (alternative) versus E(A)->A B, in matrix format. Element [i,j] is the probability of alternative hypothesis for A = gene i and B = gene j. The matrix has dimension (nt,nt2).}
\item{p5}{Output matrix of p-values of test 5, E(A)->A- - -B with E(A)->B (alternative) versus B<-E(A)->A, in matrix format. Element [i,j] is the probability of alternative hypothesis for A = gene i and B = gene j. The matrix has dimension (nt,nt2).}
}
\note{
The methodology is to first calculate the log likelihood ratio (LLR) for each test, and then convert the LLRs into p-values.
nt: Number of genes for A.
nt2: Number of genes for B.
ns: Number of samples.
}
\seealso{
findr.pijs_gassist
findr.pijs_cassist_pv
findr.pij_rank_pv
}
\examples{
library(findr)
data(geuvadis)
ans=findr.pijs_gassist_pv(geuvadis$dg,geuvadis$dmi,geuvadis$dt2)
}
|
/findr/man/findr.pijs_gassist_pv.Rd
|
no_license
|
junghyunJJ/findr-R
|
R
| false
| false
| 3,838
|
rd
|
% Copyright 2016-2018 Lingfei Wang
%
% This file is part of Findr.
%
% Findr is free software: you can redistribute it and/or modify
% it under the terms of the GNU Affero General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% Findr is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU Affero General Public License for more details.
%
% You should have received a copy of the GNU Affero General Public License
% along with Findr. If not, see <http://www.gnu.org/licenses/>.
%
\name{findr.pijs_gassist_pv}
\alias{findr.pijs_gassist_pv}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Predict pairwise regulation p-values with expression and eQTL data
}
\description{
Inference of regulation E(A)->A->B p-values with expression data of A,B and best eQTL data for A as E(A). For highest precision, the user should not pre-select secondary target genes (B), but instead include as many B's of the same type as possible, and pick the gene pairs of interest only after Findr's calculation.
}
\usage{
findr.pijs_gassist_pv(dg, dt, dt2, na = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dg}{
Input matrix of best eQTL genotype data E(A), each row of which is the best eQTL of the corresponding row of t. Data is in matrix format. Element [i,j] is the genotype value of the best eQTL of gene i of sample j, and should be among values 0,1,...,na. The matrix has dimension (nt,ns).
}
\item{dt}{
Input matrix of expression levels of A in matrix format. Element [i,j] is the expression level of gene i of sample j. The matrix has dimension (nt,ns).
}
\item{dt2}{
Input matrix of expression levels of B in matrix format. Element [i,j] is the expression level of gene i of sample j. The matrix has dimension (nt2,ns)
}
\item{na}{
Number of alleles for the species considered. This constrains every genotype data to be among 0,1,...,na. If unspecified (NULL), na is automatically determined as the maximum value of dg.
}
}
\value{
\item{p1}{Output vector of p-values of test 1, E(A)->A (alternative) versus E(A) A (null), in array format. Element [i] is the probability of best eQTL of gene i regulates gene i. The vector has dimension (nt).}
\item{p2}{Output matrix of p-values of test 2, E(A)->A- - -B with E(A)->B (alternative) versus E(A)->A<-B (null), in matrix format. Element [i,j] is the probability of alternative hypothesis for A = gene i and B = gene j. The matrix has dimension (nt,nt2).}
\item{p3}{Output matrix of p-values of test 3, E(A)->A->B (null) versus E(A)->A- - -B with E(A)->B (alternative), in matrix format. Element [i,j] is the probability of null hypothesis for A = gene i and B = gene j. The matrix has dimension (nt,nt2).}
\item{p4}{Output matrix of p-values of test 4, E(A)->A- - -B with E(A)->B (alternative) versus E(A)->A B, in matrix format. Element [i,j] is the probability of alternative hypothesis for A = gene i and B = gene j. The matrix has dimension (nt,nt2).}
\item{p5}{Output matrix of p-values of test 5, E(A)->A- - -B with E(A)->B (alternative) versus B<-E(A)->A, in matrix format. Element [i,j] is the probability of alternative hypothesis for A = gene i and B = gene j. The matrix has dimension (nt,nt2).}
}
\note{
The methodology is to first calculate the log likelihood ratio (LLR) for each test, and then convert the LLRs into p-values.
nt: Number of genes for A.
nt2: Number of genes for B.
ns: Number of samples.
}
\seealso{
findr.pijs_gassist
findr.pijs_cassist_pv
findr.pij_rank_pv
}
\examples{
library(findr)
data(geuvadis)
ans=findr.pijs_gassist_pv(geuvadis$dg,geuvadis$dmi,geuvadis$dt2)
}
|
\name{kernel2dmeitsjer}
\alias{kernel2dmeitsjer}
\title{
Create a Kernel Matrix
}
\description{
Create a kernel matrix to be used in a 2-D convolution smooth (e.g., using kernel2dsmooth).
}
\usage{
kernel2dmeitsjer(type = "gauss", ...)
}
\arguments{
\item{type}{
character name of the kernel to be created.
}
\item{\dots}{
Other arguments to the specific kernel type. See Details section below.
}
}
\details{
The specific types of kernels that can be made are as follows. In each case, h=||x-x_c|| is the distance from the center of the kernel. Every kernel that requires single numerics nx and ny be specified returns an nx by ny matrix. Distances h are found by setting up a grid based on 1 to nx and 1 to ny denoting the points as (xgrid, ygrid), finding the center of the grid as (x.center, y.center)=(nx/2,ny/2), and then h = sqrt( (xgrid - x.center)^2 + (ygrid - y.center)^2). For kernels that better reflect distance (e.g., using great-circle distance, anisotropic distances, etc.), the matrix h can be passed instead of nx and ny, but only for those kernels that take h as an argument. In each case with sigma as an argument, sigma is the smoothing parameter. There are many kernel functions allowed here, and not all of them make sense for every purpose.
\dQuote{average} gives a kernel that will give an average of the nearest neighbors in each direction (can take an average grid points further in the x- direction than the y-direction, and vice versa). Requires that nx and ny be specified, and the resulting kernel is defined by an nx by ny matrix with each element equal to 1/(nx*ny). If nx = ny, then the result is the same as the boxcar kernel below.
\dQuote{boxcar} the boxcar kernel is an n by n matrix of 1/n^2. This results in a neighborhood smoothing when used with kernel2dsmooth giving the type of smoothed fields utilized, e.g., in Roberts and Lean (2008) and Ebert (2008). Requires that n be specified. Note that usually the boxcar is a square matrix of ones, which gives the sum of the nearest n^2 grid points. This gives the average.
\dQuote{cauchy} The Cauchy kernel is given by K(sigma)=1/(1+h^2/sigma). Requires the arguments nx, ny and sigma. See Souza (2010) for more details.
\dQuote{disk} gives a circular averaging (pill-box) kernel (aka, disk kernel). Similar to \dQuote{average} or \dQuote{boxcar}, but this kernel accounts for a specific distance in all directions from the center (i.e., an average of grid squares within a circular radius of the central point). This results in the convolution radius smoothing applied in Davis et al. (2006a,2006b). Requires that r (the desired radius) be supplied, and a square matrix of appropriate dimension is returned.
\dQuote{epanechnikov} The Epanechnikov kernel is defined by max(0, 3/4*(1-h/(sigma^2))). See, e.g., Hastie and Tibshirani (1990). Requires arguments nx, ny, and sigma.
\dQuote{exponential} The exponential kernel is given by K(sigma) = a*exp(-h/(2*sigma)). Requires the arguments nx, ny and sigma, and optionally takes the argument a (default is a=1). An nx by ny matrix is returned. See Souza (2010) for more details.
\dQuote{gauss} The Gaussian kernel defined by K(sigma) = 1/(2*pi*sigma^2)*exp(-h/(2*sigma)). Requires the arguments nx, ny and sigma be specified. The convolution with this kernel results in a Gaussian smoothed field as used in the practically perfect hindcast method of Brooks et al. (1998) (see also Ebert 2008) and studied by Sobash et al (2011) for spatial forecast verification purposes. Returns an nx by ny matrix.
\dQuote{laplacian} Laplacian Operator kernel, which gives the sum of second partial derivatives for each direction. It is often used for edge detection because it identifies areas of rapid intensity change. Typically, it is first applied to a field that has been smoothed first by a Gaussian kernel smoother (or an approximation thereof; cf. type \dQuote{LoG} below). This method optionally takes the parameter alpha, which controls the shape of the Laplacian kernel, which must be between 0 and 1 (inclusive), or else it will be set to 0 (if < 0) or 1 (if > 1). Returns a 3 by 3 kernel matrix.
\dQuote{LoG} Laplacian of Gaussian kernel. This combines the Laplacian Operator kernel with that of a Gaussian kernel. The form is given by K(sigma) = -1/(pi*sigma^4)*exp(-h/(2*sigma^2))*(1-h/(2*sigma^2)). Requires the arguments nx, ny and sigma be specified. Returns an nx by ny matrix.
\dQuote{minvar} A minimum variance kernel, which is given by 3/8*(3 - 5*h/sigma^2) if h <= 1, and zero otherwise (see, e.g., Hastie and Tibshirani, 1990). Requires the arguments nx, ny, and sigma be specified. Returns an nx by ny matrix.
\dQuote{multiquad} The multiquadratic kernel is similar to the rational quadratic kernel, and is given by K(a) = sqrt(h + a^2). The inverse is given by 1/K(a). Requires the arguments nx, ny and a be specified. Optionally takes a logical named inverse determining whether to return the inverse multiquadratic kernel or not.
\dQuote{prewitt} Prewitt filter kernel, which emphasizes horizontal (vertical) edges through approximation of a vertical (horizontal) gradient. Optionally takes a logical argument named transpose, which if FALSE (default) emphasis is on horizontal, and if TRUE emphasis is on vertical. Returns a 3 by 3 matrix whose first row is all ones, second row is all zeros, and third row is all negative ones for the transpose=FALSE case, and the transpose of this matrix in the transpose=TRUE case.
\dQuote{power} The power kernel is defined by K(p) = -h^p. The log power kernel is similarly defined as K(p) = -log(h^p+1). Requires specification of the arguments nx, ny and p. Alternatively takes the logical do.log to determine whether the log power kernel should be returned (TRUE) or not (FALSE). Default if not passed is to do the power kernel. Returns an nx by ny matrix. See Souza (2010) for more details.
\dQuote{radial} The radial kernel returns a*|h|^(2*m-d)*log(|h|) if d is even and a*|h|^(2*m-d) otherwise. Requires arguments a, m, d, nx and ny. Replaces any missing values with zero.
\dQuote{ratquad} The rational quadratic kernel is used as an alternative to the Gaussian, and is given by K(a) = 1 - h/(h+a). Requires the arguments nx, ny and a, and returns an nx by ny matrix. See Souza (2010) for more details.
\dQuote{sobel} Same as prewitt, except that the elements 1,2 and 3,2 are replaced by two and negative two, resp.
\dQuote{student} The generalized Student's t kernel is defined by K(p)=1/(1+h^p). Requires the arguments nx, ny and p be specified. Returns an nx by ny matrix. See Souza (2010) for more details.
\dQuote{unsharp} Unsharp contrast enhancement filter. This is simply given by a 3 by 3 matrix of all zeros, except for a one in the center subtracted by a laplacian operator kernel matrix. Requires the same arguments as for \dQuote{laplacian}. Returns a 3 by 3 matrix.
\dQuote{wave} The wave kernel is defined by K(phi) = phi/h * sin( h/phi). Requires arguments nx, ny and phi be specified. Returns an nx by ny matrix.
\dQuote{oval} The oval kernel is like it sounds, it yields an oval-shaped kernel. Allows arguments a (scale in x-direction), b (scale in y-direction), n (size of kernel in x-direction) and m (size of kernel in y-direction). Default for a and b is 1, and default for n is the maximum of a and b plus two. The default for m is to be identical to n.
}
\value{
matrix of dimension determined by the specific type of kernel, and possibly user passed arguments giving the kernel to be used by \code{kernel2dsmooth}.
}
\references{
Brooks, H. E., Kay, M., and Hart, J. A. (1998) Objective limits on forecasting skill of rare events. \emph{19th Conf. Severe Local Storms}. Amer. Met. Soc., 552--555.
Davis, C. A., Brown, B. G. and Bullock, R. G. (2006a) Object-based verification of precipitation forecasts, Part I: Methodology and application to mesoscale rain areas. \emph{Mon. Wea. Rev.}, \bold{134}, 1772--1784.
Davis, C. A., Brown, B. G. and Bullock, R. G. (2006b) Object-based verification of precipitation forecasts, Part II: Application to convective rain systems. \emph{Mon. Wea. Rev.}, \bold{134}, 1785--1795.
Ebert, E. E. (2008) Fuzzy verification of high resolution gridded forecasts: A review and proposed framework. \emph{Meteorol. Appl.}, \bold{15}, 51-64. \doi{10.1002/met.25}.
Hastie, T. J. and Tibshirani, R. J. (1990) \emph{Generalized Additive Models}. Chapman \& Hall/CRC Monographs on Statistics and Applied Probability 43, 335pp.
Roberts, N. M. and Lean, H. W. (2008) Scale-selective verification of rainfall accumulations from high-resolution forecasts of convective events. \emph{Mon. Wea. Rev.}, \bold{136}, 78--97. \doi{10.1175/2007MWR2123.1}.
Sobash, R. A., Kain, J. S. Bright, D. R. Dean, A. R. Coniglio, M. C. and Weiss, S. J. (2011) Probabilistic forecast guidance for severe thunderstorms based on the identification of extreme phenomena in convection-allowing model forecasts. \emph{Wea. Forecasting}, \bold{26}, 714--728.
Souza, C. R. (2010) \emph{Kernel Functions for Machine Learning Applications}. 17 Mar 2010. Web. \url{http://crsouza.blogspot.com/2010/03/kernel-functions-for-machine-learning.html}.
}
\author{
Eric Gilleland
}
\seealso{
\code{\link{fft}}, \code{\link{kernel2dsmooth}}
}
\examples{
x <- matrix( 0, 10, 12)
x[4,5] <- 1
kmat <- kernel2dmeitsjer( "average", nx=7, ny=5)
kernel2dsmooth( x, K=kmat)
##
## Can also call 'kernel2dsmooth' directly.
##
kernel2dsmooth( x, kernel.type="boxcar", n=5)
kernel2dsmooth( x, kernel.type="cauchy", sigma=20, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="disk", r=3)
kernel2dsmooth( x, kernel.type="epanechnikov", nx=10, ny=12, sigma=4)
kernel2dsmooth( x, kernel.type="exponential", a=0.1, sigma=4, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="gauss", nx=10, ny=12, sigma=4)
kernel2dsmooth( x, kernel.type="laplacian", alpha=0)
kernel2dsmooth( x, kernel.type="LoG", nx=10, ny=12, sigma=1)
kernel2dsmooth( x, kernel.type="minvar", nx=10, ny=12, sigma=4)
kernel2dsmooth( x, kernel.type="multiquad", a=0.1, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="power", p=0.5, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="prewitt")
kernel2dsmooth( x, kernel.type="prewitt", transpose=TRUE)
kernel2dsmooth( x, kernel.type="radial", a=1, m=2, d=1, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="ratquad", a=0.1, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="sobel")
kernel2dsmooth( x, kernel.type="sobel", transpose=TRUE)
kernel2dsmooth( x, kernel.type="student", p=1.5, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="unsharp", alpha=0)
kernel2dsmooth( x, kernel.type="wave", phi=45, nx=10, ny=12)
\dontrun{
## the lennon image is in package 'fields'.
data(lennon)
kmat <- kernel2dmeitsjer( "average", nx=7, ny=5)
lennon.smAvg <- kernel2dsmooth( lennon, K=kmat)
## Can also just make a call to kernel2dsmooth, which
## will call this function.
lennon.smBox <- kernel2dsmooth( lennon, kernel.type="boxcar", n=7)
lennon.smDsk <- kernel2dsmooth( lennon, kernel.type="disk", r=5)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smAvg, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smBox, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smDsk, col=tim.colors(256), axes=FALSE)
lennon.smEpa <- kernel2dsmooth( lennon, kernel.type="epanechnikov", nx=10, ny=10, sigma=20)
lennon.smGau <- kernel2dsmooth( lennon, kernel.type="gauss", nx=10, ny=10, sigma=20)
lennon.smMvr <- kernel2dsmooth( lennon, kernel.type="minvar", nx=10, ny=10, sigma=20)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smEpa, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smGau, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smMvr, col=tim.colors(256), axes=FALSE)
lennon.smLa0 <- kernel2dsmooth( lennon, kernel.type="laplacian", alpha=0)
lennon.smLap <- kernel2dsmooth( lennon, kernel.type="laplacian", alpha=0.999)
lennon.smLoG <- kernel2dsmooth( lennon, kernel.type="LoG", nx=10, ny=10, sigma=20)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smLa0, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smLap, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smLoG, col=tim.colors(256), axes=FALSE)
lennon.smPrH <- kernel2dsmooth( lennon, kernel.type="prewitt")
lennon.smPrV <- kernel2dsmooth( lennon, kernel.type="prewitt", transpose=TRUE)
lennon.smSoH <- kernel2dsmooth( lennon, kernel.type="sobel")
lennon.smSoV <- kernel2dsmooth( lennon, kernel.type="sobel", transpose=TRUE)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon.smPrH, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smPrV, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smSoH, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smSoV, col=tim.colors(256), axes=FALSE)
lennon.smUsh <- kernel2dsmooth( lennon, kernel.type="unsharp", alpha=0.999)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smUsh, col=tim.colors(256), axes=FALSE)
lennon.smRad1 <- kernel2dsmooth( lennon, kernel.type="radial", a=2, m=2, d=1, nx=10, ny=10)
lennon.smRad2 <- kernel2dsmooth( lennon, kernel.type="radial", a=2, m=2, d=2, nx=10, ny=10)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon.smRad1, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smRad2, col=tim.colors(256), axes=FALSE)
lennon.smRQd <- kernel2dsmooth( lennon, kernel.type="ratquad", a=0.5, nx=10, ny=10)
lennon.smExp <- kernel2dsmooth( lennon, kernel.type="exponential", a=0.5, sigma=20, nx=10, ny=10)
lennon.smMQd <- kernel2dsmooth( lennon, kernel.type="multiquad", a=0.5, nx=10, ny=10)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon.smGau, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smRQd, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smExp, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smMQd, col=tim.colors(256), axes=FALSE)
lennon.smIMQ <- kernel2dsmooth( lennon, kernel.type="multiquad", a=0.5, nx=10, ny=10, inverse=TRUE)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon.smMQd, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smIMQ, col=tim.colors(256), axes=FALSE)
lennon.smWav <- kernel2dsmooth( lennon, kernel.type="wave", phi=45, nx=10, ny=10)
par( mfrow=c(1,1), mar=rep(0.1,4))
image.plot( lennon.smWav, col=tim.colors(256), axes=FALSE)
lennon.smPow <- kernel2dsmooth( lennon, kernel.type="power", p=0.5, nx=10, ny=10)
lennon.smLpw <- kernel2dsmooth( lennon, kernel.type="power", p=0.5, nx=10, ny=10, do.log=TRUE)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon.smPow, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smLpw, col=tim.colors(256), axes=FALSE)
lennon.smCau <- kernel2dsmooth( lennon, kernel.type="cauchy", sigma=20, nx=10, ny=10)
lennon.smStd <- kernel2dsmooth( lennon, kernel.type="student", p=1.5, nx=10, ny=10)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon.smCau, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smStd, col=tim.colors(256), axes=FALSE)
image.plot( lennon, kernel.type = "oval", n = 10, m = 15, a = 6, b = 3 )
}
}
\keyword{ math }
\keyword{ manip }
|
/man/kernel2dmeitsjer.Rd
|
no_license
|
cran/smoothie
|
R
| false
| false
| 15,212
|
rd
|
\name{kernel2dmeitsjer}
\alias{kernel2dmeitsjer}
\title{
Create a Kernel Matrix
}
\description{
Create a kernel matrix to be used in a 2-D convolution smooth (e.g., using kernel2dsmooth).
}
\usage{
kernel2dmeitsjer(type = "gauss", ...)
}
\arguments{
\item{type}{
character name of the kernel to be created.
}
\item{\dots}{
Other arguments to the specific kernel type. See Details section below.
}
}
\details{
The specific types of kernels that can be made are as follows. In each case, h=||x-x_c|| is the distance from the center of the kernel. Every kernel that requires single numerics nx and ny be specified returns an nx by ny matrix. Distances h are found by setting up a grid based on 1 to nx and 1 to ny denoting the points as (xgrid, ygrid), finding the center of the grid as (x.center, y.center)=(nx/2,ny/2), and then h = sqrt( (xgrid - x.center)^2 + (ygrid - y.center)^2). For kernels that better reflect distance (e.g., using great-circle distance, anisotropic distances, etc.), the matrix h can be passed instead of nx and ny, but only for those kernels that take h as an argument. In each case with sigma as an argument, sigma is the smoothing parameter. There are many kernel functions allowed here, and not all of them make sense for every purpose.
\dQuote{average} gives a kernel that will give an average of the nearest neighbors in each direction (can take an average grid points further in the x- direction than the y-direction, and vice versa). Requires that nx and ny be specified, and the resulting kernel is defined by an nx by ny matrix with each element equal to 1/(nx*ny). If nx = ny, then the result is the same as the boxcar kernel below.
\dQuote{boxcar} the boxcar kernel is an n by n matrix of 1/n^2. This results in a neighborhood smoothing when used with kernel2dsmooth giving the type of smoothed fields utilized, e.g., in Roberts and Lean (2008) and Ebert (2008). Requires that n be specified. Note that usually the boxcar is a square matrix of ones, which gives the sum of the nearest n^2 grid points. This gives the average.
\dQuote{cauchy} The Cauchy kernel is given by K(sigma)=1/(1+h^2/sigma). Requires the arguments nx, ny and sigma. See Souza (2010) for more details.
\dQuote{disk} gives a circular averaging (pill-box) kernel (aka, disk kernel). Similar to \dQuote{average} or \dQuote{boxcar}, but this kernel accounts for a specific distance in all directions from the center (i.e., an average of grid squares within a circular radius of the central point). This results in the convolution radius smoothing applied in Davis et al. (2006a,2006b). Requires that r (the desired radius) be supplied, and a square matrix of appropriate dimension is returned.
\dQuote{epanechnikov} The Epanechnikov kernel is defined by max(0, 3/4*(1-h/(sigma^2))). See, e.g., Hastie and Tibshirani (1990). Requires arguments nx, ny, and sigma.
\dQuote{exponential} The exponential kernel is given by K(sigma) = a*exp(-h/(2*sigma)). Requires the arguments nx, ny and sigma, and optionally takes the argument a (default is a=1). An nx by ny matrix is returned. See Souza (2010) for more details.
\dQuote{gauss} The Gaussian kernel defined by K(sigma) = 1/(2*pi*sigma^2)*exp(-h/(2*sigma)). Requires the arguments nx, ny and sigma be specified. The convolution with this kernel results in a Gaussian smoothed field as used in the practically perfect hindcast method of Brooks et al. (1998) (see also Ebert 2008) and studied by Sobash et al (2011) for spatial forecast verification purposes. Returns an nx by ny matrix.
\dQuote{laplacian} Laplacian Operator kernel, which gives the sum of second partial derivatives for each direction. It is often used for edge detection because it identifies areas of rapid intensity change. Typically, it is first applied to a field that has been smoothed first by a Gaussian kernel smoother (or an approximation thereof; cf. type \dQuote{LoG} below). This method optionally takes the parameter alpha, which controls the shape of the Laplacian kernel, which must be between 0 and 1 (inclusive), or else it will be set to 0 (if < 0) or 1 (if > 1). Returns a 3 by 3 kernel matrix.
\dQuote{LoG} Laplacian of Gaussian kernel. This combines the Laplacian Operator kernel with that of a Gaussian kernel. The form is given by K(sigma) = -1/(pi*sigma^4)*exp(-h/(2*sigma^2))*(1-h/(2*sigma^2)). Requires the arguments nx, ny and sigma be specified. Returns an nx by ny matrix.
\dQuote{minvar} A minimum variance kernel, which is given by 3/8*(3 - 5*h/sigma^2) if h <= 1, and zero otherwise (see, e.g., Hastie and Tibshirani, 1990). Requires the arguments nx, ny, and sigma be specified. Returns an nx by ny matrix.
\dQuote{multiquad} The multiquadratic kernel is similar to the rational quadratic kernel, and is given by K(a) = sqrt(h + a^2). The inverse is given by 1/K(a). Requires the arguments nx, ny and a be specified. Optionally takes a logical named inverse determining whether to return the inverse multiquadratic kernel or not.
\dQuote{prewitt} Prewitt filter kernel, which emphasizes horizontal (vertical) edges through approximation of a vertical (horizontal) gradient. Optionally takes a logical argument named transpose, which if FALSE (default) emphasis is on horizontal, and if TRUE emphasis is on vertical. Returns a 3 by 3 matrix whose first row is all ones, second row is all zeros, and third row is all negative ones for the transpose=FALSE case, and the transpose of this matrix in the transpose=TRUE case.
\dQuote{power} The power kernel is defined by K(p) = -h^p. The log power kernel is similarly defined as K(p) = -log(h^p+1). Requires specification of the arguments nx, ny and p. Alternatively takes the logical do.log to determine whether the log power kernel should be returned (TRUE) or not (FALSE). Default if not passed is to do the power kernel. Returns an nx by ny matrix. See Souza (2010) for more details.
\dQuote{radial} The radial kernel returns a*|h|^(2*m-d)*log(|h|) if d is even and a*|h|^(2*m-d) otherwise. Requires arguments a, m, d, nx and ny. Replaces any missing values with zero.
\dQuote{ratquad} The rational quadratic kernel is used as an alternative to the Gaussian, and is given by K(a) = 1 - h/(h+a). Requires the arguments nx, ny and a, and returns an nx by ny matrix. See Souza (2010) for more details.
\dQuote{sobel} Same as prewitt, except that the elements 1,2 and 3,2 are replaced by two and negative two, respectively.
\dQuote{student} The generalized Student's t kernel is defined by K(p)=1/(1+h^p). Requires the arguments nx, ny and p be specified. Returns an nx by ny matrix. See Souza (2010) for more details.
\dQuote{unsharp} Unsharp contrast enhancement filter. This is simply given by a 3 by 3 matrix of all zeros, except for a one in the center, subtracted by a laplacian operator kernel matrix. Requires the same arguments as for \dQuote{laplacian}. Returns a 3 by 3 matrix.
\dQuote{wave} The wave kernel is defined by K(phi) = phi/h * sin( h/phi). Requires arguments nx, ny and phi be specified. Returns an nx by ny matrix.
\dQuote{oval} The oval kernel is like it sounds, it yields an oval-shaped kernel. Allows arguments a (scale in x-direction), b (scale in y-direction), n (size of kernel in x-direction) and m (size of kernel in y-direction). Default for a and b is 1, and default for n is the maximum of a and b plus two. The default for m is to be identical to n.
}
\value{
matrix of dimension determined by the specific type of kernel, and possibly user passed arguments giving the kernel to be used by \code{kernel2dsmooth}.
}
\references{
Brooks, H. E., Kay, M., and Hart, J. A. (1998) Objective limits on forecasting skill of rare events. \emph{19th Conf. Severe Local Storms}. Amer. Met. Soc., 552--555.
Davis, C. A., Brown, B. G. and Bullock, R. G. (2006a) Object-based verification of precipitation forecasts, Part I: Methodology and application to mesoscale rain areas. \emph{Mon. Wea. Rev.}, \bold{134}, 1772--1784.
Davis, C. A., Brown, B. G. and Bullock, R. G. (2006b) Object-based verification of precipitation forecasts, Part II: Application to convective rain systems. \emph{Mon. Wea. Rev.}, \bold{134}, 1785--1795.
Ebert, E. E. (2008) Fuzzy verification of high resolution gridded forecasts: A review and proposed framework. \emph{Meteorol. Appl.}, \bold{15}, 51-64. \doi{10.1002/met.25}.
Hastie, T. J. and Tibshirani, R. J. (1990) \emph{Generalized Additive Models}. Chapman \& Hall/CRC Monographs on Statistics and Applied Probability 43, 335pp.
Roberts, N. M. and Lean, H. W. (2008) Scale-selective verification of rainfall accumulations from high-resolution forecasts of convective events. \emph{Mon. Wea. Rev.}, \bold{136}, 78--97. \doi{10.1175/2007MWR2123.1}.
Sobash, R. A., Kain, J. S. Bright, D. R. Dean, A. R. Coniglio, M. C. and Weiss, S. J. (2011) Probabilistic forecast guidance for severe thunderstorms based on the identification of extreme phenomena in convection-allowing model forecasts. \emph{Wea. Forecasting}, \bold{26}, 714--728.
Souza, C. R. (2010) \emph{Kernel Functions for Machine Learning Applications}. 17 Mar 2010. Web. \url{http://crsouza.blogspot.com/2010/03/kernel-functions-for-machine-learning.html}.
}
\author{
Eric Gilleland
}
\seealso{
\code{\link{fft}}, \code{\link{kernel2dsmooth}}
}
\examples{
x <- matrix( 0, 10, 12)
x[4,5] <- 1
kmat <- kernel2dmeitsjer( "average", nx=7, ny=5)
kernel2dsmooth( x, K=kmat)
##
## Can also call 'kernel2dsmooth' directly.
##
kernel2dsmooth( x, kernel.type="boxcar", n=5)
kernel2dsmooth( x, kernel.type="cauchy", sigma=20, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="disk", r=3)
kernel2dsmooth( x, kernel.type="epanechnikov", nx=10, ny=12, sigma=4)
kernel2dsmooth( x, kernel.type="exponential", a=0.1, sigma=4, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="gauss", nx=10, ny=12, sigma=4)
kernel2dsmooth( x, kernel.type="laplacian", alpha=0)
kernel2dsmooth( x, kernel.type="LoG", nx=10, ny=12, sigma=1)
kernel2dsmooth( x, kernel.type="minvar", nx=10, ny=12, sigma=4)
kernel2dsmooth( x, kernel.type="multiquad", a=0.1, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="power", p=0.5, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="prewitt")
kernel2dsmooth( x, kernel.type="prewitt", transpose=TRUE)
kernel2dsmooth( x, kernel.type="radial", a=1, m=2, d=1, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="ratquad", a=0.1, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="sobel")
kernel2dsmooth( x, kernel.type="sobel", transpose=TRUE)
kernel2dsmooth( x, kernel.type="student", p=1.5, nx=10, ny=12)
kernel2dsmooth( x, kernel.type="unsharp", alpha=0)
kernel2dsmooth( x, kernel.type="wave", phi=45, nx=10, ny=12)
\dontrun{
## the lennon image is in package 'fields'.
data(lennon)
kmat <- kernel2dmeitsjer( "average", nx=7, ny=5)
lennon.smAvg <- kernel2dsmooth( lennon, K=kmat)
## Can also just make a call to kernel2dsmooth, which
## will call this function.
lennon.smBox <- kernel2dsmooth( lennon, kernel.type="boxcar", n=7)
lennon.smDsk <- kernel2dsmooth( lennon, kernel.type="disk", r=5)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smAvg, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smBox, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smDsk, col=tim.colors(256), axes=FALSE)
lennon.smEpa <- kernel2dsmooth( lennon, kernel.type="epanechnikov", nx=10, ny=10, sigma=20)
lennon.smGau <- kernel2dsmooth( lennon, kernel.type="gauss", nx=10, ny=10, sigma=20)
lennon.smMvr <- kernel2dsmooth( lennon, kernel.type="minvar", nx=10, ny=10, sigma=20)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smEpa, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smGau, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smMvr, col=tim.colors(256), axes=FALSE)
lennon.smLa0 <- kernel2dsmooth( lennon, kernel.type="laplacian", alpha=0)
lennon.smLap <- kernel2dsmooth( lennon, kernel.type="laplacian", alpha=0.999)
lennon.smLoG <- kernel2dsmooth( lennon, kernel.type="LoG", nx=10, ny=10, sigma=20)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smLa0, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smLap, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smLoG, col=tim.colors(256), axes=FALSE)
lennon.smPrH <- kernel2dsmooth( lennon, kernel.type="prewitt")
lennon.smPrV <- kernel2dsmooth( lennon, kernel.type="prewitt", transpose=TRUE)
lennon.smSoH <- kernel2dsmooth( lennon, kernel.type="sobel")
lennon.smSoV <- kernel2dsmooth( lennon, kernel.type="sobel", transpose=TRUE)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon.smPrH, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smPrV, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smSoH, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smSoV, col=tim.colors(256), axes=FALSE)
lennon.smUsh <- kernel2dsmooth( lennon, kernel.type="unsharp", alpha=0.999)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smUsh, col=tim.colors(256), axes=FALSE)
lennon.smRad1 <- kernel2dsmooth( lennon, kernel.type="radial", a=2, m=2, d=1, nx=10, ny=10)
lennon.smRad2 <- kernel2dsmooth( lennon, kernel.type="radial", a=2, m=2, d=2, nx=10, ny=10)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon.smRad1, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smRad2, col=tim.colors(256), axes=FALSE)
lennon.smRQd <- kernel2dsmooth( lennon, kernel.type="ratquad", a=0.5, nx=10, ny=10)
lennon.smExp <- kernel2dsmooth( lennon, kernel.type="exponential", a=0.5, sigma=20, nx=10, ny=10)
lennon.smMQd <- kernel2dsmooth( lennon, kernel.type="multiquad", a=0.5, nx=10, ny=10)
par( mfrow=c(2,2), mar=rep(0.1,4))
image.plot( lennon.smGau, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smRQd, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smExp, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smMQd, col=tim.colors(256), axes=FALSE)
lennon.smIMQ <- kernel2dsmooth( lennon, kernel.type="multiquad", a=0.5, nx=10, ny=10, inverse=TRUE)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon.smMQd, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smIMQ, col=tim.colors(256), axes=FALSE)
lennon.smWav <- kernel2dsmooth( lennon, kernel.type="wave", phi=45, nx=10, ny=10)
par( mfrow=c(1,1), mar=rep(0.1,4))
image.plot( lennon.smWav, col=tim.colors(256), axes=FALSE)
lennon.smPow <- kernel2dsmooth( lennon, kernel.type="power", p=0.5, nx=10, ny=10)
lennon.smLpw <- kernel2dsmooth( lennon, kernel.type="power", p=0.5, nx=10, ny=10, do.log=TRUE)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon.smPow, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smLpw, col=tim.colors(256), axes=FALSE)
lennon.smCau <- kernel2dsmooth( lennon, kernel.type="cauchy", sigma=20, nx=10, ny=10)
lennon.smStd <- kernel2dsmooth( lennon, kernel.type="student", p=1.5, nx=10, ny=10)
par( mfrow=c(2,1), mar=rep(0.1,4))
image.plot( lennon.smCau, col=tim.colors(256), axes=FALSE)
image.plot( lennon.smStd, col=tim.colors(256), axes=FALSE)
image.plot( lennon, kernel.type = "oval", n = 10, m = 15, a = 6, b = 3 )
}
}
\keyword{ math }
\keyword{ manip }
|
# Deliverable 1
Mecha_car <- read.csv(file='MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data=Mecha_car) #generated multiple linear regression model
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data=Mecha_car)) #generated summary statistics
# Deliverable 2
coil_table <- read.csv(file='Suspension_Coil.csv',check.names=F,stringsAsFactors = F)
total_summary <- coil_table %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI),.groups="keep") #created summary table
lot_summary <- coil_table %>%
group_by(Manufacturing_Lot) %>%
summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI), .groups = 'keep') #creating summary table using groupby
# Deliverable 3
t.test(coil_table$PSI, mu=1500) #comparing sample versus population means
t.test(subset(coil_table,Manufacturing_Lot =='Lot1',select='PSI'),mu=1500) # comparing mean of lot 1
t.test(subset(coil_table,Manufacturing_Lot =='Lot2',select='PSI'),mu=1500) # comparing mean of lot 2
t.test(subset(coil_table,Manufacturing_Lot =='Lot3',select='PSI'),mu=1500) # comparing mean of lot 3
|
/MechaCarChallenge.R
|
no_license
|
Salmanbasharat/MechaCar_Statistical_Analysis
|
R
| false
| false
| 1,300
|
r
|
# MechaCar statistical analysis.
# NOTE (fix): dplyr is required for the pipe (%>%), summarize() and
# group_by() used in Deliverable 2; the original script never loaded it
# and therefore failed in a clean R session.
library(dplyr)

# Deliverable 1: multiple linear regression predicting mpg from the
# vehicle design variables.
Mecha_car <- read.csv(file='MechaCar_mpg.csv',check.names=F,stringsAsFactors = F)
lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data=Mecha_car) #generated multiple linear regression model
summary(lm(mpg ~ vehicle_length + vehicle_weight + spoiler_angle + ground_clearance + AWD,data=Mecha_car)) #generated summary statistics

# Deliverable 2: summary statistics of suspension-coil PSI, overall and
# per manufacturing lot.
coil_table <- read.csv(file='Suspension_Coil.csv',check.names=F,stringsAsFactors = F)
total_summary <- coil_table %>% summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI),.groups="keep") #created summary table
lot_summary <- coil_table %>%
  group_by(Manufacturing_Lot) %>%
  summarize(Mean=mean(PSI),Median=median(PSI),Variance=var(PSI),SD=sd(PSI), .groups = 'keep') #creating summary table using groupby

# Deliverable 3: one-sample t-tests of PSI against the population mean
# of 1500, for all coils and for each lot separately.
t.test(coil_table$PSI, mu=1500) #comparing sample versus population means
t.test(subset(coil_table,Manufacturing_Lot =='Lot1',select='PSI'),mu=1500) # comparing mean of lot 1
t.test(subset(coil_table,Manufacturing_Lot =='Lot2',select='PSI'),mu=1500) # comparing mean of lot 2
t.test(subset(coil_table,Manufacturing_Lot =='Lot3',select='PSI'),mu=1500) # comparing mean of lot 3
|
# Weighted average of a set of means, ignoring missing means.
#
# we: numeric vector of weights.
# me: numeric vector (or matrix) of means; NA entries are dropped,
#     together with their weights, before averaging.
# Returns a 1x1 matrix holding sum(we * me) / sum(we) over the non-NA
# entries (a matrix because the numerator is computed via %*%).
wav <- function(we, me) {
  means <- as.matrix(me)
  keep <- !is.na(means)
  # Cross-product of retained weights and means, scaled by total weight.
  (we[keep] %*% means[keep]) / sum(we[keep])
}
|
/quint/R/wav.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 219
|
r
|
# Weighted average of a set of means, ignoring missing means.
#
# we: numeric vector of weights.
# me: numeric vector (or matrix) of means; NA entries are dropped,
#     together with their weights, before averaging.
# Returns a 1x1 matrix holding sum(we * me) / sum(we) over the non-NA
# entries (a matrix because the numerator is computed via %*%).
wav <- function(we, me) {
  means <- as.matrix(me)
  keep <- !is.na(means)
  # Cross-product of retained weights and means, scaled by total weight.
  (we[keep] %*% means[keep]) / sum(we[keep])
}
|
\name{tree.ppp}
\alias{tree.ppp}
\docType{data}
\title{
Sample data for analyzing the forest spatial structure.
}
\description{
A sample data set from a field survey, to help in understanding the method of forest spatial structure analysis. The class is "ppp".
}
\usage{data("tree.ppp")}
\details{
The function "ppp" in the package spatstat
}
\references{
None
}
\examples{
data(tree.ppp)
}
|
/man/tree.ppp.Rd
|
no_license
|
cran/forestSAS
|
R
| false
| false
| 399
|
rd
|
\name{tree.ppp}
\alias{tree.ppp}
\docType{data}
\title{
Sample data for analyzing the forest spatial structure.
}
\description{
A sample data set from a field survey, to help in understanding the method of forest spatial structure analysis. The class is "ppp".
}
\usage{data("tree.ppp")}
\details{
The function "ppp" in the package spatstat
}
\references{
None
}
\examples{
data(tree.ppp)
}
|
library(lmtest)
library(parallel)
library(MAST)
library(MultiAssayExperiment)
library(SummarizedExperiment)
library(tidyverse)
library(data.table)
#################### This script contains wrappers used throughout other scripts, mostly to run
#################### various Gene Differential Expression methods
## Wrapper for logistic regression.
##
## Per-gene multivariate logistic regression: for each gene, the binary
## per-cell response is regressed on the expression of all of that gene's
## transcripts (plus optional covariates), and the gene's p-value comes from
## a likelihood-ratio test (lmtest::lrtest) against the null model --
## intercept-only, or covariates-only when covariates are supplied.
##
## data:       matrix-like, transcripts x cells.
## response:   binary outcome, one value per cell.
## tx2gene:    gene id per row of `data` (length must equal nrow(data)).
## covariates: optional per-cell covariate matrix/data.frame; NOTE it must
##             have column names, otherwise it is silently ignored.
##
## Returns a named numeric vector of LRT p-values, one per gene, named and
## ordered as sort(unique(tx2gene)).
do_logreg <- function(data, response, tx2gene, covariates = NULL) {
    stopifnot(nrow(data) == length(tx2gene))
    ## Split into a list of matrices. Each element = matrix of (transcripts
    ## mapping to a given gene x cells).
    y <- response
    tx_split <- split_matrix(as.matrix(data), tx2gene)
    tx_split <- tx_split[sort(unique(tx2gene))]
    ## Transpose each element to (cells x transcripts) and append the
    ## response as an extra column named "y".
    tx_split <- lapply(tx_split, t)
    tx_split <- lapply(tx_split, cbind, y)
    if (!is.null(colnames(covariates))) {
        tx_split <- lapply(tx_split, cbind, covariates) ## Adding potential covariates (e.g. batch effect, CDR)
    }
    tx_split <- lapply(tx_split, as.data.frame)
    ## On each gene-wise matrix, run multivariate LR and report the
    ## likelihood-ratio test p-value.
    regs <- mclapply(tx_split, function(x, y) {
        ## Full model: y ~ every non-response column (transcripts + covariates).
        fmla <- paste("y~", paste(setdiff(colnames(x), "y"), collapse = "+"), sep = "")
        glm <- glm(fmla, data = x, family = binomial())
        if (is.null(colnames(covariates))) {
            nullmodel <- glm(x[, "y"] ~ 1, data = x, family = binomial())
        } else {
            fmla <- paste0("y~", paste0(colnames(covariates), collapse = "+"))
            nullmodel <- glm(fmla, data = x, family = binomial())
        }
        ## Row 2, column 5 of the lrtest table is Pr(>Chisq) for the
        ## full-vs-null comparison; this is the value returned per gene.
        p <- lrtest(glm, nullmodel)[2, 5]
    }, y = y, mc.cores = getOption("mc.cores", 1L))
    return(setNames(unlist(regs), names(tx_split)))
}
## Wrapper for MAST with Sidak aggregation.
##
## fmla: model formula passed to MAST::zlm (as a string or formula).
## sca:  SingleCellAssay whose rowData has a `transcript` id and a
##       `gene_ensembl` column mapping each transcript to its gene.
## LRT:  name of the model term to test with a likelihood-ratio test.
##
## Returns a list:
##   $tx            per-transcript hurdle ("H") p-values, named by transcript.
##   $fcHurdle      merged table of hurdle p-values, logFC coefficients,
##                  confidence bounds, FDR and rowData annotations.
##   $summaryDt     raw MAST summary data.table.
##   $summaryDt_LRT per-component ("H"/"D"/"C") gene-level Sidak tables.
##   $gn_list       per-component named vectors of gene-level Sidak p-values.
##   $gn            gene-level Sidak p-values from the hurdle component.
do_mast_tx <- function(fmla, sca, LRT) {
    ## Make sure we are splitting the matrix using a character vector. Factors would
    ## lead in extremely slow behaviour.
    rowData(sca)[, "gene_ensembl"] <- as.character(rowData(sca)[, "gene_ensembl"])
    ## Run MAST's two-part hurdle model.
    zlmCond <- zlm(as.formula(fmla), sca)
    summaryCond <- summary(zlmCond, doLRT = LRT) ## Get p-values for cell types at the transcript level
    summaryDt <- summaryCond$datatable
    ## Extract results and reshape: hurdle ("H") p-values merged with the
    ## logFC coefficients and CI bounds, keyed by transcript (primerid).
    fcHurdle <- merge(summaryDt[contrast == LRT & component == "H", .(primerid, `Pr(>Chisq)`)],
        summaryDt[contrast == LRT & component == "logFC", .(primerid, coef, ci.hi,
            ci.lo)], by = "primerid" # logFC coefficients
    )
    ## Keep the hurdle ("H"), discrete ("D") and continuous ("C") components
    ## for the tested contrast, annotate with rowData, and split by component.
    summaryDt_LRT <- subset(summaryDt, contrast == LRT & component %in% c("H", "D",
        "C"))
    summaryDt_LRT <- left_join(summaryDt_LRT, as_tibble(rowData(sca)), by = c(primerid = "transcript"))
    summaryDt_LRT <- split(summaryDt_LRT, summaryDt_LRT$component)
    ## Sidak aggregation of p-values from transcripts to genes. Zero p-values
    ## are clamped to the smallest representable double; if 1-(1-min)^n
    ## underflows to zero, fall back to the Bonferroni value n*min.
    summaryDt_LRT <- lapply(summaryDt_LRT, function(x) {
        do.call(rbind, mclapply(split(x, as.character(x$gene_ensembl)), FUN = function(slice) {
            n <- nrow(slice)
            x <- slice[, "Pr(>Chisq)"]
            x[x == 0] <- .Machine$double.xmin
            res <- 1 - (1 - min(x))^n
            if (res == 0) {
                res <- n * min(x)
            }
            res <- data.frame(gene_ensembl = slice[1, "gene_ensembl"], sidak = res,
                n = n, stringsAsFactors = FALSE)
            res
        }, mc.cores = getOption("mc.cores", 1L)))
    })
    fcHurdle$fdr <- p.adjust(fcHurdle$`Pr(>Chisq)`, method = "fdr")
    fcHurdle <- left_join(fcHurdle, as_tibble(rowData(sca)), by = c(primerid = "transcript"))
    res <- list()
    res$tx <- setNames(fcHurdle$`Pr(>Chisq)`, fcHurdle$primerid) ## Save p-values MAST univariate on transcripts
    res$fcHurdle <- fcHurdle
    res$summaryDt <- summaryDt
    res$summaryDt_LRT <- summaryDt_LRT
    res$gn_list <- lapply(summaryDt_LRT, function(x) {
        setNames(x[, "sidak"], x[, "gene_ensembl"])
    })
    ## This should give identical result as the code that is directly below yet it
    ## outputs many NaN (Inf) when there is no apparent reason to. So we rewrote this
    ## in base R instead of tidyverse
    ## fcHurdle%>%as_tibble()%>%group_by(gene_ensembl)%>%summarise( sidak={
    ## x=get('Pr(>Chisq)') x[x==0]=.Machine$double.xmin res=1-(1-min(x))^n()
    ## if(res==0){ res=n*min(x) } res }, n=n() )->fcHurdle2
    ## Sidak-aggregate the hurdle p-values to gene level (same scheme as above).
    fcHurdle <- split(fcHurdle, as.character(fcHurdle$gene_ensembl))
    fcHurdle2 <- lapply(fcHurdle, function(slice) {
        n <- nrow(slice)
        x <- slice[, "Pr(>Chisq)"]
        x[x == 0] <- .Machine$double.xmin
        res <- 1 - (1 - min(x))^n
        if (res == 0) {
            res <- n * min(x)
        }
        res <- data.frame(gene_ensembl = slice[1, "gene_ensembl"], sidak = res, n = n,
            stringsAsFactors = FALSE)
    })
    fcHurdle2 <- do.call(rbind, fcHurdle2[order(names(fcHurdle2))])
    res$gn <- setNames(fcHurdle2$sidak, fcHurdle2$gene_ensembl)
    return(res)
}
## Not used. Would do MAST univariate on genes. We use do_mast_tx() instead and
## use the $tx element of the output to obtain the same result (although it is
## slower) do_mast_gn=function(fmla,sca,LRT){ ##data: SingleCellAssay with all the
## annotations zlmCond=zlm(as.formula(fmla),sca) summaryCond=summary(zlmCond,
## doLRT=LRT) summaryDt=summaryCond$datatable
## fcHurdle = merge( summaryDt[contrast==LRT & component=='H',.(primerid,
## `Pr(>Chisq)`)], #hurdle P values summaryDt[contrast==LRT & component=='logFC',
## .(primerid, coef, ci.hi, ci.lo)], by='primerid' #logFC coefficients )
## fcHurdle$fdr = p.adjust(fcHurdle$`Pr(>Chisq)`,method='fdr')
## return(list(gn=setNames(fcHurdle$`Pr(>Chisq)`,fcHurdle$primerid),fcHurdle=fcHurdle))
## }
## Sidak aggregation of p-values: combine a vector of p-values into one
## p-value based on the smallest of them, 1 - (1 - min(p))^n. Zeros are
## clamped to the smallest representable double first; if the Sidak formula
## underflows to zero, the (conservative) Bonferroni value n * min(p) is
## returned instead.
sidak <- function(x) {
    k <- length(x)
    x[x == 0] <- .Machine$double.xmin
    smallest <- min(x)
    out <- 1 - (1 - smallest)^k
    if (out == 0) {
        out <- k * smallest
    }
    out
}
## Per-transcript one-way ANOVA (F-test for the group term, despite the
## function's name) followed by Sidak aggregation to genes.
##
## response:   per-cell group label (coerced to factor).
## data:       numeric matrix, transcripts x cells.
## t2g:        gene id per row of `data`, used to pool transcript p-values.
## covariates: optional per-cell covariate vector adjusted for in the ANOVA;
##             defaults to a constant (no adjustment).
##
## Returns list(tx = per-transcript p-values with NaN mapped to 1,
##              gn = per-gene Sidak-aggregated p-values).
do_t_tests <- function(response, data, t2g, covariates = NULL) {
    ## BUG FIX: the original tested missing(covariates); an explicit
    ## covariates = NULL call then reached aov() with a NULL model term and
    ## errored. is.null() covers both the missing and the explicit-NULL case.
    if (is.null(covariates)) {
        covariates <- rep(1, length(response))
    }
    response <- as.factor(response)
    ## First entry of the ANOVA table's Pr(>F) column = p-value of the
    ## `response` term (the first term in the formula).
    aovs <- apply(data, 1, function(x, response, covariates) {
        summary(aov(x ~ response + covariates))[[1]][[5]][1]
    }, response = response, covariates = covariates)
    res <- list()
    res$tx <- aovs
    ## Constant transcripts yield NaN p-values; treat them as non-significant.
    res$tx[is.nan(res$tx)] <- 1
    res$gn <- sapply(split(res$tx, t2g), sidak)
    res
}
## Violin/jitter plot of per-transcript expression, log10(1 + tpm), for a
## single gene: one facet per transcript, x-axis grouped by abbreviated
## Batch_Group labels. Transcripts flagged in rowData's `de_transcript`
## column get a " (DE)" suffix in their facet title.
##
## sca: SingleCellAssay (presumably subset to one gene -- the xlab pastes all
##      distinct gene_ensembl values together); assays must contain `tpm`,
##      rowData must have `transcript`, `de_transcript`, `gene_ensembl`, and
##      colData must have `Cell`, `Batch`, `Group`.
## Returns the ggplot object.
plot_gene <- function(sca) {
    gene_data <- as.data.table(rowData(sca))
    cell_data <- as.data.table(colData(sca))
    ## Append " (DE)" to ids of truly-DE transcripts (data.table in-place op).
    gene_data[, `:=`(transcript, paste0(transcript, ifelse(de_transcript, " (DE)",
        "")))]
    rownames(sca) <- unlist(gene_data[, "transcript"])
    ## Melt to long format: one row per (transcript, cell) with its tpm,
    ## then join transcript- and cell-level annotations back on.
    dt <- data.table(assays(sca)$tpm, keep.rownames = "transcript")
    dt_long <- suppressWarnings(melt(dt, variable.name = "Cell", value.name = "tpm"))
    dt_long <- merge(gene_data, dt_long, by = "transcript")
    dt_long <- merge(cell_data, dt_long, by = "Cell")
    ggplot(dt_long, aes(x = paste0(sub("Batch", "B", Batch), "_", sub("Group", "G",
        Group)), y = log10(1 + tpm))) + geom_violin() + geom_jitter() + facet_wrap(~transcript) +
        theme_bw() + xlab(paste0(unique(gene_data$gene_ensembl), collapse = "/"))
}
## Adapted from
## https://github.com/pachterlab/NYMP_2018/blob/master/simulations/RSEM/R/roc_helpers.R
## should be slightly faster and also returns false positive rate
library(dplyr)
## Compute FDR / ROC-style curves from per-feature p-values and ground truth.
##
## true_DE: logical vector, TRUE where the feature is truly DE.
## pvalues: numeric p-values, same length and order as true_DE.
## title:   label carried through unchanged (used by downstream plotting).
##
## Returns a list with, at each distinct p-value threshold (ascending):
## fdr, sensitivity (TPR), FalsePositiveRate, cumulative counts of true
## discoveries (n_found) and of all calls (n_de), the thresholds, and the
## indices of the last threshold not exceeding 0.05/0.1/0.2 (five/ten/twenty;
## Inf with a warning when no p-value exceeds the cutoff, as before).
calculate_fdr <- function(true_DE, pvalues, title) {
    df <- data.frame(true_DE = true_DE, pvalues = pvalues)
    df <- df[order(df$pvalues), ]
    total_positive <- sum(true_DE)
    total_negative <- sum(!true_DE)
    ## Collapse ties: one row per distinct p-value with counts of true
    ## positives (p), true negatives (n) and total features (l).
    df <- df %>% group_by(pvalues) %>% summarise(p = sum(true_DE), n = sum(!true_DE),
        l = n())
    fdr <- cumsum(df$n)/cumsum(df$l)
    sensitivity <- cumsum(df$p)/total_positive
    falsepositiverate <- cumsum(df$n)/total_negative
    ## PERF FIX: the original recomputed every prefix sum with
    ## sapply(..., sum(x[1:i])) -- O(n^2). cumsum() is identical in O(n),
    ## matching the conversions already made for fdr/sensitivity above.
    n_found <- cumsum(df$p)
    n_de <- cumsum(df$l)
    five <- min(which(df$pvalues > 0.05)) - 1
    ten <- min(which(df$pvalues > 0.1)) - 1
    twenty <- min(which(df$pvalues > 0.2)) - 1
    list(fdr = fdr, sensitivity = sensitivity, n_found = n_found, n_de = n_de, pvalues = df$pvalues,
        five = five, ten = ten, twenty = twenty, title = title, FalsePositiveRate = falsepositiverate)
}
#### Below are misc. functions used throughout the other scripts:
#' Aggregates matrix or data frame
#'
#' @param df A matrix or data frame
#' @param groups A vector of groups (discrete values), one per row
#'   (margin = 1) or per column (margin = 2) of \code{df}
#' @param fun A function that aggregates a vector into objects of length 1
#' @param margin If 1, aggregates rows, if 2 aggregates columns. Defaults to 1
#' @param ... passed to fun
#' @return A matrix with one aggregated row (or column) per group
#'   (stacked with \code{rbind}, so group labels become dimnames)
#' @export
aggregate_df <- function(df, groups, fun = mean, margin = 1, ...) {
    if (length(groups) != dim(df)[margin]) {
        ## Fail fast on a length mismatch. (Message grammar fixed; the check
        ## itself is unchanged.)
        stop("Length of 'groups' vector differs from the size of the specified data margin")
    }
    if (is.data.frame(df)) {
        ## Work row-wise: transpose first when aggregating columns.
        if (margin == 2) {
            df <- as.data.frame(t(df))
        } else {
            df <- as.data.frame(df)
        }
        df <- split(df, groups)
    } else if (is.matrix(df)) {
        df <- split_matrix(df, groups, byrow = margin == 1)
    }
    ## Apply `fun` to every column of each group's sub-frame, then stack the
    ## per-group result vectors.
    res <- do.call(rbind, lapply(df, function(x) {
        apply(x, 2, fun, ...)
    }))
    if (margin == 2) {
        return(t(res))  # undo the earlier transpose
    } else {
        return(res)
    }
}
#' Makes a color transparent
#' @export
#' @param colors A vector of colors as in `?col2rgb`
#' @param alpha transparency value (0=fully transparent, 255=fully opaque)
#' @return Character vector of #RRGGBBAA strings, named after `colors`
#'   when `colors` is a character vector.
#'
## credit
## http://stackoverflow.com/questions/8047668/transparent-equivalent-of-given-color
makeTransparent <- function(colors, alpha = 255) {
    vapply(colors, function(one) {
        channels <- col2rgb(one)
        rgb(red = channels[1, ], green = channels[2, ], blue = channels[3, ],
            alpha = alpha, maxColorValue = 255)
    }, character(1))
}
#' Fast splitting of matrix to list (avoids conversion to data.frame)
#'
#' @param mat A matrix.
#' @param vector Grouping vector: one element per row of \code{mat} when
#'   \code{byrow = TRUE}, one per column otherwise.
#' @param byrow If TRUE (default) split rows into submatrices, else columns.
#' @return A named list of submatrices, one per level of \code{vector}
#'   (level order as produced by \code{split}).
#' @export
split_matrix <- function(mat, vector, byrow = TRUE) {
    ## Scalar conditions, so use && (also fixed: the original repeated the
    ## second condition as a stray no-op expression before stopping).
    if (byrow && nrow(mat) != length(vector)) {
        stop("if byrow=TRUE, vector's length should have length nrow(mat)")
    } else if (!byrow && ncol(mat) != length(vector)) {
        stop("if byrow=FALSE, vector's length should have length ncol(mat)")
    }
    if (byrow) {
        levels <- split(seq_len(nrow(mat)), vector)
        res <- lapply(levels, function(x) {
            mat[x, , drop = FALSE]
        })
    } else {
        levels <- split(seq_len(ncol(mat)), vector)
        res <- lapply(levels, function(x) {
            mat[, x, drop = FALSE]
        })
    }
    res
}
|
/wrappers.R
|
permissive
|
GabrielHoffman/logistic_regresion_for_GDE
|
R
| false
| false
| 11,166
|
r
|
library(lmtest)
library(parallel)
library(MAST)
library(MultiAssayExperiment)
library(SummarizedExperiment)
library(tidyverse)
library(data.table)
#################### This script contains wrappers used throughout other scripts, mostly to run
#################### various Gene Differential Expression methods
## Wrapper for logistic regression.
##
## Per-gene multivariate logistic regression: for each gene, the binary
## per-cell response is regressed on the expression of all of that gene's
## transcripts (plus optional covariates), and the gene's p-value comes from
## a likelihood-ratio test (lmtest::lrtest) against the null model --
## intercept-only, or covariates-only when covariates are supplied.
##
## data:       matrix-like, transcripts x cells.
## response:   binary outcome, one value per cell.
## tx2gene:    gene id per row of `data` (length must equal nrow(data)).
## covariates: optional per-cell covariate matrix/data.frame; NOTE it must
##             have column names, otherwise it is silently ignored.
##
## Returns a named numeric vector of LRT p-values, one per gene, named and
## ordered as sort(unique(tx2gene)).
do_logreg <- function(data, response, tx2gene, covariates = NULL) {
    stopifnot(nrow(data) == length(tx2gene))
    ## Split into a list of matrices. Each element = matrix of (transcripts
    ## mapping to a given gene x cells).
    y <- response
    tx_split <- split_matrix(as.matrix(data), tx2gene)
    tx_split <- tx_split[sort(unique(tx2gene))]
    ## Transpose each element to (cells x transcripts) and append the
    ## response as an extra column named "y".
    tx_split <- lapply(tx_split, t)
    tx_split <- lapply(tx_split, cbind, y)
    if (!is.null(colnames(covariates))) {
        tx_split <- lapply(tx_split, cbind, covariates) ## Adding potential covariates (e.g. batch effect, CDR)
    }
    tx_split <- lapply(tx_split, as.data.frame)
    ## On each gene-wise matrix, run multivariate LR and report the
    ## likelihood-ratio test p-value.
    regs <- mclapply(tx_split, function(x, y) {
        ## Full model: y ~ every non-response column (transcripts + covariates).
        fmla <- paste("y~", paste(setdiff(colnames(x), "y"), collapse = "+"), sep = "")
        glm <- glm(fmla, data = x, family = binomial())
        if (is.null(colnames(covariates))) {
            nullmodel <- glm(x[, "y"] ~ 1, data = x, family = binomial())
        } else {
            fmla <- paste0("y~", paste0(colnames(covariates), collapse = "+"))
            nullmodel <- glm(fmla, data = x, family = binomial())
        }
        ## Row 2, column 5 of the lrtest table is Pr(>Chisq) for the
        ## full-vs-null comparison; this is the value returned per gene.
        p <- lrtest(glm, nullmodel)[2, 5]
    }, y = y, mc.cores = getOption("mc.cores", 1L))
    return(setNames(unlist(regs), names(tx_split)))
}
## Wrapper for MAST with Sidak aggregation.
##
## fmla: model formula passed to MAST::zlm (as a string or formula).
## sca:  SingleCellAssay whose rowData has a `transcript` id and a
##       `gene_ensembl` column mapping each transcript to its gene.
## LRT:  name of the model term to test with a likelihood-ratio test.
##
## Returns a list:
##   $tx            per-transcript hurdle ("H") p-values, named by transcript.
##   $fcHurdle      merged table of hurdle p-values, logFC coefficients,
##                  confidence bounds, FDR and rowData annotations.
##   $summaryDt     raw MAST summary data.table.
##   $summaryDt_LRT per-component ("H"/"D"/"C") gene-level Sidak tables.
##   $gn_list       per-component named vectors of gene-level Sidak p-values.
##   $gn            gene-level Sidak p-values from the hurdle component.
do_mast_tx <- function(fmla, sca, LRT) {
    ## Make sure we are splitting the matrix using a character vector. Factors would
    ## lead in extremely slow behaviour.
    rowData(sca)[, "gene_ensembl"] <- as.character(rowData(sca)[, "gene_ensembl"])
    ## Run MAST's two-part hurdle model.
    zlmCond <- zlm(as.formula(fmla), sca)
    summaryCond <- summary(zlmCond, doLRT = LRT) ## Get p-values for cell types at the transcript level
    summaryDt <- summaryCond$datatable
    ## Extract results and reshape: hurdle ("H") p-values merged with the
    ## logFC coefficients and CI bounds, keyed by transcript (primerid).
    fcHurdle <- merge(summaryDt[contrast == LRT & component == "H", .(primerid, `Pr(>Chisq)`)],
        summaryDt[contrast == LRT & component == "logFC", .(primerid, coef, ci.hi,
            ci.lo)], by = "primerid" # logFC coefficients
    )
    ## Keep the hurdle ("H"), discrete ("D") and continuous ("C") components
    ## for the tested contrast, annotate with rowData, and split by component.
    summaryDt_LRT <- subset(summaryDt, contrast == LRT & component %in% c("H", "D",
        "C"))
    summaryDt_LRT <- left_join(summaryDt_LRT, as_tibble(rowData(sca)), by = c(primerid = "transcript"))
    summaryDt_LRT <- split(summaryDt_LRT, summaryDt_LRT$component)
    ## Sidak aggregation of p-values from transcripts to genes. Zero p-values
    ## are clamped to the smallest representable double; if 1-(1-min)^n
    ## underflows to zero, fall back to the Bonferroni value n*min.
    summaryDt_LRT <- lapply(summaryDt_LRT, function(x) {
        do.call(rbind, mclapply(split(x, as.character(x$gene_ensembl)), FUN = function(slice) {
            n <- nrow(slice)
            x <- slice[, "Pr(>Chisq)"]
            x[x == 0] <- .Machine$double.xmin
            res <- 1 - (1 - min(x))^n
            if (res == 0) {
                res <- n * min(x)
            }
            res <- data.frame(gene_ensembl = slice[1, "gene_ensembl"], sidak = res,
                n = n, stringsAsFactors = FALSE)
            res
        }, mc.cores = getOption("mc.cores", 1L)))
    })
    fcHurdle$fdr <- p.adjust(fcHurdle$`Pr(>Chisq)`, method = "fdr")
    fcHurdle <- left_join(fcHurdle, as_tibble(rowData(sca)), by = c(primerid = "transcript"))
    res <- list()
    res$tx <- setNames(fcHurdle$`Pr(>Chisq)`, fcHurdle$primerid) ## Save p-values MAST univariate on transcripts
    res$fcHurdle <- fcHurdle
    res$summaryDt <- summaryDt
    res$summaryDt_LRT <- summaryDt_LRT
    res$gn_list <- lapply(summaryDt_LRT, function(x) {
        setNames(x[, "sidak"], x[, "gene_ensembl"])
    })
    ## This should give identical result as the code that is directly below yet it
    ## outputs many NaN (Inf) when there is no apparent reason to. So we rewrote this
    ## in base R instead of tidyverse
    ## fcHurdle%>%as_tibble()%>%group_by(gene_ensembl)%>%summarise( sidak={
    ## x=get('Pr(>Chisq)') x[x==0]=.Machine$double.xmin res=1-(1-min(x))^n()
    ## if(res==0){ res=n*min(x) } res }, n=n() )->fcHurdle2
    ## Sidak-aggregate the hurdle p-values to gene level (same scheme as above).
    fcHurdle <- split(fcHurdle, as.character(fcHurdle$gene_ensembl))
    fcHurdle2 <- lapply(fcHurdle, function(slice) {
        n <- nrow(slice)
        x <- slice[, "Pr(>Chisq)"]
        x[x == 0] <- .Machine$double.xmin
        res <- 1 - (1 - min(x))^n
        if (res == 0) {
            res <- n * min(x)
        }
        res <- data.frame(gene_ensembl = slice[1, "gene_ensembl"], sidak = res, n = n,
            stringsAsFactors = FALSE)
    })
    fcHurdle2 <- do.call(rbind, fcHurdle2[order(names(fcHurdle2))])
    res$gn <- setNames(fcHurdle2$sidak, fcHurdle2$gene_ensembl)
    return(res)
}
## Not used. Would do MAST univariate on genes. We use do_mast_tx() instead and
## use the $tx element of the output to obtain the same result (although it is
## slower) do_mast_gn=function(fmla,sca,LRT){ ##data: SingleCellAssay with all the
## annotations zlmCond=zlm(as.formula(fmla),sca) summaryCond=summary(zlmCond,
## doLRT=LRT) summaryDt=summaryCond$datatable
## fcHurdle = merge( summaryDt[contrast==LRT & component=='H',.(primerid,
## `Pr(>Chisq)`)], #hurdle P values summaryDt[contrast==LRT & component=='logFC',
## .(primerid, coef, ci.hi, ci.lo)], by='primerid' #logFC coefficients )
## fcHurdle$fdr = p.adjust(fcHurdle$`Pr(>Chisq)`,method='fdr')
## return(list(gn=setNames(fcHurdle$`Pr(>Chisq)`,fcHurdle$primerid),fcHurdle=fcHurdle))
## }
## Sidak aggregation of p-values: combine a vector of p-values into one
## p-value based on the smallest of them, 1 - (1 - min(p))^n. Zeros are
## clamped to the smallest representable double first; if the Sidak formula
## underflows to zero, the (conservative) Bonferroni value n * min(p) is
## returned instead.
sidak <- function(x) {
    k <- length(x)
    x[x == 0] <- .Machine$double.xmin
    smallest <- min(x)
    out <- 1 - (1 - smallest)^k
    if (out == 0) {
        out <- k * smallest
    }
    out
}
## Per-transcript one-way ANOVA (F-test for the group term, despite the
## function's name) followed by Sidak aggregation to genes.
##
## response:   per-cell group label (coerced to factor).
## data:       numeric matrix, transcripts x cells.
## t2g:        gene id per row of `data`, used to pool transcript p-values.
## covariates: optional per-cell covariate vector adjusted for in the ANOVA;
##             defaults to a constant (no adjustment).
##
## Returns list(tx = per-transcript p-values with NaN mapped to 1,
##              gn = per-gene Sidak-aggregated p-values).
do_t_tests <- function(response, data, t2g, covariates = NULL) {
    ## BUG FIX: the original tested missing(covariates); an explicit
    ## covariates = NULL call then reached aov() with a NULL model term and
    ## errored. is.null() covers both the missing and the explicit-NULL case.
    if (is.null(covariates)) {
        covariates <- rep(1, length(response))
    }
    response <- as.factor(response)
    ## First entry of the ANOVA table's Pr(>F) column = p-value of the
    ## `response` term (the first term in the formula).
    aovs <- apply(data, 1, function(x, response, covariates) {
        summary(aov(x ~ response + covariates))[[1]][[5]][1]
    }, response = response, covariates = covariates)
    res <- list()
    res$tx <- aovs
    ## Constant transcripts yield NaN p-values; treat them as non-significant.
    res$tx[is.nan(res$tx)] <- 1
    res$gn <- sapply(split(res$tx, t2g), sidak)
    res
}
## Violin/jitter plot of per-transcript expression, log10(1 + tpm), for a
## single gene: one facet per transcript, x-axis grouped by abbreviated
## Batch_Group labels. Transcripts flagged in rowData's `de_transcript`
## column get a " (DE)" suffix in their facet title.
##
## sca: SingleCellAssay (presumably subset to one gene -- the xlab pastes all
##      distinct gene_ensembl values together); assays must contain `tpm`,
##      rowData must have `transcript`, `de_transcript`, `gene_ensembl`, and
##      colData must have `Cell`, `Batch`, `Group`.
## Returns the ggplot object.
plot_gene <- function(sca) {
    gene_data <- as.data.table(rowData(sca))
    cell_data <- as.data.table(colData(sca))
    ## Append " (DE)" to ids of truly-DE transcripts (data.table in-place op).
    gene_data[, `:=`(transcript, paste0(transcript, ifelse(de_transcript, " (DE)",
        "")))]
    rownames(sca) <- unlist(gene_data[, "transcript"])
    ## Melt to long format: one row per (transcript, cell) with its tpm,
    ## then join transcript- and cell-level annotations back on.
    dt <- data.table(assays(sca)$tpm, keep.rownames = "transcript")
    dt_long <- suppressWarnings(melt(dt, variable.name = "Cell", value.name = "tpm"))
    dt_long <- merge(gene_data, dt_long, by = "transcript")
    dt_long <- merge(cell_data, dt_long, by = "Cell")
    ggplot(dt_long, aes(x = paste0(sub("Batch", "B", Batch), "_", sub("Group", "G",
        Group)), y = log10(1 + tpm))) + geom_violin() + geom_jitter() + facet_wrap(~transcript) +
        theme_bw() + xlab(paste0(unique(gene_data$gene_ensembl), collapse = "/"))
}
## Adapted from
## https://github.com/pachterlab/NYMP_2018/blob/master/simulations/RSEM/R/roc_helpers.R
## should be slightly faster and also returns false positive rate
library(dplyr)
## Compute FDR / ROC-style curves from per-feature p-values and ground truth.
##
## true_DE: logical vector, TRUE where the feature is truly DE.
## pvalues: numeric p-values, same length and order as true_DE.
## title:   label carried through unchanged (used by downstream plotting).
##
## Returns a list with, at each distinct p-value threshold (ascending):
## fdr, sensitivity (TPR), FalsePositiveRate, cumulative counts of true
## discoveries (n_found) and of all calls (n_de), the thresholds, and the
## indices of the last threshold not exceeding 0.05/0.1/0.2 (five/ten/twenty;
## Inf with a warning when no p-value exceeds the cutoff, as before).
calculate_fdr <- function(true_DE, pvalues, title) {
    df <- data.frame(true_DE = true_DE, pvalues = pvalues)
    df <- df[order(df$pvalues), ]
    total_positive <- sum(true_DE)
    total_negative <- sum(!true_DE)
    ## Collapse ties: one row per distinct p-value with counts of true
    ## positives (p), true negatives (n) and total features (l).
    df <- df %>% group_by(pvalues) %>% summarise(p = sum(true_DE), n = sum(!true_DE),
        l = n())
    fdr <- cumsum(df$n)/cumsum(df$l)
    sensitivity <- cumsum(df$p)/total_positive
    falsepositiverate <- cumsum(df$n)/total_negative
    ## PERF FIX: the original recomputed every prefix sum with
    ## sapply(..., sum(x[1:i])) -- O(n^2). cumsum() is identical in O(n),
    ## matching the conversions already made for fdr/sensitivity above.
    n_found <- cumsum(df$p)
    n_de <- cumsum(df$l)
    five <- min(which(df$pvalues > 0.05)) - 1
    ten <- min(which(df$pvalues > 0.1)) - 1
    twenty <- min(which(df$pvalues > 0.2)) - 1
    list(fdr = fdr, sensitivity = sensitivity, n_found = n_found, n_de = n_de, pvalues = df$pvalues,
        five = five, ten = ten, twenty = twenty, title = title, FalsePositiveRate = falsepositiverate)
}
#### Below are misc. functions used throughout the other scripts:
#' Aggregates matrix or data frame
#'
#' @param df A matrix or data frame
#' @param groups A vector of groups (discrete values)
#' @param fun A function that aggregates a vector into objects of length 1
#' @param margin If 1, aggregates rows, if 2 aggregates columns. Defaults to 1
#' @param ... passed to fun
#' @return A data.frame with aggregated rows or columns
#' @export
aggregate_df <- function(df, groups, fun = mean, margin = 1, ...) {
    # Aggregate the rows (margin = 1) or columns (margin = 2) of `df` by the
    # labels in `groups`, applying `fun` to each group.
    if (length(groups) != dim(df)[margin]) {
        stop("Size of 'groups' vector is different that the one of the specified data margin")
    }
    by_rows <- margin == 1
    if (is.data.frame(df)) {
        # Transpose first when aggregating columns, then split row-wise.
        if (!by_rows) {
            df <- t(df)
        }
        df <- split(as.data.frame(df), groups)
    } else if (is.matrix(df)) {
        # Matrices use the dedicated fast splitter (no data.frame round trip).
        df <- split_matrix(df, groups, byrow = by_rows)
    }
    # Summarise each group column-wise, then stack the groups as rows.
    res <- do.call(rbind, lapply(df, function(chunk) {
        apply(chunk, 2, fun, ...)
    }))
    # Undo the transposition for column-wise aggregation.
    if (by_rows) {
        res
    } else {
        t(res)
    }
}
#' Makes a color transparent
#' @export
#' @param colors A vector of colors as in `?col2rgb`
#' @param alpha transparency value (0=fully transparent, 255=fully opaque)
#'
## credit
## http://stackoverflow.com/questions/8047668/transparent-equivalent-of-given-color
makeTransparent <- function(colors, alpha = 255) {
    # Convert every colour to an "#RRGGBBAA" hex string carrying the requested
    # alpha channel; the result is named after the input colours.
    vapply(colors, function(colour) {
        channels <- col2rgb(colour)
        rgb(channels[1, ], channels[2, ], channels[3, ],
            alpha = alpha, maxColorValue = 255)
    }, character(1))
}
#' Fast splitting of matrix to list (avoids conversion to data.frame)
#'
#' @param mat a matrix
#' @param vector grouping vector; its length must match the dimension being
#'   split (rows when \code{byrow = TRUE}, columns otherwise)
#' @param byrow if TRUE (default), split rows into groups; if FALSE, columns
#' @return a named list with one sub-matrix per group label
#' @export
split_matrix <- function(mat, vector, byrow = TRUE) {
    # Validate that the grouping vector matches the split dimension.
    # FIX: use short-circuiting && for these scalar conditions; the original
    # also contained a stray duplicated condition expression (dead code)
    # before the second stop(), removed here.
    if (byrow && nrow(mat) != length(vector)) {
        stop("if byrow=TRUE, vector's length should have length nrow(mat)")
    } else if (!byrow && ncol(mat) != length(vector)) {
        stop("if byrow=FALSE, vector's length should have length ncol(mat)")
    }
    # Map each group label to its row/column indices, then subset once per
    # group; drop = FALSE keeps single-row/column results as matrices.
    if (byrow) {
        levels <- split(seq_len(nrow(mat)), vector)
        res <- lapply(levels, function(x) {
            mat[x, , drop = FALSE]
        })
    } else {
        levels <- split(seq_len(ncol(mat)), vector)
        res <- lapply(levels, function(x) {
            mat[, x, drop = FALSE]
        })
    }
    res
}
|
## Getting full dataset
## Reads the full UCI household power consumption file (';'-separated,
## '?' encodes missing values); nrows is pre-set to the known file length.
data_full <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
## Dates arrive as day/month/year strings; convert to Date for filtering.
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
## Keep only 2007-02-01 and 2007-02-02, then drop the full table to free memory.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
## Combine Date and Time into a single POSIXct timestamp column.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1
## Histogram of global active power for the two selected days.
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving to file
## NOTE(review): dev.copy() duplicates the on-screen device; opening png()
## before plotting would render at exactly 480x480 -- TODO confirm intended.
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
/plot1.R
|
no_license
|
RLI0602/ExData_Plotting1
|
R
| false
| false
| 714
|
r
|
## Getting full dataset
## Reads the full UCI household power consumption file (';'-separated,
## '?' encodes missing values); nrows is pre-set to the known file length.
data_full <- read.csv("./Data/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
## Dates arrive as day/month/year strings; convert to Date for filtering.
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
## Keep only 2007-02-01 and 2007-02-02, then drop the full table to free memory.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
## Combine Date and Time into a single POSIXct timestamp column.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1
## Histogram of global active power for the two selected days.
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving to file
## NOTE(review): dev.copy() duplicates the on-screen device; opening png()
## before plotting would render at exactly 480x480 -- TODO confirm intended.
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
#' @title Student Answers for Specific Student
#'
#' @description Generates one student's randomized answers to a multiple
#' choice exam as a data frame, given how many questions the student is asked
#' and how many questions the exam contains. Answers range from a to e; NA
#' indicates the question was not answered. If writeToFile = TRUE, studentID
#' and moduleID must be provided so the output file name can be built. Files
#' are created as .tsv's.
#'
#' @param totalNumberofQuestions the total number of questions in an exam
#'
#' @param numberOfQuestionsToAnswer the number of questions the student is
#' asked
#'
#' @param writeToFile if TRUE, a file is created with the dataframe inside.
#' The default value is set to FALSE.
#'
#' @param moduleID a string identifying the name of the module. Only needed
#' if a file is being written.
#'
#' @param studentID a string or value identifying the student that took the
#' exam. Only needed if a file is being written.
#'
#' @return a dataframe with a question and answer column, with the option to
#' write the dataframe to a file
#'
#' @examples
#' ## create answers for an exam with 100 questions and 30 questions asked
#' generateStudentAnswersForExam(100, 30)
#'
#' ## write the student's randomized answers to a file "BS281_answers_12.tsv"
#' generateStudentAnswersForExam(100, 30, writeToFile = TRUE,
#'                               moduleID = 'BS281', studentID = '12')
#'
#' @author Andrew Davis \email{adavisd@essex.ac.uk}
generateStudentAnswersForExam = function(totalNumberofQuestions,
                                         numberOfQuestionsToAnswer,
                                         writeToFile = FALSE, moduleID,
                                         studentID){
  # A student cannot be asked more questions than the exam contains.
  stopifnot(numberOfQuestionsToAnswer < totalNumberofQuestions)
  # Pick which questions this student is asked (distinct, ascending order).
  asked <- sort(sample.int(totalNumberofQuestions, numberOfQuestionsToAnswer))
  # Draw an answer a-e, or NA for a skipped question, for each asked question.
  responses <- sample(c(letters[1:5], NA), numberOfQuestionsToAnswer,
                      replace = TRUE)
  stuAnswers <- data.frame(question = asked, answer = responses)
  # Optionally persist the answers as "<moduleID>_answers_<studentID>.tsv".
  if(writeToFile == TRUE) {
    write.table(stuAnswers,
                file = paste0(moduleID, '_answers_', studentID, '.tsv'),
                row.names = FALSE, quote = FALSE, col.names = TRUE)
  }
  stuAnswers
}
#' @title All Student Answers for a Given Exam
#'
#' @description Outputs random answers to a multiple choice exam for a given
#' module for all students. Answers can be from a to e, with NA's indicating the
#' question was not answered. If writeToFile = TRUE, then a folder is created
#' with all of the student answers. A file is also created with the list of
#' students that took a given exam. Files are created as .tsv's. If
#' readFromFiles = TRUE, then the arguements numberOfQuestions, allStudentIDs,
#' and examsPerSubject are read from files instead of from dataframes.
#'
#' @param moduleID a string identifying the name of the module.
#'
#' @param numberOfQuestions the dataframe that contains the amount of questions
#' each student needs to answer for each exam. The defualt value, questions, is
#' a dataframe included in the package.
#'
#' @param allStudentIDs the dataframe that contains the ID for each student and
#' what degree course they are on. The defualt value, students, is a dataframe
#' included in the package.
#'
#' @param examsPerSubject the dataframe that contains a dataframe that lists
#' what modules a given degree course takes. The first column should list
#' modules, the second column should list the options for options for
#' degree 1, and the third column should list options for
#' degree 2. The possible options are "Yes", "No", and "Optional". If a string
#' is supplied that is not one of the three then it is evaluated as "Yes". if a
#' module is "Optional", then a random amount of students is picked to take the
#' exam, with a higher number of students being more likely. The defualt value,
#' exams, is a dataframe included in the package, with degree 1 being
#' "Biological Sciences" and degree 2 being "Genetics".
#'
#' @param writeToFile if TRUE, a folder, named based on the moduleID is created
#' with a file for each student's answers. A file is also created that lists all
#' of the students that took the exam. The default value is set to FALSE.
#'
#' @param readFromFiles if TRUE, filenames are used to read in data for the
#' relevant arguements instead of dataframes within R.
#'
#' @param degreeNames if degree names are not "Biological Sciences" and
#' "Genetics" then a string should be entered with the two degree courses that
#' the student set belongs to.
#'
#' @return A list with 2 elements, a data frame of students that took the module
#' and a list of answers by each student. If writeToFile = TRUE, then files are
#' written instead.
#'
#' @examples
#' ## create answers for BS284 and output to console
#' generateAllStudentsAnswersForExam('BS284', writeToFile = FALSE)
#'
#' ## create files with student answer files and a list of students taking exam
#' generateAllStudentsAnswersForExam('BS284', writeToFile = TRUE)
#'
#' @author Andrew Davis \email{adavisd@essex.ac.uk}
generateAllStudentsAnswersForExam = function(moduleID,
                    numberOfQuestions = questions, allStudentIDs = students,
                    examsPerSubject = exams, writeToFile = FALSE, readFromFiles = FALSE,
                    degreeNames = NULL){
  # Generate (or write to files) randomized exam answers for every student
  # taking `moduleID`, honouring the "Yes"/"No"/"Optional" enrolment rules in
  # `examsPerSubject`. The interface is unchanged from the original.
  # Read the three tables from disk when the arguments are file names.
  if(readFromFiles == TRUE){
    # FIX: the original read from an undefined variable `number_of_questions`
    # here instead of the `numberOfQuestions` argument.
    numberOfQuestions = read.table(file = numberOfQuestions, header = TRUE)
    # append the total number of available questions for each of the 5 exams
    totalQuestions = NULL
    for(i in 1:5){
      totalQuestions[i] = length(readLines(
        paste0('correct_answers_BS28', i, '.dat')))
    }
    numberOfQuestions = cbind(numberOfQuestions, totalQuestions)
    allStudentIDs = read.table(file = allStudentIDs, header = TRUE)
    examsPerSubject = read.table(file = examsPerSubject, header = TRUE)
  }
  # Resolve degree names (defaults match the packaged example data).
  if(!is.null(degreeNames)) degree = degreeNames
  else degree = c("Biological Sciences", "Genetics")
  colnames(examsPerSubject) = c('module', degree[1], degree[2])
  # Validate inputs before doing any work.
  if(!(moduleID %in% examsPerSubject[,1])){
    stop('moduleID not listed in examsPerSubject')
  }
  stopifnot(names(examsPerSubject) == c("module", degree[1], degree[2]))
  stopifnot(unique(allStudentIDs[,2]) == degree)
  # Column 2 = number of questions asked, column 3 = questions available.
  # FIX: index explicitly on column 1; the original compared the whole data
  # frame to moduleID (numberOfQuestions == moduleID), which only worked by
  # accident of the matrix comparison.
  numAsked = numberOfQuestions[numberOfQuestions[,1] == moduleID, 2]
  numAvailable = numberOfQuestions[numberOfQuestions[,1] == moduleID, 3]
  if(numAsked > numAvailable){
    stop('Number of questions asked is more than in module exam answer key')
  }
  # Enrolment rule for this module for each degree course.
  deg1Rule = examsPerSubject[examsPerSubject[,1] == moduleID, 2]
  deg2Rule = examsPerSubject[examsPerSubject[,1] == moduleID, 3]
  # Helper: keep a random subset of `pool`, larger subsets being more likely.
  sampleOptional = function(pool) {
    pool[sample(1:nrow(pool),
                sample(1:nrow(pool), 1,
                       prob = seq(.2, .8, .6/(nrow(pool) - 1)))), ]
  }
  # FIX: the both-Optional case is checked first; in the original it came
  # after the single-Optional branches and was therefore unreachable.
  if(deg1Rule == 'Optional' && deg2Rule == 'Optional'){
    allStudentIDs = sampleOptional(allStudentIDs)
  }
  # Only degree-2 students take the module.
  else if(deg1Rule == 'No'){
    allStudentIDs = allStudentIDs[allStudentIDs[,2] == degree[2],]
  }
  # Only degree-1 students take the module.
  else if(deg2Rule == 'No'){
    allStudentIDs = allStudentIDs[allStudentIDs[,2] == degree[1],]
  }
  # Optional for degree 2: keep all degree-1 students, sample from degree 2.
  else if(deg2Rule == 'Optional'){
    mandatory = allStudentIDs[allStudentIDs[,2] == degree[1],]
    optional = sampleOptional(allStudentIDs[allStudentIDs[,2] == degree[2],])
    allStudentIDs = rbind(mandatory, optional)
  }
  # Optional for degree 1: keep all degree-2 students, sample from degree 1.
  # FIX: the original had the degree labels swapped in this branch, so it
  # kept all degree-1 students and sampled degree 2 (duplicating the branch
  # above) instead of the other way round.
  else if(deg1Rule == 'Optional'){
    mandatory = allStudentIDs[allStudentIDs[,2] == degree[2],]
    optional = sampleOptional(allStudentIDs[allStudentIDs[,2] == degree[1],])
    allStudentIDs = rbind(mandatory, optional)
  }
  # Any other value (e.g. "Yes") means every student takes the exam.
  else { }
  if(writeToFile == TRUE) {
    # Write one answer file per student into a module-specific folder.
    dir.create(paste0(moduleID, 'studentAnswerFiles'))
    setwd(paste0(moduleID, 'studentAnswerFiles'))
    for(i in allStudentIDs[,1]) {
      generateStudentAnswersForExam(numAvailable, numAsked, TRUE, moduleID, i)
    }
    # restore the original working directory
    setwd('../')
    # record which students were (possibly randomly) selected for the exam
    filename = paste('studentList_', moduleID, '.tsv', sep = '')
    write.table(allStudentIDs, file = filename, col.names = TRUE,
                row.names = FALSE)
  }
  # otherwise return the answers to the caller
  else {
    # one answer data frame per selected student, named by student ID
    allStuAnswers = lapply(allStudentIDs[,1], generateStudentAnswersForExam,
                           totalNumberofQuestions = numAvailable,
                           numberOfQuestionsToAnswer = numAsked)
    names(allStuAnswers) = allStudentIDs[,1]
    allStuAnswers = list(allStudentIDs, allStuAnswers)
    names(allStuAnswers) = c('student list', 'answers')
    return(allStuAnswers)
  }
}
#' @title Generate Exam Answer Key
#'
#' @description Produces a randomized answer key for an exam with a given
#' number of questions. If writeToFile = TRUE, then moduleID must be provided
#' to create the file name.
#'
#' @param numberOfQuestions a numeric value that specifies how many answers
#' should be generated for the key.
#'
#' @param writeToFile if TRUE, a file, named based on the moduleID, is
#' created with the answer key inside.
#'
#' @param moduleID a string that designates what the name of the module is.
#' If writeToFile = TRUE, then this argument must be specified.
#'
#' @param ansOptions the possible answers in the multiple choice exam key.
#' The default value, letters[1:5], specifies that there are 5 different
#' options for each question: a, b, c, d, and e.
#'
#' @return a vector that contains a randomized answer for each question
#' number. If writeToFile = TRUE, then this vector is written to a file.
#'
#' @examples
#' ## create 100 question answer key
#' createAnswerKey(100)
#'
#' ## write answer key to file
#' createAnswerKey(100, writeToFile = TRUE, 'BS281')
#'
#' @author Andrew Davis \email{adavisd@essex.ac.uk}
createAnswerKey = function(numberOfQuestions, writeToFile = FALSE, moduleID,
                           ansOptions = letters[1:5]){
  # Sample one answer per question, uniformly and with replacement.
  key <- sample(ansOptions, numberOfQuestions, replace = TRUE)
  if(writeToFile == TRUE){
    # Persist as "correct_answers_<moduleID>.dat" with an 'answer' header.
    write.table(key, file = paste0('correct_answers_', moduleID, '.dat'),
                row.names = FALSE, quote = FALSE, col.names = 'answer')
  }
  else return(key)
}
|
/examMarks/R/generateAnswerSheets.R
|
no_license
|
ddavis3739/examMarks-R-package
|
R
| false
| false
| 12,761
|
r
|
#' @title Student Answers for Specific Student
#'
#' @description Generates one student's randomized answers to a multiple
#' choice exam as a data frame, given how many questions the student is asked
#' and how many questions the exam contains. Answers range from a to e; NA
#' indicates the question was not answered. If writeToFile = TRUE, studentID
#' and moduleID must be provided so the output file name can be built. Files
#' are created as .tsv's.
#'
#' @param totalNumberofQuestions the total number of questions in an exam
#'
#' @param numberOfQuestionsToAnswer the number of questions the student is
#' asked
#'
#' @param writeToFile if TRUE, a file is created with the dataframe inside.
#' The default value is set to FALSE.
#'
#' @param moduleID a string identifying the name of the module. Only needed
#' if a file is being written.
#'
#' @param studentID a string or value identifying the student that took the
#' exam. Only needed if a file is being written.
#'
#' @return a dataframe with a question and answer column, with the option to
#' write the dataframe to a file
#'
#' @examples
#' ## create answers for an exam with 100 questions and 30 questions asked
#' generateStudentAnswersForExam(100, 30)
#'
#' ## write the student's randomized answers to a file "BS281_answers_12.tsv"
#' generateStudentAnswersForExam(100, 30, writeToFile = TRUE,
#'                               moduleID = 'BS281', studentID = '12')
#'
#' @author Andrew Davis \email{adavisd@essex.ac.uk}
generateStudentAnswersForExam = function(totalNumberofQuestions,
                                         numberOfQuestionsToAnswer,
                                         writeToFile = FALSE, moduleID,
                                         studentID){
  # A student cannot be asked more questions than the exam contains.
  stopifnot(numberOfQuestionsToAnswer < totalNumberofQuestions)
  # Pick which questions this student is asked (distinct, ascending order).
  asked <- sort(sample.int(totalNumberofQuestions, numberOfQuestionsToAnswer))
  # Draw an answer a-e, or NA for a skipped question, for each asked question.
  responses <- sample(c(letters[1:5], NA), numberOfQuestionsToAnswer,
                      replace = TRUE)
  stuAnswers <- data.frame(question = asked, answer = responses)
  # Optionally persist the answers as "<moduleID>_answers_<studentID>.tsv".
  if(writeToFile == TRUE) {
    write.table(stuAnswers,
                file = paste0(moduleID, '_answers_', studentID, '.tsv'),
                row.names = FALSE, quote = FALSE, col.names = TRUE)
  }
  stuAnswers
}
#' @title All Student Answers for a Given Exam
#'
#' @description Outputs random answers to a multiple choice exam for a given
#' module for all students. Answers can be from a to e, with NA's indicating the
#' question was not answered. If writeToFile = TRUE, then a folder is created
#' with all of the student answers. A file is also created with the list of
#' students that took a given exam. Files are created as .tsv's. If
#' readFromFiles = TRUE, then the arguements numberOfQuestions, allStudentIDs,
#' and examsPerSubject are read from files instead of from dataframes.
#'
#' @param moduleID a string identifying the name of the module.
#'
#' @param numberOfQuestions the dataframe that contains the amount of questions
#' each student needs to answer for each exam. The defualt value, questions, is
#' a dataframe included in the package.
#'
#' @param allStudentIDs the dataframe that contains the ID for each student and
#' what degree course they are on. The defualt value, students, is a dataframe
#' included in the package.
#'
#' @param examsPerSubject the dataframe that contains a dataframe that lists
#' what modules a given degree course takes. The first column should list
#' modules, the second column should list the options for options for
#' degree 1, and the third column should list options for
#' degree 2. The possible options are "Yes", "No", and "Optional". If a string
#' is supplied that is not one of the three then it is evaluated as "Yes". if a
#' module is "Optional", then a random amount of students is picked to take the
#' exam, with a higher number of students being more likely. The defualt value,
#' exams, is a dataframe included in the package, with degree 1 being
#' "Biological Sciences" and degree 2 being "Genetics".
#'
#' @param writeToFile if TRUE, a folder, named based on the moduleID is created
#' with a file for each student's answers. A file is also created that lists all
#' of the students that took the exam. The default value is set to FALSE.
#'
#' @param readFromFiles if TRUE, filenames are used to read in data for the
#' relevant arguements instead of dataframes within R.
#'
#' @param degreeNames if degree names are not "Biological Sciences" and
#' "Genetics" then a string should be entered with the two degree courses that
#' the student set belongs to.
#'
#' @return A list with 2 elements, a data frame of students that took the module
#' and a list of answers by each student. If writeToFile = TRUE, then files are
#' written instead.
#'
#' @examples
#' ## create answers for BS284 and output to console
#' generateAllStudentsAnswersForExam('BS284', writeToFile = FALSE)
#'
#' ## create files with student answer files and a list of students taking exam
#' generateAllStudentsAnswersForExam('BS284', writeToFile = TRUE)
#'
#' @author Andrew Davis \email{adavisd@essex.ac.uk}
generateAllStudentsAnswersForExam = function(moduleID,
                    numberOfQuestions = questions, allStudentIDs = students,
                    examsPerSubject = exams, writeToFile = FALSE, readFromFiles = FALSE,
                    degreeNames = NULL){
  # Generate (or write to files) randomized exam answers for every student
  # taking `moduleID`, honouring the "Yes"/"No"/"Optional" enrolment rules in
  # `examsPerSubject`. The interface is unchanged from the original.
  # Read the three tables from disk when the arguments are file names.
  if(readFromFiles == TRUE){
    # FIX: the original read from an undefined variable `number_of_questions`
    # here instead of the `numberOfQuestions` argument.
    numberOfQuestions = read.table(file = numberOfQuestions, header = TRUE)
    # append the total number of available questions for each of the 5 exams
    totalQuestions = NULL
    for(i in 1:5){
      totalQuestions[i] = length(readLines(
        paste0('correct_answers_BS28', i, '.dat')))
    }
    numberOfQuestions = cbind(numberOfQuestions, totalQuestions)
    allStudentIDs = read.table(file = allStudentIDs, header = TRUE)
    examsPerSubject = read.table(file = examsPerSubject, header = TRUE)
  }
  # Resolve degree names (defaults match the packaged example data).
  if(!is.null(degreeNames)) degree = degreeNames
  else degree = c("Biological Sciences", "Genetics")
  colnames(examsPerSubject) = c('module', degree[1], degree[2])
  # Validate inputs before doing any work.
  if(!(moduleID %in% examsPerSubject[,1])){
    stop('moduleID not listed in examsPerSubject')
  }
  stopifnot(names(examsPerSubject) == c("module", degree[1], degree[2]))
  stopifnot(unique(allStudentIDs[,2]) == degree)
  # Column 2 = number of questions asked, column 3 = questions available.
  # FIX: index explicitly on column 1; the original compared the whole data
  # frame to moduleID (numberOfQuestions == moduleID), which only worked by
  # accident of the matrix comparison.
  numAsked = numberOfQuestions[numberOfQuestions[,1] == moduleID, 2]
  numAvailable = numberOfQuestions[numberOfQuestions[,1] == moduleID, 3]
  if(numAsked > numAvailable){
    stop('Number of questions asked is more than in module exam answer key')
  }
  # Enrolment rule for this module for each degree course.
  deg1Rule = examsPerSubject[examsPerSubject[,1] == moduleID, 2]
  deg2Rule = examsPerSubject[examsPerSubject[,1] == moduleID, 3]
  # Helper: keep a random subset of `pool`, larger subsets being more likely.
  sampleOptional = function(pool) {
    pool[sample(1:nrow(pool),
                sample(1:nrow(pool), 1,
                       prob = seq(.2, .8, .6/(nrow(pool) - 1)))), ]
  }
  # FIX: the both-Optional case is checked first; in the original it came
  # after the single-Optional branches and was therefore unreachable.
  if(deg1Rule == 'Optional' && deg2Rule == 'Optional'){
    allStudentIDs = sampleOptional(allStudentIDs)
  }
  # Only degree-2 students take the module.
  else if(deg1Rule == 'No'){
    allStudentIDs = allStudentIDs[allStudentIDs[,2] == degree[2],]
  }
  # Only degree-1 students take the module.
  else if(deg2Rule == 'No'){
    allStudentIDs = allStudentIDs[allStudentIDs[,2] == degree[1],]
  }
  # Optional for degree 2: keep all degree-1 students, sample from degree 2.
  else if(deg2Rule == 'Optional'){
    mandatory = allStudentIDs[allStudentIDs[,2] == degree[1],]
    optional = sampleOptional(allStudentIDs[allStudentIDs[,2] == degree[2],])
    allStudentIDs = rbind(mandatory, optional)
  }
  # Optional for degree 1: keep all degree-2 students, sample from degree 1.
  # FIX: the original had the degree labels swapped in this branch, so it
  # kept all degree-1 students and sampled degree 2 (duplicating the branch
  # above) instead of the other way round.
  else if(deg1Rule == 'Optional'){
    mandatory = allStudentIDs[allStudentIDs[,2] == degree[2],]
    optional = sampleOptional(allStudentIDs[allStudentIDs[,2] == degree[1],])
    allStudentIDs = rbind(mandatory, optional)
  }
  # Any other value (e.g. "Yes") means every student takes the exam.
  else { }
  if(writeToFile == TRUE) {
    # Write one answer file per student into a module-specific folder.
    dir.create(paste0(moduleID, 'studentAnswerFiles'))
    setwd(paste0(moduleID, 'studentAnswerFiles'))
    for(i in allStudentIDs[,1]) {
      generateStudentAnswersForExam(numAvailable, numAsked, TRUE, moduleID, i)
    }
    # restore the original working directory
    setwd('../')
    # record which students were (possibly randomly) selected for the exam
    filename = paste('studentList_', moduleID, '.tsv', sep = '')
    write.table(allStudentIDs, file = filename, col.names = TRUE,
                row.names = FALSE)
  }
  # otherwise return the answers to the caller
  else {
    # one answer data frame per selected student, named by student ID
    allStuAnswers = lapply(allStudentIDs[,1], generateStudentAnswersForExam,
                           totalNumberofQuestions = numAvailable,
                           numberOfQuestionsToAnswer = numAsked)
    names(allStuAnswers) = allStudentIDs[,1]
    allStuAnswers = list(allStudentIDs, allStuAnswers)
    names(allStuAnswers) = c('student list', 'answers')
    return(allStuAnswers)
  }
}
#' @title Generate Exam Answer Key
#'
#' @description Produces a randomized answer key for an exam with a given
#' number of questions. If writeToFile = TRUE, then moduleID must be provided
#' to create the file name.
#'
#' @param numberOfQuestions a numeric value that specifies how many answers
#' should be generated for the key.
#'
#' @param writeToFile if TRUE, a file, named based on the moduleID, is
#' created with the answer key inside.
#'
#' @param moduleID a string that designates what the name of the module is.
#' If writeToFile = TRUE, then this argument must be specified.
#'
#' @param ansOptions the possible answers in the multiple choice exam key.
#' The default value, letters[1:5], specifies that there are 5 different
#' options for each question: a, b, c, d, and e.
#'
#' @return a vector that contains a randomized answer for each question
#' number. If writeToFile = TRUE, then this vector is written to a file.
#'
#' @examples
#' ## create 100 question answer key
#' createAnswerKey(100)
#'
#' ## write answer key to file
#' createAnswerKey(100, writeToFile = TRUE, 'BS281')
#'
#' @author Andrew Davis \email{adavisd@essex.ac.uk}
createAnswerKey = function(numberOfQuestions, writeToFile = FALSE, moduleID,
                           ansOptions = letters[1:5]){
  # Sample one answer per question, uniformly and with replacement.
  key <- sample(ansOptions, numberOfQuestions, replace = TRUE)
  if(writeToFile == TRUE){
    # Persist as "correct_answers_<moduleID>.dat" with an 'answer' header.
    write.table(key, file = paste0('correct_answers_', moduleID, '.dat'),
                row.names = FALSE, quote = FALSE, col.names = 'answer')
  }
  else return(key)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{MsigdbMapping}
\alias{MsigdbMapping}
\title{MsigdbMapping}
\format{List}
\source{
\url{AMARETTO}
}
\usage{
MsigdbMapping
}
\description{
A dataset containing all MSIGDB pathways and their descriptions.
\keyword{datasets}
|
/man/MsigdbMapping.Rd
|
permissive
|
gevaertlab/AMARETTO
|
R
| false
| true
| 331
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{MsigdbMapping}
\alias{MsigdbMapping}
\title{MsigdbMapping}
\format{List}
\source{
\url{AMARETTO}
}
\usage{
MsigdbMapping
}
\description{
A dataset containing all MSIGDB pathways and their descriptions.
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mytable.R
\name{mytable}
\alias{mytable}
\title{Cross Tabulation and Table Creation}
\usage{
mytable(...)
}
\arguments{
\item{...}{Arguments provided to \code{\link{table}}.}
}
\value{
An array of integer values of class "table".
}
\description{
Build a contingency table of the counts at each combination of factor levels,
incorporating missing values by default.
}
\examples{
mytable(c(1, 1, 1, 2, NA, 3, 4, 1, 10, 3))
}
|
/man/mytable.Rd
|
no_license
|
JVAdams/jvamisc
|
R
| false
| true
| 502
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mytable.R
\name{mytable}
\alias{mytable}
\title{Cross Tabulation and Table Creation}
\usage{
mytable(...)
}
\arguments{
\item{...}{Arguments provided to \code{\link{table}}.}
}
\value{
An array of integer values of class "table".
}
\description{
Build a contingency table of the counts at each combination of factor levels,
incorporating missing values by default.
}
\examples{
mytable(c(1, 1, 1, 2, NA, 3, 4, 1, 10, 3))
}
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Main function of simer
#'
#' Build date: Jan 7, 2019
#' Last update: Oct 13, 2019
#'
#' @author Dong Yin, Lilin Yin, Haohao Zhang and Xiaolei Liu
#'
#' @param num.gen number of generations in simulation
#' @param replication replication index of simulation
#' @param verbose whether to print detail
#' @param mrk.dense whether markers are dense, it is TRUE when sequencing data
#' @param incols the column number of an individual in the input genotype matrix, it can be 1 or 2
#' @param outcols the column number of an individual in the output genotype matrix, it can be 1 or 2
#' @param out prefix of output file name
#' @param outpath path of output files
#' @param selPath the path of breeding_plan
#' @param out.format format of output, "numeric" or "plink"
#' @param seed.sim random seed of a simulation process
#' @param out.geno.gen indice of generations of output genotype
#' @param out.pheno.gen indice of generations of output phenotype
#' @param rawgeno1 extrinsic genotype matrix1
#' @param rawgeno2 extrinsic genotype matrix2
#' @param rawgeno3 extrinsic genotype matrix3
#' @param rawgeno4 extrinsic genotype matrix4
#' @param num.ind population size of the base population
#' @param prob weight of "0" and "1" in genotype matrix, the sum of elements in vector equal to 1
#' @param input.map map that should be input, the marker number should be consistent in both map file and genotype data
#' @param len.block length of every blocks
#' @param range.hot range of number of chromosome crossovers in a hot spot block
#' @param range.cold range of number of chromosome crossovers in a cold spot block
#' @param rate.mut mutation rate between 1e-8 and 1e-6
#' @param cal.model phenotype model with the options: "A", "AD", "ADI"
#' @param FR list of fixed effects, random effects, and their combination
#' @param cv list of population Coefficient of Variation or family Coefficient of Variation
#' @param var.pheno the phenotype variance, only used in single-trait simulation
#' @param h2.tr1 heritability vector of a single trait, every element are corresponding to a, d, aXa, aXd, dXa, dXd respectively
#' @param num.qtn.tr1 integer or integer vector, the number of QTN in a single trait
#' @param sd.tr1 standard deviations of the different effects; the last 5 vector elements correspond to d, aXa, aXd, dXa, dXd respectively, and the remaining elements correspond to a
#' @param dist.qtn.tr1 distributions of the QTN effects with the options: "normal", "geometry", "gamma", and "beta"; vector elements correspond to a, d, aXa, aXd, dXa, dXd respectively
#' @param prob.tr1 unit effect of geometric distribution of a single trait, its length should be same as dist.qtn.tr1
#' @param shape.tr1 shape of gamma distribution of a single trait, its length should be same as dist.qtn.tr1
#' @param scale.tr1 scale of gamma distribution of a single trait, its length should be same as dist.qtn.tr1
#' @param shape1.tr1 non-negative parameters of the Beta distribution, its length should be same as dist.qtn.tr1
#' @param shape2.tr1 non-negative parameters of the Beta distribution, its length should be same as dist.qtn.tr1
#' @param ncp.tr1 non-centrality parameter, its length should be same as dist.qtn.tr1
#' @param multrait whether to simulate multiple traits; TRUE enables multi-trait simulation, FALSE disables it
#' @param num.qtn.trn QTN distribution matrix, diagonal elements are total QTN number of the trait, non-diagonal elements are QTN number of overlap QTN between two traits
#' @param sd.trn a matrix with the standard deviation of the QTN effects
#' @param gnt.cov genetic covariance matrix among all traits
#' @param h2.trn heritability among all traits
#' @param qtn.spot QTN probability in every block
#' @param maf Minor Allele Frequency, marker selection range is from maf to 0.5
#' @param sel.crit selection criteria with the options: "TGV", "TBV", "pEBVs", "gEBVs", "ssEBVs", and "pheno"
#' @param sel.on whether to add selection
#' @param mtd.reprod different reproduction methods with the options: "clone", "dh", "selfpol", "singcro", "tricro", "doubcro", "backcro","randmate", "randexself", and "userped"
#' @param userped user-designed pedigree to control mating process
#' @param num.prog litter size of dams
#' @param ratio ratio of the males in all individuals
#' @param prog.tri litter size of the first single-cross step in the triple (three-way) cross process
#' @param prog.doub litter size of the first two single cross process in double cross process
#' @param prog.back a vector with litter size in every generation of back-cross
#' @param refresh refresh ratio of the core population of sires and dams, only used when ps > 1
#' @param keep.max.gen the maximum number of generations kept during selection of sires and dams, only used when ps > 1
#' @param ps if ps <= 1, fraction selected in selection of males and females; if ps > 1, ps is number of selected males and females
#' @param decr whether to sort in decreasing order
#' @param sel.multi selection method of multiple traits with options: "tdm", "indcul" and "index"
#' @param index.wt economic weights of the selection index method; its length should equal the number of traits
#' @param index.tdm index represents which trait is being selected
#' @param goal.perc percentage of goal more than mean of scores of individuals
#' @param pass.perc percentage of expected excellent individuals
#' @param sel.sing selection method of single trait with options: "ind", "fam", "infam" and "comb"
#'
#' @return a list with population information, genotype matrix, map information, selection intensity
#' @export
#' @import bigmemory
#' @importFrom stats aov cor dnorm qnorm rgamma rnorm rbeta rgeom runif var shapiro.test
#' @importFrom utils write.table read.delim packageVersion
#' @importFrom methods getPackageName
#' @importFrom MASS mvrnorm ginv
#' @importFrom rMVP MVP.Data.MVP2Bfile
#'
#' @examples
#' \donttest{
#' # get map file, map is necessary
#' data(simdata)
#'
#' # run simer
#' simer.list <-
#' simer(num.gen = 5,
#' replication = 1,
#' verbose = TRUE,
#' mrk.dense = TRUE,
#' incols = 2,
#' outcols = 1,
#' out = "simer",
#' outpath = NULL,
#' selPath = NULL,
#' out.format = "numeric",
#' seed.sim = runif(1, 0, 100),
#' out.geno.gen = 3:5,
#' out.pheno.gen = 1:5,
#' rawgeno1 = rawgeno,
#' rawgeno2 = NULL,
#' rawgeno3 = NULL,
#' rawgeno4 = NULL,
#' num.ind = NULL,
#' prob = c(0.5, 0.5),
#' input.map = input.map,
#' len.block = 5e7,
#' range.hot = 4:6,
#' range.cold = 1:5,
#' rate.mut = 1e-8,
#' cal.model = "A",
#' FR = NULL,
#' cv = NULL,
#' h2.tr1 = c(0.3, 0.1, 0.05, 0.05, 0.05, 0.01),
#' num.qtn.tr1 = 500,
#' sd.tr1 = c(0.4, 0.2, 0.02, 0.02, 0.02, 0.02),
#' dist.qtn.tr1 = rep("normal", 6),
#' prob.tr1 = rep(0.5, 6),
#' shape.tr1 = rep(1, 6),
#' scale.tr1 = rep(1, 6),
#' shape1.tr1 = rep(1, 6),
#' shape2.tr1 = rep(1, 6),
#' ncp.tr1 = rep(0, 6),
#' multrait = FALSE,
#' num.qtn.trn = matrix(c(400, 100, 100, 400), 2, 2),
#' sd.trn = matrix(c(0.07, 0, 0, 0.07), 2, 2),
#' gnt.cov = matrix(c(1, 2, 2, 16), 2, 2),
#' h2.trn = c(0.3, 0.5),
#' qtn.spot = rep(0.1, 10),
#' maf = 0,
#' sel.crit = "pheno",
#' sel.on = TRUE,
#' mtd.reprod = "randmate",
#' userped = userped,
#' num.prog = 2,
#' ratio = 0.5,
#' prog.tri = 2,
#' prog.doub = 2,
#' prog.back = rep(2, 5),
#' ps = rep(0.8, 2),
#' decr = TRUE,
#' sel.multi = "index",
#' index.wt = c(0.5, 0.5),
#' index.tdm = 1,
#' goal.perc = 0.1,
#' pass.perc = 0.9,
#' sel.sing = "comb")
#' pop <- simer.list$pop
#' effs <- simer.list$effs
#' trait <- simer.list$trait
#' geno <- simer.list$geno
#' genoid <- simer.list$genoid
#' map <- simer.list$map
#' si <- simer.list$si
#' head(pop)
#' str(effs)
#' str(trait)
#' geno[1:6, 1:6]
#' genoid[1:6]
#' str(map)
#' si
#' }
simer <-
function(num.gen = 5,
replication = 1,
verbose = TRUE,
mrk.dense = TRUE,
incols = 2,
outcols = 1,
out = "simer",
outpath = NULL,
selPath = NULL,
out.format = "numeric",
seed.sim = runif(1, 0, 100),
out.geno.gen = (num.gen-2):num.gen,
out.pheno.gen = 1:num.gen,
rawgeno1 = NULL,
rawgeno2 = NULL,
rawgeno3 = NULL,
rawgeno4 = NULL,
num.ind = 100,
prob = c(0.5, 0.5),
input.map = NULL,
len.block = 5e7,
range.hot = 4:6,
range.cold = 1:5,
rate.mut = 1e-8,
cal.model = "A",
FR = NULL,
cv = NULL,
var.pheno = NULL,
h2.tr1 = c(0.3, 0.1, 0.05, 0.05, 0.05, 0.01),
num.qtn.tr1 = 500,
sd.tr1 = c(0.4, 0.2, 0.02, 0.02, 0.02, 0.02),
dist.qtn.tr1 = rep("normal", 6),
prob.tr1 = rep(0.5, 6),
shape.tr1 = rep(1, 6),
scale.tr1 = rep(1, 6),
shape1.tr1 = rep(1, 6),
shape2.tr1 = rep(1, 6),
ncp.tr1 = rep(0, 6),
multrait = FALSE,
num.qtn.trn = matrix(c(400, 100, 100, 400), 2, 2),
sd.trn = matrix(c(1, 0, 0, 0.5), 2, 2),
gnt.cov = matrix(c(1, 2, 2, 16), 2, 2),
h2.trn = c(0.3, 0.5),
qtn.spot = rep(0.1, 10),
maf = 0,
sel.crit = "pheno",
sel.on = TRUE,
mtd.reprod = "randmate",
userped = NULL,
num.prog = 2,
ratio = 0.5,
prog.tri = 2,
prog.doub = 2,
prog.back = rep(2, num.gen),
refresh = c(1, 0.6),
keep.max.gen = rep(3, 2),
ps = rep(0.8, 2),
decr = TRUE,
sel.multi = "index",
index.wt = c(0.5, 0.5),
index.tdm = 1,
goal.perc = 0.1,
pass.perc = 0.9,
sel.sing = "comb") {
# Start simer
# TODO: How to generate inbreeding sirs and uninbreeding dams
# TODO: optcontri.sel
# TODO: add MVP for output
# TODO: correct pedigree
# TODO: add superior limit of homo
# TODO: add multiple fix and random effects
# TODO: add summary() to population information
# TODO: add inbreeding coeficient
# TODO: updata index selection
# TODO: add true block distribution
# TODO: genomic mating
# TODO: inbreeding change in every generations
simer.Version(width = 70, verbose = verbose)
inner.env <- environment()
# initialize logging
if (!is.null(outpath)) {
if (!dir.exists(outpath)) stop(paste0("Please check your output path: ", outpath))
if (verbose) {
logging.initialize("Simer", outpath = outpath)
}
}
################### MAIN_FUNCTION_SETTING ###################
logging.log("--------------------------- replication ", replication, "---------------------------\n", verbose = verbose)
op <- Sys.time()
logging.log(" SIMER BEGIN AT", as.character(op), "\n", verbose = verbose)
set.seed(seed.sim)
if (incols == 1) outcols <- 1
################### BASE_POPULATION ###################
# stablish genotype of base population if there isn't by two ways:
# 1. input rawgeno
# 2. input num.marker and num.ind
nmrk <- nrow(input.map)
# combine genotype matrix
if (is.list(rawgeno1)) {
if (!(mtd.reprod == "randmate" || mtd.reprod == "randexself"))
stop("Only random matings support genotype list!")
nsir <- ncol(rawgeno1$sir) / incols
ndam <- ncol(rawgeno1$dam) / incols
nind <- nsir + ndam
basepop <- getpop(nind, 1, nsir/nind)
rawgeno1 <- cbind(rawgeno1$sir[], rawgeno1$dam[])
} else {
# set base population information
nind <- ifelse(is.null(rawgeno1), num.ind, ncol(rawgeno1) / incols)
nsir <- nind * ratio
ndam <- nind * (1-ratio)
basepop <- getpop(nind, 1, ratio)
}
num.marker <- nrow(input.map)
logging.log(" --- base population 1 ---\n", verbose = verbose)
basepop.geno <-
genotype(rawgeno = rawgeno1,
incols = incols,
num.marker = num.marker,
num.ind = num.ind,
prob = prob,
verbose = verbose)
# set block information and recombination information
num.ind <- nind
pos.map <- check.map(input.map = input.map, num.marker = nmrk, len.block = len.block)
blk.rg <- cal.blk(pos.map)
recom.spot <- as.numeric(pos.map[blk.rg[, 1], 7])
# calculate for marker information
effs <-
cal.effs(pop.geno = basepop.geno,
incols = incols,
cal.model = cal.model,
num.qtn.tr1 = num.qtn.tr1,
sd.tr1 = sd.tr1,
dist.qtn.tr1 = dist.qtn.tr1,
prob.tr1 = prob.tr1,
shape.tr1 = shape.tr1,
scale.tr1 = scale.tr1,
shape1.tr1 = shape1.tr1,
shape2.tr1 = shape2.tr1,
ncp.tr1 = ncp.tr1,
multrait = multrait,
num.qtn.trn = num.qtn.trn,
sd.trn = sd.trn,
qtn.spot = qtn.spot,
maf = maf,
verbose = verbose)
# calculate phenotype according to genotype
if (sel.on) {
pop1.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = basepop,
pop.geno = basepop.geno,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = basepop,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
basepop <- pop1.pheno$pop
pop1.pheno$pop <- NULL
}
# only mutation in clone and doubled haploid
if (mtd.reprod == "clone" || mtd.reprod == "dh" || mtd.reprod == "selfpol") {
basepop$sex <- 0
recom.spot <- NULL
ratio <- 0
}
basepop.geno.em <- # genotype matrix after Mutation
genotype(geno = basepop.geno,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
rate.mut = rate.mut,
verbose = verbose)
if (mtd.reprod == "singcro" || mtd.reprod == "tricro" || mtd.reprod == "doubcro" || mtd.reprod == "backcro") {
# set base population information
basepop$sex <- 1
if (is.null(rawgeno2)) {
logging.log(" --- base population 2 ---\n", verbose = verbose)
prob1 <- runif(1)
prob <- c(prob1, 1 - prob1)
pop2.geno <- genotype(incols = incols, num.marker = num.marker, num.ind = num.ind, prob = prob, verbose = verbose)
} else {
pop2.geno <- genotype(rawgeno = rawgeno2, verbose = verbose)
}
# set base population information
nind2 <- ncol(pop2.geno) / incols
pop2 <- getpop(nind2, nind+1, 0)
# calculate phenotype according to genotype
if (sel.on) {
pop2.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop2,
pop.geno = pop2.geno,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop2,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop2 <- pop2.pheno$pop
pop2.pheno$pop <- NULL
# reset trait
if (mtd.reprod != "backcro") {
trait <- list()
trait$pop.sir1 <- pop1.pheno
if (mtd.reprod == "tricro") {
trait$pop.sir2 <- pop2.pheno
} else {
trait$pop.dam1 <- pop2.pheno
}
}
}
pop2.geno.em <- # genotype matrix after Mutation
genotype(geno = pop2.geno,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
pop3.geno.em <- NULL
pop4.geno.em <- NULL
}
if (mtd.reprod == "tricro" || mtd.reprod == "doubcro") {
if (is.null(rawgeno3)) {
logging.log(" --- base population 3 ---\n", verbose = verbose)
prob1 <- runif(1)
prob <- c(prob1, 1 - prob1)
pop3.geno <- genotype(incols = incols, num.marker = num.marker, num.ind = num.ind, prob = prob, verbose = verbose)
} else {
pop3.geno <- genotype(rawgeno = rawgeno3, verbose = verbose)
}
# set base population information
nind3 <- ncol(pop3.geno) / incols
pop3 <- getpop(nind3, nind+nind2+1, 1)
# calculate phenotype according to genotype
if (sel.on) {
pop3.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop3,
pop.geno = pop3.geno,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop3,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop3 <- pop3.pheno$pop
pop3.pheno$pop <- NULL
if (mtd.reprod == "tricro") {
trait$pop.dam1 <- pop3.pheno
} else {
trait$pop.sir2 <- pop3.pheno
}
}
pop3.geno.em <- # genotype matrix after Mutation
genotype(geno = pop3.geno,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
pop4.geno.em <- NULL
}
if (mtd.reprod == "doubcro") {
logging.log(" --- base population 4 ---\n", verbose = verbose)
if (is.null(rawgeno4)) {
prob1 <- runif(1)
prob <- c(prob1, 1 - prob1)
pop4.geno <- genotype(incols = incols, num.marker = num.marker, num.ind = num.ind, prob = prob, verbose = verbose)
} else {
pop4.geno <- genotype(rawgeno = rawgeno4, verbose = verbose)
}
# set base population information
nind4 <- ncol(pop4.geno) / incols
pop4 <- getpop(nind4, nind+nind2+nind3+1, 0)
# calculate phenotype according to genotype
if (sel.on) {
pop4.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop4,
pop.geno = pop4.geno,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop4,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop4 <- pop4.pheno$pop
pop4.pheno$pop <- NULL
trait$pop.dam2 <- pop4.pheno
}
pop4.geno.em <- # genotype matrix after Mutation
genotype(geno = pop4.geno,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
}
################### SETTING_PROCESS ###################
# 1. setting of number of progenies in every generation.
# 2. setting of directory
# adjust for genetic correlation
if (!(all(ps <= 1) | all(ps > 1))) stop("Please input a correct ps!")
ps[1] <- ifelse(sel.on, ps[1], 1)
ps[2] <- ifelse(sel.on, ps[2], 1)
# calculate number of individuals in every generation
count.ind <- rep(nind, num.gen)
count.sir <- count.dam <- NULL
if (mtd.reprod == "clone" || mtd.reprod == "dh" || mtd.reprod == "selfpol" || mtd.reprod == "randmate" || mtd.reprod == "randexself") {
if (num.gen > 1) {
count.sir <- ifelse(all(ps <= 1), round(nsir * ps[1]), ps[1])
count.dam <- ifelse(all(ps <= 1), round(ndam * ps[2]), ps[2])
count.ind[2] <- count.dam * num.prog
if (num.gen > 2) {
for(i in 3:num.gen) {
count.sir[i-1] <- ifelse(all(ps <= 1), round(count.ind[i-1] * ratio * ps[1]), ps[1])
count.dam[i-1] <- ifelse(all(ps <= 1), round(count.ind[i-1] * (1-ratio) * ps[2]), ps[2])
count.ind[i] <- count.dam[i-1] * num.prog
}
}
}
} else if (mtd.reprod == "singcro") {
count.sir <- ifelse(all(ps <= 1), round(nrow(basepop) * ps[1]), ps[1])
count.dam <- ifelse(all(ps <= 1), round(nrow(pop2) * ps[2]), ps[2])
sing.ind <- count.dam * num.prog
count.ind <- c(nrow(basepop), nrow(pop2), sing.ind)
} else if (mtd.reprod == "tricro") {
num.sir2 <- ifelse(all(ps <= 1), round(nrow(pop2) * ps[1]), ps[1])
num.dam1 <- ifelse(all(ps <= 1), round(nrow(pop3) * ps[2]), ps[2])
dam21.ind <- num.dam1 * prog.tri
num.sir1 <- ifelse(all(ps <= 1), round(nrow(basepop) * ps[1]), ps[1])
num.dam21 <- ifelse(all(ps <= 1), round(dam21.ind * (1-ratio) * ps[2]), ps[2])
tri.ind <- num.dam21 * num.prog
count.sir <- c(num.sir2, num.sir1)
count.dam <- c(num.dam1, num.dam21)
count.ind <- c(nrow(basepop), nrow(pop2), nrow(pop3), dam21.ind, tri.ind)
} else if (mtd.reprod == "doubcro") {
num.sir1 <- ifelse(all(ps <= 1), round(nrow(basepop) * ps[1]), ps[1])
num.dam1 <- ifelse(all(ps <= 1), round(nrow(pop2) * ps[2]), ps[2])
sir11.ind <- num.dam1 * prog.doub
num.sir2 <- ifelse(all(ps <= 1), round(nrow(pop3) * ps[1]), ps[1])
num.dam2 <- ifelse(all(ps <= 1), round(nrow(pop4) * ps[2]), ps[2])
dam22.ind <- num.dam2 * prog.doub
num.sir11 <- ifelse(all(ps <= 1), round(sir11.ind * ratio * ps[2]), ps[2])
num.dam22 <- ifelse(all(ps <= 1), round(dam22.ind * (1-ratio) * ps[2]), ps[2])
doub.ind <- num.dam22 * num.prog
count.sir <- c(num.sir1, num.sir2, num.sir11)
count.dam <- c(num.dam1, num.dam2, num.dam22)
count.ind <- c(nrow(basepop), nrow(pop2), nrow(pop3), nrow(pop4), sir11.ind, dam22.ind, doub.ind)
} else if (mtd.reprod == "backcro") {
count.ind[1] <- nrow(basepop) + nrow(pop2)
if (num.gen > 1) {
count.sir[1] <- ifelse(all(ps <= 1), round(nrow(basepop) * ps[1]), ps[1])
count.dam[1] <- ifelse(all(ps <= 1), round(nrow(pop2) * ps[2]), ps[2])
count.ind[2] <- count.dam[1] * num.prog
for(i in 3:num.gen) {
count.sir[i-1] <- count.sir[i-2]
count.dam[i-1] <- ifelse(all(ps <= 1), round(count.ind[i-1] * (1-ratio) * ps[2]), ps[2])
count.ind[i] <- count.dam[i-1] * num.prog
}
}
} # end if mtd.reprod
if (mtd.reprod != "userped") {
# Create a folder to save files
if (!is.null(outpath)) {
if (!dir.exists(outpath)) stop("Please check your outpath!")
if (out.format == "numeric") {
outpath = paste0(outpath, .Platform$file.sep, sum(count.ind), "_Simer_Data_numeric")
} else if (out.format == "plink"){
outpath = paste0(outpath, .Platform$file.sep, sum(count.ind), "_Simer_Data_plink")
} else {
stop("out.format should be 'numeric' or 'plink'!")
}
if (!dir.exists(outpath)) dir.create(outpath)
directory.rep <- paste0(outpath, .Platform$file.sep, "replication", replication)
if (dir.exists(directory.rep)) {
remove_bigmatrix(file.path(directory.rep, out))
unlink(directory.rep, recursive = TRUE)
}
dir.create(directory.rep)
}
}
if (all(ps <= 1)) {
# calculate selection intensity
sel.i <- dnorm(qnorm(1 -ps)) / ps
logging.log(" --- selection intensity ---\n", verbose = verbose)
logging.log(" Selection intensity is", sel.i, "for males and females\n", verbose = verbose)
} else if (all(ps > 1)) {
sel.i <- ps
logging.log(" --- selected individuals number ---\n", verbose = verbose)
logging.log(" Number of selected individuals is", sel.i, "for males and females in every generation\n", verbose = verbose)
}
################### REPRODUCTION_PROCESS ###################
# 1. Reproduction based on basepop and basepop.geno according
# to different reproduction method.
logging.log(" --- start reproduction ---\n", verbose = verbose)
# multi-generation: clone, dh, selpol, randmate, randexself
geno.back <- paste0(out, ".geno.bin")
geno.desc <- paste0(out, ".geno.desc")
ind.stays <- ind.stay <- NULL
core.stays <- core.stay <- NULL
if (mtd.reprod == "clone" || mtd.reprod == "dh" || mtd.reprod == "selfpol" || mtd.reprod == "randmate" || mtd.reprod == "randexself") {
out.geno.gen <- out.geno.gen[out.geno.gen > 0]
out.pheno.gen <- out.pheno.gen[out.pheno.gen > 0]
out.geno.index <- getindex(count.ind, out.geno.gen)
out.pheno.index <- getindex(count.ind, out.pheno.gen)
# store all genotype
geno.total.temp <- big.matrix(
nrow = num.marker,
ncol = outcols*sum(count.ind),
init = 3,
type = 'char')
if (!is.null(outpath)) {
geno.total <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind[out.geno.gen]),
init = 3,
type = 'char',
backingpath = directory.rep,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.total <- big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind[out.geno.gen]),
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
# set total population
pop.total <- basepop
gc <- basepop.geno
if (incols == 2 & outcols == 1) gc <- geno.cvt1(gc)
if (1 %in% out.geno.gen) {
input.geno(geno.total, gc, outcols*count.ind[1], mrk.dense)
}
input.geno(geno.total.temp, gc, outcols*count.ind[1], mrk.dense)
logging.log(" After generation 1 ,", count.ind[1], "individuals are generated...\n", verbose = verbose)
if (num.gen > 1) {
# add selection to generation1
if (sel.on) {
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered <- ind.ordered[-1]
} else {
ind.ordered <- basepop$index
}
core.stays[[1]] <- core.stay <- ind.stays[[1]] <- ind.stay <- getsd(ind.ordered, basepop, count.sir[1], count.dam[1])
pop.last <- basepop
pop.geno.last <- basepop.geno.em
pop.geno.core <- basepop.geno.em[, getgmt(c(ind.stay$sir, ind.stay$dam), incols = incols)]
pop1.geno.id <- basepop$index
for (i in 2:num.gen) {
pop.gp <- # pop.gp with genotype and pop information
reproduces(pop1 = pop.last,
pop1.geno.id = pop1.geno.id,
pop1.geno = pop.geno.last,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = mtd.reprod,
num.prog = num.prog,
ratio = ratio)
pop.geno.curr <- pop.gp$geno
pop.curr <- pop.gp$pop
pop1.geno.id <- pop.curr$index
isd <- c(2, 5, 6)
# input genotype
gc <- pop.geno.curr
if (incols == 2 & outcols == 1) gc <- geno.cvt1(gc)
if (i %in% out.geno.gen) {
out.gg <- out.geno.gen[1:which(out.geno.gen == i)]
input.geno(geno.total, gc, outcols * sum(count.ind[out.gg]), mrk.dense)
}
input.geno(geno.total.temp, gc, outcols*sum(count.ind[1:i]), mrk.dense)
pop.total.temp <- rbind(pop.total[1:sum(count.ind[1:(i-1)]), isd], pop.curr[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.curr,
pop.geno = pop.geno.curr,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.curr <- pop.pheno$pop
pop.pheno$pop <- NULL
}
pop.total <- rbind(pop.total, pop.curr)
logging.log(" After generation", i, ",", sum(count.ind[1:i]), "individuals are generated...\n", verbose = verbose)
if (i == num.gen) break
# output index.tdm and ordered individuals indice
if (sel.on) {
ind.ordered <-
selects(pop = pop.curr,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total.temp,
pop.pheno = pop.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered <- ind.ordered[-1]
} else {
ind.ordered <- pop.curr$index
}
core.stays[[i]] <- core.stay <- ind.stays[[i]] <- ind.stay <- getsd(ind.ordered, pop.curr, count.sir[i], count.dam[i])
pop.geno.last <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.curr,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
pop.last <- pop.curr
if (sel.on & all(ps > 1)) {
info.core <- sel.core(ind.stay, core.stay, refresh, keep.max.gen,
incols, pop.total = pop.total, pop.geno.curr, pop.geno.core)
core.stays[[i]] <- core.stay <- info.core$core.stay
pop.geno.last <- pop.geno.core <- info.core$core.geno
pop1.geno.id <- c(core.stay$sir, core.stay$dam)
ind.stay <- core.stay
}
} # end for
}
if(num.gen > 1) {
names(ind.stays) <- paste0("gen", 1:(num.gen-1))
names(core.stays) <- paste0("gen", 1:(num.gen-1))
}
# if traits have genetic correlation
# generate phenotype at last
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total.temp,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = FALSE,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
if (!is.null(outpath)) {
# write files
logging.log(" --- write files of total population ---\n", verbose = verbose)
write.file(pop.total, geno.total, pos.map, out.geno.index, out.pheno.index, out, directory.rep, out.format, verbose)
flush(geno.total)
}
if (num.gen > 1) {
rm(pop.gp); rm(pop.curr); rm(pop.geno.curr); rm(pop.last);
rm(pop.geno.last); rm(pop.total.temp);
}
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(geno.total.temp); gc()
# certain-generation: singcro, tricro, doubcro
} else if (mtd.reprod == "singcro") {
out.geno.index <- 1:sum(count.ind)
logging.log(" After generation", 1, ",", sum(count.ind[1:2]), "individuals are generated...\n", verbose = verbose)
if (!is.null(outpath)) {
dir.sir <- paste0(directory.rep, .Platform$file.sep, count.ind[1], "_sir")
dir.dam <- paste0(directory.rep, .Platform$file.sep, count.ind[2], "_dam")
dir.sgc <- paste0(directory.rep, .Platform$file.sep, count.ind[3], "_single_cross")
if (dir.exists(dir.sir)) { unlink(dir.sir, recursive = TRUE) }
if (dir.exists(dir.dam)) { unlink(dir.dam, recursive = TRUE) }
if (dir.exists(dir.sgc)) { unlink(dir.sgc, recursive = TRUE) }
dir.create(dir.sir)
dir.create(dir.dam)
dir.create(dir.sgc)
geno.sir <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char',
backingpath = dir.sir,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char',
backingpath = dir.dam,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.singcro <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char',
backingpath = dir.sgc,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.sir <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char')
geno.dam <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char')
geno.singcro <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
if (sel.on) {
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop2,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop2,
pop.pheno = pop2.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- basepop$index
ind.ordered2 <- pop2$index
}
ind.stays[[1]] <- getsd(ind.ordered1, basepop, count.sir, 0)
ind.stays[[2]] <- getsd(ind.ordered2, pop2, 0, count.dam)
names(ind.stays) <- c("basepop", "pop2")
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[2]]$dam
core.stays[[1]] <- ind.stay
names(core.stays) <- "gen1"
pop1.geno.id <- basepop$index
pop2.geno.id <- pop2$index
pop.gp <-
reproduces(pop1 = basepop,
pop2 = pop2,
pop1.geno.id = basepop$index,
pop2.geno.id = pop2$index,
pop1.geno = basepop.geno.em,
pop2.geno = pop2.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = mtd.reprod,
num.prog = num.prog,
ratio = ratio)
pop.geno.singcro <- pop.gp$geno
pop.singcro <- pop.gp$pop
isd <- c(2, 5, 6)
pop.total.temp <- rbind(basepop[, isd], pop2[, isd], pop.singcro[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.singcro,
pop.geno = pop.geno.singcro,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.singcro <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.singcro <- pop.pheno
}
logging.log(" After generation", 2, ",", sum(count.ind[1:3]), "individuals are generated...\n", verbose = verbose)
gc.sir <- basepop.geno
gc.dam <- pop2.geno
gc.singcro <- pop.geno.singcro
if (incols == 2 & outcols == 1) {
gc.sir <- geno.cvt1(gc.sir)
gc.dam <- geno.cvt1(gc.dam)
gc.singcro <- geno.cvt1(gc.singcro)
}
input.geno(geno.sir, gc.sir, ncol(geno.sir), mrk.dense)
input.geno(geno.dam, gc.dam, ncol(geno.dam), mrk.dense)
input.geno(geno.singcro, gc.singcro, ncol(geno.singcro), mrk.dense)
# if traits have genetic correlation
# generate phenotype at last
if (!sel.on) {
pop.total <- rbind(basepop, pop2, pop.singcro)
geno.total <- cbind(basepop.geno, pop2.geno, pop.geno.singcro)
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
basepop <- pop.total[1:nind, ]
pop2 <- pop.total[(nind+1):(nind+nind2), ]
pop.singcro <- pop.total[(nind+nind2+1):(nind+nind2+nrow(pop.singcro)), ]
}
if (!is.null(outpath)) {
flush(geno.sir)
flush(geno.dam)
flush(geno.singcro)
# write files
logging.log(" --- write files of sirs ---\n", verbose = verbose)
write.file(basepop, geno.sir, pos.map, 1:nrow(basepop), 1:nrow(basepop), out, dir.sir, out.format, verbose)
logging.log(" --- write files of dams ---\n", verbose = verbose)
write.file(pop2, geno.dam, pos.map, 1:nrow(pop2), 1:nrow(pop2), out, dir.dam, out.format, verbose)
logging.log(" --- write files of progenies ---\n", verbose = verbose)
write.file(pop.singcro, geno.singcro, pos.map, 1:nrow(pop.singcro), 1:nrow(pop.singcro), out, dir.sgc, out.format, verbose)
}
# set total information of population and genotype
pop.total <- list(pop.sir1 = basepop, pop.dam1 = pop2, pop.singcro = pop.singcro)
geno.total <- list(geno.sir1 = gc.sir, geno.dam1 = gc.dam, geno.singcro = gc.singcro)
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(pop2); rm(pop2.geno); rm(pop2.geno.em);
rm(geno.sir); rm(geno.dam); rm(geno.singcro); rm(pop.gp); rm(pop.singcro); rm(pop.geno.singcro);
rm(gc.sir); rm(gc.dam); rm(gc.singcro); rm(pop.total.temp); gc()
} else if (mtd.reprod == "tricro") {
out.geno.index <- 1:sum(count.ind)
logging.log(" After generation", 1, ",", sum(count.ind[1:3]), "individuals are generated...\n", verbose = verbose)
if (!is.null(outpath)) {
dir.sir1 <- paste0(directory.rep, .Platform$file.sep, count.ind[1], "_sir1")
dir.dam1 <- paste0(directory.rep, .Platform$file.sep, count.ind[2], "_dam1")
dir.sir2 <- paste0(directory.rep, .Platform$file.sep, count.ind[3], "_sir2")
dir.dam21 <- paste0(directory.rep, .Platform$file.sep, count.ind[4], "_dam21")
dir.trc <- paste0(directory.rep, .Platform$file.sep, count.ind[5], "_three-ways_cross")
if (dir.exists(dir.sir1)) { unlink(dir.sir1, recursive = TRUE) }
if (dir.exists(dir.dam1)) { unlink(dir.dam1, recursive = TRUE) }
if (dir.exists(dir.sir2)) { unlink(dir.sir2, recursive = TRUE) }
if (dir.exists(dir.dam21)) { unlink(dir.dam21, recursive = TRUE) }
if (dir.exists(dir.trc)) { unlink(dir.trc, recursive = TRUE) }
dir.create(dir.sir1)
dir.create(dir.dam1)
dir.create(dir.sir2)
dir.create(dir.dam21)
dir.create(dir.trc)
geno.sir1 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char',
backingpath = dir.sir1,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam1 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char',
backingpath = dir.dam1,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.sir2 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char',
backingpath = dir.sir2,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam21 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[4],
init = 3,
type = 'char',
backingpath = dir.dam21,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.tricro <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[5],
init = 3,
type = 'char',
backingpath = dir.trc,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.sir1 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char')
geno.dam1 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char')
geno.sir2 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char')
geno.dam21 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[4],
init = 3,
type = 'char')
geno.tricro <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[5],
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
# correct the sex
pop2$sex <- 1
pop3$sex <- 2
if (sel.on) {
# add selection to generation1
ind.ordered <-
selects(pop = pop2,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop2,
pop.pheno = pop2.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop3,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop3,
pop.pheno = pop3.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- pop2$index
ind.ordered2 <- pop3$index
}
    # Select the sires retained from pop2 and the dams retained from pop3 for
    # the first tricro mating, and record them in ind.stays / core.stays.
    # NOTE(review): the writes to core.stays[[1]] and core.stays[[2]] on the
    # next two lines are superseded -- core.stays[[1]] is overwritten with the
    # combined ind.stay three lines below, and core.stays[[2]] is reassigned
    # for generation 2 later (L12626) -- and the chained 'core.stay' variable
    # appears unused afterwards; confirm these early writes are intentional.
    core.stays[[1]] <- core.stay <- ind.stays[[1]] <- getsd(ind.ordered1, pop2, count.sir[1], 0)
    core.stays[[2]] <- core.stay <- ind.stays[[2]] <- getsd(ind.ordered2, pop3, 0, count.dam[1])
    # Combine: sires come from pop2's selection, dams from pop3's.
    ind.stay$sir <- ind.stays[[1]]$sir
    ind.stay$dam <- ind.stays[[2]]$dam
    core.stays[[1]] <- ind.stay
# the first generation to the second generation
pop.gp <-
reproduces(pop1 = pop2,
pop2 = pop3,
pop1.geno.id = pop2$index,
pop2.geno.id = pop3$index,
pop1.geno = pop2.geno.em,
pop2.geno = pop3.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = prog.tri,
ratio = ratio)
pop.geno.dam21 <- pop.gp$geno
pop.dam21 <- pop.gp$pop
isd <- c(2, 5, 6)
pop.total.temp <- rbind(basepop[, isd], pop2[, isd], pop3[, isd], pop.dam21[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.dam21,
pop.geno = pop.geno.dam21,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.dam21 <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.dam21 <- pop.pheno
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop.dam21,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total.temp,
pop.pheno = pop.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- basepop$index
ind.ordered2 <- pop.dam21$index
}
ind.stays[[3]] <- getsd(ind.ordered1, basepop, count.sir[2], 0)
ind.stays[[4]] <- getsd(ind.ordered2, pop.dam21, 0, count.dam[2])
names(ind.stays) <- c("pop2", "pop3", "basepop", "pop.dam21")
ind.stay$sir <- ind.stays[[3]]$sir
ind.stay$dam <- ind.stays[[4]]$dam
core.stays[[2]] <- ind.stay
names(core.stays) <- c("gen1", "gen2")
logging.log(" After generation", 2, ",", sum(count.ind[1:4]), "individuals are generated...\n", verbose = verbose)
pop.geno.dam21.em <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.dam21,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
# the second generation to the third generation
pop.gp <-
reproduces(pop1 = basepop,
pop2 = pop.dam21,
pop1.geno.id = basepop$index,
pop2.geno.id = pop.dam21$index,
pop1.geno = basepop.geno.em,
pop2.geno = pop.geno.dam21.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = num.prog,
ratio = ratio)
pop.geno.tricro <- pop.gp$geno
pop.tricro <- pop.gp$pop
isd <- c(2, 5, 6)
pop.total.temp <- rbind(pop.total.temp, pop.tricro[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.tricro,
pop.geno = pop.geno.tricro,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.tricro <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.tricro <- pop.pheno
}
logging.log(" After generation", 3, ",", sum(count.ind[1:5]), "individuals are generated...\n", verbose = verbose)
gc.sir1 <- basepop.geno
gc.sir2 <- pop2.geno
gc.dam1 <- pop3.geno
gc.dam21 <- pop.geno.dam21
gc.tricro <- pop.geno.tricro
if (incols == 2 & outcols == 1) {
gc.sir1 <- geno.cvt1(gc.sir1)
gc.sir2 <- geno.cvt1(gc.sir2)
gc.dam1 <- geno.cvt1(gc.dam1)
gc.dam21 <- geno.cvt1(gc.dam21)
gc.tricro <- geno.cvt1(gc.tricro)
}
input.geno(geno.sir1, gc.sir1, ncol(geno.sir1), mrk.dense)
input.geno(geno.sir2, gc.sir2, ncol(geno.dam1), mrk.dense)
input.geno(geno.dam1, gc.dam1, ncol(geno.sir2), mrk.dense)
input.geno(geno.dam21, gc.dam21, ncol(geno.dam21), mrk.dense)
input.geno(geno.tricro, gc.tricro, ncol(geno.tricro), mrk.dense)
# if traits have genetic correlation
# generate phenotype at last
if (!sel.on) {
pop.total <- rbind(basepop, pop2, pop3, pop.dam21, pop.tricro)
geno.total <- cbind(basepop.geno, pop2.geno, pop3.geno, pop.geno.dam21[], pop.geno.tricro[])
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
basepop <- pop.total[1:nind, ]
pop2 <- pop.total[(nind+1):(nind+nind2), ]
pop3 <- pop.total[(nind+nind2+1):(nind+nind2+nind3), ]
pop.dam21 <- pop.total[(nind+nind2+nind3+1):(nind+nind2+nind3+nrow(pop.dam21)), ]
pop.tricro <- pop.total[(nind+nind2+nind3+nrow(pop.dam21)+1):(nind+nind2+nind3+nrow(pop.dam21)+nrow(pop.tricro)), ]
}
if (!is.null(outpath)) {
flush(geno.sir1)
flush(geno.dam1)
flush(geno.sir2)
flush(geno.dam21)
flush(geno.tricro)
# write files
logging.log(" --- write files of sir1s ---\n", verbose = verbose)
write.file(basepop, geno.sir1, pos.map, 1:nrow(basepop), 1:nrow(basepop), out, dir.sir1, out.format, verbose)
logging.log(" --- write files of sir2s ---\n", verbose = verbose)
write.file(pop2, geno.sir2, pos.map, 1:nrow(pop2), 1:nrow(pop2), out, dir.sir2, out.format, verbose)
logging.log(" --- write files of dam1s ---\n", verbose = verbose)
write.file(pop3, geno.dam1, pos.map, 1:nrow(pop3), 1:nrow(pop3), out, dir.dam1, out.format, verbose)
logging.log(" --- write files of dam21s ---\n", verbose = verbose)
write.file(pop.dam21, geno.dam21, pos.map, 1:nrow(pop.dam21), 1:nrow(pop.dam21), out, dir.dam21, out.format, verbose)
logging.log(" --- write files of progenies ---\n", verbose = verbose)
write.file(pop.tricro, geno.tricro, pos.map, 1:nrow(pop.tricro), 1:nrow(pop.tricro), out, dir.trc, out.format, verbose)
}
# set total information of population and genotype
pop.total <- list(pop.sir1 = basepop, pop.sir2 = pop2, pop.dam1 = pop3, pop.dam21 = pop.dam21, pop.tricro = pop.tricro)
geno.total <- list(geno.sir1 = gc.sir1, geno.sir2 = gc.sir2, geno.dam1 = gc.dam1, geno.dam21 = gc.dam21, geno.tricro = gc.tricro)
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(pop2); rm(pop2.geno); rm(pop2.geno.em);
rm(pop3); rm(pop3.geno); rm(pop3.geno.em); rm(geno.sir1); rm(geno.dam1); rm(geno.sir2);
rm(pop.gp); rm(pop.dam21); rm(geno.dam21); rm(pop.geno.dam21); rm(pop.geno.dam21.em);
rm(gc.sir1); rm(gc.sir2); rm(gc.dam1); rm(gc.dam21); rm(gc.tricro);
rm(pop.tricro); rm(geno.tricro); rm(pop.total.temp); gc()
} else if (mtd.reprod == "doubcro") {
out.geno.index <- 1:sum(count.ind)
logging.log(" After generation", 1, ",", sum(count.ind[1:4]), "individuals are generated...\n", verbose = verbose)
if (!is.null(outpath)) {
dir.sir1 <- paste0(directory.rep, .Platform$file.sep, count.ind[1], "_sir1")
dir.dam1 <- paste0(directory.rep, .Platform$file.sep, count.ind[2], "_dam1")
dir.sir2 <- paste0(directory.rep, .Platform$file.sep, count.ind[3], "_sir2")
dir.dam2 <- paste0(directory.rep, .Platform$file.sep, count.ind[4], "_dam2")
dir.sir11 <- paste0(directory.rep, .Platform$file.sep, count.ind[5], "_sir11")
dir.dam22 <- paste0(directory.rep, .Platform$file.sep, count.ind[6], "_dam22")
dir.dbc <- paste0(directory.rep, .Platform$file.sep, count.ind[7], "_double_cross")
if (dir.exists(dir.sir1)) { unlink(dir.sir1, recursive = TRUE) }
if (dir.exists(dir.dam1)) { unlink(dir.dam1, recursive = TRUE) }
if (dir.exists(dir.sir2)) { unlink(dir.sir2, recursive = TRUE) }
if (dir.exists(dir.dam2)) { unlink(dir.dam2, recursive = TRUE) }
if (dir.exists(dir.sir11)) { unlink(dir.sir11, recursive = TRUE) }
if (dir.exists(dir.dam22)) { unlink(dir.dam22, recursive = TRUE) }
if (dir.exists(dir.dbc)) { unlink(dir.dbc, recursive = TRUE) }
dir.create(dir.sir1)
dir.create(dir.dam1)
dir.create(dir.sir2)
dir.create(dir.dam2)
dir.create(dir.sir11)
dir.create(dir.dam22)
dir.create(dir.dbc)
geno.sir1 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char',
backingpath = dir.sir1,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam1 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char',
backingpath = dir.dam1,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.sir2 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char',
backingpath = dir.sir2,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam2 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[4],
init = 3,
type = 'char',
backingpath = dir.dam2,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.sir11 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[5],
init = 3,
type = 'char',
backingpath = dir.sir11,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam22 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[6],
init = 3,
type = 'char',
backingpath = dir.dam22,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.doubcro <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[7],
init = 3,
type = 'char',
backingpath = dir.dbc,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.sir1 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char')
geno.dam1 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char')
geno.sir2 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char')
geno.dam2 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[4],
init = 3,
type = 'char')
geno.sir11 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[5],
init = 3,
type = 'char')
geno.dam22 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[6],
init = 3,
type = 'char')
geno.doubcro <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[7],
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
if (sel.on) {
# add selection to generation1
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop2,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop2,
pop.pheno = pop2.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- basepop$index
ind.ordered2 <- pop2$index
}
ind.stays[[1]] <- getsd(ind.ordered1, basepop, count.sir[1], 0)
ind.stays[[2]] <- getsd(ind.ordered2, pop2, 0, count.dam[1])
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[2]]$dam
core.stays[[1]] <- ind.stay
# the first generation to the second generation(the first two populations)
pop.gp <-
reproduces(pop1 = basepop,
pop2 = pop2,
pop1.geno.id = basepop$index,
pop2.geno.id = pop2$index,
pop1.geno = basepop.geno.em,
pop2.geno = pop2.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = prog.doub,
ratio = ratio)
pop.geno.sir11 <- pop.gp$geno
pop.sir11 <- pop.gp$pop
pop.sir11$index <- pop.sir11$index - pop.sir11$index[1] + 1 + pop4$index[length(pop4$index)]
isd <- c(2, 5, 6)
pop.total.temp <- rbind(basepop[, isd], pop2[, isd], pop3[, isd], pop4[, isd], pop.sir11[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.sir11,
pop.geno = pop.geno.sir11,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.sir11 <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.sir11 <- pop.pheno
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = pop.sir11,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total.temp,
pop.pheno = pop.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered.sir11 <- ind.ordered[-1]
} else {
ind.ordered.sir11 <- pop.sir11$index
}
ind.stays[[5]] <- getsd(ind.ordered.sir11, pop.sir11, count.sir[3], 0)
pop.geno.sir11.em <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.sir11,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
if (sel.on) {
# add selection to generation1
ind.ordered <-
selects(pop = pop3,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop3,
pop.pheno = pop3.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop4,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop4,
pop.pheno = pop4.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- pop3$index
ind.ordered2 <- pop4$index
}
ind.stays[[3]] <- getsd(ind.ordered1, pop3, count.sir[2], 0)
ind.stays[[4]] <- getsd(ind.ordered2, pop4, 0, count.dam[2])
ind.stay$sir <- ind.stays[[3]]$sir
ind.stay$dam <- ind.stays[[4]]$dam
core.stays[[2]] <- ind.stay
# the first generation to the second generation(the last two populations)
pop.gp <-
reproduces(pop1 = pop3,
pop2 = pop4,
pop1.geno.id = pop3$index,
pop2.geno.id = pop4$index,
pop1.geno = pop3.geno.em,
pop2.geno = pop4.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = prog.doub,
ratio = ratio)
pop.geno.dam22 <- pop.gp$geno
pop.dam22 <- pop.gp$pop
pop.dam22$index <- pop.dam22$index - pop.dam22$index[1] + 1 + pop.sir11$index[length(pop.sir11$index)]
pop.total.temp <- rbind(pop.total.temp, pop.dam22[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.dam22,
pop.geno = pop.geno.dam22,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.dam22 <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.dam22 <- pop.pheno
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = pop.dam22,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total.temp,
pop.pheno = pop.pheno,
verbose = verbose)
# index.tdm <- ind.ordered[1]
ind.ordered.dam22 <- ind.ordered[-1]
} else {
ind.ordered.dam22 <- pop.dam22$index
}
ind.stays[[6]] <- getsd(ind.ordered.dam22, pop.dam22, 0, count.dam[3])
names(ind.stays) <- c("basepop", "pop2", "pop3", "pop4", "pop.sir11", "pop.dam22")
ind.stay$sir <- ind.stays[[5]]$sir
ind.stay$dam <- ind.stays[[6]]$dam
core.stays[[3]] <- ind.stay
names(core.stays) <- c("gen1.0", "gen1.5", "gen2")
pop.geno.dam22.em <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.dam22,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
logging.log(" After generation", 2, ",", sum(count.ind[1:6]), "individuals are generated...\n", verbose = verbose)
# the second generation to the third generation
pop.gp <-
reproduces(pop1 = pop.sir11,
pop2 = pop.dam22,
pop1.geno.id = pop.sir11$index,
pop2.geno.id = pop.dam22$index,
pop1.geno = pop.geno.sir11.em,
pop2.geno = pop.geno.dam22.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = num.prog,
ratio = ratio)
pop.geno.doubcro <- pop.gp$geno
pop.doubcro <- pop.gp$pop
pop.total.temp <- rbind(pop.total.temp, pop.doubcro[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.doubcro,
pop.geno = pop.geno.doubcro,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.doubcro <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.doubcro <- pop.pheno
}
logging.log(" After generation", 3, ",", sum(count.ind[1:7]), "individuals are generated...\n", verbose = verbose)
gc.sir1 <- basepop.geno
gc.dam1 <- pop2.geno
gc.sir2 <- pop3.geno
gc.dam2 <- pop4.geno
gc.sir11 <- pop.geno.sir11
gc.dam22 <- pop.geno.dam22
gc.doubcro <- pop.geno.doubcro
if (incols == 2 & outcols == 1) {
gc.sir1 <- geno.cvt1(gc.sir1)
gc.dam1 <- geno.cvt1(gc.dam1)
gc.sir2 <- geno.cvt1(gc.sir2)
gc.dam2 <- geno.cvt1(gc.dam2)
gc.sir11 <- geno.cvt1(gc.sir11)
gc.dam22 <- geno.cvt1(gc.dam22)
gc.doubcro <- geno.cvt1(gc.doubcro)
}
input.geno(geno.sir1, gc.sir1, ncol(geno.sir1), mrk.dense)
input.geno(geno.dam1, gc.dam1, ncol(geno.dam1), mrk.dense)
input.geno(geno.sir2, gc.sir2, ncol(geno.sir2), mrk.dense)
input.geno(geno.dam2, gc.dam2, ncol(geno.dam2), mrk.dense)
input.geno(geno.sir11, gc.sir11, ncol(geno.sir11), mrk.dense)
input.geno(geno.dam22, gc.dam22, ncol(geno.dam22), mrk.dense)
input.geno(geno.doubcro, gc.doubcro, ncol(geno.doubcro), mrk.dense)
# if traits have genetic correlation
# generate phenotype at last
if (!sel.on) {
pop.total <- rbind(basepop, pop2, pop3, pop4, pop.sir11, pop.dam22, pop.doubcro)
geno.total <- cbind(basepop.geno, pop2.geno, pop3.geno, pop4.geno, pop.geno.sir11[], pop.geno.dam22[], pop.geno.doubcro[])
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
basepop <- pop.total[1:nind, ]
pop2 <- pop.total[(nind+1):(nind+nind2), ]
pop3 <- pop.total[(nind+nind2+1):(nind+nind2+nind3), ]
pop4 <- pop.total[(nind+nind2+nind3+1):(nind+nind2+nind3+nind4), ]
pop.sir11 <- pop.total[(nind+nind2+nind3+nind4+1):(nind+nind2+nind3+nind4+nrow(pop.sir11)), ]
pop.dam22 <- pop.total[(nind+nind2+nind3+nind4+nrow(pop.sir11)+1):(nind+nind2+nind3+nind4+nrow(pop.sir11)+nrow(pop.dam22)), ]
pop.doubcro <- pop.total[(nind+nind2+nind3+nrow(pop.sir11)+nrow(pop.dam22)+1):(nind+nind2+nind3+nind4+nrow(pop.sir11)+nrow(pop.dam22)+nrow(pop.doubcro)), ]
}
if (!is.null(outpath)) {
flush(geno.sir1)
flush(geno.dam1)
flush(geno.sir2)
flush(geno.dam2)
flush(geno.sir11)
flush(geno.dam22)
flush(geno.doubcro)
# write files
logging.log(" --- write files of sir1s ---\n", verbose = verbose)
write.file(basepop, geno.sir1, pos.map, 1:nrow(basepop), 1:nrow(basepop), out, dir.sir1, out.format, verbose)
logging.log(" --- write files of dam1s ---\n", verbose = verbose)
write.file(pop2, geno.dam1, pos.map, 1:nrow(pop2), 1:nrow(pop2), out, dir.dam1, out.format, verbose)
logging.log(" --- write files of sir2s ---\n", verbose = verbose)
write.file(pop3, geno.sir2, pos.map, 1:nrow(pop3), 1:nrow(pop3), out, dir.sir2, out.format, verbose)
logging.log(" --- write files of dam2s ---\n", verbose = verbose)
write.file(pop4, geno.dam2, pos.map, 1:nrow(pop4), 1:nrow(pop4), out, dir.dam2, out.format, verbose)
logging.log(" --- write files of sir11s ---\n", verbose = verbose)
write.file(pop.sir11, geno.sir11, pos.map, 1:nrow(pop.sir11), 1:nrow(pop.sir11), out, dir.sir11, out.format, verbose)
logging.log(" --- write files of dam22s ---\n", verbose = verbose)
write.file(pop.dam22, geno.dam22, pos.map, 1:nrow(pop.dam22), 1:nrow(pop.dam22), out, dir.dam22, out.format, verbose)
logging.log(" --- write files of progenies ---\n", verbose = verbose)
write.file(pop.doubcro, geno.doubcro, pos.map, 1:nrow(pop.doubcro), 1:nrow(pop.doubcro), out, dir.dbc, out.format, verbose)
}
# set total information of population and genotype
pop.total <- list(pop.sir1 = basepop, pop.dam1 = pop2, pop.sir2 = pop3, pop.dam2 = pop4, pop.sir11 = pop.sir11, pop.dam22 = pop.dam22, pop.doubcro = pop.doubcro)
geno.total <- list(geno.sir1 = gc.sir1, geno.dam1 = gc.dam1, geno.sir2 = gc.sir2, geno.dam2 = gc.dam2, geno.sir11 = gc.sir11, geno.dam22 = gc.dam22, geno.doubcro = gc.doubcro)
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(geno.sir1);
rm(pop2); rm(pop2.geno); rm(pop2.geno.em); rm(geno.dam1);
rm(pop3); rm(pop3.geno); rm(pop3.geno.em); rm(geno.sir2);
rm(pop4); rm(pop4.geno); rm(pop4.geno.em); rm(geno.dam2);
rm(pop.sir11); rm(pop.geno.sir11); rm(pop.geno.sir11.em);
rm(pop.dam22); rm(pop.geno.dam22); rm(pop.geno.dam22.em);
rm(pop.gp); rm(pop.doubcro); rm(pop.geno.doubcro);
rm(gc.sir1); rm(gc.dam1); rm(gc.sir2); rm(gc.dam2);
rm(gc.sir11); rm(gc.dam22); rm(gc.doubcro);
rm(pop.total.temp); gc()
} else if (mtd.reprod == "backcro") {
if (num.gen != length(prog.back))
stop(" Number of generation should equal to the length of prog.back!")
out.geno.gen <- out.geno.gen[out.geno.gen > 0]
out.pheno.gen <- out.pheno.gen[out.pheno.gen > 0]
out.geno.index <- getindex(count.ind, out.geno.gen)
out.pheno.index <- getindex(count.ind, out.pheno.gen)
# store all genotype
geno.total.temp <- big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind),
init = 3,
type = 'char')
if (!is.null(outpath)) {
geno.total <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind[out.geno.gen]),
init = 3,
type = 'char',
backingpath = directory.rep,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.total <- big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind[out.geno.gen]),
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
# set total population
pop.total <- rbind(basepop, pop2)
gc.base <- basepop.geno
gc.pop2 <- pop2.geno
if (incols == 2 & outcols == 1) {
gc.base <- geno.cvt1(gc.base)
gc.pop2 <- geno.cvt1(gc.pop2)
}
if (1 %in% out.geno.gen) {
input.geno(geno.total, gc.base, outcols * nrow(basepop), mrk.dense)
input.geno(geno.total, gc.pop2, outcols * count.ind[1], mrk.dense)
}
if (!sel.on) {
input.geno(geno.total.temp, gc.base, outcols*nrow(basepop), mrk.dense)
input.geno(geno.total.temp, gc.pop2, outcols*count.ind[1], mrk.dense)
}
logging.log(" After generation 1 ,", count.ind[1], "individuals are generated...\n", verbose = verbose)
if (num.gen > 1) {
if (sel.on) {
# add selection to generation1
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop2,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop2,
pop.pheno = pop2.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- basepop$index
ind.ordered2 <- pop2$index
}
ind.stays[[1]] <- getsd(ind.ordered1, basepop, count.sir[1], 0)
ind.stays[[2]] <- getsd(ind.ordered2, pop2, 0, count.dam[1])
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[2]]$dam
core.stays[[1]] <- ind.stay
for (i in 2:num.gen) {
pop.gp <-
reproduces(pop1 = basepop,
pop2 = pop2,
pop1.geno.id = basepop$index,
pop2.geno.id = pop2$index,
pop1.geno = basepop.geno.em,
pop2.geno = pop2.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = num.prog,
ratio = ratio)
pop.geno.curr <- pop.gp$geno
pop.curr <- pop.gp$pop
if (i %in% out.geno.gen) {
gc <- pop.geno.curr
if (incols == 2 & outcols == 1) gc <- geno.cvt1(gc)
out.gg <- out.geno.gen[1:which(out.geno.gen == i)]
input.geno(geno.total, gc, outcols * sum(count.ind[out.gg]), mrk.dense)
}
input.geno(geno.total.temp, pop.geno.curr, outcols*sum(count.ind[1:i]), mrk.dense)
isd <- c(2, 5, 6)
pop.total.temp <- rbind(pop.total[1:sum(count.ind[1:(i-1)]), isd], pop.curr[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.curr,
pop.geno = pop.geno.curr,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.curr <- pop.pheno$pop
pop.pheno$pop <- NULL
}
pop.total <- rbind(pop.total, pop.curr)
logging.log(" After generation", i, ",", sum(count.ind[1:i]), "individuals are generated...\n", verbose = verbose)
if (i == num.gen) break
if (sel.on) {
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = pop.curr,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total,
pop.pheno = pop.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered2 <- pop.curr$index
}
ind.stays[[i+1]] <- getsd(ind.ordered2, pop.curr, 0, count.dam[i])
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[i+1]]$dam
core.stays[[i]] <- ind.stay
pop2.geno.em <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.curr,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
pop2 <- pop.curr
} # end for
}
if(num.gen > 1) {
names(ind.stays) <- c("basepop", "pop2", paste0("gen", 2:(num.gen-1)))
names(core.stays) <- paste0("gen", 1:(num.gen-1))
}
names(core.stays) <- paste0("gen", 1:(num.gen-1))
# if traits have genetic correlation
# generate phenotype at last
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total.temp,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = FALSE,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
if (!is.null(outpath)) {
flush(geno.total)
# write files
logging.log(" --- write files of total population ---\n", verbose = verbose)
write.file(pop.total, geno.total, pos.map, out.geno.index, out.pheno.index, out, directory.rep, out.format, verbose)
}
if (num.gen > 1) {
rm(pop.gp); rm(pop.curr); rm(pop.geno.curr); rm(pop.total.temp)
}
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(pop2); rm(pop2.geno);
rm(pop2.geno.em); rm(geno.total.temp); gc()
} else if (mtd.reprod == "userped") {
pop1.geno.copy <- basepop.geno
if (is.null(userped)) {
stop(" Please input pedigree in the process userped!")
}
rawped <- userped
rawped[is.na(rawped)] <- "0"
if (as.numeric(rawped[1, 2]) < basepop$index[1]) {
stop(" The index of the first sir should be in index of pop1!")
}
# Thanks to YinLL for sharing codes of pedigree sorting
pedx <- as.matrix(rawped)
pedx0 <- c(setdiff(pedx[, 2],pedx[, 1]), setdiff(pedx[, 3],pedx[, 1]))
if(length(pedx0) != 0){
pedx <- rbind(cbind(pedx0, "0", "0"), pedx)
}
pedx <- pedx[pedx[, 1] != "0", ]
pedx <- pedx[!duplicated(pedx), ]
pedx <- pedx[!duplicated(pedx[, 1]), ]
pedx1 <- cbind(1:(ncol(basepop.geno)/2), "0", "0")
pedx2 <- pedx[!(pedx[, 2] == "0" & pedx[, 3] == "0"), ]
go = TRUE
i <- 1
count.ind <- nrow(pedx1)
logging.log(" After generation", i, ",", sum(count.ind[1:i]), "individuals are generated...\n", verbose = verbose)
while(go == TRUE) {
i <- i + 1
Cpedx <- c(pedx1[, 1])
idx <- (pedx2[, 2] %in% Cpedx) & (pedx2[, 3] %in% Cpedx)
if (sum(idx) == 0) {
logging.log(" Some individuals in pedigree are not in mating process!\n They are", verbose = verbose)
simer.print(pedx2[, 1], verbose = verbose)
pedx2 <- pedx2[-c(1:nrow(pedx2)), ]
} else {
index.sir <- as.numeric(pedx2[idx, 2])
index.dam <- as.numeric(pedx2[idx, 3])
pop.geno.curr <- mate(pop.geno = pop1.geno.copy, index.sir = index.sir, index.dam = index.dam)
pop1.geno.copy <- cbind(pop1.geno.copy[], pop.geno.curr[])
pedx1 <- rbind(pedx1, pedx2[idx, ])
pedx2 <- pedx2[!idx, ]
count.ind <- c(count.ind, length(index.dam))
logging.log(" After generation", i, ",", sum(count.ind[1:i]), "individuals are generated...\n", verbose = verbose)
}
if (class(pedx2) == "character") pedx2 <- matrix(pedx2, 1)
if (dim(pedx2)[1] == 0) go = FALSE
}
ped <- pedx1
rm(pedx1);rm(pedx2);gc()
# Create a folder to save files
if (!is.null(outpath)) {
if (!dir.exists(outpath)) stop("Please check your outpath!")
if (out.format == "numeric") {
outpath = paste0(outpath, .Platform$file.sep, sum(count.ind), "_Simer_Data_numeric")
} else if (out.format == "plink"){
outpath = paste0(outpath, .Platform$file.sep, sum(count.ind), "_Simer_Data_plink")
} else {
stop("out.format should be 'numeric' or 'plink'!")
}
if (!dir.exists(outpath)) { dir.create(outpath) }
directory.rep <- paste0(outpath, .Platform$file.sep, "replication", replication)
if (dir.exists(directory.rep)) {
remove_bigmatrix(file.path(directory.rep, "simer"))
unlink(directory.rep, recursive = TRUE)
}
dir.create(directory.rep)
}
index <- ped[, 1]
out.geno.index <- index
ped.sir <- ped[, 2]
ped.dam <- ped[, 3]
sex <- rep(0, length(index))
sex[index %in% unique(ped.sir)] <- 1
sex[index %in% unique(ped.dam)] <- 2
sex[sex == 0] <- sample(1:2, sum(sex == 0), replace = TRUE)
fam.temp <- getfam(ped.sir, ped.dam, 1, "pm")
gen <- rep(1:length(count.ind), count.ind)
pop.total <- data.frame(gen = gen, index = index, fam = fam.temp[, 1], infam = fam.temp[, 2], sir = ped.sir, dam = ped.dam, sex = sex)
gc <- pop1.geno.copy
if (incols == 2 & outcols == 1) gc <- geno.cvt1(gc)
if (!is.null(outpath)) {
geno.total <- filebacked.big.matrix(
nrow = num.marker,
ncol = ncol(gc),
init = 3,
type = 'char',
backingpath = directory.rep,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.total <- big.matrix(
nrow = num.marker,
ncol = ncol(gc),
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
input.geno(geno.total, gc, ncol(geno.total), mrk.dense)
isd <- c(2, 5, 6)
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = pop1.geno.copy,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total[, isd],
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
if (!is.null(outpath)) {
flush(geno.total)
logging.log(" --- write files of total population ---\n", verbose = verbose)
write.file(pop.total, geno.total, pos.map, index, index, out, directory.rep, out.format, verbose)
}
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(userped); rm(rawped); rm(ped); gc()
} else {
stop("Please input correct reproduction method!")
}
# total information list
simer.list <- list(pop = pop.total, effs = effs, trait = trait, geno = geno.total, genoid = out.geno.index, map = pos.map, si = sel.i, ind.stays = ind.stays, core.stays = core.stays)
rm(ind.stays); rm(effs); rm(trait); rm(pop.total); rm(geno.total); rm(input.map); rm(pos.map); gc()
if (!is.null(selPath)) {
goal.plan <- complan(simls = simer.list, FR = FR, index.wt = index.wt, decr = decr, selPath = selPath, verbose = verbose)
simer.list$goal.plan <- goal.plan
rm(goal.plan); gc()
}
print_accomplished(width = 70, verbose = verbose)
# Return the last directory
ed <- Sys.time()
logging.log(" SIMER DONE WITHIN TOTAL RUN TIME:", format_time(as.numeric(ed)-as.numeric(op)), "\n", verbose = verbose)
return(simer.list)
}
|
/R/simer.r
|
permissive
|
ntduc11/SIMER
|
R
| false
| false
| 88,642
|
r
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Main function of simer
#'
#' Build date: Jan 7, 2019
#' Last update: Oct 13, 2019
#'
#' @author Dong Yin, Lilin Yin, Haohao Zhang and Xiaolei Liu
#'
#' @param num.gen number of generations in simulation
#' @param replication replication index of simulation
#' @param verbose whether to print detail
#' @param mrk.dense whether markers are dense, it is TRUE when sequencing data
#' @param incols the column number of an individual in the input genotype matrix, it can be 1 or 2
#' @param outcols the column number of an individual in the output genotype matrix, it can be 1 or 2
#' @param out prefix of output file name
#' @param outpath path of output files
#' @param selPath the path of breeding_plan
#' @param out.format format of output, "numeric" or "plink"
#' @param seed.sim random seed of a simulation process
#' @param out.geno.gen indices of generations of output genotype
#' @param out.pheno.gen indices of generations of output phenotype
#' @param rawgeno1 extrinsic genotype matrix1
#' @param rawgeno2 extrinsic genotype matrix2
#' @param rawgeno3 extrinsic genotype matrix3
#' @param rawgeno4 extrinsic genotype matrix4
#' @param num.ind population size of the base population
#' @param prob weight of "0" and "1" in genotype matrix, the sum of elements in vector equal to 1
#' @param input.map map that should be input, the marker number should be consistent in both map file and genotype data
#' @param len.block length of every blocks
#' @param range.hot range of number of chromosome crossovers in a hot spot block
#' @param range.cold range of number of chromosome crossovers in a cold spot block
#' @param rate.mut mutation rate between 1e-8 and 1e-6
#' @param cal.model phenotype model with the options: "A", "AD", "ADI"
#' @param FR list of fixed effects, random effects, and their combination
#' @param cv list of population Coefficient of Variation or family Coefficient of Variation
#' @param var.pheno the phenotype variance, only used in single-trait simulation
#' @param h2.tr1 heritability vector of a single trait, every element are corresponding to a, d, aXa, aXd, dXa, dXd respectively
#' @param num.qtn.tr1 integer or integer vector, the number of QTN in a single trait
#' @param sd.tr1 standard deviation of different effects, the last 5 vector elements are corresponding to d, aXa, aXd, dXa, dXd respectively and the rest elements are corresponding to a
#' @param dist.qtn.tr1 distributions of the QTN effects with the options: "normal", "geometry", "gamma", and "beta", vector elements are corresponding to a, d, aXa, aXd, dXa, dXd respectively
#' @param prob.tr1 unit effect of geometric distribution of a single trait, its length should be same as dist.qtn.tr1
#' @param shape.tr1 shape of gamma distribution of a single trait, its length should be same as dist.qtn.tr1
#' @param scale.tr1 scale of gamma distribution of a single trait, its length should be same as dist.qtn.tr1
#' @param shape1.tr1 non-negative parameters of the Beta distribution, its length should be same as dist.qtn.tr1
#' @param shape2.tr1 non-negative parameters of the Beta distribution, its length should be same as dist.qtn.tr1
#' @param ncp.tr1 non-centrality parameter, its length should be same as dist.qtn.tr1
#' @param multrait whether to apply multiple traits, TRUE represents applying, FALSE represents not
#' @param num.qtn.trn QTN distribution matrix, diagonal elements are total QTN number of the trait, non-diagonal elements are QTN number of overlap QTN between two traits
#' @param sd.trn a matrix with the standard deviation of the QTN effects
#' @param gnt.cov genetic covariance matrix among all traits
#' @param h2.trn heritability among all traits
#' @param qtn.spot QTN probability in every block
#' @param maf Minor Allele Frequency, marker selection range is from maf to 0.5
#' @param sel.crit selection criteria with the options: "TGV", "TBV", "pEBVs", "gEBVs", "ssEBVs", and "pheno"
#' @param sel.on whether to add selection
#' @param mtd.reprod different reproduction methods with the options: "clone", "dh", "selfpol", "singcro", "tricro", "doubcro", "backcro","randmate", "randexself", and "userped"
#' @param userped user-designed pedigree to control mating process
#' @param num.prog litter size of dams
#' @param ratio ratio of the males in all individuals
#' @param prog.tri litter size of the first single cross process in the triple cross process
#' @param prog.doub litter size of the first two single cross process in double cross process
#' @param prog.back a vector with litter size in every generation of back-cross
#' @param refresh refresh ratio of core population of sires and dams, only used in ps > 1
#' @param keep.max.gen the max keep generation range in the selection for sires and dams, only used in ps > 1
#' @param ps if ps <= 1, fraction selected in selection of males and females; if ps > 1, ps is number of selected males and females
#' @param decr whether to sort in decreasing order
#' @param sel.multi selection method of multiple traits with options: "tdm", "indcul" and "index"
#' @param index.wt economic weights of selection index method, its length should equals to the number of traits
#' @param index.tdm index represents which trait is being selected
#' @param goal.perc percentage of goal more than mean of scores of individuals
#' @param pass.perc percentage of expected excellent individuals
#' @param sel.sing selection method of single trait with options: "ind", "fam", "infam" and "comb"
#'
#' @return a list with population information, genotype matrix, map information, selection intensity
#' @export
#' @import bigmemory
#' @importFrom stats aov cor dnorm qnorm rgamma rnorm rbeta rgeom runif var shapiro.test
#' @importFrom utils write.table read.delim packageVersion
#' @importFrom methods getPackageName
#' @importFrom MASS mvrnorm ginv
#' @importFrom rMVP MVP.Data.MVP2Bfile
#'
#' @examples
#' \donttest{
#' # get map file, map is necessary
#' data(simdata)
#'
#' # run simer
#' simer.list <-
#' simer(num.gen = 5,
#' replication = 1,
#' verbose = TRUE,
#' mrk.dense = TRUE,
#' incols = 2,
#' outcols = 1,
#' out = "simer",
#' outpath = NULL,
#' selPath = NULL,
#' out.format = "numeric",
#' seed.sim = runif(1, 0, 100),
#' out.geno.gen = 3:5,
#' out.pheno.gen = 1:5,
#' rawgeno1 = rawgeno,
#' rawgeno2 = NULL,
#' rawgeno3 = NULL,
#' rawgeno4 = NULL,
#' num.ind = NULL,
#' prob = c(0.5, 0.5),
#' input.map = input.map,
#' len.block = 5e7,
#' range.hot = 4:6,
#' range.cold = 1:5,
#' rate.mut = 1e-8,
#' cal.model = "A",
#' FR = NULL,
#' cv = NULL,
#' h2.tr1 = c(0.3, 0.1, 0.05, 0.05, 0.05, 0.01),
#' num.qtn.tr1 = 500,
#' sd.tr1 = c(0.4, 0.2, 0.02, 0.02, 0.02, 0.02),
#' dist.qtn.tr1 = rep("normal", 6),
#' prob.tr1 = rep(0.5, 6),
#' shape.tr1 = rep(1, 6),
#' scale.tr1 = rep(1, 6),
#' shape1.tr1 = rep(1, 6),
#' shape2.tr1 = rep(1, 6),
#' ncp.tr1 = rep(0, 6),
#' multrait = FALSE,
#' num.qtn.trn = matrix(c(400, 100, 100, 400), 2, 2),
#' sd.trn = matrix(c(0.07, 0, 0, 0.07), 2, 2),
#' gnt.cov = matrix(c(1, 2, 2, 16), 2, 2),
#' h2.trn = c(0.3, 0.5),
#' qtn.spot = rep(0.1, 10),
#' maf = 0,
#' sel.crit = "pheno",
#' sel.on = TRUE,
#' mtd.reprod = "randmate",
#' userped = userped,
#' num.prog = 2,
#' ratio = 0.5,
#' prog.tri = 2,
#' prog.doub = 2,
#' prog.back = rep(2, 5),
#' ps = rep(0.8, 2),
#' decr = TRUE,
#' sel.multi = "index",
#' index.wt = c(0.5, 0.5),
#' index.tdm = 1,
#' goal.perc = 0.1,
#' pass.perc = 0.9,
#' sel.sing = "comb")
#' pop <- simer.list$pop
#' effs <- simer.list$effs
#' trait <- simer.list$trait
#' geno <- simer.list$geno
#' genoid <- simer.list$genoid
#' map <- simer.list$map
#' si <- simer.list$si
#' head(pop)
#' str(effs)
#' str(trait)
#' geno[1:6, 1:6]
#' genoid[1:6]
#' str(map)
#' si
#' }
simer <-
function(num.gen = 5,
replication = 1,
verbose = TRUE,
mrk.dense = TRUE,
incols = 2,
outcols = 1,
out = "simer",
outpath = NULL,
selPath = NULL,
out.format = "numeric",
seed.sim = runif(1, 0, 100),
out.geno.gen = (num.gen-2):num.gen,
out.pheno.gen = 1:num.gen,
rawgeno1 = NULL,
rawgeno2 = NULL,
rawgeno3 = NULL,
rawgeno4 = NULL,
num.ind = 100,
prob = c(0.5, 0.5),
input.map = NULL,
len.block = 5e7,
range.hot = 4:6,
range.cold = 1:5,
rate.mut = 1e-8,
cal.model = "A",
FR = NULL,
cv = NULL,
var.pheno = NULL,
h2.tr1 = c(0.3, 0.1, 0.05, 0.05, 0.05, 0.01),
num.qtn.tr1 = 500,
sd.tr1 = c(0.4, 0.2, 0.02, 0.02, 0.02, 0.02),
dist.qtn.tr1 = rep("normal", 6),
prob.tr1 = rep(0.5, 6),
shape.tr1 = rep(1, 6),
scale.tr1 = rep(1, 6),
shape1.tr1 = rep(1, 6),
shape2.tr1 = rep(1, 6),
ncp.tr1 = rep(0, 6),
multrait = FALSE,
num.qtn.trn = matrix(c(400, 100, 100, 400), 2, 2),
sd.trn = matrix(c(1, 0, 0, 0.5), 2, 2),
gnt.cov = matrix(c(1, 2, 2, 16), 2, 2),
h2.trn = c(0.3, 0.5),
qtn.spot = rep(0.1, 10),
maf = 0,
sel.crit = "pheno",
sel.on = TRUE,
mtd.reprod = "randmate",
userped = NULL,
num.prog = 2,
ratio = 0.5,
prog.tri = 2,
prog.doub = 2,
prog.back = rep(2, num.gen),
refresh = c(1, 0.6),
keep.max.gen = rep(3, 2),
ps = rep(0.8, 2),
decr = TRUE,
sel.multi = "index",
index.wt = c(0.5, 0.5),
index.tdm = 1,
goal.perc = 0.1,
pass.perc = 0.9,
sel.sing = "comb") {
# Start simer
# TODO: How to generate inbreeding sirs and uninbreeding dams
# TODO: optcontri.sel
# TODO: add MVP for output
# TODO: correct pedigree
# TODO: add superior limit of homo
# TODO: add multiple fix and random effects
# TODO: add summary() to population information
# TODO: add inbreeding coeficient
# TODO: updata index selection
# TODO: add true block distribution
# TODO: genomic mating
# TODO: inbreeding change in every generations
simer.Version(width = 70, verbose = verbose)
inner.env <- environment()
# initialize logging
if (!is.null(outpath)) {
if (!dir.exists(outpath)) stop(paste0("Please check your output path: ", outpath))
if (verbose) {
logging.initialize("Simer", outpath = outpath)
}
}
################### MAIN_FUNCTION_SETTING ###################
logging.log("--------------------------- replication ", replication, "---------------------------\n", verbose = verbose)
op <- Sys.time()
logging.log(" SIMER BEGIN AT", as.character(op), "\n", verbose = verbose)
set.seed(seed.sim)
if (incols == 1) outcols <- 1
################### BASE_POPULATION ###################
# stablish genotype of base population if there isn't by two ways:
# 1. input rawgeno
# 2. input num.marker and num.ind
nmrk <- nrow(input.map)
# combine genotype matrix
if (is.list(rawgeno1)) {
if (!(mtd.reprod == "randmate" || mtd.reprod == "randexself"))
stop("Only random matings support genotype list!")
nsir <- ncol(rawgeno1$sir) / incols
ndam <- ncol(rawgeno1$dam) / incols
nind <- nsir + ndam
basepop <- getpop(nind, 1, nsir/nind)
rawgeno1 <- cbind(rawgeno1$sir[], rawgeno1$dam[])
} else {
# set base population information
nind <- ifelse(is.null(rawgeno1), num.ind, ncol(rawgeno1) / incols)
nsir <- nind * ratio
ndam <- nind * (1-ratio)
basepop <- getpop(nind, 1, ratio)
}
num.marker <- nrow(input.map)
logging.log(" --- base population 1 ---\n", verbose = verbose)
basepop.geno <-
genotype(rawgeno = rawgeno1,
incols = incols,
num.marker = num.marker,
num.ind = num.ind,
prob = prob,
verbose = verbose)
# set block information and recombination information
num.ind <- nind
pos.map <- check.map(input.map = input.map, num.marker = nmrk, len.block = len.block)
blk.rg <- cal.blk(pos.map)
recom.spot <- as.numeric(pos.map[blk.rg[, 1], 7])
# calculate for marker information
effs <-
cal.effs(pop.geno = basepop.geno,
incols = incols,
cal.model = cal.model,
num.qtn.tr1 = num.qtn.tr1,
sd.tr1 = sd.tr1,
dist.qtn.tr1 = dist.qtn.tr1,
prob.tr1 = prob.tr1,
shape.tr1 = shape.tr1,
scale.tr1 = scale.tr1,
shape1.tr1 = shape1.tr1,
shape2.tr1 = shape2.tr1,
ncp.tr1 = ncp.tr1,
multrait = multrait,
num.qtn.trn = num.qtn.trn,
sd.trn = sd.trn,
qtn.spot = qtn.spot,
maf = maf,
verbose = verbose)
# calculate phenotype according to genotype
if (sel.on) {
pop1.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = basepop,
pop.geno = basepop.geno,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = basepop,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
basepop <- pop1.pheno$pop
pop1.pheno$pop <- NULL
}
# only mutation in clone and doubled haploid
if (mtd.reprod == "clone" || mtd.reprod == "dh" || mtd.reprod == "selfpol") {
basepop$sex <- 0
recom.spot <- NULL
ratio <- 0
}
basepop.geno.em <- # genotype matrix after Mutation
genotype(geno = basepop.geno,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
rate.mut = rate.mut,
verbose = verbose)
if (mtd.reprod == "singcro" || mtd.reprod == "tricro" || mtd.reprod == "doubcro" || mtd.reprod == "backcro") {
# set base population information
basepop$sex <- 1
if (is.null(rawgeno2)) {
logging.log(" --- base population 2 ---\n", verbose = verbose)
prob1 <- runif(1)
prob <- c(prob1, 1 - prob1)
pop2.geno <- genotype(incols = incols, num.marker = num.marker, num.ind = num.ind, prob = prob, verbose = verbose)
} else {
pop2.geno <- genotype(rawgeno = rawgeno2, verbose = verbose)
}
# set base population information
nind2 <- ncol(pop2.geno) / incols
pop2 <- getpop(nind2, nind+1, 0)
# calculate phenotype according to genotype
if (sel.on) {
pop2.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop2,
pop.geno = pop2.geno,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop2,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop2 <- pop2.pheno$pop
pop2.pheno$pop <- NULL
# reset trait
if (mtd.reprod != "backcro") {
trait <- list()
trait$pop.sir1 <- pop1.pheno
if (mtd.reprod == "tricro") {
trait$pop.sir2 <- pop2.pheno
} else {
trait$pop.dam1 <- pop2.pheno
}
}
}
pop2.geno.em <- # genotype matrix after Mutation
genotype(geno = pop2.geno,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
pop3.geno.em <- NULL
pop4.geno.em <- NULL
}
if (mtd.reprod == "tricro" || mtd.reprod == "doubcro") {
if (is.null(rawgeno3)) {
logging.log(" --- base population 3 ---\n", verbose = verbose)
prob1 <- runif(1)
prob <- c(prob1, 1 - prob1)
pop3.geno <- genotype(incols = incols, num.marker = num.marker, num.ind = num.ind, prob = prob, verbose = verbose)
} else {
pop3.geno <- genotype(rawgeno = rawgeno3, verbose = verbose)
}
# set base population information
nind3 <- ncol(pop3.geno) / incols
pop3 <- getpop(nind3, nind+nind2+1, 1)
# calculate phenotype according to genotype
if (sel.on) {
pop3.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop3,
pop.geno = pop3.geno,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop3,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop3 <- pop3.pheno$pop
pop3.pheno$pop <- NULL
if (mtd.reprod == "tricro") {
trait$pop.dam1 <- pop3.pheno
} else {
trait$pop.sir2 <- pop3.pheno
}
}
pop3.geno.em <- # genotype matrix after Mutation
genotype(geno = pop3.geno,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
pop4.geno.em <- NULL
}
if (mtd.reprod == "doubcro") {
logging.log(" --- base population 4 ---\n", verbose = verbose)
if (is.null(rawgeno4)) {
prob1 <- runif(1)
prob <- c(prob1, 1 - prob1)
pop4.geno <- genotype(incols = incols, num.marker = num.marker, num.ind = num.ind, prob = prob, verbose = verbose)
} else {
pop4.geno <- genotype(rawgeno = rawgeno4, verbose = verbose)
}
# set base population information
nind4 <- ncol(pop4.geno) / incols
pop4 <- getpop(nind4, nind+nind2+nind3+1, 0)
# calculate phenotype according to genotype
if (sel.on) {
pop4.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop4,
pop.geno = pop4.geno,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop4,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop4 <- pop4.pheno$pop
pop4.pheno$pop <- NULL
trait$pop.dam2 <- pop4.pheno
}
pop4.geno.em <- # genotype matrix after Mutation
genotype(geno = pop4.geno,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
}
################### SETTING_PROCESS ###################
# 1. setting of number of progenies in every generation.
# 2. setting of directory
# adjust for genetic correlation
if (!(all(ps <= 1) | all(ps > 1))) stop("Please input a correct ps!")
ps[1] <- ifelse(sel.on, ps[1], 1)
ps[2] <- ifelse(sel.on, ps[2], 1)
# calculate number of individuals in every generation
count.ind <- rep(nind, num.gen)
count.sir <- count.dam <- NULL
if (mtd.reprod == "clone" || mtd.reprod == "dh" || mtd.reprod == "selfpol" || mtd.reprod == "randmate" || mtd.reprod == "randexself") {
if (num.gen > 1) {
count.sir <- ifelse(all(ps <= 1), round(nsir * ps[1]), ps[1])
count.dam <- ifelse(all(ps <= 1), round(ndam * ps[2]), ps[2])
count.ind[2] <- count.dam * num.prog
if (num.gen > 2) {
for(i in 3:num.gen) {
count.sir[i-1] <- ifelse(all(ps <= 1), round(count.ind[i-1] * ratio * ps[1]), ps[1])
count.dam[i-1] <- ifelse(all(ps <= 1), round(count.ind[i-1] * (1-ratio) * ps[2]), ps[2])
count.ind[i] <- count.dam[i-1] * num.prog
}
}
}
} else if (mtd.reprod == "singcro") {
count.sir <- ifelse(all(ps <= 1), round(nrow(basepop) * ps[1]), ps[1])
count.dam <- ifelse(all(ps <= 1), round(nrow(pop2) * ps[2]), ps[2])
sing.ind <- count.dam * num.prog
count.ind <- c(nrow(basepop), nrow(pop2), sing.ind)
} else if (mtd.reprod == "tricro") {
num.sir2 <- ifelse(all(ps <= 1), round(nrow(pop2) * ps[1]), ps[1])
num.dam1 <- ifelse(all(ps <= 1), round(nrow(pop3) * ps[2]), ps[2])
dam21.ind <- num.dam1 * prog.tri
num.sir1 <- ifelse(all(ps <= 1), round(nrow(basepop) * ps[1]), ps[1])
num.dam21 <- ifelse(all(ps <= 1), round(dam21.ind * (1-ratio) * ps[2]), ps[2])
tri.ind <- num.dam21 * num.prog
count.sir <- c(num.sir2, num.sir1)
count.dam <- c(num.dam1, num.dam21)
count.ind <- c(nrow(basepop), nrow(pop2), nrow(pop3), dam21.ind, tri.ind)
} else if (mtd.reprod == "doubcro") {
num.sir1 <- ifelse(all(ps <= 1), round(nrow(basepop) * ps[1]), ps[1])
num.dam1 <- ifelse(all(ps <= 1), round(nrow(pop2) * ps[2]), ps[2])
sir11.ind <- num.dam1 * prog.doub
num.sir2 <- ifelse(all(ps <= 1), round(nrow(pop3) * ps[1]), ps[1])
num.dam2 <- ifelse(all(ps <= 1), round(nrow(pop4) * ps[2]), ps[2])
dam22.ind <- num.dam2 * prog.doub
num.sir11 <- ifelse(all(ps <= 1), round(sir11.ind * ratio * ps[2]), ps[2])
num.dam22 <- ifelse(all(ps <= 1), round(dam22.ind * (1-ratio) * ps[2]), ps[2])
doub.ind <- num.dam22 * num.prog
count.sir <- c(num.sir1, num.sir2, num.sir11)
count.dam <- c(num.dam1, num.dam2, num.dam22)
count.ind <- c(nrow(basepop), nrow(pop2), nrow(pop3), nrow(pop4), sir11.ind, dam22.ind, doub.ind)
} else if (mtd.reprod == "backcro") {
count.ind[1] <- nrow(basepop) + nrow(pop2)
if (num.gen > 1) {
count.sir[1] <- ifelse(all(ps <= 1), round(nrow(basepop) * ps[1]), ps[1])
count.dam[1] <- ifelse(all(ps <= 1), round(nrow(pop2) * ps[2]), ps[2])
count.ind[2] <- count.dam[1] * num.prog
for(i in 3:num.gen) {
count.sir[i-1] <- count.sir[i-2]
count.dam[i-1] <- ifelse(all(ps <= 1), round(count.ind[i-1] * (1-ratio) * ps[2]), ps[2])
count.ind[i] <- count.dam[i-1] * num.prog
}
}
} # end if mtd.reprod
if (mtd.reprod != "userped") {
# Create a folder to save files
if (!is.null(outpath)) {
if (!dir.exists(outpath)) stop("Please check your outpath!")
if (out.format == "numeric") {
outpath = paste0(outpath, .Platform$file.sep, sum(count.ind), "_Simer_Data_numeric")
} else if (out.format == "plink"){
outpath = paste0(outpath, .Platform$file.sep, sum(count.ind), "_Simer_Data_plink")
} else {
stop("out.format should be 'numeric' or 'plink'!")
}
if (!dir.exists(outpath)) dir.create(outpath)
directory.rep <- paste0(outpath, .Platform$file.sep, "replication", replication)
if (dir.exists(directory.rep)) {
remove_bigmatrix(file.path(directory.rep, out))
unlink(directory.rep, recursive = TRUE)
}
dir.create(directory.rep)
}
}
if (all(ps <= 1)) {
# calculate selection intensity
sel.i <- dnorm(qnorm(1 -ps)) / ps
logging.log(" --- selection intensity ---\n", verbose = verbose)
logging.log(" Selection intensity is", sel.i, "for males and females\n", verbose = verbose)
} else if (all(ps > 1)) {
sel.i <- ps
logging.log(" --- selected individuals number ---\n", verbose = verbose)
logging.log(" Number of selected individuals is", sel.i, "for males and females in every generation\n", verbose = verbose)
}
################### REPRODUCTION_PROCESS ###################
# 1. Reproduction based on basepop and basepop.geno according
# to different reproduction method.
logging.log(" --- start reproduction ---\n", verbose = verbose)
# multi-generation: clone, dh, selpol, randmate, randexself
geno.back <- paste0(out, ".geno.bin")
geno.desc <- paste0(out, ".geno.desc")
ind.stays <- ind.stay <- NULL
core.stays <- core.stay <- NULL
if (mtd.reprod == "clone" || mtd.reprod == "dh" || mtd.reprod == "selfpol" || mtd.reprod == "randmate" || mtd.reprod == "randexself") {
out.geno.gen <- out.geno.gen[out.geno.gen > 0]
out.pheno.gen <- out.pheno.gen[out.pheno.gen > 0]
out.geno.index <- getindex(count.ind, out.geno.gen)
out.pheno.index <- getindex(count.ind, out.pheno.gen)
# store all genotype
geno.total.temp <- big.matrix(
nrow = num.marker,
ncol = outcols*sum(count.ind),
init = 3,
type = 'char')
if (!is.null(outpath)) {
geno.total <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind[out.geno.gen]),
init = 3,
type = 'char',
backingpath = directory.rep,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.total <- big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind[out.geno.gen]),
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
# set total population
pop.total <- basepop
gc <- basepop.geno
if (incols == 2 & outcols == 1) gc <- geno.cvt1(gc)
if (1 %in% out.geno.gen) {
input.geno(geno.total, gc, outcols*count.ind[1], mrk.dense)
}
input.geno(geno.total.temp, gc, outcols*count.ind[1], mrk.dense)
logging.log(" After generation 1 ,", count.ind[1], "individuals are generated...\n", verbose = verbose)
if (num.gen > 1) {
# add selection to generation1
if (sel.on) {
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered <- ind.ordered[-1]
} else {
ind.ordered <- basepop$index
}
core.stays[[1]] <- core.stay <- ind.stays[[1]] <- ind.stay <- getsd(ind.ordered, basepop, count.sir[1], count.dam[1])
pop.last <- basepop
pop.geno.last <- basepop.geno.em
pop.geno.core <- basepop.geno.em[, getgmt(c(ind.stay$sir, ind.stay$dam), incols = incols)]
pop1.geno.id <- basepop$index
for (i in 2:num.gen) {
pop.gp <- # pop.gp with genotype and pop information
reproduces(pop1 = pop.last,
pop1.geno.id = pop1.geno.id,
pop1.geno = pop.geno.last,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = mtd.reprod,
num.prog = num.prog,
ratio = ratio)
pop.geno.curr <- pop.gp$geno
pop.curr <- pop.gp$pop
pop1.geno.id <- pop.curr$index
isd <- c(2, 5, 6)
# input genotype
gc <- pop.geno.curr
if (incols == 2 & outcols == 1) gc <- geno.cvt1(gc)
if (i %in% out.geno.gen) {
out.gg <- out.geno.gen[1:which(out.geno.gen == i)]
input.geno(geno.total, gc, outcols * sum(count.ind[out.gg]), mrk.dense)
}
input.geno(geno.total.temp, gc, outcols*sum(count.ind[1:i]), mrk.dense)
pop.total.temp <- rbind(pop.total[1:sum(count.ind[1:(i-1)]), isd], pop.curr[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.curr,
pop.geno = pop.geno.curr,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.curr <- pop.pheno$pop
pop.pheno$pop <- NULL
}
pop.total <- rbind(pop.total, pop.curr)
logging.log(" After generation", i, ",", sum(count.ind[1:i]), "individuals are generated...\n", verbose = verbose)
if (i == num.gen) break
# output index.tdm and ordered individuals indice
if (sel.on) {
ind.ordered <-
selects(pop = pop.curr,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total.temp,
pop.pheno = pop.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered <- ind.ordered[-1]
} else {
ind.ordered <- pop.curr$index
}
core.stays[[i]] <- core.stay <- ind.stays[[i]] <- ind.stay <- getsd(ind.ordered, pop.curr, count.sir[i], count.dam[i])
pop.geno.last <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.curr,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
pop.last <- pop.curr
if (sel.on & all(ps > 1)) {
info.core <- sel.core(ind.stay, core.stay, refresh, keep.max.gen,
incols, pop.total = pop.total, pop.geno.curr, pop.geno.core)
core.stays[[i]] <- core.stay <- info.core$core.stay
pop.geno.last <- pop.geno.core <- info.core$core.geno
pop1.geno.id <- c(core.stay$sir, core.stay$dam)
ind.stay <- core.stay
}
} # end for
}
if(num.gen > 1) {
names(ind.stays) <- paste0("gen", 1:(num.gen-1))
names(core.stays) <- paste0("gen", 1:(num.gen-1))
}
# if traits have genetic correlation
# generate phenotype at last
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total.temp,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = FALSE,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
if (!is.null(outpath)) {
# write files
logging.log(" --- write files of total population ---\n", verbose = verbose)
write.file(pop.total, geno.total, pos.map, out.geno.index, out.pheno.index, out, directory.rep, out.format, verbose)
flush(geno.total)
}
if (num.gen > 1) {
rm(pop.gp); rm(pop.curr); rm(pop.geno.curr); rm(pop.last);
rm(pop.geno.last); rm(pop.total.temp);
}
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(geno.total.temp); gc()
# certain-generation: singcro, tricro, doubcro
} else if (mtd.reprod == "singcro") {
out.geno.index <- 1:sum(count.ind)
logging.log(" After generation", 1, ",", sum(count.ind[1:2]), "individuals are generated...\n", verbose = verbose)
if (!is.null(outpath)) {
dir.sir <- paste0(directory.rep, .Platform$file.sep, count.ind[1], "_sir")
dir.dam <- paste0(directory.rep, .Platform$file.sep, count.ind[2], "_dam")
dir.sgc <- paste0(directory.rep, .Platform$file.sep, count.ind[3], "_single_cross")
if (dir.exists(dir.sir)) { unlink(dir.sir, recursive = TRUE) }
if (dir.exists(dir.dam)) { unlink(dir.dam, recursive = TRUE) }
if (dir.exists(dir.sgc)) { unlink(dir.sgc, recursive = TRUE) }
dir.create(dir.sir)
dir.create(dir.dam)
dir.create(dir.sgc)
geno.sir <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char',
backingpath = dir.sir,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char',
backingpath = dir.dam,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.singcro <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char',
backingpath = dir.sgc,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.sir <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char')
geno.dam <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char')
geno.singcro <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
if (sel.on) {
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop2,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop2,
pop.pheno = pop2.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- basepop$index
ind.ordered2 <- pop2$index
}
ind.stays[[1]] <- getsd(ind.ordered1, basepop, count.sir, 0)
ind.stays[[2]] <- getsd(ind.ordered2, pop2, 0, count.dam)
names(ind.stays) <- c("basepop", "pop2")
# --- single cross (singcro): produce F1 progeny from basepop (sires) x pop2 (dams) ---
# Pool the selected sires and dams into one mating list and record it as
# the generation-1 core breeding stock.
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[2]]$dam
core.stays[[1]] <- ind.stay
names(core.stays) <- "gen1"
pop1.geno.id <- basepop$index
pop2.geno.id <- pop2$index
# Mate the two parental populations; reproduces() returns both the progeny
# genotype matrix ($geno) and the progeny population table ($pop).
pop.gp <-
reproduces(pop1 = basepop,
pop2 = pop2,
pop1.geno.id = basepop$index,
pop2.geno.id = pop2$index,
pop1.geno = basepop.geno.em,
pop2.geno = pop2.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = mtd.reprod,
num.prog = num.prog,
ratio = ratio)
pop.geno.singcro <- pop.gp$geno
pop.singcro <- pop.gp$pop
# Only columns 2, 5, 6 of the population tables are needed by phenotype()
# as pop.total (presumably sex and family ids -- TODO confirm).
isd <- c(2, 5, 6)
pop.total.temp <- rbind(basepop[, isd], pop2[, isd], pop.singcro[, isd])
if (sel.on) {
# With selection on, progeny phenotypes are generated immediately; the
# returned list carries trait details, which are stashed in `trait`.
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.singcro,
pop.geno = pop.geno.singcro,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.singcro <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.singcro <- pop.pheno
}
logging.log(" After generation", 2, ",", sum(count.ind[1:3]), "individuals are generated...\n", verbose = verbose)
# Optionally convert genotypes from 2-column (one column per haplotype) to
# 1-column form before copying them into the output big.matrix objects.
gc.sir <- basepop.geno
gc.dam <- pop2.geno
gc.singcro <- pop.geno.singcro
if (incols == 2 & outcols == 1) {
gc.sir <- geno.cvt1(gc.sir)
gc.dam <- geno.cvt1(gc.dam)
gc.singcro <- geno.cvt1(gc.singcro)
}
input.geno(geno.sir, gc.sir, ncol(geno.sir), mrk.dense)
input.geno(geno.dam, gc.dam, ncol(geno.dam), mrk.dense)
input.geno(geno.singcro, gc.singcro, ncol(geno.singcro), mrk.dense)
# if traits have genetic correlation
# generate phenotype at last
if (!sel.on) {
# Without selection, phenotypes are generated once on the combined
# populations so correlated traits are simulated jointly, then the
# phenotyped table is split back into its components by row position.
pop.total <- rbind(basepop, pop2, pop.singcro)
geno.total <- cbind(basepop.geno, pop2.geno, pop.geno.singcro)
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
basepop <- pop.total[1:nind, ]
pop2 <- pop.total[(nind+1):(nind+nind2), ]
pop.singcro <- pop.total[(nind+nind2+1):(nind+nind2+nrow(pop.singcro)), ]
}
if (!is.null(outpath)) {
# Flush the file-backed matrices to disk, then write per-population files.
flush(geno.sir)
flush(geno.dam)
flush(geno.singcro)
# write files
logging.log(" --- write files of sirs ---\n", verbose = verbose)
write.file(basepop, geno.sir, pos.map, 1:nrow(basepop), 1:nrow(basepop), out, dir.sir, out.format, verbose)
logging.log(" --- write files of dams ---\n", verbose = verbose)
write.file(pop2, geno.dam, pos.map, 1:nrow(pop2), 1:nrow(pop2), out, dir.dam, out.format, verbose)
logging.log(" --- write files of progenies ---\n", verbose = verbose)
write.file(pop.singcro, geno.singcro, pos.map, 1:nrow(pop.singcro), 1:nrow(pop.singcro), out, dir.sgc, out.format, verbose)
}
# set total information of population and genotype
pop.total <- list(pop.sir1 = basepop, pop.dam1 = pop2, pop.singcro = pop.singcro)
geno.total <- list(geno.sir1 = gc.sir, geno.dam1 = gc.dam, geno.singcro = gc.singcro)
# Free the large intermediate objects before leaving the branch.
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(pop2); rm(pop2.geno); rm(pop2.geno.em);
rm(geno.sir); rm(geno.dam); rm(geno.singcro); rm(pop.gp); rm(pop.singcro); rm(pop.geno.singcro);
rm(gc.sir); rm(gc.dam); rm(gc.singcro); rm(pop.total.temp); gc()
} else if (mtd.reprod == "tricro") {
# --- three-way cross (tricro): (sir2 x dam1) -> dam21, then sir1 x dam21 ---
out.geno.index <- 1:sum(count.ind)
logging.log(" After generation", 1, ",", sum(count.ind[1:3]), "individuals are generated...\n", verbose = verbose)
if (!is.null(outpath)) {
# One output directory per (sub)population, named "<count>_<role>";
# pre-existing directories are removed so each run starts clean.
dir.sir1 <- paste0(directory.rep, .Platform$file.sep, count.ind[1], "_sir1")
dir.dam1 <- paste0(directory.rep, .Platform$file.sep, count.ind[2], "_dam1")
dir.sir2 <- paste0(directory.rep, .Platform$file.sep, count.ind[3], "_sir2")
dir.dam21 <- paste0(directory.rep, .Platform$file.sep, count.ind[4], "_dam21")
dir.trc <- paste0(directory.rep, .Platform$file.sep, count.ind[5], "_three-ways_cross")
if (dir.exists(dir.sir1)) { unlink(dir.sir1, recursive = TRUE) }
if (dir.exists(dir.dam1)) { unlink(dir.dam1, recursive = TRUE) }
if (dir.exists(dir.sir2)) { unlink(dir.sir2, recursive = TRUE) }
if (dir.exists(dir.dam21)) { unlink(dir.dam21, recursive = TRUE) }
if (dir.exists(dir.trc)) { unlink(dir.trc, recursive = TRUE) }
dir.create(dir.sir1)
dir.create(dir.dam1)
dir.create(dir.sir2)
dir.create(dir.dam21)
dir.create(dir.trc)
# Genotype storage: rows are markers, columns are outcols per individual.
# File-backed matrices are used when results go to disk.
geno.sir1 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char',
backingpath = dir.sir1,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam1 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char',
backingpath = dir.dam1,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.sir2 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char',
backingpath = dir.sir2,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam21 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[4],
init = 3,
type = 'char',
backingpath = dir.dam21,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.tricro <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[5],
init = 3,
type = 'char',
backingpath = dir.trc,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
# In-memory equivalents when no output path is requested.
geno.sir1 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char')
geno.dam1 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char')
geno.sir2 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char')
geno.dam21 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[4],
init = 3,
type = 'char')
geno.tricro <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[5],
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
# correct the sex
# pop2 serves as sires (sex = 1) and pop3 as dams (sex = 2) of the first cross.
pop2$sex <- 1
pop3$sex <- 2
if (sel.on) {
# add selection to generation1
# Rank each parental population; selects() returns index.tdm as the
# first element followed by the ordered individual indices.
ind.ordered <-
selects(pop = pop2,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop2,
pop.pheno = pop2.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop3,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop3,
pop.pheno = pop3.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
# Without selection, keep the original ordering.
ind.ordered1 <- pop2$index
ind.ordered2 <- pop3$index
}
# Retain the selected sires/dams of each population.
# NOTE(review): core.stays[[1]] and core.stays[[2]] assigned here are
# overwritten just below / later in this branch -- the chained assignment
# appears redundant; verify before simplifying.
core.stays[[1]] <- core.stay <- ind.stays[[1]] <- getsd(ind.ordered1, pop2, count.sir[1], 0)
core.stays[[2]] <- core.stay <- ind.stays[[2]] <- getsd(ind.ordered2, pop3, 0, count.dam[1])
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[2]]$dam
core.stays[[1]] <- ind.stay
# the first generation to the second generation
# First cross: pop2 (sires) x pop3 (dams) -> intermediate dam21 population.
pop.gp <-
reproduces(pop1 = pop2,
pop2 = pop3,
pop1.geno.id = pop2$index,
pop2.geno.id = pop3$index,
pop1.geno = pop2.geno.em,
pop2.geno = pop3.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = prog.tri,
ratio = ratio)
pop.geno.dam21 <- pop.gp$geno
pop.dam21 <- pop.gp$pop
# Columns 2, 5, 6 are the subset phenotype() needs from pop.total.
isd <- c(2, 5, 6)
pop.total.temp <- rbind(basepop[, isd], pop2[, isd], pop3[, isd], pop.dam21[, isd])
if (sel.on) {
# Phenotype the intermediate population and record its trait details.
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.dam21,
pop.geno = pop.geno.dam21,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.dam21 <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.dam21 <- pop.pheno
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop.dam21,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total.temp,
pop.pheno = pop.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- basepop$index
ind.ordered2 <- pop.dam21$index
}
# Retain basepop sires and dam21 dams for the terminal cross.
ind.stays[[3]] <- getsd(ind.ordered1, basepop, count.sir[2], 0)
ind.stays[[4]] <- getsd(ind.ordered2, pop.dam21, 0, count.dam[2])
names(ind.stays) <- c("pop2", "pop3", "basepop", "pop.dam21")
ind.stay$sir <- ind.stays[[3]]$sir
ind.stay$dam <- ind.stays[[4]]$dam
core.stays[[2]] <- ind.stay
names(core.stays) <- c("gen1", "gen2")
logging.log(" After generation", 2, ",", sum(count.ind[1:4]), "individuals are generated...\n", verbose = verbose)
pop.geno.dam21.em <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.dam21,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
# the second generation to the third generation
# Terminal cross: basepop (sir1) x dam21 -> three-way-cross progeny.
pop.gp <-
reproduces(pop1 = basepop,
pop2 = pop.dam21,
pop1.geno.id = basepop$index,
pop2.geno.id = pop.dam21$index,
pop1.geno = basepop.geno.em,
pop2.geno = pop.geno.dam21.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = num.prog,
ratio = ratio)
pop.geno.tricro <- pop.gp$geno
pop.tricro <- pop.gp$pop
isd <- c(2, 5, 6)
pop.total.temp <- rbind(pop.total.temp, pop.tricro[, isd])
if (sel.on) {
# Phenotype the terminal-cross progeny.
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.tricro,
pop.geno = pop.geno.tricro,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.tricro <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.tricro <- pop.pheno
}
logging.log(" After generation", 3, ",", sum(count.ind[1:5]), "individuals are generated...\n", verbose = verbose)
# Optionally convert genotypes to 1-column form before output.
gc.sir1 <- basepop.geno
gc.sir2 <- pop2.geno
gc.dam1 <- pop3.geno
gc.dam21 <- pop.geno.dam21
gc.tricro <- pop.geno.tricro
if (incols == 2 & outcols == 1) {
gc.sir1 <- geno.cvt1(gc.sir1)
gc.sir2 <- geno.cvt1(gc.sir2)
gc.dam1 <- geno.cvt1(gc.dam1)
gc.dam21 <- geno.cvt1(gc.dam21)
gc.tricro <- geno.cvt1(gc.tricro)
}
# Copy the converted genotype matrices into their output big.matrix objects;
# the third argument is the destination matrix's own column count, matching
# every other input.geno() call in this routine.
# Fix: the sir2/dam1 calls previously passed each other's ncol
# (ncol(geno.dam1) for geno.sir2 and ncol(geno.sir2) for geno.dam1),
# which copies the wrong number of columns whenever count.ind[2] != count.ind[3].
input.geno(geno.sir1, gc.sir1, ncol(geno.sir1), mrk.dense)
input.geno(geno.sir2, gc.sir2, ncol(geno.sir2), mrk.dense)
input.geno(geno.dam1, gc.dam1, ncol(geno.dam1), mrk.dense)
input.geno(geno.dam21, gc.dam21, ncol(geno.dam21), mrk.dense)
input.geno(geno.tricro, gc.tricro, ncol(geno.tricro), mrk.dense)
# if traits have genetic correlation
# generate phenotype at last
if (!sel.on) {
# Without selection, phenotype all five populations jointly so correlated
# traits are simulated together, then split the table back by row position.
pop.total <- rbind(basepop, pop2, pop3, pop.dam21, pop.tricro)
geno.total <- cbind(basepop.geno, pop2.geno, pop3.geno, pop.geno.dam21[], pop.geno.tricro[])
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
basepop <- pop.total[1:nind, ]
pop2 <- pop.total[(nind+1):(nind+nind2), ]
pop3 <- pop.total[(nind+nind2+1):(nind+nind2+nind3), ]
pop.dam21 <- pop.total[(nind+nind2+nind3+1):(nind+nind2+nind3+nrow(pop.dam21)), ]
pop.tricro <- pop.total[(nind+nind2+nind3+nrow(pop.dam21)+1):(nind+nind2+nind3+nrow(pop.dam21)+nrow(pop.tricro)), ]
}
if (!is.null(outpath)) {
# Flush file-backed matrices and write one file set per population.
flush(geno.sir1)
flush(geno.dam1)
flush(geno.sir2)
flush(geno.dam21)
flush(geno.tricro)
# write files
logging.log(" --- write files of sir1s ---\n", verbose = verbose)
write.file(basepop, geno.sir1, pos.map, 1:nrow(basepop), 1:nrow(basepop), out, dir.sir1, out.format, verbose)
logging.log(" --- write files of sir2s ---\n", verbose = verbose)
write.file(pop2, geno.sir2, pos.map, 1:nrow(pop2), 1:nrow(pop2), out, dir.sir2, out.format, verbose)
logging.log(" --- write files of dam1s ---\n", verbose = verbose)
write.file(pop3, geno.dam1, pos.map, 1:nrow(pop3), 1:nrow(pop3), out, dir.dam1, out.format, verbose)
logging.log(" --- write files of dam21s ---\n", verbose = verbose)
write.file(pop.dam21, geno.dam21, pos.map, 1:nrow(pop.dam21), 1:nrow(pop.dam21), out, dir.dam21, out.format, verbose)
logging.log(" --- write files of progenies ---\n", verbose = verbose)
write.file(pop.tricro, geno.tricro, pos.map, 1:nrow(pop.tricro), 1:nrow(pop.tricro), out, dir.trc, out.format, verbose)
}
# set total information of population and genotype
pop.total <- list(pop.sir1 = basepop, pop.sir2 = pop2, pop.dam1 = pop3, pop.dam21 = pop.dam21, pop.tricro = pop.tricro)
geno.total <- list(geno.sir1 = gc.sir1, geno.sir2 = gc.sir2, geno.dam1 = gc.dam1, geno.dam21 = gc.dam21, geno.tricro = gc.tricro)
# Free large intermediates before leaving the branch.
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(pop2); rm(pop2.geno); rm(pop2.geno.em);
rm(pop3); rm(pop3.geno); rm(pop3.geno.em); rm(geno.sir1); rm(geno.dam1); rm(geno.sir2);
rm(pop.gp); rm(pop.dam21); rm(geno.dam21); rm(pop.geno.dam21); rm(pop.geno.dam21.em);
rm(gc.sir1); rm(gc.sir2); rm(gc.dam1); rm(gc.dam21); rm(gc.tricro);
rm(pop.tricro); rm(geno.tricro); rm(pop.total.temp); gc()
} else if (mtd.reprod == "doubcro") {
# --- double cross (doubcro): (sir1 x dam1) -> sir11, (sir2 x dam2) -> dam22,
# then sir11 x dam22 -> double-cross progeny ---
out.geno.index <- 1:sum(count.ind)
logging.log(" After generation", 1, ",", sum(count.ind[1:4]), "individuals are generated...\n", verbose = verbose)
if (!is.null(outpath)) {
# One output directory per (sub)population, named "<count>_<role>";
# pre-existing directories are removed so each run starts clean.
dir.sir1 <- paste0(directory.rep, .Platform$file.sep, count.ind[1], "_sir1")
dir.dam1 <- paste0(directory.rep, .Platform$file.sep, count.ind[2], "_dam1")
dir.sir2 <- paste0(directory.rep, .Platform$file.sep, count.ind[3], "_sir2")
dir.dam2 <- paste0(directory.rep, .Platform$file.sep, count.ind[4], "_dam2")
dir.sir11 <- paste0(directory.rep, .Platform$file.sep, count.ind[5], "_sir11")
dir.dam22 <- paste0(directory.rep, .Platform$file.sep, count.ind[6], "_dam22")
dir.dbc <- paste0(directory.rep, .Platform$file.sep, count.ind[7], "_double_cross")
if (dir.exists(dir.sir1)) { unlink(dir.sir1, recursive = TRUE) }
if (dir.exists(dir.dam1)) { unlink(dir.dam1, recursive = TRUE) }
if (dir.exists(dir.sir2)) { unlink(dir.sir2, recursive = TRUE) }
if (dir.exists(dir.dam2)) { unlink(dir.dam2, recursive = TRUE) }
if (dir.exists(dir.sir11)) { unlink(dir.sir11, recursive = TRUE) }
if (dir.exists(dir.dam22)) { unlink(dir.dam22, recursive = TRUE) }
if (dir.exists(dir.dbc)) { unlink(dir.dbc, recursive = TRUE) }
dir.create(dir.sir1)
dir.create(dir.dam1)
dir.create(dir.sir2)
dir.create(dir.dam2)
dir.create(dir.sir11)
dir.create(dir.dam22)
dir.create(dir.dbc)
# Genotype storage: rows are markers, columns are outcols per individual.
geno.sir1 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char',
backingpath = dir.sir1,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam1 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char',
backingpath = dir.dam1,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.sir2 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char',
backingpath = dir.sir2,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam2 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[4],
init = 3,
type = 'char',
backingpath = dir.dam2,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.sir11 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[5],
init = 3,
type = 'char',
backingpath = dir.sir11,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.dam22 <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[6],
init = 3,
type = 'char',
backingpath = dir.dam22,
backingfile = geno.back,
descriptorfile = geno.desc)
geno.doubcro <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[7],
init = 3,
type = 'char',
backingpath = dir.dbc,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
# In-memory equivalents when no output path is requested.
geno.sir1 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[1],
init = 3,
type = 'char')
geno.dam1 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[2],
init = 3,
type = 'char')
geno.sir2 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[3],
init = 3,
type = 'char')
geno.dam2 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[4],
init = 3,
type = 'char')
geno.sir11 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[5],
init = 3,
type = 'char')
geno.dam22 <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[6],
init = 3,
type = 'char')
geno.doubcro <- big.matrix(
nrow = num.marker,
ncol = outcols * count.ind[7],
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
if (sel.on) {
# add selection to generation1
# Rank basepop (sir1) and pop2 (dam1) for the first parental cross.
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop2,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop2,
pop.pheno = pop2.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- basepop$index
ind.ordered2 <- pop2$index
}
ind.stays[[1]] <- getsd(ind.ordered1, basepop, count.sir[1], 0)
ind.stays[[2]] <- getsd(ind.ordered2, pop2, 0, count.dam[1])
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[2]]$dam
core.stays[[1]] <- ind.stay
# the first generation to the second generation(the first two populations)
# First parental cross: basepop x pop2 -> intermediate sir11 population.
pop.gp <-
reproduces(pop1 = basepop,
pop2 = pop2,
pop1.geno.id = basepop$index,
pop2.geno.id = pop2$index,
pop1.geno = basepop.geno.em,
pop2.geno = pop2.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = prog.doub,
ratio = ratio)
pop.geno.sir11 <- pop.gp$geno
pop.sir11 <- pop.gp$pop
# Renumber sir11 indices to continue directly after pop4's last index.
pop.sir11$index <- pop.sir11$index - pop.sir11$index[1] + 1 + pop4$index[length(pop4$index)]
# Columns 2, 5, 6 are the subset phenotype() needs from pop.total.
isd <- c(2, 5, 6)
pop.total.temp <- rbind(basepop[, isd], pop2[, isd], pop3[, isd], pop4[, isd], pop.sir11[, isd])
if (sel.on) {
# Phenotype sir11 and rank it for the terminal cross.
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.sir11,
pop.geno = pop.geno.sir11,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.sir11 <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.sir11 <- pop.pheno
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = pop.sir11,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total.temp,
pop.pheno = pop.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered.sir11 <- ind.ordered[-1]
} else {
ind.ordered.sir11 <- pop.sir11$index
}
ind.stays[[5]] <- getsd(ind.ordered.sir11, pop.sir11, count.sir[3], 0)
pop.geno.sir11.em <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.sir11,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
if (sel.on) {
# add selection to generation1
# Rank pop3 (sir2) and pop4 (dam2) for the second parental cross.
ind.ordered <-
selects(pop = pop3,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop3,
pop.pheno = pop3.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop4,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop4,
pop.pheno = pop4.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- pop3$index
ind.ordered2 <- pop4$index
}
ind.stays[[3]] <- getsd(ind.ordered1, pop3, count.sir[2], 0)
ind.stays[[4]] <- getsd(ind.ordered2, pop4, 0, count.dam[2])
ind.stay$sir <- ind.stays[[3]]$sir
ind.stay$dam <- ind.stays[[4]]$dam
core.stays[[2]] <- ind.stay
# the first generation to the second generation(the last two populations)
# Second parental cross: pop3 x pop4 -> intermediate dam22 population.
pop.gp <-
reproduces(pop1 = pop3,
pop2 = pop4,
pop1.geno.id = pop3$index,
pop2.geno.id = pop4$index,
pop1.geno = pop3.geno.em,
pop2.geno = pop4.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = prog.doub,
ratio = ratio)
pop.geno.dam22 <- pop.gp$geno
pop.dam22 <- pop.gp$pop
# Renumber dam22 indices to continue directly after sir11's last index.
pop.dam22$index <- pop.dam22$index - pop.dam22$index[1] + 1 + pop.sir11$index[length(pop.sir11$index)]
pop.total.temp <- rbind(pop.total.temp, pop.dam22[, isd])
if (sel.on) {
# Phenotype dam22 and rank it for the terminal cross.
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.dam22,
pop.geno = pop.geno.dam22,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.dam22 <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.dam22 <- pop.pheno
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = pop.dam22,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total.temp,
pop.pheno = pop.pheno,
verbose = verbose)
# index.tdm <- ind.ordered[1]
ind.ordered.dam22 <- ind.ordered[-1]
} else {
ind.ordered.dam22 <- pop.dam22$index
}
ind.stays[[6]] <- getsd(ind.ordered.dam22, pop.dam22, 0, count.dam[3])
names(ind.stays) <- c("basepop", "pop2", "pop3", "pop4", "pop.sir11", "pop.dam22")
# Terminal-cross parents: sir11 sires x dam22 dams.
ind.stay$sir <- ind.stays[[5]]$sir
ind.stay$dam <- ind.stays[[6]]$dam
core.stays[[3]] <- ind.stay
names(core.stays) <- c("gen1.0", "gen1.5", "gen2")
pop.geno.dam22.em <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.dam22,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
logging.log(" After generation", 2, ",", sum(count.ind[1:6]), "individuals are generated...\n", verbose = verbose)
# the second generation to the third generation
# Terminal cross: sir11 x dam22 -> double-cross progeny.
pop.gp <-
reproduces(pop1 = pop.sir11,
pop2 = pop.dam22,
pop1.geno.id = pop.sir11$index,
pop2.geno.id = pop.dam22$index,
pop1.geno = pop.geno.sir11.em,
pop2.geno = pop.geno.dam22.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = num.prog,
ratio = ratio)
pop.geno.doubcro <- pop.gp$geno
pop.doubcro <- pop.gp$pop
pop.total.temp <- rbind(pop.total.temp, pop.doubcro[, isd])
if (sel.on) {
# Phenotype the double-cross progeny and record its trait details.
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.doubcro,
pop.geno = pop.geno.doubcro,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.doubcro <- pop.pheno$pop
pop.pheno$pop <- NULL
trait$pop.doubcro <- pop.pheno
}
logging.log(" After generation", 3, ",", sum(count.ind[1:7]), "individuals are generated...\n", verbose = verbose)
# Optionally convert genotypes to 1-column form before output.
gc.sir1 <- basepop.geno
gc.dam1 <- pop2.geno
gc.sir2 <- pop3.geno
gc.dam2 <- pop4.geno
gc.sir11 <- pop.geno.sir11
gc.dam22 <- pop.geno.dam22
gc.doubcro <- pop.geno.doubcro
if (incols == 2 & outcols == 1) {
gc.sir1 <- geno.cvt1(gc.sir1)
gc.dam1 <- geno.cvt1(gc.dam1)
gc.sir2 <- geno.cvt1(gc.sir2)
gc.dam2 <- geno.cvt1(gc.dam2)
gc.sir11 <- geno.cvt1(gc.sir11)
gc.dam22 <- geno.cvt1(gc.dam22)
gc.doubcro <- geno.cvt1(gc.doubcro)
}
# Copy each converted matrix into its output big.matrix (third argument is
# the destination's own column count).
input.geno(geno.sir1, gc.sir1, ncol(geno.sir1), mrk.dense)
input.geno(geno.dam1, gc.dam1, ncol(geno.dam1), mrk.dense)
input.geno(geno.sir2, gc.sir2, ncol(geno.sir2), mrk.dense)
input.geno(geno.dam2, gc.dam2, ncol(geno.dam2), mrk.dense)
input.geno(geno.sir11, gc.sir11, ncol(geno.sir11), mrk.dense)
input.geno(geno.dam22, gc.dam22, ncol(geno.dam22), mrk.dense)
input.geno(geno.doubcro, gc.doubcro, ncol(geno.doubcro), mrk.dense)
# if traits have genetic correlation
# generate phenotype at last
if (!sel.on) {
# Without selection, phenotype all seven populations jointly so correlated
# traits are simulated together.
pop.total <- rbind(basepop, pop2, pop3, pop4, pop.sir11, pop.dam22, pop.doubcro)
geno.total <- cbind(basepop.geno, pop2.geno, pop3.geno, pop4.geno, pop.geno.sir11[], pop.geno.dam22[], pop.geno.doubcro[])
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
# Split the jointly-phenotyped total population back into its seven
# components by cumulative row position.
basepop <- pop.total[1:nind, ]
pop2 <- pop.total[(nind+1):(nind+nind2), ]
pop3 <- pop.total[(nind+nind2+1):(nind+nind2+nind3), ]
pop4 <- pop.total[(nind+nind2+nind3+1):(nind+nind2+nind3+nind4), ]
pop.sir11 <- pop.total[(nind+nind2+nind3+nind4+1):(nind+nind2+nind3+nind4+nrow(pop.sir11)), ]
pop.dam22 <- pop.total[(nind+nind2+nind3+nind4+nrow(pop.sir11)+1):(nind+nind2+nind3+nind4+nrow(pop.sir11)+nrow(pop.dam22)), ]
# Fix: the start offset previously omitted nind4, so pop.doubcro picked up
# rows still belonging to pop.dam22; include nind4 so the slice begins
# exactly where pop.dam22 ends.
pop.doubcro <- pop.total[(nind+nind2+nind3+nind4+nrow(pop.sir11)+nrow(pop.dam22)+1):(nind+nind2+nind3+nind4+nrow(pop.sir11)+nrow(pop.dam22)+nrow(pop.doubcro)), ]
}
if (!is.null(outpath)) {
# Flush file-backed matrices and write one file set per population.
flush(geno.sir1)
flush(geno.dam1)
flush(geno.sir2)
flush(geno.dam2)
flush(geno.sir11)
flush(geno.dam22)
flush(geno.doubcro)
# write files
logging.log(" --- write files of sir1s ---\n", verbose = verbose)
write.file(basepop, geno.sir1, pos.map, 1:nrow(basepop), 1:nrow(basepop), out, dir.sir1, out.format, verbose)
logging.log(" --- write files of dam1s ---\n", verbose = verbose)
write.file(pop2, geno.dam1, pos.map, 1:nrow(pop2), 1:nrow(pop2), out, dir.dam1, out.format, verbose)
logging.log(" --- write files of sir2s ---\n", verbose = verbose)
write.file(pop3, geno.sir2, pos.map, 1:nrow(pop3), 1:nrow(pop3), out, dir.sir2, out.format, verbose)
logging.log(" --- write files of dam2s ---\n", verbose = verbose)
write.file(pop4, geno.dam2, pos.map, 1:nrow(pop4), 1:nrow(pop4), out, dir.dam2, out.format, verbose)
logging.log(" --- write files of sir11s ---\n", verbose = verbose)
write.file(pop.sir11, geno.sir11, pos.map, 1:nrow(pop.sir11), 1:nrow(pop.sir11), out, dir.sir11, out.format, verbose)
logging.log(" --- write files of dam22s ---\n", verbose = verbose)
write.file(pop.dam22, geno.dam22, pos.map, 1:nrow(pop.dam22), 1:nrow(pop.dam22), out, dir.dam22, out.format, verbose)
logging.log(" --- write files of progenies ---\n", verbose = verbose)
write.file(pop.doubcro, geno.doubcro, pos.map, 1:nrow(pop.doubcro), 1:nrow(pop.doubcro), out, dir.dbc, out.format, verbose)
}
# set total information of population and genotype
pop.total <- list(pop.sir1 = basepop, pop.dam1 = pop2, pop.sir2 = pop3, pop.dam2 = pop4, pop.sir11 = pop.sir11, pop.dam22 = pop.dam22, pop.doubcro = pop.doubcro)
geno.total <- list(geno.sir1 = gc.sir1, geno.dam1 = gc.dam1, geno.sir2 = gc.sir2, geno.dam2 = gc.dam2, geno.sir11 = gc.sir11, geno.dam22 = gc.dam22, geno.doubcro = gc.doubcro)
# Free large intermediates before leaving the branch.
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(geno.sir1);
rm(pop2); rm(pop2.geno); rm(pop2.geno.em); rm(geno.dam1);
rm(pop3); rm(pop3.geno); rm(pop3.geno.em); rm(geno.sir2);
rm(pop4); rm(pop4.geno); rm(pop4.geno.em); rm(geno.dam2);
rm(pop.sir11); rm(pop.geno.sir11); rm(pop.geno.sir11.em);
rm(pop.dam22); rm(pop.geno.dam22); rm(pop.geno.dam22.em);
rm(pop.gp); rm(pop.doubcro); rm(pop.geno.doubcro);
rm(gc.sir1); rm(gc.dam1); rm(gc.sir2); rm(gc.dam2);
rm(gc.sir11); rm(gc.dam22); rm(gc.doubcro);
rm(pop.total.temp); gc()
} else if (mtd.reprod == "backcro") {
# --- backcross (backcro): repeatedly mate basepop sires to the current
# generation's dams for num.gen generations ---
if (num.gen != length(prog.back))
stop(" Number of generation should equal to the length of prog.back!")
# Drop non-positive generation numbers before mapping them to column indices.
out.geno.gen <- out.geno.gen[out.geno.gen > 0]
out.pheno.gen <- out.pheno.gen[out.pheno.gen > 0]
out.geno.index <- getindex(count.ind, out.geno.gen)
out.pheno.index <- getindex(count.ind, out.pheno.gen)
# store all genotype
# geno.total.temp holds every generation (needed for the final joint
# phenotyping); geno.total holds only the generations requested for output.
geno.total.temp <- big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind),
init = 3,
type = 'char')
if (!is.null(outpath)) {
geno.total <- filebacked.big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind[out.geno.gen]),
init = 3,
type = 'char',
backingpath = directory.rep,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.total <- big.matrix(
nrow = num.marker,
ncol = outcols * sum(count.ind[out.geno.gen]),
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
# set total population
pop.total <- rbind(basepop, pop2)
gc.base <- basepop.geno
gc.pop2 <- pop2.geno
if (incols == 2 & outcols == 1) {
gc.base <- geno.cvt1(gc.base)
gc.pop2 <- geno.cvt1(gc.pop2)
}
if (1 %in% out.geno.gen) {
# Third argument is the cumulative end column; count.ind[1] presumably
# covers both generation-1 populations -- TODO confirm against getindex().
input.geno(geno.total, gc.base, outcols * nrow(basepop), mrk.dense)
input.geno(geno.total, gc.pop2, outcols * count.ind[1], mrk.dense)
}
if (!sel.on) {
input.geno(geno.total.temp, gc.base, outcols*nrow(basepop), mrk.dense)
input.geno(geno.total.temp, gc.pop2, outcols*count.ind[1], mrk.dense)
}
logging.log(" After generation 1 ,", count.ind[1], "individuals are generated...\n", verbose = verbose)
if (num.gen > 1) {
if (sel.on) {
# add selection to generation1
ind.ordered <-
selects(pop = basepop,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = basepop,
pop.pheno = pop1.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered1 <- ind.ordered[-1]
ind.ordered <-
selects(pop = pop2,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop2,
pop.pheno = pop2.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered1 <- basepop$index
ind.ordered2 <- pop2$index
}
# Retained sires always come from basepop (recurrent parent of the backcross).
ind.stays[[1]] <- getsd(ind.ordered1, basepop, count.sir[1], 0)
ind.stays[[2]] <- getsd(ind.ordered2, pop2, 0, count.dam[1])
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[2]]$dam
core.stays[[1]] <- ind.stay
for (i in 2:num.gen) {
# Each iteration mates basepop sires with the current dam population
# (pop2, replaced by the newest progeny at the bottom of the loop).
pop.gp <-
reproduces(pop1 = basepop,
pop2 = pop2,
pop1.geno.id = basepop$index,
pop2.geno.id = pop2$index,
pop1.geno = basepop.geno.em,
pop2.geno = pop2.geno.em,
incols = incols,
ind.stay = ind.stay,
mtd.reprod = "singcro",
num.prog = num.prog,
ratio = ratio)
pop.geno.curr <- pop.gp$geno
pop.curr <- pop.gp$pop
if (i %in% out.geno.gen) {
# Append this generation's genotype to the output matrix at the
# cumulative column offset of all requested generations so far.
gc <- pop.geno.curr
if (incols == 2 & outcols == 1) gc <- geno.cvt1(gc)
out.gg <- out.geno.gen[1:which(out.geno.gen == i)]
input.geno(geno.total, gc, outcols * sum(count.ind[out.gg]), mrk.dense)
}
input.geno(geno.total.temp, pop.geno.curr, outcols*sum(count.ind[1:i]), mrk.dense)
isd <- c(2, 5, 6)
pop.total.temp <- rbind(pop.total[1:sum(count.ind[1:(i-1)]), isd], pop.curr[, isd])
if (sel.on) {
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.curr,
pop.geno = pop.geno.curr,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total.temp,
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.curr <- pop.pheno$pop
pop.pheno$pop <- NULL
}
pop.total <- rbind(pop.total, pop.curr)
logging.log(" After generation", i, ",", sum(count.ind[1:i]), "individuals are generated...\n", verbose = verbose)
# No further mating needed after the final generation.
if (i == num.gen) break
if (sel.on) {
# output index.tdm and ordered individuals indice
ind.ordered <-
selects(pop = pop.curr,
decr = decr,
sel.multi = sel.multi,
index.wt = index.wt,
index.tdm = index.tdm,
goal.perc = goal.perc,
pass.perc = pass.perc,
sel.sing = sel.sing,
pop.total = pop.total,
pop.pheno = pop.pheno,
verbose = verbose)
index.tdm <- ind.ordered[1]
ind.ordered2 <- ind.ordered[-1]
} else {
ind.ordered2 <- pop.curr$index
}
# Dams for the next round come from the new progeny; sires always stay
# the generation-1 basepop selection (ind.stays[[1]]).
ind.stays[[i+1]] <- getsd(ind.ordered2, pop.curr, 0, count.dam[i])
ind.stay$sir <- ind.stays[[1]]$sir
ind.stay$dam <- ind.stays[[i+1]]$dam
core.stays[[i]] <- ind.stay
pop2.geno.em <- # genotype matrix after Exchange and Mutation
genotype(geno = pop.geno.curr,
incols = incols,
blk.rg = blk.rg,
recom.spot = recom.spot,
range.hot = range.hot,
range.cold = range.cold,
# recom.cri = "cri3",
rate.mut = rate.mut,
verbose = verbose)
pop2 <- pop.curr
} # end for
}
# Name the retained-parent records once.
# Fixes two defects in the original:
#  - for num.gen == 2, `2:(num.gen - 1)` counted DOWN (2:1), producing too
#    many names for ind.stays; the generation-name vector is now empty then;
#  - the duplicated, unconditional names(core.stays) assignment after the
#    if-block errored for num.gen == 1 (more names than list elements) and
#    was redundant otherwise, so it is removed; seq_len() avoids 1:0.
if (num.gen > 1) {
  gen.nms <- if (num.gen > 2) paste0("gen", 2:(num.gen - 1)) else character(0)
  names(ind.stays) <- c("basepop", "pop2", gen.nms)
  names(core.stays) <- paste0("gen", seq_len(num.gen - 1))
}
# if traits have genetic correlation
# generate phenotype at last
# Joint phenotyping over all generations (sel.on forced FALSE here so the
# full correlated-trait simulation runs once at the end).
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = geno.total.temp,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total,
sel.on = FALSE,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
if (!is.null(outpath)) {
flush(geno.total)
# write files
logging.log(" --- write files of total population ---\n", verbose = verbose)
write.file(pop.total, geno.total, pos.map, out.geno.index, out.pheno.index, out, directory.rep, out.format, verbose)
}
# Free intermediates; loop-local objects exist only when the loop ran.
if (num.gen > 1) {
rm(pop.gp); rm(pop.curr); rm(pop.geno.curr); rm(pop.total.temp)
}
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(pop2); rm(pop2.geno);
rm(pop2.geno.em); rm(geno.total.temp); gc()
} else if (mtd.reprod == "userped") {
pop1.geno.copy <- basepop.geno
if (is.null(userped)) {
stop(" Please input pedigree in the process userped!")
}
rawped <- userped
rawped[is.na(rawped)] <- "0"
if (as.numeric(rawped[1, 2]) < basepop$index[1]) {
stop(" The index of the first sir should be in index of pop1!")
}
# Thanks to YinLL for sharing codes of pedigree sorting
pedx <- as.matrix(rawped)
pedx0 <- c(setdiff(pedx[, 2],pedx[, 1]), setdiff(pedx[, 3],pedx[, 1]))
if(length(pedx0) != 0){
pedx <- rbind(cbind(pedx0, "0", "0"), pedx)
}
pedx <- pedx[pedx[, 1] != "0", ]
pedx <- pedx[!duplicated(pedx), ]
pedx <- pedx[!duplicated(pedx[, 1]), ]
pedx1 <- cbind(1:(ncol(basepop.geno)/2), "0", "0")
pedx2 <- pedx[!(pedx[, 2] == "0" & pedx[, 3] == "0"), ]
go = TRUE
i <- 1
count.ind <- nrow(pedx1)
logging.log(" After generation", i, ",", sum(count.ind[1:i]), "individuals are generated...\n", verbose = verbose)
while(go == TRUE) {
i <- i + 1
Cpedx <- c(pedx1[, 1])
idx <- (pedx2[, 2] %in% Cpedx) & (pedx2[, 3] %in% Cpedx)
if (sum(idx) == 0) {
logging.log(" Some individuals in pedigree are not in mating process!\n They are", verbose = verbose)
simer.print(pedx2[, 1], verbose = verbose)
pedx2 <- pedx2[-c(1:nrow(pedx2)), ]
} else {
index.sir <- as.numeric(pedx2[idx, 2])
index.dam <- as.numeric(pedx2[idx, 3])
pop.geno.curr <- mate(pop.geno = pop1.geno.copy, index.sir = index.sir, index.dam = index.dam)
pop1.geno.copy <- cbind(pop1.geno.copy[], pop.geno.curr[])
pedx1 <- rbind(pedx1, pedx2[idx, ])
pedx2 <- pedx2[!idx, ]
count.ind <- c(count.ind, length(index.dam))
logging.log(" After generation", i, ",", sum(count.ind[1:i]), "individuals are generated...\n", verbose = verbose)
}
if (class(pedx2) == "character") pedx2 <- matrix(pedx2, 1)
if (dim(pedx2)[1] == 0) go = FALSE
}
ped <- pedx1
rm(pedx1);rm(pedx2);gc()
# Create a folder to save files
if (!is.null(outpath)) {
if (!dir.exists(outpath)) stop("Please check your outpath!")
if (out.format == "numeric") {
outpath = paste0(outpath, .Platform$file.sep, sum(count.ind), "_Simer_Data_numeric")
} else if (out.format == "plink"){
outpath = paste0(outpath, .Platform$file.sep, sum(count.ind), "_Simer_Data_plink")
} else {
stop("out.format should be 'numeric' or 'plink'!")
}
if (!dir.exists(outpath)) { dir.create(outpath) }
directory.rep <- paste0(outpath, .Platform$file.sep, "replication", replication)
if (dir.exists(directory.rep)) {
remove_bigmatrix(file.path(directory.rep, "simer"))
unlink(directory.rep, recursive = TRUE)
}
dir.create(directory.rep)
}
index <- ped[, 1]
out.geno.index <- index
ped.sir <- ped[, 2]
ped.dam <- ped[, 3]
sex <- rep(0, length(index))
sex[index %in% unique(ped.sir)] <- 1
sex[index %in% unique(ped.dam)] <- 2
sex[sex == 0] <- sample(1:2, sum(sex == 0), replace = TRUE)
fam.temp <- getfam(ped.sir, ped.dam, 1, "pm")
gen <- rep(1:length(count.ind), count.ind)
pop.total <- data.frame(gen = gen, index = index, fam = fam.temp[, 1], infam = fam.temp[, 2], sir = ped.sir, dam = ped.dam, sex = sex)
gc <- pop1.geno.copy
if (incols == 2 & outcols == 1) gc <- geno.cvt1(gc)
if (!is.null(outpath)) {
geno.total <- filebacked.big.matrix(
nrow = num.marker,
ncol = ncol(gc),
init = 3,
type = 'char',
backingpath = directory.rep,
backingfile = geno.back,
descriptorfile = geno.desc)
options(bigmemory.typecast.warning=FALSE)
} else {
geno.total <- big.matrix(
nrow = num.marker,
ncol = ncol(gc),
init = 3,
type = 'char')
options(bigmemory.typecast.warning=FALSE)
}
input.geno(geno.total, gc, ncol(geno.total), mrk.dense)
isd <- c(2, 5, 6)
pop.pheno <-
phenotype(effs = effs,
FR = FR,
cv = cv,
pop = pop.total,
pop.geno = pop1.geno.copy,
pos.map = pos.map,
var.pheno = var.pheno,
h2.tr1 = h2.tr1,
gnt.cov = gnt.cov,
h2.trn = h2.trn,
sel.crit = sel.crit,
pop.total = pop.total[, isd],
sel.on = sel.on,
inner.env = inner.env,
verbose = verbose)
pop.total <- pop.pheno$pop
pop.pheno$pop <- NULL
trait <- pop.pheno
if (!is.null(outpath)) {
flush(geno.total)
logging.log(" --- write files of total population ---\n", verbose = verbose)
write.file(pop.total, geno.total, pos.map, index, index, out, directory.rep, out.format, verbose)
}
rm(basepop); rm(basepop.geno); rm(basepop.geno.em); rm(userped); rm(rawped); rm(ped); gc()
} else {
stop("Please input correct reproduction method!")
}
# total information list
simer.list <- list(pop = pop.total, effs = effs, trait = trait, geno = geno.total, genoid = out.geno.index, map = pos.map, si = sel.i, ind.stays = ind.stays, core.stays = core.stays)
rm(ind.stays); rm(effs); rm(trait); rm(pop.total); rm(geno.total); rm(input.map); rm(pos.map); gc()
if (!is.null(selPath)) {
goal.plan <- complan(simls = simer.list, FR = FR, index.wt = index.wt, decr = decr, selPath = selPath, verbose = verbose)
simer.list$goal.plan <- goal.plan
rm(goal.plan); gc()
}
print_accomplished(width = 70, verbose = verbose)
# Return the last directory
ed <- Sys.time()
logging.log(" SIMER DONE WITHIN TOTAL RUN TIME:", format_time(as.numeric(ed)-as.numeric(op)), "\n", verbose = verbose)
return(simer.list)
}
|
# Jake Yeung
# Date of Creation: 2020-01-20
# File: ~/projects/scchic/scripts/rstudioserver_analysis/BM_all_merged/6-correct_variance_LDA_k4me3.R
# K4me3
#
# Purpose: post-LDA quality control for H3K4me3 bone-marrow scChIC data.
# Loads LDA output (out.lda + count.mat), computes a UMAP/Louvain clustering
# of the topic matrix, derives imputed (LDA-based) and raw intrachromosomal
# variance per cell, removes bad plates and low-variance cells, then writes
# filtered count matrices (.rds) and diagnostic plots (.pdf).
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(JFuncs)       # project package -- not on CRAN
library(scchicFuncs)  # project package: DoLouvain, CalculateVarAll, CalculateVarRaw, ClipLast
library(hash)
library(igraph)
library(umap)
# Constants ---------------------------------------------------------------
binsize <- 50000      # upstream genomic bin size (bp); not referenced again below
mergesize <- 1000     # number of bins smoothed together for the raw variance estimate
bigbinsize <- 50000 * mergesize  # effective bin size implied by the smoothing (used in plot labels)
jsystem <- "BoneMarrow"
jmark <- "H3K4me3"
jcutoff.ncuts.var <- 0.3  # keep cells whose raw intrachromosomal variance exceeds this cutoff
outdir <- "/home/jyeung/hpc/scChiC/from_rstudioserver/quality_control_postLDA_var_BM.2020-01-31"
dir.create(outdir)
# Output file names encode mark, variance cutoff and smoothing bin size.
outname <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".AllMerged.rds")
outname.unenriched <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".Unenriched.rds")
pdfname <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".pdf")
outf <- file.path(outdir, outname)
outf.unenriched <- file.path(outdir, outname.unenriched)
outpdf <- file.path(outdir, pdfname)
# Regex alternations: plates to drop entirely / plates forming the "unenriched" subset.
bad.plates.grep <- paste("B6-13W1-BM-H3K4me3-4", sep = "|")
bm.unenriched.plates.grep <- paste("B6-13W1-BM-H3K4me3-1", "B6-13W1-BM-H3K4me3-2", "B6-13W1-BM-H3K4me3-3", sep = "|")
# Set up -----------------------------------------------------------------
inf <- paste0("/home/jyeung/hpc/scChiC/raw_demultiplexed/LDA_outputs_all/ldaAnalysisBins_B6BM_All_allmarks.2020-01-12.bsizestepsize_50000_50000.NoSliding/lda_outputs.count_mat.", jmark, ".countcutoff_1000-500-1000-1000.TAcutoff_0.5.K-30.binarize.FALSE/ldaOut.count_mat.", jmark, ".countcutoff_1000-500-1000-1000.TAcutoff_0.5.K-30.Robj")
load(inf, v=T)  # loads the objects used below (out.lda, count.mat); v = verbose
# Show UMAP ---------------------------------------------------------------
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123  # fixed seed so the embedding is reproducible
tm.result <- posterior(out.lda)
topics.mat <- tm.result$topics  # cells (rows, named) x topics
umap.out <- umap(topics.mat, config = jsettings)
dat.umap.long <- data.frame(cell = rownames(umap.out[["layout"]]), umap1 = umap.out[["layout"]][, 1], umap2 = umap.out[["layout"]][, 2], stringsAsFactors = FALSE)
dat.umap.long <- DoLouvain(topics.mat, jsettings, dat.umap.long)  # adds a `louvain` cluster column
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
m.umap <- ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = louvain)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_manual(values = cbPalette)
# Imputed log2 signal from the LDA model: topics %*% terms, transposed to bins x cells.
dat.impute.log <- log2(t(tm.result$topics %*% tm.result$terms))
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
dat.var <- CalculateVarAll(dat.impute.log, jchromos)  # imputed intrachromosomal variance per cell
dat.merge <- left_join(dat.umap.long, dat.var) %>%
rowwise() %>%
mutate(experi = ClipLast(cell, jsep = "-"),
plate = ClipLast(cell, jsep = "_"),
prefix = gsub("PZ-Bl6-BM", "Linneg", paste(strsplit(gsub("PZ-", "", cell), "-")[[1]][1:4], collapse = "-")))
m.umap.plates <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = louvain)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values = cbPalette)
m.umap.var <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1)
m.umap.var.plates <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1) + facet_wrap(~plate)
# Show raw ---------------------------------------------------------------
# Raw (non-imputed) intrachromosomal variance and total cut counts per cell.
dat.var.raw <- CalculateVarRaw(count.mat, merge.size = mergesize, chromo.exclude.grep = "^chrX|^chrY", jpseudocount = 1, jscale = 10^6, calculate.ncuts = TRUE)
dat.merge2 <- left_join(dat.merge, dat.var.raw)
# Correlate raw intrachrom var with UMAP ---------------------------------
m.rawvar.vs.imputevar <- ggplot(dat.merge2, aes(x = ncuts.var, y = cell.var.within.sum.norm, color = prefix)) + geom_point(alpha = 0.5) +
scale_x_log10() + scale_y_log10() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab(paste("Intrachromo var raw from", bigbinsize/10^6, "MB bins")) +
ylab("Imputed intrachromo var from LDA") +
ggtitle(jmark, jsystem) + geom_vline(xintercept = jcutoff.ncuts.var)
print(m.rawvar.vs.imputevar)
m.rawvar.vs.imputevar.plates <- ggplot(dat.merge2, aes(x = ncuts.var, y = cell.var.within.sum.norm, color = prefix)) + geom_point(alpha = 0.5) +
scale_x_log10() + scale_y_log10() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab(paste("Intrachromo var raw from", bigbinsize/10^6, "MB bins")) +
ylab("Imputed intrachromo var from LDA") + facet_wrap(~plate) +
ggtitle(jmark, jsystem)
print(m.rawvar.vs.imputevar.plates)
# show ncuts vs var
m.ncutsVSvar <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + geom_hline(yintercept = jcutoff.ncuts.var)
m.ncutsVSvar.plate <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + facet_wrap(~plate) + geom_hline(yintercept = jcutoff.ncuts.var)
# filter bad plates and bad cells and redo?
unique(dat.merge2$prefix)
# calculate intrachromo cutoff
dat.linneg <- subset(dat.merge2, grepl("Linneg", plate))  # plates whose prefix was mapped to "Linneg" above
m.var.cutoff <- ggplot(dat.linneg, aes(x = ncuts.var)) + geom_density() + scale_x_log10() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
geom_vline(xintercept = jcutoff.ncuts.var)
m.ncutsVSvar.plate.cutoff <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + facet_wrap(~plate) + geom_hline(yintercept = jcutoff.ncuts.var)
# Write new count mat -----------------------------------------------------
# Keep cells that are not on a bad plate and pass the raw-variance cutoff.
dat.keep <- subset(dat.merge2, !grepl(bad.plates.grep, plate) & ncuts.var > jcutoff.ncuts.var)
cells.keep <- dat.keep$cell
# Same variance filter restricted to the plates listed in bm.unenriched.plates.grep.
dat.keep.unenriched <- subset(dat.merge2, grepl(bm.unenriched.plates.grep, plate) & ncuts.var > jcutoff.ncuts.var)
cells.keep.unenriched <- dat.keep.unenriched$cell
count.mat.keep.unenriched <- count.mat[, cells.keep.unenriched]
print(unique(dat.keep$prefix))
# plot UMAP after removing bad cells and plates
m.umap.var.plates.filt <- ggplot(dat.merge %>% filter(cell %in% cells.keep), aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1) + facet_wrap(~plate)
count.mat.keep <- count.mat[, cells.keep]
print(dim(count.mat))
print(dim(count.mat.keep))
saveRDS(count.mat.keep, outf)
saveRDS(count.mat.keep.unenriched, outf.unenriched)
# Render all diagnostic plots into a single PDF.
pdf(file = outpdf, useDingbats = FALSE)
m.umap
m.umap.plates
m.umap.var
m.umap.var.plates
m.ncutsVSvar
m.ncutsVSvar.plate
m.ncutsVSvar.plate.cutoff
# plot(pca.out$sdev ^ 2 / sum(pca.out$sdev ^ 2))
m.rawvar.vs.imputevar
m.rawvar.vs.imputevar.plates
m.var.cutoff
m.umap.var.plates.filt
dev.off()
|
/scripts/rstudioserver_analysis/BM_all_merged/6-correct_variance_LDA_k4me3_remove_plates_write_unenriched_and_stem.R
|
no_license
|
jakeyeung/sortchicAllScripts
|
R
| false
| false
| 8,127
|
r
|
# Jake Yeung
# Date of Creation: 2020-01-20
# File: ~/projects/scchic/scripts/rstudioserver_analysis/BM_all_merged/6-correct_variance_LDA_k4me3.R
# K4me3
#
# Purpose: post-LDA quality control for H3K4me3 bone-marrow scChIC data.
# Loads LDA output (out.lda + count.mat), computes a UMAP/Louvain clustering
# of the topic matrix, derives imputed (LDA-based) and raw intrachromosomal
# variance per cell, removes bad plates and low-variance cells, then writes
# filtered count matrices (.rds) and diagnostic plots (.pdf).
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
library(Matrix)
library(topicmodels)
library(JFuncs)       # project package -- not on CRAN
library(scchicFuncs)  # project package: DoLouvain, CalculateVarAll, CalculateVarRaw, ClipLast
library(hash)
library(igraph)
library(umap)
# Constants ---------------------------------------------------------------
binsize <- 50000      # upstream genomic bin size (bp); not referenced again below
mergesize <- 1000     # number of bins smoothed together for the raw variance estimate
bigbinsize <- 50000 * mergesize  # effective bin size implied by the smoothing (used in plot labels)
jsystem <- "BoneMarrow"
jmark <- "H3K4me3"
jcutoff.ncuts.var <- 0.3  # keep cells whose raw intrachromosomal variance exceeds this cutoff
outdir <- "/home/jyeung/hpc/scChiC/from_rstudioserver/quality_control_postLDA_var_BM.2020-01-31"
dir.create(outdir)
# Output file names encode mark, variance cutoff and smoothing bin size.
outname <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".AllMerged.rds")
outname.unenriched <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".Unenriched.rds")
pdfname <- paste0("BM_", jmark, ".varcutoff_", jcutoff.ncuts.var, ".platesRemoved.SmoothBinSize_", mergesize, ".pdf")
outf <- file.path(outdir, outname)
outf.unenriched <- file.path(outdir, outname.unenriched)
outpdf <- file.path(outdir, pdfname)
# Regex alternations: plates to drop entirely / plates forming the "unenriched" subset.
bad.plates.grep <- paste("B6-13W1-BM-H3K4me3-4", sep = "|")
bm.unenriched.plates.grep <- paste("B6-13W1-BM-H3K4me3-1", "B6-13W1-BM-H3K4me3-2", "B6-13W1-BM-H3K4me3-3", sep = "|")
# Set up -----------------------------------------------------------------
inf <- paste0("/home/jyeung/hpc/scChiC/raw_demultiplexed/LDA_outputs_all/ldaAnalysisBins_B6BM_All_allmarks.2020-01-12.bsizestepsize_50000_50000.NoSliding/lda_outputs.count_mat.", jmark, ".countcutoff_1000-500-1000-1000.TAcutoff_0.5.K-30.binarize.FALSE/ldaOut.count_mat.", jmark, ".countcutoff_1000-500-1000-1000.TAcutoff_0.5.K-30.Robj")
load(inf, v=T)  # loads the objects used below (out.lda, count.mat); v = verbose
# Show UMAP ---------------------------------------------------------------
jsettings <- umap.defaults
jsettings$n_neighbors <- 30
jsettings$min_dist <- 0.1
jsettings$random_state <- 123  # fixed seed so the embedding is reproducible
tm.result <- posterior(out.lda)
topics.mat <- tm.result$topics  # cells (rows, named) x topics
umap.out <- umap(topics.mat, config = jsettings)
dat.umap.long <- data.frame(cell = rownames(umap.out[["layout"]]), umap1 = umap.out[["layout"]][, 1], umap2 = umap.out[["layout"]][, 2], stringsAsFactors = FALSE)
dat.umap.long <- DoLouvain(topics.mat, jsettings, dat.umap.long)  # adds a `louvain` cluster column
cbPalette <- c("#696969", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7", "#006400", "#FFB6C1", "#32CD32", "#0b1b7f", "#ff9f7d", "#eb9d01", "#7fbedf")
m.umap <- ggplot(dat.umap.long, aes(x = umap1, y = umap2, color = louvain)) + geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) + scale_color_manual(values = cbPalette)
# Imputed log2 signal from the LDA model: topics %*% terms, transposed to bins x cells.
dat.impute.log <- log2(t(tm.result$topics %*% tm.result$terms))
jchromos <- paste("chr", c(seq(19), "X", "Y"), sep = "")
dat.var <- CalculateVarAll(dat.impute.log, jchromos)  # imputed intrachromosomal variance per cell
dat.merge <- left_join(dat.umap.long, dat.var) %>%
rowwise() %>%
mutate(experi = ClipLast(cell, jsep = "-"),
plate = ClipLast(cell, jsep = "_"),
prefix = gsub("PZ-Bl6-BM", "Linneg", paste(strsplit(gsub("PZ-", "", cell), "-")[[1]][1:4], collapse = "-")))
m.umap.plates <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = louvain)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_manual(values = cbPalette)
m.umap.var <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1)
m.umap.var.plates <- ggplot(dat.merge, aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1) + facet_wrap(~plate)
# Show raw ---------------------------------------------------------------
# Raw (non-imputed) intrachromosomal variance and total cut counts per cell.
dat.var.raw <- CalculateVarRaw(count.mat, merge.size = mergesize, chromo.exclude.grep = "^chrX|^chrY", jpseudocount = 1, jscale = 10^6, calculate.ncuts = TRUE)
dat.merge2 <- left_join(dat.merge, dat.var.raw)
# Correlate raw intrachrom var with UMAP ---------------------------------
m.rawvar.vs.imputevar <- ggplot(dat.merge2, aes(x = ncuts.var, y = cell.var.within.sum.norm, color = prefix)) + geom_point(alpha = 0.5) +
scale_x_log10() + scale_y_log10() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab(paste("Intrachromo var raw from", bigbinsize/10^6, "MB bins")) +
ylab("Imputed intrachromo var from LDA") +
ggtitle(jmark, jsystem) + geom_vline(xintercept = jcutoff.ncuts.var)
print(m.rawvar.vs.imputevar)
m.rawvar.vs.imputevar.plates <- ggplot(dat.merge2, aes(x = ncuts.var, y = cell.var.within.sum.norm, color = prefix)) + geom_point(alpha = 0.5) +
scale_x_log10() + scale_y_log10() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
xlab(paste("Intrachromo var raw from", bigbinsize/10^6, "MB bins")) +
ylab("Imputed intrachromo var from LDA") + facet_wrap(~plate) +
ggtitle(jmark, jsystem)
print(m.rawvar.vs.imputevar.plates)
# show ncuts vs var
m.ncutsVSvar <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + geom_hline(yintercept = jcutoff.ncuts.var)
m.ncutsVSvar.plate <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + facet_wrap(~plate) + geom_hline(yintercept = jcutoff.ncuts.var)
# filter bad plates and bad cells and redo?
unique(dat.merge2$prefix)
# calculate intrachromo cutoff
dat.linneg <- subset(dat.merge2, grepl("Linneg", plate))  # plates whose prefix was mapped to "Linneg" above
m.var.cutoff <- ggplot(dat.linneg, aes(x = ncuts.var)) + geom_density() + scale_x_log10() +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
geom_vline(xintercept = jcutoff.ncuts.var)
m.ncutsVSvar.plate.cutoff <- ggplot(dat.merge2, aes(x = ncuts, y = ncuts.var, color = prefix)) + geom_point(alpha = 0.25) +
theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_x_log10() + scale_y_log10() + facet_wrap(~plate) + geom_hline(yintercept = jcutoff.ncuts.var)
# Write new count mat -----------------------------------------------------
# Keep cells that are not on a bad plate and pass the raw-variance cutoff.
dat.keep <- subset(dat.merge2, !grepl(bad.plates.grep, plate) & ncuts.var > jcutoff.ncuts.var)
cells.keep <- dat.keep$cell
# Same variance filter restricted to the plates listed in bm.unenriched.plates.grep.
dat.keep.unenriched <- subset(dat.merge2, grepl(bm.unenriched.plates.grep, plate) & ncuts.var > jcutoff.ncuts.var)
cells.keep.unenriched <- dat.keep.unenriched$cell
count.mat.keep.unenriched <- count.mat[, cells.keep.unenriched]
print(unique(dat.keep$prefix))
# plot UMAP after removing bad cells and plates
m.umap.var.plates.filt <- ggplot(dat.merge %>% filter(cell %in% cells.keep), aes(x = umap1, y = umap2, color = cell.var.within.sum.norm)) +
geom_point() + theme_bw() + theme(aspect.ratio=1, panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
scale_color_viridis_c(direction = -1) + facet_wrap(~plate)
count.mat.keep <- count.mat[, cells.keep]
print(dim(count.mat))
print(dim(count.mat.keep))
saveRDS(count.mat.keep, outf)
saveRDS(count.mat.keep.unenriched, outf.unenriched)
# Render all diagnostic plots into a single PDF.
pdf(file = outpdf, useDingbats = FALSE)
m.umap
m.umap.plates
m.umap.var
m.umap.var.plates
m.ncutsVSvar
m.ncutsVSvar.plate
m.ncutsVSvar.plate.cutoff
# plot(pca.out$sdev ^ 2 / sum(pca.out$sdev ^ 2))
m.rawvar.vs.imputevar
m.rawvar.vs.imputevar.plates
m.var.cutoff
m.umap.var.plates.filt
dev.off()
|
# Example usage of uweights(): derive unit-level weights from a small-area
# estimation fit and check that weighted sample sums reproduce the
# population-level estimates.
d <- generateFakeData()
# compute small area estimates
sae <- fSAE(y0 ~ x + area2, data=d$sam, area="area", popdata=d$Xpop,
method="hybrid", keep.data=TRUE)
# compute unit weights
w <- uweights(sae, forTotal=TRUE)
summary(w) # summary statistics
plot(w) # histogram of weights
# checks
# weighted sum of the sample response should equal the sum of the
# area estimates scaled by the area population sizes
all.equal(sum(w * sae$y), sum(EST(sae) * sae$Narea))
# weighted column sums of the design matrix should match the
# corresponding population totals of the auxiliary variables
all.equal(colSums(w * as.matrix(sae$X)), colSums(sae$Xp * sae$Narea))
|
/R/examples/uweights.R
|
no_license
|
cran/hbsae
|
R
| false
| false
| 438
|
r
|
# Example usage of uweights(): derive unit-level weights from a small-area
# estimation fit and check that weighted sample sums reproduce the
# population-level estimates.
d <- generateFakeData()
# compute small area estimates
sae <- fSAE(y0 ~ x + area2, data=d$sam, area="area", popdata=d$Xpop,
method="hybrid", keep.data=TRUE)
# compute unit weights
w <- uweights(sae, forTotal=TRUE)
summary(w) # summary statistics
plot(w) # histogram of weights
# checks
# weighted sum of the sample response should equal the sum of the
# area estimates scaled by the area population sizes
all.equal(sum(w * sae$y), sum(EST(sae) * sae$Narea))
# weighted column sums of the design matrix should match the
# corresponding population totals of the auxiliary variables
all.equal(colSums(w * as.matrix(sae$X)), colSums(sae$Xp * sae$Narea))
|
##############################
# #
# Group_project MSI Data #
# Machine Learning #
##############################
# Purpose: benchmark several regression models (lm, kNN, random forest, SVMs)
# for predicting bacterial counts (TVC and Pseudomonas spp.) from 18 MSI
# features, using 50 repeated 70/30 train/test splits and hold-out RMSE.
# Clear workspace
# NOTE(review): `rm(list = ls())` in a shared script is discouraged --
# consider running in a fresh session instead.
rm(list=ls())
# Close any open graphics devices
graphics.off()
library(caret)
library(kernlab)
library(gmodels)   # ci() -- confidence intervals used below
library(openxlsx)
source("CreateObj.R")  # provides CreateObj(), used below
# Read datasets
MSI <- read.xlsx("MSIdata_CT_R1_2_updated.xlsx", sheet=1, rowName=T)
bacteria<- MSI[,37:38]##Bacterial counts (TVC and Pseudomonas spp. columns)
MSI <- MSI[,1:18]  # keep the 18 MSI feature columns
MSI_raw<- CreateObj(MSI, bacteria)
MSI_TVC <- MSI_raw[,-(20)]### drop the Pseudomonas column (20); TVC is the response (col 19)
MSI_Ps <- MSI_raw[,-(19)]### drop the TVC column (19); Pseudomonas is the response
# prepare training scheme
control <- trainControl(method="repeatedcv", number=10, repeats=3)
# Split training and test set for TVC count
set.seed(123)
train.index <- createDataPartition(MSI_TVC$TVC, p = .7,list = FALSE, groups=3, times = 50)
##################################################################################
################### Linear Model for TVC count by MSI_data #####################
##################################################################################
# Fit an ordinary linear model on each of the 50 train/test splits and
# record the hold-out RMSE for the TVC count.
RMSELMT <- numeric(50)  # preallocate rather than growing with c() in the loop
for (i in seq_len(50)) {
  trainN <- MSI_TVC[train.index[, i], ]
  testN <- MSI_TVC[-train.index[, i], ]
  # train with the repeated-CV scheme prepared above
  lm.model.fit.TVC <- train(TVC ~ ., method = 'lm', data = trainN, trControl = control)
  predicted.lm <- predict(lm.model.fit.TVC, testN[, -19])  # column 19 is the TVC response
  RMSELMT[i] <- RMSE(testN$TVC, predicted.lm)  # hold-out RMSE for this split
}
lm_TVCM <- mean(RMSELMT)  # mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))  # x-axis for the RMSE plots; reused by later sections
lm_TVCS <- sd(RMSELMT)  # SD of the RMSE
LM_TVC_95 <- ci(RMSELMT, confidence = 0.95)  # 95% CI (gmodels::ci)
plot(iteration, RMSELMT, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("RMSE for 50 iterations \n( RMSE.mean = ",
                  round(lm_TVCM, digits = 3), " +/- ",
                  round(lm_TVCS, digits = 3), " )\nThere is a 95% likelihood that the range",
                  round(LM_TVC_95[2], digits = 3), "to",
                  round(LM_TVC_95[3], digits = 3), "covers the true error of the model."))  ## plotting RMSE
##################################################################################
################ k-nearest neighbours for TVC count by MSI_data ################
##################################################################################
# k-NN regression over the same 50 splits, tuning k in 1..20.
RMSEKNNT <- numeric(50)  # preallocated and indexed by i (the original prepended with c(), reversing the order)
for (i in seq_len(50)) {
  trainN <- MSI_TVC[train.index[, i], ]
  testN <- MSI_TVC[-train.index[, i], ]
  # BUGFIX: `trcontrol` -> `trControl`. Argument matching is case-sensitive,
  # so the misspelled name was silently swallowed by `...` and the repeated-CV
  # scheme was never applied.
  knn.model.fit.TVC <- train(TVC ~ ., method = 'knn', data = trainN, trControl = control, tuneGrid = expand.grid(k = 1:20))
  predicted.knn <- predict(knn.model.fit.TVC, testN[, -19])
  RMSEKNNT[i] <- RMSE(testN$TVC, predicted.knn)
}
KNN_TVCM <- mean(RMSEKNNT)  ## calculating mean
KNN_TVCS <- sd(RMSEKNNT)  ## calculating SD
Knn_TVC_95 <- ci(RMSEKNNT, confidence = 0.95)
plot(iteration, RMSEKNNT, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("Knn Model(MSI_TVC) Mean RMSE:",
                  round(KNN_TVCM, digits = 3), "CI 95%:",
                  round(Knn_TVC_95[2], digits = 3), "-",
                  round(Knn_TVC_95[3], digits = 3), "SD:",
                  round(KNN_TVCS, digits = 3)))  ## plotting RMSE
###################################################################################
################### Random Forests for TVC count by MSI_data ####################
###################################################################################
# Random forest over the same 50 splits, tuning mtry.
RMSERFT <- numeric(50)
for (i in seq_len(50)) {
  trainN <- MSI_TVC[train.index[, i], ]
  testN <- MSI_TVC[-train.index[, i], ]
  # BUGFIX: `trcontrol` -> `trControl` (the misspelling was ignored via `...`),
  # and the data is now passed by name instead of positionally.
  # NOTE(review): mtry is tuned up to 30 but only 18 predictors exist;
  # randomForest will warn/cap this -- consider tuneGrid mtry = 1:18.
  RF.model.fit.TVC <- train(TVC ~ ., method = 'rf', trControl = control, data = trainN, tuneGrid = expand.grid(mtry = 1:30))
  predicted.rf <- predict(RF.model.fit.TVC, testN[, -19])
  RMSERFT[i] <- RMSE(testN$TVC, predicted.rf)
}
RF_TVCM <- mean(RMSERFT)  ## calculating mean
RF_TVCS <- sd(RMSERFT)  ## calculating SD
RF_TVC_95 <- ci(RMSERFT, confidence = 0.95)
plot(iteration, RMSERFT, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("RandomForest Model(MSI_TVC) Mean RMSE:",
                  round(RF_TVCM, digits = 3), "CI 95%:",
                  round(RF_TVC_95[2], digits = 3), "-",
                  round(RF_TVC_95[3], digits = 3), "SD:",
                  round(RF_TVCS, digits = 3)))  ## plotting RMSE
###################################################################################
################### SVM_Poly for TVC count by MSI_data ###################
###################################################################################
# Polynomial-kernel SVM over the same 50 splits.
RMSESVMPLOYT <- numeric(50)
for (i in seq_len(50)) {
  trainN <- MSI_TVC[train.index[, i], ]
  testN <- MSI_TVC[-train.index[, i], ]
  # `trControl = control` added for consistency with the other models
  # (the original fell back to caret's default bootstrap resampling).
  SVM.model.fit.TVC <- train(TVC ~ ., method = 'svmPoly', data = trainN, trControl = control)
  predicted.SVM <- predict(SVM.model.fit.TVC, testN[, -19])
  RMSESVMPLOYT[i] <- RMSE(testN$TVC, predicted.SVM)  ## calculating RMSE
}
SPoly_TVCM <- mean(RMSESVMPLOYT)  ## calculating mean
SPoly_TVCS <- sd(RMSESVMPLOYT)  ## calculating SD
SP_TVC_95 <- ci(RMSESVMPLOYT, confidence = 0.95)
plot(iteration, RMSESVMPLOYT, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("SVM_Poly Model(MSI_TVC) Mean RMSE:",
                  round(SPoly_TVCM, digits = 3), "CI 95%:",
                  round(SP_TVC_95[2], digits = 3), "-",
                  round(SP_TVC_95[3], digits = 3), "SD:",
                  round(SPoly_TVCS, digits = 3)))  ## plotting RMSE
###################################################################################
######################### SVM_Radial for TVC count by MSI_data ####################
###################################################################################
# Radial-kernel SVM over the same 50 splits.
RMSESVMRAD <- numeric(50)
for (i in seq_len(50)) {
  trainN <- MSI_TVC[train.index[, i], ]
  testN <- MSI_TVC[-train.index[, i], ]
  # BUGFIX: `trcontrol` -> `trControl`; the misspelled argument was silently
  # swallowed by `...`, so the repeated-CV scheme was never applied.
  regressor <- train(TVC ~ .,
                     data = trainN,
                     method = 'svmRadial',
                     trControl = control)
  y_pred <- predict(regressor, testN[, -19])
  RMSESVMRAD[i] <- RMSE(testN$TVC, y_pred)  ## calculating RMSE
}
SRAD_TVCM <- mean(RMSESVMRAD)  ## calculating mean
SRAD_TVCS <- sd(RMSESVMRAD)  ## calculating SD
SR_TVC_95 <- ci(RMSESVMRAD, confidence = 0.95)
plot(iteration, RMSESVMRAD, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("SVM_Radial Model(MSI_TVC) Mean RMSE:",
                  round(SRAD_TVCM, digits = 3), "CI 95%:",
                  round(SR_TVC_95[2], digits = 3), "-",
                  round(SR_TVC_95[3], digits = 3), "SD:",
                  round(SRAD_TVCS, digits = 3)))  ## plotting RMSE
###########################################################################################
################################### Svm_Linear for TVC count by MSI_data ##################
###########################################################################################
# Linear-kernel SVM over the same 50 splits.
RMSESVMLM <- numeric(50)
for (i in seq_len(50)) {
  trainN <- MSI_TVC[train.index[, i], ]
  testN <- MSI_TVC[-train.index[, i], ]
  # `trControl = control` added for consistency with the other models.
  SVMLM.model.fit.TVC <- train(TVC ~ ., method = 'svmLinear', data = trainN, trControl = control)
  predicted.SVMLM.TVC <- predict(SVMLM.model.fit.TVC, testN[, -19])
  RMSESVMLM[i] <- RMSE(testN$TVC, predicted.SVMLM.TVC)  ## RMSE calculation
}
SLM_TVCM <- mean(RMSESVMLM)
iteration <- as.array(c(1:50))  # (re)defined, as in the original, for the plot x-axis
SLM_TVCS <- sd(RMSESVMLM)
SLM_TVC_95 <- ci(RMSESVMLM, confidence = 0.95)
plot(iteration, RMSESVMLM, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("SVM_Linear Model(MSI_TVC) Mean RMSE:",
                  round(SLM_TVCM, digits = 3), "CI 95%:",
                  round(SLM_TVC_95[2], digits = 3), "-",
                  round(SLM_TVC_95[3], digits = 3), "SD:",
                  round(SLM_TVCS, digits = 3)))  ## plotting RMSE
#####################################################################################################################################################
########################################### Pseudomonas MSI_data ####################################################################################
#####################################################################################################################################################
# 50 stratified 70/30 train/test splits for the Pseudomonas spp. count.
set.seed(123)
train.indexs <- createDataPartition(MSI_Ps$Pseudomonas.spp., p = .7, list = FALSE, times = 50, groups = 3)
##################################################################################
############### Linear Model for Pseudomonas count by MSI_data ##################
##################################################################################
# (header fixed: this section models Pseudomonas, not TVC)
RMSELMP <- numeric(50)
for (i in seq_len(50)) {
  trainNP <- MSI_Ps[train.indexs[, i], ]
  testNP <- MSI_Ps[-train.indexs[, i], ]
  # BUGFIX: `trcontrol` -> `trControl`; the misspelling was silently ignored.
  lm.model.fit.ps <- train(Pseudomonas.spp. ~ ., method = 'lm', data = trainNP, trControl = control)
  predicted.lm.ps <- predict(lm.model.fit.ps, testNP[, -19])  # col 19 of MSI_Ps is the Pseudomonas response
  RMSELMP[i] <- RMSE(testNP$Pseudomonas.spp., predicted.lm.ps)  ## RMSE calculation
}
LM_PM <- mean(RMSELMP)
iteration <- as.array(c(1:50))
LM_PS <- sd(RMSELMP)
LM_PS_95 <- ci(RMSELMP, confidence = 0.95)
plot(iteration, RMSELMP, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("RMSE for 50 iterations \n( RMSE.mean = ",
                  round(LM_PM, digits = 3), " +/- ",
                  round(LM_PS, digits = 3), " )\nThere is a 95% likelihood that the range",
                  round(LM_PS_95[2], digits = 3), "to",
                  round(LM_PS_95[3], digits = 3), "covers the true error of the model."))  ## plotting RMSE
###################################################################################
############### Random Forests for Pseudomonas count by MSI_data ################
###################################################################################
# (header fixed: this section models Pseudomonas, not TVC)
RMSERFP <- numeric(50)
for (i in seq_len(50)) {
  trainNP <- MSI_Ps[train.indexs[, i], ]
  testNP <- MSI_Ps[-train.indexs[, i], ]
  # BUGFIX: `trcontrol` -> `trControl` (previously ignored via `...`);
  # data now passed by name instead of positionally.
  RF.model.fit.Ps <- train(Pseudomonas.spp. ~ ., method = 'rf', trControl = control, data = trainNP)
  predicted.rf.ps <- predict(RF.model.fit.Ps, testNP[, -19])
  RMSERFP[i] <- RMSE(testNP$Pseudomonas.spp., predicted.rf.ps)
}
RF_PM <- mean(RMSERFP)
iteration <- as.array(c(1:50))
RF_PS <- sd(RMSERFP)
RF_PS_95 <- ci(RMSERFP, confidence = 0.95)
plot(iteration, RMSERFP, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("RandomForest Model(MSI_PS) Mean RMSE:",
                  round(RF_PM, digits = 3), "CI 95%:",
                  round(RF_PS_95[2], digits = 3), "-",
                  round(RF_PS_95[3], digits = 3), "SD:",
                  round(RF_PS, digits = 3)))  ## plotting RMSE
###################################################################################
################### SVM_Poly for Pseudomonas count by MSI_data ############
###################################################################################
# Polynomial-kernel SVM over the 50 Pseudomonas splits.
RMSESVPP <- numeric(50)
for (i in seq_len(50)) {
  trainNP <- MSI_Ps[train.indexs[, i], ]
  testNP <- MSI_Ps[-train.indexs[, i], ]
  # `trControl = control` added for consistency with the other models.
  SVM.model.fit.PS <- train(Pseudomonas.spp. ~ ., method = 'svmPoly', data = trainNP, trControl = control)
  predicted.SVM.PS <- predict(SVM.model.fit.PS, testNP[, -19])
  RMSESVPP[i] <- RMSE(testNP$Pseudomonas.spp., predicted.SVM.PS)  ## RMSE calculation
}
SP_PM <- mean(RMSESVPP)
iteration <- as.array(c(1:50))
SP_PS <- sd(RMSESVPP)
SP_PS_95 <- ci(RMSESVPP, confidence = 0.95)
plot(iteration, RMSESVPP, ylim = c(0, 2), xlim = c(0, 50), xlab = "Iteration", ylab = "RMSE", type = 'l',
     main = paste("SVM_Poly Model(MSI_PS) Mean RMSE:",
                  round(SP_PM, digits = 3), "CI 95%:",
                  round(SP_PS_95[2], digits = 3), "-",
                  round(SP_PS_95[3], digits = 3), "SD:",
                  round(SP_PS, digits = 3)))  ## plotting RMSE
###################################################################################
###################  SVM Linear for Pseudomonas count by MS_data  ############
###################################################################################
# Fit a linear-kernel SVM on each of the 50 partitions of the Pseudomonas data
# and record the hold-out RMSE per iteration.
# NOTE(review): no trControl is passed; caret uses its default bootstrap
# resampling here rather than the repeated-CV `control` object -- confirm.
RMSESLMP<-c()
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  SVMLM.model.fit.PS <- train(Pseudomonas.spp. ~ ., method='svmLinear', data= trainNP)
  predicted.SVMLM.PS <- predict(SVMLM.model.fit.PS,testNP[,-19])
  RMSE.SVMLM.ps <- RMSE(testNP$Pseudomonas.spp., predicted.SVMLM.PS)##RMSE calculation
  RMSESLMP<-c(RMSESLMP, RMSE.SVMLM.ps)
}
LM_PSM<-mean(RMSESLMP)   ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
LM_PSS<-sd(RMSESLMP)     ## SD of the RMSEs
SLM_PS_95<-ci(RMSESLMP, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSESLMP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("SVM_Linear Model(MSI_PS) Mean RMSE:",
     round(LM_PSM, digits = 3), "CI 95%:",
     round(SLM_PS_95[2], digits = 3), "-",
     round(SLM_PS_95[3], digits = 3), "SD:",
     round(LM_PSS,digits = 3)))##Plotting RMSE
##################################################################################
################  k-nearest neighbours for PS count by MSI_data  #################
##################################################################################
# Fit a k-NN regressor (k tuned over 1:40) on each of the 50 partitions of the
# Pseudomonas data and record the hold-out RMSE per iteration.
# (Header fixed: this section uses the MSI data, not FTIR.)
RMSESKNNP <- c()
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  # BUG FIX: caret's argument is `trControl`; `trcontrol = control` fell into
  # `...` (trControl follows ... in train()'s signature, so no partial match)
  # and the repeated-CV scheme was silently ignored.
  knn.model.fit.ps <- train(Pseudomonas.spp. ~ ., method='knn', data=trainNP, trControl = control, tuneGrid=expand.grid(k=1:40))
  predicted.knn.ps <- predict(knn.model.fit.ps, testNP[,-19])
  RMSE.knn.ps <- RMSE(testNP$Pseudomonas.spp., predicted.knn.ps)##RMSE calculation
  RMSESKNNP <- c(RMSESKNNP, RMSE.knn.ps)
}
KNN_PSM <- mean(RMSESKNNP)       ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
KNN_PSS <- sd(RMSESKNNP)         ## SD of the RMSEs
Knn_PS_95 <- ci(RMSESKNNP, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSESKNNP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("Knn Model(MSI_PS) Mean RMSE:",
     round(KNN_PSM, digits = 3), "CI 95%:",
     round(Knn_PS_95[2], digits = 3), "-",
     round(Knn_PS_95[3], digits = 3), "SD:",
     round(KNN_PSS,digits = 3)))##Plotting RMSE
###################################################################################
#########################  SVMRadial for PS count by MSI_data  ####################
###################################################################################
# Fit an RBF-kernel SVM on each of the 50 partitions of the Pseudomonas data
# and record the hold-out RMSE per iteration.
# (Header fixed: this section uses the MSI data, not FTIR.)
RMSESRADP <- c()
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  # BUG FIX: caret's argument is `trControl`; the original `trcontrol = control`
  # was swallowed by `...`, so the repeated-CV scheme was silently ignored.
  regressor.ps <- train(Pseudomonas.spp. ~ .,
                        data = trainNP,
                        method = 'svmRadial',
                        trControl = control)
  y_pred.ps <- predict(regressor.ps, testNP[,-19])
  RMSE.SVMR.ps <- RMSE(testNP$Pseudomonas.spp., y_pred.ps)
  RMSESRADP <- c(RMSESRADP, RMSE.SVMR.ps)
}
RAD_PSM <- mean(RMSESRADP)       ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
RAD_PSS <- sd(RMSESRADP)         ## SD of the RMSEs
SR_PS_95 <- ci(RMSESRADP, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSESRADP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("SVM_Radial Model(MSI_PS) Mean RMSE:",
     round(RAD_PSM, digits = 3), "CI 95%:",
     round(SR_PS_95[2], digits = 3), "-",
     round(SR_PS_95[3], digits = 3), "SD:",
     round(RAD_PSS,digits = 3)))##Plotting RMSE
|
/MachineLearning/MSI/MSI_AB_50_Iteration.R
|
permissive
|
saha-19/FreshAPI
|
R
| false
| false
| 19,675
|
r
|
##############################
#                            #
#   Group_project MSI Data   #
#      Machine Learning      #
##############################
# Clear workspace
rm(list = ls())
# Close any open graphics devices
graphics.off()
library(caret)
library(kernlab)
library(gmodels)
library(openxlsx)
source("CreateObj.R")
# Read datasets: multispectral imaging (MSI) features plus bacterial counts.
# FIX: the documented openxlsx argument is `rowNames` -- the original
# `rowName = T` only worked through partial argument matching (and T is
# reassignable; TRUE is the safe spelling).
MSI <- read.xlsx("MSIdata_CT_R1_2_updated.xlsx", sheet = 1, rowNames = TRUE)
bacteria <- MSI[, 37:38]  ## bacterial counts (TVC and Pseudomonas spp.)
MSI <- MSI[, 1:18]        ## keep the 18 MSI predictor variables
MSI_raw <- CreateObj(MSI, bacteria)
MSI_TVC <- MSI_raw[, -(20)]  ### drop the Pseudomonas column -> TVC response only
MSI_Ps <- MSI_raw[, -(19)]   ### drop the TVC column -> Pseudomonas response only
# prepare training scheme: 10-fold cross-validation repeated 3 times
control <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
# Split training and test set for TVC count: 50 random stratified 70/30 splits
set.seed(123)
train.index <- createDataPartition(MSI_TVC$TVC, p = .7, list = FALSE, groups = 3, times = 50)
##################################################################################
###################  Linear Model for TVC count by MSI_data  #####################
##################################################################################
# Train a linear model on each of the 50 random 70/30 partitions and collect
# the hold-out RMSE of every iteration.
RMSELMT <- c()
for (i in seq_len(50)) {
  train.set <- MSI_TVC[train.index[, i], ]
  test.set <- MSI_TVC[-train.index[, i], ]
  fit.lm.tvc <- train(TVC ~ ., method = 'lm', data = train.set, trControl = control)  ## train
  pred.lm.tvc <- predict(fit.lm.tvc, test.set[, -19])                                 ## prediction
  RMSELMT <- c(RMSELMT, RMSE(test.set$TVC, pred.lm.tvc))                              ## RMSE calculation
}
lm_TVCM <- mean(RMSELMT)        ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))  ## x-axis for the RMSE trace plots below
lm_TVCS <- sd(RMSELMT)          ## SD of the RMSEs
LM_TVC_95 <- ci(RMSELMT, confidence = 0.95)  ## 95% confidence interval (gmodels::ci)
plot(iteration, RMSELMT, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE", type='l',
     main=paste("RMSE for 50 iterations \n( RMSE.mean = ",
                round(lm_TVCM, digits = 3), " +/- ",
                round(lm_TVCS, digits = 3), " )\nThere is a 95% likelihood that the range",
                round(LM_TVC_95[2], digits = 3), "to",
                round(LM_TVC_95[3], digits = 3), "covers the true error of the model."))##Plotting RMSE
##################################################################################
################  k-nearest neighbours for TVC count by MSI_data  ################
##################################################################################
# Fit a k-NN regressor (k tuned over 1:20) on each of the 50 partitions and
# record the hold-out RMSE of every iteration.
RMSEKNNT <- c()
for (i in 1:50) {
  trainN <- MSI_TVC[train.index[,i],]
  testN <- MSI_TVC[-train.index[,i],]
  # BUG FIX: caret's argument is `trControl` (capital C); it follows `...` in
  # train()'s signature, so the original `trcontrol = control` was silently
  # swallowed and the repeated-CV scheme was never applied.
  knn.model.fit.TVC <- train(TVC ~ ., method='knn', data=trainN, trControl = control, tuneGrid=expand.grid(k=1:20))
  predicted.knn <- predict(knn.model.fit.TVC, testN[,-19])
  RMSE.knn <- RMSE(testN$TVC, predicted.knn)
  # FIX: append (the original prepended), so RMSEKNNT[i] lines up with
  # iteration i in the trace plot below, as in every other section.
  RMSEKNNT <- c(RMSEKNNT, RMSE.knn)
}
KNN_TVCM <- mean(RMSEKNNT)  ## mean RMSE over the 50 iterations
KNN_TVCS <- sd(RMSEKNNT)    ## SD of the RMSEs
Knn_TVC_95 <- ci(RMSEKNNT, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSEKNNT, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("Knn Model(MSI_TVC) Mean RMSE:",
     round(KNN_TVCM, digits = 3), "CI 95%:",
     round(Knn_TVC_95[2], digits = 3), "-",
     round(Knn_TVC_95[3], digits = 3), "SD:",
     round(KNN_TVCS,digits = 3)))##Plotting RMSE
###################################################################################
################   Random Forests for TVC count by MSI_data    ####################
###################################################################################
# Fit a random forest (mtry tuned) on each of the 50 partitions and record the
# hold-out RMSE of every iteration.
RMSERFT <- c()
for (i in 1:50) {
  trainN <- MSI_TVC[train.index[,i],]
  testN <- MSI_TVC[-train.index[,i],]
  # BUG FIX: caret's argument is `trControl`; `trcontrol = control` fell into
  # `...` and the repeated-CV scheme was silently ignored. The training data
  # is now passed by name via `data =` instead of positionally.
  # NOTE(review): mtry is tuned over 1:30 although only 18 predictors exist;
  # randomForest warns and caps mtry > p -- consider expand.grid(mtry = 1:18).
  RF.model.fit.TVC <- train(TVC ~ ., method='rf', trControl = control, data = trainN, tuneGrid=expand.grid(mtry=1:30))
  predicted.rf <- predict(RF.model.fit.TVC, testN[,-19])
  RMSE.rf <- RMSE(testN$TVC, predicted.rf)
  RMSERFT <- c(RMSERFT, RMSE.rf)
}
RF_TVCM <- mean(RMSERFT)  ## mean RMSE over the 50 iterations
RF_TVCS <- sd(RMSERFT)    ## SD of the RMSEs
RF_TVC_95 <- ci(RMSERFT, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSERFT, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("RandomForest Model(MSI_TVC) Mean RMSE:",
     round(RF_TVCM, digits = 3), "CI 95%:",
     round(RF_TVC_95[2], digits = 3), "-",
     round(RF_TVC_95[3], digits = 3), "SD:",
     round(RF_TVCS,digits = 3)))##Plotting RMSE
###################################################################################
###################        SVM_Ploy for TVC count by MSI_data   ###################
###################################################################################
# Fit an SVM with a polynomial kernel on each of the 50 partitions and record
# the hold-out RMSE per iteration.
# NOTE(review): no trControl is passed, so caret uses its default bootstrap
# resampling rather than the repeated-CV `control` object used by the linear
# model section -- confirm this is intended.
RMSESVMPLOYT<-c()
for (i in 1:50) {
  trainN <- MSI_TVC[train.index[,i],]
  testN <- MSI_TVC[-train.index[,i],]
  SVM.model.fit.TVC <- train(TVC ~ ., method='svmPoly', data= trainN)
  predicted.SVM <- predict(SVM.model.fit.TVC,testN[,-19])
  RMSE.SVMP <- RMSE(testN$TVC,predicted.SVM)##calculating RMSE on the hold-out set
  RMSESVMPLOYT<-c(RMSESVMPLOYT, RMSE.SVMP)
}
SPoly_TVCM<-mean(RMSESVMPLOYT)##calculating mean RMSE over the 50 iterations
SPoly_TVCS<-sd(RMSESVMPLOYT)##calculating SD of the RMSEs
SP_TVC_95<-ci(RMSESVMPLOYT, confidence = 0.95)  ## 95% confidence interval (gmodels::ci)
plot(iteration, RMSESVMPLOYT, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("SVM_Poly Model(MSI_TVC) Mean RMSE:",
     round(SPoly_TVCM, digits = 3), "CI 95%:",
     round(SP_TVC_95[2], digits = 3), "-",
     round(SP_TVC_95[3], digits = 3), "SD:",
     round(SPoly_TVCS,digits = 3)))##Plotting RMSE
###################################################################################
#########################  SVM_Radial for TVC count by MSI_data  ##################
###################################################################################
# Fit an RBF-kernel SVM on each of the 50 partitions and record the hold-out
# RMSE per iteration.
RMSESVMRAD <- c()
for (i in 1:50) {
  trainN <- MSI_TVC[train.index[,i],]
  testN <- MSI_TVC[-train.index[,i],]
  # BUG FIX: caret's argument is `trControl`; the original `trcontrol = control`
  # was swallowed by `...`, so the repeated-CV scheme was silently ignored.
  regressor <- train(TVC ~ .,
                     data = trainN,
                     method = 'svmRadial',
                     trControl = control)
  y_pred <- predict(regressor, testN[,-19])
  RMSE.SVMR.TVC <- RMSE(testN$TVC, y_pred)##calculating RMSE on the hold-out set
  RMSESVMRAD <- c(RMSESVMRAD, RMSE.SVMR.TVC)
}
SRAD_TVCM <- mean(RMSESVMRAD)##calculating mean RMSE over the 50 iterations
SRAD_TVCS <- sd(RMSESVMRAD)##calculating SD of the RMSEs
SR_TVC_95 <- ci(RMSESVMRAD, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSESVMRAD, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("SVM_Radial Model(MSI_TVC) Mean RMSE:",
     round(SRAD_TVCM, digits = 3), "CI 95%:",
     round(SR_TVC_95[2], digits = 3), "-",
     round(SR_TVC_95[3], digits = 3), "SD:",
     round(SRAD_TVCS,digits = 3)))##Plotting RMSE
###########################################################################################
###################################Svm_Linear for TVC count by MSI_data####################
###########################################################################################
# Fit a linear-kernel SVM on each of the 50 partitions and record the hold-out
# RMSE per iteration.
# NOTE(review): no trControl is passed; caret uses its default bootstrap
# resampling here rather than the repeated-CV `control` object -- confirm.
RMSESVMLM<-c()
for (i in 1:50){
  trainN <- MSI_TVC[train.index[,i],]
  testN <- MSI_TVC[-train.index[,i],]
  SVMLM.model.fit.TVC <- train(TVC ~ ., method='svmLinear', data= trainN)
  predicted.SVMLM.TVC <- predict(SVMLM.model.fit.TVC,testN[,-19])
  ##plot predicted vs Observed with cl-95%
  RMSE.SVMLM.TVC <- RMSE(testN$TVC, predicted.SVMLM.TVC)##RMSE calculation
  RMSESVMLM<- c(RMSESVMLM,RMSE.SVMLM.TVC)
}
SLM_TVCM<-mean(RMSESVMLM)  ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
SLM_TVCS<-sd(RMSESVMLM)    ## SD of the RMSEs
SLM_TVC_95<-ci(RMSESVMLM, confidence = 0.95)  ## 95% confidence interval (gmodels::ci)
plot(iteration, RMSESVMLM, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("SVM_Linear Model(MSI_TVC) Mean RMSE:",
     round(SLM_TVCM, digits = 3), "CI 95%:",
     round(SLM_TVC_95[2], digits = 3), "-",
     round(SLM_TVC_95[3], digits = 3), "SD:",
     round(SLM_TVCS,digits = 3)))##Plotting RMSE
#####################################################################################################################################################
###########################################Pseudomonas MSI_data #####################################################################################
#####################################################################################################################################################
# 50 stratified 70/30 splits for the Pseudomonas response (same seed strategy
# as the TVC splits above).
set.seed(123)
train.indexs <- createDataPartition(MSI_Ps$Pseudomonas.spp., p = .7,list = FALSE, times = 50, groups = 3)
##################################################################################
##############  Linear Model for Pseudomonas count by MSI_data  ##################
##################################################################################
# Fit a linear model on each of the 50 partitions of the Pseudomonas data and
# record the hold-out RMSE per iteration. (Header fixed: this section models
# Pseudomonas, not TVC.)
RMSELMP <- c()
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  # BUG FIX: caret's argument is `trControl`; the original `trcontrol = control`
  # was swallowed by `...`, so the repeated-CV scheme was silently ignored.
  lm.model.fit.ps <- train(Pseudomonas.spp. ~ ., method= 'lm', data=trainNP, trControl = control)
  predicted.lm.ps <- predict(lm.model.fit.ps, testNP[,-19])
  RMSE.lm.ps <- RMSE(testNP$Pseudomonas.spp., predicted.lm.ps)##RMSE calculation
  RMSELMP <- c(RMSELMP, RMSE.lm.ps)
}
LM_PM <- mean(RMSELMP)           ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
LM_PS <- sd(RMSELMP)             ## SD of the RMSEs
LM_PS_95 <- ci(RMSELMP, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSELMP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("RMSE for 50 iterations \n( RMSE.mean = ",
     round(LM_PM, digits = 3), " +/- ",
     round(LM_PS,digits = 3)," )\nThere is a 95% likelihood that the range",
     round(LM_PS_95[2], digits = 3), "to",
     round(LM_PS_95[3], digits = 3), "covers the true error of the model."))##Plotting RMSE
###################################################################################
#############  Random Forests for Pseudomonas count by MSI_data  ##################
###################################################################################
# Fit a random forest on each of the 50 partitions of the Pseudomonas data and
# record the hold-out RMSE per iteration. (Header fixed: Pseudomonas, not TVC.)
RMSERFP <- c()
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  # BUG FIX: caret's argument is `trControl`; the original `trcontrol = control`
  # was swallowed by `...`, so the repeated-CV scheme was silently ignored.
  # The training data is also now passed by name via `data =`.
  RF.model.fit.Ps <- train(Pseudomonas.spp. ~ ., method='rf', trControl = control, data = trainNP)
  predicted.rf.ps <- predict(RF.model.fit.Ps, testNP[,-19])
  RMSE.rf.ps <- RMSE(testNP$Pseudomonas.spp., predicted.rf.ps)
  RMSERFP <- c(RMSERFP, RMSE.rf.ps)
}
RF_PM <- mean(RMSERFP)           ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
RF_PS <- sd(RMSERFP)             ## SD of the RMSEs
RF_PS_95 <- ci(RMSERFP, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSERFP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("RandomForest Model(MSI_PS) Mean RMSE:",
     round(RF_PM, digits = 3), "CI 95%:",
     round(RF_PS_95[2], digits = 3), "-",
     round(RF_PS_95[3], digits = 3), "SD:",
     round(RF_PS,digits = 3)))##Plotting RMSE
###################################################################################
###################  SVMPloy for Pseudomonas count by MSI_data  ############
###################################################################################
# Fit an SVM with a polynomial kernel on each of the 50 partitions of the
# Pseudomonas data and record the hold-out RMSE per iteration.
# NOTE(review): no trControl is passed; caret uses its default bootstrap
# resampling rather than the repeated-CV `control` object -- confirm.
RMSESVPP<-c()
#train.index <- createDataPartition(MSI_Ps$Pseudomonas.spp., p = .7,list = FALSE, times = 50)
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  SVM.model.fit.PS <- train(Pseudomonas.spp. ~ ., method='svmPoly', data= trainNP)
  predicted.SVM.PS <- predict(SVM.model.fit.PS,testNP[,-19])
  RMSE.SVMP.ps <- RMSE(testNP$Pseudomonas.spp., predicted.SVM.PS)##RMSE calculation
  RMSESVPP<- c(RMSESVPP, RMSE.SVMP.ps)
}
SP_PM<-mean(RMSESVPP)   ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
SP_PS<-sd(RMSESVPP)     ## SD of the RMSEs
SP_PS_95<-ci(RMSESVPP, confidence = 0.95)  ## 95% confidence interval (gmodels::ci)
plot(iteration, RMSESVPP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("SVM_Poly Model(MSI_PS) Mean RMSE:",
     round(SP_PM, digits = 3), "CI 95%:",
     round(SP_PS_95[2], digits = 3), "-",
     round(SP_PS_95[3], digits = 3), "SD:",
     round(SP_PS,digits = 3)))##Plotting RMSE
###################################################################################
###################  SVM Linear for Pseudomonas count by MS_data  ############
###################################################################################
# Fit a linear-kernel SVM on each of the 50 partitions of the Pseudomonas data
# and record the hold-out RMSE per iteration.
# NOTE(review): no trControl is passed; caret uses its default bootstrap
# resampling rather than the repeated-CV `control` object -- confirm.
RMSESLMP<-c()
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  SVMLM.model.fit.PS <- train(Pseudomonas.spp. ~ ., method='svmLinear', data= trainNP)
  predicted.SVMLM.PS <- predict(SVMLM.model.fit.PS,testNP[,-19])
  RMSE.SVMLM.ps <- RMSE(testNP$Pseudomonas.spp., predicted.SVMLM.PS)##RMSE calculation
  RMSESLMP<-c(RMSESLMP, RMSE.SVMLM.ps)
}
LM_PSM<-mean(RMSESLMP)   ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
LM_PSS<-sd(RMSESLMP)     ## SD of the RMSEs
SLM_PS_95<-ci(RMSESLMP, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSESLMP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("SVM_Linear Model(MSI_PS) Mean RMSE:",
     round(LM_PSM, digits = 3), "CI 95%:",
     round(SLM_PS_95[2], digits = 3), "-",
     round(SLM_PS_95[3], digits = 3), "SD:",
     round(LM_PSS,digits = 3)))##Plotting RMSE
##################################################################################
################  k-nearest neighbours for PS count by MSI_data  #################
##################################################################################
# Fit a k-NN regressor (k tuned over 1:40) on each of the 50 partitions of the
# Pseudomonas data and record the hold-out RMSE per iteration.
# (Header fixed: this section uses the MSI data, not FTIR.)
RMSESKNNP <- c()
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  # BUG FIX: caret's argument is `trControl`; the original `trcontrol = control`
  # was swallowed by `...`, so the repeated-CV scheme was silently ignored.
  knn.model.fit.ps <- train(Pseudomonas.spp. ~ ., method='knn', data=trainNP, trControl = control, tuneGrid=expand.grid(k=1:40))
  predicted.knn.ps <- predict(knn.model.fit.ps, testNP[,-19])
  RMSE.knn.ps <- RMSE(testNP$Pseudomonas.spp., predicted.knn.ps)##RMSE calculation
  RMSESKNNP <- c(RMSESKNNP, RMSE.knn.ps)
}
KNN_PSM <- mean(RMSESKNNP)       ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
KNN_PSS <- sd(RMSESKNNP)         ## SD of the RMSEs
Knn_PS_95 <- ci(RMSESKNNP, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSESKNNP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("Knn Model(MSI_PS) Mean RMSE:",
     round(KNN_PSM, digits = 3), "CI 95%:",
     round(Knn_PS_95[2], digits = 3), "-",
     round(Knn_PS_95[3], digits = 3), "SD:",
     round(KNN_PSS,digits = 3)))##Plotting RMSE
###################################################################################
#########################  SVMRadial for PS count by MSI_data  ####################
###################################################################################
# Fit an RBF-kernel SVM on each of the 50 partitions of the Pseudomonas data
# and record the hold-out RMSE per iteration.
# (Header fixed: this section uses the MSI data, not FTIR.)
RMSESRADP <- c()
for (i in 1:50){
  trainNP <- MSI_Ps[train.indexs[,i],]
  testNP <- MSI_Ps[-train.indexs[,i],]
  # BUG FIX: caret's argument is `trControl`; the original `trcontrol = control`
  # was swallowed by `...`, so the repeated-CV scheme was silently ignored.
  # Assignment switched from `=` to the conventional `<-`.
  regressor.ps <- train(Pseudomonas.spp. ~ .,
                        data = trainNP,
                        method = 'svmRadial',
                        trControl = control)
  y_pred.ps <- predict(regressor.ps, testNP[,-19])
  RMSE.SVMR.ps <- RMSE(testNP$Pseudomonas.spp., y_pred.ps)
  RMSESRADP <- c(RMSESRADP, RMSE.SVMR.ps)
}
RAD_PSM <- mean(RMSESRADP)       ## mean RMSE over the 50 iterations
iteration <- as.array(c(1:50))
RAD_PSS <- sd(RMSESRADP)         ## SD of the RMSEs
SR_PS_95 <- ci(RMSESRADP, confidence = 0.95)  ## 95% confidence interval
plot(iteration, RMSESRADP, ylim=c(0,2), xlim=c(0,50), xlab="Iteration", ylab="RMSE",type='l', main=paste("SVM_Radial Model(MSI_PS) Mean RMSE:",
     round(RAD_PSM, digits = 3), "CI 95%:",
     round(SR_PS_95[2], digits = 3), "-",
     round(SR_PS_95[3], digits = 3), "SD:",
     round(RAD_PSS,digits = 3)))##Plotting RMSE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EstHMMCop.R
\name{EstHMMCop}
\alias{EstHMMCop}
\title{Estimation of bivariate Markov regime switching bivariate copula model}
\usage{
EstHMMCop(y, reg, family, max_iter, eps)
}
\arguments{
\item{y}{(nx2) data matrix (observations or residuals) that will be transformed to pseudo-observations}
\item{reg}{number of regimes}
\item{family}{'gaussian' , 't' , 'clayton' , 'frank' , 'gumbel'}
\item{max_iter}{maximum number of iterations of the EM algorithm}
\item{eps}{precision (stopping criteria); suggestion 0.0001.}
}
\value{
\item{theta}{(1 x reg) estimated parameter of the copula according to CRAN copula package (except for Frank copula, where theta = log(theta_R_Package)) for each regime (except for degrees of freedom)}
\item{dof}{estimated degree of freedom, only for the Student copula}
\item{Q}{(reg x reg) estimated transition matrix}
\item{eta}{(n x reg) conditional probabilities of being in regime k at time t given observations up to time t}
\item{tau}{estimated Kendall tau for each regime}
\item{U}{(n x 2) matrix of Rosenblatt transforms}
\item{cvm}{Cramer-von-Mises statistic for goodness-of-fit}
\item{W}{regime probabilities for the conditional distribution given the past Kendall's tau}
}
\description{
This function estimates parameters from a bivariate Markov regime switching bivariate copula model
}
\examples{
Q <- matrix(c(0.8, 0.3, 0.2, 0.7),2,2) ; kendallTau <- c(0.3 ,0.7) ;
data <- SimHMMCop(Q, 'clayton', kendallTau, 10)$SimData;
estimations <- EstHMMCop(data,2,'clayton',10000,0.0001)
}
|
/man/EstHMMCop.Rd
|
no_license
|
cran/HMMcopula
|
R
| false
| true
| 1,658
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EstHMMCop.R
\name{EstHMMCop}
\alias{EstHMMCop}
\title{Estimation of bivariate Markov regime switching bivariate copula model}
\usage{
EstHMMCop(y, reg, family, max_iter, eps)
}
\arguments{
\item{y}{(nx2) data matrix (observations or residuals) that will be transformed to pseudo-observations}
\item{reg}{number of regimes}
\item{family}{'gaussian' , 't' , 'clayton' , 'frank' , 'gumbel'}
\item{max_iter}{maximum number of iterations of the EM algorithm}
\item{eps}{precision (stopping criteria); suggestion 0.0001.}
}
\value{
\item{theta}{(1 x reg) estimated parameter of the copula according to CRAN copula package (except for Frank copula, where theta = log(theta_R_Package)) for each regime (except for degrees of freedom)}
\item{dof}{estimated degree of freedom, only for the Student copula}
\item{Q}{(reg x reg) estimated transition matrix}
\item{eta}{(n x reg) conditional probabilities of being in regime k at time t given observations up to time t}
\item{tau}{estimated Kendall tau for each regime}
\item{U}{(n x 2) matrix of Rosenblatt transforms}
\item{cvm}{Cramer-von-Mises statistic for goodness-of-fit}
\item{W}{regime probabilities for the conditional distribution given the past Kendall's tau}
}
\description{
This function estimates parameters from a bivariate Markov regime switching bivariate copula model
}
\examples{
Q <- matrix(c(0.8, 0.3, 0.2, 0.7),2,2) ; kendallTau <- c(0.3 ,0.7) ;
data <- SimHMMCop(Q, 'clayton', kendallTau, 10)$SimData;
estimations <- EstHMMCop(data,2,'clayton',10000,0.0001)
}
|
#######################################################
#
#
library(splm)
library(dplyr)
#
#
rm(list=ls())
Pdata <- read.csv("_dataW04/PanelData.csv")
str(Pdata)
summary(Pdata)
#
#
### Data
#
# Pdata$EUR_HAB ... Pdata$PPS_HAB_EU - GDP per capita, etc.
#
# Pdata$U_pc ... unemployment %
#
# Pdata$A_B ... Pdata$S ... labor market structure, see
# http://appsso.eurostat.ec.europa.eu/nui/show.do?dataset=htec_emp_reg2&lang=en
#
# Pdata$long, lat - centroids
#
# Pdata$HUClstr
# High unemployment cluster in:
# HU10, HU31, PL21, PL22, PL32, PL33, SK03, SK04
#
# Pdata$Y2010, etc. - Year dummies
#
# Pdata$OLD.EU Austria, West Germany (inc. Berlin)
#
#
# Individual (within) means for CRE (correlated random effects / Mundlak)
# estimation: average each regressor over the 6 periods per NUTS region,
# then repeat the mean across that region's rows.
Avgs <- Pdata %>% group_by(NUTS_ID) %>% arrange(time) %>% mutate(time=ceiling(rank(time)/6)) %>%
group_by(NUTS_ID, time) %>%
summarise(A.EUR_HAB_EU=mean(EUR_HAB_EU), A.KIS=mean(KIS), A.HTC=mean(HTC)) %>% ungroup
Pdata$EUR_HAB_EU_bar <- rep(Avgs$A.EUR_HAB_EU, times = 1, each = 6)
Pdata$HTC_bar <- rep(Avgs$A.HTC, times = 1, each = 6)
Pdata$KIS_bar <- rep(Avgs$A.KIS, times = 1, each = 6)
#
#
# Geographic information: build a distance-based (<= 240 km) neighbour list
# and the corresponding row-standardised spatial weights.
# NOTE(review): coordinates()/dnearneigh()/nb2listw() come from sp/spdep,
# which are not attached here -- presumably pulled in via splm's
# dependencies; verify with the installed package versions.
coords <- Pdata[Pdata$Y2011==1,c("long", "lat")]
coords <- coordinates(coords)
summary(coords)
IDs <- Pdata[Pdata$Y2011==1,"NUTS_ID"]
nb <- dnearneigh(coords, d1=0, d2=240, longlat=T, row.names = IDs)
CE_data.listw <- nb2listw(nb)
# Note: should you plot the data, use NUTS rev. 2010
#
#
#
#
#
### Model estimation
#
#
# Provide model formula separately, for syntax simplicity.
# Unemployment rate on GDP per head, high-tech employment, the
# high-unemployment-cluster dummy, its interaction with HTC, and the
# CRE (Mundlak) within-means of GDP and HTC.
fm1 <- U_pc ~ EUR_HAB_EU + HTC +HUClstr +I(HTC*HUClstr) + EUR_HAB_EU_bar + HTC_bar
#
#
# ML - pooled regression (no spatial lag, no spatial error)
?spml
mod.1 <- spml(formula = fm1, data = Pdata, index = c("NUTS_ID","time"),
listw = CE_data.listw, model = "pooling",
lag = F, spatial.error = "none")
summary(mod.1)
#
#
#
# ML - RE Spatial lag model (random individual effects, spatially lagged
# dependent variable, Lee-Yu bias correction)
Lag_mod <- spml(formula = fm1, data = Pdata, index = c("NUTS_ID","time"),
listw = CE_data.listw, model = "random",
lag = T, spatial.error = "none",
effect = "individual",
LeeYu = T, Hess = F)
summary(Lag_mod)
#
#
### Marginal effects - impacts
#
time <- length(unique(Pdata$time))  # number of panel periods (needed by impacts)
#
# ?impacts
set.seed(1128)
# Direct/indirect/total impacts with 1000 simulation draws for inference
imp1 <- impacts(Lag_mod, listw = CE_data.listw, time = time, R = 1000)
imp2 <- summary(imp1, zstats = T, short = T)
imp2
plot(imp1$sres$direct[,1:3])
plot(imp1$sres$direct[,4:6])
plot(imp1$sres$indirect[,1:3])
plot(imp1$sres$indirect[,4:6])
#
#
#
# Sensitivity analysis: re-estimate the RE spatial lag model for neighbour
# cut-off distances from 160 km to 1000 km (j*10) and collect the spatial
# coefficient and the direct/indirect impacts of GDP, HTC and the
# HTC:HUClstr interaction.
s2.df <- NULL
s2.df <- data.frame(max.dist=0, LL=0, Lambda=0, Lambda.SE=0,
GDP.DI=0, GDP.DI.SE = 0,
GDP.IN=0, GDP.IN.SE = 0,
HTC.DI=0, HTC.DI.SE = 0,
HTC.IN=0, HTC.IN.SE = 0,
Interact.DI=0, Interact.DI.SE = 0,
Interact.IN=0, Interact.IN.SE = 0)
set.seed(300)
for(j in 16:100) {
# Rebuild the neighbour list / weights for the current cut-off distance
nb <- dnearneigh(coords, d1=0, d2=j*10, longlat=T, row.names = IDs)
CE.listw <- nb2listw(nb)
#
Lag_mod <- spml(formula = fm1, data = Pdata, index = c("NUTS_ID","time"),
listw = CE.listw, model = "random",
lag = T, spatial.error = "none",
effect = "individual",
LeeYu = T, Hess = F)
sumLagmod <- summary(Lag_mod)
#
imp1 <- impacts(Lag_mod, listw = CE.listw, time = 6, R = 1000)
imp2 <- summary(imp1, zstats = T, short = T)
#
# Standard errors are recovered as estimate / z-statistic for each impact.
s2.df <- rbind(s2.df, c(j*10, Lag_mod$logLik, sumLagmod$ARCoefTable[1,1], sumLagmod$ARCoefTable[1,2],
imp2$res$direct[1], imp2$res$direct[1]/imp2$zmat[1,1],
imp2$res$indirect[1], imp2$res$indirect[1]/imp2$zmat[1,2],
imp2$res$direct[2], imp2$res$direct[2]/imp2$zmat[2,1],
imp2$res$indirect[2], imp2$res$indirect[2]/imp2$zmat[2,2],
imp2$res$direct[4], imp2$res$direct[4]/imp2$zmat[4,1],
imp2$res$indirect[4], imp2$res$indirect[4]/imp2$zmat[4,2]))
}
s2.df <- s2.df[-1,]  # drop the all-zero initialisation row
#
# Plot the sensitivity results against the neighbour cut-off distance:
# log-likelihood, spatial coefficient (Lambda), and the direct/indirect
# impacts of GDP, HTC and the HTC:HUClstr interaction. Blue dashed lines
# are +/- 1 SE bands; the vertical dotted line marks the 220 km cut-off
# used in the main specification; the red dotted line marks 0 (or 1).
par(mfrow=c(4,2))
#
plot(s2.df$LL~s2.df$max.dist, type="l", ylab="LogLik",xlab="Maximum neighbor distance (km)")
abline(v=220, lty=3)
#
plot(s2.df$Lambda~s2.df$max.dist, type="l", ylab="[Lambda]",xlab="Maximum neighbor distance (km)",
ylim=c(0.5, 1))
lines((s2.df$Lambda+s2.df$Lambda.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
lines((s2.df$Lambda-s2.df$Lambda.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
abline(v=220, lty=3)
abline(h=1, lty=3, col="red")
#
#
plot(s2.df$GDP.DI~s2.df$max.dist, type="l", ylab="[GDP Direct impact]",xlab="Maximum neighbor distance (km)",
ylim=c(-0.15,0.05))
lines((s2.df$GDP.DI-s2.df$GDP.DI.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
lines((s2.df$GDP.DI+s2.df$GDP.DI.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
abline(v=220, lty=3)
abline(h=0, lty=3, col="red")
#
plot(s2.df$GDP.IN~s2.df$max.dist, type="l", ylab="[GDP Indirect impact]",xlab="Maximum neighbor distance (km)",
ylim=c(-0.6,0.1))
lines((s2.df$GDP.IN-s2.df$GDP.IN.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
lines((s2.df$GDP.IN+s2.df$GDP.IN.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
abline(v=220, lty=3)
abline(h=0, lty=3, col="red")
#
plot(s2.df$HTC.DI~s2.df$max.dist, type="l", ylab="[TechEmp Direct impact]",xlab="Maximum neighbor distance (km)",
ylim=c(-0.1,0.7))
lines((s2.df$HTC.DI+s2.df$HTC.DI.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
lines((s2.df$HTC.DI-s2.df$HTC.DI.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
abline(v=220, lty=3)
abline(h=0, lty=3, col="red")
#
plot(s2.df$HTC.IN~s2.df$max.dist, type="l", ylab="[TechEmp Indirect impact]",xlab="Maximum neighbor distance (km)",
ylim=c(-0.3, 4.5))
lines((s2.df$HTC.IN+s2.df$HTC.IN.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
lines((s2.df$HTC.IN-s2.df$HTC.IN.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
abline(v=220, lty=3)
abline(h=0, lty=3, col="red")
#
plot(s2.df$Interact.DI~s2.df$max.dist, type="l", ylab="[HTC:HUCL Direct impact]",xlab="Maximum neighbor distance (km)",
ylim=c(-1, 0.3))
lines((s2.df$Interact.DI+s2.df$Interact.DI.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
lines((s2.df$Interact.DI-s2.df$Interact.DI.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
abline(v=220, lty=3)
abline(h=0, lty=3, col="red")
#
plot(s2.df$Interact.IN~s2.df$max.dist, type="l", ylab="[HTC:HUCL Indirect impact]",xlab="Maximum neighbor distance (km)",
ylim=c(-4, 0.5))
lines((s2.df$Interact.IN+s2.df$Interact.IN.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
lines((s2.df$Interact.IN-s2.df$Interact.IN.SE)~s2.df$max.dist, type="l", lty=2, col="blue")
abline(v=220, lty=3)
abline(h=0, lty=3, col="red")
#
par(mfrow=c(1,1))  # restore the default single-panel layout
#
#
#
#
#
#
#
#
#
#
|
/Week_05/W4_R_15_Spatial_panel.R
|
no_license
|
jfsantosm/4EK417
|
R
| false
| false
| 6,891
|
r
|
#######################################################
#
#
library(splm)
library(dplyr)
#
#
rm(list=ls())
Pdata <- read.csv("_dataW04/PanelData.csv")
str(Pdata)
summary(Pdata)
#
#
### Data
#
# Pdata$EUR_HAB ... Pdata$PPS_HAB_EU - GDP per capita, etc.
#
# Pdata$U_pc ... nemployment %
#
# Pdata$A_B ... Pdata$S ... labor market structure, see
# http://appsso.eurostat.ec.europa.eu/nui/show.do?dataset=htec_emp_reg2&lang=en
#
# Pdata$long, lat - centroids
#
# Pdata$HUClstr
# High unemployment cluster in:
# HU10, HU31, PL21, PL22, PL32, PL33, SK03, SK04
#
# Pdata$Y2010, etc. - Year dummies
#
# Pdata$OLD.EU Austria, West Germany (inc. Berlin)
#
#
# Individual (within) means for CRE estimation....
Avgs <- Pdata %>% group_by(NUTS_ID) %>% arrange(time) %>% mutate(time=ceiling(rank(time)/6)) %>%
group_by(NUTS_ID, time) %>%
summarise(A.EUR_HAB_EU=mean(EUR_HAB_EU), A.KIS=mean(KIS), A.HTC=mean(HTC)) %>% ungroup
Pdata$EUR_HAB_EU_bar <- rep(Avgs$A.EUR_HAB_EU, times = 1, each = 6)
Pdata$HTC_bar <- rep(Avgs$A.HTC, times = 1, each = 6)
Pdata$KIS_bar <- rep(Avgs$A.KIS, times = 1, each = 6)
#
#
# Geographic information
coords <- Pdata[Pdata$Y2011==1,c("long", "lat")]
coords <- coordinates(coords)
summary(coords)
IDs <- Pdata[Pdata$Y2011==1,"NUTS_ID"]
nb <- dnearneigh(coords, d1=0, d2=240, longlat=T, row.names = IDs)
CE_data.listw <- nb2listw(nb)
# Note: should you plot the data, use NUTS rev. 2010
#
#
#
#
#
#
### Model estimaton
#
#
# Provide model formula separately, for syntax simplicity
fm1 <- U_pc ~ EUR_HAB_EU + HTC +HUClstr +I(HTC*HUClstr) + EUR_HAB_EU_bar + HTC_bar
#
#
# ML - pooled regression
?spml
mod.1 <- spml(formula = fm1, data = Pdata, index = c("NUTS_ID","time"),
listw = CE_data.listw, model = "pooling",
lag = F, spatial.error = "none")
summary(mod.1)
#
#
#
# ML - RE Spatial lag model
Lag_mod <- spml(formula = fm1, data = Pdata, index = c("NUTS_ID","time"),
listw = CE_data.listw, model = "random",
lag = T, spatial.error = "none",
effect = "individual",
LeeYu = T, Hess = F)
summary(Lag_mod)
#
#
### Marginal effects - impacts
#
time <- length(unique(Pdata$time))
#
# ?impacts
set.seed(1128)
imp1 <- impacts(Lag_mod, listw = CE_data.listw, time = time, R = 1000)
imp2 <- summary(imp1, zstats = T, short = T)
imp2
plot(imp1$sres$direct[,1:3])
plot(imp1$sres$direct[,4:6])
plot(imp1$sres$indirect[,1:3])
plot(imp1$sres$indirect[,4:6])
#
#
#
s2.df <- NULL
s2.df <- data.frame(max.dist=0, LL=0, Lambda=0, Lambda.SE=0,
GDP.DI=0, GDP.DI.SE = 0,
GDP.IN=0, GDP.IN.SE = 0,
HTC.DI=0, HTC.DI.SE = 0,
HTC.IN=0, HTC.IN.SE = 0,
Interact.DI=0, Interact.DI.SE = 0,
Interact.IN=0, Interact.IN.SE = 0)
set.seed(300)
for(j in 16:100) {
nb <- dnearneigh(coords, d1=0, d2=j*10, longlat=T, row.names = IDs)
CE.listw <- nb2listw(nb)
#
Lag_mod <- spml(formula = fm1, data = Pdata, index = c("NUTS_ID","time"),
listw = CE.listw, model = "random",
lag = T, spatial.error = "none",
effect = "individual",
LeeYu = T, Hess = F)
sumLagmod <- summary(Lag_mod)
#
imp1 <- impacts(Lag_mod, listw = CE.listw, time = 6, R = 1000)
imp2 <- summary(imp1, zstats = T, short = T)
#
s2.df <- rbind(s2.df, c(j*10, Lag_mod$logLik, sumLagmod$ARCoefTable[1,1], sumLagmod$ARCoefTable[1,2],
imp2$res$direct[1], imp2$res$direct[1]/imp2$zmat[1,1],
imp2$res$indirect[1], imp2$res$indirect[1]/imp2$zmat[1,2],
imp2$res$direct[2], imp2$res$direct[2]/imp2$zmat[2,1],
imp2$res$indirect[2], imp2$res$indirect[2]/imp2$zmat[2,2],
imp2$res$direct[4], imp2$res$direct[4]/imp2$zmat[4,1],
imp2$res$indirect[4], imp2$res$indirect[4]/imp2$zmat[4,2]))
}
s2.df <- s2.df[-1,]
#
# ---- Sensitivity plots: one panel per quantity, on a 4 x 2 grid -----------
par(mfrow=c(4,2))

# Helper: draw an estimate against the distance cutoff with dashed blue
# +/- 1 SE bands, a dotted vertical guide at the 220 km cutoff, and a dotted
# red horizontal reference line at `href`.
plot_band <- function(est, se, ylab, ylim, href) {
  d <- s2.df$max.dist
  plot(est ~ d, type = "l", ylab = ylab,
       xlab = "Maximum neighbor distance (km)", ylim = ylim)
  lines((est + se) ~ d, type = "l", lty = 2, col = "blue")
  lines((est - se) ~ d, type = "l", lty = 2, col = "blue")
  abline(v = 220, lty = 3)
  abline(h = href, lty = 3, col = "red")
}

# The log-likelihood panel has no SE band or horizontal reference, so it is
# drawn directly.
plot(s2.df$LL ~ s2.df$max.dist, type = "l", ylab = "LogLik",
     xlab = "Maximum neighbor distance (km)")
abline(v = 220, lty = 3)

with(s2.df, {
  plot_band(Lambda,      Lambda.SE,      "[Lambda]",                   c(0.5, 1),      href = 1)
  plot_band(GDP.DI,      GDP.DI.SE,      "[GDP Direct impact]",        c(-0.15, 0.05), href = 0)
  plot_band(GDP.IN,      GDP.IN.SE,      "[GDP Indirect impact]",      c(-0.6, 0.1),   href = 0)
  plot_band(HTC.DI,      HTC.DI.SE,      "[TechEmp Direct impact]",    c(-0.1, 0.7),   href = 0)
  plot_band(HTC.IN,      HTC.IN.SE,      "[TechEmp Indirect impact]",  c(-0.3, 4.5),   href = 0)
  plot_band(Interact.DI, Interact.DI.SE, "[HTC:HUCL Direct impact]",   c(-1, 0.3),     href = 0)
  plot_band(Interact.IN, Interact.IN.SE, "[HTC:HUCL Indirect impact]", c(-4, 0.5),     href = 0)
})

# Restore the default single-panel layout.
par(mfrow=c(1,1))
#
#
#
#
#
#
#
#
#
#
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/plot.twitterdat.R
\name{plot.twitterdat}
\alias{plot.twitterdat}
\title{plot method for twitterdat}
\usage{
\method{plot}{twitterdat}(x, ...)
}
\arguments{
\item{x}{data to be plotted}
}
\description{
plot method for twitterdat
}
|
/man/plot.twitterdat.Rd
|
no_license
|
petermeissner/twitter_analytics
|
R
| false
| false
| 317
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/plot.twitterdat.R
\name{plot.twitterdat}
\alias{plot.twitterdat}
\title{plot method for twitterdat}
\usage{
\method{plot}{twitterdat}(x, ...)
}
\arguments{
\item{x}{data to be plotted}
}
\description{
plot method for twitterdat
}
|
# Two-QTL permutation scan for phenotypes 12-13 of the F2 cross,
# parallelised over a 16-core snowfall cluster.
library(qtl)
library(snowfall)
# Load the prepared F2 cross object (43 phenotypes) produced upstream.
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
# Impute genotypes (32 draws) and compute genotype probabilities at step = 1
# map units.
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
# Start the worker cluster and push the cross object plus qtl to the workers.
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
# Permutation test (n.perm = 10) via Haley-Knott regression; seed fixed for
# reproducibility.
set.seed(2)
system.time(
scantwo.perm.imp.6.2 <-
scantwo(LG.f2.after.crossover,pheno.col=12:13,method="hk",n.perm=10,n.cluster = 16)
)
sfStop()
# save output
save(scantwo.perm.imp.6.2, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.6.2.Rdata")
|
/F2/scantwo/scantwo_perm_6.2_new.R
|
no_license
|
leejimmy93/KIAT_cabernet
|
R
| false
| false
| 677
|
r
|
# Two-QTL permutation scan for phenotypes 12-13 of the F2 cross,
# parallelised over a 16-core snowfall cluster.
library(qtl)
library(snowfall)
# Load the prepared F2 cross object (43 phenotypes) produced upstream.
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
# Impute genotypes (32 draws) and compute genotype probabilities at step = 1
# map units.
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
# Start the worker cluster and push the cross object plus qtl to the workers.
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
# Permutation test (n.perm = 10) via Haley-Knott regression; seed fixed for
# reproducibility.
set.seed(2)
system.time(
scantwo.perm.imp.6.2 <-
scantwo(LG.f2.after.crossover,pheno.col=12:13,method="hk",n.perm=10,n.cluster = 16)
)
sfStop()
# save output
save(scantwo.perm.imp.6.2, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.6.2.Rdata")
|
# Render the scalar-function-scalar mediation simulation Rmd to Markdown.
library(knitr)
knit("Simulation_scalar_function_scalar.Rmd")
|
/Simulations/Model_based_Simulation/gypsie/sfs_simulation/Simulation_scalar_function_scalar.R
|
no_license
|
ywebbvar/functional_mediation
|
R
| false
| false
| 60
|
r
|
# Render the scalar-function-scalar mediation simulation Rmd to Markdown.
library(knitr)
knit("Simulation_scalar_function_scalar.Rmd")
|
## this function constructs a scored gene network that will be used for searching gene modules
## Yuanlong LIU
## 08-12-2017
## updated 05-01-2018
##
## Arguments:
##   gene_network_data   : data frame of gene-gene interactions
##   interaction_indices : length-2 vector giving the two columns that hold
##                         the interacting gene symbols
##   weight_index        : optional column index to be used as edge weight
##   gene_ps             : data frame with columns 'gene' and 'p'; p-values
##                         are converted to scores via p2score().  Mutually
##                         exclusive with gene_scores.
##   gene_scores         : data frame with columns 'gene' and 'score'
##   genes2exclude       : optional character vector of gene symbols to drop
## Value: an igraph object whose vertex 'weight' attribute holds the gene
##   score; the object is also saved to 'constructed_scored_net.Rdata' and a
##   summary is written to 'constructed_scored_net.log'.
construct_scored_net <- function( gene_network_data, interaction_indices, weight_index=NULL, gene_ps, gene_scores, genes2exclude = NULL )
{
## exactly one of gene_ps / gene_scores must be supplied; use the scalar,
## short-circuiting && (not the vectorized &) inside if() conditions
if( !missing( gene_ps ) && !missing( gene_scores ) ) { stop('\nPlease provide either a gene p-value data or a gene score data, not both') }
if( missing( gene_ps ) && missing( gene_scores ) ) { stop('\nPlease provide either a gene p-value data (gene_ps) or a gene score data (gene_scores)') }
if( !missing( gene_ps ) )
{
cat('\nYou have provided a gene_ps data. Gene p-values will be first converted to scores by calling the \"p2score\" function\n')
if( !all(c('gene', 'p') %in% names( gene_ps ) ) ) { stop('\nColumn names of gene_ps should contain \'gene\' and \'p\'') }
gene_scores = p2score( gene_ps )
}
## gene_scores is bound in either branch by this point, so this validates
## both user-supplied scores and scores derived from p-values
if( !missing( gene_scores ) )
if( !all(c('gene', 'score') %in% names( gene_scores ) ) ) { stop('\nColumn names of gene_scores should contain \'gene\' and \'score\'') }
## gene symbols are matched case-insensitively: upper-case both sides
gene_scores$gene = toupper( gene_scores$gene )
gene_network_data[, interaction_indices[1]] = toupper( gene_network_data[, interaction_indices[1]] )
gene_network_data[, interaction_indices[2]] = toupper( gene_network_data[, interaction_indices[2]] )
if( !is.null( genes2exclude ) )
{
gene2exclude = toupper( genes2exclude )
gene_scores = subset( gene_scores, !(gene %in% gene2exclude) )
}
ncols = ncol( gene_network_data )
## a column literally named 'weight' is special to igraph (it becomes the
## edge weight automatically), so it must be reconciled with weight_index
weight_real = which( colnames( gene_network_data ) == 'weight' )
if( is.null(weight_index) )
{
if( length(weight_real) !=0 )
{
stop('\nThe name of the ', weight_real, 'th column of the gene_network_data is \'weight\'. By default (of the igraph package), this column will be converted to the interaction/edge weight. If you do not want this column to be converted to the interaction/edge weight, please change the column name as a value other than \'weight\'. If you want to convert this column as the interaction/edge weight, please assign the column index to the \'weight_index\' argument\n')
}
}
if( !is.null(weight_index) )
{
if( length(weight_real) !=0 )
if( weight_index != weight_real )
{
stop('\nColumn ', weight_real, ' of the gene_network_data has a name \'weight\'\nBy default, the values of this column will be converted to the interaction/edge weight\nBut as you specified a different column as interaction/edge weight, you need to change the name of this column as a value other than \'weight\'')
}
names( gene_network_data )[ weight_index ] = 'weight'
## validate the chosen weight column: numeric, non-missing, non-negative
n_is_numeric = any(!is.numeric(gene_network_data$weight) )
is_na = any(is.na(gene_network_data$weight) )
is_neg = any(gene_network_data$weight < 0 )
if( n_is_numeric ) stop('Please make sure all interaction weights should be in a numeric format')
if( is_na ) stop('Please make sure all interaction weights should not be a missing value')
if( is_neg ) stop('Please make sure all interaction weights are equal to or greater than 0')
}
## reorder columns so the two gene columns (and the optional weight column)
## come first, as expected by graph_from_data_frame
main_indices = c(interaction_indices, weight_index)
gene_network_data = gene_network_data[, c( main_indices, setdiff( 1:ncols, main_indices ) )]
raw_net = graph_from_data_frame( gene_network_data, directed = FALSE, vertices = NULL )
if( !is_simple( raw_net ) )
{
## drop self-loops and collapse multiple edges
raw_net = simplify( raw_net )
}
net = raw_net
if( !'weight' %in% edge_attr_names( net ) )
{
## no weight information supplied: treat every interaction equally
E( net )$weight = 1
}
## attach gene scores as vertex weights, then drop unscored and isolated nodes
V(net)$weight = gene_scores[match(V(net)$name, gene_scores[, 'gene']), 'score']
net = induced.subgraph(net, !is.na(V(net)$weight))
net = induced.subgraph( net, degree(net) >=1 )
if( 'p' %in% colnames(gene_scores) ) V(net)$p = gene_scores$p[match( V(net)$name, gene_scores$gene )]
logfile = './constructed_scored_net.log'
cat(paste(rep("#", 100), collapse=''), file=logfile, append=FALSE)
cat('\n\nA scored-network has been successfully constructed\n', file=logfile, append=TRUE)
cat('This scored-network consists', vcount(net), 'nodes and ', ecount(net), 'interactions\n', file=logfile, append=TRUE)
cat('This scored-network is saved as an igraph object named \"net\" in \'constructed_scored_net.Rdata\'\n\n', file=logfile, append=TRUE)
cat(paste(rep("#", 100), collapse=''), file=logfile, append=TRUE)
save(net, file='constructed_scored_net.Rdata')
return( net )
}
|
/SigMod_v2/full/R/construct_scored_net.R
|
no_license
|
SafiaSafa/stage
|
R
| false
| false
| 4,842
|
r
|
## this function constructs a scored gene network that will be used for searching gene modules
## Yuanlong LIU
## 08-12-2017
## updated 05-01-2018
##
## Arguments:
##   gene_network_data   : data frame of gene-gene interactions
##   interaction_indices : length-2 vector giving the two columns that hold
##                         the interacting gene symbols
##   weight_index        : optional column index to be used as edge weight
##   gene_ps             : data frame with columns 'gene' and 'p'; p-values
##                         are converted to scores via p2score().  Mutually
##                         exclusive with gene_scores.
##   gene_scores         : data frame with columns 'gene' and 'score'
##   genes2exclude       : optional character vector of gene symbols to drop
## Value: an igraph object whose vertex 'weight' attribute holds the gene
##   score; the object is also saved to 'constructed_scored_net.Rdata' and a
##   summary is written to 'constructed_scored_net.log'.
construct_scored_net <- function( gene_network_data, interaction_indices, weight_index=NULL, gene_ps, gene_scores, genes2exclude = NULL )
{
## exactly one of gene_ps / gene_scores must be supplied; use the scalar,
## short-circuiting && (not the vectorized &) inside if() conditions
if( !missing( gene_ps ) && !missing( gene_scores ) ) { stop('\nPlease provide either a gene p-value data or a gene score data, not both') }
if( missing( gene_ps ) && missing( gene_scores ) ) { stop('\nPlease provide either a gene p-value data (gene_ps) or a gene score data (gene_scores)') }
if( !missing( gene_ps ) )
{
cat('\nYou have provided a gene_ps data. Gene p-values will be first converted to scores by calling the \"p2score\" function\n')
if( !all(c('gene', 'p') %in% names( gene_ps ) ) ) { stop('\nColumn names of gene_ps should contain \'gene\' and \'p\'') }
gene_scores = p2score( gene_ps )
}
## gene_scores is bound in either branch by this point, so this validates
## both user-supplied scores and scores derived from p-values
if( !missing( gene_scores ) )
if( !all(c('gene', 'score') %in% names( gene_scores ) ) ) { stop('\nColumn names of gene_scores should contain \'gene\' and \'score\'') }
## gene symbols are matched case-insensitively: upper-case both sides
gene_scores$gene = toupper( gene_scores$gene )
gene_network_data[, interaction_indices[1]] = toupper( gene_network_data[, interaction_indices[1]] )
gene_network_data[, interaction_indices[2]] = toupper( gene_network_data[, interaction_indices[2]] )
if( !is.null( genes2exclude ) )
{
gene2exclude = toupper( genes2exclude )
gene_scores = subset( gene_scores, !(gene %in% gene2exclude) )
}
ncols = ncol( gene_network_data )
## a column literally named 'weight' is special to igraph (it becomes the
## edge weight automatically), so it must be reconciled with weight_index
weight_real = which( colnames( gene_network_data ) == 'weight' )
if( is.null(weight_index) )
{
if( length(weight_real) !=0 )
{
stop('\nThe name of the ', weight_real, 'th column of the gene_network_data is \'weight\'. By default (of the igraph package), this column will be converted to the interaction/edge weight. If you do not want this column to be converted to the interaction/edge weight, please change the column name as a value other than \'weight\'. If you want to convert this column as the interaction/edge weight, please assign the column index to the \'weight_index\' argument\n')
}
}
if( !is.null(weight_index) )
{
if( length(weight_real) !=0 )
if( weight_index != weight_real )
{
stop('\nColumn ', weight_real, ' of the gene_network_data has a name \'weight\'\nBy default, the values of this column will be converted to the interaction/edge weight\nBut as you specified a different column as interaction/edge weight, you need to change the name of this column as a value other than \'weight\'')
}
names( gene_network_data )[ weight_index ] = 'weight'
## validate the chosen weight column: numeric, non-missing, non-negative
n_is_numeric = any(!is.numeric(gene_network_data$weight) )
is_na = any(is.na(gene_network_data$weight) )
is_neg = any(gene_network_data$weight < 0 )
if( n_is_numeric ) stop('Please make sure all interaction weights should be in a numeric format')
if( is_na ) stop('Please make sure all interaction weights should not be a missing value')
if( is_neg ) stop('Please make sure all interaction weights are equal to or greater than 0')
}
## reorder columns so the two gene columns (and the optional weight column)
## come first, as expected by graph_from_data_frame
main_indices = c(interaction_indices, weight_index)
gene_network_data = gene_network_data[, c( main_indices, setdiff( 1:ncols, main_indices ) )]
raw_net = graph_from_data_frame( gene_network_data, directed = FALSE, vertices = NULL )
if( !is_simple( raw_net ) )
{
## drop self-loops and collapse multiple edges
raw_net = simplify( raw_net )
}
net = raw_net
if( !'weight' %in% edge_attr_names( net ) )
{
## no weight information supplied: treat every interaction equally
E( net )$weight = 1
}
## attach gene scores as vertex weights, then drop unscored and isolated nodes
V(net)$weight = gene_scores[match(V(net)$name, gene_scores[, 'gene']), 'score']
net = induced.subgraph(net, !is.na(V(net)$weight))
net = induced.subgraph( net, degree(net) >=1 )
if( 'p' %in% colnames(gene_scores) ) V(net)$p = gene_scores$p[match( V(net)$name, gene_scores$gene )]
logfile = './constructed_scored_net.log'
cat(paste(rep("#", 100), collapse=''), file=logfile, append=FALSE)
cat('\n\nA scored-network has been successfully constructed\n', file=logfile, append=TRUE)
cat('This scored-network consists', vcount(net), 'nodes and ', ecount(net), 'interactions\n', file=logfile, append=TRUE)
cat('This scored-network is saved as an igraph object named \"net\" in \'constructed_scored_net.Rdata\'\n\n', file=logfile, append=TRUE)
cat(paste(rep("#", 100), collapse=''), file=logfile, append=TRUE)
save(net, file='constructed_scored_net.Rdata')
return( net )
}
|
## Lookup table mapping the user-facing `method` keyword to the S4 class that
## implements the fit, plus whether that class supports empirical-Bayes
## variance shrinkage.
methodDict <- data.table(keyword=c('glm', 'glmer', 'lmer', 'bayesglm','ridge', 'blmer'),
lmMethod=c('GLMlike', 'LMERlike','LMERlike', 'BayesGLMlike','RidgeBGLMlike', 'bLMERlike'),
implementsEbayes=c(TRUE, FALSE, FALSE, TRUE, TRUE, FALSE))
## Declare data.table NSE column names so R CMD check does not flag them as
## undefined globals.
if(getRversion() >= "2.15.1") globalVariables(c(
'keyword',
'lmMethod',
'implementsEbayes')) #zlm
##' @import stringr
## Internal workhorse: fit the two-part (hurdle) model to a plain data.frame.
## The continuous and discrete components are fit through the S4 class looked
## up from `methodDict` by the `method` keyword.
## Returns list(cont = continuous-part fit, disc = discrete-part fit).
.zlm <- function(formula, data, method='bayesglm',silent=TRUE, ...){
## perhaps we should be generic, but since we are dispatching on second argument, which might be an S3 class, let's just do this instead.
if(!inherits(data, 'data.frame')) stop("'data' must be data.frame, not matrix or array")
if(!is(formula, 'formula')) stop("'formula' must be class 'formula'")
## get response
resp <- eval(formula[[2]], data)
RHS <- removeResponse(formula, warn=FALSE)
## instantiate the model-fitting class named in methodDict (e.g. GLMlike)
## with the response split from the RHS design
obj <- new(methodDict[keyword==method, lmMethod], formula=RHS, design=data, response=resp)
obj <- fit(obj)
list(cont=obj@fitC, disc=obj@fitD)
}
## Summarize a .zlm fit.
## NOTE(review): the value of summary(out$cont) is silently discarded --
## inside a function results are not auto-printed, so only the discrete
## summary is returned.  If both summaries were intended, this should return
## list(cont = summary(out$cont), disc = summary(out$disc)); not changed here
## because that would alter the return type for existing callers.
summary.zlm <- function(out){
summary(out$cont)
summary(out$disc)
}
##' @export
## Deprecated alias retained for backward compatibility; forwards all
## arguments to zlm() after emitting a deprecation notice.
zlm.SingleCellAssay <- function(...){
.Deprecated('zlm')
zlm(...)
}
##' Zero-inflated regression for SingleCellAssay
##'
##' For each gene in sca, fits the hurdle model in \code{formula} (linear for et>0), logistic for et==0 vs et>0.
##' Return an object of class \code{ZlmFit} containing slots giving the coefficients, variance-covariance matrices, etc.
##' After each gene, optionally run the function on the fit named by 'hook'
##'
##' @section Empirical Bayes variance regularization:
##' The empirical bayes regularization of the gene variance assumes that the precision (1/variance) is drawn from a
##' gamma distribution with unknown parameters.
##' These parameters are estimated by considering the distribution of sample variances over all genes.
##' The procedure used for this is determined from
##' \code{ebayesControl}, a named list with components 'method' (one of 'MOM' or 'MLE') and 'model' (one of 'H0' or 'H1')
##' method MOM uses a method-of-moments estimator, while MLE using the marginal likelihood.
##' H0 model estimates the precisions using the intercept alone in each gene, while H1 fits the full model specified by \code{formula}
##'
##' @param formula a formula with the measurement variable on the LHS and predictors present in colData on the RHS
##' @param sca SingleCellAssay object
##' @param method character vector, either 'glm', 'glmer' or 'bayesglm'
##' @param silent Silence common problems with fitting some genes
##' @param ebayes if TRUE, regularize variance using empirical bayes method
##' @param ebayesControl list with parameters for empirical bayes procedure. See \link{ebayes}.
##' @param force Should we continue testing genes even after many errors have occurred?
##' @param hook a function called on the \code{fit} after each gene.
##' @param parallel If TRUE and \code{option(mc.cores)>1} then multiple cores will be used in fitting.
##' @param LMlike if provided, then the model defined in this object will be used, rather than following the formulas. This is intended for internal use.
##' @param onlyCoef If TRUE then only an array of model coefficients will be returned (probably only useful for bootstrapping).
##' @param ... arguments passed to the S4 model object upon construction. For example, \code{fitArgsC} and \code{fitArgsD}, or \code{coefPrior}.
##' @return a object of class \code{ZlmFit} with methods to extract coefficients, etc.
##' OR, if data is a \code{data.frame} just a list of the discrete and continuous fits.
##' @seealso ZlmFit-class, ebayes, GLMlike-class, BayesGLMlike-class
##' @aliases zlm.SingleCellAssay
##' @examples
##' data(vbetaFA)
##' zlmVbeta <- zlm(~ Stim.Condition, subset(vbetaFA, ncells==1)[1:10,])
##' slotNames(zlmVbeta)
##' #A matrix of coefficients
##' coef(zlmVbeta, 'D')['CCL2',]
##' #An array of covariance matrices
##' vcov(zlmVbeta, 'D')[,,'CCL2']
##' waldTest(zlmVbeta, CoefficientHypothesis('Stim.ConditionUnstim'))
##'
##' ## Can also provide just a \code{data.frame} instead
##' data<- data.frame(x=rnorm(500), z=rbinom(500, 1, .3))
##' logit.y <- with(data, x*2 + z*2); mu.y <- with(data, 10+10*x+10*z + rnorm(500))
##' y <- (runif(500)<exp(logit.y)/(1+exp(logit.y)))*1
##' y[y>0] <- mu.y[y>0]
##' data$y <- y
##' fit <- zlm(y ~ x+z, data)
##' summary.glm(fit$disc)
##' summary.glm(fit$cont)
##' @export
zlm <- function(formula, sca, method='bayesglm', silent=TRUE, ebayes=TRUE, ebayesControl=NULL, force=FALSE, hook=NULL, parallel=TRUE, LMlike, onlyCoef=FALSE, ...){
## could also provide argument `data`
dotsdata = list(...)$data
if(!is.null(dotsdata)){
if(!missing(sca)) stop("Cannot provide both `sca` and `data`")
sca = dotsdata
}
## Are we just a data.frame? Call simplified method.
if(!inherits(sca, 'SingleCellAssay')){
if(inherits(sca, 'data.frame')){
if(!is.null(dotsdata)){
## `data` already travels inside `...`, so do not pass it again
return(.zlm(formula, method=method, silent=silent, ...) )
} else{
return(.zlm(formula, data=sca, method=method, silent=silent, ...) )
}
} else{
stop('`sca` must inherit from `data.frame` or `SingleCellAssay`')
}
}
## Default call
if(missing(LMlike)){
## Which class are we using for the fits...look it up by keyword
method <- match.arg(method, methodDict[,keyword])
method <- methodDict[keyword==method,lmMethod]
if(!is(sca, 'SingleCellAssay')) stop("'sca' must be (or inherit) 'SingleCellAssay'")
if(!is(formula, 'formula')) stop("'formula' must be class 'formula'")
Formula <- removeResponse(formula)
## Empirical bayes method
priorVar <- 1
priorDOF <- 0
if(ebayes){
if(!methodDict[lmMethod==method,implementsEbayes]) stop('Method', method, ' does not implement empirical bayes variance shrinkage.')
ebparm <- ebayes(sca, ebayesControl, Formula)
priorVar <- ebparm[['v']]
priorDOF <- ebparm[['df']]
stopifnot(all(!is.na(ebparm)))
}
## initial value of priorVar, priorDOF default to no shrinkage
obj <- new(method, design=colData(sca), formula=Formula, priorVar=priorVar, priorDOF=priorDOF, ...)
## End Default Call
} else{
## Refitting
if(!missing(formula)) warning("Ignoring formula and using model defined in 'objLMLike'")
if(!inherits(LMlike, 'LMlike')) stop("'LMlike' must inherit from class 'LMlike'")
## update design matrix with possibly new/permuted colData
##obj <- update(LMlike, design=colData(sca))
obj <- LMlike
}
## avoiding repeated calls to the S4 object speeds calls on large sca
## due to overzealous copying semantics on R's part
ee <- exprs(sca)
genes <- colnames(ee)
ng <- length(genes)
MM <- model.matrix(obj)
coefNames <- colnames(MM)
## to facilitate our call to mclapply
listEE <- setNames(seq_len(ng), genes)
## in hopes of finding a typical gene
upperQgene <- which(rank(freq(sca), ties.method='random')==floor(.75*ng))
obj <- fit(obj, ee[,upperQgene], silent=silent)
## called internally to do fitting, but want to get local variables in scope of function
## nerror counts *consecutive* failing genes.  The updates inside .fitGeneSet
## use `<<-` so they reach this enclosing variable: with plain `<-` the
## closure would only modify a local copy and the force-guard below could
## never trigger.  (Under mclapply each forked worker still gets its own
## copy, so the guard is only effective in the serial path.)
nerror <- 0
.fitGeneSet <- function(idx){
## initialize outputs
hookOut <- NULL
tt <- try({
obj <- fit(obj, response=ee[,idx], silent=silent, quick=TRUE)
if(!is.null(hook)) hookOut <- hook(obj)
nerror <<- 0
if((idx %% 20)==0) message('.', appendLF=FALSE)
})
if(is(tt, 'try-error')){
obj@fitC <- obj@fitD <- NULL
obj@fitted <- c(C=FALSE, D=FALSE)
message('!', appendLF=FALSE)
nerror <<- nerror+1
if(nerror>5 && !force) {
stop("We seem to be having a lot of problems here...are your tests specified correctly? \n If you're sure, set force=TRUE.", tt)
}
}
if(onlyCoef) return(cbind(C=coef(obj, 'C'), D=coef(obj, 'D')))
summaries <- summarize(obj)
structure(summaries, hookOut=hookOut)
}
if(!parallel || getOption('mc.cores', 1L)==1){
listOfSummaries <- lapply(listEE, .fitGeneSet)
} else{
listOfSummaries <- parallel::mclapply(listEE, .fitGeneSet, mc.preschedule=TRUE, mc.silent=silent)
}
if(onlyCoef){
out <- do.call(abind, c(listOfSummaries, rev.along=0))
return(aperm(out, c(3,1,2)))
}
## test for try-errors
cls <- sapply(listOfSummaries, function(x) class(x))
complain <- if(force) warning else stop
if(mean(cls=='try-error')>.5) complain('Lots of errors here..something is amiss.')
## gethooks
hookOut <- NULL
if(!is.null(hook)) hookOut <- lapply(listOfSummaries, attr, which='hookOut')
message('\nDone!')
summaries <- collectSummaries(listOfSummaries)
## add rest of slots, plus class name
summaries[['LMlike']] <- obj
summaries[['sca']] <- sca
summaries[['priorVar']] <- obj@priorVar
summaries[['priorDOF']] <- obj@priorDOF
summaries[['hookOut']] <- hookOut
summaries[['Class']] <- 'ZlmFit'
## everything we need to call new
zfit <- do.call(new, as.list(summaries))
## tests, summarized objects, example fit, hooks
zfit
}
|
/R/zeroinf.R
|
no_license
|
lagzxadr/MAST
|
R
| false
| false
| 9,621
|
r
|
methodDict <- data.table(keyword=c('glm', 'glmer', 'lmer', 'bayesglm','ridge', 'blmer'),
lmMethod=c('GLMlike', 'LMERlike','LMERlike', 'BayesGLMlike','RidgeBGLMlike', 'bLMERlike'),
implementsEbayes=c(TRUE, FALSE, FALSE, TRUE, TRUE, FALSE))
if(getRversion() >= "2.15.1") globalVariables(c(
'keyword',
'lmMethod',
'implementsEbayes')) #zlm
##' @import stringr
.zlm <- function(formula, data, method='bayesglm',silent=TRUE, ...){
## perhaps we should be generic, but since we are dispatching on second argument, which might be an S3 class, let's just do this instead.
if(!inherits(data, 'data.frame')) stop("'data' must be data.frame, not matrix or array")
if(!is(formula, 'formula')) stop("'formula' must be class 'formula'")
## get response
resp <- eval(formula[[2]], data)
RHS <- removeResponse(formula, warn=FALSE)
obj <- new(methodDict[keyword==method, lmMethod], formula=RHS, design=data, response=resp)
obj <- fit(obj)
list(cont=obj@fitC, disc=obj@fitD)
}
summary.zlm <- function(out){
summary(out$cont)
summary(out$disc)
}
##' @export
zlm.SingleCellAssay <- function(...){
.Deprecated('zlm')
zlm(...)
}
##' Zero-inflated regression for SingleCellAssay
##'
##' For each gene in sca, fits the hurdle model in \code{formula} (linear for et>0), logistic for et==0 vs et>0.
##' Return an object of class \code{ZlmFit} containing slots giving the coefficients, variance-covariance matrices, etc.
##' After each gene, optionally run the function on the fit named by 'hook'
##'
##' @section Empirical Bayes variance regularization:
##' The empirical bayes regularization of the gene variance assumes that the precision (1/variance) is drawn from a
##' gamma distribution with unknown parameters.
##' These parameters are estimated by considering the distribution of sample variances over all genes.
##' The procedure used for this is determined from
##' \code{ebayesControl}, a named list with components 'method' (one of 'MOM' or 'MLE') and 'model' (one of 'H0' or 'H1')
##' method MOM uses a method-of-moments estimator, while MLE using the marginal likelihood.
##' H0 model estimates the precisions using the intercept alone in each gene, while H1 fits the full model specified by \code{formula}
##'
##' @param formula a formula with the measurement variable on the LHS and predictors present in colData on the RHS
##' @param sca SingleCellAssay object
##' @param method character vector, either 'glm', 'glmer' or 'bayesglm'
##' @param silent Silence common problems with fitting some genes
##' @param ebayes if TRUE, regularize variance using empirical bayes method
##' @param ebayesControl list with parameters for empirical bayes procedure. See \link{ebayes}.
##' @param force Should we continue testing genes even after many errors have occurred?
##' @param hook a function called on the \code{fit} after each gene.
##' @param parallel If TRUE and \code{option(mc.cores)>1} then multiple cores will be used in fitting.
##' @param LMlike if provided, then the model defined in this object will be used, rather than following the formulas. This is intended for internal use.
##' @param onlyCoef If TRUE then only an array of model coefficients will be returned (probably only useful for bootstrapping).
##' @param ... arguments passed to the S4 model object upon construction. For example, \code{fitArgsC} and \code{fitArgsD}, or \code{coefPrior}.
##' @return a object of class \code{ZlmFit} with methods to extract coefficients, etc.
##' OR, if data is a \code{data.frame} just a list of the discrete and continuous fits.
##' @seealso ZlmFit-class, ebayes, GLMlike-class, BayesGLMlike-class
##' @aliases zlm.SingleCellAssay
##' @examples
##' data(vbetaFA)
##' zlmVbeta <- zlm(~ Stim.Condition, subset(vbetaFA, ncells==1)[1:10,])
##' slotNames(zlmVbeta)
##' #A matrix of coefficients
##' coef(zlmVbeta, 'D')['CCL2',]
##' #An array of covariance matrices
##' vcov(zlmVbeta, 'D')[,,'CCL2']
##' waldTest(zlmVbeta, CoefficientHypothesis('Stim.ConditionUnstim'))
##'
##' ## Can also provide just a \code{data.frame} instead
##' data<- data.frame(x=rnorm(500), z=rbinom(500, 1, .3))
##' logit.y <- with(data, x*2 + z*2); mu.y <- with(data, 10+10*x+10*z + rnorm(500))
##' y <- (runif(500)<exp(logit.y)/(1+exp(logit.y)))*1
##' y[y>0] <- mu.y[y>0]
##' data$y <- y
##' fit <- zlm(y ~ x+z, data)
##' summary.glm(fit$disc)
##' summary.glm(fit$cont)
##' @export
zlm <- function(formula, sca, method='bayesglm', silent=TRUE, ebayes=TRUE, ebayesControl=NULL, force=FALSE, hook=NULL, parallel=TRUE, LMlike, onlyCoef=FALSE, ...){
## could also provide argument `data`
dotsdata = list(...)$data
if(!is.null(dotsdata)){
if(!missing(sca)) stop("Cannot provide both `sca` and `data`")
sca = dotsdata
}
## Are we just a data.frame? Call simplified method.
if(!inherits(sca, 'SingleCellAssay')){
if(inherits(sca, 'data.frame')){
if(!is.null(dotsdata)){
return(.zlm(formula, method=method, silent=silent, ...) )
} else{
return(.zlm(formula, data=sca, method=method, silent=silent, ...) )
}
} else{
stop('`sca` must inherit from `data.frame` or `SingleCellAssay`')
}
}
## Default call
if(missing(LMlike)){
## Which class are we using for the fits...look it up by keyword
method <- match.arg(method, methodDict[,keyword])
method <- methodDict[keyword==method,lmMethod]
if(!is(sca, 'SingleCellAssay')) stop("'sca' must be (or inherit) 'SingleCellAssay'")
if(!is(formula, 'formula')) stop("'formula' must be class 'formula'")
Formula <- removeResponse(formula)
## Empirical bayes method
priorVar <- 1
priorDOF <- 0
if(ebayes){
if(!methodDict[lmMethod==method,implementsEbayes]) stop('Method', method, ' does not implement empirical bayes variance shrinkage.')
ebparm <- ebayes(sca, ebayesControl, Formula)
priorVar <- ebparm[['v']]
priorDOF <- ebparm[['df']]
stopifnot(all(!is.na(ebparm)))
}
## initial value of priorVar, priorDOF default to no shrinkage
obj <- new(method, design=colData(sca), formula=Formula, priorVar=priorVar, priorDOF=priorDOF, ...)
## End Default Call
} else{
## Refitting
if(!missing(formula)) warning("Ignoring formula and using model defined in 'objLMLike'")
if(!inherits(LMlike, 'LMlike')) stop("'LMlike' must inherit from class 'LMlike'")
## update design matrix with possibly new/permuted colData
##obj <- update(LMlike, design=colData(sca))
obj <- LMlike
}
## avoiding repeated calls to the S4 object speeds calls on large sca
## due to overzealous copying semantics on R's part
ee <- exprs(sca)
genes <- colnames(ee)
ng <- length(genes)
MM <- model.matrix(obj)
coefNames <- colnames(MM)
## to facilitate our call to mclapply
listEE <- setNames(seq_len(ng), genes)
## in hopes of finding a typical gene
upperQgene <- which(rank(freq(sca), ties.method='random')==floor(.75*ng))
obj <- fit(obj, ee[,upperQgene], silent=silent)
## called internally to do fitting, but want to get local variables in scope of function
nerror <- 0
.fitGeneSet <- function(idx){
## initialize outputs
hookOut <- NULL
tt <- try({
obj <- fit(obj, response=ee[,idx], silent=silent, quick=TRUE)
if(!is.null(hook)) hookOut <- hook(obj)
nerror <- 0
if((idx %% 20)==0) message('.', appendLF=FALSE)
})
if(is(tt, 'try-error')){
obj@fitC <- obj@fitD <- NULL
obj@fitted <- c(C=FALSE, D=FALSE)
message('!', appendLF=FALSE)
nerror <- nerror+1
if(nerror>5 & !force) {
stop("We seem to be having a lot of problems here...are your tests specified correctly? \n If you're sure, set force=TRUE.", tt)
}
}
if(onlyCoef) return(cbind(C=coef(obj, 'C'), D=coef(obj, 'D')))
summaries <- summarize(obj)
structure(summaries, hookOut=hookOut)
}
if(!parallel || getOption('mc.cores', 1L)==1){
listOfSummaries <- lapply(listEE, .fitGeneSet)
} else{
listOfSummaries <- parallel::mclapply(listEE, .fitGeneSet, mc.preschedule=TRUE, mc.silent=silent)
}
if(onlyCoef){
out <- do.call(abind, c(listOfSummaries, rev.along=0))
return(aperm(out, c(3,1,2)))
}
## test for try-errors
cls <- sapply(listOfSummaries, function(x) class(x))
complain <- if(force) warning else stop
if(mean(cls=='try-error')>.5) complain('Lots of errors here..something is amiss.')
## gethooks
hookOut <- NULL
if(!is.null(hook)) hookOut <- lapply(listOfSummaries, attr, which='hookOut')
message('\nDone!')
summaries <- collectSummaries(listOfSummaries)
## add rest of slots, plus class name
summaries[['LMlike']] <- obj
summaries[['sca']] <- sca
summaries[['priorVar']] <- obj@priorVar
summaries[['priorDOF']] <- obj@priorDOF
summaries[['hookOut']] <- hookOut
summaries[['Class']] <- 'ZlmFit'
## everything we need to call new
zfit <- do.call(new, as.list(summaries))
## tests, summarized objects, example fit, hooks
zfit
}
|
library(glmnet)
library(ggplot2)
library(reshape2)
library(BayesBridge)
########################
## simulation example
########################
# Data Generate: one active covariate, y = 1 + 5 x + noise.
N <- 50
theta <- 5
x <- rnorm(N, sd = 10)
X <- as.matrix(data.frame(rep(1, N), x))
y <- rnorm(N) + x * theta + 1 # y=1+5x+noise
# Weighted Bootstrap: draw exponential weights (normalized to mean 1) for the
# N observations plus one extra weight that perturbs the penalty, refit the
# weighted lasso, and keep the slope coefficient.
# Preallocate the result vectors instead of growing them with c() (O(n^2)).
n_rep <- 1000
lambda0 <- 10
theta_w <- numeric(n_rep)
for (t in seq_len(n_rep)) {
  # weight
  w <- rexp(N + 1)
  w <- w / mean(w)
  # lasso: the (N+1)-th weight randomizes the penalty
  res <- glmnet(X, y, lambda = lambda0 * w[N + 1] / N, weights = w[1:N])
  theta_w[t] <- res$beta[2]
}
hist(theta_w, breaks = 20)
mean(theta_w)
# regularization path (takes about 2 mins): posterior mean of the slope over
# a grid of penalty values.
ptm <- proc.time()
lambda_grid <- exp(seq(0, 15, 0.1))
n_rep <- 500
theta_mean <- numeric(length(lambda_grid))
for (i in seq_along(lambda_grid)) {
  print(i)
  theta_w2 <- numeric(n_rep)
  for (t in seq_len(n_rep)) {
    # weight
    w <- rexp(N + 1)
    w <- w / mean(w)
    # lasso
    res <- glmnet(X, y, lambda = lambda_grid[i] * w[N + 1] / N, weights = w[1:N])
    theta_w2[t] <- res$beta[2]
  }
  theta_mean[i] <- mean(theta_w2)
}
proc.time() - ptm
plot(log(lambda_grid), theta_mean, "l",
     xlab = "Log(lambda)", ylab = "Posterior mean",
     main = "Regularization path")
########################
## diabetes example
########################
data("diabetes")
# number of observations
N = dim(diabetes$x)[1]
# number of variables
p = dim(diabetes$x)[2]
# variable names
name = colnames(diabetes$x)
# Center the data.
y = diabetes$y - mean(diabetes$y);
X = scale(diabetes$x)
# Leave-one-out cross-validation (nfolds = N) to choose the lasso penalty.
res0 = cv.glmnet(X,y,nfolds = N)
l = res0$lambda.min
# T x p matrices of posterior draws, one per method.
T = 1000
theta_w3 = matrix(NA,T,p) # weighted prior
colnames(theta_w3) = name
theta_w4 = matrix(NA,T,p) # k-fold cross-validation
colnames(theta_w4) = name
theta_w5 = matrix(NA,T,p) # bayes bridge
colnames(theta_w5) = name
# Bayesian bridge draws with alpha = 1.
# NOTE(review): the assignment below replaces theta_w5 wholesale, discarding
# the column names set above -- confirm tt$beta carries variable names,
# otherwise the downstream facet labels will be column indices.
tt = bridge.reg.stb(y, X, nsamp=T, alpha=1,nu.shape=2.0, nu.rate=2.0)
theta_w5 = tt$beta
for(t in 1:T)
{
# Exponential weights normalized to mean 1; weighted-prior run also
# randomizes the penalty (rexp(1)*l) while the fixed-prior run keeps l.
w = rexp(N)
w = w/mean(w)
res = glmnet(X,y,lambda = rexp(1)*l,weights = w)
res.cv = glmnet(X,y,lambda = l, weights = w)
theta_w3[t,] = as.vector(res$beta)
theta_w4[t,] = as.vector(res.cv$beta)
}
########################
## posterior plot
########################
theta_wprior = melt(theta_w3)[,-1]
theta_cv = melt(theta_w4)[,-1]
theta_bb = melt(theta_w5)[,-1]
theta = rbind(theta_wprior,theta_cv,theta_bb)
method = c(rep("weighted_prior",T*p),rep("fixed_prior",T*p),rep("bayes_bridge",T*p))
theta = cbind(method,theta)
colnames(theta)[2:3] = c("variable","beta")
p1 = ggplot(theta, aes(beta,colour = method)) +
geom_density(alpha = 1) +
facet_wrap(~ variable,scales = "free",ncol = 2)+
theme_bw()
p1
|
/oldwbb/WBB/WBB2.R
|
no_license
|
wiscstatman/optimizetointegrate
|
R
| false
| false
| 2,505
|
r
|
# Weighted Bayesian bootstrap experiments for the lasso:
#   (1) a simulated single-predictor example,
#   (2) a regularization path over a grid of penalties, and
#   (3) the diabetes data, comparing weighted-prior / fixed-prior /
#       Bayes bridge posterior draws.
library(glmnet)
library(ggplot2)
library(reshape2)
library(BayesBridge)
########################
## simulation example
########################
# Data Generate
N = 50
theta = 5
x = rnorm(N,sd=10)
X = as.matrix(data.frame(rep(1,N),x))
y = rnorm(N) + x*theta + 1 # y=1+5x+noise
# Weighted Bootstrap
# NOTE(review): assigning to T masks the built-in shorthand for TRUE from here on.
T = 1000
theta_w = NULL
l = 10
for(t in 1:T)
{
# weight: i.i.d. exponential draws, normalized to mean 1
w = rexp(N+1)
w = w/mean(w)
# lasso: the (N+1)-th weight perturbs the penalty; the first N weight the observations
res = glmnet(X,y,lambda = l*w[N+1]/N,weights = w[1:N])
theta_w = c(theta_w,res$beta[2])
}
hist(theta_w,breaks = 20)
mean(theta_w)
# regularization path (takes about 2 mins)
ptm <- proc.time()
l = exp(seq(0,15,0.1))
T = 500
theta_mean = NULL
for(i in 1:length(l))
{
print(i)
theta_w2 = NULL
for(t in 1:T)
{
# weight
w = rexp(N+1)
w = w/mean(w)
# lasso
res = glmnet(X,y,lambda = l[i]*w[N+1]/N,weights = w[1:N])
theta_w2 = c(theta_w2,res$beta[2])
}
# posterior mean of the slope at this penalty value
theta_mean[i] = mean(theta_w2)
}
proc.time() - ptm
# "l" is the positional 'type' argument: draw a line plot
plot(log(l),theta_mean,"l",
xlab = "Log(lambda)",ylab = "Posterior mean",
main = "Regularization path")
########################
## diabetes example
########################
data("diabetes")
# number of observations
N = dim(diabetes$x)[1]
# number of variables
p = dim(diabetes$x)[2]
# variable names
name = colnames(diabetes$x)
# Center the data.
y = diabetes$y - mean(diabetes$y);
X = scale(diabetes$x)
# leave-one-out cross-validation (nfolds = N) to pick the penalty
res0 = cv.glmnet(X,y,nfolds = N)
l = res0$lambda.min
T = 1000
theta_w3 = matrix(NA,T,p) # weighted prior
colnames(theta_w3) = name
theta_w4 = matrix(NA,T,p) # k-fold cross-validation
colnames(theta_w4) = name
# NOTE(review): theta_w5 is reassigned from tt$beta just below, so this
# preallocation and its colnames are discarded.
theta_w5 = matrix(NA,T,p) # bayes bridge
colnames(theta_w5) = name
tt = bridge.reg.stb(y, X, nsamp=T, alpha=1,nu.shape=2.0, nu.rate=2.0)
theta_w5 = tt$beta
for(t in 1:T)
{
w = rexp(N)
w = w/mean(w)
# random penalty (weighted prior) vs. fixed penalty (fixed prior)
res = glmnet(X,y,lambda = rexp(1)*l,weights = w)
res.cv = glmnet(X,y,lambda = l, weights = w)
theta_w3[t,] = as.vector(res$beta)
theta_w4[t,] = as.vector(res.cv$beta)
}
########################
## posterior plot
########################
# reshape each T x p draw matrix to long format; drop the row-index column
theta_wprior = melt(theta_w3)[,-1]
theta_cv = melt(theta_w4)[,-1]
theta_bb = melt(theta_w5)[,-1]
theta = rbind(theta_wprior,theta_cv,theta_bb)
method = c(rep("weighted_prior",T*p),rep("fixed_prior",T*p),rep("bayes_bridge",T*p))
theta = cbind(method,theta)
colnames(theta)[2:3] = c("variable","beta")
# one density panel per coefficient, colored by estimation method
p1 = ggplot(theta, aes(beta,colour = method)) +
geom_density(alpha = 1) +
facet_wrap(~ variable,scales = "free",ncol = 2)+
theme_bw()
p1
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReadNclamp.R
\name{SummariseSweepFile}
\alias{SummariseSweepFile}
\title{Extract summary information from an Nclamp/Igor Sweep File}
\usage{
SummariseSweepFile(f, Verbose = FALSE)
}
\arguments{
\item{f}{path to an Nclamp/Igor PXP format sweep file.}
\item{Verbose}{if \code{TRUE}, print details while parsing underlying PXP
file.}
}
\value{
A list of about 25 fields summarising the sweep file.
}
\description{
e.g. for import into Physiology database
}
\examples{
l <- SummariseSweepFile(system.file("igor", "WedJul407c2_001.pxp", package="IgorR"))
cat("There are", l$NumWaves, "waves in the file each of total duration", l$StimWaveLength,
"ms and sample duration", l$StimSampleInterval, "ms \\n")
}
\author{
jefferis
}
|
/man/SummariseSweepFile.Rd
|
no_license
|
cran/IgorR
|
R
| false
| true
| 803
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ReadNclamp.R
\name{SummariseSweepFile}
\alias{SummariseSweepFile}
\title{Extract summary information from an Nclamp/Igor Sweep File}
\usage{
SummariseSweepFile(f, Verbose = FALSE)
}
\arguments{
\item{f}{path to an Nclamp/Igor PXP format sweep file.}
\item{Verbose}{if \code{TRUE}, print details while parsing underlying PXP
file.}
}
\value{
A list of about 25 fields summarising the sweep file.
}
\description{
e.g. for import into Physiology database
}
\examples{
l <- SummariseSweepFile(system.file("igor", "WedJul407c2_001.pxp", package="IgorR"))
cat("There are", l$NumWaves, "waves in the file each of total duration", l$StimWaveLength,
"ms and sample duration", l$StimSampleInterval, "ms \\n")
}
\author{
jefferis
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AgainStart.R
\docType{data}
\name{COWS}
\alias{COWS}
\title{Butterfat of Cows}
\format{A data frame with 100 observations on the following 3 variables:
\itemize{
\item \code{butterfat} (average butterfat percentage)
\item \code{age} (a factor with levels \code{2 years old} and \code{Mature})
\item \code{breed} (a factor with levels \code{Ayrshire}, \code{Canadian}, \code{Guernsey}, \code{Holstein-Friesian}, and \code{Jersey})
}}
\source{
Canadian record book of purebred dairy cattle.
}
\usage{
COWS
}
\description{
Random samples of ten mature (five-years-old and older) and ten two-year-old cows were taken from each of five breeds. The average butterfat percentage of these 100 cows is stored in the variable \code{butterfat} with the type of cow stored in the variable \code{breed} and the age of the cow stored in the variable \code{age}.
}
\examples{
ggplot(data = COWS, aes(x = breed, y = butterfat, fill = age)) +
geom_boxplot(position = position_dodge(1.0)) +
labs(x = "", y = "Average butterfat percentage") + scale_fill_brewer()
summary(aov(butterfat ~ breed + age, data = COWS))
}
\references{
\itemize{ \item Sokal, R. R. and Rohlf, F. J. 1994. \emph{Biometry}. W. H. Freeman, New York, third edition. \item Ugarte, M. D., Militino, A. F., and Arnholt, A. T. 2015. \emph{Probability and Statistics with R}, Second Edition. Chapman & Hall / CRC.}
}
\keyword{datasets}
|
/man/COWS.Rd
|
no_license
|
darokun/PASWR2
|
R
| false
| false
| 1,472
|
rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/AgainStart.R
\docType{data}
\name{COWS}
\alias{COWS}
\title{Butterfat of Cows}
\format{A data frame with 100 observations on the following 3 variables:
\itemize{
\item \code{butterfat} (average butterfat percentage)
\item \code{age} (a factor with levels \code{2 years old} and \code{Mature})
\item \code{breed} (a factor with levels \code{Ayrshire}, \code{Canadian}, \code{Guernsey}, \code{Holstein-Friesian}, and \code{Jersey})
}}
\source{
Canadian record book of purebred dairy cattle.
}
\usage{
COWS
}
\description{
Random samples of ten mature (five-years-old and older) and ten two-year-old cows were taken from each of five breeds. The average butterfat percentage of these 100 cows is stored in the variable \code{butterfat} with the type of cow stored in the variable \code{breed} and the age of the cow stored in the variable \code{age}.
}
\examples{
ggplot(data = COWS, aes(x = breed, y = butterfat, fill = age)) +
geom_boxplot(position = position_dodge(1.0)) +
labs(x = "", y = "Average butterfat percentage") + scale_fill_brewer()
summary(aov(butterfat ~ breed + age, data = COWS))
}
\references{
\itemize{ \item Sokal, R. R. and Rohlf, F. J. 1994. \emph{Biometry}. W. H. Freeman, New York, third edition. \item Ugarte, M. D., Militino, A. F., and Arnholt, A. T. 2015. \emph{Probability and Statistics with R}, Second Edition. Chapman & Hall / CRC.}
}
\keyword{datasets}
|
# Install every package that the configured repositories offer but that is
# not yet installed on this machine.

# names of packages already installed locally
have <- rownames(installed.packages())
# names of packages offered by the repositories
offered <- rownames(available.packages())
# packages still missing locally
to_install <- offered[!(offered %in% have)]
install.packages(to_install)
|
/all_r_packages.R
|
no_license
|
Albertoafonsojunior/R-Programming-Language
|
R
| false
| false
| 246
|
r
|
# Install every package that the configured repositories offer but that is
# not yet installed on this machine.

# names of packages already installed locally
have <- rownames(installed.packages())
# names of packages offered by the repositories
offered <- rownames(available.packages())
# packages still missing locally
to_install <- offered[!(offered %in% have)]
install.packages(to_install)
|
#jscode <- "shinyjs.refresh = function() { location.reload(); }"
#jscode <- "shinyjs.refresh = function() { location.reload(); }"
# Modal UI for administering "Field Trial Identity" records.
# Layout: an editable handsontable of existing records, inputs + button to
# add a new record, inputs + button to update a record, and a final row of
# form-level control buttons (Form to Picture / Clear / Refresh / Control Form).
project_specific_admin_FieldTrialIdentity_Modal <-
div(
div(id = "project_specific_admin_FieldTrialIdentity_Form",
fluidRow(
column(10, offset = 1,
rHandsontableOutput("project_specific_admin_FieldTrialIdentity_Table", height = "200px"),
tags$style(type="text/css", "#table1 th {font-weight:bold;}"), br(),
# "Add New" inputs: identity, description, and the submit button
column(3, textInput("project_specific_admin_FieldTrialIdentity_AddNewFieldTrialIdentity", "Field Trial Identity", value = "", width = "100%")),
column(6, textInput("project_specific_admin_FieldTrialIdentity_AddNewDescription", "Description",value = "", width = "100%")),
column(2, br(), actionBttn("project_specific_admin_FieldTrialIdentity_AddNew", "Add New", style = "jelly", size = "xs", color = "primary", block=T))
),
column(10, offset = 1,
# "Update" inputs: identity field is disabled (presumably filled by
# server code from the table selection -- confirm in the server logic)
column(3, disabled(textInput("project_specific_admin_FieldTrialIdentity_UpdateFieldTrialIdentity", "", value = "", width = "100%"))),
column(6, textInput("project_specific_admin_FieldTrialIdentity_UpdateDescription", "", value="", width = "100%")),
column(2, br(), actionBttn("project_specific_admin_FieldTrialIdentity_Update", "Update", style = "jelly", size = "xs", color = "primary", block=T))
)
)
), br(), br(),
# Form-level control buttons
fluidRow(
column(10, offset = 1,
column(2, actionBttn("project_specific_admin_FieldTrialIdentity_FormToPicture", "Form to Picture", style = "jelly", size = "xs", color = "success", block=T)),
column(2, actionBttn("project_specific_admin_FieldTrialIdentity_Clear", "Clear", style = "jelly", size = "xs", color = "warning", block=T)),
column(2, actionBttn("project_specific_admin_FieldTrialIdentity_Refresh", "Refresh", style = "jelly", size = "xs", color = "success", block=T)),
column(2, actionBttn("project_specific_admin_FieldTrialIdentity_ControlForm", "Control Form", style = "jelly", size = "xs", color = "warning", block=T))
)
)
)
|
/tabs/server/project_specific/administration/field_trial_identity.R
|
no_license
|
mkaranja/Banana-Tracker
|
R
| false
| false
| 2,255
|
r
|
#jscode <- "shinyjs.refresh = function() { location.reload(); }"
#jscode <- "shinyjs.refresh = function() { location.reload(); }"
# Modal UI for administering "Field Trial Identity" records.
# Layout: an editable handsontable of existing records, inputs + button to
# add a new record, inputs + button to update a record, and a final row of
# form-level control buttons (Form to Picture / Clear / Refresh / Control Form).
project_specific_admin_FieldTrialIdentity_Modal <-
div(
div(id = "project_specific_admin_FieldTrialIdentity_Form",
fluidRow(
column(10, offset = 1,
rHandsontableOutput("project_specific_admin_FieldTrialIdentity_Table", height = "200px"),
tags$style(type="text/css", "#table1 th {font-weight:bold;}"), br(),
# "Add New" inputs: identity, description, and the submit button
column(3, textInput("project_specific_admin_FieldTrialIdentity_AddNewFieldTrialIdentity", "Field Trial Identity", value = "", width = "100%")),
column(6, textInput("project_specific_admin_FieldTrialIdentity_AddNewDescription", "Description",value = "", width = "100%")),
column(2, br(), actionBttn("project_specific_admin_FieldTrialIdentity_AddNew", "Add New", style = "jelly", size = "xs", color = "primary", block=T))
),
column(10, offset = 1,
# "Update" inputs: identity field is disabled (presumably filled by
# server code from the table selection -- confirm in the server logic)
column(3, disabled(textInput("project_specific_admin_FieldTrialIdentity_UpdateFieldTrialIdentity", "", value = "", width = "100%"))),
column(6, textInput("project_specific_admin_FieldTrialIdentity_UpdateDescription", "", value="", width = "100%")),
column(2, br(), actionBttn("project_specific_admin_FieldTrialIdentity_Update", "Update", style = "jelly", size = "xs", color = "primary", block=T))
)
)
), br(), br(),
# Form-level control buttons
fluidRow(
column(10, offset = 1,
column(2, actionBttn("project_specific_admin_FieldTrialIdentity_FormToPicture", "Form to Picture", style = "jelly", size = "xs", color = "success", block=T)),
column(2, actionBttn("project_specific_admin_FieldTrialIdentity_Clear", "Clear", style = "jelly", size = "xs", color = "warning", block=T)),
column(2, actionBttn("project_specific_admin_FieldTrialIdentity_Refresh", "Refresh", style = "jelly", size = "xs", color = "success", block=T)),
column(2, actionBttn("project_specific_admin_FieldTrialIdentity_ControlForm", "Control Form", style = "jelly", size = "xs", color = "warning", block=T))
)
)
)
|
## These two functions, makeCacheMatrix and cacheSolve, are
## a pair of functions which will calculate the inverse of a matrix
## and save it to a cache which can be recalled.
## makeCacheMatrix creates a special "matrix", which is really a list
## containing functions to
## (1) set the value of the matrix
## (2) get the value of the matrix
## (3) set the value of the inverse of the matrix
## (4) get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of four closures sharing this
  # function's environment, so the matrix and its cached inverse persist
  # between calls.
  inv <- NULL  # cached inverse; NULL until setinverse() stores one
  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a freshly computed inverse in the shared environment.
  setinverse <- function(inverse) inv <<- inverse
  # Return the cached inverse, or NULL if none has been computed yet.
  getinverse <- function() inv
  # Named list of accessors so callers can write x$get(), x$setinverse(), ...
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve is a function that computes the inverse of the special 'matrix' above
## if the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "matrix" x (built by makeCacheMatrix),
  # reusing the cached inverse when one is available.  Extra arguments are
  # forwarded to solve().
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the underlying matrix and store the result for reuse.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
evae2/ProgrammingAssignment2
|
R
| false
| false
| 2,592
|
r
|
## These two functions, makeCacheMatrix and cacheSolve, are
## a pair of functions which will calculate the inverse of a matrix
## and save it to a cache which can be recalled.
## makeCacheMatrix creates a special "matrix", which is really a list
## containing functions to
## (1) set the value of the matrix
## (2) get the value of the matrix
## (3) set the value of the inverse of the matrix
## (4) get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of four closures sharing this
  # function's environment, so the matrix and its cached inverse persist
  # between calls.
  inv <- NULL  # cached inverse; NULL until setinverse() stores one
  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a freshly computed inverse in the shared environment.
  setinverse <- function(inverse) inv <<- inverse
  # Return the cached inverse, or NULL if none has been computed yet.
  getinverse <- function() inv
  # Named list of accessors so callers can write x$get(), x$setinverse(), ...
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve is a function that computes the inverse of the special 'matrix' above
## if the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "matrix" x (built by makeCacheMatrix),
  # reusing the cached inverse when one is available.  Extra arguments are
  # forwarded to solve().
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the underlying matrix and store the result for reuse.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinverse(inv)
  inv
}
|
# Purpose: Run file for high frequency and data quality checks for the survey of public officials
# Author: Brian Stacy
# This file will run four R scripts in order.
# Each file can be run independently, but you will be prompted for certain paths that may not be specified
# This file will sequence the R scripts the correct way to produce an
# R markdown html file with high frequency and data quality checks for the school survey.
# 1. public_officials_api.R #This file will access the Survey Solutions API and pull rawdata and paradata
# 2. public_officials_data_cleaner.R #This file opens the raw data and cleans it to produce our indicators for the Dashboard
# 3. public_officials_paradata.R #This file opens paradata produced by Survey Solutions to calculate length
# of time per module and other checks
# 4. public_officials_data_quality_checks.Rmd #This file produces an R Markdown report containing several quality checks.
# NOTE(review): the source() calls at the bottom run 02 (api), 03 (cleaner) and
# 04 (anonymizer); the paradata/quality-check files listed above are not
# launched from this script.
######################################
# Load Required Packages#
######################################
library(here)
library(knitr)
library(markdown)
library(rmarkdown)
######################################
# User Inputs for Run File #
######################################
# Here you need to indicate the path where you replicated the folder structures on your own computer
here() #"C:/Users/wb469649/Documents/Github/GEPD"
#Country name
country <-'MDG'
country_name <- "Madagascar"
year <- '2021'
#########################
# File paths #
#########################
#The download_folder will be the location of where raw data is downloaded from the API
#The save_folder will be the location of where cleaned data is stored
# Per-user path configuration keyed on the Windows USERNAME; unknown users
# are prompted interactively via choose.dir().
if (Sys.getenv("USERNAME") == "WB469649"){
#project_folder <- "//wbgfscifs01/GEDEDU/datalib-edu/projects/gepd"
project_folder <- "C:/Users/wb469649/WBG/HEDGE Files - HEDGE Documents/GEPD-Confidential/CNT/"
download_folder <-file.path(paste(project_folder,country,paste(country,year,"GEPD", sep="_"),paste(country,year,"GEPD_v01_RAW", sep="_"),"Data/raw/Public_Officials", sep="/"))
confidential_folder <- file.path(paste(project_folder,country,paste(country,year,"GEPD", sep="_"),paste(country,year,"GEPD_v01_RAW", sep="_"),"Data/confidential/Public_Officials", sep="/"))
save_folder <- file.path(paste(project_folder,country,paste(country,year,"GEPD", sep="_"),paste(country,year,"GEPD_v01_RAW", sep="_"),"Data/anonymized/Public_Officials", sep="/"))
backup_onedrive="no"
} else if (Sys.getenv("USERNAME") == "wb550666"){
#project_folder <- "//wbgfscifs01/GEDEDU/datalib-edu/projects/gepd"
project_folder <- "C:/Users/wb550666/WBG/Ezequiel Molina - Dashboard (Team Folder)/Country_Work/"
download_folder <-file.path(paste(project_folder,country_name,year,"Data/raw/Public_Officials", sep="/"))
save_folder <- file.path(paste(project_folder,country_name,year,"Data/clean/Public_Officials", sep="/"))
# This is experimental and not currently in use.
backup_onedrive="yes"
save_folder_onedrive <- file.path(paste("C:/Users/wb550666/WBG/Ezequiel Molina - Dashboard (Team Folder)/Country_Work/", country_name,year,"Data/clean/Public_Officials", sep="/"))
} else {
download_folder <- choose.dir(default = "", caption = "Select folder to open data downloaded from API")
save_folder <- choose.dir(default = "", caption = "Select folder to save final data")
save_folder_onedrive <- choose.dir(default = "", caption = "Select folder to save backed up data to onedrive")
}
#########################
# Launch Code
########################
#move working directory to github main folder
# (sets the working directory to this script's own folder via rstudioapi;
# repeated after each source() in case a sourced script changes it)
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
#main file name:
po_file<-"public_officials_final.dta"
#launch file to access data from API
# need_api=0 presumably tells 02_public_officials_api.R to skip the live API
# pull -- confirm against that script
need_api=0
source('02_public_officials_api.R', local=TRUE)
#launch file to clean data
source('03_public_officials_cleaner.R', local=TRUE)
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
source('04_public_officials_anonymizer.R', local=TRUE)
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
|
/Countries/Madagascar/2021/Public_Officials/01_data/01_public_officials_run.R
|
permissive
|
worldbank/GEPD
|
R
| false
| false
| 4,161
|
r
|
# Purpose: Run file for high frequency and data quality checks for the survey of public officials
# Author: Brian Stacy
# This file will run four R scripts in order.
# Each file can be run independently, but you will be prompted for certain paths that may not be specified
# This file will sequence the R scripts the correct way to produce an
# R markdown html file with high frequency and data quality checks for the school survey.
# 1. public_officials_api.R #This file will access the Survey Solutions API and pull rawdata and paradata
# 2. public_officials_data_cleaner.R #This file opens the raw data and cleans it to produce our indicators for the Dashboard
# 3. public_officials_paradata.R #This file opens paradata produced by Survey Solutions to calculate length
# of time per module and other checks
# 4. public_officials_data_quality_checks.Rmd #This file produces an R Markdown report containing several quality checks.
# NOTE(review): the source() calls at the bottom run 02 (api), 03 (cleaner) and
# 04 (anonymizer); the paradata/quality-check files listed above are not
# launched from this script.
######################################
# Load Required Packages#
######################################
library(here)
library(knitr)
library(markdown)
library(rmarkdown)
######################################
# User Inputs for Run File #
######################################
# Here you need to indicate the path where you replicated the folder structures on your own computer
here() #"C:/Users/wb469649/Documents/Github/GEPD"
#Country name
country <-'MDG'
country_name <- "Madagascar"
year <- '2021'
#########################
# File paths #
#########################
#The download_folder will be the location of where raw data is downloaded from the API
#The save_folder will be the location of where cleaned data is stored
# Per-user path configuration keyed on the Windows USERNAME; unknown users
# are prompted interactively via choose.dir().
if (Sys.getenv("USERNAME") == "WB469649"){
#project_folder <- "//wbgfscifs01/GEDEDU/datalib-edu/projects/gepd"
project_folder <- "C:/Users/wb469649/WBG/HEDGE Files - HEDGE Documents/GEPD-Confidential/CNT/"
download_folder <-file.path(paste(project_folder,country,paste(country,year,"GEPD", sep="_"),paste(country,year,"GEPD_v01_RAW", sep="_"),"Data/raw/Public_Officials", sep="/"))
confidential_folder <- file.path(paste(project_folder,country,paste(country,year,"GEPD", sep="_"),paste(country,year,"GEPD_v01_RAW", sep="_"),"Data/confidential/Public_Officials", sep="/"))
save_folder <- file.path(paste(project_folder,country,paste(country,year,"GEPD", sep="_"),paste(country,year,"GEPD_v01_RAW", sep="_"),"Data/anonymized/Public_Officials", sep="/"))
backup_onedrive="no"
} else if (Sys.getenv("USERNAME") == "wb550666"){
#project_folder <- "//wbgfscifs01/GEDEDU/datalib-edu/projects/gepd"
project_folder <- "C:/Users/wb550666/WBG/Ezequiel Molina - Dashboard (Team Folder)/Country_Work/"
download_folder <-file.path(paste(project_folder,country_name,year,"Data/raw/Public_Officials", sep="/"))
save_folder <- file.path(paste(project_folder,country_name,year,"Data/clean/Public_Officials", sep="/"))
# This is experimental and not currently in use.
backup_onedrive="yes"
save_folder_onedrive <- file.path(paste("C:/Users/wb550666/WBG/Ezequiel Molina - Dashboard (Team Folder)/Country_Work/", country_name,year,"Data/clean/Public_Officials", sep="/"))
} else {
download_folder <- choose.dir(default = "", caption = "Select folder to open data downloaded from API")
save_folder <- choose.dir(default = "", caption = "Select folder to save final data")
save_folder_onedrive <- choose.dir(default = "", caption = "Select folder to save backed up data to onedrive")
}
#########################
# Launch Code
########################
#move working directory to github main folder
# (sets the working directory to this script's own folder via rstudioapi;
# repeated after each source() in case a sourced script changes it)
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
#main file name:
po_file<-"public_officials_final.dta"
#launch file to access data from API
# need_api=0 presumably tells 02_public_officials_api.R to skip the live API
# pull -- confirm against that script
need_api=0
source('02_public_officials_api.R', local=TRUE)
#launch file to clean data
source('03_public_officials_cleaner.R', local=TRUE)
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
source('04_public_officials_anonymizer.R', local=TRUE)
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
|
# Course project script: builds a tidy summary of the UCI HAR data set.
# Reads the raw train/test splits, merges them, keeps mean/std features,
# attaches readable activity labels, and writes per-subject/activity
# feature averages to course_project_dataset.txt.

# --- Load the training split: subject ids, activity ids, feature matrix ---
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt", col.names="subject")
label_train <- read.table("./UCI HAR Dataset/train/Y_train.txt", col.names="activity_id")
features <- read.table("./UCI HAR Dataset/features.txt")
data_train <- read.table("./UCI HAR Dataset/train/X_train.txt", col.names=features[,2])
df_train <- cbind(subject_train, label_train, data_train)
rm(data_train, label_train, subject_train)
# --- Load the test split the same way ---
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt", col.names="subject")
label_test <- read.table("./UCI HAR Dataset/test/Y_test.txt", col.names="activity_id")
data_test <- read.table("./UCI HAR Dataset/test/X_test.txt", col.names=features[,2])
df_test <- cbind(subject_test, label_test, data_test)
rm(data_test, label_test, subject_test, features)
# --- Stack the two splits row-wise ---
merged_df <- rbind(df_train, df_test)
rm(df_train, df_test)
library(dplyr)
library(reshape2)  # FIX: melt()/dcast() below come from reshape2; it was never attached
merged_df <- as_tibble(merged_df)  # FIX: tbl_df() is deprecated in current dplyr
# Keep only mean()/std() features; drop meanFreq and angle variables
selected_df <- select(merged_df, subject, activity_id, contains("mean"), contains("std"), -contains("meanFreq"), -contains("angle"))
# Attach human-readable activity labels
activity_label <- read.table("./UCI HAR Dataset/activity_labels.txt", col.names=c("activity_id","activity_label"))
selected_df2 <- merge(selected_df, activity_label, by.x="activity_id", by.y="activity_id")
tidy_data <- select(selected_df2, subject, activity_label, 3:69)
# Long format, then average every feature per subject/activity pair
data_melt <- melt(tidy_data, id.vars=c("subject", "activity_label"))
tidy_data2 <- dcast(data_melt, subject+activity_label ~ variable, mean)
rm(merged_df, activity_label, selected_df, selected_df2, data_melt)
# FIX: spell out the full argument name (was "row.name", matched only partially)
write.table(tidy_data2, file="./course_project_dataset.txt", row.names=FALSE)
|
/run_analysis.R
|
no_license
|
olga-demidova/Getdata_CourseProject
|
R
| false
| false
| 1,593
|
r
|
# Course project script: builds a tidy summary of the UCI HAR data set
# (merge train/test, keep mean/std features, average per subject/activity).
# Load training split: subject ids, activity ids, feature matrix
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt", col.names="subject")
label_train <- read.table("./UCI HAR Dataset/train/Y_train.txt", col.names="activity_id")
features <- read.table("./UCI HAR Dataset/features.txt")
data_train <- read.table("./UCI HAR Dataset/train/X_train.txt", col.names=features[,2])
df_train <- cbind(subject_train, label_train, data_train)
rm(data_train, label_train, subject_train)
# Load test split the same way
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt", col.names="subject")
label_test <- read.table("./UCI HAR Dataset/test/Y_test.txt", col.names="activity_id")
data_test <- read.table("./UCI HAR Dataset/test/X_test.txt", col.names=features[,2])
df_test <- cbind(subject_test, label_test, data_test)
rm(data_test, label_test, subject_test, features)
# Stack the two splits row-wise
merged_df <- rbind(df_train, df_test)
rm(df_train, df_test)
library(dplyr)
# NOTE(review): tbl_df() is deprecated in current dplyr (use as_tibble()) -- still works
merged_df <- tbl_df(merged_df)
# Keep only mean()/std() features; drop meanFreq and angle variables
selected_df <- select(merged_df, subject, activity_id, contains("mean"), contains("std"), -contains("meanFreq"), -contains("angle"))
activity_label <- read.table("./UCI HAR Dataset/activity_labels.txt", col.names=c("activity_id","activity_label"))
selected_df2 <- merge(selected_df, activity_label, by.x="activity_id", by.y="activity_id")
tidy_data <- select(selected_df2, subject, activity_label, 3:69)
# NOTE(review): melt()/dcast() come from reshape2, which is never attached here;
# the script assumes reshape2 is loaded upstream -- confirm
data_melt <- melt(tidy_data, id.vars=c("subject", "activity_label"))
tidy_data2 <- dcast(data_melt, subject+activity_label ~ variable, mean)
rm(merged_df, activity_label, selected_df, selected_df2, data_melt)
# NOTE(review): the argument is row.names; "row.name" only works via partial matching
write.table(tidy_data2, file="./course_project_dataset.txt", row.name=FALSE)
|
# Parse the Census "reason for moving" spreadsheet (tab-a-5.xls) into a tidy
# table of percentages per period, add four aggregate reason columns
# (family / employment / housing / other), and save the result as CSV.
library(tidyverse)
library(gdata)
library(hablar)
library(naniar)
library(janitor)  # FIX: row_to_names() and clean_names() below come from janitor; it was never attached
reason_raw <- read.xls("migration/data/tab-a-5.xls", sheet = 1, header = FALSE)
# remove header/footer info rows; promote the first kept row to column names
reason_df <- reason_raw %>%
  .[6:54, 1:22] %>%
  as_tibble() %>%
  row_to_names(row_number = 1) %>%
  .[-c(1), -c(11)]
# create tibble of percentage info and remove duplicate rows
percent <- reason_df %>%
  .[25:47, -c(2)] %>%
  .[-c(10, 12), ] %>%
  clean_names()
# rename column 1 to "period"
colnames(percent)[1] <- "period"
# Extract the starting year of a "YYYY-YYYY" period label as an integer,
# e.g. "2015-2016" -> 2015L
clean_year <- function(x) {
  sub("-.*", "", x) %>%
    as.integer(x)
}
percent$period <- map_int(percent$period, clean_year)
# convert all columns (except period) to type double
cleaned_percent <- percent %>%
  convert(dbl(contains("_"))) %>%
  convert(dbl(retired))
# replace missing cells (spreadsheet "-" placeholders) with 0
cleaned_percent[is.na(cleaned_percent)] <- 0
# add aggregate reasons as 4 new cols
cleaned_percent <- cleaned_percent %>%
  mutate(family = change_in_marital_status + to_establish_own_household + other_family_reason,
         employment = new_job_or_job_transfer + to_look_for_work_or_lost_job + to_be_closer_to_work_easier_commute + retired + other_job_related_reason,
         housing = wanted_to_own_home_not_rent + wanted_new_or_better_home_apartment + wanted_better_neighborhood_less_crime + wanted_cheaper_housing + foreclosure_eviction_5 + other_housing_reason,
         other = to_attend_or_leave_college + change_of_climate + health_reasons + natural_disaster_8 + other_reasons)
# save locally as csv file
write.csv(cleaned_percent, "migration/data/reason.csv", row.names=FALSE)
|
/R/parse_reason.R
|
no_license
|
jessieou/migration
|
R
| false
| false
| 1,655
|
r
|
# Parse the Census "reason for moving" spreadsheet (tab-a-5.xls) into a tidy
# table of percentages per period and save the result as CSV.
library(tidyverse)
library(gdata)
library(hablar)
library(naniar)
# NOTE(review): row_to_names() and clean_names() below come from janitor,
# which is never attached here -- presumably loaded upstream; confirm
reason_raw <- read.xls ("migration/data/tab-a-5.xls", sheet = 1, header = FALSE)
# remove header/footer info rows; promote first kept row to column names
reason_df <- reason_raw %>%
  .[6:54, 1:22] %>%
  as_tibble() %>%
  row_to_names(row_number = 1) %>%
  .[-c(1), -c(11)]
# create tibble of percentage info and remove duplicate rows
percent <- reason_df %>%
  .[25:47, -c(2)] %>%
  .[-c(10, 12), ] %>%
  clean_names()
# rename column 1 to "period"
colnames(percent)[1] <- "period"
# function to extract the integer year before "-" in a "YYYY-YYYY" label
clean_year <- function(x) {
  sub("-.*", "", x) %>%
    as.integer(x)
}
percent$period <- map_int(percent$period, clean_year)
# convert all columns (except period) to type double
cleaned_percent <- percent %>%
  convert(dbl(contains("_"))) %>%
  convert(dbl(retired))
# replace "-" (now NA after the double conversion) with 0
cleaned_percent[is.na(cleaned_percent)] <- 0
# add aggregate reasons as 4 new cols
cleaned_percent <- cleaned_percent %>%
  mutate(family = change_in_marital_status + to_establish_own_household + other_family_reason,
         employment = new_job_or_job_transfer + to_look_for_work_or_lost_job + to_be_closer_to_work_easier_commute + retired + other_job_related_reason,
         housing = wanted_to_own_home_not_rent + wanted_new_or_better_home_apartment + wanted_better_neighborhood_less_crime + wanted_cheaper_housing + foreclosure_eviction_5 + other_housing_reason,
         other = to_attend_or_leave_college + change_of_climate + health_reasons + natural_disaster_8 + other_reasons)
# save locally as csv file
write.csv(cleaned_percent, "migration/data/reason.csv", row.names=FALSE)
|
### Time series: self-worth
library(tidyverse)
library(dplyr)
library(ggplot2)
## a) Compute the mean of the variable "selfworth" for each year
# Build a data set that contains only the two variables "selfworth"
# and "year" (source data frame `dfc` is assumed to exist upstream -- confirm)
df1 <- data.frame(dfc$selfworth, dfc$year)
View(df1)
# Define the variables "Selfworth" and "Year" as vectors
Year <- c(df1$dfc.year)
Selfworth <- c(df1$dfc.selfworth)
# Create a new data set with the variables "Year" and "Selfworth"
df1 <- data.frame(Year, Selfworth)
# Group df1 by year with group_by(); summarise() generates a variable
# "averagedSelfworth" holding the yearly means of "Selfworth"
# NOTE(review): group_by(df1$Year) names the grouping column "df1$Year"
# in the result; group_by(Year) would be the conventional form -- confirm
mean_by_year1 <- df1 %>%
  group_by(df1$Year) %>%
  summarise(averagedSelfworth = mean(Selfworth, na.rm = TRUE))
## b) Graphical display of the time series
# Defining a time series object for the average selfworth starting with the observation of year
# 2011 and ending in year 2018, using a frequency of 1 because the data are collected anually
ts_averageSelfworth = ts(mean_by_year1$averagedSelfworth, start = 2011, end = 2018,
                         frequency = 1)
# The command plot() creates a time series graph only mention the time series (R knows that
# the object is a time series), time on the x-axis and the average selfworth on the y-axis
# The parameter lwd controls the line width of the time series curve, the parameter cex.main
# defines the size of the head line
plot(ts_averageSelfworth, main = "Trend of the average selfworth", xlab = "Time",
     ylab = "Average selfworth", col = "blue", lwd = 2,
     cex.main = 1.25, ylim = c(2.0,3.2))
text(2016, 2.9, "Average selfworth", col = "blue", adj = 0.3, cex = 0.9)
box(which = "figure")
## c) Linear trend component:
library(estimatr)
library(stats)
# Defining the length of the time series "ts_averageSelfworth" with the function length()
# Defining the vector of the time indices t with the function seq() from t = 1 (2011)
# to t = n (2018)
n <- length(ts_averageSelfworth)
t <- seq(from = 1, to = n)
# Simple OLS regression:
# The command lm_robust regress the time series "ts_averageSelfworth" (y-variable / outcome)
# on the time indices (x-variable) using a linear regression model and robust standard errors
# Saving the OLS regression under "linear trend"
linearTrend1 <- lm_robust(ts_averageSelfworth ~ t)
summary(linearTrend1)
# Saving the fitted values of the linear regression for every year under "linearTrend_fit"
linearTrend_fit1 <- linearTrend1$fitted.values
# Transforming the vector in a time series
linearTrend_fit1 <- ts(linearTrend_fit1, start = 2011, end = 2018, frequency = 1)
# Drawing the trend line in the time series graphic with the command lines() which includes
# the time series "linearTrend_fit" as data
plot(ts_averageSelfworth, main = "Trend of the average selfworth", xlab = "Time",
     ylab = "Average selfworth", col = "blue", lwd = 2,
     cex.main = 1.25, ylim = c(2.0,3.2))
lines(linearTrend_fit1, col = "red", lwd = 1.5)
text(2016, 2.9, "Average selfworth", col = "blue", adj = 0.3, cex = 0.9)
text(2013, 2.85, "Linear trend", col = "red", cex = 0.9)
box(which = "figure")
## d) Simple moving average of order 3:
# Before calculating the ma we have to detach the package "dplyr"
# (so that filter() below resolves to stats::filter, not dplyr::filter)
detach("package:dplyr")
# The command filter() can be used for calculating simple moving average for the time series
# "ts_averageSelfworth. We choose the time frame (t-1, t+1) resulting in 3 time periods which are
# so weighted with 1/3 (the command rep() replicates the values in x). With sides = 2 we use
# a centered moving average (= standard setting).
ts_averageSelfworth_ma <- filter(ts_averageSelfworth, filter = rep(1/3,3), sides = 2)
View(ts_averageSelfworth_ma)
# Additional to the previous R code for the graphic illustration, the command lines() draws
# the moving averages in the time series graphic
# With trend lines:
plot(ts_averageSelfworth, main = "Trend of the average selfworth", xlab = "Time",
     ylab = "Average selfworth", col = "blue", lwd = 2,
     cex.main = 1.25, ylim = c(2.0,3.2))
lines(linearTrend_fit1, col = "red", lwd = 1.5)
lines(ts_averageSelfworth_ma, col = "green", lwd = 1.5)
text(2016, 2.9, "Average selfworth", col = "blue", adj = 0.3, cex = 0.9)
text(2013, 2.85, "Linear trend", col = "red", cex = 0.9)
text(2014.5, 3.1, "Moving average", col = "green", cex = 0.9)
box(which = "figure")
|
/ANALYSIS/DATA/ts_selfworth.R
|
no_license
|
rafael-schuetz/Pareto
|
R
| false
| false
| 4,509
|
r
|
### Time series: self-worth
library(tidyverse)
library(dplyr)
library(ggplot2)
## a) Compute the mean of the variable "selfworth" for each year
# Build a data set that contains only the two variables "selfworth"
# and "year" (source data frame `dfc` is assumed to exist upstream -- confirm)
df1 <- data.frame(dfc$selfworth, dfc$year)
View(df1)
# Define the variables "Selfworth" and "Year" as vectors
Year <- c(df1$dfc.year)
Selfworth <- c(df1$dfc.selfworth)
# Create a new data set with the variables "Year" and "Selfworth"
df1 <- data.frame(Year, Selfworth)
# Group df1 by year with group_by(); summarise() generates a variable
# "averagedSelfworth" holding the yearly means of "Selfworth"
mean_by_year1 <- df1 %>%
  group_by(df1$Year) %>%
  summarise(averagedSelfworth = mean(Selfworth, na.rm = TRUE))
## b) Graphical display of the time series
# Defining a time series object for the average selfworth starting with the observation of year
# 2011 and ending in year 2018, using a frequency of 1 because the data are collected anually
ts_averageSelfworth = ts(mean_by_year1$averagedSelfworth, start = 2011, end = 2018,
                         frequency = 1)
# The command plot() creates a time series graph only mention the time series (R knows that
# the object is a time series), time on the x-axis and the average selfworth on the y-axis
# The parameter lwd controls the line width of the time series curve, the parameter cex.main
# defines the size of the head line
plot(ts_averageSelfworth, main = "Trend of the average selfworth", xlab = "Time",
     ylab = "Average selfworth", col = "blue", lwd = 2,
     cex.main = 1.25, ylim = c(2.0,3.2))
text(2016, 2.9, "Average selfworth", col = "blue", adj = 0.3, cex = 0.9)
box(which = "figure")
## c) Linear trend component:
library(estimatr)
library(stats)
# Defining the length of the time series "ts_averageSelfworth" with the function length()
# Defining the vector of the time indices t with the function seq() from t = 1 (2011)
# to t = n (2018)
n <- length(ts_averageSelfworth)
t <- seq(from = 1, to = n)
# Simple OLS regression:
# The command lm_robust regress the time series "ts_averageSelfworth" (y-variable / outcome)
# on the time indices (x-variable) using a linear regression model and robust standard errors
# Saving the OLS regression under "linear trend"
linearTrend1 <- lm_robust(ts_averageSelfworth ~ t)
summary(linearTrend1)
# Saving the fitted values of the linear regression for every year under "linearTrend_fit"
linearTrend_fit1 <- linearTrend1$fitted.values
# Transforming the vector in a time series
linearTrend_fit1 <- ts(linearTrend_fit1, start = 2011, end = 2018, frequency = 1)
# Drawing the trend line in the time series graphic with the command lines() which includes
# the time series "linearTrend_fit" as data
plot(ts_averageSelfworth, main = "Trend of the average selfworth", xlab = "Time",
     ylab = "Average selfworth", col = "blue", lwd = 2,
     cex.main = 1.25, ylim = c(2.0,3.2))
lines(linearTrend_fit1, col = "red", lwd = 1.5)
text(2016, 2.9, "Average selfworth", col = "blue", adj = 0.3, cex = 0.9)
text(2013, 2.85, "Linear trend", col = "red", cex = 0.9)
box(which = "figure")
## d) Simple moving average of order 3:
# Before calculating the ma we have to detach the package "dplyr"
# (so that filter() below resolves to stats::filter, not dplyr::filter)
detach("package:dplyr")
# The command filter() can be used for calculating simple moving average for the time series
# "ts_averageSelfworth. We choose the time frame (t-1, t+1) resulting in 3 time periods which are
# so weighted with 1/3 (the command rep() replicates the values in x). With sides = 2 we use
# a centered moving average (= standard setting).
ts_averageSelfworth_ma <- filter(ts_averageSelfworth, filter = rep(1/3,3), sides = 2)
View(ts_averageSelfworth_ma)
# Additional to the previous R code for the graphic illustration, the command lines() draws
# the moving averages in the time series graphic
# With trend lines:
plot(ts_averageSelfworth, main = "Trend of the average selfworth", xlab = "Time",
     ylab = "Average selfworth", col = "blue", lwd = 2,
     cex.main = 1.25, ylim = c(2.0,3.2))
lines(linearTrend_fit1, col = "red", lwd = 1.5)
lines(ts_averageSelfworth_ma, col = "green", lwd = 1.5)
text(2016, 2.9, "Average selfworth", col = "blue", adj = 0.3, cex = 0.9)
text(2013, 2.85, "Linear trend", col = "red", cex = 0.9)
text(2014.5, 3.1, "Moving average", col = "green", cex = 0.9)
box(which = "figure")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_location_details.R
\name{get_location_details}
\alias{get_location_details}
\title{Show location details}
\usage{
get_location_details(location_id, api_key)
}
\arguments{
\item{location_id}{Location ID. (integer)}
\item{api_key}{An active GTmetrix API key. (string)}
}
\value{
A data.frame object that contains available locations and their meta data.
}
\description{
Get details for a specific location ID.
}
\examples{
\dontrun{output_table <- get_location_details(location_id = 3, api_key = "API_KEY")}
}
|
/man/get_location_details.Rd
|
no_license
|
s-mohanasundaram/rgtmx
|
R
| false
| true
| 592
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_location_details.R
\name{get_location_details}
\alias{get_location_details}
\title{Show location details}
\usage{
get_location_details(location_id, api_key)
}
\arguments{
\item{location_id}{Location ID. (integer)}
\item{api_key}{An active GTmetrix API key. (string)}
}
\value{
A data.frame object that contains available locations and their meta data.
}
\description{
Get details for a specific location ID.
}
\examples{
\dontrun{output_table <- get_location_details(location_id = 3, api_key = "API_KEY")}
}
|
####Complete Case Analysis QoL#######
#patients that had responses for all time points
# NOTE(review): relies on objects/packages attached upstream: the `completed`
# data frame plus lattice (xyplot) and ggplot2 -- confirm they are loaded.
#Summary Statistics
# Mean QoL score by survey timing (0, 1, 3)
tapply(completed$QoL_score, completed$timing_of_survey, mean)
#0        1        3
#64.53901 63.12057 67.73050
##Mean, minimum and maximum values of QoL and uncertainty for each age group
tapply(completed$QoL_score, completed$Age_Group, mean)
#0        1
#67.59259 62.56039
tapply(completed$QoL_score, completed$Age_Group, min)
#0        1
#16.66667  0.00000
tapply(completed$QoL_score, completed$Age_Group, max)
#0   1
#100 100
########################################################################
#####Plots of QoL over time and a function of Age#####
completed$ID <- as.numeric(as.character(completed$ID))
#Spaghetti plots for each patient (one panel per ID, with OLS fit line)
xyplot(QoL_score~timing_of_survey | ID,
       data=completed,
       panel=function(x,y){
         panel.xyplot(x, y)
         panel.lmline(x,y)
       }, as.table=T)
##smoothing (loess curve per patient instead of a straight line)
xyplot(QoL_score~timing_of_survey | ID, data=completed,
       prepanel = function(x, y) prepanel.loess(x, y, family="gaussian"),
       xlab = "Time", ylab = "QoL",
       panel = function(x, y) {
         panel.xyplot(x, y)
         panel.loess(x,y, family="gaussian") }, as.table=T)
##ols plots (same as first plot)
#xyplot(QoL_score ~ timing_of_survey | ID, data=completed,
#       panel = function(x, y){
#         panel.xyplot(x, y)
#         panel.lmline(x, y)
#       }, as.table=T)
#Plotting regressions for each subject (per-ID OLS summaries)
qol_ols_pp <- by(completed, factor(completed$ID), function(x) summary(lm(QoL_score ~ timing_of_survey, data=x)))
## stem plot for fitted rate of change (per-ID OLS slope)
rate <- by(completed, factor(completed$ID), function(data) coefficients(lm(QoL_score ~ timing_of_survey, data = data))[[2]])
rate <- unlist(rate)
names(rate) <- NULL
summary(rate)
stem(rate, scale=2)
## stem plot for R sq (per-ID OLS goodness of fit)
rsq <- by(completed, factor(completed$ID), function(data) summary(lm(QoL_score~ timing_of_survey, data = data))$r.squared)
rsq <- unlist(rsq)
names(rsq) <- NULL
summary(rsq)
stem(rsq, scale=2)
##Using ggplot
p <- ggplot(data = completed, aes(x = timing_of_survey, y = QoL_score, group = ID))
p + geom_line() #raw data
p + stat_smooth( method = "lm", se = FALSE) #ols for each patient across time
p + geom_line() + stat_smooth(aes(group=1), method = "lm", se = FALSE)
#obtaining the slopes from linear model by id
slopes <- by(completed, completed$ID,
             function(data) coefficients(lm(QoL_score ~ timing_of_survey, data=data))[[2]])
slopes1 <- unlist(slopes)
names(slopes1) <- NULL
mean(slopes1)
sqrt(var(slopes1))
p + geom_line() + facet_grid(. ~ Age_Group) #separate by age group
p + stat_smooth( method = "lm", se = FALSE) +
  facet_grid(.~Age_Group) #ols by age group
# NOTE(review): fun.y/fun.args on stat_summary are soft-deprecated in
# ggplot2 >= 3.3 (renamed to fun/fun.args) -- still works, but warns.
p + geom_line() +
  stat_summary(aes(group = 1), geom = "point", fun.y = mean, shape = 17, size = 3) +
  facet_grid(. ~ Age_Group) #grand mean at each time point for each age group
p + geom_line() +
  stat_summary(aes(group = 1), geom = "point", fun.y = quantile, fun.args=(list(probs = c(0.25, 0.75))), shape = 17, size = 3) +
  facet_grid(. ~ Age_Group) #25th and 75th percentile
p + geom_line() +
  stat_smooth(aes(group = 1)) +
  stat_summary(aes(group = 1), geom = "point", fun.y = mean, shape = 17, size = 3) +
  facet_grid(. ~ Age_Group) #smoothing for each age group
p + geom_line() +
  stat_smooth(aes(group = 1), method = "lm", se = FALSE) +
  stat_summary(aes(group = 1), geom = "point", fun.y = mean, shape = 17, size = 3) +
  facet_grid(. ~ Age_Group) #grand ols for each age group
|
/R96/Statistical_Program/complete_cases_analysis_qol.R
|
no_license
|
stilianoudakis/Biostatistical_Consulting_Lab
|
R
| false
| false
| 3,563
|
r
|
####Complete Case Analysis QoL#######
#patients that had responses for all time points
# NOTE(review): relies on objects/packages attached upstream: the `completed`
# data frame plus lattice (xyplot) and ggplot2 -- confirm they are loaded.
#Summary Statistics
tapply(completed$QoL_score, completed$timing_of_survey, mean)
#0        1        3
#64.53901 63.12057 67.73050
##Mean, minimum and maximum values of QoL and uncertainty for each age group
tapply(completed$QoL_score, completed$Age_Group, mean)
#0        1
#67.59259 62.56039
tapply(completed$QoL_score, completed$Age_Group, min)
#0        1
#16.66667  0.00000
tapply(completed$QoL_score, completed$Age_Group, max)
#0   1
#100 100
########################################################################
#####Plots of QoL over time and a function of Age#####
completed$ID <- as.numeric(as.character(completed$ID))
#Spaghetti plots for each patient (one panel per ID, with OLS fit line)
xyplot(QoL_score~timing_of_survey | ID,
       data=completed,
       panel=function(x,y){
         panel.xyplot(x, y)
         panel.lmline(x,y)
       }, as.table=T)
##smoothing (loess curve per patient instead of a straight line)
xyplot(QoL_score~timing_of_survey | ID, data=completed,
       prepanel = function(x, y) prepanel.loess(x, y, family="gaussian"),
       xlab = "Time", ylab = "QoL",
       panel = function(x, y) {
         panel.xyplot(x, y)
         panel.loess(x,y, family="gaussian") }, as.table=T)
##ols plots (same as first plot)
#xyplot(QoL_score ~ timing_of_survey | ID, data=completed,
#       panel = function(x, y){
#         panel.xyplot(x, y)
#         panel.lmline(x, y)
#       }, as.table=T)
#Plotting regressions for each subject (per-ID OLS summaries)
qol_ols_pp <- by(completed, factor(completed$ID), function(x) summary(lm(QoL_score ~ timing_of_survey, data=x)))
## stem plot for fitted rate of change (per-ID OLS slope)
rate <- by(completed, factor(completed$ID), function(data) coefficients(lm(QoL_score ~ timing_of_survey, data = data))[[2]])
rate <- unlist(rate)
names(rate) <- NULL
summary(rate)
stem(rate, scale=2)
## stem plot for R sq (per-ID OLS goodness of fit)
rsq <- by(completed, factor(completed$ID), function(data) summary(lm(QoL_score~ timing_of_survey, data = data))$r.squared)
rsq <- unlist(rsq)
names(rsq) <- NULL
summary(rsq)
stem(rsq, scale=2)
##Using ggplot
p <- ggplot(data = completed, aes(x = timing_of_survey, y = QoL_score, group = ID))
p + geom_line() #raw data
p + stat_smooth( method = "lm", se = FALSE) #ols for each patient across time
p + geom_line() + stat_smooth(aes(group=1), method = "lm", se = FALSE)
#obtaining the slopes from linear model by id
slopes <- by(completed, completed$ID,
             function(data) coefficients(lm(QoL_score ~ timing_of_survey, data=data))[[2]])
slopes1 <- unlist(slopes)
names(slopes1) <- NULL
mean(slopes1)
sqrt(var(slopes1))
p + geom_line() + facet_grid(. ~ Age_Group) #separate by age group
p + stat_smooth( method = "lm", se = FALSE) +
  facet_grid(.~Age_Group) #ols by age group
# NOTE(review): fun.y/fun.args on stat_summary are soft-deprecated in
# ggplot2 >= 3.3 (renamed to fun/fun.args) -- still works, but warns.
p + geom_line() +
  stat_summary(aes(group = 1), geom = "point", fun.y = mean, shape = 17, size = 3) +
  facet_grid(. ~ Age_Group) #grand mean at each time point for each age group
p + geom_line() +
  stat_summary(aes(group = 1), geom = "point", fun.y = quantile, fun.args=(list(probs = c(0.25, 0.75))), shape = 17, size = 3) +
  facet_grid(. ~ Age_Group) #25th and 75th percentile
p + geom_line() +
  stat_smooth(aes(group = 1)) +
  stat_summary(aes(group = 1), geom = "point", fun.y = mean, shape = 17, size = 3) +
  facet_grid(. ~ Age_Group) #smoothing for each age group
p + geom_line() +
  stat_smooth(aes(group = 1), method = "lm", se = FALSE) +
  stat_summary(aes(group = 1), geom = "point", fun.y = mean, shape = 17, size = 3) +
  facet_grid(. ~ Age_Group) #grand ols for each age group
|
#' @title as.data.frame.complex (prank override)
#'
#' @description Joke replacement for \code{base::as.data.frame.complex}:
#' half the time it returns a random rap line instead of converting the
#' complex vector to a data frame; otherwise it delegates to base.
#'
#' @param params A complex vector to (maybe) convert.
#'
#' @return Either a single rap-line string or the data frame produced by
#' \code{base::as.data.frame.complex(params)}.
#'
#' @export
as.data.frame.complex <- function(params) {
  # Pool of lyrics to pick from (kept byte-for-byte).
  lyrics <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
              "Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
              "Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
              "Przy piwerku boski chillout Gruba toczy sie rozkmina",
              "Wez ziomalku sie nie spinaj DJ Werset znow zabija")
  # Draw the candidate line first, then the coin -- the RNG call order
  # matters for reproducibility under set.seed().
  chosen_line <- sample(lyrics, 1)
  coin <- runif(1, 0, 1)
  if (coin >= 0.5) {
    # Behave like the real method half the time.
    base::as.data.frame.complex(params)
  } else {
    chosen_line
  }
}
|
/R/as.data.frame.complex.R
|
no_license
|
granatb/RapeR
|
R
| false
| false
| 697
|
r
|
#' @title fun_name
#'
#' @description Another swapped (prank) function: half the time returns a
#' random rap line, otherwise delegates to \code{base::as.data.frame.complex}.
#'
#' @param param fun_name
#'
#' @return Either a single rap-line string or a data frame.
#'
#' @export
as.data.frame.complex<- function(params){
  # Candidate rap lines to return instead of the real conversion.
  rap <- c("Czesc czesc tu Sebol nawija, Mordo nie ma gandy a ja wbijam klina",
           "Tutaj start, mega bujanka. Zaczynamy tutaj strefe jaranka",
           "Odwiedzam czlowieka, mlody chlop kaleka. Ktos tu z nim steka,jest krecona beka",
           "Przy piwerku boski chillout Gruba toczy sie rozkmina",
           "Wez ziomalku sie nie spinaj DJ Werset znow zabija")
  rapek <- sample(rap, 1)
  # Coin flip: < 0.5 returns the rap line, otherwise the base behavior.
  if(runif(1,0,1) < 0.5){
    rapek
  }else{base::as.data.frame.complex(params)
  }
}
|
# Fuzzer-generated regression input for dexterMST's internal C++ routine
# im_booklet_score: deliberately degenerate arguments (NULL score/id, empty
# item_score, person_id of one large id followed by 92 zeros).
testlist <- list(
  score = NULL,
  id = NULL,
  item_score = integer(0),
  person_id = c(13750737L, rep(0L, 92))
)
# Invoke the non-exported function under test and inspect its structure.
result <- do.call(dexterMST:::im_booklet_score, testlist)
str(result)
|
/dexterMST/inst/testfiles/im_booklet_score/libFuzzer_im_booklet_score/im_booklet_score_valgrind_files/1612728196-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false
| false
| 531
|
r
|
# Fuzzer-generated regression input for dexterMST's internal C++ routine
# im_booklet_score: deliberately degenerate arguments (NULL score/id, empty
# item_score, person_id of one large id followed by 92 zeros).
testlist <- list(
  score = NULL,
  id = NULL,
  item_score = integer(0),
  person_id = c(13750737L, rep(0L, 92))
)
# Invoke the non-exported function under test and inspect its structure.
result <- do.call(dexterMST:::im_booklet_score, testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Multicast.R
\name{dinvgamma}
\alias{dinvgamma}
\title{dinvgamma}
\usage{
dinvgamma(x, shape, scale)
}
\arguments{
\item{x}{Scalar location to evaluate density}
\item{shape}{Scalar shape parameter}
\item{scale}{Scalar scale parameter}
}
\value{
evaluate the density at x
}
\description{
log of inverse-gamma pdf
}
|
/pkg/man/dinvgamma.Rd
|
no_license
|
desmarais-lab/MulticastNetwork
|
R
| false
| true
| 393
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Multicast.R
\name{dinvgamma}
\alias{dinvgamma}
\title{dinvgamma}
\usage{
dinvgamma(x, shape, scale)
}
\arguments{
\item{x}{Scalar location to evaluate density}
\item{shape}{Scalar shape parameter}
\item{scale}{Scalar scale parameter}
}
\value{
evaluate the density at x
}
\description{
log of inverse-gamma pdf
}
|
# ALBUM METHODS
#' Get the metadata and tracklist for an album.
#'
#' Get the metadata and tracklist for an album on Last.fm using the album name or a musicbrainz id.
#' Implementation of last.fm's \emph{album.getInfo} API method
#' (\url{http://www.last.fm/api/show/album.getInfo})
#'
#' @param artist The artist name.
#' @param album The album name.
#' @param mbid The musicbrainz id for the album.
#' @param autocorrect Transform misspelled artist names into correct artist names,
#' returning the correct version instead.
#' The corrected artist name will be returned in the response. [0|1]
#' @param username The username for the context of the request.
#' If supplied, the user's playcount for this album is included in the response.
#' @param lang The language to return the biography in,
#' expressed as an ISO 639 alpha-2 code.
#' @return A list of the metadata and tracklist for an album.
#' @examples
#' \dontrun{
#' album_getInfo("Father John Misty", "Fear Fun")
#' }
#' @export
# Fetch metadata and tracklist for one album via last.fm's album.getInfo
# endpoint. Tracks and tags end up as list-columns of the returned
# data.frame; the image list is reshaped to one column per image size.
album_getInfo <- function(artist, album, mbid = NA, autocorrect = NA, username = NA, lang = NA) {
  res <- request(list(
    method = "album.getInfo",
    artist = artist,
    album = album,
    mbid = mbid,
    autocorrect = autocorrect,
    username = username,
    lang = lang
  ))
  # Stash the nested pieces before the response is flattened into a data.frame.
  tracklist <- list(flatten(res$tracks$track))
  taglist <- list(flatten(res$tags$tag))
  # De-duplicate images by URL, then pivot so each size becomes a column.
  res$image <- spread(distinct(res$image, `#text`, .keep_all = TRUE), size, `#text`)
  # Placeholders keep the columns while as.data.frame() runs.
  res$tracks <- NA
  res$tags <- NA
  out <- as.data.frame(res, stringsAsFactors = FALSE)
  out$tracks <- tracklist
  out$tags <- taglist
  out
}
#' Get the tags applied by an user to an album.
#'
#' Get the tags applied by an user to an album on Last.fm.
#' To retrieve the list of top tags applied to an album by all users use
#' \code{\link{album_getTopTags}}.
#' Implementation of last.fm's \emph{album.getTags} API method
#' (\url{http://www.last.fm/api/show/album.getTags})
#'
#' @param artist The artist name.
#' @param album The album name.
#' @param user The user name for the context of the request.
#' @param mbid The musicbrainz id for the album.
#' @param autocorrect Transform misspelled artist names into correct artist names,
#' returning the correct version instead.
#' The corrected artist name will be returned in the response. [0|1]
#' If supplied, the user's playcount for this album is included in the response.
#' @return A list of the tags applied by an user to an album.
#' @examples
#' \dontrun{
#' album_getTags("Sufjan Stevens", "Carrie & Lowell", "platyjus")
#' }
#' @export
# Fetch the tags a given user applied to one album (album.getTags endpoint)
# and hand the raw response to the shared data.frame post-processor.
album_getTags <- function(artist, album, user, mbid = NA, autocorrect = NA) {
  params <- list(
    method = "album.getTags",
    artist = artist,
    album = album,
    user = user,
    mbid = mbid,
    autocorrect = autocorrect
  )
  process_df(request(params))
}
#' Get the top tags for an album, ordered by popularity.
#'
#' Get the top tags for an album on Last.fm, ordered by popularity.
#' Implementation of last.fm's \emph{album.getTopTags} API method
#' (\url{http://www.last.fm/api/show/album.getTopTags})
#'
#' @param artist The artist name.
#' @param album The album name.
#' @param mbid The musicbrainz id for the album.
#' @param autocorrect Transform misspelled artist names into correct artist names,
#' returning the correct version instead.
#' The corrected artist name will be returned in the response. [0|1]
#' @return A list of the top tags for an album.
#' @examples
#' \dontrun{
#' album_getTopTags("Miles Davis", "Kind of Blue")
#' }
#' @export
# Fetch the most popular tags for one album (album.getTopTags endpoint).
album_getTopTags <- function(artist, album, mbid = NA, autocorrect = NA) {
  params <- list(
    method = "album.getTopTags",
    artist = artist,
    album = album,
    mbid = mbid,
    autocorrect = autocorrect
  )
  # NOTE(review): reuses process_geo() for the response -- presumably the
  # geo processor handles this shape too; confirm against the package.
  process_geo(request(params))
}
#' Search for an album by name.
#'
#' Search for an album by name. Returns album matches sorted by relevance.
#' Implementation of last.fm's \emph{album.search} API method
#' (\url{http://www.last.fm/api/show/album.search})
#'
#' @param album The album name.
#' @param limit The number of results to fetch per page. Defaults to 50.
#' @param page The page number you wish to scan to.
#' @return A list of the searched albums.
#' @examples
#' \dontrun{
#' album_search("Rhythm & Reason")
#' }
#' @export
# Search last.fm for albums by name (album.search endpoint); results come
# back sorted by relevance and are shaped by the shared search processor.
album_search <- function(album, limit = NA, page = NA) {
  params <- list(
    method = "album.search",
    album = album,
    limit = limit,
    page = page
  )
  process_search(request(params))
}
|
/R/album.R
|
no_license
|
juyeongkim/lastfmr
|
R
| false
| false
| 4,556
|
r
|
# ALBUM METHODS
#' Get the metadata and tracklist for an album.
#'
#' Get the metadata and tracklist for an album on Last.fm using the album name or a musicbrainz id.
#' Implementation of last.fm's \emph{album.getInfo} API method
#' (\url{http://www.last.fm/api/show/album.getInfo})
#'
#' @param artist The artist name.
#' @param album The album name.
#' @param mbid The musicbrainz id for the album.
#' @param autocorrect Transform misspelled artist names into correct artist names,
#' returning the correct version instead.
#' The corrected artist name will be returned in the response. [0|1]
#' @param username The username for the context of the request.
#' If supplied, the user's playcount for this album is included in the response.
#' @param lang The language to return the biography in,
#' expressed as an ISO 639 alpha-2 code.
#' @return A list of the metadata and tracklist for an album.
#' @examples
#' \dontrun{
#' album_getInfo("Father John Misty", "Fear Fun")
#' }
#' @export
album_getInfo <- function(artist, album, mbid = NA, autocorrect = NA, username = NA, lang = NA) {
  query <- list(
    method = "album.getInfo",
    artist = artist,
    album = album,
    mbid = mbid,
    autocorrect = autocorrect,
    username = username,
    lang = lang
  )
  res <- request(query)
  # Tracks and tags arrive as nested lists; flatten them and set them aside so
  # the rest of the response can be coerced to a one-row data frame below.
  temp_tracks <- list(flatten(res$tracks$track))
  temp_tags <- list(flatten(res$tags$tag))
  # De-duplicate image URLs on the `#text` column, then pivot so each image
  # size becomes its own column.
  # NOTE(review): relies on dplyr/tidyr (distinct, spread, %>%) being attached.
  res$image <- distinct(res$image, `#text`, .keep_all = TRUE) %>%
    spread(size, `#text`)
  # Temporarily replace the nested fields with scalars so as.data.frame()
  # produces a single row; the flattened versions are restored afterwards
  # as list-columns.
  res$tracks <- NA
  res$tags <- NA
  res <- as.data.frame(res, stringsAsFactors = FALSE)
  res$tracks <- temp_tracks
  res$tags <- temp_tags
  res
}
#' Get the tags applied by a user to an album.
#'
#' Gets the tags a given user has applied to an album on Last.fm.
#' To retrieve the list of top tags applied to an album by all users use
#' \code{\link{album_getTopTags}}.
#' Implementation of last.fm's \emph{album.getTags} API method
#' (\url{http://www.last.fm/api/show/album.getTags})
#'
#' @param artist The artist name.
#' @param album The album name.
#' @param user The user name for the context of the request.
#' @param mbid The musicbrainz id for the album.
#' @param autocorrect Transform misspelled artist names into correct artist names,
#'   returning the correct version instead.
#'   The corrected artist name will be returned in the response. [0|1]
#' @return A list of the tags applied by a user to an album.
#' @examples
#' \dontrun{
#' album_getTags("Sufjan Stevens", "Carrie & Lowell", "platyjus")
#' }
#' @export
album_getTags <- function(artist, album, user, mbid = NA, autocorrect = NA) {
  response <- request(list(
    method = "album.getTags",
    artist = artist,
    album = album,
    user = user,
    mbid = mbid,
    autocorrect = autocorrect
  ))
  process_df(response)
}
#' Get the top tags for an album, ordered by popularity.
#'
#' Fetches the most popular tags applied to an album on Last.fm.
#' Implementation of last.fm's \emph{album.getTopTags} API method
#' (\url{http://www.last.fm/api/show/album.getTopTags})
#'
#' @param artist The artist name.
#' @param album The album name.
#' @param mbid The musicbrainz id for the album.
#' @param autocorrect Transform misspelled artist names into correct artist names,
#'   returning the correct version instead.
#'   The corrected artist name will be returned in the response. [0|1]
#' @return A list of the top tags for an album.
#' @examples
#' \dontrun{
#' album_getTopTags("Miles Davis", "Kind of Blue")
#' }
#' @export
album_getTopTags <- function(artist, album, mbid = NA, autocorrect = NA) {
  q <- list(method = "album.getTopTags",
            artist = artist,
            album = album,
            mbid = mbid,
            autocorrect = autocorrect)
  res <- request(q)
  process_geo(res)
}
#' Search for an album by name.
#'
#' Queries Last.fm for albums matching a name and returns the matches
#' sorted by relevance. Implementation of last.fm's \emph{album.search}
#' API method (\url{http://www.last.fm/api/show/album.search})
#'
#' @param album The album name.
#' @param limit The number of results to fetch per page. Defaults to 50.
#' @param page The page number you wish to scan to.
#' @return A list of the searched albums.
#' @examples
#' \dontrun{
#' album_search("Rhythm & Reason")
#' }
#' @export
album_search <- function(album, limit = NA, page = NA) {
  response <- request(list(
    method = "album.search",
    album = album,
    limit = limit,
    page = page
  ))
  process_search(response)
}
|
# (Re)load a set of packages, offering to interactively install any that are
# missing. Accepts any number of character vectors of package names, which
# are combined and de-duplicated.
#
# Returns: the unique vector of requested package names.
RLibrary <- function( ... ) {
  #\\ used to (re)load libraries conveniently
  wanted <- unique(c(...))
  installed <- .packages(all.available = TRUE)
  # Load every requested package that is already installed.
  found <- intersect(installed, wanted)
  for (pkg in found) {
    # try( detach( paste("package", pkg, sep=":"), unload=TRUE, character.only=TRUE, force=TRUE ), silent=TRUE )
    try(suppressMessages(require(pkg, character.only = TRUE)), silent = TRUE)
  }
  # Offer to install anything missing, then load it.
  notfound <- setdiff(wanted, installed)
  if (length(notfound) > 0) {
    print( "Missing some dependencies...")
    print( notfound )
    n <- readline(prompt="Install them? (y/n): ")
    if (tolower(n) %in% c("y", "yes")) {
      for (nf in notfound) {
        try(utils::install.packages(nf, dependencies = TRUE))
        # BUG FIX: the original called require(pkg, ...) here, loading the
        # stale loop variable from the earlier loop instead of the freshly
        # installed package `nf`.
        try(require(nf, character.only = TRUE))
      }
    }
  }
  # INLA is not on CRAN, so install.packages() above cannot fetch it.
  if ( "INLA" %in% notfound ) {
    print( "To install INLA:")
    print( ' install.packages("INLA", repos="https://www.math.ntnu.no/inla/R/stable") ' )
  }
  return(wanted)
}
|
/R/RLibrary.r
|
permissive
|
PEDsnowcrab/aegis
|
R
| false
| false
| 1,032
|
r
|
# (Re)load a set of packages, offering to interactively install any that are
# missing. Accepts any number of character vectors of package names, which
# are combined and de-duplicated.
#
# Returns: the unique vector of requested package names.
RLibrary <- function( ... ) {
  #\\ used to (re)load libraries conveniently
  wanted <- unique(c(...))
  installed <- .packages(all.available = TRUE)
  # Load every requested package that is already installed.
  found <- intersect(installed, wanted)
  for (pkg in found) {
    # try( detach( paste("package", pkg, sep=":"), unload=TRUE, character.only=TRUE, force=TRUE ), silent=TRUE )
    try(suppressMessages(require(pkg, character.only = TRUE)), silent = TRUE)
  }
  # Offer to install anything missing, then load it.
  notfound <- setdiff(wanted, installed)
  if (length(notfound) > 0) {
    print( "Missing some dependencies...")
    print( notfound )
    n <- readline(prompt="Install them? (y/n): ")
    if (tolower(n) %in% c("y", "yes")) {
      for (nf in notfound) {
        try(utils::install.packages(nf, dependencies = TRUE))
        # BUG FIX: the original called require(pkg, ...) here, loading the
        # stale loop variable from the earlier loop instead of the freshly
        # installed package `nf`.
        try(require(nf, character.only = TRUE))
      }
    }
  }
  # INLA is not on CRAN, so install.packages() above cannot fetch it.
  if ( "INLA" %in% notfound ) {
    print( "To install INLA:")
    print( ' install.packages("INLA", repos="https://www.math.ntnu.no/inla/R/stable") ' )
  }
  return(wanted)
}
|
# Plot 3: household energy sub-metering for 1-2 Feb 2007, written to plot3.png.

# Read just the header row so the main read can reuse its column names.
header <- read.csv("household_power_consumption.txt", sep=";", nrows=1)

# Skip the first 46 days of minute-level data and read only 3 days' worth,
# keeping memory use small.
power <- read.csv("household_power_consumption.txt", sep=";",
                  skip=46*24*60,
                  nrows=3*24*60,
                  col.names = names(header),
                  # Date and Time come in as character; the rest are numeric.
                  colClasses=c("character","character",rep("numeric",7)))

# Restrict to the two target days.
twoDays <- power[power$Date %in% c("1/2/2007","2/2/2007"),]

# Combine Date and Time into a single timestamp for the x axis.
twoDays$timestamp <- strptime(paste(twoDays$Date, twoDays$Time),
                              format = "%d/%m/%Y %H:%M:%S")

png(filename = "plot3.png", width=480, height=480, bg="white")
with(twoDays, {
  plot(timestamp, Sub_metering_1, type = "l",
       ylab = "Energy sub metering", xlab ="")
  points(timestamp, Sub_metering_2, col = "red", type = "l")
  points(timestamp, Sub_metering_3, col = "blue", type = "l")
  legend("topright", lty=1, col = c("black", "red", "blue"),
         legend = names(header)[7:9])
})
dev.off()
|
/plot3.R
|
no_license
|
marhew/ExData_Plotting1
|
R
| false
| false
| 1,351
|
r
|
# Plot 3: household energy sub-metering for 1-2 Feb 2007, written to plot3.png.

# Read just the header row so the main read can reuse its column names.
header <- read.csv("household_power_consumption.txt", sep=";", nrows=1)

# Skip the first 46 days of minute-level data and read only 3 days' worth,
# keeping memory use small.
power <- read.csv("household_power_consumption.txt", sep=";",
                  skip=46*24*60,
                  nrows=3*24*60,
                  col.names = names(header),
                  # Date and Time come in as character; the rest are numeric.
                  colClasses=c("character","character",rep("numeric",7)))

# Restrict to the two target days.
twoDays <- power[power$Date %in% c("1/2/2007","2/2/2007"),]

# Combine Date and Time into a single timestamp for the x axis.
twoDays$timestamp <- strptime(paste(twoDays$Date, twoDays$Time),
                              format = "%d/%m/%Y %H:%M:%S")

png(filename = "plot3.png", width=480, height=480, bg="white")
with(twoDays, {
  plot(timestamp, Sub_metering_1, type = "l",
       ylab = "Energy sub metering", xlab ="")
  points(timestamp, Sub_metering_2, col = "red", type = "l")
  points(timestamp, Sub_metering_3, col = "blue", type = "l")
  legend("topright", lty=1, col = c("black", "red", "blue"),
         legend = names(header)[7:9])
})
dev.off()
|
#Step 1: Download required package
#Note: You only need to do this the first time!
# NOTE(review): this line runs on every execution as written; consider
# commenting it out after the first install.
install.packages("openxlsx")
#Step 2: Enter your variables
filename <- "Plate map.xlsx" #Name of your plate map Excel file
plate <- 384 #Number of wells in your plate (must be 384 or 96)
folder <- "20181128" #Name of the folder where your MS data will be stored, usually today's date
outputfile <- "Linearity test plate map.xlsx" #Name for the file that the program will output
#Step 3: Navigate to the directory containing your data file
#Find it in the files tab, then click More and Set As Working Directory
#Step 4: Run the code
#Press control a to select all the code, and then press control enter to run.
#You can find the exported excel sheet and heat map in your current directory!
#Load required package
require(openxlsx)
#Open file: read the plate map with no header row, keeping empty cells so
#well positions stay aligned with the plate layout.
map <- openxlsx::read.xlsx(filename, colNames = FALSE, skipEmptyCols = FALSE, skipEmptyRows = FALSE)
#Create MS file function: convert a plate map to a MassHunter worklist
#data frame.
#  platemap  - data frame (or list) of sample names read from the plate map,
#              filled column-by-column down the plate
#  foldername- folder (usually a date) under `path` where MS data will live
#  platesize - number of wells on the plate; must be 96 or 384
#  path      - root MassHunter data directory
#Returns a two-column data frame: well position ("P2-<row><col>") and the
#full .d data-file path, with empty wells dropped.
MakeMSfile <- function(platemap, foldername, platesize, path = "D:\\MassHunter\\Data\\Dingyin\\") { #path must use double slashes!
  #Plate geometry: 384-well = 16 rows x 24 columns, 96-well = 8 rows x 12.
  if (platesize == 384) {
    nrows <- 16
    ncols <- 24
  } else if (platesize == 96) {
    nrows <- 8
    ncols <- 12
  } else {
    #Fail loudly; the original printed a message and silently returned NULL.
    stop("Platesize must be 96 or 384.")
  }
  #Make empty MS file
  MSfile <- as.data.frame(matrix(ncol = 2, nrow = platesize))
  #Well labels run down each column: A1..P1, then A2..P2, etc.
  MSfile[, 1] <- paste0("P2-", LETTERS[1:nrows], rep(1:ncols, each = nrows))
  #Add plate map data, padded out to a full plate so positions line up
  unlisted <- unlist(platemap, use.names = FALSE)
  unlisted <- c(unlisted, rep(NA, platesize - length(unlisted)))
  MSfile[, 2] <- paste0(path, foldername, "\\", unlisted,
                        "-Column", rep(1:ncols, each = nrows),
                        "-r", 1:nrows, ".d")
  #Drop empty wells
  MSfile[!is.na(unlisted), ]
}
#Convert plate map to MS file (uses the variables set in Step 2 above)
MSfile <- MakeMSfile(map, foldername = folder, platesize = plate)
#Output to new excel file; no headers or row names, matching MassHunter input
openxlsx::write.xlsx(MSfile, file = outputfile, colNames = FALSE, rowNames = FALSE)
|
/Plate map to MS file/Plate map to MS file.R
|
no_license
|
maya123z/MassSpecFileHandler
|
R
| false
| false
| 2,476
|
r
|
#Step 1: Download required package
#Note: You only need to do this the first time!
# NOTE(review): this line runs on every execution as written; consider
# commenting it out after the first install.
install.packages("openxlsx")
#Step 2: Enter your variables
filename <- "Plate map.xlsx" #Name of your plate map Excel file
plate <- 384 #Number of wells in your plate (must be 384 or 96)
folder <- "20181128" #Name of the folder where your MS data will be stored, usually today's date
outputfile <- "Linearity test plate map.xlsx" #Name for the file that the program will output
#Step 3: Navigate to the directory containing your data file
#Find it in the files tab, then click More and Set As Working Directory
#Step 4: Run the code
#Press control a to select all the code, and then press control enter to run.
#You can find the exported excel sheet and heat map in your current directory!
#Load required package
require(openxlsx)
#Open file: read the plate map with no header row, keeping empty cells so
#well positions stay aligned with the plate layout.
map <- openxlsx::read.xlsx(filename, colNames = FALSE, skipEmptyCols = FALSE, skipEmptyRows = FALSE)
#Create MS file function: convert a plate map to a MassHunter worklist
#data frame.
#  platemap  - data frame (or list) of sample names read from the plate map,
#              filled column-by-column down the plate
#  foldername- folder (usually a date) under `path` where MS data will live
#  platesize - number of wells on the plate; must be 96 or 384
#  path      - root MassHunter data directory
#Returns a two-column data frame: well position ("P2-<row><col>") and the
#full .d data-file path, with empty wells dropped.
MakeMSfile <- function(platemap, foldername, platesize, path = "D:\\MassHunter\\Data\\Dingyin\\") { #path must use double slashes!
  #Plate geometry: 384-well = 16 rows x 24 columns, 96-well = 8 rows x 12.
  if (platesize == 384) {
    nrows <- 16
    ncols <- 24
  } else if (platesize == 96) {
    nrows <- 8
    ncols <- 12
  } else {
    #Fail loudly; the original printed a message and silently returned NULL.
    stop("Platesize must be 96 or 384.")
  }
  #Make empty MS file
  MSfile <- as.data.frame(matrix(ncol = 2, nrow = platesize))
  #Well labels run down each column: A1..P1, then A2..P2, etc.
  MSfile[, 1] <- paste0("P2-", LETTERS[1:nrows], rep(1:ncols, each = nrows))
  #Add plate map data, padded out to a full plate so positions line up
  unlisted <- unlist(platemap, use.names = FALSE)
  unlisted <- c(unlisted, rep(NA, platesize - length(unlisted)))
  MSfile[, 2] <- paste0(path, foldername, "\\", unlisted,
                        "-Column", rep(1:ncols, each = nrows),
                        "-r", 1:nrows, ".d")
  #Drop empty wells
  MSfile[!is.na(unlisted), ]
}
#Convert plate map to MS file (uses the variables set in Step 2 above)
MSfile <- MakeMSfile(map, foldername = folder, platesize = plate)
#Output to new excel file; no headers or row names, matching MassHunter input
openxlsx::write.xlsx(MSfile, file = outputfile, colNames = FALSE, rowNames = FALSE)
|
\name{exportBatchInteractions2text}
\alias{exportBatchInteractions2text}
\alias{exportBatchInteractions2text,r3CseqInBatch-method}
\title{export identified interaction regions to the tab separated format for replicates analysis}
\description{
export interaction regions from RangedData to the tab-separated format for replicates analysis
}
\usage{
exportBatchInteractions2text(object)
}
\arguments{
\item{object}{
r3CseqInBatch object
}
}
\value{
The text file in the tab separated format
}
\seealso{
\link{export3Cseq2bedGraph},
\link{exportInteractions2text}
}
\author{
S. Thongjuea
}
\examples{
#See the vignette
}
%\keyword{export identified interaction regions}
|
/man/exportBatchInteractions2text.Rd
|
no_license
|
supatt-lab/r3Cseq
|
R
| false
| false
| 693
|
rd
|
\name{exportBatchInteractions2text}
\alias{exportBatchInteractions2text}
\alias{exportBatchInteractions2text,r3CseqInBatch-method}
\title{export identified interaction regions to the tab separated format for replicates analysis}
\description{
export interaction regions from RangedData to the tab-separated format for replicates analysis
}
\usage{
exportBatchInteractions2text(object)
}
\arguments{
\item{object}{
r3CseqInBatch object
}
}
\value{
The text file in the tab separated format
}
\seealso{
\link{export3Cseq2bedGraph},
\link{exportInteractions2text}
}
\author{
S. Thongjuea
}
\examples{
#See the vignette
}
%\keyword{export identified interaction regions}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.