content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_functions.R
\name{IRMSE}
\alias{IRMSE}
\title{Compute the integrated root mean-square error}
\usage{
IRMSE(
estimate,
parameter,
fn,
density = function(theta, ...) 1,
lower = -Inf,
upper = Inf,
...
)
}
\arguments{
\item{estimate}{a vector of parameter estimates}
\item{parameter}{a vector of population parameters}
\item{fn}{a continuous function where the first argument is to be integrated and the second argument is
a vector of parameters or parameter estimates. This function
represents an implied continuous function which uses the sample estimates or population parameters}
\item{density}{(optional) a density function used to marginalize (i.e., average), where the first
argument is to be integrated, and must be of the form \code{density(theta, ...)} or
\code{density(theta, param1, param2)}, where \code{param1} is a placeholder name for the
hyper-parameters associated with the probability density function. If omitted then
the cumulative difference between the respective functions will be computed instead}
\item{lower}{lower bound to begin numerical integration from}
\item{upper}{upper bound to finish numerical integration to}
\item{...}{additional parameters to pass to \code{fn}, \code{density},
and \code{\link{integrate}}}
}
\value{
returns a single \code{numeric} term indicating the average/cumulative deviation
given the supplied continuous functions
}
\description{
Computes the average/cumulative deviation given two continuous functions and an optional
function representing the probability density function. Only one-dimensional integration
is supported.
}
\details{
The integrated root mean-square error (IRMSE) is of the form
\deqn{IRMSE(\theta) = \sqrt{\int [f(\theta, \hat{\psi}) - f(\theta, \psi)]^2 g(\theta, ...)}}
where \eqn{g(\theta, ...)} is the density function used to marginalize the continuous sample
(\eqn{f(\theta, \hat{\psi})}) and population (\eqn{f(\theta, \psi)}) functions.
}
\examples{
# logistic regression function with one slope and intercept
fn <- function(theta, param) 1 / (1 + exp(-(param[1] + param[2] * theta)))
# sample and population sets
est <- c(-0.4951, 1.1253)
pop <- c(-0.5, 1)
theta <- seq(-10,10,length.out=1000)
plot(theta, fn(theta, pop), type = 'l', col='red', ylim = c(0,1))
lines(theta, fn(theta, est), col='blue', lty=2)
# cumulative result (i.e., standard integral)
IRMSE(est, pop, fn)
# integrated RMSE result by marginalizing over a N(0,1) distribution
den <- function(theta, mean, sd) dnorm(theta, mean=mean, sd=sd)
IRMSE(est, pop, fn, den, mean=0, sd=1)
# this specification is equivalent to the above
den2 <- function(theta, ...) dnorm(theta, ...)
IRMSE(est, pop, fn, den2, mean=0, sd=1)
}
\references{
Sigal, M. J., & Chalmers, R. P. (2016). Play it again: Teaching statistics with Monte
Carlo simulation. \code{Journal of Statistics Education, 24}(3), 136-156.
\doi{10.1080/10691898.2016.1246953}
}
\seealso{
\code{\link{RMSE}}
}
\author{
Phil Chalmers \email{rphilip.chalmers@gmail.com}
}
| /man/IRMSE.Rd | no_license | GloriaColmenares/SimDesign | R | false | true | 3,103 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary_functions.R
\name{IRMSE}
\alias{IRMSE}
\title{Compute the integrated root mean-square error}
\usage{
IRMSE(
estimate,
parameter,
fn,
density = function(theta, ...) 1,
lower = -Inf,
upper = Inf,
...
)
}
\arguments{
\item{estimate}{a vector of parameter estimates}
\item{parameter}{a vector of population parameters}
\item{fn}{a continuous function where the first argument is to be integrated and the second argument is
a vector of parameters or parameter estimates. This function
represents an implied continuous function which uses the sample estimates or population parameters}
\item{density}{(optional) a density function used to marginalize (i.e., average), where the first
argument is to be integrated, and must be of the form \code{density(theta, ...)} or
\code{density(theta, param1, param2)}, where \code{param1} is a placeholder name for the
hyper-parameters associated with the probability density function. If omitted then
the cumulative difference between the respective functions will be computed instead}
\item{lower}{lower bound to begin numerical integration from}
\item{upper}{upper bound to finish numerical integration to}
\item{...}{additional parameters to pass to \code{fn}, \code{density},
and \code{\link{integrate}}}
}
\value{
returns a single \code{numeric} term indicating the average/cumulative deviation
given the supplied continuous functions
}
\description{
Computes the average/cumulative deviation given two continuous functions and an optional
function representing the probability density function. Only one-dimensional integration
is supported.
}
\details{
The integrated root mean-square error (IRMSE) is of the form
\deqn{IRMSE(\theta) = \sqrt{\int [f(\theta, \hat{\psi}) - f(\theta, \psi)]^2 g(\theta, ...)}}
where \eqn{g(\theta, ...)} is the density function used to marginalize the continuous sample
(\eqn{f(\theta, \hat{\psi})}) and population (\eqn{f(\theta, \psi)}) functions.
}
\examples{
# logistic regression function with one slope and intercept
fn <- function(theta, param) 1 / (1 + exp(-(param[1] + param[2] * theta)))
# sample and population sets
est <- c(-0.4951, 1.1253)
pop <- c(-0.5, 1)
theta <- seq(-10,10,length.out=1000)
plot(theta, fn(theta, pop), type = 'l', col='red', ylim = c(0,1))
lines(theta, fn(theta, est), col='blue', lty=2)
# cumulative result (i.e., standard integral)
IRMSE(est, pop, fn)
# integrated RMSE result by marginalizing over a N(0,1) distribution
den <- function(theta, mean, sd) dnorm(theta, mean=mean, sd=sd)
IRMSE(est, pop, fn, den, mean=0, sd=1)
# this specification is equivalent to the above
den2 <- function(theta, ...) dnorm(theta, ...)
IRMSE(est, pop, fn, den2, mean=0, sd=1)
}
\references{
Sigal, M. J., & Chalmers, R. P. (2016). Play it again: Teaching statistics with Monte
Carlo simulation. \code{Journal of Statistics Education, 24}(3), 136-156.
\doi{10.1080/10691898.2016.1246953}
}
\seealso{
\code{\link{RMSE}}
}
\author{
Phil Chalmers \email{rphilip.chalmers@gmail.com}
}
|
# Pin devtools to version 1.13.6 & install its dependencies
# devtools is the package we use to install specific versions of other packages
# see also: https://github.com/AlexsLemonade/refinebio/pull/752
# From https://github.com/AlexsLemonade/refinebio/blob/d55616dff8270aa46179a26c8e86318c8535df32/common/install_devtools.R
# Treat warnings as errors, set CRAN mirror, and set parallelization:
options(warn=2)
options(repos=structure(c(CRAN="http://lib.stat.cmu.edu/R/CRAN/")))
# Returns TRUE when an HTTP HEAD request against `url` reports a 404, FALSE
# otherwise.  `timeout` is curl's connect timeout in seconds.
# If curl produced no output at all (network failure), stop with a clear
# error instead of letting grepl(NA) feed a missing value into the caller's
# if(), which would abort with an opaque "missing value where TRUE/FALSE
# needed" message.
url_is_404 <- function(url, timeout) {
  curl_result <- system(paste0("curl --head --connect-timeout ", timeout, " ", url), intern=TRUE)
  if (length(curl_result) == 0) {
    stop(paste("No response from", url), call. = FALSE)
  }
  # The first line of HEAD output is the HTTP status line,
  # e.g. "HTTP/1.1 404 Not Found"
  grepl("404", curl_result[1])
}

# Installs a specific version of a package from CRAN.
# The most current version of a package lives in a different location than
# the older versions (src/contrib/ vs src/contrib/Archive/<pkg>/), so check
# the top level first and fall back to the per-package archive.
install_package_version <- function(package_name, version) {
  package_tarball <- paste0(package_name, "_", version, ".tar.gz")
  package_url <- paste0("http://lib.stat.cmu.edu/R/CRAN/src/contrib/", package_tarball)
  # Give CRAN a full minute to timeout since it's not always the most reliable.
  if (url_is_404(package_url, timeout = 60)) {
    package_url <- paste0("http://lib.stat.cmu.edu/R/CRAN/src/contrib/Archive/", package_name, "/", package_tarball)
    # Make sure the package actually exists in the archive!  Allow a longer
    # timeout here: the archive lookup is the last chance before giving up.
    if (url_is_404(package_url, timeout = 120)) {
      stop(paste("Package", package_name, "version", version, "does not exist!"))
    }
  }
  install.packages(package_url)
}
install_package_version("jsonlite", "1.5")
install_package_version("mime", "0.6")
install_package_version("curl", "3.2")
install_package_version("openssl", "1.0.2")
install_package_version("R6", "2.3.0")
install_package_version("httr", "1.3.1")
install_package_version("digest", "0.6.18")
install_package_version("memoise", "1.1.0")
install_package_version("whisker", "0.3-2")
install_package_version("rstudioapi", "0.8")
install_package_version("git2r", "0.23.0")
install_package_version("withr", "2.1.2")
install_package_version("devtools", "1.13.6")
# Use devtools::install_version() to install packages in cran.
devtools::install_version('data.table', version='1.11.0')
devtools::install_version('optparse', version='1.4.4')
devtools::install_version('rlang', version='0.2.2')
devtools::install_version('dplyr', version='0.7.4')
devtools::install_version('readr', version='1.1.1')
devtools::install_version('tidyr', version='0.8.2')
# BiocInstaller, required by devtools::install_url()
install.packages('https://bioconductor.org/packages/3.6/bioc/src/contrib/BiocInstaller_1.28.0.tar.gz')
# Helper function that installs a list of packages based on input URL
# Install every package tarball listed in `packages` from the repository
# root `main_url`; returns the list of devtools::install_url() results.
install_with_url <- function(main_url, packages) {
  # paste0() is vectorised, so build all of the full tarball URLs at once
  full_urls <- paste0(main_url, packages)
  lapply(full_urls, devtools::install_url)
}
bioc_url <- 'https://bioconductor.org/packages/3.6/bioc/src/contrib/'
bioc_pkgs <- c(
'oligo_1.42.0.tar.gz',
'Biobase_2.38.0.tar.gz',
'affy_1.56.0.tar.gz',
'affyio_1.48.0.tar.gz',
'AnnotationDbi_1.40.0.tar.gz'
)
install_with_url(bioc_url, bioc_pkgs)
annotation_url <- 'https://bioconductor.org/packages/3.6/data/annotation/src/contrib/'
annotation_pkgs <- c(
'org.Hs.eg.db_3.5.0.tar.gz',
'org.Mm.eg.db_3.5.0.tar.gz',
'org.Dm.eg.db_3.5.0.tar.gz',
'org.Ce.eg.db_3.5.0.tar.gz',
'org.Bt.eg.db_3.5.0.tar.gz',
'org.Cf.eg.db_3.5.0.tar.gz',
'org.Gg.eg.db_3.5.0.tar.gz',
'org.Rn.eg.db_3.5.0.tar.gz',
'org.Ss.eg.db_3.5.0.tar.gz',
'org.Dr.eg.db_3.5.0.tar.gz'
)
install_with_url(annotation_url, annotation_pkgs)
# Invoke another R script to install BrainArray ensg packages
source("install_ensg_pkgs.R")
# Install Bioconductor platform design (pd) packages
experiment_url <- 'https://bioconductor.org/packages/release/data/experiment/src/contrib/'
pd_experiment_pkgs <- c(
'pd.atdschip.tiling_0.16.0.tar.gz'
)
install_with_url(experiment_url, pd_experiment_pkgs)
| /R/dependencies.R | permissive | AlexsLemonade/identifier-refinery | R | false | false | 3,973 | r | # Pin devtools to version 1.13.6 & install its dependencies
# devtools is the package we use to install specific versions of other packages
# see also: https://github.com/AlexsLemonade/refinebio/pull/752
# From https://github.com/AlexsLemonade/refinebio/blob/d55616dff8270aa46179a26c8e86318c8535df32/common/install_devtools.R
# Treat warnings as errors, set CRAN mirror, and set parallelization:
options(warn=2)
options(repos=structure(c(CRAN="http://lib.stat.cmu.edu/R/CRAN/")))
install_package_version <- function(package_name, version) {
  # Installs a specific version of a package from CRAN.
  # The most current version of a package lives in a different location
  # (src/contrib/) than the older versions (src/contrib/Archive/<pkg>/),
  # so we have to check where it can be found.
  package_tarball <- paste0(package_name, "_", version, ".tar.gz")
  package_url <- paste0("http://lib.stat.cmu.edu/R/CRAN/src/contrib/", package_tarball)
  # Give CRAN a full minute to timeout since it's not always the most reliable.
  curl_result <- system(paste0("curl --head --connect-timeout 60 ", package_url), intern=TRUE)
  # curl_result[1] is the HTTP status line, e.g. "HTTP/1.1 404 Not Found"
  if (grepl("404", curl_result[1])) {
    # Not at the top level, so look in the per-package archive directory
    package_url <- paste0("http://lib.stat.cmu.edu/R/CRAN/src/contrib/Archive/", package_name, "/", package_tarball)
    # Make sure the package actually exists in the archive!
    curl_result <- system(paste0("curl --head --connect-timeout 120 ", package_url), intern=TRUE)
    if (grepl("404", curl_result[1])) {
      stop(paste("Package", package_name, "version", version, "does not exist!"))
    }
  }
  install.packages(package_url)
}
install_package_version("jsonlite", "1.5")
install_package_version("mime", "0.6")
install_package_version("curl", "3.2")
install_package_version("openssl", "1.0.2")
install_package_version("R6", "2.3.0")
install_package_version("httr", "1.3.1")
install_package_version("digest", "0.6.18")
install_package_version("memoise", "1.1.0")
install_package_version("whisker", "0.3-2")
install_package_version("rstudioapi", "0.8")
install_package_version("git2r", "0.23.0")
install_package_version("withr", "2.1.2")
install_package_version("devtools", "1.13.6")
# Use devtools::install_version() to install packages in cran.
devtools::install_version('data.table', version='1.11.0')
devtools::install_version('optparse', version='1.4.4')
devtools::install_version('rlang', version='0.2.2')
devtools::install_version('dplyr', version='0.7.4')
devtools::install_version('readr', version='1.1.1')
devtools::install_version('tidyr', version='0.8.2')
# BiocInstaller, required by devtools::install_url()
install.packages('https://bioconductor.org/packages/3.6/bioc/src/contrib/BiocInstaller_1.28.0.tar.gz')
# Helper function that installs a list of packages based on input URL
# Installs each tarball in `packages` by appending its name to `main_url`
install_with_url <- function(main_url, packages) {
  lapply(packages,
         function(pkg) devtools::install_url(paste0(main_url, pkg)))
}
bioc_url <- 'https://bioconductor.org/packages/3.6/bioc/src/contrib/'
bioc_pkgs <- c(
'oligo_1.42.0.tar.gz',
'Biobase_2.38.0.tar.gz',
'affy_1.56.0.tar.gz',
'affyio_1.48.0.tar.gz',
'AnnotationDbi_1.40.0.tar.gz'
)
install_with_url(bioc_url, bioc_pkgs)
annotation_url <- 'https://bioconductor.org/packages/3.6/data/annotation/src/contrib/'
annotation_pkgs <- c(
'org.Hs.eg.db_3.5.0.tar.gz',
'org.Mm.eg.db_3.5.0.tar.gz',
'org.Dm.eg.db_3.5.0.tar.gz',
'org.Ce.eg.db_3.5.0.tar.gz',
'org.Bt.eg.db_3.5.0.tar.gz',
'org.Cf.eg.db_3.5.0.tar.gz',
'org.Gg.eg.db_3.5.0.tar.gz',
'org.Rn.eg.db_3.5.0.tar.gz',
'org.Ss.eg.db_3.5.0.tar.gz',
'org.Dr.eg.db_3.5.0.tar.gz'
)
install_with_url(annotation_url, annotation_pkgs)
# Invoke another R script to install BrainArray ensg packages
source("install_ensg_pkgs.R")
# Install Bioconductor platform design (pd) packages
experiment_url <- 'https://bioconductor.org/packages/release/data/experiment/src/contrib/'
pd_experiment_pkgs <- c(
'pd.atdschip.tiling_0.16.0.tar.gz'
)
install_with_url(experiment_url, pd_experiment_pkgs)
|
# Small sample of values for a quick distribution check
x=c(21,25,26,27,28,30,50,40,60,90,100)
# Echo the vector to the console
x
# Plot a histogram of the values
hist(x)
| /randomnommers.R | no_license | youngstacpt/mushongac | R | false | false | 49 | r | x=c(21,25,26,27,28,30,50,40,60,90,100)
x
hist(x)
|
library(tidyverse)
library(magrittr)
library(grid)
library(gridExtra)
library(extrafont)
# Assemble the H3-modifications supplemental figure from pre-rendered plot
# objects stored in .Rdata files and write it to a single PDF.
#   *_rdata:    paths to .Rdata files; each load() below restores a plot
#               object whose name is fixed by the upstream scripts
#   fig_width:  output width in cm
#   fig_height: output height in cm
#   pdf_out:    path of the PDF to write
main = function(histone_spn1_depletion_westerns_rdata,
chipseq_abundance_barplots_h3_rdata,
h3_mods_non_h3_norm_rdata,
h3_mods_facet_expression_rdata,
fig_width=8.5,
fig_height=9/16 * 8.5 * 2,
pdf_out="test.pdf"){
# Panel placement grid for arrangeGrob(): cell value k places the k-th grob
# passed below; NA cells are left empty.
layout = rbind(c(1,1,1,1,2,2,2,2,2,NA,NA,NA),
c(1,1,1,1,2,2,2,2,2,NA,NA,NA),
c(1,1,1,1,2,2,2,2,2,NA,NA,NA),
c(1,1,1,1,3,3,3,3,3,3,3,3),
c(1,1,1,1,3,3,3,3,3,3,3,3),
c(NA,NA,NA,NA,3,3,3,3,3,3,3,3),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5))
# Each load() restores one plot object into this function's environment
load(histone_spn1_depletion_westerns_rdata)
load(chipseq_abundance_barplots_h3_rdata)
load(h3_mods_non_h3_norm_rdata)
load(h3_mods_facet_expression_rdata)
# Grob order defines the panel numbers used in `layout`; nullGrob() (panel 5)
# fills the remaining cells with empty space
figure_h3_mods = arrangeGrob(histone_spn1_depletion_westerns,
chipseq_abundance_barplot,
h3_mods_non_h3_norm,
h3_mods_facet_expression,
nullGrob(),
layout_matrix=layout)
# Write the assembled figure using the cairo_pdf device (sizes are in cm)
ggsave(pdf_out,
plot=figure_h3_mods,
width=fig_width,
height=fig_height,
units="cm",
device=cairo_pdf)
}
# Entry point: input, parameter and output paths are supplied by Snakemake
main(histone_spn1_depletion_westerns_rdata = snakemake@input[["histone_spn1_depletion_westerns"]],
chipseq_abundance_barplots_h3_rdata = snakemake@input[["chipseq_abundance_barplots_h3"]],
h3_mods_non_h3_norm_rdata = snakemake@input[["h3_mods_non_h3_norm"]],
h3_mods_facet_expression_rdata = snakemake@input[["h3_mods_facet_expression"]],
fig_width = snakemake@params[["fig_width"]],
fig_height = snakemake@params[["fig_height"]],
pdf_out = snakemake@output[["pdf"]])
| /scripts/assemble_figure_h3_mods_supp.R | no_license | winston-lab/spn1_paper_figures | R | false | false | 2,154 | r | library(tidyverse)
library(magrittr)
library(grid)
library(gridExtra)
library(extrafont)
main = function(histone_spn1_depletion_westerns_rdata,
chipseq_abundance_barplots_h3_rdata,
h3_mods_non_h3_norm_rdata,
h3_mods_facet_expression_rdata,
fig_width=8.5,
fig_height=9/16 * 8.5 * 2,
pdf_out="test.pdf"){
layout = rbind(c(1,1,1,1,2,2,2,2,2,NA,NA,NA),
c(1,1,1,1,2,2,2,2,2,NA,NA,NA),
c(1,1,1,1,2,2,2,2,2,NA,NA,NA),
c(1,1,1,1,3,3,3,3,3,3,3,3),
c(1,1,1,1,3,3,3,3,3,3,3,3),
c(NA,NA,NA,NA,3,3,3,3,3,3,3,3),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5),
c(4,4,4,4,4,4,4,4,4,5,5,5))
load(histone_spn1_depletion_westerns_rdata)
load(chipseq_abundance_barplots_h3_rdata)
load(h3_mods_non_h3_norm_rdata)
load(h3_mods_facet_expression_rdata)
figure_h3_mods = arrangeGrob(histone_spn1_depletion_westerns,
chipseq_abundance_barplot,
h3_mods_non_h3_norm,
h3_mods_facet_expression,
nullGrob(),
layout_matrix=layout)
ggsave(pdf_out,
plot=figure_h3_mods,
width=fig_width,
height=fig_height,
units="cm",
device=cairo_pdf)
}
main(histone_spn1_depletion_westerns_rdata = snakemake@input[["histone_spn1_depletion_westerns"]],
chipseq_abundance_barplots_h3_rdata = snakemake@input[["chipseq_abundance_barplots_h3"]],
h3_mods_non_h3_norm_rdata = snakemake@input[["h3_mods_non_h3_norm"]],
h3_mods_facet_expression_rdata = snakemake@input[["h3_mods_facet_expression"]],
fig_width = snakemake@params[["fig_width"]],
fig_height = snakemake@params[["fig_height"]],
pdf_out = snakemake@output[["pdf"]])
|
# Estimate how many PLSR components to retain for each lab analyte x
# fingerprint-file combination.  For every analyte a 4-panel figure of
# cross-validated RMSEP vs. number of components is written to `outPath`,
# and the selected counts are accumulated in `Components`.
# NOTE: requires the project helpers loadDataFile() and
# densityDependentSubset() to be sourced before running.
library(oce) #used for despiking
library(hydroGOF) #forgot what I used this for
library(pls) #Load the pls package

#******Specify file paths and names
inPath<-"C:/Users/FBLab/Documents/GitHub/Brittany/inputFiles/" #Specify folder where data is located
outPath<-"C:/Users/FBLab/Documents/GitHub/Brittany/output/"
fitPath<-"C:/Users/FBLab/Downloads/FITEVAL2_win/FITEVAL2_win/" # fiteval installation directory
fitEval<-paste(fitPath,"fiteval",sep="")
fitFile<-paste(fitPath,"PLSR.in",sep="")
fitFileOut<-paste(fitPath,"PLSR_out.txt",sep="")
# The four spectral fingerprint exports to evaluate
filename<-c("OriginalBrittany.csv" ,"Brittany1stDerative.csv","TubidityCompensatedBrittany.csv","TurbidityCompensated1stDerivativeBrittany.csv")
#store names for the lab analytes
Chem<-c("CL", "NO2", "NNO2","NO3","NNO3","SO4","DOC","DIC","UV254", "PPO43","Ptot", "MES",
        "NNH4", "Ntot", "NTotFilt", "Silica", "Turbidity");
# Row counter and result holder: one row per (file, analyte) pair; columns are
# (chosen component count, file index, analyte index, fallback component count)
counter<-1
Components<-matrix(nrow=70,ncol=4)

for (chem in 1:17){
  #all chem analytes are listed in different columns
  #1 CL    2 NO2    3 NNO2      4 NO3    5 NNO3   6 SO4
  #7 DOC   8 DIC    9 UV254    10 PPO43 11 Ptot  12 MES
  #13 NNH4 14 Ntot 15 NTotFilt 16 Silic 17 Turb
  #open a file to make a 4 panel plot (one for each of the 4 fingerprint files)
  jpeg(file=paste(outPath,".",Chem[chem],"all.jpg",sep=""))
  par(mfrow=c(2,2))
  for (fn in 1:4){#for each fingerprint file
    # Load the data for the CURRENT file.  (BUG FIX: this previously read
    # filename[1] on every pass, so all four panels analysed the same file
    # even though the plot titles claimed otherwise.)
    myData<-loadDataFile(inPath,filename[fn])
    #data are returned in a list: myData$fingerPrints, myData$realTime, myData$ChemData
    #pull out the column of chem data of interest
    ChemConc<-as.matrix(myData$ChemData[,chem]) #take out the only one we care about
    fp<-cbind(myData$fingerPrints,myData$realTime,as.matrix(ChemConc)) #bind the matrices together to determine complete cases
    fp<-fp[complete.cases(fp[,2:dim(fp)[2]]),] #just keep the fingerprints that have analytical data
    ChemConc<-as.matrix(fp[,dim(fp)[2]]) #pull chem back out
    fp<-fp[,-dim(fp)[2]] #drop the chem column again
    realTime<-as.matrix(fp[,dim(fp)[2]]) #pull real time off (now the last column)
    fp<-fp[,-dim(fp)[2]] #pop it off the end of the dataframe
    Comps<-30
    #calculate the PLSR model for the available data
    doISubset<-0 #switch: 1 = density-dependent subsample, 0 = use all rows
    if (doISubset==1){
      #subset if you like
      subset<-densityDependentSubset(ChemConc,realTime,fp,0.5,TRUE)
      if(length(subset$ChemConc)<=39){
        # Too few samples to support 30 components; cap at half the sample count
        Comps<-round(length(subset$ChemConc)*0.5)
      }
      modelRMSEP<-RMSEP(plsr(subset$ChemConc~data.matrix(subset$fingerPrint),ncomp=Comps,validation="CV"))
    }
    if (doISubset==0){
      modelRMSEP<-RMSEP(plsr(ChemConc~data.matrix(fp),ncomp=30,validation="CV"))
    }#end if (doISubset)
    # Pull out the cross-validated RMSEP for each component count.
    # KNOWN ISSUE: dropping NA entries here can shift the x axis, because the
    # positions of removed values are not tracked.  Could be fixed by avoiding
    # NaNs in the RMSEP, or by carrying an explicit index column so that
    # complete.cases() keeps the original positions.
    rmsepIndex<-complete.cases(as.matrix(modelRMSEP$val[2,1,]))
    rmsepValues<-modelRMSEP$val[2,1,rmsepIndex]
    # Candidate: component count minimising RMSEP (0-based, since the first
    # RMSEP value corresponds to the zero-component model)
    nComps<-min(which(rmsepValues==min(rmsepValues)))-1
    plot(0:(length(rmsepValues)-1),rmsepValues,xlab=c("number of comoponents"),ylab=c("RMSEP"),main=paste(filename[fn], Chem[chem],sep=" " ))
    points(nComps,rmsepValues[nComps+1],col="green")
    if(nComps>15){
      # When the minimum lands implausibly deep, also flag the point where the
      # RMSEP curve flattens (smallest absolute step) as a fallback choice
      nComps2<-which(abs(diff(rmsepValues))==min(abs(diff(rmsepValues))))
      points(nComps2,rmsepValues[nComps2],col="red")
    }
    Components[counter,1]<-nComps
    Components[counter,2]<-fn
    Components[counter,3]<-chem
    if(nComps>15){
      Components[counter,4]<-nComps2
    }
    counter<-counter+1
  } #for each file
  dev.off()
} #for each chemical | /olderCode/tests/estimateComponentsForPLSRForAllDataInputs.R | no_license | BirgandLab/Brittany | R | false | false | 5,042 | r | library(oce) #used for despiking
library(hydroGOF) #forgot what I used this for
library(pls) #Load the pls package
#******Specify file paths and names
inPath<-"C:/Users/FBLab/Documents/GitHub/Brittany/inputFiles/" #Specify folder where data is located
outPath<-"C:/Users/FBLab/Documents/GitHub/Brittany/output/"
fitPath<-"C:/Users/FBLab/Downloads/FITEVAL2_win/FITEVAL2_win/" #fiteval_out.txt"
fitEval<-paste(fitPath,"fiteval",sep="")
fitFile<-paste(fitPath,"PLSR.in",sep="")
fitFileOut<-paste(fitPath,"PLSR_out.txt",sep="")
filename<-c("OriginalBrittany.csv" ,"Brittany1stDerative.csv","TubidityCompensatedBrittany.csv","TurbidityCompensated1stDerivativeBrittany.csv")
#store names for the lab analytes
Chem<-c("CL", "NO2", "NNO2","NO3","NNO3","SO4","DOC","DIC","UV254", "PPO43","Ptot", "MES",
"NNH4", "Ntot", "NTotFilt", "Silica", "Turbidity");
#read the data specified by the vector filename
counter<-1
Components<-matrix(nrow=70,ncol=4)
#load data for checking number of components on
for (chem in 1:17){
#all chem analytes are listed in different columns
#1 CL 2 NO2 3 NNO2 4 NO3 5 NNO3 6 SO4
#7 DOC 8 DIC 9 UV254 10 PPO43 11 Ptot 12 MES
#13 NNH4 14 Ntot 15 NTotFilt 16 Silic 17 Turb
#open a file to make a 4 panel plot (one for each of the 4 fingerprint files)
jpeg(file=paste(outPath,".",Chem[chem],"all.jpg",sep=""))
par(mfrow=c(2,2))
for (fn in 1:4){#for each filename
#load the data
myData<-loadDataFile(inPath,filename[1])
#data are returned in a list myData$fingerPrints
# myData$realTime
# myData$ChemData
#pull out the column of chem data of interest
ChemConc<-as.matrix(myData$ChemData[,chem]) #take out the only one we care about
fp<-cbind(myData$fingerPrints,myData$realTime,as.matrix(ChemConc)) #bind the two matrices together to determine complete cases
fp<-fp[complete.cases(fp[,2:dim(fp)[2]]),] #just keep the fingerprints that have analytical data
ChemConc<-as.matrix(fp[,dim(fp)[2]]) #pull chem back out
fp<-fp[,-dim(fp)[2]] #pull off the fingerprint
realTime<-as.matrix(fp[,dim(fp)[2]]) #pull real time off (now the last column)
fp<-fp[,-dim(fp)[2]] #pop it off the end of the dataframe
Comps<-30
#calculate the PLSR model for the available data
doISubset<-0 #here is a switch to subset or not subset the data
if (doISubset==1){
#subset if you like (comment out if not subset)
subset<-densityDependentSubset(ChemConc,realTime,fp,0.5,TRUE)
if(length(subset$ChemConc)<=39){
Comps<-round(length(subset$ChemConc)*0.5)
}
modelRMSEP<-RMSEP(plsr(subset$ChemConc~data.matrix(subset$fingerPrint),ncomp=Comps,validation="CV"))
}
if (doISubset==0){
modelRMSEP<-RMSEP(plsr(ChemConc~data.matrix(fp),ncomp=30,validation="CV"))
#and pull out the RMSEP values for each component
}#end if (doIsubset)
#*****Thishas a probelm the comlet cases might get the x axis off by removing a value, but not keeping the row count correct.
#could be fixed by 1.)avoiding nans in the RMSEP
#or adding a column of index variables so the complete cases keeps the index even though it throws out a row of bad data
rmsepIndex<-complete.cases(as.matrix(modelRMSEP$val[2,1,]))
rmsepValues<-modelRMSEP$val[2,1,rmsepIndex]
nComps<-min(which(rmsepValues==min(rmsepValues)))-1
plot(0:(length(rmsepValues)-1),rmsepValues,xlab=c("number of comoponents"),ylab=c("RMSEP"),main=paste(filename[fn], Chem[chem],sep=" " ))
points(nComps,rmsepValues[nComps+1],col="green")
if(nComps>15){
nComps2<-which(abs(diff(rmsepValues))==min(abs(diff(rmsepValues))))
points(nComps2,rmsepValues[nComps2],col="red")
}
#find a place where the diff is minimized try that as a logical breakpoint
#}
Components[counter,1]<-nComps
Components[counter,2]<-fn
Components[counter,3]<-chem
if(nComps>15){
Components[counter,4]<-nComps2
}
counter<-counter+1
#}
} #for each file
dev.off()
} #for each chemical |
## Load the full dataset.  The source file is ";"-separated and encodes
## missing values as the literal string "?", so map those to NA on read;
## without na.strings the coercion below produces NAs with a warning.
## (Renamed `data` -> `power_data` to stop shadowing base R's data().)
power_data <- read.table("household_power_consumption.txt", header=TRUE, sep=";",
                         stringsAsFactors=FALSE, dec=".", na.strings="?")
## Subset to the two days of interest (dates are stored as d/m/yyyy strings)
subSetData <- power_data[power_data$Date %in% c("1/2/2007","2/2/2007") ,]
## Extract the variable of interest as a numeric vector
globalActivePower <- as.numeric(subSetData$Global_active_power)
## Render the histogram to a 480x480 PNG (device closed by dev.off() below)
png("plot1.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off() | /plot1.R | no_license | musakarim/ExData_Plotting1 | R | false | false | 487 | r | ##loading data to R
## Load the full dataset into R (";"-separated, header row present)
data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
## Subset the loaded data to the two required dates (d/m/yyyy strings)
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
## Extract the variable of interest; non-numeric entries coerce to NA
globalActivePower <- as.numeric(subSetData$Global_active_power)
## Open a 480x480 PNG device; it is closed by the dev.off() below
png("plot1.png", width=480, height=480)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off() |
#' Create a call by "hand"
#'
#' @param f Function to call: either a string, a symbol or a quoted call.
#' @param ...,.args Arguments to the call, supplied either individually or
#'   in a list.
#' @export
#' @examples
#' # f can either be a string, a symbol or a call
#' call_new("f", a = 1)
#' call_new(quote(f), a = 1)
#' call_new(quote(f()), a = 1)
#'
#' # Can supply arguments individually or in a list
#' call_new(quote(f), a = 1, b = 2)
#' call_new(quote(f), .args = list(a = 1, b = 2))
call_new <- function(f, ..., .args = list()) {
  callee <- f
  if (is.character(callee)) {
    if (length(callee) != 1) {
      stop("Character `f` must be length 1", call. = FALSE)
    }
    # A string names the callee; convert it to a symbol
    callee <- as.name(callee)
  }
  # c() flattens a quoted call into its components, so a `quote(f())` head
  # contributes just the callee, and the argument lists follow in order.
  as.call(c(callee, list(...), as.list(.args)))
}
#' Modify the arguments of a call.
#'
#' @param call A call to modify. It is first standardised with
#' \code{\link{call_standardise}}.
#' @param env Environment in which to look up call value.
#' @param new_args A named list of expressions (constants, names or calls)
#' used to modify the call. Use \code{NULL} to remove arguments.
#' @export
#' @examples
#' call <- quote(mean(x, na.rm = TRUE))
#' call_standardise(call)
#'
#' # Modify an existing argument
#' call_modify(call, list(na.rm = FALSE))
#' call_modify(call, list(x = quote(y)))
#'
#' # Remove an argument
#' call_modify(call, list(na.rm = NULL))
#'
#' # Add a new argument
#' call_modify(call, list(trim = 0.1))
#'
#' # Add an explicit missing argument
#' call_modify(call, list(na.rm = quote(expr = )))
call_modify <- function(call, new_args, env = parent.frame()) {
  stopifnot(is.call(call), is.list(new_args))
  # Standardise first so every existing argument can be matched by name
  call <- call_standardise(call, env)
  if (!all(has_names(new_args))) {
    stop("All new arguments must be named", call. = FALSE)
  }
  # Named assignment replaces an existing argument, appends a new one,
  # or (with a NULL value) removes it
  for (nm in names(new_args)) {
    call[[nm]] <- new_args[[nm]]
  }
  call
}
#' @rdname call_modify
#' @export
call_standardise <- function(call, env = parent.frame()) {
  stopifnot(is_call(call))
  # Resolve the callee so match.call() can see its formal argument names
  fun <- eval(call[[1]], env)
  if (is.primitive(fun)) {
    # Primitives have no conventional formals for match.call() to use
    return(call)
  }
  match.call(fun, call)
}
| /R/call.R | no_license | hadley/lazyeval | R | false | false | 2,114 | r | #' Create a call by "hand"
#'
#' @param f Function to call. For \code{make_call}, either a string, a symbol
#' or a quoted call. For \code{do_call}, a bare function name or call.
#' @param ...,.args Arguments to the call either in or out of a list
#' @export
#' @examples
#' # f can either be a string, a symbol or a call
#' call_new("f", a = 1)
#' call_new(quote(f), a = 1)
#' call_new(quote(f()), a = 1)
#'
#' #' Can supply arguments individually or in a list
#' call_new(quote(f), a = 1, b = 2)
#' call_new(quote(f), .args = list(a = 1, b = 2))
call_new <- function(f, ..., .args = list()) {
  if (is.character(f)) {
    if (length(f) != 1) {
      stop("Character `f` must be length 1", call. = FALSE)
    }
    # A string names the function; convert it to a symbol
    f <- as.name(f)
  }
  # Individually-supplied arguments come first, then the list form
  args <- c(list(...), as.list(.args))
  # c() flattens a quoted call `f` into its components, so the callee
  # always ends up as the head of the new call
  as.call(c(f, args))
}
#' Modify the arguments of a call.
#'
#' @param call A call to modify. It is first standardised with
#' \code{\link{call_standardise}}.
#' @param env Environment in which to look up call value.
#' @param new_args A named list of expressions (constants, names or calls)
#' used to modify the call. Use \code{NULL} to remove arguments.
#' @export
#' @examples
#' call <- quote(mean(x, na.rm = TRUE))
#' call_standardise(call)
#'
#' # Modify an existing argument
#' call_modify(call, list(na.rm = FALSE))
#' call_modify(call, list(x = quote(y)))
#'
#' # Remove an argument
#' call_modify(call, list(na.rm = NULL))
#'
#' # Add a new argument
#' call_modify(call, list(trim = 0.1))
#'
#' # Add an explicit missing argument
#' call_modify(call, list(na.rm = quote(expr = )))
call_modify <- function(call, new_args, env = parent.frame()) {
  stopifnot(is.call(call), is.list(new_args))
  # Standardise first so every existing argument can be matched by name
  call <- call_standardise(call, env)
  if (!all(has_names(new_args))) {
    stop("All new arguments must be named", call. = FALSE)
  }
  # Named assignment replaces an existing argument, appends a new one,
  # or (with a NULL value) removes it
  for (nm in names(new_args)) {
    call[[nm]] <- new_args[[nm]]
  }
  call
}
#' @rdname call_modify
#' @export
call_standardise <- function(call, env = parent.frame()) {
  stopifnot(is_call(call))
  # Resolve the callee so match.call() can see its formal argument names
  f <- eval(call[[1]], env)
  if (is.primitive(f)) return(call) # primitives cannot be standardised
  match.call(f, call)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpolation-class.R
\name{interpolate.VolSurface}
\alias{interpolate.VolSurface}
\title{Interpolate a \code{VolSurface} object.}
\usage{
\method{interpolate}{VolSurface}(x, at, ...)
}
\arguments{
\item{x}{object of class \code{VolSurface} to be interpolated.}
\item{at}{indicates the coordinates at which the interpolation is performed.
\code{at} should be given as a \code{\link[tibble:tibble]{tibble::tibble()}} with two columns named
\code{maturity} and \code{smile}, e.g. \code{tibble(maturity = c(1, 2), smile = c(72, 92))}.}
\item{...}{unused in this model.}
}
\value{
\code{numeric} vector with length equal to the number of rows of \code{at}.
}
\description{
This method is used to interpolate a \code{VolSurface} object at multiple points of
the plane. The interpolation depends on the type of the surface, i.e. whether the vols
are given by strike, delta or moneyness.
}
\examples{
x <- build_vol_surface()
at <- tibble::tibble(
maturity = c(as.Date("2020-03-31"), as.Date("2021-03-31")),
smile = c(40, 80)
)
interpolate(x, at)
}
\seealso{
Other interpolate functions: \code{\link{interpolate.ZeroCurve}},
\code{\link{interpolate_dfs}},
\code{\link{interpolate_zeros}},
\code{\link{interpolate}}
}
\concept{interpolate functions}
| /man/interpolate.VolSurface.Rd | no_license | sefhamada/fmbasics | R | false | true | 1,317 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpolation-class.R
\name{interpolate.VolSurface}
\alias{interpolate.VolSurface}
\title{Interpolate a \code{VolSurface} object.}
\usage{
\method{interpolate}{VolSurface}(x, at, ...)
}
\arguments{
\item{x}{object of class \code{VolSurface} to be interpolated.}
\item{at}{indicates the coordinates at which the interpolation is performed.
\code{at} should be given as a \code{\link[tibble:tibble]{tibble::tibble()}} with two column names named
\code{maturity} and \code{smile}. e.g. list(maturity = c(1, 2), smile = c(72, 92)).}
\item{...}{unused in this model.}
}
\value{
\code{numeric} vector with length equal to the number of rows of \code{at}.
}
\description{
This method is used to interpolate a \code{VolSurface} object at multiple points of
the plane. The interpolation depends on the type of the surface, if the vols are
given by strikes, delta, moneyness.
}
\examples{
x <- build_vol_surface()
at <- tibble::tibble(
maturity = c(as.Date("2020-03-31"), as.Date("2021-03-31")),
smile = c(40, 80)
)
interpolate(x, at)
}
\seealso{
Other interpolate functions: \code{\link{interpolate.ZeroCurve}},
\code{\link{interpolate_dfs}},
\code{\link{interpolate_zeros}},
\code{\link{interpolate}}
}
\concept{interpolate functions}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rte-api.R
\name{get_hydraulique_fil_de_l_eau_eclusee}
\alias{get_hydraulique_fil_de_l_eau_eclusee}
\title{Get hydraulique data from eco2mix}
\usage{
get_hydraulique_fil_de_l_eau_eclusee(from = NULL, to = NULL,
user = NULL, proxy_pwd = NULL)
}
\arguments{
\item{from}{date from which to retrieve data; if \code{NULL}, set to the previous Saturday before the previous Friday.}
\item{to}{date until which to recover data; if \code{NULL}, set to the previous Friday.}
\item{user}{Username (NNI) for proxy if needed.}
\item{proxy_pwd}{Password for proxy if needed.}
}
\value{
a \code{data.table}
}
\description{
Get hydraulique data from eco2mix
}
\examples{
\dontrun{
fil_eau <- get_hydraulique_fil_de_l_eau_eclusee(
user = "NNI", proxy_pwd = "PASSWORD"
)
}
}
| /man/get_hydraulique_fil_de_l_eau_eclusee.Rd | no_license | rte-antares-rpackage/antaresWeeklyMargin | R | false | true | 829 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rte-api.R
\name{get_hydraulique_fil_de_l_eau_eclusee}
\alias{get_hydraulique_fil_de_l_eau_eclusee}
\title{Get hydraulique data drom eco2mix}
\usage{
get_hydraulique_fil_de_l_eau_eclusee(from = NULL, to = NULL,
user = NULL, proxy_pwd = NULL)
}
\arguments{
\item{from}{date from which to retrieve data, if \code{NULL} set to previous saturday before previous friday.}
\item{to}{date until which to recover data, if \code{NULL} set to previous friday.}
\item{user}{Username (NNI) for proxy if needed.}
\item{proxy_pwd}{Password for proxy if needed.}
}
\value{
a \code{data.table}
}
\description{
Get hydraulique data drom eco2mix
}
\examples{
\dontrun{
fil_eau <- get_hydraulique_fil_de_l_eau_eclusee(
user = "NNI", proxy_pwd = "PASSWORD"
)
}
}
|
# Example script for tempR::tcata.line.plot (Temporal Check-All-That-Apply
# curves). NOTE(review): 'syrah', 'ojtcata', get.smooth(), get.decluttered(),
# get.times() and make.palettes() all come from the tempR package.
library(tempR)
### Name: tcata.line.plot
### Title: Temporal Check-All-That-Apply (TCATA) curve
### Aliases: tcata.line.plot
### ** Examples
# example using 'syrah' data set
# Take every 6th row starting at row 3 (one sip/sample slice), drop the first
# four metadata columns, and transpose so rows become attributes over time.
low1 <- t(syrah[seq(3, 1026, by = 6), -c(1:4)])
# Column labels are the time points (presumably seconds) 10..180.
colnames(low1) <- 10:180
tcata.line.plot(get.smooth(low1), lwd = 2, main = "Low-ethanol wine (Sip 1)")
# example using 'ojtcata' data set
data(ojtcata)
# Citation counts per sample x attribute, summed over consumers.
x <- aggregate(ojtcata[, -c(1:4)], list(samp = ojtcata$samp, attribute = ojtcata$attribute), sum)
# Counts for sample 1 only (drop the samp/attribute id columns).
p.1.checked <- x[x$samp == 1, -c(1:2)]
# Number of evaluations of sample 1 = number of unique consumers.
p.1.eval <- length(unique(ojtcata$cons))
# Counts pooled over all the OTHER samples, per attribute.
p.not1.checked <- aggregate(x[, -c(1:2)], list(attribute = x$attribute), sum)[, -1]
p.not1.eval <- length(unique(ojtcata$cons)) * (length(unique(ojtcata$samp)) - 1)
# reference lines for contrast products
p.1.refline <- p.not1.checked / p.not1.eval
# decluttering matrix corresponds to the dimensions of p.1.refline
p.1.declutter <- matrix(get.decluttered(x = unlist(p.1.checked), n.x = p.1.eval,
y = unlist(p.not1.checked), n.y = p.not1.eval),
nrow = nrow(p.1.checked))
# Time axis parsed from the remaining column names.
times <- get.times(colnames(x)[-c(1:2)])
attributes <- unique(x$attribute)
# One line colour (plus a lighter highlight colour) per attribute.
palettes <- make.palettes(length(attributes))
tcata.line.plot(p.1.checked, n = p.1.eval, attributes = attributes, times = times,
reference = p.1.refline, ref.lty = 3, declutter = p.1.declutter,
highlight = TRUE, highlight.lwd = 4,
line.col = palettes$pal, highlight.col = palettes$pal.light,
main = "Sample 1", height = 7, width = 11, legend.cex = 0.7)
| /data/genthat_extracted_code/tempR/examples/tcata.line.plot.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,588 | r | library(tempR)
### Name: tcata.line.plot
### Title: Temporal Check-All-That-Apply (TCATA) curve
### Aliases: tcata.line.plot
### ** Examples
# example using 'syrah' data set
low1 <- t(syrah[seq(3, 1026, by = 6), -c(1:4)])
colnames(low1) <- 10:180
tcata.line.plot(get.smooth(low1), lwd = 2, main = "Low-ethanol wine (Sip 1)")
# example using 'ojtcata' data set
data(ojtcata)
x <- aggregate(ojtcata[, -c(1:4)], list(samp = ojtcata$samp, attribute = ojtcata$attribute), sum)
p.1.checked <- x[x$samp == 1, -c(1:2)]
p.1.eval <- length(unique(ojtcata$cons))
p.not1.checked <- aggregate(x[, -c(1:2)], list(attribute = x$attribute), sum)[, -1]
p.not1.eval <- length(unique(ojtcata$cons)) * (length(unique(ojtcata$samp)) - 1)
# reference lines for contrast products
p.1.refline <- p.not1.checked / p.not1.eval
# decluttering matrix corresponds to the dimensions of p.1.refline
p.1.declutter <- matrix(get.decluttered(x = unlist(p.1.checked), n.x = p.1.eval,
y = unlist(p.not1.checked), n.y = p.not1.eval),
nrow = nrow(p.1.checked))
times <- get.times(colnames(x)[-c(1:2)])
attributes <- unique(x$attribute)
palettes <- make.palettes(length(attributes))
tcata.line.plot(p.1.checked, n = p.1.eval, attributes = attributes, times = times,
reference = p.1.refline, ref.lty = 3, declutter = p.1.declutter,
highlight = TRUE, highlight.lwd = 4,
line.col = palettes$pal, highlight.col = palettes$pal.light,
main = "Sample 1", height = 7, width = 11, legend.cex = 0.7)
|
# Find the closest pair of points by exhaustive pairwise comparison.
#
# Args:
#   x, y: numeric coordinate vectors of equal length (at least 2 points).
#
# Prints the closest pair, and returns a named numeric vector
# c(distance, x1.x, y1.y, x2.x, y2.y). If several pairs tie at the minimum
# distance, a message is printed and the first such pair is reported.
closestPair <- function(x, y)
{
  stopifnot(length(x) == length(y), length(x) >= 2)
  # Euclidean distance between two points packed as c(x1, y1, x2, y2).
  distancev <- function(pointsv)
  {
    x1 <- pointsv[1]
    y1 <- pointsv[2]
    x2 <- pointsv[3]
    y2 <- pointsv[4]
    sqrt((x1 - x2)^2 + (y1 - y2)^2)
  }
  # All index pairs (i, j) with i < j, one pair per row.
  pairstocompare <- t(combn(length(x), 2))
  pointsv <- cbind(x[pairstocompare[, 1]], y[pairstocompare[, 1]],
                   x[pairstocompare[, 2]], y[pairstocompare[, 2]])
  # Append each pair's distance as a third column.
  pairstocompare <- cbind(pairstocompare, apply(pointsv, 1, distancev))
  # BUG FIX: the original subset omitted the row comma
  # (pairstocompare[logical]), which flattens the matrix to a vector and
  # recycles the mask, selecting the wrong elements. drop = FALSE keeps the
  # result a matrix even when exactly one pair attains the minimum.
  minrow <- pairstocompare[pairstocompare[, 3] == min(pairstocompare[, 3]), , drop = FALSE]
  if (nrow(minrow) > 1) {
    print("More than one point at this distance!")
  }
  minrow <- minrow[1, ]
  cat("The closest pair is:\n\tPoint 1: ", x[minrow[1]], ", ", y[minrow[1]],
      "\n\tPoint 2: ", x[minrow[2]], ", ", y[minrow[2]],
      "\n\tDistance: ", minrow[3], "\n", sep = "")
  c(distance = minrow[3], x1.x = x[minrow[1]], y1.y = y[minrow[1]],
    x2.x = x[minrow[2]], y2.y = y[minrow[2]])
}
| /Programming Language Detection/Experiment-2/Dataset/Train/R/closest-pair-problem-2.r | no_license | dlaststark/machine-learning-projects | R | false | false | 904 | r | closestPair<-function(x,y)
{
distancev <- function(pointsv)
{
x1 <- pointsv[1]
y1 <- pointsv[2]
x2 <- pointsv[3]
y2 <- pointsv[4]
sqrt((x1 - x2)^2 + (y1 - y2)^2)
}
pairstocompare <- t(combn(length(x),2))
pointsv <- cbind(x[pairstocompare[,1]],y[pairstocompare[,1]],x[pairstocompare[,2]],y[pairstocompare[,2]])
pairstocompare <- cbind(pairstocompare,apply(pointsv,1,distancev))
minrow <- pairstocompare[pairstocompare[,3] == min(pairstocompare[,3])]
if (!is.null(nrow(minrow))) {print("More than one point at this distance!"); minrow <- minrow[1,]}
cat("The closest pair is:\n\tPoint 1: ",x[minrow[1]],", ",y[minrow[1]],
"\n\tPoint 2: ",x[minrow[2]],", ",y[minrow[2]],
"\n\tDistance: ",minrow[3],"\n",sep="")
c(distance=minrow[3],x1.x=x[minrow[1]],y1.y=y[minrow[1]],x2.x=x[minrow[2]],y2.y=y[minrow[2]])
}
|
# Build a download catalog of CMS Medicare Advantage (MA) / prescription drug
# plan (PDP) state-county penetration files.
#
# Args:
#   data_name:  dataset label (unused in the body; kept for the lodown
#               catalog-function convention).
#   output_dir: directory where the per-contract-type .rds files will be saved.
#   ...:        ignored.
#
# Returns: a data.frame with one row per report period and columns
#   output_filename, full_url, year_month, ma_pd, ordered by year_month.
get_catalog_scpenetration <-
function( data_name = "scpenetration" , output_dir , ... ){
catalog <- NULL
# One pass per contract type; each has its own CMS listing page.
for( ma_pd in c( "MA" , "PDP" ) ){
pene_url <- paste0( "https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/MCRAdvPartDEnrolData/" , ma_pd , "-State-County-Penetration.html" )
# Report-period labels come from the first HTML table on the listing page.
all_dates <- rvest::html_table( xml2::read_html( pene_url ) )
all_dates <- all_dates[[1]][ , "Report Period" ]
# Every anchor inside a table cell links to a period-specific detail page.
all_links <- rvest::html_nodes( xml2::read_html( pene_url ) , xpath = '//td/a' )
prefix <- "https://www.cms.gov/"
# Turn the serialized <a href="..."> node text into absolute URLs by
# replacing the opening tag with the site prefix and stripping the tail.
all_links <- gsub( '<a href=\"' , prefix , all_links )
all_links <- gsub( "\">(.*)" , "" , all_links )
this_catalog <-
data.frame(
output_filename = paste0( output_dir , "/" , tolower( ma_pd ) , "_sc penetration.rds" ) ,
full_url = as.character( all_links ) ,
year_month = all_dates ,
stringsAsFactors = FALSE
)
# Visit each detail page and swap full_url for the actual .zip download link.
for( this_row in seq( nrow( this_catalog ) ) ){
link_text <- readLines( this_catalog[ this_row , 'full_url' ] )
link_line <- grep( "zip" , link_text , value = TRUE )
# Root-relative hrefs are made absolute first (inner gsub), then everything
# before the href attribute is dropped (outer gsub) -- order matters here.
link_line <- gsub( '(.*) href=\"' , "" , gsub( '(.*) href=\"/' , prefix , link_line ) )
this_catalog[ this_row , 'full_url' ] <- gsub( '\">(.*)' , "" , link_line )
}
this_catalog$ma_pd <- ma_pd
catalog <- rbind( catalog , this_catalog )
}
# Combined catalog, ordered chronologically by report period.
catalog[ order( catalog$year_month ) , ]
}
# Download, import, clean and save every entry of an scpenetration catalog.
#
# Args:
#   data_name: dataset label, used only in the progress message.
#   catalog:   data.frame from get_catalog_scpenetration(); its case_count
#              column is filled in as files are processed.
#   ...:       ignored.
#
# Returns: the catalog (with case_count populated). Side effects: writes one
# .rds file per unique output_filename.
lodown_scpenetration <-
function( data_name = "scpenetration" , catalog , ... ){
# On any failure below, print the catalog so progress can be inspected;
# the handler is cancelled (on.exit() with no args) on successful completion.
on.exit( print( catalog ) )
tf <- tempfile()
unique_savefiles <- unique( catalog$output_filename )
for( this_savefile in unique_savefiles ){
# All catalog rows (report periods) that feed this one output file.
these_entries <- catalog[ catalog$output_filename == this_savefile , ]
this_result <- NULL
for ( i in seq_len( nrow( these_entries ) ) ){
# download the file
cachaca( these_entries[ i , "full_url" ] , tf , mode = 'wb' )
# extract the contents of the zipped file
# into the current year-month-specific directory
# and (at the same time) create an object called
# `unzipped_files` that contains the paths on
# your local computer to each of the unzipped files
unzipped_files <- unzip_warn_fail( tf , exdir = np_dirname( these_entries[ i , 'output_filename' ] ) )
# Import the state-county CSV (matched by filename) from the archive.
x <- data.frame( readr::read_csv( grep( "State_County" , unzipped_files , value = TRUE ) , guess_max = 100000 ) )
# Tag every record with the report period it came from.
x$year_month <- these_entries[ i , 'year_month' ]
x <- unique( x )
# Normalize column names: lower case, dots to underscores.
names( x ) <- tolower( names( x ) )
names( x ) <- gsub( "\\." , "_" , names( x ) )
# Strip thousands separators / percent signs before numeric conversion.
x$eligibles <- as.numeric( gsub( "," , "" , x$eligibles ) )
x$enrolled <- as.numeric( gsub( "," , "" , x$enrolled ) )
x$penetration <- as.numeric( gsub( "\\%" , "" , x$penetration ) )
# Accumulate all report periods for this output file into one table.
this_result <- rbind( this_result , x )
# add the number of records to the catalog
catalog[ catalog$output_filename == this_savefile , ][ i , 'case_count' ] <- nrow( x )
# delete the temporary files
file.remove( tf , unzipped_files )
}
saveRDS( this_result , file = this_savefile )
cat( paste0( data_name , " catalog entry " , which( this_savefile == unique_savefiles ) , " of " , length( unique_savefiles ) , " stored at '" , this_savefile , "'\r\n\n" ) )
}
on.exit()
catalog
}
| /R/scpenetration.R | no_license | yluair/lodown | R | false | false | 3,317 | r | get_catalog_scpenetration <-
function( data_name = "scpenetration" , output_dir , ... ){
catalog <- NULL
for( ma_pd in c( "MA" , "PDP" ) ){
pene_url <- paste0( "https://www.cms.gov/Research-Statistics-Data-and-Systems/Statistics-Trends-and-Reports/MCRAdvPartDEnrolData/" , ma_pd , "-State-County-Penetration.html" )
all_dates <- rvest::html_table( xml2::read_html( pene_url ) )
all_dates <- all_dates[[1]][ , "Report Period" ]
all_links <- rvest::html_nodes( xml2::read_html( pene_url ) , xpath = '//td/a' )
prefix <- "https://www.cms.gov/"
all_links <- gsub( '<a href=\"' , prefix , all_links )
all_links <- gsub( "\">(.*)" , "" , all_links )
this_catalog <-
data.frame(
output_filename = paste0( output_dir , "/" , tolower( ma_pd ) , "_sc penetration.rds" ) ,
full_url = as.character( all_links ) ,
year_month = all_dates ,
stringsAsFactors = FALSE
)
for( this_row in seq( nrow( this_catalog ) ) ){
link_text <- readLines( this_catalog[ this_row , 'full_url' ] )
link_line <- grep( "zip" , link_text , value = TRUE )
link_line <- gsub( '(.*) href=\"' , "" , gsub( '(.*) href=\"/' , prefix , link_line ) )
this_catalog[ this_row , 'full_url' ] <- gsub( '\">(.*)' , "" , link_line )
}
this_catalog$ma_pd <- ma_pd
catalog <- rbind( catalog , this_catalog )
}
catalog[ order( catalog$year_month ) , ]
}
lodown_scpenetration <-
function( data_name = "scpenetration" , catalog , ... ){
on.exit( print( catalog ) )
tf <- tempfile()
unique_savefiles <- unique( catalog$output_filename )
for( this_savefile in unique_savefiles ){
these_entries <- catalog[ catalog$output_filename == this_savefile , ]
this_result <- NULL
for ( i in seq_len( nrow( these_entries ) ) ){
# download the file
cachaca( these_entries[ i , "full_url" ] , tf , mode = 'wb' )
# extract the contents of the zipped file
# into the current year-month-specific directory
# and (at the same time) create an object called
# `unzipped_files` that contains the paths on
# your local computer to each of the unzipped files
unzipped_files <- unzip_warn_fail( tf , exdir = np_dirname( these_entries[ i , 'output_filename' ] ) )
x <- data.frame( readr::read_csv( grep( "State_County" , unzipped_files , value = TRUE ) , guess_max = 100000 ) )
x$year_month <- these_entries[ i , 'year_month' ]
x <- unique( x )
names( x ) <- tolower( names( x ) )
names( x ) <- gsub( "\\." , "_" , names( x ) )
x$eligibles <- as.numeric( gsub( "," , "" , x$eligibles ) )
x$enrolled <- as.numeric( gsub( "," , "" , x$enrolled ) )
x$penetration <- as.numeric( gsub( "\\%" , "" , x$penetration ) )
this_result <- rbind( this_result , x )
# add the number of records to the catalog
catalog[ catalog$output_filename == this_savefile , ][ i , 'case_count' ] <- nrow( x )
# delete the temporary files
file.remove( tf , unzipped_files )
}
saveRDS( this_result , file = this_savefile )
cat( paste0( data_name , " catalog entry " , which( this_savefile == unique_savefiles ) , " of " , length( unique_savefiles ) , " stored at '" , this_savefile , "'\r\n\n" ) )
}
on.exit()
catalog
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primaryKey.R
\name{minimumPK}
\alias{minimumPK}
\title{Find the minimum fields which make a valid Primary Key}
\usage{
minimumPK(
x,
fieldOrder = character(),
excludeFields = character(),
maxFields = ncol(x),
minFieldSet = character()
)
}
\arguments{
\item{x}{a \code{data.frame} equivalent to a table (no duplicated registers, \code{unique(x)}).}
\item{fieldOrder}{a character vector with the sorted preferences in the fields part of the PK.}
\item{excludeFields}{columns which will be excluded from the potential primary key. Can be the index or the column
names.}
\item{maxFields}{maximum number of fields in the primary key.}
\item{minFieldSet}{columns that will be forced to be included in the primary key.}
}
\description{
Find the minimum fields which make a valid Primary Key
}
| /man/minimumPK.Rd | no_license | jmaspons/dbTools | R | false | true | 877 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/primaryKey.R
\name{minimumPK}
\alias{minimumPK}
\title{Find the minimum fields which make a valid Primary Key}
\usage{
minimumPK(
x,
fieldOrder = character(),
excludeFields = character(),
maxFields = ncol(x),
minFieldSet = character()
)
}
\arguments{
\item{x}{a \code{data.frame} equivalent to a table (no duplicated registers, \code{unique(x)}).}
\item{fieldOrder}{a character vector with the sorted preferences in the fields part of the PK.}
\item{excludeFields}{columns which will be excluded from the potential primary key. Can be the index or the column
names.}
\item{maxFields}{maximum number of fields in th primary key.}
\item{minFieldSet}{columns that will be forced to be included in the primary key.}
}
\description{
Find the minimum fields which make a valid Primary Key
}
|
## Path to the file holding the household power consumption data
file <- ".\\exdata_data_household_power_consumption\\household_power_consumption.txt"

## Read the file (all columns as character; converted below where needed)
power <- read.table(file, sep = ";", header = TRUE, colClasses = "character")

## Subset power into partialPower for the days Feb 1, 2007 and Feb 2, 2007
partialPower <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]

## Date-time format string. FIX: the original used "%X", whose meaning is
## locale-dependent; "%H:%M:%S" parses the HH:MM:SS times unambiguously.
format <- "%d/%m/%Y %H:%M:%S"

## Combine the Date and Time columns into a POSIXct column DateTime
## (POSIXct stores in a data.frame more cleanly than strptime's POSIXlt)
partialPower$DateTime <-
  as.POSIXct(with(partialPower, strptime(paste(Date, Time), format)))

## Convert the three Sub_metering columns to numbers
for (col in c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")) {
  partialPower[[col]] <- as.numeric(partialPower[[col]])
}

png("plot3.png")

## Draw an empty plot whose y-range covers ALL three sub-metering series,
## then overlay the lines. FIX: the original sized the axes from the single
## series with the largest maximum, which could clip another series' minimum.
with(partialPower, {
  plot(DateTime, Sub_metering_1, type = "n",
       ylim = range(Sub_metering_1, Sub_metering_2, Sub_metering_3),
       xlab = "", ylab = "Energy sub metering")
  lines(DateTime, Sub_metering_1, col = "black")
  lines(DateTime, Sub_metering_2, col = "red")
  lines(DateTime, Sub_metering_3, col = "blue")
})

legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1, 1), col = c("black", "red", "blue"))

dev.off()
dev.off() | /plot3.R | no_license | Cooter/ExData_Plotting1 | R | false | false | 1,817 | r | ## path to file holding data file
file <- ".\\exdata_data_household_power_consumption\\household_power_consumption.txt"
## read the file
power <- read.table(file, sep=";", header=TRUE, colClasses="character")
## Subset power into partialPower for days Feb 1, 2007 and Feb 2, 2007
partialPower <- power[power$Date == "1/2/2007" | power$Date == "2/2/2007",]
## Create date, time format string
format <- "%d/%m/%Y %X"
## Combine Date and Time columns in partialPower into a POSIXct column
## DateTime
partialPower$DateTime <- with(partialPower, strptime(paste(Date, Time), format))
## Convert Sub_metering columns to numbers
partialPower$Sub_metering_1 <- as.numeric(partialPower$Sub_metering_1)
partialPower$Sub_metering_2 <- as.numeric(partialPower$Sub_metering_2)
partialPower$Sub_metering_3 <- as.numeric(partialPower$Sub_metering_3)
## Find maximums
max1 <- max(partialPower$Sub_metering_1)
max2 <- max(partialPower$Sub_metering_2)
max3 <- max(partialPower$Sub_metering_3)
png("plot3.png")
## Draw empty graph for maximum Sub_metering, so size of plot will hold
## all the data
if(max1 >= max2 && max1 >= max3) {
with(partialPower, plot(DateTime, Sub_metering_1, type="n",
xlab="", ylab="Energy sub metering"))
} else if(max2 >= max3) {
with(partialPower, plot(DateTime, Sub_metering_2,type="n",
xlab="", ylab="Energy sub metering"))
} else {
with(partialPower, plot(DateTime, Sub_metering_3, type="n",
xlab="", ylab="Energy sub metering"))
}
with(partialPower, {
lines(DateTime, Sub_metering_1, col="black")
lines(DateTime, Sub_metering_2, col="red")
lines(DateTime, Sub_metering_3, col="blue")
})
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=c(1,1,1), col=c("black", "red", "blue"))
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wind_functions2.R
\name{cost.FMGS}
\alias{cost.FMGS}
\alias{flow.dispersion}
\title{Compute flow-based cost or conductance}
\usage{
cost.FMGS(wind.direction, wind.speed, target, type = "active")
flow.dispersion(x, fun = cost.FMGS, output = "transitionLayer", ...)
}
\arguments{
\item{wind.direction}{A vector or scalar containing wind directions.}
\item{wind.speed}{A vector or scalar containing wind speeds.}
\item{target}{direction of the target cell}
\item{type}{Could be either "passive" or "active". In "passive" mode,
movement against the flow direction is not allowed (deviations from the wind
direction higher than 90 degrees). In "active" mode, the movement can go
against the flow direction, by increasing the cost.}
\item{x}{RasterStack object with layers obtained from wind2raster
function ("rWind" package) with direction and speed flow values.}
\item{fun}{A function to compute the cost to move between cells. The default
is \code{cost.FMGS} from Felicísimo et al. (2008), see details.}
\item{output}{This argument allows to select different kinds of output. "raw"
mode creates a matrix (class "dgCMatrix") with transition costs between all
cells in the raster. "transitionLayer" creates a TransitionLayer object with
conductance values to be used with "gdistance" package.}
\item{...}{Further arguments passed to or from other methods.}
}
\value{
In "transitionLayer" output, the function returns conductance values
(1/cost)to move between all cells in a raster having into account flow speed
and direction obtained from wind.fit function("rWind" package). As wind or
sea currents implies directionality, flow.dispersion produces an anisotropic
conductance matrix (asymmetric). Conductance values are used later to built a
TransitionLayer object from "gdistance" package.
In "raw" output, flow.dispersion creates a sparse Matrix with cost values.
}
\description{
\code{flow.dispersion} computes movement conductance through a flow, either sea
or wind currents. It implements the formula described in Felicísimo et al.
2008:
}
\details{
Cost=(1/Speed)*(HorizontalFactor)
being HorizontalFactor a "function that incrementally penalized angular
deviations from the wind direction" (Felicísimo et al. 2008).
}
\note{
Note that for large data sets, it could take a while. For large study
areas is strongly advised perform the analysis in a remote computer or a
cluster.
}
\examples{
require(gdistance)
data(wind.data)
wind <- wind2raster(wind.data)
Conductance <- flow.dispersion(wind, type = "passive")
transitionMatrix(Conductance)
image(transitionMatrix(Conductance))
}
\references{
Felicísimo, Á. M., Muñoz, J., & González-Solis, J. (2008). Ocean surface
winds drive dynamics of transoceanic aerial movements. PLoS One, 3(8),
e2928.
Jacob van Etten (2017). R Package gdistance: Distances and Routes on
Geographical Grids. Journal of Statistical Software, 76(13), 1-21.
doi:10.18637/jss.v076.i13
}
\seealso{
\code{\link{wind.dl}}, \code{\link{wind2raster}}
}
\author{
Javier Fernández-López; Klaus Schliep; Yurena Arjona
}
\keyword{~anisotropy}
\keyword{~conductance}
| /man/flow.dispersion.Rd | no_license | cran/rWind | R | false | true | 3,167 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wind_functions2.R
\name{cost.FMGS}
\alias{cost.FMGS}
\alias{flow.dispersion}
\title{Compute flow-based cost or conductance}
\usage{
cost.FMGS(wind.direction, wind.speed, target, type = "active")
flow.dispersion(x, fun = cost.FMGS, output = "transitionLayer", ...)
}
\arguments{
\item{wind.direction}{A vector or scalar containing wind directions.}
\item{wind.speed}{A vector or scalar containing wind speeds.}
\item{target}{direction of the target cell}
\item{type}{Could be either "passive" or "active".In "passive" mode,
movement against flow direction is not allowed (deviations from the wind
direction higher than 90). In "active" mode, the movement can go against flow
direction, by increasing the cost.}
\item{x}{RasterStack object with layers obtained from wind2raster
function ("rWind" package) with direction and speed flow values.}
\item{fun}{A function to compute the cost to move between cells. The default
is \code{cost.FMGS} from Felicísimo et al. (2008), see details.}
\item{output}{This argument allows to select different kinds of output. "raw"
mode creates a matrix (class "dgCMatrix") with transition costs between all
cells in the raster. "transitionLayer" creates a TransitionLayer object with
conductance values to be used with "gdistance" package.}
\item{...}{Further arguments passed to or from other methods.}
}
\value{
In "transitionLayer" output, the function returns conductance values
(1/cost)to move between all cells in a raster having into account flow speed
and direction obtained from wind.fit function("rWind" package). As wind or
sea currents implies directionality, flow.dispersion produces an anisotropic
conductance matrix (asymmetric). Conductance values are used later to built a
TransitionLayer object from "gdistance" package.
In "raw" output, flow.dispersion creates a sparse Matrix with cost values.
}
\description{
\code{flow.dispersion} computes movement conductance through a flow either, sea
or wind currents. It implements the formula described in Felícisimo et al.
2008:
}
\details{
Cost=(1/Speed)*(HorizontalFactor)
being HorizontalFactor a "function that incrementally penalized angular
deviations from the wind direction" (Felicísimo et al. 2008).
}
\note{
Note that for large data sets, it could take a while. For large study
areas is strongly advised perform the analysis in a remote computer or a
cluster.
}
\examples{
require(gdistance)
data(wind.data)
wind <- wind2raster(wind.data)
Conductance <- flow.dispersion(wind, type = "passive")
transitionMatrix(Conductance)
image(transitionMatrix(Conductance))
}
\references{
Felicísimo, Á. M., Muñoz, J., & González-Solis, J. (2008). Ocean surface
winds drive dynamics of transoceanic aerial movements. PLoS One, 3(8),
e2928.
Jacob van Etten (2017). R Package gdistance: Distances and Routes on
Geographical Grids. Journal of Statistical Software, 76(13), 1-21.
doi:10.18637/jss.v076.i13
}
\seealso{
\code{\link{wind.dl}}, \code{\link{wind2raster}}
}
\author{
Javier Fernández-López; Klaus Schliep; Yurena Arjona
}
\keyword{~anisotropy}
\keyword{~conductance}
|
## One-time setup: install devtools and the nflscrapR play-by-play scraper.
install.packages("devtools")
library(devtools)
devtools::install_github(repo = "maksimhorowitz/nflscrapR")
library(nflscrapR)
## BUG FIX: dplyr provides %>%, filter() and count(); the original script
## never attached it, so every pipeline below would fail.
library(dplyr)

season_2015 <- season_play_by_play(2015)
summary(season_2015)
head(season_2015)
season_2015$PlayType

# keep only run or pass plays
nspec_season_2015 <- season_2015 %>% filter(PlayType == "Run" | PlayType == "Pass")

# chart teams' offensive tendencies
pass_season_2015 <- nspec_season_2015 %>%
  filter(PlayType == "Pass") %>%
  count(posteam, PassLength, PassLocation, PassOutcome, Receiver)
write.csv(pass_season_2015, "pass distribution 2015.csv")

run_season_2015 <- nspec_season_2015 %>%
  filter(PlayType == "Run") %>%
  count(posteam, RunLocation, RunGap)
write.csv(run_season_2015, "run distribution 2015.csv")

# what teams tend to face defensively
def_pass_season_2015 <- nspec_season_2015 %>%
  filter(PlayType == "Pass") %>%
  count(DefensiveTeam, PassLength, PassLocation, PassOutcome, Receiver)
write.csv(def_pass_season_2015, "def pass distribution 2015.csv")

def_run_season_2015 <- nspec_season_2015 %>%
  filter(PlayType == "Run") %>%
  count(DefensiveTeam, RunLocation, RunGap)
write.csv(def_run_season_2015, "def run distribution 2015.csv")

# season 2014
season_2014 <- season_play_by_play(2014)

# keep only run or pass plays
nspec_season_2014 <- season_2014 %>% filter(PlayType == "Run" | PlayType == "Pass")

pass_season_2014 <- nspec_season_2014 %>%
  filter(PlayType == "Pass") %>%
  count(posteam, PassLength, PassLocation, PassOutcome, Receiver)
write.csv(pass_season_2014, "pass distribution 2014.csv")

## BUG FIX: the original filtered on PlayType == "Pass" here even though this
## object is the 2014 *run* distribution (it counts RunLocation / RunGap).
run_season_2014 <- nspec_season_2014 %>%
  filter(PlayType == "Run") %>%
  count(posteam, RunLocation, RunGap)
write.csv(run_season_2014, "run distribution 2014.csv")

def_pass_season_2014 <- nspec_season_2014 %>%
  filter(PlayType == "Pass") %>%
  count(DefensiveTeam, PassLength, PassLocation, PassOutcome, Receiver)
write.csv(def_pass_season_2014, "def pass distribution 2014.csv")

## BUG FIX: the original assigned to a misspelled name (def_run_reason_2014)
## and then tried to write the undefined def_run_season_2014, which errors.
def_run_season_2014 <- nspec_season_2014 %>%
  filter(PlayType == "Run") %>%
  count(DefensiveTeam, RunLocation, RunGap)
write.csv(def_run_season_2014, "def run distribution 2014.csv")

season_2013 <- season_play_by_play(2013)
season_2012 <- season_play_by_play(2012)
season_2011 <- season_play_by_play(2011)
season_2010 <- season_play_by_play(2010)
season_2009 <- season_play_by_play(2009)
| /nfl scraper.R | no_license | dbrait/NFL | R | false | false | 2,296 | r | install.packages("devtools")
library(devtools)
devtools::install_github(repo="maksimhorowitz/nflscrapR")
library(nflscrapR)
season_2015 <- season_play_by_play(2015)
summary(season_2015)
head(season_2015)
season_2015$PlayType
#just have run or pass
nspec_season_2015 <- season_2015 %>% filter(PlayType == "Run" | PlayType == "Pass")
#chart teams offensive tendencies
pass_season_2015 <- nspec_season_2015 %>% filter(PlayType == "Pass") %>% count(posteam, PassLength, PassLocation, PassOutcome, Receiver)
write.csv(pass_season_2015, "pass distribution 2015.csv")
run_season_2015 <- nspec_season_2015 %>% filter(PlayType == "Run") %>% count(posteam, RunLocation, RunGap)
write.csv(run_season_2015, "run distribution 2015.csv")
#what teams tend to face defensively
def_pass_season_2015 <- nspec_season_2015 %>% filter(PlayType == "Pass") %>% count(DefensiveTeam, PassLength, PassLocation, PassOutcome, Receiver)
write.csv(def_pass_season_2015, "def pass distribution 2015.csv")
def_run_season_2015 <- nspec_season_2015 %>% filter(PlayType == "Run") %>% count(DefensiveTeam, RunLocation, RunGap)
write.csv(def_run_season_2015, "def run distribution 2015.csv")
#season 2014
season_2014 <- season_play_by_play(2014)
#just have run or pass
nspec_season_2014 <- season_2014 %>% filter(PlayType == "Run" | PlayType == "Pass")
pass_season_2014 <- nspec_season_2014 %>% filter(PlayType == "Pass") %>% count(posteam, PassLength, PassLocation, PassOutcome, Receiver)
write.csv(pass_season_2014, "pass distribution 2014.csv")
# BUG FIX: the run distribution must be built from "Run" plays -- the original
# filtered on "Pass" while counting run-location columns.
run_season_2014 <- nspec_season_2014 %>% filter(PlayType == "Run") %>% count(posteam, RunLocation, RunGap)
write.csv(run_season_2014, "run distribution 2014.csv")
def_pass_season_2014 <- nspec_season_2014 %>% filter(PlayType == "Pass") %>% count(DefensiveTeam, PassLength, PassLocation, PassOutcome, Receiver)
write.csv(def_pass_season_2014, "def pass distribution 2014.csv")
# BUG FIX: variable was misspelled "def_run_reason_2014", so the write.csv
# call below referenced an object that did not exist and errored.
def_run_season_2014 <- nspec_season_2014 %>% filter(PlayType == "Run") %>% count(DefensiveTeam, RunLocation, RunGap)
write.csv(def_run_season_2014, "def run distribution 2014.csv")
# Download play-by-play data for earlier seasons (nflscrapR, network access).
season_2013 <- season_play_by_play(2013)
season_2012 <- season_play_by_play(2012)
season_2011 <- season_play_by_play(2011)
season_2010 <- season_play_by_play(2010)
season_2009 <- season_play_by_play(2009)
|
#==========================================================================================#
#==========================================================================================#
# This sub-routine makes the first letter of every entry capitalised, whilst leaving #
# the other letters lower case. #
# Original subroutine comes from the help on toupper. Here I modified it just to deal #
# with NAs. #
#------------------------------------------------------------------------------------------#
capwords <<- function(s, strict = FALSE) {
   #----- Capitalise a single entry, given as the vector of its words. -------------------#
   fix.one = function(words, strict = FALSE){
      #----- Upper-case the initial letter of every word. -------------------------------#
      initial = toupper(substring(words, 1, 1))
      #----- Optionally force the remaining letters to lower case. ----------------------#
      if (strict){
         rest = tolower(substring(words, 2))
      }else{
         rest = substring(words, 2)
      }#end if
      #----- Glue the words back together into a single string. -------------------------#
      return(paste(initial, rest, sep = "", collapse = " "))
   }#end function
   #--------------------------------------------------------------------------------------#
   #----- NA entries would become the literal "NANA" below, so remember where they are. --#
   was.na = is.na(s)
   #----- Split each entry into words and fix the case, keeping names when present. ------#
   out = sapply( X = strsplit(s, split = " "), FUN = fix.one, strict = strict
               , USE.NAMES = !is.null(names(s)))
   #----- Restore the NA entries and return. ---------------------------------------------#
   out[was.na] = NA
   return(out)
}#end function
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# This function deletes spaces from strings, trimming. Default is to trim both #
# sides, but you can also trim the left or the right only. #
#------------------------------------------------------------------------------------------#
trim <<- function(x,side="both"){
   #----- Drop leading whitespace unless only the right side was requested. --------------#
   if (side %in% c("both","left")){
      x = sub("^\\s+", "", x)
   }#end if
   #----- Drop trailing whitespace unless only the left side was requested. --------------#
   if (side %in% c("both","right")){
      x = sub("\\s+$", "", x)
   }#end if
   #----- Any other value of "side" simply returns x untouched. --------------------------#
   return(x)
}#end function trim
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# This function concatenates two strings, but skips the NAs. #
#------------------------------------------------------------------------------------------#
concatenate.message <<- function(x1,x2,sep="; "){
   #----- Pairwise concatenation only makes sense for equal-length vectors. --------------#
   if (length(x1) != length(x2)) stop(" Message vectors must have the same length!")
   #----- Flag which side of each pair actually carries a message. -----------------------#
   has.x1 = ! is.na(x1)
   has.x2 = ! is.na(x2)
   #----- Start from all-NA (both sides missing) and fill in the other three cases. ------#
   ans = rep(NA_character_,times=length(x1))
   ans[has.x1 & (! has.x2)] = x1[has.x1 & (! has.x2)]
   ans[(! has.x1) & has.x2] = x2[(! has.x1) & has.x2]
   ans[has.x1 & has.x2    ] = paste(x1[has.x1 & has.x2],x2[has.x1 & has.x2],sep=sep)
   return(ans)
}#end function
#==========================================================================================#
#==========================================================================================#
| /R-utils/charutils.r | no_license | yjkim1028/ED2.mixed | R | false | false | 4,348 | r | #==========================================================================================#
#==========================================================================================#
# This sub-routine makes the first letter of every entry capitalised, whilst leaving #
# the other letters lower case. #
# Original subroutine comes from the help on toupper. Here I modified it just to deal #
# with NAs. #
#------------------------------------------------------------------------------------------#
capwords <<- function(s, strict = FALSE) {
#----- Function to be applied for each element of s. ----------------------------------#
cap = function(x,strict=FALSE){
#----- First letter is always upper case. -----------------------------------------#
first = toupper(substring(x,1,1))
#----- Check whether to force the remainder to be lower case or not. --------------#
if (strict){
remainder = tolower(substring(x,2))
}else{
remainder = substring(x,2)
}#end if
#----------------------------------------------------------------------------------#
ans = paste(first,remainder,sep="",collapse=" ")
return(ans)
}#end function
#--------------------------------------------------------------------------------------#
#----- Remember which elements were NA, then we reset them to NA. ---------------------#
sel = is.na(s)
#--------------------------------------------------------------------------------------#
#----- Fix case for all dataset. ------------------------------------------------------#
ans = sapply( X=strsplit(s, split = " "),FUN=cap,strict=strict
, USE.NAMES = !is.null(names(s)))
#--------------------------------------------------------------------------------------#
#---- Force NAs to remain NAs. --------------------------------------------------------#
ans[sel] = NA
return(ans)
#--------------------------------------------------------------------------------------#
}#end if
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# This function deletes spaces from strings, trimming. Default is to trim both #
# sides, but you can also trim the left or the right only. #
#------------------------------------------------------------------------------------------#
trim <<- function(x,side="both"){
   #----- side = "both" (default), "left", or "right"; any other value returns x as is. --#
   if (side %in% c("both","left") ) x = sub(pattern="^\\s+",replacement="",x=x)
   if (side %in% c("both","right")) x = sub(pattern="\\s+$",replacement="",x=x)
   return(x)
}#end function trim
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
#==========================================================================================#
# This function concatenates two strings, but skips the NAs. #
#------------------------------------------------------------------------------------------#
concatenate.message <<- function(x1,x2,sep="; "){
   if (length(x1) != length(x2)) stop(" Message vectors must have the same length!")
   #----- Flag which elements carry a message on one side only, or on both sides. --------#
   only.x1 = ( ! is.na(x1) ) & is.na(x2)
   only.x2 = is.na(x1) & ( ! is.na(x2) )
   both    = ( ! is.na(x1) ) & ( ! is.na(x2) )
   #----- Entries that are NA on both sides remain NA in the output. ---------------------#
   full          = rep(NA_character_,times=length(x1))
   full[only.x1] = x1[only.x1]
   full[only.x2] = x2[only.x2]
   full[both   ] = paste(x1[both],x2[both],sep=sep)
   return(full)
}#end function
#==========================================================================================#
#==========================================================================================#
|
# k-nearest-neighbours demo using the 'class' package.
library(class)
# Two paired numeric vectors (ten observations each).
x = c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
y = c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)
# Combine into a two-column feature matrix.
xandy = cbind(x, y)
# First eight rows are the training set; the last two are held out.
xy_train = xandy[1:8, ]
xy_test = xandy[9:10, ]
# NOTE(review): the class labels are the raw y values, so every distinct y
# becomes its own class -- presumably intentional for this demo, but confirm
# this is the intended target encoding.
ycl = y[1:8]
# Classify the two held-out rows from their 5 nearest training neighbours.
pred = knn(train = xy_train, test = xy_test, cl = ycl, k =5)
pred
y[9:10] | /DatSciInClassStuff/Mar29.R | no_license | wesleymerrick/Data-Sci-Class | R | false | false | 273 | r | library(class)
x = c(151, 174, 138, 186, 128, 136, 179, 163, 152, 131)
y = c(63, 81, 56, 91, 47, 57, 76, 72, 62, 48)
xandy = cbind(x, y)
xy_train = xandy[1:8, ]
xy_test = xandy[9:10, ]
ycl = y[1:8]
pred = knn(train = xy_train, test = xy_test, cl = ycl, k =5)
pred
y[9:10] |
/formacao-cientista/3. regressao/regressao_multipla.R | no_license | robsongcruz/machinelearning | R | false | false | 516 | r | ||
# Shiny server: renders two pre-computed CSV tables with DT.
library(dplyr)
library(shiny)
library(DT)
# check.names = FALSE keeps the original (possibly non-syntactic) column headers.
topTen <- read.csv("topProjected.csv", check.names = FALSE)
loss <- read.csv("historicAndProjectedLoss.csv", check.names = FALSE)
shinyServer(function(input, output) {
  # dom = "t" renders the bare table only (no search box, pagination or info line).
  output$distPlot <- DT::renderDataTable(
    topTen,
    options = list(dom = "t"),
    rownames = FALSE)
  output$loss <- DT::renderDataTable(
    loss,
    options = list(dom = "t"),
    rownames = FALSE)
})
| /resCareDataRequest/server.R | no_license | bekahdevore/rKW | R | false | false | 508 | r | library(dplyr)
library(shiny)
library(DT)
topTen <- read.csv("topProjected.csv", check.names = FALSE)
loss <- read.csv("historicAndProjectedLoss.csv", check.names = FALSE)
shinyServer(function(input, output) {
output$distPlot <- DT::renderDataTable(
topTen,
options = list(dom = "t"),
rownames = FALSE)
output$loss <- DT::renderDataTable(
loss,
options = list(dom = "t"),
rownames = FALSE)
})
|
library(shiny)
library(shinythemes)
library(shinyFiles)
shinythemes::themeSelector()
navbarPage(
theme = shinytheme("cerulean"),
"HCMMCNVs",
# First bar: Title Search
tabPanel("Data pre-processing",
sidebarPanel(
tags$div(tags$label(h4("1. Choose bam files directory"))),
shinyDirButton("bam_dir", "Choose bam files", "Select directory of bam files"),
tags$div(class="form-group shiny-input-container",
tags$div(tags$label(h4("2. Bed file input"))),
tags$div(tags$label("Choose folder", class="btn btn-primary",
tags$input(id = "fileIn", webkitfile = TRUE, type = "file", style="display: none;", onchange="pressed()"))),
#tags$label("No folder choosen", id = "noFile"),
tags$div(id="fileIn_progress", class="progress progress-striped active shiny-file-input-progress",
tags$div(class="progress-bar")
)
),
selectInput("chr_selected", h4("3. Chromosome"),
choices = c(1:22), selectize = FALSE, selected = 19),
numericInput("min_cov", label = h4("4. Minimum mean coverage"), value = 10),
textInput("cov_filename", label = h4("5. Output file name"), value = "Test"),
actionButton("action_bar1", "Run")
#numericInput("number_clusters", label = h4("5. Number of clusters"), value = 3)
#radioButtons("gender_bar1", label = h3("Gender"),
#choices = list("Men" = 1, "Women" = 2),
#selected = 1),
#sliderInput("yr_range_bar1", h3("Year Range:"),
#min = 1968, max = 2015, value = c(2009,2010)),
#textInput("player_name_bar1", h3("Player's Name"), "Rafael Nadal"),
#actionButton("action_bar1", "Update")
#submitButton("Update")
),
mainPanel(theme = "bootstrap.css",
includeScript("./www/text.js"),
tags$div(tags$label(h5("1. Bam files directory"))),
verbatimTextOutput("text_bam_dir"),
tags$div(tags$label(h5("2. Data Summary"))),
textOutput("text_bam_numbers"),
textOutput("text_bed_regions"),
textOutput("text_cov_summary"),
textOutput("text_cov_rdata"),
tags$div(tags$label(h5("3. Selected bed file"))),
tabPanel("Files table", dataTableOutput("tbl"))
#textOutput("Message_bar1"),
#tableOutput("Stat_Table_bar1")
#h4("output$dir"),
#verbatimTextOutput("dir"), br()
#tableOutput("contents")
)
),
# Second bar: Hierarchical Clustering Mixture Model Copy Number Variants
tabPanel("Run HCMMCNVs",
sidebarPanel(
#radioButtons("Cov_Mtx_bar2", label = h4("Coverage Matrix"),
# choices = list("Processed data" = 1, "Saved data" = 2),
# selected = 1),
tags$div(tags$label(h4("1. Load the coverage .RData"))),
tags$div(class="form-group shiny-input-container",
tags$div(tags$label("Choose Coverage RData", class="btn btn-primary",
tags$input(id = "CovfileIn", webkitfile = TRUE, type = "file", style="display: none;", onchange="pressed()"))),
#tags$label("No folder choosen", id = "CovnoFile"),
tags$div(id="CovfileIn_progress", class="progress progress-striped active shiny-file-input-progress",
tags$div(class="progress-bar")
)
),
numericInput("HC_n_clusters", label = h4("2. Hierarchical Clustering: number of clusters"), value = 3),
tags$div(tags$label(h4("3. Cancer cell line only (optional)"))),
radioButtons("radio_Ploidy", label = "Add ploidy input?",
choices = list("No" = 1, "Yes" = 2),
selected = 1),
fileInput("input_Ploidy_estimation", label = "Choose a file: "),
textInput("CBS_filename", label = h4("4. Output file name"), value = "Test"),
#selectInput('yr_bar2', h3('Year'),
# choices = c(1968:2015), selectize = FALSE),
#textInput("player_name_bar2", h3("Player's Name"), "Rafael Nadal"),
actionButton("action_bar2", "Run")
),
mainPanel(
textOutput("test")
#textOutput("Message_bar2"),
#tableOutput("Stat_Table_bar2")
)
),
# Third bar: Visualization
tabPanel("Visulization",
sidebarPanel(
tags$div(tags$label(h4("1. Load the CBS result"))),
tags$div(class="form-group shiny-input-container",
tags$div(tags$label("Choose CBS results", class="btn btn-primary",
tags$input(id = "CBSfileIn", webkitfile = TRUE, type = "file", style="display: none;", onchange="pressed()"))),
#tags$label("No folder choosen", id = "CBSnoFile"),
tags$div(id="CBSfileIn_progress", class="progress progress-striped active shiny-file-input-progress",
tags$div(class="progress-bar")
)
),
#selectInput("tourney_bar3", h3("Select Tourney"),
# choices = NULL),
selectizeInput("sample_bar3", h4("Select sample"),
choices = NULL, multiple = F),
actionButton("action_bar3", "Plot")
),
mainPanel(
plotOutput("plot1"),
downloadButton(outputId = "download_fig", label = "Download the plot")
)
)
)
| /ui.R | no_license | lunching/HCMM_CNVs | R | false | false | 6,002 | r | library(shiny)
library(shinythemes)
library(shinyFiles)
shinythemes::themeSelector()
navbarPage(
theme = shinytheme("cerulean"),
"HCMMCNVs",
# First bar: Title Search
tabPanel("Data pre-processing",
sidebarPanel(
tags$div(tags$label(h4("1. Choose bam files directory"))),
shinyDirButton("bam_dir", "Choose bam files", "Select directory of bam files"),
tags$div(class="form-group shiny-input-container",
tags$div(tags$label(h4("2. Bed file input"))),
tags$div(tags$label("Choose folder", class="btn btn-primary",
tags$input(id = "fileIn", webkitfile = TRUE, type = "file", style="display: none;", onchange="pressed()"))),
#tags$label("No folder choosen", id = "noFile"),
tags$div(id="fileIn_progress", class="progress progress-striped active shiny-file-input-progress",
tags$div(class="progress-bar")
)
),
selectInput("chr_selected", h4("3. Chromosome"),
choices = c(1:22), selectize = FALSE, selected = 19),
numericInput("min_cov", label = h4("4. Minimum mean coverage"), value = 10),
textInput("cov_filename", label = h4("5. Output file name"), value = "Test"),
actionButton("action_bar1", "Run")
#numericInput("number_clusters", label = h4("5. Number of clusters"), value = 3)
#radioButtons("gender_bar1", label = h3("Gender"),
#choices = list("Men" = 1, "Women" = 2),
#selected = 1),
#sliderInput("yr_range_bar1", h3("Year Range:"),
#min = 1968, max = 2015, value = c(2009,2010)),
#textInput("player_name_bar1", h3("Player's Name"), "Rafael Nadal"),
#actionButton("action_bar1", "Update")
#submitButton("Update")
),
mainPanel(theme = "bootstrap.css",
includeScript("./www/text.js"),
tags$div(tags$label(h5("1. Bam files directory"))),
verbatimTextOutput("text_bam_dir"),
tags$div(tags$label(h5("2. Data Summary"))),
textOutput("text_bam_numbers"),
textOutput("text_bed_regions"),
textOutput("text_cov_summary"),
textOutput("text_cov_rdata"),
tags$div(tags$label(h5("3. Selected bed file"))),
tabPanel("Files table", dataTableOutput("tbl"))
#textOutput("Message_bar1"),
#tableOutput("Stat_Table_bar1")
#h4("output$dir"),
#verbatimTextOutput("dir"), br()
#tableOutput("contents")
)
),
# Second bar: Hierarchical Clustering Mixture Model Copy Number Variants
tabPanel("Run HCMMCNVs",
sidebarPanel(
#radioButtons("Cov_Mtx_bar2", label = h4("Coverage Matrix"),
# choices = list("Processed data" = 1, "Saved data" = 2),
# selected = 1),
tags$div(tags$label(h4("1. Load the coverage .RData"))),
tags$div(class="form-group shiny-input-container",
tags$div(tags$label("Choose Coverage RData", class="btn btn-primary",
tags$input(id = "CovfileIn", webkitfile = TRUE, type = "file", style="display: none;", onchange="pressed()"))),
#tags$label("No folder choosen", id = "CovnoFile"),
tags$div(id="CovfileIn_progress", class="progress progress-striped active shiny-file-input-progress",
tags$div(class="progress-bar")
)
),
numericInput("HC_n_clusters", label = h4("2. Hierarchical Clustering: number of clusters"), value = 3),
tags$div(tags$label(h4("3. Cancer cell line only (optional)"))),
radioButtons("radio_Ploidy", label = "Add ploidy input?",
choices = list("No" = 1, "Yes" = 2),
selected = 1),
fileInput("input_Ploidy_estimation", label = "Choose a file: "),
textInput("CBS_filename", label = h4("4. Output file name"), value = "Test"),
#selectInput('yr_bar2', h3('Year'),
# choices = c(1968:2015), selectize = FALSE),
#textInput("player_name_bar2", h3("Player's Name"), "Rafael Nadal"),
actionButton("action_bar2", "Run")
),
mainPanel(
textOutput("test")
#textOutput("Message_bar2"),
#tableOutput("Stat_Table_bar2")
)
),
# Third bar: Visualization
tabPanel("Visulization",
sidebarPanel(
tags$div(tags$label(h4("1. Load the CBS result"))),
tags$div(class="form-group shiny-input-container",
tags$div(tags$label("Choose CBS results", class="btn btn-primary",
tags$input(id = "CBSfileIn", webkitfile = TRUE, type = "file", style="display: none;", onchange="pressed()"))),
#tags$label("No folder choosen", id = "CBSnoFile"),
tags$div(id="CBSfileIn_progress", class="progress progress-striped active shiny-file-input-progress",
tags$div(class="progress-bar")
)
),
#selectInput("tourney_bar3", h3("Select Tourney"),
# choices = NULL),
selectizeInput("sample_bar3", h4("Select sample"),
choices = NULL, multiple = F),
actionButton("action_bar3", "Plot")
),
mainPanel(
plotOutput("plot1"),
downloadButton(outputId = "download_fig", label = "Download the plot")
)
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fetch-donations.R
\name{fetch_donations}
\alias{fetch_donations}
\title{Fetch donations from GDQ across multiple pages}
\usage{
fetch_donations(event = c("cgdq", "agdq2011", "jrdq", "agdq2016", "sgdq2016"),
min_page = 1, max_page = 0)
}
\arguments{
\item{event}{name of the event}
\item{min_page}{first page to include}
\item{max_page}{last page to include}
}
\value{
data frame containing raw donations data
}
\description{
Fetch donations from GDQ across multiple pages
}
\examples{
\donttest{fetch_donations("sgdq2016")}
\donttest{fetch_donations("sgdq2016", 2, 30)}
}
| /man/fetch_donations.Rd | no_license | bkkkk/gdqr | R | false | true | 656 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fetch-donations.R
\name{fetch_donations}
\alias{fetch_donations}
\title{Fetch donations from GDQ across multiple pages}
\usage{
fetch_donations(event = c("cgdq", "agdq2011", "jrdq", "agdq2016", "sgdq2016"),
min_page = 1, max_page = 0)
}
\arguments{
\item{event}{name of the event}
\item{min_page}{first page to include}
\item{max_page}{last page to include}
}
\value{
data frame containing raw donations data
}
\description{
Fetch donations from GDQ across multiple pages
}
\examples{
\donttest{fetch_donations("sgdq2016")}
\donttest{fetch_donations("sgdq2016", 2, 30)}
}
|
#!/usr/bin/env Rscript
## This function will do the data processing for all of the plotting scripts.
## It will output a .Rda file for fast loading that the individual scripts
## can use. The plotting scripts will check the current working directory
## for the presence of the .Rda file. If found it will be loaded, if not found
## this script will be sourced in and processData will be run.
processData <- function(dates=c('1/2/2007', '2/2/2007')) {
  # Read the full data set; '?' marks missing values, fields are ';'-separated.
  col_types <- c('character', 'character', rep('numeric', 7))
  raw <- read.table('household_power_consumption.txt', header=TRUE,
                    sep=';', na.strings='?', stringsAsFactors=FALSE,
                    nrows=2075260, colClasses=col_types)
  # Keep only the rows whose Date column matches one of the requested dates.
  elecData <- raw[raw$Date %in% dates, ]
  # Build a combined POSIXct timestamp from the Date (col 1) and Time (col 2).
  stamp <- paste(elecData[, 1], elecData[, 2])
  elecData$dateTime <- as.POSIXct(stamp, format="%d/%m/%Y %H:%M:%S")
  # Persist under the name "elecData" so load() restores that exact symbol.
  save(elecData, file='elecData.Rda')
}
## Allow the script to be called from the terminal.
if (! interactive()) processData()
## Potential improvements:
## - Currently the two dates are hardcoded in. From the interpretor we
## could pass different values in, as long as they are a character
## vector. It would be more user friendly to accept a date range,
## (start, stop). In addition the hardcoded file output would need to
## changed into a parameter to be specified.
## - Currently the script accepts no command line args, it would be
## possible, and potentially beneficial, to accept them.
| /code/processData.R | no_license | mailb88/ExData_Plotting1 | R | false | false | 1,530 | r | #!/usr/bin/env Rscript
## This function will do the data processing for all of the plotting scripts.
## It will output a .Rda file for fast loading that the individual scripts
## can use. The plotting scripts will check the current working directory
## for the presence of the .Rda file. If found it will be loaded, if not found
## this script will be sourced in and processData will be run.
processData <- function(dates=c('1/2/2007', '2/2/2007')) {
elec <- read.table('household_power_consumption.txt', header=TRUE,
sep=';', na.strings='?', stringsAsFactors=FALSE,
nrows=2075260, colClasses=c('character',
'character', rep('numeric', 7)))
dateIndex <- elec$Date %in% dates
elecData <- elec[dateIndex,]
dateTime <- paste(elecData[,1], elecData[,2])
elecData$dateTime <- as.POSIXct(dateTime, format="%d/%m/%Y %H:%M:%S")
save(elecData, file='elecData.Rda')
}
## Allow the script to be called from the terminal.
if (! interactive()) processData()
## Potential improvements:
## - Currently the two dates are hardcoded in. From the interpretor we
## could pass different values in, as long as they are a character
## vector. It would be more user friendly to accept a date range,
## (start, stop). In addition the hardcoded file output would need to
## changed into a parameter to be specified.
## - Currently the script accepts no command line args, it would be
## possible, and potentially beneficial, to accept them.
|
library(tidyverse)
library(rvest)
library(clipr)
library(lubridate)
library(RSelenium)
library(xml2)
library(jsonlite)
library(googledrive)
library(googlesheets4)
library(wdman)
#install.packages('DescTools')
library(DescTools)
# library(germanpolls)
| /src/config.R | no_license | sueddeutsche/sz-poll-collect | R | false | false | 256 | r | library(tidyverse)
library(rvest)
library(clipr)
library(lubridate)
library(RSelenium)
library(xml2)
library(jsonlite)
library(googledrive)
library(googlesheets4)
library(wdman)
#install.packages('DescTools')
library(DescTools)
# library(germanpolls)
|
library(shiny)
library(googleAuthR)
options(googleAuthR.scopes.selected = "https://www.googleapis.com/auth/urlshortener")
# SECURITY NOTE(review): OAuth client id/secret are hard-coded below; move them
# to environment variables or a gitignored config before committing/deploying.
options(googleAuthR.webapp.client_id = "201908948134-cjjs89cffh3k429vi7943ftpk3jg36ed.apps.googleusercontent.com")
options(googleAuthR.webapp.client_secret = "mE7rHl0-iNtzyI1MQia-mg1o")
# Fix the port so the app always serves on 3838.
options(shiny.port = 3838)
# Shorten a URL through the Google URL-shortener API; returns the short id.
shorten_url <- function(url){
  payload = list(
    longUrl = url
  )
  shortener <- gar_api_generator("https://www.googleapis.com/urlshortener/v1/url",
                                 "POST",
                                 data_parse_function = function(x) x$id)
  shortener(the_body = payload)
}
## server.R
server <- function(input, output, session){
  ## Create access token and render login button
  ## ("loginButton" must match the googleAuth module id used in the UI).
  access_token <- callModule(googleAuth, "loginButton", approval_prompt = "force")
  ## Shorten the URL only when the submit button is pressed.
  short_url_output <- eventReactive(input$submit, {
    ## wrap existing function with_shiny
    ## pass the reactive token in shiny_access_token
    ## pass other named arguments
    with_shiny(f = shorten_url,
               shiny_access_token = access_token(),
               url=input$url)
  })
  ## Display the shortened URL returned by the API.
  output$short_url <- renderText({
    short_url_output()
  })
}
| /Shiny/server.R | no_license | Vaibhav-PublicisSapient/Shiny | R | false | false | 1,188 | r | library(shiny)
library(googleAuthR)
options(googleAuthR.scopes.selected = "https://www.googleapis.com/auth/urlshortener")
options(googleAuthR.webapp.client_id = "201908948134-cjjs89cffh3k429vi7943ftpk3jg36ed.apps.googleusercontent.com")
options(googleAuthR.webapp.client_secret = "mE7rHl0-iNtzyI1MQia-mg1o")
options(shiny.port = 3838)
shorten_url <- function(url){
body = list(
longUrl = url
)
f <- gar_api_generator("https://www.googleapis.com/urlshortener/v1/url",
"POST",
data_parse_function = function(x) x$id)
f(the_body = body)
}
## server.R
server <- function(input, output, session){
## Create access token and render login button
access_token <- callModule(googleAuth, "loginButton", approval_prompt = "force")
short_url_output <- eventReactive(input$submit, {
## wrap existing function with_shiny
## pass the reactive token in shiny_access_token
## pass other named arguments
with_shiny(f = shorten_url,
shiny_access_token = access_token(),
url=input$url)
})
output$short_url <- renderText({
short_url_output()
})
}
|
library(shiny)
### Name: getQueryString
### Title: Get the query string / hash component from the URL
### Aliases: getQueryString getUrlHash
### ** Examples
## Only run this example in interactive R sessions
if (interactive()) {
## App 1: getQueryString
## Printing the value of the query string
## (Use the back and forward buttons to see how the browser
## keeps a record of each state)
shinyApp(
ui = fluidPage(
textInput("txt", "Enter new query string"),
helpText("Format: ?param1=val1¶m2=val2"),
actionButton("go", "Update"),
hr(),
verbatimTextOutput("query")
),
server = function(input, output, session) {
observeEvent(input$go, {
updateQueryString(input$txt, mode = "push")
})
output$query <- renderText({
query <- getQueryString()
queryText <- paste(names(query), query,
sep = "=", collapse=", ")
paste("Your query string is:\n", queryText)
})
}
)
## App 2: getUrlHash
## Printing the value of the URL hash
## (Use the back and forward buttons to see how the browser
## keeps a record of each state)
shinyApp(
ui = fluidPage(
textInput("txt", "Enter new hash"),
helpText("Format: #hash"),
actionButton("go", "Update"),
hr(),
verbatimTextOutput("hash")
),
server = function(input, output, session) {
observeEvent(input$go, {
updateQueryString(input$txt, mode = "push")
})
output$hash <- renderText({
hash <- getUrlHash()
paste("Your hash is:\n", hash)
})
}
)
}
| /data/genthat_extracted_code/shiny/examples/getQueryString.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,624 | r | library(shiny)
### Name: getQueryString
### Title: Get the query string / hash component from the URL
### Aliases: getQueryString getUrlHash
### ** Examples
## Only run this example in interactive R sessions
if (interactive()) {
## App 1: getQueryString
## Printing the value of the query string
## (Use the back and forward buttons to see how the browser
## keeps a record of each state)
shinyApp(
ui = fluidPage(
textInput("txt", "Enter new query string"),
helpText("Format: ?param1=val1¶m2=val2"),
actionButton("go", "Update"),
hr(),
verbatimTextOutput("query")
),
server = function(input, output, session) {
observeEvent(input$go, {
updateQueryString(input$txt, mode = "push")
})
output$query <- renderText({
query <- getQueryString()
queryText <- paste(names(query), query,
sep = "=", collapse=", ")
paste("Your query string is:\n", queryText)
})
}
)
## App 2: getUrlHash
## Printing the value of the URL hash
## (Use the back and forward buttons to see how the browser
## keeps a record of each state)
shinyApp(
ui = fluidPage(
textInput("txt", "Enter new hash"),
helpText("Format: #hash"),
actionButton("go", "Update"),
hr(),
verbatimTextOutput("hash")
),
server = function(input, output, session) {
observeEvent(input$go, {
updateQueryString(input$txt, mode = "push")
})
output$hash <- renderText({
hash <- getUrlHash()
paste("Your hash is:\n", hash)
})
}
)
}
|
#! /usr/bin/env Rscript
rm(list=ls(all=TRUE))
library(TCGAbiolinks)
library(SummarizedExperiment)
library(plyr)
require("getopt", quietly=TRUE)
library(limma)
library(sva)
#################################
#Rscript thisscript.R -i TCGA-BRCA -d brcaExp_PreprocessedData_wo_batch.rda -f female_onlyTandN.txt -t 6 -s 4 -1 female_85203.txt -2 female_84803.txt -3 female_85003.txt -4 female_85223.txt -5 #female_85233.txt -6 female_85753.txt -7 stage1.txt -8 stage2.txt -9 stage3.txt -0 stage4.txt
##################################
# User input:
#1) -i --projectID: the project of interest (e.g. TCGA-LUAD).
#2) -d --data: Count data set to be used (.rda-file containing SummarizedExperiment object consisting of: genes (rows) & samples (columns))
#3) -f --tumournormal: a .txt containing a list of samples in question (here: all female T adn N) with newline as delimiter (\n).
#4) -t --tumoutTypeCount: specify how many tumour types you will input as lists
#5) -s --stageCount: specify how many tumour stages you will input as lists
#6) -1 --condition1: a .txt containing a list of samples in question with newline as delimiter (\n).
#7) -2 --condition2: a .txt containing a list of samples in question with newline as delimiter (\n).
#8,9,10,11) -3,-4,-5,-6 more conditions (subtypes)
# 12,13,14,15) -7,-8,-9,-0 more conditions (stages)
## NB subtypes must always come before stages, unless only stages are used.
# flag specification # ref: https://tgmstat.wordpress.com/2014/05/21/r-scripts/
# getopt spec: one row per flag - long option name, short flag, argument
# flag (1 = argument required, 2 = argument optional), value type.
spec = matrix(c(
"projectID", "i", 1, "character",
"data", "d", 1, "character",
"tumournormal", "f", 1, "character", # extracts all FEMALE t/n samples. #specify this to get all normal samples
"tumourTypeCount", "t", 2, "double", # how many subtypes of tumours you're going to specify
"stageCount", "s", 2, "double", # how many stages you're going to specify
"condition1", "1", 2, "character", # takes up to 10 conditions
"condition2", "2", 2, "character",
"condition3", "3", 2, "character",
"condition4", "4", 2, "character",
"condition5", "5", 2, "character",
"condition6", "6", 2, "character",
"condition7", "7", 2, "character",
"condition8", "8", 2, "character",
"condition9", "9", 2, "character",
"condition0", "0", 2, "character"
), byrow=TRUE, ncol=4)
# Parse the command line against the spec; opt is a named list of flag values.
opt = getopt(spec)
# Pull the required command-line arguments out of the getopt() result.
argsProjectID <- opt$projectID
argsData <- opt$data
# Load the SummarizedExperiment saved in the .rda file; load() returns the
# name of the restored object, which get() then resolves.
dataSE <- get(load(argsData))
# Accumulator for every condition file supplied on the command line.
conditions_specified <- character(0)
# The optional counts default to zero when their flags were not given.
argsTTcount <- if (is.null(opt$tumourTypeCount)) 0 else opt$tumourTypeCount
argsScount <- if (is.null(opt$stageCount)) 0 else opt$stageCount
# The ten optional condition flags (-1 .. -9, -0) are handled identically:
# each supplies a file listing sample barcodes for one tumour type or stage.
# For each flag k this creates argsCondK (FALSE when the flag is absent,
# otherwise the file path) and appends every supplied path to
# conditions_specified in flag order (1..9 then 0), exactly as the original
# ten copy-pasted if/else blocks did.
for (k in c(1:9, 0)) {
  opt_name <- paste0("condition", k)
  var_name <- paste0("argsCond", k)
  if (is.null(opt[[opt_name]])) {
    assign(var_name, FALSE)
  } else {
    assign(var_name, opt[[opt_name]])
    conditions_specified <- append(conditions_specified, opt[[opt_name]])
  }
}
# Sanity check: the counts given via -t and -s must add up to the number of
# condition files actually supplied.
if (argsTTcount + argsScount != length(conditions_specified)) {
  # Error message typos fixed ("ans" -> "and", "spesity" -> "specify").
  stop("The number of specified tumour types and stages does not match the specified condition files! \n OR you did not specify the number of files using -t and -s flags. ")
} else {
  print(paste0(length(conditions_specified), " conditions were specified:"))
  print(conditions_specified)
  cat("\n")
}
# -f gives the tumour/normal sample list; FALSE when the flag is absent.
if (is.null(opt$tumournormal)) {
  argsTN <- FALSE
} else {
  # If this is the only argument supplied, then the provided list will be
  # split into TP/NT (not the full dataset).
  argsTN <- opt$tumournormal
}
getDataBarcodes <- function(argsProjectID, barcodeList, paired=FALSE){
  # Resolve short TCGA sample barcodes to the full-length barcodes known to GDC.
  #
  # argsProjectID: TCGA project id (e.g. "TCGA-BRCA").
  # barcodeList:   character vector of (short) sample barcodes to query for.
  # paired:        if TRUE return list(TP=, NT=) with both primary solid
  #                tumour and solid tissue normal samples; otherwise return
  #                only the primary solid tumour (TP) barcodes.
  #
  # NB: even though we are searching by short barcodes, GDCquery will return
  # long ones.
  query.exp <- GDCquery(project = argsProjectID,
                        legacy = TRUE,
                        data.category = "Gene expression",
                        data.type = "Gene expression quantification",
                        platform = "Illumina HiSeq",
                        file.type = "results",
                        experimental.strategy = "RNA-Seq",
                        barcode = barcodeList)
  cases <- query.exp$results[[1]]$cases
  # Primary solid tumour samples are needed in both branches, so select once.
  dataSmTP <- TCGAquery_SampleTypes(cases, "TP")
  if (paired) { # tumour/normal case: also select solid tissue normal samples
    dataSmNT <- TCGAquery_SampleTypes(cases, "NT")
    return(list(TP = dataSmTP, NT = dataSmNT))
  }
  # Not a paired case; just the tumour samples for the queried barcodes (subtype).
  dataSmTP
}
# Extract tumour/normal sample barcodes: read the -f file (one short barcode
# per line) and resolve them to full-length barcodes.
argsTN <- as.vector(read.table(argsTN, as.is = T, header = FALSE))$V1
print(paste0("Querying long sample barcodes for Tumour/Normal."))
pairedSamples <- getDataBarcodes(argsProjectID, argsTN, paired = TRUE)
allTumourSamples <- pairedSamples$TP
allNormalSamples <- pairedSamples$NT
print(paste0("Normal samples count: ", length(allNormalSamples)))
tumourName <- "Tumour"
normalName <- "Normal"
# Extract the samples of each condition and store them in a list keyed by the
# condition file name without its ".txt" extension (the same index recovers
# the condition's file name from conditions_specified).
# Initialised unconditionally so the code below works even when no condition
# files were given (previously samplesToExtract was undefined in that case).
samplesToExtract <- list()
if (length(conditions_specified) != 0) {
  for (i in seq_along(conditions_specified)) {
    print(paste0("Querying long sample barcodes for Condition ", i, ": ", conditions_specified[i]))
    this_condition_Samples <- as.vector(read.table(conditions_specified[i], as.is = T, header = FALSE))$V1
    condSamples <- getDataBarcodes(argsProjectID, this_condition_Samples)
    samplesToExtract[[ substr(conditions_specified[i], 1, nchar(conditions_specified[i]) - 4) ]] <- condSamples
  }
  # add also normal samples to conditions list
  #samplesToExtract[["normal"]] <- allNormalSamples
}
print(names(samplesToExtract))
print("Extraction of selected groups is finished.")
cat("\n")
print("################### Data summary: ####################")
print(paste0("Preprocessed data contains ", dim(dataSE)[1], " genes and ", dim(dataSE)[2], " samples. "))
# seq_along() is safe when samplesToExtract is empty (1:length would be 1:0).
for (i in seq_along(samplesToExtract)) {
  print(paste0("There are ", length(samplesToExtract[[i]]), " samples in condition ", names(samplesToExtract)[i], " (", conditions_specified[i], ")"))
}
print("######################################################")
cat("\n")
get_IDs <- function(data) {
# Parse the TCGA barcodes in colnames(data) into a data frame with one row
# per sample, adding a derived patient id and a tumour/normal condition label.
IDs <- strsplit(c(colnames(data)), "-") #split by hyphen into a list
IDs <- ldply(IDs, rbind) #make a matrix samples VS barcode bits
colnames(IDs) <- c('project', 'tss','participant', 'sample', "portion", "plate", "center")
cols <- c("project", "tss", "participant")
IDs$patient <- apply(IDs[,cols],1,paste,collapse = "-" ) #take 'cols' columns and make a new one
barcode <- colnames(data) #get original sample names from input data
IDs <- cbind(IDs, barcode) #add them to matrix
condition <- gsub("11+[[:alpha:]]", "normal", as.character(IDs$sample)) #TCGA sample code 11 = solid tissue normal
condition <- gsub("01+[[:alpha:]]", "cancer", condition) #TCGA sample code 01 = primary solid tumour; [[:alpha:]] matches the trailing vial letter
IDs$condition <- condition #add condition column in the matrix
IDs$myorder <- 1:nrow(IDs)
##test<-IDs[sort.list(IDs[,3]), ] #sort by participant (to see pairs)
return(IDs)
}
# keep only samples of the subtypes we're investigating in the data frame:
# columns = unique barcodes across all conditions, plus all normal samples
newdataSE<-dataSE[, c(unique(unlist(samplesToExtract, use.names=FALSE)), allNormalSamples)]
samplesMatrix <- get_IDs(newdataSE)
print (paste0("Currently looking at ",dim(samplesMatrix)[1], " samples."))
addCondition <- function(samplesMatrix, conditionsList, TTcount=argsTTcount, TScount=argsScount){
  # Label every sample in samplesMatrix with its tumour type and/or stage.
  #
  # samplesMatrix:  data frame with a `barcode` column (from get_IDs()).
  # conditionsList: named list of barcode vectors; the first TTcount entries
  #                 are tumour types and the last TScount entries are stages
  #                 (stages must come after types, unless only stages are used).
  # TTcount/TScount: how many entries of conditionsList are types / stages.
  #
  # Returns samplesMatrix with a `tumourTypes` and/or `tumourStages` column
  # added; samples that match no group are labelled "unknown".
  all_samples <- samplesMatrix$barcode

  # Assign each barcode the name of the first group in group_list that
  # contains it, or "unknown" when none does. Iterating in reverse order so
  # that earlier groups overwrite later ones on overlap, preserving the
  # first-match-wins semantics of the original break-out loops.
  label_samples <- function(barcodes, group_list) {
    labels <- rep("unknown", length(barcodes))
    for (j in rev(seq_along(group_list))) {
      labels[barcodes %in% group_list[[j]]] <- names(group_list)[j]
    }
    labels
  }

  if (TTcount != 0) {
    # Tumour types are the first TTcount entries of conditionsList.
    TT_list <- conditionsList[1:TTcount]
    print(paste0("Found ", TTcount, " tumour subtypes:"))
    print(names(TT_list))
    tumourTypes <- label_samples(all_samples, TT_list)
    print(paste0("Labelled this many tumour types not unknown: ", length(tumourTypes[tumourTypes != "unknown"])))
  }
  cat("\n")
  if (TScount != 0) {
    # Stages are the last TScount entries when types were also given,
    # otherwise the first TScount entries.
    TS_list <- if (TTcount != 0) tail(conditionsList, TScount) else conditionsList[1:TScount]
    print(paste0("Found ", TScount, " tumour stages:"))
    print(names(TS_list))
    tumourStages <- label_samples(all_samples, TS_list)
    print(paste0("Labelled this many tumour stages not unknown: ", length(tumourStages[tumourStages != "unknown"])))
  }
  if (TTcount != 0) {
    samplesMatrix$tumourTypes <- tumourTypes
  }
  if (TScount != 0) {
    samplesMatrix$tumourStages <- tumourStages
  }
  samplesMatrix
}
# Attach tumour type / stage labels to the per-sample matrix.
samplesMatrix<- addCondition(samplesMatrix, samplesToExtract, TTcount=argsTTcount, TScount=argsScount)
cat("\n")
print ("Assigned the following tumour types")
print (unique(samplesMatrix$tumourTypes))
print ("Assigned the following tumour stages")
print (unique(samplesMatrix$tumourStages))
print ("Assigned the following conditions")
print (unique(samplesMatrix$condition))
#replace 'unknown' labels in normal samples with NA (normals have no tumour type/stage)
samplesMatrix<- within(samplesMatrix, tumourTypes[condition == 'normal'] <- NA)
samplesMatrix <- within(samplesMatrix, tumourStages[condition == 'normal'] <- NA)
cat("\n")
cat("\n")
print (paste0("The new Summarized Experiment has dimentions: ", dim(newdataSE)[1]," " ,dim(newdataSE)[2]))
print (paste0("The new samples matrix has dimentions: ", dim(samplesMatrix)[1]," " ,dim(samplesMatrix)[2] ))
cat("\n")
# saving a new SE (only samples in question) next to the input .rda file
save(newdataSE,file=paste0(dirname(argsData),"/",unlist(strsplit(basename(argsData),".", fixed = T))[1],"_updatedSE_allTypes_allStages.rda"))
# saving the 'my_IDs' equivalent but with types
save(samplesMatrix, file=paste0(dirname(argsData),"/",unlist(strsplit(basename(argsData),".", fixed = T))[1],"_sampleMatrix_allTypes_allStages.rda"))
print("Data saved.")
#reading in the data!
# testing<-get(load(paste0(dirname(argsData),"/",unlist(strsplit(basename(argsData),".", fixed = T))[1],"_allTypes_allStages.rda")))
# print (dim(testing))
#print("Starting DEA ...")
###### DEA analysis from TCGAbiolinks ---- to be replaced!
#dataDEGs <- TCGAanalyze_DEA(mat1 = dataSE[,cond1Samples],
# mat2 = dataSE[,cond2Samples],
# Cond1type = argsCond1,
# Cond2type = argsCond2,
# fdr.cut = 0.01 ,
# logFC.cut = 1,
# method = "glmLRT")
#print (dataDEGs)
| /my_old/specifyTypesStages.R | no_license | mutual-ai/TCGA_RNA-seq | R | false | false | 13,866 | r | #! /usr/bin/env Rscript
rm(list=ls(all=TRUE))
library(TCGAbiolinks)
library(SummarizedExperiment)
library(plyr)
require("getopt", quietly=TRUE)
library(limma)
library(sva)
#################################
#Rscript thisscript.R -i TCGA-BRCA -d brcaExp_PreprocessedData_wo_batch.rda -f female_onlyTandN.txt -t 6 -s 4 -1 female_85203.txt -2 female_84803.txt -3 female_85003.txt -4 female_85223.txt -5 #female_85233.txt -6 female_85753.txt -7 stage1.txt -8 stage2.txt -9 stage3.txt -0 stage4.txt
##################################
# User input:
#1) -i --projectID: the project of interest (e.g. TCGA-LUAD).
#2) -d --data: Count data set to be used (.rda-file containing SummarizedExperiment object consisting of: genes (rows) & samples (columns))
#3) -f --tumournormal: a .txt containing a list of samples in question (here: all female T and N) with newline as delimiter (\n).
#4) -t --tumourTypeCount: specify how many tumour types you will input as lists
#5) -s --stageCount: specify how many tumour stages you will input as lists
#6) -1 --condition1: a .txt containing a list of samples in question with newline as delimiter (\n).
#7) -2 --condition2: a .txt containing a list of samples in question with newline as delimiter (\n).
#8,9,10,11) -3,-4,-5,-6 more conditions (subtypes)
# 12,13,14,15) -7,-8,-9,-0 more conditions (stages)
## NB subtypes must always come before stages, unless only stages are used.
# flag specification # ref: https://tgmstat.wordpress.com/2014/05/21/r-scripts/
spec = matrix(c(
"projectID", "i", 1, "character",
"data", "d", 1, "character",
"tumournormal", "f", 1, "character", # extracts all FEMALE t/n samples. #specify this to get all normal samples
"tumourTypeCount", "t", 2, "double", #how many subtypes of tumours youre going to specify
"stageCount", "s", 2, "double", # how many stages youre going to specigy
"condition1", "1", 2, "character", #takes up to 10 conditions
"condition2", "2", 2, "character",
"condition3", "3", 2, "character",
"condition4", "4", 2, "character",
"condition5", "5", 2, "character",
"condition6", "6", 2, "character",
"condition7", "7", 2, "character",
"condition8", "8", 2, "character",
"condition9", "9", 2, "character",
"condition0", "0", 2, "character"
), byrow=TRUE, ncol=4)
opt = getopt(spec)
# Pull the required command-line arguments out of the getopt() result.
argsProjectID <- opt$projectID
argsData <- opt$data
# Load the SummarizedExperiment saved in the .rda file; load() returns the
# name of the restored object, which get() then resolves.
dataSE <- get(load(argsData))
# Accumulator for every condition file supplied on the command line.
conditions_specified <- character(0)
# The optional counts default to zero when their flags were not given.
argsTTcount <- if (is.null(opt$tumourTypeCount)) 0 else opt$tumourTypeCount
argsScount <- if (is.null(opt$stageCount)) 0 else opt$stageCount
# The ten optional condition flags (-1 .. -9, -0) are handled identically:
# each supplies a file listing sample barcodes for one tumour type or stage.
# For each flag k this creates argsCondK (FALSE when the flag is absent,
# otherwise the file path) and appends every supplied path to
# conditions_specified in flag order (1..9 then 0), exactly as the original
# ten copy-pasted if/else blocks did.
for (k in c(1:9, 0)) {
  opt_name <- paste0("condition", k)
  var_name <- paste0("argsCond", k)
  if (is.null(opt[[opt_name]])) {
    assign(var_name, FALSE)
  } else {
    assign(var_name, opt[[opt_name]])
    conditions_specified <- append(conditions_specified, opt[[opt_name]])
  }
}
# Sanity check: the counts given via -t and -s must add up to the number of
# condition files actually supplied.
if (argsTTcount + argsScount != length(conditions_specified)) {
  # Error message typos fixed ("ans" -> "and", "spesity" -> "specify").
  stop("The number of specified tumour types and stages does not match the specified condition files! \n OR you did not specify the number of files using -t and -s flags. ")
} else {
  print(paste0(length(conditions_specified), " conditions were specified:"))
  print(conditions_specified)
  cat("\n")
}
# -f gives the tumour/normal sample list; FALSE when the flag is absent.
if (is.null(opt$tumournormal)) {
  argsTN <- FALSE
} else {
  # If this is the only argument supplied, then the provided list will be
  # split into TP/NT (not the full dataset).
  argsTN <- opt$tumournormal
}
getDataBarcodes <- function(argsProjectID, barcodeList, paired=FALSE){
  # Resolve short TCGA sample barcodes to the full-length barcodes known to GDC.
  #
  # argsProjectID: TCGA project id (e.g. "TCGA-BRCA").
  # barcodeList:   character vector of (short) sample barcodes to query for.
  # paired:        if TRUE return list(TP=, NT=) with both primary solid
  #                tumour and solid tissue normal samples; otherwise return
  #                only the primary solid tumour (TP) barcodes.
  #
  # NB: even though we are searching by short barcodes, GDCquery will return
  # long ones.
  query.exp <- GDCquery(project = argsProjectID,
                        legacy = TRUE,
                        data.category = "Gene expression",
                        data.type = "Gene expression quantification",
                        platform = "Illumina HiSeq",
                        file.type = "results",
                        experimental.strategy = "RNA-Seq",
                        barcode = barcodeList)
  cases <- query.exp$results[[1]]$cases
  # Primary solid tumour samples are needed in both branches, so select once.
  dataSmTP <- TCGAquery_SampleTypes(cases, "TP")
  if (paired) { # tumour/normal case: also select solid tissue normal samples
    dataSmNT <- TCGAquery_SampleTypes(cases, "NT")
    return(list(TP = dataSmTP, NT = dataSmNT))
  }
  # Not a paired case; just the tumour samples for the queried barcodes (subtype).
  dataSmTP
}
# Extract tumour/normal sample barcodes: read the -f file (one short barcode
# per line) and resolve them to full-length barcodes.
argsTN <- as.vector(read.table(argsTN, as.is = T, header = FALSE))$V1
print(paste0("Querying long sample barcodes for Tumour/Normal."))
pairedSamples <- getDataBarcodes(argsProjectID, argsTN, paired = TRUE)
allTumourSamples <- pairedSamples$TP
allNormalSamples <- pairedSamples$NT
print(paste0("Normal samples count: ", length(allNormalSamples)))
tumourName <- "Tumour"
normalName <- "Normal"
# Extract the samples of each condition and store them in a list keyed by the
# condition file name without its ".txt" extension (the same index recovers
# the condition's file name from conditions_specified).
# Initialised unconditionally so the code below works even when no condition
# files were given (previously samplesToExtract was undefined in that case).
samplesToExtract <- list()
if (length(conditions_specified) != 0) {
  for (i in seq_along(conditions_specified)) {
    print(paste0("Querying long sample barcodes for Condition ", i, ": ", conditions_specified[i]))
    this_condition_Samples <- as.vector(read.table(conditions_specified[i], as.is = T, header = FALSE))$V1
    condSamples <- getDataBarcodes(argsProjectID, this_condition_Samples)
    samplesToExtract[[ substr(conditions_specified[i], 1, nchar(conditions_specified[i]) - 4) ]] <- condSamples
  }
  # add also normal samples to conditions list
  #samplesToExtract[["normal"]] <- allNormalSamples
}
print(names(samplesToExtract))
print("Extraction of selected groups is finished.")
cat("\n")
print("################### Data summary: ####################")
print(paste0("Preprocessed data contains ", dim(dataSE)[1], " genes and ", dim(dataSE)[2], " samples. "))
# seq_along() is safe when samplesToExtract is empty (1:length would be 1:0).
for (i in seq_along(samplesToExtract)) {
  print(paste0("There are ", length(samplesToExtract[[i]]), " samples in condition ", names(samplesToExtract)[i], " (", conditions_specified[i], ")"))
}
print("######################################################")
cat("\n")
get_IDs <- function(data) {
# Parse the TCGA barcodes in colnames(data) into a data frame with one row
# per sample, adding a derived patient id and a tumour/normal condition label.
IDs <- strsplit(c(colnames(data)), "-") #split by hyphen into a list
IDs <- ldply(IDs, rbind) #make a matrix samples VS barcode bits
colnames(IDs) <- c('project', 'tss','participant', 'sample', "portion", "plate", "center")
cols <- c("project", "tss", "participant")
IDs$patient <- apply(IDs[,cols],1,paste,collapse = "-" ) #take 'cols' columns and make a new one
barcode <- colnames(data) #get original sample names from input data
IDs <- cbind(IDs, barcode) #add them to matrix
condition <- gsub("11+[[:alpha:]]", "normal", as.character(IDs$sample)) #TCGA sample code 11 = solid tissue normal
condition <- gsub("01+[[:alpha:]]", "cancer", condition) #TCGA sample code 01 = primary solid tumour; [[:alpha:]] matches the trailing vial letter
IDs$condition <- condition #add condition column in the matrix
IDs$myorder <- 1:nrow(IDs)
##test<-IDs[sort.list(IDs[,3]), ] #sort by participant (to see pairs)
return(IDs)
}
# keep only samples of the subtypes we're investigating in the data frame:
# columns = unique barcodes across all conditions, plus all normal samples
newdataSE<-dataSE[, c(unique(unlist(samplesToExtract, use.names=FALSE)), allNormalSamples)]
samplesMatrix <- get_IDs(newdataSE)
print (paste0("Currently looking at ",dim(samplesMatrix)[1], " samples."))
addCondition <- function(samplesMatrix, conditionsList, TTcount=argsTTcount, TScount=argsScount){
  # Label every sample in samplesMatrix with its tumour type and/or stage.
  #
  # samplesMatrix:  data frame with a `barcode` column (from get_IDs()).
  # conditionsList: named list of barcode vectors; the first TTcount entries
  #                 are tumour types and the last TScount entries are stages
  #                 (stages must come after types, unless only stages are used).
  # TTcount/TScount: how many entries of conditionsList are types / stages.
  #
  # Returns samplesMatrix with a `tumourTypes` and/or `tumourStages` column
  # added; samples that match no group are labelled "unknown".
  all_samples <- samplesMatrix$barcode

  # Assign each barcode the name of the first group in group_list that
  # contains it, or "unknown" when none does. Iterating in reverse order so
  # that earlier groups overwrite later ones on overlap, preserving the
  # first-match-wins semantics of the original break-out loops.
  label_samples <- function(barcodes, group_list) {
    labels <- rep("unknown", length(barcodes))
    for (j in rev(seq_along(group_list))) {
      labels[barcodes %in% group_list[[j]]] <- names(group_list)[j]
    }
    labels
  }

  if (TTcount != 0) {
    # Tumour types are the first TTcount entries of conditionsList.
    TT_list <- conditionsList[1:TTcount]
    print(paste0("Found ", TTcount, " tumour subtypes:"))
    print(names(TT_list))
    tumourTypes <- label_samples(all_samples, TT_list)
    print(paste0("Labelled this many tumour types not unknown: ", length(tumourTypes[tumourTypes != "unknown"])))
  }
  cat("\n")
  if (TScount != 0) {
    # Stages are the last TScount entries when types were also given,
    # otherwise the first TScount entries.
    TS_list <- if (TTcount != 0) tail(conditionsList, TScount) else conditionsList[1:TScount]
    print(paste0("Found ", TScount, " tumour stages:"))
    print(names(TS_list))
    tumourStages <- label_samples(all_samples, TS_list)
    print(paste0("Labelled this many tumour stages not unknown: ", length(tumourStages[tumourStages != "unknown"])))
  }
  if (TTcount != 0) {
    samplesMatrix$tumourTypes <- tumourTypes
  }
  if (TScount != 0) {
    samplesMatrix$tumourStages <- tumourStages
  }
  samplesMatrix
}
# Attach tumour type / stage labels to the per-sample matrix.
samplesMatrix<- addCondition(samplesMatrix, samplesToExtract, TTcount=argsTTcount, TScount=argsScount)
cat("\n")
print ("Assigned the following tumour types")
print (unique(samplesMatrix$tumourTypes))
print ("Assigned the following tumour stages")
print (unique(samplesMatrix$tumourStages))
print ("Assigned the following conditions")
print (unique(samplesMatrix$condition))
#replace 'unknown' labels in normal samples with NA (normals have no tumour type/stage)
samplesMatrix<- within(samplesMatrix, tumourTypes[condition == 'normal'] <- NA)
samplesMatrix <- within(samplesMatrix, tumourStages[condition == 'normal'] <- NA)
cat("\n")
cat("\n")
print (paste0("The new Summarized Experiment has dimentions: ", dim(newdataSE)[1]," " ,dim(newdataSE)[2]))
print (paste0("The new samples matrix has dimentions: ", dim(samplesMatrix)[1]," " ,dim(samplesMatrix)[2] ))
cat("\n")
# saving a new SE (only samples in question) next to the input .rda file
save(newdataSE,file=paste0(dirname(argsData),"/",unlist(strsplit(basename(argsData),".", fixed = T))[1],"_updatedSE_allTypes_allStages.rda"))
# saving the 'my_IDs' equivalent but with types
save(samplesMatrix, file=paste0(dirname(argsData),"/",unlist(strsplit(basename(argsData),".", fixed = T))[1],"_sampleMatrix_allTypes_allStages.rda"))
print("Data saved.")
#reading in the data!
# testing<-get(load(paste0(dirname(argsData),"/",unlist(strsplit(basename(argsData),".", fixed = T))[1],"_allTypes_allStages.rda")))
# print (dim(testing))
#print("Starting DEA ...")
###### DEA analysis from TCGAbiolinks ---- to be replaced!
#dataDEGs <- TCGAanalyze_DEA(mat1 = dataSE[,cond1Samples],
# mat2 = dataSE[,cond2Samples],
# Cond1type = argsCond1,
# Cond2type = argsCond2,
# fdr.cut = 0.01 ,
# logFC.cut = 1,
# method = "glmLRT")
#print (dataDEGs)
|
#### Preamble ####
# Purpose: Prepare and clean the survey data downloaded from [...UPDATE ME!!!!!]
# Author: Rohan Alexander and Sam Caetano [CHANGE THIS TO YOUR NAME!!!!]
# Data: 22 October 2020
# Contact: rohan.alexander@utoronto.ca [PROBABLY CHANGE THIS ALSO!!!!]
# License: MIT
# Pre-requisites:
# - Need to have downloaded the data from X and save the folder that you're
# interested in to inputs/data
# - Don't forget to gitignore it!
#### Workspace setup ####
library(haven)
library(tidyverse)
# NOTE(review): hard-coded, machine-specific working directory; consider
# relative paths or here::here() so the script runs on other machines.
setwd("/Users/sunyiyun/Desktop/sta304/ps3")
# Read in the raw data (You might need to change this if you use a different dataset)
raw_data <- read_dta("ns20200625/ns20200625.dta")
# Add the labels (convert labelled .dta columns into factors)
raw_data <- labelled::to_factor(raw_data)
# Just keep some variables
reduced_data <-
raw_data %>%
select(registration,
vote_2016,
vote_intention,
vote_2020,
employment,
foreign_born,
gender,
race_ethnicity,
household_income,
education,
state,
age,
weight)
#### What else???? ####
# Maybe make some age-groups?
# Maybe check the values?
# Is vote a binary? If not, what are you going to do?
# NOTE(review): if `age` became a factor via to_factor() above, as.numeric()
# returns level codes rather than ages - confirm against the raw data.
reduced_data$age<-as.numeric(reduced_data$age)
# Binary outcome indicators: 1 if the respondent intends to vote for the
# candidate in 2020, 0 for any other answer.
reduced_data<-
reduced_data %>%
mutate(vote_trump =
ifelse(vote_2020=="Donald Trump", 1, 0)) %>%
mutate(vote_biden =
ifelse(vote_2020 =="Joe Biden", 1, 0))
# Keep registered voters only and drop one education level
# (presumably because it has no counterpart elsewhere - confirm).
reduced_data <-
reduced_data %>%
filter(registration == "Registered") %>%
filter(education != "Completed some graduate, but no degree")
reduced_data <- na.omit(reduced_data)
# Collapse a vocational-training level into "High school graduate"
reduced_data$education[reduced_data$education=="Other post high school vocational training"]<-"High school graduate"
# Asian / Pacific Islander categories lumped into one race level below
otherandpaci<-c("Asian (Asian Indian)","Asian (Vietnamese)","Asian (Korean)","Asian (Other)","Asian (Filipino)",
"Pacific Islander (Native Hawaiian)","Pacific Islander (Other)",
"Pacific Islander (Samoan)","Pacific Islander (Guamanian)")
# Recode detailed race_ethnicity into a coarser `race` variable; case_when
# yields NA for any unmatched level, which the na.omit below then removes.
reduced_data<-reduced_data %>%
mutate(race = case_when(race_ethnicity =="White" ~ 'White',
race_ethnicity =="Black, or African American" ~ 'Black, or African American',
race_ethnicity =="Asian (Japanese)" ~ 'Japanese',
race_ethnicity =="Asian (Chinese)" ~ 'Chinese',
race_ethnicity %in% otherandpaci ~"Other asian or pacific islander",
race_ethnicity =="Some other race" ~ 'Other race',
race_ethnicity=="American Indian or Alaska Native"~"American Indian or Alaska Native"
))
# Second na.omit: drops rows whose race matched no case above
reduced_data <- na.omit(reduced_data)
# Saving the survey/sample data as a csv file in my
# working directory
write_csv(reduced_data, "survey_data.csv")
| /01-data_cleaning-survey1.R | no_license | jlkuee/Sta304-problemset3 | R | false | false | 2,830 | r | #### Preamble ####
# Purpose: Prepare and clean the survey data downloaded from [...UPDATE ME!!!!!]
# Author: Rohan Alexander and Sam Caetano [CHANGE THIS TO YOUR NAME!!!!]
# Data: 22 October 2020
# Contact: rohan.alexander@utoronto.ca [PROBABLY CHANGE THIS ALSO!!!!]
# License: MIT
# Pre-requisites:
# - Need to have downloaded the data from X and save the folder that you're
# interested in to inputs/data
# - Don't forget to gitignore it!
#### Workspace setup ####
library(haven)
library(tidyverse)
# NOTE(review): hard-coded, machine-specific working directory; consider
# relative paths or here::here() so the script runs on other machines.
setwd("/Users/sunyiyun/Desktop/sta304/ps3")
# Read in the raw data (You might need to change this if you use a different dataset)
raw_data <- read_dta("ns20200625/ns20200625.dta")
# Add the labels (convert labelled .dta columns into factors)
raw_data <- labelled::to_factor(raw_data)
# Just keep some variables
reduced_data <-
raw_data %>%
select(registration,
vote_2016,
vote_intention,
vote_2020,
employment,
foreign_born,
gender,
race_ethnicity,
household_income,
education,
state,
age,
weight)
#### What else???? ####
# Maybe make some age-groups?
# Maybe check the values?
# Is vote a binary? If not, what are you going to do?
# NOTE(review): if `age` became a factor via to_factor() above, as.numeric()
# returns level codes rather than ages - confirm against the raw data.
reduced_data$age<-as.numeric(reduced_data$age)
# Binary outcome indicators: 1 if the respondent intends to vote for the
# candidate in 2020, 0 for any other answer.
reduced_data<-
reduced_data %>%
mutate(vote_trump =
ifelse(vote_2020=="Donald Trump", 1, 0)) %>%
mutate(vote_biden =
ifelse(vote_2020 =="Joe Biden", 1, 0))
# Keep registered voters only and drop one education level
# (presumably because it has no counterpart elsewhere - confirm).
reduced_data <-
reduced_data %>%
filter(registration == "Registered") %>%
filter(education != "Completed some graduate, but no degree")
reduced_data <- na.omit(reduced_data)
# Collapse a vocational-training level into "High school graduate"
reduced_data$education[reduced_data$education=="Other post high school vocational training"]<-"High school graduate"
# Asian / Pacific Islander categories lumped into one race level below
otherandpaci<-c("Asian (Asian Indian)","Asian (Vietnamese)","Asian (Korean)","Asian (Other)","Asian (Filipino)",
"Pacific Islander (Native Hawaiian)","Pacific Islander (Other)",
"Pacific Islander (Samoan)","Pacific Islander (Guamanian)")
# Recode detailed race_ethnicity into a coarser `race` variable; case_when
# yields NA for any unmatched level, which the na.omit below then removes.
reduced_data<-reduced_data %>%
mutate(race = case_when(race_ethnicity =="White" ~ 'White',
race_ethnicity =="Black, or African American" ~ 'Black, or African American',
race_ethnicity =="Asian (Japanese)" ~ 'Japanese',
race_ethnicity =="Asian (Chinese)" ~ 'Chinese',
race_ethnicity %in% otherandpaci ~"Other asian or pacific islander",
race_ethnicity =="Some other race" ~ 'Other race',
race_ethnicity=="American Indian or Alaska Native"~"American Indian or Alaska Native"
))
# Second na.omit: drops rows whose race matched no case above
reduced_data <- na.omit(reduced_data)
# Saving the survey/sample data as a csv file in my
# working directory
write_csv(reduced_data, "survey_data.csv")
|
#!/usr/bin/Rscript
# Demonstration of reading trailing command-line arguments with commandArgs():
# the first argument is treated as a number, the second as a character string,
# and the third as a boolean.
cli_args <- commandArgs(TRUE)
num_arg <- as.numeric(cli_args[1])
char_arg <- cli_args[2]
bool_arg <- as.logical(cli_args[3])
cat("First arg is, ", num_arg, "; second is: ", char_arg, "; third is: ", bool_arg, ".\n")
| /units/exampleRscript.R | no_license | potatopaul/stat243-fall-2014 | R | false | false | 417 | r | #!/usr/bin/Rscript
# Read the trailing (user-supplied) command-line arguments as strings.
args <- commandArgs(TRUE)
# now args is a character vector containing the arguments. Suppose the first
# argument should be interpreted as a number,
# and the second as a character string and the third as a boolean:
numericArg <- as.numeric(args[1])  # NA (with a warning) when not parseable
charArg <- args[2]
logicalArg <- as.logical(args[3])  # NA unless "TRUE"/"FALSE"/"T"/"F" etc.
cat("First arg is, ", numericArg, "; second is: ", charArg, "; third is: ", logicalArg, ".\n")
|
#Rolling Window RMSE Function
# series is the array of the series
# horizon is how far you want to predict into the future
# d is the order of the differencing: (1-B^)^d
# s is the order of the seasonality: (1-B^s)
# phi = coefficients of the stationary AR term
# theta = coefficients of the invertible MA term
# It simply takes the given horizon and the model in the form of s,d,phis and
# thetas and figures out how many windows it can create in the data (series) and then calculates the ASE for each window.
# The output is the average of all the ASEs from each individual window.
roll.win.rmse.wge = function(series, horizon = 2, s = 0, d = 0, phi = 0, theta = 0)
{
#DEFINE fore.arma.wge2
# fore.arma.wge2: local ARMA forecaster (adapted from tswge::fore.arma.wge).
# Computes Box-Jenkins forecasts for an ARMA(p,q) model with AR coefficients
# `phi` and MA coefficients `theta`.
#   x       - realization (numeric vector)
#   n.ahead - number of steps to forecast
#   lastn   - TRUE: forecast the last n.ahead observed values (for accuracy
#             checks); FALSE (default): forecast beyond the end of the series
#   alpha   - significance level used for the forecast-limit half-widths
#   plot/limits - retained for interface compatibility; plotting is disabled
#   xbar2   - optional fixed mean to use instead of mean(x) (lets the caller
#             hold the mean constant across rolling windows)
# Returns list(f = forecasts of length n.ahead).
# FIX: the psi-weight accumulation loop `for (i in 2:n.ahead)` ran backwards
# over c(2, 1) when n.ahead == 1 and failed on xisq[0]; it is now guarded.
fore.arma.wge2=function(x,phi=0,theta=0,n.ahead=5,lastn=FALSE, plot=FALSE,alpha=.05,limits=TRUE, xbar2 = NULL)
{
# lastn=TRUE indicates that the last n data values are to be forecast
# lastn=FALSE (default) indicates we want forecasts for n values beyond the end of the realization
n=length(x)
p=length(phi)
if(sum(phi^2)==0) {p=0}
q=length(theta)
if(sum(theta^2)==0) {q=0}
#resid=rep(0,n)
npn.ahead=n+n.ahead
xhat=rep(0,npn.ahead)
# use the caller-supplied mean if given, otherwise the sample mean
if(is.null(xbar2))
{
xbar=mean(x)
}
else
{
xbar = xbar2
}
# const = 1 - sum(phi): converts the mean into the model intercept
const=1
if (p > 0) {for(jp in 1:p) {const=const-phi[jp]}}
#
#
# Calculate Box-Jenkins Forecasts
#
#
#Calculating Residuals
#
resid=backcast.wge(x,phi,theta,n.back=50)
#
#
#maconst=const*xbar
#p1=max(p+1,q+1)
#for (i in p1:n) {resid[i]=x[i]
# if ( p > 0) {for (jp in 1:p) {resid[i]=resid[i]-phi[jp]*x[i-jp]}}
# if (q > 0) {for (jq in 1:q) {resid[i]=resid[i]+theta[jq]*resid[i-jq]}}
# resid[i]=resid[i]-maconst}
#
# Calculating Forecasts
#
#
npn.ahead=n+n.ahead
xhat=rep(0,npn.ahead)
mm=n
#
#lastn = TRUE
#
if(lastn==TRUE) {mm=n-n.ahead}
#
#
for (i in 1:mm) {xhat[i]=x[i]}
for (h in 1:n.ahead) {
if (p > 0) {for (jp in 1:p) {xhat[mm+h]=xhat[mm+h]+phi[jp]*xhat[mm+h-jp]}}
if ((h<=q)&(h>0)) {for(jq in h:q) {xhat[mm+h]=xhat[mm+h]-theta[jq]*resid[mm+h-jq]}}
xhat[mm+h]=xhat[mm+h]+xbar*const}
#
#
# Calculate psi weights for forecasts limits
#
#
xi=psi.weights.wge(phi,theta,lag.max=n.ahead)
#
#
#
#Setting up for plots
nap1=n.ahead+1
fplot=rep(0,nap1)
maxh=mm+n.ahead
llplot=rep(0,nap1)
ulplot=rep(0,nap1)
f=rep(0,nap1)
ll=rep(0,nap1)
ul=rep(0,nap1)
wnv=0
xisq=rep(0,n.ahead)
se=rep(0,n.ahead)
se0=1
# white-noise variance estimate from the backcast residuals
for (i in 1:n) {wnv=wnv+resid[i]**2}
wnv=wnv/n
xisq[1]=1
# cumulative squared psi weights; guarded so n.ahead == 1 does not run the
# loop backwards over 2:1 and index xisq[0]
if (n.ahead > 1) {for (i in 2:n.ahead) {xisq[i]=xisq[i-1]+xi[i-1]^2}}
for (i in 1:n.ahead) {se[i]=sqrt(wnv*xisq[i])}
fplot[1]=x[mm]
for (i in 1:n.ahead) {fplot[i+1]=xhat[mm+i]}
ulplot[1]=x[mm]
#for (i in 1:n.ahead) { ulplot[i+1]=fplot[i+1]+1.96*se[i]}
for (i in 1:n.ahead) { ulplot[i+1]=fplot[i+1]-qnorm(alpha/2)*se[i]}
llplot[1]=x[mm]
#for (i in 1:n.ahead) { llplot[i+1]=fplot[i+1]-1.96*se[i]}
for (i in 1:n.ahead) { llplot[i+1]=fplot[i+1]+qnorm(alpha/2)*se[i]}
#
# plot range bookkeeping (min/max shadow the base functions in this frame)
if(limits==FALSE) {
if(lastn==TRUE) {max=max(x,xhat[1:n])
min=min(x,xhat[1:n])}
else {max=max(x,xhat)
min=min(x,xhat)}}
if(limits==TRUE) {min=min(x,llplot)
max=max(x,ulplot)}
#numrows <- 1
#numcols <- 1
timelab <- 'Time'
valuelab <- ''
#fig.width <- 5
#fig.height <- 2.5
cex.labs <- c(.8,.7,.8)
#par(mfrow=c(numrows,numcols),mar=c(6,2,3,1))
t<-1:n;
np1=n+1
np.ahead=mm+n.ahead
tf<-mm:np.ahead
#if (plot=='TRUE') {
#fig.width <- 5
#fig.height <- 2.5
#cex.labs <- c(1.2,1.2,1.2)
#par(mfrow=c(numrows,numcols),mar=c(9,4,3,2))
#plot(t,x,type='o',xaxt='n',yaxt='n',cex=.8,pch=16,cex.lab=1,cex.axis=1,lwd=1,xlab='',ylab='',xlim=c(1,maxh),ylim=c(min,max),col=1)
#axis(side=1,cex.axis=1.1,mgp=c(3,0.15,0),tcl=-.3);
#axis(side=2,las=1,cex.axis=1.1,mgp=c(3,.4,0),tcl=-.3)
#abline=mean(x)
#mtext(side=c(1,2,1),cex=cex.labs,text=c(timelab,valuelab,""),line=c(1.2,2.1,1.8))
#points(tf,fplot,type='o',lty=1,cex=.6,lwd=1,pch=1,col=2);
#if(limits=='TRUE') {points(tf,ulplot,type='l',lty=2,cex=0.6,lwd=.75,pch=1,col=4)
#points(tf,llplot,type='l',lty=3,cex=0.6,lwd=.75,pch=1,col=4)
# }
#}
np1=n+1
nap1=n.ahead+1
f=fplot[2:nap1]
# Calculate RMSE and MAD
# (in-sample accuracy when lastn = TRUE; computed but not returned)
if(lastn==TRUE){
t.start=n-n.ahead
sum.rmse=0
sum.mad=0
for(i in 1:n.ahead) {sum.rmse=sum.rmse+(f[i]-x[t.start+i])^2
sum.mad=sum.mad+abs(f[i]-x[t.start+i])}
mse=sum.rmse/n.ahead
rmse=sqrt(mse)
mad=sum.mad/n.ahead
}
ll=llplot[2:nap1]
ul=ulplot[2:nap1]
# only the forecasts are consumed by the rolling-window driver
if(lastn==TRUE){out1=list(f=f)}
if(lastn==FALSE){out1=list(f=f)}
return(out1)
}
numwindows = 0
RMSEHolder = numeric()
if(s == 0 & d == 0)
{
trainingSize = max(length(phi),length(theta)) + 1 # The plus 1 is for the backcast residuals which helps with ARMA model with q > 0
numwindows = length(series)-(trainingSize + horizon) + 1
RMSEHolder = numeric(numwindows)
print(paste("Please Hold For a Moment, TSWGE is processing the Rolling Window RMSE with", numwindows, "windows."))
for( i in 1:numwindows)
{
forecasts <- fore.arma.wge2(series[i:(i+(trainingSize-1))], plot = TRUE, phi = phi, theta = theta,n.ahead = horizon, xbar = mean(series))
RMSE = sqrt(mean((series[(trainingSize+i):(trainingSize+ i + (horizon) - 1)] - forecasts$f)^2))
RMSEHolder[i] = RMSE
}
}
else
{
trainingSize = sum(length(phi),length(theta),s, d) + 1 # sum and plus one is to help backcast.wge, lag.max and ylim plotting issue in fore.arima.wge
numwindows = length(series)-(trainingSize + horizon) + 1
RMSEHolder = numeric(numwindows)
print(paste("Please Hold For a Moment, TSWGE is processing the Rolling Window RMSE with", numwindows, "windows."))
for( i in 1:numwindows)
{
#invisible(capture.output(forecasts <- fore.arima.wge(series[i:(i+(trainingSize-1))],phi = phis, theta = thetas, s = s, d = d,n.ahead = horizon)))
forecasts <- fore.arima.wge(series[i:(i+(trainingSize-1))],phi = phi, s = s, d = d, theta = theta,n.ahead = horizon)
RMSE = sqrt(mean((series[(trainingSize+i):(trainingSize+ i + (horizon) - 1)] - forecasts$f)^2))
RMSEHolder[i] = RMSE
}
}
RMSEHolder
hist(RMSEHolder, main = "RMSEs for Individual Windows")
WindowedRMSE = mean(RMSEHolder)
print("The Summary Statistics for the Rolling Window RMSE Are:")
print(summary(RMSEHolder))
print(paste("The Rolling Window RMSE is: ",round(WindowedRMSE,3)))
#output
invisible(list(rwRMSE = WindowedRMSE, trainingSize = trainingSize, numwindows = numwindows, horizon = horizon, s = s, d = d, phi = phi, theta = theta, RMSEs = RMSEHolder))
} | /R/roll.win.rmse.wge.R | no_license | cran/tswge | R | false | false | 7,000 | r | #Rolling Window RMSE Function
# series is the array of the series
# horizon is how far you want to predict into the future
# d is the order of the differencing: (1-B^)^d
# s is the order of the seasonality: (1-B^s)
# phi = coefficients of the stationary AR term
# theta = coefficients of the invertible MA term
# It simply takes the given horizon and the model in the form of s,d,phis and
# thetas and figures out how many windows it can create in the data (series) and then calculates the ASE for each window.
#The output is the average off all the ASEs from each individual window.
# Computes the average rolling-window RMSE for the ARMA/ARIMA model described
# by the comments above; prints summaries, plots a histogram of per-window
# RMSEs, and invisibly returns rwRMSE, the settings, and the per-window RMSEs.
roll.win.rmse.wge = function(series, horizon = 2, s = 0, d = 0, phi = 0, theta = 0)
{
#DEFINE fore.arma.wge2
# Local ARMA forecaster adapted from tswge::fore.arma.wge; plotting code is
# retained but commented out, and only the forecasts ($f) are consumed.
# xbar2 lets the caller hold the series mean fixed across rolling windows.
fore.arma.wge2=function(x,phi=0,theta=0,n.ahead=5,lastn=FALSE, plot=FALSE,alpha=.05,limits=TRUE, xbar2 = NULL)
{
# lastn=TRUE indicates that the last n data values are to be forecast
# lastn=FALSE (default) indicates we want forecasts for n values beyond the end of the realization
n=length(x)
p=length(phi)
if(sum(phi^2)==0) {p=0}
q=length(theta)
if(sum(theta^2)==0) {q=0}
#resid=rep(0,n)
npn.ahead=n+n.ahead
xhat=rep(0,npn.ahead)
# use the caller-supplied mean if given, otherwise the sample mean
if(is.null(xbar2))
{
xbar=mean(x)
}
else
{
xbar = xbar2
}
# const = 1 - sum(phi): converts the mean into the model intercept
const=1
if (p > 0) {for(jp in 1:p) {const=const-phi[jp]}}
#
#
# Calculate Box-Jenkins Forecasts
#
#
#Calculating Residuals
#
resid=backcast.wge(x,phi,theta,n.back=50)
#
#
#maconst=const*xbar
#p1=max(p+1,q+1)
#for (i in p1:n) {resid[i]=x[i]
# if ( p > 0) {for (jp in 1:p) {resid[i]=resid[i]-phi[jp]*x[i-jp]}}
# if (q > 0) {for (jq in 1:q) {resid[i]=resid[i]+theta[jq]*resid[i-jq]}}
# resid[i]=resid[i]-maconst}
#
# Calculating Forecasts
#
#
npn.ahead=n+n.ahead
xhat=rep(0,npn.ahead)
mm=n
#
#lastn = TRUE
#
if(lastn==TRUE) {mm=n-n.ahead}
#
#
for (i in 1:mm) {xhat[i]=x[i]}
for (h in 1:n.ahead) {
if (p > 0) {for (jp in 1:p) {xhat[mm+h]=xhat[mm+h]+phi[jp]*xhat[mm+h-jp]}}
if ((h<=q)&(h>0)) {for(jq in h:q) {xhat[mm+h]=xhat[mm+h]-theta[jq]*resid[mm+h-jq]}}
xhat[mm+h]=xhat[mm+h]+xbar*const}
#
#
# Calculate psi weights for forecasts limits
#
#
xi=psi.weights.wge(phi,theta,lag.max=n.ahead)
#
#
#
#Setting up for plots
nap1=n.ahead+1
fplot=rep(0,nap1)
maxh=mm+n.ahead
llplot=rep(0,nap1)
ulplot=rep(0,nap1)
f=rep(0,nap1)
ll=rep(0,nap1)
ul=rep(0,nap1)
wnv=0
xisq=rep(0,n.ahead)
se=rep(0,n.ahead)
se0=1
# white-noise variance estimate from the backcast residuals
for (i in 1:n) {wnv=wnv+resid[i]**2}
wnv=wnv/n
xisq[1]=1
# NOTE(review): when n.ahead == 1 this loop runs backwards over 2:1 and
# fails on xisq[0]; callers should use horizon >= 2.
for (i in 2:n.ahead) {xisq[i]=xisq[i-1]+xi[i-1]^2}
for (i in 1:n.ahead) {se[i]=sqrt(wnv*xisq[i])}
fplot[1]=x[mm]
for (i in 1:n.ahead) {fplot[i+1]=xhat[mm+i]}
ulplot[1]=x[mm]
#for (i in 1:n.ahead) { ulplot[i+1]=fplot[i+1]+1.96*se[i]}
for (i in 1:n.ahead) { ulplot[i+1]=fplot[i+1]-qnorm(alpha/2)*se[i]}
llplot[1]=x[mm]
#for (i in 1:n.ahead) { llplot[i+1]=fplot[i+1]-1.96*se[i]}
for (i in 1:n.ahead) { llplot[i+1]=fplot[i+1]+qnorm(alpha/2)*se[i]}
#
# plot range bookkeeping (min/max shadow the base functions in this frame)
if(limits==FALSE) {
if(lastn==TRUE) {max=max(x,xhat[1:n])
min=min(x,xhat[1:n])}
else {max=max(x,xhat)
min=min(x,xhat)}}
if(limits==TRUE) {min=min(x,llplot)
max=max(x,ulplot)}
#numrows <- 1
#numcols <- 1
timelab <- 'Time'
valuelab <- ''
#fig.width <- 5
#fig.height <- 2.5
cex.labs <- c(.8,.7,.8)
#par(mfrow=c(numrows,numcols),mar=c(6,2,3,1))
t<-1:n;
np1=n+1
np.ahead=mm+n.ahead
tf<-mm:np.ahead
#if (plot=='TRUE') {
#fig.width <- 5
#fig.height <- 2.5
#cex.labs <- c(1.2,1.2,1.2)
#par(mfrow=c(numrows,numcols),mar=c(9,4,3,2))
#plot(t,x,type='o',xaxt='n',yaxt='n',cex=.8,pch=16,cex.lab=1,cex.axis=1,lwd=1,xlab='',ylab='',xlim=c(1,maxh),ylim=c(min,max),col=1)
#axis(side=1,cex.axis=1.1,mgp=c(3,0.15,0),tcl=-.3);
#axis(side=2,las=1,cex.axis=1.1,mgp=c(3,.4,0),tcl=-.3)
#abline=mean(x)
#mtext(side=c(1,2,1),cex=cex.labs,text=c(timelab,valuelab,""),line=c(1.2,2.1,1.8))
#points(tf,fplot,type='o',lty=1,cex=.6,lwd=1,pch=1,col=2);
#if(limits=='TRUE') {points(tf,ulplot,type='l',lty=2,cex=0.6,lwd=.75,pch=1,col=4)
#points(tf,llplot,type='l',lty=3,cex=0.6,lwd=.75,pch=1,col=4)
# }
#}
np1=n+1
nap1=n.ahead+1
f=fplot[2:nap1]
# Calculate RMSE and MAD
# (in-sample accuracy when lastn = TRUE; computed but not returned)
if(lastn==TRUE){
t.start=n-n.ahead
sum.rmse=0
sum.mad=0
for(i in 1:n.ahead) {sum.rmse=sum.rmse+(f[i]-x[t.start+i])^2
sum.mad=sum.mad+abs(f[i]-x[t.start+i])}
mse=sum.rmse/n.ahead
rmse=sqrt(mse)
mad=sum.mad/n.ahead
}
ll=llplot[2:nap1]
ul=ulplot[2:nap1]
# only the forecasts are returned
if(lastn==TRUE){out1=list(f=f)}
if(lastn==FALSE){out1=list(f=f)}
return(out1)
}
numwindows = 0
RMSEHolder = numeric()
# Pure ARMA case: the training-window size depends only on the AR/MA orders.
# The seasonal/differenced case below delegates to fore.arima.wge instead.
if(s == 0 & d == 0)
{
trainingSize = max(length(phi),length(theta)) + 1 # The plus 1 is for the backcast residuals which helps with ARMA model with q > 0
numwindows = length(series)-(trainingSize + horizon) + 1
RMSEHolder = numeric(numwindows)
print(paste("Please Hold For a Moment, TSWGE is processing the Rolling Window RMSE with", numwindows, "windows."))
for( i in 1:numwindows)
{
# NOTE(review): `xbar = mean(series)` reaches the `xbar2` parameter of
# fore.arma.wge2 only through R's partial argument matching; spelling the
# argument name out exactly would be safer.
forecasts <- fore.arma.wge2(series[i:(i+(trainingSize-1))], plot = TRUE, phi = phi, theta = theta,n.ahead = horizon, xbar = mean(series))
RMSE = sqrt(mean((series[(trainingSize+i):(trainingSize+ i + (horizon) - 1)] - forecasts$f)^2))
RMSEHolder[i] = RMSE
}
}
else
{
trainingSize = sum(length(phi),length(theta),s, d) + 1 # sum and plus one is to help backcast.wge, lag.max and ylim plotting issue in fore.arima.wge
numwindows = length(series)-(trainingSize + horizon) + 1
RMSEHolder = numeric(numwindows)
print(paste("Please Hold For a Moment, TSWGE is processing the Rolling Window RMSE with", numwindows, "windows."))
for( i in 1:numwindows)
{
#invisible(capture.output(forecasts <- fore.arima.wge(series[i:(i+(trainingSize-1))],phi = phis, theta = thetas, s = s, d = d,n.ahead = horizon)))
forecasts <- fore.arima.wge(series[i:(i+(trainingSize-1))],phi = phi, s = s, d = d, theta = theta,n.ahead = horizon)
RMSE = sqrt(mean((series[(trainingSize+i):(trainingSize+ i + (horizon) - 1)] - forecasts$f)^2))
RMSEHolder[i] = RMSE
}
}
RMSEHolder
# distribution of the per-window RMSEs
hist(RMSEHolder, main = "RMSEs for Individual Windows")
WindowedRMSE = mean(RMSEHolder)
print("The Summary Statistics for the Rolling Window RMSE Are:")
print(summary(RMSEHolder))
print(paste("The Rolling Window RMSE is: ",round(WindowedRMSE,3)))
#output
invisible(list(rwRMSE = WindowedRMSE, trainingSize = trainingSize, numwindows = numwindows, horizon = horizon, s = s, d = d, phi = phi, theta = theta, RMSEs = RMSEHolder))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalization.R
\name{setmin.dat}
\alias{setmin.dat}
\title{Normalize data in data frame to minimal value (Null value) by subtraction of difference.}
\usage{
setmin.dat(df)
}
\arguments{
\item{df}{Data frame from mean data file.}
}
\value{
data frame (mean data)
}
\description{
Normalize data in data frame to minimal value (Null value) by subtraction of difference.
}
| /man/setmin.dat.Rd | no_license | suvarzz/MNuc | R | false | true | 448 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalization.R
\name{setmin.dat}
\alias{setmin.dat}
\title{Normalize data in data frame to minimal value (Null value) by subtraction of difference.}
\usage{
setmin.dat(df)
}
\arguments{
\item{df}{Data frame from mean data file.}
}
\value{
data frame (mean data)
}
\description{
Normalize data in data frame to minimal value (Null value) by subtraction of difference.
}
|
# Simulation check (scenario 5): multiple imputation of clinical expert
# opinions (lambda) when opinions are missing at random with probability
# driven by a physician-level covariate x. For each of 1000 replicates a
# physician population is generated, a 30% sample surveyed, unobserved
# opinions masked, and m2_mi() applied; results are saved to RDS.
library(tidyr, warn.conflicts = FALSE, quietly = TRUE)
library(dplyr, warn.conflicts = FALSE, quietly = TRUE)
library(purrr, warn.conflicts = FALSE, quietly = TRUE)
library(MASS, warn.conflicts = FALSE, quietly = TRUE)
library(bin2mi)
library(m2imp)
# Scenario design constants (alpha/power/pc/pt/m1/n_obs are kept for
# reference alongside the other scenario scripts; not used directly below).
alpha <- 0.025
power <- 0.85
cor_xl <- 0.4
pc <- 0.8
pt <- 0.775 # NOTE: masks stats::pt() within this script
m1 <- 0.23
n_obs <- 250
# rate of clinical expert opinions we observe
obs_rate <- 0.03
# slope of the missingness model (probability an opinion is non-observed)
b1 <- -0.8
# covariance of (x, lambda): sd(x) = 4, sd(lambda) = 0.05, correlation cor_xl
xcov <- matrix(c(4^2, 4*0.05*cor_xl, 4*0.05*cor_xl, 0.05^2), 2, 2)
x1 <- parallel::mclapply(X = 1:1000,
                         mc.cores = 7,
                         FUN = function(x){
  # population of physicians consists of 1000 doctors
  set.seed(100*5 + x)
  dt_pop0 <- mvrnorm(1000, mu = c(15, 0.7), Sigma = xcov)
  dt_pop <- tibble::tibble(x = dt_pop0[, 1],
                           lambda = dt_pop0[, 2],
                           ph_id = seq_len(nrow(dt_pop0)))
  dt_sample <- dt_pop %>%
    dplyr::sample_frac(size = 0.3)
  # intercept calibrated so the marginal observation rate equals obs_rate
  int <- log((1 - obs_rate)/obs_rate) - b1*mean(dt_sample$x)/10
  # draw the missingness indicator r (1 = opinion unobserved) for each MD;
  # factored into a helper because the same draw is repeated below
  draw_responses <- function() {
    dt_sample %>%
      dplyr::mutate(pmiss = 1/(1 + exp(- int - b1*x/10)),
                    pthresh = runif(n()),
                    r = ifelse(pmiss > pthresh, 1, 0)) %>%
      dplyr::select(-c(pmiss, pthresh))
  }
  dt_all <- draw_responses()
  # redraw until at least 4 responses are observed in the survey
  while (length(dt_all$r[dt_all$r == 0]) < 4) {
    dt_all <- draw_responses()
  }
  # mean/sd of lambda for the whole representative sample of MDs
  # (diagnostic only; not returned)
  mdsur_all <- dt_all %>%
    dplyr::summarise(mean_l = mean(lambda), sd_l = sd(lambda), n_l = n())
  # mean/sd of lambda for the observed subset of MDs (diagnostic only)
  mdsur_obs <- dt_all %>%
    dplyr::filter(r == 0) %>%
    dplyr::summarise(mean_l = mean(lambda), sd_l = sd(lambda), n_l = n())
  # mask unobserved opinions, then multiply impute lambda with m2_mi
  dt_obs <- dt_all %>%
    dplyr::mutate(lambda = ifelse(r == 0, lambda, NA))
  mdsur_mi <- m2_mi(dt_obs, num_m = 10) %>%
    dplyr::mutate(sim_id = x)
  out <- list(mdsur_mi) %>%
    purrr::set_names("dfch")
  return(out)
})
saveRDS(x1, "checks/dfchecks/results/mdsu_obs3_checkdf_sc5.rds")
| /checks/dfchecks/pgms/mdsur_obs3_dfcheck_sc5.R | no_license | yuliasidi/ch2sim | R | false | false | 2,386 | r | library(tidyr, warn.conflicts = F, quietly = T)
# Simulation check (scenario 5): multiple imputation of clinical expert
# opinions (lambda) under covariate-driven missingness. (The library(tidyr)
# call for this script sits on the preceding line of the file.)
library(dplyr, warn.conflicts = F, quietly = T)
library(purrr, warn.conflicts = F, quietly = T)
library(MASS, warn.conflicts = F, quietly = T)
library(bin2mi)
library(m2imp)
# scenario design constants (alpha/power/pc/pt/m1/n_obs are reference values
# shared with the other scenario scripts; not used directly below)
alpha <- 0.025
power <- 0.85
cor_xl <- 0.4
pc <- 0.8
pt <- 0.775
m1 <- 0.23
n_obs <- 250
# rate of clinical expert opinions we observe
obs_rate <- 0.03
# parameters to be used in the clinical expert opinions model (to calculate
# the probability of being non/observed)
b1 <- - 0.8
# covariance of (x, lambda): sd(x) = 4, sd(lambda) = 0.05, correlation cor_xl
xcov <- matrix(c(4^2, 4*0.05*cor_xl, 4*0.05*cor_xl, 0.05^2), 2, 2)
x1 <- parallel::mclapply(X = 1:1000,
mc.cores = 7,
FUN= function(x){
# population of physicians consists of 1000 doctors
set.seed(100*5 + x)
dt_pop0 <- mvrnorm(1000, mu = c(15, 0.7), Sigma = xcov)
dt_pop <- tibble::tibble(x = dt_pop0[,1],
lambda = dt_pop0[,2],
ph_id = seq(1, length(dt_pop0[,1])))
dt_sample <- dt_pop%>%
dplyr::sample_frac(size = 0.3)
# intercept calibrated so the marginal observation rate equals obs_rate
int <- log((1 - obs_rate)/obs_rate) - b1*mean(dt_sample$x)/10
# draw the missingness indicator r (1 = opinion unobserved) for each MD
dt_all <- dt_sample%>%
dplyr::mutate(pmiss = 1/(1 + exp(- int - b1*x/10)),
pthresh = runif(n()),
r = ifelse(pmiss > pthresh, 1, 0))%>%
dplyr::select(-c(pmiss, pthresh))
# redraw until at least 4 responses are observed in the survey
while(length(dt_all$r[dt_all$r==0])<4){
dt_all <- dt_sample%>%
dplyr::mutate(pmiss = 1/(1 + exp(- int - b1*x/10)),
pthresh = runif(n()),
r = ifelse(pmiss > pthresh, 1, 0))%>%
dplyr::select(-c(pmiss, pthresh))
}
# mean/sd of lambda for the whole representative sample of MDs (diagnostic)
mdsur_all <- dt_all%>%
dplyr::summarise(mean_l = mean(lambda), sd_l = sd(lambda), n_l = n())
# mean/sd of lambda for the observed subset of MDs (diagnostic)
mdsur_obs <- dt_all%>%
dplyr::filter(r==0)%>%
dplyr::summarise(mean_l = mean(lambda), sd_l = sd(lambda), n_l = n())
# mask unobserved opinions before imputation
dt_obs <- dt_all%>%
dplyr::mutate(lambda = ifelse(r==0, lambda, NA))
mdsur_mi <- m2_mi(dt_obs, num_m = 10)%>%
dplyr::mutate(sim_id = x)
out <- list(mdsur_mi)%>%
purrr::set_names("dfch")
return(out)
})
saveRDS(x1, "checks/dfchecks/results/mdsu_obs3_checkdf_sc5.rds")
|
# Scrape SCOOT loop (junction sensor) readings for the N22161 junction from
# the Transport for Greater Manchester (TfGM) open-data API and write a
# timestamped CSV snapshot. Intended to be run on a schedule.
# Load packages: ----------------------------------------------------------
library(httr) # Tools for Working with URLs and HTTP, CRAN v1.4.2
library(jsonlite) # A Simple and Robust JSON Parser and Generator for R, CRAN v1.7.1
library(tidyverse) # Easily Install and Load the 'Tidyverse', CRAN v1.3.0
library(lubridate) # Make Dealing with Dates a Little Easier, CRAN v1.7.9
# Get the raw data from the TFGM API: -------------------------------------
# "YOUR_KEY" is a placeholder; a valid Ocp-Apim-Subscription-Key must be
# substituted before the request will succeed.
raw <- GET("https://api.tfgm.com/odata/ScootLoops?$expand=EndLocation,StartLocation,ScootDetails&$top=4000",
add_headers("Ocp-Apim-Subscription-Key"= "YOUR_KEY"))
# Transform the data: -----------------------------------------------------
# Keep only SCOOT loops whose SCN starts with N22161 (excluding N22161N),
# then flatten the nested ScootDetails record into columns.
junction_data <- fromJSON(rawToChar(raw$content))$value %>%
filter(str_detect(SCN, "^N22161"),
SCN != 'N22161N')%>%
unnest_wider(ScootDetails, names_repair = "unique") %>%
# NOTE(review): the fixed str_sub positions assume the WellKnownText field
# always has the same character layout; verify against the API (WKT is
# usually "POINT (x y)", i.e. longitude before latitude).
mutate(start_coords = StartLocation$LocationSpatial$Geography$WellKnownText,
start_lat = as.numeric(str_sub(start_coords, start = 8, end = 24)),
start_long = as.numeric(str_sub(start_coords, start = 26, end = 41)),
end_coords = EndLocation$LocationSpatial$Geography$WellKnownText,
end_lat = as.numeric(str_sub(end_coords, start = 8, end = 24)),
end_long = as.numeric(str_sub(end_coords, start = 26, end = 41)),
time = now(tz = "GB"))%>%
janitor::clean_names()%>%
select(scoot_name = scn_2,
description,
time,
congestion_percentage,
current_flow,
average_speed,
link_travel_time,
start_lat,
start_long,
end_lat,
end_long) %>%
as_tibble()
# Save the output as csv: -------------------------------------------------
# File name is the run timestamp with spaces/colons replaced by underscores;
# this reuses (and shadows) the name `time` used for the column above.
time <- gsub(" |:","_",Sys.time())
junction_data %>%
write_csv(
paste0("/Users/nathankhadaroo/Desktop/PhD/CCAP_project/Data/",time,".csv")
)
sessionInfo()
| /tfgm_webscrape.R | permissive | NathanKhadaroo/tfgm_webscraping | R | false | false | 1,931 | r |
# Scrape SCOOT loop (junction sensor) readings for the N22161 junction from
# the Transport for Greater Manchester (TfGM) open-data API and write a
# timestamped CSV snapshot. Intended to be run on a schedule.
# Load packages: ----------------------------------------------------------
library(httr) # Tools for Working with URLs and HTTP, CRAN v1.4.2
library(jsonlite) # A Simple and Robust JSON Parser and Generator for R, CRAN v1.7.1
library(tidyverse) # Easily Install and Load the 'Tidyverse', CRAN v1.3.0
library(lubridate) # Make Dealing with Dates a Little Easier, CRAN v1.7.9
# Get the raw data from the TFGM API: -------------------------------------
# "YOUR_KEY" is a placeholder; a valid Ocp-Apim-Subscription-Key must be
# substituted before the request will succeed.
raw <- GET("https://api.tfgm.com/odata/ScootLoops?$expand=EndLocation,StartLocation,ScootDetails&$top=4000",
add_headers("Ocp-Apim-Subscription-Key"= "YOUR_KEY"))
# Transform the data: -----------------------------------------------------
# Keep only SCOOT loops whose SCN starts with N22161 (excluding N22161N),
# then flatten the nested ScootDetails record into columns.
junction_data <- fromJSON(rawToChar(raw$content))$value %>%
filter(str_detect(SCN, "^N22161"),
SCN != 'N22161N')%>%
unnest_wider(ScootDetails, names_repair = "unique") %>%
# NOTE(review): the fixed str_sub positions assume the WellKnownText field
# always has the same character layout; verify against the API (WKT is
# usually "POINT (x y)", i.e. longitude before latitude).
mutate(start_coords = StartLocation$LocationSpatial$Geography$WellKnownText,
start_lat = as.numeric(str_sub(start_coords, start = 8, end = 24)),
start_long = as.numeric(str_sub(start_coords, start = 26, end = 41)),
end_coords = EndLocation$LocationSpatial$Geography$WellKnownText,
end_lat = as.numeric(str_sub(end_coords, start = 8, end = 24)),
end_long = as.numeric(str_sub(end_coords, start = 26, end = 41)),
time = now(tz = "GB"))%>%
janitor::clean_names()%>%
select(scoot_name = scn_2,
description,
time,
congestion_percentage,
current_flow,
average_speed,
link_travel_time,
start_lat,
start_long,
end_lat,
end_long) %>%
as_tibble()
# Save the output as csv: -------------------------------------------------
# File name is the run timestamp with spaces/colons replaced by underscores;
# this reuses (and shadows) the name `time` used for the column above.
time <- gsub(" |:","_",Sys.time())
junction_data %>%
write_csv(
paste0("/Users/nathankhadaroo/Desktop/PhD/CCAP_project/Data/",time,".csv")
)
sessionInfo()
|
# Unit test for sars::lin_pow() (log-log power species-area model).
context("lin_pow")
library(sars)
test_that("lin_pow returns correct results", {
# Galapagos species-area data bundled with the package
data(galap)
# con = 1: constant passed to lin_pow (see ?lin_pow for its role)
fit <- lin_pow(galap, con = 1)
# second coefficient (slope) of the fitted log-log model
expect_equal(round(fit$Model$coefficients[2], 2), 0.34)
# p-value of the second residual normality test stored on the fit
expect_equal(round(fit$normaTest[[2]]$p.value, 2), 0.35)
})
| /tests/testthat/test_lin_pow.R | no_license | Bhanditz/sars | R | false | false | 261 | r | context("lin_pow")
# Unit test for sars::lin_pow() (the context() call for this file sits on the
# preceding line).
library(sars)
test_that("lin_pow returns correct results", {
# Galapagos species-area data bundled with the package
data(galap)
# con = 1: constant passed to lin_pow (see ?lin_pow for its role)
fit <- lin_pow(galap, con = 1)
# second coefficient (slope) of the fitted log-log model
expect_equal(round(fit$Model$coefficients[2], 2), 0.34)
# p-value of the second residual normality test stored on the fit
expect_equal(round(fit$normaTest[[2]]$p.value, 2), 0.35)
})
|
# Fixtures shared by the load/save tests. `not_a_df` is deliberately not a
# data frame: load_rda() is expected to drop it.
cas <- smart_read("cas500.csv")
not_a_df <- 1:10
rda <- file.path(tempdir(), "my_files.rda")
save(cas, iris, not_a_df, file = rda)

test_that("Load returns list of data frames", {
  res <- load_rda(rda)
  expect_type(res, "list")
  # only the data frames survive; the integer vector is filtered out
  expect_equal(names(res), c("cas", "iris"))
  expect_equal(res$iris, iris)
  expect_equal(res$cas, cas)
})

test_that("Load has valid code", {
  # code() should reproduce the load call verbatim
  expect_equal(
    code(load_rda(rda)),
    sprintf("load('%s')", rda)
  )
})

test_that("Save writes file with correct name", {
  # normalise path separators so the sprintf comparison below matches
  fp <- chartr("\\", "/", file.path(tempdir(), "irisdata.rda"))
  on.exit(unlink(fp), add = TRUE)
  x <- save_rda(iris, fp, "my_iris")
  expect_true(x)
  expect_equal(code(x), sprintf("save(my_iris, file = '%s')", fp))
  load(fp)
  expect_equal(my_iris, iris)
})

# FIX: the original called on.exit(unlink(rda)) at file top level, where there
# is no enclosing function frame, so the cleanup never ran. Delete the
# fixture explicitly once all tests above have finished.
unlink(rda)
| /tests/testthat/test_load_save.R | no_license | cran/iNZightTools | R | false | false | 813 | r | cas <- smart_read("cas500.csv")
# Fixtures for the load/save tests (`cas` itself is read on the preceding
# line of this file). `not_a_df` is deliberately not a data frame.
not_a_df <- 1:10
rda <- file.path(tempdir(), "my_files.rda")
# NOTE(review): on.exit() at file top level has no enclosing function frame,
# so this cleanup never runs and the temp file is left behind.
on.exit(unlink(rda))
save(cas, iris, not_a_df, file = rda)
test_that("Load returns list of data frames", {
res <- load_rda(rda)
expect_type(res, "list")
# only the data frames survive; the integer vector is dropped
expect_equal(names(res), c("cas", "iris"))
expect_equal(res$iris, iris)
expect_equal(res$cas, cas)
})
test_that("Load has valid code", {
# code() should reproduce the load call verbatim
expect_equal(
code(load_rda(rda)),
sprintf("load('%s')", rda)
)
})
test_that("Save writes file with correct name", {
# normalise path separators so the sprintf comparison below matches
fp <- chartr("\\", "/", file.path(tempdir(), "irisdata.rda"))
on.exit(unlink(fp))
x <- save_rda(iris, fp, "my_iris")
expect_true(x)
expect_equal(code(x), sprintf("save(my_iris, file = '%s')", fp))
load(fp)
expect_equal(my_iris, iris)
})
|
# clean USGS Rio Vista Bridge (SRV) stage and flow data for modeling
library(dplyr)
library(readr)
library(glue)
library(contentid)
library(janitor)
library(tidyr)
# f_clean_flow_usgs_11455420: read the raw SRV stage/flow export (resolved
# through contentid), average to daily values, correct the pre-WY06
# gage-height datum shift, and write the cleaned daily series to data_clean/.
# Called for its side effects (plots shown interactively, CSV written).
f_clean_flow_usgs_11455420 <- function(){
# get raw data ID:
SRV_flow <- contentid::store("data_raw/raw_flow_usgs_11455420.zip")
SRV_flow_id <- contentid::resolve("hash://sha256/7c2b6318b8b2efccc4ede3021a33f1c32c0a7c9498877e4d29a378e461bee89a")
# read in data
SRVuv <- read_csv(SRV_flow_id)
#subset and clean column headers
SRVuv <- rename(SRVuv, Q_tf = x_72137_inst)
SRVuv <- subset(SRVuv, select = c(date_time, gh_inst, Q_tf))
#downstep SRV stage and flow to daily mean
SRVdv <- SRVuv %>%
mutate(date = as.Date(date_time)) %>%
group_by(date) %>%
summarize(gh = mean(gh_inst, na.rm=TRUE), Q_tf = mean(Q_tf, na.rm = TRUE))
#explore data - notice step change between WY05 and WY06
# NOTE(review): loading ggplot2 inside the function body works but would
# normally live with the other library() calls above.
library(ggplot2)
plot <- ggplot() + geom_line(data = SRVdv, aes(x=date, y=gh), color = "red")
plot
#explore offset to correct WY05 and earlier data to current datum
#subset WY05 and previous water years
SRV_WY05 <- SRVuv %>% filter(date_time <= "2005-09-30 23:45:00")
#subset WY06 to present
SRV_WY06 <- SRVuv %>% filter(date_time >= "2005-10-01 00:00:00")
#summary statistics for Sept. 2005 (mean gage height is 11.96)
SRV_WY05_calc <- SRV_WY05 %>% filter(date_time >= "2005-09-01 00:00:00") %>% filter(date_time <= "2005-09-30 23:45:00")
summary(SRV_WY05_calc)
#summary statistics for Oct. 2005
SRV_WY06_calc <- SRV_WY06 %>% filter(date_time >= "2005-10-01 00:00:00") %>% filter(date_time <= "2005-10-31 23:45:00")
summary(SRV_WY06_calc) #mean gage height is 4.094
#11.96 - 4.094 is 7.866, round to 7.87
#apply offset based on the Sept./Oct. 2005 exploration above
# (7.87 shifts the pre-WY06 record onto the current gage datum)
SRV_WY05$gh_off <- SRV_WY05$gh_inst - 7.87
#make dummy column with WY06 so can row bind
SRV_WY06$gh_off <- SRV_WY06$gh_inst
#rowbind WY05 and earlier, WY06 and later
SRV_off <- rbind(SRV_WY05, SRV_WY06)
#downstep offset data to daily mean
SRVdv_off <- SRV_off %>%
mutate(date = as.Date(date_time)) %>%
group_by(date) %>%
summarize(gh = mean(gh_off, na.rm=TRUE), Q_tf = mean(Q_tf, na.rm = TRUE))
#view new timeseries with offset
plot <- ggplot()+ geom_line(data = SRVdv_off, aes(x=date, y=gh), color = "blue")
plot
#write new file
# NOTE(review): write.csv() keeps row names by default, adding an unnamed
# index column to the output; confirm whether row.names = FALSE is wanted.
write.csv(SRVdv_off, "data_clean/clean_flow_usgs_11455420.csv")
}
# run function
f_clean_flow_usgs_11455420()
| /scripts/functions/f_clean_flow_usgs_11455420.R | no_license | Delta-Stewardship-Council/swg-21-connectivity | R | false | false | 2,481 | r | # clean USGS Rio Vista Bridge (SRV) stage and flow data for modeling
library(dplyr)
library(readr)
library(glue)
library(contentid)
library(janitor)
library(tidyr)
# f_clean_flow_usgs_11455420: read the raw SRV stage/flow export (resolved
# through contentid), average to daily values, correct the pre-WY06
# gage-height datum shift, and write the cleaned daily series to data_clean/.
# Called for its side effects (plots shown interactively, CSV written).
f_clean_flow_usgs_11455420 <- function(){
# get raw data ID:
SRV_flow <- contentid::store("data_raw/raw_flow_usgs_11455420.zip")
SRV_flow_id <- contentid::resolve("hash://sha256/7c2b6318b8b2efccc4ede3021a33f1c32c0a7c9498877e4d29a378e461bee89a")
# read in data
SRVuv <- read_csv(SRV_flow_id)
#subset and clean column headers
SRVuv <- rename(SRVuv, Q_tf = x_72137_inst)
SRVuv <- subset(SRVuv, select = c(date_time, gh_inst, Q_tf))
#downstep SRV stage and flow to daily mean
SRVdv <- SRVuv %>%
mutate(date = as.Date(date_time)) %>%
group_by(date) %>%
summarize(gh = mean(gh_inst, na.rm=TRUE), Q_tf = mean(Q_tf, na.rm = TRUE))
#explore data - notice step change between WY05 and WY06
# NOTE(review): loading ggplot2 inside the function body works but would
# normally live with the other library() calls above.
library(ggplot2)
plot <- ggplot() + geom_line(data = SRVdv, aes(x=date, y=gh), color = "red")
plot
#explore offset to correct WY05 and earlier data to current datum
#subset WY05 and previous water years
SRV_WY05 <- SRVuv %>% filter(date_time <= "2005-09-30 23:45:00")
#subset WY06 to present
SRV_WY06 <- SRVuv %>% filter(date_time >= "2005-10-01 00:00:00")
#summary statistics for Sept. 2005 (mean gage height is 11.96)
SRV_WY05_calc <- SRV_WY05 %>% filter(date_time >= "2005-09-01 00:00:00") %>% filter(date_time <= "2005-09-30 23:45:00")
summary(SRV_WY05_calc)
#summary statistics for Oct. 2005
SRV_WY06_calc <- SRV_WY06 %>% filter(date_time >= "2005-10-01 00:00:00") %>% filter(date_time <= "2005-10-31 23:45:00")
summary(SRV_WY06_calc) #mean gage height is 4.094
#11.96 - 4.094 is 7.866, round to 7.87
#apply offset based on the Sept./Oct. 2005 exploration above
# (7.87 shifts the pre-WY06 record onto the current gage datum)
SRV_WY05$gh_off <- SRV_WY05$gh_inst - 7.87
#make dummy column with WY06 so can row bind
SRV_WY06$gh_off <- SRV_WY06$gh_inst
#rowbind WY05 and earlier, WY06 and later
SRV_off <- rbind(SRV_WY05, SRV_WY06)
#downstep offset data to daily mean
SRVdv_off <- SRV_off %>%
mutate(date = as.Date(date_time)) %>%
group_by(date) %>%
summarize(gh = mean(gh_off, na.rm=TRUE), Q_tf = mean(Q_tf, na.rm = TRUE))
#view new timeseries with offset
plot <- ggplot()+ geom_line(data = SRVdv_off, aes(x=date, y=gh), color = "blue")
plot
#write new file
# NOTE(review): write.csv() keeps row names by default, adding an unnamed
# index column to the output; confirm whether row.names = FALSE is wanted.
write.csv(SRVdv_off, "data_clean/clean_flow_usgs_11455420.csv")
}
# run function
f_clean_flow_usgs_11455420()
|
## File Name: tam2mirt.R
## File Version: 0.292
# convert a fitted tam object into a mirt object
tam2mirt <- function( tamobj )
{
# Convert a fitted TAM object into a mirt object without re-estimation:
# item parameters, the trait distribution, information criteria and the
# theta quadrature grid are copied from the TAM fit into a
# pseudo-estimated mirt model structure.
#
# tamobj: fitted TAM model carrying the slots read below
#   (AXsi, B, beta, variance, resp, ic, theta).
# Returns: the list produced by lavaan2mirt(), augmented with the
#   generated lavaan syntax strings (fixed and freed versions).
est.mirt <- FALSE # NOTE(review): assigned but never used below; lavaan2mirt is called with est.mirt=TRUE
# extract intercept
AXsi <- tamobj$AXsi
# extract loadings
B <- tamobj$B
# number of dimensions (third dimension of the loading array B)
D <- dim(B)[3]
# extract trait distribution
mean.trait <- tamobj$beta
cov.trait <- tamobj$variance
# extract data
dat <- tamobj$resp
# factor names: a single generic name in 1D, dimension names otherwise
if (D==1){
factors <- 'F1'
}
if (D>1){
factors <- dimnames(tamobj$B)[[3]]
}
# lavaan syntax with fixed values
lavsyn <- tam2mirt_fix( D=D, factors=factors, B=B, dat=dat, AXsi=AXsi,
mean.trait=mean.trait, cov.trait=cov.trait, tamobj=tamobj )
# lavaan syntax with freed values
lavsyn.freed <- tam2mirt_freed( D=D, factors=factors, B=B, dat=dat,
AXsi=AXsi, mean.trait=mean.trait, cov.trait=cov.trait, tamobj=tamobj )
# pseudo-estimate model in mirt: just create mirt object structure
res <- lavaan2mirt( dat=dat, lavmodel=lavsyn, est.mirt=TRUE )
#--- include parameters in mirt object
res$mirt@Model$nest <- as.integer(tamobj$ic$np ) # number of estimated parameters
# copy information criteria (AIC, BIC, AICc, SABIC) computed by TAM
res$mirt@Fit$AIC <- tamobj$ic$AIC
res$mirt@Fit$BIC <- tamobj$ic$BIC
res$mirt@Fit$AICc <- tamobj$ic$AICc
res$mirt@Fit$SABIC <- tamobj$ic$aBIC
# use theta grid from estimation in TAM
res$mirt@Model$Theta <- tamobj$theta
res$mirt@Options$quadpts <- nrow(tamobj$theta)
# output: attach the generated syntax for downstream inspection
res$lavaan.syntax.fixed <- lavsyn
res$lavaan.syntax.freed <- lavsyn.freed
# res$tamobj <- tamobj
return(res)
}
| /R/tam2mirt.R | no_license | alexanderrobitzsch/sirt | R | false | false | 1,677 | r | ## File Name: tam2mirt.R
## File Version: 0.292
# convert a fitted tam object into a mirt object
tam2mirt <- function( tamobj )
{
# Convert a fitted TAM object into a (pseudo-estimated) mirt object,
# copying item parameters, trait distribution, information criteria and
# the theta grid from the TAM fit. Returns the lavaan2mirt() result
# augmented with the generated lavaan syntax strings.
est.mirt <- FALSE # NOTE(review): assigned but never used in this function
# extract intercept
AXsi <- tamobj$AXsi
# extract loadings
B <- tamobj$B
# number of dimensions
D <- dim(B)[3]
# extract trait distribution
mean.trait <- tamobj$beta
cov.trait <- tamobj$variance
# extract data
dat <- tamobj$resp
# factors
if (D==1){
factors <- 'F1'
}
if (D>1){
factors <- dimnames(tamobj$B)[[3]]
}
# lavaan syntax with fixed values
lavsyn <- tam2mirt_fix( D=D, factors=factors, B=B, dat=dat, AXsi=AXsi,
mean.trait=mean.trait, cov.trait=cov.trait, tamobj=tamobj )
# lavaan syntax with freed values
lavsyn.freed <- tam2mirt_freed( D=D, factors=factors, B=B, dat=dat,
AXsi=AXsi, mean.trait=mean.trait, cov.trait=cov.trait, tamobj=tamobj )
# pseudo-estimate model in mirt: just create mirt object structure
res <- lavaan2mirt( dat=dat, lavmodel=lavsyn, est.mirt=TRUE )
#--- include parameters in mirt object
res$mirt@Model$nest <- as.integer(tamobj$ic$np ) # number of estimated parameters
# recalculate AIC, BIC, AICc and SABIC
res$mirt@Fit$AIC <- tamobj$ic$AIC
res$mirt@Fit$BIC <- tamobj$ic$BIC
res$mirt@Fit$AICc <- tamobj$ic$AICc
res$mirt@Fit$SABIC <- tamobj$ic$aBIC
# use theta grid from estimation in TAM
res$mirt@Model$Theta <- tamobj$theta
res$mirt@Options$quadpts <- nrow(tamobj$theta)
# output
res$lavaan.syntax.fixed <- lavsyn
res$lavaan.syntax.freed <- lavsyn.freed
# res$tamobj <- tamobj
return(res)
}
|
library(RSurveillance)
### Name: n.rb.2stage.2
### Title: Sample size for 2-stage risk-based surveillance, allowing for
### risk factors at either or both cluster and unit level
### Aliases: n.rb.2stage.2
### Keywords: methods
### ** Examples
# Example inputs -- presumably (per the title): rr.* = relative risks,
# ppr.* = population proportions, spr.* = surveillance/sampling proportions,
# at cluster (.c) and unit (.u) level; confirm against the RSurveillance
# documentation for n.rb.2stage.2.
rr.c<- c(5,3,1)
ppr.c<- c(0.1, 0.2, 0.7)
spr.c<- c(0.4, 0.4, 0.2)
rr.u<- c(4,1)
ppr.u<- c(0.1, 0.9)
spr.u<- c(1, 0)
n.rb.2stage.2(rr.c, ppr.c, spr.c, pstar.c=0.02, rr.u, ppr.u,
spr.u, 0.1, se=0.9, sep.c=0.5, sep.sys=0.95)
n.rb.2stage.2(c(3,1), c(0.2,0.8), c(0.7,0.3), pstar.c=0.05,
pstar.u=0.1, se=0.9, sep.c=0.95, sep.sys=0.99)
| /data/genthat_extracted_code/RSurveillance/examples/n.rb.2stage.2.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 588 | r | library(RSurveillance)
### Name: n.rb.2stage.2
### Title: Sample size for 2-stage risk-based surveillance, allowing for
### risk factors at either or both cluster and unit level
### Aliases: n.rb.2stage.2
### Keywords: methods
### ** Examples
rr.c<- c(5,3,1)
ppr.c<- c(0.1, 0.2, 0.7)
spr.c<- c(0.4, 0.4, 0.2)
rr.u<- c(4,1)
ppr.u<- c(0.1, 0.9)
spr.u<- c(1, 0)
n.rb.2stage.2(rr.c, ppr.c, spr.c, pstar.c=0.02, rr.u, ppr.u,
spr.u, 0.1, se=0.9, sep.c=0.5, sep.sys=0.95)
n.rb.2stage.2(c(3,1), c(0.2,0.8), c(0.7,0.3), pstar.c=0.05,
pstar.u=0.1, se=0.9, sep.c=0.95, sep.sys=0.99)
|
###############################################################
## make contrast matrix for pairwise comparisons
###############################################################
#' @keywords internal
.makeContrast <- function(groups) {
    # Build the matrix of all pairwise group comparisons: one row per pair
    # "gi-gj" carrying +1 on gi and -1 on gj; columns follow `groups`.
    n_groups <- length(groups)
    n_pairs <- n_groups * (n_groups - 1) / 2
    # Start from an all-zero matrix with one row per ordered pair (i < j).
    out <- matrix(0, nrow = n_pairs, ncol = n_groups,
                  dimnames = list(NULL, groups))
    row_names <- character(n_pairs)
    row <- 0
    for (i in seq_len(n_groups - 1)) {
        for (j in seq(i + 1, n_groups)) {
            row <- row + 1
            row_names[row] <- paste(groups[i], groups[j], sep = "-")
            out[row, groups[i]] <- 1   # first group of the pair
            out[row, groups[j]] <- -1  # second group of the pair
        }
    }
    if (n_pairs > 0) {
        rownames(out) <- row_names
    }
    out
}
###############################################################
## check single subject within each condition in each mixture
###############################################################
#' @keywords internal
.checkSingleSubject <- function(annotation) {
    # TRUE when every (Mixture, Group) cell contains at most one Subject,
    # i.e. there is no biological replication within condition and mixture.
    # annotation: data frame with (at least) Mixture, Group, Subject columns.
    temp <- unique(annotation[, c("Mixture", "Group", "Subject")])
    temp$Group <- factor(temp$Group)
    temp$Mixture <- factor(temp$Mixture)
    # Count distinct subjects per mixture-by-group cell.
    temp1 <- xtabs(~ Mixture + Group, data = temp)
    # Compare counts numerically. The original `temp1 <= "1"` coerced the
    # counts to character and relied on lexicographic ordering, which only
    # worked by coincidence for non-negative integer counts.
    singleSubject <- all(temp1 <= 1)
    return(singleSubject)
}
#############################################
## check .checkTechReplicate
#############################################
#' @keywords internal
.checkTechReplicate <- function(annotation) {
    # TRUE when every mixture was measured in more than one MS run,
    # i.e. technical replicate runs exist for all mixtures.
    # annotation: data frame with (at least) Mixture and Run columns.
    temp <- unique(annotation[, c("Mixture", "Run")])
    temp$Mixture <- factor(temp$Mixture)
    # Count distinct runs per mixture (every tabulated mixture has >= 1).
    temp1 <- xtabs(~ Mixture, data = temp)
    # Compare counts numerically. The original `temp1 != "1"` coerced the
    # counts to character before comparing, which is fragile and unidiomatic.
    TechReplicate <- all(temp1 != 1)
    return(TechReplicate)
}
#############################################
## check whether there are multiple biological mixtures
#############################################
#' @keywords internal
.checkMulBioMixture <- function(annotation) {
    # TRUE when the data contain more than one biological mixture.
    # Flatten the Mixture column to a plain vector (the as.matrix/as.vector
    # round-trip also handles tibble-style single-column extraction),
    # then count its distinct values.
    mixtures <- as.vector(as.matrix(annotation[, "Mixture"]))
    length(unique(mixtures)) > 1
}
#############################################
## check whether there is only single run
#############################################
#' @keywords internal
.checkSingleRun <- function(annotation) {
    # TRUE when the data come from exactly one MS run.
    # Flatten the Run column to a plain vector (the as.matrix/as.vector
    # round-trip also handles tibble-style single-column extraction).
    runs <- as.vector(as.matrix(annotation[, "Run"]))
    length(unique(runs)) == 1
}
#############################################
## fit the full model with mixture, techrep and subject effects
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has
#' multiple mixtures, multiple technical replicate runs per mixture and biological variation
fit_full_model <- function(data) {
# Mixed model for multi-mixture designs with technical replicates and
# biological subjects: random Mixture and Run-within-Mixture effects
# (whole plot), fixed Group effect (subplot), and a random Subject
# effect nested in Group and Mixture.
# Fitting messages are silenced; any estimation error (non-estimable
# parameters) is caught by try() and mapped to NULL for the caller's
# fallback cascade.
fit <- suppressMessages(try(lmerTest::lmer(Abundance ~ 1 + (1|Mixture) + (1|Mixture:TechRepMixture) + # whole plot
Group + #subplot
(1|Subject:Group:Mixture), data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit the reduced model with run and subject effects
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has
#' single mixture with multiple technical replicate runs
fit_reduced_model_techrep <- function(data) {
# Reduced mixed model for a single mixture with technical replicate runs:
# random Run effect (whole plot), fixed Group effect (subplot), and a
# random Subject effect nested in Group.
# Fitting messages are silenced; estimation errors become NULL so the
# caller can fall back to a simpler model.
fit <- suppressMessages(try(lmerTest::lmer(Abundance ~ 1 + (1|Run) + # whole plot
Group + #subplot
(1|Subject:Group), data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit the reduced model with mixture and techrep effects
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has no biological variation,
#' multiple mixtures with multiple technical replicate runs
fit_full_model_spikedin <- function(data) {
# Model for spiked-in style data (no biological variation within each
# condition and mixture): random Mixture and Run-within-Mixture effects
# plus a fixed Group effect.
# Fitting messages are silenced; estimation errors become NULL so the
# caller can fall back to a simpler model.
fit <- suppressMessages(try(lmerTest::lmer(Abundance ~ 1 + (1|Mixture) + (1|Mixture:TechRepMixture)
+ Group, data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit the reduced with only run effect
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has no biological variation,
#' multiple mixtures or multiple technical replicate runs
#' or if the data has multiple mixtures but single technical replicate MS run
fit_reduced_model_mulrun <- function(data) {
# Simplest mixed model: random Run effect plus fixed Group effect; used
# when mixture/subject structure cannot (or need not) be modelled but
# multiple runs are present.
# Fitting messages are silenced; estimation errors become NULL so the
# caller can fall back to the one-way ANOVA model.
fit <- suppressMessages(try(lmerTest::lmer(Abundance ~ 1 + (1|Run) + Group, data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit one-way anova model
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has single run
fit_reduced_model_onerun <- function(data) {
    # One-way ANOVA (fixed Group effect only), the last-resort model used
    # when the data come from a single MS run or every richer model failed.
    # Returns the fitted lm object, or NULL when the model cannot be
    # estimated; fitting messages are silenced.
    tryCatch(
        suppressMessages(lm(Abundance ~ 1 + Group, data = data)),
        error = function(e) NULL
    )
}
#############################################
## fit the proper linear model for each protein
#############################################
#' @importFrom lme4 fixef
#' @import lmerTest
#' @importFrom stats vcov
#' @importFrom dplyr filter
#' @keywords internal
#' fit the proper linear model for each protein
.linear.model.fitting <- function(data){
# Fit a protein-level linear or linear mixed model for every protein in
# `data`, choosing the model structure from the observed experimental
# design (mixtures, technical replicate runs, biological subjects) and
# falling back to progressively simpler models whenever a richer one is
# not estimable.
#
# data: long-format data with columns Protein, Abundance, Run, Channel,
#   Subject, Group, Mixture, TechRepMixture.
# Returns: list(protein, model, s2, s2_df, coeff) -- per-protein fitted
#   model objects (or the string "unfittable"), error variances, their
#   degrees of freedom, and fixed-effect coefficient vectors.
Abundance <- Group <- Protein <- NULL # silence R CMD check notes for NSE column names
data$Protein <- as.character(data$Protein) ## make sure protein names are character
proteins <- as.character(unique(data$Protein)) ## proteins
num.protein <- length(proteins)
linear.models <- list() # linear models
s2.all <- NULL # sigma^2
s2_df.all <- NULL # degree freedom of sigma^2
pro.all <- NULL # testable proteins
coeff.all <- list() # coefficients
## do inference for each protein individually
for(i in seq_along(proteins)) {
message(paste("Model fitting for Protein :", proteins[i] , "(", i, " of ", num.protein, ")"))
sub_data <- data %>% dplyr::filter(Protein == proteins[i]) ## data for protein i
# sub_groups <- as.character(unique(sub_data$Group))
# if(length(sub_groups) == 1){
# stop("Only one condition!")
# }
## Record the annotation information
sub_annot <- unique(sub_data[, c('Run', 'Channel', 'Subject',
'Group', 'Mixture', 'TechRepMixture')])
## check the experimental design (helpers defined earlier in this file)
sub_singleSubject <- .checkSingleSubject(sub_annot)
sub_TechReplicate <- .checkTechReplicate(sub_annot)
sub_bioMixture <- .checkMulBioMixture(sub_annot)
sub_singleRun <- .checkSingleRun(sub_annot)
if(sub_singleSubject){ # no biological variation within each condition and mixture
if(sub_TechReplicate & sub_bioMixture){ # multiple mixtures and technical replicates
# fit the full model with mixture and techrep effects for spiked-in data
fit <- fit_full_model_spikedin(sub_data)
if(is.null(fit)){ # full model is not applicable
# fit the reduced model with only run effect
fit <- fit_reduced_model_mulrun(sub_data)
}
if(is.null(fit)){ # the second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
} else{
if(sub_TechReplicate | sub_bioMixture){ # multiple mixtures or multiple technical replicates
# fit the reduced model with only run effect
fit <- fit_reduced_model_mulrun(sub_data)
if(is.null(fit)){ # the second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
} else{ # single run case
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
}
} else{ # biological variation exists within each condition and mixture
if (sub_bioMixture) { # multiple biological mixtures
if (sub_TechReplicate) { # multiple technical replicate MS runs
# fit the full model with mixture, techrep, subject effects
fit <- fit_full_model(sub_data)
if(is.null(fit)){ # full model is not applicable
# fit the reduced model with run and subject effects
fit <- fit_reduced_model_techrep(sub_data)
}
if(is.null(fit)){ # second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
} else { # single technical replicate MS run
# fit the reduced model with only run effect
fit <- fit_reduced_model_mulrun(sub_data)
if(is.null(fit)){ # second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
}
} else { # single biological mixture
if (sub_TechReplicate) { # multiple technical replicate MS runs
# fit the reduced model with run and subject effects
fit <- fit_reduced_model_techrep(sub_data)
if(is.null(fit)){ # second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
} else { # single run
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
} # single technical replicate MS run
} # single biological mixture
} # biological variation
## estimate variance and df from linear models
if(!is.null(fit)){ # the model is fittable
if(inherits(fit, "lm")){# single run case
## Estimate the coeff from fixed model
av <- anova(fit)
coeff <- coef(fit)
s2_df <- av["Residuals", "Df"]
if(s2_df == 0){
s2 <- 0
} else{
# use error variance for testing
s2 <- av["Residuals", "Mean Sq"]
}
linear.models[[proteins[i]]] <- list(model = fit)
} else{
## Estimate the coeff from lmerTest model
# NOTE(review): .rhoInit() and .calcApvar() are package-internal
# helpers defined elsewhere (not visible in this file).
rho <- list() ## environment containing info about model
rho <- .rhoInit(rho, fit, TRUE) ## save lmer outcome in rho envir variable
rho$A <- .calcApvar(rho) ## asymptotic variance-covariance matrix for theta and sigma
av <- anova(rho$model)
coeff <- lme4::fixef(rho$model)
s2_df <- av$DenDF # denominator df from lmerTest's anova (presumably Satterthwaite; confirm ddf method)
s2 <- av$'Mean Sq'/av$'F value' # recover error variance: denominator MS = MS / F
linear.models[[proteins[i]]] <- rho
}
pro.all <- c(pro.all, proteins[i])
s2.all <- c(s2.all, s2)
s2_df.all <- c(s2_df.all, s2_df)
coeff.all[[proteins[i]]] <- coeff
} else{ # the model is not fittable
# message(proteins[i], " is untestable due to not enough measurements.")
linear.models[[proteins[i]]] <- "unfittable"
pro.all <- c(pro.all, proteins[i])
s2.all <- c(s2.all, NA)
s2_df.all <- c(s2_df.all, NA)
coeff.all[[proteins[i]]] <- NA
}
} # for each protein
names(s2.all) <- proteins
names(s2_df.all) <- proteins
return(list(protein = pro.all,
model = linear.models,
s2 = s2.all,
s2_df = s2_df.all,
coeff = coeff.all))
}
#############################################
## check the reason for results with NA
#############################################
#' @keywords internal
#' check the possible reason for untestable comparison
.issue.checking <- function(data,
                            contrast.matrix){
    # Diagnose why a comparison could not be tested for one protein.
    #
    # data: the protein's data; only its Group column is inspected.
    # contrast.matrix: a single NAMED contrast row (numeric vector whose
    #   names are group labels).
    # Returns list(logFC, issue), where issue is one of:
    #   "unfittableModel"     - both sides of the comparison have observed
    #                           groups, so the model itself failed;
    #   "oneConditionMissing" - one side is entirely unobserved
    #                           (logFC is +Inf or -Inf accordingly);
    #   "completeMissing"     - neither side was observed at all.
    contrast.matrix.sub <- contrast.matrix
    # groups present in this protein's data
    sub_groups <- as.character(unique(data$Group))
    # groups entering the comparison with positive / negative coefficients
    positive.groups <- names(contrast.matrix.sub)[contrast.matrix.sub > 0]
    negative.groups <- names(contrast.matrix.sub)[contrast.matrix.sub < 0]
    # names() is NULL for an unnamed contrast vector; scalar || is the
    # correct operator for this `if` (the original used vectorized |)
    if (is.null(positive.groups) || is.null(negative.groups)) {
        stop("Please check the contrast.matrix.
Each row must have both positive and negative values,
and their sum must be 1!")
    }
    if (any(positive.groups %in% sub_groups) &&
        any(negative.groups %in% sub_groups)) {
        # both sides (at least partially) observed: the model was unfittable
        logFC <- NA
        issue <- "unfittableModel"
    } else {
        if (all(!positive.groups %in% sub_groups) &&
            any(negative.groups %in% sub_groups)) {
            # positive side completely missing: fold change is -Inf
            logFC <- (-Inf)
            issue <- "oneConditionMissing"
        } else {
            if (any(positive.groups %in% sub_groups) &&
                all(!negative.groups %in% sub_groups)) {
                # negative side completely missing: fold change is +Inf
                logFC <- Inf
                issue <- "oneConditionMissing"
            } else {
                # neither side observed at all
                logFC <- NA
                issue <- "completeMissing"
            }
        }
    }
    return(list(logFC = logFC, issue = issue))
}
#############################################
## make constrast
#############################################
# MSstats
#' @importFrom stats coef
#' @importFrom lme4 fixef
#' @keywords internal
.make.contrast.single <- function(fit, contrast, sub_data) {
# Adapt a single contrast (named numeric vector over groups) to the
# coefficients actually estimated in `fit`, for one protein:
# - if some contrasted groups are absent from the protein's data, the
#   remaining positive coefficients are rescaled to sum to 1 and the
#   negative ones to sum to -1; absent groups get coefficient 0;
# - the contrast is then mapped onto the model's coefficient names
#   (the intercept always gets 0) and entries corresponding to
#   inestimable (NA) coefficients are dropped.
# fit: an lm or lmerTest fit; sub_data: the protein's data.
## when there are some groups which are all missing
# NOTE(review): levels() is used below, so sub_data$Group must be a
# factor (levels() returns NULL for a character column) -- confirm the
# upstream coercion.
sub_groups <- as.character(levels(sub_data[, c("Group")]))
# groups with positive coefficients
positive.groups <- names(contrast)[contrast>0]
# groups with negative coefficients
negative.groups <- names(contrast)[contrast<0]
# if some groups not exist in the protein data
# (scalar && would be the idiom here; & works because both sides are scalar)
if(!(all(positive.groups %in% sub_groups) &
all(negative.groups %in% sub_groups))){
contrast.single <- contrast[sub_groups]
## tune the coefficients of positive groups so that their summation is 1
temp <- contrast.single[contrast.single > 0]
temp <- temp*(1/sum(temp, na.rm = TRUE))
contrast.single[contrast.single > 0] <- temp
## tune the coefficients of negative groups so that their summation is -1
temp2 <- contrast.single[contrast.single < 0]
temp2 <- temp2*abs(1/sum(temp2, na.rm = TRUE))
contrast.single[contrast.single < 0] <- temp2
## set the coefficients of non-existing groups to zero
contrast[] <- 0
contrast[sub_groups] <- contrast.single
}
# coefficient names differ between fixed (lm) and mixed (lmer) fits
if (inherits(fit, "lm")) {
coef_name <- names(stats::coef(fit))
} else {
coef_name <- names(lme4::fixef(fit))
}
## intercept: always gets contrast coefficient 0
temp <- coef_name[grep("Intercept", coef_name)]
intercept_c <- rep(0, length(temp))
names(intercept_c) <- temp
if (length(temp) == 0) {
intercept_c <- NULL
}
## group: map "GroupX" coefficient names back to group names in `contrast`
temp <- coef_name[grep("Group", coef_name)]
tempcontrast <- contrast[sub_groups]
group_c <- tempcontrast[gsub("Group", "", temp)]
names(group_c) <- temp
if (length(temp) == 0) {
group_c<-NULL
}
## combine all
newcontrast <- c(intercept_c, group_c)
# drop entries whose coefficients were not estimable (NA in the fit)
if(inherits(fit, "lm")) {
contrast1 <- newcontrast[!is.na(stats::coef(fit))]
} else {
contrast1 <- newcontrast[!is.na(lme4::fixef(fit))]
}
return(contrast1)
}
# retired function (2020.04.13)
# #############################################
# ## get the unscaled covariance matrix
# #############################################
# # statOmics, MSqRob hurdle model
# # Created 2020
# .getVcovUnscaled <- function(model){
#
# if(inherits(fixed.model, "lm")){
# vcov <- summary(model)$cov.unscaled
#
# } else{
# p <- ncol(lme4::getME(model,"X"))
# q <- nrow(lme4::getME(model,"Zt"))
# Ct <- rbind2(t(lme4::getME(model,"X")),lme4::getME(model,"Zt"))
# Ginv <- Matrix::solve(Matrix::tcrossprod(lme4::getME(model,"Lambda"))+Matrix::Diagonal(q,1e-18))
# vcovInv <- Matrix::tcrossprod(Ct)
# vcovInv[((p+1):(q+p)),((p+1):(q+p))] <- vcovInv[((p+1):(q+p)),((p+1):(q+p))]+Ginv
#
# #remove rows with only zeros, making it uninvertible
# defined <- rowSums(as.matrix(vcovInv==0))!=ncol(vcovInv)
# defined[is.na(defined)] <- TRUE
# vcovInv <- vcovInv[defined, defined, drop=FALSE]
#
# #Estimated variance-covariance matrix vcov:
# vcov <- tryCatch(as.matrix(Matrix::solve(vcovInv)), error=function(e){
# return(vcovInv*NA)
# })
#
# rownames(vcov) <- colnames(vcovInv)
# colnames(vcov) <- rownames(vcovInv)
# }
#
# return(vcov)
# } | /R/linearModel.functions.R | no_license | zzsnow/MSstatsTMT | R | false | false | 17,215 | r | ###############################################################
## make contrast matrix for pairwise comparisons
###############################################################
#' @keywords internal
.makeContrast <- function(groups) {
# Build the matrix of all pairwise group comparisons: one row per pair
# "gi-gj" carrying +1 on gi and -1 on gj; columns follow `groups`.
ncomp <- length(groups) * (length(groups) - 1) / 2 # Number of comparison
contrast.matrix <- matrix(rep(0, length(groups) * ncomp), ncol = length(groups))
colnames(contrast.matrix) <- groups
count <- 0
contrast.matrix.rownames <- NULL
for(j in seq_len(length(groups)-1)){
for(k in (j+1):length(groups)){
count <- count + 1
# save row name
contrast.matrix.rownames <- c(contrast.matrix.rownames, paste(groups[j], groups[k], sep = "-"))
# set contrast value: +1 for the first group, -1 for the second
contrast.matrix[count, groups[j]] <- 1
contrast.matrix[count, groups[k]] <- -1
}
}
rownames(contrast.matrix) <- contrast.matrix.rownames
return(contrast.matrix)
}
###############################################################
## check single subject within each condition in each mixture
###############################################################
#' @keywords internal
.checkSingleSubject <- function(annotation) {
# TRUE when every (Mixture, Group) cell contains at most one Subject,
# i.e. there is no biological replication within condition and mixture.
temp <- unique(annotation[, c("Mixture", "Group", "Subject")])
temp$Group <- factor(temp$Group)
temp$Mixture <- factor(temp$Mixture)
temp1 <- xtabs(~ Mixture+Group, data=temp)
# NOTE(review): comparing against the string "1" coerces the counts to
# character; it happens to work for non-negative integer counts, but a
# numeric `<= 1` would be the correct idiom.
singleSubject <- all(temp1 <= "1")
return(singleSubject)
}
#############################################
## check .checkTechReplicate
#############################################
#' @keywords internal
.checkTechReplicate <- function(annotation) {
# TRUE when every mixture was measured in more than one MS run,
# i.e. technical replicate runs exist for all mixtures.
temp <- unique(annotation[, c("Mixture", "Run")])
temp$Mixture <- factor(temp$Mixture)
temp1 <- xtabs(~ Mixture, data=temp)
# NOTE(review): comparing against the string "1" coerces the counts to
# character; a numeric `!= 1` would be the correct idiom.
TechReplicate <- all(temp1 != "1")
return(TechReplicate)
}
#############################################
## check whether there are multiple biological mixtures
#############################################
#' @keywords internal
.checkMulBioMixture <- function(annotation) {
# TRUE when the data contain more than one biological mixture.
temp <- unique(annotation[, "Mixture"])
temp <- as.vector(as.matrix(temp)) # flatten (also handles tibble-style column extraction)
return(length(temp)>1)
}
#############################################
## check whether there is only single run
#############################################
#' @keywords internal
.checkSingleRun <- function(annotation) {
# TRUE when the data come from exactly one MS run.
temp <- unique(annotation[, "Run"])
temp <- as.vector(as.matrix(temp)) # flatten (also handles tibble-style column extraction)
return(length(temp)==1)
}
#############################################
## fit the full model with mixture, techrep and subject effects
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has
#' multiple mixtures, multiple technical replicate runs per mixture and biological variation
fit_full_model <- function(data) {
fit <- suppressMessages(try(lmerTest::lmer(Abundance ~ 1 + (1|Mixture) + (1|Mixture:TechRepMixture) + # whole plot
Group + #subplot
(1|Subject:Group:Mixture), data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit the reduced model with run and subject effects
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has
#' single mixture with multiple technical replicate runs
fit_reduced_model_techrep <- function(data) {
fit <- suppressMessages(try(lmerTest::lmer(Abundance ~ 1 + (1|Run) + # whole plot
Group + #subplot
(1|Subject:Group), data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit the reduced model with mixture and techrep effects
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has no biological variation,
#' multiple mixtures with multiple technical replicate runs
fit_full_model_spikedin <- function(data) {
fit <- suppressMessages(try(lmerTest::lmer(Abundance ~ 1 + (1|Mixture) + (1|Mixture:TechRepMixture)
+ Group, data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit the reduced with only run effect
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has no biological variation,
#' multiple mixtures or multiple technical replicate runs
#' or if the data has multiple mixtures but single technical replicate MS run
fit_reduced_model_mulrun <- function(data) {
fit <- suppressMessages(try(lmerTest::lmer(Abundance ~ 1 + (1|Run) + Group, data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit one-way anova model
#############################################
#' @importFrom lmerTest lmer
#' @keywords internal
#' fit the whole plot and subplot model if the data has single run
fit_reduced_model_onerun <- function(data) {
fit <- suppressMessages(try(lm(Abundance ~ 1 + Group, data = data), TRUE))
if(!inherits(fit, "try-error")){
return(fit)
} else{ # if the parameters are not estimable, return null
return(NULL)
}
}
#############################################
## fit the proper linear model for each protein
#############################################
#' @importFrom lme4 fixef
#' @import lmerTest
#' @importFrom stats vcov
#' @importFrom dplyr filter
#' @keywords internal
#' fit the proper linear model for each protein
.linear.model.fitting <- function(data){
Abundance <- Group <- Protein <- NULL
data$Protein <- as.character(data$Protein) ## make sure protein names are character
proteins <- as.character(unique(data$Protein)) ## proteins
num.protein <- length(proteins)
linear.models <- list() # linear models
s2.all <- NULL # sigma^2
s2_df.all <- NULL # degree freedom of sigma^2
pro.all <- NULL # testable proteins
coeff.all <- list() # coefficients
## do inference for each protein individually
for(i in seq_along(proteins)) {
message(paste("Model fitting for Protein :", proteins[i] , "(", i, " of ", num.protein, ")"))
sub_data <- data %>% dplyr::filter(Protein == proteins[i]) ## data for protein i
# sub_groups <- as.character(unique(sub_data$Group))
# if(length(sub_groups) == 1){
# stop("Only one condition!")
# }
## Record the annotation information
sub_annot <- unique(sub_data[, c('Run', 'Channel', 'Subject',
'Group', 'Mixture', 'TechRepMixture')])
## check the experimental design
sub_singleSubject <- .checkSingleSubject(sub_annot)
sub_TechReplicate <- .checkTechReplicate(sub_annot)
sub_bioMixture <- .checkMulBioMixture(sub_annot)
sub_singleRun <- .checkSingleRun(sub_annot)
if(sub_singleSubject){ # no biological variation within each condition and mixture
if(sub_TechReplicate & sub_bioMixture){ # multiple mixtures and technical replicates
# fit the full model with mixture and techrep effects for spiked-in data
fit <- fit_full_model_spikedin(sub_data)
if(is.null(fit)){ # full model is not applicable
# fit the reduced model with only run effect
fit <- fit_reduced_model_mulrun(sub_data)
}
if(is.null(fit)){ # the second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
} else{
if(sub_TechReplicate | sub_bioMixture){ # multiple mixtures or multiple technical replicates
# fit the reduced model with only run effect
fit <- fit_reduced_model_mulrun(sub_data)
if(is.null(fit)){ # the second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
} else{ # single run case
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
}
} else{ # biological variation exists within each condition and mixture
if (sub_bioMixture) { # multiple biological mixtures
if (sub_TechReplicate) { # multiple technical replicate MS runs
# fit the full model with mixture, techrep, subject effects
fit <- fit_full_model(sub_data)
if(is.null(fit)){ # full model is not applicable
# fit the reduced model with run and subject effects
fit <- fit_reduced_model_techrep(sub_data)
}
if(is.null(fit)){ # second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
} else { # single technical replicate MS run
# fit the reduced model with only run effect
fit <- fit_reduced_model_mulrun(sub_data)
if(is.null(fit)){ # second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
}
} else { # single biological mixture
if (sub_TechReplicate) { # multiple technical replicate MS runs
# fit the reduced model with run and subject effects
fit <- fit_reduced_model_techrep(sub_data)
if(is.null(fit)){ # second model is not applicable
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
}
} else { # single run
# fit one-way anova model
fit <- fit_reduced_model_onerun(sub_data)
} # single technical replicate MS run
} # single biological mixture
} # biological variation
## estimate variance and df from linear models
if(!is.null(fit)){ # the model is fittable
if(inherits(fit, "lm")){# single run case
## Estimate the coeff from fixed model
av <- anova(fit)
coeff <- coef(fit)
s2_df <- av["Residuals", "Df"]
if(s2_df == 0){
s2 <- 0
} else{
# use error variance for testing
s2 <- av["Residuals", "Mean Sq"]
}
linear.models[[proteins[i]]] <- list(model = fit)
} else{
## Estimate the coeff from lmerTest model
rho <- list() ## environment containing info about model
rho <- .rhoInit(rho, fit, TRUE) ## save lmer outcome in rho envir variable
rho$A <- .calcApvar(rho) ## asymptotic variance-covariance matrix for theta and sigma
av <- anova(rho$model)
coeff <- lme4::fixef(rho$model)
s2_df <- av$DenDF
s2 <- av$'Mean Sq'/av$'F value'
linear.models[[proteins[i]]] <- rho
}
pro.all <- c(pro.all, proteins[i])
s2.all <- c(s2.all, s2)
s2_df.all <- c(s2_df.all, s2_df)
coeff.all[[proteins[i]]] <- coeff
} else{ # the model is not fittble
# message(proteins[i], " is untestable due to no enough measurements.")
linear.models[[proteins[i]]] <- "unfittable"
pro.all <- c(pro.all, proteins[i])
s2.all <- c(s2.all, NA)
s2_df.all <- c(s2_df.all, NA)
coeff.all[[proteins[i]]] <- NA
}
} # for each protein
names(s2.all) <- proteins
names(s2_df.all) <- proteins
return(list(protein = pro.all,
model = linear.models,
s2 = s2.all,
s2_df = s2_df.all,
coeff = coeff.all))
}
#############################################
## check the reason for results with NA
#############################################
#' @keywords internal
#' check the possible reason for untestable comparison
.issue.checking <- function(data,
contrast.matrix){
## choose each comparison
contrast.matrix.sub <- contrast.matrix
# groups in the sub data
sub_groups <- as.character(unique(data$Group))
# groups with positive coefficients
positive.groups <- names(contrast.matrix.sub)[contrast.matrix.sub>0]
# groups with negative coefficients
negative.groups <- names(contrast.matrix.sub)[contrast.matrix.sub<0]
if(is.null(positive.groups) | is.null(negative.groups)){
stop("Please check the contrast.matrix.
Each row must have both positive and negative values,
and their sum must be 1!")
}
if(any(positive.groups %in% sub_groups) &
any(negative.groups %in% sub_groups)){
logFC = NA
issue = "unfittableModel"
} else{
# more than one condition
if(all(!positive.groups %in% sub_groups) &
any(negative.groups %in% sub_groups)){
logFC = (-Inf)
issue = "oneConditionMissing"
} else{
if(any(positive.groups %in% sub_groups) &
all(!negative.groups %in% sub_groups)){
logFC = Inf
issue = "oneConditionMissing"
} else{
logFC = NA
issue = "completeMissing"
}
}
}
return(list(logFC = logFC, issue = issue))
}
#############################################
## make contrast
#############################################
# MSstats
#' @importFrom stats coef
#' @importFrom lme4 fixef
#' @keywords internal
#' Build the contrast vector aligned with the coefficients actually
#' estimated by the fitted model for a single protein.
#'
#' When some groups have no observations for this protein, the surviving
#' positive and negative coefficients are rescaled so that each side of the
#' contrast still sums to +1 / -1, and entries for unobserved groups are
#' zeroed before aligning with the model's coefficient names.
#'
#' @param fit fitted model, either an "lm" object or an lme4 mixed model
#'   (anything else is assumed to support lme4::fixef).
#' @param contrast named numeric vector of contrast coefficients, one entry
#'   per group in the full design.
#' @param sub_data data.frame for this protein; the levels of its `Group`
#'   factor define which groups were observed.
#' @return numeric contrast vector restricted to the non-NA model coefficients.
.make.contrast.single <- function(fit, contrast, sub_data) {
    ## when there are some groups which are all missing
    sub_groups <- as.character(levels(sub_data[, c("Group")]))
    # groups with positive coefficients
    positive.groups <- names(contrast)[contrast>0]
    # groups with negative coefficients
    negative.groups <- names(contrast)[contrast<0]
    # if some groups not exist in the protein data
    if(!(all(positive.groups %in% sub_groups) &
         all(negative.groups %in% sub_groups))){
        contrast.single <- contrast[sub_groups]
        ## tune the coefficients of positive groups so that their summation is 1
        temp <- contrast.single[contrast.single > 0]
        temp <- temp*(1/sum(temp, na.rm = TRUE))
        contrast.single[contrast.single > 0] <- temp
        ## tune the coefficients of negative groups so that their summation is -1
        ## (NOTE: the original comment said "positive"; this branch rescales the
        ## negative side: temp2 * abs(1/sum(temp2)) makes the sum equal -1)
        temp2 <- contrast.single[contrast.single < 0]
        temp2 <- temp2*abs(1/sum(temp2, na.rm = TRUE))
        contrast.single[contrast.single < 0] <- temp2
        ## set the coefficients of non-existing groups to zero
        contrast[] <- 0
        contrast[sub_groups] <- contrast.single
    }
    # coefficient names come from stats::coef for lm fits, lme4::fixef otherwise
    if (inherits(fit, "lm")) {
        coef_name <- names(stats::coef(fit))
    } else {
        coef_name <- names(lme4::fixef(fit))
    }
    ## intercept: gets weight 0 so it cancels out of the contrast
    temp <- coef_name[grep("Intercept", coef_name)]
    intercept_c <- rep(0, length(temp))
    names(intercept_c) <- temp
    if (length(temp) == 0) {
        intercept_c <- NULL
    }
    ## group: map "Group<label>" coefficient names back to plain group labels
    temp <- coef_name[grep("Group", coef_name)]
    tempcontrast <- contrast[sub_groups]
    group_c <- tempcontrast[gsub("Group", "", temp)]
    names(group_c) <- temp
    if (length(temp) == 0) {
        group_c<-NULL
    }
    ## combine all
    newcontrast <- c(intercept_c, group_c)
    # drop entries whose coefficients could not be estimated (NA in the fit)
    if(inherits(fit, "lm")) {
        contrast1 <- newcontrast[!is.na(stats::coef(fit))]
    } else {
        contrast1 <- newcontrast[!is.na(lme4::fixef(fit))]
    }
    return(contrast1)
}
# retired fuction (2020.04.13)
# #############################################
# ## get the unscaled covariance matrix
# #############################################
# # statOmics, MSqRob hurdle model
# # Created 2020
# .getVcovUnscaled <- function(model){
#
# if(inherits(fixed.model, "lm")){
# vcov <- summary(model)$cov.unscaled
#
# } else{
# p <- ncol(lme4::getME(model,"X"))
# q <- nrow(lme4::getME(model,"Zt"))
# Ct <- rbind2(t(lme4::getME(model,"X")),lme4::getME(model,"Zt"))
# Ginv <- Matrix::solve(Matrix::tcrossprod(lme4::getME(model,"Lambda"))+Matrix::Diagonal(q,1e-18))
# vcovInv <- Matrix::tcrossprod(Ct)
# vcovInv[((p+1):(q+p)),((p+1):(q+p))] <- vcovInv[((p+1):(q+p)),((p+1):(q+p))]+Ginv
#
# #remove rows with only zeros, making it uninvertible
# defined <- rowSums(as.matrix(vcovInv==0))!=ncol(vcovInv)
# defined[is.na(defined)] <- TRUE
# vcovInv <- vcovInv[defined, defined, drop=FALSE]
#
# #Estimated variance-covariance matrix vcov:
# vcov <- tryCatch(as.matrix(Matrix::solve(vcovInv)), error=function(e){
# return(vcovInv*NA)
# })
#
# rownames(vcov) <- colnames(vcovInv)
# colnames(vcov) <- rownames(vcovInv)
# }
#
# return(vcov)
# } |
# This is the server logic
library(shiny)
# Server logic: renders the time-series plot of `data` with one labelled
# x-axis tick per observation index.
# NOTE(review): `data` is not defined in this file; it is presumably created
# elsewhere (e.g. global.R) -- confirm before deploying.
shinyServer(function(input, output) {
  output$Plot <- renderPlot({
    # Graph cars using blue points overlayed by a line; suppress the default
    # x axis so every observation index can be labelled explicitly.
    plot(data, type="o", col="blue", xaxt='n')
    # seq_len() is safe when data has zero rows (1:nrow(data) would give c(1, 0));
    # the original redundant c() wrapper is dropped.
    ticks <- seq_len(nrow(data))
    axis(1, at = ticks, labels = ticks)
  })
})
| /R-Scripts/PredictingLSD/server.R | permissive | dmpe/BIInternship | R | false | false | 319 | r |
# This is the server logic
library(shiny)
shinyServer(function(input, output) {
  # Render the main plot: the series held in `data`, drawn as blue points
  # joined by a line, with a custom integer-labelled x axis.
  output$Plot <- renderPlot({
    tick_positions <- 1:nrow(data)
    plot(data, type = "o", col = "blue", xaxt = 'n')
    axis(1, at = tick_positions, labels = tick_positions)
  })
})
|
# "Hydrograph" drop-down menu for the app's navigation bar.  Each tab's UI
# lives in its own file under ui/hydrograph/; source(..., local = TRUE)
# evaluates the file in this frame and `$value` extracts the UI object it
# returns.  Tab order here is the order shown to the user.
navbarMenu("Hydrograph",
           # tabPanel("raw queried data",
           #          # tags$head(tags$script(HTML(jscode.mup))),
           tabPanel("daily-mean data",
                    source(file.path("ui/hydrograph", "discharge.R"), local = TRUE)$value
           ),
           tabPanel("(baseflow) separation",
                    source(file.path("ui/hydrograph", "separation.R"), local = TRUE)$value
           ),
           tabPanel("disaggregation",
                    source(file.path("ui/hydrograph", "disaggregation.R"), local = TRUE)$value
           ),
           tabPanel("data quality (counts)",
                    source(file.path("ui/hydrograph/data", "data_qual.R"), local = TRUE)$value
           ),
           tabPanel("aggregated data summary",
                    source(file.path("ui/hydrograph/data", "data_summary.R"), local = TRUE)$value
           ),
           tabPanel("Download data",
                    source(file.path("ui/hydrograph/data", "data_table.R"), local = TRUE)$value
           )
)
| /ui/hydrograph.R | permissive | maseology/sHydrology_analysis | R | false | false | 833 | r | navbarMenu("Hydrograph",
# tabPanel("raw queried data",
# # tags$head(tags$script(HTML(jscode.mup))),
tabPanel("daily-mean data",
source(file.path("ui/hydrograph", "discharge.R"), local = TRUE)$value
),
tabPanel("(baseflow) separation",
source(file.path("ui/hydrograph", "separation.R"), local = TRUE)$value
),
tabPanel("disaggregation",
source(file.path("ui/hydrograph", "disaggregation.R"), local = TRUE)$value
),
tabPanel("data quality (counts)",
source(file.path("ui/hydrograph/data", "data_qual.R"), local = TRUE)$value
),
tabPanel("aggregated data summary",
source(file.path("ui/hydrograph/data", "data_summary.R"), local = TRUE)$value
),
tabPanel("Download data",
source(file.path("ui/hydrograph/data", "data_table.R"), local = TRUE)$value
)
)
|
# create evt140_0021500411.RDS data file with LaVine speed/dist metrics
#
# Reads the raw tracking data for game 0021500411, keeps only event 140 up to
# the moment the game clock stops, derives LaVine's per-frame speed and his
# distance from the hoop, saves the augmented event, and renders two
# animations of the play.
library(dplyr)
library(animation)
source("../graphics.R")  # provides plot_fullcourt() and plot_shot()

pt <- readRDS("0021500411.RDS")
drive <- pt
drive$game <- pt$game %>%
  filter(event_id == 140)

# drop time after the event takes place (i.e. when the game clock stops)
gc_stop_indx <- which(diff(drive$game$game_clock) == 0)
drive$game <- drive$game %>%
  filter(row_number() < head(gc_stop_indx, 1))

# LaVine (slot a5) coordinates plus the wall-clock delta between consecutive
# frames; unknown1 appears to be a wall-clock timestamp in milliseconds --
# TODO confirm against the data dictionary.
lavine_coords <- drive$game %>%
  select(game_clock, unknown1, a5_ent, a5_x, a5_y) %>%
  mutate(wc_diff = c(NA, tail(unknown1/1e3, -1) - head(unknown1/1e3, -1)))

# construct player speed for event: Euclidean step length between consecutive
# frames divided by elapsed wall-clock time.  Vectorized (diff + sqrt)
# instead of the original frame-by-frame dist() loop that grew the vector.
step_len <- sqrt(diff(lavine_coords$a5_x)^2 + diff(lavine_coords$a5_y)^2)
lavine_speed <- c(NA, step_len / lavine_coords$wc_diff[-1])

# construct player distance from hoop for event (right hoop sits 5.25 ft
# inside the 94 ft baseline, centred at y = 25)
right_hoop <- c((94 - 5.25), 25)
lavine_dist <- sqrt((lavine_coords$a5_x - right_hoop[1])^2 +
                    (lavine_coords$a5_y - right_hoop[2])^2)

drive$game$lavine_speed <- lavine_speed
drive$game$lavine_dist <- lavine_dist
saveRDS(drive, "evt140_0021500411.RDS")

# create animation with variables
ani.options(ani.width=900, ani.height=600, interval= 0.05, autobrowse = FALSE, ani.dev = "png", ani.type = "png")
saveVideo({
  for (i in seq_len(nrow(drive$game))) {
    plot_fullcourt()
    text(1,48, paste0("Q",drive$game$quarter[i]," | GC: ",drive$game$game_clock[i]), pos=4, cex=1.5)
    plot_shot(drive, loop = i, static = FALSE)
  }
}, video.name = paste0("../media/event_140",".mp4"))

# create animation with speed/dist variables: distance and speed traces with
# a moving cursor on top, court view underneath
ani.options(ani.width=600, ani.height=700, interval= 0.05, autobrowse = FALSE, ani.dev = "png", ani.type = "png")
saveVideo({
  for (i in seq_len(nrow(drive$game))) {
    layout(matrix(1:3, ncol = 1), heights = c(1,1,2))
    plot(lavine_dist[-1], type = "l", ylab = "Distance From Hoop", xlab = "Time (25hz)")
    abline(v = i, col = "red", lwd = 2)
    plot(lavine_speed[-1], type = "l", ylab = "Speed", xlab = "Time (25hz)")
    abline(v = i, col = "red", lwd = 2)
    plot_fullcourt()
    text(1,48, paste0("Q",drive$game$quarter[i]," | GC: ",drive$game$game_clock[i]), pos=4, cex=1.5)
    plot_shot(drive, loop = i, static = FALSE)
  }
}, video.name = paste0("../media/event_140_stats",".mp4"))
# looks like lavine drives between index 300:400
# during this time his distance from the hoop decreases and his speed increases
| /data/pt_data_evt140_drive.R | permissive | imadmali/bball-hmm | R | false | false | 2,435 | r | # create evt140_0021500411.RDS data file with LaVine speed/dist metrics
# Create evt140_0021500411.RDS with LaVine speed/dist metrics: keep event 140
# up to the game-clock stop, derive per-frame speed and distance from the
# hoop, save the augmented event, and render two animations of the play.
library(dplyr)
library(animation)
source("../graphics.R")  # provides plot_fullcourt() and plot_shot()

pt <- readRDS("0021500411.RDS")
drive <- pt
drive$game <- pt$game %>%
  filter(event_id == 140)

# drop time after the event takes place (i.e. when the game clock stops)
gc_stop_indx <- which(diff(drive$game$game_clock) == 0)
drive$game <- drive$game %>%
  filter(row_number() < head(gc_stop_indx, 1))

# LaVine (slot a5) coordinates plus the wall-clock delta between consecutive
# frames; unknown1 appears to be a wall-clock timestamp in milliseconds --
# TODO confirm against the data dictionary.
lavine_coords <- drive$game %>%
  select(game_clock, unknown1, a5_ent, a5_x, a5_y) %>%
  mutate(wc_diff = c(NA, tail(unknown1/1e3, -1) - head(unknown1/1e3, -1)))

# construct player speed for event: Euclidean step length between consecutive
# frames divided by elapsed wall-clock time.  Vectorized (diff + sqrt)
# instead of the original frame-by-frame dist() loop that grew the vector.
step_len <- sqrt(diff(lavine_coords$a5_x)^2 + diff(lavine_coords$a5_y)^2)
lavine_speed <- c(NA, step_len / lavine_coords$wc_diff[-1])

# construct player distance from hoop for event (right hoop sits 5.25 ft
# inside the 94 ft baseline, centred at y = 25)
right_hoop <- c((94 - 5.25), 25)
lavine_dist <- sqrt((lavine_coords$a5_x - right_hoop[1])^2 +
                    (lavine_coords$a5_y - right_hoop[2])^2)

drive$game$lavine_speed <- lavine_speed
drive$game$lavine_dist <- lavine_dist
saveRDS(drive, "evt140_0021500411.RDS")

# create animation with variables
ani.options(ani.width=900, ani.height=600, interval= 0.05, autobrowse = FALSE, ani.dev = "png", ani.type = "png")
saveVideo({
  for (i in seq_len(nrow(drive$game))) {
    plot_fullcourt()
    text(1,48, paste0("Q",drive$game$quarter[i]," | GC: ",drive$game$game_clock[i]), pos=4, cex=1.5)
    plot_shot(drive, loop = i, static = FALSE)
  }
}, video.name = paste0("../media/event_140",".mp4"))

# create animation with speed/dist variables: distance and speed traces with
# a moving cursor on top, court view underneath
ani.options(ani.width=600, ani.height=700, interval= 0.05, autobrowse = FALSE, ani.dev = "png", ani.type = "png")
saveVideo({
  for (i in seq_len(nrow(drive$game))) {
    layout(matrix(1:3, ncol = 1), heights = c(1,1,2))
    plot(lavine_dist[-1], type = "l", ylab = "Distance From Hoop", xlab = "Time (25hz)")
    abline(v = i, col = "red", lwd = 2)
    plot(lavine_speed[-1], type = "l", ylab = "Speed", xlab = "Time (25hz)")
    abline(v = i, col = "red", lwd = 2)
    plot_fullcourt()
    text(1,48, paste0("Q",drive$game$quarter[i]," | GC: ",drive$game$game_clock[i]), pos=4, cex=1.5)
    plot_shot(drive, loop = i, static = FALSE)
  }
}, video.name = paste0("../media/event_140_stats",".mp4"))
# looks like lavine drives between index 300:400
# during this time his distance from the hoop decreases and his speed increases
|
# Up and running with R: Lynda.com. Early Oct 2017.
2 + 2 # vbasic math
#control-return runs it if you're on that line
# [1] = index number for the vector
1:100 #print numbers 1-100 on several lines
#[21] = index number for vector
#no command terminator
#for command more than one line, use parentheses
print("Hello World!")
#just like python
x <- 1:5 # puts numbers 1-5 in var x
# <- "gets" x gets numbers 1-5
#in workspace, shows x var
x #display vals in x
# c= concatenate
y <- c(6, 7, 8, 9, 10)
#now have varialbe y, which is numeric (x is int)
y
#can do vector based math - operations w/o loops
x + y #add x vals to y vals
# 1+6, 2+7, etc.
x*2
#R will also create vars with = but <- is better form, according to R style guides
x <- 0:10
x
y <-c(5, 4, 1, 6, 7, 2, 2, 3, 2, 8)
y
ls() #list objects - same as python
#i have a lot more b/c using stuff from summrstats
#easy to take data from .csv variable
#social network.csv has missing data!
#R converts missing data to NA (not available)
# have to specify header
header = T
#social_network.csv <- read.csv("~/Desktop/R/summerstats2git", header = T, sep = )
#sn.csv <- read.csv("~/Desktop", header = T)
#above ones DO NOT WORK. Below do.
library(readr)
social_network <- read_csv("~/Desktop/R/summerstats2git/social_network.csv")
social_network <- read.csv("~/Desktop/R/summerstats2git/social_network.csv")
social_network <- read.csv("~/Desktop/R/summerstats2git/social_network.csv", header = T)
#str(social_network.csv) doesn't work
#so underscores super just don't work huh
sn.csv <- read.csv("~/Desktop/sn.csv", header = T)
str(sn.csv)
vignette(
)
update.packages()
y
#3. charts and statistics for one var
#have to create table w/ frequencies for R to make a bar graph
site.freq <- table(sn.csv$Site)
barplot(site.freq) #shows in "plots" field the bar graph
? barplot #opens a help window for barplots
barplot(site.freq[order(site.freq, decreasing =T)])
#this puts bars in descending order
barplot(site.freq[order(site.freq)], horiz = T)
#this puts the bar chart horizontally
#Below: fbba = facebook blue. concatenate - facebook blue. repeat gray, 5 times
fbba <- c(rep ("gray", 5),
rgb(59, 89, 152, maxColorValue = 255))
#how to break code across 2 lines - one single command
barplot(site.freq[order(site.freq)],
horiz = T,
col = fbba)
#color = use vector fbba
# Final styled bar chart: horizontal bars sorted ascending, Facebook bar
# highlighted via the `fbba` colour vector defined above.
barplot(site.freq[order(site.freq)],
        horiz = TRUE,      # horizontal
        col = fbba,        # fb color
        border = NA,       # no borders
        xlim = c(0,100),   # scale from 0-100
        # fixed typo in displayed title: "Netwrking" -> "Networking"
        main = "Preferred Social Networking Site \nA Survey of 202 Users",
        xlab = "Number of Users")
#Bottom two give label for graph and for the x-axis
#how to export the chart? Click on the chart and save as PDF it under "export"
#charts can help you make sure that your variables look right
sn.csv <- read.csv("~/Desktop/sn.csv", header = T)
#histograms:
hist(sn.csv$Age)
hist(sn.csv$Age,
#border = NA
col = "beige", #or use: col = colors() [18]
main = "Ages of Respondents\nSocial Networking Survey of 202 Users",
xlab = "Age of Respondents")
#box plots: look at distribution and outliers
boxplot(sn.csv$Age)
#median age around 30, low of 10, high of 70
# Horizontal notched box plot of respondent age.
boxplot(sn.csv$Age,
        col = "beige",
        notch = TRUE,
        horizontal = TRUE,
        # fixed typo in displayed title: "]n" was meant to be the newline "\n"
        main = "Ages of Respondents\n Social Networking Survey",
        xlab = "Age of Respondents")
#Calculating Frequencies:
table(sn.csv$Site) #creates frequency table in alpha order
site.freq <- table(sn.csv$Site) #saves table
site.freq #print table
#replace this table w/ a sorted version of itself
site.freq <- site.freq[order(site.freq, decreasing = T)] #decreasing = True
site.freq #print table
prop.table(site.freq) #gives proportions of total
round(prop.table(site.freq), 2) #gives proportions to 2 decimal points
#Calculating Descriptives:
summary(sn.csv$Age) #summary for one variable - gives min/max/quartiles/mean/missing = NA
summary(sn.csv)
#Tukey's five number summary: min/quart/mean/3rdquart/max
fivenum(sn.csv$Age)
#alt descriptive statistics - sd, kurtosis, skew, range = like sum, d in Stat
install.packages("psych")
library("psych")
describe(sn.csv)
#gender and site are categorical= have *'s to denote that
#gender, b/c of one missing - gives values 1, 2, 3 (with 1=missing?)
#Recoding Variables:
#looking at variable: Times
hist(sn.csv$Times)
#very skewed histogram - most people in earliest categories
times.z <- scale(sn.csv$Times) #standardizes the distribution
hist(times.z)
describe(times.z)
#skewness and kurtosis are both still bad for this variable.
#Kurtosis - affected a lot by outliers
#log
times.ln0 <- log(sn.csv$Times)
hist(times.ln0)
describe(times.ln0) #see some weird stuff in this description, b/c of the 0s in dataset
times.ln1 <-log(sn.csv$Times +1) #wow, should have done this for Cynthia paper!
hist(times.ln1)
describe(times.ln1)
#Ranking
times.rank <- rank(sn.csv$Times)
hist(times.rank)
describe(times.rank)
#ties.method : what to do when vals are tied?= c( "average", "first", "random", "max", "min")
times.rankr <- rank(sn.csv$Times, ties.method = "random") #this flattens the dist
hist(times.rankr)
describe(times.rankr)
#Dichotomizing
#use wisely and purposefully! - we are losing information
#if else function: create new var. if time =>1, give val 1, if not, give 0)
time.gt1 <- ifelse(sn.csv$Times > 1, 1, 0)
time.gt1 #this is the whole dataset, with binary values
#Computing New Variables:
#create variable n1 with 1 million random normal values
n1 <- rnorm(1000000) #give random values from normal dist
hist(n1)
#do it again
n2 <- rnorm(1000000)
hist(n2)
#Average scores cross two variables
n.add <- n1 + n2 #new var= n1+n2 (adds 1st item of n1 to 1st in n2, etc.)
hist(n.add) #also gives normal looking bell curve
#Multiple scores across two variables
n.mult <- n1 * n2
hist(n.mult) #hist is much slimmer now. multiplying vals gives huge # of outliers
kurtosi(n1)
kurtosi(n2)
kurtosi(n.add)
kurtosi(n.mult)
#kurtosis is largest diff b/w our mult and add new n vars
#Vector based options in R are very simple and easy to do
#Now, time for bivariate associations.
#Bar charts for group means
google <- read.csv("~/Desktop/R/summerstats2git/google_correlate.csv", header = T)
names(google) #gives names of vars in dataset
str(google) #gives more info about these vars
#does interest in data viz vary by region?
#split data by region, create new data frame
viz.reg.dist <- split(google$data_viz, google$region) #split: from google, take dataviz, split by region
boxplot(viz.reg.dist, col = "lavender")
#shows relative interest in data viz by region - West has widest variation, less in NE
#outliers in NE and in South, South has highest var
#Barplot w/ means
viz.reg.mean <- sapply(viz.reg.dist, mean)
barplot(viz.reg.mean,
col = "beige", #below, 2nd backslash means print the quotation marks
main = "Average Google Search Shape of\n\"Data Visualization\" by Region of US")
abline(h = 0) #gives reference line of zero
describeBy(google$data_viz, google$region)
#gives descriptives stats for each group
#Scatterplots
names(google)
#is there assn b/w coldeg and dataviz search?
plot(google$degree, google$data_viz) #x, y
#strong positive assn
plot(google$degree, google$data_viz,
main = "interest in data viz searches\nby %with col deg",
xlab = "pop with col deg",
ylab = "searches for \"data viz\"",
pch = 20,
col = "grey")
#want to add regression line
abline(lm(google$data_viz ~ google$degree), col="red")
#this means: add line, linear model, predicting data viz by degree (red color)
#lowess smoother line (x,y) #this line matches the shape of the data
lines(lowess(google$degree, google$data_viz), col="blue")
#order of vars is different = here x and then y
#Scatterplot matrices
#when you have several scatterplots arranged in rows & cols
#here we specify dataset separately instead of google$ for all
pairs(~data_viz + degree + facebook + nba,
data = google,
pch = 20,
main = "simple scatterplot matrix")
#what this means: data viz x degree, then fb, then nba, and each by each other
pairs.panels(google[c(3, 7, 4, 5)], gap = 0)
#here, specifying which vars we want to use by order in which appear in dataset
#no gap b/w columns
#have hist for each 4 vars, on top we have overlaid kernal density estimation
#scatterplots on bottom left side of matrix, dot for means, lowess line,
#ellipse for correlation coefficient - rounder = less associated, longer = more
#upper corner, correlation coeffs
#this matrix gives us a lot of information
google <- read.csv("~/Desktop/R/summerstats2git/google_correlate.csv", header = T)
install.packages("rgl")
library("rgl")
plot3d(google$data_viz, #x var
google$degree, #y var
google$facebook, #z var
xlab = "data_viz",
ylab = "degree",
zlab = "facebook",
col = "red",
size = 3)
#meh, not sure how helpful this is. But can move it around
#Correlations.
g.quant <- google[c (3, 7, 4, 5)]
#create new dataset w/ only quantiative vars
cor(g.quant)
#gives correlation all vars in dataset. dataviz$degree strong, dataviz$facebook strong/neg
#can test one pair of vars at a time as hypothesis test
cor.test(g.quant$data_viz, g.quant$degree)
#passes test of stat sig
install.packages("Hmisc")
library("Hmisc")
rcorr(as.matrix(g.quant))
#turn from dataframe into matrix
#only 2 decimals, and n size, and probabilities
#sig probabilities: dataviz$fb, fb$nba
#Regressions.
#outcome ~ (is a function of: vars, vars come from google dataset)
reg1 <- lm(data_viz ~
degree + stats_ed + facebook + nba + has_nba + region,
data = google)
#stats_ed currently enetered as text, region as categorical w/ 4 levels
#R is smart, so we don't need to transform these vars
summary(reg1)
#residuals - how well model fits the data
#stats_edyes = made into dummy var w/ 1=yes
#has nba_yes also turned into dummy
#regions = one region is omitted
#degree and fb are both sig
#R2, adj = good prediction model (~60% of variance explained by model)
#Crosstabs.
#create a contingency table
sn <- read.csv("~/Desktop/R/summerstats2git/sn.csv", header = T)
sn.tab <- table(sn$Gender, sn$Site)
sn.tab
#can also get marginal frequences
margin.table(sn.tab, 1) #row marginal freqs
margin.table(sn.tab, 2) #col marginal freqs
#each of these give a tab of each var, gender and then female
round(prop.table(sn.tab), 2) #cell %
round(prop.table(sn.tab, 1), 2) #row % #add to 100 going across
round(prop.table(sn.tab, 2), 2)#col % add to 100 going down
#Chi-squared test
chisq.test(sn.tab)
#yes, statistically significant.
#warning message = this is b/c of small sample, sparse cells
#for reliable x2, want expected frequencies of at least 5-10 per cell
#T-tests.
google <- read.csv("~/Desktop/R/summerstats2git/google_correlate.csv", header = T)
t.test(google$nba ~ google$has_nba)
#do more ppl search for nba if they have their own bball group?
#outcome var ~ predictor, t test
#yes, it is significant
#groups w/o nba team, have -0.5 mean, 0.6 if have a team (standardized)
#Analysis Of Variance.
anova1 <- aov(data_viz ~ region, data = google)
summary(anova1)
#no statistically significant difference b/w these groups - 38% chance to get diffs by random
#Two-way factorial design
anova2a <- aov(data_viz ~
region + stats_ed + region:stats_ed,
data=google)
summary(anova2a)
#have stats ed, also interaction b/w stats ed and region
#is there a diff by stats ed, is there a dif by region, does region vary by stats ed
#not significant
anova2b <- aov(data_viz ~
region*stats_ed,
data = google)
summary(anova2b)
#if you put the interaction, it will give you the main effects too | /intro R.R | no_license | bethcozz/ssi2017_ex | R | false | false | 11,824 | r | # Up and running with R: Lynda.com. Early Oct 2017.
2 + 2 # vbasic math
#control-return runs it if you're on that line
# [1] = index number for the vector
1:100 #print numbers 1-100 on several lines
#[21] = index number for vector
#no command terminator
#for command more than one line, use parentheses
print("Hello World!")
#just like python
x <- 1:5 # puts numbers 1-5 in var x
# <- "gets" x gets numbers 1-5
#in workspace, shows x var
x #display vals in x
# c= concatenate
y <- c(6, 7, 8, 9, 10)
#now have varialbe y, which is numeric (x is int)
y
#can do vector based math - operations w/o loops
x + y #add x vals to y vals
# 1+6, 2+7, etc.
x*2
#R will also create vars with = but <- is better form, according to R style guides
x <- 0:10
x
y <-c(5, 4, 1, 6, 7, 2, 2, 3, 2, 8)
y
ls() #list objects - same as python
#i have a lot more b/c using stuff from summrstats
#easy to take data from .csv variable
#social network.csv has missing data!
#R converts missing data to NA (not available)
# have to specify header
header = T
#social_network.csv <- read.csv("~/Desktop/R/summerstats2git", header = T, sep = )
#sn.csv <- read.csv("~/Desktop", header = T)
#above ones DO NOT WORK. Below do.
library(readr)
social_network <- read_csv("~/Desktop/R/summerstats2git/social_network.csv")
social_network <- read.csv("~/Desktop/R/summerstats2git/social_network.csv")
social_network <- read.csv("~/Desktop/R/summerstats2git/social_network.csv", header = T)
#str(social_network.csv) doesn't work
#so underscores super just don't work huh
sn.csv <- read.csv("~/Desktop/sn.csv", header = T)
str(sn.csv)
vignette(
)
update.packages()
y
#3. charts and statistics for one var
#have to create table w/ frequencies for R to make a bar graph
site.freq <- table(sn.csv$Site)
barplot(site.freq) #shows in "plots" field the bar graph
? barplot #opens a help window for barplots
barplot(site.freq[order(site.freq, decreasing =T)])
#this puts bars in descending order
barplot(site.freq[order(site.freq)], horiz = T)
#this puts the bar chart horizontally
#Below: fbba = facebook blue. concatenate - facebook blue. repeat gray, 5 times
fbba <- c(rep ("gray", 5),
rgb(59, 89, 152, maxColorValue = 255))
#how to break code across 2 lines - one single command
barplot(site.freq[order(site.freq)],
horiz = T,
col = fbba)
#color = use vector fbba
# Final styled bar chart: horizontal bars sorted ascending, Facebook bar
# highlighted via the `fbba` colour vector defined above.
barplot(site.freq[order(site.freq)],
        horiz = TRUE,      # horizontal
        col = fbba,        # fb color
        border = NA,       # no borders
        xlim = c(0,100),   # scale from 0-100
        # fixed typo in displayed title: "Netwrking" -> "Networking"
        main = "Preferred Social Networking Site \nA Survey of 202 Users",
        xlab = "Number of Users")
#Bottom two give label for graph and for the x-axis
#how to export the chart? Click on the chart and save as PDF it under "export"
#charts can help you make sure that your variables look right
sn.csv <- read.csv("~/Desktop/sn.csv", header = T)
#histograms:
hist(sn.csv$Age)
hist(sn.csv$Age,
#border = NA
col = "beige", #or use: col = colors() [18]
main = "Ages of Respondents\nSocial Networking Survey of 202 Users",
xlab = "Age of Respondents")
#box plots: look at distribution and outliers
boxplot(sn.csv$Age)
#median age around 30, low of 10, high of 70
# Horizontal notched box plot of respondent age.
boxplot(sn.csv$Age,
        col = "beige",
        notch = TRUE,
        horizontal = TRUE,
        # fixed typo in displayed title: "]n" was meant to be the newline "\n"
        main = "Ages of Respondents\n Social Networking Survey",
        xlab = "Age of Respondents")
#Calculating Frequencies:
table(sn.csv$Site) #creates frequency table in alpha order
site.freq <- table(sn.csv$Site) #saves table
site.freq #print table
#replace this table w/ a sorted version of itself
site.freq <- site.freq[order(site.freq, decreasing = T)] #decreasing = True
site.freq #print table
prop.table(site.freq) #gives proportions of total
round(prop.table(site.freq), 2) #gives proportions to 2 decimal points
#Calculating Descriptives:
summary(sn.csv$Age) #summary for one variable - gives min/max/quartiles/mean/missing = NA
summary(sn.csv)
#Tukey's five number summary: min/quart/mean/3rdquart/max
fivenum(sn.csv$Age)
#alt descriptive statistics - sd, kurtosis, skew, range = like sum, d in Stat
install.packages("psych")
library("psych")
describe(sn.csv)
#gender and site are categorical= have *'s to denote that
#gender, b/c of one missing - gives values 1, 2, 3 (with 1=missing?)
#Recoding Variables:
#looking at variable: Times
hist(sn.csv$Times)
#very skewed histogram - most people in earliest categories
times.z <- scale(sn.csv$Times) #standardizes the distribution
hist(times.z)
describe(times.z)
#skewness and kurtosis are both still bad for this variable.
#Kurtosis - affected a lot by outliers
#log
times.ln0 <- log(sn.csv$Times)
hist(times.ln0)
describe(times.ln0) #see some weird stuff in this description, b/c of the 0s in dataset
times.ln1 <-log(sn.csv$Times +1) #wow, should have done this for Cynthia paper!
hist(times.ln1)
describe(times.ln1)
#Ranking
times.rank <- rank(sn.csv$Times)
hist(times.rank)
describe(times.rank)
#ties.method : what to do when vals are tied?= c( "average", "first", "random", "max", "min")
times.rankr <- rank(sn.csv$Times, ties.method = "random") #this flattens the dist
hist(times.rankr)
describe(times.rankr)
#Dichotomizing
#use wisely and purposefully! - we are losing information
#if else function: create new var. if time =>1, give val 1, if not, give 0)
time.gt1 <- ifelse(sn.csv$Times > 1, 1, 0)
time.gt1 #this is the whole dataset, with binary values
#Computing New Variables:
#create variable n1 with 1 million random normal values
n1 <- rnorm(1000000) #give random values from normal dist
hist(n1)
#do it again
n2 <- rnorm(1000000)
hist(n2)
#Average scores cross two variables
n.add <- n1 + n2 #new var= n1+n2 (adds 1st item of n1 to 1st in n2, etc.)
hist(n.add) #also gives normal looking bell curve
#Multiple scores across two variables
n.mult <- n1 * n2
hist(n.mult) #hist is much slimmer now. multiplying vals gives huge # of outliers
kurtosi(n1)
kurtosi(n2)
kurtosi(n.add)
kurtosi(n.mult)
#kurtosis is largest diff b/w our mult and add new n vars
#Vector based options in R are very simple and easy to do
#Now, time for bivariate associations.
#Bar charts for group means
google <- read.csv("~/Desktop/R/summerstats2git/google_correlate.csv", header = T)
names(google) #gives names of vars in dataset
str(google) #gives more info about these vars
#does interest in data viz vary by region?
#split data by region, create new data frame
viz.reg.dist <- split(google$data_viz, google$region) #split: from google, take dataviz, split by region
boxplot(viz.reg.dist, col = "lavender")
#shows relative interest in data viz by region - West has widest variation, less in NE
#outliers in NE and in South, South has highest var
#Barplot w/ means
viz.reg.mean <- sapply(viz.reg.dist, mean)
barplot(viz.reg.mean,
col = "beige", #below, 2nd backslash means print the quotation marks
main = "Average Google Search Shape of\n\"Data Visualization\" by Region of US")
abline(h = 0) #gives reference line of zero
describeBy(google$data_viz, google$region)
#gives descriptives stats for each group
#Scatterplots
names(google)
#is there assn b/w coldeg and dataviz search?
plot(google$degree, google$data_viz) #x, y
#strong positive assn
plot(google$degree, google$data_viz,
main = "interest in data viz searches\nby %with col deg",
xlab = "pop with col deg",
ylab = "searches for \"data viz\"",
pch = 20,
col = "grey")
#want to add regression line
abline(lm(google$data_viz ~ google$degree), col="red")
#this means: add line, linear model, predicting data viz by degree (red color)
#lowess smoother line (x,y) #this line matches the shape of the data
lines(lowess(google$degree, google$data_viz), col="blue")
#order of vars is different = here x and then y
#Scatterplot matrices
#when you have several scatterplots arranged in rows & cols
#here we specify dataset separately instead of google$ for all
pairs(~data_viz + degree + facebook + nba,
data = google,
pch = 20,
main = "simple scatterplot matrix")
#what this means: data viz x degree, then fb, then nba, and each by each other
pairs.panels(google[c(3, 7, 4, 5)], gap = 0)
#here, specifying which vars we want to use by order in which appear in dataset
#no gap b/w columns
#have hist for each 4 vars, on top we have overlaid kernal density estimation
#scatterplots on bottom left side of matrix, dot for means, lowess line,
#ellipse for correlation coefficient - rounder = less associated, longer = more
#upper corner, correlation coeffs
#this matrix gives us a lot of information
google <- read.csv("~/Desktop/R/summerstats2git/google_correlate.csv", header = T)
install.packages("rgl")
library("rgl")
plot3d(google$data_viz, #x var
google$degree, #y var
google$facebook, #z var
xlab = "data_viz",
ylab = "degree",
zlab = "facebook",
col = "red",
size = 3)
#meh, not sure how helpful this is. But can move it around
#Correlations.
g.quant <- google[c (3, 7, 4, 5)]
#create new dataset w/ only quantiative vars
cor(g.quant)
#gives correlation all vars in dataset. dataviz$degree strong, dataviz$facebook strong/neg
#can test one pair of vars at a time as hypothesis test
cor.test(g.quant$data_viz, g.quant$degree)
#passes test of stat sig
install.packages("Hmisc")
library("Hmisc")
rcorr(as.matrix(g.quant))
#turn from dataframe into matrix
#only 2 decimals, and n size, and probabilities
#sig probabilities: dataviz$fb, fb$nba
#Regressions.
## Course notes: multiple regression, crosstabs, chi-square, t-test, and ANOVA.
## Model form throughout: outcome ~ predictors (variables from the `google` dataset).
## NOTE(review): `google` is used by reg1 below but only read from disk later in
## this script -- presumably it was loaded earlier in the session; confirm.
reg1 <- lm(data_viz ~
degree + stats_ed + facebook + nba + has_nba + region,
data = google)
# stats_ed is entered as text and region as a 4-level categorical variable;
# lm() converts character/factor predictors to dummy variables automatically,
# so no manual recoding is needed.
summary(reg1)
# Reading the summary:
# - residuals describe how well the model fits the data
# - stats_edyes is the auto-created dummy (1 = yes); has_nbayes likewise
# - one region level is omitted as the reference category
# - degree and facebook are both significant in this fit
# - R^2 / adjusted R^2: roughly 60% of the variance explained by the model

## Crosstabs: build a contingency table of gender by site.
sn <- read.csv("~/Desktop/R/summerstats2git/sn.csv", header = T)
sn.tab <- table(sn$Gender, sn$Site)
sn.tab
# Marginal frequencies of the table:
margin.table(sn.tab, 1) # row marginal frequencies (gender)
margin.table(sn.tab, 2) # column marginal frequencies (site)
# Proportion tables, rounded to 2 decimals:
round(prop.table(sn.tab), 2) # cell proportions (whole table sums to 1)
round(prop.table(sn.tab, 1), 2) # row proportions (each row sums to 1, i.e. 100% going across)
round(prop.table(sn.tab, 2), 2) # column proportions (each column sums to 1, i.e. 100% going down)

## Chi-squared test of independence on the contingency table.
chisq.test(sn.tab)
# Statistically significant here. The warning comes from the small sample
# (sparse cells); a reliable chi-square wants expected frequencies of at
# least ~5-10 per cell.

## Two-sample t-test: do more people search "nba" where there is an NBA team?
google <- read.csv("~/Desktop/R/summerstats2git/google_correlate.csv", header = T)
t.test(google$nba ~ google$has_nba)
# Form: outcome ~ grouping variable. The difference is significant:
# mean about -0.5 without a team vs about 0.6 with one (standardized scores).

## One-way analysis of variance: data_viz searches by region.
anova1 <- aov(data_viz ~ region, data = google)
summary(anova1)
# No statistically significant regional difference (~38% chance of
# differences this large arising at random).

## Two-way factorial design: main effects plus the interaction, spelled out.
anova2a <- aov(data_viz ~
region + stats_ed + region:stats_ed,
data=google)
summary(anova2a)
# Tests three things: a stats_ed effect, a region effect, and whether the
# region effect varies by stats_ed (the interaction). None significant here.

## Equivalent shorthand: region*stats_ed expands to both main effects + interaction.
anova2b <- aov(data_viz ~
region*stats_ed,
data = google)
summary(anova2b)
# Specifying the interaction with * automatically includes the main effects too.
# Animated scatter plots with plotly; the `frame` aesthetic drives the
# animation slider.
library(plotly)
library(gapminder)

# Iris: sepal length vs width, coloured by species and sized by petal length.
# Fix: reference Petal.Length through the `data` argument (~Petal.Length),
# not ~iris$Petal.Length -- the latter bypasses `data`, so it can fall out
# of sync when plotly subsets or reorders rows per animation frame.
graf <- plotly::plot_ly(data = iris, x = ~Sepal.Length, y = ~Sepal.Width,
                        type = "scatter", mode = "markers",
                        color = ~Species, size = ~Petal.Length,
                        frame = ~Species)
graf

# Gapminder: life expectancy vs population, one frame per year.
graf <- plotly::plot_ly(data = gapminder, x = ~lifeExp, y = ~pop,
                        type = "scatter", color = ~continent,
                        mode = 'markers', text = ~country,
                        frame = ~year)
graf

# Filter to a single continent (Americas) and colour by country.
gapm <- gapminder[gapminder$continent == "Americas", ]
graf <- plotly::plot_ly(data = gapm, x = ~lifeExp, y = ~pop,
                        type = "scatter", color = ~country,
                        mode = 'markers', text = ~country,
                        frame = ~year)
graf
| /Graficos_avancados/template_graficosAnimados.R | no_license | joscelino/Graficos_em_R | R | false | false | 832 | r | library(plotly)
library(gapminder)
# Animated scatter plots with plotly; the `frame` aesthetic drives the
# animation slider.

# Iris: sepal length vs width, coloured by species and sized by petal length.
# Fix: reference Petal.Length through the `data` argument (~Petal.Length),
# not ~iris$Petal.Length -- the latter bypasses `data`, so it can fall out
# of sync when plotly subsets or reorders rows per animation frame.
graf <- plotly::plot_ly(data = iris, x = ~Sepal.Length, y = ~Sepal.Width,
                        type = "scatter", mode = "markers",
                        color = ~Species, size = ~Petal.Length,
                        frame = ~Species)
graf

# Gapminder: life expectancy vs population, one frame per year.
graf <- plotly::plot_ly(data = gapminder, x = ~lifeExp, y = ~pop,
                        type = "scatter", color = ~continent,
                        mode = 'markers', text = ~country,
                        frame = ~year)
graf

# Filter to a single continent (Americas) and colour by country.
gapm <- gapminder[gapminder$continent == "Americas", ]
graf <- plotly::plot_ly(data = gapm, x = ~lifeExp, y = ~pop,
                        type = "scatter", color = ~country,
                        mode = 'markers', text = ~country,
                        frame = ~year)
graf
|
\name{ActivityInfo-package}
\alias{ActivityInfo-package}
\alias{ActivityInfo}
\docType{package}
\title{
R Client for ActivityInfo.org
}
\description{
An R client for the ActivityInfo.org information-management platform:
authenticate with \code{activityInfoLogin()} and retrieve site data with \code{getSites()}.
}
\details{
\tabular{ll}{
Package: \tab ActivityInfo\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-05-09\cr
License: \tab What license is it under?\cr
}
}
\author{
Alex Bertram
}
\examples{
\dontrun{
activityInfoLogin()
getSites(activityId=33)
}
}
| /man/ActivityInfo-package.Rd | no_license | Edouard-Legoupil/activityinfo-R | R | false | false | 534 | rd | \name{ActivityInfo-package}
\alias{ActivityInfo-package}
\alias{ActivityInfo}
\docType{package}
\title{
R Client for ActivityInfo.org
}
\description{
An R client for the ActivityInfo.org information-management platform:
authenticate with \code{activityInfoLogin()} and retrieve site data with \code{getSites()}.
}
\details{
\tabular{ll}{
Package: \tab ActivityInfo\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-05-09\cr
License: \tab What license is it under?\cr
}
}
\author{
Alex Bertram
}
\examples{
\dontrun{
activityInfoLogin()
getSites(activityId=33)
}
}
|
setwd("E:\\SS\\Coursera Data Science Specialization\\exploratory-data-analysis\\Week1\\Course Project 1")
## Getting full dataset
data_full <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting dates
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving to file
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
| /plot1.R | no_license | shan4224/ExData_Plotting1 | R | false | false | 865 | r | setwd("E:\\SS\\Coursera Data Science Specialization\\exploratory-data-analysis\\Week1\\Course Project 1")
## Getting full dataset: all 2,075,259 rows; "?" encodes missing values.
data_full <- read.csv("./household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two days of interest, then freeing the full table
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting date + time strings into a single POSIXct timestamp column
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 1: histogram of global active power for the selected days
hist(data$Global_active_power, main="Global Active Power",
xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Saving to file: copy the on-screen plot to a 480x480 PNG
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
# The 75-line literal this replaces is a dput() of the built-in
# `datasets::airquality` data frame (New York air quality measurements,
# May-September 1973: 153 observations of Ozone, Solar.R, Wind, Temp,
# Month, Day). Load the copy shipped with R instead of embedding the dump.
airquality <- datasets::airquality
| /airrQ.R | no_license | CesarAAG/Programacion_Actuarial_III_OT16 | R | false | false | 4,774 | r | airquality <-
# This structure() literal (the right-hand side of the `airquality <-`
# assignment begun on the previous line) is a byte-for-byte dput() of the
# built-in `datasets::airquality` data frame (New York air quality
# measurements, May-September 1973; 153 x 6). Use the shipped dataset
# instead of the embedded dump.
datasets::airquality
|
# Coursera "Computing for Data Analysis" -- Assignment 1.
# Exploratory queries against the hw1 air-quality dataset.

# Read data from the CSV file (comma-separated, header row present)
data <- read.csv("hw1_data.csv")

# Column names of the dataset
names(data)

# First 2 rows of the data frame
head(data, 2)

# Number of observations (rows) in the data frame
nrow(data)

# Last 2 rows of the data frame (the original comment said "first";
# tail() returns the end of the data frame)
tail(data, 2)

# Value of Ozone in the 47th row
data[47, "Ozone"]

# Number of missing values in the Ozone column
# (sum of a logical vector counts the TRUEs)
sum(is.na(data$Ozone))

# Mean of the Ozone column, excluding missing values
mean(data$Ozone, na.rm = TRUE)

# Mean of Solar.R over the rows where Ozone > 31 and Temp > 90
x <- subset(data, Ozone > 31 & Temp > 90)[, "Solar.R"]
mean(x, na.rm = TRUE)

# Mean of Temp when Month equals 6 (June)
x <- subset(data, Month == 6)[, "Temp"]
mean(x, na.rm = TRUE)

# Maximum Ozone value in May (Month == 5)
x <- subset(data, Month == 5)[, "Ozone"]
max(x, na.rm = TRUE)
| /coursera/comp-data-analys/assignment1.R | no_license | Peque/peque | R | false | false | 1,044 | r | # Read data from CSV file
# Read data from the comma-separated CSV file (header row present)
data <- read.table(file = "hw1_data.csv", header = TRUE, sep = ",")
# Column names of the dataset
names(data)
# First 2 rows of the data frame
head(data, 2)
# Number of observations (rows) in the data frame
nrow(data)
# Last 2 rows of the data frame (tail() returns the end, not the start)
tail(data, 2)
# Value of Ozone in the 47th row
data[47, "Ozone"]
# Counting missing values in the Ozone column
length(data[is.na(data[,"Ozone"]), "Ozone"])
# Mean of the Ozone column (excluding missing values)
mean(data[!is.na(data[,"Ozone"]), "Ozone"])
# Mean of Solar.R over the rows where Ozone > 31 and Temp > 90
x <- subset(data, Ozone > 31 & Temp > 90)[,"Solar.R"]
mean(x[!is.na(x)])
# Mean of "Temp" when "Month" is equal to 6 (June)
x <- subset(data, Month == 6)[,"Temp"]
mean(x[!is.na(x)])
# Maximum ozone value in the month of May (Month == 5)
x <- subset(data, Month == 5)[,"Ozone"]
max(x[!is.na(x)])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cforest_LUR.R
\name{cforest_LUR}
\alias{cforest_LUR}
\title{Random forest for LUR (using cforest)}
\usage{
cforest_LUR(
variabledf,
vis1 = T,
y_varname = c("day_value", "night_value", "value_mean"),
training,
test,
grepstring,
...
)
}
\arguments{
\item{variabledf}{the dataframe containing predictors and dependent variable}
\item{y_varname}{name of the dependent variable.}
\item{training}{the index for the rows used for training.}
\item{test}{the index for the rows used for testing.}
\item{grepstring}{the variable/column names of predictors in Lasso, grepl style, e.g. 'ROAD|pop|temp|wind|Rsp|OMI|eleva|coast'}
}
\value{
plot variable importance and an error matrix
}
\description{
Random forest for LUR (using cforest)
}
| /man/cforest_LUR.Rd | no_license | mengluchu/APMtools | R | false | true | 823 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cforest_LUR.R
\name{cforest_LUR}
\alias{cforest_LUR}
\title{Random forest for LUR (using cforest)}
\usage{
cforest_LUR(
variabledf,
vis1 = T,
y_varname = c("day_value", "night_value", "value_mean"),
training,
test,
grepstring,
...
)
}
\arguments{
\item{variabledf}{the dataframe containing predictors and dependent variable}
\item{y_varname}{name of the dependent variable.}
\item{training}{the index for the rows used for training.}
\item{test}{the index for the rows used for testing.}
\item{grepstring}{the variable/column names of predictors in Lasso, grepl stlye, e.g. 'ROAD|pop|temp|wind|Rsp|OMI|eleva|coast'}
}
\value{
plot variable importance and an error matrix
}
\description{
Random forest for LUR (using cforest)
}
|
## Analysis of the ColonCancer OncodriveCIS input dataset using PLRS
## (piecewise linear regression splines: expression ~ copy number).
library(plrs)

# Load preprocessed expression (coad.ge) and thresholded copy-number
# (coad.cnv) objects saved earlier.
load("/home/anita/Benchmarking/two_omics/ColonCancerCompleteDataAnalysis/ColonCancerRawDataset_OncodriveCISInput.Rdata")

# Read GISTIC 2.0 continuous copy-number data (genes x samples);
# first column holds the gene identifiers.
cnv_gistic <- read.table(gzfile("/home/anita/Integrated analysis in R/All_Cancers/COAD_Gistic2_CopyNumber_Gistic2_all_data_by_genes.gz"),
                         header = TRUE, sep = "\t")
rownames(cnv_gistic) <- cnv_gistic[, 1]
cnv_gistic[, 1] <- NULL

# Restrict all matrices to the genes (rows) and samples (columns)
# shared across the datasets.
a <- intersect(rownames(coad.ge), rownames(cnv_gistic))
b <- intersect(colnames(coad.ge), colnames(cnv_gistic))
coad.cnv_gistic <- cnv_gistic[a, b]

# Collapse homozygous deletions (-2) into losses (-1) in the
# thresholded copy-number calls.
coad.cnv[coad.cnv == -2] <- -1

# Align sample (column) order across the three matrices.
coad.ge <- coad.ge[, order(colnames(coad.ge))]
coad.cnv <- coad.cnv[, order(colnames(coad.cnv))]
coad.cnv_gistic <- coad.cnv_gistic[, order(colnames(coad.cnv_gistic))]

# Fail fast if the alignment is wrong. The original merely printed the
# all(==) results, which could silently scroll past a FALSE.
stopifnot(identical(colnames(coad.ge), colnames(coad.cnv)),
          identical(colnames(coad.ge), colnames(coad.cnv_gistic)))

coad.ge <- as.matrix(coad.ge)
coad.cnv <- as.matrix(coad.cnv)
coad.cnv_gistic <- as.matrix(coad.cnv_gistic)

# Run plrs over all genes, timing the full series fit.
library(tictoc)
tic("plrs")
resNoSel <- plrs.series(expr = coad.ge, cghseg = coad.cnv_gistic, cghcall = coad.cnv,
                        control.select = NULL, control.model = list(min.obs = 3))
toc()
# Observed run: ~0:06:46 for 12,358 genes (plrs: 406.489 sec elapsed).

summary(resNoSel)

# Per-gene test results; attach the gene identifiers.
head(resNoSel@test)
results <- data.frame(resNoSel@test)
results$Gene <- rownames(coad.ge)

# Save results as a tab-separated table.
setwd("/home/anita/Benchmarking/two_omics/ColonCancerCompleteDataAnalysis/plrs/PLRS_UsingOncodriveCISInput/")
write.table(results, file = "PLRS_COAD_Results_OncodriveInput.tsv", row.names = TRUE, sep = "\t", quote = FALSE)
| /Multi-staged tools/ColorectalCancer/PLRS/PLRS_COAD_Analysis_OncodriveInput.R | no_license | AtinaSat/Evaluation-of-integration-tools | R | false | false | 2,234 | r | ## analysis of ColonCancer OncodriveCIS Input dataset using PLRS
## PLRS analysis of the ColonCancer OncodriveCIS input dataset
## (piecewise linear regression splines: expression ~ copy number).
library(plrs)

# Load preprocessed expression (coad.ge) and thresholded copy-number
# (coad.cnv) objects saved earlier.
load("/home/anita/Benchmarking/two_omics/ColonCancerCompleteDataAnalysis/ColonCancerRawDataset_OncodriveCISInput.Rdata")

# Read GISTIC 2.0 continuous copy-number data (genes x samples);
# first column holds the gene identifiers.
cnv_gistic <- read.table(gzfile("/home/anita/Integrated analysis in R/All_Cancers/COAD_Gistic2_CopyNumber_Gistic2_all_data_by_genes.gz"),
                         header = TRUE, sep = "\t")
rownames(cnv_gistic) <- cnv_gistic[, 1]
cnv_gistic[, 1] <- NULL

# Restrict all matrices to the genes (rows) and samples (columns)
# shared across the datasets.
a <- intersect(rownames(coad.ge), rownames(cnv_gistic))
b <- intersect(colnames(coad.ge), colnames(cnv_gistic))
coad.cnv_gistic <- cnv_gistic[a, b]

# Collapse homozygous deletions (-2) into losses (-1) in the
# thresholded copy-number calls.
coad.cnv[coad.cnv == -2] <- -1

# Align sample (column) order across the three matrices.
coad.ge <- coad.ge[, order(colnames(coad.ge))]
coad.cnv <- coad.cnv[, order(colnames(coad.cnv))]
coad.cnv_gistic <- coad.cnv_gistic[, order(colnames(coad.cnv_gistic))]

# Fail fast if the alignment is wrong. The original merely printed the
# all(==) results, which could silently scroll past a FALSE.
stopifnot(identical(colnames(coad.ge), colnames(coad.cnv)),
          identical(colnames(coad.ge), colnames(coad.cnv_gistic)))

coad.ge <- as.matrix(coad.ge)
coad.cnv <- as.matrix(coad.cnv)
coad.cnv_gistic <- as.matrix(coad.cnv_gistic)

# Run plrs over all genes, timing the full series fit.
library(tictoc)
tic("plrs")
resNoSel <- plrs.series(expr = coad.ge, cghseg = coad.cnv_gistic, cghcall = coad.cnv,
                        control.select = NULL, control.model = list(min.obs = 3))
toc()
# Observed run: ~0:06:46 for 12,358 genes (plrs: 406.489 sec elapsed).

summary(resNoSel)

# Per-gene test results; attach the gene identifiers.
head(resNoSel@test)
results <- data.frame(resNoSel@test)
results$Gene <- rownames(coad.ge)

# Save results as a tab-separated table.
setwd("/home/anita/Benchmarking/two_omics/ColonCancerCompleteDataAnalysis/plrs/PLRS_UsingOncodriveCISInput/")
write.table(results, file = "PLRS_COAD_Results_OncodriveInput.tsv", row.names = TRUE, sep = "\t", quote = FALSE)
|
library(fitODBOD)
### Name: pTRI
### Title: Triangular Distribution bounded between [0,1]
### Aliases: pTRI

### ** Examples

# Density curves of the triangular distribution for four mode values
shades <- rainbow(4)
modes <- seq(0.2, 0.8, by = 0.2)
plot(0, 0, main = "Probability density graph", xlab = "Random variable",
     ylab = "Probability density values", xlim = c(0, 1), ylim = c(0, 3))
for (k in seq_along(modes)) {
  lines(seq(0, 1, by = 0.01), dTRI(seq(0, 1, by = 0.01), modes[k])$pdf, col = shades[k])
}
dTRI(seq(0, 1, by = 0.05), 0.3)$pdf   # pdf values on a coarse grid
dTRI(seq(0, 1, by = 0.01), 0.3)$mean  # distribution mean
dTRI(seq(0, 1, by = 0.01), 0.3)$var   # distribution variance

# Cumulative distribution curves for the same mode values
shades <- rainbow(4)
modes <- seq(0.2, 0.8, by = 0.2)
plot(0, 0, main = "Cumulative density graph", xlab = "Random variable",
     ylab = "Cumulative density values", xlim = c(0, 1), ylim = c(0, 1))
for (k in seq_along(modes)) {
  lines(seq(0, 1, by = 0.01), pTRI(seq(0, 1, by = 0.01), modes[k]), col = shades[k])
}
pTRI(seq(0, 1, by = 0.05), 0.3)  # cumulative probability values
mazTRI(1.4, .3)                  # moment about zero
mazTRI(2, .3) - mazTRI(1, .3)^2  # variance when the mode is 0.3
# only the integer part of the moment order is used, since moments cannot be fractional
mazTRI(1.9, 0.5)
| /data/genthat_extracted_code/fitODBOD/examples/pTRI.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,249 | r | library(fitODBOD)
### Name: pTRI
### Title: Triangular Distribution bounded between [0,1]
### Aliases: pTRI

### ** Examples

# Density curves of the triangular distribution for four mode values
shades <- rainbow(4)
modes <- seq(0.2, 0.8, by = 0.2)
plot(0, 0, main = "Probability density graph", xlab = "Random variable",
     ylab = "Probability density values", xlim = c(0, 1), ylim = c(0, 3))
for (k in seq_along(modes)) {
  lines(seq(0, 1, by = 0.01), dTRI(seq(0, 1, by = 0.01), modes[k])$pdf, col = shades[k])
}
dTRI(seq(0, 1, by = 0.05), 0.3)$pdf   # pdf values on a coarse grid
dTRI(seq(0, 1, by = 0.01), 0.3)$mean  # distribution mean
dTRI(seq(0, 1, by = 0.01), 0.3)$var   # distribution variance

# Cumulative distribution curves for the same mode values
shades <- rainbow(4)
modes <- seq(0.2, 0.8, by = 0.2)
plot(0, 0, main = "Cumulative density graph", xlab = "Random variable",
     ylab = "Cumulative density values", xlim = c(0, 1), ylim = c(0, 1))
for (k in seq_along(modes)) {
  lines(seq(0, 1, by = 0.01), pTRI(seq(0, 1, by = 0.01), modes[k]), col = shades[k])
}
pTRI(seq(0, 1, by = 0.05), 0.3)  # cumulative probability values
mazTRI(1.4, .3)                  # moment about zero
mazTRI(2, .3) - mazTRI(1, .3)^2  # variance when the mode is 0.3
# only the integer part of the moment order is used, since moments cannot be fractional
mazTRI(1.9, 0.5)
|
forward.adaptive <- function(data, params, logbeta.init, logomega.init, alpha, dcc.true) {
### Data:
## int<lower=0> T; // time periods
## int<lower=0> N; // population
## int<lower=0> K; // # weather preds
## matrix[T, K] weather;
## real ii_init; // usually 0, so provide as known
### Parameters:
## // parameters
## vector[T] logbeta;
## vector[T-1] logomega;
## real<lower=2, upper=17> invsigma; // below 2 and daily step doesn't work
## real<lower=2, upper=17> invgamma; // below 2 and daily step doesn't work
## real<lower=1, upper=17> invkappa; // below 1 and daily step doesn't work
## real<lower=1, upper=17> invtheta; // below 1 and daily step doesn't work
## real<lower=0, upper=.1> deathrate; // rate of death
## real<lower=-.01, upper=0> deathlearning; // decreases deathrate
## real<lower=0, upper=1> deathomegaplus; // additional rate of reported deaths
## // effect of weather
## vector<lower=-.1, upper=.1>[K] effect;
## vector<lower=-.1, upper=.1>[K] omegaeffect;
## vector<lower=-.1, upper=.1>[6] doweffect6;
## vector<lower=-.1, upper=.1>[6] dowomegaeffect6;
## // latent variables
## vector<lower=0>[T-1] eein;
### Variables defined
## // latent variables
## vector<lower=0>[T] ss; // susceptible
## vector<lower=0>[T-1] new_ee1; // newly exposed
## vector<lower=0>[T] ee1; // exposed
## vector<lower=0>[T] ee2;
## vector<lower=0>[T] ii1; // infected
## vector<lower=0>[T] ii2;
## vector<lower=0>[T] qq; // waiting to be tested
## vector<lower=0>[T] rr; // waiting to be reported
## vector[T-1] omega;
## vector<lower=0>[T-1] dcc; // confirmed cases
## vector<lower=0>[T-1] ddeaths; // deaths
### Forward simulation
doweffect <- rep(0, 7)
dowomegaeffect <- rep(0, 7)
for (dd in 1:6) {
doweffect[dd] <- params$doweffect6[dd];
dowomegaeffect[dd] <- params$dowomegaeffect6[dd];
}
logbeta <- rep(NA, data$T)
logbeta[1] <- logbeta.init[1]
logomega <- rep(NA, data$T)
logomega[1] <- logomega.init[1]
ss <- rep(NA, data$T)
new_ee1 <- rep(NA, data$T - 1)
ee1 <- rep(NA, data$T)
ee2 <- rep(NA, data$T)
ii1 <- rep(NA, data$T)
ii2 <- rep(NA, data$T)
qq <- rep(NA, data$T)
rr <- rep(NA, data$T)
omega <- rep(NA, data$T - 1)
dcc <- rep(NA, data$T - 1)
ddeaths <- rep(NA, data$T - 1)
ss[1] <- data$N;
ee1[1] <- data$ii_init;
ee2[1] <- data$ii_init;
ii1[1] <- data$ii_init;
ii2[1] <- data$ii_init;
qq[1] <- data$ii_init;
rr[1] <- data$ii_init;
for (tt in 2:data$T) {
new_ee1[tt-1] <- exp(logbeta[tt-1] + doweffect[1 + (tt %% 7)] + sum(data$weather[tt-1,] * params$effect))*ss[tt-1]*(ii1[tt-1] + ii2[tt-1]) / data$N;
ss[tt] <- ss[tt-1] - new_ee1[tt-1];
ee1[tt] <- ee1[tt-1] + new_ee1[tt-1] - 2*ee1[tt-1]/params$invsigma + params$eein[tt-1];
ee2[tt] <- ee2[tt-1] + 2*ee1[tt-1]/params$invsigma - 2*ee2[tt-1]/params$invsigma;
ii1[tt] <- ii1[tt-1] + 2*ee2[tt-1]/params$invsigma - 2*ii1[tt-1]/params$invgamma;
ii2[tt] <- ii2[tt-1] + 2*ii1[tt-1]/params$invgamma - 2*ii2[tt-1]/params$invgamma;
qq[tt] <- qq[tt-1] + new_ee1[tt-1] - qq[tt-1]/params$invkappa;
omega[tt-1] <- (exp(logomega[tt-1]) / (1 + exp(logomega[tt-1]))) * exp(dowomegaeffect[1 + (tt %% 7)] + sum(data$weather[tt-1,] * params$omegaeffect));
rr[tt] <- rr[tt-1] + omega[tt-1] * qq[tt-1]/params$invkappa - rr[tt-1]/params$invtheta;
dcc[tt-1] <- rr[tt-1]/params$invtheta;
ddeaths[tt-1] <- (2*ii2[tt-1]/params$invgamma) * params$deathrate * exp(tt * params$deathlearning) * (omega[tt-1] + (1 - omega[tt-1]) * params$deathomegaplus);
## Construct new logbeta
if (is.na(dcc.true[tt-1]) || dcc[tt-1] == dcc.true[tt-1]) {
logbeta[tt] <- logbeta[tt-1] + (logbeta.init[tt] - logbeta.init[tt-1])
logomega[tt] <- logomega[tt-1] + (logomega.init[tt] - logomega.init[tt-1])
} else if (dcc[tt-1] < dcc.true[tt-1]) {
logbeta[tt] <- min(-.5, logbeta[tt-1] + alpha + (logbeta.init[tt] - logbeta.init[tt-1]))
logomega[tt] <- min(-.5, logomega[tt-1] + alpha + (logomega.init[tt] - logomega.init[tt-1]))
} else if (dcc[tt-1] > dcc.true[tt-1]) {
logbeta[tt] <- logbeta[tt-1] - alpha + (logbeta.init[tt] - logbeta.init[tt-1])
logomega[tt] <- logomega[tt-1] + (logomega.init[tt] - logomega.init[tt-1])
}
}
## return(list(ss=ss, new_ee1=new_ee1, ee1=ee1, ee2=ee2, ii1=ii1, ii2=ii2, qq=qq, rr=rr, omega=omega, dcc=dcc, ddeaths=ddeaths))
return(data.frame(TT=1:data$T, ss=ss, new_ee1=c(0, new_ee1), ee1=ee1, ee2=ee2, ii1=ii1, ii2=ii2, qq=qq, rr=rr, omega=c(0, omega), dcc=c(0, dcc), ddeaths=c(0, ddeaths), logbeta, logomega))
}
| /seir-model/old-versions/forward-0105-adaptive.R | no_license | openmodels/coronaclimate | R | false | false | 4,860 | r | forward.adaptive <- function(data, params, logbeta.init, logomega.init, alpha, dcc.true) {
### Data:
## int<lower=0> T; // time periods
## int<lower=0> N; // population
## int<lower=0> K; // # weather preds
## matrix[T, K] weather;
## real ii_init; // usually 0, so provide as known
### Parameters:
## // parameters
## vector[T] logbeta;
## vector[T-1] logomega;
## real<lower=2, upper=17> invsigma; // below 2 and daily step doesn't work
## real<lower=2, upper=17> invgamma; // below 2 and daily step doesn't work
## real<lower=1, upper=17> invkappa; // below 1 and daily step doesn't work
## real<lower=1, upper=17> invtheta; // below 1 and daily step doesn't work
## real<lower=0, upper=.1> deathrate; // rate of death
## real<lower=-.01, upper=0> deathlearning; // decreases deathrate
## real<lower=0, upper=1> deathomegaplus; // additional rate of reported deaths
## // effect of weather
## vector<lower=-.1, upper=.1>[K] effect;
## vector<lower=-.1, upper=.1>[K] omegaeffect;
## vector<lower=-.1, upper=.1>[6] doweffect6;
## vector<lower=-.1, upper=.1>[6] dowomegaeffect6;
## // latent variables
## vector<lower=0>[T-1] eein;
### Variables defined
## // latent variables
## vector<lower=0>[T] ss; // susceptible
## vector<lower=0>[T-1] new_ee1; // newly exposed
## vector<lower=0>[T] ee1; // exposed
## vector<lower=0>[T] ee2;
## vector<lower=0>[T] ii1; // infected
## vector<lower=0>[T] ii2;
## vector<lower=0>[T] qq; // waiting to be tested
## vector<lower=0>[T] rr; // waiting to be reported
## vector[T-1] omega;
## vector<lower=0>[T-1] dcc; // confirmed cases
## vector<lower=0>[T-1] ddeaths; // deaths
### Forward simulation
doweffect <- rep(0, 7)
dowomegaeffect <- rep(0, 7)
for (dd in 1:6) {
doweffect[dd] <- params$doweffect6[dd];
dowomegaeffect[dd] <- params$dowomegaeffect6[dd];
}
logbeta <- rep(NA, data$T)
logbeta[1] <- logbeta.init[1]
logomega <- rep(NA, data$T)
logomega[1] <- logomega.init[1]
ss <- rep(NA, data$T)
new_ee1 <- rep(NA, data$T - 1)
ee1 <- rep(NA, data$T)
ee2 <- rep(NA, data$T)
ii1 <- rep(NA, data$T)
ii2 <- rep(NA, data$T)
qq <- rep(NA, data$T)
rr <- rep(NA, data$T)
omega <- rep(NA, data$T - 1)
dcc <- rep(NA, data$T - 1)
ddeaths <- rep(NA, data$T - 1)
ss[1] <- data$N;
ee1[1] <- data$ii_init;
ee2[1] <- data$ii_init;
ii1[1] <- data$ii_init;
ii2[1] <- data$ii_init;
qq[1] <- data$ii_init;
rr[1] <- data$ii_init;
for (tt in 2:data$T) {
new_ee1[tt-1] <- exp(logbeta[tt-1] + doweffect[1 + (tt %% 7)] + sum(data$weather[tt-1,] * params$effect))*ss[tt-1]*(ii1[tt-1] + ii2[tt-1]) / data$N;
ss[tt] <- ss[tt-1] - new_ee1[tt-1];
ee1[tt] <- ee1[tt-1] + new_ee1[tt-1] - 2*ee1[tt-1]/params$invsigma + params$eein[tt-1];
ee2[tt] <- ee2[tt-1] + 2*ee1[tt-1]/params$invsigma - 2*ee2[tt-1]/params$invsigma;
ii1[tt] <- ii1[tt-1] + 2*ee2[tt-1]/params$invsigma - 2*ii1[tt-1]/params$invgamma;
ii2[tt] <- ii2[tt-1] + 2*ii1[tt-1]/params$invgamma - 2*ii2[tt-1]/params$invgamma;
qq[tt] <- qq[tt-1] + new_ee1[tt-1] - qq[tt-1]/params$invkappa;
omega[tt-1] <- (exp(logomega[tt-1]) / (1 + exp(logomega[tt-1]))) * exp(dowomegaeffect[1 + (tt %% 7)] + sum(data$weather[tt-1,] * params$omegaeffect));
rr[tt] <- rr[tt-1] + omega[tt-1] * qq[tt-1]/params$invkappa - rr[tt-1]/params$invtheta;
dcc[tt-1] <- rr[tt-1]/params$invtheta;
ddeaths[tt-1] <- (2*ii2[tt-1]/params$invgamma) * params$deathrate * exp(tt * params$deathlearning) * (omega[tt-1] + (1 - omega[tt-1]) * params$deathomegaplus);
## Construct new logbeta
if (is.na(dcc.true[tt-1]) || dcc[tt-1] == dcc.true[tt-1]) {
logbeta[tt] <- logbeta[tt-1] + (logbeta.init[tt] - logbeta.init[tt-1])
logomega[tt] <- logomega[tt-1] + (logomega.init[tt] - logomega.init[tt-1])
} else if (dcc[tt-1] < dcc.true[tt-1]) {
logbeta[tt] <- min(-.5, logbeta[tt-1] + alpha + (logbeta.init[tt] - logbeta.init[tt-1]))
logomega[tt] <- min(-.5, logomega[tt-1] + alpha + (logomega.init[tt] - logomega.init[tt-1]))
} else if (dcc[tt-1] > dcc.true[tt-1]) {
logbeta[tt] <- logbeta[tt-1] - alpha + (logbeta.init[tt] - logbeta.init[tt-1])
logomega[tt] <- logomega[tt-1] + (logomega.init[tt] - logomega.init[tt-1])
}
}
## return(list(ss=ss, new_ee1=new_ee1, ee1=ee1, ee2=ee2, ii1=ii1, ii2=ii2, qq=qq, rr=rr, omega=omega, dcc=dcc, ddeaths=ddeaths))
return(data.frame(TT=1:data$T, ss=ss, new_ee1=c(0, new_ee1), ee1=ee1, ee2=ee2, ii1=ii1, ii2=ii2, qq=qq, rr=rr, omega=c(0, omega), dcc=c(0, dcc), ddeaths=c(0, ddeaths), logbeta, logomega))
}
|
#' Locality Pursuit Embedding
#'
#' Locality Pursuit Embedding (LPE) is an unsupervised linear dimension reduction method.
#' It aims at preserving local structure by solving a variational problem that models
#' the local geometrical structure by the Euclidean distances.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations
#' and columns represent independent variables.
#' @param ndim an integer-valued target dimension.
#' @param preprocess an additional option for preprocessing the data.
#' Default is "center". See also \code{\link{aux.preprocess}} for more details.
#' @param numk size of \eqn{k}-nn neighborhood in original dimensional space.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
#' }
#'
#' @examples
#' \dontrun{
#' ## generate swiss roll with auxiliary dimensions
#' n = 100
#' theta = runif(n)
#' h = runif(n)
#' t = (1+2*theta)*(3*pi/2)
#' X = array(0,c(n,10))
#' X[,1] = t*cos(t)
#' X[,2] = 21*h
#' X[,3] = t*sin(t)
#' X[,4:10] = matrix(runif(7*n), nrow=n)
#'
#' ## try with different neighborhood sizes
#' out1 = do.lpe(X, numk=5)
#' out2 = do.lpe(X, numk=10)
#' out3 = do.lpe(X, numk=25)
#'
#' ## visualize
#' par(mfrow=c(1,3))
#' plot(out1$Y[,1], out1$Y[,2], main="LPE::numk=5")
#' plot(out2$Y[,1], out2$Y[,2], main="LPE::numk=10")
#' plot(out3$Y[,1], out3$Y[,2], main="LPE::numk=25")
#' }
#'
#' @references
#' \insertRef{min_locality_2004}{Rdimtools}
#'
#' @author Kisung You
#' @rdname linear_LPE
#' @export
do.lpe <- function(X, ndim=2, preprocess=c("center","scale","cscale","decorrelate","whiten"), numk=max(ceiling(nrow(X)/10),2)){
#------------------------------------------------------------------------
## PREPROCESSING
# 1. data matrix
aux.typecheck(X)
n = nrow(X)
p = ncol(X)
# 2. ndim
ndim = as.integer(ndim)
if (!check_ndim(ndim,p)){stop("* do.lpe : 'ndim' is a positive integer in [1,#(covariates)).")}
# 3. numk
numk = as.integer(numk)
if (!check_NumMM(numk,1,n/2,compact=FALSE)){stop("* do.lpe : 'numk' should be an integer in [2,nrow(X)/2).")}
# 4. preprocess
if (missing(preprocess)){ algpreprocess = "center" }
else { algpreprocess = match.arg(preprocess) }
#------------------------------------------------------------------------
## COMPUTATION : PRELIMINARY
# 1. preprocessing
tmplist = aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
trfinfo = tmplist$info
pX = tmplist$pX
# 2. neighborhood creation
nbdtype = c("knn",numk)
nbdsymmetric = "asymmetric"
nbdstruct = aux.graphnbd(pX,method="euclidean",
type=nbdtype,symmetric=nbdsymmetric)
nbdmask = nbdstruct$mask
#------------------------------------------------------------------------
## COMPUTATION : MAIN PART FOR LPE
# 1. build L
L = array(0,c(n,n))
onesN = array(1,c(n,n))
for (i in 1:n){
vecdi = (as.vector(nbdmask[i,])*1.0)
K = sum(vecdi)
Di = diag(vecdi)
L = L + Di + ((1/K)*(Di%*%onesN%*%Di))
}
# 2. find cost function
costTop = t(pX)%*%L%*%pX
# 3. find projection matrix
projection = aux.adjprojection(RSpectra::eigs(costTop, ndim)$vectors)
#------------------------------------------------------------------------
## RETURN
result = list()
result$Y = pX%*%projection
result$trfinfo = trfinfo
result$projection = projection
return(result)
}
| /R/linear_LPE.R | no_license | rcannood/Rdimtools | R | false | false | 3,639 | r | #' Locality Pursuit Embedding
#'
#' Locality Pursuit Embedding (LPE) is an unsupervised linear dimension reduction method.
#' It aims at preserving local structure by solving a variational problem that models
#' the local geometrical structure by the Euclidean distances.
#'
#' @param X an \eqn{(n\times p)} matrix or data frame whose rows are observations
#' and columns represent independent variables.
#' @param ndim an integer-valued target dimension.
#' @param preprocess an additional option for preprocessing the data.
#' Default is "center". See also \code{\link{aux.preprocess}} for more details.
#' @param numk size of \eqn{k}-nn neighborhood in original dimensional space.
#'
#' @return a named list containing
#' \describe{
#' \item{Y}{an \eqn{(n\times ndim)} matrix whose rows are embedded observations.}
#' \item{trfinfo}{a list containing information for out-of-sample prediction.}
#' \item{projection}{a \eqn{(p\times ndim)} whose columns are basis for projection.}
#' }
#'
#' @examples
#' \dontrun{
#' ## generate swiss roll with auxiliary dimensions
#' n = 100
#' theta = runif(n)
#' h = runif(n)
#' t = (1+2*theta)*(3*pi/2)
#' X = array(0,c(n,10))
#' X[,1] = t*cos(t)
#' X[,2] = 21*h
#' X[,3] = t*sin(t)
#' X[,4:10] = matrix(runif(7*n), nrow=n)
#'
#' ## try with different neighborhood sizes
#' out1 = do.lpe(X, numk=5)
#' out2 = do.lpe(X, numk=10)
#' out3 = do.lpe(X, numk=25)
#'
#' ## visualize
#' par(mfrow=c(1,3))
#' plot(out1$Y[,1], out1$Y[,2], main="LPE::numk=5")
#' plot(out2$Y[,1], out2$Y[,2], main="LPE::numk=10")
#' plot(out3$Y[,1], out3$Y[,2], main="LPE::numk=25")
#' }
#'
#' @references
#' \insertRef{min_locality_2004}{Rdimtools}
#'
#' @author Kisung You
#' @rdname linear_LPE
#' @export
do.lpe <- function(X, ndim=2, preprocess=c("center","scale","cscale","decorrelate","whiten"), numk=max(ceiling(nrow(X)/10),2)){
#------------------------------------------------------------------------
## PREPROCESSING
# 1. data matrix
aux.typecheck(X)
n = nrow(X)
p = ncol(X)
# 2. ndim
ndim = as.integer(ndim)
if (!check_ndim(ndim,p)){stop("* do.lpe : 'ndim' is a positive integer in [1,#(covariates)).")}
# 3. numk
numk = as.integer(numk)
if (!check_NumMM(numk,1,n/2,compact=FALSE)){stop("* do.lpe : 'numk' should be an integer in [2,nrow(X)/2).")}
# 4. preprocess
if (missing(preprocess)){ algpreprocess = "center" }
else { algpreprocess = match.arg(preprocess) }
#------------------------------------------------------------------------
## COMPUTATION : PRELIMINARY
# 1. preprocessing
tmplist = aux.preprocess.hidden(X,type=algpreprocess,algtype="linear")
trfinfo = tmplist$info
pX = tmplist$pX
# 2. neighborhood creation
nbdtype = c("knn",numk)
nbdsymmetric = "asymmetric"
nbdstruct = aux.graphnbd(pX,method="euclidean",
type=nbdtype,symmetric=nbdsymmetric)
nbdmask = nbdstruct$mask
#------------------------------------------------------------------------
## COMPUTATION : MAIN PART FOR LPE
# 1. build L
L = array(0,c(n,n))
onesN = array(1,c(n,n))
for (i in 1:n){
vecdi = (as.vector(nbdmask[i,])*1.0)
K = sum(vecdi)
Di = diag(vecdi)
L = L + Di + ((1/K)*(Di%*%onesN%*%Di))
}
# 2. find cost function
costTop = t(pX)%*%L%*%pX
# 3. find projection matrix
projection = aux.adjprojection(RSpectra::eigs(costTop, ndim)$vectors)
#------------------------------------------------------------------------
## RETURN
result = list()
result$Y = pX%*%projection
result$trfinfo = trfinfo
result$projection = projection
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/choosePositions.BchronologyRun.R
\name{choosePositions}
\alias{choosePositions}
\title{Compute positions to date next which result in maximal decrease of chronological uncertainty}
\usage{
choosePositions(
bchrRun,
N = 1,
newSds = 30,
newThicknesses = 0,
positions = bchrRun$predictPositions,
newCalCurve = "intcal13",
newOutlierProb = 0.05,
level = 0.5,
plot = TRUE,
count = 1,
linesAt = NULL
)
}
\arguments{
\item{bchrRun}{A run of the current chronology as output from \code{\link{Bchronology}}}
\item{N}{The number of new positions required}
\item{newSds}{The new standard deviations of the psuedo-added dates}
\item{newThicknesses}{The new thicknesses of the psuedo-added dates}
\item{positions}{The positions allowed to estimate the new positions to date. Defaults to the value of \code{predictPositions} from the
\code{\link{Bchronology}} run}
\item{newCalCurve}{The new calibration curve of the psuedo-added dates}
\item{newOutlierProb}{The new outlier probabilities of the psuedo-added dates}
\item{level}{The confidence level required for minimising the uncertainty. Defaults to 50\%. (Note: this will be estimated more robustly than the 95\% level)}
\item{plot}{Whether to plot the chronologies as they are produced}
\item{count}{Counter function (not for use other than by the function itself)}
\item{linesAt}{Horizontal line positions (not for use other than by the function itself)}
}
\value{
Some plots and the positions to date next
}
\description{
This function finds, for a given current chronology, created via
\code{\link{Bchronology}}, which positions (depths) to date next
If N = 1 it just finds the position with the biggest uncertainty
If N>1 it puts a date at the N = 1 position and re-runs
\code{\link{Bchronology}} with the extra psuedo date. It uses the
\code{\link{unCalibrate}} function with the un-calibrated age estimated
at the median of the chronology and the sd as specified via the
\code{newSds} argument. Other arguments specify the new thicknesses,
calibration curves, and outlier probabilities for newly inserted psuedo-dates.
}
\examples{
\donttest{
data(Glendalough)
GlenOut = Bchronology(ages=Glendalough$ages,
ageSds=Glendalough$ageSds,
calCurves=Glendalough$calCurves,
positions=Glendalough$position,
positionThicknesses=Glendalough$thickness,
ids=Glendalough$id,
predictPositions=seq(0,1500,by=10))
# Find out which two positions (depths) to date if we have room for two more dates
# Here going to choose 3 new positions to date
newPositions = choosePositions(GlenOut, N = 3)
print(newPositions)
# Suppose you are only interested in dating the new depths at 500, 600, or 700 cm
newPositions2 = choosePositions(GlenOut, N = 2,
positions = seq(500, 700, by = 10))
print(newPositions2)
}
}
\seealso{
\code{\link{Bchronology}} for the main function to create chronologies, \code{\link{unCalibrate}} for the ability to invert calendar dates for a given calibration curve.
}
| /man/choosePositions.Rd | no_license | allisonstegner/Bchron | R | false | true | 3,198 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/choosePositions.BchronologyRun.R
\name{choosePositions}
\alias{choosePositions}
\title{Compute positions to date next which result in maximal decrease of chronological uncertainty}
\usage{
choosePositions(
bchrRun,
N = 1,
newSds = 30,
newThicknesses = 0,
positions = bchrRun$predictPositions,
newCalCurve = "intcal13",
newOutlierProb = 0.05,
level = 0.5,
plot = TRUE,
count = 1,
linesAt = NULL
)
}
\arguments{
\item{bchrRun}{A run of the current chronology as output from \code{\link{Bchronology}}}
\item{N}{The number of new positions required}
\item{newSds}{The new standard deviations of the psuedo-added dates}
\item{newThicknesses}{The new thicknesses of the psuedo-added dates}
\item{positions}{The positions allowed to estimate the new positions to date. Defaults to the value of \code{predictPositions} from the
\code{\link{Bchronology}} run}
\item{newCalCurve}{The new calibration curve of the psuedo-added dates}
\item{newOutlierProb}{The new outlier probabilities of the psuedo-added dates}
\item{level}{The confidence level required for minimising the uncertainty. Defaults to 50\%. (Note: this will be estimated more robustly than the 95\% level)}
\item{plot}{Whether to plot the chronologies as they are produced}
\item{count}{Counter function (not for use other than by the function itself)}
\item{linesAt}{Horizontal line positions (not for use other than by the function itself)}
}
\value{
Some plots and the positions to date next
}
\description{
This function finds, for a given current chronology, created via
\code{\link{Bchronology}}, which positions (depths) to date next
If N = 1 it just finds the position with the biggest uncertainty
If N>1 it puts a date at the N = 1 position and re-runs
\code{\link{Bchronology}} with the extra psuedo date. It uses the
\code{\link{unCalibrate}} function with the un-calibrated age estimated
at the median of the chronology and the sd as specified via the
\code{newSds} argument. Other arguments specify the new thicknesses,
calibration curves, and outlier probabilities for newly inserted psuedo-dates.
}
\examples{
\donttest{
data(Glendalough)
GlenOut = Bchronology(ages=Glendalough$ages,
ageSds=Glendalough$ageSds,
calCurves=Glendalough$calCurves,
positions=Glendalough$position,
positionThicknesses=Glendalough$thickness,
ids=Glendalough$id,
predictPositions=seq(0,1500,by=10))
# Find out which two positions (depths) to date if we have room for two more dates
# Here going to choose 3 new positions to date
newPositions = choosePositions(GlenOut, N = 3)
print(newPositions)
# Suppose you are only interested in dating the new depths at 500, 600, or 700 cm
newPositions2 = choosePositions(GlenOut, N = 2,
positions = seq(500, 700, by = 10))
print(newPositions2)
}
}
\seealso{
\code{\link{Bchronology}} for the main function to create chronologies, \code{\link{unCalibrate}} for the ability to invert calendar dates for a given calibration curve.
}
|
context("XML imports/exports")
require("datasets")
test_that("Export to XML", {
expect_true(export(iris, "iris.xml") %in% dir())
})
test_that("Import from XML", {
expect_true(is.data.frame(import("iris.xml")))
})
unlink("iris.xml")
| /data/genthat_extracted_code/rio/tests/test_format_xml.R | no_license | surayaaramli/typeRrh | R | false | false | 255 | r | context("XML imports/exports")
require("datasets")
test_that("Export to XML", {
expect_true(export(iris, "iris.xml") %in% dir())
})
test_that("Import from XML", {
expect_true(is.data.frame(import("iris.xml")))
})
unlink("iris.xml")
|
/notes/EQG_2016/Day1/Exercise1.1/Exercise_1.1_instructions.R | no_license | nicolise/UW_EQG_2017 | R | false | false | 6,142 | r | ||
\name{pythTheo}
\alias{pythTheo}
\title{pythTheo}
\description{pythTheo described}
\usage{
pythTheo(x)
}
\arguments{
\item{pythTheo}{This formula figures out the third side of a triangle when you only 2 of the sides}
}
\value{
aSquared plus bSquared equals cSquared. return "Length of side a: " + aSquared + ", length of side b: " + bSquared + ", length of side c: " + cSquared
}
\author{Jessica Carnes}
| /man/pythTheo.Rd | no_license | jcarnes1/lis4370-finalProject | R | false | false | 406 | rd | \name{pythTheo}
\alias{pythTheo}
\title{pythTheo}
\description{pythTheo described}
\usage{
pythTheo(x)
}
\arguments{
\item{pythTheo}{This formula figures out the third side of a triangle when you only 2 of the sides}
}
\value{
aSquared plus bSquared equals cSquared. return "Length of side a: " + aSquared + ", length of side b: " + bSquared + ", length of side c: " + cSquared
}
\author{Jessica Carnes}
|
train <- read.csv("../input/train.csv", stringsAsFactors = F)
test <- read.csv("../input/test.csv", stringsAsFactors = F)
head(train)
head(test)
test["Survived"] = 0
submission = test[, c("PassengerId", "Survived")]
head(submission)
write.csv(submission, file = "nosurvivors.csv", row.names = F)
test[test$Sex == "male", "PredGender"] = 0
test[test$Sex == "female", "PredGender"] = 1
submission = test[, c("PassengerId", "PredGender")]
names(submission)[2] <- "Survived"
head(submission)
write.csv(submission, file = "womensurvive.csv", row.names = F)
| /src/r/kernels/analyticsdojo-titanic-baseline-models-analyticsdojo-r/script/titanic-baseline-models-analyticsdojo-r.r | no_license | PRL-PRG/trustworthy-titanic | R | false | false | 552 | r | train <- read.csv("../input/train.csv", stringsAsFactors = F)
test <- read.csv("../input/test.csv", stringsAsFactors = F)
head(train)
head(test)
test["Survived"] = 0
submission = test[, c("PassengerId", "Survived")]
head(submission)
write.csv(submission, file = "nosurvivors.csv", row.names = F)
test[test$Sex == "male", "PredGender"] = 0
test[test$Sex == "female", "PredGender"] = 1
submission = test[, c("PassengerId", "PredGender")]
names(submission)[2] <- "Survived"
head(submission)
write.csv(submission, file = "womensurvive.csv", row.names = F)
|
require(Cairo)
require(ggplot2)
#require(RColorBrewer)
data <- read.delim("./xval/scores.tab", header=TRUE)
p <- ggplot(data) +
aes_string(x="reg", y="accuracy") +
stat_summary(fun.data="mean_cl_boot") +
scale_x_log10(name="Regularization") +
scale_y_continuous(name="Accuracy") +
theme_bw()
ggsave("xval/scores.pdf", p, width=4, height=3, units="in")
| /exp/02_2020-06-03_interpretation/21_2021-03-03_run/xval/scores.R | no_license | marjanfarahbod/interpretation | R | false | false | 365 | r |
# Plot cross-validation accuracy against regularization strength.
# Use library() (errors loudly on a missing package) rather than
# require(), which merely returns FALSE.
library(Cairo)
library(ggplot2)
# library(RColorBrewer)
data <- read.delim("./xval/scores.tab", header = TRUE)
# aes_string() is deprecated in ggplot2; map the columns directly.
# mean_cl_boot draws the mean with bootstrap confidence limits.
p <- ggplot(data, aes(x = reg, y = accuracy)) +
  stat_summary(fun.data = "mean_cl_boot") +
  scale_x_log10(name = "Regularization") +
  scale_y_continuous(name = "Accuracy") +
  theme_bw()
ggsave("xval/scores.pdf", p, width = 4, height = 3, units = "in")
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 29374
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 29374
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#136.A#48.c#.w#9.s#33.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9975
c no.of clauses 29374
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 29374
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#136.A#48.c#.w#9.s#33.asp.qdimacs 9975 29374 E1 [] 0 136 9839 29374 NONE
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#136.A#48.c#.w#9.s#33.asp/ctrl.e#1.a#3.E#136.A#48.c#.w#9.s#33.asp.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 732 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 29374
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 29374
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#136.A#48.c#.w#9.s#33.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 9975
c no.of clauses 29374
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 29374
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#136.A#48.c#.w#9.s#33.asp.qdimacs 9975 29374 E1 [] 0 136 9839 29374 NONE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FDboostLSS.R
\name{cvrisk.FDboostLSS}
\alias{cvrisk.FDboostLSS}
\title{Cross-validation for FDboostLSS}
\usage{
\method{cvrisk}{FDboostLSS}(object, folds = cvLong(id = object[[1]]$id,
weights = model.weights(object[[1]])), grid = NULL, papply = mclapply,
trace = TRUE, fun = NULL, ...)
}
\arguments{
\item{object}{an object of class \code{FDboostLSS}.}
\item{folds}{a weight matrix with number of rows equal to the number of observations.
The number of columns corresponds to the number of cross-validation runs,
defaults to 25 bootstrap samples, resampling whole curves}
\item{grid}{defaults to a grid up to the current number of boosting iterations.
The default generates the grid according to the defaults of
\code{\link[gamboostLSS]{cvrisk.mboostLSS}} and \code{\link[gamboostLSS]{cvrisk.nc_mboostLSS}} for
models with cyclic or noncyclic fitting.}
\item{papply}{(parallel) apply function, defaults to \code{\link[parallel]{mclapply}},
see \code{\link[gamboostLSS]{cvrisk.mboostLSS}} for details}
\item{trace}{print status information during cross-validation? Defaults to \code{TRUE}.}
\item{fun}{if \code{fun} is \code{NULL}, the out-of-sample risk is returned.
\code{fun}, as a function of \code{object},
may extract any other characteristic of the cross-validated models. These are returned as is.}
\item{...}{additional arguments passed to \code{\link[parallel]{mclapply}}.}
}
\value{
An object of class \code{cvriskLSS} (when \code{fun} was not specified),
basically a matrix containing estimates of the empirical risk for a varying number
of bootstrap iterations. \code{plot} and \code{print} methods are available as well as an
\code{mstop} method, see \code{\link[gamboostLSS]{cvrisk.mboostLSS}}.
}
\description{
Multidimensional cross-validated estimation of the empirical risk for hyper-parameter selection,
for an object of class \code{FDboostLSS} setting the folds per default to resampling curves.
}
\details{
The function \code{cvrisk.FDboostLSS} is a wrapper for
\code{\link[gamboostLSS]{cvrisk.mboostLSS}} in package gamboostLSS.
It overrides the default for the folds, so that the folds are sampled on the level of curves
(not on the level of single observations, which does not make sense for functional response).
}
\seealso{
\code{\link[gamboostLSS]{cvrisk.mboostLSS}} in package gamboostLSS.
}
| /man/cvrisk.FDboostLSS.Rd | no_license | AEBilgrau/FDboost | R | false | true | 2,437 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FDboostLSS.R
\name{cvrisk.FDboostLSS}
\alias{cvrisk.FDboostLSS}
\title{Cross-validation for FDboostLSS}
\usage{
\method{cvrisk}{FDboostLSS}(object, folds = cvLong(id = object[[1]]$id,
weights = model.weights(object[[1]])), grid = NULL, papply = mclapply,
trace = TRUE, fun = NULL, ...)
}
\arguments{
\item{object}{an object of class \code{FDboostLSS}.}
\item{folds}{a weight matrix with number of rows equal to the number of observations.
The number of columns corresponds to the number of cross-validation runs,
defaults to 25 bootstrap samples, resampling whole curves}
\item{grid}{defaults to a grid up to the current number of boosting iterations.
The default generates the grid according to the defaults of
\code{\link[gamboostLSS]{cvrisk.mboostLSS}} and \code{\link[gamboostLSS]{cvrisk.nc_mboostLSS}} for
models with cyclic or noncyclic fitting.}
\item{papply}{(parallel) apply function, defaults to \code{\link[parallel]{mclapply}},
see \code{\link[gamboostLSS]{cvrisk.mboostLSS}} for details}
\item{trace}{print status information during cross-validation? Defaults to \code{TRUE}.}
\item{fun}{if \code{fun} is \code{NULL}, the out-of-sample risk is returned.
\code{fun}, as a function of \code{object},
may extract any other characteristic of the cross-validated models. These are returned as is.}
\item{...}{additional arguments passed to \code{\link[parallel]{mclapply}}.}
}
\value{
An object of class \code{cvriskLSS} (when \code{fun} was not specified),
basically a matrix containing estimates of the empirical risk for a varying number
of bootstrap iterations. \code{plot} and \code{print} methods are available as well as an
\code{mstop} method, see \code{\link[gamboostLSS]{cvrisk.mboostLSS}}.
}
\description{
Multidimensional cross-validated estimation of the empirical risk for hyper-parameter selection,
for an object of class \code{FDboostLSS} setting the folds per default to resampling curves.
}
\details{
The function \code{cvrisk.FDboostLSS} is a wrapper for
\code{\link[gamboostLSS]{cvrisk.mboostLSS}} in package gamboostLSS.
It overrides the default for the folds, so that the folds are sampled on the level of curves
(not on the level of single observations, which does not make sense for functional response).
}
\seealso{
\code{\link[gamboostLSS]{cvrisk.mboostLSS}} in package gamboostLSS.
}
|
# 1. summary() ----
# INSTALL AND LOAD PACKAGES ################################
library(datasets) # Load/unload base packages manually
# LOAD DATA ################################################
head(iris)
# SUMMARY() ################################################
summary(iris$Species) # Categorical variable
summary(iris$Sepal.Length) # Quantitative variable
summary(iris) # Entire data frame
# 2. describe()----
# Installs pacman ("package manager") if needed
if (!require("pacman")) install.packages("pacman")
# Use pacman to load add-on packages as desired
pacman::p_load(pacman, psych)
library(pacman, psych)
# LOAD DATA ################################################
head(iris)
# PSYCH PACKAGE ############################################
# Get info on package
# p_help(psych) # Opens package PDF in browser
p_help(psych, web = F) # Opens help in R Viewer
# DESCRIBE() ###############################################
# For quantitative variables only.
describe(iris$Sepal.Length) # One quantitative variable
describe(iris) # Entire data frame
hist(iris$Petal.Length)
summary(iris$Petal.Length)
summary(iris$Species) # Get names and n for each species
# 3.Select by category ----
# Versicolor
hist(iris$Petal.Length[iris$Species == "versicolor"],
main = "Petal Length: Versicolor")
# Virginica
hist(iris$Petal.Length[iris$Species == "virginica"],
main = "Petal Length: Virginica")
# Setosa
hist(iris$Petal.Length[iris$Species == "setosa"],
main = "Petal Length: Setosa")
# SELECT BY VALUE ##########################################
# Short petals only (all Setosa)
hist(iris$Petal.Length[iris$Petal.Length < 2],
main = "Petal Length < 2")
# MULTIPLE SELECTORS #######################################
# Short Virginica petals only
hist(iris$Petal.Length[iris$Species == "virginica" &
iris$Petal.Length < 5.5],
main = "Petal Length: Short Virginica")
# CREATE SUBSAMPLE #########################################
# Format: data[rows, columns]
# Leave rows or columns blank to select all
i.setosa <- iris[iris$Species == "setosa", ]
# EXPLORE SUBSAMPLE ########################################
head(i.setosa)
summary(i.setosa$Petal.Length)
hist(i.setosa$Petal.Length)
# 4. explore package() ----
# Reference:
# https://cran.r-project.org/web/packages/explore/vignettes/explore_mtcars.html
pacman::p_load(pacman)
p_load(explore)
explore_tbl(mtcars)
# describe(mtcars)
# explore(mtcars)
explore_all(mtcars)
#
# Is there a difference between cars with 3,4 and 5 gears? #############
# proportion of cars with 3, 4 and 5 gears
explore(mtcars, gear)
# Check relation between some of the variables and gear ########
p_load(tidyverse)
mtcars %>%
select(gear, mpg, hp, cyl, am) %>%
explore_all(target = gear)
# We see that 100% of cars with am = 0 (automatic) have 3 gears.
# All cars with am = 1 (manual) have 5 gears.
# high MPG: define cars that have mpg (miles per gallon) > 25
data <- mtcars %>%
mutate(highmpg = if_else(mpg > 25, 1, 0, 0)) %>%
select(-mpg)
data %>% explore(highmpg)
# What else is special about them?
data %>%
select(highmpg, cyl, disp, hp) %>%
explore_all(target = highmpg)
#
data %>%
select(highmpg, drat, wt, qsec, vs) %>%
explore_all(target = highmpg)
#
data %>%
select(highmpg, am, gear, carb) %>%
explore_all(target = highmpg)
# create decision tree
data %>%
explain_tree(target = highmpg) %>%
.$obj
#
# we have 6 highmpg out of 32 observations (18.75%)
# 7 cars are identified as highmpg.
# 1 car is being wrongly classified as highmpg.
# 6 cars are correctly classified as highmpg (0.8571)
#
# https://bradleyboehmke.github.io/HOML/DT.html
# we use rpart()
p_load(rpart, rpart.plot)
#
cart.model<- rpart(highmpg ~. ,
data = data,
method = "anova")
cart.model
#
prp(cart.model, # model
faclen = 0, # no abbrev. for variables
fallen.leaves = TRUE, # vertical leaves
shadow.col = "gray", # shadow
# number of correct classifications / number of observations in that node
extra=1)
# There seems to be a very strong correlation between wt (weight) and “high mpg”.
# Cars with a low weight are much more likely to have “high mpg”.
data %>% explore(wt, target = highmpg)
#
mtcars %>% explore(wt, mpg)
| /R01_4.R | no_license | benitairmadiani/summer2020 | R | false | false | 4,401 | r | # 1. summary() ----
# INSTALL AND LOAD PACKAGES ################################
library(datasets) # Load/unload base packages manually
# LOAD DATA ################################################
head(iris)
# SUMMARY() ################################################
summary(iris$Species) # Categorical variable
summary(iris$Sepal.Length) # Quantitative variable
summary(iris) # Entire data frame
# 2. describe()----
# Installs pacman ("package manager") if needed
if (!require("pacman")) install.packages("pacman")
# Use pacman to load add-on packages as desired
pacman::p_load(pacman, psych)
library(pacman, psych)
# LOAD DATA ################################################
head(iris)
# PSYCH PACKAGE ############################################
# Get info on package
# p_help(psych) # Opens package PDF in browser
p_help(psych, web = F) # Opens help in R Viewer
# DESCRIBE() ###############################################
# For quantitative variables only.
describe(iris$Sepal.Length) # One quantitative variable
describe(iris) # Entire data frame
hist(iris$Petal.Length)
summary(iris$Petal.Length)
summary(iris$Species) # Get names and n for each species
# 3.Select by category ----
# Versicolor
hist(iris$Petal.Length[iris$Species == "versicolor"],
main = "Petal Length: Versicolor")
# Virginica
hist(iris$Petal.Length[iris$Species == "virginica"],
main = "Petal Length: Virginica")
# Setosa
hist(iris$Petal.Length[iris$Species == "setosa"],
main = "Petal Length: Setosa")
# SELECT BY VALUE ##########################################
# Short petals only (all Setosa)
hist(iris$Petal.Length[iris$Petal.Length < 2],
main = "Petal Length < 2")
# MULTIPLE SELECTORS #######################################
# Short Virginica petals only
hist(iris$Petal.Length[iris$Species == "virginica" &
iris$Petal.Length < 5.5],
main = "Petal Length: Short Virginica")
# CREATE SUBSAMPLE #########################################
# Format: data[rows, columns]
# Leave rows or columns blank to select all
i.setosa <- iris[iris$Species == "setosa", ]
# EXPLORE SUBSAMPLE ########################################
head(i.setosa)
summary(i.setosa$Petal.Length)
hist(i.setosa$Petal.Length)
# 4. explore package() ----
# Reference:
# https://cran.r-project.org/web/packages/explore/vignettes/explore_mtcars.html
pacman::p_load(pacman)
p_load(explore)
explore_tbl(mtcars)
# describe(mtcars)
# explore(mtcars)
explore_all(mtcars)
#
# Is there a difference between cars with 3,4 and 5 gears? #############
# proportion of cars with 3, 4 and 5 gears
explore(mtcars, gear)
# Check relation between some of the variables and gear ########
p_load(tidyverse)
mtcars %>%
select(gear, mpg, hp, cyl, am) %>%
explore_all(target = gear)
# We see that 100% of cars with am = 0 (automatic) have 3 gears.
# All cars with am = 1 (manual) have 5 gears.
# high MPG: define cars that have mpg (miles per gallon) > 25
data <- mtcars %>%
mutate(highmpg = if_else(mpg > 25, 1, 0, 0)) %>%
select(-mpg)
data %>% explore(highmpg)
# What else is special about them?
data %>%
select(highmpg, cyl, disp, hp) %>%
explore_all(target = highmpg)
#
data %>%
select(highmpg, drat, wt, qsec, vs) %>%
explore_all(target = highmpg)
#
data %>%
select(highmpg, am, gear, carb) %>%
explore_all(target = highmpg)
# create decision tree
data %>%
explain_tree(target = highmpg) %>%
.$obj
#
# we have 6 highmpg out of 32 observations (18.75%)
# 7 cars are identified as highmpg.
# 1 car is being wrongly classified as highmpg.
# 6 cars are correctly classified as highmpg (0.8571)
#
# https://bradleyboehmke.github.io/HOML/DT.html
# we use rpart()
p_load(rpart, rpart.plot)
#
cart.model<- rpart(highmpg ~. ,
data = data,
method = "anova")
cart.model
#
prp(cart.model, # model
faclen = 0, # no abbrev. for variables
fallen.leaves = TRUE, # vertical leaves
shadow.col = "gray", # shadow
# number of correct classifications / number of observations in that node
extra=1)
# There seems to be a very strong correlation between wt (weight) and “high mpg”.
# Cars with a low weight are much more likely to have “high mpg”.
data %>% explore(wt, target = highmpg)
#
mtcars %>% explore(wt, mpg)
|
### Jinliang Yang
### March 31, 2015
### Run HAPMIX
source("lib/hapmixPar.R")
###### run in the linux
source("~/Documents/Github/zmSNPtools/Rcodes/setUpslurm.R")
# Write HAPMIX parameter files for every (generation, chromosome) pair and
# collect the corresponding run commands into a single SLURM batch script.
#
# gen:          vector of generation (lambda) values to run
# pwd:          working directory containing the HAPMIX input files
# slurmsh_name: path of the SLURM shell script to create
run_hapmix <- function(gen = 1:10, pwd = "largedata/hapmixrun",
                       slurmsh_name = "slurm-scripts/run_hapmix.sh") {
  # The batch script first changes into the HAPMIX working directory.
  cmds <- paste0("cd ", pwd)
  for (g in gen) {
    for (chr in seq_len(10)) {
      par_id <- paste0("hprun_", "gen", g, "_chr", chr, ".par")
      # Emit the HAPMIX parameter file for this generation/chromosome pair.
      hapmixPar(lambda = g, parfile = par_id,
                ref1geno = paste0("mex12_chr", chr, ".out"),
                ref2geno = paste0("maizeland23_chr", chr, ".out"),
                ref1snp = paste0("snp_mex_chr", chr, ".info"),
                ref2snp = paste0("snp_maize_chr", chr, ".info"),
                admixsnp = paste0("toton_chr", chr, ".snpinfo"),
                admixgeno = paste0("toton_chr", chr, ".out"),
                admixind = paste0("toton_chr", chr, ".ind"),
                ref1label = "MEX", ref2label = "MZ",
                rates = paste0("toton_chr", chr, ".rate"),
                admixlabel = "TOTON",
                chr = chr, outdir = "HPOUT", pwd = pwd,
                mode = "LOCAL_ANC")
      cmds <- c(cmds, paste0("perl bin/runHapmix.pl ", par_id))
    }
  }
  # Job id is the script file name with any leading directories stripped.
  setUpslurm(slurmsh = slurmsh_name,
             codesh = cmds,
             wd = NULL,
             jobid = gsub(".*/", "", slurmsh_name),
             email = "yangjl0930@gmail.com")
}
##########################################
run_hapmix(gen=1610, pwd="largedata/hapmixrun", slurmsh_name="slurm-scripts/run_hapmix.sh")
###>>> In this path: cd /home/jolyang/Documents/Github/N2
###>>> [ note: --ntasks=INT, number of cup ]
###>>> [ note: --mem=16000, 16G memory ]
###>>> RUN: sbatch -p bigmemh --ntasks=1 --mem 8G --time=30:00:00 slurm-scripts/run_hapmix.sh
#perl bin/runHapmix.pl hprun1_chr10.par
gen <- seq(10, 5000, by=10)
run_hapmix(gen=gen[1:50], pwd="largedata/hapmixrun1", slurmsh_name="slurm-scripts/run_hapmix1.sh")
run_hapmix(gen=gen[51:100], pwd="largedata/hapmixrun2", slurmsh_name="slurm-scripts/run_hapmix2.sh")
run_hapmix(gen=gen[101:150], pwd="largedata/hapmixrun3", slurmsh_name="slurm-scripts/run_hapmix3.sh")
run_hapmix(gen=gen[151:200], pwd="largedata/hapmixrun4", slurmsh_name="slurm-scripts/run_hapmix4.sh")
run_hapmix(gen=gen[201:250], pwd="largedata/hapmixrun5", slurmsh_name="slurm-scripts/run_hapmix5.sh")
run_hapmix(gen=gen[251:300], pwd="largedata/hapmixrun6", slurmsh_name="slurm-scripts/run_hapmix6.sh")
run_hapmix(gen=gen[301:350], pwd="largedata/hapmixrun7", slurmsh_name="slurm-scripts/run_hapmix7.sh")
run_hapmix(gen=gen[351:400], pwd="largedata/hapmixrun8", slurmsh_name="slurm-scripts/run_hapmix8.sh")
run_hapmix(gen=gen[401:450], pwd="largedata/hapmixrun9", slurmsh_name="slurm-scripts/run_hapmix9.sh")
run_hapmix(gen=gen[451:500], pwd="largedata/hapmixrun10", slurmsh_name="slurm-scripts/run_hapmix10.sh")
| /profiling/1.Introgression-redo/1.C.3_hapmix_par.R | no_license | yangjl/N2 | R | false | false | 2,954 | r | ### Jinliang Yang
### March 31, 2015
### Run HAPMIX
source("lib/hapmixPar.R")
###### run in the linux
source("~/Documents/Github/zmSNPtools/Rcodes/setUpslurm.R")
run_hapmix <- function(gen=1:10, pwd="largedata/hapmixrun", slurmsh_name="slurm-scripts/run_hapmix.sh"){
outsh <- paste0("cd ", pwd)
for(geni in gen){
for(chri in 1:10){
parfileid <- paste0("hprun_","gen", geni, "_chr", chri, ".par")
hapmixPar(lambda=geni, parfile= parfileid,
ref1geno= paste0("mex12_chr", chri, ".out"), ref2geno= paste0("maizeland23_chr", chri, ".out"),
ref1snp= paste0("snp_mex_chr", chri, ".info"), ref2snp= paste0("snp_maize_chr", chri, ".info"),
admixsnp= paste0("toton_chr", chri, ".snpinfo"), admixgeno= paste0("toton_chr", chri, ".out"),
admixind= paste0("toton_chr", chri, ".ind"),
ref1label="MEX", ref2label="MZ",
rates= paste0("toton_chr", chri, ".rate"), admixlabel="TOTON",
chr= chri, outdir= "HPOUT", pwd=pwd,
mode="LOCAL_ANC")
temsh <- paste0("perl bin/runHapmix.pl ", parfileid)
outsh <- c(outsh, temsh)
}
}
#### setup the slurmsh
jobid <- gsub(".*/", "", slurmsh_name)
setUpslurm(slurmsh=slurmsh_name,
codesh= outsh,
wd=NULL, jobid=jobid, email="yangjl0930@gmail.com")
}
##########################################
run_hapmix(gen=1610, pwd="largedata/hapmixrun", slurmsh_name="slurm-scripts/run_hapmix.sh")
###>>> In this path: cd /home/jolyang/Documents/Github/N2
###>>> [ note: --ntasks=INT, number of cup ]
###>>> [ note: --mem=16000, 16G memory ]
###>>> RUN: sbatch -p bigmemh --ntasks=1 --mem 8G --time=30:00:00 slurm-scripts/run_hapmix.sh
#perl bin/runHapmix.pl hprun1_chr10.par
gen <- seq(10, 5000, by=10)
run_hapmix(gen=gen[1:50], pwd="largedata/hapmixrun1", slurmsh_name="slurm-scripts/run_hapmix1.sh")
run_hapmix(gen=gen[51:100], pwd="largedata/hapmixrun2", slurmsh_name="slurm-scripts/run_hapmix2.sh")
run_hapmix(gen=gen[101:150], pwd="largedata/hapmixrun3", slurmsh_name="slurm-scripts/run_hapmix3.sh")
run_hapmix(gen=gen[151:200], pwd="largedata/hapmixrun4", slurmsh_name="slurm-scripts/run_hapmix4.sh")
run_hapmix(gen=gen[201:250], pwd="largedata/hapmixrun5", slurmsh_name="slurm-scripts/run_hapmix5.sh")
run_hapmix(gen=gen[251:300], pwd="largedata/hapmixrun6", slurmsh_name="slurm-scripts/run_hapmix6.sh")
run_hapmix(gen=gen[301:350], pwd="largedata/hapmixrun7", slurmsh_name="slurm-scripts/run_hapmix7.sh")
run_hapmix(gen=gen[351:400], pwd="largedata/hapmixrun8", slurmsh_name="slurm-scripts/run_hapmix8.sh")
run_hapmix(gen=gen[401:450], pwd="largedata/hapmixrun9", slurmsh_name="slurm-scripts/run_hapmix9.sh")
run_hapmix(gen=gen[451:500], pwd="largedata/hapmixrun10", slurmsh_name="slurm-scripts/run_hapmix10.sh")
|
\name{ProjectTemplate-package}
\alias{ProjectTemplate-package}
\alias{ProjectTemplate}
\docType{package}
\title{
Automates the creation of new statistical analysis projects.
}
\description{
ProjectTemplate provides functions to automatically build a directory structure for a new R project. Using this structure, ProjectTemplate is able to automate data loading, preprocessing, library importing and unit testing.
}
\details{
\tabular{ll}{
Package: \tab ProjectTemplate\cr
Type: \tab Package\cr
Version: \tab 0.1-3\cr
Date: \tab 2010-10-02\cr
License: \tab Artistic-2.0\cr
LazyLoad: \tab yes\cr
}
create.project('project_name')
}
\references{
This code is inspired by the skeleton structure used by Ruby on Rails.
}
\keyword{ package }
\examples{
\dontrun{
library('ProjectTemplate')
create.project('project_name')
setwd('project_name')
load.project()}}
| /man/ProjectTemplate-package.Rd | no_license | rtelmore/ProjectTemplate | R | false | false | 857 | rd | \name{ProjectTemplate-package}
\alias{ProjectTemplate-package}
\alias{ProjectTemplate}
\docType{package}
\title{
Automates the creation of new statistical analysis projects.
}
\description{
ProjectTemplate provides functions to automatically build a directory structure for a new R project. Using this structure, ProjectTemplate is able to automate data loading, preprocessing, library importing and unit testing.
}
\details{
\tabular{ll}{
Package: \tab ProjectTemplate\cr
Type: \tab Package\cr
Version: \tab 0.1-3\cr
Date: \tab 2010-10-02\cr
License: \tab Artistic-2.0\cr
LazyLoad: \tab yes\cr
}
create.project('project_name')
}
\references{
This code is inspired by the skeleton structure used by Ruby on Rails.
}
\keyword{ package }
\examples{
\dontrun{
library('ProjectTemplate')
create.project('project_name')
setwd('project_name')
load.project()}}
|
\name{make_template}
\alias{make_template}
\title{Make a template that feeds into JASPAR databases}
\usage{
make_template(x, PARAM = NA, TAG = NA, sep = "\t",
outFpre = NULL)
}
\arguments{
\item{x}{matrix, the pfm}
\item{PARAM}{a list, the PARAM(s)}
\item{TAG}{a list, the TAG(s)}
\item{sep}{a string, the delimiter}
\item{outFpre}{a string, a file path to save}
}
\value{
A string of the template, and save it in output format of
`.template' and `.matrix' if `outFpre' specified.
}
\description{
Make a template that feeds into JASPAR databases
}
\details{
NA
}
\examples{
x <-
rbind(
c(3, 0, 0, 0, 0, 0),
c(8, 0, 23, 0, 0, 0),
c(2, 23, 0, 23, 0, 24),
c(11, 1, 1, 1, 24, 0)
)
PARAM <-
list(
INT_ID=NULL,
BASE_ID="MA0006",
COLLECTION="CORE",
VERSION=1,
NAME="Arnt-Ahr",
SPECIES="10090")
TAG <-
list(
class="bHLH",
medline="7592839",
tax_group="vertebrate",
sysgroup="vertebrate",
acc="P30561",
acc="P53762",
comment="dimer",
type="SELEX",
newest=1
)
cat(make_template(x=x,PARAM=PARAM,TAG=TAG))
}
\author{
Xiaobei Zhao
}
| /man/make_template.Rd | no_license | cran/JASPAR | R | false | false | 1,210 | rd | \name{make_template}
\alias{make_template}
\title{Make a template that feeds into JASPAR databases}
\usage{
make_template(x, PARAM = NA, TAG = NA, sep = "\t",
outFpre = NULL)
}
\arguments{
\item{x}{matrix, the pfm}
\item{PARAM}{a list, the PARAM(s)}
\item{TAG}{a list, the TAG(s)}
\item{sep}{a string, the delimiter}
\item{outFpre}{a string, a file path to save}
}
\value{
A string of the template, and save it in output format of
`.template' and `.matrix' if `outFpre' specified.
}
\description{
Make a template that feeds into JASPAR databases
}
\details{
NA
}
\examples{
x <-
rbind(
c(3, 0, 0, 0, 0, 0),
c(8, 0, 23, 0, 0, 0),
c(2, 23, 0, 23, 0, 24),
c(11, 1, 1, 1, 24, 0)
)
PARAM <-
list(
INT_ID=NULL,
BASE_ID="MA0006",
COLLECTION="CORE",
VERSION=1,
NAME="Arnt-Ahr",
SPECIES="10090")
TAG <-
list(
class="bHLH",
medline="7592839",
tax_group="vertebrate",
sysgroup="vertebrate",
acc="P30561",
acc="P53762",
comment="dimer",
type="SELEX",
newest=1
)
cat(make_template(x=x,PARAM=PARAM,TAG=TAG))
}
\author{
Xiaobei Zhao
}
|
# Getting & Cleaning Data project: build a tidy summary of the UCI HAR
# (smartphone activity recognition) data set.
library(data.table)
# URL of the raw data archive.
data_url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'
# Download in binary mode (mode = 'wb') so the zip is not corrupted on
# Windows, then unpack next to this script.
download.file(data_url, './UCI HAR Dataset.zip', mode = 'wb')
unzip("UCI HAR Dataset.zip", exdir = getwd())
# Feature (column) names for the measurement files.
features <- read.csv('./UCI HAR Dataset/features.txt', header = FALSE, sep = ' ')
features <- as.character(features[, 2])
# Training set: measurements, activity codes, and subject ids.
data.train.x <- read.table('./UCI HAR Dataset/train/X_train.txt')
data.train.activity <- read.csv('./UCI HAR Dataset/train/y_train.txt', header = FALSE, sep = ' ')
data.train.subject <- read.csv('./UCI HAR Dataset/train/subject_train.txt', header = FALSE, sep = ' ')
data.train <- data.frame(data.train.subject, data.train.activity, data.train.x)
names(data.train) <- c(c('subject', 'activity'), features)
# Test set: same layout as the training set.
data.test.x <- read.table('./UCI HAR Dataset/test/X_test.txt')
data.test.activity <- read.csv('./UCI HAR Dataset/test/y_test.txt', header = FALSE, sep = ' ')
data.test.subject <- read.csv('./UCI HAR Dataset/test/subject_test.txt', header = FALSE, sep = ' ')
data.test <- data.frame(data.test.subject, data.test.activity, data.test.x)
names(data.test) <- c(c('subject', 'activity'), features)
# 1. Merge training and test sets.
data.all <- rbind(data.train, data.test)
# 2. Keep only mean and standard-deviation measurements; the +2 offset
#    skips past the subject and activity columns.
mean_std.select <- grep('mean|std', features)
data.sub <- data.all[, c(1, 2, mean_std.select + 2)]
# 3. Replace numeric activity codes with descriptive activity names.
activity.labels <- read.table('./UCI HAR Dataset/activity_labels.txt', header = FALSE)
activity.labels <- as.character(activity.labels[, 2])
data.sub$activity <- activity.labels[data.sub$activity]
# 4. Expand abbreviated variable names into descriptive ones.
name.new <- names(data.sub)
name.new <- gsub("[(][)]", "", name.new)
name.new <- gsub("^t", "TimeDomain_", name.new)
name.new <- gsub("^f", "FrequencyDomain_", name.new)
name.new <- gsub("Acc", "Accelerometer", name.new)
name.new <- gsub("Gyro", "Gyroscope", name.new)
name.new <- gsub("Mag", "Magnitude", name.new)
name.new <- gsub("-mean-", "_Mean_", name.new)
name.new <- gsub("-std-", "_StandardDeviation_", name.new)
name.new <- gsub("-", "_", name.new)
names(data.sub) <- name.new
# 5. Average every measurement for each activity and each subject.
#    Use ncol() instead of a hard-coded upper bound (81) so the script
#    still works if the number of selected feature columns changes.
data.tidy <- aggregate(data.sub[, 3:ncol(data.sub)],
                       by = list(activity = data.sub$activity, subject = data.sub$subject),
                       FUN = mean)
write.table(x=data.tidy, file="data_tidy.txt", row.names=FALSE) | /run_analysis.R | no_license | jrgalia/cleaningdata | R | false | false | 2,448 | r | #load library
# Getting & Cleaning Data project: build a tidy summary of the UCI HAR
# (smartphone activity recognition) data set.
library(data.table)
# URL of the raw data archive.
data_url <- 'https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip'
# Download in binary mode (mode = 'wb') so the zip is not corrupted on
# Windows, then unpack next to this script.
download.file(data_url, './UCI HAR Dataset.zip', mode = 'wb')
unzip("UCI HAR Dataset.zip", exdir = getwd())
# Feature (column) names for the measurement files.
features <- read.csv('./UCI HAR Dataset/features.txt', header = FALSE, sep = ' ')
features <- as.character(features[, 2])
# Training set: measurements, activity codes, and subject ids.
data.train.x <- read.table('./UCI HAR Dataset/train/X_train.txt')
data.train.activity <- read.csv('./UCI HAR Dataset/train/y_train.txt', header = FALSE, sep = ' ')
data.train.subject <- read.csv('./UCI HAR Dataset/train/subject_train.txt', header = FALSE, sep = ' ')
data.train <- data.frame(data.train.subject, data.train.activity, data.train.x)
names(data.train) <- c(c('subject', 'activity'), features)
# Test set: same layout as the training set.
data.test.x <- read.table('./UCI HAR Dataset/test/X_test.txt')
data.test.activity <- read.csv('./UCI HAR Dataset/test/y_test.txt', header = FALSE, sep = ' ')
data.test.subject <- read.csv('./UCI HAR Dataset/test/subject_test.txt', header = FALSE, sep = ' ')
data.test <- data.frame(data.test.subject, data.test.activity, data.test.x)
names(data.test) <- c(c('subject', 'activity'), features)
# 1. Merge training and test sets.
data.all <- rbind(data.train, data.test)
# 2. Keep only mean and standard-deviation measurements; the +2 offset
#    skips past the subject and activity columns.
mean_std.select <- grep('mean|std', features)
data.sub <- data.all[, c(1, 2, mean_std.select + 2)]
# 3. Replace numeric activity codes with descriptive activity names.
activity.labels <- read.table('./UCI HAR Dataset/activity_labels.txt', header = FALSE)
activity.labels <- as.character(activity.labels[, 2])
data.sub$activity <- activity.labels[data.sub$activity]
# 4. Expand abbreviated variable names into descriptive ones.
name.new <- names(data.sub)
name.new <- gsub("[(][)]", "", name.new)
name.new <- gsub("^t", "TimeDomain_", name.new)
name.new <- gsub("^f", "FrequencyDomain_", name.new)
name.new <- gsub("Acc", "Accelerometer", name.new)
name.new <- gsub("Gyro", "Gyroscope", name.new)
name.new <- gsub("Mag", "Magnitude", name.new)
name.new <- gsub("-mean-", "_Mean_", name.new)
name.new <- gsub("-std-", "_StandardDeviation_", name.new)
name.new <- gsub("-", "_", name.new)
names(data.sub) <- name.new
# 5. Average every measurement for each activity and each subject.
#    Use ncol() instead of a hard-coded upper bound (81) so the script
#    still works if the number of selected feature columns changes.
data.tidy <- aggregate(data.sub[, 3:ncol(data.sub)],
                       by = list(activity = data.sub$activity, subject = data.sub$subject),
                       FUN = mean)
write.table(x=data.tidy, file="data_tidy.txt", row.names=FALSE) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigateway_operations.R
\name{apigateway_get_usage_plan_key}
\alias{apigateway_get_usage_plan_key}
\title{Gets a usage plan key of a given key identifier}
\usage{
apigateway_get_usage_plan_key(usagePlanId, keyId)
}
\arguments{
\item{usagePlanId}{[required] The Id of the UsagePlan resource representing the usage plan containing
the to-be-retrieved UsagePlanKey resource representing a plan customer.}
\item{keyId}{[required] The key Id of the to-be-retrieved UsagePlanKey resource representing a
plan customer.}
}
\description{
Gets a usage plan key of a given key identifier.
See \url{https://www.paws-r-sdk.com/docs/apigateway_get_usage_plan_key/} for full documentation.
}
\keyword{internal}
| /cran/paws.networking/man/apigateway_get_usage_plan_key.Rd | permissive | paws-r/paws | R | false | true | 776 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigateway_operations.R
\name{apigateway_get_usage_plan_key}
\alias{apigateway_get_usage_plan_key}
\title{Gets a usage plan key of a given key identifier}
\usage{
apigateway_get_usage_plan_key(usagePlanId, keyId)
}
\arguments{
\item{usagePlanId}{[required] The Id of the UsagePlan resource representing the usage plan containing
the to-be-retrieved UsagePlanKey resource representing a plan customer.}
\item{keyId}{[required] The key Id of the to-be-retrieved UsagePlanKey resource representing a
plan customer.}
}
\description{
Gets a usage plan key of a given key identifier.
See \url{https://www.paws-r-sdk.com/docs/apigateway_get_usage_plan_key/} for full documentation.
}
\keyword{internal}
|
set.seed( 40 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
simu <- function(s) {
  # Simulate GLCM (gray-level co-occurrence) count vectors for five classes.
  #
  # s: scalar multiplier of the bivariate-normal covariance used to generate
  #    co-occurrence locations (larger s = more within-class spread).
  #
  # Returns a 100 x 136 matrix of rounded counts: 20 replicate "images" per
  # class, one class per mean offset in {5, 5.5, 6, 6.5, 7}.  The 136 columns
  # are the lower triangle of a symmetric 16 x 16 grid.
  # Requires mvtnorm::rmvnorm and fields::as.image / fields::image.smooth.
  prob_glcm <- function(offset, s, mc = 30000) {
    # Length-136 probability vector: draw mc points from a bivariate normal
    # centred at (2 + offset, 14 - offset) with covariance
    # s * [[1, -0.7], [-0.7, 1]], bin them on a 16 x 16 unit grid,
    # symmetrize, smooth, and renormalize.
    # (Fixes the original self-referential default `s = s`, which would have
    # errored if `s` were ever omitted.)
    mu <- c(2 + offset, 14 - offset)
    sigma <- matrix(s * c(1, -0.7, -0.7, 1), nrow = 2)
    elip <- rmvnorm(mc, mu, sigma)
    # Vectorized binning.  A draw (x, y) with 0 < x < 16 and 0 < y < 16 lands
    # in column m = ceiling(x), row k = ceiling(y), i.e. linear cell index
    # 16 - k + 1 + 16 * (m - 1).  Points outside the grid, or exactly on a
    # grid line (a measure-zero event), are discarded -- the same rule as the
    # original point-by-point triple loop, but O(mc) instead of O(mc * 256).
    x <- elip[, 1]
    y <- elip[, 2]
    inside <- x > 0 & x < 16 & y > 0 & y < 16 &
      x != ceiling(x) & y != ceiling(y)
    idx <- 16 - ceiling(y[inside]) + 1 + 16 * (ceiling(x[inside]) - 1)
    cell_count <- tabulate(idx, nbins = 16 * 16)
    z <- cell_count / sum(cell_count)
    # Symmetrize across the grid diagonal with an explicit index map (a GLCM
    # is symmetric by construction).
    z_whole <- z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
                   17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
                   33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
                   49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
                   65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
                   81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
                   97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
                   113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
                   129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
                   145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
                   161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
                   177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
                   193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
                   209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
                   225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
                   241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
    # Kernel-smooth the binned grid (fields::image.smooth).
    arg. <- expand.grid(c(0.5:15.5), c(15.5:0.5))
    I <- as.image(Z = z_whole, x = arg., grid = list(x = seq(0.5, 15.5, 1), y = seq(0.5, 15.5, 1)))
    image(I)  # visual sanity check only (plotting side effect, as in original)
    smooth.I <- image.smooth(I, theta = 1)
    # Read the smoothed surface back out in lower-triangle order: row r
    # contributes columns 16, 15, ..., 17 - r.  Note smooth.I$z's row/column
    # orientation -- this mirrors the original extraction loop exactly.
    den <- unlist(lapply(seq_len(16), function(r) smooth.I$z[r, 16:(17 - r)]))
    den / sum(den)
  }
  # One probability vector per class offset.  Calling prob_glcm in this order
  # keeps the RNG stream identical to computing prob1..prob5 one by one, so
  # the simulated data match the original implementation bit-for-bit.
  offsets <- c(5, 5.5, 6, 6.5, 7)
  probs <- lapply(offsets, prob_glcm, s = s)
  # 20 images per class: each image draws a random total count t and rounds
  # the expected cell counts t * p.
  glcm <- matrix(0, nrow = 20 * length(probs), ncol = 136)
  for (g in seq_along(probs)) {
    for (j in ((g - 1) * 20 + 1):(g * 20)) {
      t <- round(runif(1, 500, 20000), 0)
      glcm[j, ] <- round(t * probs[[g]])
    }
  }
  glcm
}
# ---- Fit the hierarchical rounded Gaussian spatial DP (RGSDP) model to the
# ---- simulated GLCM counts, then cluster and compare against feature-based
# ---- competitors (GMM, hierarchical, k-means, spectral, consensus).
Z=simu(s=12)
Z_met=Z
T_met=nrow(Z_met)   # number of simulated images
n=ncol(Z_met)       # number of GLCM cells (136, lower triangle)
# Per-image total count, standardized for use as a covariate.
X=apply(Z_met,1,sum)
X_met=X
sX_met=(X-mean(X))/sd(X)
# Design array R: for image t, a 2 x n matrix whose columns are (1, sX_met[t])
# -- intercept plus standardized total count, replicated across cells.
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
##########################        MCMC        ##############################
############################################################################
library(HI)
library(invgamma)
# update_Y / update_theta / update_Beta / update_tau2 / update_sig2 /
# update_rho / update_v and error_rate are defined in these sourced files.
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
# Spatial structure of the 16x16 grid: D (degree/diagonal) and W (adjacency).
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
# Initial latent-surface guess.  NOTE(review): `try` shadows base::try here.
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
  for (j in 1:n){
    if (Z_met[i,j]==0) {
      try[i,j]=rnorm(1,mean=-10,sd=1)
    } else {
      try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
    }
  }
}
# Sanity check: the initial Y update should produce no infinite values.
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
# Chain storage and starting values for the scalar parameters.
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
# Hyper-parameters: uniform-ish Beta prior for v, vague inverse-gamma priors
# for tau2/sig2, and a diffuse normal prior for Beta.
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
# Gibbs sampler: each iteration updates the latent Y, the DP-distributed
# surfaces Theta, the regression coefficients, and the variance/spatial
# parameters, conditioning on the most recent values of the others.
for (iter in 2:N) {
  Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
  Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
  Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
  tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
  sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
  rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
  v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
# Geweke convergence diagnostics on the post-burn-in chains (two-sided
# p-values; small values indicate non-convergence).
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
# Posterior mean of the latent surfaces (averaged over post-burn-in draws),
# used as input to clustering.
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
  Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
library('NbClust')
# NOTE(review): the first NbClust call only prints; its result is discarded
# and the identical call below is what is actually used.
NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
HRGSDP=NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')$Best.partition
# Rebuild the full (symmetric, 256-column) GLCM from the 136 lower-triangle
# columns via an explicit index map, for the feature-based competitors.
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
                2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
                4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
                7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
                11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
                16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
                22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
                29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
                37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
                46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
                56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
                67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
                79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
                92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
                106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
                121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
# Extract summary texture features and run the competing clustering methods,
# each asked for 5 clusters (the true number of classes).
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
                           clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
# Cross-tabulate each partition against the true labels (5 classes x 20).
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
# Summarize each clustering method against the true labels: Pearson
# chi-square statistic of the cross-tabulation, number of clusters found,
# and misclassification rate (error_rate() is sourced from cal_stat.R).
# Replaces six copy-pasted res_* lines with one loop; also fixes the
# 'nunber of clusters' typo in the output column name.
tabs <- list(FeaCO = aa, FeaGMM = bb, FeaHC = cc, FeaKM = dd, FeaSC = ee,
             HRGSDP = ff)
rows <- lapply(names(tabs), function(nm) {
  tb <- tabs[[nm]]
  c(chisq.test(tb, correct = TRUE)$statistic, ncol(tb), error_rate(tb), nm)
})
xx <- do.call(rbind, rows)
rownames(xx) <- paste0("res_", names(tabs))  # same rownames as the original rbind
colnames(xx) <- c('pearson.chi.sq', 'number of clusters', 'error.rate', 'method')
xx <- as.data.frame(xx)
print(xx)
| /s=12/simu_40.R | no_license | mguindanigroup/Radiomics-Hierarchical-Rounded-Gaussian-Spatial-Dirichlet-Process | R | false | false | 9,293 | r | set.seed( 40 )
library(mvtnorm)
library(fields)
library(Rcpp)
library(mclust)
library(kernlab)
library(ConsensusClusterPlus)
simu <- function(s) {
  # Simulate GLCM (gray-level co-occurrence) count vectors for five classes.
  #
  # s: scalar multiplier of the bivariate-normal covariance used to generate
  #    co-occurrence locations (larger s = more within-class spread).
  #
  # Returns a 100 x 136 matrix of rounded counts: 20 replicate "images" per
  # class, one class per mean offset in {5, 5.5, 6, 6.5, 7}.  The 136 columns
  # are the lower triangle of a symmetric 16 x 16 grid.
  # Requires mvtnorm::rmvnorm and fields::as.image / fields::image.smooth.
  prob_glcm <- function(offset, s, mc = 30000) {
    # Length-136 probability vector: draw mc points from a bivariate normal
    # centred at (2 + offset, 14 - offset) with covariance
    # s * [[1, -0.7], [-0.7, 1]], bin them on a 16 x 16 unit grid,
    # symmetrize, smooth, and renormalize.
    # (Fixes the original self-referential default `s = s`, which would have
    # errored if `s` were ever omitted.)
    mu <- c(2 + offset, 14 - offset)
    sigma <- matrix(s * c(1, -0.7, -0.7, 1), nrow = 2)
    elip <- rmvnorm(mc, mu, sigma)
    # Vectorized binning.  A draw (x, y) with 0 < x < 16 and 0 < y < 16 lands
    # in column m = ceiling(x), row k = ceiling(y), i.e. linear cell index
    # 16 - k + 1 + 16 * (m - 1).  Points outside the grid, or exactly on a
    # grid line (a measure-zero event), are discarded -- the same rule as the
    # original point-by-point triple loop, but O(mc) instead of O(mc * 256).
    x <- elip[, 1]
    y <- elip[, 2]
    inside <- x > 0 & x < 16 & y > 0 & y < 16 &
      x != ceiling(x) & y != ceiling(y)
    idx <- 16 - ceiling(y[inside]) + 1 + 16 * (ceiling(x[inside]) - 1)
    cell_count <- tabulate(idx, nbins = 16 * 16)
    z <- cell_count / sum(cell_count)
    # Symmetrize across the grid diagonal with an explicit index map (a GLCM
    # is symmetric by construction).
    z_whole <- z[c(1,17,33,49,65,81,97,113,129,145,161,177,193,209,225,241,
                   17,18,34,50,66,82,98,114,130,146,162,178,194,210,226,242,
                   33,34,35,51,67,83,99,115,131,147,163,179,195,211,227,243,
                   49,50,51,52,68,84,100,116,132,148,164,180,196,212,228,244,
                   65,66,67,68,69,85,101,117,133,149,165,181,197,213,229,245,
                   81,82,83,84,85,86,102,118,134,150,166,182,198,214,230,246,
                   97,98,99,100,101,102,103,119,135,151,167,183,199,215,231,247,
                   113,114,115,116,117,118,119,120,136,152,168,184,200,216,232,248,
                   129,130,131,132,133,134,135,136,137,153,169,185,201,217,233,249,
                   145,146,147,148,149,150,151,152,153,154,170,186,202,218,234,250,
                   161,162,163,164,165,166,167,168,169,170,171,187,203,219,235,251,
                   177,178,179,180,181,182,183,184,185,186,187,188,204,220,236,252,
                   193,194,195,196,197,198,199,200,201,202,203,204,205,221,237,253,
                   209,210,211,212,213,214,215,216,217,218,219,220,221,222,238,254,
                   225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,255,
                   241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256)]
    # Kernel-smooth the binned grid (fields::image.smooth).
    arg. <- expand.grid(c(0.5:15.5), c(15.5:0.5))
    I <- as.image(Z = z_whole, x = arg., grid = list(x = seq(0.5, 15.5, 1), y = seq(0.5, 15.5, 1)))
    image(I)  # visual sanity check only (plotting side effect, as in original)
    smooth.I <- image.smooth(I, theta = 1)
    # Read the smoothed surface back out in lower-triangle order: row r
    # contributes columns 16, 15, ..., 17 - r.  Note smooth.I$z's row/column
    # orientation -- this mirrors the original extraction loop exactly.
    den <- unlist(lapply(seq_len(16), function(r) smooth.I$z[r, 16:(17 - r)]))
    den / sum(den)
  }
  # One probability vector per class offset.  Calling prob_glcm in this order
  # keeps the RNG stream identical to computing prob1..prob5 one by one, so
  # the simulated data match the original implementation bit-for-bit.
  offsets <- c(5, 5.5, 6, 6.5, 7)
  probs <- lapply(offsets, prob_glcm, s = s)
  # 20 images per class: each image draws a random total count t and rounds
  # the expected cell counts t * p.
  glcm <- matrix(0, nrow = 20 * length(probs), ncol = 136)
  for (g in seq_along(probs)) {
    for (j in ((g - 1) * 20 + 1):(g * 20)) {
      t <- round(runif(1, 500, 20000), 0)
      glcm[j, ] <- round(t * probs[[g]])
    }
  }
  glcm
}
Z=simu(s=12)
Z_met=Z
T_met=nrow(Z_met)
n=ncol(Z_met)
X=apply(Z_met,1,sum)
X_met=X
sX_met=(X-mean(X))/sd(X)
R=array(data = NA,dim = c(2,n,T_met))
for (t in 1: nrow(Z_met)) R[,,t]=matrix(rep(c(1,sX_met[t]),times=n),byrow = FALSE,nrow=2,ncol=n)
############################################################################
########################## MCMC ########################
############################################################################
library(HI)
library(invgamma)
source('/gstore/scratch/u/lix233/RGSDP/sdp_functions_selfwriting_V12_cpp.R')
sourceCpp('/gstore/scratch/u/lix233/RGSDP/rgsdp.cpp')
D=read.csv('/gstore/scratch/u/lix233/RGSDP/D_16.csv',header=TRUE)
W=read.csv('/gstore/scratch/u/lix233/RGSDP/W_16.csv',header=TRUE)
N=20000;Burnin=N/2
Y_iter_met=Theta_iter_met=array(data=NA,dim = c(T_met,n,N))
try=matrix(0,nrow =T_met ,ncol = n)
for (i in 1:T_met){
for (j in 1:n){
if (Z_met[i,j]==0) {
try[i,j]=rnorm(1,mean=-10,sd=1)
} else {
try[i,j]=rnorm(1,mean=Z_met[i,j],sd=1)
}
}
}
g=update_Y(Z=Z_met,X=X_met,tau2=100,Theta = try,Beta =c(0.1,0.1),R)
sum(g==Inf)+sum(g==-Inf)
Theta_iter_met[,,1]=try
tau2_met=v_met=rho_met=sig2_met=rep(NA,N)
tau2_met[1]=50
v_met[1]=0.8
rho_met[1]=0.9
sig2_met[1]=10
# v_met=rep(1,N) # Fix v
av=bv=1
atau=0.0001 ;btau=0.0001
asig=0.0001 ;bsig=0.0001
Betam=c(0,0);Sigma_m=matrix(c(10^5,0,0,10^5),nrow=2,ncol=2)
Beta_iter_met=matrix(NA,nrow=N,ncol=nrow(R[,,1]))
Beta_iter_met[1,]=c(40,20)
for (iter in 2:N) {
Y_iter_met[,,iter]=update_Y(Z_met,X_met,tau2_met[iter-1],Theta_iter_met[,,iter-1],Beta_iter_met[iter-1,],R)
Theta_iter_met[,,iter]=update_theta(as.vector(X_met),Y_iter_met[,,iter],as.matrix(D),as.matrix(W),rho_met[iter-1],Theta_iter_met[,,iter-1],sig2_met[iter-1],tau2_met[iter-1],v_met[iter-1],Beta_iter_met[iter-1,],R)
Beta_iter_met[iter,]=update_Beta(Betam,Sigma_m,tau2_met[iter-1],X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],R)
tau2_met[iter] = update_tau2(X_met,Y_iter_met[,,iter],Theta_iter_met[,,iter],atau,btau,Beta_iter_met[iter,],R)
sig2_met[iter]= update_sig2(asig,bsig,D,W,rho_met[iter-1],Theta_iter_met[,,iter])
rho_met[iter] = update_rho(D,W,Theta_iter_met[,,iter],sig2_met[iter])
v_met[iter]=update_v(Z_met,v_met[iter-1],Tstar=nrow(unique.matrix(Theta_iter_met[,,iter])),av,bv)
}
library(coda)
mcmc_beta=mcmc(Beta_iter_met[(1+Burnin):N,])
pnorm(abs(geweke.diag(mcmc_beta)$z),lower.tail=FALSE)*2
mcmc_rho=mcmc(rho_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_rho)$z),lower.tail=FALSE)*2
mcmc_sig2=mcmc(sig2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_sig2)$z),lower.tail=FALSE)*2
mcmc_tau2=mcmc(tau2_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_tau2)$z),lower.tail=FALSE)*2
mcmc_v=mcmc(v_met[(1+Burnin):N])
pnorm(abs(geweke.diag(mcmc_v)$z),lower.tail=FALSE)*2
Theta_ave=Theta_sum=matrix(0,nrow=nrow(Theta_iter_met[,,1]),ncol=ncol(Theta_iter_met[,,1]))
for (i in (Burnin+1):N) {
Theta_sum=Theta_sum+Theta_iter_met[,,i]
}
Theta_ave=Theta_sum/(N-Burnin)
library('NbClust')
NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')
HRGSDP=NbClust(Theta_ave,distance='euclidean',method='ward.D2',index='kl')$Best.partition
glcm_whole=Z[,c(1,2,4,7,11,16,22,29,37,46,56,67,79,92,106,121,
2,3,5,8,12,17,23,30,38,47,57,68,80,93,107,122,
4,5,6,9,13,18,24,31,39,48,58,69,81,94,108,123,
7,8,9,10,14,19,25,32,40,49,59,70,82,95,109,124,
11,12,13,14,15,20,26,33,41,50,60,71,83,96,110,125,
16,17,18,19,20,21,27,34,42,51,61,72,84,97,111,126,
22,23,24,25,26,27,28,35,43,52,62,73,85,98,112,127,
29,30,31,32,33,34,35,36,44,53,63,74,86,99,113,128,
37,38,39,40,41,42,43,44,45,54,64,75,87,100,114,129,
46,47,48,49,50,51,52,53,54,55,65,76,88,101,115,130,
56,57,58,59,60,61,62,63,64,65,66,77,89,102,116,131,
67,68,69,70,71,72,73,74,75,76,77,78,90,103,117,132,
79,80,81,82,83,84,85,86,87,88,89,90,91,104,118,133,
92,93,94,95,96,97,98,99,100,101,102,103,104,105,119,134,
106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,135,
121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136)]
source('/gstore/scratch/u/lix233/RGSDP/cal_stat.R')
features=cal_stat(glcm_whole)
GMM=Mclust(features,5)
my.dist <- function(x) dist(x, method='euclidean')
my.hclust <- function(d) hclust(d, method='ward.D2')
HC<-cutree(my.hclust(my.dist(data.matrix(features))),k=5)
KM=kmeans(features,5)
SC=specc(features,5)
CO <- ConsensusClusterPlus(t(features),maxK=9,reps=100,pItem=0.90, pFeature=1,
clusterAlg='hc',distance='euclidean',plot=FALSE)
CO <- CO[[5]]$consensusClass
aa <- table(rep(1:5,each=20), CO)
bb <- table(rep(1:5,each=20), GMM$classification)
cc <- table(rep(1:5,each=20), HC)
dd <- table(rep(1:5,each=20), KM$cluster)
ee <- table(rep(1:5,each=20), SC)
ff <- table(rep(1:5,each=20), HRGSDP)
# Summarize each clustering method against the true labels: Pearson
# chi-square statistic of the cross-tabulation, number of clusters found,
# and misclassification rate (error_rate() is sourced from cal_stat.R).
# Replaces six copy-pasted res_* lines with one loop; also fixes the
# 'nunber of clusters' typo in the output column name.
tabs <- list(FeaCO = aa, FeaGMM = bb, FeaHC = cc, FeaKM = dd, FeaSC = ee,
             HRGSDP = ff)
rows <- lapply(names(tabs), function(nm) {
  tb <- tabs[[nm]]
  c(chisq.test(tb, correct = TRUE)$statistic, ncol(tb), error_rate(tb), nm)
})
xx <- do.call(rbind, rows)
rownames(xx) <- paste0("res_", names(tabs))  # same rownames as the original rbind
colnames(xx) <- c('pearson.chi.sq', 'number of clusters', 'error.rate', 'method')
xx <- as.data.frame(xx)
print(xx)
|
# Simulation settings for the exact restricted likelihood-ratio test (RLRT)
# power/size study; these globals are exported to the cluster workers below.
library(parallel)
simRep <- 5000 # Number of simulation replicates (one RLRT per replicate)
pvalue.true <- .05 # Nominal type I error of the test
b.var <- c(0) # Variance(s) of the random slopes b; 0 => simulate under the null
smooth <- 1 # Measurement error is added to M if smooth = 0; none if smooth = 1
cores <- 1
r.sim <- b.var
# One simulation replicate: generate functional covariates and responses for
# a longitudinal functional mixed model, estimate FPC scores via fpca.face,
# apply a per-subject SVD transform, fit nested mixed models, and return the
# exact RLRT p-value for the random functional-slope variance component.
# Reads the globals `r.sim` (true random-slope variance) and `smooth`
# (measurement-error switch), which must be exported to each worker.
run_one_sample <- function(iter){
  library(refund)
  library(lme4)
  library(nlme)
  library(arm)
  library(RLRsim)
  library(MASS)
  # Replicate-specific seed so workers are reproducible and independent.
  set.seed(iter+10000)
  D <- 80 # grid number total
  nSubj <- 50 # 200 # I the number of curves
  nRep <- 50 # 20 # datasets for each covariance function
  totalN <- nSubj * nRep
  thetaK.true <- 2
  timeGrid <- (1:D)/D
  npc.true <- 3
  percent <- 0.95
  SNR <- 3 # 5, signal noise ratio'
  sd.epsilon <- 1 # or 0.5
  delta.true <- 0.5
  a.mean <- 0
  gamma.true <- 2
  gammaVar.true <- 1
  # Subject-specific coefficients for the two treatment conditions,
  # replicated across each subject's nRep observations.
  # hot
  gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
  # warm
  gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
  dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
  #generate functional covariates
  # True eigenvalues decay geometrically; eigenfunctions are Fourier basis
  # functions (sinpi(x) = sin(pi * x)).
  lambda.sim <- function(degree) {
    return(0.5^(degree - 1))
  }
  psi.fourier <- function(t, degree) {
    result <- NA
    if(degree == 1){
      result <- sqrt(2) * sinpi(2*t)
    }else if(degree == 2){
      result <- sqrt(2) * cospi(4*t)
    }else if(degree == 3){
      result <- sqrt(2) * sinpi(4*t)
    }
    return(result)
  }
  lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
  psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
                     nrow = npc.true,
                     ncol = D,
                     byrow = TRUE)
  # True curves M(t) = sum_k a_k psi_k(t), with scores a ~ N(0, diag(lambda)).
  ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
  Mt.true <- ascore.true %*% psi.true
  error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
  # Subject-specific functional-slope coefficients; variance r.sim = 0 gives
  # the null model (no random functional slope).
  thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true)))
  thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
  betaM.true <- thetaIK.true * ascore.true
  betaM.true <- rowSums(betaM.true)
  # Response: intercept + condition-specific subject effects + functional
  # covariate effect + noise.
  Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
  ##########################################################################
  ID <- rep(1:nSubj, each = nRep)
  # Optionally corrupt the observed curves with white noise at the given SNR.
  if(smooth == 0){
    Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
  }
  if(smooth == 1){
    Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
    Mt.hat <- Mt.true
  }
  M <- Mt.hat
  # M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
  t <- (1:D)/D
  knots <- 5 # previous setting 10
  p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
  # Estimate FPC scores from the observed curves (refund::fpca.face); npc is
  # chosen by the proportion of variance explained (pve = percent).
  results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
  npc <- results$npc
  score <- results$scores
  ascore <- score[, 1:npc]/sqrt(D)
  # plot(results$efunctions[,2]*sqrt(D))
  # lines(1:80, psi.fourier(timeGrid, 2)) #match very well
  # to compare lambda: results$evalues/(D))
  # to compare estimated M, Mt.hat, Mt.true
  # a<-results$scores %*% t(results$efunctions)
  # plot(M[300,]) #Mt.hat
  # lines(a[300,]+results$mu,col="red") # estimated M
  # lines(Mt.true[300,], col="blue") #true Mt
  ###########################################################################
  dummyX <- cbind(dummyX, -dummyX + 1)
  z.sim.uni = c()
  ID.uni <- c(rbind(matrix(1:(nSubj*npc),
                           nrow = npc,
                           ncol = nSubj),
                    matrix(0, nrow = nRep - npc, ncol = nSubj)))
  # Per-subject orthogonal transform: rotate Y, the design, and the scores by
  # the left singular vectors of A_i A_i', which reduces the subject-specific
  # functional random effect to npc independent scaled components
  # (z.sim.uni), enabling a single-variance-component exact RLRT.
  for(k in 1:nSubj){
    svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) #SVD on A_i
    u.tra <- t(svd$v)
    u <- svd$u
    d <- (svd$d)[1:npc]
    # u <- cbind(u, Null(u))
    Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
    dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
    ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
                                               matrix(0,
                                                      nrow = nRep - npc,
                                                      ncol = npc))
    z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
  }
  ###########################################################################
  designMatrix <- data.frame(rating = Y,
                             temp.1 = dummyX[, 1],
                             temp.2 = dummyX[, 2],
                             ID = as.factor(ID),
                             ID.uni = as.factor(ID.uni),
                             ascore = ascore,
                             z.sim.uni = z.sim.uni)
  # 'lmer' model
  designMatrix.lmm <- designMatrix
  # Formula pieces "ascore.1 + ascore.2 + ..." built for the estimated npc.
  additive0.sim <- paste(1:npc, collapse = " + ascore.")
  additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
  # Full model: fixed effects + random condition effects + the tested random
  # component (0 + z.sim.uni | ID.uni).
  model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                                additive0.sim,
                                " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
                                sep = ""))
  fullReml <- lmer(model.sim, data = designMatrix.lmm)
  # Model with only the tested random effect (needed by exactRLRT as "m").
  f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
                              additive0.sim,
                              " + (0 + z.sim.uni | ID.uni)",
                              sep = ""))
  m.slope <- lmer(f.slope, data = designMatrix.lmm)
  # Null model: full model without the tested random effect.
  f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
  m0 <- update(fullReml, f0)
  # Exact restricted likelihood-ratio test of Var(z.sim.uni random effect)=0.
  tests2 <- exactRLRT(m.slope, fullReml, m0)
  pvalues.bonf <- tests2$p[1]
  ###################################################################################
  return(list(realTau = r.sim,
              pvalues.bonf = pvalues.bonf,
              Merror.Var = Merror.Var,
              smooth = smooth,
              npc = npc,
              tests2 = tests2))
}
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cores)
# Reproducible parallel RNG: one L'Ecuyer stream per worker from this seed.
clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
# Workers need the globals read inside run_one_sample.
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("f_", smooth, "_seed3_grp50-rep50.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
# Run simRep independent replicates across the cluster.
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                       pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                      pvalues.bonf = x$pvalues.bonf,
#                                                      smooth = x$smooth,
#                                                      npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
#  c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# Empirical rejection rate at level pvalue.true: power when r.sim > 0,
# empirical type I error when r.sim = 0.
table2.sim <- sapply(node_results, function(x) {
  c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = r.sim, smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
# Persist the whole workspace (including node_results) for later analysis.
save.image(file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
#      main = "Histogram of p-value for lme model",
#      xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
#      main = "Histogram of p-value for lmer model",
#      xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
#      breaks = (0:110)/10,
#      main = "Histogram of test-statistic for lme model",
#      xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
#      breaks = (0:100)/10,
#      main = "Histogram of test-statistic for lmer model",
#      xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster) | /full simulation/03.13.2018/5000simu/seed3/pca_s_seed3_50_50.R | no_license | wma9/FMRI-project | R | false | false | 9,143 | r | library(parallel)
simRep <- 5000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0) # The set of varaince of random covariates b as random slope
smooth <- 1 # measurement error is added to M if smooth = 0; no measurement error is added if sooth = 1
cores <- 1
r.sim <- b.var
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
library(MASS)
set.seed(iter+10000)
D <- 80 # grid number total
nSubj <- 50 # 200 # I the number of curves
nRep <- 50 # 20 # datasets for each covariance function
totalN <- nSubj * nRep
thetaK.true <- 2
timeGrid <- (1:D)/D
npc.true <- 3
percent <- 0.95
SNR <- 3 # 5, signal noise ratio'
sd.epsilon <- 1 # or 0.5
delta.true <- 0.5
a.mean <- 0
gamma.true <- 2
gammaVar.true <- 1
# hot
gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
# warm
gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # dummyX
#generate functional covariates
lambda.sim <- function(degree) {
return(0.5^(degree - 1))
}
psi.fourier <- function(t, degree) {
result <- NA
if(degree == 1){
result <- sqrt(2) * sinpi(2*t)
}else if(degree == 2){
result <- sqrt(2) * cospi(4*t)
}else if(degree == 3){
result <- sqrt(2) * sinpi(4*t)
}
return(result)
}
lambdaVec.true <- mapply(lambda.sim, 1: npc.true)
psi.true <- matrix(data = mapply(psi.fourier, rep(timeGrid, npc.true), rep(1:npc.true, each=D)),
nrow = npc.true,
ncol = D,
byrow = TRUE)
ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
Mt.true <- ascore.true %*% psi.true
error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true), diag(rep(r.sim, npc.true)))
thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
betaM.true <- thetaIK.true * ascore.true
betaM.true <- rowSums(betaM.true)
Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true + betaM.true + error
##########################################################################
ID <- rep(1:nSubj, each = nRep)
if(smooth == 0){
Merror.Var <- sum(lambdaVec.true) / SNR #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true + matrix(rnorm(totalN*D, mean=0, sd = sqrt(Merror.Var)), totalN, D)
}
if(smooth == 1){
Merror.Var <- 0 #SNR = sum(lambdaVec.true)/Merror.Var
Mt.hat <- Mt.true
}
M <- Mt.hat
# M <- M - matrix(rep(colMeans(M), each = totalN), totalN, D) # center:column-means are 0
t <- (1:D)/D
knots <- 5 # previous setting 10
p <- 5 # previous setting p <- 7, the number of degree for B-splines we use
results <- fpca.face(M, center = TRUE, argvals = t, knots = knots, pve = percent, p = p, lambda = 0) # pve need to be chosen!
npc <- results$npc
score <- results$scores
ascore <- score[, 1:npc]/sqrt(D)
# plot(results$efunctions[,2]*sqrt(D))
# lines(1:80, psi.fourier(timeGrid, 2)) #match very well
# to compare lambda: results$evalues/(D))
# to compare estimated M, Mt.hat, Mt.true
# a<-results$scores %*% t(results$efunctions)
# plot(M[300,]) #Mt.hat
# lines(a[300,]+results$mu,col="red") # estimated M
# lines(Mt.true[300,], col="blue") #true Mt
###########################################################################
dummyX <- cbind(dummyX, -dummyX + 1)
z.sim.uni = c()
ID.uni <- c(rbind(matrix(1:(nSubj*npc),
nrow = npc,
ncol = nSubj),
matrix(0, nrow = nRep - npc, ncol = nSubj)))
for(k in 1:nSubj){
svd <- svd(ascore[((k-1)*nRep+1):(k*nRep), ] %*% t(ascore[((k-1)*nRep+1):(k*nRep), ])) #SVD on A_i
u.tra <- t(svd$v)
u <- svd$u
d <- (svd$d)[1:npc]
# u <- cbind(u, Null(u))
Y[((k-1)*nRep+1):(k*nRep)] <- u.tra %*% Y[((k-1)*nRep+1):(k*nRep)]
dummyX[((k-1)*nRep+1):(k*nRep), ] <- u.tra %*% dummyX[((k-1)*nRep+1):(k*nRep), ]
ascore[((k-1)*nRep+1):(k*nRep), ] <- rbind(u.tra[1:npc, ] %*% ascore[((k-1)*nRep+1):(k*nRep), ],
matrix(0,
nrow = nRep - npc,
ncol = npc))
z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep - npc))
}
###########################################################################
designMatrix <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ID.uni = as.factor(ID.uni),
ascore = ascore,
z.sim.uni = z.sim.uni)
# 'lmer' model
designMatrix.lmm <- designMatrix
additive0.sim <- paste(1:npc, collapse = " + ascore.")
additive.sim <- paste(1:npc, collapse = " | ID) + (0 + ascore.")
# Confusion of modifying
model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
sep = ""))
fullReml <- lmer(model.sim, data = designMatrix.lmm)
f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + ascore.",
additive0.sim,
" + (0 + z.sim.uni | ID.uni)",
sep = ""))
m.slope <- lmer(f.slope, data = designMatrix.lmm)
f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
m0 <- update(fullReml, f0)
tests2 <- exactRLRT(m.slope, fullReml, m0)
pvalues.bonf <- tests2$p[1]
###################################################################################
return(list(realTau = r.sim,
pvalues.bonf = pvalues.bonf,
Merror.Var = Merror.Var,
smooth = smooth,
npc = npc,
tests2 = tests2))
}
# ---- Parallel simulation driver ----
# Assumes 'cores', 'r.sim', 'smooth', 'simRep', 'pvalue.true' and
# run_one_sample() are defined earlier in this script -- TODO confirm
# against the full file.
# Setup parallel
#cores <- detectCores()
cluster <- makeCluster(cluster <- cores)
clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
# clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
# fileName <- paste("power_", b.var, "_grp20-rep20-", nRandCovariate,".RData", sep = "") # Saving file's name
clusterExport(cluster, c("r.sim", "smooth")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("f_", smooth, "_seed3_grp50-rep50.RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
# Run simRep independent replicates of run_one_sample() on the cluster;
# each element of node_results is the list returned by one replicate.
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                       pvalue = x$pvalue)})
#result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
#                                                      pvalues.bonf = x$pvalues.bonf,
#                                                      smooth = x$smooth,
#                                                      npc = x$npc)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
#  c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
# Empirical power: fraction of replicates whose Bonferroni-adjusted
# p-value falls at or below the significance threshold.
# NOTE(review): sapply() is type-unstable on empty input; vapply() would
# be safer here.
table2.sim <- sapply(node_results, function(x) {
  c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = r.sim, smooth = smooth)
# loopIndex <- loopIndex + 1
# } # End outer loop
save.image(file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.R
\name{doAPICall}
\alias{doAPICall}
\title{Perform an API call to the OpenML server.}
\usage{
doAPICall(
api.call,
id = NULL,
url.args = list(),
post.args = list(),
file = NULL,
verbosity = NULL,
method,
...
)
}
\arguments{
\item{api.call}{[\code{character(1)}]\cr
API endpoints listed in \href{https://github.com/openml/OpenML/wiki/API-v1}{APIv1}.}
\item{id}{[\code{integer(1)}]\cr
Optional ID we pass to the API, like runs/list/1.}
\item{url.args}{[\code{list}]\cr
Named list of key-value pairs passed as HTTP GET parameters, e.g.,
key1=value1&key2=value2 to the API call.}
\item{post.args}{[\code{list}]\cr
Optional. A list passed to the \code{body}-arg for \code{\link[httr]{POST}} requests.}
\item{file}{[\code{character(1)}]\cr
Optional filename to write the XML content to.}
\item{verbosity}{[\code{integer(1)}]\cr
Print verbose output on console? Possible values are:\cr
\code{0}: normal output,\cr
\code{1}: info output,\cr
\code{2}: debug output.\cr
Default is set via \code{\link{setOMLConfig}}.}
\item{method}{[\code{character(1)}]\cr
HTTP request method. Currently one of GET, POST or DELETE.}
\item{...}{Another possibility to pass key-value pairs for the HTTP request query.
Arguments passed via ... have a higher priority.}
}
\value{
[\code{character(1)}]\cr Unparsed content of the returned XML file.
}
\description{
The function always returns the XML file content provided by the server.
}
\keyword{internal}
| /man/doAPICall.Rd | no_license | cran/OpenML | R | false | true | 1,593 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.R
\name{doAPICall}
\alias{doAPICall}
\title{Perform an API call to the OpenML server.}
\usage{
doAPICall(
api.call,
id = NULL,
url.args = list(),
post.args = list(),
file = NULL,
verbosity = NULL,
method,
...
)
}
\arguments{
\item{api.call}{[\code{character(1)}]\cr
API endpoints listed in \href{https://github.com/openml/OpenML/wiki/API-v1}{APIv1}.}
\item{id}{[\code{integer(1)}]\cr
Optional ID we pass to the API, like runs/list/1.}
\item{url.args}{[\code{list}]\cr
Named list of key-value pairs passed as HTTP GET parameters, e.g.,
key1=value1&key2=value2 to the API call.}
\item{post.args}{[\code{list}]\cr
Optional. A list passed to the \code{body}-arg for \code{\link[httr]{POST}} requests.}
\item{file}{[\code{character(1)}]\cr
Optional filename to write the XML content to.}
\item{verbosity}{[\code{integer(1)}]\cr
Print verbose output on console? Possible values are:\cr
\code{0}: normal output,\cr
\code{1}: info output,\cr
\code{2}: debug output.\cr
Default is set via \code{\link{setOMLConfig}}.}
\item{method}{[\code{character(1)}]\cr
HTTP request method. Currently one of GET, POST or DELETE.}
\item{...}{Another possibility to pass key-value pairs for the HTTP request query.
Arguments passed via ... have a higher priority.}
}
\value{
[\code{character(1)}]\cr Unparsed content of the returned XML file.
}
\description{
The function always returns the XML file content provided by the server.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Circle.R
\name{CircleOA}
\alias{CircleOA}
\title{Circle given by its center and a point}
\usage{
CircleOA(O, A)
}
\arguments{
\item{O}{the center of the circle}
\item{A}{a point of the circle}
}
\value{
A \code{Circle} object.
}
\description{
Return the circle given by its center and a point it
passes through.
}
| /man/CircleOA.Rd | no_license | stla/PlaneGeometry | R | false | true | 393 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Circle.R
\name{CircleOA}
\alias{CircleOA}
\title{Circle given by its center and a point}
\usage{
CircleOA(O, A)
}
\arguments{
\item{O}{the center of the circle}
\item{A}{a point of the circle}
}
\value{
A \code{Circle} object.
}
\description{
Return the circle given by its center and a point it
passes through.
}
|
## Exploratory analysis of the Kaggle Pokemon dataset: a correlation plot
## of the numeric stats, then a Random Forest regression predicting the
## 'total' stat. Expects 'Pokemon.csv' in the working directory.
library(corrplot)
library(randomForest)
library(miscTools)
library(ggplot2)

## Load the data (read.csv already returns a data.frame; attach() removed
## because every later reference is qualified with $ or a data= argument).
pokemon <- read.csv("Pokemon.csv", header = TRUE)
View(pokemon)
## Size of the data
nrow(pokemon); ncol(pokemon)

## Rename the variables to simple lower-case names.
colnames(pokemon) <- c("number", "name", "type1", "type2", "total", "hp",
                       "attack", "defense", "sp.atk", "sp.def", "speed",
                       "generation", "legendary")
sum(is.na(pokemon))  # '0' means no missing values

## Split the variables into categorical and numerical to make life easier.
categorical <- pokemon[, c(1:4, 12:13)]
numerical <- pokemon[, c(5:11)]

## Correlation plot of the numeric stats.
M <- cor(numerical)
corrplot(M, method = "circle")

## Random Forest ---------------------------------------------------------
## Step 1: divide the dataset into training (70%) and test (30%) sets.
smp_size <- floor(0.7 * nrow(pokemon))
## set the seed to make the partition reproducible
set.seed(123)
train_ind <- sample(seq_len(nrow(pokemon)), size = smp_size)
train <- pokemon[train_ind, ]
test <- pokemon[-train_ind, ]

## Step 2: fit the random forest (columns 1:2 are identifiers, dropped).
set.seed(12345)
class(pokemon$total)
str(pokemon)
rf <- randomForest(total ~ ., data = train[-c(1, 2)], ntree = 200, mtry = 2,
                   nodesize = 1, rules = TRUE)
rf_test_prediction <- predict(rf, test)

## Variable importance graph. Use bare column names in aes(): referencing
## rf_var_imp$... inside aes() bypasses ggplot2's data masking and breaks
## faceting, besides producing ugly axis labels.
rf_var_imp <- data.frame(rf$importance)
rf_var_imp$Variables <- row.names(rf_var_imp)
rf_var_imp
p <- ggplot(data = rf_var_imp, aes(x = Variables, y = IncNodePurity)) +
  geom_bar(stat = "identity", width = 0.7, fill = "steelblue")
p

## Model metrics on the test set: R squared and MSE (column 5 is 'total').
(r2 <- rSquared(test$total, test$total - predict(rf, test[, -5])))
(mse <- mean((test$total - predict(rf, test[, -5]))^2))

## Predicted vs actual with the metrics in the title.
p1 <- ggplot(aes(x = actual, y = pred),
             data = data.frame(actual = test$total, pred = predict(rf, test[, -5])))
p1 + geom_point() +
  geom_abline(color = "red") +
  ggtitle(paste("RandomForest Regression in R r^2=", round(r2, 2),
                "MSE=", round(mse, 2), sep = ""))
p1 + geom_point() +
  geom_abline(color = "red") +
  ggtitle(paste("RandomForest Regression in MSE=", mse, sep = ""))
| /pokemon.R | no_license | amygko/R-programming | R | false | false | 2,165 | r | #load the data
pokemon<-read.csv("Pokemon.csv",header=TRUE)
attach(pokemon)
View(pokemon)
#Size of the data
nrow(pokemon);ncol(pokemon)
#I'm gonna make correlation plot and conduct Random Forest
#Let's rename the variables
colnames(pokemon) <- c("number", "name", "type1", "type2", "total", "hp",
"attack", "defense", "sp.atk", "sp.def", "speed",
"generation", "legendary")
sum(is.na(pokemon)) #'0' meansno missing values
#First, let's divide the variables into categorical and numerical to make life eaiser
categorical<-pokemon[,c(1:4,12:13)]
numerical<-pokemon[,c(5:11)]
#Create the correlation plot
library(corrplot)
M<-cor(numerical)
corrplot(M, method="circle")
#Let's start Random Forest
#Step1 divide the dataset into training and test dataset
## 70% of the sample size
smp_size <- floor(0.7 * nrow(pokemon))
## set the seed to make your partition reproductible
set.seed(123)
train_ind <- sample(seq_len(nrow(pokemon)), size = smp_size)
train <- pokemon[train_ind, ]
test <- pokemon[-train_ind, ]
#Create the model for random forest
set.seed(12345)
library(randomForest)
library(miscTools)
library(ggplot2)
class(pokemon$total)
str(pokemon)
#Create RF model
rf=randomForest(total~.,data=train[-c(1,2)],ntree=200,mtry=2,nodesize=1,rules=TRUE)
rf_test_prediction=predict(rf,test)
#Variable Importance graph
rf_var_imp=data.frame(rf$importance)
rf_var_imp$Variables=row.names(rf_var_imp)
rf_var_imp
p<-ggplot(data=rf_var_imp,aes(x=rf_var_imp$Variables,y=rf_var_imp$IncNodePurity))+ geom_bar(stat="identity", width=0.7, fill="steelblue")
p
#create the metrics:r squared, mse
(r2 <- rSquared(test$total, test$total - predict(rf, test[,-5])))
(mse <- mean((test$total - predict(rf, test[,-5]))^2))
#Plot R square
p1 <- ggplot(aes(x=actual, y=pred),
data=data.frame(actual=test$total, pred=predict(rf, test[,-5])))
p1 + geom_point() +
geom_abline(color="red") +
ggtitle(paste("RandomForest Regression in R r^2=", round(r2,2),"MSE=",round(mse,2), sep=""))
p1 + geom_point() +
geom_abline(color="red") +
ggtitle(paste("RandomForest Regression in MSE=", mse, sep=""))
|
##--------------------------------------------------------------------------------------
##
## Update existing strds with new scenes in .tar.gz format
## Ben DeVries
## 19-02-14, updated 10-11-15
##
## Usage:
## <from within GRASS session>
## Rscript update_strds_Landsat.R strds /path/to/scene(s)[.tar.gz] /path/to/outdir pattern label cpus [overwrite]
##
## Notes:
## - scenes were downloaded via http://espa.cr.usgs.gov
## - gtiff format was requested
## - scene .tar.gz file must be named according to Landsat scene name convention
## - only one band at a time is processed with this script (using 'pattern')
## - cloud masking is done automatically by searching for the *cfmask.tif band
##
##-------------------------------------------------------------------------------------
args <- commandArgs(trailingOnly = TRUE)
strds <- args[1]
fl <- args[2]
outdir <- args[3]
pattern <- args[4]
label <- args[5]
cpus <- as.numeric(args[6])
if(length(args) > 6 & args[7] == 'overwrite') {
overwrite = TRUE
} else {
overwrite = FALSE
}
library(bfastSpatial)
library(spgrass7)
junk <- Sys.setlocale('LC_TIME', 'en_US.utf-8')
# check if fl is a filename or a folder
# if it's a folder, replace fl with all .tar.gz files within that folder
if(file.info(fl)$isdir) {
fl <- list.files(fl, pattern = glob2rx("*.tar.gz"), full.names = TRUE)
}
# get timestamp from fl
s <- getSceneinfo(fl)
dates <- tolower(format(s$date, format = '%d %b %Y'))
end_dates <- tolower(format(s$date + 1, format = '%d %b %Y'))
# function for batch import and timestamping of raster maps
# Import a GeoTIFF into the current GRASS mapset and attach a timestamp.
#
# Args:
#   r:    path to the raster file to import
#   name: name for the resulting raster map in the mapset
#   date: date string understood by r.timestamp, e.g. '19 feb 2014'
#
# Relies on the script-level 'overwrite' flag (parsed from the command
# line above) to decide whether GRASS may overwrite an existing map.
# Side effects only: shells out to the GRASS tools r.in.gdal / r.timestamp.
r.in.gdal.timestamp <- function(r, name, date) {
  # Single code path: prepend GRASS's --o switch instead of duplicating
  # the whole system() call in both branches.
  flag <- if (overwrite) "--o " else ""
  system(sprintf("r.in.gdal %sinput=%s output=%s", flag, r, name))
  system(sprintf("r.timestamp map=%s date='%s'", name, date))
}
# look-up table if spectral bands are needed (not applicable for metrics)
lut <- data.frame(band = c("blue", "green", "red", "NIR", "SWIR1", "SWIR2"),
TMETM = sprintf("sr_band%s", c(1:5, 7)),
OLI = sprintf("sr_band%s", c(2:7)),
stringsAsFactors = FALSE)
# loop through fl and extract appropriate bands
if(cpus == 1) {
for(i in 1:length(fl)) {
if(pattern %in% c("blue", "green", "red", "NIR", "SWIR1", "SWIR2")) {
if(s$sensor[i] == "OLI") {
vi <- lut$OLI[lut$band == pattern]
} else {
vi <- lut$TMETM[lut$band == pattern]
}
} else {
vi <- pattern
}
# process appropriate bands
processLandsat(fl[i], vi = vi, srdir = ".", outdir = outdir, delete = TRUE, mask = "cfmask", fileExt = "tif", overwrite = overwrite)
# import raster maps with timestamps to mapset
sname <- sprintf("%s_%s", row.names(s)[i], label)
outfl <- sprintf('%s/%s.%s.tif', outdir, vi, row.names(s)[i])
r.in.gdal.timestamp(outfl, sname, dates[i])
# write .txt with start and end times for registering the raster maps to the strds
start_date <- as.character(s$date[i])
end_date <- as.character(s$date[i] + 1) # 1 day later
lines <- sprintf("%s|%s|%s", sname, start_date, end_date)
if(i == 1) {
fileConn <- file('scenes_time.txt', open = 'w')
} else {
fileConn <- file('scenes_time.txt', open = 'a')
}
writeLines(lines, fileConn)
close(fileConn)
}
} else {
## TODO: multi-core
}
# register to strds
command <- sprintf("t.register input=%s file=scenes_time.txt", strds)
system(command)
junk <- file.remove("scenes_time.txt")
# show info
system(sprintf("t.info type=strds input=%s", strds))
cat("\n\nFinished.")
| /Rscript/update_strds_Landsat.R | no_license | bendv/tgrass | R | false | false | 3,657 | r | ##--------------------------------------------------------------------------------------
##
## Update existing strds with new scenes in .tar.gz format
## Ben DeVries
## 19-02-14, updated 10-11-15
##
## Usage:
## <from within GRASS session>
## Rscript update_strds_Landsat.R strds /path/to/scene(s)[.tar.gz] /path/to/outdir pattern label cpus [overwrite]
##
## Notes:
## - scenes were downloaded via http://espa.cr.usgs.gov
## - gtiff format was requested
## - scene .tar.gz file must be named according to Landsat scene name convention
## - only one band at a time is processed with this script (using 'pattern')
## - cloud masking is done automatically by searching for the *cfmask.tif band
##
##-------------------------------------------------------------------------------------
args <- commandArgs(trailingOnly = TRUE)
strds <- args[1]
fl <- args[2]
outdir <- args[3]
pattern <- args[4]
label <- args[5]
cpus <- as.numeric(args[6])
if(length(args) > 6 & args[7] == 'overwrite') {
overwrite = TRUE
} else {
overwrite = FALSE
}
library(bfastSpatial)
library(spgrass7)
junk <- Sys.setlocale('LC_TIME', 'en_US.utf-8')
# check if fl is a filename or a folder
# if it's a folder, replace fl with all .tar.gz files within that folder
if(file.info(fl)$isdir) {
fl <- list.files(fl, pattern = glob2rx("*.tar.gz"), full.names = TRUE)
}
# get timestamp from fl
s <- getSceneinfo(fl)
dates <- tolower(format(s$date, format = '%d %b %Y'))
end_dates <- tolower(format(s$date + 1, format = '%d %b %Y'))
# function for batch import and timestamping of raster maps
# Import a raster file into the GRASS mapset as 'name' and stamp it with
# the given acquisition date. Uses the script-level 'overwrite' flag to
# pick between the plain and the --o (overwrite) form of r.in.gdal.
# Side effects only: shells out to GRASS commands.
r.in.gdal.timestamp <- function(r, name, date) {
  import_cmd <- if (overwrite) {
    sprintf("r.in.gdal --o input=%s output=%s", r, name)
  } else {
    sprintf("r.in.gdal input=%s output=%s", r, name)
  }
  system(import_cmd)
  # Attach the timestamp so t.register can place the map on the timeline.
  system(sprintf("r.timestamp map=%s date='%s'", name, date))
}
# look-up table if spectral bands are needed (not applicable for metrics)
lut <- data.frame(band = c("blue", "green", "red", "NIR", "SWIR1", "SWIR2"),
TMETM = sprintf("sr_band%s", c(1:5, 7)),
OLI = sprintf("sr_band%s", c(2:7)),
stringsAsFactors = FALSE)
# loop through fl and extract appropriate bands
if(cpus == 1) {
for(i in 1:length(fl)) {
if(pattern %in% c("blue", "green", "red", "NIR", "SWIR1", "SWIR2")) {
if(s$sensor[i] == "OLI") {
vi <- lut$OLI[lut$band == pattern]
} else {
vi <- lut$TMETM[lut$band == pattern]
}
} else {
vi <- pattern
}
# process appropriate bands
processLandsat(fl[i], vi = vi, srdir = ".", outdir = outdir, delete = TRUE, mask = "cfmask", fileExt = "tif", overwrite = overwrite)
# import raster maps with timestamps to mapset
sname <- sprintf("%s_%s", row.names(s)[i], label)
outfl <- sprintf('%s/%s.%s.tif', outdir, vi, row.names(s)[i])
r.in.gdal.timestamp(outfl, sname, dates[i])
# write .txt with start and end times for registering the raster maps to the strds
start_date <- as.character(s$date[i])
end_date <- as.character(s$date[i] + 1) # 1 day later
lines <- sprintf("%s|%s|%s", sname, start_date, end_date)
if(i == 1) {
fileConn <- file('scenes_time.txt', open = 'w')
} else {
fileConn <- file('scenes_time.txt', open = 'a')
}
writeLines(lines, fileConn)
close(fileConn)
}
} else {
## TODO: multi-core
}
# register to strds
command <- sprintf("t.register input=%s file=scenes_time.txt", strds)
system(command)
junk <- file.remove("scenes_time.txt")
# show info
system(sprintf("t.info type=strds input=%s", strds))
cat("\n\nFinished.")
|
## Week 3 Assignment for R Programming
## Aim of the assignment is to write functions that can cache the inverse of a matrix to save time-
## consuming computations.
## Creates a special matrix that can cache its inverse - assuming the matrix is always invertible.
## Build a caching wrapper around a matrix. The returned object is a list
## of accessor closures that share state (the matrix and its cached
## inverse) through the constructor's environment:
##   set(y)        replace the matrix and drop any cached inverse
##   get()         return the current matrix
##   set_inv(i)    store a computed inverse
##   get_inv()     return the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL  # a new matrix invalidates the cache
    },
    get = function() x,
    set_inv = function(inverse) cached_inverse <<- inverse,
    get_inv = function() cached_inverse
  )
}
## Computes the inverse of the special matrix returned by the function above. If the inverse has already
## been calculated and the matrix has not changed then the cacheSolve will retrieve the inverse from the
## cache.
## Return the inverse of a cache-matrix object created by makeCacheMatrix().
## A cached inverse is reused (announced via message()); otherwise the
## inverse is computed with solve(), stored back in the cache, and
## returned. Extra arguments in '...' are forwarded to solve(). Assumes
## the stored matrix is invertible.
cacheSolve <- function(x, ...) {
  cached <- x$get_inv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$set_inv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | od13132/ProgrammingAssignment2 | R | false | false | 1,131 | r | ## Week 3 Assignment for R Programming
## Aim of the assignment is to write functions that can cache the inverse of a matrix to save time-
## consuming computations.
## Creates a special matrix that can cache its inverse - assuming the matrix is always invertible.
## Constructor for a "cache matrix": a list of four closures that share
## the matrix 'x' and its lazily computed inverse 'inv' through the
## enclosing environment.
##   set(y)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   set_inv(i)    -- store a computed inverse in the cache
##   get_inv()     -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
        inv <- NULL                      # cached inverse; NULL = not computed yet
        set <- function(y) {
                x <<- y
                inv <<- NULL             # new matrix invalidates the cached inverse
        }
        get <- function() x
        set_inv <- function(inverse) inv <<- inverse
        get_inv <- function() inv
        list(set = set, get = get,
             set_inv = set_inv,
             get_inv = get_inv)
}
## Computes the inverse of the special matrix returned by the function above. If the inverse has already
## been calculated and the matrix has not changed then the cacheSolve will retrieve the inverse from the
## cache.
## Return the inverse of the special matrix created by makeCacheMatrix().
## If the inverse is already cached it is returned directly (with a
## message); otherwise it is computed with solve(), cached, and returned.
## '...' is forwarded to solve(). Assumes 'x' holds an invertible matrix.
cacheSolve <- function(x, ...) {
        inv <- x$get_inv()
        if(!is.null(inv)) {
                message("getting cached data")
                return(inv)
        }
        data <- x$get()
        inv <- solve(data, ...)
        x$set_inv(inv)
        inv
}
|
# Portal landing page UI for the SQuID Shiny app. All display text comes
# from the 'portal_txt' list -- defined elsewhere in the package; TODO
# confirm its source file.
div(
  # Introduction panel: title, image + two paragraphs of HTML content.
  wellPanel(
    h4(portal_txt$parag0_title),
    p(HTML(portal_txt$patag1_image),  # NOTE(review): 'patag1' looks like a typo for 'parag1' -- verify the element name in portal_txt
      HTML(portal_txt$parag0_contents_1)),
    p(HTML(portal_txt$parag0_contents_2))
  ),
  # Guidance panel: three equal-width columns, one per audience.
  wellPanel(
    h4("Guidance for Users"),
    fluidRow(
      column(width = 4,
             h5("For Beginners"),
             p(HTML(portal_txt$beginners))
      ),
      column(width = 4,
             h5("For Teachers"),
             p(HTML(portal_txt$teachers))
      ),
      column(width = 4,
             h5("For Experts"),
             p(HTML(portal_txt$experts))
      )
    )
  ),
wellPanel(
# HTML('<img src="pictures/modules.png" alt="SQuID">'),
h4(portal_txt$parag3_title),
p(HTML(portal_txt$parag3_contents1)),
p(HTML(portal_txt$parag3_contents2))
),
wellPanel(
p(strong("References:")),
p(HTML("Allegue, H., Araya-Ajoy, Y. G., Dingemanse, N. J., Dochtermann, N. A., Garamszegi, L. Z.,
Nakagawa, S., Réale, D., Schielzeth, H.& Westneat, D. F. (2016) Statistical Quantification of
Individual Differences: an educational and statistical tool for understanding multi-level phenotypic
data in linear mixed models. <i>Methods in Ecology and Evolution</i>, 8, 257-267.
<a href='https://doi.org/10.1111/2041-210X.12659' target='_blank'>doi: 10.1111/2041-210X.12659</a>")),
p(HTML("Dingemanse, N. J.& Dochtermann, N. A. (2013) Quantifying individual variation in behaviour:
mixed-effect modelling approaches. <i>Journal of Animal Ecology</i>, 82, 39-54.
<a href='https://doi.org/10.1111/1365-2656.12013' target='_blank'>doi: 10.1111/1365-2656.12013</a>"))
)
) | /inst/shiny-squid/source/pages/portal/ui_portal.R | no_license | cran/squid | R | false | false | 1,739 | r | div(
wellPanel(
h4(portal_txt$parag0_title),
p(HTML(portal_txt$patag1_image),
HTML(portal_txt$parag0_contents_1)),
p(HTML(portal_txt$parag0_contents_2))
),
wellPanel(
h4("Guidance for Users"),
fluidRow(
column(width = 4,
h5("For Beginners"),
p(HTML(portal_txt$beginners))
),
column(width = 4,
h5("For Teachers"),
p(HTML(portal_txt$teachers))
),
column(width = 4,
h5("For Experts"),
p(HTML(portal_txt$experts))
)
)
),
wellPanel(
# HTML('<img src="pictures/modules.png" alt="SQuID">'),
h4(portal_txt$parag3_title),
p(HTML(portal_txt$parag3_contents1)),
p(HTML(portal_txt$parag3_contents2))
),
wellPanel(
p(strong("References:")),
p(HTML("Allegue, H., Araya-Ajoy, Y. G., Dingemanse, N. J., Dochtermann, N. A., Garamszegi, L. Z.,
Nakagawa, S., Réale, D., Schielzeth, H.& Westneat, D. F. (2016) Statistical Quantification of
Individual Differences: an educational and statistical tool for understanding multi-level phenotypic
data in linear mixed models. <i>Methods in Ecology and Evolution</i>, 8, 257-267.
<a href='https://doi.org/10.1111/2041-210X.12659' target='_blank'>doi: 10.1111/2041-210X.12659</a>")),
p(HTML("Dingemanse, N. J.& Dochtermann, N. A. (2013) Quantifying individual variation in behaviour:
mixed-effect modelling approaches. <i>Journal of Animal Ecology</i>, 82, 39-54.
<a href='https://doi.org/10.1111/1365-2656.12013' target='_blank'>doi: 10.1111/1365-2656.12013</a>"))
)
) |
plot.weathermap <- function(x, y, dataset, filled = FALSE, interval = 100) {
  ## Plot gridded weather data over a world map (plus US state borders
  ## where available), zoomed to the lon/lat region spanned by x and y.
  ##
  ## Args:
  ##   x:        vector of longitudes (the columns of 'dataset')
  ##   y:        vector of latitudes  (the rows of 'dataset')
  ##   dataset:  matrix of values with dim = length(y) x length(x)
  ##   filled:   if TRUE draw a filled contour map, otherwise contour lines
  ##   interval: approximate contour interval used to choose the levels
  ##
  ## Returns NULL invisibly; called for its plotting side effects.
  ## Requires the 'maps' package to be attached for map().

  ## Validate the orientation of the matrix before touching any device.
  if (length(x) != ncol(dataset) || length(y) != nrow(dataset)) {
    warning("The dimensions are incorrect. You may want to try the transpose.",
            call. = FALSE)
    return(invisible(NULL))
  }
  par(cex = 2.5)
  ## Open a device whose aspect ratio roughly matches the lon/lat extent.
  ## dev.new() is portable (it dispatches to windows() on Windows).
  marg.1 <- diff(range(y)) / 1.375
  marg.2 <- diff(range(x)) / 1.75
  dev.new(width = marg.2, height = marg.1)
  par(mai = c(1.1, 1.1, 1.1, 1.1))
  rgb.palette <- colorRampPalette(c("blue", "green", "orange", "red"),
                                  space = "rgb")
  if (isTRUE(filled)) {
    filled.contour(x, y, t(dataset),
                   plot.axes = {axis(1); axis(2); map('world', add = TRUE)})
    ## State borders only exist for the USA; silently ignore failures.
    try(map('state', projection = 'rectangular', parameters = 0, add = TRUE),
        silent = TRUE)
  } else {
    ## Contour levels spaced at (roughly) the requested interval.
    key.axis <- pretty(range(dataset),
                       round((max(dataset) - min(dataset)) / interval))
    contour(x, y, t(dataset), levels = key.axis, labcex = 1.25,
            col = rgb.palette(length(key.axis)),
            xlab = "Longitude (E)", ylab = "Latitude (N)")
    map(database = 'world', xlim = range(x), ylim = range(y), add = TRUE)
    try(map(database = 'state', xlim = range(x), ylim = range(y), add = TRUE),
        silent = TRUE)
  }
  invisible(NULL)
}
| /plot.weathermap.R | no_license | ChrisZarzar/data_exploration | R | false | false | 2,247 | r | plot.weathermap <- function(x,y,dataset,filled=FALSE,interval=100) {
## This simple function will plot weather data on a map
## of the world. It will zoom to the particular region
## you desire and plot the weather data there. You can
## only plot a single variable in this version, although
## it is possible to plot additional weather data manually.
## Data must be a matrix that has the same structure as the
## grid of the x,y data, with the longitudes as columns.
## For example, if you have a 15 longitude by
## 10 latitude grid, you would need a 10X15 data matrix to use as
## input.
## levels is a vector of desired contour levels that the user can input.
## Without it specified, the function will use the default values.
## Check to be sure the dimensions are correct.
error=FALSE
if (length(x) != dim(dataset)[2]) {error = TRUE}
if (length(y) != dim(dataset)[1]) {error = TRUE}
par(cex=2.5)
if (error) { ## Check for errors in the data dimensions.
print("The dimensions are incorrect. You may want to try the transpose.")
} else {
key.axis <- pretty(range(dataset),round((max(dataset)-min(dataset))/interval))
# key.axis <- key.axis[-length(key.axis)]
# key.axis <- key.axis[-1]
## Set up window margins
marg.1 <- (range(y)[2]-range(y)[1])/1.375
marg.2 <- (range(x)[2]-range(x)[1])/1.75
windows(marg.2,marg.1)
par(mai=c(1.1,1.1,1.1,1.1))
if (filled==TRUE) { ## shaded is true
rgb.palette <- colorRampPalette(c("blue", "green", "orange","red"),space = "rgb")
filled.contour(x,y,t(dataset),plot.axes={axis(1);axis(2);map('world',add=T)})
try(map('state',projection='rectangular',parameters=0,add=TRUE),silent=T)
} else { ## end if shaded is true
rgb.palette <- colorRampPalette(c("blue", "green","orange", "red"),space = "rgb")
contour(x,y,t(dataset),levels=key.axis,labcex=1.25,col=rgb.palette(length(key.axis)),xlab="Longitude (E)",ylab="Latitude (N)",)
map(database='world',xlim=c(min(x),max(x)),ylim=c(min(y),max(y)),add=TRUE)
try(map(database='state',xlim=c(min(x),max(x)),ylim=c(min(y),max(y)),add=TRUE),silent=T)
} ## End the else statement
} ## End the error checking else statement
} ## End the function
|
# Load the study data; the 'group' column codes the four groups of women
# (1-4). NOTE: plot titles below are in Bulgarian and are left unchanged.
researchTable <- data.frame(read.table("/home/john/Database.csv", header=TRUE, sep=","))
# groups of women - 1, 2, 3, 4
groupTable <- table(researchTable$group)
# Pie chart of group sizes, labelled with percentages.
groupPercentage <- round(prop.table(groupTable)*100, 2)
pie(groupTable, labels = groupPercentage, main = "Таргет трупи в %", col = rainbow(n = length(groupTable)))
groups <- unique(researchTable$group)
legend(x = 'bottomleft', legend = groups, cex = 0.8, fill = rainbow(length(groupTable)))
# Bar plot of group counts, with the count printed above each bar.
barplotRegion <- barplot(height = groupTable, col = "seagreen", main = "Брой жени във всяка група ", las = 1)
frequencies <- tabulate(researchTable$group)
text(x = barplotRegion, y = frequencies - 3, label = frequencies, pos = 3, cex = 1, col = "black")
# Sample mode of 'values': the most frequent element, with ties broken in
# favour of the value that appears first in the vector. Works for any
# vector type supported by unique()/match().
getMode <- function(values) {
  distinct <- unique(values)
  counts <- tabulate(match(values, distinct))
  distinct[which.max(counts)]
}
# Print descriptive statistics for 'values', draw a histogram with a
# fitted normal curve, and run a Shapiro-Wilk normality test (its result
# is the function's return value). Side effects: console output and a
# plot on the current device. Depends on getMode() defined above.
#
# Args:
#   values:  numeric vector to summarise
#   xlabArg: x-axis label for the histogram
#   mainArg: title for the histogram
getDescriptiveWithHisto <- function(values, xlabArg, mainArg) {
  print(summary(values)) # min, max, median, mean, quartiles
  print(var(values)) # variance
  print(sd(values)) # standard deviation
  print(getMode(values)) # mode
  h<-hist(values, breaks=10, col="red", xlab=xlabArg,
       main=mainArg)
  # Overlay a normal density curve scaled to the histogram's counts.
  xfit<-seq(min(values),max(values),length=40)
  yfit<-dnorm(xfit,mean=mean(values),sd=sd(values))
  yfit <- yfit*diff(h$mids[1:2])*length(values)
  lines(xfit, yfit, col="blue", lwd=2)
  shapiro.test(values)
  # From the output: a p-value > 0.05 implies the distribution of the data
  # is not significantly different from a normal distribution.
  # In other words, we can assume normality.
}
# Subset the data frame by group, then pull out the three lab variables
# (cholesterol, glucose, triglycerides) per group.
groups.group1 <- researchTable[which(researchTable$group == 1), ]
groups.group2 <- researchTable[which(researchTable$group == 2), ]
groups.group3 <- researchTable[which(researchTable$group == 3), ]
groups.group4 <- researchTable[which(researchTable$group == 4), ]
group1CHOL <- groups.group1$CHOL.mmol.l
group2CHOL <- groups.group2$CHOL.mmol.l
group3CHOL <- groups.group3$CHOL.mmol.l
group4CHOL <- groups.group4$CHOL.mmol.l
group1GLUC <- groups.group1$GLUC.mmol.l
group2GLUC <- groups.group2$GLUC.mmol.l
group3GLUC <- groups.group3$GLUC.mmol.l
group4GLUC <- groups.group4$GLUC.mmol.l
group1Tg <- groups.group1$Tg.mmol.l
group2Tg <- groups.group2$Tg.mmol.l
group3Tg <- groups.group3$Tg.mmol.l
group4Tg <- groups.group4$Tg.mmol.l
# Descriptives + normality check per variable and group. The trailing
# comments record the observed Shapiro-Wilk p-values from a previous run.
getDescriptiveWithHisto(group1CHOL, "CHOL", "CHOL group 1") # p-value = 0.02293 < 0.05 we assume abnormal distribution of the data
getDescriptiveWithHisto(group2CHOL, "CHOL", "CHOL group 2") # p-value = 0.2117 > 0.05 normal distribution
getDescriptiveWithHisto(group3CHOL, "CHOL", "CHOL group 3") # p-value = 0.344 > 0.05 normal distribution
getDescriptiveWithHisto(group4CHOL, "CHOL", "CHOL group 4") # 0.1965
getDescriptiveWithHisto(group1GLUC, "GLUC", "GLUC group 1") # 0.2761
getDescriptiveWithHisto(group2GLUC, "GLUC", "GLUC group 2") # 0.04414
getDescriptiveWithHisto(group3GLUC, "GLUC", "GLUC group 3") # 0.2576
getDescriptiveWithHisto(group4GLUC, "GLUC", "GLUC group 4") # 0.0194
getDescriptiveWithHisto(group1Tg, "Tg", "Tg group 1") # 0.1082
getDescriptiveWithHisto(group2Tg, "Tg", "Tg group 2") # 0.01227
getDescriptiveWithHisto(group3Tg, "Tg", "Tg group 3") # 0.05004
getDescriptiveWithHisto(group4Tg, "Tg", "Tg group 4") # 0.050002
# Non-parametric group comparisons: Kruskal-Wallis across all four groups
# per variable, with boxplots; pairwise Wilcoxon tests against group 4.
# group vs gluc
kruskal.test(GLUC.mmol.l ~ group, data = researchTable) # p-value is < 0.05 so we can conclude that there are significant differences between the treatment groups.
boxplot(researchTable$GLUC.mmol.l ~ researchTable$group, names = c("1", "2", "3", "4"), xlab = "Groups", ylab = "GLUC",
        main = "GLUC / Groups", col = rainbow(length(groupTable)))
wilcox.test(groups.group1$GLUC.mmol.l, groups.group4$GLUC.mmol.l)
wilcox.test(groups.group2$GLUC.mmol.l, groups.group4$GLUC.mmol.l)
wilcox.test(groups.group3$GLUC.mmol.l, groups.group4$GLUC.mmol.l)
# NOTE(review): this CHOL comparison sits in the GLUC section -- verify
# whether it belongs with the CHOL analysis below.
wilcox.test(groups.group1$CHOL.mmol.l, groups.group4$CHOL.mmol.l)
# group vs chol
kruskal.test(CHOL.mmol.l ~ group, data = researchTable) # p-value = 0.3941 > 0.05 so we can conclude that there are not any significant differences between the treatment groups.
boxplot(researchTable$CHOL.mmol.l ~ researchTable$group, names = c("1", "2", "3", "4"), xlab = "Groups", ylab = "CHOL",
        main = "CHOL / Groups", col = rainbow(length(groupTable)))
# group vs tg
kruskal.test(Tg.mmol.l ~ group, data = researchTable) # p-value = 0.00001172 < 0.05 so we can conclude that there are significant differences between the treatment groups.
boxplot(researchTable$Tg.mmol.l ~ researchTable$group, names = c("1", "2", "3", "4"), xlab = "Groups", ylab = "Tg",
        main = "Tg / Groups", col = rainbow(length(groupTable)))
# Per-group association analyses: scatter plot, Spearman correlation and
# simple linear regression. NOTE(review): the plot titles spell "CHOl"
# (typo for "CHOL") in the first two sections.
# group 1: GLUC vs CHOL
plot(groups.group1$GLUC.mmol.l, groups.group1$CHOL.mmol.l, main = "GLUC / CHOl Group 1", xlab = "GLUC", ylab = "CHOL")
cor(groups.group1$GLUC.mmol.l, groups.group1$CHOL.mmol.l, method = "spearman")
model <- lm(GLUC.mmol.l ~ CHOL.mmol.l, data = groups.group1)
summary(model)
# group 4: GLUC vs CHOL
plot(groups.group4$GLUC.mmol.l, groups.group4$CHOL.mmol.l, main = "GLUC / CHOl Group 4", xlab = "GLUC", ylab = "CHOL")
cor(groups.group4$GLUC.mmol.l, groups.group4$CHOL.mmol.l, method = "spearman")
model <- lm(GLUC.mmol.l ~ CHOL.mmol.l, data = groups.group4)
summary(model)
# group 2: GLUC vs Tg
plot(groups.group2$GLUC.mmol.l, groups.group2$Tg.mmol.l, main = "GLUC / Tg Group 2", xlab = "GLUC", ylab = "Tg")
cor(groups.group2$GLUC.mmol.l, groups.group2$Tg.mmol.l, method = "spearman")
model <- lm(GLUC.mmol.l ~ Tg.mmol.l, data = groups.group2)
summary(model)
# group 1: GLUC vs Tg
plot(groups.group1$GLUC.mmol.l, groups.group1$Tg.mmol.l, main = "GLUC / Tg Group 1", xlab = "GLUC", ylab = "Tg")
cor(groups.group1$GLUC.mmol.l, groups.group1$Tg.mmol.l, method = "spearman")
model <- lm(GLUC.mmol.l ~ Tg.mmol.l, data = groups.group1)
summary(model)
#tg vs chol
# FIX: the x-axis labels said "GLUC" and the group-1 model regressed GLUC,
# although CHOL is what is plotted and correlated here (copy-paste slip from
# the GLUC sections above). Labels and formula now match the data used.
#group 1
plot(groups.group1$CHOL.mmol.l, groups.group1$Tg.mmol.l, main = "CHOL / Tg Group 1", xlab = "CHOL", ylab = "Tg")
cor(groups.group1$CHOL.mmol.l, groups.group1$Tg.mmol.l, method = "spearman")
model <- lm(CHOL.mmol.l ~ Tg.mmol.l, data = groups.group1)
summary(model)
#group 2
plot(groups.group2$CHOL.mmol.l, groups.group2$Tg.mmol.l, main = "CHOL / Tg Group 2", xlab = "CHOL", ylab = "Tg")
cor(groups.group2$CHOL.mmol.l, groups.group2$Tg.mmol.l, method = "spearman")
model <- lm(CHOL.mmol.l ~ Tg.mmol.l, data = groups.group2)
summary(model)
| /script.R | no_license | 18ivan18/StatisticsProjectFMI | R | false | false | 6,396 | r | researchTable <- data.frame(read.table("/home/john/Database.csv", header=TRUE, sep=","))
# groups of women - 1, 2, 3, 4
groupTable <- table(researchTable$group)
# Percentage share of each treatment group, rounded to two decimals.
groupPercentage <- round(prop.table(groupTable)*100, 2)
# Pie chart of group shares (title is Bulgarian for "Target groups in %").
pie(groupTable, labels = groupPercentage, main = "Таргет трупи в %", col = rainbow(n = length(groupTable)))
groups <- unique(researchTable$group)
legend(x = 'bottomleft', legend = groups, cex = 0.8, fill = rainbow(length(groupTable)))
# Bar chart of group sizes (title is Bulgarian for "Number of women in each group").
barplotRegion <- barplot(height = groupTable, col = "seagreen", main = "Брой жени във всяка група ", las = 1)
frequencies <- tabulate(researchTable$group)
# Annotate each bar with its count, just above the bar top.
text(x = barplotRegion, y = frequencies - 3, label = frequencies, pos = 3, cex = 1, col = "black")
getMode <- function(values) {
  # Return the statistical mode of `values` (the most frequent element).
  # Ties are resolved in favour of the value that occurs first in `values`.
  distinct <- unique(values)
  counts <- tabulate(match(values, distinct))
  distinct[which.max(counts)]
}
getDescriptiveWithHisto <- function(values, xlabArg, mainArg) {
# Print descriptive statistics for `values`, draw a histogram with a fitted
# normal curve, and return the Shapiro-Wilk normality test result.
#   values  - numeric vector to summarise
#   xlabArg - x-axis label for the histogram
#   mainArg - main title of the histogram
print(summary(values)) #min, max median, mean
print(var(values)) # variance
print(sd(values)) # standard deviation
print(getMode(values)) # mode
h<-hist(values, breaks=10, col="red", xlab=xlabArg,
main=mainArg)
# Overlay a normal density curve, rescaled from density to the histogram's
# count axis (bin width * number of observations).
xfit<-seq(min(values),max(values),length=40)
yfit<-dnorm(xfit,mean=mean(values),sd=sd(values))
yfit <- yfit*diff(h$mids[1:2])*length(values)
lines(xfit, yfit, col="blue", lwd=2)
# Last expression: the Shapiro-Wilk test object is returned to the caller.
shapiro.test(values)
#From the output, the p-value > 0.05 implying that the distribution of the data are not significantly different from normal distribution.
#In other words, we can assume the normality.
}
# Split the table into one data frame per treatment group.
groups.group1 <- researchTable[which(researchTable$group == 1), ]
groups.group2 <- researchTable[which(researchTable$group == 2), ]
groups.group3 <- researchTable[which(researchTable$group == 3), ]
groups.group4 <- researchTable[which(researchTable$group == 4), ]
# Extract the three measured variables (CHOL, GLUC, Tg) per group.
group1CHOL <- groups.group1$CHOL.mmol.l
group2CHOL <- groups.group2$CHOL.mmol.l
group3CHOL <- groups.group3$CHOL.mmol.l
group4CHOL <- groups.group4$CHOL.mmol.l
group1GLUC <- groups.group1$GLUC.mmol.l
group2GLUC <- groups.group2$GLUC.mmol.l
group3GLUC <- groups.group3$GLUC.mmol.l
group4GLUC <- groups.group4$GLUC.mmol.l
group1Tg <- groups.group1$Tg.mmol.l
group2Tg <- groups.group2$Tg.mmol.l
group3Tg <- groups.group3$Tg.mmol.l
group4Tg <- groups.group4$Tg.mmol.l
# Descriptive stats + histogram + Shapiro-Wilk for each variable/group.
# Trailing comments record the observed Shapiro-Wilk p-values.
getDescriptiveWithHisto(group1CHOL, "CHOL", "CHOL group 1") # p-value = 0.02293 < 0.05 we assume abnormal distribution of the data
getDescriptiveWithHisto(group2CHOL, "CHOL", "CHOL group 2") # p-value = 0.2117 > 0.05 normal distribution
getDescriptiveWithHisto(group3CHOL, "CHOL", "CHOL group 3") # p-value = 0.344 > 0.05 normal distribution
getDescriptiveWithHisto(group4CHOL, "CHOL", "CHOL group 4") # 0.1965
getDescriptiveWithHisto(group1GLUC, "GLUC", "GLUC group 1") # 0.2761
getDescriptiveWithHisto(group2GLUC, "GLUC", "GLUC group 2") # 0.04414
getDescriptiveWithHisto(group3GLUC, "GLUC", "GLUC group 3") # 0.2576
getDescriptiveWithHisto(group4GLUC, "GLUC", "GLUC group 4") #0.0194
getDescriptiveWithHisto(group1Tg, "Tg", "Tg group 1") # 0.1082
getDescriptiveWithHisto(group2Tg, "Tg", "Tg group 2") # 0.01227
getDescriptiveWithHisto(group3Tg, "Tg", "Tg group 3") # 0.05004
getDescriptiveWithHisto(group4Tg, "Tg", "Tg group 4") # 0.050002
#group vs gluc
kruskal.test(GLUC.mmol.l ~ group, data = researchTable) # p-value is < 0.05 so we can conclude that there are significant differences between the treatment groups.
boxplot(researchTable$GLUC.mmol.l ~ researchTable$group, names = c("1", "2", "3", "4"), xlab = "Groups", ylab = "GLUC",
main = "GLUC / Groups", col = rainbow(length(groupTable)))
# Pairwise Wilcoxon rank-sum tests of GLUC against group 4.
wilcox.test(groups.group1$GLUC.mmol.l, groups.group4$GLUC.mmol.l)
wilcox.test(groups.group2$GLUC.mmol.l, groups.group4$GLUC.mmol.l)
wilcox.test(groups.group3$GLUC.mmol.l, groups.group4$GLUC.mmol.l)
wilcox.test(groups.group1$CHOL.mmol.l, groups.group4$CHOL.mmol.l)
#group vs chol
kruskal.test(CHOL.mmol.l ~ group, data = researchTable) # p-value = 0.3941 > 0.05 so we can conclude that there are not any significant differences between the treatment groups.
boxplot(researchTable$CHOL.mmol.l ~ researchTable$group, names = c("1", "2", "3", "4"), xlab = "Groups", ylab = "CHOL",
main = "CHOL / Groups", col = rainbow(length(groupTable)))
#group vs tg
kruskal.test(Tg.mmol.l ~ group, data = researchTable) # p-value = 0.00001172 < 0.05 so we can conclude that there are significant differences between the treatment groups.
boxplot(researchTable$Tg.mmol.l ~ researchTable$group, names = c("1", "2", "3", "4"), xlab = "Groups", ylab = "Tg",
main = "Tg / Groups", col = rainbow(length(groupTable)))
# GLUC vs CHOL: scatter, Spearman correlation, and linear model per group.
# FIX: plot titles previously read "CHOl" (typo) -- corrected to "CHOL".
#group 1
plot(groups.group1$GLUC.mmol.l, groups.group1$CHOL.mmol.l, main = "GLUC / CHOL Group 1", xlab = "GLUC", ylab = "CHOL")
cor(groups.group1$GLUC.mmol.l, groups.group1$CHOL.mmol.l, method = "spearman")
model <- lm(GLUC.mmol.l ~ CHOL.mmol.l, data = groups.group1)
summary(model)
#group 4
plot(groups.group4$GLUC.mmol.l, groups.group4$CHOL.mmol.l, main = "GLUC / CHOL Group 4", xlab = "GLUC", ylab = "CHOL")
cor(groups.group4$GLUC.mmol.l, groups.group4$CHOL.mmol.l, method = "spearman")
model <- lm(GLUC.mmol.l ~ CHOL.mmol.l, data = groups.group4)
summary(model)
#group 2
plot(groups.group2$GLUC.mmol.l, groups.group2$Tg.mmol.l, main = "GLUC / Tg Group 2", xlab = "GLUC", ylab = "Tg")
cor(groups.group2$GLUC.mmol.l, groups.group2$Tg.mmol.l, method = "spearman")
model <- lm(GLUC.mmol.l ~ Tg.mmol.l, data = groups.group2)
summary(model)
#group 1
plot(groups.group1$GLUC.mmol.l, groups.group1$Tg.mmol.l, main = "GLUC / Tg Group 1", xlab = "GLUC", ylab = "Tg")
cor(groups.group1$GLUC.mmol.l, groups.group1$Tg.mmol.l, method = "spearman")
model <- lm(GLUC.mmol.l ~ Tg.mmol.l, data = groups.group1)
summary(model)
#tg vs chol
# FIX: the x-axis labels said "GLUC" and the group-1 model regressed GLUC,
# although CHOL is what is plotted and correlated here (copy-paste slip from
# the GLUC sections above). Labels and formula now match the data used.
#group 1
plot(groups.group1$CHOL.mmol.l, groups.group1$Tg.mmol.l, main = "CHOL / Tg Group 1", xlab = "CHOL", ylab = "Tg")
cor(groups.group1$CHOL.mmol.l, groups.group1$Tg.mmol.l, method = "spearman")
model <- lm(CHOL.mmol.l ~ Tg.mmol.l, data = groups.group1)
summary(model)
#group 2
plot(groups.group2$CHOL.mmol.l, groups.group2$Tg.mmol.l, main = "CHOL / Tg Group 2", xlab = "CHOL", ylab = "Tg")
cor(groups.group2$CHOL.mmol.l, groups.group2$Tg.mmol.l, method = "spearman")
model <- lm(CHOL.mmol.l ~ Tg.mmol.l, data = groups.group2)
summary(model)
|
load_packages <- function() {
  # Attach every package required by the analysis pipeline, silencing
  # startup messages. Intended to be called once per entry-point script.
  # FIX: removed a duplicate library(furrr) call (it was attached twice).
  suppressPackageStartupMessages(library(drake))
  suppressPackageStartupMessages(library(rgdal))
  suppressPackageStartupMessages(library(hsdar))
  suppressPackageStartupMessages(library(dplyr))
  suppressPackageStartupMessages(library(sf))
  suppressPackageStartupMessages(library(purrr))
  suppressPackageStartupMessages(library(glue))
  suppressPackageStartupMessages(library(R.utils))
  suppressPackageStartupMessages(library(furrr))
  suppressPackageStartupMessages(library(future))
  suppressPackageStartupMessages(library(future.callr))
  suppressPackageStartupMessages(library(future.apply))
  suppressPackageStartupMessages(library(magrittr))
  suppressPackageStartupMessages(library(ggplot2))
  suppressPackageStartupMessages(library(data.table))
  suppressPackageStartupMessages(library(mlrCPO))
  suppressPackageStartupMessages(library(curl))
  suppressPackageStartupMessages(library(fs))
  suppressPackageStartupMessages(library(stringr))
  # mapview can emit warnings in addition to startup messages on load
  suppressWarnings(suppressPackageStartupMessages(library(mapview)))
  suppressPackageStartupMessages(library(raster))
  suppressPackageStartupMessages(library(mlrMBO))
  suppressPackageStartupMessages(library(emoa))
  suppressPackageStartupMessages(library(parallelMap))
  suppressPackageStartupMessages(library(rgenoud))
  suppressPackageStartupMessages(library(knitr))
  suppressPackageStartupMessages(library(getSpatialData))
  suppressPackageStartupMessages(library(gdalUtils))
  suppressPackageStartupMessages(library(ggspatial))
  suppressPackageStartupMessages(library(ggpubr))
  suppressPackageStartupMessages(library(here))
  suppressPackageStartupMessages(library(workflowr))
  suppressPackageStartupMessages(library(praznik))
  suppressPackageStartupMessages(library(mRMRe))
  suppressPackageStartupMessages(library(kernlab))
  suppressPackageStartupMessages(library(ggcorrplot))
}
| /code/99-packages.R | permissive | cgpu/2019-feature-selection | R | false | false | 1,925 | r | load_packages = function() {
suppressPackageStartupMessages(library(drake))
suppressPackageStartupMessages(library(rgdal))
suppressPackageStartupMessages(library(hsdar))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(sf))
suppressPackageStartupMessages(library(purrr))
suppressPackageStartupMessages(library(glue))
suppressPackageStartupMessages(library(R.utils))
suppressPackageStartupMessages(library(furrr))
suppressPackageStartupMessages(library(future))
suppressPackageStartupMessages(library(future.callr))
suppressPackageStartupMessages(library(future.apply))
suppressPackageStartupMessages(library(magrittr))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(furrr))
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(mlrCPO))
suppressPackageStartupMessages(library(curl))
suppressPackageStartupMessages(library(fs))
suppressPackageStartupMessages(library(stringr))
suppressWarnings(suppressPackageStartupMessages(library(mapview)))
suppressPackageStartupMessages(library(raster))
suppressPackageStartupMessages(library(mlrMBO))
suppressPackageStartupMessages(library(emoa))
suppressPackageStartupMessages(library(parallelMap))
suppressPackageStartupMessages(library(rgenoud))
suppressPackageStartupMessages(library(knitr))
suppressPackageStartupMessages(library(getSpatialData))
suppressPackageStartupMessages(library(gdalUtils))
suppressPackageStartupMessages(library(ggspatial))
suppressPackageStartupMessages(library(ggpubr))
suppressPackageStartupMessages(library(here))
suppressPackageStartupMessages(library(workflowr))
suppressPackageStartupMessages(library(praznik))
suppressPackageStartupMessages(library(mRMRe))
suppressPackageStartupMessages(library(kernlab))
suppressPackageStartupMessages(library(ggcorrplot))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MetaNeighbor.R
\name{MetaNeighbor}
\alias{MetaNeighbor}
\title{Runs MetaNeighbor}
\usage{
MetaNeighbor(
dat,
i = 1,
experiment_labels,
celltype_labels,
genesets,
bplot = TRUE,
fast_version = FALSE,
node_degree_normalization = TRUE
)
}
\arguments{
\item{dat}{A SummarizedExperiment object containing gene-by-sample
expression matrix.}
\item{i}{default value 1; non-zero index value of assay containing the matrix
data}
\item{experiment_labels}{A numerical vector that indicates the source of each
sample.}
\item{celltype_labels}{A matrix that indicates the cell type of each sample.}
\item{genesets}{Gene sets of interest provided as a list of vectors.}
\item{bplot}{default true, beanplot is generated}
\item{fast_version}{default value FALSE; a boolean flag indicating whether
to use the fast and low memory version of MetaNeighbor}
\item{node_degree_normalization}{default value TRUE; a boolean flag indicating
whether to normalize votes by dividing through the total node degree.}
}
\value{
A matrix of AUROC scores representing the mean for each gene set
tested for each celltype is returned directly (see \code{\link{neighborVoting}}).
}
\description{
For each gene set of interest, the function builds a network of rank
correlations between all cells. Next, the neighbor voting predictor
produces a weighted matrix of predicted labels by performing matrix
multiplication between the network and the binary vector indicating cell type
membership, then dividing each element by the null predictor (i.e., node
degree). That is, each cell is given a score equal to the fraction of its
neighbors (including itself), which are part of a given cell type. For
cross-validation, we permute through all possible combinations of
leave-one-dataset-out cross-validation, and we report how well we can recover
cells of the same type as area under the receiver operator characteristic
curve (AUROC). This is repeated for all folds of cross-validation, and the
mean AUROC across folds is reported. Calls
\code{\link{neighborVoting}}.
}
\examples{
data("mn_data")
data("GOmouse")
library(SummarizedExperiment)
AUROC_scores = MetaNeighbor(dat = mn_data,
experiment_labels = as.numeric(factor(mn_data$study_id)),
celltype_labels = metadata(colData(mn_data))[["cell_labels"]],
genesets = GOmouse,
bplot = TRUE)
}
\seealso{
\code{\link{neighborVoting}}
}
| /man/MetaNeighbor.Rd | permissive | bharris12/MetaNeighbor | R | false | true | 2,633 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MetaNeighbor.R
\name{MetaNeighbor}
\alias{MetaNeighbor}
\title{Runs MetaNeighbor}
\usage{
MetaNeighbor(
dat,
i = 1,
experiment_labels,
celltype_labels,
genesets,
bplot = TRUE,
fast_version = FALSE,
node_degree_normalization = TRUE
)
}
\arguments{
\item{dat}{A SummarizedExperiment object containing gene-by-sample
expression matrix.}
\item{i}{default value 1; non-zero index value of assay containing the matrix
data}
\item{experiment_labels}{A numerical vector that indicates the source of each
sample.}
\item{celltype_labels}{A matrix that indicates the cell type of each sample.}
\item{genesets}{Gene sets of interest provided as a list of vectors.}
\item{bplot}{default true, beanplot is generated}
\item{fast_version}{default value FALSE; a boolean flag indicating whether
to use the fast and low memory version of MetaNeighbor}
\item{node_degree_normalization}{default value TRUE; a boolean flag indicating
whether to normalize votes by dividing through the total node degree.}
}
\value{
A matrix of AUROC scores representing the mean for each gene set
tested for each celltype is returned directly (see \code{\link{neighborVoting}}).
}
\description{
For each gene set of interest, the function builds a network of rank
correlations between all cells. Next, the neighbor voting predictor
produces a weighted matrix of predicted labels by performing matrix
multiplication between the network and the binary vector indicating cell type
membership, then dividing each element by the null predictor (i.e., node
degree). That is, each cell is given a score equal to the fraction of its
neighbors (including itself), which are part of a given cell type. For
cross-validation, we permute through all possible combinations of
leave-one-dataset-out cross-validation, and we report how well we can recover
cells of the same type as area under the receiver operator characteristic
curve (AUROC). This is repeated for all folds of cross-validation, and the
mean AUROC across folds is reported. Calls
\code{\link{neighborVoting}}.
}
\examples{
data("mn_data")
data("GOmouse")
library(SummarizedExperiment)
AUROC_scores = MetaNeighbor(dat = mn_data,
experiment_labels = as.numeric(factor(mn_data$study_id)),
celltype_labels = metadata(colData(mn_data))[["cell_labels"]],
genesets = GOmouse,
bplot = TRUE)
}
\seealso{
\code{\link{neighborVoting}}
}
|
#' Ribbons and area plots.
#'
#' For each continuous x value, \code{geom_interval} displays a y interval.
#' \code{geom_area} is a special case of \code{geom_ribbon}, where the
#' minimum of the range is fixed to 0.
#'
#' An area plot is the continuous analog of a stacked bar chart (see
#' \code{\link{geom_bar}}), and can be used to show how composition of the
#' whole varies over the range of x. Choosing the order in which different
#' components are stacked is very important, as it becomes increasingly hard to
#' see the individual pattern as you move up the stack.
#'
#' @section Aesthetics:
#' \Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("geom", "ribbon")}
#'
#' @seealso
#' \code{\link{geom_bar}} for discrete intervals (bars),
#' \code{\link{geom_linerange}} for discrete intervals (lines),
#' \code{\link{geom_polygon}} for general polygons
#' @inheritParams geom_point
#' @export
#' @examples
#' # Generate data
#' huron <- data.frame(year = 1875:1972, level = as.vector(LakeHuron))
#' h <- ggplot(huron, aes(year))
#'
#' h + geom_ribbon(aes(ymin=0, ymax=level))
#' h + geom_area(aes(y = level))
#'
#' # Add aesthetic mappings
#' h +
#' geom_ribbon(aes(ymin = level - 1, ymax = level + 1), fill = "grey70") +
#' geom_line(aes(y = level))
geom_ribbon <- function(mapping = NULL, data = NULL, stat = "identity",
                        position = "identity", na.rm = FALSE, show.legend = NA,
                        inherit.aes = TRUE, ...) {
  # Build a ribbon layer (a y interval for each x).
  # FIX: `na.rm` was accepted in the signature but never forwarded to layer(),
  # so GeomRibbon$draw_group always used its own default (FALSE) regardless of
  # what the user passed. Forward it via `params`, matching geom_area().
  layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomRibbon,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
# ggproto object implementing the ribbon geom: for each group it draws the
# polygon bounded below by ymin and above by ymax, splitting the ribbon into
# separate polygons across runs of missing values.
GeomRibbon <- ggproto("GeomRibbon", Geom,
default_aes = aes(colour = NA, fill = "grey20", size = 0.5, linetype = 1,
alpha = NA),
required_aes = c("x", "ymin", "ymax"),
draw_key = draw_key_polygon,
draw_group = function(self, data, scales, coordinates, na.rm = FALSE, ...) {
# Optionally drop rows with missing required aesthetics, then sort by
# group and x so the polygon outline is traced in a consistent direction.
if (na.rm) data <- data[stats::complete.cases(data[self$required_aes]), ]
data <- data[order(data$group, data$x), ]
# Check that aesthetics are constant
aes <- unique(data[c("colour", "fill", "size", "linetype", "alpha")])
if (nrow(aes) > 1) {
stop("Aesthetics can not vary with a ribbon")
}
aes <- as.list(aes)
# Instead of removing NA values from the data and plotting a single
# polygon, we want to "stop" plotting the polygon whenever we're
# missing values and "start" a new polygon as soon as we have new
# values. We do this by creating an id vector for polygonGrob that
# has distinct polygon numbers for sequences of non-NA values and NA
# for NA values in the original data. Example: c(NA, 2, 2, 2, NA, NA,
# 4, 4, 4, NA)
missing_pos <- !stats::complete.cases(data[self$required_aes])
ids <- cumsum(missing_pos) + 1
ids[missing_pos] <- NA
# Trace ymax left-to-right, then ymin right-to-left; shared ids close
# each non-NA run into its own polygon.
positions <- plyr::summarise(data,
x = c(x, rev(x)), y = c(ymax, rev(ymin)), id = c(ids, rev(ids)))
munched <- coord_munch(coordinates,positions, scales)
ggname("geom_ribbon", polygonGrob(
munched$x, munched$y, id = munched$id,
default.units = "native",
gp = gpar(
fill = alpha(aes$fill, aes$alpha),
col = aes$colour,
lwd = aes$size * .pt,
lty = aes$linetype)
))
}
)
#' @rdname geom_ribbon
#' @export
geom_area <- function(mapping = NULL, data = NULL, stat = "identity",
                      position = "stack", na.rm = FALSE, show.legend = NA,
                      inherit.aes = TRUE, ...) {
  # An area plot is a ribbon whose lower bound is fixed at 0; stacking is
  # the default position so composition of the whole can be read across x.
  geom_params <- list(na.rm = na.rm, ...)
  layer(data = data, mapping = mapping, stat = stat, geom = GeomArea,
        position = position, show.legend = show.legend,
        inherit.aes = inherit.aes, params = geom_params)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
# Area geom: inherits ribbon drawing from GeomRibbon, but derives the
# interval from a single y aesthetic with the lower bound fixed at 0.
GeomArea <- ggproto("GeomArea", GeomRibbon,
default_aes = aes(colour = NA, fill = "grey20", size = 0.5, linetype = 1,
alpha = NA),
required_aes = c("x", "y"),
reparameterise = function(df, params) {
# Map each y to the [0, y] interval expected by GeomRibbon's draw code.
transform(df, ymin = 0, ymax = y)
}
)
| /R/geom-ribbon-.r | no_license | bbolker/ggplot2 | R | false | false | 4,180 | r | #' Ribbons and area plots.
#'
#' For each continuous x value, \code{geom_interval} displays a y interval.
#' \code{geom_area} is a special case of \code{geom_ribbon}, where the
#' minimum of the range is fixed to 0.
#'
#' An area plot is the continuous analog of a stacked bar chart (see
#' \code{\link{geom_bar}}), and can be used to show how composition of the
#' whole varies over the range of x. Choosing the order in which different
#' components are stacked is very important, as it becomes increasingly hard to
#' see the individual pattern as you move up the stack.
#'
#' @section Aesthetics:
#' \Sexpr[results=rd,stage=build]{ggplot2:::rd_aesthetics("geom", "ribbon")}
#'
#' @seealso
#' \code{\link{geom_bar}} for discrete intervals (bars),
#' \code{\link{geom_linerange}} for discrete intervals (lines),
#' \code{\link{geom_polygon}} for general polygons
#' @inheritParams geom_point
#' @export
#' @examples
#' # Generate data
#' huron <- data.frame(year = 1875:1972, level = as.vector(LakeHuron))
#' h <- ggplot(huron, aes(year))
#'
#' h + geom_ribbon(aes(ymin=0, ymax=level))
#' h + geom_area(aes(y = level))
#'
#' # Add aesthetic mappings
#' h +
#' geom_ribbon(aes(ymin = level - 1, ymax = level + 1), fill = "grey70") +
#' geom_line(aes(y = level))
geom_ribbon <- function(mapping = NULL, data = NULL, stat = "identity",
                        position = "identity", na.rm = FALSE, show.legend = NA,
                        inherit.aes = TRUE, ...) {
  # Build a ribbon layer (a y interval for each x).
  # FIX: `na.rm` was accepted in the signature but never forwarded to layer(),
  # so GeomRibbon$draw_group always used its own default (FALSE) regardless of
  # what the user passed. Forward it via `params`, matching geom_area().
  layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomRibbon,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(na.rm = na.rm, ...)
  )
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
# ggproto object implementing the ribbon geom: for each group it draws the
# polygon bounded below by ymin and above by ymax, splitting the ribbon into
# separate polygons across runs of missing values.
GeomRibbon <- ggproto("GeomRibbon", Geom,
default_aes = aes(colour = NA, fill = "grey20", size = 0.5, linetype = 1,
alpha = NA),
required_aes = c("x", "ymin", "ymax"),
draw_key = draw_key_polygon,
draw_group = function(self, data, scales, coordinates, na.rm = FALSE, ...) {
# Optionally drop rows with missing required aesthetics, then sort by
# group and x so the polygon outline is traced in a consistent direction.
if (na.rm) data <- data[stats::complete.cases(data[self$required_aes]), ]
data <- data[order(data$group, data$x), ]
# Check that aesthetics are constant
aes <- unique(data[c("colour", "fill", "size", "linetype", "alpha")])
if (nrow(aes) > 1) {
stop("Aesthetics can not vary with a ribbon")
}
aes <- as.list(aes)
# Instead of removing NA values from the data and plotting a single
# polygon, we want to "stop" plotting the polygon whenever we're
# missing values and "start" a new polygon as soon as we have new
# values. We do this by creating an id vector for polygonGrob that
# has distinct polygon numbers for sequences of non-NA values and NA
# for NA values in the original data. Example: c(NA, 2, 2, 2, NA, NA,
# 4, 4, 4, NA)
missing_pos <- !stats::complete.cases(data[self$required_aes])
ids <- cumsum(missing_pos) + 1
ids[missing_pos] <- NA
# Trace ymax left-to-right, then ymin right-to-left; shared ids close
# each non-NA run into its own polygon.
positions <- plyr::summarise(data,
x = c(x, rev(x)), y = c(ymax, rev(ymin)), id = c(ids, rev(ids)))
munched <- coord_munch(coordinates,positions, scales)
ggname("geom_ribbon", polygonGrob(
munched$x, munched$y, id = munched$id,
default.units = "native",
gp = gpar(
fill = alpha(aes$fill, aes$alpha),
col = aes$colour,
lwd = aes$size * .pt,
lty = aes$linetype)
))
}
)
#' @rdname geom_ribbon
#' @export
geom_area <- function(mapping = NULL, data = NULL, stat = "identity",
                      position = "stack", na.rm = FALSE, show.legend = NA,
                      inherit.aes = TRUE, ...) {
  # An area plot is a ribbon whose lower bound is fixed at 0; stacking is
  # the default position so composition of the whole can be read across x.
  geom_params <- list(na.rm = na.rm, ...)
  layer(data = data, mapping = mapping, stat = stat, geom = GeomArea,
        position = position, show.legend = show.legend,
        inherit.aes = inherit.aes, params = geom_params)
}
#' @rdname ggplot2-ggproto
#' @format NULL
#' @usage NULL
#' @export
# Area geom: inherits ribbon drawing from GeomRibbon, but derives the
# interval from a single y aesthetic with the lower bound fixed at 0.
GeomArea <- ggproto("GeomArea", GeomRibbon,
default_aes = aes(colour = NA, fill = "grey20", size = 0.5, linetype = 1,
alpha = NA),
required_aes = c("x", "y"),
reparameterise = function(df, params) {
# Map each y to the [0, y] interval expected by GeomRibbon's draw code.
transform(df, ymin = 0, ymax = y)
}
)
|
dlaplacelike2 <- function(par, x) {
  # Negative log-likelihood of the two-parameter discrete Laplace model.
  #   par: length-2 vector c(p, q) of distribution parameters
  #   x:   observed sample
  # Equivalent to sum(-log(f)): negation distributes over the sum.
  p <- par[1]
  q <- par[2]
  -sum(log(ddlaplace2(x, p, q)))
}
| /DiscreteLaplace/R/dlaplacelike2.R | no_license | ingted/R-Examples | R | false | false | 97 | r | dlaplacelike2 <-
function(par,x)
{
p <- par[1]
q <- par[2]
sum(-log(ddlaplace2(x,p,q)))
}
|
/Source/sitelevel_humboldt.R | no_license | callum-lawson/Butterflies | R | false | false | 39,579 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single_markers_statistics.R
\name{single_markers_statistics}
\alias{single_markers_statistics}
\title{Provide statistics for each marker.}
\usage{
single_markers_statistics(data_long)
}
\arguments{
\item{data_long}{a data.frame in long format returned by combiroc_long().}
}
\value{
a list object containing:
\itemize{
\item 'Statistics': a dataframe containing the main statistics for each marker in each class.
\item 'Plots': a named list of scatter plots showing signal intensity values.
}
}
\description{
A function that computes the statistics and a scatter-plot for each marker.
}
\details{
This function computes the main statistics of the signal values distribution of each marker in both classes. In addition it also shows the values through scatter plots.
}
\examples{
demo_data # combiroc built-in demo data (proteomics data from Zingaretti et al. 2012 - PMC3518104)
data_long <- combiroc_long(demo_data) # reshape data in long format
sms <- single_markers_statistics(data_long)
sms$Statistics # to visualize the statistics of each single marker
sms$Plots[[1]] # to visualize the scatterplot of the first marker
}
| /man/single_markers_statistics.Rd | permissive | minghao2016/combiroc | R | false | true | 1,206 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single_markers_statistics.R
\name{single_markers_statistics}
\alias{single_markers_statistics}
\title{Provide statistics for each marker.}
\usage{
single_markers_statistics(data_long)
}
\arguments{
\item{data_long}{a data.frame in long format returned by combiroc_long().}
}
\value{
a list object containing:
\itemize{
\item 'Statistics': a dataframe containing the main statistics for each marker in each class.
\item 'Plots': a named list of scatter plots showing signal intensity values.
}
}
\description{
A function that computes the statistics and a scatter-plot for each marker.
}
\details{
This function computes the main statistics of the signal values distribution of each marker in both classes. In addition it also shows the values through scatter plots.
}
\examples{
demo_data # combiroc built-in demo data (proteomics data from Zingaretti et al. 2012 - PMC3518104)
data_long <- combiroc_long(demo_data) # reshape data in long format
sms <- single_markers_statistics(data_long)
sms$Statistics # to visualize the statistics of each single marker
sms$Plots[[1]] # to visualize the scatterplot of the first marker
}
|
#This script will be used to run the F-test between the variables for zip codes with and without cases
#Rather than use HW 7, I am going to do this using the built in var.test() function because it is simpler and provides
#more organized output compared to the output we produced in class.
#Now I will test whether the variance of the noCase zipcodes and case zipcodes are equal.
#I will need to use the f test for this
#Ho: The variance of mean values in the noCase dataset and the mean values in the case dataset are equal
#Ha: The variance of mean values in the noCase dataset and the mean values in the case dataset are not equal
#I may just bring this all into one script with the F test.
# Each test compares column 8 (the mean value) of a noCase/case dataset pair.
# FIX: corrected the "huamn" typo in the 2003 human-slopes message below.
print("f-test for bird slopes in 2002")
#set these first two variables to changes what datasets will be calculated
noCase.data <- slope.02bnoCase.data
case.data <- slope.02bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird slopes in 2003")
noCase.data <- slope.03bnoCase.data
case.data <- slope.03bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird aspect in 2002")
noCase.data <- aspect.02bnoCase.data
case.data <- aspect.02bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird aspect in 2003")
noCase.data <- aspect.03bnoCase.data
case.data <- aspect.03bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird elev in 2002")
noCase.data <- elev.02bnoCase.data
case.data <- elev.02bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird elev in 2003")
noCase.data <- elev.03bnoCase.data
case.data <- elev.03bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human slopes in 2002")
noCase.data <- slope.02hnoCase.data
case.data <- slope.02hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human slopes in 2003")
noCase.data <- slope.03hnoCase.data
case.data <- slope.03hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human aspect in 2002")
noCase.data <- aspect.02hnoCase.data
case.data <- aspect.02hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human aspect in 2003")
noCase.data <- aspect.03hnoCase.data
case.data <- aspect.03hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human elev in 2002")
noCase.data <- elev.02hnoCase.data
case.data <- elev.02hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human elev in 2003")
noCase.data <- elev.03hnoCase.data
case.data <- elev.03hcase.data
var.test(noCase.data[,8],case.data[,8])
| /z_gisResearchApps_FtestV2.R | no_license | ChrisZarzar/gis_research_apps_class | R | false | false | 2,661 | r | #This script will be used to run the F-test between the variables for zip codes with and without cases
#Rather than use HW 7, I am going to do this using the built in var.test() function because it is simpler and provides
#more organized output compared to the output we produced in class.
#Now I will test whether the variance of the noCase zipcodes and case zipcodes are equal.
#I will need to use the f test for this
#Ho: The variance of mean values in the noCase dataset and the mean values in the case dataset are equal
#Ha: The variance of mean values in the noCase dataset and the mean values in the case dataset are not equal
#I may just bring this all into one script with the F test.
print("f-test for bird slopes in 2002")
#set these first two variables to changes what datasets will be calculated
noCase.data <- slope.02bnoCase.data
case.data <- slope.02bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird slopes in 2003")
noCase.data <- slope.03bnoCase.data
case.data <- slope.03bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird aspect in 2002")
noCase.data <- aspect.02bnoCase.data
case.data <- aspect.02bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird aspect in 2003")
noCase.data <- aspect.03bnoCase.data
case.data <- aspect.03bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird elev in 2002")
noCase.data <- elev.02bnoCase.data
case.data <- elev.02bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for bird elev in 2003")
noCase.data <- elev.03bnoCase.data
case.data <- elev.03bcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human slopes in 2002")
noCase.data <- slope.02hnoCase.data
case.data <- slope.02hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for huamn slopes in 2003")
noCase.data <- slope.03hnoCase.data
case.data <- slope.03hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human aspect in 2002")
noCase.data <- aspect.02hnoCase.data
case.data <- aspect.02hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human aspect in 2003")
noCase.data <- aspect.03hnoCase.data
case.data <- aspect.03hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human elev in 2002")
noCase.data <- elev.02hnoCase.data
case.data <- elev.02hcase.data
var.test(noCase.data[,8],case.data[,8])
print("f-test for human elev in 2003")
noCase.data <- elev.03hnoCase.data
case.data <- elev.03hcase.data
var.test(noCase.data[,8],case.data[,8])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/F_respFunScoreMat.R
\name{respFunScoreMat}
\alias{respFunScoreMat}
\title{Derivative of the Lagrangian of the parametric response function}
\usage{
respFunScoreMat(
betas,
X,
reg,
thetaMat,
muMarg,
psi,
p,
v,
allowMissingness,
naId,
...
)
}
\arguments{
\item{betas}{a vector of length (deg+1)*(p+1) with regression parameters with
deg the degree of the response function and the lagrangian multipliers}
\item{X}{the nxp data matrix}
\item{reg}{a matrix of regressors with the dimension nx(deg+1)}
\item{thetaMat}{The n-by-p matrix with dispersion parameters}
\item{muMarg}{offset matrix of size nxp}
\item{psi}{a scalar, the importance parameter}
\item{p}{an integer, the number of taxa}
\item{v}{an integer, one plus the degree of the response function}
\item{allowMissingness}{A boolean, are missing values present}
\item{naId}{The numeric index of the missing values in X}
\item{...}{further arguments passed on to the jacobian.
The parameters are restricted to be normalized, i.e. all squared intercepts,
first order and second order parameters sum to 1}
}
\value{
The evaluation of the score functions, a vector of length (p+1)*
(deg+1)
}
\description{
Derivative of the Lagrangian of the parametric response function
}
| /man/respFunScoreMat.Rd | no_license | CenterForStatistics-UGent/RCM | R | false | true | 1,336 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/F_respFunScoreMat.R
\name{respFunScoreMat}
\alias{respFunScoreMat}
\title{Derivative of the Lagrangian of the parametric response function}
\usage{
respFunScoreMat(
betas,
X,
reg,
thetaMat,
muMarg,
psi,
p,
v,
allowMissingness,
naId,
...
)
}
\arguments{
\item{betas}{a vector of length (deg+1)*(p+1) with regression parameters with
deg the degree of the response function and the lagrangian multipliers}
\item{X}{the nxp data matrix}
\item{reg}{a matrix of regressors with the dimension nx(deg+1)}
\item{thetaMat}{The n-by-p matrix with dispersion parameters}
\item{muMarg}{offset matrix of size nxp}
\item{psi}{a scalar, the importance parameter}
\item{p}{an integer, the number of taxa}
\item{v}{an integer, one plus the degree of the response function}
\item{allowMissingness}{A boolean, are missing values present}
\item{naId}{The numeric index of the missing values in X}
\item{...}{further arguments passed on to the jacobian
The parameters are restricted to be normalized, i.e. all squared intercepts,
first order and second order parameters sum to 1}
}
\value{
The evaluation of the score functions, a vector of length (p+1)*
(deg+1)
}
\description{
Derivative of the Lagrangian of the parametric response function
}
|
context("test_soft.threshold")
test_that("soft.threshold works", {
x <- rnorm(10)
out <- soft.threshold(x, 0.5)
expect_equal(out, c(0, 0, 0, 0, 0, 0, 0, 1.34653177497057e-10, 0, 0))
})
| /tests/testthat/test-soft.threshold.R | no_license | jimhester/RGCCA | R | false | false | 192 | r | context("test_soft.threshold")
test_that("soft.threshold works", {
x <- rnorm(10)
out <- soft.threshold(x, 0.5)
expect_equal(out, c(0, 0, 0, 0, 0, 0, 0, 1.34653177497057e-10, 0, 0))
})
|
#' Transform values of a vector
#'
#' @author Bruno Vilela
#'
#' @description Transform each element of a vector, replacing every
#' occurrence of a level in \code{y} by the corresponding value in \code{z}.
#'
#' @usage lets.transf(x, y, z, NUMERIC=TRUE)
#'
#' @param x A vector to be transformed.
#' @param y Levels to be transformed.
#' @param z The value to be attributed to each level (same order as y).
#' @param NUMERIC logical, if \code{TRUE} z will be considered numbers.
#'
#' @return Return a vector with changed values.
#'
#' @examples \dontrun{
#' status <- sample(c("EN","VU", "NT", "CR", "DD", "LC"), 30, replace=TRUE)
#' TE <- "Threatened"
#' NT <- "Non-Threatened"
#' new <- c(TE, TE, NT, TE, "Data Deficient", NT)
#' old <- c("EN","VU", "NT", "CR", "DD", "LC")
#' statustrans <- lets.transf(status, old, new, NUMERIC=FALSE)
#'
#' }
#'
#' @export
lets.transf <- function(x, y, z, NUMERIC = TRUE) {
  # Factors are converted back to the values stored in their levels so the
  # equality comparison below is done on values, not on integer codes.
  # NOTE(review): levels are coerced with as.numeric(), so factors with
  # non-numeric levels become NA -- confirm whether character factors are
  # meant to be supported here.
  if (is.factor(x)) {
    x <- as.numeric(levels(x))[x]
  }
  if (is.factor(y)) {
    y <- as.numeric(levels(y))[y]
  }
  # seq_along() (rather than 1:length(y)) iterates zero times when y is
  # empty; 1:length(y) would yield c(1, 0) and fail with an NA subscript.
  for (i in seq_along(y)) {
    x[x == y[i]] <- z[i]
  }
  if (isTRUE(NUMERIC)) {
    x <- as.numeric(x)
  }
  x
}
| /R/lets_transf.R | no_license | SaraVarela/letsR-1 | R | false | false | 1,085 | r | #' Transform values of a vector
#'
#' @author Bruno Vilela
#'
#' @description Transform each element of a vector.
#'
#' @usage lets.transf(x, y, z, NUMERIC=TRUE)
#'
#' @param x A vector to be transformed.
#' @param y levels to be transformed.
#' @param z The value to be atributed to each level (same order as y).
#' @param NUMERIC logical, if \code{TRUE} z will be considered numbers.
#'
#' @return Return a vector with changed values.
#'
#' @examples \dontrun{
#' status <- sample(c("EN","VU", "NT", "CR", "DD", "LC"), 30, replace=TRUE)
#' TE <- "Threatened"
#' NT <- "Non-Threatened"
#' new <- c(TE, TE, NT, TE, "Data Deficient", NT)
#' old <- c("EN","VU", "NT", "CR", "DD", "LC")
#' statustrans <- lets.transf(status, old, new, NUMERIC=FALSE)
#'
#' }
#'
#' @export
lets.transf <- function (x, y, z, NUMERIC=TRUE)
{
if(is.factor(x)){
x <- as.numeric(levels(x))[x]
}
if(is.factor(y)){
y <- as.numeric(levels(y))[y]
}
for(i in 1:length(y)){
x[x == y[i]] <- z[i]
}
if(NUMERIC==TRUE){
x <- as.numeric(x)
}
return(x)
}
|
# Top 10 career home-run hitters: total HR per player, sorted descending,
# first ten rows.
library(tidyverse)
library(Lahman)
data("Batting")
Batting %>%
  group_by(playerID) %>%
  summarise(HR = sum(HR)) %>%
  arrange(desc(HR)) %>%
  slice_head(n = 10)
| /quiz.R | no_license | chanyoung2da/data_analysis | R | false | false | 155 | r | library(tidyverse)
library(Lahman)
data("Batting")
Batting %>%
group_by(playerID) %>%
summarise(HR = sum(HR)) %>%
arrange(-HR) %>%
head(10)
|
## Summary method for 'meta' objects.
## Collects per-study confidence intervals, pooled fixed- and random-effects
## results, heterogeneity statistics (Q, tau, H, I2), the prediction interval
## and -- if present -- subgroup results into a "summary.meta" object.
## NOTE(review): arguments 'bylab' and 'bystud' are accepted/validated but not
## otherwise used in this body (res$bylab is copied from object$bylab) --
## confirm they are intentionally ignored.
summary.meta <- function(object,
                         comb.fixed=object$comb.fixed,
                         comb.random=object$comb.random,
                         prediction=object$prediction,
                         backtransf=object$backtransf,
                         bylab=object$bylab,
                         print.byvar=object$print.byvar,
                         bystud=FALSE,
                         print.CMH=object$print.CMH,
                         warn=object$warn,
                         ...){
  ##
  ##
  ## (1) Check for meta object and upgrade older meta objects
  ##
  ##
  chkclass(object, "meta")
  ##
  ## metacum/metainf results are cumulative / leave-one-out analyses for
  ## which a pooled summary is not defined; return them unchanged.
  if (inherits(object, "metacum")){
    warning("Summary method not defined for objects of class \"metacum\".")
    return(object)
  }
  ##
  if (inherits(object, "metainf")){
    warning("Summary method not defined for objects of class \"metainf\".")
    return(object)
  }
  ##
  ## Fall back to the package-wide setting when the object carries no 'warn'.
  if (length(warn)==0)
    warn <- .settings$warn
  object <- updateversion(object)
  ##
  ##
  ## (2) Check other arguments
  ##
  ##
  chklogical(comb.fixed)
  chklogical(comb.random)
  chklogical(prediction)
  chklogical(backtransf)
  if (!is.null(print.byvar))
    chklogical(print.byvar)
  chklogical(bystud)
  if (!is.null(print.CMH))
    chklogical(print.CMH)
  chklogical(warn)
  ##
  cl <- class(object)[1]
  addargs <- names(list(...))
  ##
  fun <- "summary.meta"
  ##
  ## Warn about '...' arguments this method ignores (they must be set on the
  ## meta object itself).
  warnarg("byvar", addargs, fun, cl)
  warnarg("level", addargs, fun, cl)
  warnarg("level.comb", addargs, fun, cl)
  warnarg("level.predict", addargs, fun, cl)
  ##
  ##
  ## (3) Results for individual studies
  ##
  ##
  ## Per-study confidence intervals (df=NA: normal approximation).
  ci.study <- list(TE=object$TE,
                   seTE=object$seTE,
                   lower=object$lower,
                   upper=object$upper,
                   z=object$zval,
                   p=object$pval,
                   level=object$level,
                   df=NA)
  ##
  if (inherits(object, "metaprop")){
    ci.study$event <- object$event
    ci.study$n <- object$n
  }
  ##
  ##
  ## (4) Results for meta-analysis
  ##
  ##
  ## Pooled fixed-effect result.
  ci.f <- list(TE=object$TE.fixed,
               seTE=object$seTE.fixed,
               lower=object$lower.fixed,
               upper=object$upper.fixed,
               z=object$zval.fixed,
               p=object$pval.fixed,
               level=object$level.comb)
  ## NOTE(review): the slot called 'harmonic.mean' stores mean(1/n), i.e. the
  ## reciprocal of the harmonic mean -- confirm downstream consumers expect
  ## this form.
  if (inherits(object, "metaprop"))
    ci.f$harmonic.mean <- mean(1/object$n)
  ##
  ## Pooled random-effects result; df is set when the Hartung-Knapp
  ## adjustment was used.
  ci.r <- list(TE=object$TE.random,
               seTE=object$seTE.random,
               lower=object$lower.random,
               upper=object$upper.random,
               z=object$zval.random,
               p=object$pval.random,
               level=object$level.comb,
               df=if (!is.null(object$df.hakn)) object$df.hakn else NA)
  if (inherits(object, "metaprop"))
    ci.r$harmonic.mean <- mean(1/object$n)
  ##
  ## Heterogeneity statistics with confidence limits.
  ci.H <- list(TE=object$H, lower=object$lower.H, upper=object$upper.H)
  ##
  ci.I2 <- list(TE=object$I2, lower=object$lower.I2, upper=object$upper.I2)
  ##
  ## Prediction interval for the treatment effect in a new study
  ## (t distribution with k-2 degrees of freedom).
  ci.p <- list(TE=NA,
               seTE=object$seTE.predict,
               lower=object$lower.predict,
               upper=object$upper.predict,
               z=NA,
               p=NA,
               level=object$level.predict,
               df=object$k-2)
  ##
  ## Label like "95%-CI" used by the print method.
  ci.lab <- paste(round(100*object$level.comb, 1), "%-CI", sep="")
  ##
  ##
  ## (5) Generate R object
  ##
  ##
  res <- list(study=ci.study,
              fixed=ci.f, random=ci.r,
              predict=ci.p,
              k=object$k, Q=object$Q, df.Q=object$df.Q,
              tau=object$tau, H=ci.H, I2=ci.I2,
              tau.preset=object$tau.preset,
              k.all=length(object$TE),
              Q.CMH=object$Q.CMH,
              sm=object$sm, method=object$method,
              call=match.call(),
              ci.lab=ci.lab,
              comb.fixed=comb.fixed,
              comb.random=comb.random,
              prediction=prediction)
  ##
  res$se.tau2 <- object$se.tau2
  res$hakn <- object$hakn
  res$df.hakn <- object$df.hakn
  res$method.tau <- object$method.tau
  res$TE.tau <- object$TE.tau
  res$C <- object$C
  ##
  ## Add results from subgroup analysis
  ##
  if (length(object$byvar)>0){
    ##
    ## Within-subgroup fixed-effect results (one entry per subgroup level).
    ci.fixed.w <- list(TE=object$TE.fixed.w,
                       seTE=object$seTE.fixed.w,
                       lower=object$lower.fixed.w,
                       upper=object$upper.fixed.w,
                       z=object$zval.fixed.w,
                       p=object$pval.fixed.w,
                       level=object$level.comb,
                       harmonic.mean=object$n.harmonic.mean.w)
    ##
    ## Within-subgroup random-effects results.
    ci.random.w <- list(TE=object$TE.random.w,
                        seTE=object$seTE.random.w,
                        lower=object$lower.random.w,
                        upper=object$upper.random.w,
                        z=object$zval.random.w,
                        p=object$pval.random.w,
                        level=object$level.comb,
                        df=object$df.hakn.w,
                        harmonic.mean=object$n.harmonic.mean.w)
    ##
    ## Note: ci.H / ci.I2 are reused here for the per-subgroup statistics,
    ## overwriting the overall values computed above (res already holds the
    ## overall versions).
    ci.H <- list(TE=object$H.w, lower=object$lower.H.w, upper=object$upper.H.w)
    ci.I2 <- list(TE=object$I2.w, lower=object$lower.I2.w, upper=object$upper.I2.w)
    ##
    res$within.fixed <- ci.fixed.w
    res$within.random <- ci.random.w
    res$k.w <- object$k.w
    res$Q.w <- object$Q.w
    res$Q.w.fixed <- object$Q.w.fixed
    res$Q.w.random <- object$Q.w.random
    res$df.Q.w <- object$df.Q.w
    res$Q.b.fixed <- object$Q.b.fixed
    res$Q.b.random <- object$Q.b.random
    res$df.Q.b <- object$df.Q.b
    res$tau.w <- object$tau.w
    res$C.w <- object$C.w
    res$H.w <- ci.H
    res$I2.w <- ci.I2
    res$bylab <- object$bylab
    res$tau.common <- object$tau.common
    res$bylevs <- object$bylevs
    ## Backward-compatibility placeholder for the removed 'within' list.
    res$within <- "Returned list 'within' replaced by lists 'within.fixed' and 'within.random'."
  }
  ##
  class(res) <- "summary.meta"
  ##
  ## Carry class-specific settings so the print method can reproduce the
  ## original analysis options (zero-cell handling, back-transformations ...).
  if (inherits(object, "metabin")){
    res$sparse <- object$sparse
    res$incr <- object$incr
    res$allincr <- object$allincr
    res$addincr <- object$addincr
    res$MH.exact <- object$MH.exact
    ##
    class(res) <- c(class(res), "metabin")
  }
  ##
  if (inherits(object, "metacont")){
    res$pooledvar <- object$pooledvar
    res$method.smd <- object$method.smd
    res$sd.glass <- object$sd.glass
    res$exact.smd <- object$exact.smd
    ##
    class(res) <- c(class(res), "metacont")
  }
  ##
  if (inherits(object, "metacor")){
    res$cor <- object$cor
    res$n <- object$n
    ##
    class(res) <- c(class(res), "metacor")
  }
  ##
  if (inherits(object, "metainc")){
    class(res) <- c(class(res), "metainc")
    res$sparse <- object$sparse
    res$incr <- object$incr
    res$allincr <- object$allincr
    res$addincr <- object$addincr
  }
  ##
  if (inherits(object, "metaprop")){
    res$event <- object$event
    res$n <- object$n
    res$sparse <- object$sparse
    res$incr <- object$incr
    res$allincr <- object$allincr
    res$addincr <- object$addincr
    res$method.ci <- object$method.ci
    ##
    class(res) <- c(class(res), "metaprop")
  }
  ##
  if (inherits(object, "trimfill")){
    res$object <- object
    res$k0 <- object$k0
    ##
    class(res) <- c(class(res), "trimfill")
  }
  ##
  res$complab <- object$complab
  res$outclab <- object$outclab
  res$title <- object$title
  ##
  res$print.byvar <- print.byvar
  res$print.CMH <- print.CMH
  ##
  res$data <- object$data
  res$subset <- object$subset
  ##
  res$backtransf <- backtransf
  ##
  ## Record the package version that produced this summary.
  res$version <- packageDescription("meta")$Version
  res
}
| /meta/R/summary.meta.R | no_license | ingted/R-Examples | R | false | false | 7,767 | r | summary.meta <- function(object,
comb.fixed=object$comb.fixed,
comb.random=object$comb.random,
prediction=object$prediction,
backtransf=object$backtransf,
bylab=object$bylab,
print.byvar=object$print.byvar,
bystud=FALSE,
print.CMH=object$print.CMH,
warn=object$warn,
...){
##
##
## (1) Check for meta object and upgrade older meta objects
##
##
chkclass(object, "meta")
##
if (inherits(object, "metacum")){
warning("Summary method not defined for objects of class \"metacum\".")
return(object)
}
##
if (inherits(object, "metainf")){
warning("Summary method not defined for objects of class \"metainf\".")
return(object)
}
##
if (length(warn)==0)
warn <- .settings$warn
object <- updateversion(object)
##
##
## (2) Check other arguments
##
##
chklogical(comb.fixed)
chklogical(comb.random)
chklogical(prediction)
chklogical(backtransf)
if (!is.null(print.byvar))
chklogical(print.byvar)
chklogical(bystud)
if (!is.null(print.CMH))
chklogical(print.CMH)
chklogical(warn)
##
cl <- class(object)[1]
addargs <- names(list(...))
##
fun <- "summary.meta"
##
warnarg("byvar", addargs, fun, cl)
warnarg("level", addargs, fun, cl)
warnarg("level.comb", addargs, fun, cl)
warnarg("level.predict", addargs, fun, cl)
##
##
## (3) Results for individual studies
##
##
ci.study <- list(TE=object$TE,
seTE=object$seTE,
lower=object$lower,
upper=object$upper,
z=object$zval,
p=object$pval,
level=object$level,
df=NA)
##
if (inherits(object, "metaprop")){
ci.study$event <- object$event
ci.study$n <- object$n
}
##
##
## (4) Results for meta-analysis
##
##
ci.f <- list(TE=object$TE.fixed,
seTE=object$seTE.fixed,
lower=object$lower.fixed,
upper=object$upper.fixed,
z=object$zval.fixed,
p=object$pval.fixed,
level=object$level.comb)
if (inherits(object, "metaprop"))
ci.f$harmonic.mean <- mean(1/object$n)
##
ci.r <- list(TE=object$TE.random,
seTE=object$seTE.random,
lower=object$lower.random,
upper=object$upper.random,
z=object$zval.random,
p=object$pval.random,
level=object$level.comb,
df=if (!is.null(object$df.hakn)) object$df.hakn else NA)
if (inherits(object, "metaprop"))
ci.r$harmonic.mean <- mean(1/object$n)
##
ci.H <- list(TE=object$H, lower=object$lower.H, upper=object$upper.H)
##
ci.I2 <- list(TE=object$I2, lower=object$lower.I2, upper=object$upper.I2)
##
ci.p <- list(TE=NA,
seTE=object$seTE.predict,
lower=object$lower.predict,
upper=object$upper.predict,
z=NA,
p=NA,
level=object$level.predict,
df=object$k-2)
##
ci.lab <- paste(round(100*object$level.comb, 1), "%-CI", sep="")
##
##
## (5) Generate R object
##
##
res <- list(study=ci.study,
fixed=ci.f, random=ci.r,
predict=ci.p,
k=object$k, Q=object$Q, df.Q=object$df.Q,
tau=object$tau, H=ci.H, I2=ci.I2,
tau.preset=object$tau.preset,
k.all=length(object$TE),
Q.CMH=object$Q.CMH,
sm=object$sm, method=object$method,
call=match.call(),
ci.lab=ci.lab,
comb.fixed=comb.fixed,
comb.random=comb.random,
prediction=prediction)
##
res$se.tau2 <- object$se.tau2
res$hakn <- object$hakn
res$df.hakn <- object$df.hakn
res$method.tau <- object$method.tau
res$TE.tau <- object$TE.tau
res$C <- object$C
##
## Add results from subgroup analysis
##
if (length(object$byvar)>0){
##
ci.fixed.w <- list(TE=object$TE.fixed.w,
seTE=object$seTE.fixed.w,
lower=object$lower.fixed.w,
upper=object$upper.fixed.w,
z=object$zval.fixed.w,
p=object$pval.fixed.w,
level=object$level.comb,
harmonic.mean=object$n.harmonic.mean.w)
##
ci.random.w <- list(TE=object$TE.random.w,
seTE=object$seTE.random.w,
lower=object$lower.random.w,
upper=object$upper.random.w,
z=object$zval.random.w,
p=object$pval.random.w,
level=object$level.comb,
df=object$df.hakn.w,
harmonic.mean=object$n.harmonic.mean.w)
##
ci.H <- list(TE=object$H.w, lower=object$lower.H.w, upper=object$upper.H.w)
ci.I2 <- list(TE=object$I2.w, lower=object$lower.I2.w, upper=object$upper.I2.w)
##
res$within.fixed <- ci.fixed.w
res$within.random <- ci.random.w
res$k.w <- object$k.w
res$Q.w <- object$Q.w
res$Q.w.fixed <- object$Q.w.fixed
res$Q.w.random <- object$Q.w.random
res$df.Q.w <- object$df.Q.w
res$Q.b.fixed <- object$Q.b.fixed
res$Q.b.random <- object$Q.b.random
res$df.Q.b <- object$df.Q.b
res$tau.w <- object$tau.w
res$C.w <- object$C.w
res$H.w <- ci.H
res$I2.w <- ci.I2
res$bylab <- object$bylab
res$tau.common <- object$tau.common
res$bylevs <- object$bylevs
res$within <- "Returned list 'within' replaced by lists 'within.fixed' and 'within.random'."
}
##
class(res) <- "summary.meta"
##
if (inherits(object, "metabin")){
res$sparse <- object$sparse
res$incr <- object$incr
res$allincr <- object$allincr
res$addincr <- object$addincr
res$MH.exact <- object$MH.exact
##
class(res) <- c(class(res), "metabin")
}
##
if (inherits(object, "metacont")){
res$pooledvar <- object$pooledvar
res$method.smd <- object$method.smd
res$sd.glass <- object$sd.glass
res$exact.smd <- object$exact.smd
##
class(res) <- c(class(res), "metacont")
}
##
if (inherits(object, "metacor")){
res$cor <- object$cor
res$n <- object$n
##
class(res) <- c(class(res), "metacor")
}
##
if (inherits(object, "metainc")){
class(res) <- c(class(res), "metainc")
res$sparse <- object$sparse
res$incr <- object$incr
res$allincr <- object$allincr
res$addincr <- object$addincr
}
##
if (inherits(object, "metaprop")){
res$event <- object$event
res$n <- object$n
res$sparse <- object$sparse
res$incr <- object$incr
res$allincr <- object$allincr
res$addincr <- object$addincr
res$method.ci <- object$method.ci
##
class(res) <- c(class(res), "metaprop")
}
##
if (inherits(object, "trimfill")){
res$object <- object
res$k0 <- object$k0
##
class(res) <- c(class(res), "trimfill")
}
##
res$complab <- object$complab
res$outclab <- object$outclab
res$title <- object$title
##
res$print.byvar <- print.byvar
res$print.CMH <- print.CMH
##
res$data <- object$data
res$subset <- object$subset
##
res$backtransf <- backtransf
##
res$version <- packageDescription("meta")$Version
res
}
|
library(parallel)
library(XML)
## Constants describing the choices available for VPC and simulation runs.
## X-axis options for a VPC observation variable.
#' @export
VPC_XAXIS_T=1
#' @export
VPC_XAXIS_TAD=2
#' @export
VPC_XAXIS_PRED=3
#' @export
VPC_XAXIS_OTHER=4
## Display names matching VPC_XAXIS_* by position.
#' @export
XAxisNames=c("t","TAD","PRED","Other")
## Binning methods for the VPC x-axis.
#' @export
VPC_BIN_NONE=1
#' @export
VPC_BIN_KMEANS=2
#' @export
VPC_BIN_EXP_CENTERS=3
#' @export
VPC_BIN_EXP_BOUNDARIES=4
## Prediction-correction options.
## NOTE(review): the misspelled name VPC_PRED_PROPOTIONAL is an exported
## identifier and therefore frozen for backward compatibility.
#' @export
VPC_PRED_NONE=1
#' @export
VPC_PRED_PROPOTIONAL=2
#' @export
VPC_PRED_ADDITIVE=3
## Observation statement types.
## NOTE(review): all six VPC_*_T constants are set to 1 although
## ObserveTypeNames below has six entries indexed by type (and
## GetObservationVariables() derives 'type' from the position in
## ObserveTypeNames). They were probably intended to be 1..6 -- confirm
## before relying on VPC_MULTI_T..VPC_EVENT_T.
#' @export
VPC_OBSERVE_T=1
#' @export
VPC_MULTI_T=1
#' @export
VPC_LL_T=1
#' @export
VPC_COUNT_T=1
#' @export
VPC_ORDINAL_T=1
#' @export
VPC_EVENT_T=1
## Keywords identifying observation statements in model text, indexed by
## observation type.
#' @export
ObserveTypeNames=c("observe","multi","LL","count","ordinal","event")
#'
#' NlmeSimTableDef : Parameters for VPC/Simulation runs
#'
#' @param name Name of the generated simulation file
#' @param timesList List of time values
#' @param variablesList List of variables
#' @param timeAfterDose Time after dose flag
#'
#' @export NlmeSimTableDef
#'
NlmeSimTableDef = setClass("NlmeSimTableDef",representation(
name="character",
timesList="character",
variablesList="character",
timeAfterDose="logical"))
setMethod("initialize","NlmeSimTableDef",
function(.Object,
name="",
timesList="",
variablesList="",
timeAfterDose=FALSE,...){
.Object@name=name
.Object@timesList=timesList
.Object@variablesList=variablesList
.Object@timeAfterDose=timeAfterDose
.Object
})
assign("NlmeSimTableDef",NlmeSimTableDef,env=.GlobalEnv)
#'
#' NlmeObservationVar : Describes an observation(observe,multi,...)
#'
#'
#' @param name of observation variable
#' @param type of observation
#' @param xaxis One of:VPC_XAXIS_T,VPC_XAXIS_TAD,VPC_XAXIS_PRED,VPC_XAXIS_OTHER
#' @param binningMethod VPC_BIN_NONE,VPC_BIN_KMEANS,VPC_BIN_EXP_CENTERS,VPC_BIN_EXP_BOUNDARIES
#' @param binningOption comma separated list to specify centers or boundary values
#' @param quantilesValues comma separated list
#' @param quantilesSecondaryValues comma separated list
#'
#' @export NlmeObservationVar
#'
#' @examples
#'
#' var = NlmeObservationVar(
#' name="Cobs",
#' type=VPC_OBSERVE_T,
#' xaxis=VPC_XAXIS_TAD,
#' binningMethod=VPC_BIN_NONE,
#' quantilesValues ="5,50,95")
#'
NlmeObservationVar = setClass("NlmeObservationVar",representation(
name="character",
type="numeric",
xaxis="numeric",
xaxisLabel="character",
binningMethod="numeric",
binningOption="character",
timeToEvent="character",
quantilesValues="character",
isBql="logical",
quantilesSecondaryValues="character"))
setMethod("initialize","NlmeObservationVar",
function(.Object,
name="",
type=VPC_OBSERVE_T,
xaxis=VPC_XAXIS_T,
xaxisLabel="",
binningMethod=VPC_BIN_NONE,
binningOption="",
timeToEvent="",
quantilesValues="5,50,95",
isBql=FALSE,
quantilesSecondaryValues=""){
.Object@name=name
.Object@type=type
.Object@xaxis=xaxis
.Object@xaxisLabel=xaxisLabel
.Object@binningMethod=binningMethod
.Object@binningOption=binningOption
.Object@timeToEvent=timeToEvent
.Object@quantilesValues=quantilesValues
.Object@isBql=isBql
.Object@quantilesSecondaryValues=quantilesSecondaryValues
.Object
})
#'
#' GetObservationVariables : Build one NlmeObservationVar per observation
#' statement, read either from a dataset's model or from an explicit list of
#' model statements such as "observe(CObs=C+eps)".
#'
#' @param dataset     NlmeDataset to read observe statements from (used when
#'                    modelLines is empty)
#' @param modelLines  optional character vector of model observation statements
#'
#' @return list of NlmeObservationVar objects
#'
#' @export
#'
GetObservationVariables <-function(dataset=NULL, modelLines=c())
{
    obsVars=c()
    if ( length(modelLines) == 0 )
        lines = DatasetGetObserveParams(dataset)
    else
        lines = modelLines
    for ( l in unlist(lines) ) {
        # Identify the observation type from the statement keyword
        # (observe/multi/LL/count/ordinal/event).  The original code indexed
        # which(...)[[1]] directly, which raised an opaque subscript error
        # when a line matched no known keyword; fail with a clear message
        # instead.
        matches = which(sapply(ObserveTypeNames, grepl, l))
        if ( length(matches) == 0 )
            stop("Unrecognized observation statement: ", l, call. = FALSE)
        type = matches[[1]]
        isBql = length(grep("bql", l)) != 0
        # The first token inside the parentheses is the observed variable
        # name, e.g. "CObs" in "observe(CObs=C+eps)".
        name = unlist(strsplit(l, split = "[(,=,,]"))[2]
        obsVar = NlmeObservationVar(name = name, type = type, isBql = isBql)
        obsVars = c(obsVars, obsVar)
    }
    return(obsVars)
}
assign("GetObservationVariables",GetObservationVariables,env=.GlobalEnv)
setGeneric(name="observationParameters",
def=function(.Object)
{
standardGeneric("observationParameters")
})
#'
#' @export observationParameters
#'
setMethod(f="observationParameters",
signature="NlmeObservationVar",
definition=function(.Object){
print(.Object)
})
assign("observationParameters",observationParameters,env=.GlobalEnv)
#'
#' @export
#'
setGeneric(name="observationParameters<-",
           def=function(.Object,value)
           {
               standardGeneric("observationParameters<-")
           })
#'
#' Replacement method updating individual fields of an NlmeObservationVar
#' from a named vector, e.g.
#'   observationParameters(v) = c(xaxis=VPC_XAXIS_T, quantilesValues="5,50,95")
#' Only the entries present (non-NA) in 'value' are applied.
#'
#' @export observationParameters<-
#'
setMethod(f="observationParameters<-",
          signature="NlmeObservationVar",
          definition=function(.Object,value){
              # BUGFIX: the original assigned value["name"] to the
              # nonexistent slot .Object@age, which errored whenever a new
              # name was supplied.
              if( ! is.na(value["name"]) )
                  .Object@name = value["name"]
              if( ! is.na(value["xaxis"]) )
                  .Object@xaxis = as.integer(value["xaxis"])
              if( ! is.na(value["xaxisLabel"]) )
                  .Object@xaxisLabel = value["xaxisLabel"]
              if( ! is.na(value["binningMethod"]) )
                  .Object@binningMethod = as.integer(value["binningMethod"])
              if( ! is.na(value["binningOption"]) )
                  .Object@binningOption = value["binningOption"]
              if( ! is.na(value["timeToEvent"]) )
                  .Object@timeToEvent = value["timeToEvent"]
              if( ! is.na(value["quantilesValues"]) )
                  .Object@quantilesValues = value["quantilesValues"]
              if( ! is.na(value["quantilesSecondaryValues"]) )
                  .Object@quantilesSecondaryValues =value["quantilesSecondaryValues"]
              if( ! is.na(value["isBql"]) )
                  .Object@isBql = as.logical(value["isBql"])
              return(.Object)
          })
#'
#' NlmeVpcParams : Parameters for VPC runs
#'
#' @param numReplicates Number of replicates to simulate
#' @param seed Random number generator seed
#' @param predCorrection One of VPC_PRED_NONE,VPC_PRED_PROPOTIONAL,VPC_PRED_ADDITIVE
#' @param predVarCorr flag to use Prediction Variance Correction
#' @param stratifyColumns List of covariates for Stratified PC
#' @param observactionVars (NlmeObservationVar)
#' @param simulationTables Optional list of simulatio tables (NlmeSimTableDef)
#'
#' @export NlmeVpcParams
#'
#' @examples
#'
#' observe1 = NlmeObservationVar(name="Cobs",
#' type=VPC_OBSERVE_T,
#' xaxis=VPC_XAXIS_TAD,
#' binningMethod=VPC_BIN_NONE,
#' quantilesValues ="5,50,95")
#'
#' observe2 = NlmeObservationVar(name="Iobs",
#' type=VPC_MULTI_T,
#' xaxis=VPC_XAXIS_PRED,
#' quantilesValues ="5,50,95")
#'
#' observe3 = NlmeObservationVar(name="Eobs",
#' type=VPC_LL_T,
#' timeToEvent="seq(1,10)"
#' quantilesValues ="5,50,95")
#'
#' table1=NlmeSimTableDef(name="simulate.csv",
#' timesList="0,2,4,12,24",
#' variablesList="V,Cl",
#' timeAfterDose=TRUE)
#'
#' vpc = NlmeVpcParams(numReplicates=10,
#' seed=29423,
#' predCorrection=VPC_PRED_PROPOTIONAL,
#' predVarCorr=TRUE,
#' stratifyColumns="sex,race,dosing",
#' observationVars=c(observe1,observe2,observe3),
#' simulationTables=c(table1))
#'
NlmeVpcParams = setClass("NlmeVpcParams",representation(
numReplicates="numeric",
seed="numeric",
predCorrection="numeric",
predVarCorr="logical",
stratifyColumns="character",
observationVars="list",
simulationTables="list"))
assign("NlmeVpcParams",NlmeVpcParams,env=.GlobalEnv)
setMethod("initialize","NlmeVpcParams",
function(.Object,
numReplicates=2,
seed=1234,
predCorrection=VPC_PRED_NONE,
predVarCorr=FALSE,
stratifyColumns="",
# observationVars=c(NlmeObservationVar()),
observationVars=list(),
simulationTables=list()){
.Object@numReplicates=numReplicates
.Object@seed=seed
.Object@predCorrection=predCorrection
.Object@predVarCorr=predVarCorr
.Object@stratifyColumns=stratifyColumns
.Object@observationVars=observationVars
.Object@simulationTables=simulationTables
.Object
})
#'
#' NlmeSimulationParams : Parameters for simulation runs
#'
#' @param numReplicates Number of replicates to simulate
#' @param seed Random number generator seed
#' @param simulationTables (NlmeSimTableDef)
#' @param isPopulation Simulating a population model(default=TRUE). The rest of arguments applies to individual models only
#' @param numPoints Number of points in simulation
#' @param maxXRange Max value of independent variable
#' @param yVariables comma separated list of Y variables
#' @param simAtObs Simulate values at observed values of ivar
#'
#' @export NlmeSimulationParams
#'
#' @examples
#'
#' table1=NlmeSimTableDef(name="simulate.csv",timesList="0,2,4,12,24",
#' variablesList="V,Cl",
#' timeAfterDose=TRUE)
#'
#' simParam = NlmeSimulationParams(numReplicates=10,
#' seed=29423,
#' simulationTables = c(table1))
#'
#' simParam = NlmeSimulationParams(isPopulation=FALSE,
#' numPoints=100,
#' maxXRange=50,
#' yVariables="C,A1",
#' simulationTables = c(table1))
#'
NlmeSimulationParams = setClass("NlmeSimulationParams",representation(
numReplicates="numeric",
seed="numeric",
isPopulation="logical",
numPoints="numeric",
maxXRange="numeric",
yVariables="character",
simAtObs="logical",
simulationTables="list"))
assign("NlmeSimulationParams",NlmeSimulationParams,env=.GlobalEnv)
setMethod("initialize","NlmeSimulationParams",
function(.Object,
numReplicates=2,
seed=1234,
isPopulation=TRUE,
numPoints=100,
maxXRange=50,
yVariables="",
simAtObs=FALSE,
simulationTables=c(NlmeSimTableDef())){
.Object@numReplicates=numReplicates
.Object@seed=seed
.Object@isPopulation=isPopulation
.Object@numPoints=numPoints
.Object@maxXRange=maxXRange
.Object@yVariables=yVariables
.Object@simAtObs=simAtObs
.Object@simulationTables=simulationTables
.Object
})
#'
#' RunVpcSimulation() : Method to execute an NLME VPC simulation
#'
#' @param hostPlatform How to execute the run(NlmeParallelHost)
#' @param dataset Dataset and model information(NlmeDataset)
#' @param params Engine parameters(NlmeEngineExtraParams)
#' @param vpcParams VPC parameters(NlmeVpcParams)
#' @param runInBackground TRUE will run in background and return prompt(Bool)
#' @param workingDir where to run the job
#'
#' @export RunVpcSimulation
#'
#' @examples
#'
#' dataset = NlmeDataset()
#'
#' vpcParams = NlmeVpcParams()
#'
#' param = NlmeEngineExtraParams(PARAMS_METHOD=METHOD_FOCE_LB,
#' PARAMS_NUM_ITERATIONS=1000)
#'
#' job = RunVpcSimulation(defaultHost,dataset,params,vpcParams,simParams)
#'
RunVpcSimulation <-function(
hostPlatform,
dataset,
params,
vpcParams=NULL,
simParams=NULL,
runInBackground=TRUE,
workingDir = NULL)
{
workFlow="WorkFlow"
cleanupFromPreviousRun()
if ( attr(hostPlatform,"hostType")== "Windows" )
runInBackground=FALSE
if ( is.null(workingDir ) )
cwd = getwd()
else
cwd = workingDir
argsFile=GenerateControlfile(dataset, params,workFlow,vpcOption=vpcParams,
simOption=simParams,workingDir=cwd)
argsList=list()
argsList=c(argsList,"GENERIC")
argsList=c(argsList,attr(attr(hostPlatform,"parallelMethod"),"method"))
argsList=c(argsList,attr(hostPlatform,"installationDirectory"))
argsList=c(argsList,attr(hostPlatform,"sharedDirectory"))
argsList=c(argsList,cwd)
argsList=c(argsList,argsFile)
argsList=c(argsList,attr(hostPlatform,"numCores"))
argsList=c(argsList,workFlow)
if ( is.null(vpcParams))
jobName="Simulation"
else
jobName="VPC"
job=SimpleNlmeJob(jobType=jobName,
localDir=cwd,
remoteDir=cwd,
host=hostPlatform,
argsList=argsList,
argsFile=argsFile,
workflow=workFlow,
runInBackground=runInBackground)
status=executeJob(job)
return(job)
}
#'
#' simmodel
#'
#' Method to execute an NLME simulation
#'
#' @param hostPlatform How to execute the run(NlmeParallelHost)
#' @param simParams Simulation parameters(NlmeSimulationParam)
#' @param model PK/PD model to simulate (required)
#' @param runInBackground TRUE will run in background and return prompt(Bool)
#'
#' @export
#'
#' @examples
#'
#'
#' SimTableObs = NlmeSimTableDef(name = "SimTableObs.csv",
#'                               timesList = "0,1,2,4,4.9,55.1,56,57,59,60",
#'                               variablesList = "C, CObs",
#'                               timeAfterDose = FALSE)
#'
#' simParams = NlmeSimulationParams(numReplicates = 50,
#'                                  seed = 3527,
#'                                  simulationTables = c(SimTableObs))
#'
#' job = simmodel(defaultHost,simParams,model)
#'
simmodel <-function( hostPlatform,
                     simParams= NULL,
                     model = NULL,
                     runInBackground=TRUE)
{
    # A simulation is executed as a 0-iteration naive-pooled engine run; the
    # replicates are controlled by simParams.
    params = NlmeEngineExtraParams(PARAMS_METHOD=METHOD_NAIVE_POOLED,
                                   PARAMS_NUM_ITERATIONS=0)
    # RunVpcSimulation() below dereferences model@dataset unconditionally,
    # so a model is effectively required; the original else-branch would
    # crash later with an opaque slot-access error on NULL.  Fail early
    # with a clear message instead.
    if ( is.null(model) )
        stop("simmodel() requires a model object", call. = FALSE)
    writeDefaultFiles(model=model,dataset=model@dataset,simParams=simParams)
    simParams@isPopulation = model@isPopulation
    workingDir = model@modelInfo@workingDir
    return(RunVpcSimulation(hostPlatform=hostPlatform,params=params,dataset=model@dataset,simParams=simParams,runInBackground= runInBackground,workingDir=workingDir))
}
#'
#' vpcmodel
#'
#' Method to execute an NLME visual predictive check
#'
#' @param hostPlatform How to execute the run(NlmeParallelHost)
#' @param vpcParams VPC parameters(NlmeVpcParam)
#' @param model PK/PD model
#' @param runInBackground TRUE will run in background and return prompt(Bool)
#'
#' @export
#'
#' @examples
#'
#' obsVars = GetObservationVariables(model@dataset)
#'
#' observationParameters(obsVars[[1]])=c(xaxis=VPC_XAXIS_T,
#'                                       binningMethod=VPC_BIN_NONE,
#'                                       quantilesValues ="5,50,95")
#'
#' vpcParams = NlmeVpcParams(numReplicates=2,
#'                           seed=1234,
#'                           observationVars=obsVars)
#'
#'
#' job = vpcmodel(defaultHost,vpcParams,model)
#'
vpcmodel <-function( hostPlatform,
                     vpcParams = NULL ,
                     model ,
                     runInBackground=TRUE)
{
    # A VPC is executed as a 0-iteration naive-pooled engine run; the
    # replicates themselves are controlled by vpcParams.
    params = NlmeEngineExtraParams(PARAMS_METHOD=METHOD_NAIVE_POOLED,
                                   PARAMS_NUM_ITERATIONS=0)
    # NOTE(review): the else-branch below is effectively unreachable --
    # 'model' has no default and model@dataset is dereferenced
    # unconditionally in the return call, so a NULL model would fail there
    # anyway.  Confirm whether a model-less VPC was ever intended.
    if ( ! is.null(model) ) {
        writeDefaultFiles(model=model,dataset=model@dataset,simParams=vpcParams)
        workingDir = model@modelInfo@workingDir
    }
    else
        workingDir = getwd()
    return(RunVpcSimulation(hostPlatform=hostPlatform,params=params,dataset=model@dataset,vpcParams=vpcParams,runInBackground= runInBackground,workingDir=workingDir))
}
| /Certara.NLME8/R/vpc.r | no_license | phxnlmedev/rpackages | R | false | false | 17,088 | r |
# Runtime dependencies: 'parallel' for multi-core job execution and 'XML'
# for reading/writing NLME control and status files.
library(parallel)
library(XML)
# X-axis choices for VPC observation plots; values index XAxisNames below.
#' @export
VPC_XAXIS_T=1
#' @export
VPC_XAXIS_TAD=2
#' @export
VPC_XAXIS_PRED=3
#' @export
VPC_XAXIS_OTHER=4
#' @export
XAxisNames=c("t","TAD","PRED","Other")
# Binning-method choices for VPC observation plots.
#' @export
VPC_BIN_NONE=1
#' @export
VPC_BIN_KMEANS=2
#' @export
VPC_BIN_EXP_CENTERS=3
#' @export
VPC_BIN_EXP_BOUNDARIES=4
# Prediction-correction choices.
#' @export
VPC_PRED_NONE=1
#' @export
VPC_PRED_PROPOTIONAL=2
#' @export
VPC_PRED_ADDITIVE=3
# Observation statement types; values index ObserveTypeNames below.
# BUG FIX: these were all 1 in the original, so every type constant mapped
# to "observe" even though GetObservationVariables() and the documented
# examples (e.g. type=VPC_MULTI_T, type=VPC_LL_T) treat them as distinct
# indexes into ObserveTypeNames.
#' @export
VPC_OBSERVE_T=1
#' @export
VPC_MULTI_T=2
#' @export
VPC_LL_T=3
#' @export
VPC_COUNT_T=4
#' @export
VPC_ORDINAL_T=5
#' @export
VPC_EVENT_T=6
#' @export
ObserveTypeNames=c("observe","multi","LL","count","ordinal","event")
#'
#' NlmeSimTableDef : Parameters for VPC/Simulation runs
#'
#' @param name Name of the generated simulation file
#' @param timesList List of time values
#' @param variablesList List of variables
#' @param timeAfterDose Time after dose flag
#'
#' @export NlmeSimTableDef
#'
# S4 container describing one simulation output table requested from the
# NLME engine: target file name, comma-separated sample times, the model
# variables to tabulate, and whether times are relative to the prior dose.
NlmeSimTableDef = setClass("NlmeSimTableDef",representation(
                      name="character",
                      timesList="character",
                      variablesList="character",
                      timeAfterDose="logical"))
# Constructor: all slots default to empty strings / FALSE so a bare
# NlmeSimTableDef() is a valid (if inert) table definition.
setMethod("initialize","NlmeSimTableDef",
    function(.Object,
              name="",
              timesList="",
              variablesList="",
              timeAfterDose=FALSE,...){
        .Object@name=name
        .Object@timesList=timesList
        .Object@variablesList=variablesList
        .Object@timeAfterDose=timeAfterDose
        .Object
    })
# Register the generator globally so sourced scripts see it without a
# package namespace.
assign("NlmeSimTableDef",NlmeSimTableDef,env=.GlobalEnv)
#'
#' NlmeObservationVar : Describes an observation(observe,multi,...)
#'
#'
#' @param name of observation variable
#' @param type of observation
#' @param xaxis One of:VPC_XAXIS_T,VPC_XAXIS_TAD,VPC_XAXIS_PRED,VPC_XAXIS_OTHER
#' @param binningMethod VPC_BIN_NONE,VPC_BIN_KMEANS,VPC_BIN_EXP_CENTERS,VPC_BIN_EXP_BOUNDARIES
#' @param binningOption comma separated list to specify centers or boundary values
#' @param quantilesValues comma separated list
#' @param quantilesSecondaryValues comma separated list
#'
#' @export NlmeObservationVar
#'
#' @examples
#'
#' var = NlmeObservationVar(
#' name="Cobs",
#' type=VPC_OBSERVE_T,
#' xaxis=VPC_XAXIS_TAD,
#' binningMethod=VPC_BIN_NONE,
#' quantilesValues ="5,50,95")
#'
# S4 description of a single observed variable for a VPC: its observation
# type (index into ObserveTypeNames), x-axis choice, binning strategy and
# the quantile lists used when summarizing the predictive check.
NlmeObservationVar = setClass("NlmeObservationVar",representation(
                      name="character",
                      type="numeric",
                      xaxis="numeric",
                      xaxisLabel="character",
                      binningMethod="numeric",
                      binningOption="character",
                      timeToEvent="character",
                      quantilesValues="character",
                      isBql="logical",
                      quantilesSecondaryValues="character"))
# Constructor: defaults model a plain continuous observation plotted
# against time with no binning and 5/50/95 quantiles.
setMethod("initialize","NlmeObservationVar",
    function(.Object,
              name="",
              type=VPC_OBSERVE_T,
              xaxis=VPC_XAXIS_T,
              xaxisLabel="",
              binningMethod=VPC_BIN_NONE,
              binningOption="",
              timeToEvent="",
              quantilesValues="5,50,95",
              isBql=FALSE,
              quantilesSecondaryValues=""){
        .Object@name=name
        .Object@type=type
        .Object@xaxis=xaxis
        .Object@xaxisLabel=xaxisLabel
        .Object@binningMethod=binningMethod
        .Object@binningOption=binningOption
        .Object@timeToEvent=timeToEvent
        .Object@quantilesValues=quantilesValues
        .Object@isBql=isBql
        .Object@quantilesSecondaryValues=quantilesSecondaryValues
        .Object
    })
#'
#' @export
#'
GetObservationVariables <- function(dataset = NULL, modelLines = c())
{
    # Take the raw observe/multi/LL/count/ordinal/event statements either
    # from the dataset (via DatasetGetObserveParams) or from an explicitly
    # supplied character vector of model lines.
    lines <- if (length(modelLines) == 0) DatasetGetObserveParams(dataset) else modelLines
    obsVars <- c()
    for (line in unlist(lines)) {
        # Observation type = index of the first ObserveTypeNames entry whose
        # text occurs in the statement.
        typeIndex <- which(sapply(ObserveTypeNames, grepl, line))[[1]]
        bqlFlag <- grepl("bql", line)
        # The observed variable name is the first token after the opening
        # parenthesis, e.g. "observe(CObs=..." -> "CObs".
        varName <- unlist(strsplit(line, split = "[(,=,,]"))[2]
        obsVars <- c(obsVars,
                     NlmeObservationVar(name = varName, type = typeIndex, isBql = bqlFlag))
    }
    return(obsVars)
}
# Register globally so sourced scripts see the function without a namespace.
assign("GetObservationVariables", GetObservationVariables, env = .GlobalEnv)
# Generic accessor for the VPC settings of an observation variable.
setGeneric(name="observationParameters",
           def=function(.Object)
           {
               standardGeneric("observationParameters")
           })
#'
#' @export observationParameters
#'
# "Getter" method: simply prints the NlmeObservationVar; it does not return
# the slot values as a vector.
setMethod(f="observationParameters",
          signature="NlmeObservationVar",
          definition=function(.Object){
              print(.Object)
          })
# Register globally so sourced scripts see the generic without a namespace.
assign("observationParameters",observationParameters,env=.GlobalEnv)
#'
#' @export
#'
# Replacement generic: observationParameters(obj) <- c(name = ..., xaxis = ...)
setGeneric(name="observationParameters<-",
           def=function(.Object,value)
           {
               standardGeneric("observationParameters<-")
           })
#'
#' Replacement method updating slots of an NlmeObservationVar from a named
#' vector; entries absent from 'value' leave the corresponding slot
#' unchanged. Note the observation 'type' slot cannot be changed here.
#'
#' @export observationParameters<-
#'
setMethod(f="observationParameters<-",
          signature="NlmeObservationVar",
          definition=function(.Object,value){
              # BUG FIX: the original wrote value["name"] into the
              # nonexistent slot .Object@age, which raised an error whenever
              # "name" was supplied; it now updates the intended @name slot.
              if( ! is.na(value["name"]) )
                  .Object@name = value["name"]
              if( ! is.na(value["xaxis"]) )
                  .Object@xaxis = as.integer(value["xaxis"])
              if( ! is.na(value["xaxisLabel"]) )
                  .Object@xaxisLabel = value["xaxisLabel"]
              if( ! is.na(value["binningMethod"]) )
                  .Object@binningMethod = as.integer(value["binningMethod"])
              if( ! is.na(value["binningOption"]) )
                  .Object@binningOption = value["binningOption"]
              if( ! is.na(value["timeToEvent"]) )
                  .Object@timeToEvent = value["timeToEvent"]
              if( ! is.na(value["quantilesValues"]) )
                  .Object@quantilesValues = value["quantilesValues"]
              if( ! is.na(value["quantilesSecondaryValues"]) )
                  .Object@quantilesSecondaryValues = value["quantilesSecondaryValues"]
              if( ! is.na(value["isBql"]) )
                  .Object@isBql = as.logical(value["isBql"])
              return(.Object)
          })
#'
#' NlmeVpcParams : Parameters for VPC runs
#'
#' @param numReplicates Number of replicates to simulate
#' @param seed Random number generator seed
#' @param predCorrection One of VPC_PRED_NONE,VPC_PRED_PROPOTIONAL,VPC_PRED_ADDITIVE
#' @param predVarCorr flag to use Prediction Variance Correction
#' @param stratifyColumns List of covariates for Stratified PC
#' @param observationVars List of observation settings (NlmeObservationVar)
#' @param simulationTables Optional list of simulation tables (NlmeSimTableDef)
#'
#' @export NlmeVpcParams
#'
#' @examples
#'
#' observe1 = NlmeObservationVar(name="Cobs",
#' type=VPC_OBSERVE_T,
#' xaxis=VPC_XAXIS_TAD,
#' binningMethod=VPC_BIN_NONE,
#' quantilesValues ="5,50,95")
#'
#' observe2 = NlmeObservationVar(name="Iobs",
#' type=VPC_MULTI_T,
#' xaxis=VPC_XAXIS_PRED,
#' quantilesValues ="5,50,95")
#'
#' observe3 = NlmeObservationVar(name="Eobs",
#' type=VPC_LL_T,
#' timeToEvent="seq(1,10)"
#' quantilesValues ="5,50,95")
#'
#' table1=NlmeSimTableDef(name="simulate.csv",
#' timesList="0,2,4,12,24",
#' variablesList="V,Cl",
#' timeAfterDose=TRUE)
#'
#' vpc = NlmeVpcParams(numReplicates=10,
#' seed=29423,
#' predCorrection=VPC_PRED_PROPOTIONAL,
#' predVarCorr=TRUE,
#' stratifyColumns="sex,race,dosing",
#' observationVars=c(observe1,observe2,observe3),
#' simulationTables=c(table1))
#'
# S4 parameter container for a visual predictive check: replicate count,
# RNG seed, prediction-correction options, optional stratification
# covariates, the per-observation VPC settings, and any extra simulation
# tables to emit alongside the VPC output.
NlmeVpcParams = setClass("NlmeVpcParams",representation(
                      numReplicates="numeric",
                      seed="numeric",
                      predCorrection="numeric",
                      predVarCorr="logical",
                      stratifyColumns="character",
                      observationVars="list",
                      simulationTables="list"))
# Register the generator globally so sourced scripts see it.
assign("NlmeVpcParams",NlmeVpcParams,env=.GlobalEnv)
# Constructor: defaults give a 2-replicate, seed-1234 VPC with no
# prediction correction and no stratification.
setMethod("initialize","NlmeVpcParams",
    function(.Object,
              numReplicates=2,
              seed=1234,
              predCorrection=VPC_PRED_NONE,
              predVarCorr=FALSE,
              stratifyColumns="",
#              observationVars=c(NlmeObservationVar()),
              observationVars=list(),
              simulationTables=list()){
        .Object@numReplicates=numReplicates
        .Object@seed=seed
        .Object@predCorrection=predCorrection
        .Object@predVarCorr=predVarCorr
        .Object@stratifyColumns=stratifyColumns
        .Object@observationVars=observationVars
        .Object@simulationTables=simulationTables
        .Object
    })
#'
#' NlmeSimulationParams : Parameters for simulation runs
#'
#' @param numReplicates Number of replicates to simulate
#' @param seed Random number generator seed
#' @param simulationTables (NlmeSimTableDef)
#' @param isPopulation Simulating a population model (default = TRUE). The remaining arguments apply to individual models only
#' @param numPoints Number of points in simulation
#' @param maxXRange Max value of independent variable
#' @param yVariables comma separated list of Y variables
#' @param simAtObs Simulate values at observed values of ivar
#'
#' @export NlmeSimulationParams
#'
#' @examples
#'
#' table1=NlmeSimTableDef(name="simulate.csv",timesList="0,2,4,12,24",
#' variablesList="V,Cl",
#' timeAfterDose=TRUE)
#'
#' simParam = NlmeSimulationParams(numReplicates=10,
#' seed=29423,
#' simulationTables = c(table1))
#'
#' simParam = NlmeSimulationParams(isPopulation=FALSE,
#' numPoints=100,
#' maxXRange=50,
#' yVariables="C,A1",
#' simulationTables = c(table1))
#'
# S4 parameter container for plain simulation runs. For population models
# only numReplicates, seed and simulationTables are used; for individual
# models numPoints/maxXRange/yVariables/simAtObs configure a dense
# simulation grid over the independent variable.
NlmeSimulationParams = setClass("NlmeSimulationParams",representation(
                      numReplicates="numeric",
                      seed="numeric",
                      isPopulation="logical",
                      numPoints="numeric",
                      maxXRange="numeric",
                      yVariables="character",
                      simAtObs="logical",
                      simulationTables="list"))
# Register the generator globally so sourced scripts see it.
assign("NlmeSimulationParams",NlmeSimulationParams,env=.GlobalEnv)
# Constructor: defaults model a 2-replicate population simulation with one
# (empty) simulation table.
setMethod("initialize","NlmeSimulationParams",
    function(.Object,
              numReplicates=2,
              seed=1234,
              isPopulation=TRUE,
              numPoints=100,
              maxXRange=50,
              yVariables="",
              simAtObs=FALSE,
              simulationTables=c(NlmeSimTableDef())){
        .Object@numReplicates=numReplicates
        .Object@seed=seed
        .Object@isPopulation=isPopulation
        .Object@numPoints=numPoints
        .Object@maxXRange=maxXRange
        .Object@yVariables=yVariables
        .Object@simAtObs=simAtObs
        .Object@simulationTables=simulationTables
        .Object
    })
#'
#' RunVpcSimulation() : Method to execute an NLME VPC simulation
#'
#' @param hostPlatform How to execute the run(NlmeParallelHost)
#' @param dataset Dataset and model information(NlmeDataset)
#' @param params Engine parameters(NlmeEngineExtraParams)
#' @param vpcParams VPC parameters(NlmeVpcParams)
#' @param runInBackground TRUE will run in background and return prompt(Bool)
#' @param workingDir where to run the job
#'
#' @export RunVpcSimulation
#'
#' @examples
#'
#' dataset = NlmeDataset()
#'
#' vpcParams = NlmeVpcParams()
#'
#' param = NlmeEngineExtraParams(PARAMS_METHOD=METHOD_FOCE_LB,
#' PARAMS_NUM_ITERATIONS=1000)
#'
#' job = RunVpcSimulation(defaultHost,dataset,params,vpcParams,simParams)
#'
RunVpcSimulation <- function(hostPlatform,
                             dataset,
                             params,
                             vpcParams = NULL,
                             simParams = NULL,
                             runInBackground = TRUE,
                             workingDir = NULL)
{
    # Executes a VPC (when vpcParams is supplied) or a plain simulation
    # (when only simParams is supplied) as an NLME job and returns the job
    # object; executeJob() is fired before returning.
    workFlow <- "WorkFlow"
    cleanupFromPreviousRun()
    # Background execution is not supported on Windows hosts.
    if (attr(hostPlatform, "hostType") == "Windows")
        runInBackground <- FALSE
    cwd <- if (is.null(workingDir)) getwd() else workingDir
    argsFile <- GenerateControlfile(dataset, params, workFlow,
                                    vpcOption = vpcParams,
                                    simOption = simParams,
                                    workingDir = cwd)
    # Positional argument list handed to the NLME job runner.
    argsList <- list("GENERIC",
                     attr(attr(hostPlatform, "parallelMethod"), "method"),
                     attr(hostPlatform, "installationDirectory"),
                     attr(hostPlatform, "sharedDirectory"),
                     cwd,
                     argsFile,
                     attr(hostPlatform, "numCores"),
                     workFlow)
    jobName <- if (is.null(vpcParams)) "Simulation" else "VPC"
    job <- SimpleNlmeJob(jobType = jobName,
                         localDir = cwd,
                         remoteDir = cwd,
                         host = hostPlatform,
                         argsList = argsList,
                         argsFile = argsFile,
                         workflow = workFlow,
                         runInBackground = runInBackground)
    status <- executeJob(job)
    return(job)
}
#'
#' simmodel
#'
#' Method to execute an NLME simulation
#'
#' @param hostPlatform How to execute the run(NlmeParallelHost)
#' @param simParams Simulation parameters(NlmeSimulationParam)
#' @param model optional PK/PD model
#' @param runInBackground TRUE will run in background and return prompt(Bool)
#'
#' @export
#'
#' @examples
#'
#'
#' SimTableObs = NlmeSimTableDef(name = "SimTableObs.csv",
#' timesList = "0,1,2,4,4.9,55.1,56,57,59,60",
#' variablesList = "C, CObs",
#' timeAfterDose = FALSE)
#'
#' simParams = NlmeSimulationParams(numReplicates = 50,
#' seed = 3527,
#' simulationTables = c(SimTableObs))
#'
#' job = simmodel(defaultHost,simParams,model)
#'
simmodel <- function(hostPlatform,
                     simParams = NULL,
                     model = NULL,
                     runInBackground = TRUE)
{
    # Simulation is performed through the VPC engine using the naive-pooled
    # method with zero iterations, i.e. no estimation before simulating.
    params = NlmeEngineExtraParams(PARAMS_METHOD = METHOD_NAIVE_POOLED,
                                   PARAMS_NUM_ITERATIONS = 0)
    # BUG FIX: the original dereferenced model@dataset unconditionally in the
    # final RunVpcSimulation() call, so a NULL model always failed with a
    # cryptic S4 error even though an else-branch pretended to support it.
    # Fail fast with a clear message instead (backward compatible, since
    # NULL-model calls already errored).
    if (is.null(model)) {
        stop("simmodel() requires a non-NULL 'model'", call. = FALSE)
    }
    writeDefaultFiles(model = model, dataset = model@dataset, simParams = simParams)
    # Individual vs. population simulation follows the model definition.
    simParams@isPopulation = model@isPopulation
    workingDir = model@modelInfo@workingDir
    return(RunVpcSimulation(hostPlatform = hostPlatform,
                            params = params,
                            dataset = model@dataset,
                            simParams = simParams,
                            runInBackground = runInBackground,
                            workingDir = workingDir))
}
#'
#' vpcmodel
#'
#' Method to execute an NLME visual predictive check
#'
#' @param hostPlatform How to execute the run(NlmeParallelHost)
#' @param vpcParams VPC parameters(NlmeVpcParam)
#' @param model PK/PD model
#' @param runInBackground TRUE will run in background and return prompt(Bool)
#'
#' @export
#'
#' @examples
#'
#' obsVars = GetObservationVariables(model@dataset)
#'
#' observationParameters(obsVars[[1]])=c(xaxis=VPC_XAXIS_T,
#' binningMethod=VPC_BIN_NONE,
#' quantilesValues ="5,50,95")
#'
#' vpcParams = NlmeVpcParams(numReplicates=2,
#' seed=1234,
#' observationVars=obsVars)
#'
#'
#' job = vpcmodel(defaultHost,vpcParams,model)
#'
vpcmodel <- function(hostPlatform,
                     vpcParams = NULL,
                     model,
                     runInBackground = TRUE)
{
    # The VPC run uses the naive-pooled method with zero iterations, i.e.
    # no estimation is performed before the predictive-check simulation.
    params = NlmeEngineExtraParams(PARAMS_METHOD = METHOD_NAIVE_POOLED,
                                   PARAMS_NUM_ITERATIONS = 0)
    # BUG FIX: the original dereferenced model@dataset unconditionally in the
    # final RunVpcSimulation() call, so a NULL model always failed with a
    # cryptic S4 error even though an else-branch pretended to support it.
    # Fail fast with a clear message instead (backward compatible, since
    # NULL-model calls already errored).
    if (is.null(model)) {
        stop("vpcmodel() requires a non-NULL 'model'", call. = FALSE)
    }
    writeDefaultFiles(model = model, dataset = model@dataset, simParams = vpcParams)
    workingDir = model@modelInfo@workingDir
    return(RunVpcSimulation(hostPlatform = hostPlatform,
                            params = params,
                            dataset = model@dataset,
                            vpcParams = vpcParams,
                            runInBackground = runInBackground,
                            workingDir = workingDir))
}
|
make_frassp_conc_treatment_abs_effect_statistics <- function(inDF,
                                                             var.col,
                                                             return.outcome) {
    # Fits a mixed model (Trt + Date factor + pre-treatment LAI covariate,
    # random ring intercept) to column 'var.col' of 'inDF' and returns either
    # the model/anova/effect-size list (return.outcome == "model") or the
    # per-group predictions (return.outcome == "predicted").
    # Depends on globals: lai_variable, FACE_ring_area, plus doBy (summaryBy),
    # lme4 (lmer), car (Anova) and multcomp (glht) being attached.
    ### Pass in covariate values (assuming 1 value for each ring)
    cov2 <- lai_variable[lai_variable$Date <= "2013-02-06", ]
    covDF2 <- summaryBy(lai_variable ~ Ring, data = cov2, FUN = mean, keep.names = T)
    ### Read initial basal area data
    f12 <- read.csv("temp_files/EucFACE_dendrometers2011-12_RAW.csv")
    f12$ba <- ((f12$X20.09.2012 / 2)^2) * pi
    baDF <- summaryBy(ba ~ Ring, data = f12, FUN = sum, na.rm = T, keep.names = T)
    ### return in unit of cm2/m2, which is m2 ha-1
    baDF$ba_ground_area <- baDF$ba / FACE_ring_area
    for (i in 1:6) {
        inDF$Cov[inDF$Ring == i] <- baDF$ba_ground_area[baDF$Ring == i]
        inDF$Cov2[inDF$Ring == i] <- covDF2$lai_variable[covDF2$Ring == i]
    }
    #### Assign amb and ele factor: rings 2, 3 and 6 are ambient, the rest elevated.
    # Vectorized replacement of the original per-row if/else loop.
    inDF$Trt <- ifelse(inDF$Ring %in% c(2, 3, 6), "amb", "ele")
    #### Assign factors
    inDF$Trt <- as.factor(inDF$Trt)
    inDF$Ring <- as.factor(inDF$Ring)
    inDF$Datef <- as.factor(inDF$Date)
    #### Update variable name so that this function can be used across different variables
    colnames(inDF)[var.col] <- "Value"
    ## Average within treatment x ring x date before modelling
    tDF <- summaryBy(Value + Cov2 + Cov ~ Trt + Ring + Datef, data = inDF, FUN = mean, keep.names = T)
    ### Analyse the variable model
    ## model 1: no interaction, year as factor, ring random factor, include pre-treatment effect
    int.m1 <- "non-interative_with_covariate"
    modelt1 <- lmer(Value ~ Trt + Datef + Cov2 + (1 | Ring), data = tDF)
    ## anova
    m1.anova <- Anova(modelt1, test = "F")
    ## Check ele - amb diff (Tukey contrast on treatment)
    summ1 <- summary(glht(modelt1, linfct = mcp(Trt = "Tukey")))
    ## average effect size (fixed-effect coefficient for Trt = ele)
    eff.size1 <- coef(modelt1)[[1]][1, 2]
    ## confidence interval
    eff.conf1 <- confint(modelt1, "Trtele")
    out <- list(int.state = int.m1,
                mod = modelt1,
                anova = m1.anova,
                diff = summ1,
                eff = eff.size1,
                conf = eff.conf1)
    ### Predict the fitted model for the modelled groups
    # NOTE: the original recomputed 'cov2' here as well; that assignment was
    # dead code (never used afterwards) and has been removed.
    newDF <- tDF
    newDF$predicted <- predict(out$mod, newdata = newDF)
    if (return.outcome == "model") {
        return(out)
    } else if (return.outcome == "predicted") {
        return(newDF)
    }
}
| /modules/p_concentration_variables/frass_p_production/make_frasp_conc_treatment_abs_effect_statistics.R | no_license | mingkaijiang/EucFACE_modeling_2020_site_parameters | R | false | false | 2,641 | r | make_frassp_conc_treatment_abs_effect_statistics <- function(inDF,
var.col,
return.outcome) {
### Pass in covariate values (assuming 1 value for each ring)
cov2 <- lai_variable[lai_variable$Date<="2013-02-06",]
covDF2 <- summaryBy(lai_variable~Ring, data=cov2, FUN=mean, keep.names=T)
### Read initial basal area data
f12 <- read.csv("temp_files/EucFACE_dendrometers2011-12_RAW.csv")
f12$ba <- ((f12$X20.09.2012/2)^2) * pi
baDF <- summaryBy(ba~Ring, data=f12, FUN=sum, na.rm=T, keep.names=T)
### return in unit of cm2/m2, which is m2 ha-1
baDF$ba_ground_area <- baDF$ba / FACE_ring_area
for (i in 1:6) {
inDF$Cov[inDF$Ring==i] <- baDF$ba_ground_area[baDF$Ring==i]
inDF$Cov2[inDF$Ring==i] <- covDF2$lai_variable[covDF2$Ring==i]
}
#### Assign amb and ele factor
for (i in (1:length(inDF$Ring))) {
if (inDF$Ring[i]==2|inDF$Ring[i]==3|inDF$Ring[i]==6) {
inDF$Trt[i] <- "amb"
} else {
inDF$Trt[i] <- "ele"
}
}
#### Assign factors
inDF$Trt <- as.factor(inDF$Trt)
inDF$Ring <- as.factor(inDF$Ring)
inDF$Datef <- as.factor(inDF$Date)
#### Update variable name so that this function can be used across different variables
colnames(inDF)[var.col] <- "Value"
## Get year list and ring list
tDF <- summaryBy(Value+Cov2+Cov~Trt+Ring+Datef,data=inDF,FUN=mean, keep.names=T)
### Analyse the variable model
## model 1: no interaction, year as factor, ring random factor, include pre-treatment effect
int.m1 <- "non-interative_with_covariate"
modelt1 <- lmer(Value~Trt + Datef + Cov2 + (1|Ring),data=tDF)
## anova
m1.anova <- Anova(modelt1, test="F")
## Check ele - amb diff
summ1 <- summary(glht(modelt1, linfct = mcp(Trt = "Tukey")))
## average effect size
eff.size1 <- coef(modelt1)[[1]][1,2]
## confidence interval
eff.conf1 <- confint(modelt1,"Trtele")
out <- list(int.state=int.m1,
mod = modelt1,
anova = m1.anova,
diff = summ1,
eff = eff.size1,
conf = eff.conf1)
### Predict the model with a standard LAI value
newDF <- tDF
cov2 <- lai_variable[lai_variable$Date<="2013-02-06",]
newDF$predicted <- predict(out$mod, newdata=newDF)
if (return.outcome == "model") {
return(out)
} else if (return.outcome == "predicted") {
return(newDF)
}
}
|
library(rlist)
## This file requires an output list from many_holidays_filter_data.R. It calculates statistics for each Holiday and for each LIBOR rate. It creates a matrix called value_table.
# Expects the global `filtered_LIBOR`: a named list of per-holiday data
# frames, each with an indicator column `Pre` and the recentered rate columns.
holidays <- filtered_LIBOR ## This should be your result from many_holidays_filter_data.R
rates <- list("ON", "X1W", "X1M", "X2M", "X3M", "X6M", "X12M")
centered_rates <- list("recentered_rate_ON", "recentered_rate_X1W", "recentered_rate_X1M", "recentered_rate_X2M", "recentered_rate_X3M", "recentered_rate_X6M", "recentered_rate_X12M")
value_table <- c(names(holidays)) # initialize the output table with a column of holidays
# For each tenor, regress the recentered rate on the pre-holiday indicator
# within every holiday window and collect estimate / p-value / adjusted R^2 /
# full summary as four new columns of value_table.
for (single_rate in centered_rates){
  a_rate_value <- c()
  for (holiday in holidays){
    formula <- as.formula(paste(single_rate, "as.integer(Pre)", sep = "~")) ## this and the next line is what you change to use a different model
    model <- lm(formula, data = holiday) ###
    estimate <- summary(model)$coef["as.integer(Pre)", "Estimate"] ## change this line and next two lines to add different data to the table
    p_value <- summary(model)$coef["as.integer(Pre)", "Pr(>|t|)"]
    r_squared <-summary(model)$adj.r.squared
    a_rate_value <- rbind(a_rate_value, list(estimate, p_value, r_squared, summary(model)))
  }
  value_table <- cbind(value_table, a_rate_value)
}
## make the column names. You will need to change this if you add different data to the table
column_names <- list()
for (rate in centered_rates){
  new_sublist <- list(paste("Pre", rate, sep = " "), paste("p value", rate, sep = " "), paste(rate, "Adj Rsq", sep = " "), paste(rate, "Summary", sep = " ") )
  column_names <- c(column_names, new_sublist)
}
# NOTE(review): rlist::list.append(x, ...) appends to its FIRST argument, so
# this yields list("HOLIDAY", <nested list of names>) rather than a flat
# prepend of "HOLIDAY" to column_names — confirm this is intended.
column_names <- list.append("HOLIDAY", column_names)
colnames(value_table) <- column_names
###################################################################################################### | /Libor_Rates/Linear_Model_Check_sp/finish.R | no_license | anthonynguyen2021/LiborRatesIMAProject | R | false | false | 1,912 | r | library(rlist)
## This file requires an output list from many_holidays_filter_data.R. It calculates statistics for each Holiday and for each LIBOR rate. It creates a matrix called value_table.
holidays <- filtered_LIBOR ## This should be your result from many_holidays_filter_data.R
rates <- list("ON", "X1W", "X1M", "X2M", "X3M", "X6M", "X12M")
centered_rates <- list("recentered_rate_ON", "recentered_rate_X1W", "recentered_rate_X1M", "recentered_rate_X2M", "recentered_rate_X3M", "recentered_rate_X6M", "recentered_rate_X12M")
value_table <- c(names(holidays)) # initialize the output table with a column of holidays
for (single_rate in centered_rates){
a_rate_value <- c()
for (holiday in holidays){
formula <- as.formula(paste(single_rate, "as.integer(Pre)", sep = "~")) ## this and the next line is what you change to use a different model
model <- lm(formula, data = holiday) ###
estimate <- summary(model)$coef["as.integer(Pre)", "Estimate"] ## change this line and next two lines to add different data to the table
p_value <- summary(model)$coef["as.integer(Pre)", "Pr(>|t|)"]
r_squared <-summary(model)$adj.r.squared
a_rate_value <- rbind(a_rate_value, list(estimate, p_value, r_squared, summary(model)))
}
value_table <- cbind(value_table, a_rate_value)
}
## make the column names. You will need to change this if you add different data to the table
column_names <- list()
for (rate in centered_rates){
new_sublist <- list(paste("Pre", rate, sep = " "), paste("p value", rate, sep = " "), paste(rate, "Adj Rsq", sep = " "), paste(rate, "Summary", sep = " ") )
column_names <- c(column_names, new_sublist)
}
column_names <- list.append("HOLIDAY", column_names)
colnames(value_table) <- column_names
###################################################################################################### |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h5delete.R
\name{h5_deleteAttribute}
\alias{h5_deleteAttribute}
\alias{h5deleteAttribute}
\title{Delete attribute}
\usage{
h5deleteAttribute(file, name, attribute)
}
\arguments{
\item{file}{The filename (character) of the file in which the object is
located.}
\item{name}{The name of the object to which the attribute belongs.}
\item{attribute}{Name of the attribute to be deleted.}
}
\description{
Deletes an attribute associated with a group or dataset within an HDF5 file.
}
\author{
Mike Smith
}
| /man/h5_deleteAttribute.Rd | no_license | grimbough/rhdf5 | R | false | true | 580 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/h5delete.R
\name{h5_deleteAttribute}
\alias{h5_deleteAttribute}
\alias{h5deleteAttribute}
\title{Delete attribute}
\usage{
h5deleteAttribute(file, name, attribute)
}
\arguments{
\item{file}{The filename (character) of the file in which the object is
located.}
\item{name}{The name of the object to which the attribute belongs.}
\item{attribute}{Name of the attribute to be deleted.}
}
\description{
Deletes an attribute associated with a group or dataset within an HDF5 file.
}
\author{
Mike Smith
}
|
# Install `pkg` from CRAN when it is missing or older than `ver`.
# NOTE: install.packages() fetches the latest CRAN release, which is assumed
# to satisfy the requested minimum version.
ensure_version <- function(pkg, ver = "0.0") {
  missing_pkg <- system.file(package = pkg) == ""
  # `||` short-circuits, so packageVersion() is only consulted when the
  # package is actually installed.
  if (missing_pkg || packageVersion(pkg) < ver) {
    install.packages(pkg)
  }
}
# Ensure runtime dependencies meet the minimum versions the app was
# developed against, then attach them.
ensure_version("shiny", "1.2.0")
ensure_version("ggplot2", "3.1.0")
ensure_version("readxl", "1.2.0")
library('shiny')
library('ggplot2')
library('readxl')
# Define UI for app that draws a histogram ----
# Single-page UI: the sidebar chooses the data source (generated / uploaded /
# built-in examples), the class-interval rule and the frequency type; the
# main panel shows the raw data, the frequency table and the ogive plot.
# Each data-source branch carries its own widget set, so input IDs are
# suffixed per branch (interval/interval1/interval2, bins1..bins3,
# metodo1..metodo3, n1..n3).
ui <- fluidPage(
  # App title ----
  titlePanel("Ojivas y distribuciones de frecuencia"),
  # Sidebar layout with input and output definitions ----
  sidebarLayout(
    # Sidebar panel for inputs ----
    sidebarPanel(
      # Data-source selector; drives which conditionalPanel below is shown.
      radioButtons(inputId="n",
                   label = "Origen de los datos",
                   choices = c('Generados','Cargados','Ejemplos'),
                   selected = " "),
      # Branch 1: built-in example data sets.
      conditionalPanel( condition = "input.n=='Ejemplos'",
                        selectInput( inputId = "m",
                                     label = "Datos de ejemplo",
                                     choices= c('Sueldos','Otros','Ventas'),
                                     selected = NULL),
                        radioButtons(inputId="interval",
                                     label = "Elección de intervalos de clases",
                                     choices = c('Métodos dados','Manual'),
                                     selected = " "),
                        conditionalPanel(condition = "input.interval=='Métodos dados'",
                                         selectInput( inputId = "metodo1",
                                                      label = "Elija el método a usar",
                                                      choices= c('Fórmula de Sturges','Regla de Scott','Selección de Freedman-Diaconis'),
                                                      selected = NULL)
                        ),
                        conditionalPanel(condition = "input.interval=='Manual'",
                                         sliderInput(inputId = "bins1",
                                                     label = "Número de intervalos",
                                                     min = 2,
                                                     max = 20,
                                                     value = 2)
                        ),
                        selectInput( inputId = "n1",
                                     label = "Tipo de frecuencia",
                                     choices= c('Frecuencia acumulada','Frecuencia acumulada relativa'),
                                     selected = NULL)
      ),
      # Branch 2: data uploaded from an Excel file (read with readxl).
      conditionalPanel( condition = "input.n=='Cargados'",
                        fileInput( inputId = "datoscargados",
                                   label = "Seleccionar desde un archivo guardado", buttonLabel = "Buscar...",
                                   placeholder = "Aun no seleccionas el archivo..."),
                        numericInput( inputId = "columna",
                                      label="Escoja el número de columna deseado",
                                      min = 1,
                                      max = 100,
                                      step = 1,
                                      value = 1,
                                      width = "100%"),
                        radioButtons(inputId="interval1",
                                     label = "Elección de intervalos de clases",
                                     choices = c('Métodos dados','Manual'),
                                     selected = " "),
                        conditionalPanel(condition = "input.interval1=='Métodos dados'",
                                         selectInput( inputId = "metodo2",
                                                      label = "Elija el método a usar",
                                                      choices= c('Fórmula de Sturges','Regla de Scott','Selección de Freedman-Diaconis'),
                                                      selected = NULL)
                        ),
                        conditionalPanel(condition = "input.interval1=='Manual'",
                                         sliderInput(inputId = "bins2",
                                                     label = "Número de intervalos",
                                                     min = 2,
                                                     max = 20,
                                                     value = 2)
                        ),
                        selectInput( inputId = "n2",
                                     label = "Tipo de frecuencia",
                                     choices= c('Frecuencia acumulada','Frecuencia acumulada relativa'),
                                     selected = NULL)
      ),
      # Branch 3: randomly generated sample data.
      conditionalPanel( condition = "input.n=='Generados'",
                        sliderInput(inputId = "CantidadDatos",
                                    label = "Cantidad de datos",
                                    min = 2,
                                    max = 100,
                                    value = 5),
                        radioButtons(inputId="interval2",
                                     label = "Elección de intervalos de clases",
                                     choices = c('Métodos dados','Manual'),
                                     selected = " "),
                        conditionalPanel(condition = "input.interval2=='Métodos dados'",
                                         selectInput( inputId = "metodo3",
                                                      label = "Elija el método a usar",
                                                      choices= c('Fórmula de Sturges','Regla de Scott','Selección de Freedman-Diaconis'),
                                                      selected = NULL)
                        ),
                        conditionalPanel(condition = "input.interval2=='Manual'",
                                         sliderInput(inputId = "bins3",
                                                     label = "Número de intervalos",
                                                     min = 2,
                                                     max = 20,
                                                     value = 2)
                        ),
                        selectInput( inputId = "n3",
                                     label = "Tipo de frecuencia",
                                     choices= c('Frecuencia acumulada','Frecuencia acumulada relativa'),
                                     selected = NULL)
      )
    ),
    # Main panel for displaying outputs ----
    mainPanel(
      # Outputs are filled in by the server: raw data table, frequency
      # distribution table and the ogive plot.
      tabsetPanel(type='tabs',id='f',
                  tabPanel('Datos',br(),dataTableOutput('tabla')),
                  tabPanel('Distribución de frecuencia',br(),br(),column(width = 12,align='center',tableOutput('tabla1'))),
                  tabPanel('Ojiva',br(),plotOutput('distPlot'))
      )
    )
  )
)
# Define server logic required to draw a histogram ----
server <- function(input, output) {
Sueldos <- c(47,47,47,47,48,49,50,50,50,51,51,51,51,52,52,52,52,52,52,54,54,
54,54,54,57,60,49,49,50,50,51,51,51,51,52,52,56,56,57,57,52,52)
Ventas<-c(rep(1,4),rep(2,5),rep(3,2),rep(4,10),rep(5,9),rep(6,6),rep(7,6))
Otros<-c(rep(10,4),rep(22,5),rep(35,2),rep(46,10),rep(57,9),rep(68,6),rep(74,6))
dat<-reactive({
infile <- input$n
if(is.null(infile)){
return()
}
else if(infile=='Ejemplos'){
infile1<-input$m
if(infile1=='Sueldos'){
data.frame(Sueldos)
}
else if(infile1=='Ventas'){
data.frame(Ventas)
}
else if(infile1=='Otros')
data.frame(Otros)
}
else if(infile=='Cargados'){
infile2<-input$datoscargados
if(is.null(infile2)){
return()
}
else{
as.data.frame(read_excel(infile2$datapath))
}
}
else if(infile=='Generados'){
data.frame(Datos=sample(80:100,input$CantidadDatos,replace = TRUE))
}
})
output$tabla1<-renderTable({
if(is.null(input$n)){
return()
}
else if(input$n=='Ejemplos'){
if(is.null(input$interval)){
return()
}
else if(input$interval=='Métodos dados'){
intervalo<-if(input$metodo1=='Fórmula de Sturges'){
nclass.Sturges(dat()[,1])
}
else if(input$metodo1=='Regla de Scott'){
nclass.scott(dat()[,1])
}
else if(input$metodo1=='Selección de Freedman-Diaconis'){
nclass.FD(dat()[,1])
}
clase<-cut(dat()[,1],breaks = intervalo,include.lowest = TRUE,right = FALSE)
if(input$n1=='Frecuencia acumulada'){
fr<-data.frame(table(clase))
fa<-transform(fr,fAcum=cumsum(Freq))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada")
return(fa)
}
else if(input$n1=='Frecuencia acumulada relativa'){
fr<-data.frame(table(clase))
fa<-transform(fr,FreAcuRel=cumsum(prop.table(`Freq`)))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada relativa")
return(fa)
}
}
else if(input$interval=='Manual'){
intervalo<-input$bins1
clase<-cut(dat()[,1],breaks = intervalo,include.lowest = TRUE,right = FALSE)
if(input$n1=='Frecuencia acumulada'){
fr<-data.frame(table(clase))
fa<-transform(fr,fAcum=cumsum(Freq))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada")
return(fa)
}
else if(input$n1=='Frecuencia acumulada relativa'){
fr<-data.frame(table(clase))
fa<-transform(fr,FreAcuRel=cumsum(prop.table(`Freq`)))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada relativa")
return(fa)
}
}
}
else if(input$n=='Generados'){
if(is.null(input$interval2)){
return()
}
else if(input$interval2=='Métodos dados'){
intervalo1<-if(input$metodo3=='Fórmula de Sturges'){
nclass.Sturges(dat()$Datos)
}
else if(input$metodo3=='Regla de Scott'){
nclass.scott(dat()$Datos)
}
else if(input$metodo3=='Selección de Freedman-Diaconis'){
nclass.FD(dat()$Datos)
}
clase<-cut(dat()$Datos,breaks = intervalo1,include.lowest = TRUE,right = FALSE)
if(input$n3=='Frecuencia acumulada'){
fr<-data.frame(table(clase))
fa<-transform(fr,fAcum=cumsum(Freq))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada")
return(fa)
}
else if(input$n3=='Frecuencia acumulada relativa'){
fr<-data.frame(table(clase))
fa<-transform(fr,FreAcuRel=cumsum(prop.table(`Freq`)))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada relativa")
return(fa)
}
}
else if(input$interval2=='Manual'){
intervalo1<-input$bins3
clase<-cut(dat()$Datos,breaks = intervalo1,include.lowest = TRUE,right = FALSE)
if(input$n3=='Frecuencia acumulada'){
fr<-data.frame(table(clase))
fa<-transform(fr,fAcum=cumsum(Freq))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada")
return(fa)
}
else if(input$n3=='Frecuencia acumulada relativa'){
fr<-data.frame(table(clase))
fa<-transform(fr,FreAcuRel=cumsum(prop.table(`Freq`)))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada relativa")
return(fa)
}
}
}
else if(input$n=='Cargados'){
ncolumna<-input$columna
if(is.null(input$interval1)){
return()
}
else if(input$interval1=='Métodos dados'){
intervalo2<-if(input$metodo2=='Fórmula de Sturges'){
nclass.Sturges(dat()[,ncolumna])
}
else if(input$metodo2=='Regla de Scott'){
nclass.scott(dat()[,ncolumna])
}
else if(input$metodo2=='Selección de Freedman-Diaconis'){
nclass.FD(dat()[,ncolumna])
}
clase<-cut(dat()[,ncolumna],breaks = intervalo2,include.lowest = TRUE,right = FALSE)
if(input$n2=='Frecuencia acumulada'){
fr<-data.frame(table(clase))
fa<-transform(fr,fAcum=cumsum(Freq))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada")
return(fa)
}
else if(input$n2=='Frecuencia acumulada relativa'){
fr<-data.frame(table(clase))
fa<-transform(fr,FreAcuRel=cumsum(prop.table(`Freq`)))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada relativa")
return(fa)
}
}
else if(input$interval1=='Manual'){
intervalo2<-input$bins2
clase<-cut(dat()[,ncolumna],breaks = intervalo2,include.lowest = TRUE,right = FALSE)
if(input$n2=='Frecuencia acumulada'){
fr<-data.frame(table(clase))
fa<-transform(fr,fAcum=cumsum(Freq))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada")
return(fa)
}
else if(input$n2=='Frecuencia acumulada relativa'){
fr<-data.frame(table(clase))
fa<-transform(fr,FreAcuRel=cumsum(prop.table(`Freq`)))
colnames(fa)<-c("Intervalos","Frecuencia","Frecuencia acumulada relativa")
return(fa)
}
}
}
},digits = 4)
output$tabla<-renderDataTable({
return(dat())
},options = list(scrollX=TRUE,scrollY=300,searching=FALSE))
# "Ojiva" tab: cumulative-frequency ("less than") ogive for the selected data.
# The original body repeated the same cut/transform/ggplot pipeline eight
# times (one per data source x interval mode x frequency type); it is now
# factored into two local helpers with identical behavior.
output$distPlot <- renderPlot({
  # Number of class intervals for v according to the chosen method name.
  n_intervalos <- function(v, metodo) {
    switch(metodo,
           'Fórmula de Sturges' = nclass.Sturges(v),
           'Regla de Scott' = nclass.scott(v),
           'Selección de Freedman-Diaconis' = nclass.FD(v))
  }
  # "Less than" ogive for v split into `intervalo` classes; `tipo` picks
  # absolute vs. relative cumulative frequency (anything else: no plot,
  # matching the original fall-through branches).
  ojiva <- function(v, intervalo, tipo) {
    clase <- cut(v, breaks = intervalo, include.lowest = TRUE, right = FALSE)
    ancho <- (max(v) - min(v)) / intervalo            # class width
    fr <- data.frame(table(clase))
    if (tipo == 'Frecuencia acumulada') {
      fa <- transform(fr, fAcum = cumsum(Freq))
      etiqueta <- "Frecuencia acumulada"
    } else if (tipo == 'Frecuencia acumulada relativa') {
      fa <- transform(fr, FreAcuRel = cumsum(prop.table(Freq)))
      etiqueta <- "Frecuencia acumulada relativa"
    } else {
      return(NULL)
    }
    colnames(fa) <- c("Intervalos", "Frecuencia", etiqueta)
    marcas <- seq(min(v) + ancho, max(v), by = ancho) # upper class limits
    ggplot(fa, mapping = aes(x = marcas, y = fa[[etiqueta]])) +
      geom_point(colour = "blue", size = 2) +
      geom_line(colour = "blue", size = 1) +
      labs(title = expression('Ojiva menor que (' <= ')'),
           x = "Distribución de frecuencia",
           y = etiqueta,
           caption = "https://synergy.vision/") +
      scale_x_continuous(breaks = round(marcas, 2))
  }
  # Branch on data source and interval-selection mode exactly as before;
  # nothing is rendered until each radio group has a real selection.
  if (is.null(input$n)) {
    return()
  } else if (input$n == 'Ejemplos') {
    if (is.null(input$interval)) {
      return()
    }
    if (input$interval == 'Métodos dados') {
      v <- dat()[, 1]
      ojiva(v, n_intervalos(v, input$metodo1), input$n1)
    } else if (input$interval == 'Manual') {
      ojiva(dat()[, 1], input$bins1, input$n1)
    }
  } else if (input$n == 'Generados') {
    if (is.null(input$interval2)) {
      return()
    }
    if (input$interval2 == 'Métodos dados') {
      v <- dat()$Datos
      ojiva(v, n_intervalos(v, input$metodo3), input$n3)
    } else if (input$interval2 == 'Manual') {
      ojiva(dat()$Datos, input$bins3, input$n3)
    }
  } else if (input$n == 'Cargados') {
    if (is.null(input$interval1)) {
      return()
    }
    if (input$interval1 == 'Métodos dados') {
      v <- dat()[, input$columna]
      ojiva(v, n_intervalos(v, input$metodo2), input$n2)
    } else if (input$interval1 == 'Manual') {
      ojiva(dat()[, input$columna], input$bins2, input$n2)
    }
  }
})
}
# Create Shiny app ----
# Wire the UI definition and server function above into a runnable app.
shinyApp(ui = ui, server = server)
| /004-Ojivas/app.R | no_license | synergyvision/pe-apps | R | false | false | 25,339 | r | ensure_version <- function(pkg, ver = "0.0") {
if (system.file(package = pkg) == "" || packageVersion(pkg) < ver)
install.packages(pkg)
}
# Install any dependency that is missing or older than the required version,
# then attach the three packages the app uses.
ensure_version("shiny", "1.2.0")
ensure_version("ggplot2", "3.1.0")
ensure_version("readxl", "1.2.0")
library('shiny')
library('ggplot2')
library('readxl')
# Define UI for app that draws a histogram ----
ui <- fluidPage(
# App title ----
titlePanel("Ojivas y distribuciones de frecuencia"),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Data-source selector ("Generados" / "Cargados" / "Ejemplos"); it drives
# which one of the three conditional panels below is displayed.
radioButtons(inputId="n",
label = "Origen de los datos",
choices = c('Generados','Cargados','Ejemplos'),
selected = " "),
# Panel for the built-in example data sets.
conditionalPanel( condition = "input.n=='Ejemplos'",
selectInput( inputId = "m",
label = "Datos de ejemplo",
choices= c('Sueldos','Otros','Ventas'),
selected = NULL),
radioButtons(inputId="interval",
label = "Elección de intervalos de clases",
choices = c('Métodos dados','Manual'),
selected = " "),
conditionalPanel(condition = "input.interval=='Métodos dados'",
selectInput( inputId = "metodo1",
label = "Elija el método a usar",
choices= c('Fórmula de Sturges','Regla de Scott','Selección de Freedman-Diaconis'),
selected = NULL)
),
conditionalPanel(condition = "input.interval=='Manual'",
sliderInput(inputId = "bins1",
label = "Número de intervalos",
min = 2,
max = 20,
value = 2)
),
selectInput( inputId = "n1",
label = "Tipo de frecuencia",
choices= c('Frecuencia acumulada','Frecuencia acumulada relativa'),
selected = NULL)
),
# Panel for user-uploaded data (Excel file read by the server via read_excel).
conditionalPanel( condition = "input.n=='Cargados'",
fileInput( inputId = "datoscargados",
label = "Seleccionar desde un archivo guardado", buttonLabel = "Buscar...",
placeholder = "Aun no seleccionas el archivo..."),
numericInput( inputId = "columna",
label="Escoja el número de columna deseado",
min = 1,
max = 100,
step = 1,
value = 1,
width = "100%"),
radioButtons(inputId="interval1",
label = "Elección de intervalos de clases",
choices = c('Métodos dados','Manual'),
selected = " "),
conditionalPanel(condition = "input.interval1=='Métodos dados'",
selectInput( inputId = "metodo2",
label = "Elija el método a usar",
choices= c('Fórmula de Sturges','Regla de Scott','Selección de Freedman-Diaconis'),
selected = NULL)
),
conditionalPanel(condition = "input.interval1=='Manual'",
sliderInput(inputId = "bins2",
label = "Número de intervalos",
min = 2,
max = 20,
value = 2)
),
selectInput( inputId = "n2",
label = "Tipo de frecuencia",
choices= c('Frecuencia acumulada','Frecuencia acumulada relativa'),
selected = NULL)
),
# Panel for randomly generated data (sampled in the server's dat() reactive).
conditionalPanel( condition = "input.n=='Generados'",
sliderInput(inputId = "CantidadDatos",
label = "Cantidad de datos",
min = 2,
max = 100,
value = 5),
radioButtons(inputId="interval2",
label = "Elección de intervalos de clases",
choices = c('Métodos dados','Manual'),
selected = " "),
conditionalPanel(condition = "input.interval2=='Métodos dados'",
selectInput( inputId = "metodo3",
label = "Elija el método a usar",
choices= c('Fórmula de Sturges','Regla de Scott','Selección de Freedman-Diaconis'),
selected = NULL)
),
conditionalPanel(condition = "input.interval2=='Manual'",
sliderInput(inputId = "bins3",
label = "Número de intervalos",
min = 2,
max = 20,
value = 2)
),
selectInput( inputId = "n3",
label = "Tipo de frecuencia",
choices= c('Frecuencia acumulada','Frecuencia acumulada relativa'),
selected = NULL)
)
),
# Main panel for displaying outputs ----
# Three tabs backed by output$tabla, output$tabla1 and output$distPlot.
mainPanel(
tabsetPanel(type='tabs',id='f',
tabPanel('Datos',br(),dataTableOutput('tabla')),
tabPanel('Distribución de frecuencia',br(),br(),column(width = 12,align='center',tableOutput('tabla1'))),
tabPanel('Ojiva',br(),plotOutput('distPlot'))
)
)
)
)
# Define server logic required to draw a histogram ----
# The original server repeated the same cut/transform pipeline sixteen times
# (data source x interval mode x frequency type, for both the table and the
# plot). That logic is factored into shared helpers; observable behavior
# (tables, plots, reactive NULL fall-throughs) is unchanged.
server <- function(input, output) {
  # Built-in example data sets ----
  Sueldos <- c(47,47,47,47,48,49,50,50,50,51,51,51,51,52,52,52,52,52,52,54,54,
               54,54,54,57,60,49,49,50,50,51,51,51,51,52,52,56,56,57,57,52,52)
  Ventas <- c(rep(1,4),rep(2,5),rep(3,2),rep(4,10),rep(5,9),rep(6,6),rep(7,6))
  Otros <- c(rep(10,4),rep(22,5),rep(35,2),rep(46,10),rep(57,9),rep(68,6),rep(74,6))
  # Number of class intervals for v according to the chosen method name.
  n_intervalos <- function(v, metodo) {
    switch(metodo,
           'Fórmula de Sturges' = nclass.Sturges(v),
           'Regla de Scott' = nclass.scott(v),
           'Selección de Freedman-Diaconis' = nclass.FD(v))
  }
  # Frequency-distribution table of v with `intervalo` classes. `tipo`
  # selects absolute vs. relative cumulative frequency; any other value
  # yields NULL (render nothing), matching the original fall-through.
  tabla_frec <- function(v, intervalo, tipo) {
    clase <- cut(v, breaks = intervalo, include.lowest = TRUE, right = FALSE)
    fr <- data.frame(table(clase))
    if (tipo == 'Frecuencia acumulada') {
      fa <- transform(fr, fAcum = cumsum(Freq))
      colnames(fa) <- c("Intervalos", "Frecuencia", "Frecuencia acumulada")
      fa
    } else if (tipo == 'Frecuencia acumulada relativa') {
      fa <- transform(fr, FreAcuRel = cumsum(prop.table(Freq)))
      colnames(fa) <- c("Intervalos", "Frecuencia", "Frecuencia acumulada relativa")
      fa
    }
  }
  # "Less than" ogive built on top of tabla_frec().
  ojiva <- function(v, intervalo, tipo) {
    fa <- tabla_frec(v, intervalo, tipo)
    if (is.null(fa)) {
      return(NULL)
    }
    ancho <- (max(v) - min(v)) / intervalo             # class width
    etiqueta <- colnames(fa)[3]                        # y-axis label
    marcas <- seq(min(v) + ancho, max(v), by = ancho)  # upper class limits
    ggplot(fa, mapping = aes(x = marcas, y = fa[[etiqueta]])) +
      geom_point(colour = "blue", size = 2) +
      geom_line(colour = "blue", size = 1) +
      labs(title = expression('Ojiva menor que (' <= ')'),
           x = "Distribución de frecuencia",
           y = etiqueta,
           caption = "https://synergy.vision/") +
      scale_x_continuous(breaks = round(marcas, 2))
  }
  # Dispatch on the UI state (data source, interval mode, frequency type)
  # and call f(v, intervalo, tipo); returns NULL while a radio group still
  # has its initial " " placeholder selection.
  con_seleccion <- function(f) {
    if (is.null(input$n)) {
      return(NULL)
    }
    if (input$n == 'Ejemplos') {
      if (is.null(input$interval)) {
        return(NULL)
      }
      if (input$interval == 'Métodos dados') {
        v <- dat()[, 1]
        f(v, n_intervalos(v, input$metodo1), input$n1)
      } else if (input$interval == 'Manual') {
        f(dat()[, 1], input$bins1, input$n1)
      }
    } else if (input$n == 'Generados') {
      if (is.null(input$interval2)) {
        return(NULL)
      }
      if (input$interval2 == 'Métodos dados') {
        v <- dat()$Datos
        f(v, n_intervalos(v, input$metodo3), input$n3)
      } else if (input$interval2 == 'Manual') {
        f(dat()$Datos, input$bins3, input$n3)
      }
    } else if (input$n == 'Cargados') {
      if (is.null(input$interval1)) {
        return(NULL)
      }
      if (input$interval1 == 'Métodos dados') {
        v <- dat()[, input$columna]
        f(v, n_intervalos(v, input$metodo2), input$n2)
      } else if (input$interval1 == 'Manual') {
        f(dat()[, input$columna], input$bins2, input$n2)
      }
    }
  }
  # Currently selected data set as a data frame: a built-in example, an
  # uploaded Excel file, or freshly sampled random values.
  dat <- reactive({
    origen <- input$n
    if (is.null(origen)) {
      return()
    }
    if (origen == 'Ejemplos') {
      switch(input$m,
             'Sueldos' = data.frame(Sueldos),
             'Ventas' = data.frame(Ventas),
             'Otros' = data.frame(Otros))
    } else if (origen == 'Cargados') {
      archivo <- input$datoscargados
      if (is.null(archivo)) {
        return()
      }
      as.data.frame(read_excel(archivo$datapath))
    } else if (origen == 'Generados') {
      data.frame(Datos = sample(80:100, input$CantidadDatos, replace = TRUE))
    }
  })
  # "Distribución de frecuencia" tab.
  output$tabla1 <- renderTable({
    con_seleccion(tabla_frec)
  }, digits = 4)
  # "Datos" tab: raw data, scrollable, search disabled.
  output$tabla <- renderDataTable({
    dat()
  }, options = list(scrollX = TRUE, scrollY = 300, searching = FALSE))
  # "Ojiva" tab.
  output$distPlot <- renderPlot({
    con_seleccion(ojiva)
  })
}
# Create Shiny app ----
# Wire the UI definition and server function above into a runnable app.
shinyApp(ui = ui, server = server)
|
library(tidyverse)
library(completejourney)
library(lubridate)
### Question 1 ----
# Histogram of purchase quantity over all transactions.
transactions %>%
  ggplot(aes(x = quantity)) +
  geom_histogram()
# Only one bar that goes all the way up the entire plot
# Zoom in on quantities of at most 10.
transactions %>%
  filter(quantity <= 10) %>%
  ggplot(aes(x = quantity)) +
  geom_histogram()
# Still has a long tail
### Question 2 ----
# Total sales value per calendar day, plotted as a time series.
transactions %>%
  mutate(date = date(transaction_timestamp)) %>%
  group_by(date) %>%
  summarize(total_sales_value = sum(sales_value, na.rm = TRUE)) %>%
  ggplot(aes(x = date, y = total_sales_value)) +
  geom_line()
# Incredibly erratic and difficult to read
###Question 3
transactions_products <- left_join(
transactions,
products,
by = "product_id"
) %>%
mutate(brand = fct_explicit_na(brand)) %>%
filter(brand != "(Missing)")
transactions_products %>%
group_by(brand) %>%
summarize(total_sales_value = sum(sales_value)) %>%
ggplot(mapping = aes(x = brand, y = total_sales_value)) +
geom_bar(stat = "identity")
###Question 4
transactions_products %>%
filter(product_category %in% c("SOFT DRINKS", "CHEESE")) %>%
group_by(product_category, brand) %>%
summarize(total_sales_value = sum(sales_value)) %>%
ggplot(
mapping = aes(x = product_category, y = total_sales_value, fill = brand)
) +
geom_col(position = "fill")
### Question 5 ----
# Number of peanut butter / jelly / jam transactions by package size,
# ordered by frequency. count() replaces group_by() + summarize(n()),
# and geom_col() replaces geom_bar(stat = "identity").
transactions_products %>%
  filter(product_category == "PNT BTR/JELLY/JAMS") %>%
  count(package_size, name = "count") %>%
  ggplot(aes(x = fct_reorder(package_size, count), y = count)) +
  geom_col() +
  coord_flip()
| /submissions/02_cj-data-visualization-bgc5kq.R | no_license | GCOM7140/r4ds-exercises | R | false | false | 1,685 | r | library(tidyverse)
library(completejourney)
library(lubridate)
###Question 1
ggplot(data = transactions) +
geom_histogram(mapping = aes(x = quantity))
#Only one bar that goes all the way up the entire plot
ggplot(data = transactions %>% filter(quantity <= 10)) +
geom_histogram(mapping = aes(x =quantity))
#Still has a long tail
###Question 2
transactions %>%
mutate(date = date(transaction_timestamp)) %>%
group_by(date) %>%
summarize(total_sales_value = sum(sales_value, na.rm = TRUE)) %>%
ggplot() +
geom_line(mapping = aes(x = date, y = total_sales_value))
#Incredibly erratic and difficult to read
###Question 3
transactions_products <- left_join(
transactions,
products,
by = "product_id"
) %>%
mutate(brand = fct_explicit_na(brand)) %>%
filter(brand != "(Missing)")
transactions_products %>%
group_by(brand) %>%
summarize(total_sales_value = sum(sales_value)) %>%
ggplot(mapping = aes(x = brand, y = total_sales_value)) +
geom_bar(stat = "identity")
###Question 4
transactions_products %>%
filter(product_category %in% c("SOFT DRINKS", "CHEESE")) %>%
group_by(product_category, brand) %>%
summarize(total_sales_value = sum(sales_value)) %>%
ggplot(
mapping = aes(x = product_category, y = total_sales_value, fill = brand)
) +
geom_col(position = "fill")
###Question 5
transactions_products %>%
filter(product_category == "PNT BTR/JELLY/JAMS") %>%
group_by(package_size) %>%
summarize(count = n()) %>%
ggplot() +
geom_bar(
mapping = aes(x = package_size %>% fct_reorder(count), y = count),
stat = "identity"
) +
coord_flip()
|
library(parallel)
library(pracma)
library(Rcpp)
sourceCpp("approximation/count_distances_exdiag.cpp")
mcc <- 4
# Estimate identification accuracy as a function of candidate-set size.
#
# counts : integer vector, one entry per query point; based on the commented
#          usage below, counts[i] is presumably the number of *other* points
#          lying closer than point i's own match distance (output of
#          countDistEx()) -- TODO confirm against the C++ source.
# m      : candidate-set sizes to evaluate (default 1:K, one per point).
#
# For each point, nbads = counts + 1.  dhyper(0, nbads - 1, K - nbads, m - 1)
# is the hypergeometric probability of drawing none of the nbads - 1 closer
# competitors in m - 1 draws from the remaining points; pracma::zeros() and
# pracma::repmat() broadcast the arguments to a length(m) x K grid, and
# rowMeans() averages over the K points, yielding one value per element of m.
count_acc <- function(counts, m = 1:length(counts)) {
  K <- length(counts)
  nbads <- counts + 1
  p_i <- 1 - nbads/K  # NOTE(review): computed but never used below
  rowMeans(dhyper(zeros(length(m), length(p_i)),
                  repmat(nbads, length(m), 1) - 1,
                  K - repmat(nbads, length(m), 1), repmat(t(t(m)), 1, length(p_i)) -
                    1))
}
## query points x_i which are within distance r of x
# sigma2 <- 0.25
# sigma2_tr <- 0.25
#
# p <- 10
# n <- 1e5
# mus <- randn(n, p)
#
# muhs <- mus + sqrt(sigma2_tr) * randn(n, p)
# ys <- mus + sqrt(sigma2) * randn(n, p)
# rSqs <- rowSums((ys - muhs)^2)
#
# t1 <- proc.time()
# counts <- countDistEx(muhs, ys, rSqs)
# (cpp_time <- proc.time() - t1)
#
# 1 - mean(counts != 0) ## accuracy
## takes 290s for 1e5
## naive way
# t1 <- proc.time()
# dd <- pdist2(muhs, ys)^2
# counts0 <- sapply(1:n, function(i) sum(dd[-i, i] < rSqs[i]))
# (naive_time <- proc.time() -t1)
#
# sum(counts != counts0)
# 1 - mean(counts != 0) ## accuracy
# rbind(cpp_time, naive_time)
#
# library(lineId)
# accs <- 1 - resample_misclassification(-t(dd), 1:n, 1:n)
# accs2 <- count_acc(counts)
# plot(accs, type = "l")
# lines(accs2, col = "red")
# accs <- count_acc(counts)
# plot(accs, type = "l", ylim = c(0,1)) | /approximation/gaussian_identity_finsam2.R | no_license | snarles/fmri | R | false | false | 1,346 | r | library(parallel)
library(pracma)
library(Rcpp)
sourceCpp("approximation/count_distances_exdiag.cpp")
mcc <- 4
# Estimate identification accuracy as a function of candidate-set size.
#
# counts : integer vector, one entry per query point; based on the commented
#          usage below, counts[i] is presumably the number of *other* points
#          lying closer than point i's own match distance (output of
#          countDistEx()) -- TODO confirm against the C++ source.
# m      : candidate-set sizes to evaluate (default 1:K, one per point).
#
# For each point, nbads = counts + 1.  dhyper(0, nbads - 1, K - nbads, m - 1)
# is the hypergeometric probability of drawing none of the nbads - 1 closer
# competitors in m - 1 draws from the remaining points; pracma::zeros() and
# pracma::repmat() broadcast the arguments to a length(m) x K grid, and
# rowMeans() averages over the K points, yielding one value per element of m.
count_acc <- function(counts, m = 1:length(counts)) {
  K <- length(counts)
  nbads <- counts + 1
  p_i <- 1 - nbads/K  # NOTE(review): computed but never used below
  rowMeans(dhyper(zeros(length(m), length(p_i)),
                  repmat(nbads, length(m), 1) - 1,
                  K - repmat(nbads, length(m), 1), repmat(t(t(m)), 1, length(p_i)) -
                    1))
}
## query points x_i which are within distance r of x
# sigma2 <- 0.25
# sigma2_tr <- 0.25
#
# p <- 10
# n <- 1e5
# mus <- randn(n, p)
#
# muhs <- mus + sqrt(sigma2_tr) * randn(n, p)
# ys <- mus + sqrt(sigma2) * randn(n, p)
# rSqs <- rowSums((ys - muhs)^2)
#
# t1 <- proc.time()
# counts <- countDistEx(muhs, ys, rSqs)
# (cpp_time <- proc.time() - t1)
#
# 1 - mean(counts != 0) ## accuracy
## takes 290s for 1e5
## naive way
# t1 <- proc.time()
# dd <- pdist2(muhs, ys)^2
# counts0 <- sapply(1:n, function(i) sum(dd[-i, i] < rSqs[i]))
# (naive_time <- proc.time() -t1)
#
# sum(counts != counts0)
# 1 - mean(counts != 0) ## accuracy
# rbind(cpp_time, naive_time)
#
# library(lineId)
# accs <- 1 - resample_misclassification(-t(dd), 1:n, 1:n)
# accs2 <- count_acc(counts)
# plot(accs, type = "l")
# lines(accs2, col = "red")
# accs <- count_acc(counts)
# plot(accs, type = "l", ylim = c(0,1)) |
houses <- read.csv("E:/houses.csv")
str(houses)
summary(houses)

# Data cleaning --------------------------------------------------------------
library(dplyr)
# Drop the first two columns (identifier columns).
houses <- houses %>% select(c(-1, -2))

# Recode integer-coded categorical columns as labelled factors.
houses$air_cond     <- factor(houses$air_cond,     labels = c("No", "Yes"))
houses$construction <- factor(houses$construction, labels = c("No", "Yes"))
houses$waterfront   <- factor(houses$waterfront,   labels = c("No", "Yes"))
houses$fuel         <- factor(houses$fuel,         labels = c("Gas", "Electric", "Oil"))
houses$sewer        <- factor(houses$sewer,        labels = c("None", "Private", "Public"))

# Data visualization ----------------------------------------------------------
library(ggplot2)
ggplot(data = houses, aes(x = price)) +
  geom_histogram(bins = 40)
ggplot(data = houses, aes(y = price, x = waterfront, fill = waterfront)) +
  geom_boxplot()
ggplot(data = houses, aes(x = age, y = price)) +
  geom_point(col = "purple") +
  geom_smooth(method = "lm", se = FALSE)
ggplot(data = houses, aes(x = living_area, y = price, col = factor(rooms))) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE)

# Train/test split ------------------------------------------------------------
library(caTools)
# NOTE(review): add set.seed() here if a reproducible split is required.
split_index <- sample.split(houses$price, SplitRatio = 0.65)
train <- subset(houses, split_index == TRUE)   # TRUE/FALSE, not T/F
test  <- subset(houses, split_index == FALSE)
nrow(train)
nrow(test)

# Predicts on test_data, prints the actual/predicted/error comparison table,
# and returns the root mean squared error.  Factored out because the original
# script duplicated these steps verbatim for each model.
evaluate_rmse <- function(model, test_data) {
  compare_result <- data.frame(
    actual    = test_data$price,
    predicted = predict(model, test_data)
  )
  compare_result$error <- compare_result$actual - compare_result$predicted
  print(compare_result)
  sqrt(mean(compare_result$error^2))
}

# Model 1: all predictors
modl <- lm(price ~ ., data = train)
rmse1 <- evaluate_rmse(modl, test)
print(rmse1)

# Model 2: same model without fireplaces, sewer, and fuel
mod2 <- lm(price ~ . - fireplaces - sewer - fuel, data = train)
rmse2 <- evaluate_rmse(mod2, test)
print(rmse2)

# Summary of both models
summary(modl)
summary(mod2) | /RDataMining.r | no_license | priyankkumar218/MISC | R | false | false | 1,881 | r | read.csv("E:/houses.csv")->houses
str(houses)
summary(houses)
# Data Cleaning
library(dplyr)
houses %>% select(c(-1, -2))->houses
houses $air_cond<-factor(houses$air_cond, labels = c("No", "Yes"))
houses $construction<-factor(houses$construction, labels = c("No", "Yes"))
houses $waterfront<-factor(houses$waterfront, labels = c("No", "Yes"))
houses $fuel<-factor(houses$fuel, labels = c("Gas", "Electric", "Oil"))
houses $sewer<-factor(houses$sewer, labels = c("None", "Private", "Public"))
#Data Vizualization
library(ggplot2)
ggplot(data = houses, aes(x=price))+geom_histogram(bins=40)
ggplot(data = houses, aes(y=price, x=waterfront, fill=waterfront))+geom_boxplot()
ggplot(data = houses, aes(x=age, y=price))+geom_point(col="purple")+geom_smooth(method="lm", se=F)
ggplot(data = houses, aes(x=living_area, y=price, col=factor(rooms)))+geom_point()+geom_smooth(method="lm", se=F)
#Splitting Data
library(caTools)
sample.split(houses$price, SplitRatio = 0.65)->split_index
train<-subset(houses, split_index==T)
test<-subset(houses, split_index==F)
nrow(train)
nrow(test)
#Model Building
lm(price~., data=train)->modl
predict(modl, test) -> result
print(result)
cbind(actual=test$price, predicted=result)->compare_result
as.data.frame(compare_result)->compare_result
compare_result$actual - compare_result$predicted -> error
cbind(compare_result, error)
sqrt(mean(compare_result$error^2))->rmse1
print(rmse1)
#Another model
lm(price~.-fireplaces-sewer-fuel, data=train)->mod2
predict(mod2, test) -> result2
cbind(actual=test$price, predicted=result2)->compare_result2
as.data.frame(compare_result2)->compare_result2
compare_result2$actual - compare_result2$predicted -> error2
cbind(compare_result2, error2)
sqrt(mean(compare_result2$error2^2))->rmse2
print(rmse2)
#Summary of both the models
summary(modl)
summary(mod2) |
context("Tidy functions")
# A tidied function must behave exactly like the original for plain
# (non-quasiquoted) positional, variadic, and named arguments.
test_that("ordinary function calls are made as usual", {
  f <- function(x, y = x, ..., z = "z") list(x, y, z, ...)
  f_tidy <- tidy(f)
  expect_identical(f_tidy(1), f(1))
  expect_identical(f_tidy(1, 2), f(1, 2))
  expect_identical(f_tidy(1, 2, 3), f(1, 2, 3))
  expect_identical(f_tidy(1, z = 0), f(1, z = 0))
})
# Tidied functions accept rlang unquoting (!!) of both bare values and
# quosures; a quosure must be evaluated in its captured environment.
test_that("arguments can be unquoted", {
  f <- function(x, ...) c(x, ...)
  f_tidy <- tidy(f)
  x <- "value"
  # Quosure whose expression refers to a variable local to this block.
  xq <- local({
    val <- "quosured value"
    rlang::quo(val)
  })
  expect_identical(f_tidy(!!x), "value")
  expect_identical(f_tidy("a", !!x), c("a", "value"))
  expect_identical(f_tidy(!!xq), "quosured value")
})
# Tidied functions accept list splicing (!!!), including splicing named
# list elements into named parameters.
test_that("arguments can be spliced", {
  f <- function(x, y, ..., z = "z") c(x, y, z, ...)
  f_tidy <- tidy(f)
  expect_identical(f_tidy("x", !!! list("y")), f("x", "y"))
  expect_identical(f_tidy("x", !!! list("y", "w")), f("x", "y", "w"))
  expect_identical(f_tidy(!!! list(y = "y", "x")), c("x", "y", "z"))
})
# Applying tidy() twice is the same as applying it once, both structurally
# (expect_equal on the functions) and behaviourally (identical call results).
test_that("tidying is idempotent", {
  f <- function(x, y = x, ..., z = "z") list(x, y, z, ...)
  f_t <- tidy(f)
  f_tt <- tidy(tidy(f))
  expect_equal(f_tt, f_t)
  expect_identical(f_tt(1), f_t(1))
  expect_identical(f_tt(1, 2), f_t(1, 2))
  expect_identical(f_tt(1, 2, 3), f_t(1, 2, 3))
  expect_identical(f_tt(1, z = 0), f_t(1, z = 0))
})
# Functions with no formal arguments have nothing to tidy, so tidy() must
# return them unchanged -- covering anonymous closures, a named closure
# (Sys.time), and a primitive (globalenv).
test_that("functions with void formals are vacuously tidy", {
  void_fs <- list(function() NULL, closure = Sys.time, primitive = globalenv)
  for (f in void_fs)
    expect_identical(tidy(f), f)
})
# untidy() must invert tidy(): the round-tripped function is structurally
# equal to the original, no longer flagged as tidy, and call-compatible.
test_that("untidying undoes tidying", {
  f <- function(x, y = x, ..., z = "z") list(x, y, z, ...)
  f_ <- untidy(tidy(f))
  expect_equal(f_, f)
  expect_false(is_tidy(f_))
  # Bug fix: these assertions previously compared f_ to itself
  # (e.g. expect_identical(f_(1), f_(1))), which is vacuously true and
  # tests nothing.  Compare against the original f instead.
  expect_identical(f_(1), f(1))
  expect_identical(f_(1, 2), f(1, 2))
  expect_identical(f_(1, 2, 3), f(1, 2, 3))
  expect_identical(f_(1, z = 0), f(1, z = 0))
})
# Non-function inputs must be rejected with informative error messages.
# expect_errors_with_message() is, presumably, a project-local test helper
# asserting that every supplied expression fails with the given message --
# TODO confirm against the package's testthat helper files.
test_that("error is signaled when attempting to tidy or untidy a non-function", {
  foo <- quote(foo)
  expect_errors_with_message(
    "object 'foo' of mode 'function' was not found",
    tidy(foo),
    untidy(foo)
  )
  expect_errors_with_message(
    "'NULL' is not a function, character or symbol",
    tidy(NULL),
    untidy(NULL)
  )
})
| /tests/testthat/test-tidy.R | permissive | egnha/nofrills | R | false | false | 2,220 | r | context("Tidy functions")
test_that("ordinary function calls are made as usual", {
f <- function(x, y = x, ..., z = "z") list(x, y, z, ...)
f_tidy <- tidy(f)
expect_identical(f_tidy(1), f(1))
expect_identical(f_tidy(1, 2), f(1, 2))
expect_identical(f_tidy(1, 2, 3), f(1, 2, 3))
expect_identical(f_tidy(1, z = 0), f(1, z = 0))
})
test_that("arguments can be unquoted", {
f <- function(x, ...) c(x, ...)
f_tidy <- tidy(f)
x <- "value"
xq <- local({
val <- "quosured value"
rlang::quo(val)
})
expect_identical(f_tidy(!!x), "value")
expect_identical(f_tidy("a", !!x), c("a", "value"))
expect_identical(f_tidy(!!xq), "quosured value")
})
test_that("arguments can be spliced", {
f <- function(x, y, ..., z = "z") c(x, y, z, ...)
f_tidy <- tidy(f)
expect_identical(f_tidy("x", !!! list("y")), f("x", "y"))
expect_identical(f_tidy("x", !!! list("y", "w")), f("x", "y", "w"))
expect_identical(f_tidy(!!! list(y = "y", "x")), c("x", "y", "z"))
})
test_that("tidying is idempotent", {
f <- function(x, y = x, ..., z = "z") list(x, y, z, ...)
f_t <- tidy(f)
f_tt <- tidy(tidy(f))
expect_equal(f_tt, f_t)
expect_identical(f_tt(1), f_t(1))
expect_identical(f_tt(1, 2), f_t(1, 2))
expect_identical(f_tt(1, 2, 3), f_t(1, 2, 3))
expect_identical(f_tt(1, z = 0), f_t(1, z = 0))
})
test_that("functions with void formals are vacuously tidy", {
void_fs <- list(function() NULL, closure = Sys.time, primitive = globalenv)
for (f in void_fs)
expect_identical(tidy(f), f)
})
test_that("untidying undoes tidying", {
f <- function(x, y = x, ..., z = "z") list(x, y, z, ...)
f_ <- untidy(tidy(f))
expect_equal(f_, f)
expect_false(is_tidy(f_))
expect_identical(f_(1), f_(1))
expect_identical(f_(1, 2), f_(1, 2))
expect_identical(f_(1, 2, 3), f_(1, 2, 3))
expect_identical(f_(1, z = 0), f_(1, z = 0))
})
test_that("error is signaled when attempting to tidy or untidy a non-function", {
foo <- quote(foo)
expect_errors_with_message(
"object 'foo' of mode 'function' was not found",
tidy(foo),
untidy(foo)
)
expect_errors_with_message(
"'NULL' is not a function, character or symbol",
tidy(NULL),
untidy(NULL)
)
})
|
#setwd(as.character("C:/Users/miffka/Documents/!DataMining/RAnalysis"))
setwd("/media/glycosylase/EC6A2F256A2EEBD0/Documents and Settings/miffka/Documents/!DataMining/RBasics")
library(dplyr)
# Датафреймы!!!
# двумерная таблица с данными
# стандартный способ хранения данных в формате наблюдения/переменные
# строки - наблюдения, столбцы - переменные
# датафреймы наследуют свойства матрицы и списка (переменные могут быть разных типов)
# Создание датафрейма
df <- data.frame(x = 1:4, y = LETTERS[1:4], z = c(T, F))
str(df)
# Имена
df <- data.frame(x = 1:4, y = LETTERS[1:4], z = c(T, F), row.names = c("Alpha", "Bravo", "Charlie", "Delta"))
str(df)
df
rownames(df); colnames(df); dimnames(df)
# Размерности
nrow(df); ncol(df); dim(df)
# Особенности
length(df) #возвращает количество столбцов (переменных)
names(df) #возвращает имена столбцов
# этих функций стоит избегать
# Индексация
#как для матрицы
df[3:4, -1] #по элементам
df[c(F, T), c("z", "x")] #логическая индексация и по именам
#ВНИМАНИЕ - происходит схлопывание размерности
df[, 1]; df[, 1, drop = F]
#как для списка
df$z; df[[3]]; df[["z"]]
#как для матриц - фильтрация по условию
df[df$x > 2,]
#функция subset
subset(df, x > 2) #можно не дублировать название датафрейма и не обращаться к переменной по имени
subset(df, x > 2, select = c(x, z)) #можем выбирать нужные столбцы по имени
# Комбинирование
rbind(df, data.frame(x = 5:6, y = c("K", "Z"), z = T, row.names = c("Kappa", "Zulu")))
# необходимо, чтобы имена у двух датафреймов совпадали в точности
cbind(df, data.frame(season = c("Summer", "Autumn", "Winter", "Spring"), temp = c(20, 5, -10, 5)))
# необходимо, чтобы длина столбцов совпадала в точности
df
df_salary <- data.frame(x = c(3, 2, 6, 1), salary = c(100, 1000, 300, 500))
#объединение по ключу x
merge(df, df_salary, by = "x")
#результат действия - все полные записи из обоих записей, которая определяется ключом x
# объединение разными способами - найти по запросу "r join"
# Задача 2 - делаем из матрицы датафрейм
typeof(as.matrix(df))
# Задача 3 - анализ данных attitude
str(attitude)
sort(attitude$learning, decr = T)
?arrange
task3 <- arrange(attitude, desc(learning))[1:5, ]
task3$task <- apply(task3, 1, function(x) sum(x[c(2, 5, 7)]))
task31 <- task3[task3$task == max(task3$task),]$learning
?which
rownames(attitude[attitude$learning == task31,])
# Примеры работы с данными
# Импорт данных
# Из файла
#csv or tab separated values
#readlines, scan - чтение неструктурированного текста
#xml, html - library(XML), library(httr)
#json, yaml - library(rjson), library(readxl)
#Excel - library(XLConnect), library(readxl)
#SAS, Stats, SPSS, MATLAB - library(foreign), library(sas7bdat)
# Из web - library(rvest)
# Из баз данных
#реляционные - library(DBI), library(RSQLite)
#нереляционные - library(rmongodb)
# Чтение табличных данных
#read.table
#file - имя файла
#header - наличие или отсутствие заголовка в первой строке
#sep - разделитель значений
#quote - символы, обозначающие кавычки (для строк)
#na.strings - строки, кодирующие пропущенное значение
#colClasses - типы столбцов (для быстродействия и указания типа - строка-фактор-дата/время)
#comment.char - символ, обозначающий комментарии
#skip - количество строк, пропускаемых с начала файла
# Функции read.csv, read.csv2, read.delim, read.delim2 - это тот же read.table с нужными умолчаниями
# Типичные шаги предобработки данных
#импорт в датафрейм
#очистка значений, проверка типов
#работа со строками - имена, переменные строкового типа, факторы
#пропущенные значения - идентификация, способ обработки
#манипулирование переменными - преобразование, создание, удаление
#подсчет описательных статистик - split-apply-combine
#визуализация данных
#экспорт
?split
?combine
# Очистка значение, проверка типов
#Типы переменных, на которых легко ошибиться при импорте
#числовые переменные становятся строковыми
# пропущенные значения отмечены не как NA (na.strings = c("NA", "Not Available", etc.))
# из-за неверно указанных разделителя, десятичного знака (sep = ",", dec = ".")
# из-за кавычек, сопроводительного текста или комментариев
#Строковые типы становятся факторами либо наоборот
# as.character, as.factor
#Тип дата/время остается строковым as.POSIXct, as.POSIXlt, as.Date
# Функции str, summary, head и tail помогут определить, все ли в порядке
# Работа с переменными
#Удаление наблюдений с пропущенными значениями
# df[complete.cases(df),]
# na.omit(df)
#Замена NA на некоторые значения может быть потенцильно опасным
# замена средним может вносить смещение в данные
# заполнение нулями в большинстве случаев некорректно в принципе!
#Создание, изменение, удаление переменных выполняется конструкциями
# df$new_var <- <...>
# df$old_var <- f(df$old_var)
# df$old_var <- NULL (удаляем переменную)
#Работа с большим количеством переменных
?within
# Экспорт
#write.table, write.csv, write.csv2
#Если массив большой, лучше отделять этап предобработки данных
# отдельным файлом .R - скрипт очистки и начальный файл
# отдельным файлом с предобработанными ("чистыми") данными
# Массив данных
# http://alaska.usgs.gov/products/data.php&dataid=5
# https://github.com/tonytonov/Rcourse/blob/master/avianHabitat.csv
#Датасет - растительность в местах обитания охраняемых видов птиц
# Задача 4 - выбор корректных строк
attitude[attitude$rating < 50, names(attitude) != "rating"] #корректно
attitude[rating < 50, names(attitude) != "rating"] #не работает
subset(sel = -rating, sub = rating <50, attitude) #корректно
subset(attitude, rating < 50, -rating) #корректно
attitude[attitude$rating < 50, -"rating"] #не работает
# Задача 5 - визуальная инспекция данных quakes
?quakes
str(quakes)
?median
sapply(quakes, function(x) c(median(x), mean(x), max(x), min(x)))
View(quakes)
quakes[nrow(quakes)-1,]
# Работаем с данными
avian <- read.csv("https://raw.githubusercontent.com/tonytonov/Rcourse/master/R%20programming/avianHabitat.csv")
str(avian)
#Проверка данных
summary(avian) #здесь можем заметить всякие косяки у данных
any(!complete.cases(avian)) #ищем пропуски
any(avian$PDB < 0) #вот так ищем значения не в диапазоне
any(avian$PDB > 100)
# пишем функцию для проверки любого вектора
# Returns TRUE if any element of x lies outside the [0, 100] percentage
# range, FALSE otherwise.
#
# x     : numeric vector of percentage values.
# na.rm : if TRUE, NA entries are ignored.  The backward-compatible default
#         FALSE preserves the original behaviour, where an NA in x
#         propagates and the result may be NA.
check_percent_range <- function(x, na.rm = FALSE) {
  any(x < 0 | x > 100, na.rm = na.rm)
}
check_percent_range(avian$PW)
#Трансформация переменных
names(avian)
coverage_variables <- names(avian)[-(1:4)][c(T, F)]
coverage_variables # эта переменная содержит все имена процентных переменных
avian$total_cov <- rowSums(avian[, coverage_variables])
summary(avian$total_cov)
# Задача 6 - добавление данных
task6 <- read.csv("/media/glycosylase/EC6A2F256A2EEBD0/Users/miffka/Documents/!DataMining/RBasics/202_task6.csv",
sep = ";", dec = ".", na.strings = "Don't remember", comment.char = "%")
?read.csv
str(task6)
task6$Observer <- factor(c("KL"), levels = c("JT", "RA", "RR", "KL"))
str(task6)
task60 <- rbind(avian, task6)
str(task60)
summary(task60)
coverage_variables <- names(task60)[-(1:4)][c(T, F)]
task60$totcov <- rowSums(task60[, coverage_variables])
summary(task60$totcov)
# Задача 7 - сортировка растений по высоте (максимальной высоте)
avian <- read.csv("https://raw.githubusercontent.com/tonytonov/Rcourse/master/R%20programming/avianHabitat.csv")
str(avian)
heigth_var <- names(avian)[-(1:5)][c(T, F)]
avian[, heigth_var]
ans7 <- sapply(avian[, heigth_var], max)
sort(ans7)
| /202_dataframes.R | no_license | Miffka/RBasics | R | false | false | 10,222 | r | #setwd(as.character("C:/Users/miffka/Documents/!DataMining/RAnalysis"))
setwd("/media/glycosylase/EC6A2F256A2EEBD0/Documents and Settings/miffka/Documents/!DataMining/RBasics")
library(dplyr)
# Датафреймы!!!
# двумерная таблица с данными
# стандартный способ хранения данных в формате наблюдения/переменные
# строки - наблюдения, столбцы - переменные
# датафреймы наследуют свойства матрицы и списка (переменные могут быть разных типов)
# Создание датафрейма
df <- data.frame(x = 1:4, y = LETTERS[1:4], z = c(T, F))
str(df)
# Имена
df <- data.frame(x = 1:4, y = LETTERS[1:4], z = c(T, F), row.names = c("Alpha", "Bravo", "Charlie", "Delta"))
str(df)
df
rownames(df); colnames(df); dimnames(df)
# Размерности
nrow(df); ncol(df); dim(df)
# Особенности
length(df) #возвращает количество столбцов (переменных)
names(df) #возвращает имена столбцов
# этих функций стоит избегать
# Индексация
#как для матрицы
df[3:4, -1] #по элементам
df[c(F, T), c("z", "x")] #логическая индексация и по именам
#ВНИМАНИЕ - происходит схлопывание размерности
df[, 1]; df[, 1, drop = F]
#как для списка
df$z; df[[3]]; df[["z"]]
#как для матриц - фильтрация по условию
df[df$x > 2,]
#функция subset
subset(df, x > 2) #можно не дублировать название датафрейма и не обращаться к переменной по имени
subset(df, x > 2, select = c(x, z)) #можем выбирать нужные столбцы по имени
# Комбинирование
rbind(df, data.frame(x = 5:6, y = c("K", "Z"), z = T, row.names = c("Kappa", "Zulu")))
# необходимо, чтобы имена у двух датафреймов совпадали в точности
cbind(df, data.frame(season = c("Summer", "Autumn", "Winter", "Spring"), temp = c(20, 5, -10, 5)))
# необходимо, чтобы длина столбцов совпадала в точности
df
df_salary <- data.frame(x = c(3, 2, 6, 1), salary = c(100, 1000, 300, 500))
#объединение по ключу x
merge(df, df_salary, by = "x")
#результат действия - все полные записи из обоих записей, которая определяется ключом x
# объединение разными способами - найти по запросу "r join"
# Задача 2 - делаем из матрицы датафрейм
typeof(as.matrix(df))
# Задача 3 - анализ данных attitude
str(attitude)
sort(attitude$learning, decr = T)
?arrange
task3 <- arrange(attitude, desc(learning))[1:5, ]
task3$task <- apply(task3, 1, function(x) sum(x[c(2, 5, 7)]))
task31 <- task3[task3$task == max(task3$task),]$learning
?which
rownames(attitude[attitude$learning == task31,])
# Примеры работы с данными
# Импорт данных
# Из файла
#csv or tab separated values
#readlines, scan - чтение неструктурированного текста
#xml, html - library(XML), library(httr)
#json, yaml - library(rjson), library(readxl)
#Excel - library(XLConnect), library(readxl)
#SAS, Stats, SPSS, MATLAB - library(foreign), library(sas7bdat)
# Из web - library(rvest)
# Из баз данных
#реляционные - library(DBI), library(RSQLite)
#нереляционные - library(rmongodb)
# Чтение табличных данных
#read.table
#file - имя файла
#header - наличие или отсутствие заголовка в первой строке
#sep - разделитель значений
#quote - символы, обозначающие кавычки (для строк)
#na.strings - строки, кодирующие пропущенное значение
#colClasses - типы столбцов (для быстродействия и указания типа - строка-фактор-дата/время)
#comment.char - символ, обозначающий комментарии
#skip - количество строк, пропускаемых с начала файла
# Функции read.csv, read.csv2, read.delim, read.delim2 - это тот же read.table с нужными умолчаниями
# Типичные шаги предобработки данных
#импорт в датафрейм
#очистка значений, проверка типов
#работа со строками - имена, переменные строкового типа, факторы
#пропущенные значения - идентификация, способ обработки
#манипулирование переменными - преобразование, создание, удаление
#подсчет описательных статистик - split-apply-combine
#визуализация данных
#экспорт
?split
?combine
# Очистка значение, проверка типов
#Типы переменных, на которых легко ошибиться при импорте
#числовые переменные становятся строковыми
# пропущенные значения отмечены не как NA (na.strings = c("NA", "Not Available", etc.))
# из-за неверно указанных разделителя, десятичного знака (sep = ",", dec = ".")
# из-за кавычек, сопроводительного текста или комментариев
#Строковые типы становятся факторами либо наоборот
# as.character, as.factor
#Тип дата/время остается строковым as.POSIXct, as.POSIXlt, as.Date
# Функции str, summary, head и tail помогут определить, все ли в порядке
# Работа с переменными
#Удаление наблюдений с пропущенными значениями
# df[complete.cases(df),]
# na.omit(df)
#Замена NA на некоторые значения может быть потенцильно опасным
# замена средним может вносить смещение в данные
# заполнение нулями в большинстве случаев некорректно в принципе!
#Создание, изменение, удаление переменных выполняется конструкциями
# df$new_var <- <...>
# df$old_var <- f(df$old_var)
# df$old_var <- NULL (удаляем переменную)
#Работа с большим количеством переменных
?within
# Экспорт
#write.table, write.csv, write.csv2
#Если массив большой, лучше отделять этап предобработки данных
# отдельным файлом .R - скрипт очистки и начальный файл
# отдельным файлом с предобработанными ("чистыми") данными
# Массив данных
# http://alaska.usgs.gov/products/data.php&dataid=5
# https://github.com/tonytonov/Rcourse/blob/master/avianHabitat.csv
#Датасет - растительность в местах обитания охраняемых видов птиц
# Задача 4 - выбор корректных строк
attitude[attitude$rating < 50, names(attitude) != "rating"] #корректно
attitude[rating < 50, names(attitude) != "rating"] #не работает
subset(sel = -rating, sub = rating <50, attitude) #корректно
subset(attitude, rating < 50, -rating) #корректно
attitude[attitude$rating < 50, -"rating"] #не работает
# Задача 5 - визуальная инспекция данных quakes
?quakes
str(quakes)
?median
sapply(quakes, function(x) c(median(x), mean(x), max(x), min(x)))
View(quakes)
quakes[nrow(quakes)-1,]
# Работаем с данными
avian <- read.csv("https://raw.githubusercontent.com/tonytonov/Rcourse/master/R%20programming/avianHabitat.csv")
str(avian)
#Проверка данных
summary(avian) #здесь можем заметить всякие косяки у данных
any(!complete.cases(avian)) #ищем пропуски
any(avian$PDB < 0) #вот так ищем значения не в диапазоне
any(avian$PDB > 100)
# пишем функцию для проверки любого вектора
check_percent_range <- function(x) {
any(x < 0 | x > 100)
}
check_percent_range(avian$PW)
#Трансформация переменных
names(avian)
coverage_variables <- names(avian)[-(1:4)][c(T, F)]
coverage_variables # эта переменная содержит все имена процентных переменных
avian$total_cov <- rowSums(avian[, coverage_variables])
summary(avian$total_cov)
# Задача 6 - добавление данных
task6 <- read.csv("/media/glycosylase/EC6A2F256A2EEBD0/Users/miffka/Documents/!DataMining/RBasics/202_task6.csv",
sep = ";", dec = ".", na.strings = "Don't remember", comment.char = "%")
?read.csv
str(task6)
task6$Observer <- factor(c("KL"), levels = c("JT", "RA", "RR", "KL"))
str(task6)
task60 <- rbind(avian, task6)
str(task60)
summary(task60)
coverage_variables <- names(task60)[-(1:4)][c(T, F)]
task60$totcov <- rowSums(task60[, coverage_variables])
summary(task60$totcov)
# Задача 7 - сортировка растений по высоте (максимальной высоте)
avian <- read.csv("https://raw.githubusercontent.com/tonytonov/Rcourse/master/R%20programming/avianHabitat.csv")
str(avian)
heigth_var <- names(avian)[-(1:5)][c(T, F)]
avian[, heigth_var]
ans7 <- sapply(avian[, heigth_var], max)
sort(ans7)
|
#############################
# < Ziwei Meng >
# STAT W4240
# Homework <HW 06> , Problem <Problem 6>
# < Wednesday, December 9 >
#############################

# Set work path.
# NOTE(review): hard-coded setwd() and rm(list = ls()) are discouraged in
# shared scripts; retained here only because this is a standalone homework
# file with a machine-specific path.
setwd("D:/Rworkspace")
# Clear workspace.
rm(list = ls())

# (a) Six observations in two dimensions -------------------------------------
X <- matrix(c(1, 1, 0, 5, 6, 4, 4, 3, 4, 1, 2, 0), nrow = 6, ncol = 2)
plot(X[, 1], X[, 2], pch = 4)

# (b) Random initial cluster assignment --------------------------------------
set.seed(59)
label <- sample(2, nrow(X), replace = TRUE)  # TRUE, not the reassignable T
label
pchl <- 3 * label - 2                        # distinct plotting symbol per cluster
plot(X[, 1], X[, 2], pch = pchl, cex = 2)

# (c) Centroid of each cluster ------------------------------------------------
# colMeans() over each cluster's rows replaces the element-wise
# c(mean(...), mean(...)) construction; drop = FALSE keeps a matrix even
# when a cluster has a single member.
c1 <- colMeans(X[label == 1, , drop = FALSE])
c2 <- colMeans(X[label == 2, , drop = FALSE])
plot(X[, 1], X[, 2], pch = pchl, cex = 2)
points(c1[1], c1[2], col = "red", pch = 1, cex = 2.2)
points(c2[1], c2[2], col = "red", pch = 4, cex = 2.2)

# (d) Reassign each point to a cluster (hard-coded assignment) ----------------
label1 <- c(1, 1, 1, 2, 2, 2)
plot(X[, 1], X[, 2], pch = (3 * label1 - 2), cex = 2)
points(c1[1], c1[2], col = "red", pch = 1, cex = 2.5)
points(c2[1], c2[2], col = "red", pch = 4, cex = 2.5)

# (e) Recompute the centroids under the new assignment ------------------------
c3 <- colMeans(X[label1 == 1, , drop = FALSE])
c4 <- colMeans(X[label1 == 2, , drop = FALSE])
plot(X[, 1], X[, 2], pch = (3 * label1 - 2), cex = 2)
points(c3[1], c3[2], col = "red", pch = 1, cex = 2.2)
points(c4[1], c4[2], col = "red", pch = 4, cex = 2.2)

# (f) Final clustering, coloured by cluster -----------------------------------
plot(X[, 1], X[, 2], pch = (3 * label1 - 2), col = (2 * label1), cex = 2)
| /hw06_q6.R | no_license | ZiweiMeng/W4240_DataMining | R | false | false | 1,385 | r | #############################
# < Ziwei Meng >
# STAT W4240
# Homework <HW 06> , Problem <Problem 6>
# < Wednesday, December 9 >
#############################
#set work path
setwd("D:/Rworkspace")
#clear workspace
rm(list=ls())
#####################
X = matrix(c(1,1,0,5,6,4,4,3,4,1,2,0),6,2)
plot(X[,1],X[,2],pch=4)
########################
set.seed(59)
label = sample(2, nrow(X), replace = T)
label
pchl = 3*label - 2
plot(X[,1], X[,2], pch = pchl, cex = 2)
#######################
c1 = c(mean(X[label == 1, 1]), mean(X[label == 1, 2]))
c2 = c(mean(X[label == 2, 1]), mean(X[label == 2, 2]))
plot(X[,1], X[,2], pch = pchl, cex = 2)
points(c1[1], c1[2], col = "red", pch = 1,cex=2.2)
points(c2[1], c2[2], col = "red", pch = 4,cex=2.2)
#######################################
label1 = c(1, 1, 1, 2, 2, 2)
plot(X[,1], X[,2], pch = (3*label1-2), cex = 2)
points(c1[1], c1[2], col = "red", pch = 1,cex=2.5)
points(c2[1], c2[2], col = "red", pch = 4,cex=2.5)
########################################
c3 = c(mean(X[label1 == 1, 1]), mean(X[label1 == 1, 2]))
c4 = c(mean(X[label1 == 2, 1]), mean(X[label1 == 2, 2]))
plot(X[,1], X[,2], pch = (3*label1-2), cex = 2)
points(c3[1], c3[2], col = "red", pch = 1,cex=2.2)
points(c4[1], c4[2], col = "red", pch = 4,cex=2.2)
#########################################
plot(X[,1], X[,2], pch = (3*label1-2), col = (2*label1), cex = 2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.